diff --git a/[refs] b/[refs] index ec8f6646a949..09b82392d709 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: e4e88f31bcb5f05f24b9ae518d4ecb44e1a7774d +refs/heads/master: a2a3dfb8efc5d14cf39358ae0ec1da39667c2e6c diff --git a/trunk/Documentation/DocBook/debugobjects.tmpl b/trunk/Documentation/DocBook/debugobjects.tmpl index 24979f691e3e..08ff908aa7a2 100644 --- a/trunk/Documentation/DocBook/debugobjects.tmpl +++ b/trunk/Documentation/DocBook/debugobjects.tmpl @@ -96,7 +96,6 @@ debug_object_deactivate debug_object_destroy debug_object_free - debug_object_assert_init Each of these functions takes the address of the real object and a pointer to the object type specific debug description @@ -274,26 +273,6 @@ debug checks. - - - debug_object_assert_init - - This function is called to assert that an object has been - initialized. - - - When the real object is not tracked by debugobjects, it calls - fixup_assert_init of the object type description structure - provided by the caller, with the hardcoded object state - ODEBUG_NOT_AVAILABLE. The fixup function can correct the problem - by calling debug_object_init and other specific initializing - functions. - - - When the real object is already tracked by debugobjects it is - ignored. - - Fixup functions @@ -402,35 +381,6 @@ statistics. - - fixup_assert_init - - This function is called from the debug code whenever a problem - in debug_object_assert_init is detected. - - - Called from debug_object_assert_init() with a hardcoded state - ODEBUG_STATE_NOTAVAILABLE when the object is not found in the - debug bucket. - - - The function returns 1 when the fixup was successful, - otherwise 0. The return value is used to update the - statistics. - - - Note, this function should make sure debug_object_init() is - called before returning. - - - The handling of statically initialized objects is a special - case. The fixup function should check if this is a legitimate - case of a statically initialized object or not. In this case only - debug_object_init() should be called to make the object known to - the tracker. Then the function should return 0 because this is not - a real fixup. - - Known Bugs And Assumptions diff --git a/trunk/Documentation/RCU/checklist.txt b/trunk/Documentation/RCU/checklist.txt index bff2d8be1e18..0c134f8afc6f 100644 --- a/trunk/Documentation/RCU/checklist.txt +++ b/trunk/Documentation/RCU/checklist.txt @@ -328,12 +328,6 @@ over a rather long period of time, but improvements are always welcome! RCU rather than SRCU, because RCU is almost always faster and easier to use than is SRCU. - If you need to enter your read-side critical section in a - hardirq or exception handler, and then exit that same read-side - critical section in the task that was interrupted, then you need - to srcu_read_lock_raw() and srcu_read_unlock_raw(), which avoid - the lockdep checking that would otherwise this practice illegal. - Also unlike other forms of RCU, explicit initialization and cleanup is required via init_srcu_struct() and cleanup_srcu_struct(). These are passed a "struct srcu_struct" diff --git a/trunk/Documentation/RCU/rcu.txt b/trunk/Documentation/RCU/rcu.txt index bf778332a28f..31852705b586 100644 --- a/trunk/Documentation/RCU/rcu.txt +++ b/trunk/Documentation/RCU/rcu.txt @@ -38,11 +38,11 @@ o How can the updater tell when a grace period has completed Preemptible variants of RCU (CONFIG_TREE_PREEMPT_RCU) get the same effect, but require that the readers manipulate CPU-local - counters. 
These counters allow limited types of blocking within - RCU read-side critical sections. SRCU also uses CPU-local - counters, and permits general blocking within RCU read-side - critical sections. These variants of RCU detect grace periods - by sampling these counters. + counters. These counters allow limited types of blocking + within RCU read-side critical sections. SRCU also uses + CPU-local counters, and permits general blocking within + RCU read-side critical sections. These two variants of + RCU detect grace periods by sampling these counters. o If I am running on a uniprocessor kernel, which can only do one thing at a time, why should I wait for a grace period? diff --git a/trunk/Documentation/RCU/stallwarn.txt b/trunk/Documentation/RCU/stallwarn.txt index 083d88cbc089..4e959208f736 100644 --- a/trunk/Documentation/RCU/stallwarn.txt +++ b/trunk/Documentation/RCU/stallwarn.txt @@ -101,11 +101,6 @@ o A CPU-bound real-time task in a CONFIG_PREEMPT_RT kernel that CONFIG_TREE_PREEMPT_RCU case, you might see stall-warning messages. -o A hardware or software issue shuts off the scheduler-clock - interrupt on a CPU that is not in dyntick-idle mode. This - problem really has happened, and seems to be most likely to - result in RCU CPU stall warnings for CONFIG_NO_HZ=n kernels. - o A bug in the RCU implementation. o A hardware failure. This is quite unlikely, but has occurred @@ -114,11 +109,12 @@ o A hardware failure. This is quite unlikely, but has occurred This resulted in a series of RCU CPU stall warnings, eventually leading the realization that the CPU had failed. -The RCU, RCU-sched, and RCU-bh implementations have CPU stall warning. -SRCU does not have its own CPU stall warnings, but its calls to -synchronize_sched() will result in RCU-sched detecting RCU-sched-related -CPU stalls. Please note that RCU only detects CPU stalls when there is -a grace period in progress. No grace period, no CPU stall warnings. +The RCU, RCU-sched, and RCU-bh implementations have CPU stall +warning. SRCU does not have its own CPU stall warnings, but its +calls to synchronize_sched() will result in RCU-sched detecting +RCU-sched-related CPU stalls. Please note that RCU only detects +CPU stalls when there is a grace period in progress. No grace period, +no CPU stall warnings. To diagnose the cause of the stall, inspect the stack traces. The offending function will usually be near the top of the stack. diff --git a/trunk/Documentation/RCU/torture.txt b/trunk/Documentation/RCU/torture.txt index d67068d0d2b9..783d6c134d3f 100644 --- a/trunk/Documentation/RCU/torture.txt +++ b/trunk/Documentation/RCU/torture.txt @@ -61,24 +61,11 @@ nreaders This is the number of RCU reading threads supported. To properly exercise RCU implementations with preemptible read-side critical sections. -onoff_interval - The number of seconds between each attempt to execute a - randomly selected CPU-hotplug operation. Defaults to - zero, which disables CPU hotplugging. In HOTPLUG_CPU=n - kernels, rcutorture will silently refuse to do any - CPU-hotplug operations regardless of what value is - specified for onoff_interval. - shuffle_interval The number of seconds to keep the test threads affinitied to a particular subset of the CPUs, defaults to 3 seconds. Used in conjunction with test_no_idle_hz. -shutdown_secs The number of seconds to run the test before terminating - the test and powering off the system. The default is - zero, which disables test termination and system shutdown. - This capability is useful for automated testing. 
- stat_interval The number of seconds between output of torture statistics (via printk()). Regardless of the interval, statistics are printed when the module is unloaded. diff --git a/trunk/Documentation/RCU/trace.txt b/trunk/Documentation/RCU/trace.txt index 49587abfc2f7..aaf65f6c6cd7 100644 --- a/trunk/Documentation/RCU/trace.txt +++ b/trunk/Documentation/RCU/trace.txt @@ -105,10 +105,14 @@ o "dt" is the current value of the dyntick counter that is incremented or one greater than the interrupt-nesting depth otherwise. The number after the second "/" is the NMI nesting depth. + This field is displayed only for CONFIG_NO_HZ kernels. + o "df" is the number of times that some other CPU has forced a quiescent state on behalf of this CPU due to this CPU being in dynticks-idle state. + This field is displayed only for CONFIG_NO_HZ kernels. + o "of" is the number of times that some other CPU has forced a quiescent state on behalf of this CPU due to this CPU being offline. In a perfect world, this might never happen, but it diff --git a/trunk/Documentation/RCU/whatisRCU.txt b/trunk/Documentation/RCU/whatisRCU.txt index 6bbe8dcdc3da..6ef692667e2f 100644 --- a/trunk/Documentation/RCU/whatisRCU.txt +++ b/trunk/Documentation/RCU/whatisRCU.txt @@ -4,7 +4,6 @@ to start learning about RCU: 1. What is RCU, Fundamentally? http://lwn.net/Articles/262464/ 2. What is RCU? Part 2: Usage http://lwn.net/Articles/263130/ 3. RCU part 3: the RCU API http://lwn.net/Articles/264090/ -4. The RCU API, 2010 Edition http://lwn.net/Articles/418853/ What is RCU? @@ -835,8 +834,6 @@ SRCU: Critical sections Grace period Barrier srcu_read_lock synchronize_srcu N/A srcu_read_unlock synchronize_srcu_expedited - srcu_read_lock_raw - srcu_read_unlock_raw srcu_dereference SRCU: Initialization/cleanup @@ -858,33 +855,27 @@ list can be helpful: a. Will readers need to block? If so, you need SRCU. -b. Is it necessary to start a read-side critical section in a - hardirq handler or exception handler, and then to complete - this read-side critical section in the task that was - interrupted? If so, you need SRCU's srcu_read_lock_raw() and - srcu_read_unlock_raw() primitives. - -c. What about the -rt patchset? If readers would need to block +b. What about the -rt patchset? If readers would need to block in an non-rt kernel, you need SRCU. If readers would block in a -rt kernel, but not in a non-rt kernel, SRCU is not necessary. -d. Do you need to treat NMI handlers, hardirq handlers, +c. Do you need to treat NMI handlers, hardirq handlers, and code segments with preemption disabled (whether via preempt_disable(), local_irq_save(), local_bh_disable(), or some other mechanism) as if they were explicit RCU readers? If so, you need RCU-sched. -e. Do you need RCU grace periods to complete even in the face +d. Do you need RCU grace periods to complete even in the face of softirq monopolization of one or more of the CPUs? For example, is your code subject to network-based denial-of-service attacks? If so, you need RCU-bh. -f. Is your workload too update-intensive for normal use of +e. Is your workload too update-intensive for normal use of RCU, but inappropriate for other synchronization mechanisms? If so, consider SLAB_DESTROY_BY_RCU. But please be careful! -g. Otherwise, use RCU. +f. Otherwise, use RCU. Of course, this all assumes that you have determined that RCU is in fact the right tool for your job. 
diff --git a/trunk/Documentation/atomic_ops.txt b/trunk/Documentation/atomic_ops.txt index 27f2b21a9d5c..3bd585b44927 100644 --- a/trunk/Documentation/atomic_ops.txt +++ b/trunk/Documentation/atomic_ops.txt @@ -84,93 +84,6 @@ compiler optimizes the section accessing atomic_t variables. *** YOU HAVE BEEN WARNED! *** -Properly aligned pointers, longs, ints, and chars (and unsigned -equivalents) may be atomically loaded from and stored to in the same -sense as described for atomic_read() and atomic_set(). The ACCESS_ONCE() -macro should be used to prevent the compiler from using optimizations -that might otherwise optimize accesses out of existence on the one hand, -or that might create unsolicited accesses on the other. - -For example consider the following code: - - while (a > 0) - do_something(); - -If the compiler can prove that do_something() does not store to the -variable a, then the compiler is within its rights transforming this to -the following: - - tmp = a; - if (a > 0) - for (;;) - do_something(); - -If you don't want the compiler to do this (and you probably don't), then -you should use something like the following: - - while (ACCESS_ONCE(a) < 0) - do_something(); - -Alternatively, you could place a barrier() call in the loop. - -For another example, consider the following code: - - tmp_a = a; - do_something_with(tmp_a); - do_something_else_with(tmp_a); - -If the compiler can prove that do_something_with() does not store to the -variable a, then the compiler is within its rights to manufacture an -additional load as follows: - - tmp_a = a; - do_something_with(tmp_a); - tmp_a = a; - do_something_else_with(tmp_a); - -This could fatally confuse your code if it expected the same value -to be passed to do_something_with() and do_something_else_with(). - -The compiler would be likely to manufacture this additional load if -do_something_with() was an inline function that made very heavy use -of registers: reloading from variable a could save a flush to the -stack and later reload. To prevent the compiler from attacking your -code in this manner, write the following: - - tmp_a = ACCESS_ONCE(a); - do_something_with(tmp_a); - do_something_else_with(tmp_a); - -For a final example, consider the following code, assuming that the -variable a is set at boot time before the second CPU is brought online -and never changed later, so that memory barriers are not needed: - - if (a) - b = 9; - else - b = 42; - -The compiler is within its rights to manufacture an additional store -by transforming the above code into the following: - - b = 42; - if (a) - b = 9; - -This could come as a fatal surprise to other code running concurrently -that expected b to never have the value 42 if a was zero. To prevent -the compiler from doing this, write something like: - - if (a) - ACCESS_ONCE(b) = 9; - else - ACCESS_ONCE(b) = 42; - -Don't even -think- about doing this without proper use of memory barriers, -locks, or atomic operations if variable a can change at runtime! - -*** WARNING: ACCESS_ONCE() DOES NOT IMPLY A BARRIER! *** - Now, we move onto the atomic operation interfaces typically implemented with the help of assembly code. diff --git a/trunk/Documentation/cgroups/memory.txt b/trunk/Documentation/cgroups/memory.txt index 4d8774f6f48a..cc0ebc5241b3 100644 --- a/trunk/Documentation/cgroups/memory.txt +++ b/trunk/Documentation/cgroups/memory.txt @@ -44,8 +44,8 @@ Features: - oom-killer disable knob and oom-notifier - Root cgroup has no limit controls. 
- Kernel memory support is work in progress, and the current version provides - basically functionality. (See Section 2.7) + Kernel memory and Hugepages are not under control yet. We just manage + pages on LRU. To add more controls, we have to take care of performance. Brief summary of control files. @@ -72,9 +72,6 @@ Brief summary of control files. memory.oom_control # set/show oom controls. memory.numa_stat # show the number of memory usage per numa node - memory.kmem.tcp.limit_in_bytes # set/show hard limit for tcp buf memory - memory.kmem.tcp.usage_in_bytes # show current tcp buf memory allocation - 1. History The memory controller has a long history. A request for comments for the memory @@ -258,27 +255,6 @@ When oom event notifier is registered, event will be delivered. per-zone-per-cgroup LRU (cgroup's private LRU) is just guarded by zone->lru_lock, it has no lock of its own. -2.7 Kernel Memory Extension (CONFIG_CGROUP_MEM_RES_CTLR_KMEM) - -With the Kernel memory extension, the Memory Controller is able to limit -the amount of kernel memory used by the system. Kernel memory is fundamentally -different than user memory, since it can't be swapped out, which makes it -possible to DoS the system by consuming too much of this precious resource. - -Kernel memory limits are not imposed for the root cgroup. Usage for the root -cgroup may or may not be accounted. - -Currently no soft limit is implemented for kernel memory. It is future work -to trigger slab reclaim when those limits are reached. - -2.7.1 Current Kernel Memory resources accounted - -* sockets memory pressure: some sockets protocols have memory pressure -thresholds. The Memory Controller allows them to be controlled individually -per cgroup, instead of globally. - -* tcp memory pressure: sockets memory pressure for the tcp protocol. - 3. User Interface 0. Configuration diff --git a/trunk/Documentation/cgroups/net_prio.txt b/trunk/Documentation/cgroups/net_prio.txt deleted file mode 100644 index 01b322635591..000000000000 --- a/trunk/Documentation/cgroups/net_prio.txt +++ /dev/null @@ -1,53 +0,0 @@ -Network priority cgroup -------------------------- - -The Network priority cgroup provides an interface to allow an administrator to -dynamically set the priority of network traffic generated by various -applications - -Nominally, an application would set the priority of its traffic via the -SO_PRIORITY socket option. This however, is not always possible because: - -1) The application may not have been coded to set this value -2) The priority of application traffic is often a site-specific administrative - decision rather than an application defined one. - -This cgroup allows an administrator to assign a process to a group which defines -the priority of egress traffic on a given interface. Network priority groups can -be created by first mounting the cgroup filesystem. - -# mount -t cgroup -onet_prio none /sys/fs/cgroup/net_prio - -With the above step, the initial group acting as the parent accounting group -becomes visible at '/sys/fs/cgroup/net_prio'. This group includes all tasks in -the system. '/sys/fs/cgroup/net_prio/tasks' lists the tasks in this cgroup. - -Each net_prio cgroup contains two files that are subsystem specific - -net_prio.prioidx -This file is read-only, and is simply informative. It contains a unique integer -value that the kernel uses as an internal representation of this cgroup. 
- -net_prio.ifpriomap -This file contains a map of the priorities assigned to traffic originating from -processes in this group and egressing the system on various interfaces. It -contains a list of tuples in the form . Contents of this file -can be modified by echoing a string into the file using the same tuple format. -for example: - -echo "eth0 5" > /sys/fs/cgroups/net_prio/iscsi/net_prio.ifpriomap - -This command would force any traffic originating from processes belonging to the -iscsi net_prio cgroup and egressing on interface eth0 to have the priority of -said traffic set to the value 5. The parent accounting group also has a -writeable 'net_prio.ifpriomap' file that can be used to set a system default -priority. - -Priorities are set immediately prior to queueing a frame to the device -queueing discipline (qdisc) so priorities will be assigned prior to the hardware -queue selection being made. - -One usage for the net_prio cgroup is with mqprio qdisc allowing application -traffic to be steered to hardware/driver based traffic classes. These mappings -can then be managed by administrators or other networking protocols such as -DCBX. diff --git a/trunk/Documentation/devicetree/bindings/net/calxeda-xgmac.txt b/trunk/Documentation/devicetree/bindings/net/calxeda-xgmac.txt deleted file mode 100644 index 411727a3f82d..000000000000 --- a/trunk/Documentation/devicetree/bindings/net/calxeda-xgmac.txt +++ /dev/null @@ -1,15 +0,0 @@ -* Calxeda Highbank 10Gb XGMAC Ethernet - -Required properties: -- compatible : Should be "calxeda,hb-xgmac" -- reg : Address and length of the register set for the device -- interrupts : Should contain 3 xgmac interrupts. The 1st is main interrupt. - The 2nd is pwr mgt interrupt. The 3rd is low power state interrupt. - -Example: - -ethernet@fff50000 { - compatible = "calxeda,hb-xgmac"; - reg = <0xfff50000 0x1000>; - interrupts = <0 77 4 0 78 4 0 79 4>; -}; diff --git a/trunk/Documentation/devicetree/bindings/net/can/cc770.txt b/trunk/Documentation/devicetree/bindings/net/can/cc770.txt deleted file mode 100644 index 77027bf6460a..000000000000 --- a/trunk/Documentation/devicetree/bindings/net/can/cc770.txt +++ /dev/null @@ -1,53 +0,0 @@ -Memory mapped Bosch CC770 and Intel AN82527 CAN controller - -Note: The CC770 is a CAN controller from Bosch, which is 100% -compatible with the old AN82527 from Intel, but with "bugs" being fixed. - -Required properties: - -- compatible : should be "bosch,cc770" for the CC770 and "intc,82527" - for the AN82527. - -- reg : should specify the chip select, address offset and size required - to map the registers of the controller. The size is usually 0x80. - -- interrupts : property with a value describing the interrupt source - (number and sensitivity) required for the controller. - -Optional properties: - -- bosch,external-clock-frequency : frequency of the external oscillator - clock in Hz. Note that the internal clock frequency used by the - controller is half of that value. If not specified, a default - value of 16000000 (16 MHz) is used. - -- bosch,clock-out-frequency : slock frequency in Hz on the CLKOUT pin. - If not specified or if the specified value is 0, the CLKOUT pin - will be disabled. - -- bosch,slew-rate : slew rate of the CLKOUT signal. If not specified, - a resonable value will be calculated. - -- bosch,disconnect-rx0-input : see data sheet. - -- bosch,disconnect-rx1-input : see data sheet. - -- bosch,disconnect-tx1-output : see data sheet. - -- bosch,polarity-dominant : see data sheet. 
- -- bosch,divide-memory-clock : see data sheet. - -- bosch,iso-low-speed-mux : see data sheet. - -For further information, please have a look to the CC770 or AN82527. - -Examples: - -can@3,100 { - compatible = "bosch,cc770"; - reg = <3 0x100 0x80>; - interrupts = <2 0>; - interrupt-parent = <&mpic>; - bosch,external-clock-frequency = <16000000>; -}; diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/srio-rmu.txt b/trunk/Documentation/devicetree/bindings/powerpc/fsl/srio-rmu.txt deleted file mode 100644 index b9a8a2bcfae7..000000000000 --- a/trunk/Documentation/devicetree/bindings/powerpc/fsl/srio-rmu.txt +++ /dev/null @@ -1,163 +0,0 @@ -Message unit node: - -For SRIO controllers that implement the message unit as part of the controller -this node is required. For devices with RMAN this node should NOT exist. The -node is composed of three types of sub-nodes ("fsl-srio-msg-unit", -"fsl-srio-dbell-unit" and "fsl-srio-port-write-unit"). - -See srio.txt for more details about generic SRIO controller details. - - - compatible - Usage: required - Value type: - Definition: Must include "fsl,srio-rmu-vX.Y", "fsl,srio-rmu". - - The version X.Y should match the general SRIO controller's IP Block - revision register's Major(X) and Minor (Y) value. - - - reg - Usage: required - Value type: - Definition: A standard property. Specifies the physical address and - length of the SRIO configuration registers for message units - and doorbell units. - - - fsl,liodn - Usage: optional-but-recommended (for devices with PAMU) - Value type: - Definition: The logical I/O device number for the PAMU (IOMMU) to be - correctly configured for SRIO accesses. The property should - not exist on devices that do not support PAMU. - - The LIODN value is associated with all RMU transactions - (msg-unit, doorbell, port-write). - -Sub-Nodes for RMU: The RMU node is composed of multiple sub-nodes that -correspond to the actual sub-controllers in the RMU. The manual for a given -SoC will detail which and how many of these sub-controllers are implemented. - -Message Unit: - - - compatible - Usage: required - Value type: - Definition: Must include "fsl,srio-msg-unit-vX.Y", "fsl,srio-msg-unit". - - The version X.Y should match the general SRIO controller's IP Block - revision register's Major(X) and Minor (Y) value. - - - reg - Usage: required - Value type: - Definition: A standard property. Specifies the physical address and - length of the SRIO configuration registers for message units - and doorbell units. - - - interrupts - Usage: required - Value type: - Definition: Specifies the interrupts generated by this device. The - value of the interrupts property consists of one interrupt - specifier. The format of the specifier is defined by the - binding document describing the node's interrupt parent. - - A pair of IRQs are specified in this property. The first - element is associated with the transmit (TX) interrupt and the - second element is associated with the receive (RX) interrupt. - -Doorbell Unit: - - - compatible - Usage: required - Value type: - Definition: Must include: - "fsl,srio-dbell-unit-vX.Y", "fsl,srio-dbell-unit" - - The version X.Y should match the general SRIO controller's IP Block - revision register's Major(X) and Minor (Y) value. - - - reg - Usage: required - Value type: - Definition: A standard property. Specifies the physical address and - length of the SRIO configuration registers for message units - and doorbell units. 
- - - interrupts - Usage: required - Value type: - Definition: Specifies the interrupts generated by this device. The - value of the interrupts property consists of one interrupt - specifier. The format of the specifier is defined by the - binding document describing the node's interrupt parent. - - A pair of IRQs are specified in this property. The first - element is associated with the transmit (TX) interrupt and the - second element is associated with the receive (RX) interrupt. - -Port-Write Unit: - - - compatible - Usage: required - Value type: - Definition: Must include: - "fsl,srio-port-write-unit-vX.Y", "fsl,srio-port-write-unit" - - The version X.Y should match the general SRIO controller's IP Block - revision register's Major(X) and Minor (Y) value. - - - reg - Usage: required - Value type: - Definition: A standard property. Specifies the physical address and - length of the SRIO configuration registers for message units - and doorbell units. - - - interrupts - Usage: required - Value type: - Definition: Specifies the interrupts generated by this device. The - value of the interrupts property consists of one interrupt - specifier. The format of the specifier is defined by the - binding document describing the node's interrupt parent. - - A single IRQ that handles port-write conditions is - specified by this property. (Typically shared with error). - - Note: All other standard properties (see the ePAPR) are allowed - but are optional. - -Example: - rmu: rmu@d3000 { - compatible = "fsl,srio-rmu"; - reg = <0xd3000 0x400>; - ranges = <0x0 0xd3000 0x400>; - fsl,liodn = <0xc8>; - - message-unit@0 { - compatible = "fsl,srio-msg-unit"; - reg = <0x0 0x100>; - interrupts = < - 60 2 0 0 /* msg1_tx_irq */ - 61 2 0 0>;/* msg1_rx_irq */ - }; - message-unit@100 { - compatible = "fsl,srio-msg-unit"; - reg = <0x100 0x100>; - interrupts = < - 62 2 0 0 /* msg2_tx_irq */ - 63 2 0 0>;/* msg2_rx_irq */ - }; - doorbell-unit@400 { - compatible = "fsl,srio-dbell-unit"; - reg = <0x400 0x80>; - interrupts = < - 56 2 0 0 /* bell_outb_irq */ - 57 2 0 0>;/* bell_inb_irq */ - }; - port-write-unit@4e0 { - compatible = "fsl,srio-port-write-unit"; - reg = <0x4e0 0x20>; - interrupts = <16 2 1 11>; - }; - }; diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/srio.txt b/trunk/Documentation/devicetree/bindings/powerpc/fsl/srio.txt deleted file mode 100644 index b039bcbee134..000000000000 --- a/trunk/Documentation/devicetree/bindings/powerpc/fsl/srio.txt +++ /dev/null @@ -1,103 +0,0 @@ -* Freescale Serial RapidIO (SRIO) Controller - -RapidIO port node: -Properties: - - compatible - Usage: required - Value type: - Definition: Must include "fsl,srio" for IP blocks with IP Block - Revision Register (SRIO IPBRR1) Major ID equal to 0x01c0. - - Optionally, a compatiable string of "fsl,srio-vX.Y" where X is Major - version in IP Block Revision Register and Y is Minor version. If this - compatiable is provided it should be ordered before "fsl,srio". - - - reg - Usage: required - Value type: - Definition: A standard property. Specifies the physical address and - length of the SRIO configuration registers. The size should - be set to 0x11000. - - - interrupts - Usage: required - Value type: - Definition: Specifies the interrupts generated by this device. The - value of the interrupts property consists of one interrupt - specifier. The format of the specifier is defined by the - binding document describing the node's interrupt parent. - - A single IRQ that handles error conditions is specified by this - property. 
(Typically shared with port-write). - - - fsl,srio-rmu-handle: - Usage: required if rmu node is defined - Value type: - Definition: A single value that points to the RMU. - (See srio-rmu.txt for more details on RMU node binding) - -Port Child Nodes: There should a port child node for each port that exists in -the controller. The ports are numbered starting at one (1) and should have -the following properties: - - - cell-index - Usage: required - Value type: - Definition: A standard property. Matches the port id. - - - ranges - Usage: required if local access windows preset - Value type: - Definition: A standard property. Utilized to describe the memory mapped - IO space utilized by the controller. This corresponds to the - setting of the local access windows that are targeted to this - SRIO port. - - - fsl,liodn - Usage: optional-but-recommended (for devices with PAMU) - Value type: - Definition: The logical I/O device number for the PAMU (IOMMU) to be - correctly configured for SRIO accesses. The property should - not exist on devices that do not support PAMU. - - For HW (ie, the P4080) that only supports a LIODN for both - memory and maintenance transactions then a single LIODN is - represented in the property for both transactions. - - For HW (ie, the P304x/P5020, etc) that supports an LIODN for - memory transactions and a unique LIODN for maintenance - transactions then a pair of LIODNs are represented in the - property. Within the pair, the first element represents the - LIODN associated with memory transactions and the second element - represents the LIODN associated with maintenance transactions - for the port. - -Note: All other standard properties (see ePAPR) are allowed but are optional. - -Example: - - rapidio: rapidio@ffe0c0000 { - #address-cells = <2>; - #size-cells = <2>; - reg = <0xf 0xfe0c0000 0 0x11000>; - compatible = "fsl,srio"; - interrupts = <16 2 1 11>; /* err_irq */ - fsl,srio-rmu-handle = <&rmu>; - ranges; - - port1 { - cell-index = <1>; - #address-cells = <2>; - #size-cells = <2>; - fsl,liodn = <34>; - ranges = <0 0 0xc 0x20000000 0 0x10000000>; - }; - - port2 { - cell-index = <2>; - #address-cells = <2>; - #size-cells = <2>; - fsl,liodn = <48>; - ranges = <0 0 0xc 0x30000000 0 0x10000000>; - }; - }; diff --git a/trunk/Documentation/feature-removal-schedule.txt b/trunk/Documentation/feature-removal-schedule.txt index 33f7327d0451..3d849122b5b1 100644 --- a/trunk/Documentation/feature-removal-schedule.txt +++ b/trunk/Documentation/feature-removal-schedule.txt @@ -263,7 +263,8 @@ Who: Ravikiran Thirumalai What: Code that is now under CONFIG_WIRELESS_EXT_SYSFS (in net/core/net-sysfs.c) -When: 3.5 +When: After the only user (hal) has seen a release with the patches + for enough time, probably some time in 2010. Why: Over 1K .text/.data size reduction, data is available in other ways (ioctls) Who: Johannes Berg diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt index e229769606f2..81c287fad79d 100644 --- a/trunk/Documentation/kernel-parameters.txt +++ b/trunk/Documentation/kernel-parameters.txt @@ -1885,11 +1885,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted. arch_perfmon: [X86] Force use of architectural perfmon on Intel CPUs instead of the CPU specific event set. - timer: [X86] Force use of architectural NMI - timer mode (see also oprofile.timer - for generic hr timer mode) - [s390] Force legacy basic mode sampling - (report cpu_type "timer") oops=panic Always panic on oopses. 
Default is to just kill the process, but there is a small probability of @@ -2755,10 +2750,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted. functions are at fixed addresses, they make nice targets for exploits that can control RIP. - emulate [default] Vsyscalls turn into traps and are - emulated reasonably safely. + emulate Vsyscalls turn into traps and are emulated + reasonably safely. - native Vsyscalls are native syscall instructions. + native [default] Vsyscalls are native syscall + instructions. This is a little bit faster than trapping and makes a few dynamic recompilers work better than they would in emulation mode. diff --git a/trunk/Documentation/lockdep-design.txt b/trunk/Documentation/lockdep-design.txt index 5dbc99c04f6e..abf768c681e2 100644 --- a/trunk/Documentation/lockdep-design.txt +++ b/trunk/Documentation/lockdep-design.txt @@ -221,66 +221,3 @@ when the chain is validated for the first time, is then put into a hash table, which hash-table can be checked in a lockfree manner. If the locking chain occurs again later on, the hash table tells us that we dont have to validate the chain again. - -Troubleshooting: ----------------- - -The validator tracks a maximum of MAX_LOCKDEP_KEYS number of lock classes. -Exceeding this number will trigger the following lockdep warning: - - (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) - -By default, MAX_LOCKDEP_KEYS is currently set to 8191, and typical -desktop systems have less than 1,000 lock classes, so this warning -normally results from lock-class leakage or failure to properly -initialize locks. These two problems are illustrated below: - -1. Repeated module loading and unloading while running the validator - will result in lock-class leakage. The issue here is that each - load of the module will create a new set of lock classes for - that module's locks, but module unloading does not remove old - classes (see below discussion of reuse of lock classes for why). - Therefore, if that module is loaded and unloaded repeatedly, - the number of lock classes will eventually reach the maximum. - -2. Using structures such as arrays that have large numbers of - locks that are not explicitly initialized. For example, - a hash table with 8192 buckets where each bucket has its own - spinlock_t will consume 8192 lock classes -unless- each spinlock - is explicitly initialized at runtime, for example, using the - run-time spin_lock_init() as opposed to compile-time initializers - such as __SPIN_LOCK_UNLOCKED(). Failure to properly initialize - the per-bucket spinlocks would guarantee lock-class overflow. - In contrast, a loop that called spin_lock_init() on each lock - would place all 8192 locks into a single lock class. - - The moral of this story is that you should always explicitly - initialize your locks. - -One might argue that the validator should be modified to allow -lock classes to be reused. However, if you are tempted to make this -argument, first review the code and think through the changes that would -be required, keeping in mind that the lock classes to be removed are -likely to be linked into the lock-dependency graph. This turns out to -be harder to do than to say. - -Of course, if you do run out of lock classes, the next thing to do is -to find the offending lock classes. 
First, the following command gives -you the number of lock classes currently in use along with the maximum: - - grep "lock-classes" /proc/lockdep_stats - -This command produces the following output on a modest system: - - lock-classes: 748 [max: 8191] - -If the number allocated (748 above) increases continually over time, -then there is likely a leak. The following command can be used to -identify the leaking lock classes: - - grep "BD" /proc/lockdep - -Run the command and save the output, then compare against the output from -a later run of this command to identify the leakers. This same output -can also help you find situations where runtime lock initialization has -been omitted. diff --git a/trunk/Documentation/networking/00-INDEX b/trunk/Documentation/networking/00-INDEX index 9ad9ddeb384c..bbce1215434a 100644 --- a/trunk/Documentation/networking/00-INDEX +++ b/trunk/Documentation/networking/00-INDEX @@ -144,8 +144,6 @@ nfc.txt - The Linux Near Field Communication (NFS) subsystem. olympic.txt - IBM PCI Pit/Pit-Phy/Olympic Token Ring driver info. -openvswitch.txt - - Open vSwitch developer documentation. operstates.txt - Overview of network interface operational states. packet_mmap.txt diff --git a/trunk/Documentation/networking/batman-adv.txt b/trunk/Documentation/networking/batman-adv.txt index 221ad0cdf11f..c86d03f18a5b 100644 --- a/trunk/Documentation/networking/batman-adv.txt +++ b/trunk/Documentation/networking/batman-adv.txt @@ -200,16 +200,15 @@ abled during run time. Following log_levels are defined: 0 - All debug output disabled 1 - Enable messages related to routing / flooding / broadcasting -2 - Enable messages related to route added / changed / deleted -4 - Enable messages related to translation table operations -7 - Enable all messages +2 - Enable route or tt entry added / changed / deleted +3 - Enable all messages The debug output can be changed at runtime using the file /sys/class/net/bat0/mesh/log_level. e.g. # echo 2 > /sys/class/net/bat0/mesh/log_level -will enable debug messages for when routes change. +will enable debug messages for when routes or TTs change. BATCTL diff --git a/trunk/Documentation/networking/bonding.txt b/trunk/Documentation/networking/bonding.txt index 080ad26690ae..91df678fb7f8 100644 --- a/trunk/Documentation/networking/bonding.txt +++ b/trunk/Documentation/networking/bonding.txt @@ -196,23 +196,6 @@ or, for backwards compatibility, the option value. E.g., The parameters are as follows: -active_slave - - Specifies the new active slave for modes that support it - (active-backup, balance-alb and balance-tlb). Possible values - are the name of any currently enslaved interface, or an empty - string. If a name is given, the slave and its link must be up in order - to be selected as the new active slave. If an empty string is - specified, the current active slave is cleared, and a new active - slave is selected automatically. - - Note that this is only available through the sysfs interface. No module - parameter by this name exists. - - The normal value of this option is the name of the currently - active slave, or the empty string if there is no active slave or - the current mode does not use an active slave. - ad_select Specifies the 802.3ad aggregation selection logic to use. 
The diff --git a/trunk/Documentation/networking/ieee802154.txt b/trunk/Documentation/networking/ieee802154.txt index 1dc1c24a7547..f41ea2405220 100644 --- a/trunk/Documentation/networking/ieee802154.txt +++ b/trunk/Documentation/networking/ieee802154.txt @@ -78,30 +78,3 @@ in software. This is currently WIP. See header include/net/mac802154.h and several drivers in drivers/ieee802154/. -6LoWPAN Linux implementation -============================ - -The IEEE 802.15.4 standard specifies an MTU of 128 bytes, yielding about 80 -octets of actual MAC payload once security is turned on, on a wireless link -with a link throughput of 250 kbps or less. The 6LoWPAN adaptation format -[RFC4944] was specified to carry IPv6 datagrams over such constrained links, -taking into account limited bandwidth, memory, or energy resources that are -expected in applications such as wireless Sensor Networks. [RFC4944] defines -a Mesh Addressing header to support sub-IP forwarding, a Fragmentation header -to support the IPv6 minimum MTU requirement [RFC2460], and stateless header -compression for IPv6 datagrams (LOWPAN_HC1 and LOWPAN_HC2) to reduce the -relatively large IPv6 and UDP headers down to (in the best case) several bytes. - -In Semptember 2011 the standard update was published - [RFC6282]. -It deprecates HC1 and HC2 compression and defines IPHC encoding format which is -used in this Linux implementation. - -All the code related to 6lowpan you may find in files: net/ieee802154/6lowpan.* - -To setup 6lowpan interface you need (busybox release > 1.17.0): -1. Add IEEE802.15.4 interface and initialize PANid; -2. Add 6lowpan interface by command like: - # ip link add link wpan0 name lowpan0 type lowpan -3. Set MAC (if needs): - # ip link set lowpan0 address de:ad:be:ef:ca:fe:ba:be -4. Bring up 'lowpan0' interface diff --git a/trunk/Documentation/networking/ifenslave.c b/trunk/Documentation/networking/ifenslave.c index ac5debb2f16c..65968fbf1e49 100644 --- a/trunk/Documentation/networking/ifenslave.c +++ b/trunk/Documentation/networking/ifenslave.c @@ -539,14 +539,12 @@ static int if_getconfig(char *ifname) metric = 0; } else metric = ifr.ifr_metric; - printf("The result of SIOCGIFMETRIC is %d\n", metric); strcpy(ifr.ifr_name, ifname); if (ioctl(skfd, SIOCGIFMTU, &ifr) < 0) mtu = 0; else mtu = ifr.ifr_mtu; - printf("The result of SIOCGIFMTU is %d\n", mtu); strcpy(ifr.ifr_name, ifname); if (ioctl(skfd, SIOCGIFDSTADDR, &ifr) < 0) { diff --git a/trunk/Documentation/networking/ip-sysctl.txt b/trunk/Documentation/networking/ip-sysctl.txt index ad3e80e17b4f..589f2da5d545 100644 --- a/trunk/Documentation/networking/ip-sysctl.txt +++ b/trunk/Documentation/networking/ip-sysctl.txt @@ -31,16 +31,6 @@ neigh/default/gc_thresh3 - INTEGER when using large numbers of interfaces and when communicating with large numbers of directly-connected peers. -neigh/default/unres_qlen_bytes - INTEGER - The maximum number of bytes which may be used by packets - queued for each unresolved address by other network layers. - (added in linux 3.3) - -neigh/default/unres_qlen - INTEGER - The maximum number of packets which may be queued for each - unresolved address by other network layers. - (deprecated in linux 3.3) : use unres_qlen_bytes instead. - mtu_expires - INTEGER Time, in seconds, that cached PMTU information is kept. @@ -175,9 +165,6 @@ tcp_congestion_control - STRING connections. The algorithm "reno" is always available, but additional choices may be available based on kernel configuration. 
Default is set as part of kernel configuration. - For passive connections, the listener congestion control choice - is inherited. - [see setsockopt(listenfd, SOL_TCP, TCP_CONGESTION, "name" ...) ] tcp_cookie_size - INTEGER Default size of TCP Cookie Transactions (TCPCT) option, that may be diff --git a/trunk/Documentation/networking/openvswitch.txt b/trunk/Documentation/networking/openvswitch.txt deleted file mode 100644 index b8a048b8df3a..000000000000 --- a/trunk/Documentation/networking/openvswitch.txt +++ /dev/null @@ -1,195 +0,0 @@ -Open vSwitch datapath developer documentation -============================================= - -The Open vSwitch kernel module allows flexible userspace control over -flow-level packet processing on selected network devices. It can be -used to implement a plain Ethernet switch, network device bonding, -VLAN processing, network access control, flow-based network control, -and so on. - -The kernel module implements multiple "datapaths" (analogous to -bridges), each of which can have multiple "vports" (analogous to ports -within a bridge). Each datapath also has associated with it a "flow -table" that userspace populates with "flows" that map from keys based -on packet headers and metadata to sets of actions. The most common -action forwards the packet to another vport; other actions are also -implemented. - -When a packet arrives on a vport, the kernel module processes it by -extracting its flow key and looking it up in the flow table. If there -is a matching flow, it executes the associated actions. If there is -no match, it queues the packet to userspace for processing (as part of -its processing, userspace will likely set up a flow to handle further -packets of the same type entirely in-kernel). - - -Flow key compatibility ----------------------- - -Network protocols evolve over time. New protocols become important -and existing protocols lose their prominence. For the Open vSwitch -kernel module to remain relevant, it must be possible for newer -versions to parse additional protocols as part of the flow key. It -might even be desirable, someday, to drop support for parsing -protocols that have become obsolete. Therefore, the Netlink interface -to Open vSwitch is designed to allow carefully written userspace -applications to work with any version of the flow key, past or future. - -To support this forward and backward compatibility, whenever the -kernel module passes a packet to userspace, it also passes along the -flow key that it parsed from the packet. Userspace then extracts its -own notion of a flow key from the packet and compares it against the -kernel-provided version: - - - If userspace's notion of the flow key for the packet matches the - kernel's, then nothing special is necessary. - - - If the kernel's flow key includes more fields than the userspace - version of the flow key, for example if the kernel decoded IPv6 - headers but userspace stopped at the Ethernet type (because it - does not understand IPv6), then again nothing special is - necessary. Userspace can still set up a flow in the usual way, - as long as it uses the kernel-provided flow key to do it. - - - If the userspace flow key includes more fields than the - kernel's, for example if userspace decoded an IPv6 header but - the kernel stopped at the Ethernet type, then userspace can - forward the packet manually, without setting up a flow in the - kernel. 
This case is bad for performance because every packet - that the kernel considers part of the flow must go to userspace, - but the forwarding behavior is correct. (If userspace can - determine that the values of the extra fields would not affect - forwarding behavior, then it could set up a flow anyway.) - -How flow keys evolve over time is important to making this work, so -the following sections go into detail. - - -Flow key format ---------------- - -A flow key is passed over a Netlink socket as a sequence of Netlink -attributes. Some attributes represent packet metadata, defined as any -information about a packet that cannot be extracted from the packet -itself, e.g. the vport on which the packet was received. Most -attributes, however, are extracted from headers within the packet, -e.g. source and destination addresses from Ethernet, IP, or TCP -headers. - -The header file defines the exact format of the -flow key attributes. For informal explanatory purposes here, we write -them as comma-separated strings, with parentheses indicating arguments -and nesting. For example, the following could represent a flow key -corresponding to a TCP packet that arrived on vport 1: - - in_port(1), eth(src=e0:91:f5:21:d0:b2, dst=00:02:e3:0f:80:a4), - eth_type(0x0800), ipv4(src=172.16.0.20, dst=172.18.0.52, proto=17, tos=0, - frag=no), tcp(src=49163, dst=80) - -Often we ellipsize arguments not important to the discussion, e.g.: - - in_port(1), eth(...), eth_type(0x0800), ipv4(...), tcp(...) - - -Basic rule for evolving flow keys ---------------------------------- - -Some care is needed to really maintain forward and backward -compatibility for applications that follow the rules listed under -"Flow key compatibility" above. - -The basic rule is obvious: - - ------------------------------------------------------------------ - New network protocol support must only supplement existing flow - key attributes. It must not change the meaning of already defined - flow key attributes. - ------------------------------------------------------------------ - -This rule does have less-obvious consequences so it is worth working -through a few examples. Suppose, for example, that the kernel module -did not already implement VLAN parsing. Instead, it just interpreted -the 802.1Q TPID (0x8100) as the Ethertype then stopped parsing the -packet. The flow key for any packet with an 802.1Q header would look -essentially like this, ignoring metadata: - - eth(...), eth_type(0x8100) - -Naively, to add VLAN support, it makes sense to add a new "vlan" flow -key attribute to contain the VLAN tag, then continue to decode the -encapsulated headers beyond the VLAN tag using the existing field -definitions. With this change, an TCP packet in VLAN 10 would have a -flow key much like this: - - eth(...), vlan(vid=10, pcp=0), eth_type(0x0800), ip(proto=6, ...), tcp(...) - -But this change would negatively affect a userspace application that -has not been updated to understand the new "vlan" flow key attribute. -The application could, following the flow compatibility rules above, -ignore the "vlan" attribute that it does not understand and therefore -assume that the flow contained IP packets. This is a bad assumption -(the flow only contains IP packets if one parses and skips over the -802.1Q header) and it could cause the application's behavior to change -across kernel versions even though it follows the compatibility rules. - -The solution is to use a set of nested attributes. 
This is, for -example, why 802.1Q support uses nested attributes. A TCP packet in -VLAN 10 is actually expressed as: - - eth(...), eth_type(0x8100), vlan(vid=10, pcp=0), encap(eth_type(0x0800), - ip(proto=6, ...), tcp(...))) - -Notice how the "eth_type", "ip", and "tcp" flow key attributes are -nested inside the "encap" attribute. Thus, an application that does -not understand the "vlan" key will not see either of those attributes -and therefore will not misinterpret them. (Also, the outer eth_type -is still 0x8100, not changed to 0x0800.) - -Handling malformed packets --------------------------- - -Don't drop packets in the kernel for malformed protocol headers, bad -checksums, etc. This would prevent userspace from implementing a -simple Ethernet switch that forwards every packet. - -Instead, in such a case, include an attribute with "empty" content. -It doesn't matter if the empty content could be valid protocol values, -as long as those values are rarely seen in practice, because userspace -can always forward all packets with those values to userspace and -handle them individually. - -For example, consider a packet that contains an IP header that -indicates protocol 6 for TCP, but which is truncated just after the IP -header, so that the TCP header is missing. The flow key for this -packet would include a tcp attribute with all-zero src and dst, like -this: - - eth(...), eth_type(0x0800), ip(proto=6, ...), tcp(src=0, dst=0) - -As another example, consider a packet with an Ethernet type of 0x8100, -indicating that a VLAN TCI should follow, but which is truncated just -after the Ethernet type. The flow key for this packet would include -an all-zero-bits vlan and an empty encap attribute, like this: - - eth(...), eth_type(0x8100), vlan(0), encap() - -Unlike a TCP packet with source and destination ports 0, an -all-zero-bits VLAN TCI is not that rare, so the CFI bit (aka -VLAN_TAG_PRESENT inside the kernel) is ordinarily set in a vlan -attribute expressly to allow this situation to be distinguished. -Thus, the flow key in this second example unambiguously indicates a -missing or malformed VLAN TCI. - -Other rules ------------ - -The other rules for flow keys are much less subtle: - - - Duplicate attributes are not allowed at a given nesting level. - - - Ordering of attributes is not significant. - - - When the kernel sends a given flow key to userspace, it always - composes it the same way. This allows userspace to hash and - compare entire flow keys that it may not be able to fully - interpret. diff --git a/trunk/Documentation/networking/packet_mmap.txt b/trunk/Documentation/networking/packet_mmap.txt index 1c08a4b0981f..4acea6603720 100644 --- a/trunk/Documentation/networking/packet_mmap.txt +++ b/trunk/Documentation/networking/packet_mmap.txt @@ -155,7 +155,7 @@ As capture, each frame contains two parts: /* fill sockaddr_ll struct to prepare binding */ my_addr.sll_family = AF_PACKET; - my_addr.sll_protocol = htons(ETH_P_ALL); + my_addr.sll_protocol = ETH_P_ALL; my_addr.sll_ifindex = s_ifr.ifr_ifindex; /* bind socket to eth0 */ diff --git a/trunk/Documentation/networking/scaling.txt b/trunk/Documentation/networking/scaling.txt index 579994afbe06..a177de21d28e 100644 --- a/trunk/Documentation/networking/scaling.txt +++ b/trunk/Documentation/networking/scaling.txt @@ -208,7 +208,7 @@ The counter in rps_dev_flow_table values records the length of the current CPU's backlog when a packet in this flow was last enqueued. Each backlog queue has a head counter that is incremented on dequeue. 
A tail counter is computed as head counter + queue length. In other words, the counter -in rps_dev_flow[i] records the last element in flow i that has +in rps_dev_flow_table[i] records the last element in flow i that has been enqueued onto the currently designated CPU for flow i (of course, entry i is actually selected by hash and multiple flows may hash to the same entry i). @@ -224,7 +224,7 @@ following is true: - The current CPU's queue head counter >= the recorded tail counter value in rps_dev_flow[i] -- The current CPU is unset (equal to RPS_NO_CPU) +- The current CPU is unset (equal to NR_CPUS) - The current CPU is offline After this check, the packet is sent to the (possibly updated) current @@ -235,7 +235,7 @@ CPU. ==== RFS Configuration -RFS is only available if the kconfig symbol CONFIG_RPS is enabled (on +RFS is only available if the kconfig symbol CONFIG_RFS is enabled (on by default for SMP). The functionality remains disabled until explicitly configured. The number of entries in the global flow table is set through: @@ -258,7 +258,7 @@ For a single queue device, the rps_flow_cnt value for the single queue would normally be configured to the same value as rps_sock_flow_entries. For a multi-queue device, the rps_flow_cnt for each queue might be configured as rps_sock_flow_entries / N, where N is the number of -queues. So for instance, if rps_sock_flow_entries is set to 32768 and there +queues. So for instance, if rps_flow_entries is set to 32768 and there are 16 configured receive queues, rps_flow_cnt for each queue might be configured as 2048. diff --git a/trunk/Documentation/networking/stmmac.txt b/trunk/Documentation/networking/stmmac.txt index d0aeeadd264b..8d67980fabe8 100644 --- a/trunk/Documentation/networking/stmmac.txt +++ b/trunk/Documentation/networking/stmmac.txt @@ -4,16 +4,14 @@ Copyright (C) 2007-2010 STMicroelectronics Ltd Author: Giuseppe Cavallaro This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers -(Synopsys IP blocks). +(Synopsys IP blocks); it has been fully tested on STLinux platforms. Currently this network device driver is for all STM embedded MAC/GMAC -(i.e. 7xxx/5xxx SoCs), SPEAr (arm), Loongson1B (mips) and XLINX XC2V3000 -FF1152AMT0221 D1215994A VIRTEX FPGA board. +(i.e. 7xxx/5xxx SoCs) and it's known working on other platforms i.e. ARM SPEAr. -DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether MAC 10/100 -Universal version 4.0 have been used for developing this driver. - -This driver supports both the platform bus and PCI. +DWC Ether MAC 10/100/1000 Universal version 3.41a and DWC Ether MAC 10/100 +Universal version 4.0 have been used for developing the first code +implementation. Please, for more information also visit: www.stlinux.com @@ -279,5 +277,5 @@ In fact, these can generate an huge amount of debug messages. 6) TODO: o XGMAC is not supported. - o Add the EEE - Energy Efficient Ethernet - o Add the PTP - precision time protocol + o Review the timer optimisation code to use an embedded device that will be + available in new chip generations. 
diff --git a/trunk/Documentation/networking/team.txt b/trunk/Documentation/networking/team.txt deleted file mode 100644 index 5a013686b9ea..000000000000 --- a/trunk/Documentation/networking/team.txt +++ /dev/null @@ -1,2 +0,0 @@ -Team devices are driven from userspace via libteam library which is here: - https://github.com/jpirko/libteam diff --git a/trunk/Documentation/trace/events.txt b/trunk/Documentation/trace/events.txt index bb24c2a0e870..b510564aac7e 100644 --- a/trunk/Documentation/trace/events.txt +++ b/trunk/Documentation/trace/events.txt @@ -191,6 +191,8 @@ And for string fields they are: Currently, only exact string matches are supported. +Currently, the maximum number of predicates in a filter is 16. + 5.2 Setting filters ------------------- diff --git a/trunk/Documentation/virtual/kvm/api.txt b/trunk/Documentation/virtual/kvm/api.txt index e2a4b5287361..7945b0bd35e2 100644 --- a/trunk/Documentation/virtual/kvm/api.txt +++ b/trunk/Documentation/virtual/kvm/api.txt @@ -1100,15 +1100,6 @@ emulate them efficiently. The fields in each entry are defined as follows: eax, ebx, ecx, edx: the values returned by the cpuid instruction for this function/index combination -The TSC deadline timer feature (CPUID leaf 1, ecx[24]) is always returned -as false, since the feature depends on KVM_CREATE_IRQCHIP for local APIC -support. Instead it is reported via - - ioctl(KVM_CHECK_EXTENSION, KVM_CAP_TSC_DEADLINE_TIMER) - -if that returns true and you use KVM_CREATE_IRQCHIP, or if you emulate the -feature in userspace, then you can enable the feature for KVM_SET_CPUID2. - 4.47 KVM_PPC_GET_PVINFO Capability: KVM_CAP_PPC_GET_PVINFO @@ -1160,13 +1151,6 @@ following flags are specified: /* Depends on KVM_CAP_IOMMU */ #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) -The KVM_DEV_ASSIGN_ENABLE_IOMMU flag is a mandatory option to ensure -isolation of the device. Usages not specifying this flag are deprecated. - -Only PCI header type 0 devices with PCI BAR resources are supported by -device assignment. The user requesting this ioctl must have read/write -access to the PCI sysfs resource files associated with the device. 
- 4.49 KVM_DEASSIGN_PCI_DEVICE Capability: KVM_CAP_DEVICE_DEASSIGNMENT diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index 2747a7e9e7b2..6afba60c3904 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -1698,9 +1698,11 @@ F: arch/x86/include/asm/tce.h CAN NETWORK LAYER M: Oliver Hartkopp +M: Oliver Hartkopp +M: Urs Thuermann L: linux-can@vger.kernel.org -W: http://gitorious.org/linux-can -T: git git://gitorious.org/linux-can/linux-can-next.git +L: netdev@vger.kernel.org +W: http://developer.berlios.de/projects/socketcan/ S: Maintained F: net/can/ F: include/linux/can.h @@ -1711,10 +1713,9 @@ F: include/linux/can/gw.h CAN NETWORK DRIVERS M: Wolfgang Grandegger -M: Marc Kleine-Budde L: linux-can@vger.kernel.org -W: http://gitorious.org/linux-can -T: git git://gitorious.org/linux-can/linux-can-next.git +L: netdev@vger.kernel.org +W: http://developer.berlios.de/projects/socketcan/ S: Maintained F: drivers/net/can/ F: include/linux/can/dev.h @@ -2699,7 +2700,7 @@ FIREWIRE SUBSYSTEM M: Stefan Richter L: linux1394-devel@lists.sourceforge.net W: http://ieee1394.wiki.kernel.org/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git S: Maintained F: drivers/firewire/ F: include/linux/firewire*.h @@ -4011,7 +4012,7 @@ M: Josh Boyer M: Matt Porter W: http://www.penguinppc.org/ L: linuxppc-dev@lists.ozlabs.org -T: git git://git.infradead.org/users/jwboyer/powerpc-4xx.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/jwboyer/powerpc-4xx.git S: Maintained F: arch/powerpc/platforms/40x/ F: arch/powerpc/platforms/44x/ @@ -4854,14 +4855,6 @@ S: Maintained T: git git://openrisc.net/~jonas/linux F: arch/openrisc -OPENVSWITCH -M: Jesse Gross -L: dev@openvswitch.org -W: http://openvswitch.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch.git -S: Maintained -F: net/openvswitch/ - OPL4 DRIVER M: Clemens Ladisch L: alsa-devel@alsa-project.org (moderated for non-subscribers) @@ -5381,7 +5374,6 @@ S: Supported F: drivers/scsi/qla4xxx/ QLOGIC QLA3XXX NETWORK DRIVER -M: Jitendra Kalsaria M: Ron Mercer M: linux-driver@qlogic.com L: netdev@vger.kernel.org @@ -5901,6 +5893,7 @@ F: drivers/net/ethernet/emulex/benet/ SFC NETWORK DRIVER M: Solarflare linux maintainers +M: Steve Hodgson M: Ben Hutchings L: netdev@vger.kernel.org S: Supported @@ -6508,13 +6501,6 @@ W: http://tcp-lp-mod.sourceforge.net/ S: Maintained F: net/ipv4/tcp_lp.c -TEAM DRIVER -M: Jiri Pirko -L: netdev@vger.kernel.org -S: Supported -F: drivers/net/team/ -F: include/linux/if_team.h - TEGRA SUPPORT M: Colin Cross M: Olof Johansson diff --git a/trunk/Makefile b/trunk/Makefile index adddd11c3b3b..ea51081812f3 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 2 SUBLEVEL = 0 -EXTRAVERSION = +EXTRAVERSION = -rc7 NAME = Saber-toothed Squirrel # *DOCUMENTATION* diff --git a/trunk/arch/Kconfig b/trunk/arch/Kconfig index 2505740b81d2..4b0669cbb3b0 100644 --- a/trunk/arch/Kconfig +++ b/trunk/arch/Kconfig @@ -30,10 +30,6 @@ config OPROFILE_EVENT_MULTIPLEX config HAVE_OPROFILE bool -config OPROFILE_NMI_TIMER - def_bool y - depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI - config KPROBES bool "Kprobes" depends on MODULES diff --git a/trunk/arch/alpha/include/asm/socket.h b/trunk/arch/alpha/include/asm/socket.h index 082355f159e6..06edfefc3373 100644 --- a/trunk/arch/alpha/include/asm/socket.h +++ b/trunk/arch/alpha/include/asm/socket.h @@ -69,9 +69,6 @@ #define SO_RXQ_OVFL 40 
-#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS - /* O_NONBLOCK clashes with the bits used for socket types. Therefore we * have to define SOCK_NONBLOCK to a different value here. */ diff --git a/trunk/arch/arm/Kconfig b/trunk/arch/arm/Kconfig index b259c7c644e3..776d76b8cb69 100644 --- a/trunk/arch/arm/Kconfig +++ b/trunk/arch/arm/Kconfig @@ -1246,7 +1246,7 @@ config PL310_ERRATA_588369 config ARM_ERRATA_720789 bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID" - depends on CPU_V7 + depends on CPU_V7 && SMP help This option enables the workaround for the 720789 Cortex-A9 (prior to r2p0) erratum. A faulty ASID can be sent to the other CPUs for the @@ -1282,7 +1282,7 @@ config ARM_ERRATA_743622 config ARM_ERRATA_751472 bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation" - depends on CPU_V7 + depends on CPU_V7 && SMP help This option enables the workaround for the 751472 Cortex-A9 (prior to r3p0) erratum. An interrupted ICIALLUIS operation may prevent the diff --git a/trunk/arch/arm/common/pl330.c b/trunk/arch/arm/common/pl330.c index 8d8df744f7a5..f407a6b35d3d 100644 --- a/trunk/arch/arm/common/pl330.c +++ b/trunk/arch/arm/common/pl330.c @@ -221,6 +221,17 @@ */ #define MCODE_BUFF_PER_REQ 256 +/* + * Mark a _pl330_req as free. + * We do it by writing DMAEND as the first instruction + * because no valid request is going to have DMAEND as + * its first instruction to execute. + */ +#define MARK_FREE(req) do { \ + _emit_END(0, (req)->mc_cpu); \ + (req)->mc_len = 0; \ + } while (0) + /* If the _pl330_req is available to the client */ #define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND) @@ -290,10 +301,8 @@ struct pl330_thread { struct pl330_dmac *dmac; /* Only two at a time */ struct _pl330_req req[2]; - /* Index of the last enqueued request */ + /* Index of the last submitted request */ unsigned lstenq; - /* Index of the last submitted request or -1 if the DMA is stopped */ - int req_running; }; enum pl330_dmac_state { @@ -769,22 +778,6 @@ static inline void _execute_DBGINSN(struct pl330_thread *thrd, writel(0, regs + DBGCMD); } -/* - * Mark a _pl330_req as free. - * We do it by writing DMAEND as the first instruction - * because no valid request is going to have DMAEND as - * its first instruction to execute. - */ -static void mark_free(struct pl330_thread *thrd, int idx) -{ - struct _pl330_req *req = &thrd->req[idx]; - - _emit_END(0, req->mc_cpu); - req->mc_len = 0; - - thrd->req_running = -1; -} - static inline u32 _state(struct pl330_thread *thrd) { void __iomem *regs = thrd->dmac->pinfo->base; @@ -843,6 +836,31 @@ static inline u32 _state(struct pl330_thread *thrd) } } +/* If the request 'req' of thread 'thrd' is currently active */ +static inline bool _req_active(struct pl330_thread *thrd, + struct _pl330_req *req) +{ + void __iomem *regs = thrd->dmac->pinfo->base; + u32 buf = req->mc_bus, pc = readl(regs + CPC(thrd->id)); + + if (IS_FREE(req)) + return false; + + return (pc >= buf && pc <= buf + req->mc_len) ? 
true : false; +} + +/* Returns 0 if the thread is inactive, ID of active req + 1 otherwise */ +static inline unsigned _thrd_active(struct pl330_thread *thrd) +{ + if (_req_active(thrd, &thrd->req[0])) + return 1; /* First req active */ + + if (_req_active(thrd, &thrd->req[1])) + return 2; /* Second req active */ + + return 0; +} + static void _stop(struct pl330_thread *thrd) { void __iomem *regs = thrd->dmac->pinfo->base; @@ -874,22 +892,17 @@ static bool _trigger(struct pl330_thread *thrd) struct _arg_GO go; unsigned ns; u8 insn[6] = {0, 0, 0, 0, 0, 0}; - int idx; /* Return if already ACTIVE */ if (_state(thrd) != PL330_STATE_STOPPED) return true; - idx = 1 - thrd->lstenq; - if (!IS_FREE(&thrd->req[idx])) - req = &thrd->req[idx]; - else { - idx = thrd->lstenq; - if (!IS_FREE(&thrd->req[idx])) - req = &thrd->req[idx]; - else - req = NULL; - } + if (!IS_FREE(&thrd->req[1 - thrd->lstenq])) + req = &thrd->req[1 - thrd->lstenq]; + else if (!IS_FREE(&thrd->req[thrd->lstenq])) + req = &thrd->req[thrd->lstenq]; + else + req = NULL; /* Return if no request */ if (!req || !req->r) @@ -920,8 +933,6 @@ static bool _trigger(struct pl330_thread *thrd) /* Only manager can execute GO */ _execute_DBGINSN(thrd, insn, true); - thrd->req_running = idx; - return true; } @@ -1371,8 +1382,8 @@ static void pl330_dotask(unsigned long data) thrd->req[0].r = NULL; thrd->req[1].r = NULL; - mark_free(thrd, 0); - mark_free(thrd, 1); + MARK_FREE(&thrd->req[0]); + MARK_FREE(&thrd->req[1]); /* Clear the reset flag */ pl330->dmac_tbd.reset_chan &= ~(1 << i); @@ -1450,12 +1461,14 @@ int pl330_update(const struct pl330_info *pi) thrd = &pl330->channels[id]; - active = thrd->req_running; - if (active == -1) /* Aborted */ + active = _thrd_active(thrd); + if (!active) /* Aborted */ continue; + active -= 1; + rqdone = &thrd->req[active]; - mark_free(thrd, active); + MARK_FREE(rqdone); /* Get going again ASAP */ _start(thrd); @@ -1496,7 +1509,7 @@ int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op) struct pl330_thread *thrd = ch_id; struct pl330_dmac *pl330; unsigned long flags; - int ret = 0, active = thrd->req_running; + int ret = 0, active; if (!thrd || thrd->free || thrd->dmac->state == DYING) return -EINVAL; @@ -1512,24 +1525,28 @@ int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op) thrd->req[0].r = NULL; thrd->req[1].r = NULL; - mark_free(thrd, 0); - mark_free(thrd, 1); + MARK_FREE(&thrd->req[0]); + MARK_FREE(&thrd->req[1]); break; case PL330_OP_ABORT: + active = _thrd_active(thrd); + /* Make sure the channel is stopped */ _stop(thrd); /* ABORT is only for the active req */ - if (active == -1) + if (!active) break; + active--; + thrd->req[active].r = NULL; - mark_free(thrd, active); + MARK_FREE(&thrd->req[active]); /* Start the next */ case PL330_OP_START: - if ((active == -1) && !_start(thrd)) + if (!_thrd_active(thrd) && !_start(thrd)) ret = -EIO; break; @@ -1570,13 +1587,14 @@ int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus) else pstatus->faulting = false; - active = thrd->req_running; + active = _thrd_active(thrd); - if (active == -1) { + if (!active) { /* Indicate that the thread is not running */ pstatus->top_req = NULL; pstatus->wait_req = NULL; } else { + active--; pstatus->top_req = thrd->req[active].r; pstatus->wait_req = !IS_FREE(&thrd->req[1 - active]) ? 
thrd->req[1 - active].r : NULL; @@ -1641,9 +1659,9 @@ void *pl330_request_channel(const struct pl330_info *pi) thrd->free = false; thrd->lstenq = 1; thrd->req[0].r = NULL; - mark_free(thrd, 0); + MARK_FREE(&thrd->req[0]); thrd->req[1].r = NULL; - mark_free(thrd, 1); + MARK_FREE(&thrd->req[1]); break; } } @@ -1749,14 +1767,14 @@ static inline void _reset_thread(struct pl330_thread *thrd) thrd->req[0].mc_bus = pl330->mcode_bus + (thrd->id * pi->mcbufsz); thrd->req[0].r = NULL; - mark_free(thrd, 0); + MARK_FREE(&thrd->req[0]); thrd->req[1].mc_cpu = thrd->req[0].mc_cpu + pi->mcbufsz / 2; thrd->req[1].mc_bus = thrd->req[0].mc_bus + pi->mcbufsz / 2; thrd->req[1].r = NULL; - mark_free(thrd, 1); + MARK_FREE(&thrd->req[1]); } static int dmac_alloc_threads(struct pl330_dmac *pl330) diff --git a/trunk/arch/arm/configs/imx_v4_v5_defconfig b/trunk/arch/arm/configs/imx_v4_v5_defconfig index cf497ce41dfe..11a4192197c8 100644 --- a/trunk/arch/arm/configs/imx_v4_v5_defconfig +++ b/trunk/arch/arm/configs/imx_v4_v5_defconfig @@ -18,10 +18,9 @@ CONFIG_ARCH_MXC=y CONFIG_ARCH_IMX_V4_V5=y CONFIG_ARCH_MX1ADS=y CONFIG_MACH_SCB9328=y -CONFIG_MACH_APF9328=y CONFIG_MACH_MX21ADS=y CONFIG_MACH_MX25_3DS=y -CONFIG_MACH_EUKREA_CPUIMX25SD=y +CONFIG_MACH_EUKREA_CPUIMX25=y CONFIG_MACH_MX27ADS=y CONFIG_MACH_PCM038=y CONFIG_MACH_CPUIMX27=y @@ -73,16 +72,17 @@ CONFIG_MTD_CFI_GEOMETRY=y CONFIG_MTD_CFI_INTELEXT=y CONFIG_MTD_PHYSMAP=y CONFIG_MTD_NAND=y -CONFIG_MTD_NAND_MXC=y CONFIG_MTD_UBI=y CONFIG_MISC_DEVICES=y CONFIG_EEPROM_AT24=y CONFIG_EEPROM_AT25=y CONFIG_NETDEVICES=y -CONFIG_DM9000=y +CONFIG_NET_ETHERNET=y CONFIG_SMC91X=y +CONFIG_DM9000=y CONFIG_SMC911X=y -CONFIG_SMSC_PHY=y +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set # CONFIG_INPUT_MOUSEDEV is not set CONFIG_INPUT_EVDEV=y # CONFIG_INPUT_KEYBOARD is not set @@ -100,7 +100,6 @@ CONFIG_I2C_CHARDEV=y CONFIG_I2C_IMX=y CONFIG_SPI=y CONFIG_SPI_IMX=y -CONFIG_SPI_SPIDEV=y CONFIG_W1=y CONFIG_W1_MASTER_MXC=y CONFIG_W1_SLAVE_THERM=y @@ -140,7 +139,6 @@ CONFIG_MMC=y CONFIG_MMC_MXC=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y -CONFIG_LEDS_GPIO=y CONFIG_LEDS_MC13783=y CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=y diff --git a/trunk/arch/arm/include/asm/socket.h b/trunk/arch/arm/include/asm/socket.h index dec6f9afb3cf..90ffd04b8e74 100644 --- a/trunk/arch/arm/include/asm/socket.h +++ b/trunk/arch/arm/include/asm/socket.h @@ -62,7 +62,4 @@ #define SO_RXQ_OVFL 40 -#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS - #endif /* _ASM_SOCKET_H */ diff --git a/trunk/arch/arm/kernel/process.c b/trunk/arch/arm/kernel/process.c index e8e8fe505df1..3d0c6fb74ae4 100644 --- a/trunk/arch/arm/kernel/process.c +++ b/trunk/arch/arm/kernel/process.c @@ -183,8 +183,7 @@ void cpu_idle(void) /* endless idle loop with no priority at all */ while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); + tick_nohz_stop_sched_tick(1); leds_event(led_idle_start); while (!need_resched()) { #ifdef CONFIG_HOTPLUG_CPU @@ -214,8 +213,7 @@ void cpu_idle(void) } } leds_event(led_idle_end); - rcu_idle_exit(); - tick_nohz_idle_exit(); + tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); schedule(); preempt_disable(); diff --git a/trunk/arch/arm/kernel/setup.c b/trunk/arch/arm/kernel/setup.c index c0b59bff6be6..8fc2c8fcbdc6 100644 --- a/trunk/arch/arm/kernel/setup.c +++ b/trunk/arch/arm/kernel/setup.c @@ -52,7 +52,6 @@ #include #include #include -#include #if defined(CONFIG_DEPRECATED_PARAM_STRUCT) #include "compat.h" diff --git a/trunk/arch/arm/mach-exynos/cpu.c 
b/trunk/arch/arm/mach-exynos/cpu.c index cc8d4bd6d0f7..90ec247f3b37 100644 --- a/trunk/arch/arm/mach-exynos/cpu.c +++ b/trunk/arch/arm/mach-exynos/cpu.c @@ -110,6 +110,11 @@ static struct map_desc exynos4_iodesc[] __initdata = { .pfn = __phys_to_pfn(EXYNOS4_PA_DMC0), .length = SZ_4K, .type = MT_DEVICE, + }, { + .virtual = (unsigned long)S5P_VA_SROMC, + .pfn = __phys_to_pfn(EXYNOS4_PA_SROMC), + .length = SZ_4K, + .type = MT_DEVICE, }, { .virtual = (unsigned long)S3C_VA_USB_HSPHY, .pfn = __phys_to_pfn(EXYNOS4_PA_HSPHY), diff --git a/trunk/arch/arm/mach-imx/Kconfig b/trunk/arch/arm/mach-imx/Kconfig index 0e6f1af260b6..c44aa974e79c 100644 --- a/trunk/arch/arm/mach-imx/Kconfig +++ b/trunk/arch/arm/mach-imx/Kconfig @@ -132,7 +132,7 @@ config MACH_MX25_3DS select IMX_HAVE_PLATFORM_MXC_NAND select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX -config MACH_EUKREA_CPUIMX25SD +config MACH_EUKREA_CPUIMX25 bool "Support Eukrea CPUIMX25 Platform" select SOC_IMX25 select IMX_HAVE_PLATFORM_FLEXCAN @@ -148,7 +148,7 @@ config MACH_EUKREA_CPUIMX25SD choice prompt "Baseboard" - depends on MACH_EUKREA_CPUIMX25SD + depends on MACH_EUKREA_CPUIMX25 default MACH_EUKREA_MBIMXSD25_BASEBOARD config MACH_EUKREA_MBIMXSD25_BASEBOARD @@ -542,7 +542,7 @@ config MACH_MX35_3DS Include support for MX35PDK platform. This includes specific configurations for the board and its peripherals. -config MACH_EUKREA_CPUIMX35SD +config MACH_EUKREA_CPUIMX35 bool "Support Eukrea CPUIMX35 Platform" select SOC_IMX35 select IMX_HAVE_PLATFORM_FLEXCAN @@ -560,7 +560,7 @@ config MACH_EUKREA_CPUIMX35SD choice prompt "Baseboard" - depends on MACH_EUKREA_CPUIMX35SD + depends on MACH_EUKREA_CPUIMX35 default MACH_EUKREA_MBIMXSD35_BASEBOARD config MACH_EUKREA_MBIMXSD35_BASEBOARD diff --git a/trunk/arch/arm/mach-imx/Makefile b/trunk/arch/arm/mach-imx/Makefile index d97f409ce98b..aba73214c2a8 100644 --- a/trunk/arch/arm/mach-imx/Makefile +++ b/trunk/arch/arm/mach-imx/Makefile @@ -24,7 +24,7 @@ obj-$(CONFIG_MACH_MX21ADS) += mach-mx21ads.o # i.MX25 based machines obj-$(CONFIG_MACH_MX25_3DS) += mach-mx25_3ds.o -obj-$(CONFIG_MACH_EUKREA_CPUIMX25SD) += mach-eukrea_cpuimx25.o +obj-$(CONFIG_MACH_EUKREA_CPUIMX25) += mach-eukrea_cpuimx25.o obj-$(CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD) += eukrea_mbimxsd25-baseboard.o # i.MX27 based machines @@ -57,7 +57,7 @@ obj-$(CONFIG_MACH_BUG) += mach-bug.o # i.MX35 based machines obj-$(CONFIG_MACH_PCM043) += mach-pcm043.o obj-$(CONFIG_MACH_MX35_3DS) += mach-mx35_3ds.o -obj-$(CONFIG_MACH_EUKREA_CPUIMX35SD) += mach-cpuimx35.o +obj-$(CONFIG_MACH_EUKREA_CPUIMX35) += mach-cpuimx35.o obj-$(CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD) += eukrea_mbimxsd35-baseboard.o obj-$(CONFIG_MACH_VPR200) += mach-vpr200.o diff --git a/trunk/arch/arm/mach-imx/clock-imx35.c b/trunk/arch/arm/mach-imx/clock-imx35.c index ac8238caecb9..8116f119517d 100644 --- a/trunk/arch/arm/mach-imx/clock-imx35.c +++ b/trunk/arch/arm/mach-imx/clock-imx35.c @@ -507,7 +507,7 @@ static struct clk_lookup lookups[] = { int __init mx35_clocks_init() { - unsigned int cgr2 = 3 << 26; + unsigned int cgr2 = 3 << 26, cgr3 = 0; #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC) cgr2 |= 3 << 16; @@ -521,12 +521,6 @@ int __init mx35_clocks_init() __raw_writel((3 << 18), CCM_BASE + CCM_CGR0); __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16), CCM_BASE + CCM_CGR1); - __raw_writel(cgr2, CCM_BASE + CCM_CGR2); - __raw_writel(0, CCM_BASE + CCM_CGR3); - - clk_enable(&iim_clk); - imx_print_silicon_rev("i.MX35", mx35_revision()); - clk_disable(&iim_clk); /* * Check if we 
came up in internal boot mode. If yes, we need some @@ -535,11 +529,17 @@ int __init mx35_clocks_init() */ if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) { /* Additionally turn on UART1, SCC, and IIM clocks */ - clk_enable(&iim_clk); - clk_enable(&uart1_clk); - clk_enable(&scc_clk); + cgr2 |= 3 << 16 | 3 << 4; + cgr3 |= 3 << 2; } + __raw_writel(cgr2, CCM_BASE + CCM_CGR2); + __raw_writel(cgr3, CCM_BASE + CCM_CGR3); + + clk_enable(&iim_clk); + imx_print_silicon_rev("i.MX35", mx35_revision()); + clk_disable(&iim_clk); + #ifdef CONFIG_MXC_USE_EPIT epit_timer_init(&epit1_clk, MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1); diff --git a/trunk/arch/arm/mach-imx/mach-cpuimx35.c b/trunk/arch/arm/mach-imx/mach-cpuimx35.c index 362aae780601..66af2e8f7e57 100644 --- a/trunk/arch/arm/mach-imx/mach-cpuimx35.c +++ b/trunk/arch/arm/mach-imx/mach-cpuimx35.c @@ -53,18 +53,12 @@ static const struct imxi2c_platform_data .bitrate = 100000, }; -#define TSC2007_IRQGPIO IMX_GPIO_NR(3, 2) -static int tsc2007_get_pendown_state(void) -{ - return !gpio_get_value(TSC2007_IRQGPIO); -} - static struct tsc2007_platform_data tsc2007_info = { .model = 2007, .x_plate_ohms = 180, - .get_pendown_state = tsc2007_get_pendown_state, }; +#define TSC2007_IRQGPIO IMX_GPIO_NR(3, 2) static struct i2c_board_info eukrea_cpuimx35_i2c_devices[] = { { I2C_BOARD_INFO("pcf8563", 0x51), diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index eef43e2e163e..7f8915ad5099 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -3247,14 +3247,18 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = { /* 3430ES1-only hwmods */ static __initdata struct omap_hwmod *omap3430es1_hwmods[] = { + &omap3xxx_iva_hwmod, &omap3430es1_dss_core_hwmod, + &omap3xxx_mailbox_hwmod, NULL }; /* 3430ES2+-only hwmods */ static __initdata struct omap_hwmod *omap3430es2plus_hwmods[] = { + &omap3xxx_iva_hwmod, &omap3xxx_dss_core_hwmod, &omap3xxx_usbhsotg_hwmod, + &omap3xxx_mailbox_hwmod, NULL }; diff --git a/trunk/arch/arm/mm/init.c b/trunk/arch/arm/mm/init.c index 7c38474e533a..fbdd12ea3a58 100644 --- a/trunk/arch/arm/mm/init.c +++ b/trunk/arch/arm/mm/init.c @@ -32,7 +32,6 @@ #include #include -#include #include "mm.h" @@ -333,6 +332,7 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); + memblock_init(); for (i = 0; i < mi->nr_banks; i++) memblock_add(mi->bank[i].start, mi->bank[i].size); @@ -371,7 +371,7 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) if (mdesc->reserve) mdesc->reserve(); - memblock_allow_resize(); + memblock_analyze(); memblock_dump_all(); } diff --git a/trunk/arch/arm/mm/proc-v7.S b/trunk/arch/arm/mm/proc-v7.S index e70a73731eaa..2c559ac38142 100644 --- a/trunk/arch/arm/mm/proc-v7.S +++ b/trunk/arch/arm/mm/proc-v7.S @@ -363,13 +363,11 @@ __v7_setup: orreq r10, r10, #1 << 6 @ set bit #6 mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register #endif -#if defined(CONFIG_ARM_ERRATA_751472) && defined(CONFIG_SMP) - ALT_SMP(cmp r6, #0x30) @ present prior to r3p0 - ALT_UP_B(1f) +#ifdef CONFIG_ARM_ERRATA_751472 + cmp r6, #0x30 @ present prior to r3p0 mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register orrlt r10, r10, #1 << 11 @ set bit #11 mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register -1: #endif 3: mov r10, #0 diff --git 
a/trunk/arch/arm/oprofile/common.c b/trunk/arch/arm/oprofile/common.c index 4e0a371630b3..c074e66ad224 100644 --- a/trunk/arch/arm/oprofile/common.c +++ b/trunk/arch/arm/oprofile/common.c @@ -116,7 +116,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) return oprofile_perf_init(ops); } -void oprofile_arch_exit(void) +void __exit oprofile_arch_exit(void) { oprofile_perf_exit(); } diff --git a/trunk/arch/arm/plat-mxc/cpufreq.c b/trunk/arch/arm/plat-mxc/cpufreq.c index 73db34bf588a..adbff706ef6f 100644 --- a/trunk/arch/arm/plat-mxc/cpufreq.c +++ b/trunk/arch/arm/plat-mxc/cpufreq.c @@ -98,7 +98,7 @@ static int mxc_set_target(struct cpufreq_policy *policy, return ret; } -static int mxc_cpufreq_init(struct cpufreq_policy *policy) +static int __init mxc_cpufreq_init(struct cpufreq_policy *policy) { int ret; int i; diff --git a/trunk/arch/arm/plat-mxc/include/mach/uncompress.h b/trunk/arch/arm/plat-mxc/include/mach/uncompress.h index 477971b00930..88fd40452567 100644 --- a/trunk/arch/arm/plat-mxc/include/mach/uncompress.h +++ b/trunk/arch/arm/plat-mxc/include/mach/uncompress.h @@ -98,7 +98,6 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id) case MACH_TYPE_PCM043: case MACH_TYPE_LILLY1131: case MACH_TYPE_VPR200: - case MACH_TYPE_EUKREA_CPUIMX35SD: uart_base = MX3X_UART1_BASE_ADDR; break; case MACH_TYPE_MAGX_ZN5: diff --git a/trunk/arch/arm/plat-mxc/pwm.c b/trunk/arch/arm/plat-mxc/pwm.c index e032717f7d02..845de59f07ed 100644 --- a/trunk/arch/arm/plat-mxc/pwm.c +++ b/trunk/arch/arm/plat-mxc/pwm.c @@ -77,15 +77,6 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) do_div(c, period_ns); duty_cycles = c; - /* - * according to imx pwm RM, the real period value should be - * PERIOD value in PWMPR plus 2. - */ - if (period_cycles > 2) - period_cycles -= 2; - else - period_cycles = 0; - writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR); writel(period_cycles, pwm->mmio_base + MX3_PWMPR); diff --git a/trunk/arch/arm/plat-orion/gpio.c b/trunk/arch/arm/plat-orion/gpio.c index 10d160888133..41ab97ebe4cf 100644 --- a/trunk/arch/arm/plat-orion/gpio.c +++ b/trunk/arch/arm/plat-orion/gpio.c @@ -384,16 +384,12 @@ void __init orion_gpio_init(int gpio_base, int ngpio, struct orion_gpio_chip *ochip; struct irq_chip_generic *gc; struct irq_chip_type *ct; - char gc_label[16]; if (orion_gpio_chip_count == ARRAY_SIZE(orion_gpio_chips)) return; - snprintf(gc_label, sizeof(gc_label), "orion_gpio%d", - orion_gpio_chip_count); - ochip = orion_gpio_chips + orion_gpio_chip_count; - ochip->chip.label = kstrdup(gc_label, GFP_KERNEL); + ochip->chip.label = "orion_gpio"; ochip->chip.request = orion_gpio_request; ochip->chip.direction_input = orion_gpio_direction_input; ochip->chip.get = orion_gpio_get; diff --git a/trunk/arch/arm/plat-samsung/include/plat/cpu-freq-core.h b/trunk/arch/arm/plat-samsung/include/plat/cpu-freq-core.h index 95509d8eb140..dac4760c0f0a 100644 --- a/trunk/arch/arm/plat-samsung/include/plat/cpu-freq-core.h +++ b/trunk/arch/arm/plat-samsung/include/plat/cpu-freq-core.h @@ -202,6 +202,14 @@ extern int s3c_plltab_register(struct cpufreq_frequency_table *plls, extern struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void); extern struct s3c_iotimings *s3c_cpufreq_getiotimings(void); +extern void s3c2410_iotiming_debugfs(struct seq_file *seq, + struct s3c_cpufreq_config *cfg, + union s3c_iobank *iob); + +extern void s3c2412_iotiming_debugfs(struct seq_file *seq, + struct s3c_cpufreq_config *cfg, + union s3c_iobank *iob); + #ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUGFS 
#define s3c_cpufreq_debugfs_call(x) x #else @@ -218,10 +226,6 @@ extern void s3c2410_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg); extern void s3c2410_set_fvco(struct s3c_cpufreq_config *cfg); #ifdef CONFIG_S3C2410_IOTIMING -extern void s3c2410_iotiming_debugfs(struct seq_file *seq, - struct s3c_cpufreq_config *cfg, - union s3c_iobank *iob); - extern int s3c2410_iotiming_calc(struct s3c_cpufreq_config *cfg, struct s3c_iotimings *iot); @@ -231,7 +235,6 @@ extern int s3c2410_iotiming_get(struct s3c_cpufreq_config *cfg, extern void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg, struct s3c_iotimings *iot); #else -#define s3c2410_iotiming_debugfs NULL #define s3c2410_iotiming_calc NULL #define s3c2410_iotiming_get NULL #define s3c2410_iotiming_set NULL @@ -239,10 +242,8 @@ extern void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg, /* S3C2412 compatible routines */ -#ifdef CONFIG_S3C2412_IOTIMING -extern void s3c2412_iotiming_debugfs(struct seq_file *seq, - struct s3c_cpufreq_config *cfg, - union s3c_iobank *iob); +extern int s3c2412_iotiming_get(struct s3c_cpufreq_config *cfg, + struct s3c_iotimings *timings); extern int s3c2412_iotiming_get(struct s3c_cpufreq_config *cfg, struct s3c_iotimings *timings); @@ -252,12 +253,6 @@ extern int s3c2412_iotiming_calc(struct s3c_cpufreq_config *cfg, extern void s3c2412_iotiming_set(struct s3c_cpufreq_config *cfg, struct s3c_iotimings *iot); -#else -#define s3c2412_iotiming_debugfs NULL -#define s3c2412_iotiming_calc NULL -#define s3c2412_iotiming_get NULL -#define s3c2412_iotiming_set NULL -#endif /* CONFIG_S3C2412_IOTIMING */ #ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUG #define s3c_freq_dbg(x...) printk(KERN_INFO x) diff --git a/trunk/arch/avr32/include/asm/socket.h b/trunk/arch/avr32/include/asm/socket.h index 247b88c760be..c8d1fae49476 100644 --- a/trunk/arch/avr32/include/asm/socket.h +++ b/trunk/arch/avr32/include/asm/socket.h @@ -62,7 +62,4 @@ #define SO_RXQ_OVFL 40 -#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS - #endif /* __ASM_AVR32_SOCKET_H */ diff --git a/trunk/arch/avr32/kernel/process.c b/trunk/arch/avr32/kernel/process.c index ea3395750324..ef5a2a08fcca 100644 --- a/trunk/arch/avr32/kernel/process.c +++ b/trunk/arch/avr32/kernel/process.c @@ -34,12 +34,10 @@ void cpu_idle(void) { /* endless idle loop with no priority at all */ while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); + tick_nohz_stop_sched_tick(1); while (!need_resched()) cpu_idle_sleep(); - rcu_idle_exit(); - tick_nohz_idle_exit(); + tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); schedule(); preempt_disable(); diff --git a/trunk/arch/blackfin/kernel/process.c b/trunk/arch/blackfin/kernel/process.c index 8dd0416673cb..6a80a9e9fc4a 100644 --- a/trunk/arch/blackfin/kernel/process.c +++ b/trunk/arch/blackfin/kernel/process.c @@ -88,12 +88,10 @@ void cpu_idle(void) #endif if (!idle) idle = default_idle; - tick_nohz_idle_enter(); - rcu_idle_enter(); + tick_nohz_stop_sched_tick(1); while (!need_resched()) idle(); - rcu_idle_exit(); - tick_nohz_idle_exit(); + tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); schedule(); preempt_disable(); diff --git a/trunk/arch/cris/arch-v32/kernel/time.c b/trunk/arch/cris/arch-v32/kernel/time.c index 6773fc83a670..bb978ede8985 100644 --- a/trunk/arch/cris/arch-v32/kernel/time.c +++ b/trunk/arch/cris/arch-v32/kernel/time.c @@ -47,12 +47,14 @@ static struct clocksource cont_rotime = { .rating = 300, .read = read_cont_rotime, .mask = CLOCKSOURCE_MASK(32), + .shift = 10, .flags = 
CLOCK_SOURCE_IS_CONTINUOUS, }; static int __init etrax_init_cont_rotime(void) { - clocksource_register_khz(&cont_rotime, 100000); + cont_rotime.mult = clocksource_khz2mult(100000, cont_rotime.shift); + clocksource_register(&cont_rotime); return 0; } arch_initcall(etrax_init_cont_rotime); diff --git a/trunk/arch/cris/include/asm/socket.h b/trunk/arch/cris/include/asm/socket.h index e269264df7c4..1a4a61909ca8 100644 --- a/trunk/arch/cris/include/asm/socket.h +++ b/trunk/arch/cris/include/asm/socket.h @@ -64,9 +64,6 @@ #define SO_RXQ_OVFL 40 -#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS - #endif /* _ASM_SOCKET_H */ diff --git a/trunk/arch/frv/include/asm/socket.h b/trunk/arch/frv/include/asm/socket.h index ce80fdadcce5..a6b26880c1ec 100644 --- a/trunk/arch/frv/include/asm/socket.h +++ b/trunk/arch/frv/include/asm/socket.h @@ -62,8 +62,5 @@ #define SO_RXQ_OVFL 40 -#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS - #endif /* _ASM_SOCKET_H */ diff --git a/trunk/arch/h8300/include/asm/socket.h b/trunk/arch/h8300/include/asm/socket.h index cf1daab6f27e..04c0f4596eb5 100644 --- a/trunk/arch/h8300/include/asm/socket.h +++ b/trunk/arch/h8300/include/asm/socket.h @@ -62,7 +62,4 @@ #define SO_RXQ_OVFL 40 -#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS - #endif /* _ASM_SOCKET_H */ diff --git a/trunk/arch/ia64/Kconfig b/trunk/arch/ia64/Kconfig index 3b7a7c483785..27489b6dd533 100644 --- a/trunk/arch/ia64/Kconfig +++ b/trunk/arch/ia64/Kconfig @@ -23,9 +23,6 @@ config IA64 select HAVE_ARCH_TRACEHOOK select HAVE_DMA_API_DEBUG select HAVE_GENERIC_HARDIRQS - select HAVE_MEMBLOCK - select HAVE_MEMBLOCK_NODE_MAP - select ARCH_DISCARD_MEMBLOCK select GENERIC_IRQ_PROBE select GENERIC_PENDING_IRQ if SMP select IRQ_PER_CPU @@ -477,6 +474,9 @@ config NODES_SHIFT MAX_NUMNODES will be 2^(This value). If in doubt, use the default. +config ARCH_POPULATES_NODE_MAP + def_bool y + # VIRTUAL_MEM_MAP and FLAT_NODE_MEM_MAP are functionally equivalent. # VIRTUAL_MEM_MAP has been retained for historical reasons. 
config VIRTUAL_MEM_MAP diff --git a/trunk/arch/ia64/include/asm/cputime.h b/trunk/arch/ia64/include/asm/cputime.h index 3deac956d325..6073b187528a 100644 --- a/trunk/arch/ia64/include/asm/cputime.h +++ b/trunk/arch/ia64/include/asm/cputime.h @@ -26,53 +26,59 @@ #include #include -typedef u64 __nocast cputime_t; -typedef u64 __nocast cputime64_t; +typedef u64 cputime_t; +typedef u64 cputime64_t; +#define cputime_zero ((cputime_t)0) #define cputime_one_jiffy jiffies_to_cputime(1) +#define cputime_max ((~((cputime_t)0) >> 1) - 1) +#define cputime_add(__a, __b) ((__a) + (__b)) +#define cputime_sub(__a, __b) ((__a) - (__b)) +#define cputime_div(__a, __n) ((__a) / (__n)) +#define cputime_halve(__a) ((__a) >> 1) +#define cputime_eq(__a, __b) ((__a) == (__b)) +#define cputime_gt(__a, __b) ((__a) > (__b)) +#define cputime_ge(__a, __b) ((__a) >= (__b)) +#define cputime_lt(__a, __b) ((__a) < (__b)) +#define cputime_le(__a, __b) ((__a) <= (__b)) + +#define cputime64_zero ((cputime64_t)0) +#define cputime64_add(__a, __b) ((__a) + (__b)) +#define cputime64_sub(__a, __b) ((__a) - (__b)) +#define cputime_to_cputime64(__ct) (__ct) /* * Convert cputime <-> jiffies (HZ) */ -#define cputime_to_jiffies(__ct) \ - ((__force u64)(__ct) / (NSEC_PER_SEC / HZ)) -#define jiffies_to_cputime(__jif) \ - (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ)) -#define cputime64_to_jiffies64(__ct) \ - ((__force u64)(__ct) / (NSEC_PER_SEC / HZ)) -#define jiffies64_to_cputime64(__jif) \ - (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ)) +#define cputime_to_jiffies(__ct) ((__ct) / (NSEC_PER_SEC / HZ)) +#define jiffies_to_cputime(__jif) ((__jif) * (NSEC_PER_SEC / HZ)) +#define cputime64_to_jiffies64(__ct) ((__ct) / (NSEC_PER_SEC / HZ)) +#define jiffies64_to_cputime64(__jif) ((__jif) * (NSEC_PER_SEC / HZ)) /* * Convert cputime <-> microseconds */ -#define cputime_to_usecs(__ct) \ - ((__force u64)(__ct) / NSEC_PER_USEC) -#define usecs_to_cputime(__usecs) \ - (__force cputime_t)((__usecs) * NSEC_PER_USEC) -#define usecs_to_cputime64(__usecs) \ - (__force cputime64_t)((__usecs) * NSEC_PER_USEC) +#define cputime_to_usecs(__ct) ((__ct) / NSEC_PER_USEC) +#define usecs_to_cputime(__usecs) ((__usecs) * NSEC_PER_USEC) /* * Convert cputime <-> seconds */ -#define cputime_to_secs(__ct) \ - ((__force u64)(__ct) / NSEC_PER_SEC) -#define secs_to_cputime(__secs) \ - (__force cputime_t)((__secs) * NSEC_PER_SEC) +#define cputime_to_secs(__ct) ((__ct) / NSEC_PER_SEC) +#define secs_to_cputime(__secs) ((__secs) * NSEC_PER_SEC) /* * Convert cputime <-> timespec (nsec) */ static inline cputime_t timespec_to_cputime(const struct timespec *val) { - u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec; - return (__force cputime_t) ret; + cputime_t ret = val->tv_sec * NSEC_PER_SEC; + return (ret + val->tv_nsec); } static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) { - val->tv_sec = (__force u64) ct / NSEC_PER_SEC; - val->tv_nsec = (__force u64) ct % NSEC_PER_SEC; + val->tv_sec = ct / NSEC_PER_SEC; + val->tv_nsec = ct % NSEC_PER_SEC; } /* @@ -80,28 +86,25 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) */ static inline cputime_t timeval_to_cputime(struct timeval *val) { - u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC; - return (__force cputime_t) ret; + cputime_t ret = val->tv_sec * NSEC_PER_SEC; + return (ret + val->tv_usec * NSEC_PER_USEC); } static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val) { - val->tv_sec = (__force u64) ct / 
NSEC_PER_SEC; - val->tv_usec = ((__force u64) ct % NSEC_PER_SEC) / NSEC_PER_USEC; + val->tv_sec = ct / NSEC_PER_SEC; + val->tv_usec = (ct % NSEC_PER_SEC) / NSEC_PER_USEC; } /* * Convert cputime <-> clock (USER_HZ) */ -#define cputime_to_clock_t(__ct) \ - ((__force u64)(__ct) / (NSEC_PER_SEC / USER_HZ)) -#define clock_t_to_cputime(__x) \ - (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ)) +#define cputime_to_clock_t(__ct) ((__ct) / (NSEC_PER_SEC / USER_HZ)) +#define clock_t_to_cputime(__x) ((__x) * (NSEC_PER_SEC / USER_HZ)) /* * Convert cputime64 to clock. */ -#define cputime64_to_clock_t(__ct) \ - cputime_to_clock_t((__force cputime_t)__ct) +#define cputime64_to_clock_t(__ct) cputime_to_clock_t((cputime_t)__ct) #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ #endif /* __IA64_CPUTIME_H */ diff --git a/trunk/arch/ia64/include/asm/socket.h b/trunk/arch/ia64/include/asm/socket.h index 4b03664e3fb5..51427eaa51ba 100644 --- a/trunk/arch/ia64/include/asm/socket.h +++ b/trunk/arch/ia64/include/asm/socket.h @@ -71,7 +71,4 @@ #define SO_RXQ_OVFL 40 -#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS - #endif /* _ASM_IA64_SOCKET_H */ diff --git a/trunk/arch/ia64/mm/contig.c b/trunk/arch/ia64/mm/contig.c index 1516d1dc11fd..f114a3b14c6a 100644 --- a/trunk/arch/ia64/mm/contig.c +++ b/trunk/arch/ia64/mm/contig.c @@ -16,7 +16,6 @@ */ #include #include -#include #include #include #include @@ -349,7 +348,7 @@ paging_init (void) printk("Virtual mem_map starts at 0x%p\n", mem_map); } #else /* !CONFIG_VIRTUAL_MEM_MAP */ - memblock_add_node(0, PFN_PHYS(max_low_pfn), 0); + add_active_range(0, 0, max_low_pfn); free_area_init_nodes(max_zone_pfns); #endif /* !CONFIG_VIRTUAL_MEM_MAP */ zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page)); diff --git a/trunk/arch/ia64/mm/init.c b/trunk/arch/ia64/mm/init.c index 13df239dbed1..00cb0e26c64e 100644 --- a/trunk/arch/ia64/mm/init.c +++ b/trunk/arch/ia64/mm/init.c @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include @@ -558,7 +557,8 @@ int __init register_active_ranges(u64 start, u64 len, int nid) #endif if (start < end) - memblock_add_node(__pa(start), end - start, nid); + add_active_range(nid, __pa(start) >> PAGE_SHIFT, + __pa(end) >> PAGE_SHIFT); return 0; } diff --git a/trunk/arch/m32r/include/asm/socket.h b/trunk/arch/m32r/include/asm/socket.h index e8b8c5bb053c..469787c30098 100644 --- a/trunk/arch/m32r/include/asm/socket.h +++ b/trunk/arch/m32r/include/asm/socket.h @@ -62,7 +62,4 @@ #define SO_RXQ_OVFL 40 -#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS - #endif /* _ASM_M32R_SOCKET_H */ diff --git a/trunk/arch/m68k/include/asm/socket.h b/trunk/arch/m68k/include/asm/socket.h index d4708ce466e0..9bf49c87d954 100644 --- a/trunk/arch/m68k/include/asm/socket.h +++ b/trunk/arch/m68k/include/asm/socket.h @@ -62,7 +62,4 @@ #define SO_RXQ_OVFL 40 -#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS - #endif /* _ASM_SOCKET_H */ diff --git a/trunk/arch/microblaze/include/asm/memblock.h b/trunk/arch/microblaze/include/asm/memblock.h new file mode 100644 index 000000000000..20a8e257c77f --- /dev/null +++ b/trunk/arch/microblaze/include/asm/memblock.h @@ -0,0 +1,14 @@ +/* + * Copyright (C) 2008 Michal Simek + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#ifndef _ASM_MICROBLAZE_MEMBLOCK_H +#define _ASM_MICROBLAZE_MEMBLOCK_H + +#endif /* _ASM_MICROBLAZE_MEMBLOCK_H */ + + diff --git a/trunk/arch/microblaze/kernel/process.c b/trunk/arch/microblaze/kernel/process.c index 7dcb5bfffb75..95cc295976a7 100644 --- a/trunk/arch/microblaze/kernel/process.c +++ b/trunk/arch/microblaze/kernel/process.c @@ -103,12 +103,10 @@ void cpu_idle(void) if (!idle) idle = default_idle; - tick_nohz_idle_enter(); - rcu_idle_enter(); + tick_nohz_stop_sched_tick(1); while (!need_resched()) idle(); - rcu_idle_exit(); - tick_nohz_idle_exit(); + tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); schedule(); diff --git a/trunk/arch/microblaze/kernel/prom.c b/trunk/arch/microblaze/kernel/prom.c index 80d314e81901..977484add216 100644 --- a/trunk/arch/microblaze/kernel/prom.c +++ b/trunk/arch/microblaze/kernel/prom.c @@ -122,6 +122,7 @@ void __init early_init_devtree(void *params) of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line); /* Scan memory nodes and rebuild MEMBLOCKs */ + memblock_init(); of_scan_flat_dt(early_init_dt_scan_root, NULL); of_scan_flat_dt(early_init_dt_scan_memory, NULL); @@ -129,7 +130,7 @@ void __init early_init_devtree(void *params) strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); parse_early_param(); - memblock_allow_resize(); + memblock_analyze(); pr_debug("Phys. mem: %lx\n", (unsigned long) memblock_phys_mem_size()); diff --git a/trunk/arch/mips/Kconfig b/trunk/arch/mips/Kconfig index 9c652eb68aaa..d46f1da18a3c 100644 --- a/trunk/arch/mips/Kconfig +++ b/trunk/arch/mips/Kconfig @@ -25,9 +25,6 @@ config MIPS select GENERIC_IRQ_SHOW select HAVE_ARCH_JUMP_LABEL select IRQ_FORCED_THREADING - select HAVE_MEMBLOCK - select HAVE_MEMBLOCK_NODE_MAP - select ARCH_DISCARD_MEMBLOCK menu "Machine selection" @@ -2067,6 +2064,9 @@ config ARCH_DISCONTIGMEM_ENABLE or have huge holes in the physical address space for other reasons. See for more. +config ARCH_POPULATES_NODE_MAP + def_bool y + config ARCH_SPARSEMEM_ENABLE bool select SPARSEMEM_STATIC diff --git a/trunk/arch/mips/include/asm/ip32/mace.h b/trunk/arch/mips/include/asm/ip32/mace.h index c523123df380..d08d7c672139 100644 --- a/trunk/arch/mips/include/asm/ip32/mace.h +++ b/trunk/arch/mips/include/asm/ip32/mace.h @@ -95,7 +95,7 @@ struct mace_video { * Ethernet interface */ struct mace_ethernet { - volatile u64 mac_ctrl; + volatile unsigned long mac_ctrl; volatile unsigned long int_stat; volatile unsigned long dma_ctrl; volatile unsigned long timer; diff --git a/trunk/arch/mips/include/asm/socket.h b/trunk/arch/mips/include/asm/socket.h index ad5c0a7a02a7..9de5190f2487 100644 --- a/trunk/arch/mips/include/asm/socket.h +++ b/trunk/arch/mips/include/asm/socket.h @@ -82,9 +82,6 @@ To add: #define SO_REUSEPORT 0x0200 /* Allow local address and port reuse. 
*/ #define SO_RXQ_OVFL 40 -#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS - #ifdef __KERNEL__ /** sock_type - Socket types diff --git a/trunk/arch/mips/kernel/process.c b/trunk/arch/mips/kernel/process.c index 7955409051c4..c47f96e453c0 100644 --- a/trunk/arch/mips/kernel/process.c +++ b/trunk/arch/mips/kernel/process.c @@ -56,8 +56,7 @@ void __noreturn cpu_idle(void) /* endless idle loop with no priority at all */ while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); + tick_nohz_stop_sched_tick(1); while (!need_resched() && cpu_online(cpu)) { #ifdef CONFIG_MIPS_MT_SMTC extern void smtc_idle_loop_hook(void); @@ -78,8 +77,7 @@ void __noreturn cpu_idle(void) system_state == SYSTEM_BOOTING)) play_dead(); #endif - rcu_idle_exit(); - tick_nohz_idle_exit(); + tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); schedule(); preempt_disable(); diff --git a/trunk/arch/mips/kernel/setup.c b/trunk/arch/mips/kernel/setup.c index b1cb8f87d7b4..84af26ab2212 100644 --- a/trunk/arch/mips/kernel/setup.c +++ b/trunk/arch/mips/kernel/setup.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -353,7 +352,7 @@ static void __init bootmem_init(void) continue; #endif - memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0); + add_active_range(0, start, end); } /* diff --git a/trunk/arch/mips/sgi-ip27/ip27-memory.c b/trunk/arch/mips/sgi-ip27/ip27-memory.c index b105eca3c020..bc1297109cc5 100644 --- a/trunk/arch/mips/sgi-ip27/ip27-memory.c +++ b/trunk/arch/mips/sgi-ip27/ip27-memory.c @@ -12,7 +12,6 @@ */ #include #include -#include #include #include #include @@ -382,8 +381,8 @@ static void __init szmem(void) continue; } num_physpages += slot_psize; - memblock_add_node(PFN_PHYS(slot_getbasepfn(node, slot)), - PFN_PHYS(slot_psize), node); + add_active_range(node, slot_getbasepfn(node, slot), + slot_getbasepfn(node, slot) + slot_psize); } } } diff --git a/trunk/arch/mn10300/include/asm/socket.h b/trunk/arch/mn10300/include/asm/socket.h index 876356d78522..4e60c4281288 100644 --- a/trunk/arch/mn10300/include/asm/socket.h +++ b/trunk/arch/mn10300/include/asm/socket.h @@ -62,7 +62,4 @@ #define SO_RXQ_OVFL 40 -#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS - #endif /* _ASM_SOCKET_H */ diff --git a/trunk/arch/openrisc/include/asm/memblock.h b/trunk/arch/openrisc/include/asm/memblock.h new file mode 100644 index 000000000000..bbe5a1c788cb --- /dev/null +++ b/trunk/arch/openrisc/include/asm/memblock.h @@ -0,0 +1,24 @@ +/* + * OpenRISC Linux + * + * Linux architectural port borrowing liberally from similar works of + * others. All original copyrights apply as per the original source + * declaration. + * + * OpenRISC implementation: + * Copyright (C) 2003 Matjaz Breskvar + * Copyright (C) 2010-2011 Jonas Bonn + * et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __ASM_OPENRISC_MEMBLOCK_H +#define __ASM_OPENRISC_MEMBLOCK_H + +/* empty */ + +#endif /* __ASM_OPENRISC_MEMBLOCK_H */ diff --git a/trunk/arch/openrisc/kernel/idle.c b/trunk/arch/openrisc/kernel/idle.c index e5fc78877830..d5bc5f813e89 100644 --- a/trunk/arch/openrisc/kernel/idle.c +++ b/trunk/arch/openrisc/kernel/idle.c @@ -51,8 +51,7 @@ void cpu_idle(void) /* endless idle loop with no priority at all */ while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); + tick_nohz_stop_sched_tick(1); while (!need_resched()) { check_pgt_cache(); @@ -70,8 +69,7 @@ void cpu_idle(void) set_thread_flag(TIF_POLLING_NRFLAG); } - rcu_idle_exit(); - tick_nohz_idle_exit(); + tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); schedule(); preempt_disable(); diff --git a/trunk/arch/openrisc/kernel/prom.c b/trunk/arch/openrisc/kernel/prom.c index 3d4478f6c942..1bb58ba89afa 100644 --- a/trunk/arch/openrisc/kernel/prom.c +++ b/trunk/arch/openrisc/kernel/prom.c @@ -76,13 +76,14 @@ void __init early_init_devtree(void *params) of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line); /* Scan memory nodes and rebuild MEMBLOCKs */ + memblock_init(); of_scan_flat_dt(early_init_dt_scan_root, NULL); of_scan_flat_dt(early_init_dt_scan_memory, NULL); /* Save command line for /proc/cmdline and then parse parameters */ strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); - memblock_allow_resize(); + memblock_analyze(); /* We must copy the flattend device tree from init memory to regular * memory because the device tree references the strings in it diff --git a/trunk/arch/parisc/include/asm/socket.h b/trunk/arch/parisc/include/asm/socket.h index d28c51b61067..225b7d6a1a0a 100644 --- a/trunk/arch/parisc/include/asm/socket.h +++ b/trunk/arch/parisc/include/asm/socket.h @@ -61,9 +61,6 @@ #define SO_RXQ_OVFL 0x4021 -#define SO_WIFI_STATUS 0x4022 -#define SCM_WIFI_STATUS SO_WIFI_STATUS - /* O_NONBLOCK clashes with the bits used for socket types. Therefore we * have to define SOCK_NONBLOCK to a different value here. 
*/ diff --git a/trunk/arch/parisc/kernel/time.c b/trunk/arch/parisc/kernel/time.c index 7c0774397b89..45b7389d77aa 100644 --- a/trunk/arch/parisc/kernel/time.c +++ b/trunk/arch/parisc/kernel/time.c @@ -198,6 +198,8 @@ static struct clocksource clocksource_cr16 = { .rating = 300, .read = read_cr16, .mask = CLOCKSOURCE_MASK(BITS_PER_LONG), + .mult = 0, /* to be set */ + .shift = 22, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -268,5 +270,7 @@ void __init time_init(void) /* register at clocksource framework */ current_cr16_khz = PAGE0->mem_10msec/10; /* kHz */ - clocksource_register_khz(&clocksource_cr16, current_cr16_khz); + clocksource_cr16.mult = clocksource_khz2mult(current_cr16_khz, + clocksource_cr16.shift); + clocksource_register(&clocksource_cr16); } diff --git a/trunk/arch/powerpc/Kconfig b/trunk/arch/powerpc/Kconfig index 692ac7588e20..951e18f5335b 100644 --- a/trunk/arch/powerpc/Kconfig +++ b/trunk/arch/powerpc/Kconfig @@ -87,10 +87,6 @@ config ARCH_HAS_ILOG2_U64 bool default y if 64BIT -config ARCH_HAS_CPU_IDLE_WAIT - bool - default y - config GENERIC_HWEIGHT bool default y @@ -121,7 +117,6 @@ config PPC select HAVE_KRETPROBES select HAVE_ARCH_TRACEHOOK select HAVE_MEMBLOCK - select HAVE_MEMBLOCK_NODE_MAP select HAVE_DMA_ATTRS select HAVE_DMA_API_DEBUG select USE_GENERIC_SMP_HELPERS if SMP @@ -137,7 +132,6 @@ config PPC select IRQ_PER_CPU select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW_LEVEL - select IRQ_FORCED_THREADING select HAVE_RCU_TABLE_FREE if SMP select HAVE_SYSCALL_TRACEPOINTS select HAVE_BPF_JIT if (PPC64 && NET) @@ -368,9 +362,8 @@ config KEXEC config CRASH_DUMP bool "Build a kdump crash kernel" - depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP && !PPC_47x) - select RELOCATABLE if PPC64 || 44x - select DYNAMIC_MEMSTART if FSL_BOOKE + depends on PPC64 || 6xx || FSL_BOOKE + select RELOCATABLE if PPC64 || FSL_BOOKE help Build a kernel suitable for use as a kdump capture kernel. The same kernel binary can be used as production kernel and dump @@ -428,6 +421,9 @@ config ARCH_SPARSEMEM_DEFAULT def_bool y depends on (SMP && PPC_PSERIES) || PPC_PS3 +config ARCH_POPULATES_NODE_MAP + def_bool y + config SYS_SUPPORTS_HUGETLBFS bool @@ -691,10 +687,6 @@ config FSL_LBC controller. Also contains some common code used by drivers for specific local bus peripherals. -config FSL_IFC - bool - depends on FSL_SOC - config FSL_GTM bool depends on PPC_83xx || QUICC_ENGINE || CPM2 @@ -780,10 +772,6 @@ source "drivers/rapidio/Kconfig" endmenu -config NONSTATIC_KERNEL - bool - default n - menu "Advanced setup" depends on PPC32 @@ -833,32 +821,13 @@ config LOWMEM_CAM_NUM int "Number of CAMs to use to map low memory" if LOWMEM_CAM_NUM_BOOL default 3 -config DYNAMIC_MEMSTART - bool "Enable page aligned dynamic load address for kernel (EXPERIMENTAL)" - depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM && (FSL_BOOKE || 44x) - select NONSTATIC_KERNEL - help - This option enables the kernel to be loaded at any page aligned - physical address. The kernel creates a mapping from KERNELBASE to - the address where the kernel is loaded. The page size here implies - the TLB page size of the mapping for kernel on the particular platform. - Please refer to the init code for finding the TLB page size. - - DYNAMIC_MEMSTART is an easy way of implementing pseudo-RELOCATABLE - kernel image, where the only restriction is the page aligned kernel - load address. When this option is enabled, the compile time physical - address CONFIG_PHYSICAL_START is ignored. 
- - This option is overridden by CONFIG_RELOCATABLE - config RELOCATABLE bool "Build a relocatable kernel (EXPERIMENTAL)" - depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM && 44x - select NONSTATIC_KERNEL + depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM && (FSL_BOOKE || PPC_47x) help This builds a kernel image that is capable of running at the - location the kernel is loaded at, without any alignment restrictions. - This feature is a superset of DYNAMIC_MEMSTART and hence overrides it. + location the kernel is loaded at (some alignment restrictions may + exist). One use is for the kexec on panic case where the recovery kernel must live at a different physical address than the primary @@ -868,11 +837,7 @@ config RELOCATABLE it has been loaded at and the compile time physical addresses CONFIG_PHYSICAL_START is ignored. However CONFIG_PHYSICAL_START setting can still be useful to bootwrappers that need to know the - load address of the kernel (eg. u-boot/mkimage). - -config RELOCATABLE_PPC32 - def_bool y - depends on PPC32 && RELOCATABLE + load location of the kernel (eg. u-boot/mkimage). config PAGE_OFFSET_BOOL bool "Set custom page offset address" @@ -902,7 +867,7 @@ config KERNEL_START_BOOL config KERNEL_START hex "Virtual address of kernel base" if KERNEL_START_BOOL default PAGE_OFFSET if PAGE_OFFSET_BOOL - default "0xc2000000" if CRASH_DUMP && !NONSTATIC_KERNEL + default "0xc2000000" if CRASH_DUMP && !RELOCATABLE default "0xc0000000" config PHYSICAL_START_BOOL @@ -915,7 +880,7 @@ config PHYSICAL_START_BOOL config PHYSICAL_START hex "Physical address where the kernel is loaded" if PHYSICAL_START_BOOL - default "0x02000000" if PPC_STD_MMU && CRASH_DUMP && !NONSTATIC_KERNEL + default "0x02000000" if PPC_STD_MMU && CRASH_DUMP && !RELOCATABLE default "0x00000000" config PHYSICAL_ALIGN @@ -961,7 +926,6 @@ endmenu if PPC64 config RELOCATABLE bool "Build a relocatable kernel" - select NONSTATIC_KERNEL help This builds a kernel image that is capable of running anywhere in the RMA (real memory area) at any 16k-aligned base address. diff --git a/trunk/arch/powerpc/Kconfig.debug b/trunk/arch/powerpc/Kconfig.debug index 4ccb2a009f74..1b8a9c905cf7 100644 --- a/trunk/arch/powerpc/Kconfig.debug +++ b/trunk/arch/powerpc/Kconfig.debug @@ -336,16 +336,4 @@ config PPC_EARLY_DEBUG_CPM_ADDR platform probing is done, all platforms selected must share the same address. -config STRICT_DEVMEM - def_bool y - prompt "Filter access to /dev/mem" - help - This option restricts access to /dev/mem. If this option is - disabled, you allow userspace access to all memory, including - kernel and userspace memory. Accidental memory access is likely - to be disastrous. - Memory access is required for experts who want to debug the kernel. - - If you are unsure, say Y. 
- endmenu diff --git a/trunk/arch/powerpc/Makefile b/trunk/arch/powerpc/Makefile index b8b105c01c64..70ba0c0a1223 100644 --- a/trunk/arch/powerpc/Makefile +++ b/trunk/arch/powerpc/Makefile @@ -63,9 +63,9 @@ override CC += -m$(CONFIG_WORD_SIZE) override AR := GNUTARGET=elf$(CONFIG_WORD_SIZE)-powerpc $(AR) endif -LDFLAGS_vmlinux-y := -Bstatic -LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie -LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-y) +LDFLAGS_vmlinux-yy := -Bstatic +LDFLAGS_vmlinux-$(CONFIG_PPC64)$(CONFIG_RELOCATABLE) := -pie +LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-yy) CFLAGS-$(CONFIG_PPC64) := -mminimal-toc -mtraceback=no -mcall-aixdesc CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple @@ -131,7 +131,8 @@ KBUILD_CFLAGS += -mno-sched-epilog endif cpu-as-$(CONFIG_4xx) += -Wa,-m405 -cpu-as-$(CONFIG_ALTIVEC) += -Wa,-maltivec +cpu-as-$(CONFIG_6xx) += -Wa,-maltivec +cpu-as-$(CONFIG_POWER4) += -Wa,-maltivec cpu-as-$(CONFIG_E500) += -Wa,-me500 cpu-as-$(CONFIG_E200) += -Wa,-me200 @@ -165,7 +166,7 @@ all: zImage # With make 3.82 we cannot mix normal and wildcard targets BOOT_TARGETS1 := zImage zImage.initrd uImage -BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.% uImage.% +BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.% PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2) diff --git a/trunk/arch/powerpc/boot/Makefile b/trunk/arch/powerpc/boot/Makefile index 15986e70799c..72ee8c1fba48 100644 --- a/trunk/arch/powerpc/boot/Makefile +++ b/trunk/arch/powerpc/boot/Makefile @@ -45,7 +45,6 @@ $(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405 $(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405 $(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405 $(obj)/treeboot-iss4xx.o: BOOTCFLAGS += -mcpu=405 -$(obj)/treeboot-currituck.o: BOOTCFLAGS += -mcpu=405 $(obj)/virtex405-head.o: BOOTAFLAGS += -mcpu=405 @@ -80,8 +79,7 @@ src-plat := of.c cuboot-52xx.c cuboot-824x.c cuboot-83xx.c cuboot-85xx.c holly.c cuboot-warp.c cuboot-85xx-cpm2.c cuboot-yosemite.c simpleboot.c \ virtex405-head.S virtex.c redboot-83xx.c cuboot-sam440ep.c \ cuboot-acadia.c cuboot-amigaone.c cuboot-kilauea.c \ - gamecube-head.S gamecube.c wii-head.S wii.c treeboot-iss4xx.c \ - treeboot-currituck.c + gamecube-head.S gamecube.c wii-head.S wii.c treeboot-iss4xx.c src-boot := $(src-wlib) $(src-plat) empty.c src-boot := $(addprefix $(obj)/, $(src-boot)) @@ -201,7 +199,6 @@ image-$(CONFIG_EP405) += dtbImage.ep405 image-$(CONFIG_HOTFOOT) += cuImage.hotfoot image-$(CONFIG_WALNUT) += treeImage.walnut image-$(CONFIG_ACADIA) += cuImage.acadia -image-$(CONFIG_OBS600) += uImage.obs600 # Board ports in arch/powerpc/platform/44x/Kconfig image-$(CONFIG_EBONY) += treeImage.ebony cuImage.ebony @@ -215,7 +212,6 @@ image-$(CONFIG_WARP) += cuImage.warp image-$(CONFIG_YOSEMITE) += cuImage.yosemite image-$(CONFIG_ISS4xx) += treeImage.iss4xx \ treeImage.iss4xx-mpic -image-$(CONFIG_CURRITUCK) += treeImage.currituck # Board ports in arch/powerpc/platform/8xx/Kconfig image-$(CONFIG_MPC86XADS) += cuImage.mpc866ads @@ -320,12 +316,6 @@ $(obj)/zImage.iseries: vmlinux $(obj)/uImage: vmlinux $(wrapperbits) $(call if_changed,wrap,uboot) -$(obj)/uImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) - $(call if_changed,wrap,uboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz) - -$(obj)/uImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) - $(call if_changed,wrap,uboot-$*,,$(obj)/$*.dtb) - $(obj)/cuImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) $(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz) diff --git a/trunk/arch/powerpc/boot/dcr.h 
b/trunk/arch/powerpc/boot/dcr.h index cc73f7a95e26..645a7c964e5f 100644 --- a/trunk/arch/powerpc/boot/dcr.h +++ b/trunk/arch/powerpc/boot/dcr.h @@ -9,12 +9,6 @@ }) #define mtdcr(rn, val) \ asm volatile("mtdcr %0,%1" : : "i"(rn), "r"(val)) -#define mfdcrx(rn) \ - ({ \ - unsigned long rval; \ - asm volatile("mfdcrx %0,%1" : "=r"(rval) : "r"(rn)); \ - rval; \ - }) /* 440GP/440GX SDRAM controller DCRs */ #define DCRN_SDRAM0_CFGADDR 0x010 diff --git a/trunk/arch/powerpc/boot/div64.S b/trunk/arch/powerpc/boot/div64.S index bbcb8a4cc121..d271ab542673 100644 --- a/trunk/arch/powerpc/boot/div64.S +++ b/trunk/arch/powerpc/boot/div64.S @@ -57,55 +57,3 @@ __div64_32: stw r8,4(r3) mr r3,r6 # return the remainder in r3 blr - -/* - * Extended precision shifts. - * - * Updated to be valid for shift counts from 0 to 63 inclusive. - * -- Gabriel - * - * R3/R4 has 64 bit value - * R5 has shift count - * result in R3/R4 - * - * ashrdi3: arithmetic right shift (sign propagation) - * lshrdi3: logical right shift - * ashldi3: left shift - */ - .globl __ashrdi3 -__ashrdi3: - subfic r6,r5,32 - srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count - addi r7,r5,32 # could be xori, or addi with -32 - slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count) - rlwinm r8,r7,0,32 # t3 = (count < 32) ? 32 : 0 - sraw r7,r3,r7 # t2 = MSW >> (count-32) - or r4,r4,r6 # LSW |= t1 - slw r7,r7,r8 # t2 = (count < 32) ? 0 : t2 - sraw r3,r3,r5 # MSW = MSW >> count - or r4,r4,r7 # LSW |= t2 - blr - - .globl __ashldi3 -__ashldi3: - subfic r6,r5,32 - slw r3,r3,r5 # MSW = count > 31 ? 0 : MSW << count - addi r7,r5,32 # could be xori, or addi with -32 - srw r6,r4,r6 # t1 = count > 31 ? 0 : LSW >> (32-count) - slw r7,r4,r7 # t2 = count < 32 ? 0 : LSW << (count-32) - or r3,r3,r6 # MSW |= t1 - slw r4,r4,r5 # LSW = LSW << count - or r3,r3,r7 # MSW |= t2 - blr - - .globl __lshrdi3 -__lshrdi3: - subfic r6,r5,32 - srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count - addi r7,r5,32 # could be xori, or addi with -32 - slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count) - srw r7,r3,r7 # t2 = count < 32 ? 0 : MSW >> (count-32) - or r4,r4,r6 # LSW |= t1 - srw r3,r3,r5 # MSW = MSW >> count - or r4,r4,r7 # LSW |= t2 - blr diff --git a/trunk/arch/powerpc/boot/dts/asp834x-redboot.dts b/trunk/arch/powerpc/boot/dts/asp834x-redboot.dts index 227290db866d..261d10c4534b 100644 --- a/trunk/arch/powerpc/boot/dts/asp834x-redboot.dts +++ b/trunk/arch/powerpc/boot/dts/asp834x-redboot.dts @@ -256,7 +256,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <400000000>; interrupts = <9 0x8>; @@ -266,7 +266,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <400000000>; interrupts = <10 0x8>; diff --git a/trunk/arch/powerpc/boot/dts/currituck.dts b/trunk/arch/powerpc/boot/dts/currituck.dts deleted file mode 100644 index b801dd06e573..000000000000 --- a/trunk/arch/powerpc/boot/dts/currituck.dts +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Device Tree Source for IBM Embedded PPC 476 Platform - * - * Copyright © 2011 Tony Breeds IBM Corporation - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without - * any warranty of any kind, whether express or implied. 
- */ - -/dts-v1/; - -/memreserve/ 0x01f00000 0x00100000; // spin table - -/ { - #address-cells = <2>; - #size-cells = <2>; - model = "ibm,currituck"; - compatible = "ibm,currituck"; - dcr-parent = <&{/cpus/cpu@0}>; - - aliases { - serial0 = &UART0; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - cpu@0 { - device_type = "cpu"; - model = "PowerPC,476"; - reg = <0>; - clock-frequency = <1600000000>; // 1.6 GHz - timebase-frequency = <100000000>; // 100Mhz - i-cache-line-size = <32>; - d-cache-line-size = <32>; - i-cache-size = <32768>; - d-cache-size = <32768>; - dcr-controller; - dcr-access-method = "native"; - status = "ok"; - }; - cpu@1 { - device_type = "cpu"; - model = "PowerPC,476"; - reg = <1>; - clock-frequency = <1600000000>; // 1.6 GHz - timebase-frequency = <100000000>; // 100Mhz - i-cache-line-size = <32>; - d-cache-line-size = <32>; - i-cache-size = <32768>; - d-cache-size = <32768>; - dcr-controller; - dcr-access-method = "native"; - status = "disabled"; - enable-method = "spin-table"; - cpu-release-addr = <0x0 0x01f00000>; - }; - }; - - memory { - device_type = "memory"; - reg = <0x0 0x0 0x0 0x0>; // filled in by zImage - }; - - MPIC: interrupt-controller { - compatible = "chrp,open-pic"; - interrupt-controller; - dcr-reg = <0xffc00000 0x00040000>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - - }; - - plb { - compatible = "ibm,plb6"; - #address-cells = <2>; - #size-cells = <2>; - ranges; - clock-frequency = <200000000>; // 200Mhz - - POB0: opb { - compatible = "ibm,opb-4xx", "ibm,opb"; - #address-cells = <1>; - #size-cells = <1>; - /* Wish there was a nicer way of specifying a full - * 32-bit range - */ - ranges = <0x00000000 0x00000200 0x00000000 0x80000000 - 0x80000000 0x00000200 0x80000000 0x80000000>; - clock-frequency = <100000000>; - - UART0: serial@10000000 { - device_type = "serial"; - compatible = "ns16750", "ns16550"; - reg = <0x10000000 0x00000008>; - virtual-reg = <0xe1000000>; - clock-frequency = <1851851>; // PCIe refclk/MCGC0_CTL[UART] - current-speed = <115200>; - interrupt-parent = <&MPIC>; - interrupts = <34 2>; - }; - - IIC0: i2c@00000000 { - compatible = "ibm,iic-currituck", "ibm,iic"; - reg = <0x0 0x00000014>; - interrupt-parent = <&MPIC>; - interrupts = <79 2>; - #address-cells = <1>; - #size-cells = <0>; - rtc@68 { - compatible = "stm,m41t80", "m41st85"; - reg = <0x68>; - }; - }; - }; - - PCIE0: pciex@10100000000 { // 4xGBIF1 - device_type = "pci"; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - compatible = "ibm,plb-pciex-476fpe", "ibm,plb-pciex"; - primary; - port = <0x0>; /* port number */ - reg = <0x00000101 0x00000000 0x0 0x10000000 /* Config space access */ - 0x00000100 0x00000000 0x0 0x00001000>; /* UTL Registers space access */ - dcr-reg = <0x80 0x20>; - -// pci_space < pci_addr > < cpu_addr > < size > - ranges = <0x02000000 0x00000000 0x80000000 0x00000110 0x80000000 0x0 0x80000000 - 0x01000000 0x0 0x0 0x00000140 0x0 0x0 0x00010000>; - - /* Inbound starting at 0 to memsize filled in by zImage */ - dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x0>; - - /* This drives busses 0 to 0xf */ - bus-range = <0x0 0xf>; - - /* Legacy interrupts (note the weird polarity, the bridge seems - * to invert PCIe legacy interrupts). - * We are de-swizzling here because the numbers are actually for - * port of the root complex virtual P2P bridge. But I want - * to avoid putting a node for it in the tree, so the numbers - * below are basically de-swizzled numbers. 
- * The real slot is on idsel 0, so the swizzling is 1:1 - */ - interrupt-map-mask = <0x0 0x0 0x0 0x7>; - interrupt-map = < - 0x0 0x0 0x0 0x1 &MPIC 46 0x2 /* int A */ - 0x0 0x0 0x0 0x2 &MPIC 47 0x2 /* int B */ - 0x0 0x0 0x0 0x3 &MPIC 48 0x2 /* int C */ - 0x0 0x0 0x0 0x4 &MPIC 49 0x2 /* int D */>; - }; - - PCIE1: pciex@30100000000 { // 4xGBIF0 - device_type = "pci"; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - compatible = "ibm,plb-pciex-476fpe", "ibm,plb-pciex"; - primary; - port = <0x1>; /* port number */ - reg = <0x00000301 0x00000000 0x0 0x10000000 /* Config space access */ - 0x00000300 0x00000000 0x0 0x00001000>; /* UTL Registers space access */ - dcr-reg = <0x60 0x20>; - - ranges = <0x02000000 0x00000000 0x80000000 0x00000310 0x80000000 0x0 0x80000000 - 0x01000000 0x0 0x0 0x00000340 0x0 0x0 0x00010000>; - - /* Inbound starting at 0 to memsize filled in by zImage */ - dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x0>; - - /* This drives busses 0 to 0xf */ - bus-range = <0x0 0xf>; - - /* Legacy interrupts (note the weird polarity, the bridge seems - * to invert PCIe legacy interrupts). - * We are de-swizzling here because the numbers are actually for - * port of the root complex virtual P2P bridge. But I want - * to avoid putting a node for it in the tree, so the numbers - * below are basically de-swizzled numbers. - * The real slot is on idsel 0, so the swizzling is 1:1 - */ - interrupt-map-mask = <0x0 0x0 0x0 0x7>; - interrupt-map = < - 0x0 0x0 0x0 0x1 &MPIC 38 0x2 /* int A */ - 0x0 0x0 0x0 0x2 &MPIC 39 0x2 /* int B */ - 0x0 0x0 0x0 0x3 &MPIC 40 0x2 /* int C */ - 0x0 0x0 0x0 0x4 &MPIC 41 0x2 /* int D */>; - }; - - PCIE2: pciex@38100000000 { // 2xGBIF0 - device_type = "pci"; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - compatible = "ibm,plb-pciex-476fpe", "ibm,plb-pciex"; - primary; - port = <0x2>; /* port number */ - reg = <0x00000381 0x00000000 0x0 0x10000000 /* Config space access */ - 0x00000380 0x00000000 0x0 0x00001000>; /* UTL Registers space access */ - dcr-reg = <0xA0 0x20>; - - ranges = <0x02000000 0x00000000 0x80000000 0x00000390 0x80000000 0x0 0x80000000 - 0x01000000 0x0 0x0 0x000003C0 0x0 0x0 0x00010000>; - - /* Inbound starting at 0 to memsize filled in by zImage */ - dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x0>; - - /* This drives busses 0 to 0xf */ - bus-range = <0x0 0xf>; - - /* Legacy interrupts (note the weird polarity, the bridge seems - * to invert PCIe legacy interrupts). - * We are de-swizzling here because the numbers are actually for - * port of the root complex virtual P2P bridge. But I want - * to avoid putting a node for it in the tree, so the numbers - * below are basically de-swizzled numbers. - * The real slot is on idsel 0, so the swizzling is 1:1 - */ - interrupt-map-mask = <0x0 0x0 0x0 0x7>; - interrupt-map = < - 0x0 0x0 0x0 0x1 &MPIC 54 0x2 /* int A */ - 0x0 0x0 0x0 0x2 &MPIC 55 0x2 /* int B */ - 0x0 0x0 0x0 0x3 &MPIC 56 0x2 /* int C */ - 0x0 0x0 0x0 0x4 &MPIC 57 0x2 /* int D */>; - }; - - }; - - chosen { - linux,stdout-path = &UART0; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi b/trunk/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi deleted file mode 100644 index 89af62637707..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi +++ /dev/null @@ -1,248 +0,0 @@ -/* - * MPC8536 Silicon/SoC Device Tree Source (post include) - * - * Copyright 2011 Freescale Semiconductor Inc. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -&lbc { - #address-cells = <2>; - #size-cells = <1>; - compatible = "fsl,mpc8536-elbc", "fsl,elbc", "simple-bus"; - interrupts = <19 2 0 0>; -}; - -/* controller at 0x8000 */ -&pci0 { - compatible = "fsl,mpc8540-pci"; - device_type = "pci"; - interrupts = <24 0x2 0 0>; - bus-range = <0 0xff>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; -}; - -/* controller at 0x9000 */ -&pci1 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <25 2 0 0>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <25 2 0 0>; - interrupt-map-mask = <0xf800 0 0 7>; - - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0 - >; - }; -}; - -/* controller at 0xa000 */ -&pci2 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <26 2 0 0>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <26 2 0 0>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0 - >; - }; -}; - -/* controller at 0xb000 */ -&pci3 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - 
clock-frequency = <33333333>; - interrupts = <27 2 0 0>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <27 2 0 0>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x8 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x9 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0xa 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0xb 0x1 0x0 0x0 - >; - }; -}; -&soc { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "fsl,mpc8536-immr", "simple-bus"; - bus-frequency = <0>; // Filled out by uboot. - - ecm-law@0 { - compatible = "fsl,ecm-law"; - reg = <0x0 0x1000>; - fsl,num-laws = <12>; - }; - - ecm@1000 { - compatible = "fsl,mpc8536-ecm", "fsl,ecm"; - reg = <0x1000 0x1000>; - interrupts = <17 2 0 0>; - }; - - memory-controller@2000 { - compatible = "fsl,mpc8536-memory-controller"; - reg = <0x2000 0x1000>; - interrupts = <18 2 0 0>; - }; - -/include/ "pq3-i2c-0.dtsi" -/include/ "pq3-i2c-1.dtsi" -/include/ "pq3-duart-0.dtsi" - -/include/ "pq3-espi-0.dtsi" - spi@7000 { - fsl,espi-num-chipselects = <4>; - }; - -/include/ "pq3-gpio-0.dtsi" - - /* mark compat w/8572 to get some erratum treatment */ - gpio-controller@f000 { - compatible = "fsl,mpc8572-gpio", "fsl,pq3-gpio"; - }; - - sata@18000 { - compatible = "fsl,mpc8536-sata", "fsl,pq-sata"; - reg = <0x18000 0x1000>; - cell-index = <1>; - interrupts = <74 0x2 0 0>; - }; - - sata@19000 { - compatible = "fsl,mpc8536-sata", "fsl,pq-sata"; - reg = <0x19000 0x1000>; - cell-index = <2>; - interrupts = <41 0x2 0 0>; - }; - - L2: l2-cache-controller@20000 { - compatible = "fsl,mpc8536-l2-cache-controller"; - reg = <0x20000 0x1000>; - cache-line-size = <32>; // 32 bytes - cache-size = <0x80000>; // L2, 512K - interrupts = <16 2 0 0>; - }; - -/include/ "pq3-dma-0.dtsi" -/include/ "pq3-etsec1-0.dtsi" -/include/ "pq3-etsec1-timer-0.dtsi" - - usb@22000 { - compatible = "fsl,mpc8536-usb2-mph", "fsl-usb2-mph"; - reg = <0x22000 0x1000>; - #address-cells = <1>; - #size-cells = <0>; - interrupts = <28 0x2 0 0>; - }; - - usb@23000 { - compatible = "fsl,mpc8536-usb2-mph", "fsl-usb2-mph"; - reg = <0x23000 0x1000>; - #address-cells = <1>; - #size-cells = <0>; - interrupts = <46 0x2 0 0>; - }; - - ptp_clock@24e00 { - interrupts = <68 2 0 0 69 2 0 0 70 2 0 0 71 2 0 0>; - }; - -/include/ "pq3-etsec1-2.dtsi" - - ethernet@26000 { - cell-index = <1>; - }; - - usb@2b000 { - compatible = "fsl,mpc8536-usb2-dr", "fsl-usb2-dr"; - reg = <0x2b000 0x1000>; - #address-cells = <1>; - #size-cells = <0>; - interrupts = <60 0x2 0 0>; - }; - -/include/ "pq3-esdhc-0.dtsi" -/include/ "pq3-sec3.0-0.dtsi" -/include/ "pq3-mpic.dtsi" -/include/ "pq3-mpic-timer-B.dtsi" - - global-utilities@e0000 { - compatible = "fsl,mpc8536-guts"; - reg = <0xe0000 0x1000>; - fsl,has-rstcr; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/mpc8536si-pre.dtsi b/trunk/arch/powerpc/boot/dts/fsl/mpc8536si-pre.dtsi deleted file mode 100644 index 7de45a784df6..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/mpc8536si-pre.dtsi +++ /dev/null @@ -1,63 +0,0 @@ -/* - * MPC8536 Silicon/SoC Device Tree Source (pre include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/dts-v1/; -/ { - compatible = "fsl,MPC8536"; - #address-cells = <2>; - #size-cells = <2>; - interrupt-parent = <&mpic>; - - aliases { - serial0 = &serial0; - serial1 = &serial1; - ethernet0 = &enet0; - ethernet1 = &enet2; - pci0 = &pci0; - pci1 = &pci1; - pci2 = &pci2; - pci3 = &pci3; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - PowerPC,8536@0 { - device_type = "cpu"; - reg = <0x0>; - next-level-cache = <&L2>; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/mpc8544si-post.dtsi b/trunk/arch/powerpc/boot/dts/fsl/mpc8544si-post.dtsi deleted file mode 100644 index b68eb119faef..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/mpc8544si-post.dtsi +++ /dev/null @@ -1,191 +0,0 @@ -/* - * MPC8544 Silicon/SoC Device Tree Source (post include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -&lbc { - #address-cells = <2>; - #size-cells = <1>; - compatible = "fsl,mpc8544-lbc", "fsl,pq3-localbus", "simple-bus"; - interrupts = <19 2 0 0>; -}; - -/* controller at 0x8000 */ -&pci0 { - compatible = "fsl,mpc8540-pci"; - device_type = "pci"; - interrupts = <24 0x2 0 0>; - bus-range = <0 0xff>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; -}; - -/* controller at 0x9000 */ -&pci1 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <25 2 0 0>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <25 2 0 0>; - interrupt-map-mask = <0xf800 0 0 7>; - - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0 - >; - }; -}; - -/* controller at 0xa000 */ -&pci2 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <26 2 0 0>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <26 2 0 0>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0 - >; - }; -}; - -/* controller at 0xb000 */ -&pci3 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <27 2 0 0>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <27 2 0 0>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x8 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x9 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0xa 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0xb 0x1 0x0 0x0 - >; - }; -}; - -&soc { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "fsl,mpc8544-immr", "simple-bus"; - bus-frequency = <0>; // Filled out by uboot. 
- - ecm-law@0 { - compatible = "fsl,ecm-law"; - reg = <0x0 0x1000>; - fsl,num-laws = <10>; - }; - - ecm@1000 { - compatible = "fsl,mpc8544-ecm", "fsl,ecm"; - reg = <0x1000 0x1000>; - interrupts = <17 2 0 0>; - }; - - memory-controller@2000 { - compatible = "fsl,mpc8544-memory-controller"; - reg = <0x2000 0x1000>; - interrupts = <18 2 0 0>; - }; - -/include/ "pq3-i2c-0.dtsi" -/include/ "pq3-i2c-1.dtsi" -/include/ "pq3-duart-0.dtsi" - - L2: l2-cache-controller@20000 { - compatible = "fsl,mpc8544-l2-cache-controller"; - reg = <0x20000 0x1000>; - cache-line-size = <32>; // 32 bytes - cache-size = <0x40000>; // L2, 256K - interrupts = <16 2 0 0>; - }; - -/include/ "pq3-dma-0.dtsi" -/include/ "pq3-etsec1-0.dtsi" -/include/ "pq3-etsec1-2.dtsi" - - ethernet@26000 { - cell-index = <1>; - }; - -/include/ "pq3-sec2.1-0.dtsi" -/include/ "pq3-mpic.dtsi" - - global-utilities@e0000 { - compatible = "fsl,mpc8544-guts"; - reg = <0xe0000 0x1000>; - fsl,has-rstcr; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/mpc8544si-pre.dtsi b/trunk/arch/powerpc/boot/dts/fsl/mpc8544si-pre.dtsi deleted file mode 100644 index 8777f9239d9e..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/mpc8544si-pre.dtsi +++ /dev/null @@ -1,63 +0,0 @@ -/* - * MPC8544 Silicon/SoC Device Tree Source (pre include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -/dts-v1/; -/ { - compatible = "fsl,MPC8544"; - #address-cells = <2>; - #size-cells = <2>; - interrupt-parent = <&mpic>; - - aliases { - serial0 = &serial0; - serial1 = &serial1; - ethernet0 = &enet0; - ethernet1 = &enet2; - pci0 = &pci0; - pci1 = &pci1; - pci2 = &pci2; - pci3 = &pci3; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - PowerPC,8544@0 { - device_type = "cpu"; - reg = <0x0>; - next-level-cache = <&L2>; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi b/trunk/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi deleted file mode 100644 index 9d8023a69d7d..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi +++ /dev/null @@ -1,143 +0,0 @@ -/* - * MPC8548 Silicon/SoC Device Tree Source (post include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -&lbc { - #address-cells = <2>; - #size-cells = <1>; - compatible = "fsl,mpc8548-lbc", "fsl,pq3-localbus", "simple-bus"; - interrupts = <19 2 0 0>; -}; - -/* controller at 0x8000 */ -&pci0 { - compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci"; - device_type = "pci"; - interrupts = <24 0x2 0 0>; - bus-range = <0 0xff>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; -}; - -/* controller at 0x9000 */ -&pci1 { - compatible = "fsl,mpc8540-pci"; - device_type = "pci"; - interrupts = <25 0x2 0 0>; - bus-range = <0 0xff>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; -}; - -/* controller at 0xa000 */ -&pci2 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <26 2 0 0>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <26 2 0 0>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0 - >; - }; -}; - -&soc { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "fsl,mpc8548-immr", "simple-bus"; - bus-frequency = <0>; // Filled out by uboot. - - ecm-law@0 { - compatible = "fsl,ecm-law"; - reg = <0x0 0x1000>; - fsl,num-laws = <10>; - }; - - ecm@1000 { - compatible = "fsl,mpc8548-ecm", "fsl,ecm"; - reg = <0x1000 0x1000>; - interrupts = <17 2 0 0>; - }; - - memory-controller@2000 { - compatible = "fsl,mpc8548-memory-controller"; - reg = <0x2000 0x1000>; - interrupts = <18 2 0 0>; - }; - -/include/ "pq3-i2c-0.dtsi" -/include/ "pq3-i2c-1.dtsi" -/include/ "pq3-duart-0.dtsi" - - L2: l2-cache-controller@20000 { - compatible = "fsl,mpc8548-l2-cache-controller"; - reg = <0x20000 0x1000>; - cache-line-size = <32>; // 32 bytes - cache-size = <0x80000>; // L2, 512K - interrupts = <16 2 0 0>; - }; - -/include/ "pq3-dma-0.dtsi" -/include/ "pq3-etsec1-0.dtsi" -/include/ "pq3-etsec1-1.dtsi" -/include/ "pq3-etsec1-2.dtsi" -/include/ "pq3-etsec1-3.dtsi" - -/include/ "pq3-sec2.1-0.dtsi" -/include/ "pq3-mpic.dtsi" - - global-utilities@e0000 { - compatible = "fsl,mpc8548-guts"; - reg = <0xe0000 0x1000>; - fsl,has-rstcr; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi b/trunk/arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi deleted file mode 100644 index 289f1218d755..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi +++ /dev/null @@ -1,62 +0,0 @@ -/* - * MPC8548 Silicon/SoC Device Tree Source (pre include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. 
- * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/dts-v1/; -/ { - compatible = "fsl,MPC8548"; - #address-cells = <2>; - #size-cells = <2>; - interrupt-parent = <&mpic>; - - aliases { - serial0 = &serial0; - serial1 = &serial1; - ethernet0 = &enet0; - ethernet1 = &enet2; - pci0 = &pci0; - pci1 = &pci1; - pci2 = &pci2; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - PowerPC,8548@0 { - device_type = "cpu"; - reg = <0x0>; - next-level-cache = <&L2>; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/mpc8568si-post.dtsi b/trunk/arch/powerpc/boot/dts/fsl/mpc8568si-post.dtsi deleted file mode 100644 index 64e7075a9cd4..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/mpc8568si-post.dtsi +++ /dev/null @@ -1,270 +0,0 @@ -/* - * MPC8568 Silicon/SoC Device Tree Source (post include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -&lbc { - #address-cells = <2>; - #size-cells = <1>; - compatible = "fsl,mpc8568-localbus", "fsl,pq3-localbus", "simple-bus"; - interrupts = <19 2 0 0>; - sleep = <&pmc 0x08000000>; -}; - -/* controller at 0x8000 */ -&pci0 { - compatible = "fsl,mpc8540-pci"; - device_type = "pci"; - interrupts = <24 0x2 0 0>; - bus-range = <0 0xff>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - sleep = <&pmc 0x80000000>; -}; - -/* controller at 0xa000 */ -&pci1 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <26 2 0 0>; - sleep = <&pmc 0x20000000>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <26 2 0 0>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0 - >; - }; -}; - -&rio { - compatible = "fsl,srio"; - interrupts = <48 2 0 0>; - #address-cells = <2>; - #size-cells = <2>; - fsl,srio-rmu-handle = <&rmu>; - sleep = <&pmc 0x00080000>; - ranges; - - port1 { - #address-cells = <2>; - #size-cells = <2>; - cell-index = <1>; - }; -}; - -&soc { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "fsl,mpc8568-immr", "simple-bus"; - bus-frequency = <0>; // Filled out by uboot. 
- - ecm-law@0 { - compatible = "fsl,ecm-law"; - reg = <0x0 0x1000>; - fsl,num-laws = <10>; - }; - - ecm@1000 { - compatible = "fsl,mpc8568-ecm", "fsl,ecm"; - reg = <0x1000 0x1000>; - interrupts = <17 2 0 0>; - }; - - memory-controller@2000 { - compatible = "fsl,mpc8568-memory-controller"; - reg = <0x2000 0x1000>; - interrupts = <18 2 0 0>; - }; - - i2c-sleep-nexus { - #address-cells = <1>; - #size-cells = <1>; - compatible = "simple-bus"; - sleep = <&pmc 0x00000004>; - ranges; - -/include/ "pq3-i2c-0.dtsi" -/include/ "pq3-i2c-1.dtsi" - - }; - - duart-sleep-nexus { - #address-cells = <1>; - #size-cells = <1>; - compatible = "simple-bus"; - sleep = <&pmc 0x00000002>; - ranges; - -/include/ "pq3-duart-0.dtsi" - - }; - - L2: l2-cache-controller@20000 { - compatible = "fsl,mpc8568-l2-cache-controller"; - reg = <0x20000 0x1000>; - cache-line-size = <32>; // 32 bytes - cache-size = <0x80000>; // L2, 512K - interrupts = <16 2 0 0>; - }; - -/include/ "pq3-dma-0.dtsi" - dma@21300 { - sleep = <&pmc 0x00000400>; - }; - -/include/ "pq3-etsec1-0.dtsi" - ethernet@24000 { - sleep = <&pmc 0x00000080>; - }; - -/include/ "pq3-etsec1-1.dtsi" - ethernet@25000 { - sleep = <&pmc 0x00000040>; - }; - - par_io@e0100 { - reg = <0xe0100 0x100>; - device_type = "par_io"; - }; - -/include/ "pq3-sec2.1-0.dtsi" - crypto@30000 { - sleep = <&pmc 0x01000000>; - }; - -/include/ "pq3-mpic.dtsi" -/include/ "pq3-rmu-0.dtsi" - rmu@d3000 { - sleep = <&pmc 0x00040000>; - }; - - global-utilities@e0000 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,mpc8568-guts", "fsl,mpc8548-guts"; - reg = <0xe0000 0x1000>; - ranges = <0 0xe0000 0x1000>; - fsl,has-rstcr; - - pmc: power@70 { - compatible = "fsl,mpc8568-pmc", - "fsl,mpc8548-pmc"; - reg = <0x70 0x20>; - }; - }; -}; - -&qe { - #address-cells = <1>; - #size-cells = <1>; - device_type = "qe"; - compatible = "fsl,qe"; - sleep = <&pmc 0x00000800>; - brg-frequency = <0>; - bus-frequency = <396000000>; - fsl,qe-num-riscs = <2>; - fsl,qe-num-snums = <28>; - - qeic: interrupt-controller@80 { - interrupt-controller; - compatible = "fsl,qe-ic"; - #address-cells = <0>; - #interrupt-cells = <1>; - reg = <0x80 0x80>; - interrupts = <46 2 0 0 46 2 0 0>; //high:30 low:30 - interrupt-parent = <&mpic>; - }; - - spi@4c0 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,spi"; - reg = <0x4c0 0x40>; - cell-index = <0>; - interrupts = <2>; - interrupt-parent = <&qeic>; - }; - - spi@500 { - #address-cells = <1>; - #size-cells = <0>; - cell-index = <1>; - compatible = "fsl,spi"; - reg = <0x500 0x40>; - interrupts = <1>; - interrupt-parent = <&qeic>; - }; - - ucc@2000 { - cell-index = <1>; - reg = <0x2000 0x200>; - interrupts = <32>; - interrupt-parent = <&qeic>; - }; - - ucc@3000 { - cell-index = <2>; - reg = <0x3000 0x200>; - interrupts = <33>; - interrupt-parent = <&qeic>; - }; - - muram@10000 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,qe-muram", "fsl,cpm-muram"; - ranges = <0x0 0x10000 0x10000>; - - data-only@0 { - compatible = "fsl,qe-muram-data", - "fsl,cpm-muram-data"; - reg = <0x0 0x10000>; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/mpc8568si-pre.dtsi b/trunk/arch/powerpc/boot/dts/fsl/mpc8568si-pre.dtsi deleted file mode 100644 index eacd62c5fe6c..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/mpc8568si-pre.dtsi +++ /dev/null @@ -1,65 +0,0 @@ -/* - * MPC8568 Silicon/SoC Device Tree Source (pre include) - * - * Copyright 2011 Freescale Semiconductor Inc. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/dts-v1/; -/ { - compatible = "fsl,MPC8568"; - #address-cells = <2>; - #size-cells = <2>; - interrupt-parent = <&mpic>; - - aliases { - serial0 = &serial0; - serial1 = &serial1; - ethernet0 = &enet0; - ethernet1 = &enet1; - ethernet2 = &enet2; - ethernet3 = &enet3; - pci0 = &pci0; - pci1 = &pci1; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - PowerPC,8568@0 { - device_type = "cpu"; - reg = <0x0>; - next-level-cache = <&L2>; - sleep = <&pmc 0x00008000 // core - &pmc 0x00004000>; // timebase - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/mpc8569si-post.dtsi b/trunk/arch/powerpc/boot/dts/fsl/mpc8569si-post.dtsi deleted file mode 100644 index 3e6346a4a183..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/mpc8569si-post.dtsi +++ /dev/null @@ -1,304 +0,0 @@ -/* - * MPC8569 Silicon/SoC Device Tree Source (post include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. 
- * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -&lbc { - #address-cells = <2>; - #size-cells = <1>; - compatible = "fsl,mpc8569-elbc", "fsl,elbc", "simple-bus"; - interrupts = <19 2 0 0>; - sleep = <&pmc 0x08000000>; -}; - -/* controller at 0xa000 */ -&pci1 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <26 2 0 0>; - sleep = <&pmc 0x20000000>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <26 2 0 0>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0 - >; - }; -}; - -&rio { - compatible = "fsl,srio"; - interrupts = <48 2 0 0>; - #address-cells = <2>; - #size-cells = <2>; - fsl,srio-rmu-handle = <&rmu>; - sleep = <&pmc 0x00080000>; - ranges; - - port1 { - #address-cells = <2>; - #size-cells = <2>; - cell-index = <1>; - }; - - port2 { - #address-cells = <2>; - #size-cells = <2>; - cell-index = <2>; - }; -}; - -&soc { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "fsl,mpc8569-immr", "simple-bus"; - bus-frequency = <0>; // Filled out by uboot. 
- - ecm-law@0 { - compatible = "fsl,ecm-law"; - reg = <0x0 0x1000>; - fsl,num-laws = <10>; - }; - - ecm@1000 { - compatible = "fsl,mpc8569-ecm", "fsl,ecm"; - reg = <0x1000 0x1000>; - interrupts = <17 2 0 0>; - }; - - memory-controller@2000 { - compatible = "fsl,mpc8569-memory-controller"; - reg = <0x2000 0x1000>; - interrupts = <18 2 0 0>; - }; - - i2c-sleep-nexus { - #address-cells = <1>; - #size-cells = <1>; - compatible = "simple-bus"; - sleep = <&pmc 0x00000004>; - ranges; - -/include/ "pq3-i2c-0.dtsi" -/include/ "pq3-i2c-1.dtsi" - - }; - - duart-sleep-nexus { - #address-cells = <1>; - #size-cells = <1>; - compatible = "simple-bus"; - sleep = <&pmc 0x00000002>; - ranges; - -/include/ "pq3-duart-0.dtsi" - - }; - - L2: l2-cache-controller@20000 { - compatible = "fsl,mpc8569-l2-cache-controller"; - reg = <0x20000 0x1000>; - cache-line-size = <32>; // 32 bytes - cache-size = <0x80000>; // L2, 512K - interrupts = <16 2 0 0>; - }; - -/include/ "pq3-dma-0.dtsi" -/include/ "pq3-esdhc-0.dtsi" - sdhc@2e000 { - sleep = <&pmc 0x00200000>; - }; - - par_io@e0100 { - #address-cells = <1>; - #size-cells = <1>; - reg = <0xe0100 0x100>; - ranges = <0x0 0xe0100 0x100>; - device_type = "par_io"; - }; - -/include/ "pq3-sec3.1-0.dtsi" - crypto@30000 { - sleep = <&pmc 0x01000000>; - }; - -/include/ "pq3-mpic.dtsi" -/include/ "pq3-rmu-0.dtsi" - rmu@d3000 { - sleep = <&pmc 0x00040000>; - }; - - global-utilities@e0000 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,mpc8569-guts", "fsl,mpc8548-guts"; - reg = <0xe0000 0x1000>; - ranges = <0 0xe0000 0x1000>; - fsl,has-rstcr; - - pmc: power@70 { - compatible = "fsl,mpc8569-pmc", - "fsl,mpc8548-pmc"; - reg = <0x70 0x20>; - }; - }; -}; - -&qe { - #address-cells = <1>; - #size-cells = <1>; - device_type = "qe"; - compatible = "fsl,qe"; - sleep = <&pmc 0x00000800>; - brg-frequency = <0>; - bus-frequency = <0>; - fsl,qe-num-riscs = <4>; - fsl,qe-num-snums = <46>; - - qeic: interrupt-controller@80 { - interrupt-controller; - compatible = "fsl,qe-ic"; - #address-cells = <0>; - #interrupt-cells = <1>; - reg = <0x80 0x80>; - interrupts = <46 2 0 0 46 2 0 0>; //high:30 low:30 - interrupt-parent = <&mpic>; - }; - - timer@440 { - compatible = "fsl,mpc8569-qe-gtm", - "fsl,qe-gtm", "fsl,gtm"; - reg = <0x440 0x40>; - interrupts = <12 13 14 15>; - interrupt-parent = <&qeic>; - /* Filled in by U-Boot */ - clock-frequency = <0>; - }; - - spi@4c0 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,mpc8569-qe-spi", "fsl,spi"; - reg = <0x4c0 0x40>; - cell-index = <0>; - interrupts = <2>; - interrupt-parent = <&qeic>; - }; - - spi@500 { - #address-cells = <1>; - #size-cells = <0>; - cell-index = <1>; - compatible = "fsl,spi"; - reg = <0x500 0x40>; - interrupts = <1>; - interrupt-parent = <&qeic>; - }; - - usb@6c0 { - compatible = "fsl,mpc8569-qe-usb", - "fsl,mpc8323-qe-usb"; - reg = <0x6c0 0x40 0x8b00 0x100>; - interrupts = <11>; - interrupt-parent = <&qeic>; - }; - - ucc@2000 { - cell-index = <1>; - reg = <0x2000 0x200>; - interrupts = <32>; - interrupt-parent = <&qeic>; - }; - - ucc@2200 { - cell-index = <3>; - reg = <0x2200 0x200>; - interrupts = <34>; - interrupt-parent = <&qeic>; - }; - - ucc@3000 { - cell-index = <2>; - reg = <0x3000 0x200>; - interrupts = <33>; - interrupt-parent = <&qeic>; - }; - - ucc@3200 { - cell-index = <4>; - reg = <0x3200 0x200>; - interrupts = <35>; - interrupt-parent = <&qeic>; - }; - - ucc@3400 { - cell-index = <6>; - reg = <0x3400 0x200>; - interrupts = <41>; - interrupt-parent = <&qeic>; - }; - - ucc@3600 { - 
cell-index = <8>; - reg = <0x3600 0x200>; - interrupts = <43>; - interrupt-parent = <&qeic>; - }; - - muram@10000 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,qe-muram", "fsl,cpm-muram"; - ranges = <0x0 0x10000 0x20000>; - - data-only@0 { - compatible = "fsl,qe-muram-data", - "fsl,cpm-muram-data"; - reg = <0x0 0x20000>; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/mpc8569si-pre.dtsi b/trunk/arch/powerpc/boot/dts/fsl/mpc8569si-pre.dtsi deleted file mode 100644 index b07064d11930..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/mpc8569si-pre.dtsi +++ /dev/null @@ -1,64 +0,0 @@ -/* - * MPC8569 Silicon/SoC Device Tree Source (pre include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/dts-v1/; -/ { - compatible = "fsl,MPC8569"; - #address-cells = <2>; - #size-cells = <2>; - interrupt-parent = <&mpic>; - - aliases { - serial0 = &serial0; - serial1 = &serial1; - ethernet0 = &enet0; - ethernet1 = &enet1; - ethernet2 = &enet2; - ethernet3 = &enet3; - pci1 = &pci1; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - PowerPC,8569@0 { - device_type = "cpu"; - reg = <0x0>; - next-level-cache = <&L2>; - sleep = <&pmc 0x00008000 // core - &pmc 0x00004000>; // timebase - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/mpc8572si-post.dtsi b/trunk/arch/powerpc/boot/dts/fsl/mpc8572si-post.dtsi deleted file mode 100644 index d44e25a48734..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/mpc8572si-post.dtsi +++ /dev/null @@ -1,196 +0,0 @@ -/* - * MPC8572 Silicon/SoC Device Tree Source (post include) - * - * Copyright 2011 Freescale Semiconductor Inc. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -&lbc { - #address-cells = <2>; - #size-cells = <1>; - compatible = "fsl,mpc8572-elbc", "fsl,elbc", "simple-bus"; - interrupts = <19 2 0 0>; -}; - -/* controller at 0x8000 */ -&pci0 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <24 2 0 0>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <24 2 0 0>; - interrupt-map-mask = <0xf800 0 0 7>; - - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x8 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x9 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0xa 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0xb 0x1 0x0 0x0 - >; - }; -}; - -/* controller at 0x9000 */ -&pci1 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <25 2 0 0>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <25 2 0 0>; - interrupt-map-mask = <0xf800 0 0 7>; - - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0 - >; - }; -}; - -/* controller at 0xa000 */ -&pci2 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <26 2 0 0>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <26 2 0 0>; - 
interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0 - >; - }; -}; - -&soc { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "fsl,mpc8572-immr", "simple-bus"; - bus-frequency = <0>; // Filled out by uboot. - - ecm-law@0 { - compatible = "fsl,ecm-law"; - reg = <0x0 0x1000>; - fsl,num-laws = <12>; - }; - - ecm@1000 { - compatible = "fsl,mpc8572-ecm", "fsl,ecm"; - reg = <0x1000 0x1000>; - interrupts = <17 2 0 0>; - }; - - memory-controller@2000 { - compatible = "fsl,mpc8572-memory-controller"; - reg = <0x2000 0x1000>; - interrupts = <18 2 0 0>; - }; - - memory-controller@6000 { - compatible = "fsl,mpc8572-memory-controller"; - reg = <0x6000 0x1000>; - interrupts = <18 2 0 0>; - }; - -/include/ "pq3-i2c-0.dtsi" -/include/ "pq3-i2c-1.dtsi" -/include/ "pq3-duart-0.dtsi" -/include/ "pq3-dma-1.dtsi" -/include/ "pq3-gpio-0.dtsi" - gpio-controller@f000 { - compatible = "fsl,mpc8572-gpio", "fsl,pq3-gpio"; - }; - - L2: l2-cache-controller@20000 { - compatible = "fsl,mpc8572-l2-cache-controller"; - reg = <0x20000 0x1000>; - cache-line-size = <32>; // 32 bytes - cache-size = <0x100000>; // L2,1M - interrupts = <16 2 0 0>; - }; - -/include/ "pq3-dma-0.dtsi" -/include/ "pq3-etsec1-0.dtsi" -/include/ "pq3-etsec1-timer-0.dtsi" - - ptp_clock@24e00 { - interrupts = <68 2 0 0 69 2 0 0 70 2 0 0 71 2 0 0>; - }; - -/include/ "pq3-etsec1-1.dtsi" -/include/ "pq3-etsec1-2.dtsi" -/include/ "pq3-etsec1-3.dtsi" -/include/ "pq3-sec3.0-0.dtsi" -/include/ "pq3-mpic.dtsi" -/include/ "pq3-mpic-timer-B.dtsi" - - global-utilities@e0000 { - compatible = "fsl,mpc8572-guts"; - reg = <0xe0000 0x1000>; - fsl,has-rstcr; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/mpc8572si-pre.dtsi b/trunk/arch/powerpc/boot/dts/fsl/mpc8572si-pre.dtsi deleted file mode 100644 index ca188326c2ca..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/mpc8572si-pre.dtsi +++ /dev/null @@ -1,70 +0,0 @@ -/* - * MPC8572 Silicon/SoC Device Tree Source (pre include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/dts-v1/; -/ { - compatible = "fsl,MPC8572"; - #address-cells = <2>; - #size-cells = <2>; - interrupt-parent = <&mpic>; - - aliases { - serial0 = &serial0; - serial1 = &serial1; - ethernet0 = &enet0; - ethernet1 = &enet1; - ethernet2 = &enet2; - ethernet3 = &enet3; - pci0 = &pci0; - pci1 = &pci1; - pci2 = &pci2; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - PowerPC,8572@0 { - device_type = "cpu"; - reg = <0x0>; - next-level-cache = <&L2>; - }; - - PowerPC,8572@1 { - device_type = "cpu"; - reg = <0x1>; - next-level-cache = <&L2>; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi deleted file mode 100644 index bd9e163c764b..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi +++ /dev/null @@ -1,198 +0,0 @@ -/* - * P1010/P1014 Silicon/SoC Device Tree Source (post include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -&ifc { - #address-cells = <2>; - #size-cells = <1>; - compatible = "fsl,ifc", "simple-bus"; - interrupts = <16 2 0 0 19 2 0 0>; -}; - -/* controller at 0x9000 */ -&pci0 { - compatible = "fsl,p1010-pcie", "fsl,qoriq-pcie-v2.3", "fsl,qoriq-pcie-v2.2"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <16 2 0 0>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 0 0>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0 - >; - }; -}; - -/* controller at 0xa000 */ -&pci1 { - compatible = "fsl,p1010-pcie", "fsl,qoriq-pcie-v2.3", "fsl,qoriq-pcie-v2.2"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <16 2 0 0>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 0 0>; - interrupt-map-mask = <0xf800 0 0 7>; - - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0 - >; - }; -}; - -&soc { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "fsl,p1010-immr", "simple-bus"; - bus-frequency = <0>; // Filled out by uboot. - - ecm-law@0 { - compatible = "fsl,ecm-law"; - reg = <0x0 0x1000>; - fsl,num-laws = <12>; - }; - - ecm@1000 { - compatible = "fsl,p1010-ecm", "fsl,ecm"; - reg = <0x1000 0x1000>; - interrupts = <16 2 0 0>; - }; - - memory-controller@2000 { - compatible = "fsl,p1010-memory-controller"; - reg = <0x2000 0x1000>; - interrupts = <16 2 0 0>; - }; - -/include/ "pq3-i2c-0.dtsi" -/include/ "pq3-i2c-1.dtsi" -/include/ "pq3-duart-0.dtsi" -/include/ "pq3-espi-0.dtsi" - spi0: spi@7000 { - fsl,espi-num-chipselects = <1>; - }; - -/include/ "pq3-gpio-0.dtsi" -/include/ "pq3-sata2-0.dtsi" -/include/ "pq3-sata2-1.dtsi" - - can0: can@1c000 { - compatible = "fsl,p1010-flexcan"; - reg = <0x1c000 0x1000>; - interrupts = <48 0x2 0 0>; - }; - - can1: can@1d000 { - compatible = "fsl,p1010-flexcan"; - reg = <0x1d000 0x1000>; - interrupts = <61 0x2 0 0>; - }; - - L2: l2-cache-controller@20000 { - compatible = "fsl,p1010-l2-cache-controller", - "fsl,p1014-l2-cache-controller"; - reg = <0x20000 0x1000>; - cache-line-size = <32>; // 32 bytes - cache-size = <0x40000>; // L2,256K - interrupts = <16 2 0 0>; - }; - -/include/ "pq3-dma-0.dtsi" -/include/ "pq3-usb2-dr-0.dtsi" -/include/ "pq3-esdhc-0.dtsi" - sdhc@2e000 { - fsl,sdhci-auto-cmd12; - }; - -/include/ "pq3-sec4.4-0.dtsi" -/include/ "pq3-mpic.dtsi" -/include/ "pq3-mpic-timer-B.dtsi" - -/include/ "pq3-etsec2-0.dtsi" - enet0: ethernet@b0000 { - queue-group@b0000 { - fsl,rx-bit-map = <0xff>; - fsl,tx-bit-map = <0xff>; - }; - }; - -/include/ "pq3-etsec2-1.dtsi" - enet1: ethernet@b1000 { - queue-group@b1000 { - fsl,rx-bit-map = <0xff>; - fsl,tx-bit-map = <0xff>; - }; - }; - -/include/ "pq3-etsec2-2.dtsi" - enet2: ethernet@b2000 { - queue-group@b2000 { - fsl,rx-bit-map = <0xff>; - fsl,tx-bit-map = <0xff>; - }; - - }; - - global-utilities@e0000 { - compatible = "fsl,p1010-guts"; - reg = <0xe0000 0x1000>; - fsl,has-rstcr; - }; -}; diff 
--git a/trunk/arch/powerpc/boot/dts/fsl/p1010si-pre.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p1010si-pre.dtsi deleted file mode 100644 index 7354a8f90ea5..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/p1010si-pre.dtsi +++ /dev/null @@ -1,64 +0,0 @@ -/* - * P1010/P1014 Silicon/SoC Device Tree Source (pre include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/dts-v1/; -/ { - compatible = "fsl,P1010"; - #address-cells = <2>; - #size-cells = <2>; - interrupt-parent = <&mpic>; - - aliases { - serial0 = &serial0; - serial1 = &serial1; - ethernet0 = &enet0; - ethernet1 = &enet1; - ethernet2 = &enet2; - pci0 = &pci0; - pci1 = &pci1; - can0 = &can0; - can1 = &can1; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - PowerPC,P1010@0 { - device_type = "cpu"; - reg = <0x0>; - next-level-cache = <&L2>; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi deleted file mode 100644 index fc924c5ffebe..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi +++ /dev/null @@ -1,174 +0,0 @@ -/* - * P1020/P1011 Silicon/SoC Device Tree Source (post include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -&lbc { - #address-cells = <2>; - #size-cells = <1>; - compatible = "fsl,p1020-elbc", "fsl,elbc", "simple-bus"; - interrupts = <19 2 0 0>; -}; - -/* controller at 0x9000 */ -&pci0 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <16 2 0 0>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 0 0>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0 - >; - }; -}; - -/* controller at 0xa000 */ -&pci1 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <16 2 0 0>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 0 0>; - interrupt-map-mask = <0xf800 0 0 7>; - - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0 - >; - }; -}; - -&soc { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "fsl,p1020-immr", "simple-bus"; - bus-frequency = <0>; // Filled out by uboot. 
- - ecm-law@0 { - compatible = "fsl,ecm-law"; - reg = <0x0 0x1000>; - fsl,num-laws = <12>; - }; - - ecm@1000 { - compatible = "fsl,p1020-ecm", "fsl,ecm"; - reg = <0x1000 0x1000>; - interrupts = <16 2 0 0>; - }; - - memory-controller@2000 { - compatible = "fsl,p1020-memory-controller"; - reg = <0x2000 0x1000>; - interrupts = <16 2 0 0>; - }; - -/include/ "pq3-i2c-0.dtsi" -/include/ "pq3-i2c-1.dtsi" -/include/ "pq3-duart-0.dtsi" - -/include/ "pq3-espi-0.dtsi" - spi@7000 { - fsl,espi-num-chipselects = <4>; - }; - -/include/ "pq3-gpio-0.dtsi" - - L2: l2-cache-controller@20000 { - compatible = "fsl,p1020-l2-cache-controller"; - reg = <0x20000 0x1000>; - cache-line-size = <32>; // 32 bytes - cache-size = <0x40000>; // L2,256K - interrupts = <16 2 0 0>; - }; - -/include/ "pq3-dma-0.dtsi" -/include/ "pq3-usb2-dr-0.dtsi" -/include/ "pq3-usb2-dr-1.dtsi" - -/include/ "pq3-esdhc-0.dtsi" -/include/ "pq3-sec3.3-0.dtsi" - -/include/ "pq3-mpic.dtsi" -/include/ "pq3-mpic-timer-B.dtsi" - -/include/ "pq3-etsec2-0.dtsi" - enet0: enet0_grp2: ethernet@b0000 { - }; - -/include/ "pq3-etsec2-1.dtsi" - enet1: enet1_grp2: ethernet@b1000 { - }; - -/include/ "pq3-etsec2-2.dtsi" - enet2: enet2_grp2: ethernet@b2000 { - }; - - global-utilities@e0000 { - compatible = "fsl,p1020-guts"; - reg = <0xe0000 0x1000>; - fsl,has-rstcr; - }; -}; - -/include/ "pq3-etsec2-grp2-0.dtsi" -/include/ "pq3-etsec2-grp2-1.dtsi" -/include/ "pq3-etsec2-grp2-2.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/fsl/p1020si-pre.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p1020si-pre.dtsi deleted file mode 100644 index 6f0376e554eb..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/p1020si-pre.dtsi +++ /dev/null @@ -1,68 +0,0 @@ -/* - * P1020/P1011 Silicon/SoC Device Tree Source (pre include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -/dts-v1/; -/ { - compatible = "fsl,P1020"; - #address-cells = <2>; - #size-cells = <2>; - interrupt-parent = <&mpic>; - - aliases { - serial0 = &serial0; - serial1 = &serial1; - ethernet0 = &enet0; - ethernet1 = &enet1; - ethernet2 = &enet2; - pci0 = &pci0; - pci1 = &pci1; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - PowerPC,P1020@0 { - device_type = "cpu"; - reg = <0x0>; - next-level-cache = <&L2>; - }; - - PowerPC,P1020@1 { - device_type = "cpu"; - reg = <0x1>; - next-level-cache = <&L2>; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi deleted file mode 100644 index 38ba54d1e32e..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi +++ /dev/null @@ -1,225 +0,0 @@ -/* - * P1021/P1012 Silicon/SoC Device Tree Source (post include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -&lbc { - #address-cells = <2>; - #size-cells = <1>; - compatible = "fsl,p1021-elbc", "fsl,elbc", "simple-bus"; - interrupts = <19 2 0 0>; -}; - -/* controller at 0x9000 */ -&pci0 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <16 2 0 0>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 0 0>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0 - >; - }; -}; - -/* controller at 0xa000 */ -&pci1 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <16 2 0 0>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 0 0>; - interrupt-map-mask = <0xf800 0 0 7>; - - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0 - >; - }; -}; - -&soc { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "fsl,p1021-immr", "simple-bus"; - bus-frequency = <0>; // Filled out by uboot. - - ecm-law@0 { - compatible = "fsl,ecm-law"; - reg = <0x0 0x1000>; - fsl,num-laws = <12>; - }; - - ecm@1000 { - compatible = "fsl,p1021-ecm", "fsl,ecm"; - reg = <0x1000 0x1000>; - interrupts = <16 2 0 0>; - }; - - memory-controller@2000 { - compatible = "fsl,p1021-memory-controller"; - reg = <0x2000 0x1000>; - interrupts = <16 2 0 0>; - }; - -/include/ "pq3-i2c-0.dtsi" -/include/ "pq3-i2c-1.dtsi" -/include/ "pq3-duart-0.dtsi" - -/include/ "pq3-espi-0.dtsi" - spi@7000 { - fsl,espi-num-chipselects = <4>; - }; - -/include/ "pq3-gpio-0.dtsi" - - L2: l2-cache-controller@20000 { - compatible = "fsl,p1021-l2-cache-controller"; - reg = <0x20000 0x1000>; - cache-line-size = <32>; // 32 bytes - cache-size = <0x40000>; // L2,256K - interrupts = <16 2 0 0>; - }; - -/include/ "pq3-dma-0.dtsi" -/include/ "pq3-usb2-dr-0.dtsi" - -/include/ "pq3-esdhc-0.dtsi" -/include/ "pq3-sec3.3-0.dtsi" - -/include/ "pq3-mpic.dtsi" -/include/ "pq3-mpic-timer-B.dtsi" - -/include/ "pq3-etsec2-0.dtsi" - enet0: enet0_grp2: ethernet@b0000 { - }; - -/include/ "pq3-etsec2-1.dtsi" - enet1: enet1_grp2: ethernet@b1000 { - }; - -/include/ "pq3-etsec2-2.dtsi" - enet2: enet2_grp2: ethernet@b2000 { - }; - - global-utilities@e0000 { - compatible = "fsl,p1021-guts"; - reg = <0xe0000 0x1000>; - fsl,has-rstcr; - }; -}; - -&qe { - #address-cells = <1>; - #size-cells = <1>; - device_type = "qe"; - compatible = "fsl,qe"; - fsl,qe-num-riscs = <1>; - fsl,qe-num-snums = <28>; - - qeic: interrupt-controller@80 { - interrupt-controller; - compatible = "fsl,qe-ic"; - #address-cells = <0>; - #interrupt-cells = <1>; - reg = <0x80 0x80>; - interrupts = <63 2 0 0 60 2 0 0>; //high:47 low:44 - }; - - ucc@2000 { - cell-index = <1>; - reg = <0x2000 0x200>; - interrupts = <32>; - interrupt-parent = <&qeic>; - }; - - mdio@2120 { - #address-cells = <1>; - #size-cells = <0>; - reg = <0x2120 0x18>; - compatible = "fsl,ucc-mdio"; - }; - - ucc@2400 { - cell-index = <5>; - reg = <0x2400 0x200>; - 
interrupts = <40>; - interrupt-parent = <&qeic>; - }; - - muram@10000 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,qe-muram", "fsl,cpm-muram"; - ranges = <0x0 0x10000 0x6000>; - - data-only@0 { - compatible = "fsl,qe-muram-data", - "fsl,cpm-muram-data"; - reg = <0x0 0x6000>; - }; - }; -}; - -/include/ "pq3-etsec2-grp2-0.dtsi" -/include/ "pq3-etsec2-grp2-1.dtsi" -/include/ "pq3-etsec2-grp2-2.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/fsl/p1021si-pre.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p1021si-pre.dtsi deleted file mode 100644 index 4abd54bc3308..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/p1021si-pre.dtsi +++ /dev/null @@ -1,68 +0,0 @@ -/* - * P1021/P1012 Silicon/SoC Device Tree Source (pre include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/dts-v1/; -/ { - compatible = "fsl,P1021"; - #address-cells = <2>; - #size-cells = <2>; - interrupt-parent = <&mpic>; - - aliases { - serial0 = &serial0; - serial1 = &serial1; - ethernet0 = &enet0; - ethernet1 = &enet1; - ethernet2 = &enet2; - pci0 = &pci0; - pci1 = &pci1; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - PowerPC,P1021@0 { - device_type = "cpu"; - reg = <0x0>; - next-level-cache = <&L2>; - }; - - PowerPC,P1021@1 { - device_type = "cpu"; - reg = <0x1>; - next-level-cache = <&L2>; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi deleted file mode 100644 index 16239b199d0a..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi +++ /dev/null @@ -1,235 +0,0 @@ -/* - * P1022/P1013 Silicon/SoC Device Tree Source (post include) - * - * Copyright 2011 Freescale Semiconductor Inc. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -&lbc { - #address-cells = <2>; - #size-cells = <1>; - compatible = "fsl,p1022-elbc", "fsl,elbc", "simple-bus"; - interrupts = <19 2 0 0>; -}; - -/* controller at 0x9000 */ -&pci0 { - compatible = "fsl,p1022-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <16 2 0 0>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 0 0>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0 - >; - }; -}; - -/* controller at 0xa000 */ -&pci1 { - compatible = "fsl,p1022-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <16 2 0 0>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 0 0>; - interrupt-map-mask = <0xf800 0 0 7>; - - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0 - >; - }; -}; - -/* controller at 0xb000 */ -&pci2 { - compatible = "fsl,p1022-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <16 2 0 0>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 0 0>; - interrupt-map-mask = 
<0xf800 0 0 7>; - - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x8 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x9 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0xa 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0xb 0x1 0x0 0x0 - >; - }; -}; - -&soc { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "fsl,p1022-immr", "simple-bus"; - bus-frequency = <0>; // Filled out by uboot. - - ecm-law@0 { - compatible = "fsl,ecm-law"; - reg = <0x0 0x1000>; - fsl,num-laws = <12>; - }; - - ecm@1000 { - compatible = "fsl,p1022-ecm", "fsl,ecm"; - reg = <0x1000 0x1000>; - interrupts = <16 2 0 0>; - }; - - memory-controller@2000 { - compatible = "fsl,p1022-memory-controller"; - reg = <0x2000 0x1000>; - interrupts = <16 2 0 0>; - }; - -/include/ "pq3-i2c-0.dtsi" -/include/ "pq3-i2c-1.dtsi" -/include/ "pq3-duart-0.dtsi" -/include/ "pq3-espi-0.dtsi" - spi@7000 { - fsl,espi-num-chipselects = <4>; - }; - -/include/ "pq3-dma-1.dtsi" - dma@c300 { - dma00: dma-channel@0 { - compatible = "fsl,ssi-dma-channel"; - }; - dma01: dma-channel@80 { - compatible = "fsl,ssi-dma-channel"; - }; - }; - -/include/ "pq3-gpio-0.dtsi" - - display@10000 { - compatible = "fsl,diu", "fsl,p1022-diu"; - reg = <0x10000 1000>; - interrupts = <64 2 0 0>; - }; - - ssi@15000 { - compatible = "fsl,mpc8610-ssi"; - cell-index = <0>; - reg = <0x15000 0x100>; - interrupts = <75 2 0 0>; - fsl,playback-dma = <&dma00>; - fsl,capture-dma = <&dma01>; - fsl,fifo-depth = <15>; - }; - -/include/ "pq3-sata2-0.dtsi" -/include/ "pq3-sata2-1.dtsi" - - L2: l2-cache-controller@20000 { - compatible = "fsl,p1022-l2-cache-controller"; - reg = <0x20000 0x1000>; - cache-line-size = <32>; // 32 bytes - cache-size = <0x40000>; // L2,256K - interrupts = <16 2 0 0>; - }; - -/include/ "pq3-dma-0.dtsi" -/include/ "pq3-usb2-dr-0.dtsi" -/include/ "pq3-usb2-dr-1.dtsi" - -/include/ "pq3-esdhc-0.dtsi" - sdhc@2e000 { - fsl,sdhci-auto-cmd12; - }; - -/include/ "pq3-sec3.3-0.dtsi" -/include/ "pq3-mpic.dtsi" -/include/ "pq3-mpic-timer-B.dtsi" - -/include/ "pq3-etsec2-0.dtsi" - enet0: enet0_grp2: ethernet@b0000 { - }; - -/include/ "pq3-etsec2-1.dtsi" - enet1: enet1_grp2: ethernet@b1000 { - }; - - global-utilities@e0000 { - compatible = "fsl,p1022-guts"; - reg = <0xe0000 0x1000>; - fsl,has-rstcr; - }; - - power@e0070{ - compatible = "fsl,mpc8536-pmc", "fsl,mpc8548-pmc"; - reg = <0xe0070 0x20>; - }; - -}; - -/include/ "pq3-etsec2-grp2-0.dtsi" -/include/ "pq3-etsec2-grp2-1.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi deleted file mode 100644 index e930f4f7ca89..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi +++ /dev/null @@ -1,68 +0,0 @@ -/* - * P1022/P1013 Silicon/SoC Device Tree Source (pre include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. 
- * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/dts-v1/; -/ { - compatible = "fsl,P1022"; - #address-cells = <2>; - #size-cells = <2>; - interrupt-parent = <&mpic>; - - aliases { - serial0 = &serial0; - serial1 = &serial1; - ethernet0 = &enet0; - ethernet1 = &enet1; - pci0 = &pci0; - pci1 = &pci1; - pci2 = &pci2; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - PowerPC,P1022@0 { - device_type = "cpu"; - reg = <0x0>; - next-level-cache = <&L2>; - }; - - PowerPC,P1022@1 { - device_type = "cpu"; - reg = <0x1>; - next-level-cache = <&L2>; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi deleted file mode 100644 index b06bb4cc1fe8..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi +++ /dev/null @@ -1,224 +0,0 @@ -/* - * P1023/P1017 Silicon/SoC Device Tree Source (post include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -&lbc { - #address-cells = <2>; - #size-cells = <1>; - compatible = "fsl,p1023-elbc", "fsl,elbc", "simple-bus"; - interrupts = <19 2 0 0>; -}; - -/* controller at 0xa000 */ -&pci0 { - compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0x0 0xff>; - clock-frequency = <33333333>; - interrupts = <16 2 0 0>; - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 0 0>; - }; -}; - -/* controller at 0x9000 */ -&pci1 { - compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 0xff>; - clock-frequency = <33333333>; - interrupts = <16 2 0 0>; - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 0 0>; - }; -}; - -/* controller at 0xb000 */ -&pci2 { - compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0x0 0xff>; - clock-frequency = <33333333>; - interrupts = <16 2 0 0>; - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 0 0>; - }; -}; - -&soc { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "fsl,p1023-immr", "simple-bus"; - bus-frequency = <0>; // Filled out by uboot. 
- - ecm-law@0 { - compatible = "fsl,ecm-law"; - reg = <0x0 0x1000>; - fsl,num-laws = <12>; - }; - - ecm@1000 { - compatible = "fsl,p1023-ecm", "fsl,ecm"; - reg = <0x1000 0x1000>; - interrupts = <16 2 0 0>; - }; - - memory-controller@2000 { - compatible = "fsl,p1023-memory-controller"; - reg = <0x2000 0x1000>; - interrupts = <16 2 0 0>; - }; - -/include/ "pq3-i2c-0.dtsi" -/include/ "pq3-i2c-1.dtsi" -/include/ "pq3-duart-0.dtsi" - -/include/ "pq3-espi-0.dtsi" - spi@7000 { - fsl,espi-num-chipselects = <4>; - }; - -/include/ "pq3-gpio-0.dtsi" - - L2: l2-cache-controller@20000 { - compatible = "fsl,p1023-l2-cache-controller"; - reg = <0x20000 0x1000>; - cache-line-size = <32>; // 32 bytes - cache-size = <0x40000>; // L2,256K - interrupts = <16 2 0 0>; - }; - -/include/ "pq3-dma-0.dtsi" -/include/ "pq3-usb2-dr-0.dtsi" - - crypto: crypto@300000 { - compatible = "fsl,sec-v4.2", "fsl,sec-v4.0"; - #address-cells = <1>; - #size-cells = <1>; - reg = <0x30000 0x10000>; - ranges = <0 0x30000 0x10000>; - interrupts = <58 2 0 0>; - - sec_jr0: jr@1000 { - compatible = "fsl,sec-v4.2-job-ring", - "fsl,sec-v4.0-job-ring"; - reg = <0x1000 0x1000>; - interrupts = <45 2 0 0>; - }; - - sec_jr1: jr@2000 { - compatible = "fsl,sec-v4.2-job-ring", - "fsl,sec-v4.0-job-ring"; - reg = <0x2000 0x1000>; - interrupts = <45 2 0 0>; - }; - - sec_jr2: jr@3000 { - compatible = "fsl,sec-v4.2-job-ring", - "fsl,sec-v4.0-job-ring"; - reg = <0x3000 0x1000>; - interrupts = <57 2 0 0>; - }; - - sec_jr3: jr@4000 { - compatible = "fsl,sec-v4.2-job-ring", - "fsl,sec-v4.0-job-ring"; - reg = <0x4000 0x1000>; - interrupts = <57 2 0 0>; - }; - - rtic@6000 { - compatible = "fsl,sec-v4.2-rtic", - "fsl,sec-v4.0-rtic"; - #address-cells = <1>; - #size-cells = <1>; - reg = <0x6000 0x100>; - ranges = <0x0 0x6100 0xe00>; - - rtic_a: rtic-a@0 { - compatible = "fsl,sec-v4.2-rtic-memory", - "fsl,sec-v4.0-rtic-memory"; - reg = <0x00 0x20 0x100 0x80>; - }; - - rtic_b: rtic-b@20 { - compatible = "fsl,sec-v4.2-rtic-memory", - "fsl,sec-v4.0-rtic-memory"; - reg = <0x20 0x20 0x200 0x80>; - }; - - rtic_c: rtic-c@40 { - compatible = "fsl,sec-v4.2-rtic-memory", - "fsl,sec-v4.0-rtic-memory"; - reg = <0x40 0x20 0x300 0x80>; - }; - - rtic_d: rtic-d@60 { - compatible = "fsl,sec-v4.2-rtic-memory", - "fsl,sec-v4.0-rtic-memory"; - reg = <0x60 0x20 0x500 0x80>; - }; - }; - }; - -/include/ "pq3-mpic.dtsi" -/include/ "pq3-mpic-timer-B.dtsi" - - global-utilities@e0000 { - compatible = "fsl,p1023-guts"; - reg = <0xe0000 0x1000>; - fsl,has-rstcr; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/p1023si-pre.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p1023si-pre.dtsi deleted file mode 100644 index ac45f6d93385..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/p1023si-pre.dtsi +++ /dev/null @@ -1,76 +0,0 @@ -/* - * P1023/P1017 Silicon/SoC Device Tree Source (pre include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/dts-v1/; -/ { - compatible = "fsl,P1023"; - #address-cells = <2>; - #size-cells = <2>; - interrupt-parent = <&mpic>; - - aliases { - serial0 = &serial0; - serial1 = &serial1; - pci0 = &pci0; - pci1 = &pci1; - pci2 = &pci2; - - crypto = &crypto; - sec_jr0 = &sec_jr0; - sec_jr1 = &sec_jr1; - sec_jr2 = &sec_jr2; - sec_jr3 = &sec_jr3; - rtic_a = &rtic_a; - rtic_b = &rtic_b; - rtic_c = &rtic_c; - rtic_d = &rtic_d; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - PowerPC,P1023@0 { - device_type = "cpu"; - reg = <0x0>; - next-level-cache = <&L2>; - }; - - PowerPC,P1023@1 { - device_type = "cpu"; - reg = <0x1>; - next-level-cache = <&L2>; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi deleted file mode 100644 index c041050561a7..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi +++ /dev/null @@ -1,194 +0,0 @@ -/* - * P2020/P2010 Silicon/SoC Device Tree Source (post include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -&lbc { - #address-cells = <2>; - #size-cells = <1>; - compatible = "fsl,p2020-elbc", "fsl,elbc", "simple-bus"; - interrupts = <19 2 0 0>; -}; - -/* controller at 0xa000 */ -&pci0 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <26 2 0 0>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <26 2 0 0>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0 - >; - }; -}; - -/* controller at 0x9000 */ -&pci1 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <25 2 0 0>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <25 2 0 0>; - interrupt-map-mask = <0xf800 0 0 7>; - - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0 - >; - }; -}; - -/* controller at 0x8000 */ -&pci2 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 255>; - clock-frequency = <33333333>; - interrupts = <24 2 0 0>; - - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <24 2 0 0>; - interrupt-map-mask = <0xf800 0 0 7>; - - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0x0 0x0 0x1 &mpic 0x8 0x1 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x9 0x1 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0xa 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0xb 0x1 0x0 0x0 - >; - }; -}; - -&soc { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "fsl,p2020-immr", "simple-bus"; - bus-frequency = <0>; // Filled out by uboot. 
- - ecm-law@0 { - compatible = "fsl,ecm-law"; - reg = <0x0 0x1000>; - fsl,num-laws = <12>; - }; - - ecm@1000 { - compatible = "fsl,p2020-ecm", "fsl,ecm"; - reg = <0x1000 0x1000>; - interrupts = <17 2 0 0>; - }; - - memory-controller@2000 { - compatible = "fsl,p2020-memory-controller"; - reg = <0x2000 0x1000>; - interrupts = <18 2 0 0>; - }; - -/include/ "pq3-i2c-0.dtsi" -/include/ "pq3-i2c-1.dtsi" -/include/ "pq3-duart-0.dtsi" -/include/ "pq3-espi-0.dtsi" - spi0: spi@7000 { - fsl,espi-num-chipselects = <4>; - }; - -/include/ "pq3-dma-1.dtsi" -/include/ "pq3-gpio-0.dtsi" - - L2: l2-cache-controller@20000 { - compatible = "fsl,p2020-l2-cache-controller"; - reg = <0x20000 0x1000>; - cache-line-size = <32>; // 32 bytes - cache-size = <0x80000>; // L2,512K - interrupts = <16 2 0 0>; - }; - -/include/ "pq3-dma-0.dtsi" -/include/ "pq3-usb2-dr-0.dtsi" -/include/ "pq3-etsec1-0.dtsi" -/include/ "pq3-etsec1-timer-0.dtsi" - - ptp_clock@24e00 { - interrupts = <68 2 0 0 69 2 0 0 70 2 0 0>; - }; - - -/include/ "pq3-etsec1-1.dtsi" -/include/ "pq3-etsec1-2.dtsi" -/include/ "pq3-esdhc-0.dtsi" -/include/ "pq3-sec3.1-0.dtsi" -/include/ "pq3-mpic.dtsi" -/include/ "pq3-mpic-timer-B.dtsi" - - global-utilities@e0000 { - compatible = "fsl,p2020-guts"; - reg = <0xe0000 0x1000>; - fsl,has-rstcr; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/p2020si-pre.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p2020si-pre.dtsi deleted file mode 100644 index 3213288641d1..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/p2020si-pre.dtsi +++ /dev/null @@ -1,69 +0,0 @@ -/* - * P2020/P2010 Silicon/SoC Device Tree Source (pre include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -/dts-v1/; -/ { - compatible = "fsl,P2020"; - #address-cells = <2>; - #size-cells = <2>; - interrupt-parent = <&mpic>; - - aliases { - serial0 = &serial0; - serial1 = &serial1; - ethernet0 = &enet0; - ethernet1 = &enet1; - ethernet2 = &enet2; - pci0 = &pci0; - pci1 = &pci1; - pci2 = &pci2; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - PowerPC,P2020@0 { - device_type = "cpu"; - reg = <0x0>; - next-level-cache = <&L2>; - }; - - PowerPC,P2020@1 { - device_type = "cpu"; - reg = <0x1>; - next-level-cache = <&L2>; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi deleted file mode 100644 index 234a399ddeb2..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi +++ /dev/null @@ -1,325 +0,0 @@ -/* - * P2041/P2040 Silicon/SoC Device Tree Source (post include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -&lbc { - compatible = "fsl,p2041-elbc", "fsl,elbc", "simple-bus"; - interrupts = <25 2 0 0>; - #address-cells = <2>; - #size-cells = <1>; -}; - -/* controller at 0x200000 */ -&pci0 { - compatible = "fsl,p2041-pcie", "fsl,qoriq-pcie-v2.2"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0x0 0xff>; - clock-frequency = <33333333>; - interrupts = <16 2 1 15>; - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 1 15>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0 0 1 &mpic 40 1 0 0 - 0000 0 0 2 &mpic 1 1 0 0 - 0000 0 0 3 &mpic 2 1 0 0 - 0000 0 0 4 &mpic 3 1 0 0 - >; - }; -}; - -/* controller at 0x201000 */ -&pci1 { - compatible = "fsl,p2041-pcie", "fsl,qoriq-pcie-v2.2"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 0xff>; - clock-frequency = <33333333>; - interrupts = <16 2 1 14>; - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 1 14>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0 0 1 &mpic 41 1 0 0 - 0000 0 0 2 &mpic 5 1 0 0 - 0000 0 0 3 &mpic 6 1 0 0 - 0000 0 0 4 &mpic 7 1 0 0 - >; - }; -}; - -/* controller at 0x202000 */ -&pci2 { - compatible = "fsl,p2041-pcie", "fsl,qoriq-pcie-v2.2"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0x0 0xff>; - clock-frequency = <33333333>; - interrupts = <16 2 1 13>; - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 1 13>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0 0 1 &mpic 42 1 0 0 - 0000 0 0 2 &mpic 9 1 0 0 - 0000 0 0 3 &mpic 10 1 0 0 - 0000 0 0 4 &mpic 11 1 0 0 - >; - }; -}; - -&rio { - compatible = "fsl,srio"; - interrupts = <16 2 1 11>; - #address-cells = <2>; - #size-cells = <2>; - ranges; - - port1 { - #address-cells = <2>; - #size-cells = <2>; - cell-index = <1>; - }; - - port2 { - #address-cells = <2>; - #size-cells = <2>; - cell-index = <2>; - }; -}; - -&dcsr { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,dcsr", "simple-bus"; - - dcsr-epu@0 { - compatible = "fsl,dcsr-epu"; - interrupts = <52 2 0 0 - 84 2 0 0 - 85 2 0 0>; - reg = <0x0 0x1000>; - }; - dcsr-npc { - compatible = "fsl,dcsr-npc"; - reg = <0x1000 0x1000 0x1000000 0x8000>; - }; - dcsr-nxc@2000 { - compatible = "fsl,dcsr-nxc"; - reg = <0x2000 0x1000>; - }; - dcsr-corenet { - compatible = "fsl,dcsr-corenet"; - reg = <0x8000 0x1000 0xB0000 0x1000>; - }; - dcsr-dpaa@9000 { - compatible = "fsl,p2041-dcsr-dpaa", "fsl,dcsr-dpaa"; - reg = <0x9000 0x1000>; - }; - dcsr-ocn@11000 { - compatible = "fsl,p2041-dcsr-ocn", "fsl,dcsr-ocn"; - reg = <0x11000 0x1000>; - }; - dcsr-ddr@12000 { - compatible = "fsl,dcsr-ddr"; - dev-handle = <&ddr1>; - reg = <0x12000 0x1000>; - }; - dcsr-nal@18000 { - compatible = "fsl,p2041-dcsr-nal", "fsl,dcsr-nal"; - reg = <0x18000 0x1000>; - }; - dcsr-rcpm@22000 { - compatible = "fsl,p2041-dcsr-rcpm", "fsl,dcsr-rcpm"; - reg = <0x22000 0x1000>; - }; - dcsr-cpu-sb-proxy@40000 { - compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu0>; - reg = <0x40000 0x1000>; - }; - dcsr-cpu-sb-proxy@41000 { - compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu1>; - reg = <0x41000 
0x1000>; - }; - dcsr-cpu-sb-proxy@42000 { - compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu2>; - reg = <0x42000 0x1000>; - }; - dcsr-cpu-sb-proxy@43000 { - compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu3>; - reg = <0x43000 0x1000>; - }; -}; - -&soc { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "simple-bus"; - - soc-sram-error { - compatible = "fsl,soc-sram-error"; - interrupts = <16 2 1 29>; - }; - - corenet-law@0 { - compatible = "fsl,corenet-law"; - reg = <0x0 0x1000>; - fsl,num-laws = <32>; - }; - - ddr1: memory-controller@8000 { - compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller"; - reg = <0x8000 0x1000>; - interrupts = <16 2 1 23>; - }; - - cpc: l3-cache-controller@10000 { - compatible = "fsl,p2041-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache"; - reg = <0x10000 0x1000>; - interrupts = <16 2 1 27>; - }; - - corenet-cf@18000 { - compatible = "fsl,corenet-cf"; - reg = <0x18000 0x1000>; - interrupts = <16 2 1 31>; - fsl,ccf-num-csdids = <32>; - fsl,ccf-num-snoopids = <32>; - }; - - iommu@20000 { - compatible = "fsl,pamu-v1.0", "fsl,pamu"; - reg = <0x20000 0x4000>; - interrupts = < - 24 2 0 0 - 16 2 1 30>; - }; - -/include/ "qoriq-mpic.dtsi" - - guts: global-utilities@e0000 { - compatible = "fsl,qoriq-device-config-1.0"; - reg = <0xe0000 0xe00>; - fsl,has-rstcr; - #sleep-cells = <1>; - fsl,liodn-bits = <12>; - }; - - pins: global-utilities@e0e00 { - compatible = "fsl,qoriq-pin-control-1.0"; - reg = <0xe0e00 0x200>; - #sleep-cells = <2>; - }; - - clockgen: global-utilities@e1000 { - compatible = "fsl,p2041-clockgen", "fsl,qoriq-clockgen-1.0"; - reg = <0xe1000 0x1000>; - clock-frequency = <0>; - }; - - rcpm: global-utilities@e2000 { - compatible = "fsl,qoriq-rcpm-1.0"; - reg = <0xe2000 0x1000>; - #sleep-cells = <1>; - }; - - sfp: sfp@e8000 { - compatible = "fsl,p2041-sfp", "fsl,qoriq-sfp-1.0"; - reg = <0xe8000 0x1000>; - }; - - serdes: serdes@ea000 { - compatible = "fsl,p2041-serdes"; - reg = <0xea000 0x1000>; - }; - -/include/ "qoriq-dma-0.dtsi" -/include/ "qoriq-dma-1.dtsi" -/include/ "qoriq-espi-0.dtsi" - spi@110000 { - fsl,espi-num-chipselects = <4>; - }; - -/include/ "qoriq-esdhc-0.dtsi" - sdhc@114000 { - sdhci,auto-cmd12; - }; - -/include/ "qoriq-i2c-0.dtsi" -/include/ "qoriq-i2c-1.dtsi" -/include/ "qoriq-duart-0.dtsi" -/include/ "qoriq-duart-1.dtsi" -/include/ "qoriq-gpio-0.dtsi" -/include/ "qoriq-usb2-mph-0.dtsi" - usb0: usb@210000 { - phy_type = "utmi"; - port0; - }; - -/include/ "qoriq-usb2-dr-0.dtsi" - usb1: usb@211000 { - dr_mode = "host"; - phy_type = "utmi"; - }; - -/include/ "qoriq-sata2-0.dtsi" -/include/ "qoriq-sata2-1.dtsi" -/include/ "qoriq-sec4.2-0.dtsi" -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi deleted file mode 100644 index 2d0a40d6b10f..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi +++ /dev/null @@ -1,111 +0,0 @@ -/* - * P2041 Silicon/SoC Device Tree Source (pre include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/dts-v1/; -/ { - compatible = "fsl,P2041"; - #address-cells = <2>; - #size-cells = <2>; - interrupt-parent = <&mpic>; - - aliases { - ccsr = &soc; - dcsr = &dcsr; - - serial0 = &serial0; - serial1 = &serial1; - serial2 = &serial2; - serial3 = &serial3; - pci0 = &pci0; - pci1 = &pci1; - pci2 = &pci2; - usb0 = &usb0; - usb1 = &usb1; - dma0 = &dma0; - dma1 = &dma1; - sdhc = &sdhc; - msi0 = &msi0; - msi1 = &msi1; - msi2 = &msi2; - - crypto = &crypto; - sec_jr0 = &sec_jr0; - sec_jr1 = &sec_jr1; - sec_jr2 = &sec_jr2; - sec_jr3 = &sec_jr3; - rtic_a = &rtic_a; - rtic_b = &rtic_b; - rtic_c = &rtic_c; - rtic_d = &rtic_d; - sec_mon = &sec_mon; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - cpu0: PowerPC,e500mc@0 { - device_type = "cpu"; - reg = <0>; - next-level-cache = <&L2_0>; - L2_0: l2-cache { - next-level-cache = <&cpc>; - }; - }; - cpu1: PowerPC,e500mc@1 { - device_type = "cpu"; - reg = <1>; - next-level-cache = <&L2_1>; - L2_1: l2-cache { - next-level-cache = <&cpc>; - }; - }; - cpu2: PowerPC,e500mc@2 { - device_type = "cpu"; - reg = <2>; - next-level-cache = <&L2_2>; - L2_2: l2-cache { - next-level-cache = <&cpc>; - }; - }; - cpu3: PowerPC,e500mc@3 { - device_type = "cpu"; - reg = <3>; - next-level-cache = <&L2_3>; - L2_3: l2-cache { - next-level-cache = <&cpc>; - }; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi deleted file mode 100644 index d41d08de7f7e..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi +++ /dev/null @@ -1,352 +0,0 @@ -/* - * P3041 Silicon/SoC Device Tree Source (post include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -&lbc { - compatible = "fsl,p3041-elbc", "fsl,elbc", "simple-bus"; - interrupts = <25 2 0 0>; - #address-cells = <2>; - #size-cells = <1>; -}; - -/* controller at 0x200000 */ -&pci0 { - compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0x0 0xff>; - clock-frequency = <33333333>; - interrupts = <16 2 1 15>; - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 1 15>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0 0 1 &mpic 40 1 0 0 - 0000 0 0 2 &mpic 1 1 0 0 - 0000 0 0 3 &mpic 2 1 0 0 - 0000 0 0 4 &mpic 3 1 0 0 - >; - }; -}; - -/* controller at 0x201000 */ -&pci1 { - compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 0xff>; - clock-frequency = <33333333>; - interrupts = <16 2 1 14>; - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 1 14>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0 0 1 &mpic 41 1 0 0 - 0000 0 0 2 &mpic 5 1 0 0 - 0000 0 0 3 &mpic 6 1 0 0 - 0000 0 0 4 &mpic 7 1 0 0 - >; - }; -}; - -/* controller at 0x202000 */ -&pci2 { - compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0x0 0xff>; - clock-frequency = <33333333>; - interrupts = <16 2 1 13>; - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 1 13>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0 0 1 &mpic 42 1 0 0 - 0000 0 0 2 &mpic 9 1 0 0 - 0000 0 0 3 &mpic 10 1 0 0 - 0000 0 0 4 &mpic 11 1 0 0 - >; - }; -}; - -/* controller at 0x203000 */ -&pci3 { - compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2"; - device_type = "pci"; - #size-cells = <2>; - 
#address-cells = <3>; - bus-range = <0x0 0xff>; - clock-frequency = <33333333>; - interrupts = <16 2 1 12>; - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 1 12>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0 0 1 &mpic 43 1 0 0 - 0000 0 0 2 &mpic 0 1 0 0 - 0000 0 0 3 &mpic 4 1 0 0 - 0000 0 0 4 &mpic 8 1 0 0 - >; - }; -}; - -&rio { - compatible = "fsl,srio"; - interrupts = <16 2 1 11>; - #address-cells = <2>; - #size-cells = <2>; - ranges; - - port1 { - #address-cells = <2>; - #size-cells = <2>; - cell-index = <1>; - }; - - port2 { - #address-cells = <2>; - #size-cells = <2>; - cell-index = <2>; - }; -}; - -&dcsr { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,dcsr", "simple-bus"; - - dcsr-epu@0 { - compatible = "fsl,dcsr-epu"; - interrupts = <52 2 0 0 - 84 2 0 0 - 85 2 0 0>; - reg = <0x0 0x1000>; - }; - dcsr-npc { - compatible = "fsl,dcsr-npc"; - reg = <0x1000 0x1000 0x1000000 0x8000>; - }; - dcsr-nxc@2000 { - compatible = "fsl,dcsr-nxc"; - reg = <0x2000 0x1000>; - }; - dcsr-corenet { - compatible = "fsl,dcsr-corenet"; - reg = <0x8000 0x1000 0xB0000 0x1000>; - }; - dcsr-dpaa@9000 { - compatible = "fsl,p3041-dcsr-dpaa", "fsl,dcsr-dpaa"; - reg = <0x9000 0x1000>; - }; - dcsr-ocn@11000 { - compatible = "fsl,p3041-dcsr-ocn", "fsl,dcsr-ocn"; - reg = <0x11000 0x1000>; - }; - dcsr-ddr@12000 { - compatible = "fsl,dcsr-ddr"; - dev-handle = <&ddr1>; - reg = <0x12000 0x1000>; - }; - dcsr-nal@18000 { - compatible = "fsl,p3041-dcsr-nal", "fsl,dcsr-nal"; - reg = <0x18000 0x1000>; - }; - dcsr-rcpm@22000 { - compatible = "fsl,p3041-dcsr-rcpm", "fsl,dcsr-rcpm"; - reg = <0x22000 0x1000>; - }; - dcsr-cpu-sb-proxy@40000 { - compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu0>; - reg = <0x40000 0x1000>; - }; - dcsr-cpu-sb-proxy@41000 { - compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu1>; - reg = <0x41000 0x1000>; - }; - dcsr-cpu-sb-proxy@42000 { - compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu2>; - reg = <0x42000 0x1000>; - }; - dcsr-cpu-sb-proxy@43000 { - compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu3>; - reg = <0x43000 0x1000>; - }; -}; - -&soc { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "simple-bus"; - - soc-sram-error { - compatible = "fsl,soc-sram-error"; - interrupts = <16 2 1 29>; - }; - - corenet-law@0 { - compatible = "fsl,corenet-law"; - reg = <0x0 0x1000>; - fsl,num-laws = <32>; - }; - - ddr1: memory-controller@8000 { - compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller"; - reg = <0x8000 0x1000>; - interrupts = <16 2 1 23>; - }; - - cpc: l3-cache-controller@10000 { - compatible = "fsl,p3041-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache"; - reg = <0x10000 0x1000>; - interrupts = <16 2 1 27>; - }; - - corenet-cf@18000 { - compatible = "fsl,corenet-cf"; - reg = <0x18000 0x1000>; - interrupts = <16 2 1 31>; - fsl,ccf-num-csdids = <32>; - fsl,ccf-num-snoopids = <32>; - }; - - iommu@20000 { - compatible = "fsl,pamu-v1.0", "fsl,pamu"; - reg = <0x20000 0x4000>; - interrupts = < - 24 2 0 0 - 16 2 1 30>; - }; - -/include/ "qoriq-mpic.dtsi" - - guts: global-utilities@e0000 { - compatible = "fsl,qoriq-device-config-1.0"; - reg = <0xe0000 0xe00>; - fsl,has-rstcr; - #sleep-cells = <1>; - fsl,liodn-bits = <12>; - }; - - 
pins: global-utilities@e0e00 { - compatible = "fsl,qoriq-pin-control-1.0"; - reg = <0xe0e00 0x200>; - #sleep-cells = <2>; - }; - - clockgen: global-utilities@e1000 { - compatible = "fsl,p3041-clockgen", "fsl,qoriq-clockgen-1.0"; - reg = <0xe1000 0x1000>; - clock-frequency = <0>; - }; - - rcpm: global-utilities@e2000 { - compatible = "fsl,qoriq-rcpm-1.0"; - reg = <0xe2000 0x1000>; - #sleep-cells = <1>; - }; - - sfp: sfp@e8000 { - compatible = "fsl,p3041-sfp", "fsl,qoriq-sfp-1.0"; - reg = <0xe8000 0x1000>; - }; - - serdes: serdes@ea000 { - compatible = "fsl,p3041-serdes"; - reg = <0xea000 0x1000>; - }; - -/include/ "qoriq-dma-0.dtsi" -/include/ "qoriq-dma-1.dtsi" -/include/ "qoriq-espi-0.dtsi" - spi@110000 { - fsl,espi-num-chipselects = <4>; - }; - -/include/ "qoriq-esdhc-0.dtsi" - sdhc@114000 { - sdhci,auto-cmd12; - }; - -/include/ "qoriq-i2c-0.dtsi" -/include/ "qoriq-i2c-1.dtsi" -/include/ "qoriq-duart-0.dtsi" -/include/ "qoriq-duart-1.dtsi" -/include/ "qoriq-gpio-0.dtsi" -/include/ "qoriq-usb2-mph-0.dtsi" - usb0: usb@210000 { - phy_type = "utmi"; - port0; - }; - -/include/ "qoriq-usb2-dr-0.dtsi" - usb1: usb@211000 { - dr_mode = "host"; - phy_type = "utmi"; - }; - -/include/ "qoriq-sata2-0.dtsi" -/include/ "qoriq-sata2-1.dtsi" -/include/ "qoriq-sec4.2-0.dtsi" -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi deleted file mode 100644 index 136def3536b6..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi +++ /dev/null @@ -1,112 +0,0 @@ -/* - * P3041 Silicon/SoC Device Tree Source (pre include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -/dts-v1/; -/ { - compatible = "fsl,P3041"; - #address-cells = <2>; - #size-cells = <2>; - interrupt-parent = <&mpic>; - - aliases { - ccsr = &soc; - dcsr = &dcsr; - - serial0 = &serial0; - serial1 = &serial1; - serial2 = &serial2; - serial3 = &serial3; - pci0 = &pci0; - pci1 = &pci1; - pci2 = &pci2; - pci3 = &pci3; - usb0 = &usb0; - usb1 = &usb1; - dma0 = &dma0; - dma1 = &dma1; - sdhc = &sdhc; - msi0 = &msi0; - msi1 = &msi1; - msi2 = &msi2; - - crypto = &crypto; - sec_jr0 = &sec_jr0; - sec_jr1 = &sec_jr1; - sec_jr2 = &sec_jr2; - sec_jr3 = &sec_jr3; - rtic_a = &rtic_a; - rtic_b = &rtic_b; - rtic_c = &rtic_c; - rtic_d = &rtic_d; - sec_mon = &sec_mon; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - cpu0: PowerPC,e500mc@0 { - device_type = "cpu"; - reg = <0>; - next-level-cache = <&L2_0>; - L2_0: l2-cache { - next-level-cache = <&cpc>; - }; - }; - cpu1: PowerPC,e500mc@1 { - device_type = "cpu"; - reg = <1>; - next-level-cache = <&L2_1>; - L2_1: l2-cache { - next-level-cache = <&cpc>; - }; - }; - cpu2: PowerPC,e500mc@2 { - device_type = "cpu"; - reg = <2>; - next-level-cache = <&L2_2>; - L2_2: l2-cache { - next-level-cache = <&cpc>; - }; - }; - cpu3: PowerPC,e500mc@3 { - device_type = "cpu"; - reg = <3>; - next-level-cache = <&L2_3>; - L2_3: l2-cache { - next-level-cache = <&cpc>; - }; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/p3060si-post.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p3060si-post.dtsi deleted file mode 100644 index a63edd195ae5..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/p3060si-post.dtsi +++ /dev/null @@ -1,296 +0,0 @@ -/* - * P3060 Silicon/SoC Device Tree Source (post include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -&lbc { - compatible = "fsl,p3060-elbc", "fsl,elbc", "simple-bus"; - interrupts = <25 2 0 0>; - #address-cells = <2>; - #size-cells = <1>; -}; - -/* controller at 0x200000 */ -&pci0 { - compatible = "fsl,p3060-pcie", "fsl,qoriq-pcie-v2.2"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0x0 0xff>; - clock-frequency = <33333333>; - interrupts = <16 2 1 15>; - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 1 15>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0 0 1 &mpic 40 1 0 0 - 0000 0 0 2 &mpic 1 1 0 0 - 0000 0 0 3 &mpic 2 1 0 0 - 0000 0 0 4 &mpic 3 1 0 0 - >; - }; -}; - -/* controller at 0x201000 */ -&pci1 { - compatible = "fsl,p3060-pcie", "fsl,qoriq-pcie-v2.2"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 0xff>; - clock-frequency = <33333333>; - interrupts = <16 2 1 14>; - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 1 14>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0 0 1 &mpic 41 1 0 0 - 0000 0 0 2 &mpic 5 1 0 0 - 0000 0 0 3 &mpic 6 1 0 0 - 0000 0 0 4 &mpic 7 1 0 0 - >; - }; -}; - -&rio { - compatible = "fsl,srio"; - interrupts = <16 2 1 11>; - #address-cells = <2>; - #size-cells = <2>; - fsl,srio-rmu-handle = <&rmu>; - ranges; - - port1 { - #address-cells = <2>; - #size-cells = <2>; - cell-index = <1>; - }; - - port2 { - #address-cells = <2>; - #size-cells = <2>; - cell-index = <2>; - }; -}; - -&dcsr { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,dcsr", "simple-bus"; - - dcsr-epu@0 { - compatible = "fsl,dcsr-epu"; - interrupts = <52 2 0 0 - 84 2 0 0 - 85 2 0 0>; - reg = <0x0 0x1000>; - }; - dcsr-npc { - compatible = "fsl,dcsr-npc"; - reg = <0x1000 0x1000 0x1000000 0x8000>; - }; - dcsr-nxc@2000 { - compatible = "fsl,dcsr-nxc"; - reg = <0x2000 0x1000>; - }; - dcsr-corenet { - compatible = "fsl,dcsr-corenet"; - reg = <0x8000 0x1000 0xB0000 0x1000>; - }; - dcsr-dpaa@9000 { - compatible = "fsl,p3060-dcsr-dpaa", "fsl,dcsr-dpaa"; - reg = <0x9000 0x1000>; - }; - dcsr-ocn@11000 { - compatible = "fsl,p3060-dcsr-ocn", "fsl,dcsr-ocn"; - reg = <0x11000 0x1000>; - }; - dcsr-ddr@12000 { - compatible = "fsl,dcsr-ddr"; - dev-handle = <&ddr1>; - reg = <0x12000 0x1000>; - }; - dcsr-nal@18000 { - compatible = "fsl,p3060-dcsr-nal", "fsl,dcsr-nal"; - reg = <0x18000 0x1000>; - }; - dcsr-rcpm@22000 { - compatible = "fsl,p3060-dcsr-rcpm", "fsl,dcsr-rcpm"; - reg = <0x22000 0x1000>; - }; - dcsr-cpu-sb-proxy@40000 { - compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu0>; - reg = <0x40000 0x1000>; - }; - dcsr-cpu-sb-proxy@41000 { - compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu1>; - reg = <0x41000 0x1000>; - }; - dcsr-cpu-sb-proxy@44000 { - compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu4>; - reg = <0x44000 0x1000>; - }; - dcsr-cpu-sb-proxy@45000 { - compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu5>; - reg = <0x45000 0x1000>; - }; - dcsr-cpu-sb-proxy@46000 { - compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu6>; - reg = <0x46000 0x1000>; - }; - dcsr-cpu-sb-proxy@47000 { - compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = 
<&cpu7>; - reg = <0x47000 0x1000>; - }; - -}; - -&soc { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "simple-bus"; - - soc-sram-error { - compatible = "fsl,soc-sram-error"; - interrupts = <16 2 1 29>; - }; - - corenet-law@0 { - compatible = "fsl,corenet-law"; - reg = <0x0 0x1000>; - fsl,num-laws = <32>; - }; - - ddr1: memory-controller@8000 { - compatible = "fsl,qoriq-memory-controller-v4.4", "fsl,qoriq-memory-controller"; - reg = <0x8000 0x1000>; - interrupts = <16 2 1 23>; - }; - - cpc: l3-cache-controller@10000 { - compatible = "fsl,p3060-l3-cache-controller", "cache"; - reg = <0x10000 0x1000 - 0x11000 0x1000>; - interrupts = <16 2 1 27 - 16 2 1 26>; - }; - - corenet-cf@18000 { - compatible = "fsl,corenet-cf"; - reg = <0x18000 0x1000>; - interrupts = <16 2 1 31>; - fsl,ccf-num-csdids = <32>; - fsl,ccf-num-snoopids = <32>; - }; - - iommu@20000 { - compatible = "fsl,pamu-v1.0", "fsl,pamu"; - reg = <0x20000 0x5000>; - interrupts = < - 24 2 0 0 - 16 2 1 30>; - }; - -/include/ "qoriq-rmu-0.dtsi" -/include/ "qoriq-mpic.dtsi" - - guts: global-utilities@e0000 { - compatible = "fsl,qoriq-device-config-1.0"; - reg = <0xe0000 0xe00>; - fsl,has-rstcr; - #sleep-cells = <1>; - fsl,liodn-bits = <12>; - }; - - pins: global-utilities@e0e00 { - compatible = "fsl,qoriq-pin-control-1.0"; - reg = <0xe0e00 0x200>; - #sleep-cells = <2>; - }; - - clockgen: global-utilities@e1000 { - compatible = "fsl,p3060-clockgen", "fsl,qoriq-clockgen-1.0"; - reg = <0xe1000 0x1000>; - clock-frequency = <0>; - }; - - rcpm: global-utilities@e2000 { - compatible = "fsl,qoriq-rcpm-1.0"; - reg = <0xe2000 0x1000>; - #sleep-cells = <1>; - }; - - sfp: sfp@e8000 { - compatible = "fsl,p3060-sfp", "fsl,qoriq-sfp-1.0"; - reg = <0xe8000 0x1000>; - }; - - serdes: serdes@ea000 { - compatible = "fsl,p3060-serdes"; - reg = <0xea000 0x1000>; - }; - -/include/ "qoriq-dma-0.dtsi" -/include/ "qoriq-dma-1.dtsi" -/include/ "qoriq-espi-0.dtsi" - spi@110000 { - fsl,espi-num-chipselects = <4>; - }; - -/include/ "qoriq-i2c-0.dtsi" -/include/ "qoriq-i2c-1.dtsi" -/include/ "qoriq-duart-0.dtsi" -/include/ "qoriq-duart-1.dtsi" -/include/ "qoriq-gpio-0.dtsi" -/include/ "qoriq-usb2-mph-0.dtsi" -/include/ "qoriq-usb2-dr-0.dtsi" -/include/ "qoriq-sec4.1-0.dtsi" -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/p3060si-pre.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p3060si-pre.dtsi deleted file mode 100644 index 00c8e70e7b90..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/p3060si-pre.dtsi +++ /dev/null @@ -1,125 +0,0 @@ -/* - * P3060 Silicon/SoC Device Tree Source (pre include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. 
- * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/dts-v1/; -/ { - compatible = "fsl,P3060"; - #address-cells = <2>; - #size-cells = <2>; - interrupt-parent = <&mpic>; - - aliases { - ccsr = &soc; - dcsr = &dcsr; - - serial0 = &serial0; - serial1 = &serial1; - serial2 = &serial2; - serial3 = &serial3; - pci0 = &pci0; - pci1 = &pci1; - usb0 = &usb0; - usb1 = &usb1; - dma0 = &dma0; - dma1 = &dma1; - msi0 = &msi0; - msi1 = &msi1; - msi2 = &msi2; - - crypto = &crypto; - sec_jr0 = &sec_jr0; - sec_jr1 = &sec_jr1; - sec_jr2 = &sec_jr2; - sec_jr3 = &sec_jr3; - rtic_a = &rtic_a; - rtic_b = &rtic_b; - rtic_c = &rtic_c; - rtic_d = &rtic_d; - sec_mon = &sec_mon; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - cpu0: PowerPC,e500mc@0 { - device_type = "cpu"; - reg = <0>; - next-level-cache = <&L2_0>; - L2_0: l2-cache { - next-level-cache = <&cpc>; - }; - }; - cpu1: PowerPC,e500mc@1 { - device_type = "cpu"; - reg = <1>; - next-level-cache = <&L2_1>; - L2_1: l2-cache { - next-level-cache = <&cpc>; - }; - }; - cpu4: PowerPC,e500mc@4 { - device_type = "cpu"; - reg = <4>; - next-level-cache = <&L2_4>; - L2_4: l2-cache { - next-level-cache = <&cpc>; - }; - }; - cpu5: PowerPC,e500mc@5 { - device_type = "cpu"; - reg = <5>; - next-level-cache = <&L2_5>; - L2_5: l2-cache { - next-level-cache = <&cpc>; - }; - }; - cpu6: PowerPC,e500mc@6 { - device_type = "cpu"; - reg = <6>; - next-level-cache = <&L2_6>; - L2_6: l2-cache { - next-level-cache = <&cpc>; - }; - }; - cpu7: PowerPC,e500mc@7 { - device_type = "cpu"; - reg = <7>; - next-level-cache = <&L2_7>; - L2_7: l2-cache { - next-level-cache = <&cpc>; - }; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi deleted file mode 100644 index 8d35d2c1f694..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi +++ /dev/null @@ -1,350 +0,0 @@ -/* - * P4080/P4040 Silicon/SoC Device Tree Source (post include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -&lbc { - compatible = "fsl,p4080-elbc", "fsl,elbc", "simple-bus"; - interrupts = <25 2 0 0>; - #address-cells = <2>; - #size-cells = <1>; -}; - -/* controller at 0x200000 */ -&pci0 { - compatible = "fsl,p4080-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0x0 0xff>; - clock-frequency = <33333333>; - interrupts = <16 2 1 15>; - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 1 15>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0 0 1 &mpic 40 1 0 0 - 0000 0 0 2 &mpic 1 1 0 0 - 0000 0 0 3 &mpic 2 1 0 0 - 0000 0 0 4 &mpic 3 1 0 0 - >; - }; -}; - -/* controller at 0x201000 */ -&pci1 { - compatible = "fsl,p4080-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 0xff>; - clock-frequency = <33333333>; - interrupts = <16 2 1 14>; - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 1 14>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0 0 1 &mpic 41 1 0 0 - 0000 0 0 2 &mpic 5 1 0 0 - 0000 0 0 3 &mpic 6 1 0 0 - 0000 0 0 4 &mpic 7 1 0 0 - >; - }; -}; - -/* controller at 0x202000 */ -&pci2 { - compatible = "fsl,p4080-pcie"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0x0 0xff>; - clock-frequency = <33333333>; - interrupts = <16 2 1 13>; - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 1 13>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0 0 1 &mpic 42 1 0 0 - 0000 0 0 2 &mpic 9 1 0 0 - 0000 0 0 3 &mpic 10 1 0 0 - 0000 0 0 4 &mpic 11 1 0 0 - >; - }; -}; - -&rio { - compatible = "fsl,srio"; - interrupts = <16 2 1 11>; - #address-cells = <2>; - #size-cells = <2>; - fsl,srio-rmu-handle = <&rmu>; - ranges; - - port1 { - #address-cells = <2>; - #size-cells = <2>; - cell-index = <1>; - }; - - port2 { - #address-cells = <2>; - #size-cells = <2>; - cell-index = <2>; - }; -}; - -&dcsr { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,dcsr", "simple-bus"; - - dcsr-epu@0 
{ - compatible = "fsl,dcsr-epu"; - interrupts = <52 2 0 0 - 84 2 0 0 - 85 2 0 0>; - reg = <0x0 0x1000>; - }; - dcsr-npc { - compatible = "fsl,dcsr-npc"; - reg = <0x1000 0x1000 0x1000000 0x8000>; - }; - dcsr-nxc@2000 { - compatible = "fsl,dcsr-nxc"; - reg = <0x2000 0x1000>; - }; - dcsr-corenet { - compatible = "fsl,dcsr-corenet"; - reg = <0x8000 0x1000 0xB0000 0x1000>; - }; - dcsr-dpaa@9000 { - compatible = "fsl,p4080-dcsr-dpaa", "fsl,dcsr-dpaa"; - reg = <0x9000 0x1000>; - }; - dcsr-ocn@11000 { - compatible = "fsl,p4080-dcsr-ocn", "fsl,dcsr-ocn"; - reg = <0x11000 0x1000>; - }; - dcsr-ddr@12000 { - compatible = "fsl,dcsr-ddr"; - dev-handle = <&ddr1>; - reg = <0x12000 0x1000>; - }; - dcsr-ddr@13000 { - compatible = "fsl,dcsr-ddr"; - dev-handle = <&ddr2>; - reg = <0x13000 0x1000>; - }; - dcsr-nal@18000 { - compatible = "fsl,p4080-dcsr-nal", "fsl,dcsr-nal"; - reg = <0x18000 0x1000>; - }; - dcsr-rcpm@22000 { - compatible = "fsl,p4080-dcsr-rcpm", "fsl,dcsr-rcpm"; - reg = <0x22000 0x1000>; - }; - dcsr-cpu-sb-proxy@40000 { - compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu0>; - reg = <0x40000 0x1000>; - }; - dcsr-cpu-sb-proxy@41000 { - compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu1>; - reg = <0x41000 0x1000>; - }; - dcsr-cpu-sb-proxy@42000 { - compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu2>; - reg = <0x42000 0x1000>; - }; - dcsr-cpu-sb-proxy@43000 { - compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu3>; - reg = <0x43000 0x1000>; - }; - dcsr-cpu-sb-proxy@44000 { - compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu4>; - reg = <0x44000 0x1000>; - }; - dcsr-cpu-sb-proxy@45000 { - compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu5>; - reg = <0x45000 0x1000>; - }; - dcsr-cpu-sb-proxy@46000 { - compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu6>; - reg = <0x46000 0x1000>; - }; - dcsr-cpu-sb-proxy@47000 { - compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu7>; - reg = <0x47000 0x1000>; - }; - -}; - -&soc { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "simple-bus"; - - soc-sram-error { - compatible = "fsl,soc-sram-error"; - interrupts = <16 2 1 29>; - }; - - corenet-law@0 { - compatible = "fsl,corenet-law"; - reg = <0x0 0x1000>; - fsl,num-laws = <32>; - }; - - ddr1: memory-controller@8000 { - compatible = "fsl,qoriq-memory-controller-v4.4", "fsl,qoriq-memory-controller"; - reg = <0x8000 0x1000>; - interrupts = <16 2 1 23>; - }; - - ddr2: memory-controller@9000 { - compatible = "fsl,qoriq-memory-controller-v4.4","fsl,qoriq-memory-controller"; - reg = <0x9000 0x1000>; - interrupts = <16 2 1 22>; - }; - - cpc: l3-cache-controller@10000 { - compatible = "fsl,p4080-l3-cache-controller", "cache"; - reg = <0x10000 0x1000 - 0x11000 0x1000>; - interrupts = <16 2 1 27 - 16 2 1 26>; - }; - - corenet-cf@18000 { - compatible = "fsl,corenet-cf"; - reg = <0x18000 0x1000>; - interrupts = <16 2 1 31>; - fsl,ccf-num-csdids = <32>; - fsl,ccf-num-snoopids = <32>; - }; - - iommu@20000 { - compatible = "fsl,pamu-v1.0", "fsl,pamu"; - reg = <0x20000 0x5000>; - interrupts = < - 24 2 0 0 - 16 2 1 30>; - }; - -/include/ "qoriq-rmu-0.dtsi" -/include/ "qoriq-mpic.dtsi" - - guts: global-utilities@e0000 { - compatible = "fsl,qoriq-device-config-1.0"; - reg = <0xe0000 0xe00>; - fsl,has-rstcr; - 
#sleep-cells = <1>; - fsl,liodn-bits = <12>; - }; - - pins: global-utilities@e0e00 { - compatible = "fsl,qoriq-pin-control-1.0"; - reg = <0xe0e00 0x200>; - #sleep-cells = <2>; - }; - - clockgen: global-utilities@e1000 { - compatible = "fsl,p4080-clockgen", "fsl,qoriq-clockgen-1.0"; - reg = <0xe1000 0x1000>; - clock-frequency = <0>; - }; - - rcpm: global-utilities@e2000 { - compatible = "fsl,qoriq-rcpm-1.0"; - reg = <0xe2000 0x1000>; - #sleep-cells = <1>; - }; - - sfp: sfp@e8000 { - compatible = "fsl,p4080-sfp", "fsl,qoriq-sfp-1.0"; - reg = <0xe8000 0x1000>; - }; - - serdes: serdes@ea000 { - compatible = "fsl,p4080-serdes"; - reg = <0xea000 0x1000>; - }; - -/include/ "qoriq-dma-0.dtsi" -/include/ "qoriq-dma-1.dtsi" -/include/ "qoriq-espi-0.dtsi" - spi@110000 { - fsl,espi-num-chipselects = <4>; - }; - -/include/ "qoriq-esdhc-0.dtsi" - sdhc@114000 { - voltage-ranges = <3300 3300>; - sdhci,auto-cmd12; - }; - -/include/ "qoriq-i2c-0.dtsi" -/include/ "qoriq-i2c-1.dtsi" -/include/ "qoriq-duart-0.dtsi" -/include/ "qoriq-duart-1.dtsi" -/include/ "qoriq-gpio-0.dtsi" -/include/ "qoriq-usb2-mph-0.dtsi" -/include/ "qoriq-usb2-dr-0.dtsi" -/include/ "qoriq-sec4.0-0.dtsi" -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi deleted file mode 100644 index b9556ee3a639..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi +++ /dev/null @@ -1,143 +0,0 @@ -/* - * P4080/P4040 Silicon/SoC Device Tree Source (pre include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -/dts-v1/; -/ { - compatible = "fsl,P4080"; - #address-cells = <2>; - #size-cells = <2>; - interrupt-parent = <&mpic>; - - aliases { - ccsr = &soc; - dcsr = &dcsr; - - serial0 = &serial0; - serial1 = &serial1; - serial2 = &serial2; - serial3 = &serial3; - pci0 = &pci0; - pci1 = &pci1; - pci2 = &pci2; - usb0 = &usb0; - usb1 = &usb1; - dma0 = &dma0; - dma1 = &dma1; - sdhc = &sdhc; - msi0 = &msi0; - msi1 = &msi1; - msi2 = &msi2; - - crypto = &crypto; - sec_jr0 = &sec_jr0; - sec_jr1 = &sec_jr1; - sec_jr2 = &sec_jr2; - sec_jr3 = &sec_jr3; - rtic_a = &rtic_a; - rtic_b = &rtic_b; - rtic_c = &rtic_c; - rtic_d = &rtic_d; - sec_mon = &sec_mon; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - cpu0: PowerPC,e500mc@0 { - device_type = "cpu"; - reg = <0>; - next-level-cache = <&L2_0>; - L2_0: l2-cache { - next-level-cache = <&cpc>; - }; - }; - cpu1: PowerPC,e500mc@1 { - device_type = "cpu"; - reg = <1>; - next-level-cache = <&L2_1>; - L2_1: l2-cache { - next-level-cache = <&cpc>; - }; - }; - cpu2: PowerPC,e500mc@2 { - device_type = "cpu"; - reg = <2>; - next-level-cache = <&L2_2>; - L2_2: l2-cache { - next-level-cache = <&cpc>; - }; - }; - cpu3: PowerPC,e500mc@3 { - device_type = "cpu"; - reg = <3>; - next-level-cache = <&L2_3>; - L2_3: l2-cache { - next-level-cache = <&cpc>; - }; - }; - cpu4: PowerPC,e500mc@4 { - device_type = "cpu"; - reg = <4>; - next-level-cache = <&L2_4>; - L2_4: l2-cache { - next-level-cache = <&cpc>; - }; - }; - cpu5: PowerPC,e500mc@5 { - device_type = "cpu"; - reg = <5>; - next-level-cache = <&L2_5>; - L2_5: l2-cache { - next-level-cache = <&cpc>; - }; - }; - cpu6: PowerPC,e500mc@6 { - device_type = "cpu"; - reg = <6>; - next-level-cache = <&L2_6>; - L2_6: l2-cache { - next-level-cache = <&cpc>; - }; - }; - cpu7: PowerPC,e500mc@7 { - device_type = "cpu"; - reg = <7>; - next-level-cache = <&L2_7>; - L2_7: l2-cache { - next-level-cache = <&cpc>; - }; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi deleted file mode 100644 index 914074b91a85..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi +++ /dev/null @@ -1,355 +0,0 @@ -/* - * P5020/5010 Silicon/SoC Device Tree Source (post include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -&lbc { - compatible = "fsl,p5020-elbc", "fsl,elbc", "simple-bus"; - interrupts = <25 2 0 0>; - #address-cells = <2>; - #size-cells = <1>; -}; - -/* controller at 0x200000 */ -&pci0 { - compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0x0 0xff>; - clock-frequency = <33333333>; - interrupts = <16 2 1 15>; - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 1 15>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0 0 1 &mpic 40 1 0 0 - 0000 0 0 2 &mpic 1 1 0 0 - 0000 0 0 3 &mpic 2 1 0 0 - 0000 0 0 4 &mpic 3 1 0 0 - >; - }; -}; - -/* controller at 0x201000 */ -&pci1 { - compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0 0xff>; - clock-frequency = <33333333>; - interrupts = <16 2 1 14>; - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 1 14>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0 0 1 &mpic 41 1 0 0 - 0000 0 0 2 &mpic 5 1 0 0 - 0000 0 0 3 &mpic 6 1 0 0 - 0000 0 0 4 &mpic 7 1 0 0 - >; - }; -}; - -/* controller at 0x202000 */ -&pci2 { - compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0x0 0xff>; - clock-frequency = <33333333>; - interrupts = <16 2 1 13>; - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 1 13>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0 0 1 &mpic 42 1 0 0 - 0000 0 0 2 &mpic 9 1 0 0 - 0000 0 0 3 &mpic 10 1 0 0 - 0000 0 0 4 &mpic 11 1 0 0 - >; - }; -}; - -/* controller at 0x203000 */ -&pci3 { - compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2"; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - bus-range = <0x0 0xff>; - clock-frequency = <33333333>; - interrupts = <16 2 1 12>; - pcie@0 { - reg = <0 0 0 0 0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - interrupts = <16 2 1 12>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = < - /* IDSEL 0x0 */ - 0000 0 0 1 &mpic 43 1 0 0 - 0000 0 0 2 &mpic 0 1 0 0 - 0000 0 0 3 &mpic 4 1 0 0 - 0000 0 0 4 &mpic 8 1 0 0 - >; - }; -}; - -&rio { - compatible = "fsl,srio"; - interrupts = <16 2 1 11>; - #address-cells = <2>; - #size-cells = <2>; - ranges; - - port1 { - #address-cells = <2>; - #size-cells = <2>; - cell-index = <1>; - }; - - port2 { - #address-cells = <2>; - #size-cells = <2>; - cell-index = <2>; - }; -}; - -&dcsr { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,dcsr", "simple-bus"; - - dcsr-epu@0 { - compatible = "fsl,dcsr-epu"; - interrupts = <52 2 0 0 
- 84 2 0 0 - 85 2 0 0>; - reg = <0x0 0x1000>; - }; - dcsr-npc { - compatible = "fsl,dcsr-npc"; - reg = <0x1000 0x1000 0x1000000 0x8000>; - }; - dcsr-nxc@2000 { - compatible = "fsl,dcsr-nxc"; - reg = <0x2000 0x1000>; - }; - dcsr-corenet { - compatible = "fsl,dcsr-corenet"; - reg = <0x8000 0x1000 0xB0000 0x1000>; - }; - dcsr-dpaa@9000 { - compatible = "fsl,p5020-dcsr-dpaa", "fsl,dcsr-dpaa"; - reg = <0x9000 0x1000>; - }; - dcsr-ocn@11000 { - compatible = "fsl,p5020-dcsr-ocn", "fsl,dcsr-ocn"; - reg = <0x11000 0x1000>; - }; - dcsr-ddr@12000 { - compatible = "fsl,dcsr-ddr"; - dev-handle = <&ddr1>; - reg = <0x12000 0x1000>; - }; - dcsr-ddr@13000 { - compatible = "fsl,dcsr-ddr"; - dev-handle = <&ddr2>; - reg = <0x13000 0x1000>; - }; - dcsr-nal@18000 { - compatible = "fsl,p5020-dcsr-nal", "fsl,dcsr-nal"; - reg = <0x18000 0x1000>; - }; - dcsr-rcpm@22000 { - compatible = "fsl,p5020-dcsr-rcpm", "fsl,dcsr-rcpm"; - reg = <0x22000 0x1000>; - }; - dcsr-cpu-sb-proxy@40000 { - compatible = "fsl,dcsr-e5500-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu0>; - reg = <0x40000 0x1000>; - }; - dcsr-cpu-sb-proxy@41000 { - compatible = "fsl,dcsr-e5500-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; - cpu-handle = <&cpu1>; - reg = <0x41000 0x1000>; - }; -}; - -&soc { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "simple-bus"; - - soc-sram-error { - compatible = "fsl,soc-sram-error"; - interrupts = <16 2 1 29>; - }; - - corenet-law@0 { - compatible = "fsl,corenet-law"; - reg = <0x0 0x1000>; - fsl,num-laws = <32>; - }; - - ddr1: memory-controller@8000 { - compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller"; - reg = <0x8000 0x1000>; - interrupts = <16 2 1 23>; - }; - - ddr2: memory-controller@9000 { - compatible = "fsl,qoriq-memory-controller-v4.5","fsl,qoriq-memory-controller"; - reg = <0x9000 0x1000>; - interrupts = <16 2 1 22>; - }; - - cpc: l3-cache-controller@10000 { - compatible = "fsl,p5020-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache"; - reg = <0x10000 0x1000 - 0x11000 0x1000>; - interrupts = <16 2 1 27 - 16 2 1 26>; - }; - - corenet-cf@18000 { - compatible = "fsl,corenet-cf"; - reg = <0x18000 0x1000>; - interrupts = <16 2 1 31>; - fsl,ccf-num-csdids = <32>; - fsl,ccf-num-snoopids = <32>; - }; - - iommu@20000 { - compatible = "fsl,pamu-v1.0", "fsl,pamu"; - reg = <0x20000 0x4000>; - interrupts = < - 24 2 0 0 - 16 2 1 30>; - }; - -/include/ "qoriq-mpic.dtsi" - - guts: global-utilities@e0000 { - compatible = "fsl,qoriq-device-config-1.0"; - reg = <0xe0000 0xe00>; - fsl,has-rstcr; - #sleep-cells = <1>; - fsl,liodn-bits = <12>; - }; - - pins: global-utilities@e0e00 { - compatible = "fsl,qoriq-pin-control-1.0"; - reg = <0xe0e00 0x200>; - #sleep-cells = <2>; - }; - - clockgen: global-utilities@e1000 { - compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0"; - reg = <0xe1000 0x1000>; - clock-frequency = <0>; - }; - - rcpm: global-utilities@e2000 { - compatible = "fsl,qoriq-rcpm-1.0"; - reg = <0xe2000 0x1000>; - #sleep-cells = <1>; - }; - - sfp: sfp@e8000 { - compatible = "fsl,p5020-sfp", "fsl,qoriq-sfp-1.0"; - reg = <0xe8000 0x1000>; - }; - - serdes: serdes@ea000 { - compatible = "fsl,p5020-serdes"; - reg = <0xea000 0x1000>; - }; - -/include/ "qoriq-dma-0.dtsi" -/include/ "qoriq-dma-1.dtsi" -/include/ "qoriq-espi-0.dtsi" - spi@110000 { - fsl,espi-num-chipselects = <4>; - }; - -/include/ "qoriq-esdhc-0.dtsi" - sdhc@114000 { - sdhci,auto-cmd12; - }; - -/include/ "qoriq-i2c-0.dtsi" -/include/ "qoriq-i2c-1.dtsi" -/include/ 
"qoriq-duart-0.dtsi" -/include/ "qoriq-duart-1.dtsi" -/include/ "qoriq-gpio-0.dtsi" -/include/ "qoriq-usb2-mph-0.dtsi" - usb0: usb@210000 { - phy_type = "utmi"; - port0; - }; - -/include/ "qoriq-usb2-dr-0.dtsi" - usb1: usb@211000 { - dr_mode = "host"; - phy_type = "utmi"; - }; - -/include/ "qoriq-sata2-0.dtsi" -/include/ "qoriq-sata2-1.dtsi" -/include/ "qoriq-sec4.2-0.dtsi" -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi deleted file mode 100644 index ae823a47584e..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi +++ /dev/null @@ -1,96 +0,0 @@ -/* - * P5020/P5010 Silicon/SoC Device Tree Source (pre include) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -/dts-v1/; -/ { - compatible = "fsl,P5020"; - #address-cells = <2>; - #size-cells = <2>; - interrupt-parent = <&mpic>; - - aliases { - ccsr = &soc; - dcsr = &dcsr; - - serial0 = &serial0; - serial1 = &serial1; - serial2 = &serial2; - serial3 = &serial3; - pci0 = &pci0; - pci1 = &pci1; - pci2 = &pci2; - pci3 = &pci3; - usb0 = &usb0; - usb1 = &usb1; - dma0 = &dma0; - dma1 = &dma1; - sdhc = &sdhc; - msi0 = &msi0; - msi1 = &msi1; - msi2 = &msi2; - - crypto = &crypto; - sec_jr0 = &sec_jr0; - sec_jr1 = &sec_jr1; - sec_jr2 = &sec_jr2; - sec_jr3 = &sec_jr3; - rtic_a = &rtic_a; - rtic_b = &rtic_b; - rtic_c = &rtic_c; - rtic_d = &rtic_d; - sec_mon = &sec_mon; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - cpu0: PowerPC,e5500@0 { - device_type = "cpu"; - reg = <0>; - next-level-cache = <&L2_0>; - L2_0: l2-cache { - next-level-cache = <&cpc>; - }; - }; - cpu1: PowerPC,e5500@1 { - device_type = "cpu"; - reg = <1>; - next-level-cache = <&L2_1>; - L2_1: l2-cache { - next-level-cache = <&cpc>; - }; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-dma-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-dma-0.dtsi deleted file mode 100644 index b5b37ad30e75..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-dma-0.dtsi +++ /dev/null @@ -1,66 +0,0 @@ -/* - * PQ3 DMA device tree stub [ controller @ offset 0x21000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -dma@21300 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,eloplus-dma"; - reg = <0x21300 0x4>; - ranges = <0x0 0x21100 0x200>; - cell-index = <0>; - dma-channel@0 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x0 0x80>; - cell-index = <0>; - interrupts = <20 2 0 0>; - }; - dma-channel@80 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x80 0x80>; - cell-index = <1>; - interrupts = <21 2 0 0>; - }; - dma-channel@100 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x100 0x80>; - cell-index = <2>; - interrupts = <22 2 0 0>; - }; - dma-channel@180 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x180 0x80>; - cell-index = <3>; - interrupts = <23 2 0 0>; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-dma-1.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-dma-1.dtsi deleted file mode 100644 index 28cb8a55d807..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-dma-1.dtsi +++ /dev/null @@ -1,66 +0,0 @@ -/* - * PQ3 DMA device tree stub [ controller @ offset 0xc300 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -dma@c300 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,eloplus-dma"; - reg = <0xc300 0x4>; - ranges = <0x0 0xc100 0x200>; - cell-index = <1>; - dma-channel@0 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x0 0x80>; - cell-index = <0>; - interrupts = <76 2 0 0>; - }; - dma-channel@80 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x80 0x80>; - cell-index = <1>; - interrupts = <77 2 0 0>; - }; - dma-channel@100 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x100 0x80>; - cell-index = <2>; - interrupts = <78 2 0 0>; - }; - dma-channel@180 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x180 0x80>; - cell-index = <3>; - interrupts = <79 2 0 0>; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-duart-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-duart-0.dtsi deleted file mode 100644 index 5e268fdb9d1f..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-duart-0.dtsi +++ /dev/null @@ -1,51 +0,0 @@ -/* - * PQ3 DUART device tree stub [ controller @ offset 0x4000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -serial0: serial@4500 { - cell-index = <0>; - device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; - reg = <0x4500 0x100>; - clock-frequency = <0>; - interrupts = <42 2 0 0>; -}; - -serial1: serial@4600 { - cell-index = <1>; - device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; - reg = <0x4600 0x100>; - clock-frequency = <0>; - interrupts = <42 2 0 0>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-esdhc-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-esdhc-0.dtsi deleted file mode 100644 index 5743433e278e..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-esdhc-0.dtsi +++ /dev/null @@ -1,41 +0,0 @@ -/* - * PQ3 eSDHC device tree stub [ controller @ offset 0x2e000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -sdhc@2e000 { - compatible = "fsl,esdhc"; - reg = <0x2e000 0x1000>; - interrupts = <72 0x2 0 0>; - /* Filled in by U-Boot */ - clock-frequency = <0>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-espi-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-espi-0.dtsi deleted file mode 100644 index 75854b2e0391..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-espi-0.dtsi +++ /dev/null @@ -1,41 +0,0 @@ -/* - * PQ3 eSPI device tree stub [ controller @ offset 0x7000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -spi@7000 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,mpc8536-espi"; - reg = <0x7000 0x1000>; - interrupts = <59 0x2 0 0>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec1-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec1-0.dtsi deleted file mode 100644 index a1979ae334a7..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec1-0.dtsi +++ /dev/null @@ -1,53 +0,0 @@ -/* - * PQ3 eTSEC device tree stub [ @ offsets 0x24000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -ethernet@24000 { - #address-cells = <1>; - #size-cells = <1>; - cell-index = <0>; - device_type = "network"; - model = "eTSEC"; - compatible = "gianfar"; - reg = <0x24000 0x1000>; - ranges = <0x0 0x24000 0x1000>; - local-mac-address = [ 00 00 00 00 00 00 ]; - interrupts = <29 2 0 0 30 2 0 0 34 2 0 0>; -}; - -mdio@24520 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,gianfar-mdio"; - reg = <0x24520 0x20>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec1-1.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec1-1.dtsi deleted file mode 100644 index 4c4fdde1ec2a..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec1-1.dtsi +++ /dev/null @@ -1,53 +0,0 @@ -/* - * PQ3 eTSEC device tree stub [ @ offsets 0x25000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -ethernet@25000 { - #address-cells = <1>; - #size-cells = <1>; - cell-index = <1>; - device_type = "network"; - model = "eTSEC"; - compatible = "gianfar"; - reg = <0x25000 0x1000>; - ranges = <0x0 0x25000 0x1000>; - local-mac-address = [ 00 00 00 00 00 00 ]; - interrupts = <35 2 0 0 36 2 0 0 40 2 0 0>; -}; - -mdio@25520 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,gianfar-tbi"; - reg = <0x25520 0x20>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec1-2.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec1-2.dtsi deleted file mode 100644 index 4b8ab438668a..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec1-2.dtsi +++ /dev/null @@ -1,53 +0,0 @@ -/* - * PQ3 eTSEC device tree stub [ @ offsets 0x26000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -ethernet@26000 { - #address-cells = <1>; - #size-cells = <1>; - cell-index = <2>; - device_type = "network"; - model = "eTSEC"; - compatible = "gianfar"; - reg = <0x26000 0x1000>; - ranges = <0x0 0x26000 0x1000>; - local-mac-address = [ 00 00 00 00 00 00 ]; - interrupts = <31 2 0 0 32 2 0 0 33 2 0 0>; -}; - -mdio@26520 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,gianfar-tbi"; - reg = <0x26520 0x20>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec1-3.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec1-3.dtsi deleted file mode 100644 index 40c9137729ae..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec1-3.dtsi +++ /dev/null @@ -1,53 +0,0 @@ -/* - * PQ3 eTSEC device tree stub [ @ offsets 0x27000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. 
- * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -ethernet@27000 { - #address-cells = <1>; - #size-cells = <1>; - cell-index = <3>; - device_type = "network"; - model = "eTSEC"; - compatible = "gianfar"; - reg = <0x27000 0x1000>; - ranges = <0x0 0x27000 0x1000>; - local-mac-address = [ 00 00 00 00 00 00 ]; - interrupts = <37 2 0 0 38 2 0 0 39 2 0 0>; -}; - -mdio@27520 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,gianfar-tbi"; - reg = <0x27520 0x20>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec1-timer-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec1-timer-0.dtsi deleted file mode 100644 index efe2ca04bce8..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec1-timer-0.dtsi +++ /dev/null @@ -1,39 +0,0 @@ -/* - * PQ3 eTSEC Timer (IEEE 1588) device tree stub [ @ offsets 0x24e00 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -ptp_clock@24e00 { - compatible = "fsl,etsec-ptp"; - reg = <0x24e00 0xb0>; - interrupts = <68 2 0 0 69 2 0 0>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi deleted file mode 100644 index 1382fec9e8c5..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi +++ /dev/null @@ -1,60 +0,0 @@ -/* - * PQ3 eTSEC2 device tree stub [ @ offsets 0x24000/0xb0000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - - -mdio@24000 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,etsec2-mdio"; - reg = <0x24000 0x1000 0xb0030 0x4>; -}; - -ethernet@b0000 { - #address-cells = <1>; - #size-cells = <1>; - device_type = "network"; - model = "eTSEC"; - compatible = "fsl,etsec2"; - fsl,num_rx_queues = <0x8>; - fsl,num_tx_queues = <0x8>; - fsl,magic-packet; - local-mac-address = [ 00 00 00 00 00 00 ]; - - queue-group@b0000 { - #address-cells = <1>; - #size-cells = <1>; - reg = <0xb0000 0x1000>; - interrupts = <29 2 0 0 30 2 0 0 34 2 0 0>; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi deleted file mode 100644 index 221cd2ea5b31..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi +++ /dev/null @@ -1,60 +0,0 @@ -/* - * PQ3 eTSEC2 device tree stub [ @ offsets 0x25000/0xb1000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - - -mdio@25000 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,etsec2-tbi"; - reg = <0x25000 0x1000 0xb1030 0x4>; -}; - -ethernet@b1000 { - #address-cells = <1>; - #size-cells = <1>; - device_type = "network"; - model = "eTSEC"; - compatible = "fsl,etsec2"; - fsl,num_rx_queues = <0x8>; - fsl,num_tx_queues = <0x8>; - fsl,magic-packet; - local-mac-address = [ 00 00 00 00 00 00 ]; - - queue-group@b1000 { - #address-cells = <1>; - #size-cells = <1>; - reg = <0xb1000 0x1000>; - interrupts = <35 2 0 0 36 2 0 0 40 2 0 0>; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi deleted file mode 100644 index 61456c317609..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi +++ /dev/null @@ -1,59 +0,0 @@ -/* - * PQ3 eTSEC2 device tree stub [ @ offsets 0x26000/0xb2000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. 
- * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -mdio@26000 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,etsec2-tbi"; - reg = <0x26000 0x1000 0xb1030 0x4>; -}; - -ethernet@b2000 { - #address-cells = <1>; - #size-cells = <1>; - device_type = "network"; - model = "eTSEC"; - compatible = "fsl,etsec2"; - fsl,num_rx_queues = <0x8>; - fsl,num_tx_queues = <0x8>; - fsl,magic-packet; - local-mac-address = [ 00 00 00 00 00 00 ]; - - queue-group@b2000 { - #address-cells = <1>; - #size-cells = <1>; - reg = <0xb2000 0x1000>; - interrupts = <31 2 0 0 32 2 0 0 33 2 0 0>; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-0.dtsi deleted file mode 100644 index 034ab8fac22f..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-0.dtsi +++ /dev/null @@ -1,42 +0,0 @@ -/* - * PQ3 eTSEC2 Group 2 device tree stub [ @ offsets 0xb4000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -&enet0_grp2 { - queue-group@b4000 { - #address-cells = <1>; - #size-cells = <1>; - reg = <0xb4000 0x1000>; - interrupts = <17 2 0 0 18 2 0 0 24 2 0 0>; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-1.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-1.dtsi deleted file mode 100644 index 3be9ba3b374e..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-1.dtsi +++ /dev/null @@ -1,42 +0,0 @@ -/* - * PQ3 eTSEC2 Group 2 device tree stub [ @ offsets 0xb5000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -&enet1_grp2 { - queue-group@b5000 { - #address-cells = <1>; - #size-cells = <1>; - reg = <0xb5000 0x1000>; - interrupts = <51 2 0 0 52 2 0 0 67 2 0 0>; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-2.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-2.dtsi deleted file mode 100644 index 02a33457048c..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-2.dtsi +++ /dev/null @@ -1,42 +0,0 @@ -/* - * PQ3 eTSEC2 Group 2 device tree stub [ @ offsets 0xb6000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -&enet2_grp2 { - queue-group@b6000 { - #address-cells = <1>; - #size-cells = <1>; - reg = <0xb6000 0x1000>; - interrupts = <25 2 0 0 26 2 0 0 27 2 0 0>; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-gpio-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-gpio-0.dtsi deleted file mode 100644 index 72a3ef5945c1..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-gpio-0.dtsi +++ /dev/null @@ -1,41 +0,0 @@ -/* - * PQ3 GPIO device tree stub [ controller @ offset 0xf000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -gpio-controller@f000 { - #gpio-cells = <2>; - compatible = "fsl,pq3-gpio"; - reg = <0xf000 0x100>; - interrupts = <47 0x2 0 0>; - gpio-controller; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-i2c-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-i2c-0.dtsi deleted file mode 100644 index d1dd6fb82a78..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-i2c-0.dtsi +++ /dev/null @@ -1,43 +0,0 @@ -/* - * PQ3 I2C device tree stub [ controller @ offset 0x3000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -i2c@3000 { - #address-cells = <1>; - #size-cells = <0>; - cell-index = <0>; - compatible = "fsl-i2c"; - reg = <0x3000 0x100>; - interrupts = <43 2 0 0>; - dfsrr; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-i2c-1.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-i2c-1.dtsi deleted file mode 100644 index a9bd803e2090..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-i2c-1.dtsi +++ /dev/null @@ -1,43 +0,0 @@ -/* - * PQ3 I2C device tree stub [ controller @ offset 0x3100 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. 
- * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -i2c@3100 { - #address-cells = <1>; - #size-cells = <0>; - cell-index = <1>; - compatible = "fsl-i2c"; - reg = <0x3100 0x100>; - interrupts = <43 2 0 0>; - dfsrr; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-mpic-timer-B.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-mpic-timer-B.dtsi deleted file mode 100644 index 8734cffae1a1..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-mpic-timer-B.dtsi +++ /dev/null @@ -1,42 +0,0 @@ -/* - * PQ3 MPIC Timer (Group B) device tree stub [ controller @ offset 0x42100 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -timer@42100 { - compatible = "fsl,mpic-global-timer"; - reg = <0x42100 0x100 0x42300 4>; - interrupts = <4 0 3 0 - 5 0 3 0 - 6 0 3 0 - 7 0 3 0>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi deleted file mode 100644 index 5c8046065844..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi +++ /dev/null @@ -1,66 +0,0 @@ -/* - * PQ3 MPIC device tree stub [ controller @ offset 0x40000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -mpic: pic@40000 { - interrupt-controller; - #address-cells = <0>; - #interrupt-cells = <4>; - reg = <0x40000 0x40000>; - compatible = "fsl,mpic"; - device_type = "open-pic"; -}; - -timer@41100 { - compatible = "fsl,mpic-global-timer"; - reg = <0x41100 0x100 0x41300 4>; - interrupts = <0 0 3 0 - 1 0 3 0 - 2 0 3 0 - 3 0 3 0>; -}; - -msi@41600 { - compatible = "fsl,mpic-msi"; - reg = <0x41600 0x80>; - msi-available-ranges = <0 0x100>; - interrupts = < - 0xe0 0 0 0 - 0xe1 0 0 0 - 0xe2 0 0 0 - 0xe3 0 0 0 - 0xe4 0 0 0 - 0xe5 0 0 0 - 0xe6 0 0 0 - 0xe7 0 0 0>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-rmu-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-rmu-0.dtsi deleted file mode 100644 index 587ca9ffad7d..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-rmu-0.dtsi +++ /dev/null @@ -1,68 +0,0 @@ -/* - * PQ3 RIO Message Unit device tree stub [ controller @ offset 0xd3000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -rmu: rmu@d3000 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,srio-rmu"; - reg = <0xd3000 0x500>; - ranges = <0x0 0xd3000 0x500>; - - message-unit@0 { - compatible = "fsl,srio-msg-unit"; - reg = <0x0 0x100>; - interrupts = < - 53 2 0 0 /* msg1_tx_irq */ - 54 2 0 0>;/* msg1_rx_irq */ - }; - message-unit@100 { - compatible = "fsl,srio-msg-unit"; - reg = <0x100 0x100>; - interrupts = < - 55 2 0 0 /* msg2_tx_irq */ - 56 2 0 0>;/* msg2_rx_irq */ - }; - doorbell-unit@400 { - compatible = "fsl,srio-dbell-unit"; - reg = <0x400 0x80>; - interrupts = < - 49 2 0 0 /* bell_outb_irq */ - 50 2 0 0>;/* bell_inb_irq */ - }; - port-write-unit@4e0 { - compatible = "fsl,srio-port-write-unit"; - reg = <0x4e0 0x20>; - interrupts = <48 2 0 0>; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-sata2-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-sata2-0.dtsi deleted file mode 100644 index 3c28dd08d38b..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-sata2-0.dtsi +++ /dev/null @@ -1,40 +0,0 @@ -/* - * PQ3 SATAv2 device tree stub [ controller @ offset 0x18000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. 
- * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -sata@18000 { - compatible = "fsl,pq-sata-v2"; - reg = <0x18000 0x1000>; - cell-index = <1>; - interrupts = <74 0x2 0 0>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-sata2-1.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-sata2-1.dtsi deleted file mode 100644 index eefaf2855e3b..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-sata2-1.dtsi +++ /dev/null @@ -1,40 +0,0 @@ -/* - * PQ3 SATAv2 device tree stub [ controller @ offset 0x19000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -sata@19000 { - compatible = "fsl,pq-sata-v2"; - reg = <0x19000 0x1000>; - cell-index = <2>; - interrupts = <41 0x2 0 0>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-sec2.1-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-sec2.1-0.dtsi deleted file mode 100644 index 02a5c7ae72d0..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-sec2.1-0.dtsi +++ /dev/null @@ -1,43 +0,0 @@ -/* - * PQ3 Sec/Crypto 2.1 device tree stub [ controller @ offset 0x30000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -crypto@30000 { - compatible = "fsl,sec2.1", "fsl,sec2.0"; - reg = <0x30000 0x10000>; - interrupts = <45 2 0 0>; - fsl,num-channels = <4>; - fsl,channel-fifo-len = <24>; - fsl,exec-units-mask = <0xfe>; - fsl,descriptor-types-mask = <0x12b0ebf>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-sec3.0-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-sec3.0-0.dtsi deleted file mode 100644 index bba1ba44ccf0..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-sec3.0-0.dtsi +++ /dev/null @@ -1,45 +0,0 @@ -/* - * PQ3 Sec/Crypto 3.0 device tree stub [ controller @ offset 0x30000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. 
- * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -crypto@30000 { - compatible = "fsl,sec3.0", - "fsl,sec2.4", "fsl,sec2.2", "fsl,sec2.1", - "fsl,sec2.0"; - reg = <0x30000 0x10000>; - interrupts = <45 2 0 0 58 2 0 0>; - fsl,num-channels = <4>; - fsl,channel-fifo-len = <24>; - fsl,exec-units-mask = <0x9fe>; - fsl,descriptor-types-mask = <0x3ab0ebf>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-sec3.1-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-sec3.1-0.dtsi deleted file mode 100644 index 8f0a5669bee5..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-sec3.1-0.dtsi +++ /dev/null @@ -1,45 +0,0 @@ -/* - * PQ3 Sec/Crypto 3.1 device tree stub [ controller @ offset 0x30000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -crypto@30000 { - compatible = "fsl,sec3.1", "fsl,sec3.0", - "fsl,sec2.4", "fsl,sec2.2", "fsl,sec2.1", - "fsl,sec2.0"; - reg = <0x30000 0x10000>; - interrupts = <45 2 0 0 58 2 0 0>; - fsl,num-channels = <4>; - fsl,channel-fifo-len = <24>; - fsl,exec-units-mask = <0xbfe>; - fsl,descriptor-types-mask = <0x3ab0ebf>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-sec3.3-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-sec3.3-0.dtsi deleted file mode 100644 index c227f2748a24..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-sec3.3-0.dtsi +++ /dev/null @@ -1,45 +0,0 @@ -/* - * PQ3 Sec/Crypto 3.3 device tree stub [ controller @ offset 0x30000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -crypto@30000 { - compatible = "fsl,sec3.3", "fsl,sec3.1", "fsl,sec3.0", - "fsl,sec2.4", "fsl,sec2.2", "fsl,sec2.1", - "fsl,sec2.0"; - reg = <0x30000 0x10000>; - interrupts = <45 2 0 0 58 2 0 0>; - fsl,num-channels = <4>; - fsl,channel-fifo-len = <24>; - fsl,exec-units-mask = <0x97c>; - fsl,descriptor-types-mask = <0x3a30abf>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi deleted file mode 100644 index bf957a7fca2a..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi +++ /dev/null @@ -1,65 +0,0 @@ -/* - * PQ3 Sec/Crypto 4.4 device tree stub [ controller @ offset 0x30000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -crypto@30000 { - compatible = "fsl,sec4.4", "fsl,sec4.0"; - #address-cells = <1>; - #size-cells = <1>; - reg = <0x30000 0x10000>; - interrupts = <58 2 0 0>; - - sec_jr0: jr@1000 { - compatible = "fsl,sec4.4-job-ring", "fsl,sec4.0-job-ring"; - reg = <0x1000 0x1000>; - interrupts = <45 2 0 0>; - }; - - sec_jr1: jr@2000 { - compatible = "fsl,sec4.4-job-ring", "fsl,sec4.0-job-ring"; - reg = <0x2000 0x1000>; - interrupts = <45 2 0 0>; - }; - - sec_jr2: jr@3000 { - compatible = "fsl,sec4.4-job-ring", "fsl,sec4.0-job-ring"; - reg = <0x3000 0x1000>; - interrupts = <45 2 0 0>; - }; - - sec_jr3: jr@4000 { - compatible = "fsl,sec4.4-job-ring", "fsl,sec4.0-job-ring"; - reg = <0x4000 0x1000>; - interrupts = <45 2 0 0>; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-usb2-dr-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-usb2-dr-0.dtsi deleted file mode 100644 index 185ab9dc3ecd..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-usb2-dr-0.dtsi +++ /dev/null @@ -1,41 +0,0 @@ -/* - * PQ3 USB DR device tree stub [ controller @ offset 0x22000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. 
- * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -usb@22000 { - compatible = "fsl-usb2-dr"; - reg = <0x22000 0x1000>; - #address-cells = <1>; - #size-cells = <0>; - interrupts = <28 0x2 0 0>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/pq3-usb2-dr-1.dtsi b/trunk/arch/powerpc/boot/dts/fsl/pq3-usb2-dr-1.dtsi deleted file mode 100644 index fe24cd612fff..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/pq3-usb2-dr-1.dtsi +++ /dev/null @@ -1,41 +0,0 @@ -/* - * PQ3 USB DR device tree stub [ controller @ offset 0x23000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -usb@23000 { - compatible = "fsl-usb2-dr"; - reg = <0x23000 0x1000>; - #address-cells = <1>; - #size-cells = <0>; - interrupts = <46 0x2 0 0>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/qoriq-dma-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/qoriq-dma-0.dtsi deleted file mode 100644 index 1aebf3ea4ca5..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/qoriq-dma-0.dtsi +++ /dev/null @@ -1,66 +0,0 @@ -/* - * QorIQ DMA device tree stub [ controller @ offset 0x100000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -dma0: dma@100300 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,eloplus-dma"; - reg = <0x100300 0x4>; - ranges = <0x0 0x100100 0x200>; - cell-index = <0>; - dma-channel@0 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x0 0x80>; - cell-index = <0>; - interrupts = <28 2 0 0>; - }; - dma-channel@80 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x80 0x80>; - cell-index = <1>; - interrupts = <29 2 0 0>; - }; - dma-channel@100 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x100 0x80>; - cell-index = <2>; - interrupts = <30 2 0 0>; - }; - dma-channel@180 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x180 0x80>; - cell-index = <3>; - interrupts = <31 2 0 0>; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/qoriq-dma-1.dtsi b/trunk/arch/powerpc/boot/dts/fsl/qoriq-dma-1.dtsi deleted file mode 100644 index ecf5e180fe79..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/qoriq-dma-1.dtsi +++ /dev/null @@ -1,66 +0,0 @@ -/* - * QorIQ DMA device tree stub [ controller @ offset 0x101000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -dma1: dma@101300 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,eloplus-dma"; - reg = <0x101300 0x4>; - ranges = <0x0 0x101100 0x200>; - cell-index = <1>; - dma-channel@0 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x0 0x80>; - cell-index = <0>; - interrupts = <32 2 0 0>; - }; - dma-channel@80 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x80 0x80>; - cell-index = <1>; - interrupts = <33 2 0 0>; - }; - dma-channel@100 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x100 0x80>; - cell-index = <2>; - interrupts = <34 2 0 0>; - }; - dma-channel@180 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x180 0x80>; - cell-index = <3>; - interrupts = <35 2 0 0>; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/qoriq-duart-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/qoriq-duart-0.dtsi deleted file mode 100644 index 225c07b4e8ab..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/qoriq-duart-0.dtsi +++ /dev/null @@ -1,51 +0,0 @@ -/* - * QorIQ DUART device tree stub [ controller @ offset 0x11c000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -serial0: serial@11c500 { - cell-index = <0>; - device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; - reg = <0x11c500 0x100>; - clock-frequency = <0>; - interrupts = <36 2 0 0>; -}; - -serial1: serial@11c600 { - cell-index = <1>; - device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; - reg = <0x11c600 0x100>; - clock-frequency = <0>; - interrupts = <36 2 0 0>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/qoriq-duart-1.dtsi b/trunk/arch/powerpc/boot/dts/fsl/qoriq-duart-1.dtsi deleted file mode 100644 index d23233a56b91..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/qoriq-duart-1.dtsi +++ /dev/null @@ -1,51 +0,0 @@ -/* - * QorIQ DUART device tree stub [ controller @ offset 0x11d000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -serial2: serial@11d500 { - cell-index = <2>; - device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; - reg = <0x11d500 0x100>; - clock-frequency = <0>; - interrupts = <37 2 0 0>; -}; - -serial3: serial@11d600 { - cell-index = <3>; - device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; - reg = <0x11d600 0x100>; - clock-frequency = <0>; - interrupts = <37 2 0 0>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/qoriq-esdhc-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/qoriq-esdhc-0.dtsi deleted file mode 100644 index 20835ae216c7..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/qoriq-esdhc-0.dtsi +++ /dev/null @@ -1,40 +0,0 @@ -/* - * QorIQ eSDHC device tree stub [ controller @ offset 0x114000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -sdhc: sdhc@114000 { - compatible = "fsl,esdhc"; - reg = <0x114000 0x1000>; - interrupts = <48 2 0 0>; - clock-frequency = <0>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/qoriq-espi-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/qoriq-espi-0.dtsi deleted file mode 100644 index 6db06975e095..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/qoriq-espi-0.dtsi +++ /dev/null @@ -1,41 +0,0 @@ -/* - * QorIQ eSPI device tree stub [ controller @ offset 0x110000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -spi@110000 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,mpc8536-espi"; - reg = <0x110000 0x1000>; - interrupts = <53 0x2 0 0>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/qoriq-gpio-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/qoriq-gpio-0.dtsi deleted file mode 100644 index cf714f5f68bc..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/qoriq-gpio-0.dtsi +++ /dev/null @@ -1,41 +0,0 @@ -/* - * QorIQ GPIO device tree stub [ controller @ offset 0x130000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. 
- * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -gpio0: gpio@130000 { - compatible = "fsl,qoriq-gpio"; - reg = <0x130000 0x1000>; - interrupts = <55 2 0 0>; - #gpio-cells = <2>; - gpio-controller; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/qoriq-i2c-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/qoriq-i2c-0.dtsi deleted file mode 100644 index 5f9bf7debe4c..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/qoriq-i2c-0.dtsi +++ /dev/null @@ -1,53 +0,0 @@ -/* - * QorIQ I2C device tree stub [ controller @ offset 0x118000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -i2c@118000 { - #address-cells = <1>; - #size-cells = <0>; - cell-index = <0>; - compatible = "fsl-i2c"; - reg = <0x118000 0x100>; - interrupts = <38 2 0 0>; - dfsrr; -}; - -i2c@118100 { - #address-cells = <1>; - #size-cells = <0>; - cell-index = <1>; - compatible = "fsl-i2c"; - reg = <0x118100 0x100>; - interrupts = <38 2 0 0>; - dfsrr; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/qoriq-i2c-1.dtsi b/trunk/arch/powerpc/boot/dts/fsl/qoriq-i2c-1.dtsi deleted file mode 100644 index 7989bf5eeb53..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/qoriq-i2c-1.dtsi +++ /dev/null @@ -1,53 +0,0 @@ -/* - * QorIQ I2C device tree stub [ controller @ offset 0x119000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -i2c@119000 { - #address-cells = <1>; - #size-cells = <0>; - cell-index = <2>; - compatible = "fsl-i2c"; - reg = <0x119000 0x100>; - interrupts = <39 2 0 0>; - dfsrr; -}; - -i2c@119100 { - #address-cells = <1>; - #size-cells = <0>; - cell-index = <3>; - compatible = "fsl-i2c"; - reg = <0x119100 0x100>; - interrupts = <39 2 0 0>; - dfsrr; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/qoriq-mpic.dtsi b/trunk/arch/powerpc/boot/dts/fsl/qoriq-mpic.dtsi deleted file mode 100644 index b9bada6a87dc..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/qoriq-mpic.dtsi +++ /dev/null @@ -1,106 +0,0 @@ -/* - * QorIQ MPIC device tree stub [ controller @ offset 0x40000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -mpic: pic@40000 { - interrupt-controller; - #address-cells = <0>; - #interrupt-cells = <4>; - reg = <0x40000 0x40000>; - compatible = "fsl,mpic", "chrp,open-pic"; - device_type = "open-pic"; - clock-frequency = <0x0>; -}; - -timer@41100 { - compatible = "fsl,mpic-global-timer"; - reg = <0x41100 0x100 0x41300 4>; - interrupts = <0 0 3 0 - 1 0 3 0 - 2 0 3 0 - 3 0 3 0>; -}; - -msi0: msi@41600 { - compatible = "fsl,mpic-msi"; - reg = <0x41600 0x200>; - msi-available-ranges = <0 0x100>; - interrupts = < - 0xe0 0 0 0 - 0xe1 0 0 0 - 0xe2 0 0 0 - 0xe3 0 0 0 - 0xe4 0 0 0 - 0xe5 0 0 0 - 0xe6 0 0 0 - 0xe7 0 0 0>; -}; - -msi1: msi@41800 { - compatible = "fsl,mpic-msi"; - reg = <0x41800 0x200>; - msi-available-ranges = <0 0x100>; - interrupts = < - 0xe8 0 0 0 - 0xe9 0 0 0 - 0xea 0 0 0 - 0xeb 0 0 0 - 0xec 0 0 0 - 0xed 0 0 0 - 0xee 0 0 0 - 0xef 0 0 0>; -}; - -msi2: msi@41a00 { - compatible = "fsl,mpic-msi"; - reg = <0x41a00 0x200>; - msi-available-ranges = <0 0x100>; - interrupts = < - 0xf0 0 0 0 - 0xf1 0 0 0 - 0xf2 0 0 0 - 0xf3 0 0 0 - 0xf4 0 0 0 - 0xf5 0 0 0 - 0xf6 0 0 0 - 0xf7 0 0 0>; -}; - -timer@42100 { - compatible = "fsl,mpic-global-timer"; - reg = <0x42100 0x100 0x42300 4>; - interrupts = <4 0 3 0 - 5 0 3 0 - 6 0 3 0 - 7 0 3 0>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/qoriq-rmu-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/qoriq-rmu-0.dtsi deleted file mode 100644 index ca7fec792e53..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/qoriq-rmu-0.dtsi +++ /dev/null @@ -1,68 +0,0 @@ -/* - * QorIQ RIO Message Unit device tree stub [ controller @ offset 0xd3000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -rmu: rmu@d3000 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,srio-rmu"; - reg = <0xd3000 0x500>; - ranges = <0x0 0xd3000 0x500>; - - message-unit@0 { - compatible = "fsl,srio-msg-unit"; - reg = <0x0 0x100>; - interrupts = < - 60 2 0 0 /* msg1_tx_irq */ - 61 2 0 0>;/* msg1_rx_irq */ - }; - message-unit@100 { - compatible = "fsl,srio-msg-unit"; - reg = <0x100 0x100>; - interrupts = < - 62 2 0 0 /* msg2_tx_irq */ - 63 2 0 0>;/* msg2_rx_irq */ - }; - doorbell-unit@400 { - compatible = "fsl,srio-dbell-unit"; - reg = <0x400 0x80>; - interrupts = < - 56 2 0 0 /* bell_outb_irq */ - 57 2 0 0>;/* bell_inb_irq */ - }; - port-write-unit@4e0 { - compatible = "fsl,srio-port-write-unit"; - reg = <0x4e0 0x20>; - interrupts = <16 2 1 11>; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/qoriq-sata2-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/qoriq-sata2-0.dtsi deleted file mode 100644 index b642047fdecf..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/qoriq-sata2-0.dtsi +++ /dev/null @@ -1,39 +0,0 @@ -/* - * QorIQ SATAv2 device tree stub [ controller @ offset 0x220000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. 
- * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -sata@220000 { - compatible = "fsl,pq-sata-v2"; - reg = <0x220000 0x1000>; - interrupts = <68 0x2 0 0>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/qoriq-sata2-1.dtsi b/trunk/arch/powerpc/boot/dts/fsl/qoriq-sata2-1.dtsi deleted file mode 100644 index c57370259750..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/qoriq-sata2-1.dtsi +++ /dev/null @@ -1,39 +0,0 @@ -/* - * QorIQ SATAv2 device tree stub [ controller @ offset 0x221000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -sata@221000 { - compatible = "fsl,pq-sata-v2"; - reg = <0x221000 0x1000>; - interrupts = <69 0x2 0 0>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/qoriq-sec4.0-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/qoriq-sec4.0-0.dtsi deleted file mode 100644 index 0cbbac329539..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/qoriq-sec4.0-0.dtsi +++ /dev/null @@ -1,100 +0,0 @@ -/* - * QorIQ Sec/Crypto 4.0 device tree stub [ controller @ offset 0x300000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -crypto: crypto@300000 { - compatible = "fsl,sec-v4.0"; - #address-cells = <1>; - #size-cells = <1>; - reg = <0x300000 0x10000>; - ranges = <0 0x300000 0x10000>; - interrupts = <92 2 0 0>; - - sec_jr0: jr@1000 { - compatible = "fsl,sec-v4.0-job-ring"; - reg = <0x1000 0x1000>; - interrupts = <88 2 0 0>; - }; - - sec_jr1: jr@2000 { - compatible = "fsl,sec-v4.0-job-ring"; - reg = <0x2000 0x1000>; - interrupts = <89 2 0 0>; - }; - - sec_jr2: jr@3000 { - compatible = "fsl,sec-v4.0-job-ring"; - reg = <0x3000 0x1000>; - interrupts = <90 2 0 0>; - }; - - sec_jr3: jr@4000 { - compatible = "fsl,sec-v4.0-job-ring"; - reg = <0x4000 0x1000>; - interrupts = <91 2 0 0>; - }; - - rtic@6000 { - compatible = "fsl,sec-v4.0-rtic"; - #address-cells = <1>; - #size-cells = <1>; - reg = <0x6000 0x100>; - ranges = <0x0 0x6100 0xe00>; - - rtic_a: rtic-a@0 { - compatible = "fsl,sec-v4.0-rtic-memory"; - reg = <0x00 0x20 0x100 0x80>; - }; - - rtic_b: rtic-b@20 { - compatible = "fsl,sec-v4.0-rtic-memory"; - reg = <0x20 0x20 0x200 0x80>; - }; - - rtic_c: rtic-c@40 { - compatible = "fsl,sec-v4.0-rtic-memory"; - reg = <0x40 0x20 0x300 0x80>; - }; - - rtic_d: rtic-d@60 { - compatible = "fsl,sec-v4.0-rtic-memory"; - reg = <0x60 0x20 0x500 0x80>; - }; - }; -}; - -sec_mon: sec_mon@314000 { - compatible = "fsl,sec-v4.0-mon"; - reg = <0x314000 0x1000>; - interrupts = <93 2 0 0>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/qoriq-sec4.1-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/qoriq-sec4.1-0.dtsi deleted file mode 100644 index 3308986bba0d..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/qoriq-sec4.1-0.dtsi +++ /dev/null @@ -1,109 +0,0 @@ -/* - * QorIQ Sec/Crypto 4.1 device tree stub [ controller @ offset 0x300000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -crypto: crypto@300000 { - compatible = "fsl,sec-v4.1", "fsl,sec-v4.0"; - #address-cells = <1>; - #size-cells = <1>; - reg = <0x300000 0x10000>; - ranges = <0 0x300000 0x10000>; - interrupts = <92 2 0 0>; - - sec_jr0: jr@1000 { - compatible = "fsl,sec-v4.1-job-ring", - "fsl,sec-v4.0-job-ring"; - reg = <0x1000 0x1000>; - interrupts = <88 2 0 0>; - }; - - sec_jr1: jr@2000 { - compatible = "fsl,sec-v4.1-job-ring", - "fsl,sec-v4.0-job-ring"; - reg = <0x2000 0x1000>; - interrupts = <89 2 0 0>; - }; - - sec_jr2: jr@3000 { - compatible = "fsl,sec-v4.1-job-ring", - "fsl,sec-v4.0-job-ring"; - reg = <0x3000 0x1000>; - interrupts = <90 2 0 0>; - }; - - sec_jr3: jr@4000 { - compatible = "fsl,sec-v4.1-job-ring", - "fsl,sec-v4.0-job-ring"; - reg = <0x4000 0x1000>; - interrupts = <91 2 0 0>; - }; - - rtic@6000 { - compatible = "fsl,sec-v4.1-rtic", - "fsl,sec-v4.0-rtic"; - #address-cells = <1>; - #size-cells = <1>; - reg = <0x6000 0x100>; - ranges = <0x0 0x6100 0xe00>; - - rtic_a: rtic-a@0 { - compatible = "fsl,sec-v4.1-rtic-memory", - "fsl,sec-v4.0-rtic-memory"; - reg = <0x00 0x20 0x100 0x80>; - }; - - rtic_b: rtic-b@20 { - compatible = "fsl,sec-v4.1-rtic-memory", - "fsl,sec-v4.0-rtic-memory"; - reg = <0x20 0x20 0x200 0x80>; - }; - - rtic_c: rtic-c@40 { - compatible = "fsl,sec-v4.1-rtic-memory", - "fsl,sec-v4.0-rtic-memory"; - reg = <0x40 0x20 0x300 0x80>; - }; - - rtic_d: rtic-d@60 { - compatible = "fsl,sec-v4.1-rtic-memory", - "fsl,sec-v4.0-rtic-memory"; - reg = <0x60 0x20 0x500 0x80>; - }; - }; -}; - -sec_mon: sec_mon@314000 { - compatible = "fsl,sec-v4.1-mon", "fsl,sec-v4.0-mon"; - reg = <0x314000 0x1000>; - interrupts = <93 2 0 0>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/qoriq-sec4.2-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/qoriq-sec4.2-0.dtsi deleted file mode 100644 index 7990e0d3d6f2..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/qoriq-sec4.2-0.dtsi +++ /dev/null @@ -1,109 +0,0 @@ -/* - * QorIQ Sec/Crypto 4.2 device tree stub [ controller @ offset 0x300000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -crypto: crypto@300000 { - compatible = "fsl,sec-v4.2", "fsl,sec-v4.0"; - #address-cells = <1>; - #size-cells = <1>; - reg = <0x300000 0x10000>; - ranges = <0 0x300000 0x10000>; - interrupts = <92 2 0 0>; - - sec_jr0: jr@1000 { - compatible = "fsl,sec-v4.2-job-ring", - "fsl,sec-v4.0-job-ring"; - reg = <0x1000 0x1000>; - interrupts = <88 2 0 0>; - }; - - sec_jr1: jr@2000 { - compatible = "fsl,sec-v4.2-job-ring", - "fsl,sec-v4.0-job-ring"; - reg = <0x2000 0x1000>; - interrupts = <89 2 0 0>; - }; - - sec_jr2: jr@3000 { - compatible = "fsl,sec-v4.2-job-ring", - "fsl,sec-v4.0-job-ring"; - reg = <0x3000 0x1000>; - interrupts = <90 2 0 0>; - }; - - sec_jr3: jr@4000 { - compatible = "fsl,sec-v4.2-job-ring", - "fsl,sec-v4.0-job-ring"; - reg = <0x4000 0x1000>; - interrupts = <91 2 0 0>; - }; - - rtic@6000 { - compatible = "fsl,sec-v4.2-rtic", - "fsl,sec-v4.0-rtic"; - #address-cells = <1>; - #size-cells = <1>; - reg = <0x6000 0x100>; - ranges = <0x0 0x6100 0xe00>; - - rtic_a: rtic-a@0 { - compatible = "fsl,sec-v4.2-rtic-memory", - "fsl,sec-v4.0-rtic-memory"; - reg = <0x00 0x20 0x100 0x80>; - }; - - rtic_b: rtic-b@20 { - compatible = "fsl,sec-v4.2-rtic-memory", - "fsl,sec-v4.0-rtic-memory"; - reg = <0x20 0x20 0x200 0x80>; - }; - - rtic_c: rtic-c@40 { - compatible = "fsl,sec-v4.2-rtic-memory", - "fsl,sec-v4.0-rtic-memory"; - reg = <0x40 0x20 0x300 0x80>; - }; - - rtic_d: rtic-d@60 { - compatible = "fsl,sec-v4.2-rtic-memory", - "fsl,sec-v4.0-rtic-memory"; - reg = <0x60 0x20 0x500 0x80>; - }; - }; -}; - -sec_mon: sec_mon@314000 { - compatible = "fsl,sec-v4.2-mon", "fsl,sec-v4.0-mon"; - reg = <0x314000 0x1000>; - interrupts = <93 2 0 0>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/qoriq-usb2-dr-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/qoriq-usb2-dr-0.dtsi deleted file mode 100644 index 4dd6f84c239c..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/qoriq-usb2-dr-0.dtsi +++ /dev/null @@ -1,41 +0,0 @@ -/* - * QorIQ USB DR device tree stub [ controller @ offset 0x211000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. 
- * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -usb@211000 { - compatible = "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr"; - reg = <0x211000 0x1000>; - #address-cells = <1>; - #size-cells = <0>; - interrupts = <45 0x2 0 0>; -}; diff --git a/trunk/arch/powerpc/boot/dts/fsl/qoriq-usb2-mph-0.dtsi b/trunk/arch/powerpc/boot/dts/fsl/qoriq-usb2-mph-0.dtsi deleted file mode 100644 index f053835aa1c7..000000000000 --- a/trunk/arch/powerpc/boot/dts/fsl/qoriq-usb2-mph-0.dtsi +++ /dev/null @@ -1,41 +0,0 @@ -/* - * QorIQ USB Host device tree stub [ controller @ offset 0x210000 ] - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -usb@210000 { - compatible = "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph"; - reg = <0x210000 0x1000>; - #address-cells = <1>; - #size-cells = <0>; - interrupts = <44 0x2 0 0>; -}; diff --git a/trunk/arch/powerpc/boot/dts/gef_ppc9a.dts b/trunk/arch/powerpc/boot/dts/gef_ppc9a.dts index 38dcb96c8e26..2266bbb303d0 100644 --- a/trunk/arch/powerpc/boot/dts/gef_ppc9a.dts +++ b/trunk/arch/powerpc/boot/dts/gef_ppc9a.dts @@ -339,7 +339,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <0x2a 0x2>; @@ -349,7 +349,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <0x1c 0x2>; diff --git a/trunk/arch/powerpc/boot/dts/gef_sbc310.dts b/trunk/arch/powerpc/boot/dts/gef_sbc310.dts index 5ab8932d09b7..429e87d9acef 100644 --- a/trunk/arch/powerpc/boot/dts/gef_sbc310.dts +++ b/trunk/arch/powerpc/boot/dts/gef_sbc310.dts @@ -337,7 +337,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <0x2a 0x2>; @@ -347,7 +347,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <0x1c 0x2>; diff --git a/trunk/arch/powerpc/boot/dts/gef_sbc610.dts b/trunk/arch/powerpc/boot/dts/gef_sbc610.dts index d5341f5741aa..d81201ac2cad 100644 --- a/trunk/arch/powerpc/boot/dts/gef_sbc610.dts +++ b/trunk/arch/powerpc/boot/dts/gef_sbc610.dts @@ -337,7 +337,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <0x2a 0x2>; @@ -347,7 +347,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <0x1c 0x2>; diff --git a/trunk/arch/powerpc/boot/dts/klondike.dts b/trunk/arch/powerpc/boot/dts/klondike.dts deleted file mode 100644 index 8c9429033618..000000000000 --- a/trunk/arch/powerpc/boot/dts/klondike.dts +++ /dev/null @@ -1,227 +0,0 @@ -/* - * Device Tree for Klondike (APM8018X) board. - * - * Copyright (c) 2010, Applied Micro Circuits Corporation - * Author: Tanmay Inamdar - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of - * the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, - * MA 02111-1307 USA - * - */ - -/dts-v1/; - -/ { - #address-cells = <1>; - #size-cells = <1>; - model = "apm,klondike"; - compatible = "apm,klondike"; - dcr-parent = <&{/cpus/cpu@0}>; - - aliases { - ethernet0 = &EMAC0; - ethernet1 = &EMAC1; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - cpu@0 { - device_type = "cpu"; - model = "PowerPC,apm8018x"; - reg = <0x00000000>; - clock-frequency = <300000000>; /* Filled in by U-Boot */ - timebase-frequency = <300000000>; /* Filled in by U-Boot */ - i-cache-line-size = <32>; - d-cache-line-size = <32>; - i-cache-size = <16384>; /* 16 kB */ - d-cache-size = <16384>; /* 16 kB */ - dcr-controller; - dcr-access-method = "native"; - }; - }; - - memory { - device_type = "memory"; - reg = <0x00000000 0x20000000>; /* Filled in by U-Boot */ - }; - - UIC0: interrupt-controller { - compatible = "ibm,uic"; - interrupt-controller; - cell-index = <0>; - dcr-reg = <0x0c0 0x010>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - }; - - UIC1: interrupt-controller1 { - compatible = "ibm,uic"; - interrupt-controller; - cell-index = <1>; - dcr-reg = <0x0d0 0x010>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - interrupts = <0x1e 0x4 0x1f 0x4>; /* cascade */ - interrupt-parent = <&UIC0>; - }; - - UIC2: interrupt-controller2 { - compatible = "ibm,uic"; - interrupt-controller; - cell-index = <2>; - dcr-reg = <0x0e0 0x010>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - interrupts = <0x0a 0x4 0x0b 0x4>; /* cascade */ - interrupt-parent = <&UIC0>; - }; - - UIC3: interrupt-controller3 { - compatible = "ibm,uic"; - interrupt-controller; - cell-index = <3>; - dcr-reg = <0x0f0 0x010>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - interrupts = <0x10 0x4 0x11 0x4>; /* cascade */ - interrupt-parent = <&UIC0>; - }; - - plb { - compatible = "ibm,plb4"; - #address-cells = <1>; - #size-cells = <1>; - ranges; - clock-frequency = <0>; /* Filled in by U-Boot */ - - SDRAM0: memory-controller { - compatible = "ibm,sdram-apm8018x"; - dcr-reg = <0x010 0x002>; - }; - - MAL0: mcmal { - compatible = "ibm,mcmal2"; - dcr-reg = <0x180 0x062>; - num-tx-chans = <2>; - num-rx-chans = <16>; - #address-cells = <0>; - #size-cells = <0>; - interrupt-parent = <&UIC1>; - interrupts = ; - }; - - POB0: opb { - compatible = "ibm,opb"; - #address-cells = <1>; - #size-cells = <1>; - ranges = <0x20000000 0x20000000 0x30000000 - 0x50000000 0x50000000 0x10000000 - 0x60000000 0x60000000 0x10000000 - 0xFE000000 0xFE000000 0x00010000>; - dcr-reg = <0x100 0x020>; - clock-frequency = <300000000>; /* Filled in by U-Boot */ - - RGMII0: emac-rgmii@400a2000 { - compatible = "ibm,rgmii"; - reg = <0x400a2000 0x00000010>; - has-mdio; - }; - - TAH0: emac-tah@400a3000 { - compatible = "ibm,tah"; - reg = <0x400a3000 0x100>; - }; - - TAH1: emac-tah@400a4000 { - compatible = "ibm,tah"; - reg = <0x400a4000 0x100>; - }; - - EMAC0: ethernet@400a0000 { - compatible = "ibm,emac4", "ibm-emac4sync"; - interrupt-parent = <&EMAC0>; - interrupts = <0x0>; - #interrupt-cells = <1>; - #address-cells = <0>; - #size-cells = <0>; - interrupt-map = ; - reg = <0x400a0000 0x00000100>; - local-mac-address = [000000000000]; /* Filled in by U-Boot */ - mal-device = <&MAL0>; - mal-tx-channel = <0x0>; - mal-rx-channel = <0x0>; - cell-index = 
<0>; - max-frame-size = <9000>; - rx-fifo-size = <4096>; - tx-fifo-size = <2048>; - phy-mode = "rgmii"; - phy-address = <0x2>; - turbo = "no"; - phy-map = <0x00000000>; - rgmii-device = <&RGMII0>; - rgmii-channel = <0>; - tah-device = <&TAH0>; - tah-channel = <0>; - has-inverted-stacr-oc; - has-new-stacr-staopc; - }; - - EMAC1: ethernet@400a1000 { - compatible = "ibm,emac4", "ibm-emac4sync"; - status = "disabled"; - interrupt-parent = <&EMAC1>; - interrupts = <0x0>; - #interrupt-cells = <1>; - #address-cells = <0>; - #size-cells = <0>; - interrupt-map = ; - reg = <0x400a1000 0x00000100>; - local-mac-address = [000000000000]; /* Filled in by U-Boot */ - mal-device = <&MAL0>; - mal-tx-channel = <1>; - mal-rx-channel = <8>; - cell-index = <1>; - max-frame-size = <9000>; - rx-fifo-size = <4096>; - tx-fifo-size = <2048>; - phy-mode = "rgmii"; - phy-address = <0x3>; - turbo = "no"; - phy-map = <0x00000000>; - rgmii-device = <&RGMII0>; - rgmii-channel = <1>; - tah-device = <&TAH1>; - tah-channel = <0>; - has-inverted-stacr-oc; - has-new-stacr-staopc; - mdio-device = <&EMAC0>; - }; - }; - }; - - chosen { - linux,stdout-path = "/plb/opb/serial@50001000"; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/kmeter1.dts b/trunk/arch/powerpc/boot/dts/kmeter1.dts index 983aee185793..d16bae1230f7 100644 --- a/trunk/arch/powerpc/boot/dts/kmeter1.dts +++ b/trunk/arch/powerpc/boot/dts/kmeter1.dts @@ -80,7 +80,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <264000000>; interrupts = <9 0x8>; diff --git a/trunk/arch/powerpc/boot/dts/kuroboxHD.dts b/trunk/arch/powerpc/boot/dts/kuroboxHD.dts index 0a4545159e80..8d725d10882f 100644 --- a/trunk/arch/powerpc/boot/dts/kuroboxHD.dts +++ b/trunk/arch/powerpc/boot/dts/kuroboxHD.dts @@ -84,7 +84,7 @@ XXXX add flash parts, rtc, ?? serial0: serial@80004500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x80004500 0x8>; clock-frequency = <97553800>; current-speed = <9600>; @@ -95,7 +95,7 @@ XXXX add flash parts, rtc, ?? serial1: serial@80004600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x80004600 0x8>; clock-frequency = <97553800>; current-speed = <57600>; diff --git a/trunk/arch/powerpc/boot/dts/kuroboxHG.dts b/trunk/arch/powerpc/boot/dts/kuroboxHG.dts index 0e758b347cdb..b13a11eb81b0 100644 --- a/trunk/arch/powerpc/boot/dts/kuroboxHG.dts +++ b/trunk/arch/powerpc/boot/dts/kuroboxHG.dts @@ -84,7 +84,7 @@ XXXX add flash parts, rtc, ?? serial0: serial@80004500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x80004500 0x8>; clock-frequency = <130041000>; current-speed = <9600>; @@ -95,7 +95,7 @@ XXXX add flash parts, rtc, ?? 
serial1: serial@80004600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x80004600 0x8>; clock-frequency = <130041000>; current-speed = <57600>; diff --git a/trunk/arch/powerpc/boot/dts/mpc8308_p1m.dts b/trunk/arch/powerpc/boot/dts/mpc8308_p1m.dts index 22b0832b6c31..697b3f6b78bf 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8308_p1m.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8308_p1m.dts @@ -233,7 +233,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <133333333>; interrupts = <9 0x8>; @@ -243,7 +243,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <133333333>; interrupts = <10 0x8>; diff --git a/trunk/arch/powerpc/boot/dts/mpc8308rdb.dts b/trunk/arch/powerpc/boot/dts/mpc8308rdb.dts index f66d10d95a8d..a0bd1881081e 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8308rdb.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8308rdb.dts @@ -208,7 +208,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <133333333>; interrupts = <9 0x8>; @@ -218,7 +218,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <133333333>; interrupts = <10 0x8>; diff --git a/trunk/arch/powerpc/boot/dts/mpc8313erdb.dts b/trunk/arch/powerpc/boot/dts/mpc8313erdb.dts index 1c836c6c5be6..ac1eb320c7b4 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8313erdb.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8313erdb.dts @@ -261,7 +261,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <9 0x8>; @@ -271,7 +271,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <10 0x8>; diff --git a/trunk/arch/powerpc/boot/dts/mpc8315erdb.dts b/trunk/arch/powerpc/boot/dts/mpc8315erdb.dts index 811848e93aef..4dd08c322979 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8315erdb.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8315erdb.dts @@ -265,7 +265,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <133333333>; interrupts = <9 0x8>; @@ -275,7 +275,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <133333333>; interrupts = <10 0x8>; diff --git a/trunk/arch/powerpc/boot/dts/mpc832x_mds.dts b/trunk/arch/powerpc/boot/dts/mpc832x_mds.dts index da9c72ddc343..05ad8c98e527 100644 --- a/trunk/arch/powerpc/boot/dts/mpc832x_mds.dts +++ b/trunk/arch/powerpc/boot/dts/mpc832x_mds.dts @@ -105,7 +105,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <9 0x8>; @@ -115,7 +115,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = 
"fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <10 0x8>; diff --git a/trunk/arch/powerpc/boot/dts/mpc832x_rdb.dts b/trunk/arch/powerpc/boot/dts/mpc832x_rdb.dts index ff7b15b340a3..f4fadb23ad6f 100644 --- a/trunk/arch/powerpc/boot/dts/mpc832x_rdb.dts +++ b/trunk/arch/powerpc/boot/dts/mpc832x_rdb.dts @@ -83,7 +83,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <9 0x8>; @@ -93,7 +93,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <10 0x8>; diff --git a/trunk/arch/powerpc/boot/dts/mpc8349emitx.dts b/trunk/arch/powerpc/boot/dts/mpc8349emitx.dts index 2608679d0d4a..505dc842d808 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8349emitx.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8349emitx.dts @@ -283,7 +283,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; // from bootloader interrupts = <9 0x8>; @@ -293,7 +293,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; // from bootloader interrupts = <10 0x8>; diff --git a/trunk/arch/powerpc/boot/dts/mpc8349emitxgp.dts b/trunk/arch/powerpc/boot/dts/mpc8349emitxgp.dts index 6cd044d8fb89..eb732115f016 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8349emitxgp.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8349emitxgp.dts @@ -189,7 +189,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; // from bootloader interrupts = <9 0x8>; @@ -199,7 +199,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; // from bootloader interrupts = <10 0x8>; diff --git a/trunk/arch/powerpc/boot/dts/mpc834x_mds.dts b/trunk/arch/powerpc/boot/dts/mpc834x_mds.dts index 4552864082c2..230febb9b72f 100644 --- a/trunk/arch/powerpc/boot/dts/mpc834x_mds.dts +++ b/trunk/arch/powerpc/boot/dts/mpc834x_mds.dts @@ -242,7 +242,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <9 0x8>; @@ -252,7 +252,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <10 0x8>; diff --git a/trunk/arch/powerpc/boot/dts/mpc836x_mds.dts b/trunk/arch/powerpc/boot/dts/mpc836x_mds.dts index c0e450a551bf..45cfa1c50a2a 100644 --- a/trunk/arch/powerpc/boot/dts/mpc836x_mds.dts +++ b/trunk/arch/powerpc/boot/dts/mpc836x_mds.dts @@ -136,7 +136,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <264000000>; interrupts = <9 0x8>; @@ -146,7 +146,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = 
"ns16550"; reg = <0x4600 0x100>; clock-frequency = <264000000>; interrupts = <10 0x8>; diff --git a/trunk/arch/powerpc/boot/dts/mpc836x_rdk.dts b/trunk/arch/powerpc/boot/dts/mpc836x_rdk.dts index b6e9aec1d860..bdf4459677b1 100644 --- a/trunk/arch/powerpc/boot/dts/mpc836x_rdk.dts +++ b/trunk/arch/powerpc/boot/dts/mpc836x_rdk.dts @@ -102,7 +102,7 @@ serial0: serial@4500 { device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; interrupts = <9 8>; interrupt-parent = <&ipic>; @@ -112,7 +112,7 @@ serial1: serial@4600 { device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; interrupts = <10 8>; interrupt-parent = <&ipic>; diff --git a/trunk/arch/powerpc/boot/dts/mpc8377_mds.dts b/trunk/arch/powerpc/boot/dts/mpc8377_mds.dts index cfccef57cd1d..855782c5e5ec 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8377_mds.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8377_mds.dts @@ -276,7 +276,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <9 0x8>; @@ -286,7 +286,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <10 0x8>; diff --git a/trunk/arch/powerpc/boot/dts/mpc8377_rdb.dts b/trunk/arch/powerpc/boot/dts/mpc8377_rdb.dts index 353deff1b7f6..dbc1b988b29d 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8377_rdb.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8377_rdb.dts @@ -321,7 +321,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <9 0x8>; @@ -331,7 +331,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <10 0x8>; diff --git a/trunk/arch/powerpc/boot/dts/mpc8377_wlan.dts b/trunk/arch/powerpc/boot/dts/mpc8377_wlan.dts index ef4a305a0d0c..9ea783056969 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8377_wlan.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8377_wlan.dts @@ -304,7 +304,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <9 0x8>; @@ -314,7 +314,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <10 0x8>; diff --git a/trunk/arch/powerpc/boot/dts/mpc8378_mds.dts b/trunk/arch/powerpc/boot/dts/mpc8378_mds.dts index 538fcb927337..f70cf6000839 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8378_mds.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8378_mds.dts @@ -315,7 +315,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <9 0x8>; @@ -325,7 +325,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <10 0x8>; diff --git a/trunk/arch/powerpc/boot/dts/mpc8378_rdb.dts 
b/trunk/arch/powerpc/boot/dts/mpc8378_rdb.dts index 32333a908f3d..3447eb9f6e88 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8378_rdb.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8378_rdb.dts @@ -321,7 +321,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <9 0x8>; @@ -331,7 +331,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <10 0x8>; diff --git a/trunk/arch/powerpc/boot/dts/mpc8379_mds.dts b/trunk/arch/powerpc/boot/dts/mpc8379_mds.dts index 5387092fdfb4..645ec51cc6e1 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8379_mds.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8379_mds.dts @@ -313,7 +313,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <9 0x8>; @@ -323,7 +323,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <10 0x8>; diff --git a/trunk/arch/powerpc/boot/dts/mpc8379_rdb.dts b/trunk/arch/powerpc/boot/dts/mpc8379_rdb.dts index 46224c2430ff..15560c619b04 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8379_rdb.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8379_rdb.dts @@ -319,7 +319,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <9 0x8>; @@ -329,7 +329,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <10 0x8>; diff --git a/trunk/arch/powerpc/boot/dts/mpc8536ds.dts b/trunk/arch/powerpc/boot/dts/mpc8536ds.dts index c15881574fdc..a75c10eed269 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8536ds.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8536ds.dts @@ -9,11 +9,24 @@ * option) any later version. */ -/include/ "fsl/mpc8536si-pre.dtsi" +/dts-v1/; / { model = "fsl,mpc8536ds"; compatible = "fsl,mpc8536ds"; + #address-cells = <2>; + #size-cells = <2>; + + aliases { + ethernet0 = &enet0; + ethernet1 = &enet1; + serial0 = &serial0; + serial1 = &serial1; + pci0 = &pci0; + pci1 = &pci1; + pci2 = &pci2; + pci3 = &pci3; + }; cpus { #cpus = <1>; @@ -32,34 +45,403 @@ reg = <0 0 0 0>; // Filled by U-Boot }; - lbc: localbus@ffe05000 { - reg = <0 0xffe05000 0 0x1000>; - }; - - board_soc: soc: soc@ffe00000 { + soc@ffe00000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "simple-bus"; ranges = <0x0 0 0xffe00000 0x100000>; + bus-frequency = <0>; // Filled out by uboot. 
+ + ecm-law@0 { + compatible = "fsl,ecm-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <12>; + }; + + ecm@1000 { + compatible = "fsl,mpc8536-ecm", "fsl,ecm"; + reg = <0x1000 0x1000>; + interrupts = <17 2>; + interrupt-parent = <&mpic>; + }; + + memory-controller@2000 { + compatible = "fsl,mpc8536-memory-controller"; + reg = <0x2000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <18 0x2>; + }; + + L2: l2-cache-controller@20000 { + compatible = "fsl,mpc8536-l2-cache-controller"; + reg = <0x20000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <16 0x2>; + }; + + i2c@3000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x3000 0x100>; + interrupts = <43 0x2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + i2c@3100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x3100 0x100>; + interrupts = <43 0x2>; + interrupt-parent = <&mpic>; + dfsrr; + rtc@68 { + compatible = "dallas,ds3232"; + reg = <0x68>; + interrupts = <0 0x1>; + interrupt-parent = <&mpic>; + }; + }; + + spi@7000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,mpc8536-espi"; + reg = <0x7000 0x1000>; + interrupts = <59 0x2>; + interrupt-parent = <&mpic>; + fsl,espi-num-chipselects = <4>; + + flash@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "spansion,s25sl12801"; + reg = <0>; + spi-max-frequency = <40000000>; + partition@u-boot { + label = "u-boot"; + reg = <0x00000000 0x00100000>; + read-only; + }; + partition@kernel { + label = "kernel"; + reg = <0x00100000 0x00500000>; + read-only; + }; + partition@dtb { + label = "dtb"; + reg = <0x00600000 0x00100000>; + read-only; + }; + partition@fs { + label = "file system"; + reg = <0x00700000 0x00900000>; + }; + }; + flash@1 { + compatible = "spansion,s25sl12801"; + reg = <1>; + spi-max-frequency = <40000000>; + }; + flash@2 { + compatible = "spansion,s25sl12801"; + reg = <2>; + spi-max-frequency = <40000000>; + }; + flash@3 { + compatible = "spansion,s25sl12801"; + reg = <3>; + spi-max-frequency = <40000000>; + }; + }; + + dma@21300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,mpc8536-dma", "fsl,eloplus-dma"; + reg = <0x21300 4>; + ranges = <0 0x21100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,mpc8536-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <20 2>; + }; + dma-channel@80 { + compatible = "fsl,mpc8536-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <21 2>; + }; + dma-channel@100 { + compatible = "fsl,mpc8536-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <22 2>; + }; + dma-channel@180 { + compatible = "fsl,mpc8536-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <23 2>; + }; + }; + + usb@22000 { + compatible = "fsl,mpc8536-usb2-mph", "fsl-usb2-mph"; + reg = <0x22000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupt-parent = <&mpic>; + interrupts = <28 0x2>; + phy_type = "ulpi"; + }; + + usb@23000 { + compatible = "fsl,mpc8536-usb2-mph", "fsl-usb2-mph"; + reg = <0x23000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupt-parent = <&mpic>; + interrupts = <46 0x2>; + phy_type = "ulpi"; + }; + + enet0: ethernet@24000 { + #address-cells = <1>; 
+ #size-cells = <1>; + cell-index = <0>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x24000 0x1000>; + ranges = <0x0 0x24000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <29 2 30 2 34 2>; + interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; + phy-handle = <&phy1>; + phy-connection-type = "rgmii-id"; + + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-mdio"; + reg = <0x520 0x20>; + + phy0: ethernet-phy@0 { + interrupt-parent = <&mpic>; + interrupts = <10 0x1>; + reg = <0>; + device_type = "ethernet-phy"; + }; + phy1: ethernet-phy@1 { + interrupt-parent = <&mpic>; + interrupts = <10 0x1>; + reg = <1>; + device_type = "ethernet-phy"; + }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + }; + + enet1: ethernet@26000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <1>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x26000 0x1000>; + ranges = <0x0 0x26000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <31 2 32 2 33 2>; + interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; + phy-handle = <&phy0>; + phy-connection-type = "rgmii-id"; + + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + }; + + usb@2b000 { + compatible = "fsl,mpc8536-usb2-dr", "fsl-usb2-dr"; + reg = <0x2b000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupt-parent = <&mpic>; + interrupts = <60 0x2>; + dr_mode = "peripheral"; + phy_type = "ulpi"; + }; + + sdhci@2e000 { + compatible = "fsl,mpc8536-esdhc", "fsl,esdhc"; + reg = <0x2e000 0x1000>; + interrupts = <72 0x2>; + interrupt-parent = <&mpic>; + clock-frequency = <250000000>; + }; + + serial0: serial@4500 { + cell-index = <0>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4500 0x100>; + clock-frequency = <0>; + interrupts = <42 0x2>; + interrupt-parent = <&mpic>; + }; + + serial1: serial@4600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4600 0x100>; + clock-frequency = <0>; + interrupts = <42 0x2>; + interrupt-parent = <&mpic>; + }; + + crypto@30000 { + compatible = "fsl,sec3.0", "fsl,sec2.4", "fsl,sec2.2", + "fsl,sec2.1", "fsl,sec2.0"; + reg = <0x30000 0x10000>; + interrupts = <45 2 58 2>; + interrupt-parent = <&mpic>; + fsl,num-channels = <4>; + fsl,channel-fifo-len = <24>; + fsl,exec-units-mask = <0x9fe>; + fsl,descriptor-types-mask = <0x3ab0ebf>; + }; + + sata@18000 { + compatible = "fsl,mpc8536-sata", "fsl,pq-sata"; + reg = <0x18000 0x1000>; + cell-index = <1>; + interrupts = <74 0x2>; + interrupt-parent = <&mpic>; + }; + + sata@19000 { + compatible = "fsl,mpc8536-sata", "fsl,pq-sata"; + reg = <0x19000 0x1000>; + cell-index = <2>; + interrupts = <41 0x2>; + interrupt-parent = <&mpic>; + }; + + global-utilities@e0000 { //global utilities block + compatible = "fsl,mpc8548-guts"; + reg = <0xe0000 0x1000>; + fsl,has-rstcr; + }; + + mpic: pic@40000 { + clock-frequency = <0>; + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <2>; + reg = <0x40000 0x40000>; + compatible = "chrp,open-pic"; + device_type = "open-pic"; + big-endian; + }; + + msi@41600 { + compatible = "fsl,mpc8536-msi", "fsl,mpic-msi"; + reg = <0x41600 0x80>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe0 0 + 0xe1 0 + 0xe2 0 + 0xe3 0 + 0xe4 0 + 0xe5 0 + 0xe6 0 + 0xe7 0>; + 
interrupt-parent = <&mpic>; + }; }; pci0: pci@ffe08000 { - reg = <0 0xffe08000 0 0x1000>; - ranges = <0x02000000 0 0x80000000 0 0x80000000 0 0x10000000 - 0x01000000 0 0x00000000 0 0xffc00000 0 0x00010000>; - clock-frequency = <66666666>; + compatible = "fsl,mpc8540-pci"; + device_type = "pci"; interrupt-map-mask = <0xf800 0x0 0x0 0x7>; interrupt-map = < /* IDSEL 0x11 J17 Slot 1 */ - 0x8800 0 0 1 &mpic 1 1 0 0 - 0x8800 0 0 2 &mpic 2 1 0 0 - 0x8800 0 0 3 &mpic 3 1 0 0 - 0x8800 0 0 4 &mpic 4 1 0 0>; + 0x8800 0 0 1 &mpic 1 1 + 0x8800 0 0 2 &mpic 2 1 + 0x8800 0 0 3 &mpic 3 1 + 0x8800 0 0 4 &mpic 4 1>; + + interrupt-parent = <&mpic>; + interrupts = <24 0x2>; + bus-range = <0 0xff>; + ranges = <0x02000000 0 0x80000000 0 0x80000000 0 0x10000000 + 0x01000000 0 0x00000000 0 0xffc00000 0 0x00010000>; + clock-frequency = <66666666>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0 0xffe08000 0 0x1000>; }; pci1: pcie@ffe09000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; reg = <0 0xffe09000 0 0x1000>; + bus-range = <0 0xff>; ranges = <0x02000000 0 0x98000000 0 0x98000000 0 0x08000000 0x01000000 0 0x00000000 0 0xffc20000 0 0x00010000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <25 0x2>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 4 1 + 0000 0 0 2 &mpic 5 1 + 0000 0 0 3 &mpic 6 1 + 0000 0 0 4 &mpic 7 1 + >; pcie@0 { + reg = <0 0 0 0 0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x02000000 0 0x98000000 0x02000000 0 0x98000000 0 0x08000000 @@ -71,10 +453,31 @@ }; pci2: pcie@ffe0a000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; reg = <0 0xffe0a000 0 0x1000>; + bus-range = <0 0xff>; ranges = <0x02000000 0 0x90000000 0 0x90000000 0 0x08000000 0x01000000 0 0x00000000 0 0xffc10000 0 0x00010000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <26 0x2>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 0 1 + 0000 0 0 2 &mpic 1 1 + 0000 0 0 3 &mpic 2 1 + 0000 0 0 4 &mpic 3 1 + >; pcie@0 { + reg = <0 0 0 0 0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x02000000 0 0x90000000 0x02000000 0 0x90000000 0 0x08000000 @@ -86,10 +489,32 @@ }; pci3: pcie@ffe0b000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; reg = <0 0xffe0b000 0 0x1000>; + bus-range = <0 0xff>; ranges = <0x02000000 0 0xa0000000 0 0xa0000000 0 0x20000000 0x01000000 0 0x00000000 0 0xffc30000 0 0x00010000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <27 0x2>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 8 1 + 0000 0 0 2 &mpic 9 1 + 0000 0 0 3 &mpic 10 1 + 0000 0 0 4 &mpic 11 1 + >; + pcie@0 { + reg = <0 0 0 0 0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x02000000 0 0xa0000000 0x02000000 0 0xa0000000 0 0x20000000 @@ -100,6 +525,3 @@ }; }; }; - -/include/ "fsl/mpc8536si-post.dtsi" -/include/ "mpc8536ds.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/mpc8536ds.dtsi b/trunk/arch/powerpc/boot/dts/mpc8536ds.dtsi deleted file mode 100644 index 1462e4cf49d7..000000000000 --- a/trunk/arch/powerpc/boot/dts/mpc8536ds.dtsi +++ /dev/null 
@@ -1,141 +0,0 @@ -/* - * MPC8536DS Device Tree Source stub (no addresses or top-level ranges) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -&board_soc { - i2c@3100 { - rtc@68 { - compatible = "dallas,ds3232"; - reg = <0x68>; - interrupts = <0 0x1 0 0>; - }; - }; - - spi@7000 { - flash@0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "spansion,s25sl12801"; - reg = <0>; - spi-max-frequency = <40000000>; - partition@u-boot { - label = "u-boot"; - reg = <0x00000000 0x00100000>; - read-only; - }; - partition@kernel { - label = "kernel"; - reg = <0x00100000 0x00500000>; - read-only; - }; - partition@dtb { - label = "dtb"; - reg = <0x00600000 0x00100000>; - read-only; - }; - partition@fs { - label = "file system"; - reg = <0x00700000 0x00900000>; - }; - }; - flash@1 { - compatible = "spansion,s25sl12801"; - reg = <1>; - spi-max-frequency = <40000000>; - }; - flash@2 { - compatible = "spansion,s25sl12801"; - reg = <2>; - spi-max-frequency = <40000000>; - }; - flash@3 { - compatible = "spansion,s25sl12801"; - reg = <3>; - spi-max-frequency = <40000000>; - }; - }; - - usb@22000 { - phy_type = "ulpi"; - }; - - usb@23000 { - phy_type = "ulpi"; - }; - - enet0: ethernet@24000 { - tbi-handle = <&tbi0>; - phy-handle = <&phy1>; - phy-connection-type = "rgmii-id"; - }; - - mdio@24520 { - phy0: ethernet-phy@0 { - interrupts = <10 0x1 0 0>; - reg = <0>; - device_type = "ethernet-phy"; - }; - phy1: ethernet-phy@1 { - interrupts = <10 0x1 0 0>; - reg = <1>; - device_type = "ethernet-phy"; - }; - tbi0: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; - }; - }; - - enet2: ethernet@26000 { - tbi-handle = <&tbi1>; - phy-handle = <&phy0>; - phy-connection-type = "rgmii-id"; - }; - - mdio@26520 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,gianfar-tbi"; - reg = <0x26520 0x20>; - - tbi1: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; - }; - }; - - usb@2b000 { - dr_mode = "peripheral"; - phy_type = "ulpi"; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/mpc8536ds_36b.dts b/trunk/arch/powerpc/boot/dts/mpc8536ds_36b.dts index 8f4b929b1d1d..d95b26021e62 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8536ds_36b.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8536ds_36b.dts @@ -1,5 +1,5 @@ /* - * MPC8536DS Device Tree Source (36-bit address map) + * MPC8536 DS Device Tree Source * * Copyright 2008-2009 Freescale Semiconductor, Inc. * @@ -9,11 +9,24 @@ * option) any later version. */ -/include/ "fsl/mpc8536si-pre.dtsi" +/dts-v1/; / { model = "fsl,mpc8536ds"; compatible = "fsl,mpc8536ds"; + #address-cells = <2>; + #size-cells = <2>; + + aliases { + ethernet0 = &enet0; + ethernet1 = &enet1; + serial0 = &serial0; + serial1 = &serial1; + pci0 = &pci0; + pci1 = &pci1; + pci2 = &pci2; + pci3 = &pci3; + }; cpus { #cpus = <1>; @@ -32,34 +45,351 @@ reg = <0 0 0 0>; // Filled by U-Boot }; - lbc: localbus@ffe05000 { - reg = <0 0xffe05000 0 0x1000>; - }; - - board_soc: soc: soc@fffe00000 { + soc@fffe00000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "simple-bus"; ranges = <0x0 0xf 0xffe00000 0x100000>; + bus-frequency = <0>; // Filled out by uboot. 
+ + ecm-law@0 { + compatible = "fsl,ecm-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <12>; + }; + + ecm@1000 { + compatible = "fsl,mpc8536-ecm", "fsl,ecm"; + reg = <0x1000 0x1000>; + interrupts = <17 2>; + interrupt-parent = <&mpic>; + }; + + memory-controller@2000 { + compatible = "fsl,mpc8536-memory-controller"; + reg = <0x2000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <18 0x2>; + }; + + L2: l2-cache-controller@20000 { + compatible = "fsl,mpc8536-l2-cache-controller"; + reg = <0x20000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <16 0x2>; + }; + + i2c@3000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x3000 0x100>; + interrupts = <43 0x2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + i2c@3100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x3100 0x100>; + interrupts = <43 0x2>; + interrupt-parent = <&mpic>; + dfsrr; + rtc@68 { + compatible = "dallas,ds3232"; + reg = <0x68>; + interrupts = <0 0x1>; + interrupt-parent = <&mpic>; + }; + }; + + dma@21300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,mpc8536-dma", "fsl,eloplus-dma"; + reg = <0x21300 4>; + ranges = <0 0x21100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,mpc8536-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <20 2>; + }; + dma-channel@80 { + compatible = "fsl,mpc8536-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <21 2>; + }; + dma-channel@100 { + compatible = "fsl,mpc8536-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <22 2>; + }; + dma-channel@180 { + compatible = "fsl,mpc8536-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <23 2>; + }; + }; + + usb@22000 { + compatible = "fsl,mpc8536-usb2-mph", "fsl-usb2-mph"; + reg = <0x22000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupt-parent = <&mpic>; + interrupts = <28 0x2>; + phy_type = "ulpi"; + }; + + usb@23000 { + compatible = "fsl,mpc8536-usb2-mph", "fsl-usb2-mph"; + reg = <0x23000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupt-parent = <&mpic>; + interrupts = <46 0x2>; + phy_type = "ulpi"; + }; + + enet0: ethernet@24000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <0>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x24000 0x1000>; + ranges = <0x0 0x24000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <29 2 30 2 34 2>; + interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; + phy-handle = <&phy1>; + phy-connection-type = "rgmii-id"; + + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-mdio"; + reg = <0x520 0x20>; + + phy0: ethernet-phy@0 { + interrupt-parent = <&mpic>; + interrupts = <10 0x1>; + reg = <0>; + device_type = "ethernet-phy"; + }; + phy1: ethernet-phy@1 { + interrupt-parent = <&mpic>; + interrupts = <10 0x1>; + reg = <1>; + device_type = "ethernet-phy"; + }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + }; + + enet1: ethernet@26000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <1>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x26000 0x1000>; + 
ranges = <0x0 0x26000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <31 2 32 2 33 2>; + interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; + phy-handle = <&phy0>; + phy-connection-type = "rgmii-id"; + + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + }; + + usb@2b000 { + compatible = "fsl,mpc8536-usb2-dr", "fsl-usb2-dr"; + reg = <0x2b000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupt-parent = <&mpic>; + interrupts = <60 0x2>; + dr_mode = "peripheral"; + phy_type = "ulpi"; + }; + + sdhci@2e000 { + compatible = "fsl,mpc8536-esdhc", "fsl,esdhc"; + reg = <0x2e000 0x1000>; + interrupts = <72 0x2>; + interrupt-parent = <&mpic>; + clock-frequency = <250000000>; + }; + + serial0: serial@4500 { + cell-index = <0>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4500 0x100>; + clock-frequency = <0>; + interrupts = <42 0x2>; + interrupt-parent = <&mpic>; + }; + + serial1: serial@4600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4600 0x100>; + clock-frequency = <0>; + interrupts = <42 0x2>; + interrupt-parent = <&mpic>; + }; + + crypto@30000 { + compatible = "fsl,sec3.0", "fsl,sec2.4", "fsl,sec2.2", + "fsl,sec2.1", "fsl,sec2.0"; + reg = <0x30000 0x10000>; + interrupts = <45 2 58 2>; + interrupt-parent = <&mpic>; + fsl,num-channels = <4>; + fsl,channel-fifo-len = <24>; + fsl,exec-units-mask = <0x9fe>; + fsl,descriptor-types-mask = <0x3ab0ebf>; + }; + + sata@18000 { + compatible = "fsl,mpc8536-sata", "fsl,pq-sata"; + reg = <0x18000 0x1000>; + cell-index = <1>; + interrupts = <74 0x2>; + interrupt-parent = <&mpic>; + }; + + sata@19000 { + compatible = "fsl,mpc8536-sata", "fsl,pq-sata"; + reg = <0x19000 0x1000>; + cell-index = <2>; + interrupts = <41 0x2>; + interrupt-parent = <&mpic>; + }; + + global-utilities@e0000 { //global utilities block + compatible = "fsl,mpc8548-guts"; + reg = <0xe0000 0x1000>; + fsl,has-rstcr; + }; + + mpic: pic@40000 { + clock-frequency = <0>; + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <2>; + reg = <0x40000 0x40000>; + compatible = "chrp,open-pic"; + device_type = "open-pic"; + big-endian; + }; + + msi@41600 { + compatible = "fsl,mpc8536-msi", "fsl,mpic-msi"; + reg = <0x41600 0x80>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe0 0 + 0xe1 0 + 0xe2 0 + 0xe3 0 + 0xe4 0 + 0xe5 0 + 0xe6 0 + 0xe7 0>; + interrupt-parent = <&mpic>; + }; }; - pci0: pci@ffe08000 { - reg = <0xf 0xffe08000 0 0x1000>; - ranges = <0x02000000 0 0xf0000000 0xc 0x00000000 0 0x10000000 - 0x01000000 0 0x00000000 0xf 0xffc00000 0 0x00010000>; - clock-frequency = <66666666>; + pci0: pci@fffe08000 { + compatible = "fsl,mpc8540-pci"; + device_type = "pci"; interrupt-map-mask = <0xf800 0x0 0x0 0x7>; interrupt-map = < /* IDSEL 0x11 J17 Slot 1 */ - 0x8800 0 0 1 &mpic 1 1 0 0 - 0x8800 0 0 2 &mpic 2 1 0 0 - 0x8800 0 0 3 &mpic 3 1 0 0 - 0x8800 0 0 4 &mpic 4 1 0 0>; + 0x8800 0 0 1 &mpic 1 1 + 0x8800 0 0 2 &mpic 2 1 + 0x8800 0 0 3 &mpic 3 1 + 0x8800 0 0 4 &mpic 4 1>; + + interrupt-parent = <&mpic>; + interrupts = <24 0x2>; + bus-range = <0 0xff>; + ranges = <0x02000000 0 0xf0000000 0xc 0x00000000 0 0x10000000 + 0x01000000 0 0x00000000 0xf 0xffc00000 0 0x00010000>; + clock-frequency = <66666666>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0xf 0xffe08000 0 0x1000>; }; - pci1: pcie@ffe09000 { + pci1: pcie@fffe09000 { 
+ compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; reg = <0xf 0xffe09000 0 0x1000>; + bus-range = <0 0xff>; ranges = <0x02000000 0 0xf8000000 0xc 0x18000000 0 0x08000000 0x01000000 0 0x00000000 0xf 0xffc20000 0 0x00010000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <25 0x2>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 4 1 + 0000 0 0 2 &mpic 5 1 + 0000 0 0 3 &mpic 6 1 + 0000 0 0 4 &mpic 7 1 + >; pcie@0 { + reg = <0 0 0 0 0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x02000000 0 0xf8000000 0x02000000 0 0xf8000000 0 0x08000000 @@ -71,10 +401,31 @@ }; pci2: pcie@fffe0a000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; reg = <0xf 0xffe0a000 0 0x1000>; + bus-range = <0 0xff>; ranges = <0x02000000 0 0xf8000000 0xc 0x10000000 0 0x08000000 0x01000000 0 0x00000000 0xf 0xffc10000 0 0x00010000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <26 0x2>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 0 1 + 0000 0 0 2 &mpic 1 1 + 0000 0 0 3 &mpic 2 1 + 0000 0 0 4 &mpic 3 1 + >; pcie@0 { + reg = <0 0 0 0 0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x02000000 0 0xf8000000 0x02000000 0 0xf8000000 0 0x08000000 @@ -86,10 +437,32 @@ }; pci3: pcie@fffe0b000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; reg = <0xf 0xffe0b000 0 0x1000>; + bus-range = <0 0xff>; ranges = <0x02000000 0 0xe0000000 0xc 0x20000000 0 0x20000000 0x01000000 0 0x00000000 0xf 0xffc30000 0 0x00010000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <27 0x2>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 8 1 + 0000 0 0 2 &mpic 9 1 + 0000 0 0 3 &mpic 10 1 + 0000 0 0 4 &mpic 11 1 + >; + pcie@0 { + reg = <0 0 0 0 0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x02000000 0 0xe0000000 0x02000000 0 0xe0000000 0 0x20000000 @@ -100,6 +473,3 @@ }; }; }; - -/include/ "fsl/mpc8536si-post.dtsi" -/include/ "mpc8536ds.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/mpc8540ads.dts b/trunk/arch/powerpc/boot/dts/mpc8540ads.dts index f99fb110c97f..8d1bf0fd9268 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8540ads.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8540ads.dts @@ -243,7 +243,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; // reg base, size clock-frequency = <0>; // should we fill in in uboot? interrupts = <42 2>; @@ -253,7 +253,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; // reg base, size clock-frequency = <0>; // should we fill in in uboot? 
interrupts = <42 2>; diff --git a/trunk/arch/powerpc/boot/dts/mpc8541cds.dts b/trunk/arch/powerpc/boot/dts/mpc8541cds.dts index 0f5e93912799..87ff96549fac 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8541cds.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8541cds.dts @@ -209,7 +209,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; // reg base, size clock-frequency = <0>; // should we fill in in uboot? interrupts = <42 2>; @@ -219,7 +219,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; // reg base, size clock-frequency = <0>; // should we fill in in uboot? interrupts = <42 2>; diff --git a/trunk/arch/powerpc/boot/dts/mpc8544ds.dts b/trunk/arch/powerpc/boot/dts/mpc8544ds.dts index e934987e882b..d793968743c9 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8544ds.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8544ds.dts @@ -9,52 +9,339 @@ * option) any later version. */ -/include/ "fsl/mpc8544si-pre.dtsi" - +/dts-v1/; / { model = "MPC8544DS"; compatible = "MPC8544DS", "MPC85xxDS"; + #address-cells = <1>; + #size-cells = <1>; + + aliases { + ethernet0 = &enet0; + ethernet1 = &enet1; + serial0 = &serial0; + serial1 = &serial1; + pci0 = &pci0; + pci1 = &pci1; + pci2 = &pci2; + pci3 = &pci3; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + PowerPC,8544@0 { + device_type = "cpu"; + reg = <0x0>; + d-cache-line-size = <32>; // 32 bytes + i-cache-line-size = <32>; // 32 bytes + d-cache-size = <0x8000>; // L1, 32K + i-cache-size = <0x8000>; // L1, 32K + timebase-frequency = <0>; + bus-frequency = <0>; + clock-frequency = <0>; + next-level-cache = <&L2>; + }; + }; memory { device_type = "memory"; - reg = <0 0 0 0>; // Filled by U-Boot + reg = <0x0 0x0>; // Filled by U-Boot }; - lbc: localbus@e0005000 { - reg = <0 0xe0005000 0 0x1000>; - }; + soc8544@e0000000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "simple-bus"; + + ranges = <0x0 0xe0000000 0x100000>; + bus-frequency = <0>; // Filled out by uboot. 
+ + ecm-law@0 { + compatible = "fsl,ecm-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <10>; + }; + + ecm@1000 { + compatible = "fsl,mpc8544-ecm", "fsl,ecm"; + reg = <0x1000 0x1000>; + interrupts = <17 2>; + interrupt-parent = <&mpic>; + }; + + memory-controller@2000 { + compatible = "fsl,mpc8544-memory-controller"; + reg = <0x2000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <18 2>; + }; + + L2: l2-cache-controller@20000 { + compatible = "fsl,mpc8544-l2-cache-controller"; + reg = <0x20000 0x1000>; + cache-line-size = <32>; // 32 bytes + cache-size = <0x40000>; // L2, 256K + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; + + i2c@3000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x3000 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + i2c@3100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x3100 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + dma@21300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,mpc8544-dma", "fsl,eloplus-dma"; + reg = <0x21300 0x4>; + ranges = <0x0 0x21100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,mpc8544-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <20 2>; + }; + dma-channel@80 { + compatible = "fsl,mpc8544-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <21 2>; + }; + dma-channel@100 { + compatible = "fsl,mpc8544-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <22 2>; + }; + dma-channel@180 { + compatible = "fsl,mpc8544-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <23 2>; + }; + }; + + enet0: ethernet@24000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <0>; + device_type = "network"; + model = "TSEC"; + compatible = "gianfar"; + reg = <0x24000 0x1000>; + ranges = <0x0 0x24000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <29 2 30 2 34 2>; + interrupt-parent = <&mpic>; + phy-handle = <&phy0>; + tbi-handle = <&tbi0>; + phy-connection-type = "rgmii-id"; + + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-mdio"; + reg = <0x520 0x20>; + + phy0: ethernet-phy@0 { + interrupt-parent = <&mpic>; + interrupts = <10 1>; + reg = <0x0>; + device_type = "ethernet-phy"; + }; + phy1: ethernet-phy@1 { + interrupt-parent = <&mpic>; + interrupts = <10 1>; + reg = <0x1>; + device_type = "ethernet-phy"; + }; + + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + }; + + enet1: ethernet@26000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <1>; + device_type = "network"; + model = "TSEC"; + compatible = "gianfar"; + reg = <0x26000 0x1000>; + ranges = <0x0 0x26000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <31 2 32 2 33 2>; + interrupt-parent = <&mpic>; + phy-handle = <&phy1>; + tbi-handle = <&tbi1>; + phy-connection-type = "rgmii-id"; + + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + }; + + serial0: serial@4500 { + cell-index = <0>; + device_type = "serial"; + 
compatible = "ns16550"; + reg = <0x4500 0x100>; + clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; - board_soc: soc: soc8544@e0000000 { - ranges = <0x0 0x0 0xe0000000 0x100000>; + serial1: serial@4600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4600 0x100>; + clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + + global-utilities@e0000 { //global utilities block + compatible = "fsl,mpc8548-guts"; + reg = <0xe0000 0x1000>; + fsl,has-rstcr; + }; + + crypto@30000 { + compatible = "fsl,sec2.1", "fsl,sec2.0"; + reg = <0x30000 0x10000>; + interrupts = <45 2>; + interrupt-parent = <&mpic>; + fsl,num-channels = <4>; + fsl,channel-fifo-len = <24>; + fsl,exec-units-mask = <0xfe>; + fsl,descriptor-types-mask = <0x12b0ebf>; + }; + + mpic: pic@40000 { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <2>; + reg = <0x40000 0x40000>; + compatible = "chrp,open-pic"; + device_type = "open-pic"; + }; + + msi@41600 { + compatible = "fsl,mpc8544-msi", "fsl,mpic-msi"; + reg = <0x41600 0x80>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe0 0 + 0xe1 0 + 0xe2 0 + 0xe3 0 + 0xe4 0 + 0xe5 0 + 0xe6 0 + 0xe7 0>; + interrupt-parent = <&mpic>; + }; }; pci0: pci@e0008000 { - reg = <0 0xe0008000 0 0x1000>; - ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000 - 0x1000000 0x0 0x00000000 0 0xe1000000 0x0 0x10000>; - clock-frequency = <66666666>; + compatible = "fsl,mpc8540-pci"; + device_type = "pci"; interrupt-map-mask = <0xf800 0x0 0x0 0x7>; interrupt-map = < /* IDSEL 0x11 J17 Slot 1 */ - 0x8800 0x0 0x0 0x1 &mpic 0x2 0x1 0 0 - 0x8800 0x0 0x0 0x2 &mpic 0x3 0x1 0 0 - 0x8800 0x0 0x0 0x3 &mpic 0x4 0x1 0 0 - 0x8800 0x0 0x0 0x4 &mpic 0x1 0x1 0 0 + 0x8800 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8800 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8800 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8800 0x0 0x0 0x4 &mpic 0x1 0x1 /* IDSEL 0x12 J16 Slot 2 */ - 0x9000 0x0 0x0 0x1 &mpic 0x3 0x1 0 0 - 0x9000 0x0 0x0 0x2 &mpic 0x4 0x1 0 0 - 0x9000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0 - 0x9000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0>; + 0x9000 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9000 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0x9000 0x0 0x0 0x4 &mpic 0x1 0x1>; + + interrupt-parent = <&mpic>; + interrupts = <24 2>; + bus-range = <0 255>; + ranges = <0x2000000 0x0 0xc0000000 0xc0000000 0x0 0x20000000 + 0x1000000 0x0 0x0 0xe1000000 0x0 0x10000>; + clock-frequency = <66666666>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0xe0008000 0x1000>; }; pci1: pcie@e0009000 { - reg = <0x0 0xe0009000 0x0 0x1000>; - ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 - 0x1000000 0x0 0x00000000 0 0xe1010000 0x0 0x10000>; + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0xe0009000 0x1000>; + bus-range = <0 255>; + ranges = <0x2000000 0x0 0x80000000 0x80000000 0x0 0x20000000 + 0x1000000 0x0 0x0 0xe1010000 0x0 0x10000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <25 2>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x4 0x1 + 0000 0x0 0x0 0x2 &mpic 0x5 0x1 + 0000 0x0 0x0 0x3 &mpic 0x6 0x1 + 0000 0x0 0x0 0x4 &mpic 0x7 0x1 + >; pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0x80000000 0x2000000 0x0 0x80000000 0x0 0x20000000 @@ -66,10 +353,31 @@ }; pci2: 
pcie@e000a000 { - reg = <0x0 0xe000a000 0x0 0x1000>; - ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x10000000 - 0x1000000 0x0 0x00000000 0 0xe1020000 0x0 0x10000>; + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0xe000a000 0x1000>; + bus-range = <0 255>; + ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000 + 0x1000000 0x0 0x0 0xe1020000 0x0 0x10000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <26 2>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0000 0x0 0x0 0x2 &mpic 0x1 0x1 + 0000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0000 0x0 0x0 0x4 &mpic 0x3 0x1 + >; pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0xa0000000 0x2000000 0x0 0xa0000000 0x0 0x10000000 @@ -80,11 +388,44 @@ }; }; - board_pci3: pci3: pcie@e000b000 { - reg = <0x0 0xe000b000 0x0 0x1000>; - ranges = <0x2000000 0x0 0xb0000000 0 0xb0000000 0x0 0x100000 - 0x1000000 0x0 0x00000000 0 0xb0100000 0x0 0x100000>; + pci3: pcie@e000b000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0xe000b000 0x1000>; + bus-range = <0 255>; + ranges = <0x2000000 0x0 0xb0000000 0xb0000000 0x0 0x100000 + 0x1000000 0x0 0x0 0xb0100000 0x0 0x100000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <27 2>; + interrupt-map-mask = <0xff00 0x0 0x0 0x1>; + interrupt-map = < + // IDSEL 0x1c USB + 0xe000 0x0 0x0 0x1 &i8259 0xc 0x2 + 0xe100 0x0 0x0 0x2 &i8259 0x9 0x2 + 0xe200 0x0 0x0 0x3 &i8259 0xa 0x2 + 0xe300 0x0 0x0 0x4 &i8259 0xb 0x2 + + // IDSEL 0x1d Audio + 0xe800 0x0 0x0 0x1 &i8259 0x6 0x2 + + // IDSEL 0x1e Legacy + 0xf000 0x0 0x0 0x1 &i8259 0x7 0x2 + 0xf100 0x0 0x0 0x1 &i8259 0x7 0x2 + + // IDSEL 0x1f IDE/SATA + 0xf800 0x0 0x0 0x1 &i8259 0xe 0x2 + 0xf900 0x0 0x0 0x1 &i8259 0x5 0x2 + >; + pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0xb0000000 0x2000000 0x0 0xb0000000 0x0 0x100000 @@ -92,14 +433,70 @@ 0x1000000 0x0 0x0 0x1000000 0x0 0x0 0x0 0x100000>; + + uli1575@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + ranges = <0x2000000 0x0 0xb0000000 + 0x2000000 0x0 0xb0000000 + 0x0 0x100000 + + 0x1000000 0x0 0x0 + 0x1000000 0x0 0x0 + 0x0 0x100000>; + isa@1e { + device_type = "isa"; + #interrupt-cells = <2>; + #size-cells = <1>; + #address-cells = <2>; + reg = <0xf000 0x0 0x0 0x0 0x0>; + ranges = <0x1 0x0 + 0x1000000 0x0 0x0 + 0x1000>; + interrupt-parent = <&i8259>; + + i8259: interrupt-controller@20 { + reg = <0x1 0x20 0x2 + 0x1 0xa0 0x2 + 0x1 0x4d0 0x2>; + interrupt-controller; + device_type = "interrupt-controller"; + #address-cells = <0>; + #interrupt-cells = <2>; + compatible = "chrp,iic"; + interrupts = <9 2>; + interrupt-parent = <&mpic>; + }; + + i8042@60 { + #size-cells = <0>; + #address-cells = <1>; + reg = <0x1 0x60 0x1 0x1 0x64 0x1>; + interrupts = <1 3 12 3>; + interrupt-parent = <&i8259>; + + keyboard@0 { + reg = <0x0>; + compatible = "pnpPNP,303"; + }; + + mouse@1 { + reg = <0x1>; + compatible = "pnpPNP,f03"; + }; + }; + + rtc@70 { + compatible = "pnpPNP,b00"; + reg = <0x1 0x70 0x2>; + }; + + gpio@400 { + reg = <0x1 0x400 0x80>; + }; + }; + }; }; }; }; - -/* - * mpc8544ds.dtsi must be last to ensure board_pci3 overrides pci3 
settings - * for interrupt-map & interrupt-map-mask - */ - -/include/ "fsl/mpc8544si-post.dtsi" -/include/ "mpc8544ds.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/mpc8544ds.dtsi b/trunk/arch/powerpc/boot/dts/mpc8544ds.dtsi deleted file mode 100644 index 270f64b90f4e..000000000000 --- a/trunk/arch/powerpc/boot/dts/mpc8544ds.dtsi +++ /dev/null @@ -1,161 +0,0 @@ -/* - * MPC8544DS Device Tree Source stub (no addresses or top-level ranges) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -&board_soc { - enet0: ethernet@24000 { - phy-handle = <&phy0>; - tbi-handle = <&tbi0>; - phy-connection-type = "rgmii-id"; - }; - - mdio@24520 { - phy0: ethernet-phy@0 { - interrupts = <10 1 0 0>; - reg = <0x0>; - device_type = "ethernet-phy"; - }; - phy1: ethernet-phy@1 { - interrupts = <10 1 0 0>; - reg = <0x1>; - device_type = "ethernet-phy"; - }; - - tbi0: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; - }; - }; - - enet2: ethernet@26000 { - phy-handle = <&phy1>; - tbi-handle = <&tbi1>; - phy-connection-type = "rgmii-id"; - }; - - mdio@26520 { - tbi1: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; - }; - }; -}; - -&board_pci3 { - pcie@0 { - interrupt-map-mask = <0xff00 0x0 0x0 0x7>; - interrupt-map = < - // IDSEL 0x1c USB - 0xe000 0x0 0x0 0x1 &i8259 0xc 0x2 - 0xe100 0x0 0x0 0x2 &i8259 0x9 0x2 - 0xe200 0x0 0x0 0x3 &i8259 0xa 0x2 - 0xe300 0x0 0x0 0x4 &i8259 0xb 0x2 - - // IDSEL 0x1d Audio - 0xe800 0x0 0x0 0x1 &i8259 0x6 0x2 - - // IDSEL 0x1e Legacy - 0xf000 0x0 0x0 0x1 &i8259 0x7 0x2 - 0xf100 0x0 0x0 0x1 &i8259 0x7 0x2 - - // IDSEL 0x1f IDE/SATA - 0xf800 0x0 0x0 0x1 &i8259 0xe 0x2 - 0xf900 0x0 0x0 0x1 &i8259 0x5 0x2 - >; - - - uli1575@0 { - reg = <0x0 0x0 0x0 0x0 0x0>; - #size-cells = <2>; - #address-cells = <3>; - ranges = <0x2000000 0x0 0xb0000000 - 0x2000000 0x0 0xb0000000 - 0x0 0x100000 - - 0x1000000 0x0 0x0 - 0x1000000 0x0 0x0 - 0x0 0x100000>; - isa@1e { - device_type = "isa"; - #interrupt-cells = <2>; - #size-cells = <1>; - #address-cells = <2>; - reg = <0xf000 0x0 0x0 0x0 0x0>; - ranges = <0x1 0x0 0x1000000 0x0 0x0 - 0x1000>; - interrupt-parent = <&i8259>; - - i8259: interrupt-controller@20 { - reg = <0x1 0x20 0x2 - 0x1 0xa0 0x2 - 0x1 0x4d0 0x2>; - interrupt-controller; - device_type = "interrupt-controller"; - #address-cells = <0>; - #interrupt-cells = <2>; - compatible = "chrp,iic"; - interrupts = <9 2 0 0>; - interrupt-parent = <&mpic>; - }; - - i8042@60 { - #size-cells = <0>; - #address-cells = <1>; - reg = <0x1 0x60 0x1 0x1 0x64 0x1>; - interrupts = <1 3 12 3>; - interrupt-parent = - <&i8259>; - - keyboard@0 { - reg = <0x0>; - compatible = "pnpPNP,303"; - }; - - mouse@1 { - reg = <0x1>; - compatible = "pnpPNP,f03"; - }; - }; - - rtc@70 { - compatible = "pnpPNP,b00"; - reg = <0x1 0x70 0x2>; - }; - - gpio@400 { - reg = <0x1 0x400 0x80>; - }; - }; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/mpc8548cds.dts b/trunk/arch/powerpc/boot/dts/mpc8548cds.dts index 07b8dae0f46e..a17a5572fb73 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8548cds.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8548cds.dts @@ -9,11 +9,13 @@ * option) any later version. 
*/ -/include/ "fsl/mpc8548si-pre.dtsi" +/dts-v1/; / { model = "MPC8548CDS"; compatible = "MPC8548CDS", "MPC85xxCDS"; + #address-cells = <1>; + #size-cells = <1>; aliases { ethernet0 = &enet0; @@ -27,19 +29,76 @@ pci2 = &pci2; }; + cpus { + #address-cells = <1>; + #size-cells = <0>; + + PowerPC,8548@0 { + device_type = "cpu"; + reg = <0x0>; + d-cache-line-size = <32>; // 32 bytes + i-cache-line-size = <32>; // 32 bytes + d-cache-size = <0x8000>; // L1, 32K + i-cache-size = <0x8000>; // L1, 32K + timebase-frequency = <0>; // 33 MHz, from uboot + bus-frequency = <0>; // 166 MHz + clock-frequency = <0>; // 825 MHz, from uboot + next-level-cache = <&L2>; + }; + }; + memory { device_type = "memory"; - reg = <0 0 0x0 0x8000000>; // 128M at 0x0 + reg = <0x0 0x8000000>; // 128M at 0x0 }; - lbc: localbus@e0005000 { - reg = <0 0xe0005000 0 0x1000>; - }; + soc8548@e0000000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "simple-bus"; + ranges = <0x0 0xe0000000 0x100000>; + bus-frequency = <0>; + + ecm-law@0 { + compatible = "fsl,ecm-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <10>; + }; + + ecm@1000 { + compatible = "fsl,mpc8548-ecm", "fsl,ecm"; + reg = <0x1000 0x1000>; + interrupts = <17 2>; + interrupt-parent = <&mpic>; + }; + + memory-controller@2000 { + compatible = "fsl,mpc8548-memory-controller"; + reg = <0x2000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <18 2>; + }; - soc: soc8548@e0000000 { - ranges = <0 0x0 0xe0000000 0x100000>; + L2: l2-cache-controller@20000 { + compatible = "fsl,mpc8548-l2-cache-controller"; + reg = <0x20000 0x1000>; + cache-line-size = <32>; // 32 bytes + cache-size = <0x80000>; // L2, 512K + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; i2c@3000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x3000 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + eeprom@50 { compatible = "atmel,24c64"; reg = <0x50>; @@ -57,178 +116,351 @@ }; i2c@3100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x3100 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + eeprom@50 { compatible = "atmel,24c64"; reg = <0x50>; }; }; - enet0: ethernet@24000 { - tbi-handle = <&tbi0>; - phy-handle = <&phy0>; - }; - - mdio@24520 { - phy0: ethernet-phy@0 { - interrupts = <5 1 0 0>; - reg = <0x0>; - device_type = "ethernet-phy"; + dma@21300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,mpc8548-dma", "fsl,eloplus-dma"; + reg = <0x21300 0x4>; + ranges = <0x0 0x21100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,mpc8548-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <20 2>; }; - phy1: ethernet-phy@1 { - interrupts = <5 1 0 0>; - reg = <0x1>; - device_type = "ethernet-phy"; + dma-channel@80 { + compatible = "fsl,mpc8548-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <21 2>; }; - phy2: ethernet-phy@2 { - interrupts = <5 1 0 0>; - reg = <0x2>; - device_type = "ethernet-phy"; + dma-channel@100 { + compatible = "fsl,mpc8548-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <22 2>; }; - phy3: ethernet-phy@3 { - interrupts = <5 1 0 0>; - reg = <0x3>; - device_type = "ethernet-phy"; + dma-channel@180 { + compatible = 
"fsl,mpc8548-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <23 2>; }; - tbi0: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; + }; + + enet0: ethernet@24000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <0>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x24000 0x1000>; + ranges = <0x0 0x24000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <29 2 30 2 34 2>; + interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; + phy-handle = <&phy0>; + + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-mdio"; + reg = <0x520 0x20>; + + phy0: ethernet-phy@0 { + interrupt-parent = <&mpic>; + interrupts = <5 1>; + reg = <0x0>; + device_type = "ethernet-phy"; + }; + phy1: ethernet-phy@1 { + interrupt-parent = <&mpic>; + interrupts = <5 1>; + reg = <0x1>; + device_type = "ethernet-phy"; + }; + phy2: ethernet-phy@2 { + interrupt-parent = <&mpic>; + interrupts = <5 1>; + reg = <0x2>; + device_type = "ethernet-phy"; + }; + phy3: ethernet-phy@3 { + interrupt-parent = <&mpic>; + interrupts = <5 1>; + reg = <0x3>; + device_type = "ethernet-phy"; + }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; }; enet1: ethernet@25000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <1>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x25000 0x1000>; + ranges = <0x0 0x25000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <35 2 36 2 40 2>; + interrupt-parent = <&mpic>; tbi-handle = <&tbi1>; phy-handle = <&phy1>; - }; - mdio@25520 { - tbi1: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; }; enet2: ethernet@26000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <2>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x26000 0x1000>; + ranges = <0x0 0x26000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <31 2 32 2 33 2>; + interrupt-parent = <&mpic>; tbi-handle = <&tbi2>; phy-handle = <&phy2>; - }; - mdio@26520 { - tbi2: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x520 0x20>; + + tbi2: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; }; enet3: ethernet@27000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <3>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x27000 0x1000>; + ranges = <0x0 0x27000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <37 2 38 2 39 2>; + interrupt-parent = <&mpic>; tbi-handle = <&tbi3>; phy-handle = <&phy3>; - }; - mdio@27520 { - tbi3: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x520 0x20>; + + tbi3: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; }; + + serial0: serial@4500 { + cell-index = <0>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4500 0x100>; // reg base, size + clock-frequency = <0>; // should we fill in in uboot? 
+ interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + + serial1: serial@4600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4600 0x100>; // reg base, size + clock-frequency = <0>; // should we fill in in uboot? + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + + global-utilities@e0000 { //global utilities reg + compatible = "fsl,mpc8548-guts"; + reg = <0xe0000 0x1000>; + fsl,has-rstcr; + }; + + crypto@30000 { + compatible = "fsl,sec2.1", "fsl,sec2.0"; + reg = <0x30000 0x10000>; + interrupts = <45 2>; + interrupt-parent = <&mpic>; + fsl,num-channels = <4>; + fsl,channel-fifo-len = <24>; + fsl,exec-units-mask = <0xfe>; + fsl,descriptor-types-mask = <0x12b0ebf>; + }; + + mpic: pic@40000 { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <2>; + reg = <0x40000 0x40000>; + compatible = "chrp,open-pic"; + device_type = "open-pic"; + }; }; pci0: pci@e0008000 { - reg = <0 0xe0008000 0 0x1000>; - ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x10000000 - 0x1000000 0x0 0x00000000 0 0xe2000000 0x0 0x800000>; - clock-frequency = <66666666>; interrupt-map-mask = <0xf800 0x0 0x0 0x7>; interrupt-map = < /* IDSEL 0x4 (PCIX Slot 2) */ - 0x2000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0 - 0x2000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0 - 0x2000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0 - 0x2000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0 + 0x2000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0x2000 0x0 0x0 0x2 &mpic 0x1 0x1 + 0x2000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0x2000 0x0 0x0 0x4 &mpic 0x3 0x1 /* IDSEL 0x5 (PCIX Slot 3) */ - 0x2800 0x0 0x0 0x1 &mpic 0x1 0x1 0 0 - 0x2800 0x0 0x0 0x2 &mpic 0x2 0x1 0 0 - 0x2800 0x0 0x0 0x3 &mpic 0x3 0x1 0 0 - 0x2800 0x0 0x0 0x4 &mpic 0x0 0x1 0 0 + 0x2800 0x0 0x0 0x1 &mpic 0x1 0x1 + 0x2800 0x0 0x0 0x2 &mpic 0x2 0x1 + 0x2800 0x0 0x0 0x3 &mpic 0x3 0x1 + 0x2800 0x0 0x0 0x4 &mpic 0x0 0x1 /* IDSEL 0x6 (PCIX Slot 4) */ - 0x3000 0x0 0x0 0x1 &mpic 0x2 0x1 0 0 - 0x3000 0x0 0x0 0x2 &mpic 0x3 0x1 0 0 - 0x3000 0x0 0x0 0x3 &mpic 0x0 0x1 0 0 - 0x3000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0 + 0x3000 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x3000 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x3000 0x0 0x0 0x3 &mpic 0x0 0x1 + 0x3000 0x0 0x0 0x4 &mpic 0x1 0x1 /* IDSEL 0x8 (PCIX Slot 5) */ - 0x4000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0 - 0x4000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0 - 0x4000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0 - 0x4000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0 + 0x4000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0x4000 0x0 0x0 0x2 &mpic 0x1 0x1 + 0x4000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0x4000 0x0 0x0 0x4 &mpic 0x3 0x1 /* IDSEL 0xC (Tsi310 bridge) */ - 0x6000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0 - 0x6000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0 - 0x6000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0 - 0x6000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0 + 0x6000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0x6000 0x0 0x0 0x2 &mpic 0x1 0x1 + 0x6000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0x6000 0x0 0x0 0x4 &mpic 0x3 0x1 /* IDSEL 0x14 (Slot 2) */ - 0xa000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0 - 0xa000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0 - 0xa000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0 - 0xa000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0 + 0xa000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0xa000 0x0 0x0 0x2 &mpic 0x1 0x1 + 0xa000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0xa000 0x0 0x0 0x4 &mpic 0x3 0x1 /* IDSEL 0x15 (Slot 3) */ - 0xa800 0x0 0x0 0x1 &mpic 0x1 0x1 0 0 - 0xa800 0x0 0x0 0x2 &mpic 0x2 0x1 0 0 - 0xa800 0x0 0x0 0x3 &mpic 0x3 0x1 0 0 - 0xa800 0x0 0x0 0x4 &mpic 0x0 0x1 0 0 + 0xa800 0x0 0x0 0x1 &mpic 0x1 0x1 + 0xa800 0x0 0x0 0x2 &mpic 0x2 0x1 + 0xa800 0x0 0x0 0x3 &mpic 0x3 0x1 + 0xa800 0x0 0x0 0x4 &mpic 0x0 0x1 /* IDSEL 0x16 (Slot 4) */ - 0xb000 0x0 0x0 0x1 &mpic 0x2 0x1 0 0 - 0xb000 0x0 0x0 0x2 &mpic 
0x3 0x1 0 0 - 0xb000 0x0 0x0 0x3 &mpic 0x0 0x1 0 0 - 0xb000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0 + 0xb000 0x0 0x0 0x1 &mpic 0x2 0x1 + 0xb000 0x0 0x0 0x2 &mpic 0x3 0x1 + 0xb000 0x0 0x0 0x3 &mpic 0x0 0x1 + 0xb000 0x0 0x0 0x4 &mpic 0x1 0x1 /* IDSEL 0x18 (Slot 5) */ - 0xc000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0 - 0xc000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0 - 0xc000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0 - 0xc000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0 + 0xc000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0xc000 0x0 0x0 0x2 &mpic 0x1 0x1 + 0xc000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0xc000 0x0 0x0 0x4 &mpic 0x3 0x1 /* IDSEL 0x1C (Tsi310 bridge PCI primary) */ - 0xe000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0 - 0xe000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0 - 0xe000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0 - 0xe000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0>; + 0xe000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0xe000 0x0 0x0 0x2 &mpic 0x1 0x1 + 0xe000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0xe000 0x0 0x0 0x4 &mpic 0x3 0x1>; + + interrupt-parent = <&mpic>; + interrupts = <24 2>; + bus-range = <0 0>; + ranges = <0x2000000 0x0 0x80000000 0x80000000 0x0 0x10000000 + 0x1000000 0x0 0x0 0xe2000000 0x0 0x800000>; + clock-frequency = <66666666>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0xe0008000 0x1000>; + compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci"; + device_type = "pci"; pci_bridge@1c { interrupt-map-mask = <0xf800 0x0 0x0 0x7>; interrupt-map = < /* IDSEL 0x00 (PrPMC Site) */ - 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0 - 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0 - 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0 - 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0 + 0000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0000 0x0 0x0 0x2 &mpic 0x1 0x1 + 0000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0000 0x0 0x0 0x4 &mpic 0x3 0x1 /* IDSEL 0x04 (VIA chip) */ - 0x2000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0 - 0x2000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0 - 0x2000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0 - 0x2000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0 + 0x2000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0x2000 0x0 0x0 0x2 &mpic 0x1 0x1 + 0x2000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0x2000 0x0 0x0 0x4 &mpic 0x3 0x1 /* IDSEL 0x05 (8139) */ - 0x2800 0x0 0x0 0x1 &mpic 0x1 0x1 0 0 + 0x2800 0x0 0x0 0x1 &mpic 0x1 0x1 /* IDSEL 0x06 (Slot 6) */ - 0x3000 0x0 0x0 0x1 &mpic 0x2 0x1 0 0 - 0x3000 0x0 0x0 0x2 &mpic 0x3 0x1 0 0 - 0x3000 0x0 0x0 0x3 &mpic 0x0 0x1 0 0 - 0x3000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0 + 0x3000 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x3000 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x3000 0x0 0x0 0x3 &mpic 0x0 0x1 + 0x3000 0x0 0x0 0x4 &mpic 0x1 0x1 /* IDESL 0x07 (Slot 7) */ - 0x3800 0x0 0x0 0x1 &mpic 0x3 0x1 0 0 - 0x3800 0x0 0x0 0x2 &mpic 0x0 0x1 0 0 - 0x3800 0x0 0x0 0x3 &mpic 0x1 0x1 0 0 - 0x3800 0x0 0x0 0x4 &mpic 0x2 0x1 0 0>; + 0x3800 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x3800 0x0 0x0 0x2 &mpic 0x0 0x1 + 0x3800 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x3800 0x0 0x0 0x4 &mpic 0x2 0x1>; reg = <0xe000 0x0 0x0 0x0 0x0>; #interrupt-cells = <1>; @@ -260,7 +492,7 @@ #address-cells = <0>; #interrupt-cells = <2>; compatible = "chrp,iic"; - interrupts = <0 1 0 0>; + interrupts = <0 1>; interrupt-parent = <&mpic>; }; @@ -273,25 +505,56 @@ }; pci1: pci@e0009000 { - reg = <0 0xe0009000 0 0x1000>; - ranges = <0x2000000 0x0 0x90000000 0 0x90000000 0x0 0x10000000 - 0x1000000 0x0 0x00000000 0 0xe2800000 0x0 0x800000>; - clock-frequency = <66666666>; interrupt-map-mask = <0xf800 0x0 0x0 0x7>; interrupt-map = < /* IDSEL 0x15 */ - 0xa800 0x0 0x0 0x1 &mpic 0xb 0x1 0 0 - 0xa800 0x0 0x0 0x2 &mpic 0x1 0x1 0 0 - 0xa800 0x0 0x0 0x3 &mpic 0x2 0x1 0 0 - 0xa800 0x0 0x0 0x4 &mpic 0x3 0x1 0 0>; + 0xa800 0x0 0x0 0x1 &mpic 0xb 0x1 + 0xa800 0x0 0x0 0x2 &mpic 0x1 0x1 + 0xa800 0x0 0x0 0x3 &mpic 0x2 0x1 + 0xa800 
0x0 0x0 0x4 &mpic 0x3 0x1>; + + interrupt-parent = <&mpic>; + interrupts = <25 2>; + bus-range = <0 0>; + ranges = <0x2000000 0x0 0x90000000 0x90000000 0x0 0x10000000 + 0x1000000 0x0 0x0 0xe2800000 0x0 0x800000>; + clock-frequency = <66666666>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0xe0009000 0x1000>; + compatible = "fsl,mpc8540-pci"; + device_type = "pci"; }; pci2: pcie@e000a000 { - reg = <0 0xe000a000 0 0x1000>; - ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 - 0x1000000 0x0 0x00000000 0 0xe3000000 0x0 0x100000>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + + /* IDSEL 0x0 (PEX) */ + 00000 0x0 0x0 0x1 &mpic 0x0 0x1 + 00000 0x0 0x0 0x2 &mpic 0x1 0x1 + 00000 0x0 0x0 0x3 &mpic 0x2 0x1 + 00000 0x0 0x0 0x4 &mpic 0x3 0x1>; + + interrupt-parent = <&mpic>; + interrupts = <26 2>; + bus-range = <0 255>; + ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000 + 0x1000000 0x0 0x0 0xe3000000 0x0 0x100000>; + clock-frequency = <33333333>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0xe000a000 0x1000>; + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0xa0000000 0x2000000 0x0 0xa0000000 0x0 0x20000000 @@ -302,5 +565,3 @@ }; }; }; - -/include/ "fsl/mpc8548si-post.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/mpc8555cds.dts b/trunk/arch/powerpc/boot/dts/mpc8555cds.dts index fe10438613d6..5c5614f9eb17 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8555cds.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8555cds.dts @@ -209,7 +209,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; // reg base, size clock-frequency = <0>; // should we fill in in uboot? interrupts = <42 2>; @@ -219,7 +219,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; // reg base, size clock-frequency = <0>; // should we fill in in uboot? interrupts = <42 2>; diff --git a/trunk/arch/powerpc/boot/dts/mpc8568mds.dts b/trunk/arch/powerpc/boot/dts/mpc8568mds.dts index 09598bb5d443..647daf8e7291 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8568mds.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8568mds.dts @@ -9,25 +9,60 @@ * option) any later version. 
*/ -/include/ "fsl/mpc8568si-pre.dtsi" +/dts-v1/; / { model = "MPC8568EMDS"; compatible = "MPC8568EMDS", "MPC85xxMDS"; + #address-cells = <1>; + #size-cells = <1>; aliases { + ethernet0 = &enet0; + ethernet1 = &enet1; + ethernet2 = &enet2; + ethernet3 = &enet3; + serial0 = &serial0; + serial1 = &serial1; pci0 = &pci0; pci1 = &pci1; - rapidio0 = &rio; + rapidio0 = &rio0; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + PowerPC,8568@0 { + device_type = "cpu"; + reg = <0x0>; + d-cache-line-size = <32>; // 32 bytes + i-cache-line-size = <32>; // 32 bytes + d-cache-size = <0x8000>; // L1, 32K + i-cache-size = <0x8000>; // L1, 32K + sleep = <&pmc 0x00008000 // core + &pmc 0x00004000>; // timebase + timebase-frequency = <0>; + bus-frequency = <0>; + clock-frequency = <0>; + next-level-cache = <&L2>; + }; }; memory { device_type = "memory"; - reg = <0x0 0x0 0x0 0x0>; + reg = <0x0 0x10000000>; }; - lbc: localbus@e0005000 { - reg = <0x0 0xe0005000 0x0 0x1000>; + localbus@e0005000 { + #address-cells = <2>; + #size-cells = <1>; + compatible = "fsl,mpc8568-localbus", "fsl,pq3-localbus", + "simple-bus"; + reg = <0xe0005000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <19 2>; + ranges = <0x0 0x0 0xfe000000 0x02000000 0x1 0x0 0xf8000000 0x00008000 0x2 0x0 0xf0000000 0x04000000 @@ -69,65 +104,288 @@ }; }; - soc: soc8568@e0000000 { - ranges = <0x0 0x0 0xe0000000 0x100000>; + soc8568@e0000000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "simple-bus"; + ranges = <0x0 0xe0000000 0x100000>; + bus-frequency = <0>; + + ecm-law@0 { + compatible = "fsl,ecm-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <10>; + }; + + ecm@1000 { + compatible = "fsl,mpc8568-ecm", "fsl,ecm"; + reg = <0x1000 0x1000>; + interrupts = <17 2>; + interrupt-parent = <&mpic>; + }; + + memory-controller@2000 { + compatible = "fsl,mpc8568-memory-controller"; + reg = <0x2000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <18 2>; + }; + + L2: l2-cache-controller@20000 { + compatible = "fsl,mpc8568-l2-cache-controller"; + reg = <0x20000 0x1000>; + cache-line-size = <32>; // 32 bytes + cache-size = <0x80000>; // L2, 512K + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; i2c-sleep-nexus { + #address-cells = <1>; + #size-cells = <1>; + compatible = "simple-bus"; + sleep = <&pmc 0x00000004>; + ranges; + i2c@3000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x3000 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + rtc@68 { compatible = "dallas,ds1374"; reg = <0x68>; - interrupts = <3 1 0 0>; + interrupts = <3 1>; + interrupt-parent = <&mpic>; }; }; - }; - enet0: ethernet@24000 { - tbi-handle = <&tbi0>; - phy-handle = <&phy2>; + i2c@3100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x3100 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + }; }; - mdio@24520 { - phy0: ethernet-phy@7 { - interrupts = <1 1 0 0>; - reg = <0x7>; - device_type = "ethernet-phy"; + dma@21300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,mpc8568-dma", "fsl,eloplus-dma"; + reg = <0x21300 0x4>; + ranges = <0x0 0x21100 0x200>; + cell-index = <0>; + sleep = <&pmc 0x00000400>; + + dma-channel@0 { + compatible = "fsl,mpc8568-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <20 2>; }; - phy1: ethernet-phy@1 { - interrupts = <2 1 0 0>; - reg = <0x1>; - 
device_type = "ethernet-phy"; + dma-channel@80 { + compatible = "fsl,mpc8568-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <21 2>; }; - phy2: ethernet-phy@2 { - interrupts = <1 1 0 0>; - reg = <0x2>; - device_type = "ethernet-phy"; + dma-channel@100 { + compatible = "fsl,mpc8568-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <22 2>; }; - phy3: ethernet-phy@3 { - interrupts = <2 1 0 0>; - reg = <0x3>; - device_type = "ethernet-phy"; + dma-channel@180 { + compatible = "fsl,mpc8568-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <23 2>; }; - tbi0: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; + }; + + enet0: ethernet@24000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <0>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x24000 0x1000>; + ranges = <0x0 0x24000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <29 2 30 2 34 2>; + interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; + phy-handle = <&phy2>; + sleep = <&pmc 0x00000080>; + + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-mdio"; + reg = <0x520 0x20>; + + phy0: ethernet-phy@7 { + interrupt-parent = <&mpic>; + interrupts = <1 1>; + reg = <0x7>; + device_type = "ethernet-phy"; + }; + phy1: ethernet-phy@1 { + interrupt-parent = <&mpic>; + interrupts = <2 1>; + reg = <0x1>; + device_type = "ethernet-phy"; + }; + phy2: ethernet-phy@2 { + interrupt-parent = <&mpic>; + interrupts = <1 1>; + reg = <0x2>; + device_type = "ethernet-phy"; + }; + phy3: ethernet-phy@3 { + interrupt-parent = <&mpic>; + interrupts = <2 1>; + reg = <0x3>; + device_type = "ethernet-phy"; + }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; }; enet1: ethernet@25000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <1>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x25000 0x1000>; + ranges = <0x0 0x25000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <35 2 36 2 40 2>; + interrupt-parent = <&mpic>; tbi-handle = <&tbi1>; phy-handle = <&phy3>; sleep = <&pmc 0x00000040>; + + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; }; - mdio@25520 { - tbi1: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; + duart-sleep-nexus { + #address-cells = <1>; + #size-cells = <1>; + compatible = "simple-bus"; + sleep = <&pmc 0x00000002>; + ranges; + + serial0: serial@4500 { + cell-index = <0>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4500 0x100>; + clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + + serial1: serial@4600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4600 0x100>; + clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; }; }; + global-utilities@e0000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,mpc8568-guts", "fsl,mpc8548-guts"; + reg = <0xe0000 0x1000>; + ranges = <0 0xe0000 0x1000>; + fsl,has-rstcr; + + pmc: power@70 { + compatible = "fsl,mpc8568-pmc", + "fsl,mpc8548-pmc"; + reg = <0x70 0x20>; + }; + }; + + crypto@30000 { + compatible = "fsl,sec2.1", 
"fsl,sec2.0"; + reg = <0x30000 0x10000>; + interrupts = <45 2>; + interrupt-parent = <&mpic>; + fsl,num-channels = <4>; + fsl,channel-fifo-len = <24>; + fsl,exec-units-mask = <0xfe>; + fsl,descriptor-types-mask = <0x12b0ebf>; + sleep = <&pmc 0x01000000>; + }; + + mpic: pic@40000 { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <2>; + reg = <0x40000 0x40000>; + compatible = "chrp,open-pic"; + device_type = "open-pic"; + }; + + msi@41600 { + compatible = "fsl,mpc8568-msi", "fsl,mpic-msi"; + reg = <0x41600 0x80>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe0 0 + 0xe1 0 + 0xe2 0 + 0xe3 0 + 0xe4 0 + 0xe5 0 + 0xe6 0 + 0xe7 0>; + interrupt-parent = <&mpic>; + }; + par_io@e0100 { + reg = <0xe0100 0x100>; + device_type = "par_io"; num-ports = <7>; pio1: ucc_pin@01 { @@ -190,21 +448,57 @@ }; }; - qe: qe@e0080000 { - ranges = <0x0 0x0 0xe0080000 0x40000>; - reg = <0x0 0xe0080000 0x0 0x480>; + qe@e0080000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "qe"; + compatible = "fsl,qe"; + ranges = <0x0 0xe0080000 0x40000>; + reg = <0xe0080000 0x480>; + sleep = <&pmc 0x00000800>; + brg-frequency = <0>; + bus-frequency = <396000000>; + fsl,qe-num-riscs = <2>; + fsl,qe-num-snums = <28>; + + muram@10000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,qe-muram", "fsl,cpm-muram"; + ranges = <0x0 0x10000 0x10000>; + + data-only@0 { + compatible = "fsl,qe-muram-data", + "fsl,cpm-muram-data"; + reg = <0x0 0x10000>; + }; + }; spi@4c0 { + cell-index = <0>; + compatible = "fsl,spi"; + reg = <0x4c0 0x40>; + interrupts = <2>; + interrupt-parent = <&qeic>; mode = "cpu"; }; spi@500 { + cell-index = <1>; + compatible = "fsl,spi"; + reg = <0x500 0x40>; + interrupts = <1>; + interrupt-parent = <&qeic>; mode = "cpu"; }; enet2: ucc@2000 { device_type = "network"; compatible = "ucc_geth"; + cell-index = <1>; + reg = <0x2000 0x200>; + interrupts = <32>; + interrupt-parent = <&qeic>; local-mac-address = [ 00 00 00 00 00 00 ]; rx-clock-name = "none"; tx-clock-name = "clk16"; @@ -216,6 +510,10 @@ enet3: ucc@3000 { device_type = "network"; compatible = "ucc_geth"; + cell-index = <2>; + reg = <0x3000 0x200>; + interrupts = <33>; + interrupt-parent = <&qeic>; local-mac-address = [ 00 00 00 00 00 00 ]; rx-clock-name = "none"; tx-clock-name = "clk16"; @@ -234,57 +532,102 @@ * gianfar's MDIO bus */ qe_phy0: ethernet-phy@07 { interrupt-parent = <&mpic>; - interrupts = <1 1 0 0>; + interrupts = <1 1>; reg = <0x7>; device_type = "ethernet-phy"; }; qe_phy1: ethernet-phy@01 { interrupt-parent = <&mpic>; - interrupts = <2 1 0 0>; + interrupts = <2 1>; reg = <0x1>; device_type = "ethernet-phy"; }; qe_phy2: ethernet-phy@02 { interrupt-parent = <&mpic>; - interrupts = <1 1 0 0>; + interrupts = <1 1>; reg = <0x2>; device_type = "ethernet-phy"; }; qe_phy3: ethernet-phy@03 { interrupt-parent = <&mpic>; - interrupts = <2 1 0 0>; + interrupts = <2 1>; reg = <0x3>; device_type = "ethernet-phy"; }; }; + + qeic: interrupt-controller@80 { + interrupt-controller; + compatible = "fsl,qe-ic"; + #address-cells = <0>; + #interrupt-cells = <1>; + reg = <0x80 0x80>; + big-endian; + interrupts = <46 2 46 2>; //high:30 low:30 + interrupt-parent = <&mpic>; + }; + }; pci0: pci@e0008000 { - reg = <0x0 0xe0008000 0x0 0x1000>; - ranges = <0x2000000 0x0 0x80000000 0x0 0x80000000 0x0 0x20000000 - 0x1000000 0x0 0x00000000 0x0 0xe2000000 0x0 0x800000>; - clock-frequency = <66666666>; interrupt-map-mask = <0xf800 0x0 0x0 0x7>; interrupt-map = < /* IDSEL 0x12 AD18 */ - 0x9000 0x0 0x0 0x1 &mpic 0x5 0x1 0 0 
- 0x9000 0x0 0x0 0x2 &mpic 0x6 0x1 0 0 - 0x9000 0x0 0x0 0x3 &mpic 0x7 0x1 0 0 - 0x9000 0x0 0x0 0x4 &mpic 0x4 0x1 0 0 + 0x9000 0x0 0x0 0x1 &mpic 0x5 0x1 + 0x9000 0x0 0x0 0x2 &mpic 0x6 0x1 + 0x9000 0x0 0x0 0x3 &mpic 0x7 0x1 + 0x9000 0x0 0x0 0x4 &mpic 0x4 0x1 /* IDSEL 0x13 AD19 */ - 0x9800 0x0 0x0 0x1 &mpic 0x6 0x1 0 0 - 0x9800 0x0 0x0 0x2 &mpic 0x7 0x1 0 0 - 0x9800 0x0 0x0 0x3 &mpic 0x4 0x1 0 0 - 0x9800 0x0 0x0 0x4 &mpic 0x5 0x1 0 0>; + 0x9800 0x0 0x0 0x1 &mpic 0x6 0x1 + 0x9800 0x0 0x0 0x2 &mpic 0x7 0x1 + 0x9800 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x9800 0x0 0x0 0x4 &mpic 0x5 0x1>; + + interrupt-parent = <&mpic>; + interrupts = <24 2>; + bus-range = <0 255>; + ranges = <0x2000000 0x0 0x80000000 0x80000000 0x0 0x20000000 + 0x1000000 0x0 0x0 0xe2000000 0x0 0x800000>; + sleep = <&pmc 0x80000000>; + clock-frequency = <66666666>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0xe0008000 0x1000>; + compatible = "fsl,mpc8540-pci"; + device_type = "pci"; }; /* PCI Express */ pci1: pcie@e000a000 { - ranges = <0x2000000 0x0 0xa0000000 0x0 0xa0000000 0x0 0x10000000 - 0x1000000 0x0 0x00000000 0x0 0xe2800000 0x0 0x800000>; - reg = <0x0 0xe000a000 0x0 0x1000>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + + /* IDSEL 0x0 (PEX) */ + 00000 0x0 0x0 0x1 &mpic 0x0 0x1 + 00000 0x0 0x0 0x2 &mpic 0x1 0x1 + 00000 0x0 0x0 0x3 &mpic 0x2 0x1 + 00000 0x0 0x0 0x4 &mpic 0x3 0x1>; + + interrupt-parent = <&mpic>; + interrupts = <26 2>; + bus-range = <0 255>; + ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000 + 0x1000000 0x0 0x0 0xe2800000 0x0 0x800000>; + sleep = <&pmc 0x20000000>; + clock-frequency = <33333333>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0xe000a000 0x1000>; + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0xa0000000 0x2000000 0x0 0xa0000000 0x0 0x10000000 @@ -295,11 +638,22 @@ }; }; - rio: rapidio@e00c00000 { - reg = <0x0 0xe00c0000 0x0 0x20000>; - port1 { - ranges = <0x0 0x0 0x0 0xc0000000 0x0 0x20000000>; - }; + rio0: rapidio@e00c00000 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "fsl,mpc8568-rapidio", "fsl,rapidio-delta"; + reg = <0xe00c0000 0x20000>; + ranges = <0x0 0x0 0xc0000000 0x0 0x20000000>; + interrupts = <48 2 /* error */ + 49 2 /* bell_outb */ + 50 2 /* bell_inb */ + 53 2 /* msg1_tx */ + 54 2 /* msg1_rx */ + 55 2 /* msg2_tx */ + 56 2 /* msg2_rx */>; + interrupt-parent = <&mpic>; + sleep = <&pmc 0x00080000 /* controller */ + &pmc 0x00040000>; /* message unit */ }; leds { @@ -318,5 +672,3 @@ }; }; }; - -/include/ "fsl/mpc8568si-post.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/mpc8569mds.dts b/trunk/arch/powerpc/boot/dts/mpc8569mds.dts index 7e283c891b7f..8b72eaff5b03 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8569mds.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8569mds.dts @@ -9,36 +9,66 @@ * option) any later version. 
*/ -/include/ "fsl/mpc8569si-pre.dtsi" +/dts-v1/; / { model = "MPC8569EMDS"; compatible = "fsl,MPC8569EMDS"; - #address-cells = <2>; - #size-cells = <2>; - interrupt-parent = <&mpic>; + #address-cells = <1>; + #size-cells = <1>; aliases { + serial0 = &serial0; + serial1 = &serial1; + ethernet0 = &enet0; + ethernet1 = &enet1; ethernet2 = &enet2; ethernet3 = &enet3; ethernet5 = &enet5; ethernet7 = &enet7; - rapidio0 = &rio; + pci1 = &pci1; + rapidio0 = &rio0; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + PowerPC,8569@0 { + device_type = "cpu"; + reg = <0x0>; + d-cache-line-size = <32>; // 32 bytes + i-cache-line-size = <32>; // 32 bytes + d-cache-size = <0x8000>; // L1, 32K + i-cache-size = <0x8000>; // L1, 32K + sleep = <&pmc 0x00008000 // core + &pmc 0x00004000>; // timebase + timebase-frequency = <0>; + bus-frequency = <0>; + clock-frequency = <0>; + next-level-cache = <&L2>; + }; }; memory { device_type = "memory"; }; - lbc: localbus@e0005000 { - reg = <0x0 0xe0005000 0x0 0x1000>; - - ranges = <0x0 0x0 0x0 0xfe000000 0x02000000 - 0x1 0x0 0x0 0xf8000000 0x00008000 - 0x2 0x0 0x0 0xf0000000 0x04000000 - 0x3 0x0 0x0 0xfc000000 0x00008000 - 0x4 0x0 0x0 0xf8008000 0x00008000 - 0x5 0x0 0x0 0xf8010000 0x00008000>; + localbus@e0005000 { + #address-cells = <2>; + #size-cells = <1>; + compatible = "fsl,mpc8569-elbc", "fsl,elbc", "simple-bus"; + reg = <0xe0005000 0x1000>; + interrupts = <19 2>; + interrupt-parent = <&mpic>; + sleep = <&pmc 0x08000000>; + + ranges = <0x0 0x0 0xfe000000 0x02000000 + 0x1 0x0 0xf8000000 0x00008000 + 0x2 0x0 0xf0000000 0x04000000 + 0x3 0x0 0xfc000000 0x00008000 + 0x4 0x0 0xf8008000 0x00008000 + 0x5 0x0 0xf8010000 0x00008000>; nor@0,0 { #address-cells = <1>; @@ -103,25 +133,220 @@ }; }; - soc: soc@e0000000 { - ranges = <0x0 0x0 0xe0000000 0x100000>; + soc@e0000000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "fsl,mpc8569-immr", "simple-bus"; + ranges = <0x0 0xe0000000 0x100000>; + bus-frequency = <0>; + + ecm-law@0 { + compatible = "fsl,ecm-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <10>; + }; + + ecm@1000 { + compatible = "fsl,mpc8569-ecm", "fsl,ecm"; + reg = <0x1000 0x1000>; + interrupts = <17 2>; + interrupt-parent = <&mpic>; + }; + + memory-controller@2000 { + compatible = "fsl,mpc8569-memory-controller"; + reg = <0x2000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <18 2>; + }; i2c-sleep-nexus { + #address-cells = <1>; + #size-cells = <1>; + compatible = "simple-bus"; + sleep = <&pmc 0x00000004>; + ranges; + i2c@3000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x3000 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + rtc@68 { compatible = "dallas,ds1374"; reg = <0x68>; - interrupts = <3 1 0 0>; + interrupts = <3 1>; + interrupt-parent = <&mpic>; }; }; + + i2c@3100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x3100 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + }; + + duart-sleep-nexus { + #address-cells = <1>; + #size-cells = <1>; + compatible = "simple-bus"; + sleep = <&pmc 0x00000002>; + ranges; + + serial0: serial@4500 { + cell-index = <0>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4500 0x100>; + clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + + serial1: serial@4600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4600 0x100>; + 
clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + }; + + L2: l2-cache-controller@20000 { + compatible = "fsl,mpc8569-l2-cache-controller"; + reg = <0x20000 0x1000>; + cache-line-size = <32>; // 32 bytes + cache-size = <0x80000>; // L2, 512K + interrupt-parent = <&mpic>; + interrupts = <16 2>; }; - sdhc@2e000 { + dma@21300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,mpc8569-dma", "fsl,eloplus-dma"; + reg = <0x21300 0x4>; + ranges = <0x0 0x21100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,mpc8569-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <20 2>; + }; + dma-channel@80 { + compatible = "fsl,mpc8569-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <21 2>; + }; + dma-channel@100 { + compatible = "fsl,mpc8569-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <22 2>; + }; + dma-channel@180 { + compatible = "fsl,mpc8569-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <23 2>; + }; + }; + + sdhci@2e000 { + compatible = "fsl,mpc8569-esdhc", "fsl,esdhc"; + reg = <0x2e000 0x1000>; + interrupts = <72 0x8>; + interrupt-parent = <&mpic>; + sleep = <&pmc 0x00200000>; + /* Filled in by U-Boot */ + clock-frequency = <0>; status = "disabled"; sdhci,1-bit-only; }; + crypto@30000 { + compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4", + "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0"; + reg = <0x30000 0x10000>; + interrupts = <45 2 58 2>; + interrupt-parent = <&mpic>; + fsl,num-channels = <4>; + fsl,channel-fifo-len = <24>; + fsl,exec-units-mask = <0xbfe>; + fsl,descriptor-types-mask = <0x3ab0ebf>; + sleep = <&pmc 0x01000000>; + }; + + mpic: pic@40000 { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <2>; + reg = <0x40000 0x40000>; + compatible = "chrp,open-pic"; + device_type = "open-pic"; + }; + + msi@41600 { + compatible = "fsl,mpc8568-msi", "fsl,mpic-msi"; + reg = <0x41600 0x80>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe0 0 + 0xe1 0 + 0xe2 0 + 0xe3 0 + 0xe4 0 + 0xe5 0 + 0xe6 0 + 0xe7 0>; + interrupt-parent = <&mpic>; + }; + + global-utilities@e0000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,mpc8569-guts", "fsl,mpc8548-guts"; + reg = <0xe0000 0x1000>; + ranges = <0 0xe0000 0x1000>; + fsl,has-rstcr; + + pmc: power@70 { + compatible = "fsl,mpc8569-pmc", + "fsl,mpc8548-pmc"; + reg = <0x70 0x20>; + }; + }; + par_io@e0100 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0xe0100 0x100>; + ranges = <0x0 0xe0100 0x100>; + device_type = "par_io"; num-ports = <7>; qe_pio_e: gpio-controller@80 { @@ -222,11 +447,47 @@ }; }; - qe: qe@e0080000 { - ranges = <0x0 0x0 0xe0080000 0x40000>; - reg = <0x0 0xe0080000 0x0 0x480>; + qe@e0080000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "qe"; + compatible = "fsl,qe"; + ranges = <0x0 0xe0080000 0x40000>; + reg = <0xe0080000 0x480>; + sleep = <&pmc 0x00000800>; + brg-frequency = <0>; + bus-frequency = <0>; + fsl,qe-num-riscs = <4>; + fsl,qe-num-snums = <46>; + + qeic: interrupt-controller@80 { + interrupt-controller; + compatible = "fsl,qe-ic"; + #address-cells = <0>; + #interrupt-cells = <1>; + reg = <0x80 0x80>; + interrupts = <46 2 46 2>; //high:30 low:30 + interrupt-parent = <&mpic>; + }; + + 
timer@440 { + compatible = "fsl,mpc8569-qe-gtm", + "fsl,qe-gtm", "fsl,gtm"; + reg = <0x440 0x40>; + interrupts = <12 13 14 15>; + interrupt-parent = <&qeic>; + /* Filled in by U-Boot */ + clock-frequency = <0>; + }; spi@4c0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,mpc8569-qe-spi", "fsl,spi"; + reg = <0x4c0 0x40>; + cell-index = <0>; + interrupts = <2>; + interrupt-parent = <&qeic>; gpios = <&qe_pio_e 30 0>; mode = "cpu-qe"; @@ -238,10 +499,20 @@ }; spi@500 { + cell-index = <1>; + compatible = "fsl,spi"; + reg = <0x500 0x40>; + interrupts = <1>; + interrupt-parent = <&qeic>; mode = "cpu"; }; usb@6c0 { + compatible = "fsl,mpc8569-qe-usb", + "fsl,mpc8323-qe-usb"; + reg = <0x6c0 0x40 0x8b00 0x100>; + interrupts = <11>; + interrupt-parent = <&qeic>; fsl,fullspeed-clock = "clk5"; fsl,lowspeed-clock = "brg10"; gpios = <&qe_pio_f 3 0 /* USBOE */ @@ -256,6 +527,10 @@ enet0: ucc@2000 { device_type = "network"; compatible = "ucc_geth"; + cell-index = <1>; + reg = <0x2000 0x200>; + interrupts = <32>; + interrupt-parent = <&qeic>; local-mac-address = [ 00 00 00 00 00 00 ]; rx-clock-name = "none"; tx-clock-name = "clk12"; @@ -273,33 +548,35 @@ qe_phy0: ethernet-phy@07 { interrupt-parent = <&mpic>; - interrupts = <1 1 0 0>; + interrupts = <1 1>; reg = <0x7>; device_type = "ethernet-phy"; }; qe_phy1: ethernet-phy@01 { interrupt-parent = <&mpic>; - interrupts = <2 1 0 0>; + interrupts = <2 1>; reg = <0x1>; device_type = "ethernet-phy"; }; qe_phy2: ethernet-phy@02 { interrupt-parent = <&mpic>; - interrupts = <3 1 0 0>; + interrupts = <3 1>; reg = <0x2>; device_type = "ethernet-phy"; }; qe_phy3: ethernet-phy@03 { interrupt-parent = <&mpic>; - interrupts = <4 1 0 0>; + interrupts = <4 1>; reg = <0x3>; device_type = "ethernet-phy"; }; qe_phy5: ethernet-phy@04 { + interrupt-parent = <&mpic>; reg = <0x04>; device_type = "ethernet-phy"; }; qe_phy7: ethernet-phy@06 { + interrupt-parent = <&mpic>; reg = <0x6>; device_type = "ethernet-phy"; }; @@ -333,6 +610,10 @@ enet2: ucc@2200 { device_type = "network"; compatible = "ucc_geth"; + cell-index = <3>; + reg = <0x2200 0x200>; + interrupts = <34>; + interrupt-parent = <&qeic>; local-mac-address = [ 00 00 00 00 00 00 ]; rx-clock-name = "none"; tx-clock-name = "clk12"; @@ -356,6 +637,10 @@ enet1: ucc@3000 { device_type = "network"; compatible = "ucc_geth"; + cell-index = <2>; + reg = <0x3000 0x200>; + interrupts = <33>; + interrupt-parent = <&qeic>; local-mac-address = [ 00 00 00 00 00 00 ]; rx-clock-name = "none"; tx-clock-name = "clk17"; @@ -379,6 +664,10 @@ enet3: ucc@3200 { device_type = "network"; compatible = "ucc_geth"; + cell-index = <4>; + reg = <0x3200 0x200>; + interrupts = <35>; + interrupt-parent = <&qeic>; local-mac-address = [ 00 00 00 00 00 00 ]; rx-clock-name = "none"; tx-clock-name = "clk17"; @@ -402,6 +691,10 @@ enet5: ucc@3400 { device_type = "network"; compatible = "ucc_geth"; + cell-index = <6>; + reg = <0x3400 0x200>; + interrupts = <41>; + interrupt-parent = <&qeic>; local-mac-address = [ 00 00 00 00 00 00 ]; rx-clock-name = "none"; tx-clock-name = "none"; @@ -413,6 +706,10 @@ enet7: ucc@3600 { device_type = "network"; compatible = "ucc_geth"; + cell-index = <8>; + reg = <0x3600 0x200>; + interrupts = <43>; + interrupt-parent = <&qeic>; local-mac-address = [ 00 00 00 00 00 00 ]; rx-clock-name = "none"; tx-clock-name = "none"; @@ -420,14 +717,50 @@ phy-handle = <&qe_phy7>; phy-connection-type = "sgmii"; }; + + muram@10000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,qe-muram", "fsl,cpm-muram"; + 
ranges = <0x0 0x10000 0x20000>; + + data-only@0 { + compatible = "fsl,qe-muram-data", + "fsl,cpm-muram-data"; + reg = <0x0 0x20000>; + }; + }; + }; /* PCI Express */ pci1: pcie@e000a000 { - reg = <0x0 0xe000a000 0x0 0x1000>; - ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x10000000 - 0x1000000 0x0 0x00000000 0 0xe2800000 0x0 0x00800000>; + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0xe000a000 0x1000>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 (PEX) */ + 00000 0x0 0x0 0x1 &mpic 0x0 0x1 + 00000 0x0 0x0 0x2 &mpic 0x1 0x1 + 00000 0x0 0x0 0x3 &mpic 0x2 0x1 + 00000 0x0 0x0 0x4 &mpic 0x3 0x1>; + + interrupt-parent = <&mpic>; + interrupts = <26 2>; + bus-range = <0 255>; + ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000 + 0x1000000 0x0 0x00000000 0xe2800000 0x0 0x00800000>; + sleep = <&pmc 0x20000000>; + clock-frequency = <33333333>; pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0xa0000000 0x2000000 0x0 0xa0000000 0x0 0x10000000 @@ -438,15 +771,20 @@ }; }; - rio: rapidio@e00c00000 { - reg = <0x0 0xe00c0000 0x0 0x20000>; - port1 { - ranges = <0x0 0x0 0x0 0xc0000000 0x0 0x20000000>; - }; - port2 { - status = "disabled"; - }; + rio0: rapidio@e00c00000 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "fsl,mpc8569-rapidio", "fsl,rapidio-delta"; + reg = <0xe00c0000 0x20000>; + ranges = <0x0 0x0 0xc0000000 0x0 0x20000000>; + interrupts = <48 2 /* error */ + 49 2 /* bell_outb */ + 50 2 /* bell_inb */ + 53 2 /* msg1_tx */ + 54 2 /* msg1_rx */ + 55 2 /* msg2_tx */ + 56 2 /* msg2_rx */>; + interrupt-parent = <&mpic>; + sleep = <&pmc 0x00080000>; }; }; - -/include/ "fsl/mpc8569si-post.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/mpc8572ds.dts b/trunk/arch/powerpc/boot/dts/mpc8572ds.dts index 0c9f2955deb4..f6c04d25e916 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8572ds.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8572ds.dts @@ -9,18 +9,67 @@ * option) any later version. 
*/ -/include/ "fsl/mpc8572si-pre.dtsi" - +/dts-v1/; / { model = "fsl,MPC8572DS"; compatible = "fsl,MPC8572DS"; + #address-cells = <2>; + #size-cells = <2>; + + aliases { + ethernet0 = &enet0; + ethernet1 = &enet1; + ethernet2 = &enet2; + ethernet3 = &enet3; + serial0 = &serial0; + serial1 = &serial1; + pci0 = &pci0; + pci1 = &pci1; + pci2 = &pci2; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + PowerPC,8572@0 { + device_type = "cpu"; + reg = <0x0>; + d-cache-line-size = <32>; // 32 bytes + i-cache-line-size = <32>; // 32 bytes + d-cache-size = <0x8000>; // L1, 32K + i-cache-size = <0x8000>; // L1, 32K + timebase-frequency = <0>; + bus-frequency = <0>; + clock-frequency = <0>; + next-level-cache = <&L2>; + }; + + PowerPC,8572@1 { + device_type = "cpu"; + reg = <0x1>; + d-cache-line-size = <32>; // 32 bytes + i-cache-line-size = <32>; // 32 bytes + d-cache-size = <0x8000>; // L1, 32K + i-cache-size = <0x8000>; // L1, 32K + timebase-frequency = <0>; + bus-frequency = <0>; + clock-frequency = <0>; + next-level-cache = <&L2>; + }; + }; memory { device_type = "memory"; }; - board_lbc: lbc: localbus@ffe05000 { + localbus@ffe05000 { + #address-cells = <2>; + #size-cells = <1>; + compatible = "fsl,mpc8572-elbc", "fsl,elbc", "simple-bus"; reg = <0 0xffe05000 0 0x1000>; + interrupts = <19 2>; + interrupt-parent = <&mpic>; ranges = <0x0 0x0 0x0 0xe8000000 0x08000000 0x1 0x0 0x0 0xe0000000 0x08000000 @@ -29,17 +78,601 @@ 0x4 0x0 0x0 0xffa40000 0x00040000 0x5 0x0 0x0 0xffa80000 0x00040000 0x6 0x0 0x0 0xffac0000 0x00040000>; + + nor@0,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "cfi-flash"; + reg = <0x0 0x0 0x8000000>; + bank-width = <2>; + device-width = <1>; + + ramdisk@0 { + reg = <0x0 0x03000000>; + read-only; + }; + + diagnostic@3000000 { + reg = <0x03000000 0x00e00000>; + read-only; + }; + + dink@3e00000 { + reg = <0x03e00000 0x00200000>; + read-only; + }; + + kernel@4000000 { + reg = <0x04000000 0x00400000>; + read-only; + }; + + jffs2@4400000 { + reg = <0x04400000 0x03b00000>; + }; + + dtb@7f00000 { + reg = <0x07f00000 0x00080000>; + read-only; + }; + + u-boot@7f80000 { + reg = <0x07f80000 0x00080000>; + read-only; + }; + }; + + nand@2,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,mpc8572-fcm-nand", + "fsl,elbc-fcm-nand"; + reg = <0x2 0x0 0x40000>; + + u-boot@0 { + reg = <0x0 0x02000000>; + read-only; + }; + + jffs2@2000000 { + reg = <0x02000000 0x10000000>; + }; + + ramdisk@12000000 { + reg = <0x12000000 0x08000000>; + read-only; + }; + + kernel@1a000000 { + reg = <0x1a000000 0x04000000>; + }; + + dtb@1e000000 { + reg = <0x1e000000 0x01000000>; + read-only; + }; + + empty@1f000000 { + reg = <0x1f000000 0x21000000>; + }; + }; + + nand@4,0 { + compatible = "fsl,mpc8572-fcm-nand", + "fsl,elbc-fcm-nand"; + reg = <0x4 0x0 0x40000>; + }; + + nand@5,0 { + compatible = "fsl,mpc8572-fcm-nand", + "fsl,elbc-fcm-nand"; + reg = <0x5 0x0 0x40000>; + }; + + nand@6,0 { + compatible = "fsl,mpc8572-fcm-nand", + "fsl,elbc-fcm-nand"; + reg = <0x6 0x0 0x40000>; + }; }; - board_soc: soc: soc8572@ffe00000 { + soc8572@ffe00000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "simple-bus"; ranges = <0x0 0 0xffe00000 0x100000>; + bus-frequency = <0>; // Filled out by uboot. 
+ + ecm-law@0 { + compatible = "fsl,ecm-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <12>; + }; + + ecm@1000 { + compatible = "fsl,mpc8572-ecm", "fsl,ecm"; + reg = <0x1000 0x1000>; + interrupts = <17 2>; + interrupt-parent = <&mpic>; + }; + + memory-controller@2000 { + compatible = "fsl,mpc8572-memory-controller"; + reg = <0x2000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <18 2>; + }; + + memory-controller@6000 { + compatible = "fsl,mpc8572-memory-controller"; + reg = <0x6000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <18 2>; + }; + + L2: l2-cache-controller@20000 { + compatible = "fsl,mpc8572-l2-cache-controller"; + reg = <0x20000 0x1000>; + cache-line-size = <32>; // 32 bytes + cache-size = <0x100000>; // L2, 1M + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; + + i2c@3000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x3000 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + i2c@3100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x3100 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + dma@c300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma"; + reg = <0xc300 0x4>; + ranges = <0x0 0xc100 0x200>; + cell-index = <1>; + dma-channel@0 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <76 2>; + }; + dma-channel@80 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <77 2>; + }; + dma-channel@100 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <78 2>; + }; + dma-channel@180 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <79 2>; + }; + }; + + dma@21300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma"; + reg = <0x21300 0x4>; + ranges = <0x0 0x21100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <20 2>; + }; + dma-channel@80 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <21 2>; + }; + dma-channel@100 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <22 2>; + }; + dma-channel@180 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <23 2>; + }; + }; + + ptp_clock@24E00 { + compatible = "fsl,etsec-ptp"; + reg = <0x24E00 0xB0>; + interrupts = <68 2 69 2 70 2 71 2>; + interrupt-parent = < &mpic >; + fsl,tclk-period = <5>; + fsl,tmr-prsc = <200>; + fsl,tmr-add = <0xAAAAAAAB>; + fsl,tmr-fiper1 = <0x3B9AC9FB>; + fsl,tmr-fiper2 = <0x3B9AC9FB>; + fsl,max-adj = <499999999>; + }; + + enet0: ethernet@24000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <0>; + device_type = "network"; + model 
= "eTSEC"; + compatible = "gianfar"; + reg = <0x24000 0x1000>; + ranges = <0x0 0x24000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <29 2 30 2 34 2>; + interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; + phy-handle = <&phy0>; + phy-connection-type = "rgmii-id"; + + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-mdio"; + reg = <0x520 0x20>; + + phy0: ethernet-phy@0 { + interrupt-parent = <&mpic>; + interrupts = <10 1>; + reg = <0x0>; + }; + phy1: ethernet-phy@1 { + interrupt-parent = <&mpic>; + interrupts = <10 1>; + reg = <0x1>; + }; + phy2: ethernet-phy@2 { + interrupt-parent = <&mpic>; + interrupts = <10 1>; + reg = <0x2>; + }; + phy3: ethernet-phy@3 { + interrupt-parent = <&mpic>; + interrupts = <10 1>; + reg = <0x3>; + }; + + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + }; + + enet1: ethernet@25000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <1>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x25000 0x1000>; + ranges = <0x0 0x25000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <35 2 36 2 40 2>; + interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; + phy-handle = <&phy1>; + phy-connection-type = "rgmii-id"; + + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + }; + + enet2: ethernet@26000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <2>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x26000 0x1000>; + ranges = <0x0 0x26000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <31 2 32 2 33 2>; + interrupt-parent = <&mpic>; + tbi-handle = <&tbi2>; + phy-handle = <&phy2>; + phy-connection-type = "rgmii-id"; + + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x520 0x20>; + + tbi2: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + }; + + enet3: ethernet@27000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <3>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x27000 0x1000>; + ranges = <0x0 0x27000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <37 2 38 2 39 2>; + interrupt-parent = <&mpic>; + tbi-handle = <&tbi3>; + phy-handle = <&phy3>; + phy-connection-type = "rgmii-id"; + + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x520 0x20>; + + tbi3: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + }; + + serial0: serial@4500 { + cell-index = <0>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4500 0x100>; + clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + + serial1: serial@4600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4600 0x100>; + clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + + global-utilities@e0000 { //global utilities block + compatible = "fsl,mpc8572-guts"; + reg = <0xe0000 0x1000>; + fsl,has-rstcr; + }; + + msi@41600 { + compatible = "fsl,mpc8572-msi", "fsl,mpic-msi"; + reg = <0x41600 0x80>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe0 0 + 0xe1 0 + 0xe2 0 + 0xe3 0 + 0xe4 0 + 0xe5 0 + 0xe6 0 + 0xe7 0>; + interrupt-parent = <&mpic>; + }; + + crypto@30000 { + compatible = 
"fsl,sec3.0", "fsl,sec2.4", "fsl,sec2.2", + "fsl,sec2.1", "fsl,sec2.0"; + reg = <0x30000 0x10000>; + interrupts = <45 2 58 2>; + interrupt-parent = <&mpic>; + fsl,num-channels = <4>; + fsl,channel-fifo-len = <24>; + fsl,exec-units-mask = <0x9fe>; + fsl,descriptor-types-mask = <0x3ab0ebf>; + }; + + mpic: pic@40000 { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <2>; + reg = <0x40000 0x40000>; + compatible = "chrp,open-pic"; + device_type = "open-pic"; + }; }; - board_pci0: pci0: pcie@ffe08000 { + pci0: pcie@ffe08000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; reg = <0 0xffe08000 0 0x1000>; + bus-range = <0 255>; ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x00010000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <24 2>; + interrupt-map-mask = <0xff00 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x11 func 0 - PCI slot 1 */ + 0x8800 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8800 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8800 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8800 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 1 - PCI slot 1 */ + 0x8900 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8900 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8900 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8900 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 2 - PCI slot 1 */ + 0x8a00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8a00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8a00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8a00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 3 - PCI slot 1 */ + 0x8b00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8b00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8b00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8b00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 4 - PCI slot 1 */ + 0x8c00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8c00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8c00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8c00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 5 - PCI slot 1 */ + 0x8d00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8d00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8d00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8d00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 6 - PCI slot 1 */ + 0x8e00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8e00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8e00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8e00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 7 - PCI slot 1 */ + 0x8f00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8f00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8f00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8f00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x12 func 0 - PCI slot 2 */ + 0x9000 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9000 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9000 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9000 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 1 - PCI slot 2 */ + 0x9100 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9100 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9100 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9100 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 2 - PCI slot 2 */ + 0x9200 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9200 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9200 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9200 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 3 - PCI slot 2 */ + 0x9300 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9300 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9300 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9300 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 4 - PCI slot 2 */ + 0x9400 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9400 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9400 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9400 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 5 - PCI slot 2 */ + 0x9500 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9500 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9500 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9500 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 6 
- PCI slot 2 */ + 0x9600 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9600 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9600 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9600 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 7 - PCI slot 2 */ + 0x9700 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9700 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9700 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9700 0x0 0x0 0x4 &mpic 0x2 0x1 + + // IDSEL 0x1c USB + 0xe000 0x0 0x0 0x1 &i8259 0xc 0x2 + 0xe100 0x0 0x0 0x2 &i8259 0x9 0x2 + 0xe200 0x0 0x0 0x3 &i8259 0xa 0x2 + 0xe300 0x0 0x0 0x4 &i8259 0xb 0x2 + + // IDSEL 0x1d Audio + 0xe800 0x0 0x0 0x1 &i8259 0x6 0x2 + + // IDSEL 0x1e Legacy + 0xf000 0x0 0x0 0x1 &i8259 0x7 0x2 + 0xf100 0x0 0x0 0x1 &i8259 0x7 0x2 + + // IDSEL 0x1f IDE/SATA + 0xf800 0x0 0x0 0x1 &i8259 0xe 0x2 + 0xf900 0x0 0x0 0x1 &i8259 0x5 0x2 + + >; + pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0x80000000 0x2000000 0x0 0x80000000 0x0 0x20000000 @@ -47,14 +680,99 @@ 0x1000000 0x0 0x0 0x1000000 0x0 0x0 0x0 0x10000>; + uli1575@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + ranges = <0x2000000 0x0 0x80000000 + 0x2000000 0x0 0x80000000 + 0x0 0x20000000 + + 0x1000000 0x0 0x0 + 0x1000000 0x0 0x0 + 0x0 0x10000>; + isa@1e { + device_type = "isa"; + #interrupt-cells = <2>; + #size-cells = <1>; + #address-cells = <2>; + reg = <0xf000 0x0 0x0 0x0 0x0>; + ranges = <0x1 0x0 0x1000000 0x0 0x0 + 0x1000>; + interrupt-parent = <&i8259>; + + i8259: interrupt-controller@20 { + reg = <0x1 0x20 0x2 + 0x1 0xa0 0x2 + 0x1 0x4d0 0x2>; + interrupt-controller; + device_type = "interrupt-controller"; + #address-cells = <0>; + #interrupt-cells = <2>; + compatible = "chrp,iic"; + interrupts = <9 2>; + interrupt-parent = <&mpic>; + }; + + i8042@60 { + #size-cells = <0>; + #address-cells = <1>; + reg = <0x1 0x60 0x1 0x1 0x64 0x1>; + interrupts = <1 3 12 3>; + interrupt-parent = + <&i8259>; + + keyboard@0 { + reg = <0x0>; + compatible = "pnpPNP,303"; + }; + + mouse@1 { + reg = <0x1>; + compatible = "pnpPNP,f03"; + }; + }; + + rtc@70 { + compatible = "pnpPNP,b00"; + reg = <0x1 0x70 0x2>; + }; + + gpio@400 { + reg = <0x1 0x400 0x80>; + }; + }; + }; }; + }; pci1: pcie@ffe09000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; reg = <0 0xffe09000 0 0x1000>; + bus-range = <0 255>; ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x00010000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <25 2>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x4 0x1 + 0000 0x0 0x0 0x2 &mpic 0x5 0x1 + 0000 0x0 0x0 0x3 &mpic 0x6 0x1 + 0000 0x0 0x0 0x4 &mpic 0x7 0x1 + >; pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0xa0000000 0x2000000 0x0 0xa0000000 0x0 0x20000000 @@ -66,10 +784,31 @@ }; pci2: pcie@ffe0a000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; reg = <0 0xffe0a000 0 0x1000>; + bus-range = <0 255>; ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x00010000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <26 2>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0000 
0x0 0x0 0x2 &mpic 0x1 0x1 + 0000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0000 0x0 0x0 0x4 &mpic 0x3 0x1 + >; pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0xc0000000 0x2000000 0x0 0xc0000000 0x0 0x20000000 @@ -80,11 +819,3 @@ }; }; }; - -/* - * mpc8572ds.dtsi must be last to ensure board_pci0 overrides pci0 settings - * for interrupt-map & interrupt-map-mask - */ - -/include/ "fsl/mpc8572si-post.dtsi" -/include/ "mpc8572ds.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/mpc8572ds.dtsi b/trunk/arch/powerpc/boot/dts/mpc8572ds.dtsi deleted file mode 100644 index c3d4fac0532a..000000000000 --- a/trunk/arch/powerpc/boot/dts/mpc8572ds.dtsi +++ /dev/null @@ -1,397 +0,0 @@ -/* - * MPC8572DS Device Tree Source stub (no addresses or top-level ranges) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -&board_lbc { - nor@0,0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "cfi-flash"; - reg = <0x0 0x0 0x8000000>; - bank-width = <2>; - device-width = <1>; - - ramdisk@0 { - reg = <0x0 0x03000000>; - read-only; - }; - - diagnostic@3000000 { - reg = <0x03000000 0x00e00000>; - read-only; - }; - - dink@3e00000 { - reg = <0x03e00000 0x00200000>; - read-only; - }; - - kernel@4000000 { - reg = <0x04000000 0x00400000>; - read-only; - }; - - jffs2@4400000 { - reg = <0x04400000 0x03b00000>; - }; - - dtb@7f00000 { - reg = <0x07f00000 0x00080000>; - read-only; - }; - - u-boot@7f80000 { - reg = <0x07f80000 0x00080000>; - read-only; - }; - }; - - nand@2,0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,mpc8572-fcm-nand", - "fsl,elbc-fcm-nand"; - reg = <0x2 0x0 0x40000>; - - u-boot@0 { - reg = <0x0 0x02000000>; - read-only; - }; - - jffs2@2000000 { - reg = <0x02000000 0x10000000>; - }; - - ramdisk@12000000 { - reg = <0x12000000 0x08000000>; - read-only; - }; - - kernel@1a000000 { - reg = <0x1a000000 0x04000000>; - }; - - dtb@1e000000 { - reg = <0x1e000000 0x01000000>; - read-only; - }; - - empty@1f000000 { - reg = <0x1f000000 0x21000000>; - }; - }; - - nand@4,0 { - compatible = "fsl,mpc8572-fcm-nand", - "fsl,elbc-fcm-nand"; - reg = <0x4 0x0 0x40000>; - }; - - nand@5,0 { - compatible = "fsl,mpc8572-fcm-nand", - "fsl,elbc-fcm-nand"; - reg = <0x5 0x0 0x40000>; - }; - - nand@6,0 { - compatible = "fsl,mpc8572-fcm-nand", - "fsl,elbc-fcm-nand"; - reg = <0x6 0x0 0x40000>; - }; -}; - -&board_soc { - enet0: ethernet@24000 { - tbi-handle = <&tbi0>; - phy-handle = <&phy0>; - phy-connection-type = "rgmii-id"; - }; - - mdio@24520 { - phy0: ethernet-phy@0 { - interrupts = <10 1 0 0>; - reg = <0x0>; - }; - phy1: ethernet-phy@1 { - interrupts = <10 1 0 0>; - reg = <0x1>; - }; - phy2: ethernet-phy@2 { - interrupts = <10 1 0 0>; - reg = <0x2>; - }; - phy3: ethernet-phy@3 { - interrupts = <10 1 0 0>; - reg = <0x3>; - }; - - tbi0: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; - }; - }; - - ptp_clock@24e00 { - fsl,tclk-period = <5>; - fsl,tmr-prsc = <200>; - fsl,tmr-add = <0xAAAAAAAB>; - fsl,tmr-fiper1 = <0x3B9AC9FB>; - fsl,tmr-fiper2 = <0x3B9AC9FB>; - fsl,max-adj = <499999999>; - }; - - enet1: ethernet@25000 { - tbi-handle = <&tbi1>; - phy-handle = <&phy1>; - phy-connection-type = "rgmii-id"; - - }; - - mdio@25520 { - tbi1: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; - }; - }; - - enet2: ethernet@26000 { - tbi-handle = <&tbi2>; - phy-handle = <&phy2>; - phy-connection-type = "rgmii-id"; - - }; - mdio@26520 { - tbi2: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; - }; - }; - - enet3: ethernet@27000 { - tbi-handle = <&tbi3>; - phy-handle = <&phy3>; - phy-connection-type = "rgmii-id"; - }; - - mdio@27520 { - tbi3: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; - }; - }; -}; - -&board_pci0 { - pcie@0 { - interrupt-map-mask = <0xff00 0x0 0x0 0x7>; - interrupt-map = < - /* IDSEL 0x11 func 0 - PCI slot 1 */ - 0x8800 0x0 0x0 0x1 &mpic 0x2 0x1 0 0 - 0x8800 0x0 0x0 0x2 &mpic 0x3 0x1 0 0 - 0x8800 0x0 0x0 0x3 &mpic 0x4 0x1 0 0 - 0x8800 0x0 0x0 0x4 &mpic 0x1 0x1 0 0 - - /* IDSEL 0x11 func 1 - PCI slot 1 */ - 0x8900 0x0 0x0 0x1 &mpic 0x2 0x1 0 0 - 0x8900 0x0 0x0 0x2 &mpic 0x3 0x1 0 0 - 0x8900 0x0 0x0 0x3 &mpic 0x4 0x1 0 0 - 0x8900 0x0 0x0 0x4 &mpic 0x1 0x1 0 0 - - /* IDSEL 0x11 func 2 - PCI slot 1 */ - 0x8a00 0x0 0x0 0x1 &mpic 0x2 0x1 0 0 - 0x8a00 0x0 0x0 0x2 &mpic 0x3 0x1 0 0 - 0x8a00 0x0 0x0 0x3 &mpic 0x4 0x1 0 0 - 0x8a00 0x0 0x0 0x4 &mpic 0x1 0x1 0 0 - - 
/* IDSEL 0x11 func 3 - PCI slot 1 */ - 0x8b00 0x0 0x0 0x1 &mpic 0x2 0x1 0 0 - 0x8b00 0x0 0x0 0x2 &mpic 0x3 0x1 0 0 - 0x8b00 0x0 0x0 0x3 &mpic 0x4 0x1 0 0 - 0x8b00 0x0 0x0 0x4 &mpic 0x1 0x1 0 0 - - /* IDSEL 0x11 func 4 - PCI slot 1 */ - 0x8c00 0x0 0x0 0x1 &mpic 0x2 0x1 0 0 - 0x8c00 0x0 0x0 0x2 &mpic 0x3 0x1 0 0 - 0x8c00 0x0 0x0 0x3 &mpic 0x4 0x1 0 0 - 0x8c00 0x0 0x0 0x4 &mpic 0x1 0x1 0 0 - - /* IDSEL 0x11 func 5 - PCI slot 1 */ - 0x8d00 0x0 0x0 0x1 &mpic 0x2 0x1 0 0 - 0x8d00 0x0 0x0 0x2 &mpic 0x3 0x1 0 0 - 0x8d00 0x0 0x0 0x3 &mpic 0x4 0x1 0 0 - 0x8d00 0x0 0x0 0x4 &mpic 0x1 0x1 0 0 - - /* IDSEL 0x11 func 6 - PCI slot 1 */ - 0x8e00 0x0 0x0 0x1 &mpic 0x2 0x1 0 0 - 0x8e00 0x0 0x0 0x2 &mpic 0x3 0x1 0 0 - 0x8e00 0x0 0x0 0x3 &mpic 0x4 0x1 0 0 - 0x8e00 0x0 0x0 0x4 &mpic 0x1 0x1 0 0 - - /* IDSEL 0x11 func 7 - PCI slot 1 */ - 0x8f00 0x0 0x0 0x1 &mpic 0x2 0x1 0 0 - 0x8f00 0x0 0x0 0x2 &mpic 0x3 0x1 0 0 - 0x8f00 0x0 0x0 0x3 &mpic 0x4 0x1 0 0 - 0x8f00 0x0 0x0 0x4 &mpic 0x1 0x1 0 0 - - /* IDSEL 0x12 func 0 - PCI slot 2 */ - 0x9000 0x0 0x0 0x1 &mpic 0x3 0x1 0 0 - 0x9000 0x0 0x0 0x2 &mpic 0x4 0x1 0 0 - 0x9000 0x0 0x0 0x3 &mpic 0x1 0x1 0 0 - 0x9000 0x0 0x0 0x4 &mpic 0x2 0x1 0 0 - - /* IDSEL 0x12 func 1 - PCI slot 2 */ - 0x9100 0x0 0x0 0x1 &mpic 0x3 0x1 0 0 - 0x9100 0x0 0x0 0x2 &mpic 0x4 0x1 0 0 - 0x9100 0x0 0x0 0x3 &mpic 0x1 0x1 0 0 - 0x9100 0x0 0x0 0x4 &mpic 0x2 0x1 0 0 - - /* IDSEL 0x12 func 2 - PCI slot 2 */ - 0x9200 0x0 0x0 0x1 &mpic 0x3 0x1 0 0 - 0x9200 0x0 0x0 0x2 &mpic 0x4 0x1 0 0 - 0x9200 0x0 0x0 0x3 &mpic 0x1 0x1 0 0 - 0x9200 0x0 0x0 0x4 &mpic 0x2 0x1 0 0 - - /* IDSEL 0x12 func 3 - PCI slot 2 */ - 0x9300 0x0 0x0 0x1 &mpic 0x3 0x1 0 0 - 0x9300 0x0 0x0 0x2 &mpic 0x4 0x1 0 0 - 0x9300 0x0 0x0 0x3 &mpic 0x1 0x1 0 0 - 0x9300 0x0 0x0 0x4 &mpic 0x2 0x1 0 0 - - /* IDSEL 0x12 func 4 - PCI slot 2 */ - 0x9400 0x0 0x0 0x1 &mpic 0x3 0x1 0 0 - 0x9400 0x0 0x0 0x2 &mpic 0x4 0x1 0 0 - 0x9400 0x0 0x0 0x3 &mpic 0x1 0x1 0 0 - 0x9400 0x0 0x0 0x4 &mpic 0x2 0x1 0 0 - - /* IDSEL 0x12 func 5 - PCI slot 2 */ - 0x9500 0x0 0x0 0x1 &mpic 0x3 0x1 0 0 - 0x9500 0x0 0x0 0x2 &mpic 0x4 0x1 0 0 - 0x9500 0x0 0x0 0x3 &mpic 0x1 0x1 0 0 - 0x9500 0x0 0x0 0x4 &mpic 0x2 0x1 0 0 - - /* IDSEL 0x12 func 6 - PCI slot 2 */ - 0x9600 0x0 0x0 0x1 &mpic 0x3 0x1 0 0 - 0x9600 0x0 0x0 0x2 &mpic 0x4 0x1 0 0 - 0x9600 0x0 0x0 0x3 &mpic 0x1 0x1 0 0 - 0x9600 0x0 0x0 0x4 &mpic 0x2 0x1 0 0 - - /* IDSEL 0x12 func 7 - PCI slot 2 */ - 0x9700 0x0 0x0 0x1 &mpic 0x3 0x1 0 0 - 0x9700 0x0 0x0 0x2 &mpic 0x4 0x1 0 0 - 0x9700 0x0 0x0 0x3 &mpic 0x1 0x1 0 0 - 0x9700 0x0 0x0 0x4 &mpic 0x2 0x1 0 0 - - // IDSEL 0x1c USB - 0xe000 0x0 0x0 0x1 &i8259 0xc 0x2 - 0xe100 0x0 0x0 0x2 &i8259 0x9 0x2 - 0xe200 0x0 0x0 0x3 &i8259 0xa 0x2 - 0xe300 0x0 0x0 0x4 &i8259 0xb 0x2 - - // IDSEL 0x1d Audio - 0xe800 0x0 0x0 0x1 &i8259 0x6 0x2 - - // IDSEL 0x1e Legacy - 0xf000 0x0 0x0 0x1 &i8259 0x7 0x2 - 0xf100 0x0 0x0 0x1 &i8259 0x7 0x2 - - // IDSEL 0x1f IDE/SATA - 0xf800 0x0 0x0 0x1 &i8259 0xe 0x2 - 0xf900 0x0 0x0 0x1 &i8259 0x5 0x2 - >; - - - uli1575@0 { - reg = <0x0 0x0 0x0 0x0 0x0>; - #size-cells = <2>; - #address-cells = <3>; - ranges = <0x2000000 0x0 0x80000000 - 0x2000000 0x0 0x80000000 - 0x0 0x20000000 - - 0x1000000 0x0 0x0 - 0x1000000 0x0 0x0 - 0x0 0x10000>; - isa@1e { - device_type = "isa"; - #interrupt-cells = <2>; - #size-cells = <1>; - #address-cells = <2>; - reg = <0xf000 0x0 0x0 0x0 0x0>; - ranges = <0x1 0x0 0x1000000 0x0 0x0 - 0x1000>; - interrupt-parent = <&i8259>; - - i8259: interrupt-controller@20 { - reg = <0x1 0x20 0x2 - 0x1 0xa0 0x2 - 0x1 0x4d0 0x2>; - interrupt-controller; - 
device_type = "interrupt-controller"; - #address-cells = <0>; - #interrupt-cells = <2>; - compatible = "chrp,iic"; - interrupts = <9 2 0 0>; - interrupt-parent = <&mpic>; - }; - - i8042@60 { - #size-cells = <0>; - #address-cells = <1>; - reg = <0x1 0x60 0x1 0x1 0x64 0x1>; - interrupts = <1 3 12 3>; - interrupt-parent = - <&i8259>; - - keyboard@0 { - reg = <0x0>; - compatible = "pnpPNP,303"; - }; - - mouse@1 { - reg = <0x1>; - compatible = "pnpPNP,f03"; - }; - }; - - rtc@70 { - compatible = "pnpPNP,b00"; - reg = <0x1 0x70 0x2>; - }; - - gpio@400 { - reg = <0x1 0x400 0x80>; - }; - }; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/mpc8572ds_36b.dts b/trunk/arch/powerpc/boot/dts/mpc8572ds_36b.dts index 6c3d0b305e1b..f6365db3b97d 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8572ds_36b.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8572ds_36b.dts @@ -1,5 +1,5 @@ /* - * MPC8572DS Device Tree Source (36-bit address map) + * MPC8572 DS Device Tree Source * * Copyright 2007-2009 Freescale Semiconductor Inc. * @@ -9,18 +9,67 @@ * option) any later version. */ -/include/ "fsl/mpc8572si-pre.dtsi" - +/dts-v1/; / { model = "fsl,MPC8572DS"; compatible = "fsl,MPC8572DS"; + #address-cells = <2>; + #size-cells = <2>; + + aliases { + ethernet0 = &enet0; + ethernet1 = &enet1; + ethernet2 = &enet2; + ethernet3 = &enet3; + serial0 = &serial0; + serial1 = &serial1; + pci0 = &pci0; + pci1 = &pci1; + pci2 = &pci2; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + PowerPC,8572@0 { + device_type = "cpu"; + reg = <0x0>; + d-cache-line-size = <32>; // 32 bytes + i-cache-line-size = <32>; // 32 bytes + d-cache-size = <0x8000>; // L1, 32K + i-cache-size = <0x8000>; // L1, 32K + timebase-frequency = <0>; + bus-frequency = <0>; + clock-frequency = <0>; + next-level-cache = <&L2>; + }; + + PowerPC,8572@1 { + device_type = "cpu"; + reg = <0x1>; + d-cache-line-size = <32>; // 32 bytes + i-cache-line-size = <32>; // 32 bytes + d-cache-size = <0x8000>; // L1, 32K + i-cache-size = <0x8000>; // L1, 32K + timebase-frequency = <0>; + bus-frequency = <0>; + clock-frequency = <0>; + next-level-cache = <&L2>; + }; + }; memory { device_type = "memory"; }; - board_lbc: lbc: localbus@fffe05000 { + localbus@fffe05000 { + #address-cells = <2>; + #size-cells = <1>; + compatible = "fsl,mpc8572-elbc", "fsl,elbc", "simple-bus"; reg = <0xf 0xffe05000 0 0x1000>; + interrupts = <19 2>; + interrupt-parent = <&mpic>; ranges = <0x0 0x0 0xf 0xe8000000 0x08000000 0x1 0x0 0xf 0xe0000000 0x08000000 @@ -29,17 +78,588 @@ 0x4 0x0 0xf 0xffa40000 0x00040000 0x5 0x0 0xf 0xffa80000 0x00040000 0x6 0x0 0xf 0xffac0000 0x00040000>; + + nor@0,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "cfi-flash"; + reg = <0x0 0x0 0x8000000>; + bank-width = <2>; + device-width = <1>; + + ramdisk@0 { + reg = <0x0 0x03000000>; + read-only; + }; + + diagnostic@3000000 { + reg = <0x03000000 0x00e00000>; + read-only; + }; + + dink@3e00000 { + reg = <0x03e00000 0x00200000>; + read-only; + }; + + kernel@4000000 { + reg = <0x04000000 0x00400000>; + read-only; + }; + + jffs2@4400000 { + reg = <0x04400000 0x03b00000>; + }; + + dtb@7f00000 { + reg = <0x07f00000 0x00080000>; + read-only; + }; + + u-boot@7f80000 { + reg = <0x07f80000 0x00080000>; + read-only; + }; + }; + + nand@2,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,mpc8572-fcm-nand", + "fsl,elbc-fcm-nand"; + reg = <0x2 0x0 0x40000>; + + u-boot@0 { + reg = <0x0 0x02000000>; + read-only; + }; + + jffs2@2000000 { + reg = <0x02000000 0x10000000>; + }; + + ramdisk@12000000 { + reg 
= <0x12000000 0x08000000>; + read-only; + }; + + kernel@1a000000 { + reg = <0x1a000000 0x04000000>; + }; + + dtb@1e000000 { + reg = <0x1e000000 0x01000000>; + read-only; + }; + + empty@1f000000 { + reg = <0x1f000000 0x21000000>; + }; + }; + + nand@4,0 { + compatible = "fsl,mpc8572-fcm-nand", + "fsl,elbc-fcm-nand"; + reg = <0x4 0x0 0x40000>; + }; + + nand@5,0 { + compatible = "fsl,mpc8572-fcm-nand", + "fsl,elbc-fcm-nand"; + reg = <0x5 0x0 0x40000>; + }; + + nand@6,0 { + compatible = "fsl,mpc8572-fcm-nand", + "fsl,elbc-fcm-nand"; + reg = <0x6 0x0 0x40000>; + }; }; - board_soc: soc: soc8572@fffe00000 { + soc8572@fffe00000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "simple-bus"; ranges = <0x0 0xf 0xffe00000 0x100000>; + bus-frequency = <0>; // Filled out by uboot. + + ecm-law@0 { + compatible = "fsl,ecm-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <12>; + }; + + ecm@1000 { + compatible = "fsl,mpc8572-ecm", "fsl,ecm"; + reg = <0x1000 0x1000>; + interrupts = <17 2>; + interrupt-parent = <&mpic>; + }; + + memory-controller@2000 { + compatible = "fsl,mpc8572-memory-controller"; + reg = <0x2000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <18 2>; + }; + + memory-controller@6000 { + compatible = "fsl,mpc8572-memory-controller"; + reg = <0x6000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <18 2>; + }; + + L2: l2-cache-controller@20000 { + compatible = "fsl,mpc8572-l2-cache-controller"; + reg = <0x20000 0x1000>; + cache-line-size = <32>; // 32 bytes + cache-size = <0x100000>; // L2, 1M + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; + + i2c@3000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x3000 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + i2c@3100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x3100 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + dma@c300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma"; + reg = <0xc300 0x4>; + ranges = <0x0 0xc100 0x200>; + cell-index = <1>; + dma-channel@0 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <76 2>; + }; + dma-channel@80 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <77 2>; + }; + dma-channel@100 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <78 2>; + }; + dma-channel@180 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <79 2>; + }; + }; + + dma@21300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma"; + reg = <0x21300 0x4>; + ranges = <0x0 0x21100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <20 2>; + }; + dma-channel@80 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <21 2>; + }; + dma-channel@100 { + 
compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <22 2>; + }; + dma-channel@180 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <23 2>; + }; + }; + + enet0: ethernet@24000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <0>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x24000 0x1000>; + ranges = <0x0 0x24000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <29 2 30 2 34 2>; + interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; + phy-handle = <&phy0>; + phy-connection-type = "rgmii-id"; + + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-mdio"; + reg = <0x520 0x20>; + + phy0: ethernet-phy@0 { + interrupt-parent = <&mpic>; + interrupts = <10 1>; + reg = <0x0>; + }; + phy1: ethernet-phy@1 { + interrupt-parent = <&mpic>; + interrupts = <10 1>; + reg = <0x1>; + }; + phy2: ethernet-phy@2 { + interrupt-parent = <&mpic>; + interrupts = <10 1>; + reg = <0x2>; + }; + phy3: ethernet-phy@3 { + interrupt-parent = <&mpic>; + interrupts = <10 1>; + reg = <0x3>; + }; + + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + }; + + enet1: ethernet@25000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <1>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x25000 0x1000>; + ranges = <0x0 0x25000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <35 2 36 2 40 2>; + interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; + phy-handle = <&phy1>; + phy-connection-type = "rgmii-id"; + + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + }; + + enet2: ethernet@26000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <2>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x26000 0x1000>; + ranges = <0x0 0x26000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <31 2 32 2 33 2>; + interrupt-parent = <&mpic>; + tbi-handle = <&tbi2>; + phy-handle = <&phy2>; + phy-connection-type = "rgmii-id"; + + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x520 0x20>; + + tbi2: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + }; + + enet3: ethernet@27000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <3>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x27000 0x1000>; + ranges = <0x0 0x27000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <37 2 38 2 39 2>; + interrupt-parent = <&mpic>; + tbi-handle = <&tbi3>; + phy-handle = <&phy3>; + phy-connection-type = "rgmii-id"; + + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x520 0x20>; + + tbi3: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + }; + + serial0: serial@4500 { + cell-index = <0>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4500 0x100>; + clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + + serial1: serial@4600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4600 0x100>; + 
clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + + global-utilities@e0000 { //global utilities block + compatible = "fsl,mpc8572-guts"; + reg = <0xe0000 0x1000>; + fsl,has-rstcr; + }; + + msi@41600 { + compatible = "fsl,mpc8572-msi", "fsl,mpic-msi"; + reg = <0x41600 0x80>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe0 0 + 0xe1 0 + 0xe2 0 + 0xe3 0 + 0xe4 0 + 0xe5 0 + 0xe6 0 + 0xe7 0>; + interrupt-parent = <&mpic>; + }; + + crypto@30000 { + compatible = "fsl,sec3.0", "fsl,sec2.4", "fsl,sec2.2", + "fsl,sec2.1", "fsl,sec2.0"; + reg = <0x30000 0x10000>; + interrupts = <45 2 58 2>; + interrupt-parent = <&mpic>; + fsl,num-channels = <4>; + fsl,channel-fifo-len = <24>; + fsl,exec-units-mask = <0x9fe>; + fsl,descriptor-types-mask = <0x3ab0ebf>; + }; + + mpic: pic@40000 { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <2>; + reg = <0x40000 0x40000>; + compatible = "chrp,open-pic"; + device_type = "open-pic"; + }; }; - board_pci0: pci0: pcie@fffe08000 { + pci0: pcie@fffe08000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; reg = <0xf 0xffe08000 0 0x1000>; + bus-range = <0 255>; ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x00010000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <24 2>; + interrupt-map-mask = <0xff00 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x11 func 0 - PCI slot 1 */ + 0x8800 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8800 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8800 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8800 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 1 - PCI slot 1 */ + 0x8900 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8900 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8900 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8900 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 2 - PCI slot 1 */ + 0x8a00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8a00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8a00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8a00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 3 - PCI slot 1 */ + 0x8b00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8b00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8b00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8b00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 4 - PCI slot 1 */ + 0x8c00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8c00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8c00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8c00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 5 - PCI slot 1 */ + 0x8d00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8d00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8d00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8d00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 6 - PCI slot 1 */ + 0x8e00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8e00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8e00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8e00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 7 - PCI slot 1 */ + 0x8f00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8f00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8f00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8f00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x12 func 0 - PCI slot 2 */ + 0x9000 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9000 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9000 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9000 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 1 - PCI slot 2 */ + 0x9100 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9100 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9100 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9100 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 2 - PCI slot 2 */ + 0x9200 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9200 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9200 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9200 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 3 - PCI slot 2 */ + 0x9300 0x0 
0x0 0x1 &mpic 0x3 0x1 + 0x9300 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9300 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9300 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 4 - PCI slot 2 */ + 0x9400 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9400 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9400 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9400 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 5 - PCI slot 2 */ + 0x9500 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9500 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9500 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9500 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 6 - PCI slot 2 */ + 0x9600 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9600 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9600 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9600 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 7 - PCI slot 2 */ + 0x9700 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9700 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9700 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9700 0x0 0x0 0x4 &mpic 0x2 0x1 + + // IDSEL 0x1c USB + 0xe000 0x0 0x0 0x1 &i8259 0xc 0x2 + 0xe100 0x0 0x0 0x2 &i8259 0x9 0x2 + 0xe200 0x0 0x0 0x3 &i8259 0xa 0x2 + 0xe300 0x0 0x0 0x4 &i8259 0xb 0x2 + + // IDSEL 0x1d Audio + 0xe800 0x0 0x0 0x1 &i8259 0x6 0x2 + + // IDSEL 0x1e Legacy + 0xf000 0x0 0x0 0x1 &i8259 0x7 0x2 + 0xf100 0x0 0x0 0x1 &i8259 0x7 0x2 + + // IDSEL 0x1f IDE/SATA + 0xf800 0x0 0x0 0x1 &i8259 0xe 0x2 + 0xf900 0x0 0x0 0x1 &i8259 0x5 0x2 + + >; + pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0xe0000000 0x2000000 0x0 0xe0000000 0x0 0x20000000 @@ -47,14 +667,99 @@ 0x1000000 0x0 0x0 0x1000000 0x0 0x0 0x0 0x10000>; + uli1575@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + ranges = <0x2000000 0x0 0xe0000000 + 0x2000000 0x0 0xe0000000 + 0x0 0x20000000 + + 0x1000000 0x0 0x0 + 0x1000000 0x0 0x0 + 0x0 0x10000>; + isa@1e { + device_type = "isa"; + #interrupt-cells = <2>; + #size-cells = <1>; + #address-cells = <2>; + reg = <0xf000 0x0 0x0 0x0 0x0>; + ranges = <0x1 0x0 0x1000000 0x0 0x0 + 0x1000>; + interrupt-parent = <&i8259>; + + i8259: interrupt-controller@20 { + reg = <0x1 0x20 0x2 + 0x1 0xa0 0x2 + 0x1 0x4d0 0x2>; + interrupt-controller; + device_type = "interrupt-controller"; + #address-cells = <0>; + #interrupt-cells = <2>; + compatible = "chrp,iic"; + interrupts = <9 2>; + interrupt-parent = <&mpic>; + }; + + i8042@60 { + #size-cells = <0>; + #address-cells = <1>; + reg = <0x1 0x60 0x1 0x1 0x64 0x1>; + interrupts = <1 3 12 3>; + interrupt-parent = + <&i8259>; + + keyboard@0 { + reg = <0x0>; + compatible = "pnpPNP,303"; + }; + + mouse@1 { + reg = <0x1>; + compatible = "pnpPNP,f03"; + }; + }; + + rtc@70 { + compatible = "pnpPNP,b00"; + reg = <0x1 0x70 0x2>; + }; + + gpio@400 { + reg = <0x1 0x400 0x80>; + }; + }; + }; }; + }; pci1: pcie@fffe09000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; reg = <0xf 0xffe09000 0 0x1000>; + bus-range = <0 255>; ranges = <0x2000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x00010000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <25 2>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x4 0x1 + 0000 0x0 0x0 0x2 &mpic 0x5 0x1 + 0000 0x0 0x0 0x3 &mpic 0x6 0x1 + 0000 0x0 0x0 0x4 &mpic 0x7 0x1 + >; pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0xe0000000 0x2000000 0x0 0xe0000000 0x0 0x20000000 @@ -66,10 +771,31 @@ }; pci2: 
pcie@fffe0a000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; reg = <0xf 0xffe0a000 0 0x1000>; + bus-range = <0 255>; ranges = <0x2000000 0x0 0xe0000000 0xc 0x40000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0xf 0xffc20000 0x0 0x00010000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <26 2>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0000 0x0 0x0 0x2 &mpic 0x1 0x1 + 0000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0000 0x0 0x0 0x4 &mpic 0x3 0x1 + >; pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0xe0000000 0x2000000 0x0 0xe0000000 0x0 0x20000000 @@ -80,11 +806,3 @@ }; }; }; - -/* - * mpc8572ds.dtsi must be last to ensure board_pci0 overrides pci0 settings - * for interrupt-map & interrupt-map-mask - */ - -/include/ "fsl/mpc8572si-post.dtsi" -/include/ "mpc8572ds.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts b/trunk/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts index d34d12712125..3375c2ab0c32 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts @@ -14,69 +14,494 @@ * option) any later version. */ -/include/ "mpc8572ds.dts" - +/dts-v1/; / { model = "fsl,MPC8572DS"; compatible = "fsl,MPC8572DS", "fsl,MPC8572DS-CAMP"; + #address-cells = <1>; + #size-cells = <1>; + + aliases { + ethernet0 = &enet0; + ethernet1 = &enet1; + serial0 = &serial0; + pci0 = &pci0; + pci1 = &pci1; + }; cpus { + #address-cells = <1>; + #size-cells = <0>; + PowerPC,8572@0 { + device_type = "cpu"; + reg = <0x0>; + d-cache-line-size = <32>; // 32 bytes + i-cache-line-size = <32>; // 32 bytes + d-cache-size = <0x8000>; // L1, 32K + i-cache-size = <0x8000>; // L1, 32K + timebase-frequency = <0>; + bus-frequency = <0>; + clock-frequency = <0>; + next-level-cache = <&L2>; }; - PowerPC,8572@1 { - status = "disabled"; - }; + }; - localbus@ffe05000 { - status = "disabled"; + memory { + device_type = "memory"; + reg = <0x0 0x0>; // Filled by U-Boot }; soc8572@ffe00000 { - serial@4600 { - status = "disabled"; + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "simple-bus"; + ranges = <0x0 0xffe00000 0x100000>; + bus-frequency = <0>; // Filled out by uboot. 
+ + ecm-law@0 { + compatible = "fsl,ecm-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <12>; }; - dma@c300 { - status = "disabled"; + + ecm@1000 { + compatible = "fsl,mpc8572-ecm", "fsl,ecm"; + reg = <0x1000 0x1000>; + interrupts = <17 2>; + interrupt-parent = <&mpic>; }; - gpio-controller@f000 { + + memory-controller@2000 { + compatible = "fsl,mpc8572-memory-controller"; + reg = <0x2000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <18 2>; }; - l2-cache-controller@20000 { + + memory-controller@6000 { + compatible = "fsl,mpc8572-memory-controller"; + reg = <0x6000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <18 2>; + }; + + L2: l2-cache-controller@20000 { + compatible = "fsl,mpc8572-l2-cache-controller"; + reg = <0x20000 0x1000>; + cache-line-size = <32>; // 32 bytes cache-size = <0x80000>; // L2, 512K + interrupt-parent = <&mpic>; + interrupts = <16 2>; }; - ethernet@26000 { - status = "disabled"; + + i2c@3000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x3000 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; }; - mdio@26520 { - status = "disabled"; + + i2c@3100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x3100 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; }; - ethernet@27000 { - status = "disabled"; + + dma@21300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma"; + reg = <0x21300 0x4>; + ranges = <0x0 0x21100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <20 2>; + }; + dma-channel@80 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <21 2>; + }; + dma-channel@100 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <22 2>; + }; + dma-channel@180 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <23 2>; + }; }; - mdio@27520 { - status = "disabled"; + + enet0: ethernet@24000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <0>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x24000 0x1000>; + ranges = <0x0 0x24000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <29 2 30 2 34 2>; + interrupt-parent = <&mpic>; + phy-handle = <&phy0>; + phy-connection-type = "rgmii-id"; + + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-mdio"; + reg = <0x520 0x20>; + + phy0: ethernet-phy@0 { + interrupt-parent = <&mpic>; + interrupts = <10 1>; + reg = <0x0>; + }; + phy1: ethernet-phy@1 { + interrupt-parent = <&mpic>; + interrupts = <10 1>; + reg = <0x1>; + }; + }; }; - pic@40000 { - protected-sources = < - 31 32 33 37 38 39 /* enet2 enet3 */ - 76 77 78 79 26 42 /* dma2 pci2 serial*/ - 0xe4 0xe5 0xe6 0xe7 /* msi */ - >; + + enet1: ethernet@25000 { + cell-index = <1>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x25000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <35 2 36 2 40 2>; + interrupt-parent = <&mpic>; + phy-handle = <&phy1>; + 
phy-connection-type = "rgmii-id"; + }; + + serial0: serial@4500 { + cell-index = <0>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4500 0x100>; + clock-frequency = <0>; }; msi@41600 { + compatible = "fsl,mpc8572-msi", "fsl,mpic-msi"; + reg = <0x41600 0x80>; msi-available-ranges = <0 0x80>; interrupts = < 0xe0 0 0xe1 0 0xe2 0 0xe3 0>; + interrupt-parent = <&mpic>; + }; + + global-utilities@e0000 { //global utilities block + compatible = "fsl,mpc8572-guts"; + reg = <0xe0000 0x1000>; + fsl,has-rstcr; + }; + + crypto@30000 { + compatible = "fsl,sec3.0", "fsl,sec2.4", "fsl,sec2.2", + "fsl,sec2.1", "fsl,sec2.0"; + reg = <0x30000 0x10000>; + interrupts = <45 2 58 2>; + interrupt-parent = <&mpic>; + fsl,num-channels = <4>; + fsl,channel-fifo-len = <24>; + fsl,exec-units-mask = <0x9fe>; + fsl,descriptor-types-mask = <0x3ab0ebf>; + }; + + mpic: pic@40000 { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <2>; + reg = <0x40000 0x40000>; + compatible = "chrp,open-pic"; + device_type = "open-pic"; + protected-sources = < + 31 32 33 37 38 39 /* enet2 enet3 */ + 76 77 78 79 26 42 /* dma2 pci2 serial*/ + 0xe4 0xe5 0xe6 0xe7 /* msi */ + >; }; - timer@42100 { - status = "disabled"; + }; + + pci0: pcie@ffe08000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0xffe08000 0x1000>; + bus-range = <0 255>; + ranges = <0x2000000 0x0 0x80000000 0x80000000 0x0 0x20000000 + 0x1000000 0x0 0x0 0xffc00000 0x0 0x10000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <24 2>; + interrupt-map-mask = <0xff00 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x11 func 0 - PCI slot 1 */ + 0x8800 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8800 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8800 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8800 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 1 - PCI slot 1 */ + 0x8900 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8900 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8900 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8900 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 2 - PCI slot 1 */ + 0x8a00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8a00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8a00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8a00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 3 - PCI slot 1 */ + 0x8b00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8b00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8b00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8b00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 4 - PCI slot 1 */ + 0x8c00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8c00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8c00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8c00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 5 - PCI slot 1 */ + 0x8d00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8d00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8d00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8d00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 6 - PCI slot 1 */ + 0x8e00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8e00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8e00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8e00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 7 - PCI slot 1 */ + 0x8f00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8f00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8f00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8f00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x12 func 0 - PCI slot 2 */ + 0x9000 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9000 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9000 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9000 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 1 - PCI slot 2 */ + 0x9100 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9100 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9100 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9100 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 2 - PCI slot 2 */ + 0x9200 0x0 
0x0 0x1 &mpic 0x3 0x1 + 0x9200 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9200 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9200 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 3 - PCI slot 2 */ + 0x9300 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9300 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9300 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9300 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 4 - PCI slot 2 */ + 0x9400 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9400 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9400 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9400 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 5 - PCI slot 2 */ + 0x9500 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9500 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9500 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9500 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 6 - PCI slot 2 */ + 0x9600 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9600 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9600 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9600 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 7 - PCI slot 2 */ + 0x9700 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9700 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9700 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9700 0x0 0x0 0x4 &mpic 0x2 0x1 + + // IDSEL 0x1c USB + 0xe000 0x0 0x0 0x1 &i8259 0xc 0x2 + 0xe100 0x0 0x0 0x2 &i8259 0x9 0x2 + 0xe200 0x0 0x0 0x3 &i8259 0xa 0x2 + 0xe300 0x0 0x0 0x4 &i8259 0xb 0x2 + + // IDSEL 0x1d Audio + 0xe800 0x0 0x0 0x1 &i8259 0x6 0x2 + + // IDSEL 0x1e Legacy + 0xf000 0x0 0x0 0x1 &i8259 0x7 0x2 + 0xf100 0x0 0x0 0x1 &i8259 0x7 0x2 + + // IDSEL 0x1f IDE/SATA + 0xf800 0x0 0x0 0x1 &i8259 0xe 0x2 + 0xf900 0x0 0x0 0x1 &i8259 0x5 0x2 + + >; + + pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + ranges = <0x2000000 0x0 0x80000000 + 0x2000000 0x0 0x80000000 + 0x0 0x20000000 + + 0x1000000 0x0 0x0 + 0x1000000 0x0 0x0 + 0x0 0x10000>; + uli1575@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + ranges = <0x2000000 0x0 0x80000000 + 0x2000000 0x0 0x80000000 + 0x0 0x20000000 + + 0x1000000 0x0 0x0 + 0x1000000 0x0 0x0 + 0x0 0x10000>; + isa@1e { + device_type = "isa"; + #interrupt-cells = <2>; + #size-cells = <1>; + #address-cells = <2>; + reg = <0xf000 0x0 0x0 0x0 0x0>; + ranges = <0x1 0x0 0x1000000 0x0 0x0 + 0x1000>; + interrupt-parent = <&i8259>; + + i8259: interrupt-controller@20 { + reg = <0x1 0x20 0x2 + 0x1 0xa0 0x2 + 0x1 0x4d0 0x2>; + interrupt-controller; + device_type = "interrupt-controller"; + #address-cells = <0>; + #interrupt-cells = <2>; + compatible = "chrp,iic"; + interrupts = <9 2>; + interrupt-parent = <&mpic>; + }; + + i8042@60 { + #size-cells = <0>; + #address-cells = <1>; + reg = <0x1 0x60 0x1 0x1 0x64 0x1>; + interrupts = <1 3 12 3>; + interrupt-parent = + <&i8259>; + + keyboard@0 { + reg = <0x0>; + compatible = "pnpPNP,303"; + }; + + mouse@1 { + reg = <0x1>; + compatible = "pnpPNP,f03"; + }; + }; + + rtc@70 { + compatible = "pnpPNP,b00"; + reg = <0x1 0x70 0x2>; + }; + + gpio@400 { + reg = <0x1 0x400 0x80>; + }; + }; + }; }; + }; - pcie@ffe0a000 { - status = "disabled"; + + pci1: pcie@ffe09000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0xffe09000 0x1000>; + bus-range = <0 255>; + ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000 + 0x1000000 0x0 0x0 0xffc10000 0x0 0x10000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <25 2>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x4 0x1 + 0000 0x0 0x0 0x2 &mpic 0x5 0x1 + 0000 0x0 0x0 0x3 &mpic 0x6 0x1 + 0000 0x0 0x0 0x4 &mpic 0x7 0x1 + >; + pcie@0 { + reg 
= <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + ranges = <0x2000000 0x0 0xa0000000 + 0x2000000 0x0 0xa0000000 + 0x0 0x20000000 + + 0x1000000 0x0 0x0 + 0x1000000 0x0 0x0 + 0x0 0x10000>; + }; }; }; diff --git a/trunk/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts b/trunk/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts index d6a8fafc0d0d..e7b477f6a3fe 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts @@ -15,74 +15,169 @@ * option) any later version. */ -/include/ "mpc8572ds.dts" - +/dts-v1/; / { model = "fsl,MPC8572DS"; compatible = "fsl,MPC8572DS", "fsl,MPC8572DS-CAMP"; + #address-cells = <1>; + #size-cells = <1>; + + aliases { + ethernet2 = &enet2; + ethernet3 = &enet3; + serial0 = &serial0; + pci2 = &pci2; + }; cpus { - PowerPC,8572@0 { - status = "disabled"; - }; + #address-cells = <1>; + #size-cells = <0>; + PowerPC,8572@1 { + device_type = "cpu"; + reg = <0x1>; + d-cache-line-size = <32>; // 32 bytes + i-cache-line-size = <32>; // 32 bytes + d-cache-size = <0x8000>; // L1, 32K + i-cache-size = <0x8000>; // L1, 32K + timebase-frequency = <0>; + bus-frequency = <0>; + clock-frequency = <0>; + next-level-cache = <&L2>; }; }; - localbus@ffe05000 { - status = "disabled"; + memory { + device_type = "memory"; + reg = <0x0 0x0>; // Filled by U-Boot }; soc8572@ffe00000 { - ecm-law@0 { - status = "disabled"; - }; - ecm@1000 { - status = "disabled"; - }; - memory-controller@2000 { - status = "disabled"; - }; - memory-controller@6000 { - status = "disabled"; - }; - i2c@3000 { - status = "disabled"; - }; - i2c@3100 { - status = "disabled"; - }; - serial@4500 { - status = "disabled"; - }; - gpio-controller@f000 { - status = "disabled"; - }; - l2-cache-controller@20000 { - cache-size = <0x80000>; // L2, 512K - }; - dma@21300 { - status = "disabled"; + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "simple-bus"; + ranges = <0x0 0xffe00000 0x100000>; + bus-frequency = <0>; // Filled out by uboot. 
+ + L2: l2-cache-controller@20000 { + compatible = "fsl,mpc8572-l2-cache-controller"; + reg = <0x20000 0x1000>; + cache-line-size = <32>; // 32 bytes + cache-size = <0x80000>; // L2, 512K + interrupt-parent = <&mpic>; }; - ethernet@24000 { - status = "disabled"; + + dma@c300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma"; + reg = <0xc300 0x4>; + ranges = <0x0 0xc100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <76 2>; + }; + dma-channel@80 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <77 2>; + }; + dma-channel@100 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <78 2>; + }; + dma-channel@180 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <79 2>; + }; }; + mdio@24520 { - status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-mdio"; + reg = <0x24520 0x20>; + + phy2: ethernet-phy@2 { + interrupt-parent = <&mpic>; + reg = <0x2>; + }; + phy3: ethernet-phy@3 { + interrupt-parent = <&mpic>; + reg = <0x3>; + }; }; - ptp_clock@24e00 { - status = "disabled"; + + enet2: ethernet@26000 { + cell-index = <2>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x26000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <31 2 32 2 33 2>; + interrupt-parent = <&mpic>; + phy-handle = <&phy2>; + phy-connection-type = "rgmii-id"; }; - ethernet@25000 { - status = "disabled"; + + enet3: ethernet@27000 { + cell-index = <3>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x27000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <37 2 38 2 39 2>; + interrupt-parent = <&mpic>; + phy-handle = <&phy3>; + phy-connection-type = "rgmii-id"; }; - mdio@25520 { - status = "disabled"; + + msi@41600 { + compatible = "fsl,mpc8572-msi", "fsl,mpic-msi"; + reg = <0x41600 0x80>; + msi-available-ranges = <0x80 0x80>; + interrupts = < + 0xe4 0 + 0xe5 0 + 0xe6 0 + 0xe7 0>; + interrupt-parent = <&mpic>; }; - crypto@30000 { - status = "disabled"; + + serial0: serial@4600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4600 0x100>; + clock-frequency = <0>; }; - pic@40000 { + + mpic: pic@40000 { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <2>; + reg = <0x40000 0x40000>; + compatible = "chrp,open-pic"; + device_type = "open-pic"; protected-sources = < 18 16 10 42 45 58 /* MEM L2 mdio serial crypto */ 29 30 34 35 36 40 /* enet0 enet1 */ @@ -94,25 +189,41 @@ 0xe0 0xe1 0xe2 0xe3 /* msi */ >; }; - timer@41100 { - status = "disabled"; - }; - msi@41600 { - msi-available-ranges = <0x80 0x80>; - interrupts = < - 0xe4 0 - 0xe5 0 - 0xe6 0 - 0xe7 0>; - }; - global-utilities@e0000 { - status = "disabled"; - }; }; - pcie@ffe08000 { - status = "disabled"; - }; - pcie@ffe09000 { - status = "disabled"; + + pci2: pcie@ffe0a000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0xffe0a000 0x1000>; + bus-range = <0 255>; + ranges = 
<0x2000000 0x0 0xc0000000 0xc0000000 0x0 0x20000000 + 0x1000000 0x0 0x0 0xffc20000 0x0 0x10000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <26 2>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0000 0x0 0x0 0x2 &mpic 0x1 0x1 + 0000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0000 0x0 0x0 0x4 &mpic 0x3 0x1 + >; + pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + ranges = <0x2000000 0x0 0xc0000000 + 0x2000000 0x0 0xc0000000 + 0x0 0x20000000 + + 0x1000000 0x0 0x0 + 0x1000000 0x0 0x0 + 0x0 0x10000>; + }; }; }; diff --git a/trunk/arch/powerpc/boot/dts/mpc8610_hpcd.dts b/trunk/arch/powerpc/boot/dts/mpc8610_hpcd.dts index 6a109a0ceac9..83c3218cb4da 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8610_hpcd.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8610_hpcd.dts @@ -175,7 +175,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <42 2>; @@ -186,7 +186,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <42 2>; diff --git a/trunk/arch/powerpc/boot/dts/mpc8641_hpcn.dts b/trunk/arch/powerpc/boot/dts/mpc8641_hpcn.dts index 1e8666ccbed8..848320e4d3c4 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8641_hpcn.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8641_hpcn.dts @@ -26,6 +26,13 @@ serial1 = &serial1; pci0 = &pci0; pci1 = &pci1; +/* + * Only one of Rapid IO or PCI can be present due to HW limitations and + * due to the fact that the 2 now share address space in the new memory + * map. The most likely case is that we have PCI, so comment out the + * rapidio node. Leave it here for reference. + */ + /* rapidio0 = &rapidio0; */ }; cpus { @@ -328,7 +335,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <42 2>; @@ -338,7 +345,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <28 2>; @@ -354,41 +361,6 @@ device_type = "open-pic"; }; - rmu: rmu@d3000 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,srio-rmu"; - reg = <0xd3000 0x500>; - ranges = <0x0 0xd3000 0x500>; - - message-unit@0 { - compatible = "fsl,srio-msg-unit"; - reg = <0x0 0x100>; - interrupts = < - 53 2 /* msg1_tx_irq */ - 54 2>;/* msg1_rx_irq */ - }; - message-unit@100 { - compatible = "fsl,srio-msg-unit"; - reg = <0x100 0x100>; - interrupts = < - 55 2 /* msg2_tx_irq */ - 56 2>;/* msg2_rx_irq */ - }; - doorbell-unit@400 { - compatible = "fsl,srio-dbell-unit"; - reg = <0x400 0x80>; - interrupts = < - 49 2 /* bell_outb_irq */ - 50 2>;/* bell_inb_irq */ - }; - port-write-unit@4e0 { - compatible = "fsl,srio-port-write-unit"; - reg = <0x4e0 0x20>; - interrupts = <48 2>; - }; - }; - global-utilities@e0000 { compatible = "fsl,mpc8641-guts"; reg = <0xe0000 0x1000>; @@ -640,27 +612,16 @@ }; }; /* - * Only one of Rapid IO or PCI can be present due to HW limitations and - * due to the fact that the 2 now share address space in the new memory - * map. The most likely case is that we have PCI, so comment out the - * rapidio node. 
Leave it here for reference. - - rapidio@ffec0000 { - reg = <0xffec0000 0x11000>; - compatible = "fsl,srio"; - interrupt-parent = <&mpic>; - interrupts = <48 2>; + rapidio0: rapidio@ffec0000 { #address-cells = <2>; #size-cells = <2>; - fsl,srio-rmu-handle = <&rmu>; - ranges; - - port1 { - #address-cells = <2>; - #size-cells = <2>; - cell-index = <1>; - ranges = <0 0 0x80000000 0 0x20000000>; - }; + compatible = "fsl,rapidio-delta"; + reg = <0xffec0000 0x20000>; + ranges = <0 0 0x80000000 0 0x20000000>; + interrupt-parent = <&mpic>; + // err_irq bell_outb_irq bell_inb_irq + // msg1_tx_irq msg1_rx_irq msg2_tx_irq msg2_rx_irq + interrupts = <48 2 49 2 50 2 53 2 54 2 55 2 56 2>; }; */ diff --git a/trunk/arch/powerpc/boot/dts/mpc8641_hpcn_36b.dts b/trunk/arch/powerpc/boot/dts/mpc8641_hpcn_36b.dts index fd4cd4da60b5..8be8e701e1d3 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8641_hpcn_36b.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8641_hpcn_36b.dts @@ -328,7 +328,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <42 2>; @@ -338,7 +338,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <28 2>; diff --git a/trunk/arch/powerpc/boot/dts/obs600.dts b/trunk/arch/powerpc/boot/dts/obs600.dts deleted file mode 100644 index 18e7d79ee4c3..000000000000 --- a/trunk/arch/powerpc/boot/dts/obs600.dts +++ /dev/null @@ -1,314 +0,0 @@ -/* - * Device Tree Source for PlatHome OpenBlockS 600 (405EX) - * - * Copyright 2011 Ben Herrenschmidt, IBM Corp. - * - * Based on Kilauea by: - * - * Copyright 2007-2009 DENX Software Engineering, Stefan Roese - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without - * any warranty of any kind, whether express or implied. 
- */ - -/dts-v1/; - -/ { - #address-cells = <1>; - #size-cells = <1>; - model = "PlatHome,OpenBlockS 600"; - compatible = "plathome,obs600"; - dcr-parent = <&{/cpus/cpu@0}>; - - aliases { - ethernet0 = &EMAC0; - ethernet1 = &EMAC1; - serial0 = &UART0; - serial1 = &UART1; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - cpu@0 { - device_type = "cpu"; - model = "PowerPC,405EX"; - reg = <0x00000000>; - clock-frequency = <0>; /* Filled in by U-Boot */ - timebase-frequency = <0>; /* Filled in by U-Boot */ - i-cache-line-size = <32>; - d-cache-line-size = <32>; - i-cache-size = <16384>; /* 16 kB */ - d-cache-size = <16384>; /* 16 kB */ - dcr-controller; - dcr-access-method = "native"; - }; - }; - - memory { - device_type = "memory"; - reg = <0x00000000 0x00000000>; /* Filled in by U-Boot */ - }; - - UIC0: interrupt-controller { - compatible = "ibm,uic-405ex", "ibm,uic"; - interrupt-controller; - cell-index = <0>; - dcr-reg = <0x0c0 0x009>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - }; - - UIC1: interrupt-controller1 { - compatible = "ibm,uic-405ex","ibm,uic"; - interrupt-controller; - cell-index = <1>; - dcr-reg = <0x0d0 0x009>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - interrupts = <0x1e 0x4 0x1f 0x4>; /* cascade */ - interrupt-parent = <&UIC0>; - }; - - UIC2: interrupt-controller2 { - compatible = "ibm,uic-405ex","ibm,uic"; - interrupt-controller; - cell-index = <2>; - dcr-reg = <0x0e0 0x009>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - interrupts = <0x1c 0x4 0x1d 0x4>; /* cascade */ - interrupt-parent = <&UIC0>; - }; - - CPM0: cpm { - compatible = "ibm,cpm"; - dcr-access-method = "native"; - dcr-reg = <0x0b0 0x003>; - unused-units = <0x00000000>; - idle-doze = <0x02000000>; - standby = <0xe3e74800>; - }; - - plb { - compatible = "ibm,plb-405ex", "ibm,plb4"; - #address-cells = <1>; - #size-cells = <1>; - ranges; - clock-frequency = <0>; /* Filled in by U-Boot */ - - SDRAM0: memory-controller { - compatible = "ibm,sdram-405ex", "ibm,sdram-4xx-ddr2"; - dcr-reg = <0x010 0x002>; - interrupt-parent = <&UIC2>; - interrupts = <0x5 0x4 /* ECC DED Error */ - 0x6 0x4>; /* ECC SEC Error */ - }; - - CRYPTO: crypto@ef700000 { - compatible = "amcc,ppc405ex-crypto", "amcc,ppc4xx-crypto"; - reg = <0xef700000 0x80400>; - interrupt-parent = <&UIC0>; - interrupts = <0x17 0x2>; - }; - - MAL0: mcmal { - compatible = "ibm,mcmal-405ex", "ibm,mcmal2"; - dcr-reg = <0x180 0x062>; - num-tx-chans = <2>; - num-rx-chans = <2>; - interrupt-parent = <&MAL0>; - interrupts = <0x0 0x1 0x2 0x3 0x4>; - #interrupt-cells = <1>; - #address-cells = <0>; - #size-cells = <0>; - interrupt-map = ; - interrupt-map-mask = <0xffffffff>; - }; - - POB0: opb { - compatible = "ibm,opb-405ex", "ibm,opb"; - #address-cells = <1>; - #size-cells = <1>; - ranges = <0x80000000 0x80000000 0x10000000 - 0xef600000 0xef600000 0x00a00000 - 0xf0000000 0xf0000000 0x10000000>; - dcr-reg = <0x0a0 0x005>; - clock-frequency = <0>; /* Filled in by U-Boot */ - - EBC0: ebc { - compatible = "ibm,ebc-405ex", "ibm,ebc"; - dcr-reg = <0x012 0x002>; - #address-cells = <2>; - #size-cells = <1>; - clock-frequency = <0>; /* Filled in by U-Boot */ - /* ranges property is supplied by U-Boot */ - interrupts = <0x5 0x1>; - interrupt-parent = <&UIC1>; - - nor_flash@0,0 { - compatible = "amd,s29gl512n", "cfi-flash"; - bank-width = <2>; - reg = <0x00000000 0x00000000 0x08000000>; - #address-cells = <1>; - #size-cells = <1>; - partition@0 { - label = "kernel + initrd"; - reg = 
<0x00000000 0x03de0000>; - }; - partition@3de0000 { - label = "user config area"; - reg = <0x03de0000 0x00080000>; - }; - partition@3e60000 { - label = "user program area"; - reg = <0x03e60000 0x04000000>; - }; - partition@7e60000 { - label = "flat device tree"; - reg = <0x07e60000 0x00080000>; - }; - partition@7ee0000 { - label = "test program"; - reg = <0x07ee0000 0x00080000>; - }; - partition@7f60000 { - label = "u-boot env"; - reg = <0x07f60000 0x00040000>; - }; - partition@7fa0000 { - label = "u-boot"; - reg = <0x07fa0000 0x00060000>; - }; - }; - }; - - UART0: serial@ef600200 { - device_type = "serial"; - compatible = "ns16550"; - reg = <0xef600200 0x00000008>; - virtual-reg = <0xef600200>; - clock-frequency = <0>; /* Filled in by U-Boot */ - current-speed = <0>; - interrupt-parent = <&UIC0>; - interrupts = <0x1a 0x4>; - }; - - UART1: serial@ef600300 { - device_type = "serial"; - compatible = "ns16550"; - reg = <0xef600300 0x00000008>; - virtual-reg = <0xef600300>; - clock-frequency = <0>; /* Filled in by U-Boot */ - current-speed = <0>; - interrupt-parent = <&UIC0>; - interrupts = <0x1 0x4>; - }; - - IIC0: i2c@ef600400 { - compatible = "ibm,iic-405ex", "ibm,iic"; - reg = <0xef600400 0x00000014>; - interrupt-parent = <&UIC0>; - interrupts = <0x2 0x4>; - #address-cells = <1>; - #size-cells = <0>; - - rtc@68 { - compatible = "dallas,ds1340"; - reg = <0x68>; - }; - }; - - IIC1: i2c@ef600500 { - compatible = "ibm,iic-405ex", "ibm,iic"; - reg = <0xef600500 0x00000014>; - interrupt-parent = <&UIC0>; - interrupts = <0x7 0x4>; - }; - - RGMII0: emac-rgmii@ef600b00 { - compatible = "ibm,rgmii-405ex", "ibm,rgmii"; - reg = <0xef600b00 0x00000104>; - has-mdio; - }; - - EMAC0: ethernet@ef600900 { - linux,network-index = <0x0>; - device_type = "network"; - compatible = "ibm,emac-405ex", "ibm,emac4sync"; - interrupt-parent = <&EMAC0>; - interrupts = <0x0 0x1>; - #interrupt-cells = <1>; - #address-cells = <0>; - #size-cells = <0>; - interrupt-map = ; - reg = <0xef600900 0x000000c4>; - local-mac-address = [000000000000]; /* Filled in by U-Boot */ - mal-device = <&MAL0>; - mal-tx-channel = <0>; - mal-rx-channel = <0>; - cell-index = <0>; - max-frame-size = <9000>; - rx-fifo-size = <4096>; - tx-fifo-size = <2048>; - rx-fifo-size-gige = <16384>; - tx-fifo-size-gige = <16384>; - phy-mode = "rgmii"; - phy-map = <0x00000000>; - rgmii-device = <&RGMII0>; - rgmii-channel = <0>; - has-inverted-stacr-oc; - has-new-stacr-staopc; - }; - - EMAC1: ethernet@ef600a00 { - linux,network-index = <0x1>; - device_type = "network"; - compatible = "ibm,emac-405ex", "ibm,emac4sync"; - interrupt-parent = <&EMAC1>; - interrupts = <0x0 0x1>; - #interrupt-cells = <1>; - #address-cells = <0>; - #size-cells = <0>; - interrupt-map = ; - reg = <0xef600a00 0x000000c4>; - local-mac-address = [000000000000]; /* Filled in by U-Boot */ - mal-device = <&MAL0>; - mal-tx-channel = <1>; - mal-rx-channel = <1>; - cell-index = <1>; - max-frame-size = <9000>; - rx-fifo-size = <4096>; - tx-fifo-size = <2048>; - rx-fifo-size-gige = <16384>; - tx-fifo-size-gige = <16384>; - phy-mode = "rgmii"; - phy-map = <0x00000000>; - rgmii-device = <&RGMII0>; - rgmii-channel = <1>; - has-inverted-stacr-oc; - has-new-stacr-staopc; - }; - - GPIO: gpio@ef600800 { - device_type = "gpio"; - compatible = "ibm,gpio-405ex", "ibm,ppc4xx-gpio"; - reg = <0xef600800 0x50>; - }; - }; - }; - chosen { - linux,stdout-path = "/plb/opb/serial@ef600200"; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/p1010rdb.dts b/trunk/arch/powerpc/boot/dts/p1010rdb.dts index 
b868d22984e9..d6c669c888e9 100644 --- a/trunk/arch/powerpc/boot/dts/p1010rdb.dts +++ b/trunk/arch/powerpc/boot/dts/p1010rdb.dts @@ -9,33 +9,230 @@ * option) any later version. */ -/include/ "fsl/p1010si-pre.dtsi" +/include/ "p1010si.dtsi" / { model = "fsl,P1010RDB"; compatible = "fsl,P1010RDB"; + aliases { + serial0 = &serial0; + serial1 = &serial1; + ethernet0 = &enet0; + ethernet1 = &enet1; + ethernet2 = &enet2; + pci0 = &pci0; + pci1 = &pci1; + can0 = &can0; + can1 = &can1; + }; + memory { device_type = "memory"; }; - board_ifc: ifc: ifc@ffe1e000 { + ifc@ffe1e000 { /* NOR, NAND Flashes and CPLD on board */ ranges = <0x0 0x0 0x0 0xee000000 0x02000000 0x1 0x0 0x0 0xff800000 0x00010000 0x3 0x0 0x0 0xffb00000 0x00000020>; - reg = <0x0 0xffe1e000 0 0x2000>; + + nor@0,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "cfi-flash"; + reg = <0x0 0x0 0x2000000>; + bank-width = <2>; + device-width = <1>; + + partition@40000 { + /* 256KB for DTB Image */ + reg = <0x00040000 0x00040000>; + label = "NOR DTB Image"; + }; + + partition@80000 { + /* 7 MB for Linux Kernel Image */ + reg = <0x00080000 0x00700000>; + label = "NOR Linux Kernel Image"; + }; + + partition@800000 { + /* 20MB for JFFS2 based Root file System */ + reg = <0x00800000 0x01400000>; + label = "NOR JFFS2 Root File System"; + }; + + partition@1f00000 { + /* This location must not be altered */ + /* 512KB for u-boot Bootloader Image */ + /* 512KB for u-boot Environment Variables */ + reg = <0x01f00000 0x00100000>; + label = "NOR U-Boot Image"; + read-only; + }; + }; + + nand@1,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,ifc-nand"; + reg = <0x1 0x0 0x10000>; + + partition@0 { + /* This location must not be altered */ + /* 1MB for u-boot Bootloader Image */ + reg = <0x0 0x00100000>; + label = "NAND U-Boot Image"; + read-only; + }; + + partition@100000 { + /* 1MB for DTB Image */ + reg = <0x00100000 0x00100000>; + label = "NAND DTB Image"; + }; + + partition@200000 { + /* 4MB for Linux Kernel Image */ + reg = <0x00200000 0x00400000>; + label = "NAND Linux Kernel Image"; + }; + + partition@600000 { + /* 4MB for Compressed Root file System Image */ + reg = <0x00600000 0x00400000>; + label = "NAND Compressed RFS Image"; + }; + + partition@a00000 { + /* 15MB for JFFS2 based Root file System */ + reg = <0x00a00000 0x00f00000>; + label = "NAND JFFS2 Root File System"; + }; + + partition@1900000 { + /* 7MB for User Area */ + reg = <0x01900000 0x00700000>; + label = "NAND User area"; + }; + }; + + cpld@3,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,p1010rdb-cpld"; + reg = <0x3 0x0 0x0000020>; + bank-width = <1>; + device-width = <1>; + }; }; - board_soc: soc: soc@ffe00000 { - ranges = <0x0 0x0 0xffe00000 0x100000>; + soc@ffe00000 { + spi@7000 { + flash@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "spansion,s25sl12801"; + reg = <0>; + spi-max-frequency = <50000000>; + + partition@0 { + /* 1MB for u-boot Bootloader Image */ + /* 1MB for Environment */ + reg = <0x0 0x00100000>; + label = "SPI Flash U-Boot Image"; + read-only; + }; + + partition@100000 { + /* 512KB for DTB Image */ + reg = <0x00100000 0x00080000>; + label = "SPI Flash DTB Image"; + }; + + partition@180000 { + /* 4MB for Linux Kernel Image */ + reg = <0x00180000 0x00400000>; + label = "SPI Flash Linux Kernel Image"; + }; + + partition@580000 { + /* 4MB for Compressed RFS Image */ + reg = <0x00580000 0x00400000>; + label = "SPI Flash Compressed RFSImage"; + }; + + partition@980000 { + /* 6.5MB for JFFS2 
based RFS */ + reg = <0x00980000 0x00680000>; + label = "SPI Flash JFFS2 RFS"; + }; + }; + }; + + usb@22000 { + phy_type = "utmi"; + }; + + mdio@24000 { + phy0: ethernet-phy@0 { + interrupt-parent = <&mpic>; + interrupts = <3 1>; + reg = <0x1>; + }; + + phy1: ethernet-phy@1 { + interrupt-parent = <&mpic>; + interrupts = <2 1>; + reg = <0x0>; + }; + + phy2: ethernet-phy@2 { + interrupt-parent = <&mpic>; + interrupts = <2 1>; + reg = <0x2>; + }; + }; + + enet0: ethernet@b0000 { + phy-handle = <&phy0>; + phy-connection-type = "rgmii-id"; + }; + + enet1: ethernet@b1000 { + phy-handle = <&phy1>; + tbi-handle = <&tbi0>; + phy-connection-type = "sgmii"; + }; + + enet2: ethernet@b2000 { + phy-handle = <&phy2>; + tbi-handle = <&tbi1>; + phy-connection-type = "sgmii"; + }; }; pci0: pcie@ffe09000 { - reg = <0 0xffe09000 0 0x1000>; ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + interrupt-parent = <&mpic>; + interrupts = <16 2>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x4 0x1 + 0000 0x0 0x0 0x2 &mpic 0x5 0x1 + 0000 0x0 0x0 0x3 &mpic 0x6 0x1 + 0000 0x0 0x0 0x4 &mpic 0x7 0x1 + >; + ranges = <0x2000000 0x0 0xa0000000 0x2000000 0x0 0xa0000000 0x0 0x20000000 @@ -47,10 +244,24 @@ }; pci1: pcie@ffe0a000 { - reg = <0 0xffe0a000 0 0x1000>; ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + interrupt-parent = <&mpic>; + interrupts = <16 2>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x4 0x1 + 0000 0x0 0x0 0x2 &mpic 0x5 0x1 + 0000 0x0 0x0 0x3 &mpic 0x6 0x1 + 0000 0x0 0x0 0x4 &mpic 0x7 0x1 + >; ranges = <0x2000000 0x0 0x80000000 0x2000000 0x0 0x80000000 0x0 0x20000000 @@ -61,6 +272,3 @@ }; }; }; - -/include/ "p1010rdb.dtsi" -/include/ "fsl/p1010si-post.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/p1010rdb.dtsi b/trunk/arch/powerpc/boot/dts/p1010rdb.dtsi deleted file mode 100644 index d4c4a7730285..000000000000 --- a/trunk/arch/powerpc/boot/dts/p1010rdb.dtsi +++ /dev/null @@ -1,234 +0,0 @@ -/* - * P1010 RDB Device Tree Source stub (no addresses or top-level ranges) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. 
- * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -&board_ifc { - nor@0,0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "cfi-flash"; - reg = <0x0 0x0 0x2000000>; - bank-width = <2>; - device-width = <1>; - - partition@40000 { - /* 256KB for DTB Image */ - reg = <0x00040000 0x00040000>; - label = "NOR DTB Image"; - }; - - partition@80000 { - /* 7 MB for Linux Kernel Image */ - reg = <0x00080000 0x00700000>; - label = "NOR Linux Kernel Image"; - }; - - partition@800000 { - /* 20MB for JFFS2 based Root file System */ - reg = <0x00800000 0x01400000>; - label = "NOR JFFS2 Root File System"; - }; - - partition@1f00000 { - /* This location must not be altered */ - /* 512KB for u-boot Bootloader Image */ - /* 512KB for u-boot Environment Variables */ - reg = <0x01f00000 0x00100000>; - label = "NOR U-Boot Image"; - read-only; - }; - }; - - nand@1,0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,ifc-nand"; - reg = <0x1 0x0 0x10000>; - - partition@0 { - /* This location must not be altered */ - /* 1MB for u-boot Bootloader Image */ - reg = <0x0 0x00100000>; - label = "NAND U-Boot Image"; - read-only; - }; - - partition@100000 { - /* 1MB for DTB Image */ - reg = <0x00100000 0x00100000>; - label = "NAND DTB Image"; - }; - - partition@200000 { - /* 4MB for Linux Kernel Image */ - reg = <0x00200000 0x00400000>; - label = "NAND Linux Kernel Image"; - }; - - partition@600000 { - /* 4MB for Compressed Root file System Image */ - reg = <0x00600000 0x00400000>; - label = "NAND Compressed RFS Image"; - }; - - partition@a00000 { - /* 15MB for JFFS2 based Root file System */ - reg = <0x00a00000 0x00f00000>; - label = "NAND JFFS2 Root File System"; - }; - - partition@1900000 { - /* 7MB for User Area */ - reg = <0x01900000 0x00700000>; - label = "NAND User area"; - }; - }; - - cpld@3,0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,p1010rdb-cpld"; - reg = <0x3 0x0 0x0000020>; - bank-width = <1>; - device-width = <1>; - }; -}; - -&board_soc { - i2c@3000 { - rtc@68 { - compatible = "pericom,pt7c4338"; - reg = <0x68>; - }; - }; - - spi@7000 { - flash@0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "spansion,s25sl12801"; - reg = <0>; - spi-max-frequency = <50000000>; - - partition@0 { - /* 1MB for u-boot Bootloader Image */ - /* 1MB for Environment */ - reg = <0x0 0x00100000>; - label = "SPI Flash U-Boot Image"; - read-only; - }; - - partition@100000 { - /* 512KB for DTB Image */ - reg = <0x00100000 0x00080000>; - label = "SPI Flash DTB Image"; - }; - - partition@180000 { - /* 4MB for Linux Kernel Image */ - reg = <0x00180000 0x00400000>; - label = "SPI Flash Linux Kernel Image"; - }; - - partition@580000 { - /* 4MB for Compressed RFS Image */ - reg = <0x00580000 0x00400000>; - label = "SPI Flash Compressed RFSImage"; - }; - - 
partition@980000 { - /* 6.5MB for JFFS2 based RFS */ - reg = <0x00980000 0x00680000>; - label = "SPI Flash JFFS2 RFS"; - }; - }; - }; - - usb@22000 { - phy_type = "utmi"; - dr_mode = "host"; - }; - - mdio@24000 { - phy0: ethernet-phy@0 { - interrupts = <3 1 0 0>; - reg = <0x1>; - }; - - phy1: ethernet-phy@1 { - interrupts = <2 1 0 0>; - reg = <0x0>; - }; - - phy2: ethernet-phy@2 { - interrupts = <2 1 0 0>; - reg = <0x2>; - }; - - tbi-phy@3 { - device-type = "tbi-phy"; - reg = <0x3>; - }; - }; - - mdio@25000 { - tbi0: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; - }; - }; - - mdio@26000 { - tbi1: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; - }; - }; - - enet0: ethernet@b0000 { - phy-handle = <&phy0>; - phy-connection-type = "rgmii-id"; - }; - - enet1: ethernet@b1000 { - phy-handle = <&phy1>; - tbi-handle = <&tbi0>; - phy-connection-type = "sgmii"; - }; - - enet2: ethernet@b2000 { - phy-handle = <&phy2>; - tbi-handle = <&tbi1>; - phy-connection-type = "sgmii"; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/p1010rdb_36b.dts b/trunk/arch/powerpc/boot/dts/p1010rdb_36b.dts deleted file mode 100644 index 64776f4a4651..000000000000 --- a/trunk/arch/powerpc/boot/dts/p1010rdb_36b.dts +++ /dev/null @@ -1,89 +0,0 @@ -/* - * P1010 RDB Device Tree Source (36-bit address map) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -/include/ "fsl/p1010si-pre.dtsi" - -/ { - model = "fsl,P1010RDB"; - compatible = "fsl,P1010RDB"; - - memory { - device_type = "memory"; - }; - - board_ifc: ifc: ifc@fffe1e000 { - /* NOR, NAND Flashes and CPLD on board */ - ranges = <0x0 0x0 0xf 0xee000000 0x02000000 - 0x1 0x0 0xf 0xff800000 0x00010000 - 0x3 0x0 0xf 0xffb00000 0x00000020>; - reg = <0xf 0xffe1e000 0 0x2000>; - }; - - board_soc: soc: soc@fffe00000 { - ranges = <0x0 0xf 0xffe00000 0x100000>; - }; - - pci0: pcie@fffe09000 { - reg = <0xf 0xffe09000 0 0x1000>; - ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000 - 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>; - pcie@0 { - ranges = <0x2000000 0x0 0xc0000000 - 0x2000000 0x0 0xc0000000 - 0x0 0x20000000 - - 0x1000000 0x0 0x0 - 0x1000000 0x0 0x0 - 0x0 0x100000>; - }; - }; - - pci1: pcie@fffe0a000 { - reg = <0xf 0xffe0a000 0 0x1000>; - ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000 - 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>; - pcie@0 { - ranges = <0x2000000 0x0 0xc0000000 - 0x2000000 0x0 0xc0000000 - 0x0 0x20000000 - - 0x1000000 0x0 0x0 - 0x1000000 0x0 0x0 - 0x0 0x100000>; - }; - }; -}; - -/include/ "p1010rdb.dtsi" -/include/ "fsl/p1010si-post.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/p1010si.dtsi b/trunk/arch/powerpc/boot/dts/p1010si.dtsi new file mode 100644 index 000000000000..cabe0a453ae6 --- /dev/null +++ b/trunk/arch/powerpc/boot/dts/p1010si.dtsi @@ -0,0 +1,374 @@ +/* + * P1010si Device Tree Source + * + * Copyright 2011 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +/dts-v1/; +/ { + compatible = "fsl,P1010"; + #address-cells = <2>; + #size-cells = <2>; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + PowerPC,P1010@0 { + device_type = "cpu"; + reg = <0x0>; + next-level-cache = <&L2>; + }; + }; + + ifc@ffe1e000 { + #address-cells = <2>; + #size-cells = <1>; + compatible = "fsl,ifc", "simple-bus"; + reg = <0x0 0xffe1e000 0 0x2000>; + interrupts = <16 2 19 2>; + interrupt-parent = <&mpic>; + }; + + soc@ffe00000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "fsl,p1010-immr", "simple-bus"; + ranges = <0x0 0x0 0xffe00000 0x100000>; + bus-frequency = <0>; // Filled out by uboot. 
+ + ecm-law@0 { + compatible = "fsl,ecm-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <12>; + }; + + ecm@1000 { + compatible = "fsl,p1010-ecm", "fsl,ecm"; + reg = <0x1000 0x1000>; + interrupts = <16 2>; + interrupt-parent = <&mpic>; + }; + + memory-controller@2000 { + compatible = "fsl,p1010-memory-controller"; + reg = <0x2000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; + + i2c@3000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x3000 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + i2c@3100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x3100 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + serial0: serial@4500 { + cell-index = <0>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4500 0x100>; + clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + + serial1: serial@4600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4600 0x100>; + clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + + spi@7000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,mpc8536-espi"; + reg = <0x7000 0x1000>; + interrupts = <59 0x2>; + interrupt-parent = <&mpic>; + fsl,espi-num-chipselects = <1>; + }; + + gpio: gpio-controller@f000 { + #gpio-cells = <2>; + compatible = "fsl,mpc8572-gpio"; + reg = <0xf000 0x100>; + interrupts = <47 0x2>; + interrupt-parent = <&mpic>; + gpio-controller; + }; + + sata@18000 { + compatible = "fsl,pq-sata-v2"; + reg = <0x18000 0x1000>; + cell-index = <1>; + interrupts = <74 0x2>; + interrupt-parent = <&mpic>; + }; + + sata@19000 { + compatible = "fsl,pq-sata-v2"; + reg = <0x19000 0x1000>; + cell-index = <2>; + interrupts = <41 0x2>; + interrupt-parent = <&mpic>; + }; + + can0: can@1c000 { + compatible = "fsl,p1010-flexcan"; + reg = <0x1c000 0x1000>; + interrupts = <48 0x2>; + interrupt-parent = <&mpic>; + }; + + can1: can@1d000 { + compatible = "fsl,p1010-flexcan"; + reg = <0x1d000 0x1000>; + interrupts = <61 0x2>; + interrupt-parent = <&mpic>; + }; + + L2: l2-cache-controller@20000 { + compatible = "fsl,p1010-l2-cache-controller", + "fsl,p1014-l2-cache-controller"; + reg = <0x20000 0x1000>; + cache-line-size = <32>; // 32 bytes + cache-size = <0x40000>; // L2,256K + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; + + dma@21300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,p1010-dma", "fsl,eloplus-dma"; + reg = <0x21300 0x4>; + ranges = <0x0 0x21100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,p1010-dma-channel", "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <20 2>; + }; + dma-channel@80 { + compatible = "fsl,p1010-dma-channel", "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <21 2>; + }; + dma-channel@100 { + compatible = "fsl,p1010-dma-channel", "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <22 2>; + }; + dma-channel@180 { + compatible = "fsl,p1010-dma-channel", "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <23 2>; + }; + }; + + usb@22000 { + compatible = "fsl-usb2-dr"; + reg = <0x22000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupt-parent 
= <&mpic>; + interrupts = <28 0x2>; + dr_mode = "host"; + }; + + mdio@24000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,etsec2-mdio"; + reg = <0x24000 0x1000 0xb0030 0x4>; + }; + + mdio@25000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,etsec2-tbi"; + reg = <0x25000 0x1000 0xb1030 0x4>; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@26000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,etsec2-tbi"; + reg = <0x26000 0x1000 0xb1030 0x4>; + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + sdhci@2e000 { + compatible = "fsl,esdhc"; + reg = <0x2e000 0x1000>; + interrupts = <72 0x8>; + interrupt-parent = <&mpic>; + /* Filled in by U-Boot */ + clock-frequency = <0>; + fsl,sdhci-auto-cmd12; + }; + + enet0: ethernet@b0000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "network"; + model = "eTSEC"; + compatible = "fsl,etsec2"; + fsl,num_rx_queues = <0x8>; + fsl,num_tx_queues = <0x8>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupt-parent = <&mpic>; + + queue-group@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0xb0000 0x1000>; + fsl,rx-bit-map = <0xff>; + fsl,tx-bit-map = <0xff>; + interrupts = <29 2 30 2 34 2>; + }; + + }; + + enet1: ethernet@b1000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "network"; + model = "eTSEC"; + compatible = "fsl,etsec2"; + fsl,num_rx_queues = <0x8>; + fsl,num_tx_queues = <0x8>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupt-parent = <&mpic>; + + queue-group@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0xb1000 0x1000>; + fsl,rx-bit-map = <0xff>; + fsl,tx-bit-map = <0xff>; + interrupts = <35 2 36 2 40 2>; + }; + + }; + + enet2: ethernet@b2000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "network"; + model = "eTSEC"; + compatible = "fsl,etsec2"; + fsl,num_rx_queues = <0x8>; + fsl,num_tx_queues = <0x8>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupt-parent = <&mpic>; + + queue-group@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0xb2000 0x1000>; + fsl,rx-bit-map = <0xff>; + fsl,tx-bit-map = <0xff>; + interrupts = <31 2 32 2 33 2>; + }; + + }; + + mpic: pic@40000 { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <2>; + reg = <0x40000 0x40000>; + compatible = "chrp,open-pic"; + device_type = "open-pic"; + }; + + msi@41600 { + compatible = "fsl,p1010-msi", "fsl,mpic-msi"; + reg = <0x41600 0x80>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe0 0 + 0xe1 0 + 0xe2 0 + 0xe3 0 + 0xe4 0 + 0xe5 0 + 0xe6 0 + 0xe7 0>; + interrupt-parent = <&mpic>; + }; + + global-utilities@e0000 { //global utilities block + compatible = "fsl,p1010-guts"; + reg = <0xe0000 0x1000>; + fsl,has-rstcr; + }; + }; + + pci0: pcie@ffe09000 { + compatible = "fsl,p1010-pcie", "fsl,qoriq-pcie-v2.3", "fsl,qoriq-pcie-v2.2"; + device_type = "pci"; + #size-cells = <2>; + #address-cells = <3>; + reg = <0 0xffe09000 0 0x1000>; + bus-range = <0 255>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; + + pci1: pcie@ffe0a000 { + compatible = "fsl,p1010-pcie", "fsl,qoriq-pcie-v2.3", "fsl,qoriq-pcie-v2.2"; + device_type = "pci"; + #size-cells = <2>; + #address-cells = <3>; + reg = <0 0xffe0a000 0 0x1000>; + bus-range = <0 255>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; +}; diff --git a/trunk/arch/powerpc/boot/dts/p1020rdb.dts b/trunk/arch/powerpc/boot/dts/p1020rdb.dts 
index 518bf99b1f50..d6a8ae458137 100644 --- a/trunk/arch/powerpc/boot/dts/p1020rdb.dts +++ b/trunk/arch/powerpc/boot/dts/p1020rdb.dts @@ -9,33 +9,267 @@ * option) any later version. */ -/include/ "fsl/p1020si-pre.dtsi" +/include/ "p1020si.dtsi" + / { model = "fsl,P1020RDB"; compatible = "fsl,P1020RDB"; + aliases { + serial0 = &serial0; + serial1 = &serial1; + ethernet0 = &enet0; + ethernet1 = &enet1; + ethernet2 = &enet2; + pci0 = &pci0; + pci1 = &pci1; + }; + memory { device_type = "memory"; }; - board_lbc: lbc: localbus@ffe05000 { - reg = <0 0xffe05000 0 0x1000>; + localbus@ffe05000 { /* NOR, NAND Flashes and Vitesse 5 port L2 switch */ ranges = <0x0 0x0 0x0 0xef000000 0x01000000 0x1 0x0 0x0 0xffa00000 0x00040000 0x2 0x0 0x0 0xffb00000 0x00020000>; + + nor@0,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "cfi-flash"; + reg = <0x0 0x0 0x1000000>; + bank-width = <2>; + device-width = <1>; + + partition@0 { + /* This location must not be altered */ + /* 256KB for Vitesse 7385 Switch firmware */ + reg = <0x0 0x00040000>; + label = "NOR (RO) Vitesse-7385 Firmware"; + read-only; + }; + + partition@40000 { + /* 256KB for DTB Image */ + reg = <0x00040000 0x00040000>; + label = "NOR (RO) DTB Image"; + read-only; + }; + + partition@80000 { + /* 3.5 MB for Linux Kernel Image */ + reg = <0x00080000 0x00380000>; + label = "NOR (RO) Linux Kernel Image"; + read-only; + }; + + partition@400000 { + /* 11MB for JFFS2 based Root file System */ + reg = <0x00400000 0x00b00000>; + label = "NOR (RW) JFFS2 Root File System"; + }; + + partition@f00000 { + /* This location must not be altered */ + /* 512KB for u-boot Bootloader Image */ + /* 512KB for u-boot Environment Variables */ + reg = <0x00f00000 0x00100000>; + label = "NOR (RO) U-Boot Image"; + read-only; + }; + }; + + nand@1,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,p1020-fcm-nand", + "fsl,elbc-fcm-nand"; + reg = <0x1 0x0 0x40000>; + + partition@0 { + /* This location must not be altered */ + /* 1MB for u-boot Bootloader Image */ + reg = <0x0 0x00100000>; + label = "NAND (RO) U-Boot Image"; + read-only; + }; + + partition@100000 { + /* 1MB for DTB Image */ + reg = <0x00100000 0x00100000>; + label = "NAND (RO) DTB Image"; + read-only; + }; + + partition@200000 { + /* 4MB for Linux Kernel Image */ + reg = <0x00200000 0x00400000>; + label = "NAND (RO) Linux Kernel Image"; + read-only; + }; + + partition@600000 { + /* 4MB for Compressed Root file System Image */ + reg = <0x00600000 0x00400000>; + label = "NAND (RO) Compressed RFS Image"; + read-only; + }; + + partition@a00000 { + /* 7MB for JFFS2 based Root file System */ + reg = <0x00a00000 0x00700000>; + label = "NAND (RW) JFFS2 Root File System"; + }; + + partition@1100000 { + /* 15MB for JFFS2 based Root file System */ + reg = <0x01100000 0x00f00000>; + label = "NAND (RW) Writable User area"; + }; + }; + + L2switch@2,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "vitesse-7385"; + reg = <0x2 0x0 0x20000>; + }; + }; - board_soc: soc: soc@ffe00000 { - ranges = <0x0 0x0 0xffe00000 0x100000>; + soc@ffe00000 { + i2c@3000 { + rtc@68 { + compatible = "dallas,ds1339"; + reg = <0x68>; + }; + }; + + spi@7000 { + + fsl_m25p80@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,espi-flash"; + reg = <0>; + linux,modalias = "fsl_m25p80"; + modal = "s25sl128b"; + spi-max-frequency = <50000000>; + mode = <0>; + + partition@0 { + /* 512KB for u-boot Bootloader Image */ + reg = <0x0 0x00080000>; + label = "SPI (RO) U-Boot Image"; + read-only; + 
}; + + partition@80000 { + /* 512KB for DTB Image */ + reg = <0x00080000 0x00080000>; + label = "SPI (RO) DTB Image"; + read-only; + }; + + partition@100000 { + /* 4MB for Linux Kernel Image */ + reg = <0x00100000 0x00400000>; + label = "SPI (RO) Linux Kernel Image"; + read-only; + }; + + partition@500000 { + /* 4MB for Compressed RFS Image */ + reg = <0x00500000 0x00400000>; + label = "SPI (RO) Compressed RFS Image"; + read-only; + }; + + partition@900000 { + /* 7MB for JFFS2 based RFS */ + reg = <0x00900000 0x00700000>; + label = "SPI (RW) JFFS2 RFS"; + }; + }; + }; + + mdio@24000 { + + phy0: ethernet-phy@0 { + interrupt-parent = <&mpic>; + interrupts = <3 1>; + reg = <0x0>; + }; + + phy1: ethernet-phy@1 { + interrupt-parent = <&mpic>; + interrupts = <2 1>; + reg = <0x1>; + }; + }; + + mdio@25000 { + + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + enet0: ethernet@b0000 { + fixed-link = <1 1 1000 0 0>; + phy-connection-type = "rgmii-id"; + + }; + + enet1: ethernet@b1000 { + phy-handle = <&phy0>; + tbi-handle = <&tbi0>; + phy-connection-type = "sgmii"; + + }; + + enet2: ethernet@b2000 { + phy-handle = <&phy1>; + phy-connection-type = "rgmii-id"; + + }; + + usb@22000 { + phy_type = "ulpi"; + }; + + /* USB2 is shared with localbus, so it must be disabled + by default. We can't put 'status = "disabled";' here + since U-Boot doesn't clear the status property when + it enables USB2. OTOH, U-Boot does create a new node + when there isn't any. So, just comment it out. + usb@23000 { + phy_type = "ulpi"; + }; + */ + }; pci0: pcie@ffe09000 { ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; - reg = <0 0xffe09000 0 0x1000>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x4 0x1 + 0000 0x0 0x0 0x2 &mpic 0x5 0x1 + 0000 0x0 0x0 0x3 &mpic 0x6 0x1 + 0000 0x0 0x0 0x4 &mpic 0x7 0x1 + >; pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0xa0000000 0x2000000 0x0 0xa0000000 0x0 0x20000000 @@ -47,10 +281,21 @@ }; pci1: pcie@ffe0a000 { - reg = <0 0xffe0a000 0 0x1000>; ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0000 0x0 0x0 0x2 &mpic 0x1 0x1 + 0000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0000 0x0 0x0 0x4 &mpic 0x3 0x1 + >; pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0x80000000 0x2000000 0x0 0x80000000 0x0 0x20000000 @@ -61,6 +306,3 @@ }; }; }; - -/include/ "p1020rdb.dtsi" -/include/ "fsl/p1020si-post.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/p1020rdb.dtsi b/trunk/arch/powerpc/boot/dts/p1020rdb.dtsi deleted file mode 100644 index b5bd86f4baf2..000000000000 --- a/trunk/arch/powerpc/boot/dts/p1020rdb.dtsi +++ /dev/null @@ -1,247 +0,0 @@ -/* - * P1020 RDB Device Tree Source stub (no addresses or top-level ranges) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -&board_lbc { - nor@0,0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "cfi-flash"; - reg = <0x0 0x0 0x1000000>; - bank-width = <2>; - device-width = <1>; - - partition@0 { - /* This location must not be altered */ - /* 256KB for Vitesse 7385 Switch firmware */ - reg = <0x0 0x00040000>; - label = "NOR (RO) Vitesse-7385 Firmware"; - read-only; - }; - - partition@40000 { - /* 256KB for DTB Image */ - reg = <0x00040000 0x00040000>; - label = "NOR (RO) DTB Image"; - read-only; - }; - - partition@80000 { - /* 3.5 MB for Linux Kernel Image */ - reg = <0x00080000 0x00380000>; - label = "NOR (RO) Linux Kernel Image"; - read-only; - }; - - partition@400000 { - /* 11MB for JFFS2 based Root file System */ - reg = <0x00400000 0x00b00000>; - label = "NOR (RW) JFFS2 Root File System"; - }; - - partition@f00000 { - /* This location must not be altered */ - /* 512KB for u-boot Bootloader Image */ - /* 512KB for u-boot Environment Variables */ - reg = <0x00f00000 0x00100000>; - label = "NOR (RO) U-Boot Image"; - read-only; - }; - }; - - nand@1,0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,p1020-fcm-nand", - "fsl,elbc-fcm-nand"; - reg = <0x1 0x0 0x40000>; - - partition@0 { - /* This location must not be altered */ - /* 1MB for u-boot Bootloader Image */ - reg = <0x0 0x00100000>; - label = "NAND (RO) U-Boot Image"; - read-only; - }; - - partition@100000 { - /* 1MB for DTB Image */ - reg = <0x00100000 0x00100000>; - label = "NAND (RO) DTB Image"; - read-only; - }; - - partition@200000 { - /* 4MB for Linux Kernel Image */ - reg = <0x00200000 0x00400000>; - label = "NAND (RO) Linux Kernel Image"; - read-only; - }; - - partition@600000 { - /* 4MB for Compressed Root file System Image */ - reg = <0x00600000 0x00400000>; - label = "NAND (RO) Compressed RFS Image"; - read-only; - }; - - partition@a00000 { - /* 7MB for JFFS2 based Root file System */ - reg = <0x00a00000 0x00700000>; - label = "NAND (RW) JFFS2 Root File System"; - }; - - partition@1100000 { - /* 15MB for JFFS2 based Root file System */ - reg = <0x01100000 0x00f00000>; - label = "NAND (RW) 
Writable User area"; - }; - }; - - L2switch@2,0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "vitesse-7385"; - reg = <0x2 0x0 0x20000>; - }; -}; - -&board_soc { - i2c@3000 { - rtc@68 { - compatible = "dallas,ds1339"; - reg = <0x68>; - }; - }; - - spi@7000 { - flash@0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "spansion,s25sl12801"; - reg = <0>; - spi-max-frequency = <40000000>; /* input clock */ - - partition@u-boot { - /* 512KB for u-boot Bootloader Image */ - reg = <0x0 0x00080000>; - label = "u-boot"; - read-only; - }; - - partition@dtb { - /* 512KB for DTB Image */ - reg = <0x00080000 0x00080000>; - label = "dtb"; - read-only; - }; - - partition@kernel { - /* 4MB for Linux Kernel Image */ - reg = <0x00100000 0x00400000>; - label = "kernel"; - read-only; - }; - - partition@fs { - /* 4MB for Compressed RFS Image */ - reg = <0x00500000 0x00400000>; - label = "file system"; - read-only; - }; - - partition@jffs-fs { - /* 7MB for JFFS2 based RFS */ - reg = <0x00900000 0x00700000>; - label = "file system jffs2"; - }; - }; - }; - - usb@22000 { - phy_type = "ulpi"; - }; - - /* USB2 is shared with localbus, so it must be disabled - by default. We can't put 'status = "disabled";' here - since U-Boot doesn't clear the status property when - it enables USB2. OTOH, U-Boot does create a new node - when there isn't any. So, just comment it out. - usb@23000 { - phy_type = "ulpi"; - }; - */ - - mdio@24000 { - phy0: ethernet-phy@0 { - interrupt-parent = <&mpic>; - interrupts = <3 1>; - reg = <0x0>; - }; - - phy1: ethernet-phy@1 { - interrupt-parent = <&mpic>; - interrupts = <2 1>; - reg = <0x1>; - }; - - tbi-phy@2 { - device_type = "tbi-phy"; - reg = <0x2>; - }; - }; - - mdio@25000 { - tbi0: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; - }; - }; - - enet0: ethernet@b0000 { - fixed-link = <1 1 1000 0 0>; - phy-connection-type = "rgmii-id"; - - }; - - enet1: ethernet@b1000 { - phy-handle = <&phy0>; - tbi-handle = <&tbi0>; - phy-connection-type = "sgmii"; - }; - - enet2: ethernet@b2000 { - phy-handle = <&phy1>; - phy-connection-type = "rgmii-id"; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/p1020rdb_36b.dts b/trunk/arch/powerpc/boot/dts/p1020rdb_36b.dts deleted file mode 100644 index bdbdb6097e57..000000000000 --- a/trunk/arch/powerpc/boot/dts/p1020rdb_36b.dts +++ /dev/null @@ -1,66 +0,0 @@ -/* - * P1020 RDB Device Tree Source (36-bit address map) - * - * Copyright 2009-2011 Freescale Semiconductor Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - -/include/ "fsl/p1020si-pre.dtsi" -/ { - model = "fsl,P1020RDB"; - compatible = "fsl,P1020RDB"; - - memory { - device_type = "memory"; - }; - - board_lbc: lbc: localbus@fffe05000 { - reg = <0xf 0xffe05000 0 0x1000>; - - /* NOR, NAND Flashes and Vitesse 5 port L2 switch */ - ranges = <0x0 0x0 0xf 0xef000000 0x01000000 - 0x1 0x0 0xf 0xffa00000 0x00040000 - 0x2 0x0 0xf 0xffb00000 0x00020000>; - }; - - board_soc: soc: soc@fffe00000 { - ranges = <0x0 0xf 0xffe00000 0x100000>; - }; - - pci0: pcie@fffe09000 { - reg = <0xf 0xffe09000 0 0x1000>; - ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000 - 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>; - pcie@0 { - ranges = <0x2000000 0x0 0xc0000000 - 0x2000000 0x0 0xc0000000 - 0x0 0x20000000 - - 0x1000000 0x0 0x0 - 0x1000000 0x0 0x0 - 0x0 0x100000>; - }; - }; - - pci1: pcie@fffe0a000 { - reg = <0xf 0xffe0a000 0 0x1000>; - ranges = <0x2000000 0x0 0x80000000 0xc 0x00000000 0x0 0x20000000 - 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>; - pcie@0 { - ranges = <0x2000000 0x0 0x80000000 - 0x2000000 0x0 0x80000000 - 0x0 0x20000000 - - 0x1000000 0x0 0x0 - 0x1000000 0x0 0x0 - 0x0 0x100000>; - }; - }; -}; - -/include/ "p1020rdb.dtsi" -/include/ "fsl/p1020si-post.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts b/trunk/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts index 41b4585c5da8..f0bf7f42f097 100644 --- a/trunk/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts +++ b/trunk/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts @@ -16,7 +16,7 @@ * option) any later version. */ -/include/ "p1020rdb.dts" +/include/ "p1020si.dtsi" / { model = "fsl,P1020RDB"; @@ -32,7 +32,7 @@ cpus { PowerPC,P1020@1 { - status = "disabled"; + status = "disabled"; }; }; @@ -45,19 +45,169 @@ }; soc@ffe00000 { + i2c@3000 { + rtc@68 { + compatible = "dallas,ds1339"; + reg = <0x68>; + }; + }; + serial1: serial@4600 { status = "disabled"; }; + spi@7000 { + fsl_m25p80@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,espi-flash"; + reg = <0>; + linux,modalias = "fsl_m25p80"; + spi-max-frequency = <40000000>; + + partition@0 { + /* 512KB for u-boot Bootloader Image */ + reg = <0x0 0x00080000>; + label = "SPI (RO) U-Boot Image"; + read-only; + }; + + partition@80000 { + /* 512KB for DTB Image */ + reg = <0x00080000 0x00080000>; + label = "SPI (RO) DTB Image"; + read-only; + }; + + partition@100000 { + /* 4MB for Linux Kernel Image */ + reg = <0x00100000 0x00400000>; + label = "SPI (RO) Linux Kernel Image"; + read-only; + }; + + partition@500000 { + /* 4MB for Compressed RFS Image */ + reg = <0x00500000 0x00400000>; + label = "SPI (RO) Compressed RFS Image"; + read-only; + }; + + partition@900000 { + /* 7MB for JFFS2 based RFS */ + reg = <0x00900000 0x00700000>; + label = "SPI (RW) JFFS2 RFS"; + }; + }; + }; + + mdio@24000 { + phy0: ethernet-phy@0 { + interrupt-parent = <&mpic>; + interrupts = <3 1>; + reg = <0x0>; + }; + phy1: ethernet-phy@1 { + interrupt-parent = <&mpic>; + interrupts = <2 1>; + reg = <0x1>; + }; + }; + + mdio@25000 { + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + enet0: ethernet@b0000 { status = "disabled"; }; + enet1: ethernet@b1000 { + phy-handle = <&phy0>; + tbi-handle = <&tbi0>; + phy-connection-type = "sgmii"; + }; + + enet2: ethernet@b2000 { + phy-handle = <&phy1>; + phy-connection-type = "rgmii-id"; + }; + + usb@22000 { + phy_type = "ulpi"; + }; + + /* USB2 is shared with localbus, so it must be disabled + by default. 
We can't put 'status = "disabled";' here + since U-Boot doesn't clear the status property when + it enables USB2. OTOH, U-Boot does create a new node + when there isn't any. So, just comment it out. + usb@23000 { + phy_type = "ulpi"; + }; + */ + mpic: pic@40000 { protected-sources = < 42 29 30 34 /* serial1, enet0-queue-group0 */ 17 18 24 45 /* enet0-queue-group1, crypto */ >; }; + + }; + + pci0: pcie@ffe09000 { + ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 + 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x4 0x1 + 0000 0x0 0x0 0x2 &mpic 0x5 0x1 + 0000 0x0 0x0 0x3 &mpic 0x6 0x1 + 0000 0x0 0x0 0x4 &mpic 0x7 0x1 + >; + pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + ranges = <0x2000000 0x0 0xa0000000 + 0x2000000 0x0 0xa0000000 + 0x0 0x20000000 + + 0x1000000 0x0 0x0 + 0x1000000 0x0 0x0 + 0x0 0x100000>; + }; + }; + + pci1: pcie@ffe0a000 { + ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 + 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0000 0x0 0x0 0x2 &mpic 0x1 0x1 + 0000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0000 0x0 0x0 0x4 &mpic 0x3 0x1 + >; + pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + ranges = <0x2000000 0x0 0x80000000 + 0x2000000 0x0 0x80000000 + 0x0 0x20000000 + + 0x1000000 0x0 0x0 + 0x1000000 0x0 0x0 + 0x0 0x100000>; + }; }; }; diff --git a/trunk/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts b/trunk/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts index 517453821884..6ec02204a44e 100644 --- a/trunk/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts +++ b/trunk/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts @@ -15,7 +15,7 @@ * option) any later version. */ -/include/ "p1020rdb.dts" +/include/ "p1020si.dtsi" / { model = "fsl,P1020RDB"; @@ -28,7 +28,7 @@ cpus { PowerPC,P1020@0 { - status = "disabled"; + status = "disabled"; }; }; @@ -85,6 +85,12 @@ status = "disabled"; }; + enet0: ethernet@b0000 { + fixed-link = <1 1 1000 0 0>; + phy-connection-type = "rgmii-id"; + + }; + enet1: ethernet@b1000 { status = "disabled"; }; @@ -129,6 +135,7 @@ global-utilities@e0000 { //global utilities block status = "disabled"; }; + }; pci0: pcie@ffe09000 { diff --git a/trunk/arch/powerpc/boot/dts/p1020si.dtsi b/trunk/arch/powerpc/boot/dts/p1020si.dtsi new file mode 100644 index 000000000000..5c5acb66c3fc --- /dev/null +++ b/trunk/arch/powerpc/boot/dts/p1020si.dtsi @@ -0,0 +1,377 @@ +/* + * P1020si Device Tree Source + * + * Copyright 2011 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +/dts-v1/; +/ { + compatible = "fsl,P1020"; + #address-cells = <2>; + #size-cells = <2>; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + PowerPC,P1020@0 { + device_type = "cpu"; + reg = <0x0>; + next-level-cache = <&L2>; + }; + + PowerPC,P1020@1 { + device_type = "cpu"; + reg = <0x1>; + next-level-cache = <&L2>; + }; + }; + + localbus@ffe05000 { + #address-cells = <2>; + #size-cells = <1>; + compatible = "fsl,p1020-elbc", "fsl,elbc", "simple-bus"; + reg = <0 0xffe05000 0 0x1000>; + interrupts = <19 2>; + interrupt-parent = <&mpic>; + }; + + soc@ffe00000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "fsl,p1020-immr", "simple-bus"; + ranges = <0x0 0x0 0xffe00000 0x100000>; + bus-frequency = <0>; // Filled out by uboot. + + ecm-law@0 { + compatible = "fsl,ecm-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <12>; + }; + + ecm@1000 { + compatible = "fsl,p1020-ecm", "fsl,ecm"; + reg = <0x1000 0x1000>; + interrupts = <16 2>; + interrupt-parent = <&mpic>; + }; + + memory-controller@2000 { + compatible = "fsl,p1020-memory-controller"; + reg = <0x2000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; + + i2c@3000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x3000 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + i2c@3100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x3100 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + serial0: serial@4500 { + cell-index = <0>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4500 0x100>; + clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + + serial1: serial@4600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4600 0x100>; + clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + + spi@7000 { + cell-index = <0>; + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,espi"; + reg = <0x7000 0x1000>; + interrupts = <59 0x2>; + interrupt-parent = <&mpic>; + mode = "cpu"; + }; + + gpio: gpio-controller@f000 { + #gpio-cells = <2>; + compatible = "fsl,mpc8572-gpio"; + reg = <0xf000 0x100>; + interrupts = <47 0x2>; + interrupt-parent = <&mpic>; + gpio-controller; + }; + + L2: l2-cache-controller@20000 { + compatible = "fsl,p1020-l2-cache-controller"; + reg = <0x20000 0x1000>; + cache-line-size = <32>; // 32 bytes + cache-size = <0x40000>; // L2,256K + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; + + dma@21300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,eloplus-dma"; + reg = <0x21300 0x4>; + ranges = <0x0 0x21100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <20 2>; + }; + dma-channel@80 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <21 2>; + }; + dma-channel@100 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <22 2>; + }; + dma-channel@180 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <23 2>; + }; + }; + + mdio@24000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,etsec2-mdio"; + reg = 
<0x24000 0x1000 0xb0030 0x4>; + + }; + + mdio@25000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,etsec2-tbi"; + reg = <0x25000 0x1000 0xb1030 0x4>; + + }; + + enet0: ethernet@b0000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "network"; + model = "eTSEC"; + compatible = "fsl,etsec2"; + fsl,num_rx_queues = <0x8>; + fsl,num_tx_queues = <0x8>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupt-parent = <&mpic>; + + queue-group@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0xb0000 0x1000>; + interrupts = <29 2 30 2 34 2>; + }; + + queue-group@1 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0xb4000 0x1000>; + interrupts = <17 2 18 2 24 2>; + }; + }; + + enet1: ethernet@b1000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "network"; + model = "eTSEC"; + compatible = "fsl,etsec2"; + fsl,num_rx_queues = <0x8>; + fsl,num_tx_queues = <0x8>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupt-parent = <&mpic>; + + queue-group@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0xb1000 0x1000>; + interrupts = <35 2 36 2 40 2>; + }; + + queue-group@1 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0xb5000 0x1000>; + interrupts = <51 2 52 2 67 2>; + }; + }; + + enet2: ethernet@b2000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "network"; + model = "eTSEC"; + compatible = "fsl,etsec2"; + fsl,num_rx_queues = <0x8>; + fsl,num_tx_queues = <0x8>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupt-parent = <&mpic>; + + queue-group@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0xb2000 0x1000>; + interrupts = <31 2 32 2 33 2>; + }; + + queue-group@1 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0xb6000 0x1000>; + interrupts = <25 2 26 2 27 2>; + }; + }; + + usb@22000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl-usb2-dr"; + reg = <0x22000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <28 0x2>; + }; + + /* USB2 is shared with localbus, so it must be disabled + by default. We can't put 'status = "disabled";' here + since U-Boot doesn't clear the status property when + it enables USB2. OTOH, U-Boot does create a new node + when there isn't any. So, just comment it out. 
+ usb@23000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl-usb2-dr"; + reg = <0x23000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <46 0x2>; + phy_type = "ulpi"; + }; + */ + + sdhci@2e000 { + compatible = "fsl,p1020-esdhc", "fsl,esdhc"; + reg = <0x2e000 0x1000>; + interrupts = <72 0x2>; + interrupt-parent = <&mpic>; + /* Filled in by U-Boot */ + clock-frequency = <0>; + }; + + crypto@30000 { + compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4", + "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0"; + reg = <0x30000 0x10000>; + interrupts = <45 2 58 2>; + interrupt-parent = <&mpic>; + fsl,num-channels = <4>; + fsl,channel-fifo-len = <24>; + fsl,exec-units-mask = <0xbfe>; + fsl,descriptor-types-mask = <0x3ab0ebf>; + }; + + mpic: pic@40000 { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <2>; + reg = <0x40000 0x40000>; + compatible = "chrp,open-pic"; + device_type = "open-pic"; + }; + + msi@41600 { + compatible = "fsl,p1020-msi", "fsl,mpic-msi"; + reg = <0x41600 0x80>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe0 0 + 0xe1 0 + 0xe2 0 + 0xe3 0 + 0xe4 0 + 0xe5 0 + 0xe6 0 + 0xe7 0>; + interrupt-parent = <&mpic>; + }; + + global-utilities@e0000 { //global utilities block + compatible = "fsl,p1020-guts","fsl,p2020-guts"; + reg = <0xe0000 0x1000>; + fsl,has-rstcr; + }; + }; + + pci0: pcie@ffe09000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0 0xffe09000 0 0x1000>; + bus-range = <0 255>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; + + pci1: pcie@ffe0a000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0 0xffe0a000 0 0x1000>; + bus-range = <0 255>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; +}; diff --git a/trunk/arch/powerpc/boot/dts/p1021mds.dts b/trunk/arch/powerpc/boot/dts/p1021mds.dts index d9540791e434..ad5b85269004 100644 --- a/trunk/arch/powerpc/boot/dts/p1021mds.dts +++ b/trunk/arch/powerpc/boot/dts/p1021mds.dts @@ -9,22 +9,53 @@ * option) any later version. */ -/include/ "fsl/p1021si-pre.dtsi" +/dts-v1/; / { model = "fsl,P1021"; compatible = "fsl,P1021MDS"; + #address-cells = <2>; + #size-cells = <2>; aliases { + serial0 = &serial0; + serial1 = &serial1; + ethernet0 = &enet0; + ethernet1 = &enet1; + ethernet2 = &enet2; ethernet3 = &enet3; ethernet4 = &enet4; + pci0 = &pci0; + pci1 = &pci1; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + PowerPC,P1021@0 { + device_type = "cpu"; + reg = <0x0>; + next-level-cache = <&L2>; + }; + + PowerPC,P1021@1 { + device_type = "cpu"; + reg = <0x1>; + next-level-cache = <&L2>; + }; }; memory { device_type = "memory"; }; - lbc: localbus@ffe05000 { - reg = <0x0 0xffe05000 0x0 0x1000>; + localbus@ffe05000 { + #address-cells = <2>; + #size-cells = <1>; + compatible = "fsl,p1021-elbc", "fsl,elbc", "simple-bus"; + reg = <0 0xffe05000 0 0x1000>; + interrupts = <19 2>; + interrupt-parent = <&mpic>; /* NAND Flash, BCSR, PMC0/1*/ ranges = <0x0 0x0 0x0 0xfc000000 0x02000000 @@ -107,26 +138,99 @@ }; }; - soc: soc@ffe00000 { + soc@ffe00000 { + + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; compatible = "fsl,p1021-immr", "simple-bus"; - ranges = <0x0 0x0 0xffe00000 0x100000>; + ranges = <0x0 0x0 0xffe00000 0x100000>; + bus-frequency = <0>; // Filled out by uboot. 
+ + ecm-law@0 { + compatible = "fsl,ecm-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <12>; + }; + + ecm@1000 { + compatible = "fsl,p1021-ecm", "fsl,ecm"; + reg = <0x1000 0x1000>; + interrupts = <16 2>; + interrupt-parent = <&mpic>; + }; + + memory-controller@2000 { + compatible = "fsl,p1021-memory-controller"; + reg = <0x2000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; i2c@3000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x3000 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; rtc@68 { compatible = "dallas,ds1374"; reg = <0x68>; }; }; - spi@7000 { + i2c@3100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x3100 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + serial0: serial@4500 { + cell-index = <0>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4500 0x100>; + clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; - flash@0 { + serial1: serial@4600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4600 0x100>; + clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + + spi@7000 { + cell-index = <0>; + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,espi"; + reg = <0x7000 0x1000>; + interrupts = <59 0x2>; + interrupt-parent = <&mpic>; + espi,num-ss-bits = <4>; + mode = "cpu"; + + fsl_m25p80@0 { #address-cells = <1>; #size-cells = <1>; - compatible = "spansion,s25sl12801"; + compatible = "fsl,espi-flash"; reg = <0>; + linux,modalias = "fsl_m25p80"; spi-max-frequency = <40000000>; /* input clock */ - partition@u-boot { label = "u-boot-spi"; reg = <0x00000000 0x00100000>; @@ -149,49 +253,237 @@ }; }; + gpio: gpio-controller@f000 { + #gpio-cells = <2>; + compatible = "fsl,mpc8572-gpio"; + reg = <0xf000 0x100>; + interrupts = <47 0x2>; + interrupt-parent = <&mpic>; + gpio-controller; + }; + + L2: l2-cache-controller@20000 { + compatible = "fsl,p1021-l2-cache-controller"; + reg = <0x20000 0x1000>; + cache-line-size = <32>; // 32 bytes + cache-size = <0x40000>; // L2,256K + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; + + dma@21300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,eloplus-dma"; + reg = <0x21300 0x4>; + ranges = <0x0 0x21100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <20 2>; + }; + dma-channel@80 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <21 2>; + }; + dma-channel@100 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <22 2>; + }; + dma-channel@180 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <23 2>; + }; + }; + usb@22000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl-usb2-dr"; + reg = <0x22000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <28 0x2>; phy_type = "ulpi"; }; - mdio@24000 { + mdio@24000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,etsec2-mdio"; + reg = <0x24000 0x1000 0xb0030 0x4>; + phy0: ethernet-phy@0 { - interrupts = <1 1 0 0>; + interrupt-parent = <&mpic>; + interrupts = <1 1>; reg = <0x0>; }; phy1: 
ethernet-phy@1 { - interrupts = <2 1 0 0>; + interrupt-parent = <&mpic>; + interrupts = <2 1>; reg = <0x1>; }; phy4: ethernet-phy@4 { + interrupt-parent = <&mpic>; reg = <0x4>; }; - tbi-phy@5 { - device_type = "tbi-phy"; - reg = <0x5>; - }; }; mdio@25000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,etsec2-tbi"; + reg = <0x25000 0x1000 0xb1030 0x4>; tbi0: tbi-phy@11 { reg = <0x11>; device_type = "tbi-phy"; }; }; - ethernet@b0000 { + enet0: ethernet@B0000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <0>; + device_type = "network"; + model = "eTSEC"; + compatible = "fsl,etsec2"; + fsl,num_rx_queues = <0x8>; + fsl,num_tx_queues = <0x8>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupt-parent = <&mpic>; phy-handle = <&phy0>; phy-connection-type = "rgmii-id"; + queue-group@0{ + #address-cells = <1>; + #size-cells = <1>; + reg = <0xB0000 0x1000>; + interrupts = <29 2 30 2 34 2>; + }; + queue-group@1{ + #address-cells = <1>; + #size-cells = <1>; + reg = <0xB4000 0x1000>; + interrupts = <17 2 18 2 24 2>; + }; }; - ethernet@b1000 { + enet1: ethernet@B1000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <0>; + device_type = "network"; + model = "eTSEC"; + compatible = "fsl,etsec2"; + fsl,num_rx_queues = <0x8>; + fsl,num_tx_queues = <0x8>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupt-parent = <&mpic>; phy-handle = <&phy4>; tbi-handle = <&tbi0>; phy-connection-type = "sgmii"; + queue-group@0{ + #address-cells = <1>; + #size-cells = <1>; + reg = <0xB1000 0x1000>; + interrupts = <35 2 36 2 40 2>; + }; + queue-group@1{ + #address-cells = <1>; + #size-cells = <1>; + reg = <0xB5000 0x1000>; + interrupts = <51 2 52 2 67 2>; + }; }; - ethernet@b2000 { + enet2: ethernet@B2000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <0>; + device_type = "network"; + model = "eTSEC"; + compatible = "fsl,etsec2"; + fsl,num_rx_queues = <0x8>; + fsl,num_tx_queues = <0x8>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupt-parent = <&mpic>; phy-handle = <&phy1>; phy-connection-type = "rgmii-id"; + queue-group@0{ + #address-cells = <1>; + #size-cells = <1>; + reg = <0xB2000 0x1000>; + interrupts = <31 2 32 2 33 2>; + }; + queue-group@1{ + #address-cells = <1>; + #size-cells = <1>; + reg = <0xB6000 0x1000>; + interrupts = <25 2 26 2 27 2>; + }; + }; + + sdhci@2e000 { + compatible = "fsl,p1021-esdhc", "fsl,esdhc"; + reg = <0x2e000 0x1000>; + interrupts = <72 0x2>; + interrupt-parent = <&mpic>; + /* Filled in by U-Boot */ + clock-frequency = <0>; + }; + + crypto@30000 { + compatible = "fsl,sec3.3", "fsl,sec3.1", + "fsl,sec3.0", "fsl,sec2.4", + "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0"; + reg = <0x30000 0x10000>; + interrupts = <45 2 58 2>; + interrupt-parent = <&mpic>; + fsl,num-channels = <4>; + fsl,channel-fifo-len = <24>; + fsl,exec-units-mask = <0x97c>; + fsl,descriptor-types-mask = <0x3a30abf>; + }; + + mpic: pic@40000 { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <2>; + reg = <0x40000 0x40000>; + compatible = "chrp,open-pic"; + device_type = "open-pic"; + }; + + msi@41600 { + compatible = "fsl,p1021-msi", "fsl,mpic-msi"; + reg = <0x41600 0x80>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe0 0 + 0xe1 0 + 0xe2 0 + 0xe3 0 + 0xe4 0 + 0xe5 0 + 0xe6 0 + 0xe7 0>; + interrupt-parent = <&mpic>; + }; + + global-utilities@e0000 { //global utilities block + compatible = "fsl,p1021-guts"; + reg = <0xe0000 0x1000>; + fsl,has-rstcr; }; par_io@e0100 { @@ -207,7 +499,8 @@ 0x1 0x13 0x1 0x0 0x1 0x0 /* QE_MUX_MDC 
*/ 0x1 0x14 0x3 0x0 0x1 0x0 /* QE_MUX_MDIO */ 0x0 0x17 0x2 0x0 0x2 0x0 /* CLK12 */ - 0x0 0x18 0x2 0x0 0x1 0x0 /* CLK9 */ + 0x0 0x18 0x2 0x0 0x1 0x0 /* CLK9 +*/ 0x0 0x7 0x1 0x0 0x2 0x0 /* ENET1_TXD0_SER1_TXD0 */ 0x0 0x9 0x1 0x0 0x2 0x0 /* ENET1_TXD1_SER1_TXD1 */ 0x0 0xb 0x1 0x0 0x2 0x0 /* ENET1_TXD2_SER1_TXD2 */ @@ -242,10 +535,31 @@ }; pci0: pcie@ffe09000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; reg = <0 0xffe09000 0 0x1000>; + bus-range = <0 255>; ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <16 2>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 4 1 + 0000 0 0 2 &mpic 5 1 + 0000 0 0 3 &mpic 6 1 + 0000 0 0 4 &mpic 7 1 + >; pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0xa0000000 0x2000000 0x0 0xa0000000 0x0 0x20000000 @@ -257,10 +571,31 @@ }; pci1: pcie@ffe0a000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; reg = <0 0xffe0a000 0 0x1000>; + bus-range = <0 255>; ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <16 2>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 0 1 + 0000 0 0 2 &mpic 1 1 + 0000 0 0 3 &mpic 2 1 + 0000 0 0 4 &mpic 3 1 + >; pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0xc0000000 0x2000000 0x0 0xc0000000 0x0 0x20000000 @@ -271,16 +606,36 @@ }; }; - qe: qe@ffe80000 { + qe@ffe80000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "qe"; + compatible = "fsl,qe"; ranges = <0x0 0x0 0xffe80000 0x40000>; reg = <0 0xffe80000 0 0x480>; brg-frequency = <0>; bus-frequency = <0>; + fsl,qe-num-riscs = <1>; + fsl,qe-num-snums = <28>; status = "disabled"; /* no firmware loaded */ + qeic: interrupt-controller@80 { + interrupt-controller; + compatible = "fsl,qe-ic"; + #address-cells = <0>; + #interrupt-cells = <1>; + reg = <0x80 0x80>; + interrupts = <63 2 60 2>; //high:47 low:44 + interrupt-parent = <&mpic>; + }; + enet3: ucc@2000 { device_type = "network"; compatible = "ucc_geth"; + cell-index = <1>; + reg = <0x2000 0x200>; + interrupts = <32>; + interrupt-parent = <&qeic>; local-mac-address = [ 00 00 00 00 00 00 ]; rx-clock-name = "clk12"; tx-clock-name = "clk9"; @@ -290,15 +645,20 @@ }; mdio@2120 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x2120 0x18>; + compatible = "fsl,ucc-mdio"; + qe_phy0: ethernet-phy@0 { interrupt-parent = <&mpic>; - interrupts = <4 1 0 0>; + interrupts = <4 1>; reg = <0x0>; device_type = "ethernet-phy"; }; qe_phy1: ethernet-phy@03 { interrupt-parent = <&mpic>; - interrupts = <5 1 0 0>; + interrupts = <5 1>; reg = <0x3>; device_type = "ethernet-phy"; }; @@ -311,6 +671,10 @@ enet4: ucc@2400 { device_type = "network"; compatible = "ucc_geth"; + cell-index = <5>; + reg = <0x2400 0x200>; + interrupts = <40>; + interrupt-parent = <&qeic>; local-mac-address = [ 00 00 00 00 00 00 ]; rx-clock-name = "none"; tx-clock-name = "clk13"; @@ -318,7 +682,18 @@ phy-handle = <&qe_phy1>; phy-connection-type = "rmii"; }; + + muram@10000 { 
+ #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,qe-muram", "fsl,cpm-muram"; + ranges = <0x0 0x10000 0x6000>; + + data-only@0 { + compatible = "fsl,qe-muram-data", + "fsl,cpm-muram-data"; + reg = <0x0 0x6000>; + }; + }; }; }; - -/include/ "fsl/p1021si-post.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/p1022ds.dts b/trunk/arch/powerpc/boot/dts/p1022ds.dts index ef95717db4bc..b9b8719a6204 100644 --- a/trunk/arch/powerpc/boot/dts/p1022ds.dts +++ b/trunk/arch/powerpc/boot/dts/p1022ds.dts @@ -8,36 +8,57 @@ * kind, whether express or implied. */ -/include/ "fsl/p1022si-pre.dtsi" +/dts-v1/; / { - model = "fsl,P1022DS"; + model = "fsl,P1022"; compatible = "fsl,P1022DS"; + #address-cells = <2>; + #size-cells = <2>; + interrupt-parent = <&mpic>; + + aliases { + ethernet0 = &enet0; + ethernet1 = &enet1; + serial0 = &serial0; + serial1 = &serial1; + pci0 = &pci0; + pci1 = &pci1; + pci2 = &pci2; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + PowerPC,P1022@0 { + device_type = "cpu"; + reg = <0x0>; + next-level-cache = <&L2>; + }; + + PowerPC,P1022@1 { + device_type = "cpu"; + reg = <0x1>; + next-level-cache = <&L2>; + }; + }; memory { device_type = "memory"; }; - lbc: localbus@fffe05000 { - reg = <0xf 0xffe05000 0 0x1000>; + localbus@fffe05000 { + #address-cells = <2>; + #size-cells = <1>; + compatible = "fsl,p1022-elbc", "fsl,elbc", "simple-bus"; + reg = <0 0xffe05000 0 0x1000>; + interrupts = <19 2 0 0>; + ranges = <0x0 0x0 0xf 0xe8000000 0x08000000 0x1 0x0 0xf 0xe0000000 0x08000000 - 0x2 0x0 0xf 0xff800000 0x00040000 + 0x2 0x0 0x0 0xffa00000 0x00040000 0x3 0x0 0xf 0xffdf0000 0x00008000>; - /* - * This node is used to access the pixis via "indirect" mode, - * which is done by writing the pixis register index to chip - * select 0 and the value to/from chip select 1. Indirect - * mode is the only way to access the pixis when DIU video - * is enabled. Note that this assumes that the first column - * of the 'ranges' property above is the chip select number. - */ - board-control@0,0 { - compatible = "fsl,p1022ds-indirect-pixis"; - reg = <0x0 0x0 1 /* CS0 */ - 0x1 0x0 1>; /* CS1 */ - }; - nor@0,0 { #address-cells = <1>; #size-cells = <1>; @@ -140,10 +161,51 @@ }; }; - soc: soc@fffe00000 { + soc@fffe00000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "fsl,p1022-immr", "simple-bus"; ranges = <0x0 0xf 0xffe00000 0x100000>; + bus-frequency = <0>; // Filled out by uboot. 
+ + ecm-law@0 { + compatible = "fsl,ecm-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <12>; + }; + + ecm@1000 { + compatible = "fsl,p1022-ecm", "fsl,ecm"; + reg = <0x1000 0x1000>; + interrupts = <16 2 0 0>; + }; + + memory-controller@2000 { + compatible = "fsl,p1022-memory-controller"; + reg = <0x2000 0x1000>; + interrupts = <16 2 0 0>; + }; + + i2c@3000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x3000 0x100>; + interrupts = <43 2 0 0>; + dfsrr; + }; i2c@3100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x3100 0x100>; + interrupts = <43 2 0 0>; + dfsrr; + wm8776:codec@1a { compatible = "wlf,wm8776"; reg = <0x1a>; @@ -154,14 +216,41 @@ }; }; + serial0: serial@4500 { + cell-index = <0>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4500 0x100>; + clock-frequency = <0>; + interrupts = <42 2 0 0>; + }; + + serial1: serial@4600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4600 0x100>; + clock-frequency = <0>; + interrupts = <42 2 0 0>; + }; + spi@7000 { - flash@0 { + cell-index = <0>; + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,espi"; + reg = <0x7000 0x1000>; + interrupts = <59 0x2 0 0>; + espi,num-ss-bits = <4>; + mode = "cpu"; + + fsl_m25p80@0 { #address-cells = <1>; #size-cells = <1>; - compatible = "spansion,s25sl12801"; + compatible = "fsl,espi-flash"; reg = <0>; + linux,modalias = "fsl_m25p80"; spi-max-frequency = <40000000>; /* input clock */ - partition@0 { label = "u-boot-spi"; reg = <0x00000000 0x00100000>; @@ -185,20 +274,115 @@ }; ssi@15000 { + compatible = "fsl,mpc8610-ssi"; + cell-index = <0>; + reg = <0x15000 0x100>; + interrupts = <75 2 0 0>; fsl,mode = "i2s-slave"; codec-handle = <&wm8776>; + fsl,playback-dma = <&dma00>; + fsl,capture-dma = <&dma01>; + fsl,fifo-depth = <15>; fsl,ssi-asynchronous; }; - usb@22000 { - phy_type = "ulpi"; + dma@c300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,eloplus-dma"; + reg = <0xc300 0x4>; + ranges = <0x0 0xc100 0x200>; + cell-index = <1>; + dma00: dma-channel@0 { + compatible = "fsl,ssi-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupts = <76 2 0 0>; + }; + dma01: dma-channel@80 { + compatible = "fsl,ssi-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupts = <77 2 0 0>; + }; + dma-channel@100 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupts = <78 2 0 0>; + }; + dma-channel@180 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupts = <79 2 0 0>; + }; }; - usb@23000 { - status = "disabled"; + gpio: gpio-controller@f000 { + #gpio-cells = <2>; + compatible = "fsl,mpc8572-gpio"; + reg = <0xf000 0x100>; + interrupts = <47 0x2 0 0>; + gpio-controller; + }; + + L2: l2-cache-controller@20000 { + compatible = "fsl,p1022-l2-cache-controller"; + reg = <0x20000 0x1000>; + cache-line-size = <32>; // 32 bytes + cache-size = <0x40000>; // L2, 256K + interrupts = <16 2 0 0>; + }; + + dma@21300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,eloplus-dma"; + reg = <0x21300 0x4>; + ranges = <0x0 0x21100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupts = <20 2 0 0>; + }; + dma-channel@80 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupts = <21 2 0 0>; + }; + dma-channel@100 
{ + compatible = "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupts = <22 2 0 0>; + }; + dma-channel@180 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupts = <23 2 0 0>; + }; + }; + + usb@22000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl-usb2-dr"; + reg = <0x22000 0x1000>; + interrupts = <28 0x2 0 0>; + phy_type = "ulpi"; }; mdio@24000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,etsec2-mdio"; + reg = <0x24000 0x1000 0xb0030 0x4>; + phy0: ethernet-phy@0 { interrupts = <3 1 0 0>; reg = <0x1>; @@ -207,28 +391,189 @@ interrupts = <9 1 0 0>; reg = <0x2>; }; - tbi-phy@2 { - device_type = "tbi-phy"; - reg = <0x2>; - }; }; - ethernet@b0000 { + mdio@25000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,etsec2-mdio"; + reg = <0x25000 0x1000 0xb1030 0x4>; + }; + + enet0: ethernet@B0000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <0>; + device_type = "network"; + model = "eTSEC"; + compatible = "fsl,etsec2"; + fsl,num_rx_queues = <0x8>; + fsl,num_tx_queues = <0x8>; + fsl,magic-packet; + fsl,wake-on-filer; + local-mac-address = [ 00 00 00 00 00 00 ]; phy-handle = <&phy0>; phy-connection-type = "rgmii-id"; + queue-group@0{ + #address-cells = <1>; + #size-cells = <1>; + reg = <0xB0000 0x1000>; + interrupts = <29 2 0 0 30 2 0 0 34 2 0 0>; + }; + queue-group@1{ + #address-cells = <1>; + #size-cells = <1>; + reg = <0xB4000 0x1000>; + interrupts = <17 2 0 0 18 2 0 0 24 2 0 0>; + }; }; - ethernet@b1000 { + enet1: ethernet@B1000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <0>; + device_type = "network"; + model = "eTSEC"; + compatible = "fsl,etsec2"; + fsl,num_rx_queues = <0x8>; + fsl,num_tx_queues = <0x8>; + local-mac-address = [ 00 00 00 00 00 00 ]; phy-handle = <&phy1>; phy-connection-type = "rgmii-id"; + queue-group@0{ + #address-cells = <1>; + #size-cells = <1>; + reg = <0xB1000 0x1000>; + interrupts = <35 2 0 0 36 2 0 0 40 2 0 0>; + }; + queue-group@1{ + #address-cells = <1>; + #size-cells = <1>; + reg = <0xB5000 0x1000>; + interrupts = <51 2 0 0 52 2 0 0 67 2 0 0>; + }; + }; + + sdhci@2e000 { + compatible = "fsl,p1022-esdhc", "fsl,esdhc"; + reg = <0x2e000 0x1000>; + interrupts = <72 0x2 0 0>; + fsl,sdhci-auto-cmd12; + /* Filled in by U-Boot */ + clock-frequency = <0>; + }; + + crypto@30000 { + compatible = "fsl,sec3.3", "fsl,sec3.1", "fsl,sec3.0", + "fsl,sec2.4", "fsl,sec2.2", "fsl,sec2.1", + "fsl,sec2.0"; + reg = <0x30000 0x10000>; + interrupts = <45 2 0 0 58 2 0 0>; + fsl,num-channels = <4>; + fsl,channel-fifo-len = <24>; + fsl,exec-units-mask = <0x97c>; + fsl,descriptor-types-mask = <0x3a30abf>; + }; + + sata@18000 { + compatible = "fsl,p1022-sata", "fsl,pq-sata-v2"; + reg = <0x18000 0x1000>; + cell-index = <1>; + interrupts = <74 0x2 0 0>; + }; + + sata@19000 { + compatible = "fsl,p1022-sata", "fsl,pq-sata-v2"; + reg = <0x19000 0x1000>; + cell-index = <2>; + interrupts = <41 0x2 0 0>; + }; + + power@e0070{ + compatible = "fsl,mpc8536-pmc", "fsl,mpc8548-pmc"; + reg = <0xe0070 0x20>; + }; + + display@10000 { + compatible = "fsl,diu", "fsl,p1022-diu"; + reg = <0x10000 1000>; + interrupts = <64 2 0 0>; + }; + + timer@41100 { + compatible = "fsl,mpic-global-timer"; + reg = <0x41100 0x100 0x41300 4>; + interrupts = <0 0 3 0 + 1 0 3 0 + 2 0 3 0 + 3 0 3 0>; + }; + + timer@42100 { + compatible = "fsl,mpic-global-timer"; + reg = <0x42100 0x100 0x42300 4>; + interrupts = <4 0 3 0 + 5 0 3 0 + 6 0 3 0 + 7 0 3 0>; + }; + + mpic: 
pic@40000 { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <4>; + reg = <0x40000 0x40000>; + compatible = "fsl,mpic"; + device_type = "open-pic"; + }; + + msi@41600 { + compatible = "fsl,p1022-msi", "fsl,mpic-msi"; + reg = <0x41600 0x80>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe0 0 0 0 + 0xe1 0 0 0 + 0xe2 0 0 0 + 0xe3 0 0 0 + 0xe4 0 0 0 + 0xe5 0 0 0 + 0xe6 0 0 0 + 0xe7 0 0 0>; + }; + + global-utilities@e0000 { //global utilities block + compatible = "fsl,p1022-guts"; + reg = <0xe0000 0x1000>; + fsl,has-rstcr; }; }; pci0: pcie@fffe09000 { + compatible = "fsl,p1022-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; reg = <0xf 0xffe09000 0 0x1000>; - ranges = <0x2000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000 + bus-range = <0 255>; + ranges = <0x2000000 0x0 0xa0000000 0xc 0x20000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>; + clock-frequency = <33333333>; + interrupts = <16 2 0 0>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 4 1 + 0000 0 0 2 &mpic 5 1 + 0000 0 0 3 &mpic 6 1 + 0000 0 0 4 &mpic 7 1 + >; pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0xe0000000 0x2000000 0x0 0xe0000000 0x0 0x20000000 @@ -240,11 +585,30 @@ }; pci1: pcie@fffe0a000 { + compatible = "fsl,p1022-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; reg = <0xf 0xffe0a000 0 0x1000>; - ranges = <0x2000000 0x0 0xe0000000 0xc 0x40000000 0x0 0x20000000 + bus-range = <0 255>; + ranges = <0x2000000 0x0 0xc0000000 0xc 0x40000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0xf 0xffc20000 0x0 0x10000>; + clock-frequency = <33333333>; + interrupts = <16 2 0 0>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 0 1 + 0000 0 0 2 &mpic 1 1 + 0000 0 0 3 &mpic 2 1 + 0000 0 0 4 &mpic 3 1 + >; pcie@0 { reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0xe0000000 0x2000000 0x0 0xe0000000 0x0 0x20000000 @@ -255,11 +619,32 @@ }; }; + pci2: pcie@fffe0b000 { + compatible = "fsl,p1022-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; reg = <0xf 0xffe0b000 0 0x1000>; - ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x20000000 + bus-range = <0 255>; + ranges = <0x2000000 0x0 0x80000000 0xc 0x00000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>; + clock-frequency = <33333333>; + interrupts = <16 2 0 0>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 8 1 + 0000 0 0 2 &mpic 9 1 + 0000 0 0 3 &mpic 10 1 + 0000 0 0 4 &mpic 11 1 + >; pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0xe0000000 0x2000000 0x0 0xe0000000 0x0 0x20000000 @@ -270,5 +655,3 @@ }; }; }; - -/include/ "fsl/p1022si-post.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/p1023rds.dts b/trunk/arch/powerpc/boot/dts/p1023rds.dts index beb6cb12e59d..d3b478242ea9 100644 --- a/trunk/arch/powerpc/boot/dts/p1023rds.dts +++ b/trunk/arch/powerpc/boot/dts/p1023rds.dts @@ -34,30 +34,137 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -/include/ "fsl/p1023si-pre.dtsi" +/dts-v1/; / { model = "fsl,P1023"; compatible = "fsl,P1023RDS"; #address-cells = <2>; #size-cells = <2>; - interrupt-parent = <&mpic>; + + aliases { + serial0 = &serial0; + serial1 = &serial1; + pci0 = &pci0; + pci1 = &pci1; + pci2 = &pci2; + + crypto = &crypto; + sec_jr0 = &sec_jr0; + sec_jr1 = &sec_jr1; + sec_jr2 = &sec_jr2; + sec_jr3 = &sec_jr3; + rtic_a = &rtic_a; + rtic_b = &rtic_b; + rtic_c = &rtic_c; + rtic_d = &rtic_d; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu0: PowerPC,P1023@0 { + device_type = "cpu"; + reg = <0x0>; + next-level-cache = <&L2>; + }; + + cpu1: PowerPC,P1023@1 { + device_type = "cpu"; + reg = <0x1>; + next-level-cache = <&L2>; + }; + }; memory { device_type = "memory"; }; - soc: soc@ff600000 { + soc@ff600000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "fsl,p1023-immr", "simple-bus"; ranges = <0x0 0x0 0xff600000 0x200000>; + bus-frequency = <0>; // Filled out by uboot. + + ecm-law@0 { + compatible = "fsl,ecm-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <12>; + }; + + ecm@1000 { + compatible = "fsl,p1023-ecm", "fsl,ecm"; + reg = <0x1000 0x1000>; + interrupts = <16 2>; + interrupt-parent = <&mpic>; + }; + + memory-controller@2000 { + compatible = "fsl,p1023-memory-controller"; + reg = <0x2000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; i2c@3000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x3000 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; rtc@68 { compatible = "dallas,ds1374"; reg = <0x68>; }; }; + i2c@3100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x3100 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + serial0: serial@4500 { + cell-index = <0>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4500 0x100>; + clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + + serial1: serial@4600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4600 0x100>; + clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + spi@7000 { + cell-index = <0>; + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,p1023-espi", "fsl,mpc8536-espi"; + reg = <0x7000 0x1000>; + interrupts = <59 0x2>; + interrupt-parent = <&mpic>; + fsl,espi-num-chipselects = <4>; + fsl_dataflash@0 { #address-cells = <1>; #size-cells = <1>; @@ -79,14 +186,197 @@ }; }; + gpio: gpio-controller@f000 { + #gpio-cells = <2>; + compatible = "fsl,qoriq-gpio"; + reg = <0xf000 0x100>; + interrupts = <47 0x2>; + interrupt-parent = <&mpic>; + gpio-controller; + }; + + L2: l2-cache-controller@20000 { + compatible = "fsl,p1023-l2-cache-controller"; + reg = <0x20000 0x1000>; + cache-line-size = <32>; // 32 bytes + cache-size = <0x40000>; // L2,256K + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; + + dma@21300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,eloplus-dma"; + reg = <0x21300 0x4>; + ranges = <0x0 0x21100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <20 2>; + }; + dma-channel@80 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <21 2>; + }; + dma-channel@100 { + compatible = 
"fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <22 2>; + }; + dma-channel@180 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <23 2>; + }; + }; + usb@22000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl-usb2-dr"; + reg = <0x22000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <28 0x2>; dr_mode = "host"; phy_type = "ulpi"; }; + + crypto: crypto@300000 { + compatible = "fsl,sec-v4.2", "fsl,sec-v4.0"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x30000 0x10000>; + ranges = <0 0x30000 0x10000>; + interrupt-parent = <&mpic>; + interrupts = <58 2>; + + sec_jr0: jr@1000 { + compatible = "fsl,sec-v4.2-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x1000 0x1000>; + interrupts = <45 2>; + }; + + sec_jr1: jr@2000 { + compatible = "fsl,sec-v4.2-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x2000 0x1000>; + interrupts = <45 2>; + }; + + sec_jr2: jr@3000 { + compatible = "fsl,sec-v4.2-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x3000 0x1000>; + interrupts = <57 2>; + }; + + sec_jr3: jr@4000 { + compatible = "fsl,sec-v4.2-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x4000 0x1000>; + interrupts = <57 2>; + }; + + rtic@6000 { + compatible = "fsl,sec-v4.2-rtic", + "fsl,sec-v4.0-rtic"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x6000 0x100>; + ranges = <0x0 0x6100 0xe00>; + + rtic_a: rtic-a@0 { + compatible = "fsl,sec-v4.2-rtic-memory", + "fsl,sec-v4.0-rtic-memory"; + reg = <0x00 0x20 0x100 0x80>; + }; + + rtic_b: rtic-b@20 { + compatible = "fsl,sec-v4.2-rtic-memory", + "fsl,sec-v4.0-rtic-memory"; + reg = <0x20 0x20 0x200 0x80>; + }; + + rtic_c: rtic-c@40 { + compatible = "fsl,sec-v4.2-rtic-memory", + "fsl,sec-v4.0-rtic-memory"; + reg = <0x40 0x20 0x300 0x80>; + }; + + rtic_d: rtic-d@60 { + compatible = "fsl,sec-v4.2-rtic-memory", + "fsl,sec-v4.0-rtic-memory"; + reg = <0x60 0x20 0x500 0x80>; + }; + }; + }; + + power@e0070{ + compatible = "fsl,mpc8536-pmc", "fsl,mpc8548-pmc", + "fsl,p1022-pmc"; + reg = <0xe0070 0x20>; + etsec1_clk: soc-clk@B0{ + fsl,pmcdr-mask = <0x00000080>; + }; + etsec2_clk: soc-clk@B1{ + fsl,pmcdr-mask = <0x00000040>; + }; + etsec3_clk: soc-clk@B2{ + fsl,pmcdr-mask = <0x00000020>; + }; + }; + + mpic: pic@40000 { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <2>; + reg = <0x40000 0x40000>; + compatible = "chrp,open-pic"; + device_type = "open-pic"; + }; + + msi@41600 { + compatible = "fsl,p1023-msi", "fsl,mpic-msi"; + reg = <0x41600 0x80>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe0 0 + 0xe1 0 + 0xe2 0 + 0xe3 0 + 0xe4 0 + 0xe5 0 + 0xe6 0 + 0xe7 0>; + interrupt-parent = <&mpic>; + }; + + global-utilities@e0000 { //global utilities block + compatible = "fsl,p1023-guts"; + reg = <0xe0000 0x1000>; + fsl,has-rstcr; + }; }; - lbc: localbus@ff605000 { + localbus@ff605000 { + #address-cells = <2>; + #size-cells = <1>; + compatible = "fsl,p1023-elbc", "fsl,elbc", "simple-bus"; reg = <0 0xff605000 0 0x1000>; + interrupts = <19 2>; + interrupt-parent = <&mpic>; /* NOR Flash, BCSR */ ranges = <0x0 0x0 0x0 0xee000000 0x02000000 @@ -138,18 +428,34 @@ }; pci0: pcie@ff60a000 { + compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2"; + cell-index = <1>; + device_type = "pci"; + #size-cells = <2>; + #address-cells = <3>; reg = <0 0xff60a000 0 0x1000>; + bus-range = <0 255>; ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 
0xffc20000 0x0 0x10000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <16 2>; pcie@0 { - /* IRQ[0:3] are pulled up on board, set to active-low */ + reg = <0x0 0x0 0x0 0x0 0x0>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + interrupt-parent = <&mpic>; + interrupts = <16 2>; interrupt-map-mask = <0xf800 0 0 7>; + /* IRQ[0:3] are pulled up on board, set to active-low */ interrupt-map = < /* IDSEL 0x0 */ - 0000 0 0 1 &mpic 0 1 0 0 - 0000 0 0 2 &mpic 1 1 0 0 - 0000 0 0 3 &mpic 2 1 0 0 - 0000 0 0 4 &mpic 3 1 0 0 + 0000 0 0 1 &mpic 0 1 + 0000 0 0 2 &mpic 1 1 + 0000 0 0 3 &mpic 2 1 + 0000 0 0 4 &mpic 3 1 >; ranges = <0x2000000 0x0 0xc0000000 0x2000000 0x0 0xc0000000 @@ -161,22 +467,38 @@ }; }; - board_pci1: pci1: pcie@ff609000 { + pci1: pcie@ff609000 { + compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2"; + cell-index = <2>; + device_type = "pci"; + #size-cells = <2>; + #address-cells = <3>; reg = <0 0xff609000 0 0x1000>; + bus-range = <0 255>; ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <16 2>; pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + interrupt-parent = <&mpic>; + interrupts = <16 2>; + interrupt-map-mask = <0xf800 0 0 7>; /* * IRQ[4:6] only for PCIe, set to active-high, * IRQ[7] is pulled up on board, set to active-low */ - interrupt-map-mask = <0xf800 0 0 7>; interrupt-map = < /* IDSEL 0x0 */ - 0000 0 0 1 &mpic 4 2 0 0 - 0000 0 0 2 &mpic 5 2 0 0 - 0000 0 0 3 &mpic 6 2 0 0 - 0000 0 0 4 &mpic 7 1 0 0 + 0000 0 0 1 &mpic 4 2 + 0000 0 0 2 &mpic 5 2 + 0000 0 0 3 &mpic 6 2 + 0000 0 0 4 &mpic 7 1 >; ranges = <0x2000000 0x0 0xa0000000 0x2000000 0x0 0xa0000000 @@ -189,21 +511,37 @@ }; pci2: pcie@ff60b000 { + cell-index = <3>; + compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2"; + device_type = "pci"; + #size-cells = <2>; + #address-cells = <3>; reg = <0 0xff60b000 0 0x1000>; + bus-range = <0 255>; ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <16 2>; pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + interrupt-parent = <&mpic>; + interrupts = <16 2>; + interrupt-map-mask = <0xf800 0 0 7>; /* * IRQ[8:10] are pulled up on board, set to active-low * IRQ[11] only for PCIe, set to active-high, */ - interrupt-map-mask = <0xf800 0 0 7>; interrupt-map = < /* IDSEL 0x0 */ - 0000 0 0 1 &mpic 8 1 0 0 - 0000 0 0 2 &mpic 9 1 0 0 - 0000 0 0 3 &mpic 10 1 0 0 - 0000 0 0 4 &mpic 11 2 0 0 + 0000 0 0 1 &mpic 8 1 + 0000 0 0 2 &mpic 9 1 + 0000 0 0 3 &mpic 10 1 + 0000 0 0 4 &mpic 11 2 >; ranges = <0x2000000 0x0 0x80000000 0x2000000 0x0 0x80000000 @@ -215,5 +553,3 @@ }; }; }; - -/include/ "fsl/p1023si-post.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/p2020ds.dts b/trunk/arch/powerpc/boot/dts/p2020ds.dts index 237310cc7e6c..66f03d6477b2 100644 --- a/trunk/arch/powerpc/boot/dts/p2020ds.dts +++ b/trunk/arch/powerpc/boot/dts/p2020ds.dts @@ -9,17 +9,30 @@ * option) any later version. 
*/ -/include/ "fsl/p2020si-pre.dtsi" +/include/ "p2020si.dtsi" / { model = "fsl,P2020DS"; compatible = "fsl,P2020DS"; + aliases { + ethernet0 = &enet0; + ethernet1 = &enet1; + ethernet2 = &enet2; + serial0 = &serial0; + serial1 = &serial1; + pci0 = &pci0; + pci1 = &pci1; + pci2 = &pci2; + }; + + memory { device_type = "memory"; }; - board_lbc: lbc: localbus@ffe05000 { + localbus@ffe05000 { + compatible = "fsl,elbc", "simple-bus"; ranges = <0x0 0x0 0x0 0xe8000000 0x08000000 0x1 0x0 0x0 0xe0000000 0x08000000 0x2 0x0 0x0 0xffa00000 0x00040000 @@ -27,18 +40,203 @@ 0x4 0x0 0x0 0xffa40000 0x00040000 0x5 0x0 0x0 0xffa80000 0x00040000 0x6 0x0 0x0 0xffac0000 0x00040000>; - reg = <0 0xffe05000 0 0x1000>; + + nor@0,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "cfi-flash"; + reg = <0x0 0x0 0x8000000>; + bank-width = <2>; + device-width = <1>; + + ramdisk@0 { + reg = <0x0 0x03000000>; + read-only; + }; + + diagnostic@3000000 { + reg = <0x03000000 0x00e00000>; + read-only; + }; + + dink@3e00000 { + reg = <0x03e00000 0x00200000>; + read-only; + }; + + kernel@4000000 { + reg = <0x04000000 0x00400000>; + read-only; + }; + + jffs2@4400000 { + reg = <0x04400000 0x03b00000>; + }; + + dtb@7f00000 { + reg = <0x07f00000 0x00080000>; + read-only; + }; + + u-boot@7f80000 { + reg = <0x07f80000 0x00080000>; + read-only; + }; + }; + + nand@2,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,elbc-fcm-nand"; + reg = <0x2 0x0 0x40000>; + + u-boot@0 { + reg = <0x0 0x02000000>; + read-only; + }; + + jffs2@2000000 { + reg = <0x02000000 0x10000000>; + }; + + ramdisk@12000000 { + reg = <0x12000000 0x08000000>; + read-only; + }; + + kernel@1a000000 { + reg = <0x1a000000 0x04000000>; + }; + + dtb@1e000000 { + reg = <0x1e000000 0x01000000>; + read-only; + }; + + empty@1f000000 { + reg = <0x1f000000 0x21000000>; + }; + }; + + board-control@3,0 { + compatible = "fsl,p2020ds-fpga", "fsl,fpga-ngpixis"; + reg = <0x3 0x0 0x30>; + }; + + nand@4,0 { + compatible = "fsl,elbc-fcm-nand"; + reg = <0x4 0x0 0x40000>; + }; + + nand@5,0 { + compatible = "fsl,elbc-fcm-nand"; + reg = <0x5 0x0 0x40000>; + }; + + nand@6,0 { + compatible = "fsl,elbc-fcm-nand"; + reg = <0x6 0x0 0x40000>; + }; }; - board_soc: soc: soc@ffe00000 { - ranges = <0x0 0x0 0xffe00000 0x100000>; + soc@ffe00000 { + + usb@22000 { + phy_type = "ulpi"; + }; + + mdio@24520 { + phy0: ethernet-phy@0 { + interrupt-parent = <&mpic>; + interrupts = <3 1>; + reg = <0x0>; + }; + phy1: ethernet-phy@1 { + interrupt-parent = <&mpic>; + interrupts = <3 1>; + reg = <0x1>; + }; + phy2: ethernet-phy@2 { + interrupt-parent = <&mpic>; + interrupts = <3 1>; + reg = <0x2>; + }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + + }; + + mdio@25520 { + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@26520 { + tbi2: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + + }; + + ptp_clock@24E00 { + compatible = "fsl,etsec-ptp"; + reg = <0x24E00 0xB0>; + interrupts = <68 2 69 2 70 2>; + interrupt-parent = < &mpic >; + fsl,tclk-period = <5>; + fsl,tmr-prsc = <200>; + fsl,tmr-add = <0xCCCCCCCD>; + fsl,tmr-fiper1 = <0x3B9AC9FB>; + fsl,tmr-fiper2 = <0x0001869B>; + fsl,max-adj = <249999999>; + }; + + enet0: ethernet@24000 { + tbi-handle = <&tbi0>; + phy-handle = <&phy0>; + phy-connection-type = "rgmii-id"; + }; + + enet1: ethernet@25000 { + tbi-handle = <&tbi1>; + phy-handle = <&phy1>; + phy-connection-type = "rgmii-id"; + + }; + + enet2: ethernet@26000 { + tbi-handle = <&tbi2>; + phy-handle = <&phy2>; + 
phy-connection-type = "rgmii-id"; + }; + + + msi@41600 { + compatible = "fsl,mpic-msi"; + }; }; - pci2: pcie@ffe08000 { + pci0: pcie@ffe08000 { ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; - reg = <0 0xffe08000 0 0x1000>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x8 0x1 + 0000 0x0 0x0 0x2 &mpic 0x9 0x1 + 0000 0x0 0x0 0x3 &mpic 0xa 0x1 + 0000 0x0 0x0 0x4 &mpic 0xb 0x1 + >; pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0x80000000 0x2000000 0x0 0x80000000 0x0 0x20000000 @@ -49,11 +247,61 @@ }; }; - board_pci1: pci1: pcie@ffe09000 { + pci1: pcie@ffe09000 { ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; - reg = <0 0xffe09000 0 0x1000>; + interrupt-map-mask = <0xff00 0x0 0x0 0x7>; + interrupt-map = < + + // IDSEL 0x11 func 0 - PCI slot 1 + 0x8800 0x0 0x0 0x1 &i8259 0x9 0x2 + 0x8800 0x0 0x0 0x2 &i8259 0xa 0x2 + + // IDSEL 0x11 func 1 - PCI slot 1 + 0x8900 0x0 0x0 0x1 &i8259 0x9 0x2 + 0x8900 0x0 0x0 0x2 &i8259 0xa 0x2 + + // IDSEL 0x11 func 2 - PCI slot 1 + 0x8a00 0x0 0x0 0x1 &i8259 0x9 0x2 + 0x8a00 0x0 0x0 0x2 &i8259 0xa 0x2 + + // IDSEL 0x11 func 3 - PCI slot 1 + 0x8b00 0x0 0x0 0x1 &i8259 0x9 0x2 + 0x8b00 0x0 0x0 0x2 &i8259 0xa 0x2 + + // IDSEL 0x11 func 4 - PCI slot 1 + 0x8c00 0x0 0x0 0x1 &i8259 0x9 0x2 + 0x8c00 0x0 0x0 0x2 &i8259 0xa 0x2 + + // IDSEL 0x11 func 5 - PCI slot 1 + 0x8d00 0x0 0x0 0x1 &i8259 0x9 0x2 + 0x8d00 0x0 0x0 0x2 &i8259 0xa 0x2 + + // IDSEL 0x11 func 6 - PCI slot 1 + 0x8e00 0x0 0x0 0x1 &i8259 0x9 0x2 + 0x8e00 0x0 0x0 0x2 &i8259 0xa 0x2 + + // IDSEL 0x11 func 7 - PCI slot 1 + 0x8f00 0x0 0x0 0x1 &i8259 0x9 0x2 + 0x8f00 0x0 0x0 0x2 &i8259 0xa 0x2 + + // IDSEL 0x1d Audio + 0xe800 0x0 0x0 0x1 &i8259 0x6 0x2 + + // IDSEL 0x1e Legacy + 0xf000 0x0 0x0 0x1 &i8259 0x7 0x2 + 0xf100 0x0 0x0 0x1 &i8259 0x7 0x2 + + // IDSEL 0x1f IDE/SATA + 0xf800 0x0 0x0 0x1 &i8259 0xe 0x2 + 0xf900 0x0 0x0 0x1 &i8259 0x5 0x2 + >; + pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0xa0000000 0x2000000 0x0 0xa0000000 0x0 0x20000000 @@ -61,14 +309,89 @@ 0x1000000 0x0 0x0 0x1000000 0x0 0x0 0x0 0x10000>; + uli1575@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + ranges = <0x2000000 0x0 0xa0000000 + 0x2000000 0x0 0xa0000000 + 0x0 0x20000000 + + 0x1000000 0x0 0x0 + 0x1000000 0x0 0x0 + 0x0 0x10000>; + isa@1e { + device_type = "isa"; + #interrupt-cells = <2>; + #size-cells = <1>; + #address-cells = <2>; + reg = <0xf000 0x0 0x0 0x0 0x0>; + ranges = <0x1 0x0 0x1000000 0x0 0x0 + 0x1000>; + interrupt-parent = <&i8259>; + + i8259: interrupt-controller@20 { + reg = <0x1 0x20 0x2 + 0x1 0xa0 0x2 + 0x1 0x4d0 0x2>; + interrupt-controller; + device_type = "interrupt-controller"; + #address-cells = <0>; + #interrupt-cells = <2>; + compatible = "chrp,iic"; + interrupts = <4 1>; + interrupt-parent = <&mpic>; + }; + + i8042@60 { + #size-cells = <0>; + #address-cells = <1>; + reg = <0x1 0x60 0x1 0x1 0x64 0x1>; + interrupts = <1 3 12 3>; + interrupt-parent = + <&i8259>; + + keyboard@0 { + reg = <0x0>; + compatible = "pnpPNP,303"; + }; + + mouse@1 { + reg = <0x1>; + compatible = "pnpPNP,f03"; + }; + }; + + rtc@70 { + compatible = "pnpPNP,b00"; + reg = <0x1 0x70 0x2>; + }; + + gpio@400 { + reg = <0x1 0x400 0x80>; + }; + }; + }; }; + 
}; - pci0: pcie@ffe0a000 { + pci2: pcie@ffe0a000 { ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>; - reg = <0 0xffe0a000 0 0x1000>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0000 0x0 0x0 0x2 &mpic 0x1 0x1 + 0000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0000 0x0 0x0 0x4 &mpic 0x3 0x1 + >; pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0xc0000000 0x2000000 0x0 0xc0000000 0x0 0x20000000 @@ -79,11 +402,3 @@ }; }; }; - -/* - * p2020ds.dtsi must be last to ensure board_pci0 overrides pci0 settings - * for interrupt-map & interrupt-map-mask - */ - -/include/ "fsl/p2020si-post.dtsi" -/include/ "p2020ds.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/p2020ds.dtsi b/trunk/arch/powerpc/boot/dts/p2020ds.dtsi deleted file mode 100644 index c1cf6cef4dd6..000000000000 --- a/trunk/arch/powerpc/boot/dts/p2020ds.dtsi +++ /dev/null @@ -1,316 +0,0 @@ -/* - * P2020DS Device Tree Source stub (no addresses or top-level ranges) - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -&board_lbc { - nor@0,0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "cfi-flash"; - reg = <0x0 0x0 0x8000000>; - bank-width = <2>; - device-width = <1>; - - ramdisk@0 { - reg = <0x0 0x03000000>; - read-only; - }; - - diagnostic@3000000 { - reg = <0x03000000 0x00e00000>; - read-only; - }; - - dink@3e00000 { - reg = <0x03e00000 0x00200000>; - read-only; - }; - - kernel@4000000 { - reg = <0x04000000 0x00400000>; - read-only; - }; - - jffs2@4400000 { - reg = <0x04400000 0x03b00000>; - }; - - dtb@7f00000 { - reg = <0x07f00000 0x00080000>; - read-only; - }; - - u-boot@7f80000 { - reg = <0x07f80000 0x00080000>; - read-only; - }; - }; - - nand@2,0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,elbc-fcm-nand"; - reg = <0x2 0x0 0x40000>; - - u-boot@0 { - reg = <0x0 0x02000000>; - read-only; - }; - - jffs2@2000000 { - reg = <0x02000000 0x10000000>; - }; - - ramdisk@12000000 { - reg = <0x12000000 0x08000000>; - read-only; - }; - - kernel@1a000000 { - reg = <0x1a000000 0x04000000>; - }; - - dtb@1e000000 { - reg = <0x1e000000 0x01000000>; - read-only; - }; - - empty@1f000000 { - reg = <0x1f000000 0x21000000>; - }; - }; - - board-control@3,0 { - compatible = "fsl,p2020ds-fpga", "fsl,fpga-ngpixis"; - reg = <0x3 0x0 0x30>; - }; - - nand@4,0 { - compatible = "fsl,elbc-fcm-nand"; - reg = <0x4 0x0 0x40000>; - }; - - nand@5,0 { - compatible = "fsl,elbc-fcm-nand"; - reg = <0x5 0x0 0x40000>; - }; - - nand@6,0 { - compatible = "fsl,elbc-fcm-nand"; - reg = <0x6 0x0 0x40000>; - }; -}; - -&board_soc { - usb@22000 { - phy_type = "ulpi"; - }; - - mdio@24520 { - phy0: ethernet-phy@0 { - interrupts = <3 1 0 0>; - reg = <0x0>; - }; - phy1: ethernet-phy@1 { - interrupts = <3 1 0 0>; - reg = <0x1>; - }; - phy2: ethernet-phy@2 { - interrupts = <3 1 0 0>; - reg = <0x2>; - }; - tbi0: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; - }; - - }; - - mdio@25520 { - tbi1: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; - }; - }; - - mdio@26520 { - tbi2: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; - }; - - }; - - ptp_clock@24e00 { - fsl,tclk-period = <5>; - fsl,tmr-prsc = <200>; - fsl,tmr-add = <0xCCCCCCCD>; - fsl,tmr-fiper1 = <0x3B9AC9FB>; - fsl,tmr-fiper2 = <0x0001869B>; - fsl,max-adj = <249999999>; - }; - - enet0: ethernet@24000 { - tbi-handle = <&tbi0>; - phy-handle = <&phy0>; - phy-connection-type = "rgmii-id"; - }; - - enet1: ethernet@25000 { - tbi-handle = <&tbi1>; - phy-handle = <&phy1>; - phy-connection-type = "rgmii-id"; - - }; - - enet2: ethernet@26000 { - tbi-handle = <&tbi2>; - phy-handle = <&phy2>; - phy-connection-type = "rgmii-id"; - }; -}; - -&board_pci1 { - pcie@0 { - interrupt-map-mask = <0xff00 0x0 0x0 0x7>; - interrupt-map = < - - // IDSEL 0x11 func 0 - PCI slot 1 - 0x8800 0x0 0x0 0x1 &i8259 0x9 0x2 - 0x8800 0x0 0x0 0x2 &i8259 0xa 0x2 - - // IDSEL 0x11 func 1 - PCI slot 1 - 0x8900 0x0 0x0 0x1 &i8259 0x9 0x2 - 0x8900 0x0 0x0 0x2 &i8259 0xa 0x2 - - // IDSEL 0x11 func 2 - PCI slot 1 - 0x8a00 0x0 0x0 0x1 &i8259 0x9 0x2 - 0x8a00 0x0 0x0 0x2 &i8259 0xa 0x2 - - // IDSEL 0x11 func 3 - PCI slot 1 - 0x8b00 0x0 0x0 0x1 &i8259 0x9 0x2 - 0x8b00 0x0 0x0 0x2 &i8259 0xa 0x2 - - // IDSEL 0x11 func 4 - PCI slot 1 - 0x8c00 0x0 0x0 0x1 &i8259 0x9 0x2 - 0x8c00 0x0 0x0 0x2 &i8259 0xa 0x2 - - // IDSEL 0x11 func 5 - PCI slot 1 - 0x8d00 0x0 0x0 0x1 &i8259 0x9 0x2 - 0x8d00 0x0 0x0 0x2 &i8259 0xa 0x2 - - // IDSEL 0x11 func 6 - PCI slot 1 - 0x8e00 0x0 0x0 0x1 &i8259 0x9 0x2 - 0x8e00 0x0 0x0 0x2 &i8259 0xa 0x2 - - // IDSEL 0x11 func 7 - PCI slot 1 - 0x8f00 0x0 0x0 0x1 
&i8259 0x9 0x2 - 0x8f00 0x0 0x0 0x2 &i8259 0xa 0x2 - - // IDSEL 0x1d Audio - 0xe800 0x0 0x0 0x1 &i8259 0x6 0x2 - - // IDSEL 0x1e Legacy - 0xf000 0x0 0x0 0x1 &i8259 0x7 0x2 - 0xf100 0x0 0x0 0x1 &i8259 0x7 0x2 - - // IDSEL 0x1f IDE/SATA - 0xf800 0x0 0x0 0x1 &i8259 0xe 0x2 - 0xf900 0x0 0x0 0x1 &i8259 0x5 0x2 - >; - - uli1575@0 { - reg = <0x0 0x0 0x0 0x0 0x0>; - #size-cells = <2>; - #address-cells = <3>; - ranges = <0x2000000 0x0 0xa0000000 - 0x2000000 0x0 0xa0000000 - 0x0 0x20000000 - - 0x1000000 0x0 0x0 - 0x1000000 0x0 0x0 - 0x0 0x10000>; - isa@1e { - device_type = "isa"; - #interrupt-cells = <2>; - #size-cells = <1>; - #address-cells = <2>; - reg = <0xf000 0x0 0x0 0x0 0x0>; - ranges = <0x1 0x0 0x1000000 0x0 0x0 - 0x1000>; - interrupt-parent = <&i8259>; - - i8259: interrupt-controller@20 { - reg = <0x1 0x20 0x2 - 0x1 0xa0 0x2 - 0x1 0x4d0 0x2>; - interrupt-controller; - device_type = "interrupt-controller"; - #address-cells = <0>; - #interrupt-cells = <2>; - compatible = "chrp,iic"; - interrupts = <4 1 0 0>; - interrupt-parent = <&mpic>; - }; - - i8042@60 { - #size-cells = <0>; - #address-cells = <1>; - reg = <0x1 0x60 0x1 0x1 0x64 0x1>; - interrupts = <1 3 12 3>; - interrupt-parent = - <&i8259>; - - keyboard@0 { - reg = <0x0>; - compatible = "pnpPNP,303"; - }; - - mouse@1 { - reg = <0x1>; - compatible = "pnpPNP,f03"; - }; - }; - - rtc@70 { - compatible = "pnpPNP,b00"; - reg = <0x1 0x70 0x2>; - }; - - gpio@400 { - reg = <0x1 0x400 0x80>; - }; - }; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/p2020rdb.dts b/trunk/arch/powerpc/boot/dts/p2020rdb.dts index 26759a591712..1d7a05f3021e 100644 --- a/trunk/arch/powerpc/boot/dts/p2020rdb.dts +++ b/trunk/arch/powerpc/boot/dts/p2020rdb.dts @@ -9,7 +9,7 @@ * option) any later version. */ -/include/ "fsl/p2020si-pre.dtsi" +/include/ "p2020si.dtsi" / { model = "fsl,P2020RDB"; @@ -29,8 +29,7 @@ device_type = "memory"; }; - lbc: localbus@ffe05000 { - reg = <0 0xffe05000 0 0x1000>; + localbus@ffe05000 { /* NOR and NAND Flashes */ ranges = <0x0 0x0 0x0 0xef000000 0x01000000 @@ -141,9 +140,7 @@ }; - soc: soc@ffe00000 { - ranges = <0x0 0x0 0xffe00000 0x100000>; - + soc@ffe00000 { i2c@3000 { rtc@68 { compatible = "dallas,ds1339"; @@ -151,13 +148,17 @@ }; }; - spi@7000 { - flash@0 { + spi@7000 { + + fsl_m25p80@0 { #address-cells = <1>; #size-cells = <1>; - compatible = "spansion,s25sl12801"; + compatible = "fsl,espi-flash"; reg = <0>; + linux,modalias = "fsl_m25p80"; + modal = "s25sl128b"; spi-max-frequency = <50000000>; + mode = <0>; partition@0 { /* 512KB for u-boot Bootloader Image */ @@ -201,17 +202,15 @@ mdio@24520 { phy0: ethernet-phy@0 { - interrupts = <3 1 0 0>; + interrupt-parent = <&mpic>; + interrupts = <3 1>; reg = <0x0>; - }; + }; phy1: ethernet-phy@1 { - interrupts = <3 1 0 0>; + interrupt-parent = <&mpic>; + interrupts = <3 1>; reg = <0x1>; - }; - tbi-phy@2 { - device_type = "tbi-phy"; - reg = <0x2>; - }; + }; }; mdio@25520 { @@ -225,7 +224,11 @@ status = "disabled"; }; - ptp_clock@24e00 { + ptp_clock@24E00 { + compatible = "fsl,etsec-ptp"; + reg = <0x24E00 0xB0>; + interrupts = <68 2 69 2 70 2>; + interrupt-parent = < &mpic >; fsl,tclk-period = <5>; fsl,tmr-prsc = <200>; fsl,tmr-add = <0xCCCCCCCD>; @@ -249,18 +252,29 @@ phy-handle = <&phy1>; phy-connection-type = "rgmii-id"; }; + }; pci0: pcie@ffe08000 { - reg = <0 0xffe08000 0 0x1000>; status = "disabled"; }; pci1: pcie@ffe09000 { - reg = <0 0xffe09000 0 0x1000>; ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; - pcie@0 { + 
interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x4 0x1 + 0000 0x0 0x0 0x2 &mpic 0x5 0x1 + 0000 0x0 0x0 0x3 &mpic 0x6 0x1 + 0000 0x0 0x0 0x4 &mpic 0x7 0x1 + >; + pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0xa0000000 0x2000000 0x0 0xa0000000 0x0 0x20000000 @@ -272,10 +286,21 @@ }; pci2: pcie@ffe0a000 { - reg = <0 0xffe0a000 0 0x1000>; ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0000 0x0 0x0 0x2 &mpic 0x1 0x1 + 0000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0000 0x0 0x0 0x4 &mpic 0x3 0x1 + >; pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; ranges = <0x2000000 0x0 0x80000000 0x2000000 0x0 0x80000000 0x0 0x20000000 @@ -286,5 +311,3 @@ }; }; }; - -/include/ "fsl/p2020si-post.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts b/trunk/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts index 66aac864c4cc..fc8ddddfccb6 100644 --- a/trunk/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts +++ b/trunk/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts @@ -14,16 +14,28 @@ * option) any later version. */ -/include/ "p2020rdb.dts" +/include/ "p2020si.dtsi" / { model = "fsl,P2020RDB"; compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP"; + aliases { + ethernet1 = &enet1; + ethernet2 = &enet2; + serial0 = &serial0; + pci0 = &pci0; + }; + cpus { PowerPC,P2020@1 { - status = "disabled"; + status = "disabled"; }; + + }; + + memory { + device_type = "memory"; }; localbus@ffe05000 { @@ -31,18 +43,115 @@ }; soc@ffe00000 { + i2c@3000 { + rtc@68 { + compatible = "dallas,ds1339"; + reg = <0x68>; + }; + }; + serial1: serial@4600 { status = "disabled"; }; + spi@7000 { + + fsl_m25p80@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,espi-flash"; + reg = <0>; + linux,modalias = "fsl_m25p80"; + modal = "s25sl128b"; + spi-max-frequency = <50000000>; + mode = <0>; + + partition@0 { + /* 512KB for u-boot Bootloader Image */ + reg = <0x0 0x00080000>; + label = "SPI (RO) U-Boot Image"; + read-only; + }; + + partition@80000 { + /* 512KB for DTB Image */ + reg = <0x00080000 0x00080000>; + label = "SPI (RO) DTB Image"; + read-only; + }; + + partition@100000 { + /* 4MB for Linux Kernel Image */ + reg = <0x00100000 0x00400000>; + label = "SPI (RO) Linux Kernel Image"; + read-only; + }; + + partition@500000 { + /* 4MB for Compressed RFS Image */ + reg = <0x00500000 0x00400000>; + label = "SPI (RO) Compressed RFS Image"; + read-only; + }; + + partition@900000 { + /* 7MB for JFFS2 based RFS */ + reg = <0x00900000 0x00700000>; + label = "SPI (RW) JFFS2 RFS"; + }; + }; + }; + dma@c300 { status = "disabled"; }; + usb@22000 { + phy_type = "ulpi"; + }; + + mdio@24520 { + + phy0: ethernet-phy@0 { + interrupt-parent = <&mpic>; + interrupts = <3 1>; + reg = <0x0>; + }; + phy1: ethernet-phy@1 { + interrupt-parent = <&mpic>; + interrupts = <3 1>; + reg = <0x1>; + }; + }; + + mdio@25520 { + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@26520 { + status = "disabled"; + }; + enet0: ethernet@24000 { status = "disabled"; }; + enet1: ethernet@25000 { + tbi-handle = <&tbi0>; + phy-handle = <&phy0>; + phy-connection-type = "sgmii"; + + }; + + enet2: ethernet@26000 { + phy-handle = <&phy1>; + phy-connection-type = 
"rgmii-id"; + }; + + mpic: pic@40000 { protected-sources = < 42 76 77 78 79 /* serial1 , dma2 */ @@ -55,12 +164,40 @@ msi@41600 { status = "disabled"; }; + + }; pci0: pcie@ffe08000 { status = "disabled"; }; + pci1: pcie@ffe09000 { + ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 + 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x4 0x1 + 0000 0x0 0x0 0x2 &mpic 0x5 0x1 + 0000 0x0 0x0 0x3 &mpic 0x6 0x1 + 0000 0x0 0x0 0x4 &mpic 0x7 0x1 + >; + pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + ranges = <0x2000000 0x0 0xa0000000 + 0x2000000 0x0 0xa0000000 + 0x0 0x20000000 + + 0x1000000 0x0 0x0 + 0x1000000 0x0 0x0 + 0x0 0x100000>; + }; + }; + pci2: pcie@ffe0a000 { status = "disabled"; }; diff --git a/trunk/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts b/trunk/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts index 9bd8ef493dd2..261c34ba45ec 100644 --- a/trunk/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts +++ b/trunk/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts @@ -15,18 +15,28 @@ * option) any later version. */ -/include/ "p2020rdb.dts" +/include/ "p2020si.dtsi" / { model = "fsl,P2020RDB"; compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP"; + aliases { + ethernet0 = &enet0; + serial0 = &serial1; + pci1 = &pci1; + }; + cpus { PowerPC,P2020@0 { - status = "disabled"; + status = "disabled"; }; }; + memory { + device_type = "memory"; + }; + localbus@ffe05000 { status = "disabled"; }; @@ -60,10 +70,55 @@ status = "disabled"; }; + dma@c300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,eloplus-dma"; + reg = <0xc300 0x4>; + ranges = <0x0 0xc100 0x200>; + cell-index = <1>; + dma-channel@0 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <76 2>; + }; + dma-channel@80 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <77 2>; + }; + dma-channel@100 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <78 2>; + }; + dma-channel@180 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <79 2>; + }; + }; + gpio: gpio-controller@f000 { status = "disabled"; }; + L2: l2-cache-controller@20000 { + compatible = "fsl,p2020-l2-cache-controller"; + reg = <0x20000 0x1000>; + cache-line-size = <32>; // 32 bytes + cache-size = <0x80000>; // L2,512K + interrupt-parent = <&mpic>; + }; + dma@21300 { status = "disabled"; }; @@ -84,6 +139,12 @@ status = "disabled"; }; + enet0: ethernet@24000 { + fixed-link = <1 1 1000 0 0>; + phy-connection-type = "rgmii-id"; + + }; + enet1: ethernet@25000 { status = "disabled"; }; @@ -109,6 +170,22 @@ >; }; + msi@41600 { + compatible = "fsl,p2020-msi", "fsl,mpic-msi"; + reg = <0x41600 0x80>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe0 0 + 0xe1 0 + 0xe2 0 + 0xe3 0 + 0xe4 0 + 0xe5 0 + 0xe6 0 + 0xe7 0>; + interrupt-parent = <&mpic>; + }; + global-utilities@e0000 { //global utilities block status = "disabled"; }; @@ -122,4 +199,30 @@ pci1: pcie@ffe09000 { status = "disabled"; }; + + pci2: pcie@ffe0a000 { + ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 + 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + 
interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0000 0x0 0x0 0x2 &mpic 0x1 0x1 + 0000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0000 0x0 0x0 0x4 &mpic 0x3 0x1 + >; + pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + ranges = <0x2000000 0x0 0x80000000 + 0x2000000 0x0 0x80000000 + 0x0 0x20000000 + + 0x1000000 0x0 0x0 + 0x1000000 0x0 0x0 + 0x0 0x100000>; + }; + }; }; diff --git a/trunk/arch/powerpc/boot/dts/p2020si.dtsi b/trunk/arch/powerpc/boot/dts/p2020si.dtsi new file mode 100644 index 000000000000..6def17f265d3 --- /dev/null +++ b/trunk/arch/powerpc/boot/dts/p2020si.dtsi @@ -0,0 +1,382 @@ +/* + * P2020 Device Tree Source + * + * Copyright 2011 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +/dts-v1/; +/ { + compatible = "fsl,P2020"; + #address-cells = <2>; + #size-cells = <2>; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + PowerPC,P2020@0 { + device_type = "cpu"; + reg = <0x0>; + next-level-cache = <&L2>; + }; + + PowerPC,P2020@1 { + device_type = "cpu"; + reg = <0x1>; + next-level-cache = <&L2>; + }; + }; + + localbus@ffe05000 { + #address-cells = <2>; + #size-cells = <1>; + compatible = "fsl,p2020-elbc", "fsl,elbc", "simple-bus"; + reg = <0 0xffe05000 0 0x1000>; + interrupts = <19 2>; + interrupt-parent = <&mpic>; + }; + + soc@ffe00000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "fsl,p2020-immr", "simple-bus"; + ranges = <0x0 0x0 0xffe00000 0x100000>; + bus-frequency = <0>; // Filled out by uboot. 
+ + ecm-law@0 { + compatible = "fsl,ecm-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <12>; + }; + + ecm@1000 { + compatible = "fsl,p2020-ecm", "fsl,ecm"; + reg = <0x1000 0x1000>; + interrupts = <17 2>; + interrupt-parent = <&mpic>; + }; + + memory-controller@2000 { + compatible = "fsl,p2020-memory-controller"; + reg = <0x2000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <18 2>; + }; + + i2c@3000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x3000 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + i2c@3100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x3100 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + serial0: serial@4500 { + cell-index = <0>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4500 0x100>; + clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + + serial1: serial@4600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4600 0x100>; + clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + + spi@7000 { + cell-index = <0>; + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,espi"; + reg = <0x7000 0x1000>; + interrupts = <59 0x2>; + interrupt-parent = <&mpic>; + mode = "cpu"; + }; + + dma@c300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,eloplus-dma"; + reg = <0xc300 0x4>; + ranges = <0x0 0xc100 0x200>; + cell-index = <1>; + dma-channel@0 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <76 2>; + }; + dma-channel@80 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <77 2>; + }; + dma-channel@100 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <78 2>; + }; + dma-channel@180 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <79 2>; + }; + }; + + gpio: gpio-controller@f000 { + #gpio-cells = <2>; + compatible = "fsl,mpc8572-gpio"; + reg = <0xf000 0x100>; + interrupts = <47 0x2>; + interrupt-parent = <&mpic>; + gpio-controller; + }; + + L2: l2-cache-controller@20000 { + compatible = "fsl,p2020-l2-cache-controller"; + reg = <0x20000 0x1000>; + cache-line-size = <32>; // 32 bytes + cache-size = <0x80000>; // L2,512K + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; + + dma@21300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,eloplus-dma"; + reg = <0x21300 0x4>; + ranges = <0x0 0x21100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <20 2>; + }; + dma-channel@80 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <21 2>; + }; + dma-channel@100 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <22 2>; + }; + dma-channel@180 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <23 2>; + }; + }; + + usb@22000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = 
"fsl-usb2-dr"; + reg = <0x22000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <28 0x2>; + }; + + mdio@24520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-mdio"; + reg = <0x24520 0x20>; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x26520 0x20>; + }; + + mdio@26520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x520 0x20>; + }; + + enet0: ethernet@24000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <0>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x24000 0x1000>; + ranges = <0x0 0x24000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <29 2 30 2 34 2>; + interrupt-parent = <&mpic>; + }; + + enet1: ethernet@25000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <1>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x25000 0x1000>; + ranges = <0x0 0x25000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <35 2 36 2 40 2>; + interrupt-parent = <&mpic>; + + }; + + enet2: ethernet@26000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <2>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x26000 0x1000>; + ranges = <0x0 0x26000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <31 2 32 2 33 2>; + interrupt-parent = <&mpic>; + + }; + + sdhci@2e000 { + compatible = "fsl,p2020-esdhc", "fsl,esdhc"; + reg = <0x2e000 0x1000>; + interrupts = <72 0x2>; + interrupt-parent = <&mpic>; + /* Filled in by U-Boot */ + clock-frequency = <0>; + }; + + crypto@30000 { + compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4", + "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0"; + reg = <0x30000 0x10000>; + interrupts = <45 2 58 2>; + interrupt-parent = <&mpic>; + fsl,num-channels = <4>; + fsl,channel-fifo-len = <24>; + fsl,exec-units-mask = <0xbfe>; + fsl,descriptor-types-mask = <0x3ab0ebf>; + }; + + mpic: pic@40000 { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <2>; + reg = <0x40000 0x40000>; + compatible = "chrp,open-pic"; + device_type = "open-pic"; + }; + + msi@41600 { + compatible = "fsl,p2020-msi", "fsl,mpic-msi"; + reg = <0x41600 0x80>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe0 0 + 0xe1 0 + 0xe2 0 + 0xe3 0 + 0xe4 0 + 0xe5 0 + 0xe6 0 + 0xe7 0>; + interrupt-parent = <&mpic>; + }; + + global-utilities@e0000 { //global utilities block + compatible = "fsl,p2020-guts"; + reg = <0xe0000 0x1000>; + fsl,has-rstcr; + }; + }; + + pci0: pcie@ffe08000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0 0xffe08000 0 0x1000>; + bus-range = <0 255>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <24 2>; + }; + + pci1: pcie@ffe09000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0 0xffe09000 0 0x1000>; + bus-range = <0 255>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <25 2>; + }; + + pci2: pcie@ffe0a000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0 0xffe0a000 0 0x1000>; + bus-range = <0 255>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <26 2>; + }; +}; diff --git 
a/trunk/arch/powerpc/boot/dts/p2041rdb.dts b/trunk/arch/powerpc/boot/dts/p2041rdb.dts index 4f957db01230..79b6895027c0 100644 --- a/trunk/arch/powerpc/boot/dts/p2041rdb.dts +++ b/trunk/arch/powerpc/boot/dts/p2041rdb.dts @@ -32,7 +32,7 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -/include/ "fsl/p2041si-pre.dtsi" +/include/ "p2041si.dtsi" / { model = "fsl,P2041RDB"; @@ -50,8 +50,6 @@ }; soc: soc@ffe000000 { - ranges = <0x00000000 0xf 0xfe000000 0x1000000>; - reg = <0xf 0xfe000000 0 0x00001000>; spi@110000 { flash@0 { #address-cells = <1>; @@ -108,18 +106,7 @@ }; }; - rio: rapidio@ffe0c0000 { - reg = <0xf 0xfe0c0000 0 0x11000>; - - port1 { - ranges = <0 0 0xc 0x20000000 0 0x10000000>; - }; - port2 { - ranges = <0 0 0xc 0x30000000 0 0x10000000>; - }; - }; - - lbc: localbus@ffe124000 { + localbus@ffe124000 { reg = <0xf 0xfe124000 0 0x1000>; ranges = <0 0 0xf 0xe8000000 0x08000000>; @@ -135,7 +122,6 @@ reg = <0xf 0xfe200000 0 0x1000>; ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>; - fsl,msi = <&msi0>; pcie@0 { ranges = <0x02000000 0 0xe0000000 0x02000000 0 0xe0000000 @@ -151,7 +137,6 @@ reg = <0xf 0xfe201000 0 0x1000>; ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000 0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>; - fsl,msi = <&msi1>; pcie@0 { ranges = <0x02000000 0 0xe0000000 0x02000000 0 0xe0000000 @@ -167,7 +152,6 @@ reg = <0xf 0xfe202000 0 0x1000>; ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000 0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>; - fsl,msi = <&msi2>; pcie@0 { ranges = <0x02000000 0 0xe0000000 0x02000000 0 0xe0000000 @@ -179,5 +163,3 @@ }; }; }; - -/include/ "fsl/p2041si-post.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/p2041si.dtsi b/trunk/arch/powerpc/boot/dts/p2041si.dtsi new file mode 100644 index 000000000000..f7492edd0dfd --- /dev/null +++ b/trunk/arch/powerpc/boot/dts/p2041si.dtsi @@ -0,0 +1,692 @@ +/* + * P2041 Silicon Device Tree Source + * + * Copyright 2011 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/dts-v1/; + +/ { + compatible = "fsl,P2041"; + #address-cells = <2>; + #size-cells = <2>; + interrupt-parent = <&mpic>; + + aliases { + ccsr = &soc; + dcsr = &dcsr; + + serial0 = &serial0; + serial1 = &serial1; + serial2 = &serial2; + serial3 = &serial3; + pci0 = &pci0; + pci1 = &pci1; + pci2 = &pci2; + usb0 = &usb0; + usb1 = &usb1; + dma0 = &dma0; + dma1 = &dma1; + sdhc = &sdhc; + msi0 = &msi0; + msi1 = &msi1; + msi2 = &msi2; + + crypto = &crypto; + sec_jr0 = &sec_jr0; + sec_jr1 = &sec_jr1; + sec_jr2 = &sec_jr2; + sec_jr3 = &sec_jr3; + rtic_a = &rtic_a; + rtic_b = &rtic_b; + rtic_c = &rtic_c; + rtic_d = &rtic_d; + sec_mon = &sec_mon; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu0: PowerPC,e500mc@0 { + device_type = "cpu"; + reg = <0>; + next-level-cache = <&L2_0>; + L2_0: l2-cache { + next-level-cache = <&cpc>; + }; + }; + cpu1: PowerPC,e500mc@1 { + device_type = "cpu"; + reg = <1>; + next-level-cache = <&L2_1>; + L2_1: l2-cache { + next-level-cache = <&cpc>; + }; + }; + cpu2: PowerPC,e500mc@2 { + device_type = "cpu"; + reg = <2>; + next-level-cache = <&L2_2>; + L2_2: l2-cache { + next-level-cache = <&cpc>; + }; + }; + cpu3: PowerPC,e500mc@3 { + device_type = "cpu"; + reg = <3>; + next-level-cache = <&L2_3>; + L2_3: l2-cache { + next-level-cache = <&cpc>; + }; + }; + }; + + dcsr: dcsr@f00000000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,dcsr", "simple-bus"; + + dcsr-epu@0 { + compatible = "fsl,dcsr-epu"; + interrupts = <52 2 0 0 + 84 2 0 0 + 85 2 0 0>; + interrupt-parent = <&mpic>; + reg = <0x0 0x1000>; + }; + dcsr-npc { + compatible = "fsl,dcsr-npc"; + reg = <0x1000 0x1000 0x1000000 0x8000>; + }; + dcsr-nxc@2000 { + compatible = "fsl,dcsr-nxc"; + reg = <0x2000 0x1000>; + }; + dcsr-corenet { + compatible = "fsl,dcsr-corenet"; + reg = <0x8000 0x1000 0xB0000 0x1000>; + }; + dcsr-dpaa@9000 { + compatible = "fsl,p2041-dcsr-dpaa", "fsl,dcsr-dpaa"; + reg = <0x9000 0x1000>; + }; + dcsr-ocn@11000 { + compatible = "fsl,p2041-dcsr-ocn", "fsl,dcsr-ocn"; + reg = <0x11000 0x1000>; + }; + dcsr-ddr@12000 { + compatible = "fsl,dcsr-ddr"; + dev-handle = <&ddr>; + reg = <0x12000 0x1000>; + }; + dcsr-nal@18000 { + compatible = "fsl,p2041-dcsr-nal", "fsl,dcsr-nal"; + reg = <0x18000 0x1000>; + }; + dcsr-rcpm@22000 { + compatible = "fsl,p2041-dcsr-rcpm", "fsl,dcsr-rcpm"; + reg = <0x22000 0x1000>; + }; + dcsr-cpu-sb-proxy@40000 { + compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu0>; + reg = <0x40000 0x1000>; + }; + dcsr-cpu-sb-proxy@41000 { + compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu1>; + reg = <0x41000 0x1000>; + }; + dcsr-cpu-sb-proxy@42000 { + compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu2>; + reg = <0x42000 0x1000>; + }; + dcsr-cpu-sb-proxy@43000 { + compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu3>; + reg = <0x43000 0x1000>; + }; + }; + + soc: soc@ffe000000 { + #address-cells 
= <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "simple-bus"; + ranges = <0x00000000 0xf 0xfe000000 0x1000000>; + reg = <0xf 0xfe000000 0 0x00001000>; + + soc-sram-error { + compatible = "fsl,soc-sram-error"; + interrupts = <16 2 1 29>; + }; + + corenet-law@0 { + compatible = "fsl,corenet-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <32>; + }; + + ddr: memory-controller@8000 { + compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller"; + reg = <0x8000 0x1000>; + interrupts = <16 2 1 23>; + }; + + cpc: l3-cache-controller@10000 { + compatible = "fsl,p2041-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache"; + reg = <0x10000 0x1000>; + interrupts = <16 2 1 27>; + }; + + corenet-cf@18000 { + compatible = "fsl,corenet-cf"; + reg = <0x18000 0x1000>; + interrupts = <16 2 1 31>; + fsl,ccf-num-csdids = <32>; + fsl,ccf-num-snoopids = <32>; + }; + + iommu@20000 { + compatible = "fsl,pamu-v1.0", "fsl,pamu"; + reg = <0x20000 0x4000>; + interrupts = < + 24 2 0 0 + 16 2 1 30>; + }; + + mpic: pic@40000 { + clock-frequency = <0>; + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <4>; + reg = <0x40000 0x40000>; + compatible = "fsl,mpic", "chrp,open-pic"; + device_type = "open-pic"; + }; + + msi0: msi@41600 { + compatible = "fsl,mpic-msi"; + reg = <0x41600 0x200>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe0 0 0 0 + 0xe1 0 0 0 + 0xe2 0 0 0 + 0xe3 0 0 0 + 0xe4 0 0 0 + 0xe5 0 0 0 + 0xe6 0 0 0 + 0xe7 0 0 0>; + }; + + msi1: msi@41800 { + compatible = "fsl,mpic-msi"; + reg = <0x41800 0x200>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe8 0 0 0 + 0xe9 0 0 0 + 0xea 0 0 0 + 0xeb 0 0 0 + 0xec 0 0 0 + 0xed 0 0 0 + 0xee 0 0 0 + 0xef 0 0 0>; + }; + + msi2: msi@41a00 { + compatible = "fsl,mpic-msi"; + reg = <0x41a00 0x200>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xf0 0 0 0 + 0xf1 0 0 0 + 0xf2 0 0 0 + 0xf3 0 0 0 + 0xf4 0 0 0 + 0xf5 0 0 0 + 0xf6 0 0 0 + 0xf7 0 0 0>; + }; + + guts: global-utilities@e0000 { + compatible = "fsl,qoriq-device-config-1.0"; + reg = <0xe0000 0xe00>; + fsl,has-rstcr; + #sleep-cells = <1>; + fsl,liodn-bits = <12>; + }; + + pins: global-utilities@e0e00 { + compatible = "fsl,qoriq-pin-control-1.0"; + reg = <0xe0e00 0x200>; + #sleep-cells = <2>; + }; + + clockgen: global-utilities@e1000 { + compatible = "fsl,p2041-clockgen", "fsl,qoriq-clockgen-1.0"; + reg = <0xe1000 0x1000>; + clock-frequency = <0>; + }; + + rcpm: global-utilities@e2000 { + compatible = "fsl,qoriq-rcpm-1.0"; + reg = <0xe2000 0x1000>; + #sleep-cells = <1>; + }; + + sfp: sfp@e8000 { + compatible = "fsl,p2041-sfp", "fsl,qoriq-sfp-1.0"; + reg = <0xe8000 0x1000>; + }; + + serdes: serdes@ea000 { + compatible = "fsl,p2041-serdes"; + reg = <0xea000 0x1000>; + }; + + dma0: dma@100300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,p2041-dma", "fsl,eloplus-dma"; + reg = <0x100300 0x4>; + ranges = <0x0 0x100100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,p2041-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupts = <28 2 0 0>; + }; + dma-channel@80 { + compatible = "fsl,p2041-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupts = <29 2 0 0>; + }; + dma-channel@100 { + compatible = "fsl,p2041-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupts = <30 2 0 0>; + }; + dma-channel@180 { + compatible = "fsl,p2041-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 
0x80>; + cell-index = <3>; + interrupts = <31 2 0 0>; + }; + }; + + dma1: dma@101300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,p2041-dma", "fsl,eloplus-dma"; + reg = <0x101300 0x4>; + ranges = <0x0 0x101100 0x200>; + cell-index = <1>; + dma-channel@0 { + compatible = "fsl,p2041-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupts = <32 2 0 0>; + }; + dma-channel@80 { + compatible = "fsl,p2041-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupts = <33 2 0 0>; + }; + dma-channel@100 { + compatible = "fsl,p2041-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupts = <34 2 0 0>; + }; + dma-channel@180 { + compatible = "fsl,p2041-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupts = <35 2 0 0>; + }; + }; + + spi@110000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,p2041-espi", "fsl,mpc8536-espi"; + reg = <0x110000 0x1000>; + interrupts = <53 0x2 0 0>; + fsl,espi-num-chipselects = <4>; + }; + + sdhc: sdhc@114000 { + compatible = "fsl,p2041-esdhc", "fsl,esdhc"; + reg = <0x114000 0x1000>; + interrupts = <48 2 0 0>; + sdhci,auto-cmd12; + clock-frequency = <0>; + }; + + i2c@118000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x118000 0x100>; + interrupts = <38 2 0 0>; + dfsrr; + }; + + i2c@118100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x118100 0x100>; + interrupts = <38 2 0 0>; + dfsrr; + }; + + i2c@119000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <2>; + compatible = "fsl-i2c"; + reg = <0x119000 0x100>; + interrupts = <39 2 0 0>; + dfsrr; + }; + + i2c@119100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <3>; + compatible = "fsl-i2c"; + reg = <0x119100 0x100>; + interrupts = <39 2 0 0>; + dfsrr; + }; + + serial0: serial@11c500 { + cell-index = <0>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x11c500 0x100>; + clock-frequency = <0>; + interrupts = <36 2 0 0>; + }; + + serial1: serial@11c600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x11c600 0x100>; + clock-frequency = <0>; + interrupts = <36 2 0 0>; + }; + + serial2: serial@11d500 { + cell-index = <2>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x11d500 0x100>; + clock-frequency = <0>; + interrupts = <37 2 0 0>; + }; + + serial3: serial@11d600 { + cell-index = <3>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x11d600 0x100>; + clock-frequency = <0>; + interrupts = <37 2 0 0>; + }; + + gpio0: gpio@130000 { + compatible = "fsl,p2041-gpio", "fsl,qoriq-gpio"; + reg = <0x130000 0x1000>; + interrupts = <55 2 0 0>; + #gpio-cells = <2>; + gpio-controller; + }; + + usb0: usb@210000 { + compatible = "fsl,p2041-usb2-mph", + "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph"; + reg = <0x210000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <44 0x2 0 0>; + phy_type = "utmi"; + port0; + }; + + usb1: usb@211000 { + compatible = "fsl,p2041-usb2-dr", + "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr"; + reg = <0x211000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <45 0x2 0 0>; + phy_type = "utmi"; + }; + + sata@220000 { + compatible = "fsl,p2041-sata", "fsl,pq-sata-v2"; + reg = <0x220000 0x1000>; + interrupts = <68 0x2 0 0>; + }; + + sata@221000 { + compatible = "fsl,p2041-sata", 
"fsl,pq-sata-v2"; + reg = <0x221000 0x1000>; + interrupts = <69 0x2 0 0>; + }; + + crypto: crypto@300000 { + compatible = "fsl,sec-v4.2", "fsl,sec-v4.0"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x300000 0x10000>; + ranges = <0 0x300000 0x10000>; + interrupts = <92 2 0 0>; + + sec_jr0: jr@1000 { + compatible = "fsl,sec-v4.2-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x1000 0x1000>; + interrupts = <88 2 0 0>; + }; + + sec_jr1: jr@2000 { + compatible = "fsl,sec-v4.2-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x2000 0x1000>; + interrupts = <89 2 0 0>; + }; + + sec_jr2: jr@3000 { + compatible = "fsl,sec-v4.2-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x3000 0x1000>; + interrupts = <90 2 0 0>; + }; + + sec_jr3: jr@4000 { + compatible = "fsl,sec-v4.2-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x4000 0x1000>; + interrupts = <91 2 0 0>; + }; + + rtic@6000 { + compatible = "fsl,sec-v4.2-rtic", + "fsl,sec-v4.0-rtic"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x6000 0x100>; + ranges = <0x0 0x6100 0xe00>; + + rtic_a: rtic-a@0 { + compatible = "fsl,sec-v4.2-rtic-memory", + "fsl,sec-v4.0-rtic-memory"; + reg = <0x00 0x20 0x100 0x80>; + }; + + rtic_b: rtic-b@20 { + compatible = "fsl,sec-v4.2-rtic-memory", + "fsl,sec-v4.0-rtic-memory"; + reg = <0x20 0x20 0x200 0x80>; + }; + + rtic_c: rtic-c@40 { + compatible = "fsl,sec-v4.2-rtic-memory", + "fsl,sec-v4.0-rtic-memory"; + reg = <0x40 0x20 0x300 0x80>; + }; + + rtic_d: rtic-d@60 { + compatible = "fsl,sec-v4.2-rtic-memory", + "fsl,sec-v4.0-rtic-memory"; + reg = <0x60 0x20 0x500 0x80>; + }; + }; + }; + + sec_mon: sec_mon@314000 { + compatible = "fsl,sec-v4.2-mon", "fsl,sec-v4.0-mon"; + reg = <0x314000 0x1000>; + interrupts = <93 2 0 0>; + }; + + }; + + localbus@ffe124000 { + compatible = "fsl,p2041-elbc", "fsl,elbc", "simple-bus"; + interrupts = <25 2 0 0>; + #address-cells = <2>; + #size-cells = <1>; + }; + + pci0: pcie@ffe200000 { + compatible = "fsl,p2041-pcie", "fsl,qoriq-pcie-v2.2"; + device_type = "pci"; + #size-cells = <2>; + #address-cells = <3>; + bus-range = <0x0 0xff>; + clock-frequency = <33333333>; + fsl,msi = <&msi0>; + interrupts = <16 2 1 15>; + pcie@0 { + reg = <0 0 0 0 0>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + interrupts = <16 2 1 15>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 40 1 0 0 + 0000 0 0 2 &mpic 1 1 0 0 + 0000 0 0 3 &mpic 2 1 0 0 + 0000 0 0 4 &mpic 3 1 0 0 + >; + }; + }; + + pci1: pcie@ffe201000 { + compatible = "fsl,p2041-pcie", "fsl,qoriq-pcie-v2.2"; + device_type = "pci"; + #size-cells = <2>; + #address-cells = <3>; + bus-range = <0 0xff>; + clock-frequency = <33333333>; + fsl,msi = <&msi1>; + interrupts = <16 2 1 14>; + pcie@0 { + reg = <0 0 0 0 0>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + interrupts = <16 2 1 14>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 41 1 0 0 + 0000 0 0 2 &mpic 5 1 0 0 + 0000 0 0 3 &mpic 6 1 0 0 + 0000 0 0 4 &mpic 7 1 0 0 + >; + }; + }; + + pci2: pcie@ffe202000 { + compatible = "fsl,p2041-pcie", "fsl,qoriq-pcie-v2.2"; + device_type = "pci"; + #size-cells = <2>; + #address-cells = <3>; + bus-range = <0x0 0xff>; + clock-frequency = <33333333>; + fsl,msi = <&msi2>; + interrupts = <16 2 1 13>; + pcie@0 { + reg = <0 0 0 0 0>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + interrupts = <16 2 1 13>; + 
interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 42 1 0 0 + 0000 0 0 2 &mpic 9 1 0 0 + 0000 0 0 3 &mpic 10 1 0 0 + 0000 0 0 4 &mpic 11 1 0 0 + >; + }; + }; +}; diff --git a/trunk/arch/powerpc/boot/dts/p3041ds.dts b/trunk/arch/powerpc/boot/dts/p3041ds.dts index f469145abaeb..bbd113b49a8f 100644 --- a/trunk/arch/powerpc/boot/dts/p3041ds.dts +++ b/trunk/arch/powerpc/boot/dts/p3041ds.dts @@ -32,7 +32,7 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -/include/ "fsl/p3041si-pre.dtsi" +/include/ "p3041si.dtsi" / { model = "fsl,P3041DS"; @@ -50,8 +50,6 @@ }; soc: soc@ffe000000 { - ranges = <0x00000000 0xf 0xfe000000 0x1000000>; - reg = <0xf 0xfe000000 0 0x00001000>; spi@110000 { flash@0 { #address-cells = <1>; @@ -101,18 +99,7 @@ }; }; - rio: rapidio@ffe0c0000 { - reg = <0xf 0xfe0c0000 0 0x11000>; - - port1 { - ranges = <0 0 0xc 0x20000000 0 0x10000000>; - }; - port2 { - ranges = <0 0 0xc 0x30000000 0 0x10000000>; - }; - }; - - lbc: localbus@ffe124000 { + localbus@ffe124000 { reg = <0xf 0xfe124000 0 0x1000>; ranges = <0 0 0xf 0xe8000000 0x08000000 2 0 0xf 0xffa00000 0x00040000 @@ -173,7 +160,6 @@ reg = <0xf 0xfe200000 0 0x1000>; ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>; - fsl,msi = <&msi0>; pcie@0 { ranges = <0x02000000 0 0xe0000000 0x02000000 0 0xe0000000 @@ -189,7 +175,6 @@ reg = <0xf 0xfe201000 0 0x1000>; ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000 0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>; - fsl,msi = <&msi1>; pcie@0 { ranges = <0x02000000 0 0xe0000000 0x02000000 0 0xe0000000 @@ -205,7 +190,6 @@ reg = <0xf 0xfe202000 0 0x1000>; ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000 0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>; - fsl,msi = <&msi2>; pcie@0 { ranges = <0x02000000 0 0xe0000000 0x02000000 0 0xe0000000 @@ -221,7 +205,6 @@ reg = <0xf 0xfe203000 0 0x1000>; ranges = <0x02000000 0 0xe0000000 0xc 0x60000000 0 0x20000000 0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>; - fsl,msi = <&msi2>; pcie@0 { ranges = <0x02000000 0 0xe0000000 0x02000000 0 0xe0000000 @@ -233,5 +216,3 @@ }; }; }; - -/include/ "fsl/p3041si-post.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/p3041si.dtsi b/trunk/arch/powerpc/boot/dts/p3041si.dtsi new file mode 100644 index 000000000000..87130b732bc7 --- /dev/null +++ b/trunk/arch/powerpc/boot/dts/p3041si.dtsi @@ -0,0 +1,729 @@ +/* + * P3041 Silicon Device Tree Source + * + * Copyright 2010-2011 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/dts-v1/; + +/ { + compatible = "fsl,P3041"; + #address-cells = <2>; + #size-cells = <2>; + interrupt-parent = <&mpic>; + + aliases { + ccsr = &soc; + dcsr = &dcsr; + + serial0 = &serial0; + serial1 = &serial1; + serial2 = &serial2; + serial3 = &serial3; + pci0 = &pci0; + pci1 = &pci1; + pci2 = &pci2; + pci3 = &pci3; + usb0 = &usb0; + usb1 = &usb1; + dma0 = &dma0; + dma1 = &dma1; + sdhc = &sdhc; + msi0 = &msi0; + msi1 = &msi1; + msi2 = &msi2; + + crypto = &crypto; + sec_jr0 = &sec_jr0; + sec_jr1 = &sec_jr1; + sec_jr2 = &sec_jr2; + sec_jr3 = &sec_jr3; + rtic_a = &rtic_a; + rtic_b = &rtic_b; + rtic_c = &rtic_c; + rtic_d = &rtic_d; + sec_mon = &sec_mon; + +/* + rio0 = &rapidio0; + */ + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu0: PowerPC,e500mc@0 { + device_type = "cpu"; + reg = <0>; + next-level-cache = <&L2_0>; + L2_0: l2-cache { + next-level-cache = <&cpc>; + }; + }; + cpu1: PowerPC,e500mc@1 { + device_type = "cpu"; + reg = <1>; + next-level-cache = <&L2_1>; + L2_1: l2-cache { + next-level-cache = <&cpc>; + }; + }; + cpu2: PowerPC,e500mc@2 { + device_type = "cpu"; + reg = <2>; + next-level-cache = <&L2_2>; + L2_2: l2-cache { + next-level-cache = <&cpc>; + }; + }; + cpu3: PowerPC,e500mc@3 { + device_type = "cpu"; + reg = <3>; + next-level-cache = <&L2_3>; + L2_3: l2-cache { + next-level-cache = <&cpc>; + }; + }; + }; + + dcsr: dcsr@f00000000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,dcsr", "simple-bus"; + + dcsr-epu@0 { + compatible = "fsl,dcsr-epu"; + interrupts = <52 2 0 0 + 84 2 0 0 + 85 2 0 0>; + interrupt-parent = <&mpic>; + reg = <0x0 0x1000>; + }; + dcsr-npc { + compatible = "fsl,dcsr-npc"; + reg = <0x1000 0x1000 0x1000000 0x8000>; + }; + dcsr-nxc@2000 { + compatible = "fsl,dcsr-nxc"; + reg = <0x2000 0x1000>; + }; + dcsr-corenet { + compatible = "fsl,dcsr-corenet"; + reg = <0x8000 0x1000 0xB0000 0x1000>; + }; + dcsr-dpaa@9000 { + compatible = "fsl,p43041-dcsr-dpaa", "fsl,dcsr-dpaa"; + reg = <0x9000 0x1000>; + }; + dcsr-ocn@11000 { + compatible = "fsl,p43041-dcsr-ocn", "fsl,dcsr-ocn"; + reg = <0x11000 0x1000>; + }; + dcsr-ddr@12000 { + compatible = "fsl,dcsr-ddr"; + dev-handle = <&ddr>; + reg = <0x12000 0x1000>; + }; + dcsr-nal@18000 { + compatible = "fsl,p43041-dcsr-nal", "fsl,dcsr-nal"; + reg = <0x18000 0x1000>; + }; + dcsr-rcpm@22000 { + compatible = "fsl,p43041-dcsr-rcpm", "fsl,dcsr-rcpm"; + reg = <0x22000 0x1000>; + }; + dcsr-cpu-sb-proxy@40000 { + compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu0>; + reg = <0x40000 0x1000>; + }; + dcsr-cpu-sb-proxy@41000 { + compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu1>; + reg = <0x41000 0x1000>; + }; + dcsr-cpu-sb-proxy@42000 { + compatible = 
"fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu2>; + reg = <0x42000 0x1000>; + }; + dcsr-cpu-sb-proxy@43000 { + compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu3>; + reg = <0x43000 0x1000>; + }; + }; + + soc: soc@ffe000000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "simple-bus"; + ranges = <0x00000000 0xf 0xfe000000 0x1000000>; + reg = <0xf 0xfe000000 0 0x00001000>; + + soc-sram-error { + compatible = "fsl,soc-sram-error"; + interrupts = <16 2 1 29>; + }; + + corenet-law@0 { + compatible = "fsl,corenet-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <32>; + }; + + ddr: memory-controller@8000 { + compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller"; + reg = <0x8000 0x1000>; + interrupts = <16 2 1 23>; + }; + + cpc: l3-cache-controller@10000 { + compatible = "fsl,p3041-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache"; + reg = <0x10000 0x1000>; + interrupts = <16 2 1 27>; + }; + + corenet-cf@18000 { + compatible = "fsl,corenet-cf"; + reg = <0x18000 0x1000>; + interrupts = <16 2 1 31>; + fsl,ccf-num-csdids = <32>; + fsl,ccf-num-snoopids = <32>; + }; + + iommu@20000 { + compatible = "fsl,pamu-v1.0", "fsl,pamu"; + reg = <0x20000 0x4000>; + interrupts = < + 24 2 0 0 + 16 2 1 30>; + }; + + mpic: pic@40000 { + clock-frequency = <0>; + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <4>; + reg = <0x40000 0x40000>; + compatible = "fsl,mpic", "chrp,open-pic"; + device_type = "open-pic"; + }; + + msi0: msi@41600 { + compatible = "fsl,mpic-msi"; + reg = <0x41600 0x200>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe0 0 0 0 + 0xe1 0 0 0 + 0xe2 0 0 0 + 0xe3 0 0 0 + 0xe4 0 0 0 + 0xe5 0 0 0 + 0xe6 0 0 0 + 0xe7 0 0 0>; + }; + + msi1: msi@41800 { + compatible = "fsl,mpic-msi"; + reg = <0x41800 0x200>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe8 0 0 0 + 0xe9 0 0 0 + 0xea 0 0 0 + 0xeb 0 0 0 + 0xec 0 0 0 + 0xed 0 0 0 + 0xee 0 0 0 + 0xef 0 0 0>; + }; + + msi2: msi@41a00 { + compatible = "fsl,mpic-msi"; + reg = <0x41a00 0x200>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xf0 0 0 0 + 0xf1 0 0 0 + 0xf2 0 0 0 + 0xf3 0 0 0 + 0xf4 0 0 0 + 0xf5 0 0 0 + 0xf6 0 0 0 + 0xf7 0 0 0>; + }; + + guts: global-utilities@e0000 { + compatible = "fsl,qoriq-device-config-1.0"; + reg = <0xe0000 0xe00>; + fsl,has-rstcr; + #sleep-cells = <1>; + fsl,liodn-bits = <12>; + }; + + pins: global-utilities@e0e00 { + compatible = "fsl,qoriq-pin-control-1.0"; + reg = <0xe0e00 0x200>; + #sleep-cells = <2>; + }; + + clockgen: global-utilities@e1000 { + compatible = "fsl,p3041-clockgen", "fsl,qoriq-clockgen-1.0"; + reg = <0xe1000 0x1000>; + clock-frequency = <0>; + }; + + rcpm: global-utilities@e2000 { + compatible = "fsl,qoriq-rcpm-1.0"; + reg = <0xe2000 0x1000>; + #sleep-cells = <1>; + }; + + sfp: sfp@e8000 { + compatible = "fsl,p3041-sfp", "fsl,qoriq-sfp-1.0"; + reg = <0xe8000 0x1000>; + }; + + serdes: serdes@ea000 { + compatible = "fsl,p3041-serdes"; + reg = <0xea000 0x1000>; + }; + + dma0: dma@100300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,p3041-dma", "fsl,eloplus-dma"; + reg = <0x100300 0x4>; + ranges = <0x0 0x100100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,p3041-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupts = <28 2 0 0>; + }; + dma-channel@80 { + compatible = "fsl,p3041-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index 
= <1>; + interrupts = <29 2 0 0>; + }; + dma-channel@100 { + compatible = "fsl,p3041-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupts = <30 2 0 0>; + }; + dma-channel@180 { + compatible = "fsl,p3041-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupts = <31 2 0 0>; + }; + }; + + dma1: dma@101300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,p3041-dma", "fsl,eloplus-dma"; + reg = <0x101300 0x4>; + ranges = <0x0 0x101100 0x200>; + cell-index = <1>; + dma-channel@0 { + compatible = "fsl,p3041-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupts = <32 2 0 0>; + }; + dma-channel@80 { + compatible = "fsl,p3041-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupts = <33 2 0 0>; + }; + dma-channel@100 { + compatible = "fsl,p3041-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupts = <34 2 0 0>; + }; + dma-channel@180 { + compatible = "fsl,p3041-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupts = <35 2 0 0>; + }; + }; + + spi@110000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,p3041-espi", "fsl,mpc8536-espi"; + reg = <0x110000 0x1000>; + interrupts = <53 0x2 0 0>; + fsl,espi-num-chipselects = <4>; + }; + + sdhc: sdhc@114000 { + compatible = "fsl,p3041-esdhc", "fsl,esdhc"; + reg = <0x114000 0x1000>; + interrupts = <48 2 0 0>; + sdhci,auto-cmd12; + clock-frequency = <0>; + }; + + i2c@118000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x118000 0x100>; + interrupts = <38 2 0 0>; + dfsrr; + }; + + i2c@118100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x118100 0x100>; + interrupts = <38 2 0 0>; + dfsrr; + }; + + i2c@119000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <2>; + compatible = "fsl-i2c"; + reg = <0x119000 0x100>; + interrupts = <39 2 0 0>; + dfsrr; + }; + + i2c@119100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <3>; + compatible = "fsl-i2c"; + reg = <0x119100 0x100>; + interrupts = <39 2 0 0>; + dfsrr; + }; + + serial0: serial@11c500 { + cell-index = <0>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x11c500 0x100>; + clock-frequency = <0>; + interrupts = <36 2 0 0>; + }; + + serial1: serial@11c600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x11c600 0x100>; + clock-frequency = <0>; + interrupts = <36 2 0 0>; + }; + + serial2: serial@11d500 { + cell-index = <2>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x11d500 0x100>; + clock-frequency = <0>; + interrupts = <37 2 0 0>; + }; + + serial3: serial@11d600 { + cell-index = <3>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x11d600 0x100>; + clock-frequency = <0>; + interrupts = <37 2 0 0>; + }; + + gpio0: gpio@130000 { + compatible = "fsl,p3041-gpio", "fsl,qoriq-gpio"; + reg = <0x130000 0x1000>; + interrupts = <55 2 0 0>; + #gpio-cells = <2>; + gpio-controller; + }; + + usb0: usb@210000 { + compatible = "fsl,p3041-usb2-mph", + "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph"; + reg = <0x210000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <44 0x2 0 0>; + phy_type = "utmi"; + port0; + }; + + usb1: usb@211000 { + compatible = "fsl,p3041-usb2-dr", + "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr"; + 
reg = <0x211000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <45 0x2 0 0>; + dr_mode = "host"; + phy_type = "utmi"; + }; + + sata@220000 { + compatible = "fsl,p3041-sata", "fsl,pq-sata-v2"; + reg = <0x220000 0x1000>; + interrupts = <68 0x2 0 0>; + }; + + sata@221000 { + compatible = "fsl,p3041-sata", "fsl,pq-sata-v2"; + reg = <0x221000 0x1000>; + interrupts = <69 0x2 0 0>; + }; + + crypto: crypto@300000 { + compatible = "fsl,sec-v4.2", "fsl,sec-v4.0"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x300000 0x10000>; + ranges = <0 0x300000 0x10000>; + interrupts = <92 2 0 0>; + + sec_jr0: jr@1000 { + compatible = "fsl,sec-v4.2-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x1000 0x1000>; + interrupts = <88 2 0 0>; + }; + + sec_jr1: jr@2000 { + compatible = "fsl,sec-v4.2-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x2000 0x1000>; + interrupts = <89 2 0 0>; + }; + + sec_jr2: jr@3000 { + compatible = "fsl,sec-v4.2-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x3000 0x1000>; + interrupts = <90 2 0 0>; + }; + + sec_jr3: jr@4000 { + compatible = "fsl,sec-v4.2-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x4000 0x1000>; + interrupts = <91 2 0 0>; + }; + + rtic@6000 { + compatible = "fsl,sec-v4.2-rtic", + "fsl,sec-v4.0-rtic"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x6000 0x100>; + ranges = <0x0 0x6100 0xe00>; + + rtic_a: rtic-a@0 { + compatible = "fsl,sec-v4.2-rtic-memory", + "fsl,sec-v4.0-rtic-memory"; + reg = <0x00 0x20 0x100 0x80>; + }; + + rtic_b: rtic-b@20 { + compatible = "fsl,sec-v4.2-rtic-memory", + "fsl,sec-v4.0-rtic-memory"; + reg = <0x20 0x20 0x200 0x80>; + }; + + rtic_c: rtic-c@40 { + compatible = "fsl,sec-v4.2-rtic-memory", + "fsl,sec-v4.0-rtic-memory"; + reg = <0x40 0x20 0x300 0x80>; + }; + + rtic_d: rtic-d@60 { + compatible = "fsl,sec-v4.2-rtic-memory", + "fsl,sec-v4.0-rtic-memory"; + reg = <0x60 0x20 0x500 0x80>; + }; + }; + }; + + sec_mon: sec_mon@314000 { + compatible = "fsl,sec-v4.2-mon", "fsl,sec-v4.0-mon"; + reg = <0x314000 0x1000>; + interrupts = <93 2 0 0>; + }; + }; + +/* + rapidio0: rapidio@ffe0c0000 +*/ + + localbus@ffe124000 { + compatible = "fsl,p3041-elbc", "fsl,elbc", "simple-bus"; + interrupts = <25 2 0 0>; + #address-cells = <2>; + #size-cells = <1>; + }; + + pci0: pcie@ffe200000 { + compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2"; + device_type = "pci"; + #size-cells = <2>; + #address-cells = <3>; + bus-range = <0x0 0xff>; + clock-frequency = <0x1fca055>; + fsl,msi = <&msi0>; + interrupts = <16 2 1 15>; + + pcie@0 { + reg = <0 0 0 0 0>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + interrupts = <16 2 1 15>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 40 1 0 0 + 0000 0 0 2 &mpic 1 1 0 0 + 0000 0 0 3 &mpic 2 1 0 0 + 0000 0 0 4 &mpic 3 1 0 0 + >; + }; + }; + + pci1: pcie@ffe201000 { + compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2"; + device_type = "pci"; + #size-cells = <2>; + #address-cells = <3>; + bus-range = <0 0xff>; + clock-frequency = <0x1fca055>; + fsl,msi = <&msi1>; + interrupts = <16 2 1 14>; + pcie@0 { + reg = <0 0 0 0 0>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + interrupts = <16 2 1 14>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 41 1 0 0 + 0000 0 0 2 &mpic 5 1 0 0 + 0000 0 0 3 &mpic 6 1 0 0 + 0000 0 0 4 &mpic 7 1 0 0 + >; + }; + }; + + pci2: pcie@ffe202000 { + compatible = 
"fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2"; + device_type = "pci"; + #size-cells = <2>; + #address-cells = <3>; + bus-range = <0x0 0xff>; + clock-frequency = <0x1fca055>; + fsl,msi = <&msi2>; + interrupts = <16 2 1 13>; + pcie@0 { + reg = <0 0 0 0 0>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + interrupts = <16 2 1 13>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 42 1 0 0 + 0000 0 0 2 &mpic 9 1 0 0 + 0000 0 0 3 &mpic 10 1 0 0 + 0000 0 0 4 &mpic 11 1 0 0 + >; + }; + }; + + pci3: pcie@ffe203000 { + compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2"; + device_type = "pci"; + #size-cells = <2>; + #address-cells = <3>; + bus-range = <0x0 0xff>; + clock-frequency = <0x1fca055>; + fsl,msi = <&msi2>; + interrupts = <16 2 1 12>; + pcie@0 { + reg = <0 0 0 0 0>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + interrupts = <16 2 1 12>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 43 1 0 0 + 0000 0 0 2 &mpic 0 1 0 0 + 0000 0 0 3 &mpic 4 1 0 0 + 0000 0 0 4 &mpic 8 1 0 0 + >; + }; + }; +}; diff --git a/trunk/arch/powerpc/boot/dts/p3060qds.dts b/trunk/arch/powerpc/boot/dts/p3060qds.dts index 529042e4b9a2..08b9193213e7 100644 --- a/trunk/arch/powerpc/boot/dts/p3060qds.dts +++ b/trunk/arch/powerpc/boot/dts/p3060qds.dts @@ -32,7 +32,7 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -/include/ "fsl/p3060si-pre.dtsi" +/include/ "p3060si.dtsi" / { model = "fsl,P3060QDS"; @@ -50,8 +50,6 @@ }; soc: soc@ffe000000 { - ranges = <0x00000000 0xf 0xfe000000 0x1000000>; - reg = <0xf 0xfe000000 0 0x00001000>; spi@110000 { flash@0 { #address-cells = <1>; @@ -140,7 +138,7 @@ }; }; - rio: rapidio@ffe0c0000 { + rapidio@ffe0c0000 { reg = <0xf 0xfe0c0000 0 0x11000>; port1 { @@ -151,7 +149,7 @@ }; }; - lbc: localbus@ffe124000 { + localbus@ffe124000 { reg = <0xf 0xfe124000 0 0x1000>; ranges = <0 0 0xf 0xe8000000 0x08000000 2 0 0xf 0xffa00000 0x00040000 @@ -212,7 +210,6 @@ reg = <0xf 0xfe200000 0 0x1000>; ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>; - fsl,msi = <&msi0>; pcie@0 { ranges = <0x02000000 0 0xe0000000 0x02000000 0 0xe0000000 @@ -228,7 +225,6 @@ reg = <0xf 0xfe201000 0 0x1000>; ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000 0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>; - fsl,msi = <&msi1>; pcie@0 { ranges = <0x02000000 0 0xe0000000 0x02000000 0 0xe0000000 @@ -240,5 +236,3 @@ }; }; }; - -/include/ "fsl/p3060si-post.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/p3060si.dtsi b/trunk/arch/powerpc/boot/dts/p3060si.dtsi new file mode 100644 index 000000000000..68947e157bbc --- /dev/null +++ b/trunk/arch/powerpc/boot/dts/p3060si.dtsi @@ -0,0 +1,719 @@ +/* + * P3060 Silicon Device Tree Source + * + * Copyright 2011 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/dts-v1/; + +/ { + compatible = "fsl,P3060"; + #address-cells = <2>; + #size-cells = <2>; + interrupt-parent = <&mpic>; + + aliases { + ccsr = &soc; + dcsr = &dcsr; + + serial0 = &serial0; + serial1 = &serial1; + serial2 = &serial2; + serial3 = &serial3; + pci0 = &pci0; + pci1 = &pci1; + usb0 = &usb0; + usb1 = &usb1; + dma0 = &dma0; + dma1 = &dma1; + msi0 = &msi0; + msi1 = &msi1; + msi2 = &msi2; + + crypto = &crypto; + sec_jr0 = &sec_jr0; + sec_jr1 = &sec_jr1; + sec_jr2 = &sec_jr2; + sec_jr3 = &sec_jr3; + rtic_a = &rtic_a; + rtic_b = &rtic_b; + rtic_c = &rtic_c; + rtic_d = &rtic_d; + sec_mon = &sec_mon; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu0: PowerPC,e500mc@0 { + device_type = "cpu"; + reg = <0>; + next-level-cache = <&L2_0>; + L2_0: l2-cache { + next-level-cache = <&cpc>; + }; + }; + cpu1: PowerPC,e500mc@1 { + device_type = "cpu"; + reg = <1>; + next-level-cache = <&L2_1>; + L2_1: l2-cache { + next-level-cache = <&cpc>; + }; + }; + cpu4: PowerPC,e500mc@4 { + device_type = "cpu"; + reg = <4>; + next-level-cache = <&L2_4>; + L2_4: l2-cache { + next-level-cache = <&cpc>; + }; + }; + cpu5: PowerPC,e500mc@5 { + device_type = "cpu"; + reg = <5>; + next-level-cache = <&L2_5>; + L2_5: l2-cache { + next-level-cache = <&cpc>; + }; + }; + cpu6: PowerPC,e500mc@6 { + device_type = "cpu"; + reg = <6>; + next-level-cache = <&L2_6>; + L2_6: l2-cache { + next-level-cache = <&cpc>; + }; + }; + cpu7: PowerPC,e500mc@7 { + device_type = "cpu"; + reg = <7>; + next-level-cache = <&L2_7>; + L2_7: l2-cache { + next-level-cache = <&cpc>; + }; + }; + }; + + dcsr: dcsr@f00000000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,dcsr", "simple-bus"; + + dcsr-epu@0 { + compatible = "fsl,dcsr-epu"; + interrupts = <52 2 0 0 + 84 2 0 0 + 85 2 0 0>; + interrupt-parent = <&mpic>; + reg = <0x0 0x1000>; + }; + dcsr-npc { + compatible = "fsl,dcsr-npc"; + reg = <0x1000 0x1000 0x1000000 0x8000>; + }; + dcsr-nxc@2000 { + compatible = "fsl,dcsr-nxc"; + reg = <0x2000 0x1000>; + }; + dcsr-corenet { + compatible = "fsl,dcsr-corenet"; + reg = <0x8000 0x1000 0xB0000 0x1000>; + }; + dcsr-dpaa@9000 { + compatible = "fsl,p3060-dcsr-dpaa", "fsl,dcsr-dpaa"; + reg = <0x9000 0x1000>; + }; + dcsr-ocn@11000 { + compatible = "fsl,p3060-dcsr-ocn", "fsl,dcsr-ocn"; + reg = <0x11000 
0x1000>; + }; + dcsr-ddr@12000 { + compatible = "fsl,dcsr-ddr"; + dev-handle = <&ddr>; + reg = <0x12000 0x1000>; + }; + dcsr-nal@18000 { + compatible = "fsl,p3060-dcsr-nal", "fsl,dcsr-nal"; + reg = <0x18000 0x1000>; + }; + dcsr-rcpm@22000 { + compatible = "fsl,p3060-dcsr-rcpm", "fsl,dcsr-rcpm"; + reg = <0x22000 0x1000>; + }; + dcsr-cpu-sb-proxy@40000 { + compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu0>; + reg = <0x40000 0x1000>; + }; + dcsr-cpu-sb-proxy@41000 { + compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu1>; + reg = <0x41000 0x1000>; + }; + dcsr-cpu-sb-proxy@44000 { + compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu4>; + reg = <0x44000 0x1000>; + }; + dcsr-cpu-sb-proxy@45000 { + compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu5>; + reg = <0x45000 0x1000>; + }; + dcsr-cpu-sb-proxy@46000 { + compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu6>; + reg = <0x46000 0x1000>; + }; + dcsr-cpu-sb-proxy@47000 { + compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu7>; + reg = <0x47000 0x1000>; + }; + }; + + soc: soc@ffe000000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "simple-bus"; + ranges = <0x00000000 0xf 0xfe000000 0x1000000>; + reg = <0xf 0xfe000000 0 0x00001000>; + + soc-sram-error { + compatible = "fsl,soc-sram-error"; + interrupts = <16 2 1 29>; + }; + + corenet-law@0 { + compatible = "fsl,corenet-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <32>; + }; + + ddr: memory-controller@8000 { + compatible = "fsl,qoriq-memory-controller-v4.4", "fsl,qoriq-memory-controller"; + reg = <0x8000 0x1000>; + interrupts = <16 2 1 23>; + }; + + cpc: l3-cache-controller@10000 { + compatible = "fsl,p3060-l3-cache-controller", "cache"; + reg = <0x10000 0x1000 + 0x11000 0x1000>; + interrupts = <16 2 1 27>; + }; + + corenet-cf@18000 { + compatible = "fsl,corenet-cf"; + reg = <0x18000 0x1000>; + interrupts = <16 2 1 31>; + fsl,ccf-num-csdids = <32>; + fsl,ccf-num-snoopids = <32>; + }; + + iommu@20000 { + compatible = "fsl,pamu-v1.0", "fsl,pamu"; + reg = <0x20000 0x5000>; + interrupts = < + 24 2 0 0 + 16 2 1 30>; + }; + + mpic: pic@40000 { + clock-frequency = <0>; + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <4>; + reg = <0x40000 0x40000>; + compatible = "fsl,mpic", "chrp,open-pic"; + device_type = "open-pic"; + }; + + msi0: msi@41600 { + compatible = "fsl,mpic-msi"; + reg = <0x41600 0x200>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe0 0 0 0 + 0xe1 0 0 0 + 0xe2 0 0 0 + 0xe3 0 0 0 + 0xe4 0 0 0 + 0xe5 0 0 0 + 0xe6 0 0 0 + 0xe7 0 0 0>; + }; + + msi1: msi@41800 { + compatible = "fsl,mpic-msi"; + reg = <0x41800 0x200>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe8 0 0 0 + 0xe9 0 0 0 + 0xea 0 0 0 + 0xeb 0 0 0 + 0xec 0 0 0 + 0xed 0 0 0 + 0xee 0 0 0 + 0xef 0 0 0>; + }; + + msi2: msi@41a00 { + compatible = "fsl,mpic-msi"; + reg = <0x41a00 0x200>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xf0 0 0 0 + 0xf1 0 0 0 + 0xf2 0 0 0 + 0xf3 0 0 0 + 0xf4 0 0 0 + 0xf5 0 0 0 + 0xf6 0 0 0 + 0xf7 0 0 0>; + }; + + rmu: rmu@d3000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,srio-rmu"; + reg = <0xd3000 0x500>; + ranges = <0x0 0xd3000 0x500>; + + message-unit@0 { + compatible = "fsl,srio-msg-unit"; + reg = <0x0 0x100>; + interrupts = < + 60 2 0 0 /* msg1_tx_irq */ + 61 2 0 0>;/* msg1_rx_irq 
*/ + }; + message-unit@100 { + compatible = "fsl,srio-msg-unit"; + reg = <0x100 0x100>; + interrupts = < + 62 2 0 0 /* msg2_tx_irq */ + 63 2 0 0>;/* msg2_rx_irq */ + }; + doorbell-unit@400 { + compatible = "fsl,srio-dbell-unit"; + reg = <0x400 0x80>; + interrupts = < + 56 2 0 0 /* bell_outb_irq */ + 57 2 0 0>;/* bell_inb_irq */ + }; + port-write-unit@4e0 { + compatible = "fsl,srio-port-write-unit"; + reg = <0x4e0 0x20>; + interrupts = <16 2 1 11>; + }; + }; + + guts: global-utilities@e0000 { + compatible = "fsl,qoriq-device-config-1.0"; + reg = <0xe0000 0xe00>; + fsl,has-rstcr; + #sleep-cells = <1>; + fsl,liodn-bits = <12>; + }; + + pins: global-utilities@e0e00 { + compatible = "fsl,qoriq-pin-control-1.0"; + reg = <0xe0e00 0x200>; + #sleep-cells = <2>; + }; + + clockgen: global-utilities@e1000 { + compatible = "fsl,p3060-clockgen", "fsl,qoriq-clockgen-1.0"; + reg = <0xe1000 0x1000>; + clock-frequency = <0>; + }; + + rcpm: global-utilities@e2000 { + compatible = "fsl,qoriq-rcpm-1.0"; + reg = <0xe2000 0x1000>; + #sleep-cells = <1>; + }; + + sfp: sfp@e8000 { + compatible = "fsl,p3060-sfp", "fsl,qoriq-sfp-1.0"; + reg = <0xe8000 0x1000>; + }; + + serdes: serdes@ea000 { + compatible = "fsl,p3060-serdes"; + reg = <0xea000 0x1000>; + }; + + dma0: dma@100300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,p3060-dma", "fsl,eloplus-dma"; + reg = <0x100300 0x4>; + ranges = <0x0 0x100100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,p3060-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupts = <28 2 0 0>; + }; + dma-channel@80 { + compatible = "fsl,p3060-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupts = <29 2 0 0>; + }; + dma-channel@100 { + compatible = "fsl,p3060-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupts = <30 2 0 0>; + }; + dma-channel@180 { + compatible = "fsl,p3060-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupts = <31 2 0 0>; + }; + }; + + dma1: dma@101300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,p3060-dma", "fsl,eloplus-dma"; + reg = <0x101300 0x4>; + ranges = <0x0 0x101100 0x200>; + cell-index = <1>; + dma-channel@0 { + compatible = "fsl,p3060-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupts = <32 2 0 0>; + }; + dma-channel@80 { + compatible = "fsl,p3060-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupts = <33 2 0 0>; + }; + dma-channel@100 { + compatible = "fsl,p3060-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupts = <34 2 0 0>; + }; + dma-channel@180 { + compatible = "fsl,p3060-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupts = <35 2 0 0>; + }; + }; + + spi@110000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,p3060-espi", "fsl,mpc8536-espi"; + reg = <0x110000 0x1000>; + interrupts = <53 0x2 0 0>; + fsl,espi-num-chipselects = <4>; + }; + + i2c@118000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x118000 0x100>; + interrupts = <38 2 0 0>; + dfsrr; + }; + + i2c@118100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x118100 0x100>; + interrupts = <38 2 0 0>; + dfsrr; + }; + + i2c@119000 { + #address-cells = <1>; + 
#size-cells = <0>; + cell-index = <2>; + compatible = "fsl-i2c"; + reg = <0x119000 0x100>; + interrupts = <39 2 0 0>; + dfsrr; + }; + + i2c@119100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <3>; + compatible = "fsl-i2c"; + reg = <0x119100 0x100>; + interrupts = <39 2 0 0>; + dfsrr; + }; + + serial0: serial@11c500 { + cell-index = <0>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x11c500 0x100>; + clock-frequency = <0>; + interrupts = <36 2 0 0>; + }; + + serial1: serial@11c600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x11c600 0x100>; + clock-frequency = <0>; + interrupts = <36 2 0 0>; + }; + + serial2: serial@11d500 { + cell-index = <2>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x11d500 0x100>; + clock-frequency = <0>; + interrupts = <37 2 0 0>; + }; + + serial3: serial@11d600 { + cell-index = <3>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x11d600 0x100>; + clock-frequency = <0>; + interrupts = <37 2 0 0>; + }; + + gpio0: gpio@130000 { + compatible = "fsl,p3060-gpio", "fsl,qoriq-gpio"; + reg = <0x130000 0x1000>; + interrupts = <55 2 0 0>; + #gpio-cells = <2>; + gpio-controller; + }; + + usb0: usb@210000 { + compatible = "fsl,p3060-usb2-mph", + "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph"; + reg = <0x210000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <44 0x2 0 0>; + }; + + usb1: usb@211000 { + compatible = "fsl,p3060-usb2-dr", + "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr"; + reg = <0x211000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <45 0x2 0 0>; + }; + + crypto: crypto@300000 { + compatible = "fsl,sec-v4.1", "fsl,sec-v4.0"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x300000 0x10000>; + ranges = <0 0x300000 0x10000>; + interrupt-parent = <&mpic>; + interrupts = <92 2 0 0>; + + sec_jr0: jr@1000 { + compatible = "fsl,sec-v4.1-job-ring", "fsl,sec-v4.0-job-ring"; + reg = <0x1000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <88 2 0 0>; + }; + + sec_jr1: jr@2000 { + compatible = "fsl,sec-v4.1-job-ring", "fsl,sec-v4.0-job-ring"; + reg = <0x2000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <89 2 0 0>; + }; + + sec_jr2: jr@3000 { + compatible = "fsl,sec-v4.1-job-ring", "fsl,sec-v4.0-job-ring"; + reg = <0x3000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <90 2 0 0>; + }; + + sec_jr3: jr@4000 { + compatible = "fsl,sec-v4.1-job-ring", "fsl,sec-v4.0-job-ring"; + reg = <0x4000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <91 2 0 0>; + }; + + rtic@6000 { + compatible = "fsl,sec-v4.1-rtic", "fsl,sec-v4.0-rtic"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x6000 0x100>; + ranges = <0x0 0x6100 0xe00>; + + rtic_a: rtic-a@0 { + compatible = "fsl,sec-v4.1-rtic-memory", "fsl,sec-v4.0-rtic-memory"; + reg = <0x00 0x20 0x100 0x80>; + }; + + rtic_b: rtic-b@20 { + compatible = "fsl,sec-v4.1-rtic-memory", "fsl,sec-v4.0-rtic-memory"; + reg = <0x20 0x20 0x200 0x80>; + }; + + rtic_c: rtic-c@40 { + compatible = "fsl,sec-v4.1-rtic-memory", "fsl,sec-v4.0-rtic-memory"; + reg = <0x40 0x20 0x300 0x80>; + }; + + rtic_d: rtic-d@60 { + compatible = "fsl,sec-v4.1-rtic-memory", "fsl,sec-v4.0-rtic-memory"; + reg = <0x60 0x20 0x500 0x80>; + }; + }; + }; + + sec_mon: sec_mon@314000 { + compatible = "fsl,sec-v4.1-mon", "fsl,sec-v4.0-mon"; + reg = <0x314000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <93 2 0 0>; + }; + }; + + rapidio@ffe0c0000 { + compatible = "fsl,srio"; + interrupts = <16 2 1 11>; + #address-cells 
= <2>; + #size-cells = <2>; + fsl,srio-rmu-handle = <&rmu>; + ranges; + + port1 { + #address-cells = <2>; + #size-cells = <2>; + cell-index = <1>; + }; + + port2 { + #address-cells = <2>; + #size-cells = <2>; + cell-index = <2>; + }; + }; + + localbus@ffe124000 { + compatible = "fsl,p3060-elbc", "fsl,elbc", "simple-bus"; + interrupts = <25 2 0 0>; + #address-cells = <2>; + #size-cells = <1>; + }; + + pci0: pcie@ffe200000 { + compatible = "fsl,p3060-pcie", "fsl,qoriq-pcie-v2.2"; + device_type = "pci"; + #size-cells = <2>; + #address-cells = <3>; + bus-range = <0x0 0xff>; + clock-frequency = <33333333>; + fsl,msi = <&msi0>; + interrupts = <16 2 1 15>; + pcie@0 { + reg = <0 0 0 0 0>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + interrupts = <16 2 1 15>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 40 1 0 0 + 0000 0 0 2 &mpic 1 1 0 0 + 0000 0 0 3 &mpic 2 1 0 0 + 0000 0 0 4 &mpic 3 1 0 0 + >; + }; + }; + + pci1: pcie@ffe201000 { + compatible = "fsl,p3060-pcie", "fsl,qoriq-pcie-v2.2"; + device_type = "pci"; + #size-cells = <2>; + #address-cells = <3>; + bus-range = <0 0xff>; + clock-frequency = <33333333>; + fsl,msi = <&msi1>; + interrupts = <16 2 1 14>; + pcie@0 { + reg = <0 0 0 0 0>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + interrupts = <16 2 1 14>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 41 1 0 0 + 0000 0 0 2 &mpic 5 1 0 0 + 0000 0 0 3 &mpic 6 1 0 0 + 0000 0 0 4 &mpic 7 1 0 0 + >; + }; + }; +}; diff --git a/trunk/arch/powerpc/boot/dts/p4080ds.dts b/trunk/arch/powerpc/boot/dts/p4080ds.dts index 6d60e54e50a0..c7916dc28014 100644 --- a/trunk/arch/powerpc/boot/dts/p4080ds.dts +++ b/trunk/arch/powerpc/boot/dts/p4080ds.dts @@ -32,7 +32,7 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -/include/ "fsl/p4080si-pre.dtsi" +/include/ "p4080si.dtsi" / { model = "fsl,P4080DS"; @@ -50,9 +50,6 @@ }; soc: soc@ffe000000 { - ranges = <0x00000000 0xf 0xfe000000 0x1000000>; - reg = <0xf 0xfe000000 0 0x00001000>; - spi@110000 { flash@0 { #address-cells = <1>; @@ -108,18 +105,12 @@ }; }; - rio: rapidio@ffe0c0000 { - reg = <0xf 0xfe0c0000 0 0x11000>; - - port1 { - ranges = <0 0 0xc 0x20000000 0 0x10000000>; - }; - port2 { - ranges = <0 0 0xc 0x30000000 0 0x10000000>; - }; + rapidio0: rapidio@ffe0c0000 { + reg = <0xf 0xfe0c0000 0 0x20000>; + ranges = <0 0 0xc 0x20000000 0 0x01000000>; }; - lbc: localbus@ffe124000 { + localbus@ffe124000 { reg = <0xf 0xfe124000 0 0x1000>; ranges = <0 0 0xf 0xe8000000 0x08000000 3 0 0xf 0xffdf0000 0x00008000>; @@ -141,7 +132,6 @@ reg = <0xf 0xfe200000 0 0x1000>; ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>; - fsl,msi = <&msi0>; pcie@0 { ranges = <0x02000000 0 0xe0000000 0x02000000 0 0xe0000000 @@ -157,7 +147,6 @@ reg = <0xf 0xfe201000 0 0x1000>; ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000 0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>; - fsl,msi = <&msi1>; pcie@0 { ranges = <0x02000000 0 0xe0000000 0x02000000 0 0xe0000000 @@ -173,7 +162,6 @@ reg = <0xf 0xfe202000 0 0x1000>; ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000 0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>; - fsl,msi = <&msi2>; pcie@0 { ranges = <0x02000000 0 0xe0000000 0x02000000 0 0xe0000000 @@ -186,5 +174,3 @@ }; }; - -/include/ "fsl/p4080si-post.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/p4080si.dtsi b/trunk/arch/powerpc/boot/dts/p4080si.dtsi new file mode 100644 index 000000000000..f20c01ab2473 --- /dev/null +++ b/trunk/arch/powerpc/boot/dts/p4080si.dtsi @@ -0,0 +1,755 @@ +/* + * P4080 Silicon Device Tree Source + * + * Copyright 2009-2011 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/dts-v1/; + +/ { + compatible = "fsl,P4080"; + #address-cells = <2>; + #size-cells = <2>; + interrupt-parent = <&mpic>; + + aliases { + ccsr = &soc; + dcsr = &dcsr; + + serial0 = &serial0; + serial1 = &serial1; + serial2 = &serial2; + serial3 = &serial3; + pci0 = &pci0; + pci1 = &pci1; + pci2 = &pci2; + usb0 = &usb0; + usb1 = &usb1; + dma0 = &dma0; + dma1 = &dma1; + sdhc = &sdhc; + msi0 = &msi0; + msi1 = &msi1; + msi2 = &msi2; + + crypto = &crypto; + sec_jr0 = &sec_jr0; + sec_jr1 = &sec_jr1; + sec_jr2 = &sec_jr2; + sec_jr3 = &sec_jr3; + rtic_a = &rtic_a; + rtic_b = &rtic_b; + rtic_c = &rtic_c; + rtic_d = &rtic_d; + sec_mon = &sec_mon; + + rio0 = &rapidio0; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu0: PowerPC,e500mc@0 { + device_type = "cpu"; + reg = <0>; + next-level-cache = <&L2_0>; + L2_0: l2-cache { + next-level-cache = <&cpc>; + }; + }; + cpu1: PowerPC,e500mc@1 { + device_type = "cpu"; + reg = <1>; + next-level-cache = <&L2_1>; + L2_1: l2-cache { + next-level-cache = <&cpc>; + }; + }; + cpu2: PowerPC,e500mc@2 { + device_type = "cpu"; + reg = <2>; + next-level-cache = <&L2_2>; + L2_2: l2-cache { + next-level-cache = <&cpc>; + }; + }; + cpu3: PowerPC,e500mc@3 { + device_type = "cpu"; + reg = <3>; + next-level-cache = <&L2_3>; + L2_3: l2-cache { + next-level-cache = <&cpc>; + }; + }; + cpu4: PowerPC,e500mc@4 { + device_type = "cpu"; + reg = <4>; + next-level-cache = <&L2_4>; + L2_4: l2-cache { + next-level-cache = <&cpc>; + }; + }; + cpu5: PowerPC,e500mc@5 { + device_type = "cpu"; + reg = <5>; + next-level-cache = <&L2_5>; + L2_5: l2-cache { + next-level-cache = <&cpc>; + }; + }; + cpu6: PowerPC,e500mc@6 { + device_type = "cpu"; + reg = <6>; + next-level-cache = <&L2_6>; + L2_6: l2-cache { + next-level-cache = <&cpc>; + }; + }; + cpu7: PowerPC,e500mc@7 { + device_type = "cpu"; + reg = <7>; + next-level-cache = <&L2_7>; + L2_7: l2-cache { + next-level-cache = <&cpc>; + }; + }; + }; + + dcsr: dcsr@f00000000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,dcsr", "simple-bus"; + + dcsr-epu@0 { + compatible = "fsl,dcsr-epu"; + interrupts = <52 2 0 0 + 84 2 0 0 + 85 2 0 0>; + interrupt-parent = <&mpic>; + reg = <0x0 0x1000>; + }; + dcsr-npc { + compatible = "fsl,dcsr-npc"; + reg = <0x1000 0x1000 0x1000000 0x8000>; + }; + dcsr-nxc@2000 { + compatible = "fsl,dcsr-nxc"; + reg = <0x2000 0x1000>; + }; + dcsr-corenet { + compatible = "fsl,dcsr-corenet"; + reg = <0x8000 0x1000 0xB0000 0x1000>; + }; + dcsr-dpaa@9000 { + compatible = "fsl,p4080-dcsr-dpaa", "fsl,dcsr-dpaa"; + reg = <0x9000 0x1000>; + }; + dcsr-ocn@11000 { + compatible = "fsl,p4080-dcsr-ocn", "fsl,dcsr-ocn"; + reg = <0x11000 0x1000>; + }; + dcsr-ddr@12000 { + compatible = "fsl,dcsr-ddr"; + dev-handle = <&ddr1>; + reg = <0x12000 0x1000>; + }; + dcsr-ddr@13000 { + compatible = "fsl,dcsr-ddr"; + dev-handle = <&ddr2>; + reg = <0x13000 0x1000>; + }; + dcsr-nal@18000 { + compatible = "fsl,p4080-dcsr-nal", "fsl,dcsr-nal"; + reg = <0x18000 0x1000>; + }; + dcsr-rcpm@22000 { + 
compatible = "fsl,p4080-dcsr-rcpm", "fsl,dcsr-rcpm"; + reg = <0x22000 0x1000>; + }; + dcsr-cpu-sb-proxy@40000 { + compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu0>; + reg = <0x40000 0x1000>; + }; + dcsr-cpu-sb-proxy@41000 { + compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu1>; + reg = <0x41000 0x1000>; + }; + dcsr-cpu-sb-proxy@42000 { + compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu2>; + reg = <0x42000 0x1000>; + }; + dcsr-cpu-sb-proxy@43000 { + compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu3>; + reg = <0x43000 0x1000>; + }; + dcsr-cpu-sb-proxy@44000 { + compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu4>; + reg = <0x44000 0x1000>; + }; + dcsr-cpu-sb-proxy@45000 { + compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu5>; + reg = <0x45000 0x1000>; + }; + dcsr-cpu-sb-proxy@46000 { + compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu6>; + reg = <0x46000 0x1000>; + }; + dcsr-cpu-sb-proxy@47000 { + compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu7>; + reg = <0x47000 0x1000>; + }; + }; + + soc: soc@ffe000000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "simple-bus"; + ranges = <0x00000000 0xf 0xfe000000 0x1000000>; + reg = <0xf 0xfe000000 0 0x00001000>; + + soc-sram-error { + compatible = "fsl,soc-sram-error"; + interrupts = <16 2 1 29>; + }; + + corenet-law@0 { + compatible = "fsl,corenet-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <32>; + }; + + ddr1: memory-controller@8000 { + compatible = "fsl,qoriq-memory-controller-v4.4", "fsl,qoriq-memory-controller"; + reg = <0x8000 0x1000>; + interrupts = <16 2 1 23>; + }; + + ddr2: memory-controller@9000 { + compatible = "fsl,qoriq-memory-controller-v4.4","fsl,qoriq-memory-controller"; + reg = <0x9000 0x1000>; + interrupts = <16 2 1 22>; + }; + + cpc: l3-cache-controller@10000 { + compatible = "fsl,p4080-l3-cache-controller", "cache"; + reg = <0x10000 0x1000 + 0x11000 0x1000>; + interrupts = <16 2 1 27 + 16 2 1 26>; + }; + + corenet-cf@18000 { + compatible = "fsl,corenet-cf"; + reg = <0x18000 0x1000>; + interrupts = <16 2 1 31>; + fsl,ccf-num-csdids = <32>; + fsl,ccf-num-snoopids = <32>; + }; + + iommu@20000 { + compatible = "fsl,pamu-v1.0", "fsl,pamu"; + reg = <0x20000 0x5000>; + interrupts = < + 24 2 0 0 + 16 2 1 30>; + }; + + mpic: pic@40000 { + clock-frequency = <0>; + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <4>; + reg = <0x40000 0x40000>; + compatible = "fsl,mpic", "chrp,open-pic"; + device_type = "open-pic"; + }; + + msi0: msi@41600 { + compatible = "fsl,mpic-msi"; + reg = <0x41600 0x200>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe0 0 0 0 + 0xe1 0 0 0 + 0xe2 0 0 0 + 0xe3 0 0 0 + 0xe4 0 0 0 + 0xe5 0 0 0 + 0xe6 0 0 0 + 0xe7 0 0 0>; + }; + + msi1: msi@41800 { + compatible = "fsl,mpic-msi"; + reg = <0x41800 0x200>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe8 0 0 0 + 0xe9 0 0 0 + 0xea 0 0 0 + 0xeb 0 0 0 + 0xec 0 0 0 + 0xed 0 0 0 + 0xee 0 0 0 + 0xef 0 0 0>; + }; + + msi2: msi@41a00 { + compatible = "fsl,mpic-msi"; + reg = <0x41a00 0x200>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xf0 0 0 0 + 0xf1 0 0 0 + 0xf2 0 0 0 + 0xf3 0 0 0 + 0xf4 0 0 0 + 0xf5 0 0 0 + 0xf6 0 0 0 + 0xf7 0 0 0>; + }; + + guts: global-utilities@e0000 { + compatible = 
"fsl,qoriq-device-config-1.0"; + reg = <0xe0000 0xe00>; + fsl,has-rstcr; + #sleep-cells = <1>; + fsl,liodn-bits = <12>; + }; + + pins: global-utilities@e0e00 { + compatible = "fsl,qoriq-pin-control-1.0"; + reg = <0xe0e00 0x200>; + #sleep-cells = <2>; + }; + + clockgen: global-utilities@e1000 { + compatible = "fsl,p4080-clockgen", "fsl,qoriq-clockgen-1.0"; + reg = <0xe1000 0x1000>; + clock-frequency = <0>; + }; + + rcpm: global-utilities@e2000 { + compatible = "fsl,qoriq-rcpm-1.0"; + reg = <0xe2000 0x1000>; + #sleep-cells = <1>; + }; + + sfp: sfp@e8000 { + compatible = "fsl,p4080-sfp", "fsl,qoriq-sfp-1.0"; + reg = <0xe8000 0x1000>; + }; + + serdes: serdes@ea000 { + compatible = "fsl,p4080-serdes"; + reg = <0xea000 0x1000>; + }; + + dma0: dma@100300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,p4080-dma", "fsl,eloplus-dma"; + reg = <0x100300 0x4>; + ranges = <0x0 0x100100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,p4080-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupts = <28 2 0 0>; + }; + dma-channel@80 { + compatible = "fsl,p4080-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupts = <29 2 0 0>; + }; + dma-channel@100 { + compatible = "fsl,p4080-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupts = <30 2 0 0>; + }; + dma-channel@180 { + compatible = "fsl,p4080-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupts = <31 2 0 0>; + }; + }; + + dma1: dma@101300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,p4080-dma", "fsl,eloplus-dma"; + reg = <0x101300 0x4>; + ranges = <0x0 0x101100 0x200>; + cell-index = <1>; + dma-channel@0 { + compatible = "fsl,p4080-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupts = <32 2 0 0>; + }; + dma-channel@80 { + compatible = "fsl,p4080-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupts = <33 2 0 0>; + }; + dma-channel@100 { + compatible = "fsl,p4080-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupts = <34 2 0 0>; + }; + dma-channel@180 { + compatible = "fsl,p4080-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupts = <35 2 0 0>; + }; + }; + + spi@110000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,p4080-espi", "fsl,mpc8536-espi"; + reg = <0x110000 0x1000>; + interrupts = <53 0x2 0 0>; + fsl,espi-num-chipselects = <4>; + }; + + sdhc: sdhc@114000 { + compatible = "fsl,p4080-esdhc", "fsl,esdhc"; + reg = <0x114000 0x1000>; + interrupts = <48 2 0 0>; + voltage-ranges = <3300 3300>; + sdhci,auto-cmd12; + clock-frequency = <0>; + }; + + i2c@118000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x118000 0x100>; + interrupts = <38 2 0 0>; + dfsrr; + }; + + i2c@118100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x118100 0x100>; + interrupts = <38 2 0 0>; + dfsrr; + }; + + i2c@119000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <2>; + compatible = "fsl-i2c"; + reg = <0x119000 0x100>; + interrupts = <39 2 0 0>; + dfsrr; + }; + + i2c@119100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <3>; + compatible = "fsl-i2c"; + reg = <0x119100 0x100>; + interrupts = <39 2 0 0>; + dfsrr; + }; + + 
serial0: serial@11c500 { + cell-index = <0>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x11c500 0x100>; + clock-frequency = <0>; + interrupts = <36 2 0 0>; + }; + + serial1: serial@11c600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x11c600 0x100>; + clock-frequency = <0>; + interrupts = <36 2 0 0>; + }; + + serial2: serial@11d500 { + cell-index = <2>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x11d500 0x100>; + clock-frequency = <0>; + interrupts = <37 2 0 0>; + }; + + serial3: serial@11d600 { + cell-index = <3>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x11d600 0x100>; + clock-frequency = <0>; + interrupts = <37 2 0 0>; + }; + + gpio0: gpio@130000 { + compatible = "fsl,p4080-gpio", "fsl,qoriq-gpio"; + reg = <0x130000 0x1000>; + interrupts = <55 2 0 0>; + #gpio-cells = <2>; + gpio-controller; + }; + + usb0: usb@210000 { + compatible = "fsl,p4080-usb2-mph", + "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph"; + reg = <0x210000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <44 0x2 0 0>; + }; + + usb1: usb@211000 { + compatible = "fsl,p4080-usb2-dr", + "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr"; + reg = <0x211000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <45 0x2 0 0>; + }; + + crypto: crypto@300000 { + compatible = "fsl,sec-v4.0"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x300000 0x10000>; + ranges = <0 0x300000 0x10000>; + interrupt-parent = <&mpic>; + interrupts = <92 2 0 0>; + + sec_jr0: jr@1000 { + compatible = "fsl,sec-v4.0-job-ring"; + reg = <0x1000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <88 2 0 0>; + }; + + sec_jr1: jr@2000 { + compatible = "fsl,sec-v4.0-job-ring"; + reg = <0x2000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <89 2 0 0>; + }; + + sec_jr2: jr@3000 { + compatible = "fsl,sec-v4.0-job-ring"; + reg = <0x3000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <90 2 0 0>; + }; + + sec_jr3: jr@4000 { + compatible = "fsl,sec-v4.0-job-ring"; + reg = <0x4000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <91 2 0 0>; + }; + + rtic@6000 { + compatible = "fsl,sec-v4.0-rtic"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x6000 0x100>; + ranges = <0x0 0x6100 0xe00>; + + rtic_a: rtic-a@0 { + compatible = "fsl,sec-v4.0-rtic-memory"; + reg = <0x00 0x20 0x100 0x80>; + }; + + rtic_b: rtic-b@20 { + compatible = "fsl,sec-v4.0-rtic-memory"; + reg = <0x20 0x20 0x200 0x80>; + }; + + rtic_c: rtic-c@40 { + compatible = "fsl,sec-v4.0-rtic-memory"; + reg = <0x40 0x20 0x300 0x80>; + }; + + rtic_d: rtic-d@60 { + compatible = "fsl,sec-v4.0-rtic-memory"; + reg = <0x60 0x20 0x500 0x80>; + }; + }; + }; + + sec_mon: sec_mon@314000 { + compatible = "fsl,sec-v4.0-mon"; + reg = <0x314000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <93 2 0 0>; + }; + }; + + rapidio0: rapidio@ffe0c0000 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "fsl,rapidio-delta"; + interrupts = < + 16 2 1 11 /* err_irq */ + 56 2 0 0 /* bell_outb_irq */ + 57 2 0 0 /* bell_inb_irq */ + 60 2 0 0 /* msg1_tx_irq */ + 61 2 0 0 /* msg1_rx_irq */ + 62 2 0 0 /* msg2_tx_irq */ + 63 2 0 0>; /* msg2_rx_irq */ + }; + + localbus@ffe124000 { + compatible = "fsl,p4080-elbc", "fsl,elbc", "simple-bus"; + interrupts = <25 2 0 0>; + #address-cells = <2>; + #size-cells = <1>; + }; + + pci0: pcie@ffe200000 { + compatible = "fsl,p4080-pcie"; + device_type = "pci"; + #size-cells = <2>; + #address-cells = <3>; + bus-range = <0x0 0xff>; + clock-frequency = 
<0x1fca055>; + fsl,msi = <&msi0>; + interrupts = <16 2 1 15>; + pcie@0 { + reg = <0 0 0 0 0>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + interrupts = <16 2 1 15>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 40 1 0 0 + 0000 0 0 2 &mpic 1 1 0 0 + 0000 0 0 3 &mpic 2 1 0 0 + 0000 0 0 4 &mpic 3 1 0 0 + >; + }; + }; + + pci1: pcie@ffe201000 { + compatible = "fsl,p4080-pcie"; + device_type = "pci"; + #size-cells = <2>; + #address-cells = <3>; + bus-range = <0 0xff>; + clock-frequency = <0x1fca055>; + fsl,msi = <&msi1>; + interrupts = <16 2 1 14>; + pcie@0 { + reg = <0 0 0 0 0>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + interrupts = <16 2 1 14>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 41 1 0 0 + 0000 0 0 2 &mpic 5 1 0 0 + 0000 0 0 3 &mpic 6 1 0 0 + 0000 0 0 4 &mpic 7 1 0 0 + >; + }; + }; + + pci2: pcie@ffe202000 { + compatible = "fsl,p4080-pcie"; + device_type = "pci"; + #size-cells = <2>; + #address-cells = <3>; + bus-range = <0x0 0xff>; + clock-frequency = <0x1fca055>; + fsl,msi = <&msi2>; + interrupts = <16 2 1 13>; + pcie@0 { + reg = <0 0 0 0 0>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + interrupts = <16 2 1 13>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 42 1 0 0 + 0000 0 0 2 &mpic 9 1 0 0 + 0000 0 0 3 &mpic 10 1 0 0 + 0000 0 0 4 &mpic 11 1 0 0 + >; + }; + }; +}; diff --git a/trunk/arch/powerpc/boot/dts/p5020ds.dts b/trunk/arch/powerpc/boot/dts/p5020ds.dts index 1c250684c902..e6d40999ccd7 100644 --- a/trunk/arch/powerpc/boot/dts/p5020ds.dts +++ b/trunk/arch/powerpc/boot/dts/p5020ds.dts @@ -32,7 +32,7 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -/include/ "fsl/p5020si-pre.dtsi" +/include/ "p5020si.dtsi" / { model = "fsl,P5020DS"; @@ -50,8 +50,6 @@ }; soc: soc@ffe000000 { - ranges = <0x00000000 0xf 0xfe000000 0x1000000>; - reg = <0xf 0xfe000000 0 0x00001000>; spi@110000 { flash@0 { #address-cells = <1>; @@ -101,18 +99,7 @@ }; }; - rio: rapidio@ffe0c0000 { - reg = <0xf 0xfe0c0000 0 0x11000>; - - port1 { - ranges = <0 0 0xc 0x20000000 0 0x10000000>; - }; - port2 { - ranges = <0 0 0xc 0x30000000 0 0x10000000>; - }; - }; - - lbc: localbus@ffe124000 { + localbus@ffe124000 { reg = <0xf 0xfe124000 0 0x1000>; ranges = <0 0 0xf 0xe8000000 0x08000000 2 0 0xf 0xffa00000 0x00040000 @@ -173,7 +160,7 @@ reg = <0xf 0xfe200000 0 0x1000>; ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>; - fsl,msi = <&msi0>; + pcie@0 { ranges = <0x02000000 0 0xe0000000 0x02000000 0 0xe0000000 @@ -189,7 +176,6 @@ reg = <0xf 0xfe201000 0 0x1000>; ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000 0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>; - fsl,msi = <&msi1>; pcie@0 { ranges = <0x02000000 0 0xe0000000 0x02000000 0 0xe0000000 @@ -205,7 +191,6 @@ reg = <0xf 0xfe202000 0 0x1000>; ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000 0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>; - fsl,msi = <&msi2>; pcie@0 { ranges = <0x02000000 0 0xe0000000 0x02000000 0 0xe0000000 @@ -221,7 +206,6 @@ reg = <0xf 0xfe203000 0 0x1000>; ranges = <0x02000000 0 0xe0000000 0xc 0x60000000 0 0x20000000 0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>; - fsl,msi = <&msi2>; pcie@0 { ranges = <0x02000000 0 0xe0000000 0x02000000 0 0xe0000000 @@ -233,5 +217,3 @@ }; }; }; - -/include/ "fsl/p5020si-post.dtsi" diff --git a/trunk/arch/powerpc/boot/dts/p5020si.dtsi b/trunk/arch/powerpc/boot/dts/p5020si.dtsi new file mode 100644 index 000000000000..e7948ad71fa3 --- /dev/null +++ b/trunk/arch/powerpc/boot/dts/p5020si.dtsi @@ -0,0 +1,716 @@ +/* + * P5020 Silicon Device Tree Source + * + * Copyright 2010-2011 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/dts-v1/; + +/ { + compatible = "fsl,P5020"; + #address-cells = <2>; + #size-cells = <2>; + interrupt-parent = <&mpic>; + + aliases { + ccsr = &soc; + dcsr = &dcsr; + + serial0 = &serial0; + serial1 = &serial1; + serial2 = &serial2; + serial3 = &serial3; + pci0 = &pci0; + pci1 = &pci1; + pci2 = &pci2; + pci3 = &pci3; + usb0 = &usb0; + usb1 = &usb1; + dma0 = &dma0; + dma1 = &dma1; + sdhc = &sdhc; + msi0 = &msi0; + msi1 = &msi1; + msi2 = &msi2; + + crypto = &crypto; + sec_jr0 = &sec_jr0; + sec_jr1 = &sec_jr1; + sec_jr2 = &sec_jr2; + sec_jr3 = &sec_jr3; + rtic_a = &rtic_a; + rtic_b = &rtic_b; + rtic_c = &rtic_c; + rtic_d = &rtic_d; + sec_mon = &sec_mon; + +/* + rio0 = &rapidio0; + */ + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu0: PowerPC,e5500@0 { + device_type = "cpu"; + reg = <0>; + next-level-cache = <&L2_0>; + L2_0: l2-cache { + next-level-cache = <&cpc>; + }; + }; + cpu1: PowerPC,e5500@1 { + device_type = "cpu"; + reg = <1>; + next-level-cache = <&L2_1>; + L2_1: l2-cache { + next-level-cache = <&cpc>; + }; + }; + }; + + dcsr: dcsr@f00000000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,dcsr", "simple-bus"; + + dcsr-epu@0 { + compatible = "fsl,dcsr-epu"; + interrupts = <52 2 0 0 + 84 2 0 0 + 85 2 0 0>; + interrupt-parent = <&mpic>; + reg = <0x0 0x1000>; + }; + dcsr-npc { + compatible = "fsl,dcsr-npc"; + reg = <0x1000 0x1000 0x1000000 0x8000>; + }; + dcsr-nxc@2000 { + compatible = "fsl,dcsr-nxc"; + reg = <0x2000 0x1000>; + }; + dcsr-corenet { + compatible = "fsl,dcsr-corenet"; + reg = <0x8000 0x1000 0xB0000 0x1000>; + }; + dcsr-dpaa@9000 { + compatible = "fsl,p5020-dcsr-dpaa", "fsl,dcsr-dpaa"; + reg = <0x9000 0x1000>; + }; + dcsr-ocn@11000 { + compatible = "fsl,p5020-dcsr-ocn", "fsl,dcsr-ocn"; + reg = <0x11000 0x1000>; + }; + dcsr-ddr@12000 { + compatible = "fsl,dcsr-ddr"; + dev-handle = <&ddr1>; + reg = <0x12000 0x1000>; + }; + dcsr-ddr@13000 { + compatible = "fsl,dcsr-ddr"; + dev-handle = <&ddr2>; + reg = <0x13000 0x1000>; + }; + dcsr-nal@18000 { + compatible = "fsl,p5020-dcsr-nal", "fsl,dcsr-nal"; + reg = <0x18000 0x1000>; + }; + dcsr-rcpm@22000 { + compatible = "fsl,p5020-dcsr-rcpm", "fsl,dcsr-rcpm"; + reg = <0x22000 0x1000>; + }; + dcsr-cpu-sb-proxy@40000 { + compatible = "fsl,dcsr-e5500-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu0>; + reg = <0x40000 0x1000>; + }; + dcsr-cpu-sb-proxy@41000 { + compatible = "fsl,dcsr-e5500-sb-proxy", "fsl,dcsr-cpu-sb-proxy"; + cpu-handle = <&cpu1>; + reg = <0x41000 0x1000>; + }; + }; + + soc: soc@ffe000000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "simple-bus"; + ranges = <0x00000000 0xf 0xfe000000 0x1000000>; + reg = <0xf 0xfe000000 0 0x00001000>; + + soc-sram-error { + compatible = "fsl,soc-sram-error"; + interrupts = <16 2 1 29>; + }; + + corenet-law@0 { + compatible = "fsl,corenet-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <32>; + }; + + ddr1: memory-controller@8000 { + compatible = "fsl,qoriq-memory-controller-v4.5", 
"fsl,qoriq-memory-controller"; + reg = <0x8000 0x1000>; + interrupts = <16 2 1 23>; + }; + + ddr2: memory-controller@9000 { + compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller"; + reg = <0x9000 0x1000>; + interrupts = <16 2 1 22>; + }; + + cpc: l3-cache-controller@10000 { + compatible = "fsl,p5020-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache"; + reg = <0x10000 0x1000 + 0x11000 0x1000>; + interrupts = <16 2 1 27 + 16 2 1 26>; + }; + + corenet-cf@18000 { + compatible = "fsl,corenet-cf"; + reg = <0x18000 0x1000>; + interrupts = <16 2 1 31>; + fsl,ccf-num-csdids = <32>; + fsl,ccf-num-snoopids = <32>; + }; + + iommu@20000 { + compatible = "fsl,pamu-v1.0", "fsl,pamu"; + reg = <0x20000 0x4000>; + interrupts = < + 24 2 0 0 + 16 2 1 30>; + }; + + mpic: pic@40000 { + clock-frequency = <0>; + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <4>; + reg = <0x40000 0x40000>; + compatible = "fsl,mpic", "chrp,open-pic"; + device_type = "open-pic"; + }; + + msi0: msi@41600 { + compatible = "fsl,mpic-msi"; + reg = <0x41600 0x200>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe0 0 0 0 + 0xe1 0 0 0 + 0xe2 0 0 0 + 0xe3 0 0 0 + 0xe4 0 0 0 + 0xe5 0 0 0 + 0xe6 0 0 0 + 0xe7 0 0 0>; + }; + + msi1: msi@41800 { + compatible = "fsl,mpic-msi"; + reg = <0x41800 0x200>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe8 0 0 0 + 0xe9 0 0 0 + 0xea 0 0 0 + 0xeb 0 0 0 + 0xec 0 0 0 + 0xed 0 0 0 + 0xee 0 0 0 + 0xef 0 0 0>; + }; + + msi2: msi@41a00 { + compatible = "fsl,mpic-msi"; + reg = <0x41a00 0x200>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xf0 0 0 0 + 0xf1 0 0 0 + 0xf2 0 0 0 + 0xf3 0 0 0 + 0xf4 0 0 0 + 0xf5 0 0 0 + 0xf6 0 0 0 + 0xf7 0 0 0>; + }; + + guts: global-utilities@e0000 { + compatible = "fsl,qoriq-device-config-1.0"; + reg = <0xe0000 0xe00>; + fsl,has-rstcr; + #sleep-cells = <1>; + fsl,liodn-bits = <12>; + }; + + pins: global-utilities@e0e00 { + compatible = "fsl,qoriq-pin-control-1.0"; + reg = <0xe0e00 0x200>; + #sleep-cells = <2>; + }; + + clockgen: global-utilities@e1000 { + compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0"; + reg = <0xe1000 0x1000>; + clock-frequency = <0>; + }; + + rcpm: global-utilities@e2000 { + compatible = "fsl,qoriq-rcpm-1.0"; + reg = <0xe2000 0x1000>; + #sleep-cells = <1>; + }; + + sfp: sfp@e8000 { + compatible = "fsl,p5020-sfp", "fsl,qoriq-sfp-1.0"; + reg = <0xe8000 0x1000>; + }; + + serdes: serdes@ea000 { + compatible = "fsl,p5020-serdes"; + reg = <0xea000 0x1000>; + }; + + dma0: dma@100300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,p5020-dma", "fsl,eloplus-dma"; + reg = <0x100300 0x4>; + ranges = <0x0 0x100100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,p5020-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupts = <28 2 0 0>; + }; + dma-channel@80 { + compatible = "fsl,p5020-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupts = <29 2 0 0>; + }; + dma-channel@100 { + compatible = "fsl,p5020-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupts = <30 2 0 0>; + }; + dma-channel@180 { + compatible = "fsl,p5020-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupts = <31 2 0 0>; + }; + }; + + dma1: dma@101300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,p5020-dma", "fsl,eloplus-dma"; + reg = <0x101300 0x4>; + ranges = <0x0 0x101100 0x200>; + 
cell-index = <1>; + dma-channel@0 { + compatible = "fsl,p5020-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupts = <32 2 0 0>; + }; + dma-channel@80 { + compatible = "fsl,p5020-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupts = <33 2 0 0>; + }; + dma-channel@100 { + compatible = "fsl,p5020-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupts = <34 2 0 0>; + }; + dma-channel@180 { + compatible = "fsl,p5020-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupts = <35 2 0 0>; + }; + }; + + spi@110000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,p5020-espi", "fsl,mpc8536-espi"; + reg = <0x110000 0x1000>; + interrupts = <53 0x2 0 0>; + fsl,espi-num-chipselects = <4>; + }; + + sdhc: sdhc@114000 { + compatible = "fsl,p5020-esdhc", "fsl,esdhc"; + reg = <0x114000 0x1000>; + interrupts = <48 2 0 0>; + sdhci,auto-cmd12; + clock-frequency = <0>; + }; + + i2c@118000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x118000 0x100>; + interrupts = <38 2 0 0>; + dfsrr; + }; + + i2c@118100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x118100 0x100>; + interrupts = <38 2 0 0>; + dfsrr; + }; + + i2c@119000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <2>; + compatible = "fsl-i2c"; + reg = <0x119000 0x100>; + interrupts = <39 2 0 0>; + dfsrr; + }; + + i2c@119100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <3>; + compatible = "fsl-i2c"; + reg = <0x119100 0x100>; + interrupts = <39 2 0 0>; + dfsrr; + }; + + serial0: serial@11c500 { + cell-index = <0>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x11c500 0x100>; + clock-frequency = <0>; + interrupts = <36 2 0 0>; + }; + + serial1: serial@11c600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x11c600 0x100>; + clock-frequency = <0>; + interrupts = <36 2 0 0>; + }; + + serial2: serial@11d500 { + cell-index = <2>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x11d500 0x100>; + clock-frequency = <0>; + interrupts = <37 2 0 0>; + }; + + serial3: serial@11d600 { + cell-index = <3>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x11d600 0x100>; + clock-frequency = <0>; + interrupts = <37 2 0 0>; + }; + + gpio0: gpio@130000 { + compatible = "fsl,p5020-gpio", "fsl,qoriq-gpio"; + reg = <0x130000 0x1000>; + interrupts = <55 2 0 0>; + #gpio-cells = <2>; + gpio-controller; + }; + + usb0: usb@210000 { + compatible = "fsl,p5020-usb2-mph", + "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph"; + reg = <0x210000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <44 0x2 0 0>; + phy_type = "utmi"; + port0; + }; + + usb1: usb@211000 { + compatible = "fsl,p5020-usb2-dr", + "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr"; + reg = <0x211000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <45 0x2 0 0>; + dr_mode = "host"; + phy_type = "utmi"; + }; + + sata@220000 { + compatible = "fsl,p5020-sata", "fsl,pq-sata-v2"; + reg = <0x220000 0x1000>; + interrupts = <68 0x2 0 0>; + }; + + sata@221000 { + compatible = "fsl,p5020-sata", "fsl,pq-sata-v2"; + reg = <0x221000 0x1000>; + interrupts = <69 0x2 0 0>; + }; + + crypto: crypto@300000 { + compatible = "fsl,sec-v4.2", "fsl,sec-v4.0"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x300000 
0x10000>; + ranges = <0 0x300000 0x10000>; + interrupts = <92 2 0 0>; + + sec_jr0: jr@1000 { + compatible = "fsl,sec-v4.2-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x1000 0x1000>; + interrupts = <88 2 0 0>; + }; + + sec_jr1: jr@2000 { + compatible = "fsl,sec-v4.2-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x2000 0x1000>; + interrupts = <89 2 0 0>; + }; + + sec_jr2: jr@3000 { + compatible = "fsl,sec-v4.2-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x3000 0x1000>; + interrupts = <90 2 0 0>; + }; + + sec_jr3: jr@4000 { + compatible = "fsl,sec-v4.2-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x4000 0x1000>; + interrupts = <91 2 0 0>; + }; + + rtic@6000 { + compatible = "fsl,sec-v4.2-rtic", + "fsl,sec-v4.0-rtic"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x6000 0x100>; + ranges = <0x0 0x6100 0xe00>; + + rtic_a: rtic-a@0 { + compatible = "fsl,sec-v4.2-rtic-memory", + "fsl,sec-v4.0-rtic-memory"; + reg = <0x00 0x20 0x100 0x80>; + }; + + rtic_b: rtic-b@20 { + compatible = "fsl,sec-v4.2-rtic-memory", + "fsl,sec-v4.0-rtic-memory"; + reg = <0x20 0x20 0x200 0x80>; + }; + + rtic_c: rtic-c@40 { + compatible = "fsl,sec-v4.2-rtic-memory", + "fsl,sec-v4.0-rtic-memory"; + reg = <0x40 0x20 0x300 0x80>; + }; + + rtic_d: rtic-d@60 { + compatible = "fsl,sec-v4.2-rtic-memory", + "fsl,sec-v4.0-rtic-memory"; + reg = <0x60 0x20 0x500 0x80>; + }; + }; + }; + + sec_mon: sec_mon@314000 { + compatible = "fsl,sec-v4.2-mon", "fsl,sec-v4.0-mon"; + reg = <0x314000 0x1000>; + interrupts = <93 2 0 0>; + }; + }; + +/* + rapidio0: rapidio@ffe0c0000 +*/ + + localbus@ffe124000 { + compatible = "fsl,p5020-elbc", "fsl,elbc", "simple-bus"; + interrupts = <25 2 0 0>; + #address-cells = <2>; + #size-cells = <1>; + }; + + pci0: pcie@ffe200000 { + compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2"; + device_type = "pci"; + #size-cells = <2>; + #address-cells = <3>; + bus-range = <0x0 0xff>; + clock-frequency = <0x1fca055>; + fsl,msi = <&msi0>; + interrupts = <16 2 1 15>; + + pcie@0 { + reg = <0 0 0 0 0>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + interrupts = <16 2 1 15>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 40 1 0 0 + 0000 0 0 2 &mpic 1 1 0 0 + 0000 0 0 3 &mpic 2 1 0 0 + 0000 0 0 4 &mpic 3 1 0 0 + >; + }; + }; + + pci1: pcie@ffe201000 { + compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2"; + device_type = "pci"; + #size-cells = <2>; + #address-cells = <3>; + bus-range = <0 0xff>; + clock-frequency = <0x1fca055>; + fsl,msi = <&msi1>; + interrupts = <16 2 1 14>; + pcie@0 { + reg = <0 0 0 0 0>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + interrupts = <16 2 1 14>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 41 1 0 0 + 0000 0 0 2 &mpic 5 1 0 0 + 0000 0 0 3 &mpic 6 1 0 0 + 0000 0 0 4 &mpic 7 1 0 0 + >; + }; + }; + + pci2: pcie@ffe202000 { + compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2"; + device_type = "pci"; + #size-cells = <2>; + #address-cells = <3>; + bus-range = <0x0 0xff>; + clock-frequency = <0x1fca055>; + fsl,msi = <&msi2>; + interrupts = <16 2 1 13>; + pcie@0 { + reg = <0 0 0 0 0>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + interrupts = <16 2 1 13>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 42 1 0 0 + 0000 0 0 2 &mpic 9 1 0 0 + 0000 0 0 3 &mpic 10 1 0 0 + 0000 0 0 4 &mpic 11 1 0 
0 + >; + }; + }; + + pci3: pcie@ffe203000 { + compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2"; + device_type = "pci"; + #size-cells = <2>; + #address-cells = <3>; + bus-range = <0x0 0xff>; + clock-frequency = <0x1fca055>; + fsl,msi = <&msi2>; + interrupts = <16 2 1 12>; + pcie@0 { + reg = <0 0 0 0 0>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + interrupts = <16 2 1 12>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0 0 1 &mpic 43 1 0 0 + 0000 0 0 2 &mpic 0 1 0 0 + 0000 0 0 3 &mpic 4 1 0 0 + 0000 0 0 4 &mpic 8 1 0 0 + >; + }; + }; +}; diff --git a/trunk/arch/powerpc/boot/dts/sbc8349.dts b/trunk/arch/powerpc/boot/dts/sbc8349.dts index b1e45a8537a5..0dc90f9bd814 100644 --- a/trunk/arch/powerpc/boot/dts/sbc8349.dts +++ b/trunk/arch/powerpc/boot/dts/sbc8349.dts @@ -222,7 +222,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <9 0x8>; @@ -232,7 +232,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <10 0x8>; diff --git a/trunk/arch/powerpc/boot/dts/sbc8548.dts b/trunk/arch/powerpc/boot/dts/sbc8548.dts index 77be77116c2e..94a332251710 100644 --- a/trunk/arch/powerpc/boot/dts/sbc8548.dts +++ b/trunk/arch/powerpc/boot/dts/sbc8548.dts @@ -316,7 +316,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; // reg base, size clock-frequency = <0>; // should we fill in in uboot? interrupts = <0x2a 0x2>; @@ -326,7 +326,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; // reg base, size clock-frequency = <0>; // should we fill in in uboot? 
interrupts = <0x2a 0x2>; diff --git a/trunk/arch/powerpc/boot/dts/sbc8641d.dts b/trunk/arch/powerpc/boot/dts/sbc8641d.dts index 56bebce87842..ee5538feb455 100644 --- a/trunk/arch/powerpc/boot/dts/sbc8641d.dts +++ b/trunk/arch/powerpc/boot/dts/sbc8641d.dts @@ -347,7 +347,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <42 2>; @@ -357,7 +357,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <28 2>; diff --git a/trunk/arch/powerpc/boot/dts/socrates.dts b/trunk/arch/powerpc/boot/dts/socrates.dts index 134a5ff917e1..38c35404bdc3 100644 --- a/trunk/arch/powerpc/boot/dts/socrates.dts +++ b/trunk/arch/powerpc/boot/dts/socrates.dts @@ -199,7 +199,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <42 2>; @@ -209,7 +209,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <42 2>; diff --git a/trunk/arch/powerpc/boot/dts/storcenter.dts b/trunk/arch/powerpc/boot/dts/storcenter.dts index 2a555738517e..eab680ce10da 100644 --- a/trunk/arch/powerpc/boot/dts/storcenter.dts +++ b/trunk/arch/powerpc/boot/dts/storcenter.dts @@ -74,7 +74,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x20>; clock-frequency = <97553800>; /* Hz */ current-speed = <115200>; @@ -85,7 +85,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x20>; clock-frequency = <97553800>; /* Hz */ current-speed = <9600>; diff --git a/trunk/arch/powerpc/boot/dts/stxssa8555.dts b/trunk/arch/powerpc/boot/dts/stxssa8555.dts index 4f166b01c1b6..49efd44057d7 100644 --- a/trunk/arch/powerpc/boot/dts/stxssa8555.dts +++ b/trunk/arch/powerpc/boot/dts/stxssa8555.dts @@ -210,7 +210,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; // reg base, size clock-frequency = <0>; // should we fill in in uboot? interrupts = <42 2>; @@ -220,7 +220,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; // reg base, size clock-frequency = <0>; // should we fill in in uboot? interrupts = <42 2>; diff --git a/trunk/arch/powerpc/boot/dts/tqm8540.dts b/trunk/arch/powerpc/boot/dts/tqm8540.dts index ed264d9ae356..0a4cedbdcb55 100644 --- a/trunk/arch/powerpc/boot/dts/tqm8540.dts +++ b/trunk/arch/powerpc/boot/dts/tqm8540.dts @@ -250,7 +250,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; // reg base, size clock-frequency = <0>; // should we fill in in uboot? 
interrupts = <42 2>; @@ -260,7 +260,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; // reg base, size clock-frequency = <0>; // should we fill in in uboot? interrupts = <42 2>; diff --git a/trunk/arch/powerpc/boot/dts/tqm8541.dts b/trunk/arch/powerpc/boot/dts/tqm8541.dts index 925242115814..f49d09181312 100644 --- a/trunk/arch/powerpc/boot/dts/tqm8541.dts +++ b/trunk/arch/powerpc/boot/dts/tqm8541.dts @@ -224,7 +224,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; // reg base, size clock-frequency = <0>; // should we fill in in uboot? interrupts = <42 2>; @@ -234,7 +234,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; // reg base, size clock-frequency = <0>; // should we fill in in uboot? interrupts = <42 2>; diff --git a/trunk/arch/powerpc/boot/dts/tqm8548-bigflash.dts b/trunk/arch/powerpc/boot/dts/tqm8548-bigflash.dts index 6e1ac50852a4..9452c3c05114 100644 --- a/trunk/arch/powerpc/boot/dts/tqm8548-bigflash.dts +++ b/trunk/arch/powerpc/boot/dts/tqm8548-bigflash.dts @@ -305,7 +305,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; // reg base, size clock-frequency = <0>; // should we fill in in uboot? current-speed = <115200>; @@ -316,7 +316,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; // reg base, size clock-frequency = <0>; // should we fill in in uboot? current-speed = <115200>; @@ -352,7 +352,7 @@ ranges = < 0 0x0 0xfc000000 0x04000000 // NOR FLASH bank 1 1 0x0 0xf8000000 0x08000000 // NOR FLASH bank 0 - 2 0x0 0xa3000000 0x00008000 // CAN (2 x CC770) + 2 0x0 0xa3000000 0x00008000 // CAN (2 x i82527) 3 0x0 0xa3010000 0x00008000 // NAND FLASH >; @@ -393,27 +393,18 @@ }; /* Note: CAN support needs be enabled in U-Boot */ - can@2,0 { - compatible = "bosch,cc770"; // Bosch CC770 + can0@2,0 { + compatible = "intel,82527"; // Bosch CC770 reg = <2 0x0 0x100>; interrupts = <4 1>; interrupt-parent = <&mpic>; - bosch,external-clock-frequency = <16000000>; - bosch,disconnect-rx1-input; - bosch,disconnect-tx1-output; - bosch,iso-low-speed-mux; - bosch,clock-out-frequency = <16000000>; }; - can@2,100 { - compatible = "bosch,cc770"; // Bosch CC770 + can1@2,100 { + compatible = "intel,82527"; // Bosch CC770 reg = <2 0x100 0x100>; interrupts = <4 1>; interrupt-parent = <&mpic>; - bosch,external-clock-frequency = <16000000>; - bosch,disconnect-rx1-input; - bosch,disconnect-tx1-output; - bosch,iso-low-speed-mux; }; /* Note: NAND support needs to be enabled in U-Boot */ diff --git a/trunk/arch/powerpc/boot/dts/tqm8548.dts b/trunk/arch/powerpc/boot/dts/tqm8548.dts index 161e75eac7f7..619776f72c90 100644 --- a/trunk/arch/powerpc/boot/dts/tqm8548.dts +++ b/trunk/arch/powerpc/boot/dts/tqm8548.dts @@ -305,7 +305,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; // reg base, size clock-frequency = <0>; // should we fill in in uboot? 
current-speed = <115200>; @@ -316,7 +316,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; // reg base, size clock-frequency = <0>; // should we fill in in uboot? current-speed = <115200>; @@ -352,7 +352,7 @@ ranges = < 0 0x0 0xfc000000 0x04000000 // NOR FLASH bank 1 1 0x0 0xf8000000 0x08000000 // NOR FLASH bank 0 - 2 0x0 0xe3000000 0x00008000 // CAN (2 x CC770) + 2 0x0 0xe3000000 0x00008000 // CAN (2 x i82527) 3 0x0 0xe3010000 0x00008000 // NAND FLASH >; @@ -393,27 +393,18 @@ }; /* Note: CAN support needs be enabled in U-Boot */ - can@2,0 { - compatible = "bosch,cc770"; // Bosch CC770 + can0@2,0 { + compatible = "intel,82527"; // Bosch CC770 reg = <2 0x0 0x100>; interrupts = <4 1>; interrupt-parent = <&mpic>; - bosch,external-clock-frequency = <16000000>; - bosch,disconnect-rx1-input; - bosch,disconnect-tx1-output; - bosch,iso-low-speed-mux; - bosch,clock-out-frequency = <16000000>; }; - can@2,100 { - compatible = "bosch,cc770"; // Bosch CC770 + can1@2,100 { + compatible = "intel,82527"; // Bosch CC770 reg = <2 0x100 0x100>; interrupts = <4 1>; interrupt-parent = <&mpic>; - bosch,external-clock-frequency = <16000000>; - bosch,disconnect-rx1-input; - bosch,disconnect-tx1-output; - bosch,iso-low-speed-mux; }; /* Note: NAND support needs to be enabled in U-Boot */ diff --git a/trunk/arch/powerpc/boot/dts/tqm8555.dts b/trunk/arch/powerpc/boot/dts/tqm8555.dts index aa6ff0d3dd9a..81bad8cd3756 100644 --- a/trunk/arch/powerpc/boot/dts/tqm8555.dts +++ b/trunk/arch/powerpc/boot/dts/tqm8555.dts @@ -224,7 +224,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; // reg base, size clock-frequency = <0>; // should we fill in in uboot? interrupts = <42 2>; @@ -234,7 +234,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; // reg base, size clock-frequency = <0>; // should we fill in in uboot? 
interrupts = <42 2>; diff --git a/trunk/arch/powerpc/boot/dts/tqm8xx.dts b/trunk/arch/powerpc/boot/dts/tqm8xx.dts index c3dba2518d8c..f6da7ec49a8e 100644 --- a/trunk/arch/powerpc/boot/dts/tqm8xx.dts +++ b/trunk/arch/powerpc/boot/dts/tqm8xx.dts @@ -57,7 +57,6 @@ ranges = < 0x0 0x0 0x40000000 0x800000 - 0x3 0x0 0xc0000000 0x200 >; flash@0,0 { @@ -68,30 +67,6 @@ bank-width = <4>; device-width = <2>; }; - - /* Note: CAN support needs be enabled in U-Boot */ - can@3,0 { - compatible = "intc,82527"; - reg = <3 0x0 0x80>; - interrupts = <8 1>; - interrupt-parent = <&PIC>; - bosch,external-clock-frequency = <16000000>; - bosch,disconnect-rx1-input; - bosch,disconnect-tx1-output; - bosch,iso-low-speed-mux; - bosch,clock-out-frequency = <16000000>; - }; - - can@3,100 { - compatible = "intc,82527"; - reg = <3 0x100 0x80>; - interrupts = <8 1>; - interrupt-parent = <&PIC>; - bosch,external-clock-frequency = <16000000>; - bosch,disconnect-rx1-input; - bosch,disconnect-tx1-output; - bosch,iso-low-speed-mux; - }; }; soc@fff00000 { diff --git a/trunk/arch/powerpc/boot/dts/xcalibur1501.dts b/trunk/arch/powerpc/boot/dts/xcalibur1501.dts index cc00f4ddd9a7..ac0a617b4299 100644 --- a/trunk/arch/powerpc/boot/dts/xcalibur1501.dts +++ b/trunk/arch/powerpc/boot/dts/xcalibur1501.dts @@ -531,7 +531,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <42 2>; @@ -542,7 +542,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <42 2>; diff --git a/trunk/arch/powerpc/boot/dts/xpedite5200.dts b/trunk/arch/powerpc/boot/dts/xpedite5200.dts index 8fd7b7031357..c41a80c55e47 100644 --- a/trunk/arch/powerpc/boot/dts/xpedite5200.dts +++ b/trunk/arch/powerpc/boot/dts/xpedite5200.dts @@ -333,7 +333,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; current-speed = <115200>; @@ -344,7 +344,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; current-speed = <115200>; diff --git a/trunk/arch/powerpc/boot/dts/xpedite5200_xmon.dts b/trunk/arch/powerpc/boot/dts/xpedite5200_xmon.dts index 0baa8283d08c..c0efcbb45137 100644 --- a/trunk/arch/powerpc/boot/dts/xpedite5200_xmon.dts +++ b/trunk/arch/powerpc/boot/dts/xpedite5200_xmon.dts @@ -337,7 +337,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; current-speed = <9600>; @@ -348,7 +348,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; current-speed = <9600>; diff --git a/trunk/arch/powerpc/boot/dts/xpedite5301.dts b/trunk/arch/powerpc/boot/dts/xpedite5301.dts index 53c1c6a9752f..db7faf5ebb39 100644 --- a/trunk/arch/powerpc/boot/dts/xpedite5301.dts +++ b/trunk/arch/powerpc/boot/dts/xpedite5301.dts @@ -441,7 +441,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; 
interrupts = <42 2>; @@ -452,7 +452,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <42 2>; diff --git a/trunk/arch/powerpc/boot/dts/xpedite5330.dts b/trunk/arch/powerpc/boot/dts/xpedite5330.dts index 215225983150..c364ca6ff7d0 100644 --- a/trunk/arch/powerpc/boot/dts/xpedite5330.dts +++ b/trunk/arch/powerpc/boot/dts/xpedite5330.dts @@ -477,7 +477,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <42 2>; @@ -488,7 +488,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <42 2>; diff --git a/trunk/arch/powerpc/boot/dts/xpedite5370.dts b/trunk/arch/powerpc/boot/dts/xpedite5370.dts index 11dbda10d756..7a8a4afd56cf 100644 --- a/trunk/arch/powerpc/boot/dts/xpedite5370.dts +++ b/trunk/arch/powerpc/boot/dts/xpedite5370.dts @@ -439,7 +439,7 @@ serial0: serial@4500 { cell-index = <0>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; interrupts = <42 2>; @@ -450,7 +450,7 @@ serial1: serial@4600 { cell-index = <1>; device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; + compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; interrupts = <42 2>; diff --git a/trunk/arch/powerpc/boot/treeboot-currituck.c b/trunk/arch/powerpc/boot/treeboot-currituck.c deleted file mode 100644 index 925ae43b7467..000000000000 --- a/trunk/arch/powerpc/boot/treeboot-currituck.c +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright © 2011 Tony Breeds IBM Corporation - * - * Based on earlier code: - * Copyright (C) Paul Mackerras 1997. - * - * Matt Porter - * Copyright 2002-2005 MontaVista Software Inc. - * - * Eugene Surovegin or - * Copyright (c) 2003, 2004 Zultys Technologies - * - * Copyright 2007 David Gibson, IBM Corporation. - * Copyright 2010 Ben. Herrenschmidt, IBM Corporation. - * Copyright © 2011 David Kleikamp IBM Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ -#include -#include -#include "types.h" -#include "elf.h" -#include "string.h" -#include "stdio.h" -#include "page.h" -#include "ops.h" -#include "reg.h" -#include "io.h" -#include "dcr.h" -#include "4xx.h" -#include "44x.h" -#include "libfdt.h" - -BSS_STACK(4096); - -#define MAX_RANKS 0x4 -#define DDR3_MR0CF 0x80010011U - -static unsigned long long ibm_currituck_memsize; -static unsigned long long ibm_currituck_detect_memsize(void) -{ - u32 reg; - unsigned i; - unsigned long long memsize = 0; - - for(i = 0; i < MAX_RANKS; i++){ - reg = mfdcrx(DDR3_MR0CF + i); - - if (!(reg & 1)) - continue; - - reg &= 0x0000f000; - reg >>= 12; - memsize += (0x800000ULL << reg); - } - - return memsize; -} - -static void ibm_currituck_fixups(void) -{ - void *devp = finddevice("/"); - u32 dma_ranges[7]; - - dt_fixup_memory(0x0ULL, ibm_currituck_memsize); - - while ((devp = find_node_by_devtype(devp, "pci"))) { - if (getprop(devp, "dma-ranges", dma_ranges, sizeof(dma_ranges)) < 0) { - printf("%s: Failed to get dma-ranges\r\n", __func__); - continue; - } - - dma_ranges[5] = ibm_currituck_memsize >> 32; - dma_ranges[6] = ibm_currituck_memsize & 0xffffffffUL; - - setprop(devp, "dma-ranges", dma_ranges, sizeof(dma_ranges)); - } -} - -#define SPRN_PIR 0x11E /* Processor Indentification Register */ -void platform_init(void) -{ - unsigned long end_of_ram, avail_ram; - u32 pir_reg; - int node, size; - const u32 *timebase; - - ibm_currituck_memsize = ibm_currituck_detect_memsize(); - if (ibm_currituck_memsize >> 32) - end_of_ram = ~0UL; - else - end_of_ram = ibm_currituck_memsize; - avail_ram = end_of_ram - (unsigned long)_end; - - simple_alloc_init(_end, avail_ram, 128, 64); - platform_ops.fixups = ibm_currituck_fixups; - platform_ops.exit = ibm44x_dbcr_reset; - pir_reg = mfspr(SPRN_PIR); - - /* Make sure FDT blob is sane */ - if (fdt_check_header(_dtb_start) != 0) - fatal("Invalid device tree blob\n"); - - node = fdt_node_offset_by_prop_value(_dtb_start, -1, "device_type", - "cpu", sizeof("cpu")); - if (!node) - fatal("Cannot find cpu node\n"); - timebase = fdt_getprop(_dtb_start, node, "timebase-frequency", &size); - if (timebase && (size == 4)) - timebase_period_ns = 1000000000 / *timebase; - - fdt_set_boot_cpuid_phys(_dtb_start, pir_reg); - fdt_init(_dtb_start); - - serial_console_init(); -} diff --git a/trunk/arch/powerpc/boot/wrapper b/trunk/arch/powerpc/boot/wrapper index f090e6d2907e..c74531af72c0 100755 --- a/trunk/arch/powerpc/boot/wrapper +++ b/trunk/arch/powerpc/boot/wrapper @@ -163,7 +163,7 @@ coff) link_address='0x500000' pie= ;; -miboot|uboot*) +miboot|uboot) # miboot and U-boot want just the bare bits, not an ELF binary ext=bin objflags="-O binary" @@ -244,9 +244,6 @@ gamecube|wii) link_address='0x600000' platformo="$object/$platform-head.o $object/$platform.o" ;; -treeboot-currituck) - link_address='0x1000000' - ;; treeboot-iss4xx-mpic) platformo="$object/treeboot-iss4xx.o" ;; @@ -260,8 +257,6 @@ vmz="$tmpdir/`basename \"$kernel\"`.$ext" if [ -z "$cacheit" -o ! -f "$vmz$gzip" -o "$vmz$gzip" -ot "$kernel" ]; then ${CROSS}objcopy $objflags "$kernel" "$vmz.$$" - strip_size=$(stat -c %s $vmz.$$) - if [ -n "$gzip" ]; then gzip -n -f -9 "$vmz.$$" fi @@ -271,24 +266,6 @@ if [ -z "$cacheit" -o ! 
-f "$vmz$gzip" -o "$vmz$gzip" -ot "$kernel" ]; then else vmz="$vmz.$$" fi -else - # Calculate the vmlinux.strip size - ${CROSS}objcopy $objflags "$kernel" "$vmz.$$" - strip_size=$(stat -c %s $vmz.$$) - rm -f $vmz.$$ -fi - -# Round the size to next higher MB limit -round_size=$(((strip_size + 0xfffff) & 0xfff00000)) - -round_size=0x$(printf "%x" $round_size) -link_addr=$(printf "%d" $link_address) - -if [ $link_addr -lt $strip_size ]; then - echo "INFO: Uncompressed kernel (size 0x$(printf "%x\n" $strip_size))" \ - "overlaps the address of the wrapper($link_address)" - echo "INFO: Fixing the link_address of wrapper to ($round_size)" - link_address=$round_size fi vmz="$vmz$gzip" @@ -314,26 +291,6 @@ uboot) fi exit 0 ;; -uboot-obs600) - rm -f "$ofile" - # obs600 wants a multi image with an initrd, so we need to put a fake - # one in even when building a "normal" image. - if [ -n "$initrd" ]; then - real_rd="$initrd" - else - real_rd=`mktemp` - echo "\0" >>"$real_rd" - fi - ${MKIMAGE} -A ppc -O linux -T multi -C gzip -a $membase -e $membase \ - $uboot_version -d "$vmz":"$real_rd":"$dtb" "$ofile" - if [ -z "$initrd" ]; then - rm -f "$real_rd" - fi - if [ -z "$cacheit" ]; then - rm -f "$vmz" - fi - exit 0 - ;; esac addsec() { diff --git a/trunk/arch/powerpc/configs/40x/klondike_defconfig b/trunk/arch/powerpc/configs/40x/klondike_defconfig deleted file mode 100644 index c0d228dc73dc..000000000000 --- a/trunk/arch/powerpc/configs/40x/klondike_defconfig +++ /dev/null @@ -1,55 +0,0 @@ -CONFIG_40x=y -CONFIG_EXPERIMENTAL=y -CONFIG_SYSVIPC=y -CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED=y -CONFIG_SYSFS_DEPRECATED_V2=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_SYSCTL_SYSCALL=y -CONFIG_EMBEDDED=y -CONFIG_SLAB=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_WALNUT is not set -CONFIG_APM8018X=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_MATH_EMULATION=y -# CONFIG_MIGRATION is not set -# CONFIG_SUSPEND is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_PROC_DEVICETREE=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=35000 -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_SG=y -CONFIG_SCSI_SAS_ATTRS=y -# CONFIG_INPUT is not set -# CONFIG_SERIO is not set -# CONFIG_VT is not set -# CONFIG_UNIX98_PTYS is not set -# CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set -# CONFIG_HW_RANDOM is not set -# CONFIG_HWMON is not set -# CONFIG_USB_SUPPORT is not set -# CONFIG_IOMMU_SUPPORT is not set -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -CONFIG_EXT4_FS=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_CRAMFS=y -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=y -CONFIG_NLS_UTF8=y -CONFIG_AVERAGE=y -CONFIG_MAGIC_SYSRQ=y -# CONFIG_SCHED_DEBUG is not set -# CONFIG_DEBUG_BUGVERBOSE is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y -# CONFIG_FTRACE is not set diff --git a/trunk/arch/powerpc/configs/40x/obs600_defconfig b/trunk/arch/powerpc/configs/40x/obs600_defconfig deleted file mode 100644 index 91c110dad2d6..000000000000 --- a/trunk/arch/powerpc/configs/40x/obs600_defconfig +++ /dev/null @@ -1,83 +0,0 @@ -CONFIG_40x=y -CONFIG_EXPERIMENTAL=y -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_LOG_BUF_SHIFT=14 -CONFIG_BLK_DEV_INITRD=y -CONFIG_EXPERT=y -CONFIG_KALLSYMS_ALL=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set -# CONFIG_WALNUT is not set -CONFIG_OBS600=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_MATH_EMULATION=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y 
-CONFIG_INET=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -# CONFIG_INET_XFRM_MODE_TRANSPORT is not set -# CONFIG_INET_XFRM_MODE_TUNNEL is not set -# CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set -# CONFIG_IPV6 is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_CONNECTOR=y -CONFIG_MTD=y -CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_OF_PARTS=y -CONFIG_MTD_CHAR=y -CONFIG_MTD_BLOCK=y -CONFIG_MTD_CFI=y -CONFIG_MTD_JEDECPROBE=y -CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_PHYSMAP_OF=y -CONFIG_MTD_NAND=y -CONFIG_MTD_NAND_NDFC=y -CONFIG_PROC_DEVICETREE=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=35000 -CONFIG_NETDEVICES=y -CONFIG_IBM_EMAC=y -CONFIG_IBM_EMAC_RXB=256 -CONFIG_IBM_EMAC_TXB=256 -# CONFIG_INPUT is not set -# CONFIG_SERIO is not set -# CONFIG_VT is not set -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -CONFIG_SERIAL_OF_PLATFORM=y -# CONFIG_HW_RANDOM is not set -CONFIG_I2C=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_IBM_IIC=y -CONFIG_SENSORS_LM75=y -CONFIG_THERMAL=y -# CONFIG_USB_SUPPORT is not set -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_DS1307=y -CONFIG_EXT2_FS=y -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_CRAMFS=y -CONFIG_NFS_FS=y -CONFIG_NFS_V3=y -CONFIG_ROOT_NFS=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_FS=y -CONFIG_DETECT_HUNG_TASK=y -CONFIG_SYSCTL_SYSCALL_CHECK=y -CONFIG_CRYPTO=y -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_PCBC=y -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/trunk/arch/powerpc/configs/44x/currituck_defconfig b/trunk/arch/powerpc/configs/44x/currituck_defconfig deleted file mode 100644 index 4192322f8a7f..000000000000 --- a/trunk/arch/powerpc/configs/44x/currituck_defconfig +++ /dev/null @@ -1,110 +0,0 @@ -CONFIG_44x=y -CONFIG_SMP=y -CONFIG_EXPERIMENTAL=y -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_SPARSE_IRQ=y -CONFIG_LOG_BUF_SHIFT=14 -CONFIG_EXPERT=y -CONFIG_KALLSYMS_ALL=y -CONFIG_PROFILING=y -CONFIG_OPROFILE=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_PPC_47x=y -# CONFIG_EBONY is not set -CONFIG_CURRITUCK=y -CONFIG_HIGHMEM=y -CONFIG_HZ_100=y -CONFIG_MATH_EMULATION=y -CONFIG_IRQ_ALL_CPUS=y -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="" -# CONFIG_SUSPEND is not set -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -# CONFIG_INET_XFRM_MODE_TRANSPORT is not set -# CONFIG_INET_XFRM_MODE_TUNNEL is not set -# CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set -# CONFIG_IPV6 is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -CONFIG_CONNECTOR=y -CONFIG_MTD=y -CONFIG_MTD_CHAR=y -CONFIG_MTD_BLOCK=y -CONFIG_MTD_JEDECPROBE=y -CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_PHYSMAP_OF=y -CONFIG_PROC_DEVICETREE=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=35000 -# CONFIG_SCSI_PROC_FS is not set -CONFIG_BLK_DEV_SD=y -# CONFIG_SCSI_LOWLEVEL is not set -CONFIG_ATA=y -# CONFIG_SATA_PMP is not set -CONFIG_SATA_SIL24=y -# CONFIG_ATA_SFF is not set -CONFIG_NETDEVICES=y -CONFIG_E1000E=y -# CONFIG_NETDEV_10000 is not set -# CONFIG_INPUT is not set -# CONFIG_SERIO is not set -# CONFIG_VT is not set -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -CONFIG_SERIAL_OF_PLATFORM=y -# CONFIG_HW_RANDOM is not set -CONFIG_I2C=y -CONFIG_I2C_IBM_IIC=y -# CONFIG_HWMON is not set -CONFIG_THERMAL=y -CONFIG_USB=y 
-CONFIG_USB_DEBUG=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_OHCI_HCD=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_M41T80=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_CRAMFS=y -CONFIG_NFS_FS=y -CONFIG_NFS_V3=y -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y -CONFIG_NLS_DEFAULT="n" -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_FS=y -CONFIG_DEBUG_KERNEL=y -CONFIG_DETECT_HUNG_TASK=y -CONFIG_DEBUG_INFO=y -CONFIG_SYSCTL_SYSCALL_CHECK=y -CONFIG_XMON=y -CONFIG_XMON_DEFAULT=y -CONFIG_PPC_EARLY_DEBUG=y -CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW=0x10000000 -CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH=0x200 -CONFIG_CRYPTO=y -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_PCBC=y -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set -# CONFIG_CRYPTO_HW is not set diff --git a/trunk/arch/powerpc/configs/44x/iss476-smp_defconfig b/trunk/arch/powerpc/configs/44x/iss476-smp_defconfig index ca00cf750d3e..a6eb6ad05b2d 100644 --- a/trunk/arch/powerpc/configs/44x/iss476-smp_defconfig +++ b/trunk/arch/powerpc/configs/44x/iss476-smp_defconfig @@ -25,8 +25,7 @@ CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="root=/dev/issblk0" # CONFIG_PCI is not set CONFIG_ADVANCED_OPTIONS=y -CONFIG_NONSTATIC_KERNEL=y -CONFIG_DYNAMIC_MEMSTART=y +CONFIG_RELOCATABLE=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/trunk/arch/powerpc/configs/chroma_defconfig b/trunk/arch/powerpc/configs/chroma_defconfig deleted file mode 100644 index acf7fb280464..000000000000 --- a/trunk/arch/powerpc/configs/chroma_defconfig +++ /dev/null @@ -1,307 +0,0 @@ -CONFIG_PPC64=y -CONFIG_PPC_BOOK3E_64=y -# CONFIG_VIRT_CPU_ACCOUNTING is not set -CONFIG_SMP=y -CONFIG_NR_CPUS=256 -CONFIG_EXPERIMENTAL=y -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_AUDIT=y -CONFIG_AUDITSYSCALL=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=19 -CONFIG_CGROUPS=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CPUSETS=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_RESOURCE_COUNTERS=y -CONFIG_CGROUP_MEM_RES_CTLR=y -CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y -CONFIG_NAMESPACES=y -CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_INITRAMFS_COMPRESSION_GZIP=y -CONFIG_KALLSYMS_ALL=y -CONFIG_EMBEDDED=y -CONFIG_PERF_COUNTERS=y -CONFIG_PROFILING=y -CONFIG_OPROFILE=y -CONFIG_KPROBES=y -CONFIG_MODULES=y -CONFIG_MODULE_FORCE_LOAD=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_SCOM_DEBUGFS=y -CONFIG_PPC_A2_DD2=y -CONFIG_KVM_GUEST=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_HZ_100=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_BINFMT_MISC=y -CONFIG_NUMA=y -# CONFIG_MIGRATION is not set -CONFIG_PPC_64K_PAGES=y -CONFIG_SCHED_SMT=y -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="" -# CONFIG_SECCOMP is not set -CONFIG_PCIEPORTBUS=y -# CONFIG_PCIEASPM is not set -CONFIG_PCI_MSI=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=m -CONFIG_XFRM_SUB_POLICY=y -CONFIG_XFRM_STATISTICS=y -CONFIG_NET_KEY=m -CONFIG_NET_KEY_MIGRATE=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_NET_IPIP=y -CONFIG_IP_MROUTE=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y 
-CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET_IPCOMP=m -CONFIG_IPV6=y -CONFIG_IPV6_PRIVACY=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=y -CONFIG_INET6_ESP=y -CONFIG_INET6_IPCOMP=y -CONFIG_IPV6_MIP6=y -CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=y -CONFIG_IPV6_TUNNEL=y -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_IPV6_MROUTE=y -CONFIG_IPV6_PIMSM_V2=y -CONFIG_NETFILTER=y -CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CT_PROTO_UDPLITE=m -CONFIG_NF_CONNTRACK_FTP=m -CONFIG_NF_CONNTRACK_IRC=m -CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NF_CT_NETLINK=m -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m -CONFIG_NETFILTER_XT_TARGET_CONNMARK=m -CONFIG_NETFILTER_XT_TARGET_MARK=m -CONFIG_NETFILTER_XT_TARGET_NFLOG=m -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_COMMENT=m -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m -CONFIG_NETFILTER_XT_MATCH_CONNMARK=m -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m -CONFIG_NETFILTER_XT_MATCH_DSCP=m -CONFIG_NETFILTER_XT_MATCH_ESP=m -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m -CONFIG_NETFILTER_XT_MATCH_HELPER=m -CONFIG_NETFILTER_XT_MATCH_IPRANGE=m -CONFIG_NETFILTER_XT_MATCH_LENGTH=m -CONFIG_NETFILTER_XT_MATCH_LIMIT=m -CONFIG_NETFILTER_XT_MATCH_MAC=m -CONFIG_NETFILTER_XT_MATCH_MARK=m -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_OWNER=m -CONFIG_NETFILTER_XT_MATCH_POLICY=m -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m -CONFIG_NETFILTER_XT_MATCH_QUOTA=m -CONFIG_NETFILTER_XT_MATCH_RATEEST=m -CONFIG_NETFILTER_XT_MATCH_REALM=m -CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_SCTP=m -CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NETFILTER_XT_MATCH_STATISTIC=m -CONFIG_NETFILTER_XT_MATCH_STRING=m -CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_TIME=m -CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_NF_CONNTRACK_IPV4=m -CONFIG_IP_NF_QUEUE=m -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_AH=m -CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_LOG=m -CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_NET_TCPPROBE=y -# CONFIG_WIRELESS is not set -CONFIG_NET_9P=y -CONFIG_NET_9P_DEBUG=y -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y -CONFIG_MTD=y -CONFIG_MTD_CHAR=y -CONFIG_MTD_BLOCK=y -CONFIG_MTD_CFI=y -CONFIG_MTD_CFI_ADV_OPTIONS=y -CONFIG_MTD_CFI_LE_BYTE_SWAP=y -CONFIG_MTD_CFI_INTELEXT=y -CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_CFI_STAA=y -CONFIG_MTD_PHYSMAP_OF=y -CONFIG_PROC_DEVICETREE=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_CRYPTOLOOP=y -CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=65536 -CONFIG_CDROM_PKTCDVD=y -CONFIG_MISC_DEVICES=y -CONFIG_BLK_DEV_SD=y -CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y -CONFIG_CHR_DEV_SG=y -CONFIG_SCSI_MULTI_LUN=y -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_SPI_ATTRS=y -CONFIG_SCSI_FC_ATTRS=y -CONFIG_SCSI_ISCSI_ATTRS=m -CONFIG_SCSI_SAS_ATTRS=m -CONFIG_SCSI_SRP_ATTRS=y -CONFIG_ATA=y -CONFIG_SATA_AHCI=y -CONFIG_SATA_SIL24=y -CONFIG_SATA_MV=y -CONFIG_SATA_SIL=y -CONFIG_PATA_CMD64X=y -CONFIG_PATA_MARVELL=y -CONFIG_PATA_SIL680=y -CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -CONFIG_MD_LINEAR=y -CONFIG_BLK_DEV_DM=y -CONFIG_DM_CRYPT=y -CONFIG_DM_SNAPSHOT=y -CONFIG_DM_MIRROR=y -CONFIG_DM_ZERO=y -CONFIG_DM_UEVENT=y -CONFIG_NETDEVICES=y -CONFIG_TUN=y -CONFIG_E1000E=y 
-CONFIG_TIGON3=y -# CONFIG_WLAN is not set -# CONFIG_INPUT is not set -# CONFIG_SERIO is not set -# CONFIG_VT is not set -CONFIG_DEVPTS_MULTIPLE_INSTANCES=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_HW_RANDOM=y -CONFIG_RAW_DRIVER=y -CONFIG_MAX_RAW_DEVS=1024 -# CONFIG_HWMON is not set -# CONFIG_VGA_ARB is not set -# CONFIG_USB_SUPPORT is not set -CONFIG_EDAC=y -CONFIG_EDAC_MM_EDAC=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_DS1511=y -CONFIG_RTC_DRV_DS1553=y -CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT2_FS_POSIX_ACL=y -CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT2_FS_XIP=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y -CONFIG_EXT4_FS=y -# CONFIG_DNOTIFY is not set -CONFIG_FUSE_FS=y -CONFIG_ISO9660_FS=y -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_CONFIGFS_FS=m -CONFIG_CRAMFS=y -CONFIG_NFS_FS=y -CONFIG_NFS_V3=y -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y -CONFIG_NFS_V4_1=y -CONFIG_ROOT_NFS=y -CONFIG_CIFS=y -CONFIG_CIFS_WEAK_PW_HASH=y -CONFIG_CIFS_XATTR=y -CONFIG_CIFS_POSIX=y -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=y -CONFIG_CRC_CCITT=m -CONFIG_CRC_T10DIF=y -CONFIG_LIBCRC32C=m -CONFIG_PRINTK_TIME=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_STRIP_ASM_SYMS=y -CONFIG_DETECT_HUNG_TASK=y -# CONFIG_SCHED_DEBUG is not set -CONFIG_DEBUG_INFO=y -CONFIG_FTRACE_SYSCALLS=y -CONFIG_PPC_EMULATED_STATS=y -CONFIG_XMON=y -CONFIG_XMON_DEFAULT=y -CONFIG_VIRQ_DEBUG=y -CONFIG_PPC_EARLY_DEBUG=y -CONFIG_KEYS_DEBUG_PROC_KEYS=y -CONFIG_CRYPTO_NULL=m -CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_CCM=m -CONFIG_CRYPTO_GCM=m -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SHA256=m -CONFIG_CRYPTO_SHA512=m -CONFIG_CRYPTO_TGR192=m -CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_AES=m -CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_CAST5=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_LZO=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_VIRTUALIZATION=y diff --git a/trunk/arch/powerpc/configs/corenet32_smp_defconfig b/trunk/arch/powerpc/configs/corenet32_smp_defconfig index f8aef205d222..f087de6ec03f 100644 --- a/trunk/arch/powerpc/configs/corenet32_smp_defconfig +++ b/trunk/arch/powerpc/configs/corenet32_smp_defconfig @@ -37,8 +37,6 @@ CONFIG_FSL_LBC=y CONFIG_PCI=y CONFIG_PCIEPORTBUS=y # CONFIG_PCIEASPM is not set -CONFIG_RAPIDIO=y -CONFIG_FSL_RIO=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -96,17 +94,17 @@ CONFIG_SATA_SIL24=y CONFIG_SATA_SIL=y CONFIG_PATA_SIL680=y CONFIG_NETDEVICES=y -CONFIG_FSL_PQ_MDIO=y -CONFIG_E1000=y -CONFIG_E1000E=y CONFIG_VITESSE_PHY=y CONFIG_FIXED_PHY=y +CONFIG_NET_ETHERNET=y +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_FSL_PQ_MDIO=y # CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set CONFIG_SERIO_LIBPS2=y # CONFIG_LEGACY_PTYS is not set -CONFIG_PPC_EPAPR_HV_BYTECHAN=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_EXTENDED=y @@ -157,7 +155,6 @@ CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y -CONFIG_HUGETLBFS=y CONFIG_JFFS2_FS=y CONFIG_CRAMFS=y CONFIG_NFS_FS=y diff --git a/trunk/arch/powerpc/configs/corenet64_smp_defconfig b/trunk/arch/powerpc/configs/corenet64_smp_defconfig index 7ed8d4cf2719..782822c32d15 100644 --- a/trunk/arch/powerpc/configs/corenet64_smp_defconfig +++ 
b/trunk/arch/powerpc/configs/corenet64_smp_defconfig @@ -23,8 +23,6 @@ CONFIG_P5020_DS=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BINFMT_MISC=m -CONFIG_RAPIDIO=y -CONFIG_FSL_RIO=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -59,6 +57,7 @@ CONFIG_MISC_DEVICES=y CONFIG_EEPROM_LEGACY=y CONFIG_NETDEVICES=y CONFIG_DUMMY=y +CONFIG_NET_ETHERNET=y CONFIG_INPUT_FF_MEMLESS=m # CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set @@ -82,7 +81,6 @@ CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set CONFIG_PROC_KCORE=y CONFIG_TMPFS=y -CONFIG_HUGETLBFS=y # CONFIG_MISC_FILESYSTEMS is not set CONFIG_PARTITION_ADVANCED=y CONFIG_MAC_PARTITION=y diff --git a/trunk/arch/powerpc/configs/mpc85xx_defconfig b/trunk/arch/powerpc/configs/mpc85xx_defconfig index f37a2ab48881..a1e5a178a4ac 100644 --- a/trunk/arch/powerpc/configs/mpc85xx_defconfig +++ b/trunk/arch/powerpc/configs/mpc85xx_defconfig @@ -1,4 +1,5 @@ CONFIG_PPC_85xx=y +CONFIG_PHYS_64BIT=y CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -92,14 +93,15 @@ CONFIG_SATA_FSL=y CONFIG_PATA_ALI=y CONFIG_NETDEVICES=y CONFIG_DUMMY=y -CONFIG_FS_ENET=y -CONFIG_UCC_GETH=y -CONFIG_GIANFAR=y CONFIG_MARVELL_PHY=y CONFIG_DAVICOM_PHY=y CONFIG_CICADA_PHY=y CONFIG_VITESSE_PHY=y CONFIG_FIXED_PHY=y +CONFIG_NET_ETHERNET=y +CONFIG_FS_ENET=y +CONFIG_GIANFAR=y +CONFIG_UCC_GETH=y CONFIG_INPUT_FF_MEMLESS=m # CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set @@ -118,9 +120,6 @@ CONFIG_NVRAM=y CONFIG_I2C=y CONFIG_I2C_CPM=m CONFIG_I2C_MPC=y -CONFIG_SPI=y -CONFIG_SPI_FSL_SPI=y -CONFIG_SPI_FSL_ESPI=y CONFIG_GPIO_MPC8XXX=y # CONFIG_HWMON is not set CONFIG_VIDEO_OUTPUT_CONTROL=y @@ -164,10 +163,6 @@ CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PPC_OF_BE=y CONFIG_USB_OHCI_HCD_PPC_OF_LE=y CONFIG_USB_STORAGE=y -CONFIG_MMC=y -CONFIG_MMC_SDHCI=y -CONFIG_MMC_SDHCI_PLTFM=y -CONFIG_MMC_SDHCI_OF_ESDHC=y CONFIG_EDAC=y CONFIG_EDAC_MM_EDAC=y CONFIG_RTC_CLASS=y @@ -187,7 +182,6 @@ CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y -CONFIG_HUGETLBFS=y CONFIG_ADFS_FS=m CONFIG_AFFS_FS=m CONFIG_HFS_FS=m @@ -219,5 +213,4 @@ CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=y CONFIG_CRYPTO_AES=y # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DEV_FSL_CAAM=y CONFIG_CRYPTO_DEV_TALITOS=y diff --git a/trunk/arch/powerpc/configs/mpc85xx_smp_defconfig b/trunk/arch/powerpc/configs/mpc85xx_smp_defconfig index abdcd317cda7..dd1e41386c4c 100644 --- a/trunk/arch/powerpc/configs/mpc85xx_smp_defconfig +++ b/trunk/arch/powerpc/configs/mpc85xx_smp_defconfig @@ -1,4 +1,5 @@ CONFIG_PPC_85xx=y +CONFIG_PHYS_64BIT=y CONFIG_SMP=y CONFIG_NR_CPUS=8 CONFIG_EXPERIMENTAL=y @@ -25,7 +26,6 @@ CONFIG_MPC85xx_MDS=y CONFIG_MPC8536_DS=y CONFIG_MPC85xx_DS=y CONFIG_MPC85xx_RDB=y -CONFIG_P1010_RDB=y CONFIG_P1022_DS=y CONFIG_P1023_RDS=y CONFIG_SOCRATES=y @@ -94,14 +94,15 @@ CONFIG_SATA_FSL=y CONFIG_PATA_ALI=y CONFIG_NETDEVICES=y CONFIG_DUMMY=y -CONFIG_FS_ENET=y -CONFIG_UCC_GETH=y -CONFIG_GIANFAR=y CONFIG_MARVELL_PHY=y CONFIG_DAVICOM_PHY=y CONFIG_CICADA_PHY=y CONFIG_VITESSE_PHY=y CONFIG_FIXED_PHY=y +CONFIG_NET_ETHERNET=y +CONFIG_FS_ENET=y +CONFIG_GIANFAR=y +CONFIG_UCC_GETH=y CONFIG_INPUT_FF_MEMLESS=m # CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set @@ -120,9 +121,6 @@ CONFIG_NVRAM=y CONFIG_I2C=y CONFIG_I2C_CPM=m CONFIG_I2C_MPC=y -CONFIG_SPI=y -CONFIG_SPI_FSL_SPI=y -CONFIG_SPI_FSL_ESPI=y CONFIG_GPIO_MPC8XXX=y # CONFIG_HWMON is not set CONFIG_VIDEO_OUTPUT_CONTROL=y @@ -166,10 +164,6 @@ CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PPC_OF_BE=y 
CONFIG_USB_OHCI_HCD_PPC_OF_LE=y CONFIG_USB_STORAGE=y -CONFIG_MMC=y -CONFIG_MMC_SDHCI=y -CONFIG_MMC_SDHCI_PLTFM=y -CONFIG_MMC_SDHCI_OF_ESDHC=y CONFIG_EDAC=y CONFIG_EDAC_MM_EDAC=y CONFIG_RTC_CLASS=y @@ -189,7 +183,6 @@ CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y -CONFIG_HUGETLBFS=y CONFIG_ADFS_FS=m CONFIG_AFFS_FS=m CONFIG_HFS_FS=m @@ -221,5 +214,4 @@ CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=y CONFIG_CRYPTO_AES=y # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DEV_FSL_CAAM=y CONFIG_CRYPTO_DEV_TALITOS=y diff --git a/trunk/arch/powerpc/configs/ppc64_defconfig b/trunk/arch/powerpc/configs/ppc64_defconfig index 2156e077859b..535711fcb13c 100644 --- a/trunk/arch/powerpc/configs/ppc64_defconfig +++ b/trunk/arch/powerpc/configs/ppc64_defconfig @@ -390,11 +390,6 @@ CONFIG_HUGETLBFS=y CONFIG_HFS_FS=m CONFIG_HFSPLUS_FS=m CONFIG_CRAMFS=m -CONFIG_SQUASHFS=m -CONFIG_SQUASHFS_XATTR=y -CONFIG_SQUASHFS_ZLIB=y -CONFIG_SQUASHFS_LZO=y -CONFIG_SQUASHFS_XZ=y CONFIG_NFS_FS=y CONFIG_NFS_V3=y CONFIG_NFS_V3_ACL=y diff --git a/trunk/arch/powerpc/configs/ps3_defconfig b/trunk/arch/powerpc/configs/ps3_defconfig index ded867871e97..185c292b0f1c 100644 --- a/trunk/arch/powerpc/configs/ps3_defconfig +++ b/trunk/arch/powerpc/configs/ps3_defconfig @@ -6,10 +6,10 @@ CONFIG_NR_CPUS=2 CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_SPARSE_IRQ=y +CONFIG_NAMESPACES=y CONFIG_BLK_DEV_INITRD=y -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_EMBEDDED=y +CONFIG_EXPERT=y +CONFIG_KALLSYMS_EXTRA_PASS=y # CONFIG_PERF_EVENTS is not set # CONFIG_COMPAT_BRK is not set CONFIG_SLAB=y @@ -17,7 +17,6 @@ CONFIG_PROFILING=y CONFIG_OPROFILE=m CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -# CONFIG_PPC_POWERNV is not set # CONFIG_PPC_PSERIES is not set # CONFIG_PPC_PMAC is not set CONFIG_PPC_PS3=y @@ -28,14 +27,14 @@ CONFIG_PS3_VRAM=m CONFIG_PS3_LPM=m # CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set CONFIG_HIGH_RES_TIMERS=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=y CONFIG_KEXEC=y +CONFIG_SPARSE_IRQ=y # CONFIG_SPARSEMEM_VMEMMAP is not set CONFIG_SCHED_SMT=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="" -CONFIG_PM_RUNTIME=y +CONFIG_PM=y CONFIG_PM_DEBUG=y # CONFIG_SECCOMP is not set # CONFIG_PCI is not set @@ -82,23 +81,20 @@ CONFIG_SCSI_MULTI_LUN=y CONFIG_MD=y CONFIG_BLK_DEV_DM=m CONFIG_NETDEVICES=y -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CHELSIO is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_STMICRO is not set +CONFIG_NET_ETHERNET=y CONFIG_GELIC_NET=y CONFIG_GELIC_WIRELESS=y -# CONFIG_NET_VENDOR_XILINX is not set +# CONFIG_NETDEV_10000 is not set CONFIG_USB_USBNET=m # CONFIG_USB_NET_CDCETHER is not set -# CONFIG_USB_NET_CDC_NCM is not set # CONFIG_USB_NET_NET1080 is not set # CONFIG_USB_NET_CDC_SUBSET is not set # CONFIG_USB_NET_ZAURUS is not set +CONFIG_PPP=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPP_ASYNC=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPPOE=m CONFIG_INPUT_FF_MEMLESS=m # CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_JOYDEV=m @@ -139,21 +135,22 @@ CONFIG_USB=m CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_DEVICEFS=y # CONFIG_USB_DEVICE_CLASS is not set -CONFIG_USB_SUSPEND=y CONFIG_USB_MON=m CONFIG_USB_EHCI_HCD=m +CONFIG_USB_EHCI_TT_NEWSCHED=y # CONFIG_USB_EHCI_HCD_PPC_OF is not set CONFIG_USB_OHCI_HCD=m CONFIG_USB_STORAGE=m CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_PS3=y -# CONFIG_IOMMU_SUPPORT is not 
set +CONFIG_RTC_DRV_PS3=m CONFIG_EXT2_FS=m CONFIG_EXT3_FS=m # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set CONFIG_EXT4_FS=y +CONFIG_INOTIFY=y CONFIG_QUOTA=y CONFIG_QFMT_V2=y +CONFIG_AUTOFS_FS=m CONFIG_AUTOFS4_FS=m CONFIG_ISO9660_FS=m CONFIG_JOLIET=y @@ -170,17 +167,19 @@ CONFIG_CIFS=m CONFIG_NLS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -CONFIG_CRC_CCITT=m CONFIG_CRC_T10DIF=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_FS=y +CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y CONFIG_PROVE_LOCKING=y CONFIG_DEBUG_LOCKDEP=y +CONFIG_DEBUG_SPINLOCK_SLEEP=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_WRITECOUNT=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_DEBUG_LIST=y +# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_SYSCTL_SYSCALL_CHECK=y # CONFIG_FTRACE is not set CONFIG_DEBUG_STACKOVERFLOW=y diff --git a/trunk/arch/powerpc/configs/pseries_defconfig b/trunk/arch/powerpc/configs/pseries_defconfig index 30e7d0d20e49..a72f2415a647 100644 --- a/trunk/arch/powerpc/configs/pseries_defconfig +++ b/trunk/arch/powerpc/configs/pseries_defconfig @@ -304,11 +304,6 @@ CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_HUGETLBFS=y CONFIG_CRAMFS=m -CONFIG_SQUASHFS=m -CONFIG_SQUASHFS_XATTR=y -CONFIG_SQUASHFS_ZLIB=y -CONFIG_SQUASHFS_LZO=y -CONFIG_SQUASHFS_XZ=y CONFIG_NFS_FS=y CONFIG_NFS_V3=y CONFIG_NFS_V3_ACL=y diff --git a/trunk/arch/powerpc/include/asm/Kbuild b/trunk/arch/powerpc/include/asm/Kbuild index 7e313f1ed183..d51df17c7e6f 100644 --- a/trunk/arch/powerpc/include/asm/Kbuild +++ b/trunk/arch/powerpc/include/asm/Kbuild @@ -34,5 +34,3 @@ header-y += termios.h header-y += types.h header-y += ucontext.h header-y += unistd.h - -generic-y += rwsem.h diff --git a/trunk/arch/powerpc/include/asm/cputable.h b/trunk/arch/powerpc/include/asm/cputable.h index ad55a1ccb9fb..e30442c539ce 100644 --- a/trunk/arch/powerpc/include/asm/cputable.h +++ b/trunk/arch/powerpc/include/asm/cputable.h @@ -201,7 +201,6 @@ extern const char *powerpc_base_platform; #define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0400000000000000) #define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0800000000000000) #define CPU_FTR_ICSWX LONG_ASM_CONST(0x1000000000000000) -#define CPU_FTR_VMX_COPY LONG_ASM_CONST(0x2000000000000000) #ifndef __ASSEMBLY__ @@ -426,7 +425,7 @@ extern const char *powerpc_base_platform; CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ - CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY) + CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE) #define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ @@ -438,7 +437,7 @@ extern const char *powerpc_base_platform; #define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2) #define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \ - CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | CPU_FTR_ICSWX) + CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN) #ifdef __powerpc64__ #ifdef CONFIG_PPC_BOOK3E diff --git a/trunk/arch/powerpc/include/asm/cputime.h b/trunk/arch/powerpc/include/asm/cputime.h index 487d46ff68a1..1cf20bdfbeca 100644 --- a/trunk/arch/powerpc/include/asm/cputime.h +++ b/trunk/arch/powerpc/include/asm/cputime.h @@ -29,8 +29,25 @@ static inline void setup_cputime_one_jiffy(void) { } #include #include -typedef u64 __nocast cputime_t; -typedef u64 __nocast cputime64_t; +typedef u64 cputime_t; +typedef u64 cputime64_t; + +#define cputime_zero ((cputime_t)0) +#define cputime_max ((~((cputime_t)0) >> 1) 
- 1) +#define cputime_add(__a, __b) ((__a) + (__b)) +#define cputime_sub(__a, __b) ((__a) - (__b)) +#define cputime_div(__a, __n) ((__a) / (__n)) +#define cputime_halve(__a) ((__a) >> 1) +#define cputime_eq(__a, __b) ((__a) == (__b)) +#define cputime_gt(__a, __b) ((__a) > (__b)) +#define cputime_ge(__a, __b) ((__a) >= (__b)) +#define cputime_lt(__a, __b) ((__a) < (__b)) +#define cputime_le(__a, __b) ((__a) <= (__b)) + +#define cputime64_zero ((cputime64_t)0) +#define cputime64_add(__a, __b) ((__a) + (__b)) +#define cputime64_sub(__a, __b) ((__a) - (__b)) +#define cputime_to_cputime64(__ct) (__ct) #ifdef __KERNEL__ @@ -48,7 +65,7 @@ DECLARE_PER_CPU(unsigned long, cputime_scaled_last_delta); static inline unsigned long cputime_to_jiffies(const cputime_t ct) { - return mulhdu((__force u64) ct, __cputime_jiffies_factor); + return mulhdu(ct, __cputime_jiffies_factor); } /* Estimate the scaled cputime by scaling the real cputime based on @@ -57,15 +74,14 @@ static inline cputime_t cputime_to_scaled(const cputime_t ct) { if (cpu_has_feature(CPU_FTR_SPURR) && __get_cpu_var(cputime_last_delta)) - return (__force u64) ct * - __get_cpu_var(cputime_scaled_last_delta) / - __get_cpu_var(cputime_last_delta); + return ct * __get_cpu_var(cputime_scaled_last_delta) / + __get_cpu_var(cputime_last_delta); return ct; } static inline cputime_t jiffies_to_cputime(const unsigned long jif) { - u64 ct; + cputime_t ct; unsigned long sec; /* have to be a little careful about overflow */ @@ -77,7 +93,7 @@ static inline cputime_t jiffies_to_cputime(const unsigned long jif) } if (sec) ct += (cputime_t) sec * tb_ticks_per_sec; - return (__force cputime_t) ct; + return ct; } static inline void setup_cputime_one_jiffy(void) @@ -87,7 +103,7 @@ static inline void setup_cputime_one_jiffy(void) static inline cputime64_t jiffies64_to_cputime64(const u64 jif) { - u64 ct; + cputime_t ct; u64 sec; /* have to be a little careful about overflow */ @@ -98,28 +114,28 @@ static inline cputime64_t jiffies64_to_cputime64(const u64 jif) do_div(ct, HZ); } if (sec) - ct += (u64) sec * tb_ticks_per_sec; - return (__force cputime64_t) ct; + ct += (cputime_t) sec * tb_ticks_per_sec; + return ct; } static inline u64 cputime64_to_jiffies64(const cputime_t ct) { - return mulhdu((__force u64) ct, __cputime_jiffies_factor); + return mulhdu(ct, __cputime_jiffies_factor); } /* * Convert cputime <-> microseconds */ -extern u64 __cputime_usec_factor; +extern u64 __cputime_msec_factor; static inline unsigned long cputime_to_usecs(const cputime_t ct) { - return mulhdu((__force u64) ct, __cputime_usec_factor); + return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC; } static inline cputime_t usecs_to_cputime(const unsigned long us) { - u64 ct; + cputime_t ct; unsigned long sec; /* have to be a little careful about overflow */ @@ -127,15 +143,13 @@ static inline cputime_t usecs_to_cputime(const unsigned long us) sec = us / 1000000; if (ct) { ct *= tb_ticks_per_sec; - do_div(ct, 1000000); + do_div(ct, 1000); } if (sec) ct += (cputime_t) sec * tb_ticks_per_sec; - return (__force cputime_t) ct; + return ct; } -#define usecs_to_cputime64(us) usecs_to_cputime(us) - /* * Convert cputime <-> seconds */ @@ -143,12 +157,12 @@ extern u64 __cputime_sec_factor; static inline unsigned long cputime_to_secs(const cputime_t ct) { - return mulhdu((__force u64) ct, __cputime_sec_factor); + return mulhdu(ct, __cputime_sec_factor); } static inline cputime_t secs_to_cputime(const unsigned long sec) { - return (__force cputime_t)((u64) sec * tb_ticks_per_sec); + return 
(cputime_t) sec * tb_ticks_per_sec; } /* @@ -156,7 +170,7 @@ static inline cputime_t secs_to_cputime(const unsigned long sec) */ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p) { - u64 x = (__force u64) ct; + u64 x = ct; unsigned int frac; frac = do_div(x, tb_ticks_per_sec); @@ -168,11 +182,11 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p) static inline cputime_t timespec_to_cputime(const struct timespec *p) { - u64 ct; + cputime_t ct; ct = (u64) p->tv_nsec * tb_ticks_per_sec; do_div(ct, 1000000000); - return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec); + return ct + (u64) p->tv_sec * tb_ticks_per_sec; } /* @@ -180,7 +194,7 @@ static inline cputime_t timespec_to_cputime(const struct timespec *p) */ static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p) { - u64 x = (__force u64) ct; + u64 x = ct; unsigned int frac; frac = do_div(x, tb_ticks_per_sec); @@ -192,11 +206,11 @@ static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p) static inline cputime_t timeval_to_cputime(const struct timeval *p) { - u64 ct; + cputime_t ct; ct = (u64) p->tv_usec * tb_ticks_per_sec; do_div(ct, 1000000); - return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec); + return ct + (u64) p->tv_sec * tb_ticks_per_sec; } /* @@ -206,12 +220,12 @@ extern u64 __cputime_clockt_factor; static inline unsigned long cputime_to_clock_t(const cputime_t ct) { - return mulhdu((__force u64) ct, __cputime_clockt_factor); + return mulhdu(ct, __cputime_clockt_factor); } static inline cputime_t clock_t_to_cputime(const unsigned long clk) { - u64 ct; + cputime_t ct; unsigned long sec; /* have to be a little careful about overflow */ @@ -222,8 +236,8 @@ static inline cputime_t clock_t_to_cputime(const unsigned long clk) do_div(ct, USER_HZ); } if (sec) - ct += (u64) sec * tb_ticks_per_sec; - return (__force cputime_t) ct; + ct += (cputime_t) sec * tb_ticks_per_sec; + return ct; } #define cputime64_to_clock_t(ct) cputime_to_clock_t((cputime_t)(ct)) diff --git a/trunk/arch/powerpc/include/asm/fsl_ifc.h b/trunk/arch/powerpc/include/asm/fsl_ifc.h deleted file mode 100644 index b955012939a2..000000000000 --- a/trunk/arch/powerpc/include/asm/fsl_ifc.h +++ /dev/null @@ -1,834 +0,0 @@ -/* Freescale Integrated Flash Controller - * - * Copyright 2011 Freescale Semiconductor, Inc - * - * Author: Dipen Dudhat - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __ASM_FSL_IFC_H -#define __ASM_FSL_IFC_H - -#include -#include -#include - -#include -#include - -#define FSL_IFC_BANK_COUNT 4 - -/* - * CSPR - Chip Select Property Register - */ -#define CSPR_BA 0xFFFF0000 -#define CSPR_BA_SHIFT 16 -#define CSPR_PORT_SIZE 0x00000180 -#define CSPR_PORT_SIZE_SHIFT 7 -/* Port Size 8 bit */ -#define CSPR_PORT_SIZE_8 0x00000080 -/* Port Size 16 bit */ -#define CSPR_PORT_SIZE_16 0x00000100 -/* Port Size 32 bit */ -#define CSPR_PORT_SIZE_32 0x00000180 -/* Write Protect */ -#define CSPR_WP 0x00000040 -#define CSPR_WP_SHIFT 6 -/* Machine Select */ -#define CSPR_MSEL 0x00000006 -#define CSPR_MSEL_SHIFT 1 -/* NOR */ -#define CSPR_MSEL_NOR 0x00000000 -/* NAND */ -#define CSPR_MSEL_NAND 0x00000002 -/* GPCM */ -#define CSPR_MSEL_GPCM 0x00000004 -/* Bank Valid */ -#define CSPR_V 0x00000001 -#define CSPR_V_SHIFT 0 - -/* - * Address Mask Register - */ -#define IFC_AMASK_MASK 0xFFFF0000 -#define IFC_AMASK_SHIFT 16 -#define IFC_AMASK(n) (IFC_AMASK_MASK << \ - (__ilog2(n) - IFC_AMASK_SHIFT)) - -/* - * Chip Select Option Register IFC_NAND Machine - */ -/* Enable ECC Encoder */ -#define CSOR_NAND_ECC_ENC_EN 0x80000000 -#define CSOR_NAND_ECC_MODE_MASK 0x30000000 -/* 4 bit correction per 520 Byte sector */ -#define CSOR_NAND_ECC_MODE_4 0x00000000 -/* 8 bit correction per 528 Byte sector */ -#define CSOR_NAND_ECC_MODE_8 0x10000000 -/* Enable ECC Decoder */ -#define CSOR_NAND_ECC_DEC_EN 0x04000000 -/* Row Address Length */ -#define CSOR_NAND_RAL_MASK 0x01800000 -#define CSOR_NAND_RAL_SHIFT 20 -#define CSOR_NAND_RAL_1 0x00000000 -#define CSOR_NAND_RAL_2 0x00800000 -#define CSOR_NAND_RAL_3 0x01000000 -#define CSOR_NAND_RAL_4 0x01800000 -/* Page Size 512b, 2k, 4k */ -#define CSOR_NAND_PGS_MASK 0x00180000 -#define CSOR_NAND_PGS_SHIFT 16 -#define CSOR_NAND_PGS_512 0x00000000 -#define CSOR_NAND_PGS_2K 0x00080000 -#define CSOR_NAND_PGS_4K 0x00100000 -/* Spare region Size */ -#define CSOR_NAND_SPRZ_MASK 0x0000E000 -#define CSOR_NAND_SPRZ_SHIFT 13 -#define CSOR_NAND_SPRZ_16 0x00000000 -#define CSOR_NAND_SPRZ_64 0x00002000 -#define CSOR_NAND_SPRZ_128 0x00004000 -#define CSOR_NAND_SPRZ_210 0x00006000 -#define CSOR_NAND_SPRZ_218 0x00008000 -#define CSOR_NAND_SPRZ_224 0x0000A000 -/* Pages Per Block */ -#define CSOR_NAND_PB_MASK 0x00000700 -#define CSOR_NAND_PB_SHIFT 8 -#define CSOR_NAND_PB(n) ((__ilog2(n) - 5) << CSOR_NAND_PB_SHIFT) -/* Time for Read Enable High to Output High Impedance */ -#define CSOR_NAND_TRHZ_MASK 0x0000001C -#define CSOR_NAND_TRHZ_SHIFT 2 -#define CSOR_NAND_TRHZ_20 0x00000000 -#define CSOR_NAND_TRHZ_40 0x00000004 -#define CSOR_NAND_TRHZ_60 0x00000008 -#define CSOR_NAND_TRHZ_80 0x0000000C -#define CSOR_NAND_TRHZ_100 0x00000010 -/* Buffer control disable */ -#define CSOR_NAND_BCTLD 0x00000001 - -/* - * Chip Select Option Register - NOR Flash Mode - */ -/* Enable Address shift Mode */ -#define CSOR_NOR_ADM_SHFT_MODE_EN 0x80000000 -/* Page Read Enable from NOR device */ -#define CSOR_NOR_PGRD_EN 0x10000000 -/* AVD Toggle Enable during Burst Program */ -#define CSOR_NOR_AVD_TGL_PGM_EN 0x01000000 -/* Address Data Multiplexing Shift */ -#define CSOR_NOR_ADM_MASK 0x0003E000 -#define CSOR_NOR_ADM_SHIFT_SHIFT 13 -#define CSOR_NOR_ADM_SHIFT(n) ((n) << CSOR_NOR_ADM_SHIFT_SHIFT) -/* Type of the NOR device hooked */ -#define CSOR_NOR_NOR_MODE_AYSNC_NOR 
0x00000000 -#define CSOR_NOR_NOR_MODE_AVD_NOR 0x00000020 -/* Time for Read Enable High to Output High Impedance */ -#define CSOR_NOR_TRHZ_MASK 0x0000001C -#define CSOR_NOR_TRHZ_SHIFT 2 -#define CSOR_NOR_TRHZ_20 0x00000000 -#define CSOR_NOR_TRHZ_40 0x00000004 -#define CSOR_NOR_TRHZ_60 0x00000008 -#define CSOR_NOR_TRHZ_80 0x0000000C -#define CSOR_NOR_TRHZ_100 0x00000010 -/* Buffer control disable */ -#define CSOR_NOR_BCTLD 0x00000001 - -/* - * Chip Select Option Register - GPCM Mode - */ -/* GPCM Mode - Normal */ -#define CSOR_GPCM_GPMODE_NORMAL 0x00000000 -/* GPCM Mode - GenericASIC */ -#define CSOR_GPCM_GPMODE_ASIC 0x80000000 -/* Parity Mode odd/even */ -#define CSOR_GPCM_PARITY_EVEN 0x40000000 -/* Parity Checking enable/disable */ -#define CSOR_GPCM_PAR_EN 0x20000000 -/* GPCM Timeout Count */ -#define CSOR_GPCM_GPTO_MASK 0x0F000000 -#define CSOR_GPCM_GPTO_SHIFT 24 -#define CSOR_GPCM_GPTO(n) ((__ilog2(n) - 8) << CSOR_GPCM_GPTO_SHIFT) -/* GPCM External Access Termination mode for read access */ -#define CSOR_GPCM_RGETA_EXT 0x00080000 -/* GPCM External Access Termination mode for write access */ -#define CSOR_GPCM_WGETA_EXT 0x00040000 -/* Address Data Multiplexing Shift */ -#define CSOR_GPCM_ADM_MASK 0x0003E000 -#define CSOR_GPCM_ADM_SHIFT_SHIFT 13 -#define CSOR_GPCM_ADM_SHIFT(n) ((n) << CSOR_GPCM_ADM_SHIFT_SHIFT) -/* Generic ASIC Parity error indication delay */ -#define CSOR_GPCM_GAPERRD_MASK 0x00000180 -#define CSOR_GPCM_GAPERRD_SHIFT 7 -#define CSOR_GPCM_GAPERRD(n) (((n) - 1) << CSOR_GPCM_GAPERRD_SHIFT) -/* Time for Read Enable High to Output High Impedance */ -#define CSOR_GPCM_TRHZ_MASK 0x0000001C -#define CSOR_GPCM_TRHZ_20 0x00000000 -#define CSOR_GPCM_TRHZ_40 0x00000004 -#define CSOR_GPCM_TRHZ_60 0x00000008 -#define CSOR_GPCM_TRHZ_80 0x0000000C -#define CSOR_GPCM_TRHZ_100 0x00000010 -/* Buffer control disable */ -#define CSOR_GPCM_BCTLD 0x00000001 - -/* - * Ready Busy Status Register (RB_STAT) - */ -/* CSn is READY */ -#define IFC_RB_STAT_READY_CS0 0x80000000 -#define IFC_RB_STAT_READY_CS1 0x40000000 -#define IFC_RB_STAT_READY_CS2 0x20000000 -#define IFC_RB_STAT_READY_CS3 0x10000000 - -/* - * General Control Register (GCR) - */ -#define IFC_GCR_MASK 0x8000F800 -/* reset all IFC hardware */ -#define IFC_GCR_SOFT_RST_ALL 0x80000000 -/* Turnaroud Time of external buffer */ -#define IFC_GCR_TBCTL_TRN_TIME 0x0000F800 -#define IFC_GCR_TBCTL_TRN_TIME_SHIFT 11 - -/* - * Common Event and Error Status Register (CM_EVTER_STAT) - */ -/* Chip select error */ -#define IFC_CM_EVTER_STAT_CSER 0x80000000 - -/* - * Common Event and Error Enable Register (CM_EVTER_EN) - */ -/* Chip select error checking enable */ -#define IFC_CM_EVTER_EN_CSEREN 0x80000000 - -/* - * Common Event and Error Interrupt Enable Register (CM_EVTER_INTR_EN) - */ -/* Chip select error interrupt enable */ -#define IFC_CM_EVTER_INTR_EN_CSERIREN 0x80000000 - -/* - * Common Transfer Error Attribute Register-0 (CM_ERATTR0) - */ -/* transaction type of error Read/Write */ -#define IFC_CM_ERATTR0_ERTYP_READ 0x80000000 -#define IFC_CM_ERATTR0_ERAID 0x0FF00000 -#define IFC_CM_ERATTR0_ERAID_SHIFT 20 -#define IFC_CM_ERATTR0_ESRCID 0x0000FF00 -#define IFC_CM_ERATTR0_ESRCID_SHIFT 8 - -/* - * Clock Control Register (CCR) - */ -#define IFC_CCR_MASK 0x0F0F8800 -/* Clock division ratio */ -#define IFC_CCR_CLK_DIV_MASK 0x0F000000 -#define IFC_CCR_CLK_DIV_SHIFT 24 -#define IFC_CCR_CLK_DIV(n) ((n-1) << IFC_CCR_CLK_DIV_SHIFT) -/* IFC Clock Delay */ -#define IFC_CCR_CLK_DLY_MASK 0x000F0000 -#define IFC_CCR_CLK_DLY_SHIFT 16 -#define IFC_CCR_CLK_DLY(n) 
((n) << IFC_CCR_CLK_DLY_SHIFT) -/* Invert IFC clock before sending out */ -#define IFC_CCR_INV_CLK_EN 0x00008000 -/* Fedback IFC Clock */ -#define IFC_CCR_FB_IFC_CLK_SEL 0x00000800 - -/* - * Clock Status Register (CSR) - */ -/* Clk is stable */ -#define IFC_CSR_CLK_STAT_STABLE 0x80000000 - -/* - * IFC_NAND Machine Specific Registers - */ -/* - * NAND Configuration Register (NCFGR) - */ -/* Auto Boot Mode */ -#define IFC_NAND_NCFGR_BOOT 0x80000000 -/* Addressing Mode-ROW0+n/COL0 */ -#define IFC_NAND_NCFGR_ADDR_MODE_RC0 0x00000000 -/* Addressing Mode-ROW0+n/COL0+n */ -#define IFC_NAND_NCFGR_ADDR_MODE_RC1 0x00400000 -/* Number of loop iterations of FIR sequences for multi page operations */ -#define IFC_NAND_NCFGR_NUM_LOOP_MASK 0x0000F000 -#define IFC_NAND_NCFGR_NUM_LOOP_SHIFT 12 -#define IFC_NAND_NCFGR_NUM_LOOP(n) ((n) << IFC_NAND_NCFGR_NUM_LOOP_SHIFT) -/* Number of wait cycles */ -#define IFC_NAND_NCFGR_NUM_WAIT_MASK 0x000000FF -#define IFC_NAND_NCFGR_NUM_WAIT_SHIFT 0 - -/* - * NAND Flash Command Registers (NAND_FCR0/NAND_FCR1) - */ -/* General purpose FCM flash command bytes CMD0-CMD7 */ -#define IFC_NAND_FCR0_CMD0 0xFF000000 -#define IFC_NAND_FCR0_CMD0_SHIFT 24 -#define IFC_NAND_FCR0_CMD1 0x00FF0000 -#define IFC_NAND_FCR0_CMD1_SHIFT 16 -#define IFC_NAND_FCR0_CMD2 0x0000FF00 -#define IFC_NAND_FCR0_CMD2_SHIFT 8 -#define IFC_NAND_FCR0_CMD3 0x000000FF -#define IFC_NAND_FCR0_CMD3_SHIFT 0 -#define IFC_NAND_FCR1_CMD4 0xFF000000 -#define IFC_NAND_FCR1_CMD4_SHIFT 24 -#define IFC_NAND_FCR1_CMD5 0x00FF0000 -#define IFC_NAND_FCR1_CMD5_SHIFT 16 -#define IFC_NAND_FCR1_CMD6 0x0000FF00 -#define IFC_NAND_FCR1_CMD6_SHIFT 8 -#define IFC_NAND_FCR1_CMD7 0x000000FF -#define IFC_NAND_FCR1_CMD7_SHIFT 0 - -/* - * Flash ROW and COL Address Register (ROWn, COLn) - */ -/* Main/spare region locator */ -#define IFC_NAND_COL_MS 0x80000000 -/* Column Address */ -#define IFC_NAND_COL_CA_MASK 0x00000FFF - -/* - * NAND Flash Byte Count Register (NAND_BC) - */ -/* Byte Count for read/Write */ -#define IFC_NAND_BC 0x000001FF - -/* - * NAND Flash Instruction Registers (NAND_FIR0/NAND_FIR1/NAND_FIR2) - */ -/* NAND Machine specific opcodes OP0-OP14*/ -#define IFC_NAND_FIR0_OP0 0xFC000000 -#define IFC_NAND_FIR0_OP0_SHIFT 26 -#define IFC_NAND_FIR0_OP1 0x03F00000 -#define IFC_NAND_FIR0_OP1_SHIFT 20 -#define IFC_NAND_FIR0_OP2 0x000FC000 -#define IFC_NAND_FIR0_OP2_SHIFT 14 -#define IFC_NAND_FIR0_OP3 0x00003F00 -#define IFC_NAND_FIR0_OP3_SHIFT 8 -#define IFC_NAND_FIR0_OP4 0x000000FC -#define IFC_NAND_FIR0_OP4_SHIFT 2 -#define IFC_NAND_FIR1_OP5 0xFC000000 -#define IFC_NAND_FIR1_OP5_SHIFT 26 -#define IFC_NAND_FIR1_OP6 0x03F00000 -#define IFC_NAND_FIR1_OP6_SHIFT 20 -#define IFC_NAND_FIR1_OP7 0x000FC000 -#define IFC_NAND_FIR1_OP7_SHIFT 14 -#define IFC_NAND_FIR1_OP8 0x00003F00 -#define IFC_NAND_FIR1_OP8_SHIFT 8 -#define IFC_NAND_FIR1_OP9 0x000000FC -#define IFC_NAND_FIR1_OP9_SHIFT 2 -#define IFC_NAND_FIR2_OP10 0xFC000000 -#define IFC_NAND_FIR2_OP10_SHIFT 26 -#define IFC_NAND_FIR2_OP11 0x03F00000 -#define IFC_NAND_FIR2_OP11_SHIFT 20 -#define IFC_NAND_FIR2_OP12 0x000FC000 -#define IFC_NAND_FIR2_OP12_SHIFT 14 -#define IFC_NAND_FIR2_OP13 0x00003F00 -#define IFC_NAND_FIR2_OP13_SHIFT 8 -#define IFC_NAND_FIR2_OP14 0x000000FC -#define IFC_NAND_FIR2_OP14_SHIFT 2 - -/* - * Instruction opcodes to be programmed - * in FIR registers- 6bits - */ -enum ifc_nand_fir_opcodes { - IFC_FIR_OP_NOP, - IFC_FIR_OP_CA0, - IFC_FIR_OP_CA1, - IFC_FIR_OP_CA2, - IFC_FIR_OP_CA3, - IFC_FIR_OP_RA0, - IFC_FIR_OP_RA1, - IFC_FIR_OP_RA2, - IFC_FIR_OP_RA3, - IFC_FIR_OP_CMD0, 
- IFC_FIR_OP_CMD1, - IFC_FIR_OP_CMD2, - IFC_FIR_OP_CMD3, - IFC_FIR_OP_CMD4, - IFC_FIR_OP_CMD5, - IFC_FIR_OP_CMD6, - IFC_FIR_OP_CMD7, - IFC_FIR_OP_CW0, - IFC_FIR_OP_CW1, - IFC_FIR_OP_CW2, - IFC_FIR_OP_CW3, - IFC_FIR_OP_CW4, - IFC_FIR_OP_CW5, - IFC_FIR_OP_CW6, - IFC_FIR_OP_CW7, - IFC_FIR_OP_WBCD, - IFC_FIR_OP_RBCD, - IFC_FIR_OP_BTRD, - IFC_FIR_OP_RDSTAT, - IFC_FIR_OP_NWAIT, - IFC_FIR_OP_WFR, - IFC_FIR_OP_SBRD, - IFC_FIR_OP_UA, - IFC_FIR_OP_RB, -}; - -/* - * NAND Chip Select Register (NAND_CSEL) - */ -#define IFC_NAND_CSEL 0x0C000000 -#define IFC_NAND_CSEL_SHIFT 26 -#define IFC_NAND_CSEL_CS0 0x00000000 -#define IFC_NAND_CSEL_CS1 0x04000000 -#define IFC_NAND_CSEL_CS2 0x08000000 -#define IFC_NAND_CSEL_CS3 0x0C000000 - -/* - * NAND Operation Sequence Start (NANDSEQ_STRT) - */ -/* NAND Flash Operation Start */ -#define IFC_NAND_SEQ_STRT_FIR_STRT 0x80000000 -/* Automatic Erase */ -#define IFC_NAND_SEQ_STRT_AUTO_ERS 0x00800000 -/* Automatic Program */ -#define IFC_NAND_SEQ_STRT_AUTO_PGM 0x00100000 -/* Automatic Copyback */ -#define IFC_NAND_SEQ_STRT_AUTO_CPB 0x00020000 -/* Automatic Read Operation */ -#define IFC_NAND_SEQ_STRT_AUTO_RD 0x00004000 -/* Automatic Status Read */ -#define IFC_NAND_SEQ_STRT_AUTO_STAT_RD 0x00000800 - -/* - * NAND Event and Error Status Register (NAND_EVTER_STAT) - */ -/* Operation Complete */ -#define IFC_NAND_EVTER_STAT_OPC 0x80000000 -/* Flash Timeout Error */ -#define IFC_NAND_EVTER_STAT_FTOER 0x08000000 -/* Write Protect Error */ -#define IFC_NAND_EVTER_STAT_WPER 0x04000000 -/* ECC Error */ -#define IFC_NAND_EVTER_STAT_ECCER 0x02000000 -/* RCW Load Done */ -#define IFC_NAND_EVTER_STAT_RCW_DN 0x00008000 -/* Boot Loadr Done */ -#define IFC_NAND_EVTER_STAT_BOOT_DN 0x00004000 -/* Bad Block Indicator search select */ -#define IFC_NAND_EVTER_STAT_BBI_SRCH_SE 0x00000800 - -/* - * NAND Flash Page Read Completion Event Status Register - * (PGRDCMPL_EVT_STAT) - */ -#define PGRDCMPL_EVT_STAT_MASK 0xFFFF0000 -/* Small Page 0-15 Done */ -#define PGRDCMPL_EVT_STAT_SECTION_SP(n) (1 << (31 - (n))) -/* Large Page(2K) 0-3 Done */ -#define PGRDCMPL_EVT_STAT_LP_2K(n) (0xF << (28 - (n)*4)) -/* Large Page(4K) 0-1 Done */ -#define PGRDCMPL_EVT_STAT_LP_4K(n) (0xFF << (24 - (n)*8)) - -/* - * NAND Event and Error Enable Register (NAND_EVTER_EN) - */ -/* Operation complete event enable */ -#define IFC_NAND_EVTER_EN_OPC_EN 0x80000000 -/* Page read complete event enable */ -#define IFC_NAND_EVTER_EN_PGRDCMPL_EN 0x20000000 -/* Flash Timeout error enable */ -#define IFC_NAND_EVTER_EN_FTOER_EN 0x08000000 -/* Write Protect error enable */ -#define IFC_NAND_EVTER_EN_WPER_EN 0x04000000 -/* ECC error logging enable */ -#define IFC_NAND_EVTER_EN_ECCER_EN 0x02000000 - -/* - * NAND Event and Error Interrupt Enable Register (NAND_EVTER_INTR_EN) - */ -/* Enable interrupt for operation complete */ -#define IFC_NAND_EVTER_INTR_OPCIR_EN 0x80000000 -/* Enable interrupt for Page read complete */ -#define IFC_NAND_EVTER_INTR_PGRDCMPLIR_EN 0x20000000 -/* Enable interrupt for Flash timeout error */ -#define IFC_NAND_EVTER_INTR_FTOERIR_EN 0x08000000 -/* Enable interrupt for Write protect error */ -#define IFC_NAND_EVTER_INTR_WPERIR_EN 0x04000000 -/* Enable interrupt for ECC error*/ -#define IFC_NAND_EVTER_INTR_ECCERIR_EN 0x02000000 - -/* - * NAND Transfer Error Attribute Register-0 (NAND_ERATTR0) - */ -#define IFC_NAND_ERATTR0_MASK 0x0C080000 -/* Error on CS0-3 for NAND */ -#define IFC_NAND_ERATTR0_ERCS_CS0 0x00000000 -#define IFC_NAND_ERATTR0_ERCS_CS1 0x04000000 -#define IFC_NAND_ERATTR0_ERCS_CS2 0x08000000 -#define 
IFC_NAND_ERATTR0_ERCS_CS3 0x0C000000 -/* Transaction type of error Read/Write */ -#define IFC_NAND_ERATTR0_ERTTYPE_READ 0x00080000 - -/* - * NAND Flash Status Register (NAND_FSR) - */ -/* First byte of data read from read status op */ -#define IFC_NAND_NFSR_RS0 0xFF000000 -/* Second byte of data read from read status op */ -#define IFC_NAND_NFSR_RS1 0x00FF0000 - -/* - * ECC Error Status Registers (ECCSTAT0-ECCSTAT3) - */ -/* Number of ECC errors on sector n (n = 0-15) */ -#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR0_MASK 0x0F000000 -#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR0_SHIFT 24 -#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR1_MASK 0x000F0000 -#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR1_SHIFT 16 -#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR2_MASK 0x00000F00 -#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR2_SHIFT 8 -#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR3_MASK 0x0000000F -#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR3_SHIFT 0 -#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR4_MASK 0x0F000000 -#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR4_SHIFT 24 -#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR5_MASK 0x000F0000 -#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR5_SHIFT 16 -#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR6_MASK 0x00000F00 -#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR6_SHIFT 8 -#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR7_MASK 0x0000000F -#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR7_SHIFT 0 -#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR8_MASK 0x0F000000 -#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR8_SHIFT 24 -#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR9_MASK 0x000F0000 -#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR9_SHIFT 16 -#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR10_MASK 0x00000F00 -#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR10_SHIFT 8 -#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR11_MASK 0x0000000F -#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR11_SHIFT 0 -#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR12_MASK 0x0F000000 -#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR12_SHIFT 24 -#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR13_MASK 0x000F0000 -#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR13_SHIFT 16 -#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR14_MASK 0x00000F00 -#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR14_SHIFT 8 -#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR15_MASK 0x0000000F -#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR15_SHIFT 0 - -/* - * NAND Control Register (NANDCR) - */ -#define IFC_NAND_NCR_FTOCNT_MASK 0x1E000000 -#define IFC_NAND_NCR_FTOCNT_SHIFT 25 -#define IFC_NAND_NCR_FTOCNT(n) ((_ilog2(n) - 8) << IFC_NAND_NCR_FTOCNT_SHIFT) - -/* - * NAND_AUTOBOOT_TRGR - */ -/* Trigger RCW load */ -#define IFC_NAND_AUTOBOOT_TRGR_RCW_LD 0x80000000 -/* Trigget Auto Boot */ -#define IFC_NAND_AUTOBOOT_TRGR_BOOT_LD 0x20000000 - -/* - * NAND_MDR - */ -/* 1st read data byte when opcode SBRD */ -#define IFC_NAND_MDR_RDATA0 0xFF000000 -/* 2nd read data byte when opcode SBRD */ -#define IFC_NAND_MDR_RDATA1 0x00FF0000 - -/* - * NOR Machine Specific Registers - */ -/* - * NOR Event and Error Status Register (NOR_EVTER_STAT) - */ -/* NOR Command Sequence Operation Complete */ -#define IFC_NOR_EVTER_STAT_OPC_NOR 0x80000000 -/* Write Protect Error */ -#define IFC_NOR_EVTER_STAT_WPER 0x04000000 -/* Command Sequence Timeout Error */ -#define IFC_NOR_EVTER_STAT_STOER 0x01000000 - -/* - * NOR Event and Error Enable Register (NOR_EVTER_EN) - */ -/* NOR Command Seq complete event enable */ -#define IFC_NOR_EVTER_EN_OPCEN_NOR 0x80000000 -/* Write Protect Error Checking Enable */ -#define IFC_NOR_EVTER_EN_WPEREN 0x04000000 -/* Timeout Error Enable */ -#define IFC_NOR_EVTER_EN_STOEREN 0x01000000 - -/* - * NOR Event and Error Interrupt Enable Register 
(NOR_EVTER_INTR_EN) - */ -/* Enable interrupt for OPC complete */ -#define IFC_NOR_EVTER_INTR_OPCEN_NOR 0x80000000 -/* Enable interrupt for write protect error */ -#define IFC_NOR_EVTER_INTR_WPEREN 0x04000000 -/* Enable interrupt for timeout error */ -#define IFC_NOR_EVTER_INTR_STOEREN 0x01000000 - -/* - * NOR Transfer Error Attribute Register-0 (NOR_ERATTR0) - */ -/* Source ID for error transaction */ -#define IFC_NOR_ERATTR0_ERSRCID 0xFF000000 -/* AXI ID for error transation */ -#define IFC_NOR_ERATTR0_ERAID 0x000FF000 -/* Chip select corresponds to NOR error */ -#define IFC_NOR_ERATTR0_ERCS_CS0 0x00000000 -#define IFC_NOR_ERATTR0_ERCS_CS1 0x00000010 -#define IFC_NOR_ERATTR0_ERCS_CS2 0x00000020 -#define IFC_NOR_ERATTR0_ERCS_CS3 0x00000030 -/* Type of transaction read/write */ -#define IFC_NOR_ERATTR0_ERTYPE_READ 0x00000001 - -/* - * NOR Transfer Error Attribute Register-2 (NOR_ERATTR2) - */ -#define IFC_NOR_ERATTR2_ER_NUM_PHASE_EXP 0x000F0000 -#define IFC_NOR_ERATTR2_ER_NUM_PHASE_PER 0x00000F00 - -/* - * NOR Control Register (NORCR) - */ -#define IFC_NORCR_MASK 0x0F0F0000 -/* No. of Address/Data Phase */ -#define IFC_NORCR_NUM_PHASE_MASK 0x0F000000 -#define IFC_NORCR_NUM_PHASE_SHIFT 24 -#define IFC_NORCR_NUM_PHASE(n) ((n-1) << IFC_NORCR_NUM_PHASE_SHIFT) -/* Sequence Timeout Count */ -#define IFC_NORCR_STOCNT_MASK 0x000F0000 -#define IFC_NORCR_STOCNT_SHIFT 16 -#define IFC_NORCR_STOCNT(n) ((__ilog2(n) - 8) << IFC_NORCR_STOCNT_SHIFT) - -/* - * GPCM Machine specific registers - */ -/* - * GPCM Event and Error Status Register (GPCM_EVTER_STAT) - */ -/* Timeout error */ -#define IFC_GPCM_EVTER_STAT_TOER 0x04000000 -/* Parity error */ -#define IFC_GPCM_EVTER_STAT_PER 0x01000000 - -/* - * GPCM Event and Error Enable Register (GPCM_EVTER_EN) - */ -/* Timeout error enable */ -#define IFC_GPCM_EVTER_EN_TOER_EN 0x04000000 -/* Parity error enable */ -#define IFC_GPCM_EVTER_EN_PER_EN 0x01000000 - -/* - * GPCM Event and Error Interrupt Enable Register (GPCM_EVTER_INTR_EN) - */ -/* Enable Interrupt for timeout error */ -#define IFC_GPCM_EEIER_TOERIR_EN 0x04000000 -/* Enable Interrupt for Parity error */ -#define IFC_GPCM_EEIER_PERIR_EN 0x01000000 - -/* - * GPCM Transfer Error Attribute Register-0 (GPCM_ERATTR0) - */ -/* Source ID for error transaction */ -#define IFC_GPCM_ERATTR0_ERSRCID 0xFF000000 -/* AXI ID for error transaction */ -#define IFC_GPCM_ERATTR0_ERAID 0x000FF000 -/* Chip select corresponds to GPCM error */ -#define IFC_GPCM_ERATTR0_ERCS_CS0 0x00000000 -#define IFC_GPCM_ERATTR0_ERCS_CS1 0x00000040 -#define IFC_GPCM_ERATTR0_ERCS_CS2 0x00000080 -#define IFC_GPCM_ERATTR0_ERCS_CS3 0x000000C0 -/* Type of transaction read/Write */ -#define IFC_GPCM_ERATTR0_ERTYPE_READ 0x00000001 - -/* - * GPCM Transfer Error Attribute Register-2 (GPCM_ERATTR2) - */ -/* On which beat of address/data parity error is observed */ -#define IFC_GPCM_ERATTR2_PERR_BEAT 0x00000C00 -/* Parity Error on byte */ -#define IFC_GPCM_ERATTR2_PERR_BYTE 0x000000F0 -/* Parity Error reported in addr or data phase */ -#define IFC_GPCM_ERATTR2_PERR_DATA_PHASE 0x00000001 - -/* - * GPCM Status Register (GPCM_STAT) - */ -#define IFC_GPCM_STAT_BSY 0x80000000 /* GPCM is busy */ - -/* - * IFC Controller NAND Machine registers - */ -struct fsl_ifc_nand { - __be32 ncfgr; - u32 res1[0x4]; - __be32 nand_fcr0; - __be32 nand_fcr1; - u32 res2[0x8]; - __be32 row0; - u32 res3; - __be32 col0; - u32 res4; - __be32 row1; - u32 res5; - __be32 col1; - u32 res6; - __be32 row2; - u32 res7; - __be32 col2; - u32 res8; - __be32 row3; - u32 res9; - __be32 col3; 
- u32 res10[0x24]; - __be32 nand_fbcr; - u32 res11; - __be32 nand_fir0; - __be32 nand_fir1; - __be32 nand_fir2; - u32 res12[0x10]; - __be32 nand_csel; - u32 res13; - __be32 nandseq_strt; - u32 res14; - __be32 nand_evter_stat; - u32 res15; - __be32 pgrdcmpl_evt_stat; - u32 res16[0x2]; - __be32 nand_evter_en; - u32 res17[0x2]; - __be32 nand_evter_intr_en; - u32 res18[0x2]; - __be32 nand_erattr0; - __be32 nand_erattr1; - u32 res19[0x10]; - __be32 nand_fsr; - u32 res20; - __be32 nand_eccstat[4]; - u32 res21[0x20]; - __be32 nanndcr; - u32 res22[0x2]; - __be32 nand_autoboot_trgr; - u32 res23; - __be32 nand_mdr; - u32 res24[0x5C]; -}; - -/* - * IFC controller NOR Machine registers - */ -struct fsl_ifc_nor { - __be32 nor_evter_stat; - u32 res1[0x2]; - __be32 nor_evter_en; - u32 res2[0x2]; - __be32 nor_evter_intr_en; - u32 res3[0x2]; - __be32 nor_erattr0; - __be32 nor_erattr1; - __be32 nor_erattr2; - u32 res4[0x4]; - __be32 norcr; - u32 res5[0xEF]; -}; - -/* - * IFC controller GPCM Machine registers - */ -struct fsl_ifc_gpcm { - __be32 gpcm_evter_stat; - u32 res1[0x2]; - __be32 gpcm_evter_en; - u32 res2[0x2]; - __be32 gpcm_evter_intr_en; - u32 res3[0x2]; - __be32 gpcm_erattr0; - __be32 gpcm_erattr1; - __be32 gpcm_erattr2; - __be32 gpcm_stat; - u32 res4[0x1F3]; -}; - -/* - * IFC Controller Registers - */ -struct fsl_ifc_regs { - __be32 ifc_rev; - u32 res1[0x3]; - struct { - __be32 cspr; - u32 res2[0x2]; - } cspr_cs[FSL_IFC_BANK_COUNT]; - u32 res3[0x18]; - struct { - __be32 amask; - u32 res4[0x2]; - } amask_cs[FSL_IFC_BANK_COUNT]; - u32 res5[0x18]; - struct { - __be32 csor; - u32 res6[0x2]; - } csor_cs[FSL_IFC_BANK_COUNT]; - u32 res7[0x18]; - struct { - __be32 ftim[4]; - u32 res8[0x8]; - } ftim_cs[FSL_IFC_BANK_COUNT]; - u32 res9[0x60]; - __be32 rb_stat; - u32 res10[0x2]; - __be32 ifc_gcr; - u32 res11[0x2]; - __be32 cm_evter_stat; - u32 res12[0x2]; - __be32 cm_evter_en; - u32 res13[0x2]; - __be32 cm_evter_intr_en; - u32 res14[0x2]; - __be32 cm_erattr0; - __be32 cm_erattr1; - u32 res15[0x2]; - __be32 ifc_ccr; - __be32 ifc_csr; - u32 res16[0x2EB]; - struct fsl_ifc_nand ifc_nand; - struct fsl_ifc_nor ifc_nor; - struct fsl_ifc_gpcm ifc_gpcm; -}; - -extern unsigned int convert_ifc_address(phys_addr_t addr_base); -extern int fsl_ifc_find(phys_addr_t addr_base); - -/* overview of the fsl ifc controller */ - -struct fsl_ifc_ctrl { - /* device info */ - struct device *dev; - struct fsl_ifc_regs __iomem *regs; - int irq; - int nand_irq; - spinlock_t lock; - void *nand; - - u32 nand_stat; - wait_queue_head_t nand_wait; -}; - -extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; - - -#endif /* __ASM_FSL_IFC_H */ diff --git a/trunk/arch/powerpc/include/asm/fsl_lbc.h b/trunk/arch/powerpc/include/asm/fsl_lbc.h index 420b45368fcf..8a0b5ece8f76 100644 --- a/trunk/arch/powerpc/include/asm/fsl_lbc.h +++ b/trunk/arch/powerpc/include/asm/fsl_lbc.h @@ -238,6 +238,8 @@ struct fsl_lbc_regs { #define FPAR_LP_CI_SHIFT 0 __be32 fbcr; /**< Flash Byte Count Register */ #define FBCR_BC 0x00000FFF + u8 res11[0x8]; + u8 res8[0xF00]; }; /* @@ -292,11 +294,6 @@ struct fsl_lbc_ctrl { /* status read from LTESR by irq handler */ unsigned int irq_status; - -#ifdef CONFIG_SUSPEND - /* save regs when system go to deep-sleep */ - struct fsl_lbc_regs *saved_regs; -#endif }; extern int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, diff --git a/trunk/arch/powerpc/include/asm/hugetlb.h b/trunk/arch/powerpc/include/asm/hugetlb.h index dfdb95bc59a5..86004930a78e 100644 --- a/trunk/arch/powerpc/include/asm/hugetlb.h +++ 
b/trunk/arch/powerpc/include/asm/hugetlb.h @@ -5,6 +5,7 @@ #include extern struct kmem_cache *hugepte_cache; +extern void __init reserve_hugetlb_gpages(void); static inline pte_t *hugepd_page(hugepd_t hpd) { @@ -21,14 +22,14 @@ static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr, unsigned pdshift) { /* - * On FSL BookE, we have multiple higher-level table entries that - * point to the same hugepte. Just use the first one since they're all + * On 32-bit, we have multiple higher-level table entries that point to + * the same hugepte. Just use the first one since they're all * identical. So for that case, idx=0. */ unsigned long idx = 0; pte_t *dir = hugepd_page(*hpdp); -#ifndef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC64 idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp); #endif @@ -52,8 +53,7 @@ static inline int is_hugepage_only_range(struct mm_struct *mm, } #endif -void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, - pte_t pte); +void book3e_hugetlb_preload(struct mm_struct *mm, unsigned long ea, pte_t pte); void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr); void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, @@ -124,17 +124,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty) { -#ifdef HUGETLB_NEED_PRELOAD - /* - * The "return 1" forces a call of update_mmu_cache, which will write a - * TLB entry. Without this, platforms that don't do a write of the TLB - * entry in the TLB miss handler asm will fault ad infinitum. - */ - ptep_set_access_flags(vma, addr, ptep, pte, dirty); - return 1; -#else return ptep_set_access_flags(vma, addr, ptep, pte, dirty); -#endif } static inline pte_t huge_ptep_get(pte_t *ptep) @@ -152,22 +142,12 @@ static inline void arch_release_hugepage(struct page *page) } #else /* ! CONFIG_HUGETLB_PAGE */ -static inline void flush_hugetlb_page(struct vm_area_struct *vma, - unsigned long vmaddr) +static inline void reserve_hugetlb_gpages(void) { + pr_err("Cannot reserve gpages without hugetlb enabled\n"); } -#endif /* CONFIG_HUGETLB_PAGE */ - - -/* - * FSL Book3E platforms require special gpage handling - the gpages - * are reserved early in the boot process by memblock instead of via - * the .dts as on IBM platforms. 
- */ -#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E) -extern void __init reserve_hugetlb_gpages(void); -#else -static inline void reserve_hugetlb_gpages(void) +static inline void flush_hugetlb_page(struct vm_area_struct *vma, + unsigned long vmaddr) { } #endif diff --git a/trunk/arch/powerpc/include/asm/kdump.h b/trunk/arch/powerpc/include/asm/kdump.h index c9776202d7ec..bffd062adf79 100644 --- a/trunk/arch/powerpc/include/asm/kdump.h +++ b/trunk/arch/powerpc/include/asm/kdump.h @@ -32,11 +32,11 @@ #ifndef __ASSEMBLY__ -#if defined(CONFIG_CRASH_DUMP) && !defined(CONFIG_NONSTATIC_KERNEL) +#if defined(CONFIG_CRASH_DUMP) && !defined(CONFIG_RELOCATABLE) extern void reserve_kdump_trampoline(void); extern void setup_kdump_trampoline(void); #else -/* !CRASH_DUMP || !NONSTATIC_KERNEL */ +/* !CRASH_DUMP || RELOCATABLE */ static inline void reserve_kdump_trampoline(void) { ; } static inline void setup_kdump_trampoline(void) { ; } #endif diff --git a/trunk/arch/powerpc/include/asm/kexec.h b/trunk/arch/powerpc/include/asm/kexec.h index 16d7e33d35e9..f921eb121d39 100644 --- a/trunk/arch/powerpc/include/asm/kexec.h +++ b/trunk/arch/powerpc/include/asm/kexec.h @@ -49,6 +49,7 @@ #define KEXEC_STATE_REAL_MODE 2 #ifndef __ASSEMBLY__ +#include #include typedef void (*crash_shutdown_t)(void); @@ -72,6 +73,11 @@ extern void kexec_smp_wait(void); /* get and clear naca physid, wait for master to copy new code to 0 */ extern int crashing_cpu; extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)); +extern cpumask_t cpus_in_sr; +static inline int kexec_sr_activated(int cpu) +{ + return cpumask_test_cpu(cpu, &cpus_in_sr); +} struct kimage; struct pt_regs; @@ -88,6 +94,7 @@ extern void reserve_crashkernel(void); extern void machine_kexec_mask_interrupts(void); #else /* !CONFIG_KEXEC */ +static inline int kexec_sr_activated(int cpu) { return 0; } static inline void crash_kexec_secondary(struct pt_regs *regs) { } static inline int overlaps_crashkernel(unsigned long start, unsigned long size) diff --git a/trunk/arch/powerpc/include/asm/kvm_book3s.h b/trunk/arch/powerpc/include/asm/kvm_book3s.h index 69c7377d2071..d4df013ad779 100644 --- a/trunk/arch/powerpc/include/asm/kvm_book3s.h +++ b/trunk/arch/powerpc/include/asm/kvm_book3s.h @@ -381,6 +381,39 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) } #endif +static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r, + unsigned long pte_index) +{ + unsigned long rb, va_low; + + rb = (v & ~0x7fUL) << 16; /* AVA field */ + va_low = pte_index >> 3; + if (v & HPTE_V_SECONDARY) + va_low = ~va_low; + /* xor vsid from AVA */ + if (!(v & HPTE_V_1TB_SEG)) + va_low ^= v >> 12; + else + va_low ^= v >> 24; + va_low &= 0x7ff; + if (v & HPTE_V_LARGE) { + rb |= 1; /* L field */ + if (cpu_has_feature(CPU_FTR_ARCH_206) && + (r & 0xff000)) { + /* non-16MB large page, must be 64k */ + /* (masks depend on page size) */ + rb |= 0x1000; /* page encoding in LP field */ + rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */ + rb |= (va_low & 0xfe); /* AVAL field (P7 doesn't seem to care) */ + } + } else { + /* 4kB page */ + rb |= (va_low & 0x7ff) << 12; /* remaining 11b of VA */ + } + rb |= (v >> 54) & 0x300; /* B field */ + return rb; +} + /* Magic register values loaded into r3 and r4 before the 'sc' assembly * instruction for the OSI hypercalls */ #define OSI_SC_MAGIC_R3 0x113724FA diff --git a/trunk/arch/powerpc/include/asm/kvm_book3s_64.h b/trunk/arch/powerpc/include/asm/kvm_book3s_64.h index 
d0ac94f98f9e..e43fe42b9875 100644 --- a/trunk/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/trunk/arch/powerpc/include/asm/kvm_book3s_64.h @@ -29,37 +29,4 @@ static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu) #define SPAPR_TCE_SHIFT 12 -static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r, - unsigned long pte_index) -{ - unsigned long rb, va_low; - - rb = (v & ~0x7fUL) << 16; /* AVA field */ - va_low = pte_index >> 3; - if (v & HPTE_V_SECONDARY) - va_low = ~va_low; - /* xor vsid from AVA */ - if (!(v & HPTE_V_1TB_SEG)) - va_low ^= v >> 12; - else - va_low ^= v >> 24; - va_low &= 0x7ff; - if (v & HPTE_V_LARGE) { - rb |= 1; /* L field */ - if (cpu_has_feature(CPU_FTR_ARCH_206) && - (r & 0xff000)) { - /* non-16MB large page, must be 64k */ - /* (masks depend on page size) */ - rb |= 0x1000; /* page encoding in LP field */ - rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */ - rb |= (va_low & 0xfe); /* AVAL field (P7 doesn't seem to care) */ - } - } else { - /* 4kB page */ - rb |= (va_low & 0x7ff) << 12; /* remaining 11b of VA */ - } - rb |= (v >> 54) & 0x300; /* B field */ - return rb; -} - #endif /* __ASM_KVM_BOOK3S_64_H__ */ diff --git a/trunk/arch/powerpc/include/asm/lv1call.h b/trunk/arch/powerpc/include/asm/lv1call.h index 233f9ecae761..f77c708c67a0 100644 --- a/trunk/arch/powerpc/include/asm/lv1call.h +++ b/trunk/arch/powerpc/include/asm/lv1call.h @@ -231,7 +231,7 @@ LV1_CALL(allocate_memory, 4, 2, 0 ) LV1_CALL(write_htab_entry, 4, 0, 1 ) LV1_CALL(construct_virtual_address_space, 3, 2, 2 ) LV1_CALL(invalidate_htab_entries, 5, 0, 3 ) -LV1_CALL(get_virtual_address_space_id_of_ppe, 0, 1, 4 ) +LV1_CALL(get_virtual_address_space_id_of_ppe, 1, 1, 4 ) LV1_CALL(query_logical_partition_address_region_info, 1, 5, 6 ) LV1_CALL(select_virtual_address_space, 1, 0, 7 ) LV1_CALL(pause, 1, 0, 9 ) @@ -264,7 +264,7 @@ LV1_CALL(configure_execution_time_variable, 1, 0, 77 ) LV1_CALL(get_spe_irq_outlet, 2, 1, 78 ) LV1_CALL(set_spe_privilege_state_area_1_register, 3, 0, 79 ) LV1_CALL(create_repository_node, 6, 0, 90 ) -LV1_CALL(read_repository_node, 5, 2, 91 ) +LV1_CALL(get_repository_node_value, 5, 2, 91 ) LV1_CALL(modify_repository_node_value, 6, 0, 92 ) LV1_CALL(remove_repository_node, 4, 0, 93 ) LV1_CALL(read_htab_entries, 2, 5, 95 ) @@ -276,7 +276,7 @@ LV1_CALL(construct_io_irq_outlet, 1, 1, 120 ) LV1_CALL(destruct_io_irq_outlet, 1, 0, 121 ) LV1_CALL(map_htab, 1, 1, 122 ) LV1_CALL(unmap_htab, 1, 0, 123 ) -LV1_CALL(get_version_info, 0, 2, 127 ) +LV1_CALL(get_version_info, 0, 1, 127 ) LV1_CALL(insert_htab_entry, 6, 3, 158 ) LV1_CALL(read_virtual_uart, 3, 1, 162 ) LV1_CALL(write_virtual_uart, 3, 1, 163 ) @@ -294,9 +294,9 @@ LV1_CALL(unmap_device_dma_region, 4, 0, 177 ) LV1_CALL(net_add_multicast_address, 4, 0, 185 ) LV1_CALL(net_remove_multicast_address, 4, 0, 186 ) LV1_CALL(net_start_tx_dma, 4, 0, 187 ) -LV1_CALL(net_stop_tx_dma, 2, 0, 188 ) +LV1_CALL(net_stop_tx_dma, 3, 0, 188 ) LV1_CALL(net_start_rx_dma, 4, 0, 189 ) -LV1_CALL(net_stop_rx_dma, 2, 0, 190 ) +LV1_CALL(net_stop_rx_dma, 3, 0, 190 ) LV1_CALL(net_set_interrupt_status_indicator, 4, 0, 191 ) LV1_CALL(net_set_interrupt_mask, 4, 0, 193 ) LV1_CALL(net_control, 6, 2, 194 ) diff --git a/trunk/arch/powerpc/include/asm/machdep.h b/trunk/arch/powerpc/include/asm/machdep.h index bf37931d1ad6..b540d6fcedd6 100644 --- a/trunk/arch/powerpc/include/asm/machdep.h +++ b/trunk/arch/powerpc/include/asm/machdep.h @@ -213,9 +213,6 @@ struct machdep_calls { * allow assignment/enabling of the device. 
*/ int (*pcibios_enable_device_hook)(struct pci_dev *); - /* Called after scan and before resource survey */ - void (*pcibios_fixup_phb)(struct pci_controller *hose); - /* Called to shutdown machine specific hardware not already controlled * by other drivers. */ diff --git a/trunk/arch/powerpc/include/asm/memblock.h b/trunk/arch/powerpc/include/asm/memblock.h new file mode 100644 index 000000000000..43efc345065e --- /dev/null +++ b/trunk/arch/powerpc/include/asm/memblock.h @@ -0,0 +1,8 @@ +#ifndef _ASM_POWERPC_MEMBLOCK_H +#define _ASM_POWERPC_MEMBLOCK_H + +#include + +#define MEMBLOCK_DBG(fmt...) udbg_printf(fmt) + +#endif /* _ASM_POWERPC_MEMBLOCK_H */ diff --git a/trunk/arch/powerpc/include/asm/mmu-book3e.h b/trunk/arch/powerpc/include/asm/mmu-book3e.h index f5f89cafebd0..0260ea5ec3c2 100644 --- a/trunk/arch/powerpc/include/asm/mmu-book3e.h +++ b/trunk/arch/powerpc/include/asm/mmu-book3e.h @@ -214,10 +214,6 @@ typedef struct { unsigned int id; unsigned int active; unsigned long vdso_base; -#ifdef CONFIG_PPC_ICSWX - struct spinlock *cop_lockp; /* guard cop related stuff */ - unsigned long acop; /* mask of enabled coprocessor types */ -#endif /* CONFIG_PPC_ICSWX */ #ifdef CONFIG_PPC_MM_SLICES u64 low_slices_psize; /* SLB page size encodings */ u64 high_slices_psize; /* 4 bits per slice for now */ @@ -258,13 +254,6 @@ extern int mmu_vmemmap_psize; #ifdef CONFIG_PPC64 extern unsigned long linear_map_top; - -/* - * 64-bit booke platforms don't load the tlb in the tlb miss handler code. - * HUGETLB_NEED_PRELOAD handles this - it causes huge_ptep_set_access_flags to - * return 1, indicating that the tlb requires preloading. - */ -#define HUGETLB_NEED_PRELOAD #endif #endif /* !__ASSEMBLY__ */ diff --git a/trunk/arch/powerpc/include/asm/mmu-hash64.h b/trunk/arch/powerpc/include/asm/mmu-hash64.h index 412ba493cb98..db645ec842bd 100644 --- a/trunk/arch/powerpc/include/asm/mmu-hash64.h +++ b/trunk/arch/powerpc/include/asm/mmu-hash64.h @@ -312,9 +312,10 @@ extern void slb_set_size(u16 size); * (i.e. everything above 0xC000000000000000), except the very top * segment, which simplifies several things. * - * - We allow for 16 significant bits of ESID and 19 bits of - * context for user addresses. i.e. 16T (44 bits) of address space for - * up to half a million contexts. + * - We allow for 15 significant bits of ESID and 20 bits of + * context for user addresses. i.e. 8T (43 bits) of address space for + * up to 1M contexts (although the page table structure and context + * allocation will need changes to take advantage of this). * * - The scramble function gives robust scattering in the hash * table (at least based on some initial results). 
The previous diff --git a/trunk/arch/powerpc/include/asm/mpic.h b/trunk/arch/powerpc/include/asm/mpic.h index 67b4d9837236..e6fae49e0b74 100644 --- a/trunk/arch/powerpc/include/asm/mpic.h +++ b/trunk/arch/powerpc/include/asm/mpic.h @@ -251,9 +251,6 @@ struct mpic_irq_save { /* The instance data of a given MPIC */ struct mpic { - /* The OpenFirmware dt node for this MPIC */ - struct device_node *node; - /* The remapper for this MPIC */ struct irq_host *irqhost; @@ -296,9 +293,6 @@ struct mpic /* Register access method */ enum mpic_reg_type reg_type; - /* The physical base address of the MPIC */ - phys_addr_t paddr; - /* The various ioremap'ed bases */ struct mpic_reg_bank gregs; struct mpic_reg_bank tmregs; @@ -337,11 +331,11 @@ struct mpic * Note setting any ID (leaving those bits to 0) means standard MPIC */ -/* - * This is a secondary ("chained") controller; it only uses the CPU0 - * registers. Primary controllers have IPIs and affinity control. +/* This is the primary controller, only that one has IPIs and + * has afinity control. A non-primary MPIC always uses CPU0 + * registers only */ -#define MPIC_SECONDARY 0x00000001 +#define MPIC_PRIMARY 0x00000001 /* Set this for a big-endian MPIC */ #define MPIC_BIG_ENDIAN 0x00000002 diff --git a/trunk/arch/powerpc/include/asm/opal.h b/trunk/arch/powerpc/include/asm/opal.h index a4b28f165b6c..2893e8f5406d 100644 --- a/trunk/arch/powerpc/include/asm/opal.h +++ b/trunk/arch/powerpc/include/asm/opal.h @@ -109,14 +109,6 @@ extern int opal_enter_rtas(struct rtas_args *args, #define OPAL_PCI_MAP_PE_DMA_WINDOW 44 #define OPAL_PCI_MAP_PE_DMA_WINDOW_REAL 45 #define OPAL_PCI_RESET 49 -#define OPAL_PCI_GET_HUB_DIAG_DATA 50 -#define OPAL_PCI_GET_PHB_DIAG_DATA 51 -#define OPAL_PCI_FENCE_PHB 52 -#define OPAL_PCI_REINIT 53 -#define OPAL_PCI_MASK_PE_ERROR 54 -#define OPAL_SET_SLOT_LED_STATUS 55 -#define OPAL_GET_EPOW_STATUS 56 -#define OPAL_SET_SYSTEM_ATTENTION_LED 57 #ifndef __ASSEMBLY__ @@ -177,11 +169,7 @@ enum OpalPendingState { OPAL_EVENT_NVRAM = 0x2, OPAL_EVENT_RTC = 0x4, OPAL_EVENT_CONSOLE_OUTPUT = 0x8, - OPAL_EVENT_CONSOLE_INPUT = 0x10, - OPAL_EVENT_ERROR_LOG_AVAIL = 0x20, - OPAL_EVENT_ERROR_LOG = 0x40, - OPAL_EVENT_EPOW = 0x80, - OPAL_EVENT_LED_STATUS = 0x100 + OPAL_EVENT_CONSOLE_INPUT = 0x10 }; /* Machine check related definitions */ @@ -270,49 +258,13 @@ enum OpalPeAction { OPAL_MAP_PE = 1 }; -enum OpalPeltvAction { - OPAL_REMOVE_PE_FROM_DOMAIN = 0, - OPAL_ADD_PE_TO_DOMAIN = 1 -}; - -enum OpalMveEnableAction { - OPAL_DISABLE_MVE = 0, - OPAL_ENABLE_MVE = 1 -}; - enum OpalPciResetAndReinitScope { OPAL_PHB_COMPLETE = 1, OPAL_PCI_LINK = 2, OPAL_PHB_ERROR = 3, OPAL_PCI_HOT_RESET = 4, OPAL_PCI_FUNDAMENTAL_RESET = 5, - OPAL_PCI_IODA_TABLE_RESET = 6, -}; - -enum OpalPciResetState { - OPAL_DEASSERT_RESET = 0, - OPAL_ASSERT_RESET = 1 + OPAL_PCI_IODA_RESET = 6, }; -enum OpalPciMaskAction { - OPAL_UNMASK_ERROR_TYPE = 0, - OPAL_MASK_ERROR_TYPE = 1 -}; - -enum OpalSlotLedType { - OPAL_SLOT_LED_ID_TYPE = 0, - OPAL_SLOT_LED_FAULT_TYPE = 1 -}; - -enum OpalLedAction { - OPAL_TURN_OFF_LED = 0, - OPAL_TURN_ON_LED = 1, - OPAL_QUERY_LED_STATE_AFTER_BUSY = 2 -}; - -enum OpalEpowStatus { - OPAL_EPOW_NONE = 0, - OPAL_EPOW_UPS = 1, - OPAL_EPOW_OVER_AMBIENT_TEMP = 2, - OPAL_EPOW_OVER_INTERNAL_TEMP = 3 -}; +enum OpalPciResetState { OPAL_DEASSERT_RESET = 0, OPAL_ASSERT_RESET = 1 }; struct opal_machine_check_event { enum OpalMCE_Version version:8; /* 0x00 */ @@ -362,74 +314,8 @@ struct opal_machine_check_event { } u; }; -/** - * This structure defines the overlay which will be 
used to store PHB error - * data upon request. - */ -enum { - OPAL_P7IOC_NUM_PEST_REGS = 128, -}; - -struct OpalIoP7IOCPhbErrorData { - uint32_t brdgCtl; - - // P7IOC utl regs - uint32_t portStatusReg; - uint32_t rootCmplxStatus; - uint32_t busAgentStatus; - - // P7IOC cfg regs - uint32_t deviceStatus; - uint32_t slotStatus; - uint32_t linkStatus; - uint32_t devCmdStatus; - uint32_t devSecStatus; - - // cfg AER regs - uint32_t rootErrorStatus; - uint32_t uncorrErrorStatus; - uint32_t corrErrorStatus; - uint32_t tlpHdr1; - uint32_t tlpHdr2; - uint32_t tlpHdr3; - uint32_t tlpHdr4; - uint32_t sourceId; - - uint32_t rsv3; - - // Record data about the call to allocate a buffer. - uint64_t errorClass; - uint64_t correlator; - - //P7IOC MMIO Error Regs - uint64_t p7iocPlssr; // n120 - uint64_t p7iocCsr; // n110 - uint64_t lemFir; // nC00 - uint64_t lemErrorMask; // nC18 - uint64_t lemWOF; // nC40 - uint64_t phbErrorStatus; // nC80 - uint64_t phbFirstErrorStatus; // nC88 - uint64_t phbErrorLog0; // nCC0 - uint64_t phbErrorLog1; // nCC8 - uint64_t mmioErrorStatus; // nD00 - uint64_t mmioFirstErrorStatus; // nD08 - uint64_t mmioErrorLog0; // nD40 - uint64_t mmioErrorLog1; // nD48 - uint64_t dma0ErrorStatus; // nD80 - uint64_t dma0FirstErrorStatus; // nD88 - uint64_t dma0ErrorLog0; // nDC0 - uint64_t dma0ErrorLog1; // nDC8 - uint64_t dma1ErrorStatus; // nE00 - uint64_t dma1FirstErrorStatus; // nE08 - uint64_t dma1ErrorLog0; // nE40 - uint64_t dma1ErrorLog1; // nE48 - uint64_t pestA[OPAL_P7IOC_NUM_PEST_REGS]; - uint64_t pestB[OPAL_P7IOC_NUM_PEST_REGS]; -}; - typedef struct oppanel_line { - const char * line; - uint64_t line_len; + /* XXX */ } oppanel_line_t; /* API functions */ @@ -527,15 +413,6 @@ int64_t opal_pci_map_pe_dma_window_real(uint64_t phb_id, uint16_t pe_number, uint64_t pci_mem_size); int64_t opal_pci_reset(uint64_t phb_id, uint8_t reset_scope, uint8_t assert_state); -int64_t opal_pci_get_hub_diag_data(uint64_t hub_id, void *diag_buffer, uint64_t diag_buffer_len); -int64_t opal_pci_get_phb_diag_data(uint64_t phb_id, void *diag_buffer, uint64_t diag_buffer_len); -int64_t opal_pci_fence_phb(uint64_t phb_id); -int64_t opal_pci_reinit(uint64_t phb_id, uint8_t reinit_scope); -int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action); -int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action); -int64_t opal_get_epow_status(uint64_t *status); -int64_t opal_set_system_attention_led(uint8_t led_action); - /* Internal functions */ extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data); diff --git a/trunk/arch/powerpc/include/asm/paca.h b/trunk/arch/powerpc/include/asm/paca.h index 269c05a36d91..17722c73ba2e 100644 --- a/trunk/arch/powerpc/include/asm/paca.h +++ b/trunk/arch/powerpc/include/asm/paca.h @@ -135,7 +135,6 @@ struct paca_struct { u8 hard_enabled; /* set if irqs are enabled in MSR */ u8 io_sync; /* writel() needs spin_unlock sync */ u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */ - u8 nap_state_lost; /* NV GPR values lost in power7_idle */ #ifdef CONFIG_PPC_POWERNV /* Pointer to OPAL machine check event structure set by the diff --git a/trunk/arch/powerpc/include/asm/page.h b/trunk/arch/powerpc/include/asm/page.h index f072e974f8a2..dd9c4fd038e0 100644 --- a/trunk/arch/powerpc/include/asm/page.h +++ b/trunk/arch/powerpc/include/asm/page.h @@ -92,34 +92,20 @@ extern unsigned int HPAGE_SHIFT; #define PAGE_OFFSET 
ASM_CONST(CONFIG_PAGE_OFFSET) #define LOAD_OFFSET ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START)) -#if defined(CONFIG_NONSTATIC_KERNEL) +#if defined(CONFIG_RELOCATABLE) #ifndef __ASSEMBLY__ extern phys_addr_t memstart_addr; extern phys_addr_t kernstart_addr; - -#ifdef CONFIG_RELOCATABLE_PPC32 -extern long long virt_phys_offset; #endif - -#endif /* __ASSEMBLY__ */ #define PHYSICAL_START kernstart_addr - -#else /* !CONFIG_NONSTATIC_KERNEL */ -#define PHYSICAL_START ASM_CONST(CONFIG_PHYSICAL_START) -#endif - -/* See Description below for VIRT_PHYS_OFFSET */ -#ifdef CONFIG_RELOCATABLE_PPC32 -#define VIRT_PHYS_OFFSET virt_phys_offset #else -#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START) +#define PHYSICAL_START ASM_CONST(CONFIG_PHYSICAL_START) #endif - #ifdef CONFIG_PPC64 #define MEMORY_START 0UL -#elif defined(CONFIG_NONSTATIC_KERNEL) +#elif defined(CONFIG_RELOCATABLE) #define MEMORY_START memstart_addr #else #define MEMORY_START (PHYSICAL_START + PAGE_OFFSET - KERNELBASE) @@ -139,77 +125,12 @@ extern long long virt_phys_offset; * determine MEMORY_START until then. However we can determine PHYSICAL_START * from information at hand (program counter, TLB lookup). * - * On BookE with RELOCATABLE (RELOCATABLE_PPC32) - * - * With RELOCATABLE_PPC32, we support loading the kernel at any physical - * address without any restriction on the page alignment. - * - * We find the runtime address of _stext and relocate ourselves based on - * the following calculation: - * - * virtual_base = ALIGN_DOWN(KERNELBASE,256M) + - * MODULO(_stext.run,256M) - * and create the following mapping: - * - * ALIGN_DOWN(_stext.run,256M) => ALIGN_DOWN(KERNELBASE,256M) - * - * When we process relocations, we cannot depend on the - * existing equation for the __va()/__pa() translations: - * - * __va(x) = (x) - PHYSICAL_START + KERNELBASE - * - * Where: - * PHYSICAL_START = kernstart_addr = Physical address of _stext - * KERNELBASE = Compiled virtual address of _stext. - * - * This formula holds true iff, kernel load address is TLB page aligned. - * - * In our case, we need to also account for the shift in the kernel Virtual - * address. - * - * E.g., - * - * Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as PAGE_OFFSET). - * In this case, we would be mapping 0 to 0xc0000000, and kernstart_addr = 64M - * - * Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000 - * = 0xbc100000 , which is wrong. - * - * Rather, it should be : 0xc0000000 + 0x100000 = 0xc0100000 - * according to our mapping. - * - * Hence we use the following formula to get the translations right: - * - * __va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ] - * - * Where : - * PHYSICAL_START = dynamic load address.(kernstart_addr variable) - * Effective KERNELBASE = virtual_base = - * = ALIGN_DOWN(KERNELBASE,256M) + - * MODULO(PHYSICAL_START,256M) - * - * To make the cost of __va() / __pa() more light weight, we introduce - * a new variable virt_phys_offset, which will hold : - * - * virt_phys_offset = Effective KERNELBASE - PHYSICAL_START - * = ALIGN_DOWN(KERNELBASE,256M) - - * ALIGN_DOWN(PHYSICALSTART,256M) - * - * Hence : - * - * __va(x) = x - PHYSICAL_START + Effective KERNELBASE - * = x + virt_phys_offset - * - * and - * __pa(x) = x + PHYSICAL_START - Effective KERNELBASE - * = x - virt_phys_offset - * * On non-Book-E PPC64 PAGE_OFFSET and MEMORY_START are constants so use * the other definitions for __va & __pa. 
*/ #ifdef CONFIG_BOOKE -#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET)) -#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET) +#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) - PHYSICAL_START + KERNELBASE)) +#define __pa(x) ((unsigned long)(x) + PHYSICAL_START - KERNELBASE) #else #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START)) #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START) @@ -369,7 +290,6 @@ extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg); extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *p); extern int page_is_ram(unsigned long pfn); -extern int devmem_is_allowed(unsigned long pfn); #ifdef CONFIG_PPC_SMLPAR void arch_free_page(struct page *page, int order); diff --git a/trunk/arch/powerpc/include/asm/page_64.h b/trunk/arch/powerpc/include/asm/page_64.h index fed85e6290e1..fb40ede6bc0d 100644 --- a/trunk/arch/powerpc/include/asm/page_64.h +++ b/trunk/arch/powerpc/include/asm/page_64.h @@ -130,9 +130,7 @@ do { \ #ifdef CONFIG_HUGETLB_PAGE -#ifdef CONFIG_PPC_MM_SLICES #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA -#endif #endif /* !CONFIG_HUGETLB_PAGE */ diff --git a/trunk/arch/powerpc/include/asm/pci-bridge.h b/trunk/arch/powerpc/include/asm/pci-bridge.h index 882b6aa6c857..56b879ab3a40 100644 --- a/trunk/arch/powerpc/include/asm/pci-bridge.h +++ b/trunk/arch/powerpc/include/asm/pci-bridge.h @@ -153,8 +153,8 @@ struct pci_dn { int pci_ext_config_space; /* for pci devices */ - struct pci_dev *pcidev; /* back-pointer to the pci device */ #ifdef CONFIG_EEH + struct pci_dev *pcidev; /* back-pointer to the pci device */ int class_code; /* pci device class */ int eeh_mode; /* See eeh.h for possible EEH_MODEs */ int eeh_config_addr; @@ -164,10 +164,6 @@ struct pci_dn { int eeh_false_positives; /* # times this device reported #ff's */ u32 config_space[16]; /* saved PCI config space */ #endif -#define IODA_INVALID_PE (-1) -#ifdef CONFIG_PPC_POWERNV - int pe_number; -#endif }; /* Get the pointer to a device_node's pci_dn */ diff --git a/trunk/arch/powerpc/include/asm/pci.h b/trunk/arch/powerpc/include/asm/pci.h index 1c92013466e3..49c3de582be0 100644 --- a/trunk/arch/powerpc/include/asm/pci.h +++ b/trunk/arch/powerpc/include/asm/pci.h @@ -184,6 +184,8 @@ extern void of_scan_pci_bridge(struct pci_dev *dev); extern void of_scan_bus(struct device_node *node, struct pci_bus *bus); extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus); +extern int pci_read_irq_line(struct pci_dev *dev); + struct file; extern pgprot_t pci_phys_mem_access_prot(struct file *file, unsigned long pfn, diff --git a/trunk/arch/powerpc/include/asm/pgtable.h b/trunk/arch/powerpc/include/asm/pgtable.h index 2e0e4110f7ae..88b0bd925a8b 100644 --- a/trunk/arch/powerpc/include/asm/pgtable.h +++ b/trunk/arch/powerpc/include/asm/pgtable.h @@ -170,9 +170,6 @@ extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addre #define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ _PAGE_COHERENT | _PAGE_WRITETHRU)) -#define pgprot_cached_noncoherent(prot) \ - (__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL)) - #define pgprot_writecombine pgprot_noncached_wc struct file; diff --git a/trunk/arch/powerpc/include/asm/processor.h b/trunk/arch/powerpc/include/asm/processor.h index b585bff1a022..eb11a446720e 100644 --- a/trunk/arch/powerpc/include/asm/processor.h +++ b/trunk/arch/powerpc/include/asm/processor.h @@ -382,9 +382,6 
@@ static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32) } #endif -extern unsigned long cpuidle_disable; -enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF}; - #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_PROCESSOR_H */ diff --git a/trunk/arch/powerpc/include/asm/reg.h b/trunk/arch/powerpc/include/asm/reg.h index 7fdc2c0b7fa0..559da199edb5 100644 --- a/trunk/arch/powerpc/include/asm/reg.h +++ b/trunk/arch/powerpc/include/asm/reg.h @@ -951,7 +951,6 @@ #define PVR_403GCX 0x00201400 #define PVR_405GP 0x40110000 #define PVR_476 0x11a52000 -#define PVR_476FPE 0x7ff50000 #define PVR_STB03XXX 0x40310000 #define PVR_NP405H 0x41410000 #define PVR_NP405L 0x41610000 diff --git a/trunk/arch/powerpc/include/asm/reg_booke.h b/trunk/arch/powerpc/include/asm/reg_booke.h index 500fe1dc43e6..03c48e819c8e 100644 --- a/trunk/arch/powerpc/include/asm/reg_booke.h +++ b/trunk/arch/powerpc/include/asm/reg_booke.h @@ -187,10 +187,6 @@ #define SPRN_CSRR1 SPRN_SRR3 /* Critical Save and Restore Register 1 */ #endif -#ifdef CONFIG_PPC_ICSWX -#define SPRN_HACOP 0x15F /* Hypervisor Available Coprocessor Register */ -#endif - /* Bit definitions for CCR1. */ #define CCR1_DPC 0x00000100 /* Disable L1 I-Cache/D-Cache parity checking */ #define CCR1_TCS 0x00000080 /* Timer Clock Select */ diff --git a/trunk/arch/powerpc/include/asm/rtas.h b/trunk/arch/powerpc/include/asm/rtas.h index 01c143bb77ae..41f69ae79d4e 100644 --- a/trunk/arch/powerpc/include/asm/rtas.h +++ b/trunk/arch/powerpc/include/asm/rtas.h @@ -245,12 +245,6 @@ extern int early_init_dt_scan_rtas(unsigned long node, extern void pSeries_log_error(char *buf, unsigned int err_type, int fatal); -#ifdef CONFIG_PPC_RTAS_DAEMON -extern void rtas_cancel_event_scan(void); -#else -static inline void rtas_cancel_event_scan(void) { } -#endif - /* Error types logged. */ #define ERR_FLAG_ALREADY_LOGGED 0x0 #define ERR_FLAG_BOOT 0x1 /* log was pulled from NVRAM on boot */ @@ -313,17 +307,5 @@ static inline u32 rtas_config_addr(int busno, int devfn, int reg) extern void __cpuinit rtas_give_timebase(void); extern void __cpuinit rtas_take_timebase(void); -#ifdef CONFIG_PPC_RTAS -static inline int page_is_rtas_user_buf(unsigned long pfn) -{ - unsigned long paddr = (pfn << PAGE_SHIFT); - if (paddr >= rtas_rmo_buf && paddr < (rtas_rmo_buf + RTAS_RMOBUF_MAX)) - return 1; - return 0; -} -#else -static inline int page_is_rtas_user_buf(unsigned long pfn) { return 0;} -#endif - #endif /* __KERNEL__ */ #endif /* _POWERPC_RTAS_H */ diff --git a/trunk/arch/powerpc/include/asm/rwsem.h b/trunk/arch/powerpc/include/asm/rwsem.h new file mode 100644 index 000000000000..bb1e2cdeb9bf --- /dev/null +++ b/trunk/arch/powerpc/include/asm/rwsem.h @@ -0,0 +1,132 @@ +#ifndef _ASM_POWERPC_RWSEM_H +#define _ASM_POWERPC_RWSEM_H + +#ifndef _LINUX_RWSEM_H +#error "Please don't include directly, use instead." +#endif + +#ifdef __KERNEL__ + +/* + * R/W semaphores for PPC using the stuff in lib/rwsem.c. + * Adapted largely from include/asm-i386/rwsem.h + * by Paul Mackerras . 
+ */ + +/* + * the semaphore definition + */ +#ifdef CONFIG_PPC64 +# define RWSEM_ACTIVE_MASK 0xffffffffL +#else +# define RWSEM_ACTIVE_MASK 0x0000ffffL +#endif + +#define RWSEM_UNLOCKED_VALUE 0x00000000L +#define RWSEM_ACTIVE_BIAS 0x00000001L +#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1) +#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS +#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) + +/* + * lock for reading + */ +static inline void __down_read(struct rw_semaphore *sem) +{ + if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0)) + rwsem_down_read_failed(sem); +} + +static inline int __down_read_trylock(struct rw_semaphore *sem) +{ + long tmp; + + while ((tmp = sem->count) >= 0) { + if (tmp == cmpxchg(&sem->count, tmp, + tmp + RWSEM_ACTIVE_READ_BIAS)) { + return 1; + } + } + return 0; +} + +/* + * lock for writing + */ +static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) +{ + long tmp; + + tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS, + (atomic_long_t *)&sem->count); + if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS)) + rwsem_down_write_failed(sem); +} + +static inline void __down_write(struct rw_semaphore *sem) +{ + __down_write_nested(sem, 0); +} + +static inline int __down_write_trylock(struct rw_semaphore *sem) +{ + long tmp; + + tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, + RWSEM_ACTIVE_WRITE_BIAS); + return tmp == RWSEM_UNLOCKED_VALUE; +} + +/* + * unlock after reading + */ +static inline void __up_read(struct rw_semaphore *sem) +{ + long tmp; + + tmp = atomic_long_dec_return((atomic_long_t *)&sem->count); + if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)) + rwsem_wake(sem); +} + +/* + * unlock after writing + */ +static inline void __up_write(struct rw_semaphore *sem) +{ + if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, + (atomic_long_t *)&sem->count) < 0)) + rwsem_wake(sem); +} + +/* + * implement atomic add functionality + */ +static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) +{ + atomic_long_add(delta, (atomic_long_t *)&sem->count); +} + +/* + * downgrade write lock to read lock + */ +static inline void __downgrade_write(struct rw_semaphore *sem) +{ + long tmp; + + tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS, + (atomic_long_t *)&sem->count); + if (tmp < 0) + rwsem_downgrade_wake(sem); +} + +/* + * implement exchange and add functionality + */ +static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) +{ + return atomic_long_add_return(delta, (atomic_long_t *)&sem->count); +} + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_RWSEM_H */ diff --git a/trunk/arch/powerpc/include/asm/socket.h b/trunk/arch/powerpc/include/asm/socket.h index 2fc2af8fbf59..866f7606da68 100644 --- a/trunk/arch/powerpc/include/asm/socket.h +++ b/trunk/arch/powerpc/include/asm/socket.h @@ -69,7 +69,4 @@ #define SO_RXQ_OVFL 40 -#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS - #endif /* _ASM_POWERPC_SOCKET_H */ diff --git a/trunk/arch/powerpc/include/asm/system.h b/trunk/arch/powerpc/include/asm/system.h index c377457d1b89..e30a13d1ee76 100644 --- a/trunk/arch/powerpc/include/asm/system.h +++ b/trunk/arch/powerpc/include/asm/system.h @@ -193,8 +193,8 @@ extern void cacheable_memzero(void *p, unsigned int nb); extern void *cacheable_memcpy(void *, const void *, unsigned int); extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long); extern void bad_page_fault(struct pt_regs *, unsigned long, int); +extern int die(const 
char *, struct pt_regs *, long); extern void _exception(int, struct pt_regs *, int, unsigned long); -extern void die(const char *, struct pt_regs *, long); extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val); #ifdef CONFIG_BOOKE_WDT @@ -221,15 +221,6 @@ extern unsigned long klimit; extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); extern int powersave_nap; /* set if nap mode can be used in idle loop */ -void cpu_idle_wait(void); - -#ifdef CONFIG_PSERIES_IDLE -extern void update_smt_snooze_delay(int snooze); -extern int pseries_notify_cpuidle_add_cpu(int cpu); -#else -static inline void update_smt_snooze_delay(int snooze) {} -static inline int pseries_notify_cpuidle_add_cpu(int cpu) { return 0; } -#endif /* * Atomic exchange diff --git a/trunk/arch/powerpc/include/asm/tce.h b/trunk/arch/powerpc/include/asm/tce.h index 743f36b38e5d..f663634cccc9 100644 --- a/trunk/arch/powerpc/include/asm/tce.h +++ b/trunk/arch/powerpc/include/asm/tce.h @@ -26,14 +26,10 @@ /* * Tces come in two formats, one for the virtual bus and a different - * format for PCI. PCI TCEs can have hardware or software maintianed - * coherency. + * format for PCI */ -#define TCE_VB 0 -#define TCE_PCI 1 -#define TCE_PCI_SWINV_CREATE 2 -#define TCE_PCI_SWINV_FREE 4 -#define TCE_PCI_SWINV_PAIR 8 +#define TCE_VB 0 +#define TCE_PCI 1 /* TCE page size is 4096 bytes (1 << 12) */ diff --git a/trunk/arch/powerpc/include/asm/time.h b/trunk/arch/powerpc/include/asm/time.h index 7eb10fb96cd0..fe6f7c2c9c68 100644 --- a/trunk/arch/powerpc/include/asm/time.h +++ b/trunk/arch/powerpc/include/asm/time.h @@ -219,7 +219,5 @@ DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array); extern void secondary_cpu_time_init(void); extern void iSeries_time_init_early(void); -DECLARE_PER_CPU(u64, decrementers_next_tb); - #endif /* __KERNEL__ */ #endif /* __POWERPC_TIME_H */ diff --git a/trunk/arch/powerpc/include/asm/types.h b/trunk/arch/powerpc/include/asm/types.h index d82e94e6c2b4..8947b9827bc4 100644 --- a/trunk/arch/powerpc/include/asm/types.h +++ b/trunk/arch/powerpc/include/asm/types.h @@ -5,11 +5,8 @@ * This is here because we used to use l64 for 64bit powerpc * and we don't want to impact user mode with our change to ll64 * in the kernel. - * - * However, some user programs are fine with this. They can - * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here. 
*/ -#if !defined(__SANE_USERSPACE_TYPES__) && defined(__powerpc64__) && !defined(__KERNEL__) +#if defined(__powerpc64__) && !defined(__KERNEL__) # include #else # include diff --git a/trunk/arch/powerpc/kernel/Makefile b/trunk/arch/powerpc/kernel/Makefile index ee728e433aa2..ce4f7f179117 100644 --- a/trunk/arch/powerpc/kernel/Makefile +++ b/trunk/arch/powerpc/kernel/Makefile @@ -85,8 +85,6 @@ extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o extra-$(CONFIG_8xx) := head_8xx.o extra-y += vmlinux.lds -obj-$(CONFIG_RELOCATABLE_PPC32) += reloc_32.o - obj-$(CONFIG_PPC32) += entry_32.o setup_32.o obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o obj-$(CONFIG_KGDB) += kgdb.o diff --git a/trunk/arch/powerpc/kernel/asm-offsets.c b/trunk/arch/powerpc/kernel/asm-offsets.c index 04caee7d9bc1..7c5324f1ec9c 100644 --- a/trunk/arch/powerpc/kernel/asm-offsets.c +++ b/trunk/arch/powerpc/kernel/asm-offsets.c @@ -208,7 +208,6 @@ int main(void) DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); - DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost)); #endif /* CONFIG_PPC64 */ /* RTAS */ diff --git a/trunk/arch/powerpc/kernel/cpu_setup_a2.S b/trunk/arch/powerpc/kernel/cpu_setup_a2.S index ebc62f42a237..7f818feaa7a5 100644 --- a/trunk/arch/powerpc/kernel/cpu_setup_a2.S +++ b/trunk/arch/powerpc/kernel/cpu_setup_a2.S @@ -41,16 +41,11 @@ _GLOBAL(__setup_cpu_a2) * core local but doing it always won't hurt */ -#ifdef CONFIG_PPC_ICSWX +#ifdef CONFIG_PPC_WSP_COPRO /* Make sure ACOP starts out as zero */ li r3,0 mtspr SPRN_ACOP,r3 - /* Skip the following if we are in Guest mode */ - mfmsr r3 - andis. r0,r3,MSR_GS@h - bne _icswx_skip_guest - /* Enable icswx instruction */ mfspr r3,SPRN_A2_CCR2 ori r3,r3,A2_CCR2_ENABLE_ICSWX @@ -59,8 +54,7 @@ _GLOBAL(__setup_cpu_a2) /* Unmask all CTs in HACOP */ li r3,-1 mtspr SPRN_HACOP,r3 -_icswx_skip_guest: -#endif /* CONFIG_PPC_ICSWX */ +#endif /* CONFIG_PPC_WSP_COPRO */ /* Enable doorbell */ mfspr r3,SPRN_A2_CCR2 diff --git a/trunk/arch/powerpc/kernel/cputable.c b/trunk/arch/powerpc/kernel/cputable.c index 81db9e2a8a20..edae5bb06f1f 100644 --- a/trunk/arch/powerpc/kernel/cputable.c +++ b/trunk/arch/powerpc/kernel/cputable.c @@ -1505,19 +1505,6 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check = machine_check_4xx, .platform = "ppc405", }, - { /* APM8018X */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x7ff11432, - .cpu_name = "APM8018X", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, { /* default match */ .pvr_mask = 0x00000000, .pvr_value = 0x00000000, @@ -1843,20 +1830,6 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check = machine_check_47x, .platform = "ppc470", }, - { /* 476fpe */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x7ff50000, - .cpu_name = "476fpe", - .cpu_features = CPU_FTRS_47X | CPU_FTR_476_DD2, - .cpu_user_features = COMMON_USER_BOOKE | - PPC_FEATURE_HAS_FPU, - .mmu_features = MMU_FTR_TYPE_47x | - MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, - .icache_bsize = 32, - .dcache_bsize = 128, - .machine_check = machine_check_47x, - .platform = "ppc470", - }, { /* 476 iss */ .pvr_mask = 0xffff0000, .pvr_value = 0x00050000, diff --git a/trunk/arch/powerpc/kernel/crash.c 
b/trunk/arch/powerpc/kernel/crash.c index 28be3452e67a..d879809d5c45 100644 --- a/trunk/arch/powerpc/kernel/crash.c +++ b/trunk/arch/powerpc/kernel/crash.c @@ -10,85 +10,85 @@ * */ +#undef DEBUG + #include #include #include #include +#include #include #include #include +#include +#include #include #include #include +#include #include #include #include #include #include +#include #include #include #include -/* - * The primary CPU waits a while for all secondary CPUs to enter. This is to - * avoid sending an IPI if the secondary CPUs are entering - * crash_kexec_secondary on their own (eg via a system reset). - * - * The secondary timeout has to be longer than the primary. Both timeouts are - * in milliseconds. - */ -#define PRIMARY_TIMEOUT 500 -#define SECONDARY_TIMEOUT 1000 - -#define IPI_TIMEOUT 10000 -#define REAL_MODE_TIMEOUT 10000 +#ifdef DEBUG +#include +#define DBG(fmt...) udbg_printf(fmt) +#else +#define DBG(fmt...) +#endif -/* This keeps a track of which one is the crashing cpu. */ +/* This keeps a track of which one is crashing cpu. */ int crashing_cpu = -1; -static atomic_t cpus_in_crash; -static int time_to_dump; +static cpumask_t cpus_in_crash = CPU_MASK_NONE; +cpumask_t cpus_in_sr = CPU_MASK_NONE; #define CRASH_HANDLER_MAX 3 /* NULL terminated list of shutdown handles */ static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1]; static DEFINE_SPINLOCK(crash_handlers_lock); -static unsigned long crash_shutdown_buf[JMP_BUF_LEN]; -static int crash_shutdown_cpu = -1; - -static int handle_fault(struct pt_regs *regs) -{ - if (crash_shutdown_cpu == smp_processor_id()) - longjmp(crash_shutdown_buf, 1); - return 0; -} - #ifdef CONFIG_SMP +static atomic_t enter_on_soft_reset = ATOMIC_INIT(0); void crash_ipi_callback(struct pt_regs *regs) { - static cpumask_t cpus_state_saved = CPU_MASK_NONE; - int cpu = smp_processor_id(); if (!cpu_online(cpu)) return; hard_irq_disable(); - if (!cpumask_test_cpu(cpu, &cpus_state_saved)) { + if (!cpumask_test_cpu(cpu, &cpus_in_crash)) crash_save_cpu(regs, cpu); - cpumask_set_cpu(cpu, &cpus_state_saved); - } + cpumask_set_cpu(cpu, &cpus_in_crash); - atomic_inc(&cpus_in_crash); - smp_mb__after_atomic_inc(); + /* + * Entered via soft-reset - could be the kdump + * process is invoked using soft-reset or user activated + * it if some CPU did not respond to an IPI. + * For soft-reset, the secondary CPU can enter this func + * twice. 1 - using IPI, and 2. soft-reset. + * Tell the kexec CPU that entered via soft-reset and ready + * to go down. + */ + if (cpumask_test_cpu(cpu, &cpus_in_sr)) { + cpumask_clear_cpu(cpu, &cpus_in_sr); + atomic_inc(&enter_on_soft_reset); + } /* * Starting the kdump boot. * This barrier is needed to make sure that all CPUs are stopped. + * If not, soft-reset will be invoked to bring other CPUs. */ - while (!time_to_dump) + while (!cpumask_test_cpu(crashing_cpu, &cpus_in_crash)) cpu_relax(); if (ppc_md.kexec_cpu_down) @@ -103,99 +103,106 @@ void crash_ipi_callback(struct pt_regs *regs) /* NOTREACHED */ } +/* + * Wait until all CPUs are entered via soft-reset. 
+ */ +static void crash_soft_reset_check(int cpu) +{ + unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */ + + cpumask_clear_cpu(cpu, &cpus_in_sr); + while (atomic_read(&enter_on_soft_reset) != ncpus) + cpu_relax(); +} + + static void crash_kexec_prepare_cpus(int cpu) { unsigned int msecs; - unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */ - int tries = 0; - int (*old_handler)(struct pt_regs *regs); - printk(KERN_EMERG "Sending IPI to other CPUs\n"); + unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */ crash_send_ipi(crash_ipi_callback); smp_wmb(); -again: /* * FIXME: Until we will have the way to stop other CPUs reliably, * the crash CPU will send an IPI and wait for other CPUs to * respond. + * Delay of at least 10 seconds. */ - msecs = IPI_TIMEOUT; - while ((atomic_read(&cpus_in_crash) < ncpus) && (--msecs > 0)) + printk(KERN_EMERG "Sending IPI to other cpus...\n"); + msecs = 10000; + while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) { + cpu_relax(); mdelay(1); - - /* Would it be better to replace the trap vector here? */ - - if (atomic_read(&cpus_in_crash) >= ncpus) { - printk(KERN_EMERG "IPI complete\n"); - return; } - printk(KERN_EMERG "ERROR: %d cpu(s) not responding\n", - ncpus - atomic_read(&cpus_in_crash)); - - /* - * If we have a panic timeout set then we can't wait indefinitely - * for someone to activate system reset. We also give up on the - * second time through if system reset fail to work. - */ - if ((panic_timeout > 0) || (tries > 0)) - return; + /* Would it be better to replace the trap vector here? */ /* - * A system reset will cause all CPUs to take an 0x100 exception. - * The primary CPU returns here via setjmp, and the secondary - * CPUs reexecute the crash_kexec_secondary path. + * FIXME: In case if we do not get all CPUs, one possibility: ask the + * user to do soft reset such that we get all. + * Soft-reset will be used until better mechanism is implemented. */ - old_handler = __debugger; - __debugger = handle_fault; - crash_shutdown_cpu = smp_processor_id(); - - if (setjmp(crash_shutdown_buf) == 0) { - printk(KERN_EMERG "Activate system reset (dumprestart) " - "to stop other cpu(s)\n"); - - /* - * A system reset will force all CPUs to execute the - * crash code again. We need to reset cpus_in_crash so we - * wait for everyone to do this. - */ - atomic_set(&cpus_in_crash, 0); - smp_mb(); - - while (atomic_read(&cpus_in_crash) < ncpus) + if (cpumask_weight(&cpus_in_crash) < ncpus) { + printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n", + ncpus - cpumask_weight(&cpus_in_crash)); + printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n"); + cpumask_clear(&cpus_in_sr); + atomic_set(&enter_on_soft_reset, 0); + while (cpumask_weight(&cpus_in_crash) < ncpus) cpu_relax(); } - - crash_shutdown_cpu = -1; - __debugger = old_handler; - - tries++; - goto again; + /* + * Make sure all CPUs are entered via soft-reset if the kdump is + * invoked using soft-reset. + */ + if (cpumask_test_cpu(cpu, &cpus_in_sr)) + crash_soft_reset_check(cpu); + /* Leave the IPI callback set */ } /* - * This function will be called by secondary cpus. + * This function will be called by secondary cpus or by kexec cpu + * if soft-reset is activated to stop some CPUs. 
*/ void crash_kexec_secondary(struct pt_regs *regs) { + int cpu = smp_processor_id(); unsigned long flags; - int msecs = SECONDARY_TIMEOUT; + int msecs = 5; local_irq_save(flags); - - /* Wait for the primary crash CPU to signal its progress */ + /* Wait 5ms if the kexec CPU is not entered yet. */ while (crashing_cpu < 0) { if (--msecs < 0) { - /* No response, kdump image may not have been loaded */ + /* + * Either kdump image is not loaded or + * kdump process is not started - Probably xmon + * exited using 'x'(exit and recover) or + * kexec_should_crash() failed for all running tasks. + */ + cpumask_clear_cpu(cpu, &cpus_in_sr); local_irq_restore(flags); return; } - mdelay(1); + cpu_relax(); + } + if (cpu == crashing_cpu) { + /* + * Panic CPU will enter this func only via soft-reset. + * Wait until all secondary CPUs entered and + * then start kexec boot. + */ + crash_soft_reset_check(cpu); + cpumask_set_cpu(crashing_cpu, &cpus_in_crash); + if (ppc_md.kexec_cpu_down) + ppc_md.kexec_cpu_down(1, 0); + machine_kexec(kexec_crash_image); + /* NOTREACHED */ } - crash_ipi_callback(regs); } @@ -204,7 +211,7 @@ void crash_kexec_secondary(struct pt_regs *regs) static void crash_kexec_prepare_cpus(int cpu) { /* - * move the secondaries to us so that we can copy + * move the secondarys to us so that we can copy * the new kernel 0-0x100 safely * * do this if kexec in setup.c ? @@ -218,6 +225,7 @@ static void crash_kexec_prepare_cpus(int cpu) void crash_kexec_secondary(struct pt_regs *regs) { + cpumask_clear(&cpus_in_sr); } #endif /* CONFIG_SMP */ @@ -228,7 +236,7 @@ static void crash_kexec_wait_realmode(int cpu) unsigned int msecs; int i; - msecs = REAL_MODE_TIMEOUT; + msecs = 10000; for (i=0; i < nr_cpu_ids && msecs > 0; i++) { if (i == cpu) continue; @@ -300,11 +308,22 @@ int crash_shutdown_unregister(crash_shutdown_t handler) } EXPORT_SYMBOL(crash_shutdown_unregister); +static unsigned long crash_shutdown_buf[JMP_BUF_LEN]; +static int crash_shutdown_cpu = -1; + +static int handle_fault(struct pt_regs *regs) +{ + if (crash_shutdown_cpu == smp_processor_id()) + longjmp(crash_shutdown_buf, 1); + return 0; +} + void default_machine_crash_shutdown(struct pt_regs *regs) { unsigned int i; int (*old_handler)(struct pt_regs *regs); + /* * This function is only called after the system * has panicked or is otherwise in a critical state. @@ -322,26 +341,15 @@ void default_machine_crash_shutdown(struct pt_regs *regs) * such that another IPI will not be sent. */ crashing_cpu = smp_processor_id(); - - /* - * If we came in via system reset, wait a while for the secondary - * CPUs to enter. - */ - if (TRAP(regs) == 0x100) - mdelay(PRIMARY_TIMEOUT); - - crash_kexec_prepare_cpus(crashing_cpu); - crash_save_cpu(regs, crashing_cpu); - - time_to_dump = 1; - + crash_kexec_prepare_cpus(crashing_cpu); + cpumask_set_cpu(crashing_cpu, &cpus_in_crash); crash_kexec_wait_realmode(crashing_cpu); machine_kexec_mask_interrupts(); /* - * Call registered shutdown routines safely. Swap out + * Call registered shutdown routines savely. Swap out * __debugger_fault_handler, and replace on exit. */ old_handler = __debugger_fault_handler; diff --git a/trunk/arch/powerpc/kernel/crash_dump.c b/trunk/arch/powerpc/kernel/crash_dump.c index b3ba5163eae2..424afb6b8fba 100644 --- a/trunk/arch/powerpc/kernel/crash_dump.c +++ b/trunk/arch/powerpc/kernel/crash_dump.c @@ -28,7 +28,7 @@ #define DBG(fmt...) 
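The crash.c hunks above coordinate the kdump path by having the primary CPU send an IPI and then poll, with a bounded millisecond wait, until every secondary has checked in (an atomic counter on one side of the diff, a cpumask on the other). The standalone C sketch below mimics only that rendezvous pattern; it is not kernel code, and every name in it (worker, workers_entered, IPI_TIMEOUT_MS, NWORKERS) is invented for the illustration.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define IPI_TIMEOUT_MS 10000              /* mirrors the 10 s IPI wait above */
#define NWORKERS       3

static atomic_int workers_entered;        /* stands in for cpus_in_crash */

static void *worker(void *arg)
{
	(void)arg;
	/* a real secondary would save its register state here */
	atomic_fetch_add(&workers_entered, 1);
	for (;;)                          /* park, like the spin before kexec_cpu_down */
		sleep(1);
	return NULL;
}

int main(void)
{
	pthread_t tid[NWORKERS];
	int msecs = IPI_TIMEOUT_MS;

	for (int i = 0; i < NWORKERS; i++)
		pthread_create(&tid[i], NULL, worker, NULL);

	/* "send the IPI", then poll with a bounded wait */
	while (atomic_load(&workers_entered) < NWORKERS && --msecs > 0)
		usleep(1000);             /* stand-in for mdelay(1) */

	if (atomic_load(&workers_entered) < NWORKERS)
		fprintf(stderr, "%d worker(s) not responding\n",
			NWORKERS - atomic_load(&workers_entered));
	else
		printf("all workers checked in\n");
	return 0;                         /* process exit reaps the parked threads */
}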
#endif -#ifndef CONFIG_NONSTATIC_KERNEL +#ifndef CONFIG_RELOCATABLE void __init reserve_kdump_trampoline(void) { memblock_reserve(0, KDUMP_RESERVE_LIMIT); @@ -67,7 +67,7 @@ void __init setup_kdump_trampoline(void) DBG(" <- setup_kdump_trampoline()\n"); } -#endif /* CONFIG_NONSTATIC_KERNEL */ +#endif /* CONFIG_RELOCATABLE */ static int __init parse_savemaxmem(char *p) { diff --git a/trunk/arch/powerpc/kernel/exceptions-64s.S b/trunk/arch/powerpc/kernel/exceptions-64s.S index d4be7bb3dbdf..cf9c69b9189c 100644 --- a/trunk/arch/powerpc/kernel/exceptions-64s.S +++ b/trunk/arch/powerpc/kernel/exceptions-64s.S @@ -65,7 +65,7 @@ BEGIN_FTR_SECTION lbz r0,PACAPROCSTART(r13) cmpwi r0,0x80 bne 1f - li r0,1 + li r0,0 stb r0,PACAPROCSTART(r13) b kvm_start_guest 1: diff --git a/trunk/arch/powerpc/kernel/head_44x.S b/trunk/arch/powerpc/kernel/head_44x.S index 7dd2981bcc50..b725dab0f88a 100644 --- a/trunk/arch/powerpc/kernel/head_44x.S +++ b/trunk/arch/powerpc/kernel/head_44x.S @@ -64,35 +64,6 @@ _ENTRY(_start); mr r31,r3 /* save device tree ptr */ li r24,0 /* CPU number */ -#ifdef CONFIG_RELOCATABLE -/* - * Relocate ourselves to the current runtime address. - * This is called only by the Boot CPU. - * "relocate" is called with our current runtime virutal - * address. - * r21 will be loaded with the physical runtime address of _stext - */ - bl 0f /* Get our runtime address */ -0: mflr r21 /* Make it accessible */ - addis r21,r21,(_stext - 0b)@ha - addi r21,r21,(_stext - 0b)@l /* Get our current runtime base */ - - /* - * We have the runtime (virutal) address of our base. - * We calculate our shift of offset from a 256M page. - * We could map the 256M page we belong to at PAGE_OFFSET and - * get going from there. - */ - lis r4,KERNELBASE@h - ori r4,r4,KERNELBASE@l - rlwinm r6,r21,0,4,31 /* r6 = PHYS_START % 256M */ - rlwinm r5,r4,0,4,31 /* r5 = KERNELBASE % 256M */ - subf r3,r5,r6 /* r3 = r6 - r5 */ - add r3,r4,r3 /* Required Virutal Address */ - - bl relocate -#endif - bl init_cpu_state /* @@ -117,65 +88,6 @@ _ENTRY(_start); #ifdef CONFIG_RELOCATABLE /* - * Relocatable kernel support based on processing of dynamic - * relocation entries. - * - * r25 will contain RPN/ERPN for the start address of memory - * r21 will contain the current offset of _stext - */ - lis r3,kernstart_addr@ha - la r3,kernstart_addr@l(r3) - - /* - * Compute the kernstart_addr. - * kernstart_addr => (r6,r8) - * kernstart_addr & ~0xfffffff => (r6,r7) - */ - rlwinm r6,r25,0,28,31 /* ERPN. Bits 32-35 of Address */ - rlwinm r7,r25,0,0,3 /* RPN - assuming 256 MB page size */ - rlwinm r8,r21,0,4,31 /* r8 = (_stext & 0xfffffff) */ - or r8,r7,r8 /* Compute the lower 32bit of kernstart_addr */ - - /* Store kernstart_addr */ - stw r6,0(r3) /* higher 32bit */ - stw r8,4(r3) /* lower 32bit */ - - /* - * Compute the virt_phys_offset : - * virt_phys_offset = stext.run - kernstart_addr - * - * stext.run = (KERNELBASE & ~0xfffffff) + (kernstart_addr & 0xfffffff) - * When we relocate, we have : - * - * (kernstart_addr & 0xfffffff) = (stext.run & 0xfffffff) - * - * hence: - * virt_phys_offset = (KERNELBASE & ~0xfffffff) - (kernstart_addr & ~0xfffffff) - * - */ - - /* KERNELBASE&~0xfffffff => (r4,r5) */ - li r4, 0 /* higer 32bit */ - lis r5,KERNELBASE@h - rlwinm r5,r5,0,0,3 /* Align to 256M, lower 32bit */ - - /* - * 64bit subtraction. 
- */ - subfc r5,r7,r5 - subfe r4,r6,r4 - - /* Store virt_phys_offset */ - lis r3,virt_phys_offset@ha - la r3,virt_phys_offset@l(r3) - - stw r4,0(r3) - stw r5,4(r3) - -#elif defined(CONFIG_DYNAMIC_MEMSTART) - /* - * Mapping based, page aligned dynamic kernel loading. - * * r25 will contain RPN/ERPN for the start address of memory * * Add the difference between KERNELBASE and PAGE_OFFSET to the @@ -820,8 +732,6 @@ _GLOBAL(init_cpu_state) /* We use the PVR to differenciate 44x cores from 476 */ mfspr r3,SPRN_PVR srwi r3,r3,16 - cmplwi cr0,r3,PVR_476FPE@h - beq head_start_47x cmplwi cr0,r3,PVR_476@h beq head_start_47x cmplwi cr0,r3,PVR_476_ISS@h @@ -890,29 +800,12 @@ skpinv: addi r4,r4,1 /* Increment */ /* * Configure and load pinned entry into TLB slot 63. */ -#ifdef CONFIG_NONSTATIC_KERNEL - /* - * In case of a NONSTATIC_KERNEL we reuse the TLB XLAT - * entries of the initial mapping set by the boot loader. - * The XLAT entry is stored in r25 - */ - - /* Read the XLAT entry for our current mapping */ - tlbre r25,r23,PPC44x_TLB_XLAT - - lis r3,KERNELBASE@h - ori r3,r3,KERNELBASE@l - - /* Use our current RPN entry */ - mr r4,r25 -#else lis r3,PAGE_OFFSET@h ori r3,r3,PAGE_OFFSET@l /* Kernel is at the base of RAM */ li r4, 0 /* Load the kernel physical address */ -#endif /* Load the kernel PID = 0 */ li r0,0 diff --git a/trunk/arch/powerpc/kernel/head_fsl_booke.S b/trunk/arch/powerpc/kernel/head_fsl_booke.S index d5d78c4ceef6..9f5d210ddf3f 100644 --- a/trunk/arch/powerpc/kernel/head_fsl_booke.S +++ b/trunk/arch/powerpc/kernel/head_fsl_booke.S @@ -197,7 +197,7 @@ _ENTRY(__early_start) bl early_init -#ifdef CONFIG_DYNAMIC_MEMSTART +#ifdef CONFIG_RELOCATABLE lis r3,kernstart_addr@ha la r3,kernstart_addr@l(r3) #ifdef CONFIG_PHYS_64BIT diff --git a/trunk/arch/powerpc/kernel/idle.c b/trunk/arch/powerpc/kernel/idle.c index 7c66ce13da89..39a2baa6ad58 100644 --- a/trunk/arch/powerpc/kernel/idle.c +++ b/trunk/arch/powerpc/kernel/idle.c @@ -39,23 +39,13 @@ #define cpu_should_die() 0 #endif -unsigned long cpuidle_disable = IDLE_NO_OVERRIDE; -EXPORT_SYMBOL(cpuidle_disable); - static int __init powersave_off(char *arg) { ppc_md.power_save = NULL; - cpuidle_disable = IDLE_POWERSAVE_OFF; return 0; } __setup("powersave=off", powersave_off); -#if defined(CONFIG_PPC_PSERIES) && defined(CONFIG_TRACEPOINTS) -static const bool idle_uses_rcu = 1; -#else -static const bool idle_uses_rcu; -#endif - /* * The body of the idle task. */ @@ -66,10 +56,7 @@ void cpu_idle(void) set_thread_flag(TIF_POLLING_NRFLAG); while (1) { - tick_nohz_idle_enter(); - if (!idle_uses_rcu) - rcu_idle_enter(); - + tick_nohz_stop_sched_tick(1); while (!need_resched() && !cpu_should_die()) { ppc64_runlatch_off(); @@ -106,9 +93,7 @@ void cpu_idle(void) HMT_medium(); ppc64_runlatch_on(); - if (!idle_uses_rcu) - rcu_idle_exit(); - tick_nohz_idle_exit(); + tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); if (cpu_should_die()) cpu_die(); @@ -117,29 +102,6 @@ void cpu_idle(void) } } - -/* - * cpu_idle_wait - Used to ensure that all the CPUs come out of the old - * idle loop and start using the new idle loop. - * Required while changing idle handler on SMP systems. - * Caller must have changed idle handler to the new value before the call. - * This window may be larger on shared systems. 
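The head_44x.S relocation comments earlier in this hunk derive kernstart_addr from the 256 MB physical page the kernel was loaded into plus _stext's offset within that page, and define virt_phys_offset as the difference between the 256 MB-aligned KERNELBASE and the 256 MB-aligned physical start. As a rough aid to reading that assembly, here is the same arithmetic in plain C; it ignores the ERPN high address bits, and both sample addresses are made up.

#include <stdint.h>
#include <stdio.h>

#define SZ_256M    0x10000000ULL
#define KERNELBASE 0xc0000000ULL               /* typical 44x virtual base */

int main(void)
{
	uint64_t phys_start = 0x20000000ULL;   /* made-up 256 MB-aligned load address */
	uint64_t stext_virt = 0xc000c000ULL;   /* made-up runtime address of _stext */

	uint64_t kernstart_addr   = phys_start | (stext_virt & (SZ_256M - 1));
	uint64_t virt_phys_offset = (KERNELBASE & ~(SZ_256M - 1)) -
				    (kernstart_addr & ~(SZ_256M - 1));

	printf("kernstart_addr   = 0x%llx\n", (unsigned long long)kernstart_addr);
	printf("virt_phys_offset = 0x%llx\n", (unsigned long long)virt_phys_offset);
	return 0;
}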
- */ -void cpu_idle_wait(void) -{ - int cpu; - smp_mb(); - - /* kick all the CPUs so that they exit out of old idle routine */ - get_online_cpus(); - for_each_online_cpu(cpu) { - if (cpu != smp_processor_id()) - smp_send_reschedule(cpu); - } - put_online_cpus(); -} -EXPORT_SYMBOL_GPL(cpu_idle_wait); - int powersave_nap; #ifdef CONFIG_SYSCTL diff --git a/trunk/arch/powerpc/kernel/idle_power7.S b/trunk/arch/powerpc/kernel/idle_power7.S index fcdff198da4b..3a70845a51c7 100644 --- a/trunk/arch/powerpc/kernel/idle_power7.S +++ b/trunk/arch/powerpc/kernel/idle_power7.S @@ -54,7 +54,6 @@ _GLOBAL(power7_idle) li r0,0 stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */ stb r0,PACAHARDIRQEN(r13) - stb r0,PACA_NAPSTATELOST(r13) /* Continue saving state */ SAVE_GPR(2, r1) @@ -87,9 +86,6 @@ _GLOBAL(power7_wakeup_loss) rfid _GLOBAL(power7_wakeup_noloss) - lbz r0,PACA_NAPSTATELOST(r13) - cmpwi r0,0 - bne .power7_wakeup_loss ld r1,PACAR1(r13) ld r4,_MSR(r1) ld r5,_NIP(r1) diff --git a/trunk/arch/powerpc/kernel/irq.c b/trunk/arch/powerpc/kernel/irq.c index 701d4aceb4f4..5c3c46948d94 100644 --- a/trunk/arch/powerpc/kernel/irq.c +++ b/trunk/arch/powerpc/kernel/irq.c @@ -115,15 +115,6 @@ static inline notrace void set_soft_enabled(unsigned long enable) : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); } -static inline notrace void decrementer_check_overflow(void) -{ - u64 now = get_tb_or_rtc(); - u64 *next_tb = &__get_cpu_var(decrementers_next_tb); - - if (now >= *next_tb) - set_dec(1); -} - notrace void arch_local_irq_restore(unsigned long en) { /* @@ -173,21 +164,24 @@ notrace void arch_local_irq_restore(unsigned long en) */ local_paca->hard_enabled = en; - /* - * Trigger the decrementer if we have a pending event. Some processors - * only trigger on edge transitions of the sign bit. We might also - * have disabled interrupts long enough that the decrementer wrapped - * to positive. +#ifndef CONFIG_BOOKE + /* On server, re-trigger the decrementer if it went negative since + * some processors only trigger on edge transitions of the sign bit. + * + * BookE has a level sensitive decrementer (latches in TSR) so we + * don't need that */ - decrementer_check_overflow(); + if ((int)mfspr(SPRN_DEC) < 0) + mtspr(SPRN_DEC, 1); +#endif /* CONFIG_BOOKE */ /* * Force the delivery of pending soft-disabled interrupts on PS3. * Any HV call will have this side effect. 
*/ if (firmware_has_feature(FW_FEATURE_PS3_LV1)) { - u64 tmp, tmp2; - lv1_get_version_info(&tmp, &tmp2); + u64 tmp; + lv1_get_version_info(&tmp); } __hard_irq_enable(); diff --git a/trunk/arch/powerpc/kernel/machine_kexec.c b/trunk/arch/powerpc/kernel/machine_kexec.c index c957b1202bdc..9ce1672afb59 100644 --- a/trunk/arch/powerpc/kernel/machine_kexec.c +++ b/trunk/arch/powerpc/kernel/machine_kexec.c @@ -107,6 +107,9 @@ void __init reserve_crashkernel(void) unsigned long long crash_size, crash_base; int ret; + /* this is necessary because of memblock_phys_mem_size() */ + memblock_analyze(); + /* use common parsing */ ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), &crash_size, &crash_base); @@ -125,7 +128,7 @@ void __init reserve_crashkernel(void) crash_size = resource_size(&crashk_res); -#ifndef CONFIG_NONSTATIC_KERNEL +#ifndef CONFIG_RELOCATABLE if (crashk_res.start != KDUMP_KERNELBASE) printk("Crash kernel location must be 0x%x\n", KDUMP_KERNELBASE); diff --git a/trunk/arch/powerpc/kernel/pci-common.c b/trunk/arch/powerpc/kernel/pci-common.c index fa4a573d6716..458ed3bee663 100644 --- a/trunk/arch/powerpc/kernel/pci-common.c +++ b/trunk/arch/powerpc/kernel/pci-common.c @@ -214,7 +214,7 @@ char __devinit *pcibios_setup(char *str) * If the interrupt is used, then gets the interrupt line from the * openfirmware and sets it in the pci_dev and pci_config line. */ -static int pci_read_irq_line(struct pci_dev *pci_dev) +int pci_read_irq_line(struct pci_dev *pci_dev) { struct of_irq oirq; unsigned int virq; @@ -283,6 +283,7 @@ static int pci_read_irq_line(struct pci_dev *pci_dev) return 0; } +EXPORT_SYMBOL(pci_read_irq_line); /* * Platform support for /proc/bus/pci/X/Y mmap()s, @@ -920,22 +921,18 @@ static void __devinit pcibios_fixup_resources(struct pci_dev *dev) struct resource *res = dev->resource + i; if (!res->flags) continue; - - /* If we're going to re-assign everything, we mark all resources - * as unset (and 0-base them). In addition, we mark BARs starting - * at 0 as unset as well, except if PCI_PROBE_ONLY is also set - * since in that case, we don't want to re-assign anything + /* On platforms that have PCI_PROBE_ONLY set, we don't + * consider 0 as an unassigned BAR value. It's technically + * a valid value, but linux doesn't like it... so when we can + * re-assign things, we do so, but if we can't, we keep it + * around and hope for the best... 
*/ - if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) || - (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) { - /* Only print message if not re-assigning */ - if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) - pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] " - "is unassigned\n", - pci_name(dev), i, - (unsigned long long)res->start, - (unsigned long long)res->end, - (unsigned int)res->flags); + if (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY)) { + pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] is unassigned\n", + pci_name(dev), i, + (unsigned long long)res->start, + (unsigned long long)res->end, + (unsigned int)res->flags); res->end -= res->start; res->start = 0; res->flags |= IORESOURCE_UNSET; @@ -1045,16 +1042,6 @@ static void __devinit pcibios_fixup_bridge(struct pci_bus *bus) if (i >= 3 && bus->self->transparent) continue; - /* If we are going to re-assign everything, mark the resource - * as unset and move it down to 0 - */ - if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) { - res->flags |= IORESOURCE_UNSET; - res->end -= res->start; - res->start = 0; - continue; - } - pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n", pci_name(dev), i, (unsigned long long)res->start,\ @@ -1275,15 +1262,18 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus) pci_bus_for_each_resource(bus, res, i) { if (!res || !res->flags || res->start > res->end || res->parent) continue; - - /* If the resource was left unset at this point, we clear it */ - if (res->flags & IORESOURCE_UNSET) - goto clear_resource; - if (bus->parent == NULL) pr = (res->flags & IORESOURCE_IO) ? &ioport_resource : &iomem_resource; else { + /* Don't bother with non-root busses when + * re-assigning all resources. We clear the + * resource flags as if they were colliding + * and as such ensure proper re-allocation + * later. + */ + if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) + goto clear_resource; pr = pci_find_parent_resource(bus->self, res); if (pr == res) { /* this happens when the generic PCI @@ -1314,9 +1304,9 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus) if (reparent_resources(pr, res) == 0) continue; } - pr_warning("PCI: Cannot allocate resource region " - "%d of PCI bridge %d, will remap\n", i, bus->number); - clear_resource: + printk(KERN_WARNING "PCI: Cannot allocate resource region " + "%d of PCI bridge %d, will remap\n", i, bus->number); +clear_resource: res->start = res->end = 0; res->flags = 0; } @@ -1461,11 +1451,16 @@ void __init pcibios_resource_survey(void) { struct pci_bus *b; - /* Allocate and assign resources */ + /* Allocate and assign resources. 
If we re-assign everything, then + * we skip the allocate phase + */ list_for_each_entry(b, &pci_root_buses, node) pcibios_allocate_bus_resources(b); - pcibios_allocate_resources(0); - pcibios_allocate_resources(1); + + if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) { + pcibios_allocate_resources(0); + pcibios_allocate_resources(1); + } /* Before we start assigning unassigned resource, we try to reserve * the low IO area and the VGA memory area if they intersect the @@ -1737,12 +1732,6 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose) if (mode == PCI_PROBE_NORMAL) hose->last_busno = bus->subordinate = pci_scan_child_bus(bus); - /* Platform gets a chance to do some global fixups before - * we proceed to resource allocation - */ - if (ppc_md.pcibios_fixup_phb) - ppc_md.pcibios_fixup_phb(hose); - /* Configure PCI Express settings */ if (bus && !pci_has_flag(PCI_PROBE_ONLY)) { struct pci_bus *child; @@ -1758,13 +1747,10 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose) static void fixup_hide_host_resource_fsl(struct pci_dev *dev) { int i, class = dev->class >> 8; - /* When configured as agent, programing interface = 1 */ - int prog_if = dev->class & 0xf; if ((class == PCI_CLASS_PROCESSOR_POWERPC || class == PCI_CLASS_BRIDGE_OTHER) && (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) && - (prog_if == 0) && (dev->bus->parent == NULL)) { for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { dev->resource[i].start = 0; diff --git a/trunk/arch/powerpc/kernel/pci_dn.c b/trunk/arch/powerpc/kernel/pci_dn.c index dd9e4a04bf79..4e69deb89b37 100644 --- a/trunk/arch/powerpc/kernel/pci_dn.c +++ b/trunk/arch/powerpc/kernel/pci_dn.c @@ -50,9 +50,6 @@ void * __devinit update_dn_pci_info(struct device_node *dn, void *data) dn->data = pdn; pdn->node = dn; pdn->phb = phb; -#ifdef CONFIG_PPC_POWERNV - pdn->pe_number = IODA_INVALID_PE; -#endif regs = of_get_property(dn, "reg", NULL); if (regs) { /* First register entry is addr (00BBSS00) */ diff --git a/trunk/arch/powerpc/kernel/process.c b/trunk/arch/powerpc/kernel/process.c index ebe5766781aa..6457574c0b2f 100644 --- a/trunk/arch/powerpc/kernel/process.c +++ b/trunk/arch/powerpc/kernel/process.c @@ -584,32 +584,16 @@ static struct regbit { unsigned long bit; const char *name; } msr_bits[] = { -#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE) - {MSR_SF, "SF"}, - {MSR_HV, "HV"}, -#endif - {MSR_VEC, "VEC"}, - {MSR_VSX, "VSX"}, -#ifdef CONFIG_BOOKE - {MSR_CE, "CE"}, -#endif {MSR_EE, "EE"}, {MSR_PR, "PR"}, {MSR_FP, "FP"}, + {MSR_VEC, "VEC"}, + {MSR_VSX, "VSX"}, {MSR_ME, "ME"}, -#ifdef CONFIG_BOOKE + {MSR_CE, "CE"}, {MSR_DE, "DE"}, -#else - {MSR_SE, "SE"}, - {MSR_BE, "BE"}, -#endif {MSR_IR, "IR"}, {MSR_DR, "DR"}, - {MSR_PMM, "PMM"}, -#ifndef CONFIG_BOOKE - {MSR_RI, "RI"}, - {MSR_LE, "LE"}, -#endif {0, NULL} }; diff --git a/trunk/arch/powerpc/kernel/prom.c b/trunk/arch/powerpc/kernel/prom.c index abe405dab34d..fa1235b0503b 100644 --- a/trunk/arch/powerpc/kernel/prom.c +++ b/trunk/arch/powerpc/kernel/prom.c @@ -733,6 +733,8 @@ void __init early_init_devtree(void *params) of_scan_flat_dt(early_init_dt_scan_chosen_ppc, cmd_line); /* Scan memory nodes and rebuild MEMBLOCKs */ + memblock_init(); + of_scan_flat_dt(early_init_dt_scan_root, NULL); of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL); @@ -754,14 +756,20 @@ void __init early_init_devtree(void *params) early_reserve_mem(); phyp_dump_reserve_mem(); - /* - * Ensure that total memory size is page-aligned, because otherwise - * mark_bootmem() gets upset. 
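The msr_bits[] hunk in process.c above is an instance of a common kernel pattern: a NULL-terminated table of {bit, name} pairs that a register dump walks to print only the bits that are set. A minimal standalone sketch of that pattern follows; the bit positions below are simplified stand-ins, not an authoritative MSR layout.

#include <stdio.h>

struct regbit {
	unsigned long bit;
	const char *name;
};

static const struct regbit msr_bits[] = {
	{ 1UL << 15, "EE" },   /* simplified positions, for illustration only */
	{ 1UL << 14, "PR" },
	{ 1UL << 13, "FP" },
	{ 0, NULL }            /* terminator, as in the hunk above */
};

static void print_msr_bits(unsigned long val)
{
	for (const struct regbit *b = msr_bits; b->bit; b++)
		if (val & b->bit)
			printf("%s,", b->name);
	printf("\n");
}

int main(void)
{
	print_msr_bits((1UL << 15) | (1UL << 13));   /* prints "EE,FP," */
	return 0;
}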
- */ - limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE); + limit = memory_limit; + if (! limit) { + phys_addr_t memsize; + + /* Ensure that total memory size is page-aligned, because + * otherwise mark_bootmem() gets upset. */ + memblock_analyze(); + memsize = memblock_phys_mem_size(); + if ((memsize & PAGE_MASK) != memsize) + limit = memsize & PAGE_MASK; + } memblock_enforce_memory_limit(limit); - memblock_allow_resize(); + memblock_analyze(); memblock_dump_all(); DBG("Phys. mem: %llx\n", memblock_phys_mem_size()); diff --git a/trunk/arch/powerpc/kernel/prom_init.c b/trunk/arch/powerpc/kernel/prom_init.c index eca626ea3f23..cc584865b3df 100644 --- a/trunk/arch/powerpc/kernel/prom_init.c +++ b/trunk/arch/powerpc/kernel/prom_init.c @@ -742,7 +742,7 @@ static unsigned char ibm_architecture_vec[] = { W(0xffffffff), /* virt_base */ W(0xffffffff), /* virt_size */ W(0xffffffff), /* load_base */ - W(256), /* 256MB min RMA */ + W(64), /* 64MB min RMA */ W(0xffffffff), /* full client load */ 0, /* min RMA percentage of total RAM */ 48, /* max log_2(hash table size) */ @@ -1224,6 +1224,14 @@ static void __init prom_init_mem(void) RELOC(alloc_bottom) = PAGE_ALIGN((unsigned long)&RELOC(_end) + 0x4000); + /* Check if we have an initrd after the kernel, if we do move our bottom + * point to after it + */ + if (RELOC(prom_initrd_start)) { + if (RELOC(prom_initrd_end) > RELOC(alloc_bottom)) + RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(prom_initrd_end)); + } + /* * If prom_memory_limit is set we reduce the upper limits *except* for * alloc_top_high. This must be the real top of RAM so we can put @@ -1261,15 +1269,6 @@ static void __init prom_init_mem(void) RELOC(alloc_top) = RELOC(rmo_top); RELOC(alloc_top_high) = RELOC(ram_top); - /* - * Check if we have an initrd after the kernel but still inside - * the RMO. If we do move our bottom point to after it. - */ - if (RELOC(prom_initrd_start) && - RELOC(prom_initrd_start) < RELOC(rmo_top) && - RELOC(prom_initrd_end) > RELOC(alloc_bottom)) - RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(prom_initrd_end)); - prom_printf("memory layout at init:\n"); prom_printf(" memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit)); prom_printf(" alloc_bottom : %x\n", RELOC(alloc_bottom)); @@ -2080,7 +2079,7 @@ static void __init prom_check_displays(void) /* Setup a usable color table when the appropriate * method is available. Should update this to set-colors */ clut = RELOC(default_colors); - for (i = 0; i < 16; i++, clut += 3) + for (i = 0; i < 32; i++, clut += 3) if (prom_set_color(ih, i, clut[0], clut[1], clut[2]) != 0) break; @@ -2845,7 +2844,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, RELOC(of_platform) = prom_find_machine_type(); prom_printf("Detected machine type: %x\n", RELOC(of_platform)); -#ifndef CONFIG_NONSTATIC_KERNEL +#ifndef CONFIG_RELOCATABLE /* Bail if this is a kdump kernel. */ if (PHYSICAL_START > 0) prom_panic("Error: You can't boot a kdump kernel from OF!\n"); @@ -2970,11 +2969,9 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, /* * in case stdin is USB and still active on IBM machines... * Unfortunately quiesce crashes on some powermacs if we have - * closed stdin already (in particular the powerbook 101). It - * appears that the OPAL version of OFW doesn't like it either. + * closed stdin already (in particular the powerbook 101). 
*/ - if (RELOC(of_platform) != PLATFORM_POWERMAC && - RELOC(of_platform) != PLATFORM_OPAL) + if (RELOC(of_platform) != PLATFORM_POWERMAC) prom_close_stdin(); /* @@ -2990,12 +2987,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, * is common to us and kexec */ hdr = RELOC(dt_header_start); - - /* Don't print anything after quiesce under OPAL, it crashes OFW */ - if (RELOC(of_platform) != PLATFORM_OPAL) { - prom_printf("returning from prom_init\n"); - prom_debug("->dt_header_start=0x%x\n", hdr); - } + prom_printf("returning from prom_init\n"); + prom_debug("->dt_header_start=0x%x\n", hdr); #ifdef CONFIG_PPC32 reloc_got2(-offset); diff --git a/trunk/arch/powerpc/kernel/reloc_32.S b/trunk/arch/powerpc/kernel/reloc_32.S deleted file mode 100644 index ef46ba6e094f..000000000000 --- a/trunk/arch/powerpc/kernel/reloc_32.S +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Code to process dynamic relocations for PPC32. - * - * Copyrights (C) IBM Corporation, 2011. - * Author: Suzuki Poulose - * - * - Based on ppc64 code - reloc_64.S - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include - -/* Dynamic section table entry tags */ -DT_RELA = 7 /* Tag for Elf32_Rela section */ -DT_RELASZ = 8 /* Size of the Rela relocs */ -DT_RELAENT = 9 /* Size of one Rela reloc entry */ - -STN_UNDEF = 0 /* Undefined symbol index */ -STB_LOCAL = 0 /* Local binding for the symbol */ - -R_PPC_ADDR16_LO = 4 /* Lower half of (S+A) */ -R_PPC_ADDR16_HI = 5 /* Upper half of (S+A) */ -R_PPC_ADDR16_HA = 6 /* High Adjusted (S+A) */ -R_PPC_RELATIVE = 22 - -/* - * r3 = desired final address - */ - -_GLOBAL(relocate) - - mflr r0 /* Save our LR */ - bl 0f /* Find our current runtime address */ -0: mflr r12 /* Make it accessible */ - mtlr r0 - - lwz r11, (p_dyn - 0b)(r12) - add r11, r11, r12 /* runtime address of .dynamic section */ - lwz r9, (p_rela - 0b)(r12) - add r9, r9, r12 /* runtime address of .rela.dyn section */ - lwz r10, (p_st - 0b)(r12) - add r10, r10, r12 /* runtime address of _stext section */ - lwz r13, (p_sym - 0b)(r12) - add r13, r13, r12 /* runtime address of .dynsym section */ - - /* - * Scan the dynamic section for RELA, RELASZ entries - */ - li r6, 0 - li r7, 0 - li r8, 0 -1: lwz r5, 0(r11) /* ELF_Dyn.d_tag */ - cmpwi r5, 0 /* End of ELF_Dyn[] */ - beq eodyn - cmpwi r5, DT_RELA - bne relasz - lwz r7, 4(r11) /* r7 = rela.link */ - b skip -relasz: - cmpwi r5, DT_RELASZ - bne relaent - lwz r8, 4(r11) /* r8 = Total Rela relocs size */ - b skip -relaent: - cmpwi r5, DT_RELAENT - bne skip - lwz r6, 4(r11) /* r6 = Size of one Rela reloc */ -skip: - addi r11, r11, 8 - b 1b -eodyn: /* End of Dyn Table scan */ - - /* Check if we have found all the entries */ - cmpwi r7, 0 - beq done - cmpwi r8, 0 - beq done - cmpwi r6, 0 - beq done - - - /* - * Work out the current offset from the link time address of .rela - * section. 
- * cur_offset[r7] = rela.run[r9] - rela.link [r7] - * _stext.link[r12] = _stext.run[r10] - cur_offset[r7] - * final_offset[r3] = _stext.final[r3] - _stext.link[r12] - */ - subf r7, r7, r9 /* cur_offset */ - subf r12, r7, r10 - subf r3, r12, r3 /* final_offset */ - - subf r8, r6, r8 /* relaz -= relaent */ - /* - * Scan through the .rela table and process each entry - * r9 - points to the current .rela table entry - * r13 - points to the symbol table - */ - - /* - * Check if we have a relocation based on symbol - * r5 will hold the value of the symbol. - */ -applyrela: - lwz r4, 4(r9) /* r4 = rela.r_info */ - srwi r5, r4, 8 /* ELF32_R_SYM(r_info) */ - cmpwi r5, STN_UNDEF /* sym == STN_UNDEF ? */ - beq get_type /* value = 0 */ - /* Find the value of the symbol at index(r5) */ - slwi r5, r5, 4 /* r5 = r5 * sizeof(Elf32_Sym) */ - add r12, r13, r5 /* r12 = &__dyn_sym[Index] */ - - /* - * GNU ld has a bug, where dynamic relocs based on - * STB_LOCAL symbols, the value should be assumed - * to be zero. - Alan Modra - */ - /* XXX: Do we need to check if we are using GNU ld ? */ - lbz r5, 12(r12) /* r5 = dyn_sym[Index].st_info */ - extrwi r5, r5, 4, 24 /* r5 = ELF32_ST_BIND(r5) */ - cmpwi r5, STB_LOCAL /* st_value = 0, ld bug */ - beq get_type /* We have r5 = 0 */ - lwz r5, 4(r12) /* r5 = __dyn_sym[Index].st_value */ - -get_type: - /* Load the relocation type to r4 */ - extrwi r4, r4, 8, 24 /* r4 = ELF32_R_TYPE(r_info) = ((char*)r4)[3] */ - - /* R_PPC_RELATIVE */ - cmpwi r4, R_PPC_RELATIVE - bne hi16 - lwz r4, 0(r9) /* r_offset */ - lwz r0, 8(r9) /* r_addend */ - add r0, r0, r3 /* final addend */ - stwx r0, r4, r7 /* memory[r4+r7]) = (u32)r0 */ - b nxtrela /* continue */ - - /* R_PPC_ADDR16_HI */ -hi16: - cmpwi r4, R_PPC_ADDR16_HI - bne ha16 - lwz r4, 0(r9) /* r_offset */ - lwz r0, 8(r9) /* r_addend */ - add r0, r0, r3 - add r0, r0, r5 /* r0 = (S+A+Offset) */ - extrwi r0, r0, 16, 0 /* r0 = (r0 >> 16) */ - b store_half - - /* R_PPC_ADDR16_HA */ -ha16: - cmpwi r4, R_PPC_ADDR16_HA - bne lo16 - lwz r4, 0(r9) /* r_offset */ - lwz r0, 8(r9) /* r_addend */ - add r0, r0, r3 - add r0, r0, r5 /* r0 = (S+A+Offset) */ - extrwi r5, r0, 1, 16 /* Extract bit 16 */ - extrwi r0, r0, 16, 0 /* r0 = (r0 >> 16) */ - add r0, r0, r5 /* Add it to r0 */ - b store_half - - /* R_PPC_ADDR16_LO */ -lo16: - cmpwi r4, R_PPC_ADDR16_LO - bne nxtrela - lwz r4, 0(r9) /* r_offset */ - lwz r0, 8(r9) /* r_addend */ - add r0, r0, r3 - add r0, r0, r5 /* r0 = (S+A+Offset) */ - extrwi r0, r0, 16, 16 /* r0 &= 0xffff */ - /* Fall through to */ - - /* Store half word */ -store_half: - sthx r0, r4, r7 /* memory[r4+r7] = (u16)r0 */ - -nxtrela: - /* - * We have to flush the modified instructions to the - * main storage from the d-cache. And also, invalidate the - * cached instructions in i-cache which has been modified. - * - * We delay the sync / isync operation till the end, since - * we won't be executing the modified instructions until - * we return from here. - */ - dcbst r4,r7 - sync /* Ensure the data is flushed before icbi */ - icbi r4,r7 - cmpwi r8, 0 /* relasz = 0 ? 
*/ - ble done - add r9, r9, r6 /* move to next entry in the .rela table */ - subf r8, r6, r8 /* relasz -= relaent */ - b applyrela - -done: - sync /* Wait for the flush to finish */ - isync /* Discard prefetched instructions */ - blr - -p_dyn: .long __dynamic_start - 0b -p_rela: .long __rela_dyn_start - 0b -p_sym: .long __dynamic_symtab - 0b -p_st: .long _stext - 0b diff --git a/trunk/arch/powerpc/kernel/rtas_flash.c b/trunk/arch/powerpc/kernel/rtas_flash.c index 4174b4b23246..e037c7494fd8 100644 --- a/trunk/arch/powerpc/kernel/rtas_flash.c +++ b/trunk/arch/powerpc/kernel/rtas_flash.c @@ -567,12 +567,6 @@ static void rtas_flash_firmware(int reboot_type) return; } - /* - * Just before starting the firmware flash, cancel the event scan work - * to avoid any soft lockup issues. - */ - rtas_cancel_event_scan(); - /* * NOTE: the "first" block must be under 4GB, so we create * an entry with no data blocks in the reserved buffer in diff --git a/trunk/arch/powerpc/kernel/rtasd.c b/trunk/arch/powerpc/kernel/rtasd.c index 1045ff49cc6d..481ef064c8f1 100644 --- a/trunk/arch/powerpc/kernel/rtasd.c +++ b/trunk/arch/powerpc/kernel/rtasd.c @@ -472,13 +472,6 @@ static void start_event_scan(void) &event_scan_work, event_scan_delay); } -/* Cancel the rtas event scan work */ -void rtas_cancel_event_scan(void) -{ - cancel_delayed_work_sync(&event_scan_work); -} -EXPORT_SYMBOL_GPL(rtas_cancel_event_scan); - static int __init rtas_init(void) { struct proc_dir_entry *entry; diff --git a/trunk/arch/powerpc/kernel/setup_64.c b/trunk/arch/powerpc/kernel/setup_64.c index 4cb8f1e9d044..fb9bb46e7e88 100644 --- a/trunk/arch/powerpc/kernel/setup_64.c +++ b/trunk/arch/powerpc/kernel/setup_64.c @@ -35,8 +35,6 @@ #include #include #include -#include - #include #include #include @@ -66,7 +64,6 @@ #include #include #include -#include #include "setup.h" @@ -220,13 +217,6 @@ void __init early_setup(unsigned long dt_ptr) /* Initialize the hash table or TLB handling */ early_init_mmu(); - /* - * Reserve any gigantic pages requested on the command line. - * memblock needs to have been initialized by the time this is - * called since this will reserve memory. 
- */ - reserve_hugetlb_gpages(); - DBG(" <- early_setup()\n"); } diff --git a/trunk/arch/powerpc/kernel/smp.c b/trunk/arch/powerpc/kernel/smp.c index f0abe92f63f2..6df70907d60a 100644 --- a/trunk/arch/powerpc/kernel/smp.c +++ b/trunk/arch/powerpc/kernel/smp.c @@ -187,8 +187,7 @@ int smp_request_message_ipi(int virq, int msg) return 1; } #endif - err = request_irq(virq, smp_ipi_action[msg], - IRQF_PERCPU | IRQF_NO_THREAD, + err = request_irq(virq, smp_ipi_action[msg], IRQF_PERCPU, smp_ipi_name[msg], 0); WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n", virq, smp_ipi_name[msg], err); diff --git a/trunk/arch/powerpc/kernel/sysfs.c b/trunk/arch/powerpc/kernel/sysfs.c index 6fdf5ffe8c44..ce035c1905f0 100644 --- a/trunk/arch/powerpc/kernel/sysfs.c +++ b/trunk/arch/powerpc/kernel/sysfs.c @@ -18,7 +18,6 @@ #include #include #include -#include #include "cacheinfo.h" @@ -52,7 +51,6 @@ static ssize_t store_smt_snooze_delay(struct sys_device *dev, return -EINVAL; per_cpu(smt_snooze_delay, cpu->sysdev.id) = snooze; - update_smt_snooze_delay(snooze); return count; } @@ -179,13 +177,11 @@ SYSFS_PMCSETUP(mmcra, SPRN_MMCRA); SYSFS_PMCSETUP(purr, SPRN_PURR); SYSFS_PMCSETUP(spurr, SPRN_SPURR); SYSFS_PMCSETUP(dscr, SPRN_DSCR); -SYSFS_PMCSETUP(pir, SPRN_PIR); static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra); static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL); static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr); static SYSDEV_ATTR(purr, 0600, show_purr, store_purr); -static SYSDEV_ATTR(pir, 0400, show_pir, NULL); unsigned long dscr_default = 0; EXPORT_SYMBOL(dscr_default); @@ -396,9 +392,6 @@ static void __cpuinit register_cpu_online(unsigned int cpu) if (cpu_has_feature(CPU_FTR_DSCR)) sysdev_create_file(s, &attr_dscr); - - if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2)) - sysdev_create_file(s, &attr_pir); #endif /* CONFIG_PPC64 */ cacheinfo_cpu_online(cpu); @@ -469,9 +462,6 @@ static void unregister_cpu_online(unsigned int cpu) if (cpu_has_feature(CPU_FTR_DSCR)) sysdev_remove_file(s, &attr_dscr); - - if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2)) - sysdev_remove_file(s, &attr_pir); #endif /* CONFIG_PPC64 */ cacheinfo_cpu_offline(cpu); diff --git a/trunk/arch/powerpc/kernel/time.c b/trunk/arch/powerpc/kernel/time.c index 567dd7c3ac2a..522bb1dfc353 100644 --- a/trunk/arch/powerpc/kernel/time.c +++ b/trunk/arch/powerpc/kernel/time.c @@ -86,6 +86,8 @@ static struct clocksource clocksource_rtc = { .rating = 400, .flags = CLOCK_SOURCE_IS_CONTINUOUS, .mask = CLOCKSOURCE_MASK(64), + .shift = 22, + .mult = 0, /* To be filled in */ .read = rtc_read, }; @@ -95,6 +97,8 @@ static struct clocksource clocksource_timebase = { .rating = 400, .flags = CLOCK_SOURCE_IS_CONTINUOUS, .mask = CLOCKSOURCE_MASK(64), + .shift = 22, + .mult = 0, /* To be filled in */ .read = timebase_read, }; @@ -106,16 +110,22 @@ static void decrementer_set_mode(enum clock_event_mode mode, struct clock_event_device *dev); static struct clock_event_device decrementer_clockevent = { - .name = "decrementer", - .rating = 200, - .irq = 0, - .set_next_event = decrementer_set_next_event, - .set_mode = decrementer_set_mode, - .features = CLOCK_EVT_FEAT_ONESHOT, + .name = "decrementer", + .rating = 200, + .shift = 0, /* To be filled in */ + .mult = 0, /* To be filled in */ + .irq = 0, + .set_next_event = decrementer_set_next_event, + .set_mode = decrementer_set_mode, + .features = CLOCK_EVT_FEAT_ONESHOT, }; -DEFINE_PER_CPU(u64, decrementers_next_tb); -static DEFINE_PER_CPU(struct clock_event_device, decrementers); +struct decrementer_clock { + struct 
clock_event_device event; + u64 next_tb; +}; + +static DEFINE_PER_CPU(struct decrementer_clock, decrementers); #ifdef CONFIG_PPC_ISERIES static unsigned long __initdata iSeries_recal_titan; @@ -158,13 +168,13 @@ EXPORT_SYMBOL_GPL(ppc_tb_freq); #ifdef CONFIG_VIRT_CPU_ACCOUNTING /* * Factors for converting from cputime_t (timebase ticks) to - * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds). + * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds). * These are all stored as 0.64 fixed-point binary fractions. */ u64 __cputime_jiffies_factor; EXPORT_SYMBOL(__cputime_jiffies_factor); -u64 __cputime_usec_factor; -EXPORT_SYMBOL(__cputime_usec_factor); +u64 __cputime_msec_factor; +EXPORT_SYMBOL(__cputime_msec_factor); u64 __cputime_sec_factor; EXPORT_SYMBOL(__cputime_sec_factor); u64 __cputime_clockt_factor; @@ -182,8 +192,8 @@ static void calc_cputime_factors(void) div128_by_32(HZ, 0, tb_ticks_per_sec, &res); __cputime_jiffies_factor = res.result_low; - div128_by_32(1000000, 0, tb_ticks_per_sec, &res); - __cputime_usec_factor = res.result_low; + div128_by_32(1000, 0, tb_ticks_per_sec, &res); + __cputime_msec_factor = res.result_low; div128_by_32(1, 0, tb_ticks_per_sec, &res); __cputime_sec_factor = res.result_low; div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res); @@ -431,7 +441,7 @@ EXPORT_SYMBOL(profile_pc); /* * This function recalibrates the timebase based on the 49-bit time-of-day * value in the Titan chip. The Titan is much more accurate than the value - * returned by the service processor for the timebase frequency. + * returned by the service processor for the timebase frequency. */ static int __init iSeries_tb_recal(void) @@ -566,8 +576,9 @@ void arch_irq_work_raise(void) void timer_interrupt(struct pt_regs * regs) { struct pt_regs *old_regs; - u64 *next_tb = &__get_cpu_var(decrementers_next_tb); - struct clock_event_device *evt = &__get_cpu_var(decrementers); + struct decrementer_clock *decrementer = &__get_cpu_var(decrementers); + struct clock_event_device *evt = &decrementer->event; + u64 now; /* Ensure a positive value is written to the decrementer, or else * some CPUs will continue to take decrementer exceptions. @@ -602,9 +613,16 @@ void timer_interrupt(struct pt_regs * regs) get_lppaca()->int_dword.fields.decr_int = 0; #endif - *next_tb = ~(u64)0; - if (evt->event_handler) - evt->event_handler(evt); + now = get_tb_or_rtc(); + if (now >= decrementer->next_tb) { + decrementer->next_tb = ~(u64)0; + if (evt->event_handler) + evt->event_handler(evt); + } else { + now = decrementer->next_tb - now; + if (now <= DECREMENTER_MAX) + set_dec((int)now); + } #ifdef CONFIG_PPC_ISERIES if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending()) @@ -632,9 +650,9 @@ static void generic_suspend_disable_irqs(void) * with suspending. 
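The timer_interrupt() hunk in time.c above keeps a per-CPU next_tb deadline: if the timebase has reached it, the clockevent handler fires; otherwise the decrementer is re-armed with the remaining ticks, clamped to DECREMENTER_MAX. The sketch below replays that decision in standalone C; get_tb(), set_dec() and the handler are fakes invented for the example.

#include <stdint.h>
#include <stdio.h>

#define DECREMENTER_MAX 0x7fffffff

static uint64_t fake_tb = 1000;            /* pretend timebase */
static uint64_t next_tb = 1500;            /* next programmed event */

static uint64_t get_tb(void)    { return fake_tb; }
static void set_dec(int ticks)  { printf("re-armed decrementer: %d\n", ticks); }
static void event_handler(void) { printf("event fired\n"); }

static void timer_tick(void)
{
	uint64_t now = get_tb();

	if (now >= next_tb) {
		next_tb = ~(uint64_t)0;        /* nothing further programmed */
		event_handler();
	} else {
		uint64_t left = next_tb - now;
		if (left <= DECREMENTER_MAX)
			set_dec((int)left);    /* wait out the remainder */
	}
}

int main(void)
{
	timer_tick();        /* too early: re-arms with the 500 remaining ticks */
	fake_tb = 1600;
	timer_tick();        /* past next_tb: fires the handler */
	return 0;
}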
*/ - set_dec(DECREMENTER_MAX); + set_dec(0x7fffffff); local_irq_disable(); - set_dec(DECREMENTER_MAX); + set_dec(0x7fffffff); } static void generic_suspend_enable_irqs(void) @@ -806,8 +824,9 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, ++vdso_data->tb_update_count; smp_mb(); - /* 19342813113834067 ~= 2^(20+64) / 1e9 */ - new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift); + /* XXX this assumes clock->shift == 22 */ + /* 4611686018 ~= 2^(20+64-22) / 1e9 */ + new_tb_to_xs = (u64) mult * 4611686018ULL; new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC; do_div(new_stamp_xsec, 1000000000); new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC; @@ -858,7 +877,9 @@ static void __init clocksource_init(void) else clock = &clocksource_timebase; - if (clocksource_register_hz(clock, tb_ticks_per_sec)) { + clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift); + + if (clocksource_register(clock)) { printk(KERN_ERR "clocksource: %s is already registered\n", clock->name); return; @@ -871,7 +892,7 @@ static void __init clocksource_init(void) static int decrementer_set_next_event(unsigned long evt, struct clock_event_device *dev) { - __get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt; + __get_cpu_var(decrementers).next_tb = get_tb_or_rtc() + evt; set_dec(evt); return 0; } @@ -883,9 +904,34 @@ static void decrementer_set_mode(enum clock_event_mode mode, decrementer_set_next_event(DECREMENTER_MAX, dev); } +static inline uint64_t div_sc64(unsigned long ticks, unsigned long nsec, + int shift) +{ + uint64_t tmp = ((uint64_t)ticks) << shift; + + do_div(tmp, nsec); + return tmp; +} + +static void __init setup_clockevent_multiplier(unsigned long hz) +{ + u64 mult, shift = 32; + + while (1) { + mult = div_sc64(hz, NSEC_PER_SEC, shift); + if (mult && (mult >> 32UL) == 0UL) + break; + + shift--; + } + + decrementer_clockevent.shift = shift; + decrementer_clockevent.mult = mult; +} + static void register_decrementer_clockevent(int cpu) { - struct clock_event_device *dec = &per_cpu(decrementers, cpu); + struct clock_event_device *dec = &per_cpu(decrementers, cpu).event; *dec = decrementer_clockevent; dec->cpumask = cpumask_of(cpu); @@ -900,8 +946,7 @@ static void __init init_decrementer_clockevent(void) { int cpu = smp_processor_id(); - clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4); - + setup_clockevent_multiplier(ppc_tb_freq); decrementer_clockevent.max_delta_ns = clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent); decrementer_clockevent.min_delta_ns = @@ -969,10 +1014,10 @@ void __init time_init(void) boot_tb = get_tb_or_rtc(); /* If platform provided a timezone (pmac), we correct the time */ - if (timezone_offset) { + if (timezone_offset) { sys_tz.tz_minuteswest = -timezone_offset / 60; sys_tz.tz_dsttime = 0; - } + } vdso_data->tb_update_count = 0; vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; diff --git a/trunk/arch/powerpc/kernel/traps.c b/trunk/arch/powerpc/kernel/traps.c index c091527efd89..5459d148a0f6 100644 --- a/trunk/arch/powerpc/kernel/traps.c +++ b/trunk/arch/powerpc/kernel/traps.c @@ -98,14 +98,18 @@ static void pmac_backlight_unblank(void) static inline void pmac_backlight_unblank(void) { } #endif -static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; -static int die_owner = -1; -static unsigned int die_nest_count; -static int die_counter; - -static unsigned __kprobes long oops_begin(struct pt_regs *regs) +int die(const char *str, struct pt_regs *regs, long err) { - int cpu; + static 
struct { + raw_spinlock_t lock; + u32 lock_owner; + int lock_owner_depth; + } die = { + .lock = __RAW_SPIN_LOCK_UNLOCKED(die.lock), + .lock_owner = -1, + .lock_owner_depth = 0 + }; + static int die_counter; unsigned long flags; if (debugger(regs)) @@ -113,107 +117,64 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs) oops_enter(); - /* racy, but better than risking deadlock. */ - raw_local_irq_save(flags); - cpu = smp_processor_id(); - if (!arch_spin_trylock(&die_lock)) { - if (cpu == die_owner) - /* nested oops. should stop eventually */; - else - arch_spin_lock(&die_lock); - } - die_nest_count++; - die_owner = cpu; - console_verbose(); - bust_spinlocks(1); - if (machine_is(powermac)) - pmac_backlight_unblank(); - return flags; -} - -static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, - int signr) -{ - bust_spinlocks(0); - die_owner = -1; - add_taint(TAINT_DIE); - die_nest_count--; - oops_exit(); - printk("\n"); - if (!die_nest_count) - /* Nest count reaches zero, release the lock. */ - arch_spin_unlock(&die_lock); - raw_local_irq_restore(flags); - - /* - * A system reset (0x100) is a request to dump, so we always send - * it through the crashdump code. - */ - if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) { - crash_kexec(regs); - - /* - * We aren't the primary crash CPU. We need to send it - * to a holding pattern to avoid it ending up in the panic - * code. - */ - crash_kexec_secondary(regs); - } - - if (!signr) - return; - - /* - * While our oops output is serialised by a spinlock, output - * from panic() called below can race and corrupt it. If we - * know we are going to panic, delay for 1 second so we have a - * chance to get clean backtraces from all CPUs that are oopsing. - */ - if (in_interrupt() || panic_on_oops || !current->pid || - is_global_init(current)) { - mdelay(MSEC_PER_SEC); + if (die.lock_owner != raw_smp_processor_id()) { + console_verbose(); + raw_spin_lock_irqsave(&die.lock, flags); + die.lock_owner = smp_processor_id(); + die.lock_owner_depth = 0; + bust_spinlocks(1); + if (machine_is(powermac)) + pmac_backlight_unblank(); + } else { + local_save_flags(flags); } - if (in_interrupt()) - panic("Fatal exception in interrupt"); - if (panic_on_oops) - panic("Fatal exception"); - do_exit(signr); -} - -static int __kprobes __die(const char *str, struct pt_regs *regs, long err) -{ - printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); + if (++die.lock_owner_depth < 3) { + printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); #ifdef CONFIG_PREEMPT - printk("PREEMPT "); + printk("PREEMPT "); #endif #ifdef CONFIG_SMP - printk("SMP NR_CPUS=%d ", NR_CPUS); + printk("SMP NR_CPUS=%d ", NR_CPUS); #endif #ifdef CONFIG_DEBUG_PAGEALLOC - printk("DEBUG_PAGEALLOC "); + printk("DEBUG_PAGEALLOC "); #endif #ifdef CONFIG_NUMA - printk("NUMA "); + printk("NUMA "); #endif - printk("%s\n", ppc_md.name ? ppc_md.name : ""); + printk("%s\n", ppc_md.name ? 
ppc_md.name : ""); - if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP) - return 1; + if (notify_die(DIE_OOPS, str, regs, err, 255, + SIGSEGV) == NOTIFY_STOP) + return 1; - print_modules(); - show_regs(regs); + print_modules(); + show_regs(regs); + } else { + printk("Recursive die() failure, output suppressed\n"); + } - return 0; -} + bust_spinlocks(0); + die.lock_owner = -1; + add_taint(TAINT_DIE); + raw_spin_unlock_irqrestore(&die.lock, flags); -void die(const char *str, struct pt_regs *regs, long err) -{ - unsigned long flags = oops_begin(regs); + if (kexec_should_crash(current) || + kexec_sr_activated(smp_processor_id())) + crash_kexec(regs); + crash_kexec_secondary(regs); - if (__die(str, regs, err)) - err = 0; - oops_end(flags, regs, err); + if (in_interrupt()) + panic("Fatal exception in interrupt"); + + if (panic_on_oops) + panic("Fatal exception"); + + oops_exit(); + do_exit(err); + + return 0; } void user_single_step_siginfo(struct task_struct *tsk, @@ -234,11 +195,10 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) "at %016lx nip %016lx lr %016lx code %x\n"; if (!user_mode(regs)) { - die("Exception in kernel mode", regs, signr); - return; - } - - if (show_unhandled_signals && unhandled_signal(current, signr)) { + if (die("Exception in kernel mode", regs, signr)) + return; + } else if (show_unhandled_signals && + unhandled_signal(current, signr)) { printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, current->comm, current->pid, signr, addr, regs->nip, regs->link, code); @@ -260,8 +220,25 @@ void system_reset_exception(struct pt_regs *regs) return; } +#ifdef CONFIG_KEXEC + cpumask_set_cpu(smp_processor_id(), &cpus_in_sr); +#endif + die("System Reset", regs, SIGABRT); + /* + * Some CPUs when released from the debugger will execute this path. + * These CPUs entered the debugger via a soft-reset. If the CPU was + * hung before entering the debugger it will return to the hung + * state when exiting this function. This causes a problem in + * kdump since the hung CPU(s) will not respond to the IPI sent + * from kdump. To prevent the problem we call crash_kexec_secondary() + * here. If a kdump had not been initiated or we exit the debugger + * with the "exit and recover" command (x) crash_kexec_secondary() + * will return after 5ms and the CPU returns to its previous state. + */ + crash_kexec_secondary(regs); + /* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) panic("Unrecoverable System Reset"); diff --git a/trunk/arch/powerpc/kernel/vmlinux.lds.S b/trunk/arch/powerpc/kernel/vmlinux.lds.S index 710a54005dfb..920276c0f6a1 100644 --- a/trunk/arch/powerpc/kernel/vmlinux.lds.S +++ b/trunk/arch/powerpc/kernel/vmlinux.lds.S @@ -170,13 +170,7 @@ SECTIONS } #ifdef CONFIG_RELOCATABLE . 
= ALIGN(8); - .dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET) - { -#ifdef CONFIG_RELOCATABLE_PPC32 - __dynamic_symtab = .; -#endif - *(.dynsym) - } + .dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET) { *(.dynsym) } .dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) } .dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET) { diff --git a/trunk/arch/powerpc/kvm/book3s_hv.c b/trunk/arch/powerpc/kvm/book3s_hv.c index 336983da9e72..0cb137a9b038 100644 --- a/trunk/arch/powerpc/kvm/book3s_hv.c +++ b/trunk/arch/powerpc/kvm/book3s_hv.c @@ -538,7 +538,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu) tpaca->kvm_hstate.napping = 0; vcpu->cpu = vc->pcpu; smp_wmb(); -#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) +#ifdef CONFIG_PPC_ICP_NATIVE if (vcpu->arch.ptid) { tpaca->cpu_start = 0x80; wmb(); diff --git a/trunk/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/trunk/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 5c8b26183f50..44d8829334ab 100644 --- a/trunk/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/trunk/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -112,9 +112,6 @@ kvm_start_guest: stbcix r0, r5, r6 /* clear it */ stwcix r8, r5, r7 /* EOI it */ - /* NV GPR values from power7_idle() will no longer be valid */ - stb r0, PACA_NAPSTATELOST(r13) - .global kvmppc_hv_entry kvmppc_hv_entry: diff --git a/trunk/arch/powerpc/kvm/book3s_pr.c b/trunk/arch/powerpc/kvm/book3s_pr.c index e2cfb9e1e20e..3c791e1eb675 100644 --- a/trunk/arch/powerpc/kvm/book3s_pr.c +++ b/trunk/arch/powerpc/kvm/book3s_pr.c @@ -658,12 +658,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ulong cmd = kvmppc_get_gpr(vcpu, 3); int i; -#ifdef CONFIG_KVM_BOOK3S_64_PR if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) { r = RESUME_GUEST; break; } -#endif run->papr_hcall.nr = cmd; for (i = 0; i < 9; ++i) { diff --git a/trunk/arch/powerpc/kvm/e500.c b/trunk/arch/powerpc/kvm/e500.c index 8c0d45a6faf7..26d20903f2bc 100644 --- a/trunk/arch/powerpc/kvm/e500.c +++ b/trunk/arch/powerpc/kvm/e500.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include diff --git a/trunk/arch/powerpc/lib/Makefile b/trunk/arch/powerpc/lib/Makefile index 7735a2c2e6d9..166a6a0ad544 100644 --- a/trunk/arch/powerpc/lib/Makefile +++ b/trunk/arch/powerpc/lib/Makefile @@ -16,15 +16,13 @@ obj-$(CONFIG_HAS_IOMEM) += devres.o obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \ memcpy_64.o usercopy_64.o mem_64.o string.o \ - checksum_wrappers_64.o hweight_64.o \ - copyuser_power7.o + checksum_wrappers_64.o hweight_64.o obj-$(CONFIG_XMON) += sstep.o ldstfp.o obj-$(CONFIG_KPROBES) += sstep.o ldstfp.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += sstep.o ldstfp.o ifeq ($(CONFIG_PPC64),y) obj-$(CONFIG_SMP) += locks.o -obj-$(CONFIG_ALTIVEC) += copyuser_power7_vmx.o endif obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o diff --git a/trunk/arch/powerpc/lib/copyuser_64.S b/trunk/arch/powerpc/lib/copyuser_64.S index 773d38f90aaa..578b625d6a3c 100644 --- a/trunk/arch/powerpc/lib/copyuser_64.S +++ b/trunk/arch/powerpc/lib/copyuser_64.S @@ -11,12 +11,6 @@ .align 7 _GLOBAL(__copy_tofrom_user) -BEGIN_FTR_SECTION - nop -FTR_SECTION_ELSE - b __copy_tofrom_user_power7 -ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) -_GLOBAL(__copy_tofrom_user_base) /* first check for a whole page copy on a page boundary */ cmpldi cr1,r5,16 cmpdi cr6,r5,4096 diff --git a/trunk/arch/powerpc/lib/copyuser_power7.S b/trunk/arch/powerpc/lib/copyuser_power7.S deleted file mode 100644 index 497db7b23bb1..000000000000 --- a/trunk/arch/powerpc/lib/copyuser_power7.S +++ /dev/null @@ -1,683 +0,0 @@ -/* - * This program 
is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (C) IBM Corporation, 2011 - * - * Author: Anton Blanchard - */ -#include - -#define STACKFRAMESIZE 256 -#define STK_REG(i) (112 + ((i)-14)*8) - - .macro err1 -100: - .section __ex_table,"a" - .align 3 - .llong 100b,.Ldo_err1 - .previous - .endm - - .macro err2 -200: - .section __ex_table,"a" - .align 3 - .llong 200b,.Ldo_err2 - .previous - .endm - -#ifdef CONFIG_ALTIVEC - .macro err3 -300: - .section __ex_table,"a" - .align 3 - .llong 300b,.Ldo_err3 - .previous - .endm - - .macro err4 -400: - .section __ex_table,"a" - .align 3 - .llong 400b,.Ldo_err4 - .previous - .endm - - -.Ldo_err4: - ld r16,STK_REG(r16)(r1) - ld r15,STK_REG(r15)(r1) - ld r14,STK_REG(r14)(r1) -.Ldo_err3: - bl .exit_vmx_copy - ld r0,STACKFRAMESIZE+16(r1) - mtlr r0 - b .Lexit -#endif /* CONFIG_ALTIVEC */ - -.Ldo_err2: - ld r22,STK_REG(r22)(r1) - ld r21,STK_REG(r21)(r1) - ld r20,STK_REG(r20)(r1) - ld r19,STK_REG(r19)(r1) - ld r18,STK_REG(r18)(r1) - ld r17,STK_REG(r17)(r1) - ld r16,STK_REG(r16)(r1) - ld r15,STK_REG(r15)(r1) - ld r14,STK_REG(r14)(r1) -.Lexit: - addi r1,r1,STACKFRAMESIZE -.Ldo_err1: - ld r3,48(r1) - ld r4,56(r1) - ld r5,64(r1) - b __copy_tofrom_user_base - - -_GLOBAL(__copy_tofrom_user_power7) -#ifdef CONFIG_ALTIVEC - cmpldi r5,16 - cmpldi cr1,r5,4096 - - std r3,48(r1) - std r4,56(r1) - std r5,64(r1) - - blt .Lshort_copy - bgt cr1,.Lvmx_copy -#else - cmpldi r5,16 - - std r3,48(r1) - std r4,56(r1) - std r5,64(r1) - - blt .Lshort_copy -#endif - -.Lnonvmx_copy: - /* Get the source 8B aligned */ - neg r6,r4 - mtocrf 0x01,r6 - clrldi r6,r6,(64-3) - - bf cr7*4+3,1f -err1; lbz r0,0(r4) - addi r4,r4,1 -err1; stb r0,0(r3) - addi r3,r3,1 - -1: bf cr7*4+2,2f -err1; lhz r0,0(r4) - addi r4,r4,2 -err1; sth r0,0(r3) - addi r3,r3,2 - -2: bf cr7*4+1,3f -err1; lwz r0,0(r4) - addi r4,r4,4 -err1; stw r0,0(r3) - addi r3,r3,4 - -3: sub r5,r5,r6 - cmpldi r5,128 - blt 5f - - mflr r0 - stdu r1,-STACKFRAMESIZE(r1) - std r14,STK_REG(r14)(r1) - std r15,STK_REG(r15)(r1) - std r16,STK_REG(r16)(r1) - std r17,STK_REG(r17)(r1) - std r18,STK_REG(r18)(r1) - std r19,STK_REG(r19)(r1) - std r20,STK_REG(r20)(r1) - std r21,STK_REG(r21)(r1) - std r22,STK_REG(r22)(r1) - std r0,STACKFRAMESIZE+16(r1) - - srdi r6,r5,7 - mtctr r6 - - /* Now do cacheline (128B) sized loads and stores. 
*/ - .align 5 -4: -err2; ld r0,0(r4) -err2; ld r6,8(r4) -err2; ld r7,16(r4) -err2; ld r8,24(r4) -err2; ld r9,32(r4) -err2; ld r10,40(r4) -err2; ld r11,48(r4) -err2; ld r12,56(r4) -err2; ld r14,64(r4) -err2; ld r15,72(r4) -err2; ld r16,80(r4) -err2; ld r17,88(r4) -err2; ld r18,96(r4) -err2; ld r19,104(r4) -err2; ld r20,112(r4) -err2; ld r21,120(r4) - addi r4,r4,128 -err2; std r0,0(r3) -err2; std r6,8(r3) -err2; std r7,16(r3) -err2; std r8,24(r3) -err2; std r9,32(r3) -err2; std r10,40(r3) -err2; std r11,48(r3) -err2; std r12,56(r3) -err2; std r14,64(r3) -err2; std r15,72(r3) -err2; std r16,80(r3) -err2; std r17,88(r3) -err2; std r18,96(r3) -err2; std r19,104(r3) -err2; std r20,112(r3) -err2; std r21,120(r3) - addi r3,r3,128 - bdnz 4b - - clrldi r5,r5,(64-7) - - ld r14,STK_REG(r14)(r1) - ld r15,STK_REG(r15)(r1) - ld r16,STK_REG(r16)(r1) - ld r17,STK_REG(r17)(r1) - ld r18,STK_REG(r18)(r1) - ld r19,STK_REG(r19)(r1) - ld r20,STK_REG(r20)(r1) - ld r21,STK_REG(r21)(r1) - ld r22,STK_REG(r22)(r1) - addi r1,r1,STACKFRAMESIZE - - /* Up to 127B to go */ -5: srdi r6,r5,4 - mtocrf 0x01,r6 - -6: bf cr7*4+1,7f -err1; ld r0,0(r4) -err1; ld r6,8(r4) -err1; ld r7,16(r4) -err1; ld r8,24(r4) -err1; ld r9,32(r4) -err1; ld r10,40(r4) -err1; ld r11,48(r4) -err1; ld r12,56(r4) - addi r4,r4,64 -err1; std r0,0(r3) -err1; std r6,8(r3) -err1; std r7,16(r3) -err1; std r8,24(r3) -err1; std r9,32(r3) -err1; std r10,40(r3) -err1; std r11,48(r3) -err1; std r12,56(r3) - addi r3,r3,64 - - /* Up to 63B to go */ -7: bf cr7*4+2,8f -err1; ld r0,0(r4) -err1; ld r6,8(r4) -err1; ld r7,16(r4) -err1; ld r8,24(r4) - addi r4,r4,32 -err1; std r0,0(r3) -err1; std r6,8(r3) -err1; std r7,16(r3) -err1; std r8,24(r3) - addi r3,r3,32 - - /* Up to 31B to go */ -8: bf cr7*4+3,9f -err1; ld r0,0(r4) -err1; ld r6,8(r4) - addi r4,r4,16 -err1; std r0,0(r3) -err1; std r6,8(r3) - addi r3,r3,16 - -9: clrldi r5,r5,(64-4) - - /* Up to 15B to go */ -.Lshort_copy: - mtocrf 0x01,r5 - bf cr7*4+0,12f -err1; lwz r0,0(r4) /* Less chance of a reject with word ops */ -err1; lwz r6,4(r4) - addi r4,r4,8 -err1; stw r0,0(r3) -err1; stw r6,4(r3) - addi r3,r3,8 - -12: bf cr7*4+1,13f -err1; lwz r0,0(r4) - addi r4,r4,4 -err1; stw r0,0(r3) - addi r3,r3,4 - -13: bf cr7*4+2,14f -err1; lhz r0,0(r4) - addi r4,r4,2 -err1; sth r0,0(r3) - addi r3,r3,2 - -14: bf cr7*4+3,15f -err1; lbz r0,0(r4) -err1; stb r0,0(r3) - -15: li r3,0 - blr - -.Lunwind_stack_nonvmx_copy: - addi r1,r1,STACKFRAMESIZE - b .Lnonvmx_copy - -#ifdef CONFIG_ALTIVEC -.Lvmx_copy: - mflr r0 - std r0,16(r1) - stdu r1,-STACKFRAMESIZE(r1) - bl .enter_vmx_copy - cmpwi r3,0 - ld r0,STACKFRAMESIZE+16(r1) - ld r3,STACKFRAMESIZE+48(r1) - ld r4,STACKFRAMESIZE+56(r1) - ld r5,STACKFRAMESIZE+64(r1) - mtlr r0 - - beq .Lunwind_stack_nonvmx_copy - - /* - * If source and destination are not relatively aligned we use a - * slower permute loop. - */ - xor r6,r4,r3 - rldicl. 
r6,r6,0,(64-4) - bne .Lvmx_unaligned_copy - - /* Get the destination 16B aligned */ - neg r6,r3 - mtocrf 0x01,r6 - clrldi r6,r6,(64-4) - - bf cr7*4+3,1f -err3; lbz r0,0(r4) - addi r4,r4,1 -err3; stb r0,0(r3) - addi r3,r3,1 - -1: bf cr7*4+2,2f -err3; lhz r0,0(r4) - addi r4,r4,2 -err3; sth r0,0(r3) - addi r3,r3,2 - -2: bf cr7*4+1,3f -err3; lwz r0,0(r4) - addi r4,r4,4 -err3; stw r0,0(r3) - addi r3,r3,4 - -3: bf cr7*4+0,4f -err3; ld r0,0(r4) - addi r4,r4,8 -err3; std r0,0(r3) - addi r3,r3,8 - -4: sub r5,r5,r6 - - /* Get the desination 128B aligned */ - neg r6,r3 - srdi r7,r6,4 - mtocrf 0x01,r7 - clrldi r6,r6,(64-7) - - li r9,16 - li r10,32 - li r11,48 - - bf cr7*4+3,5f -err3; lvx vr1,r0,r4 - addi r4,r4,16 -err3; stvx vr1,r0,r3 - addi r3,r3,16 - -5: bf cr7*4+2,6f -err3; lvx vr1,r0,r4 -err3; lvx vr0,r4,r9 - addi r4,r4,32 -err3; stvx vr1,r0,r3 -err3; stvx vr0,r3,r9 - addi r3,r3,32 - -6: bf cr7*4+1,7f -err3; lvx vr3,r0,r4 -err3; lvx vr2,r4,r9 -err3; lvx vr1,r4,r10 -err3; lvx vr0,r4,r11 - addi r4,r4,64 -err3; stvx vr3,r0,r3 -err3; stvx vr2,r3,r9 -err3; stvx vr1,r3,r10 -err3; stvx vr0,r3,r11 - addi r3,r3,64 - -7: sub r5,r5,r6 - srdi r6,r5,7 - - std r14,STK_REG(r14)(r1) - std r15,STK_REG(r15)(r1) - std r16,STK_REG(r16)(r1) - - li r12,64 - li r14,80 - li r15,96 - li r16,112 - - mtctr r6 - - /* - * Now do cacheline sized loads and stores. By this stage the - * cacheline stores are also cacheline aligned. - */ - .align 5 -8: -err4; lvx vr7,r0,r4 -err4; lvx vr6,r4,r9 -err4; lvx vr5,r4,r10 -err4; lvx vr4,r4,r11 -err4; lvx vr3,r4,r12 -err4; lvx vr2,r4,r14 -err4; lvx vr1,r4,r15 -err4; lvx vr0,r4,r16 - addi r4,r4,128 -err4; stvx vr7,r0,r3 -err4; stvx vr6,r3,r9 -err4; stvx vr5,r3,r10 -err4; stvx vr4,r3,r11 -err4; stvx vr3,r3,r12 -err4; stvx vr2,r3,r14 -err4; stvx vr1,r3,r15 -err4; stvx vr0,r3,r16 - addi r3,r3,128 - bdnz 8b - - ld r14,STK_REG(r14)(r1) - ld r15,STK_REG(r15)(r1) - ld r16,STK_REG(r16)(r1) - - /* Up to 127B to go */ - clrldi r5,r5,(64-7) - srdi r6,r5,4 - mtocrf 0x01,r6 - - bf cr7*4+1,9f -err3; lvx vr3,r0,r4 -err3; lvx vr2,r4,r9 -err3; lvx vr1,r4,r10 -err3; lvx vr0,r4,r11 - addi r4,r4,64 -err3; stvx vr3,r0,r3 -err3; stvx vr2,r3,r9 -err3; stvx vr1,r3,r10 -err3; stvx vr0,r3,r11 - addi r3,r3,64 - -9: bf cr7*4+2,10f -err3; lvx vr1,r0,r4 -err3; lvx vr0,r4,r9 - addi r4,r4,32 -err3; stvx vr1,r0,r3 -err3; stvx vr0,r3,r9 - addi r3,r3,32 - -10: bf cr7*4+3,11f -err3; lvx vr1,r0,r4 - addi r4,r4,16 -err3; stvx vr1,r0,r3 - addi r3,r3,16 - - /* Up to 15B to go */ -11: clrldi r5,r5,(64-4) - mtocrf 0x01,r5 - bf cr7*4+0,12f -err3; ld r0,0(r4) - addi r4,r4,8 -err3; std r0,0(r3) - addi r3,r3,8 - -12: bf cr7*4+1,13f -err3; lwz r0,0(r4) - addi r4,r4,4 -err3; stw r0,0(r3) - addi r3,r3,4 - -13: bf cr7*4+2,14f -err3; lhz r0,0(r4) - addi r4,r4,2 -err3; sth r0,0(r3) - addi r3,r3,2 - -14: bf cr7*4+3,15f -err3; lbz r0,0(r4) -err3; stb r0,0(r3) - -15: addi r1,r1,STACKFRAMESIZE - b .exit_vmx_copy /* tail call optimise */ - -.Lvmx_unaligned_copy: - /* Get the destination 16B aligned */ - neg r6,r3 - mtocrf 0x01,r6 - clrldi r6,r6,(64-4) - - bf cr7*4+3,1f -err3; lbz r0,0(r4) - addi r4,r4,1 -err3; stb r0,0(r3) - addi r3,r3,1 - -1: bf cr7*4+2,2f -err3; lhz r0,0(r4) - addi r4,r4,2 -err3; sth r0,0(r3) - addi r3,r3,2 - -2: bf cr7*4+1,3f -err3; lwz r0,0(r4) - addi r4,r4,4 -err3; stw r0,0(r3) - addi r3,r3,4 - -3: bf cr7*4+0,4f -err3; lwz r0,0(r4) /* Less chance of a reject with word ops */ -err3; lwz r7,4(r4) - addi r4,r4,8 -err3; stw r0,0(r3) -err3; stw r7,4(r3) - addi r3,r3,8 - -4: sub r5,r5,r6 - - /* Get the desination 128B aligned */ 
- neg r6,r3 - srdi r7,r6,4 - mtocrf 0x01,r7 - clrldi r6,r6,(64-7) - - li r9,16 - li r10,32 - li r11,48 - - lvsl vr16,0,r4 /* Setup permute control vector */ -err3; lvx vr0,0,r4 - addi r4,r4,16 - - bf cr7*4+3,5f -err3; lvx vr1,r0,r4 - vperm vr8,vr0,vr1,vr16 - addi r4,r4,16 -err3; stvx vr8,r0,r3 - addi r3,r3,16 - vor vr0,vr1,vr1 - -5: bf cr7*4+2,6f -err3; lvx vr1,r0,r4 - vperm vr8,vr0,vr1,vr16 -err3; lvx vr0,r4,r9 - vperm vr9,vr1,vr0,vr16 - addi r4,r4,32 -err3; stvx vr8,r0,r3 -err3; stvx vr9,r3,r9 - addi r3,r3,32 - -6: bf cr7*4+1,7f -err3; lvx vr3,r0,r4 - vperm vr8,vr0,vr3,vr16 -err3; lvx vr2,r4,r9 - vperm vr9,vr3,vr2,vr16 -err3; lvx vr1,r4,r10 - vperm vr10,vr2,vr1,vr16 -err3; lvx vr0,r4,r11 - vperm vr11,vr1,vr0,vr16 - addi r4,r4,64 -err3; stvx vr8,r0,r3 -err3; stvx vr9,r3,r9 -err3; stvx vr10,r3,r10 -err3; stvx vr11,r3,r11 - addi r3,r3,64 - -7: sub r5,r5,r6 - srdi r6,r5,7 - - std r14,STK_REG(r14)(r1) - std r15,STK_REG(r15)(r1) - std r16,STK_REG(r16)(r1) - - li r12,64 - li r14,80 - li r15,96 - li r16,112 - - mtctr r6 - - /* - * Now do cacheline sized loads and stores. By this stage the - * cacheline stores are also cacheline aligned. - */ - .align 5 -8: -err4; lvx vr7,r0,r4 - vperm vr8,vr0,vr7,vr16 -err4; lvx vr6,r4,r9 - vperm vr9,vr7,vr6,vr16 -err4; lvx vr5,r4,r10 - vperm vr10,vr6,vr5,vr16 -err4; lvx vr4,r4,r11 - vperm vr11,vr5,vr4,vr16 -err4; lvx vr3,r4,r12 - vperm vr12,vr4,vr3,vr16 -err4; lvx vr2,r4,r14 - vperm vr13,vr3,vr2,vr16 -err4; lvx vr1,r4,r15 - vperm vr14,vr2,vr1,vr16 -err4; lvx vr0,r4,r16 - vperm vr15,vr1,vr0,vr16 - addi r4,r4,128 -err4; stvx vr8,r0,r3 -err4; stvx vr9,r3,r9 -err4; stvx vr10,r3,r10 -err4; stvx vr11,r3,r11 -err4; stvx vr12,r3,r12 -err4; stvx vr13,r3,r14 -err4; stvx vr14,r3,r15 -err4; stvx vr15,r3,r16 - addi r3,r3,128 - bdnz 8b - - ld r14,STK_REG(r14)(r1) - ld r15,STK_REG(r15)(r1) - ld r16,STK_REG(r16)(r1) - - /* Up to 127B to go */ - clrldi r5,r5,(64-7) - srdi r6,r5,4 - mtocrf 0x01,r6 - - bf cr7*4+1,9f -err3; lvx vr3,r0,r4 - vperm vr8,vr0,vr3,vr16 -err3; lvx vr2,r4,r9 - vperm vr9,vr3,vr2,vr16 -err3; lvx vr1,r4,r10 - vperm vr10,vr2,vr1,vr16 -err3; lvx vr0,r4,r11 - vperm vr11,vr1,vr0,vr16 - addi r4,r4,64 -err3; stvx vr8,r0,r3 -err3; stvx vr9,r3,r9 -err3; stvx vr10,r3,r10 -err3; stvx vr11,r3,r11 - addi r3,r3,64 - -9: bf cr7*4+2,10f -err3; lvx vr1,r0,r4 - vperm vr8,vr0,vr1,vr16 -err3; lvx vr0,r4,r9 - vperm vr9,vr1,vr0,vr16 - addi r4,r4,32 -err3; stvx vr8,r0,r3 -err3; stvx vr9,r3,r9 - addi r3,r3,32 - -10: bf cr7*4+3,11f -err3; lvx vr1,r0,r4 - vperm vr8,vr0,vr1,vr16 - addi r4,r4,16 -err3; stvx vr8,r0,r3 - addi r3,r3,16 - - /* Up to 15B to go */ -11: clrldi r5,r5,(64-4) - addi r4,r4,-16 /* Unwind the +16 load offset */ - mtocrf 0x01,r5 - bf cr7*4+0,12f -err3; lwz r0,0(r4) /* Less chance of a reject with word ops */ -err3; lwz r6,4(r4) - addi r4,r4,8 -err3; stw r0,0(r3) -err3; stw r6,4(r3) - addi r3,r3,8 - -12: bf cr7*4+1,13f -err3; lwz r0,0(r4) - addi r4,r4,4 -err3; stw r0,0(r3) - addi r3,r3,4 - -13: bf cr7*4+2,14f -err3; lhz r0,0(r4) - addi r4,r4,2 -err3; sth r0,0(r3) - addi r3,r3,2 - -14: bf cr7*4+3,15f -err3; lbz r0,0(r4) -err3; stb r0,0(r3) - -15: addi r1,r1,STACKFRAMESIZE - b .exit_vmx_copy /* tail call optimise */ -#endif /* CONFiG_ALTIVEC */ diff --git a/trunk/arch/powerpc/lib/copyuser_power7_vmx.c b/trunk/arch/powerpc/lib/copyuser_power7_vmx.c deleted file mode 100644 index 6e1efadac48b..000000000000 --- a/trunk/arch/powerpc/lib/copyuser_power7_vmx.c +++ /dev/null @@ -1,50 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it 
under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (C) IBM Corporation, 2011 - * - * Authors: Sukadev Bhattiprolu - * Anton Blanchard - */ -#include -#include - -int enter_vmx_copy(void) -{ - if (in_interrupt()) - return 0; - - /* This acts as preempt_disable() as well and will make - * enable_kernel_altivec(). We need to disable page faults - * as they can call schedule and thus make us lose the VMX - * context. So on page faults, we just fail which will cause - * a fallback to the normal non-vmx copy. - */ - pagefault_disable(); - - enable_kernel_altivec(); - - return 1; -} - -/* - * This function must return 0 because we tail call optimise when calling - * from __copy_tofrom_user_power7 which returns 0 on success. - */ -int exit_vmx_copy(void) -{ - pagefault_enable(); - return 0; -} diff --git a/trunk/arch/powerpc/mm/44x_mmu.c b/trunk/arch/powerpc/mm/44x_mmu.c index 388b95e1a009..f60e006d90c3 100644 --- a/trunk/arch/powerpc/mm/44x_mmu.c +++ b/trunk/arch/powerpc/mm/44x_mmu.c @@ -78,7 +78,11 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys) "tlbwe %1,%3,%5\n" "tlbwe %0,%3,%6\n" : +#ifdef CONFIG_PPC47x + : "r" (PPC47x_TLB2_S_RWX), +#else : "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G), +#endif "r" (phys), "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M), "r" (entry), @@ -217,7 +221,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base, { u64 size; -#ifndef CONFIG_NONSTATIC_KERNEL +#ifndef CONFIG_RELOCATABLE /* We don't currently support the first MEMBLOCK not mapping 0 * physical on those processors */ diff --git a/trunk/arch/powerpc/mm/Makefile b/trunk/arch/powerpc/mm/Makefile index 3787b61f7d20..991ee813d2a8 100644 --- a/trunk/arch/powerpc/mm/Makefile +++ b/trunk/arch/powerpc/mm/Makefile @@ -21,8 +21,6 @@ obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o obj-$(CONFIG_PPC_STD_MMU) += hash_low_$(CONFIG_WORD_SIZE).o \ tlb_hash$(CONFIG_WORD_SIZE).o \ mmu_context_hash$(CONFIG_WORD_SIZE).o -obj-$(CONFIG_PPC_ICSWX) += icswx.o -obj-$(CONFIG_PPC_ICSWX_PID) += icswx_pid.o obj-$(CONFIG_40x) += 40x_mmu.o obj-$(CONFIG_44x) += 44x_mmu.o obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_booke_mmu.o diff --git a/trunk/arch/powerpc/mm/fault.c b/trunk/arch/powerpc/mm/fault.c index 2f0d1b032a89..5efe8c96d37f 100644 --- a/trunk/arch/powerpc/mm/fault.c +++ b/trunk/arch/powerpc/mm/fault.c @@ -44,8 +44,6 @@ #include #include -#include "icswx.h" - #ifdef CONFIG_KPROBES static inline int notify_page_fault(struct pt_regs *regs) { @@ -145,21 +143,6 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, is_write = error_code & ESR_DST; #endif /* CONFIG_4xx || CONFIG_BOOKE */ -#ifdef CONFIG_PPC_ICSWX - /* - * we need to do this early because this "data storage - * interrupt" does not update the DAR/DEAR so we don't want to - * look at it - */ - if (error_code & ICSWX_DSI_UCT) { - int ret; - - ret = acop_handle_fault(regs, address, error_code); - if (ret) 
- return ret; - } -#endif - if (notify_page_fault(regs)) return 0; diff --git a/trunk/arch/powerpc/mm/hugetlbpage-book3e.c b/trunk/arch/powerpc/mm/hugetlbpage-book3e.c index 3bc700655fc8..343ad0b87261 100644 --- a/trunk/arch/powerpc/mm/hugetlbpage-book3e.c +++ b/trunk/arch/powerpc/mm/hugetlbpage-book3e.c @@ -37,32 +37,31 @@ static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid) return found; } -void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, - pte_t pte) +void book3e_hugetlb_preload(struct mm_struct *mm, unsigned long ea, pte_t pte) { unsigned long mas1, mas2; u64 mas7_3; unsigned long psize, tsize, shift; unsigned long flags; - struct mm_struct *mm; #ifdef CONFIG_PPC_FSL_BOOK3E - int index, ncams; + int index, lz, ncams; + struct vm_area_struct *vma; #endif if (unlikely(is_kernel_addr(ea))) return; - mm = vma->vm_mm; - #ifdef CONFIG_PPC_MM_SLICES - psize = get_slice_psize(mm, ea); - tsize = mmu_get_tsize(psize); + psize = mmu_get_tsize(get_slice_psize(mm, ea)); + tsize = mmu_get_psize(psize); shift = mmu_psize_defs[psize].shift; #else - psize = vma_mmu_pagesize(vma); - shift = __ilog2(psize); - tsize = shift - 10; + vma = find_vma(mm, ea); + psize = vma_mmu_pagesize(vma); /* returns actual size in bytes */ + asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (psize)); + shift = 31 - lz; + tsize = 21 - lz; #endif /* diff --git a/trunk/arch/powerpc/mm/hugetlbpage.c b/trunk/arch/powerpc/mm/hugetlbpage.c index a8b3cc7d90fe..8558b572e55d 100644 --- a/trunk/arch/powerpc/mm/hugetlbpage.c +++ b/trunk/arch/powerpc/mm/hugetlbpage.c @@ -29,22 +29,22 @@ unsigned int HPAGE_SHIFT; /* * Tracks gpages after the device tree is scanned and before the - * huge_boot_pages list is ready. On non-Freescale implementations, this is - * just used to track 16G pages and so is a single array. FSL-based - * implementations may have more than one gpage size, so we need multiple - * arrays + * huge_boot_pages list is ready. On 64-bit implementations, this is + * just used to track 16G pages and so is a single array. 32-bit + * implementations may have more than one gpage size due to limitations + * of the memory allocators, so we need multiple arrays */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC64 +#define MAX_NUMBER_GPAGES 1024 +static u64 gpage_freearray[MAX_NUMBER_GPAGES]; +static unsigned nr_gpages; +#else #define MAX_NUMBER_GPAGES 128 struct psize_gpages { u64 gpage_list[MAX_NUMBER_GPAGES]; unsigned int nr_gpages; }; static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT]; -#else -#define MAX_NUMBER_GPAGES 1024 -static u64 gpage_freearray[MAX_NUMBER_GPAGES]; -static unsigned nr_gpages; #endif static inline int shift_to_mmu_psize(unsigned int shift) @@ -115,12 +115,12 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, struct kmem_cache *cachep; pte_t *new; -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC64 + cachep = PGT_CACHE(pdshift - pshift); +#else int i; int num_hugepd = 1 << (pshift - pdshift); cachep = hugepte_cache; -#else - cachep = PGT_CACHE(pdshift - pshift); #endif new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT); @@ -132,7 +132,12 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, return -ENOMEM; spin_lock(&mm->page_table_lock); -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC64 + if (!hugepd_none(*hpdp)) + kmem_cache_free(cachep, new); + else + hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; +#else /* * We have multiple higher-level entries that point to the same * actual pte location. 
Fill in each as we go and backtrack on error. @@ -151,28 +156,11 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, hpdp->pd = 0; kmem_cache_free(cachep, new); } -#else - if (!hugepd_none(*hpdp)) - kmem_cache_free(cachep, new); - else - hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; #endif spin_unlock(&mm->page_table_lock); return 0; } -/* - * These macros define how to determine which level of the page table holds - * the hpdp. - */ -#ifdef CONFIG_PPC_FSL_BOOK3E -#define HUGEPD_PGD_SHIFT PGDIR_SHIFT -#define HUGEPD_PUD_SHIFT PUD_SHIFT -#else -#define HUGEPD_PGD_SHIFT PUD_SHIFT -#define HUGEPD_PUD_SHIFT PMD_SHIFT -#endif - pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) { pgd_t *pg; @@ -185,13 +173,12 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz addr &= ~(sz-1); pg = pgd_offset(mm, addr); - - if (pshift >= HUGEPD_PGD_SHIFT) { + if (pshift >= PUD_SHIFT) { hpdp = (hugepd_t *)pg; } else { pdshift = PUD_SHIFT; pu = pud_alloc(mm, pg, addr); - if (pshift >= HUGEPD_PUD_SHIFT) { + if (pshift >= PMD_SHIFT) { hpdp = (hugepd_t *)pu; } else { pdshift = PMD_SHIFT; @@ -211,7 +198,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz return hugepte_offset(hpdp, addr, pdshift); } -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC32 /* Build list of addresses of gigantic pages. This function is used in early * boot before the buddy or bootmem allocator is setup. */ @@ -331,7 +318,7 @@ void __init reserve_hugetlb_gpages(void) } } -#else /* !PPC_FSL_BOOK3E */ +#else /* PPC64 */ /* Build list of addresses of gigantic pages. This function is used in early * boot before the buddy or bootmem allocator is setup. @@ -369,7 +356,7 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) return 0; } -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC32 #define HUGEPD_FREELIST_SIZE \ ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t)) @@ -429,11 +416,11 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif unsigned long pdmask = ~((1UL << pdshift) - 1); unsigned int num_hugepd = 1; -#ifdef CONFIG_PPC_FSL_BOOK3E - /* Note: On fsl the hpdp may be the first of several */ - num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift)); -#else +#ifdef CONFIG_PPC64 unsigned int shift = hugepd_shift(*hpdp); +#else + /* Note: On 32-bit the hpdp may be the first of several */ + num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift)); #endif start &= pdmask; @@ -451,11 +438,10 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif hpdp->pd = 0; tlb->need_flush = 1; - -#ifdef CONFIG_PPC_FSL_BOOK3E - hugepd_free(tlb, hugepte); -#else +#ifdef CONFIG_PPC64 pgtable_free_tlb(tlb, hugepte, pdshift - shift); +#else + hugepd_free(tlb, hugepte); #endif } @@ -468,23 +454,14 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, unsigned long start; start = addr; + pmd = pmd_offset(pud, addr); do { - pmd = pmd_offset(pud, addr); next = pmd_addr_end(addr, end); if (pmd_none(*pmd)) continue; -#ifdef CONFIG_PPC_FSL_BOOK3E - /* - * Increment next by the size of the huge mapping since - * there may be more than one entry at this level for a - * single hugepage, but all of them point to - * the same kmem cache that holds the hugepte. 
- */ - next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd)); -#endif free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT, addr, next, floor, ceiling); - } while (addr = next, addr != end); + } while (pmd++, addr = next, addr != end); start &= PUD_MASK; if (start < floor) @@ -511,8 +488,8 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, unsigned long start; start = addr; + pud = pud_offset(pgd, addr); do { - pud = pud_offset(pgd, addr); next = pud_addr_end(addr, end); if (!is_hugepd(pud)) { if (pud_none_or_clear_bad(pud)) @@ -520,19 +497,10 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling); } else { -#ifdef CONFIG_PPC_FSL_BOOK3E - /* - * Increment next by the size of the huge mapping since - * there may be more than one entry at this level for a - * single hugepage, but all of them point to - * the same kmem cache that holds the hugepte. - */ - next = addr + (1 << hugepd_shift(*(hugepd_t *)pud)); -#endif free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT, addr, next, floor, ceiling); } - } while (addr = next, addr != end); + } while (pud++, addr = next, addr != end); start &= PGDIR_MASK; if (start < floor) @@ -587,12 +555,12 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, continue; hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling); } else { -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC32 /* * Increment next by the size of the huge mapping since - * there may be more than one entry at the pgd level - * for a single hugepage, but all of them point to the - * same kmem cache that holds the hugepte. + * on 32-bit there may be more than one entry at the pgd + * level for a single hugepage, but all of them point to + * the same kmem cache that holds the hugepte. */ next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd)); #endif @@ -730,17 +698,19 @@ int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, return 1; } -#ifdef CONFIG_PPC_MM_SLICES unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { +#ifdef CONFIG_PPC_MM_SLICES struct hstate *hstate = hstate_file(file); int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate)); return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0); -} +#else + return get_unmapped_area(file, addr, len, pgoff, flags); #endif +} unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) { @@ -814,7 +784,7 @@ static int __init hugepage_setup_sz(char *str) } __setup("hugepagesz=", hugepage_setup_sz); -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_FSL_BOOKE struct kmem_cache *hugepte_cache; static int __init hugetlbpage_init(void) { diff --git a/trunk/arch/powerpc/mm/icswx.c b/trunk/arch/powerpc/mm/icswx.c deleted file mode 100644 index 5d9a59eaad93..000000000000 --- a/trunk/arch/powerpc/mm/icswx.c +++ /dev/null @@ -1,273 +0,0 @@ -/* - * ICSWX and ACOP Management - * - * Copyright (C) 2011 Anton Blanchard, IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "icswx.h" - -/* - * The processor and its L2 cache cause the icswx instruction to - * generate a COP_REQ transaction on PowerBus. 
The transaction has no - * address, and the processor does not perform an MMU access to - * authenticate the transaction. The command portion of the PowerBus - * COP_REQ transaction includes the LPAR_ID (LPID) and the coprocessor - * Process ID (PID), which the coprocessor compares to the authorized - * LPID and PID held in the coprocessor, to determine if the process - * is authorized to generate the transaction. The data of the COP_REQ - * transaction is 128-byte or less in size and is placed in cacheable - * memory on a 128-byte cache line boundary. - * - * The task to use a coprocessor should use use_cop() to mark the use - * of the Coprocessor Type (CT) and context switching. On a server - * class processor, the PID register is used only for coprocessor - * management + * and so a coprocessor PID is allocated before - * executing icswx + * instruction. Drop_cop() is used to free the - * coprocessor PID. - * - * Example: - * Host Fabric Interface (HFI) is a PowerPC network coprocessor. - * Each HFI have multiple windows. Each HFI window serves as a - * network device sending to and receiving from HFI network. - * HFI immediate send function uses icswx instruction. The immediate - * send function allows small (single cache-line) packets be sent - * without using the regular HFI send FIFO and doorbell, which are - * much slower than immediate send. - * - * For each task intending to use HFI immediate send, the HFI driver - * calls use_cop() to obtain a coprocessor PID for the task. - * The HFI driver then allocate a free HFI window and save the - * coprocessor PID to the HFI window to allow the task to use the - * HFI window. - * - * The HFI driver repeatedly creates immediate send packets and - * issues icswx instruction to send data through the HFI window. - * The HFI compares the coprocessor PID in the CPU PID register - * to the PID held in the HFI window to determine if the transaction - * is allowed. - * - * When the task to release the HFI window, the HFI driver calls - * drop_cop() to release the coprocessor PID. - */ - -void switch_cop(struct mm_struct *next) -{ -#ifdef CONFIG_ICSWX_PID - mtspr(SPRN_PID, next->context.cop_pid); -#endif - mtspr(SPRN_ACOP, next->context.acop); -} - -/** - * Start using a coprocessor. - * @acop: mask of coprocessor to be used. - * @mm: The mm the coprocessor to associate with. Most likely current mm. - * - * Return a positive PID if successful. Negative errno otherwise. - * The returned PID will be fed to the coprocessor to determine if an - * icswx transaction is authenticated. - */ -int use_cop(unsigned long acop, struct mm_struct *mm) -{ - int ret; - - if (!cpu_has_feature(CPU_FTR_ICSWX)) - return -ENODEV; - - if (!mm || !acop) - return -EINVAL; - - /* The page_table_lock ensures mm_users won't change under us */ - spin_lock(&mm->page_table_lock); - spin_lock(mm->context.cop_lockp); - - ret = get_cop_pid(mm); - if (ret < 0) - goto out; - - /* update acop */ - mm->context.acop |= acop; - - sync_cop(mm); - - /* - * If this is a threaded process then there might be other threads - * running. We need to send an IPI to force them to pick up any - * change in PID and ACOP. - */ - if (atomic_read(&mm->mm_users) > 1) - smp_call_function(sync_cop, mm, 1); - -out: - spin_unlock(mm->context.cop_lockp); - spin_unlock(&mm->page_table_lock); - - return ret; -} -EXPORT_SYMBOL_GPL(use_cop); - -/** - * Stop using a coprocessor. - * @acop: mask of coprocessor to be stopped. - * @mm: The mm the coprocessor associated with. 
- */ -void drop_cop(unsigned long acop, struct mm_struct *mm) -{ - int free_pid; - - if (!cpu_has_feature(CPU_FTR_ICSWX)) - return; - - if (WARN_ON_ONCE(!mm)) - return; - - /* The page_table_lock ensures mm_users won't change under us */ - spin_lock(&mm->page_table_lock); - spin_lock(mm->context.cop_lockp); - - mm->context.acop &= ~acop; - - free_pid = disable_cop_pid(mm); - sync_cop(mm); - - /* - * If this is a threaded process then there might be other threads - * running. We need to send an IPI to force them to pick up any - * change in PID and ACOP. - */ - if (atomic_read(&mm->mm_users) > 1) - smp_call_function(sync_cop, mm, 1); - - if (free_pid != COP_PID_NONE) - free_cop_pid(free_pid); - - spin_unlock(mm->context.cop_lockp); - spin_unlock(&mm->page_table_lock); -} -EXPORT_SYMBOL_GPL(drop_cop); - -static int acop_use_cop(int ct) -{ - /* todo */ - return -1; -} - -/* - * Get the instruction word at the NIP - */ -static u32 acop_get_inst(struct pt_regs *regs) -{ - u32 inst; - u32 __user *p; - - p = (u32 __user *)regs->nip; - if (!access_ok(VERIFY_READ, p, sizeof(*p))) - return 0; - - if (__get_user(inst, p)) - return 0; - - return inst; -} - -/** - * @regs: regsiters at time of interrupt - * @address: storage address - * @error_code: Fault code, usually the DSISR or ESR depending on - * processor type - * - * Return 0 if we are able to resolve the data storage fault that - * results from a CT miss in the ACOP register. - */ -int acop_handle_fault(struct pt_regs *regs, unsigned long address, - unsigned long error_code) -{ - int ct; - u32 inst = 0; - - if (!cpu_has_feature(CPU_FTR_ICSWX)) { - pr_info("No coprocessors available"); - _exception(SIGILL, regs, ILL_ILLOPN, address); - } - - if (!user_mode(regs)) { - /* this could happen if the HV denies the - * kernel access, for now we just die */ - die("ICSWX from kernel failed", regs, SIGSEGV); - } - - /* Some implementations leave us a hint for the CT */ - ct = ICSWX_GET_CT_HINT(error_code); - if (ct < 0) { - /* we have to peek at the instruction word to figure out CT */ - u32 ccw; - u32 rs; - - inst = acop_get_inst(regs); - if (inst == 0) - return -1; - - rs = (inst >> (31 - 10)) & 0x1f; - ccw = regs->gpr[rs]; - ct = (ccw >> 16) & 0x3f; - } - - if (!acop_use_cop(ct)) - return 0; - - /* at this point the CT is unknown to the system */ - pr_warn("%s[%d]: Coprocessor %d is unavailable", - current->comm, current->pid, ct); - - /* get inst if we don't already have it */ - if (inst == 0) { - inst = acop_get_inst(regs); - if (inst == 0) - return -1; - } - - /* Check if the instruction is the "record form" */ - if (inst & 1) { - /* - * the instruction is "record" form so we can reject - * using CR0 - */ - regs->ccr &= ~(0xful << 28); - regs->ccr |= ICSWX_RC_NOT_FOUND << 28; - - /* Move on to the next instruction */ - regs->nip += 4; - } else { - /* - * There is no architected mechanism to report a bad - * CT so we could either SIGILL or report nothing. - * Since the non-record version should only bu used - * for "hints" or "don't care" we should probably do - * nothing. However, I could see how some people - * might want an SIGILL so it here if you want it. 
- */ -#ifdef CONFIG_PPC_ICSWX_USE_SIGILL - _exception(SIGILL, regs, ILL_ILLOPN, address); -#else - regs->nip += 4; -#endif - } - - return 0; -} -EXPORT_SYMBOL_GPL(acop_handle_fault); diff --git a/trunk/arch/powerpc/mm/icswx.h b/trunk/arch/powerpc/mm/icswx.h deleted file mode 100644 index 42176bd0884c..000000000000 --- a/trunk/arch/powerpc/mm/icswx.h +++ /dev/null @@ -1,62 +0,0 @@ -#ifndef _ARCH_POWERPC_MM_ICSWX_H_ -#define _ARCH_POWERPC_MM_ICSWX_H_ - -/* - * ICSWX and ACOP Management - * - * Copyright (C) 2011 Anton Blanchard, IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ - -#include - -/* also used to denote that PIDs are not used */ -#define COP_PID_NONE 0 - -static inline void sync_cop(void *arg) -{ - struct mm_struct *mm = arg; - - if (mm == current->active_mm) - switch_cop(current->active_mm); -} - -#ifdef CONFIG_PPC_ICSWX_PID -extern int get_cop_pid(struct mm_struct *mm); -extern int disable_cop_pid(struct mm_struct *mm); -extern void free_cop_pid(int free_pid); -#else -#define get_cop_pid(m) (COP_PID_NONE) -#define disable_cop_pid(m) (COP_PID_NONE) -#define free_cop_pid(p) -#endif - -/* - * These are implementation bits for architected registers. If this - * ever becomes architecture the should be moved to reg.h et. al. - */ -/* UCT is the same bit for Server and Embedded */ -#define ICSWX_DSI_UCT 0x00004000 /* Unavailable Coprocessor Type */ - -#ifdef CONFIG_PPC_BOOK3E -/* Embedded implementation gives us no hints as to what the CT is */ -#define ICSWX_GET_CT_HINT(x) (-1) -#else -/* Server implementation contains the CT value in the DSISR */ -#define ICSWX_DSISR_CTMASK 0x00003f00 -#define ICSWX_GET_CT_HINT(x) (((x) & ICSWX_DSISR_CTMASK) >> 8) -#endif - -#define ICSWX_RC_STARTED 0x8 /* The request has been started */ -#define ICSWX_RC_NOT_IDLE 0x4 /* No coprocessor found idle */ -#define ICSWX_RC_NOT_FOUND 0x2 /* No coprocessor found */ -#define ICSWX_RC_UNDEFINED 0x1 /* Reserved */ - -extern int acop_handle_fault(struct pt_regs *regs, unsigned long address, - unsigned long error_code); -#endif /* !_ARCH_POWERPC_MM_ICSWX_H_ */ diff --git a/trunk/arch/powerpc/mm/icswx_pid.c b/trunk/arch/powerpc/mm/icswx_pid.c deleted file mode 100644 index 91e30eb7d054..000000000000 --- a/trunk/arch/powerpc/mm/icswx_pid.c +++ /dev/null @@ -1,87 +0,0 @@ -/* - * ICSWX and ACOP/PID Management - * - * Copyright (C) 2011 Anton Blanchard, IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include "icswx.h" - -#define COP_PID_MIN (COP_PID_NONE + 1) -#define COP_PID_MAX (0xFFFF) - -static DEFINE_SPINLOCK(mmu_context_acop_lock); -static DEFINE_IDA(cop_ida); - -static int new_cop_pid(struct ida *ida, int min_id, int max_id, - spinlock_t *lock) -{ - int index; - int err; - -again: - if (!ida_pre_get(ida, GFP_KERNEL)) - return -ENOMEM; - - spin_lock(lock); - err = ida_get_new_above(ida, min_id, &index); - spin_unlock(lock); - - if (err == -EAGAIN) - goto again; - else if (err) - return err; - - if (index > max_id) { - spin_lock(lock); - ida_remove(ida, index); - spin_unlock(lock); - return -ENOMEM; - } - - return index; -} - -int get_cop_pid(struct mm_struct *mm) -{ - int pid; - - if (mm->context.cop_pid == COP_PID_NONE) { - pid = new_cop_pid(&cop_ida, COP_PID_MIN, COP_PID_MAX, - &mmu_context_acop_lock); - if (pid >= 0) - mm->context.cop_pid = pid; - } - return mm->context.cop_pid; -} - -int disable_cop_pid(struct mm_struct *mm) -{ - int free_pid = COP_PID_NONE; - - if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) { - free_pid = mm->context.cop_pid; - mm->context.cop_pid = COP_PID_NONE; - } - return free_pid; -} - -void free_cop_pid(int free_pid) -{ - spin_lock(&mmu_context_acop_lock); - ida_remove(&cop_ida, free_pid); - spin_unlock(&mmu_context_acop_lock); -} diff --git a/trunk/arch/powerpc/mm/init_32.c b/trunk/arch/powerpc/mm/init_32.c index 6157be2a7049..161cefde5c15 100644 --- a/trunk/arch/powerpc/mm/init_32.c +++ b/trunk/arch/powerpc/mm/init_32.c @@ -65,13 +65,6 @@ phys_addr_t memstart_addr = (phys_addr_t)~0ull; EXPORT_SYMBOL(memstart_addr); phys_addr_t kernstart_addr; EXPORT_SYMBOL(kernstart_addr); - -#ifdef CONFIG_RELOCATABLE_PPC32 -/* Used in __va()/__pa() */ -long long virt_phys_offset; -EXPORT_SYMBOL(virt_phys_offset); -#endif - phys_addr_t lowmem_end_addr; int boot_mapsize; @@ -141,7 +134,8 @@ void __init MMU_init(void) if (memblock.memory.cnt > 1) { #ifndef CONFIG_WII - memblock_enforce_memory_limit(memblock.memory.regions[0].size); + memblock.memory.cnt = 1; + memblock_analyze(); printk(KERN_WARNING "Only using first contiguous memory region"); #else wii_memory_fixups(); @@ -164,6 +158,7 @@ void __init MMU_init(void) #ifndef CONFIG_HIGHMEM total_memory = total_lowmem; memblock_enforce_memory_limit(total_lowmem); + memblock_analyze(); #endif /* CONFIG_HIGHMEM */ } diff --git a/trunk/arch/powerpc/mm/mem.c b/trunk/arch/powerpc/mm/mem.c index d974b79a3068..2dd6bdd31fe1 100644 --- a/trunk/arch/powerpc/mm/mem.c +++ b/trunk/arch/powerpc/mm/mem.c @@ -51,7 +51,6 @@ #include #include #include -#include #include "mmu_decl.h" @@ -200,7 +199,7 @@ void __init do_init_bootmem(void) unsigned long start_pfn, end_pfn; start_pfn = memblock_region_memory_base_pfn(reg); end_pfn = memblock_region_memory_end_pfn(reg); - memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0); + add_active_range(0, start_pfn, end_pfn); } /* Add all physical memory to the bootmem map, mark each area @@ -554,7 +553,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, #if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \ && defined(CONFIG_HUGETLB_PAGE) if (is_vm_hugetlb_page(vma)) - book3e_hugetlb_preload(vma, address, *ptep); + book3e_hugetlb_preload(vma->vm_mm, address, *ptep); #endif } @@ -586,23 +585,3 @@ static int add_system_ram_resources(void) return 0; } subsys_initcall(add_system_ram_resources); - -#ifdef CONFIG_STRICT_DEVMEM -/* - * devmem_is_allowed(): 
check to see if /dev/mem access to a certain address - * is valid. The argument is a physical page number. - * - * Access has to be given to non-kernel-ram areas as well, these contain the - * PCI mmio resources as well as potential bios/acpi data regions. - */ -int devmem_is_allowed(unsigned long pfn) -{ - if (iomem_is_exclusive(pfn << PAGE_SHIFT)) - return 0; - if (!page_is_ram(pfn)) - return 1; - if (page_is_rtas_user_buf(pfn)) - return 1; - return 0; -} -#endif /* CONFIG_STRICT_DEVMEM */ diff --git a/trunk/arch/powerpc/mm/mmap_64.c b/trunk/arch/powerpc/mm/mmap_64.c index 67a42ed0d2fc..5a783d8e8e8e 100644 --- a/trunk/arch/powerpc/mm/mmap_64.c +++ b/trunk/arch/powerpc/mm/mmap_64.c @@ -53,6 +53,14 @@ static inline int mmap_is_legacy(void) return sysctl_legacy_va_layout; } +/* + * Since get_random_int() returns the same value within a 1 jiffy window, + * we will almost always get the same randomisation for the stack and mmap + * region. This will mean the relative distance between stack and mmap will + * be the same. + * + * To avoid this we can shift the randomness by 1 bit. + */ static unsigned long mmap_rnd(void) { unsigned long rnd = 0; @@ -60,11 +68,11 @@ static unsigned long mmap_rnd(void) if (current->flags & PF_RANDOMIZE) { /* 8MB for 32bit, 1GB for 64bit */ if (is_32bit_task()) - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT))); + rnd = (long)(get_random_int() % (1<<(22-PAGE_SHIFT))); else - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT))); + rnd = (long)(get_random_int() % (1<<(29-PAGE_SHIFT))); } - return rnd << PAGE_SHIFT; + return (rnd << PAGE_SHIFT) * 2; } static inline unsigned long mmap_base(void) diff --git a/trunk/arch/powerpc/mm/mmu_context_hash64.c b/trunk/arch/powerpc/mm/mmu_context_hash64.c index 40677aa0190e..ca988a3d5fb2 100644 --- a/trunk/arch/powerpc/mm/mmu_context_hash64.c +++ b/trunk/arch/powerpc/mm/mmu_context_hash64.c @@ -24,7 +24,200 @@ #include -#include "icswx.h" +#ifdef CONFIG_PPC_ICSWX +/* + * The processor and its L2 cache cause the icswx instruction to + * generate a COP_REQ transaction on PowerBus. The transaction has + * no address, and the processor does not perform an MMU access + * to authenticate the transaction. The command portion of the + * PowerBus COP_REQ transaction includes the LPAR_ID (LPID) and + * the coprocessor Process ID (PID), which the coprocessor compares + * to the authorized LPID and PID held in the coprocessor, to determine + * if the process is authorized to generate the transaction. + * The data of the COP_REQ transaction is 128-byte or less and is + * placed in cacheable memory on a 128-byte cache line boundary. + * + * The task to use a coprocessor should use use_cop() to allocate + * a coprocessor PID before executing icswx instruction. use_cop() + * also enables the coprocessor context switching. Drop_cop() is + * used to free the coprocessor PID. + * + * Example: + * Host Fabric Interface (HFI) is a PowerPC network coprocessor. + * Each HFI have multiple windows. Each HFI window serves as a + * network device sending to and receiving from HFI network. + * HFI immediate send function uses icswx instruction. The immediate + * send function allows small (single cache-line) packets be sent + * without using the regular HFI send FIFO and doorbell, which are + * much slower than immediate send. + * + * For each task intending to use HFI immediate send, the HFI driver + * calls use_cop() to obtain a coprocessor PID for the task. 
+ * The HFI driver then allocate a free HFI window and save the + * coprocessor PID to the HFI window to allow the task to use the + * HFI window. + * + * The HFI driver repeatedly creates immediate send packets and + * issues icswx instruction to send data through the HFI window. + * The HFI compares the coprocessor PID in the CPU PID register + * to the PID held in the HFI window to determine if the transaction + * is allowed. + * + * When the task to release the HFI window, the HFI driver calls + * drop_cop() to release the coprocessor PID. + */ + +#define COP_PID_NONE 0 +#define COP_PID_MIN (COP_PID_NONE + 1) +#define COP_PID_MAX (0xFFFF) + +static DEFINE_SPINLOCK(mmu_context_acop_lock); +static DEFINE_IDA(cop_ida); + +void switch_cop(struct mm_struct *next) +{ + mtspr(SPRN_PID, next->context.cop_pid); + mtspr(SPRN_ACOP, next->context.acop); +} + +static int new_cop_pid(struct ida *ida, int min_id, int max_id, + spinlock_t *lock) +{ + int index; + int err; + +again: + if (!ida_pre_get(ida, GFP_KERNEL)) + return -ENOMEM; + + spin_lock(lock); + err = ida_get_new_above(ida, min_id, &index); + spin_unlock(lock); + + if (err == -EAGAIN) + goto again; + else if (err) + return err; + + if (index > max_id) { + spin_lock(lock); + ida_remove(ida, index); + spin_unlock(lock); + return -ENOMEM; + } + + return index; +} + +static void sync_cop(void *arg) +{ + struct mm_struct *mm = arg; + + if (mm == current->active_mm) + switch_cop(current->active_mm); +} + +/** + * Start using a coprocessor. + * @acop: mask of coprocessor to be used. + * @mm: The mm the coprocessor to associate with. Most likely current mm. + * + * Return a positive PID if successful. Negative errno otherwise. + * The returned PID will be fed to the coprocessor to determine if an + * icswx transaction is authenticated. + */ +int use_cop(unsigned long acop, struct mm_struct *mm) +{ + int ret; + + if (!cpu_has_feature(CPU_FTR_ICSWX)) + return -ENODEV; + + if (!mm || !acop) + return -EINVAL; + + /* The page_table_lock ensures mm_users won't change under us */ + spin_lock(&mm->page_table_lock); + spin_lock(mm->context.cop_lockp); + + if (mm->context.cop_pid == COP_PID_NONE) { + ret = new_cop_pid(&cop_ida, COP_PID_MIN, COP_PID_MAX, + &mmu_context_acop_lock); + if (ret < 0) + goto out; + + mm->context.cop_pid = ret; + } + mm->context.acop |= acop; + + sync_cop(mm); + + /* + * If this is a threaded process then there might be other threads + * running. We need to send an IPI to force them to pick up any + * change in PID and ACOP. + */ + if (atomic_read(&mm->mm_users) > 1) + smp_call_function(sync_cop, mm, 1); + + ret = mm->context.cop_pid; + +out: + spin_unlock(mm->context.cop_lockp); + spin_unlock(&mm->page_table_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(use_cop); + +/** + * Stop using a coprocessor. + * @acop: mask of coprocessor to be stopped. + * @mm: The mm the coprocessor associated with. + */ +void drop_cop(unsigned long acop, struct mm_struct *mm) +{ + int free_pid = COP_PID_NONE; + + if (!cpu_has_feature(CPU_FTR_ICSWX)) + return; + + if (WARN_ON_ONCE(!mm)) + return; + + /* The page_table_lock ensures mm_users won't change under us */ + spin_lock(&mm->page_table_lock); + spin_lock(mm->context.cop_lockp); + + mm->context.acop &= ~acop; + + if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) { + free_pid = mm->context.cop_pid; + mm->context.cop_pid = COP_PID_NONE; + } + + sync_cop(mm); + + /* + * If this is a threaded process then there might be other threads + * running. 
We need to send an IPI to force them to pick up any + * change in PID and ACOP. + */ + if (atomic_read(&mm->mm_users) > 1) + smp_call_function(sync_cop, mm, 1); + + if (free_pid != COP_PID_NONE) { + spin_lock(&mmu_context_acop_lock); + ida_remove(&cop_ida, free_pid); + spin_unlock(&mmu_context_acop_lock); + } + + spin_unlock(mm->context.cop_lockp); + spin_unlock(&mm->page_table_lock); +} +EXPORT_SYMBOL_GPL(drop_cop); + +#endif /* CONFIG_PPC_ICSWX */ static DEFINE_SPINLOCK(mmu_context_lock); static DEFINE_IDA(mmu_context_ida); diff --git a/trunk/arch/powerpc/mm/numa.c b/trunk/arch/powerpc/mm/numa.c index c0189c169bbb..b22a83a91cb8 100644 --- a/trunk/arch/powerpc/mm/numa.c +++ b/trunk/arch/powerpc/mm/numa.c @@ -127,25 +127,45 @@ static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn, } /* - * get_node_active_region - Return active region containing pfn - * Active range returned is empty if none found. - * @pfn: The page to return the region for - * @node_ar: Returned set to the active region containing @pfn + * get_active_region_work_fn - A helper function for get_node_active_region + * Returns datax set to the start_pfn and end_pfn if they contain + * the initial value of datax->start_pfn between them + * @start_pfn: start page(inclusive) of region to check + * @end_pfn: end page(exclusive) of region to check + * @datax: comes in with ->start_pfn set to value to search for and + * goes out with active range if it contains it + * Returns 1 if search value is in range else 0 */ -static void __init get_node_active_region(unsigned long pfn, - struct node_active_region *node_ar) +static int __init get_active_region_work_fn(unsigned long start_pfn, + unsigned long end_pfn, void *datax) { - unsigned long start_pfn, end_pfn; - int i, nid; + struct node_active_region *data; + data = (struct node_active_region *)datax; - for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { - if (pfn >= start_pfn && pfn < end_pfn) { - node_ar->nid = nid; - node_ar->start_pfn = start_pfn; - node_ar->end_pfn = end_pfn; - break; - } + if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) { + data->start_pfn = start_pfn; + data->end_pfn = end_pfn; + return 1; } + return 0; + +} + +/* + * get_node_active_region - Return active region containing start_pfn + * Active range returned is empty if none found. + * @start_pfn: The page to return the region for. 
+ * @node_ar: Returned set to the active region containing start_pfn + */ +static void __init get_node_active_region(unsigned long start_pfn, + struct node_active_region *node_ar) +{ + int nid = early_pfn_to_nid(start_pfn); + + node_ar->nid = nid; + node_ar->start_pfn = start_pfn; + node_ar->end_pfn = start_pfn; + work_with_active_regions(nid, get_active_region_work_fn, node_ar); } static void map_cpu_to_node(int cpu, int node) @@ -386,7 +406,7 @@ static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells) of_node_put(memory); } -static unsigned long read_n_cells(int n, const unsigned int **buf) +static unsigned long __devinit read_n_cells(int n, const unsigned int **buf) { unsigned long result = 0; @@ -690,7 +710,9 @@ static void __init parse_drconf_memory(struct device_node *memory) node_set_online(nid); sz = numa_enforce_memory_limit(base, size); if (sz) - memblock_set_node(base, sz, nid); + add_active_range(nid, base >> PAGE_SHIFT, + (base >> PAGE_SHIFT) + + (sz >> PAGE_SHIFT)); } while (--ranges); } } @@ -780,7 +802,8 @@ static int __init parse_numa_properties(void) continue; } - memblock_set_node(start, size, nid); + add_active_range(nid, start >> PAGE_SHIFT, + (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT)); if (--ranges) goto new_range; @@ -816,8 +839,7 @@ static void __init setup_nonnuma(void) end_pfn = memblock_region_memory_end_pfn(reg); fake_numa_create_new_node(end_pfn, &nid); - memblock_set_node(PFN_PHYS(start_pfn), - PFN_PHYS(end_pfn - start_pfn), nid); + add_active_range(nid, start_pfn, end_pfn); node_set_online(nid); } } @@ -947,7 +969,7 @@ static struct notifier_block __cpuinitdata ppc64_numa_nb = { .priority = 1 /* Must run before sched domains notifier. */ }; -static void __init mark_reserved_regions_for_nid(int nid) +static void mark_reserved_regions_for_nid(int nid) { struct pglist_data *node = NODE_DATA(nid); struct memblock_region *reg; diff --git a/trunk/arch/powerpc/mm/tlb_low_64e.S b/trunk/arch/powerpc/mm/tlb_low_64e.S index ff672bd8fea9..dc4a5f385e41 100644 --- a/trunk/arch/powerpc/mm/tlb_low_64e.S +++ b/trunk/arch/powerpc/mm/tlb_low_64e.S @@ -94,11 +94,11 @@ srdi r15,r16,60 /* get region */ rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4 - bne- dtlb_miss_fault_bolted /* Bail if fault addr is invalid */ + bne- dtlb_miss_fault_bolted rlwinm r10,r11,32-19,27,27 rlwimi r10,r11,32-16,19,19 - cmpwi r15,0 /* user vs kernel check */ + cmpwi r15,0 ori r10,r10,_PAGE_PRESENT oris r11,r10,_PAGE_ACCESSED@h @@ -120,38 +120,44 @@ tlb_miss_common_bolted: rldicl r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3 cmpldi cr0,r14,0 clrrdi r15,r15,3 - beq tlb_miss_fault_bolted /* No PGDIR, bail */ + beq tlb_miss_fault_bolted BEGIN_MMU_FTR_SECTION /* Set the TLB reservation and search for existing entry. Then load * the entry. 
*/ PPC_TLBSRX_DOT(0,r16) - ldx r14,r14,r15 /* grab pgd entry */ - beq normal_tlb_miss_done /* tlb exists already, bail */ + ldx r14,r14,r15 + beq normal_tlb_miss_done MMU_FTR_SECTION_ELSE - ldx r14,r14,r15 /* grab pgd entry */ + ldx r14,r14,r15 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV) #ifndef CONFIG_PPC_64K_PAGES rldicl r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3 clrrdi r15,r15,3 - cmpdi cr0,r14,0 - bge tlb_miss_fault_bolted /* Bad pgd entry or hugepage; bail */ - ldx r14,r14,r15 /* grab pud entry */ + + cmpldi cr0,r14,0 + beq tlb_miss_fault_bolted + + ldx r14,r14,r15 #endif /* CONFIG_PPC_64K_PAGES */ rldicl r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3 clrrdi r15,r15,3 - cmpdi cr0,r14,0 - bge tlb_miss_fault_bolted - ldx r14,r14,r15 /* Grab pmd entry */ + + cmpldi cr0,r14,0 + beq tlb_miss_fault_bolted + + ldx r14,r14,r15 rldicl r15,r16,64-PAGE_SHIFT+3,64-PTE_INDEX_SIZE-3 clrrdi r15,r15,3 - cmpdi cr0,r14,0 - bge tlb_miss_fault_bolted - ldx r14,r14,r15 /* Grab PTE, normal (!huge) page */ + + cmpldi cr0,r14,0 + beq tlb_miss_fault_bolted + + ldx r14,r14,r15 /* Check if required permissions are met */ andc. r15,r11,r14 diff --git a/trunk/arch/powerpc/mm/tlb_nohash.c b/trunk/arch/powerpc/mm/tlb_nohash.c index df32a838dcfa..4e13d6f9023e 100644 --- a/trunk/arch/powerpc/mm/tlb_nohash.c +++ b/trunk/arch/powerpc/mm/tlb_nohash.c @@ -52,7 +52,7 @@ * indirect page table entries. */ #ifdef CONFIG_PPC_BOOK3E_MMU -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_FSL_BOOKE struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { [MMU_PAGE_4K] = { .shift = 12, @@ -615,6 +615,7 @@ static void __early_init_mmu(int boot_cpu) /* limit memory so we dont have linear faults */ memblock_enforce_memory_limit(linear_map_top); + memblock_analyze(); patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e); patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e); diff --git a/trunk/arch/powerpc/platforms/40x/Kconfig b/trunk/arch/powerpc/platforms/40x/Kconfig index baae85584b1c..153022971daa 100644 --- a/trunk/arch/powerpc/platforms/40x/Kconfig +++ b/trunk/arch/powerpc/platforms/40x/Kconfig @@ -100,16 +100,6 @@ config XILINX_VIRTEX_GENERIC_BOARD Most Virtex designs should use this unless it needs to do some special configuration at board probe time. -config OBS600 - bool "OpenBlockS 600" - depends on 40x - default n - select 405EX - select PPC40x_SIMPLE - help - This option enables support for PlatHome OpenBlockS 600 server - - config PPC40x_SIMPLE bool "Simple PowerPC 40x board support" depends on 40x @@ -196,14 +186,3 @@ config IBM405_ERR51 # bool # depends on !STB03xxx && PPC4xx_DMA # default y -# - -config APM8018X - bool "APM8018X" - depends on 40x - default n - select PPC40x_SIMPLE - help - This option enables support for the AppliedMicro APM8018X evaluation - board. 
- diff --git a/trunk/arch/powerpc/platforms/40x/ppc40x_simple.c b/trunk/arch/powerpc/platforms/40x/ppc40x_simple.c index 97612068fae3..e8dd5c5df7d9 100644 --- a/trunk/arch/powerpc/platforms/40x/ppc40x_simple.c +++ b/trunk/arch/powerpc/platforms/40x/ppc40x_simple.c @@ -55,9 +55,7 @@ static const char *board[] __initdata = { "amcc,haleakala", "amcc,kilauea", "amcc,makalu", - "apm,klondike", - "est,hotfoot", - "plathome,obs600" + "est,hotfoot" }; static int __init ppc40x_probe(void) diff --git a/trunk/arch/powerpc/platforms/44x/Kconfig b/trunk/arch/powerpc/platforms/44x/Kconfig index 5d5aaf6c91aa..762322ce24a9 100644 --- a/trunk/arch/powerpc/platforms/44x/Kconfig +++ b/trunk/arch/powerpc/platforms/44x/Kconfig @@ -186,16 +186,6 @@ config ISS4xx help This option enables support for the IBM ISS simulation environment -config CURRITUCK - bool "IBM Currituck (476fpe) Support" - depends on PPC_47x - default n - select SWIOTLB - select 476FPE - select PPC4xx_PCI_EXPRESS - help - This option enables support for the IBM Currituck (476fpe) evaluation board - config ICON bool "Icon" depends on 44x @@ -318,10 +308,6 @@ config 460SX select IBM_EMAC_ZMII select IBM_EMAC_TAH -config 476FPE - bool - select PPC_FPU - config APM821xx bool select PPC_FPU diff --git a/trunk/arch/powerpc/platforms/44x/Makefile b/trunk/arch/powerpc/platforms/44x/Makefile index d03833abec09..553db6007217 100644 --- a/trunk/arch/powerpc/platforms/44x/Makefile +++ b/trunk/arch/powerpc/platforms/44x/Makefile @@ -10,4 +10,3 @@ obj-$(CONFIG_XILINX_VIRTEX_5_FXT) += virtex.o obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o obj-$(CONFIG_ISS4xx) += iss4xx.o obj-$(CONFIG_CANYONLANDS)+= canyonlands.o -obj-$(CONFIG_CURRITUCK) += currituck.o diff --git a/trunk/arch/powerpc/platforms/44x/currituck.c b/trunk/arch/powerpc/platforms/44x/currituck.c deleted file mode 100644 index 3f6229b5dee0..000000000000 --- a/trunk/arch/powerpc/platforms/44x/currituck.c +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Currituck board specific routines - * - * Copyright © 2011 Tony Breeds IBM Corporation - * - * Based on earlier code: - * Matt Porter - * Copyright 2002-2005 MontaVista Software Inc. - * - * Eugene Surovegin or - * Copyright (c) 2003-2005 Zultys Technologies - * - * Rewritten and ported to the merged powerpc tree: - * Copyright 2007 David Gibson , IBM Corporation. - * Copyright © 2011 David Kliekamp IBM Corporation - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -static __initdata struct of_device_id ppc47x_of_bus[] = { - { .compatible = "ibm,plb4", }, - { .compatible = "ibm,plb6", }, - { .compatible = "ibm,opb", }, - { .compatible = "ibm,ebc", }, - {}, -}; - -/* The EEPROM is missing and the default values are bogus. 
This forces USB in - * to EHCI mode */ -static void __devinit quirk_ppc_currituck_usb_fixup(struct pci_dev *dev) -{ - if (of_machine_is_compatible("ibm,currituck")) { - pci_write_config_dword(dev, 0xe0, 0x0114231f); - pci_write_config_dword(dev, 0xe4, 0x00006c40); - } -} -DECLARE_PCI_FIXUP_HEADER(0x1033, 0x0035, quirk_ppc_currituck_usb_fixup); - -static int __init ppc47x_device_probe(void) -{ - of_platform_bus_probe(NULL, ppc47x_of_bus, NULL); - - return 0; -} -machine_device_initcall(ppc47x, ppc47x_device_probe); - -/* We can have either UICs or MPICs */ -static void __init ppc47x_init_irq(void) -{ - struct device_node *np; - - /* Find top level interrupt controller */ - for_each_node_with_property(np, "interrupt-controller") { - if (of_get_property(np, "interrupts", NULL) == NULL) - break; - } - if (np == NULL) - panic("Can't find top level interrupt controller"); - - /* Check type and do appropriate initialization */ - if (of_device_is_compatible(np, "chrp,open-pic")) { - /* The MPIC driver will get everything it needs from the - * device-tree, just pass 0 to all arguments - */ - struct mpic *mpic = - mpic_alloc(np, 0, 0, 0, 0, " MPIC "); - BUG_ON(mpic == NULL); - mpic_init(mpic); - ppc_md.get_irq = mpic_get_irq; - } else - panic("Unrecognized top level interrupt controller"); -} - -#ifdef CONFIG_SMP -static void __cpuinit smp_ppc47x_setup_cpu(int cpu) -{ - mpic_setup_this_cpu(); -} - -static int __cpuinit smp_ppc47x_kick_cpu(int cpu) -{ - struct device_node *cpunode = of_get_cpu_node(cpu, NULL); - const u64 *spin_table_addr_prop; - u32 *spin_table; - extern void start_secondary_47x(void); - - BUG_ON(cpunode == NULL); - - /* Assume spin table. We could test for the enable-method in - * the device-tree but currently there's little point as it's - * our only supported method - */ - spin_table_addr_prop = - of_get_property(cpunode, "cpu-release-addr", NULL); - - if (spin_table_addr_prop == NULL) { - pr_err("CPU%d: Can't start, missing cpu-release-addr !\n", - cpu); - return 1; - } - - /* Assume it's mapped as part of the linear mapping. This is a bit - * fishy but will work fine for now - * - * XXX: Is there any reason to assume differently? - */ - spin_table = (u32 *)__va(*spin_table_addr_prop); - pr_debug("CPU%d: Spin table mapped at %p\n", cpu, spin_table); - - spin_table[3] = cpu; - smp_wmb(); - spin_table[1] = __pa(start_secondary_47x); - mb(); - - return 0; -} - -static struct smp_ops_t ppc47x_smp_ops = { - .probe = smp_mpic_probe, - .message_pass = smp_mpic_message_pass, - .setup_cpu = smp_ppc47x_setup_cpu, - .kick_cpu = smp_ppc47x_kick_cpu, - .give_timebase = smp_generic_give_timebase, - .take_timebase = smp_generic_take_timebase, -}; - -static void __init ppc47x_smp_init(void) -{ - if (mmu_has_feature(MMU_FTR_TYPE_47x)) - smp_ops = &ppc47x_smp_ops; -} - -#else /* CONFIG_SMP */ -static void __init ppc47x_smp_init(void) { } -#endif /* CONFIG_SMP */ - -static void __init ppc47x_setup_arch(void) -{ - - /* No need to check the DMA config as we /know/ our windows are all of - * RAM. 
Lets hope that doesn't change */ -#ifdef CONFIG_SWIOTLB - if (memblock_end_of_DRAM() > 0xffffffff) { - ppc_swiotlb_enable = 1; - set_pci_dma_ops(&swiotlb_dma_ops); - ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; - } -#endif - ppc47x_smp_init(); -} - -/* - * Called very early, MMU is off, device-tree isn't unflattened - */ -static int __init ppc47x_probe(void) -{ - unsigned long root = of_get_flat_dt_root(); - - if (!of_flat_dt_is_compatible(root, "ibm,currituck")) - return 0; - - return 1; -} - -/* Use USB controller should have been hardware swizzled but it wasn't :( */ -static void ppc47x_pci_irq_fixup(struct pci_dev *dev) -{ - if (dev->vendor == 0x1033 && (dev->device == 0x0035 || - dev->device == 0x00e0)) { - dev->irq = irq_create_mapping(NULL, 47); - pr_info("%s: Mapping irq 47 %d\n", __func__, dev->irq); - } -} - -define_machine(ppc47x) { - .name = "PowerPC 47x", - .probe = ppc47x_probe, - .progress = udbg_progress, - .init_IRQ = ppc47x_init_irq, - .setup_arch = ppc47x_setup_arch, - .pci_irq_fixup = ppc47x_pci_irq_fixup, - .restart = ppc4xx_reset_system, - .calibrate_decr = generic_calibrate_decr, -}; diff --git a/trunk/arch/powerpc/platforms/44x/iss4xx.c b/trunk/arch/powerpc/platforms/44x/iss4xx.c index 5b8cdbb82f80..19395f18b1db 100644 --- a/trunk/arch/powerpc/platforms/44x/iss4xx.c +++ b/trunk/arch/powerpc/platforms/44x/iss4xx.c @@ -71,7 +71,7 @@ static void __init iss4xx_init_irq(void) /* The MPIC driver will get everything it needs from the * device-tree, just pass 0 to all arguments */ - struct mpic *mpic = mpic_alloc(np, 0, 0, 0, 0, + struct mpic *mpic = mpic_alloc(np, 0, MPIC_PRIMARY, 0, 0, " MPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); diff --git a/trunk/arch/powerpc/platforms/83xx/asp834x.c b/trunk/arch/powerpc/platforms/83xx/asp834x.c index 464ea8e0292d..aa0d84d22585 100644 --- a/trunk/arch/powerpc/platforms/83xx/asp834x.c +++ b/trunk/arch/powerpc/platforms/83xx/asp834x.c @@ -36,7 +36,38 @@ static void __init asp834x_setup_arch(void) mpc834x_usb_cfg(); } -machine_device_initcall(asp834x, mpc83xx_declare_of_platform_devices); +static void __init asp834x_init_IRQ(void) +{ + struct device_node *np; + + np = of_find_node_by_type(NULL, "ipic"); + if (!np) + return; + + ipic_init(np, 0); + + of_node_put(np); + + /* Initialize the default interrupt mapping priorities, + * in case the boot rom changed something on us. 
+ */ + ipic_set_default_priority(); +} + +static struct __initdata of_device_id asp8347_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .compatible = "simple-bus", }, + { .compatible = "gianfar", }, + {}, +}; + +static int __init asp8347_declare_of_platform_devices(void) +{ + of_platform_bus_probe(NULL, asp8347_ids, NULL); + return 0; +} +machine_device_initcall(asp834x, asp8347_declare_of_platform_devices); /* * Called very early, MMU is off, device-tree isn't unflattened @@ -51,7 +82,7 @@ define_machine(asp834x) { .name = "ASP8347E", .probe = asp834x_probe, .setup_arch = asp834x_setup_arch, - .init_IRQ = mpc83xx_ipic_init_IRQ, + .init_IRQ = asp834x_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, diff --git a/trunk/arch/powerpc/platforms/83xx/km83xx.c b/trunk/arch/powerpc/platforms/83xx/km83xx.c index 65eb792a0d00..c55129f5760a 100644 --- a/trunk/arch/powerpc/platforms/83xx/km83xx.c +++ b/trunk/arch/powerpc/platforms/83xx/km83xx.c @@ -51,14 +51,15 @@ */ static void __init mpc83xx_km_setup_arch(void) { -#ifdef CONFIG_QUICC_ENGINE struct device_node *np; -#endif if (ppc_md.progress) ppc_md.progress("kmpbec83xx_setup_arch()", 0); - mpc83xx_setup_pci(); +#ifdef CONFIG_PCI + for_each_compatible_node(np, "pci", "fsl,mpc8349-pci") + mpc83xx_add_bridge(np); +#endif #ifdef CONFIG_QUICC_ENGINE qe_reset(); @@ -121,7 +122,54 @@ static void __init mpc83xx_km_setup_arch(void) #endif /* CONFIG_QUICC_ENGINE */ } -machine_device_initcall(mpc83xx_km, mpc83xx_declare_of_platform_devices); +static struct of_device_id kmpbec83xx_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .compatible = "simple-bus", }, + { .type = "qe", }, + { .compatible = "fsl,qe", }, + {}, +}; + +static int __init kmeter_declare_of_platform_devices(void) +{ + /* Publish the QE devices */ + of_platform_bus_probe(NULL, kmpbec83xx_ids, NULL); + + return 0; +} +machine_device_initcall(mpc83xx_km, kmeter_declare_of_platform_devices); + +static void __init mpc83xx_km_init_IRQ(void) +{ + struct device_node *np; + + np = of_find_compatible_node(NULL, NULL, "fsl,pq2pro-pic"); + if (!np) { + np = of_find_node_by_type(NULL, "ipic"); + if (!np) + return; + } + + ipic_init(np, 0); + + /* Initialize the default interrupt mapping priorities, + * in case the boot rom changed something on us. 
+ */ + ipic_set_default_priority(); + of_node_put(np); + +#ifdef CONFIG_QUICC_ENGINE + np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic"); + if (!np) { + np = of_find_node_by_type(NULL, "qeic"); + if (!np) + return; + } + qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic); + of_node_put(np); +#endif /* CONFIG_QUICC_ENGINE */ +} /* list of the supported boards */ static char *board[] __initdata = { @@ -150,7 +198,7 @@ define_machine(mpc83xx_km) { .name = "mpc83xx-km-platform", .probe = mpc83xx_km_probe, .setup_arch = mpc83xx_km_setup_arch, - .init_IRQ = mpc83xx_ipic_and_qe_init_IRQ, + .init_IRQ = mpc83xx_km_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, diff --git a/trunk/arch/powerpc/platforms/83xx/misc.c b/trunk/arch/powerpc/platforms/83xx/misc.c index 125336f750c6..f01806c940e1 100644 --- a/trunk/arch/powerpc/platforms/83xx/misc.c +++ b/trunk/arch/powerpc/platforms/83xx/misc.c @@ -11,15 +11,10 @@ #include #include -#include -#include #include #include -#include -#include #include -#include #include "mpc83xx.h" @@ -70,75 +65,3 @@ long __init mpc83xx_time_init(void) return 0; } - -void __init mpc83xx_ipic_init_IRQ(void) -{ - struct device_node *np; - - /* looking for fsl,pq2pro-pic which is asl compatible with fsl,ipic */ - np = of_find_compatible_node(NULL, NULL, "fsl,ipic"); - if (!np) - np = of_find_node_by_type(NULL, "ipic"); - if (!np) - return; - - ipic_init(np, 0); - - of_node_put(np); - - /* Initialize the default interrupt mapping priorities, - * in case the boot rom changed something on us. - */ - ipic_set_default_priority(); -} - -#ifdef CONFIG_QUICC_ENGINE -void __init mpc83xx_qe_init_IRQ(void) -{ - struct device_node *np; - - np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic"); - if (!np) { - np = of_find_node_by_type(NULL, "qeic"); - if (!np) - return; - } - qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic); - of_node_put(np); -} - -void __init mpc83xx_ipic_and_qe_init_IRQ(void) -{ - mpc83xx_ipic_init_IRQ(); - mpc83xx_qe_init_IRQ(); -} -#endif /* CONFIG_QUICC_ENGINE */ - -static struct of_device_id __initdata of_bus_ids[] = { - { .type = "soc", }, - { .compatible = "soc", }, - { .compatible = "simple-bus" }, - { .compatible = "gianfar" }, - { .compatible = "gpio-leds", }, - { .type = "qe", }, - { .compatible = "fsl,qe", }, - {}, -}; - -int __init mpc83xx_declare_of_platform_devices(void) -{ - of_platform_bus_probe(NULL, of_bus_ids, NULL); - return 0; -} - -#ifdef CONFIG_PCI -void __init mpc83xx_setup_pci(void) -{ - struct device_node *np; - - for_each_compatible_node(np, "pci", "fsl,mpc8349-pci") - mpc83xx_add_bridge(np); - for_each_compatible_node(np, "pci", "fsl,mpc8314-pcie") - mpc83xx_add_bridge(np); -} -#endif diff --git a/trunk/arch/powerpc/platforms/83xx/mpc830x_rdb.c b/trunk/arch/powerpc/platforms/83xx/mpc830x_rdb.c index 4f2d9fea77b7..d0c4e15b7794 100644 --- a/trunk/arch/powerpc/platforms/83xx/mpc830x_rdb.c +++ b/trunk/arch/powerpc/platforms/83xx/mpc830x_rdb.c @@ -27,13 +27,36 @@ */ static void __init mpc830x_rdb_setup_arch(void) { +#ifdef CONFIG_PCI + struct device_node *np; +#endif + if (ppc_md.progress) ppc_md.progress("mpc830x_rdb_setup_arch()", 0); - mpc83xx_setup_pci(); +#ifdef CONFIG_PCI + for_each_compatible_node(np, "pci", "fsl,mpc8308-pcie") + mpc83xx_add_bridge(np); +#endif mpc831x_usb_cfg(); } +static void __init mpc830x_rdb_init_IRQ(void) +{ + struct device_node *np; + + np = of_find_node_by_type(NULL, "ipic"); + if (!np) + return; + + ipic_init(np, 0); + + /* 
Initialize the default interrupt mapping priorities, + * in case the boot rom changed something on us. + */ + ipic_set_default_priority(); +} + static const char *board[] __initdata = { "MPC8308RDB", "fsl,mpc8308rdb", @@ -49,13 +72,24 @@ static int __init mpc830x_rdb_probe(void) return of_flat_dt_match(of_get_flat_dt_root(), board); } -machine_device_initcall(mpc830x_rdb, mpc83xx_declare_of_platform_devices); +static struct of_device_id __initdata of_bus_ids[] = { + { .compatible = "simple-bus" }, + { .compatible = "gianfar" }, + {}, +}; + +static int __init declare_of_platform_devices(void) +{ + of_platform_bus_probe(NULL, of_bus_ids, NULL); + return 0; +} +machine_device_initcall(mpc830x_rdb, declare_of_platform_devices); define_machine(mpc830x_rdb) { .name = "MPC830x RDB", .probe = mpc830x_rdb_probe, .setup_arch = mpc830x_rdb_setup_arch, - .init_IRQ = mpc83xx_ipic_init_IRQ, + .init_IRQ = mpc830x_rdb_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, diff --git a/trunk/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/trunk/arch/powerpc/platforms/83xx/mpc831x_rdb.c index fa25977c52de..f859ead49a8d 100644 --- a/trunk/arch/powerpc/platforms/83xx/mpc831x_rdb.c +++ b/trunk/arch/powerpc/platforms/83xx/mpc831x_rdb.c @@ -28,13 +28,38 @@ */ static void __init mpc831x_rdb_setup_arch(void) { +#ifdef CONFIG_PCI + struct device_node *np; +#endif + if (ppc_md.progress) ppc_md.progress("mpc831x_rdb_setup_arch()", 0); - mpc83xx_setup_pci(); +#ifdef CONFIG_PCI + for_each_compatible_node(np, "pci", "fsl,mpc8349-pci") + mpc83xx_add_bridge(np); + for_each_compatible_node(np, "pci", "fsl,mpc8314-pcie") + mpc83xx_add_bridge(np); +#endif mpc831x_usb_cfg(); } +static void __init mpc831x_rdb_init_IRQ(void) +{ + struct device_node *np; + + np = of_find_node_by_type(NULL, "ipic"); + if (!np) + return; + + ipic_init(np, 0); + + /* Initialize the default interrupt mapping priorities, + * in case the boot rom changed something on us. 
+ */ + ipic_set_default_priority(); +} + static const char *board[] __initdata = { "MPC8313ERDB", "fsl,mpc8315erdb", @@ -49,13 +74,25 @@ static int __init mpc831x_rdb_probe(void) return of_flat_dt_match(of_get_flat_dt_root(), board); } -machine_device_initcall(mpc831x_rdb, mpc83xx_declare_of_platform_devices); +static struct of_device_id __initdata of_bus_ids[] = { + { .compatible = "simple-bus" }, + { .compatible = "gianfar" }, + { .compatible = "gpio-leds", }, + {}, +}; + +static int __init declare_of_platform_devices(void) +{ + of_platform_bus_probe(NULL, of_bus_ids, NULL); + return 0; +} +machine_device_initcall(mpc831x_rdb, declare_of_platform_devices); define_machine(mpc831x_rdb) { .name = "MPC831x RDB", .probe = mpc831x_rdb_probe, .setup_arch = mpc831x_rdb_setup_arch, - .init_IRQ = mpc83xx_ipic_init_IRQ, + .init_IRQ = mpc831x_rdb_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, diff --git a/trunk/arch/powerpc/platforms/83xx/mpc832x_mds.c b/trunk/arch/powerpc/platforms/83xx/mpc832x_mds.c index e36bc611dd6e..32a52896822f 100644 --- a/trunk/arch/powerpc/platforms/83xx/mpc832x_mds.c +++ b/trunk/arch/powerpc/platforms/83xx/mpc832x_mds.c @@ -72,7 +72,10 @@ static void __init mpc832x_sys_setup_arch(void) of_node_put(np); } - mpc83xx_setup_pci(); +#ifdef CONFIG_PCI + for_each_compatible_node(np, "pci", "fsl,mpc8349-pci") + mpc83xx_add_bridge(np); +#endif #ifdef CONFIG_QUICC_ENGINE qe_reset(); @@ -98,7 +101,51 @@ static void __init mpc832x_sys_setup_arch(void) #endif /* CONFIG_QUICC_ENGINE */ } -machine_device_initcall(mpc832x_mds, mpc83xx_declare_of_platform_devices); +static struct of_device_id mpc832x_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .compatible = "simple-bus", }, + { .type = "qe", }, + { .compatible = "fsl,qe", }, + {}, +}; + +static int __init mpc832x_declare_of_platform_devices(void) +{ + /* Publish the QE devices */ + of_platform_bus_probe(NULL, mpc832x_ids, NULL); + + return 0; +} +machine_device_initcall(mpc832x_mds, mpc832x_declare_of_platform_devices); + +static void __init mpc832x_sys_init_IRQ(void) +{ + struct device_node *np; + + np = of_find_node_by_type(NULL, "ipic"); + if (!np) + return; + + ipic_init(np, 0); + + /* Initialize the default interrupt mapping priorities, + * in case the boot rom changed something on us. 
+ */ + ipic_set_default_priority(); + of_node_put(np); + +#ifdef CONFIG_QUICC_ENGINE + np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic"); + if (!np) { + np = of_find_node_by_type(NULL, "qeic"); + if (!np) + return; + } + qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic); + of_node_put(np); +#endif /* CONFIG_QUICC_ENGINE */ +} /* * Called very early, MMU is off, device-tree isn't unflattened @@ -114,7 +161,7 @@ define_machine(mpc832x_mds) { .name = "MPC832x MDS", .probe = mpc832x_sys_probe, .setup_arch = mpc832x_sys_setup_arch, - .init_IRQ = mpc83xx_ipic_and_qe_init_IRQ, + .init_IRQ = mpc832x_sys_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, diff --git a/trunk/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/trunk/arch/powerpc/platforms/83xx/mpc832x_rdb.c index eff5baabc3fb..17f99745f0e4 100644 --- a/trunk/arch/powerpc/platforms/83xx/mpc832x_rdb.c +++ b/trunk/arch/powerpc/platforms/83xx/mpc832x_rdb.c @@ -193,14 +193,17 @@ machine_device_initcall(mpc832x_rdb, mpc832x_spi_init); */ static void __init mpc832x_rdb_setup_arch(void) { -#if defined(CONFIG_QUICC_ENGINE) +#if defined(CONFIG_PCI) || defined(CONFIG_QUICC_ENGINE) struct device_node *np; #endif if (ppc_md.progress) ppc_md.progress("mpc832x_rdb_setup_arch()", 0); - mpc83xx_setup_pci(); +#ifdef CONFIG_PCI + for_each_compatible_node(np, "pci", "fsl,mpc8349-pci") + mpc83xx_add_bridge(np); +#endif #ifdef CONFIG_QUICC_ENGINE qe_reset(); @@ -215,7 +218,52 @@ static void __init mpc832x_rdb_setup_arch(void) #endif /* CONFIG_QUICC_ENGINE */ } -machine_device_initcall(mpc832x_rdb, mpc83xx_declare_of_platform_devices); +static struct of_device_id mpc832x_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .compatible = "simple-bus", }, + { .type = "qe", }, + { .compatible = "fsl,qe", }, + {}, +}; + +static int __init mpc832x_declare_of_platform_devices(void) +{ + /* Publish the QE devices */ + of_platform_bus_probe(NULL, mpc832x_ids, NULL); + + return 0; +} +machine_device_initcall(mpc832x_rdb, mpc832x_declare_of_platform_devices); + +static void __init mpc832x_rdb_init_IRQ(void) +{ + + struct device_node *np; + + np = of_find_node_by_type(NULL, "ipic"); + if (!np) + return; + + ipic_init(np, 0); + + /* Initialize the default interrupt mapping priorities, + * in case the boot rom changed something on us. 
+ */ + ipic_set_default_priority(); + of_node_put(np); + +#ifdef CONFIG_QUICC_ENGINE + np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic"); + if (!np) { + np = of_find_node_by_type(NULL, "qeic"); + if (!np) + return; + } + qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic); + of_node_put(np); +#endif /* CONFIG_QUICC_ENGINE */ +} /* * Called very early, MMU is off, device-tree isn't unflattened @@ -231,7 +279,7 @@ define_machine(mpc832x_rdb) { .name = "MPC832x RDB", .probe = mpc832x_rdb_probe, .setup_arch = mpc832x_rdb_setup_arch, - .init_IRQ = mpc83xx_ipic_and_qe_init_IRQ, + .init_IRQ = mpc832x_rdb_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, diff --git a/trunk/arch/powerpc/platforms/83xx/mpc834x_itx.c b/trunk/arch/powerpc/platforms/83xx/mpc834x_itx.c index 39849dd1b5bb..6b45969567d4 100644 --- a/trunk/arch/powerpc/platforms/83xx/mpc834x_itx.c +++ b/trunk/arch/powerpc/platforms/83xx/mpc834x_itx.c @@ -41,12 +41,13 @@ static struct of_device_id __initdata mpc834x_itx_ids[] = { { .compatible = "fsl,pq2pro-localbus", }, + { .compatible = "simple-bus", }, + { .compatible = "gianfar", }, {}, }; static int __init mpc834x_itx_declare_of_platform_devices(void) { - mpc83xx_declare_of_platform_devices(); return of_platform_bus_probe(NULL, mpc834x_itx_ids, NULL); } machine_device_initcall(mpc834x_itx, mpc834x_itx_declare_of_platform_devices); @@ -58,14 +59,37 @@ machine_device_initcall(mpc834x_itx, mpc834x_itx_declare_of_platform_devices); */ static void __init mpc834x_itx_setup_arch(void) { +#ifdef CONFIG_PCI + struct device_node *np; +#endif + if (ppc_md.progress) ppc_md.progress("mpc834x_itx_setup_arch()", 0); - mpc83xx_setup_pci(); +#ifdef CONFIG_PCI + for_each_compatible_node(np, "pci", "fsl,mpc8349-pci") + mpc83xx_add_bridge(np); +#endif mpc834x_usb_cfg(); } +static void __init mpc834x_itx_init_IRQ(void) +{ + struct device_node *np; + + np = of_find_node_by_type(NULL, "ipic"); + if (!np) + return; + + ipic_init(np, 0); + + /* Initialize the default interrupt mapping priorities, + * in case the boot rom changed something on us. + */ + ipic_set_default_priority(); +} + /* * Called very early, MMU is off, device-tree isn't unflattened */ @@ -80,7 +104,7 @@ define_machine(mpc834x_itx) { .name = "MPC834x ITX", .probe = mpc834x_itx_probe, .setup_arch = mpc834x_itx_setup_arch, - .init_IRQ = mpc83xx_ipic_init_IRQ, + .init_IRQ = mpc834x_itx_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, diff --git a/trunk/arch/powerpc/platforms/83xx/mpc834x_mds.c b/trunk/arch/powerpc/platforms/83xx/mpc834x_mds.c index 5828d8e97c37..041c5177e737 100644 --- a/trunk/arch/powerpc/platforms/83xx/mpc834x_mds.c +++ b/trunk/arch/powerpc/platforms/83xx/mpc834x_mds.c @@ -77,15 +77,51 @@ static int mpc834xemds_usb_cfg(void) */ static void __init mpc834x_mds_setup_arch(void) { +#ifdef CONFIG_PCI + struct device_node *np; +#endif + if (ppc_md.progress) ppc_md.progress("mpc834x_mds_setup_arch()", 0); - mpc83xx_setup_pci(); +#ifdef CONFIG_PCI + for_each_compatible_node(np, "pci", "fsl,mpc8349-pci") + mpc83xx_add_bridge(np); +#endif mpc834xemds_usb_cfg(); } -machine_device_initcall(mpc834x_mds, mpc83xx_declare_of_platform_devices); +static void __init mpc834x_mds_init_IRQ(void) +{ + struct device_node *np; + + np = of_find_node_by_type(NULL, "ipic"); + if (!np) + return; + + ipic_init(np, 0); + + /* Initialize the default interrupt mapping priorities, + * in case the boot rom changed something on us. 
+ */ + ipic_set_default_priority(); +} + +static struct of_device_id mpc834x_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .compatible = "simple-bus", }, + { .compatible = "gianfar", }, + {}, +}; + +static int __init mpc834x_declare_of_platform_devices(void) +{ + of_platform_bus_probe(NULL, mpc834x_ids, NULL); + return 0; +} +machine_device_initcall(mpc834x_mds, mpc834x_declare_of_platform_devices); /* * Called very early, MMU is off, device-tree isn't unflattened @@ -101,7 +137,7 @@ define_machine(mpc834x_mds) { .name = "MPC834x MDS", .probe = mpc834x_mds_probe, .setup_arch = mpc834x_mds_setup_arch, - .init_IRQ = mpc83xx_ipic_init_IRQ, + .init_IRQ = mpc834x_mds_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, diff --git a/trunk/arch/powerpc/platforms/83xx/mpc836x_mds.c b/trunk/arch/powerpc/platforms/83xx/mpc836x_mds.c index ad8e4bcd7d55..934cc8c46bbc 100644 --- a/trunk/arch/powerpc/platforms/83xx/mpc836x_mds.c +++ b/trunk/arch/powerpc/platforms/83xx/mpc836x_mds.c @@ -80,7 +80,10 @@ static void __init mpc836x_mds_setup_arch(void) of_node_put(np); } - mpc83xx_setup_pci(); +#ifdef CONFIG_PCI + for_each_compatible_node(np, "pci", "fsl,mpc8349-pci") + mpc83xx_add_bridge(np); +#endif #ifdef CONFIG_QUICC_ENGINE qe_reset(); @@ -141,7 +144,23 @@ static void __init mpc836x_mds_setup_arch(void) #endif /* CONFIG_QUICC_ENGINE */ } -machine_device_initcall(mpc836x_mds, mpc83xx_declare_of_platform_devices); +static struct of_device_id mpc836x_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .compatible = "simple-bus", }, + { .type = "qe", }, + { .compatible = "fsl,qe", }, + {}, +}; + +static int __init mpc836x_declare_of_platform_devices(void) +{ + /* Publish the QE devices */ + of_platform_bus_probe(NULL, mpc836x_ids, NULL); + + return 0; +} +machine_device_initcall(mpc836x_mds, mpc836x_declare_of_platform_devices); #ifdef CONFIG_QE_USB static int __init mpc836x_usb_cfg(void) @@ -207,6 +226,34 @@ static int __init mpc836x_usb_cfg(void) machine_arch_initcall(mpc836x_mds, mpc836x_usb_cfg); #endif /* CONFIG_QE_USB */ +static void __init mpc836x_mds_init_IRQ(void) +{ + struct device_node *np; + + np = of_find_node_by_type(NULL, "ipic"); + if (!np) + return; + + ipic_init(np, 0); + + /* Initialize the default interrupt mapping priorities, + * in case the boot rom changed something on us. 
+ */ + ipic_set_default_priority(); + of_node_put(np); + +#ifdef CONFIG_QUICC_ENGINE + np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic"); + if (!np) { + np = of_find_node_by_type(NULL, "qeic"); + if (!np) + return; + } + qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic); + of_node_put(np); +#endif /* CONFIG_QUICC_ENGINE */ +} + /* * Called very early, MMU is off, device-tree isn't unflattened */ @@ -221,7 +268,7 @@ define_machine(mpc836x_mds) { .name = "MPC836x MDS", .probe = mpc836x_mds_probe, .setup_arch = mpc836x_mds_setup_arch, - .init_IRQ = mpc83xx_ipic_and_qe_init_IRQ, + .init_IRQ = mpc836x_mds_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, diff --git a/trunk/arch/powerpc/platforms/83xx/mpc836x_rdk.c b/trunk/arch/powerpc/platforms/83xx/mpc836x_rdk.c index f8769d713d61..b0090aac9642 100644 --- a/trunk/arch/powerpc/platforms/83xx/mpc836x_rdk.c +++ b/trunk/arch/powerpc/platforms/83xx/mpc836x_rdk.c @@ -27,19 +27,61 @@ #include "mpc83xx.h" -machine_device_initcall(mpc836x_rdk, mpc83xx_declare_of_platform_devices); +static struct of_device_id __initdata mpc836x_rdk_ids[] = { + { .compatible = "simple-bus", }, + {}, +}; + +static int __init mpc836x_rdk_declare_of_platform_devices(void) +{ + return of_platform_bus_probe(NULL, mpc836x_rdk_ids, NULL); +} +machine_device_initcall(mpc836x_rdk, mpc836x_rdk_declare_of_platform_devices); static void __init mpc836x_rdk_setup_arch(void) { +#ifdef CONFIG_PCI + struct device_node *np; +#endif + if (ppc_md.progress) ppc_md.progress("mpc836x_rdk_setup_arch()", 0); - mpc83xx_setup_pci(); +#ifdef CONFIG_PCI + for_each_compatible_node(np, "pci", "fsl,mpc8349-pci") + mpc83xx_add_bridge(np); +#endif #ifdef CONFIG_QUICC_ENGINE qe_reset(); #endif } +static void __init mpc836x_rdk_init_IRQ(void) +{ + struct device_node *np; + + np = of_find_compatible_node(NULL, NULL, "fsl,ipic"); + if (!np) + return; + + ipic_init(np, 0); + + /* + * Initialize the default interrupt mapping priorities, + * in case the boot rom changed something on us. + */ + ipic_set_default_priority(); + of_node_put(np); +#ifdef CONFIG_QUICC_ENGINE + np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic"); + if (!np) + return; + + qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic); + of_node_put(np); +#endif +} + /* * Called very early, MMU is off, device-tree isn't unflattened. 
*/ @@ -54,7 +96,7 @@ define_machine(mpc836x_rdk) { .name = "MPC836x RDK", .probe = mpc836x_rdk_probe, .setup_arch = mpc836x_rdk_setup_arch, - .init_IRQ = mpc83xx_ipic_and_qe_init_IRQ, + .init_IRQ = mpc836x_rdk_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, diff --git a/trunk/arch/powerpc/platforms/83xx/mpc837x_mds.c b/trunk/arch/powerpc/platforms/83xx/mpc837x_mds.c index e53a60b6c863..83068322abd1 100644 --- a/trunk/arch/powerpc/platforms/83xx/mpc837x_mds.c +++ b/trunk/arch/powerpc/platforms/83xx/mpc837x_mds.c @@ -79,14 +79,54 @@ static int mpc837xmds_usb_cfg(void) */ static void __init mpc837x_mds_setup_arch(void) { +#ifdef CONFIG_PCI + struct device_node *np; +#endif + if (ppc_md.progress) ppc_md.progress("mpc837x_mds_setup_arch()", 0); - mpc83xx_setup_pci(); +#ifdef CONFIG_PCI + for_each_compatible_node(np, "pci", "fsl,mpc8349-pci") + mpc83xx_add_bridge(np); + for_each_compatible_node(np, "pci", "fsl,mpc8314-pcie") + mpc83xx_add_bridge(np); +#endif mpc837xmds_usb_cfg(); } -machine_device_initcall(mpc837x_mds, mpc83xx_declare_of_platform_devices); +static struct of_device_id mpc837x_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .compatible = "simple-bus", }, + { .compatible = "gianfar", }, + {}, +}; + +static int __init mpc837x_declare_of_platform_devices(void) +{ + /* Publish platform_device */ + of_platform_bus_probe(NULL, mpc837x_ids, NULL); + + return 0; +} +machine_device_initcall(mpc837x_mds, mpc837x_declare_of_platform_devices); + +static void __init mpc837x_mds_init_IRQ(void) +{ + struct device_node *np; + + np = of_find_compatible_node(NULL, NULL, "fsl,ipic"); + if (!np) + return; + + ipic_init(np, 0); + + /* Initialize the default interrupt mapping priorities, + * in case the boot rom changed something on us. 
+ */ + ipic_set_default_priority(); +} /* * Called very early, MMU is off, device-tree isn't unflattened @@ -102,7 +142,7 @@ define_machine(mpc837x_mds) { .name = "MPC837x MDS", .probe = mpc837x_mds_probe, .setup_arch = mpc837x_mds_setup_arch, - .init_IRQ = mpc83xx_ipic_init_IRQ, + .init_IRQ = mpc837x_mds_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, diff --git a/trunk/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/trunk/arch/powerpc/platforms/83xx/mpc837x_rdb.c index 16c9c9cbbb7f..7bafbf2ec0f9 100644 --- a/trunk/arch/powerpc/platforms/83xx/mpc837x_rdb.c +++ b/trunk/arch/powerpc/platforms/83xx/mpc837x_rdb.c @@ -50,15 +50,56 @@ static void mpc837x_rdb_sd_cfg(void) */ static void __init mpc837x_rdb_setup_arch(void) { +#ifdef CONFIG_PCI + struct device_node *np; +#endif + if (ppc_md.progress) ppc_md.progress("mpc837x_rdb_setup_arch()", 0); - mpc83xx_setup_pci(); +#ifdef CONFIG_PCI + for_each_compatible_node(np, "pci", "fsl,mpc8349-pci") + mpc83xx_add_bridge(np); + for_each_compatible_node(np, "pci", "fsl,mpc8314-pcie") + mpc83xx_add_bridge(np); +#endif mpc837x_usb_cfg(); mpc837x_rdb_sd_cfg(); } -machine_device_initcall(mpc837x_rdb, mpc83xx_declare_of_platform_devices); +static struct of_device_id mpc837x_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .compatible = "simple-bus", }, + { .compatible = "gianfar", }, + { .compatible = "gpio-leds", }, + {}, +}; + +static int __init mpc837x_declare_of_platform_devices(void) +{ + /* Publish platform_device */ + of_platform_bus_probe(NULL, mpc837x_ids, NULL); + + return 0; +} +machine_device_initcall(mpc837x_rdb, mpc837x_declare_of_platform_devices); + +static void __init mpc837x_rdb_init_IRQ(void) +{ + struct device_node *np; + + np = of_find_compatible_node(NULL, NULL, "fsl,ipic"); + if (!np) + return; + + ipic_init(np, 0); + + /* Initialize the default interrupt mapping priorities, + * in case the boot rom changed something on us. 
+ */ + ipic_set_default_priority(); +} static const char *board[] __initdata = { "fsl,mpc8377rdb", @@ -80,7 +121,7 @@ define_machine(mpc837x_rdb) { .name = "MPC837x RDB/WLAN", .probe = mpc837x_rdb_probe, .setup_arch = mpc837x_rdb_setup_arch, - .init_IRQ = mpc83xx_ipic_init_IRQ, + .init_IRQ = mpc837x_rdb_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, diff --git a/trunk/arch/powerpc/platforms/83xx/mpc83xx.h b/trunk/arch/powerpc/platforms/83xx/mpc83xx.h index 0cf74d7ea1c5..82a434510d83 100644 --- a/trunk/arch/powerpc/platforms/83xx/mpc83xx.h +++ b/trunk/arch/powerpc/platforms/83xx/mpc83xx.h @@ -70,21 +70,5 @@ extern long mpc83xx_time_init(void); extern int mpc837x_usb_cfg(void); extern int mpc834x_usb_cfg(void); extern int mpc831x_usb_cfg(void); -extern void mpc83xx_ipic_init_IRQ(void); -#ifdef CONFIG_QUICC_ENGINE -extern void mpc83xx_qe_init_IRQ(void); -extern void mpc83xx_ipic_and_qe_init_IRQ(void); -#else -static inline void __init mpc83xx_qe_init_IRQ(void) {} -#define mpc83xx_ipic_and_qe_init_IRQ mpc83xx_ipic_init_IRQ -#endif /* CONFIG_QUICC_ENGINE */ - -#ifdef CONFIG_PCI -extern void mpc83xx_setup_pci(void); -#else -#define mpc83xx_setup_pci() do {} while (0) -#endif - -extern int mpc83xx_declare_of_platform_devices(void); #endif /* __MPC83XX_H__ */ diff --git a/trunk/arch/powerpc/platforms/83xx/sbc834x.c b/trunk/arch/powerpc/platforms/83xx/sbc834x.c index 8a81d7640b1f..af41d8c810a8 100644 --- a/trunk/arch/powerpc/platforms/83xx/sbc834x.c +++ b/trunk/arch/powerpc/platforms/83xx/sbc834x.c @@ -48,13 +48,52 @@ */ static void __init sbc834x_setup_arch(void) { +#ifdef CONFIG_PCI + struct device_node *np; +#endif + if (ppc_md.progress) ppc_md.progress("sbc834x_setup_arch()", 0); - mpc83xx_setup_pci(); +#ifdef CONFIG_PCI + for_each_compatible_node(np, "pci", "fsl,mpc8349-pci") + mpc83xx_add_bridge(np); +#endif + } -machine_device_initcall(sbc834x, mpc83xx_declare_of_platform_devices); +static void __init sbc834x_init_IRQ(void) +{ + struct device_node *np; + + np = of_find_node_by_type(NULL, "ipic"); + if (!np) + return; + + ipic_init(np, 0); + + /* Initialize the default interrupt mapping priorities, + * in case the boot rom changed something on us. 
+ */ + ipic_set_default_priority(); + + of_node_put(np); +} + +static struct __initdata of_device_id sbc834x_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .compatible = "simple-bus", }, + { .compatible = "gianfar", }, + {}, +}; + +static int __init sbc834x_declare_of_platform_devices(void) +{ + of_platform_bus_probe(NULL, sbc834x_ids, NULL); + return 0; +} +machine_device_initcall(sbc834x, sbc834x_declare_of_platform_devices); /* * Called very early, MMU is off, device-tree isn't unflattened @@ -63,14 +102,14 @@ static int __init sbc834x_probe(void) { unsigned long root = of_get_flat_dt_root(); - return of_flat_dt_is_compatible(root, "SBC834xE"); + return of_flat_dt_is_compatible(root, "SBC834x"); } define_machine(sbc834x) { - .name = "SBC834xE", + .name = "SBC834x", .probe = sbc834x_probe, .setup_arch = sbc834x_setup_arch, - .init_IRQ = mpc83xx_ipic_init_IRQ, + .init_IRQ = sbc834x_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, diff --git a/trunk/arch/powerpc/platforms/85xx/Makefile b/trunk/arch/powerpc/platforms/85xx/Makefile index 9cb2d4320dcc..bc5acb95917a 100644 --- a/trunk/arch/powerpc/platforms/85xx/Makefile +++ b/trunk/arch/powerpc/platforms/85xx/Makefile @@ -3,8 +3,6 @@ # obj-$(CONFIG_SMP) += smp.o -obj-y += common.o - obj-$(CONFIG_MPC8540_ADS) += mpc85xx_ads.o obj-$(CONFIG_MPC8560_ADS) += mpc85xx_ads.o obj-$(CONFIG_MPC85xx_CDS) += mpc85xx_cds.o diff --git a/trunk/arch/powerpc/platforms/85xx/common.c b/trunk/arch/powerpc/platforms/85xx/common.c deleted file mode 100644 index 9fef5302adc1..000000000000 --- a/trunk/arch/powerpc/platforms/85xx/common.c +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Routines common to most mpc85xx-based boards. - * - * This is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#include - -#include - -#include "mpc85xx.h" - -static struct of_device_id __initdata mpc85xx_common_ids[] = { - { .type = "soc", }, - { .compatible = "soc", }, - { .compatible = "simple-bus", }, - { .name = "cpm", }, - { .name = "localbus", }, - { .compatible = "gianfar", }, - { .compatible = "fsl,qe", }, - { .compatible = "fsl,cpm2", }, - { .compatible = "fsl,srio", }, - {}, -}; - -int __init mpc85xx_common_publish_devices(void) -{ - return of_platform_bus_probe(NULL, mpc85xx_common_ids, NULL); -} -#ifdef CONFIG_CPM2 -static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - int cascade_irq; - - while ((cascade_irq = cpm2_get_irq()) >= 0) - generic_handle_irq(cascade_irq); - - chip->irq_eoi(&desc->irq_data); -} - - -void __init mpc85xx_cpm2_pic_init(void) -{ - struct device_node *np; - int irq; - - /* Setup CPM2 PIC */ - np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic"); - if (np == NULL) { - printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n"); - return; - } - irq = irq_of_parse_and_map(np, 0); - if (irq == NO_IRQ) { - of_node_put(np); - printk(KERN_ERR "PIC init: got no IRQ for cpm cascade\n"); - return; - } - - cpm2_pic_init(np); - of_node_put(np); - irq_set_chained_handler(irq, cpm2_cascade); -} -#endif diff --git a/trunk/arch/powerpc/platforms/85xx/corenet_ds.c b/trunk/arch/powerpc/platforms/85xx/corenet_ds.c index 07e3e6c47371..802ad110b757 100644 --- a/trunk/arch/powerpc/platforms/85xx/corenet_ds.c +++ b/trunk/arch/powerpc/platforms/85xx/corenet_ds.c @@ -31,18 +31,32 @@ #include #include #include -#include "smp.h" void __init corenet_ds_pic_init(void) { struct mpic *mpic; - unsigned int flags = MPIC_BIG_ENDIAN | + struct resource r; + struct device_node *np = NULL; + unsigned int flags = MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU; + np = of_find_node_by_type(np, "open-pic"); + + if (np == NULL) { + printk(KERN_ERR "Could not find open-pic node\n"); + return; + } + + if (of_address_to_resource(np, 0, &r)) { + printk(KERN_ERR "Failed to map mpic register space\n"); + of_node_put(np); + return; + } + if (ppc_md.get_irq == mpic_get_coreint_irq) flags |= MPIC_ENABLE_COREINT; - mpic = mpic_alloc(NULL, 0, flags, 0, 256, " OpenPIC "); + mpic = mpic_alloc(np, r.start, flags, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); @@ -51,6 +65,10 @@ void __init corenet_ds_pic_init(void) /* * Setup the architecture */ +#ifdef CONFIG_SMP +void __init mpc85xx_smp_init(void); +#endif + void __init corenet_ds_setup_arch(void) { #ifdef CONFIG_PCI @@ -59,7 +77,9 @@ void __init corenet_ds_setup_arch(void) #endif dma_addr_t max = 0xffffffff; +#ifdef CONFIG_SMP mpc85xx_smp_init(); +#endif #ifdef CONFIG_PCI for_each_node_by_type(np, "pci") { @@ -92,7 +112,7 @@ static const struct of_device_id of_device_ids[] __devinitconst = { .compatible = "simple-bus" }, { - .compatible = "fsl,srio", + .compatible = "fsl,rapidio-delta", }, { .compatible = "fsl,p4080-pcie", diff --git a/trunk/arch/powerpc/platforms/85xx/ksi8560.c b/trunk/arch/powerpc/platforms/85xx/ksi8560.c index 20f75d7819c6..c46f9359be15 100644 --- a/trunk/arch/powerpc/platforms/85xx/ksi8560.c +++ b/trunk/arch/powerpc/platforms/85xx/ksi8560.c @@ -35,7 +35,6 @@ #include #include -#include "mpc85xx.h" #define KSI8560_CPLD_HVR 0x04 /* Hardware Version Register */ #define KSI8560_CPLD_PVR 0x08 /* PLD Version Register */ @@ -55,15 +54,60 @@ static void machine_restart(char *cmd) for (;;); } +static void cpm2_cascade(unsigned int 
irq, struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + int cascade_irq; + + while ((cascade_irq = cpm2_get_irq()) >= 0) + generic_handle_irq(cascade_irq); + + chip->irq_eoi(&desc->irq_data); +} + static void __init ksi8560_pic_init(void) { - struct mpic *mpic = mpic_alloc(NULL, 0, - MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, + struct mpic *mpic; + struct resource r; + struct device_node *np; +#ifdef CONFIG_CPM2 + int irq; +#endif + + np = of_find_node_by_type(NULL, "open-pic"); + + if (np == NULL) { + printk(KERN_ERR "Could not find open-pic node\n"); + return; + } + + if (of_address_to_resource(np, 0, &r)) { + printk(KERN_ERR "Could not map mpic register space\n"); + of_node_put(np); + return; + } + + mpic = mpic_alloc(np, r.start, + MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); + of_node_put(np); + mpic_init(mpic); - mpc85xx_cpm2_pic_init(); +#ifdef CONFIG_CPM2 + /* Setup CPM2 PIC */ + np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic"); + if (np == NULL) { + printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n"); + return; + } + irq = irq_of_parse_and_map(np, 0); + + cpm2_pic_init(np); + of_node_put(np); + irq_set_chained_handler(irq, cpm2_cascade); +#endif } #ifdef CONFIG_CPM2 @@ -171,7 +215,22 @@ static void ksi8560_show_cpuinfo(struct seq_file *m) seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); } -machine_device_initcall(ksi8560, mpc85xx_common_publish_devices); +static struct of_device_id __initdata of_bus_ids[] = { + { .type = "soc", }, + { .type = "simple-bus", }, + { .name = "cpm", }, + { .name = "localbus", }, + { .compatible = "gianfar", }, + {}, +}; + +static int __init declare_of_platform_devices(void) +{ + of_platform_bus_probe(NULL, of_bus_ids, NULL); + + return 0; +} +machine_device_initcall(ksi8560, declare_of_platform_devices); /* * Called very early, device-tree isn't unflattened diff --git a/trunk/arch/powerpc/platforms/85xx/mpc8536_ds.c b/trunk/arch/powerpc/platforms/85xx/mpc8536_ds.c index cf266826682e..f79f2f102141 100644 --- a/trunk/arch/powerpc/platforms/85xx/mpc8536_ds.c +++ b/trunk/arch/powerpc/platforms/85xx/mpc8536_ds.c @@ -32,15 +32,31 @@ #include #include -#include "mpc85xx.h" - void __init mpc8536_ds_pic_init(void) { - struct mpic *mpic = mpic_alloc(NULL, 0, - MPIC_WANTS_RESET | + struct mpic *mpic; + struct resource r; + struct device_node *np; + + np = of_find_node_by_type(NULL, "open-pic"); + if (np == NULL) { + printk(KERN_ERR "Could not find open-pic node\n"); + return; + } + + if (of_address_to_resource(np, 0, &r)) { + printk(KERN_ERR "Failed to map mpic register space\n"); + of_node_put(np); + return; + } + + mpic = mpic_alloc(np, r.start, + MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); + of_node_put(np); + mpic_init(mpic); } @@ -88,7 +104,19 @@ static void __init mpc8536_ds_setup_arch(void) printk("MPC8536 DS board from Freescale Semiconductor\n"); } -machine_device_initcall(mpc8536_ds, mpc85xx_common_publish_devices); +static struct of_device_id __initdata mpc8536_ds_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .compatible = "simple-bus", }, + { .compatible = "gianfar", }, + {}, +}; + +static int __init mpc8536_ds_publish_devices(void) +{ + return of_platform_bus_probe(NULL, mpc8536_ds_ids, NULL); +} +machine_device_initcall(mpc8536_ds, mpc8536_ds_publish_devices); machine_arch_initcall(mpc8536_ds, swiotlb_setup_bus_notifier); diff --git 
a/trunk/arch/powerpc/platforms/85xx/mpc85xx.h b/trunk/arch/powerpc/platforms/85xx/mpc85xx.h deleted file mode 100644 index 2aa7c5dc2c7f..000000000000 --- a/trunk/arch/powerpc/platforms/85xx/mpc85xx.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef MPC85xx_H -#define MPC85xx_H -extern int mpc85xx_common_publish_devices(void); - -#ifdef CONFIG_CPM2 -extern void mpc85xx_cpm2_pic_init(void); -#else -static inline void __init mpc85xx_cpm2_pic_init(void) {} -#endif /* CONFIG_CPM2 */ - -#endif diff --git a/trunk/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/trunk/arch/powerpc/platforms/85xx/mpc85xx_ads.c index 3bebb5173bfc..3b2c9bb66199 100644 --- a/trunk/arch/powerpc/platforms/85xx/mpc85xx_ads.c +++ b/trunk/arch/powerpc/platforms/85xx/mpc85xx_ads.c @@ -35,8 +35,6 @@ #include #endif -#include "mpc85xx.h" - #ifdef CONFIG_PCI static int mpc85xx_exclude_device(struct pci_controller *hose, u_char bus, u_char devfn) @@ -48,15 +46,63 @@ static int mpc85xx_exclude_device(struct pci_controller *hose, } #endif /* CONFIG_PCI */ +#ifdef CONFIG_CPM2 + +static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + int cascade_irq; + + while ((cascade_irq = cpm2_get_irq()) >= 0) + generic_handle_irq(cascade_irq); + + chip->irq_eoi(&desc->irq_data); +} + +#endif /* CONFIG_CPM2 */ + static void __init mpc85xx_ads_pic_init(void) { - struct mpic *mpic = mpic_alloc(NULL, 0, - MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, + struct mpic *mpic; + struct resource r; + struct device_node *np = NULL; +#ifdef CONFIG_CPM2 + int irq; +#endif + + np = of_find_node_by_type(np, "open-pic"); + if (!np) { + printk(KERN_ERR "Could not find open-pic node\n"); + return; + } + + if (of_address_to_resource(np, 0, &r)) { + printk(KERN_ERR "Could not map mpic register space\n"); + of_node_put(np); + return; + } + + mpic = mpic_alloc(np, r.start, + MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); + of_node_put(np); + mpic_init(mpic); - mpc85xx_cpm2_pic_init(); +#ifdef CONFIG_CPM2 + /* Setup CPM2 PIC */ + np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic"); + if (np == NULL) { + printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n"); + return; + } + irq = irq_of_parse_and_map(np, 0); + + cpm2_pic_init(np); + of_node_put(np); + irq_set_chained_handler(irq, cpm2_cascade); +#endif } /* @@ -175,7 +221,23 @@ static void mpc85xx_ads_show_cpuinfo(struct seq_file *m) seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); } -machine_device_initcall(mpc85xx_ads, mpc85xx_common_publish_devices); +static struct of_device_id __initdata of_bus_ids[] = { + { .name = "soc", }, + { .type = "soc", }, + { .name = "cpm", }, + { .name = "localbus", }, + { .compatible = "simple-bus", }, + { .compatible = "gianfar", }, + {}, +}; + +static int __init declare_of_platform_devices(void) +{ + of_platform_bus_probe(NULL, of_bus_ids, NULL); + + return 0; +} +machine_device_initcall(mpc85xx_ads, declare_of_platform_devices); /* * Called very early, device-tree isn't unflattened diff --git a/trunk/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/trunk/arch/powerpc/platforms/85xx/mpc85xx_cds.c index 40f03da616a9..66cb8d64079f 100644 --- a/trunk/arch/powerpc/platforms/85xx/mpc85xx_cds.c +++ b/trunk/arch/powerpc/platforms/85xx/mpc85xx_cds.c @@ -46,8 +46,6 @@ #include #include -#include "mpc85xx.h" - /* CADMUS info */ /* xxx - galak, move into device tree */ #define CADMUS_BASE (0xf8004000) @@ -179,7 +177,7 @@ static irqreturn_t mpc85xx_8259_cascade_action(int irq, void 
*dev_id) static struct irqaction mpc85xxcds_8259_irqaction = { .handler = mpc85xx_8259_cascade_action, - .flags = IRQF_SHARED | IRQF_NO_THREAD, + .flags = IRQF_SHARED, .name = "8259 cascade", }; #endif /* PPC_I8259 */ @@ -188,10 +186,30 @@ static struct irqaction mpc85xxcds_8259_irqaction = { static void __init mpc85xx_cds_pic_init(void) { struct mpic *mpic; - mpic = mpic_alloc(NULL, 0, - MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, + struct resource r; + struct device_node *np = NULL; + + np = of_find_node_by_type(np, "open-pic"); + + if (np == NULL) { + printk(KERN_ERR "Could not find open-pic node\n"); + return; + } + + if (of_address_to_resource(np, 0, &r)) { + printk(KERN_ERR "Failed to map mpic register space\n"); + of_node_put(np); + return; + } + + mpic = mpic_alloc(np, r.start, + MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); + + /* Return the mpic node */ + of_node_put(np); + mpic_init(mpic); } @@ -312,7 +330,19 @@ static int __init mpc85xx_cds_probe(void) return of_flat_dt_is_compatible(root, "MPC85xxCDS"); } -machine_device_initcall(mpc85xx_cds, mpc85xx_common_publish_devices); +static struct of_device_id __initdata of_bus_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .compatible = "simple-bus", }, + { .compatible = "gianfar", }, + {}, +}; + +static int __init declare_of_platform_devices(void) +{ + return of_platform_bus_probe(NULL, of_bus_ids, NULL); +} +machine_device_initcall(mpc85xx_cds, declare_of_platform_devices); define_machine(mpc85xx_cds) { .name = "MPC85xx CDS", diff --git a/trunk/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/trunk/arch/powerpc/platforms/85xx/mpc85xx_ds.c index eefbb91e1d61..1b9a8cf1873a 100644 --- a/trunk/arch/powerpc/platforms/85xx/mpc85xx_ds.c +++ b/trunk/arch/powerpc/platforms/85xx/mpc85xx_ds.c @@ -35,9 +35,6 @@ #include #include -#include "smp.h" - -#include "mpc85xx.h" #undef DEBUG @@ -63,27 +60,43 @@ static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc) void __init mpc85xx_ds_pic_init(void) { struct mpic *mpic; -#ifdef CONFIG_PPC_I8259 + struct resource r; struct device_node *np; +#ifdef CONFIG_PPC_I8259 struct device_node *cascade_node = NULL; int cascade_irq; #endif unsigned long root = of_get_flat_dt_root(); + np = of_find_node_by_type(NULL, "open-pic"); + if (np == NULL) { + printk(KERN_ERR "Could not find open-pic node\n"); + return; + } + + if (of_address_to_resource(np, 0, &r)) { + printk(KERN_ERR "Failed to map mpic register space\n"); + of_node_put(np); + return; + } + if (of_flat_dt_is_compatible(root, "fsl,MPC8572DS-CAMP")) { - mpic = mpic_alloc(NULL, 0, + mpic = mpic_alloc(np, r.start, + MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); } else { - mpic = mpic_alloc(NULL, 0, - MPIC_WANTS_RESET | + mpic = mpic_alloc(np, r.start, + MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); } BUG_ON(mpic == NULL); + of_node_put(np); + mpic_init(mpic); #ifdef CONFIG_PPC_I8259 @@ -139,6 +152,9 @@ static int mpc85xx_exclude_device(struct pci_controller *hose, /* * Setup the architecture */ +#ifdef CONFIG_SMP +extern void __init mpc85xx_smp_init(void); +#endif static void __init mpc85xx_ds_setup_arch(void) { #ifdef CONFIG_PCI @@ -171,7 +187,9 @@ static void __init mpc85xx_ds_setup_arch(void) ppc_md.pci_exclude_device = mpc85xx_exclude_device; #endif +#ifdef CONFIG_SMP mpc85xx_smp_init(); +#endif #ifdef CONFIG_SWIOTLB if (memblock_end_of_DRAM() > max) { @@ 
-201,9 +219,21 @@ static int __init mpc8544_ds_probe(void) return 0; } -machine_device_initcall(mpc8544_ds, mpc85xx_common_publish_devices); -machine_device_initcall(mpc8572_ds, mpc85xx_common_publish_devices); -machine_device_initcall(p2020_ds, mpc85xx_common_publish_devices); +static struct of_device_id __initdata mpc85xxds_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .compatible = "simple-bus", }, + { .compatible = "gianfar", }, + {}, +}; + +static int __init mpc85xxds_publish_devices(void) +{ + return of_platform_bus_probe(NULL, mpc85xxds_ids, NULL); +} +machine_device_initcall(mpc8544_ds, mpc85xxds_publish_devices); +machine_device_initcall(mpc8572_ds, mpc85xxds_publish_devices); +machine_device_initcall(p2020_ds, mpc85xxds_publish_devices); machine_arch_initcall(mpc8544_ds, swiotlb_setup_bus_notifier); machine_arch_initcall(mpc8572_ds, swiotlb_setup_bus_notifier); diff --git a/trunk/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/trunk/arch/powerpc/platforms/85xx/mpc85xx_mds.c index 1d15a0cd2c82..a23a3ff634c5 100644 --- a/trunk/arch/powerpc/platforms/85xx/mpc85xx_mds.c +++ b/trunk/arch/powerpc/platforms/85xx/mpc85xx_mds.c @@ -51,9 +51,6 @@ #include #include #include -#include "smp.h" - -#include "mpc85xx.h" #undef DEBUG #ifdef DEBUG @@ -156,7 +153,30 @@ static int mpc8568_mds_phy_fixups(struct phy_device *phydev) * Setup the architecture * */ +#ifdef CONFIG_SMP +extern void __init mpc85xx_smp_init(void); +#endif + #ifdef CONFIG_QUICC_ENGINE +static struct of_device_id mpc85xx_qe_ids[] __initdata = { + { .type = "qe", }, + { .compatible = "fsl,qe", }, + { }, +}; + +static void __init mpc85xx_publish_qe_devices(void) +{ + struct device_node *np; + + np = of_find_compatible_node(NULL, NULL, "fsl,qe"); + if (!of_device_is_available(np)) { + of_node_put(np); + return; + } + + of_platform_bus_probe(NULL, mpc85xx_qe_ids, NULL); +} + static void __init mpc85xx_mds_reset_ucc_phys(void) { struct device_node *np; @@ -327,6 +347,7 @@ static void __init mpc85xx_mds_qeic_init(void) of_node_put(np); } #else +static void __init mpc85xx_publish_qe_devices(void) { } static void __init mpc85xx_mds_qe_init(void) { } static void __init mpc85xx_mds_qeic_init(void) { } #endif /* CONFIG_QUICC_ENGINE */ @@ -360,7 +381,9 @@ static void __init mpc85xx_mds_setup_arch(void) } #endif +#ifdef CONFIG_SMP mpc85xx_smp_init(); +#endif mpc85xx_mds_qe_init(); @@ -406,11 +429,24 @@ machine_arch_initcall(mpc8568_mds, board_fixups); machine_arch_initcall(mpc8569_mds, board_fixups); static struct of_device_id mpc85xx_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .compatible = "simple-bus", }, + { .compatible = "gianfar", }, + { .compatible = "fsl,rapidio-delta", }, { .compatible = "fsl,mpc8548-guts", }, { .compatible = "gpio-leds", }, {}, }; +static struct of_device_id p1021_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .compatible = "simple-bus", }, + { .compatible = "gianfar", }, + {}, +}; + static int __init mpc85xx_publish_devices(void) { if (machine_is(mpc8568_mds)) @@ -418,15 +454,23 @@ static int __init mpc85xx_publish_devices(void) if (machine_is(mpc8569_mds)) simple_gpiochip_init("fsl,mpc8569mds-bcsr-gpio"); - mpc85xx_common_publish_devices(); of_platform_bus_probe(NULL, mpc85xx_ids, NULL); + mpc85xx_publish_qe_devices(); + + return 0; +} + +static int __init p1021_publish_devices(void) +{ + of_platform_bus_probe(NULL, p1021_ids, NULL); + mpc85xx_publish_qe_devices(); return 0; } machine_device_initcall(mpc8568_mds, mpc85xx_publish_devices); 
machine_device_initcall(mpc8569_mds, mpc85xx_publish_devices); -machine_device_initcall(p1021_mds, mpc85xx_common_publish_devices); +machine_device_initcall(p1021_mds, p1021_publish_devices); machine_arch_initcall(mpc8568_mds, swiotlb_setup_bus_notifier); machine_arch_initcall(mpc8569_mds, swiotlb_setup_bus_notifier); @@ -434,11 +478,26 @@ machine_arch_initcall(p1021_mds, swiotlb_setup_bus_notifier); static void __init mpc85xx_mds_pic_init(void) { - struct mpic *mpic = mpic_alloc(NULL, 0, - MPIC_WANTS_RESET | MPIC_BIG_ENDIAN | + struct mpic *mpic; + struct resource r; + struct device_node *np = NULL; + + np = of_find_node_by_type(NULL, "open-pic"); + if (!np) + return; + + if (of_address_to_resource(np, 0, &r)) { + printk(KERN_ERR "Failed to map mpic register space\n"); + of_node_put(np); + return; + } + + mpic = mpic_alloc(np, r.start, + MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); + of_node_put(np); mpic_init(mpic); mpc85xx_mds_qeic_init(); diff --git a/trunk/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/trunk/arch/powerpc/platforms/85xx/mpc85xx_rdb.c index ccf520e890be..f5ff9110c97e 100644 --- a/trunk/arch/powerpc/platforms/85xx/mpc85xx_rdb.c +++ b/trunk/arch/powerpc/platforms/85xx/mpc85xx_rdb.c @@ -29,9 +29,6 @@ #include #include -#include "smp.h" - -#include "mpc85xx.h" #undef DEBUG @@ -45,28 +42,49 @@ void __init mpc85xx_rdb_pic_init(void) { struct mpic *mpic; + struct resource r; + struct device_node *np; unsigned long root = of_get_flat_dt_root(); + np = of_find_node_by_type(NULL, "open-pic"); + if (np == NULL) { + printk(KERN_ERR "Could not find open-pic node\n"); + return; + } + + if (of_address_to_resource(np, 0, &r)) { + printk(KERN_ERR "Failed to map mpic register space\n"); + of_node_put(np); + return; + } + if (of_flat_dt_is_compatible(root, "fsl,MPC85XXRDB-CAMP")) { - mpic = mpic_alloc(NULL, 0, + mpic = mpic_alloc(np, r.start, + MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); } else { - mpic = mpic_alloc(NULL, 0, - MPIC_WANTS_RESET | + mpic = mpic_alloc(np, r.start, + MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); } BUG_ON(mpic == NULL); + of_node_put(np); + mpic_init(mpic); + } /* * Setup the architecture */ +#ifdef CONFIG_SMP +extern void __init mpc85xx_smp_init(void); +#endif static void __init mpc85xx_rdb_setup_arch(void) { #ifdef CONFIG_PCI @@ -84,12 +102,27 @@ static void __init mpc85xx_rdb_setup_arch(void) #endif +#ifdef CONFIG_SMP mpc85xx_smp_init(); +#endif + printk(KERN_INFO "MPC85xx RDB board from Freescale Semiconductor\n"); } -machine_device_initcall(p2020_rdb, mpc85xx_common_publish_devices); -machine_device_initcall(p1020_rdb, mpc85xx_common_publish_devices); +static struct of_device_id __initdata mpc85xxrdb_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .compatible = "simple-bus", }, + { .compatible = "gianfar", }, + {}, +}; + +static int __init mpc85xxrdb_publish_devices(void) +{ + return of_platform_bus_probe(NULL, mpc85xxrdb_ids, NULL); +} +machine_device_initcall(p2020_rdb, mpc85xxrdb_publish_devices); +machine_device_initcall(p1020_rdb, mpc85xxrdb_publish_devices); /* * Called very early, device-tree isn't unflattened diff --git a/trunk/arch/powerpc/platforms/85xx/p1010rdb.c b/trunk/arch/powerpc/platforms/85xx/p1010rdb.c index 538bc3f57e9d..d7387fa7f534 100644 --- a/trunk/arch/powerpc/platforms/85xx/p1010rdb.c +++ 
b/trunk/arch/powerpc/platforms/85xx/p1010rdb.c @@ -28,18 +28,33 @@ #include #include -#include "mpc85xx.h" - void __init p1010_rdb_pic_init(void) { - struct mpic *mpic = mpic_alloc(NULL, 0, - MPIC_WANTS_RESET | MPIC_BIG_ENDIAN | - MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU, + struct mpic *mpic; + struct resource r; + struct device_node *np; + + np = of_find_node_by_type(NULL, "open-pic"); + if (np == NULL) { + printk(KERN_ERR "Could not find open-pic node\n"); + return; + } + + if (of_address_to_resource(np, 0, &r)) { + printk(KERN_ERR "Failed to map mpic register space\n"); + of_node_put(np); + return; + } + + mpic = mpic_alloc(np, r.start, MPIC_PRIMARY | MPIC_WANTS_RESET | + MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); + of_node_put(np); mpic_init(mpic); + } @@ -66,7 +81,18 @@ static void __init p1010_rdb_setup_arch(void) printk(KERN_INFO "P1010 RDB board from Freescale Semiconductor\n"); } -machine_device_initcall(p1010_rdb, mpc85xx_common_publish_devices); +static struct of_device_id __initdata p1010rdb_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .compatible = "simple-bus", }, + {}, +}; + +static int __init p1010rdb_publish_devices(void) +{ + return of_platform_bus_probe(NULL, p1010rdb_ids, NULL); +} +machine_device_initcall(p1010_rdb, p1010rdb_publish_devices); machine_arch_initcall(p1010_rdb, swiotlb_setup_bus_notifier); /* diff --git a/trunk/arch/powerpc/platforms/85xx/p1022_ds.c b/trunk/arch/powerpc/platforms/85xx/p1022_ds.c index bb3d84f4046f..fda15716fada 100644 --- a/trunk/arch/powerpc/platforms/85xx/p1022_ds.c +++ b/trunk/arch/powerpc/platforms/85xx/p1022_ds.c @@ -26,9 +26,6 @@ #include #include #include -#include "smp.h" - -#include "mpc85xx.h" #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) @@ -241,15 +238,38 @@ p1022ds_valid_monitor_port(enum fsl_diu_monitor_port port) void __init p1022_ds_pic_init(void) { - struct mpic *mpic = mpic_alloc(NULL, 0, - MPIC_WANTS_RESET | + struct mpic *mpic; + struct resource r; + struct device_node *np; + + np = of_find_node_by_type(NULL, "open-pic"); + if (!np) { + pr_err("Could not find open-pic node\n"); + return; + } + + if (of_address_to_resource(np, 0, &r)) { + pr_err("Failed to map mpic register space\n"); + of_node_put(np); + return; + } + + mpic = mpic_alloc(np, r.start, + MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); + BUG_ON(mpic == NULL); + of_node_put(np); + mpic_init(mpic); } +#ifdef CONFIG_SMP +void __init mpc85xx_smp_init(void); +#endif + /* * Setup the architecture */ @@ -289,7 +309,9 @@ static void __init p1022_ds_setup_arch(void) diu_ops.valid_monitor_port = p1022ds_valid_monitor_port; #endif +#ifdef CONFIG_SMP mpc85xx_smp_init(); +#endif #ifdef CONFIG_SWIOTLB if (memblock_end_of_DRAM() > max) { @@ -303,6 +325,10 @@ static void __init p1022_ds_setup_arch(void) } static struct of_device_id __initdata p1022_ds_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .compatible = "simple-bus", }, + { .compatible = "gianfar", }, /* So that the DMA channel nodes can be probed individually: */ { .compatible = "fsl,eloplus-dma", }, {}, @@ -310,7 +336,6 @@ static struct of_device_id __initdata p1022_ds_ids[] = { static int __init p1022_ds_publish_devices(void) { - mpc85xx_common_publish_devices(); return of_platform_bus_probe(NULL, p1022_ds_ids, NULL); } machine_device_initcall(p1022_ds, p1022_ds_publish_devices); diff --git 
a/trunk/arch/powerpc/platforms/85xx/p1023_rds.c b/trunk/arch/powerpc/platforms/85xx/p1023_rds.c index d951e7027bb6..835e0b335bfa 100644 --- a/trunk/arch/powerpc/platforms/85xx/p1023_rds.c +++ b/trunk/arch/powerpc/platforms/85xx/p1023_rds.c @@ -30,18 +30,19 @@ #include #include #include -#include "smp.h" #include #include -#include "mpc85xx.h" - /* ************************************************************************ * * Setup the architecture * */ +#ifdef CONFIG_SMP +void __init mpc85xx_smp_init(void); +#endif + static void __init mpc85xx_rds_setup_arch(void) { struct device_node *np; @@ -86,19 +87,53 @@ static void __init mpc85xx_rds_setup_arch(void) fsl_add_bridge(np, 0); #endif +#ifdef CONFIG_SMP mpc85xx_smp_init(); +#endif +} + +static struct of_device_id p1023_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .compatible = "simple-bus", }, + {}, +}; + + +static int __init p1023_publish_devices(void) +{ + of_platform_bus_probe(NULL, p1023_ids, NULL); + + return 0; } -machine_device_initcall(p1023_rds, mpc85xx_common_publish_devices); +machine_device_initcall(p1023_rds, p1023_publish_devices); static void __init mpc85xx_rds_pic_init(void) { - struct mpic *mpic = mpic_alloc(NULL, 0, - MPIC_WANTS_RESET | MPIC_BIG_ENDIAN | + struct mpic *mpic; + struct resource r; + struct device_node *np = NULL; + + np = of_find_node_by_type(NULL, "open-pic"); + if (!np) { + printk(KERN_ERR "Could not find open-pic node\n"); + return; + } + + if (of_address_to_resource(np, 0, &r)) { + printk(KERN_ERR "Failed to map mpic register space\n"); + of_node_put(np); + return; + } + + mpic = mpic_alloc(np, r.start, + MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); + of_node_put(np); mpic_init(mpic); } diff --git a/trunk/arch/powerpc/platforms/85xx/sbc8548.c b/trunk/arch/powerpc/platforms/85xx/sbc8548.c index 184a50784617..14632a971225 100644 --- a/trunk/arch/powerpc/platforms/85xx/sbc8548.c +++ b/trunk/arch/powerpc/platforms/85xx/sbc8548.c @@ -48,16 +48,35 @@ #include #include -#include "mpc85xx.h" - static int sbc_rev; static void __init sbc8548_pic_init(void) { - struct mpic *mpic = mpic_alloc(NULL, 0, - MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, + struct mpic *mpic; + struct resource r; + struct device_node *np = NULL; + + np = of_find_node_by_type(np, "open-pic"); + + if (np == NULL) { + printk(KERN_ERR "Could not find open-pic node\n"); + return; + } + + if (of_address_to_resource(np, 0, &r)) { + printk(KERN_ERR "Failed to map mpic register space\n"); + of_node_put(np); + return; + } + + mpic = mpic_alloc(np, r.start, + MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); + + /* Return the mpic node */ + of_node_put(np); + mpic_init(mpic); } @@ -130,7 +149,21 @@ static void sbc8548_show_cpuinfo(struct seq_file *m) seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); } -machine_device_initcall(sbc8548, mpc85xx_common_publish_devices); +static struct of_device_id __initdata of_bus_ids[] = { + { .name = "soc", }, + { .type = "soc", }, + { .compatible = "simple-bus", }, + { .compatible = "gianfar", }, + {}, +}; + +static int __init declare_of_platform_devices(void) +{ + of_platform_bus_probe(NULL, of_bus_ids, NULL); + + return 0; +} +machine_device_initcall(sbc8548, declare_of_platform_devices); /* * Called very early, device-tree isn't unflattened diff --git a/trunk/arch/powerpc/platforms/85xx/sbc8560.c b/trunk/arch/powerpc/platforms/85xx/sbc8560.c index 
940752e93051..cebd786dc334 100644 --- a/trunk/arch/powerpc/platforms/85xx/sbc8560.c +++ b/trunk/arch/powerpc/platforms/85xx/sbc8560.c @@ -32,22 +32,68 @@ #include #include -#include "mpc85xx.h" - #ifdef CONFIG_CPM2 #include #include #endif +#ifdef CONFIG_CPM2 + +static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + int cascade_irq; + + while ((cascade_irq = cpm2_get_irq()) >= 0) + generic_handle_irq(cascade_irq); + + chip->irq_eoi(&desc->irq_data); +} + +#endif /* CONFIG_CPM2 */ + static void __init sbc8560_pic_init(void) { - struct mpic *mpic = mpic_alloc(NULL, 0, - MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, + struct mpic *mpic; + struct resource r; + struct device_node *np = NULL; +#ifdef CONFIG_CPM2 + int irq; +#endif + + np = of_find_node_by_type(np, "open-pic"); + if (!np) { + printk(KERN_ERR "Could not find open-pic node\n"); + return; + } + + if (of_address_to_resource(np, 0, &r)) { + printk(KERN_ERR "Could not map mpic register space\n"); + of_node_put(np); + return; + } + + mpic = mpic_alloc(np, r.start, + MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); + of_node_put(np); + mpic_init(mpic); - mpc85xx_cpm2_pic_init(); +#ifdef CONFIG_CPM2 + /* Setup CPM2 PIC */ + np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic"); + if (np == NULL) { + printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n"); + return; + } + irq = irq_of_parse_and_map(np, 0); + + cpm2_pic_init(np); + of_node_put(np); + irq_set_chained_handler(irq, cpm2_cascade); +#endif } /* @@ -162,7 +208,23 @@ static void sbc8560_show_cpuinfo(struct seq_file *m) seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); } -machine_device_initcall(sbc8560, mpc85xx_common_publish_devices); +static struct of_device_id __initdata of_bus_ids[] = { + { .name = "soc", }, + { .type = "soc", }, + { .name = "cpm", }, + { .name = "localbus", }, + { .compatible = "simple-bus", }, + { .compatible = "gianfar", }, + {}, +}; + +static int __init declare_of_platform_devices(void) +{ + of_platform_bus_probe(NULL, of_bus_ids, NULL); + + return 0; +} +machine_device_initcall(sbc8560, declare_of_platform_devices); /* * Called very early, device-tree isn't unflattened diff --git a/trunk/arch/powerpc/platforms/85xx/smp.c b/trunk/arch/powerpc/platforms/85xx/smp.c index ff4249044a3c..2df4785ffd4e 100644 --- a/trunk/arch/powerpc/platforms/85xx/smp.c +++ b/trunk/arch/powerpc/platforms/85xx/smp.c @@ -27,7 +27,6 @@ #include #include -#include "smp.h" extern void __early_start(void); diff --git a/trunk/arch/powerpc/platforms/85xx/smp.h b/trunk/arch/powerpc/platforms/85xx/smp.h deleted file mode 100644 index e2b44933ff19..000000000000 --- a/trunk/arch/powerpc/platforms/85xx/smp.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef POWERPC_85XX_SMP_H_ -#define POWERPC_85XX_SMP_H_ 1 - -#include - -#ifdef CONFIG_SMP -void __init mpc85xx_smp_init(void); -#else -static inline void mpc85xx_smp_init(void) -{ - /* Nothing to do */ -} -#endif - -#endif /* not POWERPC_85XX_SMP_H_ */ diff --git a/trunk/arch/powerpc/platforms/85xx/socrates.c b/trunk/arch/powerpc/platforms/85xx/socrates.c index 18f635906b27..747d8fb3ab82 100644 --- a/trunk/arch/powerpc/platforms/85xx/socrates.c +++ b/trunk/arch/powerpc/platforms/85xx/socrates.c @@ -41,17 +41,32 @@ #include #include -#include "mpc85xx.h" #include "socrates_fpga_pic.h" static void __init socrates_pic_init(void) { + struct mpic *mpic; + struct resource r; struct device_node *np; - struct mpic *mpic = mpic_alloc(NULL, 
0, - MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, + np = of_find_node_by_type(NULL, "open-pic"); + if (!np) { + printk(KERN_ERR "Could not find open-pic node\n"); + return; + } + + if (of_address_to_resource(np, 0, &r)) { + printk(KERN_ERR "Could not map mpic register space\n"); + of_node_put(np); + return; + } + + mpic = mpic_alloc(np, r.start, + MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); + of_node_put(np); + mpic_init(mpic); np = of_find_compatible_node(NULL, NULL, "abb,socrates-fpga-pic"); @@ -81,7 +96,17 @@ static void __init socrates_setup_arch(void) #endif } -machine_device_initcall(socrates, mpc85xx_common_publish_devices); +static struct of_device_id __initdata socrates_of_bus_ids[] = { + { .compatible = "simple-bus", }, + { .compatible = "gianfar", }, + {}, +}; + +static int __init socrates_publish_devices(void) +{ + return of_platform_bus_probe(NULL, socrates_of_bus_ids, NULL); +} +machine_device_initcall(socrates, socrates_publish_devices); /* * Called very early, device-tree isn't unflattened diff --git a/trunk/arch/powerpc/platforms/85xx/stx_gp3.c b/trunk/arch/powerpc/platforms/85xx/stx_gp3.c index e9e5234b4e76..5387e9f06bdb 100644 --- a/trunk/arch/powerpc/platforms/85xx/stx_gp3.c +++ b/trunk/arch/powerpc/platforms/85xx/stx_gp3.c @@ -40,21 +40,70 @@ #include #include -#include "mpc85xx.h" - #ifdef CONFIG_CPM2 #include +#include + +static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + int cascade_irq; + + while ((cascade_irq = cpm2_get_irq()) >= 0) + generic_handle_irq(cascade_irq); + + chip->irq_eoi(&desc->irq_data); +} #endif /* CONFIG_CPM2 */ static void __init stx_gp3_pic_init(void) { - struct mpic *mpic = mpic_alloc(NULL, 0, - MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, + struct mpic *mpic; + struct resource r; + struct device_node *np; +#ifdef CONFIG_CPM2 + int irq; +#endif + + np = of_find_node_by_type(NULL, "open-pic"); + if (!np) { + printk(KERN_ERR "Could not find open-pic node\n"); + return; + } + + if (of_address_to_resource(np, 0, &r)) { + printk(KERN_ERR "Could not map mpic register space\n"); + of_node_put(np); + return; + } + + mpic = mpic_alloc(np, r.start, + MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); + of_node_put(np); + mpic_init(mpic); - mpc85xx_cpm2_pic_init(); +#ifdef CONFIG_CPM2 + /* Setup CPM2 PIC */ + np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic"); + if (np == NULL) { + printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n"); + return; + } + irq = irq_of_parse_and_map(np, 0); + + if (irq == NO_IRQ) { + of_node_put(np); + printk(KERN_ERR "PIC init: got no IRQ for cpm cascade\n"); + return; + } + + cpm2_pic_init(np); + of_node_put(np); + irq_set_chained_handler(irq, cpm2_cascade); +#endif } /* @@ -95,7 +144,19 @@ static void stx_gp3_show_cpuinfo(struct seq_file *m) seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); } -machine_device_initcall(stx_gp3, mpc85xx_common_publish_devices); +static struct of_device_id __initdata of_bus_ids[] = { + { .compatible = "simple-bus", }, + { .compatible = "gianfar", }, + {}, +}; + +static int __init declare_of_platform_devices(void) +{ + of_platform_bus_probe(NULL, of_bus_ids, NULL); + + return 0; +} +machine_device_initcall(stx_gp3, declare_of_platform_devices); /* * Called very early, device-tree isn't unflattened diff --git a/trunk/arch/powerpc/platforms/85xx/tqm85xx.c b/trunk/arch/powerpc/platforms/85xx/tqm85xx.c index 
bf7c89fb75bb..325de772725a 100644 --- a/trunk/arch/powerpc/platforms/85xx/tqm85xx.c +++ b/trunk/arch/powerpc/platforms/85xx/tqm85xx.c @@ -38,21 +38,70 @@ #include #include -#include "mpc85xx.h" - #ifdef CONFIG_CPM2 #include +#include + +static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + int cascade_irq; + + while ((cascade_irq = cpm2_get_irq()) >= 0) + generic_handle_irq(cascade_irq); + + chip->irq_eoi(&desc->irq_data); +} #endif /* CONFIG_CPM2 */ static void __init tqm85xx_pic_init(void) { - struct mpic *mpic = mpic_alloc(NULL, 0, - MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, + struct mpic *mpic; + struct resource r; + struct device_node *np; +#ifdef CONFIG_CPM2 + int irq; +#endif + + np = of_find_node_by_type(NULL, "open-pic"); + if (!np) { + printk(KERN_ERR "Could not find open-pic node\n"); + return; + } + + if (of_address_to_resource(np, 0, &r)) { + printk(KERN_ERR "Could not map mpic register space\n"); + of_node_put(np); + return; + } + + mpic = mpic_alloc(np, r.start, + MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); + of_node_put(np); + mpic_init(mpic); - mpc85xx_cpm2_pic_init(); +#ifdef CONFIG_CPM2 + /* Setup CPM2 PIC */ + np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic"); + if (np == NULL) { + printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n"); + return; + } + irq = irq_of_parse_and_map(np, 0); + + if (irq == NO_IRQ) { + of_node_put(np); + printk(KERN_ERR "PIC init: got no IRQ for cpm cascade\n"); + return; + } + + cpm2_pic_init(np); + of_node_put(np); + irq_set_chained_handler(irq, cpm2_cascade); +#endif } /* @@ -124,7 +173,19 @@ static void __init tqm85xx_ti1520_fixup(struct pci_dev *pdev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1520, tqm85xx_ti1520_fixup); -machine_device_initcall(tqm85xx, mpc85xx_common_publish_devices); +static struct of_device_id __initdata of_bus_ids[] = { + { .compatible = "simple-bus", }, + { .compatible = "gianfar", }, + {}, +}; + +static int __init declare_of_platform_devices(void) +{ + of_platform_bus_probe(NULL, of_bus_ids, NULL); + + return 0; +} +machine_device_initcall(tqm85xx, declare_of_platform_devices); static const char *board[] __initdata = { "tqc,tqm8540", diff --git a/trunk/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/trunk/arch/powerpc/platforms/85xx/xes_mpc85xx.c index 3a69f8b77de6..a9dc5e795123 100644 --- a/trunk/arch/powerpc/platforms/85xx/xes_mpc85xx.c +++ b/trunk/arch/powerpc/platforms/85xx/xes_mpc85xx.c @@ -32,9 +32,6 @@ #include #include -#include "smp.h" - -#include "mpc85xx.h" /* A few bit definitions needed for fixups on some boards */ #define MPC85xx_L2CTL_L2E 0x80000000 /* L2 enable */ @@ -43,11 +40,29 @@ void __init xes_mpc85xx_pic_init(void) { - struct mpic *mpic = mpic_alloc(NULL, 0, - MPIC_WANTS_RESET | + struct mpic *mpic; + struct resource r; + struct device_node *np; + + np = of_find_node_by_type(NULL, "open-pic"); + if (np == NULL) { + printk(KERN_ERR "Could not find open-pic node\n"); + return; + } + + if (of_address_to_resource(np, 0, &r)) { + printk(KERN_ERR "Failed to map mpic register space\n"); + of_node_put(np); + return; + } + + mpic = mpic_alloc(np, r.start, + MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); + of_node_put(np); + mpic_init(mpic); } @@ -121,6 +136,9 @@ static int primary_phb_addr; /* * Setup the architecture */ +#ifdef CONFIG_SMP +extern void __init mpc85xx_smp_init(void); +#endif 
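For reference, the board pic_init hunks above (p1010rdb, p1022_ds, p1023_rds, sbc8548, sbc8560, socrates, stx_gp3, tqm85xx, xes_mpc85xx) all follow the same probe sequence. A minimal consolidated sketch follows; the function name is hypothetical, the MPIC_* flags and error strings vary per board, and headers are as in the surrounding board files:

	static void __init example_pic_init(void)
	{
		struct mpic *mpic;
		struct resource r;
		struct device_node *np;

		/* Locate the interrupt controller node in the device tree */
		np = of_find_node_by_type(NULL, "open-pic");
		if (np == NULL) {
			printk(KERN_ERR "Could not find open-pic node\n");
			return;
		}

		/* Translate its "reg" property into a physical address range */
		if (of_address_to_resource(np, 0, &r)) {
			printk(KERN_ERR "Failed to map mpic register space\n");
			of_node_put(np);
			return;
		}

		/* Allocate and initialize the primary MPIC at that address */
		mpic = mpic_alloc(np, r.start,
				MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
				0, 256, " OpenPIC ");
		BUG_ON(mpic == NULL);
		of_node_put(np);
		mpic_init(mpic);
	}
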
static void __init xes_mpc85xx_setup_arch(void) { #ifdef CONFIG_PCI @@ -154,12 +172,26 @@ static void __init xes_mpc85xx_setup_arch(void) } #endif +#ifdef CONFIG_SMP mpc85xx_smp_init(); +#endif } -machine_device_initcall(xes_mpc8572, mpc85xx_common_publish_devices); -machine_device_initcall(xes_mpc8548, mpc85xx_common_publish_devices); -machine_device_initcall(xes_mpc8540, mpc85xx_common_publish_devices); +static struct of_device_id __initdata xes_mpc85xx_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .compatible = "simple-bus", }, + { .compatible = "gianfar", }, + {}, +}; + +static int __init xes_mpc85xx_publish_devices(void) +{ + return of_platform_bus_probe(NULL, xes_mpc85xx_ids, NULL); +} +machine_device_initcall(xes_mpc8572, xes_mpc85xx_publish_devices); +machine_device_initcall(xes_mpc8548, xes_mpc85xx_publish_devices); +machine_device_initcall(xes_mpc8540, xes_mpc85xx_publish_devices); /* * Called very early, device-tree isn't unflattened diff --git a/trunk/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/trunk/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c index 569262ca499a..b11c3535f350 100644 --- a/trunk/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c +++ b/trunk/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c @@ -161,7 +161,7 @@ mpc86xx_time_init(void) static __initdata struct of_device_id of_bus_ids[] = { { .compatible = "simple-bus", }, - { .compatible = "fsl,srio", }, + { .compatible = "fsl,rapidio-delta", }, { .compatible = "gianfar", }, {}, }; diff --git a/trunk/arch/powerpc/platforms/86xx/pic.c b/trunk/arch/powerpc/platforms/86xx/pic.c index 52bbfa031531..8ef8960abda6 100644 --- a/trunk/arch/powerpc/platforms/86xx/pic.c +++ b/trunk/arch/powerpc/platforms/86xx/pic.c @@ -31,16 +31,26 @@ static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc) void __init mpc86xx_init_irq(void) { -#ifdef CONFIG_PPC_I8259 + struct mpic *mpic; struct device_node *np; + struct resource res; +#ifdef CONFIG_PPC_I8259 struct device_node *cascade_node = NULL; int cascade_irq; #endif - struct mpic *mpic = mpic_alloc(NULL, 0, - MPIC_WANTS_RESET | MPIC_BIG_ENDIAN | - MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU, + /* Determine PIC address. 
*/ + np = of_find_node_by_type(NULL, "open-pic"); + if (np == NULL) + return; + of_address_to_resource(np, 0, &res); + + mpic = mpic_alloc(np, res.start, + MPIC_PRIMARY | MPIC_WANTS_RESET | + MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS | + MPIC_SINGLE_DEST_CPU, 0, 256, " MPIC "); + of_node_put(np); BUG_ON(mpic == NULL); mpic_init(mpic); diff --git a/trunk/arch/powerpc/platforms/Kconfig b/trunk/arch/powerpc/platforms/Kconfig index 31e1adeaa92a..3fe6d927ad70 100644 --- a/trunk/arch/powerpc/platforms/Kconfig +++ b/trunk/arch/powerpc/platforms/Kconfig @@ -211,12 +211,6 @@ config PPC_PASEMI_CPUFREQ endmenu -menu "CPUIdle driver" - -source "drivers/cpuidle/Kconfig" - -endmenu - config PPC601_SYNC_FIX bool "Workarounds for PPC601 bugs" depends on 6xx && (PPC_PREP || PPC_PMAC) diff --git a/trunk/arch/powerpc/platforms/Kconfig.cputype b/trunk/arch/powerpc/platforms/Kconfig.cputype index 425db18580a2..fbecae0fbb49 100644 --- a/trunk/arch/powerpc/platforms/Kconfig.cputype +++ b/trunk/arch/powerpc/platforms/Kconfig.cputype @@ -174,6 +174,7 @@ config BOOKE config FSL_BOOKE bool depends on (E200 || E500) && PPC32 + select SYS_SUPPORTS_HUGETLBFS if PHYS_64BIT default y # this is for common code between PPC32 & PPC64 FSL BOOKE @@ -181,7 +182,6 @@ config PPC_FSL_BOOK3E bool select FSL_EMB_PERFMON select PPC_SMP_MUXED_IPI - select SYS_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64 default y if FSL_BOOKE config PTE_64BIT @@ -236,7 +236,7 @@ config VSX config PPC_ICSWX bool "Support for PowerPC icswx coprocessor instruction" - depends on POWER4 || PPC_A2 + depends on POWER4 default n ---help--- @@ -252,25 +252,6 @@ config PPC_ICSWX If in doubt, say N here. -config PPC_ICSWX_PID - bool "icswx requires direct PID management" - depends on PPC_ICSWX && POWER4 - default y - ---help--- - The PID register in server is used explicitly for ICSWX. In - embedded systems PID managment is done by the system. - -config PPC_ICSWX_USE_SIGILL - bool "Should a bad CT cause a SIGILL?" - depends on PPC_ICSWX - default n - ---help--- - Should a bad CT used for "non-record form ICSWX" cause an - illegal intruction signal or should it be silent as - architected. - - If in doubt, say N here. 
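The 85xx board files above likewise drop mpc85xx_common_publish_devices() in favour of a per-board bus table handed to of_platform_bus_probe(). A rough sketch of that recurring shape, with hypothetical identifiers (example_bus_ids, example_board), is:

	static struct of_device_id __initdata example_bus_ids[] = {
		{ .type = "soc", },
		{ .compatible = "soc", },
		{ .compatible = "simple-bus", },
		{ .compatible = "gianfar", },
		{},
	};

	static int __init example_publish_devices(void)
	{
		/* Register matching device-tree buses and their children
		 * as platform devices so drivers can bind to them. */
		return of_platform_bus_probe(NULL, example_bus_ids, NULL);
	}
	machine_device_initcall(example_board, example_publish_devices);
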
- config SPE bool "SPE Support" depends on E200 || (E500 && !PPC_E500MC) @@ -309,7 +290,7 @@ config PPC_BOOK3E_MMU config PPC_MM_SLICES bool - default y if (!PPC_FSL_BOOK3E && PPC64 && HUGETLB_PAGE) || (PPC_STD_MMU_64 && PPC_64K_PAGES) + default y if (PPC64 && HUGETLB_PAGE) || (PPC_STD_MMU_64 && PPC_64K_PAGES) default n config VIRT_CPU_ACCOUNTING diff --git a/trunk/arch/powerpc/platforms/cell/iommu.c b/trunk/arch/powerpc/platforms/cell/iommu.c index ae9fc7bc17d6..592c3d51b817 100644 --- a/trunk/arch/powerpc/platforms/cell/iommu.c +++ b/trunk/arch/powerpc/platforms/cell/iommu.c @@ -1037,8 +1037,6 @@ static int __init cell_iommu_fixed_mapping_init(void) /* The fixed mapping is only supported on axon machines */ np = of_find_node_by_name(NULL, "axon"); - of_node_put(np); - if (!np) { pr_debug("iommu: fixed mapping disabled, no axons found\n"); return -1; diff --git a/trunk/arch/powerpc/platforms/cell/setup.c b/trunk/arch/powerpc/platforms/cell/setup.c index 62002a7edfed..0fc9b7256126 100644 --- a/trunk/arch/powerpc/platforms/cell/setup.c +++ b/trunk/arch/powerpc/platforms/cell/setup.c @@ -184,10 +184,24 @@ static int __init cell_publish_devices(void) } machine_subsys_initcall(cell, cell_publish_devices); +static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct mpic *mpic = irq_desc_get_handler_data(desc); + unsigned int virq; + + virq = mpic_get_one_irq(mpic); + if (virq != NO_IRQ) + generic_handle_irq(virq); + + chip->irq_eoi(&desc->irq_data); +} + static void __init mpic_init_IRQ(void) { struct device_node *dn; struct mpic *mpic; + unsigned int virq; for (dn = NULL; (dn = of_find_node_by_name(dn, "interrupt-controller"));) { @@ -197,10 +211,19 @@ static void __init mpic_init_IRQ(void) /* The MPIC driver will get everything it needs from the * device-tree, just pass 0 to all arguments */ - mpic = mpic_alloc(dn, 0, MPIC_SECONDARY, 0, 0, " MPIC "); + mpic = mpic_alloc(dn, 0, 0, 0, 0, " MPIC "); if (mpic == NULL) continue; mpic_init(mpic); + + virq = irq_of_parse_and_map(dn, 0); + if (virq == NO_IRQ) + continue; + + printk(KERN_INFO "%s : hooking up to IRQ %d\n", + dn->full_name, virq); + irq_set_handler_data(virq, mpic); + irq_set_chained_handler(virq, cell_mpic_cascade); } } diff --git a/trunk/arch/powerpc/platforms/chrp/setup.c b/trunk/arch/powerpc/platforms/chrp/setup.c index f1f17bb2c33c..122786498419 100644 --- a/trunk/arch/powerpc/platforms/chrp/setup.c +++ b/trunk/arch/powerpc/platforms/chrp/setup.c @@ -435,7 +435,8 @@ static void __init chrp_find_openpic(void) if (len > 1) isu_size = iranges[3]; - chrp_mpic = mpic_alloc(np, opaddr, 0, isu_size, 0, " MPIC "); + chrp_mpic = mpic_alloc(np, opaddr, MPIC_PRIMARY, + isu_size, 0, " MPIC "); if (chrp_mpic == NULL) { printk(KERN_ERR "Failed to allocate MPIC structure\n"); goto bail; diff --git a/trunk/arch/powerpc/platforms/embedded6xx/holly.c b/trunk/arch/powerpc/platforms/embedded6xx/holly.c index 9cfcf20c0560..2e9bcf6444c8 100644 --- a/trunk/arch/powerpc/platforms/embedded6xx/holly.c +++ b/trunk/arch/powerpc/platforms/embedded6xx/holly.c @@ -148,14 +148,30 @@ static void __init holly_setup_arch(void) static void __init holly_init_IRQ(void) { struct mpic *mpic; + phys_addr_t mpic_paddr = 0; + struct device_node *tsi_pic; #ifdef CONFIG_PCI unsigned int cascade_pci_irq; struct device_node *tsi_pci; struct device_node *cascade_node = NULL; #endif - mpic = mpic_alloc(NULL, 0, - MPIC_BIG_ENDIAN | MPIC_WANTS_RESET | + tsi_pic = of_find_node_by_type(NULL, "open-pic"); + if 
(tsi_pic) { + unsigned int size; + const void *prop = of_get_property(tsi_pic, "reg", &size); + mpic_paddr = of_translate_address(tsi_pic, prop); + } + + if (mpic_paddr == 0) { + printk(KERN_ERR "%s: No tsi108 PIC found !\n", __func__); + return; + } + + pr_debug("%s: tsi108 pic phys_addr = 0x%x\n", __func__, (u32) mpic_paddr); + + mpic = mpic_alloc(tsi_pic, mpic_paddr, + MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_WANTS_RESET | MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108, 24, NR_IRQS-4, /* num_sources used */ @@ -163,7 +179,7 @@ static void __init holly_init_IRQ(void) BUG_ON(mpic == NULL); - mpic_assign_isu(mpic, 0, mpic->paddr + 0x100); + mpic_assign_isu(mpic, 0, mpic_paddr + 0x100); mpic_init(mpic); @@ -188,6 +204,7 @@ static void __init holly_init_IRQ(void) #endif /* Configure MPIC outputs to CPU0 */ tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); + of_node_put(tsi_pic); } void holly_show_cpuinfo(struct seq_file *m) diff --git a/trunk/arch/powerpc/platforms/embedded6xx/linkstation.c b/trunk/arch/powerpc/platforms/embedded6xx/linkstation.c index bcfad92c9cec..244f997de791 100644 --- a/trunk/arch/powerpc/platforms/embedded6xx/linkstation.c +++ b/trunk/arch/powerpc/platforms/embedded6xx/linkstation.c @@ -81,19 +81,29 @@ static void __init linkstation_setup_arch(void) static void __init linkstation_init_IRQ(void) { struct mpic *mpic; + struct device_node *dnp; + const u32 *prop; + int size; + phys_addr_t paddr; - mpic = mpic_alloc(NULL, 0, MPIC_WANTS_RESET, - 4, 32, " EPIC "); + dnp = of_find_node_by_type(NULL, "open-pic"); + if (dnp == NULL) + return; + + prop = of_get_property(dnp, "reg", &size); + paddr = (phys_addr_t)of_translate_address(dnp, prop); + + mpic = mpic_alloc(dnp, paddr, MPIC_PRIMARY | MPIC_WANTS_RESET, 4, 32, " EPIC "); BUG_ON(mpic == NULL); /* PCI IRQs */ - mpic_assign_isu(mpic, 0, mpic->paddr + 0x10200); + mpic_assign_isu(mpic, 0, paddr + 0x10200); /* I2C */ - mpic_assign_isu(mpic, 1, mpic->paddr + 0x11000); + mpic_assign_isu(mpic, 1, paddr + 0x11000); /* ttyS0, ttyS1 */ - mpic_assign_isu(mpic, 2, mpic->paddr + 0x11100); + mpic_assign_isu(mpic, 2, paddr + 0x11100); mpic_init(mpic); } diff --git a/trunk/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/trunk/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c index f3350d786f5b..f8f33e16c6b6 100644 --- a/trunk/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c +++ b/trunk/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c @@ -102,14 +102,31 @@ static void __init mpc7448_hpc2_setup_arch(void) static void __init mpc7448_hpc2_init_IRQ(void) { struct mpic *mpic; + phys_addr_t mpic_paddr = 0; + struct device_node *tsi_pic; #ifdef CONFIG_PCI unsigned int cascade_pci_irq; struct device_node *tsi_pci; struct device_node *cascade_node = NULL; #endif - mpic = mpic_alloc(NULL, 0, - MPIC_BIG_ENDIAN | MPIC_WANTS_RESET | + tsi_pic = of_find_node_by_type(NULL, "open-pic"); + if (tsi_pic) { + unsigned int size; + const void *prop = of_get_property(tsi_pic, "reg", &size); + mpic_paddr = of_translate_address(tsi_pic, prop); + } + + if (mpic_paddr == 0) { + printk("%s: No tsi108 PIC found !\n", __func__); + return; + } + + DBG("%s: tsi108 pic phys_addr = 0x%x\n", __func__, + (u32) mpic_paddr); + + mpic = mpic_alloc(tsi_pic, mpic_paddr, + MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_WANTS_RESET | MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108, 24, NR_IRQS-4, /* num_sources used */ @@ -117,7 +134,7 @@ static void __init mpc7448_hpc2_init_IRQ(void) BUG_ON(mpic == NULL); - mpic_assign_isu(mpic, 0, mpic->paddr + 0x100); + 
mpic_assign_isu(mpic, 0, mpic_paddr + 0x100); mpic_init(mpic); @@ -142,6 +159,7 @@ static void __init mpc7448_hpc2_init_IRQ(void) #endif /* Configure MPIC outputs to CPU0 */ tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); + of_node_put(tsi_pic); } void mpc7448_hpc2_show_cpuinfo(struct seq_file *m) diff --git a/trunk/arch/powerpc/platforms/embedded6xx/storcenter.c b/trunk/arch/powerpc/platforms/embedded6xx/storcenter.c index afa638834965..f1eebcae9bf0 100644 --- a/trunk/arch/powerpc/platforms/embedded6xx/storcenter.c +++ b/trunk/arch/powerpc/platforms/embedded6xx/storcenter.c @@ -83,17 +83,35 @@ static void __init storcenter_setup_arch(void) static void __init storcenter_init_IRQ(void) { struct mpic *mpic; - - mpic = mpic_alloc(NULL, 0, MPIC_WANTS_RESET, + struct device_node *dnp; + const void *prop; + int size; + phys_addr_t paddr; + + dnp = of_find_node_by_type(NULL, "open-pic"); + if (dnp == NULL) + return; + + prop = of_get_property(dnp, "reg", &size); + if (prop == NULL) { + of_node_put(dnp); + return; + } + + paddr = (phys_addr_t)of_translate_address(dnp, prop); + mpic = mpic_alloc(dnp, paddr, MPIC_PRIMARY | MPIC_WANTS_RESET, 16, 32, " OpenPIC "); + + of_node_put(dnp); + BUG_ON(mpic == NULL); /* * 16 Serial Interrupts followed by 16 Internal Interrupts. * I2C is the second internal, so it is at 17, 0x11020. */ - mpic_assign_isu(mpic, 0, mpic->paddr + 0x10200); - mpic_assign_isu(mpic, 1, mpic->paddr + 0x11000); + mpic_assign_isu(mpic, 0, paddr + 0x10200); + mpic_assign_isu(mpic, 1, paddr + 0x11000); mpic_init(mpic); } diff --git a/trunk/arch/powerpc/platforms/embedded6xx/wii.c b/trunk/arch/powerpc/platforms/embedded6xx/wii.c index 6d8dadf19f0b..1b5dc1a2e145 100644 --- a/trunk/arch/powerpc/platforms/embedded6xx/wii.c +++ b/trunk/arch/powerpc/platforms/embedded6xx/wii.c @@ -79,18 +79,23 @@ void __init wii_memory_fixups(void) BUG_ON(memblock.memory.cnt != 2); BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base)); - /* trim unaligned tail */ - memblock_remove(ALIGN(p[1].base + p[1].size, PAGE_SIZE), - (phys_addr_t)ULLONG_MAX); + p[0].size = _ALIGN_DOWN(p[0].size, PAGE_SIZE); + p[1].size = _ALIGN_DOWN(p[1].size, PAGE_SIZE); - /* determine hole, add & reserve them */ - wii_hole_start = ALIGN(p[0].base + p[0].size, PAGE_SIZE); + wii_hole_start = p[0].base + p[0].size; wii_hole_size = p[1].base - wii_hole_start; - memblock_add(wii_hole_start, wii_hole_size); - memblock_reserve(wii_hole_start, wii_hole_size); - BUG_ON(memblock.memory.cnt != 1); - __memblock_dump_all(); + pr_info("MEM1: <%08llx %08llx>\n", p[0].base, p[0].size); + pr_info("HOLE: <%08lx %08lx>\n", wii_hole_start, wii_hole_size); + pr_info("MEM2: <%08llx %08llx>\n", p[1].base, p[1].size); + + p[0].size += wii_hole_size + p[1].size; + + memblock.memory.cnt = 1; + memblock_analyze(); + + /* reserve the hole */ + memblock_reserve(wii_hole_start, wii_hole_size); /* allow ioremapping the address space in the hole */ __allow_ioremap_reserved = 1; diff --git a/trunk/arch/powerpc/platforms/iseries/setup.c b/trunk/arch/powerpc/platforms/iseries/setup.c index 8fc62586a973..ea0acbd8966d 100644 --- a/trunk/arch/powerpc/platforms/iseries/setup.c +++ b/trunk/arch/powerpc/platforms/iseries/setup.c @@ -563,8 +563,7 @@ static void yield_shared_processor(void) static void iseries_shared_idle(void) { while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); + tick_nohz_stop_sched_tick(1); while (!need_resched() && !hvlpevent_is_pending()) { local_irq_disable(); ppc64_runlatch_off(); @@ -578,8 +577,7 @@ static void iseries_shared_idle(void) } 
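/* The surrounding iseries/setup.c hunks replace the paired idle bracketing
 *	tick_nohz_idle_enter(); rcu_idle_enter();
 *	... poll for work ...
 *	rcu_idle_exit(); tick_nohz_idle_exit();
 * with the older single-call form
 *	tick_nohz_stop_sched_tick(1);
 *	... poll for work ...
 *	tick_nohz_restart_sched_tick();
 * in both iseries_shared_idle() and iseries_dedicated_idle(). */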
ppc64_runlatch_on(); - rcu_idle_exit(); - tick_nohz_idle_exit(); + tick_nohz_restart_sched_tick(); if (hvlpevent_is_pending()) process_iSeries_events(); @@ -595,8 +593,7 @@ static void iseries_dedicated_idle(void) set_thread_flag(TIF_POLLING_NRFLAG); while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); + tick_nohz_stop_sched_tick(1); if (!need_resched()) { while (!need_resched()) { ppc64_runlatch_off(); @@ -613,8 +610,7 @@ static void iseries_dedicated_idle(void) } ppc64_runlatch_on(); - rcu_idle_exit(); - tick_nohz_idle_exit(); + tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); schedule(); preempt_disable(); diff --git a/trunk/arch/powerpc/platforms/maple/pci.c b/trunk/arch/powerpc/platforms/maple/pci.c index 401e3f3f74c8..dd2e48b28508 100644 --- a/trunk/arch/powerpc/platforms/maple/pci.c +++ b/trunk/arch/powerpc/platforms/maple/pci.c @@ -207,54 +207,6 @@ static volatile void __iomem *u3_ht_cfg_access(struct pci_controller* hose, return hose->cfg_data + u3_ht_cfa1(bus, devfn, offset); } -static int u3_ht_root_read_config(struct pci_controller *hose, u8 offset, - int len, u32 *val) -{ - volatile void __iomem *addr; - - addr = hose->cfg_addr; - addr += ((offset & ~3) << 2) + (4 - len - (offset & 3)); - - switch (len) { - case 1: - *val = in_8(addr); - break; - case 2: - *val = in_be16(addr); - break; - default: - *val = in_be32(addr); - break; - } - - return PCIBIOS_SUCCESSFUL; -} - -static int u3_ht_root_write_config(struct pci_controller *hose, u8 offset, - int len, u32 val) -{ - volatile void __iomem *addr; - - addr = hose->cfg_addr + ((offset & ~3) << 2) + (4 - len - (offset & 3)); - - if (offset >= PCI_BASE_ADDRESS_0 && offset < PCI_CAPABILITY_LIST) - return PCIBIOS_SUCCESSFUL; - - switch (len) { - case 1: - out_8(addr, val); - break; - case 2: - out_be16(addr, val); - break; - default: - out_be32(addr, val); - break; - } - - return PCIBIOS_SUCCESSFUL; -} - static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 *val) { @@ -265,9 +217,6 @@ static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, if (hose == NULL) return PCIBIOS_DEVICE_NOT_FOUND; - if (bus->number == hose->first_busno && devfn == PCI_DEVFN(0, 0)) - return u3_ht_root_read_config(hose, offset, len, val); - if (offset > 0xff) return PCIBIOS_BAD_REGISTER_NUMBER; @@ -303,9 +252,6 @@ static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn, if (hose == NULL) return PCIBIOS_DEVICE_NOT_FOUND; - if (bus->number == hose->first_busno && devfn == PCI_DEVFN(0, 0)) - return u3_ht_root_write_config(hose, offset, len, val); - if (offset > 0xff) return PCIBIOS_BAD_REGISTER_NUMBER; @@ -482,7 +428,6 @@ static void __init setup_u3_ht(struct pci_controller* hose) * reg_property and using some accessor functions instead */ hose->cfg_data = ioremap(0xf2000000, 0x02000000); - hose->cfg_addr = ioremap(0xf8070000, 0x1000); hose->first_busno = 0; hose->last_busno = 0xef; diff --git a/trunk/arch/powerpc/platforms/maple/setup.c b/trunk/arch/powerpc/platforms/maple/setup.c index 0bcbfe7b2c55..4c372047c94e 100644 --- a/trunk/arch/powerpc/platforms/maple/setup.c +++ b/trunk/arch/powerpc/platforms/maple/setup.c @@ -221,7 +221,7 @@ static void __init maple_init_IRQ(void) unsigned long openpic_addr = 0; int naddr, n, i, opplen, has_isus = 0; struct mpic *mpic; - unsigned int flags = 0; + unsigned int flags = MPIC_PRIMARY; /* Locate MPIC in the device-tree. 
Note that there is a bug * in Maple device-tree where the type of the controller is diff --git a/trunk/arch/powerpc/platforms/pasemi/setup.c b/trunk/arch/powerpc/platforms/pasemi/setup.c index 98b7a7c13176..6f3558210554 100644 --- a/trunk/arch/powerpc/platforms/pasemi/setup.c +++ b/trunk/arch/powerpc/platforms/pasemi/setup.c @@ -224,7 +224,7 @@ static __init void pas_init_IRQ(void) openpic_addr = of_read_number(opprop, naddr); printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr); - mpic_flags = MPIC_LARGE_VECTORS | MPIC_NO_BIAS; + mpic_flags = MPIC_PRIMARY | MPIC_LARGE_VECTORS | MPIC_NO_BIAS; nmiprop = of_get_property(mpic_node, "nmi-source", NULL); if (nmiprop) @@ -234,7 +234,7 @@ static __init void pas_init_IRQ(void) mpic_flags, 0, 0, "PASEMI-OPIC"); BUG_ON(!mpic); - mpic_assign_isu(mpic, 0, mpic->paddr + 0x10000); + mpic_assign_isu(mpic, 0, openpic_addr + 0x10000); mpic_init(mpic); /* The NMI/MCK source needs to be prio 15 */ if (nmiprop) { diff --git a/trunk/arch/powerpc/platforms/powermac/pic.c b/trunk/arch/powerpc/platforms/powermac/pic.c index 7761aabfc293..901bfbddc3dd 100644 --- a/trunk/arch/powerpc/platforms/powermac/pic.c +++ b/trunk/arch/powerpc/platforms/powermac/pic.c @@ -52,8 +52,13 @@ struct device_node *of_irq_dflt_pic; /* Default addresses */ static volatile struct pmac_irq_hw __iomem *pmac_irq_hw[4]; +#define GC_LEVEL_MASK 0x3ff00000 +#define OHARE_LEVEL_MASK 0x1ff00000 +#define HEATHROW_LEVEL_MASK 0x1ff00000 + static int max_irqs; static int max_real_irqs; +static u32 level_mask[4]; static DEFINE_RAW_SPINLOCK(pmac_pic_lock); @@ -212,7 +217,8 @@ static irqreturn_t gatwick_action(int cpl, void *dev_id) for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) { int i = irq >> 5; bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i]; - bits |= in_le32(&pmac_irq_hw[i]->level); + /* We must read level interrupts from the level register */ + bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]); bits &= ppc_cached_irq_mask[i]; if (bits == 0) continue; @@ -242,7 +248,8 @@ static unsigned int pmac_pic_get_irq(void) for (irq = max_real_irqs; (irq -= 32) >= 0; ) { int i = irq >> 5; bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i]; - bits |= in_le32(&pmac_irq_hw[i]->level); + /* We must read level interrupts from the level register */ + bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]); bits &= ppc_cached_irq_mask[i]; if (bits == 0) continue; @@ -277,14 +284,19 @@ static int pmac_pic_host_match(struct irq_host *h, struct device_node *node) static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { + int level; + if (hw >= max_irqs) return -EINVAL; /* Mark level interrupts, set delayed disable for edge ones and set * handlers */ - irq_set_status_flags(virq, IRQ_LEVEL); - irq_set_chip_and_handler(virq, &pmac_pic, handle_level_irq); + level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f))); + if (level) + irq_set_status_flags(virq, IRQ_LEVEL); + irq_set_chip_and_handler(virq, &pmac_pic, + level ? 
handle_level_irq : handle_edge_irq); return 0; } @@ -322,14 +334,21 @@ static void __init pmac_pic_probe_oldstyle(void) if ((master = of_find_node_by_name(NULL, "gc")) != NULL) { max_irqs = max_real_irqs = 32; + level_mask[0] = GC_LEVEL_MASK; } else if ((master = of_find_node_by_name(NULL, "ohare")) != NULL) { max_irqs = max_real_irqs = 32; + level_mask[0] = OHARE_LEVEL_MASK; + /* We might have a second cascaded ohare */ slave = of_find_node_by_name(NULL, "pci106b,7"); - if (slave) + if (slave) { max_irqs = 64; + level_mask[1] = OHARE_LEVEL_MASK; + } } else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) { max_irqs = max_real_irqs = 64; + level_mask[0] = HEATHROW_LEVEL_MASK; + level_mask[1] = 0; /* We might have a second cascaded heathrow */ slave = of_find_node_by_name(master, "mac-io"); @@ -344,8 +363,11 @@ static void __init pmac_pic_probe_oldstyle(void) } /* We found a slave */ - if (slave) + if (slave) { max_irqs = 128; + level_mask[2] = HEATHROW_LEVEL_MASK; + level_mask[3] = 0; + } } BUG_ON(master == NULL); @@ -442,6 +464,18 @@ int of_irq_map_oldworld(struct device_node *device, int index, } #endif /* CONFIG_PPC32 */ +static void pmac_u3_cascade(unsigned int irq, struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct mpic *mpic = irq_desc_get_handler_data(desc); + unsigned int cascade_irq = mpic_get_one_irq(mpic); + + if (cascade_irq != NO_IRQ) + generic_handle_irq(cascade_irq); + + chip->irq_eoi(&desc->irq_data); +} + static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic) { #if defined(CONFIG_XMON) && defined(CONFIG_PPC32) @@ -464,8 +498,14 @@ static struct mpic * __init pmac_setup_one_mpic(struct device_node *np, int master) { const char *name = master ? " MPIC 1 " : " MPIC 2 "; + struct resource r; struct mpic *mpic; - unsigned int flags = master ? 0 : MPIC_SECONDARY; + unsigned int flags = master ? 
MPIC_PRIMARY : 0; + int rc; + + rc = of_address_to_resource(np, 0, &r); + if (rc) + return NULL; pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0); @@ -479,7 +519,7 @@ static struct mpic * __init pmac_setup_one_mpic(struct device_node *np, if (master && (flags & MPIC_BIG_ENDIAN)) flags |= MPIC_U3_HT_IRQS; - mpic = mpic_alloc(np, 0, flags, 0, 0, name); + mpic = mpic_alloc(np, r.start, flags, 0, 0, name); if (mpic == NULL) return NULL; @@ -492,6 +532,7 @@ static int __init pmac_pic_probe_mpic(void) { struct mpic *mpic1, *mpic2; struct device_node *np, *master = NULL, *slave = NULL; + unsigned int cascade; /* We can have up to 2 MPICs cascaded */ for (np = NULL; (np = of_find_node_by_type(np, "open-pic")) @@ -527,14 +568,27 @@ static int __init pmac_pic_probe_mpic(void) of_node_put(master); - /* Set up a cascaded controller, if present */ - if (slave) { - mpic2 = pmac_setup_one_mpic(slave, 0); - if (mpic2 == NULL) - printk(KERN_ERR "Failed to setup slave MPIC\n"); + /* No slave, let's go out */ + if (slave == NULL) + return 0; + + /* Get/Map slave interrupt */ + cascade = irq_of_parse_and_map(slave, 0); + if (cascade == NO_IRQ) { + printk(KERN_ERR "Failed to map cascade IRQ\n"); + return 0; + } + + mpic2 = pmac_setup_one_mpic(slave, 0); + if (mpic2 == NULL) { + printk(KERN_ERR "Failed to setup slave MPIC\n"); of_node_put(slave); + return 0; } + irq_set_handler_data(cascade, mpic2); + irq_set_chained_handler(cascade, pmac_u3_cascade); + of_node_put(slave); return 0; } diff --git a/trunk/arch/powerpc/platforms/powermac/setup.c b/trunk/arch/powerpc/platforms/powermac/setup.c index 970ea1de4298..96580b189ec2 100644 --- a/trunk/arch/powerpc/platforms/powermac/setup.c +++ b/trunk/arch/powerpc/platforms/powermac/setup.c @@ -494,15 +494,11 @@ static int __init pmac_declare_of_platform_devices(void) return -1; np = of_find_node_by_name(NULL, "valkyrie"); - if (np) { + if (np) of_platform_device_create(np, "valkyrie", NULL); - of_node_put(np); - } np = of_find_node_by_name(NULL, "platinum"); - if (np) { + if (np) of_platform_device_create(np, "platinum", NULL); - of_node_put(np); - } np = of_find_node_by_type(NULL, "smu"); if (np) { of_platform_device_create(np, "smu", NULL); diff --git a/trunk/arch/powerpc/platforms/powermac/smp.c b/trunk/arch/powerpc/platforms/powermac/smp.c index 44d769258ebf..9b6a820bdd7d 100644 --- a/trunk/arch/powerpc/platforms/powermac/smp.c +++ b/trunk/arch/powerpc/platforms/powermac/smp.c @@ -200,7 +200,7 @@ static int psurge_secondary_ipi_init(void) if (psurge_secondary_virq) rc = request_irq(psurge_secondary_virq, psurge_ipi_intr, - IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL); + IRQF_PERCPU, "IPI", NULL); if (rc) pr_err("Failed to setup secondary cpu IPI\n"); @@ -408,13 +408,13 @@ static int __init smp_psurge_kick_cpu(int nr) static struct irqaction psurge_irqaction = { .handler = psurge_ipi_intr, - .flags = IRQF_PERCPU | IRQF_NO_THREAD, + .flags = IRQF_PERCPU, .name = "primary IPI", }; static void __init smp_psurge_setup_cpu(int cpu_nr) { - if (cpu_nr != 0 || !psurge_start) + if (cpu_nr != 0) return; /* reset the entry point so if we get another intr we won't diff --git a/trunk/arch/powerpc/platforms/powernv/Makefile b/trunk/arch/powerpc/platforms/powernv/Makefile index bcc3cb48a44e..31853008b418 100644 --- a/trunk/arch/powerpc/platforms/powernv/Makefile +++ b/trunk/arch/powerpc/platforms/powernv/Makefile @@ -2,4 +2,4 @@ obj-y += setup.o opal-takeover.o opal-wrappers.o opal.o obj-y += opal-rtc.o opal-nvram.o obj-$(CONFIG_SMP) += smp.o -obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o 
pci-ioda.o +obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o diff --git a/trunk/arch/powerpc/platforms/powernv/opal-wrappers.S b/trunk/arch/powerpc/platforms/powernv/opal-wrappers.S index 3bb07e5e43cd..4a3f46d8533e 100644 --- a/trunk/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/trunk/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -99,11 +99,3 @@ OPAL_CALL(opal_write_oppanel, OPAL_WRITE_OPPANEL); OPAL_CALL(opal_pci_map_pe_dma_window, OPAL_PCI_MAP_PE_DMA_WINDOW); OPAL_CALL(opal_pci_map_pe_dma_window_real, OPAL_PCI_MAP_PE_DMA_WINDOW_REAL); OPAL_CALL(opal_pci_reset, OPAL_PCI_RESET); -OPAL_CALL(opal_pci_get_hub_diag_data, OPAL_PCI_GET_HUB_DIAG_DATA); -OPAL_CALL(opal_pci_get_phb_diag_data, OPAL_PCI_GET_PHB_DIAG_DATA); -OPAL_CALL(opal_pci_fence_phb, OPAL_PCI_FENCE_PHB); -OPAL_CALL(opal_pci_reinit, OPAL_PCI_REINIT); -OPAL_CALL(opal_pci_mask_pe_error, OPAL_PCI_MASK_PE_ERROR); -OPAL_CALL(opal_set_slot_led_status, OPAL_SET_SLOT_LED_STATUS); -OPAL_CALL(opal_get_epow_status, OPAL_GET_EPOW_STATUS); -OPAL_CALL(opal_set_system_attention_led, OPAL_SET_SYSTEM_ATTENTION_LED); diff --git a/trunk/arch/powerpc/platforms/powernv/pci-ioda.c b/trunk/arch/powerpc/platforms/powernv/pci-ioda.c deleted file mode 100644 index f31162cfdaa9..000000000000 --- a/trunk/arch/powerpc/platforms/powernv/pci-ioda.c +++ /dev/null @@ -1,1330 +0,0 @@ -/* - * Support PCI/PCIe on PowerNV platforms - * - * Copyright 2011 Benjamin Herrenschmidt, IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#undef DEBUG - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "powernv.h" -#include "pci.h" - -struct resource_wrap { - struct list_head link; - resource_size_t size; - resource_size_t align; - struct pci_dev *dev; /* Set if it's a device */ - struct pci_bus *bus; /* Set if it's a bridge */ -}; - -static int __pe_printk(const char *level, const struct pnv_ioda_pe *pe, - struct va_format *vaf) -{ - char pfix[32]; - - if (pe->pdev) - strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix)); - else - sprintf(pfix, "%04x:%02x ", - pci_domain_nr(pe->pbus), pe->pbus->number); - return printk("pci %s%s: [PE# %.3d] %pV", level, pfix, pe->pe_number, vaf); -} - -#define define_pe_printk_level(func, kern_level) \ -static int func(const struct pnv_ioda_pe *pe, const char *fmt, ...) \ -{ \ - struct va_format vaf; \ - va_list args; \ - int r; \ - \ - va_start(args, fmt); \ - \ - vaf.fmt = fmt; \ - vaf.va = &args; \ - \ - r = __pe_printk(kern_level, pe, &vaf); \ - va_end(args); \ - \ - return r; \ -} \ - -define_pe_printk_level(pe_err, KERN_ERR); -define_pe_printk_level(pe_warn, KERN_WARNING); -define_pe_printk_level(pe_info, KERN_INFO); - - -/* Calculate resource usage & alignment requirement of a single - * device. This will also assign all resources within the device - * for a given type starting at 0 for the biggest one and then - * assigning in decreasing order of size. 
- */ -static void __devinit pnv_ioda_calc_dev(struct pci_dev *dev, unsigned int flags, - resource_size_t *size, - resource_size_t *align) -{ - resource_size_t start; - struct resource *r; - int i; - - pr_devel(" -> CDR %s\n", pci_name(dev)); - - *size = *align = 0; - - /* Clear the resources out and mark them all unset */ - for (i = 0; i <= PCI_ROM_RESOURCE; i++) { - r = &dev->resource[i]; - if (!(r->flags & flags)) - continue; - if (r->start) { - r->end -= r->start; - r->start = 0; - } - r->flags |= IORESOURCE_UNSET; - } - - /* We currently keep all memory resources together, we - * will handle prefetch & 64-bit separately in the future - * but for now we stick everybody in M32 - */ - start = 0; - for (;;) { - resource_size_t max_size = 0; - int max_no = -1; - - /* Find next biggest resource */ - for (i = 0; i <= PCI_ROM_RESOURCE; i++) { - r = &dev->resource[i]; - if (!(r->flags & IORESOURCE_UNSET) || - !(r->flags & flags)) - continue; - if (resource_size(r) > max_size) { - max_size = resource_size(r); - max_no = i; - } - } - if (max_no < 0) - break; - r = &dev->resource[max_no]; - if (max_size > *align) - *align = max_size; - *size += max_size; - r->start = start; - start += max_size; - r->end = r->start + max_size - 1; - r->flags &= ~IORESOURCE_UNSET; - pr_devel(" -> R%d %016llx..%016llx\n", - max_no, r->start, r->end); - } - pr_devel(" <- CDR %s size=%llx align=%llx\n", - pci_name(dev), *size, *align); -} - -/* Allocate a resource "wrap" for a given device or bridge and - * insert it at the right position in the sorted list - */ -static void __devinit pnv_ioda_add_wrap(struct list_head *list, - struct pci_bus *bus, - struct pci_dev *dev, - resource_size_t size, - resource_size_t align) -{ - struct resource_wrap *w1, *w = kzalloc(sizeof(*w), GFP_KERNEL); - - w->size = size; - w->align = align; - w->dev = dev; - w->bus = bus; - - list_for_each_entry(w1, list, link) { - if (w1->align < align) { - list_add_tail(&w->link, &w1->link); - return; - } - } - list_add_tail(&w->link, list); -} - -/* Offset device resources of a given type */ -static void __devinit pnv_ioda_offset_dev(struct pci_dev *dev, - unsigned int flags, - resource_size_t offset) -{ - struct resource *r; - int i; - - pr_devel(" -> ODR %s [%x] +%016llx\n", pci_name(dev), flags, offset); - - for (i = 0; i <= PCI_ROM_RESOURCE; i++) { - r = &dev->resource[i]; - if (r->flags & flags) { - dev->resource[i].start += offset; - dev->resource[i].end += offset; - } - } - - pr_devel(" <- ODR %s [%x] +%016llx\n", pci_name(dev), flags, offset); -} - -/* Offset bus resources (& all children) of a given type */ -static void __devinit pnv_ioda_offset_bus(struct pci_bus *bus, - unsigned int flags, - resource_size_t offset) -{ - struct resource *r; - struct pci_dev *dev; - struct pci_bus *cbus; - int i; - - pr_devel(" -> OBR %s [%x] +%016llx\n", - bus->self ? pci_name(bus->self) : "root", flags, offset); - - for (i = 0; i < 2; i++) { - r = bus->resource[i]; - if (r && (r->flags & flags)) { - bus->resource[i]->start += offset; - bus->resource[i]->end += offset; - } - } - list_for_each_entry(dev, &bus->devices, bus_list) - pnv_ioda_offset_dev(dev, flags, offset); - list_for_each_entry(cbus, &bus->children, node) - pnv_ioda_offset_bus(cbus, flags, offset); - - pr_devel(" <- OBR %s [%x]\n", - bus->self ? pci_name(bus->self) : "root", flags); -} - -/* This is the guts of our IODA resource allocation. This is called - * recursively for each bus in the system. 
It calculates all the - * necessary size and requirements for children and assign them - * resources such that: - * - * - Each function fits in it's own contiguous set of IO/M32 - * segment - * - * - All segments behind a P2P bridge are contiguous and obey - * alignment constraints of those bridges - */ -static void __devinit pnv_ioda_calc_bus(struct pci_bus *bus, unsigned int flags, - resource_size_t *size, - resource_size_t *align) -{ - struct pci_controller *hose = pci_bus_to_host(bus); - struct pnv_phb *phb = hose->private_data; - resource_size_t dev_size, dev_align, start; - resource_size_t min_align, min_balign; - struct pci_dev *cdev; - struct pci_bus *cbus; - struct list_head head; - struct resource_wrap *w; - unsigned int bres; - - *size = *align = 0; - - pr_devel("-> CBR %s [%x]\n", - bus->self ? pci_name(bus->self) : "root", flags); - - /* Calculate alignment requirements based on the type - * of resource we are working on - */ - if (flags & IORESOURCE_IO) { - bres = 0; - min_align = phb->ioda.io_segsize; - min_balign = 0x1000; - } else { - bres = 1; - min_align = phb->ioda.m32_segsize; - min_balign = 0x100000; - } - - /* Gather all our children resources ordered by alignment */ - INIT_LIST_HEAD(&head); - - /* - Busses */ - list_for_each_entry(cbus, &bus->children, node) { - pnv_ioda_calc_bus(cbus, flags, &dev_size, &dev_align); - pnv_ioda_add_wrap(&head, cbus, NULL, dev_size, dev_align); - } - - /* - Devices */ - list_for_each_entry(cdev, &bus->devices, bus_list) { - pnv_ioda_calc_dev(cdev, flags, &dev_size, &dev_align); - /* Align them to segment size */ - if (dev_align < min_align) - dev_align = min_align; - pnv_ioda_add_wrap(&head, NULL, cdev, dev_size, dev_align); - } - if (list_empty(&head)) - goto empty; - - /* Now we can do two things: assign offsets to them within that - * level and get our total alignment & size requirements. The - * assignment algorithm is going to be uber-trivial for now, we - * can try to be smarter later at filling out holes. - */ - start = bus->self ? 0 : bus->resource[bres]->start; - - /* Don't hand out IO 0 */ - if ((flags & IORESOURCE_IO) && !bus->self) - start += 0x1000; - - while(!list_empty(&head)) { - w = list_first_entry(&head, struct resource_wrap, link); - list_del(&w->link); - if (w->size) { - if (start) { - start = ALIGN(start, w->align); - if (w->dev) - pnv_ioda_offset_dev(w->dev,flags,start); - else if (w->bus) - pnv_ioda_offset_bus(w->bus,flags,start); - } - if (w->align > *align) - *align = w->align; - } - start += w->size; - kfree(w); - } - *size = start; - - /* Align and setup bridge resources */ - *align = max_t(resource_size_t, *align, - max_t(resource_size_t, min_align, min_balign)); - *size = ALIGN(*size, - max_t(resource_size_t, min_align, min_balign)); - empty: - /* Only setup P2P's, not the PHB itself */ - if (bus->self) { - WARN_ON(bus->resource[bres] == NULL); - bus->resource[bres]->start = 0; - bus->resource[bres]->flags = (*size) ? flags : 0; - bus->resource[bres]->end = (*size) ? (*size - 1) : 0; - - /* Clear prefetch bus resources for now */ - bus->resource[2]->flags = 0; - } - - pr_devel("<- CBR %s [%x] *size=%016llx *align=%016llx\n", - bus->self ? 
pci_name(bus->self) : "root", flags,*size,*align); -} - -static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev) -{ - struct device_node *np; - - np = pci_device_to_OF_node(dev); - if (!np) - return NULL; - return PCI_DN(np); -} - -static void __devinit pnv_ioda_setup_pe_segments(struct pci_dev *dev) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - struct pci_dn *pdn = pnv_ioda_get_pdn(dev); - unsigned int pe, i; - resource_size_t pos; - struct resource io_res; - struct resource m32_res; - struct pci_bus_region region; - int rc; - - /* Anything not referenced in the device-tree gets PE#0 */ - pe = pdn ? pdn->pe_number : 0; - - /* Calculate the device min/max */ - io_res.start = m32_res.start = (resource_size_t)-1; - io_res.end = m32_res.end = 0; - io_res.flags = IORESOURCE_IO; - m32_res.flags = IORESOURCE_MEM; - - for (i = 0; i <= PCI_ROM_RESOURCE; i++) { - struct resource *r = NULL; - if (dev->resource[i].flags & IORESOURCE_IO) - r = &io_res; - if (dev->resource[i].flags & IORESOURCE_MEM) - r = &m32_res; - if (!r) - continue; - if (dev->resource[i].start < r->start) - r->start = dev->resource[i].start; - if (dev->resource[i].end > r->end) - r->end = dev->resource[i].end; - } - - /* Setup IO segments */ - if (io_res.start < io_res.end) { - pcibios_resource_to_bus(dev, ®ion, &io_res); - pos = region.start; - i = pos / phb->ioda.io_segsize; - while(i < phb->ioda.total_pe && pos <= region.end) { - if (phb->ioda.io_segmap[i]) { - pr_err("%s: Trying to use IO seg #%d which is" - " already used by PE# %d\n", - pci_name(dev), i, - phb->ioda.io_segmap[i]); - /* XXX DO SOMETHING TO DISABLE DEVICE ? */ - break; - } - phb->ioda.io_segmap[i] = pe; - rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe, - OPAL_IO_WINDOW_TYPE, - 0, i); - if (rc != OPAL_SUCCESS) { - pr_err("%s: OPAL error %d setting up mapping" - " for IO seg# %d\n", - pci_name(dev), rc, i); - /* XXX DO SOMETHING TO DISABLE DEVICE ? */ - break; - } - pos += phb->ioda.io_segsize; - i++; - }; - } - - /* Setup M32 segments */ - if (m32_res.start < m32_res.end) { - pcibios_resource_to_bus(dev, ®ion, &m32_res); - pos = region.start; - i = pos / phb->ioda.m32_segsize; - while(i < phb->ioda.total_pe && pos <= region.end) { - if (phb->ioda.m32_segmap[i]) { - pr_err("%s: Trying to use M32 seg #%d which is" - " already used by PE# %d\n", - pci_name(dev), i, - phb->ioda.m32_segmap[i]); - /* XXX DO SOMETHING TO DISABLE DEVICE ? */ - break; - } - phb->ioda.m32_segmap[i] = pe; - rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe, - OPAL_M32_WINDOW_TYPE, - 0, i); - if (rc != OPAL_SUCCESS) { - pr_err("%s: OPAL error %d setting up mapping" - " for M32 seg# %d\n", - pci_name(dev), rc, i); - /* XXX DO SOMETHING TO DISABLE DEVICE ? 
*/ - break; - } - pos += phb->ioda.m32_segsize; - i++; - } - } -} - -/* Check if a resource still fits in the total IO or M32 range - * for a given PHB - */ -static int __devinit pnv_ioda_resource_fit(struct pci_controller *hose, - struct resource *r) -{ - struct resource *bounds; - - if (r->flags & IORESOURCE_IO) - bounds = &hose->io_resource; - else if (r->flags & IORESOURCE_MEM) - bounds = &hose->mem_resources[0]; - else - return 1; - - if (r->start >= bounds->start && r->end <= bounds->end) - return 1; - r->flags = 0; - return 0; -} - -static void __devinit pnv_ioda_update_resources(struct pci_bus *bus) -{ - struct pci_controller *hose = pci_bus_to_host(bus); - struct pci_bus *cbus; - struct pci_dev *cdev; - unsigned int i; - - /* We used to clear all device enables here. However it looks like - * clearing MEM enable causes Obsidian (IPR SCS) to go bonkers, - * and shoot fatal errors to the PHB which in turns fences itself - * and we can't recover from that ... yet. So for now, let's leave - * the enables as-is and hope for the best. - */ - - /* Check if bus resources fit in our IO or M32 range */ - for (i = 0; bus->self && (i < 2); i++) { - struct resource *r = bus->resource[i]; - if (r && !pnv_ioda_resource_fit(hose, r)) - pr_err("%s: Bus %d resource %d disabled, no room\n", - pci_name(bus->self), bus->number, i); - } - - /* Update self if it's not a PHB */ - if (bus->self) - pci_setup_bridge(bus); - - /* Update child devices */ - list_for_each_entry(cdev, &bus->devices, bus_list) { - /* Check if resource fits, if not, disabled it */ - for (i = 0; i <= PCI_ROM_RESOURCE; i++) { - struct resource *r = &cdev->resource[i]; - if (!pnv_ioda_resource_fit(hose, r)) - pr_err("%s: Resource %d disabled, no room\n", - pci_name(cdev), i); - } - - /* Assign segments */ - pnv_ioda_setup_pe_segments(cdev); - - /* Update HW BARs */ - for (i = 0; i <= PCI_ROM_RESOURCE; i++) - pci_update_resource(cdev, i); - } - - /* Update child busses */ - list_for_each_entry(cbus, &bus->children, node) - pnv_ioda_update_resources(cbus); -} - -static int __devinit pnv_ioda_alloc_pe(struct pnv_phb *phb) -{ - unsigned long pe; - - do { - pe = find_next_zero_bit(phb->ioda.pe_alloc, - phb->ioda.total_pe, 0); - if (pe >= phb->ioda.total_pe) - return IODA_INVALID_PE; - } while(test_and_set_bit(pe, phb->ioda.pe_alloc)); - - phb->ioda.pe_array[pe].pe_number = pe; - return pe; -} - -static void __devinit pnv_ioda_free_pe(struct pnv_phb *phb, int pe) -{ - WARN_ON(phb->ioda.pe_array[pe].pdev); - - memset(&phb->ioda.pe_array[pe], 0, sizeof(struct pnv_ioda_pe)); - clear_bit(pe, phb->ioda.pe_alloc); -} - -/* Currently those 2 are only used when MSIs are enabled, this will change - * but in the meantime, we need to protect them to avoid warnings - */ -#ifdef CONFIG_PCI_MSI -static struct pnv_ioda_pe * __devinit __pnv_ioda_get_one_pe(struct pci_dev *dev) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - struct pci_dn *pdn = pnv_ioda_get_pdn(dev); - - if (!pdn) - return NULL; - if (pdn->pe_number == IODA_INVALID_PE) - return NULL; - return &phb->ioda.pe_array[pdn->pe_number]; -} - -static struct pnv_ioda_pe * __devinit pnv_ioda_get_pe(struct pci_dev *dev) -{ - struct pnv_ioda_pe *pe = __pnv_ioda_get_one_pe(dev); - - while (!pe && dev->bus->self) { - dev = dev->bus->self; - pe = __pnv_ioda_get_one_pe(dev); - if (pe) - pe = pe->bus_pe; - } - return pe; -} -#endif /* CONFIG_PCI_MSI */ - -static int __devinit pnv_ioda_configure_pe(struct pnv_phb *phb, - struct pnv_ioda_pe *pe) -{ 
- struct pci_dev *parent; - uint8_t bcomp, dcomp, fcomp; - long rc, rid_end, rid; - - /* Bus validation ? */ - if (pe->pbus) { - int count; - - dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER; - fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER; - parent = pe->pbus->self; - count = pe->pbus->subordinate - pe->pbus->secondary + 1; - switch(count) { - case 1: bcomp = OpalPciBusAll; break; - case 2: bcomp = OpalPciBus7Bits; break; - case 4: bcomp = OpalPciBus6Bits; break; - case 8: bcomp = OpalPciBus5Bits; break; - case 16: bcomp = OpalPciBus4Bits; break; - case 32: bcomp = OpalPciBus3Bits; break; - default: - pr_err("%s: Number of subordinate busses %d" - " unsupported\n", - pci_name(pe->pbus->self), count); - /* Do an exact match only */ - bcomp = OpalPciBusAll; - } - rid_end = pe->rid + (count << 8); - } else { - parent = pe->pdev->bus->self; - bcomp = OpalPciBusAll; - dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER; - fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER; - rid_end = pe->rid + 1; - } - - /* Associate PE in PELT */ - rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid, - bcomp, dcomp, fcomp, OPAL_MAP_PE); - if (rc) { - pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc); - return -ENXIO; - } - opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number, - OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); - - /* Add to all parents PELT-V */ - while (parent) { - struct pci_dn *pdn = pnv_ioda_get_pdn(parent); - if (pdn && pdn->pe_number != IODA_INVALID_PE) { - rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number, - pe->pe_number, OPAL_ADD_PE_TO_DOMAIN); - /* XXX What to do in case of error ? */ - } - parent = parent->bus->self; - } - /* Setup reverse map */ - for (rid = pe->rid; rid < rid_end; rid++) - phb->ioda.pe_rmap[rid] = pe->pe_number; - - /* Setup one MVTs on IODA1 */ - if (phb->type == PNV_PHB_IODA1) { - pe->mve_number = pe->pe_number; - rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, - pe->pe_number); - if (rc) { - pe_err(pe, "OPAL error %ld setting up MVE %d\n", - rc, pe->mve_number); - pe->mve_number = -1; - } else { - rc = opal_pci_set_mve_enable(phb->opal_id, - pe->mve_number, OPAL_ENABLE_MVE); - if (rc) { - pe_err(pe, "OPAL error %ld enabling MVE %d\n", - rc, pe->mve_number); - pe->mve_number = -1; - } - } - } else if (phb->type == PNV_PHB_IODA2) - pe->mve_number = 0; - - return 0; -} - -static void __devinit pnv_ioda_link_pe_by_weight(struct pnv_phb *phb, - struct pnv_ioda_pe *pe) -{ - struct pnv_ioda_pe *lpe; - - list_for_each_entry(lpe, &phb->ioda.pe_list, link) { - if (lpe->dma_weight < pe->dma_weight) { - list_add_tail(&pe->link, &lpe->link); - return; - } - } - list_add_tail(&pe->link, &phb->ioda.pe_list); -} - -static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev) -{ - /* This is quite simplistic. The "base" weight of a device - * is 10. 0 means no DMA is to be accounted for it. 
- */ - - /* If it's a bridge, no DMA */ - if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL) - return 0; - - /* Reduce the weight of slow USB controllers */ - if (dev->class == PCI_CLASS_SERIAL_USB_UHCI || - dev->class == PCI_CLASS_SERIAL_USB_OHCI || - dev->class == PCI_CLASS_SERIAL_USB_EHCI) - return 3; - - /* Increase the weight of RAID (includes Obsidian) */ - if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID) - return 15; - - /* Default */ - return 10; -} - -static struct pnv_ioda_pe * __devinit pnv_ioda_setup_dev_PE(struct pci_dev *dev) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - struct pci_dn *pdn = pnv_ioda_get_pdn(dev); - struct pnv_ioda_pe *pe; - int pe_num; - - if (!pdn) { - pr_err("%s: Device tree node not associated properly\n", - pci_name(dev)); - return NULL; - } - if (pdn->pe_number != IODA_INVALID_PE) - return NULL; - - /* PE#0 has been pre-set */ - if (dev->bus->number == 0) - pe_num = 0; - else - pe_num = pnv_ioda_alloc_pe(phb); - if (pe_num == IODA_INVALID_PE) { - pr_warning("%s: Not enough PE# available, disabling device\n", - pci_name(dev)); - return NULL; - } - - /* NOTE: We get only one ref to the pci_dev for the pdn, not for the - * pointer in the PE data structure, both should be destroyed at the - * same time. However, this needs to be looked at more closely again - * once we actually start removing things (Hotplug, SR-IOV, ...) - * - * At some point we want to remove the PDN completely anyways - */ - pe = &phb->ioda.pe_array[pe_num]; - pci_dev_get(dev); - pdn->pcidev = dev; - pdn->pe_number = pe_num; - pe->pdev = dev; - pe->pbus = NULL; - pe->tce32_seg = -1; - pe->mve_number = -1; - pe->rid = dev->bus->number << 8 | pdn->devfn; - - pe_info(pe, "Associated device to PE\n"); - - if (pnv_ioda_configure_pe(phb, pe)) { - /* XXX What do we do here ? 
*/ - if (pe_num) - pnv_ioda_free_pe(phb, pe_num); - pdn->pe_number = IODA_INVALID_PE; - pe->pdev = NULL; - pci_dev_put(dev); - return NULL; - } - - /* Assign a DMA weight to the device */ - pe->dma_weight = pnv_ioda_dma_weight(dev); - if (pe->dma_weight != 0) { - phb->ioda.dma_weight += pe->dma_weight; - phb->ioda.dma_pe_count++; - } - - /* Link the PE */ - pnv_ioda_link_pe_by_weight(phb, pe); - - return pe; -} - -static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe) -{ - struct pci_dev *dev; - - list_for_each_entry(dev, &bus->devices, bus_list) { - struct pci_dn *pdn = pnv_ioda_get_pdn(dev); - - if (pdn == NULL) { - pr_warn("%s: No device node associated with device !\n", - pci_name(dev)); - continue; - } - pci_dev_get(dev); - pdn->pcidev = dev; - pdn->pe_number = pe->pe_number; - pe->dma_weight += pnv_ioda_dma_weight(dev); - if (dev->subordinate) - pnv_ioda_setup_same_PE(dev->subordinate, pe); - } -} - -static void __devinit pnv_ioda_setup_bus_PE(struct pci_dev *dev, - struct pnv_ioda_pe *ppe) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - struct pci_bus *bus = dev->subordinate; - struct pnv_ioda_pe *pe; - int pe_num; - - if (!bus) { - pr_warning("%s: Bridge without a subordinate bus !\n", - pci_name(dev)); - return; - } - pe_num = pnv_ioda_alloc_pe(phb); - if (pe_num == IODA_INVALID_PE) { - pr_warning("%s: Not enough PE# available, disabling bus\n", - pci_name(dev)); - return; - } - - pe = &phb->ioda.pe_array[pe_num]; - ppe->bus_pe = pe; - pe->pbus = bus; - pe->pdev = NULL; - pe->tce32_seg = -1; - pe->mve_number = -1; - pe->rid = bus->secondary << 8; - pe->dma_weight = 0; - - pe_info(pe, "Secondary busses %d..%d associated with PE\n", - bus->secondary, bus->subordinate); - - if (pnv_ioda_configure_pe(phb, pe)) { - /* XXX What do we do here ? */ - if (pe_num) - pnv_ioda_free_pe(phb, pe_num); - pe->pbus = NULL; - return; - } - - /* Associate it with all child devices */ - pnv_ioda_setup_same_PE(bus, pe); - - /* Account for one DMA PE if at least one DMA capable device exist - * below the bridge - */ - if (pe->dma_weight != 0) { - phb->ioda.dma_weight += pe->dma_weight; - phb->ioda.dma_pe_count++; - } - - /* Link the PE */ - pnv_ioda_link_pe_by_weight(phb, pe); -} - -static void __devinit pnv_ioda_setup_PEs(struct pci_bus *bus) -{ - struct pci_dev *dev; - struct pnv_ioda_pe *pe; - - list_for_each_entry(dev, &bus->devices, bus_list) { - pe = pnv_ioda_setup_dev_PE(dev); - if (pe == NULL) - continue; - /* Leaving the PCIe domain ... 
single PE# */ - if (dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) - pnv_ioda_setup_bus_PE(dev, pe); - else if (dev->subordinate) - pnv_ioda_setup_PEs(dev->subordinate); - } -} - -static void __devinit pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, - struct pci_dev *dev) -{ - /* We delay DMA setup after we have assigned all PE# */ -} - -static void __devinit pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, - struct pci_bus *bus) -{ - struct pci_dev *dev; - - list_for_each_entry(dev, &bus->devices, bus_list) { - set_iommu_table_base(&dev->dev, &pe->tce32_table); - if (dev->subordinate) - pnv_ioda_setup_bus_dma(pe, dev->subordinate); - } -} - -static void __devinit pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb, - struct pnv_ioda_pe *pe, - unsigned int base, - unsigned int segs) -{ - - struct page *tce_mem = NULL; - const __be64 *swinvp; - struct iommu_table *tbl; - unsigned int i; - int64_t rc; - void *addr; - - /* 256M DMA window, 4K TCE pages, 8 bytes TCE */ -#define TCE32_TABLE_SIZE ((0x10000000 / 0x1000) * 8) - - /* XXX FIXME: Handle 64-bit only DMA devices */ - /* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */ - /* XXX FIXME: Allocate multi-level tables on PHB3 */ - - /* We shouldn't already have a 32-bit DMA associated */ - if (WARN_ON(pe->tce32_seg >= 0)) - return; - - /* Grab a 32-bit TCE table */ - pe->tce32_seg = base; - pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n", - (base << 28), ((base + segs) << 28) - 1); - - /* XXX Currently, we allocate one big contiguous table for the - * TCEs. We only really need one chunk per 256M of TCE space - * (ie per segment) but that's an optimization for later, it - * requires some added smarts with our get/put_tce implementation - */ - tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL, - get_order(TCE32_TABLE_SIZE * segs)); - if (!tce_mem) { - pe_err(pe, " Failed to allocate a 32-bit TCE memory\n"); - goto fail; - } - addr = page_address(tce_mem); - memset(addr, 0, TCE32_TABLE_SIZE * segs); - - /* Configure HW */ - for (i = 0; i < segs; i++) { - rc = opal_pci_map_pe_dma_window(phb->opal_id, - pe->pe_number, - base + i, 1, - __pa(addr) + TCE32_TABLE_SIZE * i, - TCE32_TABLE_SIZE, 0x1000); - if (rc) { - pe_err(pe, " Failed to configure 32-bit TCE table," - " err %ld\n", rc); - goto fail; - } - } - - /* Setup linux iommu table */ - tbl = &pe->tce32_table; - pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs, - base << 28); - - /* OPAL variant of P7IOC SW invalidated TCEs */ - swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL); - if (swinvp) { - /* We need a couple more fields -- an address and a data - * to or. Since the bus is only printed out on table free - * errors, and on the first pass the data will be a relative - * bus number, print that out instead. - */ - tbl->it_busno = 0; - tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8); - tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE - | TCE_PCI_SWINV_PAIR; - } - iommu_init_table(tbl, phb->hose->node); - - if (pe->pdev) - set_iommu_table_base(&pe->pdev->dev, tbl); - else - pnv_ioda_setup_bus_dma(pe, pe->pbus); - - return; - fail: - /* XXX Failure: Try to fallback to 64-bit only ? 
*/ - if (pe->tce32_seg >= 0) - pe->tce32_seg = -1; - if (tce_mem) - __free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs)); -} - -static void __devinit pnv_ioda_setup_dma(struct pnv_phb *phb) -{ - struct pci_controller *hose = phb->hose; - unsigned int residual, remaining, segs, tw, base; - struct pnv_ioda_pe *pe; - - /* If we have more PE# than segments available, hand out one - * per PE until we run out and let the rest fail. If not, - * then we assign at least one segment per PE, plus more based - * on the amount of devices under that PE - */ - if (phb->ioda.dma_pe_count > phb->ioda.tce32_count) - residual = 0; - else - residual = phb->ioda.tce32_count - - phb->ioda.dma_pe_count; - - pr_info("PCI: Domain %04x has %ld available 32-bit DMA segments\n", - hose->global_number, phb->ioda.tce32_count); - pr_info("PCI: %d PE# for a total weight of %d\n", - phb->ioda.dma_pe_count, phb->ioda.dma_weight); - - /* Walk our PE list and configure their DMA segments, hand them - * out one base segment plus any residual segments based on - * weight - */ - remaining = phb->ioda.tce32_count; - tw = phb->ioda.dma_weight; - base = 0; - list_for_each_entry(pe, &phb->ioda.pe_list, link) { - if (!pe->dma_weight) - continue; - if (!remaining) { - pe_warn(pe, "No DMA32 resources available\n"); - continue; - } - segs = 1; - if (residual) { - segs += ((pe->dma_weight * residual) + (tw / 2)) / tw; - if (segs > remaining) - segs = remaining; - } - pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n", - pe->dma_weight, segs); - pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs); - remaining -= segs; - base += segs; - } -} - -#ifdef CONFIG_PCI_MSI -static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, - unsigned int hwirq, unsigned int is_64, - struct msi_msg *msg) -{ - struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev); - unsigned int xive_num = hwirq - phb->msi_base; - uint64_t addr64; - uint32_t addr32, data; - int rc; - - /* No PE assigned ? bail out ... no MSI for you ! */ - if (pe == NULL) - return -ENXIO; - - /* Check if we have an MVE */ - if (pe->mve_number < 0) - return -ENXIO; - - /* Assign XIVE to PE */ - rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num); - if (rc) { - pr_warn("%s: OPAL error %d setting XIVE %d PE\n", - pci_name(dev), rc, xive_num); - return -EIO; - } - - if (is_64) { - rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1, - &addr64, &data); - if (rc) { - pr_warn("%s: OPAL error %d getting 64-bit MSI data\n", - pci_name(dev), rc); - return -EIO; - } - msg->address_hi = addr64 >> 32; - msg->address_lo = addr64 & 0xfffffffful; - } else { - rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1, - &addr32, &data); - if (rc) { - pr_warn("%s: OPAL error %d getting 32-bit MSI data\n", - pci_name(dev), rc); - return -EIO; - } - msg->address_hi = 0; - msg->address_lo = addr32; - } - msg->data = data; - - pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d)," - " address=%x_%08x data=%x PE# %d\n", - pci_name(dev), is_64 ? 
"64" : "32", hwirq, xive_num, - msg->address_hi, msg->address_lo, data, pe->pe_number); - - return 0; -} - -static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) -{ - unsigned int bmap_size; - const __be32 *prop = of_get_property(phb->hose->dn, - "ibm,opal-msi-ranges", NULL); - if (!prop) { - /* BML Fallback */ - prop = of_get_property(phb->hose->dn, "msi-ranges", NULL); - } - if (!prop) - return; - - phb->msi_base = be32_to_cpup(prop); - phb->msi_count = be32_to_cpup(prop + 1); - bmap_size = BITS_TO_LONGS(phb->msi_count) * sizeof(unsigned long); - phb->msi_map = zalloc_maybe_bootmem(bmap_size, GFP_KERNEL); - if (!phb->msi_map) { - pr_err("PCI %d: Failed to allocate MSI bitmap !\n", - phb->hose->global_number); - return; - } - phb->msi_setup = pnv_pci_ioda_msi_setup; - phb->msi32_support = 1; - pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n", - phb->msi_count, phb->msi_base); -} -#else -static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { } -#endif /* CONFIG_PCI_MSI */ - -/* This is the starting point of our IODA specific resource - * allocation process - */ -static void __devinit pnv_pci_ioda_fixup_phb(struct pci_controller *hose) -{ - resource_size_t size, align; - struct pci_bus *child; - - /* Associate PEs per functions */ - pnv_ioda_setup_PEs(hose->bus); - - /* Calculate all resources */ - pnv_ioda_calc_bus(hose->bus, IORESOURCE_IO, &size, &align); - pnv_ioda_calc_bus(hose->bus, IORESOURCE_MEM, &size, &align); - - /* Apply then to HW */ - pnv_ioda_update_resources(hose->bus); - - /* Setup DMA */ - pnv_ioda_setup_dma(hose->private_data); - - /* Configure PCI Express settings */ - list_for_each_entry(child, &hose->bus->children, node) { - struct pci_dev *self = child->self; - if (!self) - continue; - pcie_bus_configure_settings(child, self->pcie_mpss); - } -} - -/* Prevent enabling devices for which we couldn't properly - * assign a PE - */ -static int __devinit pnv_pci_enable_device_hook(struct pci_dev *dev) -{ - struct pci_dn *pdn = pnv_ioda_get_pdn(dev); - - if (!pdn || pdn->pe_number == IODA_INVALID_PE) - return -EINVAL; - return 0; -} - -static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus, - u32 devfn) -{ - return phb->ioda.pe_rmap[(bus->number << 8) | devfn]; -} - -void __init pnv_pci_init_ioda1_phb(struct device_node *np) -{ - struct pci_controller *hose; - static int primary = 1; - struct pnv_phb *phb; - unsigned long size, m32map_off, iomap_off, pemap_off; - const u64 *prop64; - u64 phb_id; - void *aux; - long rc; - - pr_info(" Initializing IODA OPAL PHB %s\n", np->full_name); - - prop64 = of_get_property(np, "ibm,opal-phbid", NULL); - if (!prop64) { - pr_err(" Missing \"ibm,opal-phbid\" property !\n"); - return; - } - phb_id = be64_to_cpup(prop64); - pr_debug(" PHB-ID : 0x%016llx\n", phb_id); - - phb = alloc_bootmem(sizeof(struct pnv_phb)); - if (phb) { - memset(phb, 0, sizeof(struct pnv_phb)); - phb->hose = hose = pcibios_alloc_controller(np); - } - if (!phb || !phb->hose) { - pr_err("PCI: Failed to allocate PCI controller for %s\n", - np->full_name); - return; - } - - spin_lock_init(&phb->lock); - /* XXX Use device-tree */ - hose->first_busno = 0; - hose->last_busno = 0xff; - hose->private_data = phb; - phb->opal_id = phb_id; - phb->type = PNV_PHB_IODA1; - - /* Detect specific models for error handling */ - if (of_device_is_compatible(np, "ibm,p7ioc-pciex")) - phb->model = PNV_PHB_MODEL_P7IOC; - else - phb->model = PNV_PHB_MODEL_UNKNOWN; - - /* We parse "ranges" now since we need to deduce the register base - * from the IO base - */ - 
pci_process_bridge_OF_ranges(phb->hose, np, primary); - primary = 0; - - /* Magic formula from Milton */ - phb->regs = of_iomap(np, 0); - if (phb->regs == NULL) - pr_err(" Failed to map registers !\n"); - - - /* XXX This is hack-a-thon. This needs to be changed so that: - * - we obtain stuff like PE# etc... from device-tree - * - we properly re-allocate M32 ourselves - * (the OFW one isn't very good) - */ - - /* Initialize more IODA stuff */ - phb->ioda.total_pe = 128; - - phb->ioda.m32_size = resource_size(&hose->mem_resources[0]); - /* OFW Has already off top 64k of M32 space (MSI space) */ - phb->ioda.m32_size += 0x10000; - - phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe; - phb->ioda.m32_pci_base = hose->mem_resources[0].start - - hose->pci_mem_offset; - phb->ioda.io_size = hose->pci_io_size; - phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe; - phb->ioda.io_pci_base = 0; /* XXX calculate this ? */ - - /* Allocate aux data & arrays */ - size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long)); - m32map_off = size; - size += phb->ioda.total_pe; - iomap_off = size; - size += phb->ioda.total_pe; - pemap_off = size; - size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe); - aux = alloc_bootmem(size); - memset(aux, 0, size); - phb->ioda.pe_alloc = aux; - phb->ioda.m32_segmap = aux + m32map_off; - phb->ioda.io_segmap = aux + iomap_off; - phb->ioda.pe_array = aux + pemap_off; - set_bit(0, phb->ioda.pe_alloc); - - INIT_LIST_HEAD(&phb->ioda.pe_list); - - /* Calculate how many 32-bit TCE segments we have */ - phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28; - - /* Clear unusable m64 */ - hose->mem_resources[1].flags = 0; - hose->mem_resources[1].start = 0; - hose->mem_resources[1].end = 0; - hose->mem_resources[2].flags = 0; - hose->mem_resources[2].start = 0; - hose->mem_resources[2].end = 0; - -#if 0 - rc = opal_pci_set_phb_mem_window(opal->phb_id, - window_type, - window_num, - starting_real_address, - starting_pci_address, - segment_size); -#endif - - pr_info(" %d PE's M32: 0x%x [segment=0x%x] IO: 0x%x [segment=0x%x]\n", - phb->ioda.total_pe, - phb->ioda.m32_size, phb->ioda.m32_segsize, - phb->ioda.io_size, phb->ioda.io_segsize); - - if (phb->regs) { - pr_devel(" BUID = 0x%016llx\n", in_be64(phb->regs + 0x100)); - pr_devel(" PHB2_CR = 0x%016llx\n", in_be64(phb->regs + 0x160)); - pr_devel(" IO_BAR = 0x%016llx\n", in_be64(phb->regs + 0x170)); - pr_devel(" IO_BAMR = 0x%016llx\n", in_be64(phb->regs + 0x178)); - pr_devel(" IO_SAR = 0x%016llx\n", in_be64(phb->regs + 0x180)); - pr_devel(" M32_BAR = 0x%016llx\n", in_be64(phb->regs + 0x190)); - pr_devel(" M32_BAMR = 0x%016llx\n", in_be64(phb->regs + 0x198)); - pr_devel(" M32_SAR = 0x%016llx\n", in_be64(phb->regs + 0x1a0)); - } - phb->hose->ops = &pnv_pci_ops; - - /* Setup RID -> PE mapping function */ - phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe; - - /* Setup TCEs */ - phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup; - - /* Setup MSI support */ - pnv_pci_init_ioda_msis(phb); - - /* We set both probe_only and PCI_REASSIGN_ALL_RSRC. 
This is an - * odd combination which essentially means that we skip all resource - * fixups and assignments in the generic code, and do it all - * ourselves here - */ - pci_probe_only = 1; - ppc_md.pcibios_fixup_phb = pnv_pci_ioda_fixup_phb; - ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook; - pci_add_flags(PCI_REASSIGN_ALL_RSRC); - - /* Reset IODA tables to a clean state */ - rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET); - if (rc) - pr_warning(" OPAL Error %ld performing IODA table reset !\n", rc); - opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1 , OPAL_MAP_PE); -} - -void __init pnv_pci_init_ioda_hub(struct device_node *np) -{ - struct device_node *phbn; - const u64 *prop64; - u64 hub_id; - - pr_info("Probing IODA IO-Hub %s\n", np->full_name); - - prop64 = of_get_property(np, "ibm,opal-hubid", NULL); - if (!prop64) { - pr_err(" Missing \"ibm,opal-hubid\" property !\n"); - return; - } - hub_id = be64_to_cpup(prop64); - pr_devel(" HUB-ID : 0x%016llx\n", hub_id); - - /* Count child PHBs */ - for_each_child_of_node(np, phbn) { - /* Look for IODA1 PHBs */ - if (of_device_is_compatible(phbn, "ibm,ioda-phb")) - pnv_pci_init_ioda1_phb(phbn); - } -} diff --git a/trunk/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/trunk/arch/powerpc/platforms/powernv/pci-p5ioc2.c index 264967770c3a..4c80f7c77d56 100644 --- a/trunk/arch/powerpc/platforms/powernv/pci-p5ioc2.c +++ b/trunk/arch/powerpc/platforms/powernv/pci-p5ioc2.c @@ -137,7 +137,6 @@ static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, phb->hose->private_data = phb; phb->opal_id = phb_id; phb->type = PNV_PHB_P5IOC2; - phb->model = PNV_PHB_MODEL_P5IOC2; phb->regs = of_iomap(np, 0); diff --git a/trunk/arch/powerpc/platforms/powernv/pci.c b/trunk/arch/powerpc/platforms/powernv/pci.c index a70bc1e385eb..85bb66d7f933 100644 --- a/trunk/arch/powerpc/platforms/powernv/pci.c +++ b/trunk/arch/powerpc/platforms/powernv/pci.c @@ -144,112 +144,6 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev) } #endif /* CONFIG_PCI_MSI */ -static void pnv_pci_dump_p7ioc_diag_data(struct pnv_phb *phb) -{ - struct OpalIoP7IOCPhbErrorData *data = &phb->diag.p7ioc; - int i; - - pr_info("PHB %d diagnostic data:\n", phb->hose->global_number); - - pr_info(" brdgCtl = 0x%08x\n", data->brdgCtl); - - pr_info(" portStatusReg = 0x%08x\n", data->portStatusReg); - pr_info(" rootCmplxStatus = 0x%08x\n", data->rootCmplxStatus); - pr_info(" busAgentStatus = 0x%08x\n", data->busAgentStatus); - - pr_info(" deviceStatus = 0x%08x\n", data->deviceStatus); - pr_info(" slotStatus = 0x%08x\n", data->slotStatus); - pr_info(" linkStatus = 0x%08x\n", data->linkStatus); - pr_info(" devCmdStatus = 0x%08x\n", data->devCmdStatus); - pr_info(" devSecStatus = 0x%08x\n", data->devSecStatus); - - pr_info(" rootErrorStatus = 0x%08x\n", data->rootErrorStatus); - pr_info(" uncorrErrorStatus = 0x%08x\n", data->uncorrErrorStatus); - pr_info(" corrErrorStatus = 0x%08x\n", data->corrErrorStatus); - pr_info(" tlpHdr1 = 0x%08x\n", data->tlpHdr1); - pr_info(" tlpHdr2 = 0x%08x\n", data->tlpHdr2); - pr_info(" tlpHdr3 = 0x%08x\n", data->tlpHdr3); - pr_info(" tlpHdr4 = 0x%08x\n", data->tlpHdr4); - pr_info(" sourceId = 0x%08x\n", data->sourceId); - - pr_info(" errorClass = 0x%016llx\n", data->errorClass); - pr_info(" correlator = 0x%016llx\n", data->correlator); - - pr_info(" p7iocPlssr = 0x%016llx\n", data->p7iocPlssr); - pr_info(" p7iocCsr = 0x%016llx\n", data->p7iocCsr); - pr_info(" lemFir = 0x%016llx\n", data->lemFir); - pr_info(" lemErrorMask = 0x%016llx\n", 
data->lemErrorMask); - pr_info(" lemWOF = 0x%016llx\n", data->lemWOF); - pr_info(" phbErrorStatus = 0x%016llx\n", data->phbErrorStatus); - pr_info(" phbFirstErrorStatus = 0x%016llx\n", data->phbFirstErrorStatus); - pr_info(" phbErrorLog0 = 0x%016llx\n", data->phbErrorLog0); - pr_info(" phbErrorLog1 = 0x%016llx\n", data->phbErrorLog1); - pr_info(" mmioErrorStatus = 0x%016llx\n", data->mmioErrorStatus); - pr_info(" mmioFirstErrorStatus = 0x%016llx\n", data->mmioFirstErrorStatus); - pr_info(" mmioErrorLog0 = 0x%016llx\n", data->mmioErrorLog0); - pr_info(" mmioErrorLog1 = 0x%016llx\n", data->mmioErrorLog1); - pr_info(" dma0ErrorStatus = 0x%016llx\n", data->dma0ErrorStatus); - pr_info(" dma0FirstErrorStatus = 0x%016llx\n", data->dma0FirstErrorStatus); - pr_info(" dma0ErrorLog0 = 0x%016llx\n", data->dma0ErrorLog0); - pr_info(" dma0ErrorLog1 = 0x%016llx\n", data->dma0ErrorLog1); - pr_info(" dma1ErrorStatus = 0x%016llx\n", data->dma1ErrorStatus); - pr_info(" dma1FirstErrorStatus = 0x%016llx\n", data->dma1FirstErrorStatus); - pr_info(" dma1ErrorLog0 = 0x%016llx\n", data->dma1ErrorLog0); - pr_info(" dma1ErrorLog1 = 0x%016llx\n", data->dma1ErrorLog1); - - for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) { - if ((data->pestA[i] >> 63) == 0 && - (data->pestB[i] >> 63) == 0) - continue; - pr_info(" PE[%3d] PESTA = 0x%016llx\n", i, data->pestA[i]); - pr_info(" PESTB = 0x%016llx\n", data->pestB[i]); - } -} - -static void pnv_pci_dump_phb_diag_data(struct pnv_phb *phb) -{ - switch(phb->model) { - case PNV_PHB_MODEL_P7IOC: - pnv_pci_dump_p7ioc_diag_data(phb); - break; - default: - pr_warning("PCI %d: Can't decode this PHB diag data\n", - phb->hose->global_number); - } -} - -static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no) -{ - unsigned long flags, rc; - int has_diag; - - spin_lock_irqsave(&phb->lock, flags); - - rc = opal_pci_get_phb_diag_data(phb->opal_id, phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE); - has_diag = (rc == OPAL_SUCCESS); - - rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, - OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); - if (rc) { - pr_warning("PCI %d: Failed to clear EEH freeze state" - " for PE#%d, err %ld\n", - phb->hose->global_number, pe_no, rc); - - /* For now, let's only display the diag buffer when we fail to clear - * the EEH status. We'll do more sensible things later when we have - * proper EEH support. 
We need to make sure we don't pollute ourselves - * with the normal errors generated when probing empty slots - */ - if (has_diag) - pnv_pci_dump_phb_diag_data(phb); - else - pr_warning("PCI %d: No diag data available\n", - phb->hose->global_number); - } - - spin_unlock_irqrestore(&phb->lock, flags); -} - static void pnv_pci_config_check_eeh(struct pnv_phb *phb, struct pci_bus *bus, u32 bdfn) { @@ -271,8 +165,15 @@ static void pnv_pci_config_check_eeh(struct pnv_phb *phb, struct pci_bus *bus, } cfg_dbg(" -> EEH check, bdfn=%04x PE%d fstate=%x\n", bdfn, pe_no, fstate); - if (fstate != 0) - pnv_pci_handle_eeh_config(phb, pe_no); + if (fstate != 0) { + rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, + OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); + if (rc) { + pr_warning("PCI %d: Failed to clear EEH freeze state" + " for PE#%d, err %lld\n", + phb->hose->global_number, pe_no, rc); + } + } } static int pnv_pci_read_config(struct pci_bus *bus, @@ -356,54 +257,12 @@ struct pci_ops pnv_pci_ops = { .write = pnv_pci_write_config, }; - -static void pnv_tce_invalidate(struct iommu_table *tbl, - u64 *startp, u64 *endp) -{ - u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index; - unsigned long start, end, inc; - - start = __pa(startp); - end = __pa(endp); - - - /* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */ - if (tbl->it_busno) { - start <<= 12; - end <<= 12; - inc = 128 << 12; - start |= tbl->it_busno; - end |= tbl->it_busno; - } - /* p7ioc-style invalidation, 2 TCEs per write */ - else if (tbl->it_type & TCE_PCI_SWINV_PAIR) { - start |= (1ull << 63); - end |= (1ull << 63); - inc = 16; - } - /* Default (older HW) */ - else - inc = 128; - - end |= inc - 1; /* round up end to be different than start */ - - mb(); /* Ensure above stores are visible */ - while (start <= end) { - __raw_writeq(start, invalidate); - start += inc; - } - /* The iommu layer will do another mb() for us on build() and - * we don't care on free() - */ -} - - static int pnv_tce_build(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, struct dma_attrs *attrs) { u64 proto_tce; - u64 *tcep, *tces; + u64 *tcep; u64 rpn; proto_tce = TCE_PCI_READ; // Read allowed @@ -411,33 +270,25 @@ static int pnv_tce_build(struct iommu_table *tbl, long index, long npages, if (direction != DMA_TO_DEVICE) proto_tce |= TCE_PCI_WRITE; - tces = tcep = ((u64 *)tbl->it_base) + index - tbl->it_offset; - rpn = __pa(uaddr) >> TCE_SHIFT; + tcep = ((u64 *)tbl->it_base) + index; - while (npages--) - *(tcep++) = proto_tce | (rpn++ << TCE_RPN_SHIFT); - - /* Some implementations won't cache invalid TCEs and thus may not - * need that flush. 
We'll probably turn it_type into a bit mask - * of flags if that becomes the case - */ - if (tbl->it_type & TCE_PCI_SWINV_CREATE) - pnv_tce_invalidate(tbl, tces, tcep - 1); + while (npages--) { + /* can't move this out since we might cross LMB boundary */ + rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; + *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT; + uaddr += TCE_PAGE_SIZE; + tcep++; + } return 0; } static void pnv_tce_free(struct iommu_table *tbl, long index, long npages) { - u64 *tcep, *tces; - - tces = tcep = ((u64 *)tbl->it_base) + index - tbl->it_offset; + u64 *tcep = ((u64 *)tbl->it_base) + index; while (npages--) *(tcep++) = 0; - - if (tbl->it_type & TCE_PCI_SWINV_FREE) - pnv_tce_invalidate(tbl, tces, tcep - 1); } void pnv_pci_setup_iommu_table(struct iommu_table *tbl, @@ -457,14 +308,13 @@ static struct iommu_table * __devinit pnv_pci_setup_bml_iommu(struct pci_controller *hose) { struct iommu_table *tbl; - const __be64 *basep, *swinvp; + const __be64 *basep; const __be32 *sizep; basep = of_get_property(hose->dn, "linux,tce-base", NULL); sizep = of_get_property(hose->dn, "linux,tce-size", NULL); if (basep == NULL || sizep == NULL) { - pr_err("PCI: %s has missing tce entries !\n", - hose->dn->full_name); + pr_err("PCI: %s has missing tce entries !\n", hose->dn->full_name); return NULL; } tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, hose->node); @@ -473,15 +323,6 @@ pnv_pci_setup_bml_iommu(struct pci_controller *hose) pnv_pci_setup_iommu_table(tbl, __va(be64_to_cpup(basep)), be32_to_cpup(sizep), 0); iommu_init_table(tbl, hose->node); - - /* Deal with SW invalidated TCEs when needed (BML way) */ - swinvp = of_get_property(hose->dn, "linux,tce-sw-invalidate-info", - NULL); - if (swinvp) { - tbl->it_busno = swinvp[1]; - tbl->it_index = (unsigned long)ioremap(swinvp[0], 8); - tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE; - } return tbl; } @@ -515,13 +356,6 @@ static void __devinit pnv_pci_dma_dev_setup(struct pci_dev *pdev) pnv_pci_dma_fallback_setup(hose, pdev); } -/* Fixup wrong class code in p7ioc root complex */ -static void __devinit pnv_p7ioc_rc_quirk(struct pci_dev *dev) -{ - dev->class = PCI_CLASS_BRIDGE_PCI << 8; -} -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk); - static int pnv_pci_probe_mode(struct pci_bus *bus) { struct pci_controller *hose = pci_bus_to_host(bus); @@ -566,24 +400,12 @@ void __init pnv_pci_init(void) init_pci_config_tokens(); find_and_init_phbs(); #endif /* CONFIG_PPC_POWERNV_RTAS */ - } - /* OPAL is here, do our normal stuff */ - else { - int found_ioda = 0; - - /* Look for IODA IO-Hubs. 
We don't support mixing IODA - * and p5ioc2 due to the need to change some global - * probing flags - */ - for_each_compatible_node(np, NULL, "ibm,ioda-hub") { - pnv_pci_init_ioda_hub(np); - found_ioda = 1; - } + } else { + /* OPAL is here, do our normal stuff */ /* Look for p5ioc2 IO-Hubs */ - if (!found_ioda) - for_each_compatible_node(np, NULL, "ibm,p5ioc2") - pnv_pci_init_p5ioc2_hub(np); + for_each_compatible_node(np, NULL, "ibm,p5ioc2") + pnv_pci_init_p5ioc2_hub(np); } /* Setup the linkage between OF nodes and PHBs */ diff --git a/trunk/arch/powerpc/platforms/powernv/pci.h b/trunk/arch/powerpc/platforms/powernv/pci.h index 8bc479634643..d4dbc4950936 100644 --- a/trunk/arch/powerpc/platforms/powernv/pci.h +++ b/trunk/arch/powerpc/platforms/powernv/pci.h @@ -9,63 +9,9 @@ enum pnv_phb_type { PNV_PHB_IODA2, }; -/* Precise PHB model for error management */ -enum pnv_phb_model { - PNV_PHB_MODEL_UNKNOWN, - PNV_PHB_MODEL_P5IOC2, - PNV_PHB_MODEL_P7IOC, -}; - -#define PNV_PCI_DIAG_BUF_SIZE 4096 - -/* Data associated with a PE, including IOMMU tracking etc.. */ -struct pnv_ioda_pe { - /* A PE can be associated with a single device or an - * entire bus (& children). In the former case, pdev - * is populated, in the later case, pbus is. - */ - struct pci_dev *pdev; - struct pci_bus *pbus; - - /* Effective RID (device RID for a device PE and base bus - * RID with devfn 0 for a bus PE) - */ - unsigned int rid; - - /* PE number */ - unsigned int pe_number; - - /* "Weight" assigned to the PE for the sake of DMA resource - * allocations - */ - unsigned int dma_weight; - - /* This is a PCI-E -> PCI-X bridge, this points to the - * corresponding bus PE - */ - struct pnv_ioda_pe *bus_pe; - - /* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */ - int tce32_seg; - int tce32_segcount; - struct iommu_table tce32_table; - - /* XXX TODO: Add support for additional 64-bit iommus */ - - /* MSIs. MVE index is identical for for 32 and 64 bit MSI - * and -1 if not supported. 
(It's actually identical to the - * PE number) - */ - int mve_number; - - /* Link in list of PE#s */ - struct list_head link; -}; - struct pnv_phb { struct pci_controller *hose; enum pnv_phb_type type; - enum pnv_phb_model model; u64 opal_id; void __iomem *regs; spinlock_t lock; @@ -88,52 +34,7 @@ struct pnv_phb { struct { struct iommu_table iommu_table; } p5ioc2; - - struct { - /* Global bridge info */ - unsigned int total_pe; - unsigned int m32_size; - unsigned int m32_segsize; - unsigned int m32_pci_base; - unsigned int io_size; - unsigned int io_segsize; - unsigned int io_pci_base; - - /* PE allocation bitmap */ - unsigned long *pe_alloc; - - /* M32 & IO segment maps */ - unsigned int *m32_segmap; - unsigned int *io_segmap; - struct pnv_ioda_pe *pe_array; - - /* Reverse map of PEs, will have to extend if - * we are to support more than 256 PEs, indexed - * bus { bus, devfn } - */ - unsigned char pe_rmap[0x10000]; - - /* 32-bit TCE tables allocation */ - unsigned long tce32_count; - - /* Total "weight" for the sake of DMA resources - * allocation - */ - unsigned int dma_weight; - unsigned int dma_pe_count; - - /* Sorted list of used PE's, sorted at - * boot for resource allocation purposes - */ - struct list_head pe_list; - } ioda; }; - - /* PHB status structure */ - union { - unsigned char blob[PNV_PCI_DIAG_BUF_SIZE]; - struct OpalIoP7IOCPhbErrorData p7ioc; - } diag; }; extern struct pci_ops pnv_pci_ops; @@ -142,7 +43,6 @@ extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl, void *tce_mem, u64 tce_size, u64 dma_offset); extern void pnv_pci_init_p5ioc2_hub(struct device_node *np); -extern void pnv_pci_init_ioda_hub(struct device_node *np); #endif /* __POWERNV_PCI_H */ diff --git a/trunk/arch/powerpc/platforms/powernv/smp.c b/trunk/arch/powerpc/platforms/powernv/smp.c index 17210c526c52..e87736685243 100644 --- a/trunk/arch/powerpc/platforms/powernv/smp.c +++ b/trunk/arch/powerpc/platforms/powernv/smp.c @@ -75,7 +75,7 @@ int __devinit pnv_smp_kick_cpu(int nr) /* On OPAL v2 the CPU are still spinning inside OPAL itself, * get them back now */ - if (!paca[nr].cpu_start && firmware_has_feature(FW_FEATURE_OPALv2)) { + if (firmware_has_feature(FW_FEATURE_OPALv2)) { pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu); rc = opal_start_cpu(pcpu, start_here); if (rc != OPAL_SUCCESS) diff --git a/trunk/arch/powerpc/platforms/ps3/interrupt.c b/trunk/arch/powerpc/platforms/ps3/interrupt.c index 617efa12a3a5..1d6f4f478fe2 100644 --- a/trunk/arch/powerpc/platforms/ps3/interrupt.c +++ b/trunk/arch/powerpc/platforms/ps3/interrupt.c @@ -31,18 +31,18 @@ #if defined(DEBUG) #define DBG udbg_printf -#define FAIL udbg_printf #else -#define DBG pr_devel -#define FAIL pr_debug +#define DBG pr_debug #endif /** * struct ps3_bmp - a per cpu irq status and mask bitmap structure * @status: 256 bit status bitmap indexed by plug - * @unused_1: Alignment + * @unused_1: * @mask: 256 bit mask bitmap indexed by plug - * @unused_2: Alignment + * @unused_2: + * @lock: + * @ipi_debug_brk_mask: * * The HV maintains per SMT thread mappings of HV outlet to HV plug on * behalf of the guest. These mappings are implemented as 256 bit guest @@ -73,24 +73,21 @@ struct ps3_bmp { unsigned long mask; u64 unused_2[3]; }; + u64 ipi_debug_brk_mask; + spinlock_t lock; }; /** * struct ps3_private - a per cpu data structure * @bmp: ps3_bmp structure - * @bmp_lock: Syncronize access to bmp. 
- * @ipi_debug_brk_mask: Mask for debug break IPIs * @ppe_id: HV logical_ppe_id * @thread_id: HV thread_id - * @ipi_mask: Mask of IPI virqs */ struct ps3_private { struct ps3_bmp bmp __attribute__ ((aligned (PS3_BMP_MINALIGN))); - spinlock_t bmp_lock; u64 ppe_id; u64 thread_id; - unsigned long ipi_debug_brk_mask; unsigned long ipi_mask; }; @@ -108,7 +105,7 @@ static void ps3_chip_mask(struct irq_data *d) struct ps3_private *pd = irq_data_get_irq_chip_data(d); unsigned long flags; - DBG("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__, + pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__, pd->thread_id, d->irq); local_irq_save(flags); @@ -129,7 +126,7 @@ static void ps3_chip_unmask(struct irq_data *d) struct ps3_private *pd = irq_data_get_irq_chip_data(d); unsigned long flags; - DBG("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__, + pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__, pd->thread_id, d->irq); local_irq_save(flags); @@ -193,19 +190,19 @@ static int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet, *virq = irq_create_mapping(NULL, outlet); if (*virq == NO_IRQ) { - FAIL("%s:%d: irq_create_mapping failed: outlet %lu\n", + pr_debug("%s:%d: irq_create_mapping failed: outlet %lu\n", __func__, __LINE__, outlet); result = -ENOMEM; goto fail_create; } - DBG("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__, + pr_debug("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__, outlet, cpu, *virq); result = irq_set_chip_data(*virq, pd); if (result) { - FAIL("%s:%d: irq_set_chip_data failed\n", + pr_debug("%s:%d: irq_set_chip_data failed\n", __func__, __LINE__); goto fail_set; } @@ -231,13 +228,13 @@ static int ps3_virq_destroy(unsigned int virq) { const struct ps3_private *pd = irq_get_chip_data(virq); - DBG("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__, + pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__, __LINE__, pd->ppe_id, pd->thread_id, virq); irq_set_chip_data(virq, NULL); irq_dispose_mapping(virq); - DBG("%s:%d <-\n", __func__, __LINE__); + pr_debug("%s:%d <-\n", __func__, __LINE__); return 0; } @@ -260,7 +257,7 @@ int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet, result = ps3_virq_setup(cpu, outlet, virq); if (result) { - FAIL("%s:%d: ps3_virq_setup failed\n", __func__, __LINE__); + pr_debug("%s:%d: ps3_virq_setup failed\n", __func__, __LINE__); goto fail_setup; } @@ -272,7 +269,7 @@ int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet, outlet, 0); if (result) { - FAIL("%s:%d: lv1_connect_irq_plug_ext failed: %s\n", + pr_info("%s:%d: lv1_connect_irq_plug_ext failed: %s\n", __func__, __LINE__, ps3_result(result)); result = -EPERM; goto fail_connect; @@ -301,7 +298,7 @@ int ps3_irq_plug_destroy(unsigned int virq) int result; const struct ps3_private *pd = irq_get_chip_data(virq); - DBG("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__, + pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__, __LINE__, pd->ppe_id, pd->thread_id, virq); ps3_chip_mask(irq_get_irq_data(virq)); @@ -309,7 +306,7 @@ int ps3_irq_plug_destroy(unsigned int virq) result = lv1_disconnect_irq_plug_ext(pd->ppe_id, pd->thread_id, virq); if (result) - FAIL("%s:%d: lv1_disconnect_irq_plug_ext failed: %s\n", + pr_info("%s:%d: lv1_disconnect_irq_plug_ext failed: %s\n", __func__, __LINE__, ps3_result(result)); ps3_virq_destroy(virq); @@ -337,7 +334,7 @@ int ps3_event_receive_port_setup(enum ps3_cpu_binding cpu, unsigned int *virq) result = 
lv1_construct_event_receive_port(&outlet); if (result) { - FAIL("%s:%d: lv1_construct_event_receive_port failed: %s\n", + pr_debug("%s:%d: lv1_construct_event_receive_port failed: %s\n", __func__, __LINE__, ps3_result(result)); *virq = NO_IRQ; return result; @@ -363,14 +360,14 @@ int ps3_event_receive_port_destroy(unsigned int virq) { int result; - DBG(" -> %s:%d virq %u\n", __func__, __LINE__, virq); + pr_debug(" -> %s:%d virq %u\n", __func__, __LINE__, virq); ps3_chip_mask(irq_get_irq_data(virq)); result = lv1_destruct_event_receive_port(virq_to_hw(virq)); if (result) - FAIL("%s:%d: lv1_destruct_event_receive_port failed: %s\n", + pr_debug("%s:%d: lv1_destruct_event_receive_port failed: %s\n", __func__, __LINE__, ps3_result(result)); /* @@ -378,7 +375,7 @@ int ps3_event_receive_port_destroy(unsigned int virq) * calls from interrupt context (smp_call_function) when kexecing. */ - DBG(" <- %s:%d\n", __func__, __LINE__); + pr_debug(" <- %s:%d\n", __func__, __LINE__); return result; } @@ -414,7 +411,7 @@ int ps3_sb_event_receive_port_setup(struct ps3_system_bus_device *dev, dev->dev_id, virq_to_hw(*virq), dev->interrupt_id); if (result) { - FAIL("%s:%d: lv1_connect_interrupt_event_receive_port" + pr_debug("%s:%d: lv1_connect_interrupt_event_receive_port" " failed: %s\n", __func__, __LINE__, ps3_result(result)); ps3_event_receive_port_destroy(*virq); @@ -422,7 +419,7 @@ int ps3_sb_event_receive_port_setup(struct ps3_system_bus_device *dev, return result; } - DBG("%s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__, + pr_debug("%s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__, dev->interrupt_id, *virq); return 0; @@ -436,14 +433,14 @@ int ps3_sb_event_receive_port_destroy(struct ps3_system_bus_device *dev, int result; - DBG(" -> %s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__, + pr_debug(" -> %s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__, dev->interrupt_id, virq); result = lv1_disconnect_interrupt_event_receive_port(dev->bus_id, dev->dev_id, virq_to_hw(virq), dev->interrupt_id); if (result) - FAIL("%s:%d: lv1_disconnect_interrupt_event_receive_port" + pr_debug("%s:%d: lv1_disconnect_interrupt_event_receive_port" " failed: %s\n", __func__, __LINE__, ps3_result(result)); @@ -458,7 +455,7 @@ int ps3_sb_event_receive_port_destroy(struct ps3_system_bus_device *dev, result = ps3_virq_destroy(virq); BUG_ON(result); - DBG(" <- %s:%d\n", __func__, __LINE__); + pr_debug(" <- %s:%d\n", __func__, __LINE__); return result; } EXPORT_SYMBOL(ps3_sb_event_receive_port_destroy); @@ -483,7 +480,7 @@ int ps3_io_irq_setup(enum ps3_cpu_binding cpu, unsigned int interrupt_id, result = lv1_construct_io_irq_outlet(interrupt_id, &outlet); if (result) { - FAIL("%s:%d: lv1_construct_io_irq_outlet failed: %s\n", + pr_debug("%s:%d: lv1_construct_io_irq_outlet failed: %s\n", __func__, __LINE__, ps3_result(result)); return result; } @@ -513,7 +510,7 @@ int ps3_io_irq_destroy(unsigned int virq) result = lv1_destruct_io_irq_outlet(outlet); if (result) - FAIL("%s:%d: lv1_destruct_io_irq_outlet failed: %s\n", + pr_debug("%s:%d: lv1_destruct_io_irq_outlet failed: %s\n", __func__, __LINE__, ps3_result(result)); return result; @@ -545,7 +542,7 @@ int ps3_vuart_irq_setup(enum ps3_cpu_binding cpu, void* virt_addr_bmp, result = lv1_configure_virtual_uart_irq(lpar_addr, &outlet); if (result) { - FAIL("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n", + pr_debug("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n", __func__, __LINE__, ps3_result(result)); return result; } @@ -565,7 +562,7 @@ int 
ps3_vuart_irq_destroy(unsigned int virq) result = lv1_deconfigure_virtual_uart_irq(); if (result) { - FAIL("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n", + pr_debug("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n", __func__, __LINE__, ps3_result(result)); return result; } @@ -598,7 +595,7 @@ int ps3_spe_irq_setup(enum ps3_cpu_binding cpu, unsigned long spe_id, result = lv1_get_spe_irq_outlet(spe_id, class, &outlet); if (result) { - FAIL("%s:%d: lv1_get_spe_irq_outlet failed: %s\n", + pr_debug("%s:%d: lv1_get_spe_irq_outlet failed: %s\n", __func__, __LINE__, ps3_result(result)); return result; } @@ -629,7 +626,7 @@ int ps3_spe_irq_destroy(unsigned int virq) static void _dump_64_bmp(const char *header, const u64 *p, unsigned cpu, const char* func, int line) { - pr_debug("%s:%d: %s %u {%04llx_%04llx_%04llx_%04llx}\n", + pr_debug("%s:%d: %s %u {%04lx_%04lx_%04lx_%04lx}\n", func, line, header, cpu, *p >> 48, (*p >> 32) & 0xffff, (*p >> 16) & 0xffff, *p & 0xffff); @@ -638,7 +635,7 @@ static void _dump_64_bmp(const char *header, const u64 *p, unsigned cpu, static void __maybe_unused _dump_256_bmp(const char *header, const u64 *p, unsigned cpu, const char* func, int line) { - pr_debug("%s:%d: %s %u {%016llx:%016llx:%016llx:%016llx}\n", + pr_debug("%s:%d: %s %u {%016lx:%016lx:%016lx:%016lx}\n", func, line, header, cpu, p[0], p[1], p[2], p[3]); } @@ -647,10 +644,10 @@ static void _dump_bmp(struct ps3_private* pd, const char* func, int line) { unsigned long flags; - spin_lock_irqsave(&pd->bmp_lock, flags); + spin_lock_irqsave(&pd->bmp.lock, flags); _dump_64_bmp("stat", &pd->bmp.status, pd->thread_id, func, line); - _dump_64_bmp("mask", (u64*)&pd->bmp.mask, pd->thread_id, func, line); - spin_unlock_irqrestore(&pd->bmp_lock, flags); + _dump_64_bmp("mask", &pd->bmp.mask, pd->thread_id, func, line); + spin_unlock_irqrestore(&pd->bmp.lock, flags); } #define dump_mask(_x) _dump_mask(_x, __func__, __LINE__) @@ -659,9 +656,9 @@ static void __maybe_unused _dump_mask(struct ps3_private *pd, { unsigned long flags; - spin_lock_irqsave(&pd->bmp_lock, flags); - _dump_64_bmp("mask", (u64*)&pd->bmp.mask, pd->thread_id, func, line); - spin_unlock_irqrestore(&pd->bmp_lock, flags); + spin_lock_irqsave(&pd->bmp.lock, flags); + _dump_64_bmp("mask", &pd->bmp.mask, pd->thread_id, func, line); + spin_unlock_irqrestore(&pd->bmp.lock, flags); } #else static void dump_bmp(struct ps3_private* pd) {}; @@ -670,7 +667,7 @@ static void dump_bmp(struct ps3_private* pd) {}; static int ps3_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hwirq) { - DBG("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq, + pr_debug("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq, virq); irq_set_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq); @@ -693,10 +690,10 @@ void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq) { struct ps3_private *pd = &per_cpu(ps3_private, cpu); - set_bit(63 - virq, &pd->ipi_debug_brk_mask); + pd->bmp.ipi_debug_brk_mask = 0x8000000000000000UL >> virq; - DBG("%s:%d: cpu %u, virq %u, mask %lxh\n", __func__, __LINE__, - cpu, virq, pd->ipi_debug_brk_mask); + pr_debug("%s:%d: cpu %u, virq %u, mask %llxh\n", __func__, __LINE__, + cpu, virq, pd->bmp.ipi_debug_brk_mask); } void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq) @@ -717,14 +714,14 @@ static unsigned int ps3_get_irq(void) /* check for ipi break first to stop this cpu ASAP */ - if (x & pd->ipi_debug_brk_mask) - x &= pd->ipi_debug_brk_mask; + if (x & pd->bmp.ipi_debug_brk_mask) + x &= 
pd->bmp.ipi_debug_brk_mask; asm volatile("cntlzd %0,%1" : "=r" (plug) : "r" (x)); plug &= 0x3f; if (unlikely(plug == NO_IRQ)) { - DBG("%s:%d: no plug found: thread_id %llu\n", __func__, + pr_debug("%s:%d: no plug found: thread_id %llu\n", __func__, __LINE__, pd->thread_id); dump_bmp(&per_cpu(ps3_private, 0)); dump_bmp(&per_cpu(ps3_private, 1)); @@ -763,9 +760,9 @@ void __init ps3_init_IRQ(void) lv1_get_logical_ppe_id(&pd->ppe_id); pd->thread_id = get_hard_smp_processor_id(cpu); - spin_lock_init(&pd->bmp_lock); + spin_lock_init(&pd->bmp.lock); - DBG("%s:%d: ppe_id %llu, thread_id %llu, bmp %lxh\n", + pr_debug("%s:%d: ppe_id %llu, thread_id %llu, bmp %lxh\n", __func__, __LINE__, pd->ppe_id, pd->thread_id, ps3_mm_phys_to_lpar(__pa(&pd->bmp))); @@ -773,7 +770,7 @@ void __init ps3_init_IRQ(void) pd->thread_id, ps3_mm_phys_to_lpar(__pa(&pd->bmp))); if (result) - FAIL("%s:%d: lv1_configure_irq_state_bitmap failed:" + pr_debug("%s:%d: lv1_configure_irq_state_bitmap failed:" " %s\n", __func__, __LINE__, ps3_result(result)); } diff --git a/trunk/arch/powerpc/platforms/ps3/mm.c b/trunk/arch/powerpc/platforms/ps3/mm.c index 8bd6ba542691..72714ad27842 100644 --- a/trunk/arch/powerpc/platforms/ps3/mm.c +++ b/trunk/arch/powerpc/platforms/ps3/mm.c @@ -319,6 +319,7 @@ static int __init ps3_mm_add_memory(void) } memblock_add(start_addr, map.r1.size); + memblock_analyze(); result = online_pages(start_pfn, nr_pages); diff --git a/trunk/arch/powerpc/platforms/ps3/repository.c b/trunk/arch/powerpc/platforms/ps3/repository.c index 7bdfea336f5e..ca40f6afd35d 100644 --- a/trunk/arch/powerpc/platforms/ps3/repository.c +++ b/trunk/arch/powerpc/platforms/ps3/repository.c @@ -44,7 +44,7 @@ static void _dump_field(const char *hdr, u64 n, const char *func, int line) s[i] = (in[i] <= 126 && in[i] >= 32) ? 
in[i] : '.'; s[i] = 0; - pr_devel("%s:%d: %s%016llx : %s\n", func, line, hdr, n, s); + pr_debug("%s:%d: %s%016llx : %s\n", func, line, hdr, n, s); #endif } @@ -53,7 +53,7 @@ static void _dump_field(const char *hdr, u64 n, const char *func, int line) static void _dump_node_name(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, u64 n4, const char *func, int line) { - pr_devel("%s:%d: lpar: %u\n", func, line, lpar_id); + pr_debug("%s:%d: lpar: %u\n", func, line, lpar_id); _dump_field("n1: ", n1, func, line); _dump_field("n2: ", n2, func, line); _dump_field("n3: ", n3, func, line); @@ -65,13 +65,13 @@ static void _dump_node_name(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, static void _dump_node(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, u64 n4, u64 v1, u64 v2, const char *func, int line) { - pr_devel("%s:%d: lpar: %u\n", func, line, lpar_id); + pr_debug("%s:%d: lpar: %u\n", func, line, lpar_id); _dump_field("n1: ", n1, func, line); _dump_field("n2: ", n2, func, line); _dump_field("n3: ", n3, func, line); _dump_field("n4: ", n4, func, line); - pr_devel("%s:%d: v1: %016llx\n", func, line, v1); - pr_devel("%s:%d: v2: %016llx\n", func, line, v2); + pr_debug("%s:%d: v1: %016llx\n", func, line, v1); + pr_debug("%s:%d: v2: %016llx\n", func, line, v2); } /** @@ -131,11 +131,11 @@ static int read_node(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, u64 n4, lpar_id = id; } - result = lv1_read_repository_node(lpar_id, n1, n2, n3, n4, &v1, + result = lv1_get_repository_node_value(lpar_id, n1, n2, n3, n4, &v1, &v2); if (result) { - pr_warn("%s:%d: lv1_read_repository_node failed: %s\n", + pr_debug("%s:%d: lv1_get_repository_node_value failed: %s\n", __func__, __LINE__, ps3_result(result)); dump_node_name(lpar_id, n1, n2, n3, n4); return -ENOENT; @@ -149,10 +149,10 @@ static int read_node(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, u64 n4, *_v2 = v2; if (v1 && !_v1) - pr_devel("%s:%d: warning: discarding non-zero v1: %016llx\n", + pr_debug("%s:%d: warning: discarding non-zero v1: %016llx\n", __func__, __LINE__, v1); if (v2 && !_v2) - pr_devel("%s:%d: warning: discarding non-zero v2: %016llx\n", + pr_debug("%s:%d: warning: discarding non-zero v2: %016llx\n", __func__, __LINE__, v2); return 0; @@ -323,16 +323,16 @@ int ps3_repository_find_device(struct ps3_repository_device *repo) result = ps3_repository_read_bus_num_dev(tmp.bus_index, &num_dev); if (result) { - pr_devel("%s:%d read_bus_num_dev failed\n", __func__, __LINE__); + pr_debug("%s:%d read_bus_num_dev failed\n", __func__, __LINE__); return result; } - pr_devel("%s:%d: bus_type %u, bus_index %u, bus_id %llu, num_dev %u\n", + pr_debug("%s:%d: bus_type %u, bus_index %u, bus_id %llu, num_dev %u\n", __func__, __LINE__, tmp.bus_type, tmp.bus_index, tmp.bus_id, num_dev); if (tmp.dev_index >= num_dev) { - pr_devel("%s:%d: no device found\n", __func__, __LINE__); + pr_debug("%s:%d: no device found\n", __func__, __LINE__); return -ENODEV; } @@ -340,7 +340,7 @@ int ps3_repository_find_device(struct ps3_repository_device *repo) &tmp.dev_type); if (result) { - pr_devel("%s:%d read_dev_type failed\n", __func__, __LINE__); + pr_debug("%s:%d read_dev_type failed\n", __func__, __LINE__); return result; } @@ -348,12 +348,12 @@ int ps3_repository_find_device(struct ps3_repository_device *repo) &tmp.dev_id); if (result) { - pr_devel("%s:%d ps3_repository_read_dev_id failed\n", __func__, + pr_debug("%s:%d ps3_repository_read_dev_id failed\n", __func__, __LINE__); return result; } - pr_devel("%s:%d: found: dev_type %u, dev_index %u, dev_id %llu\n", + pr_debug("%s:%d: 
found: dev_type %u, dev_index %u, dev_id %llu\n", __func__, __LINE__, tmp.dev_type, tmp.dev_index, tmp.dev_id); *repo = tmp; @@ -367,14 +367,14 @@ int ps3_repository_find_device_by_id(struct ps3_repository_device *repo, struct ps3_repository_device tmp; unsigned int num_dev; - pr_devel(" -> %s:%u: find device by id %llu:%llu\n", __func__, __LINE__, + pr_debug(" -> %s:%u: find device by id %llu:%llu\n", __func__, __LINE__, bus_id, dev_id); for (tmp.bus_index = 0; tmp.bus_index < 10; tmp.bus_index++) { result = ps3_repository_read_bus_id(tmp.bus_index, &tmp.bus_id); if (result) { - pr_devel("%s:%u read_bus_id(%u) failed\n", __func__, + pr_debug("%s:%u read_bus_id(%u) failed\n", __func__, __LINE__, tmp.bus_index); return result; } @@ -382,23 +382,23 @@ int ps3_repository_find_device_by_id(struct ps3_repository_device *repo, if (tmp.bus_id == bus_id) goto found_bus; - pr_devel("%s:%u: skip, bus_id %llu\n", __func__, __LINE__, + pr_debug("%s:%u: skip, bus_id %llu\n", __func__, __LINE__, tmp.bus_id); } - pr_devel(" <- %s:%u: bus not found\n", __func__, __LINE__); + pr_debug(" <- %s:%u: bus not found\n", __func__, __LINE__); return result; found_bus: result = ps3_repository_read_bus_type(tmp.bus_index, &tmp.bus_type); if (result) { - pr_devel("%s:%u read_bus_type(%u) failed\n", __func__, + pr_debug("%s:%u read_bus_type(%u) failed\n", __func__, __LINE__, tmp.bus_index); return result; } result = ps3_repository_read_bus_num_dev(tmp.bus_index, &num_dev); if (result) { - pr_devel("%s:%u read_bus_num_dev failed\n", __func__, + pr_debug("%s:%u read_bus_num_dev failed\n", __func__, __LINE__); return result; } @@ -408,7 +408,7 @@ int ps3_repository_find_device_by_id(struct ps3_repository_device *repo, tmp.dev_index, &tmp.dev_id); if (result) { - pr_devel("%s:%u read_dev_id(%u:%u) failed\n", __func__, + pr_debug("%s:%u read_dev_id(%u:%u) failed\n", __func__, __LINE__, tmp.bus_index, tmp.dev_index); return result; } @@ -416,21 +416,21 @@ int ps3_repository_find_device_by_id(struct ps3_repository_device *repo, if (tmp.dev_id == dev_id) goto found_dev; - pr_devel("%s:%u: skip, dev_id %llu\n", __func__, __LINE__, + pr_debug("%s:%u: skip, dev_id %llu\n", __func__, __LINE__, tmp.dev_id); } - pr_devel(" <- %s:%u: dev not found\n", __func__, __LINE__); + pr_debug(" <- %s:%u: dev not found\n", __func__, __LINE__); return result; found_dev: result = ps3_repository_read_dev_type(tmp.bus_index, tmp.dev_index, &tmp.dev_type); if (result) { - pr_devel("%s:%u read_dev_type failed\n", __func__, __LINE__); + pr_debug("%s:%u read_dev_type failed\n", __func__, __LINE__); return result; } - pr_devel(" <- %s:%u: found: type (%u:%u) index (%u:%u) id (%llu:%llu)\n", + pr_debug(" <- %s:%u: found: type (%u:%u) index (%u:%u) id (%llu:%llu)\n", __func__, __LINE__, tmp.bus_type, tmp.dev_type, tmp.bus_index, tmp.dev_index, tmp.bus_id, tmp.dev_id); *repo = tmp; @@ -443,18 +443,18 @@ int __devinit ps3_repository_find_devices(enum ps3_bus_type bus_type, int result = 0; struct ps3_repository_device repo; - pr_devel(" -> %s:%d: find bus_type %u\n", __func__, __LINE__, bus_type); + pr_debug(" -> %s:%d: find bus_type %u\n", __func__, __LINE__, bus_type); repo.bus_type = bus_type; result = ps3_repository_find_bus(repo.bus_type, 0, &repo.bus_index); if (result) { - pr_devel(" <- %s:%u: bus not found\n", __func__, __LINE__); + pr_debug(" <- %s:%u: bus not found\n", __func__, __LINE__); return result; } result = ps3_repository_read_bus_id(repo.bus_index, &repo.bus_id); if (result) { - pr_devel("%s:%d read_bus_id(%u) failed\n", __func__, 
__LINE__, + pr_debug("%s:%d read_bus_id(%u) failed\n", __func__, __LINE__, repo.bus_index); return result; } @@ -469,13 +469,13 @@ int __devinit ps3_repository_find_devices(enum ps3_bus_type bus_type, result = callback(&repo); if (result) { - pr_devel("%s:%d: abort at callback\n", __func__, + pr_debug("%s:%d: abort at callback\n", __func__, __LINE__); break; } } - pr_devel(" <- %s:%d\n", __func__, __LINE__); + pr_debug(" <- %s:%d\n", __func__, __LINE__); return result; } @@ -489,7 +489,7 @@ int ps3_repository_find_bus(enum ps3_bus_type bus_type, unsigned int from, for (i = from; i < 10; i++) { error = ps3_repository_read_bus_type(i, &type); if (error) { - pr_devel("%s:%d read_bus_type failed\n", + pr_debug("%s:%d read_bus_type failed\n", __func__, __LINE__); *bus_index = UINT_MAX; return error; @@ -509,7 +509,7 @@ int ps3_repository_find_interrupt(const struct ps3_repository_device *repo, int result = 0; unsigned int res_index; - pr_devel("%s:%d: find intr_type %u\n", __func__, __LINE__, intr_type); + pr_debug("%s:%d: find intr_type %u\n", __func__, __LINE__, intr_type); *interrupt_id = UINT_MAX; @@ -521,7 +521,7 @@ int ps3_repository_find_interrupt(const struct ps3_repository_device *repo, repo->dev_index, res_index, &t, &id); if (result) { - pr_devel("%s:%d read_dev_intr failed\n", + pr_debug("%s:%d read_dev_intr failed\n", __func__, __LINE__); return result; } @@ -535,7 +535,7 @@ int ps3_repository_find_interrupt(const struct ps3_repository_device *repo, if (res_index == 10) return -ENODEV; - pr_devel("%s:%d: found intr_type %u at res_index %u\n", + pr_debug("%s:%d: found intr_type %u at res_index %u\n", __func__, __LINE__, intr_type, res_index); return result; @@ -547,7 +547,7 @@ int ps3_repository_find_reg(const struct ps3_repository_device *repo, int result = 0; unsigned int res_index; - pr_devel("%s:%d: find reg_type %u\n", __func__, __LINE__, reg_type); + pr_debug("%s:%d: find reg_type %u\n", __func__, __LINE__, reg_type); *bus_addr = *len = 0; @@ -560,7 +560,7 @@ int ps3_repository_find_reg(const struct ps3_repository_device *repo, repo->dev_index, res_index, &t, &a, &l); if (result) { - pr_devel("%s:%d read_dev_reg failed\n", + pr_debug("%s:%d read_dev_reg failed\n", __func__, __LINE__); return result; } @@ -575,7 +575,7 @@ int ps3_repository_find_reg(const struct ps3_repository_device *repo, if (res_index == 10) return -ENODEV; - pr_devel("%s:%d: found reg_type %u at res_index %u\n", + pr_debug("%s:%d: found reg_type %u at res_index %u\n", __func__, __LINE__, reg_type, res_index); return result; @@ -1009,7 +1009,7 @@ int ps3_repository_dump_resource_info(const struct ps3_repository_device *repo) int result = 0; unsigned int res_index; - pr_devel(" -> %s:%d: (%u:%u)\n", __func__, __LINE__, + pr_debug(" -> %s:%d: (%u:%u)\n", __func__, __LINE__, repo->bus_index, repo->dev_index); for (res_index = 0; res_index < 10; res_index++) { @@ -1021,13 +1021,13 @@ int ps3_repository_dump_resource_info(const struct ps3_repository_device *repo) if (result) { if (result != LV1_NO_ENTRY) - pr_devel("%s:%d ps3_repository_read_dev_intr" + pr_debug("%s:%d ps3_repository_read_dev_intr" " (%u:%u) failed\n", __func__, __LINE__, repo->bus_index, repo->dev_index); break; } - pr_devel("%s:%d (%u:%u) intr_type %u, interrupt_id %u\n", + pr_debug("%s:%d (%u:%u) intr_type %u, interrupt_id %u\n", __func__, __LINE__, repo->bus_index, repo->dev_index, intr_type, interrupt_id); } @@ -1042,18 +1042,18 @@ int ps3_repository_dump_resource_info(const struct ps3_repository_device *repo) if (result) { if (result != 
LV1_NO_ENTRY) - pr_devel("%s:%d ps3_repository_read_dev_reg" + pr_debug("%s:%d ps3_repository_read_dev_reg" " (%u:%u) failed\n", __func__, __LINE__, repo->bus_index, repo->dev_index); break; } - pr_devel("%s:%d (%u:%u) reg_type %u, bus_addr %llxh, len %llxh\n", + pr_debug("%s:%d (%u:%u) reg_type %u, bus_addr %lxh, len %lxh\n", __func__, __LINE__, repo->bus_index, repo->dev_index, reg_type, bus_addr, len); } - pr_devel(" <- %s:%d\n", __func__, __LINE__); + pr_debug(" <- %s:%d\n", __func__, __LINE__); return result; } @@ -1063,22 +1063,22 @@ static int dump_stor_dev_info(struct ps3_repository_device *repo) unsigned int num_regions, region_index; u64 port, blk_size, num_blocks; - pr_devel(" -> %s:%d: (%u:%u)\n", __func__, __LINE__, + pr_debug(" -> %s:%d: (%u:%u)\n", __func__, __LINE__, repo->bus_index, repo->dev_index); result = ps3_repository_read_stor_dev_info(repo->bus_index, repo->dev_index, &port, &blk_size, &num_blocks, &num_regions); if (result) { - pr_devel("%s:%d ps3_repository_read_stor_dev_info" + pr_debug("%s:%d ps3_repository_read_stor_dev_info" " (%u:%u) failed\n", __func__, __LINE__, repo->bus_index, repo->dev_index); goto out; } - pr_devel("%s:%d (%u:%u): port %llu, blk_size %llu, num_blocks " - "%llu, num_regions %u\n", - __func__, __LINE__, repo->bus_index, repo->dev_index, - port, blk_size, num_blocks, num_regions); + pr_debug("%s:%d (%u:%u): port %lu, blk_size %lu, num_blocks " + "%lu, num_regions %u\n", + __func__, __LINE__, repo->bus_index, repo->dev_index, port, + blk_size, num_blocks, num_regions); for (region_index = 0; region_index < num_regions; region_index++) { unsigned int region_id; @@ -1088,20 +1088,19 @@ static int dump_stor_dev_info(struct ps3_repository_device *repo) repo->dev_index, region_index, ®ion_id, ®ion_start, ®ion_size); if (result) { - pr_devel("%s:%d ps3_repository_read_stor_dev_region" + pr_debug("%s:%d ps3_repository_read_stor_dev_region" " (%u:%u) failed\n", __func__, __LINE__, repo->bus_index, repo->dev_index); break; } - pr_devel("%s:%d (%u:%u) region_id %u, start %lxh, size %lxh\n", + pr_debug("%s:%d (%u:%u) region_id %u, start %lxh, size %lxh\n", __func__, __LINE__, repo->bus_index, repo->dev_index, - region_id, (unsigned long)region_start, - (unsigned long)region_size); + region_id, region_start, region_size); } out: - pr_devel(" <- %s:%d\n", __func__, __LINE__); + pr_debug(" <- %s:%d\n", __func__, __LINE__); return result; } @@ -1110,7 +1109,7 @@ static int dump_device_info(struct ps3_repository_device *repo, { int result = 0; - pr_devel(" -> %s:%d: bus_%u\n", __func__, __LINE__, repo->bus_index); + pr_debug(" -> %s:%d: bus_%u\n", __func__, __LINE__, repo->bus_index); for (repo->dev_index = 0; repo->dev_index < num_dev; repo->dev_index++) { @@ -1119,7 +1118,7 @@ static int dump_device_info(struct ps3_repository_device *repo, repo->dev_index, &repo->dev_type); if (result) { - pr_devel("%s:%d ps3_repository_read_dev_type" + pr_debug("%s:%d ps3_repository_read_dev_type" " (%u:%u) failed\n", __func__, __LINE__, repo->bus_index, repo->dev_index); break; @@ -1129,15 +1128,15 @@ static int dump_device_info(struct ps3_repository_device *repo, repo->dev_index, &repo->dev_id); if (result) { - pr_devel("%s:%d ps3_repository_read_dev_id" + pr_debug("%s:%d ps3_repository_read_dev_id" " (%u:%u) failed\n", __func__, __LINE__, repo->bus_index, repo->dev_index); continue; } - pr_devel("%s:%d (%u:%u): dev_type %u, dev_id %lu\n", __func__, + pr_debug("%s:%d (%u:%u): dev_type %u, dev_id %lu\n", __func__, __LINE__, repo->bus_index, repo->dev_index, - 
repo->dev_type, (unsigned long)repo->dev_id); + repo->dev_type, repo->dev_id); ps3_repository_dump_resource_info(repo); @@ -1145,7 +1144,7 @@ static int dump_device_info(struct ps3_repository_device *repo, dump_stor_dev_info(repo); } - pr_devel(" <- %s:%d\n", __func__, __LINE__); + pr_debug(" <- %s:%d\n", __func__, __LINE__); return result; } @@ -1154,7 +1153,7 @@ int ps3_repository_dump_bus_info(void) int result = 0; struct ps3_repository_device repo; - pr_devel(" -> %s:%d\n", __func__, __LINE__); + pr_debug(" -> %s:%d\n", __func__, __LINE__); memset(&repo, 0, sizeof(repo)); @@ -1165,7 +1164,7 @@ int ps3_repository_dump_bus_info(void) &repo.bus_type); if (result) { - pr_devel("%s:%d read_bus_type(%u) failed\n", + pr_debug("%s:%d read_bus_type(%u) failed\n", __func__, __LINE__, repo.bus_index); break; } @@ -1174,32 +1173,32 @@ int ps3_repository_dump_bus_info(void) &repo.bus_id); if (result) { - pr_devel("%s:%d read_bus_id(%u) failed\n", + pr_debug("%s:%d read_bus_id(%u) failed\n", __func__, __LINE__, repo.bus_index); continue; } if (repo.bus_index != repo.bus_id) - pr_devel("%s:%d bus_index != bus_id\n", + pr_debug("%s:%d bus_index != bus_id\n", __func__, __LINE__); result = ps3_repository_read_bus_num_dev(repo.bus_index, &num_dev); if (result) { - pr_devel("%s:%d read_bus_num_dev(%u) failed\n", + pr_debug("%s:%d read_bus_num_dev(%u) failed\n", __func__, __LINE__, repo.bus_index); continue; } - pr_devel("%s:%d bus_%u: bus_type %u, bus_id %lu, num_dev %u\n", + pr_debug("%s:%d bus_%u: bus_type %u, bus_id %lu, num_dev %u\n", __func__, __LINE__, repo.bus_index, repo.bus_type, - (unsigned long)repo.bus_id, num_dev); + repo.bus_id, num_dev); dump_device_info(&repo, num_dev); } - pr_devel(" <- %s:%d\n", __func__, __LINE__); + pr_debug(" <- %s:%d\n", __func__, __LINE__); return result; } diff --git a/trunk/arch/powerpc/platforms/ps3/setup.c b/trunk/arch/powerpc/platforms/ps3/setup.c index 2d664c5a83b0..e8ec1b2bfffd 100644 --- a/trunk/arch/powerpc/platforms/ps3/setup.c +++ b/trunk/arch/powerpc/platforms/ps3/setup.c @@ -193,12 +193,10 @@ static int ps3_set_dabr(unsigned long dabr) static void __init ps3_setup_arch(void) { - u64 tmp; DBG(" -> %s:%d\n", __func__, __LINE__); - lv1_get_version_info(&ps3_firmware_version.raw, &tmp); - + lv1_get_version_info(&ps3_firmware_version.raw); printk(KERN_INFO "PS3 firmware version %u.%u.%u\n", ps3_firmware_version.major, ps3_firmware_version.minor, ps3_firmware_version.rev); diff --git a/trunk/arch/powerpc/platforms/ps3/smp.c b/trunk/arch/powerpc/platforms/ps3/smp.c index 4b35166229fe..efc1cd8c034a 100644 --- a/trunk/arch/powerpc/platforms/ps3/smp.c +++ b/trunk/arch/powerpc/platforms/ps3/smp.c @@ -57,7 +57,7 @@ static void ps3_smp_message_pass(int cpu, int msg) " (%d)\n", __func__, __LINE__, cpu, msg, result); } -static int __init ps3_smp_probe(void) +static int ps3_smp_probe(void) { int cpu; diff --git a/trunk/arch/powerpc/platforms/ps3/spu.c b/trunk/arch/powerpc/platforms/ps3/spu.c index e17fa1432d80..451fad1c92a8 100644 --- a/trunk/arch/powerpc/platforms/ps3/spu.c +++ b/trunk/arch/powerpc/platforms/ps3/spu.c @@ -154,7 +154,7 @@ static unsigned long get_vas_id(void) u64 id; lv1_get_logical_ppe_id(&id); - lv1_get_virtual_address_space_id_of_ppe(&id); + lv1_get_virtual_address_space_id_of_ppe(id, &id); return id; } diff --git a/trunk/arch/powerpc/platforms/pseries/Kconfig b/trunk/arch/powerpc/platforms/pseries/Kconfig index ae7b6d41fed3..c81f6bb9c10f 100644 --- a/trunk/arch/powerpc/platforms/pseries/Kconfig +++ b/trunk/arch/powerpc/platforms/pseries/Kconfig @@ 
-120,12 +120,3 @@ config DTL which are accessible through a debugfs file. Say N if you are unsure. - -config PSERIES_IDLE - tristate "Cpuidle driver for pSeries platforms" - depends on CPU_IDLE - depends on PPC_PSERIES - default y - help - Select this option to enable processor idle state management - through cpuidle subsystem. diff --git a/trunk/arch/powerpc/platforms/pseries/Makefile b/trunk/arch/powerpc/platforms/pseries/Makefile index 236db46b4078..3556e402cbf5 100644 --- a/trunk/arch/powerpc/platforms/pseries/Makefile +++ b/trunk/arch/powerpc/platforms/pseries/Makefile @@ -22,7 +22,6 @@ obj-$(CONFIG_PHYP_DUMP) += phyp_dump.o obj-$(CONFIG_CMM) += cmm.o obj-$(CONFIG_DTL) += dtl.o obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o -obj-$(CONFIG_PSERIES_IDLE) += processor_idle.o ifeq ($(CONFIG_PPC_PSERIES),y) obj-$(CONFIG_SUSPEND) += suspend.o diff --git a/trunk/arch/powerpc/platforms/pseries/hvCall_inst.c b/trunk/arch/powerpc/platforms/pseries/hvCall_inst.c index c9311cfdfcac..f106662f4381 100644 --- a/trunk/arch/powerpc/platforms/pseries/hvCall_inst.c +++ b/trunk/arch/powerpc/platforms/pseries/hvCall_inst.c @@ -109,7 +109,7 @@ static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long if (opcode > MAX_HCALL_OPCODE) return; - h = &__get_cpu_var(hcall_stats)[opcode / 4]; + h = &get_cpu_var(hcall_stats)[opcode / 4]; h->tb_start = mftb(); h->purr_start = mfspr(SPRN_PURR); } @@ -126,6 +126,8 @@ static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long h->num_calls++; h->tb_total += mftb() - h->tb_start; h->purr_total += mfspr(SPRN_PURR) - h->purr_start; + + put_cpu_var(hcall_stats); } static int __init hcall_inst_init(void) diff --git a/trunk/arch/powerpc/platforms/pseries/iommu.c b/trunk/arch/powerpc/platforms/pseries/iommu.c index c442f2b1980f..b719d9709730 100644 --- a/trunk/arch/powerpc/platforms/pseries/iommu.c +++ b/trunk/arch/powerpc/platforms/pseries/iommu.c @@ -52,42 +52,13 @@ #include "plpar_wrappers.h" -static void tce_invalidate_pSeries_sw(struct iommu_table *tbl, - u64 *startp, u64 *endp) -{ - u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index; - unsigned long start, end, inc; - - start = __pa(startp); - end = __pa(endp); - inc = L1_CACHE_BYTES; /* invalidate a cacheline of TCEs at a time */ - - /* If this is non-zero, change the format. We shift the - * address and or in the magic from the device tree. 
*/ - if (tbl->it_busno) { - start <<= 12; - end <<= 12; - inc <<= 12; - start |= tbl->it_busno; - end |= tbl->it_busno; - } - - end |= inc - 1; /* round up end to be different than start */ - - mb(); /* Make sure TCEs in memory are written */ - while (start <= end) { - out_be64(invalidate, start); - start += inc; - } -} - static int tce_build_pSeries(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, struct dma_attrs *attrs) { u64 proto_tce; - u64 *tcep, *tces; + u64 *tcep; u64 rpn; proto_tce = TCE_PCI_READ; // Read allowed @@ -95,7 +66,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index, if (direction != DMA_TO_DEVICE) proto_tce |= TCE_PCI_WRITE; - tces = tcep = ((u64 *)tbl->it_base) + index; + tcep = ((u64 *)tbl->it_base) + index; while (npages--) { /* can't move this out since we might cross MEMBLOCK boundary */ @@ -105,24 +76,18 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index, uaddr += TCE_PAGE_SIZE; tcep++; } - - if (tbl->it_type == TCE_PCI_SWINV_CREATE) - tce_invalidate_pSeries_sw(tbl, tces, tcep - 1); return 0; } static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages) { - u64 *tcep, *tces; + u64 *tcep; - tces = tcep = ((u64 *)tbl->it_base) + index; + tcep = ((u64 *)tbl->it_base) + index; while (npages--) *(tcep++) = 0; - - if (tbl->it_type == TCE_PCI_SWINV_FREE) - tce_invalidate_pSeries_sw(tbl, tces, tcep - 1); } static unsigned long tce_get_pseries(struct iommu_table *tbl, long index) @@ -460,7 +425,7 @@ static void iommu_table_setparms(struct pci_controller *phb, struct iommu_table *tbl) { struct device_node *node; - const unsigned long *basep, *sw_inval; + const unsigned long *basep; const u32 *sizep; node = phb->dn; @@ -497,22 +462,6 @@ static void iommu_table_setparms(struct pci_controller *phb, tbl->it_index = 0; tbl->it_blocksize = 16; tbl->it_type = TCE_PCI; - - sw_inval = of_get_property(node, "linux,tce-sw-invalidate-info", NULL); - if (sw_inval) { - /* - * This property contains information on how to - * invalidate the TCE entry. The first property is - * the base MMIO address used to invalidate entries. - * The second property tells us the format of the TCE - * invalidate (whether it needs to be shifted) and - * some magic routing info to add to our invalidate - * command. 
- */ - tbl->it_index = (unsigned long) ioremap(sw_inval[0], 8); - tbl->it_busno = sw_inval[1]; /* overload this with magic */ - tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE; - } } /* diff --git a/trunk/arch/powerpc/platforms/pseries/lpar.c b/trunk/arch/powerpc/platforms/pseries/lpar.c index 948e0e3b3547..27a49508b410 100644 --- a/trunk/arch/powerpc/platforms/pseries/lpar.c +++ b/trunk/arch/powerpc/platforms/pseries/lpar.c @@ -554,10 +554,7 @@ void __trace_hcall_entry(unsigned long opcode, unsigned long *args) goto out; (*depth)++; - preempt_disable(); trace_hcall_entry(opcode, args); - if (opcode == H_CEDE) - rcu_idle_enter(); (*depth)--; out: @@ -578,10 +575,7 @@ void __trace_hcall_exit(long opcode, unsigned long retval, goto out; (*depth)++; - if (opcode == H_CEDE) - rcu_idle_exit(); trace_hcall_exit(opcode, retval, retbuf); - preempt_enable(); (*depth)--; out: diff --git a/trunk/arch/powerpc/platforms/pseries/nvram.c b/trunk/arch/powerpc/platforms/pseries/nvram.c index 330a57b7c17c..a76b22844d18 100644 --- a/trunk/arch/powerpc/platforms/pseries/nvram.c +++ b/trunk/arch/powerpc/platforms/pseries/nvram.c @@ -625,8 +625,6 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, { static unsigned int oops_count = 0; static bool panicking = false; - static DEFINE_SPINLOCK(lock); - unsigned long flags; size_t text_len; unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ; int rc = -1; @@ -657,9 +655,6 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, if (clobbering_unread_rtas_event()) return; - if (!spin_trylock_irqsave(&lock, flags)) - return; - if (big_oops_buf) { text_len = capture_last_msgs(old_msgs, old_len, new_msgs, new_len, big_oops_buf, big_oops_buf_sz); @@ -675,6 +670,4 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, (void) nvram_write_os_partition(&oops_log_partition, oops_buf, (int) (sizeof(*oops_len) + *oops_len), err_type, ++oops_count); - - spin_unlock_irqrestore(&lock, flags); } diff --git a/trunk/arch/powerpc/platforms/pseries/processor_idle.c b/trunk/arch/powerpc/platforms/pseries/processor_idle.c deleted file mode 100644 index 085fd3f45ad2..000000000000 --- a/trunk/arch/powerpc/platforms/pseries/processor_idle.c +++ /dev/null @@ -1,329 +0,0 @@ -/* - * processor_idle - idle state cpuidle driver. - * Adapted from drivers/idle/intel_idle.c and - * drivers/acpi/processor_idle.c - * - */ - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "plpar_wrappers.h" -#include "pseries.h" - -struct cpuidle_driver pseries_idle_driver = { - .name = "pseries_idle", - .owner = THIS_MODULE, -}; - -#define MAX_IDLE_STATE_COUNT 2 - -static int max_idle_state = MAX_IDLE_STATE_COUNT - 1; -static struct cpuidle_device __percpu *pseries_cpuidle_devices; -static struct cpuidle_state *cpuidle_state_table; - -void update_smt_snooze_delay(int snooze) -{ - struct cpuidle_driver *drv = cpuidle_get_driver(); - if (drv) - drv->states[0].target_residency = snooze; -} - -static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before) -{ - - *kt_before = ktime_get_real(); - *in_purr = mfspr(SPRN_PURR); - /* - * Indicate to the HV that we are idle. Now would be - * a good time to find other work to dispatch. 
- */ - get_lppaca()->idle = 1; -} - -static inline s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before) -{ - get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr; - get_lppaca()->idle = 0; - - return ktime_to_us(ktime_sub(ktime_get_real(), kt_before)); -} - -static int snooze_loop(struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index) -{ - unsigned long in_purr; - ktime_t kt_before; - unsigned long start_snooze; - long snooze = drv->states[0].target_residency; - - idle_loop_prolog(&in_purr, &kt_before); - - if (snooze) { - start_snooze = get_tb() + snooze * tb_ticks_per_usec; - local_irq_enable(); - set_thread_flag(TIF_POLLING_NRFLAG); - - while ((snooze < 0) || (get_tb() < start_snooze)) { - if (need_resched() || cpu_is_offline(dev->cpu)) - goto out; - ppc64_runlatch_off(); - HMT_low(); - HMT_very_low(); - } - - HMT_medium(); - clear_thread_flag(TIF_POLLING_NRFLAG); - smp_mb(); - local_irq_disable(); - } - -out: - HMT_medium(); - dev->last_residency = - (int)idle_loop_epilog(in_purr, kt_before); - return index; -} - -static int dedicated_cede_loop(struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index) -{ - unsigned long in_purr; - ktime_t kt_before; - - idle_loop_prolog(&in_purr, &kt_before); - get_lppaca()->donate_dedicated_cpu = 1; - - ppc64_runlatch_off(); - HMT_medium(); - cede_processor(); - - get_lppaca()->donate_dedicated_cpu = 0; - dev->last_residency = - (int)idle_loop_epilog(in_purr, kt_before); - return index; -} - -static int shared_cede_loop(struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index) -{ - unsigned long in_purr; - ktime_t kt_before; - - idle_loop_prolog(&in_purr, &kt_before); - - /* - * Yield the processor to the hypervisor. We return if - * an external interrupt occurs (which are driven prior - * to returning here) or if a prod occurs from another - * processor. When returning here, external interrupts - * are enabled. - */ - cede_processor(); - - dev->last_residency = - (int)idle_loop_epilog(in_purr, kt_before); - return index; -} - -/* - * States for dedicated partition case. - */ -static struct cpuidle_state dedicated_states[MAX_IDLE_STATE_COUNT] = { - { /* Snooze */ - .name = "snooze", - .desc = "snooze", - .flags = CPUIDLE_FLAG_TIME_VALID, - .exit_latency = 0, - .target_residency = 0, - .enter = &snooze_loop }, - { /* CEDE */ - .name = "CEDE", - .desc = "CEDE", - .flags = CPUIDLE_FLAG_TIME_VALID, - .exit_latency = 1, - .target_residency = 10, - .enter = &dedicated_cede_loop }, -}; - -/* - * States for shared partition case. - */ -static struct cpuidle_state shared_states[MAX_IDLE_STATE_COUNT] = { - { /* Shared Cede */ - .name = "Shared Cede", - .desc = "Shared Cede", - .flags = CPUIDLE_FLAG_TIME_VALID, - .exit_latency = 0, - .target_residency = 0, - .enter = &shared_cede_loop }, -}; - -int pseries_notify_cpuidle_add_cpu(int cpu) -{ - struct cpuidle_device *dev = - per_cpu_ptr(pseries_cpuidle_devices, cpu); - if (dev && cpuidle_get_driver()) { - cpuidle_disable_device(dev); - cpuidle_enable_device(dev); - } - return 0; -} - -/* - * pseries_cpuidle_driver_init() - */ -static int pseries_cpuidle_driver_init(void) -{ - int idle_state; - struct cpuidle_driver *drv = &pseries_idle_driver; - - drv->state_count = 0; - - for (idle_state = 0; idle_state < MAX_IDLE_STATE_COUNT; ++idle_state) { - - if (idle_state > max_idle_state) - break; - - /* is the state not enabled? 
*/ - if (cpuidle_state_table[idle_state].enter == NULL) - continue; - - drv->states[drv->state_count] = /* structure copy */ - cpuidle_state_table[idle_state]; - - if (cpuidle_state_table == dedicated_states) - drv->states[drv->state_count].target_residency = - __get_cpu_var(smt_snooze_delay); - - drv->state_count += 1; - } - - return 0; -} - -/* pseries_idle_devices_uninit(void) - * unregister cpuidle devices and de-allocate memory - */ -static void pseries_idle_devices_uninit(void) -{ - int i; - struct cpuidle_device *dev; - - for_each_possible_cpu(i) { - dev = per_cpu_ptr(pseries_cpuidle_devices, i); - cpuidle_unregister_device(dev); - } - - free_percpu(pseries_cpuidle_devices); - return; -} - -/* pseries_idle_devices_init() - * allocate, initialize and register cpuidle device - */ -static int pseries_idle_devices_init(void) -{ - int i; - struct cpuidle_driver *drv = &pseries_idle_driver; - struct cpuidle_device *dev; - - pseries_cpuidle_devices = alloc_percpu(struct cpuidle_device); - if (pseries_cpuidle_devices == NULL) - return -ENOMEM; - - for_each_possible_cpu(i) { - dev = per_cpu_ptr(pseries_cpuidle_devices, i); - dev->state_count = drv->state_count; - dev->cpu = i; - if (cpuidle_register_device(dev)) { - printk(KERN_DEBUG \ - "cpuidle_register_device %d failed!\n", i); - return -EIO; - } - } - - return 0; -} - -/* - * pseries_idle_probe() - * Choose state table for shared versus dedicated partition - */ -static int pseries_idle_probe(void) -{ - - if (!firmware_has_feature(FW_FEATURE_SPLPAR)) - return -ENODEV; - - if (cpuidle_disable != IDLE_NO_OVERRIDE) - return -ENODEV; - - if (max_idle_state == 0) { - printk(KERN_DEBUG "pseries processor idle disabled.\n"); - return -EPERM; - } - - if (get_lppaca()->shared_proc) - cpuidle_state_table = shared_states; - else - cpuidle_state_table = dedicated_states; - - return 0; -} - -static int __init pseries_processor_idle_init(void) -{ - int retval; - - retval = pseries_idle_probe(); - if (retval) - return retval; - - pseries_cpuidle_driver_init(); - retval = cpuidle_register_driver(&pseries_idle_driver); - if (retval) { - printk(KERN_DEBUG "Registration of pseries driver failed.\n"); - return retval; - } - - retval = pseries_idle_devices_init(); - if (retval) { - pseries_idle_devices_uninit(); - cpuidle_unregister_driver(&pseries_idle_driver); - return retval; - } - - printk(KERN_DEBUG "pseries_idle_driver registered\n"); - - return 0; -} - -static void __exit pseries_processor_idle_exit(void) -{ - - pseries_idle_devices_uninit(); - cpuidle_unregister_driver(&pseries_idle_driver); - - return; -} - -module_init(pseries_processor_idle_init); -module_exit(pseries_processor_idle_exit); - -MODULE_AUTHOR("Deepthi Dharwar "); -MODULE_DESCRIPTION("Cpuidle driver for POWER"); -MODULE_LICENSE("GPL"); diff --git a/trunk/arch/powerpc/platforms/pseries/pseries.h b/trunk/arch/powerpc/platforms/pseries/pseries.h index 9a3dda07566f..24c7162f11d9 100644 --- a/trunk/arch/powerpc/platforms/pseries/pseries.h +++ b/trunk/arch/powerpc/platforms/pseries/pseries.h @@ -57,7 +57,4 @@ extern struct device_node *dlpar_configure_connector(u32); extern int dlpar_attach_node(struct device_node *); extern int dlpar_detach_node(struct device_node *); -/* Snooze Delay, pseries_idle */ -DECLARE_PER_CPU(long, smt_snooze_delay); - #endif /* _PSERIES_PSERIES_H */ diff --git a/trunk/arch/powerpc/platforms/pseries/setup.c b/trunk/arch/powerpc/platforms/pseries/setup.c index f79f1278dfca..c3408ca8855e 100644 --- a/trunk/arch/powerpc/platforms/pseries/setup.c +++ 
b/trunk/arch/powerpc/platforms/pseries/setup.c @@ -39,7 +39,6 @@ #include #include #include -#include #include #include @@ -75,6 +74,9 @@ EXPORT_SYMBOL(CMO_PageSize); int fwnmi_active; /* TRUE if an FWNMI handler is present */ +static void pseries_shared_idle_sleep(void); +static void pseries_dedicated_idle_sleep(void); + static struct device_node *pSeries_mpic_node; static void pSeries_show_cpuinfo(struct seq_file *m) @@ -190,7 +192,8 @@ static void __init pseries_mpic_init_IRQ(void) BUG_ON(openpic_addr == 0); /* Setup the openpic driver */ - mpic = mpic_alloc(pSeries_mpic_node, openpic_addr, 0, + mpic = mpic_alloc(pSeries_mpic_node, openpic_addr, + MPIC_PRIMARY, 16, 250, /* isu size, irq count */ " MPIC "); BUG_ON(mpic == NULL); @@ -349,25 +352,8 @@ static int alloc_dispatch_log_kmem_cache(void) } early_initcall(alloc_dispatch_log_kmem_cache); -static void pSeries_idle(void) -{ - /* This would call on the cpuidle framework, and the back-end pseries - * driver to go to idle states - */ - if (cpuidle_idle_call()) { - /* On error, execute default handler - * to go into low thread priority and possibly - * low power mode. - */ - HMT_low(); - HMT_very_low(); - } -} - static void __init pSeries_setup_arch(void) { - panic_timeout = 10; - /* Discover PIC type and setup ppc_md accordingly */ pseries_discover_pic(); @@ -388,9 +374,18 @@ static void __init pSeries_setup_arch(void) pSeries_nvram_init(); + /* Choose an idle loop */ if (firmware_has_feature(FW_FEATURE_SPLPAR)) { vpa_init(boot_cpuid); - ppc_md.power_save = pSeries_idle; + if (get_lppaca()->shared_proc) { + printk(KERN_DEBUG "Using shared processor idle loop\n"); + ppc_md.power_save = pseries_shared_idle_sleep; + } else { + printk(KERN_DEBUG "Using dedicated idle loop\n"); + ppc_md.power_save = pseries_dedicated_idle_sleep; + } + } else { + printk(KERN_DEBUG "Using default idle loop\n"); } if (firmware_has_feature(FW_FEATURE_LPAR)) @@ -591,6 +586,80 @@ static int __init pSeries_probe(void) return 1; } + +DECLARE_PER_CPU(long, smt_snooze_delay); + +static void pseries_dedicated_idle_sleep(void) +{ + unsigned int cpu = smp_processor_id(); + unsigned long start_snooze; + unsigned long in_purr, out_purr; + long snooze = __get_cpu_var(smt_snooze_delay); + + /* + * Indicate to the HV that we are idle. Now would be + * a good time to find other work to dispatch. + */ + get_lppaca()->idle = 1; + get_lppaca()->donate_dedicated_cpu = 1; + in_purr = mfspr(SPRN_PURR); + + /* + * We come in with interrupts disabled, and need_resched() + * has been checked recently. If we should poll for a little + * while, do so. + */ + if (snooze) { + start_snooze = get_tb() + snooze * tb_ticks_per_usec; + local_irq_enable(); + set_thread_flag(TIF_POLLING_NRFLAG); + + while ((snooze < 0) || (get_tb() < start_snooze)) { + if (need_resched() || cpu_is_offline(cpu)) + goto out; + ppc64_runlatch_off(); + HMT_low(); + HMT_very_low(); + } + + HMT_medium(); + clear_thread_flag(TIF_POLLING_NRFLAG); + smp_mb(); + local_irq_disable(); + if (need_resched() || cpu_is_offline(cpu)) + goto out; + } + + cede_processor(); + +out: + HMT_medium(); + out_purr = mfspr(SPRN_PURR); + get_lppaca()->wait_state_cycles += out_purr - in_purr; + get_lppaca()->donate_dedicated_cpu = 0; + get_lppaca()->idle = 0; +} + +static void pseries_shared_idle_sleep(void) +{ + /* + * Indicate to the HV that we are idle. Now would be + * a good time to find other work to dispatch. + */ + get_lppaca()->idle = 1; + + /* + * Yield the processor to the hypervisor. 
We return if + * an external interrupt occurs (which are driven prior + * to returning here) or if a prod occurs from another + * processor. When returning here, external interrupts + * are enabled. + */ + cede_processor(); + + get_lppaca()->idle = 0; +} + static int pSeries_pci_probe_mode(struct pci_bus *bus) { if (firmware_has_feature(FW_FEATURE_LPAR)) diff --git a/trunk/arch/powerpc/platforms/pseries/smp.c b/trunk/arch/powerpc/platforms/pseries/smp.c index bbc3c42f6730..26e93fd4c62b 100644 --- a/trunk/arch/powerpc/platforms/pseries/smp.c +++ b/trunk/arch/powerpc/platforms/pseries/smp.c @@ -148,7 +148,6 @@ static void __devinit smp_xics_setup_cpu(int cpu) set_cpu_current_state(cpu, CPU_STATE_ONLINE); set_default_offline_state(cpu); #endif - pseries_notify_cpuidle_add_cpu(cpu); } static int __devinit smp_pSeries_kick_cpu(int nr) diff --git a/trunk/arch/powerpc/platforms/wsp/Kconfig b/trunk/arch/powerpc/platforms/wsp/Kconfig index 57d22a2f4ba9..bd560c786ed6 100644 --- a/trunk/arch/powerpc/platforms/wsp/Kconfig +++ b/trunk/arch/powerpc/platforms/wsp/Kconfig @@ -1,28 +1,20 @@ config PPC_WSP bool select PPC_A2 - select GENERIC_TBSYNC - select PPC_ICSWX select PPC_SCOM select PPC_XICS select PPC_ICP_NATIVE select PCI select PPC_IO_WORKAROUNDS if PCI select PPC_INDIRECT_PIO if PCI - select PPC_WSP_COPRO default n menu "WSP platform selection" depends on PPC_BOOK3E_64 config PPC_PSR2 - bool "PowerEN System Reference Platform 2" - select EPAPR_BOOT - select PPC_WSP - default y - -config PPC_CHROMA - bool "PowerEN PCIe Chroma Card" + bool "PSR-2 platform" + select GENERIC_TBSYNC select EPAPR_BOOT select PPC_WSP default y diff --git a/trunk/arch/powerpc/platforms/wsp/Makefile b/trunk/arch/powerpc/platforms/wsp/Makefile index 56817ac98fc9..a1486b436f02 100644 --- a/trunk/arch/powerpc/platforms/wsp/Makefile +++ b/trunk/arch/powerpc/platforms/wsp/Makefile @@ -1,10 +1,8 @@ ccflags-y += -mno-minimal-toc -obj-y += setup.o ics.o wsp.o -obj-$(CONFIG_PPC_PSR2) += psr2.o -obj-$(CONFIG_PPC_CHROMA) += chroma.o h8.o -obj-$(CONFIG_PPC_WSP) += opb_pic.o +obj-y += setup.o ics.o +obj-$(CONFIG_PPC_PSR2) += psr2.o opb_pic.o obj-$(CONFIG_PPC_WSP) += scom_wsp.o obj-$(CONFIG_SMP) += smp.o scom_smp.o obj-$(CONFIG_PCI) += wsp_pci.o -obj-$(CONFIG_PCI_MSI) += msi.o +obj-$(CONFIG_PCI_MSI) += msi.o \ No newline at end of file diff --git a/trunk/arch/powerpc/platforms/wsp/chroma.c b/trunk/arch/powerpc/platforms/wsp/chroma.c deleted file mode 100644 index ca6fa26f6e63..000000000000 --- a/trunk/arch/powerpc/platforms/wsp/chroma.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2008-2011, IBM Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "ics.h" -#include "wsp.h" - -void __init chroma_setup_arch(void) -{ - wsp_setup_arch(); - wsp_setup_h8(); - -} - -static int __init chroma_probe(void) -{ - unsigned long root = of_get_flat_dt_root(); - - if (!of_flat_dt_is_compatible(root, "ibm,wsp-chroma")) - return 0; - - return 1; -} - -define_machine(chroma_md) { - .name = "Chroma PCIe", - .probe = chroma_probe, - .setup_arch = chroma_setup_arch, - .restart = wsp_h8_restart, - .power_off = wsp_h8_power_off, - .halt = wsp_halt, - .calibrate_decr = generic_calibrate_decr, - .init_IRQ = wsp_setup_irq, - .progress = udbg_progress, - .power_save = book3e_idle, -}; - -machine_arch_initcall(chroma_md, wsp_probe_devices); diff --git a/trunk/arch/powerpc/platforms/wsp/h8.c b/trunk/arch/powerpc/platforms/wsp/h8.c deleted file mode 100644 index d18e6cc19df3..000000000000 --- a/trunk/arch/powerpc/platforms/wsp/h8.c +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright 2008-2011, IBM Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include -#include - -#include "wsp.h" - -/* - * The UART connection to the H8 is over ttyS1 which is just a 16550. - * We assume that FW has it setup right and no one messes with it. - */ - - -static u8 __iomem *h8; - -#define RBR 0 /* Receiver Buffer Register */ -#define THR 0 /* Transmitter Holding Register */ -#define LSR 5 /* Line Status Register */ -#define LSR_DR 0x01 /* LSR value for Data-Ready */ -#define LSR_THRE 0x20 /* LSR value for Transmitter-Holding-Register-Empty */ -static void wsp_h8_putc(int c) -{ - u8 lsr; - - do { - lsr = readb(h8 + LSR); - } while ((lsr & LSR_THRE) != LSR_THRE); - writeb(c, h8 + THR); -} - -static int wsp_h8_getc(void) -{ - u8 lsr; - - do { - lsr = readb(h8 + LSR); - } while ((lsr & LSR_DR) != LSR_DR); - - return readb(h8 + RBR); -} - -static void wsp_h8_puts(const char *s, int sz) -{ - int i; - - for (i = 0; i < sz; i++) { - wsp_h8_putc(s[i]); - - /* no flow control so wait for echo */ - wsp_h8_getc(); - } - wsp_h8_putc('\r'); - wsp_h8_putc('\n'); -} - -static void wsp_h8_terminal_cmd(const char *cmd, int sz) -{ - hard_irq_disable(); - wsp_h8_puts(cmd, sz); - /* should never return, but just in case */ - for (;;) - continue; -} - - -void wsp_h8_restart(char *cmd) -{ - static const char restart[] = "warm-reset"; - - (void)cmd; - wsp_h8_terminal_cmd(restart, sizeof(restart) - 1); -} - -void wsp_h8_power_off(void) -{ - static const char off[] = "power-off"; - - wsp_h8_terminal_cmd(off, sizeof(off) - 1); -} - -static void __iomem *wsp_h8_getaddr(void) -{ - struct device_node *aliases; - struct device_node *uart; - struct property *path; - void __iomem *va = NULL; - - /* - * there is nothing in the devtree to tell us which is mapped - * to the H8, but se know it is the second serial port. 
- */ - - aliases = of_find_node_by_path("/aliases"); - if (aliases == NULL) - return NULL; - - path = of_find_property(aliases, "serial1", NULL); - if (path == NULL) - goto out; - - uart = of_find_node_by_path(path->value); - if (uart == NULL) - goto out; - - va = of_iomap(uart, 0); - - /* remove it so no one messes with it */ - of_detach_node(uart); - of_node_put(uart); - -out: - of_node_put(aliases); - - return va; -} - -void __init wsp_setup_h8(void) -{ - h8 = wsp_h8_getaddr(); - - /* Devtree change? lets hard map it anyway */ - if (h8 == NULL) { - pr_warn("UART to H8 could not be found"); - h8 = ioremap(0xffc0008000ULL, 0x100); - } -} diff --git a/trunk/arch/powerpc/platforms/wsp/opb_pic.c b/trunk/arch/powerpc/platforms/wsp/opb_pic.c index 19f353dfcd03..be05631a3c1c 100644 --- a/trunk/arch/powerpc/platforms/wsp/opb_pic.c +++ b/trunk/arch/powerpc/platforms/wsp/opb_pic.c @@ -320,8 +320,7 @@ void __init opb_pic_init(void) } /* Attach opb interrupt handler to new virtual IRQ */ - rc = request_irq(virq, opb_irq_handler, IRQF_NO_THREAD, - "OPB LS Cascade", opb); + rc = request_irq(virq, opb_irq_handler, 0, "OPB LS Cascade", opb); if (rc) { printk("opb: request_irq failed: %d\n", rc); continue; diff --git a/trunk/arch/powerpc/platforms/wsp/psr2.c b/trunk/arch/powerpc/platforms/wsp/psr2.c index 0c1ae06d0be1..166f2e4b4bee 100644 --- a/trunk/arch/powerpc/platforms/wsp/psr2.c +++ b/trunk/arch/powerpc/platforms/wsp/psr2.c @@ -14,10 +14,10 @@ #include #include #include -#include #include #include +#include #include #include "ics.h" @@ -27,8 +27,7 @@ static void psr2_spin(void) { hard_irq_disable(); - for (;;) - continue; + for (;;) ; } static void psr2_restart(char *cmd) @@ -36,14 +35,43 @@ static void psr2_restart(char *cmd) psr2_spin(); } -static int __init psr2_probe(void) +static int psr2_probe_devices(void) { - unsigned long root = of_get_flat_dt_root(); + struct device_node *np; - if (of_flat_dt_is_compatible(root, "ibm,wsp-chroma")) { - /* chroma systems also claim they are psr2s */ - return 0; + /* Our RTC is a ds1500. 
It seems to be programatically compatible + * with the ds1511 for which we have a driver so let's use that + */ + np = of_find_compatible_node(NULL, NULL, "dallas,ds1500"); + if (np != NULL) { + struct resource res; + if (of_address_to_resource(np, 0, &res) == 0) + platform_device_register_simple("ds1511", 0, &res, 1); } + return 0; +} +machine_arch_initcall(psr2_md, psr2_probe_devices); + +static void __init psr2_setup_arch(void) +{ + /* init to some ~sane value until calibrate_delay() runs */ + loops_per_jiffy = 50000000; + + scom_init_wsp(); + + /* Setup SMP callback */ +#ifdef CONFIG_SMP + a2_setup_smp(); +#endif +#ifdef CONFIG_PCI + wsp_setup_pci(); +#endif + +} + +static int __init psr2_probe(void) +{ + unsigned long root = of_get_flat_dt_root(); if (!of_flat_dt_is_compatible(root, "ibm,psr2")) return 0; @@ -51,17 +79,21 @@ static int __init psr2_probe(void) return 1; } +static void __init psr2_init_irq(void) +{ + wsp_init_irq(); + opb_pic_init(); +} + define_machine(psr2_md) { .name = "PSR2 A2", .probe = psr2_probe, - .setup_arch = wsp_setup_arch, + .setup_arch = psr2_setup_arch, .restart = psr2_restart, .power_off = psr2_spin, .halt = psr2_spin, .calibrate_decr = generic_calibrate_decr, - .init_IRQ = wsp_setup_irq, + .init_IRQ = psr2_init_irq, .progress = udbg_progress, .power_save = book3e_idle, }; - -machine_arch_initcall(psr2_md, wsp_probe_devices); diff --git a/trunk/arch/powerpc/platforms/wsp/wsp.c b/trunk/arch/powerpc/platforms/wsp/wsp.c deleted file mode 100644 index d25cc96c21b8..000000000000 --- a/trunk/arch/powerpc/platforms/wsp/wsp.c +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Copyright 2008-2011, IBM Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include - -#include - -#include "wsp.h" -#include "ics.h" - -#define WSP_SOC_COMPATIBLE "ibm,wsp-soc" -#define PBIC_COMPATIBLE "ibm,wsp-pbic" -#define COPRO_COMPATIBLE "ibm,wsp-coprocessor" - -static int __init wsp_probe_buses(void) -{ - static __initdata struct of_device_id bus_ids[] = { - /* - * every node in between needs to be here or you won't - * find it - */ - { .compatible = WSP_SOC_COMPATIBLE, }, - { .compatible = PBIC_COMPATIBLE, }, - { .compatible = COPRO_COMPATIBLE, }, - {}, - }; - of_platform_bus_probe(NULL, bus_ids, NULL); - - return 0; -} - -void __init wsp_setup_arch(void) -{ - /* init to some ~sane value until calibrate_delay() runs */ - loops_per_jiffy = 50000000; - - scom_init_wsp(); - - /* Setup SMP callback */ -#ifdef CONFIG_SMP - a2_setup_smp(); -#endif -#ifdef CONFIG_PCI - wsp_setup_pci(); -#endif -} - -void __init wsp_setup_irq(void) -{ - wsp_init_irq(); - opb_pic_init(); -} - - -int __init wsp_probe_devices(void) -{ - struct device_node *np; - - /* Our RTC is a ds1500. 
It seems to be programatically compatible - * with the ds1511 for which we have a driver so let's use that - */ - np = of_find_compatible_node(NULL, NULL, "dallas,ds1500"); - if (np != NULL) { - struct resource res; - if (of_address_to_resource(np, 0, &res) == 0) - platform_device_register_simple("ds1511", 0, &res, 1); - } - - wsp_probe_buses(); - - return 0; -} - -void wsp_halt(void) -{ - u64 val; - scom_map_t m; - struct device_node *dn; - struct device_node *mine; - struct device_node *me; - - me = of_get_cpu_node(smp_processor_id(), NULL); - mine = scom_find_parent(me); - - /* This will halt all the A2s but not power off the chip */ - for_each_node_with_property(dn, "scom-controller") { - if (dn == mine) - continue; - m = scom_map(dn, 0, 1); - - /* read-modify-write it so the HW probe does not get - * confused */ - val = scom_read(m, 0); - val |= 1; - scom_write(m, 0, val); - scom_unmap(m); - } - m = scom_map(mine, 0, 1); - val = scom_read(m, 0); - val |= 1; - scom_write(m, 0, val); - /* should never return */ - scom_unmap(m); -} diff --git a/trunk/arch/powerpc/platforms/wsp/wsp.h b/trunk/arch/powerpc/platforms/wsp/wsp.h index 10c1d1fff362..33479818f62a 100644 --- a/trunk/arch/powerpc/platforms/wsp/wsp.h +++ b/trunk/arch/powerpc/platforms/wsp/wsp.h @@ -6,25 +6,15 @@ /* Devtree compatible strings for major devices */ #define PCIE_COMPATIBLE "ibm,wsp-pciex" -extern void wsp_setup_arch(void); -extern void wsp_setup_irq(void); -extern int wsp_probe_devices(void); -extern void wsp_halt(void); - extern void wsp_setup_pci(void); extern void scom_init_wsp(void); extern void a2_setup_smp(void); extern int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, struct device_node *np); -extern int smp_a2_cpu_bootable(unsigned int nr); -extern int __devinit smp_a2_kick_cpu(int nr); - -extern void opb_pic_init(void); +int smp_a2_cpu_bootable(unsigned int nr); +int __devinit smp_a2_kick_cpu(int nr); -/* chroma specific managment */ -extern void wsp_h8_restart(char *cmd); -extern void wsp_h8_power_off(void); -extern void __init wsp_setup_h8(void); +void opb_pic_init(void); #endif /* __WSP_H */ diff --git a/trunk/arch/powerpc/relocs_check.pl b/trunk/arch/powerpc/relocs_check.pl index 7f5b83808862..d2571096c3e9 100755 --- a/trunk/arch/powerpc/relocs_check.pl +++ b/trunk/arch/powerpc/relocs_check.pl @@ -32,18 +32,8 @@ next if (!/\s+R_/); # These relocations are okay - # On PPC64: - # R_PPC64_RELATIVE, R_PPC64_NONE, R_PPC64_ADDR64 - # On PPC: - # R_PPC_RELATIVE, R_PPC_ADDR16_HI, - # R_PPC_ADDR16_HA,R_PPC_ADDR16_LO, - # R_PPC_NONE - - next if (/\bR_PPC64_RELATIVE\b/ or /\bR_PPC64_NONE\b/ or - /\bR_PPC64_ADDR64\s+mach_/); - next if (/\bR_PPC_ADDR16_LO\b/ or /\bR_PPC_ADDR16_HI\b/ or - /\bR_PPC_ADDR16_HA\b/ or /\bR_PPC_RELATIVE\b/ or - /\bR_PPC_NONE\b/); + next if (/R_PPC64_RELATIVE/ or /R_PPC64_NONE/ or + /R_PPC64_ADDR64\s+mach_/); # If we see this type of relcoation it's an idication that # we /may/ be using an old version of binutils. 
diff --git a/trunk/arch/powerpc/sysdev/Makefile b/trunk/arch/powerpc/sysdev/Makefile index 5e37b4717864..84e13253aec5 100644 --- a/trunk/arch/powerpc/sysdev/Makefile +++ b/trunk/arch/powerpc/sysdev/Makefile @@ -17,11 +17,10 @@ obj-$(CONFIG_FSL_SOC) += fsl_soc.o obj-$(CONFIG_FSL_PCI) += fsl_pci.o $(fsl-msi-obj-y) obj-$(CONFIG_FSL_PMC) += fsl_pmc.o obj-$(CONFIG_FSL_LBC) += fsl_lbc.o -obj-$(CONFIG_FSL_IFC) += fsl_ifc.o obj-$(CONFIG_FSL_GTM) += fsl_gtm.o obj-$(CONFIG_FSL_85XX_CACHE_SRAM) += fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o -obj-$(CONFIG_FSL_RIO) += fsl_rio.o fsl_rmu.o +obj-$(CONFIG_FSL_RIO) += fsl_rio.o obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o obj-$(CONFIG_QUICC_ENGINE) += qe_lib/ obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ diff --git a/trunk/arch/powerpc/sysdev/fsl_ifc.c b/trunk/arch/powerpc/sysdev/fsl_ifc.c deleted file mode 100644 index b31f19f61031..000000000000 --- a/trunk/arch/powerpc/sysdev/fsl_ifc.c +++ /dev/null @@ -1,310 +0,0 @@ -/* - * Copyright 2011 Freescale Semiconductor, Inc - * - * Freescale Integrated Flash Controller - * - * Author: Dipen Dudhat - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; -EXPORT_SYMBOL(fsl_ifc_ctrl_dev); - -/* - * convert_ifc_address - convert the base address - * @addr_base: base address of the memory bank - */ -unsigned int convert_ifc_address(phys_addr_t addr_base) -{ - return addr_base & CSPR_BA; -} -EXPORT_SYMBOL(convert_ifc_address); - -/* - * fsl_ifc_find - find IFC bank - * @addr_base: base address of the memory bank - * - * This function walks IFC banks comparing "Base address" field of the CSPR - * registers with the supplied addr_base argument. When bases match this - * function returns bank number (starting with 0), otherwise it returns - * appropriate errno value. 
- */ -int fsl_ifc_find(phys_addr_t addr_base) -{ - int i = 0; - - if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs) - return -ENODEV; - - for (i = 0; i < ARRAY_SIZE(fsl_ifc_ctrl_dev->regs->cspr_cs); i++) { - __be32 cspr = in_be32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr); - if (cspr & CSPR_V && (cspr & CSPR_BA) == - convert_ifc_address(addr_base)) - return i; - } - - return -ENOENT; -} -EXPORT_SYMBOL(fsl_ifc_find); - -static int __devinit fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl) -{ - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; - - /* - * Clear all the common status and event registers - */ - if (in_be32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER) - out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER); - - /* enable all error and events */ - out_be32(&ifc->cm_evter_en, IFC_CM_EVTER_EN_CSEREN); - - /* enable all error and event interrupts */ - out_be32(&ifc->cm_evter_intr_en, IFC_CM_EVTER_INTR_EN_CSERIREN); - out_be32(&ifc->cm_erattr0, 0x0); - out_be32(&ifc->cm_erattr1, 0x0); - - return 0; -} - -static int fsl_ifc_ctrl_remove(struct platform_device *dev) -{ - struct fsl_ifc_ctrl *ctrl = dev_get_drvdata(&dev->dev); - - free_irq(ctrl->nand_irq, ctrl); - free_irq(ctrl->irq, ctrl); - - irq_dispose_mapping(ctrl->nand_irq); - irq_dispose_mapping(ctrl->irq); - - iounmap(ctrl->regs); - - dev_set_drvdata(&dev->dev, NULL); - kfree(ctrl); - - return 0; -} - -/* - * NAND events are split between an operational interrupt which only - * receives OPC, and an error interrupt that receives everything else, - * including non-NAND errors. Whichever interrupt gets to it first - * records the status and wakes the wait queue. - */ -static DEFINE_SPINLOCK(nand_irq_lock); - -static u32 check_nand_stat(struct fsl_ifc_ctrl *ctrl) -{ - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; - unsigned long flags; - u32 stat; - - spin_lock_irqsave(&nand_irq_lock, flags); - - stat = in_be32(&ifc->ifc_nand.nand_evter_stat); - if (stat) { - out_be32(&ifc->ifc_nand.nand_evter_stat, stat); - ctrl->nand_stat = stat; - wake_up(&ctrl->nand_wait); - } - - spin_unlock_irqrestore(&nand_irq_lock, flags); - - return stat; -} - -static irqreturn_t fsl_ifc_nand_irq(int irqno, void *data) -{ - struct fsl_ifc_ctrl *ctrl = data; - - if (check_nand_stat(ctrl)) - return IRQ_HANDLED; - - return IRQ_NONE; -} - -/* - * NOTE: This interrupt is used to report ifc events of various kinds, - * such as transaction errors on the chipselects. 
- */ -static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data) -{ - struct fsl_ifc_ctrl *ctrl = data; - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; - u32 err_axiid, err_srcid, status, cs_err, err_addr; - irqreturn_t ret = IRQ_NONE; - - /* read for chip select error */ - cs_err = in_be32(&ifc->cm_evter_stat); - if (cs_err) { - dev_err(ctrl->dev, "transaction sent to IFC is not mapped to" - "any memory bank 0x%08X\n", cs_err); - /* clear the chip select error */ - out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER); - - /* read error attribute registers print the error information */ - status = in_be32(&ifc->cm_erattr0); - err_addr = in_be32(&ifc->cm_erattr1); - - if (status & IFC_CM_ERATTR0_ERTYP_READ) - dev_err(ctrl->dev, "Read transaction error" - "CM_ERATTR0 0x%08X\n", status); - else - dev_err(ctrl->dev, "Write transaction error" - "CM_ERATTR0 0x%08X\n", status); - - err_axiid = (status & IFC_CM_ERATTR0_ERAID) >> - IFC_CM_ERATTR0_ERAID_SHIFT; - dev_err(ctrl->dev, "AXI ID of the error" - "transaction 0x%08X\n", err_axiid); - - err_srcid = (status & IFC_CM_ERATTR0_ESRCID) >> - IFC_CM_ERATTR0_ESRCID_SHIFT; - dev_err(ctrl->dev, "SRC ID of the error" - "transaction 0x%08X\n", err_srcid); - - dev_err(ctrl->dev, "Transaction Address corresponding to error" - "ERADDR 0x%08X\n", err_addr); - - ret = IRQ_HANDLED; - } - - if (check_nand_stat(ctrl)) - ret = IRQ_HANDLED; - - return ret; -} - -/* - * fsl_ifc_ctrl_probe - * - * called by device layer when it finds a device matching - * one our driver can handled. This code allocates all of - * the resources needed for the controller only. The - * resources for the NAND banks themselves are allocated - * in the chip probe function. -*/ -static int __devinit fsl_ifc_ctrl_probe(struct platform_device *dev) -{ - int ret = 0; - - - dev_info(&dev->dev, "Freescale Integrated Flash Controller\n"); - - fsl_ifc_ctrl_dev = kzalloc(sizeof(*fsl_ifc_ctrl_dev), GFP_KERNEL); - if (!fsl_ifc_ctrl_dev) - return -ENOMEM; - - dev_set_drvdata(&dev->dev, fsl_ifc_ctrl_dev); - - /* IOMAP the entire IFC region */ - fsl_ifc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0); - if (!fsl_ifc_ctrl_dev->regs) { - dev_err(&dev->dev, "failed to get memory region\n"); - ret = -ENODEV; - goto err; - } - - /* get the Controller level irq */ - fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0); - if (fsl_ifc_ctrl_dev->irq == NO_IRQ) { - dev_err(&dev->dev, "failed to get irq resource " - "for IFC\n"); - ret = -ENODEV; - goto err; - } - - /* get the nand machine irq */ - fsl_ifc_ctrl_dev->nand_irq = - irq_of_parse_and_map(dev->dev.of_node, 1); - if (fsl_ifc_ctrl_dev->nand_irq == NO_IRQ) { - dev_err(&dev->dev, "failed to get irq resource " - "for NAND Machine\n"); - ret = -ENODEV; - goto err; - } - - fsl_ifc_ctrl_dev->dev = &dev->dev; - - ret = fsl_ifc_ctrl_init(fsl_ifc_ctrl_dev); - if (ret < 0) - goto err; - - init_waitqueue_head(&fsl_ifc_ctrl_dev->nand_wait); - - ret = request_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_irq, IRQF_SHARED, - "fsl-ifc", fsl_ifc_ctrl_dev); - if (ret != 0) { - dev_err(&dev->dev, "failed to install irq (%d)\n", - fsl_ifc_ctrl_dev->irq); - goto err_irq; - } - - ret = request_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_nand_irq, 0, - "fsl-ifc-nand", fsl_ifc_ctrl_dev); - if (ret != 0) { - dev_err(&dev->dev, "failed to install irq (%d)\n", - fsl_ifc_ctrl_dev->nand_irq); - goto err_nandirq; - } - - return 0; - -err_nandirq: - free_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_ctrl_dev); - irq_dispose_mapping(fsl_ifc_ctrl_dev->nand_irq); -err_irq: - 
free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev); - irq_dispose_mapping(fsl_ifc_ctrl_dev->irq); -err: - return ret; -} - -static const struct of_device_id fsl_ifc_match[] = { - { - .compatible = "fsl,ifc", - }, - {}, -}; - -static struct platform_driver fsl_ifc_ctrl_driver = { - .driver = { - .name = "fsl-ifc", - .of_match_table = fsl_ifc_match, - }, - .probe = fsl_ifc_ctrl_probe, - .remove = fsl_ifc_ctrl_remove, -}; - -module_platform_driver(fsl_ifc_ctrl_driver); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Freescale Semiconductor"); -MODULE_DESCRIPTION("Freescale Integrated Flash Controller driver"); diff --git a/trunk/arch/powerpc/sysdev/fsl_lbc.c b/trunk/arch/powerpc/sysdev/fsl_lbc.c index 483126d7b3c0..d5c3c90ee698 100644 --- a/trunk/arch/powerpc/sysdev/fsl_lbc.c +++ b/trunk/arch/powerpc/sysdev/fsl_lbc.c @@ -332,38 +332,6 @@ static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev) return ret; } -#ifdef CONFIG_SUSPEND - -/* save lbc registers */ -static int fsl_lbc_suspend(struct platform_device *pdev, pm_message_t state) -{ - struct fsl_lbc_ctrl *ctrl = dev_get_drvdata(&pdev->dev); - struct fsl_lbc_regs __iomem *lbc = ctrl->regs; - - ctrl->saved_regs = kmalloc(sizeof(struct fsl_lbc_regs), GFP_KERNEL); - if (!ctrl->saved_regs) - return -ENOMEM; - - _memcpy_fromio(ctrl->saved_regs, lbc, sizeof(struct fsl_lbc_regs)); - return 0; -} - -/* restore lbc registers */ -static int fsl_lbc_resume(struct platform_device *pdev) -{ - struct fsl_lbc_ctrl *ctrl = dev_get_drvdata(&pdev->dev); - struct fsl_lbc_regs __iomem *lbc = ctrl->regs; - - if (ctrl->saved_regs) { - _memcpy_toio(lbc, ctrl->saved_regs, - sizeof(struct fsl_lbc_regs)); - kfree(ctrl->saved_regs); - ctrl->saved_regs = NULL; - } - return 0; -} -#endif /* CONFIG_SUSPEND */ - static const struct of_device_id fsl_lbc_match[] = { { .compatible = "fsl,elbc", }, { .compatible = "fsl,pq3-localbus", }, @@ -378,10 +346,6 @@ static struct platform_driver fsl_lbc_ctrl_driver = { .of_match_table = fsl_lbc_match, }, .probe = fsl_lbc_ctrl_probe, -#ifdef CONFIG_SUSPEND - .suspend = fsl_lbc_suspend, - .resume = fsl_lbc_resume, -#endif }; static int __init fsl_lbc_init(void) diff --git a/trunk/arch/powerpc/sysdev/fsl_msi.c b/trunk/arch/powerpc/sysdev/fsl_msi.c index ecb5c1946d22..e5c344d336ea 100644 --- a/trunk/arch/powerpc/sysdev/fsl_msi.c +++ b/trunk/arch/powerpc/sysdev/fsl_msi.c @@ -23,8 +23,6 @@ #include #include #include -#include - #include "fsl_msi.h" #include "fsl_pci.h" @@ -150,49 +148,14 @@ static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq, static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) { - struct pci_controller *hose = pci_bus_to_host(pdev->bus); - struct device_node *np; - phandle phandle = 0; int rc, hwirq = -ENOMEM; unsigned int virq; struct msi_desc *entry; struct msi_msg msg; struct fsl_msi *msi_data; - /* - * If the PCI node has an fsl,msi property, then we need to use it - * to find the specific MSI. - */ - np = of_parse_phandle(hose->dn, "fsl,msi", 0); - if (np) { - if (of_device_is_compatible(np, "fsl,mpic-msi") || - of_device_is_compatible(np, "fsl,vmpic-msi")) - phandle = np->phandle; - else { - dev_err(&pdev->dev, - "node %s has an invalid fsl,msi phandle %u\n", - hose->dn->full_name, np->phandle); - return -EINVAL; - } - } - list_for_each_entry(entry, &pdev->msi_list, list) { - /* - * Loop over all the MSI devices until we find one that has an - * available interrupt. 
- */ list_for_each_entry(msi_data, &msi_head, list) { - /* - * If the PCI node has an fsl,msi property, then we - * restrict our search to the corresponding MSI node. - * The simplest way is to skip over MSI nodes with the - * wrong phandle. Under the Freescale hypervisor, this - * has the additional benefit of skipping over MSI - * nodes that are not mapped in the PAMU. - */ - if (phandle && (phandle != msi_data->phandle)) - continue; - hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1); if (hwirq >= 0) break; @@ -200,14 +163,16 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) if (hwirq < 0) { rc = hwirq; - dev_err(&pdev->dev, "could not allocate MSI interrupt\n"); + pr_debug("%s: fail allocating msi interrupt\n", + __func__); goto out_free; } virq = irq_create_mapping(msi_data->irqhost, hwirq); if (virq == NO_IRQ) { - dev_err(&pdev->dev, "fail mapping hwirq %i\n", hwirq); + pr_debug("%s: fail mapping hwirq 0x%x\n", + __func__, hwirq); msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1); rc = -ENOSPC; goto out_free; @@ -236,7 +201,6 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) u32 intr_index; u32 have_shift = 0; struct fsl_msi_cascade_data *cascade_data; - unsigned int ret; cascade_data = irq_get_handler_data(irq); msi_data = cascade_data->msi_data; @@ -268,14 +232,6 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) case FSL_PIC_IP_IPIC: msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4); break; - case FSL_PIC_IP_VMPIC: - ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value); - if (ret) { - pr_err("fsl-msi: fh_vmpic_get_msir() failed for " - "irq %u (ret=%u)\n", irq, ret); - msir_value = 0; - } - break; } while (msir_value) { @@ -293,7 +249,6 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) switch (msi_data->feature & FSL_PIC_IP_MASK) { case FSL_PIC_IP_MPIC: - case FSL_PIC_IP_VMPIC: chip->irq_eoi(idata); break; case FSL_PIC_IP_IPIC: @@ -323,8 +278,7 @@ static int fsl_of_msi_remove(struct platform_device *ofdev) } if (msi->bitmap.bitmap) msi_bitmap_free(&msi->bitmap); - if ((msi->feature & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) - iounmap(msi->msi_regs); + iounmap(msi->msi_regs); kfree(msi); return 0; @@ -396,37 +350,25 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev) goto error_out; } - /* - * Under the Freescale hypervisor, the msi nodes don't have a 'reg' - * property. Instead, we use hypercalls to access the MSI. - */ - if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) { - err = of_address_to_resource(dev->dev.of_node, 0, &res); - if (err) { - dev_err(&dev->dev, "invalid resource for node %s\n", + /* Get the MSI reg base */ + err = of_address_to_resource(dev->dev.of_node, 0, &res); + if (err) { + dev_err(&dev->dev, "%s resource error!\n", dev->dev.of_node->full_name); - goto error_out; - } + goto error_out; + } - msi->msi_regs = ioremap(res.start, resource_size(&res)); - if (!msi->msi_regs) { - dev_err(&dev->dev, "could not map node %s\n", - dev->dev.of_node->full_name); - goto error_out; - } - msi->msiir_offset = - features->msiir_offset + (res.start & 0xfffff); + msi->msi_regs = ioremap(res.start, resource_size(&res)); + if (!msi->msi_regs) { + dev_err(&dev->dev, "ioremap problem failed\n"); + goto error_out; } msi->feature = features->fsl_pic_ip; msi->irqhost->host_data = msi; - /* - * Remember the phandle, so that we can match with any PCI nodes - * that have an "fsl,msi" property. 
- */ - msi->phandle = dev->dev.of_node->phandle; + msi->msiir_offset = features->msiir_offset + (res.start & 0xfffff); rc = fsl_msi_init_allocator(msi); if (rc) { @@ -495,11 +437,6 @@ static const struct fsl_msi_feature ipic_msi_feature = { .msiir_offset = 0x38, }; -static const struct fsl_msi_feature vmpic_msi_feature = { - .fsl_pic_ip = FSL_PIC_IP_VMPIC, - .msiir_offset = 0, -}; - static const struct of_device_id fsl_of_msi_ids[] = { { .compatible = "fsl,mpic-msi", @@ -509,10 +446,6 @@ static const struct of_device_id fsl_of_msi_ids[] = { .compatible = "fsl,ipic-msi", .data = (void *)&ipic_msi_feature, }, - { - .compatible = "fsl,vmpic-msi", - .data = (void *)&vmpic_msi_feature, - }, {} }; diff --git a/trunk/arch/powerpc/sysdev/fsl_msi.h b/trunk/arch/powerpc/sysdev/fsl_msi.h index f6c646a52541..1313abbc5200 100644 --- a/trunk/arch/powerpc/sysdev/fsl_msi.h +++ b/trunk/arch/powerpc/sysdev/fsl_msi.h @@ -13,17 +13,15 @@ #ifndef _POWERPC_SYSDEV_FSL_MSI_H #define _POWERPC_SYSDEV_FSL_MSI_H -#include #include #define NR_MSI_REG 8 #define IRQS_PER_MSI_REG 32 #define NR_MSI_IRQS (NR_MSI_REG * IRQS_PER_MSI_REG) -#define FSL_PIC_IP_MASK 0x0000000F -#define FSL_PIC_IP_MPIC 0x00000001 -#define FSL_PIC_IP_IPIC 0x00000002 -#define FSL_PIC_IP_VMPIC 0x00000003 +#define FSL_PIC_IP_MASK 0x0000000F +#define FSL_PIC_IP_MPIC 0x00000001 +#define FSL_PIC_IP_IPIC 0x00000002 struct fsl_msi { struct irq_host *irqhost; @@ -38,8 +36,6 @@ struct fsl_msi { struct msi_bitmap bitmap; struct list_head list; /* support multiple MSI banks */ - - phandle phandle; }; #endif /* _POWERPC_SYSDEV_FSL_MSI_H */ diff --git a/trunk/arch/powerpc/sysdev/fsl_pci.c b/trunk/arch/powerpc/sysdev/fsl_pci.c index 3b61e8cf3421..4ce547e00473 100644 --- a/trunk/arch/powerpc/sysdev/fsl_pci.c +++ b/trunk/arch/powerpc/sysdev/fsl_pci.c @@ -65,30 +65,6 @@ static int __init fsl_pcie_check_link(struct pci_controller *hose) } #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) - -#define MAX_PHYS_ADDR_BITS 40 -static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS; - -static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask) -{ - if (!dev->dma_mask || !dma_supported(dev, dma_mask)) - return -EIO; - - /* - * Fixup PCI devices that are able to DMA to above the physical - * address width of the SoC such that we can address any internal - * SoC address from across PCI if needed - */ - if ((dev->bus == &pci_bus_type) && - dma_mask >= DMA_BIT_MASK(MAX_PHYS_ADDR_BITS)) { - set_dma_ops(dev, &dma_direct_ops); - set_dma_offset(dev, pci64_dma_offset); - } - - *dev->dma_mask = dma_mask; - return 0; -} - static int __init setup_one_atmu(struct ccsr_pci __iomem *pci, unsigned int index, const struct resource *res, resource_size_t offset) @@ -137,8 +113,6 @@ static void __init setup_pci_atmu(struct pci_controller *hose, u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL | PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP; char *name = hose->dn->full_name; - const u64 *reg; - int len; pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n", (u64)rsrc->start, (u64)resource_size(rsrc)); @@ -231,33 +205,6 @@ static void __init setup_pci_atmu(struct pci_controller *hose, /* Setup inbound mem window */ mem = memblock_end_of_DRAM(); - - /* - * The msi-address-64 property, if it exists, indicates the physical - * address of the MSIIR register. Normally, this register is located - * inside CCSR, so the ATMU that covers all of CCSR is used. But if - * this property exists, then we normally need to create a new ATMU - * for it. For now, however, we cheat. 
The only entity that creates - * this property is the Freescale hypervisor, and the address is - * specified in the partition configuration. Typically, the address - * is located in the page immediately after the end of DDR. If so, we - * can avoid allocating a new ATMU by extending the DDR ATMU by one - * page. - */ - reg = of_get_property(hose->dn, "msi-address-64", &len); - if (reg && (len == sizeof(u64))) { - u64 address = be64_to_cpup(reg); - - if ((address >= mem) && (address < (mem + PAGE_SIZE))) { - pr_info("%s: extending DDR ATMU to cover MSIIR", name); - mem += PAGE_SIZE; - } else { - /* TODO: Create a new ATMU for MSIIR */ - pr_warn("%s: msi-address-64 address of %llx is " - "unsupported\n", name, address); - } - } - sz = min(mem, paddr_lo); mem_log = __ilog2_u64(sz); @@ -281,37 +228,6 @@ static void __init setup_pci_atmu(struct pci_controller *hose, hose->dma_window_base_cur = 0x00000000; hose->dma_window_size = (resource_size_t)sz; - - /* - * if we have >4G of memory setup second PCI inbound window to - * let devices that are 64-bit address capable to work w/o - * SWIOTLB and access the full range of memory - */ - if (sz != mem) { - mem_log = __ilog2_u64(mem); - - /* Size window up if we dont fit in exact power-of-2 */ - if ((1ull << mem_log) != mem) - mem_log++; - - piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1); - - /* Setup inbound memory window */ - out_be32(&pci->piw[win_idx].pitar, 0x00000000); - out_be32(&pci->piw[win_idx].piwbear, - pci64_dma_offset >> 44); - out_be32(&pci->piw[win_idx].piwbar, - pci64_dma_offset >> 12); - out_be32(&pci->piw[win_idx].piwar, piwar); - - /* - * install our own dma_set_mask handler to fixup dma_ops - * and dma_offset - */ - ppc_md.dma_set_mask = fsl_pci_dma_set_mask; - - pr_info("%s: Setup 64-bit PCI DMA window\n", name); - } } else { u64 paddr = 0; diff --git a/trunk/arch/powerpc/sysdev/fsl_rio.c b/trunk/arch/powerpc/sysdev/fsl_rio.c index a4c4f4a932d8..22ffccd8bef5 100644 --- a/trunk/arch/powerpc/sysdev/fsl_rio.c +++ b/trunk/arch/powerpc/sysdev/fsl_rio.c @@ -10,7 +10,7 @@ * - Added Port-Write message handling * - Added Machine Check exception handling * - * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc. + * Copyright (C) 2007, 2008, 2010 Freescale Semiconductor, Inc. * Zhang Wei * * Copyright 2005 MontaVista Software, Inc. 
@@ -28,33 +28,240 @@ #include #include #include +#include +#include #include #include #include +#include -#include -#include +#include #include - -#include "fsl_rio.h" +#include #undef DEBUG_PW /* Port-Write debugging */ +/* RapidIO definition irq, which read from OF-tree */ +#define IRQ_RIO_BELL(m) (((struct rio_priv *)(m->priv))->bellirq) +#define IRQ_RIO_TX(m) (((struct rio_priv *)(m->priv))->txirq) +#define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq) +#define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq) + +#define IPWSR_CLEAR 0x98 +#define OMSR_CLEAR 0x1cb3 +#define IMSR_CLEAR 0x491 +#define IDSR_CLEAR 0x91 +#define ODSR_CLEAR 0x1c00 +#define LTLEECSR_ENABLE_ALL 0xFFC000FC +#define ESCSR_CLEAR 0x07120204 +#define IECSR_CLEAR 0x80000000 + #define RIO_PORT1_EDCSR 0x0640 #define RIO_PORT2_EDCSR 0x0680 #define RIO_PORT1_IECSR 0x10130 #define RIO_PORT2_IECSR 0x101B0 - +#define RIO_IM0SR 0x13064 +#define RIO_IM1SR 0x13164 +#define RIO_OM0SR 0x13004 +#define RIO_OM1SR 0x13104 + +#define RIO_ATMU_REGS_OFFSET 0x10c00 +#define RIO_P_MSG_REGS_OFFSET 0x11000 +#define RIO_S_MSG_REGS_OFFSET 0x13000 #define RIO_GCCSR 0x13c #define RIO_ESCSR 0x158 -#define ESCSR_CLEAR 0x07120204 #define RIO_PORT2_ESCSR 0x178 #define RIO_CCSR 0x15c +#define RIO_LTLEDCSR 0x0608 #define RIO_LTLEDCSR_IER 0x80000000 #define RIO_LTLEDCSR_PRT 0x01000000 -#define IECSR_CLEAR 0x80000000 +#define RIO_LTLEECSR 0x060c +#define RIO_EPWISR 0x10010 #define RIO_ISR_AACR 0x10120 #define RIO_ISR_AACR_AA 0x1 /* Accept All ID */ +#define RIO_MAINT_WIN_SIZE 0x400000 +#define RIO_DBELL_WIN_SIZE 0x1000 + +#define RIO_MSG_OMR_MUI 0x00000002 +#define RIO_MSG_OSR_TE 0x00000080 +#define RIO_MSG_OSR_QOI 0x00000020 +#define RIO_MSG_OSR_QFI 0x00000010 +#define RIO_MSG_OSR_MUB 0x00000004 +#define RIO_MSG_OSR_EOMI 0x00000002 +#define RIO_MSG_OSR_QEI 0x00000001 + +#define RIO_MSG_IMR_MI 0x00000002 +#define RIO_MSG_ISR_TE 0x00000080 +#define RIO_MSG_ISR_QFI 0x00000010 +#define RIO_MSG_ISR_DIQI 0x00000001 + +#define RIO_IPWMR_SEN 0x00100000 +#define RIO_IPWMR_QFIE 0x00000100 +#define RIO_IPWMR_EIE 0x00000020 +#define RIO_IPWMR_CQ 0x00000002 +#define RIO_IPWMR_PWE 0x00000001 + +#define RIO_IPWSR_QF 0x00100000 +#define RIO_IPWSR_TE 0x00000080 +#define RIO_IPWSR_QFI 0x00000010 +#define RIO_IPWSR_PWD 0x00000008 +#define RIO_IPWSR_PWB 0x00000004 + +/* EPWISR Error match value */ +#define RIO_EPWISR_PINT1 0x80000000 +#define RIO_EPWISR_PINT2 0x40000000 +#define RIO_EPWISR_MU 0x00000002 +#define RIO_EPWISR_PW 0x00000001 + +#define RIO_MSG_DESC_SIZE 32 +#define RIO_MSG_BUFFER_SIZE 4096 +#define RIO_MIN_TX_RING_SIZE 2 +#define RIO_MAX_TX_RING_SIZE 2048 +#define RIO_MIN_RX_RING_SIZE 2 +#define RIO_MAX_RX_RING_SIZE 2048 + +#define DOORBELL_DMR_DI 0x00000002 +#define DOORBELL_DSR_TE 0x00000080 +#define DOORBELL_DSR_QFI 0x00000010 +#define DOORBELL_DSR_DIQI 0x00000001 +#define DOORBELL_TID_OFFSET 0x02 +#define DOORBELL_SID_OFFSET 0x04 +#define DOORBELL_INFO_OFFSET 0x06 + +#define DOORBELL_MESSAGE_SIZE 0x08 +#define DBELL_SID(x) (*(u16 *)(x + DOORBELL_SID_OFFSET)) +#define DBELL_TID(x) (*(u16 *)(x + DOORBELL_TID_OFFSET)) +#define DBELL_INF(x) (*(u16 *)(x + DOORBELL_INFO_OFFSET)) + +struct rio_atmu_regs { + u32 rowtar; + u32 rowtear; + u32 rowbar; + u32 pad2; + u32 rowar; + u32 pad3[3]; +}; + +struct rio_msg_regs { + u32 omr; /* 0xD_3000 - Outbound message 0 mode register */ + u32 osr; /* 0xD_3004 - Outbound message 0 status register */ + u32 pad1; + u32 odqdpar; /* 0xD_300C - Outbound message 0 descriptor queue + dequeue pointer address register 
*/ + u32 pad2; + u32 osar; /* 0xD_3014 - Outbound message 0 source address + register */ + u32 odpr; /* 0xD_3018 - Outbound message 0 destination port + register */ + u32 odatr; /* 0xD_301C - Outbound message 0 destination attributes + Register*/ + u32 odcr; /* 0xD_3020 - Outbound message 0 double-word count + register */ + u32 pad3; + u32 odqepar; /* 0xD_3028 - Outbound message 0 descriptor queue + enqueue pointer address register */ + u32 pad4[13]; + u32 imr; /* 0xD_3060 - Inbound message 0 mode register */ + u32 isr; /* 0xD_3064 - Inbound message 0 status register */ + u32 pad5; + u32 ifqdpar; /* 0xD_306C - Inbound message 0 frame queue dequeue + pointer address register*/ + u32 pad6; + u32 ifqepar; /* 0xD_3074 - Inbound message 0 frame queue enqueue + pointer address register */ + u32 pad7[226]; + u32 odmr; /* 0xD_3400 - Outbound doorbell mode register */ + u32 odsr; /* 0xD_3404 - Outbound doorbell status register */ + u32 res0[4]; + u32 oddpr; /* 0xD_3418 - Outbound doorbell destination port + register */ + u32 oddatr; /* 0xD_341c - Outbound doorbell destination attributes + register */ + u32 res1[3]; + u32 odretcr; /* 0xD_342C - Outbound doorbell retry error threshold + configuration register */ + u32 res2[12]; + u32 dmr; /* 0xD_3460 - Inbound doorbell mode register */ + u32 dsr; /* 0xD_3464 - Inbound doorbell status register */ + u32 pad8; + u32 dqdpar; /* 0xD_346C - Inbound doorbell queue dequeue Pointer + address register */ + u32 pad9; + u32 dqepar; /* 0xD_3474 - Inbound doorbell Queue enqueue pointer + address register */ + u32 pad10[26]; + u32 pwmr; /* 0xD_34E0 - Inbound port-write mode register */ + u32 pwsr; /* 0xD_34E4 - Inbound port-write status register */ + u32 epwqbar; /* 0xD_34E8 - Extended Port-Write Queue Base Address + register */ + u32 pwqbar; /* 0xD_34EC - Inbound port-write queue base address + register */ +}; + +struct rio_tx_desc { + u32 res1; + u32 saddr; + u32 dport; + u32 dattr; + u32 res2; + u32 res3; + u32 dwcnt; + u32 res4; +}; + +struct rio_dbell_ring { + void *virt; + dma_addr_t phys; +}; + +struct rio_msg_tx_ring { + void *virt; + dma_addr_t phys; + void *virt_buffer[RIO_MAX_TX_RING_SIZE]; + dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE]; + int tx_slot; + int size; + void *dev_id; +}; + +struct rio_msg_rx_ring { + void *virt; + dma_addr_t phys; + void *virt_buffer[RIO_MAX_RX_RING_SIZE]; + int rx_slot; + int size; + void *dev_id; +}; + +struct rio_port_write_msg { + void *virt; + dma_addr_t phys; + u32 msg_count; + u32 err_count; + u32 discard_count; +}; + +struct rio_priv { + struct device *dev; + void __iomem *regs_win; + struct rio_atmu_regs __iomem *atmu_regs; + struct rio_atmu_regs __iomem *maint_atmu_regs; + struct rio_atmu_regs __iomem *dbell_atmu_regs; + void __iomem *dbell_win; + void __iomem *maint_win; + struct rio_msg_regs __iomem *msg_regs; + struct rio_dbell_ring dbell_ring; + struct rio_msg_tx_ring msg_tx_ring; + struct rio_msg_rx_ring msg_rx_ring; + struct rio_port_write_msg port_write_msg; + int bellirq; + int txirq; + int rxirq; + int pwirq; + struct work_struct pw_work; + struct kfifo pw_fifo; + spinlock_t pw_fifo_lock; +}; #define __fsl_read_rio_config(x, addr, err, op) \ __asm__ __volatile__( \ @@ -72,12 +279,7 @@ : "=r" (err), "=r" (x) \ : "b" (addr), "i" (-EFAULT), "0" (err)) -void __iomem *rio_regs_win; -void __iomem *rmu_regs_win; -resource_size_t rio_law_start; - -struct fsl_rio_dbell *dbell; -struct fsl_rio_pw *pw; +static void __iomem *rio_regs_win; #ifdef CONFIG_E500 int fsl_rio_mcheck_exception(struct pt_regs *regs) @@ -108,6 
+310,42 @@ int fsl_rio_mcheck_exception(struct pt_regs *regs) EXPORT_SYMBOL_GPL(fsl_rio_mcheck_exception); #endif +/** + * fsl_rio_doorbell_send - Send a MPC85xx doorbell message + * @mport: RapidIO master port info + * @index: ID of RapidIO interface + * @destid: Destination ID of target device + * @data: 16-bit info field of RapidIO doorbell message + * + * Sends a MPC85xx doorbell message. Returns %0 on success or + * %-EINVAL on failure. + */ +static int fsl_rio_doorbell_send(struct rio_mport *mport, + int index, u16 destid, u16 data) +{ + struct rio_priv *priv = mport->priv; + pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n", + index, destid, data); + switch (mport->phy_type) { + case RIO_PHY_PARALLEL: + out_be32(&priv->dbell_atmu_regs->rowtar, destid << 22); + out_be16(priv->dbell_win, data); + break; + case RIO_PHY_SERIAL: + /* In the serial version silicons, such as MPC8548, MPC8641, + * below operations is must be. + */ + out_be32(&priv->msg_regs->odmr, 0x00000000); + out_be32(&priv->msg_regs->odretcr, 0x00000004); + out_be32(&priv->msg_regs->oddpr, destid << 16); + out_be32(&priv->msg_regs->oddatr, data); + out_be32(&priv->msg_regs->odmr, 0x00000001); + break; + } + + return 0; +} + /** * fsl_local_config_read - Generate a MPC85xx local config space read * @mport: RapidIO master port info @@ -146,8 +384,8 @@ static int fsl_local_config_write(struct rio_mport *mport, { struct rio_priv *priv = mport->priv; pr_debug - ("fsl_local_config_write: index %d offset %8.8x data %8.8x\n", - index, offset, data); + ("fsl_local_config_write: index %d offset %8.8x data %8.8x\n", + index, offset, data); out_be32(priv->regs_win + offset, data); return 0; @@ -175,9 +413,8 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid, u32 rval, err = 0; pr_debug - ("fsl_rio_config_read:" - " index %d destid %d hopcount %d offset %8.8x len %d\n", - index, destid, hopcount, offset, len); + ("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n", + index, destid, hopcount, offset, len); /* 16MB maintenance window possible */ /* allow only aligned access to maintenance registers */ @@ -186,7 +423,7 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid, out_be32(&priv->maint_atmu_regs->rowtar, (destid << 22) | (hopcount << 12) | (offset >> 12)); - out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); + out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1)); switch (len) { @@ -233,9 +470,8 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid, struct rio_priv *priv = mport->priv; u8 *data; pr_debug - ("fsl_rio_config_write:" - " index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n", - index, destid, hopcount, offset, len, val); + ("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n", + index, destid, hopcount, offset, len, val); /* 16MB maintenance windows possible */ /* allow only aligned access to maintenance registers */ @@ -244,7 +480,7 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid, out_be32(&priv->maint_atmu_regs->rowtar, (destid << 22) | (hopcount << 12) | (offset >> 12)); - out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); + out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1)); switch (len) { @@ -264,7 +500,590 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid, return 0; } 
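/*
 * Illustrative sketch, not part of the patch: fsl_rio_config_read() and
 * fsl_rio_config_write() above program the maintenance ATMU by packing the
 * low destination ID bits, the hop count and the upper offset bits into
 * ROWTAR, with the remaining destination ID bits going into ROWTEAR.  This
 * is a minimal, self-contained restatement of that encoding, assuming only
 * the field layout visible in the code above.
 */
#include <stdint.h>

/* destid[9:0] -> ROWTAR[31:22], hopcount -> ROWTAR[21:12], offset[23:12] -> ROWTAR[11:0] */
static inline uint32_t fsl_maint_rowtar(uint16_t destid, uint8_t hopcount,
					uint32_t offset)
{
	return ((uint32_t)destid << 22) | ((uint32_t)hopcount << 12) |
	       (offset >> 12);
}

/* Destination ID bits that do not fit in ROWTAR spill into ROWTEAR */
static inline uint32_t fsl_maint_rowtear(uint16_t destid)
{
	return (uint32_t)destid >> 10;
}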
-void fsl_rio_port_error_handler(int offset) +/** + * fsl_add_outb_message - Add message to the MPC85xx outbound message queue + * @mport: Master port with outbound message queue + * @rdev: Target of outbound message + * @mbox: Outbound mailbox + * @buffer: Message to add to outbound queue + * @len: Length of message + * + * Adds the @buffer message to the MPC85xx outbound message queue. Returns + * %0 on success or %-EINVAL on failure. + */ +static int +fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, + void *buffer, size_t len) +{ + struct rio_priv *priv = mport->priv; + u32 omr; + struct rio_tx_desc *desc = (struct rio_tx_desc *)priv->msg_tx_ring.virt + + priv->msg_tx_ring.tx_slot; + int ret = 0; + + pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \ + "%8.8x len %8.8x\n", rdev->destid, mbox, (int)buffer, len); + + if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) { + ret = -EINVAL; + goto out; + } + + /* Copy and clear rest of buffer */ + memcpy(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot], buffer, + len); + if (len < (RIO_MAX_MSG_SIZE - 4)) + memset(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot] + + len, 0, RIO_MAX_MSG_SIZE - len); + + switch (mport->phy_type) { + case RIO_PHY_PARALLEL: + /* Set mbox field for message */ + desc->dport = mbox & 0x3; + + /* Enable EOMI interrupt, set priority, and set destid */ + desc->dattr = 0x28000000 | (rdev->destid << 2); + break; + case RIO_PHY_SERIAL: + /* Set mbox field for message, and set destid */ + desc->dport = (rdev->destid << 16) | (mbox & 0x3); + + /* Enable EOMI interrupt and priority */ + desc->dattr = 0x28000000; + break; + } + + /* Set transfer size aligned to next power of 2 (in double words) */ + desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len); + + /* Set snooping and source buffer address */ + desc->saddr = 0x00000004 + | priv->msg_tx_ring.phys_buffer[priv->msg_tx_ring.tx_slot]; + + /* Increment enqueue pointer */ + omr = in_be32(&priv->msg_regs->omr); + out_be32(&priv->msg_regs->omr, omr | RIO_MSG_OMR_MUI); + + /* Go to next descriptor */ + if (++priv->msg_tx_ring.tx_slot == priv->msg_tx_ring.size) + priv->msg_tx_ring.tx_slot = 0; + + out: + return ret; +} + +/** + * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler + * @irq: Linux interrupt number + * @dev_instance: Pointer to interrupt-specific data + * + * Handles outbound message interrupts. Executes a register outbound + * mailbox event handler and acks the interrupt occurrence. 
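+ * On end-of-message the completed descriptor slot is derived from the
+ * ODQDPAR dequeue pointer and handed to the registered outbound mailbox
+ * callback; status bits are acknowledged by writing them back to OSR.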
+ */ +static irqreturn_t +fsl_rio_tx_handler(int irq, void *dev_instance) +{ + int osr; + struct rio_mport *port = (struct rio_mport *)dev_instance; + struct rio_priv *priv = port->priv; + + osr = in_be32(&priv->msg_regs->osr); + + if (osr & RIO_MSG_OSR_TE) { + pr_info("RIO: outbound message transmission error\n"); + out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_TE); + goto out; + } + + if (osr & RIO_MSG_OSR_QOI) { + pr_info("RIO: outbound message queue overflow\n"); + out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_QOI); + goto out; + } + + if (osr & RIO_MSG_OSR_EOMI) { + u32 dqp = in_be32(&priv->msg_regs->odqdpar); + int slot = (dqp - priv->msg_tx_ring.phys) >> 5; + port->outb_msg[0].mcback(port, priv->msg_tx_ring.dev_id, -1, + slot); + + /* Ack the end-of-message interrupt */ + out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_EOMI); + } + + out: + return IRQ_HANDLED; +} + +/** + * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox + * @mport: Master port implementing the outbound message unit + * @dev_id: Device specific pointer to pass on event + * @mbox: Mailbox to open + * @entries: Number of entries in the outbound mailbox ring + * + * Initializes buffer ring, request the outbound message interrupt, + * and enables the outbound message unit. Returns %0 on success and + * %-EINVAL or %-ENOMEM on failure. + */ +static int +fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) +{ + int i, j, rc = 0; + struct rio_priv *priv = mport->priv; + + if ((entries < RIO_MIN_TX_RING_SIZE) || + (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) { + rc = -EINVAL; + goto out; + } + + /* Initialize shadow copy ring */ + priv->msg_tx_ring.dev_id = dev_id; + priv->msg_tx_ring.size = entries; + + for (i = 0; i < priv->msg_tx_ring.size; i++) { + priv->msg_tx_ring.virt_buffer[i] = + dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE, + &priv->msg_tx_ring.phys_buffer[i], GFP_KERNEL); + if (!priv->msg_tx_ring.virt_buffer[i]) { + rc = -ENOMEM; + for (j = 0; j < priv->msg_tx_ring.size; j++) + if (priv->msg_tx_ring.virt_buffer[j]) + dma_free_coherent(priv->dev, + RIO_MSG_BUFFER_SIZE, + priv->msg_tx_ring. + virt_buffer[j], + priv->msg_tx_ring. 
+ phys_buffer[j]); + goto out; + } + } + + /* Initialize outbound message descriptor ring */ + priv->msg_tx_ring.virt = dma_alloc_coherent(priv->dev, + priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE, + &priv->msg_tx_ring.phys, GFP_KERNEL); + if (!priv->msg_tx_ring.virt) { + rc = -ENOMEM; + goto out_dma; + } + memset(priv->msg_tx_ring.virt, 0, + priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE); + priv->msg_tx_ring.tx_slot = 0; + + /* Point dequeue/enqueue pointers at first entry in ring */ + out_be32(&priv->msg_regs->odqdpar, priv->msg_tx_ring.phys); + out_be32(&priv->msg_regs->odqepar, priv->msg_tx_ring.phys); + + /* Configure for snooping */ + out_be32(&priv->msg_regs->osar, 0x00000004); + + /* Clear interrupt status */ + out_be32(&priv->msg_regs->osr, 0x000000b3); + + /* Hook up outbound message handler */ + rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0, + "msg_tx", (void *)mport); + if (rc < 0) + goto out_irq; + + /* + * Configure outbound message unit + * Snooping + * Interrupts (all enabled, except QEIE) + * Chaining mode + * Disable + */ + out_be32(&priv->msg_regs->omr, 0x00100220); + + /* Set number of entries */ + out_be32(&priv->msg_regs->omr, + in_be32(&priv->msg_regs->omr) | + ((get_bitmask_order(entries) - 2) << 12)); + + /* Now enable the unit */ + out_be32(&priv->msg_regs->omr, in_be32(&priv->msg_regs->omr) | 0x1); + + out: + return rc; + + out_irq: + dma_free_coherent(priv->dev, + priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE, + priv->msg_tx_ring.virt, priv->msg_tx_ring.phys); + + out_dma: + for (i = 0; i < priv->msg_tx_ring.size; i++) + dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE, + priv->msg_tx_ring.virt_buffer[i], + priv->msg_tx_ring.phys_buffer[i]); + + return rc; +} + +/** + * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox + * @mport: Master port implementing the outbound message unit + * @mbox: Mailbox to close + * + * Disables the outbound message unit, free all buffers, and + * frees the outbound message interrupt. + */ +static void fsl_close_outb_mbox(struct rio_mport *mport, int mbox) +{ + struct rio_priv *priv = mport->priv; + /* Disable inbound message unit */ + out_be32(&priv->msg_regs->omr, 0); + + /* Free ring */ + dma_free_coherent(priv->dev, + priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE, + priv->msg_tx_ring.virt, priv->msg_tx_ring.phys); + + /* Free interrupt */ + free_irq(IRQ_RIO_TX(mport), (void *)mport); +} + +/** + * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler + * @irq: Linux interrupt number + * @dev_instance: Pointer to interrupt-specific data + * + * Handles inbound message interrupts. Executes a registered inbound + * mailbox event handler and acks the interrupt occurrence. + */ +static irqreturn_t +fsl_rio_rx_handler(int irq, void *dev_instance) +{ + int isr; + struct rio_mport *port = (struct rio_mport *)dev_instance; + struct rio_priv *priv = port->priv; + + isr = in_be32(&priv->msg_regs->isr); + + if (isr & RIO_MSG_ISR_TE) { + pr_info("RIO: inbound message reception error\n"); + out_be32((void *)&priv->msg_regs->isr, RIO_MSG_ISR_TE); + goto out; + } + + /* XXX Need to check/dispatch until queue empty */ + if (isr & RIO_MSG_ISR_DIQI) { + /* + * We implement *only* mailbox 0, but can receive messages + * for any mailbox/letter to that mailbox destination. So, + * make the callback with an unknown/invalid mailbox number + * argument. 
+ */ + port->inb_msg[0].mcback(port, priv->msg_rx_ring.dev_id, -1, -1); + + /* Ack the queueing interrupt */ + out_be32(&priv->msg_regs->isr, RIO_MSG_ISR_DIQI); + } + + out: + return IRQ_HANDLED; +} + +/** + * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox + * @mport: Master port implementing the inbound message unit + * @dev_id: Device specific pointer to pass on event + * @mbox: Mailbox to open + * @entries: Number of entries in the inbound mailbox ring + * + * Initializes buffer ring, request the inbound message interrupt, + * and enables the inbound message unit. Returns %0 on success + * and %-EINVAL or %-ENOMEM on failure. + */ +static int +fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) +{ + int i, rc = 0; + struct rio_priv *priv = mport->priv; + + if ((entries < RIO_MIN_RX_RING_SIZE) || + (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) { + rc = -EINVAL; + goto out; + } + + /* Initialize client buffer ring */ + priv->msg_rx_ring.dev_id = dev_id; + priv->msg_rx_ring.size = entries; + priv->msg_rx_ring.rx_slot = 0; + for (i = 0; i < priv->msg_rx_ring.size; i++) + priv->msg_rx_ring.virt_buffer[i] = NULL; + + /* Initialize inbound message ring */ + priv->msg_rx_ring.virt = dma_alloc_coherent(priv->dev, + priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE, + &priv->msg_rx_ring.phys, GFP_KERNEL); + if (!priv->msg_rx_ring.virt) { + rc = -ENOMEM; + goto out; + } + + /* Point dequeue/enqueue pointers at first entry in ring */ + out_be32(&priv->msg_regs->ifqdpar, (u32) priv->msg_rx_ring.phys); + out_be32(&priv->msg_regs->ifqepar, (u32) priv->msg_rx_ring.phys); + + /* Clear interrupt status */ + out_be32(&priv->msg_regs->isr, 0x00000091); + + /* Hook up inbound message handler */ + rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0, + "msg_rx", (void *)mport); + if (rc < 0) { + dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE, + priv->msg_tx_ring.virt_buffer[i], + priv->msg_tx_ring.phys_buffer[i]); + goto out; + } + + /* + * Configure inbound message unit: + * Snooping + * 4KB max message size + * Unmask all interrupt sources + * Disable + */ + out_be32(&priv->msg_regs->imr, 0x001b0060); + + /* Set number of queue entries */ + setbits32(&priv->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12); + + /* Now enable the unit */ + setbits32(&priv->msg_regs->imr, 0x1); + + out: + return rc; +} + +/** + * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox + * @mport: Master port implementing the inbound message unit + * @mbox: Mailbox to close + * + * Disables the inbound message unit, free all buffers, and + * frees the inbound message interrupt. + */ +static void fsl_close_inb_mbox(struct rio_mport *mport, int mbox) +{ + struct rio_priv *priv = mport->priv; + /* Disable inbound message unit */ + out_be32(&priv->msg_regs->imr, 0); + + /* Free ring */ + dma_free_coherent(priv->dev, priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE, + priv->msg_rx_ring.virt, priv->msg_rx_ring.phys); + + /* Free interrupt */ + free_irq(IRQ_RIO_RX(mport), (void *)mport); +} + +/** + * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue + * @mport: Master port implementing the inbound message unit + * @mbox: Inbound mailbox number + * @buf: Buffer to add to inbound queue + * + * Adds the @buf buffer to the MPC85xx inbound message queue. Returns + * %0 on success or %-EINVAL on failure. 
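+ * The buffer is recorded in the shadow ring at the current rx_slot;
+ * if that slot is still occupied the call fails with %-EINVAL and the
+ * buffer is not added.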
+ */ +static int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf) +{ + int rc = 0; + struct rio_priv *priv = mport->priv; + + pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n", + priv->msg_rx_ring.rx_slot); + + if (priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot]) { + printk(KERN_ERR + "RIO: error adding inbound buffer %d, buffer exists\n", + priv->msg_rx_ring.rx_slot); + rc = -EINVAL; + goto out; + } + + priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot] = buf; + if (++priv->msg_rx_ring.rx_slot == priv->msg_rx_ring.size) + priv->msg_rx_ring.rx_slot = 0; + + out: + return rc; +} + +/** + * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit + * @mport: Master port implementing the inbound message unit + * @mbox: Inbound mailbox number + * + * Gets the next available inbound message from the inbound message queue. + * A pointer to the message is returned on success or NULL on failure. + */ +static void *fsl_get_inb_message(struct rio_mport *mport, int mbox) +{ + struct rio_priv *priv = mport->priv; + u32 phys_buf, virt_buf; + void *buf = NULL; + int buf_idx; + + phys_buf = in_be32(&priv->msg_regs->ifqdpar); + + /* If no more messages, then bail out */ + if (phys_buf == in_be32(&priv->msg_regs->ifqepar)) + goto out2; + + virt_buf = (u32) priv->msg_rx_ring.virt + (phys_buf + - priv->msg_rx_ring.phys); + buf_idx = (phys_buf - priv->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE; + buf = priv->msg_rx_ring.virt_buffer[buf_idx]; + + if (!buf) { + printk(KERN_ERR + "RIO: inbound message copy failed, no buffers\n"); + goto out1; + } + + /* Copy max message size, caller is expected to allocate that big */ + memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE); + + /* Clear the available buffer */ + priv->msg_rx_ring.virt_buffer[buf_idx] = NULL; + + out1: + setbits32(&priv->msg_regs->imr, RIO_MSG_IMR_MI); + + out2: + return buf; +} + +/** + * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler + * @irq: Linux interrupt number + * @dev_instance: Pointer to interrupt-specific data + * + * Handles doorbell interrupts. Parses a list of registered + * doorbell event handlers and executes a matching event handler. 
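+ * The doorbell payload is read from the inbound ring at the offset taken
+ * from DQDPAR; doorbells whose info field matches no registered doorbell
+ * range are reported as spurious and discarded.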
+ */ +static irqreturn_t +fsl_rio_dbell_handler(int irq, void *dev_instance) +{ + int dsr; + struct rio_mport *port = (struct rio_mport *)dev_instance; + struct rio_priv *priv = port->priv; + + dsr = in_be32(&priv->msg_regs->dsr); + + if (dsr & DOORBELL_DSR_TE) { + pr_info("RIO: doorbell reception error\n"); + out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_TE); + goto out; + } + + if (dsr & DOORBELL_DSR_QFI) { + pr_info("RIO: doorbell queue full\n"); + out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI); + } + + /* XXX Need to check/dispatch until queue empty */ + if (dsr & DOORBELL_DSR_DIQI) { + u32 dmsg = + (u32) priv->dbell_ring.virt + + (in_be32(&priv->msg_regs->dqdpar) & 0xfff); + struct rio_dbell *dbell; + int found = 0; + + pr_debug + ("RIO: processing doorbell, sid %2.2x tid %2.2x info %4.4x\n", + DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg)); + + list_for_each_entry(dbell, &port->dbells, node) { + if ((dbell->res->start <= DBELL_INF(dmsg)) && + (dbell->res->end >= DBELL_INF(dmsg))) { + found = 1; + break; + } + } + if (found) { + dbell->dinb(port, dbell->dev_id, DBELL_SID(dmsg), DBELL_TID(dmsg), + DBELL_INF(dmsg)); + } else { + pr_debug + ("RIO: spurious doorbell, sid %2.2x tid %2.2x info %4.4x\n", + DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg)); + } + setbits32(&priv->msg_regs->dmr, DOORBELL_DMR_DI); + out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_DIQI); + } + + out: + return IRQ_HANDLED; +} + +/** + * fsl_rio_doorbell_init - MPC85xx doorbell interface init + * @mport: Master port implementing the inbound doorbell unit + * + * Initializes doorbell unit hardware and inbound DMA buffer + * ring. Called from fsl_rio_setup(). Returns %0 on success + * or %-ENOMEM on failure. + */ +static int fsl_rio_doorbell_init(struct rio_mport *mport) +{ + struct rio_priv *priv = mport->priv; + int rc = 0; + + /* Map outbound doorbell window immediately after maintenance window */ + priv->dbell_win = ioremap(mport->iores.start + RIO_MAINT_WIN_SIZE, + RIO_DBELL_WIN_SIZE); + if (!priv->dbell_win) { + printk(KERN_ERR + "RIO: unable to map outbound doorbell window\n"); + rc = -ENOMEM; + goto out; + } + + /* Initialize inbound doorbells */ + priv->dbell_ring.virt = dma_alloc_coherent(priv->dev, 512 * + DOORBELL_MESSAGE_SIZE, &priv->dbell_ring.phys, GFP_KERNEL); + if (!priv->dbell_ring.virt) { + printk(KERN_ERR "RIO: unable allocate inbound doorbell ring\n"); + rc = -ENOMEM; + iounmap(priv->dbell_win); + goto out; + } + + /* Point dequeue/enqueue pointers at first entry in ring */ + out_be32(&priv->msg_regs->dqdpar, (u32) priv->dbell_ring.phys); + out_be32(&priv->msg_regs->dqepar, (u32) priv->dbell_ring.phys); + + /* Clear interrupt status */ + out_be32(&priv->msg_regs->dsr, 0x00000091); + + /* Hook up doorbell handler */ + rc = request_irq(IRQ_RIO_BELL(mport), fsl_rio_dbell_handler, 0, + "dbell_rx", (void *)mport); + if (rc < 0) { + iounmap(priv->dbell_win); + dma_free_coherent(priv->dev, 512 * DOORBELL_MESSAGE_SIZE, + priv->dbell_ring.virt, priv->dbell_ring.phys); + printk(KERN_ERR + "MPC85xx RIO: unable to request inbound doorbell irq"); + goto out; + } + + /* Configure doorbells for snooping, 512 entries, and enable */ + out_be32(&priv->msg_regs->dmr, 0x00108161); + + out: + return rc; +} + +static void port_error_handler(struct rio_mport *port, int offset) { /*XXX: Error recovery is not implemented, we just clear errors */ out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); @@ -279,6 +1098,263 @@ void fsl_rio_port_error_handler(int offset) out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), 
ESCSR_CLEAR); } } + +static void msg_unit_error_handler(struct rio_mport *port) +{ + struct rio_priv *priv = port->priv; + + /*XXX: Error recovery is not implemented, we just clear errors */ + out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); + + out_be32((u32 *)(rio_regs_win + RIO_IM0SR), IMSR_CLEAR); + out_be32((u32 *)(rio_regs_win + RIO_IM1SR), IMSR_CLEAR); + out_be32((u32 *)(rio_regs_win + RIO_OM0SR), OMSR_CLEAR); + out_be32((u32 *)(rio_regs_win + RIO_OM1SR), OMSR_CLEAR); + + out_be32(&priv->msg_regs->odsr, ODSR_CLEAR); + out_be32(&priv->msg_regs->dsr, IDSR_CLEAR); + + out_be32(&priv->msg_regs->pwsr, IPWSR_CLEAR); +} + +/** + * fsl_rio_port_write_handler - MPC85xx port write interrupt handler + * @irq: Linux interrupt number + * @dev_instance: Pointer to interrupt-specific data + * + * Handles port write interrupts. Parses a list of registered + * port write event handlers and executes a matching event handler. + */ +static irqreturn_t +fsl_rio_port_write_handler(int irq, void *dev_instance) +{ + u32 ipwmr, ipwsr; + struct rio_mport *port = (struct rio_mport *)dev_instance; + struct rio_priv *priv = port->priv; + u32 epwisr, tmp; + + epwisr = in_be32(priv->regs_win + RIO_EPWISR); + if (!(epwisr & RIO_EPWISR_PW)) + goto pw_done; + + ipwmr = in_be32(&priv->msg_regs->pwmr); + ipwsr = in_be32(&priv->msg_regs->pwsr); + +#ifdef DEBUG_PW + pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr); + if (ipwsr & RIO_IPWSR_QF) + pr_debug(" QF"); + if (ipwsr & RIO_IPWSR_TE) + pr_debug(" TE"); + if (ipwsr & RIO_IPWSR_QFI) + pr_debug(" QFI"); + if (ipwsr & RIO_IPWSR_PWD) + pr_debug(" PWD"); + if (ipwsr & RIO_IPWSR_PWB) + pr_debug(" PWB"); + pr_debug(" )\n"); +#endif + /* Schedule deferred processing if PW was received */ + if (ipwsr & RIO_IPWSR_QFI) { + /* Save PW message (if there is room in FIFO), + * otherwise discard it. + */ + if (kfifo_avail(&priv->pw_fifo) >= RIO_PW_MSG_SIZE) { + priv->port_write_msg.msg_count++; + kfifo_in(&priv->pw_fifo, priv->port_write_msg.virt, + RIO_PW_MSG_SIZE); + } else { + priv->port_write_msg.discard_count++; + pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n", + priv->port_write_msg.discard_count); + } + /* Clear interrupt and issue Clear Queue command. This allows + * another port-write to be received. 
+ */ + out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_QFI); + out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ); + + schedule_work(&priv->pw_work); + } + + if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) { + priv->port_write_msg.err_count++; + pr_debug("RIO: Port-Write Transaction Err (%d)\n", + priv->port_write_msg.err_count); + /* Clear Transaction Error: port-write controller should be + * disabled when clearing this error + */ + out_be32(&priv->msg_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE); + out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_TE); + out_be32(&priv->msg_regs->pwmr, ipwmr); + } + + if (ipwsr & RIO_IPWSR_PWD) { + priv->port_write_msg.discard_count++; + pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n", + priv->port_write_msg.discard_count); + out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_PWD); + } + +pw_done: + if (epwisr & RIO_EPWISR_PINT1) { + tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); + pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); + port_error_handler(port, 0); + } + + if (epwisr & RIO_EPWISR_PINT2) { + tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); + pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); + port_error_handler(port, 1); + } + + if (epwisr & RIO_EPWISR_MU) { + tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); + pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); + msg_unit_error_handler(port); + } + + return IRQ_HANDLED; +} + +static void fsl_pw_dpc(struct work_struct *work) +{ + struct rio_priv *priv = container_of(work, struct rio_priv, pw_work); + unsigned long flags; + u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)]; + + /* + * Process port-write messages + */ + spin_lock_irqsave(&priv->pw_fifo_lock, flags); + while (kfifo_out(&priv->pw_fifo, (unsigned char *)msg_buffer, + RIO_PW_MSG_SIZE)) { + /* Process one message */ + spin_unlock_irqrestore(&priv->pw_fifo_lock, flags); +#ifdef DEBUG_PW + { + u32 i; + pr_debug("%s : Port-Write Message:", __func__); + for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) { + if ((i%4) == 0) + pr_debug("\n0x%02x: 0x%08x", i*4, + msg_buffer[i]); + else + pr_debug(" 0x%08x", msg_buffer[i]); + } + pr_debug("\n"); + } +#endif + /* Pass the port-write message to RIO core for processing */ + rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer); + spin_lock_irqsave(&priv->pw_fifo_lock, flags); + } + spin_unlock_irqrestore(&priv->pw_fifo_lock, flags); +} + +/** + * fsl_rio_pw_enable - enable/disable port-write interface init + * @mport: Master port implementing the port write unit + * @enable: 1=enable; 0=disable port-write message handling + */ +static int fsl_rio_pw_enable(struct rio_mport *mport, int enable) +{ + struct rio_priv *priv = mport->priv; + u32 rval; + + rval = in_be32(&priv->msg_regs->pwmr); + + if (enable) + rval |= RIO_IPWMR_PWE; + else + rval &= ~RIO_IPWMR_PWE; + + out_be32(&priv->msg_regs->pwmr, rval); + + return 0; +} + +/** + * fsl_rio_port_write_init - MPC85xx port write interface init + * @mport: Master port implementing the port write unit + * + * Initializes port write unit hardware and DMA buffer + * ring. Called from fsl_rio_setup(). Returns %0 on success + * or %-ENOMEM on failure. 
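+ * Configuration is done with the port-write controller disabled; a single
+ * RIO_PW_MSG_SIZE DMA buffer serves as the inbound queue and received
+ * messages are copied into a 32-entry kfifo for deferred processing by
+ * fsl_pw_dpc().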
+ */ +static int fsl_rio_port_write_init(struct rio_mport *mport) +{ + struct rio_priv *priv = mport->priv; + int rc = 0; + + /* Following configurations require a disabled port write controller */ + out_be32(&priv->msg_regs->pwmr, + in_be32(&priv->msg_regs->pwmr) & ~RIO_IPWMR_PWE); + + /* Initialize port write */ + priv->port_write_msg.virt = dma_alloc_coherent(priv->dev, + RIO_PW_MSG_SIZE, + &priv->port_write_msg.phys, GFP_KERNEL); + if (!priv->port_write_msg.virt) { + pr_err("RIO: unable allocate port write queue\n"); + return -ENOMEM; + } + + priv->port_write_msg.err_count = 0; + priv->port_write_msg.discard_count = 0; + + /* Point dequeue/enqueue pointers at first entry */ + out_be32(&priv->msg_regs->epwqbar, 0); + out_be32(&priv->msg_regs->pwqbar, (u32) priv->port_write_msg.phys); + + pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n", + in_be32(&priv->msg_regs->epwqbar), + in_be32(&priv->msg_regs->pwqbar)); + + /* Clear interrupt status IPWSR */ + out_be32(&priv->msg_regs->pwsr, + (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD)); + + /* Configure port write contoller for snooping enable all reporting, + clear queue full */ + out_be32(&priv->msg_regs->pwmr, + RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ); + + + /* Hook up port-write handler */ + rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, + IRQF_SHARED, "port-write", (void *)mport); + if (rc < 0) { + pr_err("MPC85xx RIO: unable to request inbound doorbell irq"); + goto err_out; + } + /* Enable Error Interrupt */ + out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL); + + INIT_WORK(&priv->pw_work, fsl_pw_dpc); + spin_lock_init(&priv->pw_fifo_lock); + if (kfifo_alloc(&priv->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) { + pr_err("FIFO allocation failed\n"); + rc = -ENOMEM; + goto err_out_irq; + } + + pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n", + in_be32(&priv->msg_regs->pwmr), + in_be32(&priv->msg_regs->pwsr)); + + return rc; + +err_out_irq: + free_irq(IRQ_RIO_PW(mport), (void *)mport); +err_out: + dma_free_coherent(priv->dev, RIO_PW_MSG_SIZE, + priv->port_write_msg.virt, + priv->port_write_msg.phys); + return rc; +} + static inline void fsl_rio_info(struct device *dev, u32 ccsr) { const char *str; @@ -335,21 +1411,16 @@ int fsl_rio_setup(struct platform_device *dev) struct rio_mport *port; struct rio_priv *priv; int rc = 0; - const u32 *dt_range, *cell, *port_index; - u32 active_ports = 0; - struct resource regs, rmu_regs; - struct device_node *np, *rmu_node; + const u32 *dt_range, *cell; + struct resource regs; int rlen; u32 ccsr; - u64 range_start, range_size; + u64 law_start, law_size; int paw, aw, sw; - u32 i; - static int tmp; - struct device_node *rmu_np[MAX_MSG_UNIT_NUM] = {NULL}; if (!dev->dev.of_node) { dev_err(&dev->dev, "Device OF-Node is NULL"); - return -ENODEV; + return -EFAULT; } rc = of_address_to_resource(dev->dev.of_node, 0, ®s); @@ -358,17 +1429,37 @@ int fsl_rio_setup(struct platform_device *dev) dev->dev.of_node->full_name); return -EFAULT; } - dev_info(&dev->dev, "Of-device full name %s\n", - dev->dev.of_node->full_name); + dev_info(&dev->dev, "Of-device full name %s\n", dev->dev.of_node->full_name); dev_info(&dev->dev, "Regs: %pR\n", ®s); - rio_regs_win = ioremap(regs.start, resource_size(®s)); - if (!rio_regs_win) { - dev_err(&dev->dev, "Unable to map rio register window\n"); - rc = -ENOMEM; - goto err_rio_regs; + dt_range = of_get_property(dev->dev.of_node, "ranges", &rlen); + if (!dt_range) { + dev_err(&dev->dev, "Can't get %s property 'ranges'\n", + 
dev->dev.of_node->full_name); + return -EFAULT; } + /* Get node address wide */ + cell = of_get_property(dev->dev.of_node, "#address-cells", NULL); + if (cell) + aw = *cell; + else + aw = of_n_addr_cells(dev->dev.of_node); + /* Get node size wide */ + cell = of_get_property(dev->dev.of_node, "#size-cells", NULL); + if (cell) + sw = *cell; + else + sw = of_n_size_cells(dev->dev.of_node); + /* Get parent address wide wide */ + paw = of_n_addr_cells(dev->dev.of_node); + + law_start = of_read_number(dt_range + aw, paw); + law_size = of_read_number(dt_range + aw + paw, sw); + + dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n", + law_start, law_size); + ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL); if (!ops) { rc = -ENOMEM; @@ -388,257 +1479,143 @@ int fsl_rio_setup(struct platform_device *dev) ops->add_inb_buffer = fsl_add_inb_buffer; ops->get_inb_message = fsl_get_inb_message; - rmu_node = of_parse_phandle(dev->dev.of_node, "fsl,srio-rmu-handle", 0); - if (!rmu_node) - goto err_rmu; - rc = of_address_to_resource(rmu_node, 0, &rmu_regs); - if (rc) { - dev_err(&dev->dev, "Can't get %s property 'reg'\n", - rmu_node->full_name); - goto err_rmu; - } - rmu_regs_win = ioremap(rmu_regs.start, resource_size(&rmu_regs)); - if (!rmu_regs_win) { - dev_err(&dev->dev, "Unable to map rmu register window\n"); + port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); + if (!port) { rc = -ENOMEM; - goto err_rmu; - } - for_each_compatible_node(np, NULL, "fsl,srio-msg-unit") { - rmu_np[tmp] = np; - tmp++; + goto err_port; } + port->index = 0; - /*set up doobell node*/ - np = of_find_compatible_node(NULL, NULL, "fsl,srio-dbell-unit"); - if (!np) { - rc = -ENODEV; - goto err_dbell; - } - dbell = kzalloc(sizeof(struct fsl_rio_dbell), GFP_KERNEL); - if (!(dbell)) { - dev_err(&dev->dev, "Can't alloc memory for 'fsl_rio_dbell'\n"); + priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL); + if (!priv) { + printk(KERN_ERR "Can't alloc memory for 'priv'\n"); rc = -ENOMEM; - goto err_dbell; + goto err_priv; } - dbell->dev = &dev->dev; - dbell->bellirq = irq_of_parse_and_map(np, 1); - dev_info(&dev->dev, "bellirq: %d\n", dbell->bellirq); - aw = of_n_addr_cells(np); - dt_range = of_get_property(np, "reg", &rlen); - if (!dt_range) { - pr_err("%s: unable to find 'reg' property\n", - np->full_name); - rc = -ENOMEM; - goto err_pw; + INIT_LIST_HEAD(&port->dbells); + port->iores.start = law_start; + port->iores.end = law_start + law_size - 1; + port->iores.flags = IORESOURCE_MEM; + port->iores.name = "rio_io_win"; + + if (request_resource(&iomem_resource, &port->iores) < 0) { + dev_err(&dev->dev, "RIO: Error requesting master port region" + " 0x%016llx-0x%016llx\n", + (u64)port->iores.start, (u64)port->iores.end); + rc = -ENOMEM; + goto err_res; } - range_start = of_read_number(dt_range, aw); - dbell->dbell_regs = (struct rio_dbell_regs *)(rmu_regs_win + - (u32)range_start); - /*set up port write node*/ - np = of_find_compatible_node(NULL, NULL, "fsl,srio-port-write-unit"); - if (!np) { - rc = -ENODEV; - goto err_pw; - } - pw = kzalloc(sizeof(struct fsl_rio_pw), GFP_KERNEL); - if (!(pw)) { - dev_err(&dev->dev, "Can't alloc memory for 'fsl_rio_pw'\n"); - rc = -ENOMEM; - goto err_pw; - } - pw->dev = &dev->dev; - pw->pwirq = irq_of_parse_and_map(np, 0); - dev_info(&dev->dev, "pwirq: %d\n", pw->pwirq); - aw = of_n_addr_cells(np); - dt_range = of_get_property(np, "reg", &rlen); - if (!dt_range) { - pr_err("%s: unable to find 'reg' property\n", - np->full_name); - rc = -ENOMEM; - goto err; - } - range_start = 
of_read_number(dt_range, aw); - pw->pw_regs = (struct rio_pw_regs *)(rmu_regs_win + (u32)range_start); - - /*set up ports node*/ - for_each_child_of_node(dev->dev.of_node, np) { - port_index = of_get_property(np, "cell-index", NULL); - if (!port_index) { - dev_err(&dev->dev, "Can't get %s property 'cell-index'\n", - np->full_name); - continue; - } - - dt_range = of_get_property(np, "ranges", &rlen); - if (!dt_range) { - dev_err(&dev->dev, "Can't get %s property 'ranges'\n", - np->full_name); - continue; - } - - /* Get node address wide */ - cell = of_get_property(np, "#address-cells", NULL); - if (cell) - aw = *cell; - else - aw = of_n_addr_cells(np); - /* Get node size wide */ - cell = of_get_property(np, "#size-cells", NULL); - if (cell) - sw = *cell; - else - sw = of_n_size_cells(np); - /* Get parent address wide wide */ - paw = of_n_addr_cells(np); - range_start = of_read_number(dt_range + aw, paw); - range_size = of_read_number(dt_range + aw + paw, sw); - - dev_info(&dev->dev, "%s: LAW start 0x%016llx, size 0x%016llx.\n", - np->full_name, range_start, range_size); - - port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); - if (!port) - continue; - - i = *port_index - 1; - port->index = (unsigned char)i; - - priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL); - if (!priv) { - dev_err(&dev->dev, "Can't alloc memory for 'priv'\n"); - kfree(port); - continue; - } - - INIT_LIST_HEAD(&port->dbells); - port->iores.start = range_start; - port->iores.end = port->iores.start + range_size - 1; - port->iores.flags = IORESOURCE_MEM; - port->iores.name = "rio_io_win"; - - if (request_resource(&iomem_resource, &port->iores) < 0) { - dev_err(&dev->dev, "RIO: Error requesting master port region" - " 0x%016llx-0x%016llx\n", - (u64)port->iores.start, (u64)port->iores.end); - kfree(priv); - kfree(port); - continue; - } - sprintf(port->name, "RIO mport %d", i); - - priv->dev = &dev->dev; - port->ops = ops; - port->priv = priv; - port->phys_efptr = 0x100; - priv->regs_win = rio_regs_win; - - /* Probe the master port phy type */ - ccsr = in_be32(priv->regs_win + RIO_CCSR + i*0x20); - port->phy_type = (ccsr & 1) ? RIO_PHY_SERIAL : RIO_PHY_PARALLEL; - if (port->phy_type == RIO_PHY_PARALLEL) { - dev_err(&dev->dev, "RIO: Parallel PHY type, unsupported port type!\n"); - release_resource(&port->iores); - kfree(priv); - kfree(port); - continue; - } - dev_info(&dev->dev, "RapidIO PHY type: Serial\n"); - /* Checking the port training status */ - if (in_be32((priv->regs_win + RIO_ESCSR + i*0x20)) & 1) { - dev_err(&dev->dev, "Port %d is not ready. " - "Try to restart connection...\n", i); + priv->pwirq = irq_of_parse_and_map(dev->dev.of_node, 0); + priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2); + priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3); + priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4); + dev_info(&dev->dev, "pwirq: %d, bellirq: %d, txirq: %d, rxirq %d\n", + priv->pwirq, priv->bellirq, priv->txirq, priv->rxirq); + + rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); + rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0); + rio_init_mbox_res(&port->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0); + strcpy(port->name, "RIO0 mport"); + + priv->dev = &dev->dev; + + port->ops = ops; + port->priv = priv; + port->phys_efptr = 0x100; + + priv->regs_win = ioremap(regs.start, resource_size(®s)); + rio_regs_win = priv->regs_win; + + /* Probe the master port phy type */ + ccsr = in_be32(priv->regs_win + RIO_CCSR); + port->phy_type = (ccsr & 1) ? 
RIO_PHY_SERIAL : RIO_PHY_PARALLEL; + dev_info(&dev->dev, "RapidIO PHY type: %s\n", + (port->phy_type == RIO_PHY_PARALLEL) ? "parallel" : + ((port->phy_type == RIO_PHY_SERIAL) ? "serial" : + "unknown")); + /* Checking the port training status */ + if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) { + dev_err(&dev->dev, "Port is not ready. " + "Try to restart connection...\n"); + switch (port->phy_type) { + case RIO_PHY_SERIAL: /* Disable ports */ - out_be32(priv->regs_win - + RIO_CCSR + i*0x20, 0); + out_be32(priv->regs_win + RIO_CCSR, 0); /* Set 1x lane */ - setbits32(priv->regs_win - + RIO_CCSR + i*0x20, 0x02000000); + setbits32(priv->regs_win + RIO_CCSR, 0x02000000); /* Enable ports */ - setbits32(priv->regs_win - + RIO_CCSR + i*0x20, 0x00600000); - msleep(100); - if (in_be32((priv->regs_win - + RIO_ESCSR + i*0x20)) & 1) { - dev_err(&dev->dev, - "Port %d restart failed.\n", i); - release_resource(&port->iores); - kfree(priv); - kfree(port); - continue; - } - dev_info(&dev->dev, "Port %d restart success!\n", i); + setbits32(priv->regs_win + RIO_CCSR, 0x00600000); + break; + case RIO_PHY_PARALLEL: + /* Disable ports */ + out_be32(priv->regs_win + RIO_CCSR, 0x22000000); + /* Enable ports */ + out_be32(priv->regs_win + RIO_CCSR, 0x44000000); + break; } - fsl_rio_info(&dev->dev, ccsr); - - port->sys_size = (in_be32((priv->regs_win + RIO_PEF_CAR)) - & RIO_PEF_CTLS) >> 4; - dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", - port->sys_size ? 65536 : 256); - - if (rio_register_mport(port)) { - release_resource(&port->iores); - kfree(priv); - kfree(port); - continue; + msleep(100); + if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) { + dev_err(&dev->dev, "Port restart failed.\n"); + rc = -ENOLINK; + goto err; } - if (port->host_deviceid >= 0) - out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST | - RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED); - else - out_be32(priv->regs_win + RIO_GCCSR, - RIO_PORT_GEN_MASTER); - - priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win - + ((i == 0) ? RIO_ATMU_REGS_PORT1_OFFSET : - RIO_ATMU_REGS_PORT2_OFFSET)); - - priv->maint_atmu_regs = priv->atmu_regs + 1; - - /* Set to receive any dist ID for serial RapidIO controller. */ - if (port->phy_type == RIO_PHY_SERIAL) - out_be32((priv->regs_win - + RIO_ISR_AACR + i*0x80), RIO_ISR_AACR_AA); - - /* Configure maintenance transaction window */ - out_be32(&priv->maint_atmu_regs->rowbar, - port->iores.start >> 12); - out_be32(&priv->maint_atmu_regs->rowar, - 0x80077000 | (ilog2(RIO_MAINT_WIN_SIZE) - 1)); - - priv->maint_win = ioremap(port->iores.start, - RIO_MAINT_WIN_SIZE); - - rio_law_start = range_start; - - fsl_rio_setup_rmu(port, rmu_np[i]); - - dbell->mport[i] = port; - - active_ports++; + dev_info(&dev->dev, "Port restart success!\n"); } + fsl_rio_info(&dev->dev, ccsr); - if (!active_ports) { - rc = -ENOLINK; + port->sys_size = (in_be32((priv->regs_win + RIO_PEF_CAR)) + & RIO_PEF_CTLS) >> 4; + dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", + port->sys_size ? 
65536 : 256); + + if (rio_register_mport(port)) goto err; - } - fsl_rio_doorbell_init(dbell); - fsl_rio_port_write_init(pw); + if (port->host_deviceid >= 0) + out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST | + RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED); + else + out_be32(priv->regs_win + RIO_GCCSR, 0x00000000); + + priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win + + RIO_ATMU_REGS_OFFSET); + priv->maint_atmu_regs = priv->atmu_regs + 1; + priv->dbell_atmu_regs = priv->atmu_regs + 2; + priv->msg_regs = (struct rio_msg_regs *)(priv->regs_win + + ((port->phy_type == RIO_PHY_SERIAL) ? + RIO_S_MSG_REGS_OFFSET : RIO_P_MSG_REGS_OFFSET)); + + /* Set to receive any dist ID for serial RapidIO controller. */ + if (port->phy_type == RIO_PHY_SERIAL) + out_be32((priv->regs_win + RIO_ISR_AACR), RIO_ISR_AACR_AA); + + /* Configure maintenance transaction window */ + out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12); + out_be32(&priv->maint_atmu_regs->rowar, + 0x80077000 | (ilog2(RIO_MAINT_WIN_SIZE) - 1)); + + priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE); + + /* Configure outbound doorbell window */ + out_be32(&priv->dbell_atmu_regs->rowbar, + (law_start + RIO_MAINT_WIN_SIZE) >> 12); + out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b); /* 4k */ + fsl_rio_doorbell_init(port); + fsl_rio_port_write_init(port); return 0; err: - kfree(pw); -err_pw: - kfree(dbell); -err_dbell: - iounmap(rmu_regs_win); -err_rmu: + iounmap(priv->regs_win); + release_resource(&port->iores); +err_res: + kfree(priv); +err_priv: + kfree(port); +err_port: kfree(ops); err_ops: - iounmap(rio_regs_win); -err_rio_regs: return rc; } @@ -654,7 +1631,7 @@ static int __devinit fsl_of_rio_rpn_probe(struct platform_device *dev) static const struct of_device_id fsl_of_rio_rpn_ids[] = { { - .compatible = "fsl,srio", + .compatible = "fsl,rapidio-delta", }, {}, }; diff --git a/trunk/arch/powerpc/sysdev/fsl_rio.h b/trunk/arch/powerpc/sysdev/fsl_rio.h deleted file mode 100644 index ae8e27405a0d..000000000000 --- a/trunk/arch/powerpc/sysdev/fsl_rio.h +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Freescale MPC85xx/MPC86xx RapidIO support - * - * Copyright 2009 Sysgo AG - * Thomas Moll - * - fixed maintenance access routines, check for aligned access - * - * Copyright 2009 Integrated Device Technology, Inc. - * Alex Bounine - * - Added Port-Write message handling - * - Added Machine Check exception handling - * - * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc. - * Zhang Wei - * Lian Minghuan-B31939 - * Liu Gang - * - * Copyright 2005 MontaVista Software, Inc. - * Matt Porter - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - -#ifndef __FSL_RIO_H -#define __FSL_RIO_H - -#include -#include -#include - -#define RIO_REGS_WIN(mport) (((struct rio_priv *)(mport->priv))->regs_win) - -#define RIO_MAINT_WIN_SIZE 0x400000 -#define RIO_LTLEDCSR 0x0608 - -#define DOORBELL_ROWAR_EN 0x80000000 -#define DOORBELL_ROWAR_TFLOWLV 0x08000000 /* highest priority level */ -#define DOORBELL_ROWAR_PCI 0x02000000 /* PCI window */ -#define DOORBELL_ROWAR_NREAD 0x00040000 /* NREAD */ -#define DOORBELL_ROWAR_MAINTRD 0x00070000 /* maintenance read */ -#define DOORBELL_ROWAR_RES 0x00002000 /* wrtpy: reserverd */ -#define DOORBELL_ROWAR_MAINTWD 0x00007000 -#define DOORBELL_ROWAR_SIZE 0x0000000b /* window size is 4k */ - -#define RIO_ATMU_REGS_PORT1_OFFSET 0x10c00 -#define RIO_ATMU_REGS_PORT2_OFFSET 0x10e00 -#define RIO_S_DBELL_REGS_OFFSET 0x13400 -#define RIO_S_PW_REGS_OFFSET 0x134e0 -#define RIO_ATMU_REGS_DBELL_OFFSET 0x10C40 - -#define MAX_MSG_UNIT_NUM 2 -#define MAX_PORT_NUM 4 - -struct rio_atmu_regs { - u32 rowtar; - u32 rowtear; - u32 rowbar; - u32 pad1; - u32 rowar; - u32 pad2[3]; -}; - -struct rio_dbell_ring { - void *virt; - dma_addr_t phys; -}; - -struct rio_port_write_msg { - void *virt; - dma_addr_t phys; - u32 msg_count; - u32 err_count; - u32 discard_count; -}; - -struct fsl_rio_dbell { - struct rio_mport *mport[MAX_PORT_NUM]; - struct device *dev; - struct rio_dbell_regs __iomem *dbell_regs; - struct rio_dbell_ring dbell_ring; - int bellirq; -}; - -struct fsl_rio_pw { - struct device *dev; - struct rio_pw_regs __iomem *pw_regs; - struct rio_port_write_msg port_write_msg; - int pwirq; - struct work_struct pw_work; - struct kfifo pw_fifo; - spinlock_t pw_fifo_lock; -}; - -struct rio_priv { - struct device *dev; - void __iomem *regs_win; - struct rio_atmu_regs __iomem *atmu_regs; - struct rio_atmu_regs __iomem *maint_atmu_regs; - void __iomem *maint_win; - void *rmm_handle; /* RapidIO message manager(unit) Handle */ -}; - -extern void __iomem *rio_regs_win; -extern void __iomem *rmu_regs_win; - -extern resource_size_t rio_law_start; - -extern struct fsl_rio_dbell *dbell; -extern struct fsl_rio_pw *pw; - -extern int fsl_rio_setup_rmu(struct rio_mport *mport, - struct device_node *node); -extern int fsl_rio_port_write_init(struct fsl_rio_pw *pw); -extern int fsl_rio_pw_enable(struct rio_mport *mport, int enable); -extern void fsl_rio_port_error_handler(int offset); -extern int fsl_rio_doorbell_init(struct fsl_rio_dbell *dbell); - -extern int fsl_rio_doorbell_send(struct rio_mport *mport, - int index, u16 destid, u16 data); -extern int fsl_add_outb_message(struct rio_mport *mport, - struct rio_dev *rdev, - int mbox, void *buffer, size_t len); -extern int fsl_open_outb_mbox(struct rio_mport *mport, - void *dev_id, int mbox, int entries); -extern void fsl_close_outb_mbox(struct rio_mport *mport, int mbox); -extern int fsl_open_inb_mbox(struct rio_mport *mport, - void *dev_id, int mbox, int entries); -extern void fsl_close_inb_mbox(struct rio_mport *mport, int mbox); -extern int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf); -extern void *fsl_get_inb_message(struct rio_mport *mport, int mbox); - -#endif diff --git a/trunk/arch/powerpc/sysdev/fsl_rmu.c b/trunk/arch/powerpc/sysdev/fsl_rmu.c deleted file mode 100644 index 15485789e9db..000000000000 --- a/trunk/arch/powerpc/sysdev/fsl_rmu.c +++ /dev/null @@ -1,1104 +0,0 @@ -/* - * Freescale MPC85xx/MPC86xx RapidIO RMU support - * - * Copyright 2009 Sysgo AG - * Thomas Moll - * - fixed maintenance access routines, check for aligned access - * - * Copyright 2009 
Integrated Device Technology, Inc. - * Alex Bounine - * - Added Port-Write message handling - * - Added Machine Check exception handling - * - * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc. - * Zhang Wei - * Lian Minghuan-B31939 - * Liu Gang - * - * Copyright 2005 MontaVista Software, Inc. - * Matt Porter - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include -#include -#include -#include -#include - -#include "fsl_rio.h" - -#define GET_RMM_HANDLE(mport) \ - (((struct rio_priv *)(mport->priv))->rmm_handle) - -/* RapidIO definition irq, which read from OF-tree */ -#define IRQ_RIO_PW(m) (((struct fsl_rio_pw *)(m))->pwirq) -#define IRQ_RIO_BELL(m) (((struct fsl_rio_dbell *)(m))->bellirq) -#define IRQ_RIO_TX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->txirq) -#define IRQ_RIO_RX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->rxirq) - -#define RIO_MIN_TX_RING_SIZE 2 -#define RIO_MAX_TX_RING_SIZE 2048 -#define RIO_MIN_RX_RING_SIZE 2 -#define RIO_MAX_RX_RING_SIZE 2048 - -#define RIO_IPWMR_SEN 0x00100000 -#define RIO_IPWMR_QFIE 0x00000100 -#define RIO_IPWMR_EIE 0x00000020 -#define RIO_IPWMR_CQ 0x00000002 -#define RIO_IPWMR_PWE 0x00000001 - -#define RIO_IPWSR_QF 0x00100000 -#define RIO_IPWSR_TE 0x00000080 -#define RIO_IPWSR_QFI 0x00000010 -#define RIO_IPWSR_PWD 0x00000008 -#define RIO_IPWSR_PWB 0x00000004 - -#define RIO_EPWISR 0x10010 -/* EPWISR Error match value */ -#define RIO_EPWISR_PINT1 0x80000000 -#define RIO_EPWISR_PINT2 0x40000000 -#define RIO_EPWISR_MU 0x00000002 -#define RIO_EPWISR_PW 0x00000001 - -#define IPWSR_CLEAR 0x98 -#define OMSR_CLEAR 0x1cb3 -#define IMSR_CLEAR 0x491 -#define IDSR_CLEAR 0x91 -#define ODSR_CLEAR 0x1c00 -#define LTLEECSR_ENABLE_ALL 0xFFC000FC -#define RIO_LTLEECSR 0x060c - -#define RIO_IM0SR 0x64 -#define RIO_IM1SR 0x164 -#define RIO_OM0SR 0x4 -#define RIO_OM1SR 0x104 - -#define RIO_DBELL_WIN_SIZE 0x1000 - -#define RIO_MSG_OMR_MUI 0x00000002 -#define RIO_MSG_OSR_TE 0x00000080 -#define RIO_MSG_OSR_QOI 0x00000020 -#define RIO_MSG_OSR_QFI 0x00000010 -#define RIO_MSG_OSR_MUB 0x00000004 -#define RIO_MSG_OSR_EOMI 0x00000002 -#define RIO_MSG_OSR_QEI 0x00000001 - -#define RIO_MSG_IMR_MI 0x00000002 -#define RIO_MSG_ISR_TE 0x00000080 -#define RIO_MSG_ISR_QFI 0x00000010 -#define RIO_MSG_ISR_DIQI 0x00000001 - -#define RIO_MSG_DESC_SIZE 32 -#define RIO_MSG_BUFFER_SIZE 4096 - -#define DOORBELL_DMR_DI 0x00000002 -#define DOORBELL_DSR_TE 0x00000080 -#define DOORBELL_DSR_QFI 0x00000010 -#define DOORBELL_DSR_DIQI 0x00000001 -#define DOORBELL_TID_OFFSET 0x02 -#define DOORBELL_SID_OFFSET 0x04 -#define DOORBELL_INFO_OFFSET 0x06 - -#define DOORBELL_MESSAGE_SIZE 0x08 -#define DBELL_SID(x) (*(u16 *)(x + DOORBELL_SID_OFFSET)) -#define DBELL_TID(x) (*(u16 *)(x + DOORBELL_TID_OFFSET)) -#define DBELL_INF(x) (*(u16 *)(x + DOORBELL_INFO_OFFSET)) - -struct rio_msg_regs { - u32 omr; - u32 osr; - u32 pad1; - u32 odqdpar; - u32 pad2; - u32 osar; - u32 odpr; - u32 odatr; - u32 odcr; - u32 pad3; - u32 odqepar; - u32 pad4[13]; - u32 imr; - u32 isr; - u32 pad5; - u32 ifqdpar; - u32 pad6; - u32 ifqepar; -}; - -struct rio_dbell_regs { - u32 odmr; - u32 odsr; - u32 pad1[4]; - u32 oddpr; - u32 oddatr; - u32 pad2[3]; - u32 odretcr; - u32 pad3[12]; - u32 dmr; - u32 dsr; - u32 pad4; - u32 dqdpar; - u32 pad5; - u32 dqepar; -}; - -struct rio_pw_regs { - u32 pwmr; - u32 pwsr; 
- u32 epwqbar; - u32 pwqbar; -}; - - -struct rio_tx_desc { - u32 pad1; - u32 saddr; - u32 dport; - u32 dattr; - u32 pad2; - u32 pad3; - u32 dwcnt; - u32 pad4; -}; - -struct rio_msg_tx_ring { - void *virt; - dma_addr_t phys; - void *virt_buffer[RIO_MAX_TX_RING_SIZE]; - dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE]; - int tx_slot; - int size; - void *dev_id; -}; - -struct rio_msg_rx_ring { - void *virt; - dma_addr_t phys; - void *virt_buffer[RIO_MAX_RX_RING_SIZE]; - int rx_slot; - int size; - void *dev_id; -}; - -struct fsl_rmu { - struct rio_msg_regs __iomem *msg_regs; - struct rio_msg_tx_ring msg_tx_ring; - struct rio_msg_rx_ring msg_rx_ring; - int txirq; - int rxirq; -}; - -/** - * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler - * @irq: Linux interrupt number - * @dev_instance: Pointer to interrupt-specific data - * - * Handles outbound message interrupts. Executes a register outbound - * mailbox event handler and acks the interrupt occurrence. - */ -static irqreturn_t -fsl_rio_tx_handler(int irq, void *dev_instance) -{ - int osr; - struct rio_mport *port = (struct rio_mport *)dev_instance; - struct fsl_rmu *rmu = GET_RMM_HANDLE(port); - - osr = in_be32(&rmu->msg_regs->osr); - - if (osr & RIO_MSG_OSR_TE) { - pr_info("RIO: outbound message transmission error\n"); - out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_TE); - goto out; - } - - if (osr & RIO_MSG_OSR_QOI) { - pr_info("RIO: outbound message queue overflow\n"); - out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_QOI); - goto out; - } - - if (osr & RIO_MSG_OSR_EOMI) { - u32 dqp = in_be32(&rmu->msg_regs->odqdpar); - int slot = (dqp - rmu->msg_tx_ring.phys) >> 5; - if (port->outb_msg[0].mcback != NULL) { - port->outb_msg[0].mcback(port, rmu->msg_tx_ring.dev_id, - -1, - slot); - } - /* Ack the end-of-message interrupt */ - out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_EOMI); - } - -out: - return IRQ_HANDLED; -} - -/** - * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler - * @irq: Linux interrupt number - * @dev_instance: Pointer to interrupt-specific data - * - * Handles inbound message interrupts. Executes a registered inbound - * mailbox event handler and acks the interrupt occurrence. - */ -static irqreturn_t -fsl_rio_rx_handler(int irq, void *dev_instance) -{ - int isr; - struct rio_mport *port = (struct rio_mport *)dev_instance; - struct fsl_rmu *rmu = GET_RMM_HANDLE(port); - - isr = in_be32(&rmu->msg_regs->isr); - - if (isr & RIO_MSG_ISR_TE) { - pr_info("RIO: inbound message reception error\n"); - out_be32((void *)&rmu->msg_regs->isr, RIO_MSG_ISR_TE); - goto out; - } - - /* XXX Need to check/dispatch until queue empty */ - if (isr & RIO_MSG_ISR_DIQI) { - /* - * Can receive messages for any mailbox/letter to that - * mailbox destination. So, make the callback with an - * unknown/invalid mailbox number argument. - */ - if (port->inb_msg[0].mcback != NULL) - port->inb_msg[0].mcback(port, rmu->msg_rx_ring.dev_id, - -1, - -1); - - /* Ack the queueing interrupt */ - out_be32(&rmu->msg_regs->isr, RIO_MSG_ISR_DIQI); - } - -out: - return IRQ_HANDLED; -} - -/** - * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler - * @irq: Linux interrupt number - * @dev_instance: Pointer to interrupt-specific data - * - * Handles doorbell interrupts. Parses a list of registered - * doorbell event handlers and executes a matching event handler. 
- */ -static irqreturn_t -fsl_rio_dbell_handler(int irq, void *dev_instance) -{ - int dsr; - struct fsl_rio_dbell *fsl_dbell = (struct fsl_rio_dbell *)dev_instance; - int i; - - dsr = in_be32(&fsl_dbell->dbell_regs->dsr); - - if (dsr & DOORBELL_DSR_TE) { - pr_info("RIO: doorbell reception error\n"); - out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_TE); - goto out; - } - - if (dsr & DOORBELL_DSR_QFI) { - pr_info("RIO: doorbell queue full\n"); - out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_QFI); - } - - /* XXX Need to check/dispatch until queue empty */ - if (dsr & DOORBELL_DSR_DIQI) { - u32 dmsg = - (u32) fsl_dbell->dbell_ring.virt + - (in_be32(&fsl_dbell->dbell_regs->dqdpar) & 0xfff); - struct rio_dbell *dbell; - int found = 0; - - pr_debug - ("RIO: processing doorbell," - " sid %2.2x tid %2.2x info %4.4x\n", - DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg)); - - for (i = 0; i < MAX_PORT_NUM; i++) { - if (fsl_dbell->mport[i]) { - list_for_each_entry(dbell, - &fsl_dbell->mport[i]->dbells, node) { - if ((dbell->res->start - <= DBELL_INF(dmsg)) - && (dbell->res->end - >= DBELL_INF(dmsg))) { - found = 1; - break; - } - } - if (found && dbell->dinb) { - dbell->dinb(fsl_dbell->mport[i], - dbell->dev_id, DBELL_SID(dmsg), - DBELL_TID(dmsg), - DBELL_INF(dmsg)); - break; - } - } - } - - if (!found) { - pr_debug - ("RIO: spurious doorbell," - " sid %2.2x tid %2.2x info %4.4x\n", - DBELL_SID(dmsg), DBELL_TID(dmsg), - DBELL_INF(dmsg)); - } - setbits32(&fsl_dbell->dbell_regs->dmr, DOORBELL_DMR_DI); - out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_DIQI); - } - -out: - return IRQ_HANDLED; -} - -void msg_unit_error_handler(void) -{ - - /*XXX: Error recovery is not implemented, we just clear errors */ - out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); - - out_be32((u32 *)(rmu_regs_win + RIO_IM0SR), IMSR_CLEAR); - out_be32((u32 *)(rmu_regs_win + RIO_IM1SR), IMSR_CLEAR); - out_be32((u32 *)(rmu_regs_win + RIO_OM0SR), OMSR_CLEAR); - out_be32((u32 *)(rmu_regs_win + RIO_OM1SR), OMSR_CLEAR); - - out_be32(&dbell->dbell_regs->odsr, ODSR_CLEAR); - out_be32(&dbell->dbell_regs->dsr, IDSR_CLEAR); - - out_be32(&pw->pw_regs->pwsr, IPWSR_CLEAR); -} - -/** - * fsl_rio_port_write_handler - MPC85xx port write interrupt handler - * @irq: Linux interrupt number - * @dev_instance: Pointer to interrupt-specific data - * - * Handles port write interrupts. Parses a list of registered - * port write event handlers and executes a matching event handler. - */ -static irqreturn_t -fsl_rio_port_write_handler(int irq, void *dev_instance) -{ - u32 ipwmr, ipwsr; - struct fsl_rio_pw *pw = (struct fsl_rio_pw *)dev_instance; - u32 epwisr, tmp; - - epwisr = in_be32(rio_regs_win + RIO_EPWISR); - if (!(epwisr & RIO_EPWISR_PW)) - goto pw_done; - - ipwmr = in_be32(&pw->pw_regs->pwmr); - ipwsr = in_be32(&pw->pw_regs->pwsr); - -#ifdef DEBUG_PW - pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr); - if (ipwsr & RIO_IPWSR_QF) - pr_debug(" QF"); - if (ipwsr & RIO_IPWSR_TE) - pr_debug(" TE"); - if (ipwsr & RIO_IPWSR_QFI) - pr_debug(" QFI"); - if (ipwsr & RIO_IPWSR_PWD) - pr_debug(" PWD"); - if (ipwsr & RIO_IPWSR_PWB) - pr_debug(" PWB"); - pr_debug(" )\n"); -#endif - /* Schedule deferred processing if PW was received */ - if (ipwsr & RIO_IPWSR_QFI) { - /* Save PW message (if there is room in FIFO), - * otherwise discard it. 
- */ - if (kfifo_avail(&pw->pw_fifo) >= RIO_PW_MSG_SIZE) { - pw->port_write_msg.msg_count++; - kfifo_in(&pw->pw_fifo, pw->port_write_msg.virt, - RIO_PW_MSG_SIZE); - } else { - pw->port_write_msg.discard_count++; - pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n", - pw->port_write_msg.discard_count); - } - /* Clear interrupt and issue Clear Queue command. This allows - * another port-write to be received. - */ - out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_QFI); - out_be32(&pw->pw_regs->pwmr, ipwmr | RIO_IPWMR_CQ); - - schedule_work(&pw->pw_work); - } - - if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) { - pw->port_write_msg.err_count++; - pr_debug("RIO: Port-Write Transaction Err (%d)\n", - pw->port_write_msg.err_count); - /* Clear Transaction Error: port-write controller should be - * disabled when clearing this error - */ - out_be32(&pw->pw_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE); - out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_TE); - out_be32(&pw->pw_regs->pwmr, ipwmr); - } - - if (ipwsr & RIO_IPWSR_PWD) { - pw->port_write_msg.discard_count++; - pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n", - pw->port_write_msg.discard_count); - out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_PWD); - } - -pw_done: - if (epwisr & RIO_EPWISR_PINT1) { - tmp = in_be32(rio_regs_win + RIO_LTLEDCSR); - pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); - fsl_rio_port_error_handler(0); - } - - if (epwisr & RIO_EPWISR_PINT2) { - tmp = in_be32(rio_regs_win + RIO_LTLEDCSR); - pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); - fsl_rio_port_error_handler(1); - } - - if (epwisr & RIO_EPWISR_MU) { - tmp = in_be32(rio_regs_win + RIO_LTLEDCSR); - pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); - msg_unit_error_handler(); - } - - return IRQ_HANDLED; -} - -static void fsl_pw_dpc(struct work_struct *work) -{ - struct fsl_rio_pw *pw = container_of(work, struct fsl_rio_pw, pw_work); - u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)]; - - /* - * Process port-write messages - */ - while (kfifo_out_spinlocked(&pw->pw_fifo, (unsigned char *)msg_buffer, - RIO_PW_MSG_SIZE, &pw->pw_fifo_lock)) { - /* Process one message */ -#ifdef DEBUG_PW - { - u32 i; - pr_debug("%s : Port-Write Message:", __func__); - for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) { - if ((i%4) == 0) - pr_debug("\n0x%02x: 0x%08x", i*4, - msg_buffer[i]); - else - pr_debug(" 0x%08x", msg_buffer[i]); - } - pr_debug("\n"); - } -#endif - /* Pass the port-write message to RIO core for processing */ - rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer); - } -} - -/** - * fsl_rio_pw_enable - enable/disable port-write interface init - * @mport: Master port implementing the port write unit - * @enable: 1=enable; 0=disable port-write message handling - */ -int fsl_rio_pw_enable(struct rio_mport *mport, int enable) -{ - u32 rval; - - rval = in_be32(&pw->pw_regs->pwmr); - - if (enable) - rval |= RIO_IPWMR_PWE; - else - rval &= ~RIO_IPWMR_PWE; - - out_be32(&pw->pw_regs->pwmr, rval); - - return 0; -} - -/** - * fsl_rio_port_write_init - MPC85xx port write interface init - * @mport: Master port implementing the port write unit - * - * Initializes port write unit hardware and DMA buffer - * ring. Called from fsl_rio_setup(). Returns %0 on success - * or %-ENOMEM on failure. 
- */ - -int fsl_rio_port_write_init(struct fsl_rio_pw *pw) -{ - int rc = 0; - - /* Following configurations require a disabled port write controller */ - out_be32(&pw->pw_regs->pwmr, - in_be32(&pw->pw_regs->pwmr) & ~RIO_IPWMR_PWE); - - /* Initialize port write */ - pw->port_write_msg.virt = dma_alloc_coherent(pw->dev, - RIO_PW_MSG_SIZE, - &pw->port_write_msg.phys, GFP_KERNEL); - if (!pw->port_write_msg.virt) { - pr_err("RIO: unable allocate port write queue\n"); - return -ENOMEM; - } - - pw->port_write_msg.err_count = 0; - pw->port_write_msg.discard_count = 0; - - /* Point dequeue/enqueue pointers at first entry */ - out_be32(&pw->pw_regs->epwqbar, 0); - out_be32(&pw->pw_regs->pwqbar, (u32) pw->port_write_msg.phys); - - pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n", - in_be32(&pw->pw_regs->epwqbar), - in_be32(&pw->pw_regs->pwqbar)); - - /* Clear interrupt status IPWSR */ - out_be32(&pw->pw_regs->pwsr, - (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD)); - - /* Configure port write contoller for snooping enable all reporting, - clear queue full */ - out_be32(&pw->pw_regs->pwmr, - RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ); - - - /* Hook up port-write handler */ - rc = request_irq(IRQ_RIO_PW(pw), fsl_rio_port_write_handler, - IRQF_SHARED, "port-write", (void *)pw); - if (rc < 0) { - pr_err("MPC85xx RIO: unable to request inbound doorbell irq"); - goto err_out; - } - /* Enable Error Interrupt */ - out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL); - - INIT_WORK(&pw->pw_work, fsl_pw_dpc); - spin_lock_init(&pw->pw_fifo_lock); - if (kfifo_alloc(&pw->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) { - pr_err("FIFO allocation failed\n"); - rc = -ENOMEM; - goto err_out_irq; - } - - pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n", - in_be32(&pw->pw_regs->pwmr), - in_be32(&pw->pw_regs->pwsr)); - - return rc; - -err_out_irq: - free_irq(IRQ_RIO_PW(pw), (void *)pw); -err_out: - dma_free_coherent(pw->dev, RIO_PW_MSG_SIZE, - pw->port_write_msg.virt, - pw->port_write_msg.phys); - return rc; -} - -/** - * fsl_rio_doorbell_send - Send a MPC85xx doorbell message - * @mport: RapidIO master port info - * @index: ID of RapidIO interface - * @destid: Destination ID of target device - * @data: 16-bit info field of RapidIO doorbell message - * - * Sends a MPC85xx doorbell message. Returns %0 on success or - * %-EINVAL on failure. - */ -int fsl_rio_doorbell_send(struct rio_mport *mport, - int index, u16 destid, u16 data) -{ - pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n", - index, destid, data); - - /* In the serial version silicons, such as MPC8548, MPC8641, - * below operations is must be. - */ - out_be32(&dbell->dbell_regs->odmr, 0x00000000); - out_be32(&dbell->dbell_regs->odretcr, 0x00000004); - out_be32(&dbell->dbell_regs->oddpr, destid << 16); - out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data); - out_be32(&dbell->dbell_regs->odmr, 0x00000001); - - return 0; -} - -/** - * fsl_add_outb_message - Add message to the MPC85xx outbound message queue - * @mport: Master port with outbound message queue - * @rdev: Target of outbound message - * @mbox: Outbound mailbox - * @buffer: Message to add to outbound queue - * @len: Length of message - * - * Adds the @buffer message to the MPC85xx outbound message queue. Returns - * %0 on success or %-EINVAL on failure. 
- */ -int -fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, - void *buffer, size_t len) -{ - struct fsl_rmu *rmu = GET_RMM_HANDLE(mport); - u32 omr; - struct rio_tx_desc *desc = (struct rio_tx_desc *)rmu->msg_tx_ring.virt - + rmu->msg_tx_ring.tx_slot; - int ret = 0; - - pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \ - "%8.8x len %8.8x\n", rdev->destid, mbox, (int)buffer, len); - if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) { - ret = -EINVAL; - goto out; - } - - /* Copy and clear rest of buffer */ - memcpy(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot], buffer, - len); - if (len < (RIO_MAX_MSG_SIZE - 4)) - memset(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot] - + len, 0, RIO_MAX_MSG_SIZE - len); - - /* Set mbox field for message, and set destid */ - desc->dport = (rdev->destid << 16) | (mbox & 0x3); - - /* Enable EOMI interrupt and priority */ - desc->dattr = 0x28000000 | ((mport->index) << 20); - - /* Set transfer size aligned to next power of 2 (in double words) */ - desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len); - - /* Set snooping and source buffer address */ - desc->saddr = 0x00000004 - | rmu->msg_tx_ring.phys_buffer[rmu->msg_tx_ring.tx_slot]; - - /* Increment enqueue pointer */ - omr = in_be32(&rmu->msg_regs->omr); - out_be32(&rmu->msg_regs->omr, omr | RIO_MSG_OMR_MUI); - - /* Go to next descriptor */ - if (++rmu->msg_tx_ring.tx_slot == rmu->msg_tx_ring.size) - rmu->msg_tx_ring.tx_slot = 0; - -out: - return ret; -} - -/** - * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox - * @mport: Master port implementing the outbound message unit - * @dev_id: Device specific pointer to pass on event - * @mbox: Mailbox to open - * @entries: Number of entries in the outbound mailbox ring - * - * Initializes buffer ring, request the outbound message interrupt, - * and enables the outbound message unit. Returns %0 on success and - * %-EINVAL or %-ENOMEM on failure. - */ -int -fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) -{ - int i, j, rc = 0; - struct rio_priv *priv = mport->priv; - struct fsl_rmu *rmu = GET_RMM_HANDLE(mport); - - if ((entries < RIO_MIN_TX_RING_SIZE) || - (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) { - rc = -EINVAL; - goto out; - } - - /* Initialize shadow copy ring */ - rmu->msg_tx_ring.dev_id = dev_id; - rmu->msg_tx_ring.size = entries; - - for (i = 0; i < rmu->msg_tx_ring.size; i++) { - rmu->msg_tx_ring.virt_buffer[i] = - dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE, - &rmu->msg_tx_ring.phys_buffer[i], GFP_KERNEL); - if (!rmu->msg_tx_ring.virt_buffer[i]) { - rc = -ENOMEM; - for (j = 0; j < rmu->msg_tx_ring.size; j++) - if (rmu->msg_tx_ring.virt_buffer[j]) - dma_free_coherent(priv->dev, - RIO_MSG_BUFFER_SIZE, - rmu->msg_tx_ring. - virt_buffer[j], - rmu->msg_tx_ring. 
- phys_buffer[j]); - goto out; - } - } - - /* Initialize outbound message descriptor ring */ - rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev, - rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE, - &rmu->msg_tx_ring.phys, GFP_KERNEL); - if (!rmu->msg_tx_ring.virt) { - rc = -ENOMEM; - goto out_dma; - } - memset(rmu->msg_tx_ring.virt, 0, - rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE); - rmu->msg_tx_ring.tx_slot = 0; - - /* Point dequeue/enqueue pointers at first entry in ring */ - out_be32(&rmu->msg_regs->odqdpar, rmu->msg_tx_ring.phys); - out_be32(&rmu->msg_regs->odqepar, rmu->msg_tx_ring.phys); - - /* Configure for snooping */ - out_be32(&rmu->msg_regs->osar, 0x00000004); - - /* Clear interrupt status */ - out_be32(&rmu->msg_regs->osr, 0x000000b3); - - /* Hook up outbound message handler */ - rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0, - "msg_tx", (void *)mport); - if (rc < 0) - goto out_irq; - - /* - * Configure outbound message unit - * Snooping - * Interrupts (all enabled, except QEIE) - * Chaining mode - * Disable - */ - out_be32(&rmu->msg_regs->omr, 0x00100220); - - /* Set number of entries */ - out_be32(&rmu->msg_regs->omr, - in_be32(&rmu->msg_regs->omr) | - ((get_bitmask_order(entries) - 2) << 12)); - - /* Now enable the unit */ - out_be32(&rmu->msg_regs->omr, in_be32(&rmu->msg_regs->omr) | 0x1); - -out: - return rc; - -out_irq: - dma_free_coherent(priv->dev, - rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE, - rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys); - -out_dma: - for (i = 0; i < rmu->msg_tx_ring.size; i++) - dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE, - rmu->msg_tx_ring.virt_buffer[i], - rmu->msg_tx_ring.phys_buffer[i]); - - return rc; -} - -/** - * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox - * @mport: Master port implementing the outbound message unit - * @mbox: Mailbox to close - * - * Disables the outbound message unit, free all buffers, and - * frees the outbound message interrupt. - */ -void fsl_close_outb_mbox(struct rio_mport *mport, int mbox) -{ - struct rio_priv *priv = mport->priv; - struct fsl_rmu *rmu = GET_RMM_HANDLE(mport); - - /* Disable inbound message unit */ - out_be32(&rmu->msg_regs->omr, 0); - - /* Free ring */ - dma_free_coherent(priv->dev, - rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE, - rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys); - - /* Free interrupt */ - free_irq(IRQ_RIO_TX(mport), (void *)mport); -} - -/** - * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox - * @mport: Master port implementing the inbound message unit - * @dev_id: Device specific pointer to pass on event - * @mbox: Mailbox to open - * @entries: Number of entries in the inbound mailbox ring - * - * Initializes buffer ring, request the inbound message interrupt, - * and enables the inbound message unit. Returns %0 on success - * and %-EINVAL or %-ENOMEM on failure. 
- */ -int -fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) -{ - int i, rc = 0; - struct rio_priv *priv = mport->priv; - struct fsl_rmu *rmu = GET_RMM_HANDLE(mport); - - if ((entries < RIO_MIN_RX_RING_SIZE) || - (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) { - rc = -EINVAL; - goto out; - } - - /* Initialize client buffer ring */ - rmu->msg_rx_ring.dev_id = dev_id; - rmu->msg_rx_ring.size = entries; - rmu->msg_rx_ring.rx_slot = 0; - for (i = 0; i < rmu->msg_rx_ring.size; i++) - rmu->msg_rx_ring.virt_buffer[i] = NULL; - - /* Initialize inbound message ring */ - rmu->msg_rx_ring.virt = dma_alloc_coherent(priv->dev, - rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE, - &rmu->msg_rx_ring.phys, GFP_KERNEL); - if (!rmu->msg_rx_ring.virt) { - rc = -ENOMEM; - goto out; - } - - /* Point dequeue/enqueue pointers at first entry in ring */ - out_be32(&rmu->msg_regs->ifqdpar, (u32) rmu->msg_rx_ring.phys); - out_be32(&rmu->msg_regs->ifqepar, (u32) rmu->msg_rx_ring.phys); - - /* Clear interrupt status */ - out_be32(&rmu->msg_regs->isr, 0x00000091); - - /* Hook up inbound message handler */ - rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0, - "msg_rx", (void *)mport); - if (rc < 0) { - dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE, - rmu->msg_tx_ring.virt_buffer[i], - rmu->msg_tx_ring.phys_buffer[i]); - goto out; - } - - /* - * Configure inbound message unit: - * Snooping - * 4KB max message size - * Unmask all interrupt sources - * Disable - */ - out_be32(&rmu->msg_regs->imr, 0x001b0060); - - /* Set number of queue entries */ - setbits32(&rmu->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12); - - /* Now enable the unit */ - setbits32(&rmu->msg_regs->imr, 0x1); - -out: - return rc; -} - -/** - * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox - * @mport: Master port implementing the inbound message unit - * @mbox: Mailbox to close - * - * Disables the inbound message unit, free all buffers, and - * frees the inbound message interrupt. - */ -void fsl_close_inb_mbox(struct rio_mport *mport, int mbox) -{ - struct rio_priv *priv = mport->priv; - struct fsl_rmu *rmu = GET_RMM_HANDLE(mport); - - /* Disable inbound message unit */ - out_be32(&rmu->msg_regs->imr, 0); - - /* Free ring */ - dma_free_coherent(priv->dev, rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE, - rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys); - - /* Free interrupt */ - free_irq(IRQ_RIO_RX(mport), (void *)mport); -} - -/** - * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue - * @mport: Master port implementing the inbound message unit - * @mbox: Inbound mailbox number - * @buf: Buffer to add to inbound queue - * - * Adds the @buf buffer to the MPC85xx inbound message queue. Returns - * %0 on success or %-EINVAL on failure. 
- */ -int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf) -{ - int rc = 0; - struct fsl_rmu *rmu = GET_RMM_HANDLE(mport); - - pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n", - rmu->msg_rx_ring.rx_slot); - - if (rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot]) { - printk(KERN_ERR - "RIO: error adding inbound buffer %d, buffer exists\n", - rmu->msg_rx_ring.rx_slot); - rc = -EINVAL; - goto out; - } - - rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot] = buf; - if (++rmu->msg_rx_ring.rx_slot == rmu->msg_rx_ring.size) - rmu->msg_rx_ring.rx_slot = 0; - -out: - return rc; -} - -/** - * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit - * @mport: Master port implementing the inbound message unit - * @mbox: Inbound mailbox number - * - * Gets the next available inbound message from the inbound message queue. - * A pointer to the message is returned on success or NULL on failure. - */ -void *fsl_get_inb_message(struct rio_mport *mport, int mbox) -{ - struct fsl_rmu *rmu = GET_RMM_HANDLE(mport); - u32 phys_buf, virt_buf; - void *buf = NULL; - int buf_idx; - - phys_buf = in_be32(&rmu->msg_regs->ifqdpar); - - /* If no more messages, then bail out */ - if (phys_buf == in_be32(&rmu->msg_regs->ifqepar)) - goto out2; - - virt_buf = (u32) rmu->msg_rx_ring.virt + (phys_buf - - rmu->msg_rx_ring.phys); - buf_idx = (phys_buf - rmu->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE; - buf = rmu->msg_rx_ring.virt_buffer[buf_idx]; - - if (!buf) { - printk(KERN_ERR - "RIO: inbound message copy failed, no buffers\n"); - goto out1; - } - - /* Copy max message size, caller is expected to allocate that big */ - memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE); - - /* Clear the available buffer */ - rmu->msg_rx_ring.virt_buffer[buf_idx] = NULL; - -out1: - setbits32(&rmu->msg_regs->imr, RIO_MSG_IMR_MI); - -out2: - return buf; -} - -/** - * fsl_rio_doorbell_init - MPC85xx doorbell interface init - * @mport: Master port implementing the inbound doorbell unit - * - * Initializes doorbell unit hardware and inbound DMA buffer - * ring. Called from fsl_rio_setup(). Returns %0 on success - * or %-ENOMEM on failure. 
- */ -int fsl_rio_doorbell_init(struct fsl_rio_dbell *dbell) -{ - int rc = 0; - - /* Initialize inbound doorbells */ - dbell->dbell_ring.virt = dma_alloc_coherent(dbell->dev, 512 * - DOORBELL_MESSAGE_SIZE, &dbell->dbell_ring.phys, GFP_KERNEL); - if (!dbell->dbell_ring.virt) { - printk(KERN_ERR "RIO: unable allocate inbound doorbell ring\n"); - rc = -ENOMEM; - goto out; - } - - /* Point dequeue/enqueue pointers at first entry in ring */ - out_be32(&dbell->dbell_regs->dqdpar, (u32) dbell->dbell_ring.phys); - out_be32(&dbell->dbell_regs->dqepar, (u32) dbell->dbell_ring.phys); - - /* Clear interrupt status */ - out_be32(&dbell->dbell_regs->dsr, 0x00000091); - - /* Hook up doorbell handler */ - rc = request_irq(IRQ_RIO_BELL(dbell), fsl_rio_dbell_handler, 0, - "dbell_rx", (void *)dbell); - if (rc < 0) { - dma_free_coherent(dbell->dev, 512 * DOORBELL_MESSAGE_SIZE, - dbell->dbell_ring.virt, dbell->dbell_ring.phys); - printk(KERN_ERR - "MPC85xx RIO: unable to request inbound doorbell irq"); - goto out; - } - - /* Configure doorbells for snooping, 512 entries, and enable */ - out_be32(&dbell->dbell_regs->dmr, 0x00108161); - -out: - return rc; -} - -int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node) -{ - struct rio_priv *priv; - struct fsl_rmu *rmu; - u64 msg_start; - const u32 *msg_addr; - int mlen; - int aw; - - if (!mport || !mport->priv) - return -EINVAL; - - priv = mport->priv; - - if (!node) { - dev_warn(priv->dev, "Can't get %s property 'fsl,rmu'\n", - priv->dev->of_node->full_name); - return -EINVAL; - } - - rmu = kzalloc(sizeof(struct fsl_rmu), GFP_KERNEL); - if (!rmu) - return -ENOMEM; - - aw = of_n_addr_cells(node); - msg_addr = of_get_property(node, "reg", &mlen); - if (!msg_addr) { - pr_err("%s: unable to find 'reg' property of message-unit\n", - node->full_name); - kfree(rmu); - return -ENOMEM; - } - msg_start = of_read_number(msg_addr, aw); - - rmu->msg_regs = (struct rio_msg_regs *) - (rmu_regs_win + (u32)msg_start); - - rmu->txirq = irq_of_parse_and_map(node, 0); - rmu->rxirq = irq_of_parse_and_map(node, 1); - printk(KERN_INFO "%s: txirq: %d, rxirq %d\n", - node->full_name, rmu->txirq, rmu->rxirq); - - priv->rmm_handle = rmu; - - rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); - rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0); - rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0); - - return 0; -} diff --git a/trunk/arch/powerpc/sysdev/mpic.c b/trunk/arch/powerpc/sysdev/mpic.c index 4e9ccb1015de..8c7e8528e7c4 100644 --- a/trunk/arch/powerpc/sysdev/mpic.c +++ b/trunk/arch/powerpc/sysdev/mpic.c @@ -154,7 +154,7 @@ static inline unsigned int mpic_processor_id(struct mpic *mpic) { unsigned int cpu = 0; - if (!(mpic->flags & MPIC_SECONDARY)) + if (mpic->flags & MPIC_PRIMARY) cpu = hard_smp_processor_id(); return cpu; @@ -315,25 +315,29 @@ static void _mpic_map_mmio(struct mpic *mpic, phys_addr_t phys_addr, } #ifdef CONFIG_PPC_DCR -static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb, +static void _mpic_map_dcr(struct mpic *mpic, struct device_node *node, + struct mpic_reg_bank *rb, unsigned int offset, unsigned int size) { - phys_addr_t phys_addr = dcr_resource_start(mpic->node, 0); - rb->dhost = dcr_map(mpic->node, phys_addr + offset, size); + const u32 *dbasep; + + dbasep = of_get_property(node, "dcr-reg", NULL); + + rb->dhost = dcr_map(node, *dbasep + offset, size); BUG_ON(!DCR_MAP_OK(rb->dhost)); } -static inline void mpic_map(struct mpic *mpic, +static inline void mpic_map(struct mpic *mpic, 
struct device_node *node, phys_addr_t phys_addr, struct mpic_reg_bank *rb, unsigned int offset, unsigned int size) { if (mpic->flags & MPIC_USES_DCR) - _mpic_map_dcr(mpic, rb, offset, size); + _mpic_map_dcr(mpic, node, rb, offset, size); else _mpic_map_mmio(mpic, phys_addr, rb, offset, size); } #else /* CONFIG_PPC_DCR */ -#define mpic_map(m,p,b,o,s) _mpic_map_mmio(m,p,b,o,s) +#define mpic_map(m,n,p,b,o,s) _mpic_map_mmio(m,p,b,o,s) #endif /* !CONFIG_PPC_DCR */ @@ -897,7 +901,7 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type) if (vold != vnew) mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew); - return IRQ_SET_MASK_OK_NOCOPY; + return IRQ_SET_MASK_OK_NOCOPY;; } void mpic_set_vector(unsigned int virq, unsigned int vector) @@ -986,7 +990,7 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq, #ifdef CONFIG_SMP else if (hw >= mpic->ipi_vecs[0]) { - WARN_ON(mpic->flags & MPIC_SECONDARY); + WARN_ON(!(mpic->flags & MPIC_PRIMARY)); DBG("mpic: mapping as IPI\n"); irq_set_chip_data(virq, mpic); @@ -997,7 +1001,7 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq, #endif /* CONFIG_SMP */ if (hw >= mpic->timer_vecs[0] && hw <= mpic->timer_vecs[7]) { - WARN_ON(mpic->flags & MPIC_SECONDARY); + WARN_ON(!(mpic->flags & MPIC_PRIMARY)); DBG("mpic: mapping as timer\n"); irq_set_chip_data(virq, mpic); @@ -1111,28 +1115,17 @@ static int mpic_host_xlate(struct irq_host *h, struct device_node *ct, return 0; } -/* IRQ handler for a secondary MPIC cascaded from another IRQ controller */ -static void mpic_cascade(unsigned int irq, struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct mpic *mpic = irq_desc_get_handler_data(desc); - unsigned int virq; - - BUG_ON(!(mpic->flags & MPIC_SECONDARY)); - - virq = mpic_get_one_irq(mpic); - if (virq != NO_IRQ) - generic_handle_irq(virq); - - chip->irq_eoi(&desc->irq_data); -} - static struct irq_host_ops mpic_host_ops = { .match = mpic_host_match, .map = mpic_host_map, .xlate = mpic_host_xlate, }; +static int mpic_reset_prohibited(struct device_node *node) +{ + return node && of_get_property(node, "pic-no-reset", NULL); +} + /* * Exported functions */ @@ -1144,60 +1137,27 @@ struct mpic * __init mpic_alloc(struct device_node *node, unsigned int irq_count, const char *name) { - int i, psize, intvec_top; - struct mpic *mpic; - u32 greg_feature; - const char *vers; - const u32 *psrc; - - /* Default MPIC search parameters */ - static const struct of_device_id __initconst mpic_device_id[] = { - { .type = "open-pic", }, - { .compatible = "open-pic", }, - {}, - }; - - /* - * If we were not passed a device-tree node, then perform the default - * search for standardized a standardized OpenPIC. 
- */ - if (node) { - node = of_node_get(node); - } else { - node = of_find_matching_node(NULL, mpic_device_id); - if (!node) - return NULL; - } - - /* Pick the physical address from the device tree if unspecified */ - if (!phys_addr) { - /* Check if it is DCR-based */ - if (of_get_property(node, "dcr-reg", NULL)) { - flags |= MPIC_USES_DCR; - } else { - struct resource r; - if (of_address_to_resource(node, 0, &r)) - goto err_of_node_put; - phys_addr = r.start; - } - } + struct mpic *mpic; + u32 greg_feature; + const char *vers; + int i; + int intvec_top; + u64 paddr = phys_addr; mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL); if (mpic == NULL) - goto err_of_node_put; + return NULL; mpic->name = name; - mpic->node = node; - mpic->paddr = phys_addr; mpic->hc_irq = mpic_irq_chip; mpic->hc_irq.name = name; - if (!(flags & MPIC_SECONDARY)) + if (flags & MPIC_PRIMARY) mpic->hc_irq.irq_set_affinity = mpic_set_affinity; #ifdef CONFIG_MPIC_U3_HT_IRQS mpic->hc_ht_irq = mpic_irq_ht_chip; mpic->hc_ht_irq.name = name; - if (!(flags & MPIC_SECONDARY)) + if (flags & MPIC_PRIMARY) mpic->hc_ht_irq.irq_set_affinity = mpic_set_affinity; #endif /* CONFIG_MPIC_U3_HT_IRQS */ @@ -1234,22 +1194,28 @@ struct mpic * __init mpic_alloc(struct device_node *node, mpic->spurious_vec = intvec_top; /* Check for "big-endian" in device-tree */ - if (of_get_property(mpic->node, "big-endian", NULL) != NULL) + if (node && of_get_property(node, "big-endian", NULL) != NULL) mpic->flags |= MPIC_BIG_ENDIAN; - if (of_device_is_compatible(mpic->node, "fsl,mpic")) + if (node && of_device_is_compatible(node, "fsl,mpic")) mpic->flags |= MPIC_FSL; /* Look for protected sources */ - psrc = of_get_property(mpic->node, "protected-sources", &psize); - if (psrc) { - /* Allocate a bitmap with one bit per interrupt */ - unsigned int mapsize = BITS_TO_LONGS(intvec_top + 1); - mpic->protected = kzalloc(mapsize*sizeof(long), GFP_KERNEL); - BUG_ON(mpic->protected == NULL); - for (i = 0; i < psize/sizeof(u32); i++) { - if (psrc[i] > intvec_top) - continue; - __set_bit(psrc[i], mpic->protected); + if (node) { + int psize; + unsigned int bits, mapsize; + const u32 *psrc = + of_get_property(node, "protected-sources", &psize); + if (psrc) { + psize /= 4; + bits = intvec_top + 1; + mapsize = BITS_TO_LONGS(bits) * sizeof(unsigned long); + mpic->protected = kzalloc(mapsize, GFP_KERNEL); + BUG_ON(mpic->protected == NULL); + for (i = 0; i < psize; i++) { + if (psrc[i] > intvec_top) + continue; + __set_bit(psrc[i], mpic->protected); + } } } @@ -1258,32 +1224,42 @@ struct mpic * __init mpic_alloc(struct device_node *node, #endif /* default register type */ - if (flags & MPIC_BIG_ENDIAN) - mpic->reg_type = mpic_access_mmio_be; - else - mpic->reg_type = mpic_access_mmio_le; + mpic->reg_type = (flags & MPIC_BIG_ENDIAN) ? + mpic_access_mmio_be : mpic_access_mmio_le; - /* - * An MPIC with a "dcr-reg" property must be accessed that way, but - * only if the kernel includes DCR support. 
- */ + /* If no physical address is passed in, a device-node is mandatory */ + BUG_ON(paddr == 0 && node == NULL); + + /* If no physical address passed in, check if it's dcr based */ + if (paddr == 0 && of_get_property(node, "dcr-reg", NULL) != NULL) { #ifdef CONFIG_PPC_DCR - if (flags & MPIC_USES_DCR) + mpic->flags |= MPIC_USES_DCR; mpic->reg_type = mpic_access_dcr; #else - BUG_ON(flags & MPIC_USES_DCR); -#endif + BUG(); +#endif /* CONFIG_PPC_DCR */ + } + + /* If the MPIC is not DCR based, and no physical address was passed + * in, try to obtain one + */ + if (paddr == 0 && !(mpic->flags & MPIC_USES_DCR)) { + const u32 *reg = of_get_property(node, "reg", NULL); + BUG_ON(reg == NULL); + paddr = of_translate_address(node, reg); + BUG_ON(paddr == OF_BAD_ADDR); + } /* Map the global registers */ - mpic_map(mpic, mpic->paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000); - mpic_map(mpic, mpic->paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000); + mpic_map(mpic, node, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000); + mpic_map(mpic, node, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000); /* Reset */ /* When using a device-node, reset requests are only honored if the MPIC * is allowed to reset. */ - if (of_get_property(mpic->node, "pic-no-reset", NULL)) + if (mpic_reset_prohibited(node)) mpic->flags |= MPIC_NO_RESET; if ((flags & MPIC_WANTS_RESET) && !(mpic->flags & MPIC_NO_RESET)) { @@ -1331,7 +1307,7 @@ struct mpic * __init mpic_alloc(struct device_node *node, for_each_possible_cpu(i) { unsigned int cpu = get_hard_smp_processor_id(i); - mpic_map(mpic, mpic->paddr, &mpic->cpuregs[cpu], + mpic_map(mpic, node, paddr, &mpic->cpuregs[cpu], MPIC_INFO(CPU_BASE) + cpu * MPIC_INFO(CPU_STRIDE), 0x1000); } @@ -1339,21 +1315,16 @@ struct mpic * __init mpic_alloc(struct device_node *node, /* Initialize main ISU if none provided */ if (mpic->isu_size == 0) { mpic->isu_size = mpic->num_sources; - mpic_map(mpic, mpic->paddr, &mpic->isus[0], + mpic_map(mpic, node, paddr, &mpic->isus[0], MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * mpic->isu_size); } mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1); mpic->isu_mask = (1 << mpic->isu_shift) - 1; - mpic->irqhost = irq_alloc_host(mpic->node, IRQ_HOST_MAP_LINEAR, + mpic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, isu_size ? isu_size : mpic->num_sources, &mpic_host_ops, flags & MPIC_LARGE_VECTORS ? 2048 : 256); - - /* - * FIXME: The code leaks the MPIC object and mappings here; this - * is very unlikely to fail but it ought to be fixed anyways. 
- */ if (mpic->irqhost == NULL) return NULL; @@ -1376,23 +1347,19 @@ struct mpic * __init mpic_alloc(struct device_node *node, } printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %llx," " max %d CPUs\n", - name, vers, (unsigned long long)mpic->paddr, num_possible_cpus()); + name, vers, (unsigned long long)paddr, num_possible_cpus()); printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", mpic->isu_size, mpic->isu_shift, mpic->isu_mask); mpic->next = mpics; mpics = mpic; - if (!(flags & MPIC_SECONDARY)) { + if (flags & MPIC_PRIMARY) { mpic_primary = mpic; irq_set_default_host(mpic->irqhost); } return mpic; - -err_of_node_put: - of_node_put(node); - return NULL; } void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, @@ -1402,7 +1369,7 @@ void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, BUG_ON(isu_num >= MPIC_MAX_ISU); - mpic_map(mpic, + mpic_map(mpic, mpic->irqhost->of_node, paddr, &mpic->isus[isu_num], 0, MPIC_INFO(IRQ_STRIDE) * mpic->isu_size); @@ -1418,7 +1385,8 @@ void __init mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count) void __init mpic_init(struct mpic *mpic) { - int i, cpu; + int i; + int cpu; BUG_ON(mpic->num_sources == 0); @@ -1456,7 +1424,7 @@ void __init mpic_init(struct mpic *mpic) /* Do the HT PIC fixups on U3 broken mpic */ DBG("MPIC flags: %x\n", mpic->flags); - if ((mpic->flags & MPIC_U3_HT_IRQS) && !(mpic->flags & MPIC_SECONDARY)) { + if ((mpic->flags & MPIC_U3_HT_IRQS) && (mpic->flags & MPIC_PRIMARY)) { mpic_scan_ht_pics(mpic); mpic_u3msi_init(mpic); } @@ -1503,17 +1471,6 @@ void __init mpic_init(struct mpic *mpic) GFP_KERNEL); BUG_ON(mpic->save_data == NULL); #endif - - /* Check if this MPIC is chained from a parent interrupt controller */ - if (mpic->flags & MPIC_SECONDARY) { - int virq = irq_of_parse_and_map(mpic->node, 0); - if (virq != NO_IRQ) { - printk(KERN_INFO "%s: hooking up to IRQ %d\n", - mpic->node->full_name, virq); - irq_set_handler_data(virq, mpic); - irq_set_chained_handler(virq, &mpic_cascade); - } - } } void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio) diff --git a/trunk/arch/powerpc/sysdev/ppc4xx_pci.c b/trunk/arch/powerpc/sysdev/ppc4xx_pci.c index 4f05f7542346..862f11b3821e 100644 --- a/trunk/arch/powerpc/sysdev/ppc4xx_pci.c +++ b/trunk/arch/powerpc/sysdev/ppc4xx_pci.c @@ -185,15 +185,9 @@ static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose, out: dma_offset_set = 1; pci_dram_offset = res->start; - hose->dma_window_base_cur = res->start; - hose->dma_window_size = resource_size(res); printk(KERN_INFO "4xx PCI DMA offset set to 0x%08lx\n", pci_dram_offset); - printk(KERN_INFO "4xx PCI DMA window base to 0x%016llx\n", - (unsigned long long)hose->dma_window_base_cur); - printk(KERN_INFO "DMA window size 0x%016llx\n", - (unsigned long long)hose->dma_window_size); return 0; } @@ -653,7 +647,6 @@ static unsigned int ppc4xx_pciex_port_count; struct ppc4xx_pciex_hwops { - bool want_sdr; int (*core_init)(struct device_node *np); int (*port_init_hw)(struct ppc4xx_pciex_port *port); int (*setup_utl)(struct ppc4xx_pciex_port *port); @@ -923,7 +916,6 @@ static int ppc440speB_pciex_init_utl(struct ppc4xx_pciex_port *port) static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata = { - .want_sdr = true, .core_init = ppc440spe_pciex_core_init, .port_init_hw = ppc440speA_pciex_init_port_hw, .setup_utl = ppc440speA_pciex_init_utl, @@ -932,7 +924,6 @@ static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata = static struct ppc4xx_pciex_hwops 
ppc440speB_pcie_hwops __initdata = { - .want_sdr = true, .core_init = ppc440spe_pciex_core_init, .port_init_hw = ppc440speB_pciex_init_port_hw, .setup_utl = ppc440speB_pciex_init_utl, @@ -1043,7 +1034,6 @@ static int ppc460ex_pciex_init_utl(struct ppc4xx_pciex_port *port) static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata = { - .want_sdr = true, .core_init = ppc460ex_pciex_core_init, .port_init_hw = ppc460ex_pciex_init_port_hw, .setup_utl = ppc460ex_pciex_init_utl, @@ -1191,7 +1181,6 @@ static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port) } static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = { - .want_sdr = true, .core_init = ppc460sx_pciex_core_init, .port_init_hw = ppc460sx_pciex_init_port_hw, .setup_utl = ppc460sx_pciex_init_utl, @@ -1287,7 +1276,6 @@ static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port) static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata = { - .want_sdr = true, .core_init = ppc405ex_pciex_core_init, .port_init_hw = ppc405ex_pciex_init_port_hw, .setup_utl = ppc405ex_pciex_init_utl, @@ -1296,52 +1284,6 @@ static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata = #endif /* CONFIG_40x */ -#ifdef CONFIG_476FPE -static int __init ppc_476fpe_pciex_core_init(struct device_node *np) -{ - return 4; -} - -static void __init ppc_476fpe_pciex_check_link(struct ppc4xx_pciex_port *port) -{ - u32 timeout_ms = 20; - u32 val = 0, mask = (PECFG_TLDLP_LNKUP|PECFG_TLDLP_PRESENT); - void __iomem *mbase = ioremap(port->cfg_space.start + 0x10000000, - 0x1000); - - printk(KERN_INFO "PCIE%d: Checking link...\n", port->index); - - if (mbase == NULL) { - printk(KERN_WARNING "PCIE%d: failed to get cfg space\n", - port->index); - return; - } - - while (timeout_ms--) { - val = in_le32(mbase + PECFG_TLDLP); - - if ((val & mask) == mask) - break; - msleep(10); - } - - if (val & PECFG_TLDLP_PRESENT) { - printk(KERN_INFO "PCIE%d: link is up !\n", port->index); - port->link = 1; - } else - printk(KERN_WARNING "PCIE%d: Link up failed\n", port->index); - - iounmap(mbase); - return; -} - -static struct ppc4xx_pciex_hwops ppc_476fpe_pcie_hwops __initdata = -{ - .core_init = ppc_476fpe_pciex_core_init, - .check_link = ppc_476fpe_pciex_check_link, -}; -#endif /* CONFIG_476FPE */ - /* Check that the core has been initied and if not, do it */ static int __init ppc4xx_pciex_check_core_init(struct device_node *np) { @@ -1366,10 +1308,6 @@ static int __init ppc4xx_pciex_check_core_init(struct device_node *np) #ifdef CONFIG_40x if (of_device_is_compatible(np, "ibm,plb-pciex-405ex")) ppc4xx_pciex_hwops = &ppc405ex_pcie_hwops; -#endif -#ifdef CONFIG_476FPE - if (of_device_is_compatible(np, "ibm,plb-pciex-476fpe")) - ppc4xx_pciex_hwops = &ppc_476fpe_pcie_hwops; #endif if (ppc4xx_pciex_hwops == NULL) { printk(KERN_WARNING "PCIE: unknown host type %s\n", @@ -1679,10 +1617,6 @@ static int __init ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port *port, dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, sa | DCRO_PEGPL_460SX_OMR1MSKL_UOT | DCRO_PEGPL_OMRxMSKL_VAL); - else if (of_device_is_compatible(port->node, "ibm,plb-pciex-476fpe")) - dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, - sa | DCRO_PEGPL_476FPE_OMR1MSKL_UOT - | DCRO_PEGPL_OMRxMSKL_VAL); else dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, sa | DCRO_PEGPL_OMR1MSKL_UOT @@ -1805,10 +1739,9 @@ static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port, /* Calculate window size */ sa = (0xffffffffffffffffull << ilog2(size)); if (res->flags & IORESOURCE_PREFETCH) - sa |= 
PCI_BASE_ADDRESS_MEM_PREFETCH; + sa |= 0x8; - if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx") || - of_device_is_compatible(port->node, "ibm,plb-pciex-476fpe")) + if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx")) sa |= PCI_BASE_ADDRESS_MEM_TYPE_64; out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa)); @@ -2039,15 +1972,13 @@ static void __init ppc4xx_probe_pciex_bridge(struct device_node *np) } port->node = of_node_get(np); - if (ppc4xx_pciex_hwops->want_sdr) { - pval = of_get_property(np, "sdr-base", NULL); - if (pval == NULL) { - printk(KERN_ERR "PCIE: missing sdr-base for %s\n", - np->full_name); - return; - } - port->sdr_base = *pval; + pval = of_get_property(np, "sdr-base", NULL); + if (pval == NULL) { + printk(KERN_ERR "PCIE: missing sdr-base for %s\n", + np->full_name); + return; } + port->sdr_base = *pval; /* Check if device_type property is set to "pci" or "pci-endpoint". * Resulting from this setup this PCIe port will be configured diff --git a/trunk/arch/powerpc/sysdev/ppc4xx_pci.h b/trunk/arch/powerpc/sysdev/ppc4xx_pci.h index bb4821938ab1..32ce763a375a 100644 --- a/trunk/arch/powerpc/sysdev/ppc4xx_pci.h +++ b/trunk/arch/powerpc/sysdev/ppc4xx_pci.h @@ -476,13 +476,6 @@ #define DCRO_PEGPL_OMR1MSKL_UOT 0x00000002 #define DCRO_PEGPL_OMR3MSKL_IO 0x00000002 -/* 476FPE */ -#define PCCFG_LCPA 0x270 -#define PECFG_TLDLP 0x3F8 -#define PECFG_TLDLP_LNKUP 0x00000008 -#define PECFG_TLDLP_PRESENT 0x00000010 -#define DCRO_PEGPL_476FPE_OMR1MSKL_UOT 0x00000004 - /* SDR Bit Mappings */ #define PESDRx_RCSSET_HLDPLB 0x10000000 #define PESDRx_RCSSET_RSTGU 0x01000000 diff --git a/trunk/arch/powerpc/sysdev/xics/icp-hv.c b/trunk/arch/powerpc/sysdev/xics/icp-hv.c index 253dce98c16e..9518d367a64f 100644 --- a/trunk/arch/powerpc/sysdev/xics/icp-hv.c +++ b/trunk/arch/powerpc/sysdev/xics/icp-hv.c @@ -27,50 +27,33 @@ static inline unsigned int icp_hv_get_xirr(unsigned char cppr) { unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; long rc; - unsigned int ret = XICS_IRQ_SPURIOUS; rc = plpar_hcall(H_XIRR, retbuf, cppr); - if (rc == H_SUCCESS) { - ret = (unsigned int)retbuf[0]; - } else { - pr_err("%s: bad return code xirr cppr=0x%x returned %ld\n", - __func__, cppr, rc); - WARN_ON_ONCE(1); - } - - return ret; + if (rc != H_SUCCESS) + panic(" bad return code xirr - rc = %lx\n", rc); + return (unsigned int)retbuf[0]; } -static inline void icp_hv_set_cppr(u8 value) +static inline void icp_hv_set_xirr(unsigned int value) { - long rc = plpar_hcall_norets(H_CPPR, value); - if (rc != H_SUCCESS) { - pr_err("%s: bad return code cppr cppr=0x%x returned %ld\n", - __func__, value, rc); - WARN_ON_ONCE(1); - } + long rc = plpar_hcall_norets(H_EOI, value); + if (rc != H_SUCCESS) + panic("bad return code EOI - rc = %ld, value=%x\n", rc, value); } -static inline void icp_hv_set_xirr(unsigned int value) +static inline void icp_hv_set_cppr(u8 value) { - long rc = plpar_hcall_norets(H_EOI, value); - if (rc != H_SUCCESS) { - pr_err("%s: bad return code eoi xirr=0x%x returned %ld\n", - __func__, value, rc); - WARN_ON_ONCE(1); - icp_hv_set_cppr(value >> 24); - } + long rc = plpar_hcall_norets(H_CPPR, value); + if (rc != H_SUCCESS) + panic("bad return code cppr - rc = %lx\n", rc); } static inline void icp_hv_set_qirr(int n_cpu , u8 value) { - int hw_cpu = get_hard_smp_processor_id(n_cpu); - long rc = plpar_hcall_norets(H_IPI, hw_cpu, value); - if (rc != H_SUCCESS) { - pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x " - "returned %ld\n", __func__, n_cpu, hw_cpu, value, rc); - WARN_ON_ONCE(1); - } + 
long rc = plpar_hcall_norets(H_IPI, get_hard_smp_processor_id(n_cpu), + value); + if (rc != H_SUCCESS) + panic("bad return code qirr - rc = %lx\n", rc); } static void icp_hv_eoi(struct irq_data *d) diff --git a/trunk/arch/powerpc/sysdev/xics/xics-common.c b/trunk/arch/powerpc/sysdev/xics/xics-common.c index d72eda6a4c05..63762c672a03 100644 --- a/trunk/arch/powerpc/sysdev/xics/xics-common.c +++ b/trunk/arch/powerpc/sysdev/xics/xics-common.c @@ -137,7 +137,7 @@ static void xics_request_ipi(void) * IPIs are marked IRQF_PERCPU. The handler was set in map. */ BUG_ON(request_irq(ipi, icp_ops->ipi_action, - IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL)); + IRQF_PERCPU, "IPI", NULL)); } int __init xics_smp_probe(void) diff --git a/trunk/arch/powerpc/xmon/xmon.c b/trunk/arch/powerpc/xmon/xmon.c index cb95eea74d3d..03a217ae3be0 100644 --- a/trunk/arch/powerpc/xmon/xmon.c +++ b/trunk/arch/powerpc/xmon/xmon.c @@ -228,11 +228,13 @@ Commands:\n\ t print backtrace\n\ x exit monitor and recover\n\ X exit monitor and dont recover\n" -#if defined(CONFIG_PPC64) && !defined(CONFIG_PPC_BOOK3E) +#ifdef CONFIG_PPC64 " u dump segment table or SLB\n" -#elif defined(CONFIG_PPC_STD_MMU_32) +#endif +#ifdef CONFIG_PPC_STD_MMU_32 " u dump segment registers\n" -#elif defined(CONFIG_44x) || defined(CONFIG_PPC_BOOK3E) +#endif +#ifdef CONFIG_44x " u dump TLB\n" #endif " ? help\n" @@ -338,7 +340,7 @@ int cpus_are_in_xmon(void) static inline int unrecoverable_excp(struct pt_regs *regs) { -#if defined(CONFIG_4xx) || defined(CONFIG_PPC_BOOK3E) +#if defined(CONFIG_4xx) || defined(CONFIG_BOOK3E) /* We have no MSR_RI bit on 4xx or Book3e, so we simply return false */ return 0; #else @@ -883,11 +885,13 @@ cmds(struct pt_regs *excp) case 'u': dump_segments(); break; -#elif defined(CONFIG_4xx) +#endif +#ifdef CONFIG_4xx case 'u': dump_tlb_44x(); break; -#elif defined(CONFIG_PPC_BOOK3E) +#endif +#ifdef CONFIG_PPC_BOOK3E case 'u': dump_tlb_book3e(); break; diff --git a/trunk/arch/s390/Kconfig b/trunk/arch/s390/Kconfig index d48ede334434..373679b3744a 100644 --- a/trunk/arch/s390/Kconfig +++ b/trunk/arch/s390/Kconfig @@ -92,9 +92,6 @@ config S390 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 select HAVE_RCU_TABLE_FREE if SMP select ARCH_SAVE_PAGE_KEYS if HIBERNATION - select HAVE_MEMBLOCK - select HAVE_MEMBLOCK_NODE_MAP - select ARCH_DISCARD_MEMBLOCK select ARCH_INLINE_SPIN_TRYLOCK select ARCH_INLINE_SPIN_TRYLOCK_BH select ARCH_INLINE_SPIN_LOCK @@ -348,6 +345,9 @@ config WARN_DYNAMIC_STACK Say N if you are unsure. 
+config ARCH_POPULATES_NODE_MAP + def_bool y + comment "Kernel preemption" source "kernel/Kconfig.preempt" diff --git a/trunk/arch/s390/appldata/appldata_os.c b/trunk/arch/s390/appldata/appldata_os.c index 4de031d6b76c..92f1cb745d69 100644 --- a/trunk/arch/s390/appldata/appldata_os.c +++ b/trunk/arch/s390/appldata/appldata_os.c @@ -115,21 +115,21 @@ static void appldata_get_os_data(void *data) j = 0; for_each_online_cpu(i) { os_data->os_cpu[j].per_cpu_user = - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]); + cputime_to_jiffies(kstat_cpu(i).cpustat.user); os_data->os_cpu[j].per_cpu_nice = - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]); + cputime_to_jiffies(kstat_cpu(i).cpustat.nice); os_data->os_cpu[j].per_cpu_system = - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]); + cputime_to_jiffies(kstat_cpu(i).cpustat.system); os_data->os_cpu[j].per_cpu_idle = - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]); + cputime_to_jiffies(kstat_cpu(i).cpustat.idle); os_data->os_cpu[j].per_cpu_irq = - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]); + cputime_to_jiffies(kstat_cpu(i).cpustat.irq); os_data->os_cpu[j].per_cpu_softirq = - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]); + cputime_to_jiffies(kstat_cpu(i).cpustat.softirq); os_data->os_cpu[j].per_cpu_iowait = - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]); + cputime_to_jiffies(kstat_cpu(i).cpustat.iowait); os_data->os_cpu[j].per_cpu_steal = - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]); + cputime_to_jiffies(kstat_cpu(i).cpustat.steal); os_data->os_cpu[j].cpu_id = i; j++; } diff --git a/trunk/arch/s390/include/asm/cputime.h b/trunk/arch/s390/include/asm/cputime.h index c23c3900c304..081434878296 100644 --- a/trunk/arch/s390/include/asm/cputime.h +++ b/trunk/arch/s390/include/asm/cputime.h @@ -16,100 +16,114 @@ /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */ -typedef unsigned long long __nocast cputime_t; -typedef unsigned long long __nocast cputime64_t; +typedef unsigned long long cputime_t; +typedef unsigned long long cputime64_t; -static inline unsigned long __div(unsigned long long n, unsigned long base) -{ #ifndef __s390x__ + +static inline unsigned int +__div(unsigned long long n, unsigned int base) +{ register_pair rp; rp.pair = n >> 1; asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1)); return rp.subreg.odd; -#else /* __s390x__ */ - return n / base; -#endif /* __s390x__ */ } -#define cputime_one_jiffy jiffies_to_cputime(1) - -/* - * Convert cputime to jiffies and back. 
- */ -static inline unsigned long cputime_to_jiffies(const cputime_t cputime) -{ - return __div((__force unsigned long long) cputime, 4096000000ULL / HZ); -} +#else /* __s390x__ */ -static inline cputime_t jiffies_to_cputime(const unsigned int jif) +static inline unsigned int +__div(unsigned long long n, unsigned int base) { - return (__force cputime_t)(jif * (4096000000ULL / HZ)); + return n / base; } -static inline u64 cputime64_to_jiffies64(cputime64_t cputime) -{ - unsigned long long jif = (__force unsigned long long) cputime; - do_div(jif, 4096000000ULL / HZ); - return jif; -} +#endif /* __s390x__ */ -static inline cputime64_t jiffies64_to_cputime64(const u64 jif) -{ - return (__force cputime64_t)(jif * (4096000000ULL / HZ)); +#define cputime_zero (0ULL) +#define cputime_one_jiffy jiffies_to_cputime(1) +#define cputime_max ((~0UL >> 1) - 1) +#define cputime_add(__a, __b) ((__a) + (__b)) +#define cputime_sub(__a, __b) ((__a) - (__b)) +#define cputime_div(__a, __n) ({ \ + unsigned long long __div = (__a); \ + do_div(__div,__n); \ + __div; \ +}) +#define cputime_halve(__a) ((__a) >> 1) +#define cputime_eq(__a, __b) ((__a) == (__b)) +#define cputime_gt(__a, __b) ((__a) > (__b)) +#define cputime_ge(__a, __b) ((__a) >= (__b)) +#define cputime_lt(__a, __b) ((__a) < (__b)) +#define cputime_le(__a, __b) ((__a) <= (__b)) +#define cputime_to_jiffies(__ct) (__div((__ct), 4096000000ULL / HZ)) +#define cputime_to_scaled(__ct) (__ct) +#define jiffies_to_cputime(__hz) ((cputime_t)(__hz) * (4096000000ULL / HZ)) + +#define cputime64_zero (0ULL) +#define cputime64_add(__a, __b) ((__a) + (__b)) +#define cputime_to_cputime64(__ct) (__ct) + +static inline u64 +cputime64_to_jiffies64(cputime64_t cputime) +{ + do_div(cputime, 4096000000ULL / HZ); + return cputime; } /* * Convert cputime to microseconds and back. */ -static inline unsigned int cputime_to_usecs(const cputime_t cputime) +static inline unsigned int +cputime_to_usecs(const cputime_t cputime) { - return (__force unsigned long long) cputime >> 12; + return cputime_div(cputime, 4096); } -static inline cputime_t usecs_to_cputime(const unsigned int m) +static inline cputime_t +usecs_to_cputime(const unsigned int m) { - return (__force cputime_t)(m * 4096ULL); + return (cputime_t) m * 4096; } -#define usecs_to_cputime64(m) usecs_to_cputime(m) - /* * Convert cputime to milliseconds and back. */ -static inline unsigned int cputime_to_secs(const cputime_t cputime) +static inline unsigned int +cputime_to_secs(const cputime_t cputime) { - return __div((__force unsigned long long) cputime, 2048000000) >> 1; + return __div(cputime, 2048000000) >> 1; } -static inline cputime_t secs_to_cputime(const unsigned int s) +static inline cputime_t +secs_to_cputime(const unsigned int s) { - return (__force cputime_t)(s * 4096000000ULL); + return (cputime_t) s * 4096000000ULL; } /* * Convert cputime to timespec and back. 
*/ -static inline cputime_t timespec_to_cputime(const struct timespec *value) +static inline cputime_t +timespec_to_cputime(const struct timespec *value) { - unsigned long long ret = value->tv_sec * 4096000000ULL; - return (__force cputime_t)(ret + value->tv_nsec * 4096 / 1000); + return value->tv_nsec * 4096 / 1000 + (u64) value->tv_sec * 4096000000ULL; } -static inline void cputime_to_timespec(const cputime_t cputime, - struct timespec *value) +static inline void +cputime_to_timespec(const cputime_t cputime, struct timespec *value) { - unsigned long long __cputime = (__force unsigned long long) cputime; #ifndef __s390x__ register_pair rp; - rp.pair = __cputime >> 1; + rp.pair = cputime >> 1; asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL)); value->tv_nsec = rp.subreg.even * 1000 / 4096; value->tv_sec = rp.subreg.odd; #else - value->tv_nsec = (__cputime % 4096000000ULL) * 1000 / 4096; - value->tv_sec = __cputime / 4096000000ULL; + value->tv_nsec = (cputime % 4096000000ULL) * 1000 / 4096; + value->tv_sec = cputime / 4096000000ULL; #endif } @@ -118,52 +132,50 @@ static inline void cputime_to_timespec(const cputime_t cputime, * Since cputime and timeval have the same resolution (microseconds) * this is easy. */ -static inline cputime_t timeval_to_cputime(const struct timeval *value) +static inline cputime_t +timeval_to_cputime(const struct timeval *value) { - unsigned long long ret = value->tv_sec * 4096000000ULL; - return (__force cputime_t)(ret + value->tv_usec * 4096ULL); + return value->tv_usec * 4096 + (u64) value->tv_sec * 4096000000ULL; } -static inline void cputime_to_timeval(const cputime_t cputime, - struct timeval *value) +static inline void +cputime_to_timeval(const cputime_t cputime, struct timeval *value) { - unsigned long long __cputime = (__force unsigned long long) cputime; #ifndef __s390x__ register_pair rp; - rp.pair = __cputime >> 1; + rp.pair = cputime >> 1; asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL)); value->tv_usec = rp.subreg.even / 4096; value->tv_sec = rp.subreg.odd; #else - value->tv_usec = (__cputime % 4096000000ULL) / 4096; - value->tv_sec = __cputime / 4096000000ULL; + value->tv_usec = (cputime % 4096000000ULL) / 4096; + value->tv_sec = cputime / 4096000000ULL; #endif } /* * Convert cputime to clock and back. */ -static inline clock_t cputime_to_clock_t(cputime_t cputime) +static inline clock_t +cputime_to_clock_t(cputime_t cputime) { - unsigned long long clock = (__force unsigned long long) cputime; - do_div(clock, 4096000000ULL / USER_HZ); - return clock; + return cputime_div(cputime, 4096000000ULL / USER_HZ); } -static inline cputime_t clock_t_to_cputime(unsigned long x) +static inline cputime_t +clock_t_to_cputime(unsigned long x) { - return (__force cputime_t)(x * (4096000000ULL / USER_HZ)); + return (cputime_t) x * (4096000000ULL / USER_HZ); } /* * Convert cputime64 to clock. 
*/ -static inline clock_t cputime64_to_clock_t(cputime64_t cputime) +static inline clock_t +cputime64_to_clock_t(cputime64_t cputime) { - unsigned long long clock = (__force unsigned long long) cputime; - do_div(clock, 4096000000ULL / USER_HZ); - return clock; + return cputime_div(cputime, 4096000000ULL / USER_HZ); } struct s390_idle_data { diff --git a/trunk/arch/s390/include/asm/socket.h b/trunk/arch/s390/include/asm/socket.h index 67b5c1b14b51..fdff1e995c73 100644 --- a/trunk/arch/s390/include/asm/socket.h +++ b/trunk/arch/s390/include/asm/socket.h @@ -70,7 +70,4 @@ #define SO_RXQ_OVFL 40 -#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS - #endif /* _ASM_SOCKET_H */ diff --git a/trunk/arch/s390/kernel/process.c b/trunk/arch/s390/kernel/process.c index 3201ae447990..9451b210a1b4 100644 --- a/trunk/arch/s390/kernel/process.c +++ b/trunk/arch/s390/kernel/process.c @@ -91,12 +91,10 @@ static void default_idle(void) void cpu_idle(void) { for (;;) { - tick_nohz_idle_enter(); - rcu_idle_enter(); + tick_nohz_stop_sched_tick(1); while (!need_resched()) default_idle(); - rcu_idle_exit(); - tick_nohz_idle_exit(); + tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); schedule(); preempt_disable(); diff --git a/trunk/arch/s390/kernel/setup.c b/trunk/arch/s390/kernel/setup.c index f11d1b037c50..e54c4ff8abaa 100644 --- a/trunk/arch/s390/kernel/setup.c +++ b/trunk/arch/s390/kernel/setup.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include @@ -821,8 +820,7 @@ setup_memory(void) end_chunk = min(end_chunk, end_pfn); if (start_chunk >= end_chunk) continue; - memblock_add_node(PFN_PHYS(start_chunk), - PFN_PHYS(end_chunk - start_chunk), 0); + add_active_range(0, start_chunk, end_chunk); pfn = max(start_chunk, start_pfn); for (; pfn < end_chunk; pfn++) page_set_storage_key(PFN_PHYS(pfn), diff --git a/trunk/arch/s390/oprofile/hwsampler.c b/trunk/arch/s390/oprofile/hwsampler.c index 9daee91e6c3f..f43c0e4282af 100644 --- a/trunk/arch/s390/oprofile/hwsampler.c +++ b/trunk/arch/s390/oprofile/hwsampler.c @@ -22,7 +22,6 @@ #include #include "hwsampler.h" -#include "op_counter.h" #define MAX_NUM_SDB 511 #define MIN_NUM_SDB 1 @@ -897,8 +896,6 @@ static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, if (sample_data_ptr->P == 1) { /* userspace sample */ unsigned int pid = sample_data_ptr->prim_asn; - if (!counter_config.user) - goto skip_sample; rcu_read_lock(); tsk = pid_task(find_vpid(pid), PIDTYPE_PID); if (tsk) @@ -906,8 +903,6 @@ static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, rcu_read_unlock(); } else { /* kernelspace sample */ - if (!counter_config.kernel) - goto skip_sample; regs = task_pt_regs(current); } @@ -915,7 +910,7 @@ static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, oprofile_add_ext_hw_sample(sample_data_ptr->ia, regs, 0, !sample_data_ptr->P, tsk); mutex_unlock(&hws_sem); - skip_sample: + sample_data_ptr++; } } diff --git a/trunk/arch/s390/oprofile/init.c b/trunk/arch/s390/oprofile/init.c index 2297be406c61..bd58b72454cf 100644 --- a/trunk/arch/s390/oprofile/init.c +++ b/trunk/arch/s390/oprofile/init.c @@ -2,11 +2,10 @@ * arch/s390/oprofile/init.c * * S390 Version - * Copyright (C) 2002-2011 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Thomas Spatzier (tspat@de.ibm.com) * Author(s): Mahesh Salgaonkar (mahesh@linux.vnet.ibm.com) * Author(s): Heinz Graalfs (graalfs@linux.vnet.ibm.com) - * 
Author(s): Andreas Krebbel (krebbel@linux.vnet.ibm.com) * * @remark Copyright 2002-2011 OProfile authors */ @@ -15,8 +14,6 @@ #include #include #include -#include -#include #include "../../../drivers/oprofile/oprof.h" @@ -25,7 +22,6 @@ extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth); #ifdef CONFIG_64BIT #include "hwsampler.h" -#include "op_counter.h" #define DEFAULT_INTERVAL 4127518 @@ -39,41 +35,16 @@ static unsigned long oprofile_max_interval; static unsigned long oprofile_sdbt_blocks = DEFAULT_SDBT_BLOCKS; static unsigned long oprofile_sdb_blocks = DEFAULT_SDB_BLOCKS; -static int hwsampler_enabled; +static int hwsampler_file; static int hwsampler_running; /* start_mutex must be held to change */ -static int hwsampler_available; static struct oprofile_operations timer_ops; -struct op_counter_config counter_config; - -enum __force_cpu_type { - reserved = 0, /* do not force */ - timer, -}; -static int force_cpu_type; - -static int set_cpu_type(const char *str, struct kernel_param *kp) -{ - if (!strcmp(str, "timer")) { - force_cpu_type = timer; - printk(KERN_INFO "oprofile: forcing timer to be returned " - "as cpu type\n"); - } else { - force_cpu_type = 0; - } - - return 0; -} -module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0); -MODULE_PARM_DESC(cpu_type, "Force legacy basic mode sampling" - "(report cpu_type \"timer\""); - static int oprofile_hwsampler_start(void) { int retval; - hwsampler_running = hwsampler_enabled; + hwsampler_running = hwsampler_file; if (!hwsampler_running) return timer_ops.start(); @@ -101,16 +72,10 @@ static void oprofile_hwsampler_stop(void) return; } -/* - * File ops used for: - * /dev/oprofile/0/enabled - * /dev/oprofile/hwsampling/hwsampler (cpu_type = timer) - */ - static ssize_t hwsampler_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { - return oprofilefs_ulong_to_user(hwsampler_enabled, buf, count, offset); + return oprofilefs_ulong_to_user(hwsampler_file, buf, count, offset); } static ssize_t hwsampler_write(struct file *file, char const __user *buf, @@ -126,9 +91,6 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf, if (retval <= 0) return retval; - if (val != 0 && val != 1) - return -EINVAL; - if (oprofile_started) /* * save to do without locking as we set @@ -137,7 +99,7 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf, */ return -EBUSY; - hwsampler_enabled = val; + hwsampler_file = val; return count; } @@ -147,311 +109,38 @@ static const struct file_operations hwsampler_fops = { .write = hwsampler_write, }; -/* - * File ops used for: - * /dev/oprofile/0/count - * /dev/oprofile/hwsampling/hw_interval (cpu_type = timer) - * - * Make sure that the value is within the hardware range. 
- */ - -static ssize_t hw_interval_read(struct file *file, char __user *buf, - size_t count, loff_t *offset) -{ - return oprofilefs_ulong_to_user(oprofile_hw_interval, buf, - count, offset); -} - -static ssize_t hw_interval_write(struct file *file, char const __user *buf, - size_t count, loff_t *offset) -{ - unsigned long val; - int retval; - - if (*offset) - return -EINVAL; - retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) - return retval; - if (val < oprofile_min_interval) - oprofile_hw_interval = oprofile_min_interval; - else if (val > oprofile_max_interval) - oprofile_hw_interval = oprofile_max_interval; - else - oprofile_hw_interval = val; - - return count; -} - -static const struct file_operations hw_interval_fops = { - .read = hw_interval_read, - .write = hw_interval_write, -}; - -/* - * File ops used for: - * /dev/oprofile/0/event - * Only a single event with number 0 is supported with this counter. - * - * /dev/oprofile/0/unit_mask - * This is a dummy file needed by the user space tools. - * No value other than 0 is accepted or returned. - */ - -static ssize_t hwsampler_zero_read(struct file *file, char __user *buf, - size_t count, loff_t *offset) -{ - return oprofilefs_ulong_to_user(0, buf, count, offset); -} - -static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf, - size_t count, loff_t *offset) -{ - unsigned long val; - int retval; - - if (*offset) - return -EINVAL; - - retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) - return retval; - if (val != 0) - return -EINVAL; - return count; -} - -static const struct file_operations zero_fops = { - .read = hwsampler_zero_read, - .write = hwsampler_zero_write, -}; - -/* /dev/oprofile/0/kernel file ops. */ - -static ssize_t hwsampler_kernel_read(struct file *file, char __user *buf, - size_t count, loff_t *offset) -{ - return oprofilefs_ulong_to_user(counter_config.kernel, - buf, count, offset); -} - -static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf, - size_t count, loff_t *offset) -{ - unsigned long val; - int retval; - - if (*offset) - return -EINVAL; - - retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) - return retval; - - if (val != 0 && val != 1) - return -EINVAL; - - counter_config.kernel = val; - - return count; -} - -static const struct file_operations kernel_fops = { - .read = hwsampler_kernel_read, - .write = hwsampler_kernel_write, -}; - -/* /dev/oprofile/0/user file ops. */ - -static ssize_t hwsampler_user_read(struct file *file, char __user *buf, - size_t count, loff_t *offset) -{ - return oprofilefs_ulong_to_user(counter_config.user, - buf, count, offset); -} - -static ssize_t hwsampler_user_write(struct file *file, char const __user *buf, - size_t count, loff_t *offset) -{ - unsigned long val; - int retval; - - if (*offset) - return -EINVAL; - - retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) - return retval; - - if (val != 0 && val != 1) - return -EINVAL; - - counter_config.user = val; - - return count; -} - -static const struct file_operations user_fops = { - .read = hwsampler_user_read, - .write = hwsampler_user_write, -}; - - -/* - * File ops used for: /dev/oprofile/timer/enabled - * The value always has to be the inverted value of hwsampler_enabled. So - * no separate variable is created. That way we do not need locking. 
- */ - -static ssize_t timer_enabled_read(struct file *file, char __user *buf, - size_t count, loff_t *offset) -{ - return oprofilefs_ulong_to_user(!hwsampler_enabled, buf, count, offset); -} - -static ssize_t timer_enabled_write(struct file *file, char const __user *buf, - size_t count, loff_t *offset) -{ - unsigned long val; - int retval; - - if (*offset) - return -EINVAL; - - retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) - return retval; - - if (val != 0 && val != 1) - return -EINVAL; - - /* Timer cannot be disabled without having hardware sampling. */ - if (val == 0 && !hwsampler_available) - return -EINVAL; - - if (oprofile_started) - /* - * save to do without locking as we set - * hwsampler_running in start() when start_mutex is - * held - */ - return -EBUSY; - - hwsampler_enabled = !val; - - return count; -} - -static const struct file_operations timer_enabled_fops = { - .read = timer_enabled_read, - .write = timer_enabled_write, -}; - - static int oprofile_create_hwsampling_files(struct super_block *sb, - struct dentry *root) + struct dentry *root) { - struct dentry *dir; - - dir = oprofilefs_mkdir(sb, root, "timer"); - if (!dir) - return -EINVAL; - - oprofilefs_create_file(sb, dir, "enabled", &timer_enabled_fops); - - if (!hwsampler_available) - return 0; + struct dentry *hw_dir; /* reinitialize default values */ - hwsampler_enabled = 1; - counter_config.kernel = 1; - counter_config.user = 1; - - if (!force_cpu_type) { - /* - * Create the counter file system. A single virtual - * counter is created which can be used to - * enable/disable hardware sampling dynamically from - * user space. The user space will configure a single - * counter with a single event. The value of 'event' - * and 'unit_mask' are not evaluated by the kernel code - * and can only be set to 0. - */ + hwsampler_file = 1; - dir = oprofilefs_mkdir(sb, root, "0"); - if (!dir) - return -EINVAL; + hw_dir = oprofilefs_mkdir(sb, root, "hwsampling"); + if (!hw_dir) + return -EINVAL; - oprofilefs_create_file(sb, dir, "enabled", &hwsampler_fops); - oprofilefs_create_file(sb, dir, "event", &zero_fops); - oprofilefs_create_file(sb, dir, "count", &hw_interval_fops); - oprofilefs_create_file(sb, dir, "unit_mask", &zero_fops); - oprofilefs_create_file(sb, dir, "kernel", &kernel_fops); - oprofilefs_create_file(sb, dir, "user", &user_fops); - oprofilefs_create_ulong(sb, dir, "hw_sdbt_blocks", - &oprofile_sdbt_blocks); + oprofilefs_create_file(sb, hw_dir, "hwsampler", &hwsampler_fops); + oprofilefs_create_ulong(sb, hw_dir, "hw_interval", + &oprofile_hw_interval); + oprofilefs_create_ro_ulong(sb, hw_dir, "hw_min_interval", + &oprofile_min_interval); + oprofilefs_create_ro_ulong(sb, hw_dir, "hw_max_interval", + &oprofile_max_interval); + oprofilefs_create_ulong(sb, hw_dir, "hw_sdbt_blocks", + &oprofile_sdbt_blocks); - } else { - /* - * Hardware sampling can be used but the cpu_type is - * forced to timer in order to deal with legacy user - * space tools. The /dev/oprofile/hwsampling fs is - * provided in that case. 
- */ - dir = oprofilefs_mkdir(sb, root, "hwsampling"); - if (!dir) - return -EINVAL; - - oprofilefs_create_file(sb, dir, "hwsampler", - &hwsampler_fops); - oprofilefs_create_file(sb, dir, "hw_interval", - &hw_interval_fops); - oprofilefs_create_ro_ulong(sb, dir, "hw_min_interval", - &oprofile_min_interval); - oprofilefs_create_ro_ulong(sb, dir, "hw_max_interval", - &oprofile_max_interval); - oprofilefs_create_ulong(sb, dir, "hw_sdbt_blocks", - &oprofile_sdbt_blocks); - } return 0; } static int oprofile_hwsampler_init(struct oprofile_operations *ops) { - /* - * Initialize the timer mode infrastructure as well in order - * to be able to switch back dynamically. oprofile_timer_init - * is not supposed to fail. - */ - if (oprofile_timer_init(ops)) - BUG(); - - memcpy(&timer_ops, ops, sizeof(timer_ops)); - ops->create_files = oprofile_create_hwsampling_files; - - /* - * If the user space tools do not support newer cpu types, - * the force_cpu_type module parameter - * can be used to always return \"timer\" as cpu type. - */ - if (force_cpu_type != timer) { - struct cpuid id; - - get_cpu_id (&id); - - switch (id.machine) { - case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break; - case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break; - default: return -ENODEV; - } - } - if (hwsampler_setup()) return -ENODEV; /* - * Query the range for the sampling interval from the - * hardware. + * create hwsampler files only if hwsampler_setup() succeeds. */ oprofile_min_interval = hwsampler_query_min_interval(); if (oprofile_min_interval == 0) @@ -466,17 +155,23 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops) if (oprofile_hw_interval > oprofile_max_interval) oprofile_hw_interval = oprofile_max_interval; - printk(KERN_INFO "oprofile: System z hardware sampling " - "facility found.\n"); + if (oprofile_timer_init(ops)) + return -ENODEV; + + printk(KERN_INFO "oprofile: using hardware sampling\n"); + + memcpy(&timer_ops, ops, sizeof(timer_ops)); ops->start = oprofile_hwsampler_start; ops->stop = oprofile_hwsampler_stop; + ops->create_files = oprofile_create_hwsampling_files; return 0; } static void oprofile_hwsampler_exit(void) { + oprofile_timer_exit(); hwsampler_shutdown(); } @@ -487,15 +182,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) ops->backtrace = s390_backtrace; #ifdef CONFIG_64BIT - - /* - * -ENODEV is not reported to the caller. The module itself - * will use the timer mode sampling as fallback and this is - * always available. - */ - hwsampler_available = oprofile_hwsampler_init(ops) == 0; - - return 0; + return oprofile_hwsampler_init(ops); #else return -ENODEV; #endif diff --git a/trunk/arch/s390/oprofile/op_counter.h b/trunk/arch/s390/oprofile/op_counter.h deleted file mode 100644 index 1a8d3ca09014..000000000000 --- a/trunk/arch/s390/oprofile/op_counter.h +++ /dev/null @@ -1,23 +0,0 @@ -/** - * arch/s390/oprofile/op_counter.h - * - * Copyright (C) 2011 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Andreas Krebbel (krebbel@linux.vnet.ibm.com) - * - * @remark Copyright 2011 OProfile authors - */ - -#ifndef OP_COUNTER_H -#define OP_COUNTER_H - -struct op_counter_config { - /* `enabled' maps to the hwsampler_file variable. */ - /* `count' maps to the oprofile_hw_interval variable. */ - /* `event' and `unit_mask' are unused. 
*/ - unsigned long kernel; - unsigned long user; -}; - -extern struct op_counter_config counter_config; - -#endif /* OP_COUNTER_H */ diff --git a/trunk/arch/score/Kconfig b/trunk/arch/score/Kconfig index 8b0c9464aa9d..df169e84db4e 100644 --- a/trunk/arch/score/Kconfig +++ b/trunk/arch/score/Kconfig @@ -4,9 +4,6 @@ config SCORE def_bool y select HAVE_GENERIC_HARDIRQS select GENERIC_IRQ_SHOW - select HAVE_MEMBLOCK - select HAVE_MEMBLOCK_NODE_MAP - select ARCH_DISCARD_MEMBLOCK choice prompt "System type" @@ -63,6 +60,9 @@ config 32BIT config ARCH_FLATMEM_ENABLE def_bool y +config ARCH_POPULATES_NODE_MAP + def_bool y + source "mm/Kconfig" config MEMORY_START diff --git a/trunk/arch/score/kernel/setup.c b/trunk/arch/score/kernel/setup.c index b48459afefdd..6f898c057878 100644 --- a/trunk/arch/score/kernel/setup.c +++ b/trunk/arch/score/kernel/setup.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include @@ -55,8 +54,7 @@ static void __init bootmem_init(void) /* Initialize the boot-time allocator with low memory only. */ bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn, min_low_pfn, max_low_pfn); - memblock_add_node(PFN_PHYS(min_low_pfn), - PFN_PHYS(max_low_pfn - min_low_pfn), 0); + add_active_range(0, min_low_pfn, max_low_pfn); free_bootmem(PFN_PHYS(start_pfn), (max_low_pfn - start_pfn) << PAGE_SHIFT); diff --git a/trunk/arch/sh/Kconfig b/trunk/arch/sh/Kconfig index 47a2f1c2cb0d..5629e2099130 100644 --- a/trunk/arch/sh/Kconfig +++ b/trunk/arch/sh/Kconfig @@ -4,7 +4,6 @@ config SUPERH select CLKDEV_LOOKUP select HAVE_IDE if HAS_IOPORT select HAVE_MEMBLOCK - select HAVE_MEMBLOCK_NODE_MAP select HAVE_OPROFILE select HAVE_GENERIC_DMA_COHERENT select HAVE_ARCH_TRACEHOOK diff --git a/trunk/arch/sh/include/asm/memblock.h b/trunk/arch/sh/include/asm/memblock.h new file mode 100644 index 000000000000..e87063fad2ea --- /dev/null +++ b/trunk/arch/sh/include/asm/memblock.h @@ -0,0 +1,4 @@ +#ifndef __ASM_SH_MEMBLOCK_H +#define __ASM_SH_MEMBLOCK_H + +#endif /* __ASM_SH_MEMBLOCK_H */ diff --git a/trunk/arch/sh/kernel/idle.c b/trunk/arch/sh/kernel/idle.c index 406508d4ce74..db4ecd731a00 100644 --- a/trunk/arch/sh/kernel/idle.c +++ b/trunk/arch/sh/kernel/idle.c @@ -89,8 +89,7 @@ void cpu_idle(void) /* endless idle loop with no priority at all */ while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); + tick_nohz_stop_sched_tick(1); while (!need_resched()) { check_pgt_cache(); @@ -112,8 +111,7 @@ void cpu_idle(void) start_critical_timings(); } - rcu_idle_exit(); - tick_nohz_idle_exit(); + tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); schedule(); preempt_disable(); diff --git a/trunk/arch/sh/kernel/machine_kexec.c b/trunk/arch/sh/kernel/machine_kexec.c index 9fea49f6e667..c5a33f007f88 100644 --- a/trunk/arch/sh/kernel/machine_kexec.c +++ b/trunk/arch/sh/kernel/machine_kexec.c @@ -157,6 +157,9 @@ void __init reserve_crashkernel(void) unsigned long long crash_size, crash_base; int ret; + /* this is necessary because of memblock_phys_mem_size() */ + memblock_analyze(); + ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), &crash_size, &crash_base); if (ret == 0 && crash_size > 0) { diff --git a/trunk/arch/sh/kernel/setup.c b/trunk/arch/sh/kernel/setup.c index 7b57bf1dc855..1a0e946679a4 100644 --- a/trunk/arch/sh/kernel/setup.c +++ b/trunk/arch/sh/kernel/setup.c @@ -230,8 +230,7 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn, pmb_bolt_mapping((unsigned long)__va(start), start, end - start, PAGE_KERNEL); - 
memblock_set_node(PFN_PHYS(start_pfn), - PFN_PHYS(end_pfn - start_pfn), nid); + add_active_range(nid, start_pfn, end_pfn); } void __init __weak plat_early_device_setup(void) diff --git a/trunk/arch/sh/mm/Kconfig b/trunk/arch/sh/mm/Kconfig index cb8f9920f4dd..c3e61b366493 100644 --- a/trunk/arch/sh/mm/Kconfig +++ b/trunk/arch/sh/mm/Kconfig @@ -143,6 +143,9 @@ config MAX_ACTIVE_REGIONS CPU_SUBTYPE_SH7785) default "1" +config ARCH_POPULATES_NODE_MAP + def_bool y + config ARCH_SELECT_MEMORY_MODEL def_bool y diff --git a/trunk/arch/sh/mm/init.c b/trunk/arch/sh/mm/init.c index 82cc576fab15..939ca0f356f6 100644 --- a/trunk/arch/sh/mm/init.c +++ b/trunk/arch/sh/mm/init.c @@ -324,6 +324,7 @@ void __init paging_init(void) unsigned long vaddr, end; int nid; + memblock_init(); sh_mv.mv_mem_init(); early_reserve_mem(); @@ -336,7 +337,7 @@ void __init paging_init(void) sh_mv.mv_mem_reserve(); memblock_enforce_memory_limit(memory_limit); - memblock_allow_resize(); + memblock_analyze(); memblock_dump_all(); diff --git a/trunk/arch/sh/oprofile/common.c b/trunk/arch/sh/oprofile/common.c index e4dd5d5a1115..b4c2d2b946dd 100644 --- a/trunk/arch/sh/oprofile/common.c +++ b/trunk/arch/sh/oprofile/common.c @@ -49,7 +49,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) return oprofile_perf_init(ops); } -void oprofile_arch_exit(void) +void __exit oprofile_arch_exit(void) { oprofile_perf_exit(); kfree(sh_pmu_op_name); @@ -60,5 +60,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) ops->backtrace = sh_backtrace; return -ENODEV; } -void oprofile_arch_exit(void) {} +void __exit oprofile_arch_exit(void) {} #endif /* CONFIG_HW_PERF_EVENTS */ diff --git a/trunk/arch/sparc/Kconfig b/trunk/arch/sparc/Kconfig index 70ae9d81870e..f92602e86607 100644 --- a/trunk/arch/sparc/Kconfig +++ b/trunk/arch/sparc/Kconfig @@ -43,7 +43,6 @@ config SPARC64 select HAVE_KPROBES select HAVE_RCU_TABLE_FREE if SMP select HAVE_MEMBLOCK - select HAVE_MEMBLOCK_NODE_MAP select HAVE_SYSCALL_WRAPPERS select HAVE_DYNAMIC_FTRACE select HAVE_FTRACE_MCOUNT_RECORD @@ -353,6 +352,9 @@ config NODES_SPAN_OTHER_NODES def_bool y depends on NEED_MULTIPLE_NODES +config ARCH_POPULATES_NODE_MAP + def_bool y if SPARC64 + config ARCH_SELECT_MEMORY_MODEL def_bool y if SPARC64 diff --git a/trunk/arch/sparc/include/asm/memblock.h b/trunk/arch/sparc/include/asm/memblock.h new file mode 100644 index 000000000000..c67b047ef85e --- /dev/null +++ b/trunk/arch/sparc/include/asm/memblock.h @@ -0,0 +1,8 @@ +#ifndef _SPARC64_MEMBLOCK_H +#define _SPARC64_MEMBLOCK_H + +#include + +#define MEMBLOCK_DBG(fmt...) 
prom_printf(fmt) + +#endif /* !(_SPARC64_MEMBLOCK_H) */ diff --git a/trunk/arch/sparc/include/asm/socket.h b/trunk/arch/sparc/include/asm/socket.h index 8af1b64168b3..9d3fefcff2f5 100644 --- a/trunk/arch/sparc/include/asm/socket.h +++ b/trunk/arch/sparc/include/asm/socket.h @@ -58,9 +58,6 @@ #define SO_RXQ_OVFL 0x0024 -#define SO_WIFI_STATUS 0x0025 -#define SCM_WIFI_STATUS SO_WIFI_STATUS - /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x5001 #define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002 diff --git a/trunk/arch/sparc/kernel/process_64.c b/trunk/arch/sparc/kernel/process_64.c index 39d8b05201a2..3739a06a76cb 100644 --- a/trunk/arch/sparc/kernel/process_64.c +++ b/trunk/arch/sparc/kernel/process_64.c @@ -95,14 +95,12 @@ void cpu_idle(void) set_thread_flag(TIF_POLLING_NRFLAG); while(1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); + tick_nohz_stop_sched_tick(1); while (!need_resched() && !cpu_is_offline(cpu)) sparc64_yield(cpu); - rcu_idle_exit(); - tick_nohz_idle_exit(); + tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); diff --git a/trunk/arch/sparc/kernel/setup_32.c b/trunk/arch/sparc/kernel/setup_32.c index ffb883ddd0f0..fe1e3fc31bc5 100644 --- a/trunk/arch/sparc/kernel/setup_32.c +++ b/trunk/arch/sparc/kernel/setup_32.c @@ -84,7 +84,7 @@ static void prom_sync_me(void) prom_printf("PROM SYNC COMMAND...\n"); show_free_areas(0); - if (!is_idle_task(current)) { + if(current->pid != 0) { local_irq_enable(); sys_sync(); local_irq_disable(); diff --git a/trunk/arch/sparc/kernel/sys_sparc_64.c b/trunk/arch/sparc/kernel/sys_sparc_64.c index 232df9949530..441521ad8a3f 100644 --- a/trunk/arch/sparc/kernel/sys_sparc_64.c +++ b/trunk/arch/sparc/kernel/sys_sparc_64.c @@ -368,11 +368,11 @@ static unsigned long mmap_rnd(void) if (current->flags & PF_RANDOMIZE) { unsigned long val = get_random_int(); if (test_thread_flag(TIF_32BIT)) - rnd = (val % (1UL << (23UL-PAGE_SHIFT))); + rnd = (val % (1UL << (22UL-PAGE_SHIFT))); else - rnd = (val % (1UL << (30UL-PAGE_SHIFT))); + rnd = (val % (1UL << (29UL-PAGE_SHIFT))); } - return rnd << PAGE_SHIFT; + return (rnd << PAGE_SHIFT) * 2; } void arch_pick_mmap_layout(struct mm_struct *mm) diff --git a/trunk/arch/sparc/mm/init_64.c b/trunk/arch/sparc/mm/init_64.c index b3f5e7dfea51..8e073d802139 100644 --- a/trunk/arch/sparc/mm/init_64.c +++ b/trunk/arch/sparc/mm/init_64.c @@ -790,7 +790,7 @@ static int find_node(unsigned long addr) return -1; } -static u64 memblock_nid_range(u64 start, u64 end, int *nid) +u64 memblock_nid_range(u64 start, u64 end, int *nid) { *nid = find_node(start); start += PAGE_SIZE; @@ -808,7 +808,7 @@ static u64 memblock_nid_range(u64 start, u64 end, int *nid) return start; } #else -static u64 memblock_nid_range(u64 start, u64 end, int *nid) +u64 memblock_nid_range(u64 start, u64 end, int *nid) { *nid = 0; return end; @@ -816,7 +816,7 @@ static u64 memblock_nid_range(u64 start, u64 end, int *nid) #endif /* This must be invoked after performing all of the necessary - * memblock_set_node() calls for 'nid'. We need to be able to get + * add_active_range() calls for 'nid'. We need to be able to get * correct data from get_pfn_range_for_nid(). 
*/ static void __init allocate_node_data(int nid) @@ -987,11 +987,14 @@ static void __init add_node_ranges(void) this_end = memblock_nid_range(start, end, &nid); - numadbg("Setting memblock NUMA node nid[%d] " + numadbg("Adding active range nid[%d] " "start[%lx] end[%lx]\n", nid, start, this_end); - memblock_set_node(start, this_end - start, nid); + add_active_range(nid, + start >> PAGE_SHIFT, + this_end >> PAGE_SHIFT); + start = this_end; } } @@ -1279,6 +1282,7 @@ static void __init bootmem_init_nonnuma(void) { unsigned long top_of_ram = memblock_end_of_DRAM(); unsigned long total_ram = memblock_phys_mem_size(); + struct memblock_region *reg; numadbg("bootmem_init_nonnuma()\n"); @@ -1288,8 +1292,20 @@ static void __init bootmem_init_nonnuma(void) (top_of_ram - total_ram) >> 20); init_node_masks_nonnuma(); - memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0); + + for_each_memblock(memory, reg) { + unsigned long start_pfn, end_pfn; + + if (!reg->size) + continue; + + start_pfn = memblock_region_memory_base_pfn(reg); + end_pfn = memblock_region_memory_end_pfn(reg); + add_active_range(0, start_pfn, end_pfn); + } + allocate_node_data(0); + node_set_online(0); } @@ -1753,6 +1769,8 @@ void __init paging_init(void) sun4v_ktsb_init(); } + memblock_init(); + /* Find available physical memory... * * Read it twice in order to work around a bug in openfirmware. @@ -1778,7 +1796,7 @@ void __init paging_init(void) memblock_enforce_memory_limit(cmdline_memory_size); - memblock_allow_resize(); + memblock_analyze(); memblock_dump_all(); set_bit(0, mmu_context_bmap); diff --git a/trunk/arch/tile/kernel/process.c b/trunk/arch/tile/kernel/process.c index 4c1ac6e5347a..9c45d8bbdf57 100644 --- a/trunk/arch/tile/kernel/process.c +++ b/trunk/arch/tile/kernel/process.c @@ -85,8 +85,7 @@ void cpu_idle(void) /* endless idle loop with no priority at all */ while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); + tick_nohz_stop_sched_tick(1); while (!need_resched()) { if (cpu_is_offline(cpu)) BUG(); /* no HOTPLUG_CPU */ @@ -106,8 +105,7 @@ void cpu_idle(void) local_irq_enable(); current_thread_info()->status |= TS_POLLING; } - rcu_idle_exit(); - tick_nohz_idle_exit(); + tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); schedule(); preempt_disable(); diff --git a/trunk/arch/tile/mm/fault.c b/trunk/arch/tile/mm/fault.c index c1eaaa1fcc20..25b7b90fd620 100644 --- a/trunk/arch/tile/mm/fault.c +++ b/trunk/arch/tile/mm/fault.c @@ -54,7 +54,7 @@ static noinline void force_sig_info_fault(const char *type, int si_signo, if (unlikely(tsk->pid < 2)) { panic("Signal %d (code %d) at %#lx sent to %s!", si_signo, si_code & 0xffff, address, - is_idle_task(tsk) ? "the idle task" : "init"); + tsk->pid ? "init" : "the idle task"); } info.si_signo = si_signo; @@ -515,7 +515,7 @@ static int handle_page_fault(struct pt_regs *regs, if (unlikely(tsk->pid < 2)) { panic("Kernel page fault running %s!", - is_idle_task(tsk) ? "the idle task" : "init"); + tsk->pid ? 
"init" : "the idle task"); } /* diff --git a/trunk/arch/um/kernel/process.c b/trunk/arch/um/kernel/process.c index 69f24905abdc..c5338351aecd 100644 --- a/trunk/arch/um/kernel/process.c +++ b/trunk/arch/um/kernel/process.c @@ -246,12 +246,10 @@ void default_idle(void) if (need_resched()) schedule(); - tick_nohz_idle_enter(); - rcu_idle_enter(); + tick_nohz_stop_sched_tick(1); nsecs = disable_timer(); idle_sleep(nsecs); - rcu_idle_exit(); - tick_nohz_idle_exit(); + tick_nohz_restart_sched_tick(); } } diff --git a/trunk/arch/um/kernel/time.c b/trunk/arch/um/kernel/time.c index 82a6e22f1f35..a08d9fab81f2 100644 --- a/trunk/arch/um/kernel/time.c +++ b/trunk/arch/um/kernel/time.c @@ -75,6 +75,8 @@ static struct clocksource itimer_clocksource = { .rating = 300, .read = itimer_read, .mask = CLOCKSOURCE_MASK(64), + .mult = 1000, + .shift = 0, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -92,9 +94,9 @@ static void __init setup_itimer(void) clockevent_delta2ns(60 * HZ, &itimer_clockevent); itimer_clockevent.min_delta_ns = clockevent_delta2ns(1, &itimer_clockevent); - err = clocksource_register_hz(&itimer_clocksource, USEC_PER_SEC); + err = clocksource_register(&itimer_clocksource); if (err) { - printk(KERN_ERR "clocksource_register_hz returned %d\n", err); + printk(KERN_ERR "clocksource_register returned %d\n", err); return; } clockevents_register_device(&itimer_clockevent); diff --git a/trunk/arch/unicore32/kernel/process.c b/trunk/arch/unicore32/kernel/process.c index 52edc2b62873..ba401df971ed 100644 --- a/trunk/arch/unicore32/kernel/process.c +++ b/trunk/arch/unicore32/kernel/process.c @@ -55,8 +55,7 @@ void cpu_idle(void) { /* endless idle loop with no priority at all */ while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); + tick_nohz_stop_sched_tick(1); while (!need_resched()) { local_irq_disable(); stop_critical_timings(); @@ -64,8 +63,7 @@ void cpu_idle(void) local_irq_enable(); start_critical_timings(); } - rcu_idle_exit(); - tick_nohz_idle_exit(); + tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); schedule(); preempt_disable(); diff --git a/trunk/arch/unicore32/kernel/setup.c b/trunk/arch/unicore32/kernel/setup.c index 673d7a89d8ff..471b6bca8da4 100644 --- a/trunk/arch/unicore32/kernel/setup.c +++ b/trunk/arch/unicore32/kernel/setup.c @@ -37,7 +37,6 @@ #include #include #include -#include #include "setup.h" diff --git a/trunk/arch/unicore32/mm/init.c b/trunk/arch/unicore32/mm/init.c index de186bde8975..3b379cddbc64 100644 --- a/trunk/arch/unicore32/mm/init.c +++ b/trunk/arch/unicore32/mm/init.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include "mm.h" @@ -246,6 +245,7 @@ void __init uc32_memblock_init(struct meminfo *mi) sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); + memblock_init(); for (i = 0; i < mi->nr_banks; i++) memblock_add(mi->bank[i].start, mi->bank[i].size); @@ -264,7 +264,7 @@ void __init uc32_memblock_init(struct meminfo *mi) uc32_mm_memblock_reserve(); - memblock_allow_resize(); + memblock_analyze(); memblock_dump_all(); } diff --git a/trunk/arch/unicore32/mm/mmu.c b/trunk/arch/unicore32/mm/mmu.c index 43c20b40e444..3e5c3e5a0b45 100644 --- a/trunk/arch/unicore32/mm/mmu.c +++ b/trunk/arch/unicore32/mm/mmu.c @@ -25,7 +25,6 @@ #include #include #include -#include #include diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig index 5731eb70e0a0..efb42949cc09 100644 --- a/trunk/arch/x86/Kconfig +++ b/trunk/arch/x86/Kconfig @@ -26,8 +26,6 @@ config X86 select HAVE_IOREMAP_PROT select HAVE_KPROBES 
select HAVE_MEMBLOCK - select HAVE_MEMBLOCK_NODE_MAP - select ARCH_DISCARD_MEMBLOCK select ARCH_WANT_OPTIONAL_GPIOLIB select ARCH_WANT_FRAME_POINTERS select HAVE_DMA_ATTRS @@ -206,6 +204,9 @@ config ZONE_DMA32 bool default X86_64 +config ARCH_POPULATES_NODE_MAP + def_bool y + config AUDIT_ARCH bool default X86_64 @@ -342,7 +343,6 @@ config X86_EXTENDED_PLATFORM If you enable this option then you'll be able to select support for the following (non-PC) 64 bit x86 platforms: - Numascale NumaChip ScaleMP vSMP SGI Ultraviolet @@ -351,18 +351,6 @@ config X86_EXTENDED_PLATFORM endif # This is an alphabetically sorted list of 64 bit extended platforms # Please maintain the alphabetic order if and when there are additions -config X86_NUMACHIP - bool "Numascale NumaChip" - depends on X86_64 - depends on X86_EXTENDED_PLATFORM - depends on NUMA - depends on SMP - depends on X86_X2APIC - depends on !EDAC_AMD64 - ---help--- - Adds support for Numascale NumaChip large-SMP systems. Needed to - enable more than ~168 cores. - If you don't have one of these, you should say N here. config X86_VSMP bool "ScaleMP vSMP" diff --git a/trunk/arch/x86/ia32/ia32entry.S b/trunk/arch/x86/ia32/ia32entry.S index 3e274564f6bf..a6253ec1b284 100644 --- a/trunk/arch/x86/ia32/ia32entry.S +++ b/trunk/arch/x86/ia32/ia32entry.S @@ -134,7 +134,7 @@ ENTRY(ia32_sysenter_target) CFI_REL_OFFSET rsp,0 pushfq_cfi /*CFI_REL_OFFSET rflags,0*/ - movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d + movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d CFI_REGISTER rip,r10 pushq_cfi $__USER32_CS /*CFI_REL_OFFSET cs,0*/ @@ -150,8 +150,9 @@ ENTRY(ia32_sysenter_target) .section __ex_table,"a" .quad 1b,ia32_badarg .previous - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) + GET_THREAD_INFO(%r10) + orl $TS_COMPAT,TI_status(%r10) + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) CFI_REMEMBER_STATE jnz sysenter_tracesys cmpq $(IA32_NR_syscalls-1),%rax @@ -161,12 +162,13 @@ sysenter_do_call: sysenter_dispatch: call *ia32_sys_call_table(,%rax,8) movq %rax,RAX-ARGOFFSET(%rsp) + GET_THREAD_INFO(%r10) DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) + testl $_TIF_ALLWORK_MASK,TI_flags(%r10) jnz sysexit_audit sysexit_from_sys_call: - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) + andl $~TS_COMPAT,TI_status(%r10) /* clear IF, that popfq doesn't enable interrupts early */ andl $~0x200,EFLAGS-R11(%rsp) movl RIP-R11(%rsp),%edx /* User %eip */ @@ -203,7 +205,7 @@ sysexit_from_sys_call: .endm .macro auditsys_exit exit - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) jnz ia32_ret_from_sys_call TRACE_IRQS_ON sti @@ -213,11 +215,12 @@ sysexit_from_sys_call: movzbl %al,%edi /* zero-extend that into %edi */ inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */ call audit_syscall_exit + GET_THREAD_INFO(%r10) movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */ movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi cli TRACE_IRQS_OFF - testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) + testl %edi,TI_flags(%r10) jz \exit CLEAR_RREGS -ARGOFFSET jmp int_with_check @@ -235,7 +238,7 @@ sysexit_audit: sysenter_tracesys: #ifdef CONFIG_AUDITSYSCALL - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) + testl 
$(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) jz sysenter_auditsys #endif SAVE_REST @@ -306,8 +309,9 @@ ENTRY(ia32_cstar_target) .section __ex_table,"a" .quad 1b,ia32_badarg .previous - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) + GET_THREAD_INFO(%r10) + orl $TS_COMPAT,TI_status(%r10) + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) CFI_REMEMBER_STATE jnz cstar_tracesys cmpq $IA32_NR_syscalls-1,%rax @@ -317,12 +321,13 @@ cstar_do_call: cstar_dispatch: call *ia32_sys_call_table(,%rax,8) movq %rax,RAX-ARGOFFSET(%rsp) + GET_THREAD_INFO(%r10) DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) + testl $_TIF_ALLWORK_MASK,TI_flags(%r10) jnz sysretl_audit sysretl_from_sys_call: - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) + andl $~TS_COMPAT,TI_status(%r10) RESTORE_ARGS 0,-ARG_SKIP,0,0,0 movl RIP-ARGOFFSET(%rsp),%ecx CFI_REGISTER rip,rcx @@ -350,7 +355,7 @@ sysretl_audit: cstar_tracesys: #ifdef CONFIG_AUDITSYSCALL - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) jz cstar_auditsys #endif xchgl %r9d,%ebp @@ -415,8 +420,9 @@ ENTRY(ia32_syscall) /* note the registers are not zero extended to the sf. this could be a problem. */ SAVE_ARGS 0,1,0 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) + GET_THREAD_INFO(%r10) + orl $TS_COMPAT,TI_status(%r10) + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) jnz ia32_tracesys cmpq $(IA32_NR_syscalls-1),%rax ja ia32_badsys @@ -453,8 +459,8 @@ quiet_ni_syscall: CFI_ENDPROC .macro PTREGSCALL label, func, arg - ALIGN -GLOBAL(\label) + .globl \label +\label: leaq \func(%rip),%rax leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */ jmp ia32_ptregs_common @@ -471,8 +477,7 @@ GLOBAL(\label) PTREGSCALL stub32_vfork, sys_vfork, %rdi PTREGSCALL stub32_iopl, sys_iopl, %rsi - ALIGN -ia32_ptregs_common: +ENTRY(ia32_ptregs_common) popq %r11 CFI_ENDPROC CFI_STARTPROC32 simple diff --git a/trunk/arch/x86/include/asm/alternative-asm.h b/trunk/arch/x86/include/asm/alternative-asm.h index 952bd0100c5c..091508b533b4 100644 --- a/trunk/arch/x86/include/asm/alternative-asm.h +++ b/trunk/arch/x86/include/asm/alternative-asm.h @@ -4,10 +4,10 @@ #ifdef CONFIG_SMP .macro LOCK_PREFIX -672: lock +1: lock .section .smp_locks,"a" .balign 4 - .long 672b - . + .long 1b - . 
.previous .endm #else diff --git a/trunk/arch/x86/include/asm/apic.h b/trunk/arch/x86/include/asm/apic.h index 3ab9bdd87e79..1a6c09af048f 100644 --- a/trunk/arch/x86/include/asm/apic.h +++ b/trunk/arch/x86/include/asm/apic.h @@ -176,7 +176,6 @@ static inline u64 native_x2apic_icr_read(void) } extern int x2apic_phys; -extern int x2apic_preenabled; extern void check_x2apic(void); extern void enable_x2apic(void); extern void x2apic_icr_write(u32 low, u32 id); @@ -199,9 +198,6 @@ static inline void x2apic_force_phys(void) x2apic_phys = 1; } #else -static inline void disable_x2apic(void) -{ -} static inline void check_x2apic(void) { } @@ -216,7 +212,6 @@ static inline void x2apic_force_phys(void) { } -#define nox2apic 0 #define x2apic_preenabled 0 #define x2apic_supported() 0 #endif @@ -415,7 +410,6 @@ extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip); #endif #ifdef CONFIG_X86_LOCAL_APIC - static inline u32 apic_read(u32 reg) { return apic->read(reg); diff --git a/trunk/arch/x86/include/asm/apic_flat_64.h b/trunk/arch/x86/include/asm/apic_flat_64.h deleted file mode 100644 index a2d312796440..000000000000 --- a/trunk/arch/x86/include/asm/apic_flat_64.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _ASM_X86_APIC_FLAT_64_H -#define _ASM_X86_APIC_FLAT_64_H - -extern void flat_init_apic_ldr(void); - -#endif - diff --git a/trunk/arch/x86/include/asm/apicdef.h b/trunk/arch/x86/include/asm/apicdef.h index 134bba00df09..3925d8007864 100644 --- a/trunk/arch/x86/include/asm/apicdef.h +++ b/trunk/arch/x86/include/asm/apicdef.h @@ -144,7 +144,6 @@ #define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) #define APIC_BASE_MSR 0x800 -#define XAPIC_ENABLE (1UL << 11) #define X2APIC_ENABLE (1UL << 10) #ifdef CONFIG_X86_32 diff --git a/trunk/arch/x86/include/asm/bitops.h b/trunk/arch/x86/include/asm/bitops.h index b97596e2b68c..1775d6e5920e 100644 --- a/trunk/arch/x86/include/asm/bitops.h +++ b/trunk/arch/x86/include/asm/bitops.h @@ -380,8 +380,6 @@ static inline unsigned long __fls(unsigned long word) return word; } -#undef ADDR - #ifdef __KERNEL__ /** * ffs - find first set bit in word @@ -397,25 +395,10 @@ static inline unsigned long __fls(unsigned long word) static inline int ffs(int x) { int r; - -#ifdef CONFIG_X86_64 - /* - * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the - * dest reg is undefined if x==0, but their CPU architect says its - * value is written to set it to the same as before, except that the - * top 32 bits will be cleared. - * - * We cannot do this on 32 bits because at the very least some - * 486 CPUs did not behave this way. - */ - long tmp = -1; - asm("bsfl %1,%0" - : "=r" (r) - : "rm" (x), "0" (tmp)); -#elif defined(CONFIG_X86_CMOV) +#ifdef CONFIG_X86_CMOV asm("bsfl %1,%0\n\t" "cmovzl %2,%0" - : "=&r" (r) : "rm" (x), "r" (-1)); + : "=r" (r) : "rm" (x), "r" (-1)); #else asm("bsfl %1,%0\n\t" "jnz 1f\n\t" @@ -439,22 +422,7 @@ static inline int ffs(int x) static inline int fls(int x) { int r; - -#ifdef CONFIG_X86_64 - /* - * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the - * dest reg is undefined if x==0, but their CPU architect says its - * value is written to set it to the same as before, except that the - * top 32 bits will be cleared. - * - * We cannot do this on 32 bits because at the very least some - * 486 CPUs did not behave this way. 
- */ - long tmp = -1; - asm("bsrl %1,%0" - : "=r" (r) - : "rm" (x), "0" (tmp)); -#elif defined(CONFIG_X86_CMOV) +#ifdef CONFIG_X86_CMOV asm("bsrl %1,%0\n\t" "cmovzl %2,%0" : "=&r" (r) : "rm" (x), "rm" (-1)); @@ -466,35 +434,11 @@ static inline int fls(int x) #endif return r + 1; } +#endif /* __KERNEL__ */ -/** - * fls64 - find last set bit in a 64-bit word - * @x: the word to search - * - * This is defined in a similar way as the libc and compiler builtin - * ffsll, but returns the position of the most significant set bit. - * - * fls64(value) returns 0 if value is 0 or the position of the last - * set bit if value is nonzero. The last (most significant) bit is - * at position 64. - */ -#ifdef CONFIG_X86_64 -static __always_inline int fls64(__u64 x) -{ - long bitpos = -1; - /* - * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the - * dest reg is undefined if x==0, but their CPU architect says its - * value is written to set it to the same as before. - */ - asm("bsrq %1,%0" - : "+r" (bitpos) - : "rm" (x)); - return bitpos + 1; -} -#else -#include -#endif +#undef ADDR + +#ifdef __KERNEL__ #include @@ -506,6 +450,12 @@ static __always_inline int fls64(__u64 x) #include +#endif /* __KERNEL__ */ + +#include + +#ifdef __KERNEL__ + #include #include diff --git a/trunk/arch/x86/include/asm/cmpxchg.h b/trunk/arch/x86/include/asm/cmpxchg.h index 0c9fa2745f13..5d3acdf5a7a6 100644 --- a/trunk/arch/x86/include/asm/cmpxchg.h +++ b/trunk/arch/x86/include/asm/cmpxchg.h @@ -14,8 +14,6 @@ extern void __cmpxchg_wrong_size(void) __compiletime_error("Bad argument size for cmpxchg"); extern void __xadd_wrong_size(void) __compiletime_error("Bad argument size for xadd"); -extern void __add_wrong_size(void) - __compiletime_error("Bad argument size for add"); /* * Constants for operation sizes. On 32-bit, the 64-bit size it set to @@ -33,47 +31,60 @@ extern void __add_wrong_size(void) #define __X86_CASE_Q -1 /* sizeof will never return -1 */ #endif -/* - * An exchange-type operation, which takes a value and a pointer, and - * returns a the old value. - */ -#define __xchg_op(ptr, arg, op, lock) \ - ({ \ - __typeof__ (*(ptr)) __ret = (arg); \ - switch (sizeof(*(ptr))) { \ - case __X86_CASE_B: \ - asm volatile (lock #op "b %b0, %1\n" \ - : "+r" (__ret), "+m" (*(ptr)) \ - : : "memory", "cc"); \ - break; \ - case __X86_CASE_W: \ - asm volatile (lock #op "w %w0, %1\n" \ - : "+r" (__ret), "+m" (*(ptr)) \ - : : "memory", "cc"); \ - break; \ - case __X86_CASE_L: \ - asm volatile (lock #op "l %0, %1\n" \ - : "+r" (__ret), "+m" (*(ptr)) \ - : : "memory", "cc"); \ - break; \ - case __X86_CASE_Q: \ - asm volatile (lock #op "q %q0, %1\n" \ - : "+r" (__ret), "+m" (*(ptr)) \ - : : "memory", "cc"); \ - break; \ - default: \ - __ ## op ## _wrong_size(); \ - } \ - __ret; \ - }) - /* * Note: no "lock" prefix even on SMP: xchg always implies lock anyway. * Since this is generally used to protect other memory information, we * use "asm volatile" and "memory" clobbers to prevent gcc from moving * information around. 
*/ -#define xchg(ptr, v) __xchg_op((ptr), (v), xchg, "") +#define __xchg(x, ptr, size) \ +({ \ + __typeof(*(ptr)) __x = (x); \ + switch (size) { \ + case __X86_CASE_B: \ + { \ + volatile u8 *__ptr = (volatile u8 *)(ptr); \ + asm volatile("xchgb %0,%1" \ + : "=q" (__x), "+m" (*__ptr) \ + : "0" (__x) \ + : "memory"); \ + break; \ + } \ + case __X86_CASE_W: \ + { \ + volatile u16 *__ptr = (volatile u16 *)(ptr); \ + asm volatile("xchgw %0,%1" \ + : "=r" (__x), "+m" (*__ptr) \ + : "0" (__x) \ + : "memory"); \ + break; \ + } \ + case __X86_CASE_L: \ + { \ + volatile u32 *__ptr = (volatile u32 *)(ptr); \ + asm volatile("xchgl %0,%1" \ + : "=r" (__x), "+m" (*__ptr) \ + : "0" (__x) \ + : "memory"); \ + break; \ + } \ + case __X86_CASE_Q: \ + { \ + volatile u64 *__ptr = (volatile u64 *)(ptr); \ + asm volatile("xchgq %0,%1" \ + : "=r" (__x), "+m" (*__ptr) \ + : "0" (__x) \ + : "memory"); \ + break; \ + } \ + default: \ + __xchg_wrong_size(); \ + } \ + __x; \ +}) + +#define xchg(ptr, v) \ + __xchg((v), (ptr), sizeof(*ptr)) /* * Atomic compare and exchange. Compare OLD with MEM, if identical, @@ -154,80 +165,46 @@ extern void __add_wrong_size(void) __cmpxchg_local((ptr), (old), (new), sizeof(*ptr)) #endif -/* - * xadd() adds "inc" to "*ptr" and atomically returns the previous - * value of "*ptr". - * - * xadd() is locked when multiple CPUs are online - * xadd_sync() is always locked - * xadd_local() is never locked - */ -#define __xadd(ptr, inc, lock) __xchg_op((ptr), (inc), xadd, lock) -#define xadd(ptr, inc) __xadd((ptr), (inc), LOCK_PREFIX) -#define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ") -#define xadd_local(ptr, inc) __xadd((ptr), (inc), "") - -#define __add(ptr, inc, lock) \ +#define __xadd(ptr, inc, lock) \ ({ \ __typeof__ (*(ptr)) __ret = (inc); \ switch (sizeof(*(ptr))) { \ case __X86_CASE_B: \ - asm volatile (lock "addb %b1, %0\n" \ - : "+m" (*(ptr)) : "ri" (inc) \ - : "memory", "cc"); \ + asm volatile (lock "xaddb %b0, %1\n" \ + : "+r" (__ret), "+m" (*(ptr)) \ + : : "memory", "cc"); \ break; \ case __X86_CASE_W: \ - asm volatile (lock "addw %w1, %0\n" \ - : "+m" (*(ptr)) : "ri" (inc) \ - : "memory", "cc"); \ + asm volatile (lock "xaddw %w0, %1\n" \ + : "+r" (__ret), "+m" (*(ptr)) \ + : : "memory", "cc"); \ break; \ case __X86_CASE_L: \ - asm volatile (lock "addl %1, %0\n" \ - : "+m" (*(ptr)) : "ri" (inc) \ - : "memory", "cc"); \ + asm volatile (lock "xaddl %0, %1\n" \ + : "+r" (__ret), "+m" (*(ptr)) \ + : : "memory", "cc"); \ break; \ case __X86_CASE_Q: \ - asm volatile (lock "addq %1, %0\n" \ - : "+m" (*(ptr)) : "ri" (inc) \ - : "memory", "cc"); \ + asm volatile (lock "xaddq %q0, %1\n" \ + : "+r" (__ret), "+m" (*(ptr)) \ + : : "memory", "cc"); \ break; \ default: \ - __add_wrong_size(); \ + __xadd_wrong_size(); \ } \ __ret; \ }) /* - * add_*() adds "inc" to "*ptr" + * xadd() adds "inc" to "*ptr" and atomically returns the previous + * value of "*ptr". 
* - * __add() takes a lock prefix - * add_smp() is locked when multiple CPUs are online - * add_sync() is always locked + * xadd() is locked when multiple CPUs are online + * xadd_sync() is always locked + * xadd_local() is never locked */ -#define add_smp(ptr, inc) __add((ptr), (inc), LOCK_PREFIX) -#define add_sync(ptr, inc) __add((ptr), (inc), "lock; ") - -#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2) \ -({ \ - bool __ret; \ - __typeof__(*(p1)) __old1 = (o1), __new1 = (n1); \ - __typeof__(*(p2)) __old2 = (o2), __new2 = (n2); \ - BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \ - BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \ - VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long))); \ - VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2)); \ - asm volatile(pfx "cmpxchg%c4b %2; sete %0" \ - : "=a" (__ret), "+d" (__old2), \ - "+m" (*(p1)), "+m" (*(p2)) \ - : "i" (2 * sizeof(long)), "a" (__old1), \ - "b" (__new1), "c" (__new2)); \ - __ret; \ -}) - -#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \ - __cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2) - -#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \ - __cmpxchg_double(, p1, p2, o1, o2, n1, n2) +#define xadd(ptr, inc) __xadd((ptr), (inc), LOCK_PREFIX) +#define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ") +#define xadd_local(ptr, inc) __xadd((ptr), (inc), "") #endif /* ASM_X86_CMPXCHG_H */ diff --git a/trunk/arch/x86/include/asm/cmpxchg_32.h b/trunk/arch/x86/include/asm/cmpxchg_32.h index 53f4b219336b..fbebb07dd80b 100644 --- a/trunk/arch/x86/include/asm/cmpxchg_32.h +++ b/trunk/arch/x86/include/asm/cmpxchg_32.h @@ -166,6 +166,52 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old, #endif +#define cmpxchg8b(ptr, o1, o2, n1, n2) \ +({ \ + char __ret; \ + __typeof__(o2) __dummy; \ + __typeof__(*(ptr)) __old1 = (o1); \ + __typeof__(o2) __old2 = (o2); \ + __typeof__(*(ptr)) __new1 = (n1); \ + __typeof__(o2) __new2 = (n2); \ + asm volatile(LOCK_PREFIX "cmpxchg8b %2; setz %1" \ + : "=d"(__dummy), "=a" (__ret), "+m" (*ptr)\ + : "a" (__old1), "d"(__old2), \ + "b" (__new1), "c" (__new2) \ + : "memory"); \ + __ret; }) + + +#define cmpxchg8b_local(ptr, o1, o2, n1, n2) \ +({ \ + char __ret; \ + __typeof__(o2) __dummy; \ + __typeof__(*(ptr)) __old1 = (o1); \ + __typeof__(o2) __old2 = (o2); \ + __typeof__(*(ptr)) __new1 = (n1); \ + __typeof__(o2) __new2 = (n2); \ + asm volatile("cmpxchg8b %2; setz %1" \ + : "=d"(__dummy), "=a"(__ret), "+m" (*ptr)\ + : "a" (__old), "d"(__old2), \ + "b" (__new1), "c" (__new2), \ + : "memory"); \ + __ret; }) + + +#define cmpxchg_double(ptr, o1, o2, n1, n2) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 4); \ + VM_BUG_ON((unsigned long)(ptr) % 8); \ + cmpxchg8b((ptr), (o1), (o2), (n1), (n2)); \ +}) + +#define cmpxchg_double_local(ptr, o1, o2, n1, n2) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 4); \ + VM_BUG_ON((unsigned long)(ptr) % 8); \ + cmpxchg16b_local((ptr), (o1), (o2), (n1), (n2)); \ +}) + #define system_has_cmpxchg_double() cpu_has_cx8 #endif /* _ASM_X86_CMPXCHG_32_H */ diff --git a/trunk/arch/x86/include/asm/cmpxchg_64.h b/trunk/arch/x86/include/asm/cmpxchg_64.h index 614be87f1a9b..285da02c38fa 100644 --- a/trunk/arch/x86/include/asm/cmpxchg_64.h +++ b/trunk/arch/x86/include/asm/cmpxchg_64.h @@ -20,6 +20,49 @@ static inline void set_64bit(volatile u64 *ptr, u64 val) cmpxchg_local((ptr), (o), (n)); \ }) +#define cmpxchg16b(ptr, o1, o2, n1, n2) \ +({ \ + char __ret; \ + __typeof__(o2) __junk; \ + __typeof__(*(ptr)) __old1 = (o1); \ + __typeof__(o2) __old2 = (o2); \ + 
__typeof__(*(ptr)) __new1 = (n1); \ + __typeof__(o2) __new2 = (n2); \ + asm volatile(LOCK_PREFIX "cmpxchg16b %2;setz %1" \ + : "=d"(__junk), "=a"(__ret), "+m" (*ptr) \ + : "b"(__new1), "c"(__new2), \ + "a"(__old1), "d"(__old2)); \ + __ret; }) + + +#define cmpxchg16b_local(ptr, o1, o2, n1, n2) \ +({ \ + char __ret; \ + __typeof__(o2) __junk; \ + __typeof__(*(ptr)) __old1 = (o1); \ + __typeof__(o2) __old2 = (o2); \ + __typeof__(*(ptr)) __new1 = (n1); \ + __typeof__(o2) __new2 = (n2); \ + asm volatile("cmpxchg16b %2;setz %1" \ + : "=d"(__junk), "=a"(__ret), "+m" (*ptr) \ + : "b"(__new1), "c"(__new2), \ + "a"(__old1), "d"(__old2)); \ + __ret; }) + +#define cmpxchg_double(ptr, o1, o2, n1, n2) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + VM_BUG_ON((unsigned long)(ptr) % 16); \ + cmpxchg16b((ptr), (o1), (o2), (n1), (n2)); \ +}) + +#define cmpxchg_double_local(ptr, o1, o2, n1, n2) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + VM_BUG_ON((unsigned long)(ptr) % 16); \ + cmpxchg16b_local((ptr), (o1), (o2), (n1), (n2)); \ +}) + #define system_has_cmpxchg_double() cpu_has_cx16 #endif /* _ASM_X86_CMPXCHG_64_H */ diff --git a/trunk/arch/x86/include/asm/div64.h b/trunk/arch/x86/include/asm/div64.h index ced283ac79df..9a2d644c08ef 100644 --- a/trunk/arch/x86/include/asm/div64.h +++ b/trunk/arch/x86/include/asm/div64.h @@ -4,7 +4,6 @@ #ifdef CONFIG_X86_32 #include -#include /* * do_div() is NOT a C function. It wants to return @@ -22,20 +21,15 @@ ({ \ unsigned long __upper, __low, __high, __mod, __base; \ __base = (base); \ - if (__builtin_constant_p(__base) && is_power_of_2(__base)) { \ - __mod = n & (__base - 1); \ - n >>= ilog2(__base); \ - } else { \ - asm("" : "=a" (__low), "=d" (__high) : "A" (n));\ - __upper = __high; \ - if (__high) { \ - __upper = __high % (__base); \ - __high = __high / (__base); \ - } \ - asm("divl %2" : "=a" (__low), "=d" (__mod) \ - : "rm" (__base), "0" (__low), "1" (__upper)); \ - asm("" : "=A" (n) : "a" (__low), "d" (__high)); \ + asm("":"=a" (__low), "=d" (__high) : "A" (n)); \ + __upper = __high; \ + if (__high) { \ + __upper = __high % (__base); \ + __high = __high / (__base); \ } \ + asm("divl %2":"=a" (__low), "=d" (__mod) \ + : "rm" (__base), "0" (__low), "1" (__upper)); \ + asm("":"=A" (n) : "a" (__low), "d" (__high)); \ __mod; \ }) diff --git a/trunk/arch/x86/include/asm/e820.h b/trunk/arch/x86/include/asm/e820.h index 37782566af24..908b96957d88 100644 --- a/trunk/arch/x86/include/asm/e820.h +++ b/trunk/arch/x86/include/asm/e820.h @@ -117,7 +117,7 @@ static inline void early_memtest(unsigned long start, unsigned long end) extern unsigned long e820_end_of_ram_pfn(void); extern unsigned long e820_end_of_low_ram_pfn(void); -extern u64 early_reserve_e820(u64 sizet, u64 align); +extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align); void memblock_x86_fill(void); void memblock_find_dma_reserve(void); diff --git a/trunk/arch/x86/include/asm/hardirq.h b/trunk/arch/x86/include/asm/hardirq.h index da0b3ca815b7..55e4de613f0e 100644 --- a/trunk/arch/x86/include/asm/hardirq.h +++ b/trunk/arch/x86/include/asm/hardirq.h @@ -11,7 +11,6 @@ typedef struct { #ifdef CONFIG_X86_LOCAL_APIC unsigned int apic_timer_irqs; /* arch dependent */ unsigned int irq_spurious_count; - unsigned int icr_read_retry_count; #endif unsigned int x86_platform_ipis; /* arch dependent */ unsigned int apic_perf_irqs; diff --git a/trunk/arch/x86/include/asm/i387.h b/trunk/arch/x86/include/asm/i387.h index 6919e936345b..c9e09ea05644 100644 --- a/trunk/arch/x86/include/asm/i387.h +++ 
b/trunk/arch/x86/include/asm/i387.h @@ -218,7 +218,7 @@ static inline void fpu_fxsave(struct fpu *fpu) #ifdef CONFIG_SMP #define safe_address (__per_cpu_offset[0]) #else -#define safe_address (__get_cpu_var(kernel_cpustat).cpustat[CPUTIME_USER]) +#define safe_address (kstat_cpu(0).cpustat.user) #endif /* diff --git a/trunk/arch/x86/include/asm/insn.h b/trunk/arch/x86/include/asm/insn.h index 74df3f1eddfd..88c765e16410 100644 --- a/trunk/arch/x86/include/asm/insn.h +++ b/trunk/arch/x86/include/asm/insn.h @@ -137,13 +137,6 @@ static inline int insn_is_avx(struct insn *insn) return (insn->vex_prefix.value != 0); } -/* Ensure this instruction is decoded completely */ -static inline int insn_complete(struct insn *insn) -{ - return insn->opcode.got && insn->modrm.got && insn->sib.got && - insn->displacement.got && insn->immediate.got; -} - static inline insn_byte_t insn_vex_m_bits(struct insn *insn) { if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */ diff --git a/trunk/arch/x86/include/asm/mach_timer.h b/trunk/arch/x86/include/asm/mach_timer.h index 88d0c3c74c13..853728519ae9 100644 --- a/trunk/arch/x86/include/asm/mach_timer.h +++ b/trunk/arch/x86/include/asm/mach_timer.h @@ -15,7 +15,7 @@ #define CALIBRATE_TIME_MSEC 30 /* 30 msecs */ #define CALIBRATE_LATCH \ - ((PIT_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000) + ((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000) static inline void mach_prepare_counter(void) { diff --git a/trunk/arch/x86/include/asm/mc146818rtc.h b/trunk/arch/x86/include/asm/mc146818rtc.h index 0e8e85bb7c51..01fdf5674e24 100644 --- a/trunk/arch/x86/include/asm/mc146818rtc.h +++ b/trunk/arch/x86/include/asm/mc146818rtc.h @@ -81,8 +81,8 @@ static inline unsigned char current_lock_cmos_reg(void) #else #define lock_cmos_prefix(reg) do {} while (0) #define lock_cmos_suffix(reg) do {} while (0) -#define lock_cmos(reg) do { } while (0) -#define unlock_cmos() do { } while (0) +#define lock_cmos(reg) +#define unlock_cmos() #define do_i_have_lock_cmos() 0 #define current_lock_cmos_reg() 0 #endif diff --git a/trunk/arch/x86/include/asm/mce.h b/trunk/arch/x86/include/asm/mce.h index 6add827381c9..0e8ae57d3656 100644 --- a/trunk/arch/x86/include/asm/mce.h +++ b/trunk/arch/x86/include/asm/mce.h @@ -50,11 +50,10 @@ #define MCJ_CTX_MASK 3 #define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK) #define MCJ_CTX_RANDOM 0 /* inject context: random */ -#define MCJ_CTX_PROCESS 0x1 /* inject context: process */ -#define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */ -#define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */ -#define MCJ_EXCEPTION 0x8 /* raise as exception */ -#define MCJ_IRQ_BRAODCAST 0x10 /* do IRQ broadcasting */ +#define MCJ_CTX_PROCESS 1 /* inject context: process */ +#define MCJ_CTX_IRQ 2 /* inject context: IRQ */ +#define MCJ_NMI_BROADCAST 4 /* do NMI broadcasting */ +#define MCJ_EXCEPTION 8 /* raise as exception */ /* Fields are zero when not available */ struct mce { @@ -121,8 +120,7 @@ struct mce_log { #ifdef __KERNEL__ -extern void mce_register_decode_chain(struct notifier_block *nb); -extern void mce_unregister_decode_chain(struct notifier_block *nb); +extern struct atomic_notifier_head x86_mce_decoder_chain; #include #include diff --git a/trunk/arch/x86/include/asm/memblock.h b/trunk/arch/x86/include/asm/memblock.h new file mode 100644 index 000000000000..0cd3800f33b9 --- /dev/null +++ b/trunk/arch/x86/include/asm/memblock.h @@ -0,0 +1,23 @@ +#ifndef _X86_MEMBLOCK_H +#define _X86_MEMBLOCK_H + +#define ARCH_DISCARD_MEMBLOCK + +u64 memblock_x86_find_in_range_size(u64 start, u64 
*sizep, u64 align); + +void memblock_x86_reserve_range(u64 start, u64 end, char *name); +void memblock_x86_free_range(u64 start, u64 end); +struct range; +int __get_free_all_memory_range(struct range **range, int nodeid, + unsigned long start_pfn, unsigned long end_pfn); +int get_free_all_memory_range(struct range **rangep, int nodeid); + +void memblock_x86_register_active_regions(int nid, unsigned long start_pfn, + unsigned long last_pfn); +u64 memblock_x86_hole_size(u64 start, u64 end); +u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align); +u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit); +u64 memblock_x86_memory_in_range(u64 addr, u64 limit); +bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align); + +#endif diff --git a/trunk/arch/x86/include/asm/microcode.h b/trunk/arch/x86/include/asm/microcode.h index 4ebe157bf73d..24215072d0e1 100644 --- a/trunk/arch/x86/include/asm/microcode.h +++ b/trunk/arch/x86/include/asm/microcode.h @@ -48,7 +48,6 @@ static inline struct microcode_ops * __init init_intel_microcode(void) #ifdef CONFIG_MICROCODE_AMD extern struct microcode_ops * __init init_amd_microcode(void); -extern void __exit exit_amd_microcode(void); static inline void get_ucode_data(void *to, const u8 *from, size_t n) { @@ -60,7 +59,6 @@ static inline struct microcode_ops * __init init_amd_microcode(void) { return NULL; } -static inline void __exit exit_amd_microcode(void) {} #endif #endif /* _ASM_X86_MICROCODE_H */ diff --git a/trunk/arch/x86/include/asm/numachip/numachip_csr.h b/trunk/arch/x86/include/asm/numachip/numachip_csr.h deleted file mode 100644 index 660f843df928..000000000000 --- a/trunk/arch/x86/include/asm/numachip/numachip_csr.h +++ /dev/null @@ -1,167 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Numascale NumaConnect-Specific Header file - * - * Copyright (C) 2011 Numascale AS. All rights reserved. 
- * - * Send feedback to - * - */ - -#ifndef _ASM_X86_NUMACHIP_NUMACHIP_CSR_H -#define _ASM_X86_NUMACHIP_NUMACHIP_CSR_H - -#include -#include -#include -#include -#include -#include - -#define CSR_NODE_SHIFT 16 -#define CSR_NODE_BITS(p) (((unsigned long)(p)) << CSR_NODE_SHIFT) -#define CSR_NODE_MASK 0x0fff /* 4K nodes */ - -/* 32K CSR space, b15 indicates geo/non-geo */ -#define CSR_OFFSET_MASK 0x7fffUL - -/* Global CSR space covers all 4K possible nodes with 64K CSR space per node */ -#define NUMACHIP_GCSR_BASE 0x3fff00000000ULL -#define NUMACHIP_GCSR_LIM 0x3fff0fffffffULL -#define NUMACHIP_GCSR_SIZE (NUMACHIP_GCSR_LIM - NUMACHIP_GCSR_BASE + 1) - -/* - * Local CSR space starts in global CSR space with "nodeid" = 0xfff0, however - * when using the direct mapping on x86_64, both start and size needs to be - * aligned with PMD_SIZE which is 2M - */ -#define NUMACHIP_LCSR_BASE 0x3ffffe000000ULL -#define NUMACHIP_LCSR_LIM 0x3fffffffffffULL -#define NUMACHIP_LCSR_SIZE (NUMACHIP_LCSR_LIM - NUMACHIP_LCSR_BASE + 1) - -static inline void *gcsr_address(int node, unsigned long offset) -{ - return __va(NUMACHIP_GCSR_BASE | (1UL << 15) | - CSR_NODE_BITS(node & CSR_NODE_MASK) | (offset & CSR_OFFSET_MASK)); -} - -static inline void *lcsr_address(unsigned long offset) -{ - return __va(NUMACHIP_LCSR_BASE | (1UL << 15) | - CSR_NODE_BITS(0xfff0) | (offset & CSR_OFFSET_MASK)); -} - -static inline unsigned int read_gcsr(int node, unsigned long offset) -{ - return swab32(readl(gcsr_address(node, offset))); -} - -static inline void write_gcsr(int node, unsigned long offset, unsigned int val) -{ - writel(swab32(val), gcsr_address(node, offset)); -} - -static inline unsigned int read_lcsr(unsigned long offset) -{ - return swab32(readl(lcsr_address(offset))); -} - -static inline void write_lcsr(unsigned long offset, unsigned int val) -{ - writel(swab32(val), lcsr_address(offset)); -} - -/* ========================================================================= */ -/* CSR_G0_STATE_CLEAR */ -/* ========================================================================= */ - -#define CSR_G0_STATE_CLEAR (0x000 + (0 << 12)) -union numachip_csr_g0_state_clear { - unsigned int v; - struct numachip_csr_g0_state_clear_s { - unsigned int _state:2; - unsigned int _rsvd_2_6:5; - unsigned int _lost:1; - unsigned int _rsvd_8_31:24; - } s; -}; - -/* ========================================================================= */ -/* CSR_G0_NODE_IDS */ -/* ========================================================================= */ - -#define CSR_G0_NODE_IDS (0x008 + (0 << 12)) -union numachip_csr_g0_node_ids { - unsigned int v; - struct numachip_csr_g0_node_ids_s { - unsigned int _initialid:16; - unsigned int _nodeid:12; - unsigned int _rsvd_28_31:4; - } s; -}; - -/* ========================================================================= */ -/* CSR_G3_EXT_IRQ_GEN */ -/* ========================================================================= */ - -#define CSR_G3_EXT_IRQ_GEN (0x030 + (3 << 12)) -union numachip_csr_g3_ext_irq_gen { - unsigned int v; - struct numachip_csr_g3_ext_irq_gen_s { - unsigned int _vector:8; - unsigned int _msgtype:3; - unsigned int _index:5; - unsigned int _destination_apic_id:16; - } s; -}; - -/* ========================================================================= */ -/* CSR_G3_EXT_IRQ_STATUS */ -/* ========================================================================= */ - -#define CSR_G3_EXT_IRQ_STATUS (0x034 + (3 << 12)) -union numachip_csr_g3_ext_irq_status { - unsigned int v; - struct 
numachip_csr_g3_ext_irq_status_s { - unsigned int _result:32; - } s; -}; - -/* ========================================================================= */ -/* CSR_G3_EXT_IRQ_DEST */ -/* ========================================================================= */ - -#define CSR_G3_EXT_IRQ_DEST (0x038 + (3 << 12)) -union numachip_csr_g3_ext_irq_dest { - unsigned int v; - struct numachip_csr_g3_ext_irq_dest_s { - unsigned int _irq:8; - unsigned int _rsvd_8_31:24; - } s; -}; - -/* ========================================================================= */ -/* CSR_G3_NC_ATT_MAP_SELECT */ -/* ========================================================================= */ - -#define CSR_G3_NC_ATT_MAP_SELECT (0x7fc + (3 << 12)) -union numachip_csr_g3_nc_att_map_select { - unsigned int v; - struct numachip_csr_g3_nc_att_map_select_s { - unsigned int _upper_address_bits:4; - unsigned int _select_ram:4; - unsigned int _rsvd_8_31:24; - } s; -}; - -/* ========================================================================= */ -/* CSR_G3_NC_ATT_MAP_SELECT_0-255 */ -/* ========================================================================= */ - -#define CSR_G3_NC_ATT_MAP_SELECT_0 (0x800 + (3 << 12)) - -#endif /* _ASM_X86_NUMACHIP_NUMACHIP_CSR_H */ - diff --git a/trunk/arch/x86/include/asm/percpu.h b/trunk/arch/x86/include/asm/percpu.h index 529bf07e8067..3470c9d0ebba 100644 --- a/trunk/arch/x86/include/asm/percpu.h +++ b/trunk/arch/x86/include/asm/percpu.h @@ -451,20 +451,23 @@ do { \ #endif /* !CONFIG_M386 */ #ifdef CONFIG_X86_CMPXCHG64 -#define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2) \ +#define percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2) \ ({ \ - bool __ret; \ - typeof(pcp1) __o1 = (o1), __n1 = (n1); \ - typeof(pcp2) __o2 = (o2), __n2 = (n2); \ + char __ret; \ + typeof(o1) __o1 = o1; \ + typeof(o1) __n1 = n1; \ + typeof(o2) __o2 = o2; \ + typeof(o2) __n2 = n2; \ + typeof(o2) __dummy = n2; \ asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t" \ - : "=a" (__ret), "+m" (pcp1), "+m" (pcp2), "+d" (__o2) \ - : "b" (__n1), "c" (__n2), "a" (__o1)); \ + : "=a"(__ret), "=m" (pcp1), "=d"(__dummy) \ + : "b"(__n1), "c"(__n2), "a"(__o1), "d"(__o2)); \ __ret; \ }) -#define __this_cpu_cmpxchg_double_4 percpu_cmpxchg8b_double -#define this_cpu_cmpxchg_double_4 percpu_cmpxchg8b_double -#define irqsafe_cpu_cmpxchg_double_4 percpu_cmpxchg8b_double +#define __this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2) +#define this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2) +#define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2) #endif /* CONFIG_X86_CMPXCHG64 */ /* @@ -505,23 +508,31 @@ do { \ * it in software. The address used in the cmpxchg16 instruction must be * aligned to a 16 byte boundary. 
*/ -#define percpu_cmpxchg16b_double(pcp1, pcp2, o1, o2, n1, n2) \ +#ifdef CONFIG_SMP +#define CMPXCHG16B_EMU_CALL "call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP3 +#else +#define CMPXCHG16B_EMU_CALL "call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP2 +#endif +#define percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2) \ ({ \ - bool __ret; \ - typeof(pcp1) __o1 = (o1), __n1 = (n1); \ - typeof(pcp2) __o2 = (o2), __n2 = (n2); \ - alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \ - "cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t", \ + char __ret; \ + typeof(o1) __o1 = o1; \ + typeof(o1) __n1 = n1; \ + typeof(o2) __o2 = o2; \ + typeof(o2) __n2 = n2; \ + typeof(o2) __dummy; \ + alternative_io(CMPXCHG16B_EMU_CALL, \ + "cmpxchg16b " __percpu_prefix "(%%rsi)\n\tsetz %0\n\t", \ X86_FEATURE_CX16, \ - ASM_OUTPUT2("=a" (__ret), "+m" (pcp1), \ - "+m" (pcp2), "+d" (__o2)), \ - "b" (__n1), "c" (__n2), "a" (__o1) : "rsi"); \ + ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)), \ + "S" (&pcp1), "b"(__n1), "c"(__n2), \ + "a"(__o1), "d"(__o2) : "memory"); \ __ret; \ }) -#define __this_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double -#define this_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double -#define irqsafe_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double +#define __this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2) +#define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2) +#define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2) #endif diff --git a/trunk/arch/x86/include/asm/perf_event.h b/trunk/arch/x86/include/asm/perf_event.h index 096c975e099f..f61c62f7d5d8 100644 --- a/trunk/arch/x86/include/asm/perf_event.h +++ b/trunk/arch/x86/include/asm/perf_event.h @@ -57,7 +57,6 @@ (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) #define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6 -#define ARCH_PERFMON_EVENTS_COUNT 7 /* * Intel "Architectural Performance Monitoring" CPUID @@ -73,19 +72,6 @@ union cpuid10_eax { unsigned int full; }; -union cpuid10_ebx { - struct { - unsigned int no_unhalted_core_cycles:1; - unsigned int no_instructions_retired:1; - unsigned int no_unhalted_reference_cycles:1; - unsigned int no_llc_reference:1; - unsigned int no_llc_misses:1; - unsigned int no_branch_instruction_retired:1; - unsigned int no_branch_misses_retired:1; - } split; - unsigned int full; -}; - union cpuid10_edx { struct { unsigned int num_counters_fixed:5; @@ -95,15 +81,6 @@ union cpuid10_edx { unsigned int full; }; -struct x86_pmu_capability { - int version; - int num_counters_gp; - int num_counters_fixed; - int bit_width_gp; - int bit_width_fixed; - unsigned int events_mask; - int events_mask_len; -}; /* * Fixed-purpose performance events: @@ -112,24 +89,23 @@ struct x86_pmu_capability { /* * All 3 fixed-mode PMCs are configured via this single MSR: */ -#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d +#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d /* * The counts are available in three separate MSRs: */ /* Instr_Retired.Any: */ -#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309 -#define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0) +#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309 +#define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0) /* CPU_CLK_Unhalted.Core: */ -#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a -#define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1) +#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a +#define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1) /* 
CPU_CLK_Unhalted.Ref: */ -#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b -#define X86_PMC_IDX_FIXED_REF_CYCLES (X86_PMC_IDX_FIXED + 2) -#define X86_PMC_MSK_FIXED_REF_CYCLES (1ULL << X86_PMC_IDX_FIXED_REF_CYCLES) +#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b +#define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2) /* * We model BTS tracing as another fixed-mode PMC. @@ -226,7 +202,6 @@ struct perf_guest_switch_msr { }; extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr); -extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap); #else static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr) { @@ -234,11 +209,6 @@ static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr) return NULL; } -static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap) -{ - memset(cap, 0, sizeof(*cap)); -} - static inline void perf_events_lapic_init(void) { } #endif diff --git a/trunk/arch/x86/include/asm/pgtable.h b/trunk/arch/x86/include/asm/pgtable.h index 49afb3f41eb6..18601c86fab1 100644 --- a/trunk/arch/x86/include/asm/pgtable.h +++ b/trunk/arch/x86/include/asm/pgtable.h @@ -703,7 +703,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, pte_update(mm, addr, ptep); } -#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0) +#define flush_tlb_fix_spurious_fault(vma, address) #define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) diff --git a/trunk/arch/x86/include/asm/processor-flags.h b/trunk/arch/x86/include/asm/processor-flags.h index f8ab3eaad128..2dddb317bb39 100644 --- a/trunk/arch/x86/include/asm/processor-flags.h +++ b/trunk/arch/x86/include/asm/processor-flags.h @@ -6,7 +6,6 @@ * EFLAGS bits */ #define X86_EFLAGS_CF 0x00000001 /* Carry Flag */ -#define X86_EFLAGS_BIT1 0x00000002 /* Bit 1 - always on */ #define X86_EFLAGS_PF 0x00000004 /* Parity Flag */ #define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */ #define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */ diff --git a/trunk/arch/x86/include/asm/processor.h b/trunk/arch/x86/include/asm/processor.h index aa9088c26931..b650435ffb53 100644 --- a/trunk/arch/x86/include/asm/processor.h +++ b/trunk/arch/x86/include/asm/processor.h @@ -99,6 +99,7 @@ struct cpuinfo_x86 { u16 apicid; u16 initial_apicid; u16 x86_clflush_size; +#ifdef CONFIG_SMP /* number of cores as seen by the OS: */ u16 booted_cores; /* Physical processor id: */ @@ -109,6 +110,7 @@ struct cpuinfo_x86 { u8 compute_unit_id; /* Index into per_cpu list: */ u16 cpu_index; +#endif u32 microcode; } __attribute__((__aligned__(SMP_CACHE_BYTES))); diff --git a/trunk/arch/x86/include/asm/spinlock.h b/trunk/arch/x86/include/asm/spinlock.h index a82c2bf504b6..972c260919a3 100644 --- a/trunk/arch/x86/include/asm/spinlock.h +++ b/trunk/arch/x86/include/asm/spinlock.h @@ -79,10 +79,23 @@ static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail; } +#if (NR_CPUS < 256) static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) { - __add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX); + asm volatile(UNLOCK_LOCK_PREFIX "incb %0" + : "+m" (lock->head_tail) + : + : "memory", "cc"); } +#else +static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) +{ + asm volatile(UNLOCK_LOCK_PREFIX "incw %0" + : "+m" (lock->head_tail) + : + : "memory", "cc"); +} +#endif static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) { diff --git a/trunk/arch/x86/include/asm/thread_info.h 
b/trunk/arch/x86/include/asm/thread_info.h index 185b719ec61a..a1fe5c127b52 100644 --- a/trunk/arch/x86/include/asm/thread_info.h +++ b/trunk/arch/x86/include/asm/thread_info.h @@ -40,8 +40,7 @@ struct thread_info { */ __u8 supervisor_stack[0]; #endif - int sig_on_uaccess_error:1; - int uaccess_err:1; /* uaccess failed */ + int uaccess_err; }; #define INIT_THREAD_INFO(tsk) \ @@ -232,12 +231,6 @@ static inline struct thread_info *current_thread_info(void) movq PER_CPU_VAR(kernel_stack),reg ; \ subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg -/* - * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in - * a certain register (to be used in assembler memory operands). - */ -#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg) - #endif #endif /* !X86_32 */ diff --git a/trunk/arch/x86/include/asm/topology.h b/trunk/arch/x86/include/asm/topology.h index 800f77c60051..c00692476e9f 100644 --- a/trunk/arch/x86/include/asm/topology.h +++ b/trunk/arch/x86/include/asm/topology.h @@ -130,8 +130,10 @@ extern void setup_node_to_cpumask_map(void); .balance_interval = 1, \ } +#ifdef CONFIG_X86_64 extern int __node_distance(int, int); #define node_distance(a, b) __node_distance(a, b) +#endif #else /* !CONFIG_NUMA */ diff --git a/trunk/arch/x86/include/asm/tsc.h b/trunk/arch/x86/include/asm/tsc.h index 15d99153a96d..83e2efd181e2 100644 --- a/trunk/arch/x86/include/asm/tsc.h +++ b/trunk/arch/x86/include/asm/tsc.h @@ -51,8 +51,6 @@ extern int unsynchronized_tsc(void); extern int check_tsc_unstable(void); extern unsigned long native_calibrate_tsc(void); -extern int tsc_clocksource_reliable; - /* * Boot-time check whether the TSCs are synchronized across * all CPUs/cores: diff --git a/trunk/arch/x86/include/asm/uaccess.h b/trunk/arch/x86/include/asm/uaccess.h index 8be5f54d9360..36361bf6fdd1 100644 --- a/trunk/arch/x86/include/asm/uaccess.h +++ b/trunk/arch/x86/include/asm/uaccess.h @@ -462,7 +462,7 @@ struct __large_struct { unsigned long buf[100]; }; barrier(); #define uaccess_catch(err) \ - (err) |= (current_thread_info()->uaccess_err ? 
-EFAULT : 0); \ + (err) |= current_thread_info()->uaccess_err; \ current_thread_info()->uaccess_err = prev_err; \ } while (0) diff --git a/trunk/arch/x86/include/asm/x86_init.h b/trunk/arch/x86/include/asm/x86_init.h index 1ac860a09849..1971e652d24b 100644 --- a/trunk/arch/x86/include/asm/x86_init.h +++ b/trunk/arch/x86/include/asm/x86_init.h @@ -7,7 +7,6 @@ struct mpc_bus; struct mpc_cpu; struct mpc_table; -struct cpuinfo_x86; /** * struct x86_init_mpparse - platform specific mpparse ops @@ -148,7 +147,6 @@ struct x86_init_ops { */ struct x86_cpuinit_ops { void (*setup_percpu_clockev)(void); - void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node); }; /** @@ -188,6 +186,5 @@ extern struct x86_msi_ops x86_msi; extern void x86_init_noop(void); extern void x86_init_uint_noop(unsigned int unused); -extern void x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node); #endif diff --git a/trunk/arch/x86/kernel/acpi/boot.c b/trunk/arch/x86/kernel/acpi/boot.c index ce664f33ea8e..4558f0d0822d 100644 --- a/trunk/arch/x86/kernel/acpi/boot.c +++ b/trunk/arch/x86/kernel/acpi/boot.c @@ -219,8 +219,6 @@ static int __init acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end) { struct acpi_madt_local_x2apic *processor = NULL; - int apic_id; - u8 enabled; processor = (struct acpi_madt_local_x2apic *)header; @@ -229,8 +227,6 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end) acpi_table_print_madt_entry(header); - apic_id = processor->local_apic_id; - enabled = processor->lapic_flags & ACPI_MADT_ENABLED; #ifdef CONFIG_X86_X2APIC /* * We need to register disabled CPU as well to permit @@ -239,10 +235,8 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end) * to not preallocating memory for all NR_CPUS * when we use CPU hotplug. 
*/ - if (!cpu_has_x2apic && (apic_id >= 0xff) && enabled) - printk(KERN_WARNING PREFIX "x2apic entry ignored\n"); - else - acpi_register_lapic(apic_id, enabled); + acpi_register_lapic(processor->local_apic_id, /* APIC ID */ + processor->lapic_flags & ACPI_MADT_ENABLED); #else printk(KERN_WARNING PREFIX "x2apic entry ignored\n"); #endif diff --git a/trunk/arch/x86/kernel/amd_nb.c b/trunk/arch/x86/kernel/amd_nb.c index 013c1810ce72..4c39baa8facc 100644 --- a/trunk/arch/x86/kernel/amd_nb.c +++ b/trunk/arch/x86/kernel/amd_nb.c @@ -123,14 +123,16 @@ int amd_get_subcaches(int cpu) { struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link; unsigned int mask; - int cuid; + int cuid = 0; if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) return 0; pci_read_config_dword(link, 0x1d4, &mask); +#ifdef CONFIG_SMP cuid = cpu_data(cpu).compute_unit_id; +#endif return (mask >> (4 * cuid)) & 0xf; } @@ -139,7 +141,7 @@ int amd_set_subcaches(int cpu, int mask) static unsigned int reset, ban; struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu)); unsigned int reg; - int cuid; + int cuid = 0; if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf) return -EINVAL; @@ -157,7 +159,9 @@ int amd_set_subcaches(int cpu, int mask) pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000); } +#ifdef CONFIG_SMP cuid = cpu_data(cpu).compute_unit_id; +#endif mask <<= 4 * cuid; mask |= (0xf ^ (1 << cuid)) << 26; diff --git a/trunk/arch/x86/kernel/aperture_64.c b/trunk/arch/x86/kernel/aperture_64.c index 6e76c191a835..3d2661ca6542 100644 --- a/trunk/arch/x86/kernel/aperture_64.c +++ b/trunk/arch/x86/kernel/aperture_64.c @@ -88,13 +88,13 @@ static u32 __init allocate_aperture(void) */ addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR, aper_size, aper_size); - if (!addr || addr + aper_size > GART_MAX_ADDR) { + if (addr == MEMBLOCK_ERROR || addr + aper_size > GART_MAX_ADDR) { printk(KERN_ERR "Cannot allocate aperture memory hole (%lx,%uK)\n", addr, aper_size>>10); return 0; } - memblock_reserve(addr, aper_size); + memblock_x86_reserve_range(addr, addr + aper_size, "aperture64"); /* * Kmemleak should not scan this block as it may not be mapped via the * kernel direct mapping. diff --git a/trunk/arch/x86/kernel/apic/Makefile b/trunk/arch/x86/kernel/apic/Makefile index 0ae0323b1f9c..767fd04f2843 100644 --- a/trunk/arch/x86/kernel/apic/Makefile +++ b/trunk/arch/x86/kernel/apic/Makefile @@ -10,7 +10,6 @@ obj-$(CONFIG_SMP) += ipi.o ifeq ($(CONFIG_X86_64),y) # APIC probe will depend on the listing order here -obj-$(CONFIG_X86_NUMACHIP) += apic_numachip.o obj-$(CONFIG_X86_UV) += x2apic_uv_x.o obj-$(CONFIG_X86_X2APIC) += x2apic_phys.o obj-$(CONFIG_X86_X2APIC) += x2apic_cluster.o diff --git a/trunk/arch/x86/kernel/apic/apic.c b/trunk/arch/x86/kernel/apic/apic.c index 2eec05b6d1b8..f98d84caf94c 100644 --- a/trunk/arch/x86/kernel/apic/apic.c +++ b/trunk/arch/x86/kernel/apic/apic.c @@ -146,26 +146,16 @@ __setup("apicpmtimer", setup_apicpmtimer); int x2apic_mode; #ifdef CONFIG_X86_X2APIC /* x2apic enabled before OS handover */ -int x2apic_preenabled; -static int x2apic_disabled; -static int nox2apic; +static int x2apic_preenabled; static __init int setup_nox2apic(char *str) { if (x2apic_enabled()) { - int apicid = native_apic_msr_read(APIC_ID); - - if (apicid >= 255) { - pr_warning("Apicid: %08x, cannot enforce nox2apic\n", - apicid); - return 0; - } - - pr_warning("x2apic already enabled. 
will disable it\n"); - } else - setup_clear_cpu_cap(X86_FEATURE_X2APIC); - - nox2apic = 1; + pr_warning("Bios already enabled x2apic, " + "can't enforce nox2apic"); + return 0; + } + setup_clear_cpu_cap(X86_FEATURE_X2APIC); return 0; } early_param("nox2apic", setup_nox2apic); @@ -260,7 +250,6 @@ u32 native_safe_apic_wait_icr_idle(void) send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY; if (!send_status) break; - inc_irq_stat(icr_read_retry_count); udelay(100); } while (timeout++ < 1000); @@ -887,8 +876,8 @@ void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs) * Besides, if we don't timer interrupts ignore the global * interrupt lock, which is the WrongThing (tm) to do. */ - irq_enter(); exit_idle(); + irq_enter(); local_apic_timer_interrupt(); irq_exit(); @@ -1442,45 +1431,6 @@ void __init bsp_end_local_APIC_setup(void) } #ifdef CONFIG_X86_X2APIC -/* - * Need to disable xapic and x2apic at the same time and then enable xapic mode - */ -static inline void __disable_x2apic(u64 msr) -{ - wrmsrl(MSR_IA32_APICBASE, - msr & ~(X2APIC_ENABLE | XAPIC_ENABLE)); - wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE); -} - -static __init void disable_x2apic(void) -{ - u64 msr; - - if (!cpu_has_x2apic) - return; - - rdmsrl(MSR_IA32_APICBASE, msr); - if (msr & X2APIC_ENABLE) { - u32 x2apic_id = read_apic_id(); - - if (x2apic_id >= 255) - panic("Cannot disable x2apic, id: %08x\n", x2apic_id); - - pr_info("Disabling x2apic\n"); - __disable_x2apic(msr); - - if (nox2apic) { - clear_cpu_cap(&cpu_data(0), X86_FEATURE_X2APIC); - setup_clear_cpu_cap(X86_FEATURE_X2APIC); - } - - x2apic_disabled = 1; - x2apic_mode = 0; - - register_lapic_address(mp_lapic_addr); - } -} - void check_x2apic(void) { if (x2apic_enabled()) { @@ -1491,20 +1441,15 @@ void check_x2apic(void) void enable_x2apic(void) { - u64 msr; - - rdmsrl(MSR_IA32_APICBASE, msr); - if (x2apic_disabled) { - __disable_x2apic(msr); - return; - } + int msr, msr2; if (!x2apic_mode) return; + rdmsr(MSR_IA32_APICBASE, msr, msr2); if (!(msr & X2APIC_ENABLE)) { printk_once(KERN_INFO "Enabling x2apic\n"); - wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE); + wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, msr2); } } #endif /* CONFIG_X86_X2APIC */ @@ -1541,34 +1486,25 @@ void __init enable_IR_x2apic(void) ret = save_ioapic_entries(); if (ret) { pr_info("Saving IO-APIC state failed: %d\n", ret); - return; + goto out; } local_irq_save(flags); legacy_pic->mask_all(); mask_ioapic_entries(); - if (x2apic_preenabled && nox2apic) - disable_x2apic(); - if (dmar_table_init_ret) ret = -1; else ret = enable_IR(); - if (!x2apic_supported()) - goto skip_x2apic; - if (ret < 0) { /* IR is required if there is APIC ID > 255 even when running * under KVM */ if (max_physical_apicid > 255 || - !hypervisor_x2apic_available()) { - if (x2apic_preenabled) - disable_x2apic(); - goto skip_x2apic; - } + !hypervisor_x2apic_available()) + goto nox2apic; /* * without IR all CPUs can be addressed by IOAPIC/MSI * only in physical mode @@ -1576,10 +1512,8 @@ void __init enable_IR_x2apic(void) x2apic_force_phys(); } - if (ret == IRQ_REMAP_XAPIC_MODE) { - pr_info("x2apic not enabled, IRQ remapping is in xapic mode\n"); - goto skip_x2apic; - } + if (ret == IRQ_REMAP_XAPIC_MODE) + goto nox2apic; x2apic_enabled = 1; @@ -1589,11 +1523,22 @@ void __init enable_IR_x2apic(void) pr_info("Enabled x2apic\n"); } -skip_x2apic: +nox2apic: if (ret < 0) /* IR enabling failed */ restore_ioapic_entries(); legacy_pic->restore_mask(); local_irq_restore(flags); + +out: + if (x2apic_enabled || !x2apic_supported()) + 
return; + + if (x2apic_preenabled) + panic("x2apic: enabled by BIOS but kernel init failed."); + else if (ret == IRQ_REMAP_XAPIC_MODE) + pr_info("x2apic not enabled, IRQ remapping is in xapic mode\n"); + else if (ret < 0) + pr_info("x2apic not enabled, IRQ remapping init failed\n"); } #ifdef CONFIG_X86_64 @@ -1864,8 +1809,8 @@ void smp_spurious_interrupt(struct pt_regs *regs) { u32 v; - irq_enter(); exit_idle(); + irq_enter(); /* * Check if this really is a spurious interrupt and ACK it * if it is a vectored one. Just in case... @@ -1901,8 +1846,8 @@ void smp_error_interrupt(struct pt_regs *regs) "Illegal register address", /* APIC Error Bit 7 */ }; - irq_enter(); exit_idle(); + irq_enter(); /* First tickle the hardware, only then report what went on. -- REW */ v0 = apic_read(APIC_ESR); apic_write(APIC_ESR, 0); diff --git a/trunk/arch/x86/kernel/apic/apic_flat_64.c b/trunk/arch/x86/kernel/apic/apic_flat_64.c index 8c3cdded6f2b..f7a41e4cae47 100644 --- a/trunk/arch/x86/kernel/apic/apic_flat_64.c +++ b/trunk/arch/x86/kernel/apic/apic_flat_64.c @@ -62,7 +62,7 @@ static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask) * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel * document number 292116). So here it goes... */ -void flat_init_apic_ldr(void) +static void flat_init_apic_ldr(void) { unsigned long val; unsigned long num, id; @@ -171,14 +171,9 @@ static int flat_phys_pkg_id(int initial_apic_id, int index_msb) return initial_apic_id >> index_msb; } -static int flat_probe(void) -{ - return 1; -} - static struct apic apic_flat = { .name = "flat", - .probe = flat_probe, + .probe = NULL, .acpi_madt_oem_check = flat_acpi_madt_oem_check, .apic_id_registered = flat_apic_id_registered, diff --git a/trunk/arch/x86/kernel/apic/apic_numachip.c b/trunk/arch/x86/kernel/apic/apic_numachip.c deleted file mode 100644 index 09d3d8c1cd99..000000000000 --- a/trunk/arch/x86/kernel/apic/apic_numachip.c +++ /dev/null @@ -1,294 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Numascale NumaConnect-Specific APIC Code - * - * Copyright (C) 2011 Numascale AS. All rights reserved. 
- * - * Send feedback to - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -static int numachip_system __read_mostly; - -static struct apic apic_numachip __read_mostly; - -static unsigned int get_apic_id(unsigned long x) -{ - unsigned long value; - unsigned int id; - - rdmsrl(MSR_FAM10H_NODE_ID, value); - id = ((x >> 24) & 0xffU) | ((value << 2) & 0x3f00U); - - return id; -} - -static unsigned long set_apic_id(unsigned int id) -{ - unsigned long x; - - x = ((id & 0xffU) << 24); - return x; -} - -static unsigned int read_xapic_id(void) -{ - return get_apic_id(apic_read(APIC_ID)); -} - -static int numachip_apic_id_registered(void) -{ - return physid_isset(read_xapic_id(), phys_cpu_present_map); -} - -static int numachip_phys_pkg_id(int initial_apic_id, int index_msb) -{ - return initial_apic_id >> index_msb; -} - -static const struct cpumask *numachip_target_cpus(void) -{ - return cpu_online_mask; -} - -static void numachip_vector_allocation_domain(int cpu, struct cpumask *retmask) -{ - cpumask_clear(retmask); - cpumask_set_cpu(cpu, retmask); -} - -static int __cpuinit numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip) -{ - union numachip_csr_g3_ext_irq_gen int_gen; - - int_gen.s._destination_apic_id = phys_apicid; - int_gen.s._vector = 0; - int_gen.s._msgtype = APIC_DM_INIT >> 8; - int_gen.s._index = 0; - - write_lcsr(CSR_G3_EXT_IRQ_GEN, int_gen.v); - - int_gen.s._msgtype = APIC_DM_STARTUP >> 8; - int_gen.s._vector = start_rip >> 12; - - write_lcsr(CSR_G3_EXT_IRQ_GEN, int_gen.v); - - atomic_set(&init_deasserted, 1); - return 0; -} - -static void numachip_send_IPI_one(int cpu, int vector) -{ - union numachip_csr_g3_ext_irq_gen int_gen; - int apicid = per_cpu(x86_cpu_to_apicid, cpu); - - int_gen.s._destination_apic_id = apicid; - int_gen.s._vector = vector; - int_gen.s._msgtype = (vector == NMI_VECTOR ? APIC_DM_NMI : APIC_DM_FIXED) >> 8; - int_gen.s._index = 0; - - write_lcsr(CSR_G3_EXT_IRQ_GEN, int_gen.v); -} - -static void numachip_send_IPI_mask(const struct cpumask *mask, int vector) -{ - unsigned int cpu; - - for_each_cpu(cpu, mask) - numachip_send_IPI_one(cpu, vector); -} - -static void numachip_send_IPI_mask_allbutself(const struct cpumask *mask, - int vector) -{ - unsigned int this_cpu = smp_processor_id(); - unsigned int cpu; - - for_each_cpu(cpu, mask) { - if (cpu != this_cpu) - numachip_send_IPI_one(cpu, vector); - } -} - -static void numachip_send_IPI_allbutself(int vector) -{ - unsigned int this_cpu = smp_processor_id(); - unsigned int cpu; - - for_each_online_cpu(cpu) { - if (cpu != this_cpu) - numachip_send_IPI_one(cpu, vector); - } -} - -static void numachip_send_IPI_all(int vector) -{ - numachip_send_IPI_mask(cpu_online_mask, vector); -} - -static void numachip_send_IPI_self(int vector) -{ - __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); -} - -static unsigned int numachip_cpu_mask_to_apicid(const struct cpumask *cpumask) -{ - int cpu; - - /* - * We're using fixed IRQ delivery, can only return one phys APIC ID. - * May as well be the first. - */ - cpu = cpumask_first(cpumask); - if (likely((unsigned)cpu < nr_cpu_ids)) - return per_cpu(x86_cpu_to_apicid, cpu); - - return BAD_APICID; -} - -static unsigned int -numachip_cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask) -{ - int cpu; - - /* - * We're using fixed IRQ delivery, can only return one phys APIC ID. - * May as well be the first. 
- */ - for_each_cpu_and(cpu, cpumask, andmask) { - if (cpumask_test_cpu(cpu, cpu_online_mask)) - break; - } - return per_cpu(x86_cpu_to_apicid, cpu); -} - -static int __init numachip_probe(void) -{ - return apic == &apic_numachip; -} - -static void __init map_csrs(void) -{ - printk(KERN_INFO "NumaChip: Mapping local CSR space (%016llx - %016llx)\n", - NUMACHIP_LCSR_BASE, NUMACHIP_LCSR_BASE + NUMACHIP_LCSR_SIZE - 1); - init_extra_mapping_uc(NUMACHIP_LCSR_BASE, NUMACHIP_LCSR_SIZE); - - printk(KERN_INFO "NumaChip: Mapping global CSR space (%016llx - %016llx)\n", - NUMACHIP_GCSR_BASE, NUMACHIP_GCSR_BASE + NUMACHIP_GCSR_SIZE - 1); - init_extra_mapping_uc(NUMACHIP_GCSR_BASE, NUMACHIP_GCSR_SIZE); -} - -static void fixup_cpu_id(struct cpuinfo_x86 *c, int node) -{ - c->phys_proc_id = node; - per_cpu(cpu_llc_id, smp_processor_id()) = node; -} - -static int __init numachip_system_init(void) -{ - unsigned int val; - - if (!numachip_system) - return 0; - - x86_cpuinit.fixup_cpu_id = fixup_cpu_id; - - map_csrs(); - - val = read_lcsr(CSR_G0_NODE_IDS); - printk(KERN_INFO "NumaChip: Local NodeID = %08x\n", val); - - return 0; -} -early_initcall(numachip_system_init); - -static int numachip_acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ - if (!strncmp(oem_id, "NUMASC", 6)) { - numachip_system = 1; - return 1; - } - - return 0; -} - -static struct apic apic_numachip __refconst = { - - .name = "NumaConnect system", - .probe = numachip_probe, - .acpi_madt_oem_check = numachip_acpi_madt_oem_check, - .apic_id_registered = numachip_apic_id_registered, - - .irq_delivery_mode = dest_Fixed, - .irq_dest_mode = 0, /* physical */ - - .target_cpus = numachip_target_cpus, - .disable_esr = 0, - .dest_logical = 0, - .check_apicid_used = NULL, - .check_apicid_present = NULL, - - .vector_allocation_domain = numachip_vector_allocation_domain, - .init_apic_ldr = flat_init_apic_ldr, - - .ioapic_phys_id_map = NULL, - .setup_apic_routing = NULL, - .multi_timer_check = NULL, - .cpu_present_to_apicid = default_cpu_present_to_apicid, - .apicid_to_cpu_present = NULL, - .setup_portio_remap = NULL, - .check_phys_apicid_present = default_check_phys_apicid_present, - .enable_apic_mode = NULL, - .phys_pkg_id = numachip_phys_pkg_id, - .mps_oem_check = NULL, - - .get_apic_id = get_apic_id, - .set_apic_id = set_apic_id, - .apic_id_mask = 0xffU << 24, - - .cpu_mask_to_apicid = numachip_cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = numachip_cpu_mask_to_apicid_and, - - .send_IPI_mask = numachip_send_IPI_mask, - .send_IPI_mask_allbutself = numachip_send_IPI_mask_allbutself, - .send_IPI_allbutself = numachip_send_IPI_allbutself, - .send_IPI_all = numachip_send_IPI_all, - .send_IPI_self = numachip_send_IPI_self, - - .wakeup_secondary_cpu = numachip_wakeup_secondary, - .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, - .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, - .wait_for_init_deassert = NULL, - .smp_callin_clear_local_apic = NULL, - .inquire_remote_apic = NULL, /* REMRD not supported */ - - .read = native_apic_mem_read, - .write = native_apic_mem_write, - .icr_read = native_apic_icr_read, - .icr_write = native_apic_icr_write, - .wait_icr_idle = native_apic_wait_icr_idle, - .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, -}; -apic_driver(apic_numachip); - diff --git a/trunk/arch/x86/kernel/apic/io_apic.c b/trunk/arch/x86/kernel/apic/io_apic.c index fb072754bc1d..6d939d7847e2 100644 --- a/trunk/arch/x86/kernel/apic/io_apic.c +++ b/trunk/arch/x86/kernel/apic/io_apic.c @@ -2421,8 +2421,8 @@ asmlinkage void 
smp_irq_move_cleanup_interrupt(void) unsigned vector, me; ack_APIC_irq(); - irq_enter(); exit_idle(); + irq_enter(); me = smp_processor_id(); for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { @@ -2948,10 +2948,6 @@ static inline void __init check_timer(void) } local_irq_disable(); apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); - if (x2apic_preenabled) - apic_printk(APIC_QUIET, KERN_INFO - "Perhaps problem with the pre-enabled x2apic mode\n" - "Try booting with x2apic and interrupt-remapping disabled in the bios.\n"); panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " "report. Then try booting with the 'noapic' option.\n"); out: diff --git a/trunk/arch/x86/kernel/check.c b/trunk/arch/x86/kernel/check.c index 5da1269e8ddc..452932d34730 100644 --- a/trunk/arch/x86/kernel/check.c +++ b/trunk/arch/x86/kernel/check.c @@ -62,8 +62,7 @@ early_param("memory_corruption_check_size", set_corruption_check_size); void __init setup_bios_corruption_check(void) { - phys_addr_t start, end; - u64 i; + u64 addr = PAGE_SIZE; /* assume first page is reserved anyway */ if (memory_corruption_check == -1) { memory_corruption_check = @@ -83,23 +82,28 @@ void __init setup_bios_corruption_check(void) corruption_check_size = round_up(corruption_check_size, PAGE_SIZE); - for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) { - start = clamp_t(phys_addr_t, round_up(start, PAGE_SIZE), - PAGE_SIZE, corruption_check_size); - end = clamp_t(phys_addr_t, round_down(end, PAGE_SIZE), - PAGE_SIZE, corruption_check_size); - if (start >= end) - continue; + while (addr < corruption_check_size && num_scan_areas < MAX_SCAN_AREAS) { + u64 size; + addr = memblock_x86_find_in_range_size(addr, &size, PAGE_SIZE); - memblock_reserve(start, end - start); - scan_areas[num_scan_areas].addr = start; - scan_areas[num_scan_areas].size = end - start; + if (addr == MEMBLOCK_ERROR) + break; + + if (addr >= corruption_check_size) + break; + + if ((addr + size) > corruption_check_size) + size = corruption_check_size - addr; + + memblock_x86_reserve_range(addr, addr + size, "SCAN RAM"); + scan_areas[num_scan_areas].addr = addr; + scan_areas[num_scan_areas].size = size; + num_scan_areas++; /* Assume we've already mapped this early memory */ - memset(__va(start), 0, end - start); + memset(__va(addr), 0, size); - if (++num_scan_areas >= MAX_SCAN_AREAS) - break; + addr += size; } if (num_scan_areas) diff --git a/trunk/arch/x86/kernel/cpu/amd.c b/trunk/arch/x86/kernel/cpu/amd.c index f4773f4aae35..0bab2b18bb20 100644 --- a/trunk/arch/x86/kernel/cpu/amd.c +++ b/trunk/arch/x86/kernel/cpu/amd.c @@ -148,6 +148,7 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c) static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c) { +#ifdef CONFIG_SMP /* calling is from identify_secondary_cpu() ? 
*/ if (!c->cpu_index) return; @@ -191,6 +192,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c) valid_k7: ; +#endif } static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) @@ -351,13 +353,6 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) if (node == NUMA_NO_NODE) node = per_cpu(cpu_llc_id, cpu); - /* - * If core numbers are inconsistent, it's likely a multi-fabric platform, - * so invoke platform-specific handler - */ - if (c->phys_proc_id != node) - x86_cpuinit.fixup_cpu_id(c, node); - if (!node_online(node)) { /* * Two possibilities here: diff --git a/trunk/arch/x86/kernel/cpu/centaur.c b/trunk/arch/x86/kernel/cpu/centaur.c index 159103c0b1f4..e58d978e0758 100644 --- a/trunk/arch/x86/kernel/cpu/centaur.c +++ b/trunk/arch/x86/kernel/cpu/centaur.c @@ -278,7 +278,7 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c) } #ifdef CONFIG_X86_32 /* Cyrix III family needs CX8 & PGE explicitly enabled. */ - if (c->x86_model >= 6 && c->x86_model <= 13) { + if (c->x86_model >= 6 && c->x86_model <= 9) { rdmsr(MSR_VIA_FCR, lo, hi); lo |= (1<<1 | 1<<7); wrmsr(MSR_VIA_FCR, lo, hi); diff --git a/trunk/arch/x86/kernel/cpu/common.c b/trunk/arch/x86/kernel/cpu/common.c index 850f2963a420..aa003b13a831 100644 --- a/trunk/arch/x86/kernel/cpu/common.c +++ b/trunk/arch/x86/kernel/cpu/common.c @@ -676,7 +676,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) if (this_cpu->c_early_init) this_cpu->c_early_init(c); +#ifdef CONFIG_SMP c->cpu_index = 0; +#endif filter_cpuid_features(c, false); setup_smep(c); @@ -762,7 +764,10 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c) c->apicid = c->initial_apicid; # endif #endif + +#ifdef CONFIG_X86_HT c->phys_proc_id = c->initial_apicid; +#endif } setup_smep(c); @@ -1135,15 +1140,6 @@ static void dbg_restore_debug_regs(void) #define dbg_restore_debug_regs() #endif /* ! CONFIG_KGDB */ -/* - * Prints an error where the NUMA and configured core-number mismatch and the - * platform didn't override this to fix it up - */ -void __cpuinit x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node) -{ - pr_err("NUMA core number %d differs from configured core number %d\n", node, c->phys_proc_id); -} - /* * cpu_init() initializes state that is per-CPU. Some data is already * initialized (naturally) in the bootstrap process, such as the GDT diff --git a/trunk/arch/x86/kernel/cpu/cpu.h b/trunk/arch/x86/kernel/cpu/cpu.h index 8bacc7826fb3..1b22dcc51af4 100644 --- a/trunk/arch/x86/kernel/cpu/cpu.h +++ b/trunk/arch/x86/kernel/cpu/cpu.h @@ -1,4 +1,5 @@ #ifndef ARCH_X86_CPU_H + #define ARCH_X86_CPU_H struct cpu_model_info { @@ -34,4 +35,6 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[], extern void get_cpu_cap(struct cpuinfo_x86 *c); extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); -#endif /* ARCH_X86_CPU_H */ +extern void get_cpu_cap(struct cpuinfo_x86 *c); + +#endif diff --git a/trunk/arch/x86/kernel/cpu/intel.c b/trunk/arch/x86/kernel/cpu/intel.c index 3e6ff6cbf42a..523131213f08 100644 --- a/trunk/arch/x86/kernel/cpu/intel.c +++ b/trunk/arch/x86/kernel/cpu/intel.c @@ -181,6 +181,7 @@ static void __cpuinit trap_init_f00f_bug(void) static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) { +#ifdef CONFIG_SMP /* calling is from identify_secondary_cpu() ? 
*/ if (!c->cpu_index) return; @@ -197,6 +198,7 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) WARN_ONCE(1, "WARNING: SMP operation may be unreliable" "with B stepping processors.\n"); } +#endif } static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce-inject.c b/trunk/arch/x86/kernel/cpu/mcheck/mce-inject.c index fc4beb393577..319882ef848d 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/mce-inject.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/mce-inject.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include @@ -93,18 +92,6 @@ static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs) return NMI_HANDLED; } -static void mce_irq_ipi(void *info) -{ - int cpu = smp_processor_id(); - struct mce *m = &__get_cpu_var(injectm); - - if (cpumask_test_cpu(cpu, mce_inject_cpumask) && - m->inject_flags & MCJ_EXCEPTION) { - cpumask_clear_cpu(cpu, mce_inject_cpumask); - raise_exception(m, NULL); - } -} - /* Inject mce on current CPU */ static int raise_local(void) { @@ -152,10 +139,9 @@ static void raise_mce(struct mce *m) return; #ifdef CONFIG_X86_LOCAL_APIC - if (m->inject_flags & (MCJ_IRQ_BRAODCAST | MCJ_NMI_BROADCAST)) { + if (m->inject_flags & MCJ_NMI_BROADCAST) { unsigned long start; int cpu; - get_online_cpus(); cpumask_copy(mce_inject_cpumask, cpu_online_mask); cpumask_clear_cpu(get_cpu(), mce_inject_cpumask); @@ -165,25 +151,13 @@ static void raise_mce(struct mce *m) MCJ_CTX(mcpu->inject_flags) != MCJ_CTX_RANDOM) cpumask_clear_cpu(cpu, mce_inject_cpumask); } - if (!cpumask_empty(mce_inject_cpumask)) { - if (m->inject_flags & MCJ_IRQ_BRAODCAST) { - /* - * don't wait because mce_irq_ipi is necessary - * to be sync with following raise_local - */ - preempt_disable(); - smp_call_function_many(mce_inject_cpumask, - mce_irq_ipi, NULL, 0); - preempt_enable(); - } else if (m->inject_flags & MCJ_NMI_BROADCAST) - apic->send_IPI_mask(mce_inject_cpumask, - NMI_VECTOR); - } + if (!cpumask_empty(mce_inject_cpumask)) + apic->send_IPI_mask(mce_inject_cpumask, NMI_VECTOR); start = jiffies; while (!cpumask_empty(mce_inject_cpumask)) { if (!time_before(jiffies, start + 2*HZ)) { printk(KERN_ERR - "Timeout waiting for mce inject %lx\n", + "Timeout waiting for mce inject NMI %lx\n", *cpumask_bits(mce_inject_cpumask)); break; } diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce.c b/trunk/arch/x86/kernel/cpu/mcheck/mce.c index cbe82b5918ce..2af127d4c3d1 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/mce.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/mce.c @@ -95,6 +95,13 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait); static DEFINE_PER_CPU(struct mce, mces_seen); static int cpu_missing; +/* + * CPU/chipset specific EDAC code can register a notifier call here to print + * MCE errors in a human-readable form. + */ +ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain); +EXPORT_SYMBOL_GPL(x86_mce_decoder_chain); + /* MCA banks polled by the period polling timer for corrected events */ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL @@ -102,12 +109,6 @@ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { static DEFINE_PER_CPU(struct work_struct, mce_work); -/* - * CPU/chipset specific EDAC code can register a notifier call here to print - * MCE errors in a human-readable form. 
- */ -ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain); - /* Do initial initialization of a struct mce */ void mce_setup(struct mce *m) { @@ -118,7 +119,9 @@ void mce_setup(struct mce *m) m->time = get_seconds(); m->cpuvendor = boot_cpu_data.x86_vendor; m->cpuid = cpuid_eax(1); +#ifdef CONFIG_SMP m->socketid = cpu_data(m->extcpu).phys_proc_id; +#endif m->apicid = cpu_data(m->extcpu).initial_apicid; rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap); } @@ -187,57 +190,6 @@ void mce_log(struct mce *mce) set_bit(0, &mce_need_notify); } -static void drain_mcelog_buffer(void) -{ - unsigned int next, i, prev = 0; - - next = rcu_dereference_check_mce(mcelog.next); - - do { - struct mce *m; - - /* drain what was logged during boot */ - for (i = prev; i < next; i++) { - unsigned long start = jiffies; - unsigned retries = 1; - - m = &mcelog.entry[i]; - - while (!m->finished) { - if (time_after_eq(jiffies, start + 2*retries)) - retries++; - - cpu_relax(); - - if (!m->finished && retries >= 4) { - pr_err("MCE: skipping error being logged currently!\n"); - break; - } - } - smp_rmb(); - atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m); - } - - memset(mcelog.entry + prev, 0, (next - prev) * sizeof(*m)); - prev = next; - next = cmpxchg(&mcelog.next, prev, 0); - } while (next != prev); -} - - -void mce_register_decode_chain(struct notifier_block *nb) -{ - atomic_notifier_chain_register(&x86_mce_decoder_chain, nb); - drain_mcelog_buffer(); -} -EXPORT_SYMBOL_GPL(mce_register_decode_chain); - -void mce_unregister_decode_chain(struct notifier_block *nb) -{ - atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb); -} -EXPORT_SYMBOL_GPL(mce_unregister_decode_chain); - static void print_mce(struct mce *m) { int ret = 0; diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c b/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c index 1d76872b6a45..f5474218cffe 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -64,9 +64,11 @@ struct threshold_bank { }; static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks); +#ifdef CONFIG_SMP static unsigned char shared_bank[NR_BANKS] = { 0, 0, 0, 0, 1 }; +#endif static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */ @@ -200,9 +202,10 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) if (!block) per_cpu(bank_map, cpu) |= (1 << bank); +#ifdef CONFIG_SMP if (shared_bank[bank] && c->cpu_core_id) break; - +#endif offset = setup_APIC_mce(offset, (high & MASK_LVTOFF_HI) >> 20); @@ -528,6 +531,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) sprintf(name, "threshold_bank%i", bank); +#ifdef CONFIG_SMP if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ i = cpumask_first(cpu_llc_shared_mask(cpu)); @@ -554,6 +558,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) goto out; } +#endif b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL); if (!b) { diff --git a/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c b/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c index 39c6089891e4..787e06c84ea6 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -323,6 +323,17 @@ device_initcall(thermal_throttle_init_device); #endif /* CONFIG_SYSFS */ +/* + * Set up the most two significant bit to notify mce log that this thermal + * event type. + * This is a temp solution. May be changed in the future with mce log + * infrasture. 
+ */ +#define CORE_THROTTLED (0) +#define CORE_POWER_LIMIT ((__u64)1 << 62) +#define PACKAGE_THROTTLED ((__u64)2 << 62) +#define PACKAGE_POWER_LIMIT ((__u64)3 << 62) + static void notify_thresholds(__u64 msr_val) { /* check whether the interrupt handler is defined; @@ -352,23 +363,27 @@ static void intel_thermal_interrupt(void) if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT, THERMAL_THROTTLING_EVENT, CORE_LEVEL) != 0) - mce_log_therm_throt_event(msr_val); + mce_log_therm_throt_event(CORE_THROTTLED | msr_val); if (this_cpu_has(X86_FEATURE_PLN)) - therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT, + if (therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT, POWER_LIMIT_EVENT, - CORE_LEVEL); + CORE_LEVEL) != 0) + mce_log_therm_throt_event(CORE_POWER_LIMIT | msr_val); if (this_cpu_has(X86_FEATURE_PTS)) { rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); - therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT, + if (therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT, THERMAL_THROTTLING_EVENT, - PACKAGE_LEVEL); + PACKAGE_LEVEL) != 0) + mce_log_therm_throt_event(PACKAGE_THROTTLED | msr_val); if (this_cpu_has(X86_FEATURE_PLN)) - therm_throt_process(msr_val & + if (therm_throt_process(msr_val & PACKAGE_THERM_STATUS_POWER_LIMIT, POWER_LIMIT_EVENT, - PACKAGE_LEVEL); + PACKAGE_LEVEL) != 0) + mce_log_therm_throt_event(PACKAGE_POWER_LIMIT + | msr_val); } } @@ -382,8 +397,8 @@ static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt; asmlinkage void smp_thermal_interrupt(struct pt_regs *regs) { - irq_enter(); exit_idle(); + irq_enter(); inc_irq_stat(irq_thermal_count); smp_thermal_vector(); irq_exit(); diff --git a/trunk/arch/x86/kernel/cpu/mcheck/threshold.c b/trunk/arch/x86/kernel/cpu/mcheck/threshold.c index aa578cadb940..d746df2909c9 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/threshold.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/threshold.c @@ -19,8 +19,8 @@ void (*mce_threshold_vector)(void) = default_threshold_interrupt; asmlinkage void smp_threshold_interrupt(void) { - irq_enter(); exit_idle(); + irq_enter(); inc_irq_stat(irq_threshold_count); mce_threshold_vector(); irq_exit(); diff --git a/trunk/arch/x86/kernel/cpu/perf_event.c b/trunk/arch/x86/kernel/cpu/perf_event.c index 5adce1040b11..2bda212a0010 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event.c +++ b/trunk/arch/x86/kernel/cpu/perf_event.c @@ -484,195 +484,18 @@ static inline int is_x86_event(struct perf_event *event) return event->pmu == &pmu; } -/* - * Event scheduler state: - * - * Assign events iterating over all events and counters, beginning - * with events with least weights first. Keep the current iterator - * state in struct sched_state. - */ -struct sched_state { - int weight; - int event; /* event index */ - int counter; /* counter index */ - int unassigned; /* number of events to be assigned left */ - unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; -}; - -/* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */ -#define SCHED_STATES_MAX 2 - -struct perf_sched { - int max_weight; - int max_events; - struct event_constraint **constraints; - struct sched_state state; - int saved_states; - struct sched_state saved[SCHED_STATES_MAX]; -}; - -/* - * Initialize interator that runs through all events and counters. 
- */ -static void perf_sched_init(struct perf_sched *sched, struct event_constraint **c, - int num, int wmin, int wmax) -{ - int idx; - - memset(sched, 0, sizeof(*sched)); - sched->max_events = num; - sched->max_weight = wmax; - sched->constraints = c; - - for (idx = 0; idx < num; idx++) { - if (c[idx]->weight == wmin) - break; - } - - sched->state.event = idx; /* start with min weight */ - sched->state.weight = wmin; - sched->state.unassigned = num; -} - -static void perf_sched_save_state(struct perf_sched *sched) -{ - if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX)) - return; - - sched->saved[sched->saved_states] = sched->state; - sched->saved_states++; -} - -static bool perf_sched_restore_state(struct perf_sched *sched) -{ - if (!sched->saved_states) - return false; - - sched->saved_states--; - sched->state = sched->saved[sched->saved_states]; - - /* continue with next counter: */ - clear_bit(sched->state.counter++, sched->state.used); - - return true; -} - -/* - * Select a counter for the current event to schedule. Return true on - * success. - */ -static bool __perf_sched_find_counter(struct perf_sched *sched) -{ - struct event_constraint *c; - int idx; - - if (!sched->state.unassigned) - return false; - - if (sched->state.event >= sched->max_events) - return false; - - c = sched->constraints[sched->state.event]; - - /* Prefer fixed purpose counters */ - if (x86_pmu.num_counters_fixed) { - idx = X86_PMC_IDX_FIXED; - for_each_set_bit_cont(idx, c->idxmsk, X86_PMC_IDX_MAX) { - if (!__test_and_set_bit(idx, sched->state.used)) - goto done; - } - } - /* Grab the first unused counter starting with idx */ - idx = sched->state.counter; - for_each_set_bit_cont(idx, c->idxmsk, X86_PMC_IDX_FIXED) { - if (!__test_and_set_bit(idx, sched->state.used)) - goto done; - } - - return false; - -done: - sched->state.counter = idx; - - if (c->overlap) - perf_sched_save_state(sched); - - return true; -} - -static bool perf_sched_find_counter(struct perf_sched *sched) -{ - while (!__perf_sched_find_counter(sched)) { - if (!perf_sched_restore_state(sched)) - return false; - } - - return true; -} - -/* - * Go through all unassigned events and find the next one to schedule. - * Take events with the least weight first. Return true on success. - */ -static bool perf_sched_next_event(struct perf_sched *sched) -{ - struct event_constraint *c; - - if (!sched->state.unassigned || !--sched->state.unassigned) - return false; - - do { - /* next event */ - sched->state.event++; - if (sched->state.event >= sched->max_events) { - /* next weight */ - sched->state.event = 0; - sched->state.weight++; - if (sched->state.weight > sched->max_weight) - return false; - } - c = sched->constraints[sched->state.event]; - } while (c->weight != sched->state.weight); - - sched->state.counter = 0; /* start with first counter */ - - return true; -} - -/* - * Assign a counter for each event. 
- */ -static int perf_assign_events(struct event_constraint **constraints, int n, - int wmin, int wmax, int *assign) -{ - struct perf_sched sched; - - perf_sched_init(&sched, constraints, n, wmin, wmax); - - do { - if (!perf_sched_find_counter(&sched)) - break; /* failed */ - if (assign) - assign[sched.state.event] = sched.state.counter; - } while (perf_sched_next_event(&sched)); - - return sched.state.unassigned; -} - int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) { struct event_constraint *c, *constraints[X86_PMC_IDX_MAX]; unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; - int i, wmin, wmax, num = 0; + int i, j, w, wmax, num = 0; struct hw_perf_event *hwc; bitmap_zero(used_mask, X86_PMC_IDX_MAX); - for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) { + for (i = 0; i < n; i++) { c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]); constraints[i] = c; - wmin = min(wmin, c->weight); - wmax = max(wmax, c->weight); } /* @@ -698,11 +521,59 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) if (assign) assign[i] = hwc->idx; } + if (i == n) + goto done; + + /* + * begin slow path + */ + + bitmap_zero(used_mask, X86_PMC_IDX_MAX); - /* slow path */ - if (i != n) - num = perf_assign_events(constraints, n, wmin, wmax, assign); + /* + * weight = number of possible counters + * + * 1 = most constrained, only works on one counter + * wmax = least constrained, works on any counter + * + * assign events to counters starting with most + * constrained events. + */ + wmax = x86_pmu.num_counters; + /* + * when fixed event counters are present, + * wmax is incremented by 1 to account + * for one more choice + */ + if (x86_pmu.num_counters_fixed) + wmax++; + + for (w = 1, num = n; num && w <= wmax; w++) { + /* for each event */ + for (i = 0; num && i < n; i++) { + c = constraints[i]; + hwc = &cpuc->event_list[i]->hw; + + if (c->weight != w) + continue; + + for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) { + if (!test_bit(j, used_mask)) + break; + } + + if (j == X86_PMC_IDX_MAX) + break; + + __set_bit(j, used_mask); + + if (assign) + assign[i] = j; + num--; + } + } +done: /* * scheduling failed or is just a simulation, * free resources if necessary @@ -1248,7 +1119,6 @@ static void __init pmu_check_apic(void) static int __init init_hw_perf_events(void) { - struct x86_pmu_quirk *quirk; struct event_constraint *c; int err; @@ -1277,8 +1147,8 @@ static int __init init_hw_perf_events(void) pr_cont("%s PMU driver.\n", x86_pmu.name); - for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next) - quirk->func(); + if (x86_pmu.quirks) + x86_pmu.quirks(); if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) { WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", @@ -1301,18 +1171,12 @@ static int __init init_hw_perf_events(void) unconstrained = (struct event_constraint) __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, - 0, x86_pmu.num_counters, 0); + 0, x86_pmu.num_counters); if (x86_pmu.event_constraints) { - /* - * event on fixed counter2 (REF_CYCLES) only works on this - * counter, so do not extend mask to generic counters - */ for_each_event_constraint(c, x86_pmu.event_constraints) { - if (c->cmask != X86_RAW_EVENT_MASK - || c->idxmsk64 == X86_PMC_MSK_FIXED_REF_CYCLES) { + if (c->cmask != X86_RAW_EVENT_MASK) continue; - } c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1; c->weight += x86_pmu.num_counters; @@ -1702,15 +1566,3 @@ unsigned long perf_misc_flags(struct pt_regs *regs) return misc; } - -void perf_get_x86_pmu_capability(struct 
x86_pmu_capability *cap) -{ - cap->version = x86_pmu.version; - cap->num_counters_gp = x86_pmu.num_counters; - cap->num_counters_fixed = x86_pmu.num_counters_fixed; - cap->bit_width_gp = x86_pmu.cntval_bits; - cap->bit_width_fixed = x86_pmu.cntval_bits; - cap->events_mask = (unsigned int)x86_pmu.events_maskl; - cap->events_mask_len = x86_pmu.events_mask_len; -} -EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability); diff --git a/trunk/arch/x86/kernel/cpu/perf_event.h b/trunk/arch/x86/kernel/cpu/perf_event.h index 8944062f46e2..b9698d40ac4b 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event.h +++ b/trunk/arch/x86/kernel/cpu/perf_event.h @@ -45,7 +45,6 @@ struct event_constraint { u64 code; u64 cmask; int weight; - int overlap; }; struct amd_nb { @@ -152,40 +151,15 @@ struct cpu_hw_events { void *kfree_on_online; }; -#define __EVENT_CONSTRAINT(c, n, m, w, o) {\ +#define __EVENT_CONSTRAINT(c, n, m, w) {\ { .idxmsk64 = (n) }, \ .code = (c), \ .cmask = (m), \ .weight = (w), \ - .overlap = (o), \ } #define EVENT_CONSTRAINT(c, n, m) \ - __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0) - -/* - * The overlap flag marks event constraints with overlapping counter - * masks. This is the case if the counter mask of such an event is not - * a subset of any other counter mask of a constraint with an equal or - * higher weight, e.g.: - * - * c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0); - * c_another1 = EVENT_CONSTRAINT(0, 0x07, 0); - * c_another2 = EVENT_CONSTRAINT(0, 0x38, 0); - * - * The event scheduler may not select the correct counter in the first - * cycle because it needs to know which subsequent events will be - * scheduled. It may fail to schedule the events then. So we set the - * overlap flag for such constraints to give the scheduler a hint which - * events to select for counter rescheduling. - * - * Care must be taken as the rescheduling algorithm is O(n!) which - * will increase scheduling cycles for an over-commited system - * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros - * and its counter masks must be kept at a minimum. - */ -#define EVENT_CONSTRAINT_OVERLAP(c, n, m) \ - __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1) + __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n)) /* * Constraint on the Event code. 
@@ -261,11 +235,6 @@ union perf_capabilities { u64 capabilities; }; -struct x86_pmu_quirk { - struct x86_pmu_quirk *next; - void (*func)(void); -}; - /* * struct x86_pmu - generic x86 pmu */ @@ -290,11 +259,6 @@ struct x86_pmu { int num_counters_fixed; int cntval_bits; u64 cntval_mask; - union { - unsigned long events_maskl; - unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)]; - }; - int events_mask_len; int apic; u64 max_period; struct event_constraint * @@ -304,7 +268,7 @@ struct x86_pmu { void (*put_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event); struct event_constraint *event_constraints; - struct x86_pmu_quirk *quirks; + void (*quirks)(void); int perfctr_second_write; int (*cpu_prepare)(int cpu); @@ -345,15 +309,6 @@ struct x86_pmu { struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); }; -#define x86_add_quirk(func_) \ -do { \ - static struct x86_pmu_quirk __quirk __initdata = { \ - .func = func_, \ - }; \ - __quirk.next = x86_pmu.quirks; \ - x86_pmu.quirks = &__quirk; \ -} while (0) - #define ERF_NO_HT_SHARING 1 #define ERF_HAS_RSP_1 2 diff --git a/trunk/arch/x86/kernel/cpu/perf_event_amd.c b/trunk/arch/x86/kernel/cpu/perf_event_amd.c index 0397b23be8e9..aeefd45697a2 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_amd.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_amd.c @@ -492,7 +492,7 @@ static __initconst const struct x86_pmu amd_pmu = { static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0); static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0); static struct event_constraint amd_f15_PMC3 = EVENT_CONSTRAINT(0, 0x08, 0); -static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0); +static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0); static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0); static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0); diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel.c b/trunk/arch/x86/kernel/cpu/perf_event_intel.c index 3bd37bdf1b8e..8d601b18bf9f 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel.c @@ -28,7 +28,6 @@ static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, - [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */ }; static struct event_constraint intel_core_event_constraints[] __read_mostly = @@ -46,7 +45,12 @@ static struct event_constraint intel_core2_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ - FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ + /* + * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event + * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed + * ratio between these counters. 
+ */ + /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ @@ -64,7 +68,7 @@ static struct event_constraint intel_nehalem_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ - FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ + /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */ INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */ INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */ @@ -86,7 +90,7 @@ static struct event_constraint intel_westmere_event_constraints[] __read_mostly { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ - FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ + /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */ INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */ @@ -98,7 +102,7 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ - FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ + /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ @@ -121,7 +125,7 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ - FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ + /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ EVENT_CONSTRAINT_END }; @@ -1165,7 +1169,7 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, */ c = &unconstrained; } else if (intel_try_alt_er(event, orig_idx)) { - raw_spin_unlock_irqrestore(&era->lock, flags); + raw_spin_unlock(&era->lock); goto again; } raw_spin_unlock_irqrestore(&era->lock, flags); @@ -1515,7 +1519,7 @@ static __initconst const struct x86_pmu intel_pmu = { .guest_get_msrs = intel_guest_get_msrs, }; -static __init void intel_clovertown_quirk(void) +static void intel_clovertown_quirks(void) { /* * PEBS is unreliable due to: @@ -1541,60 +1545,19 @@ static __init void intel_clovertown_quirk(void) x86_pmu.pebs_constraints = NULL; } -static __init void intel_sandybridge_quirk(void) +static void intel_sandybridge_quirks(void) { printk(KERN_WARNING "PEBS disabled due to CPU errata.\n"); x86_pmu.pebs = 0; x86_pmu.pebs_constraints = NULL; } -static const struct { int id; char *name; } intel_arch_events_map[] __initconst = { - { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" }, - { PERF_COUNT_HW_INSTRUCTIONS, "instructions" }, - { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" }, - { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" }, - { PERF_COUNT_HW_CACHE_MISSES, "cache misses" }, - { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" }, - { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" }, -}; - -static __init void intel_arch_events_quirk(void) -{ - int 
bit; - - /* disable event that reported as not presend by cpuid */ - for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) { - intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0; - printk(KERN_WARNING "CPUID marked event: \'%s\' unavailable\n", - intel_arch_events_map[bit].name); - } -} - -static __init void intel_nehalem_quirk(void) -{ - union cpuid10_ebx ebx; - - ebx.full = x86_pmu.events_maskl; - if (ebx.split.no_branch_misses_retired) { - /* - * Erratum AAJ80 detected, we work it around by using - * the BR_MISP_EXEC.ANY event. This will over-count - * branch-misses, but it's still much better than the - * architectural event which is often completely bogus: - */ - intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89; - ebx.split.no_branch_misses_retired = 0; - x86_pmu.events_maskl = ebx.full; - printk(KERN_INFO "CPU erratum AAJ80 worked around\n"); - } -} - __init int intel_pmu_init(void) { union cpuid10_edx edx; union cpuid10_eax eax; - union cpuid10_ebx ebx; unsigned int unused; + unsigned int ebx; int version; if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { @@ -1611,8 +1574,8 @@ __init int intel_pmu_init(void) * Check whether the Architectural PerfMon supports * Branch Misses Retired hw_event or not. */ - cpuid(10, &eax.full, &ebx.full, &unused, &edx.full); - if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT) + cpuid(10, &eax.full, &ebx, &unused, &edx.full); + if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) return -ENODEV; version = eax.split.version_id; @@ -1626,9 +1589,6 @@ __init int intel_pmu_init(void) x86_pmu.cntval_bits = eax.split.bit_width; x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1; - x86_pmu.events_maskl = ebx.full; - x86_pmu.events_mask_len = eax.split.mask_length; - /* * Quirk: v2 perfmon does not report fixed-purpose events, so * assume at least 3 events: @@ -1648,8 +1608,6 @@ __init int intel_pmu_init(void) intel_ds_init(); - x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */ - /* * Install the hw-cache-events table: */ @@ -1659,7 +1617,7 @@ __init int intel_pmu_init(void) break; case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */ - x86_add_quirk(intel_clovertown_quirk); + x86_pmu.quirks = intel_clovertown_quirks; case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */ case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */ case 29: /* six-core 45 nm xeon "Dunnington" */ @@ -1693,8 +1651,17 @@ __init int intel_pmu_init(void) /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1; - x86_add_quirk(intel_nehalem_quirk); + if (ebx & 0x40) { + /* + * Erratum AAJ80 detected, we work it around by using + * the BR_MISP_EXEC.ANY event. 
This will over-count + * branch-misses, but it's still much better than the + * architectural event which is often completely bogus: + */ + intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89; + pr_cont("erratum AAJ80 worked around, "); + } pr_cont("Nehalem events, "); break; @@ -1734,7 +1701,7 @@ __init int intel_pmu_init(void) break; case 42: /* SandyBridge */ - x86_add_quirk(intel_sandybridge_quirk); + x86_pmu.quirks = intel_sandybridge_quirks; case 45: /* SandyBridge, "Romely-EP" */ memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); @@ -1771,6 +1738,5 @@ __init int intel_pmu_init(void) break; } } - return 0; } diff --git a/trunk/arch/x86/kernel/cpu/powerflags.c b/trunk/arch/x86/kernel/cpu/powerflags.c index 7b3fe56b1c21..5abbea297e0c 100644 --- a/trunk/arch/x86/kernel/cpu/powerflags.c +++ b/trunk/arch/x86/kernel/cpu/powerflags.c @@ -16,6 +16,5 @@ const char *const x86_power_flags[32] = { "100mhzsteps", "hwpstate", "", /* tsc invariant mapped to constant_tsc */ - "cpb", /* core performance boost */ - "eff_freq_ro", /* Readonly aperf/mperf */ + /* nothing */ }; diff --git a/trunk/arch/x86/kernel/cpu/proc.c b/trunk/arch/x86/kernel/cpu/proc.c index 8022c6681485..14b23140e81f 100644 --- a/trunk/arch/x86/kernel/cpu/proc.c +++ b/trunk/arch/x86/kernel/cpu/proc.c @@ -64,10 +64,12 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) static int show_cpuinfo(struct seq_file *m, void *v) { struct cpuinfo_x86 *c = v; - unsigned int cpu; + unsigned int cpu = 0; int i; +#ifdef CONFIG_SMP cpu = c->cpu_index; +#endif seq_printf(m, "processor\t: %u\n" "vendor_id\t: %s\n" "cpu family\t: %d\n" diff --git a/trunk/arch/x86/kernel/e820.c b/trunk/arch/x86/kernel/e820.c index 8071e2f3d6eb..303a0e48f076 100644 --- a/trunk/arch/x86/kernel/e820.c +++ b/trunk/arch/x86/kernel/e820.c @@ -738,17 +738,35 @@ core_initcall(e820_mark_nvs_memory); /* * pre allocated 4k and reserved it in memblock and e820_saved */ -u64 __init early_reserve_e820(u64 size, u64 align) +u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align) { + u64 size = 0; u64 addr; + u64 start; - addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); - if (addr) { - e820_update_range_saved(addr, size, E820_RAM, E820_RESERVED); - printk(KERN_INFO "update e820_saved for early_reserve_e820\n"); - update_e820_saved(); + for (start = startt; ; start += size) { + start = memblock_x86_find_in_range_size(start, &size, align); + if (start == MEMBLOCK_ERROR) + return 0; + if (size >= sizet) + break; } +#ifdef CONFIG_X86_32 + if (start >= MAXMEM) + return 0; + if (start + size > MAXMEM) + size = MAXMEM - start; +#endif + + addr = round_down(start + size - sizet, align); + if (addr < start) + return 0; + memblock_x86_reserve_range(addr, addr + sizet, "new next"); + e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED); + printk(KERN_INFO "update e820_saved for early_reserve_e820\n"); + update_e820_saved(); + return addr; } @@ -1072,7 +1090,7 @@ void __init memblock_x86_fill(void) * We are safe to enable resizing, beause memblock_x86_fill() * is rather later for x86 */ - memblock_allow_resize(); + memblock_can_resize = 1; for (i = 0; i < e820.nr_map; i++) { struct e820entry *ei = &e820.map[i]; @@ -1087,36 +1105,22 @@ void __init memblock_x86_fill(void) memblock_add(ei->addr, ei->size); } + memblock_analyze(); memblock_dump_all(); } void __init memblock_find_dma_reserve(void) { #ifdef CONFIG_X86_64 - u64 nr_pages = 0, nr_free_pages = 0; - unsigned long start_pfn, end_pfn; - 
phys_addr_t start, end; - int i; - u64 u; - + u64 free_size_pfn; + u64 mem_size_pfn; /* * need to find out used area below MAX_DMA_PFN * need to use memblock to get free size in [0, MAX_DMA_PFN] * at first, and assume boot_mem will not take below MAX_DMA_PFN */ - for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { - start_pfn = min_t(unsigned long, start_pfn, MAX_DMA_PFN); - end_pfn = min_t(unsigned long, end_pfn, MAX_DMA_PFN); - nr_pages += end_pfn - start_pfn; - } - - for_each_free_mem_range(u, MAX_NUMNODES, &start, &end, NULL) { - start_pfn = min_t(unsigned long, PFN_UP(start), MAX_DMA_PFN); - end_pfn = min_t(unsigned long, PFN_DOWN(end), MAX_DMA_PFN); - if (start_pfn < end_pfn) - nr_free_pages += end_pfn - start_pfn; - } - - set_dma_reserve(nr_pages - nr_free_pages); + mem_size_pfn = memblock_x86_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT; + free_size_pfn = memblock_x86_free_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT; + set_dma_reserve(mem_size_pfn - free_size_pfn); #endif } diff --git a/trunk/arch/x86/kernel/entry_32.S b/trunk/arch/x86/kernel/entry_32.S index 22d0e21b4dd7..f3f6f5344001 100644 --- a/trunk/arch/x86/kernel/entry_32.S +++ b/trunk/arch/x86/kernel/entry_32.S @@ -625,8 +625,6 @@ work_notifysig: # deal with pending signals and movl %esp, %eax jne work_notifysig_v86 # returning to kernel-space or # vm86-space - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_NONE) xorl %edx, %edx call do_notify_resume jmp resume_userspace_sig @@ -640,8 +638,6 @@ work_notifysig_v86: #else movl %esp, %eax #endif - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_NONE) xorl %edx, %edx call do_notify_resume jmp resume_userspace_sig diff --git a/trunk/arch/x86/kernel/entry_64.S b/trunk/arch/x86/kernel/entry_64.S index a20e1cb9dc87..faf8d5e74b0b 100644 --- a/trunk/arch/x86/kernel/entry_64.S +++ b/trunk/arch/x86/kernel/entry_64.S @@ -221,7 +221,7 @@ ENDPROC(native_usergs_sysret64) /*CFI_REL_OFFSET ss,0*/ pushq_cfi %rax /* rsp */ CFI_REL_OFFSET rsp,0 - pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_BIT1) /* eflags - interrupts on */ + pushq_cfi $X86_EFLAGS_IF /* eflags - interrupts on */ /*CFI_REL_OFFSET rflags,0*/ pushq_cfi $__KERNEL_CS /* cs */ /*CFI_REL_OFFSET cs,0*/ @@ -411,7 +411,7 @@ ENTRY(ret_from_fork) RESTORE_REST testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? - jz retint_restore_args + je int_ret_from_sys_call testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET jnz int_ret_from_sys_call @@ -465,7 +465,7 @@ ENTRY(system_call) * after the swapgs, so that it can do the swapgs * for the guest and jump here on syscall. 
*/ -GLOBAL(system_call_after_swapgs) +ENTRY(system_call_after_swapgs) movq %rsp,PER_CPU_VAR(old_rsp) movq PER_CPU_VAR(kernel_stack),%rsp @@ -478,7 +478,8 @@ GLOBAL(system_call_after_swapgs) movq %rax,ORIG_RAX-ARGOFFSET(%rsp) movq %rcx,RIP-ARGOFFSET(%rsp) CFI_REL_OFFSET rip,RIP-ARGOFFSET - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) + GET_THREAD_INFO(%rcx) + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx) jnz tracesys system_call_fastpath: cmpq $__NR_syscall_max,%rax @@ -495,9 +496,10 @@ ret_from_sys_call: /* edi: flagmask */ sysret_check: LOCKDEP_SYS_EXIT + GET_THREAD_INFO(%rcx) DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF - movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx + movl TI_flags(%rcx),%edx andl %edi,%edx jnz sysret_careful CFI_REMEMBER_STATE @@ -581,7 +583,7 @@ sysret_audit: /* Do syscall tracing */ tracesys: #ifdef CONFIG_AUDITSYSCALL - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx) jz auditsys #endif SAVE_REST @@ -610,6 +612,8 @@ tracesys: GLOBAL(int_ret_from_sys_call) DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF + testl $3,CS-ARGOFFSET(%rsp) + je retint_restore_args movl $_TIF_ALLWORK_MASK,%edi /* edi: mask to check */ GLOBAL(int_with_check) @@ -949,7 +953,6 @@ END(common_interrupt) ENTRY(\sym) INTR_FRAME pushq_cfi $~(\num) -.Lcommon_\sym: interrupt \do_sym jmp ret_from_intr CFI_ENDPROC @@ -973,21 +976,13 @@ apicinterrupt X86_PLATFORM_IPI_VECTOR \ x86_platform_ipi smp_x86_platform_ipi #ifdef CONFIG_SMP - ALIGN - INTR_FRAME -.irp idx,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \ +.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \ 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 .if NUM_INVALIDATE_TLB_VECTORS > \idx -ENTRY(invalidate_interrupt\idx) - pushq_cfi $~(INVALIDATE_TLB_VECTOR_START+\idx) - jmp .Lcommon_invalidate_interrupt0 - CFI_ADJUST_CFA_OFFSET -8 -END(invalidate_interrupt\idx) +apicinterrupt (INVALIDATE_TLB_VECTOR_START)+\idx \ + invalidate_interrupt\idx smp_invalidate_interrupt .endif .endr - CFI_ENDPROC -apicinterrupt INVALIDATE_TLB_VECTOR_START, \ - invalidate_interrupt0, smp_invalidate_interrupt #endif apicinterrupt THRESHOLD_APIC_VECTOR \ diff --git a/trunk/arch/x86/kernel/head.c b/trunk/arch/x86/kernel/head.c index 48d9d4ea1020..af0699ba48cf 100644 --- a/trunk/arch/x86/kernel/head.c +++ b/trunk/arch/x86/kernel/head.c @@ -52,5 +52,5 @@ void __init reserve_ebda_region(void) lowmem = 0x9f000; /* reserve all memory between lowmem and the 1MB mark */ - memblock_reserve(lowmem, 0x100000 - lowmem); + memblock_x86_reserve_range(lowmem, 0x100000, "* BIOS reserved"); } diff --git a/trunk/arch/x86/kernel/head32.c b/trunk/arch/x86/kernel/head32.c index 51ff18616d50..3bb08509a7a1 100644 --- a/trunk/arch/x86/kernel/head32.c +++ b/trunk/arch/x86/kernel/head32.c @@ -31,8 +31,9 @@ static void __init i386_default_early_setup(void) void __init i386_start_kernel(void) { - memblock_reserve(__pa_symbol(&_text), - __pa_symbol(&__bss_stop) - __pa_symbol(&_text)); + memblock_init(); + + memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); #ifdef CONFIG_BLK_DEV_INITRD /* Reserve INITRD */ @@ -41,7 +42,7 @@ void __init i386_start_kernel(void) u64 ramdisk_image = boot_params.hdr.ramdisk_image; u64 ramdisk_size = boot_params.hdr.ramdisk_size; u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); - memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image); + memblock_x86_reserve_range(ramdisk_image, 
ramdisk_end, "RAMDISK"); } #endif diff --git a/trunk/arch/x86/kernel/head64.c b/trunk/arch/x86/kernel/head64.c index 3a3b779f41d3..5655c2272adb 100644 --- a/trunk/arch/x86/kernel/head64.c +++ b/trunk/arch/x86/kernel/head64.c @@ -98,8 +98,9 @@ void __init x86_64_start_reservations(char *real_mode_data) { copy_bootdata(__va(real_mode_data)); - memblock_reserve(__pa_symbol(&_text), - __pa_symbol(&__bss_stop) - __pa_symbol(&_text)); + memblock_init(); + + memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); #ifdef CONFIG_BLK_DEV_INITRD /* Reserve INITRD */ @@ -108,7 +109,7 @@ void __init x86_64_start_reservations(char *real_mode_data) unsigned long ramdisk_image = boot_params.hdr.ramdisk_image; unsigned long ramdisk_size = boot_params.hdr.ramdisk_size; unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); - memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image); + memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK"); } #endif diff --git a/trunk/arch/x86/kernel/hpet.c b/trunk/arch/x86/kernel/hpet.c index 07b0a56a754d..1bb0bf4d92cd 100644 --- a/trunk/arch/x86/kernel/hpet.c +++ b/trunk/arch/x86/kernel/hpet.c @@ -32,6 +32,8 @@ #define HPET_MIN_CYCLES 128 #define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1)) +#define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt) + /* * HPET address is set in acpi/boot.c, when an ACPI entry exists */ @@ -53,11 +55,6 @@ struct hpet_dev { char name[10]; }; -inline struct hpet_dev *EVT_TO_HPET_DEV(struct clock_event_device *evtdev) -{ - return container_of(evtdev, struct hpet_dev, evt); -} - inline unsigned int hpet_readl(unsigned int a) { return readl(hpet_virt_address + a); diff --git a/trunk/arch/x86/kernel/irq.c b/trunk/arch/x86/kernel/irq.c index 7943e0c21bde..429e0c92924e 100644 --- a/trunk/arch/x86/kernel/irq.c +++ b/trunk/arch/x86/kernel/irq.c @@ -74,10 +74,6 @@ int arch_show_interrupts(struct seq_file *p, int prec) for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs); seq_printf(p, " IRQ work interrupts\n"); - seq_printf(p, "%*s: ", prec, "RTR"); - for_each_online_cpu(j) - seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count); - seq_printf(p, " APIC ICR read retries\n"); #endif if (x86_platform_ipi_callback) { seq_printf(p, "%*s: ", prec, "PLT"); @@ -140,7 +136,6 @@ u64 arch_irq_stat_cpu(unsigned int cpu) sum += irq_stats(cpu)->irq_spurious_count; sum += irq_stats(cpu)->apic_perf_irqs; sum += irq_stats(cpu)->apic_irq_work_irqs; - sum += irq_stats(cpu)->icr_read_retry_count; #endif if (x86_platform_ipi_callback) sum += irq_stats(cpu)->x86_platform_ipis; @@ -186,8 +181,8 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs) unsigned vector = ~regs->orig_ax; unsigned irq; - irq_enter(); exit_idle(); + irq_enter(); irq = __this_cpu_read(vector_irq[vector]); @@ -214,10 +209,10 @@ void smp_x86_platform_ipi(struct pt_regs *regs) ack_APIC_irq(); - irq_enter(); - exit_idle(); + irq_enter(); + inc_irq_stat(x86_platform_ipis); if (x86_platform_ipi_callback) diff --git a/trunk/arch/x86/kernel/jump_label.c b/trunk/arch/x86/kernel/jump_label.c index 2889b3d43882..ea9d5f2f13ef 100644 --- a/trunk/arch/x86/kernel/jump_label.c +++ b/trunk/arch/x86/kernel/jump_label.c @@ -50,7 +50,7 @@ void arch_jump_label_transform(struct jump_entry *entry, put_online_cpus(); } -__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry, +void arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type) { 
__jump_label_transform(entry, type, text_poke_early); diff --git a/trunk/arch/x86/kernel/microcode_amd.c b/trunk/arch/x86/kernel/microcode_amd.c index fe86493f3ed1..d494799aafcd 100644 --- a/trunk/arch/x86/kernel/microcode_amd.c +++ b/trunk/arch/x86/kernel/microcode_amd.c @@ -1,18 +1,14 @@ /* * AMD CPU Microcode Update Driver for Linux - * Copyright (C) 2008-2011 Advanced Micro Devices Inc. + * Copyright (C) 2008 Advanced Micro Devices Inc. * * Author: Peter Oruba * * Based on work by: * Tigran Aivazian * - * Maintainers: - * Andreas Herrmann - * Borislav Petkov - * - * This driver allows to upgrade microcode on F10h AMD - * CPUs and later. + * This driver allows to upgrade microcode on AMD + * family 0x10 and 0x11 processors. * * Licensed under the terms of the GNU General Public * License version 2. See file COPYING for details. @@ -75,9 +71,6 @@ struct microcode_amd { static struct equiv_cpu_entry *equiv_cpu_table; -/* page-sized ucode patch buffer */ -void *patch; - static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) { struct cpuinfo_x86 *c = &cpu_data(cpu); @@ -93,76 +86,27 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) return 0; } -static unsigned int verify_ucode_size(int cpu, u32 patch_size, - unsigned int size) +static int get_matching_microcode(int cpu, struct microcode_header_amd *mc_hdr, + int rev) { - struct cpuinfo_x86 *c = &cpu_data(cpu); - u32 max_size; - -#define F1XH_MPB_MAX_SIZE 2048 -#define F14H_MPB_MAX_SIZE 1824 -#define F15H_MPB_MAX_SIZE 4096 - - switch (c->x86) { - case 0x14: - max_size = F14H_MPB_MAX_SIZE; - break; - case 0x15: - max_size = F15H_MPB_MAX_SIZE; - break; - default: - max_size = F1XH_MPB_MAX_SIZE; - break; - } - - if (patch_size > min_t(u32, size, max_size)) { - pr_err("patch size mismatch\n"); - return 0; - } - - return patch_size; -} - -static u16 find_equiv_id(void) -{ - unsigned int current_cpu_id, i = 0; + unsigned int current_cpu_id; + u16 equiv_cpu_id = 0; + unsigned int i = 0; BUG_ON(equiv_cpu_table == NULL); - current_cpu_id = cpuid_eax(0x00000001); while (equiv_cpu_table[i].installed_cpu != 0) { - if (current_cpu_id == equiv_cpu_table[i].installed_cpu) - return equiv_cpu_table[i].equiv_cpu; - + if (current_cpu_id == equiv_cpu_table[i].installed_cpu) { + equiv_cpu_id = equiv_cpu_table[i].equiv_cpu; + break; + } i++; } - return 0; -} -/* - * we signal a good patch is found by returning its size > 0 - */ -static int get_matching_microcode(int cpu, const u8 *ucode_ptr, - unsigned int leftover_size, int rev, - unsigned int *current_size) -{ - struct microcode_header_amd *mc_hdr; - unsigned int actual_size; - u16 equiv_cpu_id; - - /* size of the current patch we're staring at */ - *current_size = *(u32 *)(ucode_ptr + 4) + SECTION_HDR_SIZE; - - equiv_cpu_id = find_equiv_id(); if (!equiv_cpu_id) return 0; - /* - * let's look at the patch header itself now - */ - mc_hdr = (struct microcode_header_amd *)(ucode_ptr + SECTION_HDR_SIZE); - if (mc_hdr->processor_rev_id != equiv_cpu_id) return 0; @@ -176,20 +120,7 @@ static int get_matching_microcode(int cpu, const u8 *ucode_ptr, if (mc_hdr->patch_id <= rev) return 0; - /* - * now that the header looks sane, verify its size - */ - actual_size = verify_ucode_size(cpu, *current_size, leftover_size); - if (!actual_size) - return 0; - - /* clear the patch buffer */ - memset(patch, 0, PAGE_SIZE); - - /* all looks ok, get the binary patch */ - get_ucode_data(patch, ucode_ptr + SECTION_HDR_SIZE, actual_size); - - return actual_size; + return 1; } static int 
apply_microcode_amd(int cpu) @@ -224,6 +155,63 @@ static int apply_microcode_amd(int cpu) return 0; } +static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size) +{ + struct cpuinfo_x86 *c = &cpu_data(cpu); + u32 max_size, actual_size; + +#define F1XH_MPB_MAX_SIZE 2048 +#define F14H_MPB_MAX_SIZE 1824 +#define F15H_MPB_MAX_SIZE 4096 + + switch (c->x86) { + case 0x14: + max_size = F14H_MPB_MAX_SIZE; + break; + case 0x15: + max_size = F15H_MPB_MAX_SIZE; + break; + default: + max_size = F1XH_MPB_MAX_SIZE; + break; + } + + actual_size = *(u32 *)(buf + 4); + + if (actual_size + SECTION_HDR_SIZE > size || actual_size > max_size) { + pr_err("section size mismatch\n"); + return 0; + } + + return actual_size; +} + +static struct microcode_header_amd * +get_next_ucode(int cpu, const u8 *buf, unsigned int size, unsigned int *mc_size) +{ + struct microcode_header_amd *mc = NULL; + unsigned int actual_size = 0; + + if (*(u32 *)buf != UCODE_UCODE_TYPE) { + pr_err("invalid type field in container file section header\n"); + goto out; + } + + actual_size = verify_ucode_size(cpu, buf, size); + if (!actual_size) + goto out; + + mc = vzalloc(actual_size); + if (!mc) + goto out; + + get_ucode_data(mc, buf + SECTION_HDR_SIZE, actual_size); + *mc_size = actual_size + SECTION_HDR_SIZE; + +out: + return mc; +} + static int install_equiv_cpu_table(const u8 *buf) { unsigned int *ibuf = (unsigned int *)buf; @@ -259,38 +247,36 @@ generic_load_microcode(int cpu, const u8 *data, size_t size) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; struct microcode_header_amd *mc_hdr = NULL; - unsigned int mc_size, leftover, current_size = 0; + unsigned int mc_size, leftover; int offset; const u8 *ucode_ptr = data; void *new_mc = NULL; unsigned int new_rev = uci->cpu_sig.rev; - enum ucode_state state = UCODE_ERROR; + enum ucode_state state = UCODE_OK; offset = install_equiv_cpu_table(ucode_ptr); if (offset < 0) { pr_err("failed to create equivalent cpu table\n"); - goto out; + return UCODE_ERROR; } + ucode_ptr += offset; leftover = size - offset; - if (*(u32 *)ucode_ptr != UCODE_UCODE_TYPE) { - pr_err("invalid type field in container file section header\n"); - goto free_table; - } - while (leftover) { - mc_size = get_matching_microcode(cpu, ucode_ptr, leftover, - new_rev, ¤t_size); - if (mc_size) { - mc_hdr = patch; - new_mc = patch; + mc_hdr = get_next_ucode(cpu, ucode_ptr, leftover, &mc_size); + if (!mc_hdr) + break; + + if (get_matching_microcode(cpu, mc_hdr, new_rev)) { + vfree(new_mc); new_rev = mc_hdr->patch_id; - goto out_ok; - } + new_mc = mc_hdr; + } else + vfree(mc_hdr); - ucode_ptr += current_size; - leftover -= current_size; + ucode_ptr += mc_size; + leftover -= mc_size; } if (!new_mc) { @@ -298,16 +284,19 @@ generic_load_microcode(int cpu, const u8 *data, size_t size) goto free_table; } -out_ok: - uci->mc = new_mc; - state = UCODE_OK; - pr_debug("CPU%d update ucode (0x%08x -> 0x%08x)\n", - cpu, uci->cpu_sig.rev, new_rev); + if (!leftover) { + vfree(uci->mc); + uci->mc = new_mc; + pr_debug("CPU%d update ucode (0x%08x -> 0x%08x)\n", + cpu, uci->cpu_sig.rev, new_rev); + } else { + vfree(new_mc); + state = UCODE_ERROR; + } free_table: free_equiv_cpu_table(); -out: return state; } @@ -348,6 +337,7 @@ static void microcode_fini_cpu_amd(int cpu) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; + vfree(uci->mc); uci->mc = NULL; } @@ -361,14 +351,5 @@ static struct microcode_ops microcode_amd_ops = { struct microcode_ops * __init init_amd_microcode(void) { - patch = (void 
*)get_zeroed_page(GFP_KERNEL); - if (!patch) - return NULL; - return µcode_amd_ops; } - -void __exit exit_amd_microcode(void) -{ - free_page((unsigned long)patch); -} diff --git a/trunk/arch/x86/kernel/microcode_core.c b/trunk/arch/x86/kernel/microcode_core.c index 9302e2d0eb4b..9d46f5e43b51 100644 --- a/trunk/arch/x86/kernel/microcode_core.c +++ b/trunk/arch/x86/kernel/microcode_core.c @@ -563,8 +563,6 @@ module_init(microcode_init); static void __exit microcode_exit(void) { - struct cpuinfo_x86 *c = &cpu_data(0); - microcode_dev_exit(); unregister_hotcpu_notifier(&mc_cpu_notifier); @@ -582,9 +580,6 @@ static void __exit microcode_exit(void) microcode_ops = NULL; - if (c->x86_vendor == X86_VENDOR_AMD) - exit_amd_microcode(); - pr_info("Microcode Update Driver: v" MICROCODE_VERSION " removed.\n"); } module_exit(microcode_exit); diff --git a/trunk/arch/x86/kernel/mpparse.c b/trunk/arch/x86/kernel/mpparse.c index ca470e4c92dc..0741b062a304 100644 --- a/trunk/arch/x86/kernel/mpparse.c +++ b/trunk/arch/x86/kernel/mpparse.c @@ -564,7 +564,9 @@ void __init default_get_smp_config(unsigned int early) static void __init smp_reserve_memory(struct mpf_intel *mpf) { - memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr)); + unsigned long size = get_mpc_size(mpf->physptr); + + memblock_x86_reserve_range(mpf->physptr, mpf->physptr+size, "* MP-table mpc"); } static int __init smp_scan_config(unsigned long base, unsigned long length) @@ -593,7 +595,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length) mpf, (u64)virt_to_phys(mpf)); mem = virt_to_phys(mpf); - memblock_reserve(mem, sizeof(*mpf)); + memblock_x86_reserve_range(mem, mem + sizeof(*mpf), "* MP-table mpf"); if (mpf->physptr) smp_reserve_memory(mpf); @@ -834,8 +836,10 @@ early_param("alloc_mptable", parse_alloc_mptable_opt); void __init early_reserve_e820_mpc_new(void) { - if (enable_update_mptable && alloc_mptable) - mpc_new_phys = early_reserve_e820(mpc_new_length, 4); + if (enable_update_mptable && alloc_mptable) { + u64 startt = 0; + mpc_new_phys = early_reserve_e820(startt, mpc_new_length, 4); + } } static int __init update_mp_table(void) diff --git a/trunk/arch/x86/kernel/process.c b/trunk/arch/x86/kernel/process.c index 15763af7bfe3..ee5d4fbd53b4 100644 --- a/trunk/arch/x86/kernel/process.c +++ b/trunk/arch/x86/kernel/process.c @@ -293,7 +293,7 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) regs.orig_ax = -1; regs.ip = (unsigned long) kernel_thread_helper; regs.cs = __KERNEL_CS | get_kernel_rpl(); - regs.flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1; + regs.flags = X86_EFLAGS_IF | 0x2; /* Ok, create the new process.. 
*/ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, ®s, 0, NULL, NULL); diff --git a/trunk/arch/x86/kernel/process_32.c b/trunk/arch/x86/kernel/process_32.c index 485204f58cda..795b79f984c2 100644 --- a/trunk/arch/x86/kernel/process_32.c +++ b/trunk/arch/x86/kernel/process_32.c @@ -99,8 +99,7 @@ void cpu_idle(void) /* endless idle loop with no priority at all */ while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); + tick_nohz_stop_sched_tick(1); while (!need_resched()) { check_pgt_cache(); @@ -117,8 +116,7 @@ void cpu_idle(void) pm_idle(); start_critical_timings(); } - rcu_idle_exit(); - tick_nohz_idle_exit(); + tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); schedule(); preempt_disable(); diff --git a/trunk/arch/x86/kernel/process_64.c b/trunk/arch/x86/kernel/process_64.c index 9b9fe4a85c87..3bd7e6eebf31 100644 --- a/trunk/arch/x86/kernel/process_64.c +++ b/trunk/arch/x86/kernel/process_64.c @@ -122,7 +122,7 @@ void cpu_idle(void) /* endless idle loop with no priority at all */ while (1) { - tick_nohz_idle_enter(); + tick_nohz_stop_sched_tick(1); while (!need_resched()) { rmb(); @@ -139,14 +139,8 @@ void cpu_idle(void) enter_idle(); /* Don't trace irqs off for idle */ stop_critical_timings(); - - /* enter_idle() needs rcu for notifiers */ - rcu_idle_enter(); - if (cpuidle_idle_call()) pm_idle(); - - rcu_idle_exit(); start_critical_timings(); /* In many cases the interrupt that ended idle @@ -155,7 +149,7 @@ void cpu_idle(void) __exit_idle(); } - tick_nohz_idle_exit(); + tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); schedule(); preempt_disable(); @@ -299,12 +293,13 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) { - p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr, - IO_BITMAP_BYTES, GFP_KERNEL); + p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL); if (!p->thread.io_bitmap_ptr) { p->thread.io_bitmap_max = 0; return -ENOMEM; } + memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr, + IO_BITMAP_BYTES); set_tsk_thread_flag(p, TIF_IO_BITMAP); } diff --git a/trunk/arch/x86/kernel/ptrace.c b/trunk/arch/x86/kernel/ptrace.c index 89a04c7b5bb6..82528799c5de 100644 --- a/trunk/arch/x86/kernel/ptrace.c +++ b/trunk/arch/x86/kernel/ptrace.c @@ -749,8 +749,7 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, /* * Handle PTRACE_POKEUSR calls for the debug register area. 
*/ -static int ptrace_set_debugreg(struct task_struct *tsk, int n, - unsigned long val) +int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val) { struct thread_struct *thread = &(tsk->thread); int rc = 0; diff --git a/trunk/arch/x86/kernel/setup.c b/trunk/arch/x86/kernel/setup.c index d05444ac2aea..cf0ef986cb6d 100644 --- a/trunk/arch/x86/kernel/setup.c +++ b/trunk/arch/x86/kernel/setup.c @@ -306,8 +306,7 @@ static void __init cleanup_highmap(void) static void __init reserve_brk(void) { if (_brk_end > _brk_start) - memblock_reserve(__pa(_brk_start), - __pa(_brk_end) - __pa(_brk_start)); + memblock_x86_reserve_range(__pa(_brk_start), __pa(_brk_end), "BRK"); /* Mark brk area as locked down and no longer taking any new allocations */ @@ -332,13 +331,13 @@ static void __init relocate_initrd(void) ramdisk_here = memblock_find_in_range(0, end_of_lowmem, area_size, PAGE_SIZE); - if (!ramdisk_here) + if (ramdisk_here == MEMBLOCK_ERROR) panic("Cannot find place for new RAMDISK of size %lld\n", ramdisk_size); /* Note: this includes all the lowmem currently occupied by the initrd, we rely on that fact to keep the data intact. */ - memblock_reserve(ramdisk_here, area_size); + memblock_x86_reserve_range(ramdisk_here, ramdisk_here + area_size, "NEW RAMDISK"); initrd_start = ramdisk_here + PAGE_OFFSET; initrd_end = initrd_start + ramdisk_size; printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n", @@ -394,7 +393,7 @@ static void __init reserve_initrd(void) initrd_start = 0; if (ramdisk_size >= (end_of_lowmem>>1)) { - memblock_free(ramdisk_image, ramdisk_end - ramdisk_image); + memblock_x86_free_range(ramdisk_image, ramdisk_end); printk(KERN_ERR "initrd too large to handle, " "disabling initrd\n"); return; @@ -417,7 +416,7 @@ static void __init reserve_initrd(void) relocate_initrd(); - memblock_free(ramdisk_image, ramdisk_end - ramdisk_image); + memblock_x86_free_range(ramdisk_image, ramdisk_end); } #else static void __init reserve_initrd(void) @@ -491,13 +490,15 @@ static void __init memblock_x86_reserve_range_setup_data(void) { struct setup_data *data; u64 pa_data; + char buf[32]; if (boot_params.hdr.version < 0x0209) return; pa_data = boot_params.hdr.setup_data; while (pa_data) { data = early_memremap(pa_data, sizeof(*data)); - memblock_reserve(pa_data, sizeof(*data) + data->len); + sprintf(buf, "setup data %x", data->type); + memblock_x86_reserve_range(pa_data, pa_data+sizeof(*data)+data->len, buf); pa_data = data->next; early_iounmap(data, sizeof(*data)); } @@ -553,7 +554,7 @@ static void __init reserve_crashkernel(void) crash_base = memblock_find_in_range(alignment, CRASH_KERNEL_ADDR_MAX, crash_size, alignment); - if (!crash_base) { + if (crash_base == MEMBLOCK_ERROR) { pr_info("crashkernel reservation failed - No suitable area found.\n"); return; } @@ -567,7 +568,7 @@ static void __init reserve_crashkernel(void) return; } } - memblock_reserve(crash_base, crash_size); + memblock_x86_reserve_range(crash_base, crash_base + crash_size, "CRASH KERNEL"); printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " "for crashkernel (System RAM: %ldMB)\n", @@ -625,7 +626,7 @@ static __init void reserve_ibft_region(void) addr = find_ibft_region(&size); if (size) - memblock_reserve(addr, size); + memblock_x86_reserve_range(addr, addr + size, "* ibft"); } static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10; diff --git a/trunk/arch/x86/kernel/smpboot.c b/trunk/arch/x86/kernel/smpboot.c index e38e21754eea..9f548cb4a958 100644 --- a/trunk/arch/x86/kernel/smpboot.c +++ 
b/trunk/arch/x86/kernel/smpboot.c @@ -840,8 +840,7 @@ int __cpuinit native_cpu_up(unsigned int cpu) pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu); if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid || - !physid_isset(apicid, phys_cpu_present_map) || - (!x2apic_mode && apicid >= 255)) { + !physid_isset(apicid, phys_cpu_present_map)) { printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu); return -EINVAL; } diff --git a/trunk/arch/x86/kernel/trampoline.c b/trunk/arch/x86/kernel/trampoline.c index a73b61055ad6..a91ae7709b49 100644 --- a/trunk/arch/x86/kernel/trampoline.c +++ b/trunk/arch/x86/kernel/trampoline.c @@ -14,11 +14,11 @@ void __init setup_trampolines(void) /* Has to be in very low memory so we can execute real-mode AP code. */ mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE); - if (!mem) + if (mem == MEMBLOCK_ERROR) panic("Cannot allocate trampoline\n"); x86_trampoline_base = __va(mem); - memblock_reserve(mem, size); + memblock_x86_reserve_range(mem, mem + size, "TRAMPOLINE"); printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n", x86_trampoline_base, (unsigned long long)mem, size); diff --git a/trunk/arch/x86/kernel/traps.c b/trunk/arch/x86/kernel/traps.c index fa1191fb679d..a8e3eb83466c 100644 --- a/trunk/arch/x86/kernel/traps.c +++ b/trunk/arch/x86/kernel/traps.c @@ -306,10 +306,15 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) == NOTIFY_STOP) return; #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ - +#ifdef CONFIG_KPROBES if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) return; +#else + if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP) + == NOTIFY_STOP) + return; +#endif preempt_conditional_sti(regs); do_trap(3, SIGTRAP, "int3", regs, error_code, NULL); diff --git a/trunk/arch/x86/kernel/tsc.c b/trunk/arch/x86/kernel/tsc.c index 2c9cf0fd78f5..db483369f10b 100644 --- a/trunk/arch/x86/kernel/tsc.c +++ b/trunk/arch/x86/kernel/tsc.c @@ -35,7 +35,7 @@ static int __read_mostly tsc_unstable; erroneous rdtsc usage on !cpu_has_tsc processors */ static int __read_mostly tsc_disabled = -1; -int tsc_clocksource_reliable; +static int tsc_clocksource_reliable; /* * Scheduler clock - returns current time in nanosec units. 
*/ @@ -178,11 +178,11 @@ static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2) } #define CAL_MS 10 -#define CAL_LATCH (PIT_TICK_RATE / (1000 / CAL_MS)) +#define CAL_LATCH (CLOCK_TICK_RATE / (1000 / CAL_MS)) #define CAL_PIT_LOOPS 1000 #define CAL2_MS 50 -#define CAL2_LATCH (PIT_TICK_RATE / (1000 / CAL2_MS)) +#define CAL2_LATCH (CLOCK_TICK_RATE / (1000 / CAL2_MS)) #define CAL2_PIT_LOOPS 5000 diff --git a/trunk/arch/x86/kernel/tsc_sync.c b/trunk/arch/x86/kernel/tsc_sync.c index 9eba29b46cb7..0aa5fed8b9e6 100644 --- a/trunk/arch/x86/kernel/tsc_sync.c +++ b/trunk/arch/x86/kernel/tsc_sync.c @@ -113,7 +113,7 @@ void __cpuinit check_tsc_sync_source(int cpu) if (unsynchronized_tsc()) return; - if (tsc_clocksource_reliable) { + if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) { if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING) pr_info( "Skipped synchronization checks as TSC is reliable.\n"); @@ -172,7 +172,7 @@ void __cpuinit check_tsc_sync_target(void) { int cpus = 2; - if (unsynchronized_tsc() || tsc_clocksource_reliable) + if (unsynchronized_tsc() || boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) return; /* diff --git a/trunk/arch/x86/kernel/vsyscall_64.c b/trunk/arch/x86/kernel/vsyscall_64.c index b07ba9393564..e4d4a22e8b94 100644 --- a/trunk/arch/x86/kernel/vsyscall_64.c +++ b/trunk/arch/x86/kernel/vsyscall_64.c @@ -57,7 +57,7 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) = .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock), }; -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE; +static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE; static int __init vsyscall_setup(char *str) { @@ -140,40 +140,11 @@ static int addr_to_vsyscall_nr(unsigned long addr) return nr; } -static bool write_ok_or_segv(unsigned long ptr, size_t size) -{ - /* - * XXX: if access_ok, get_user, and put_user handled - * sig_on_uaccess_error, this could go away. - */ - - if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) { - siginfo_t info; - struct thread_struct *thread = ¤t->thread; - - thread->error_code = 6; /* user fault, no page, write */ - thread->cr2 = ptr; - thread->trap_no = 14; - - memset(&info, 0, sizeof(info)); - info.si_signo = SIGSEGV; - info.si_errno = 0; - info.si_code = SEGV_MAPERR; - info.si_addr = (void __user *)ptr; - - force_sig_info(SIGSEGV, &info, current); - return false; - } else { - return true; - } -} - bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) { struct task_struct *tsk; unsigned long caller; int vsyscall_nr; - int prev_sig_on_uaccess_error; long ret; /* @@ -209,65 +180,35 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) if (seccomp_mode(&tsk->seccomp)) do_exit(SIGKILL); - /* - * With a real vsyscall, page faults cause SIGSEGV. We want to - * preserve that behavior to make writing exploits harder. - */ - prev_sig_on_uaccess_error = current_thread_info()->sig_on_uaccess_error; - current_thread_info()->sig_on_uaccess_error = 1; - - /* - * 0 is a valid user pointer (in the access_ok sense) on 32-bit and - * 64-bit, so we don't need to special-case it here. For all the - * vsyscalls, 0 means "don't write anything" not "write it at - * address 0". 
- */ - ret = -EFAULT; switch (vsyscall_nr) { case 0: - if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) || - !write_ok_or_segv(regs->si, sizeof(struct timezone))) - break; - ret = sys_gettimeofday( (struct timeval __user *)regs->di, (struct timezone __user *)regs->si); break; case 1: - if (!write_ok_or_segv(regs->di, sizeof(time_t))) - break; - ret = sys_time((time_t __user *)regs->di); break; case 2: - if (!write_ok_or_segv(regs->di, sizeof(unsigned)) || - !write_ok_or_segv(regs->si, sizeof(unsigned))) - break; - ret = sys_getcpu((unsigned __user *)regs->di, (unsigned __user *)regs->si, 0); break; } - current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error; - if (ret == -EFAULT) { - /* Bad news -- userspace fed a bad pointer to a vsyscall. */ - warn_bad_vsyscall(KERN_INFO, regs, - "vsyscall fault (exploit attempt?)"); - /* - * If we failed to generate a signal for any reason, - * generate one here. (This should be impossible.) + * Bad news -- userspace fed a bad pointer to a vsyscall. + * + * With a real vsyscall, that would have caused SIGSEGV. + * To make writing reliable exploits using the emulated + * vsyscalls harder, generate SIGSEGV here as well. */ - if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) && - !sigismember(&tsk->pending.signal, SIGSEGV))) - goto sigsegv; - - return true; /* Don't emulate the ret. */ + warn_bad_vsyscall(KERN_INFO, regs, + "vsyscall fault (exploit attempt?)"); + goto sigsegv; } regs->ax = ret; diff --git a/trunk/arch/x86/kernel/x86_init.c b/trunk/arch/x86/kernel/x86_init.c index 91f83e21b989..c1d6cd549397 100644 --- a/trunk/arch/x86/kernel/x86_init.c +++ b/trunk/arch/x86/kernel/x86_init.c @@ -92,7 +92,6 @@ struct x86_init_ops x86_init __initdata = { struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = { .setup_percpu_clockev = setup_secondary_APIC_clock, - .fixup_cpu_id = x86_default_fixup_cpu_id, }; static void default_nmi_init(void) { }; diff --git a/trunk/arch/x86/kvm/i8254.c b/trunk/arch/x86/kvm/i8254.c index 405f2620392f..76e3f1cd0369 100644 --- a/trunk/arch/x86/kvm/i8254.c +++ b/trunk/arch/x86/kvm/i8254.c @@ -338,15 +338,11 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data) return HRTIMER_NORESTART; } -static void create_pit_timer(struct kvm *kvm, u32 val, int is_period) +static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period) { - struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state; struct kvm_timer *pt = &ps->pit_timer; s64 interval; - if (!irqchip_in_kernel(kvm)) - return; - interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ); pr_debug("create pit timer, interval is %llu nsec\n", interval); @@ -398,13 +394,13 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val) /* FIXME: enhance mode 4 precision */ case 4: if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) { - create_pit_timer(kvm, val, 0); + create_pit_timer(ps, val, 0); } break; case 2: case 3: if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){ - create_pit_timer(kvm, val, 1); + create_pit_timer(ps, val, 1); } break; default: diff --git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c index 4c938da2ba00..c38efd7b792e 100644 --- a/trunk/arch/x86/kvm/x86.c +++ b/trunk/arch/x86/kvm/x86.c @@ -602,6 +602,7 @@ static void update_cpuid(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; struct kvm_lapic *apic = vcpu->arch.apic; + u32 timer_mode_mask; best = kvm_find_cpuid_entry(vcpu, 1, 0); if (!best) @@ -614,12 +615,15 @@ static void update_cpuid(struct kvm_vcpu *vcpu) best->ecx |= bit(X86_FEATURE_OSXSAVE); } - 
if (apic) { - if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER)) - apic->lapic_timer.timer_mode_mask = 3 << 17; - else - apic->lapic_timer.timer_mode_mask = 1 << 17; - } + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + best->function == 0x1) { + best->ecx |= bit(X86_FEATURE_TSC_DEADLINE_TIMER); + timer_mode_mask = 3 << 17; + } else + timer_mode_mask = 1 << 17; + + if (apic) + apic->lapic_timer.timer_mode_mask = timer_mode_mask; } int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) @@ -2131,9 +2135,6 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_TSC_CONTROL: r = kvm_has_tsc_control; break; - case KVM_CAP_TSC_DEADLINE_TIMER: - r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER); - break; default: r = 0; break; diff --git a/trunk/arch/x86/lib/inat.c b/trunk/arch/x86/lib/inat.c index 88ad5fbda6e1..46fc4ee09fc4 100644 --- a/trunk/arch/x86/lib/inat.c +++ b/trunk/arch/x86/lib/inat.c @@ -82,16 +82,9 @@ insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, insn_byte_t vex_m, const insn_attr_t *table; if (vex_m > X86_VEX_M_MAX || vex_p > INAT_LSTPFX_MAX) return 0; - /* At first, this checks the master table */ - table = inat_avx_tables[vex_m][0]; + table = inat_avx_tables[vex_m][vex_p]; if (!table) return 0; - if (!inat_is_group(table[opcode]) && vex_p) { - /* If this is not a group, get attribute directly */ - table = inat_avx_tables[vex_m][vex_p]; - if (!table) - return 0; - } return table[opcode]; } diff --git a/trunk/arch/x86/lib/insn.c b/trunk/arch/x86/lib/insn.c index 5a1f9f3e3fbb..374562ed6704 100644 --- a/trunk/arch/x86/lib/insn.c +++ b/trunk/arch/x86/lib/insn.c @@ -202,7 +202,7 @@ void insn_get_opcode(struct insn *insn) m = insn_vex_m_bits(insn); p = insn_vex_p_bits(insn); insn->attr = inat_get_avx_attribute(op, m, p); - if (!inat_accept_vex(insn->attr) && !inat_is_group(insn->attr)) + if (!inat_accept_vex(insn->attr)) insn->attr = 0; /* This instruction is bad */ goto end; /* VEX has only 1 byte for opcode */ } @@ -249,8 +249,6 @@ void insn_get_modrm(struct insn *insn) pfx = insn_last_prefix(insn); insn->attr = inat_get_group_attribute(mod, pfx, insn->attr); - if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) - insn->attr = 0; /* This is bad */ } } diff --git a/trunk/arch/x86/lib/string_32.c b/trunk/arch/x86/lib/string_32.c index bd59090825db..82004d2bf05e 100644 --- a/trunk/arch/x86/lib/string_32.c +++ b/trunk/arch/x86/lib/string_32.c @@ -164,13 +164,15 @@ EXPORT_SYMBOL(strchr); size_t strlen(const char *s) { int d0; - size_t res; + int res; asm volatile("repne\n\t" - "scasb" + "scasb\n\t" + "notl %0\n\t" + "decl %0" : "=c" (res), "=&D" (d0) : "1" (s), "a" (0), "0" (0xffffffffu) : "memory"); - return ~res - 1; + return res; } EXPORT_SYMBOL(strlen); #endif diff --git a/trunk/arch/x86/lib/x86-opcode-map.txt b/trunk/arch/x86/lib/x86-opcode-map.txt index 5b83c51c12e0..a793da5e560e 100644 --- a/trunk/arch/x86/lib/x86-opcode-map.txt +++ b/trunk/arch/x86/lib/x86-opcode-map.txt @@ -1,11 +1,5 @@ # x86 Opcode Maps # -# This is (mostly) based on following documentations. -# - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Vol.2 -# (#325383-040US, October 2011) -# - Intel(R) Advanced Vector Extensions Programming Reference -# (#319433-011,JUNE 2011). -# # # Table: table-name # Referrer: escaped-name @@ -21,13 +15,10 @@ # EndTable # # AVX Superscripts -# (v): this opcode requires VEX prefix. -# (v1): this opcode only supports 128bit VEX. 
-# -# Last Prefix Superscripts -# - (66): the last prefix is 0x66 -# - (F3): the last prefix is 0xF3 -# - (F2): the last prefix is 0xF2 +# (VEX): this opcode can accept VEX prefix. +# (oVEX): this opcode requires VEX prefix. +# (o128): this opcode only supports 128bit VEX. +# (o256): this opcode only supports 256bit VEX. # Table: one byte opcode @@ -208,8 +199,8 @@ a0: MOV AL,Ob a1: MOV rAX,Ov a2: MOV Ob,AL a3: MOV Ov,rAX -a4: MOVS/B Yb,Xb -a5: MOVS/W/D/Q Yv,Xv +a4: MOVS/B Xb,Yb +a5: MOVS/W/D/Q Xv,Yv a6: CMPS/B Xb,Yb a7: CMPS/W/D Xv,Yv a8: TEST AL,Ib @@ -242,8 +233,8 @@ c0: Grp2 Eb,Ib (1A) c1: Grp2 Ev,Ib (1A) c2: RETN Iw (f64) c3: RETN -c4: LES Gz,Mp (i64) | VEX+2byte (Prefix) -c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix) +c4: LES Gz,Mp (i64) | 3bytes-VEX (Prefix) +c5: LDS Gz,Mp (i64) | 2bytes-VEX (Prefix) c6: Grp11 Eb,Ib (1A) c7: Grp11 Ev,Iz (1A) c8: ENTER Iw,Ib @@ -329,19 +320,14 @@ AVXcode: 1 # 3DNow! uses the last imm byte as opcode extension. 0f: 3DNow! Pq,Qq,Ib # 0x0f 0x10-0x1f -# NOTE: According to Intel SDM opcode map, vmovups and vmovupd has no operands -# but it actually has operands. And also, vmovss and vmovsd only accept 128bit. -# MOVSS/MOVSD has too many forms(3) on SDM. This map just shows a typical form. -# Many AVX instructions lack v1 superscript, according to Intel AVX-Prgramming -# Reference A.1 -10: vmovups Vps,Wps | vmovupd Vpd,Wpd (66) | vmovss Vx,Hx,Wss (F3),(v1) | vmovsd Vx,Hx,Wsd (F2),(v1) -11: vmovups Wps,Vps | vmovupd Wpd,Vpd (66) | vmovss Wss,Hx,Vss (F3),(v1) | vmovsd Wsd,Hx,Vsd (F2),(v1) -12: vmovlps Vq,Hq,Mq (v1) | vmovhlps Vq,Hq,Uq (v1) | vmovlpd Vq,Hq,Mq (66),(v1) | vmovsldup Vx,Wx (F3) | vmovddup Vx,Wx (F2) -13: vmovlps Mq,Vq (v1) | vmovlpd Mq,Vq (66),(v1) -14: vunpcklps Vx,Hx,Wx | vunpcklpd Vx,Hx,Wx (66) -15: vunpckhps Vx,Hx,Wx | vunpckhpd Vx,Hx,Wx (66) -16: vmovhps Vdq,Hq,Mq (v1) | vmovlhps Vdq,Hq,Uq (v1) | vmovhpd Vdq,Hq,Mq (66),(v1) | vmovshdup Vx,Wx (F3) -17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1) +10: movups Vps,Wps (VEX) | movss Vss,Wss (F3),(VEX),(o128) | movupd Vpd,Wpd (66),(VEX) | movsd Vsd,Wsd (F2),(VEX),(o128) +11: movups Wps,Vps (VEX) | movss Wss,Vss (F3),(VEX),(o128) | movupd Wpd,Vpd (66),(VEX) | movsd Wsd,Vsd (F2),(VEX),(o128) +12: movlps Vq,Mq (VEX),(o128) | movlpd Vq,Mq (66),(VEX),(o128) | movhlps Vq,Uq (VEX),(o128) | movddup Vq,Wq (F2),(VEX) | movsldup Vq,Wq (F3),(VEX) +13: mpvlps Mq,Vq (VEX),(o128) | movlpd Mq,Vq (66),(VEX),(o128) +14: unpcklps Vps,Wq (VEX) | unpcklpd Vpd,Wq (66),(VEX) +15: unpckhps Vps,Wq (VEX) | unpckhpd Vpd,Wq (66),(VEX) +16: movhps Vq,Mq (VEX),(o128) | movhpd Vq,Mq (66),(VEX),(o128) | movlsps Vq,Uq (VEX),(o128) | movshdup Vq,Wq (F3),(VEX) +17: movhps Mq,Vq (VEX),(o128) | movhpd Mq,Vq (66),(VEX),(o128) 18: Grp16 (1A) 19: 1a: @@ -359,14 +345,14 @@ AVXcode: 1 25: 26: 27: -28: vmovaps Vps,Wps | vmovapd Vpd,Wpd (66) -29: vmovaps Wps,Vps | vmovapd Wpd,Vpd (66) -2a: cvtpi2ps Vps,Qpi | cvtpi2pd Vpd,Qpi (66) | vcvtsi2ss Vss,Hss,Ey (F3),(v1) | vcvtsi2sd Vsd,Hsd,Ey (F2),(v1) -2b: vmovntps Mps,Vps | vmovntpd Mpd,Vpd (66) -2c: cvttps2pi Ppi,Wps | cvttpd2pi Ppi,Wpd (66) | vcvttss2si Gy,Wss (F3),(v1) | vcvttsd2si Gy,Wsd (F2),(v1) -2d: cvtps2pi Ppi,Wps | cvtpd2pi Qpi,Wpd (66) | vcvtss2si Gy,Wss (F3),(v1) | vcvtsd2si Gy,Wsd (F2),(v1) -2e: vucomiss Vss,Wss (v1) | vucomisd Vsd,Wsd (66),(v1) -2f: vcomiss Vss,Wss (v1) | vcomisd Vsd,Wsd (66),(v1) +28: movaps Vps,Wps (VEX) | movapd Vpd,Wpd (66),(VEX) +29: movaps Wps,Vps (VEX) | movapd Wpd,Vpd (66),(VEX) +2a: cvtpi2ps Vps,Qpi | cvtsi2ss Vss,Ed/q (F3),(VEX),(o128) | cvtpi2pd Vpd,Qpi (66) 
| cvtsi2sd Vsd,Ed/q (F2),(VEX),(o128) +2b: movntps Mps,Vps (VEX) | movntpd Mpd,Vpd (66),(VEX) +2c: cvttps2pi Ppi,Wps | cvttss2si Gd/q,Wss (F3),(VEX),(o128) | cvttpd2pi Ppi,Wpd (66) | cvttsd2si Gd/q,Wsd (F2),(VEX),(o128) +2d: cvtps2pi Ppi,Wps | cvtss2si Gd/q,Wss (F3),(VEX),(o128) | cvtpd2pi Qpi,Wpd (66) | cvtsd2si Gd/q,Wsd (F2),(VEX),(o128) +2e: ucomiss Vss,Wss (VEX),(o128) | ucomisd Vsd,Wsd (66),(VEX),(o128) +2f: comiss Vss,Wss (VEX),(o128) | comisd Vsd,Wsd (66),(VEX),(o128) # 0x0f 0x30-0x3f 30: WRMSR 31: RDTSC @@ -402,66 +388,65 @@ AVXcode: 1 4e: CMOVLE/NG Gv,Ev 4f: CMOVNLE/G Gv,Ev # 0x0f 0x50-0x5f -50: vmovmskps Gy,Ups | vmovmskpd Gy,Upd (66) -51: vsqrtps Vps,Wps | vsqrtpd Vpd,Wpd (66) | vsqrtss Vss,Hss,Wss (F3),(v1) | vsqrtsd Vsd,Hsd,Wsd (F2),(v1) -52: vrsqrtps Vps,Wps | vrsqrtss Vss,Hss,Wss (F3),(v1) -53: vrcpps Vps,Wps | vrcpss Vss,Hss,Wss (F3),(v1) -54: vandps Vps,Hps,Wps | vandpd Vpd,Hpd,Wpd (66) -55: vandnps Vps,Hps,Wps | vandnpd Vpd,Hpd,Wpd (66) -56: vorps Vps,Hps,Wps | vorpd Vpd,Hpd,Wpd (66) -57: vxorps Vps,Hps,Wps | vxorpd Vpd,Hpd,Wpd (66) -58: vaddps Vps,Hps,Wps | vaddpd Vpd,Hpd,Wpd (66) | vaddss Vss,Hss,Wss (F3),(v1) | vaddsd Vsd,Hsd,Wsd (F2),(v1) -59: vmulps Vps,Hps,Wps | vmulpd Vpd,Hpd,Wpd (66) | vmulss Vss,Hss,Wss (F3),(v1) | vmulsd Vsd,Hsd,Wsd (F2),(v1) -5a: vcvtps2pd Vpd,Wps | vcvtpd2ps Vps,Wpd (66) | vcvtss2sd Vsd,Hx,Wss (F3),(v1) | vcvtsd2ss Vss,Hx,Wsd (F2),(v1) -5b: vcvtdq2ps Vps,Wdq | vcvtps2dq Vdq,Wps (66) | vcvttps2dq Vdq,Wps (F3) -5c: vsubps Vps,Hps,Wps | vsubpd Vpd,Hpd,Wpd (66) | vsubss Vss,Hss,Wss (F3),(v1) | vsubsd Vsd,Hsd,Wsd (F2),(v1) -5d: vminps Vps,Hps,Wps | vminpd Vpd,Hpd,Wpd (66) | vminss Vss,Hss,Wss (F3),(v1) | vminsd Vsd,Hsd,Wsd (F2),(v1) -5e: vdivps Vps,Hps,Wps | vdivpd Vpd,Hpd,Wpd (66) | vdivss Vss,Hss,Wss (F3),(v1) | vdivsd Vsd,Hsd,Wsd (F2),(v1) -5f: vmaxps Vps,Hps,Wps | vmaxpd Vpd,Hpd,Wpd (66) | vmaxss Vss,Hss,Wss (F3),(v1) | vmaxsd Vsd,Hsd,Wsd (F2),(v1) +50: movmskps Gd/q,Ups (VEX) | movmskpd Gd/q,Upd (66),(VEX) +51: sqrtps Vps,Wps (VEX) | sqrtss Vss,Wss (F3),(VEX),(o128) | sqrtpd Vpd,Wpd (66),(VEX) | sqrtsd Vsd,Wsd (F2),(VEX),(o128) +52: rsqrtps Vps,Wps (VEX) | rsqrtss Vss,Wss (F3),(VEX),(o128) +53: rcpps Vps,Wps (VEX) | rcpss Vss,Wss (F3),(VEX),(o128) +54: andps Vps,Wps (VEX) | andpd Vpd,Wpd (66),(VEX) +55: andnps Vps,Wps (VEX) | andnpd Vpd,Wpd (66),(VEX) +56: orps Vps,Wps (VEX) | orpd Vpd,Wpd (66),(VEX) +57: xorps Vps,Wps (VEX) | xorpd Vpd,Wpd (66),(VEX) +58: addps Vps,Wps (VEX) | addss Vss,Wss (F3),(VEX),(o128) | addpd Vpd,Wpd (66),(VEX) | addsd Vsd,Wsd (F2),(VEX),(o128) +59: mulps Vps,Wps (VEX) | mulss Vss,Wss (F3),(VEX),(o128) | mulpd Vpd,Wpd (66),(VEX) | mulsd Vsd,Wsd (F2),(VEX),(o128) +5a: cvtps2pd Vpd,Wps (VEX) | cvtss2sd Vsd,Wss (F3),(VEX),(o128) | cvtpd2ps Vps,Wpd (66),(VEX) | cvtsd2ss Vsd,Wsd (F2),(VEX),(o128) +5b: cvtdq2ps Vps,Wdq (VEX) | cvtps2dq Vdq,Wps (66),(VEX) | cvttps2dq Vdq,Wps (F3),(VEX) +5c: subps Vps,Wps (VEX) | subss Vss,Wss (F3),(VEX),(o128) | subpd Vpd,Wpd (66),(VEX) | subsd Vsd,Wsd (F2),(VEX),(o128) +5d: minps Vps,Wps (VEX) | minss Vss,Wss (F3),(VEX),(o128) | minpd Vpd,Wpd (66),(VEX) | minsd Vsd,Wsd (F2),(VEX),(o128) +5e: divps Vps,Wps (VEX) | divss Vss,Wss (F3),(VEX),(o128) | divpd Vpd,Wpd (66),(VEX) | divsd Vsd,Wsd (F2),(VEX),(o128) +5f: maxps Vps,Wps (VEX) | maxss Vss,Wss (F3),(VEX),(o128) | maxpd Vpd,Wpd (66),(VEX) | maxsd Vsd,Wsd (F2),(VEX),(o128) # 0x0f 0x60-0x6f -60: punpcklbw Pq,Qd | vpunpcklbw Vx,Hx,Wx (66),(v1) -61: punpcklwd Pq,Qd | vpunpcklwd Vx,Hx,Wx (66),(v1) -62: punpckldq Pq,Qd | vpunpckldq Vx,Hx,Wx 
(66),(v1) -63: packsswb Pq,Qq | vpacksswb Vx,Hx,Wx (66),(v1) -64: pcmpgtb Pq,Qq | vpcmpgtb Vx,Hx,Wx (66),(v1) -65: pcmpgtw Pq,Qq | vpcmpgtw Vx,Hx,Wx (66),(v1) -66: pcmpgtd Pq,Qq | vpcmpgtd Vx,Hx,Wx (66),(v1) -67: packuswb Pq,Qq | vpackuswb Vx,Hx,Wx (66),(v1) -68: punpckhbw Pq,Qd | vpunpckhbw Vx,Hx,Wx (66),(v1) -69: punpckhwd Pq,Qd | vpunpckhwd Vx,Hx,Wx (66),(v1) -6a: punpckhdq Pq,Qd | vpunpckhdq Vx,Hx,Wx (66),(v1) -6b: packssdw Pq,Qd | vpackssdw Vx,Hx,Wx (66),(v1) -6c: vpunpcklqdq Vx,Hx,Wx (66),(v1) -6d: vpunpckhqdq Vx,Hx,Wx (66),(v1) -6e: movd/q Pd,Ey | vmovd/q Vy,Ey (66),(v1) -6f: movq Pq,Qq | vmovdqa Vx,Wx (66) | vmovdqu Vx,Wx (F3) +60: punpcklbw Pq,Qd | punpcklbw Vdq,Wdq (66),(VEX),(o128) +61: punpcklwd Pq,Qd | punpcklwd Vdq,Wdq (66),(VEX),(o128) +62: punpckldq Pq,Qd | punpckldq Vdq,Wdq (66),(VEX),(o128) +63: packsswb Pq,Qq | packsswb Vdq,Wdq (66),(VEX),(o128) +64: pcmpgtb Pq,Qq | pcmpgtb Vdq,Wdq (66),(VEX),(o128) +65: pcmpgtw Pq,Qq | pcmpgtw Vdq,Wdq (66),(VEX),(o128) +66: pcmpgtd Pq,Qq | pcmpgtd Vdq,Wdq (66),(VEX),(o128) +67: packuswb Pq,Qq | packuswb Vdq,Wdq (66),(VEX),(o128) +68: punpckhbw Pq,Qd | punpckhbw Vdq,Wdq (66),(VEX),(o128) +69: punpckhwd Pq,Qd | punpckhwd Vdq,Wdq (66),(VEX),(o128) +6a: punpckhdq Pq,Qd | punpckhdq Vdq,Wdq (66),(VEX),(o128) +6b: packssdw Pq,Qd | packssdw Vdq,Wdq (66),(VEX),(o128) +6c: punpcklqdq Vdq,Wdq (66),(VEX),(o128) +6d: punpckhqdq Vdq,Wdq (66),(VEX),(o128) +6e: movd/q/ Pd,Ed/q | movd/q Vdq,Ed/q (66),(VEX),(o128) +6f: movq Pq,Qq | movdqa Vdq,Wdq (66),(VEX) | movdqu Vdq,Wdq (F3),(VEX) # 0x0f 0x70-0x7f -70: pshufw Pq,Qq,Ib | vpshufd Vx,Wx,Ib (66),(v1) | vpshufhw Vx,Wx,Ib (F3),(v1) | vpshuflw Vx,Wx,Ib (F2),(v1) +70: pshufw Pq,Qq,Ib | pshufd Vdq,Wdq,Ib (66),(VEX),(o128) | pshufhw Vdq,Wdq,Ib (F3),(VEX),(o128) | pshuflw VdqWdq,Ib (F2),(VEX),(o128) 71: Grp12 (1A) 72: Grp13 (1A) 73: Grp14 (1A) -74: pcmpeqb Pq,Qq | vpcmpeqb Vx,Hx,Wx (66),(v1) -75: pcmpeqw Pq,Qq | vpcmpeqw Vx,Hx,Wx (66),(v1) -76: pcmpeqd Pq,Qq | vpcmpeqd Vx,Hx,Wx (66),(v1) -# Note: Remove (v), because vzeroall and vzeroupper becomes emms without VEX. 
-77: emms | vzeroupper | vzeroall -78: VMREAD Ey,Gy -79: VMWRITE Gy,Ey +74: pcmpeqb Pq,Qq | pcmpeqb Vdq,Wdq (66),(VEX),(o128) +75: pcmpeqw Pq,Qq | pcmpeqw Vdq,Wdq (66),(VEX),(o128) +76: pcmpeqd Pq,Qq | pcmpeqd Vdq,Wdq (66),(VEX),(o128) +77: emms/vzeroupper/vzeroall (VEX) +78: VMREAD Ed/q,Gd/q +79: VMWRITE Gd/q,Ed/q 7a: 7b: -7c: vhaddpd Vpd,Hpd,Wpd (66) | vhaddps Vps,Hps,Wps (F2) -7d: vhsubpd Vpd,Hpd,Wpd (66) | vhsubps Vps,Hps,Wps (F2) -7e: movd/q Ey,Pd | vmovd/q Ey,Vy (66),(v1) | vmovq Vq,Wq (F3),(v1) -7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqu Wx,Vx (F3) +7c: haddps Vps,Wps (F2),(VEX) | haddpd Vpd,Wpd (66),(VEX) +7d: hsubps Vps,Wps (F2),(VEX) | hsubpd Vpd,Wpd (66),(VEX) +7e: movd/q Ed/q,Pd | movd/q Ed/q,Vdq (66),(VEX),(o128) | movq Vq,Wq (F3),(VEX),(o128) +7f: movq Qq,Pq | movdqa Wdq,Vdq (66),(VEX) | movdqu Wdq,Vdq (F3),(VEX) # 0x0f 0x80-0x8f 80: JO Jz (f64) 81: JNO Jz (f64) -82: JB/JC/JNAE Jz (f64) -83: JAE/JNB/JNC Jz (f64) -84: JE/JZ Jz (f64) -85: JNE/JNZ Jz (f64) +82: JB/JNAE/JC Jz (f64) +83: JNB/JAE/JNC Jz (f64) +84: JZ/JE Jz (f64) +85: JNZ/JNE Jz (f64) 86: JBE/JNA Jz (f64) -87: JA/JNBE Jz (f64) +87: JNBE/JA Jz (f64) 88: JS Jz (f64) 89: JNS Jz (f64) 8a: JP/JPE Jz (f64) @@ -517,18 +502,18 @@ b8: JMPE | POPCNT Gv,Ev (F3) b9: Grp10 (1A) ba: Grp8 Ev,Ib (1A) bb: BTC Ev,Gv -bc: BSF Gv,Ev | TZCNT Gv,Ev (F3) -bd: BSR Gv,Ev | LZCNT Gv,Ev (F3) +bc: BSF Gv,Ev +bd: BSR Gv,Ev be: MOVSX Gv,Eb bf: MOVSX Gv,Ew # 0x0f 0xc0-0xcf c0: XADD Eb,Gb c1: XADD Ev,Gv -c2: vcmpps Vps,Hps,Wps,Ib | vcmppd Vpd,Hpd,Wpd,Ib (66) | vcmpss Vss,Hss,Wss,Ib (F3),(v1) | vcmpsd Vsd,Hsd,Wsd,Ib (F2),(v1) -c3: movnti My,Gy -c4: pinsrw Pq,Ry/Mw,Ib | vpinsrw Vdq,Hdq,Ry/Mw,Ib (66),(v1) -c5: pextrw Gd,Nq,Ib | vpextrw Gd,Udq,Ib (66),(v1) -c6: vshufps Vps,Hps,Wps,Ib | vshufpd Vpd,Hpd,Wpd,Ib (66) +c2: cmpps Vps,Wps,Ib (VEX) | cmpss Vss,Wss,Ib (F3),(VEX),(o128) | cmppd Vpd,Wpd,Ib (66),(VEX) | cmpsd Vsd,Wsd,Ib (F2),(VEX) +c3: movnti Md/q,Gd/q +c4: pinsrw Pq,Rd/q/Mw,Ib | pinsrw Vdq,Rd/q/Mw,Ib (66),(VEX),(o128) +c5: pextrw Gd,Nq,Ib | pextrw Gd,Udq,Ib (66),(VEX),(o128) +c6: shufps Vps,Wps,Ib (VEX) | shufpd Vpd,Wpd,Ib (66),(VEX) c7: Grp9 (1A) c8: BSWAP RAX/EAX/R8/R8D c9: BSWAP RCX/ECX/R9/R9D @@ -539,55 +524,55 @@ cd: BSWAP RBP/EBP/R13/R13D ce: BSWAP RSI/ESI/R14/R14D cf: BSWAP RDI/EDI/R15/R15D # 0x0f 0xd0-0xdf -d0: vaddsubpd Vpd,Hpd,Wpd (66) | vaddsubps Vps,Hps,Wps (F2) -d1: psrlw Pq,Qq | vpsrlw Vx,Hx,Wx (66),(v1) -d2: psrld Pq,Qq | vpsrld Vx,Hx,Wx (66),(v1) -d3: psrlq Pq,Qq | vpsrlq Vx,Hx,Wx (66),(v1) -d4: paddq Pq,Qq | vpaddq Vx,Hx,Wx (66),(v1) -d5: pmullw Pq,Qq | vpmullw Vx,Hx,Wx (66),(v1) -d6: vmovq Wq,Vq (66),(v1) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2) -d7: pmovmskb Gd,Nq | vpmovmskb Gd,Ux (66),(v1) -d8: psubusb Pq,Qq | vpsubusb Vx,Hx,Wx (66),(v1) -d9: psubusw Pq,Qq | vpsubusw Vx,Hx,Wx (66),(v1) -da: pminub Pq,Qq | vpminub Vx,Hx,Wx (66),(v1) -db: pand Pq,Qq | vpand Vx,Hx,Wx (66),(v1) -dc: paddusb Pq,Qq | vpaddusb Vx,Hx,Wx (66),(v1) -dd: paddusw Pq,Qq | vpaddusw Vx,Hx,Wx (66),(v1) -de: pmaxub Pq,Qq | vpmaxub Vx,Hx,Wx (66),(v1) -df: pandn Pq,Qq | vpandn Vx,Hx,Wx (66),(v1) +d0: addsubps Vps,Wps (F2),(VEX) | addsubpd Vpd,Wpd (66),(VEX) +d1: psrlw Pq,Qq | psrlw Vdq,Wdq (66),(VEX),(o128) +d2: psrld Pq,Qq | psrld Vdq,Wdq (66),(VEX),(o128) +d3: psrlq Pq,Qq | psrlq Vdq,Wdq (66),(VEX),(o128) +d4: paddq Pq,Qq | paddq Vdq,Wdq (66),(VEX),(o128) +d5: pmullw Pq,Qq | pmullw Vdq,Wdq (66),(VEX),(o128) +d6: movq Wq,Vq (66),(VEX),(o128) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2) +d7: pmovmskb Gd,Nq | pmovmskb Gd,Udq (66),(VEX),(o128) +d8: psubusb 
Pq,Qq | psubusb Vdq,Wdq (66),(VEX),(o128) +d9: psubusw Pq,Qq | psubusw Vdq,Wdq (66),(VEX),(o128) +da: pminub Pq,Qq | pminub Vdq,Wdq (66),(VEX),(o128) +db: pand Pq,Qq | pand Vdq,Wdq (66),(VEX),(o128) +dc: paddusb Pq,Qq | paddusb Vdq,Wdq (66),(VEX),(o128) +dd: paddusw Pq,Qq | paddusw Vdq,Wdq (66),(VEX),(o128) +de: pmaxub Pq,Qq | pmaxub Vdq,Wdq (66),(VEX),(o128) +df: pandn Pq,Qq | pandn Vdq,Wdq (66),(VEX),(o128) # 0x0f 0xe0-0xef -e0: pavgb Pq,Qq | vpavgb Vx,Hx,Wx (66),(v1) -e1: psraw Pq,Qq | vpsraw Vx,Hx,Wx (66),(v1) -e2: psrad Pq,Qq | vpsrad Vx,Hx,Wx (66),(v1) -e3: pavgw Pq,Qq | vpavgw Vx,Hx,Wx (66),(v1) -e4: pmulhuw Pq,Qq | vpmulhuw Vx,Hx,Wx (66),(v1) -e5: pmulhw Pq,Qq | vpmulhw Vx,Hx,Wx (66),(v1) -e6: vcvttpd2dq Vx,Wpd (66) | vcvtdq2pd Vx,Wdq (F3) | vcvtpd2dq Vx,Wpd (F2) -e7: movntq Mq,Pq | vmovntdq Mx,Vx (66) -e8: psubsb Pq,Qq | vpsubsb Vx,Hx,Wx (66),(v1) -e9: psubsw Pq,Qq | vpsubsw Vx,Hx,Wx (66),(v1) -ea: pminsw Pq,Qq | vpminsw Vx,Hx,Wx (66),(v1) -eb: por Pq,Qq | vpor Vx,Hx,Wx (66),(v1) -ec: paddsb Pq,Qq | vpaddsb Vx,Hx,Wx (66),(v1) -ed: paddsw Pq,Qq | vpaddsw Vx,Hx,Wx (66),(v1) -ee: pmaxsw Pq,Qq | vpmaxsw Vx,Hx,Wx (66),(v1) -ef: pxor Pq,Qq | vpxor Vx,Hx,Wx (66),(v1) +e0: pavgb Pq,Qq | pavgb Vdq,Wdq (66),(VEX),(o128) +e1: psraw Pq,Qq | psraw Vdq,Wdq (66),(VEX),(o128) +e2: psrad Pq,Qq | psrad Vdq,Wdq (66),(VEX),(o128) +e3: pavgw Pq,Qq | pavgw Vdq,Wdq (66),(VEX),(o128) +e4: pmulhuw Pq,Qq | pmulhuw Vdq,Wdq (66),(VEX),(o128) +e5: pmulhw Pq,Qq | pmulhw Vdq,Wdq (66),(VEX),(o128) +e6: cvtpd2dq Vdq,Wpd (F2),(VEX) | cvttpd2dq Vdq,Wpd (66),(VEX) | cvtdq2pd Vpd,Wdq (F3),(VEX) +e7: movntq Mq,Pq | movntdq Mdq,Vdq (66),(VEX) +e8: psubsb Pq,Qq | psubsb Vdq,Wdq (66),(VEX),(o128) +e9: psubsw Pq,Qq | psubsw Vdq,Wdq (66),(VEX),(o128) +ea: pminsw Pq,Qq | pminsw Vdq,Wdq (66),(VEX),(o128) +eb: por Pq,Qq | por Vdq,Wdq (66),(VEX),(o128) +ec: paddsb Pq,Qq | paddsb Vdq,Wdq (66),(VEX),(o128) +ed: paddsw Pq,Qq | paddsw Vdq,Wdq (66),(VEX),(o128) +ee: pmaxsw Pq,Qq | pmaxsw Vdq,Wdq (66),(VEX),(o128) +ef: pxor Pq,Qq | pxor Vdq,Wdq (66),(VEX),(o128) # 0x0f 0xf0-0xff -f0: vlddqu Vx,Mx (F2) -f1: psllw Pq,Qq | vpsllw Vx,Hx,Wx (66),(v1) -f2: pslld Pq,Qq | vpslld Vx,Hx,Wx (66),(v1) -f3: psllq Pq,Qq | vpsllq Vx,Hx,Wx (66),(v1) -f4: pmuludq Pq,Qq | vpmuludq Vx,Hx,Wx (66),(v1) -f5: pmaddwd Pq,Qq | vpmaddwd Vx,Hx,Wx (66),(v1) -f6: psadbw Pq,Qq | vpsadbw Vx,Hx,Wx (66),(v1) -f7: maskmovq Pq,Nq | vmaskmovdqu Vx,Ux (66),(v1) -f8: psubb Pq,Qq | vpsubb Vx,Hx,Wx (66),(v1) -f9: psubw Pq,Qq | vpsubw Vx,Hx,Wx (66),(v1) -fa: psubd Pq,Qq | vpsubd Vx,Hx,Wx (66),(v1) -fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1) -fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) -fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) -fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) +f0: lddqu Vdq,Mdq (F2),(VEX) +f1: psllw Pq,Qq | psllw Vdq,Wdq (66),(VEX),(o128) +f2: pslld Pq,Qq | pslld Vdq,Wdq (66),(VEX),(o128) +f3: psllq Pq,Qq | psllq Vdq,Wdq (66),(VEX),(o128) +f4: pmuludq Pq,Qq | pmuludq Vdq,Wdq (66),(VEX),(o128) +f5: pmaddwd Pq,Qq | pmaddwd Vdq,Wdq (66),(VEX),(o128) +f6: psadbw Pq,Qq | psadbw Vdq,Wdq (66),(VEX),(o128) +f7: maskmovq Pq,Nq | maskmovdqu Vdq,Udq (66),(VEX),(o128) +f8: psubb Pq,Qq | psubb Vdq,Wdq (66),(VEX),(o128) +f9: psubw Pq,Qq | psubw Vdq,Wdq (66),(VEX),(o128) +fa: psubd Pq,Qq | psubd Vdq,Wdq (66),(VEX),(o128) +fb: psubq Pq,Qq | psubq Vdq,Wdq (66),(VEX),(o128) +fc: paddb Pq,Qq | paddb Vdq,Wdq (66),(VEX),(o128) +fd: paddw Pq,Qq | paddw Vdq,Wdq (66),(VEX),(o128) +fe: paddd Pq,Qq | paddd Vdq,Wdq (66),(VEX),(o128) ff: EndTable @@ -595,193 +580,155 @@ Table: 3-byte 
opcode 1 (0x0f 0x38) Referrer: 3-byte escape 1 AVXcode: 2 # 0x0f 0x38 0x00-0x0f -00: pshufb Pq,Qq | vpshufb Vx,Hx,Wx (66),(v1) -01: phaddw Pq,Qq | vphaddw Vx,Hx,Wx (66),(v1) -02: phaddd Pq,Qq | vphaddd Vx,Hx,Wx (66),(v1) -03: phaddsw Pq,Qq | vphaddsw Vx,Hx,Wx (66),(v1) -04: pmaddubsw Pq,Qq | vpmaddubsw Vx,Hx,Wx (66),(v1) -05: phsubw Pq,Qq | vphsubw Vx,Hx,Wx (66),(v1) -06: phsubd Pq,Qq | vphsubd Vx,Hx,Wx (66),(v1) -07: phsubsw Pq,Qq | vphsubsw Vx,Hx,Wx (66),(v1) -08: psignb Pq,Qq | vpsignb Vx,Hx,Wx (66),(v1) -09: psignw Pq,Qq | vpsignw Vx,Hx,Wx (66),(v1) -0a: psignd Pq,Qq | vpsignd Vx,Hx,Wx (66),(v1) -0b: pmulhrsw Pq,Qq | vpmulhrsw Vx,Hx,Wx (66),(v1) -0c: vpermilps Vx,Hx,Wx (66),(v) -0d: vpermilpd Vx,Hx,Wx (66),(v) -0e: vtestps Vx,Wx (66),(v) -0f: vtestpd Vx,Wx (66),(v) +00: pshufb Pq,Qq | pshufb Vdq,Wdq (66),(VEX),(o128) +01: phaddw Pq,Qq | phaddw Vdq,Wdq (66),(VEX),(o128) +02: phaddd Pq,Qq | phaddd Vdq,Wdq (66),(VEX),(o128) +03: phaddsw Pq,Qq | phaddsw Vdq,Wdq (66),(VEX),(o128) +04: pmaddubsw Pq,Qq | pmaddubsw Vdq,Wdq (66),(VEX),(o128) +05: phsubw Pq,Qq | phsubw Vdq,Wdq (66),(VEX),(o128) +06: phsubd Pq,Qq | phsubd Vdq,Wdq (66),(VEX),(o128) +07: phsubsw Pq,Qq | phsubsw Vdq,Wdq (66),(VEX),(o128) +08: psignb Pq,Qq | psignb Vdq,Wdq (66),(VEX),(o128) +09: psignw Pq,Qq | psignw Vdq,Wdq (66),(VEX),(o128) +0a: psignd Pq,Qq | psignd Vdq,Wdq (66),(VEX),(o128) +0b: pmulhrsw Pq,Qq | pmulhrsw Vdq,Wdq (66),(VEX),(o128) +0c: Vpermilps /r (66),(oVEX) +0d: Vpermilpd /r (66),(oVEX) +0e: vtestps /r (66),(oVEX) +0f: vtestpd /r (66),(oVEX) # 0x0f 0x38 0x10-0x1f 10: pblendvb Vdq,Wdq (66) 11: 12: -13: vcvtph2ps Vx,Wx,Ib (66),(v) +13: 14: blendvps Vdq,Wdq (66) 15: blendvpd Vdq,Wdq (66) -16: vpermps Vqq,Hqq,Wqq (66),(v) -17: vptest Vx,Wx (66) -18: vbroadcastss Vx,Wd (66),(v) -19: vbroadcastsd Vqq,Wq (66),(v) -1a: vbroadcastf128 Vqq,Mdq (66),(v) +16: +17: ptest Vdq,Wdq (66),(VEX) +18: vbroadcastss /r (66),(oVEX) +19: vbroadcastsd /r (66),(oVEX),(o256) +1a: vbroadcastf128 /r (66),(oVEX),(o256) 1b: -1c: pabsb Pq,Qq | vpabsb Vx,Wx (66),(v1) -1d: pabsw Pq,Qq | vpabsw Vx,Wx (66),(v1) -1e: pabsd Pq,Qq | vpabsd Vx,Wx (66),(v1) +1c: pabsb Pq,Qq | pabsb Vdq,Wdq (66),(VEX),(o128) +1d: pabsw Pq,Qq | pabsw Vdq,Wdq (66),(VEX),(o128) +1e: pabsd Pq,Qq | pabsd Vdq,Wdq (66),(VEX),(o128) 1f: # 0x0f 0x38 0x20-0x2f -20: vpmovsxbw Vx,Ux/Mq (66),(v1) -21: vpmovsxbd Vx,Ux/Md (66),(v1) -22: vpmovsxbq Vx,Ux/Mw (66),(v1) -23: vpmovsxwd Vx,Ux/Mq (66),(v1) -24: vpmovsxwq Vx,Ux/Md (66),(v1) -25: vpmovsxdq Vx,Ux/Mq (66),(v1) +20: pmovsxbw Vdq,Udq/Mq (66),(VEX),(o128) +21: pmovsxbd Vdq,Udq/Md (66),(VEX),(o128) +22: pmovsxbq Vdq,Udq/Mw (66),(VEX),(o128) +23: pmovsxwd Vdq,Udq/Mq (66),(VEX),(o128) +24: pmovsxwq Vdq,Udq/Md (66),(VEX),(o128) +25: pmovsxdq Vdq,Udq/Mq (66),(VEX),(o128) 26: 27: -28: vpmuldq Vx,Hx,Wx (66),(v1) -29: vpcmpeqq Vx,Hx,Wx (66),(v1) -2a: vmovntdqa Vx,Mx (66),(v1) -2b: vpackusdw Vx,Hx,Wx (66),(v1) -2c: vmaskmovps Vx,Hx,Mx (66),(v) -2d: vmaskmovpd Vx,Hx,Mx (66),(v) -2e: vmaskmovps Mx,Hx,Vx (66),(v) -2f: vmaskmovpd Mx,Hx,Vx (66),(v) +28: pmuldq Vdq,Wdq (66),(VEX),(o128) +29: pcmpeqq Vdq,Wdq (66),(VEX),(o128) +2a: movntdqa Vdq,Mdq (66),(VEX),(o128) +2b: packusdw Vdq,Wdq (66),(VEX),(o128) +2c: vmaskmovps(ld) /r (66),(oVEX) +2d: vmaskmovpd(ld) /r (66),(oVEX) +2e: vmaskmovps(st) /r (66),(oVEX) +2f: vmaskmovpd(st) /r (66),(oVEX) # 0x0f 0x38 0x30-0x3f -30: vpmovzxbw Vx,Ux/Mq (66),(v1) -31: vpmovzxbd Vx,Ux/Md (66),(v1) -32: vpmovzxbq Vx,Ux/Mw (66),(v1) -33: vpmovzxwd Vx,Ux/Mq (66),(v1) -34: vpmovzxwq Vx,Ux/Md (66),(v1) -35: vpmovzxdq 
Vx,Ux/Mq (66),(v1) -36: vpermd Vqq,Hqq,Wqq (66),(v) -37: vpcmpgtq Vx,Hx,Wx (66),(v1) -38: vpminsb Vx,Hx,Wx (66),(v1) -39: vpminsd Vx,Hx,Wx (66),(v1) -3a: vpminuw Vx,Hx,Wx (66),(v1) -3b: vpminud Vx,Hx,Wx (66),(v1) -3c: vpmaxsb Vx,Hx,Wx (66),(v1) -3d: vpmaxsd Vx,Hx,Wx (66),(v1) -3e: vpmaxuw Vx,Hx,Wx (66),(v1) -3f: vpmaxud Vx,Hx,Wx (66),(v1) +30: pmovzxbw Vdq,Udq/Mq (66),(VEX),(o128) +31: pmovzxbd Vdq,Udq/Md (66),(VEX),(o128) +32: pmovzxbq Vdq,Udq/Mw (66),(VEX),(o128) +33: pmovzxwd Vdq,Udq/Mq (66),(VEX),(o128) +34: pmovzxwq Vdq,Udq/Md (66),(VEX),(o128) +35: pmovzxdq Vdq,Udq/Mq (66),(VEX),(o128) +36: +37: pcmpgtq Vdq,Wdq (66),(VEX),(o128) +38: pminsb Vdq,Wdq (66),(VEX),(o128) +39: pminsd Vdq,Wdq (66),(VEX),(o128) +3a: pminuw Vdq,Wdq (66),(VEX),(o128) +3b: pminud Vdq,Wdq (66),(VEX),(o128) +3c: pmaxsb Vdq,Wdq (66),(VEX),(o128) +3d: pmaxsd Vdq,Wdq (66),(VEX),(o128) +3e: pmaxuw Vdq,Wdq (66),(VEX),(o128) +3f: pmaxud Vdq,Wdq (66),(VEX),(o128) # 0x0f 0x38 0x40-0x8f -40: vpmulld Vx,Hx,Wx (66),(v1) -41: vphminposuw Vdq,Wdq (66),(v1) -42: -43: -44: -45: vpsrlvd/q Vx,Hx,Wx (66),(v) -46: vpsravd Vx,Hx,Wx (66),(v) -47: vpsllvd/q Vx,Hx,Wx (66),(v) -# Skip 0x48-0x57 -58: vpbroadcastd Vx,Wx (66),(v) -59: vpbroadcastq Vx,Wx (66),(v) -5a: vbroadcasti128 Vqq,Mdq (66),(v) -# Skip 0x5b-0x77 -78: vpbroadcastb Vx,Wx (66),(v) -79: vpbroadcastw Vx,Wx (66),(v) -# Skip 0x7a-0x7f -80: INVEPT Gy,Mdq (66) -81: INVPID Gy,Mdq (66) -82: INVPCID Gy,Mdq (66) -8c: vpmaskmovd/q Vx,Hx,Mx (66),(v) -8e: vpmaskmovd/q Mx,Vx,Hx (66),(v) +40: pmulld Vdq,Wdq (66),(VEX),(o128) +41: phminposuw Vdq,Wdq (66),(VEX),(o128) +80: INVEPT Gd/q,Mdq (66) +81: INVPID Gd/q,Mdq (66) # 0x0f 0x38 0x90-0xbf (FMA) -90: vgatherdd/q Vx,Hx,Wx (66),(v) -91: vgatherqd/q Vx,Hx,Wx (66),(v) -92: vgatherdps/d Vx,Hx,Wx (66),(v) -93: vgatherqps/d Vx,Hx,Wx (66),(v) -94: -95: -96: vfmaddsub132ps/d Vx,Hx,Wx (66),(v) -97: vfmsubadd132ps/d Vx,Hx,Wx (66),(v) -98: vfmadd132ps/d Vx,Hx,Wx (66),(v) -99: vfmadd132ss/d Vx,Hx,Wx (66),(v),(v1) -9a: vfmsub132ps/d Vx,Hx,Wx (66),(v) -9b: vfmsub132ss/d Vx,Hx,Wx (66),(v),(v1) -9c: vfnmadd132ps/d Vx,Hx,Wx (66),(v) -9d: vfnmadd132ss/d Vx,Hx,Wx (66),(v),(v1) -9e: vfnmsub132ps/d Vx,Hx,Wx (66),(v) -9f: vfnmsub132ss/d Vx,Hx,Wx (66),(v),(v1) -a6: vfmaddsub213ps/d Vx,Hx,Wx (66),(v) -a7: vfmsubadd213ps/d Vx,Hx,Wx (66),(v) -a8: vfmadd213ps/d Vx,Hx,Wx (66),(v) -a9: vfmadd213ss/d Vx,Hx,Wx (66),(v),(v1) -aa: vfmsub213ps/d Vx,Hx,Wx (66),(v) -ab: vfmsub213ss/d Vx,Hx,Wx (66),(v),(v1) -ac: vfnmadd213ps/d Vx,Hx,Wx (66),(v) -ad: vfnmadd213ss/d Vx,Hx,Wx (66),(v),(v1) -ae: vfnmsub213ps/d Vx,Hx,Wx (66),(v) -af: vfnmsub213ss/d Vx,Hx,Wx (66),(v),(v1) -b6: vfmaddsub231ps/d Vx,Hx,Wx (66),(v) -b7: vfmsubadd231ps/d Vx,Hx,Wx (66),(v) -b8: vfmadd231ps/d Vx,Hx,Wx (66),(v) -b9: vfmadd231ss/d Vx,Hx,Wx (66),(v),(v1) -ba: vfmsub231ps/d Vx,Hx,Wx (66),(v) -bb: vfmsub231ss/d Vx,Hx,Wx (66),(v),(v1) -bc: vfnmadd231ps/d Vx,Hx,Wx (66),(v) -bd: vfnmadd231ss/d Vx,Hx,Wx (66),(v),(v1) -be: vfnmsub231ps/d Vx,Hx,Wx (66),(v) -bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1) +96: vfmaddsub132pd/ps /r (66),(VEX) +97: vfmsubadd132pd/ps /r (66),(VEX) +98: vfmadd132pd/ps /r (66),(VEX) +99: vfmadd132sd/ss /r (66),(VEX),(o128) +9a: vfmsub132pd/ps /r (66),(VEX) +9b: vfmsub132sd/ss /r (66),(VEX),(o128) +9c: vfnmadd132pd/ps /r (66),(VEX) +9d: vfnmadd132sd/ss /r (66),(VEX),(o128) +9e: vfnmsub132pd/ps /r (66),(VEX) +9f: vfnmsub132sd/ss /r (66),(VEX),(o128) +a6: vfmaddsub213pd/ps /r (66),(VEX) +a7: vfmsubadd213pd/ps /r (66),(VEX) +a8: vfmadd213pd/ps /r (66),(VEX) +a9: vfmadd213sd/ss /r (66),(VEX),(o128) 
+aa: vfmsub213pd/ps /r (66),(VEX) +ab: vfmsub213sd/ss /r (66),(VEX),(o128) +ac: vfnmadd213pd/ps /r (66),(VEX) +ad: vfnmadd213sd/ss /r (66),(VEX),(o128) +ae: vfnmsub213pd/ps /r (66),(VEX) +af: vfnmsub213sd/ss /r (66),(VEX),(o128) +b6: vfmaddsub231pd/ps /r (66),(VEX) +b7: vfmsubadd231pd/ps /r (66),(VEX) +b8: vfmadd231pd/ps /r (66),(VEX) +b9: vfmadd231sd/ss /r (66),(VEX),(o128) +ba: vfmsub231pd/ps /r (66),(VEX) +bb: vfmsub231sd/ss /r (66),(VEX),(o128) +bc: vfnmadd231pd/ps /r (66),(VEX) +bd: vfnmadd231sd/ss /r (66),(VEX),(o128) +be: vfnmsub231pd/ps /r (66),(VEX) +bf: vfnmsub231sd/ss /r (66),(VEX),(o128) # 0x0f 0x38 0xc0-0xff -db: VAESIMC Vdq,Wdq (66),(v1) -dc: VAESENC Vdq,Hdq,Wdq (66),(v1) -dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1) -de: VAESDEC Vdq,Hdq,Wdq (66),(v1) -df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1) -f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) -f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) -f3: ANDN Gy,By,Ey (v) -f4: Grp17 (1A) -f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v) -f6: MULX By,Gy,rDX,Ey (F2),(v) -f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v) +db: aesimc Vdq,Wdq (66),(VEX),(o128) +dc: aesenc Vdq,Wdq (66),(VEX),(o128) +dd: aesenclast Vdq,Wdq (66),(VEX),(o128) +de: aesdec Vdq,Wdq (66),(VEX),(o128) +df: aesdeclast Vdq,Wdq (66),(VEX),(o128) +f0: MOVBE Gv,Mv | CRC32 Gd,Eb (F2) +f1: MOVBE Mv,Gv | CRC32 Gd,Ev (F2) EndTable Table: 3-byte opcode 2 (0x0f 0x3a) Referrer: 3-byte escape 2 AVXcode: 3 # 0x0f 0x3a 0x00-0xff -00: vpermq Vqq,Wqq,Ib (66),(v) -01: vpermpd Vqq,Wqq,Ib (66),(v) -02: vpblendd Vx,Hx,Wx,Ib (66),(v) -03: -04: vpermilps Vx,Wx,Ib (66),(v) -05: vpermilpd Vx,Wx,Ib (66),(v) -06: vperm2f128 Vqq,Hqq,Wqq,Ib (66),(v) -07: -08: vroundps Vx,Wx,Ib (66) -09: vroundpd Vx,Wx,Ib (66) -0a: vroundss Vss,Wss,Ib (66),(v1) -0b: vroundsd Vsd,Wsd,Ib (66),(v1) -0c: vblendps Vx,Hx,Wx,Ib (66) -0d: vblendpd Vx,Hx,Wx,Ib (66) -0e: vpblendw Vx,Hx,Wx,Ib (66),(v1) -0f: palignr Pq,Qq,Ib | vpalignr Vx,Hx,Wx,Ib (66),(v1) -14: vpextrb Rd/Mb,Vdq,Ib (66),(v1) -15: vpextrw Rd/Mw,Vdq,Ib (66),(v1) -16: vpextrd/q Ey,Vdq,Ib (66),(v1) -17: vextractps Ed,Vdq,Ib (66),(v1) -18: vinsertf128 Vqq,Hqq,Wqq,Ib (66),(v) -19: vextractf128 Wdq,Vqq,Ib (66),(v) -1d: vcvtps2ph Wx,Vx,Ib (66),(v) -20: vpinsrb Vdq,Hdq,Ry/Mb,Ib (66),(v1) -21: vinsertps Vdq,Hdq,Udq/Md,Ib (66),(v1) -22: vpinsrd/q Vdq,Hdq,Ey,Ib (66),(v1) -38: vinserti128 Vqq,Hqq,Wqq,Ib (66),(v) -39: vextracti128 Wdq,Vqq,Ib (66),(v) -40: vdpps Vx,Hx,Wx,Ib (66) -41: vdppd Vdq,Hdq,Wdq,Ib (66),(v1) -42: vmpsadbw Vx,Hx,Wx,Ib (66),(v1) -44: vpclmulqdq Vdq,Hdq,Wdq,Ib (66),(v1) -46: vperm2i128 Vqq,Hqq,Wqq,Ib (66),(v) -4a: vblendvps Vx,Hx,Wx,Lx (66),(v) -4b: vblendvpd Vx,Hx,Wx,Lx (66),(v) -4c: vpblendvb Vx,Hx,Wx,Lx (66),(v1) -60: vpcmpestrm Vdq,Wdq,Ib (66),(v1) -61: vpcmpestri Vdq,Wdq,Ib (66),(v1) -62: vpcmpistrm Vdq,Wdq,Ib (66),(v1) -63: vpcmpistri Vdq,Wdq,Ib (66),(v1) -df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1) -f0: RORX Gy,Ey,Ib (F2),(v) +04: vpermilps /r,Ib (66),(oVEX) +05: vpermilpd /r,Ib (66),(oVEX) +06: vperm2f128 /r,Ib (66),(oVEX),(o256) +08: roundps Vdq,Wdq,Ib (66),(VEX) +09: roundpd Vdq,Wdq,Ib (66),(VEX) +0a: roundss Vss,Wss,Ib (66),(VEX),(o128) +0b: roundsd Vsd,Wsd,Ib (66),(VEX),(o128) +0c: blendps Vdq,Wdq,Ib (66),(VEX) +0d: blendpd Vdq,Wdq,Ib (66),(VEX) +0e: pblendw Vdq,Wdq,Ib (66),(VEX),(o128) +0f: palignr Pq,Qq,Ib | palignr Vdq,Wdq,Ib (66),(VEX),(o128) +14: pextrb Rd/Mb,Vdq,Ib (66),(VEX),(o128) +15: pextrw Rd/Mw,Vdq,Ib (66),(VEX),(o128) +16: pextrd/pextrq Ed/q,Vdq,Ib 
(66),(VEX),(o128) +17: extractps Ed,Vdq,Ib (66),(VEX),(o128) +18: vinsertf128 /r,Ib (66),(oVEX),(o256) +19: vextractf128 /r,Ib (66),(oVEX),(o256) +20: pinsrb Vdq,Rd/q/Mb,Ib (66),(VEX),(o128) +21: insertps Vdq,Udq/Md,Ib (66),(VEX),(o128) +22: pinsrd/pinsrq Vdq,Ed/q,Ib (66),(VEX),(o128) +40: dpps Vdq,Wdq,Ib (66),(VEX) +41: dppd Vdq,Wdq,Ib (66),(VEX),(o128) +42: mpsadbw Vdq,Wdq,Ib (66),(VEX),(o128) +44: pclmulq Vdq,Wdq,Ib (66),(VEX),(o128) +4a: vblendvps /r,Ib (66),(oVEX) +4b: vblendvpd /r,Ib (66),(oVEX) +4c: vpblendvb /r,Ib (66),(oVEX),(o128) +60: pcmpestrm Vdq,Wdq,Ib (66),(VEX),(o128) +61: pcmpestri Vdq,Wdq,Ib (66),(VEX),(o128) +62: pcmpistrm Vdq,Wdq,Ib (66),(VEX),(o128) +63: pcmpistri Vdq,Wdq,Ib (66),(VEX),(o128) +df: aeskeygenassist Vdq,Wdq,Ib (66),(VEX),(o128) EndTable GrpTable: Grp1 @@ -843,7 +790,7 @@ GrpTable: Grp5 2: CALLN Ev (f64) 3: CALLF Ep 4: JMPN Ev (f64) -5: JMPF Mp +5: JMPF Ep 6: PUSH Ev (d64) 7: EndTable @@ -860,7 +807,7 @@ EndTable GrpTable: Grp7 0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) 1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001) -2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) +2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) 3: LIDT Ms 4: SMSW Mw/Rv 5: @@ -877,45 +824,44 @@ EndTable GrpTable: Grp9 1: CMPXCHG8B/16B Mq/Mdq -6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B) -7: VMPTRST Mq | VMPTRST Mq (F3) +6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) +7: VMPTRST Mq EndTable GrpTable: Grp10 EndTable GrpTable: Grp11 -# Note: the operands are given by group opcode 0: MOV EndTable GrpTable: Grp12 -2: psrlw Nq,Ib (11B) | vpsrlw Hx,Ux,Ib (66),(11B),(v1) -4: psraw Nq,Ib (11B) | vpsraw Hx,Ux,Ib (66),(11B),(v1) -6: psllw Nq,Ib (11B) | vpsllw Hx,Ux,Ib (66),(11B),(v1) +2: psrlw Nq,Ib (11B) | psrlw Udq,Ib (66),(11B),(VEX),(o128) +4: psraw Nq,Ib (11B) | psraw Udq,Ib (66),(11B),(VEX),(o128) +6: psllw Nq,Ib (11B) | psllw Udq,Ib (66),(11B),(VEX),(o128) EndTable GrpTable: Grp13 -2: psrld Nq,Ib (11B) | vpsrld Hx,Ux,Ib (66),(11B),(v1) -4: psrad Nq,Ib (11B) | vpsrad Hx,Ux,Ib (66),(11B),(v1) -6: pslld Nq,Ib (11B) | vpslld Hx,Ux,Ib (66),(11B),(v1) +2: psrld Nq,Ib (11B) | psrld Udq,Ib (66),(11B),(VEX),(o128) +4: psrad Nq,Ib (11B) | psrad Udq,Ib (66),(11B),(VEX),(o128) +6: pslld Nq,Ib (11B) | pslld Udq,Ib (66),(11B),(VEX),(o128) EndTable GrpTable: Grp14 -2: psrlq Nq,Ib (11B) | vpsrlq Hx,Ux,Ib (66),(11B),(v1) -3: vpsrldq Hx,Ux,Ib (66),(11B),(v1) -6: psllq Nq,Ib (11B) | vpsllq Hx,Ux,Ib (66),(11B),(v1) -7: vpslldq Hx,Ux,Ib (66),(11B),(v1) +2: psrlq Nq,Ib (11B) | psrlq Udq,Ib (66),(11B),(VEX),(o128) +3: psrldq Udq,Ib (66),(11B),(VEX),(o128) +6: psllq Nq,Ib (11B) | psllq Udq,Ib (66),(11B),(VEX),(o128) +7: pslldq Udq,Ib (66),(11B),(VEX),(o128) EndTable GrpTable: Grp15 -0: fxsave | RDFSBASE Ry (F3),(11B) -1: fxstor | RDGSBASE Ry (F3),(11B) -2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B) -3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B) +0: fxsave +1: fxstor +2: ldmxcsr (VEX) +3: stmxcsr (VEX) 4: XSAVE 5: XRSTOR | lfence (11B) -6: XSAVEOPT | mfence (11B) +6: mfence (11B) 7: clflush | sfence (11B) EndTable @@ -926,12 +872,6 @@ GrpTable: Grp16 3: prefetch T2 EndTable -GrpTable: Grp17 -1: BLSR By,Ey (v) -2: BLSMSK By,Ey (v) -3: BLSI By,Ey (v) -EndTable - # AMD's Prefetch Group GrpTable: GrpP 0: PREFETCH diff --git a/trunk/arch/x86/mm/Makefile b/trunk/arch/x86/mm/Makefile index 23d8e5fecf76..3d11327c9ab4 100644 --- a/trunk/arch/x86/mm/Makefile +++ b/trunk/arch/x86/mm/Makefile @@ -27,4 +27,6 @@ 
obj-$(CONFIG_AMD_NUMA) += amdtopology.o obj-$(CONFIG_ACPI_NUMA) += srat.o obj-$(CONFIG_NUMA_EMU) += numa_emulation.o +obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o + obj-$(CONFIG_MEMTEST) += memtest.o diff --git a/trunk/arch/x86/mm/extable.c b/trunk/arch/x86/mm/extable.c index 1fb85dbe390a..d0474ad2a6e5 100644 --- a/trunk/arch/x86/mm/extable.c +++ b/trunk/arch/x86/mm/extable.c @@ -25,7 +25,7 @@ int fixup_exception(struct pt_regs *regs) if (fixup) { /* If fixup is less than 16, it means uaccess error */ if (fixup->fixup < 16) { - current_thread_info()->uaccess_err = 1; + current_thread_info()->uaccess_err = -EFAULT; regs->ip += fixup->fixup; return 1; } diff --git a/trunk/arch/x86/mm/fault.c b/trunk/arch/x86/mm/fault.c index 9d74824a708d..5db0490deb07 100644 --- a/trunk/arch/x86/mm/fault.c +++ b/trunk/arch/x86/mm/fault.c @@ -626,7 +626,7 @@ pgtable_bad(struct pt_regs *regs, unsigned long error_code, static noinline void no_context(struct pt_regs *regs, unsigned long error_code, - unsigned long address, int signal, int si_code) + unsigned long address) { struct task_struct *tsk = current; unsigned long *stackend; @@ -634,17 +634,8 @@ no_context(struct pt_regs *regs, unsigned long error_code, int sig; /* Are we prepared to handle this kernel fault? */ - if (fixup_exception(regs)) { - if (current_thread_info()->sig_on_uaccess_error && signal) { - tsk->thread.trap_no = 14; - tsk->thread.error_code = error_code | PF_USER; - tsk->thread.cr2 = address; - - /* XXX: hwpoison faults will set the wrong code. */ - force_sig_info_fault(signal, si_code, address, tsk, 0); - } + if (fixup_exception(regs)) return; - } /* * 32-bit: @@ -764,7 +755,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, if (is_f00f_bug(regs, address)) return; - no_context(regs, error_code, address, SIGSEGV, si_code); + no_context(regs, error_code, address); } static noinline void @@ -828,7 +819,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, /* Kernel mode? Handle exceptions or die: */ if (!(error_code & PF_USER)) { - no_context(regs, error_code, address, SIGBUS, BUS_ADRERR); + no_context(regs, error_code, address); return; } @@ -863,7 +854,7 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, if (!(fault & VM_FAULT_RETRY)) up_read(¤t->mm->mmap_sem); if (!(error_code & PF_USER)) - no_context(regs, error_code, address, 0, 0); + no_context(regs, error_code, address); return 1; } if (!(fault & VM_FAULT_ERROR)) @@ -873,8 +864,7 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, /* Kernel mode? 
Handle exceptions or die: */ if (!(error_code & PF_USER)) { up_read(¤t->mm->mmap_sem); - no_context(regs, error_code, address, - SIGSEGV, SEGV_MAPERR); + no_context(regs, error_code, address); return 1; } diff --git a/trunk/arch/x86/mm/init.c b/trunk/arch/x86/mm/init.c index a298914058f9..87488b93a65c 100644 --- a/trunk/arch/x86/mm/init.c +++ b/trunk/arch/x86/mm/init.c @@ -67,7 +67,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse, good_end = max_pfn_mapped << PAGE_SHIFT; base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE); - if (!base) + if (base == MEMBLOCK_ERROR) panic("Cannot find space for the kernel page tables"); pgt_buf_start = base >> PAGE_SHIFT; @@ -80,7 +80,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse, void __init native_pagetable_reserve(u64 start, u64 end) { - memblock_reserve(start, end - start); + memblock_x86_reserve_range(start, end, "PGTABLE"); } struct map_range { @@ -279,8 +279,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top) * so that they can be reused for other purposes. * - * On native it just means calling memblock_reserve, on Xen it also - * means marking RW the pagetable pages that we allocated before + * On native it just means calling memblock_x86_reserve_range, on Xen it + * also means marking RW the pagetable pages that we allocated before * but that haven't been used. * * In fact on xen we mark RO the whole range pgt_buf_start - diff --git a/trunk/arch/x86/mm/init_32.c b/trunk/arch/x86/mm/init_32.c index 0c1da394a634..29f7c6d98179 100644 --- a/trunk/arch/x86/mm/init_32.c +++ b/trunk/arch/x86/mm/init_32.c @@ -427,17 +427,23 @@ static void __init add_one_highpage_init(struct page *page) void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn, unsigned long end_pfn) { - phys_addr_t start, end; - u64 i; - - for_each_free_mem_range(i, nid, &start, &end, NULL) { - unsigned long pfn = clamp_t(unsigned long, PFN_UP(start), - start_pfn, end_pfn); - unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end), - start_pfn, end_pfn); - for ( ; pfn < e_pfn; pfn++) - if (pfn_valid(pfn)) - add_one_highpage_init(pfn_to_page(pfn)); + struct range *range; + int nr_range; + int i; + + nr_range = __get_free_all_memory_range(&range, nid, start_pfn, end_pfn); + + for (i = 0; i < nr_range; i++) { + struct page *page; + int node_pfn; + + for (node_pfn = range[i].start; node_pfn < range[i].end; + node_pfn++) { + if (!pfn_valid(node_pfn)) + continue; + page = pfn_to_page(node_pfn); + add_one_highpage_init(page); + } } } #else @@ -644,18 +650,18 @@ void __init initmem_init(void) highstart_pfn = highend_pfn = max_pfn; if (max_pfn > max_low_pfn) highstart_pfn = max_low_pfn; + memblock_x86_register_active_regions(0, 0, highend_pfn); + sparse_memory_present_with_active_regions(0); printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", pages_to_mb(highend_pfn - highstart_pfn)); num_physpages = highend_pfn; high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; #else + memblock_x86_register_active_regions(0, 0, max_low_pfn); + sparse_memory_present_with_active_regions(0); num_physpages = max_low_pfn; high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; #endif - - memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0); - sparse_memory_present_with_active_regions(0); - #ifdef CONFIG_FLATMEM max_mapnr = num_physpages; #endif diff --git a/trunk/arch/x86/mm/init_64.c b/trunk/arch/x86/mm/init_64.c index 
a8a56ce3a962..bbaaa005bf0e 100644 --- a/trunk/arch/x86/mm/init_64.c +++ b/trunk/arch/x86/mm/init_64.c @@ -608,7 +608,7 @@ kernel_physical_mapping_init(unsigned long start, #ifndef CONFIG_NUMA void __init initmem_init(void) { - memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0); + memblock_x86_register_active_regions(0, 0, max_pfn); } #endif diff --git a/trunk/arch/x86/mm/memblock.c b/trunk/arch/x86/mm/memblock.c new file mode 100644 index 000000000000..992da5ec5a64 --- /dev/null +++ b/trunk/arch/x86/mm/memblock.c @@ -0,0 +1,348 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +/* Check for already reserved areas */ +bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align) +{ + struct memblock_region *r; + u64 addr = *addrp, last; + u64 size = *sizep; + bool changed = false; + +again: + last = addr + size; + for_each_memblock(reserved, r) { + if (last > r->base && addr < r->base) { + size = r->base - addr; + changed = true; + goto again; + } + if (last > (r->base + r->size) && addr < (r->base + r->size)) { + addr = round_up(r->base + r->size, align); + size = last - addr; + changed = true; + goto again; + } + if (last <= (r->base + r->size) && addr >= r->base) { + *sizep = 0; + return false; + } + } + if (changed) { + *addrp = addr; + *sizep = size; + } + return changed; +} + +/* + * Find next free range after start, and size is returned in *sizep + */ +u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align) +{ + struct memblock_region *r; + + for_each_memblock(memory, r) { + u64 ei_start = r->base; + u64 ei_last = ei_start + r->size; + u64 addr; + + addr = round_up(ei_start, align); + if (addr < start) + addr = round_up(start, align); + if (addr >= ei_last) + continue; + *sizep = ei_last - addr; + while (memblock_x86_check_reserved_size(&addr, sizep, align)) + ; + + if (*sizep) + return addr; + } + + return MEMBLOCK_ERROR; +} + +static __init struct range *find_range_array(int count) +{ + u64 end, size, mem; + struct range *range; + + size = sizeof(struct range) * count; + end = memblock.current_limit; + + mem = memblock_find_in_range(0, end, size, sizeof(struct range)); + if (mem == MEMBLOCK_ERROR) + panic("can not find more space for range array"); + + /* + * This range is tempoaray, so don't reserve it, it will not be + * overlapped because We will not alloccate new buffer before + * We discard this one + */ + range = __va(mem); + memset(range, 0, size); + + return range; +} + +static void __init memblock_x86_subtract_reserved(struct range *range, int az) +{ + u64 final_start, final_end; + struct memblock_region *r; + + /* Take out region array itself at first*/ + memblock_free_reserved_regions(); + + memblock_dbg("Subtract (%ld early reservations)\n", memblock.reserved.cnt); + + for_each_memblock(reserved, r) { + memblock_dbg(" [%010llx-%010llx]\n", (u64)r->base, (u64)r->base + r->size - 1); + final_start = PFN_DOWN(r->base); + final_end = PFN_UP(r->base + r->size); + if (final_start >= final_end) + continue; + subtract_range(range, az, final_start, final_end); + } + + /* Put region array back ? 
*/ + memblock_reserve_reserved_regions(); +} + +struct count_data { + int nr; +}; + +static int __init count_work_fn(unsigned long start_pfn, + unsigned long end_pfn, void *datax) +{ + struct count_data *data = datax; + + data->nr++; + + return 0; +} + +static int __init count_early_node_map(int nodeid) +{ + struct count_data data; + + data.nr = 0; + work_with_active_regions(nodeid, count_work_fn, &data); + + return data.nr; +} + +int __init __get_free_all_memory_range(struct range **rangep, int nodeid, + unsigned long start_pfn, unsigned long end_pfn) +{ + int count; + struct range *range; + int nr_range; + + count = (memblock.reserved.cnt + count_early_node_map(nodeid)) * 2; + + range = find_range_array(count); + nr_range = 0; + + /* + * Use early_node_map[] and memblock.reserved.region to get range array + * at first + */ + nr_range = add_from_early_node_map(range, count, nr_range, nodeid); + subtract_range(range, count, 0, start_pfn); + subtract_range(range, count, end_pfn, -1ULL); + + memblock_x86_subtract_reserved(range, count); + nr_range = clean_sort_range(range, count); + + *rangep = range; + return nr_range; +} + +int __init get_free_all_memory_range(struct range **rangep, int nodeid) +{ + unsigned long end_pfn = -1UL; + +#ifdef CONFIG_X86_32 + end_pfn = max_low_pfn; +#endif + return __get_free_all_memory_range(rangep, nodeid, 0, end_pfn); +} + +static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free) +{ + int i, count; + struct range *range; + int nr_range; + u64 final_start, final_end; + u64 free_size; + struct memblock_region *r; + + count = (memblock.reserved.cnt + memblock.memory.cnt) * 2; + + range = find_range_array(count); + nr_range = 0; + + addr = PFN_UP(addr); + limit = PFN_DOWN(limit); + + for_each_memblock(memory, r) { + final_start = PFN_UP(r->base); + final_end = PFN_DOWN(r->base + r->size); + if (final_start >= final_end) + continue; + if (final_start >= limit || final_end <= addr) + continue; + + nr_range = add_range(range, count, nr_range, final_start, final_end); + } + subtract_range(range, count, 0, addr); + subtract_range(range, count, limit, -1ULL); + + /* Subtract memblock.reserved.region in range ? 
*/ + if (!get_free) + goto sort_and_count_them; + for_each_memblock(reserved, r) { + final_start = PFN_DOWN(r->base); + final_end = PFN_UP(r->base + r->size); + if (final_start >= final_end) + continue; + if (final_start >= limit || final_end <= addr) + continue; + + subtract_range(range, count, final_start, final_end); + } + +sort_and_count_them: + nr_range = clean_sort_range(range, count); + + free_size = 0; + for (i = 0; i < nr_range; i++) + free_size += range[i].end - range[i].start; + + return free_size << PAGE_SHIFT; +} + +u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit) +{ + return __memblock_x86_memory_in_range(addr, limit, true); +} + +u64 __init memblock_x86_memory_in_range(u64 addr, u64 limit) +{ + return __memblock_x86_memory_in_range(addr, limit, false); +} + +void __init memblock_x86_reserve_range(u64 start, u64 end, char *name) +{ + if (start == end) + return; + + if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx)\n", start, end)) + return; + + memblock_dbg(" memblock_x86_reserve_range: [%#010llx-%#010llx] %16s\n", start, end - 1, name); + + memblock_reserve(start, end - start); +} + +void __init memblock_x86_free_range(u64 start, u64 end) +{ + if (start == end) + return; + + if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx)\n", start, end)) + return; + + memblock_dbg(" memblock_x86_free_range: [%#010llx-%#010llx]\n", start, end - 1); + + memblock_free(start, end - start); +} + +/* + * Need to call this function after memblock_x86_register_active_regions, + * so early_node_map[] is filled already. + */ +u64 __init memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align) +{ + u64 addr; + addr = find_memory_core_early(nid, size, align, start, end); + if (addr != MEMBLOCK_ERROR) + return addr; + + /* Fallback, should already have start end within node range */ + return memblock_find_in_range(start, end, size, align); +} + +/* + * Finds an active region in the address range from start_pfn to last_pfn and + * returns its range in ei_startpfn and ei_endpfn for the memblock entry. + */ +static int __init memblock_x86_find_active_region(const struct memblock_region *ei, + unsigned long start_pfn, + unsigned long last_pfn, + unsigned long *ei_startpfn, + unsigned long *ei_endpfn) +{ + u64 align = PAGE_SIZE; + + *ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT; + *ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT; + + /* Skip map entries smaller than a page */ + if (*ei_startpfn >= *ei_endpfn) + return 0; + + /* Skip if map is outside the node */ + if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn) + return 0; + + /* Check for overlaps */ + if (*ei_startpfn < start_pfn) + *ei_startpfn = start_pfn; + if (*ei_endpfn > last_pfn) + *ei_endpfn = last_pfn; + + return 1; +} + +/* Walk the memblock.memory map and register active regions within a node */ +void __init memblock_x86_register_active_regions(int nid, unsigned long start_pfn, + unsigned long last_pfn) +{ + unsigned long ei_startpfn; + unsigned long ei_endpfn; + struct memblock_region *r; + + for_each_memblock(memory, r) + if (memblock_x86_find_active_region(r, start_pfn, last_pfn, + &ei_startpfn, &ei_endpfn)) + add_active_range(nid, ei_startpfn, ei_endpfn); +} + +/* + * Find the hole size (in bytes) in the memory range. 
+ * @start: starting address of the memory range to scan + * @end: ending address of the memory range to scan + */ +u64 __init memblock_x86_hole_size(u64 start, u64 end) +{ + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long last_pfn = end >> PAGE_SHIFT; + unsigned long ei_startpfn, ei_endpfn, ram = 0; + struct memblock_region *r; + + for_each_memblock(memory, r) + if (memblock_x86_find_active_region(r, start_pfn, last_pfn, + &ei_startpfn, &ei_endpfn)) + ram += ei_endpfn - ei_startpfn; + + return end - start - ((u64)ram << PAGE_SHIFT); +} diff --git a/trunk/arch/x86/mm/memtest.c b/trunk/arch/x86/mm/memtest.c index c80b9fb95734..92faf3a1c53e 100644 --- a/trunk/arch/x86/mm/memtest.c +++ b/trunk/arch/x86/mm/memtest.c @@ -34,7 +34,7 @@ static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad) (unsigned long long) pattern, (unsigned long long) start_bad, (unsigned long long) end_bad); - memblock_reserve(start_bad, end_bad - start_bad); + memblock_x86_reserve_range(start_bad, end_bad, "BAD RAM"); } static void __init memtest(u64 pattern, u64 start_phys, u64 size) @@ -70,19 +70,24 @@ static void __init memtest(u64 pattern, u64 start_phys, u64 size) static void __init do_one_pass(u64 pattern, u64 start, u64 end) { - u64 i; - phys_addr_t this_start, this_end; - - for_each_free_mem_range(i, MAX_NUMNODES, &this_start, &this_end, NULL) { - this_start = clamp_t(phys_addr_t, this_start, start, end); - this_end = clamp_t(phys_addr_t, this_end, start, end); - if (this_start < this_end) { - printk(KERN_INFO " %010llx - %010llx pattern %016llx\n", - (unsigned long long)this_start, - (unsigned long long)this_end, - (unsigned long long)cpu_to_be64(pattern)); - memtest(pattern, this_start, this_end - this_start); - } + u64 size = 0; + + while (start < end) { + start = memblock_x86_find_in_range_size(start, &size, 1); + + /* done ? 
*/ + if (start >= end) + break; + if (start + size > end) + size = end - start; + + printk(KERN_INFO " %010llx - %010llx pattern %016llx\n", + (unsigned long long) start, + (unsigned long long) start + size, + (unsigned long long) cpu_to_be64(pattern)); + memtest(pattern, start, size); + + start += size; } } diff --git a/trunk/arch/x86/mm/numa.c b/trunk/arch/x86/mm/numa.c index 496f494593bf..fbeaaf416610 100644 --- a/trunk/arch/x86/mm/numa.c +++ b/trunk/arch/x86/mm/numa.c @@ -192,6 +192,8 @@ int __init numa_add_memblk(int nid, u64 start, u64 end) /* Initialize NODE_DATA for a node on the local memory */ static void __init setup_node_data(int nid, u64 start, u64 end) { + const u64 nd_low = PFN_PHYS(MAX_DMA_PFN); + const u64 nd_high = PFN_PHYS(max_pfn_mapped); const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE); bool remapped = false; u64 nd_pa; @@ -222,12 +224,17 @@ static void __init setup_node_data(int nid, u64 start, u64 end) nd_pa = __pa(nd); remapped = true; } else { - nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid); - if (!nd_pa) { + nd_pa = memblock_x86_find_in_range_node(nid, nd_low, nd_high, + nd_size, SMP_CACHE_BYTES); + if (nd_pa == MEMBLOCK_ERROR) + nd_pa = memblock_find_in_range(nd_low, nd_high, + nd_size, SMP_CACHE_BYTES); + if (nd_pa == MEMBLOCK_ERROR) { pr_err("Cannot find %zu bytes in node %d\n", nd_size, nid); return; } + memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA"); nd = __va(nd_pa); } @@ -364,7 +371,8 @@ void __init numa_reset_distance(void) /* numa_distance could be 1LU marking allocation failure, test cnt */ if (numa_distance_cnt) - memblock_free(__pa(numa_distance), size); + memblock_x86_free_range(__pa(numa_distance), + __pa(numa_distance) + size); numa_distance_cnt = 0; numa_distance = NULL; /* enable table creation */ } @@ -387,13 +395,13 @@ static int __init numa_alloc_distance(void) phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped), size, PAGE_SIZE); - if (!phys) { + if (phys == MEMBLOCK_ERROR) { pr_warning("NUMA: Warning: can't allocate distance table!\n"); /* don't retry until explicitly reset */ numa_distance = (void *)1LU; return -ENOMEM; } - memblock_reserve(phys, size); + memblock_x86_reserve_range(phys, phys + size, "NUMA DIST"); numa_distance = __va(phys); numa_distance_cnt = cnt; @@ -474,8 +482,8 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi) numaram = 0; } - e820ram = max_pfn - absent_pages_in_range(0, max_pfn); - + e820ram = max_pfn - (memblock_x86_hole_size(0, + PFN_PHYS(max_pfn)) >> PAGE_SHIFT); /* We seem to lose 3 pages somewhere. Allow 1M of slack. */ if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) { printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n", @@ -497,10 +505,13 @@ static int __init numa_register_memblks(struct numa_meminfo *mi) if (WARN_ON(nodes_empty(node_possible_map))) return -EINVAL; - for (i = 0; i < mi->nr_blks; i++) { - struct numa_memblk *mb = &mi->blk[i]; - memblock_set_node(mb->start, mb->end - mb->start, mb->nid); - } + for (i = 0; i < mi->nr_blks; i++) + memblock_x86_register_active_regions(mi->blk[i].nid, + mi->blk[i].start >> PAGE_SHIFT, + mi->blk[i].end >> PAGE_SHIFT); + + /* for out of order entries */ + sort_node_map(); /* * If sections array is gonna be used for pfn -> nid mapping, check @@ -534,8 +545,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi) setup_node_data(nid, start, end); } - /* Dump memblock with node info and return. 
*/ - memblock_dump_all(); return 0; } @@ -573,7 +582,7 @@ static int __init numa_init(int (*init_func)(void)) nodes_clear(node_possible_map); nodes_clear(node_online_map); memset(&numa_meminfo, 0, sizeof(numa_meminfo)); - WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES)); + remove_all_active_ranges(); numa_reset_distance(); ret = init_func(); diff --git a/trunk/arch/x86/mm/numa_32.c b/trunk/arch/x86/mm/numa_32.c index 534255a36b6b..3adebe7e536a 100644 --- a/trunk/arch/x86/mm/numa_32.c +++ b/trunk/arch/x86/mm/numa_32.c @@ -199,23 +199,23 @@ void __init init_alloc_remap(int nid, u64 start, u64 end) /* allocate node memory and the lowmem remap area */ node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES); - if (!node_pa) { + if (node_pa == MEMBLOCK_ERROR) { pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n", size, nid); return; } - memblock_reserve(node_pa, size); + memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM"); remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT, size, LARGE_PAGE_BYTES); - if (!remap_pa) { + if (remap_pa == MEMBLOCK_ERROR) { pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n", size, nid); - memblock_free(node_pa, size); + memblock_x86_free_range(node_pa, node_pa + size); return; } - memblock_reserve(remap_pa, size); + memblock_x86_reserve_range(remap_pa, remap_pa + size, "KVA PG"); remap_va = phys_to_virt(remap_pa); /* perform actual remap */ diff --git a/trunk/arch/x86/mm/numa_64.c b/trunk/arch/x86/mm/numa_64.c index 92e27119ee1a..dd27f401f0a0 100644 --- a/trunk/arch/x86/mm/numa_64.c +++ b/trunk/arch/x86/mm/numa_64.c @@ -19,7 +19,7 @@ unsigned long __init numa_free_all_bootmem(void) for_each_online_node(i) pages += free_all_bootmem_node(NODE_DATA(i)); - pages += free_low_memory_core_early(MAX_NUMNODES); + pages += free_all_memory_core_early(MAX_NUMNODES); return pages; } diff --git a/trunk/arch/x86/mm/numa_emulation.c b/trunk/arch/x86/mm/numa_emulation.c index 46db56845f18..d0ed086b6247 100644 --- a/trunk/arch/x86/mm/numa_emulation.c +++ b/trunk/arch/x86/mm/numa_emulation.c @@ -28,16 +28,6 @@ static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi) return -ENOENT; } -static u64 mem_hole_size(u64 start, u64 end) -{ - unsigned long start_pfn = PFN_UP(start); - unsigned long end_pfn = PFN_DOWN(end); - - if (start_pfn < end_pfn) - return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn)); - return 0; -} - /* * Sets up nid to range from @start to @end. The return value is -errno if * something went wrong, 0 otherwise. @@ -99,7 +89,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei, * Calculate target node size. x86_32 freaks on __udivdi3() so do * the division in ulong number of pages and convert back. */ - size = max_addr - addr - mem_hole_size(addr, max_addr); + size = max_addr - addr - memblock_x86_hole_size(addr, max_addr); size = PFN_PHYS((unsigned long)(size >> PAGE_SHIFT) / nr_nodes); /* @@ -145,7 +135,8 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei, * Continue to add memory to this fake node if its * non-reserved memory is less than the per-node size. */ - while (end - start - mem_hole_size(start, end) < size) { + while (end - start - + memblock_x86_hole_size(start, end) < size) { end += FAKE_NODE_MIN_SIZE; if (end > limit) { end = limit; @@ -159,7 +150,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei, * this one must extend to the boundary. 
*/ if (end < dma32_end && dma32_end - end - - mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) + memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) end = dma32_end; /* @@ -167,7 +158,8 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei, * next node, this one must extend to the end of the * physical node. */ - if (limit - end - mem_hole_size(end, limit) < size) + if (limit - end - + memblock_x86_hole_size(end, limit) < size) end = limit; ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes, @@ -188,7 +180,7 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size) { u64 end = start + size; - while (end - start - mem_hole_size(start, end) < size) { + while (end - start - memblock_x86_hole_size(start, end) < size) { end += FAKE_NODE_MIN_SIZE; if (end > max_addr) { end = max_addr; @@ -219,7 +211,8 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei, * creates a uniform distribution of node sizes across the entire * machine (but not necessarily over physical nodes). */ - min_size = (max_addr - addr - mem_hole_size(addr, max_addr)) / MAX_NUMNODES; + min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / + MAX_NUMNODES; min_size = max(min_size, FAKE_NODE_MIN_SIZE); if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size) min_size = (min_size + FAKE_NODE_MIN_SIZE) & @@ -259,7 +252,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei, * this one must extend to the boundary. */ if (end < dma32_end && dma32_end - end - - mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) + memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) end = dma32_end; /* @@ -267,7 +260,8 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei, * next node, this one must extend to the end of the * physical node. 
*/ - if (limit - end - mem_hole_size(end, limit) < size) + if (limit - end - + memblock_x86_hole_size(end, limit) < size) end = limit; ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES, @@ -357,11 +351,11 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped), phys_size, PAGE_SIZE); - if (!phys) { + if (phys == MEMBLOCK_ERROR) { pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n"); goto no_emu; } - memblock_reserve(phys, phys_size); + memblock_x86_reserve_range(phys, phys + phys_size, "TMP NUMA DIST"); phys_dist = __va(phys); for (i = 0; i < numa_dist_cnt; i++) @@ -430,7 +424,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) /* free the copied physical distance table */ if (phys_dist) - memblock_free(__pa(phys_dist), phys_size); + memblock_x86_free_range(__pa(phys_dist), __pa(phys_dist) + phys_size); return; no_emu: diff --git a/trunk/arch/x86/mm/pageattr.c b/trunk/arch/x86/mm/pageattr.c index eda2acbb6e81..f9e526742fa1 100644 --- a/trunk/arch/x86/mm/pageattr.c +++ b/trunk/arch/x86/mm/pageattr.c @@ -998,7 +998,7 @@ int set_memory_uc(unsigned long addr, int numpages) } EXPORT_SYMBOL(set_memory_uc); -static int _set_memory_array(unsigned long *addr, int addrinarray, +int _set_memory_array(unsigned long *addr, int addrinarray, unsigned long new_type) { int i, j; diff --git a/trunk/arch/x86/mm/srat.c b/trunk/arch/x86/mm/srat.c index fd61b3fb7341..81dbfdeb080d 100644 --- a/trunk/arch/x86/mm/srat.c +++ b/trunk/arch/x86/mm/srat.c @@ -69,12 +69,6 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa) if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0) return; pxm = pa->proximity_domain; - apic_id = pa->apic_id; - if (!cpu_has_x2apic && (apic_id >= 0xff)) { - printk(KERN_INFO "SRAT: PXM %u -> X2APIC 0x%04x ignored\n", - pxm, apic_id); - return; - } node = setup_node(pxm); if (node < 0) { printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm); @@ -82,6 +76,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa) return; } + apic_id = pa->apic_id; if (apic_id >= MAX_LOCAL_APIC) { printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node); return; diff --git a/trunk/arch/x86/oprofile/Makefile b/trunk/arch/x86/oprofile/Makefile index 1599f568f0e2..446902b2a6b6 100644 --- a/trunk/arch/x86/oprofile/Makefile +++ b/trunk/arch/x86/oprofile/Makefile @@ -4,8 +4,9 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \ oprof.o cpu_buffer.o buffer_sync.o \ event_buffer.o oprofile_files.o \ oprofilefs.o oprofile_stats.o \ - timer_int.o nmi_timer_int.o ) + timer_int.o ) oprofile-y := $(DRIVER_OBJS) init.o backtrace.o oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_amd.o \ op_model_ppro.o op_model_p4.o +oprofile-$(CONFIG_X86_IO_APIC) += nmi_timer_int.o diff --git a/trunk/arch/x86/oprofile/init.c b/trunk/arch/x86/oprofile/init.c index 9e138d00ad36..f148cf652678 100644 --- a/trunk/arch/x86/oprofile/init.c +++ b/trunk/arch/x86/oprofile/init.c @@ -16,23 +16,37 @@ * with the NMI mode driver. 
*/ -#ifdef CONFIG_X86_LOCAL_APIC extern int op_nmi_init(struct oprofile_operations *ops); +extern int op_nmi_timer_init(struct oprofile_operations *ops); extern void op_nmi_exit(void); -#else -static int op_nmi_init(struct oprofile_operations *ops) { return -ENODEV; } -static void op_nmi_exit(void) { } -#endif - extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth); +static int nmi_timer; + int __init oprofile_arch_init(struct oprofile_operations *ops) { + int ret; + + ret = -ENODEV; + +#ifdef CONFIG_X86_LOCAL_APIC + ret = op_nmi_init(ops); +#endif + nmi_timer = (ret != 0); +#ifdef CONFIG_X86_IO_APIC + if (nmi_timer) + ret = op_nmi_timer_init(ops); +#endif ops->backtrace = x86_backtrace; - return op_nmi_init(ops); + + return ret; } + void oprofile_arch_exit(void) { - op_nmi_exit(); +#ifdef CONFIG_X86_LOCAL_APIC + if (!nmi_timer) + op_nmi_exit(); +#endif } diff --git a/trunk/arch/x86/oprofile/nmi_int.c b/trunk/arch/x86/oprofile/nmi_int.c index 26b8a8514ee5..75f9528e0372 100644 --- a/trunk/arch/x86/oprofile/nmi_int.c +++ b/trunk/arch/x86/oprofile/nmi_int.c @@ -595,36 +595,24 @@ static int __init p4_init(char **cpu_type) return 0; } -enum __force_cpu_type { - reserved = 0, /* do not force */ - timer, - arch_perfmon, -}; - -static int force_cpu_type; - -static int set_cpu_type(const char *str, struct kernel_param *kp) +static int force_arch_perfmon; +static int force_cpu_type(const char *str, struct kernel_param *kp) { - if (!strcmp(str, "timer")) { - force_cpu_type = timer; - printk(KERN_INFO "oprofile: forcing NMI timer mode\n"); - } else if (!strcmp(str, "arch_perfmon")) { - force_cpu_type = arch_perfmon; + if (!strcmp(str, "arch_perfmon")) { + force_arch_perfmon = 1; printk(KERN_INFO "oprofile: forcing architectural perfmon\n"); - } else { - force_cpu_type = 0; } return 0; } -module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0); +module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0); static int __init ppro_init(char **cpu_type) { __u8 cpu_model = boot_cpu_data.x86_model; struct op_x86_model_spec *spec = &op_ppro_spec; /* default */ - if (force_cpu_type == arch_perfmon && cpu_has_arch_perfmon) + if (force_arch_perfmon && cpu_has_arch_perfmon) return 0; /* @@ -691,9 +679,6 @@ int __init op_nmi_init(struct oprofile_operations *ops) if (!cpu_has_apic) return -ENODEV; - if (force_cpu_type == timer) - return -ENODEV; - switch (vendor) { case X86_VENDOR_AMD: /* Needs to be at least an Athlon (or hammer in 32bit mode) */ diff --git a/trunk/arch/x86/oprofile/nmi_timer_int.c b/trunk/arch/x86/oprofile/nmi_timer_int.c new file mode 100644 index 000000000000..7f8052cd6620 --- /dev/null +++ b/trunk/arch/x86/oprofile/nmi_timer_int.c @@ -0,0 +1,50 @@ +/** + * @file nmi_timer_int.c + * + * @remark Copyright 2003 OProfile authors + * @remark Read the file COPYING + * + * @author Zwane Mwaikambo + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +static int profile_timer_exceptions_notify(unsigned int val, struct pt_regs *regs) +{ + oprofile_add_sample(regs, 0); + return NMI_HANDLED; +} + +static int timer_start(void) +{ + if (register_nmi_handler(NMI_LOCAL, profile_timer_exceptions_notify, + 0, "oprofile-timer")) + return 1; + return 0; +} + + +static void timer_stop(void) +{ + unregister_nmi_handler(NMI_LOCAL, "oprofile-timer"); + synchronize_sched(); /* Allow already-started NMIs to complete. 
*/ +} + + +int __init op_nmi_timer_init(struct oprofile_operations *ops) +{ + ops->start = timer_start; + ops->stop = timer_stop; + ops->cpu_type = "timer"; + printk(KERN_INFO "oprofile: using NMI timer interrupt.\n"); + return 0; +} diff --git a/trunk/arch/x86/platform/efi/efi.c b/trunk/arch/x86/platform/efi/efi.c index 4cf9bd0a1653..37718f0f053d 100644 --- a/trunk/arch/x86/platform/efi/efi.c +++ b/trunk/arch/x86/platform/efi/efi.c @@ -238,8 +238,7 @@ static efi_status_t __init phys_efi_get_time(efi_time_t *tm, spin_lock_irqsave(&rtc_lock, flags); efi_call_phys_prelog(); - status = efi_call_phys2(efi_phys.get_time, virt_to_phys(tm), - virt_to_phys(tc)); + status = efi_call_phys2(efi_phys.get_time, tm, tc); efi_call_phys_epilog(); spin_unlock_irqrestore(&rtc_lock, flags); return status; @@ -353,7 +352,8 @@ void __init efi_memblock_x86_reserve_range(void) boot_params.efi_info.efi_memdesc_size; memmap.desc_version = boot_params.efi_info.efi_memdesc_version; memmap.desc_size = boot_params.efi_info.efi_memdesc_size; - memblock_reserve(pmap, memmap.nr_map * memmap.desc_size); + memblock_x86_reserve_range(pmap, pmap + memmap.nr_map * memmap.desc_size, + "EFI memmap"); } #if EFI_DEBUG @@ -397,14 +397,16 @@ void __init efi_reserve_boot_services(void) if ((start+size >= virt_to_phys(_text) && start <= virt_to_phys(_end)) || !e820_all_mapped(start, start+size, E820_RAM) || - memblock_is_region_reserved(start, size)) { + memblock_x86_check_reserved_size(&start, &size, + 1<num_pages = 0; memblock_dbg(PFX "Could not reserve boot range " "[0x%010llx-0x%010llx]\n", start, start+size-1); } else - memblock_reserve(start, size); + memblock_x86_reserve_range(start, start+size, + "EFI Boot"); } } diff --git a/trunk/arch/x86/tools/Makefile b/trunk/arch/x86/tools/Makefile index d511aa97533a..f82082677337 100644 --- a/trunk/arch/x86/tools/Makefile +++ b/trunk/arch/x86/tools/Makefile @@ -18,21 +18,14 @@ chkobjdump = $(srctree)/arch/x86/tools/chkobjdump.awk quiet_cmd_posttest = TEST $@ cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) -d -j .text $(objtree)/vmlinux | $(AWK) -f $(distill_awk) | $(obj)/test_get_len $(posttest_64bit) $(posttest_verbose) -quiet_cmd_sanitytest = TEST $@ - cmd_sanitytest = $(obj)/insn_sanity $(posttest_64bit) -m 1000000 - -posttest: $(obj)/test_get_len vmlinux $(obj)/insn_sanity +posttest: $(obj)/test_get_len vmlinux $(call cmd,posttest) - $(call cmd,sanitytest) -hostprogs-y += test_get_len insn_sanity +hostprogs-y := test_get_len # -I needed for generated C source and C source which in the kernel tree. HOSTCFLAGS_test_get_len.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/ -HOSTCFLAGS_insn_sanity.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/ - # Dependencies are also needed. 
$(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c -$(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c diff --git a/trunk/arch/x86/tools/gen-insn-attr-x86.awk b/trunk/arch/x86/tools/gen-insn-attr-x86.awk index 5f6a5b6c3a15..eaf11f52fc0b 100644 --- a/trunk/arch/x86/tools/gen-insn-attr-x86.awk +++ b/trunk/arch/x86/tools/gen-insn-attr-x86.awk @@ -47,7 +47,7 @@ BEGIN { sep_expr = "^\\|$" group_expr = "^Grp[0-9A-Za-z]+" - imm_expr = "^[IJAOL][a-z]" + imm_expr = "^[IJAO][a-z]" imm_flag["Ib"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)" imm_flag["Jb"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)" imm_flag["Iw"] = "INAT_MAKE_IMM(INAT_IMM_WORD)" @@ -59,7 +59,6 @@ BEGIN { imm_flag["Iv"] = "INAT_MAKE_IMM(INAT_IMM_VWORD)" imm_flag["Ob"] = "INAT_MOFFSET" imm_flag["Ov"] = "INAT_MOFFSET" - imm_flag["Lx"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)" modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])" force64_expr = "\\([df]64\\)" @@ -71,12 +70,8 @@ BEGIN { lprefix3_expr = "\\(F2\\)" max_lprefix = 4 - # All opcodes starting with lower-case 'v' or with (v1) superscript - # accepts VEX prefix - vexok_opcode_expr = "^v.*" - vexok_expr = "\\(v1\\)" - # All opcodes with (v) superscript supports *only* VEX prefix - vexonly_expr = "\\(v\\)" + vexok_expr = "\\(VEX\\)" + vexonly_expr = "\\(oVEX\\)" prefix_expr = "\\(Prefix\\)" prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ" @@ -90,8 +85,8 @@ BEGIN { prefix_num["SEG=GS"] = "INAT_PFX_GS" prefix_num["SEG=SS"] = "INAT_PFX_SS" prefix_num["Address-Size"] = "INAT_PFX_ADDRSZ" - prefix_num["VEX+1byte"] = "INAT_PFX_VEX2" - prefix_num["VEX+2byte"] = "INAT_PFX_VEX3" + prefix_num["2bytes-VEX"] = "INAT_PFX_VEX2" + prefix_num["3bytes-VEX"] = "INAT_PFX_VEX3" clear_vars() } @@ -315,10 +310,12 @@ function convert_operands(count,opnd, i,j,imm,mod) if (match(opcode, fpu_expr)) flags = add_flags(flags, "INAT_MODRM") - # check VEX codes + # check VEX only code if (match(ext, vexonly_expr)) flags = add_flags(flags, "INAT_VEXOK | INAT_VEXONLY") - else if (match(ext, vexok_expr) || match(opcode, vexok_opcode_expr)) + + # check VEX only code + if (match(ext, vexok_expr)) flags = add_flags(flags, "INAT_VEXOK") # check prefixes diff --git a/trunk/arch/x86/tools/insn_sanity.c b/trunk/arch/x86/tools/insn_sanity.c deleted file mode 100644 index cc2f8c131286..000000000000 --- a/trunk/arch/x86/tools/insn_sanity.c +++ /dev/null @@ -1,275 +0,0 @@ -/* - * x86 decoder sanity test - based on test_get_insn.c - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
- * - * Copyright (C) IBM Corporation, 2009 - * Copyright (C) Hitachi, Ltd., 2011 - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#define unlikely(cond) (cond) -#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0])) - -#include -#include -#include - -/* - * Test of instruction analysis against tampering. - * Feed random binary to instruction decoder and ensure not to - * access out-of-instruction-buffer. - */ - -#define DEFAULT_MAX_ITER 10000 -#define INSN_NOP 0x90 - -static const char *prog; /* Program name */ -static int verbose; /* Verbosity */ -static int x86_64; /* x86-64 bit mode flag */ -static unsigned int seed; /* Random seed */ -static unsigned long iter_start; /* Start of iteration number */ -static unsigned long iter_end = DEFAULT_MAX_ITER; /* End of iteration number */ -static FILE *input_file; /* Input file name */ - -static void usage(const char *err) -{ - if (err) - fprintf(stderr, "Error: %s\n\n", err); - fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog); - fprintf(stderr, "\t-y 64bit mode\n"); - fprintf(stderr, "\t-n 32bit mode\n"); - fprintf(stderr, "\t-v Verbosity(-vv dumps any decoded result)\n"); - fprintf(stderr, "\t-s Give a random seed (and iteration number)\n"); - fprintf(stderr, "\t-m Give a maximum iteration number\n"); - fprintf(stderr, "\t-i Give an input file with decoded binary\n"); - exit(1); -} - -static void dump_field(FILE *fp, const char *name, const char *indent, - struct insn_field *field) -{ - fprintf(fp, "%s.%s = {\n", indent, name); - fprintf(fp, "%s\t.value = %d, bytes[] = {%x, %x, %x, %x},\n", - indent, field->value, field->bytes[0], field->bytes[1], - field->bytes[2], field->bytes[3]); - fprintf(fp, "%s\t.got = %d, .nbytes = %d},\n", indent, - field->got, field->nbytes); -} - -static void dump_insn(FILE *fp, struct insn *insn) -{ - fprintf(fp, "Instruction = {\n"); - dump_field(fp, "prefixes", "\t", &insn->prefixes); - dump_field(fp, "rex_prefix", "\t", &insn->rex_prefix); - dump_field(fp, "vex_prefix", "\t", &insn->vex_prefix); - dump_field(fp, "opcode", "\t", &insn->opcode); - dump_field(fp, "modrm", "\t", &insn->modrm); - dump_field(fp, "sib", "\t", &insn->sib); - dump_field(fp, "displacement", "\t", &insn->displacement); - dump_field(fp, "immediate1", "\t", &insn->immediate1); - dump_field(fp, "immediate2", "\t", &insn->immediate2); - fprintf(fp, "\t.attr = %x, .opnd_bytes = %d, .addr_bytes = %d,\n", - insn->attr, insn->opnd_bytes, insn->addr_bytes); - fprintf(fp, "\t.length = %d, .x86_64 = %d, .kaddr = %p}\n", - insn->length, insn->x86_64, insn->kaddr); -} - -static void dump_stream(FILE *fp, const char *msg, unsigned long nr_iter, - unsigned char *insn_buf, struct insn *insn) -{ - int i; - - fprintf(fp, "%s:\n", msg); - - dump_insn(fp, insn); - - fprintf(fp, "You can reproduce this with below command(s);\n"); - - /* Input a decoded instruction sequence directly */ - fprintf(fp, " $ echo "); - for (i = 0; i < MAX_INSN_SIZE; i++) - fprintf(fp, " %02x", insn_buf[i]); - fprintf(fp, " | %s -i -\n", prog); - - if (!input_file) { - fprintf(fp, "Or \n"); - /* Give a seed and iteration number */ - fprintf(fp, " $ %s -s 0x%x,%lu\n", prog, seed, nr_iter); - } -} - -static void init_random_seed(void) -{ - int fd; - - fd = open("/dev/urandom", O_RDONLY); - if (fd < 0) - goto fail; - - if (read(fd, &seed, sizeof(seed)) != sizeof(seed)) - goto fail; - - close(fd); - return; -fail: - usage("Failed to open /dev/urandom"); -} - -/* Read given instruction sequence from the input file */ 
-static int read_next_insn(unsigned char *insn_buf) -{ - char buf[256] = "", *tmp; - int i; - - tmp = fgets(buf, ARRAY_SIZE(buf), input_file); - if (tmp == NULL || feof(input_file)) - return 0; - - for (i = 0; i < MAX_INSN_SIZE; i++) { - insn_buf[i] = (unsigned char)strtoul(tmp, &tmp, 16); - if (*tmp != ' ') - break; - } - - return i; -} - -static int generate_insn(unsigned char *insn_buf) -{ - int i; - - if (input_file) - return read_next_insn(insn_buf); - - /* Fills buffer with random binary up to MAX_INSN_SIZE */ - for (i = 0; i < MAX_INSN_SIZE - 1; i += 2) - *(unsigned short *)(&insn_buf[i]) = random() & 0xffff; - - while (i < MAX_INSN_SIZE) - insn_buf[i++] = random() & 0xff; - - return i; -} - -static void parse_args(int argc, char **argv) -{ - int c; - char *tmp = NULL; - int set_seed = 0; - - prog = argv[0]; - while ((c = getopt(argc, argv, "ynvs:m:i:")) != -1) { - switch (c) { - case 'y': - x86_64 = 1; - break; - case 'n': - x86_64 = 0; - break; - case 'v': - verbose++; - break; - case 'i': - if (strcmp("-", optarg) == 0) - input_file = stdin; - else - input_file = fopen(optarg, "r"); - if (!input_file) - usage("Failed to open input file"); - break; - case 's': - seed = (unsigned int)strtoul(optarg, &tmp, 0); - if (*tmp == ',') { - optarg = tmp + 1; - iter_start = strtoul(optarg, &tmp, 0); - } - if (*tmp != '\0' || tmp == optarg) - usage("Failed to parse seed"); - set_seed = 1; - break; - case 'm': - iter_end = strtoul(optarg, &tmp, 0); - if (*tmp != '\0' || tmp == optarg) - usage("Failed to parse max_iter"); - break; - default: - usage(NULL); - } - } - - /* Check errors */ - if (iter_end < iter_start) - usage("Max iteration number must be bigger than iter-num"); - - if (set_seed && input_file) - usage("Don't use input file (-i) with random seed (-s)"); - - /* Initialize random seed */ - if (!input_file) { - if (!set_seed) /* No seed is given */ - init_random_seed(); - srand(seed); - } -} - -int main(int argc, char **argv) -{ - struct insn insn; - int insns = 0; - int errors = 0; - unsigned long i; - unsigned char insn_buf[MAX_INSN_SIZE * 2]; - - parse_args(argc, argv); - - /* Prepare stop bytes with NOPs */ - memset(insn_buf + MAX_INSN_SIZE, INSN_NOP, MAX_INSN_SIZE); - - for (i = 0; i < iter_end; i++) { - if (generate_insn(insn_buf) <= 0) - break; - - if (i < iter_start) /* Skip to given iteration number */ - continue; - - /* Decode an instruction */ - insn_init(&insn, insn_buf, x86_64); - insn_get_length(&insn); - - if (insn.next_byte <= insn.kaddr || - insn.kaddr + MAX_INSN_SIZE < insn.next_byte) { - /* Access out-of-range memory */ - dump_stream(stderr, "Error: Found an access violation", i, insn_buf, &insn); - errors++; - } else if (verbose && !insn_complete(&insn)) - dump_stream(stdout, "Info: Found an undecodable input", i, insn_buf, &insn); - else if (verbose >= 2) - dump_insn(stdout, &insn); - insns++; - } - - fprintf(stdout, "%s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", (errors) ? "Failure" : "Success", insns, (input_file) ? "given" : "random", errors, seed); - - return errors ? 
1 : 0; -} diff --git a/trunk/arch/x86/xen/enlighten.c b/trunk/arch/x86/xen/enlighten.c index 12eb07bfb267..1f928659c338 100644 --- a/trunk/arch/x86/xen/enlighten.c +++ b/trunk/arch/x86/xen/enlighten.c @@ -1215,6 +1215,8 @@ asmlinkage void __init xen_start_kernel(void) local_irq_disable(); early_boot_irqs_disabled = true; + memblock_init(); + xen_raw_console_write("mapping kernel into physical memory\n"); pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages); xen_ident_map_ISA(); diff --git a/trunk/arch/x86/xen/mmu.c b/trunk/arch/x86/xen/mmu.c index f4bf8aa574f4..87f6673b1207 100644 --- a/trunk/arch/x86/xen/mmu.c +++ b/trunk/arch/x86/xen/mmu.c @@ -1774,8 +1774,10 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, __xen_write_cr3(true, __pa(pgd)); xen_mc_issue(PARAVIRT_LAZY_CPU); - memblock_reserve(__pa(xen_start_info->pt_base), - xen_start_info->nr_pt_frames * PAGE_SIZE); + memblock_x86_reserve_range(__pa(xen_start_info->pt_base), + __pa(xen_start_info->pt_base + + xen_start_info->nr_pt_frames * PAGE_SIZE), + "XEN PAGETABLES"); return pgd; } @@ -1851,8 +1853,10 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, PFN_DOWN(__pa(initial_page_table))); xen_write_cr3(__pa(initial_page_table)); - memblock_reserve(__pa(xen_start_info->pt_base), - xen_start_info->nr_pt_frames * PAGE_SIZE)); + memblock_x86_reserve_range(__pa(xen_start_info->pt_base), + __pa(xen_start_info->pt_base + + xen_start_info->nr_pt_frames * PAGE_SIZE), + "XEN PAGETABLES"); return initial_page_table; } diff --git a/trunk/arch/x86/xen/setup.c b/trunk/arch/x86/xen/setup.c index e03c63692176..b2c7179fa263 100644 --- a/trunk/arch/x86/xen/setup.c +++ b/trunk/arch/x86/xen/setup.c @@ -75,7 +75,7 @@ static void __init xen_add_extra_mem(u64 start, u64 size) if (i == XEN_EXTRA_MEM_MAX_REGIONS) printk(KERN_WARNING "Warning: not enough extra memory regions\n"); - memblock_reserve(start, size); + memblock_x86_reserve_range(start, start + size, "XEN EXTRA"); xen_max_p2m_pfn = PFN_DOWN(start + size); @@ -311,8 +311,9 @@ char * __init xen_memory_setup(void) * - xen_start_info * See comment above "struct start_info" in */ - memblock_reserve(__pa(xen_start_info->mfn_list), - xen_start_info->pt_base - xen_start_info->mfn_list); + memblock_x86_reserve_range(__pa(xen_start_info->mfn_list), + __pa(xen_start_info->pt_base), + "XEN START INFO"); sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); diff --git a/trunk/arch/xtensa/include/asm/socket.h b/trunk/arch/xtensa/include/asm/socket.h index bb06968be227..cbdf2ffaacff 100644 --- a/trunk/arch/xtensa/include/asm/socket.h +++ b/trunk/arch/xtensa/include/asm/socket.h @@ -73,7 +73,4 @@ #define SO_RXQ_OVFL 40 -#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS - #endif /* _XTENSA_SOCKET_H */ diff --git a/trunk/arch/xtensa/kernel/time.c b/trunk/arch/xtensa/kernel/time.c index ac62f9cf1e10..f3e5eb43f71c 100644 --- a/trunk/arch/xtensa/kernel/time.c +++ b/trunk/arch/xtensa/kernel/time.c @@ -41,6 +41,14 @@ static struct clocksource ccount_clocksource = { .rating = 200, .read = ccount_read, .mask = CLOCKSOURCE_MASK(32), + /* + * With a shift of 22 the lower limit of the cpu clock is + * 1MHz, where NSEC_PER_CCOUNT is 1000 or a bit less than + * 2^10: Since we have 32 bits and the multiplicator can + * already take up as much as 10 bits, this leaves us with + * remaining upper 22 bits. 
+ */ + .shift = 22, }; static irqreturn_t timer_interrupt(int irq, void *dev_id); @@ -58,7 +66,10 @@ void __init time_init(void) printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ), (int)(ccount_per_jiffy/(10000/HZ))%100); #endif - clocksource_register_hz(&ccount_clocksource, CCOUNT_PER_JIFFY * HZ); + ccount_clocksource.mult = + clocksource_hz2mult(CCOUNT_PER_JIFFY * HZ, + ccount_clocksource.shift); + clocksource_register(&ccount_clocksource); /* Initialize the linux timer interrupt. */ diff --git a/trunk/block/blk-map.c b/trunk/block/blk-map.c index 623e1cd4cffe..164cd0059706 100644 --- a/trunk/block/blk-map.c +++ b/trunk/block/blk-map.c @@ -311,7 +311,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, if (IS_ERR(bio)) return PTR_ERR(bio); - if (!reading) + if (rq_data_dir(rq) == WRITE) bio->bi_rw |= REQ_WRITE; if (do_copy) diff --git a/trunk/block/blk-tag.c b/trunk/block/blk-tag.c index 4af6f5cc1167..e74d6d13838f 100644 --- a/trunk/block/blk-tag.c +++ b/trunk/block/blk-tag.c @@ -282,9 +282,18 @@ EXPORT_SYMBOL(blk_queue_resize_tags); void blk_queue_end_tag(struct request_queue *q, struct request *rq) { struct blk_queue_tag *bqt = q->queue_tags; - unsigned tag = rq->tag; /* negative tags invalid */ + int tag = rq->tag; - BUG_ON(tag >= bqt->real_max_depth); + BUG_ON(tag == -1); + + if (unlikely(tag >= bqt->max_depth)) { + /* + * This can happen after tag depth has been reduced. + * But tag shouldn't be larger than real_max_depth. + */ + WARN_ON(tag >= bqt->real_max_depth); + return; + } list_del_init(&rq->queuelist); rq->cmd_flags &= ~REQ_QUEUED; diff --git a/trunk/block/cfq-iosched.c b/trunk/block/cfq-iosched.c index 3548705b04e4..4c12869fcf77 100644 --- a/trunk/block/cfq-iosched.c +++ b/trunk/block/cfq-iosched.c @@ -1655,8 +1655,6 @@ cfq_merged_requests(struct request_queue *q, struct request *rq, struct request *next) { struct cfq_queue *cfqq = RQ_CFQQ(rq); - struct cfq_data *cfqd = q->elevator->elevator_data; - /* * reposition in fifo if next is older than rq */ @@ -1671,16 +1669,6 @@ cfq_merged_requests(struct request_queue *q, struct request *rq, cfq_remove_request(next); cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(next), rq_is_sync(next)); - - cfqq = RQ_CFQQ(next); - /* - * all requests of this queue are merged to other queues, delete it - * from the service tree. If it's the active_queue, - * cfq_dispatch_requests() will choose to expire it or do idle - */ - if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) && - cfqq != cfqd->active_queue) - cfq_del_cfqq_rr(cfqd, cfqq); } static int cfq_allow_merge(struct request_queue *q, struct request *rq, diff --git a/trunk/block/ioctl.c b/trunk/block/ioctl.c index d510c2a4eff8..ca939fc1030f 100644 --- a/trunk/block/ioctl.c +++ b/trunk/block/ioctl.c @@ -179,26 +179,6 @@ int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode, */ EXPORT_SYMBOL_GPL(__blkdev_driver_ioctl); -/* - * Is it an unrecognized ioctl? The correct returns are either - * ENOTTY (final) or ENOIOCTLCMD ("I don't know this one, try a - * fallback"). ENOIOCTLCMD gets turned into ENOTTY by the ioctl - * code before returning. - * - * Confused drivers sometimes return EINVAL, which is wrong. It - * means "I understood the ioctl command, but the parameters to - * it were wrong". - * - * We should aim to just fix the broken drivers, the EINVAL case - * should go away. 
- */ -static inline int is_unrecognized_ioctl(int ret) -{ - return ret == -EINVAL || - ret == -ENOTTY || - ret == -ENOIOCTLCMD; -} - /* * always keep this in sync with compat_blkdev_ioctl() */ @@ -216,7 +196,8 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, return -EACCES; ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg); - if (!is_unrecognized_ioctl(ret)) + /* -EINVAL to handle old uncorrected drivers */ + if (ret != -EINVAL && ret != -ENOTTY) return ret; fsync_bdev(bdev); @@ -225,7 +206,8 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, case BLKROSET: ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg); - if (!is_unrecognized_ioctl(ret)) + /* -EINVAL to handle old uncorrected drivers */ + if (ret != -EINVAL && ret != -ENOTTY) return ret; if (!capable(CAP_SYS_ADMIN)) return -EACCES; diff --git a/trunk/drivers/atm/iphase.c b/trunk/drivers/atm/iphase.c index 9e373ba20308..3d0c2b0fed9c 100644 --- a/trunk/drivers/atm/iphase.c +++ b/trunk/drivers/atm/iphase.c @@ -1320,8 +1320,8 @@ static void rx_dle_intr(struct atm_dev *dev) if (ia_vcc == NULL) { atomic_inc(&vcc->stats->rx_err); - atm_return(vcc, skb->truesize); dev_kfree_skb_any(skb); + atm_return(vcc, atm_guess_pdu2truesize(len)); goto INCR_DLE; } // get real pkt length pwang_test @@ -1334,8 +1334,8 @@ static void rx_dle_intr(struct atm_dev *dev) atomic_inc(&vcc->stats->rx_err); IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)", length, skb->len);) - atm_return(vcc, skb->truesize); dev_kfree_skb_any(skb); + atm_return(vcc, atm_guess_pdu2truesize(len)); goto INCR_DLE; } skb_trim(skb, length); diff --git a/trunk/drivers/base/cpu.c b/trunk/drivers/base/cpu.c index 3991502b21e5..251acea3d359 100644 --- a/trunk/drivers/base/cpu.c +++ b/trunk/drivers/base/cpu.c @@ -247,13 +247,6 @@ struct sys_device *get_cpu_sysdev(unsigned cpu) } EXPORT_SYMBOL_GPL(get_cpu_sysdev); -bool cpu_is_hotpluggable(unsigned cpu) -{ - struct sys_device *dev = get_cpu_sysdev(cpu); - return dev && container_of(dev, struct cpu, sysdev)->hotpluggable; -} -EXPORT_SYMBOL_GPL(cpu_is_hotpluggable); - int __init cpu_dev_init(void) { int err; diff --git a/trunk/drivers/bcma/bcma_private.h b/trunk/drivers/bcma/bcma_private.h index fda56bde36b8..30a3085d3354 100644 --- a/trunk/drivers/bcma/bcma_private.h +++ b/trunk/drivers/bcma/bcma_private.h @@ -18,9 +18,6 @@ void bcma_bus_unregister(struct bcma_bus *bus); int __init bcma_bus_early_register(struct bcma_bus *bus, struct bcma_device *core_cc, struct bcma_device *core_mips); -#ifdef CONFIG_PM -int bcma_bus_resume(struct bcma_bus *bus); -#endif /* scan.c */ int bcma_bus_scan(struct bcma_bus *bus); diff --git a/trunk/drivers/bcma/host_pci.c b/trunk/drivers/bcma/host_pci.c index 443b83a2fd7a..1b51d8b7ac80 100644 --- a/trunk/drivers/bcma/host_pci.c +++ b/trunk/drivers/bcma/host_pci.c @@ -21,58 +21,48 @@ static void bcma_host_pci_switch_core(struct bcma_device *core) pr_debug("Switched to core: 0x%X\n", core->id.id); } -/* Provides access to the requested core. Returns base offset that has to be - * used. It makes use of fixed windows when possible. 
*/ -static u16 bcma_host_pci_provide_access_to_core(struct bcma_device *core) +static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset) { - switch (core->id.id) { - case BCMA_CORE_CHIPCOMMON: - return 3 * BCMA_CORE_SIZE; - case BCMA_CORE_PCIE: - return 2 * BCMA_CORE_SIZE; - } - if (core->bus->mapped_core != core) bcma_host_pci_switch_core(core); - return 0; -} - -static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset) -{ - offset += bcma_host_pci_provide_access_to_core(core); return ioread8(core->bus->mmio + offset); } static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset) { - offset += bcma_host_pci_provide_access_to_core(core); + if (core->bus->mapped_core != core) + bcma_host_pci_switch_core(core); return ioread16(core->bus->mmio + offset); } static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset) { - offset += bcma_host_pci_provide_access_to_core(core); + if (core->bus->mapped_core != core) + bcma_host_pci_switch_core(core); return ioread32(core->bus->mmio + offset); } static void bcma_host_pci_write8(struct bcma_device *core, u16 offset, u8 value) { - offset += bcma_host_pci_provide_access_to_core(core); + if (core->bus->mapped_core != core) + bcma_host_pci_switch_core(core); iowrite8(value, core->bus->mmio + offset); } static void bcma_host_pci_write16(struct bcma_device *core, u16 offset, u16 value) { - offset += bcma_host_pci_provide_access_to_core(core); + if (core->bus->mapped_core != core) + bcma_host_pci_switch_core(core); iowrite16(value, core->bus->mmio + offset); } static void bcma_host_pci_write32(struct bcma_device *core, u16 offset, u32 value) { - offset += bcma_host_pci_provide_access_to_core(core); + if (core->bus->mapped_core != core) + bcma_host_pci_switch_core(core); iowrite32(value, core->bus->mmio + offset); } @@ -234,41 +224,6 @@ static void bcma_host_pci_remove(struct pci_dev *dev) pci_set_drvdata(dev, NULL); } -#ifdef CONFIG_PM -static int bcma_host_pci_suspend(struct pci_dev *dev, pm_message_t state) -{ - /* Host specific */ - pci_save_state(dev); - pci_disable_device(dev); - pci_set_power_state(dev, pci_choose_state(dev, state)); - - return 0; -} - -static int bcma_host_pci_resume(struct pci_dev *dev) -{ - struct bcma_bus *bus = pci_get_drvdata(dev); - int err; - - /* Host specific */ - pci_set_power_state(dev, 0); - err = pci_enable_device(dev); - if (err) - return err; - pci_restore_state(dev); - - /* Bus specific */ - err = bcma_bus_resume(bus); - if (err) - return err; - - return 0; -} -#else /* CONFIG_PM */ -# define bcma_host_pci_suspend NULL -# define bcma_host_pci_resume NULL -#endif /* CONFIG_PM */ - static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) }, @@ -284,8 +239,6 @@ static struct pci_driver bcma_pci_bridge_driver = { .id_table = bcma_pci_bridge_tbl, .probe = bcma_host_pci_probe, .remove = bcma_host_pci_remove, - .suspend = bcma_host_pci_suspend, - .resume = bcma_host_pci_resume, }; int __init bcma_host_pci_init(void) diff --git a/trunk/drivers/bcma/main.c b/trunk/drivers/bcma/main.c index 10f92b371e58..70c84b951098 100644 --- a/trunk/drivers/bcma/main.c +++ b/trunk/drivers/bcma/main.c @@ -240,22 +240,6 @@ int __init bcma_bus_early_register(struct bcma_bus *bus, return 0; } -#ifdef CONFIG_PM -int bcma_bus_resume(struct bcma_bus *bus) -{ - struct bcma_device *core; - - /* Init CC core */ - core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON); - if (core) { - bus->drv_cc.setup_done = false; - 
bcma_core_chipcommon_init(&bus->drv_cc); - } - - return 0; -} -#endif - int __bcma_driver_register(struct bcma_driver *drv, struct module *owner) { drv->drv.name = drv->name; diff --git a/trunk/drivers/bcma/sprom.c b/trunk/drivers/bcma/sprom.c index 6f230fb087c5..d7292390d236 100644 --- a/trunk/drivers/bcma/sprom.c +++ b/trunk/drivers/bcma/sprom.c @@ -129,9 +129,6 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom) u16 v; int i; - bus->sprom.revision = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] & - SSB_SPROM_REVISION_REV; - for (i = 0; i < 3; i++) { v = sprom[SPOFF(SSB_SPROM8_IL0MAC) + i]; *(((__be16 *)bus->sprom.il0mac) + i) = cpu_to_be16(v); @@ -139,70 +136,12 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom) bus->sprom.board_rev = sprom[SPOFF(SSB_SPROM8_BOARDREV)]; - bus->sprom.txpid2g[0] = (sprom[SPOFF(SSB_SPROM4_TXPID2G01)] & - SSB_SPROM4_TXPID2G0) >> SSB_SPROM4_TXPID2G0_SHIFT; - bus->sprom.txpid2g[1] = (sprom[SPOFF(SSB_SPROM4_TXPID2G01)] & - SSB_SPROM4_TXPID2G1) >> SSB_SPROM4_TXPID2G1_SHIFT; - bus->sprom.txpid2g[2] = (sprom[SPOFF(SSB_SPROM4_TXPID2G23)] & - SSB_SPROM4_TXPID2G2) >> SSB_SPROM4_TXPID2G2_SHIFT; - bus->sprom.txpid2g[3] = (sprom[SPOFF(SSB_SPROM4_TXPID2G23)] & - SSB_SPROM4_TXPID2G3) >> SSB_SPROM4_TXPID2G3_SHIFT; - - bus->sprom.txpid5gl[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL01)] & - SSB_SPROM4_TXPID5GL0) >> SSB_SPROM4_TXPID5GL0_SHIFT; - bus->sprom.txpid5gl[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL01)] & - SSB_SPROM4_TXPID5GL1) >> SSB_SPROM4_TXPID5GL1_SHIFT; - bus->sprom.txpid5gl[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL23)] & - SSB_SPROM4_TXPID5GL2) >> SSB_SPROM4_TXPID5GL2_SHIFT; - bus->sprom.txpid5gl[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL23)] & - SSB_SPROM4_TXPID5GL3) >> SSB_SPROM4_TXPID5GL3_SHIFT; - - bus->sprom.txpid5g[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5G01)] & - SSB_SPROM4_TXPID5G0) >> SSB_SPROM4_TXPID5G0_SHIFT; - bus->sprom.txpid5g[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5G01)] & - SSB_SPROM4_TXPID5G1) >> SSB_SPROM4_TXPID5G1_SHIFT; - bus->sprom.txpid5g[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5G23)] & - SSB_SPROM4_TXPID5G2) >> SSB_SPROM4_TXPID5G2_SHIFT; - bus->sprom.txpid5g[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5G23)] & - SSB_SPROM4_TXPID5G3) >> SSB_SPROM4_TXPID5G3_SHIFT; - - bus->sprom.txpid5gh[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH01)] & - SSB_SPROM4_TXPID5GH0) >> SSB_SPROM4_TXPID5GH0_SHIFT; - bus->sprom.txpid5gh[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH01)] & - SSB_SPROM4_TXPID5GH1) >> SSB_SPROM4_TXPID5GH1_SHIFT; - bus->sprom.txpid5gh[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH23)] & - SSB_SPROM4_TXPID5GH2) >> SSB_SPROM4_TXPID5GH2_SHIFT; - bus->sprom.txpid5gh[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH23)] & - SSB_SPROM4_TXPID5GH3) >> SSB_SPROM4_TXPID5GH3_SHIFT; - bus->sprom.boardflags_lo = sprom[SPOFF(SSB_SPROM8_BFLLO)]; bus->sprom.boardflags_hi = sprom[SPOFF(SSB_SPROM8_BFLHI)]; bus->sprom.boardflags2_lo = sprom[SPOFF(SSB_SPROM8_BFL2LO)]; bus->sprom.boardflags2_hi = sprom[SPOFF(SSB_SPROM8_BFL2HI)]; bus->sprom.country_code = sprom[SPOFF(SSB_SPROM8_CCODE)]; - - bus->sprom.fem.ghz2.tssipos = (sprom[SPOFF(SSB_SPROM8_FEM2G)] & - SSB_SROM8_FEM_TSSIPOS) >> SSB_SROM8_FEM_TSSIPOS_SHIFT; - bus->sprom.fem.ghz2.extpa_gain = (sprom[SPOFF(SSB_SPROM8_FEM2G)] & - SSB_SROM8_FEM_EXTPA_GAIN) >> SSB_SROM8_FEM_EXTPA_GAIN_SHIFT; - bus->sprom.fem.ghz2.pdet_range = (sprom[SPOFF(SSB_SPROM8_FEM2G)] & - SSB_SROM8_FEM_PDET_RANGE) >> SSB_SROM8_FEM_PDET_RANGE_SHIFT; - bus->sprom.fem.ghz2.tr_iso = (sprom[SPOFF(SSB_SPROM8_FEM2G)] & - SSB_SROM8_FEM_TR_ISO) >> SSB_SROM8_FEM_TR_ISO_SHIFT; - 
bus->sprom.fem.ghz2.antswlut = (sprom[SPOFF(SSB_SPROM8_FEM2G)] & - SSB_SROM8_FEM_ANTSWLUT) >> SSB_SROM8_FEM_ANTSWLUT_SHIFT; - - bus->sprom.fem.ghz5.tssipos = (sprom[SPOFF(SSB_SPROM8_FEM5G)] & - SSB_SROM8_FEM_TSSIPOS) >> SSB_SROM8_FEM_TSSIPOS_SHIFT; - bus->sprom.fem.ghz5.extpa_gain = (sprom[SPOFF(SSB_SPROM8_FEM5G)] & - SSB_SROM8_FEM_EXTPA_GAIN) >> SSB_SROM8_FEM_EXTPA_GAIN_SHIFT; - bus->sprom.fem.ghz5.pdet_range = (sprom[SPOFF(SSB_SPROM8_FEM5G)] & - SSB_SROM8_FEM_PDET_RANGE) >> SSB_SROM8_FEM_PDET_RANGE_SHIFT; - bus->sprom.fem.ghz5.tr_iso = (sprom[SPOFF(SSB_SPROM8_FEM5G)] & - SSB_SROM8_FEM_TR_ISO) >> SSB_SROM8_FEM_TR_ISO_SHIFT; - bus->sprom.fem.ghz5.antswlut = (sprom[SPOFF(SSB_SPROM8_FEM5G)] & - SSB_SROM8_FEM_ANTSWLUT) >> SSB_SROM8_FEM_ANTSWLUT_SHIFT; } int bcma_sprom_get(struct bcma_bus *bus) diff --git a/trunk/drivers/bluetooth/ath3k.c b/trunk/drivers/bluetooth/ath3k.c index 1622772f802d..106beb194f3c 100644 --- a/trunk/drivers/bluetooth/ath3k.c +++ b/trunk/drivers/bluetooth/ath3k.c @@ -30,7 +30,6 @@ #include #define VERSION "1.0" -#define ATH3K_FIRMWARE "ath3k-1.fw" #define ATH3K_DNLOAD 0x01 #define ATH3K_GETSTATE 0x05 @@ -401,15 +400,9 @@ static int ath3k_probe(struct usb_interface *intf, return 0; } - ret = request_firmware(&firmware, ATH3K_FIRMWARE, &udev->dev); - if (ret < 0) { - if (ret == -ENOENT) - BT_ERR("Firmware file \"%s\" not found", - ATH3K_FIRMWARE); - else - BT_ERR("Firmware file \"%s\" request failed (err=%d)", - ATH3K_FIRMWARE, ret); - return ret; + if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) { + BT_ERR("Error loading firmware"); + return -EIO; } ret = ath3k_load_firmware(udev, firmware); @@ -448,4 +441,4 @@ MODULE_AUTHOR("Atheros Communications"); MODULE_DESCRIPTION("Atheros AR30xx firmware driver"); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); -MODULE_FIRMWARE(ATH3K_FIRMWARE); +MODULE_FIRMWARE("ath3k-1.fw"); diff --git a/trunk/drivers/bluetooth/bfusb.c b/trunk/drivers/bluetooth/bfusb.c index a936763b8c3d..61b591470a90 100644 --- a/trunk/drivers/bluetooth/bfusb.c +++ b/trunk/drivers/bluetooth/bfusb.c @@ -751,7 +751,9 @@ static void bfusb_disconnect(struct usb_interface *intf) bfusb_close(hdev); - hci_unregister_dev(hdev); + if (hci_unregister_dev(hdev) < 0) + BT_ERR("Can't unregister HCI device %s", hdev->name); + hci_free_dev(hdev); } diff --git a/trunk/drivers/bluetooth/bluecard_cs.c b/trunk/drivers/bluetooth/bluecard_cs.c index c6a0c6103743..aed1904ea67b 100644 --- a/trunk/drivers/bluetooth/bluecard_cs.c +++ b/trunk/drivers/bluetooth/bluecard_cs.c @@ -844,7 +844,9 @@ static int bluecard_close(bluecard_info_t *info) /* Turn FPGA off */ outb(0x80, iobase + 0x30); - hci_unregister_dev(hdev); + if (hci_unregister_dev(hdev) < 0) + BT_ERR("Can't unregister HCI device %s", hdev->name); + hci_free_dev(hdev); return 0; diff --git a/trunk/drivers/bluetooth/bt3c_cs.c b/trunk/drivers/bluetooth/bt3c_cs.c index 0c97e5d514b6..4fc01949d399 100644 --- a/trunk/drivers/bluetooth/bt3c_cs.c +++ b/trunk/drivers/bluetooth/bt3c_cs.c @@ -636,7 +636,9 @@ static int bt3c_close(bt3c_info_t *info) bt3c_hci_close(hdev); - hci_unregister_dev(hdev); + if (hci_unregister_dev(hdev) < 0) + BT_ERR("Can't unregister HCI device %s", hdev->name); + hci_free_dev(hdev); return 0; diff --git a/trunk/drivers/bluetooth/btuart_cs.c b/trunk/drivers/bluetooth/btuart_cs.c index 200b3a2877d6..526b61807d94 100644 --- a/trunk/drivers/bluetooth/btuart_cs.c +++ b/trunk/drivers/bluetooth/btuart_cs.c @@ -565,7 +565,9 @@ static int btuart_close(btuart_info_t *info) spin_unlock_irqrestore(&(info->lock), 
flags); - hci_unregister_dev(hdev); + if (hci_unregister_dev(hdev) < 0) + BT_ERR("Can't unregister HCI device %s", hdev->name); + hci_free_dev(hdev); return 0; diff --git a/trunk/drivers/bluetooth/btusb.c b/trunk/drivers/bluetooth/btusb.c index fbfba802a3d7..eabc437ce500 100644 --- a/trunk/drivers/bluetooth/btusb.c +++ b/trunk/drivers/bluetooth/btusb.c @@ -101,7 +101,6 @@ static struct usb_device_id btusb_table[] = { { USB_DEVICE(0x0c10, 0x0000) }, /* Broadcom BCM20702A0 */ - { USB_DEVICE(0x0a5c, 0x21e3) }, { USB_DEVICE(0x413c, 0x8197) }, { } /* Terminating entry */ @@ -316,8 +315,7 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags) err = usb_submit_urb(urb, mem_flags); if (err < 0) { - if (err != -EPERM && err != -ENODEV) - BT_ERR("%s urb %p submission failed (%d)", + BT_ERR("%s urb %p submission failed (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } @@ -402,8 +400,7 @@ static int btusb_submit_bulk_urb(struct hci_dev *hdev, gfp_t mem_flags) err = usb_submit_urb(urb, mem_flags); if (err < 0) { - if (err != -EPERM && err != -ENODEV) - BT_ERR("%s urb %p submission failed (%d)", + BT_ERR("%s urb %p submission failed (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } @@ -509,10 +506,15 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags) pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress); - usb_fill_int_urb(urb, data->udev, pipe, buf, size, btusb_isoc_complete, - hdev, data->isoc_rx_ep->bInterval); + urb->dev = data->udev; + urb->pipe = pipe; + urb->context = hdev; + urb->complete = btusb_isoc_complete; + urb->interval = data->isoc_rx_ep->bInterval; urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP; + urb->transfer_buffer = buf; + urb->transfer_buffer_length = size; __fill_isoc_descriptor(urb, size, le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize)); @@ -521,8 +523,7 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags) err = usb_submit_urb(urb, mem_flags); if (err < 0) { - if (err != -EPERM && err != -ENODEV) - BT_ERR("%s urb %p submission failed (%d)", + BT_ERR("%s urb %p submission failed (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } @@ -726,9 +727,6 @@ static int btusb_send_frame(struct sk_buff *skb) usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, skb->len, btusb_tx_complete, skb); - if (skb->priority >= HCI_PRIO_MAX - 1) - urb->transfer_flags = URB_ISO_ASAP; - hdev->stat.acl_tx++; break; @@ -772,9 +770,7 @@ static int btusb_send_frame(struct sk_buff *skb) err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { - if (err != -EPERM && err != -ENODEV) - BT_ERR("%s urb %p submission failed (%d)", - hdev->name, urb, -err); + BT_ERR("%s urb %p submission failed", hdev->name, urb); kfree(urb->setup_packet); usb_unanchor_urb(urb); } else { diff --git a/trunk/drivers/bluetooth/dtl1_cs.c b/trunk/drivers/bluetooth/dtl1_cs.c index 969bb22e493f..5e4c2de9fc3f 100644 --- a/trunk/drivers/bluetooth/dtl1_cs.c +++ b/trunk/drivers/bluetooth/dtl1_cs.c @@ -551,7 +551,9 @@ static int dtl1_close(dtl1_info_t *info) spin_unlock_irqrestore(&(info->lock), flags); - hci_unregister_dev(hdev); + if (hci_unregister_dev(hdev) < 0) + BT_ERR("Can't unregister HCI device %s", hdev->name); + hci_free_dev(hdev); return 0; diff --git a/trunk/drivers/bluetooth/hci_vhci.c b/trunk/drivers/bluetooth/hci_vhci.c index 2ed6ab1c6e1b..67c180c2c1e0 100644 --- a/trunk/drivers/bluetooth/hci_vhci.c +++ b/trunk/drivers/bluetooth/hci_vhci.c @@ -41,8 +41,6 @@ #define VERSION "1.3" -static bool amp; - struct vhci_data { struct hci_dev 
*hdev; @@ -241,9 +239,6 @@ static int vhci_open(struct inode *inode, struct file *file) hdev->bus = HCI_VIRTUAL; hdev->driver_data = data; - if (amp) - hdev->dev_type = HCI_AMP; - hdev->open = vhci_open_dev; hdev->close = vhci_close_dev; hdev->flush = vhci_flush; @@ -269,7 +264,10 @@ static int vhci_release(struct inode *inode, struct file *file) struct vhci_data *data = file->private_data; struct hci_dev *hdev = data->hdev; - hci_unregister_dev(hdev); + if (hci_unregister_dev(hdev) < 0) { + BT_ERR("Can't unregister HCI device %s", hdev->name); + } + hci_free_dev(hdev); file->private_data = NULL; @@ -308,9 +306,6 @@ static void __exit vhci_exit(void) module_init(vhci_init); module_exit(vhci_exit); -module_param(amp, bool, 0644); -MODULE_PARM_DESC(amp, "Create AMP controller device"); - MODULE_AUTHOR("Marcel Holtmann "); MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION); MODULE_VERSION(VERSION); diff --git a/trunk/drivers/char/random.c b/trunk/drivers/char/random.c index 85da8740586b..6035ab8d5ef7 100644 --- a/trunk/drivers/char/random.c +++ b/trunk/drivers/char/random.c @@ -624,8 +624,8 @@ static struct timer_rand_state input_timer_state; static void add_timer_randomness(struct timer_rand_state *state, unsigned num) { struct { + cycles_t cycles; long jiffies; - unsigned cycles; unsigned num; } sample; long delta, delta2, delta3; @@ -637,11 +637,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) goto out; sample.jiffies = jiffies; - - /* Use arch random value, fall back to cycles */ - if (!arch_get_random_int(&sample.cycles)) - sample.cycles = get_cycles(); - + sample.cycles = get_cycles(); sample.num = num; mix_pool_bytes(&input_pool, &sample, sizeof(sample)); diff --git a/trunk/drivers/clocksource/acpi_pm.c b/trunk/drivers/clocksource/acpi_pm.c index 6b5cf02c35c8..effe7974aa9a 100644 --- a/trunk/drivers/clocksource/acpi_pm.c +++ b/trunk/drivers/clocksource/acpi_pm.c @@ -143,7 +143,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE, #ifndef CONFIG_X86_64 #include #define PMTMR_EXPECTED_RATE \ - ((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (PIT_TICK_RATE>>10)) + ((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (CLOCK_TICK_RATE>>10)) /* * Some boards have the PMTMR running way too fast. We check * the PMTMR rate against PIT channel 2 to catch these cases. diff --git a/trunk/drivers/clocksource/i8253.c b/trunk/drivers/clocksource/i8253.c index e7cab2da910f..27c49e60b7d6 100644 --- a/trunk/drivers/clocksource/i8253.c +++ b/trunk/drivers/clocksource/i8253.c @@ -53,7 +53,7 @@ static cycle_t i8253_read(struct clocksource *cs) count |= inb_p(PIT_CH0) << 8; /* VIA686a test code... 
reset the latch if count > max + 1 */ - if (count > PIT_LATCH) { + if (count > LATCH) { outb_p(0x34, PIT_MODE); outb_p(PIT_LATCH & 0xff, PIT_CH0); outb_p(PIT_LATCH >> 8, PIT_CH0); @@ -114,8 +114,8 @@ static void init_pit_timer(enum clock_event_mode mode, case CLOCK_EVT_MODE_PERIODIC: /* binary, mode 2, LSB/MSB, ch 0 */ outb_p(0x34, PIT_MODE); - outb_p(PIT_LATCH & 0xff , PIT_CH0); /* LSB */ - outb_p(PIT_LATCH >> 8 , PIT_CH0); /* MSB */ + outb_p(LATCH & 0xff , PIT_CH0); /* LSB */ + outb_p(LATCH >> 8 , PIT_CH0); /* MSB */ break; case CLOCK_EVT_MODE_SHUTDOWN: diff --git a/trunk/drivers/clocksource/tcb_clksrc.c b/trunk/drivers/clocksource/tcb_clksrc.c index 55d0f95f82f9..79c47e88d5d1 100644 --- a/trunk/drivers/clocksource/tcb_clksrc.c +++ b/trunk/drivers/clocksource/tcb_clksrc.c @@ -59,6 +59,7 @@ static struct clocksource clksrc = { .rating = 200, .read = tc_get_cycles, .mask = CLOCKSOURCE_MASK(32), + .shift = 18, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -255,6 +256,7 @@ static int __init tcb_clksrc_init(void) best_divisor_idx = i; } + clksrc.mult = clocksource_hz2mult(divided_rate, clksrc.shift); printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK, divided_rate / 1000000, @@ -290,7 +292,7 @@ static int __init tcb_clksrc_init(void) __raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR); /* and away we go! */ - clocksource_register_hz(&clksrc, divided_rate); + clocksource_register(&clksrc); /* channel 2: periodic and oneshot timer support */ setup_clkevents(tc, clk32k_divisor_idx); diff --git a/trunk/drivers/cpufreq/cpufreq_conservative.c b/trunk/drivers/cpufreq/cpufreq_conservative.c index 235a340e81f2..c97b468ee9f7 100644 --- a/trunk/drivers/cpufreq/cpufreq_conservative.c +++ b/trunk/drivers/cpufreq/cpufreq_conservative.c @@ -95,26 +95,27 @@ static struct dbs_tuners { .freq_step = 5, }; -static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) +static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, + cputime64_t *wall) { - u64 idle_time; - u64 cur_wall_time; - u64 busy_time; + cputime64_t idle_time; + cputime64_t cur_wall_time; + cputime64_t busy_time; cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, + kstat_cpu(cpu).cpustat.system); - busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); - idle_time = cur_wall_time - busy_time; + idle_time = cputime64_sub(cur_wall_time, busy_time); if (wall) - *wall = jiffies_to_usecs(cur_wall_time); + *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); - return jiffies_to_usecs(idle_time); + return (cputime64_t)jiffies_to_usecs(idle_time); } static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) @@ -271,7 +272,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, dbs_info->prev_cpu_idle = get_cpu_idle_time(j, &dbs_info->prev_cpu_wall); if (dbs_tuners_ins.ignore_nice) - dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + 
dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; } return count; } @@ -352,20 +353,20 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); - wall_time = (unsigned int) - (cur_wall_time - j_dbs_info->prev_cpu_wall); + wall_time = (unsigned int) cputime64_sub(cur_wall_time, + j_dbs_info->prev_cpu_wall); j_dbs_info->prev_cpu_wall = cur_wall_time; - idle_time = (unsigned int) - (cur_idle_time - j_dbs_info->prev_cpu_idle); + idle_time = (unsigned int) cputime64_sub(cur_idle_time, + j_dbs_info->prev_cpu_idle); j_dbs_info->prev_cpu_idle = cur_idle_time; if (dbs_tuners_ins.ignore_nice) { - u64 cur_nice; + cputime64_t cur_nice; unsigned long cur_nice_jiffies; - cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - - j_dbs_info->prev_cpu_nice; + cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, + j_dbs_info->prev_cpu_nice); /* * Assumption: nice time between sampling periods will * be less than 2^32 jiffies for 32 bit sys @@ -373,7 +374,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) cur_nice_jiffies = (unsigned long) cputime64_to_jiffies64(cur_nice); - j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; idle_time += jiffies_to_usecs(cur_nice_jiffies); } @@ -500,9 +501,10 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, &j_dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) + if (dbs_tuners_ins.ignore_nice) { j_dbs_info->prev_cpu_nice = - kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + kstat_cpu(j).cpustat.nice; + } } this_dbs_info->down_skip = 0; this_dbs_info->requested_freq = policy->cur; diff --git a/trunk/drivers/cpufreq/cpufreq_ondemand.c b/trunk/drivers/cpufreq/cpufreq_ondemand.c index 3d679eee70a1..fa8af4ebb1d6 100644 --- a/trunk/drivers/cpufreq/cpufreq_ondemand.c +++ b/trunk/drivers/cpufreq/cpufreq_ondemand.c @@ -119,26 +119,27 @@ static struct dbs_tuners { .powersave_bias = 0, }; -static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) +static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, + cputime64_t *wall) { - u64 idle_time; - u64 cur_wall_time; - u64 busy_time; + cputime64_t idle_time; + cputime64_t cur_wall_time; + cputime64_t busy_time; cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, + kstat_cpu(cpu).cpustat.system); - busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); - idle_time = cur_wall_time - busy_time; + idle_time = cputime64_sub(cur_wall_time, busy_time); if (wall) - *wall = jiffies_to_usecs(cur_wall_time); + *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); - return jiffies_to_usecs(idle_time); + return (cputime64_t)jiffies_to_usecs(idle_time); } static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) @@ -344,7 +345,7 @@ static ssize_t store_ignore_nice_load(struct 
kobject *a, struct attribute *b, dbs_info->prev_cpu_idle = get_cpu_idle_time(j, &dbs_info->prev_cpu_wall); if (dbs_tuners_ins.ignore_nice) - dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; } return count; @@ -441,24 +442,24 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); - wall_time = (unsigned int) - (cur_wall_time - j_dbs_info->prev_cpu_wall); + wall_time = (unsigned int) cputime64_sub(cur_wall_time, + j_dbs_info->prev_cpu_wall); j_dbs_info->prev_cpu_wall = cur_wall_time; - idle_time = (unsigned int) - (cur_idle_time - j_dbs_info->prev_cpu_idle); + idle_time = (unsigned int) cputime64_sub(cur_idle_time, + j_dbs_info->prev_cpu_idle); j_dbs_info->prev_cpu_idle = cur_idle_time; - iowait_time = (unsigned int) - (cur_iowait_time - j_dbs_info->prev_cpu_iowait); + iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, + j_dbs_info->prev_cpu_iowait); j_dbs_info->prev_cpu_iowait = cur_iowait_time; if (dbs_tuners_ins.ignore_nice) { - u64 cur_nice; + cputime64_t cur_nice; unsigned long cur_nice_jiffies; - cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - - j_dbs_info->prev_cpu_nice; + cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, + j_dbs_info->prev_cpu_nice); /* * Assumption: nice time between sampling periods will * be less than 2^32 jiffies for 32 bit sys @@ -466,7 +467,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) cur_nice_jiffies = (unsigned long) cputime64_to_jiffies64(cur_nice); - j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; idle_time += jiffies_to_usecs(cur_nice_jiffies); } @@ -645,9 +646,10 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, &j_dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) + if (dbs_tuners_ins.ignore_nice) { j_dbs_info->prev_cpu_nice = - kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + kstat_cpu(j).cpustat.nice; + } } this_dbs_info->cpu = cpu; this_dbs_info->rate_mult = 1; diff --git a/trunk/drivers/cpufreq/cpufreq_stats.c b/trunk/drivers/cpufreq/cpufreq_stats.c index 2a508edd768b..c5072a91e848 100644 --- a/trunk/drivers/cpufreq/cpufreq_stats.c +++ b/trunk/drivers/cpufreq/cpufreq_stats.c @@ -61,8 +61,9 @@ static int cpufreq_stats_update(unsigned int cpu) spin_lock(&cpufreq_stats_lock); stat = per_cpu(cpufreq_stats_table, cpu); if (stat->time_in_state) - stat->time_in_state[stat->last_index] += - cur_time - stat->last_time; + stat->time_in_state[stat->last_index] = + cputime64_add(stat->time_in_state[stat->last_index], + cputime_sub(cur_time, stat->last_time)); stat->last_time = cur_time; spin_unlock(&cpufreq_stats_lock); return 0; diff --git a/trunk/drivers/dma/Kconfig b/trunk/drivers/dma/Kconfig index 5a99bb3f255a..ab8f469f5cf8 100644 --- a/trunk/drivers/dma/Kconfig +++ b/trunk/drivers/dma/Kconfig @@ -124,7 +124,7 @@ config MV_XOR config MX3_IPU bool "MX3x Image Processing Unit support" - depends on SOC_IMX31 || SOC_IMX35 + depends on ARCH_MX3 select DMA_ENGINE default y help @@ -216,7 +216,7 @@ config PCH_DMA config IMX_SDMA tristate "i.MX SDMA support" - depends on ARCH_MX25 || SOC_IMX31 || SOC_IMX35 || ARCH_MX5 + depends on ARCH_MX25 || ARCH_MX3 || ARCH_MX5 select DMA_ENGINE help Support the i.MX SDMA engine. 
This engine is integrated into diff --git a/trunk/drivers/edac/i7core_edac.c b/trunk/drivers/edac/i7core_edac.c index 8568d9b61875..70ad8923f1d7 100644 --- a/trunk/drivers/edac/i7core_edac.c +++ b/trunk/drivers/edac/i7core_edac.c @@ -2234,7 +2234,7 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev) if (pvt->enable_scrub) disable_sdram_scrub_setting(mci); - mce_unregister_decode_chain(&i7_mce_dec); + atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &i7_mce_dec); /* Disable EDAC polling */ i7core_pci_ctl_release(pvt); @@ -2336,7 +2336,7 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev) /* DCLK for scrub rate setting */ pvt->dclk_freq = get_dclk_freq(); - mce_register_decode_chain(&i7_mce_dec); + atomic_notifier_chain_register(&x86_mce_decoder_chain, &i7_mce_dec); return 0; diff --git a/trunk/drivers/edac/mce_amd.c b/trunk/drivers/edac/mce_amd.c index bd926ea2e00c..d0864d9c38ad 100644 --- a/trunk/drivers/edac/mce_amd.c +++ b/trunk/drivers/edac/mce_amd.c @@ -884,7 +884,7 @@ static int __init mce_amd_init(void) pr_info("MCE: In-kernel MCE decoding enabled.\n"); - mce_register_decode_chain(&amd_mce_dec_nb); + atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb); return 0; } @@ -893,7 +893,7 @@ early_initcall(mce_amd_init); #ifdef MODULE static void __exit mce_amd_exit(void) { - mce_unregister_decode_chain(&amd_mce_dec_nb); + atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb); kfree(fam_ops); } diff --git a/trunk/drivers/edac/sb_edac.c b/trunk/drivers/edac/sb_edac.c index 1dc118d83cc6..7a402bfbee7d 100644 --- a/trunk/drivers/edac/sb_edac.c +++ b/trunk/drivers/edac/sb_edac.c @@ -1609,9 +1609,11 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val, mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid); +#ifdef CONFIG_SMP /* Only handle if it is the right mc controller */ if (cpu_data(mce->cpu).phys_proc_id != pvt->sbridge_dev->mc) return NOTIFY_DONE; +#endif smp_rmb(); if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) { @@ -1659,7 +1661,8 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev) debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", __func__, mci, &sbridge_dev->pdev[0]->dev); - mce_unregister_decode_chain(&sbridge_mce_dec); + atomic_notifier_chain_unregister(&x86_mce_decoder_chain, + &sbridge_mce_dec); /* Remove MC sysfs nodes */ edac_mc_del_mc(mci->dev); @@ -1728,7 +1731,8 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev) goto fail0; } - mce_register_decode_chain(&sbridge_mce_dec); + atomic_notifier_chain_register(&x86_mce_decoder_chain, + &sbridge_mce_dec); return 0; fail0: diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c index b9da8900ae4e..c681dc149d2a 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -756,9 +756,9 @@ intel_enable_semaphores(struct drm_device *dev) if (i915_semaphores >= 0) return i915_semaphores; - /* Disable semaphores on SNB */ + /* Enable semaphores on SNB when IO remapping is off */ if (INTEL_INFO(dev)->gen == 6) - return 0; + return !intel_iommu_enabled; return 1; } diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c index daa5743ccbd6..d809b038ca88 100644 --- a/trunk/drivers/gpu/drm/i915/intel_display.c +++ b/trunk/drivers/gpu/drm/i915/intel_display.c @@ -7922,11 +7922,13 @@ static bool intel_enable_rc6(struct 
drm_device *dev) return 0; /* - * Disable rc6 on Sandybridge + * Enable rc6 on Sandybridge if DMA remapping is disabled */ if (INTEL_INFO(dev)->gen == 6) { - DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n"); - return 0; + DRM_DEBUG_DRIVER("Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n", + intel_iommu_enabled ? "true" : "false", + !intel_iommu_enabled ? "en" : "dis"); + return !intel_iommu_enabled; } DRM_DEBUG_DRIVER("RC6 enabled\n"); return 1; diff --git a/trunk/drivers/gpu/drm/radeon/evergreen.c b/trunk/drivers/gpu/drm/radeon/evergreen.c index 92c9628c572d..5e00d1670aa9 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen.c +++ b/trunk/drivers/gpu/drm/radeon/evergreen.c @@ -3276,18 +3276,6 @@ int evergreen_init(struct radeon_device *rdev) rdev->accel_working = false; } } - - /* Don't start up if the MC ucode is missing on BTC parts. - * The default clocks and voltages before the MC ucode - * is loaded are not suffient for advanced operations. - */ - if (ASIC_IS_DCE5(rdev)) { - if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) { - DRM_ERROR("radeon: MC ucode required for NI+.\n"); - return -EINVAL; - } - } - return 0; } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_atombios.c b/trunk/drivers/gpu/drm/radeon/radeon_atombios.c index 5082d17d14dc..d24baf30efcb 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_atombios.c @@ -2560,11 +2560,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; rdev->pm.current_clock_mode_index = 0; - if (rdev->pm.default_power_state_index >= 0) - rdev->pm.current_vddc = - rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; - else - rdev->pm.current_vddc = 0; + rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; } void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index f94b33ae2215..8aa1dbb45c67 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1093,6 +1093,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, struct vmw_surface *surface = NULL; struct vmw_dma_buffer *bo = NULL; struct ttm_base_object *user_obj; + u64 required_size; int ret; /** @@ -1101,9 +1102,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, * requested framebuffer. 
*/ - if (!vmw_kms_validate_mode_vram(dev_priv, - mode_cmd->pitch, - mode_cmd->height)) { + required_size = mode_cmd->pitch * mode_cmd->height; + if (unlikely(required_size > (u64) dev_priv->vram_size)) { DRM_ERROR("VRAM size is too small for requested mode.\n"); return ERR_PTR(-ENOMEM); } diff --git a/trunk/drivers/hwmon/coretemp.c b/trunk/drivers/hwmon/coretemp.c index 1fdef885341c..104b3767516c 100644 --- a/trunk/drivers/hwmon/coretemp.c +++ b/trunk/drivers/hwmon/coretemp.c @@ -57,15 +57,16 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) +#ifdef CONFIG_SMP #define TO_PHYS_ID(cpu) cpu_data(cpu).phys_proc_id #define TO_CORE_ID(cpu) cpu_data(cpu).cpu_core_id -#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO) - -#ifdef CONFIG_SMP #define for_each_sibling(i, cpu) for_each_cpu(i, cpu_sibling_mask(cpu)) #else +#define TO_PHYS_ID(cpu) (cpu) +#define TO_CORE_ID(cpu) (cpu) #define for_each_sibling(i, cpu) for (i = 0; false; ) #endif +#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO) /* * Per-Core Temperature Data diff --git a/trunk/drivers/ieee802154/fakehard.c b/trunk/drivers/ieee802154/fakehard.c index 73d453159408..eb0e2ccc79ae 100644 --- a/trunk/drivers/ieee802154/fakehard.c +++ b/trunk/drivers/ieee802154/fakehard.c @@ -343,7 +343,7 @@ static void ieee802154_fake_setup(struct net_device *dev) { dev->addr_len = IEEE802154_ADDR_LEN; memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN); - dev->features = NETIF_F_HW_CSUM; + dev->features = NETIF_F_NO_CSUM; dev->needed_tailroom = 2; /* FCS */ dev->mtu = 127; dev->tx_queue_len = 10; diff --git a/trunk/drivers/infiniband/core/addr.c b/trunk/drivers/infiniband/core/addr.c index 1612cfd50f39..e9cf51b1343b 100644 --- a/trunk/drivers/infiniband/core/addr.c +++ b/trunk/drivers/infiniband/core/addr.c @@ -178,25 +178,6 @@ static void queue_req(struct addr_req *req) mutex_unlock(&lock); } -static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *addr) -{ - struct neighbour *n; - int ret; - - rcu_read_lock(); - n = dst_get_neighbour_noref(dst); - if (!n || !(n->nud_state & NUD_VALID)) { - if (n) - neigh_event_send(n, NULL); - ret = -ENODATA; - } else { - ret = rdma_copy_addr(addr, dst->dev, n->ha); - } - rcu_read_unlock(); - - return ret; -} - static int addr4_resolve(struct sockaddr_in *src_in, struct sockaddr_in *dst_in, struct rdma_dev_addr *addr) @@ -204,6 +185,7 @@ static int addr4_resolve(struct sockaddr_in *src_in, __be32 src_ip = src_in->sin_addr.s_addr; __be32 dst_ip = dst_in->sin_addr.s_addr; struct rtable *rt; + struct neighbour *neigh; struct flowi4 fl4; int ret; @@ -232,7 +214,20 @@ static int addr4_resolve(struct sockaddr_in *src_in, goto put; } - ret = dst_fetch_ha(&rt->dst, addr); + neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev); + if (!neigh || !(neigh->nud_state & NUD_VALID)) { + rcu_read_lock(); + neigh_event_send(dst_get_neighbour(&rt->dst), NULL); + rcu_read_unlock(); + ret = -ENODATA; + if (neigh) + goto release; + goto put; + } + + ret = rdma_copy_addr(addr, neigh->dev, neigh->ha); +release: + neigh_release(neigh); put: ip_rt_put(rt); out: @@ -245,12 +240,13 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, struct rdma_dev_addr *addr) { struct flowi6 fl6; + struct neighbour *neigh; struct dst_entry *dst; int ret; memset(&fl6, 0, sizeof fl6); - fl6.daddr = dst_in->sin6_addr; - fl6.saddr = src_in->sin6_addr; + ipv6_addr_copy(&fl6.daddr, &dst_in->sin6_addr); + 
ipv6_addr_copy(&fl6.saddr, &src_in->sin6_addr); fl6.flowi6_oif = addr->bound_dev_if; dst = ip6_route_output(&init_net, NULL, &fl6); @@ -264,7 +260,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, goto put; src_in->sin6_family = AF_INET6; - src_in->sin6_addr = fl6.saddr; + ipv6_addr_copy(&src_in->sin6_addr, &fl6.saddr); } if (dst->dev->flags & IFF_LOOPBACK) { @@ -280,7 +276,16 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, goto put; } - ret = dst_fetch_ha(dst, addr); + rcu_read_lock(); + neigh = dst_get_neighbour(dst); + if (!neigh || !(neigh->nud_state & NUD_VALID)) { + if (neigh) + neigh_event_send(neigh, NULL); + ret = -ENODATA; + } else { + ret = rdma_copy_addr(addr, dst->dev, neigh->ha); + } + rcu_read_unlock(); put: dst_release(dst); return ret; diff --git a/trunk/drivers/infiniband/core/cma.c b/trunk/drivers/infiniband/core/cma.c index 236a88c1ca87..d0d4aa9f4802 100644 --- a/trunk/drivers/infiniband/core/cma.c +++ b/trunk/drivers/infiniband/core/cma.c @@ -2005,11 +2005,11 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv) if (cma_zero_addr(src)) { dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr; if ((src->sa_family = dst->sa_family) == AF_INET) { - ((struct sockaddr_in *)src)->sin_addr = - ((struct sockaddr_in *)dst)->sin_addr; + ((struct sockaddr_in *) src)->sin_addr.s_addr = + ((struct sockaddr_in *) dst)->sin_addr.s_addr; } else { - ((struct sockaddr_in6 *)src)->sin6_addr = - ((struct sockaddr_in6 *)dst)->sin6_addr; + ipv6_addr_copy(&((struct sockaddr_in6 *) src)->sin6_addr, + &((struct sockaddr_in6 *) dst)->sin6_addr); } } diff --git a/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.c b/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.c index 740dcc065cf2..c88b12beef25 100644 --- a/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -1338,6 +1338,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) struct iwch_ep *child_ep, *parent_ep = ctx; struct cpl_pass_accept_req *req = cplhdr(skb); unsigned int hwtid = GET_TID(req); + struct neighbour *neigh; struct dst_entry *dst; struct l2t_entry *l2t; struct rtable *rt; @@ -1374,7 +1375,10 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) goto reject; } dst = &rt->dst; - l2t = t3_l2t_get(tdev, dst, NULL); + rcu_read_lock(); + neigh = dst_get_neighbour(dst); + l2t = t3_l2t_get(tdev, neigh, neigh->dev); + rcu_read_unlock(); if (!l2t) { printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", __func__); @@ -1885,6 +1889,7 @@ static int is_loopback_dst(struct iw_cm_id *cm_id) int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) { struct iwch_dev *h = to_iwch_dev(cm_id->device); + struct neighbour *neigh; struct iwch_ep *ep; struct rtable *rt; int err = 0; @@ -1942,7 +1947,13 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) goto fail3; } ep->dst = &rt->dst; - ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL); + + rcu_read_lock(); + neigh = dst_get_neighbour(ep->dst); + + /* get a l2t entry */ + ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev); + rcu_read_unlock(); if (!ep->l2t) { printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); err = -ENOMEM; diff --git a/trunk/drivers/infiniband/hw/cxgb4/cm.c b/trunk/drivers/infiniband/hw/cxgb4/cm.c index 0668bb3472d0..0747004313ad 100644 --- a/trunk/drivers/infiniband/hw/cxgb4/cm.c +++ b/trunk/drivers/infiniband/hw/cxgb4/cm.c @@ -1556,67 +1556,6 @@ static void get_4tuple(struct 
cpl_pass_accept_req *req, return; } -static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst, - struct c4iw_dev *cdev, bool clear_mpa_v1) -{ - struct neighbour *n; - int err, step; - - rcu_read_lock(); - n = dst_get_neighbour_noref(dst); - err = -ENODEV; - if (!n) - goto out; - err = -ENOMEM; - if (n->dev->flags & IFF_LOOPBACK) { - struct net_device *pdev; - - pdev = ip_dev_find(&init_net, peer_ip); - ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, - n, pdev, 0); - if (!ep->l2t) - goto out; - ep->mtu = pdev->mtu; - ep->tx_chan = cxgb4_port_chan(pdev); - ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; - step = cdev->rdev.lldi.ntxq / - cdev->rdev.lldi.nchan; - ep->txq_idx = cxgb4_port_idx(pdev) * step; - step = cdev->rdev.lldi.nrxq / - cdev->rdev.lldi.nchan; - ep->ctrlq_idx = cxgb4_port_idx(pdev); - ep->rss_qid = cdev->rdev.lldi.rxq_ids[ - cxgb4_port_idx(pdev) * step]; - dev_put(pdev); - } else { - ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, - n, n->dev, 0); - if (!ep->l2t) - goto out; - ep->mtu = dst_mtu(ep->dst); - ep->tx_chan = cxgb4_port_chan(n->dev); - ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1; - step = cdev->rdev.lldi.ntxq / - cdev->rdev.lldi.nchan; - ep->txq_idx = cxgb4_port_idx(n->dev) * step; - ep->ctrlq_idx = cxgb4_port_idx(n->dev); - step = cdev->rdev.lldi.nrxq / - cdev->rdev.lldi.nchan; - ep->rss_qid = cdev->rdev.lldi.rxq_ids[ - cxgb4_port_idx(n->dev) * step]; - - if (clear_mpa_v1) { - ep->retry_with_mpa_v1 = 0; - ep->tried_with_mpa_v1 = 0; - } - } - err = 0; -out: - rcu_read_unlock(); - - return err; -} - static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *child_ep, *parent_ep; @@ -1624,11 +1563,18 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid)); struct tid_info *t = dev->rdev.lldi.tids; unsigned int hwtid = GET_TID(req); + struct neighbour *neigh; struct dst_entry *dst; + struct l2t_entry *l2t; struct rtable *rt; __be32 local_ip, peer_ip; __be16 local_port, peer_port; - int err; + struct net_device *pdev; + u32 tx_chan, smac_idx; + u16 rss_qid; + u32 mtu; + int step; + int txq_idx, ctrlq_idx; parent_ep = lookup_stid(t, stid); PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid); @@ -1650,24 +1596,49 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) goto reject; } dst = &rt->dst; - - child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); - if (!child_ep) { - printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", + rcu_read_lock(); + neigh = dst_get_neighbour(dst); + if (neigh->dev->flags & IFF_LOOPBACK) { + pdev = ip_dev_find(&init_net, peer_ip); + BUG_ON(!pdev); + l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, pdev, 0); + mtu = pdev->mtu; + tx_chan = cxgb4_port_chan(pdev); + smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; + step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan; + txq_idx = cxgb4_port_idx(pdev) * step; + ctrlq_idx = cxgb4_port_idx(pdev); + step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; + rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step]; + dev_put(pdev); + } else { + l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, neigh->dev, 0); + mtu = dst_mtu(dst); + tx_chan = cxgb4_port_chan(neigh->dev); + smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1; + step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan; + txq_idx = cxgb4_port_idx(neigh->dev) * step; + ctrlq_idx = cxgb4_port_idx(neigh->dev); + step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; + rss_qid = 
dev->rdev.lldi.rxq_ids[ + cxgb4_port_idx(neigh->dev) * step]; + } + rcu_read_unlock(); + if (!l2t) { + printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", __func__); dst_release(dst); goto reject; } - err = import_ep(child_ep, peer_ip, dst, dev, false); - if (err) { - printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", + child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); + if (!child_ep) { + printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", __func__); + cxgb4_l2t_release(l2t); dst_release(dst); - kfree(child_ep); goto reject; } - state_set(&child_ep->com, CONNECTING); child_ep->com.dev = dev; child_ep->com.cm_id = NULL; @@ -1680,11 +1651,18 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) c4iw_get_ep(&parent_ep->com); child_ep->parent_ep = parent_ep; child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid)); + child_ep->l2t = l2t; child_ep->dst = dst; child_ep->hwtid = hwtid; + child_ep->tx_chan = tx_chan; + child_ep->smac_idx = smac_idx; + child_ep->rss_qid = rss_qid; + child_ep->mtu = mtu; + child_ep->txq_idx = txq_idx; + child_ep->ctrlq_idx = ctrlq_idx; PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__, - child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid); + tx_chan, smac_idx, rss_qid); init_timer(&child_ep->timer); cxgb4_insert_tid(t, child_ep, hwtid); @@ -1814,8 +1792,11 @@ static int is_neg_adv_abort(unsigned int status) static int c4iw_reconnect(struct c4iw_ep *ep) { - struct rtable *rt; int err = 0; + struct rtable *rt; + struct net_device *pdev; + struct neighbour *neigh; + int step; PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id); init_timer(&ep->timer); @@ -1843,10 +1824,47 @@ static int c4iw_reconnect(struct c4iw_ep *ep) } ep->dst = &rt->dst; - err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr, - ep->dst, ep->com.dev, false); - if (err) { + rcu_read_lock(); + neigh = dst_get_neighbour(ep->dst); + + /* get a l2t entry */ + if (neigh->dev->flags & IFF_LOOPBACK) { + PDBG("%s LOOPBACK\n", __func__); + pdev = ip_dev_find(&init_net, + ep->com.cm_id->remote_addr.sin_addr.s_addr); + ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, + neigh, pdev, 0); + ep->mtu = pdev->mtu; + ep->tx_chan = cxgb4_port_chan(pdev); + ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; + step = ep->com.dev->rdev.lldi.ntxq / + ep->com.dev->rdev.lldi.nchan; + ep->txq_idx = cxgb4_port_idx(pdev) * step; + step = ep->com.dev->rdev.lldi.nrxq / + ep->com.dev->rdev.lldi.nchan; + ep->ctrlq_idx = cxgb4_port_idx(pdev); + ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ + cxgb4_port_idx(pdev) * step]; + dev_put(pdev); + } else { + ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, + neigh, neigh->dev, 0); + ep->mtu = dst_mtu(ep->dst); + ep->tx_chan = cxgb4_port_chan(neigh->dev); + ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1; + step = ep->com.dev->rdev.lldi.ntxq / + ep->com.dev->rdev.lldi.nchan; + ep->txq_idx = cxgb4_port_idx(neigh->dev) * step; + ep->ctrlq_idx = cxgb4_port_idx(neigh->dev); + step = ep->com.dev->rdev.lldi.nrxq / + ep->com.dev->rdev.lldi.nchan; + ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ + cxgb4_port_idx(neigh->dev) * step]; + } + rcu_read_unlock(); + if (!ep->l2t) { printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); + err = -ENOMEM; goto fail4; } @@ -2222,10 +2240,13 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) { + int err = 0; struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 
struct c4iw_ep *ep; struct rtable *rt; - int err = 0; + struct net_device *pdev; + struct neighbour *neigh; + int step; if ((conn_param->ord > c4iw_max_read_depth) || (conn_param->ird > c4iw_max_read_depth)) { @@ -2286,10 +2307,49 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) } ep->dst = &rt->dst; - err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr, - ep->dst, ep->com.dev, true); - if (err) { + rcu_read_lock(); + neigh = dst_get_neighbour(ep->dst); + + /* get a l2t entry */ + if (neigh->dev->flags & IFF_LOOPBACK) { + PDBG("%s LOOPBACK\n", __func__); + pdev = ip_dev_find(&init_net, + cm_id->remote_addr.sin_addr.s_addr); + ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, + neigh, pdev, 0); + ep->mtu = pdev->mtu; + ep->tx_chan = cxgb4_port_chan(pdev); + ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; + step = ep->com.dev->rdev.lldi.ntxq / + ep->com.dev->rdev.lldi.nchan; + ep->txq_idx = cxgb4_port_idx(pdev) * step; + step = ep->com.dev->rdev.lldi.nrxq / + ep->com.dev->rdev.lldi.nchan; + ep->ctrlq_idx = cxgb4_port_idx(pdev); + ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ + cxgb4_port_idx(pdev) * step]; + dev_put(pdev); + } else { + ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, + neigh, neigh->dev, 0); + ep->mtu = dst_mtu(ep->dst); + ep->tx_chan = cxgb4_port_chan(neigh->dev); + ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1; + step = ep->com.dev->rdev.lldi.ntxq / + ep->com.dev->rdev.lldi.nchan; + ep->txq_idx = cxgb4_port_idx(neigh->dev) * step; + ep->ctrlq_idx = cxgb4_port_idx(neigh->dev); + step = ep->com.dev->rdev.lldi.nrxq / + ep->com.dev->rdev.lldi.nchan; + ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ + cxgb4_port_idx(neigh->dev) * step]; + ep->retry_with_mpa_v1 = 0; + ep->tried_with_mpa_v1 = 0; + } + rcu_read_unlock(); + if (!ep->l2t) { printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); + err = -ENOMEM; goto fail4; } diff --git a/trunk/drivers/infiniband/hw/mlx4/mad.c b/trunk/drivers/infiniband/hw/mlx4/mad.c index 95c94d8f0254..f36da994a85a 100644 --- a/trunk/drivers/infiniband/hw/mlx4/mad.c +++ b/trunk/drivers/infiniband/hw/mlx4/mad.c @@ -109,8 +109,7 @@ int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey, err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier, op_modifier, - MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, - MLX4_CMD_NATIVE); + MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C); if (!err) memcpy(response_mad, outmailbox->buf, 256); @@ -331,8 +330,7 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, return IB_MAD_RESULT_FAILURE; err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0, - MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C, - MLX4_CMD_WRAPPED); + MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C); if (err) err = IB_MAD_RESULT_FAILURE; else { diff --git a/trunk/drivers/infiniband/hw/mlx4/main.c b/trunk/drivers/infiniband/hw/mlx4/main.c index 7b445df6a667..18836cdf1e10 100644 --- a/trunk/drivers/infiniband/hw/mlx4/main.c +++ b/trunk/drivers/infiniband/hw/mlx4/main.c @@ -177,7 +177,7 @@ mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num) { struct mlx4_dev *dev = to_mdev(device)->dev; - return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ? + return dev->caps.port_mask & (1 << (port_num - 1)) ? 
IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET; } @@ -434,7 +434,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask, memset(mailbox->buf, 0, 256); memcpy(mailbox->buf, props->node_desc, 64); mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0, - MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A); mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox); @@ -463,7 +463,7 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols, } err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + MLX4_CMD_TIME_CLASS_B); mlx4_free_cmd_mailbox(dev->dev, mailbox); return err; @@ -899,8 +899,7 @@ static void update_gids_task(struct work_struct *work) memcpy(gids, gw->gids, sizeof gw->gids); err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port, - 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, - MLX4_CMD_NATIVE); + 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B); if (err) printk(KERN_WARNING "set port command failed\n"); else { @@ -1075,11 +1074,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) printk_once(KERN_INFO "%s", mlx4_ib_version); - if (mlx4_is_mfunc(dev)) { - printk(KERN_WARNING "IB not yet supported in SRIOV\n"); - return NULL; - } - mlx4_foreach_ib_transport_port(i, dev) num_ports++; diff --git a/trunk/drivers/infiniband/hw/nes/nes_cm.c b/trunk/drivers/infiniband/hw/nes/nes_cm.c index b1e6cae5f47e..0a52d72371ee 100644 --- a/trunk/drivers/infiniband/hw/nes/nes_cm.c +++ b/trunk/drivers/infiniband/hw/nes/nes_cm.c @@ -1348,8 +1348,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi else netdev = nesvnic->netdev; - rcu_read_lock(); - neigh = dst_get_neighbour_noref(&rt->dst); + neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev); if (neigh) { if (neigh->nud_state & NUD_VALID) { nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X" @@ -1360,6 +1359,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi if (!memcmp(nesadapter->arp_table[arpindex].mac_addr, neigh->ha, ETH_ALEN)) { /* Mac address same as in nes_arp_table */ + neigh_release(neigh); ip_rt_put(rt); return rc; } @@ -1373,11 +1373,15 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi dst_ip, NES_ARP_ADD); rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL, NES_ARP_RESOLVE); - } else { - neigh_event_send(neigh, NULL); } + neigh_release(neigh); + } + + if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) { + rcu_read_lock(); + neigh_event_send(dst_get_neighbour(&rt->dst), NULL); + rcu_read_unlock(); } - rcu_read_unlock(); ip_rt_put(rt); return rc; } diff --git a/trunk/drivers/infiniband/hw/nes/nes_nic.c b/trunk/drivers/infiniband/hw/nes/nes_nic.c index 4b3fa711a247..c00d2f3f8966 100644 --- a/trunk/drivers/infiniband/hw/nes/nes_nic.c +++ b/trunk/drivers/infiniband/hw/nes/nes_nic.c @@ -1589,7 +1589,7 @@ static const struct ethtool_ops nes_ethtool_ops = { .set_pauseparam = nes_netdev_set_pauseparam, }; -static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, netdev_features_t features) +static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, u32 features) { struct nes_adapter *nesadapter = nesdev->nesadapter; u32 u32temp; @@ -1610,7 +1610,7 @@ static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, spin_unlock_irqrestore(&nesadapter->phy_lock, flags); } -static netdev_features_t 
nes_fix_features(struct net_device *netdev, netdev_features_t features) +static u32 nes_fix_features(struct net_device *netdev, u32 features) { /* * Since there is no support for separate rx/tx vlan accel @@ -1624,7 +1624,7 @@ static netdev_features_t nes_fix_features(struct net_device *netdev, netdev_feat return features; } -static int nes_set_features(struct net_device *netdev, netdev_features_t features) +static int nes_set_features(struct net_device *netdev, u32 features) { struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c index 3514ca05deea..83695b48b010 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -171,7 +171,7 @@ static int ipoib_stop(struct net_device *dev) return 0; } -static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features) +static u32 ipoib_fix_features(struct net_device *dev, u32 features) { struct ipoib_dev_priv *priv = netdev_priv(dev); @@ -556,13 +556,15 @@ static int path_rec_start(struct net_device *dev, } /* called with rcu_read_lock */ -static void neigh_add_path(struct sk_buff *skb, struct neighbour *n, struct net_device *dev) +static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_path *path; struct ipoib_neigh *neigh; + struct neighbour *n; unsigned long flags; + n = dst_get_neighbour(skb_dst(skb)); neigh = ipoib_neigh_alloc(n, skb->dev); if (!neigh) { ++dev->stats.tx_dropped; @@ -636,13 +638,16 @@ static void neigh_add_path(struct sk_buff *skb, struct neighbour *n, struct net_ } /* called with rcu_read_lock */ -static void ipoib_path_lookup(struct sk_buff *skb, struct neighbour *n, struct net_device *dev) +static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(skb->dev); + struct dst_entry *dst = skb_dst(skb); + struct neighbour *n; /* Look up path record for unicasts */ + n = dst_get_neighbour(dst); if (n->ha[4] != 0xff) { - neigh_add_path(skb, n, dev); + neigh_add_path(skb, dev); return; } @@ -718,17 +723,12 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned long flags; rcu_read_lock(); - if (likely(skb_dst(skb))) { - n = dst_get_neighbour_noref(skb_dst(skb)); - if (!n) { - ++dev->stats.tx_dropped; - dev_kfree_skb_any(skb); - goto unlock; - } - } + if (likely(skb_dst(skb))) + n = dst_get_neighbour(skb_dst(skb)); + if (likely(n)) { if (unlikely(!*to_ipoib_neigh(n))) { - ipoib_path_lookup(skb, n, dev); + ipoib_path_lookup(skb, dev); goto unlock; } @@ -751,7 +751,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) list_del(&neigh->list); ipoib_neigh_free(dev, neigh); spin_unlock_irqrestore(&priv->lock, flags); - ipoib_path_lookup(skb, n, dev); + ipoib_path_lookup(skb, dev); goto unlock; } @@ -841,7 +841,7 @@ static int ipoib_hard_header(struct sk_buff *skb, dst = skb_dst(skb); n = NULL; if (dst) - n = dst_get_neighbour_noref_raw(dst); + n = dst_get_neighbour_raw(dst); if ((!dst || !n) && daddr) { struct ipoib_pseudoheader *phdr = (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr); @@ -1222,8 +1222,6 @@ static struct net_device *ipoib_add_port(const char *format, priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; - priv->dev->neigh_priv_len = 
sizeof(struct ipoib_neigh); - result = ib_query_pkey(hca, port, 0, &priv->pkey); if (result) { printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n", diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index f7ff9dd66cda..873bff97e69e 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -269,7 +269,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, skb->dev = dev; if (dst) - n = dst_get_neighbour_noref_raw(dst); + n = dst_get_neighbour_raw(dst); if (!dst || !n) { /* put pseudoheader back on for next time */ skb_push(skb, sizeof (struct ipoib_pseudoheader)); @@ -728,7 +728,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb) rcu_read_lock(); if (dst) - n = dst_get_neighbour_noref(dst); + n = dst_get_neighbour(dst); if (n && !*to_ipoib_neigh(n)) { struct ipoib_neigh *neigh = ipoib_neigh_alloc(n, skb->dev); diff --git a/trunk/drivers/input/mouse/sentelic.c b/trunk/drivers/input/mouse/sentelic.c index 86d6f39178b0..c5b12d2e955a 100644 --- a/trunk/drivers/input/mouse/sentelic.c +++ b/trunk/drivers/input/mouse/sentelic.c @@ -2,7 +2,7 @@ * Finger Sensing Pad PS/2 mouse driver. * * Copyright (C) 2005-2007 Asia Vital Components Co., Ltd. - * Copyright (C) 2005-2011 Tai-hwa Liang, Sentelic Corporation. + * Copyright (C) 2005-2010 Tai-hwa Liang, Sentelic Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -162,7 +162,7 @@ static int fsp_reg_write(struct psmouse *psmouse, int reg_addr, int reg_val) ps2_sendbyte(ps2dev, v, FSP_CMD_TIMEOUT2); if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0) - goto out; + return -1; if ((v = fsp_test_invert_cmd(reg_val)) != reg_val) { /* inversion is required */ @@ -261,7 +261,7 @@ static int fsp_page_reg_write(struct psmouse *psmouse, int reg_val) ps2_sendbyte(ps2dev, 0x88, FSP_CMD_TIMEOUT2); if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0) - goto out; + return -1; if ((v = fsp_test_invert_cmd(reg_val)) != reg_val) { ps2_sendbyte(ps2dev, 0x47, FSP_CMD_TIMEOUT2); @@ -309,7 +309,7 @@ static int fsp_get_buttons(struct psmouse *psmouse, int *btn) }; int val; - if (fsp_reg_read(psmouse, FSP_REG_TMOD_STATUS, &val) == -1) + if (fsp_reg_read(psmouse, FSP_REG_TMOD_STATUS1, &val) == -1) return -EIO; *btn = buttons[(val & 0x30) >> 4]; diff --git a/trunk/drivers/input/mouse/sentelic.h b/trunk/drivers/input/mouse/sentelic.h index 2e4af24f8c15..ed1395ac7b8b 100644 --- a/trunk/drivers/input/mouse/sentelic.h +++ b/trunk/drivers/input/mouse/sentelic.h @@ -2,7 +2,7 @@ * Finger Sensing Pad PS/2 mouse driver. * * Copyright (C) 2005-2007 Asia Vital Components Co., Ltd. - * Copyright (C) 2005-2011 Tai-hwa Liang, Sentelic Corporation. + * Copyright (C) 2005-2009 Tai-hwa Liang, Sentelic Corporation. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -33,7 +33,6 @@ /* Finger-sensing Pad control registers */ #define FSP_REG_SYSCTL1 0x10 #define FSP_BIT_EN_REG_CLK BIT(5) -#define FSP_REG_TMOD_STATUS 0x20 #define FSP_REG_OPC_QDOWN 0x31 #define FSP_BIT_EN_OPC_TAG BIT(7) #define FSP_REG_OPTZ_XLO 0x34 diff --git a/trunk/drivers/iommu/intel-iommu.c b/trunk/drivers/iommu/intel-iommu.c index 31053a951c34..bdc447fd4766 100644 --- a/trunk/drivers/iommu/intel-iommu.c +++ b/trunk/drivers/iommu/intel-iommu.c @@ -41,7 +41,6 @@ #include #include #include -#include #include #include @@ -2189,6 +2188,18 @@ static inline void iommu_prepare_isa(void) static int md_domain_init(struct dmar_domain *domain, int guest_width); +static int __init si_domain_work_fn(unsigned long start_pfn, + unsigned long end_pfn, void *datax) +{ + int *ret = datax; + + *ret = iommu_domain_identity_map(si_domain, + (uint64_t)start_pfn << PAGE_SHIFT, + (uint64_t)end_pfn << PAGE_SHIFT); + return *ret; + +} + static int __init si_domain_init(int hw) { struct dmar_drhd_unit *drhd; @@ -2220,15 +2231,9 @@ static int __init si_domain_init(int hw) return 0; for_each_online_node(nid) { - unsigned long start_pfn, end_pfn; - int i; - - for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { - ret = iommu_domain_identity_map(si_domain, - PFN_PHYS(start_pfn), PFN_PHYS(end_pfn)); - if (ret) - return ret; - } + work_with_active_regions(nid, si_domain_work_fn, &ret); + if (ret) + return ret; } return 0; diff --git a/trunk/drivers/iommu/iommu.c b/trunk/drivers/iommu/iommu.c index 5b5fa5cdaa31..2fb2963df553 100644 --- a/trunk/drivers/iommu/iommu.c +++ b/trunk/drivers/iommu/iommu.c @@ -90,7 +90,7 @@ struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) if (bus == NULL || bus->iommu_ops == NULL) return NULL; - domain = kzalloc(sizeof(*domain), GFP_KERNEL); + domain = kmalloc(sizeof(*domain), GFP_KERNEL); if (!domain) return NULL; diff --git a/trunk/drivers/isdn/gigaset/i4l.c b/trunk/drivers/isdn/gigaset/i4l.c index 1793ba1b6a89..04231cb2f031 100644 --- a/trunk/drivers/isdn/gigaset/i4l.c +++ b/trunk/drivers/isdn/gigaset/i4l.c @@ -624,6 +624,8 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) { isdn_if *iif; + pr_info("ISDN4Linux interface\n"); + iif = kmalloc(sizeof *iif, GFP_KERNEL); if (!iif) { pr_err("out of memory\n"); @@ -682,7 +684,6 @@ void gigaset_isdn_unregdev(struct cardstate *cs) */ void gigaset_isdn_regdrv(void) { - pr_info("ISDN4Linux interface\n"); /* nothing to do */ } diff --git a/trunk/drivers/lguest/lguest_device.c b/trunk/drivers/lguest/lguest_device.c index 595d73197016..0dc30ffde5ad 100644 --- a/trunk/drivers/lguest/lguest_device.c +++ b/trunk/drivers/lguest/lguest_device.c @@ -381,11 +381,6 @@ static int lg_find_vqs(struct virtio_device *vdev, unsigned nvqs, return PTR_ERR(vqs[i]); } -static const char *lg_bus_name(struct virtio_device *vdev) -{ - return ""; -} - /* The ops structure which hooks everything together. 
*/ static struct virtio_config_ops lguest_config_ops = { .get_features = lg_get_features, @@ -397,7 +392,6 @@ static struct virtio_config_ops lguest_config_ops = { .reset = lg_reset, .find_vqs = lg_find_vqs, .del_vqs = lg_del_vqs, - .bus_name = lg_bus_name, }; /* diff --git a/trunk/drivers/lguest/x86/core.c b/trunk/drivers/lguest/x86/core.c index 39809035320a..65af42f2d593 100644 --- a/trunk/drivers/lguest/x86/core.c +++ b/trunk/drivers/lguest/x86/core.c @@ -697,7 +697,7 @@ void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start) * interrupts are enabled. We always leave interrupts enabled while * running the Guest. */ - regs->eflags = X86_EFLAGS_IF | X86_EFLAGS_BIT1; + regs->eflags = X86_EFLAGS_IF | 0x2; /* * The "Extended Instruction Pointer" register says where the Guest is diff --git a/trunk/drivers/macintosh/rack-meter.c b/trunk/drivers/macintosh/rack-meter.c index 6dc26b61219b..2637c139777b 100644 --- a/trunk/drivers/macintosh/rack-meter.c +++ b/trunk/drivers/macintosh/rack-meter.c @@ -81,13 +81,13 @@ static int rackmeter_ignore_nice; */ static inline cputime64_t get_cpu_idle_time(unsigned int cpu) { - u64 retval; + cputime64_t retval; - retval = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE] + - kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; + retval = cputime64_add(kstat_cpu(cpu).cpustat.idle, + kstat_cpu(cpu).cpustat.iowait); if (rackmeter_ignore_nice) - retval += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice); return retval; } @@ -220,11 +220,13 @@ static void rackmeter_do_timer(struct work_struct *work) int i, offset, load, cumm, pause; cur_jiffies = jiffies64_to_cputime64(get_jiffies_64()); - total_ticks = (unsigned int) (cur_jiffies - rcpu->prev_wall); + total_ticks = (unsigned int)cputime64_sub(cur_jiffies, + rcpu->prev_wall); rcpu->prev_wall = cur_jiffies; total_idle_ticks = get_cpu_idle_time(cpu); - idle_ticks = (unsigned int) (total_idle_ticks - rcpu->prev_idle); + idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks, + rcpu->prev_idle); rcpu->prev_idle = total_idle_ticks; /* We do a very dumb calculation to update the LEDs for now, diff --git a/trunk/drivers/media/video/gspca/gspca.c b/trunk/drivers/media/video/gspca/gspca.c index 2ca10dfec91f..881e04c7ffe6 100644 --- a/trunk/drivers/media/video/gspca/gspca.c +++ b/trunk/drivers/media/video/gspca/gspca.c @@ -838,13 +838,13 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev) gspca_dev->usb_err = 0; /* do the specific subdriver stuff before endpoint selection */ - intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface); - gspca_dev->alt = gspca_dev->cam.bulk ? intf->num_altsetting : 0; + gspca_dev->alt = 0; if (gspca_dev->sd_desc->isoc_init) { ret = gspca_dev->sd_desc->isoc_init(gspca_dev); if (ret < 0) goto unlock; } + intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface); xfer = gspca_dev->cam.bulk ? 
USB_ENDPOINT_XFER_BULK : USB_ENDPOINT_XFER_ISOC; @@ -957,7 +957,7 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev) ret = -EIO; goto out; } - gspca_dev->alt = ep_tb[--alt_idx].alt; + alt = ep_tb[--alt_idx].alt; } } out: diff --git a/trunk/drivers/misc/eeprom/eeprom_93cx6.c b/trunk/drivers/misc/eeprom/eeprom_93cx6.c index 0ff4b02177be..7b33de95c4bf 100644 --- a/trunk/drivers/misc/eeprom/eeprom_93cx6.c +++ b/trunk/drivers/misc/eeprom/eeprom_93cx6.c @@ -63,7 +63,6 @@ static void eeprom_93cx6_startup(struct eeprom_93cx6 *eeprom) eeprom->reg_data_out = 0; eeprom->reg_data_clock = 0; eeprom->reg_chip_select = 1; - eeprom->drive_data = 1; eeprom->register_write(eeprom); /* @@ -102,7 +101,6 @@ static void eeprom_93cx6_write_bits(struct eeprom_93cx6 *eeprom, */ eeprom->reg_data_in = 0; eeprom->reg_data_out = 0; - eeprom->drive_data = 1; /* * Start writing all bits. @@ -142,7 +140,6 @@ static void eeprom_93cx6_read_bits(struct eeprom_93cx6 *eeprom, */ eeprom->reg_data_in = 0; eeprom->reg_data_out = 0; - eeprom->drive_data = 0; /* * Start reading all bits. @@ -234,88 +231,3 @@ void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word, } EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread); -/** - * eeprom_93cx6_wren - set the write enable state - * @eeprom: Pointer to eeprom structure - * @enable: true to enable writes, otherwise disable writes - * - * Set the EEPROM write enable state to either allow or deny - * writes depending on the @enable value. - */ -void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable) -{ - u16 command; - - /* start the command */ - eeprom_93cx6_startup(eeprom); - - /* create command to enable/disable */ - - command = enable ? PCI_EEPROM_EWEN_OPCODE : PCI_EEPROM_EWDS_OPCODE; - command <<= (eeprom->width - 2); - - eeprom_93cx6_write_bits(eeprom, command, - PCI_EEPROM_WIDTH_OPCODE + eeprom->width); - - eeprom_93cx6_cleanup(eeprom); -} -EXPORT_SYMBOL_GPL(eeprom_93cx6_wren); - -/** - * eeprom_93cx6_write - write data to the EEPROM - * @eeprom: Pointer to eeprom structure - * @addr: Address to write data to. - * @data: The data to write to address @addr. - * - * Write the @data to the specified @addr in the EEPROM and - * waiting for the device to finish writing. - * - * Note, since we do not expect large number of write operations - * we delay in between parts of the operation to avoid using excessive - * amounts of CPU time busy waiting. 
- */ -void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom, u8 addr, u16 data) -{ - int timeout = 100; - u16 command; - - /* start the command */ - eeprom_93cx6_startup(eeprom); - - command = PCI_EEPROM_WRITE_OPCODE << eeprom->width; - command |= addr; - - /* send write command */ - eeprom_93cx6_write_bits(eeprom, command, - PCI_EEPROM_WIDTH_OPCODE + eeprom->width); - - /* send data */ - eeprom_93cx6_write_bits(eeprom, data, 16); - - /* get ready to check for busy */ - eeprom->drive_data = 0; - eeprom->reg_chip_select = 1; - eeprom->register_write(eeprom); - - /* wait at-least 250ns to get DO to be the busy signal */ - usleep_range(1000, 2000); - - /* wait for DO to go high to signify finish */ - - while (true) { - eeprom->register_read(eeprom); - - if (eeprom->reg_data_out) - break; - - usleep_range(1000, 2000); - - if (--timeout <= 0) { - printk(KERN_ERR "%s: timeout\n", __func__); - break; - } - } - - eeprom_93cx6_cleanup(eeprom); -} -EXPORT_SYMBOL_GPL(eeprom_93cx6_write); diff --git a/trunk/drivers/misc/sgi-xp/xpnet.c b/trunk/drivers/misc/sgi-xp/xpnet.c index 3fac67a5204c..42f067347bc7 100644 --- a/trunk/drivers/misc/sgi-xp/xpnet.c +++ b/trunk/drivers/misc/sgi-xp/xpnet.c @@ -576,7 +576,7 @@ xpnet_init(void) * report an error if the data is not retrievable and the * packet will be dropped. */ - xpnet_device->features = NETIF_F_HW_CSUM; + xpnet_device->features = NETIF_F_NO_CSUM; result = register_netdev(xpnet_device); if (result != 0) { diff --git a/trunk/drivers/mmc/host/mmci.c b/trunk/drivers/mmc/host/mmci.c index 0726e59fd418..50b5f9926f64 100644 --- a/trunk/drivers/mmc/host/mmci.c +++ b/trunk/drivers/mmc/host/mmci.c @@ -675,8 +675,7 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data, unsigned int status) { /* First check for errors */ - if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR| - MCI_TXUNDERRUN|MCI_RXOVERRUN)) { + if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) { u32 remain, success; /* Terminate the DMA transfer */ @@ -755,12 +754,8 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, } if (!cmd->data || cmd->error) { - if (host->data) { - /* Terminate the DMA transfer */ - if (dma_inprogress(host)) - mmci_dma_data_error(host); + if (host->data) mmci_stop_data(host); - } mmci_request_end(host, cmd->mrq); } else if (!(cmd->data->flags & MMC_DATA_READ)) { mmci_start_data(host, cmd->data); @@ -960,9 +955,8 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); data = host->data; - if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR| - MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND| - MCI_DATABLOCKEND) && data) + if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN| + MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data) mmci_data_irq(host, data, status); cmd = host->cmd; diff --git a/trunk/drivers/net/Kconfig b/trunk/drivers/net/Kconfig index 9845afb37cc8..654a5e94e0e7 100644 --- a/trunk/drivers/net/Kconfig +++ b/trunk/drivers/net/Kconfig @@ -125,8 +125,6 @@ config IFB 'ifb1' etc. 
Look at the iproute2 documentation directory for usage etc -source "drivers/net/team/Kconfig" - config MACVLAN tristate "MAC-VLAN support (EXPERIMENTAL)" depends on EXPERIMENTAL @@ -243,8 +241,6 @@ source "drivers/atm/Kconfig" source "drivers/net/caif/Kconfig" -source "drivers/net/dsa/Kconfig" - source "drivers/net/ethernet/Kconfig" source "drivers/net/fddi/Kconfig" diff --git a/trunk/drivers/net/Makefile b/trunk/drivers/net/Makefile index 1988881853ab..fa877cd2b139 100644 --- a/trunk/drivers/net/Makefile +++ b/trunk/drivers/net/Makefile @@ -17,7 +17,6 @@ obj-$(CONFIG_NET) += Space.o loopback.o obj-$(CONFIG_NETCONSOLE) += netconsole.o obj-$(CONFIG_PHYLIB) += phy/ obj-$(CONFIG_RIONET) += rionet.o -obj-$(CONFIG_NET_TEAM) += team/ obj-$(CONFIG_TUN) += tun.o obj-$(CONFIG_VETH) += veth.o obj-$(CONFIG_VIRTIO_NET) += virtio_net.o @@ -30,7 +29,6 @@ obj-$(CONFIG_DEV_APPLETALK) += appletalk/ obj-$(CONFIG_CAIF) += caif/ obj-$(CONFIG_CAN) += can/ obj-$(CONFIG_ETRAX_ETHERNET) += cris/ -obj-$(CONFIG_NET_DSA) += dsa/ obj-$(CONFIG_ETHERNET) += ethernet/ obj-$(CONFIG_FDDI) += fddi/ obj-$(CONFIG_HIPPI) += hippi/ diff --git a/trunk/drivers/net/bonding/bond_ipv6.c b/trunk/drivers/net/bonding/bond_ipv6.c new file mode 100644 index 000000000000..027a0ee7d85b --- /dev/null +++ b/trunk/drivers/net/bonding/bond_ipv6.c @@ -0,0 +1,225 @@ +/* + * Copyright(c) 2008 Hewlett-Packard Development Company, L.P. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include "bonding.h" + +/* + * Assign bond->master_ipv6 to the next IPv6 address in the list, or + * zero it out if there are none. 
+ */ +static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr) +{ + struct inet6_dev *idev; + + if (!dev) + return; + + idev = in6_dev_get(dev); + if (!idev) + return; + + read_lock_bh(&idev->lock); + if (!list_empty(&idev->addr_list)) { + struct inet6_ifaddr *ifa + = list_first_entry(&idev->addr_list, + struct inet6_ifaddr, if_list); + ipv6_addr_copy(addr, &ifa->addr); + } else + ipv6_addr_set(addr, 0, 0, 0, 0); + + read_unlock_bh(&idev->lock); + + in6_dev_put(idev); +} + +static void bond_na_send(struct net_device *slave_dev, + struct in6_addr *daddr, + int router, + unsigned short vlan_id) +{ + struct in6_addr mcaddr; + struct icmp6hdr icmp6h = { + .icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT, + }; + struct sk_buff *skb; + + icmp6h.icmp6_router = router; + icmp6h.icmp6_solicited = 0; + icmp6h.icmp6_override = 1; + + addrconf_addr_solict_mult(daddr, &mcaddr); + + pr_debug("ipv6 na on slave %s: dest %pI6, src %pI6\n", + slave_dev->name, &mcaddr, daddr); + + skb = ndisc_build_skb(slave_dev, &mcaddr, daddr, &icmp6h, daddr, + ND_OPT_TARGET_LL_ADDR); + + if (!skb) { + pr_err("NA packet allocation failed\n"); + return; + } + + if (vlan_id) { + /* The Ethernet header is not present yet, so it is + * too early to insert a VLAN tag. Force use of an + * out-of-line tag here and let dev_hard_start_xmit() + * insert it if the slave hardware can't. + */ + skb = __vlan_hwaccel_put_tag(skb, vlan_id); + if (!skb) { + pr_err("failed to insert VLAN tag\n"); + return; + } + } + + ndisc_send_skb(skb, slave_dev, NULL, &mcaddr, daddr, &icmp6h); +} + +/* + * Kick out an unsolicited Neighbor Advertisement for an IPv6 address on + * the bonding master. This will help the switch learn our address + * if in active-backup mode. + * + * Caller must hold curr_slave_lock for read or better + */ +void bond_send_unsolicited_na(struct bonding *bond) +{ + struct slave *slave = bond->curr_active_slave; + struct vlan_entry *vlan; + struct inet6_dev *idev; + int is_router; + + pr_debug("%s: bond %s slave %s\n", bond->dev->name, + __func__, slave ? slave->dev->name : "NULL"); + + if (!slave || !bond->send_unsol_na || + test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) + return; + + bond->send_unsol_na--; + + idev = in6_dev_get(bond->dev); + if (!idev) + return; + + is_router = !!idev->cnf.forwarding; + + in6_dev_put(idev); + + if (!ipv6_addr_any(&bond->master_ipv6)) + bond_na_send(slave->dev, &bond->master_ipv6, is_router, 0); + + list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { + if (!ipv6_addr_any(&vlan->vlan_ipv6)) { + bond_na_send(slave->dev, &vlan->vlan_ipv6, is_router, + vlan->vlan_id); + } + } +} + +/* + * bond_inet6addr_event: handle inet6addr notifier chain events. + * + * We keep track of device IPv6 addresses primarily to use as source + * addresses in NS probes. + * + * We track one IPv6 for the main device (if it has one). 
+ */ +static int bond_inet6addr_event(struct notifier_block *this, + unsigned long event, + void *ptr) +{ + struct inet6_ifaddr *ifa = ptr; + struct net_device *vlan_dev, *event_dev = ifa->idev->dev; + struct bonding *bond; + struct vlan_entry *vlan; + struct bond_net *bn = net_generic(dev_net(event_dev), bond_net_id); + + list_for_each_entry(bond, &bn->dev_list, bond_list) { + if (bond->dev == event_dev) { + switch (event) { + case NETDEV_UP: + if (ipv6_addr_any(&bond->master_ipv6)) + ipv6_addr_copy(&bond->master_ipv6, + &ifa->addr); + return NOTIFY_OK; + case NETDEV_DOWN: + if (ipv6_addr_equal(&bond->master_ipv6, + &ifa->addr)) + bond_glean_dev_ipv6(bond->dev, + &bond->master_ipv6); + return NOTIFY_OK; + default: + return NOTIFY_DONE; + } + } + + list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { + rcu_read_lock(); + vlan_dev = __vlan_find_dev_deep(bond->dev, + vlan->vlan_id); + rcu_read_unlock(); + if (vlan_dev == event_dev) { + switch (event) { + case NETDEV_UP: + if (ipv6_addr_any(&vlan->vlan_ipv6)) + ipv6_addr_copy(&vlan->vlan_ipv6, + &ifa->addr); + return NOTIFY_OK; + case NETDEV_DOWN: + if (ipv6_addr_equal(&vlan->vlan_ipv6, + &ifa->addr)) + bond_glean_dev_ipv6(vlan_dev, + &vlan->vlan_ipv6); + return NOTIFY_OK; + default: + return NOTIFY_DONE; + } + } + } + } + return NOTIFY_DONE; +} + +static struct notifier_block bond_inet6addr_notifier = { + .notifier_call = bond_inet6addr_event, +}; + +void bond_register_ipv6_notifier(void) +{ + register_inet6addr_notifier(&bond_inet6addr_notifier); +} + +void bond_unregister_ipv6_notifier(void) +{ + unregister_inet6addr_notifier(&bond_inet6addr_notifier); +} + diff --git a/trunk/drivers/net/bonding/bond_main.c b/trunk/drivers/net/bonding/bond_main.c index 435984ad8b2f..7f8756825b8a 100644 --- a/trunk/drivers/net/bonding/bond_main.c +++ b/trunk/drivers/net/bonding/bond_main.c @@ -428,34 +428,27 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, * @bond_dev: bonding net device that got called * @vid: vlan id being added */ -static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid) +static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid) { struct bonding *bond = netdev_priv(bond_dev); - struct slave *slave, *stop_at; + struct slave *slave; int i, res; bond_for_each_slave(bond, slave, i) { - res = vlan_vid_add(slave->dev, vid); - if (res) - goto unwind; + struct net_device *slave_dev = slave->dev; + const struct net_device_ops *slave_ops = slave_dev->netdev_ops; + + if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) && + slave_ops->ndo_vlan_rx_add_vid) { + slave_ops->ndo_vlan_rx_add_vid(slave_dev, vid); + } } res = bond_add_vlan(bond, vid); if (res) { pr_err("%s: Error: Failed to add vlan id %d\n", bond_dev->name, vid); - return res; } - - return 0; - -unwind: - /* unwind from head to the slave that failed */ - stop_at = slave; - bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) - vlan_vid_del(slave->dev, vid); - - return res; } /** @@ -463,48 +456,56 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid) * @bond_dev: bonding net device that got called * @vid: vlan id being removed */ -static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid) +static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave; int i, res; - bond_for_each_slave(bond, slave, i) - vlan_vid_del(slave->dev, vid); + bond_for_each_slave(bond, slave, i) { + struct 
net_device *slave_dev = slave->dev; + const struct net_device_ops *slave_ops = slave_dev->netdev_ops; + + if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) && + slave_ops->ndo_vlan_rx_kill_vid) { + slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vid); + } + } res = bond_del_vlan(bond, vid); if (res) { pr_err("%s: Error: Failed to remove vlan id %d\n", bond_dev->name, vid); - return res; } - - return 0; } static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *slave_dev) { struct vlan_entry *vlan; - int res; + const struct net_device_ops *slave_ops = slave_dev->netdev_ops; - list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { - res = vlan_vid_add(slave_dev, vlan->vlan_id); - if (res) - pr_warning("%s: Failed to add vlan id %d to device %s\n", - bond->dev->name, vlan->vlan_id, - slave_dev->name); - } + if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) || + !(slave_ops->ndo_vlan_rx_add_vid)) + return; + + list_for_each_entry(vlan, &bond->vlan_list, vlan_list) + slave_ops->ndo_vlan_rx_add_vid(slave_dev, vlan->vlan_id); } static void bond_del_vlans_from_slave(struct bonding *bond, struct net_device *slave_dev) { + const struct net_device_ops *slave_ops = slave_dev->netdev_ops; struct vlan_entry *vlan; + if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) || + !(slave_ops->ndo_vlan_rx_kill_vid)) + return; + list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { if (!vlan->vlan_id) continue; - vlan_vid_del(slave_dev, vlan->vlan_id); + slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vlan->vlan_id); } } @@ -1324,12 +1325,11 @@ static int bond_sethwaddr(struct net_device *bond_dev, return 0; } -static netdev_features_t bond_fix_features(struct net_device *dev, - netdev_features_t features) +static u32 bond_fix_features(struct net_device *dev, u32 features) { struct slave *slave; struct bonding *bond = netdev_priv(dev); - netdev_features_t mask; + u32 mask; int i; read_lock(&bond->lock); @@ -1363,7 +1363,7 @@ static void bond_compute_features(struct bonding *bond) { struct slave *slave; struct net_device *bond_dev = bond->dev; - netdev_features_t vlan_features = BOND_VLAN_FEATURES; + u32 vlan_features = BOND_VLAN_FEATURES; unsigned short max_hard_header_len = ETH_HLEN; int i; @@ -1822,7 +1822,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) "but new slave device does not support netpoll.\n", bond_dev->name); res = -EBUSY; - goto err_detach; + goto err_close; } } #endif @@ -1831,7 +1831,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) res = bond_create_slave_symlinks(bond_dev, slave_dev); if (res) - goto err_detach; + goto err_close; res = netdev_rx_handler_register(slave_dev, bond_handle_frame, new_slave); @@ -1852,11 +1852,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) err_dest_symlinks: bond_destroy_slave_symlinks(bond_dev, slave_dev); -err_detach: - write_lock_bh(&bond->lock); - bond_detach_slave(bond, new_slave); - write_unlock_bh(&bond->lock); - err_close: dev_close(slave_dev); @@ -1902,7 +1897,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) struct bonding *bond = netdev_priv(bond_dev); struct slave *slave, *oldcurrent; struct sockaddr addr; - netdev_features_t old_features = bond_dev->features; + u32 old_features = bond_dev->features; /* slave is not a slave or master is not master of this slave */ if (!(slave_dev->flags & IFF_SLAVE) || @@ -4344,7 +4339,7 @@ static void bond_setup(struct net_device *bond_dev) NETIF_F_HW_VLAN_RX | 
NETIF_F_HW_VLAN_FILTER; - bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM); + bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM); bond_dev->features |= bond_dev->hw_features; } diff --git a/trunk/drivers/net/caif/caif_hsi.c b/trunk/drivers/net/caif/caif_hsi.c index 0a4fc62a381d..073352517adc 100644 --- a/trunk/drivers/net/caif/caif_hsi.c +++ b/trunk/drivers/net/caif/caif_hsi.c @@ -117,6 +117,15 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi) dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__); + + ret = cfhsi->dev->cfhsi_wake_up(cfhsi->dev); + if (ret) { + dev_warn(&cfhsi->ndev->dev, + "%s: can't wake up HSI interface: %d.\n", + __func__, ret); + return ret; + } + do { ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev, &fifo_occupancy); @@ -159,6 +168,8 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi) } } while (1); + cfhsi->dev->cfhsi_wake_down(cfhsi->dev); + return ret; } @@ -933,7 +944,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) /* Create HSI frame. */ len = cfhsi_tx_frm(desc, cfhsi); - WARN_ON(!len); + BUG_ON(!len); /* Set up new transfer. */ res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev); diff --git a/trunk/drivers/net/caif/caif_serial.c b/trunk/drivers/net/caif/caif_serial.c index 8a3054b84812..23406e62c0b0 100644 --- a/trunk/drivers/net/caif/caif_serial.c +++ b/trunk/drivers/net/caif/caif_serial.c @@ -38,15 +38,15 @@ MODULE_ALIAS_LDISC(N_CAIF); /*This list is protected by the rtnl lock. */ static LIST_HEAD(ser_list); -static bool ser_loop; +static int ser_loop; module_param(ser_loop, bool, S_IRUGO); MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode."); -static bool ser_use_stx = true; +static int ser_use_stx = 1; module_param(ser_use_stx, bool, S_IRUGO); MODULE_PARM_DESC(ser_use_stx, "STX enabled or not."); -static bool ser_use_fcs = true; +static int ser_use_fcs = 1; module_param(ser_use_fcs, bool, S_IRUGO); MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not."); @@ -261,7 +261,7 @@ static int handle_tx(struct ser_device *ser) skb_pull(skb, tty_wr); if (skb->len == 0) { struct sk_buff *tmp = skb_dequeue(&ser->head); - WARN_ON(tmp != skb); + BUG_ON(tmp != skb); if (in_interrupt()) dev_kfree_skb_irq(skb); else @@ -305,7 +305,7 @@ static void ldisc_tx_wakeup(struct tty_struct *tty) ser = tty->disc_data; BUG_ON(ser == NULL); - WARN_ON(ser->tty != tty); + BUG_ON(ser->tty != tty); handle_tx(ser); } diff --git a/trunk/drivers/net/caif/caif_shmcore.c b/trunk/drivers/net/caif/caif_shmcore.c index 5b2041319a32..d4b26fb24ed9 100644 --- a/trunk/drivers/net/caif/caif_shmcore.c +++ b/trunk/drivers/net/caif/caif_shmcore.c @@ -238,11 +238,11 @@ int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv) if ((avail_emptybuff > HIGH_WATERMARK) && (!pshm_drv->tx_empty_available)) { pshm_drv->tx_empty_available = 1; - spin_unlock_irqrestore(&pshm_drv->lock, flags); pshm_drv->cfdev.flowctrl (pshm_drv->pshm_dev->pshm_netdev, CAIF_FLOW_ON); + spin_unlock_irqrestore(&pshm_drv->lock, flags); /* Schedule the work queue. if required */ if (!work_pending(&pshm_drv->shm_tx_work)) @@ -285,7 +285,6 @@ static void shm_rx_work_func(struct work_struct *rx_work) list_entry(pshm_drv->rx_full_list.next, struct buf_list, list); list_del_init(&pbuf->list); - spin_unlock_irqrestore(&pshm_drv->lock, flags); /* Retrieve pointer to start of the packet descriptor area. */ pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr; @@ -337,11 +336,7 @@ static void shm_rx_work_func(struct work_struct *rx_work) /* Get a suitable CAIF packet and copy in data. 
*/ skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev, frm_pck_len + 1); - - if (skb == NULL) { - pr_info("OOM: Try next frame in descriptor\n"); - break; - } + BUG_ON(skb == NULL); p = skb_put(skb, frm_pck_len); memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len); @@ -365,7 +360,6 @@ static void shm_rx_work_func(struct work_struct *rx_work) pck_desc++; } - spin_lock_irqsave(&pshm_drv->lock, flags); list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list); spin_unlock_irqrestore(&pshm_drv->lock, flags); @@ -418,6 +412,7 @@ static void shm_tx_work_func(struct work_struct *tx_work) if (skb == NULL) goto send_msg; + /* Check the available no. of buffers in the empty list */ list_for_each(pos, &pshm_drv->tx_empty_list) avail_emptybuff++; @@ -426,11 +421,9 @@ static void shm_tx_work_func(struct work_struct *tx_work) pshm_drv->tx_empty_available) { /* Update blocking condition. */ pshm_drv->tx_empty_available = 0; - spin_unlock_irqrestore(&pshm_drv->lock, flags); pshm_drv->cfdev.flowctrl (pshm_drv->pshm_dev->pshm_netdev, CAIF_FLOW_OFF); - spin_lock_irqsave(&pshm_drv->lock, flags); } /* * We simply return back to the caller if we do not have space @@ -476,8 +469,6 @@ static void shm_tx_work_func(struct work_struct *tx_work) } skb = skb_dequeue(&pshm_drv->sk_qhead); - if (skb == NULL) - break; /* Copy in CAIF frame. */ skb_copy_bits(skb, 0, pbuf->desc_vptr + pbuf->frm_ofs + SHM_HDR_LEN + @@ -486,7 +477,7 @@ static void shm_tx_work_func(struct work_struct *tx_work) pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++; pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes += frmlen; - dev_kfree_skb_irq(skb); + dev_kfree_skb(skb); /* Fill in the shared memory packet descriptor area. */ pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr); @@ -521,11 +512,16 @@ static void shm_tx_work_func(struct work_struct *tx_work) static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev) { struct shmdrv_layer *pshm_drv; + unsigned long flags = 0; pshm_drv = netdev_priv(shm_netdev); + spin_lock_irqsave(&pshm_drv->lock, flags); + skb_queue_tail(&pshm_drv->sk_qhead, skb); + spin_unlock_irqrestore(&pshm_drv->lock, flags); + /* Schedule Tx work queue. for deferred processing of skbs*/ if (!work_pending(&pshm_drv->shm_tx_work)) queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work); @@ -610,7 +606,6 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev) pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr + (NR_TX_BUF * TX_BUF_SZ); - spin_lock_init(&pshm_drv->lock); INIT_LIST_HEAD(&pshm_drv->tx_empty_list); INIT_LIST_HEAD(&pshm_drv->tx_pend_list); INIT_LIST_HEAD(&pshm_drv->tx_full_list); @@ -645,7 +640,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev) tx_buf->frm_ofs = SHM_CAIF_FRM_OFS; if (pshm_dev->shm_loopback) - tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr; + tx_buf->desc_vptr = (char *)tx_buf->phy_addr; else tx_buf->desc_vptr = ioremap(tx_buf->phy_addr, TX_BUF_SZ); @@ -669,7 +664,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev) rx_buf->len = RX_BUF_SZ; if (pshm_dev->shm_loopback) - rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr; + rx_buf->desc_vptr = (char *)rx_buf->phy_addr; else rx_buf->desc_vptr = ioremap(rx_buf->phy_addr, RX_BUF_SZ); diff --git a/trunk/drivers/net/caif/caif_spi.c b/trunk/drivers/net/caif/caif_spi.c index 96391c36fa74..05e791f46aef 100644 --- a/trunk/drivers/net/caif/caif_spi.c +++ b/trunk/drivers/net/caif/caif_spi.c @@ -35,7 +35,7 @@ MODULE_DESCRIPTION("CAIF SPI driver"); /* Returns the number of padding bytes for alignment. 
*/ #define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1))))) -static bool spi_loop; +static int spi_loop; module_param(spi_loop, bool, S_IRUGO); MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode."); @@ -226,7 +226,7 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf, "Tx data (Len: %d):\n", cfspi->tx_cpck_len); len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len), - cfspi->xfer.va_tx[0], + cfspi->xfer.va_tx, (cfspi->tx_cpck_len + SPI_CMD_SZ), 100); len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), @@ -599,11 +599,48 @@ static int cfspi_close(struct net_device *dev) netif_stop_queue(dev); return 0; } +static const struct net_device_ops cfspi_ops = { + .ndo_open = cfspi_open, + .ndo_stop = cfspi_close, + .ndo_start_xmit = cfspi_xmit +}; -static int cfspi_init(struct net_device *dev) +static void cfspi_setup(struct net_device *dev) { - int res = 0; struct cfspi *cfspi = netdev_priv(dev); + dev->features = 0; + dev->netdev_ops = &cfspi_ops; + dev->type = ARPHRD_CAIF; + dev->flags = IFF_NOARP | IFF_POINTOPOINT; + dev->tx_queue_len = 0; + dev->mtu = SPI_MAX_PAYLOAD_SIZE; + dev->destructor = free_netdev; + skb_queue_head_init(&cfspi->qhead); + skb_queue_head_init(&cfspi->chead); + cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; + cfspi->cfdev.use_frag = false; + cfspi->cfdev.use_stx = false; + cfspi->cfdev.use_fcs = false; + cfspi->ndev = dev; +} + +int cfspi_spi_probe(struct platform_device *pdev) +{ + struct cfspi *cfspi = NULL; + struct net_device *ndev; + struct cfspi_dev *dev; + int res; + dev = (struct cfspi_dev *)pdev->dev.platform_data; + + ndev = alloc_netdev(sizeof(struct cfspi), + "cfspi%d", cfspi_setup); + if (!ndev) + return -ENOMEM; + + cfspi = netdev_priv(ndev); + netif_stop_queue(ndev); + cfspi->ndev = ndev; + cfspi->pdev = pdev; /* Set flow info. */ cfspi->flow_off_sent = 0; @@ -619,11 +656,16 @@ static int cfspi_init(struct net_device *dev) cfspi->slave_talked = false; } + /* Assign the SPI device. */ + cfspi->dev = dev; + /* Assign the device ifc to this SPI interface. */ + dev->ifc = &cfspi->ifc; + /* Allocate DMA buffers. */ - cfspi->xfer.va_tx[0] = dma_alloc(&cfspi->xfer.pa_tx[0]); - if (!cfspi->xfer.va_tx[0]) { + cfspi->xfer.va_tx = dma_alloc(&cfspi->xfer.pa_tx); + if (!cfspi->xfer.va_tx) { res = -ENODEV; - goto err_dma_alloc_tx_0; + goto err_dma_alloc_tx; } cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx); @@ -672,87 +714,6 @@ static int cfspi_init(struct net_device *dev) /* Schedule the work queue. */ queue_work(cfspi->wq, &cfspi->work); - return 0; - - err_create_wq: - dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx); - err_dma_alloc_rx: - dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]); - err_dma_alloc_tx_0: - return res; -} - -static void cfspi_uninit(struct net_device *dev) -{ - struct cfspi *cfspi = netdev_priv(dev); - - /* Remove from list. */ - spin_lock(&cfspi_list_lock); - list_del(&cfspi->list); - spin_unlock(&cfspi_list_lock); - - cfspi->ndev = NULL; - /* Free DMA buffers. */ - dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx); - dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]); - set_bit(SPI_TERMINATE, &cfspi->state); - wake_up_interruptible(&cfspi->wait); - destroy_workqueue(cfspi->wq); - /* Destroy debugfs directory and files. 
*/ - dev_debugfs_rem(cfspi); - return; -} - -static const struct net_device_ops cfspi_ops = { - .ndo_open = cfspi_open, - .ndo_stop = cfspi_close, - .ndo_init = cfspi_init, - .ndo_uninit = cfspi_uninit, - .ndo_start_xmit = cfspi_xmit -}; - -static void cfspi_setup(struct net_device *dev) -{ - struct cfspi *cfspi = netdev_priv(dev); - dev->features = 0; - dev->netdev_ops = &cfspi_ops; - dev->type = ARPHRD_CAIF; - dev->flags = IFF_NOARP | IFF_POINTOPOINT; - dev->tx_queue_len = 0; - dev->mtu = SPI_MAX_PAYLOAD_SIZE; - dev->destructor = free_netdev; - skb_queue_head_init(&cfspi->qhead); - skb_queue_head_init(&cfspi->chead); - cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; - cfspi->cfdev.use_frag = false; - cfspi->cfdev.use_stx = false; - cfspi->cfdev.use_fcs = false; - cfspi->ndev = dev; -} - -int cfspi_spi_probe(struct platform_device *pdev) -{ - struct cfspi *cfspi = NULL; - struct net_device *ndev; - struct cfspi_dev *dev; - int res; - dev = (struct cfspi_dev *)pdev->dev.platform_data; - - ndev = alloc_netdev(sizeof(struct cfspi), - "cfspi%d", cfspi_setup); - if (!dev) - return -ENODEV; - - cfspi = netdev_priv(ndev); - netif_stop_queue(ndev); - cfspi->ndev = ndev; - cfspi->pdev = pdev; - - /* Assign the SPI device. */ - cfspi->dev = dev; - /* Assign the device ifc to this SPI interface. */ - dev->ifc = &cfspi->ifc; - /* Register network device. */ res = register_netdev(ndev); if (res) { @@ -762,6 +723,15 @@ int cfspi_spi_probe(struct platform_device *pdev) return res; err_net_reg: + dev_debugfs_rem(cfspi); + set_bit(SPI_TERMINATE, &cfspi->state); + wake_up_interruptible(&cfspi->wait); + destroy_workqueue(cfspi->wq); + err_create_wq: + dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx); + err_dma_alloc_rx: + dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx); + err_dma_alloc_tx: free_netdev(ndev); return res; @@ -769,8 +739,34 @@ int cfspi_spi_probe(struct platform_device *pdev) int cfspi_spi_remove(struct platform_device *pdev) { - /* Everything is done in cfspi_uninit(). */ - return 0; + struct list_head *list_node; + struct list_head *n; + struct cfspi *cfspi = NULL; + struct cfspi_dev *dev; + + dev = (struct cfspi_dev *)pdev->dev.platform_data; + spin_lock(&cfspi_list_lock); + list_for_each_safe(list_node, n, &cfspi_list) { + cfspi = list_entry(list_node, struct cfspi, list); + /* Find the corresponding device. */ + if (cfspi->dev == dev) { + /* Remove from list. */ + list_del(list_node); + /* Free DMA buffers. */ + dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx); + dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx); + set_bit(SPI_TERMINATE, &cfspi->state); + wake_up_interruptible(&cfspi->wait); + destroy_workqueue(cfspi->wq); + /* Destroy debugfs directory and files. */ + dev_debugfs_rem(cfspi); + unregister_netdev(cfspi->ndev); + spin_unlock(&cfspi_list_lock); + return 0; + } + } + spin_unlock(&cfspi_list_lock); + return -ENODEV; } static void __exit cfspi_exit_module(void) @@ -781,7 +777,7 @@ static void __exit cfspi_exit_module(void) list_for_each_safe(list_node, n, &cfspi_list) { cfspi = list_entry(list_node, struct cfspi, list); - unregister_netdev(cfspi->ndev); + platform_device_unregister(cfspi->pdev); } /* Destroy sysfs files. 
*/ diff --git a/trunk/drivers/net/can/Kconfig b/trunk/drivers/net/can/Kconfig index ab45758c49a4..f6c98fb4a517 100644 --- a/trunk/drivers/net/can/Kconfig +++ b/trunk/drivers/net/can/Kconfig @@ -116,8 +116,6 @@ source "drivers/net/can/sja1000/Kconfig" source "drivers/net/can/c_can/Kconfig" -source "drivers/net/can/cc770/Kconfig" - source "drivers/net/can/usb/Kconfig" source "drivers/net/can/softing/Kconfig" diff --git a/trunk/drivers/net/can/Makefile b/trunk/drivers/net/can/Makefile index 938be37b670c..24ebfe8d758a 100644 --- a/trunk/drivers/net/can/Makefile +++ b/trunk/drivers/net/can/Makefile @@ -14,7 +14,6 @@ obj-y += softing/ obj-$(CONFIG_CAN_SJA1000) += sja1000/ obj-$(CONFIG_CAN_MSCAN) += mscan/ obj-$(CONFIG_CAN_C_CAN) += c_can/ -obj-$(CONFIG_CAN_CC770) += cc770/ obj-$(CONFIG_CAN_AT91) += at91_can.o obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o obj-$(CONFIG_CAN_MCP251X) += mcp251x.o diff --git a/trunk/drivers/net/can/at91_can.c b/trunk/drivers/net/can/at91_can.c index 6ea905c2cf6d..044ea0647b04 100644 --- a/trunk/drivers/net/can/at91_can.c +++ b/trunk/drivers/net/can/at91_can.c @@ -1383,7 +1383,18 @@ static struct platform_driver at91_can_driver = { .id_table = at91_can_id_table, }; -module_platform_driver(at91_can_driver); +static int __init at91_can_module_init(void) +{ + return platform_driver_register(&at91_can_driver); +} + +static void __exit at91_can_module_exit(void) +{ + platform_driver_unregister(&at91_can_driver); +} + +module_init(at91_can_module_init); +module_exit(at91_can_module_exit); MODULE_AUTHOR("Marc Kleine-Budde "); MODULE_LICENSE("GPL v2"); diff --git a/trunk/drivers/net/can/bfin_can.c b/trunk/drivers/net/can/bfin_can.c index 349e0fabb63a..a1c5abc38cd2 100644 --- a/trunk/drivers/net/can/bfin_can.c +++ b/trunk/drivers/net/can/bfin_can.c @@ -676,7 +676,17 @@ static struct platform_driver bfin_can_driver = { }, }; -module_platform_driver(bfin_can_driver); +static int __init bfin_can_init(void) +{ + return platform_driver_register(&bfin_can_driver); +} +module_init(bfin_can_init); + +static void __exit bfin_can_exit(void) +{ + platform_driver_unregister(&bfin_can_driver); +} +module_exit(bfin_can_exit); MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/net/can/c_can/c_can_platform.c b/trunk/drivers/net/can/c_can/c_can_platform.c index 5e1a5ff6476e..0b5c6f8bdd34 100644 --- a/trunk/drivers/net/can/c_can/c_can_platform.c +++ b/trunk/drivers/net/can/c_can/c_can_platform.c @@ -197,7 +197,17 @@ static struct platform_driver c_can_plat_driver = { .remove = __devexit_p(c_can_plat_remove), }; -module_platform_driver(c_can_plat_driver); +static int __init c_can_plat_init(void) +{ + return platform_driver_register(&c_can_plat_driver); +} +module_init(c_can_plat_init); + +static void __exit c_can_plat_exit(void) +{ + platform_driver_unregister(&c_can_plat_driver); +} +module_exit(c_can_plat_exit); MODULE_AUTHOR("Bhupesh Sharma "); MODULE_LICENSE("GPL v2"); diff --git a/trunk/drivers/net/can/cc770/Kconfig b/trunk/drivers/net/can/cc770/Kconfig deleted file mode 100644 index 22c07a8c8b43..000000000000 --- a/trunk/drivers/net/can/cc770/Kconfig +++ /dev/null @@ -1,21 +0,0 @@ -menuconfig CAN_CC770 - tristate "Bosch CC770 and Intel AN82527 devices" - depends on CAN_DEV && HAS_IOMEM - -if CAN_CC770 - -config CAN_CC770_ISA - tristate "ISA Bus based legacy CC770 driver" - ---help--- - This driver adds legacy support for CC770 and AN82527 chips - connected to the ISA bus using I/O port, memory mapped or - indirect access. 
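# For reference, these tristate symbols surfaced in .config in the usual way;
# an illustrative configuration that built the CC770 core and both bus
# bindings as modules (symbol names taken from the entries above and below):
#
#   CONFIG_CAN_CC770=m
#   CONFIG_CAN_CC770_ISA=m
#   CONFIG_CAN_CC770_PLATFORM=m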
- -config CAN_CC770_PLATFORM - tristate "Generic Platform Bus based CC770 driver" - ---help--- - This driver adds support for the CC770 and AN82527 chips - connected to the "platform bus" (Linux abstraction for directly - to the processor attached devices). - -endif diff --git a/trunk/drivers/net/can/cc770/Makefile b/trunk/drivers/net/can/cc770/Makefile deleted file mode 100644 index 9fb8321b33eb..000000000000 --- a/trunk/drivers/net/can/cc770/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -# -# Makefile for the Bosch CC770 CAN controller drivers. -# - -obj-$(CONFIG_CAN_CC770) += cc770.o -obj-$(CONFIG_CAN_CC770_ISA) += cc770_isa.o -obj-$(CONFIG_CAN_CC770_PLATFORM) += cc770_platform.o - -ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/trunk/drivers/net/can/cc770/cc770.c b/trunk/drivers/net/can/cc770/cc770.c deleted file mode 100644 index 766896747643..000000000000 --- a/trunk/drivers/net/can/cc770/cc770.c +++ /dev/null @@ -1,881 +0,0 @@ -/* - * Core driver for the CC770 and AN82527 CAN controllers - * - * Copyright (C) 2009, 2011 Wolfgang Grandegger - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "cc770.h" - -MODULE_AUTHOR("Wolfgang Grandegger "); -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION(KBUILD_MODNAME "CAN netdevice driver"); - -/* - * The CC770 is a CAN controller from Bosch, which is 100% compatible - * with the AN82527 from Intel, but with "bugs" being fixed and some - * additional functionality, mainly: - * - * 1. RX and TX error counters are readable. - * 2. Support of silent (listen-only) mode. - * 3. Message object 15 can receive all types of frames, also RTR and EFF. - * - * Details are available from Bosch's "CC770_Product_Info_2007-01.pdf", - * which explains in detail the compatibility between the CC770 and the - * 82527. This driver use the additional functionality 3. on real CC770 - * devices. Unfortunately, the CC770 does still not store the message - * identifier of received remote transmission request frames and - * therefore it's set to 0. - * - * The message objects 1..14 can be used for TX and RX while the message - * objects 15 is optimized for RX. It has a shadow register for reliable - * data receiption under heavy bus load. Therefore it makes sense to use - * this message object for the needed use case. The frame type (EFF/SFF) - * for the message object 15 can be defined via kernel module parameter - * "msgobj15_eff". If not equal 0, it will receive 29-bit EFF frames, - * otherwise 11 bit SFF messages. 
- */ -static int msgobj15_eff; -module_param(msgobj15_eff, int, S_IRUGO); -MODULE_PARM_DESC(msgobj15_eff, "Extended 29-bit frames for message object 15 " - "(default: 11-bit standard frames)"); - -static int i82527_compat; -module_param(i82527_compat, int, S_IRUGO); -MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 comptibility mode " - "without using additional functions"); - -/* - * This driver uses the last 5 message objects 11..15. The definitions - * and structure below allows to configure and assign them to the real - * message object. - */ -static unsigned char cc770_obj_flags[CC770_OBJ_MAX] = { - [CC770_OBJ_RX0] = CC770_OBJ_FLAG_RX, - [CC770_OBJ_RX1] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_EFF, - [CC770_OBJ_RX_RTR0] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_RTR, - [CC770_OBJ_RX_RTR1] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_RTR | - CC770_OBJ_FLAG_EFF, - [CC770_OBJ_TX] = 0, -}; - -static struct can_bittiming_const cc770_bittiming_const = { - .name = KBUILD_MODNAME, - .tseg1_min = 1, - .tseg1_max = 16, - .tseg2_min = 1, - .tseg2_max = 8, - .sjw_max = 4, - .brp_min = 1, - .brp_max = 64, - .brp_inc = 1, -}; - -static inline int intid2obj(unsigned int intid) -{ - if (intid == 2) - return 0; - else - return MSGOBJ_LAST + 2 - intid; -} - -static void enable_all_objs(const struct net_device *dev) -{ - struct cc770_priv *priv = netdev_priv(dev); - u8 msgcfg; - unsigned char obj_flags; - unsigned int o, mo; - - for (o = 0; o < ARRAY_SIZE(priv->obj_flags); o++) { - obj_flags = priv->obj_flags[o]; - mo = obj2msgobj(o); - - if (obj_flags & CC770_OBJ_FLAG_RX) { - /* - * We don't need extra objects for RTR and EFF if - * the additional CC770 functions are enabled. - */ - if (priv->control_normal_mode & CTRL_EAF) { - if (o > 0) - continue; - netdev_dbg(dev, "Message object %d for " - "RX data, RTR, SFF and EFF\n", mo); - } else { - netdev_dbg(dev, - "Message object %d for RX %s %s\n", - mo, obj_flags & CC770_OBJ_FLAG_RTR ? - "RTR" : "data", - obj_flags & CC770_OBJ_FLAG_EFF ? 
- "EFF" : "SFF"); - } - - if (obj_flags & CC770_OBJ_FLAG_EFF) - msgcfg = MSGCFG_XTD; - else - msgcfg = 0; - if (obj_flags & CC770_OBJ_FLAG_RTR) - msgcfg |= MSGCFG_DIR; - - cc770_write_reg(priv, msgobj[mo].config, msgcfg); - cc770_write_reg(priv, msgobj[mo].ctrl0, - MSGVAL_SET | TXIE_RES | - RXIE_SET | INTPND_RES); - - if (obj_flags & CC770_OBJ_FLAG_RTR) - cc770_write_reg(priv, msgobj[mo].ctrl1, - NEWDAT_RES | CPUUPD_SET | - TXRQST_RES | RMTPND_RES); - else - cc770_write_reg(priv, msgobj[mo].ctrl1, - NEWDAT_RES | MSGLST_RES | - TXRQST_RES | RMTPND_RES); - } else { - netdev_dbg(dev, "Message object %d for " - "TX data, RTR, SFF and EFF\n", mo); - - cc770_write_reg(priv, msgobj[mo].ctrl1, - RMTPND_RES | TXRQST_RES | - CPUUPD_RES | NEWDAT_RES); - cc770_write_reg(priv, msgobj[mo].ctrl0, - MSGVAL_RES | TXIE_RES | - RXIE_RES | INTPND_RES); - } - } -} - -static void disable_all_objs(const struct cc770_priv *priv) -{ - int o, mo; - - for (o = 0; o < ARRAY_SIZE(priv->obj_flags); o++) { - mo = obj2msgobj(o); - - if (priv->obj_flags[o] & CC770_OBJ_FLAG_RX) { - if (o > 0 && priv->control_normal_mode & CTRL_EAF) - continue; - - cc770_write_reg(priv, msgobj[mo].ctrl1, - NEWDAT_RES | MSGLST_RES | - TXRQST_RES | RMTPND_RES); - cc770_write_reg(priv, msgobj[mo].ctrl0, - MSGVAL_RES | TXIE_RES | - RXIE_RES | INTPND_RES); - } else { - /* Clear message object for send */ - cc770_write_reg(priv, msgobj[mo].ctrl1, - RMTPND_RES | TXRQST_RES | - CPUUPD_RES | NEWDAT_RES); - cc770_write_reg(priv, msgobj[mo].ctrl0, - MSGVAL_RES | TXIE_RES | - RXIE_RES | INTPND_RES); - } - } -} - -static void set_reset_mode(struct net_device *dev) -{ - struct cc770_priv *priv = netdev_priv(dev); - - /* Enable configuration and puts chip in bus-off, disable interrupts */ - cc770_write_reg(priv, control, CTRL_CCE | CTRL_INI); - - priv->can.state = CAN_STATE_STOPPED; - - /* Clear interrupts */ - cc770_read_reg(priv, interrupt); - - /* Clear status register */ - cc770_write_reg(priv, status, 0); - - /* Disable all used message objects */ - disable_all_objs(priv); -} - -static void set_normal_mode(struct net_device *dev) -{ - struct cc770_priv *priv = netdev_priv(dev); - - /* Clear interrupts */ - cc770_read_reg(priv, interrupt); - - /* Clear status register and pre-set last error code */ - cc770_write_reg(priv, status, STAT_LEC_MASK); - - /* Enable all used message objects*/ - enable_all_objs(dev); - - /* - * Clear bus-off, interrupts only for errors, - * not for status change - */ - cc770_write_reg(priv, control, priv->control_normal_mode); - - priv->can.state = CAN_STATE_ERROR_ACTIVE; -} - -static void chipset_init(struct cc770_priv *priv) -{ - int mo, id, data; - - /* Enable configuration and put chip in bus-off, disable interrupts */ - cc770_write_reg(priv, control, (CTRL_CCE | CTRL_INI)); - - /* Set CLKOUT divider and slew rates */ - cc770_write_reg(priv, clkout, priv->clkout); - - /* Configure CPU interface / CLKOUT enable */ - cc770_write_reg(priv, cpu_interface, priv->cpu_interface); - - /* Set bus configuration */ - cc770_write_reg(priv, bus_config, priv->bus_config); - - /* Clear interrupts */ - cc770_read_reg(priv, interrupt); - - /* Clear status register */ - cc770_write_reg(priv, status, 0); - - /* Clear and invalidate message objects */ - for (mo = MSGOBJ_FIRST; mo <= MSGOBJ_LAST; mo++) { - cc770_write_reg(priv, msgobj[mo].ctrl0, - INTPND_UNC | RXIE_RES | - TXIE_RES | MSGVAL_RES); - cc770_write_reg(priv, msgobj[mo].ctrl0, - INTPND_RES | RXIE_RES | - TXIE_RES | MSGVAL_RES); - cc770_write_reg(priv, msgobj[mo].ctrl1, - NEWDAT_RES 
| MSGLST_RES | - TXRQST_RES | RMTPND_RES); - for (data = 0; data < 8; data++) - cc770_write_reg(priv, msgobj[mo].data[data], 0); - for (id = 0; id < 4; id++) - cc770_write_reg(priv, msgobj[mo].id[id], 0); - cc770_write_reg(priv, msgobj[mo].config, 0); - } - - /* Set all global ID masks to "don't care" */ - cc770_write_reg(priv, global_mask_std[0], 0); - cc770_write_reg(priv, global_mask_std[1], 0); - cc770_write_reg(priv, global_mask_ext[0], 0); - cc770_write_reg(priv, global_mask_ext[1], 0); - cc770_write_reg(priv, global_mask_ext[2], 0); - cc770_write_reg(priv, global_mask_ext[3], 0); - -} - -static int cc770_probe_chip(struct net_device *dev) -{ - struct cc770_priv *priv = netdev_priv(dev); - - /* Enable configuration, put chip in bus-off, disable ints */ - cc770_write_reg(priv, control, CTRL_CCE | CTRL_EAF | CTRL_INI); - /* Configure cpu interface / CLKOUT disable */ - cc770_write_reg(priv, cpu_interface, priv->cpu_interface); - - /* - * Check if hardware reset is still inactive or maybe there - * is no chip in this address space - */ - if (cc770_read_reg(priv, cpu_interface) & CPUIF_RST) { - netdev_info(dev, "probing @0x%p failed (reset)\n", - priv->reg_base); - return -ENODEV; - } - - /* Write and read back test pattern (some arbitrary values) */ - cc770_write_reg(priv, msgobj[1].data[1], 0x25); - cc770_write_reg(priv, msgobj[2].data[3], 0x52); - cc770_write_reg(priv, msgobj[10].data[6], 0xc3); - if ((cc770_read_reg(priv, msgobj[1].data[1]) != 0x25) || - (cc770_read_reg(priv, msgobj[2].data[3]) != 0x52) || - (cc770_read_reg(priv, msgobj[10].data[6]) != 0xc3)) { - netdev_info(dev, "probing @0x%p failed (pattern)\n", - priv->reg_base); - return -ENODEV; - } - - /* Check if this chip is a CC770 supporting additional functions */ - if (cc770_read_reg(priv, control) & CTRL_EAF) - priv->control_normal_mode |= CTRL_EAF; - - return 0; -} - -static void cc770_start(struct net_device *dev) -{ - struct cc770_priv *priv = netdev_priv(dev); - - /* leave reset mode */ - if (priv->can.state != CAN_STATE_STOPPED) - set_reset_mode(dev); - - /* leave reset mode */ - set_normal_mode(dev); -} - -static int cc770_set_mode(struct net_device *dev, enum can_mode mode) -{ - switch (mode) { - case CAN_MODE_START: - cc770_start(dev); - netif_wake_queue(dev); - break; - - default: - return -EOPNOTSUPP; - } - - return 0; -} - -static int cc770_set_bittiming(struct net_device *dev) -{ - struct cc770_priv *priv = netdev_priv(dev); - struct can_bittiming *bt = &priv->can.bittiming; - u8 btr0, btr1; - - btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6); - btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) | - (((bt->phase_seg2 - 1) & 0x7) << 4); - if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) - btr1 |= 0x80; - - netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1); - - cc770_write_reg(priv, bit_timing_0, btr0); - cc770_write_reg(priv, bit_timing_1, btr1); - - return 0; -} - -static int cc770_get_berr_counter(const struct net_device *dev, - struct can_berr_counter *bec) -{ - struct cc770_priv *priv = netdev_priv(dev); - - bec->txerr = cc770_read_reg(priv, tx_error_counter); - bec->rxerr = cc770_read_reg(priv, rx_error_counter); - - return 0; -} - -static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct cc770_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; - struct can_frame *cf = (struct can_frame *)skb->data; - unsigned int mo = obj2msgobj(CC770_OBJ_TX); - u8 dlc, rtr; - u32 id; - int i; - - if 
(can_dropped_invalid_skb(dev, skb)) - return NETDEV_TX_OK; - - if ((cc770_read_reg(priv, - msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) { - netdev_err(dev, "TX register is still occupied!\n"); - return NETDEV_TX_BUSY; - } - - netif_stop_queue(dev); - - dlc = cf->can_dlc; - id = cf->can_id; - if (cf->can_id & CAN_RTR_FLAG) - rtr = 0; - else - rtr = MSGCFG_DIR; - cc770_write_reg(priv, msgobj[mo].ctrl1, - RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES); - cc770_write_reg(priv, msgobj[mo].ctrl0, - MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES); - if (id & CAN_EFF_FLAG) { - id &= CAN_EFF_MASK; - cc770_write_reg(priv, msgobj[mo].config, - (dlc << 4) | rtr | MSGCFG_XTD); - cc770_write_reg(priv, msgobj[mo].id[3], id << 3); - cc770_write_reg(priv, msgobj[mo].id[2], id >> 5); - cc770_write_reg(priv, msgobj[mo].id[1], id >> 13); - cc770_write_reg(priv, msgobj[mo].id[0], id >> 21); - } else { - id &= CAN_SFF_MASK; - cc770_write_reg(priv, msgobj[mo].config, (dlc << 4) | rtr); - cc770_write_reg(priv, msgobj[mo].id[0], id >> 3); - cc770_write_reg(priv, msgobj[mo].id[1], id << 5); - } - - for (i = 0; i < dlc; i++) - cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]); - - cc770_write_reg(priv, msgobj[mo].ctrl1, - RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC); - - stats->tx_bytes += dlc; - - can_put_echo_skb(skb, dev, 0); - - /* - * HM: We had some cases of repeated IRQs so make sure the - * INT is acknowledged I know it's already further up, but - * doing again fixed the issue - */ - cc770_write_reg(priv, msgobj[mo].ctrl0, - MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); - - return NETDEV_TX_OK; -} - -static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1) -{ - struct cc770_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; - struct can_frame *cf; - struct sk_buff *skb; - u8 config; - u32 id; - int i; - - skb = alloc_can_skb(dev, &cf); - if (!skb) - return; - - config = cc770_read_reg(priv, msgobj[mo].config); - - if (ctrl1 & RMTPND_SET) { - /* - * Unfortunately, the chip does not store the real message - * identifier of the received remote transmission request - * frame. Therefore we set it to 0. 
- */ - cf->can_id = CAN_RTR_FLAG; - if (config & MSGCFG_XTD) - cf->can_id |= CAN_EFF_FLAG; - cf->can_dlc = 0; - } else { - if (config & MSGCFG_XTD) { - id = cc770_read_reg(priv, msgobj[mo].id[3]); - id |= cc770_read_reg(priv, msgobj[mo].id[2]) << 8; - id |= cc770_read_reg(priv, msgobj[mo].id[1]) << 16; - id |= cc770_read_reg(priv, msgobj[mo].id[0]) << 24; - id >>= 3; - id |= CAN_EFF_FLAG; - } else { - id = cc770_read_reg(priv, msgobj[mo].id[1]); - id |= cc770_read_reg(priv, msgobj[mo].id[0]) << 8; - id >>= 5; - } - - cf->can_id = id; - cf->can_dlc = get_can_dlc((config & 0xf0) >> 4); - for (i = 0; i < cf->can_dlc; i++) - cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]); - } - netif_rx(skb); - - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; -} - -static int cc770_err(struct net_device *dev, u8 status) -{ - struct cc770_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; - struct can_frame *cf; - struct sk_buff *skb; - u8 lec; - - netdev_dbg(dev, "status interrupt (%#x)\n", status); - - skb = alloc_can_err_skb(dev, &cf); - if (!skb) - return -ENOMEM; - - /* Use extended functions of the CC770 */ - if (priv->control_normal_mode & CTRL_EAF) { - cf->data[6] = cc770_read_reg(priv, tx_error_counter); - cf->data[7] = cc770_read_reg(priv, rx_error_counter); - } - - if (status & STAT_BOFF) { - /* Disable interrupts */ - cc770_write_reg(priv, control, CTRL_INI); - cf->can_id |= CAN_ERR_BUSOFF; - priv->can.state = CAN_STATE_BUS_OFF; - can_bus_off(dev); - } else if (status & STAT_WARN) { - cf->can_id |= CAN_ERR_CRTL; - /* Only the CC770 does show error passive */ - if (cf->data[7] > 127) { - cf->data[1] = CAN_ERR_CRTL_RX_PASSIVE | - CAN_ERR_CRTL_TX_PASSIVE; - priv->can.state = CAN_STATE_ERROR_PASSIVE; - priv->can.can_stats.error_passive++; - } else { - cf->data[1] = CAN_ERR_CRTL_RX_WARNING | - CAN_ERR_CRTL_TX_WARNING; - priv->can.state = CAN_STATE_ERROR_WARNING; - priv->can.can_stats.error_warning++; - } - } else { - /* Back to error avtive */ - cf->can_id |= CAN_ERR_PROT; - cf->data[2] = CAN_ERR_PROT_ACTIVE; - priv->can.state = CAN_STATE_ERROR_ACTIVE; - } - - lec = status & STAT_LEC_MASK; - if (lec < 7 && lec > 0) { - if (lec == STAT_LEC_ACK) { - cf->can_id |= CAN_ERR_ACK; - } else { - cf->can_id |= CAN_ERR_PROT; - switch (lec) { - case STAT_LEC_STUFF: - cf->data[2] |= CAN_ERR_PROT_STUFF; - break; - case STAT_LEC_FORM: - cf->data[2] |= CAN_ERR_PROT_FORM; - break; - case STAT_LEC_BIT1: - cf->data[2] |= CAN_ERR_PROT_BIT1; - break; - case STAT_LEC_BIT0: - cf->data[2] |= CAN_ERR_PROT_BIT0; - break; - case STAT_LEC_CRC: - cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; - break; - } - } - } - - netif_rx(skb); - - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - - return 0; -} - -static int cc770_status_interrupt(struct net_device *dev) -{ - struct cc770_priv *priv = netdev_priv(dev); - u8 status; - - status = cc770_read_reg(priv, status); - /* Reset the status register including RXOK and TXOK */ - cc770_write_reg(priv, status, STAT_LEC_MASK); - - if (status & (STAT_WARN | STAT_BOFF) || - (status & STAT_LEC_MASK) != STAT_LEC_MASK) { - cc770_err(dev, status); - return status & STAT_BOFF; - } - - return 0; -} - -static void cc770_rx_interrupt(struct net_device *dev, unsigned int o) -{ - struct cc770_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; - unsigned int mo = obj2msgobj(o); - u8 ctrl1; - int n = CC770_MAX_MSG; - - while (n--) { - ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1); - - if (!(ctrl1 & NEWDAT_SET)) { - /* Check for RTR if 
additional functions are enabled */ - if (priv->control_normal_mode & CTRL_EAF) { - if (!(cc770_read_reg(priv, msgobj[mo].ctrl0) & - INTPND_SET)) - break; - } else { - break; - } - } - - if (ctrl1 & MSGLST_SET) { - stats->rx_over_errors++; - stats->rx_errors++; - } - if (mo < MSGOBJ_LAST) - cc770_write_reg(priv, msgobj[mo].ctrl1, - NEWDAT_RES | MSGLST_RES | - TXRQST_UNC | RMTPND_UNC); - cc770_rx(dev, mo, ctrl1); - - cc770_write_reg(priv, msgobj[mo].ctrl0, - MSGVAL_SET | TXIE_RES | - RXIE_SET | INTPND_RES); - cc770_write_reg(priv, msgobj[mo].ctrl1, - NEWDAT_RES | MSGLST_RES | - TXRQST_RES | RMTPND_RES); - } -} - -static void cc770_rtr_interrupt(struct net_device *dev, unsigned int o) -{ - struct cc770_priv *priv = netdev_priv(dev); - unsigned int mo = obj2msgobj(o); - u8 ctrl0, ctrl1; - int n = CC770_MAX_MSG; - - while (n--) { - ctrl0 = cc770_read_reg(priv, msgobj[mo].ctrl0); - if (!(ctrl0 & INTPND_SET)) - break; - - ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1); - cc770_rx(dev, mo, ctrl1); - - cc770_write_reg(priv, msgobj[mo].ctrl0, - MSGVAL_SET | TXIE_RES | - RXIE_SET | INTPND_RES); - cc770_write_reg(priv, msgobj[mo].ctrl1, - NEWDAT_RES | CPUUPD_SET | - TXRQST_RES | RMTPND_RES); - } -} - -static void cc770_tx_interrupt(struct net_device *dev, unsigned int o) -{ - struct cc770_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; - unsigned int mo = obj2msgobj(o); - - /* Nothing more to send, switch off interrupts */ - cc770_write_reg(priv, msgobj[mo].ctrl0, - MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); - /* - * We had some cases of repeated IRQ so make sure the - * INT is acknowledged - */ - cc770_write_reg(priv, msgobj[mo].ctrl0, - MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); - - stats->tx_packets++; - can_get_echo_skb(dev, 0); - netif_wake_queue(dev); -} - -irqreturn_t cc770_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = (struct net_device *)dev_id; - struct cc770_priv *priv = netdev_priv(dev); - u8 intid; - int o, n = 0; - - /* Shared interrupts and IRQ off? */ - if (priv->can.state == CAN_STATE_STOPPED) - return IRQ_NONE; - - if (priv->pre_irq) - priv->pre_irq(priv); - - while (n < CC770_MAX_IRQ) { - /* Read the highest pending interrupt request */ - intid = cc770_read_reg(priv, interrupt); - if (!intid) - break; - n++; - - if (intid == 1) { - /* Exit in case of bus-off */ - if (cc770_status_interrupt(dev)) - break; - } else { - o = intid2obj(intid); - - if (o >= CC770_OBJ_MAX) { - netdev_err(dev, "Unexpected interrupt id %d\n", - intid); - continue; - } - - if (priv->obj_flags[o] & CC770_OBJ_FLAG_RTR) - cc770_rtr_interrupt(dev, o); - else if (priv->obj_flags[o] & CC770_OBJ_FLAG_RX) - cc770_rx_interrupt(dev, o); - else - cc770_tx_interrupt(dev, o); - } - } - - if (priv->post_irq) - priv->post_irq(priv); - - if (n >= CC770_MAX_IRQ) - netdev_dbg(dev, "%d messages handled in ISR", n); - - return (n) ? 
IRQ_HANDLED : IRQ_NONE; -} - -static int cc770_open(struct net_device *dev) -{ - struct cc770_priv *priv = netdev_priv(dev); - int err; - - /* set chip into reset mode */ - set_reset_mode(dev); - - /* common open */ - err = open_candev(dev); - if (err) - return err; - - err = request_irq(dev->irq, &cc770_interrupt, priv->irq_flags, - dev->name, dev); - if (err) { - close_candev(dev); - return -EAGAIN; - } - - /* init and start chip */ - cc770_start(dev); - - netif_start_queue(dev); - - return 0; -} - -static int cc770_close(struct net_device *dev) -{ - netif_stop_queue(dev); - set_reset_mode(dev); - - free_irq(dev->irq, dev); - close_candev(dev); - - return 0; -} - -struct net_device *alloc_cc770dev(int sizeof_priv) -{ - struct net_device *dev; - struct cc770_priv *priv; - - dev = alloc_candev(sizeof(struct cc770_priv) + sizeof_priv, - CC770_ECHO_SKB_MAX); - if (!dev) - return NULL; - - priv = netdev_priv(dev); - - priv->dev = dev; - priv->can.bittiming_const = &cc770_bittiming_const; - priv->can.do_set_bittiming = cc770_set_bittiming; - priv->can.do_set_mode = cc770_set_mode; - priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; - - memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags)); - - if (sizeof_priv) - priv->priv = (void *)priv + sizeof(struct cc770_priv); - - return dev; -} -EXPORT_SYMBOL_GPL(alloc_cc770dev); - -void free_cc770dev(struct net_device *dev) -{ - free_candev(dev); -} -EXPORT_SYMBOL_GPL(free_cc770dev); - -static const struct net_device_ops cc770_netdev_ops = { - .ndo_open = cc770_open, - .ndo_stop = cc770_close, - .ndo_start_xmit = cc770_start_xmit, -}; - -int register_cc770dev(struct net_device *dev) -{ - struct cc770_priv *priv = netdev_priv(dev); - int err; - - err = cc770_probe_chip(dev); - if (err) - return err; - - dev->netdev_ops = &cc770_netdev_ops; - - dev->flags |= IFF_ECHO; /* we support local echo */ - - /* Should we use additional functions? 
*/ - if (!i82527_compat && priv->control_normal_mode & CTRL_EAF) { - priv->can.do_get_berr_counter = cc770_get_berr_counter; - priv->control_normal_mode = CTRL_IE | CTRL_EAF | CTRL_EIE; - netdev_dbg(dev, "i82527 mode with additional functions\n"); - } else { - priv->control_normal_mode = CTRL_IE | CTRL_EIE; - netdev_dbg(dev, "strict i82527 compatibility mode\n"); - } - - chipset_init(priv); - set_reset_mode(dev); - - return register_candev(dev); -} -EXPORT_SYMBOL_GPL(register_cc770dev); - -void unregister_cc770dev(struct net_device *dev) -{ - set_reset_mode(dev); - unregister_candev(dev); -} -EXPORT_SYMBOL_GPL(unregister_cc770dev); - -static __init int cc770_init(void) -{ - if (msgobj15_eff) { - cc770_obj_flags[CC770_OBJ_RX0] |= CC770_OBJ_FLAG_EFF; - cc770_obj_flags[CC770_OBJ_RX1] &= ~CC770_OBJ_FLAG_EFF; - } - - pr_info("CAN netdevice driver\n"); - - return 0; -} -module_init(cc770_init); - -static __exit void cc770_exit(void) -{ - pr_info("driver removed\n"); -} -module_exit(cc770_exit); diff --git a/trunk/drivers/net/can/cc770/cc770.h b/trunk/drivers/net/can/cc770/cc770.h deleted file mode 100644 index a1739db98d91..000000000000 --- a/trunk/drivers/net/can/cc770/cc770.h +++ /dev/null @@ -1,203 +0,0 @@ -/* - * Core driver for the CC770 and AN82527 CAN controllers - * - * Copyright (C) 2009, 2011 Wolfgang Grandegger - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef CC770_DEV_H -#define CC770_DEV_H - -#include - -struct cc770_msgobj { - u8 ctrl0; - u8 ctrl1; - u8 id[4]; - u8 config; - u8 data[8]; - u8 dontuse; /* padding */ -} __packed; - -struct cc770_regs { - union { - struct cc770_msgobj msgobj[16]; /* Message object 1..15 */ - struct { - u8 control; /* Control Register */ - u8 status; /* Status Register */ - u8 cpu_interface; /* CPU Interface Register */ - u8 dontuse1; - u8 high_speed_read[2]; /* High Speed Read */ - u8 global_mask_std[2]; /* Standard Global Mask */ - u8 global_mask_ext[4]; /* Extended Global Mask */ - u8 msg15_mask[4]; /* Message 15 Mask */ - u8 dontuse2[15]; - u8 clkout; /* Clock Out Register */ - u8 dontuse3[15]; - u8 bus_config; /* Bus Configuration Register */ - u8 dontuse4[15]; - u8 bit_timing_0; /* Bit Timing Register byte 0 */ - u8 dontuse5[15]; - u8 bit_timing_1; /* Bit Timing Register byte 1 */ - u8 dontuse6[15]; - u8 interrupt; /* Interrupt Register */ - u8 dontuse7[15]; - u8 rx_error_counter; /* Receive Error Counter */ - u8 dontuse8[15]; - u8 tx_error_counter; /* Transmit Error Counter */ - u8 dontuse9[31]; - u8 p1_conf; - u8 dontuse10[15]; - u8 p2_conf; - u8 dontuse11[15]; - u8 p1_in; - u8 dontuse12[15]; - u8 p2_in; - u8 dontuse13[15]; - u8 p1_out; - u8 dontuse14[15]; - u8 p2_out; - u8 dontuse15[15]; - u8 serial_reset_addr; - }; - }; -} __packed; - -/* Control Register (0x00) */ -#define CTRL_INI 0x01 /* Initialization */ -#define CTRL_IE 0x02 /* Interrupt Enable */ -#define CTRL_SIE 0x04 /* Status Interrupt Enable */ -#define CTRL_EIE 0x08 /* Error Interrupt Enable */ -#define CTRL_EAF 0x20 /* Enable additional functions */ -#define CTRL_CCE 0x40 /* Change Configuration Enable */ - -/* Status Register (0x01) */ -#define STAT_LEC_STUFF 0x01 /* Stuff error */ -#define STAT_LEC_FORM 0x02 /* Form error */ -#define STAT_LEC_ACK 0x03 /* Acknowledgement error */ -#define STAT_LEC_BIT1 0x04 /* Bit1 error */ -#define STAT_LEC_BIT0 0x05 /* Bit0 error */ -#define STAT_LEC_CRC 0x06 /* CRC error */ -#define STAT_LEC_MASK 0x07 /* Last Error Code mask */ -#define STAT_TXOK 0x08 /* Transmit Message Successfully */ -#define STAT_RXOK 0x10 /* Receive Message Successfully */ -#define STAT_WAKE 0x20 /* Wake Up Status */ -#define STAT_WARN 0x40 /* Warning Status */ -#define STAT_BOFF 0x80 /* Bus Off Status */ - -/* - * CPU Interface Register (0x02) - * Clock Out Register (0x1f) - * Bus Configuration Register (0x2f) - * - * see include/linux/can/platform/cc770.h - */ - -/* Message Control Register 0 (Base Address + 0x0) */ -#define INTPND_RES 0x01 /* No Interrupt pending */ -#define INTPND_SET 0x02 /* Interrupt pending */ -#define INTPND_UNC 0x03 -#define RXIE_RES 0x04 /* Receive Interrupt Disable */ -#define RXIE_SET 0x08 /* Receive Interrupt Enable */ -#define RXIE_UNC 0x0c -#define TXIE_RES 0x10 /* Transmit Interrupt Disable */ -#define TXIE_SET 0x20 /* Transmit Interrupt Enable */ -#define TXIE_UNC 0x30 -#define MSGVAL_RES 0x40 /* Message Invalid */ -#define MSGVAL_SET 0x80 /* Message Valid */ -#define MSGVAL_UNC 0xc0 - -/* Message Control Register 1 (Base Address + 0x01) */ -#define NEWDAT_RES 0x01 /* No New Data */ -#define NEWDAT_SET 0x02 /* New Data */ -#define NEWDAT_UNC 0x03 -#define MSGLST_RES 0x04 /* No Message Lost */ -#define MSGLST_SET 0x08 /* Message Lost */ -#define MSGLST_UNC 0x0c -#define CPUUPD_RES 0x04 /* No CPU Updating */ -#define CPUUPD_SET 0x08 /* CPU Updating */ -#define CPUUPD_UNC 0x0c -#define TXRQST_RES 0x10 /* No Transmission Request */ -#define TXRQST_SET 0x20 /* Transmission Request */ 
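/*
 * As the _RES/_SET/_UNC names suggest, each flag in the message control
 * registers occupies a two-bit field and the value written selects the
 * action: 01 clears the flag, 10 sets it, and 11 leaves it untouched, so a
 * single register write can update selected flags while preserving the rest.
 * A minimal sketch, mirroring the write done in cc770.c's RX path to clear
 * NEWDAT and MSGLST while keeping TXRQST and RMTPND unchanged:
 *
 *	cc770_write_reg(priv, msgobj[mo].ctrl1,
 *			NEWDAT_RES | MSGLST_RES | TXRQST_UNC | RMTPND_UNC);
 */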
-#define TXRQST_UNC 0x30 -#define RMTPND_RES 0x40 /* No Remote Request Pending */ -#define RMTPND_SET 0x80 /* Remote Request Pending */ -#define RMTPND_UNC 0xc0 - -/* Message Configuration Register (Base Address + 0x06) */ -#define MSGCFG_XTD 0x04 /* Extended Identifier */ -#define MSGCFG_DIR 0x08 /* Direction is Transmit */ - -#define MSGOBJ_FIRST 1 -#define MSGOBJ_LAST 15 - -#define CC770_IO_SIZE 0x100 -#define CC770_MAX_IRQ 20 /* max. number of interrupts handled in ISR */ -#define CC770_MAX_MSG 4 /* max. number of messages handled in ISR */ - -#define CC770_ECHO_SKB_MAX 1 - -#define cc770_read_reg(priv, member) \ - priv->read_reg(priv, offsetof(struct cc770_regs, member)) - -#define cc770_write_reg(priv, member, value) \ - priv->write_reg(priv, offsetof(struct cc770_regs, member), value) - -/* - * Message objects and flags used by this driver - */ -#define CC770_OBJ_FLAG_RX 0x01 -#define CC770_OBJ_FLAG_RTR 0x02 -#define CC770_OBJ_FLAG_EFF 0x04 - -enum { - CC770_OBJ_RX0 = 0, /* for receiving normal messages */ - CC770_OBJ_RX1, /* for receiving normal messages */ - CC770_OBJ_RX_RTR0, /* for receiving remote transmission requests */ - CC770_OBJ_RX_RTR1, /* for receiving remote transmission requests */ - CC770_OBJ_TX, /* for sending messages */ - CC770_OBJ_MAX -}; - -#define obj2msgobj(o) (MSGOBJ_LAST - (o)) /* message object 11..15 */ - -/* - * CC770 private data structure - */ -struct cc770_priv { - struct can_priv can; /* must be the first member */ - struct sk_buff *echo_skb; - - /* the lower-layer is responsible for appropriate locking */ - u8 (*read_reg)(const struct cc770_priv *priv, int reg); - void (*write_reg)(const struct cc770_priv *priv, int reg, u8 val); - void (*pre_irq)(const struct cc770_priv *priv); - void (*post_irq)(const struct cc770_priv *priv); - - void *priv; /* for board-specific data */ - struct net_device *dev; - - void __iomem *reg_base; /* ioremap'ed address to registers */ - unsigned long irq_flags; /* for request_irq() */ - - unsigned char obj_flags[CC770_OBJ_MAX]; - u8 control_normal_mode; /* Control register for normal mode */ - u8 cpu_interface; /* CPU interface register */ - u8 clkout; /* Clock out register */ - u8 bus_config; /* Bus conffiguration register */ -}; - -struct net_device *alloc_cc770dev(int sizeof_priv); -void free_cc770dev(struct net_device *dev); -int register_cc770dev(struct net_device *dev); -void unregister_cc770dev(struct net_device *dev); - -#endif /* CC770_DEV_H */ diff --git a/trunk/drivers/net/can/cc770/cc770_isa.c b/trunk/drivers/net/can/cc770/cc770_isa.c deleted file mode 100644 index 4be5fe2c40a5..000000000000 --- a/trunk/drivers/net/can/cc770/cc770_isa.c +++ /dev/null @@ -1,367 +0,0 @@ -/* - * Driver for CC770 and AN82527 CAN controllers on the legacy ISA bus - * - * Copyright (C) 2009, 2011 Wolfgang Grandegger - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/* - * Bosch CC770 and Intel AN82527 CAN controllers on the ISA or PC-104 bus. 
- * The I/O port or memory address and the IRQ number must be specified via - * module parameters: - * - * insmod cc770_isa.ko port=0x310,0x380 irq=7,11 - * - * for ISA devices using I/O ports or: - * - * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 - * - * for memory mapped ISA devices. - * - * Indirect access via address and data port is supported as well: - * - * insmod cc770_isa.ko port=0x310,0x380 indirect=1 irq=7,11 - * - * Furthermore, the following mode parameter can be defined: - * - * clk: External oscillator clock frequency (default=16000000 [16 MHz]) - * cir: CPU interface register (default=0x40 [DSC]) - * bcr: Bus configuration register (default=0x40 [CBY]) - * cor: Clockout register (default=0x00) - * - * Note: for clk, cir, bcr and cor, the first argument re-defines the - * default for all other devices, e.g.: - * - * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000 - * - * is equivalent to - * - * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000,24000000 - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "cc770.h" - -#define MAXDEV 8 - -MODULE_AUTHOR("Wolfgang Grandegger "); -MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the ISA bus"); -MODULE_LICENSE("GPL v2"); - -#define CLK_DEFAULT 16000000 /* 16 MHz */ -#define COR_DEFAULT 0x00 -#define BCR_DEFAULT BUSCFG_CBY - -static unsigned long port[MAXDEV]; -static unsigned long mem[MAXDEV]; -static int __devinitdata irq[MAXDEV]; -static int __devinitdata clk[MAXDEV]; -static u8 __devinitdata cir[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; -static u8 __devinitdata cor[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; -static u8 __devinitdata bcr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; -static int __devinitdata indirect[MAXDEV] = {[0 ... 
(MAXDEV - 1)] = -1}; - -module_param_array(port, ulong, NULL, S_IRUGO); -MODULE_PARM_DESC(port, "I/O port number"); - -module_param_array(mem, ulong, NULL, S_IRUGO); -MODULE_PARM_DESC(mem, "I/O memory address"); - -module_param_array(indirect, int, NULL, S_IRUGO); -MODULE_PARM_DESC(indirect, "Indirect access via address and data port"); - -module_param_array(irq, int, NULL, S_IRUGO); -MODULE_PARM_DESC(irq, "IRQ number"); - -module_param_array(clk, int, NULL, S_IRUGO); -MODULE_PARM_DESC(clk, "External oscillator clock frequency " - "(default=16000000 [16 MHz])"); - -module_param_array(cir, byte, NULL, S_IRUGO); -MODULE_PARM_DESC(cir, "CPU interface register (default=0x40 [DSC])"); - -module_param_array(cor, byte, NULL, S_IRUGO); -MODULE_PARM_DESC(cor, "Clockout register (default=0x00)"); - -module_param_array(bcr, byte, NULL, S_IRUGO); -MODULE_PARM_DESC(bcr, "Bus configuration register (default=0x40 [CBY])"); - -#define CC770_IOSIZE 0x20 -#define CC770_IOSIZE_INDIRECT 0x02 - -static struct platform_device *cc770_isa_devs[MAXDEV]; - -static u8 cc770_isa_mem_read_reg(const struct cc770_priv *priv, int reg) -{ - return readb(priv->reg_base + reg); -} - -static void cc770_isa_mem_write_reg(const struct cc770_priv *priv, - int reg, u8 val) -{ - writeb(val, priv->reg_base + reg); -} - -static u8 cc770_isa_port_read_reg(const struct cc770_priv *priv, int reg) -{ - return inb((unsigned long)priv->reg_base + reg); -} - -static void cc770_isa_port_write_reg(const struct cc770_priv *priv, - int reg, u8 val) -{ - outb(val, (unsigned long)priv->reg_base + reg); -} - -static u8 cc770_isa_port_read_reg_indirect(const struct cc770_priv *priv, - int reg) -{ - unsigned long base = (unsigned long)priv->reg_base; - - outb(reg, base); - return inb(base + 1); -} - -static void cc770_isa_port_write_reg_indirect(const struct cc770_priv *priv, - int reg, u8 val) -{ - unsigned long base = (unsigned long)priv->reg_base; - - outb(reg, base); - outb(val, base + 1); -} - -static int __devinit cc770_isa_probe(struct platform_device *pdev) -{ - struct net_device *dev; - struct cc770_priv *priv; - void __iomem *base = NULL; - int iosize = CC770_IOSIZE; - int idx = pdev->id; - int err; - u32 clktmp; - - dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n", - idx, port[idx], mem[idx], irq[idx]); - if (mem[idx]) { - if (!request_mem_region(mem[idx], iosize, KBUILD_MODNAME)) { - err = -EBUSY; - goto exit; - } - base = ioremap_nocache(mem[idx], iosize); - if (!base) { - err = -ENOMEM; - goto exit_release; - } - } else { - if (indirect[idx] > 0 || - (indirect[idx] == -1 && indirect[0] > 0)) - iosize = CC770_IOSIZE_INDIRECT; - if (!request_region(port[idx], iosize, KBUILD_MODNAME)) { - err = -EBUSY; - goto exit; - } - } - - dev = alloc_cc770dev(0); - if (!dev) { - err = -ENOMEM; - goto exit_unmap; - } - priv = netdev_priv(dev); - - dev->irq = irq[idx]; - priv->irq_flags = IRQF_SHARED; - if (mem[idx]) { - priv->reg_base = base; - dev->base_addr = mem[idx]; - priv->read_reg = cc770_isa_mem_read_reg; - priv->write_reg = cc770_isa_mem_write_reg; - } else { - priv->reg_base = (void __iomem *)port[idx]; - dev->base_addr = port[idx]; - - if (iosize == CC770_IOSIZE_INDIRECT) { - priv->read_reg = cc770_isa_port_read_reg_indirect; - priv->write_reg = cc770_isa_port_write_reg_indirect; - } else { - priv->read_reg = cc770_isa_port_read_reg; - priv->write_reg = cc770_isa_port_write_reg; - } - } - - if (clk[idx]) - clktmp = clk[idx]; - else if (clk[0]) - clktmp = clk[0]; - else - clktmp = CLK_DEFAULT; - priv->can.clock.freq = 
clktmp; - - if (cir[idx] != 0xff) { - priv->cpu_interface = cir[idx]; - } else if (cir[0] != 0xff) { - priv->cpu_interface = cir[0]; - } else { - /* The system clock may not exceed 10 MHz */ - if (clktmp > 10000000) { - priv->cpu_interface |= CPUIF_DSC; - clktmp /= 2; - } - /* The memory clock may not exceed 8 MHz */ - if (clktmp > 8000000) - priv->cpu_interface |= CPUIF_DMC; - } - - if (priv->cpu_interface & CPUIF_DSC) - priv->can.clock.freq /= 2; - - if (bcr[idx] != 0xff) - priv->bus_config = bcr[idx]; - else if (bcr[0] != 0xff) - priv->bus_config = bcr[0]; - else - priv->bus_config = BCR_DEFAULT; - - if (cor[idx] != 0xff) - priv->clkout = cor[idx]; - else if (cor[0] != 0xff) - priv->clkout = cor[0]; - else - priv->clkout = COR_DEFAULT; - - dev_set_drvdata(&pdev->dev, dev); - SET_NETDEV_DEV(dev, &pdev->dev); - - err = register_cc770dev(dev); - if (err) { - dev_err(&pdev->dev, - "couldn't register device (err=%d)\n", err); - goto exit_unmap; - } - - dev_info(&pdev->dev, "device registered (reg_base=0x%p, irq=%d)\n", - priv->reg_base, dev->irq); - return 0; - - exit_unmap: - if (mem[idx]) - iounmap(base); - exit_release: - if (mem[idx]) - release_mem_region(mem[idx], iosize); - else - release_region(port[idx], iosize); - exit: - return err; -} - -static int __devexit cc770_isa_remove(struct platform_device *pdev) -{ - struct net_device *dev = dev_get_drvdata(&pdev->dev); - struct cc770_priv *priv = netdev_priv(dev); - int idx = pdev->id; - - unregister_cc770dev(dev); - dev_set_drvdata(&pdev->dev, NULL); - - if (mem[idx]) { - iounmap(priv->reg_base); - release_mem_region(mem[idx], CC770_IOSIZE); - } else { - if (priv->read_reg == cc770_isa_port_read_reg_indirect) - release_region(port[idx], CC770_IOSIZE_INDIRECT); - else - release_region(port[idx], CC770_IOSIZE); - } - free_cc770dev(dev); - - return 0; -} - -static struct platform_driver cc770_isa_driver = { - .probe = cc770_isa_probe, - .remove = __devexit_p(cc770_isa_remove), - .driver = { - .name = KBUILD_MODNAME, - .owner = THIS_MODULE, - }, -}; - -static int __init cc770_isa_init(void) -{ - int idx, err; - - for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) { - if ((port[idx] || mem[idx]) && irq[idx]) { - cc770_isa_devs[idx] = - platform_device_alloc(KBUILD_MODNAME, idx); - if (!cc770_isa_devs[idx]) { - err = -ENOMEM; - goto exit_free_devices; - } - err = platform_device_add(cc770_isa_devs[idx]); - if (err) { - platform_device_put(cc770_isa_devs[idx]); - goto exit_free_devices; - } - pr_debug("platform device %d: port=%#lx, mem=%#lx, " - "irq=%d\n", - idx, port[idx], mem[idx], irq[idx]); - } else if (idx == 0 || port[idx] || mem[idx]) { - pr_err("insufficient parameters supplied\n"); - err = -EINVAL; - goto exit_free_devices; - } - } - - err = platform_driver_register(&cc770_isa_driver); - if (err) - goto exit_free_devices; - - pr_info("driver for max. 
%d devices registered\n", MAXDEV); - - return 0; - -exit_free_devices: - while (--idx >= 0) { - if (cc770_isa_devs[idx]) - platform_device_unregister(cc770_isa_devs[idx]); - } - - return err; -} -module_init(cc770_isa_init); - -static void __exit cc770_isa_exit(void) -{ - int idx; - - platform_driver_unregister(&cc770_isa_driver); - for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) { - if (cc770_isa_devs[idx]) - platform_device_unregister(cc770_isa_devs[idx]); - } -} -module_exit(cc770_isa_exit); diff --git a/trunk/drivers/net/can/cc770/cc770_platform.c b/trunk/drivers/net/can/cc770/cc770_platform.c deleted file mode 100644 index 53115eee8075..000000000000 --- a/trunk/drivers/net/can/cc770/cc770_platform.c +++ /dev/null @@ -1,272 +0,0 @@ -/* - * Driver for CC770 and AN82527 CAN controllers on the platform bus - * - * Copyright (C) 2009, 2011 Wolfgang Grandegger - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/* - * If platform data are used you should have similar definitions - * in your board-specific code: - * - * static struct cc770_platform_data myboard_cc770_pdata = { - * .osc_freq = 16000000, - * .cir = 0x41, - * .cor = 0x20, - * .bcr = 0x40, - * }; - * - * Please see include/linux/can/platform/cc770.h for description of - * above fields. - * - * If the device tree is used, you need a CAN node definition in your - * DTS file similar to: - * - * can@3,100 { - * compatible = "bosch,cc770"; - * reg = <3 0x100 0x80>; - * interrupts = <2 0>; - * interrupt-parent = <&mpic>; - * bosch,external-clock-frequency = <16000000>; - * }; - * - * See "Documentation/devicetree/bindings/net/can/cc770.txt" for further - * information. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "cc770.h" - -#define DRV_NAME "cc770_platform" - -MODULE_AUTHOR("Wolfgang Grandegger "); -MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the platform bus"); -MODULE_LICENSE("GPL v2"); - -#define CC770_PLATFORM_CAN_CLOCK 16000000 - -static u8 cc770_platform_read_reg(const struct cc770_priv *priv, int reg) -{ - return ioread8(priv->reg_base + reg); -} - -static void cc770_platform_write_reg(const struct cc770_priv *priv, int reg, - u8 val) -{ - iowrite8(val, priv->reg_base + reg); -} - -static int __devinit cc770_get_of_node_data(struct platform_device *pdev, - struct cc770_priv *priv) -{ - struct device_node *np = pdev->dev.of_node; - const u32 *prop; - int prop_size; - u32 clkext; - - prop = of_get_property(np, "bosch,external-clock-frequency", - &prop_size); - if (prop && (prop_size == sizeof(u32))) - clkext = *prop; - else - clkext = CC770_PLATFORM_CAN_CLOCK; /* default */ - priv->can.clock.freq = clkext; - - /* The system clock may not exceed 10 MHz */ - if (priv->can.clock.freq > 10000000) { - priv->cpu_interface |= CPUIF_DSC; - priv->can.clock.freq /= 2; - } - - /* The memory clock may not exceed 8 MHz */ - if (priv->can.clock.freq > 8000000) - priv->cpu_interface |= CPUIF_DMC; - - if (of_get_property(np, "bosch,divide-memory-clock", NULL)) - priv->cpu_interface |= CPUIF_DMC; - if (of_get_property(np, "bosch,iso-low-speed-mux", NULL)) - priv->cpu_interface |= CPUIF_MUX; - - if (!of_get_property(np, "bosch,no-comperator-bypass", NULL)) - priv->bus_config |= BUSCFG_CBY; - if (of_get_property(np, "bosch,disconnect-rx0-input", NULL)) - priv->bus_config |= BUSCFG_DR0; - if (of_get_property(np, "bosch,disconnect-rx1-input", NULL)) - priv->bus_config |= BUSCFG_DR1; - if (of_get_property(np, "bosch,disconnect-tx1-output", NULL)) - priv->bus_config |= BUSCFG_DT1; - if (of_get_property(np, "bosch,polarity-dominant", NULL)) - priv->bus_config |= BUSCFG_POL; - - prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size); - if (prop && (prop_size == sizeof(u32)) && *prop > 0) { - u32 cdv = clkext / *prop; - int slew; - - if (cdv > 0 && cdv < 16) { - priv->cpu_interface |= CPUIF_CEN; - priv->clkout |= (cdv - 1) & CLKOUT_CD_MASK; - - prop = of_get_property(np, "bosch,slew-rate", - &prop_size); - if (prop && (prop_size == sizeof(u32))) { - slew = *prop; - } else { - /* Determine default slew rate */ - slew = (CLKOUT_SL_MASK >> - CLKOUT_SL_SHIFT) - - ((cdv * clkext - 1) / 8000000); - if (slew < 0) - slew = 0; - } - priv->clkout |= (slew << CLKOUT_SL_SHIFT) & - CLKOUT_SL_MASK; - } else { - dev_dbg(&pdev->dev, "invalid clock-out-frequency\n"); - } - } - - return 0; -} - -static int __devinit cc770_get_platform_data(struct platform_device *pdev, - struct cc770_priv *priv) -{ - - struct cc770_platform_data *pdata = pdev->dev.platform_data; - - priv->can.clock.freq = pdata->osc_freq; - if (priv->cpu_interface | CPUIF_DSC) - priv->can.clock.freq /= 2; - priv->clkout = pdata->cor; - priv->bus_config = pdata->bcr; - priv->cpu_interface = pdata->cir; - - return 0; -} - -static int __devinit cc770_platform_probe(struct platform_device *pdev) -{ - struct net_device *dev; - struct cc770_priv *priv; - struct resource *mem; - resource_size_t mem_size; - void __iomem *base; - int err, irq; - - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - irq = platform_get_irq(pdev, 0); - if (!mem || irq <= 0) - return -ENODEV; - - mem_size = resource_size(mem); - if 
(!request_mem_region(mem->start, mem_size, pdev->name)) - return -EBUSY; - - base = ioremap(mem->start, mem_size); - if (!base) { - err = -ENOMEM; - goto exit_release_mem; - } - - dev = alloc_cc770dev(0); - if (!dev) { - err = -ENOMEM; - goto exit_unmap_mem; - } - - dev->irq = irq; - priv = netdev_priv(dev); - priv->read_reg = cc770_platform_read_reg; - priv->write_reg = cc770_platform_write_reg; - priv->irq_flags = IRQF_SHARED; - priv->reg_base = base; - - if (pdev->dev.of_node) - err = cc770_get_of_node_data(pdev, priv); - else if (pdev->dev.platform_data) - err = cc770_get_platform_data(pdev, priv); - else - err = -ENODEV; - if (err) - goto exit_free_cc770; - - dev_dbg(&pdev->dev, - "reg_base=0x%p irq=%d clock=%d cpu_interface=0x%02x " - "bus_config=0x%02x clkout=0x%02x\n", - priv->reg_base, dev->irq, priv->can.clock.freq, - priv->cpu_interface, priv->bus_config, priv->clkout); - - dev_set_drvdata(&pdev->dev, dev); - SET_NETDEV_DEV(dev, &pdev->dev); - - err = register_cc770dev(dev); - if (err) { - dev_err(&pdev->dev, - "couldn't register CC700 device (err=%d)\n", err); - goto exit_free_cc770; - } - - return 0; - -exit_free_cc770: - free_cc770dev(dev); -exit_unmap_mem: - iounmap(base); -exit_release_mem: - release_mem_region(mem->start, mem_size); - - return err; -} - -static int __devexit cc770_platform_remove(struct platform_device *pdev) -{ - struct net_device *dev = dev_get_drvdata(&pdev->dev); - struct cc770_priv *priv = netdev_priv(dev); - struct resource *mem; - - unregister_cc770dev(dev); - iounmap(priv->reg_base); - free_cc770dev(dev); - - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(mem->start, resource_size(mem)); - - return 0; -} - -static struct of_device_id __devinitdata cc770_platform_table[] = { - {.compatible = "bosch,cc770"}, /* CC770 from Bosch */ - {.compatible = "intc,82527"}, /* AN82527 from Intel CP */ - {}, -}; - -static struct platform_driver cc770_platform_driver = { - .driver = { - .name = DRV_NAME, - .owner = THIS_MODULE, - .of_match_table = cc770_platform_table, - }, - .probe = cc770_platform_probe, - .remove = __devexit_p(cc770_platform_remove), -}; - -module_platform_driver(cc770_platform_driver); diff --git a/trunk/drivers/net/can/dev.c b/trunk/drivers/net/can/dev.c index 120f1ab5a2ce..25695bde0549 100644 --- a/trunk/drivers/net/can/dev.c +++ b/trunk/drivers/net/can/dev.c @@ -454,7 +454,7 @@ static void can_setup(struct net_device *dev) /* New-style flags. 
*/ dev->flags = IFF_NOARP; - dev->features = NETIF_F_HW_CSUM; + dev->features = NETIF_F_NO_CSUM; } struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) diff --git a/trunk/drivers/net/can/flexcan.c b/trunk/drivers/net/can/flexcan.c index 165a4c798025..e02337953f41 100644 --- a/trunk/drivers/net/can/flexcan.c +++ b/trunk/drivers/net/can/flexcan.c @@ -1060,7 +1060,20 @@ static struct platform_driver flexcan_driver = { .remove = __devexit_p(flexcan_remove), }; -module_platform_driver(flexcan_driver); +static int __init flexcan_init(void) +{ + pr_info("%s netdevice driver\n", DRV_NAME); + return platform_driver_register(&flexcan_driver); +} + +static void __exit flexcan_exit(void) +{ + platform_driver_unregister(&flexcan_driver); + pr_info("%s: driver removed\n", DRV_NAME); +} + +module_init(flexcan_init); +module_exit(flexcan_exit); MODULE_AUTHOR("Sascha Hauer , " "Marc Kleine-Budde "); diff --git a/trunk/drivers/net/can/janz-ican3.c b/trunk/drivers/net/can/janz-ican3.c index 08c893cb7896..32778d56d330 100644 --- a/trunk/drivers/net/can/janz-ican3.c +++ b/trunk/drivers/net/can/janz-ican3.c @@ -1803,9 +1803,20 @@ static struct platform_driver ican3_driver = { .remove = __devexit_p(ican3_remove), }; -module_platform_driver(ican3_driver); +static int __init ican3_init(void) +{ + return platform_driver_register(&ican3_driver); +} + +static void __exit ican3_exit(void) +{ + platform_driver_unregister(&ican3_driver); +} MODULE_AUTHOR("Ira W. Snyder "); MODULE_DESCRIPTION("Janz MODULbus VMOD-ICAN3 Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:janz-ican3"); + +module_init(ican3_init); +module_exit(ican3_exit); diff --git a/trunk/drivers/net/can/mscan/mpc5xxx_can.c b/trunk/drivers/net/can/mscan/mpc5xxx_can.c index 5caa572d71e3..5fedc3375562 100644 --- a/trunk/drivers/net/can/mscan/mpc5xxx_can.c +++ b/trunk/drivers/net/can/mscan/mpc5xxx_can.c @@ -411,7 +411,17 @@ static struct platform_driver mpc5xxx_can_driver = { #endif }; -module_platform_driver(mpc5xxx_can_driver); +static int __init mpc5xxx_can_init(void) +{ + return platform_driver_register(&mpc5xxx_can_driver); +} +module_init(mpc5xxx_can_init); + +static void __exit mpc5xxx_can_exit(void) +{ + platform_driver_unregister(&mpc5xxx_can_driver); +}; +module_exit(mpc5xxx_can_exit); MODULE_AUTHOR("Wolfgang Grandegger "); MODULE_DESCRIPTION("Freescale MPC5xxx CAN driver"); diff --git a/trunk/drivers/net/can/mscan/mscan.c b/trunk/drivers/net/can/mscan/mscan.c index 1c82dd8b896e..ec4a3119e2c9 100644 --- a/trunk/drivers/net/can/mscan/mscan.c +++ b/trunk/drivers/net/can/mscan/mscan.c @@ -581,10 +581,7 @@ static int mscan_open(struct net_device *dev) priv->open_time = jiffies; - if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) - setbits8(®s->canctl1, MSCAN_LISTEN); - else - clrbits8(®s->canctl1, MSCAN_LISTEN); + clrbits8(®s->canctl1, MSCAN_LISTEN); ret = mscan_start(dev); if (ret) @@ -693,8 +690,7 @@ struct net_device *alloc_mscandev(void) priv->can.bittiming_const = &mscan_bittiming_const; priv->can.do_set_bittiming = mscan_do_set_bittiming; priv->can.do_set_mode = mscan_do_set_mode; - priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | - CAN_CTRLMODE_LISTENONLY; + priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; for (i = 0; i < TX_QUEUE_SIZE; i++) { priv->tx_queue[i].id = i; diff --git a/trunk/drivers/net/can/sja1000/Kconfig b/trunk/drivers/net/can/sja1000/Kconfig index 36e9d594069d..fe9e64d476eb 100644 --- a/trunk/drivers/net/can/sja1000/Kconfig +++ b/trunk/drivers/net/can/sja1000/Kconfig @@ -6,6 +6,7 @@ if 
CAN_SJA1000 config CAN_SJA1000_ISA tristate "ISA Bus based legacy SJA1000 driver" + depends on ISA ---help--- This driver adds legacy support for SJA1000 chips connected to the ISA bus using I/O port, memory mapped or indirect access. diff --git a/trunk/drivers/net/can/sja1000/sja1000_isa.c b/trunk/drivers/net/can/sja1000/sja1000_isa.c index 90c5c2dfd2fd..496223e9e2fc 100644 --- a/trunk/drivers/net/can/sja1000/sja1000_isa.c +++ b/trunk/drivers/net/can/sja1000/sja1000_isa.c @@ -17,7 +17,7 @@ #include #include -#include +#include #include #include #include @@ -44,9 +44,9 @@ static unsigned long port[MAXDEV]; static unsigned long mem[MAXDEV]; static int __devinitdata irq[MAXDEV]; static int __devinitdata clk[MAXDEV]; -static unsigned char __devinitdata cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; -static unsigned char __devinitdata ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; -static int __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1}; +static char __devinitdata cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1}; +static char __devinitdata ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1}; +static char __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1}; module_param_array(port, ulong, NULL, S_IRUGO); MODULE_PARM_DESC(port, "I/O port number"); @@ -54,7 +54,7 @@ MODULE_PARM_DESC(port, "I/O port number"); module_param_array(mem, ulong, NULL, S_IRUGO); MODULE_PARM_DESC(mem, "I/O memory address"); -module_param_array(indirect, int, NULL, S_IRUGO); +module_param_array(indirect, byte, NULL, S_IRUGO); MODULE_PARM_DESC(indirect, "Indirect access via address and data port"); module_param_array(irq, int, NULL, S_IRUGO); @@ -75,8 +75,6 @@ MODULE_PARM_DESC(ocr, "Output control register " #define SJA1000_IOSIZE 0x20 #define SJA1000_IOSIZE_INDIRECT 0x02 -static struct platform_device *sja1000_isa_devs[MAXDEV]; - static u8 sja1000_isa_mem_read_reg(const struct sja1000_priv *priv, int reg) { return readb(priv->reg_base + reg); @@ -117,18 +115,26 @@ static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv, outb(val, base + 1); } -static int __devinit sja1000_isa_probe(struct platform_device *pdev) +static int __devinit sja1000_isa_match(struct device *pdev, unsigned int idx) +{ + if (port[idx] || mem[idx]) { + if (irq[idx]) + return 1; + } else if (idx) + return 0; + + dev_err(pdev, "insufficient parameters supplied\n"); + return 0; +} + +static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx) { struct net_device *dev; struct sja1000_priv *priv; void __iomem *base = NULL; int iosize = SJA1000_IOSIZE; - int idx = pdev->id; int err; - dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n", - idx, port[idx], mem[idx], irq[idx]); - if (mem[idx]) { if (!request_mem_region(mem[idx], iosize, DRV_NAME)) { err = -EBUSY; @@ -183,31 +189,31 @@ static int __devinit sja1000_isa_probe(struct platform_device *pdev) else priv->can.clock.freq = CLK_DEFAULT / 2; - if (ocr[idx] != 0xff) - priv->ocr = ocr[idx]; - else if (ocr[0] != 0xff) - priv->ocr = ocr[0]; + if (ocr[idx] != -1) + priv->ocr = ocr[idx] & 0xff; + else if (ocr[0] != -1) + priv->ocr = ocr[0] & 0xff; else priv->ocr = OCR_DEFAULT; - if (cdr[idx] != 0xff) - priv->cdr = cdr[idx]; - else if (cdr[0] != 0xff) - priv->cdr = cdr[0]; + if (cdr[idx] != -1) + priv->cdr = cdr[idx] & 0xff; + else if (cdr[0] != -1) + priv->cdr = cdr[0] & 0xff; else priv->cdr = CDR_DEFAULT; - dev_set_drvdata(&pdev->dev, dev); - SET_NETDEV_DEV(dev, &pdev->dev); + dev_set_drvdata(pdev, dev); + SET_NETDEV_DEV(dev, pdev); err = 
register_sja1000dev(dev); if (err) { - dev_err(&pdev->dev, "registering %s failed (err=%d)\n", + dev_err(pdev, "registering %s failed (err=%d)\n", DRV_NAME, err); goto exit_unmap; } - dev_info(&pdev->dev, "%s device registered (reg_base=0x%p, irq=%d)\n", + dev_info(pdev, "%s device registered (reg_base=0x%p, irq=%d)\n", DRV_NAME, priv->reg_base, dev->irq); return 0; @@ -223,14 +229,13 @@ static int __devinit sja1000_isa_probe(struct platform_device *pdev) return err; } -static int __devexit sja1000_isa_remove(struct platform_device *pdev) +static int __devexit sja1000_isa_remove(struct device *pdev, unsigned int idx) { - struct net_device *dev = dev_get_drvdata(&pdev->dev); + struct net_device *dev = dev_get_drvdata(pdev); struct sja1000_priv *priv = netdev_priv(dev); - int idx = pdev->id; unregister_sja1000dev(dev); - dev_set_drvdata(&pdev->dev, NULL); + dev_set_drvdata(pdev, NULL); if (mem[idx]) { iounmap(priv->reg_base); @@ -246,70 +251,29 @@ static int __devexit sja1000_isa_remove(struct platform_device *pdev) return 0; } -static struct platform_driver sja1000_isa_driver = { +static struct isa_driver sja1000_isa_driver = { + .match = sja1000_isa_match, .probe = sja1000_isa_probe, .remove = __devexit_p(sja1000_isa_remove), .driver = { .name = DRV_NAME, - .owner = THIS_MODULE, }, }; static int __init sja1000_isa_init(void) { - int idx, err; - - for (idx = 0; idx < MAXDEV; idx++) { - if ((port[idx] || mem[idx]) && irq[idx]) { - sja1000_isa_devs[idx] = - platform_device_alloc(DRV_NAME, idx); - if (!sja1000_isa_devs[idx]) { - err = -ENOMEM; - goto exit_free_devices; - } - err = platform_device_add(sja1000_isa_devs[idx]); - if (err) { - platform_device_put(sja1000_isa_devs[idx]); - goto exit_free_devices; - } - pr_debug("%s: platform device %d: port=%#lx, mem=%#lx, " - "irq=%d\n", - DRV_NAME, idx, port[idx], mem[idx], irq[idx]); - } else if (idx == 0 || port[idx] || mem[idx]) { - pr_err("%s: insufficient parameters supplied\n", - DRV_NAME); - err = -EINVAL; - goto exit_free_devices; - } - } - - err = platform_driver_register(&sja1000_isa_driver); - if (err) - goto exit_free_devices; - - pr_info("Legacy %s driver for max. %d devices registered\n", - DRV_NAME, MAXDEV); - - return 0; - -exit_free_devices: - while (--idx >= 0) { - if (sja1000_isa_devs[idx]) - platform_device_unregister(sja1000_isa_devs[idx]); - } + int err = isa_register_driver(&sja1000_isa_driver, MAXDEV); + if (!err) + printk(KERN_INFO + "Legacy %s driver for max. 
%d devices registered\n", + DRV_NAME, MAXDEV); return err; } static void __exit sja1000_isa_exit(void) { - int idx; - - platform_driver_unregister(&sja1000_isa_driver); - for (idx = 0; idx < MAXDEV; idx++) { - if (sja1000_isa_devs[idx]) - platform_device_unregister(sja1000_isa_devs[idx]); - } + isa_unregister_driver(&sja1000_isa_driver); } module_init(sja1000_isa_init); diff --git a/trunk/drivers/net/can/sja1000/sja1000_of_platform.c b/trunk/drivers/net/can/sja1000/sja1000_of_platform.c index f2683eb6a3d5..c3dd9d09be57 100644 --- a/trunk/drivers/net/can/sja1000/sja1000_of_platform.c +++ b/trunk/drivers/net/can/sja1000/sja1000_of_platform.c @@ -220,4 +220,14 @@ static struct platform_driver sja1000_ofp_driver = { .remove = __devexit_p(sja1000_ofp_remove), }; -module_platform_driver(sja1000_ofp_driver); +static int __init sja1000_ofp_init(void) +{ + return platform_driver_register(&sja1000_ofp_driver); +} +module_init(sja1000_ofp_init); + +static void __exit sja1000_ofp_exit(void) +{ + return platform_driver_unregister(&sja1000_ofp_driver); +}; +module_exit(sja1000_ofp_exit); diff --git a/trunk/drivers/net/can/sja1000/sja1000_platform.c b/trunk/drivers/net/can/sja1000/sja1000_platform.c index 4f50145f6483..d9fadc489b32 100644 --- a/trunk/drivers/net/can/sja1000/sja1000_platform.c +++ b/trunk/drivers/net/can/sja1000/sja1000_platform.c @@ -185,4 +185,15 @@ static struct platform_driver sp_driver = { }, }; -module_platform_driver(sp_driver); +static int __init sp_init(void) +{ + return platform_driver_register(&sp_driver); +} + +static void __exit sp_exit(void) +{ + platform_driver_unregister(&sp_driver); +} + +module_init(sp_init); +module_exit(sp_exit); diff --git a/trunk/drivers/net/can/slcan.c b/trunk/drivers/net/can/slcan.c index 3f1ebcc2cb83..a979b006f459 100644 --- a/trunk/drivers/net/can/slcan.c +++ b/trunk/drivers/net/can/slcan.c @@ -387,7 +387,7 @@ static void slc_setup(struct net_device *dev) /* New-style flags. 
*/ dev->flags = IFF_NOARP; - dev->features = NETIF_F_HW_CSUM; + dev->features = NETIF_F_NO_CSUM; } /****************************************** diff --git a/trunk/drivers/net/can/softing/softing_main.c b/trunk/drivers/net/can/softing/softing_main.c index a7c77c744ee9..09a8b86cf1ac 100644 --- a/trunk/drivers/net/can/softing/softing_main.c +++ b/trunk/drivers/net/can/softing/softing_main.c @@ -874,9 +874,21 @@ static struct platform_driver softing_driver = { .remove = __devexit_p(softing_pdev_remove), }; -module_platform_driver(softing_driver); - MODULE_ALIAS("platform:softing"); + +static int __init softing_start(void) +{ + return platform_driver_register(&softing_driver); +} + +static void __exit softing_stop(void) +{ + platform_driver_unregister(&softing_driver); +} + +module_init(softing_start); +module_exit(softing_stop); + MODULE_DESCRIPTION("Softing DPRAM CAN driver"); MODULE_AUTHOR("Kurt Van Dijck "); MODULE_LICENSE("GPL v2"); diff --git a/trunk/drivers/net/can/ti_hecc.c b/trunk/drivers/net/can/ti_hecc.c index df809e3f130e..2adc294f512a 100644 --- a/trunk/drivers/net/can/ti_hecc.c +++ b/trunk/drivers/net/can/ti_hecc.c @@ -1037,7 +1037,20 @@ static struct platform_driver ti_hecc_driver = { .resume = ti_hecc_resume, }; -module_platform_driver(ti_hecc_driver); +static int __init ti_hecc_init_driver(void) +{ + printk(KERN_INFO DRV_DESC "\n"); + return platform_driver_register(&ti_hecc_driver); +} + +static void __exit ti_hecc_exit_driver(void) +{ + printk(KERN_INFO DRV_DESC " unloaded\n"); + platform_driver_unregister(&ti_hecc_driver); +} + +module_exit(ti_hecc_exit_driver); +module_init(ti_hecc_init_driver); MODULE_AUTHOR("Anant Gole "); MODULE_LICENSE("GPL v2"); diff --git a/trunk/drivers/net/can/vcan.c b/trunk/drivers/net/can/vcan.c index ea2d94285936..f93e2d6fc88c 100644 --- a/trunk/drivers/net/can/vcan.c +++ b/trunk/drivers/net/can/vcan.c @@ -63,7 +63,7 @@ MODULE_AUTHOR("Urs Thuermann "); * See Documentation/networking/can.txt for details. */ -static bool echo; /* echo testing. Default: 0 (Off) */ +static int echo; /* echo testing. Default: 0 (Off) */ module_param(echo, bool, S_IRUGO); MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)"); diff --git a/trunk/drivers/net/dsa/Kconfig b/trunk/drivers/net/dsa/Kconfig deleted file mode 100644 index dd151d53d506..000000000000 --- a/trunk/drivers/net/dsa/Kconfig +++ /dev/null @@ -1,36 +0,0 @@ -menu "Distributed Switch Architecture drivers" - depends on NET_DSA - -config NET_DSA_MV88E6XXX - tristate - default n - -config NET_DSA_MV88E6060 - tristate "Marvell 88E6060 ethernet switch chip support" - select NET_DSA_TAG_TRAILER - ---help--- - This enables support for the Marvell 88E6060 ethernet switch - chip. - -config NET_DSA_MV88E6XXX_NEED_PPU - bool - default n - -config NET_DSA_MV88E6131 - tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support" - select NET_DSA_MV88E6XXX - select NET_DSA_MV88E6XXX_NEED_PPU - select NET_DSA_TAG_DSA - ---help--- - This enables support for the Marvell 88E6085/6095/6095F/6131 - ethernet switch chips. - -config NET_DSA_MV88E6123_61_65 - tristate "Marvell 88E6123/6161/6165 ethernet switch chip support" - select NET_DSA_MV88E6XXX - select NET_DSA_TAG_EDSA - ---help--- - This enables support for the Marvell 88E6123/6161/6165 - ethernet switch chips. 
- -endmenu diff --git a/trunk/drivers/net/dsa/Makefile b/trunk/drivers/net/dsa/Makefile deleted file mode 100644 index f3bda05536cc..000000000000 --- a/trunk/drivers/net/dsa/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o -obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx_drv.o -mv88e6xxx_drv-y += mv88e6xxx.o -ifdef CONFIG_NET_DSA_MV88E6123_61_65 -mv88e6xxx_drv-y += mv88e6123_61_65.o -endif -ifdef CONFIG_NET_DSA_MV88E6131 -mv88e6xxx_drv-y += mv88e6131.o -endif diff --git a/trunk/drivers/net/dummy.c b/trunk/drivers/net/dummy.c index 087648ea1edb..a7c5e8831e8c 100644 --- a/trunk/drivers/net/dummy.c +++ b/trunk/drivers/net/dummy.c @@ -134,7 +134,7 @@ static void dummy_setup(struct net_device *dev) dev->flags |= IFF_NOARP; dev->flags &= ~IFF_MULTICAST; dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO; - dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX; + dev->features |= NETIF_F_NO_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX; random_ether_addr(dev->dev_addr); } diff --git a/trunk/drivers/net/ethernet/3com/3c589_cs.c b/trunk/drivers/net/ethernet/3com/3c589_cs.c index da410f036869..972f80ecc510 100644 --- a/trunk/drivers/net/ethernet/3com/3c589_cs.c +++ b/trunk/drivers/net/ethernet/3com/3c589_cs.c @@ -468,10 +468,9 @@ static void tc589_reset(struct net_device *dev) static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - snprintf(info->bus_info, sizeof(info->bus_info), - "PCMCIA 0x%lx", dev->base_addr); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); } static const struct ethtool_ops netdev_ethtool_ops = { diff --git a/trunk/drivers/net/ethernet/3com/3c59x.c b/trunk/drivers/net/ethernet/3com/3c59x.c index 8153a3e0a1a4..b42c06baba89 100644 --- a/trunk/drivers/net/ethernet/3com/3c59x.c +++ b/trunk/drivers/net/ethernet/3com/3c59x.c @@ -2929,17 +2929,15 @@ static void vortex_get_drvinfo(struct net_device *dev, { struct vortex_private *vp = netdev_priv(dev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strcpy(info->driver, DRV_NAME); if (VORTEX_PCI(vp)) { - strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)), - sizeof(info->bus_info)); + strcpy(info->bus_info, pci_name(VORTEX_PCI(vp))); } else { if (VORTEX_EISA(vp)) - strlcpy(info->bus_info, dev_name(vp->gendev), - sizeof(info->bus_info)); + strcpy(info->bus_info, dev_name(vp->gendev)); else - snprintf(info->bus_info, sizeof(info->bus_info), - "EISA 0x%lx %d", dev->base_addr, dev->irq); + sprintf(info->bus_info, "EISA 0x%lx %d", + dev->base_addr, dev->irq); } } diff --git a/trunk/drivers/net/ethernet/3com/typhoon.c b/trunk/drivers/net/ethernet/3com/typhoon.c index 6d6bc754b1a8..20ea07508ac7 100644 --- a/trunk/drivers/net/ethernet/3com/typhoon.c +++ b/trunk/drivers/net/ethernet/3com/typhoon.c @@ -988,23 +988,21 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) smp_rmb(); if(tp->card_state == Sleeping) { - strlcpy(info->fw_version, "Sleep image", - sizeof(info->fw_version)); + strcpy(info->fw_version, "Sleep image"); } else { INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS); if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) { - strlcpy(info->fw_version, "Unknown runtime", - sizeof(info->fw_version)); + strcpy(info->fw_version, "Unknown runtime"); } else { u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2); - 
snprintf(info->fw_version, sizeof(info->fw_version), - "%02x.%03x.%03x", sleep_ver >> 24, - (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff); + snprintf(info->fw_version, 32, "%02x.%03x.%03x", + sleep_ver >> 24, (sleep_ver >> 12) & 0xfff, + sleep_ver & 0xfff); } } - strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); - strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info)); + strcpy(info->driver, KBUILD_MODNAME); + strcpy(info->bus_info, pci_name(pci_dev)); } static int diff --git a/trunk/drivers/net/ethernet/8390/8390.h b/trunk/drivers/net/ethernet/8390/8390.h index ef325ffa1b5a..58a12e4c78f9 100644 --- a/trunk/drivers/net/ethernet/8390/8390.h +++ b/trunk/drivers/net/ethernet/8390/8390.h @@ -14,6 +14,8 @@ #define TX_PAGES 12 /* Two Tx slots */ +#define ETHER_ADDR_LEN 6 + /* The 8390 specific per-packet-header format. */ struct e8390_pkt_hdr { unsigned char status; /* status */ diff --git a/trunk/drivers/net/ethernet/8390/apne.c b/trunk/drivers/net/ethernet/8390/apne.c index 3ad5d2f9a49c..547737340cbb 100644 --- a/trunk/drivers/net/ethernet/8390/apne.c +++ b/trunk/drivers/net/ethernet/8390/apne.c @@ -318,7 +318,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr) i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev); if (i) return i; - for (i = 0; i < ETH_ALEN; i++) + for(i = 0; i < ETHER_ADDR_LEN; i++) dev->dev_addr[i] = SA_prom[i]; printk(" %pM\n", dev->dev_addr); diff --git a/trunk/drivers/net/ethernet/8390/ax88796.c b/trunk/drivers/net/ethernet/8390/ax88796.c index 9e8ba4f5636b..e9f8432f55b4 100644 --- a/trunk/drivers/net/ethernet/8390/ax88796.c +++ b/trunk/drivers/net/ethernet/8390/ax88796.c @@ -735,14 +735,15 @@ static int ax_init_dev(struct net_device *dev) if (ax->plat->flags & AXFLG_MAC_FROMDEV) { ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ei_local->mem + E8390_CMD); /* 0x61 */ - for (i = 0; i < ETH_ALEN; i++) + for (i = 0; i < ETHER_ADDR_LEN; i++) dev->dev_addr[i] = ei_inb(ioaddr + EN1_PHYS_SHIFT(i)); } if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) && ax->plat->mac_addr) - memcpy(dev->dev_addr, ax->plat->mac_addr, ETH_ALEN); + memcpy(dev->dev_addr, ax->plat->mac_addr, + ETHER_ADDR_LEN); ax_reset_8390(dev); @@ -990,7 +991,18 @@ static struct platform_driver axdrv = { .resume = ax_resume, }; -module_platform_driver(axdrv); +static int __init axdrv_init(void) +{ + return platform_driver_register(&axdrv); +} + +static void __exit axdrv_exit(void) +{ + platform_driver_unregister(&axdrv); +} + +module_init(axdrv_init); +module_exit(axdrv_exit); MODULE_DESCRIPTION("AX88796 10/100 Ethernet platform driver"); MODULE_AUTHOR("Ben Dooks, "); diff --git a/trunk/drivers/net/ethernet/8390/es3210.c b/trunk/drivers/net/ethernet/8390/es3210.c index 6428f9e7a554..7a09575ecff0 100644 --- a/trunk/drivers/net/ethernet/8390/es3210.c +++ b/trunk/drivers/net/ethernet/8390/es3210.c @@ -195,7 +195,7 @@ static int __init es_probe1(struct net_device *dev, int ioaddr) goto out; } - for (i = 0; i < ETH_ALEN ; i++) + for (i = 0; i < ETHER_ADDR_LEN ; i++) dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i); /* Check the Racal vendor ID as well. */ diff --git a/trunk/drivers/net/ethernet/8390/hp-plus.c b/trunk/drivers/net/ethernet/8390/hp-plus.c index d42938b6b596..eeac843dcd2d 100644 --- a/trunk/drivers/net/ethernet/8390/hp-plus.c +++ b/trunk/drivers/net/ethernet/8390/hp-plus.c @@ -202,7 +202,7 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr) /* Retrieve and checksum the station address. 
*/ outw(MAC_Page, ioaddr + HP_PAGING); - for(i = 0; i < ETH_ALEN; i++) { + for(i = 0; i < ETHER_ADDR_LEN; i++) { unsigned char inval = inb(ioaddr + 8 + i); dev->dev_addr[i] = inval; checksum += inval; diff --git a/trunk/drivers/net/ethernet/8390/hp.c b/trunk/drivers/net/ethernet/8390/hp.c index 113f1e075a26..18564d4a7c04 100644 --- a/trunk/drivers/net/ethernet/8390/hp.c +++ b/trunk/drivers/net/ethernet/8390/hp.c @@ -156,7 +156,7 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr) printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr); - for(i = 0; i < ETH_ALEN; i++) + for(i = 0; i < ETHER_ADDR_LEN; i++) dev->dev_addr[i] = inb(ioaddr + i); printk(" %pM", dev->dev_addr); diff --git a/trunk/drivers/net/ethernet/8390/hydra.c b/trunk/drivers/net/ethernet/8390/hydra.c index 5370c884620b..3dac937a67c4 100644 --- a/trunk/drivers/net/ethernet/8390/hydra.c +++ b/trunk/drivers/net/ethernet/8390/hydra.c @@ -129,7 +129,7 @@ static int __devinit hydra_init(struct zorro_dev *z) if (!dev) return -ENOMEM; - for (j = 0; j < ETH_ALEN; j++) + for(j = 0; j < ETHER_ADDR_LEN; j++) dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j)); /* We must set the 8390 for word mode. */ diff --git a/trunk/drivers/net/ethernet/8390/lne390.c b/trunk/drivers/net/ethernet/8390/lne390.c index 69490ae018ea..f9888d20177b 100644 --- a/trunk/drivers/net/ethernet/8390/lne390.c +++ b/trunk/drivers/net/ethernet/8390/lne390.c @@ -191,14 +191,14 @@ static int __init lne390_probe1(struct net_device *dev, int ioaddr) || inb(ioaddr + LNE390_SA_PROM + 1) != LNE390_ADDR1 || inb(ioaddr + LNE390_SA_PROM + 2) != LNE390_ADDR2 ) { printk("lne390.c: card not found"); - for (i = 0; i < ETH_ALEN; i++) + for(i = 0; i < ETHER_ADDR_LEN; i++) printk(" %02x", inb(ioaddr + LNE390_SA_PROM + i)); printk(" (invalid prefix).\n"); return -ENODEV; } #endif - for (i = 0; i < ETH_ALEN; i++) + for(i = 0; i < ETHER_ADDR_LEN; i++) dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i); printk("lne390.c: LNE390%X in EISA slot %d, address %pM.\n", 0xa+revision, ioaddr/0x1000, dev->dev_addr); diff --git a/trunk/drivers/net/ethernet/8390/ne-h8300.c b/trunk/drivers/net/ethernet/8390/ne-h8300.c index 9b9c77d5a65c..cd36a6a5f408 100644 --- a/trunk/drivers/net/ethernet/8390/ne-h8300.c +++ b/trunk/drivers/net/ethernet/8390/ne-h8300.c @@ -312,7 +312,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr) dev->base_addr = ioaddr; - for (i = 0; i < ETH_ALEN; i++) + for(i = 0; i < ETHER_ADDR_LEN; i++) dev->dev_addr[i] = SA_prom[i]; printk(" %pM\n", dev->dev_addr); diff --git a/trunk/drivers/net/ethernet/8390/ne.c b/trunk/drivers/net/ethernet/8390/ne.c index f92ea2a65a57..1063093b3afc 100644 --- a/trunk/drivers/net/ethernet/8390/ne.c +++ b/trunk/drivers/net/ethernet/8390/ne.c @@ -503,12 +503,12 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr) #ifdef CONFIG_PLAT_MAPPI outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD); /* 0x61 */ - for (i = 0; i < ETH_ALEN; i++) { + for (i = 0 ; i < ETHER_ADDR_LEN ; i++) { dev->dev_addr[i] = SA_prom[i] = inb_p(ioaddr + EN1_PHYS_SHIFT(i)); } #else - for (i = 0; i < ETH_ALEN; i++) { + for(i = 0; i < ETHER_ADDR_LEN; i++) { dev->dev_addr[i] = SA_prom[i]; } #endif diff --git a/trunk/drivers/net/ethernet/8390/ne2.c b/trunk/drivers/net/ethernet/8390/ne2.c index 922b32036c63..70cdc6996342 100644 --- a/trunk/drivers/net/ethernet/8390/ne2.c +++ b/trunk/drivers/net/ethernet/8390/ne2.c @@ -460,7 +460,7 @@ static int __init ne2_probe1(struct net_device *dev, int slot) 
dev->base_addr = base_addr; - for (i = 0; i < ETH_ALEN; i++) + for(i = 0; i < ETHER_ADDR_LEN; i++) dev->dev_addr[i] = SA_prom[i]; printk(" %pM\n", dev->dev_addr); diff --git a/trunk/drivers/net/ethernet/8390/ne2k-pci.c b/trunk/drivers/net/ethernet/8390/ne2k-pci.c index 3fab04a0034a..39923425ba25 100644 --- a/trunk/drivers/net/ethernet/8390/ne2k-pci.c +++ b/trunk/drivers/net/ethernet/8390/ne2k-pci.c @@ -639,9 +639,9 @@ static void ne2k_pci_get_drvinfo(struct net_device *dev, struct ei_device *ei = netdev_priv(dev); struct pci_dev *pci_dev = (struct pci_dev *) ei->priv; - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, pci_name(pci_dev)); } static const struct ethtool_ops ne2k_pci_ethtool_ops = { diff --git a/trunk/drivers/net/ethernet/8390/ne3210.c b/trunk/drivers/net/ethernet/8390/ne3210.c index 2a3e8057feae..243ed2aee88e 100644 --- a/trunk/drivers/net/ethernet/8390/ne3210.c +++ b/trunk/drivers/net/ethernet/8390/ne3210.c @@ -125,7 +125,7 @@ static int __init ne3210_eisa_probe (struct device *device) #endif port_index = inb(ioaddr + NE3210_CFG2) >> 6; - for (i = 0; i < ETH_ALEN; i++) + for(i = 0; i < ETHER_ADDR_LEN; i++) dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i); printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %pM.\n", edev->slot, ifmap[port_index], dev->dev_addr); diff --git a/trunk/drivers/net/ethernet/8390/stnic.c b/trunk/drivers/net/ethernet/8390/stnic.c index 3b903759980a..d85f0a84bc7b 100644 --- a/trunk/drivers/net/ethernet/8390/stnic.c +++ b/trunk/drivers/net/ethernet/8390/stnic.c @@ -114,7 +114,7 @@ static int __init stnic_probe(void) #ifdef CONFIG_SH_STANDARD_BIOS sh_bios_get_node_addr (stnic_eadr); #endif - for (i = 0; i < ETH_ALEN; i++) + for (i = 0; i < ETHER_ADDR_LEN; i++) dev->dev_addr[i] = stnic_eadr[i]; /* Set the base address to point to the NIC, not the "real" base! 
*/ diff --git a/trunk/drivers/net/ethernet/8390/zorro8390.c b/trunk/drivers/net/ethernet/8390/zorro8390.c index bcd27323b203..3aa9fe9999b5 100644 --- a/trunk/drivers/net/ethernet/8390/zorro8390.c +++ b/trunk/drivers/net/ethernet/8390/zorro8390.c @@ -365,7 +365,7 @@ static int __devinit zorro8390_init(struct net_device *dev, if (i) return i; - for (i = 0; i < ETH_ALEN; i++) + for (i = 0; i < ETHER_ADDR_LEN; i++) dev->dev_addr[i] = SA_prom[i]; pr_debug("Found ethernet address: %pM\n", dev->dev_addr); diff --git a/trunk/drivers/net/ethernet/Kconfig b/trunk/drivers/net/ethernet/Kconfig index 3474a61d4705..597f4d45c632 100644 --- a/trunk/drivers/net/ethernet/Kconfig +++ b/trunk/drivers/net/ethernet/Kconfig @@ -28,7 +28,6 @@ source "drivers/net/ethernet/cadence/Kconfig" source "drivers/net/ethernet/adi/Kconfig" source "drivers/net/ethernet/broadcom/Kconfig" source "drivers/net/ethernet/brocade/Kconfig" -source "drivers/net/ethernet/calxeda/Kconfig" source "drivers/net/ethernet/chelsio/Kconfig" source "drivers/net/ethernet/cirrus/Kconfig" source "drivers/net/ethernet/cisco/Kconfig" diff --git a/trunk/drivers/net/ethernet/Makefile b/trunk/drivers/net/ethernet/Makefile index cd6d69a6a7d2..be5dde040261 100644 --- a/trunk/drivers/net/ethernet/Makefile +++ b/trunk/drivers/net/ethernet/Makefile @@ -14,7 +14,6 @@ obj-$(CONFIG_NET_ATMEL) += cadence/ obj-$(CONFIG_NET_BFIN) += adi/ obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/ -obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/ obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/ obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/ diff --git a/trunk/drivers/net/ethernet/adaptec/starfire.c b/trunk/drivers/net/ethernet/adaptec/starfire.c index cb4f38a17f20..6d9f6911000f 100644 --- a/trunk/drivers/net/ethernet/adaptec/starfire.c +++ b/trunk/drivers/net/ethernet/adaptec/starfire.c @@ -607,7 +607,7 @@ static const struct ethtool_ops ethtool_ops; #ifdef VLAN_SUPPORT -static int netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) { struct netdev_private *np = netdev_priv(dev); @@ -617,11 +617,9 @@ static int netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) set_bit(vid, np->active_vlans); set_rx_mode(dev); spin_unlock(&np->lock); - - return 0; } -static int netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) { struct netdev_private *np = netdev_priv(dev); @@ -631,8 +629,6 @@ static int netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) clear_bit(vid, np->active_vlans); set_rx_mode(dev); spin_unlock(&np->lock); - - return 0; } #endif /* VLAN_SUPPORT */ @@ -1846,9 +1842,9 @@ static int check_if_running(struct net_device *dev) static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct netdev_private *np = netdev_priv(dev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, pci_name(np->pci_dev)); } static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) diff --git a/trunk/drivers/net/ethernet/aeroflex/greth.c b/trunk/drivers/net/ethernet/aeroflex/greth.c index c885aa905dec..442fefa4f2ca 100644 --- 
a/trunk/drivers/net/ethernet/aeroflex/greth.c +++ b/trunk/drivers/net/ethernet/aeroflex/greth.c @@ -1623,7 +1623,18 @@ static struct platform_driver greth_of_driver = { .remove = __devexit_p(greth_of_remove), }; -module_platform_driver(greth_of_driver); +static int __init greth_init(void) +{ + return platform_driver_register(&greth_of_driver); +} + +static void __exit greth_cleanup(void) +{ + platform_driver_unregister(&greth_of_driver); +} + +module_init(greth_init); +module_exit(greth_cleanup); MODULE_AUTHOR("Aeroflex Gaisler AB."); MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver"); diff --git a/trunk/drivers/net/ethernet/amd/amd8111e.c b/trunk/drivers/net/ethernet/amd/amd8111e.c index 33e0a8c20f6b..a9745f4ddbfe 100644 --- a/trunk/drivers/net/ethernet/amd/amd8111e.c +++ b/trunk/drivers/net/ethernet/amd/amd8111e.c @@ -499,7 +499,7 @@ static int amd8111e_restart(struct net_device *dev) writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 ); /* Setting the MAC address to the device */ - for (i = 0; i < ETH_ALEN; i++) + for(i = 0; i < ETH_ADDR_LEN; i++) writeb( dev->dev_addr[i], mmio + PADR + i ); /* Enable interrupt coalesce */ @@ -1412,11 +1412,10 @@ static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo { struct amd8111e_priv *lp = netdev_priv(dev); struct pci_dev *pci_dev = lp->pci_dev; - strlcpy(info->driver, MODULE_NAME, sizeof(info->driver)); - strlcpy(info->version, MODULE_VERS, sizeof(info->version)); - snprintf(info->fw_version, sizeof(info->fw_version), - "%u", chip_version); - strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info)); + strcpy (info->driver, MODULE_NAME); + strcpy (info->version, MODULE_VERS); + sprintf(info->fw_version,"%u",chip_version); + strcpy (info->bus_info, pci_name(pci_dev)); } static int amd8111e_get_regs_len(struct net_device *dev) @@ -1550,7 +1549,7 @@ static int amd8111e_set_mac_address(struct net_device *dev, void *p) memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); spin_lock_irq(&lp->lock); /* Setting the MAC address to the device */ - for (i = 0; i < ETH_ALEN; i++) + for(i = 0; i < ETH_ADDR_LEN; i++) writeb( dev->dev_addr[i], lp->mmio + PADR + i ); spin_unlock_irq(&lp->lock); @@ -1886,7 +1885,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, } /* Initializing MAC address */ - for (i = 0; i < ETH_ALEN; i++) + for(i = 0; i < ETH_ADDR_LEN; i++) dev->dev_addr[i] = readb(lp->mmio + PADR + i); /* Setting user defined parametrs */ diff --git a/trunk/drivers/net/ethernet/amd/amd8111e.h b/trunk/drivers/net/ethernet/amd/amd8111e.h index 8baa3527ba74..2ff2e7a12dd0 100644 --- a/trunk/drivers/net/ethernet/amd/amd8111e.h +++ b/trunk/drivers/net/ethernet/amd/amd8111e.h @@ -586,6 +586,7 @@ typedef enum { #define PKT_BUFF_SZ 1536 #define MIN_PKT_LEN 60 +#define ETH_ADDR_LEN 6 #define AMD8111E_TX_TIMEOUT (3 * HZ)/* 3 sec */ #define SOFT_TIMER_FREQ 0xBEBC /* 0.5 sec */ @@ -807,8 +808,8 @@ typedef enum { static int card_idx; static int speed_duplex[MAX_UNITS] = { 0, }; -static bool coalesce[MAX_UNITS] = { [ 0 ... MAX_UNITS-1] = true }; -static bool dynamic_ipg[MAX_UNITS] = { [ 0 ... 
MAX_UNITS-1] = false }; +static int coalesce[MAX_UNITS] = {1,1,1,1,1,1,1,1}; +static int dynamic_ipg[MAX_UNITS] = {0,0,0,0,0,0,0,0}; static unsigned int chip_version; #endif /* _AMD8111E_H */ diff --git a/trunk/drivers/net/ethernet/amd/au1000_eth.c b/trunk/drivers/net/ethernet/amd/au1000_eth.c index cc9262be69c8..4865ff14bebf 100644 --- a/trunk/drivers/net/ethernet/amd/au1000_eth.c +++ b/trunk/drivers/net/ethernet/amd/au1000_eth.c @@ -1339,7 +1339,18 @@ static struct platform_driver au1000_eth_driver = { .owner = THIS_MODULE, }, }; +MODULE_ALIAS("platform:au1000-eth"); -module_platform_driver(au1000_eth_driver); -MODULE_ALIAS("platform:au1000-eth"); +static int __init au1000_init_module(void) +{ + return platform_driver_register(&au1000_eth_driver); +} + +static void __exit au1000_exit_module(void) +{ + platform_driver_unregister(&au1000_eth_driver); +} + +module_init(au1000_init_module); +module_exit(au1000_exit_module); diff --git a/trunk/drivers/net/ethernet/amd/nmclan_cs.c b/trunk/drivers/net/ethernet/amd/nmclan_cs.c index 6be0dd67631a..3accd5d21b08 100644 --- a/trunk/drivers/net/ethernet/amd/nmclan_cs.c +++ b/trunk/drivers/net/ethernet/amd/nmclan_cs.c @@ -160,6 +160,8 @@ Include Files Defines ---------------------------------------------------------------------------- */ +#define ETHER_ADDR_LEN ETH_ALEN + /* 6 bytes in an Ethernet Address */ #define MACE_LADRF_LEN 8 /* 8 bytes in Logical Address Filter */ @@ -598,7 +600,7 @@ static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr) } } /* Set PADR register */ - for (i = 0; i < ETH_ALEN; i++) + for (i = 0; i < ETHER_ADDR_LEN; i++) mace_write(lp, ioaddr, MACE_PADR, enet_addr[i]); /* MAC Configuration Control Register should be written last */ @@ -637,11 +639,11 @@ static int nmclan_config(struct pcmcia_device *link) /* Read the ethernet address from the CIS. */ len = pcmcia_get_tuple(link, 0x80, &buf); - if (!buf || len < ETH_ALEN) { + if (!buf || len < ETHER_ADDR_LEN) { kfree(buf); goto failed; } - memcpy(dev->dev_addr, buf, ETH_ALEN); + memcpy(dev->dev_addr, buf, ETHER_ADDR_LEN); kfree(buf); /* Verify configuration by reading the MACE ID. 
*/ @@ -820,10 +822,9 @@ static int mace_close(struct net_device *dev) static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - snprintf(info->bus_info, sizeof(info->bus_info), - "PCMCIA 0x%lx", dev->base_addr); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); } static const struct ethtool_ops netdev_ethtool_ops = { @@ -1419,7 +1420,7 @@ Output static void set_multicast_list(struct net_device *dev) { mace_private *lp = netdev_priv(dev); - int adr[ETH_ALEN] = {0}; /* Ethernet address */ + int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */ struct netdev_hw_addr *ha; #ifdef PCMCIA_DEBUG @@ -1441,7 +1442,7 @@ static void set_multicast_list(struct net_device *dev) /* Calculate multicast logical address filter */ memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN); netdev_for_each_mc_addr(ha, dev) { - memcpy(adr, ha->addr, ETH_ALEN); + memcpy(adr, ha->addr, ETHER_ADDR_LEN); BuildLAF(lp->multicast_ladrf, adr); } } diff --git a/trunk/drivers/net/ethernet/amd/pcnet32.c b/trunk/drivers/net/ethernet/amd/pcnet32.c index 20e6dab0186c..f92bc6e34828 100644 --- a/trunk/drivers/net/ethernet/amd/pcnet32.c +++ b/trunk/drivers/net/ethernet/amd/pcnet32.c @@ -711,14 +711,12 @@ static void pcnet32_get_drvinfo(struct net_device *dev, { struct pcnet32_private *lp = netdev_priv(dev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); if (lp->pci_dev) - strlcpy(info->bus_info, pci_name(lp->pci_dev), - sizeof(info->bus_info)); + strcpy(info->bus_info, pci_name(lp->pci_dev)); else - snprintf(info->bus_info, sizeof(info->bus_info), - "VLB 0x%lx", dev->base_addr); + sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr); } static u32 pcnet32_get_link(struct net_device *dev) diff --git a/trunk/drivers/net/ethernet/amd/sunlance.c b/trunk/drivers/net/ethernet/amd/sunlance.c index 7ea16d32a5f5..8fda457f94cf 100644 --- a/trunk/drivers/net/ethernet/amd/sunlance.c +++ b/trunk/drivers/net/ethernet/amd/sunlance.c @@ -1540,4 +1540,17 @@ static struct platform_driver sunlance_sbus_driver = { .remove = __devexit_p(sunlance_sbus_remove), }; -module_platform_driver(sunlance_sbus_driver); + +/* Find all the lance cards on the system and initialize them */ +static int __init sparc_lance_init(void) +{ + return platform_driver_register(&sunlance_sbus_driver); +} + +static void __exit sparc_lance_exit(void) +{ + platform_driver_unregister(&sunlance_sbus_driver); +} + +module_init(sparc_lance_init); +module_exit(sparc_lance_exit); diff --git a/trunk/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/trunk/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c index 0a9326aa58b5..7be884d0aaf6 100644 --- a/trunk/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c +++ b/trunk/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c @@ -232,6 +232,7 @@ static void atl1c_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, atl1c_driver_version, sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); drvinfo->n_stats = 0; diff --git a/trunk/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 
b/trunk/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index b8591246eb4c..02c7ed8d9eca 100644 --- a/trunk/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/trunk/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -411,7 +411,7 @@ static void atl1c_set_multi(struct net_device *netdev) } } -static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data) +static void __atl1c_vlan_mode(u32 features, u32 *mac_ctrl_data) { if (features & NETIF_F_HW_VLAN_RX) { /* enable VLAN tag insert/strip */ @@ -422,8 +422,7 @@ static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data) } } -static void atl1c_vlan_mode(struct net_device *netdev, - netdev_features_t features) +static void atl1c_vlan_mode(struct net_device *netdev, u32 features) { struct atl1c_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = adapter->pdev; @@ -483,8 +482,7 @@ static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter, roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE; } -static netdev_features_t atl1c_fix_features(struct net_device *netdev, - netdev_features_t features) +static u32 atl1c_fix_features(struct net_device *netdev, u32 features) { /* * Since there is no support for separate rx/tx vlan accel @@ -501,10 +499,9 @@ static netdev_features_t atl1c_fix_features(struct net_device *netdev, return features; } -static int atl1c_set_features(struct net_device *netdev, - netdev_features_t features) +static int atl1c_set_features(struct net_device *netdev, u32 features) { - netdev_features_t changed = netdev->features ^ features; + u32 changed = netdev->features ^ features; if (changed & NETIF_F_HW_VLAN_RX) atl1c_vlan_mode(netdev, features); diff --git a/trunk/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/trunk/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c index 6e61f9f9ebb5..6269438d365f 100644 --- a/trunk/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c +++ b/trunk/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c @@ -310,12 +310,10 @@ static void atl1e_get_drvinfo(struct net_device *netdev, { struct atl1e_adapter *adapter = netdev_priv(netdev); - strlcpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, atl1e_driver_version, - sizeof(drvinfo->version)); - strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version)); - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info)); + strncpy(drvinfo->driver, atl1e_driver_name, 32); + strncpy(drvinfo->version, atl1e_driver_version, 32); + strncpy(drvinfo->fw_version, "L1e", 32); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); drvinfo->n_stats = 0; drvinfo->testinfo_len = 0; drvinfo->regdump_len = atl1e_get_regs_len(netdev); diff --git a/trunk/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/trunk/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index c915c0873810..95483bcac1d0 100644 --- a/trunk/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/trunk/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -313,7 +313,7 @@ static void atl1e_set_multi(struct net_device *netdev) } } -static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data) +static void __atl1e_vlan_mode(u32 features, u32 *mac_ctrl_data) { if (features & NETIF_F_HW_VLAN_RX) { /* enable VLAN tag insert/strip */ @@ -324,8 +324,7 @@ static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data) } } -static void atl1e_vlan_mode(struct net_device *netdev, - netdev_features_t features) +static void atl1e_vlan_mode(struct 
net_device *netdev, u32 features) { struct atl1e_adapter *adapter = netdev_priv(netdev); u32 mac_ctrl_data = 0; @@ -371,8 +370,7 @@ static int atl1e_set_mac_addr(struct net_device *netdev, void *p) return 0; } -static netdev_features_t atl1e_fix_features(struct net_device *netdev, - netdev_features_t features) +static u32 atl1e_fix_features(struct net_device *netdev, u32 features) { /* * Since there is no support for separate rx/tx vlan accel @@ -386,10 +384,9 @@ static netdev_features_t atl1e_fix_features(struct net_device *netdev, return features; } -static int atl1e_set_features(struct net_device *netdev, - netdev_features_t features) +static int atl1e_set_features(struct net_device *netdev, u32 features) { - netdev_features_t changed = netdev->features ^ features; + u32 changed = netdev->features ^ features; if (changed & NETIF_F_HW_VLAN_RX) atl1e_vlan_mode(netdev, features); diff --git a/trunk/drivers/net/ethernet/atheros/atlx/atl1.c b/trunk/drivers/net/ethernet/atheros/atlx/atl1.c index 9bd204976648..33a4e35f5ee8 100644 --- a/trunk/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/trunk/drivers/net/ethernet/atheros/atlx/atl1.c @@ -3365,6 +3365,7 @@ static void atl1_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, ATLX_DRIVER_VERSION, sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); drvinfo->eedump_len = ATL1_EEDUMP_LEN; diff --git a/trunk/drivers/net/ethernet/atheros/atlx/atl2.c b/trunk/drivers/net/ethernet/atheros/atlx/atl2.c index 071f4c858969..1feae5928a4b 100644 --- a/trunk/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/trunk/drivers/net/ethernet/atheros/atlx/atl2.c @@ -361,7 +361,7 @@ static inline void atl2_irq_disable(struct atl2_adapter *adapter) synchronize_irq(adapter->pdev->irq); } -static void __atl2_vlan_mode(netdev_features_t features, u32 *ctrl) +static void __atl2_vlan_mode(u32 features, u32 *ctrl) { if (features & NETIF_F_HW_VLAN_RX) { /* enable VLAN tag insert/strip */ @@ -372,8 +372,7 @@ static void __atl2_vlan_mode(netdev_features_t features, u32 *ctrl) } } -static void atl2_vlan_mode(struct net_device *netdev, - netdev_features_t features) +static void atl2_vlan_mode(struct net_device *netdev, u32 features) { struct atl2_adapter *adapter = netdev_priv(netdev); u32 ctrl; @@ -392,8 +391,7 @@ static void atl2_restore_vlan(struct atl2_adapter *adapter) atl2_vlan_mode(adapter->netdev, adapter->netdev->features); } -static netdev_features_t atl2_fix_features(struct net_device *netdev, - netdev_features_t features) +static u32 atl2_fix_features(struct net_device *netdev, u32 features) { /* * Since there is no support for separate rx/tx vlan accel @@ -407,10 +405,9 @@ static netdev_features_t atl2_fix_features(struct net_device *netdev, return features; } -static int atl2_set_features(struct net_device *netdev, - netdev_features_t features) +static int atl2_set_features(struct net_device *netdev, u32 features) { - netdev_features_t changed = netdev->features ^ features; + u32 changed = netdev->features ^ features; if (changed & NETIF_F_HW_VLAN_RX) atl2_vlan_mode(netdev, features); @@ -2052,12 +2049,10 @@ static void atl2_get_drvinfo(struct net_device *netdev, { struct atl2_adapter *adapter = netdev_priv(netdev); - strlcpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, atl2_driver_version, - sizeof(drvinfo->version)); - 
strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version)); - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info)); + strncpy(drvinfo->driver, atl2_driver_name, 32); + strncpy(drvinfo->version, atl2_driver_version, 32); + strncpy(drvinfo->fw_version, "L2", 32); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); drvinfo->n_stats = 0; drvinfo->testinfo_len = 0; drvinfo->regdump_len = atl2_get_regs_len(netdev); diff --git a/trunk/drivers/net/ethernet/atheros/atlx/atlx.c b/trunk/drivers/net/ethernet/atheros/atlx/atlx.c index 8ff7411094d5..aabcf4b5745a 100644 --- a/trunk/drivers/net/ethernet/atheros/atlx/atlx.c +++ b/trunk/drivers/net/ethernet/atheros/atlx/atlx.c @@ -211,7 +211,7 @@ static void atlx_link_chg_task(struct work_struct *work) spin_unlock_irqrestore(&adapter->lock, flags); } -static void __atlx_vlan_mode(netdev_features_t features, u32 *ctrl) +static void __atlx_vlan_mode(u32 features, u32 *ctrl) { if (features & NETIF_F_HW_VLAN_RX) { /* enable VLAN tag insert/strip */ @@ -222,8 +222,7 @@ static void __atlx_vlan_mode(netdev_features_t features, u32 *ctrl) } } -static void atlx_vlan_mode(struct net_device *netdev, - netdev_features_t features) +static void atlx_vlan_mode(struct net_device *netdev, u32 features) { struct atlx_adapter *adapter = netdev_priv(netdev); unsigned long flags; @@ -243,8 +242,7 @@ static void atlx_restore_vlan(struct atlx_adapter *adapter) atlx_vlan_mode(adapter->netdev, adapter->netdev->features); } -static netdev_features_t atlx_fix_features(struct net_device *netdev, - netdev_features_t features) +static u32 atlx_fix_features(struct net_device *netdev, u32 features) { /* * Since there is no support for separate rx/tx vlan accel @@ -258,10 +256,9 @@ static netdev_features_t atlx_fix_features(struct net_device *netdev, return features; } -static int atlx_set_features(struct net_device *netdev, - netdev_features_t features) +static int atlx_set_features(struct net_device *netdev, u32 features) { - netdev_features_t changed = netdev->features ^ features; + u32 changed = netdev->features ^ features; if (changed & NETIF_F_HW_VLAN_RX) atlx_vlan_mode(netdev, features); diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2.c b/trunk/drivers/net/ethernet/broadcom/bnx2.c index 021fb818007a..965c7235804d 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2.c @@ -57,11 +57,11 @@ #include "bnx2_fw.h" #define DRV_MODULE_NAME "bnx2" -#define DRV_MODULE_VERSION "2.2.1" -#define DRV_MODULE_RELDATE "Dec 18, 2011" -#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw" +#define DRV_MODULE_VERSION "2.1.11" +#define DRV_MODULE_RELDATE "July 20, 2011" +#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.1.fw" #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw" -#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw" +#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1a.fw" #define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw" #define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw" @@ -409,7 +409,7 @@ static int bnx2_unregister_cnic(struct net_device *dev) mutex_lock(&bp->cnic_lock); cp->drv_state = 0; bnapi->cnic_present = 0; - RCU_INIT_POINTER(bp->cnic_ops, NULL); + rcu_assign_pointer(bp->cnic_ops, NULL); mutex_unlock(&bp->cnic_lock); synchronize_rcu(); return 0; @@ -2054,8 +2054,8 @@ __acquires(&bp->phy_lock) if (bp->autoneg & AUTONEG_SPEED) { u32 adv_reg, adv1000_reg; - u32 new_adv = 0; - u32 new_adv1000 = 0; + u32 new_adv_reg = 0; + u32 new_adv1000_reg = 0; bnx2_read_phy(bp, bp->mii_adv, 
&adv_reg); adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP | @@ -2064,18 +2064,27 @@ __acquires(&bp->phy_lock) bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg); adv1000_reg &= PHY_ALL_1000_SPEED; - new_adv = ethtool_adv_to_mii_adv_t(bp->advertising); - new_adv |= ADVERTISE_CSMA; - new_adv |= bnx2_phy_get_pause_adv(bp); + if (bp->advertising & ADVERTISED_10baseT_Half) + new_adv_reg |= ADVERTISE_10HALF; + if (bp->advertising & ADVERTISED_10baseT_Full) + new_adv_reg |= ADVERTISE_10FULL; + if (bp->advertising & ADVERTISED_100baseT_Half) + new_adv_reg |= ADVERTISE_100HALF; + if (bp->advertising & ADVERTISED_100baseT_Full) + new_adv_reg |= ADVERTISE_100FULL; + if (bp->advertising & ADVERTISED_1000baseT_Full) + new_adv1000_reg |= ADVERTISE_1000FULL; - new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising); + new_adv_reg |= ADVERTISE_CSMA; - if ((adv1000_reg != new_adv1000) || - (adv_reg != new_adv) || + new_adv_reg |= bnx2_phy_get_pause_adv(bp); + + if ((adv1000_reg != new_adv1000_reg) || + (adv_reg != new_adv_reg) || ((bmcr & BMCR_ANENABLE) == 0)) { - bnx2_write_phy(bp, bp->mii_adv, new_adv); - bnx2_write_phy(bp, MII_CTRL1000, new_adv1000); + bnx2_write_phy(bp, bp->mii_adv, new_adv_reg); + bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg); bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART | BMCR_ANENABLE); } @@ -2725,27 +2734,31 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index) } static inline int -bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp) +bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp) { - u8 *data; + struct sk_buff *skb; struct sw_bd *rx_buf = &rxr->rx_buf_ring[index]; dma_addr_t mapping; struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)]; + unsigned long align; - data = kmalloc(bp->rx_buf_size, gfp); - if (!data) + skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp); + if (skb == NULL) { return -ENOMEM; + } - mapping = dma_map_single(&bp->pdev->dev, - get_l2_fhdr(data), - bp->rx_buf_use_size, + if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1)))) + skb_reserve(skb, BNX2_RX_ALIGN - align); + + mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); if (dma_mapping_error(&bp->pdev->dev, mapping)) { - kfree(data); + dev_kfree_skb(skb); return -EIO; } - rx_buf->data = data; + rx_buf->skb = skb; + rx_buf->desc = (struct l2_fhdr *) skb->data; dma_unmap_addr_set(rx_buf, mapping, mapping); rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; @@ -2810,7 +2823,6 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; u16 hw_cons, sw_cons, sw_ring_cons; int tx_pkt = 0, index; - unsigned int tx_bytes = 0; struct netdev_queue *txq; index = (bnapi - bp->bnx2_napi); @@ -2865,7 +2877,6 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) sw_cons = NEXT_TX_BD(sw_cons); - tx_bytes += skb->len; dev_kfree_skb(skb); tx_pkt++; if (tx_pkt == budget) @@ -2875,7 +2886,6 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) hw_cons = bnx2_get_hw_tx_cons(bnapi); } - netdev_tx_completed_queue(txq, tx_pkt, tx_bytes); txr->hw_tx_cons = hw_cons; txr->tx_cons = sw_cons; @@ -2955,8 +2965,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, } static inline void -bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, - u8 *data, u16 cons, u16 prod) +bnx2_reuse_rx_skb(struct bnx2 *bp, struct 
bnx2_rx_ring_info *rxr, + struct sk_buff *skb, u16 cons, u16 prod) { struct sw_bd *cons_rx_buf, *prod_rx_buf; struct rx_bd *cons_bd, *prod_bd; @@ -2970,7 +2980,8 @@ bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, rxr->rx_prod_bseq += bp->rx_buf_use_size; - prod_rx_buf->data = data; + prod_rx_buf->skb = skb; + prod_rx_buf->desc = (struct l2_fhdr *) skb->data; if (cons == prod) return; @@ -2984,39 +2995,33 @@ bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo; } -static struct sk_buff * -bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data, +static int +bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb, unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr, u32 ring_idx) { int err; u16 prod = ring_idx & 0xffff; - struct sk_buff *skb; - err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); + err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC); if (unlikely(err)) { - bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod); -error: + bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod); if (hdr_len) { unsigned int raw_len = len + 4; int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT; bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages); } - return NULL; + return err; } + skb_reserve(skb, BNX2_RX_OFFSET); dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); - skb = build_skb(data); - if (!skb) { - kfree(data); - goto error; - } - skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET); + if (hdr_len == 0) { skb_put(skb, len); - return skb; + return 0; } else { unsigned int i, frag_len, frag_size, pages; struct sw_pg *rx_pg; @@ -3047,7 +3052,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data, skb_frag_size_sub(frag, tail); skb->data_len -= tail; } - return skb; + return 0; } rx_pg = &rxr->rx_pg_ring[pg_cons]; @@ -3069,7 +3074,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data, rxr->rx_pg_prod = pg_prod; bnx2_reuse_rx_skb_pages(bp, rxr, skb, pages - i); - return NULL; + return err; } dma_unmap_page(&bp->pdev->dev, mapping_old, @@ -3086,7 +3091,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data, rxr->rx_pg_prod = pg_prod; rxr->rx_pg_cons = pg_cons; } - return skb; + return 0; } static inline u16 @@ -3125,17 +3130,19 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) struct sw_bd *rx_buf, *next_rx_buf; struct sk_buff *skb; dma_addr_t dma_addr; - u8 *data; sw_ring_cons = RX_RING_IDX(sw_cons); sw_ring_prod = RX_RING_IDX(sw_prod); rx_buf = &rxr->rx_buf_ring[sw_ring_cons]; - data = rx_buf->data; - rx_buf->data = NULL; + skb = rx_buf->skb; + prefetchw(skb); + + next_rx_buf = + &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))]; + prefetch(next_rx_buf->desc); - rx_hdr = get_l2_fhdr(data); - prefetch(rx_hdr); + rx_buf->skb = NULL; dma_addr = dma_unmap_addr(rx_buf, mapping); @@ -3143,10 +3150,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE); - next_rx_buf = - &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))]; - prefetch(get_l2_fhdr(next_rx_buf->data)); - + rx_hdr = rx_buf->desc; len = rx_hdr->l2_fhdr_pkt_len; status = rx_hdr->l2_fhdr_status; @@ -3165,7 +3169,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) L2_FHDR_ERRORS_TOO_SHORT | L2_FHDR_ERRORS_GIANT_FRAME))) { - bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons, + 
bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons, sw_ring_prod); if (pg_ring_used) { int pages; @@ -3180,29 +3184,30 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) len -= 4; if (len <= bp->rx_copy_thresh) { - skb = netdev_alloc_skb(bp->dev, len + 6); - if (skb == NULL) { - bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons, + struct sk_buff *new_skb; + + new_skb = netdev_alloc_skb(bp->dev, len + 6); + if (new_skb == NULL) { + bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons, sw_ring_prod); goto next_rx; } /* aligned copy */ - memcpy(skb->data, - (u8 *)rx_hdr + BNX2_RX_OFFSET - 6, - len + 6); - skb_reserve(skb, 6); - skb_put(skb, len); + skb_copy_from_linear_data_offset(skb, + BNX2_RX_OFFSET - 6, + new_skb->data, len + 6); + skb_reserve(new_skb, 6); + skb_put(new_skb, len); - bnx2_reuse_rx_data(bp, rxr, data, + bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons, sw_ring_prod); - } else { - skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr, - (sw_ring_cons << 16) | sw_ring_prod); - if (!skb) - goto next_rx; - } + skb = new_skb; + } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len, + dma_addr, (sw_ring_cons << 16) | sw_ring_prod))) + goto next_rx; + if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag); @@ -5229,7 +5234,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num) ring_prod = prod = rxr->rx_prod; for (i = 0; i < bp->rx_ring_size; i++) { - if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) { + if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) { netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n", ring_num, i, bp->rx_ring_size); break; @@ -5324,7 +5329,7 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size) rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8; rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + sizeof(struct skb_shared_info); bp->rx_copy_thresh = BNX2_RX_COPY_THRESH; bp->rx_pg_ring_size = 0; @@ -5346,9 +5351,8 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size) } bp->rx_buf_use_size = rx_size; - /* hw alignment + build_skb() overhead*/ - bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) + - NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + /* hw alignment */ + bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN; bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET; bp->rx_ring_size = size; bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS); @@ -5396,7 +5400,6 @@ bnx2_free_tx_skbs(struct bnx2 *bp) } dev_kfree_skb(skb); } - netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); } } @@ -5415,9 +5418,9 @@ bnx2_free_rx_skbs(struct bnx2 *bp) for (j = 0; j < bp->rx_max_ring_idx; j++) { struct sw_bd *rx_buf = &rxr->rx_buf_ring[j]; - u8 *data = rx_buf->data; + struct sk_buff *skb = rx_buf->skb; - if (data == NULL) + if (skb == NULL) continue; dma_unmap_single(&bp->pdev->dev, @@ -5425,9 +5428,9 @@ bnx2_free_rx_skbs(struct bnx2 *bp) bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); - rx_buf->data = NULL; + rx_buf->skb = NULL; - kfree(data); + dev_kfree_skb(skb); } for (j = 0; j < bp->rx_max_pg_ring_idx; j++) bnx2_free_rx_page(bp, rxr, j); @@ -5733,8 +5736,7 @@ static int bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) { unsigned int pkt_size, num_pkts, i; - struct sk_buff *skb; - u8 *data; + struct sk_buff *skb, *rx_skb; unsigned char *packet; u16 rx_start_idx, rx_idx; dma_addr_t map; @@ -5826,14 +5828,14 @@ bnx2_run_loopback(struct bnx2 
*bp, int loopback_mode) } rx_buf = &rxr->rx_buf_ring[rx_start_idx]; - data = rx_buf->data; + rx_skb = rx_buf->skb; - rx_hdr = get_l2_fhdr(data); - data = (u8 *)rx_hdr + BNX2_RX_OFFSET; + rx_hdr = rx_buf->desc; + skb_reserve(rx_skb, BNX2_RX_OFFSET); dma_sync_single_for_cpu(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), - bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); + bp->rx_buf_size, PCI_DMA_FROMDEVICE); if (rx_hdr->l2_fhdr_status & (L2_FHDR_ERRORS_BAD_CRC | @@ -5850,7 +5852,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) } for (i = 14; i < pkt_size; i++) { - if (*(data + i) != (unsigned char) (i & 0xff)) { + if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) { goto loopback_test_done; } } @@ -6550,8 +6552,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) } txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END; - netdev_tx_sent_queue(txq, skb->len); - prod = NEXT_TX_BD(prod); txr->tx_prod_bseq += skb->len; @@ -6873,10 +6873,10 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct bnx2 *bp = netdev_priv(dev); - strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); - strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version)); + strcpy(info->driver, DRV_MODULE_NAME); + strcpy(info->version, DRV_MODULE_VERSION); + strcpy(info->bus_info, pci_name(bp->pdev)); + strcpy(info->fw_version, bp->fw_version); } #define BNX2_REGDUMP_LEN (32 * 1024) @@ -7571,8 +7571,8 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) return 0; } -static netdev_features_t -bnx2_fix_features(struct net_device *dev, netdev_features_t features) +static u32 +bnx2_fix_features(struct net_device *dev, u32 features) { struct bnx2 *bp = netdev_priv(dev); @@ -7583,7 +7583,7 @@ bnx2_fix_features(struct net_device *dev, netdev_features_t features) } static int -bnx2_set_features(struct net_device *dev, netdev_features_t features) +bnx2_set_features(struct net_device *dev, u32 features) { struct bnx2 *bp = netdev_priv(dev); diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2.h b/trunk/drivers/net/ethernet/broadcom/bnx2.h index 1db2d51ba3f1..99d31a7d6aaa 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2.h @@ -6563,25 +6563,12 @@ struct l2_fhdr { #define MB_TX_CID_ADDR MB_GET_CID_ADDR(TX_CID) #define MB_RX_CID_ADDR MB_GET_CID_ADDR(RX_CID) -/* - * This driver uses new build_skb() API : - * RX ring buffer contains pointer to kmalloc() data only, - * skb are built only after Hardware filled the frame. 
- */ struct sw_bd { - u8 *data; + struct sk_buff *skb; + struct l2_fhdr *desc; DEFINE_DMA_UNMAP_ADDR(mapping); }; -/* Its faster to compute this from data than storing it in sw_bd - * (less cache misses) - */ -static inline struct l2_fhdr *get_l2_fhdr(u8 *data) -{ - return (struct l2_fhdr *)(PTR_ALIGN(data, BNX2_RX_ALIGN) + NET_SKB_PAD); -} - - struct sw_pg { struct page *page; DEFINE_DMA_UNMAP_ADDR(mapping); diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 8c73d34b2ff1..aec7212ac983 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -23,8 +23,8 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.70.35-0" -#define DRV_MODULE_RELDATE "2011/11/10" +#define DRV_MODULE_VERSION "1.70.30-0" +#define DRV_MODULE_RELDATE "2011/10/25" #define BNX2X_BC_VER 0x040200 #if defined(CONFIG_DCB) @@ -293,13 +293,8 @@ enum { #define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp)) /* fast path */ -/* - * This driver uses new build_skb() API : - * RX ring buffer contains pointer to kmalloc() data only, - * skb are built only after Hardware filled the frame. - */ struct sw_rx_bd { - u8 *data; + struct sk_buff *skb; DEFINE_DMA_UNMAP_ADDR(mapping); }; @@ -416,7 +411,8 @@ union db_prod { /* Number of u64 elements in SGE mask array */ -#define RX_SGE_MASK_LEN (NUM_RX_SGE / BIT_VEC64_ELEM_SZ) +#define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \ + BIT_VEC64_ELEM_SZ) #define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) #define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK) @@ -429,8 +425,8 @@ union host_hc_status_block { struct bnx2x_agg_info { /* - * First aggregation buffer is a data buffer, the following - are pages. - * We will preallocate the data buffer for each aggregation when + * First aggregation buffer is an skb, the following - are pages. + * We will preallocate the skbs for each aggregation when * we open the interface and will replace the BD at the consumer * with this one when we receive the TPA_START CQE in order to * keep the Rx BD ring consistent. 
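For context on the two RX buffer schemes these bnx2/bnx2x hunks switch between: with the build_skb() approach (the "-" side here), each ring slot holds only a kmalloc()'d data buffer sized with tailroom for struct skb_shared_info, and the sk_buff is constructed only after the hardware has filled the frame; the older scheme (the "+" side) keeps a fully allocated sk_buff in every slot and DMA-maps skb->data up front. A minimal sketch of the build_skb() completion path follows, assuming <linux/skbuff.h> and <linux/etherdevice.h>; it mirrors the single-argument build_skb()/kfree() usage visible in the bnx2 hunks above, while the function name, the NET_SKB_PAD headroom handling and the error path are illustrative assumptions, not the driver's actual logic.

/* Illustrative sketch only -- not part of this patch.  The ring slot
 * owns a plain kmalloc() buffer (allocated with tailroom for
 * struct skb_shared_info); the sk_buff is built once the NIC has
 * DMA'd a frame into it.
 */
static struct sk_buff *example_rx_complete(struct net_device *dev,
					   u8 *data, unsigned int pkt_len)
{
	struct sk_buff *skb = build_skb(data);	/* single-argument form, as used above */

	if (!skb) {
		kfree(data);			/* buffer is ordinary kmalloc() memory */
		return NULL;
	}

	skb_reserve(skb, NET_SKB_PAD);		/* skip the headroom reserved at allocation */
	skb_put(skb, pkt_len);			/* frame length reported by the hardware */
	skb->protocol = eth_type_trans(skb, dev);

	return skb;
}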
@@ -444,7 +440,6 @@ struct bnx2x_agg_info { u16 parsing_flags; u16 vlan_tag; u16 len_on_bd; - u32 rxhash; }; #define Q_STATS_OFFSET32(stat_name) \ @@ -512,7 +507,6 @@ struct bnx2x_fastpath { __le16 fp_hc_idx; u8 index; /* number in fp array */ - u8 rx_queue; /* index for skb_record */ u8 cl_id; /* eth client id */ u8 cl_qzone_id; u8 fw_sb_id; /* status block number in FW */ @@ -887,8 +881,6 @@ struct bnx2x_common { #define CHIP_PORT_MODE_NONE 0x2 #define CHIP_MODE(bp) (bp->common.chip_port_mode) #define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE) - - u32 boot_mode; }; /* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */ @@ -1050,8 +1042,6 @@ struct bnx2x_slowpath { u32 wb_comp; u32 wb_data[4]; - - union drv_info_to_mcp drv_info_to_mcp; }; #define bnx2x_sp(bp, var) (&bp->slowpath->var) @@ -1132,21 +1122,18 @@ enum { enum { BNX2X_PORT_QUERY_IDX, BNX2X_PF_QUERY_IDX, - BNX2X_FCOE_QUERY_IDX, BNX2X_FIRST_QUEUE_QUERY_IDX, }; struct bnx2x_fw_stats_req { struct stats_query_header hdr; - struct stats_query_entry query[FP_SB_MAX_E1x+ - BNX2X_FIRST_QUEUE_QUERY_IDX]; + struct stats_query_entry query[STATS_QUERY_CMD_COUNT]; }; struct bnx2x_fw_stats_data { struct stats_counter storm_counters; struct per_port_stats port; struct per_pf_stats pf; - struct fcoe_statistics_params fcoe; struct per_queue_stats queue_stats[1]; }; @@ -1154,7 +1141,6 @@ struct bnx2x_fw_stats_data { enum { BNX2X_SP_RTNL_SETUP_TC, BNX2X_SP_RTNL_TX_TIMEOUT, - BNX2X_SP_RTNL_FAN_FAILURE, }; @@ -1200,20 +1186,10 @@ struct bnx2x { #define ETH_MAX_JUMBO_PACKET_SIZE 9600 /* Max supported alignment is 256 (8 shift) */ -#define BNX2X_RX_ALIGN_SHIFT min(8, L1_CACHE_SHIFT) - - /* FW uses 2 Cache lines Alignment for start packet and size - * - * We assume skb_build() uses sizeof(struct skb_shared_info) bytes - * at the end of skb->data, to avoid wasting a full cache line. - * This reduces memory use (skb->truesize). - */ -#define BNX2X_FW_RX_ALIGN_START (1UL << BNX2X_RX_ALIGN_SHIFT) - -#define BNX2X_FW_RX_ALIGN_END \ - max(1UL << BNX2X_RX_ALIGN_SHIFT, \ - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) - +#define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? 
\ + L1_CACHE_SHIFT : 8) + /* FW use 2 Cache lines Alignment for start packet and size */ +#define BNX2X_FW_RX_ALIGN (2 << BNX2X_RX_ALIGN_SHIFT) #define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) struct host_sp_status_block *def_status_blk; @@ -1273,7 +1249,6 @@ struct bnx2x { #define NO_ISCSI_OOO_FLAG (1 << 13) #define NO_ISCSI_FLAG (1 << 14) #define NO_FCOE_FLAG (1 << 15) -#define BC_SUPPORTS_PFC_STATS (1 << 17) #define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) #define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) @@ -2009,6 +1984,13 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR) +#define RSS_FLAGS(bp) \ + (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \ + (bp->multi_mode << \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT)) #define MULTI_MASK 0x7f @@ -2073,8 +2055,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define BNX2X_VPD_LEN 128 #define VENDOR_ID_LEN 4 -int bnx2x_close(struct net_device *dev); - /* Congestion management fairness mode */ #define CMNG_FNS_NONE 0 #define CMNG_FNS_MINMAX 1 @@ -2092,16 +2072,4 @@ static const u32 dmae_reg_go_c[] = { void bnx2x_set_ethtool_ops(struct net_device *netdev); void bnx2x_notify_link_changed(struct bnx2x *bp); - - -#define BNX2X_MF_PROTOCOL(bp) \ - ((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK) - -#ifdef BCM_CNIC -#define BNX2X_IS_MF_PROTOCOL_ISCSI(bp) \ - (BNX2X_MF_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI) - -#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_PROTOCOL_ISCSI(bp)) -#endif - #endif /* bnx2x.h */ diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 2b731b253598..580b44edb066 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -79,21 +79,19 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index) * @to: destination FP index * * Makes sure the contents of the bp->fp[to].napi is kept - * intact. This is done by first copying the napi struct from - * the target to the source, and then mem copying the entire - * source onto the target + * intact. 
*/ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) { struct bnx2x_fastpath *from_fp = &bp->fp[from]; struct bnx2x_fastpath *to_fp = &bp->fp[to]; - - /* Copy the NAPI object as it has been already initialized */ - from_fp->napi = to_fp->napi; - + struct napi_struct orig_napi = to_fp->napi; /* Move bnx2x_fastpath contents */ memcpy(to_fp, from_fp, sizeof(*to_fp)); to_fp->index = to; + + /* Restore the NAPI object as it has been already initialized */ + to_fp->napi = orig_napi; } int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ @@ -102,8 +100,7 @@ int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ * return idx of last bd freed */ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, - u16 idx, unsigned int *pkts_compl, - unsigned int *bytes_compl) + u16 idx) { struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx]; struct eth_tx_start_bd *tx_start_bd; @@ -160,10 +157,6 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, /* release skb */ WARN_ON(!skb); - if (skb) { - (*pkts_compl)++; - (*bytes_compl) += skb->len; - } dev_kfree_skb_any(skb); tx_buf->first_bd = 0; tx_buf->skb = NULL; @@ -175,7 +168,6 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) { struct netdev_queue *txq; u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons; - unsigned int pkts_compl = 0, bytes_compl = 0; #ifdef BNX2X_STOP_ON_ERROR if (unlikely(bp->panic)) @@ -195,14 +187,10 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) " pkt_cons %u\n", txdata->txq_index, hw_cons, sw_cons, pkt_cons); - bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons, - &pkts_compl, &bytes_compl); - + bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons); sw_cons++; } - netdev_tx_completed_queue(txq, pkts_compl, bytes_compl); - txdata->tx_pkt_cons = sw_cons; txdata->tx_bd_cons = bd_cons; @@ -304,21 +292,8 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, fp->last_max_sge, fp->rx_sge_prod); } -/* Set Toeplitz hash value in the skb using the value from the - * CQE (calculated by HW). 
- */ -static u32 bnx2x_get_rxhash(const struct bnx2x *bp, - const struct eth_fast_path_rx_cqe *cqe) -{ - /* Set Toeplitz hash from CQE */ - if ((bp->dev->features & NETIF_F_RXHASH) && - (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) - return le32_to_cpu(cqe->rss_hash_result); - return 0; -} - static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, - u16 cons, u16 prod, + struct sk_buff *skb, u16 cons, u16 prod, struct eth_fast_path_rx_cqe *cqe) { struct bnx2x *bp = fp->bp; @@ -333,9 +308,9 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, if (tpa_info->tpa_state != BNX2X_TPA_STOP) BNX2X_ERR("start of bin not in stop [%d]\n", queue); - /* Try to map an empty data buffer from the aggregation info */ + /* Try to map an empty skb from the aggregation info */ mapping = dma_map_single(&bp->pdev->dev, - first_buf->data + NET_SKB_PAD, + first_buf->skb->data, fp->rx_buf_size, DMA_FROM_DEVICE); /* * ...if it fails - move the skb from the consumer to the producer @@ -345,15 +320,15 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { /* Move the BD from the consumer to the producer */ - bnx2x_reuse_rx_data(fp, cons, prod); + bnx2x_reuse_rx_skb(fp, cons, prod); tpa_info->tpa_state = BNX2X_TPA_ERROR; return; } - /* move empty data from pool to prod */ - prod_rx_buf->data = first_buf->data; + /* move empty skb from pool to prod */ + prod_rx_buf->skb = first_buf->skb; dma_unmap_addr_set(prod_rx_buf, mapping, mapping); - /* point prod_bd to new data */ + /* point prod_bd to new skb */ prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); @@ -367,7 +342,6 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, tpa_info->tpa_state = BNX2X_TPA_START; tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd); tpa_info->placement_offset = cqe->placement_offset; - tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe); #ifdef BNX2X_STOP_ON_ERROR fp->tpa_queue_used |= (1 << queue); @@ -495,12 +469,11 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, { struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; struct sw_rx_bd *rx_buf = &tpa_info->first_buf; - u32 pad = tpa_info->placement_offset; + u8 pad = tpa_info->placement_offset; u16 len = tpa_info->len_on_bd; - struct sk_buff *skb = NULL; - u8 *data = rx_buf->data; + struct sk_buff *skb = rx_buf->skb; /* alloc new skb */ - u8 *new_data; + struct sk_buff *new_skb; u8 old_tpa_state = tpa_info->tpa_state; tpa_info->tpa_state = BNX2X_TPA_STOP; @@ -511,18 +484,18 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, if (old_tpa_state == BNX2X_TPA_ERROR) goto drop; - /* Try to allocate the new data */ - new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC); + /* Try to allocate the new skb */ + new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size); /* Unmap skb in the pool anyway, as we are going to change pool entry status to BNX2X_TPA_STOP even if new skb allocation fails. 
*/ dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), fp->rx_buf_size, DMA_FROM_DEVICE); - if (likely(new_data)) - skb = build_skb(data); - if (likely(skb)) { + if (likely(new_skb)) { + prefetch(skb); + prefetch(((char *)(skb)) + L1_CACHE_BYTES); #ifdef BNX2X_STOP_ON_ERROR if (pad + len > fp->rx_buf_size) { @@ -534,9 +507,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, } #endif - skb_reserve(skb, pad + NET_SKB_PAD); + skb_reserve(skb, pad); skb_put(skb, len); - skb->rxhash = tpa_info->rxhash; skb->protocol = eth_type_trans(skb, bp->dev); skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -552,8 +524,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, } - /* put new data in bin */ - rx_buf->data = new_data; + /* put new skb in bin */ + rx_buf->skb = new_skb; return; } @@ -565,6 +537,19 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, fp->eth_q_stats.rx_skb_alloc_failed++; } +/* Set Toeplitz hash value in the skb using the value from the + * CQE (calculated by HW). + */ +static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe, + struct sk_buff *skb) +{ + /* Set Toeplitz hash from CQE */ + if ((bp->dev->features & NETIF_F_RXHASH) && + (cqe->fast_path_cqe.status_flags & + ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) + skb->rxhash = + le32_to_cpu(cqe->fast_path_cqe.rss_hash_result); +} int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) { @@ -607,7 +592,6 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) u8 cqe_fp_flags; enum eth_rx_cqe_type cqe_fp_type; u16 len, pad; - u8 *data; #ifdef BNX2X_STOP_ON_ERROR if (unlikely(bp->panic)) @@ -618,6 +602,13 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) bd_prod = RX_BD(bd_prod); bd_cons = RX_BD(bd_cons); + /* Prefetch the page containing the BD descriptor + at producer's index. 
It will be needed when new skb is + allocated */ + prefetch((void *)(PAGE_ALIGN((unsigned long) + (&fp->rx_desc_ring[bd_prod])) - + PAGE_SIZE + 1)); + cqe = &fp->rx_comp_ring[comp_ring_cons]; cqe_fp = &cqe->fast_path_cqe; cqe_fp_flags = cqe_fp->type_error_flags; @@ -633,123 +624,138 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) { bnx2x_sp_event(fp, cqe); goto next_cqe; - } - rx_buf = &fp->rx_buf_ring[bd_cons]; - data = rx_buf->data; - if (!CQE_TYPE_FAST(cqe_fp_type)) { + /* this is an rx packet */ + } else { + rx_buf = &fp->rx_buf_ring[bd_cons]; + skb = rx_buf->skb; + prefetch(skb); + + if (!CQE_TYPE_FAST(cqe_fp_type)) { #ifdef BNX2X_STOP_ON_ERROR - /* sanity check */ - if (fp->disable_tpa && - (CQE_TYPE_START(cqe_fp_type) || - CQE_TYPE_STOP(cqe_fp_type))) - BNX2X_ERR("START/STOP packet while " - "disable_tpa type %x\n", - CQE_TYPE(cqe_fp_type)); + /* sanity check */ + if (fp->disable_tpa && + (CQE_TYPE_START(cqe_fp_type) || + CQE_TYPE_STOP(cqe_fp_type))) + BNX2X_ERR("START/STOP packet while " + "disable_tpa type %x\n", + CQE_TYPE(cqe_fp_type)); #endif - if (CQE_TYPE_START(cqe_fp_type)) { - u16 queue = cqe_fp->queue_index; - DP(NETIF_MSG_RX_STATUS, - "calling tpa_start on queue %d\n", - queue); + if (CQE_TYPE_START(cqe_fp_type)) { + u16 queue = cqe_fp->queue_index; + DP(NETIF_MSG_RX_STATUS, + "calling tpa_start on queue %d\n", + queue); - bnx2x_tpa_start(fp, queue, - bd_cons, bd_prod, - cqe_fp); - goto next_rx; - } else { - u16 queue = - cqe->end_agg_cqe.queue_index; - DP(NETIF_MSG_RX_STATUS, - "calling tpa_stop on queue %d\n", - queue); - - bnx2x_tpa_stop(bp, fp, queue, - &cqe->end_agg_cqe, - comp_ring_cons); + bnx2x_tpa_start(fp, queue, skb, + bd_cons, bd_prod, + cqe_fp); + + /* Set Toeplitz hash for LRO skb */ + bnx2x_set_skb_rxhash(bp, cqe, skb); + + goto next_rx; + + } else { + u16 queue = + cqe->end_agg_cqe.queue_index; + DP(NETIF_MSG_RX_STATUS, + "calling tpa_stop on queue %d\n", + queue); + + bnx2x_tpa_stop(bp, fp, queue, + &cqe->end_agg_cqe, + comp_ring_cons); #ifdef BNX2X_STOP_ON_ERROR - if (bp->panic) - return 0; + if (bp->panic) + return 0; #endif - bnx2x_update_sge_prod(fp, cqe_fp); - goto next_cqe; + bnx2x_update_sge_prod(fp, cqe_fp); + goto next_cqe; + } } - } - /* non TPA */ - len = le16_to_cpu(cqe_fp->pkt_len); - pad = cqe_fp->placement_offset; - dma_sync_single_for_cpu(&bp->pdev->dev, + /* non TPA */ + len = le16_to_cpu(cqe_fp->pkt_len); + pad = cqe_fp->placement_offset; + dma_sync_single_for_cpu(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), - pad + RX_COPY_THRESH, - DMA_FROM_DEVICE); - pad += NET_SKB_PAD; - prefetch(data + pad); /* speedup eth_type_trans() */ - /* is this an error packet? */ - if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { - DP(NETIF_MSG_RX_ERR, - "ERROR flags %x rx packet %u\n", - cqe_fp_flags, sw_comp_cons); - fp->eth_q_stats.rx_err_discard_pkt++; - goto reuse_rx; - } + pad + RX_COPY_THRESH, + DMA_FROM_DEVICE); + prefetch(((char *)(skb)) + L1_CACHE_BYTES); - /* Since we don't have a jumbo ring - * copy small packets if mtu > 1500 - */ - if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && - (len <= RX_COPY_THRESH)) { - skb = netdev_alloc_skb_ip_align(bp->dev, len); - if (skb == NULL) { + /* is this an error packet? 
*/ + if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { DP(NETIF_MSG_RX_ERR, - "ERROR packet dropped because of alloc failure\n"); - fp->eth_q_stats.rx_skb_alloc_failed++; + "ERROR flags %x rx packet %u\n", + cqe_fp_flags, sw_comp_cons); + fp->eth_q_stats.rx_err_discard_pkt++; goto reuse_rx; } - memcpy(skb->data, data + pad, len); - bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); - } else { - if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) { + + /* Since we don't have a jumbo ring + * copy small packets if mtu > 1500 + */ + if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && + (len <= RX_COPY_THRESH)) { + struct sk_buff *new_skb; + + new_skb = netdev_alloc_skb(bp->dev, len + pad); + if (new_skb == NULL) { + DP(NETIF_MSG_RX_ERR, + "ERROR packet dropped " + "because of alloc failure\n"); + fp->eth_q_stats.rx_skb_alloc_failed++; + goto reuse_rx; + } + + /* aligned copy */ + skb_copy_from_linear_data_offset(skb, pad, + new_skb->data + pad, len); + skb_reserve(new_skb, pad); + skb_put(new_skb, len); + + bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod); + + skb = new_skb; + + } else + if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) { dma_unmap_single(&bp->pdev->dev, - dma_unmap_addr(rx_buf, mapping), + dma_unmap_addr(rx_buf, mapping), fp->rx_buf_size, DMA_FROM_DEVICE); - skb = build_skb(data); - if (unlikely(!skb)) { - kfree(data); - fp->eth_q_stats.rx_skb_alloc_failed++; - goto next_rx; - } skb_reserve(skb, pad); + skb_put(skb, len); + } else { DP(NETIF_MSG_RX_ERR, "ERROR packet dropped because " "of alloc failure\n"); fp->eth_q_stats.rx_skb_alloc_failed++; reuse_rx: - bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); + bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod); goto next_rx; } - } - skb_put(skb, len); - skb->protocol = eth_type_trans(skb, bp->dev); + skb->protocol = eth_type_trans(skb, bp->dev); - /* Set Toeplitz hash for a none-LRO skb */ - skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp); + /* Set Toeplitz hash for a none-LRO skb */ + bnx2x_set_skb_rxhash(bp, cqe, skb); - skb_checksum_none_assert(skb); + skb_checksum_none_assert(skb); - if (bp->dev->features & NETIF_F_RXCSUM) { + if (bp->dev->features & NETIF_F_RXCSUM) { - if (likely(BNX2X_RX_CSUM_OK(cqe))) - skb->ip_summed = CHECKSUM_UNNECESSARY; - else - fp->eth_q_stats.hw_csum_err++; + if (likely(BNX2X_RX_CSUM_OK(cqe))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + fp->eth_q_stats.hw_csum_err++; + } } - skb_record_rx_queue(skb, fp->rx_queue); + skb_record_rx_queue(skb, fp->index); if (le16_to_cpu(cqe_fp->pars_flags.flags) & PARSING_FLAGS_VLAN) @@ -759,7 +765,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) next_rx: - rx_buf->data = NULL; + rx_buf->skb = NULL; bd_cons = NEXT_RX_IDX(bd_cons); bd_prod = NEXT_RX_IDX(bd_prod); @@ -1005,9 +1011,9 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) struct sw_rx_bd *first_buf = &tpa_info->first_buf; - first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, - GFP_ATOMIC); - if (!first_buf->data) { + first_buf->skb = netdev_alloc_skb(bp->dev, + fp->rx_buf_size); + if (!first_buf->skb) { BNX2X_ERR("Failed to allocate TPA " "skb pool for queue[%d] - " "disabling TPA on this " @@ -1087,18 +1093,16 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp) struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_cos_in_tx_queue(fp, cos) { struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; - unsigned pkts_compl = 0, bytes_compl = 0; + u16 bd_cons = txdata->tx_bd_cons; u16 sw_prod = txdata->tx_pkt_prod; u16 sw_cons = txdata->tx_pkt_cons; while (sw_cons != sw_prod) { - bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons), - &pkts_compl, 
&bytes_compl); + bd_cons = bnx2x_free_tx_pkt(bp, txdata, + TX_BD(sw_cons)); sw_cons++; } - netdev_tx_reset_queue( - netdev_get_tx_queue(bp->dev, txdata->txq_index)); } } } @@ -1114,16 +1118,16 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp) for (i = 0; i < NUM_RX_BD; i++) { struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; - u8 *data = rx_buf->data; + struct sk_buff *skb = rx_buf->skb; - if (data == NULL) + if (skb == NULL) continue; dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), fp->rx_buf_size, DMA_FROM_DEVICE); - rx_buf->data = NULL; - kfree(data); + rx_buf->skb = NULL; + dev_kfree_skb(skb); } } @@ -1441,11 +1445,6 @@ void bnx2x_set_num_queues(struct bnx2x *bp) break; } -#ifdef BCM_CNIC - /* override in ISCSI SD mod */ - if (IS_MF_ISCSI_SD(bp)) - bp->num_queues = 1; -#endif /* Add special queues */ bp->num_queues += NON_ETH_CONTEXT_USE; } @@ -1510,7 +1509,6 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp) for_each_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; - u32 mtu; /* Always use a mini-jumbo MTU for the FCoE L2 ring */ if (IS_FCOE_IDX(i)) @@ -1520,15 +1518,13 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp) * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer * overrun attack. */ - mtu = BNX2X_FCOE_MINI_JUMBO_MTU; + fp->rx_buf_size = + BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD + + BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING; else - mtu = bp->dev->mtu; - fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START + - IP_HEADER_ALIGNMENT_PADDING + - ETH_OVREHEAD + - mtu + - BNX2X_FW_RX_ALIGN_END; - /* Note : rx_buf_size doesnt take into account NET_SKB_PAD */ + fp->rx_buf_size = + bp->dev->mtu + ETH_OVREHEAD + + BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING; } } @@ -1545,8 +1541,7 @@ static inline int bnx2x_init_rss_pf(struct bnx2x *bp) if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { for (i = 0; i < sizeof(ind_table); i++) ind_table[i] = - bp->fp->cl_id + - ethtool_rxfh_indir_default(i, num_eth_queues); + bp->fp->cl_id + (i % num_eth_queues); } /* @@ -1934,17 +1929,13 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) break; } - if (bp->port.pmf) - bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0); - else + if (!bp->port.pmf) bnx2x__link_status_update(bp); /* start the timer */ mod_timer(&bp->timer, jiffies + bp->current_interval); #ifdef BCM_CNIC - /* re-read iscsi info */ - bnx2x_get_iscsi_info(bp); bnx2x_setup_cnic_irq_info(bp); if (bp->state == BNX2X_STATE_OPEN) bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); @@ -2808,7 +2799,6 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { - unsigned int pkts_compl = 0, bytes_compl = 0; DP(NETIF_MSG_TX_QUEUED, "Unable to map page - " "dropping packet...\n"); @@ -2820,8 +2810,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) */ first_bd->nbd = cpu_to_le16(nbd); bnx2x_free_tx_pkt(bp, txdata, - TX_BD(txdata->tx_pkt_prod), - &pkts_compl, &bytes_compl); + TX_BD(txdata->tx_pkt_prod)); return NETDEV_TX_OK; } @@ -2882,8 +2871,6 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) pbd_e2->parsing_data); DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); - netdev_tx_sent_queue(txq, skb->len); - txdata->tx_pkt_prod++; /* * Make sure that the BD data is updated before updating the producer @@ -2994,14 +2981,9 @@ int bnx2x_change_mac_addr(struct net_device *dev, 
void *p) struct bnx2x *bp = netdev_priv(dev); int rc = 0; - if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) + if (!is_valid_ether_addr((u8 *)(addr->sa_data))) return -EINVAL; -#ifdef BCM_CNIC - if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data)) - return -EINVAL; -#endif - if (netif_running(dev)) { rc = bnx2x_set_eth_mac(bp, false); if (rc) @@ -3116,12 +3098,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) u8 cos; int rx_ring_size = 0; -#ifdef BCM_CNIC - if (IS_MF_ISCSI_SD(bp)) { - rx_ring_size = MIN_RX_SIZE_NONTPA; - bp->rx_ring_size = rx_ring_size; - } else -#endif + /* if rx_ring_size specified - use it */ if (!bp->rx_ring_size) { rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); @@ -3131,7 +3108,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) MIN_RX_SIZE_TPA, rx_ring_size); bp->rx_ring_size = rx_ring_size; - } else /* if rx_ring_size specified - use it */ + } else rx_ring_size = bp->rx_ring_size; /* Common */ @@ -3301,14 +3278,14 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp) msix_table_size = bp->igu_sb_cnt + 1; /* fp array: RSS plus CNIC related L2 queues */ - fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE, + fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) * sizeof(*fp), GFP_KERNEL); if (!fp) goto alloc_err; bp->fp = fp; /* msix table */ - tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL); + tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL); if (!tbl) goto alloc_err; bp->msix_table = tbl; @@ -3432,8 +3409,7 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu) return bnx2x_reload_if_running(dev); } -netdev_features_t bnx2x_fix_features(struct net_device *dev, - netdev_features_t features) +u32 bnx2x_fix_features(struct net_device *dev, u32 features) { struct bnx2x *bp = netdev_priv(dev); @@ -3444,7 +3420,7 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev, return features; } -int bnx2x_set_features(struct net_device *dev, netdev_features_t features) +int bnx2x_set_features(struct net_device *dev, u32 features) { struct bnx2x *bp = netdev_priv(dev); u32 flags = bp->flags; diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index bf27c54ff2e0..283d663da180 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -20,7 +20,6 @@ #include #include #include -#include #include "bnx2x.h" @@ -534,9 +533,8 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu); */ int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type); #endif -netdev_features_t bnx2x_fix_features(struct net_device *dev, - netdev_features_t features); -int bnx2x_set_features(struct net_device *dev, netdev_features_t features); +u32 bnx2x_fix_features(struct net_device *dev, u32 features); +int bnx2x_set_features(struct net_device *dev, u32 features); /** * bnx2x_tx_timeout - tx timeout netdev callback @@ -876,7 +874,8 @@ static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp) { /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */ - memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask)); + memset(fp->sge_mask, 0xff, + (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64)); /* Clear the two last indices in the page to 1: these are the indices that correspond to the "next" element, @@ -912,27 +911,26 @@ static inline int bnx2x_alloc_rx_sge(struct 
bnx2x *bp, return 0; } -static inline int bnx2x_alloc_rx_data(struct bnx2x *bp, - struct bnx2x_fastpath *fp, u16 index) +static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 index) { - u8 *data; + struct sk_buff *skb; struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; dma_addr_t mapping; - data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC); - if (unlikely(data == NULL)) + skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size); + if (unlikely(skb == NULL)) return -ENOMEM; - mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD, - fp->rx_buf_size, + mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { - kfree(data); + dev_kfree_skb_any(skb); return -ENOMEM; } - rx_buf->data = data; + rx_buf->skb = skb; dma_unmap_addr_set(rx_buf, mapping, mapping); rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); @@ -941,12 +939,12 @@ static inline int bnx2x_alloc_rx_data(struct bnx2x *bp, return 0; } -/* note that we are not allocating a new buffer, +/* note that we are not allocating a new skb, * we are just moving one from cons to prod * we are not creating a new mapping, * so there is no need to check for dma_mapping_error(). */ -static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp, +static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp, u16 cons, u16 prod) { struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; @@ -956,7 +954,7 @@ static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp, dma_unmap_addr_set(prod_rx_buf, mapping, dma_unmap_addr(cons_rx_buf, mapping)); - prod_rx_buf->data = cons_rx_buf->data; + prod_rx_buf->skb = cons_rx_buf->skb; *prod_bd = *cons_bd; } @@ -1032,9 +1030,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, for (i = 0; i < last; i++) { struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; struct sw_rx_bd *first_buf = &tpa_info->first_buf; - u8 *data = first_buf->data; + struct sk_buff *skb = first_buf->skb; - if (data == NULL) { + if (skb == NULL) { DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i); continue; } @@ -1042,8 +1040,8 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(first_buf, mapping), fp->rx_buf_size, DMA_FROM_DEVICE); - kfree(data); - first_buf->data = NULL; + dev_kfree_skb(skb); + first_buf->skb = NULL; } } @@ -1151,7 +1149,7 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, * fp->eth_q_stats.rx_skb_alloc_failed = 0 */ for (i = 0; i < rx_ring_size; i++) { - if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) { + if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) { fp->eth_q_stats.rx_skb_alloc_failed++; continue; } @@ -1320,7 +1318,6 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); unsigned long q_type = 0; - bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp); bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, BNX2X_FCOE_ETH_CL_ID_IDX); /** Current BNX2X_FCOE_ETH_CID deffinition implies not more than @@ -1491,77 +1488,4 @@ static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) return max_cfg; } -/** - * bnx2x_get_iscsi_info - update iSCSI params according to licensing info. 
- * - * @bp: driver handle - * - */ -void bnx2x_get_iscsi_info(struct bnx2x *bp); - -/* returns func by VN for current port */ -static inline int func_by_vn(struct bnx2x *bp, int vn) -{ - return 2 * vn + BP_PORT(bp); -} - -/** - * bnx2x_link_sync_notify - send notification to other functions. - * - * @bp: driver handle - * - */ -static inline void bnx2x_link_sync_notify(struct bnx2x *bp) -{ - int func; - int vn; - - /* Set the attention towards other drivers on the same port */ - for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { - if (vn == BP_VN(bp)) - continue; - - func = func_by_vn(bp, vn); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + - (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); - } -} - -/** - * bnx2x_update_drv_flags - update flags in shmem - * - * @bp: driver handle - * @flags: flags to update - * @set: set or clear - * - */ -static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set) -{ - if (SHMEM2_HAS(bp, drv_flags)) { - u32 drv_flags; - bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS); - drv_flags = SHMEM2_RD(bp, drv_flags); - - if (set) - SET_FLAGS(drv_flags, flags); - else - RESET_FLAGS(drv_flags, flags); - - SHMEM2_WR(bp, drv_flags, drv_flags); - DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags); - bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS); - } -} - -static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr) -{ - if (is_valid_ether_addr(addr)) - return true; -#ifdef BCM_CNIC - if (is_zero_ether_addr(addr) && IS_MF_ISCSI_SD(bp)) - return true; -#endif - return false; -} - #endif /* BNX2X_CMN_H */ diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 5051cf3deb20..51bd7485ab18 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -685,6 +685,24 @@ int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall) } #endif +static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set) +{ + if (SHMEM2_HAS(bp, drv_flags)) { + u32 drv_flags; + bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS); + drv_flags = SHMEM2_RD(bp, drv_flags); + + if (set) + SET_FLAGS(drv_flags, flags); + else + RESET_FLAGS(drv_flags, flags); + + SHMEM2_WR(bp, drv_flags, drv_flags); + DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags); + bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS); + } +} + static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp) { u8 prio, cos; @@ -737,26 +755,18 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) /* mark DCBX result for PMF migration */ bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1); #ifdef BCM_DCBNL - /* + /** * Add new app tlvs to dcbnl */ bnx2x_dcbnl_update_applist(bp, false); #endif - /* - * reconfigure the netdevice with the results of the new + bnx2x_dcbx_stop_hw_tx(bp); + + /* reconfigure the netdevice with the results of the new * dcbx negotiation. 
*/ bnx2x_dcbx_update_tc_mapping(bp); - /* - * allow other funtions to update their netdevices - * accordingly - */ - if (IS_MF(bp)) - bnx2x_link_sync_notify(bp); - - bnx2x_dcbx_stop_hw_tx(bp); - return; } case BNX2X_DCBX_STATE_TX_PAUSED: @@ -765,7 +775,6 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) bnx2x_dcbx_update_ets_params(bp); bnx2x_dcbx_resume_hw_tx(bp); - return; case BNX2X_DCBX_STATE_TX_RELEASED: DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n"); @@ -874,7 +883,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, /*For IEEE admin_recommendation_bw_precentage *For IEEE admin_recommendation_ets_pg */ af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap; - for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) { + for (i = 0; i < 4; i++) { if (dp->admin_priority_app_table[i].valid) { struct bnx2x_admin_priority_app_table *table = dp->admin_priority_app_table; @@ -914,7 +923,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) { - if (!CHIP_IS_E1x(bp)) { + if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) { bp->dcb_state = dcb_on; bp->dcbx_enabled = dcbx_enabled; } else { @@ -1854,7 +1863,7 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, void bnx2x_dcbx_pmf_update(struct bnx2x *bp) { /* if we need to syncronize DCBX result from prev PMF - * read it from shmem and update bp and netdev accordingly + * read it from shmem and update bp accordingly */ if (SHMEM2_HAS(bp, drv_flags) && GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) { @@ -1866,22 +1875,6 @@ void bnx2x_dcbx_pmf_update(struct bnx2x *bp) bp->dcbx_error); bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat, bp->dcbx_error); -#ifdef BCM_DCBNL - /* - * Add new app tlvs to dcbnl - */ - bnx2x_dcbnl_update_applist(bp, false); - /* - * Send a notification for the new negotiated parameters - */ - dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0); -#endif - /* - * reconfigure the netdevice with the results of the new - * dcbx negotiation. - */ - bnx2x_dcbx_update_tc_mapping(bp); - } } @@ -2249,7 +2242,7 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up) int i, ff; /* iterate over the app entries looking for idtype and idval */ - for (i = 0, ff = -1; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) { + for (i = 0, ff = -1; i < 4; i++) { struct bnx2x_admin_priority_app_table *app_ent = &bp->dcbx_config_params.admin_priority_app_table[i]; if (bnx2x_admin_app_is_equal(app_ent, idtype, idval)) @@ -2258,7 +2251,7 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up) if (ff < 0 && !app_ent->valid) ff = i; } - if (i < DCBX_CONFIG_MAX_APP_PROTOCOL) + if (i < 4) /* if found overwrite up */ bp->dcbx_config_params. 
admin_priority_app_table[i].priority = up; diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h index 2ab9254e2d5e..2c6a3bca6f28 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h @@ -90,7 +90,6 @@ struct bnx2x_admin_priority_app_table { u32 app_id; }; -#define DCBX_CONFIG_MAX_APP_PROTOCOL 4 struct bnx2x_config_dcbx_params { u32 overwrite_settings; u32 admin_dcbx_version; @@ -110,8 +109,7 @@ struct bnx2x_config_dcbx_params { u32 admin_recommendation_bw_precentage[8]; u32 admin_recommendation_ets_pg[8]; u32 admin_pfc_bitmap; - struct bnx2x_admin_priority_app_table - admin_priority_app_table[DCBX_CONFIG_MAX_APP_PROTOCOL]; + struct bnx2x_admin_priority_app_table admin_priority_app_table[4]; u32 admin_default_priority; }; diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index a688b9d975a2..f0ca8b27a55e 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -107,10 +107,6 @@ static const struct { 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, { STATS_OFFSET32(mf_tag_discard), 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" }, - { STATS_OFFSET32(pfc_frames_received_hi), - 8, STATS_FLAGS_PORT, "pfc_frames_received" }, - { STATS_OFFSET32(pfc_frames_sent_hi), - 8, STATS_FLAGS_PORT, "pfc_frames_sent" }, { STATS_OFFSET32(brb_drop_hi), 8, STATS_FLAGS_PORT, "rx_brb_discard" }, { STATS_OFFSET32(brb_truncate_hi), @@ -356,7 +352,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) DP(NETIF_MSG_LINK, "Unsupported port type\n"); return -EINVAL; } - /* Save new config in case command complete successully */ + /* Save new config in case command complete successuly */ new_multi_phy_config = bp->link_params.multi_phy_config; /* Get the new cfg_idx */ cfg_idx = bnx2x_get_link_cfg_idx(bp); @@ -765,8 +761,8 @@ static void bnx2x_get_drvinfo(struct net_device *dev, struct bnx2x *bp = netdev_priv(dev); u8 phy_fw_ver[PHY_FW_VER_LEN]; - strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); + strcpy(info->driver, DRV_MODULE_NAME); + strcpy(info->version, DRV_MODULE_VERSION); phy_fw_ver[0] = '\0'; if (bp->port.pmf) { @@ -777,14 +773,14 @@ static void bnx2x_get_drvinfo(struct net_device *dev, bnx2x_release_phy_lock(bp); } - strlcpy(info->fw_version, bp->fw_ver, sizeof(info->fw_version)); + strncpy(info->fw_version, bp->fw_ver, 32); snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver), "bc %d.%d.%d%s%s", (bp->common.bc_ver & 0xff0000) >> 16, (bp->common.bc_ver & 0xff00) >> 8, (bp->common.bc_ver & 0xff), ((phy_fw_ver[0] != '\0') ? 
" phy " : ""), phy_fw_ver); - strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); + strcpy(info->bus_info, pci_name(bp->pdev)); info->n_stats = BNX2X_NUM_STATS; info->testinfo_len = BNX2X_NUM_TESTS; info->eedump_len = bp->common.flash_size; @@ -1744,8 +1740,6 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) struct sw_rx_bd *rx_buf; u16 len; int rc = -ENODEV; - u8 *data; - struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); /* check the loopback mode */ switch (loopback_mode) { @@ -1754,18 +1748,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) return -EINVAL; break; case BNX2X_MAC_LOOPBACK: - if (CHIP_IS_E3(bp)) { - int cfg_idx = bnx2x_get_link_cfg_idx(bp); - if (bp->port.supported[cfg_idx] & - (SUPPORTED_10000baseT_Full | - SUPPORTED_20000baseMLD2_Full | - SUPPORTED_20000baseKR2_Full)) - bp->link_params.loopback_mode = LOOPBACK_XMAC; - else - bp->link_params.loopback_mode = LOOPBACK_UMAC; - } else - bp->link_params.loopback_mode = LOOPBACK_BMAC; - + bp->link_params.loopback_mode = CHIP_IS_E3(bp) ? + LOOPBACK_XMAC : LOOPBACK_BMAC; bnx2x_phy_init(&bp->link_params, &bp->link_vars); break; default: @@ -1800,8 +1784,6 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb); rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb); - netdev_tx_sent_queue(txq, skb->len); - pkt_prod = txdata->tx_pkt_prod++; tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)]; tx_buf->first_bd = txdata->tx_bd_prod; @@ -1883,9 +1865,10 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) dma_sync_single_for_cpu(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), fp_rx->rx_buf_size, DMA_FROM_DEVICE); - data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset; + skb = rx_buf->skb; + skb_reserve(skb, cqe->fast_path_cqe.placement_offset); for (i = ETH_HLEN; i < pkt_size; i++) - if (*(data + i) != (unsigned char) (i & 0xff)) + if (*(skb->data + i) != (unsigned char) (i & 0xff)) goto test_loopback_rx_exit; rc = 0; @@ -2302,20 +2285,18 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, } } -static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev) -{ - struct bnx2x *bp = netdev_priv(dev); - - return (bp->multi_mode == ETH_RSS_MODE_DISABLED ? - 0 : T_ETH_INDIRECTION_TABLE_SIZE); -} - -static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir) +static int bnx2x_get_rxfh_indir(struct net_device *dev, + struct ethtool_rxfh_indir *indir) { struct bnx2x *bp = netdev_priv(dev); + size_t copy_size = + min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE); u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; size_t i; + if (bp->multi_mode == ETH_RSS_MODE_DISABLED) + return -EOPNOTSUPP; + /* Get the current configuration of the RSS indirection table */ bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table); @@ -2328,19 +2309,33 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir) * align the returned table to the Client ID of the leading RSS * queue. 
*/ - for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) - indir[i] = ind_table[i] - bp->fp->cl_id; + for (i = 0; i < copy_size; i++) + indir->ring_index[i] = ind_table[i] - bp->fp->cl_id; + + indir->size = T_ETH_INDIRECTION_TABLE_SIZE; return 0; } -static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir) +static int bnx2x_set_rxfh_indir(struct net_device *dev, + const struct ethtool_rxfh_indir *indir) { struct bnx2x *bp = netdev_priv(dev); size_t i; u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; + u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); + + if (bp->multi_mode == ETH_RSS_MODE_DISABLED) + return -EOPNOTSUPP; + + /* validate the size */ + if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE) + return -EINVAL; for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { + /* validate the indices */ + if (indir->ring_index[i] >= num_eth_queues) + return -EINVAL; /* * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy() * as an internal storage of an indirection table is a u8 array @@ -2350,7 +2345,7 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir) * align the received table to the Client ID of the leading RSS * queue */ - ind_table[i] = indir[i] + bp->fp->cl_id; + ind_table[i] = indir->ring_index[i] + bp->fp->cl_id; } return bnx2x_config_rss_pf(bp, ind_table, false); @@ -2383,7 +2378,6 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { .set_phys_id = bnx2x_set_phys_id, .get_ethtool_stats = bnx2x_get_ethtool_stats, .get_rxnfc = bnx2x_get_rxnfc, - .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size, .get_rxfh_indir = bnx2x_get_rxfh_indir, .set_rxfh_indir = bnx2x_set_rxfh_indir, }; diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 3e30c8642c26..fc754cb6cc0f 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1247,14 +1247,11 @@ struct drv_func_mb { #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000 #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234 #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014 - #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201 #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000 #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 - #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000 - #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000 #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 #define REQ_BC_VER_4_SET_MF_BW 0x00060202 @@ -1307,8 +1304,6 @@ struct drv_func_mb { #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000 #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000 #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000 - #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000 - #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000 #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 @@ -1365,7 +1360,6 @@ struct drv_func_mb { #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000 #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000 - #define DRV_STATUS_DRV_INFO_REQ 0x04000000 u32 virt_mac_upper; #define VIRT_MAC_SIGN_MASK 0xffff0000 @@ -1970,38 +1964,9 @@ struct shmem2_region { u32 extended_dev_info_shared_addr; u32 ncsi_oem_data_addr; - u32 ocsd_host_addr; /* initialized by option ROM */ - u32 ocbb_host_addr; /* initialized by option ROM */ - u32 ocsd_req_update_interval; /* initialized by option ROM */ - u32 temperature_in_half_celsius; - u32 glob_struct_in_host; - - u32 dcbx_neg_res_ext_offset; -#define 
SHMEM_DCBX_NEG_RES_EXT_NONE 0x00000000 - - u32 drv_capabilities_flag[E2_FUNC_MAX]; -#define DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED 0x00000001 -#define DRV_FLAGS_CAPABILITIES_LOADED_L2 0x00000002 -#define DRV_FLAGS_CAPABILITIES_LOADED_FCOE 0x00000004 -#define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI 0x00000008 - - u32 extended_dev_info_shared_cfg_size; - - u32 dcbx_en[PORT_MAX]; - - /* The offset points to the multi threaded meta structure */ - u32 multi_thread_data_offset; - - /* address of DMAable host address holding values from the drivers */ - u32 drv_info_host_addr_lo; - u32 drv_info_host_addr_hi; - - /* general values written by the MFW (such as current version) */ - u32 drv_info_control; -#define DRV_INFO_CONTROL_VER_MASK 0x000000ff -#define DRV_INFO_CONTROL_VER_SHIFT 0 -#define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00 -#define DRV_INFO_CONTROL_OP_CODE_SHIFT 8 + u32 ocsd_host_addr; + u32 ocbb_host_addr; + u32 ocsd_req_update_interval; }; @@ -2536,18 +2501,14 @@ struct mac_stx { #define MAC_STX_IDX_MAX 2 struct host_port_stats { - u32 host_port_stats_counter; + u32 host_port_stats_start; struct mac_stx mac_stx[MAC_STX_IDX_MAX]; u32 brb_drop_hi; u32 brb_drop_lo; - u32 not_used; /* obsolete */ - u32 pfc_frames_tx_hi; - u32 pfc_frames_tx_lo; - u32 pfc_frames_rx_hi; - u32 pfc_frames_rx_lo; + u32 host_port_stats_end; }; @@ -2587,118 +2548,6 @@ struct host_func_stats { /* VIC definitions */ #define VICSTATST_UIF_INDEX 2 -/* current drv_info version */ -#define DRV_INFO_CUR_VER 1 - -/* drv_info op codes supported */ -enum drv_info_opcode { - ETH_STATS_OPCODE, - FCOE_STATS_OPCODE, - ISCSI_STATS_OPCODE -}; - -#define ETH_STAT_INFO_VERSION_LEN 12 -/* Per PCI Function Ethernet Statistics required from the driver */ -struct eth_stats_info { - /* Function's Driver Version. padded to 12 */ - u8 version[ETH_STAT_INFO_VERSION_LEN]; - /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */ - u8 mac_local[8]; - u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */ - u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */ - u32 mtu_size; /* MTU Size. Note : Negotiated MTU */ - u32 feature_flags; /* Feature_Flags. */ -#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01 -#define FEATURE_ETH_LSO_MASK 0x02 -#define FEATURE_ETH_BOOTMODE_MASK 0x1C -#define FEATURE_ETH_BOOTMODE_SHIFT 2 -#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2) -#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2) -#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2) -#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2) -#define FEATURE_ETH_TOE_MASK 0x20 - u32 lso_max_size; /* LSO MaxOffloadSize. */ - u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */ - /* Num Offloaded Connections TCP_IPv4. */ - u32 ipv4_ofld_cnt; - /* Num Offloaded Connections TCP_IPv6. */ - u32 ipv6_ofld_cnt; - u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */ - u32 txq_size; /* TX Descriptors Queue Size */ - u32 rxq_size; /* RX Descriptors Queue Size */ - /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */ - u32 txq_avg_depth; - /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */ - u32 rxq_avg_depth; - /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/ - u32 iov_offload; - /* Number of NetQueue/VMQ Config'd. */ - u32 netq_cnt; - u32 vf_cnt; /* Num VF assigned to this PF. */ -}; - -/* Per PCI Function FCOE Statistics required from the driver */ -struct fcoe_stats_info { - u8 version[12]; /* Function's Driver Version. */ - u8 mac_local[8]; /* Locally Admin Addr. */ - u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. 
*/ - u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */ - /* QoS Priority (per 802.1p). 0-7255 */ - u32 qos_priority; - u32 txq_size; /* FCoE TX Descriptors Queue Size. */ - u32 rxq_size; /* FCoE RX Descriptors Queue Size. */ - /* FCoE TX Descriptor Queue Avg Depth. */ - u32 txq_avg_depth; - /* FCoE RX Descriptors Queue Avg Depth. */ - u32 rxq_avg_depth; - u32 rx_frames_lo; /* FCoE RX Frames received. */ - u32 rx_frames_hi; /* FCoE RX Frames received. */ - u32 rx_bytes_lo; /* FCoE RX Bytes received. */ - u32 rx_bytes_hi; /* FCoE RX Bytes received. */ - u32 tx_frames_lo; /* FCoE TX Frames sent. */ - u32 tx_frames_hi; /* FCoE TX Frames sent. */ - u32 tx_bytes_lo; /* FCoE TX Bytes sent. */ - u32 tx_bytes_hi; /* FCoE TX Bytes sent. */ -}; - -/* Per PCI Function iSCSI Statistics required from the driver*/ -struct iscsi_stats_info { - u8 version[12]; /* Function's Driver Version. */ - u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */ - u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */ - /* QoS Priority (per 802.1p). 0-7255 */ - u32 qos_priority; - u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */ - u8 ww_port_name[64]; /* iSCSI World wide port name */ - u8 boot_target_name[64];/* iSCSI Boot Target Name. */ - u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */ - u32 boot_target_portal; /* iSCSI Boot Target Portal. */ - u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */ - u32 max_frame_size; /* Max Frame Size. bytes */ - u32 txq_size; /* PDU TX Descriptors Queue Size. */ - u32 rxq_size; /* PDU RX Descriptors Queue Size. */ - u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */ - u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */ - u32 rx_pdus_lo; /* iSCSI PDUs received. */ - u32 rx_pdus_hi; /* iSCSI PDUs received. */ - u32 rx_bytes_lo; /* iSCSI RX Bytes received. */ - u32 rx_bytes_hi; /* iSCSI RX Bytes received. */ - u32 tx_pdus_lo; /* iSCSI PDUs sent. */ - u32 tx_pdus_hi; /* iSCSI PDUs sent. */ - u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */ - u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */ - u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable. - * 9 nibbles, the position of each nibble - * represents the C-PCP value, the value - * of the nibble = S-PCP value. 
- */ -}; - -union drv_info_to_mcp { - struct eth_stats_info ether_stat; - struct fcoe_stats_info fcoe_stat; - struct iscsi_stats_info iscsi_stat; -}; #define BCM_5710_FW_MAJOR_VERSION 7 #define BCM_5710_FW_MINOR_VERSION 0 #define BCM_5710_FW_REVISION_VERSION 29 @@ -4311,63 +4160,9 @@ struct ustorm_eth_rx_producers { }; -/* - * FCoE RX statistics parameters section#0 - */ -struct fcoe_rx_stat_params_section0 { - __le32 fcoe_rx_pkt_cnt; - __le32 fcoe_rx_byte_cnt; -}; - - -/* - * FCoE RX statistics parameters section#1 - */ -struct fcoe_rx_stat_params_section1 { - __le32 fcoe_ver_cnt; - __le32 fcoe_rx_drop_pkt_cnt; -}; - - -/* - * FCoE RX statistics parameters section#2 - */ -struct fcoe_rx_stat_params_section2 { - __le32 fc_crc_cnt; - __le32 eofa_del_cnt; - __le32 miss_frame_cnt; - __le32 seq_timeout_cnt; - __le32 drop_seq_cnt; - __le32 fcoe_rx_drop_pkt_cnt; - __le32 fcp_rx_pkt_cnt; - __le32 reserved0; -}; - - -/* - * FCoE TX statistics parameters - */ -struct fcoe_tx_stat_params { - __le32 fcoe_tx_pkt_cnt; - __le32 fcoe_tx_byte_cnt; - __le32 fcp_tx_pkt_cnt; - __le32 reserved0; -}; - -/* - * FCoE statistics parameters - */ -struct fcoe_statistics_params { - struct fcoe_tx_stat_params tx_stat; - struct fcoe_rx_stat_params_section0 rx_stat0; - struct fcoe_rx_stat_params_section1 rx_stat1; - struct fcoe_rx_stat_params_section2 rx_stat2; -}; - - /* * cfc delete event data -*/ + */ struct cfc_del_event_data { u32 cid; u32 reserved0; diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 4df9505b67b6..882f48f0a03c 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -27,6 +27,7 @@ #include "bnx2x.h" #include "bnx2x_cmn.h" + /********************************************************/ #define ETH_HLEN 14 /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ @@ -162,11 +163,6 @@ #define EDC_MODE_LIMITING 0x0044 #define EDC_MODE_PASSIVE_DAC 0x0055 -/* BRB default for class 0 E2 */ -#define DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR 170 -#define DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR 250 -#define DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR 10 -#define DEFAULT0_E2_BRB_MAC_FULL_XON_THR 50 /* BRB thresholds for E2*/ #define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE 170 @@ -181,12 +177,6 @@ #define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE 50 #define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE 250 -/* BRB default for class 0 E3A0 */ -#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR 290 -#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR 410 -#define DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR 10 -#define DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR 50 - /* BRB thresholds for E3A0 */ #define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE 290 #define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0 @@ -200,11 +190,6 @@ #define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE 50 #define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE 410 -/* BRB default for E3B0 */ -#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR 330 -#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR 490 -#define DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR 15 -#define DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR 55 /* BRB thresholds for E3B0 2 port mode*/ #define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 1025 @@ -254,29 +239,18 @@ #define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE 50 #define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE 384 + /* only for E3B0*/ #define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR 304 #define PFC_E3B0_4P_BRB_FULL_LB_XON_THR 384 -#define PFC_E3B0_4P_LB_GUART 120 +#define PFC_E3B0_4P_LB_GUART 120 #define 
PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART 120 -#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80 +#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80 #define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART 80 -#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120 - -/* Pause defines*/ -#define DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR 330 -#define DEFAULT_E3B0_BRB_FULL_LB_XON_THR 490 -#define DEFAULT_E3B0_LB_GUART 40 - -#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART 40 -#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST 0 - -#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART 40 -#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST 0 +#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120 -/* ETS defines*/ #define DCBX_INVALID_COS (0xFF) #define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000) @@ -466,7 +440,7 @@ static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars) u32 min_w_val = 0; /* Calculate min_w_val.*/ if (vars->link_up) { - if (vars->line_speed == SPEED_20000) + if (SPEED_20000 == vars->line_speed) min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS; else min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS; @@ -516,7 +490,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_nig( REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 : NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound); - if (!port) { + if (0 == port) { REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6, credit_upper_bound); REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7, @@ -610,7 +584,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0); REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 : NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0); - if (!port) { + if (0 == port) { REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0); REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0); REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0); @@ -638,7 +612,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf( * In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4 * port mode port1 has COS0-2 that can be used for WFQ. */ - if (!port) { + if (0 == port) { base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0; max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0; } else { @@ -700,7 +674,7 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params) * In 2 port mode port0 has COS0-5 that can be used for WFQ. * In 4 port mode port1 has COS0-2 that can be used for WFQ. 
*/ - if (!port) { + if (0 == port) { base_weight = PBF_REG_COS0_WEIGHT_P0; max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0; } else { @@ -872,47 +846,34 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp, ******************************************************************************/ static int bnx2x_ets_e3b0_get_total_bw( const struct link_params *params, - struct bnx2x_ets_params *ets_params, + const struct bnx2x_ets_params *ets_params, u16 *total_bw) { struct bnx2x *bp = params->bp; u8 cos_idx = 0; - u8 is_bw_cos_exist = 0; *total_bw = 0 ; - /* Calculate total BW requested */ for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) { - if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) { - is_bw_cos_exist = 1; - if (!ets_params->cos[cos_idx].params.bw_params.bw) { - DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW" - "was set to 0\n"); - /* - * This is to prevent a state when ramrods - * can't be sent - */ - ets_params->cos[cos_idx].params.bw_params.bw - = 1; - } + if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) { *total_bw += ets_params->cos[cos_idx].params.bw_params.bw; } } /* Check total BW is valid */ - if ((is_bw_cos_exist == 1) && (*total_bw != 100)) { - if (*total_bw == 0) { + if ((100 != *total_bw) || (0 == *total_bw)) { + if (0 == *total_bw) { DP(NETIF_MSG_LINK, - "bnx2x_ets_E3B0_config total BW shouldn't be 0\n"); + "bnx2x_ets_E3B0_config toatl BW shouldn't be 0\n"); return -EINVAL; } DP(NETIF_MSG_LINK, - "bnx2x_ets_E3B0_config total BW should be 100\n"); - /* - * We can handle a case whre the BW isn't 100 this can happen - * if the TC are joined. - */ + "bnx2x_ets_E3B0_config toatl BW should be 100\n"); + /** + * We can handle a case whre the BW isn't 100 this can happen + * if the TC are joined. + */ } return 0; } @@ -943,7 +904,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params, const u8 max_num_of_cos = (port) ? 
DCBX_E3B0_MAX_NUM_COS_PORT1 : DCBX_E3B0_MAX_NUM_COS_PORT0; - if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) { + if (DCBX_INVALID_COS != sp_pri_to_cos[pri]) { DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " "parameter There can't be two COS's with " "the same strict pri\n"); @@ -952,7 +913,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params, if (pri > max_num_of_cos) { DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " - "parameter Illegal strict priority\n"); + "parameter Illegal strict priority\n"); return -EINVAL; } @@ -1034,8 +995,8 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params, /* Set all the strict priority first */ for (i = 0; i < max_num_of_cos; i++) { - if (sp_pri_to_cos[i] != DCBX_INVALID_COS) { - if (sp_pri_to_cos[i] >= DCBX_MAX_NUM_COS) { + if (DCBX_INVALID_COS != sp_pri_to_cos[i]) { + if (DCBX_MAX_NUM_COS <= sp_pri_to_cos[i]) { DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_set_pri_cli_reg " "invalid cos entry\n"); @@ -1049,7 +1010,7 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params, sp_pri_to_cos[i], pri_set); pri_bitmask = 1 << sp_pri_to_cos[i]; /* COS is used remove it from bitmap.*/ - if (!(pri_bitmask & cos_bit_to_set)) { + if (0 == (pri_bitmask & cos_bit_to_set)) { DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_set_pri_cli_reg " "invalid There can't be two COS's with" @@ -1111,7 +1072,7 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params, ******************************************************************************/ int bnx2x_ets_e3b0_config(const struct link_params *params, const struct link_vars *vars, - struct bnx2x_ets_params *ets_params) + const struct bnx2x_ets_params *ets_params) { struct bnx2x *bp = params->bp; int bnx2x_status = 0; @@ -1144,15 +1105,15 @@ int bnx2x_ets_e3b0_config(const struct link_params *params, /* Prepare BW parameters*/ bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params, &total_bw); - if (bnx2x_status) { + if (0 != bnx2x_status) { DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config get_total_bw failed\n"); return -EINVAL; } - /* - * Upper bound is set according to current link speed (min_w_val - * should be the same for upper bound and COS credit val). + /** + * Upper bound is set according to current link speed (min_w_val + * should be the same for upper bound and COS credit val). 
*/ bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig); bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf); @@ -1161,7 +1122,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params, for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) { if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) { cos_bw_bitmap |= (1 << cos_entry); - /* + /** * The function also sets the BW in HW(not the mappin * yet) */ @@ -1185,7 +1146,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params, "bnx2x_ets_e3b0_config cos state not valid\n"); return -EINVAL; } - if (bnx2x_status) { + if (0 != bnx2x_status) { DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config set cos bw failed\n"); return bnx2x_status; @@ -1196,7 +1157,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params, bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params, sp_pri_to_cos); - if (bnx2x_status) { + if (0 != bnx2x_status) { DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config set_pri_cli_reg failed\n"); return bnx2x_status; @@ -1207,7 +1168,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params, cos_sp_bitmap, cos_bw_bitmap); - if (bnx2x_status) { + if (0 != bnx2x_status) { DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n"); return bnx2x_status; } @@ -1271,9 +1232,9 @@ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw, DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n"); - if ((!total_bw) || - (!cos0_bw) || - (!cos1_bw)) { + if ((0 == total_bw) || + (0 == cos0_bw) || + (0 == cos1_bw)) { DP(NETIF_MSG_LINK, "Total BW can't be zero\n"); return; } @@ -1329,7 +1290,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000 * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000 */ - val = (!strict_cos) ? 0x2318 : 0x22E0; + val = (0 == strict_cos) ? 
0x2318 : 0x22E0; REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val); return 0; @@ -1337,6 +1298,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) /******************************************************************/ /* PFC section */ /******************************************************************/ + static void bnx2x_update_pfc_xmac(struct link_params *params, struct link_vars *vars, u8 is_lb) @@ -1439,7 +1401,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars, if (!vars->link_up) return; - if (vars->mac_type == MAC_TYPE_EMAC) { + if (MAC_TYPE_EMAC == vars->mac_type) { DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n"); bnx2x_emac_get_pfc_stat(params, pfc_frames_sent, pfc_frames_received); @@ -1473,18 +1435,6 @@ static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port) udelay(40); } -static u8 bnx2x_is_4_port_mode(struct bnx2x *bp) -{ - u32 port4mode_ovwr_val; - /* Check 4-port override enabled */ - port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); - if (port4mode_ovwr_val & (1<<0)) { - /* Return 4-port mode override value */ - return ((port4mode_ovwr_val & (1<<1)) == (1<<1)); - } - /* Return 4-port mode from input pin */ - return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN); -} static void bnx2x_emac_init(struct link_params *params, struct link_vars *vars) @@ -1651,18 +1601,31 @@ static void bnx2x_umac_enable(struct link_params *params, } +static u8 bnx2x_is_4_port_mode(struct bnx2x *bp) +{ + u32 port4mode_ovwr_val; + /* Check 4-port override enabled */ + port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); + if (port4mode_ovwr_val & (1<<0)) { + /* Return 4-port mode override value */ + return ((port4mode_ovwr_val & (1<<1)) == (1<<1)); + } + /* Return 4-port mode from input pin */ + return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN); +} + /* Define the XMAC mode */ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed) { struct bnx2x *bp = params->bp; u32 is_port4mode = bnx2x_is_4_port_mode(bp); - /* - * In 4-port mode, need to set the mode only once, so if XMAC is - * already out of reset, it means the mode has already been set, - * and it must not* reset the XMAC again, since it controls both - * ports of the path - */ + /** + * In 4-port mode, need to set the mode only once, so if XMAC is + * already out of reset, it means the mode has already been set, + * and it must not* reset the XMAC again, since it controls both + * ports of the path + **/ if ((CHIP_NUM(bp) == CHIP_NUM_57840) && (REG_RD(bp, MISC_REG_RESET_REG_2) & @@ -1780,7 +1743,6 @@ static int bnx2x_xmac_enable(struct link_params *params, return 0; } - static int bnx2x_emac_enable(struct link_params *params, struct link_vars *vars, u8 lb) { @@ -2037,6 +1999,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params, REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); } + /* PFC BRB internal port configuration params */ struct bnx2x_pfc_brb_threshold_val { u32 pause_xoff; @@ -2046,8 +2009,6 @@ struct bnx2x_pfc_brb_threshold_val { }; struct bnx2x_pfc_brb_e3b0_val { - u32 per_class_guaranty_mode; - u32 lb_guarantied_hyst; u32 full_lb_xoff_th; u32 full_lb_xon_threshold; u32 lb_guarantied; @@ -2060,9 +2021,6 @@ struct bnx2x_pfc_brb_e3b0_val { struct bnx2x_pfc_brb_th_val { struct bnx2x_pfc_brb_threshold_val pauseable_th; struct bnx2x_pfc_brb_threshold_val non_pauseable_th; - struct bnx2x_pfc_brb_threshold_val default_class0; - struct bnx2x_pfc_brb_threshold_val default_class1; - }; static int 
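As an aside, bnx2x_is_4_port_mode() above reduces to a two-step decode: if bit 0 of the override register is set, bit 1 supplies the port-mode value, otherwise the dedicated input-pin register is used. A hedged userspace sketch with made-up register contents:

#include <stdio.h>

/* Illustrative stand-ins for the two MISC registers read by the driver. */
static unsigned reg_port4mode_ovwr = 0x3;	/* bit0 = override enable, bit1 = value */
static unsigned reg_port4mode_en;		/* input pin */

static int is_4_port_mode(void)
{
	if (reg_port4mode_ovwr & (1 << 0))	/* override enabled? */
		return (reg_port4mode_ovwr & (1 << 1)) == (1 << 1);
	return (int)reg_port4mode_en;		/* fall back to the pin */
}

int main(void)
{
	printf("4-port mode: %d\n", is_4_port_mode());
	return 0;
}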
bnx2x_pfc_brb_get_config_params( struct link_params *params, @@ -2070,200 +2028,140 @@ static int bnx2x_pfc_brb_get_config_params( { struct bnx2x *bp = params->bp; DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n"); - - config_val->default_class1.pause_xoff = 0; - config_val->default_class1.pause_xon = 0; - config_val->default_class1.full_xoff = 0; - config_val->default_class1.full_xon = 0; - if (CHIP_IS_E2(bp)) { - /* class0 defaults */ - config_val->default_class0.pause_xoff = - DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR; - config_val->default_class0.pause_xon = - DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR; - config_val->default_class0.full_xoff = - DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR; - config_val->default_class0.full_xon = - DEFAULT0_E2_BRB_MAC_FULL_XON_THR; - /* pause able*/ config_val->pauseable_th.pause_xoff = - PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE; + PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE; config_val->pauseable_th.pause_xon = - PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE; + PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE; config_val->pauseable_th.full_xoff = - PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE; + PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE; config_val->pauseable_th.full_xon = - PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE; + PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE; /* non pause able*/ config_val->non_pauseable_th.pause_xoff = - PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; + PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; config_val->non_pauseable_th.pause_xon = - PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; + PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; config_val->non_pauseable_th.full_xoff = - PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; + PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; config_val->non_pauseable_th.full_xon = - PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE; + PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE; } else if (CHIP_IS_E3A0(bp)) { - /* class0 defaults */ - config_val->default_class0.pause_xoff = - DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR; - config_val->default_class0.pause_xon = - DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR; - config_val->default_class0.full_xoff = - DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR; - config_val->default_class0.full_xon = - DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR; - /* pause able */ config_val->pauseable_th.pause_xoff = - PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE; + PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE; config_val->pauseable_th.pause_xon = - PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE; + PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE; config_val->pauseable_th.full_xoff = - PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE; + PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE; config_val->pauseable_th.full_xon = - PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE; + PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE; /* non pause able*/ config_val->non_pauseable_th.pause_xoff = - PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; + PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; config_val->non_pauseable_th.pause_xon = - PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; + PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; config_val->non_pauseable_th.full_xoff = - PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; + PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; config_val->non_pauseable_th.full_xon = - PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE; + PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE; } else if (CHIP_IS_E3B0(bp)) { - /* class0 defaults */ - config_val->default_class0.pause_xoff = - DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR; - config_val->default_class0.pause_xon = - DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR; - config_val->default_class0.full_xoff = - DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR; - config_val->default_class0.full_xon = - DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR; - if (params->phy[INT_PHY].flags & 
FLAGS_4_PORT_MODE) { config_val->pauseable_th.pause_xoff = - PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE; - config_val->pauseable_th.pause_xon = - PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE; - config_val->pauseable_th.full_xoff = - PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE; - config_val->pauseable_th.full_xon = - PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE; - /* non pause able*/ - config_val->non_pauseable_th.pause_xoff = - PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; - config_val->non_pauseable_th.pause_xon = - PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; - config_val->non_pauseable_th.full_xoff = - PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; - config_val->non_pauseable_th.full_xon = - PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE; - } else { - config_val->pauseable_th.pause_xoff = - PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE; + PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE; config_val->pauseable_th.pause_xon = - PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE; + PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE; config_val->pauseable_th.full_xoff = - PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE; + PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE; config_val->pauseable_th.full_xon = - PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE; + PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE; /* non pause able*/ config_val->non_pauseable_th.pause_xoff = - PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; + PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; config_val->non_pauseable_th.pause_xon = - PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; + PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; config_val->non_pauseable_th.full_xoff = - PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; + PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; config_val->non_pauseable_th.full_xon = - PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE; - } + PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE; + } else { + config_val->pauseable_th.pause_xoff = + PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE; + config_val->pauseable_th.pause_xon = + PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE; + config_val->pauseable_th.full_xoff = + PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE; + config_val->pauseable_th.full_xon = + PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE; + /* non pause able*/ + config_val->non_pauseable_th.pause_xoff = + PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; + config_val->non_pauseable_th.pause_xon = + PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; + config_val->non_pauseable_th.full_xoff = + PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; + config_val->non_pauseable_th.full_xon = + PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE; + } } else return -EINVAL; return 0; } -static void bnx2x_pfc_brb_get_e3b0_config_params( - struct link_params *params, - struct bnx2x_pfc_brb_e3b0_val - *e3b0_val, - struct bnx2x_nig_brb_pfc_port_params *pfc_params, - const u8 pfc_enabled) -{ - if (pfc_enabled && pfc_params) { - e3b0_val->per_class_guaranty_mode = 1; - e3b0_val->lb_guarantied_hyst = 80; - if (params->phy[INT_PHY].flags & - FLAGS_4_PORT_MODE) { - e3b0_val->full_lb_xoff_th = - PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR; - e3b0_val->full_lb_xon_threshold = - PFC_E3B0_4P_BRB_FULL_LB_XON_THR; - e3b0_val->lb_guarantied = - PFC_E3B0_4P_LB_GUART; - e3b0_val->mac_0_class_t_guarantied = - PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART; - e3b0_val->mac_0_class_t_guarantied_hyst = - PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST; - e3b0_val->mac_1_class_t_guarantied = - PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART; - e3b0_val->mac_1_class_t_guarantied_hyst = - PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST; - } else { - e3b0_val->full_lb_xoff_th = - PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR; - 
e3b0_val->full_lb_xon_threshold = - PFC_E3B0_2P_BRB_FULL_LB_XON_THR; - e3b0_val->mac_0_class_t_guarantied_hyst = - PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST; - e3b0_val->mac_1_class_t_guarantied = - PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART; - e3b0_val->mac_1_class_t_guarantied_hyst = - PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST; - - if (pfc_params->cos0_pauseable != - pfc_params->cos1_pauseable) { - /* nonpauseable= Lossy + pauseable = Lossless*/ - e3b0_val->lb_guarantied = - PFC_E3B0_2P_MIX_PAUSE_LB_GUART; - e3b0_val->mac_0_class_t_guarantied = - PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART; - } else if (pfc_params->cos0_pauseable) { - /* Lossless +Lossless*/ - e3b0_val->lb_guarantied = - PFC_E3B0_2P_PAUSE_LB_GUART; - e3b0_val->mac_0_class_t_guarantied = - PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART; - } else { - /* Lossy +Lossy*/ - e3b0_val->lb_guarantied = - PFC_E3B0_2P_NON_PAUSE_LB_GUART; - e3b0_val->mac_0_class_t_guarantied = - PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART; - } - } - } else { - e3b0_val->per_class_guaranty_mode = 0; - e3b0_val->lb_guarantied_hyst = 0; +static void bnx2x_pfc_brb_get_e3b0_config_params(struct link_params *params, + struct bnx2x_pfc_brb_e3b0_val + *e3b0_val, + u32 cos0_pauseable, + u32 cos1_pauseable) +{ + if (params->phy[INT_PHY].flags & FLAGS_4_PORT_MODE) { e3b0_val->full_lb_xoff_th = - DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR; + PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR; e3b0_val->full_lb_xon_threshold = - DEFAULT_E3B0_BRB_FULL_LB_XON_THR; + PFC_E3B0_4P_BRB_FULL_LB_XON_THR; e3b0_val->lb_guarantied = - DEFAULT_E3B0_LB_GUART; + PFC_E3B0_4P_LB_GUART; e3b0_val->mac_0_class_t_guarantied = - DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART; + PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART; e3b0_val->mac_0_class_t_guarantied_hyst = - DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST; + PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST; e3b0_val->mac_1_class_t_guarantied = - DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART; + PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART; e3b0_val->mac_1_class_t_guarantied_hyst = - DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST; + PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST; + } else { + e3b0_val->full_lb_xoff_th = + PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR; + e3b0_val->full_lb_xon_threshold = + PFC_E3B0_2P_BRB_FULL_LB_XON_THR; + e3b0_val->mac_0_class_t_guarantied_hyst = + PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST; + e3b0_val->mac_1_class_t_guarantied = + PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART; + e3b0_val->mac_1_class_t_guarantied_hyst = + PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST; + + if (cos0_pauseable != cos1_pauseable) { + /* nonpauseable= Lossy + pauseable = Lossless*/ + e3b0_val->lb_guarantied = + PFC_E3B0_2P_MIX_PAUSE_LB_GUART; + e3b0_val->mac_0_class_t_guarantied = + PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART; + } else if (cos0_pauseable) { + /* Lossless +Lossless*/ + e3b0_val->lb_guarantied = + PFC_E3B0_2P_PAUSE_LB_GUART; + e3b0_val->mac_0_class_t_guarantied = + PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART; + } else { + /* Lossy +Lossy*/ + e3b0_val->lb_guarantied = + PFC_E3B0_2P_NON_PAUSE_LB_GUART; + e3b0_val->mac_0_class_t_guarantied = + PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART; + } } } static int bnx2x_update_pfc_brb(struct link_params *params, @@ -2274,28 +2172,23 @@ static int bnx2x_update_pfc_brb(struct link_params *params, struct bnx2x *bp = params->bp; struct bnx2x_pfc_brb_th_val config_val = { {0} }; struct bnx2x_pfc_brb_threshold_val *reg_th_config = - &config_val.pauseable_th; + &config_val.pauseable_th; struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0}; - const int set_pfc = params->feature_config_flags & + int set_pfc = 
params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED; - const u8 pfc_enabled = (set_pfc && pfc_params); int bnx2x_status = 0; u8 port = params->port; /* default - pause configuration */ reg_th_config = &config_val.pauseable_th; bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val); - if (bnx2x_status) + if (0 != bnx2x_status) return bnx2x_status; - if (pfc_enabled) { + if (set_pfc && pfc_params) /* First COS */ - if (pfc_params->cos0_pauseable) - reg_th_config = &config_val.pauseable_th; - else + if (!pfc_params->cos0_pauseable) reg_th_config = &config_val.non_pauseable_th; - } else - reg_th_config = &config_val.default_class0; /* * The number of free blocks below which the pause signal to class 0 * of MAC #n is asserted. n=0,1 @@ -2322,119 +2215,122 @@ static int bnx2x_update_pfc_brb(struct link_params *params, REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 : BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon); - if (pfc_enabled) { + if (set_pfc && pfc_params) { /* Second COS */ if (pfc_params->cos1_pauseable) reg_th_config = &config_val.pauseable_th; else reg_th_config = &config_val.non_pauseable_th; - } else - reg_th_config = &config_val.default_class1; - /* - * The number of free blocks below which the pause signal to - * class 1 of MAC #n is asserted. n=0,1 - */ - REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 : - BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, - reg_th_config->pause_xoff); - - /* - * The number of free blocks above which the pause signal to - * class 1 of MAC #n is de-asserted. n=0,1 - */ - REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 : - BRB1_REG_PAUSE_1_XON_THRESHOLD_0, - reg_th_config->pause_xon); - /* - * The number of free blocks below which the full signal to - * class 1 of MAC #n is asserted. n=0,1 - */ - REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 : - BRB1_REG_FULL_1_XOFF_THRESHOLD_0, - reg_th_config->full_xoff); - /* - * The number of free blocks above which the full signal to - * class 1 of MAC #n is de-asserted. n=0,1 - */ - REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 : - BRB1_REG_FULL_1_XON_THRESHOLD_0, - reg_th_config->full_xon); - - if (CHIP_IS_E3B0(bp)) { - bnx2x_pfc_brb_get_e3b0_config_params( - params, - &e3b0_val, - pfc_params, - pfc_enabled); - - REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE, - e3b0_val.per_class_guaranty_mode); - /* - * The hysteresis on the guarantied buffer space for the Lb - * port before signaling XON. - */ - REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, - e3b0_val.lb_guarantied_hyst); - + * The number of free blocks below which the pause signal to + * class 1 of MAC #n is asserted. n=0,1 + **/ + REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 : + BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, + reg_th_config->pause_xoff); /* - * The number of free blocks below which the full signal to the - * LB port is asserted. + * The number of free blocks above which the pause signal to + * class 1 of MAC #n is de-asserted. n=0,1 */ - REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, - e3b0_val.full_lb_xoff_th); + REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 : + BRB1_REG_PAUSE_1_XON_THRESHOLD_0, + reg_th_config->pause_xon); /* - * The number of free blocks above which the full signal to the - * LB port is de-asserted. + * The number of free blocks below which the full signal to + * class 1 of MAC #n is asserted. n=0,1 */ - REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, - e3b0_val.full_lb_xon_threshold); + REG_WR(bp, (port) ? 
BRB1_REG_FULL_1_XOFF_THRESHOLD_1 : + BRB1_REG_FULL_1_XOFF_THRESHOLD_0, + reg_th_config->full_xoff); /* - * The number of blocks guarantied for the MAC #n port. n=0,1 + * The number of free blocks above which the full signal to + * class 1 of MAC #n is de-asserted. n=0,1 */ + REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 : + BRB1_REG_FULL_1_XON_THRESHOLD_0, + reg_th_config->full_xon); - /* The number of blocks guarantied for the LB port.*/ - REG_WR(bp, BRB1_REG_LB_GUARANTIED, - e3b0_val.lb_guarantied); - /* - * The number of blocks guarantied for the MAC #n port. - */ - REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0, - 2 * e3b0_val.mac_0_class_t_guarantied); - REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1, - 2 * e3b0_val.mac_1_class_t_guarantied); - /* - * The number of blocks guarantied for class #t in MAC0. t=0,1 - */ - REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED, - e3b0_val.mac_0_class_t_guarantied); - REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED, - e3b0_val.mac_0_class_t_guarantied); - /* - * The hysteresis on the guarantied buffer space for class in - * MAC0. t=0,1 - */ - REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST, - e3b0_val.mac_0_class_t_guarantied_hyst); - REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST, - e3b0_val.mac_0_class_t_guarantied_hyst); + if (CHIP_IS_E3B0(bp)) { + /*Should be done by init tool */ + /* + * BRB_empty_for_dup = BRB1_REG_BRB_EMPTY_THRESHOLD + * reset value + * 944 + */ + + /** + * The hysteresis on the guarantied buffer space for the Lb port + * before signaling XON. + **/ + REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 80); + + bnx2x_pfc_brb_get_e3b0_config_params( + params, + &e3b0_val, + pfc_params->cos0_pauseable, + pfc_params->cos1_pauseable); + /** + * The number of free blocks below which the full signal to the + * LB port is asserted. + */ + REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, + e3b0_val.full_lb_xoff_th); + /** + * The number of free blocks above which the full signal to the + * LB port is de-asserted. + */ + REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, + e3b0_val.full_lb_xon_threshold); + /** + * The number of blocks guarantied for the MAC #n port. n=0,1 + */ + + /*The number of blocks guarantied for the LB port.*/ + REG_WR(bp, BRB1_REG_LB_GUARANTIED, + e3b0_val.lb_guarantied); + + /** + * The number of blocks guarantied for the MAC #n port. + */ + REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0, + 2 * e3b0_val.mac_0_class_t_guarantied); + REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1, + 2 * e3b0_val.mac_1_class_t_guarantied); + /** + * The number of blocks guarantied for class #t in MAC0. t=0,1 + */ + REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED, + e3b0_val.mac_0_class_t_guarantied); + REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED, + e3b0_val.mac_0_class_t_guarantied); + /** + * The hysteresis on the guarantied buffer space for class in + * MAC0. t=0,1 + */ + REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST, + e3b0_val.mac_0_class_t_guarantied_hyst); + REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST, + e3b0_val.mac_0_class_t_guarantied_hyst); + + /** + * The number of blocks guarantied for class #t in MAC1.t=0,1 + */ + REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED, + e3b0_val.mac_1_class_t_guarantied); + REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED, + e3b0_val.mac_1_class_t_guarantied); + /** + * The hysteresis on the guarantied buffer space for class #t + * in MAC1. 
t=0,1 + */ + REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST, + e3b0_val.mac_1_class_t_guarantied_hyst); + REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST, + e3b0_val.mac_1_class_t_guarantied_hyst); + + } - /* - * The number of blocks guarantied for class #t in MAC1.t=0,1 - */ - REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED, - e3b0_val.mac_1_class_t_guarantied); - REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED, - e3b0_val.mac_1_class_t_guarantied); - /* - * The hysteresis on the guarantied buffer space for class #t - * in MAC1. t=0,1 - */ - REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST, - e3b0_val.mac_1_class_t_guarantied_hyst); - REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST, - e3b0_val.mac_1_class_t_guarantied_hyst); } return bnx2x_status; @@ -2619,7 +2515,7 @@ int bnx2x_update_pfc(struct link_params *params, /* update BRB params */ bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params); - if (bnx2x_status) + if (0 != bnx2x_status) return bnx2x_status; if (!vars->link_up) @@ -2637,6 +2533,7 @@ int bnx2x_update_pfc(struct link_params *params, bnx2x_emac_enable(params, vars, 0); return bnx2x_status; } + if (CHIP_IS_E2(bp)) bnx2x_update_pfc_bmac2(params, vars, bmac_loopback); else @@ -3156,6 +3053,7 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "write phy register failed\n"); netdev_err(bp->dev, "MDC/MDIO access timeout\n"); rc = -EFAULT; + } else { /* data */ tmp = ((phy->addr << 21) | (devad << 16) | val | @@ -3192,6 +3090,8 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, EMAC_MDIO_STATUS_10MB); return rc; } + + /******************************************************************/ /* BSC access functions from E3 */ /******************************************************************/ @@ -3439,7 +3339,7 @@ static void bnx2x_set_aer_mmd(struct link_params *params, aer_val = 0x3800 + offset - 1; else aer_val = 0x3800 + offset; - + DP(NETIF_MSG_LINK, "Set AER to 0x%x\n", aer_val); CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, MDIO_AER_BLOCK_AER_REG, aer_val); @@ -4042,11 +3942,13 @@ static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp, static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy, struct link_params *params, - u8 fiber_mode, - u8 always_autoneg) + u8 fiber_mode) { struct bnx2x *bp = params->bp; u16 val16, digctrl_kx1, digctrl_kx2; + u8 lane; + + lane = bnx2x_get_warpcore_lane(phy, params); /* Clear XFI clock comp in non-10G single lane mode. 
*/ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, @@ -4054,7 +3956,7 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13)); - if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) { + if (phy->req_line_speed == SPEED_AUTO_NEG) { /* SGMII Autoneg */ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); @@ -4065,7 +3967,7 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy, } else { bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); - val16 &= 0xcebf; + val16 &= 0xcfbf; switch (phy->req_line_speed) { case SPEED_10: break; @@ -4141,7 +4043,9 @@ static void bnx2x_warpcore_reset_lane(struct bnx2x *bp, bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC6, &val); } -/* Clear SFI/XFI link settings registers */ + + + /* Clear SFI/XFI link settings registers */ static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy, struct link_params *params, u16 lane) @@ -4346,7 +4250,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, vars->phy_flags |= PHY_SGMII_FLAG; DP(NETIF_MSG_LINK, "Setting SGMII mode\n"); bnx2x_warpcore_clear_regs(phy, params, lane); - bnx2x_warpcore_set_sgmii_speed(phy, params, 0, 1); + bnx2x_warpcore_set_sgmii_speed(phy, params, 0); } else { switch (serdes_net_if) { case PORT_HW_CFG_NET_SERDES_IF_KR: @@ -4374,8 +4278,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, } bnx2x_warpcore_set_sgmii_speed(phy, params, - fiber_mode, - 0); + fiber_mode); } break; @@ -4388,8 +4291,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, bnx2x_warpcore_set_10G_XFI(phy, params, 0); } else if (vars->line_speed == SPEED_1000) { DP(NETIF_MSG_LINK, "Setting 1G Fiber\n"); - bnx2x_warpcore_set_sgmii_speed( - phy, params, 1, 0); + bnx2x_warpcore_set_sgmii_speed(phy, params, 1); } /* Issue Module detection */ if (bnx2x_is_sfp_module_plugged(phy, params)) @@ -4526,6 +4428,12 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy, /* Switch back to 4-copy registers */ bnx2x_set_aer_mmd(params, phy); + /* Global loopback, not recommended. 
*/ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 | + 0x4000); } else { /* 10G & 20G */ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, @@ -4542,14 +4450,25 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy, } -void bnx2x_sync_link(struct link_params *params, - struct link_vars *vars) +void bnx2x_link_status_update(struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; u8 link_10g_plus; + u8 port = params->port; + u32 sync_offset, media_types; + /* Update PHY configuration */ + set_phy_vars(params, vars); + + vars->link_status = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + port_mb[port].link_status)); + + vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); + vars->phy_flags = PHY_XGXS_FLAG; if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG; - vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); + if (vars->link_up) { DP(NETIF_MSG_LINK, "phy link up\n"); @@ -4644,23 +4563,7 @@ void bnx2x_sync_link(struct link_params *params, if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; } -} - -void bnx2x_link_status_update(struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u8 port = params->port; - u32 sync_offset, media_types; - /* Update PHY configuration */ - set_phy_vars(params, vars); - - vars->link_status = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - port_mb[port].link_status)); - vars->phy_flags = PHY_XGXS_FLAG; - bnx2x_sync_link(params, vars); /* Sync media type */ sync_offset = params->shmem_base + offsetof(struct shmem_region, @@ -4699,6 +4602,7 @@ void bnx2x_link_status_update(struct link_params *params, vars->line_speed, vars->duplex, vars->flow_ctrl); } + static void bnx2x_set_master_ln(struct link_params *params, struct bnx2x_phy *phy) { @@ -4772,8 +4676,11 @@ static void bnx2x_set_swap_lanes(struct link_params *params, * Each two bits represents a lane number: * No swap is 0123 => 0x1b no need to enable the swap */ - u16 rx_lane_swap, tx_lane_swap; + u16 ser_lane, rx_lane_swap, tx_lane_swap; + ser_lane = ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); rx_lane_swap = ((params->lane_config & PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >> PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT); @@ -5449,6 +5356,7 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { + struct bnx2x *bp = params->bp; u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask; @@ -5495,7 +5403,9 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { + struct bnx2x *bp = params->bp; + u8 lane; u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL; int rc = 0; @@ -6768,6 +6678,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) return rc; } + /*****************************************************************************/ /* External Phy section */ /*****************************************************************************/ @@ -8192,15 +8103,7 @@ static void bnx2x_warpcore_power_module(struct link_params *params, static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy, struct link_params *params) { - struct bnx2x *bp = params->bp; bnx2x_warpcore_power_module(params, 
phy, 0); - /* Put Warpcore in low power mode */ - REG_WR(bp, MISC_REG_WC0_RESET, 0x0c0e); - - /* Put LCPLL in low power mode */ - REG_WR(bp, MISC_REG_LCPLL_E40_PWRDWN, 1); - REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_ANA, 0); - REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_DIG, 0); } static void bnx2x_power_sfp_module(struct link_params *params, @@ -9137,13 +9040,13 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "8727 Power fault has been detected on port %d\n", oc_port); - netdev_err(bp->dev, "Error: Power fault on Port %d has " - "been detected and the power to " - "that SFP+ module has been removed " - "to prevent failure of the card. " - "Please remove the SFP+ module and " - "restart the system to clear this " - "error.\n", + netdev_err(bp->dev, "Error: Power fault on Port %d has" + " been detected and the power to " + "that SFP+ module has been removed" + " to prevent failure of the card." + " Please remove the SFP+ module and" + " restart the system to clear this" + " error.\n", oc_port); /* Disable all RX_ALARMs except for mod_abs */ bnx2x_cl45_write(bp, phy, @@ -9325,7 +9228,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, static void bnx2x_848xx_set_led(struct bnx2x *bp, struct bnx2x_phy *phy) { - u16 val, offset; + u16 val; /* PHYC_CTL_LED_CTL */ bnx2x_cl45_read(bp, phy, @@ -9360,22 +9263,14 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp, MDIO_PMA_REG_8481_LED3_BLINK, 0); - /* Configure the blink rate to ~15.9 Hz */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH, - MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ); - - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) - offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1; - else - offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1; - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, offset, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val); val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/ + bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, offset, val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_84823_CTL_LED_CTL_1, val); /* 'Interrupt Mask' */ bnx2x_cl45_write(bp, phy, @@ -9388,7 +9283,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, struct link_vars *vars) { struct bnx2x *bp = params->bp; - u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val; + u16 autoneg_val, an_1000_val, an_10_100_val; u16 tmp_req_line_speed; tmp_req_line_speed = phy->req_line_speed; @@ -9483,8 +9378,6 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL, (1<<15 | 1<<9 | 7<<0)); - /* The PHY needs this set even for forced link. 
*/ - an_10_100_val |= (1<<8) | (1<<7); DP(NETIF_MSG_LINK, "Setting 100M force\n"); } if ((phy->req_line_speed == SPEED_10) && @@ -9522,17 +9415,9 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Advertising 10G\n"); /* Restart autoneg for 10G*/ - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, - &an_10g_val); - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, - an_10g_val | 0x1000); bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, - 0x3200); + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, + 0x3200); } else bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, @@ -9564,95 +9449,74 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy, return bnx2x_848xx_cmn_config_init(phy, params, vars); } -#define PHY84833_CMDHDLR_WAIT 300 -#define PHY84833_CMDHDLR_MAX_ARGS 5 -static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, + +#define PHY84833_HDSHK_WAIT 300 +static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy, struct link_params *params, - u16 fw_cmd, - u16 cmd_args[]) + struct link_vars *vars) { u32 idx; + u32 pair_swap; u16 val; + u16 data; struct bnx2x *bp = params->bp; + /* Do pair swap */ + + /* Check for configuration. */ + pair_swap = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].xgbt_phy_cfg)) & + PORT_HW_CFG_RJ45_PAIR_SWAP_MASK; + + if (pair_swap == 0) + return 0; + + data = (u16)pair_swap; + /* Write CMD_OPEN_OVERRIDE to STATUS reg */ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_STATUS, - PHY84833_STATUS_CMD_OPEN_OVERRIDE); - for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) { + MDIO_84833_TOP_CFG_SCRATCH_REG2, + PHY84833_CMD_OPEN_OVERRIDE); + for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_STATUS, &val); - if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS) + MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); + if (val == PHY84833_CMD_OPEN_FOR_CMDS) break; msleep(1); } - if (idx >= PHY84833_CMDHDLR_WAIT) { - DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); + if (idx >= PHY84833_HDSHK_WAIT) { + DP(NETIF_MSG_LINK, "Pairswap: FW not ready.\n"); return -EINVAL; } - /* Prepare argument(s) and issue command */ - for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) { - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_DATA1 + idx, - cmd_args[idx]); - } bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_COMMAND, fw_cmd); - for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) { + MDIO_84833_TOP_CFG_SCRATCH_REG4, + data); + /* Issue pair swap command */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG0, + PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE); + for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_STATUS, &val); - if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) || - (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) + MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); + if ((val == PHY84833_CMD_COMPLETE_PASS) || + (val == PHY84833_CMD_COMPLETE_ERROR)) break; msleep(1); } - if ((idx >= PHY84833_CMDHDLR_WAIT) || - (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { - DP(NETIF_MSG_LINK, "FW cmd failed.\n"); + if ((idx >= PHY84833_HDSHK_WAIT) || + (val == PHY84833_CMD_COMPLETE_ERROR)) { + DP(NETIF_MSG_LINK, "Pairswap: override failed.\n"); return -EINVAL; } - /* Gather returning data */ - for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) { - bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - 
MDIO_84833_CMD_HDLR_DATA1 + idx, - &cmd_args[idx]); - } bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_STATUS, - PHY84833_STATUS_CMD_CLEAR_COMPLETE); + MDIO_84833_TOP_CFG_SCRATCH_REG2, + PHY84833_CMD_CLEAR_COMPLETE); + DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data); return 0; } -static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - u32 pair_swap; - u16 data[PHY84833_CMDHDLR_MAX_ARGS]; - int status; - struct bnx2x *bp = params->bp; - - /* Check for configuration. */ - pair_swap = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[params->port].xgbt_phy_cfg)) & - PORT_HW_CFG_RJ45_PAIR_SWAP_MASK; - - if (pair_swap == 0) - return 0; - - /* Only the second argument is used for this command */ - data[1] = (u16)pair_swap; - - status = bnx2x_84833_cmd_hdlr(phy, params, - PHY84833_CMD_SET_PAIR_SWAP, data); - if (status == 0) - DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]); - - return status; -} - static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp, u32 shmem_base_path[], u32 chip_id) @@ -9715,6 +9579,24 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy, return 0; } +static int bnx2x_84833_common_init_phy(struct bnx2x *bp, + u32 shmem_base_path[], + u32 chip_id) +{ + u8 reset_gpios; + + reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id); + + bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW); + udelay(10); + bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH); + msleep(800); + DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n", + reset_gpios); + + return 0; +} + #define PHY84833_CONSTANT_LATENCY 1193 static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, struct link_params *params, @@ -9723,8 +9605,8 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u8 port, initialize = 1; u16 val; - u32 actual_phy_selection, cms_enable; - u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS]; + u16 temp; + u32 actual_phy_selection, cms_enable, idx; int rc = 0; msleep(1); @@ -9743,13 +9625,6 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x8000); - } - - bnx2x_wait_reset_complete(bp, phy, params); - - /* Wait for GPHY to come out of reset */ - msleep(50); - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { /* Bring PHY out of super isolate mode */ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, @@ -9758,20 +9633,27 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, MDIO_84833_TOP_CFG_XGPHY_STRAP1, val); - bnx2x_84833_pair_swap_cfg(phy, params, vars); - } else { - /* - * BCM84823 requires that XGXS links up first @ 10G for normal - * behavior. 
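The pair-swap and EEE sequences in this area share one mailbox handshake: open the command handler, poll until the firmware reports it is ready, write the argument(s), issue the opcode, poll for PASS/ERROR, then clear the status. Below is a compact sketch of that polling pattern only; the register access is mocked and the constants are placeholders, not the 84833's actual values.

#include <stdio.h>

enum { ST_OPEN_OVERRIDE = 1, ST_OPEN_FOR_CMDS, ST_PASS, ST_ERROR, ST_CLEAR };
#define HDSHK_WAIT 300

/* Mocked scratch register plus a fake firmware that becomes ready, then passes. */
static int scratch, ticks;
static void reg_write(int v) { scratch = v; ticks = 0; }
static int reg_read(void)
{
	if (++ticks > 3)
		scratch = (scratch == ST_OPEN_OVERRIDE) ? ST_OPEN_FOR_CMDS : ST_PASS;
	return scratch;
}

static int fw_cmd(int opcode, int arg)
{
	int i;

	reg_write(ST_OPEN_OVERRIDE);		/* ask firmware to open the mailbox */
	for (i = 0; i < HDSHK_WAIT && reg_read() != ST_OPEN_FOR_CMDS; i++)
		;
	if (i >= HDSHK_WAIT)
		return -1;			/* firmware never became ready */

	(void)arg;				/* argument write elided in this mock */
	reg_write(opcode);			/* issue the command */
	for (i = 0; i < HDSHK_WAIT; i++) {
		int st = reg_read();

		if (st == ST_PASS || st == ST_ERROR) {
			reg_write(ST_CLEAR);	/* hand the mailbox back */
			return st == ST_PASS ? 0 : -1;
		}
	}
	return -1;				/* command never completed */
}

int main(void)
{
	printf("cmd status: %d\n", fw_cmd(42, 7));
	return 0;
}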
- */ - u16 temp; - temp = vars->line_speed; - vars->line_speed = SPEED_10000; - bnx2x_set_autoneg(¶ms->phy[INT_PHY], params, vars, 0); - bnx2x_program_serdes(¶ms->phy[INT_PHY], params, vars); - vars->line_speed = temp; } + bnx2x_wait_reset_complete(bp, phy, params); + + /* Wait for GPHY to come out of reset */ + msleep(50); + + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) + bnx2x_84833_pair_swap_cfg(phy, params, vars); + + /* + * BCM84823 requires that XGXS links up first @ 10G for normal behavior + */ + temp = vars->line_speed; + vars->line_speed = SPEED_10000; + bnx2x_set_autoneg(¶ms->phy[INT_PHY], params, vars, 0); + bnx2x_program_serdes(¶ms->phy[INT_PHY], params, vars); + vars->line_speed = temp; + + /* Set dual-media configuration according to configuration */ + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, MDIO_CTL_REG_84823_MEDIA, &val); val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | @@ -9818,18 +9700,64 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, /* AutogrEEEn */ if (params->feature_config_flags & - FEATURE_CONFIG_AUTOGREEEN_ENABLED) - cmd_args[0] = 0x2; - else - cmd_args[0] = 0x0; + FEATURE_CONFIG_AUTOGREEEN_ENABLED) { + /* Ensure that f/w is ready */ + for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); + if (val == PHY84833_CMD_OPEN_FOR_CMDS) + break; + usleep_range(1000, 1000); + } + if (idx >= PHY84833_HDSHK_WAIT) { + DP(NETIF_MSG_LINK, "AutogrEEEn: FW not ready.\n"); + return -EINVAL; + } + + /* Select EEE mode */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG3, + 0x2); + + /* Set Idle and Latency */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG4, + PHY84833_CONSTANT_LATENCY + 1); + + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_DATA3_REG, + PHY84833_CONSTANT_LATENCY + 1); + + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_DATA4_REG, + PHY84833_CONSTANT_LATENCY); + + /* Send EEE instruction to command register */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG0, + PHY84833_DIAG_CMD_SET_EEE_MODE); + + /* Ensure that the command has completed */ + for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); + if ((val == PHY84833_CMD_COMPLETE_PASS) || + (val == PHY84833_CMD_COMPLETE_ERROR)) + break; + usleep_range(1000, 1000); + } + if ((idx >= PHY84833_HDSHK_WAIT) || + (val == PHY84833_CMD_COMPLETE_ERROR)) { + DP(NETIF_MSG_LINK, "AutogrEEEn: command failed.\n"); + return -EINVAL; + } + + /* Reset command handler */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG2, + PHY84833_CMD_CLEAR_COMPLETE); + } - cmd_args[1] = 0x0; - cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1; - cmd_args[3] = PHY84833_CONSTANT_LATENCY; - rc = bnx2x_84833_cmd_hdlr(phy, params, - PHY84833_CMD_SET_EEE_MODE, cmd_args); - if (rc != 0) - DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n"); if (initialize) rc = bnx2x_848xx_cmn_config_init(phy, params, vars); else @@ -10216,10 +10144,8 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "54618SE cfg init\n"); usleep_range(1000, 1000); - /* - * This works with E3 only, no need to check the chip - * before determining the port. - */ + /* This works with E3 only, no need to check the chip + before determining the port. 
*/ port = params->port; cfg_pin = (REG_RD(bp, params->shmem_base + @@ -11292,9 +11218,7 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, offsetof(struct shmem_region, dev_info.port_feature_config[port].link_config)) & PORT_FEATURE_CONNECTED_SWITCH_MASK); - chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) | - ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12); - + chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16; DP(NETIF_MSG_LINK, ":chip_id = 0x%x\n", chip_id); if (USES_WARPCORE(bp)) { u32 serdes_net_if; @@ -11473,10 +11397,6 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp, return -EINVAL; default: *phy = phy_null; - /* In case external PHY wasn't found */ - if ((phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && - (phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) - return -EINVAL; return 0; } @@ -11650,7 +11570,7 @@ u32 bnx2x_phy_selection(struct link_params *params) int bnx2x_phy_probe(struct link_params *params) { - u8 phy_index, actual_phy_idx; + u8 phy_index, actual_phy_idx, link_cfg_idx; u32 phy_config_swapped, sync_offset, media_types; struct bnx2x *bp = params->bp; struct bnx2x_phy *phy; @@ -11661,6 +11581,7 @@ int bnx2x_phy_probe(struct link_params *params) for (phy_index = INT_PHY; phy_index < MAX_PHYS; phy_index++) { + link_cfg_idx = LINK_CONFIG_IDX(phy_index); actual_phy_idx = phy_index; if (phy_config_swapped) { if (phy_index == EXT_PHY1) @@ -12326,63 +12247,6 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp, return 0; } -static int bnx2x_84833_common_init_phy(struct bnx2x *bp, - u32 shmem_base_path[], - u32 shmem2_base_path[], - u8 phy_index, - u32 chip_id) -{ - u8 reset_gpios; - struct bnx2x_phy phy; - u32 shmem_base, shmem2_base, cnt; - s8 port = 0; - u16 val; - - reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id); - bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW); - udelay(10); - bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH); - DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n", - reset_gpios); - for (port = PORT_MAX - 1; port >= PORT_0; port--) { - /* This PHY is for E2 and E3. */ - shmem_base = shmem_base_path[port]; - shmem2_base = shmem2_base_path[port]; - /* Extract the ext phy address for the port */ - if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, - 0, &phy) != - 0) { - DP(NETIF_MSG_LINK, "populate_phy failed\n"); - return -EINVAL; - } - - /* Wait for FW completing its initialization. */ - for (cnt = 0; cnt < 1000; cnt++) { - bnx2x_cl45_read(bp, &phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_CTRL, &val); - if (!(val & (1<<15))) - break; - msleep(1); - } - if (cnt >= 1000) - DP(NETIF_MSG_LINK, - "84833 Cmn reset timeout (%d)\n", port); - - /* Put the port in super isolate mode. */ - bnx2x_cl45_read(bp, &phy, - MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val); - val |= MDIO_84833_SUPER_ISOLATE; - bnx2x_cl45_write(bp, &phy, - MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_XGPHY_STRAP1, val); - } - - return 0; -} - - static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], u32 shmem2_base_path[], u8 phy_index, u32 ext_phy_type, u32 chip_id) @@ -12417,9 +12281,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], * GPIO3's are linked, and so both need to be toggled * to obtain required 2us pulse. 
*/ - rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, - shmem2_base_path, - phy_index, chip_id); + rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, chip_id); break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: rc = -EINVAL; diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index e02a68a7fb85..2a46e633abe9 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -479,7 +479,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos); /* Configure the COS to ETS according to BW and SP settings.*/ int bnx2x_ets_e3b0_config(const struct link_params *params, const struct link_vars *vars, - struct bnx2x_ets_params *ets_params); + const struct bnx2x_ets_params *ets_params); /* Read pfc statistic*/ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars, u32 pfc_frames_sent[2], diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index ffeaaa95ed96..2f6361e949f0 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -2318,6 +2318,12 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) CMNG_FLAGS_PER_PORT_FAIRNESS_VN; } +/* returns func by VN for current port */ +static inline int func_by_vn(struct bnx2x *bp, int vn) +{ + return 2 * vn + BP_PORT(bp); +} + static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) { struct rate_shaping_vars_per_vn m_rs_vn; @@ -2469,6 +2475,22 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) "rate shaping and fairness are disabled\n"); } +static inline void bnx2x_link_sync_notify(struct bnx2x *bp) +{ + int func; + int vn; + + /* Set the attention towards other drivers on the same port */ + for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { + if (vn == BP_VN(bp)) + continue; + + func = func_by_vn(bp, vn); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + + (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); + } +} + /* This function is called upon link interrupt */ static void bnx2x_link_attn(struct bnx2x *bp) { @@ -2527,9 +2549,6 @@ void bnx2x__link_status_update(struct bnx2x *bp) if (bp->state != BNX2X_STATE_OPEN) return; - /* read updated dcb configuration */ - bnx2x_dcbx_pmf_update(bp); - bnx2x_link_status_update(&bp->link_params, &bp->link_vars); if (bp->link_vars.link_up) @@ -2624,6 +2643,15 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) return rc; } +static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp) +{ +#ifdef BCM_CNIC + /* Statistics are not supported for CNIC Clients at the moment */ + if (IS_FCOE_FP(fp)) + return false; +#endif + return true; +} void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) { @@ -2667,11 +2695,11 @@ static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp, * parent connection). The statistics are zeroed when the parent * connection is initialized. */ - - __set_bit(BNX2X_Q_FLG_STATS, &flags); - if (zero_stats) - __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); - + if (stat_counter_valid(bp, fp)) { + __set_bit(BNX2X_Q_FLG_STATS, &flags); + if (zero_stats) + __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); + } return flags; } @@ -2780,8 +2808,8 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, /* This should be a maximum number of data bytes that may be * placed on the BD (not including paddings). 
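The func_by_vn() helper introduced above interleaves the two ports across function numbers, func = 2 * vn + port, so port 0 owns the even functions and port 1 the odd ones. A trivial standalone check of that mapping (ranges illustrative):

#include <stdio.h>

/* func = 2 * vn + port: port 0 -> even functions, port 1 -> odd functions. */
static int func_by_vn(int port, int vn)
{
	return 2 * vn + port;
}

int main(void)
{
	int port, vn;

	for (port = 0; port < 2; port++)
		for (vn = 0; vn < 4; vn++)
			printf("port %d vn %d -> func %d\n", port, vn,
			       func_by_vn(port, vn));
	return 0;
}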
*/ - rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START - - BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING; + rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN - + IP_HEADER_ALIGNMENT_PADDING; rxq_init->cl_qzone_id = fp->cl_qzone_id; rxq_init->tpa_agg_sz = tpa_agg_size; @@ -2912,143 +2940,6 @@ static void bnx2x_e1h_enable(struct bnx2x *bp) */ } -#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 - -static void bnx2x_drv_info_ether_stat(struct bnx2x *bp) -{ - struct eth_stats_info *ether_stat = - &bp->slowpath->drv_info_to_mcp.ether_stat; - - /* leave last char as NULL */ - memcpy(ether_stat->version, DRV_MODULE_VERSION, - ETH_STAT_INFO_VERSION_LEN - 1); - - bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj, - DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, - ether_stat->mac_local); - - ether_stat->mtu_size = bp->dev->mtu; - - if (bp->dev->features & NETIF_F_RXCSUM) - ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; - if (bp->dev->features & NETIF_F_TSO) - ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; - ether_stat->feature_flags |= bp->common.boot_mode; - - ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0; - - ether_stat->txq_size = bp->tx_ring_size; - ether_stat->rxq_size = bp->rx_ring_size; -} - -static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp) -{ -#ifdef BCM_CNIC - struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; - struct fcoe_stats_info *fcoe_stat = - &bp->slowpath->drv_info_to_mcp.fcoe_stat; - - memcpy(fcoe_stat->mac_local, bp->fip_mac, ETH_ALEN); - - fcoe_stat->qos_priority = - app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE]; - - /* insert FCoE stats from ramrod response */ - if (!NO_FCOE(bp)) { - struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = - &bp->fw_stats_data->queue_stats[FCOE_IDX]. - tstorm_queue_statistics; - - struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = - &bp->fw_stats_data->queue_stats[FCOE_IDX]. 
- xstorm_queue_statistics; - - struct fcoe_statistics_params *fw_fcoe_stat = - &bp->fw_stats_data->fcoe; - - ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo, - fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); - - ADD_64(fcoe_stat->rx_bytes_hi, - fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, - fcoe_stat->rx_bytes_lo, - fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); - - ADD_64(fcoe_stat->rx_bytes_hi, - fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, - fcoe_stat->rx_bytes_lo, - fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); - - ADD_64(fcoe_stat->rx_bytes_hi, - fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, - fcoe_stat->rx_bytes_lo, - fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); - - ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo, - fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); - - ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo, - fcoe_q_tstorm_stats->rcv_ucast_pkts); - - ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo, - fcoe_q_tstorm_stats->rcv_bcast_pkts); - - ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo, - fcoe_q_tstorm_stats->rcv_mcast_pkts); - - ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo, - fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt); - - ADD_64(fcoe_stat->tx_bytes_hi, - fcoe_q_xstorm_stats->ucast_bytes_sent.hi, - fcoe_stat->tx_bytes_lo, - fcoe_q_xstorm_stats->ucast_bytes_sent.lo); - - ADD_64(fcoe_stat->tx_bytes_hi, - fcoe_q_xstorm_stats->bcast_bytes_sent.hi, - fcoe_stat->tx_bytes_lo, - fcoe_q_xstorm_stats->bcast_bytes_sent.lo); - - ADD_64(fcoe_stat->tx_bytes_hi, - fcoe_q_xstorm_stats->mcast_bytes_sent.hi, - fcoe_stat->tx_bytes_lo, - fcoe_q_xstorm_stats->mcast_bytes_sent.lo); - - ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo, - fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt); - - ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo, - fcoe_q_xstorm_stats->ucast_pkts_sent); - - ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo, - fcoe_q_xstorm_stats->bcast_pkts_sent); - - ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo, - fcoe_q_xstorm_stats->mcast_pkts_sent); - } - - /* ask L5 driver to add data to the struct */ - bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD); -#endif -} - -static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp) -{ -#ifdef BCM_CNIC - struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; - struct iscsi_stats_info *iscsi_stat = - &bp->slowpath->drv_info_to_mcp.iscsi_stat; - - memcpy(iscsi_stat->mac_local, bp->cnic_eth_dev.iscsi_mac, ETH_ALEN); - - iscsi_stat->qos_priority = - app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI]; - - /* ask L5 driver to add data to the struct */ - bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD); -#endif -} - /* called due to MCP event (on pmf): * reread new bandwidth configuration * configure FW @@ -3069,50 +2960,6 @@ static inline void bnx2x_set_mf_bw(struct bnx2x *bp) bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0); } -static void bnx2x_handle_drv_info_req(struct bnx2x *bp) -{ - enum drv_info_opcode op_code; - u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control); - - /* if drv_info version supported by MFW doesn't match - send NACK */ - if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { - bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); - return; - } - - op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> - DRV_INFO_CONTROL_OP_CODE_SHIFT; - - memset(&bp->slowpath->drv_info_to_mcp, 0, - sizeof(union drv_info_to_mcp)); - - switch (op_code) { - case ETH_STATS_OPCODE: - bnx2x_drv_info_ether_stat(bp); - break; - case FCOE_STATS_OPCODE: - 
bnx2x_drv_info_fcoe_stat(bp); - break; - case ISCSI_STATS_OPCODE: - bnx2x_drv_info_iscsi_stat(bp); - break; - default: - /* if op code isn't supported - send NACK */ - bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); - return; - } - - /* if we got drv_info attn from MFW then these fields are defined in - * shmem2 for sure - */ - SHMEM2_WR(bp, drv_info_host_addr_lo, - U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp))); - SHMEM2_WR(bp, drv_info_host_addr_hi, - U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp))); - - bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0); -} - static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) { DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); @@ -3471,17 +3318,6 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp) netdev_err(bp->dev, "Fan Failure on Network Controller has caused" " the driver to shutdown the card to prevent permanent" " damage. Please contact OEM Support for assistance\n"); - - /* - * Scheudle device reset (unload) - * This is due to some boards consuming sufficient power when driver is - * up to overheat if fan fails. - */ - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - schedule_delayed_work(&bp->sp_rtnl_task, 0); - } static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) @@ -3620,8 +3456,6 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) if (val & DRV_STATUS_SET_MF_BW) bnx2x_set_mf_bw(bp); - if (val & DRV_STATUS_DRV_INFO_REQ) - bnx2x_handle_drv_info_req(bp); if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) bnx2x_pmf_update(bp); @@ -5413,7 +5247,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) u8 cos; unsigned long q_type = 0; u32 cids[BNX2X_MULTI_TX_COS] = { 0 }; - fp->rx_queue = fp_idx; + fp->cid = fp_idx; fp->cl_id = bnx2x_fp_cl_id(fp); fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp); @@ -7022,16 +6856,13 @@ void bnx2x_free_mem(struct bnx2x *bp) static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) { int num_groups; - int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1; - /* number of queues for statistics is number of eth queues + FCoE */ - u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats; + /* number of eth_queues */ + u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp); /* Total number of FW statistics requests = - * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats + - * num of queues - */ - bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats; + * 1 for port stats + 1 for PF stats + num_eth_queues */ + bp->fw_stats_num = 2 + num_queue_stats; /* Request is built from stats_query_header and an array of @@ -7039,8 +6870,8 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) * STATS_QUERY_CMD_COUNT rules. The real number or requests is * configured in the stats_query_header. */ - num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) + - (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0); + num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT + + (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0); bp->fw_stats_req_sz = sizeof(struct stats_query_header) + num_groups * sizeof(struct stats_query_cmd_group); @@ -7049,13 +6880,9 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) * * stats_counter holds per-STORM counters that are incremented * when STORM has finished with the current request. - * - * memory for FCoE offloaded statistics are counted anyway, - * even if they will not be sent. 
*/ bp->fw_stats_data_sz = sizeof(struct per_port_stats) + sizeof(struct per_pf_stats) + - sizeof(struct fcoe_statistics_params) + sizeof(struct per_queue_stats) * num_queue_stats + sizeof(struct stats_counter); @@ -7198,13 +7025,6 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) { unsigned long ramrod_flags = 0; -#ifdef BCM_CNIC - if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_ISCSI_SD(bp)) { - DP(NETIF_MSG_IFUP, "Ignoring Zero MAC for iSCSI SD mode\n"); - return 0; - } -#endif - DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); @@ -8702,17 +8522,6 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos); - /* - * in case of fan failure we need to reset id if the "stop on error" - * debug flag is set, since we trying to prevent permanent overheating - * damage - */ - if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) { - DP(BNX2X_MSG_SP, "fan failure detected. Unloading driver\n"); - netif_device_detach(bp->dev); - bnx2x_close(bp->dev); - } - sp_rtnl_exit: rtnl_unlock(); } @@ -8899,7 +8708,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) { - u32 val, val2, val3, val4, id, boot_mode; + u32 val, val2, val3, val4, id; u16 pmc; /* Get the chip revision id and number. */ @@ -9008,26 +8817,6 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) bp->link_params.feature_config_flags |= (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; - bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? - BC_SUPPORTS_PFC_STATS : 0; - - boot_mode = SHMEM_RD(bp, - dev_info.port_feature_config[BP_PORT(bp)].mba_config) & - PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; - switch (boot_mode) { - case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE: - bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE; - break; - case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB: - bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI; - break; - case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT: - bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE; - break; - case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE: - bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE; - break; - } pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; @@ -9478,43 +9267,22 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) bp->common.shmem2_base); } -void bnx2x_get_iscsi_info(struct bnx2x *bp) -{ #ifdef BCM_CNIC +static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp) +{ int port = BP_PORT(bp); + int func = BP_ABS_FUNC(bp); u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, drv_lic_key[port].max_iscsi_conn); + u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, + drv_lic_key[port].max_fcoe_conn); - /* Get the number of maximum allowed iSCSI connections */ + /* Get the number of maximum allowed iSCSI and FCoE connections */ bp->cnic_eth_dev.max_iscsi_conn = (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >> BNX2X_MAX_ISCSI_INIT_CONN_SHIFT; - BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n", - bp->cnic_eth_dev.max_iscsi_conn); - - /* - * If maximum allowed number of connections is zero - - * disable the feature. 
- */ - if (!bp->cnic_eth_dev.max_iscsi_conn) - bp->flags |= NO_ISCSI_FLAG; -#else - bp->flags |= NO_ISCSI_FLAG; -#endif -} - -static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp) -{ -#ifdef BCM_CNIC - int port = BP_PORT(bp); - int func = BP_ABS_FUNC(bp); - - u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, - drv_lic_key[port].max_fcoe_conn); - - /* Get the number of maximum allowed FCoE connections */ bp->cnic_eth_dev.max_fcoe_conn = (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >> BNX2X_MAX_FCOE_INIT_CONN_SHIFT; @@ -9566,29 +9334,21 @@ static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp) } } - BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn); + BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n", + bp->cnic_eth_dev.max_iscsi_conn, + bp->cnic_eth_dev.max_fcoe_conn); /* * If maximum allowed number of connections is zero - * disable the feature. */ + if (!bp->cnic_eth_dev.max_iscsi_conn) + bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; + if (!bp->cnic_eth_dev.max_fcoe_conn) bp->flags |= NO_FCOE_FLAG; -#else - bp->flags |= NO_FCOE_FLAG; -#endif -} - -static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp) -{ - /* - * iSCSI may be dynamically disabled but reading - * info here we will decrease memory usage by driver - * if the feature is disabled for good - */ - bnx2x_get_iscsi_info(bp); - bnx2x_get_fcoe_info(bp); } +#endif static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) { @@ -9614,8 +9374,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); #ifdef BCM_CNIC - /* - * iSCSI and FCoE NPAR MACs: if there is no either iSCSI or + /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or * FCoE MAC then the appropriate feature should be disabled. */ if (IS_MF_SI(bp)) { @@ -9637,22 +9396,11 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) val = MF_CFG_RD(bp, func_ext_config[func]. 
fcoe_mac_addr_lower); bnx2x_set_mac_buf(fip_mac, val, val2); - BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n", + BNX2X_DEV_INFO("Read FCoE L2 MAC to %pM\n", fip_mac); } else bp->flags |= NO_FCOE_FLAG; - } else { /* SD mode */ - if (BNX2X_IS_MF_PROTOCOL_ISCSI(bp)) { - /* use primary mac as iscsi mac */ - memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN); - /* Zero primary MAC configuration */ - memset(bp->dev->dev_addr, 0, ETH_ALEN); - - BNX2X_DEV_INFO("SD ISCSI MODE\n"); - BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n", - iscsi_mac); - } } #endif } else { @@ -9701,7 +9449,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) } #endif - if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr)) + if (!is_valid_ether_addr(bp->dev->dev_addr)) dev_err(&bp->pdev->dev, "bad Ethernet MAC address configuration: " "%pM, change it manually before bringing up " @@ -9913,7 +9661,9 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) /* Get MAC addresses */ bnx2x_get_mac_hwinfo(bp); +#ifdef BCM_CNIC bnx2x_get_cnic_info(bp); +#endif /* Get current FW pulse sequence */ if (!BP_NOMCP(bp)) { @@ -9931,49 +9681,30 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp) { int cnt, i, block_end, rodi; - char vpd_start[BNX2X_VPD_LEN+1]; + char vpd_data[BNX2X_VPD_LEN+1]; char str_id_reg[VENDOR_ID_LEN+1]; char str_id_cap[VENDOR_ID_LEN+1]; - char *vpd_data; - char *vpd_extended_data = NULL; u8 len; - cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start); + cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data); memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); if (cnt < BNX2X_VPD_LEN) goto out_not_found; - /* VPD RO tag should be first tag after identifier string, hence - * we should be able to find it in first BNX2X_VPD_LEN chars - */ - i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN, + i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN, PCI_VPD_LRDT_RO_DATA); if (i < 0) goto out_not_found; + block_end = i + PCI_VPD_LRDT_TAG_SIZE + - pci_vpd_lrdt_size(&vpd_start[i]); + pci_vpd_lrdt_size(&vpd_data[i]); i += PCI_VPD_LRDT_TAG_SIZE; - if (block_end > BNX2X_VPD_LEN) { - vpd_extended_data = kmalloc(block_end, GFP_KERNEL); - if (vpd_extended_data == NULL) - goto out_not_found; - - /* read rest of vpd image into vpd_extended_data */ - memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN); - cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN, - block_end - BNX2X_VPD_LEN, - vpd_extended_data + BNX2X_VPD_LEN); - if (cnt < (block_end - BNX2X_VPD_LEN)) - goto out_not_found; - vpd_data = vpd_extended_data; - } else - vpd_data = vpd_start; - - /* now vpd_data holds full vpd content in both cases */ + if (block_end > BNX2X_VPD_LEN) + goto out_not_found; rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, PCI_VPD_RO_KEYWORD_MFR_ID); @@ -10005,11 +9736,9 @@ static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp) bp->fw_ver[len] = ' '; } } - kfree(vpd_extended_data); return; } out_not_found: - kfree(vpd_extended_data); return; } @@ -10111,20 +9840,15 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) bp->multi_mode = multi_mode; - bp->disable_tpa = disable_tpa; - -#ifdef BCM_CNIC - bp->disable_tpa |= IS_MF_ISCSI_SD(bp); -#endif - /* Set TPA flags */ - if (bp->disable_tpa) { + if (disable_tpa) { bp->flags &= ~TPA_ENABLE_FLAG; bp->dev->features &= ~NETIF_F_LRO; } else { bp->flags |= TPA_ENABLE_FLAG; bp->dev->features |= NETIF_F_LRO; } + bp->disable_tpa = disable_tpa; if (CHIP_IS_E1(bp)) bp->dropless_fc = 0; @@ -10241,7 +9965,7 @@ static int bnx2x_open(struct net_device *dev) } 
/* called with rtnl_lock */ -int bnx2x_close(struct net_device *dev) +static int bnx2x_close(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); @@ -10395,11 +10119,6 @@ void bnx2x_set_rx_mode(struct net_device *dev) } bp->rx_mode = rx_mode; -#ifdef BCM_CNIC - /* handle ISCSI SD mode */ - if (IS_MF_ISCSI_SD(bp)) - bp->rx_mode = BNX2X_RX_MODE_NONE; -#endif /* Schedule the rx_mode command */ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { @@ -10479,15 +10198,6 @@ static void poll_bnx2x(struct net_device *dev) } #endif -static int bnx2x_validate_addr(struct net_device *dev) -{ - struct bnx2x *bp = netdev_priv(dev); - - if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) - return -EADDRNOTAVAIL; - return 0; -} - static const struct net_device_ops bnx2x_netdev_ops = { .ndo_open = bnx2x_open, .ndo_stop = bnx2x_close, @@ -10495,7 +10205,7 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_select_queue = bnx2x_select_queue, .ndo_set_rx_mode = bnx2x_set_rx_mode, .ndo_set_mac_address = bnx2x_change_mac_addr, - .ndo_validate_addr = bnx2x_validate_addr, + .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = bnx2x_ioctl, .ndo_change_mtu = bnx2x_change_mtu, .ndo_fix_features = bnx2x_fix_features, @@ -11113,8 +10823,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); #ifdef BCM_CNIC - /* disable FCOE L2 queue for E1x */ - if (CHIP_IS_E1x(bp)) + /* disable FCOE L2 queue for E1x and E3*/ + if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp)) bp->flags |= NO_FCOE_FLAG; #endif @@ -11776,38 +11486,6 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) smp_mb__after_atomic_inc(); break; } - case DRV_CTL_ULP_REGISTER_CMD: { - int ulp_type = ctl->data.ulp_type; - - if (CHIP_IS_E3(bp)) { - int idx = BP_FW_MB_IDX(bp); - u32 cap; - - cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); - if (ulp_type == CNIC_ULP_ISCSI) - cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI; - else if (ulp_type == CNIC_ULP_FCOE) - cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE; - SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); - } - break; - } - case DRV_CTL_ULP_UNREGISTER_CMD: { - int ulp_type = ctl->data.ulp_type; - - if (CHIP_IS_E3(bp)) { - int idx = BP_FW_MB_IDX(bp); - u32 cap; - - cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); - if (ulp_type == CNIC_ULP_ISCSI) - cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI; - else if (ulp_type == CNIC_ULP_FCOE) - cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE; - SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); - } - break; - } default: BNX2X_ERR("unknown command %x\n", ctl->cmd); @@ -11883,7 +11561,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev) mutex_lock(&bp->cnic_mutex); cp->drv_state = 0; - RCU_INIT_POINTER(bp->cnic_ops, NULL); + rcu_assign_pointer(bp->cnic_ops, NULL); mutex_unlock(&bp->cnic_mutex); synchronize_rcu(); kfree(bp->cnic_kwq); diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 44609de4e5dc..e58073ef33b4 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -160,11 +160,8 @@ #define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c /* [RW 10] Write client 0: Assert pause threshold. */ #define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068 -/* [RW 1] Indicates if to use per-class guaranty mode (new mode) or per-MAC - * guaranty mode (backwards-compatible mode). 0=per-MAC guaranty mode (BC - * mode). 1=per-class guaranty mode (new mode). 
*/ -#define BRB1_REG_PER_CLASS_GUARANTY_MODE 0x60268 -/* [R 24] The number of full blocks occpied by port. */ +#define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c +/* [R 24] The number of full blocks occupied by port. */ #define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094 /* [RW 1] Reset the design by software. */ #define BRB1_REG_SOFT_RESET 0x600dc @@ -1622,14 +1619,6 @@ register bits. */ #define MISC_REG_LCPLL_CTRL_1 0xa2a4 #define MISC_REG_LCPLL_CTRL_REG_2 0xa2a8 -/* [RW 1] LCPLL power down. Global register. Active High. Reset on POR - * reset. */ -#define MISC_REG_LCPLL_E40_PWRDWN 0xaa74 -/* [RW 1] LCPLL VCO reset. Global register. Active Low Reset on POR reset. */ -#define MISC_REG_LCPLL_E40_RESETB_ANA 0xaa78 -/* [RW 1] LCPLL post-divider reset. Global register. Active Low Reset on POR - * reset. */ -#define MISC_REG_LCPLL_E40_RESETB_DIG 0xaa7c /* [RW 4] Interrupt mask register #0 read/write */ #define MISC_REG_MISC_INT_MASK 0xa388 /* [RW 1] Parity mask register #0 read/write */ @@ -1765,7 +1754,6 @@ * is compared to the value on ctrl_md_devad. Drives output * misc_xgxs0_phy_addr. Global register. */ #define MISC_REG_WC0_CTRL_PHY_ADDR 0xa9cc -#define MISC_REG_WC0_RESET 0xac30 /* [RW 2] XMAC Core port mode. Indicates the number of ports on the system side. This should be less than or equal to phy_port_mode; if some of the ports are not used. This enables reduction of frequency on the core side. @@ -6835,13 +6823,11 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000 #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100 #define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000 -#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005 -#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080 -#define MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH 0xa82b -#define MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ 0x2f -#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3 -#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec -#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080 +#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005 +#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080 + +#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3 +#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080 /* BCM84833 only */ #define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a @@ -6852,35 +6838,26 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007 #define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008 #define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009 -#define MDIO_84833_TOP_CFG_SCRATCH_REG26 0x4037 -#define MDIO_84833_TOP_CFG_SCRATCH_REG27 0x4038 -#define MDIO_84833_TOP_CFG_SCRATCH_REG28 0x4039 -#define MDIO_84833_TOP_CFG_SCRATCH_REG29 0x403a -#define MDIO_84833_TOP_CFG_SCRATCH_REG30 0x403b -#define MDIO_84833_TOP_CFG_SCRATCH_REG31 0x403c -#define MDIO_84833_CMD_HDLR_COMMAND MDIO_84833_TOP_CFG_SCRATCH_REG0 -#define MDIO_84833_CMD_HDLR_STATUS MDIO_84833_TOP_CFG_SCRATCH_REG26 -#define MDIO_84833_CMD_HDLR_DATA1 MDIO_84833_TOP_CFG_SCRATCH_REG27 -#define MDIO_84833_CMD_HDLR_DATA2 MDIO_84833_TOP_CFG_SCRATCH_REG28 -#define MDIO_84833_CMD_HDLR_DATA3 MDIO_84833_TOP_CFG_SCRATCH_REG29 -#define MDIO_84833_CMD_HDLR_DATA4 MDIO_84833_TOP_CFG_SCRATCH_REG30 -#define MDIO_84833_CMD_HDLR_DATA5 MDIO_84833_TOP_CFG_SCRATCH_REG31 +#define MDIO_84833_TOP_CFG_DATA3_REG 0x4011 +#define MDIO_84833_TOP_CFG_DATA4_REG 0x4012 /* Mailbox command set used by 84833. 
*/ -#define PHY84833_CMD_SET_PAIR_SWAP 0x8001 -#define PHY84833_CMD_GET_EEE_MODE 0x8008 -#define PHY84833_CMD_SET_EEE_MODE 0x8009 +#define PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE 0x2 /* Mailbox status set used by 84833. */ -#define PHY84833_STATUS_CMD_RECEIVED 0x0001 -#define PHY84833_STATUS_CMD_IN_PROGRESS 0x0002 -#define PHY84833_STATUS_CMD_COMPLETE_PASS 0x0004 -#define PHY84833_STATUS_CMD_COMPLETE_ERROR 0x0008 -#define PHY84833_STATUS_CMD_OPEN_FOR_CMDS 0x0010 -#define PHY84833_STATUS_CMD_SYSTEM_BOOT 0x0020 -#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040 -#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080 -#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5 +#define PHY84833_CMD_RECEIVED 0x0001 +#define PHY84833_CMD_IN_PROGRESS 0x0002 +#define PHY84833_CMD_COMPLETE_PASS 0x0004 +#define PHY84833_CMD_COMPLETE_ERROR 0x0008 +#define PHY84833_CMD_OPEN_FOR_CMDS 0x0010 +#define PHY84833_CMD_SYSTEM_BOOT 0x0020 +#define PHY84833_CMD_NOT_OPEN_FOR_CMDS 0x0040 +#define PHY84833_CMD_CLEAR_COMPLETE 0x0080 +#define PHY84833_CMD_OPEN_OVERRIDE 0xa5a5 + +/* 84833 F/W Feature Commands */ +#define PHY84833_DIAG_CMD_GET_EEE_MODE 0x27 +#define PHY84833_DIAG_CMD_SET_EEE_MODE 0x28 /* Warpcore clause 45 addressing */ #define MDIO_WC_DEVAD 0x3 diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 5ac616093f9f..14517691f8db 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -30,8 +30,6 @@ #define BNX2X_MAX_EMUL_MULTI 16 -#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN) - /**** Exe Queue interfaces ****/ /** @@ -443,36 +441,6 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) return true; } -static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, - int n, u8 *buf) -{ - struct bnx2x_vlan_mac_registry_elem *pos; - u8 *next = buf; - int counter = 0; - - /* traverse list */ - list_for_each_entry(pos, &o->head, link) { - if (counter < n) { - /* place leading zeroes in buffer */ - memset(next, 0, MAC_LEADING_ZERO_CNT); - - /* place mac after leading zeroes*/ - memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac, - ETH_ALEN); - - /* calculate address of next element and - * advance counter - */ - counter++; - next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32)); - - DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n", - counter, next, pos->u.mac.mac); - } - } - return counter * ETH_ALEN; -} - /* check_add() callbacks */ static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o, union bnx2x_classification_ramrod_data *data) @@ -1918,7 +1886,6 @@ void bnx2x_init_mac_obj(struct bnx2x *bp, mac_obj->check_move = bnx2x_check_move; mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; - mac_obj->get_n_elements = bnx2x_get_n_elements; /* Exe Queue */ bnx2x_exe_queue_init(bp, @@ -3375,7 +3342,7 @@ static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp, if (!list_empty(&o->registry.exact_match.macs)) return 0; - elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC); + elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC); if (!elem) { BNX2X_ERR("Failed to allocate registry memory\n"); return -ENOMEM; diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 992308ff82e8..9a517c2e9f1b 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -285,19 +285,6 @@ 
struct bnx2x_vlan_mac_obj { /* RAMROD command to be used */ int ramrod_cmd; - /* copy first n elements onto preallocated buffer - * - * @param n number of elements to get - * @param buf buffer preallocated by caller into which elements - * will be copied. Note elements are 4-byte aligned - * so buffer size must be able to accomodate the - * aligned elements. - * - * @return number of copied bytes - */ - int (*get_n_elements)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, - int n, u8 *buf); - /** * Checks if ADD-ramrod with the given params may be performed. * diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index bc0121ac291e..02ac6a771bf9 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -39,17 +39,6 @@ static inline long bnx2x_hilo(u32 *hiref) #endif } -static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp) -{ - u16 res = sizeof(struct host_port_stats) >> 2; - - /* if PFC stats are not supported by the MFW, don't DMA them */ - if (!(bp->flags & BC_SUPPORTS_PFC_STATS)) - res -= (sizeof(u32)*4) >> 2; - - return res; -} - /* * Init service functions */ @@ -189,8 +178,7 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp) DMAE_LEN32_RD_MAX * 4); dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) + DMAE_LEN32_RD_MAX * 4); - dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX; - + dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX; dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); dmae->comp_val = DMAE_COMP_VAL; @@ -229,7 +217,7 @@ static void bnx2x_port_stats_init(struct bnx2x *bp) dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); dmae->dst_addr_lo = bp->port.port_stx >> 2; dmae->dst_addr_hi = 0; - dmae->len = bnx2x_get_port_stats_dma_len(bp); + dmae->len = sizeof(struct host_port_stats) >> 2; dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; dmae->comp_addr_hi = 0; dmae->comp_val = 1; @@ -552,25 +540,6 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp) UPDATE_STAT64(tx_stat_gterr, tx_stat_dot3statsinternalmactransmiterrors); UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); - - /* collect PFC stats */ - DIFF_64(diff.hi, new->tx_stat_gtpp_hi, - pstats->pfc_frames_tx_hi, - diff.lo, new->tx_stat_gtpp_lo, - pstats->pfc_frames_tx_lo); - pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi; - pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo; - ADD_64(pstats->pfc_frames_tx_hi, diff.hi, - pstats->pfc_frames_tx_lo, diff.lo); - - DIFF_64(diff.hi, new->rx_stat_grpp_hi, - pstats->pfc_frames_rx_hi, - diff.lo, new->rx_stat_grpp_lo, - pstats->pfc_frames_rx_lo); - pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi; - pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo; - ADD_64(pstats->pfc_frames_rx_hi, diff.hi, - pstats->pfc_frames_rx_lo, diff.lo); } estats->pause_frames_received_hi = @@ -582,15 +551,6 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp) pstats->mac_stx[1].tx_stat_outxoffsent_hi; estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo; - - estats->pfc_frames_received_hi = - pstats->pfc_frames_rx_hi; - estats->pfc_frames_received_lo = - pstats->pfc_frames_rx_lo; - estats->pfc_frames_sent_hi = - pstats->pfc_frames_tx_hi; - estats->pfc_frames_sent_lo = - pstats->pfc_frames_tx_lo; } static void bnx2x_mstat_stats_update(struct bnx2x *bp) @@ -611,11 +571,6 @@ static void 
bnx2x_mstat_stats_update(struct bnx2x *bp) ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent); ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone); - /* collect pfc stats */ - ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi, - pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo); - ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi, - pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo); ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets); ADD_STAT64(stats_tx.tx_gt127, @@ -673,15 +628,6 @@ static void bnx2x_mstat_stats_update(struct bnx2x *bp) pstats->mac_stx[1].tx_stat_outxoffsent_hi; estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo; - - estats->pfc_frames_received_hi = - pstats->pfc_frames_rx_hi; - estats->pfc_frames_received_lo = - pstats->pfc_frames_rx_lo; - estats->pfc_frames_sent_hi = - pstats->pfc_frames_tx_hi; - estats->pfc_frames_sent_lo = - pstats->pfc_frames_tx_lo; } static void bnx2x_emac_stats_update(struct bnx2x *bp) @@ -794,7 +740,7 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) estats->brb_drop_hi = pstats->brb_drop_hi; estats->brb_drop_lo = pstats->brb_drop_lo; - pstats->host_port_stats_counter++; + pstats->host_port_stats_start = ++pstats->host_port_stats_end; if (!BP_NOMCP(bp)) { u32 nig_timer_max = @@ -1319,7 +1265,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp) dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); dmae->dst_addr_lo = bp->port.port_stx >> 2; dmae->dst_addr_hi = 0; - dmae->len = bnx2x_get_port_stats_dma_len(bp); + dmae->len = sizeof(struct host_port_stats) >> 2; if (bp->func_stx) { dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; dmae->comp_addr_hi = 0; @@ -1403,14 +1349,12 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) enum bnx2x_stats_state state; if (unlikely(bp->panic)) return; - + bnx2x_stats_stm[bp->stats_state][event].action(bp); spin_lock_bh(&bp->stats_lock); state = bp->stats_state; bp->stats_state = bnx2x_stats_stm[state][event].next_state; spin_unlock_bh(&bp->stats_lock); - bnx2x_stats_stm[state][event].action(bp); - if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", state, event, bp->stats_state); @@ -1436,7 +1380,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp) dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); dmae->dst_addr_lo = bp->port.port_stx >> 2; dmae->dst_addr_hi = 0; - dmae->len = bnx2x_get_port_stats_dma_len(bp); + dmae->len = sizeof(struct host_port_stats) >> 2; dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); dmae->comp_val = DMAE_COMP_VAL; @@ -1513,7 +1457,6 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp) static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp) { int i; - int first_queue_query_index; struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr; dma_addr_t cur_data_offset; @@ -1569,40 +1512,14 @@ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp) cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset)); cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset)); - /**** FCoE FW statistics data ****/ - if (!NO_FCOE(bp)) { - cur_data_offset = bp->fw_stats_data_mapping + - offsetof(struct bnx2x_fw_stats_data, fcoe); - - cur_query_entry = - &bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX]; - - cur_query_entry->kind = STATS_TYPE_FCOE; - /* For FCoE query index is a DONT CARE */ - 
cur_query_entry->index = BP_PORT(bp); - cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); - cur_query_entry->address.hi = - cpu_to_le32(U64_HI(cur_data_offset)); - cur_query_entry->address.lo = - cpu_to_le32(U64_LO(cur_data_offset)); - } - /**** Clients' queries ****/ cur_data_offset = bp->fw_stats_data_mapping + offsetof(struct bnx2x_fw_stats_data, queue_stats); - /* first queue query index depends whether FCoE offloaded request will - * be included in the ramrod - */ - if (!NO_FCOE(bp)) - first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX; - else - first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1; - for_each_eth_queue(bp, i) { cur_query_entry = &bp->fw_stats_req-> - query[first_queue_query_index + i]; + query[BNX2X_FIRST_QUEUE_QUERY_IDX + i]; cur_query_entry->kind = STATS_TYPE_QUEUE; cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]); @@ -1614,21 +1531,6 @@ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp) cur_data_offset += sizeof(struct per_queue_stats); } - - /* add FCoE queue query if needed */ - if (!NO_FCOE(bp)) { - cur_query_entry = - &bp->fw_stats_req-> - query[first_queue_query_index + i]; - - cur_query_entry->kind = STATS_TYPE_QUEUE; - cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]); - cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); - cur_query_entry->address.hi = - cpu_to_le32(U64_HI(cur_data_offset)); - cur_query_entry->address.lo = - cpu_to_le32(U64_LO(cur_data_offset)); - } } void bnx2x_stats_init(struct bnx2x *bp) diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 683deb053109..5d8ce2f6afef 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -193,12 +193,6 @@ struct bnx2x_eth_stats { u32 total_tpa_aggregated_frames_lo; u32 total_tpa_bytes_hi; u32 total_tpa_bytes_lo; - - /* PFC */ - u32 pfc_frames_received_hi; - u32 pfc_frames_received_lo; - u32 pfc_frames_sent_hi; - u32 pfc_frames_sent_lo; }; diff --git a/trunk/drivers/net/ethernet/broadcom/cnic.c b/trunk/drivers/net/ethernet/broadcom/cnic.c index dd3a0a232ea0..6f10c6939834 100644 --- a/trunk/drivers/net/ethernet/broadcom/cnic.c +++ b/trunk/drivers/net/ethernet/broadcom/cnic.c @@ -250,21 +250,6 @@ static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off) return io->data; } -static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - struct drv_ctl_info info; - - if (reg) - info.cmd = DRV_CTL_ULP_REGISTER_CMD; - else - info.cmd = DRV_CTL_ULP_UNREGISTER_CMD; - - info.data.ulp_type = ulp_type; - ethdev->drv_ctl(dev->netdev, &info); -} - static int cnic_in_use(struct cnic_sock *csk) { return test_bit(SK_F_INUSE, &csk->flags); @@ -521,7 +506,7 @@ int cnic_unregister_driver(int ulp_type) } read_unlock(&cnic_dev_lock); - RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL); + rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL); mutex_unlock(&cnic_lock); synchronize_rcu(); @@ -578,8 +563,6 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type, mutex_unlock(&cnic_lock); - cnic_ulp_ctl(dev, ulp_type, true); - return 0; } @@ -596,7 +579,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type) } mutex_lock(&cnic_lock); if (rcu_dereference(cp->ulp_ops[ulp_type])) { - RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL); + rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL); cnic_put(dev); } else { pr_err("%s: device not 
registered to this ulp type %d\n", @@ -619,8 +602,6 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type) if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type])) netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n"); - cnic_ulp_ctl(dev, ulp_type, false); - return 0; } EXPORT_SYMBOL(cnic_unregister_driver); @@ -1361,7 +1342,7 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid, if (ret == 1) return 0; - return ret; + return -EBUSY; } static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type, @@ -1849,7 +1830,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], done: cqes[0] = (struct kcqe *) &kcqe; cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); - return 0; + return ret; } @@ -1947,7 +1928,7 @@ static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe) cqes[0] = (struct kcqe *) &kcqe; cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); - return 0; + return ret; } static void cnic_init_storm_conn_bufs(struct cnic_dev *dev, @@ -2513,57 +2494,6 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe) return ret; } -static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe) -{ - struct cnic_local *cp = dev->cnic_priv; - struct kcqe kcqe; - struct kcqe *cqes[1]; - u32 cid; - u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); - u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK; - int ulp_type; - - cid = kwqe->kwqe_info0; - memset(&kcqe, 0, sizeof(kcqe)); - - if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) { - ulp_type = CNIC_ULP_ISCSI; - if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN) - cid = kwqe->kwqe_info1; - - kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT; - kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI; - kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR; - kcqe.kcqe_info2 = cid; - cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0); - - } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) { - struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe; - u32 kcqe_op; - - ulp_type = CNIC_ULP_L4; - if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1) - kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE; - else if (opcode == L4_KWQE_OPCODE_VALUE_RESET) - kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP; - else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE) - kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; - else - return; - - kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) | - KCQE_FLAGS_LAYER_MASK_L4; - l4kcqe->status = L4_KCQE_COMPLETION_STATUS_NIC_ERROR; - l4kcqe->cid = cid; - cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id); - } else { - return; - } - - cqes[0] = (struct kcqe *) &kcqe; - cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1); -} - static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], u32 num_wqes) { @@ -2621,17 +2551,9 @@ static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev, opcode); break; } - if (ret < 0) { + if (ret < 0) netdev_err(dev->netdev, "KWQE(0x%x) failed\n", opcode); - - /* Possibly bnx2x parity error, send completion - * to ulp drivers with error code to speed up - * cleanup and reset recovery. 
- */ - if (ret == -EIO || ret == -EAGAIN) - cnic_bnx2x_kwqe_err(dev, kwqe); - } i += work; } return 0; @@ -3130,26 +3052,9 @@ static void cnic_ulp_start(struct cnic_dev *dev) } } -static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_ulp_ops *ulp_ops; - int rc; - - mutex_lock(&cnic_lock); - ulp_ops = cnic_ulp_tbl_prot(ulp_type); - if (ulp_ops && ulp_ops->cnic_get_stats) - rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]); - else - rc = -ENODEV; - mutex_unlock(&cnic_lock); - return rc; -} - static int cnic_ctl(void *data, struct cnic_ctl_info *info) { struct cnic_dev *dev = data; - int ulp_type = CNIC_ULP_ISCSI; switch (info->cmd) { case CNIC_CTL_STOP_CMD: @@ -3195,15 +3100,6 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info) } break; } - case CNIC_CTL_FCOE_STATS_GET_CMD: - ulp_type = CNIC_ULP_FCOE; - /* fall through */ - case CNIC_CTL_ISCSI_STATS_GET_CMD: - cnic_hold(dev); - cnic_copy_ulp_stats(dev, ulp_type); - cnic_put(dev); - break; - default: return -EINVAL; } @@ -3579,7 +3475,7 @@ static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, struct flowi6 fl6; memset(&fl6, 0, sizeof(fl6)); - fl6.daddr = dst_addr->sin6_addr; + ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr); if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) fl6.flowi6_oif = dst_addr->sin6_scope_id; @@ -3908,9 +3804,6 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) case L4_KCQE_OPCODE_VALUE_RESET_COMP: case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: - if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_NIC_ERROR) - set_bit(SK_F_HW_ERR, &csk->flags); - cp->close_conn(csk, opcode); break; @@ -4038,9 +3931,7 @@ static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode) case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: case L4_KCQE_OPCODE_VALUE_RESET_COMP: if (cnic_ready_to_close(csk, opcode)) { - if (test_bit(SK_F_HW_ERR, &csk->flags)) - close_complete = 1; - else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) + if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE; else close_complete = 1; @@ -4933,7 +4824,6 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev) int func = CNIC_FUNC(cp), ret; u32 pfid; - dev->stats_addr = ethdev->addr_drv_info_to_mcp; cp->port_mode = CHIP_PORT_MODE_NONE; if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { @@ -5244,7 +5134,7 @@ static void cnic_stop_hw(struct cnic_dev *dev) } cnic_shutdown_rings(dev); clear_bit(CNIC_F_CNIC_UP, &dev->flags); - RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL); + rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL); synchronize_rcu(); cnic_cm_shutdown(dev); cp->stop_hw(dev); @@ -5398,8 +5288,6 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) cdev->pcidev = pdev; cp->chip_id = ethdev->chip_id; - cdev->stats_addr = ethdev->addr_drv_info_to_mcp; - if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)) cdev->max_iscsi_conn = ethdev->max_iscsi_conn; if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) && diff --git a/trunk/drivers/net/ethernet/broadcom/cnic_defs.h b/trunk/drivers/net/ethernet/broadcom/cnic_defs.h index 86936f6b6dbc..239de898f071 100644 --- a/trunk/drivers/net/ethernet/broadcom/cnic_defs.h +++ b/trunk/drivers/net/ethernet/broadcom/cnic_defs.h @@ -85,7 +85,6 @@ /* KCQ (kernel completion queue) completion status */ #define L4_KCQE_COMPLETION_STATUS_SUCCESS (0) -#define L4_KCQE_COMPLETION_STATUS_NIC_ERROR (4) #define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93) #define 
L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL (0x83) diff --git a/trunk/drivers/net/ethernet/broadcom/cnic_if.h b/trunk/drivers/net/ethernet/broadcom/cnic_if.h index 1517763d4e55..79443e0dbf96 100644 --- a/trunk/drivers/net/ethernet/broadcom/cnic_if.h +++ b/trunk/drivers/net/ethernet/broadcom/cnic_if.h @@ -1,6 +1,6 @@ /* cnic_if.h: Broadcom CNIC core network driver. * - * Copyright (c) 2006-2012 Broadcom Corporation + * Copyright (c) 2006-2011 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -12,8 +12,8 @@ #ifndef CNIC_IF_H #define CNIC_IF_H -#define CNIC_MODULE_VERSION "2.5.8" -#define CNIC_MODULE_RELDATE "Jan 3, 2012" +#define CNIC_MODULE_VERSION "2.5.7" +#define CNIC_MODULE_RELDATE "July 20, 2011" #define CNIC_ULP_RDMA 0 #define CNIC_ULP_ISCSI 1 @@ -86,8 +86,6 @@ struct kcqe { #define CNIC_CTL_START_CMD 2 #define CNIC_CTL_COMPLETION_CMD 3 #define CNIC_CTL_STOP_ISCSI_CMD 4 -#define CNIC_CTL_FCOE_STATS_GET_CMD 5 -#define CNIC_CTL_ISCSI_STATS_GET_CMD 6 #define DRV_CTL_IO_WR_CMD 0x101 #define DRV_CTL_IO_RD_CMD 0x102 @@ -98,8 +96,6 @@ struct kcqe { #define DRV_CTL_STOP_L2_CMD 0x107 #define DRV_CTL_RET_L2_SPQ_CREDIT_CMD 0x10c #define DRV_CTL_ISCSI_STOPPED_CMD 0x10d -#define DRV_CTL_ULP_REGISTER_CMD 0x10e -#define DRV_CTL_ULP_UNREGISTER_CMD 0x10f struct cnic_ctl_completion { u32 cid; @@ -137,7 +133,6 @@ struct drv_ctl_info { struct drv_ctl_spq_credit credit; struct drv_ctl_io io; struct drv_ctl_l2_ring ring; - int ulp_type; char bytes[MAX_DRV_CTL_DATA]; } data; }; @@ -206,7 +201,6 @@ struct cnic_eth_dev { struct kwqe_16 *[], u32); int (*drv_ctl)(struct net_device *, struct drv_ctl_info *); unsigned long reserved1[2]; - union drv_info_to_mcp *addr_drv_info_to_mcp; }; struct cnic_sockaddr { @@ -261,7 +255,6 @@ struct cnic_sock { #define SK_F_CONNECT_START 4 #define SK_F_IPV6 5 #define SK_F_CLOSING 7 -#define SK_F_HW_ERR 8 atomic_t ref_count; u32 state; @@ -304,8 +297,6 @@ struct cnic_dev { int max_fcoe_conn; int max_rdma_conn; - union drv_info_to_mcp *stats_addr; - void *cnic_priv; }; @@ -335,7 +326,6 @@ struct cnic_ulp_ops { void (*cm_remote_abort)(struct cnic_sock *); int (*iscsi_nl_send_msg)(void *ulp_ctx, u32 msg_type, char *data, u16 data_size); - int (*cnic_get_stats)(void *ulp_ctx); struct module *owner; atomic_t ref_count; }; diff --git a/trunk/drivers/net/ethernet/broadcom/sb1250-mac.c b/trunk/drivers/net/ethernet/broadcom/sb1250-mac.c index 8fa7abc53ec6..0a1d7f279fc8 100644 --- a/trunk/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/trunk/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -163,6 +163,7 @@ enum sbmac_state { #define SBMAC_MAX_TXDESCR 256 #define SBMAC_MAX_RXDESCR 256 +#define ETHER_ADDR_LEN 6 #define ENET_PACKET_SIZE 1518 /*#define ENET_PACKET_SIZE 9216 */ @@ -265,7 +266,7 @@ struct sbmac_softc { int sbm_pause; /* current pause setting */ int sbm_link; /* current link state */ - unsigned char sbm_hwaddr[ETH_ALEN]; + unsigned char sbm_hwaddr[ETHER_ADDR_LEN]; struct sbmacdma sbm_txdma; /* only channel 0 for now */ struct sbmacdma sbm_rxdma; @@ -2675,4 +2676,15 @@ static struct platform_driver sbmac_driver = { }, }; -module_platform_driver(sbmac_driver); +static int __init sbmac_init_module(void) +{ + return platform_driver_register(&sbmac_driver); +} + +static void __exit sbmac_cleanup_module(void) +{ + platform_driver_unregister(&sbmac_driver); +} + +module_init(sbmac_init_module); +module_exit(sbmac_cleanup_module); diff --git 
a/trunk/drivers/net/ethernet/broadcom/tg3.c b/trunk/drivers/net/ethernet/broadcom/tg3.c index 076e02a415a0..bf4074167d6a 100644 --- a/trunk/drivers/net/ethernet/broadcom/tg3.c +++ b/trunk/drivers/net/ethernet/broadcom/tg3.c @@ -89,10 +89,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) #define DRV_MODULE_NAME "tg3" #define TG3_MAJ_NUM 3 -#define TG3_MIN_NUM 122 +#define TG3_MIN_NUM 121 #define DRV_MODULE_VERSION \ __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) -#define DRV_MODULE_RELDATE "December 7, 2011" +#define DRV_MODULE_RELDATE "November 2, 2011" #define RESET_KIND_SHUTDOWN 0 #define RESET_KIND_INIT 1 @@ -135,6 +135,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) (tg3_flag(tp, LRG_PROD_RING_CAP) ? \ TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700) #define TG3_DEF_RX_JUMBO_RING_PENDING 100 +#define TG3_RSS_INDIR_TBL_SIZE 128 /* Do not place this n-ring entries value into the tp struct itself, * we really want to expose these constants to GCC so that modulo et @@ -193,13 +194,12 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) #if (NET_IP_ALIGN != 0) #define TG3_RX_OFFSET(tp) ((tp)->rx_offset) #else -#define TG3_RX_OFFSET(tp) (NET_SKB_PAD) +#define TG3_RX_OFFSET(tp) 0 #endif /* minimum number of free TX descriptors required to wake up TX process */ #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) -#define TG3_TX_BD_DMA_MAX_2K 2048 -#define TG3_TX_BD_DMA_MAX_4K 4096 +#define TG3_TX_BD_DMA_MAX 4096 #define TG3_RAW_IP_ALIGN 2 @@ -1670,6 +1670,22 @@ static void tg3_link_report(struct tg3 *tp) } } +static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl) +{ + u16 miireg; + + if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) + miireg = ADVERTISE_PAUSE_CAP; + else if (flow_ctrl & FLOW_CTRL_TX) + miireg = ADVERTISE_PAUSE_ASYM; + else if (flow_ctrl & FLOW_CTRL_RX) + miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + else + miireg = 0; + + return miireg; +} + static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) { u16 miireg; @@ -1690,12 +1706,18 @@ static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) { u8 cap = 0; - if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) { - cap = FLOW_CTRL_TX | FLOW_CTRL_RX; - } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) { - if (lcladv & ADVERTISE_1000XPAUSE) - cap = FLOW_CTRL_RX; - if (rmtadv & ADVERTISE_1000XPAUSE) + if (lcladv & ADVERTISE_1000XPAUSE) { + if (lcladv & ADVERTISE_1000XPSE_ASYM) { + if (rmtadv & LPA_1000XPAUSE) + cap = FLOW_CTRL_TX | FLOW_CTRL_RX; + else if (rmtadv & LPA_1000XPAUSE_ASYM) + cap = FLOW_CTRL_RX; + } else { + if (rmtadv & LPA_1000XPAUSE) + cap = FLOW_CTRL_TX | FLOW_CTRL_RX; + } + } else if (lcladv & ADVERTISE_1000XPSE_ASYM) { + if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM)) cap = FLOW_CTRL_TX; } @@ -1770,7 +1792,7 @@ static void tg3_adjust_link(struct net_device *dev) if (phydev->duplex == DUPLEX_HALF) mac_mode |= MAC_MODE_HALF_DUPLEX; else { - lcl_adv = mii_advertise_flowctrl( + lcl_adv = tg3_advert_flowctrl_1000T( tp->link_config.flowctrl); if (phydev->pause) @@ -2138,7 +2160,7 @@ static void tg3_phy_eee_enable(struct tg3 *tp) if (tp->link_config.active_speed == SPEED_1000 && (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - tg3_flag(tp, 57765_CLASS)) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { val = MII_TG3_DSP_TAP26_ALNOKO | MII_TG3_DSP_TAP26_RMRXSTO; @@ -2657,7 
+2679,8 @@ static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol) bool need_vaux = false; /* The GPIOs do something completely different on 57765. */ - if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS)) + if (!tg3_flag(tp, IS_NIC) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) return; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || @@ -3571,24 +3594,37 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) u32 val, new_adv; new_adv = ADVERTISE_CSMA; - new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL; - new_adv |= mii_advertise_flowctrl(flowctrl); + if (advertise & ADVERTISED_10baseT_Half) + new_adv |= ADVERTISE_10HALF; + if (advertise & ADVERTISED_10baseT_Full) + new_adv |= ADVERTISE_10FULL; + if (advertise & ADVERTISED_100baseT_Half) + new_adv |= ADVERTISE_100HALF; + if (advertise & ADVERTISED_100baseT_Full) + new_adv |= ADVERTISE_100FULL; + + new_adv |= tg3_advert_flowctrl_1000T(flowctrl); err = tg3_writephy(tp, MII_ADVERTISE, new_adv); if (err) goto done; - if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { - new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise); + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + goto done; - if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) - new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; + new_adv = 0; + if (advertise & ADVERTISED_1000baseT_Half) + new_adv |= ADVERTISE_1000HALF; + if (advertise & ADVERTISED_1000baseT_Full) + new_adv |= ADVERTISE_1000FULL; - err = tg3_writephy(tp, MII_CTRL1000, new_adv); - if (err) - goto done; - } + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) + new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; + + err = tg3_writephy(tp, MII_CTRL1000, new_adv); + if (err) + goto done; if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) goto done; @@ -3614,7 +3650,6 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) switch (GET_ASIC_REV(tp->pci_chip_rev_id)) { case ASIC_REV_5717: case ASIC_REV_57765: - case ASIC_REV_57766: case ASIC_REV_5719: /* If we advertised any eee advertisements above... 
*/ if (val) @@ -3751,61 +3786,76 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp) return err; } -static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv) +static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask) { - u32 advmsk, tgtadv, advertising; + u32 adv_reg, all_mask = 0; - advertising = tp->link_config.advertising; - tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL; - - advmsk = ADVERTISE_ALL; - if (tp->link_config.active_duplex == DUPLEX_FULL) { - tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl); - advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; - } + if (mask & ADVERTISED_10baseT_Half) + all_mask |= ADVERTISE_10HALF; + if (mask & ADVERTISED_10baseT_Full) + all_mask |= ADVERTISE_10FULL; + if (mask & ADVERTISED_100baseT_Half) + all_mask |= ADVERTISE_100HALF; + if (mask & ADVERTISED_100baseT_Full) + all_mask |= ADVERTISE_100FULL; - if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) - return false; + if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg)) + return 0; - if ((*lcladv & advmsk) != tgtadv) - return false; + if ((adv_reg & ADVERTISE_ALL) != all_mask) + return 0; if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { u32 tg3_ctrl; - tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising); + all_mask = 0; + if (mask & ADVERTISED_1000baseT_Half) + all_mask |= ADVERTISE_1000HALF; + if (mask & ADVERTISED_1000baseT_Full) + all_mask |= ADVERTISE_1000FULL; if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl)) - return false; + return 0; tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL); - if (tg3_ctrl != tgtadv) - return false; + if (tg3_ctrl != all_mask) + return 0; } - return true; + return 1; } -static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv) +static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv) { - u32 lpeth = 0; + u32 curadv, reqadv; - if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { - u32 val; - - if (tg3_readphy(tp, MII_STAT1000, &val)) - return false; + if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) + return 1; - lpeth = mii_stat1000_to_ethtool_lpa_t(val); - } + curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl); - if (tg3_readphy(tp, MII_LPA, rmtadv)) - return false; + if (tp->link_config.active_duplex == DUPLEX_FULL) { + if (curadv != reqadv) + return 0; - lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv); - tp->link_config.rmt_adv = lpeth; + if (tg3_flag(tp, PAUSE_AUTONEG)) + tg3_readphy(tp, MII_LPA, rmtadv); + } else { + /* Reprogram the advertisement register, even if it + * does not affect the current link. If the link + * gets renegotiated in the future, we can save an + * additional renegotiation cycle by advertising + * it correctly in the first place. 
+ */ + if (curadv != reqadv) { + *lcladv &= ~(ADVERTISE_PAUSE_CAP | + ADVERTISE_PAUSE_ASYM); + tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv); + } + } - return true; + return 1; } static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) @@ -3911,8 +3961,6 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) current_link_up = 0; current_speed = SPEED_INVALID; current_duplex = DUPLEX_INVALID; - tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE; - tp->link_config.rmt_adv = 0; if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { err = tg3_phy_auxctl_read(tp, @@ -3968,9 +4016,12 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) if (tp->link_config.autoneg == AUTONEG_ENABLE) { if ((bmcr & BMCR_ANENABLE) && - tg3_phy_copper_an_config_ok(tp, &lcl_adv) && - tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv)) - current_link_up = 1; + tg3_copper_is_advertising_all(tp, + tp->link_config.advertising)) { + if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv, + &rmt_adv)) + current_link_up = 1; + } } else { if (!(bmcr & BMCR_ANENABLE) && tp->link_config.speed == current_speed && @@ -3982,22 +4033,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) } if (current_link_up == 1 && - tp->link_config.active_duplex == DUPLEX_FULL) { - u32 reg, bit; - - if (tp->phy_flags & TG3_PHYFLG_IS_FET) { - reg = MII_TG3_FET_GEN_STAT; - bit = MII_TG3_FET_GEN_STAT_MDIXSTAT; - } else { - reg = MII_TG3_EXT_STAT; - bit = MII_TG3_EXT_STAT_MDIX; - } - - if (!tg3_readphy(tp, reg, &val) && (val & bit)) - tp->phy_flags |= TG3_PHYFLG_MDIX_STATE; - + tp->link_config.active_duplex == DUPLEX_FULL) tg3_setup_flow_control(tp, lcl_adv, rmt_adv); - } } relink: @@ -4606,9 +4643,6 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) remote_adv |= LPA_1000XPAUSE_ASYM; - tp->link_config.rmt_adv = - mii_adv_to_ethtool_adv_x(remote_adv); - tg3_setup_flow_control(tp, local_adv, remote_adv); current_link_up = 1; tp->serdes_counter = 0; @@ -4680,9 +4714,6 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) if (rxflags & MR_LP_ADV_ASYM_PAUSE) remote_adv |= LPA_1000XPAUSE_ASYM; - tp->link_config.rmt_adv = - mii_adv_to_ethtool_adv_x(remote_adv); - tg3_setup_flow_control(tp, local_adv, remote_adv); current_link_up = 1; @@ -4765,7 +4796,6 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) udelay(40); current_link_up = 0; - tp->link_config.rmt_adv = 0; mac_status = tr32(MAC_STATUS); if (tg3_flag(tp, HW_AUTONEG)) @@ -4857,7 +4887,6 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) current_link_up = 0; current_speed = SPEED_INVALID; current_duplex = DUPLEX_INVALID; - tp->link_config.rmt_adv = 0; err |= tg3_readphy(tp, MII_BMSR, &bmsr); err |= tg3_readphy(tp, MII_BMSR, &bmsr); @@ -4874,19 +4903,23 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { /* do nothing, just check for link up at the end */ } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { - u32 adv, newadv; + u32 adv, new_adv; err |= tg3_readphy(tp, MII_ADVERTISE, &adv); - newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | - ADVERTISE_1000XPAUSE | - ADVERTISE_1000XPSE_ASYM | - ADVERTISE_SLCT); + new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | + ADVERTISE_1000XPAUSE | + ADVERTISE_1000XPSE_ASYM | + ADVERTISE_SLCT); - newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); - newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising); + 
new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); - if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) { - tg3_writephy(tp, MII_ADVERTISE, newadv); + if (tp->link_config.advertising & ADVERTISED_1000baseT_Half) + new_adv |= ADVERTISE_1000XHALF; + if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) + new_adv |= ADVERTISE_1000XFULL; + + if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) { + tg3_writephy(tp, MII_ADVERTISE, new_adv); bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; tg3_writephy(tp, MII_BMCR, bmcr); @@ -4964,9 +4997,6 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) current_duplex = DUPLEX_FULL; else current_duplex = DUPLEX_HALF; - - tp->link_config.rmt_adv = - mii_adv_to_ethtool_adv_x(remote_adv); } else if (!tg3_flag(tp, 5780_CLASS)) { /* Link is up via parallel detect */ } else { @@ -5290,7 +5320,6 @@ static void tg3_tx(struct tg3_napi *tnapi) u32 sw_idx = tnapi->tx_cons; struct netdev_queue *txq; int index = tnapi - tp->napi; - unsigned int pkts_compl = 0, bytes_compl = 0; if (tg3_flag(tp, ENABLE_TSS)) index--; @@ -5341,9 +5370,6 @@ static void tg3_tx(struct tg3_napi *tnapi) sw_idx = NEXT_TX(sw_idx); } - pkts_compl++; - bytes_compl += skb->len; - dev_kfree_skb(skb); if (unlikely(tx_bug)) { @@ -5352,8 +5378,6 @@ static void tg3_tx(struct tg3_napi *tnapi) } } - netdev_completed_queue(tp->dev, pkts_compl, bytes_compl); - tnapi->tx_cons = sw_idx; /* Need to make the tx_cons update visible to tg3_start_xmit() @@ -5373,15 +5397,15 @@ static void tg3_tx(struct tg3_napi *tnapi) } } -static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) +static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) { - if (!ri->data) + if (!ri->skb) return; pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping), map_sz, PCI_DMA_FROMDEVICE); - kfree(ri->data); - ri->data = NULL; + dev_kfree_skb_any(ri->skb); + ri->skb = NULL; } /* Returns size of skb allocated or < 0 on error. @@ -5395,28 +5419,28 @@ static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) * buffers the cpu only reads the last cacheline of the RX descriptor * (to fetch the error flags, vlan tag, checksum, and opaque cookie). */ -static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, +static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, u32 opaque_key, u32 dest_idx_unmasked) { struct tg3_rx_buffer_desc *desc; struct ring_info *map; - u8 *data; + struct sk_buff *skb; dma_addr_t mapping; - int skb_size, data_size, dest_idx; + int skb_size, dest_idx; switch (opaque_key) { case RXD_OPAQUE_RING_STD: dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; desc = &tpr->rx_std[dest_idx]; map = &tpr->rx_std_buffers[dest_idx]; - data_size = tp->rx_pkt_map_sz; + skb_size = tp->rx_pkt_map_sz; break; case RXD_OPAQUE_RING_JUMBO: dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; desc = &tpr->rx_jmb[dest_idx].std; map = &tpr->rx_jmb_buffers[dest_idx]; - data_size = TG3_RX_JMB_MAP_SZ; + skb_size = TG3_RX_JMB_MAP_SZ; break; default: @@ -5429,33 +5453,31 @@ static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, * Callers depend upon this behavior and assume that * we leave everything unchanged if we fail. 
*/ - skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - data = kmalloc(skb_size, GFP_ATOMIC); - if (!data) + skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp)); + if (skb == NULL) return -ENOMEM; - mapping = pci_map_single(tp->pdev, - data + TG3_RX_OFFSET(tp), - data_size, + skb_reserve(skb, TG3_RX_OFFSET(tp)); + + mapping = pci_map_single(tp->pdev, skb->data, skb_size, PCI_DMA_FROMDEVICE); if (pci_dma_mapping_error(tp->pdev, mapping)) { - kfree(data); + dev_kfree_skb(skb); return -EIO; } - map->data = data; + map->skb = skb; dma_unmap_addr_set(map, mapping, mapping); desc->addr_hi = ((u64)mapping >> 32); desc->addr_lo = ((u64)mapping & 0xffffffff); - return data_size; + return skb_size; } /* We only need to move over in the address because the other * members of the RX descriptor are invariant. See notes above - * tg3_alloc_rx_data for full details. + * tg3_alloc_rx_skb for full details. */ static void tg3_recycle_rx(struct tg3_napi *tnapi, struct tg3_rx_prodring_set *dpr, @@ -5489,7 +5511,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi, return; } - dest_map->data = src_map->data; + dest_map->skb = src_map->skb; dma_unmap_addr_set(dest_map, mapping, dma_unmap_addr(src_map, mapping)); dest_desc->addr_hi = src_desc->addr_hi; @@ -5500,7 +5522,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi, */ smp_wmb(); - src_map->data = NULL; + src_map->skb = NULL; } /* The RX ring scheme is composed of multiple rings which post fresh @@ -5554,20 +5576,19 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) struct sk_buff *skb; dma_addr_t dma_addr; u32 opaque_key, desc_idx, *post_ptr; - u8 *data; desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; if (opaque_key == RXD_OPAQUE_RING_STD) { ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx]; dma_addr = dma_unmap_addr(ri, mapping); - data = ri->data; + skb = ri->skb; post_ptr = &std_prod_idx; rx_std_posted++; } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx]; dma_addr = dma_unmap_addr(ri, mapping); - data = ri->data; + skb = ri->skb; post_ptr = &jmb_prod_idx; } else goto next_pkt_nopost; @@ -5585,14 +5606,13 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) goto next_pkt; } - prefetch(data + TG3_RX_OFFSET(tp)); len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - ETH_FCS_LEN; if (len > TG3_RX_COPY_THRESH(tp)) { int skb_size; - skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key, + skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key, *post_ptr); if (skb_size < 0) goto drop_it; @@ -5600,37 +5620,35 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) pci_unmap_single(tp->pdev, dma_addr, skb_size, PCI_DMA_FROMDEVICE); - skb = build_skb(data); - if (!skb) { - kfree(data); - goto drop_it_no_recycle; - } - skb_reserve(skb, TG3_RX_OFFSET(tp)); - /* Ensure that the update to the data happens + /* Ensure that the update to the skb happens * after the usage of the old DMA mapping. 
*/ smp_wmb(); - ri->data = NULL; + ri->skb = NULL; + skb_put(skb, len); } else { + struct sk_buff *copy_skb; + tg3_recycle_rx(tnapi, tpr, opaque_key, desc_idx, *post_ptr); - skb = netdev_alloc_skb(tp->dev, - len + TG3_RAW_IP_ALIGN); - if (skb == NULL) + copy_skb = netdev_alloc_skb(tp->dev, len + + TG3_RAW_IP_ALIGN); + if (copy_skb == NULL) goto drop_it_no_recycle; - skb_reserve(skb, TG3_RAW_IP_ALIGN); + skb_reserve(copy_skb, TG3_RAW_IP_ALIGN); + skb_put(copy_skb, len); pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); - memcpy(skb->data, - data + TG3_RX_OFFSET(tp), - len); + skb_copy_from_linear_data(skb, copy_skb->data, len); pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + + /* We'll reuse the original ring buffer. */ + skb = copy_skb; } - skb_put(skb, len); if ((tp->dev->features & NETIF_F_RXCSUM) && (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) @@ -5769,7 +5787,7 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp, di = dpr->rx_std_prod_idx; for (i = di; i < di + cpycnt; i++) { - if (dpr->rx_std_buffers[i].data) { + if (dpr->rx_std_buffers[i].skb) { cpycnt = i - di; err = -ENOSPC; break; @@ -5827,7 +5845,7 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp, di = dpr->rx_jmb_prod_idx; for (i = di; i < di + cpycnt; i++) { - if (dpr->rx_jmb_buffers[i].data) { + if (dpr->rx_jmb_buffers[i].skb) { cpycnt = i - di; err = -ENOSPC; break; @@ -6425,25 +6443,25 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, bool hwbug = false; if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) - hwbug = true; + hwbug = 1; if (tg3_4g_overflow_test(map, len)) - hwbug = true; + hwbug = 1; if (tg3_40bit_overflow_test(tp, map, len)) - hwbug = true; + hwbug = 1; - if (tp->dma_limit) { + if (tg3_flag(tp, 4K_FIFO_LIMIT)) { u32 prvidx = *entry; u32 tmp_flag = flags & ~TXD_FLAG_END; - while (len > tp->dma_limit && *budget) { - u32 frag_len = tp->dma_limit; - len -= tp->dma_limit; + while (len > TG3_TX_BD_DMA_MAX && *budget) { + u32 frag_len = TG3_TX_BD_DMA_MAX; + len -= TG3_TX_BD_DMA_MAX; /* Avoid the 8byte DMA problem */ if (len <= 8) { - len += tp->dma_limit / 2; - frag_len = tp->dma_limit / 2; + len += TG3_TX_BD_DMA_MAX / 2; + frag_len = TG3_TX_BD_DMA_MAX / 2; } tnapi->tx_buffers[*entry].fragmented = true; @@ -6464,7 +6482,7 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, *budget -= 1; *entry = NEXT_TX(*entry); } else { - hwbug = true; + hwbug = 1; tnapi->tx_buffers[prvidx].fragmented = false; } } @@ -6798,7 +6816,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) } skb_tx_timestamp(skb); - netdev_sent_queue(tp->dev, skb->len); /* Packets are ready, update Tx producer idx local and on card. 
*/ tw32_tx_mbox(tnapi->prodmbox, entry); @@ -6951,7 +6968,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk) return 0; } -static void tg3_set_loopback(struct net_device *dev, netdev_features_t features) +static void tg3_set_loopback(struct net_device *dev, u32 features) { struct tg3 *tp = netdev_priv(dev); @@ -6977,8 +6994,7 @@ static void tg3_set_loopback(struct net_device *dev, netdev_features_t features) } } -static netdev_features_t tg3_fix_features(struct net_device *dev, - netdev_features_t features) +static u32 tg3_fix_features(struct net_device *dev, u32 features) { struct tg3 *tp = netdev_priv(dev); @@ -6988,9 +7004,9 @@ static netdev_features_t tg3_fix_features(struct net_device *dev, return features; } -static int tg3_set_features(struct net_device *dev, netdev_features_t features) +static int tg3_set_features(struct net_device *dev, u32 features) { - netdev_features_t changed = dev->features ^ features; + u32 changed = dev->features ^ features; if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) tg3_set_loopback(dev, features); @@ -7066,14 +7082,14 @@ static void tg3_rx_prodring_free(struct tg3 *tp, if (tpr != &tp->napi[0].prodring) { for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; i = (i + 1) & tp->rx_std_ring_mask) - tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], + tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], tp->rx_pkt_map_sz); if (tg3_flag(tp, JUMBO_CAPABLE)) { for (i = tpr->rx_jmb_cons_idx; i != tpr->rx_jmb_prod_idx; i = (i + 1) & tp->rx_jmb_ring_mask) { - tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], + tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], TG3_RX_JMB_MAP_SZ); } } @@ -7082,12 +7098,12 @@ static void tg3_rx_prodring_free(struct tg3 *tp, } for (i = 0; i <= tp->rx_std_ring_mask; i++) - tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], + tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], tp->rx_pkt_map_sz); if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { for (i = 0; i <= tp->rx_jmb_ring_mask; i++) - tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], + tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], TG3_RX_JMB_MAP_SZ); } } @@ -7143,7 +7159,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, /* Now allocate fresh SKBs for each rx ring. */ for (i = 0; i < tp->rx_pending; i++) { - if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { + if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { netdev_warn(tp->dev, "Using a smaller RX standard ring. Only " "%d out of %d buffers were allocated " @@ -7175,7 +7191,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, } for (i = 0; i < tp->rx_jumbo_pending; i++) { - if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) { + if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) { netdev_warn(tp->dev, "Using a smaller RX jumbo ring. Only %d " "out of %d buffers were allocated " @@ -7281,7 +7297,6 @@ static void tg3_free_rings(struct tg3 *tp) dev_kfree_skb_any(skb); } } - netdev_reset_queue(tp->dev); } /* Initialize tx/rx rings for packet processing. 
@@ -7576,6 +7591,8 @@ static int tg3_abort_hw(struct tg3 *tp, int silent) if (tnapi->hw_status) memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); } + if (tp->hw_stats) + memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); return err; } @@ -7609,11 +7626,15 @@ static void tg3_restore_pci_state(struct tg3 *tp) pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); - if (!tg3_flag(tp, PCI_EXPRESS)) { - pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, - tp->pci_cacheline_sz); - pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, - tp->pci_lat_timer); + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) { + if (tg3_flag(tp, PCI_EXPRESS)) + pcie_set_readrq(tp->pdev, tp->pcie_readrq); + else { + pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, + tp->pci_cacheline_sz); + pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, + tp->pci_lat_timer); + } } /* Make sure PCI-X relaxed ordering bit is clear. */ @@ -7798,6 +7819,8 @@ static int tg3_chip_reset(struct tg3 *tp) pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL, val16); + pcie_set_readrq(tp->pdev, tp->pcie_readrq); + /* Clear error status */ pci_write_config_word(tp->pdev, pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA, @@ -7891,11 +7914,6 @@ static int tg3_chip_reset(struct tg3 *tp) return 0; } -static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *, - struct rtnl_link_stats64 *); -static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *, - struct tg3_ethtool_stats *); - /* tp->lock is held. */ static int tg3_halt(struct tg3 *tp, int kind, int silent) { @@ -7913,15 +7931,6 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent) tg3_write_sig_legacy(tp, kind); tg3_write_sig_post_reset(tp, kind); - if (tp->hw_stats) { - /* Save the stats across chip resets... */ - tg3_get_stats64(tp->dev, &tp->net_stats_prev), - tg3_get_estats(tp, &tp->estats_prev); - - /* And make sure the next sample is new data */ - memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); - } - if (err) return err; @@ -8065,7 +8074,7 @@ static void tg3_rings_reset(struct tg3 *tp) limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; else if (tg3_flag(tp, 5717_PLUS)) limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; - else if (tg3_flag(tp, 57765_CLASS)) + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; else limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; @@ -8082,7 +8091,7 @@ static void tg3_rings_reset(struct tg3 *tp) else if (!tg3_flag(tp, 5705_PLUS)) limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - tg3_flag(tp, 57765_CLASS)) + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; else limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; @@ -8188,8 +8197,7 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp) if (!tg3_flag(tp, 5750_PLUS) || tg3_flag(tp, 5780_CLASS) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || - tg3_flag(tp, 57765_PLUS)) + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700; else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) @@ -8209,7 +8217,10 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp) if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) return; - bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700; + if (!tg3_flag(tp, 5705_PLUS)) + bdcache_maxcnt = 
TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700; + else + bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717; host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1); @@ -8220,54 +8231,6 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp) tw32(JMB_REPLENISH_LWM, bdcache_maxcnt); } -static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp) -{ - int i; - - for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) - tp->rss_ind_tbl[i] = - ethtool_rxfh_indir_default(i, tp->irq_cnt - 1); -} - -static void tg3_rss_check_indir_tbl(struct tg3 *tp) -{ - int i; - - if (!tg3_flag(tp, SUPPORT_MSIX)) - return; - - if (tp->irq_cnt <= 2) { - memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl)); - return; - } - - /* Validate table against current IRQ count */ - for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) { - if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1) - break; - } - - if (i != TG3_RSS_INDIR_TBL_SIZE) - tg3_rss_init_dflt_indir_tbl(tp); -} - -static void tg3_rss_write_indir_tbl(struct tg3 *tp) -{ - int i = 0; - u32 reg = MAC_RSS_INDIR_TBL_0; - - while (i < TG3_RSS_INDIR_TBL_SIZE) { - u32 val = tp->rss_ind_tbl[i]; - i++; - for (; i % 8; i++) { - val <<= 4; - val |= tp->rss_ind_tbl[i]; - } - tw32(reg, val); - reg += 4; - } -} - /* tp->lock is held. */ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) { @@ -8374,7 +8337,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(GRC_MODE, grc_mode); } - if (tg3_flag(tp, 57765_CLASS)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { u32 grc_mode = tr32(GRC_MODE); @@ -8462,7 +8425,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK; - if (!tg3_flag(tp, 57765_CLASS) && + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) val |= DMA_RWCTRL_TAGGED_STAT_WA; tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); @@ -8609,7 +8572,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, val | BDINFO_FLAGS_USE_EXT_RECV); if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || - tg3_flag(tp, 57765_CLASS)) + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, NIC_SRAM_RX_JUMBO_BUFFER_DESC); } else { @@ -8618,7 +8581,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) } if (tg3_flag(tp, 57765_PLUS)) { - val = TG3_RX_STD_RING_SIZE(tp); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + val = TG3_RX_STD_MAX_SIZE_5700; + else + val = TG3_RX_STD_MAX_SIZE_5717; val <<= BDINFO_FLAGS_MAXLEN_SHIFT; val |= (TG3_RX_STD_DMA_SZ << 2); } else @@ -8695,9 +8661,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (tg3_flag(tp, PCI_EXPRESS)) rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) - rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR; - if (tg3_flag(tp, HW_TSO_1) || tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) @@ -8961,7 +8924,28 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) udelay(100); if (tg3_flag(tp, ENABLE_RSS)) { - tg3_rss_write_indir_tbl(tp); + int i = 0; + u32 reg = MAC_RSS_INDIR_TBL_0; + + if (tp->irq_cnt == 2) { + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) { + tw32(reg, 0x0); + reg += 4; + } + } else { + u32 val; + + while (i < TG3_RSS_INDIR_TBL_SIZE) { + val = i % (tp->irq_cnt - 1); + i++; + for (; i % 8; i++) { + val <<= 4; + val |= (i % (tp->irq_cnt - 1)); + } + 
tw32(reg, val); + reg += 4; + } + } /* Setup the "secret" hash key. */ tw32(MAC_RSS_HASH_KEY_0, 0x5f865437); @@ -9018,7 +9002,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) /* Prevent chip from dropping frames when flow control * is enabled. */ - if (tg3_flag(tp, 57765_CLASS)) + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) val = 1; else val = 2; @@ -9233,7 +9217,7 @@ static void tg3_timer(unsigned long __opaque) spin_lock(&tp->lock); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - tg3_flag(tp, 57765_CLASS)) + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) tg3_chk_missed_msi(tp); if (!tg3_flag(tp, TAGGED_STATUS)) { @@ -9685,8 +9669,6 @@ static int tg3_open(struct net_device *dev) */ tg3_ints_init(tp); - tg3_rss_check_indir_tbl(tp); - /* The placement of this call is tied * to the setup and use of Host TX descriptors. */ @@ -9718,8 +9700,8 @@ static int tg3_open(struct net_device *dev) tg3_free_rings(tp); } else { if (tg3_flag(tp, TAGGED_STATUS) && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && - !tg3_flag(tp, 57765_CLASS)) + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) tp->timer_offset = HZ; else tp->timer_offset = HZ / 10; @@ -9800,6 +9782,10 @@ static int tg3_open(struct net_device *dev) return err; } +static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *, + struct rtnl_link_stats64 *); +static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *); + static int tg3_close(struct net_device *dev) { int i; @@ -9831,9 +9817,10 @@ static int tg3_close(struct net_device *dev) tg3_ints_fini(tp); - /* Clear stats across close / open calls */ - memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev)); - memset(&tp->estats_prev, 0, sizeof(tp->estats_prev)); + tg3_get_stats64(tp->dev, &tp->net_stats_prev); + + memcpy(&tp->estats_prev, tg3_get_estats(tp), + sizeof(tp->estats_prev)); tg3_napi_fini(tp); @@ -9881,9 +9868,9 @@ static u64 calc_crc_errors(struct tg3 *tp) estats->member = old_estats->member + \ get_stat64(&hw_stats->member) -static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp, - struct tg3_ethtool_stats *estats) +static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) { + struct tg3_ethtool_stats *estats = &tp->estats; struct tg3_ethtool_stats *old_estats = &tp->estats_prev; struct tg3_hw_stats *hw_stats = tp->hw_stats; @@ -10331,20 +10318,12 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->advertising |= ADVERTISED_Asym_Pause; } } - if (netif_running(dev) && netif_carrier_ok(dev)) { + if (netif_running(dev)) { ethtool_cmd_speed_set(cmd, tp->link_config.active_speed); cmd->duplex = tp->link_config.active_duplex; - cmd->lp_advertising = tp->link_config.rmt_adv; - if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { - if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE) - cmd->eth_tp_mdix = ETH_TP_MDI_X; - else - cmd->eth_tp_mdix = ETH_TP_MDI; - } } else { ethtool_cmd_speed_set(cmd, SPEED_INVALID); cmd->duplex = DUPLEX_INVALID; - cmd->eth_tp_mdix = ETH_TP_MDI_INVALID; } cmd->phy_address = tp->phy_addr; cmd->transceiver = XCVR_INTERNAL; @@ -10449,10 +10428,10 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info { struct tg3 *tp = netdev_priv(dev); - strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); - strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version)); - strlcpy(info->bus_info, pci_name(tp->pdev), 
sizeof(info->bus_info)); + strcpy(info->driver, DRV_MODULE_NAME); + strcpy(info->version, DRV_MODULE_VERSION); + strcpy(info->fw_version, tp->fw_ver); + strcpy(info->bus_info, pci_name(tp->pdev)); } static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) @@ -10611,12 +10590,12 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); - if (tp->link_config.flowctrl & FLOW_CTRL_RX) + if (tp->link_config.active_flowctrl & FLOW_CTRL_RX) epause->rx_pause = 1; else epause->rx_pause = 0; - if (tp->link_config.flowctrl & FLOW_CTRL_TX) + if (tp->link_config.active_flowctrl & FLOW_CTRL_TX) epause->tx_pause = 1; else epause->tx_pause = 0; @@ -10736,78 +10715,6 @@ static int tg3_get_sset_count(struct net_device *dev, int sset) } } -static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, - u32 *rules __always_unused) -{ - struct tg3 *tp = netdev_priv(dev); - - if (!tg3_flag(tp, SUPPORT_MSIX)) - return -EOPNOTSUPP; - - switch (info->cmd) { - case ETHTOOL_GRXRINGS: - if (netif_running(tp->dev)) - info->data = tp->irq_cnt; - else { - info->data = num_online_cpus(); - if (info->data > TG3_IRQ_MAX_VECS_RSS) - info->data = TG3_IRQ_MAX_VECS_RSS; - } - - /* The first interrupt vector only - * handles link interrupts. - */ - info->data -= 1; - return 0; - - default: - return -EOPNOTSUPP; - } -} - -static u32 tg3_get_rxfh_indir_size(struct net_device *dev) -{ - u32 size = 0; - struct tg3 *tp = netdev_priv(dev); - - if (tg3_flag(tp, SUPPORT_MSIX)) - size = TG3_RSS_INDIR_TBL_SIZE; - - return size; -} - -static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir) -{ - struct tg3 *tp = netdev_priv(dev); - int i; - - for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) - indir[i] = tp->rss_ind_tbl[i]; - - return 0; -} - -static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir) -{ - struct tg3 *tp = netdev_priv(dev); - size_t i; - - for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) - tp->rss_ind_tbl[i] = indir[i]; - - if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS)) - return 0; - - /* It is legal to write the indirection - * table while the device is running. 
- */ - tg3_full_lock(tp, 0); - tg3_rss_write_indir_tbl(tp); - tg3_full_unlock(tp); - - return 0; -} - static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { switch (stringset) { @@ -10862,8 +10769,7 @@ static void tg3_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *tmp_stats) { struct tg3 *tp = netdev_priv(dev); - - tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats); + memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats)); } static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen) @@ -11446,7 +11352,7 @@ static int tg3_test_memory(struct tg3 *tp) if (tg3_flag(tp, 5717_PLUS)) mem_tbl = mem_tbl_5717; - else if (tg3_flag(tp, 57765_CLASS)) + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) mem_tbl = mem_tbl_57765; else if (tg3_flag(tp, 5755_PLUS)) mem_tbl = mem_tbl_5755; @@ -11494,8 +11400,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) u32 rx_start_idx, rx_idx, tx_idx, opaque_key; u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val; u32 budget; - struct sk_buff *skb; - u8 *tx_data, *rx_data; + struct sk_buff *skb, *rx_skb; + u8 *tx_data; dma_addr_t map; int num_pkts, tx_len, rx_len, i, err; struct tg3_rx_buffer_desc *desc; @@ -11663,11 +11569,11 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) } if (opaque_key == RXD_OPAQUE_RING_STD) { - rx_data = tpr->rx_std_buffers[desc_idx].data; + rx_skb = tpr->rx_std_buffers[desc_idx].skb; map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping); } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { - rx_data = tpr->rx_jmb_buffers[desc_idx].data; + rx_skb = tpr->rx_jmb_buffers[desc_idx].skb; map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], mapping); } else @@ -11676,16 +11582,15 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE); - rx_data += TG3_RX_OFFSET(tp); for (i = data_off; i < rx_len; i++, val++) { - if (*(rx_data + i) != (u8) (val & 0xff)) + if (*(rx_skb->data + i) != (u8) (val & 0xff)) goto out; } } err = 0; - /* tg3_free_rings will unmap and free the rx_data */ + /* tg3_free_rings will unmap and free the rx_skb */ out: return err; } @@ -12038,10 +11943,6 @@ static const struct ethtool_ops tg3_ethtool_ops = { .get_coalesce = tg3_get_coalesce, .set_coalesce = tg3_set_coalesce, .get_sset_count = tg3_get_sset_count, - .get_rxnfc = tg3_get_rxnfc, - .get_rxfh_indir_size = tg3_get_rxfh_indir_size, - .get_rxfh_indir = tg3_get_rxfh_indir, - .set_rxfh_indir = tg3_set_rxfh_indir, }; static void __devinit tg3_get_eeprom_size(struct tg3 *tp) @@ -12711,7 +12612,7 @@ static void __devinit tg3_nvram_init(struct tg3 *tp) else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) tg3_get_5906_nvram_info(tp); else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || - tg3_flag(tp, 57765_CLASS)) + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) tg3_get_57780_nvram_info(tp); else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) @@ -13317,7 +13218,8 @@ static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp) static void __devinit tg3_phy_init_link_config(struct tg3 *tp) { - u32 adv = ADVERTISED_Autoneg; + u32 adv = ADVERTISED_Autoneg | + ADVERTISED_Pause; if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) adv |= ADVERTISED_1000baseT_Half | @@ -13420,7 +13322,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) if (!(tp->phy_flags & 
TG3_PHYFLG_ANY_SERDES) && !tg3_flag(tp, ENABLE_APE) && !tg3_flag(tp, ENABLE_ASF)) { - u32 bmsr, dummy; + u32 bmsr, mask; tg3_readphy(tp, MII_BMSR, &bmsr); if (!tg3_readphy(tp, MII_BMSR, &bmsr) && @@ -13433,7 +13335,10 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) tg3_phy_set_wirespeed(tp); - if (!tg3_phy_copper_an_config_ok(tp, &dummy)) { + mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); + if (!tg3_copper_is_advertising_all(tp, mask)) { tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, tp->link_config.flowctrl); @@ -13555,17 +13460,6 @@ static void __devinit tg3_read_vpd(struct tg3 *tp) strcpy(tp->board_part_number, "BCM57795"); else goto nomatch; - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) { - if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762) - strcpy(tp->board_part_number, "BCM57762"); - else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766) - strcpy(tp->board_part_number, "BCM57766"); - else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782) - strcpy(tp->board_part_number, "BCM57782"); - else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) - strcpy(tp->board_part_number, "BCM57786"); - else - goto nomatch; } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { strcpy(tp->board_part_number, "BCM95906"); } else { @@ -13904,11 +13798,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) pci_read_config_dword(tp->pdev, TG3PCI_GEN15_PRODID_ASICREV, &prod_id_asic_rev); @@ -14055,10 +13945,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tg3_flag_set(tp, 5717_PLUS); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) - tg3_flag_set(tp, 57765_CLASS); - - if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS)) + tg3_flag(tp, 5717_PLUS)) tg3_flag_set(tp, 57765_PLUS); /* Intentionally exclude ASIC_REV_5906 */ @@ -14110,13 +13997,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (tg3_flag(tp, HW_TSO_1) || tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3) || - tp->fw_needed) { - /* For firmware TSO, assume ASF is disabled. - * We'll disable TSO later if we discover ASF - * is enabled in tg3_get_eeprom_hw_cfg(). 
- */ + (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF))) tg3_flag_set(tp, TSO_CAPABLE); - } else { + else { tg3_flag_clear(tp, TSO_CAPABLE); tg3_flag_clear(tp, TSO_BUG); tp->fw_needed = NULL; @@ -14144,7 +14027,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (tg3_flag(tp, 57765_PLUS)) { tg3_flag_set(tp, SUPPORT_MSIX); tp->irq_max = TG3_IRQ_MAX_VECS; - tg3_rss_init_dflt_indir_tbl(tp); } } @@ -14152,13 +14034,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tg3_flag_set(tp, SHORT_DMA_BUG); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) - tp->dma_limit = TG3_TX_BD_DMA_MAX_4K; - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) - tp->dma_limit = TG3_TX_BD_DMA_MAX_2K; + tg3_flag_set(tp, 4K_FIFO_LIMIT); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + if (tg3_flag(tp, 5717_PLUS)) tg3_flag_set(tp, LRG_PROD_RING_CAP); if (tg3_flag(tp, 57765_PLUS) && @@ -14178,11 +14056,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tg3_flag_set(tp, PCI_EXPRESS); - if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) { - int readrq = pcie_get_readrq(tp->pdev); - if (readrq > 2048) - pcie_set_readrq(tp->pdev, 2048); - } + tp->pcie_readrq = 4096; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + tp->pcie_readrq = 2048; + + pcie_set_readrq(tp->pdev, tp->pcie_readrq); pci_read_config_word(tp->pdev, pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, @@ -14394,12 +14273,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) */ tg3_get_eeprom_hw_cfg(tp); - if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) { - tg3_flag_clear(tp, TSO_CAPABLE); - tg3_flag_clear(tp, TSO_BUG); - tp->fw_needed = NULL; - } - if (tg3_flag(tp, ENABLE_APE)) { /* Allow reads and writes to the * APE register and memory space. @@ -14438,7 +14311,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || - tg3_flag(tp, 57765_CLASS)) + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || @@ -14675,11 +14548,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) else tg3_flag_clear(tp, POLL_SERDES); - tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN; + tp->rx_offset = NET_IP_ALIGN; tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && tg3_flag(tp, PCIX_MODE)) { - tp->rx_offset = NET_SKB_PAD; + tp->rx_offset = 0; #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS tp->rx_copy_thresh = ~(u16)0; #endif @@ -15440,7 +15313,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, u32 sndmbx, rcvmbx, intmbx; char str[40]; u64 dma_mask, persist_dma_mask; - netdev_features_t features = 0; + u32 features = 0; printk_once(KERN_INFO "%s\n", version); diff --git a/trunk/drivers/net/ethernet/broadcom/tg3.h b/trunk/drivers/net/ethernet/broadcom/tg3.h index aea8f72c24fa..94b4bd049a33 100644 --- a/trunk/drivers/net/ethernet/broadcom/tg3.h +++ b/trunk/drivers/net/ethernet/broadcom/tg3.h @@ -31,8 +31,6 @@ #define TG3_RX_RET_MAX_SIZE_5705 512 #define TG3_RX_RET_MAX_SIZE_5717 4096 -#define TG3_RSS_INDIR_TBL_SIZE 128 - /* First 256 bytes are a mirror of PCI config space. 
*/ #define TG3PCI_VENDOR 0x00000000 #define TG3PCI_VENDOR_BROADCOM 0x14e4 @@ -59,10 +57,6 @@ #define TG3PCI_DEVICE_TIGON3_57795 0x16b6 #define TG3PCI_DEVICE_TIGON3_5719 0x1657 #define TG3PCI_DEVICE_TIGON3_5720 0x165f -#define TG3PCI_DEVICE_TIGON3_57762 0x1682 -#define TG3PCI_DEVICE_TIGON3_57766 0x1686 -#define TG3PCI_DEVICE_TIGON3_57786 0x16b3 -#define TG3PCI_DEVICE_TIGON3_57782 0x16b7 /* 0x04 --> 0x2c unused */ #define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM #define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644 @@ -174,7 +168,6 @@ #define ASIC_REV_57765 0x57785 #define ASIC_REV_5719 0x5719 #define ASIC_REV_5720 0x5720 -#define ASIC_REV_57766 0x57766 #define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) #define CHIPREV_5700_AX 0x70 #define CHIPREV_5700_BX 0x71 @@ -1347,7 +1340,6 @@ #define RDMAC_MODE_MBUF_SBD_CRPT_ENAB 0x00002000 #define RDMAC_MODE_FIFO_SIZE_128 0x00020000 #define RDMAC_MODE_FIFO_LONG_BURST 0x00030000 -#define RDMAC_MODE_JMB_2K_MMRR 0x00800000 #define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000 #define RDMAC_MODE_IPV4_LSO_EN 0x08000000 #define RDMAC_MODE_IPV6_LSO_EN 0x10000000 @@ -2182,7 +2174,6 @@ #define MII_TG3_EXT_CTRL_TBI 0x8000 #define MII_TG3_EXT_STAT 0x11 /* Extended status register */ -#define MII_TG3_EXT_STAT_MDIX 0x2000 #define MII_TG3_EXT_STAT_LPASS 0x0100 #define MII_TG3_RXR_COUNTERS 0x14 /* Local/Remote Receiver Counts */ @@ -2286,9 +2277,6 @@ #define MII_TG3_FET_PTEST_FRC_TX_LINK 0x1000 #define MII_TG3_FET_PTEST_FRC_TX_LOCK 0x0800 -#define MII_TG3_FET_GEN_STAT 0x1c -#define MII_TG3_FET_GEN_STAT_MDIXSTAT 0x2000 - #define MII_TG3_FET_TEST 0x1f #define MII_TG3_FET_SHADOW_EN 0x0080 @@ -2674,13 +2662,9 @@ struct tg3_hw_stats { /* 'mapping' is superfluous as the chip does not write into * the tx/rx post rings so we could just fetch it from there. * But the cache behavior is better how we are doing it now. - * - * This driver uses new build_skb() API : - * RX ring buffer contains pointer to kmalloc() data only, - * skb are built only after Hardware filled the frame. */ struct ring_info { - u8 *data; + struct sk_buff *skb; DEFINE_DMA_UNMAP_ADDR(mapping); }; @@ -2706,7 +2690,6 @@ struct tg3_link_config { #define DUPLEX_INVALID 0xff #define AUTONEG_INVALID 0xff u16 active_speed; - u32 rmt_adv; /* When we go in and out of low power mode we need * to swap with this state. 
@@ -2882,8 +2865,6 @@ enum TG3_FLAGS { TG3_FLAG_NVRAM_BUFFERED, TG3_FLAG_SUPPORT_MSI, TG3_FLAG_SUPPORT_MSIX, - TG3_FLAG_USING_MSI, - TG3_FLAG_USING_MSIX, TG3_FLAG_PCIX_MODE, TG3_FLAG_PCI_HIGH_SPEED, TG3_FLAG_PCI_32BIT, @@ -2899,6 +2880,7 @@ enum TG3_FLAGS { TG3_FLAG_CHIP_RESETTING, TG3_FLAG_INIT_COMPLETE, TG3_FLAG_TSO_BUG, + TG3_FLAG_IS_5788, TG3_FLAG_MAX_RXPEND_64, TG3_FLAG_TSO_CAPABLE, TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */ @@ -2907,9 +2889,14 @@ enum TG3_FLAGS { TG3_FLAG_IS_NIC, TG3_FLAG_FLASH, TG3_FLAG_HW_TSO_1, - TG3_FLAG_HW_TSO_2, + TG3_FLAG_5705_PLUS, + TG3_FLAG_5750_PLUS, TG3_FLAG_HW_TSO_3, + TG3_FLAG_USING_MSI, + TG3_FLAG_USING_MSIX, TG3_FLAG_ICH_WORKAROUND, + TG3_FLAG_5780_CLASS, + TG3_FLAG_HW_TSO_2, TG3_FLAG_1SHOT_MSI, TG3_FLAG_NO_FWARE_REPORTED, TG3_FLAG_NO_NVRAM_ADDR_TRANS, @@ -2923,23 +2910,18 @@ enum TG3_FLAGS { TG3_FLAG_RGMII_EXT_IBND_RX_EN, TG3_FLAG_RGMII_EXT_IBND_TX_EN, TG3_FLAG_CLKREQ_BUG, + TG3_FLAG_5755_PLUS, TG3_FLAG_NO_NVRAM, TG3_FLAG_ENABLE_RSS, TG3_FLAG_ENABLE_TSS, TG3_FLAG_SHORT_DMA_BUG, TG3_FLAG_USE_JUMBO_BDFLAG, TG3_FLAG_L1PLLPD_EN, + TG3_FLAG_57765_PLUS, TG3_FLAG_APE_HAS_NCSI, + TG3_FLAG_5717_PLUS, TG3_FLAG_4K_FIFO_LIMIT, TG3_FLAG_RESET_TASK_PENDING, - TG3_FLAG_5705_PLUS, - TG3_FLAG_IS_5788, - TG3_FLAG_5750_PLUS, - TG3_FLAG_5780_CLASS, - TG3_FLAG_5755_PLUS, - TG3_FLAG_57765_PLUS, - TG3_FLAG_57765_CLASS, - TG3_FLAG_5717_PLUS, /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */ TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */ @@ -3003,7 +2985,6 @@ struct tg3 { /* begin "tx thread" cacheline section */ void (*write32_tx_mbox) (struct tg3 *, u32, u32); - u32 dma_limit; /* begin "rx thread" cacheline section */ struct tg3_napi napi[TG3_IRQ_MAX_VECS]; @@ -3024,6 +3005,7 @@ struct tg3 { unsigned long rx_dropped; unsigned long tx_dropped; struct rtnl_link_stats64 net_stats_prev; + struct tg3_ethtool_stats estats; struct tg3_ethtool_stats estats_prev; DECLARE_BITMAP(tg3_flags, TG3_FLAG_NUMBER_OF_FLAGS); @@ -3149,12 +3131,10 @@ struct tg3 { #define TG3_PHYFLG_SERDES_PREEMPHASIS 0x00010000 #define TG3_PHYFLG_PARALLEL_DETECT 0x00020000 #define TG3_PHYFLG_EEE_CAP 0x00040000 -#define TG3_PHYFLG_MDIX_STATE 0x00200000 u32 led_ctrl; u32 phy_otp; u32 setlpicnt; - u8 rss_ind_tbl[TG3_RSS_INDIR_TBL_SIZE]; #define TG3_BPN_SIZE 24 char board_part_number[TG3_BPN_SIZE]; diff --git a/trunk/drivers/net/ethernet/brocade/bna/Makefile b/trunk/drivers/net/ethernet/brocade/bna/Makefile index 6027302ae73a..74d3abca1960 100644 --- a/trunk/drivers/net/ethernet/brocade/bna/Makefile +++ b/trunk/drivers/net/ethernet/brocade/bna/Makefile @@ -5,7 +5,7 @@ obj-$(CONFIG_BNA) += bna.o -bna-objs := bnad.o bnad_ethtool.o bnad_debugfs.o bna_enet.o bna_tx_rx.o +bna-objs := bnad.o bnad_ethtool.o bna_enet.o bna_tx_rx.o bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o bna-objs += cna_fwimg.o diff --git a/trunk/drivers/net/ethernet/brocade/bna/bfa_cee.c b/trunk/drivers/net/ethernet/brocade/bna/bfa_cee.c index 29f284f79e02..8e627186507c 100644 --- a/trunk/drivers/net/ethernet/brocade/bna/bfa_cee.c +++ b/trunk/drivers/net/ethernet/brocade/bna/bfa_cee.c @@ -184,41 +184,6 @@ bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa) (dma_kva + bfa_cee_attr_meminfo()); } -/** - * bfa_cee_get_attr() - * - * @brief Send the request to the f/w to fetch CEE attributes. - * - * @param[in] Pointer to the CEE module data structure. 
- * - * @return Status - */ -enum bfa_status -bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr, - bfa_cee_get_attr_cbfn_t cbfn, void *cbarg) -{ - struct bfi_cee_get_req *cmd; - - BUG_ON(!((cee != NULL) && (cee->ioc != NULL))); - if (!bfa_nw_ioc_is_operational(cee->ioc)) - return BFA_STATUS_IOC_FAILURE; - - if (cee->get_attr_pending == true) - return BFA_STATUS_DEVBUSY; - - cee->get_attr_pending = true; - cmd = (struct bfi_cee_get_req *) cee->get_cfg_mb.msg; - cee->attr = attr; - cee->cbfn.get_attr_cbfn = cbfn; - cee->cbfn.get_attr_cbarg = cbarg; - bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ, - bfa_ioc_portid(cee->ioc)); - bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa); - bfa_nw_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb, NULL, NULL); - - return BFA_STATUS_OK; -} - /** * bfa_cee_isrs() * diff --git a/trunk/drivers/net/ethernet/brocade/bna/bfa_cee.h b/trunk/drivers/net/ethernet/brocade/bna/bfa_cee.h index 93fde633d6f3..58d54e98d595 100644 --- a/trunk/drivers/net/ethernet/brocade/bna/bfa_cee.h +++ b/trunk/drivers/net/ethernet/brocade/bna/bfa_cee.h @@ -59,7 +59,5 @@ u32 bfa_nw_cee_meminfo(void); void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa); void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev); -enum bfa_status bfa_nw_cee_get_attr(struct bfa_cee *cee, - struct bfa_cee_attr *attr, - bfa_cee_get_attr_cbfn_t cbfn, void *cbarg); + #endif /* __BFA_CEE_H__ */ diff --git a/trunk/drivers/net/ethernet/brocade/bna/bfa_defs.h b/trunk/drivers/net/ethernet/brocade/bna/bfa_defs.h index 871c6309334c..2f12d68021d5 100644 --- a/trunk/drivers/net/ethernet/brocade/bna/bfa_defs.h +++ b/trunk/drivers/net/ethernet/brocade/bna/bfa_defs.h @@ -219,39 +219,41 @@ enum { * All numerical fields are in big-endian format. 
*/ struct bfa_mfg_block { - u8 version; /* manufacturing block version */ - u8 mfg_sig[3]; /* characters 'M', 'F', 'G' */ - u16 mfgsize; /* mfg block size */ - u16 u16_chksum; /* old u16 checksum */ - char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)]; - char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)]; - u8 mfg_day; /* manufacturing day */ - u8 mfg_month; /* manufacturing month */ - u16 mfg_year; /* manufacturing year */ - u64 mfg_wwn; /* wwn base for this adapter */ - u8 num_wwn; /* number of wwns assigned */ - u8 mfg_speeds; /* speeds allowed for this adapter */ - u8 rsv[2]; - char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)]; - char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)]; - char supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)]; - char supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)]; - mac_t mfg_mac; /* base mac address */ - u8 num_mac; /* number of mac addresses */ - u8 rsv2; - u32 card_type; /* card type */ - char cap_nic; /* capability nic */ - char cap_cna; /* capability cna */ - char cap_hba; /* capability hba */ - char cap_fc16g; /* capability fc 16g */ - char cap_sriov; /* capability sriov */ - char cap_mezz; /* capability mezz */ - u8 rsv3; - u8 mfg_nports; /* number of ports */ - char media[8]; /* xfi/xaui */ - char initial_mode[8]; /* initial mode: hba/cna/nic */ - u8 rsv4[84]; - u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */ + u8 version; /*!< manufacturing block version */ + u8 mfg_sig[3]; /*!< characters 'M', 'F', 'G' */ + u16 mfgsize; /*!< mfg block size */ + u16 u16_chksum; /*!< old u16 checksum */ + char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)]; + char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)]; + u8 mfg_day; /*!< manufacturing day */ + u8 mfg_month; /*!< manufacturing month */ + u16 mfg_year; /*!< manufacturing year */ + u64 mfg_wwn; /*!< wwn base for this adapter */ + u8 num_wwn; /*!< number of wwns assigned */ + u8 mfg_speeds; /*!< speeds allowed for this adapter */ + u8 rsv[2]; + char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)]; + char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)]; + char + supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)]; + char + supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)]; + mac_t mfg_mac; /*!< mac address */ + u8 num_mac; /*!< number of mac addresses */ + u8 rsv2; + u32 card_type; /*!< card type */ + char cap_nic; /*!< capability nic */ + char cap_cna; /*!< capability cna */ + char cap_hba; /*!< capability hba */ + char cap_fc16g; /*!< capability fc 16g */ + char cap_sriov; /*!< capability sriov */ + char cap_mezz; /*!< capability mezz */ + u8 rsv3; + u8 mfg_nports; /*!< number of ports */ + char media[8]; /*!< xfi/xaui */ + char initial_mode[8];/*!< initial mode: hba/cna/nic */ + u8 rsv4[84]; + u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */ }; #pragma pack() @@ -291,34 +293,4 @@ enum bfa_mode { BFA_MODE_NIC = 3 }; -/* - * Flash module specific - */ -#define BFA_FLASH_PART_ENTRY_SIZE 32 /* partition entry size */ -#define BFA_FLASH_PART_MAX 32 /* maximal # of partitions */ -#define BFA_TOTAL_FLASH_SIZE 0x400000 -#define BFA_FLASH_PART_MFG 7 - -/* - * flash partition attributes - */ -struct bfa_flash_part_attr { - u32 part_type; /* partition type */ - u32 part_instance; /* partition instance */ - u32 part_off; /* partition offset */ - u32 part_size; /* partition size */ - u32 part_len; /* partition content length */ - u32 part_status; /* partition status */ - char rsv[BFA_FLASH_PART_ENTRY_SIZE - 24]; -}; - -/* - * flash attributes - */ -struct bfa_flash_attr { 
- u32 status; /* flash overall status */ - u32 npart; /* num of partitions */ - struct bfa_flash_part_attr part[BFA_FLASH_PART_MAX]; -}; - #endif /* __BFA_DEFS_H__ */ diff --git a/trunk/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/trunk/drivers/net/ethernet/brocade/bna/bfa_ioc.c index abfad275b5f3..b0307a00a109 100644 --- a/trunk/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/trunk/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -74,7 +74,6 @@ static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc); static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event); static void bfa_ioc_disable_comp(struct bfa_ioc *ioc); static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc); -static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc); static void bfa_ioc_fail_notify(struct bfa_ioc *ioc); static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc); static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc); @@ -998,7 +997,6 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event) static void bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf) { - bfa_nw_ioc_debug_save_ftrc(iocpf->ioc); bfa_ioc_hw_sem_get(iocpf->ioc); } @@ -1745,114 +1743,6 @@ bfa_ioc_mbox_flush(struct bfa_ioc *ioc) bfa_q_deq(&mod->cmd_q, &cmd); } -/** - * Read data from SMEM to host through PCI memmap - * - * @param[in] ioc memory for IOC - * @param[in] tbuf app memory to store data from smem - * @param[in] soff smem offset - * @param[in] sz size of smem in bytes - */ -static int -bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz) -{ - u32 pgnum, loff, r32; - int i, len; - u32 *buf = tbuf; - - pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff); - loff = PSS_SMEM_PGOFF(soff); - - /* - * Hold semaphore to serialize pll init and fwtrc. - */ - if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0) - return 1; - - writel(pgnum, ioc->ioc_regs.host_page_num_fn); - - len = sz/sizeof(u32); - for (i = 0; i < len; i++) { - r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start))); - buf[i] = be32_to_cpu(r32); - loff += sizeof(u32); - - /** - * handle page offset wrap around - */ - loff = PSS_SMEM_PGOFF(loff); - if (loff == 0) { - pgnum++; - writel(pgnum, ioc->ioc_regs.host_page_num_fn); - } - } - - writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), - ioc->ioc_regs.host_page_num_fn); - - /* - * release semaphore - */ - readl(ioc->ioc_regs.ioc_init_sem_reg); - writel(1, ioc->ioc_regs.ioc_init_sem_reg); - return 0; -} - -/** - * Retrieve saved firmware trace from a prior IOC failure. - */ -int -bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen) -{ - u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id; - int tlen, status = 0; - - tlen = *trclen; - if (tlen > BNA_DBG_FWTRC_LEN) - tlen = BNA_DBG_FWTRC_LEN; - - status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen); - *trclen = tlen; - return status; -} - -/** - * Save firmware trace if configured. - */ -static void -bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc) -{ - int tlen; - - if (ioc->dbg_fwsave_once) { - ioc->dbg_fwsave_once = 0; - if (ioc->dbg_fwsave_len) { - tlen = ioc->dbg_fwsave_len; - bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen); - } - } -} - -/** - * Retrieve saved firmware trace from a prior IOC failure. 
- */ -int -bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen) -{ - int tlen; - - if (ioc->dbg_fwsave_len == 0) - return BFA_STATUS_ENOFSAVE; - - tlen = *trclen; - if (tlen > ioc->dbg_fwsave_len) - tlen = ioc->dbg_fwsave_len; - - memcpy(trcdata, ioc->dbg_fwsave, tlen); - *trclen = tlen; - return BFA_STATUS_OK; -} - static void bfa_ioc_fail_notify(struct bfa_ioc *ioc) { @@ -1861,7 +1751,6 @@ bfa_ioc_fail_notify(struct bfa_ioc *ioc) */ ioc->cbfn->hbfail_cbfn(ioc->bfa); bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED); - bfa_nw_ioc_debug_save_ftrc(ioc); } /** @@ -2169,16 +2058,6 @@ bfa_nw_ioc_disable(struct bfa_ioc *ioc) bfa_fsm_send_event(ioc, IOC_E_DISABLE); } -/** - * Initialize memory for saving firmware trace. - */ -void -bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave) -{ - ioc->dbg_fwsave = dbg_fwsave; - ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0; -} - static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr) { @@ -2292,15 +2171,6 @@ bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc) bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); } -/** - * return true if IOC is operational - */ -bool -bfa_nw_ioc_is_operational(struct bfa_ioc *ioc) -{ - return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); -} - /** * Add to IOC heartbeat failure notification queue. To be used by common * modules such as cee, port, diag. @@ -2601,366 +2471,3 @@ bfa_ioc_poll_fwinit(struct bfa_ioc *ioc) msecs_to_jiffies(BFA_IOC_POLL_TOV)); } } - -/* - * Flash module specific - */ - -/* - * FLASH DMA buffer should be big enough to hold both MFG block and - * asic block(64k) at the same time and also should be 2k aligned to - * avoid write segement to cross sector boundary. - */ -#define BFA_FLASH_SEG_SZ 2048 -#define BFA_FLASH_DMA_BUF_SZ \ - roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ) - -static void -bfa_flash_cb(struct bfa_flash *flash) -{ - flash->op_busy = 0; - if (flash->cbfn) - flash->cbfn(flash->cbarg, flash->status); -} - -static void -bfa_flash_notify(void *cbarg, enum bfa_ioc_event event) -{ - struct bfa_flash *flash = cbarg; - - switch (event) { - case BFA_IOC_E_DISABLED: - case BFA_IOC_E_FAILED: - if (flash->op_busy) { - flash->status = BFA_STATUS_IOC_FAILURE; - flash->cbfn(flash->cbarg, flash->status); - flash->op_busy = 0; - } - break; - default: - break; - } -} - -/* - * Send flash write request. - * - * @param[in] cbarg - callback argument - */ -static void -bfa_flash_write_send(struct bfa_flash *flash) -{ - struct bfi_flash_write_req *msg = - (struct bfi_flash_write_req *) flash->mb.msg; - u32 len; - - msg->type = be32_to_cpu(flash->type); - msg->instance = flash->instance; - msg->offset = be32_to_cpu(flash->addr_off + flash->offset); - len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? - flash->residue : BFA_FLASH_DMA_BUF_SZ; - msg->length = be32_to_cpu(len); - - /* indicate if it's the last msg of the whole write operation */ - msg->last = (len == flash->residue) ? 1 : 0; - - bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ, - bfa_ioc_portid(flash->ioc)); - bfa_alen_set(&msg->alen, len, flash->dbuf_pa); - memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len); - bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); - - flash->residue -= len; - flash->offset += len; -} - -/* - * Send flash read request. 
- * - * @param[in] cbarg - callback argument - */ -static void -bfa_flash_read_send(void *cbarg) -{ - struct bfa_flash *flash = cbarg; - struct bfi_flash_read_req *msg = - (struct bfi_flash_read_req *) flash->mb.msg; - u32 len; - - msg->type = be32_to_cpu(flash->type); - msg->instance = flash->instance; - msg->offset = be32_to_cpu(flash->addr_off + flash->offset); - len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? - flash->residue : BFA_FLASH_DMA_BUF_SZ; - msg->length = be32_to_cpu(len); - bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ, - bfa_ioc_portid(flash->ioc)); - bfa_alen_set(&msg->alen, len, flash->dbuf_pa); - bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); -} - -/* - * Process flash response messages upon receiving interrupts. - * - * @param[in] flasharg - flash structure - * @param[in] msg - message structure - */ -static void -bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg) -{ - struct bfa_flash *flash = flasharg; - u32 status; - - union { - struct bfi_flash_query_rsp *query; - struct bfi_flash_write_rsp *write; - struct bfi_flash_read_rsp *read; - struct bfi_mbmsg *msg; - } m; - - m.msg = msg; - - /* receiving response after ioc failure */ - if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) - return; - - switch (msg->mh.msg_id) { - case BFI_FLASH_I2H_QUERY_RSP: - status = be32_to_cpu(m.query->status); - if (status == BFA_STATUS_OK) { - u32 i; - struct bfa_flash_attr *attr, *f; - - attr = (struct bfa_flash_attr *) flash->ubuf; - f = (struct bfa_flash_attr *) flash->dbuf_kva; - attr->status = be32_to_cpu(f->status); - attr->npart = be32_to_cpu(f->npart); - for (i = 0; i < attr->npart; i++) { - attr->part[i].part_type = - be32_to_cpu(f->part[i].part_type); - attr->part[i].part_instance = - be32_to_cpu(f->part[i].part_instance); - attr->part[i].part_off = - be32_to_cpu(f->part[i].part_off); - attr->part[i].part_size = - be32_to_cpu(f->part[i].part_size); - attr->part[i].part_len = - be32_to_cpu(f->part[i].part_len); - attr->part[i].part_status = - be32_to_cpu(f->part[i].part_status); - } - } - flash->status = status; - bfa_flash_cb(flash); - break; - case BFI_FLASH_I2H_WRITE_RSP: - status = be32_to_cpu(m.write->status); - if (status != BFA_STATUS_OK || flash->residue == 0) { - flash->status = status; - bfa_flash_cb(flash); - } else - bfa_flash_write_send(flash); - break; - case BFI_FLASH_I2H_READ_RSP: - status = be32_to_cpu(m.read->status); - if (status != BFA_STATUS_OK) { - flash->status = status; - bfa_flash_cb(flash); - } else { - u32 len = be32_to_cpu(m.read->length); - memcpy(flash->ubuf + flash->offset, - flash->dbuf_kva, len); - flash->residue -= len; - flash->offset += len; - if (flash->residue == 0) { - flash->status = status; - bfa_flash_cb(flash); - } else - bfa_flash_read_send(flash); - } - break; - case BFI_FLASH_I2H_BOOT_VER_RSP: - case BFI_FLASH_I2H_EVENT: - break; - default: - WARN_ON(1); - } -} - -/* - * Flash memory info API. - */ -u32 -bfa_nw_flash_meminfo(void) -{ - return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); -} - -/* - * Flash attach API. 
- * - * @param[in] flash - flash structure - * @param[in] ioc - ioc structure - * @param[in] dev - device structure - */ -void -bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev) -{ - flash->ioc = ioc; - flash->cbfn = NULL; - flash->cbarg = NULL; - flash->op_busy = 0; - - bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash); - bfa_q_qe_init(&flash->ioc_notify); - bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash); - list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q); -} - -/* - * Claim memory for flash - * - * @param[in] flash - flash structure - * @param[in] dm_kva - pointer to virtual memory address - * @param[in] dm_pa - physical memory address - */ -void -bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa) -{ - flash->dbuf_kva = dm_kva; - flash->dbuf_pa = dm_pa; - memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ); - dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); - dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); -} - -/* - * Get flash attribute. - * - * @param[in] flash - flash structure - * @param[in] attr - flash attribute structure - * @param[in] cbfn - callback function - * @param[in] cbarg - callback argument - * - * Return status. - */ -enum bfa_status -bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr, - bfa_cb_flash cbfn, void *cbarg) -{ - struct bfi_flash_query_req *msg = - (struct bfi_flash_query_req *) flash->mb.msg; - - if (!bfa_nw_ioc_is_operational(flash->ioc)) - return BFA_STATUS_IOC_NON_OP; - - if (flash->op_busy) - return BFA_STATUS_DEVBUSY; - - flash->op_busy = 1; - flash->cbfn = cbfn; - flash->cbarg = cbarg; - flash->ubuf = (u8 *) attr; - - bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ, - bfa_ioc_portid(flash->ioc)); - bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa); - bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); - - return BFA_STATUS_OK; -} - -/* - * Update flash partition. - * - * @param[in] flash - flash structure - * @param[in] type - flash partition type - * @param[in] instance - flash partition instance - * @param[in] buf - update data buffer - * @param[in] len - data buffer length - * @param[in] offset - offset relative to the partition starting address - * @param[in] cbfn - callback function - * @param[in] cbarg - callback argument - * - * Return status. - */ -enum bfa_status -bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance, - void *buf, u32 len, u32 offset, - bfa_cb_flash cbfn, void *cbarg) -{ - if (!bfa_nw_ioc_is_operational(flash->ioc)) - return BFA_STATUS_IOC_NON_OP; - - /* - * 'len' must be in word (4-byte) boundary - */ - if (!len || (len & 0x03)) - return BFA_STATUS_FLASH_BAD_LEN; - - if (type == BFA_FLASH_PART_MFG) - return BFA_STATUS_EINVAL; - - if (flash->op_busy) - return BFA_STATUS_DEVBUSY; - - flash->op_busy = 1; - flash->cbfn = cbfn; - flash->cbarg = cbarg; - flash->type = type; - flash->instance = instance; - flash->residue = len; - flash->offset = 0; - flash->addr_off = offset; - flash->ubuf = buf; - - bfa_flash_write_send(flash); - - return BFA_STATUS_OK; -} - -/* - * Read flash partition. 
- * - * @param[in] flash - flash structure - * @param[in] type - flash partition type - * @param[in] instance - flash partition instance - * @param[in] buf - read data buffer - * @param[in] len - data buffer length - * @param[in] offset - offset relative to the partition starting address - * @param[in] cbfn - callback function - * @param[in] cbarg - callback argument - * - * Return status. - */ -enum bfa_status -bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance, - void *buf, u32 len, u32 offset, - bfa_cb_flash cbfn, void *cbarg) -{ - if (!bfa_nw_ioc_is_operational(flash->ioc)) - return BFA_STATUS_IOC_NON_OP; - - /* - * 'len' must be in word (4-byte) boundary - */ - if (!len || (len & 0x03)) - return BFA_STATUS_FLASH_BAD_LEN; - - if (flash->op_busy) - return BFA_STATUS_DEVBUSY; - - flash->op_busy = 1; - flash->cbfn = cbfn; - flash->cbarg = cbarg; - flash->type = type; - flash->instance = instance; - flash->residue = len; - flash->offset = 0; - flash->addr_off = offset; - flash->ubuf = buf; - - bfa_flash_read_send(flash); - - return BFA_STATUS_OK; -} diff --git a/trunk/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/trunk/drivers/net/ethernet/brocade/bna/bfa_ioc.h index 3b4460fdc148..ca158d1eaef3 100644 --- a/trunk/drivers/net/ethernet/brocade/bna/bfa_ioc.h +++ b/trunk/drivers/net/ethernet/brocade/bna/bfa_ioc.h @@ -27,8 +27,6 @@ #define BFA_IOC_HWSEM_TOV 500 /* msecs */ #define BFA_IOC_HB_TOV 500 /* msecs */ #define BFA_IOC_POLL_TOV 200 /* msecs */ -#define BNA_DBG_FWTRC_LEN (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \ - BFI_IOC_TRC_HDR_SZ) /** * PCI device information required by IOC @@ -70,16 +68,6 @@ __bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa) dma_addr->a32.addr_hi = (u32) htonl(upper_32_bits(pa)); } -#define bfa_alen_set(__alen, __len, __pa) \ - __bfa_alen_set(__alen, __len, (u64)__pa) - -static inline void -__bfa_alen_set(struct bfi_alen *alen, u32 len, u64 pa) -{ - alen->al_len = cpu_to_be32(len); - bfa_dma_be_addr_set(alen->al_addr, pa); -} - struct bfa_ioc_regs { void __iomem *hfn_mbox_cmd; void __iomem *hfn_mbox; @@ -308,7 +296,6 @@ void bfa_nw_ioc_disable(struct bfa_ioc *ioc); void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc); bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc); -bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc); void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr); void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc, struct bfa_ioc_notify *notify); @@ -320,9 +307,6 @@ void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr); mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc); -void bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave); -int bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen); -int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen); /* * Timeout APIs @@ -338,42 +322,4 @@ void bfa_nw_iocpf_sem_timeout(void *ioc); u32 *bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off); u32 bfa_cb_image_get_size(enum bfi_asic_gen asic_gen); -/* - * Flash module specific - */ -typedef void (*bfa_cb_flash) (void *cbarg, enum bfa_status status); - -struct bfa_flash { - struct bfa_ioc *ioc; /* back pointer to ioc */ - u32 type; /* partition type */ - u8 instance; /* partition instance */ - u8 rsv[3]; - u32 op_busy; /* operation busy flag */ - u32 residue; /* residual length */ - u32 offset; /* offset */ - enum bfa_status status; /* status */ - u8 *dbuf_kva; /* dma buf virtual address */ - u64 
dbuf_pa; /* dma buf physical address */ - bfa_cb_flash cbfn; /* user callback function */ - void *cbarg; /* user callback arg */ - u8 *ubuf; /* user supplied buffer */ - u32 addr_off; /* partition address offset */ - struct bfa_mbox_cmd mb; /* mailbox */ - struct bfa_ioc_notify ioc_notify; /* ioc event notify */ -}; - -enum bfa_status bfa_nw_flash_get_attr(struct bfa_flash *flash, - struct bfa_flash_attr *attr, - bfa_cb_flash cbfn, void *cbarg); -enum bfa_status bfa_nw_flash_update_part(struct bfa_flash *flash, - u32 type, u8 instance, void *buf, u32 len, u32 offset, - bfa_cb_flash cbfn, void *cbarg); -enum bfa_status bfa_nw_flash_read_part(struct bfa_flash *flash, - u32 type, u8 instance, void *buf, u32 len, u32 offset, - bfa_cb_flash cbfn, void *cbarg); -u32 bfa_nw_flash_meminfo(void); -void bfa_nw_flash_attach(struct bfa_flash *flash, - struct bfa_ioc *ioc, void *dev); -void bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa); - #endif /* __BFA_IOC_H__ */ diff --git a/trunk/drivers/net/ethernet/brocade/bna/bfi.h b/trunk/drivers/net/ethernet/brocade/bna/bfi.h index 0d9df695397a..7a1393aabd43 100644 --- a/trunk/drivers/net/ethernet/brocade/bna/bfi.h +++ b/trunk/drivers/net/ethernet/brocade/bna/bfi.h @@ -83,14 +83,6 @@ union bfi_addr_u { } a32; }; -/** - * Generic DMA addr-len pair. - */ -struct bfi_alen { - union bfi_addr_u al_addr; /* DMA addr of buffer */ - u32 al_len; /* length of buffer */ -}; - /* * Large Message structure - 128 Bytes size Msgs */ @@ -257,8 +249,6 @@ struct bfi_ioc_getattr_reply { */ #define BFI_IOC_TRC_OFF (0x4b00) #define BFI_IOC_TRC_ENTS 256 -#define BFI_IOC_TRC_ENT_SZ 16 -#define BFI_IOC_TRC_HDR_SZ 32 #define BFI_IOC_FW_SIGNATURE (0xbfadbfad) #define BFI_IOC_MD5SUM_SZ 4 @@ -486,93 +476,6 @@ struct bfi_msgq_i2h_cmdq_copy_req { u16 len; }; -/* - * FLASH module specific - */ -enum bfi_flash_h2i_msgs { - BFI_FLASH_H2I_QUERY_REQ = 1, - BFI_FLASH_H2I_ERASE_REQ = 2, - BFI_FLASH_H2I_WRITE_REQ = 3, - BFI_FLASH_H2I_READ_REQ = 4, - BFI_FLASH_H2I_BOOT_VER_REQ = 5, -}; - -enum bfi_flash_i2h_msgs { - BFI_FLASH_I2H_QUERY_RSP = BFA_I2HM(1), - BFI_FLASH_I2H_ERASE_RSP = BFA_I2HM(2), - BFI_FLASH_I2H_WRITE_RSP = BFA_I2HM(3), - BFI_FLASH_I2H_READ_RSP = BFA_I2HM(4), - BFI_FLASH_I2H_BOOT_VER_RSP = BFA_I2HM(5), - BFI_FLASH_I2H_EVENT = BFA_I2HM(127), -}; - -/* - * Flash query request - */ -struct bfi_flash_query_req { - struct bfi_mhdr mh; /* Common msg header */ - struct bfi_alen alen; -}; - -/* - * Flash write request - */ -struct bfi_flash_write_req { - struct bfi_mhdr mh; /* Common msg header */ - struct bfi_alen alen; - u32 type; /* partition type */ - u8 instance; /* partition instance */ - u8 last; - u8 rsv[2]; - u32 offset; - u32 length; -}; - -/* - * Flash read request - */ -struct bfi_flash_read_req { - struct bfi_mhdr mh; /* Common msg header */ - u32 type; /* partition type */ - u8 instance; /* partition instance */ - u8 rsv[3]; - u32 offset; - u32 length; - struct bfi_alen alen; -}; - -/* - * Flash query response - */ -struct bfi_flash_query_rsp { - struct bfi_mhdr mh; /* Common msg header */ - u32 status; -}; - -/* - * Flash read response - */ -struct bfi_flash_read_rsp { - struct bfi_mhdr mh; /* Common msg header */ - u32 type; /* partition type */ - u8 instance; /* partition instance */ - u8 rsv[3]; - u32 status; - u32 length; -}; - -/* - * Flash write response - */ -struct bfi_flash_write_rsp { - struct bfi_mhdr mh; /* Common msg header */ - u32 type; /* partition type */ - u8 instance; /* partition instance */ - u8 rsv[3]; - u32 status; - u32 length; 
-}; - #pragma pack() #endif /* __BFI_H__ */ diff --git a/trunk/drivers/net/ethernet/brocade/bna/bna_enet.c b/trunk/drivers/net/ethernet/brocade/bna/bna_enet.c index 9ccc586e3767..26f5c5abfd1f 100644 --- a/trunk/drivers/net/ethernet/brocade/bna/bna_enet.c +++ b/trunk/drivers/net/ethernet/brocade/bna/bna_enet.c @@ -1727,7 +1727,6 @@ bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna, bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma); kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva; - bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva); /** * Attach common modules (Diag, SFP, CEE, Port) and claim respective @@ -1741,11 +1740,6 @@ bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna, kva += bfa_nw_cee_meminfo(); dma += bfa_nw_cee_meminfo(); - bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna); - bfa_nw_flash_memclaim(&bna->flash, kva, dma); - kva += bfa_nw_flash_meminfo(); - dma += bfa_nw_flash_meminfo(); - bfa_msgq_attach(&bna->msgq, &ioceth->ioc); bfa_msgq_memclaim(&bna->msgq, kva, dma); bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna); @@ -1898,8 +1892,7 @@ bna_res_req(struct bna_res_info *res_info) res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1; res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN( (bfa_nw_cee_meminfo() + - bfa_nw_flash_meminfo() + - bfa_msgq_meminfo()), PAGE_SIZE); + bfa_msgq_meminfo()), PAGE_SIZE); /* DMA memory for retrieving IOC attributes */ res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM; @@ -1911,8 +1904,8 @@ bna_res_req(struct bna_res_info *res_info) /* Virtual memory for retreiving fw_trc */ res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM; res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA; - res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1; - res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN; + res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0; + res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0; /* DMA memory for retreiving stats */ res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM; diff --git a/trunk/drivers/net/ethernet/brocade/bna/bna_types.h b/trunk/drivers/net/ethernet/brocade/bna/bna_types.h index e8d3ab7ea6cb..d090fbfb12fa 100644 --- a/trunk/drivers/net/ethernet/brocade/bna/bna_types.h +++ b/trunk/drivers/net/ethernet/brocade/bna/bna_types.h @@ -427,7 +427,7 @@ struct bna_ethport { /* Doorbell structure */ struct bna_ib_dbell { - void __iomem *doorbell_addr; + void *__iomem doorbell_addr; u32 doorbell_ack; }; @@ -463,7 +463,7 @@ struct bna_tcb { u32 consumer_index; volatile u32 *hw_consumer_index; u32 q_depth; - void __iomem *q_dbell; + void *__iomem q_dbell; struct bna_ib_dbell *i_dbell; int page_idx; int page_count; @@ -599,7 +599,7 @@ struct bna_rcb { u32 producer_index; u32 consumer_index; u32 q_depth; - void __iomem *q_dbell; + void *__iomem q_dbell; int page_idx; int page_count; /* Control path */ @@ -966,7 +966,6 @@ struct bna { struct bna_ioceth ioceth; struct bfa_cee cee; - struct bfa_flash flash; struct bfa_msgq msgq; struct bna_ethport ethport; diff --git a/trunk/drivers/net/ethernet/brocade/bna/bnad.c b/trunk/drivers/net/ethernet/brocade/bna/bnad.c index be7d91e4b785..7f3091e7eb42 100644 --- a/trunk/drivers/net/ethernet/brocade/bna/bnad.c +++ b/trunk/drivers/net/ethernet/brocade/bna/bnad.c @@ -44,18 +44,11 @@ static uint bnad_ioc_auto_recover = 1; module_param(bnad_ioc_auto_recover, uint, 0444); MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery"); -static uint bna_debugfs_enable = 1; 
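The flash services removed in the hunks above are asynchronous: each request returns a BFA status immediately and reports completion later through the bfa_cb_flash callback fired from the mailbox response path. A minimal caller sketch, assuming the bnad_iocmd_comp / bnad_cb_completion pattern that the ethtool EEPROM code (also removed further below) relied on; example_read_flash_part is a hypothetical helper, not part of this patch:

	/* Hypothetical caller sketch -- not part of this patch.  Assumes the
	 * driver context from bnad.h (struct bnad, struct bnad_iocmd_comp,
	 * bnad_cb_completion) that this series removes elsewhere. */
	static int
	example_read_flash_part(struct bnad *bnad, u32 part_type,
				void *buf, u32 len, u32 offset)
	{
		struct bnad_iocmd_comp fcomp;
		unsigned long flags;
		int ret;

		fcomp.bnad = bnad;
		fcomp.comp_status = 0;
		init_completion(&fcomp.comp);

		/* Requests are issued under bna_lock; len must be a multiple
		 * of 4 or the call fails with BFA_STATUS_FLASH_BAD_LEN. */
		spin_lock_irqsave(&bnad->bna_lock, flags);
		ret = bfa_nw_flash_read_part(&bnad->bna.flash, part_type,
					     bnad->id, buf, len, offset,
					     bnad_cb_completion, &fcomp);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		if (ret != BFA_STATUS_OK)
			return ret;

		/* bnad_cb_completion() completes fcomp from the mailbox
		 * response handler once the firmware has finished. */
		wait_for_completion(&fcomp.comp);
		return fcomp.comp_status;
	}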
-module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1," - " Range[false:0|true:1]"); - /* * Global variables */ u32 bnad_rxqs_per_cq = 2; -static u32 bna_id; -static struct mutex bnad_list_mutex; -static LIST_HEAD(bnad_list); + static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; /* @@ -82,23 +75,6 @@ do { \ #define BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */ -static void -bnad_add_to_list(struct bnad *bnad) -{ - mutex_lock(&bnad_list_mutex); - list_add_tail(&bnad->list_entry, &bnad_list); - bnad->id = bna_id++; - mutex_unlock(&bnad_list_mutex); -} - -static void -bnad_remove_from_list(struct bnad *bnad) -{ - mutex_lock(&bnad_list_mutex); - list_del(&bnad->list_entry); - mutex_unlock(&bnad_list_mutex); -} - /* * Reinitialize completions in CQ, once Rx is taken down */ @@ -747,7 +723,7 @@ void bnad_cb_ethport_link_status(struct bnad *bnad, enum bna_link_status link_status) { - bool link_up = false; + bool link_up = 0; link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP); @@ -1108,16 +1084,6 @@ bnad_cb_enet_mtu_set(struct bnad *bnad) complete(&bnad->bnad_completions.mtu_comp); } -void -bnad_cb_completion(void *arg, enum bfa_status status) -{ - struct bnad_iocmd_comp *iocmd_comp = - (struct bnad_iocmd_comp *)arg; - - iocmd_comp->comp_status = (u32) status; - complete(&iocmd_comp->comp); -} - /* Resource allocation, free functions */ static void @@ -3002,7 +2968,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu) return err; } -static int +static void bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned short vid) { @@ -3010,7 +2976,7 @@ bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned long flags; if (!bnad->rx_info[0].rx) - return 0; + return; mutex_lock(&bnad->conf_mutex); @@ -3020,11 +2986,9 @@ bnad_vlan_rx_add_vid(struct net_device *netdev, spin_unlock_irqrestore(&bnad->bna_lock, flags); mutex_unlock(&bnad->conf_mutex); - - return 0; } -static int +static void bnad_vlan_rx_kill_vid(struct net_device *netdev, unsigned short vid) { @@ -3032,7 +2996,7 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev, unsigned long flags; if (!bnad->rx_info[0].rx) - return 0; + return; mutex_lock(&bnad->conf_mutex); @@ -3042,8 +3006,6 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev, spin_unlock_irqrestore(&bnad->bna_lock, flags); mutex_unlock(&bnad->conf_mutex); - - return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -3201,14 +3163,12 @@ bnad_lock_init(struct bnad *bnad) { spin_lock_init(&bnad->bna_lock); mutex_init(&bnad->conf_mutex); - mutex_init(&bnad_list_mutex); } static void bnad_lock_uninit(struct bnad *bnad) { mutex_destroy(&bnad->conf_mutex); - mutex_destroy(&bnad_list_mutex); } /* PCI Initialization */ @@ -3226,7 +3186,7 @@ bnad_pci_init(struct bnad *bnad, goto disable_device; if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { - *using_dac = true; + *using_dac = 1; } else { err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { @@ -3235,7 +3195,7 @@ bnad_pci_init(struct bnad *bnad, if (err) goto release_regions; } - *using_dac = false; + *using_dac = 0; } pci_set_master(pdev); return 0; @@ -3289,8 +3249,8 @@ bnad_pci_probe(struct pci_dev *pdev, return err; } bnad = netdev_priv(netdev); + bnad_lock_init(bnad); - bnad_add_to_list(bnad); mutex_lock(&bnad->conf_mutex); /* @@ -3317,10 +3277,6 @@ bnad_pci_probe(struct pci_dev *pdev, /* Set link to down state */ netif_carrier_off(netdev); - /* Setup the 
debugfs node for this bfad */ - if (bna_debugfs_enable) - bnad_debugfs_init(bnad); - /* Get resource requirement form bna */ spin_lock_irqsave(&bnad->bna_lock, flags); bna_res_req(&bnad->res_info[0]); @@ -3442,15 +3398,11 @@ bnad_pci_probe(struct pci_dev *pdev, res_free: bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX); drv_uninit: - /* Remove the debugfs node for this bnad */ - kfree(bnad->regdata); - bnad_debugfs_uninit(bnad); bnad_uninit(bnad); pci_uninit: bnad_pci_uninit(pdev); unlock_mutex: mutex_unlock(&bnad->conf_mutex); - bnad_remove_from_list(bnad); bnad_lock_uninit(bnad); free_netdev(netdev); return err; @@ -3489,11 +3441,7 @@ bnad_pci_remove(struct pci_dev *pdev) bnad_disable_msix(bnad); bnad_pci_uninit(pdev); mutex_unlock(&bnad->conf_mutex); - bnad_remove_from_list(bnad); bnad_lock_uninit(bnad); - /* Remove the debugfs node for this bnad */ - kfree(bnad->regdata); - bnad_debugfs_uninit(bnad); bnad_uninit(bnad); free_netdev(netdev); } diff --git a/trunk/drivers/net/ethernet/brocade/bna/bnad.h b/trunk/drivers/net/ethernet/brocade/bna/bnad.h index 55824d92699f..5487ca42d018 100644 --- a/trunk/drivers/net/ethernet/brocade/bna/bnad.h +++ b/trunk/drivers/net/ethernet/brocade/bna/bnad.h @@ -124,12 +124,6 @@ enum bnad_link_state { BNAD_LS_UP = 1 }; -struct bnad_iocmd_comp { - struct bnad *bnad; - struct completion comp; - int comp_status; -}; - struct bnad_completion { struct completion ioc_comp; struct completion ucast_comp; @@ -257,8 +251,6 @@ struct bnad_unmap_q { struct bnad { struct net_device *netdev; - u32 id; - struct list_head list_entry; /* Data path */ struct bnad_tx_info tx_info[BNAD_MAX_TX]; @@ -328,26 +320,12 @@ struct bnad { char adapter_name[BNAD_NAME_LEN]; char port_name[BNAD_NAME_LEN]; char mbox_irq_name[BNAD_NAME_LEN]; - - /* debugfs specific data */ - char *regdata; - u32 reglen; - struct dentry *bnad_dentry_files[5]; - struct dentry *port_debugfs_root; -}; - -struct bnad_drvinfo { - struct bfa_ioc_attr ioc_attr; - struct bfa_cee_attr cee_attr; - struct bfa_flash_attr flash_attr; - u32 cee_status; - u32 flash_status; }; /* * EXTERN VARIABLES */ -extern const struct firmware *bfi_fw; +extern struct firmware *bfi_fw; extern u32 bnad_rxqs_per_cq; /* @@ -362,7 +340,6 @@ extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr); extern int bnad_enable_default_bcast(struct bnad *bnad); extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id); extern void bnad_set_ethtool_ops(struct net_device *netdev); -extern void bnad_cb_completion(void *arg, enum bfa_status status); /* Configuration & setup */ extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad); @@ -382,10 +359,6 @@ extern void bnad_netdev_qstats_fill(struct bnad *bnad, extern void bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats); -/* Debugfs */ -void bnad_debugfs_init(struct bnad *bnad); -void bnad_debugfs_uninit(struct bnad *bnad); - /** * MACROS */ diff --git a/trunk/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/trunk/drivers/net/ethernet/brocade/bna/bnad_debugfs.c deleted file mode 100644 index 592ad3929f53..000000000000 --- a/trunk/drivers/net/ethernet/brocade/bna/bnad_debugfs.c +++ /dev/null @@ -1,623 +0,0 @@ -/* - * Linux network driver for Brocade Converged Network Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -/* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - */ - -#include -#include -#include "bnad.h" - -/* - * BNA debufs interface - * - * To access the interface, debugfs file system should be mounted - * if not already mounted using: - * mount -t debugfs none /sys/kernel/debug - * - * BNA Hierarchy: - * - bna/pci_dev: - * where the pci_name corresponds to the one under /sys/bus/pci/drivers/bna - * - * Debugging service available per pci_dev: - * fwtrc: To collect current firmware trace. - * fwsave: To collect last saved fw trace as a result of firmware crash. - * regwr: To write one word to chip register - * regrd: To read one or more words from chip register. - */ - -struct bnad_debug_info { - char *debug_buffer; - void *i_private; - int buffer_len; -}; - -static int -bnad_debugfs_open_fwtrc(struct inode *inode, struct file *file) -{ - struct bnad *bnad = inode->i_private; - struct bnad_debug_info *fw_debug; - unsigned long flags; - int rc; - - fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL); - if (!fw_debug) - return -ENOMEM; - - fw_debug->buffer_len = BNA_DBG_FWTRC_LEN; - - fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL); - if (!fw_debug->debug_buffer) { - kfree(fw_debug); - fw_debug = NULL; - pr_warn("bna %s: Failed to allocate fwtrc buffer\n", - pci_name(bnad->pcidev)); - return -ENOMEM; - } - - spin_lock_irqsave(&bnad->bna_lock, flags); - rc = bfa_nw_ioc_debug_fwtrc(&bnad->bna.ioceth.ioc, - fw_debug->debug_buffer, - &fw_debug->buffer_len); - spin_unlock_irqrestore(&bnad->bna_lock, flags); - if (rc != BFA_STATUS_OK) { - kfree(fw_debug->debug_buffer); - fw_debug->debug_buffer = NULL; - kfree(fw_debug); - fw_debug = NULL; - pr_warn("bnad %s: Failed to collect fwtrc\n", - pci_name(bnad->pcidev)); - return -ENOMEM; - } - - file->private_data = fw_debug; - - return 0; -} - -static int -bnad_debugfs_open_fwsave(struct inode *inode, struct file *file) -{ - struct bnad *bnad = inode->i_private; - struct bnad_debug_info *fw_debug; - unsigned long flags; - int rc; - - fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL); - if (!fw_debug) - return -ENOMEM; - - fw_debug->buffer_len = BNA_DBG_FWTRC_LEN; - - fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL); - if (!fw_debug->debug_buffer) { - kfree(fw_debug); - fw_debug = NULL; - pr_warn("bna %s: Failed to allocate fwsave buffer\n", - pci_name(bnad->pcidev)); - return -ENOMEM; - } - - spin_lock_irqsave(&bnad->bna_lock, flags); - rc = bfa_nw_ioc_debug_fwsave(&bnad->bna.ioceth.ioc, - fw_debug->debug_buffer, - &fw_debug->buffer_len); - spin_unlock_irqrestore(&bnad->bna_lock, flags); - if (rc != BFA_STATUS_OK && rc != BFA_STATUS_ENOFSAVE) { - kfree(fw_debug->debug_buffer); - fw_debug->debug_buffer = NULL; - kfree(fw_debug); - fw_debug = NULL; - pr_warn("bna %s: Failed to collect fwsave\n", - pci_name(bnad->pcidev)); - return -ENOMEM; - } - - file->private_data = fw_debug; - - return 0; -} - -static int -bnad_debugfs_open_reg(struct inode *inode, struct file *file) -{ - struct 
bnad_debug_info *reg_debug; - - reg_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL); - if (!reg_debug) - return -ENOMEM; - - reg_debug->i_private = inode->i_private; - - file->private_data = reg_debug; - - return 0; -} - -static int -bnad_get_debug_drvinfo(struct bnad *bnad, void *buffer, u32 len) -{ - struct bnad_drvinfo *drvinfo = (struct bnad_drvinfo *) buffer; - struct bnad_iocmd_comp fcomp; - unsigned long flags = 0; - int ret = BFA_STATUS_FAILED; - - /* Get IOC info */ - spin_lock_irqsave(&bnad->bna_lock, flags); - bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, &drvinfo->ioc_attr); - spin_unlock_irqrestore(&bnad->bna_lock, flags); - - /* Retrieve CEE related info */ - fcomp.bnad = bnad; - fcomp.comp_status = 0; - init_completion(&fcomp.comp); - spin_lock_irqsave(&bnad->bna_lock, flags); - ret = bfa_nw_cee_get_attr(&bnad->bna.cee, &drvinfo->cee_attr, - bnad_cb_completion, &fcomp); - if (ret != BFA_STATUS_OK) { - spin_unlock_irqrestore(&bnad->bna_lock, flags); - goto out; - } - spin_unlock_irqrestore(&bnad->bna_lock, flags); - wait_for_completion(&fcomp.comp); - drvinfo->cee_status = fcomp.comp_status; - - /* Retrieve flash partition info */ - fcomp.comp_status = 0; - init_completion(&fcomp.comp); - spin_lock_irqsave(&bnad->bna_lock, flags); - ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr, - bnad_cb_completion, &fcomp); - if (ret != BFA_STATUS_OK) { - spin_unlock_irqrestore(&bnad->bna_lock, flags); - goto out; - } - spin_unlock_irqrestore(&bnad->bna_lock, flags); - wait_for_completion(&fcomp.comp); - drvinfo->flash_status = fcomp.comp_status; -out: - return ret; -} - -static int -bnad_debugfs_open_drvinfo(struct inode *inode, struct file *file) -{ - struct bnad *bnad = inode->i_private; - struct bnad_debug_info *drv_info; - int rc; - - drv_info = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL); - if (!drv_info) - return -ENOMEM; - - drv_info->buffer_len = sizeof(struct bnad_drvinfo); - - drv_info->debug_buffer = kzalloc(drv_info->buffer_len, GFP_KERNEL); - if (!drv_info->debug_buffer) { - kfree(drv_info); - drv_info = NULL; - pr_warn("bna %s: Failed to allocate drv info buffer\n", - pci_name(bnad->pcidev)); - return -ENOMEM; - } - - mutex_lock(&bnad->conf_mutex); - rc = bnad_get_debug_drvinfo(bnad, drv_info->debug_buffer, - drv_info->buffer_len); - mutex_unlock(&bnad->conf_mutex); - if (rc != BFA_STATUS_OK) { - kfree(drv_info->debug_buffer); - drv_info->debug_buffer = NULL; - kfree(drv_info); - drv_info = NULL; - pr_warn("bna %s: Failed to collect drvinfo\n", - pci_name(bnad->pcidev)); - return -ENOMEM; - } - - file->private_data = drv_info; - - return 0; -} - -/* Changes the current file position */ -static loff_t -bnad_debugfs_lseek(struct file *file, loff_t offset, int orig) -{ - loff_t pos = file->f_pos; - struct bnad_debug_info *debug = file->private_data; - - if (!debug) - return -EINVAL; - - switch (orig) { - case 0: - file->f_pos = offset; - break; - case 1: - file->f_pos += offset; - break; - case 2: - file->f_pos = debug->buffer_len - offset; - break; - default: - return -EINVAL; - } - - if (file->f_pos < 0 || file->f_pos > debug->buffer_len) { - file->f_pos = pos; - return -EINVAL; - } - - return file->f_pos; -} - -static ssize_t -bnad_debugfs_read(struct file *file, char __user *buf, - size_t nbytes, loff_t *pos) -{ - struct bnad_debug_info *debug = file->private_data; - - if (!debug || !debug->debug_buffer) - return 0; - - return simple_read_from_buffer(buf, nbytes, pos, - debug->debug_buffer, debug->buffer_len); -} - -#define 
BFA_REG_CT_ADDRSZ (0x40000) -#define BFA_REG_CB_ADDRSZ (0x20000) -#define BFA_REG_ADDRSZ(__ioc) \ - ((u32)(bfa_asic_id_ctc(bfa_ioc_devid(__ioc)) ? \ - BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ)) -#define BFA_REG_ADDRMSK(__ioc) (BFA_REG_ADDRSZ(__ioc) - 1) - -/* - * Function to check if the register offset passed is valid. - */ -static int -bna_reg_offset_check(struct bfa_ioc *ioc, u32 offset, u32 len) -{ - u8 area; - - /* check [16:15] */ - area = (offset >> 15) & 0x7; - if (area == 0) { - /* PCIe core register */ - if ((offset + (len<<2)) > 0x8000) /* 8k dwords or 32KB */ - return BFA_STATUS_EINVAL; - } else if (area == 0x1) { - /* CB 32 KB memory page */ - if ((offset + (len<<2)) > 0x10000) /* 8k dwords or 32KB */ - return BFA_STATUS_EINVAL; - } else { - /* CB register space 64KB */ - if ((offset + (len<<2)) > BFA_REG_ADDRMSK(ioc)) - return BFA_STATUS_EINVAL; - } - return BFA_STATUS_OK; -} - -static ssize_t -bnad_debugfs_read_regrd(struct file *file, char __user *buf, - size_t nbytes, loff_t *pos) -{ - struct bnad_debug_info *regrd_debug = file->private_data; - struct bnad *bnad = (struct bnad *)regrd_debug->i_private; - ssize_t rc; - - if (!bnad->regdata) - return 0; - - rc = simple_read_from_buffer(buf, nbytes, pos, - bnad->regdata, bnad->reglen); - - if ((*pos + nbytes) >= bnad->reglen) { - kfree(bnad->regdata); - bnad->regdata = NULL; - bnad->reglen = 0; - } - - return rc; -} - -static ssize_t -bnad_debugfs_write_regrd(struct file *file, const char __user *buf, - size_t nbytes, loff_t *ppos) -{ - struct bnad_debug_info *regrd_debug = file->private_data; - struct bnad *bnad = (struct bnad *)regrd_debug->i_private; - struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc; - int addr, len, rc, i; - u32 *regbuf; - void __iomem *rb, *reg_addr; - unsigned long flags; - void *kern_buf; - - /* Allocate memory to store the user space buf */ - kern_buf = kzalloc(nbytes, GFP_KERNEL); - if (!kern_buf) { - pr_warn("bna %s: Failed to allocate user buffer\n", - pci_name(bnad->pcidev)); - return -ENOMEM; - } - - if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) { - kfree(kern_buf); - return -ENOMEM; - } - - rc = sscanf(kern_buf, "%x:%x", &addr, &len); - if (rc < 2) { - pr_warn("bna %s: Failed to read user buffer\n", - pci_name(bnad->pcidev)); - kfree(kern_buf); - return -EINVAL; - } - - kfree(kern_buf); - kfree(bnad->regdata); - bnad->regdata = NULL; - bnad->reglen = 0; - - bnad->regdata = kzalloc(len << 2, GFP_KERNEL); - if (!bnad->regdata) { - pr_warn("bna %s: Failed to allocate regrd buffer\n", - pci_name(bnad->pcidev)); - return -ENOMEM; - } - - bnad->reglen = len << 2; - rb = bfa_ioc_bar0(ioc); - addr &= BFA_REG_ADDRMSK(ioc); - - /* offset and len sanity check */ - rc = bna_reg_offset_check(ioc, addr, len); - if (rc) { - pr_warn("bna %s: Failed reg offset check\n", - pci_name(bnad->pcidev)); - kfree(bnad->regdata); - bnad->regdata = NULL; - bnad->reglen = 0; - return -EINVAL; - } - - reg_addr = rb + addr; - regbuf = (u32 *)bnad->regdata; - spin_lock_irqsave(&bnad->bna_lock, flags); - for (i = 0; i < len; i++) { - *regbuf = readl(reg_addr); - regbuf++; - reg_addr += sizeof(u32); - } - spin_unlock_irqrestore(&bnad->bna_lock, flags); - - return nbytes; -} - -static ssize_t -bnad_debugfs_write_regwr(struct file *file, const char __user *buf, - size_t nbytes, loff_t *ppos) -{ - struct bnad_debug_info *debug = file->private_data; - struct bnad *bnad = (struct bnad *)debug->i_private; - struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc; - int addr, val, rc; - void __iomem *reg_addr; - unsigned long flags; - void 
*kern_buf; - - /* Allocate memory to store the user space buf */ - kern_buf = kzalloc(nbytes, GFP_KERNEL); - if (!kern_buf) { - pr_warn("bna %s: Failed to allocate user buffer\n", - pci_name(bnad->pcidev)); - return -ENOMEM; - } - - if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) { - kfree(kern_buf); - return -ENOMEM; - } - - rc = sscanf(kern_buf, "%x:%x", &addr, &val); - if (rc < 2) { - pr_warn("bna %s: Failed to read user buffer\n", - pci_name(bnad->pcidev)); - kfree(kern_buf); - return -EINVAL; - } - kfree(kern_buf); - - addr &= BFA_REG_ADDRMSK(ioc); /* offset only 17 bit and word align */ - - /* offset and len sanity check */ - rc = bna_reg_offset_check(ioc, addr, 1); - if (rc) { - pr_warn("bna %s: Failed reg offset check\n", - pci_name(bnad->pcidev)); - return -EINVAL; - } - - reg_addr = (bfa_ioc_bar0(ioc)) + addr; - spin_lock_irqsave(&bnad->bna_lock, flags); - writel(val, reg_addr); - spin_unlock_irqrestore(&bnad->bna_lock, flags); - - return nbytes; -} - -static int -bnad_debugfs_release(struct inode *inode, struct file *file) -{ - struct bnad_debug_info *debug = file->private_data; - - if (!debug) - return 0; - - file->private_data = NULL; - kfree(debug); - return 0; -} - -static int -bnad_debugfs_buffer_release(struct inode *inode, struct file *file) -{ - struct bnad_debug_info *debug = file->private_data; - - if (!debug) - return 0; - - kfree(debug->debug_buffer); - - file->private_data = NULL; - kfree(debug); - debug = NULL; - return 0; -} - -static const struct file_operations bnad_debugfs_op_fwtrc = { - .owner = THIS_MODULE, - .open = bnad_debugfs_open_fwtrc, - .llseek = bnad_debugfs_lseek, - .read = bnad_debugfs_read, - .release = bnad_debugfs_buffer_release, -}; - -static const struct file_operations bnad_debugfs_op_fwsave = { - .owner = THIS_MODULE, - .open = bnad_debugfs_open_fwsave, - .llseek = bnad_debugfs_lseek, - .read = bnad_debugfs_read, - .release = bnad_debugfs_buffer_release, -}; - -static const struct file_operations bnad_debugfs_op_regrd = { - .owner = THIS_MODULE, - .open = bnad_debugfs_open_reg, - .llseek = bnad_debugfs_lseek, - .read = bnad_debugfs_read_regrd, - .write = bnad_debugfs_write_regrd, - .release = bnad_debugfs_release, -}; - -static const struct file_operations bnad_debugfs_op_regwr = { - .owner = THIS_MODULE, - .open = bnad_debugfs_open_reg, - .llseek = bnad_debugfs_lseek, - .write = bnad_debugfs_write_regwr, - .release = bnad_debugfs_release, -}; - -static const struct file_operations bnad_debugfs_op_drvinfo = { - .owner = THIS_MODULE, - .open = bnad_debugfs_open_drvinfo, - .llseek = bnad_debugfs_lseek, - .read = bnad_debugfs_read, - .release = bnad_debugfs_buffer_release, -}; - -struct bnad_debugfs_entry { - const char *name; - mode_t mode; - const struct file_operations *fops; -}; - -static const struct bnad_debugfs_entry bnad_debugfs_files[] = { - { "fwtrc", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwtrc, }, - { "fwsave", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwsave, }, - { "regrd", S_IFREG|S_IRUGO|S_IWUSR, &bnad_debugfs_op_regrd, }, - { "regwr", S_IFREG|S_IWUSR, &bnad_debugfs_op_regwr, }, - { "drvinfo", S_IFREG|S_IRUGO, &bnad_debugfs_op_drvinfo, }, -}; - -static struct dentry *bna_debugfs_root; -static atomic_t bna_debugfs_port_count; - -/* Initialize debugfs interface for BNA */ -void -bnad_debugfs_init(struct bnad *bnad) -{ - const struct bnad_debugfs_entry *file; - char name[64]; - int i; - - /* Setup the BNA debugfs root directory*/ - if (!bna_debugfs_root) { - bna_debugfs_root = debugfs_create_dir("bna", NULL); - 
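The regrd/regwr handlers above take a text command of the form "<offset>:<count-or-value>" in hex, and regrd then returns the captured register words as raw binary on a subsequent read. A small user-space sketch of that interaction; the PCI address, register offset, and word count are example values only, and the path follows the "bna/pci_dev:<pci-name>" layout created by bnad_debugfs_init():

	/* User-space sketch, illustrative only -- not part of this patch. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char buf[4096];
		ssize_t n;
		int fd = open("/sys/kernel/debug/bna/pci_dev:0000:03:00.0/regrd",
			      O_RDWR);

		if (fd < 0)
			return 1;
		/* Ask for 4 words starting at example chip offset 0x18000. */
		if (write(fd, "18000:4", 7) < 0) {
			close(fd);
			return 1;
		}
		n = read(fd, buf, sizeof(buf));	/* raw register data */
		close(fd);
		return n <= 0;
	}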
atomic_set(&bna_debugfs_port_count, 0); - if (!bna_debugfs_root) { - pr_warn("BNA: debugfs root dir creation failed\n"); - return; - } - } - - /* Setup the pci_dev debugfs directory for the port */ - snprintf(name, sizeof(name), "pci_dev:%s", pci_name(bnad->pcidev)); - if (!bnad->port_debugfs_root) { - bnad->port_debugfs_root = - debugfs_create_dir(name, bna_debugfs_root); - if (!bnad->port_debugfs_root) { - pr_warn("bna pci_dev %s: root dir creation failed\n", - pci_name(bnad->pcidev)); - return; - } - - atomic_inc(&bna_debugfs_port_count); - - for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) { - file = &bnad_debugfs_files[i]; - bnad->bnad_dentry_files[i] = - debugfs_create_file(file->name, - file->mode, - bnad->port_debugfs_root, - bnad, - file->fops); - if (!bnad->bnad_dentry_files[i]) { - pr_warn( - "BNA pci_dev:%s: create %s entry failed\n", - pci_name(bnad->pcidev), file->name); - return; - } - } - } -} - -/* Uninitialize debugfs interface for BNA */ -void -bnad_debugfs_uninit(struct bnad *bnad) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) { - if (bnad->bnad_dentry_files[i]) { - debugfs_remove(bnad->bnad_dentry_files[i]); - bnad->bnad_dentry_files[i] = NULL; - } - } - - /* Remove the pci_dev debugfs directory for the port */ - if (bnad->port_debugfs_root) { - debugfs_remove(bnad->port_debugfs_root); - bnad->port_debugfs_root = NULL; - atomic_dec(&bna_debugfs_port_count); - } - - /* Remove the BNA debugfs root directory */ - if (atomic_read(&bna_debugfs_port_count) == 0) { - debugfs_remove(bna_debugfs_root); - bna_debugfs_root = NULL; - } -} diff --git a/trunk/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/trunk/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index 9b44ec8096ba..fd3dcc1e9145 100644 --- a/trunk/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/trunk/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -38,7 +38,7 @@ sizeof(struct bnad_drv_stats) / sizeof(u64) + \ offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64)) -static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { +static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { "rx_packets", "tx_packets", "rx_bytes", @@ -296,8 +296,8 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) struct bfa_ioc_attr *ioc_attr; unsigned long flags; - strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, BNAD_VERSION, sizeof(drvinfo->version)); + strcpy(drvinfo->driver, BNAD_NAME); + strcpy(drvinfo->version, BNAD_VERSION); ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL); if (ioc_attr) { @@ -305,13 +305,12 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr); spin_unlock_irqrestore(&bnad->bna_lock, flags); - strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver, - sizeof(drvinfo->fw_version)); + strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver, + sizeof(drvinfo->fw_version) - 1); kfree(ioc_attr); } - strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev), - sizeof(drvinfo->bus_info)); + strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN); } static void @@ -935,144 +934,7 @@ bnad_get_sset_count(struct net_device *netdev, int sset) } } -static u32 -bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset, - u32 *base_offset) -{ - struct bfa_flash_attr *flash_attr; - struct bnad_iocmd_comp fcomp; - u32 i, flash_part = 0, ret; - unsigned long flags = 0; - - flash_attr = kzalloc(sizeof(struct 
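A side note on the bnad_get_drvinfo hunk above, which reverts strlcpy() to strcpy()/strncpy(): unlike strlcpy(), strncpy() does not guarantee NUL termination when the source fills the destination, so callers normally terminate by hand. A hypothetical helper, illustrative only:

	#include <string.h>

	/* Hypothetical helper, not part of the patch: shows the manual
	 * termination strncpy() needs and strlcpy() provides implicitly. */
	static void
	copy_version_string(char *dst, size_t dst_len, const char *src)
	{
		strncpy(dst, src, dst_len - 1);
		dst[dst_len - 1] = '\0';	/* strncpy may omit the NUL */
	}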
bfa_flash_attr), GFP_KERNEL); - if (!flash_attr) - return -ENOMEM; - - fcomp.bnad = bnad; - fcomp.comp_status = 0; - - init_completion(&fcomp.comp); - spin_lock_irqsave(&bnad->bna_lock, flags); - ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr, - bnad_cb_completion, &fcomp); - if (ret != BFA_STATUS_OK) { - spin_unlock_irqrestore(&bnad->bna_lock, flags); - kfree(flash_attr); - goto out_err; - } - spin_unlock_irqrestore(&bnad->bna_lock, flags); - wait_for_completion(&fcomp.comp); - ret = fcomp.comp_status; - - /* Check for the flash type & base offset value */ - if (ret == BFA_STATUS_OK) { - for (i = 0; i < flash_attr->npart; i++) { - if (offset >= flash_attr->part[i].part_off && - offset < (flash_attr->part[i].part_off + - flash_attr->part[i].part_size)) { - flash_part = flash_attr->part[i].part_type; - *base_offset = flash_attr->part[i].part_off; - break; - } - } - } - kfree(flash_attr); - return flash_part; -out_err: - return -EINVAL; -} - -static int -bnad_get_eeprom_len(struct net_device *netdev) -{ - return BFA_TOTAL_FLASH_SIZE; -} - -static int -bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, - u8 *bytes) -{ - struct bnad *bnad = netdev_priv(netdev); - struct bnad_iocmd_comp fcomp; - u32 flash_part = 0, base_offset = 0; - unsigned long flags = 0; - int ret = 0; - - /* Check if the flash read request is valid */ - if (eeprom->magic != (bnad->pcidev->vendor | - (bnad->pcidev->device << 16))) - return -EFAULT; - - /* Query the flash partition based on the offset */ - flash_part = bnad_get_flash_partition_by_offset(bnad, - eeprom->offset, &base_offset); - if (flash_part <= 0) - return -EFAULT; - - fcomp.bnad = bnad; - fcomp.comp_status = 0; - - init_completion(&fcomp.comp); - spin_lock_irqsave(&bnad->bna_lock, flags); - ret = bfa_nw_flash_read_part(&bnad->bna.flash, flash_part, - bnad->id, bytes, eeprom->len, - eeprom->offset - base_offset, - bnad_cb_completion, &fcomp); - if (ret != BFA_STATUS_OK) { - spin_unlock_irqrestore(&bnad->bna_lock, flags); - goto done; - } - - spin_unlock_irqrestore(&bnad->bna_lock, flags); - wait_for_completion(&fcomp.comp); - ret = fcomp.comp_status; -done: - return ret; -} - -static int -bnad_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, - u8 *bytes) -{ - struct bnad *bnad = netdev_priv(netdev); - struct bnad_iocmd_comp fcomp; - u32 flash_part = 0, base_offset = 0; - unsigned long flags = 0; - int ret = 0; - - /* Check if the flash update request is valid */ - if (eeprom->magic != (bnad->pcidev->vendor | - (bnad->pcidev->device << 16))) - return -EINVAL; - - /* Query the flash partition based on the offset */ - flash_part = bnad_get_flash_partition_by_offset(bnad, - eeprom->offset, &base_offset); - if (flash_part <= 0) - return -EFAULT; - - fcomp.bnad = bnad; - fcomp.comp_status = 0; - - init_completion(&fcomp.comp); - spin_lock_irqsave(&bnad->bna_lock, flags); - ret = bfa_nw_flash_update_part(&bnad->bna.flash, flash_part, - bnad->id, bytes, eeprom->len, - eeprom->offset - base_offset, - bnad_cb_completion, &fcomp); - if (ret != BFA_STATUS_OK) { - spin_unlock_irqrestore(&bnad->bna_lock, flags); - goto done; - } - - spin_unlock_irqrestore(&bnad->bna_lock, flags); - wait_for_completion(&fcomp.comp); - ret = fcomp.comp_status; -done: - return ret; -} - -static const struct ethtool_ops bnad_ethtool_ops = { +static struct ethtool_ops bnad_ethtool_ops = { .get_settings = bnad_get_settings, .set_settings = bnad_set_settings, .get_drvinfo = bnad_get_drvinfo, @@ -1086,10 +948,7 @@ static const struct 
ethtool_ops bnad_ethtool_ops = { .set_pauseparam = bnad_set_pauseparam, .get_strings = bnad_get_strings, .get_ethtool_stats = bnad_get_ethtool_stats, - .get_sset_count = bnad_get_sset_count, - .get_eeprom_len = bnad_get_eeprom_len, - .get_eeprom = bnad_get_eeprom, - .set_eeprom = bnad_set_eeprom, + .get_sset_count = bnad_get_sset_count }; void diff --git a/trunk/drivers/net/ethernet/brocade/bna/cna.h b/trunk/drivers/net/ethernet/brocade/bna/cna.h index 32e8f178ab76..1b3e90dfbd9a 100644 --- a/trunk/drivers/net/ethernet/brocade/bna/cna.h +++ b/trunk/drivers/net/ethernet/brocade/bna/cna.h @@ -43,7 +43,8 @@ extern char bfa_version[]; #pragma pack(1) -typedef struct mac { u8 mac[ETH_ALEN]; } mac_t; +#define MAC_ADDRLEN (6) +typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t; #pragma pack() diff --git a/trunk/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/trunk/drivers/net/ethernet/brocade/bna/cna_fwimg.c index cfc22a64157e..725b9fff337f 100644 --- a/trunk/drivers/net/ethernet/brocade/bna/cna_fwimg.c +++ b/trunk/drivers/net/ethernet/brocade/bna/cna_fwimg.c @@ -16,7 +16,6 @@ * www.brocade.com */ #include -#include "bnad.h" #include "bfi.h" #include "cna.h" diff --git a/trunk/drivers/net/ethernet/calxeda/Kconfig b/trunk/drivers/net/ethernet/calxeda/Kconfig deleted file mode 100644 index aba435c3d4ae..000000000000 --- a/trunk/drivers/net/ethernet/calxeda/Kconfig +++ /dev/null @@ -1,7 +0,0 @@ -config NET_CALXEDA_XGMAC - tristate "Calxeda 1G/10G XGMAC Ethernet driver" - depends on HAS_IOMEM - select CRC32 - help - This is the driver for the XGMAC Ethernet IP block found on Calxeda - Highbank platforms. diff --git a/trunk/drivers/net/ethernet/calxeda/Makefile b/trunk/drivers/net/ethernet/calxeda/Makefile deleted file mode 100644 index f0ef08067f97..000000000000 --- a/trunk/drivers/net/ethernet/calxeda/Makefile +++ /dev/null @@ -1 +0,0 @@ -obj-$(CONFIG_NET_CALXEDA_XGMAC) += xgmac.o diff --git a/trunk/drivers/net/ethernet/calxeda/xgmac.c b/trunk/drivers/net/ethernet/calxeda/xgmac.c deleted file mode 100644 index 1fce186a9031..000000000000 --- a/trunk/drivers/net/ethernet/calxeda/xgmac.c +++ /dev/null @@ -1,1928 +0,0 @@ -/* - * Copyright 2010-2011 Calxeda, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . 
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* XGMAC Register definitions */ -#define XGMAC_CONTROL 0x00000000 /* MAC Configuration */ -#define XGMAC_FRAME_FILTER 0x00000004 /* MAC Frame Filter */ -#define XGMAC_FLOW_CTRL 0x00000018 /* MAC Flow Control */ -#define XGMAC_VLAN_TAG 0x0000001C /* VLAN Tags */ -#define XGMAC_VERSION 0x00000020 /* Version */ -#define XGMAC_VLAN_INCL 0x00000024 /* VLAN tag for tx frames */ -#define XGMAC_LPI_CTRL 0x00000028 /* LPI Control and Status */ -#define XGMAC_LPI_TIMER 0x0000002C /* LPI Timers Control */ -#define XGMAC_TX_PACE 0x00000030 /* Transmit Pace and Stretch */ -#define XGMAC_VLAN_HASH 0x00000034 /* VLAN Hash Table */ -#define XGMAC_DEBUG 0x00000038 /* Debug */ -#define XGMAC_INT_STAT 0x0000003C /* Interrupt and Control */ -#define XGMAC_ADDR_HIGH(reg) (0x00000040 + ((reg) * 8)) -#define XGMAC_ADDR_LOW(reg) (0x00000044 + ((reg) * 8)) -#define XGMAC_HASH(n) (0x00000300 + (n) * 4) /* HASH table regs */ -#define XGMAC_NUM_HASH 16 -#define XGMAC_OMR 0x00000400 -#define XGMAC_REMOTE_WAKE 0x00000700 /* Remote Wake-Up Frm Filter */ -#define XGMAC_PMT 0x00000704 /* PMT Control and Status */ -#define XGMAC_MMC_CTRL 0x00000800 /* XGMAC MMC Control */ -#define XGMAC_MMC_INTR_RX 0x00000804 /* Recieve Interrupt */ -#define XGMAC_MMC_INTR_TX 0x00000808 /* Transmit Interrupt */ -#define XGMAC_MMC_INTR_MASK_RX 0x0000080c /* Recieve Interrupt Mask */ -#define XGMAC_MMC_INTR_MASK_TX 0x00000810 /* Transmit Interrupt Mask */ - -/* Hardware TX Statistics Counters */ -#define XGMAC_MMC_TXOCTET_GB_LO 0x00000814 -#define XGMAC_MMC_TXOCTET_GB_HI 0x00000818 -#define XGMAC_MMC_TXFRAME_GB_LO 0x0000081C -#define XGMAC_MMC_TXFRAME_GB_HI 0x00000820 -#define XGMAC_MMC_TXBCFRAME_G 0x00000824 -#define XGMAC_MMC_TXMCFRAME_G 0x0000082C -#define XGMAC_MMC_TXUCFRAME_GB 0x00000864 -#define XGMAC_MMC_TXMCFRAME_GB 0x0000086C -#define XGMAC_MMC_TXBCFRAME_GB 0x00000874 -#define XGMAC_MMC_TXUNDERFLOW 0x0000087C -#define XGMAC_MMC_TXOCTET_G_LO 0x00000884 -#define XGMAC_MMC_TXOCTET_G_HI 0x00000888 -#define XGMAC_MMC_TXFRAME_G_LO 0x0000088C -#define XGMAC_MMC_TXFRAME_G_HI 0x00000890 -#define XGMAC_MMC_TXPAUSEFRAME 0x00000894 -#define XGMAC_MMC_TXVLANFRAME 0x0000089C - -/* Hardware RX Statistics Counters */ -#define XGMAC_MMC_RXFRAME_GB_LO 0x00000900 -#define XGMAC_MMC_RXFRAME_GB_HI 0x00000904 -#define XGMAC_MMC_RXOCTET_GB_LO 0x00000908 -#define XGMAC_MMC_RXOCTET_GB_HI 0x0000090C -#define XGMAC_MMC_RXOCTET_G_LO 0x00000910 -#define XGMAC_MMC_RXOCTET_G_HI 0x00000914 -#define XGMAC_MMC_RXBCFRAME_G 0x00000918 -#define XGMAC_MMC_RXMCFRAME_G 0x00000920 -#define XGMAC_MMC_RXCRCERR 0x00000928 -#define XGMAC_MMC_RXRUNT 0x00000930 -#define XGMAC_MMC_RXJABBER 0x00000934 -#define XGMAC_MMC_RXUCFRAME_G 0x00000970 -#define XGMAC_MMC_RXLENGTHERR 0x00000978 -#define XGMAC_MMC_RXPAUSEFRAME 0x00000988 -#define XGMAC_MMC_RXOVERFLOW 0x00000990 -#define XGMAC_MMC_RXVLANFRAME 0x00000998 -#define XGMAC_MMC_RXWATCHDOG 0x000009a0 - -/* DMA Control and Status Registers */ -#define XGMAC_DMA_BUS_MODE 0x00000f00 /* Bus Mode */ -#define XGMAC_DMA_TX_POLL 0x00000f04 /* Transmit Poll Demand */ -#define XGMAC_DMA_RX_POLL 0x00000f08 /* Received Poll Demand */ -#define XGMAC_DMA_RX_BASE_ADDR 0x00000f0c /* Receive List Base */ -#define XGMAC_DMA_TX_BASE_ADDR 0x00000f10 /* Transmit List Base */ -#define XGMAC_DMA_STATUS 0x00000f14 /* Status Register */ -#define XGMAC_DMA_CONTROL 0x00000f18 /* Ctrl (Operational Mode) */ -#define 
XGMAC_DMA_INTR_ENA 0x00000f1c /* Interrupt Enable */ -#define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20 /* Missed Frame Counter */ -#define XGMAC_DMA_RI_WDOG_TIMER 0x00000f24 /* RX Intr Watchdog Timer */ -#define XGMAC_DMA_AXI_BUS 0x00000f28 /* AXI Bus Mode */ -#define XGMAC_DMA_AXI_STATUS 0x00000f2C /* AXI Status */ -#define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */ - -#define XGMAC_ADDR_AE 0x80000000 -#define XGMAC_MAX_FILTER_ADDR 31 - -/* PMT Control and Status */ -#define XGMAC_PMT_POINTER_RESET 0x80000000 -#define XGMAC_PMT_GLBL_UNICAST 0x00000200 -#define XGMAC_PMT_WAKEUP_RX_FRM 0x00000040 -#define XGMAC_PMT_MAGIC_PKT 0x00000020 -#define XGMAC_PMT_WAKEUP_FRM_EN 0x00000004 -#define XGMAC_PMT_MAGIC_PKT_EN 0x00000002 -#define XGMAC_PMT_POWERDOWN 0x00000001 - -#define XGMAC_CONTROL_SPD 0x40000000 /* Speed control */ -#define XGMAC_CONTROL_SPD_MASK 0x60000000 -#define XGMAC_CONTROL_SPD_1G 0x60000000 -#define XGMAC_CONTROL_SPD_2_5G 0x40000000 -#define XGMAC_CONTROL_SPD_10G 0x00000000 -#define XGMAC_CONTROL_SARC 0x10000000 /* Source Addr Insert/Replace */ -#define XGMAC_CONTROL_SARK_MASK 0x18000000 -#define XGMAC_CONTROL_CAR 0x04000000 /* CRC Addition/Replacement */ -#define XGMAC_CONTROL_CAR_MASK 0x06000000 -#define XGMAC_CONTROL_DP 0x01000000 /* Disable Padding */ -#define XGMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on rx */ -#define XGMAC_CONTROL_JD 0x00400000 /* Jabber disable */ -#define XGMAC_CONTROL_JE 0x00100000 /* Jumbo frame */ -#define XGMAC_CONTROL_LM 0x00001000 /* Loop-back mode */ -#define XGMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */ -#define XGMAC_CONTROL_ACS 0x00000080 /* Automatic Pad/FCS Strip */ -#define XGMAC_CONTROL_DDIC 0x00000010 /* Disable Deficit Idle Count */ -#define XGMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ -#define XGMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ - -/* XGMAC Frame Filter defines */ -#define XGMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ -#define XGMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */ -#define XGMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */ -#define XGMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */ -#define XGMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */ -#define XGMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */ -#define XGMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */ -#define XGMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */ -#define XGMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */ -#define XGMAC_FRAME_FILTER_VHF 0x00000800 /* VLAN Hash Filter */ -#define XGMAC_FRAME_FILTER_VPF 0x00001000 /* VLAN Perfect Filter */ -#define XGMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */ - -/* XGMAC FLOW CTRL defines */ -#define XGMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ -#define XGMAC_FLOW_CTRL_PT_SHIFT 16 -#define XGMAC_FLOW_CTRL_DZQP 0x00000080 /* Disable Zero-Quanta Phase */ -#define XGMAC_FLOW_CTRL_PLT 0x00000020 /* Pause Low Threshhold */ -#define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030 /* PLT MASK */ -#define XGMAC_FLOW_CTRL_UP 0x00000008 /* Unicast Pause Frame Detect */ -#define XGMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ -#define XGMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */ -#define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... 
*/ - -/* XGMAC_INT_STAT reg */ -#define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */ -#define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */ - -/* DMA Bus Mode register defines */ -#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ -#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ -#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ -#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */ - -/* Programmable burst length */ -#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */ -#define DMA_BUS_MODE_PBL_SHIFT 8 -#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */ -#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */ -#define DMA_BUS_MODE_RPBL_SHIFT 17 -#define DMA_BUS_MODE_USP 0x00800000 -#define DMA_BUS_MODE_8PBL 0x01000000 -#define DMA_BUS_MODE_AAL 0x02000000 - -/* DMA Bus Mode register defines */ -#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */ -#define DMA_BUS_PR_RATIO_SHIFT 14 -#define DMA_BUS_FB 0x00010000 /* Fixed Burst */ - -/* DMA Control register defines */ -#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */ -#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */ -#define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */ - -/* DMA Normal interrupt */ -#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */ -#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */ -#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */ -#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */ -#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */ -#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */ -#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */ -#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */ -#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */ -#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */ -#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */ -#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */ -#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavail */ -#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */ -#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */ - -#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \ - DMA_INTR_ENA_TUE) - -#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \ - DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \ - DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \ - DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \ - DMA_INTR_ENA_TSE) - -/* DMA default interrupt mask */ -#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL) - -/* DMA Status register defines */ -#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */ -#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */ -#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */ -#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */ -#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */ -#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */ -#define DMA_STATUS_TS_SHIFT 20 -#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */ -#define DMA_STATUS_RS_SHIFT 17 -#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */ -#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */ -#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */ -#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */ -#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */ -#define DMA_STATUS_RWT 
0x00000200 /* Receive Watchdog Timeout */ -#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */ -#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */ -#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */ -#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */ -#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */ -#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */ -#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavail */ -#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */ -#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ - -/* Common MAC defines */ -#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */ -#define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */ - -/* XGMAC Operation Mode Register */ -#define XGMAC_OMR_TSF 0x00200000 /* TX FIFO Store and Forward */ -#define XGMAC_OMR_FTF 0x00100000 /* Flush Transmit FIFO */ -#define XGMAC_OMR_TTC 0x00020000 /* Transmit Threshhold Ctrl */ -#define XGMAC_OMR_TTC_MASK 0x00030000 -#define XGMAC_OMR_RFD 0x00006000 /* FC Deactivation Threshhold */ -#define XGMAC_OMR_RFD_MASK 0x00007000 /* FC Deact Threshhold MASK */ -#define XGMAC_OMR_RFA 0x00000600 /* FC Activation Threshhold */ -#define XGMAC_OMR_RFA_MASK 0x00000E00 /* FC Act Threshhold MASK */ -#define XGMAC_OMR_EFC 0x00000100 /* Enable Hardware FC */ -#define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */ -#define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */ -#define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */ -#define XGMAC_OMR_RTC 0x00000010 /* RX Threshhold Ctrl */ -#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshhold Ctrl MASK */ - -/* XGMAC HW Features Register */ -#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* TX Checksum offload */ - -#define XGMAC_MMC_CTRL_CNT_FRZ 0x00000008 - -/* XGMAC Descriptor Defines */ -#define MAX_DESC_BUF_SZ (0x2000 - 8) - -#define RXDESC_EXT_STATUS 0x00000001 -#define RXDESC_CRC_ERR 0x00000002 -#define RXDESC_RX_ERR 0x00000008 -#define RXDESC_RX_WDOG 0x00000010 -#define RXDESC_FRAME_TYPE 0x00000020 -#define RXDESC_GIANT_FRAME 0x00000080 -#define RXDESC_LAST_SEG 0x00000100 -#define RXDESC_FIRST_SEG 0x00000200 -#define RXDESC_VLAN_FRAME 0x00000400 -#define RXDESC_OVERFLOW_ERR 0x00000800 -#define RXDESC_LENGTH_ERR 0x00001000 -#define RXDESC_SA_FILTER_FAIL 0x00002000 -#define RXDESC_DESCRIPTOR_ERR 0x00004000 -#define RXDESC_ERROR_SUMMARY 0x00008000 -#define RXDESC_FRAME_LEN_OFFSET 16 -#define RXDESC_FRAME_LEN_MASK 0x3fff0000 -#define RXDESC_DA_FILTER_FAIL 0x40000000 - -#define RXDESC1_END_RING 0x00008000 - -#define RXDESC_IP_PAYLOAD_MASK 0x00000003 -#define RXDESC_IP_PAYLOAD_UDP 0x00000001 -#define RXDESC_IP_PAYLOAD_TCP 0x00000002 -#define RXDESC_IP_PAYLOAD_ICMP 0x00000003 -#define RXDESC_IP_HEADER_ERR 0x00000008 -#define RXDESC_IP_PAYLOAD_ERR 0x00000010 -#define RXDESC_IPV4_PACKET 0x00000040 -#define RXDESC_IPV6_PACKET 0x00000080 -#define TXDESC_UNDERFLOW_ERR 0x00000001 -#define TXDESC_JABBER_TIMEOUT 0x00000002 -#define TXDESC_LOCAL_FAULT 0x00000004 -#define TXDESC_REMOTE_FAULT 0x00000008 -#define TXDESC_VLAN_FRAME 0x00000010 -#define TXDESC_FRAME_FLUSHED 0x00000020 -#define TXDESC_IP_HEADER_ERR 0x00000040 -#define TXDESC_PAYLOAD_CSUM_ERR 0x00000080 -#define TXDESC_ERROR_SUMMARY 0x00008000 -#define TXDESC_SA_CTRL_INSERT 0x00040000 -#define TXDESC_SA_CTRL_REPLACE 0x00080000 -#define TXDESC_2ND_ADDR_CHAINED 0x00100000 -#define TXDESC_END_RING 0x00200000 -#define TXDESC_CSUM_IP 0x00400000 -#define TXDESC_CSUM_IP_PAYLD 0x00800000 -#define TXDESC_CSUM_ALL 0x00C00000 
-#define TXDESC_CRC_EN_REPLACE 0x01000000 -#define TXDESC_CRC_EN_APPEND 0x02000000 -#define TXDESC_DISABLE_PAD 0x04000000 -#define TXDESC_FIRST_SEG 0x10000000 -#define TXDESC_LAST_SEG 0x20000000 -#define TXDESC_INTERRUPT 0x40000000 - -#define DESC_OWN 0x80000000 -#define DESC_BUFFER1_SZ_MASK 0x00001fff -#define DESC_BUFFER2_SZ_MASK 0x1fff0000 -#define DESC_BUFFER2_SZ_OFFSET 16 - -struct xgmac_dma_desc { - __le32 flags; - __le32 buf_size; - __le32 buf1_addr; /* Buffer 1 Address Pointer */ - __le32 buf2_addr; /* Buffer 2 Address Pointer */ - __le32 ext_status; - __le32 res[3]; -}; - -struct xgmac_extra_stats { - /* Transmit errors */ - unsigned long tx_jabber; - unsigned long tx_frame_flushed; - unsigned long tx_payload_error; - unsigned long tx_ip_header_error; - unsigned long tx_local_fault; - unsigned long tx_remote_fault; - /* Receive errors */ - unsigned long rx_watchdog; - unsigned long rx_da_filter_fail; - unsigned long rx_sa_filter_fail; - unsigned long rx_payload_error; - unsigned long rx_ip_header_error; - /* Tx/Rx IRQ errors */ - unsigned long tx_undeflow; - unsigned long tx_process_stopped; - unsigned long rx_buf_unav; - unsigned long rx_process_stopped; - unsigned long tx_early; - unsigned long fatal_bus_error; -}; - -struct xgmac_priv { - struct xgmac_dma_desc *dma_rx; - struct sk_buff **rx_skbuff; - unsigned int rx_tail; - unsigned int rx_head; - - struct xgmac_dma_desc *dma_tx; - struct sk_buff **tx_skbuff; - unsigned int tx_head; - unsigned int tx_tail; - - void __iomem *base; - struct sk_buff_head rx_recycle; - unsigned int dma_buf_sz; - dma_addr_t dma_rx_phy; - dma_addr_t dma_tx_phy; - - struct net_device *dev; - struct device *device; - struct napi_struct napi; - - struct xgmac_extra_stats xstats; - - spinlock_t stats_lock; - int pmt_irq; - char rx_pause; - char tx_pause; - int wolopts; -}; - -/* XGMAC Configuration Settings */ -#define MAX_MTU 9000 -#define PAUSE_TIME 0x400 - -#define DMA_RX_RING_SZ 256 -#define DMA_TX_RING_SZ 128 -/* minimum number of free TX descriptors required to wake up TX process */ -#define TX_THRESH (DMA_TX_RING_SZ/4) - -/* DMA descriptor ring helpers */ -#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1)) -#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s) -#define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s) - -/* XGMAC Descriptor Access Helpers */ -static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz) -{ - if (buf_sz > MAX_DESC_BUF_SZ) - p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ | - (buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET); - else - p->buf_size = cpu_to_le32(buf_sz); -} - -static inline int desc_get_buf_len(struct xgmac_dma_desc *p) -{ - u32 len = cpu_to_le32(p->flags); - return (len & DESC_BUFFER1_SZ_MASK) + - ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET); -} - -static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size, - int buf_sz) -{ - struct xgmac_dma_desc *end = p + ring_size - 1; - - memset(p, 0, sizeof(*p) * ring_size); - - for (; p <= end; p++) - desc_set_buf_len(p, buf_sz); - - end->buf_size |= cpu_to_le32(RXDESC1_END_RING); -} - -static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size) -{ - memset(p, 0, sizeof(*p) * ring_size); - p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING); -} - -static inline int desc_get_owner(struct xgmac_dma_desc *p) -{ - return le32_to_cpu(p->flags) & DESC_OWN; -} - -static inline void desc_set_rx_owner(struct xgmac_dma_desc *p) -{ - /* Clear all fields and set the owner */ - p->flags = cpu_to_le32(DESC_OWN); -} - -static 
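The dma_ring_* helpers above assume power-of-two ring sizes (DMA_RX_RING_SZ = 256, DMA_TX_RING_SZ = 128), so wrap-around is a mask rather than a modulo, matching the kernel's CIRC_CNT()/CIRC_SPACE() convention. A stand-alone worked example with illustrative head/tail values:

	/* Illustrative only: user-space restatement of the ring arithmetic
	 * used by the dma_ring_* macros above, with example index values. */
	#include <stdio.h>

	#define RING_SZ 128					/* power of two */
	#define ring_incr(n)     (((n) + 1) & (RING_SZ - 1))
	#define ring_cnt(h, t)   (((h) - (t)) & (RING_SZ - 1))		/* in use */
	#define ring_space(h, t) (((t) - (h) - 1) & (RING_SZ - 1))	/* free   */

	int main(void)
	{
		unsigned int head = 127, tail = 120;	/* example indices */

		printf("in use %u, free %u, next head %u\n",
		       ring_cnt(head, tail), ring_space(head, tail),
		       ring_incr(head));
		/* prints: in use 7, free 120, next head 0 */
		return 0;
	}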
inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags) -{ - u32 tmpflags = le32_to_cpu(p->flags); - tmpflags &= TXDESC_END_RING; - tmpflags |= flags | DESC_OWN; - p->flags = cpu_to_le32(tmpflags); -} - -static inline int desc_get_tx_ls(struct xgmac_dma_desc *p) -{ - return le32_to_cpu(p->flags) & TXDESC_LAST_SEG; -} - -static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p) -{ - return le32_to_cpu(p->buf1_addr); -} - -static inline void desc_set_buf_addr(struct xgmac_dma_desc *p, - u32 paddr, int len) -{ - p->buf1_addr = cpu_to_le32(paddr); - if (len > MAX_DESC_BUF_SZ) - p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ); -} - -static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p, - u32 paddr, int len) -{ - desc_set_buf_len(p, len); - desc_set_buf_addr(p, paddr, len); -} - -static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p) -{ - u32 data = le32_to_cpu(p->flags); - u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET; - if (data & RXDESC_FRAME_TYPE) - len -= ETH_FCS_LEN; - - return len; -} - -static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr) -{ - int timeout = 1000; - u32 reg = readl(ioaddr + XGMAC_OMR); - writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR); - - while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF) - udelay(1); -} - -static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p) -{ - struct xgmac_extra_stats *x = &priv->xstats; - u32 status = le32_to_cpu(p->flags); - - if (!(status & TXDESC_ERROR_SUMMARY)) - return 0; - - netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status); - if (status & TXDESC_JABBER_TIMEOUT) - x->tx_jabber++; - if (status & TXDESC_FRAME_FLUSHED) - x->tx_frame_flushed++; - if (status & TXDESC_UNDERFLOW_ERR) - xgmac_dma_flush_tx_fifo(priv->base); - if (status & TXDESC_IP_HEADER_ERR) - x->tx_ip_header_error++; - if (status & TXDESC_LOCAL_FAULT) - x->tx_local_fault++; - if (status & TXDESC_REMOTE_FAULT) - x->tx_remote_fault++; - if (status & TXDESC_PAYLOAD_CSUM_ERR) - x->tx_payload_error++; - - return -1; -} - -static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p) -{ - struct xgmac_extra_stats *x = &priv->xstats; - int ret = CHECKSUM_UNNECESSARY; - u32 status = le32_to_cpu(p->flags); - u32 ext_status = le32_to_cpu(p->ext_status); - - if (status & RXDESC_DA_FILTER_FAIL) { - netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n"); - x->rx_da_filter_fail++; - return -1; - } - - /* Check if packet has checksum already */ - if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) && - !(ext_status & RXDESC_IP_PAYLOAD_MASK)) - ret = CHECKSUM_NONE; - - netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n", - (status & RXDESC_FRAME_TYPE) ? 
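desc_set_buf_len() and desc_set_buf_addr() above split any buffer larger than MAX_DESC_BUF_SZ (0x2000 - 8 = 8184 bytes) across the descriptor's two buffer slots. A stand-alone illustration of that packing for an example 9000-byte jumbo buffer:

	/* Illustrative only: reproduces the buf_size packing performed by
	 * desc_set_buf_len() above for an example 9000-byte buffer. */
	#include <stdio.h>
	#include <stdint.h>

	#define MAX_DESC_BUF_SZ		(0x2000 - 8)
	#define DESC_BUFFER1_SZ_MASK	0x00001fff
	#define DESC_BUFFER2_SZ_MASK	0x1fff0000
	#define DESC_BUFFER2_SZ_OFFSET	16

	int main(void)
	{
		uint32_t len = 9000;		/* example jumbo-frame buffer */
		uint32_t buf_size;

		if (len > MAX_DESC_BUF_SZ)
			buf_size = MAX_DESC_BUF_SZ |
				   ((len - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
		else
			buf_size = len;

		printf("buffer1 %u bytes, buffer2 %u bytes\n",
		       buf_size & DESC_BUFFER1_SZ_MASK,
		       (buf_size & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
		/* prints: buffer1 8184 bytes, buffer2 816 bytes */
		return 0;
	}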
1 : 0, ret, ext_status); - - if (!(status & RXDESC_ERROR_SUMMARY)) - return ret; - - /* Handle any errors */ - if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR | - RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR)) - return -1; - - if (status & RXDESC_EXT_STATUS) { - if (ext_status & RXDESC_IP_HEADER_ERR) - x->rx_ip_header_error++; - if (ext_status & RXDESC_IP_PAYLOAD_ERR) - x->rx_payload_error++; - netdev_dbg(priv->dev, "IP checksum error - stat %08x\n", - ext_status); - return CHECKSUM_NONE; - } - - return ret; -} - -static inline void xgmac_mac_enable(void __iomem *ioaddr) -{ - u32 value = readl(ioaddr + XGMAC_CONTROL); - value |= MAC_ENABLE_RX | MAC_ENABLE_TX; - writel(value, ioaddr + XGMAC_CONTROL); - - value = readl(ioaddr + XGMAC_DMA_CONTROL); - value |= DMA_CONTROL_ST | DMA_CONTROL_SR; - writel(value, ioaddr + XGMAC_DMA_CONTROL); -} - -static inline void xgmac_mac_disable(void __iomem *ioaddr) -{ - u32 value = readl(ioaddr + XGMAC_DMA_CONTROL); - value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR); - writel(value, ioaddr + XGMAC_DMA_CONTROL); - - value = readl(ioaddr + XGMAC_CONTROL); - value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX); - writel(value, ioaddr + XGMAC_CONTROL); -} - -static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr, - int num) -{ - u32 data; - - data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0); - writel(data, ioaddr + XGMAC_ADDR_HIGH(num)); - data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; - writel(data, ioaddr + XGMAC_ADDR_LOW(num)); -} - -static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, - int num) -{ - u32 hi_addr, lo_addr; - - /* Read the MAC address from the hardware */ - hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num)); - lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num)); - - /* Extract the MAC address from the high and low words */ - addr[0] = lo_addr & 0xff; - addr[1] = (lo_addr >> 8) & 0xff; - addr[2] = (lo_addr >> 16) & 0xff; - addr[3] = (lo_addr >> 24) & 0xff; - addr[4] = hi_addr & 0xff; - addr[5] = (hi_addr >> 8) & 0xff; -} - -static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx) -{ - u32 reg; - unsigned int flow = 0; - - priv->rx_pause = rx; - priv->tx_pause = tx; - - if (rx || tx) { - if (rx) - flow |= XGMAC_FLOW_CTRL_RFE; - if (tx) - flow |= XGMAC_FLOW_CTRL_TFE; - - flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP; - flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT); - - writel(flow, priv->base + XGMAC_FLOW_CTRL); - - reg = readl(priv->base + XGMAC_OMR); - reg |= XGMAC_OMR_EFC; - writel(reg, priv->base + XGMAC_OMR); - } else { - writel(0, priv->base + XGMAC_FLOW_CTRL); - - reg = readl(priv->base + XGMAC_OMR); - reg &= ~XGMAC_OMR_EFC; - writel(reg, priv->base + XGMAC_OMR); - } - - return 0; -} - -static void xgmac_rx_refill(struct xgmac_priv *priv) -{ - struct xgmac_dma_desc *p; - dma_addr_t paddr; - - while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) { - int entry = priv->rx_head; - struct sk_buff *skb; - - p = priv->dma_rx + entry; - - if (priv->rx_skbuff[entry] != NULL) - continue; - - skb = __skb_dequeue(&priv->rx_recycle); - if (skb == NULL) - skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz); - if (unlikely(skb == NULL)) - break; - - priv->rx_skbuff[entry] = skb; - paddr = dma_map_single(priv->device, skb->data, - priv->dma_buf_sz, DMA_FROM_DEVICE); - desc_set_buf_addr(p, paddr, priv->dma_buf_sz); - - netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n", - priv->rx_head, priv->rx_tail); - - priv->rx_head = 
dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ); - /* Ensure descriptor is in memory before handing to h/w */ - wmb(); - desc_set_rx_owner(p); - } -} - -/** - * init_xgmac_dma_desc_rings - init the RX/TX descriptor rings - * @dev: net device structure - * Description: this function initializes the DMA RX/TX descriptors - * and allocates the socket buffers. - */ -static int xgmac_dma_desc_rings_init(struct net_device *dev) -{ - struct xgmac_priv *priv = netdev_priv(dev); - unsigned int bfsize; - - /* Set the Buffer size according to the MTU; - * indeed, in case of jumbo we need to bump-up the buffer sizes. - */ - bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN + 64, - 64); - - netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize); - - priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ, - GFP_KERNEL); - if (!priv->rx_skbuff) - return -ENOMEM; - - priv->dma_rx = dma_alloc_coherent(priv->device, - DMA_RX_RING_SZ * - sizeof(struct xgmac_dma_desc), - &priv->dma_rx_phy, - GFP_KERNEL); - if (!priv->dma_rx) - goto err_dma_rx; - - priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ, - GFP_KERNEL); - if (!priv->tx_skbuff) - goto err_tx_skb; - - priv->dma_tx = dma_alloc_coherent(priv->device, - DMA_TX_RING_SZ * - sizeof(struct xgmac_dma_desc), - &priv->dma_tx_phy, - GFP_KERNEL); - if (!priv->dma_tx) - goto err_dma_tx; - - netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, " - "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n", - priv->dma_rx, priv->dma_tx, - (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy); - - priv->rx_tail = 0; - priv->rx_head = 0; - priv->dma_buf_sz = bfsize; - desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz); - xgmac_rx_refill(priv); - - priv->tx_tail = 0; - priv->tx_head = 0; - desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ); - - writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR); - writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR); - - return 0; - -err_dma_tx: - kfree(priv->tx_skbuff); -err_tx_skb: - dma_free_coherent(priv->device, - DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc), - priv->dma_rx, priv->dma_rx_phy); -err_dma_rx: - kfree(priv->rx_skbuff); - return -ENOMEM; -} - -static void xgmac_free_rx_skbufs(struct xgmac_priv *priv) -{ - int i; - struct xgmac_dma_desc *p; - - if (!priv->rx_skbuff) - return; - - for (i = 0; i < DMA_RX_RING_SZ; i++) { - if (priv->rx_skbuff[i] == NULL) - continue; - - p = priv->dma_rx + i; - dma_unmap_single(priv->device, desc_get_buf_addr(p), - priv->dma_buf_sz, DMA_FROM_DEVICE); - dev_kfree_skb_any(priv->rx_skbuff[i]); - priv->rx_skbuff[i] = NULL; - } -} - -static void xgmac_free_tx_skbufs(struct xgmac_priv *priv) -{ - int i, f; - struct xgmac_dma_desc *p; - - if (!priv->tx_skbuff) - return; - - for (i = 0; i < DMA_TX_RING_SZ; i++) { - if (priv->tx_skbuff[i] == NULL) - continue; - - p = priv->dma_tx + i; - dma_unmap_single(priv->device, desc_get_buf_addr(p), - desc_get_buf_len(p), DMA_TO_DEVICE); - - for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) { - p = priv->dma_tx + i++; - dma_unmap_page(priv->device, desc_get_buf_addr(p), - desc_get_buf_len(p), DMA_TO_DEVICE); - } - - dev_kfree_skb_any(priv->tx_skbuff[i]); - priv->tx_skbuff[i] = NULL; - } -} - -static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv) -{ - /* Release the DMA TX/RX socket buffers */ - xgmac_free_rx_skbufs(priv); - xgmac_free_tx_skbufs(priv); - - /* Free the consistent memory allocated for descriptor rings */ - if (priv->dma_tx) { - 
dma_free_coherent(priv->device, - DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc), - priv->dma_tx, priv->dma_tx_phy); - priv->dma_tx = NULL; - } - if (priv->dma_rx) { - dma_free_coherent(priv->device, - DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc), - priv->dma_rx, priv->dma_rx_phy); - priv->dma_rx = NULL; - } - kfree(priv->rx_skbuff); - priv->rx_skbuff = NULL; - kfree(priv->tx_skbuff); - priv->tx_skbuff = NULL; -} - -/** - * xgmac_tx: - * @priv: private driver structure - * Description: it reclaims resources after transmission completes. - */ -static void xgmac_tx_complete(struct xgmac_priv *priv) -{ - int i; - void __iomem *ioaddr = priv->base; - - writel(DMA_STATUS_TU | DMA_STATUS_NIS, ioaddr + XGMAC_DMA_STATUS); - - while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) { - unsigned int entry = priv->tx_tail; - struct sk_buff *skb = priv->tx_skbuff[entry]; - struct xgmac_dma_desc *p = priv->dma_tx + entry; - - /* Check if the descriptor is owned by the DMA. */ - if (desc_get_owner(p)) - break; - - /* Verify tx error by looking at the last segment */ - if (desc_get_tx_ls(p)) - desc_get_tx_status(priv, p); - - netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n", - priv->tx_head, priv->tx_tail); - - dma_unmap_single(priv->device, desc_get_buf_addr(p), - desc_get_buf_len(p), DMA_TO_DEVICE); - - priv->tx_skbuff[entry] = NULL; - priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ); - - if (!skb) { - continue; - } - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - entry = priv->tx_tail = dma_ring_incr(priv->tx_tail, - DMA_TX_RING_SZ); - p = priv->dma_tx + priv->tx_tail; - - dma_unmap_page(priv->device, desc_get_buf_addr(p), - desc_get_buf_len(p), DMA_TO_DEVICE); - } - - /* - * If there's room in the queue (limit it to size) - * we add this skb back into the pool, - * if it's the right size. - */ - if ((skb_queue_len(&priv->rx_recycle) < - DMA_RX_RING_SZ) && - skb_recycle_check(skb, priv->dma_buf_sz)) - __skb_queue_head(&priv->rx_recycle, skb); - else - dev_kfree_skb(skb); - } - - if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) > - TX_THRESH) - netif_wake_queue(priv->dev); -} - -/** - * xgmac_tx_err: - * @priv: pointer to the private device structure - * Description: it cleans the descriptors and restarts the transmission - * in case of errors. 
- */ -static void xgmac_tx_err(struct xgmac_priv *priv) -{ - u32 reg, value, inten; - - netif_stop_queue(priv->dev); - - inten = readl(priv->base + XGMAC_DMA_INTR_ENA); - writel(0, priv->base + XGMAC_DMA_INTR_ENA); - - reg = readl(priv->base + XGMAC_DMA_CONTROL); - writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL); - do { - value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000; - } while (value && (value != 0x600000)); - - xgmac_free_tx_skbufs(priv); - desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ); - priv->tx_tail = 0; - priv->tx_head = 0; - writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL); - - writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS, - priv->base + XGMAC_DMA_STATUS); - writel(inten, priv->base + XGMAC_DMA_INTR_ENA); - - netif_wake_queue(priv->dev); -} - -static int xgmac_hw_init(struct net_device *dev) -{ - u32 value, ctrl; - int limit; - struct xgmac_priv *priv = netdev_priv(dev); - void __iomem *ioaddr = priv->base; - - /* Save the ctrl register value */ - ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK; - - /* SW reset */ - value = DMA_BUS_MODE_SFT_RESET; - writel(value, ioaddr + XGMAC_DMA_BUS_MODE); - limit = 15000; - while (limit-- && - (readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) - cpu_relax(); - if (limit < 0) - return -EBUSY; - - value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) | - (0x10 << DMA_BUS_MODE_RPBL_SHIFT) | - DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL; - writel(value, ioaddr + XGMAC_DMA_BUS_MODE); - - /* Enable interrupts */ - writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); - writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); - - /* XGMAC requires AXI bus init. This is a 'magic number' for now */ - writel(0x000100E, ioaddr + XGMAC_DMA_AXI_BUS); - - ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS | - XGMAC_CONTROL_CAR; - if (dev->features & NETIF_F_RXCSUM) - ctrl |= XGMAC_CONTROL_IPC; - writel(ctrl, ioaddr + XGMAC_CONTROL); - - value = DMA_CONTROL_DFF; - writel(value, ioaddr + XGMAC_DMA_CONTROL); - - /* Set the HW DMA mode and the COE */ - writel(XGMAC_OMR_TSF | XGMAC_OMR_RSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA, - ioaddr + XGMAC_OMR); - - /* Reset the MMC counters */ - writel(1, ioaddr + XGMAC_MMC_CTRL); - return 0; -} - -/** - * xgmac_open - open entry point of the driver - * @dev : pointer to the device structure. - * Description: - * This function is the open entry point of the driver. - * Return value: - * 0 on success and an appropriate (-)ve integer as defined in errno.h - * file on failure. - */ -static int xgmac_open(struct net_device *dev) -{ - int ret; - struct xgmac_priv *priv = netdev_priv(dev); - void __iomem *ioaddr = priv->base; - - /* Check that the MAC address is valid. If its not, refuse - * to bring the device up. 
The user must specify an - * address using the following linux command: - * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */ - if (!is_valid_ether_addr(dev->dev_addr)) { - random_ether_addr(dev->dev_addr); - netdev_dbg(priv->dev, "generated random MAC address %pM\n", - dev->dev_addr); - } - - skb_queue_head_init(&priv->rx_recycle); - memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats)); - - /* Initialize the XGMAC and descriptors */ - xgmac_hw_init(dev); - xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0); - xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause); - - ret = xgmac_dma_desc_rings_init(dev); - if (ret < 0) - return ret; - - /* Enable the MAC Rx/Tx */ - xgmac_mac_enable(ioaddr); - - napi_enable(&priv->napi); - netif_start_queue(dev); - - return 0; -} - -/** - * xgmac_release - close entry point of the driver - * @dev : device pointer. - * Description: - * This is the stop entry point of the driver. - */ -static int xgmac_stop(struct net_device *dev) -{ - struct xgmac_priv *priv = netdev_priv(dev); - - netif_stop_queue(dev); - - if (readl(priv->base + XGMAC_DMA_INTR_ENA)) - napi_disable(&priv->napi); - - writel(0, priv->base + XGMAC_DMA_INTR_ENA); - skb_queue_purge(&priv->rx_recycle); - - /* Disable the MAC core */ - xgmac_mac_disable(priv->base); - - /* Release and free the Rx/Tx resources */ - xgmac_free_dma_desc_rings(priv); - - return 0; -} - -/** - * xgmac_xmit: - * @skb : the socket buffer - * @dev : device pointer - * Description : Tx entry point of the driver. - */ -static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct xgmac_priv *priv = netdev_priv(dev); - unsigned int entry; - int i; - int nfrags = skb_shinfo(skb)->nr_frags; - struct xgmac_dma_desc *desc, *first; - unsigned int desc_flags; - unsigned int len; - dma_addr_t paddr; - - if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) < - (nfrags + 1)) { - writel(DMA_INTR_DEFAULT_MASK | DMA_INTR_ENA_TIE, - priv->base + XGMAC_DMA_INTR_ENA); - netif_stop_queue(dev); - return NETDEV_TX_BUSY; - } - - desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ? 
- TXDESC_CSUM_ALL : 0; - entry = priv->tx_head; - desc = priv->dma_tx + entry; - first = desc; - - len = skb_headlen(skb); - paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE); - if (dma_mapping_error(priv->device, paddr)) { - dev_kfree_skb(skb); - return -EIO; - } - priv->tx_skbuff[entry] = skb; - desc_set_buf_addr_and_size(desc, paddr, len); - - for (i = 0; i < nfrags; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - - len = frag->size; - - paddr = skb_frag_dma_map(priv->device, frag, 0, len, - DMA_TO_DEVICE); - if (dma_mapping_error(priv->device, paddr)) { - dev_kfree_skb(skb); - return -EIO; - } - - entry = dma_ring_incr(entry, DMA_TX_RING_SZ); - desc = priv->dma_tx + entry; - priv->tx_skbuff[entry] = NULL; - - desc_set_buf_addr_and_size(desc, paddr, len); - if (i < (nfrags - 1)) - desc_set_tx_owner(desc, desc_flags); - } - - /* Interrupt on completition only for the latest segment */ - if (desc != first) - desc_set_tx_owner(desc, desc_flags | - TXDESC_LAST_SEG | TXDESC_INTERRUPT); - else - desc_flags |= TXDESC_LAST_SEG | TXDESC_INTERRUPT; - - /* Set owner on first desc last to avoid race condition */ - wmb(); - desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG); - - priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ); - - writel(1, priv->base + XGMAC_DMA_TX_POLL); - - return NETDEV_TX_OK; -} - -static int xgmac_rx(struct xgmac_priv *priv, int limit) -{ - unsigned int entry; - unsigned int count = 0; - struct xgmac_dma_desc *p; - - while (count < limit) { - int ip_checksum; - struct sk_buff *skb; - int frame_len; - - writel(DMA_STATUS_RI | DMA_STATUS_NIS, - priv->base + XGMAC_DMA_STATUS); - - entry = priv->rx_tail; - p = priv->dma_rx + entry; - if (desc_get_owner(p)) - break; - - count++; - priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ); - - /* read the status of the incoming frame */ - ip_checksum = desc_get_rx_status(priv, p); - if (ip_checksum < 0) - continue; - - skb = priv->rx_skbuff[entry]; - if (unlikely(!skb)) { - netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n"); - break; - } - priv->rx_skbuff[entry] = NULL; - - frame_len = desc_get_rx_frame_len(p); - netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n", - frame_len, ip_checksum); - - skb_put(skb, frame_len); - dma_unmap_single(priv->device, desc_get_buf_addr(p), - frame_len, DMA_FROM_DEVICE); - - skb->protocol = eth_type_trans(skb, priv->dev); - skb->ip_summed = ip_checksum; - if (ip_checksum == CHECKSUM_NONE) - netif_receive_skb(skb); - else - napi_gro_receive(&priv->napi, skb); - } - - xgmac_rx_refill(priv); - - writel(1, priv->base + XGMAC_DMA_RX_POLL); - - return count; -} - -/** - * xgmac_poll - xgmac poll method (NAPI) - * @napi : pointer to the napi structure. - * @budget : maximum number of packets that the current CPU can receive from - * all interfaces. - * Description : - * This function implements the the reception process. - * Also it runs the TX completion thread - */ -static int xgmac_poll(struct napi_struct *napi, int budget) -{ - struct xgmac_priv *priv = container_of(napi, - struct xgmac_priv, napi); - int work_done = 0; - - xgmac_tx_complete(priv); - work_done = xgmac_rx(priv, budget); - - if (work_done < budget) { - napi_complete(napi); - writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA); - } - return work_done; -} - -/** - * xgmac_tx_timeout - * @dev : Pointer to net device structure - * Description: this function is called when a packet transmission fails to - * complete within a reasonable tmrate. 
The driver will mark the error in the - * netdev structure and arrange for the device to be reset to a sane state - * in order to transmit a new packet. - */ -static void xgmac_tx_timeout(struct net_device *dev) -{ - struct xgmac_priv *priv = netdev_priv(dev); - - /* Clear Tx resources and restart transmitting again */ - xgmac_tx_err(priv); -} - -/** - * xgmac_set_rx_mode - entry point for multicast addressing - * @dev : pointer to the device structure - * Description: - * This function is a driver entry point which gets called by the kernel - * whenever multicast addresses must be enabled/disabled. - * Return value: - * void. - */ -static void xgmac_set_rx_mode(struct net_device *dev) -{ - int i; - struct xgmac_priv *priv = netdev_priv(dev); - void __iomem *ioaddr = priv->base; - unsigned int value = 0; - u32 hash_filter[XGMAC_NUM_HASH]; - int reg = 1; - struct netdev_hw_addr *ha; - bool use_hash = false; - - netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n", - netdev_mc_count(dev), netdev_uc_count(dev)); - - if (dev->flags & IFF_PROMISC) { - writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER); - return; - } - - memset(hash_filter, 0, sizeof(hash_filter)); - - if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) { - use_hash = true; - value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF; - } - netdev_for_each_uc_addr(ha, dev) { - if (use_hash) { - u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23; - - /* The most significant 4 bits determine the register to - * use (H/L) while the other 5 bits determine the bit - * within the register. */ - hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); - } else { - xgmac_set_mac_addr(ioaddr, ha->addr, reg); - reg++; - } - } - - if (dev->flags & IFF_ALLMULTI) { - value |= XGMAC_FRAME_FILTER_PM; - goto out; - } - - if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) { - use_hash = true; - value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF; - } - netdev_for_each_mc_addr(ha, dev) { - if (use_hash) { - u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23; - - /* The most significant 4 bits determine the register to - * use (H/L) while the other 5 bits determine the bit - * within the register. */ - hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); - } else { - xgmac_set_mac_addr(ioaddr, ha->addr, reg); - reg++; - } - } - -out: - for (i = 0; i < XGMAC_NUM_HASH; i++) - writel(hash_filter[i], ioaddr + XGMAC_HASH(i)); - - writel(value, ioaddr + XGMAC_FRAME_FILTER); -} - -/** - * xgmac_change_mtu - entry point to change MTU size for the device. - * @dev : device pointer. - * @new_mtu : the new MTU size for the device. - * Description: the Maximum Transfer Unit (MTU) is used by the network layer - * to drive packet transmission. Ethernet has an MTU of 1500 octets - * (ETH_DATA_LEN). This value can be changed with ifconfig. - * Return value: - * 0 on success and an appropriate (-)ve integer as defined in errno.h - * file on failure. 
- */ -static int xgmac_change_mtu(struct net_device *dev, int new_mtu) -{ - struct xgmac_priv *priv = netdev_priv(dev); - int old_mtu; - - if ((new_mtu < 46) || (new_mtu > MAX_MTU)) { - netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU); - return -EINVAL; - } - - old_mtu = dev->mtu; - dev->mtu = new_mtu; - - /* return early if the buffer sizes will not change */ - if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) - return 0; - if (old_mtu == new_mtu) - return 0; - - /* Stop everything, get ready to change the MTU */ - if (!netif_running(dev)) - return 0; - - /* Bring the interface down and then back up */ - xgmac_stop(dev); - return xgmac_open(dev); -} - -static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id) -{ - u32 intr_status; - struct net_device *dev = (struct net_device *)dev_id; - struct xgmac_priv *priv = netdev_priv(dev); - void __iomem *ioaddr = priv->base; - - intr_status = readl(ioaddr + XGMAC_INT_STAT); - if (intr_status & XGMAC_INT_STAT_PMT) { - netdev_dbg(priv->dev, "received Magic frame\n"); - /* clear the PMT bits 5 and 6 by reading the PMT */ - readl(ioaddr + XGMAC_PMT); - } - return IRQ_HANDLED; -} - -static irqreturn_t xgmac_interrupt(int irq, void *dev_id) -{ - u32 intr_status; - bool tx_err = false; - struct net_device *dev = (struct net_device *)dev_id; - struct xgmac_priv *priv = netdev_priv(dev); - struct xgmac_extra_stats *x = &priv->xstats; - - /* read the status register (CSR5) */ - intr_status = readl(priv->base + XGMAC_DMA_STATUS); - intr_status &= readl(priv->base + XGMAC_DMA_INTR_ENA); - writel(intr_status, priv->base + XGMAC_DMA_STATUS); - - /* It displays the DMA process states (CSR5 register) */ - /* ABNORMAL interrupts */ - if (unlikely(intr_status & DMA_STATUS_AIS)) { - if (intr_status & DMA_STATUS_TJT) { - netdev_err(priv->dev, "transmit jabber\n"); - x->tx_jabber++; - } - if (intr_status & DMA_STATUS_RU) - x->rx_buf_unav++; - if (intr_status & DMA_STATUS_RPS) { - netdev_err(priv->dev, "receive process stopped\n"); - x->rx_process_stopped++; - } - if (intr_status & DMA_STATUS_ETI) { - netdev_err(priv->dev, "transmit early interrupt\n"); - x->tx_early++; - } - if (intr_status & DMA_STATUS_TPS) { - netdev_err(priv->dev, "transmit process stopped\n"); - x->tx_process_stopped++; - tx_err = true; - } - if (intr_status & DMA_STATUS_FBI) { - netdev_err(priv->dev, "fatal bus error\n"); - x->fatal_bus_error++; - tx_err = true; - } - - if (tx_err) - xgmac_tx_err(priv); - } - - /* TX/RX NORMAL interrupts */ - if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU)) { - writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA); - napi_schedule(&priv->napi); - } - - return IRQ_HANDLED; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -/* Polling receive - used by NETCONSOLE and other diagnostic tools - * to allow network I/O with interrupts disabled. 
*/ -static void xgmac_poll_controller(struct net_device *dev) -{ - disable_irq(dev->irq); - xgmac_interrupt(dev->irq, dev); - enable_irq(dev->irq); -} -#endif - -static struct rtnl_link_stats64 * -xgmac_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *storage) -{ - struct xgmac_priv *priv = netdev_priv(dev); - void __iomem *base = priv->base; - u32 count; - - spin_lock_bh(&priv->stats_lock); - writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL); - - storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO); - storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32; - - storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO); - storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G); - storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR); - storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR); - storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW); - - storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO); - storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32; - - count = readl(base + XGMAC_MMC_TXFRAME_GB_LO); - storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO); - storage->tx_packets = count; - storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW); - - writel(0, base + XGMAC_MMC_CTRL); - spin_unlock_bh(&priv->stats_lock); - return storage; -} - -static int xgmac_set_mac_address(struct net_device *dev, void *p) -{ - struct xgmac_priv *priv = netdev_priv(dev); - void __iomem *ioaddr = priv->base; - struct sockaddr *addr = p; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - - xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0); - - return 0; -} - -static int xgmac_set_features(struct net_device *dev, netdev_features_t features) -{ - u32 ctrl; - struct xgmac_priv *priv = netdev_priv(dev); - void __iomem *ioaddr = priv->base; - u32 changed = dev->features ^ features; - - if (!(changed & NETIF_F_RXCSUM)) - return 0; - - ctrl = readl(ioaddr + XGMAC_CONTROL); - if (features & NETIF_F_RXCSUM) - ctrl |= XGMAC_CONTROL_IPC; - else - ctrl &= ~XGMAC_CONTROL_IPC; - writel(ctrl, ioaddr + XGMAC_CONTROL); - - return 0; -} - -static const struct net_device_ops xgmac_netdev_ops = { - .ndo_open = xgmac_open, - .ndo_start_xmit = xgmac_xmit, - .ndo_stop = xgmac_stop, - .ndo_change_mtu = xgmac_change_mtu, - .ndo_set_rx_mode = xgmac_set_rx_mode, - .ndo_tx_timeout = xgmac_tx_timeout, - .ndo_get_stats64 = xgmac_get_stats64, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = xgmac_poll_controller, -#endif - .ndo_set_mac_address = xgmac_set_mac_address, - .ndo_set_features = xgmac_set_features, -}; - -static int xgmac_ethtool_getsettings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - cmd->autoneg = 0; - cmd->duplex = DUPLEX_FULL; - ethtool_cmd_speed_set(cmd, 10000); - cmd->supported = 0; - cmd->advertising = 0; - cmd->transceiver = XCVR_INTERNAL; - return 0; -} - -static void xgmac_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct xgmac_priv *priv = netdev_priv(netdev); - - pause->rx_pause = priv->rx_pause; - pause->tx_pause = priv->tx_pause; -} - -static int xgmac_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct xgmac_priv *priv = netdev_priv(netdev); - - if (pause->autoneg) - return -EINVAL; - - return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause); -} - -struct xgmac_stats { - char stat_string[ETH_GSTRING_LEN]; - int 
stat_offset; - bool is_reg; -}; - -#define XGMAC_STAT(m) \ - { #m, offsetof(struct xgmac_priv, xstats.m), false } -#define XGMAC_HW_STAT(m, reg_offset) \ - { #m, reg_offset, true } - -static const struct xgmac_stats xgmac_gstrings_stats[] = { - XGMAC_STAT(tx_frame_flushed), - XGMAC_STAT(tx_payload_error), - XGMAC_STAT(tx_ip_header_error), - XGMAC_STAT(tx_local_fault), - XGMAC_STAT(tx_remote_fault), - XGMAC_STAT(tx_early), - XGMAC_STAT(tx_process_stopped), - XGMAC_STAT(tx_jabber), - XGMAC_STAT(rx_buf_unav), - XGMAC_STAT(rx_process_stopped), - XGMAC_STAT(rx_payload_error), - XGMAC_STAT(rx_ip_header_error), - XGMAC_STAT(rx_da_filter_fail), - XGMAC_STAT(rx_sa_filter_fail), - XGMAC_STAT(fatal_bus_error), - XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG), - XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME), - XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME), - XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME), - XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME), -}; -#define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats) - -static void xgmac_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *dummy, - u64 *data) -{ - struct xgmac_priv *priv = netdev_priv(dev); - void *p = priv; - int i; - - for (i = 0; i < XGMAC_STATS_LEN; i++) { - if (xgmac_gstrings_stats[i].is_reg) - *data++ = readl(priv->base + - xgmac_gstrings_stats[i].stat_offset); - else - *data++ = *(u32 *)(p + - xgmac_gstrings_stats[i].stat_offset); - } -} - -static int xgmac_get_sset_count(struct net_device *netdev, int sset) -{ - switch (sset) { - case ETH_SS_STATS: - return XGMAC_STATS_LEN; - default: - return -EINVAL; - } -} - -static void xgmac_get_strings(struct net_device *dev, u32 stringset, - u8 *data) -{ - int i; - u8 *p = data; - - switch (stringset) { - case ETH_SS_STATS: - for (i = 0; i < XGMAC_STATS_LEN; i++) { - memcpy(p, xgmac_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - break; - default: - WARN_ON(1); - break; - } -} - -static void xgmac_get_wol(struct net_device *dev, - struct ethtool_wolinfo *wol) -{ - struct xgmac_priv *priv = netdev_priv(dev); - - if (device_can_wakeup(priv->device)) { - wol->supported = WAKE_MAGIC | WAKE_UCAST; - wol->wolopts = priv->wolopts; - } -} - -static int xgmac_set_wol(struct net_device *dev, - struct ethtool_wolinfo *wol) -{ - struct xgmac_priv *priv = netdev_priv(dev); - u32 support = WAKE_MAGIC | WAKE_UCAST; - - if (!device_can_wakeup(priv->device)) - return -ENOTSUPP; - - if (wol->wolopts & ~support) - return -EINVAL; - - priv->wolopts = wol->wolopts; - - if (wol->wolopts) { - device_set_wakeup_enable(priv->device, 1); - enable_irq_wake(dev->irq); - } else { - device_set_wakeup_enable(priv->device, 0); - disable_irq_wake(dev->irq); - } - - return 0; -} - -static const struct ethtool_ops xgmac_ethtool_ops = { - .get_settings = xgmac_ethtool_getsettings, - .get_link = ethtool_op_get_link, - .get_pauseparam = xgmac_get_pauseparam, - .set_pauseparam = xgmac_set_pauseparam, - .get_ethtool_stats = xgmac_get_ethtool_stats, - .get_strings = xgmac_get_strings, - .get_wol = xgmac_get_wol, - .set_wol = xgmac_set_wol, - .get_sset_count = xgmac_get_sset_count, -}; - -/** - * xgmac_probe - * @pdev: platform device pointer - * Description: the driver is initialized through platform_device. 
- */ -static int xgmac_probe(struct platform_device *pdev) -{ - int ret = 0; - struct resource *res; - struct net_device *ndev = NULL; - struct xgmac_priv *priv = NULL; - u32 uid; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENODEV; - - if (!request_mem_region(res->start, resource_size(res), pdev->name)) - return -EBUSY; - - ndev = alloc_etherdev(sizeof(struct xgmac_priv)); - if (!ndev) { - ret = -ENOMEM; - goto err_alloc; - } - - SET_NETDEV_DEV(ndev, &pdev->dev); - priv = netdev_priv(ndev); - platform_set_drvdata(pdev, ndev); - ether_setup(ndev); - ndev->netdev_ops = &xgmac_netdev_ops; - SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops); - spin_lock_init(&priv->stats_lock); - - priv->device = &pdev->dev; - priv->dev = ndev; - priv->rx_pause = 1; - priv->tx_pause = 1; - - priv->base = ioremap(res->start, resource_size(res)); - if (!priv->base) { - netdev_err(ndev, "ioremap failed\n"); - ret = -ENOMEM; - goto err_io; - } - - uid = readl(priv->base + XGMAC_VERSION); - netdev_info(ndev, "h/w version is 0x%x\n", uid); - - writel(0, priv->base + XGMAC_DMA_INTR_ENA); - ndev->irq = platform_get_irq(pdev, 0); - if (ndev->irq == -ENXIO) { - netdev_err(ndev, "No irq resource\n"); - ret = ndev->irq; - goto err_irq; - } - - ret = request_irq(ndev->irq, xgmac_interrupt, 0, - dev_name(&pdev->dev), ndev); - if (ret < 0) { - netdev_err(ndev, "Could not request irq %d - ret %d)\n", - ndev->irq, ret); - goto err_irq; - } - - priv->pmt_irq = platform_get_irq(pdev, 1); - if (priv->pmt_irq == -ENXIO) { - netdev_err(ndev, "No pmt irq resource\n"); - ret = priv->pmt_irq; - goto err_pmt_irq; - } - - ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0, - dev_name(&pdev->dev), ndev); - if (ret < 0) { - netdev_err(ndev, "Could not request irq %d - ret %d)\n", - priv->pmt_irq, ret); - goto err_pmt_irq; - } - - device_set_wakeup_capable(&pdev->dev, 1); - if (device_can_wakeup(priv->device)) - priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ - - ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA; - if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL) - ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_RXCSUM; - ndev->features |= ndev->hw_features; - ndev->priv_flags |= IFF_UNICAST_FLT; - - /* Get the MAC address */ - xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0); - if (!is_valid_ether_addr(ndev->dev_addr)) - netdev_warn(ndev, "MAC address %pM not valid", - ndev->dev_addr); - - netif_napi_add(ndev, &priv->napi, xgmac_poll, 64); - ret = register_netdev(ndev); - if (ret) - goto err_reg; - - return 0; - -err_reg: - netif_napi_del(&priv->napi); - free_irq(priv->pmt_irq, ndev); -err_pmt_irq: - free_irq(ndev->irq, ndev); -err_irq: - iounmap(priv->base); -err_io: - free_netdev(ndev); -err_alloc: - release_mem_region(res->start, resource_size(res)); - platform_set_drvdata(pdev, NULL); - return ret; -} - -/** - * xgmac_dvr_remove - * @pdev: platform device pointer - * Description: this function resets the TX/RX processes, disables the MAC RX/TX - * changes the link status, releases the DMA descriptor rings, - * unregisters the MDIO bus and unmaps the allocated memory. 
- */ -static int xgmac_remove(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct xgmac_priv *priv = netdev_priv(ndev); - struct resource *res; - - xgmac_mac_disable(priv->base); - - /* Free the IRQ lines */ - free_irq(ndev->irq, ndev); - free_irq(priv->pmt_irq, ndev); - - platform_set_drvdata(pdev, NULL); - unregister_netdev(ndev); - netif_napi_del(&priv->napi); - - iounmap(priv->base); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, resource_size(res)); - - free_netdev(ndev); - - return 0; -} - -#ifdef CONFIG_PM_SLEEP -static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode) -{ - unsigned int pmt = 0; - - if (mode & WAKE_MAGIC) - pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT; - if (mode & WAKE_UCAST) - pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST; - - writel(pmt, ioaddr + XGMAC_PMT); -} - -static int xgmac_suspend(struct device *dev) -{ - struct net_device *ndev = platform_get_drvdata(to_platform_device(dev)); - struct xgmac_priv *priv = netdev_priv(ndev); - u32 value; - - if (!ndev || !netif_running(ndev)) - return 0; - - netif_device_detach(ndev); - napi_disable(&priv->napi); - writel(0, priv->base + XGMAC_DMA_INTR_ENA); - - if (device_may_wakeup(priv->device)) { - /* Stop TX/RX DMA Only */ - value = readl(priv->base + XGMAC_DMA_CONTROL); - value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR); - writel(value, priv->base + XGMAC_DMA_CONTROL); - - xgmac_pmt(priv->base, priv->wolopts); - } else - xgmac_mac_disable(priv->base); - - return 0; -} - -static int xgmac_resume(struct device *dev) -{ - struct net_device *ndev = platform_get_drvdata(to_platform_device(dev)); - struct xgmac_priv *priv = netdev_priv(ndev); - void __iomem *ioaddr = priv->base; - - if (!netif_running(ndev)) - return 0; - - xgmac_pmt(ioaddr, 0); - - /* Enable the MAC and DMA */ - xgmac_mac_enable(ioaddr); - writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); - writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); - - netif_device_attach(ndev); - napi_enable(&priv->napi); - - return 0; -} - -static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume); -#define XGMAC_PM_OPS (&xgmac_pm_ops) -#else -#define XGMAC_PM_OPS NULL -#endif /* CONFIG_PM_SLEEP */ - -static const struct of_device_id xgmac_of_match[] = { - { .compatible = "calxeda,hb-xgmac", }, - {}, -}; -MODULE_DEVICE_TABLE(of, xgmac_of_match); - -static struct platform_driver xgmac_driver = { - .driver = { - .name = "calxedaxgmac", - .of_match_table = xgmac_of_match, - }, - .probe = xgmac_probe, - .remove = xgmac_remove, - .driver.pm = XGMAC_PM_OPS, -}; - -module_platform_driver(xgmac_driver); - -MODULE_AUTHOR("Calxeda, Inc."); -MODULE_DESCRIPTION("Calxeda 10G XGMAC driver"); -MODULE_LICENSE("GPL v2"); diff --git a/trunk/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/trunk/drivers/net/ethernet/chelsio/cxgb/cxgb2.c index 1d17c92f2dda..ca26d97171bd 100644 --- a/trunk/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ b/trunk/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -434,10 +434,10 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct adapter *adapter = dev->ml_priv; - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(adapter->pdev), - sizeof(info->bus_info)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->fw_version, "N/A"); + strcpy(info->bus_info, pci_name(adapter->pdev)); } static 
int get_sset_count(struct net_device *dev, int sset) @@ -849,8 +849,7 @@ static int t1_set_mac_addr(struct net_device *dev, void *p) return 0; } -static netdev_features_t t1_fix_features(struct net_device *dev, - netdev_features_t features) +static u32 t1_fix_features(struct net_device *dev, u32 features) { /* * Since there is no support for separate rx/tx vlan accel @@ -864,9 +863,9 @@ static netdev_features_t t1_fix_features(struct net_device *dev, return features; } -static int t1_set_features(struct net_device *dev, netdev_features_t features) +static int t1_set_features(struct net_device *dev, u32 features) { - netdev_features_t changed = dev->features ^ features; + u32 changed = dev->features ^ features; struct adapter *adapter = dev->ml_priv; if (changed & NETIF_F_HW_VLAN_RX) diff --git a/trunk/drivers/net/ethernet/chelsio/cxgb/sge.c b/trunk/drivers/net/ethernet/chelsio/cxgb/sge.c index 47a84359d4e4..f9b602300040 100644 --- a/trunk/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/trunk/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -742,7 +742,7 @@ static inline void setup_ring_params(struct adapter *adapter, u64 addr, /* * Enable/disable VLAN acceleration. */ -void t1_vlan_mode(struct adapter *adapter, netdev_features_t features) +void t1_vlan_mode(struct adapter *adapter, u32 features) { struct sge *sge = adapter->sge; diff --git a/trunk/drivers/net/ethernet/chelsio/cxgb/sge.h b/trunk/drivers/net/ethernet/chelsio/cxgb/sge.h index b9bf16b385f7..e03980bcdd65 100644 --- a/trunk/drivers/net/ethernet/chelsio/cxgb/sge.h +++ b/trunk/drivers/net/ethernet/chelsio/cxgb/sge.h @@ -79,7 +79,7 @@ irqreturn_t t1_interrupt(int irq, void *cookie); int t1_poll(struct napi_struct *, int); netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev); -void t1_vlan_mode(struct adapter *adapter, netdev_features_t features); +void t1_vlan_mode(struct adapter *adapter, u32 features); void t1_sge_start(struct sge *); void t1_sge_stop(struct sge *); int t1_sge_intr_error_handler(struct sge *); diff --git a/trunk/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/trunk/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 857cc254cab8..4d15c8f99c3b 100644 --- a/trunk/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/trunk/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -1576,11 +1576,12 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) t3_get_tp_version(adapter, &tp_vers); spin_unlock(&adapter->stats_lock); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(adapter->pdev), - sizeof(info->bus_info)); - if (fw_vers) + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, pci_name(adapter->pdev)); + if (!fw_vers) + strcpy(info->fw_version, "N/A"); + else { snprintf(info->fw_version, sizeof(info->fw_version), "%s %u.%u.%u TP %u.%u.%u", G_FW_VERSION_TYPE(fw_vers) ? 
"T" : "N", @@ -1590,6 +1591,7 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) G_TP_VERSION_MAJOR(tp_vers), G_TP_VERSION_MINOR(tp_vers), G_TP_VERSION_MICRO(tp_vers)); + } } static void get_strings(struct net_device *dev, u32 stringset, u8 * data) @@ -2529,7 +2531,7 @@ static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p) } } -static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features) +static void cxgb_vlan_mode(struct net_device *dev, u32 features) { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; @@ -2550,8 +2552,7 @@ static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features) t3_synchronize_rx(adapter, pi); } -static netdev_features_t cxgb_fix_features(struct net_device *dev, - netdev_features_t features) +static u32 cxgb_fix_features(struct net_device *dev, u32 features) { /* * Since there is no support for separate rx/tx vlan accel @@ -2565,9 +2566,9 @@ static netdev_features_t cxgb_fix_features(struct net_device *dev, return features; } -static int cxgb_set_features(struct net_device *dev, netdev_features_t features) +static int cxgb_set_features(struct net_device *dev, u32 features) { - netdev_features_t changed = dev->features ^ features; + u32 changed = dev->features ^ features; if (changed & NETIF_F_HW_VLAN_RX) cxgb_vlan_mode(dev, features); diff --git a/trunk/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/trunk/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c index 65e4b280619a..90ff1318cc05 100644 --- a/trunk/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c +++ b/trunk/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c @@ -969,7 +969,7 @@ static int nb_callback(struct notifier_block *self, unsigned long event, case (NETEVENT_REDIRECT):{ struct netevent_redirect *nr = ctx; cxgb_redirect(nr->old, nr->new); - cxgb_neigh_update(dst_get_neighbour_noref(nr->new)); + cxgb_neigh_update(dst_get_neighbour(nr->new)); break; } default: @@ -1072,11 +1072,8 @@ static int is_offloading(struct net_device *dev) static void cxgb_neigh_update(struct neighbour *neigh) { - struct net_device *dev; + struct net_device *dev = neigh->dev; - if (!neigh) - return; - dev = neigh->dev; if (dev && (is_offloading(dev))) { struct t3cdev *tdev = dev2t3cdev(dev); @@ -1110,7 +1107,6 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e) static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) { struct net_device *olddev, *newdev; - struct neighbour *n; struct tid_info *ti; struct t3cdev *tdev; u32 tid; @@ -1118,16 +1114,8 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) struct l2t_entry *e; struct t3c_tid_entry *te; - n = dst_get_neighbour_noref(old); - if (!n) - return; - olddev = n->dev; - - n = dst_get_neighbour_noref(new); - if (!n) - return; - newdev = n->dev; - + olddev = dst_get_neighbour(old)->dev; + newdev = dst_get_neighbour(new)->dev; if (!is_offloading(olddev)) return; if (!is_offloading(newdev)) { @@ -1144,7 +1132,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) } /* Add new L2T entry */ - e = t3_l2t_get(tdev, new, newdev); + e = t3_l2t_get(tdev, dst_get_neighbour(new), newdev); if (!e) { printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n", __func__); @@ -1313,7 +1301,7 @@ int cxgb3_offload_activate(struct adapter *adapter) out_free_l2t: t3_free_l2t(L2DATA(dev)); - RCU_INIT_POINTER(dev->l2opt, NULL); + rcu_assign_pointer(dev->l2opt, NULL); out_free: kfree(t); return err; 
@@ -1341,7 +1329,7 @@ void cxgb3_offload_deactivate(struct adapter *adapter) rcu_read_lock(); d = L2DATA(tdev); rcu_read_unlock(); - RCU_INIT_POINTER(tdev->l2opt, NULL); + rcu_assign_pointer(tdev->l2opt, NULL); call_rcu(&d->rcu_head, clean_l2_data); if (t->nofail_skb) kfree_skb(t->nofail_skb); diff --git a/trunk/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/trunk/drivers/net/ethernet/chelsio/cxgb3/l2t.c index 3fa3c8833ed7..70fec8b1140f 100644 --- a/trunk/drivers/net/ethernet/chelsio/cxgb3/l2t.c +++ b/trunk/drivers/net/ethernet/chelsio/cxgb3/l2t.c @@ -298,31 +298,18 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh) spin_unlock(&e->lock); } -struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst, +struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, struct net_device *dev) { struct l2t_entry *e = NULL; - struct neighbour *neigh; - struct port_info *p; struct l2t_data *d; int hash; - u32 addr; - int ifidx; - int smt_idx; + u32 addr = *(u32 *) neigh->primary_key; + int ifidx = neigh->dev->ifindex; + struct port_info *p = netdev_priv(dev); + int smt_idx = p->port_id; rcu_read_lock(); - neigh = dst_get_neighbour_noref(dst); - if (!neigh) - goto done_rcu; - - addr = *(u32 *) neigh->primary_key; - ifidx = neigh->dev->ifindex; - - if (!dev) - dev = neigh->dev; - p = netdev_priv(dev); - smt_idx = p->port_id; - d = L2DATA(cdev); if (!d) goto done_rcu; @@ -336,7 +323,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst, l2t_hold(d, e); if (atomic_read(&e->refcnt) == 1) reuse_entry(e, neigh); - goto done_unlock; + goto done; } /* Need to allocate a new entry */ @@ -357,7 +344,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst, e->vlan = VLAN_NONE; spin_unlock(&e->lock); } -done_unlock: +done: write_unlock_bh(&d->lock); done_rcu: rcu_read_unlock(); diff --git a/trunk/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/trunk/drivers/net/ethernet/chelsio/cxgb3/l2t.h index c4e864369751..c5f54796e2cb 100644 --- a/trunk/drivers/net/ethernet/chelsio/cxgb3/l2t.h +++ b/trunk/drivers/net/ethernet/chelsio/cxgb3/l2t.h @@ -109,7 +109,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb, void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e); void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh); -struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst, +struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, struct net_device *dev); int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb, struct l2t_entry *e); diff --git a/trunk/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/trunk/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index e83d12c7bf20..4c8f42afa3c6 100644 --- a/trunk/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/trunk/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -243,7 +243,7 @@ module_param_array(intr_cnt, uint, NULL, 0644); MODULE_PARM_DESC(intr_cnt, "thresholds 1..3 for queue interrupt packet counters"); -static bool vf_acls; +static int vf_acls; #ifdef CONFIG_PCI_IOV module_param(vf_acls, bool, 0644); @@ -1002,12 +1002,13 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct adapter *adapter = netdev2adap(dev); - strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(adapter->pdev), - sizeof(info->bus_info)); + strcpy(info->driver, KBUILD_MODNAME); + strcpy(info->version, 
DRV_VERSION); + strcpy(info->bus_info, pci_name(adapter->pdev)); - if (adapter->params.fw_vers) + if (!adapter->params.fw_vers) + strcpy(info->fw_version, "N/A"); + else snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u.%u.%u, TP %u.%u.%u.%u", FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers), @@ -1854,10 +1855,10 @@ static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) return err; } -static int cxgb_set_features(struct net_device *dev, netdev_features_t features) +static int cxgb_set_features(struct net_device *dev, u32 features) { const struct port_info *pi = netdev_priv(dev); - netdev_features_t changed = dev->features ^ features; + u32 changed = dev->features ^ features; int err; if (!(changed & NETIF_F_HW_VLAN_RX)) @@ -1871,30 +1872,30 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features) return err; } -static u32 get_rss_table_size(struct net_device *dev) -{ - const struct port_info *pi = netdev_priv(dev); - - return pi->rss_size; -} - -static int get_rss_table(struct net_device *dev, u32 *p) +static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p) { const struct port_info *pi = netdev_priv(dev); - unsigned int n = pi->rss_size; + unsigned int n = min_t(unsigned int, p->size, pi->rss_size); + p->size = pi->rss_size; while (n--) - p[n] = pi->rss[n]; + p->ring_index[n] = pi->rss[n]; return 0; } -static int set_rss_table(struct net_device *dev, const u32 *p) +static int set_rss_table(struct net_device *dev, + const struct ethtool_rxfh_indir *p) { unsigned int i; struct port_info *pi = netdev_priv(dev); - for (i = 0; i < pi->rss_size; i++) - pi->rss[i] = p[i]; + if (p->size != pi->rss_size) + return -EINVAL; + for (i = 0; i < p->size; i++) + if (p->ring_index[i] >= pi->nqsets) + return -EINVAL; + for (i = 0; i < p->size; i++) + pi->rss[i] = p->ring_index[i]; if (pi->adapter->flags & FULL_INIT_DONE) return write_rss(pi, pi->rss); return 0; @@ -1963,7 +1964,7 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, return -EOPNOTSUPP; } -static const struct ethtool_ops cxgb_ethtool_ops = { +static struct ethtool_ops cxgb_ethtool_ops = { .get_settings = get_settings, .set_settings = set_settings, .get_drvinfo = get_drvinfo, @@ -1989,7 +1990,6 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .get_wol = get_wol, .set_wol = set_wol, .get_rxnfc = get_rxnfc, - .get_rxfh_indir_size = get_rss_table_size, .get_rxfh_indir = get_rss_table, .set_rxfh_indir = set_rss_table, .flash_device = set_flash, @@ -3449,7 +3449,7 @@ static int __devinit init_rss(struct adapter *adap) if (!pi->rss) return -ENOMEM; for (j = 0; j < pi->rss_size; j++) - pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets); + pi->rss[j] = j % pi->nqsets; } return 0; } @@ -3537,7 +3537,7 @@ static int __devinit init_one(struct pci_dev *pdev, { int func, i, err; struct port_info *pi; - bool highdma = false; + unsigned int highdma = 0; struct adapter *adapter = NULL; printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); @@ -3563,7 +3563,7 @@ static int __devinit init_one(struct pci_dev *pdev, } if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { - highdma = true; + highdma = NETIF_F_HIGHDMA; err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " @@ -3637,9 +3637,7 @@ static int __devinit init_one(struct pci_dev *pdev, NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - if (highdma) - 
netdev->hw_features |= NETIF_F_HIGHDMA; - netdev->features |= netdev->hw_features; + netdev->features |= netdev->hw_features | highdma; netdev->vlan_features = netdev->features & VLAN_FEAT; netdev->priv_flags |= IFF_UNICAST_FLT; diff --git a/trunk/drivers/net/ethernet/chelsio/cxgb4/sge.c b/trunk/drivers/net/ethernet/chelsio/cxgb4/sge.c index 2dae7959f000..140254c7cba9 100644 --- a/trunk/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/trunk/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -491,7 +491,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, __be64 *d = &q->desc[q->pidx]; struct rx_sw_desc *sd = &q->sdesc[q->pidx]; - gfp |= __GFP_NOWARN | __GFP_COLD; + gfp |= __GFP_NOWARN; /* failures are expected */ #if FL_PG_ORDER > 0 /* @@ -528,7 +528,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, #endif while (n--) { - pg = alloc_page(gfp); + pg = __netdev_alloc_page(adap->port[0], gfp); if (unlikely(!pg)) { q->alloc_failed++; break; @@ -537,7 +537,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { - put_page(pg); + netdev_free_page(adap->port[0], pg); goto out; } *d++ = cpu_to_be64(mapping); diff --git a/trunk/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/trunk/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 5ca73671830b..da9072bfca8b 100644 --- a/trunk/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/trunk/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1092,8 +1092,7 @@ static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu) return ret; } -static netdev_features_t cxgb4vf_fix_features(struct net_device *dev, - netdev_features_t features) +static u32 cxgb4vf_fix_features(struct net_device *dev, u32 features) { /* * Since there is no support for separate rx/tx vlan accel @@ -1107,11 +1106,10 @@ static netdev_features_t cxgb4vf_fix_features(struct net_device *dev, return features; } -static int cxgb4vf_set_features(struct net_device *dev, - netdev_features_t features) +static int cxgb4vf_set_features(struct net_device *dev, u32 features) { struct port_info *pi = netdev_priv(dev); - netdev_features_t changed = dev->features ^ features; + u32 changed = dev->features ^ features; if (changed & NETIF_F_HW_VLAN_RX) t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1, @@ -1205,10 +1203,9 @@ static void cxgb4vf_get_drvinfo(struct net_device *dev, { struct adapter *adapter = netdev2adap(dev); - strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); - strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)), - sizeof(drvinfo->bus_info)); + strcpy(drvinfo->driver, KBUILD_MODNAME); + strcpy(drvinfo->version, DRV_VERSION); + strcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent))); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%u.%u.%u.%u, TP %u.%u.%u.%u", FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev), @@ -1564,7 +1561,7 @@ static void cxgb4vf_get_wol(struct net_device *dev, */ #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) -static const struct ethtool_ops cxgb4vf_ethtool_ops = { +static struct ethtool_ops cxgb4vf_ethtool_ops = { .get_settings = cxgb4vf_get_settings, .get_drvinfo = cxgb4vf_get_drvinfo, .get_msglevel = cxgb4vf_get_msglevel, diff --git a/trunk/drivers/net/ethernet/chelsio/cxgb4vf/sge.c 
b/trunk/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index c381db23e713..8d5d55ad102d 100644 --- a/trunk/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/trunk/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -653,7 +653,8 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, alloc_small_pages: while (n--) { - page = alloc_page(gfp | __GFP_NOWARN | __GFP_COLD); + page = __netdev_alloc_page(adapter->port[0], + gfp | __GFP_NOWARN); if (unlikely(!page)) { fl->alloc_failed++; break; @@ -663,7 +664,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { - put_page(page); + netdev_free_page(adapter->port[0], page); break; } *d++ = cpu_to_be64(dma_addr); diff --git a/trunk/drivers/net/ethernet/cisco/enic/enic_dev.c b/trunk/drivers/net/ethernet/cisco/enic/enic_dev.c index bf0fc56dba19..fd6247b3c0ee 100644 --- a/trunk/drivers/net/ethernet/cisco/enic/enic_dev.c +++ b/trunk/drivers/net/ethernet/cisco/enic/enic_dev.c @@ -212,29 +212,23 @@ int enic_dev_deinit_done(struct enic *enic, int *status) } /* rtnl lock is held */ -int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct enic *enic = netdev_priv(netdev); - int err; spin_lock(&enic->devcmd_lock); - err = enic_add_vlan(enic, vid); + enic_add_vlan(enic, vid); spin_unlock(&enic->devcmd_lock); - - return err; } /* rtnl lock is held */ -int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct enic *enic = netdev_priv(netdev); - int err; spin_lock(&enic->devcmd_lock); - err = enic_del_vlan(enic, vid); + enic_del_vlan(enic, vid); spin_unlock(&enic->devcmd_lock); - - return err; } int enic_dev_enable2(struct enic *enic, int active) diff --git a/trunk/drivers/net/ethernet/cisco/enic/enic_dev.h b/trunk/drivers/net/ethernet/cisco/enic/enic_dev.h index da1cba3c410e..1f83a4747ba0 100644 --- a/trunk/drivers/net/ethernet/cisco/enic/enic_dev.h +++ b/trunk/drivers/net/ethernet/cisco/enic/enic_dev.h @@ -46,8 +46,8 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast, int broadcast, int promisc, int allmulti); int enic_dev_add_addr(struct enic *enic, u8 *addr); int enic_dev_del_addr(struct enic *enic, u8 *addr); -int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid); -int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); +void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid); +void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); int enic_dev_notify_unset(struct enic *enic); int enic_dev_hang_notify(struct enic *enic); int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic); diff --git a/trunk/drivers/net/ethernet/cisco/enic/enic_main.c b/trunk/drivers/net/ethernet/cisco/enic/enic_main.c index 2fd9db4b1be5..c3786fda11db 100644 --- a/trunk/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/trunk/drivers/net/ethernet/cisco/enic/enic_main.c @@ -217,11 +217,11 @@ static void enic_get_drvinfo(struct net_device *netdev, enic_dev_fw_info(enic, &fw_info); - strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); - strlcpy(drvinfo->fw_version, fw_info->fw_version, + strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); + 
strncpy(drvinfo->fw_version, fw_info->fw_version, sizeof(drvinfo->fw_version)); - strlcpy(drvinfo->bus_info, pci_name(enic->pdev), + strncpy(drvinfo->bus_info, pci_name(enic->pdev), sizeof(drvinfo->bus_info)); } @@ -2379,7 +2379,7 @@ static int __devinit enic_probe(struct pci_dev *pdev, #endif /* Allocate structure for port profiles */ - enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL); + enic->pp = kzalloc(num_pps * sizeof(*enic->pp), GFP_KERNEL); if (!enic->pp) { pr_err("port profile alloc failed, aborting\n"); err = -ENOMEM; diff --git a/trunk/drivers/net/ethernet/davicom/dm9000.c b/trunk/drivers/net/ethernet/davicom/dm9000.c index f801754c71a7..2a22f5256353 100644 --- a/trunk/drivers/net/ethernet/davicom/dm9000.c +++ b/trunk/drivers/net/ethernet/davicom/dm9000.c @@ -474,11 +474,10 @@ static int dm9000_nway_reset(struct net_device *dev) return mii_nway_restart(&dm->mii); } -static int dm9000_set_features(struct net_device *dev, - netdev_features_t features) +static int dm9000_set_features(struct net_device *dev, u32 features) { board_info_t *dm = to_dm9000_board(dev); - netdev_features_t changed = dev->features ^ features; + u32 changed = dev->features ^ features; unsigned long flags; if (!(changed & NETIF_F_RXCSUM)) diff --git a/trunk/drivers/net/ethernet/dec/tulip/de2104x.c b/trunk/drivers/net/ethernet/dec/tulip/de2104x.c index 1eb46a0bb488..1427739d9a51 100644 --- a/trunk/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/trunk/drivers/net/ethernet/dec/tulip/de2104x.c @@ -1598,9 +1598,9 @@ static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info) { struct de_private *de = netdev_priv(dev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info)); + strcpy (info->driver, DRV_NAME); + strcpy (info->version, DRV_VERSION); + strcpy (info->bus_info, pci_name(de->pdev)); info->eedump_len = DE_EEPROM_SIZE; } diff --git a/trunk/drivers/net/ethernet/dec/tulip/de4x5.c b/trunk/drivers/net/ethernet/dec/tulip/de4x5.c index 4d71f5ae20c8..871bcaa7068d 100644 --- a/trunk/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/trunk/drivers/net/ethernet/dec/tulip/de4x5.c @@ -2127,9 +2127,14 @@ srom_search(struct net_device *dev, struct pci_dev *pdev) u_long iobase = 0; /* Clear upper 32 bits in Alphas */ int i, j; struct de4x5_private *lp = netdev_priv(dev); - struct pci_dev *this_dev; + struct list_head *walk; + + list_for_each(walk, &pdev->bus_list) { + struct pci_dev *this_dev = pci_dev_b(walk); + + /* Skip the pci_bus list entry */ + if (list_entry(walk, struct pci_bus, devices) == pdev->bus) continue; - list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) { vendor = this_dev->vendor; device = this_dev->device << 8; if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue; @@ -5191,7 +5196,7 @@ de4x5_parse_params(struct net_device *dev) struct de4x5_private *lp = netdev_priv(dev); char *p, *q, t; - lp->params.fdx = false; + lp->params.fdx = 0; lp->params.autosense = AUTO; if (args == NULL) return; @@ -5201,7 +5206,7 @@ de4x5_parse_params(struct net_device *dev) t = *q; *q = '\0'; - if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true; + if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = 1; if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) { if (strstr(p, "TP")) { diff --git a/trunk/drivers/net/ethernet/dec/tulip/dmfe.c b/trunk/drivers/net/ethernet/dec/tulip/dmfe.c index 51f7542eb451..17b11ee1745a 100644 
--- a/trunk/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/trunk/drivers/net/ethernet/dec/tulip/dmfe.c @@ -1085,11 +1085,10 @@ static void dmfe_ethtool_get_drvinfo(struct net_device *dev, { struct dmfe_board_info *np = netdev_priv(dev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); if (np->pdev) - strlcpy(info->bus_info, pci_name(np->pdev), - sizeof(info->bus_info)); + strcpy(info->bus_info, pci_name(np->pdev)); else sprintf(info->bus_info, "EISA 0x%lx %d", dev->base_addr, dev->irq); diff --git a/trunk/drivers/net/ethernet/dec/tulip/tulip_core.c b/trunk/drivers/net/ethernet/dec/tulip/tulip_core.c index 4eb0d76145c2..9656dd0647d9 100644 --- a/trunk/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/trunk/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -871,9 +871,9 @@ static struct net_device_stats *tulip_get_stats(struct net_device *dev) static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct tulip_private *np = netdev_priv(dev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, pci_name(np->pdev)); } diff --git a/trunk/drivers/net/ethernet/dec/tulip/uli526x.c b/trunk/drivers/net/ethernet/dec/tulip/uli526x.c index 48b0b6566eef..7a44a7a6adc8 100644 --- a/trunk/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/trunk/drivers/net/ethernet/dec/tulip/uli526x.c @@ -960,11 +960,10 @@ static void netdev_get_drvinfo(struct net_device *dev, { struct uli526x_board_info *np = netdev_priv(dev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); if (np->pdev) - strlcpy(info->bus_info, pci_name(np->pdev), - sizeof(info->bus_info)); + strcpy(info->bus_info, pci_name(np->pdev)); else sprintf(info->bus_info, "EISA 0x%lx %d", dev->base_addr, dev->irq); diff --git a/trunk/drivers/net/ethernet/dec/tulip/winbond-840.c b/trunk/drivers/net/ethernet/dec/tulip/winbond-840.c index 52da7b2fe3b6..4d01219ba22f 100644 --- a/trunk/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/trunk/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -1390,9 +1390,9 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo * { struct netdev_private *np = netdev_priv(dev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); + strcpy (info->driver, DRV_NAME); + strcpy (info->version, DRV_VERSION); + strcpy (info->bus_info, pci_name(np->pci_dev)); } static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) diff --git a/trunk/drivers/net/ethernet/dlink/de600.c b/trunk/drivers/net/ethernet/dlink/de600.c index c24fab1e9cbe..23a65398d011 100644 --- a/trunk/drivers/net/ethernet/dlink/de600.c +++ b/trunk/drivers/net/ethernet/dlink/de600.c @@ -59,7 +59,7 @@ static const char version[] = "de600.c: $Revision: 1.41-2.5 $, Bjorn Ekwall (bj #include "de600.h" -static bool check_lost = true; +static unsigned int check_lost = 1; module_param(check_lost, bool, 0); MODULE_PARM_DESC(check_lost, "If set then check for unplugged 
de600"); diff --git a/trunk/drivers/net/ethernet/dlink/sundance.c b/trunk/drivers/net/ethernet/dlink/sundance.c index 28a3a9b50b8b..dcd7f7a71ad4 100644 --- a/trunk/drivers/net/ethernet/dlink/sundance.c +++ b/trunk/drivers/net/ethernet/dlink/sundance.c @@ -1634,9 +1634,9 @@ static int check_if_running(struct net_device *dev) static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct netdev_private *np = netdev_priv(dev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, pci_name(np->pci_dev)); } static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) diff --git a/trunk/drivers/net/ethernet/dnet.c b/trunk/drivers/net/ethernet/dnet.c index ce88c0f399f6..c1063d1540c2 100644 --- a/trunk/drivers/net/ethernet/dnet.c +++ b/trunk/drivers/net/ethernet/dnet.c @@ -804,9 +804,9 @@ static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) static void dnet_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, "0", sizeof(info->bus_info)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, "0"); } static const struct ethtool_ops dnet_ethtool_ops = { @@ -977,7 +977,18 @@ static struct platform_driver dnet_driver = { }, }; -module_platform_driver(dnet_driver); +static int __init dnet_init(void) +{ + return platform_driver_register(&dnet_driver); +} + +static void __exit dnet_exit(void) +{ + platform_driver_unregister(&dnet_driver); +} + +module_init(dnet_init); +module_exit(dnet_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Dave DNET Ethernet driver"); diff --git a/trunk/drivers/net/ethernet/emulex/benet/be.h b/trunk/drivers/net/ethernet/emulex/benet/be.h index cbdec2536da6..644e8fed8364 100644 --- a/trunk/drivers/net/ethernet/emulex/benet/be.h +++ b/trunk/drivers/net/ethernet/emulex/benet/be.h @@ -40,7 +40,6 @@ #define OC_NAME "Emulex OneConnect 10Gbps NIC" #define OC_NAME_BE OC_NAME "(be3)" #define OC_NAME_LANCER OC_NAME "(Lancer)" -#define OC_NAME_SH OC_NAME "(Skyhawk)" #define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver" #define BE_VENDOR_ID 0x19a2 @@ -51,7 +50,6 @@ #define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */ #define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */ #define OC_DEVICE_ID4 0xe228 /* Device id for VF in Lancer */ -#define OC_DEVICE_ID5 0x720 /* Device Id for Skyhawk cards */ static inline char *nic_name(struct pci_dev *pdev) { @@ -65,8 +63,6 @@ static inline char *nic_name(struct pci_dev *pdev) return OC_NAME_LANCER; case BE_DEVICE_ID2: return BE3_NAME; - case OC_DEVICE_ID5: - return OC_NAME_SH; default: return BE_NAME; } @@ -292,14 +288,14 @@ struct be_drv_stats { }; struct be_vf_cfg { - unsigned char mac_addr[ETH_ALEN]; - int if_handle; - int pmac_id; - u16 vlan_tag; - u32 tx_rate; + unsigned char vf_mac_addr[ETH_ALEN]; + u32 vf_if_handle; + u32 vf_pmac_id; + u16 vf_vlan_tag; + u32 vf_tx_rate; }; -#define BE_FLAGS_LINK_STATUS_INIT 1 +#define BE_INVALID_PMAC_ID 0xffffffff struct be_adapter { struct pci_dev *pdev; @@ -349,16 +345,13 @@ struct be_adapter { struct delayed_work work; u16 work_counter; - u32 flags; /* Ethtool knobs and info */ char 
fw_ver[FW_VER_LEN]; - int if_handle; /* Used to configure filtering */ + u32 if_handle; /* Used to configure filtering */ u32 pmac_id; /* MAC addr handle used by BE card */ u32 beacon_state; /* for set_phys_id */ bool eeh_err; - bool ue_detected; - bool fw_timeout; u32 port_num; bool promiscuous; bool wol; @@ -366,6 +359,7 @@ struct be_adapter { u32 function_caps; u32 rx_fc; /* Rx flow control */ u32 tx_fc; /* Tx flow control */ + bool ue_detected; bool stats_cmd_sent; int link_speed; u8 port_type; @@ -375,20 +369,16 @@ struct be_adapter { u32 flash_status; struct completion flash_compl; - u32 num_vfs; - u8 is_virtfn; - struct be_vf_cfg *vf_cfg; bool be3_native; + bool sriov_enabled; + struct be_vf_cfg *vf_cfg; + u8 is_virtfn; u32 sli_family; u8 hba_port_num; u16 pvid; }; #define be_physfn(adapter) (!adapter->is_virtfn) -#define sriov_enabled(adapter) (adapter->num_vfs > 0) -#define for_all_vfs(adapter, vf_cfg, i) \ - for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \ - i++, vf_cfg++) /* BladeEngine Generation numbers */ #define BE_GEN2 2 @@ -534,14 +524,9 @@ static inline bool be_multi_rxq(const struct be_adapter *adapter) return adapter->num_rx_qs > 1; } -static inline bool be_error(struct be_adapter *adapter) -{ - return adapter->eeh_err || adapter->ue_detected || adapter->fw_timeout; -} - extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped); -extern void be_link_status_update(struct be_adapter *adapter, u8 link_status); +extern void be_link_status_update(struct be_adapter *adapter, u32 link_status); extern void be_parse_stats(struct be_adapter *adapter); extern int be_load_fw(struct be_adapter *adapter, u8 *func); #endif /* BE_H */ diff --git a/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c b/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c index 0fcb45624796..2c7b36673dfc 100644 --- a/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -31,8 +31,11 @@ static void be_mcc_notify(struct be_adapter *adapter) struct be_queue_info *mccq = &adapter->mcc_obj.q; u32 val = 0; - if (be_error(adapter)) + if (adapter->eeh_err) { + dev_info(&adapter->pdev->dev, + "Error in Card Detected! Cannot issue commands\n"); return; + } val |= mccq->id & DB_MCCQ_RING_ID_MASK; val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; @@ -125,14 +128,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter, static void be_async_link_state_process(struct be_adapter *adapter, struct be_async_event_link_state *evt) { - /* When link status changes, link speed must be re-queried from FW */ - adapter->link_speed = -1; - - /* For the initial link status do not rely on the ASYNC event as - * it may not be received in some cases. 
- */ - if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT) - be_link_status_update(adapter, evt->port_link_status); + be_link_status_update(adapter, evt->port_link_status); } /* Grp5 CoS Priority evt */ @@ -270,10 +266,10 @@ static int be_mcc_wait_compl(struct be_adapter *adapter) int i, num, status = 0; struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; - for (i = 0; i < mcc_timeout; i++) { - if (be_error(adapter)) - return -EIO; + if (adapter->eeh_err) + return -EIO; + for (i = 0; i < mcc_timeout; i++) { num = be_process_mcc(adapter, &status); if (num) be_cq_notify(adapter, mcc_obj->cq.id, @@ -284,8 +280,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter) udelay(100); } if (i == mcc_timeout) { - dev_err(&adapter->pdev->dev, "FW not responding\n"); - adapter->fw_timeout = true; + dev_err(&adapter->pdev->dev, "mccq poll timed out\n"); return -1; } return status; @@ -303,21 +298,26 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) int msecs = 0; u32 ready; - do { - if (be_error(adapter)) - return -EIO; + if (adapter->eeh_err) { + dev_err(&adapter->pdev->dev, + "Error detected in card.Cannot issue commands\n"); + return -EIO; + } + do { ready = ioread32(db); - if (ready == 0xffffffff) + if (ready == 0xffffffff) { + dev_err(&adapter->pdev->dev, + "pci slot disconnected\n"); return -1; + } ready &= MPU_MAILBOX_DB_RDY_MASK; if (ready) break; if (msecs > 4000) { - dev_err(&adapter->pdev->dev, "FW not responding\n"); - adapter->fw_timeout = true; + dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); be_detect_dump_ue(adapter); return -1; } @@ -555,6 +555,9 @@ int be_cmd_fw_clean(struct be_adapter *adapter) u8 *wrb; int status; + if (adapter->eeh_err) + return -EIO; + if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; @@ -616,7 +619,7 @@ int be_cmd_eq_create(struct be_adapter *adapter, /* Use MCC */ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, - u8 type, bool permanent, u32 if_handle, u32 pmac_id) + u8 type, bool permanent, u32 if_handle) { struct be_mcc_wrb *wrb; struct be_cmd_req_mac_query *req; @@ -638,7 +641,6 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, req->permanent = 1; } else { req->if_id = cpu_to_le16((u16) if_handle); - req->pmac_id = cpu_to_le32(pmac_id); req->permanent = 0; } @@ -693,15 +695,12 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, } /* Uses synchronous MCCQ */ -int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) +int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom) { struct be_mcc_wrb *wrb; struct be_cmd_req_pmac_del *req; int status; - if (pmac_id == -1) - return 0; - spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); @@ -924,14 +923,10 @@ int be_cmd_txq_create(struct be_adapter *adapter, void *ctxt; int status; - spin_lock_bh(&adapter->mcc_lock); - - wrb = wrb_from_mccq(adapter); - if (!wrb) { - status = -EBUSY; - goto err; - } + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + wrb = wrb_from_mbox(adapter); req = embedded_payload(wrb); ctxt = &req->context; @@ -957,15 +952,14 @@ int be_cmd_txq_create(struct be_adapter *adapter, be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); - status = be_mcc_notify_wait(adapter); + status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb); txq->id = le16_to_cpu(resp->cid); txq->created = true; } -err: - spin_unlock_bh(&adapter->mcc_lock); + 
mutex_unlock(&adapter->mbox_lock); return status; } @@ -1024,6 +1018,9 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, u8 subsys = 0, opcode = 0; int status; + if (adapter->eeh_err) + return -EIO; + if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; @@ -1139,13 +1136,16 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, } /* Uses MCCQ */ -int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain) +int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_if_destroy *req; int status; - if (interface_id == -1) + if (adapter->eeh_err) + return -EIO; + + if (!interface_id) return 0; spin_lock_bh(&adapter->mcc_lock); @@ -1239,7 +1239,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, /* Uses synchronous mcc */ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed, - u16 *link_speed, u8 *link_status, u32 dom) + u16 *link_speed, u32 dom) { struct be_mcc_wrb *wrb; struct be_cmd_req_link_status *req; @@ -1247,9 +1247,6 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed, spin_lock_bh(&adapter->mcc_lock); - if (link_status) - *link_status = LINK_DOWN; - wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; @@ -1257,9 +1254,6 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed, } req = embedded_payload(wrb); - if (adapter->generation == BE_GEN3 || lancer_chip(adapter)) - req->hdr.version = 1; - be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL); @@ -1267,13 +1261,10 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed, if (!status) { struct be_cmd_resp_link_status *resp = embedded_payload(wrb); if (resp->mac_speed != PHY_LINK_SPEED_ZERO) { - if (link_speed) - *link_speed = le16_to_cpu(resp->link_speed); + *link_speed = le16_to_cpu(resp->link_speed); if (mac_speed) *mac_speed = resp->mac_speed; } - if (link_status) - *link_status = resp->logical_link_status; } err: @@ -1682,9 +1673,8 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size) { struct be_mcc_wrb *wrb; struct be_cmd_req_rss_config *req; - u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e, - 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2, - 0x3ea83c02, 0x4a110304}; + u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF, + 0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF}; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) @@ -1846,53 +1836,6 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, return status; } -int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, - u32 data_size, u32 data_offset, const char *obj_name, - u32 *data_read, u32 *eof, u8 *addn_status) -{ - struct be_mcc_wrb *wrb; - struct lancer_cmd_req_read_object *req; - struct lancer_cmd_resp_read_object *resp; - int status; - - spin_lock_bh(&adapter->mcc_lock); - - wrb = wrb_from_mccq(adapter); - if (!wrb) { - status = -EBUSY; - goto err_unlock; - } - - req = embedded_payload(wrb); - - be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, - OPCODE_COMMON_READ_OBJECT, - sizeof(struct lancer_cmd_req_read_object), wrb, - NULL); - - req->desired_read_len = cpu_to_le32(data_size); - req->read_offset = cpu_to_le32(data_offset); - strcpy(req->object_name, obj_name); - req->descriptor_count = cpu_to_le32(1); - req->buf_len = cpu_to_le32(data_size); - 
req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF)); - req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma)); - - status = be_mcc_notify_wait(adapter); - - resp = embedded_payload(wrb); - if (!status) { - *data_read = le32_to_cpu(resp->actual_read_len); - *eof = le32_to_cpu(resp->eof); - } else { - *addn_status = resp->additional_status; - } - -err_unlock: - spin_unlock_bh(&adapter->mcc_lock); - return status; -} - int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, u32 flash_type, u32 flash_opcode, u32 buf_size) { @@ -2295,99 +2238,3 @@ int be_cmd_req_native_mode(struct be_adapter *adapter) mutex_unlock(&adapter->mbox_lock); return status; } - -/* Uses synchronous MCCQ */ -int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain, - u32 *pmac_id) -{ - struct be_mcc_wrb *wrb; - struct be_cmd_req_get_mac_list *req; - int status; - int mac_count; - - spin_lock_bh(&adapter->mcc_lock); - - wrb = wrb_from_mccq(adapter); - if (!wrb) { - status = -EBUSY; - goto err; - } - req = embedded_payload(wrb); - - be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, - OPCODE_COMMON_GET_MAC_LIST, sizeof(*req), - wrb, NULL); - - req->hdr.domain = domain; - - status = be_mcc_notify_wait(adapter); - if (!status) { - struct be_cmd_resp_get_mac_list *resp = - embedded_payload(wrb); - int i; - u8 *ctxt = &resp->context[0][0]; - status = -EIO; - mac_count = resp->mac_count; - be_dws_le_to_cpu(&resp->context, sizeof(resp->context)); - for (i = 0; i < mac_count; i++) { - if (!AMAP_GET_BITS(struct amap_get_mac_list_context, - act, ctxt)) { - *pmac_id = AMAP_GET_BITS - (struct amap_get_mac_list_context, - macid, ctxt); - status = 0; - break; - } - ctxt += sizeof(struct amap_get_mac_list_context) / 8; - } - } - -err: - spin_unlock_bh(&adapter->mcc_lock); - return status; -} - -/* Uses synchronous MCCQ */ -int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, - u8 mac_count, u32 domain) -{ - struct be_mcc_wrb *wrb; - struct be_cmd_req_set_mac_list *req; - int status; - struct be_dma_mem cmd; - - memset(&cmd, 0, sizeof(struct be_dma_mem)); - cmd.size = sizeof(struct be_cmd_req_set_mac_list); - cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, - &cmd.dma, GFP_KERNEL); - if (!cmd.va) { - dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); - return -ENOMEM; - } - - spin_lock_bh(&adapter->mcc_lock); - - wrb = wrb_from_mccq(adapter); - if (!wrb) { - status = -EBUSY; - goto err; - } - - req = cmd.va; - be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, - OPCODE_COMMON_SET_MAC_LIST, sizeof(*req), - wrb, &cmd); - - req->hdr.domain = domain; - req->mac_count = mac_count; - if (mac_count) - memcpy(req->mac, mac_array, ETH_ALEN*mac_count); - - status = be_mcc_notify_wait(adapter); - -err: - dma_free_coherent(&adapter->pdev->dev, cmd.size, - cmd.va, cmd.dma); - spin_unlock_bh(&adapter->mcc_lock); - return status; -} diff --git a/trunk/drivers/net/ethernet/emulex/benet/be_cmds.h b/trunk/drivers/net/ethernet/emulex/benet/be_cmds.h index dca89249088f..a35cd03fac4e 100644 --- a/trunk/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/trunk/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -189,9 +189,6 @@ struct be_mcc_mailbox { #define OPCODE_COMMON_GET_PHY_DETAILS 102 #define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103 #define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121 -#define OPCODE_COMMON_GET_MAC_LIST 147 -#define OPCODE_COMMON_SET_MAC_LIST 148 -#define OPCODE_COMMON_READ_OBJECT 171 #define OPCODE_COMMON_WRITE_OBJECT 172 #define OPCODE_ETH_RSS_CONFIG 1 @@ -297,7 
+294,6 @@ struct be_cmd_req_mac_query { u8 type; u8 permanent; u16 if_id; - u32 pmac_id; } __packed; struct be_cmd_resp_mac_query { @@ -960,8 +956,7 @@ struct be_cmd_resp_link_status { u8 mgmt_mac_duplex; u8 mgmt_mac_speed; u16 link_speed; - u8 logical_link_status; - u8 rsvd1[3]; + u32 rsvd0; } __packed; /******************** Port Identification ***************************/ @@ -1166,38 +1161,6 @@ struct lancer_cmd_resp_write_object { u32 actual_write_len; }; -/************************ Lancer Read FW info **************/ -#define LANCER_READ_FILE_CHUNK (32*1024) -#define LANCER_READ_FILE_EOF_MASK 0x80000000 - -#define LANCER_FW_DUMP_FILE "/dbg/dump.bin" -#define LANCER_VPD_PF_FILE "/vpd/ntr_pf.vpd" -#define LANCER_VPD_VF_FILE "/vpd/ntr_vf.vpd" - -struct lancer_cmd_req_read_object { - struct be_cmd_req_hdr hdr; - u32 desired_read_len; - u32 read_offset; - u8 object_name[104]; - u32 descriptor_count; - u32 buf_len; - u32 addr_low; - u32 addr_high; -}; - -struct lancer_cmd_resp_read_object { - u8 opcode; - u8 subsystem; - u8 rsvd1[2]; - u8 status; - u8 additional_status; - u8 rsvd2[2]; - u32 resp_len; - u32 actual_resp_len; - u32 actual_read_len; - u32 eof; -}; - /************************ WOL *******************************/ struct be_cmd_req_acpi_wol_magic_config{ struct be_cmd_req_hdr hdr; @@ -1344,34 +1307,6 @@ struct be_cmd_resp_set_func_cap { u8 rsvd[212]; }; -/******************** GET/SET_MACLIST **************************/ -#define BE_MAX_MAC 64 -struct amap_get_mac_list_context { - u8 macid[31]; - u8 act; -} __packed; - -struct be_cmd_req_get_mac_list { - struct be_cmd_req_hdr hdr; - u32 rsvd; -} __packed; - -struct be_cmd_resp_get_mac_list { - struct be_cmd_resp_hdr hdr; - u8 mac_count; - u8 rsvd1; - u16 rsvd2; - u8 context[sizeof(struct amap_get_mac_list_context) / 8][BE_MAX_MAC]; -} __packed; - -struct be_cmd_req_set_mac_list { - struct be_cmd_req_hdr hdr; - u8 mac_count; - u8 rsvd1; - u16 rsvd2; - struct macaddr mac[BE_MAX_MAC]; -} __packed; - /*************** HW Stats Get v1 **********************************/ #define BE_TXP_SW_SZ 48 struct be_port_rxf_stats_v1 { @@ -1478,15 +1413,15 @@ static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter) extern int be_pci_fnum_get(struct be_adapter *adapter); extern int be_cmd_POST(struct be_adapter *adapter); extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, - u8 type, bool permanent, u32 if_handle, u32 pmac_id); + u8 type, bool permanent, u32 if_handle); extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, u32 if_id, u32 *pmac_id, u32 domain); extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, - int pmac_id, u32 domain); + u32 pmac_id, u32 domain); extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, u8 *mac, u32 *if_handle, u32 *pmac_id, u32 domain); -extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle, +extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle, u32 domain); extern int be_cmd_eq_create(struct be_adapter *adapter, struct be_queue_info *eq, int eq_delay); @@ -1508,8 +1443,8 @@ extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, int type); extern int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q); -extern int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed, - u16 *link_speed, u8 *link_status, u32 dom); +extern int be_cmd_link_status_query(struct be_adapter *adapter, + u8 *mac_speed, u16 *link_speed, u32 dom); extern int 
be_cmd_reset(struct be_adapter *adapter); extern int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd); @@ -1545,9 +1480,6 @@ extern int lancer_cmd_write_object(struct be_adapter *adapter, u32 data_size, u32 data_offset, const char *obj_name, u32 *data_written, u8 *addn_status); -int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, - u32 data_size, u32 data_offset, const char *obj_name, - u32 *data_read, u32 *eof, u8 *addn_status); int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, int offset); extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, @@ -1574,8 +1506,4 @@ extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter); extern int be_cmd_req_native_mode(struct be_adapter *adapter); extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size); extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); -extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain, - u32 *pmac_id); -extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, - u8 mac_count, u32 domain); diff --git a/trunk/drivers/net/ethernet/emulex/benet/be_ethtool.c b/trunk/drivers/net/ethernet/emulex/benet/be_ethtool.c index 6db6b6ae5e9b..bf8153ea4ed8 100644 --- a/trunk/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/trunk/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -127,8 +127,8 @@ static void be_get_drvinfo(struct net_device *netdev, memset(fw_on_flash, 0 , sizeof(fw_on_flash)); be_cmd_get_fw_ver(adapter, adapter->fw_ver, fw_on_flash); - strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version)); + strcpy(drvinfo->driver, DRV_NAME); + strcpy(drvinfo->version, DRV_VER); strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN); if (memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN) != 0) { strcat(drvinfo->fw_version, " ["); @@ -136,84 +136,21 @@ static void be_get_drvinfo(struct net_device *netdev, strcat(drvinfo->fw_version, "]"); } - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info)); + strcpy(drvinfo->bus_info, pci_name(adapter->pdev)); drvinfo->testinfo_len = 0; drvinfo->regdump_len = 0; drvinfo->eedump_len = 0; } -static u32 -lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name) -{ - u32 data_read = 0, eof; - u8 addn_status; - struct be_dma_mem data_len_cmd; - int status; - - memset(&data_len_cmd, 0, sizeof(data_len_cmd)); - /* data_offset and data_size should be 0 to get reg len */ - status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0, - file_name, &data_read, &eof, &addn_status); - - return data_read; -} - -static int -lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, - u32 buf_len, void *buf) -{ - struct be_dma_mem read_cmd; - u32 read_len = 0, total_read_len = 0, chunk_size; - u32 eof = 0; - u8 addn_status; - int status = 0; - - read_cmd.size = LANCER_READ_FILE_CHUNK; - read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size, - &read_cmd.dma); - - if (!read_cmd.va) { - dev_err(&adapter->pdev->dev, - "Memory allocation failure while reading dump\n"); - return -ENOMEM; - } - - while ((total_read_len < buf_len) && !eof) { - chunk_size = min_t(u32, (buf_len - total_read_len), - LANCER_READ_FILE_CHUNK); - chunk_size = ALIGN(chunk_size, 4); - status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size, - total_read_len, file_name, &read_len, - &eof, &addn_status); - if (!status) { - memcpy(buf + 
total_read_len, read_cmd.va, read_len); - total_read_len += read_len; - eof &= LANCER_READ_FILE_EOF_MASK; - } else { - status = -EIO; - break; - } - } - pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va, - read_cmd.dma); - - return status; -} - static int be_get_reg_len(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); u32 log_size = 0; - if (be_physfn(adapter)) { - if (lancer_chip(adapter)) - log_size = lancer_cmd_get_file_len(adapter, - LANCER_FW_DUMP_FILE); - else - be_cmd_get_reg_len(adapter, &log_size); - } + if (be_physfn(adapter)) + be_cmd_get_reg_len(adapter, &log_size); + return log_size; } @@ -224,11 +161,7 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf) if (be_physfn(adapter)) { memset(buf, 0, regs->len); - if (lancer_chip(adapter)) - lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE, - regs->len, buf); - else - be_cmd_get_regs(adapter, regs->len, buf); + be_cmd_get_regs(adapter, regs->len, buf); } } @@ -429,14 +362,11 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) struct be_phy_info phy_info; u8 mac_speed = 0; u16 link_speed = 0; - u8 link_status; int status; if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) { status = be_cmd_link_status_query(adapter, &mac_speed, - &link_speed, &link_status, 0); - if (!status) - be_link_status_update(adapter, link_status); + &link_speed, 0); /* link_speed is in units of 10 Mbps */ if (link_speed) { @@ -523,13 +453,16 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) return 0; } -static void be_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) +static void +be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct be_adapter *adapter = netdev_priv(netdev); - ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len; - ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len; + ring->rx_max_pending = adapter->rx_obj[0].q.len; + ring->tx_max_pending = adapter->tx_obj[0].q.len; + + ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used); + ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used); } static void @@ -703,7 +636,7 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) } if (be_cmd_link_status_query(adapter, &mac_speed, - &qos_link_speed, NULL, 0) != 0) { + &qos_link_speed, 0) != 0) { test->flags |= ETH_TEST_FL_FAILED; data[4] = -1; } else if (!mac_speed) { @@ -727,17 +660,7 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl) static int be_get_eeprom_len(struct net_device *netdev) { - struct be_adapter *adapter = netdev_priv(netdev); - if (lancer_chip(adapter)) { - if (be_physfn(adapter)) - return lancer_cmd_get_file_len(adapter, - LANCER_VPD_PF_FILE); - else - return lancer_cmd_get_file_len(adapter, - LANCER_VPD_VF_FILE); - } else { - return BE_READ_SEEPROM_LEN; - } + return BE_READ_SEEPROM_LEN; } static int @@ -752,15 +675,6 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, if (!eeprom->len) return -EINVAL; - if (lancer_chip(adapter)) { - if (be_physfn(adapter)) - return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE, - eeprom->len, data); - else - return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE, - eeprom->len, data); - } - eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16); memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); diff --git a/trunk/drivers/net/ethernet/emulex/benet/be_main.c 
b/trunk/drivers/net/ethernet/emulex/benet/be_main.c index 6c46753aeb43..bf266a00c774 100644 --- a/trunk/drivers/net/ethernet/emulex/benet/be_main.c +++ b/trunk/drivers/net/ethernet/emulex/benet/be_main.c @@ -27,13 +27,12 @@ MODULE_DESCRIPTION(DRV_DESC " " DRV_VER); MODULE_AUTHOR("ServerEngines Corporation"); MODULE_LICENSE("GPL"); -static unsigned int num_vfs; -module_param(num_vfs, uint, S_IRUGO); -MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize"); - static ushort rx_frag_size = 2048; +static unsigned int num_vfs; module_param(rx_frag_size, ushort, S_IRUGO); +module_param(num_vfs, uint, S_IRUGO); MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); +MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize"); static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = { { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, @@ -42,7 +41,6 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = { { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)}, { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)}, - { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)}, { 0 } }; MODULE_DEVICE_TABLE(pci, be_dev_ids); @@ -239,8 +237,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) return -EADDRNOTAVAIL; status = be_cmd_mac_addr_query(adapter, current_mac, - MAC_ADDRESS_TYPE_NETWORK, false, - adapter->if_handle, 0); + MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle); if (status) goto err; @@ -318,8 +315,6 @@ static void populate_be3_stats(struct be_adapter *adapter) struct be_drv_stats *drvs = &adapter->drv_stats; be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats)); - drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop; - drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames; drvs->rx_pause_frames = port_stats->rx_pause_frames; drvs->rx_crc_errors = port_stats->rx_crc_errors; drvs->rx_control_frames = port_stats->rx_control_frames; @@ -496,19 +491,19 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, return stats; } -void be_link_status_update(struct be_adapter *adapter, u8 link_status) +void be_link_status_update(struct be_adapter *adapter, u32 link_status) { struct net_device *netdev = adapter->netdev; - if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) { - netif_carrier_off(netdev); - adapter->flags |= BE_FLAGS_LINK_STATUS_INIT; - } - - if ((link_status & LINK_STATUS_MASK) == LINK_UP) + /* when link status changes, link speed must be re-queried from card */ + adapter->link_speed = -1; + if ((link_status & LINK_STATUS_MASK) == LINK_UP) { netif_carrier_on(netdev); - else + dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name); + } else { netif_carrier_off(netdev); + dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name); + } } static void be_tx_stats_update(struct be_tx_obj *txo, @@ -554,26 +549,11 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len) wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK; } -static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter, - struct sk_buff *skb) -{ - u8 vlan_prio; - u16 vlan_tag; - - vlan_tag = vlan_tx_tag_get(skb); - vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; - /* If vlan priority provided by OS is NOT in available bmap */ - if (!(adapter->vlan_prio_bmap & (1 << vlan_prio))) - vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) | - adapter->recommended_prio; - - return vlan_tag; -} - static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, struct sk_buff *skb, u32 wrb_cnt, u32 len) { - u16 
vlan_tag; + u8 vlan_prio = 0; + u16 vlan_tag = 0; memset(hdr, 0, sizeof(*hdr)); @@ -604,7 +584,12 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, if (vlan_tx_tag_present(skb)) { AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1); - vlan_tag = be_get_tx_vlan_tag(adapter, skb); + vlan_tag = vlan_tx_tag_get(skb); + vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + /* If vlan priority provided by OS is NOT in available bmap */ + if (!(adapter->vlan_prio_bmap & (1 << vlan_prio))) + vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) | + adapter->recommended_prio; AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag); } @@ -707,25 +692,6 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, u32 start = txq->head; bool dummy_wrb, stopped = false; - /* For vlan tagged pkts, BE - * 1) calculates checksum even when CSO is not requested - * 2) calculates checksum wrongly for padded pkt less than - * 60 bytes long. - * As a workaround disable TX vlan offloading in such cases. - */ - if (unlikely(vlan_tx_tag_present(skb) && - (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) { - skb = skb_share_check(skb, GFP_ATOMIC); - if (unlikely(!skb)) - goto tx_drop; - - skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb)); - if (unlikely(!skb)) - goto tx_drop; - - skb->vlan_tci = 0; - } - wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb); copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb); @@ -753,7 +719,6 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, txq->head = start; dev_kfree_skb_any(skb); } -tx_drop: return NETDEV_TX_OK; } @@ -781,15 +746,15 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu) */ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num) { - struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num]; u16 vtag[BE_NUM_VLANS_SUPPORTED]; u16 ntags = 0, i; int status = 0; + u32 if_handle; if (vf) { - vtag[0] = cpu_to_le16(vf_cfg->vlan_tag); - status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag, - 1, 1, 0); + if_handle = adapter->vf_cfg[vf_num].vf_if_handle; + vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag); + status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0); } /* No need to further configure vids if in promiscuous mode */ @@ -814,48 +779,31 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num) return status; } -static int be_vlan_add_vid(struct net_device *netdev, u16 vid) +static void be_vlan_add_vid(struct net_device *netdev, u16 vid) { struct be_adapter *adapter = netdev_priv(netdev); - int status = 0; - if (!be_physfn(adapter)) { - status = -EINVAL; - goto ret; - } + adapter->vlans_added++; + if (!be_physfn(adapter)) + return; adapter->vlan_tag[vid] = 1; if (adapter->vlans_added <= (adapter->max_vlans + 1)) - status = be_vid_config(adapter, false, 0); - - if (!status) - adapter->vlans_added++; - else - adapter->vlan_tag[vid] = 0; -ret: - return status; + be_vid_config(adapter, false, 0); } -static int be_vlan_rem_vid(struct net_device *netdev, u16 vid) +static void be_vlan_rem_vid(struct net_device *netdev, u16 vid) { struct be_adapter *adapter = netdev_priv(netdev); - int status = 0; - if (!be_physfn(adapter)) { - status = -EINVAL; - goto ret; - } + adapter->vlans_added--; + + if (!be_physfn(adapter)) + return; adapter->vlan_tag[vid] = 0; if (adapter->vlans_added <= adapter->max_vlans) - status = be_vid_config(adapter, false, 0); - - if (!status) - adapter->vlans_added--; - else - adapter->vlan_tag[vid] = 1; -ret: - return status; + 
be_vid_config(adapter, false, 0); } static void be_set_rx_mode(struct net_device *netdev) @@ -892,30 +840,28 @@ static void be_set_rx_mode(struct net_device *netdev) static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct be_adapter *adapter = netdev_priv(netdev); - struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; int status; - if (!sriov_enabled(adapter)) + if (!adapter->sriov_enabled) return -EPERM; - if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs) + if (!is_valid_ether_addr(mac) || (vf >= num_vfs)) return -EINVAL; - if (lancer_chip(adapter)) { - status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1); - } else { - status = be_cmd_pmac_del(adapter, vf_cfg->if_handle, - vf_cfg->pmac_id, vf + 1); + if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID) + status = be_cmd_pmac_del(adapter, + adapter->vf_cfg[vf].vf_if_handle, + adapter->vf_cfg[vf].vf_pmac_id, vf + 1); - status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle, - &vf_cfg->pmac_id, vf + 1); - } + status = be_cmd_pmac_add(adapter, mac, + adapter->vf_cfg[vf].vf_if_handle, + &adapter->vf_cfg[vf].vf_pmac_id, vf + 1); if (status) dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n", mac, vf); else - memcpy(vf_cfg->mac_addr, mac, ETH_ALEN); + memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN); return status; } @@ -924,19 +870,18 @@ static int be_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *vi) { struct be_adapter *adapter = netdev_priv(netdev); - struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; - if (!sriov_enabled(adapter)) + if (!adapter->sriov_enabled) return -EPERM; - if (vf >= adapter->num_vfs) + if (vf >= num_vfs) return -EINVAL; vi->vf = vf; - vi->tx_rate = vf_cfg->tx_rate; - vi->vlan = vf_cfg->vlan_tag; + vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate; + vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag; vi->qos = 0; - memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN); + memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN); return 0; } @@ -947,17 +892,17 @@ static int be_set_vf_vlan(struct net_device *netdev, struct be_adapter *adapter = netdev_priv(netdev); int status = 0; - if (!sriov_enabled(adapter)) + if (!adapter->sriov_enabled) return -EPERM; - if (vf >= adapter->num_vfs || vlan > 4095) + if ((vf >= num_vfs) || (vlan > 4095)) return -EINVAL; if (vlan) { - adapter->vf_cfg[vf].vlan_tag = vlan; + adapter->vf_cfg[vf].vf_vlan_tag = vlan; adapter->vlans_added++; } else { - adapter->vf_cfg[vf].vlan_tag = 0; + adapter->vf_cfg[vf].vf_vlan_tag = 0; adapter->vlans_added--; } @@ -975,25 +920,21 @@ static int be_set_vf_tx_rate(struct net_device *netdev, struct be_adapter *adapter = netdev_priv(netdev); int status = 0; - if (!sriov_enabled(adapter)) + if (!adapter->sriov_enabled) return -EPERM; - if (vf >= adapter->num_vfs) + if ((vf >= num_vfs) || (rate < 0)) return -EINVAL; - if (rate < 100 || rate > 10000) { - dev_err(&adapter->pdev->dev, - "tx rate must be between 100 and 10000 Mbps\n"); - return -EINVAL; - } + if (rate > 10000) + rate = 10000; + adapter->vf_cfg[vf].vf_tx_rate = rate; status = be_cmd_set_qos(adapter, rate / 10, vf + 1); if (status) - dev_err(&adapter->pdev->dev, + dev_info(&adapter->pdev->dev, "tx rate %d on VF %d failed\n", rate, vf); - else - adapter->vf_cfg[vf].tx_rate = rate; return status; } @@ -1704,7 +1645,8 @@ static void be_tx_queues_destroy(struct be_adapter *adapter) static int be_num_txqs_want(struct be_adapter *adapter) { - if (sriov_enabled(adapter) || be_is_mc(adapter) || + if ((num_vfs && adapter->sriov_enabled) || + be_is_mc(adapter) 
|| lancer_chip(adapter) || !be_physfn(adapter) || adapter->generation == BE_GEN2) return 1; @@ -1720,12 +1662,9 @@ static int be_tx_queues_create(struct be_adapter *adapter) u8 i; adapter->num_tx_qs = be_num_txqs_want(adapter); - if (adapter->num_tx_qs != MAX_TX_QS) { - rtnl_lock(); + if (adapter->num_tx_qs != MAX_TX_QS) netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_qs); - rtnl_unlock(); - } adapter->tx_eq.max_eqd = 0; adapter->tx_eq.min_eqd = 0; @@ -1754,6 +1693,9 @@ static int be_tx_queues_create(struct be_adapter *adapter) if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb))) goto err; + + if (be_cmd_txq_create(adapter, q, cq)) + goto err; } return 0; @@ -1786,8 +1728,8 @@ static void be_rx_queues_destroy(struct be_adapter *adapter) static u32 be_num_rxqs_want(struct be_adapter *adapter) { if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) && - !sriov_enabled(adapter) && be_physfn(adapter) && - !be_is_mc(adapter)) { + !adapter->sriov_enabled && be_physfn(adapter) && + !be_is_mc(adapter)) { return 1 + MAX_RSS_QS; /* one default non-RSS queue */ } else { dev_warn(&adapter->pdev->dev, @@ -1987,7 +1929,6 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget) struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi); struct be_adapter *adapter = container_of(tx_eq, struct be_adapter, tx_eq); - struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; struct be_tx_obj *txo; struct be_eth_tx_compl *txcp; int tx_compl, mcc_compl, status = 0; @@ -2024,19 +1965,12 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget) mcc_compl = be_process_mcc(adapter, &status); if (mcc_compl) { + struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl); } napi_complete(napi); - /* Arm CQ again to regenerate EQEs for Lancer in INTx mode */ - if (lancer_chip(adapter) && !msix_enabled(adapter)) { - for_all_tx_queues(adapter, txo, i) - be_cq_notify(adapter, txo->cq.id, true, 0); - - be_cq_notify(adapter, mcc_obj->cq.id, true, 0); - } - be_eq_notify(adapter, tx_eq->q.id, true, false, 0); adapter->drv_stats.tx_events++; return 1; @@ -2048,9 +1982,6 @@ void be_detect_dump_ue(struct be_adapter *adapter) u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0; u32 i; - if (adapter->eeh_err || adapter->ue_detected) - return; - if (lancer_chip(adapter)) { sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); if (sliport_status & SLIPORT_STATUS_ERR_MASK) { @@ -2077,8 +2008,7 @@ void be_detect_dump_ue(struct be_adapter *adapter) sliport_status & SLIPORT_STATUS_ERR_MASK) { adapter->ue_detected = true; adapter->eeh_err = true; - dev_err(&adapter->pdev->dev, - "Unrecoverable error in the card\n"); + dev_err(&adapter->pdev->dev, "UE Detected!!\n"); } if (ue_lo) { @@ -2106,6 +2036,53 @@ void be_detect_dump_ue(struct be_adapter *adapter) } } +static void be_worker(struct work_struct *work) +{ + struct be_adapter *adapter = + container_of(work, struct be_adapter, work.work); + struct be_rx_obj *rxo; + int i; + + if (!adapter->ue_detected) + be_detect_dump_ue(adapter); + + /* when interrupts are not yet enabled, just reap any pending + * mcc completions */ + if (!netif_running(adapter->netdev)) { + int mcc_compl, status = 0; + + mcc_compl = be_process_mcc(adapter, &status); + + if (mcc_compl) { + struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; + be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl); + } + + goto reschedule; + } + + if (!adapter->stats_cmd_sent) { + if (lancer_chip(adapter)) + 
lancer_cmd_get_pport_stats(adapter, + &adapter->stats_cmd); + else + be_cmd_get_stats(adapter, &adapter->stats_cmd); + } + + for_all_rx_queues(adapter, rxo, i) { + be_rx_eqd_update(adapter, rxo); + + if (rxo->rx_post_starved) { + rxo->rx_post_starved = false; + be_post_rx_frags(rxo, GFP_KERNEL); + } + } + +reschedule: + adapter->work_counter++; + schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); +} + static void be_msix_disable(struct be_adapter *adapter) { if (msix_enabled(adapter)) { @@ -2142,28 +2119,27 @@ static void be_msix_enable(struct be_adapter *adapter) static int be_sriov_enable(struct be_adapter *adapter) { be_check_sriov_fn_type(adapter); - #ifdef CONFIG_PCI_IOV if (be_physfn(adapter) && num_vfs) { int status, pos; - u16 dev_vfs; + u16 nvfs; pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV); pci_read_config_word(adapter->pdev, - pos + PCI_SRIOV_TOTAL_VF, &dev_vfs); + pos + PCI_SRIOV_TOTAL_VF, &nvfs); - adapter->num_vfs = min_t(u16, num_vfs, dev_vfs); - if (adapter->num_vfs != num_vfs) + if (num_vfs > nvfs) { dev_info(&adapter->pdev->dev, - "Device supports %d VFs and not %d\n", - adapter->num_vfs, num_vfs); + "Device supports %d VFs and not %d\n", + nvfs, num_vfs); + num_vfs = nvfs; + } - status = pci_enable_sriov(adapter->pdev, adapter->num_vfs); - if (status) - adapter->num_vfs = 0; + status = pci_enable_sriov(adapter->pdev, num_vfs); + adapter->sriov_enabled = status ? false : true; - if (adapter->num_vfs) { + if (adapter->sriov_enabled) { adapter->vf_cfg = kcalloc(num_vfs, sizeof(struct be_vf_cfg), GFP_KERNEL); @@ -2178,10 +2154,10 @@ static int be_sriov_enable(struct be_adapter *adapter) static void be_sriov_disable(struct be_adapter *adapter) { #ifdef CONFIG_PCI_IOV - if (sriov_enabled(adapter)) { + if (adapter->sriov_enabled) { pci_disable_sriov(adapter->pdev); kfree(adapter->vf_cfg); - adapter->num_vfs = 0; + adapter->sriov_enabled = false; } #endif } @@ -2375,8 +2351,8 @@ static int be_close(struct net_device *netdev) static int be_rx_queues_setup(struct be_adapter *adapter) { struct be_rx_obj *rxo; - int rc, i, j; - u8 rsstable[128]; + int rc, i; + u8 rsstable[MAX_RSS_QS]; for_all_rx_queues(adapter, rxo, i) { rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, @@ -2388,15 +2364,11 @@ static int be_rx_queues_setup(struct be_adapter *adapter) } if (be_multi_rxq(adapter)) { - for (j = 0; j < 128; j += adapter->num_rx_qs - 1) { - for_all_rss_queues(adapter, rxo, i) { - if ((j + i) >= 128) - break; - rsstable[j + i] = rxo->rss_id; - } - } - rc = be_cmd_rss_config(adapter, rsstable, 128); + for_all_rss_queues(adapter, rxo, i) + rsstable[i] = rxo->rss_id; + rc = be_cmd_rss_config(adapter, rsstable, + adapter->num_rx_qs - 1); if (rc) return rc; } @@ -2414,7 +2386,6 @@ static int be_open(struct net_device *netdev) struct be_adapter *adapter = netdev_priv(netdev); struct be_eq_obj *tx_eq = &adapter->tx_eq; struct be_rx_obj *rxo; - u8 link_status; int status, i; status = be_rx_queues_setup(adapter); @@ -2438,11 +2409,6 @@ static int be_open(struct net_device *netdev) /* Now that interrupts are on we can process async mcc */ be_async_mcc_enable(adapter); - status = be_cmd_link_status_query(adapter, NULL, NULL, - &link_status, 0); - if (!status) - be_link_status_update(adapter, link_status); - return 0; err: be_close(adapter->netdev); @@ -2499,24 +2465,19 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter) u32 vf; int status = 0; u8 mac[ETH_ALEN]; - struct be_vf_cfg *vf_cfg; be_vf_eth_addr_generate(adapter, mac); - 
for_all_vfs(adapter, vf_cfg, vf) { - if (lancer_chip(adapter)) { - status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1); - } else { - status = be_cmd_pmac_add(adapter, mac, - vf_cfg->if_handle, - &vf_cfg->pmac_id, vf + 1); - } - + for (vf = 0; vf < num_vfs; vf++) { + status = be_cmd_pmac_add(adapter, mac, + adapter->vf_cfg[vf].vf_if_handle, + &adapter->vf_cfg[vf].vf_pmac_id, + vf + 1); if (status) dev_err(&adapter->pdev->dev, - "Mac address assignment failed for VF %d\n", vf); + "Mac address add failed for VF %d\n", vf); else - memcpy(vf_cfg->mac_addr, mac, ETH_ALEN); + memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN); mac[5] += 1; } @@ -2525,23 +2486,24 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter) static void be_vf_clear(struct be_adapter *adapter) { - struct be_vf_cfg *vf_cfg; u32 vf; - for_all_vfs(adapter, vf_cfg, vf) { - if (lancer_chip(adapter)) - be_cmd_set_mac_list(adapter, NULL, 0, vf + 1); - else - be_cmd_pmac_del(adapter, vf_cfg->if_handle, - vf_cfg->pmac_id, vf + 1); - - be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1); + for (vf = 0; vf < num_vfs; vf++) { + if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID) + be_cmd_pmac_del(adapter, + adapter->vf_cfg[vf].vf_if_handle, + adapter->vf_cfg[vf].vf_pmac_id, vf + 1); } + + for (vf = 0; vf < num_vfs; vf++) + if (adapter->vf_cfg[vf].vf_if_handle) + be_cmd_if_destroy(adapter, + adapter->vf_cfg[vf].vf_if_handle, vf + 1); } static int be_clear(struct be_adapter *adapter) { - if (sriov_enabled(adapter)) + if (be_physfn(adapter) && adapter->sriov_enabled) be_vf_clear(adapter); be_cmd_if_destroy(adapter, adapter->if_handle, 0); @@ -2549,94 +2511,61 @@ static int be_clear(struct be_adapter *adapter) be_mcc_queues_destroy(adapter); be_rx_queues_destroy(adapter); be_tx_queues_destroy(adapter); + adapter->eq_next_idx = 0; + + adapter->be3_native = false; + adapter->promiscuous = false; /* tell fw we're done with firing cmds */ be_cmd_fw_clean(adapter); return 0; } -static void be_vf_setup_init(struct be_adapter *adapter) -{ - struct be_vf_cfg *vf_cfg; - int vf; - - for_all_vfs(adapter, vf_cfg, vf) { - vf_cfg->if_handle = -1; - vf_cfg->pmac_id = -1; - } -} - static int be_vf_setup(struct be_adapter *adapter) { - struct be_vf_cfg *vf_cfg; u32 cap_flags, en_flags, vf; u16 lnk_speed; int status; - be_vf_setup_init(adapter); - - cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | - BE_IF_FLAGS_MULTICAST; - for_all_vfs(adapter, vf_cfg, vf) { + cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST; + for (vf = 0; vf < num_vfs; vf++) { status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL, - &vf_cfg->if_handle, NULL, vf + 1); + &adapter->vf_cfg[vf].vf_if_handle, + NULL, vf+1); if (status) goto err; + adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID; } - status = be_vf_eth_addr_config(adapter); - if (status) - goto err; + if (!lancer_chip(adapter)) { + status = be_vf_eth_addr_config(adapter); + if (status) + goto err; + } - for_all_vfs(adapter, vf_cfg, vf) { + for (vf = 0; vf < num_vfs; vf++) { status = be_cmd_link_status_query(adapter, NULL, &lnk_speed, - NULL, vf + 1); + vf + 1); if (status) goto err; - vf_cfg->tx_rate = lnk_speed * 10; + adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10; } return 0; err: return status; } -static void be_setup_init(struct be_adapter *adapter) -{ - adapter->vlan_prio_bmap = 0xff; - adapter->link_speed = -1; - adapter->if_handle = -1; - adapter->be3_native = false; - adapter->promiscuous = false; - adapter->eq_next_idx = 0; -} - -static 
int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac) -{ - u32 pmac_id; - int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id); - if (status != 0) - goto do_none; - status = be_cmd_mac_addr_query(adapter, mac, - MAC_ADDRESS_TYPE_NETWORK, - false, adapter->if_handle, pmac_id); - if (status != 0) - goto do_none; - status = be_cmd_pmac_add(adapter, mac, adapter->if_handle, - &adapter->pmac_id, 0); -do_none: - return status; -} - static int be_setup(struct be_adapter *adapter) { struct net_device *netdev = adapter->netdev; u32 cap_flags, en_flags; u32 tx_fc, rx_fc; - int status, i; + int status; u8 mac[ETH_ALEN]; - struct be_tx_obj *txo; - be_setup_init(adapter); + /* Allow all priorities by default. A GRP5 evt may modify this */ + adapter->vlan_prio_bmap = 0xff; + adapter->link_speed = -1; be_cmd_req_native_mode(adapter); @@ -2654,7 +2583,7 @@ static int be_setup(struct be_adapter *adapter) memset(mac, 0, ETH_ALEN); status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK, - true /*permanent */, 0, 0); + true /*permanent */, 0); if (status) return status; memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); @@ -2663,8 +2592,7 @@ static int be_setup(struct be_adapter *adapter) en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS; cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS | - BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS; - + BE_IF_FLAGS_PROMISCUOUS; if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) { cap_flags |= BE_IF_FLAGS_RSS; en_flags |= BE_IF_FLAGS_RSS; @@ -2675,23 +2603,12 @@ static int be_setup(struct be_adapter *adapter) if (status != 0) goto err; - for_all_tx_queues(adapter, txo, i) { - status = be_cmd_txq_create(adapter, &txo->q, &txo->cq); - if (status) - goto err; - } - - /* The VF's permanent mac queried from card is incorrect. - * For BEx: Query the mac configued by the PF using if_handle - * For Lancer: Get and use mac_list to obtain mac address. - */ - if (!be_physfn(adapter)) { - if (lancer_chip(adapter)) - status = be_configure_mac_from_list(adapter, mac); - else - status = be_cmd_mac_addr_query(adapter, mac, - MAC_ADDRESS_TYPE_NETWORK, false, - adapter->if_handle, 0); + /* For BEx, the VF's permanent mac queried from card is incorrect. 
+ * Query the mac configued by the PF using if_handle + */ + if (!be_physfn(adapter) && !lancer_chip(adapter)) { + status = be_cmd_mac_addr_query(adapter, mac, + MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle); if (!status) { memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); @@ -2707,21 +2624,18 @@ static int be_setup(struct be_adapter *adapter) be_set_rx_mode(adapter->netdev); status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc); - /* For Lancer: It is legal for this cmd to fail on VF */ - if (status && (be_physfn(adapter) || !lancer_chip(adapter))) + if (status) goto err; - if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) { status = be_cmd_set_flow_control(adapter, adapter->tx_fc, adapter->rx_fc); - /* For Lancer: It is legal for this cmd to fail on VF */ - if (status && (be_physfn(adapter) || !lancer_chip(adapter))) + if (status) goto err; } pcie_set_readrq(adapter->pdev, 4096); - if (sriov_enabled(adapter)) { + if (be_physfn(adapter) && adapter->sriov_enabled) { status = be_vf_setup(adapter); if (status) goto err; @@ -2733,19 +2647,6 @@ static int be_setup(struct be_adapter *adapter) return status; } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void be_netpoll(struct net_device *netdev) -{ - struct be_adapter *adapter = netdev_priv(netdev); - struct be_rx_obj *rxo; - int i; - - event_handle(adapter, &adapter->tx_eq, false); - for_all_rx_queues(adapter, rxo, i) - event_handle(adapter, &rxo->rx_eq, true); -} -#endif - #define FW_FILE_HDR_SIGN "ServerEngines Corp. " static bool be_flash_redboot(struct be_adapter *adapter, const u8 *p, u32 img_start, int image_size, @@ -3094,10 +2995,7 @@ static struct net_device_ops be_netdev_ops = { .ndo_set_vf_mac = be_set_vf_mac, .ndo_set_vf_vlan = be_set_vf_vlan, .ndo_set_vf_tx_rate = be_set_vf_tx_rate, - .ndo_get_vf_config = be_get_vf_config, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = be_netpoll, -#endif + .ndo_get_vf_config = be_get_vf_config }; static void be_netdev_init(struct net_device *netdev) @@ -3344,7 +3242,6 @@ static int be_dev_family_check(struct be_adapter *adapter) break; case BE_DEVICE_ID2: case OC_DEVICE_ID2: - case OC_DEVICE_ID5: adapter->generation = BE_GEN3; break; case OC_DEVICE_ID3: @@ -3370,7 +3267,7 @@ static int be_dev_family_check(struct be_adapter *adapter) static int lancer_wait_ready(struct be_adapter *adapter) { -#define SLIPORT_READY_TIMEOUT 30 +#define SLIPORT_READY_TIMEOUT 500 u32 sliport_status; int status = 0, i; @@ -3379,7 +3276,7 @@ static int lancer_wait_ready(struct be_adapter *adapter) if (sliport_status & SLIPORT_STATUS_RDY_MASK) break; - msleep(1000); + msleep(20); } if (i == SLIPORT_READY_TIMEOUT) @@ -3416,104 +3313,6 @@ static int lancer_test_and_set_rdy_state(struct be_adapter *adapter) return status; } -static void lancer_test_and_recover_fn_err(struct be_adapter *adapter) -{ - int status; - u32 sliport_status; - - if (adapter->eeh_err || adapter->ue_detected) - return; - - sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); - - if (sliport_status & SLIPORT_STATUS_ERR_MASK) { - dev_err(&adapter->pdev->dev, - "Adapter in error state." 
- "Trying to recover.\n"); - - status = lancer_test_and_set_rdy_state(adapter); - if (status) - goto err; - - netif_device_detach(adapter->netdev); - - if (netif_running(adapter->netdev)) - be_close(adapter->netdev); - - be_clear(adapter); - - adapter->fw_timeout = false; - - status = be_setup(adapter); - if (status) - goto err; - - if (netif_running(adapter->netdev)) { - status = be_open(adapter->netdev); - if (status) - goto err; - } - - netif_device_attach(adapter->netdev); - - dev_err(&adapter->pdev->dev, - "Adapter error recovery succeeded\n"); - } - return; -err: - dev_err(&adapter->pdev->dev, - "Adapter error recovery failed\n"); -} - -static void be_worker(struct work_struct *work) -{ - struct be_adapter *adapter = - container_of(work, struct be_adapter, work.work); - struct be_rx_obj *rxo; - int i; - - if (lancer_chip(adapter)) - lancer_test_and_recover_fn_err(adapter); - - be_detect_dump_ue(adapter); - - /* when interrupts are not yet enabled, just reap any pending - * mcc completions */ - if (!netif_running(adapter->netdev)) { - int mcc_compl, status = 0; - - mcc_compl = be_process_mcc(adapter, &status); - - if (mcc_compl) { - struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; - be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl); - } - - goto reschedule; - } - - if (!adapter->stats_cmd_sent) { - if (lancer_chip(adapter)) - lancer_cmd_get_pport_stats(adapter, - &adapter->stats_cmd); - else - be_cmd_get_stats(adapter, &adapter->stats_cmd); - } - - for_all_rx_queues(adapter, rxo, i) { - be_rx_eqd_update(adapter, rxo); - - if (rxo->rx_post_starved) { - rxo->rx_post_starved = false; - be_post_rx_frags(rxo, GFP_KERNEL); - } - } - -reschedule: - adapter->work_counter++; - schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); -} - static int __devinit be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) { @@ -3566,12 +3365,7 @@ static int __devinit be_probe(struct pci_dev *pdev, goto disable_sriov; if (lancer_chip(adapter)) { - status = lancer_wait_ready(adapter); - if (!status) { - iowrite32(SLI_PORT_CONTROL_IP_MASK, - adapter->db + SLIPORT_CONTROL_OFFSET); - status = lancer_test_and_set_rdy_state(adapter); - } + status = lancer_test_and_set_rdy_state(adapter); if (status) { dev_err(&pdev->dev, "Adapter in non recoverable error\n"); goto ctrl_clean; @@ -3765,8 +3559,6 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev) dev_info(&adapter->pdev->dev, "EEH reset\n"); adapter->eeh_err = false; - adapter->ue_detected = false; - adapter->fw_timeout = false; status = pci_enable_device(pdev); if (status) diff --git a/trunk/drivers/net/ethernet/ethoc.c b/trunk/drivers/net/ethernet/ethoc.c index 60f0e788cc25..251b635fe75a 100644 --- a/trunk/drivers/net/ethernet/ethoc.c +++ b/trunk/drivers/net/ethernet/ethoc.c @@ -1185,7 +1185,18 @@ static struct platform_driver ethoc_driver = { }, }; -module_platform_driver(ethoc_driver); +static int __init ethoc_init(void) +{ + return platform_driver_register(&ethoc_driver); +} + +static void __exit ethoc_exit(void) +{ + platform_driver_unregister(&ethoc_driver); +} + +module_init(ethoc_init); +module_exit(ethoc_exit); MODULE_AUTHOR("Thierry Reding "); MODULE_DESCRIPTION("OpenCores Ethernet MAC driver"); diff --git a/trunk/drivers/net/ethernet/fealnx.c b/trunk/drivers/net/ethernet/fealnx.c index c82d444b582d..61d2bddec1fa 100644 --- a/trunk/drivers/net/ethernet/fealnx.c +++ b/trunk/drivers/net/ethernet/fealnx.c @@ -1818,9 +1818,9 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i { struct
netdev_private *np = netdev_priv(dev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, pci_name(np->pci_dev)); } static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) diff --git a/trunk/drivers/net/ethernet/freescale/Kconfig b/trunk/drivers/net/ethernet/freescale/Kconfig index 3574e1499dfc..5272f9d4dda9 100644 --- a/trunk/drivers/net/ethernet/freescale/Kconfig +++ b/trunk/drivers/net/ethernet/freescale/Kconfig @@ -21,10 +21,10 @@ config NET_VENDOR_FREESCALE if NET_VENDOR_FREESCALE config FEC - tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)" + bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)" depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \ - ARCH_MXC || SOC_IMX28) - default ARCH_MXC || SOC_IMX28 if ARM + ARCH_MXC || ARCH_MXS) + default ARCH_MXC || ARCH_MXS if ARM select PHYLIB ---help--- Say Y here if you want to use the built-in 10/100 Fast ethernet diff --git a/trunk/drivers/net/ethernet/freescale/fec.c b/trunk/drivers/net/ethernet/freescale/fec.c index 20c2e3f3e18a..c136230d50bb 100644 --- a/trunk/drivers/net/ethernet/freescale/fec.c +++ b/trunk/drivers/net/ethernet/freescale/fec.c @@ -99,7 +99,7 @@ static struct platform_device_id fec_devtype[] = { MODULE_DEVICE_TABLE(platform, fec_devtype); enum imx_fec_type { - IMX25_FEC = 1, /* runs on i.mx25/50/53 */ + IMX25_FEC = 1, /* runs on i.mx25/50/53 */ IMX27_FEC, /* runs on i.mx27/35/51 */ IMX28_FEC, IMX6Q_FEC, @@ -132,7 +132,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #elif defined (CONFIG_M5272C3) #define FEC_FLASHMAC (0xffe04000 + 4) #elif defined(CONFIG_MOD5272) -#define FEC_FLASHMAC 0xffc0406b +#define FEC_FLASHMAC 0xffc0406b #else #define FEC_FLASHMAC 0 #endif @@ -255,13 +255,11 @@ struct fec_enet_private { #define FEC_MMFR_TA (2 << 16) #define FEC_MMFR_DATA(v) (v & 0xffff) -#define FEC_MII_TIMEOUT 30000 /* us */ +#define FEC_MII_TIMEOUT 1000 /* us */ /* Transmitter timeout */ #define TX_TIMEOUT (2 * HZ) -static int mii_cnt; - static void *swap_buffer(void *bufaddr, int len) { int i; @@ -518,7 +516,6 @@ fec_stop(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); - u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); /* We cannot expect a graceful transmit stop without link !!! 
*/ if (fep->link) { @@ -535,10 +532,8 @@ fec_stop(struct net_device *ndev) writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); /* We have to keep ENET enabled to have MII interrupt stay working */ - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { + if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) writel(2, fep->hwp + FEC_ECNTRL); - writel(rmii_mode, fep->hwp + FEC_R_CNTRL); - } } @@ -824,7 +819,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev) iap = (unsigned char *)FEC_FLASHMAC; #else if (pdata) - iap = (unsigned char *)&pdata->mac; + memcpy(iap, pdata->mac, ETH_ALEN); #endif } @@ -871,8 +866,6 @@ static void fec_enet_adjust_link(struct net_device *ndev) if (phy_dev->link) { if (fep->full_duplex != phy_dev->duplex) { fec_restart(ndev, phy_dev->duplex); - /* prevent unnecessary second fec_restart() below */ - fep->link = phy_dev->link; status_change = 1; } } @@ -980,9 +973,8 @@ static int fec_enet_mii_probe(struct net_device *ndev) } if (phy_id >= PHY_MAX_ADDR) { - printk(KERN_INFO - "%s: no PHY, assuming direct connection to switch\n", - ndev->name); + printk(KERN_INFO "%s: no PHY, assuming direct connection " + "to switch\n", ndev->name); strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); phy_id = 0; } @@ -1007,9 +999,8 @@ static int fec_enet_mii_probe(struct net_device *ndev) fep->link = 0; fep->full_duplex = 0; - printk(KERN_INFO - "%s: Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - ndev->name, + printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] " + "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name, fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), fep->phy_dev->irq); @@ -1043,12 +1034,8 @@ static int fec_enet_mii_init(struct platform_device *pdev) */ if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) { /* fec1 uses fec0 mii_bus */ - if (mii_cnt && fec0_mii_bus) { - fep->mii_bus = fec0_mii_bus; - mii_cnt++; - return 0; - } - return -ENOENT; + fep->mii_bus = fec0_mii_bus; + return 0; } fep->mii_timeout = 0; @@ -1093,8 +1080,6 @@ static int fec_enet_mii_init(struct platform_device *pdev) if (mdiobus_register(fep->mii_bus)) goto err_out_free_mdio_irq; - mii_cnt++; - /* save fec0 mii_bus */ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) fec0_mii_bus = fep->mii_bus; @@ -1111,11 +1096,11 @@ static int fec_enet_mii_init(struct platform_device *pdev) static void fec_enet_mii_remove(struct fec_enet_private *fep) { - if (--mii_cnt == 0) { - mdiobus_unregister(fep->mii_bus); - kfree(fep->mii_bus->irq); - mdiobus_free(fep->mii_bus); - } + if (fep->phy_dev) + phy_disconnect(fep->phy_dev); + mdiobus_unregister(fep->mii_bus); + kfree(fep->mii_bus->irq); + mdiobus_free(fep->mii_bus); } static int fec_enet_get_settings(struct net_device *ndev, @@ -1152,7 +1137,7 @@ static void fec_enet_get_drvinfo(struct net_device *ndev, strcpy(info->bus_info, dev_name(&ndev->dev)); } -static const struct ethtool_ops fec_enet_ethtool_ops = { +static struct ethtool_ops fec_enet_ethtool_ops = { .get_settings = fec_enet_get_settings, .set_settings = fec_enet_set_settings, .get_drvinfo = fec_enet_get_drvinfo, @@ -1589,12 +1574,8 @@ fec_probe(struct platform_device *pdev) for (i = 0; i < FEC_IRQ_NUM; i++) { irq = platform_get_irq(pdev, i); - if (irq < 0) { - if (i) - break; - ret = irq; - goto failed_irq; - } + if (i && irq < 0) + break; ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev); if (ret) { while (--i >= 0) { @@ -1605,7 +1586,7 @@ fec_probe(struct platform_device *pdev) } } - fep->clk = clk_get(&pdev->dev, NULL); + fep->clk = 
clk_get(&pdev->dev, "fec_clk"); if (IS_ERR(fep->clk)) { ret = PTR_ERR(fep->clk); goto failed_clk; @@ -1657,18 +1638,13 @@ fec_drv_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); struct resource *r; - int i; - unregister_netdev(ndev); + fec_stop(ndev); fec_enet_mii_remove(fep); - for (i = 0; i < FEC_IRQ_NUM; i++) { - int irq = platform_get_irq(pdev, i); - if (irq > 0) - free_irq(irq, ndev); - } clk_disable(fep->clk); clk_put(fep->clk); iounmap(fep->hwp); + unregister_netdev(ndev); free_netdev(ndev); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/trunk/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/trunk/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 910a8e18a9ae..5bf5471f06ff 100644 --- a/trunk/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/trunk/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -1171,6 +1171,16 @@ static struct platform_driver fs_enet_driver = { .remove = fs_enet_remove, }; +static int __init fs_init(void) +{ + return platform_driver_register(&fs_enet_driver); +} + +static void __exit fs_cleanup(void) +{ + platform_driver_unregister(&fs_enet_driver); +} + #ifdef CONFIG_NET_POLL_CONTROLLER static void fs_enet_netpoll(struct net_device *dev) { @@ -1180,4 +1190,7 @@ static void fs_enet_netpoll(struct net_device *dev) } #endif -module_platform_driver(fs_enet_driver); +/**************************************************************************************/ + +module_init(fs_init); +module_exit(fs_cleanup); diff --git a/trunk/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/trunk/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index 0f2d1a710909..b09270b5d0a5 100644 --- a/trunk/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/trunk/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -232,4 +232,15 @@ static struct platform_driver fs_enet_bb_mdio_driver = { .remove = fs_enet_mdio_remove, }; -module_platform_driver(fs_enet_bb_mdio_driver); +static int fs_enet_mdio_bb_init(void) +{ + return platform_driver_register(&fs_enet_bb_mdio_driver); +} + +static void fs_enet_mdio_bb_exit(void) +{ + platform_driver_unregister(&fs_enet_bb_mdio_driver); +} + +module_init(fs_enet_mdio_bb_init); +module_exit(fs_enet_mdio_bb_exit); diff --git a/trunk/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/trunk/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index 55bb867258e6..e0e9d6c35d83 100644 --- a/trunk/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/trunk/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -237,4 +237,15 @@ static struct platform_driver fs_enet_fec_mdio_driver = { .remove = fs_enet_mdio_remove, }; -module_platform_driver(fs_enet_fec_mdio_driver); +static int fs_enet_mdio_fec_init(void) +{ + return platform_driver_register(&fs_enet_fec_mdio_driver); +} + +static void fs_enet_mdio_fec_exit(void) +{ + platform_driver_unregister(&fs_enet_fec_mdio_driver); +} + +module_init(fs_enet_mdio_fec_init); +module_exit(fs_enet_mdio_fec_exit); diff --git a/trunk/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/trunk/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 9eb815941df5..4d9f84b8ab97 100644 --- a/trunk/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/trunk/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -360,11 +360,12 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev) if (tbiaddr == -1) { err = -EBUSY; + goto err_free_irqs; - } else { - out_be32(tbipa, tbiaddr); } + out_be32(tbipa, 
tbiaddr); + err = of_mdiobus_register(new_bus, np); if (err) { printk (KERN_ERR "%s: Cannot register as MDIO bus\n", @@ -442,6 +443,15 @@ static struct platform_driver fsl_pq_mdio_driver = { .remove = fsl_pq_mdio_remove, }; -module_platform_driver(fsl_pq_mdio_driver); +int __init fsl_pq_mdio_init(void) +{ + return platform_driver_register(&fsl_pq_mdio_driver); +} +module_init(fsl_pq_mdio_init); +void fsl_pq_mdio_exit(void) +{ + platform_driver_unregister(&fsl_pq_mdio_driver); +} +module_exit(fsl_pq_mdio_exit); MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/net/ethernet/freescale/gianfar.c b/trunk/drivers/net/ethernet/freescale/gianfar.c index e01cdaa722a9..83199fd0d62b 100644 --- a/trunk/drivers/net/ethernet/freescale/gianfar.c +++ b/trunk/drivers/net/ethernet/freescale/gianfar.c @@ -734,7 +734,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) mac_addr = of_get_mac_address(np); if (mac_addr) - memcpy(dev->dev_addr, mac_addr, ETH_ALEN); + memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN); if (model && !strcasecmp(model, "TSEC")) priv->device_flags = @@ -2306,7 +2306,7 @@ void gfar_check_rx_parser_mode(struct gfar_private *priv) } /* Enables and disables VLAN insertion/extraction */ -void gfar_vlan_mode(struct net_device *dev, netdev_features_t features) +void gfar_vlan_mode(struct net_device *dev, u32 features) { struct gfar_private *priv = netdev_priv(dev); struct gfar __iomem *regs = NULL; @@ -3114,7 +3114,7 @@ static void gfar_set_multi(struct net_device *dev) static void gfar_clear_exact_match(struct net_device *dev) { int idx; - static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; + static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0}; for(idx = 1;idx < GFAR_EM_NUM + 1;idx++) gfar_set_mac_for_addr(dev, idx, zero_arr); @@ -3137,7 +3137,7 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) { u32 tempval; struct gfar_private *priv = netdev_priv(dev); - u32 result = ether_crc(ETH_ALEN, addr); + u32 result = ether_crc(MAC_ADDR_LEN, addr); int width = priv->hash_width; u8 whichbit = (result >> (32 - width)) & 0x1f; u8 whichreg = result >> (32 - width + 5); @@ -3158,7 +3158,7 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num, struct gfar_private *priv = netdev_priv(dev); struct gfar __iomem *regs = priv->gfargrp[0].regs; int idx; - char tmpbuf[ETH_ALEN]; + char tmpbuf[MAC_ADDR_LEN]; u32 tempval; u32 __iomem *macptr = &regs->macstnaddr1; @@ -3166,8 +3166,8 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num, /* Now copy it into the mac registers backwards, cuz */ /* little endian is silly */ - for (idx = 0; idx < ETH_ALEN; idx++) - tmpbuf[ETH_ALEN - 1 - idx] = addr[idx]; + for (idx = 0; idx < MAC_ADDR_LEN; idx++) + tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx]; gfar_write(macptr, *((u32 *) (tmpbuf))); @@ -3281,4 +3281,16 @@ static struct platform_driver gfar_driver = { .remove = gfar_remove, }; -module_platform_driver(gfar_driver); +static int __init gfar_init(void) +{ + return platform_driver_register(&gfar_driver); +} + +static void __exit gfar_exit(void) +{ + platform_driver_unregister(&gfar_driver); +} + +module_init(gfar_init); +module_exit(gfar_exit); + diff --git a/trunk/drivers/net/ethernet/freescale/gianfar.h b/trunk/drivers/net/ethernet/freescale/gianfar.h index fe7ac3a83194..9aa43773e8e3 100644 --- a/trunk/drivers/net/ethernet/freescale/gianfar.h +++ b/trunk/drivers/net/ethernet/freescale/gianfar.h @@ -74,6 +74,9 @@ struct ethtool_rx_list { * will be the next highest multiple of 512
bytes. */ #define INCREMENTAL_BUFFER_SIZE 512 + +#define MAC_ADDR_LEN 6 + #define PHY_INIT_TIMEOUT 100000 #define GFAR_PHY_CHANGE_TIME 2 @@ -1176,9 +1179,9 @@ extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, extern void gfar_configure_coalescing(struct gfar_private *priv, unsigned long tx_mask, unsigned long rx_mask); void gfar_init_sysfs(struct net_device *dev); -int gfar_set_features(struct net_device *dev, netdev_features_t features); +int gfar_set_features(struct net_device *dev, u32 features); extern void gfar_check_rx_parser_mode(struct gfar_private *priv); -extern void gfar_vlan_mode(struct net_device *dev, netdev_features_t features); +extern void gfar_vlan_mode(struct net_device *dev, u32 features); extern const struct ethtool_ops gfar_ethtool_ops; diff --git a/trunk/drivers/net/ethernet/freescale/gianfar_ethtool.c b/trunk/drivers/net/ethernet/freescale/gianfar_ethtool.c index 5a3b2e5b2880..212736bab6bb 100644 --- a/trunk/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/trunk/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -519,12 +519,12 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva return err; } -int gfar_set_features(struct net_device *dev, netdev_features_t features) +int gfar_set_features(struct net_device *dev, u32 features) { struct gfar_private *priv = netdev_priv(dev); unsigned long flags; int err = 0, i = 0; - netdev_features_t changed = dev->features ^ features; + u32 changed = dev->features ^ features; if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)) gfar_vlan_mode(dev, features); @@ -1410,9 +1410,10 @@ static int gfar_optimize_filer_masks(struct filer_table *tab) /* We need a copy of the filer table because * we want to change its order */ - temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL); + temp_table = kmalloc(sizeof(*temp_table), GFP_KERNEL); if (temp_table == NULL) return -ENOMEM; + memcpy(temp_table, tab, sizeof(*temp_table)); mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1, sizeof(struct gfar_mask_entry), GFP_KERNEL); @@ -1692,9 +1693,8 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd) ret = gfar_set_hash_opts(priv, cmd); break; case ETHTOOL_SRXCLSRLINS: - if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC && - cmd->fs.ring_cookie >= priv->num_rx_queues) || - cmd->fs.location >= MAX_FILER_IDX) { + if (cmd->fs.ring_cookie != RX_CLS_FLOW_DISC && + cmd->fs.ring_cookie >= priv->num_rx_queues) { ret = -EINVAL; break; } diff --git a/trunk/drivers/net/ethernet/freescale/gianfar_ptp.c b/trunk/drivers/net/ethernet/freescale/gianfar_ptp.c index 83e0ed757e33..f67b8aebc89c 100644 --- a/trunk/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/trunk/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -562,7 +562,21 @@ static struct platform_driver gianfar_ptp_driver = { .remove = gianfar_ptp_remove, }; -module_platform_driver(gianfar_ptp_driver); +/* module operations */ + +static int __init ptp_gianfar_init(void) +{ + return platform_driver_register(&gianfar_ptp_driver); +} + +module_init(ptp_gianfar_init); + +static void __exit ptp_gianfar_exit(void) +{ + platform_driver_unregister(&gianfar_ptp_driver); +} + +module_exit(ptp_gianfar_exit); MODULE_AUTHOR("Richard Cochran "); MODULE_DESCRIPTION("PTP clock using the eTSEC"); diff --git a/trunk/drivers/net/ethernet/freescale/ucc_geth.c b/trunk/drivers/net/ethernet/freescale/ucc_geth.c index ba2dc083bfc0..b5dc0273a1d1 100644 --- a/trunk/drivers/net/ethernet/freescale/ucc_geth.c +++ 
b/trunk/drivers/net/ethernet/freescale/ucc_geth.c @@ -443,7 +443,7 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth, static inline int compare_addr(u8 **addr1, u8 **addr2) { - return memcmp(addr1, addr2, ETH_ALEN); + return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS); } #ifdef DEBUG diff --git a/trunk/drivers/net/ethernet/freescale/ucc_geth.h b/trunk/drivers/net/ethernet/freescale/ucc_geth.h index 2e395a2566b8..d12fcad145e9 100644 --- a/trunk/drivers/net/ethernet/freescale/ucc_geth.h +++ b/trunk/drivers/net/ethernet/freescale/ucc_geth.h @@ -20,7 +20,6 @@ #include #include -#include #include #include @@ -882,6 +881,7 @@ struct ucc_geth_hardware_statistics { #define TX_RING_MOD_MASK(size) (size-1) #define RX_RING_MOD_MASK(size) (size-1) +#define ENET_NUM_OCTETS_PER_ADDRESS 6 #define ENET_GROUP_ADDR 0x01 /* Group address mask for ethernet addresses */ @@ -1051,7 +1051,7 @@ enum ucc_geth_num_of_station_addresses { /* UCC GETH 82xx Ethernet Address Container */ struct enet_addr_container { - u8 address[ETH_ALEN]; /* ethernet address */ + u8 address[ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */ enum ucc_geth_enet_address_recognition_location location; /* location in 82xx address recognition @@ -1194,7 +1194,7 @@ struct ucc_geth_private { u16 cpucount[NUM_TX_QUEUES]; u16 __iomem *p_cpucount[NUM_TX_QUEUES]; int indAddrRegUsed[NUM_OF_PADDRS]; - u8 paddr[NUM_OF_PADDRS][ETH_ALEN]; /* ethernet address */ + u8 paddr[NUM_OF_PADDRS][ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */ u8 numGroupAddrInHash; u8 numIndAddrInHash; u8 numIndAddrInReg; diff --git a/trunk/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/trunk/drivers/net/ethernet/fujitsu/fmvj18x_cs.c index ee84b472cee6..15416752c13e 100644 --- a/trunk/drivers/net/ethernet/fujitsu/fmvj18x_cs.c +++ b/trunk/drivers/net/ethernet/fujitsu/fmvj18x_cs.c @@ -1058,10 +1058,9 @@ static void fjn_rx(struct net_device *dev) static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - snprintf(info->bus_info, sizeof(info->bus_info), - "PCMCIA 0x%lx", dev->base_addr); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); } static const struct ethtool_ops netdev_ethtool_ops = { diff --git a/trunk/drivers/net/ethernet/i825xx/eepro.c b/trunk/drivers/net/ethernet/i825xx/eepro.c index 114cda7721fe..067c46069a11 100644 --- a/trunk/drivers/net/ethernet/i825xx/eepro.c +++ b/trunk/drivers/net/ethernet/i825xx/eepro.c @@ -1726,10 +1726,9 @@ static int eepro_ethtool_get_settings(struct net_device *dev, static void eepro_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { - strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); - snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info), - "ISA 0x%lx", dev->base_addr); + strcpy(drvinfo->driver, DRV_NAME); + strcpy(drvinfo->version, DRV_VERSION); + sprintf(drvinfo->bus_info, "ISA 0x%lx", dev->base_addr); } static const struct ethtool_ops eepro_ethtool_ops = { diff --git a/trunk/drivers/net/ethernet/ibm/ehea/ehea_main.c b/trunk/drivers/net/ethernet/ibm/ehea/ehea_main.c index 3554414eb5e2..bfeccbfde236 100644 --- a/trunk/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/trunk/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -2114,19 +2114,17 @@ static int ehea_start_xmit(struct sk_buff *skb, 
struct net_device *dev) return NETDEV_TX_OK; } -static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) { struct ehea_port *port = netdev_priv(dev); struct ehea_adapter *adapter = port->adapter; struct hcp_ehea_port_cb1 *cb1; int index; u64 hret; - int err = 0; cb1 = (void *)get_zeroed_page(GFP_KERNEL); if (!cb1) { pr_err("no mem for cb1\n"); - err = -ENOMEM; goto out; } @@ -2134,7 +2132,6 @@ static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) H_PORT_CB1, H_PORT_CB1_ALL, cb1); if (hret != H_SUCCESS) { pr_err("query_ehea_port failed\n"); - err = -EINVAL; goto out; } @@ -2143,28 +2140,24 @@ static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, H_PORT_CB1, H_PORT_CB1_ALL, cb1); - if (hret != H_SUCCESS) { + if (hret != H_SUCCESS) pr_err("modify_ehea_port failed\n"); - err = -EINVAL; - } out: free_page((unsigned long)cb1); - return err; + return; } -static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) { struct ehea_port *port = netdev_priv(dev); struct ehea_adapter *adapter = port->adapter; struct hcp_ehea_port_cb1 *cb1; int index; u64 hret; - int err = 0; cb1 = (void *)get_zeroed_page(GFP_KERNEL); if (!cb1) { pr_err("no mem for cb1\n"); - err = -ENOMEM; goto out; } @@ -2172,7 +2165,6 @@ static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) H_PORT_CB1, H_PORT_CB1_ALL, cb1); if (hret != H_SUCCESS) { pr_err("query_ehea_port failed\n"); - err = -EINVAL; goto out; } @@ -2181,13 +2173,10 @@ static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, H_PORT_CB1, H_PORT_CB1_ALL, cb1); - if (hret != H_SUCCESS) { + if (hret != H_SUCCESS) pr_err("modify_ehea_port failed\n"); - err = -EINVAL; - } out: free_page((unsigned long)cb1); - return err; } int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) diff --git a/trunk/drivers/net/ethernet/ibm/emac/core.c b/trunk/drivers/net/ethernet/ibm/emac/core.c index 2abce965c7bd..ed79b2d3ad3e 100644 --- a/trunk/drivers/net/ethernet/ibm/emac/core.c +++ b/trunk/drivers/net/ethernet/ibm/emac/core.c @@ -2924,9 +2924,6 @@ static int __devexit emac_remove(struct platform_device *ofdev) if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) zmii_detach(dev->zmii_dev, dev->zmii_port); - busy_phy_map &= ~(1 << dev->phy.address); - DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map); - mal_unregister_commac(dev->mal, &dev->commac); emac_put_deps(dev); diff --git a/trunk/drivers/net/ethernet/ibm/ibmveth.c b/trunk/drivers/net/ethernet/ibm/ibmveth.c index e877371680a9..b1cd41b9c61c 100644 --- a/trunk/drivers/net/ethernet/ibm/ibmveth.c +++ b/trunk/drivers/net/ethernet/ibm/ibmveth.c @@ -735,8 +735,7 @@ static void netdev_get_drvinfo(struct net_device *dev, sizeof(info->version) - 1); } -static netdev_features_t ibmveth_fix_features(struct net_device *dev, - netdev_features_t features) +static u32 ibmveth_fix_features(struct net_device *dev, u32 features) { /* * Since the ibmveth firmware interface does not have the @@ -839,8 +838,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data) return rc1 ? 
rc1 : rc2; } -static int ibmveth_set_features(struct net_device *dev, - netdev_features_t features) +static int ibmveth_set_features(struct net_device *dev, u32 features) { struct ibmveth_adapter *adapter = netdev_priv(dev); int rx_csum = !!(features & NETIF_F_RXCSUM); diff --git a/trunk/drivers/net/ethernet/icplus/ipg.c b/trunk/drivers/net/ethernet/icplus/ipg.c index 075451d0207d..8fd80a00b898 100644 --- a/trunk/drivers/net/ethernet/icplus/ipg.c +++ b/trunk/drivers/net/ethernet/icplus/ipg.c @@ -371,9 +371,16 @@ static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val) } /* The last cycle is a tri-state, so read from the PHY. */ - ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity); - ipg_r8(PHY_CTRL); - ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity); + for (j = 7; j < 8; j++) { + for (i = 0; i < p[j].len; i++) { + ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity); + + p[j].field |= ((ipg_r8(PHY_CTRL) & + IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i); + + ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity); + } + } } static void ipg_set_led_mode(struct net_device *dev) diff --git a/trunk/drivers/net/ethernet/intel/e100.c b/trunk/drivers/net/ethernet/intel/e100.c index 9436397e5725..5a2fdf7a00c8 100644 --- a/trunk/drivers/net/ethernet/intel/e100.c +++ b/trunk/drivers/net/ethernet/intel/e100.c @@ -2376,10 +2376,10 @@ static void e100_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { struct nic *nic = netdev_priv(netdev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(nic->pdev), - sizeof(info->bus_info)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->fw_version, "N/A"); + strcpy(info->bus_info, pci_name(nic->pdev)); } #define E100_PHY_REGS 0x1C diff --git a/trunk/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/trunk/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 3103f0b6bf5e..2b223ac99c42 100644 --- a/trunk/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/trunk/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -515,14 +515,14 @@ static void e1000_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct e1000_adapter *adapter = netdev_priv(netdev); + char firmware_version[32]; - strlcpy(drvinfo->driver, e1000_driver_name, - sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, e1000_driver_version, - sizeof(drvinfo->version)); + strncpy(drvinfo->driver, e1000_driver_name, 32); + strncpy(drvinfo->version, e1000_driver_version, 32); - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info)); + sprintf(firmware_version, "N/A"); + strncpy(drvinfo->fw_version, firmware_version, 32); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); drvinfo->regdump_len = e1000_get_regs_len(netdev); drvinfo->eedump_len = e1000_get_eeprom_len(netdev); } diff --git a/trunk/drivers/net/ethernet/intel/e1000/e1000_hw.h b/trunk/drivers/net/ethernet/intel/e1000/e1000_hw.h index f6c4d7e2560c..5c9a8403668b 100644 --- a/trunk/drivers/net/ethernet/intel/e1000/e1000_hw.h +++ b/trunk/drivers/net/ethernet/intel/e1000/e1000_hw.h @@ -448,6 +448,7 @@ void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value); #define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E #define NODE_ADDRESS_SIZE 6 +#define ETH_LENGTH_OF_ADDRESS 6 /* MAC decode size is 128K - This is the size of BAR0 */ #define MAC_DECODE_SIZE (128 * 1024) @@ -812,7 +813,8 @@ struct 
e1000_ffvt_entry { #define E1000_FLA 0x0001C /* Flash Access - RW */ #define E1000_MDIC 0x00020 /* MDI Control - RW */ -#define INTEL_CE_GBE_MDIO_RCOMP_BASE (hw->ce4100_gbe_mdio_base_virt) +extern void __iomem *ce4100_gbe_mdio_base_virt; +#define INTEL_CE_GBE_MDIO_RCOMP_BASE (ce4100_gbe_mdio_base_virt) #define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0) #define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4) #define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8) @@ -1342,7 +1344,6 @@ struct e1000_hw_stats { struct e1000_hw { u8 __iomem *hw_addr; u8 __iomem *flash_address; - void __iomem *ce4100_gbe_mdio_base_virt; e1000_mac_type mac_type; e1000_phy_type phy_type; u32 phy_init_script; diff --git a/trunk/drivers/net/ethernet/intel/e1000/e1000_main.c b/trunk/drivers/net/ethernet/intel/e1000/e1000_main.c index 669ca3800c01..cf480b554622 100644 --- a/trunk/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/trunk/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -33,6 +33,11 @@ #include #include +/* Intel Media SOC GbE MDIO physical base address */ +static unsigned long ce4100_gbe_mdio_base_phy; +/* Intel Media SOC GbE MDIO virtual base address */ +void __iomem *ce4100_gbe_mdio_base_virt; + char e1000_driver_name[] = "e1000"; static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; #define DRV_VERSION "7.3.21-k8-NAPI" @@ -162,10 +167,9 @@ static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb); static bool e1000_vlan_used(struct e1000_adapter *adapter); -static void e1000_vlan_mode(struct net_device *netdev, - netdev_features_t features); -static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid); -static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); +static void e1000_vlan_mode(struct net_device *netdev, u32 features); +static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid); +static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); static void e1000_restore_vlan(struct e1000_adapter *adapter); #ifdef CONFIG_PM @@ -802,8 +806,7 @@ static int e1000_is_need_ioport(struct pci_dev *pdev) } } -static netdev_features_t e1000_fix_features(struct net_device *netdev, - netdev_features_t features) +static u32 e1000_fix_features(struct net_device *netdev, u32 features) { /* * Since there is no support for separate rx/tx vlan accel @@ -817,11 +820,10 @@ static netdev_features_t e1000_fix_features(struct net_device *netdev, return features; } -static int e1000_set_features(struct net_device *netdev, - netdev_features_t features) +static int e1000_set_features(struct net_device *netdev, u32 features) { struct e1000_adapter *adapter = netdev_priv(netdev); - netdev_features_t changed = features ^ netdev->features; + u32 changed = features ^ netdev->features; if (changed & NETIF_F_HW_VLAN_RX) e1000_vlan_mode(netdev, features); @@ -1049,11 +1051,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev, err = -EIO; if (hw->mac_type == e1000_ce4100) { - hw->ce4100_gbe_mdio_base_virt = - ioremap(pci_resource_start(pdev, BAR_1), + ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1); + ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy, pci_resource_len(pdev, BAR_1)); - if (!hw->ce4100_gbe_mdio_base_virt) + if (!ce4100_gbe_mdio_base_virt) goto err_mdio_ioremap; } @@ -1180,7 +1182,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, if (global_quad_port_a != 0) adapter->eeprom_wol = 0; else - adapter->quad_port_a = true; + adapter->quad_port_a = 1; /* Reset for multiple quad 
port adapters */ if (++global_quad_port_a == 4) global_quad_port_a = 0; @@ -1244,7 +1246,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, err_dma: err_sw_init: err_mdio_ioremap: - iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(ce4100_gbe_mdio_base_virt); iounmap(hw->hw_addr); err_ioremap: free_netdev(netdev); @@ -1281,8 +1283,6 @@ static void __devexit e1000_remove(struct pci_dev *pdev) kfree(adapter->tx_ring); kfree(adapter->rx_ring); - if (hw->mac_type == e1000_ce4100) - iounmap(hw->ce4100_gbe_mdio_base_virt); iounmap(hw->hw_addr); if (hw->flash_address) iounmap(hw->flash_address); @@ -1676,7 +1676,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) * need this to apply a workaround later in the send path. */ if (hw->mac_type == e1000_82544 && hw->bus_type == e1000_bus_type_pcix) - adapter->pcix_82544 = true; + adapter->pcix_82544 = 1; ew32(TCTL, tctl); @@ -1999,7 +1999,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter, tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; - tx_ring->last_tx_tso = false; + tx_ring->last_tx_tso = 0; writel(0, hw->hw_addr + tx_ring->tdh); writel(0, hw->hw_addr + tx_ring->tdt); @@ -2848,7 +2848,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, * DMA'd to the controller */ if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) { - tx_ring->last_tx_tso = false; + tx_ring->last_tx_tso = 0; size -= 4; } @@ -3216,7 +3216,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, if (likely(tso)) { if (likely(hw->mac_type != e1000_82544)) - tx_ring->last_tx_tso = true; + tx_ring->last_tx_tso = 1; tx_flags |= E1000_TX_FLAGS_TSO; } else if (likely(e1000_tx_csum(adapter, tx_ring, skb))) tx_flags |= E1000_TX_FLAGS_CSUM; @@ -4577,8 +4577,7 @@ static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, e1000_irq_enable(adapter); } -static void e1000_vlan_mode(struct net_device *netdev, - netdev_features_t features) +static void e1000_vlan_mode(struct net_device *netdev, u32 features) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -4601,7 +4600,7 @@ static void e1000_vlan_mode(struct net_device *netdev, e1000_irq_enable(adapter); } -static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -4610,7 +4609,7 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) if ((hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && (vid == adapter->mng_vlan_id)) - return 0; + return; if (!e1000_vlan_used(adapter)) e1000_vlan_filter_on_off(adapter, true); @@ -4622,11 +4621,9 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) e1000_write_vfta(hw, index, vfta); set_bit(vid, adapter->active_vlans); - - return 0; } -static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -4647,8 +4644,6 @@ static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) if (!e1000_vlan_used(adapter)) e1000_vlan_filter_on_off(adapter, false); - - return 0; } static void e1000_restore_vlan(struct e1000_adapter *adapter) @@ -4721,6 +4716,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) netif_device_detach(netdev); + mutex_lock(&adapter->mutex); + if 
(netif_running(netdev)) { WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); e1000_down(adapter); @@ -4728,8 +4725,10 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) #ifdef CONFIG_PM retval = pci_save_state(pdev); - if (retval) + if (retval) { + mutex_unlock(&adapter->mutex); return retval; + } #endif status = er32(STATUS); @@ -4784,6 +4783,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) if (netif_running(netdev)) e1000_free_irq(adapter); + mutex_unlock(&adapter->mutex); + pci_disable_device(pdev); return 0; diff --git a/trunk/drivers/net/ethernet/intel/e1000e/e1000.h b/trunk/drivers/net/ethernet/intel/e1000e/e1000.h index f478a22ed577..9fe18d1d53d8 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/trunk/drivers/net/ethernet/intel/e1000e/e1000.h @@ -309,7 +309,6 @@ struct e1000_adapter { u32 txd_cmd; bool detect_tx_hung; - bool tx_hang_recheck; u8 tx_timeout_factor; u32 tx_int_delay; diff --git a/trunk/drivers/net/ethernet/intel/e1000e/ethtool.c b/trunk/drivers/net/ethernet/intel/e1000e/ethtool.c index fb2c28e799a2..69c9d2199140 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/trunk/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -579,24 +579,26 @@ static void e1000_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct e1000_adapter *adapter = netdev_priv(netdev); + char firmware_version[32]; - strlcpy(drvinfo->driver, e1000e_driver_name, - sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, e1000e_driver_version, - sizeof(drvinfo->version)); + strncpy(drvinfo->driver, e1000e_driver_name, + sizeof(drvinfo->driver) - 1); + strncpy(drvinfo->version, e1000e_driver_version, + sizeof(drvinfo->version) - 1); /* * EEPROM image version # is reported as firmware version # for * PCI-E controllers */ - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%d.%d-%d", + snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d", (adapter->eeprom_vers & 0xF000) >> 12, (adapter->eeprom_vers & 0x0FF0) >> 4, (adapter->eeprom_vers & 0x000F)); - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info)); + strncpy(drvinfo->fw_version, firmware_version, + sizeof(drvinfo->fw_version) - 1); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info) - 1); drvinfo->regdump_len = e1000_get_regs_len(netdev); drvinfo->eedump_len = e1000_get_eeprom_len(netdev); } diff --git a/trunk/drivers/net/ethernet/intel/e1000e/netdev.c b/trunk/drivers/net/ethernet/intel/e1000e/netdev.c index 3911401ed65d..a855db1ad249 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/trunk/drivers/net/ethernet/intel/e1000e/netdev.c @@ -163,13 +163,16 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) regs[n] = __er32(hw, E1000_TARC(n)); break; default: - pr_info("%-15s %08x\n", - reginfo->name, __er32(hw, reginfo->ofs)); + printk(KERN_INFO "%-15s %08x\n", + reginfo->name, __er32(hw, reginfo->ofs)); return; } snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]"); - pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]); + printk(KERN_INFO "%-15s ", rname); + for (n = 0; n < 2; n++) + printk(KERN_CONT "%08x ", regs[n]); + printk(KERN_CONT "\n"); } /* @@ -205,15 +208,16 @@ static void e1000e_dump(struct e1000_adapter *adapter) /* Print netdevice Info */ if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); - pr_info("Device Name state trans_start last_rx\n"); - pr_info("%-15s %016lX %016lX %016lX\n", - netdev->name, 
netdev->state, netdev->trans_start, - netdev->last_rx); + printk(KERN_INFO "Device Name state " + "trans_start last_rx\n"); + printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", + netdev->name, netdev->state, netdev->trans_start, + netdev->last_rx); } /* Print Registers */ dev_info(&adapter->pdev->dev, "Register Dump\n"); - pr_info(" Register Name Value\n"); + printk(KERN_INFO " Register Name Value\n"); for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl; reginfo->name; reginfo++) { e1000_regdump(hw, reginfo); @@ -224,14 +228,15 @@ static void e1000e_dump(struct e1000_adapter *adapter) goto exit; dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); - pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]" + " leng ntw timestamp\n"); buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; - pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n", - 0, tx_ring->next_to_use, tx_ring->next_to_clean, - (unsigned long long)buffer_info->dma, - buffer_info->length, - buffer_info->next_to_watch, - (unsigned long long)buffer_info->time_stamp); + printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", + 0, tx_ring->next_to_use, tx_ring->next_to_clean, + (unsigned long long)buffer_info->dma, + buffer_info->length, + buffer_info->next_to_watch, + (unsigned long long)buffer_info->time_stamp); /* Print Tx Ring */ if (!netif_msg_tx_done(adapter)) @@ -266,32 +271,37 @@ static void e1000e_dump(struct e1000_adapter *adapter) * +----------------------------------------------------------------+ * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 */ - pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n"); - pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n"); - pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n"); + printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]" + " [bi->dma ] leng ntw timestamp bi->skb " + "<-- Legacy format\n"); + printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]" + " [bi->dma ] leng ntw timestamp bi->skb " + "<-- Ext Context format\n"); + printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]" + " [bi->dma ] leng ntw timestamp bi->skb " + "<-- Ext Data format\n"); for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { - const char *next_desc; tx_desc = E1000_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; u0 = (struct my_u0 *)tx_desc; + printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX " + "%04X %3X %016llX %p", + (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' : + ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i, + (unsigned long long)le64_to_cpu(u0->a), + (unsigned long long)le64_to_cpu(u0->b), + (unsigned long long)buffer_info->dma, + buffer_info->length, buffer_info->next_to_watch, + (unsigned long long)buffer_info->time_stamp, + buffer_info->skb); if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) - next_desc = " NTC/U"; + printk(KERN_CONT " NTC/U\n"); else if (i == tx_ring->next_to_use) - next_desc = " NTU"; + printk(KERN_CONT " NTU\n"); else if (i == tx_ring->next_to_clean) - next_desc = " NTC"; + printk(KERN_CONT " NTC\n"); else - next_desc = ""; - pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n", - (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' : - ((le64_to_cpu(u0->b) & (1 << 20)) ? 
'd' : 'c')), - i, - (unsigned long long)le64_to_cpu(u0->a), - (unsigned long long)le64_to_cpu(u0->b), - (unsigned long long)buffer_info->dma, - buffer_info->length, buffer_info->next_to_watch, - (unsigned long long)buffer_info->time_stamp, - buffer_info->skb, next_desc); + printk(KERN_CONT "\n"); if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, @@ -302,9 +312,9 @@ static void e1000e_dump(struct e1000_adapter *adapter) /* Print Rx Ring Summary */ rx_ring_summary: dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); - pr_info("Queue [NTU] [NTC]\n"); - pr_info(" %5d %5X %5X\n", - 0, rx_ring->next_to_use, rx_ring->next_to_clean); + printk(KERN_INFO "Queue [NTU] [NTC]\n"); + printk(KERN_INFO " %5d %5X %5X\n", 0, + rx_ring->next_to_use, rx_ring->next_to_clean); /* Print Rx Ring */ if (!netif_msg_rx_status(adapter)) @@ -327,7 +337,10 @@ static void e1000e_dump(struct e1000_adapter *adapter) * 24 | Buffer Address 3 [63:0] | * +-----------------------------------------------------+ */ - pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n"); + printk(KERN_INFO "R [desc] [buffer 0 63:0 ] " + "[buffer 1 63:0 ] " + "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] " + "[bi->skb] <-- Ext Pkt Split format\n"); /* [Extended] Receive Descriptor (Write-Back) Format * * 63 48 47 32 31 13 12 8 7 4 3 0 @@ -339,40 +352,35 @@ static void e1000e_dump(struct e1000_adapter *adapter) * +------------------------------------------------------+ * 63 48 47 32 31 20 19 0 */ - pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n"); + printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] " + "[vl l0 ee es] " + "[ l3 l2 l1 hs] [reserved ] ---------------- " + "[bi->skb] <-- Ext Rx Write-Back format\n"); for (i = 0; i < rx_ring->count; i++) { - const char *next_desc; buffer_info = &rx_ring->buffer_info[i]; rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i); u1 = (struct my_u1 *)rx_desc_ps; staterr = le32_to_cpu(rx_desc_ps->wb.middle.status_error); - - if (i == rx_ring->next_to_use) - next_desc = " NTU"; - else if (i == rx_ring->next_to_clean) - next_desc = " NTC"; - else - next_desc = ""; - if (staterr & E1000_RXD_STAT_DD) { /* Descriptor Done */ - pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n", - "RWB", i, - (unsigned long long)le64_to_cpu(u1->a), - (unsigned long long)le64_to_cpu(u1->b), - (unsigned long long)le64_to_cpu(u1->c), - (unsigned long long)le64_to_cpu(u1->d), - buffer_info->skb, next_desc); + printk(KERN_INFO "RWB[0x%03X] %016llX " + "%016llX %016llX %016llX " + "---------------- %p", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)le64_to_cpu(u1->c), + (unsigned long long)le64_to_cpu(u1->d), + buffer_info->skb); } else { - pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n", - "R ", i, - (unsigned long long)le64_to_cpu(u1->a), - (unsigned long long)le64_to_cpu(u1->b), - (unsigned long long)le64_to_cpu(u1->c), - (unsigned long long)le64_to_cpu(u1->d), - (unsigned long long)buffer_info->dma, - buffer_info->skb, next_desc); + printk(KERN_INFO "R [0x%03X] %016llX " + "%016llX %016llX %016llX %016llX %p", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)le64_to_cpu(u1->c), + (unsigned long long)le64_to_cpu(u1->d), + (unsigned long long)buffer_info->dma, + 
buffer_info->skb); if (netif_msg_pktdata(adapter)) print_hex_dump(KERN_INFO, "", @@ -380,6 +388,13 @@ static void e1000e_dump(struct e1000_adapter *adapter) phys_to_virt(buffer_info->dma), adapter->rx_ps_bsize0, true); } + + if (i == rx_ring->next_to_use) + printk(KERN_CONT " NTU\n"); + else if (i == rx_ring->next_to_clean) + printk(KERN_CONT " NTC\n"); + else + printk(KERN_CONT "\n"); } break; default: @@ -392,7 +407,9 @@ static void e1000e_dump(struct e1000_adapter *adapter) * 8 | Reserved | * +-----------------------------------------------------+ */ - pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n"); + printk(KERN_INFO "R [desc] [buf addr 63:0 ] " + "[reserved 63:0 ] [bi->dma ] " + "[bi->skb] <-- Ext (Read) format\n"); /* Extended Receive Descriptor (Write-Back) Format * * 63 48 47 32 31 24 23 4 3 0 @@ -406,37 +423,29 @@ static void e1000e_dump(struct e1000_adapter *adapter) * +------------------------------------------------------+ * 63 48 47 32 31 20 19 0 */ - pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n"); + printk(KERN_INFO "RWB[desc] [cs ipid mrq] " + "[vt ln xe xs] " + "[bi->skb] <-- Ext (Write-Back) format\n"); for (i = 0; i < rx_ring->count; i++) { - const char *next_desc; - buffer_info = &rx_ring->buffer_info[i]; rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); u1 = (struct my_u1 *)rx_desc; staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - - if (i == rx_ring->next_to_use) - next_desc = " NTU"; - else if (i == rx_ring->next_to_clean) - next_desc = " NTC"; - else - next_desc = ""; - if (staterr & E1000_RXD_STAT_DD) { /* Descriptor Done */ - pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n", - "RWB", i, - (unsigned long long)le64_to_cpu(u1->a), - (unsigned long long)le64_to_cpu(u1->b), - buffer_info->skb, next_desc); + printk(KERN_INFO "RWB[0x%03X] %016llX " + "%016llX ---------------- %p", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + buffer_info->skb); } else { - pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n", - "R ", i, - (unsigned long long)le64_to_cpu(u1->a), - (unsigned long long)le64_to_cpu(u1->b), - (unsigned long long)buffer_info->dma, - buffer_info->skb, next_desc); + printk(KERN_INFO "R [0x%03X] %016llX " + "%016llX %016llX %p", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)buffer_info->dma, + buffer_info->skb); if (netif_msg_pktdata(adapter)) print_hex_dump(KERN_INFO, "", @@ -447,6 +456,13 @@ static void e1000e_dump(struct e1000_adapter *adapter) adapter->rx_buffer_len, true); } + + if (i == rx_ring->next_to_use) + printk(KERN_CONT " NTU\n"); + else if (i == rx_ring->next_to_clean) + printk(KERN_CONT " NTC\n"); + else + printk(KERN_CONT "\n"); } } @@ -859,7 +875,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, u32 length, staterr; unsigned int i; int cleaned_count = 0; - bool cleaned = false; + bool cleaned = 0; unsigned int total_rx_bytes = 0, total_rx_packets = 0; i = rx_ring->next_to_clean; @@ -888,7 +904,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, next_buffer = &rx_ring->buffer_info[i]; - cleaned = true; + cleaned = 1; cleaned_count++; dma_unmap_single(&pdev->dev, buffer_info->dma, @@ -1014,7 +1030,6 @@ static void e1000_print_hw_hang(struct work_struct *work) struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, print_hang_task); - struct net_device *netdev = adapter->netdev; struct e1000_ring 
*tx_ring = adapter->tx_ring; unsigned int i = tx_ring->next_to_clean; unsigned int eop = tx_ring->buffer_info[i].next_to_watch; @@ -1026,21 +1041,6 @@ static void e1000_print_hw_hang(struct work_struct *work) if (test_bit(__E1000_DOWN, &adapter->state)) return; - if (!adapter->tx_hang_recheck && - (adapter->flags2 & FLAG2_DMA_BURST)) { - /* May be block on write-back, flush and detect again - * flush pending descriptor writebacks to memory - */ - ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); - /* execute the writes immediately */ - e1e_flush(); - adapter->tx_hang_recheck = true; - return; - } - /* Real hang detected */ - adapter->tx_hang_recheck = false; - netif_stop_queue(netdev); - e1e_rphy(hw, PHY_STATUS, &phy_status); e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); @@ -1095,7 +1095,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) unsigned int i, eop; unsigned int count = 0; unsigned int total_tx_bytes = 0, total_tx_packets = 0; - unsigned int bytes_compl = 0, pkts_compl = 0; i = tx_ring->next_to_clean; eop = tx_ring->buffer_info[i].next_to_watch; @@ -1113,10 +1112,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) if (cleaned) { total_tx_packets += buffer_info->segs; total_tx_bytes += buffer_info->bytecount; - if (buffer_info->skb) { - bytes_compl += buffer_info->skb->len; - pkts_compl++; - } } e1000_put_txbuf(adapter, buffer_info); @@ -1135,8 +1130,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) tx_ring->next_to_clean = i; - netdev_completed_queue(netdev, pkts_compl, bytes_compl); - #define TX_WAKE_THRESHOLD 32 if (count && netif_carrier_ok(netdev) && e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) { @@ -1157,14 +1150,14 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) * Detect a transmit hang in hardware, this serializes the * check with the clearing of time_stamp and movement of i */ - adapter->detect_tx_hung = false; + adapter->detect_tx_hung = 0; if (tx_ring->buffer_info[i].time_stamp && time_after(jiffies, tx_ring->buffer_info[i].time_stamp + (adapter->tx_timeout_factor * HZ)) && - !(er32(STATUS) & E1000_STATUS_TXOFF)) + !(er32(STATUS) & E1000_STATUS_TXOFF)) { schedule_work(&adapter->print_hang_task); - else - adapter->tx_hang_recheck = false; + netif_stop_queue(netdev); + } } adapter->total_tx_bytes += total_tx_bytes; adapter->total_tx_packets += total_tx_packets; @@ -1192,7 +1185,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, unsigned int i, j; u32 length, staterr; int cleaned_count = 0; - bool cleaned = false; + bool cleaned = 0; unsigned int total_rx_bytes = 0, total_rx_packets = 0; i = rx_ring->next_to_clean; @@ -1218,7 +1211,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, next_buffer = &rx_ring->buffer_info[i]; - cleaned = true; + cleaned = 1; cleaned_count++; dma_unmap_single(&pdev->dev, buffer_info->dma, adapter->rx_ps_bsize0, DMA_FROM_DEVICE); @@ -1229,7 +1222,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, adapter->flags2 |= FLAG2_IS_DISCARDING; if (adapter->flags2 & FLAG2_IS_DISCARDING) { - e_dbg("Packet Split buffers didn't pick up the full packet\n"); + e_dbg("Packet Split buffers didn't pick up the full " + "packet\n"); dev_kfree_skb_irq(skb); if (staterr & E1000_RXD_STAT_EOP) adapter->flags2 &= ~FLAG2_IS_DISCARDING; @@ -1244,7 +1238,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, length = le16_to_cpu(rx_desc->wb.middle.length0); if (!length) { - 
e_dbg("Last part of the packet spanning multiple descriptors\n"); + e_dbg("Last part of the packet spanning multiple " + "descriptors\n"); dev_kfree_skb_irq(skb); goto next_desc; } @@ -1922,7 +1917,8 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) return; } /* MSI-X failed, so fall through and try MSI */ - e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n"); + e_err("Failed to initialize MSI-X interrupts. " + "Falling back to MSI interrupts.\n"); e1000e_reset_interrupt_capability(adapter); } adapter->int_mode = E1000E_INT_MODE_MSI; @@ -1932,7 +1928,8 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) adapter->flags |= FLAG_MSI_ENABLED; } else { adapter->int_mode = E1000E_INT_MODE_LEGACY; - e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n"); + e_err("Failed to initialize MSI interrupts. Falling " + "back to legacy interrupts.\n"); } /* Fall through */ case E1000E_INT_MODE_LEGACY: @@ -2263,7 +2260,6 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter) e1000_put_txbuf(adapter, buffer_info); } - netdev_reset_queue(adapter->netdev); size = sizeof(struct e1000_buffer) * tx_ring->count; memset(tx_ring->buffer_info, 0, size); @@ -2522,7 +2518,7 @@ static int e1000_clean(struct napi_struct *napi, int budget) return work_done; } -static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -2532,7 +2528,7 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && (vid == adapter->mng_vlan_id)) - return 0; + return; /* add VID to filter table */ if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { @@ -2543,11 +2539,9 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) } set_bit(vid, adapter->active_vlans); - - return 0; } -static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -2558,7 +2552,7 @@ static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) (vid == adapter->mng_vlan_id)) { /* release control to f/w */ e1000e_release_hw_control(adapter); - return 0; + return; } /* remove VID from filter table */ @@ -2570,8 +2564,6 @@ static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) } clear_bit(vid, adapter->active_vlans); - - return 0; } /** @@ -3121,147 +3113,79 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) } /** - * e1000e_write_mc_addr_list - write multicast addresses to MTA - * @netdev: network interface device structure - * - * Writes multicast address list to the MTA hash table. 
- * Returns: -ENOMEM on failure - * 0 on no addresses written - * X on writing X addresses to MTA - */ -static int e1000e_write_mc_addr_list(struct net_device *netdev) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct netdev_hw_addr *ha; - u8 *mta_list; - int i; - - if (netdev_mc_empty(netdev)) { - /* nothing to program, so clear mc list */ - hw->mac.ops.update_mc_addr_list(hw, NULL, 0); - return 0; - } - - mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC); - if (!mta_list) - return -ENOMEM; - - /* update_mc_addr_list expects a packed array of only addresses. */ - i = 0; - netdev_for_each_mc_addr(ha, netdev) - memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); - - hw->mac.ops.update_mc_addr_list(hw, mta_list, i); - kfree(mta_list); - - return netdev_mc_count(netdev); -} - -/** - * e1000e_write_uc_addr_list - write unicast addresses to RAR table - * @netdev: network interface device structure + * e1000_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program * - * Writes unicast address list to the RAR table. - * Returns: -ENOMEM on failure/insufficient address space - * 0 on no addresses written - * X on writing X addresses to the RAR table + * Updates the Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. **/ -static int e1000e_write_uc_addr_list(struct net_device *netdev) +static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count) { - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - unsigned int rar_entries = hw->mac.rar_entry_count; - int count = 0; - - /* save a rar entry for our hardware address */ - rar_entries--; - - /* save a rar entry for the LAA workaround */ - if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) - rar_entries--; - - /* return ENOMEM indicating insufficient memory for addresses */ - if (netdev_uc_count(netdev) > rar_entries) - return -ENOMEM; - - if (!netdev_uc_empty(netdev) && rar_entries) { - struct netdev_hw_addr *ha; - - /* - * write the addresses in reverse order to avoid write - * combining - */ - netdev_for_each_uc_addr(ha, netdev) { - if (!rar_entries) - break; - e1000e_rar_set(hw, ha->addr, rar_entries--); - count++; - } - } - - /* zero out the remaining RAR entries not used above */ - for (; rar_entries > 0; rar_entries--) { - ew32(RAH(rar_entries), 0); - ew32(RAL(rar_entries), 0); - } - e1e_flush(); - - return count; + hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count); } /** - * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set + * e1000_set_multi - Multicast and Promiscuous mode set * @netdev: network interface device structure * - * The ndo_set_rx_mode entry point is called whenever the unicast or multicast - * address list or the network interface flags are updated. This routine is - * responsible for configuring the hardware for proper unicast, multicast, + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, * promiscuous mode, and all-multi behavior. 
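Aside on the multicast path touched in the hunks above: both the removed e1000e_write_mc_addr_list() and the restored e1000_set_multi() flatten the net_device's multicast list into one packed array of 6-byte addresses before handing it to the hw->mac.ops.update_mc_addr_list() helper. The sketch below is illustrative only and not part of the patch; the helper name example_pack_mc_list() and its out-parameter are invented for the example, and kcalloc() stands in for the kzalloc()/kmalloc() calls the driver actually uses. GFP_ATOMIC is used because the rx-mode path can run with the address-list lock held.

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	/* Pack the device's multicast addresses back-to-back into one buffer.
	 * Returns the number of addresses packed, 0 for an empty list, or
	 * -ENOMEM on allocation failure.  Illustrative sketch only. */
	static int example_pack_mc_list(struct net_device *netdev, u8 **out_list)
	{
		struct netdev_hw_addr *ha;
		u8 *mta_list;
		int i = 0;

		*out_list = NULL;
		if (netdev_mc_empty(netdev))
			return 0;	/* caller clears the MTA with (NULL, 0) */

		mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
		if (!mta_list)
			return -ENOMEM;

		/* copy each filtered address into the flat array */
		netdev_for_each_mc_addr(ha, netdev)
			memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

		*out_list = mta_list;
		return i;
	}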
**/ -static void e1000e_set_rx_mode(struct net_device *netdev) +static void e1000_set_multi(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list; u32 rctl; /* Check for Promiscuous and All Multicast modes */ - rctl = er32(RCTL); - /* clear the affected bits */ - rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); + rctl = er32(RCTL); if (netdev->flags & IFF_PROMISC) { rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + rctl &= ~E1000_RCTL_VFE; /* Do not hardware filter VLANs in promisc mode */ e1000e_vlan_filter_disable(adapter); } else { - int count; if (netdev->flags & IFF_ALLMULTI) { rctl |= E1000_RCTL_MPE; + rctl &= ~E1000_RCTL_UPE; } else { - /* - * Write addresses to the MTA, if the attempt fails - * then we should just turn on promiscuous mode so - * that we can at least receive multicast traffic - */ - count = e1000e_write_mc_addr_list(netdev); - if (count < 0) - rctl |= E1000_RCTL_MPE; + rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); } e1000e_vlan_filter_enable(adapter); - /* - * Write addresses to available RAR registers, if there is not - * sufficient space to store all the addresses then enable - * unicast promiscuous mode - */ - count = e1000e_write_uc_addr_list(netdev); - if (count < 0) - rctl |= E1000_RCTL_UPE; } ew32(RCTL, rctl); + if (!netdev_mc_empty(netdev)) { + int i = 0; + + mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); + if (!mta_list) + return; + + /* prepare a packed array of only addresses. */ + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + e1000_update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + } else { + /* + * if we're called from probe, we might not have + * anything to do here, so clear out the list + */ + e1000_update_mc_addr_list(hw, NULL, 0); + } + if (netdev->features & NETIF_F_HW_VLAN_RX) e1000e_vlan_strip_enable(adapter); else @@ -3274,7 +3198,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev) **/ static void e1000_configure(struct e1000_adapter *adapter) { - e1000e_set_rx_mode(adapter->netdev); + e1000_set_multi(adapter->netdev); e1000_restore_vlan(adapter); e1000_init_manageability_pt(adapter); @@ -3520,6 +3444,7 @@ int e1000e_up(struct e1000_adapter *adapter) clear_bit(__E1000_DOWN, &adapter->state); + napi_enable(&adapter->napi); if (adapter->msix_entries) e1000_configure_msix(adapter); e1000_irq_enable(adapter); @@ -3581,6 +3506,7 @@ void e1000e_down(struct e1000_adapter *adapter) e1e_flush(); usleep_range(10000, 20000); + napi_disable(&adapter->napi); e1000_irq_disable(adapter); del_timer_sync(&adapter->watchdog_timer); @@ -3856,7 +3782,6 @@ static int e1000_open(struct net_device *netdev) e1000_irq_enable(adapter); - adapter->tx_hang_recheck = false; netif_start_queue(netdev); adapter->idle_check = true; @@ -3903,8 +3828,6 @@ static int e1000_close(struct net_device *netdev) pm_runtime_get_sync(&pdev->dev); - napi_disable(&adapter->napi); - if (!test_bit(__E1000_DOWN, &adapter->state)) { e1000e_down(adapter); e1000_free_irq(adapter); @@ -4245,19 +4168,22 @@ static void e1000_print_link_info(struct e1000_adapter *adapter) u32 ctrl = er32(CTRL); /* Link status message must follow this format for user tools */ - printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", - adapter->netdev->name, - adapter->link_speed, - adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half", - (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? 
"Rx/Tx" : - (ctrl & E1000_CTRL_RFCE) ? "Rx" : - (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"); + printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, " + "Flow Control: %s\n", + adapter->netdev->name, + adapter->link_speed, + (adapter->link_duplex == FULL_DUPLEX) ? + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? + "Rx/Tx" : + ((ctrl & E1000_CTRL_RFCE) ? "Rx" : + ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"))); } static bool e1000e_has_link(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - bool link_active = false; + bool link_active = 0; s32 ret_val = 0; /* @@ -4272,7 +4198,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter) ret_val = hw->mac.ops.check_for_link(hw); link_active = !hw->mac.get_link_status; } else { - link_active = true; + link_active = 1; } break; case e1000_media_type_fiber: @@ -4371,7 +4297,7 @@ static void e1000_watchdog_task(struct work_struct *work) if (link) { if (!netif_carrier_ok(netdev)) { - bool txb2b = true; + bool txb2b = 1; /* Cancel scheduled suspend requests. */ pm_runtime_resume(netdev->dev.parent); @@ -4397,18 +4323,21 @@ static void e1000_watchdog_task(struct work_struct *work) e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp); if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS)) - e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n"); + e_info("Autonegotiated half duplex but" + " link partner cannot autoneg. " + " Try forcing full duplex if " + "link gets many collisions.\n"); } /* adjust timeout factor according to speed/duplex */ adapter->tx_timeout_factor = 1; switch (adapter->link_speed) { case SPEED_10: - txb2b = false; + txb2b = 0; adapter->tx_timeout_factor = 16; break; case SPEED_100: - txb2b = false; + txb2b = 0; adapter->tx_timeout_factor = 10; break; } @@ -4544,7 +4473,7 @@ static void e1000_watchdog_task(struct work_struct *work) e1000e_flush_descriptors(adapter); /* Force detection of hung controller every watchdog period */ - adapter->detect_tx_hung = true; + adapter->detect_tx_hung = 1; /* * With 82571 controllers, LAA may be overwritten due to controller @@ -5056,7 +4985,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, /* if count is 0 then mapping error has occurred */ count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss); if (count) { - netdev_sent_queue(netdev, skb->len); e1000_tx_queue(adapter, tx_flags, count); /* Make sure there is space in the ring for the next send. */ e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2); @@ -5182,7 +5110,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) if ((adapter->hw.mac.type == e1000_pch2lan) && !(adapter->flags2 & FLAG2_CRC_STRIPPING) && (new_mtu > ETH_DATA_LEN)) { - e_err("Jumbo Frames not supported on 82579 when CRC stripping is disabled.\n"); + e_err("Jumbo Frames not supported on 82579 when CRC " + "stripping is disabled.\n"); return -EINVAL; } @@ -5402,7 +5331,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, if (wufc) { e1000_setup_rctl(adapter); - e1000e_set_rx_mode(netdev); + e1000_set_multi(netdev); /* turn on all-multi mode if wake on multicast is enabled */ if (wufc & E1000_WUFC_MC) { @@ -5598,8 +5527,8 @@ static int __e1000_resume(struct pci_dev *pdev) phy_data & E1000_WUS_MC ? "Multicast Packet" : phy_data & E1000_WUS_BC ? "Broadcast Packet" : phy_data & E1000_WUS_MAG ? "Magic Packet" : - phy_data & E1000_WUS_LNKC ? - "Link Status Change" : "other"); + phy_data & E1000_WUS_LNKC ? 
"Link Status " + " Change" : "other"); } e1e_wphy(&adapter->hw, BM_WUS, ~0); } else { @@ -5930,11 +5859,10 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter) } } -static int e1000_set_features(struct net_device *netdev, - netdev_features_t features) +static int e1000_set_features(struct net_device *netdev, u32 features) { struct e1000_adapter *adapter = netdev_priv(netdev); - netdev_features_t changed = features ^ netdev->features; + u32 changed = features ^ netdev->features; if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) adapter->flags |= FLAG_TSO_FORCE; @@ -5956,7 +5884,7 @@ static const struct net_device_ops e1000e_netdev_ops = { .ndo_stop = e1000_close, .ndo_start_xmit = e1000_xmit_frame, .ndo_get_stats64 = e1000e_get_stats64, - .ndo_set_rx_mode = e1000e_set_rx_mode, + .ndo_set_rx_mode = e1000_set_multi, .ndo_set_mac_address = e1000_set_mac, .ndo_change_mtu = e1000_change_mtu, .ndo_do_ioctl = e1000_ioctl, @@ -6021,7 +5949,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); + dev_err(&pdev->dev, "No usable DMA " + "configuration, aborting\n"); goto err_dma; } } @@ -6147,8 +6076,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev, NETIF_F_TSO6 | NETIF_F_HW_CSUM); - netdev->priv_flags |= IFF_UNICAST_FLT; - if (pci_using_dac) { netdev->features |= NETIF_F_HIGHDMA; netdev->vlan_features |= NETIF_F_HIGHDMA; @@ -6208,7 +6135,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, /* Initialize link parameters. User can change them with ethtool */ adapter->hw.mac.autoneg = 1; - adapter->fc_autoneg = true; + adapter->fc_autoneg = 1; adapter->hw.fc.requested_mode = e1000_fc_default; adapter->hw.fc.current_mode = e1000_fc_default; adapter->hw.phy.autoneg_advertised = 0x2f; diff --git a/trunk/drivers/net/ethernet/intel/igb/e1000_82575.c b/trunk/drivers/net/ethernet/intel/igb/e1000_82575.c index b8e20f037d0a..7881fb95a25b 100644 --- a/trunk/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/trunk/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -29,8 +29,6 @@ * e1000_82576 */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include @@ -246,7 +244,8 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) * Check for invalid size */ if ((hw->mac.type == e1000_82576) && (size > 15)) { - pr_notice("The NVM size is not valid, defaulting to 32K\n"); + printk("igb: The NVM size is not valid, " + "defaulting to 32K.\n"); size = 15; } nvm->word_size = 1 << size; diff --git a/trunk/drivers/net/ethernet/intel/igb/igb.h b/trunk/drivers/net/ethernet/intel/igb/igb.h index 3d12e67eebb4..c69feebf2653 100644 --- a/trunk/drivers/net/ethernet/intel/igb/igb.h +++ b/trunk/drivers/net/ethernet/intel/igb/igb.h @@ -447,9 +447,4 @@ static inline s32 igb_get_phy_info(struct e1000_hw *hw) return 0; } -static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring) -{ - return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); -} - #endif /* _IGB_H_ */ diff --git a/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c b/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c index 7998bf4d5946..43873eba2f63 100644 --- a/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -36,7 +36,6 @@ #include #include #include -#include #include "igb.h" @@ -149,8 +148,7 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) SUPPORTED_1000baseT_Full| 
SUPPORTED_Autoneg | SUPPORTED_TP); - ecmd->advertising = (ADVERTISED_TP | - ADVERTISED_Pause); + ecmd->advertising = ADVERTISED_TP; if (hw->mac.autoneg == 1) { ecmd->advertising |= ADVERTISED_Autoneg; @@ -167,8 +165,7 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ecmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_FIBRE | - ADVERTISED_Autoneg | - ADVERTISED_Pause); + ADVERTISED_Autoneg); ecmd->port = PORT_FIBRE; } @@ -676,22 +673,25 @@ static void igb_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct igb_adapter *adapter = netdev_priv(netdev); + char firmware_version[32]; u16 eeprom_data; - strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version)); + strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1); + strncpy(drvinfo->version, igb_driver_version, + sizeof(drvinfo->version) - 1); /* EEPROM image version # is reported as firmware version # for * 82575 controllers */ adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data); - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%d.%d-%d", + sprintf(firmware_version, "%d.%d-%d", (eeprom_data & 0xF000) >> 12, (eeprom_data & 0x0FF0) >> 4, eeprom_data & 0x000F); - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info)); + strncpy(drvinfo->fw_version, firmware_version, + sizeof(drvinfo->fw_version) - 1); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info) - 1); drvinfo->n_stats = IGB_STATS_LEN; drvinfo->testinfo_len = IGB_TEST_LEN; drvinfo->regdump_len = igb_get_regs_len(netdev); @@ -2162,19 +2162,6 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) } } -static int igb_ethtool_begin(struct net_device *netdev) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - pm_runtime_get_sync(&adapter->pdev->dev); - return 0; -} - -static void igb_ethtool_complete(struct net_device *netdev) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - pm_runtime_put(&adapter->pdev->dev); -} - static const struct ethtool_ops igb_ethtool_ops = { .get_settings = igb_get_settings, .set_settings = igb_set_settings, @@ -2201,8 +2188,6 @@ static const struct ethtool_ops igb_ethtool_ops = { .get_ethtool_stats = igb_get_ethtool_stats, .get_coalesce = igb_get_coalesce, .set_coalesce = igb_set_coalesce, - .begin = igb_ethtool_begin, - .complete = igb_ethtool_complete, }; void igb_set_ethtool_ops(struct net_device *netdev) diff --git a/trunk/drivers/net/ethernet/intel/igb/igb_main.c b/trunk/drivers/net/ethernet/intel/igb/igb_main.c index 01e5e89ef959..ced544499f1b 100644 --- a/trunk/drivers/net/ethernet/intel/igb/igb_main.c +++ b/trunk/drivers/net/ethernet/intel/igb/igb_main.c @@ -25,8 +25,6 @@ *******************************************************************************/ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include #include @@ -53,7 +51,6 @@ #include #include #include -#include #ifdef CONFIG_IGB_DCA #include #endif @@ -148,9 +145,9 @@ static bool igb_clean_rx_irq(struct igb_q_vector *, int); static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); static void igb_tx_timeout(struct net_device *); static void igb_reset_task(struct work_struct *); -static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features); -static int igb_vlan_rx_add_vid(struct net_device *, u16); -static int igb_vlan_rx_kill_vid(struct net_device *, u16); +static void 
igb_vlan_mode(struct net_device *netdev, u32 features); +static void igb_vlan_rx_add_vid(struct net_device *, u16); +static void igb_vlan_rx_kill_vid(struct net_device *, u16); static void igb_restore_vlan(struct igb_adapter *); static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8); static void igb_ping_all_vfs(struct igb_adapter *); @@ -173,18 +170,8 @@ static int igb_check_vf_assignment(struct igb_adapter *adapter); #endif #ifdef CONFIG_PM -static int igb_suspend(struct device *); -static int igb_resume(struct device *); -#ifdef CONFIG_PM_RUNTIME -static int igb_runtime_suspend(struct device *dev); -static int igb_runtime_resume(struct device *dev); -static int igb_runtime_idle(struct device *dev); -#endif -static const struct dev_pm_ops igb_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume) - SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume, - igb_runtime_idle) -}; +static int igb_suspend(struct pci_dev *, pm_message_t); +static int igb_resume(struct pci_dev *); #endif static void igb_shutdown(struct pci_dev *); #ifdef CONFIG_IGB_DCA @@ -225,7 +212,9 @@ static struct pci_driver igb_driver = { .probe = igb_probe, .remove = __devexit_p(igb_remove), #ifdef CONFIG_PM - .driver.pm = &igb_pm_ops, + /* Power Management Hooks */ + .suspend = igb_suspend, + .resume = igb_resume, #endif .shutdown = igb_shutdown, .err_handler = &igb_err_handler @@ -336,13 +325,16 @@ static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo) regs[n] = rd32(E1000_TXDCTL(n)); break; default: - pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs)); + printk(KERN_INFO "%-15s %08x\n", + reginfo->name, rd32(reginfo->ofs)); return; } snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); - pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], - regs[2], regs[3]); + printk(KERN_INFO "%-15s ", rname); + for (n = 0; n < 4; n++) + printk(KERN_CONT "%08x ", regs[n]); + printk(KERN_CONT "\n"); } /* @@ -367,15 +359,18 @@ static void igb_dump(struct igb_adapter *adapter) /* Print netdevice Info */ if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); - pr_info("Device Name state trans_start " - "last_rx\n"); - pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, - netdev->state, netdev->trans_start, netdev->last_rx); + printk(KERN_INFO "Device Name state " + "trans_start last_rx\n"); + printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", + netdev->name, + netdev->state, + netdev->trans_start, + netdev->last_rx); } /* Print Registers */ dev_info(&adapter->pdev->dev, "Register Dump\n"); - pr_info(" Register Name Value\n"); + printk(KERN_INFO " Register Name Value\n"); for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl; reginfo->name; reginfo++) { igb_regdump(hw, reginfo); @@ -386,17 +381,18 @@ static void igb_dump(struct igb_adapter *adapter) goto exit; dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); - pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]" + " leng ntw timestamp\n"); for (n = 0; n < adapter->num_tx_queues; n++) { struct igb_tx_buffer *buffer_info; tx_ring = adapter->tx_ring[n]; buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; - pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", - n, tx_ring->next_to_use, tx_ring->next_to_clean, - (u64)buffer_info->dma, - buffer_info->length, - buffer_info->next_to_watch, - (u64)buffer_info->time_stamp); + printk(KERN_INFO " %5d %5X %5X %016llX %04X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + 
(u64)buffer_info->dma, + buffer_info->length, + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp); } /* Print TX Rings */ @@ -418,38 +414,36 @@ static void igb_dump(struct igb_adapter *adapter) for (n = 0; n < adapter->num_tx_queues; n++) { tx_ring = adapter->tx_ring[n]; - pr_info("------------------------------------\n"); - pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); - pr_info("------------------------------------\n"); - pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] " - "[bi->dma ] leng ntw timestamp " - "bi->skb\n"); + printk(KERN_INFO "------------------------------------\n"); + printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index); + printk(KERN_INFO "------------------------------------\n"); + printk(KERN_INFO "T [desc] [address 63:0 ] " + "[PlPOCIStDDM Ln] [bi->dma ] " + "leng ntw timestamp bi->skb\n"); for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { - const char *next_desc; struct igb_tx_buffer *buffer_info; tx_desc = IGB_TX_DESC(tx_ring, i); buffer_info = &tx_ring->tx_buffer_info[i]; u0 = (struct my_u0 *)tx_desc; - if (i == tx_ring->next_to_use && - i == tx_ring->next_to_clean) - next_desc = " NTC/U"; - else if (i == tx_ring->next_to_use) - next_desc = " NTU"; - else if (i == tx_ring->next_to_clean) - next_desc = " NTC"; - else - next_desc = ""; - - pr_info("T [0x%03X] %016llX %016llX %016llX" - " %04X %p %016llX %p%s\n", i, + printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX" + " %04X %p %016llX %p", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), (u64)buffer_info->dma, buffer_info->length, buffer_info->next_to_watch, (u64)buffer_info->time_stamp, - buffer_info->skb, next_desc); + buffer_info->skb); + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + printk(KERN_CONT " NTC/U\n"); + else if (i == tx_ring->next_to_use) + printk(KERN_CONT " NTU\n"); + else if (i == tx_ring->next_to_clean) + printk(KERN_CONT " NTC\n"); + else + printk(KERN_CONT "\n"); if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) print_hex_dump(KERN_INFO, "", @@ -462,11 +456,11 @@ static void igb_dump(struct igb_adapter *adapter) /* Print RX Rings Summary */ rx_ring_summary: dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); - pr_info("Queue [NTU] [NTC]\n"); + printk(KERN_INFO "Queue [NTU] [NTC]\n"); for (n = 0; n < adapter->num_rx_queues; n++) { rx_ring = adapter->rx_ring[n]; - pr_info(" %5d %5X %5X\n", - n, rx_ring->next_to_use, rx_ring->next_to_clean); + printk(KERN_INFO " %5d %5X %5X\n", n, + rx_ring->next_to_use, rx_ring->next_to_clean); } /* Print RX Rings */ @@ -498,43 +492,36 @@ static void igb_dump(struct igb_adapter *adapter) for (n = 0; n < adapter->num_rx_queues; n++) { rx_ring = adapter->rx_ring[n]; - pr_info("------------------------------------\n"); - pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); - pr_info("------------------------------------\n"); - pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] " - "[bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); - pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] -----" - "----------- [bi->skb] <-- Adv Rx Write-Back format\n"); + printk(KERN_INFO "------------------------------------\n"); + printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index); + printk(KERN_INFO "------------------------------------\n"); + printk(KERN_INFO "R [desc] [ PktBuf A0] " + "[ HeadBuf DD] [bi->dma ] [bi->skb] " + "<-- Adv Rx Read format\n"); + printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] " + "[vl er S cks ln] ---------------- [bi->skb] " + "<-- Adv Rx Write-Back format\n"); for (i = 0; i < 
rx_ring->count; i++) { - const char *next_desc; struct igb_rx_buffer *buffer_info; buffer_info = &rx_ring->rx_buffer_info[i]; rx_desc = IGB_RX_DESC(rx_ring, i); u0 = (struct my_u0 *)rx_desc; staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - - if (i == rx_ring->next_to_use) - next_desc = " NTU"; - else if (i == rx_ring->next_to_clean) - next_desc = " NTC"; - else - next_desc = ""; - if (staterr & E1000_RXD_STAT_DD) { /* Descriptor Done */ - pr_info("%s[0x%03X] %016llX %016llX -------" - "--------- %p%s\n", "RWB", i, + printk(KERN_INFO "RWB[0x%03X] %016llX " + "%016llX ---------------- %p", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), - buffer_info->skb, next_desc); + buffer_info->skb); } else { - pr_info("%s[0x%03X] %016llX %016llX %016llX" - " %p%s\n", "R ", i, + printk(KERN_INFO "R [0x%03X] %016llX " + "%016llX %016llX %p", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), (u64)buffer_info->dma, - buffer_info->skb, next_desc); + buffer_info->skb); if (netif_msg_pktdata(adapter)) { print_hex_dump(KERN_INFO, "", @@ -551,6 +538,14 @@ static void igb_dump(struct igb_adapter *adapter) PAGE_SIZE/2, true); } } + + if (i == rx_ring->next_to_use) + printk(KERN_CONT " NTU\n"); + else if (i == rx_ring->next_to_clean) + printk(KERN_CONT " NTC\n"); + else + printk(KERN_CONT "\n"); + } } @@ -604,10 +599,10 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw) static int __init igb_init_module(void) { int ret; - pr_info("%s - version %s\n", + printk(KERN_INFO "%s - version %s\n", igb_driver_string, igb_driver_version); - pr_info("%s\n", igb_copyright); + printk(KERN_INFO "%s\n", igb_copyright); #ifdef CONFIG_IGB_DCA dca_register_notify(&dca_notifier); @@ -1507,7 +1502,6 @@ void igb_power_up_link(struct igb_adapter *adapter) igb_power_up_phy_copper(&adapter->hw); else igb_power_up_serdes_link_82575(&adapter->hw); - igb_reset_phy(&adapter->hw); } /** @@ -1748,8 +1742,7 @@ void igb_reset(struct igb_adapter *adapter) igb_get_phy_info(hw); } -static netdev_features_t igb_fix_features(struct net_device *netdev, - netdev_features_t features) +static u32 igb_fix_features(struct net_device *netdev, u32 features) { /* * Since there is no support for separate rx/tx vlan accel @@ -1763,10 +1756,9 @@ static netdev_features_t igb_fix_features(struct net_device *netdev, return features; } -static int igb_set_features(struct net_device *netdev, - netdev_features_t features) +static int igb_set_features(struct net_device *netdev, u32 features) { - netdev_features_t changed = netdev->features ^ features; + u32 changed = netdev->features ^ features; if (changed & NETIF_F_HW_VLAN_RX) igb_vlan_mode(netdev, features); @@ -2121,8 +2113,6 @@ static int __devinit igb_probe(struct pci_dev *pdev, default: break; } - - pm_runtime_put_noidle(&pdev->dev); return 0; err_register: @@ -2162,8 +2152,6 @@ static void __devexit igb_remove(struct pci_dev *pdev) struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - pm_runtime_get_noresume(&pdev->dev); - /* * The watchdog timer may be rescheduled, so explicitly * disable watchdog from being rescheduled. @@ -2486,22 +2474,16 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready. 
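For context on the __igb_open()/__igb_close() hunks that follow (and the pm_runtime_put_noidle()/pm_runtime_get_noresume() removals above): the code being removed wraps the ndo_open/ndo_stop paths so that a runtime-PM reference is taken around bring-up and tear-down, except when the call originates from the PM suspend/resume path, which already owns the device. A minimal sketch of that wrapper pattern is shown below; it is illustrative only, the example_* names are hypothetical, and the real ring/IRQ setup is reduced to a stub.

	#include <linux/netdevice.h>
	#include <linux/pm_runtime.h>

	/* Stub standing in for the real descriptor ring and IRQ setup. */
	static int example_do_open(struct net_device *netdev)
	{
		return 0;
	}

	static int example_open_common(struct net_device *netdev, bool resuming)
	{
		struct device *dev = netdev->dev.parent;	/* the bus device */
		int err;

		/* Keep the device awake while it is being brought up, unless
		 * the PM core already holds it because we are resuming. */
		if (!resuming)
			pm_runtime_get_sync(dev);

		err = example_do_open(netdev);

		if (!resuming)
			pm_runtime_put(dev);
		return err;
	}

	/* ndo_open entry point: a normal open is never a resume. */
	static int example_ndo_open(struct net_device *netdev)
	{
		return example_open_common(netdev, false);
	}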
**/ -static int __igb_open(struct net_device *netdev, bool resuming) +static int igb_open(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - struct pci_dev *pdev = adapter->pdev; int err; int i; /* disallow open during test */ - if (test_bit(__IGB_TESTING, &adapter->state)) { - WARN_ON(resuming); + if (test_bit(__IGB_TESTING, &adapter->state)) return -EBUSY; - } - - if (!resuming) - pm_runtime_get_sync(&pdev->dev); netif_carrier_off(netdev); @@ -2547,9 +2529,6 @@ static int __igb_open(struct net_device *netdev, bool resuming) netif_tx_start_all_queues(netdev); - if (!resuming) - pm_runtime_put(&pdev->dev); - /* start the watchdog. */ hw->mac.get_link_status = 1; schedule_work(&adapter->watchdog_task); @@ -2564,17 +2543,10 @@ static int __igb_open(struct net_device *netdev, bool resuming) igb_free_all_tx_resources(adapter); err_setup_tx: igb_reset(adapter); - if (!resuming) - pm_runtime_put(&pdev->dev); return err; } -static int igb_open(struct net_device *netdev) -{ - return __igb_open(netdev, false); -} - /** * igb_close - Disables a network interface * @netdev: network interface device structure @@ -2586,32 +2558,21 @@ static int igb_open(struct net_device *netdev) * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. **/ -static int __igb_close(struct net_device *netdev, bool suspending) +static int igb_close(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); - struct pci_dev *pdev = adapter->pdev; WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); - - if (!suspending) - pm_runtime_get_sync(&pdev->dev); - igb_down(adapter); + igb_free_irq(adapter); igb_free_all_tx_resources(adapter); igb_free_all_rx_resources(adapter); - if (!suspending) - pm_runtime_put_sync(&pdev->dev); return 0; } -static int igb_close(struct net_device *netdev) -{ - return __igb_close(netdev, false); -} - /** * igb_setup_tx_resources - allocate Tx resources (Descriptors) * @tx_ring: tx descriptor ring (for a specific queue) to setup @@ -3242,7 +3203,6 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) buffer_info = &tx_ring->tx_buffer_info[i]; igb_unmap_and_free_tx_resource(tx_ring, buffer_info); } - netdev_tx_reset_queue(txring_txq(tx_ring)); size = sizeof(struct igb_tx_buffer) * tx_ring->count; memset(tx_ring->tx_buffer_info, 0, size); @@ -3672,9 +3632,6 @@ static void igb_watchdog_task(struct work_struct *work) link = igb_has_link(adapter); if (link) { - /* Cancel scheduled suspend requests. */ - pm_runtime_resume(netdev->dev.parent); - if (!netif_carrier_ok(netdev)) { u32 ctrl; hw->mac.ops.get_speed_and_duplex(hw, @@ -3683,23 +3640,23 @@ static void igb_watchdog_task(struct work_struct *work) ctrl = rd32(E1000_CTRL); /* Links status message must follow this format */ - printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s " - "Duplex, Flow Control: %s\n", + printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, " + "Flow Control: %s\n", netdev->name, adapter->link_speed, adapter->link_duplex == FULL_DUPLEX ? - "Full" : "Half", - (ctrl & E1000_CTRL_TFCE) && - (ctrl & E1000_CTRL_RFCE) ? "RX/TX" : - (ctrl & E1000_CTRL_RFCE) ? "RX" : - (ctrl & E1000_CTRL_TFCE) ? "TX" : "None"); + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && + (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" : + ((ctrl & E1000_CTRL_RFCE) ? "RX" : + ((ctrl & E1000_CTRL_TFCE) ? 
"TX" : "None"))); /* check for thermal sensor event */ - if (igb_thermal_sensor_event(hw, - E1000_THSTAT_LINK_THROTTLE)) { - netdev_info(netdev, "The network adapter link " - "speed was downshifted because it " - "overheated\n"); + if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) { + printk(KERN_INFO "igb: %s The network adapter " + "link speed was downshifted " + "because it overheated.\n", + netdev->name); } /* adjust timeout factor according to speed/duplex */ @@ -3729,10 +3686,11 @@ static void igb_watchdog_task(struct work_struct *work) adapter->link_duplex = 0; /* check for thermal sensor event */ - if (igb_thermal_sensor_event(hw, - E1000_THSTAT_PWR_DOWN)) { - netdev_err(netdev, "The network adapter was " - "stopped because it overheated\n"); + if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) { + printk(KERN_ERR "igb: %s The network adapter " + "was stopped because it " + "overheated.\n", + netdev->name); } /* Links status message must follow this format */ @@ -3746,9 +3704,6 @@ static void igb_watchdog_task(struct work_struct *work) if (!test_bit(__IGB_DOWN, &adapter->state)) mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); - - pm_schedule_suspend(netdev->dev.parent, - MSEC_PER_SEC * 5); } } @@ -4286,8 +4241,6 @@ static void igb_tx_map(struct igb_ring *tx_ring, frag++; } - netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); - /* write last descriptor with RS and EOP bits */ cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD); tx_desc->read.cmd_type_len = cmd_type; @@ -5827,8 +5780,6 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) } } - netdev_tx_completed_queue(txring_txq(tx_ring), - total_packets, total_bytes); i += tx_ring->count; tx_ring->next_to_clean = i; u64_stats_update_begin(&tx_ring->tx_syncp); @@ -6187,7 +6138,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, return true; if (!page) { - page = alloc_page(GFP_ATOMIC | __GFP_COLD); + page = netdev_alloc_page(rx_ring->netdev); bi->page = page; if (unlikely(!page)) { rx_ring->rx_stats.alloc_failed++; @@ -6516,7 +6467,7 @@ s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) return 0; } -static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features) +static void igb_vlan_mode(struct net_device *netdev, u32 features) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -6543,7 +6494,7 @@ static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features) igb_rlpml_set(adapter); } -static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -6556,11 +6507,9 @@ static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) igb_vfta_set(hw, vid, true); set_bit(vid, adapter->active_vlans); - - return 0; } -static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -6575,8 +6524,6 @@ static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) igb_vfta_set(hw, vid, false); clear_bit(vid, adapter->active_vlans); - - return 0; } static void igb_restore_vlan(struct igb_adapter *adapter) @@ -6635,14 +6582,13 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) return -EINVAL; } -static int 
__igb_shutdown(struct pci_dev *pdev, bool *enable_wake, - bool runtime) +static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 ctrl, rctl, status; - u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; + u32 wufc = adapter->wol; #ifdef CONFIG_PM int retval = 0; #endif @@ -6650,7 +6596,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, netif_device_detach(netdev); if (netif_running(netdev)) - __igb_close(netdev, true); + igb_close(netdev); igb_clear_interrupt_scheme(adapter); @@ -6709,13 +6655,12 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, } #ifdef CONFIG_PM -static int igb_suspend(struct device *dev) +static int igb_suspend(struct pci_dev *pdev, pm_message_t state) { int retval; bool wake; - struct pci_dev *pdev = to_pci_dev(dev); - retval = __igb_shutdown(pdev, &wake, 0); + retval = __igb_shutdown(pdev, &wake); if (retval) return retval; @@ -6729,9 +6674,8 @@ static int igb_suspend(struct device *dev) return 0; } -static int igb_resume(struct device *dev) +static int igb_resume(struct pci_dev *pdev) { - struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -6752,18 +6696,7 @@ static int igb_resume(struct device *dev) pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); - if (!rtnl_is_locked()) { - /* - * shut up ASSERT_RTNL() warning in - * netif_set_real_num_tx/rx_queues. - */ - rtnl_lock(); - err = igb_init_interrupt_scheme(adapter); - rtnl_unlock(); - } else { - err = igb_init_interrupt_scheme(adapter); - } - if (err) { + if (igb_init_interrupt_scheme(adapter)) { dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); return -ENOMEM; } @@ -6776,61 +6709,23 @@ static int igb_resume(struct device *dev) wr32(E1000_WUS, ~0); - if (netdev->flags & IFF_UP) { - err = __igb_open(netdev, true); + if (netif_running(netdev)) { + err = igb_open(netdev); if (err) return err; } netif_device_attach(netdev); - return 0; -} - -#ifdef CONFIG_PM_RUNTIME -static int igb_runtime_idle(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); - struct igb_adapter *adapter = netdev_priv(netdev); - - if (!igb_has_link(adapter)) - pm_schedule_suspend(dev, MSEC_PER_SEC * 5); - - return -EBUSY; -} - -static int igb_runtime_suspend(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - int retval; - bool wake; - - retval = __igb_shutdown(pdev, &wake, 1); - if (retval) - return retval; - - if (wake) { - pci_prepare_to_sleep(pdev); - } else { - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - } return 0; } - -static int igb_runtime_resume(struct device *dev) -{ - return igb_resume(dev); -} -#endif /* CONFIG_PM_RUNTIME */ #endif static void igb_shutdown(struct pci_dev *pdev) { bool wake; - __igb_shutdown(pdev, &wake, 0); + __igb_shutdown(pdev, &wake); if (system_state == SYSTEM_POWER_OFF) { pci_wake_from_d3(pdev, wake); @@ -7169,28 +7064,15 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) wr32(E1000_DMCTXTH, 0); /* - * DMA Coalescing high water mark needs to be greater - * than the Rx threshold. Set hwm to PBA - max frame - * size in 16B units, capping it at PBA - 6KB. + * DMA Coalescing high water mark needs to be higher + * than the RX threshold. 
set hwm to PBA - 2 * max + * frame size */ - hwm = 64 * pba - adapter->max_frame_size / 16; - if (hwm < 64 * (pba - 6)) - hwm = 64 * (pba - 6); - reg = rd32(E1000_FCRTC); - reg &= ~E1000_FCRTC_RTH_COAL_MASK; - reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) - & E1000_FCRTC_RTH_COAL_MASK); - wr32(E1000_FCRTC, reg); - - /* - * Set the DMA Coalescing Rx threshold to PBA - 2 * max - * frame size, capping it at PBA - 10KB. - */ - dmac_thr = pba - adapter->max_frame_size / 512; - if (dmac_thr < pba - 10) - dmac_thr = pba - 10; + hwm = pba - (2 * adapter->max_frame_size); reg = rd32(E1000_DMACR); reg &= ~E1000_DMACR_DMACTHR_MASK; + dmac_thr = pba - 4; + reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT) & E1000_DMACR_DMACTHR_MASK); @@ -7206,6 +7088,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) * coalescing(smart fifb)-UTRESH=0 */ wr32(E1000_DMCRTRH, 0); + wr32(E1000_FCRTC, hwm); reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4); diff --git a/trunk/drivers/net/ethernet/intel/igbvf/ethtool.c b/trunk/drivers/net/ethernet/intel/igbvf/ethtool.c index 7b600a1f6366..2c25858cc0ff 100644 --- a/trunk/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/trunk/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -191,12 +191,12 @@ static void igbvf_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct igbvf_adapter *adapter = netdev_priv(netdev); + char firmware_version[32] = "N/A"; - strlcpy(drvinfo->driver, igbvf_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, igbvf_driver_version, - sizeof(drvinfo->version)); - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info)); + strncpy(drvinfo->driver, igbvf_driver_name, 32); + strncpy(drvinfo->version, igbvf_driver_version, 32); + strncpy(drvinfo->fw_version, firmware_version, 32); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); drvinfo->regdump_len = igbvf_get_regs_len(netdev); drvinfo->eedump_len = igbvf_get_eeprom_len(netdev); } diff --git a/trunk/drivers/net/ethernet/intel/igbvf/netdev.c b/trunk/drivers/net/ethernet/intel/igbvf/netdev.c index fd3da3076c2f..cca78124be31 100644 --- a/trunk/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/trunk/drivers/net/ethernet/intel/igbvf/netdev.c @@ -25,8 +25,6 @@ *******************************************************************************/ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include #include @@ -1176,20 +1174,18 @@ static void igbvf_set_rlpml(struct igbvf_adapter *adapter) e1000_rlpml_set_vf(hw, max_frame_size); } -static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - if (hw->mac.ops.set_vfta(hw, vid, true)) { + if (hw->mac.ops.set_vfta(hw, vid, true)) dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid); - return -EINVAL; - } - set_bit(vid, adapter->active_vlans); - return 0; + else + set_bit(vid, adapter->active_vlans); } -static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -1199,13 +1195,11 @@ static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) if (!test_bit(__IGBVF_DOWN, &adapter->state)) igbvf_irq_enable(adapter); - if (hw->mac.ops.set_vfta(hw, vid, false)) { + if (hw->mac.ops.set_vfta(hw, vid, false)) dev_err(&adapter->pdev->dev, 
"Failed to remove vlan id %d\n", vid); - return -EINVAL; - } - clear_bit(vid, adapter->active_vlans); - return 0; + else + clear_bit(vid, adapter->active_vlans); } static void igbvf_restore_vlan(struct igbvf_adapter *adapter) @@ -1752,9 +1746,10 @@ void igbvf_update_stats(struct igbvf_adapter *adapter) static void igbvf_print_link_info(struct igbvf_adapter *adapter) { - dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n", - adapter->link_speed, - adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half"); + dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n", + adapter->link_speed, + ((adapter->link_duplex == FULL_DUPLEX) ? + "Full Duplex" : "Half Duplex")); } static bool igbvf_has_link(struct igbvf_adapter *adapter) @@ -2537,8 +2532,7 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter) dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr); } -static int igbvf_set_features(struct net_device *netdev, - netdev_features_t features) +static int igbvf_set_features(struct net_device *netdev, u32 features) { struct igbvf_adapter *adapter = netdev_priv(netdev); @@ -2848,8 +2842,9 @@ static struct pci_driver igbvf_driver = { static int __init igbvf_init_module(void) { int ret; - pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version); - pr_info("%s\n", igbvf_copyright); + printk(KERN_INFO "%s - version %s\n", + igbvf_driver_string, igbvf_driver_version); + printk(KERN_INFO "%s\n", igbvf_copyright); ret = pci_register_driver(&igbvf_driver); diff --git a/trunk/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/trunk/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c index dbb7dd2f8e36..9dfce7dff79b 100644 --- a/trunk/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c +++ b/trunk/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c @@ -473,12 +473,10 @@ ixgb_get_drvinfo(struct net_device *netdev, { struct ixgb_adapter *adapter = netdev_priv(netdev); - strlcpy(drvinfo->driver, ixgb_driver_name, - sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, ixgb_driver_version, - sizeof(drvinfo->version)); - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info)); + strncpy(drvinfo->driver, ixgb_driver_name, 32); + strncpy(drvinfo->version, ixgb_driver_version, 32); + strncpy(drvinfo->fw_version, "N/A", 32); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); drvinfo->n_stats = IXGB_STATS_LEN; drvinfo->regdump_len = ixgb_get_regs_len(netdev); drvinfo->eedump_len = ixgb_get_eeprom_len(netdev); diff --git a/trunk/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/trunk/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 9bd5faf64a85..e21148f8b160 100644 --- a/trunk/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/trunk/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -101,8 +101,8 @@ static void ixgb_tx_timeout_task(struct work_struct *work); static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter); static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter); -static int ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid); -static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); +static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid); +static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); static void ixgb_restore_vlan(struct ixgb_adapter *adapter); #ifdef CONFIG_NET_POLL_CONTROLLER @@ -228,7 +228,7 @@ ixgb_up(struct ixgb_adapter *adapter) if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) { err = pci_enable_msi(adapter->pdev); if (!err) { - adapter->have_msi = true; + 
adapter->have_msi = 1; irq_flags = 0; } /* proceed to try to request regular interrupt */ @@ -325,8 +325,8 @@ ixgb_reset(struct ixgb_adapter *adapter) } } -static netdev_features_t -ixgb_fix_features(struct net_device *netdev, netdev_features_t features) +static u32 +ixgb_fix_features(struct net_device *netdev, u32 features) { /* * Tx VLAN insertion does not work per HW design when Rx stripping is @@ -339,10 +339,10 @@ ixgb_fix_features(struct net_device *netdev, netdev_features_t features) } static int -ixgb_set_features(struct net_device *netdev, netdev_features_t features) +ixgb_set_features(struct net_device *netdev, u32 features) { struct ixgb_adapter *adapter = netdev_priv(netdev); - netdev_features_t changed = features ^ netdev->features; + u32 changed = features ^ netdev->features; if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_RX))) return 0; @@ -2217,7 +2217,7 @@ ixgb_vlan_strip_disable(struct ixgb_adapter *adapter) IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl); } -static int +static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct ixgb_adapter *adapter = netdev_priv(netdev); @@ -2230,11 +2230,9 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) vfta |= (1 << (vid & 0x1F)); ixgb_write_vfta(&adapter->hw, index, vfta); set_bit(vid, adapter->active_vlans); - - return 0; } -static int +static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct ixgb_adapter *adapter = netdev_priv(netdev); @@ -2247,8 +2245,6 @@ ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) vfta &= ~(1 << (vid & 0x1F)); ixgb_write_vfta(&adapter->hw, index, vfta); clear_bit(vid, adapter->active_vlans); - - return 0; } static void diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 258164d6d45a..a8368d5cf686 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -560,7 +560,6 @@ extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, extern char ixgbe_driver_name[]; extern const char ixgbe_driver_version[]; -extern char ixgbe_default_device_descr[]; extern void ixgbe_up(struct ixgbe_adapter *adapter); extern void ixgbe_down(struct ixgbe_adapter *adapter); @@ -628,8 +627,6 @@ extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter); extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up); #endif /* CONFIG_IXGBE_DCB */ extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); -extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, - struct netdev_fcoe_hbainfo *info); #endif /* IXGBE_FCOE */ #endif /* _IXGBE_H_ */ diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 772072147bea..4ae26a748da0 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -356,7 +356,6 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82599_SFP_FCOE: case IXGBE_DEV_ID_82599_SFP_EM: case IXGBE_DEV_ID_82599_SFP_SF2: - case IXGBE_DEV_ID_82599_SFP_SF_QP: case IXGBE_DEV_ID_82599EN_SFP: media_type = ixgbe_media_type_fiber; break; diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index a3aa6333073f..f1365fef4ed2 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -266,10 +266,10 @@ s32 
ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) if (hw->mac.type == ixgbe_mac_X540) { if (hw->phy.id == 0) hw->phy.ops.identify(hw); - hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i); - hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i); - hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i); - hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i); + hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i); + hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i); + hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i); + hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i); } return 0; @@ -2599,7 +2599,7 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) { ixgbe_link_speed speed = 0; - bool link_up = false; + bool link_up = 0; u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index da31735311f1..33b93ffb87cb 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -158,6 +158,10 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, { struct ixgbe_adapter *adapter = netdev_priv(netdev); + /* Abort a bad configuration */ + if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs) + return; + if (prio != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio; if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) @@ -181,7 +185,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, if (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap != adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap) - adapter->dcb_set_bitmap |= BIT_PFC | BIT_APP_UPCHG; + adapter->dcb_set_bitmap |= BIT_PFC; } static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, @@ -202,6 +206,10 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, { struct ixgbe_adapter *adapter = netdev_priv(netdev); + /* Abort bad configurations */ + if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs) + return; + if (prio != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio; if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) @@ -301,27 +309,6 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc; } -#ifdef IXGBE_FCOE -static void ixgbe_dcbnl_devreset(struct net_device *dev) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - - while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) - usleep_range(1000, 2000); - - if (netif_running(dev)) - dev->netdev_ops->ndo_stop(dev); - - ixgbe_clear_interrupt_scheme(adapter); - ixgbe_init_interrupt_scheme(adapter); - - if (netif_running(dev)) - dev->netdev_ops->ndo_open(dev); - - clear_bit(__IXGBE_RESETTING, &adapter->state); -} -#endif - static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -351,6 +338,27 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) if (ret) return DCB_NO_HW_CHG; +#ifdef IXGBE_FCOE + if (up && !(up & (1 << adapter->fcoe.up))) + adapter->dcb_set_bitmap |= BIT_APP_UPCHG; + + /* + * Only take down the adapter if an app change occurred. FCoE + * may shuffle tx rings in this case and this can not be done + * without a reset currently. 
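A small aside on the FCoE user-priority handling in the ixgbe_dcbnl_set_all() hunk above: as used there, up is a bitmap of user priorities, and ffs(up) - 1 converts its lowest set bit back into a 0-based priority number. The helper below is only an illustration of that conversion, with a made-up name; it is not part of the patch.

	#include <linux/bitops.h>
	#include <linux/types.h>

	/* Convert a user-priority bitmap to the lowest 0-based priority.
	 * Illustrative helper only; returns -1 for an empty bitmap. */
	static int example_lowest_prio(u8 up_map)
	{
		return ffs(up_map) - 1;	/* e.g. 0x08 (bit 3 set) -> priority 3 */
	}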
+ */ + if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { + while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + adapter->fcoe.up = ffs(up) - 1; + + if (netif_running(netdev)) + netdev->netdev_ops->ndo_stop(netdev); + ixgbe_clear_interrupt_scheme(adapter); + } +#endif + if (adapter->dcb_cfg.pfc_mode_enable) { switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: @@ -377,6 +385,15 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) } } +#ifdef IXGBE_FCOE + if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { + ixgbe_init_interrupt_scheme(adapter); + if (netif_running(netdev)) + netdev->netdev_ops->ndo_open(netdev); + ret = DCB_HW_CHG_RST; + } +#endif + if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) { u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS]; u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS]; @@ -425,19 +442,8 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) if (adapter->dcb_cfg.pfc_mode_enable) adapter->hw.fc.current_mode = ixgbe_fc_pfc; -#ifdef IXGBE_FCOE - /* Reprogam FCoE hardware offloads when the traffic class - * FCoE is using changes. This happens if the APP info - * changes or the up2tc mapping is updated. - */ - if ((up && !(up & (1 << adapter->fcoe.up))) || - (adapter->dcb_set_bitmap & BIT_APP_UPCHG)) { - adapter->fcoe.up = ffs(up) - 1; - ixgbe_dcbnl_devreset(netdev); - ret = DCB_HW_CHG_RST; - } -#endif - + if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) + clear_bit(__IXGBE_RESETTING, &adapter->state); adapter->dcb_set_bitmap = 0x00; return ret; } @@ -655,6 +661,22 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev, return ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en, prio_tc); } +#ifdef IXGBE_FCOE +static void ixgbe_dcbnl_devreset(struct net_device *dev) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + if (netif_running(dev)) + dev->netdev_ops->ndo_stop(dev); + + ixgbe_clear_interrupt_scheme(adapter); + ixgbe_init_interrupt_scheme(adapter); + + if (netif_running(dev)) + dev->netdev_ops->ndo_open(dev); +} +#endif + static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app) { @@ -739,9 +761,7 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) ixgbe_dcbnl_ieee_setets(dev, &ets); ixgbe_dcbnl_ieee_setpfc(dev, &pfc); } else if (mode & DCB_CAP_DCBX_VER_CEE) { - u8 mask = BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG; - - adapter->dcb_set_bitmap |= mask; + adapter->dcb_set_bitmap |= (BIT_PFC & BIT_PG_TX & BIT_PG_RX); ixgbe_dcbnl_set_all(dev); } else { /* Drop into single TC mode strict priority as this diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index da7e580f517a..70d58c3849b0 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -888,19 +888,23 @@ static void ixgbe_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + char firmware_version[32]; u32 nvm_track_id; - strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, ixgbe_driver_version, - sizeof(drvinfo->version)); + strncpy(drvinfo->driver, ixgbe_driver_name, + sizeof(drvinfo->driver) - 1); + strncpy(drvinfo->version, ixgbe_driver_version, + sizeof(drvinfo->version) - 1); nvm_track_id = (adapter->eeprom_verh << 16) | adapter->eeprom_verl; - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", + 
snprintf(firmware_version, sizeof(firmware_version), "0x%08x", nvm_track_id); - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info)); + strncpy(drvinfo->fw_version, firmware_version, + sizeof(drvinfo->fw_version) - 1); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info) - 1); drvinfo->n_stats = IXGBE_STATS_LEN; drvinfo->testinfo_len = IXGBE_TEST_LEN; drvinfo->regdump_len = ixgbe_get_regs_len(netdev); @@ -1955,21 +1959,12 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, /* WOL not supported except for the following */ switch(hw->device_id) { case IXGBE_DEV_ID_82599_SFP: - /* Only these subdevices could supports WOL */ - switch (hw->subsystem_device_id) { - case IXGBE_SUBDEV_ID_82599_560FLR: - /* only support first port */ - if (hw->bus.func != 0) { - wol->supported = 0; - break; - } - case IXGBE_SUBDEV_ID_82599_SFP: - retval = 0; - break; - default: + /* Only this subdevice supports WOL */ + if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) { wol->supported = 0; break; } + retval = 0; break; case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: /* All except this subdevice support WOL */ diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index d18d6157dd2c..df3b1be69d83 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -855,86 +855,3 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) } return rc; } - -/** - * ixgbe_fcoe_get_hbainfo - get FCoE HBA information - * @netdev : ixgbe adapter - * @info : HBA information - * - * Returns ixgbe HBA information - * - * Returns : 0 on success - */ -int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, - struct netdev_fcoe_hbainfo *info) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - int i, pos; - u8 buf[8]; - - if (!info) - return -EINVAL; - - /* Don't return information on unsupported devices */ - if (hw->mac.type != ixgbe_mac_82599EB && - hw->mac.type != ixgbe_mac_X540) - return -EINVAL; - - /* Manufacturer */ - snprintf(info->manufacturer, sizeof(info->manufacturer), - "Intel Corporation"); - - /* Serial Number */ - - /* Get the PCI-e Device Serial Number Capability */ - pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN); - if (pos) { - pos += 4; - for (i = 0; i < 8; i++) - pci_read_config_byte(adapter->pdev, pos + i, &buf[i]); - - snprintf(info->serial_number, sizeof(info->serial_number), - "%02X%02X%02X%02X%02X%02X%02X%02X", - buf[7], buf[6], buf[5], buf[4], - buf[3], buf[2], buf[1], buf[0]); - } else - snprintf(info->serial_number, sizeof(info->serial_number), - "Unknown"); - - /* Hardware Version */ - snprintf(info->hardware_version, - sizeof(info->hardware_version), - "Rev %d", hw->revision_id); - /* Driver Name/Version */ - snprintf(info->driver_version, - sizeof(info->driver_version), - "%s v%s", - ixgbe_driver_name, - ixgbe_driver_version); - /* Firmware Version */ - snprintf(info->firmware_version, - sizeof(info->firmware_version), - "0x%08x", - (adapter->eeprom_verh << 16) | - adapter->eeprom_verl); - - /* Model */ - if (hw->mac.type == ixgbe_mac_82599EB) { - snprintf(info->model, - sizeof(info->model), - "Intel 82599"); - } else { - snprintf(info->model, - sizeof(info->model), - "Intel X540"); - } - - /* Model Description */ - snprintf(info->model_description, - sizeof(info->model_description), - "%s", - ixgbe_default_device_descr); - - return 
0; -} diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 1ee5d0fbb905..8ef92d1a6aa1 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -55,8 +55,6 @@ char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = "Intel(R) 10 Gigabit PCI Express Network Driver"; -char ixgbe_default_device_descr[] = - "Intel(R) 10 Gigabit Network Connection"; #define MAJ 3 #define MIN 6 #define BUILD 7 @@ -108,7 +106,6 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, /* required last entry */ {0, } }; @@ -149,7 +146,7 @@ static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) { BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); - /* flush memory to make sure state is correct before next watchdog */ + /* flush memory to make sure state is correct before next watchog */ smp_mb__before_clear_bit(); clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); } @@ -1143,7 +1140,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) if (ring_is_ps_enabled(rx_ring)) { if (!bi->page) { - bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD); + bi->page = netdev_alloc_page(rx_ring->netdev); if (!bi->page) { rx_ring->rx_stats.alloc_rx_page_failed++; goto no_buffers; @@ -2159,7 +2156,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data) IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read - * therefore no explicit interrupt disable is necessary */ + * therefore no explict interrupt disable is necessary */ eicr = IXGBE_READ_REG(hw, IXGBE_EICR); if (!eicr) { /* @@ -3047,7 +3044,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) hw->mac.ops.enable_rx_dma(hw, rxctrl); } -static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; @@ -3056,11 +3053,9 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) /* add VID to filter table */ hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true); set_bit(vid, adapter->active_vlans); - - return 0; } -static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; @@ -3069,8 +3064,6 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) /* remove VID from filter table */ hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false); clear_bit(vid, adapter->active_vlans); - - return 0; } /** @@ -3609,7 +3602,7 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) { /* - * We are assuming the worst case scenario here, and that + * We are assuming the worst case scenerio here, and that * is that an SFP was inserted/removed after the reset * but before SFP detection was enabled. 
As such the best * solution is to just start searching as soon as we start @@ -3831,7 +3824,7 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) case IXGBE_ERR_EEPROM_VERSION: /* We are running on a pre-production device, log a warning */ e_dev_warn("This device is a pre-production adapter/LOM. " - "Please be aware there may be issues associated with " + "Please be aware there may be issuesassociated with " "your hardware. If you are experiencing problems " "please contact your Intel or hardware " "representative who provided you with this " @@ -4026,7 +4019,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) /* Mark all the VFs as inactive */ for (i = 0 ; i < adapter->num_vfs; i++) - adapter->vfinfo[i].clear_to_send = false; + adapter->vfinfo[i].clear_to_send = 0; /* ping all the active vfs to let them know we are going down */ ixgbe_ping_all_vfs(adapter); @@ -5795,9 +5788,9 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) * @adapter - pointer to the device adapter structure * * This function serves two purposes. First it strobes the interrupt lines - * in order to make certain interrupts are occurring. Secondly it sets the + * in order to make certain interrupts are occuring. Secondly it sets the * bits needed to check for TX hangs. As a result we should immediately - * determine if a hang has occurred. + * determine if a hang has occured. */ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) { @@ -7135,7 +7128,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) return -EINVAL; /* Hardware has to reinitialize queues and interrupts to - * match packet buffer alignment. Unfortunately, the + * match packet buffer alignment. Unfortunantly, the * hardware is not flexible enough to do this dynamically. */ if (netif_running(dev)) @@ -7181,8 +7174,7 @@ void ixgbe_do_reset(struct net_device *netdev) ixgbe_reset(adapter); } -static netdev_features_t ixgbe_fix_features(struct net_device *netdev, - netdev_features_t data) +static u32 ixgbe_fix_features(struct net_device *netdev, u32 data) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -7212,8 +7204,7 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev, return data; } -static int ixgbe_set_features(struct net_device *netdev, - netdev_features_t data) +static int ixgbe_set_features(struct net_device *netdev, u32 data) { struct ixgbe_adapter *adapter = netdev_priv(netdev); bool need_reset = false; @@ -7295,7 +7286,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_fcoe_enable = ixgbe_fcoe_enable, .ndo_fcoe_disable = ixgbe_fcoe_disable, .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn, - .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo, #endif /* IXGBE_FCOE */ .ndo_set_features = ixgbe_set_features, .ndo_fix_features = ixgbe_fix_features, @@ -7608,16 +7598,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, adapter->wol = 0; switch (pdev->device) { case IXGBE_DEV_ID_82599_SFP: - /* Only these subdevice supports WOL */ - switch (pdev->subsystem_device) { - case IXGBE_SUBDEV_ID_82599_560FLR: - /* only support first port */ - if (hw->bus.func != 0) - break; - case IXGBE_SUBDEV_ID_82599_SFP: + /* Only this subdevice supports WOL */ + if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP) adapter->wol = IXGBE_WUFC_MAG; - break; - } break; case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: /* All except this subdevice support WOL */ @@ -7725,7 +7708,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, /* add san mac addr to netdev */ ixgbe_add_sanmac_netdev(netdev); - 
e_dev_info("%s\n", ixgbe_default_device_descr); + e_dev_info("Intel(R) 10 Gigabit Network Connection\n"); cards_found++; return 0; diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 7cf1e1f56c69..9a56fd74e673 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -1214,7 +1214,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u32 max_retry = 10; u32 retry = 0; u16 swfw_mask = 0; - bool nack = true; + bool nack = 1; *data = 0; if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) @@ -1421,7 +1421,7 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw) static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) { s32 i; - bool bit = false; + bool bit = 0; for (i = 7; i >= 0; i--) { ixgbe_clock_in_i2c_bit(hw, &bit); @@ -1443,7 +1443,7 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) s32 status = 0; s32 i; u32 i2cctl; - bool bit = false; + bool bit = 0; for (i = 7; i >= 0; i--) { bit = (data >> i) & 0x1; @@ -1457,7 +1457,6 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); i2cctl |= IXGBE_I2C_DATA_OUT; IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl); - IXGBE_WRITE_FLUSH(hw); return status; } @@ -1474,7 +1473,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) u32 i = 0; u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); u32 timeout = 10; - bool ack = true; + bool ack = 1; ixgbe_raise_i2c_clk(hw, &i2cctl); @@ -1647,9 +1646,9 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl) bool data; if (*i2cctl & IXGBE_I2C_DATA_IN) - data = true; + data = 1; else - data = false; + data = 0; return data; } diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index cf6812dd1436..00fcd39ad666 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -572,7 +572,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) /* reply to reset with ack and vf mac address */ msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK; - memcpy(new_mac, vf_mac, ETH_ALEN); + memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS); /* * Piggyback the multicast filter type so VF can compute the * correct vectors diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index e8badab03359..df04f1a3857c 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -33,6 +33,7 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter); int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); +void ixgbe_dump_registers(struct ixgbe_adapter *adapter); int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, u8 qos); diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 802bfa0f62cc..6c5cca808bd7 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -57,7 +57,6 @@ #define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a #define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 #define 
IXGBE_SUBDEV_ID_82599_SFP 0x11A9 -#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 #define IXGBE_DEV_ID_82599_SFP_SF2 0x154D #define IXGBE_DEV_ID_82599EN_SFP 0x1557 @@ -66,7 +65,6 @@ #define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C #define IXGBE_DEV_ID_82599_LS 0x154F #define IXGBE_DEV_ID_X540T 0x1528 -#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A /* VF Device IDs */ #define IXGBE_DEV_ID_82599_VF 0x10ED @@ -1712,6 +1710,8 @@ enum { #define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ #define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define IXGBE_ETH_LENGTH_OF_ADDRESS 6 + #define IXGBE_EEPROM_PAGE_SIZE_MAX 128 #define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */ #define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */ @@ -2802,9 +2802,9 @@ struct ixgbe_eeprom_info { struct ixgbe_mac_info { struct ixgbe_mac_operations ops; enum ixgbe_mac_type type; - u8 addr[ETH_ALEN]; - u8 perm_addr[ETH_ALEN]; - u8 san_addr[ETH_ALEN]; + u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; /* prefix for World Wide Node Name (WWNN) */ u16 wwnn_prefix; /* prefix for World Wide Port Name (WWPN) */ diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 8cc5eccfd651..e5101e91b6b5 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -751,20 +751,16 @@ static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) { u32 macc_reg; u32 ledctl_reg; - ixgbe_link_speed speed; - bool link_up; /* - * Link should be up in order for the blink bit in the LED control - * register to work. Force link and speed in the MAC if link is down. - * This will be reversed when we stop the blinking. + * In order for the blink bit in the LED control register + * to work, link and speed must be forced in the MAC. We + * will reverse this when we stop the blinking. */ - hw->mac.ops.check_link(hw, &speed, &link_up, false); - if (link_up == false) { - macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); - macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS; - IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); - } + macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); + macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS; + IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); + /* Set the LED to LINK_UP + BLINK. 
*/ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); diff --git a/trunk/drivers/net/ethernet/intel/ixgbevf/defines.h b/trunk/drivers/net/ethernet/intel/ixgbevf/defines.h index 2eb89cb94a0d..78abb6f1a866 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/trunk/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -35,6 +35,7 @@ #define IXGBE_VF_IRQ_CLEAR_MASK 7 #define IXGBE_VF_MAX_TX_QUEUES 1 #define IXGBE_VF_MAX_RX_QUEUES 1 +#define IXGBE_ETH_LENGTH_OF_ADDRESS 6 /* Link speed */ typedef u32 ixgbe_link_speed; diff --git a/trunk/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/trunk/drivers/net/ethernet/intel/ixgbevf/ethtool.c index dc8e6511c640..e29ba4506b74 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/trunk/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -27,8 +27,6 @@ /* ethtool support for ixgbevf */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include #include @@ -267,11 +265,11 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev, { struct ixgbevf_adapter *adapter = netdev_priv(netdev); - strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, ixgbevf_driver_version, - sizeof(drvinfo->version)); - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info)); + strlcpy(drvinfo->driver, ixgbevf_driver_name, 32); + strlcpy(drvinfo->version, ixgbevf_driver_version, 32); + + strlcpy(drvinfo->fw_version, "N/A", 4); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); } static void ixgbevf_get_ringparam(struct net_device *netdev, @@ -551,8 +549,8 @@ static const u32 register_test_patterns[] = { writel((W & M), (adapter->hw.hw_addr + R)); \ val = readl(adapter->hw.hw_addr + R); \ if ((W & M) != (val & M)) { \ - pr_err("set/check reg %04X test failed: got 0x%08X expected " \ - "0x%08X\n", R, (val & M), (W & M)); \ + printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \ + "expected 0x%08X\n", R, (val & M), (W & M)); \ *data = R; \ writel(before, (adapter->hw.hw_addr + R)); \ return 1; \ diff --git a/trunk/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/trunk/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 891162d1610c..4c8e19951d57 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/trunk/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -29,9 +29,6 @@ /****************************************************************************** Copyright (c)2006 - 2007 Myricom, Inc. 
for some LRO specific code ******************************************************************************/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include #include @@ -366,7 +363,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter, if (!bi->page_dma && (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) { if (!bi->page) { - bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD); + bi->page = netdev_alloc_page(adapter->netdev); if (!bi->page) { adapter->alloc_rx_page_failed++; goto no_buffers; @@ -1403,7 +1400,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) } } -static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; @@ -1412,11 +1409,9 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) if (hw->mac.ops.set_vfta) hw->mac.ops.set_vfta(hw, vid, 0, true); set_bit(vid, adapter->active_vlans); - - return 0; } -static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; @@ -1425,8 +1420,6 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) if (hw->mac.ops.set_vfta) hw->mac.ops.set_vfta(hw, vid, 0, false); clear_bit(vid, adapter->active_vlans); - - return 0; } static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) @@ -1444,7 +1437,7 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev) int count = 0; if ((netdev_uc_count(netdev)) > 10) { - pr_err("Too many unicast filters - No Space\n"); + printk(KERN_ERR "Too many unicast filters - No Space\n"); return -ENOSPC; } @@ -2142,7 +2135,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) err = ixgbevf_alloc_queues(adapter); if (err) { - pr_err("Unable to allocate memory for queues\n"); + printk(KERN_ERR "Unable to allocate memory for queues\n"); goto err_alloc_queues; } @@ -2196,7 +2189,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter) } else { err = hw->mac.ops.init_hw(hw); if (err) { - pr_err("init_shared_code failed: %d\n", err); + printk(KERN_ERR "init_shared_code failed: %d\n", err); goto out; } } @@ -2637,8 +2630,8 @@ static int ixgbevf_open(struct net_device *netdev) * the vf can't start. 
*/ if (hw->adapter_stopped) { err = IXGBE_ERR_MBX; - pr_err("Unable to start - perhaps the PF Driver isn't " - "up yet\n"); + printk(KERN_ERR "Unable to start - perhaps the PF" + " Driver isn't up yet\n"); goto err_setup_reset; } } @@ -2849,8 +2842,10 @@ static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter, break; default: if (unlikely(net_ratelimit())) { - pr_warn("partial checksum but " - "proto=%x!\n", skb->protocol); + printk(KERN_WARNING + "partial checksum but " + "proto=%x!\n", + skb->protocol); } break; } @@ -3254,8 +3249,7 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, return stats; } -static int ixgbevf_set_features(struct net_device *netdev, - netdev_features_t features) +static int ixgbevf_set_features(struct net_device *netdev, u32 features) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); @@ -3420,7 +3414,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev, memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); if (!is_valid_ether_addr(netdev->dev_addr)) { - pr_err("invalid MAC address\n"); + printk(KERN_ERR "invalid MAC address\n"); err = -EIO; goto err_sw_init; } @@ -3541,10 +3535,10 @@ static struct pci_driver ixgbevf_driver = { static int __init ixgbevf_init_module(void) { int ret; - pr_info("%s - version %s\n", ixgbevf_driver_string, - ixgbevf_driver_version); + printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string, + ixgbevf_driver_version); - pr_info("%s\n", ixgbevf_copyright); + printk(KERN_INFO "%s\n", ixgbevf_copyright); ret = pci_register_driver(&ixgbevf_driver); return ret; diff --git a/trunk/drivers/net/ethernet/intel/ixgbevf/mbx.h b/trunk/drivers/net/ethernet/intel/ixgbevf/mbx.h index 9d38a94a348a..ea393eb03f3a 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/trunk/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -47,8 +47,8 @@ #define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ #define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ -#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * (x))) -#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * (vfn))) +#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x)) +#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn)) #define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ #define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ diff --git a/trunk/drivers/net/ethernet/intel/ixgbevf/regs.h b/trunk/drivers/net/ethernet/intel/ixgbevf/regs.h index 5e4d5e5cdf38..189200eeca26 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbevf/regs.h +++ b/trunk/drivers/net/ethernet/intel/ixgbevf/regs.h @@ -39,29 +39,29 @@ #define IXGBE_VTEIMC 0x0010C #define IXGBE_VTEIAC 0x00110 #define IXGBE_VTEIAM 0x00114 -#define IXGBE_VTEITR(x) (0x00820 + (4 * (x))) -#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x))) +#define IXGBE_VTEITR(x) (0x00820 + (4 * x)) +#define IXGBE_VTIVAR(x) (0x00120 + (4 * x)) #define IXGBE_VTIVAR_MISC 0x00140 -#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x))) -#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x))) -#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x))) -#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x))) -#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x))) -#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x))) -#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x))) -#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x))) -#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x))) +#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x)) +#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x)) +#define IXGBE_VFRDBAH(x) (0x01004 + 
(0x40 * x)) +#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x)) +#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x)) +#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x)) +#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x)) +#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x)) +#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x)) #define IXGBE_VFPSRTYPE 0x00300 -#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x))) -#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x))) -#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x))) -#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x))) -#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x))) -#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x))) -#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x))) -#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x))) -#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x))) -#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x))) +#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x)) +#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x)) +#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x)) +#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x)) +#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x)) +#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x)) +#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x)) +#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x)) +#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x)) +#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x)) #define IXGBE_VFGPRC 0x0101C #define IXGBE_VFGPTC 0x0201C #define IXGBE_VFGORC_LSB 0x01020 diff --git a/trunk/drivers/net/ethernet/intel/ixgbevf/vf.c b/trunk/drivers/net/ethernet/intel/ixgbevf/vf.c index 21533e300367..aa3682e8c473 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/trunk/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -108,7 +108,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK)) return IXGBE_ERR_INVALID_MAC_ADDR; - memcpy(hw->mac.perm_addr, addr, ETH_ALEN); + memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS); hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD]; return 0; @@ -211,7 +211,7 @@ static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) **/ static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr) { - memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN); + memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS); return 0; } diff --git a/trunk/drivers/net/ethernet/jme.c b/trunk/drivers/net/ethernet/jme.c index 27d651a80f3f..76b84573566b 100644 --- a/trunk/drivers/net/ethernet/jme.c +++ b/trunk/drivers/net/ethernet/jme.c @@ -1990,7 +1990,7 @@ jme_fill_tx_map(struct pci_dev *pdev, struct page *page, u32 page_offset, u32 len, - bool hidma) + u8 hidma) { dma_addr_t dmaaddr; @@ -2024,7 +2024,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) struct jme_ring *txring = &(jme->txring[0]); struct txdesc *txdesc = txring->desc, *ctxdesc; struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; - bool hidma = jme->dev->features & NETIF_F_HIGHDMA; + u8 hidma = jme->dev->features & NETIF_F_HIGHDMA; int i, nr_frags = skb_shinfo(skb)->nr_frags; int mask = jme->tx_ring_mask; const struct skb_frag_struct *frag; @@ -2399,9 +2399,9 @@ jme_get_drvinfo(struct net_device *netdev, { struct jme_adapter *jme = netdev_priv(netdev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, 
pci_name(jme->pdev)); } static int @@ -2727,8 +2727,8 @@ jme_set_msglevel(struct net_device *netdev, u32 value) jme->msg_enable = value; } -static netdev_features_t -jme_fix_features(struct net_device *netdev, netdev_features_t features) +static u32 +jme_fix_features(struct net_device *netdev, u32 features) { if (netdev->mtu > 1900) features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM); @@ -2736,7 +2736,7 @@ jme_fix_features(struct net_device *netdev, netdev_features_t features) } static int -jme_set_features(struct net_device *netdev, netdev_features_t features) +jme_set_features(struct net_device *netdev, u32 features) { struct jme_adapter *jme = netdev_priv(netdev); diff --git a/trunk/drivers/net/ethernet/korina.c b/trunk/drivers/net/ethernet/korina.c index 6ad094f176f8..d8430f487b84 100644 --- a/trunk/drivers/net/ethernet/korina.c +++ b/trunk/drivers/net/ethernet/korina.c @@ -1230,7 +1230,18 @@ static struct platform_driver korina_driver = { .remove = korina_remove, }; -module_platform_driver(korina_driver); +static int __init korina_init_module(void) +{ + return platform_driver_register(&korina_driver); +} + +static void korina_cleanup_module(void) +{ + return platform_driver_unregister(&korina_driver); +} + +module_init(korina_init_module); +module_exit(korina_cleanup_module); MODULE_AUTHOR("Philip Rischel "); MODULE_AUTHOR("Felix Fietkau "); diff --git a/trunk/drivers/net/ethernet/marvell/mv643xx_eth.c b/trunk/drivers/net/ethernet/marvell/mv643xx_eth.c index e87847e32ddb..194a03113802 100644 --- a/trunk/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/trunk/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1502,12 +1502,10 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) static void mv643xx_eth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { - strlcpy(drvinfo->driver, mv643xx_eth_driver_name, - sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, mv643xx_eth_driver_version, - sizeof(drvinfo->version)); - strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); - strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info)); + strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32); + strncpy(drvinfo->version, mv643xx_eth_driver_version, 32); + strncpy(drvinfo->fw_version, "N/A", 32); + strncpy(drvinfo->bus_info, "platform", 32); drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats); } @@ -1580,10 +1578,10 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er) static int -mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features) +mv643xx_eth_set_features(struct net_device *dev, u32 features) { struct mv643xx_eth_private *mp = netdev_priv(dev); - bool rx_csum = features & NETIF_F_RXCSUM; + u32 rx_csum = features & NETIF_F_RXCSUM; wrlp(mp, PORT_CONFIG, rx_csum ? 
0x02000000 : 0x00000000); diff --git a/trunk/drivers/net/ethernet/marvell/pxa168_eth.c b/trunk/drivers/net/ethernet/marvell/pxa168_eth.c index 5ec409e3da09..d17d0624c5e6 100644 --- a/trunk/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/trunk/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1645,7 +1645,18 @@ static struct platform_driver pxa168_eth_driver = { }, }; -module_platform_driver(pxa168_eth_driver); +static int __init pxa168_init_module(void) +{ + return platform_driver_register(&pxa168_eth_driver); +} + +static void __exit pxa168_cleanup_module(void) +{ + platform_driver_unregister(&pxa168_eth_driver); +} + +module_init(pxa168_init_module); +module_exit(pxa168_cleanup_module); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168"); diff --git a/trunk/drivers/net/ethernet/marvell/skge.c b/trunk/drivers/net/ethernet/marvell/skge.c index 18a87a57fc0a..c7b60839ac99 100644 --- a/trunk/drivers/net/ethernet/marvell/skge.c +++ b/trunk/drivers/net/ethernet/marvell/skge.c @@ -394,10 +394,10 @@ static void skge_get_drvinfo(struct net_device *dev, { struct skge_port *skge = netdev_priv(dev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(skge->hw->pdev), - sizeof(info->bus_info)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->fw_version, "N/A"); + strcpy(info->bus_info, pci_name(skge->hw->pdev)); } static const struct skge_stat { @@ -2606,9 +2606,6 @@ static int skge_up(struct net_device *dev) spin_unlock_irq(&hw->hw_lock); napi_enable(&skge->napi); - - skge_set_multicast(dev); - return 0; free_tx_ring: @@ -4042,7 +4039,7 @@ static void __devexit skge_remove(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); } -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_PM static int skge_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); @@ -4104,7 +4101,7 @@ static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend, skge_resume); #else #define SKGE_PM_OPS NULL -#endif /* CONFIG_PM_SLEEP */ +#endif static void skge_shutdown(struct pci_dev *pdev) { diff --git a/trunk/drivers/net/ethernet/marvell/sky2.c b/trunk/drivers/net/ethernet/marvell/sky2.c index 760c2b17dfd3..7803efa46eb2 100644 --- a/trunk/drivers/net/ethernet/marvell/sky2.c +++ b/trunk/drivers/net/ethernet/marvell/sky2.c @@ -1110,7 +1110,6 @@ static void tx_init(struct sky2_port *sky2) sky2->tx_prod = sky2->tx_cons = 0; sky2->tx_tcpsum = 0; sky2->tx_last_mss = 0; - netdev_reset_queue(sky2->netdev); le = get_tx_le(sky2, &sky2->tx_prod); le->addr = 0; @@ -1285,7 +1284,7 @@ static const uint32_t rss_init_key[10] = { }; /* Enable/disable receive hash calculation (RSS) */ -static void rx_set_rss(struct net_device *dev, netdev_features_t features) +static void rx_set_rss(struct net_device *dev, u32 features) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; @@ -1403,7 +1402,7 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) #define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO) -static void sky2_vlan_mode(struct net_device *dev, netdev_features_t features) +static void sky2_vlan_mode(struct net_device *dev, u32 features) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; @@ -1972,7 +1971,6 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb, if (tx_avail(sky2) <= MAX_SKB_TX_LE) netif_stop_queue(dev); - netdev_sent_queue(dev, skb->len); sky2_put_idx(hw, txqaddr[sky2->port], 
sky2->tx_prod); return NETDEV_TX_OK; @@ -2004,8 +2002,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb, static void sky2_tx_complete(struct sky2_port *sky2, u16 done) { struct net_device *dev = sky2->netdev; - u16 idx; - unsigned int bytes_compl = 0, pkts_compl = 0; + unsigned idx; BUG_ON(done >= sky2->tx_ring_size); @@ -2020,8 +2017,10 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) netif_printk(sky2, tx_done, KERN_DEBUG, dev, "tx done %u\n", idx); - pkts_compl++; - bytes_compl += skb->len; + u64_stats_update_begin(&sky2->tx_stats.syncp); + ++sky2->tx_stats.packets; + sky2->tx_stats.bytes += skb->len; + u64_stats_update_end(&sky2->tx_stats.syncp); re->skb = NULL; dev_kfree_skb_any(skb); @@ -2032,13 +2031,6 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) sky2->tx_cons = idx; smp_mb(); - - netdev_completed_queue(dev, pkts_compl, bytes_compl); - - u64_stats_update_begin(&sky2->tx_stats.syncp); - sky2->tx_stats.packets += pkts_compl; - sky2->tx_stats.bytes += bytes_compl; - u64_stats_update_end(&sky2->tx_stats.syncp); } static void sky2_tx_reset(struct sky2_hw *hw, unsigned port) @@ -3651,10 +3643,10 @@ static void sky2_get_drvinfo(struct net_device *dev, { struct sky2_port *sky2 = netdev_priv(dev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(sky2->hw->pdev), - sizeof(info->bus_info)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->fw_version, "N/A"); + strcpy(info->bus_info, pci_name(sky2->hw->pdev)); } static const struct sky2_stat { @@ -4319,8 +4311,7 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len); } -static netdev_features_t sky2_fix_features(struct net_device *dev, - netdev_features_t features) +static u32 sky2_fix_features(struct net_device *dev, u32 features) { const struct sky2_port *sky2 = netdev_priv(dev); const struct sky2_hw *hw = sky2->hw; @@ -4344,13 +4335,13 @@ static netdev_features_t sky2_fix_features(struct net_device *dev, return features; } -static int sky2_set_features(struct net_device *dev, netdev_features_t features) +static int sky2_set_features(struct net_device *dev, u32 features) { struct sky2_port *sky2 = netdev_priv(dev); - netdev_features_t changed = dev->features ^ features; + u32 changed = dev->features ^ features; if (changed & NETIF_F_RXCSUM) { - bool on = features & NETIF_F_RXCSUM; + u32 on = features & NETIF_F_RXCSUM; sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), on ? 
BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); } diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/Makefile b/trunk/drivers/net/ethernet/mellanox/mlx4/Makefile index 4a40ab967eeb..d1aa45a15854 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/Makefile +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/Makefile @@ -1,7 +1,7 @@ obj-$(CONFIG_MLX4_CORE) += mlx4_core.o mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \ - mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o resource_tracker.o + mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o obj-$(CONFIG_MLX4_EN) += mlx4_en.o diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/catas.c b/trunk/drivers/net/ethernet/mellanox/mlx4/catas.c index 915e947b422d..45aea9c3ae2c 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -48,8 +48,7 @@ static struct work_struct catas_work; static int internal_err_reset = 1; module_param(internal_err_reset, int, 0644); MODULE_PARM_DESC(internal_err_reset, - "Reset device on internal errors if non-zero" - " (default 1, in SRIOV mode default is 0)"); + "Reset device on internal errors if non-zero (default 1)"); static void dump_err_buf(struct mlx4_dev *dev) { @@ -117,10 +116,6 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); phys_addr_t addr; - /*If we are in SRIOV the default of the module param must be 0*/ - if (mlx4_is_mfunc(dev)) - internal_err_reset = 0; - INIT_LIST_HEAD(&priv->catas_err.list); init_timer(&priv->catas_err.timer); priv->catas_err.map = NULL; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/cmd.c b/trunk/drivers/net/ethernet/mellanox/mlx4/cmd.c index 978f593094c0..78f5a1a0b8c8 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -39,18 +39,12 @@ #include #include -#include #include #include "mlx4.h" -#include "fw.h" #define CMD_POLL_TOKEN 0xffff -#define INBOX_MASK 0xffffffffffffff00ULL - -#define CMD_CHAN_VER 1 -#define CMD_CHAN_IF_REV 1 enum { /* command completed successfully: */ @@ -116,12 +110,8 @@ struct mlx4_cmd_context { int next; u64 out_param; u16 token; - u8 fw_status; }; -static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr_cmd *in_vhcr); - static int mlx4_status_to_errno(u8 status) { static const int trans_table[] = { @@ -152,139 +142,6 @@ static int mlx4_status_to_errno(u8 status) return trans_table[status]; } -static u8 mlx4_errno_to_status(int errno) -{ - switch (errno) { - case -EPERM: - return CMD_STAT_BAD_OP; - case -EINVAL: - return CMD_STAT_BAD_PARAM; - case -ENXIO: - return CMD_STAT_BAD_SYS_STATE; - case -EBUSY: - return CMD_STAT_RESOURCE_BUSY; - case -ENOMEM: - return CMD_STAT_EXCEED_LIM; - case -ENFILE: - return CMD_STAT_ICM_ERROR; - default: - return CMD_STAT_INTERNAL_ERR; - } -} - -static int comm_pending(struct mlx4_dev *dev) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - u32 status = readl(&priv->mfunc.comm->slave_read); - - return (swab32(status) >> 31) != priv->cmd.comm_toggle; -} - -static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - u32 val; - - priv->cmd.comm_toggle ^= 1; - val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31); - __raw_writel((__force u32) cpu_to_be32(val), - &priv->mfunc.comm->slave_write); - mmiowb(); -} - -static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param, - unsigned long timeout) -{ - struct mlx4_priv *priv = 
mlx4_priv(dev); - unsigned long end; - int err = 0; - int ret_from_pending = 0; - - /* First, verify that the master reports correct status */ - if (comm_pending(dev)) { - mlx4_warn(dev, "Communication channel is not idle." - "my toggle is %d (cmd:0x%x)\n", - priv->cmd.comm_toggle, cmd); - return -EAGAIN; - } - - /* Write command */ - down(&priv->cmd.poll_sem); - mlx4_comm_cmd_post(dev, cmd, param); - - end = msecs_to_jiffies(timeout) + jiffies; - while (comm_pending(dev) && time_before(jiffies, end)) - cond_resched(); - ret_from_pending = comm_pending(dev); - if (ret_from_pending) { - /* check if the slave is trying to boot in the middle of - * FLR process. The only non-zero result in the RESET command - * is MLX4_DELAY_RESET_SLAVE*/ - if ((MLX4_COMM_CMD_RESET == cmd)) { - mlx4_warn(dev, "Got slave FLRed from Communication" - " channel (ret:0x%x)\n", ret_from_pending); - err = MLX4_DELAY_RESET_SLAVE; - } else { - mlx4_warn(dev, "Communication channel timed out\n"); - err = -ETIMEDOUT; - } - } - - up(&priv->cmd.poll_sem); - return err; -} - -static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op, - u16 param, unsigned long timeout) -{ - struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; - struct mlx4_cmd_context *context; - int err = 0; - - down(&cmd->event_sem); - - spin_lock(&cmd->context_lock); - BUG_ON(cmd->free_head < 0); - context = &cmd->context[cmd->free_head]; - context->token += cmd->token_mask + 1; - cmd->free_head = context->next; - spin_unlock(&cmd->context_lock); - - init_completion(&context->done); - - mlx4_comm_cmd_post(dev, op, param); - - if (!wait_for_completion_timeout(&context->done, - msecs_to_jiffies(timeout))) { - err = -EBUSY; - goto out; - } - - err = context->result; - if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) { - mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", - op, context->fw_status); - goto out; - } - -out: - spin_lock(&cmd->context_lock); - context->next = cmd->free_head; - cmd->free_head = context - cmd->context; - spin_unlock(&cmd->context_lock); - - up(&cmd->event_sem); - return err; -} - -int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param, - unsigned long timeout) -{ - if (mlx4_priv(dev)->cmd.use_events) - return mlx4_comm_cmd_wait(dev, cmd, param, timeout); - return mlx4_comm_cmd_poll(dev, cmd, param, timeout); -} - static int cmd_pending(struct mlx4_dev *dev) { u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET); @@ -310,10 +167,8 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param, end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS); while (cmd_pending(dev)) { - if (time_after_eq(jiffies, end)) { - mlx4_err(dev, "%s:cmd_pending failed\n", __func__); + if (time_after_eq(jiffies, end)) goto out; - } cond_resched(); } @@ -337,7 +192,7 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param, (cmd->toggle << HCR_T_BIT) | (event ? (1 << HCR_E_BIT) : 0) | (op_modifier << HCR_OPMOD_SHIFT) | - op), hcr + 6); + op), hcr + 6); /* * Make sure that our HCR writes don't get mixed in with @@ -354,62 +209,6 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param, return ret; } -static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, - int out_is_imm, u32 in_modifier, u8 op_modifier, - u16 op, unsigned long timeout) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr; - int ret; - - down(&priv->cmd.slave_sem); - vhcr->in_param = cpu_to_be64(in_param); - vhcr->out_param = out_param ? 
cpu_to_be64(*out_param) : 0; - vhcr->in_modifier = cpu_to_be32(in_modifier); - vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff)); - vhcr->token = cpu_to_be16(CMD_POLL_TOKEN); - vhcr->status = 0; - vhcr->flags = !!(priv->cmd.use_events) << 6; - if (mlx4_is_master(dev)) { - ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr); - if (!ret) { - if (out_is_imm) { - if (out_param) - *out_param = - be64_to_cpu(vhcr->out_param); - else { - mlx4_err(dev, "response expected while" - "output mailbox is NULL for " - "command 0x%x\n", op); - vhcr->status = CMD_STAT_BAD_PARAM; - } - } - ret = mlx4_status_to_errno(vhcr->status); - } - } else { - ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, - MLX4_COMM_TIME + timeout); - if (!ret) { - if (out_is_imm) { - if (out_param) - *out_param = - be64_to_cpu(vhcr->out_param); - else { - mlx4_err(dev, "response expected while" - "output mailbox is NULL for " - "command 0x%x\n", op); - vhcr->status = CMD_STAT_BAD_PARAM; - } - } - ret = mlx4_status_to_errno(vhcr->status); - } else - mlx4_err(dev, "failed execution of VHCR_POST command" - "opcode 0x%x\n", op); - } - up(&priv->cmd.slave_sem); - return ret; -} - static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, int out_is_imm, u32 in_modifier, u8 op_modifier, u16 op, unsigned long timeout) @@ -418,7 +217,6 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, void __iomem *hcr = priv->cmd.hcr; int err = 0; unsigned long end; - u32 stat; down(&priv->cmd.poll_sem); @@ -442,12 +240,9 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 | (u64) be32_to_cpu((__force __be32) __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4)); - stat = be32_to_cpu((__force __be32) - __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24; - err = mlx4_status_to_errno(stat); - if (err) - mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", - op, stat); + + err = mlx4_status_to_errno(be32_to_cpu((__force __be32) + __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24); out: up(&priv->cmd.poll_sem); @@ -464,7 +259,6 @@ void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param) if (token != context->token) return; - context->fw_status = status; context->result = mlx4_status_to_errno(status); context->out_param = out_param; @@ -493,18 +287,14 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, mlx4_cmd_post(dev, in_param, out_param ? 
*out_param : 0, in_modifier, op_modifier, op, context->token, 1); - if (!wait_for_completion_timeout(&context->done, - msecs_to_jiffies(timeout))) { + if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) { err = -EBUSY; goto out; } err = context->result; - if (err) { - mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", - op, context->fw_status); + if (err) goto out; - } if (out_is_imm) *out_param = context->out_param; @@ -521,1045 +311,16 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, int out_is_imm, u32 in_modifier, u8 op_modifier, - u16 op, unsigned long timeout, int native) -{ - if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) { - if (mlx4_priv(dev)->cmd.use_events) - return mlx4_cmd_wait(dev, in_param, out_param, - out_is_imm, in_modifier, - op_modifier, op, timeout); - else - return mlx4_cmd_poll(dev, in_param, out_param, - out_is_imm, in_modifier, - op_modifier, op, timeout); - } - return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm, - in_modifier, op_modifier, op, timeout); -} -EXPORT_SYMBOL_GPL(__mlx4_cmd); - - -static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev) -{ - return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); -} - -static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr, - int slave, u64 slave_addr, - int size, int is_read) -{ - u64 in_param; - u64 out_param; - - if ((slave_addr & 0xfff) | (master_addr & 0xfff) | - (slave & ~0x7f) | (size & 0xff)) { - mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx " - "master_addr:0x%llx slave_id:%d size:%d\n", - slave_addr, master_addr, slave, size); - return -EINVAL; - } - - if (is_read) { - in_param = (u64) slave | slave_addr; - out_param = (u64) dev->caps.function | master_addr; - } else { - in_param = (u64) dev->caps.function | master_addr; - out_param = (u64) slave | slave_addr; - } - - return mlx4_cmd_imm(dev, in_param, &out_param, size, 0, - MLX4_CMD_ACCESS_MEM, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); -} - -int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - u64 in_param; - u64 out_param; - int err; - - in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param; - out_param = cmd->has_outbox ? 
(u64) outbox->dma : vhcr->out_param; - if (cmd->encode_slave_id) { - in_param &= 0xffffffffffffff00ll; - in_param |= slave; - } - - err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm, - vhcr->in_modifier, vhcr->op_modifier, vhcr->op, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); - - if (cmd->out_is_imm) - vhcr->out_param = out_param; - - return err; -} - -static struct mlx4_cmd_info cmd_info[] = { - { - .opcode = MLX4_CMD_QUERY_FW, - .has_inbox = false, - .has_outbox = true, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = NULL - }, - { - .opcode = MLX4_CMD_QUERY_HCA, - .has_inbox = false, - .has_outbox = true, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = NULL - }, - { - .opcode = MLX4_CMD_QUERY_DEV_CAP, - .has_inbox = false, - .has_outbox = true, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = NULL - }, - { - .opcode = MLX4_CMD_QUERY_FUNC_CAP, - .has_inbox = false, - .has_outbox = true, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_QUERY_FUNC_CAP_wrapper - }, - { - .opcode = MLX4_CMD_QUERY_ADAPTER, - .has_inbox = false, - .has_outbox = true, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = NULL - }, - { - .opcode = MLX4_CMD_INIT_PORT, - .has_inbox = false, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_INIT_PORT_wrapper - }, - { - .opcode = MLX4_CMD_CLOSE_PORT, - .has_inbox = false, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_CLOSE_PORT_wrapper - }, - { - .opcode = MLX4_CMD_QUERY_PORT, - .has_inbox = false, - .has_outbox = true, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_QUERY_PORT_wrapper - }, - { - .opcode = MLX4_CMD_SET_PORT, - .has_inbox = true, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_SET_PORT_wrapper - }, - { - .opcode = MLX4_CMD_MAP_EQ, - .has_inbox = false, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_MAP_EQ_wrapper - }, - { - .opcode = MLX4_CMD_SW2HW_EQ, - .has_inbox = true, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = true, - .verify = NULL, - .wrapper = mlx4_SW2HW_EQ_wrapper - }, - { - .opcode = MLX4_CMD_HW_HEALTH_CHECK, - .has_inbox = false, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = NULL - }, - { - .opcode = MLX4_CMD_NOP, - .has_inbox = false, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = NULL - }, - { - .opcode = MLX4_CMD_ALLOC_RES, - .has_inbox = false, - .has_outbox = false, - .out_is_imm = true, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_ALLOC_RES_wrapper - }, - { - .opcode = MLX4_CMD_FREE_RES, - .has_inbox = false, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_FREE_RES_wrapper - }, - { - .opcode = MLX4_CMD_SW2HW_MPT, - .has_inbox = true, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = true, - .verify = NULL, - .wrapper = mlx4_SW2HW_MPT_wrapper - }, - { - .opcode = MLX4_CMD_QUERY_MPT, - .has_inbox = false, - .has_outbox = true, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_QUERY_MPT_wrapper - }, - { - 
.opcode = MLX4_CMD_HW2SW_MPT, - .has_inbox = false, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_HW2SW_MPT_wrapper - }, - { - .opcode = MLX4_CMD_READ_MTT, - .has_inbox = false, - .has_outbox = true, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = NULL - }, - { - .opcode = MLX4_CMD_WRITE_MTT, - .has_inbox = true, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_WRITE_MTT_wrapper - }, - { - .opcode = MLX4_CMD_SYNC_TPT, - .has_inbox = true, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = NULL - }, - { - .opcode = MLX4_CMD_HW2SW_EQ, - .has_inbox = false, - .has_outbox = true, - .out_is_imm = false, - .encode_slave_id = true, - .verify = NULL, - .wrapper = mlx4_HW2SW_EQ_wrapper - }, - { - .opcode = MLX4_CMD_QUERY_EQ, - .has_inbox = false, - .has_outbox = true, - .out_is_imm = false, - .encode_slave_id = true, - .verify = NULL, - .wrapper = mlx4_QUERY_EQ_wrapper - }, - { - .opcode = MLX4_CMD_SW2HW_CQ, - .has_inbox = true, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = true, - .verify = NULL, - .wrapper = mlx4_SW2HW_CQ_wrapper - }, - { - .opcode = MLX4_CMD_HW2SW_CQ, - .has_inbox = false, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_HW2SW_CQ_wrapper - }, - { - .opcode = MLX4_CMD_QUERY_CQ, - .has_inbox = false, - .has_outbox = true, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_QUERY_CQ_wrapper - }, - { - .opcode = MLX4_CMD_MODIFY_CQ, - .has_inbox = true, - .has_outbox = false, - .out_is_imm = true, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_MODIFY_CQ_wrapper - }, - { - .opcode = MLX4_CMD_SW2HW_SRQ, - .has_inbox = true, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = true, - .verify = NULL, - .wrapper = mlx4_SW2HW_SRQ_wrapper - }, - { - .opcode = MLX4_CMD_HW2SW_SRQ, - .has_inbox = false, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_HW2SW_SRQ_wrapper - }, - { - .opcode = MLX4_CMD_QUERY_SRQ, - .has_inbox = false, - .has_outbox = true, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_QUERY_SRQ_wrapper - }, - { - .opcode = MLX4_CMD_ARM_SRQ, - .has_inbox = false, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_ARM_SRQ_wrapper - }, - { - .opcode = MLX4_CMD_RST2INIT_QP, - .has_inbox = true, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = true, - .verify = NULL, - .wrapper = mlx4_RST2INIT_QP_wrapper - }, - { - .opcode = MLX4_CMD_INIT2INIT_QP, - .has_inbox = true, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_GEN_QP_wrapper - }, - { - .opcode = MLX4_CMD_INIT2RTR_QP, - .has_inbox = true, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_INIT2RTR_QP_wrapper - }, - { - .opcode = MLX4_CMD_RTR2RTS_QP, - .has_inbox = true, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_GEN_QP_wrapper - }, - { - .opcode = MLX4_CMD_RTS2RTS_QP, - .has_inbox = true, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_GEN_QP_wrapper - }, 
- { - .opcode = MLX4_CMD_SQERR2RTS_QP, - .has_inbox = true, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_GEN_QP_wrapper - }, - { - .opcode = MLX4_CMD_2ERR_QP, - .has_inbox = false, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_GEN_QP_wrapper - }, - { - .opcode = MLX4_CMD_RTS2SQD_QP, - .has_inbox = false, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_GEN_QP_wrapper - }, - { - .opcode = MLX4_CMD_SQD2SQD_QP, - .has_inbox = true, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_GEN_QP_wrapper - }, - { - .opcode = MLX4_CMD_SQD2RTS_QP, - .has_inbox = true, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_GEN_QP_wrapper - }, - { - .opcode = MLX4_CMD_2RST_QP, - .has_inbox = false, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_2RST_QP_wrapper - }, - { - .opcode = MLX4_CMD_QUERY_QP, - .has_inbox = false, - .has_outbox = true, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_GEN_QP_wrapper - }, - { - .opcode = MLX4_CMD_SUSPEND_QP, - .has_inbox = false, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_GEN_QP_wrapper - }, - { - .opcode = MLX4_CMD_UNSUSPEND_QP, - .has_inbox = false, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_GEN_QP_wrapper - }, - { - .opcode = MLX4_CMD_QUERY_IF_STAT, - .has_inbox = false, - .has_outbox = true, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_QUERY_IF_STAT_wrapper - }, - /* Native multicast commands are not available for guests */ - { - .opcode = MLX4_CMD_QP_ATTACH, - .has_inbox = true, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_QP_ATTACH_wrapper - }, - { - .opcode = MLX4_CMD_PROMISC, - .has_inbox = false, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_PROMISC_wrapper - }, - /* Ethernet specific commands */ - { - .opcode = MLX4_CMD_SET_VLAN_FLTR, - .has_inbox = true, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_SET_VLAN_FLTR_wrapper - }, - { - .opcode = MLX4_CMD_SET_MCAST_FLTR, - .has_inbox = false, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_SET_MCAST_FLTR_wrapper - }, - { - .opcode = MLX4_CMD_DUMP_ETH_STATS, - .has_inbox = false, - .has_outbox = true, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = mlx4_DUMP_ETH_STATS_wrapper - }, - { - .opcode = MLX4_CMD_INFORM_FLR_DONE, - .has_inbox = false, - .has_outbox = false, - .out_is_imm = false, - .encode_slave_id = false, - .verify = NULL, - .wrapper = NULL - }, -}; - -static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr_cmd *in_vhcr) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_cmd_info *cmd = NULL; - struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? 
in_vhcr : priv->mfunc.vhcr; - struct mlx4_vhcr *vhcr; - struct mlx4_cmd_mailbox *inbox = NULL; - struct mlx4_cmd_mailbox *outbox = NULL; - u64 in_param; - u64 out_param; - int ret = 0; - int i; - int err = 0; - - /* Create sw representation of Virtual HCR */ - vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL); - if (!vhcr) - return -ENOMEM; - - /* DMA in the vHCR */ - if (!in_vhcr) { - ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave, - priv->mfunc.master.slave_state[slave].vhcr_dma, - ALIGN(sizeof(struct mlx4_vhcr_cmd), - MLX4_ACCESS_MEM_ALIGN), 1); - if (ret) { - mlx4_err(dev, "%s:Failed reading vhcr" - "ret: 0x%x\n", __func__, ret); - kfree(vhcr); - return ret; - } - } - - /* Fill SW VHCR fields */ - vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param); - vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param); - vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier); - vhcr->token = be16_to_cpu(vhcr_cmd->token); - vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff; - vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12); - vhcr->e_bit = vhcr_cmd->flags & (1 << 6); - - /* Lookup command */ - for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) { - if (vhcr->op == cmd_info[i].opcode) { - cmd = &cmd_info[i]; - break; - } - } - if (!cmd) { - mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n", - vhcr->op, slave); - vhcr_cmd->status = CMD_STAT_BAD_PARAM; - goto out_status; - } - - /* Read inbox */ - if (cmd->has_inbox) { - vhcr->in_param &= INBOX_MASK; - inbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(inbox)) { - vhcr_cmd->status = CMD_STAT_BAD_SIZE; - inbox = NULL; - goto out_status; - } - - if (mlx4_ACCESS_MEM(dev, inbox->dma, slave, - vhcr->in_param, - MLX4_MAILBOX_SIZE, 1)) { - mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n", - __func__, cmd->opcode); - vhcr_cmd->status = CMD_STAT_INTERNAL_ERR; - goto out_status; - } - } - - /* Apply permission and bound checks if applicable */ - if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) { - mlx4_warn(dev, "Command:0x%x from slave: %d failed protection " - "checks for resource_id:%d\n", vhcr->op, slave, - vhcr->in_modifier); - vhcr_cmd->status = CMD_STAT_BAD_OP; - goto out_status; - } - - /* Allocate outbox */ - if (cmd->has_outbox) { - outbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(outbox)) { - vhcr_cmd->status = CMD_STAT_BAD_SIZE; - outbox = NULL; - goto out_status; - } - } - - /* Execute the command! */ - if (cmd->wrapper) { - err = cmd->wrapper(dev, slave, vhcr, inbox, outbox, - cmd); - if (cmd->out_is_imm) - vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param); - } else { - in_param = cmd->has_inbox ? (u64) inbox->dma : - vhcr->in_param; - out_param = cmd->has_outbox ? 
(u64) outbox->dma : - vhcr->out_param; - err = __mlx4_cmd(dev, in_param, &out_param, - cmd->out_is_imm, vhcr->in_modifier, - vhcr->op_modifier, vhcr->op, - MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_NATIVE); - - if (cmd->out_is_imm) { - vhcr->out_param = out_param; - vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param); - } - } - - if (err) { - mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with" - " error:%d, status %d\n", - vhcr->op, slave, vhcr->errno, err); - vhcr_cmd->status = mlx4_errno_to_status(err); - goto out_status; - } - - - /* Write outbox if command completed successfully */ - if (cmd->has_outbox && !vhcr_cmd->status) { - ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave, - vhcr->out_param, - MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED); - if (ret) { - /* If we failed to write back the outbox after the - *command was successfully executed, we must fail this - * slave, as it is now in undefined state */ - mlx4_err(dev, "%s:Failed writing outbox\n", __func__); - goto out; - } - } - -out_status: - /* DMA back vhcr result */ - if (!in_vhcr) { - ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave, - priv->mfunc.master.slave_state[slave].vhcr_dma, - ALIGN(sizeof(struct mlx4_vhcr), - MLX4_ACCESS_MEM_ALIGN), - MLX4_CMD_WRAPPED); - if (ret) - mlx4_err(dev, "%s:Failed writing vhcr result\n", - __func__); - else if (vhcr->e_bit && - mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe)) - mlx4_warn(dev, "Failed to generate command completion " - "eqe for slave %d\n", slave); - } - -out: - kfree(vhcr); - mlx4_free_cmd_mailbox(dev, inbox); - mlx4_free_cmd_mailbox(dev, outbox); - return ret; -} - -static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, - u16 param, u8 toggle) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; - u32 reply; - u32 slave_status = 0; - u8 is_going_down = 0; - - slave_state[slave].comm_toggle ^= 1; - reply = (u32) slave_state[slave].comm_toggle << 31; - if (toggle != slave_state[slave].comm_toggle) { - mlx4_warn(dev, "Incorrect toggle %d from slave %d. 
*** MASTER" - "STATE COMPROMISIED ***\n", toggle, slave); - goto reset_slave; - } - if (cmd == MLX4_COMM_CMD_RESET) { - mlx4_warn(dev, "Received reset from slave:%d\n", slave); - slave_state[slave].active = false; - /*check if we are in the middle of FLR process, - if so return "retry" status to the slave*/ - if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) { - slave_status = MLX4_DELAY_RESET_SLAVE; - goto inform_slave_state; - } - - /* write the version in the event field */ - reply |= mlx4_comm_get_version(); - - goto reset_slave; - } - /*command from slave in the middle of FLR*/ - if (cmd != MLX4_COMM_CMD_RESET && - MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) { - mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) " - "in the middle of FLR\n", slave, cmd); - return; - } - - switch (cmd) { - case MLX4_COMM_CMD_VHCR0: - if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET) - goto reset_slave; - slave_state[slave].vhcr_dma = ((u64) param) << 48; - priv->mfunc.master.slave_state[slave].cookie = 0; - mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]); - break; - case MLX4_COMM_CMD_VHCR1: - if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0) - goto reset_slave; - slave_state[slave].vhcr_dma |= ((u64) param) << 32; - break; - case MLX4_COMM_CMD_VHCR2: - if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1) - goto reset_slave; - slave_state[slave].vhcr_dma |= ((u64) param) << 16; - break; - case MLX4_COMM_CMD_VHCR_EN: - if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2) - goto reset_slave; - slave_state[slave].vhcr_dma |= param; - slave_state[slave].active = true; - break; - case MLX4_COMM_CMD_VHCR_POST: - if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) && - (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) - goto reset_slave; - down(&priv->cmd.slave_sem); - if (mlx4_master_process_vhcr(dev, slave, NULL)) { - mlx4_err(dev, "Failed processing vhcr for slave:%d," - " reseting slave.\n", slave); - up(&priv->cmd.slave_sem); - goto reset_slave; - } - up(&priv->cmd.slave_sem); - break; - default: - mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave); - goto reset_slave; - } - spin_lock(&priv->mfunc.master.slave_state_lock); - if (!slave_state[slave].is_slave_going_down) - slave_state[slave].last_cmd = cmd; - else - is_going_down = 1; - spin_unlock(&priv->mfunc.master.slave_state_lock); - if (is_going_down) { - mlx4_warn(dev, "Slave is going down aborting command(%d)" - " executing from slave:%d\n", - cmd, slave); - return; - } - __raw_writel((__force u32) cpu_to_be32(reply), - &priv->mfunc.comm[slave].slave_read); - mmiowb(); - - return; - -reset_slave: - /* cleanup any slave resources */ - mlx4_delete_all_resources_for_slave(dev, slave); - spin_lock(&priv->mfunc.master.slave_state_lock); - if (!slave_state[slave].is_slave_going_down) - slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET; - spin_unlock(&priv->mfunc.master.slave_state_lock); - /*with slave in the middle of flr, no need to clean resources again.*/ -inform_slave_state: - memset(&slave_state[slave].event_eq, 0, - sizeof(struct mlx4_slave_event_eq_info)); - __raw_writel((__force u32) cpu_to_be32(reply), - &priv->mfunc.comm[slave].slave_read); - wmb(); -} - -/* master command processing */ -void mlx4_master_comm_channel(struct work_struct *work) -{ - struct mlx4_mfunc_master_ctx *master = - container_of(work, - struct mlx4_mfunc_master_ctx, - comm_work); - struct mlx4_mfunc *mfunc = - container_of(master, struct mlx4_mfunc, master); - struct mlx4_priv *priv = - container_of(mfunc, struct 
mlx4_priv, mfunc); - struct mlx4_dev *dev = &priv->dev; - __be32 *bit_vec; - u32 comm_cmd; - u32 vec; - int i, j, slave; - int toggle; - int served = 0; - int reported = 0; - u32 slt; - - bit_vec = master->comm_arm_bit_vector; - for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) { - vec = be32_to_cpu(bit_vec[i]); - for (j = 0; j < 32; j++) { - if (!(vec & (1 << j))) - continue; - ++reported; - slave = (i * 32) + j; - comm_cmd = swab32(readl( - &mfunc->comm[slave].slave_write)); - slt = swab32(readl(&mfunc->comm[slave].slave_read)) - >> 31; - toggle = comm_cmd >> 31; - if (toggle != slt) { - if (master->slave_state[slave].comm_toggle - != slt) { - printk(KERN_INFO "slave %d out of sync." - " read toggle %d, state toggle %d. " - "Resynching.\n", slave, slt, - master->slave_state[slave].comm_toggle); - master->slave_state[slave].comm_toggle = - slt; - } - mlx4_master_do_cmd(dev, slave, - comm_cmd >> 16 & 0xff, - comm_cmd & 0xffff, toggle); - ++served; - } - } - } - - if (reported && reported != served) - mlx4_warn(dev, "Got command event with bitmask from %d slaves" - " but %d were served\n", - reported, served); - - if (mlx4_ARM_COMM_CHANNEL(dev)) - mlx4_warn(dev, "Failed to arm comm channel events\n"); -} - -static int sync_toggles(struct mlx4_dev *dev) + u16 op, unsigned long timeout) { - struct mlx4_priv *priv = mlx4_priv(dev); - int wr_toggle; - int rd_toggle; - unsigned long end; - - wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31; - end = jiffies + msecs_to_jiffies(5000); - - while (time_before(jiffies, end)) { - rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31; - if (rd_toggle == wr_toggle) { - priv->cmd.comm_toggle = rd_toggle; - return 0; - } - - cond_resched(); - } - - /* - * we could reach here if for example the previous VM using this - * function misbehaved and left the channel with unsynced state. 
We - * should fix this here and give this VM a chance to use a properly - * synced channel - */ - mlx4_warn(dev, "recovering from previously mis-behaved VM\n"); - __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read); - __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write); - priv->cmd.comm_toggle = 0; - - return 0; -} - -int mlx4_multi_func_init(struct mlx4_dev *dev) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_slave_state *s_state; - int i, err, port; - - priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE, - &priv->mfunc.vhcr_dma, - GFP_KERNEL); - if (!priv->mfunc.vhcr) { - mlx4_err(dev, "Couldn't allocate vhcr.\n"); - return -ENOMEM; - } - - if (mlx4_is_master(dev)) - priv->mfunc.comm = - ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) + - priv->fw.comm_base, MLX4_COMM_PAGESIZE); + if (mlx4_priv(dev)->cmd.use_events) + return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm, + in_modifier, op_modifier, op, timeout); else - priv->mfunc.comm = - ioremap(pci_resource_start(dev->pdev, 2) + - MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE); - if (!priv->mfunc.comm) { - mlx4_err(dev, "Couldn't map communication vector.\n"); - goto err_vhcr; - } - - if (mlx4_is_master(dev)) { - priv->mfunc.master.slave_state = - kzalloc(dev->num_slaves * - sizeof(struct mlx4_slave_state), GFP_KERNEL); - if (!priv->mfunc.master.slave_state) - goto err_comm; - - for (i = 0; i < dev->num_slaves; ++i) { - s_state = &priv->mfunc.master.slave_state[i]; - s_state->last_cmd = MLX4_COMM_CMD_RESET; - __raw_writel((__force u32) 0, - &priv->mfunc.comm[i].slave_write); - __raw_writel((__force u32) 0, - &priv->mfunc.comm[i].slave_read); - mmiowb(); - for (port = 1; port <= MLX4_MAX_PORTS; port++) { - s_state->vlan_filter[port] = - kzalloc(sizeof(struct mlx4_vlan_fltr), - GFP_KERNEL); - if (!s_state->vlan_filter[port]) { - if (--port) - kfree(s_state->vlan_filter[port]); - goto err_slaves; - } - INIT_LIST_HEAD(&s_state->mcast_filters[port]); - } - spin_lock_init(&s_state->lock); - } - - memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe)); - priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD; - INIT_WORK(&priv->mfunc.master.comm_work, - mlx4_master_comm_channel); - INIT_WORK(&priv->mfunc.master.slave_event_work, - mlx4_gen_slave_eqe); - INIT_WORK(&priv->mfunc.master.slave_flr_event_work, - mlx4_master_handle_slave_flr); - spin_lock_init(&priv->mfunc.master.slave_state_lock); - priv->mfunc.master.comm_wq = - create_singlethread_workqueue("mlx4_comm"); - if (!priv->mfunc.master.comm_wq) - goto err_slaves; - - if (mlx4_init_resource_tracker(dev)) - goto err_thread; - - sema_init(&priv->cmd.slave_sem, 1); - err = mlx4_ARM_COMM_CHANNEL(dev); - if (err) { - mlx4_err(dev, " Failed to arm comm channel eq: %x\n", - err); - goto err_resource; - } - - } else { - err = sync_toggles(dev); - if (err) { - mlx4_err(dev, "Couldn't sync toggles\n"); - goto err_comm; - } - - sema_init(&priv->cmd.slave_sem, 1); - } - return 0; - -err_resource: - mlx4_free_resource_tracker(dev); -err_thread: - flush_workqueue(priv->mfunc.master.comm_wq); - destroy_workqueue(priv->mfunc.master.comm_wq); -err_slaves: - while (--i) { - for (port = 1; port <= MLX4_MAX_PORTS; port++) - kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]); - } - kfree(priv->mfunc.master.slave_state); -err_comm: - iounmap(priv->mfunc.comm); -err_vhcr: - dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, - priv->mfunc.vhcr, - priv->mfunc.vhcr_dma); - priv->mfunc.vhcr = NULL; - return -ENOMEM; + return mlx4_cmd_poll(dev, 
in_param, out_param, out_is_imm, + in_modifier, op_modifier, op, timeout); } +EXPORT_SYMBOL_GPL(__mlx4_cmd); int mlx4_cmd_init(struct mlx4_dev *dev) { @@ -1570,51 +331,22 @@ int mlx4_cmd_init(struct mlx4_dev *dev) priv->cmd.use_events = 0; priv->cmd.toggle = 1; - priv->cmd.hcr = NULL; - priv->mfunc.vhcr = NULL; - - if (!mlx4_is_slave(dev)) { - priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + - MLX4_HCR_BASE, MLX4_HCR_SIZE); - if (!priv->cmd.hcr) { - mlx4_err(dev, "Couldn't map command register.\n"); - return -ENOMEM; - } + priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE, + MLX4_HCR_SIZE); + if (!priv->cmd.hcr) { + mlx4_err(dev, "Couldn't map command register."); + return -ENOMEM; } priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev, MLX4_MAILBOX_SIZE, MLX4_MAILBOX_SIZE, 0); - if (!priv->cmd.pool) - goto err_hcr; - - return 0; - -err_hcr: - if (!mlx4_is_slave(dev)) + if (!priv->cmd.pool) { iounmap(priv->cmd.hcr); - return -ENOMEM; -} - -void mlx4_multi_func_cleanup(struct mlx4_dev *dev) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - int i, port; - - if (mlx4_is_master(dev)) { - flush_workqueue(priv->mfunc.master.comm_wq); - destroy_workqueue(priv->mfunc.master.comm_wq); - for (i = 0; i < dev->num_slaves; i++) { - for (port = 1; port <= MLX4_MAX_PORTS; port++) - kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]); - } - kfree(priv->mfunc.master.slave_state); - iounmap(priv->mfunc.comm); - dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, - priv->mfunc.vhcr, - priv->mfunc.vhcr_dma); - priv->mfunc.vhcr = NULL; + return -ENOMEM; } + + return 0; } void mlx4_cmd_cleanup(struct mlx4_dev *dev) @@ -1622,9 +354,7 @@ void mlx4_cmd_cleanup(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); pci_pool_destroy(priv->cmd.pool); - - if (!mlx4_is_slave(dev)) - iounmap(priv->cmd.hcr); + iounmap(priv->cmd.hcr); } /* @@ -1635,7 +365,6 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int i; - int err = 0; priv->cmd.context = kmalloc(priv->cmd.max_cmds * sizeof (struct mlx4_cmd_context), @@ -1660,10 +389,11 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev) ; /* nothing */ --priv->cmd.token_mask; - down(&priv->cmd.poll_sem); priv->cmd.use_events = 1; - return err; + down(&priv->cmd.poll_sem); + + return 0; } /* @@ -1703,8 +433,7 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev) } EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox); -void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, - struct mlx4_cmd_mailbox *mailbox) +void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox) { if (!mailbox) return; @@ -1713,8 +442,3 @@ void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, kfree(mailbox); } EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox); - -u32 mlx4_comm_get_version(void) -{ - return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER; -} diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/cq.c b/trunk/drivers/net/ethernet/mellanox/mlx4/cq.c index 475f9d6af955..499a5168892a 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -34,9 +34,9 @@ * SOFTWARE. 
*/ -#include #include #include +#include #include #include @@ -44,6 +44,27 @@ #include "mlx4.h" #include "icm.h" +struct mlx4_cq_context { + __be32 flags; + u16 reserved1[3]; + __be16 page_offset; + __be32 logsize_usrpage; + __be16 cq_period; + __be16 cq_max_count; + u8 reserved2[3]; + u8 comp_eqn; + u8 log_page_size; + u8 reserved3[2]; + u8 mtt_base_addr_h; + __be32 mtt_base_addr_l; + __be32 last_notified_index; + __be32 solicit_producer_index; + __be32 consumer_index; + __be32 producer_index; + u32 reserved4[2]; + __be64 db_rec_addr; +}; + #define MLX4_CQ_STATUS_OK ( 0 << 28) #define MLX4_CQ_STATUS_OVERFLOW ( 9 << 28) #define MLX4_CQ_STATUS_WRITE_FAIL (10 << 28) @@ -60,7 +81,7 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn) cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree, cqn & (dev->caps.num_cqs - 1)); if (!cq) { - mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn); + mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn); return; } @@ -96,24 +117,23 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type) static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int cq_num) { - return mlx4_cmd(dev, mailbox->dma | dev->caps.function, cq_num, 0, - MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_WRAPPED); + return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ, + MLX4_CMD_TIME_CLASS_A); } static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int cq_num, u32 opmod) { return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + MLX4_CMD_TIME_CLASS_A); } static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int cq_num) { - return mlx4_cmd_box(dev, dev->caps.function, mailbox ? mailbox->dma : 0, - cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num, + mailbox ? 
0 : 1, MLX4_CMD_HW2SW_CQ, + MLX4_CMD_TIME_CLASS_A); } int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq, @@ -168,78 +188,6 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq, } EXPORT_SYMBOL_GPL(mlx4_cq_resize); -int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_cq_table *cq_table = &priv->cq_table; - int err; - - *cqn = mlx4_bitmap_alloc(&cq_table->bitmap); - if (*cqn == -1) - return -ENOMEM; - - err = mlx4_table_get(dev, &cq_table->table, *cqn); - if (err) - goto err_out; - - err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn); - if (err) - goto err_put; - return 0; - -err_put: - mlx4_table_put(dev, &cq_table->table, *cqn); - -err_out: - mlx4_bitmap_free(&cq_table->bitmap, *cqn); - return err; -} - -static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn) -{ - u64 out_param; - int err; - - if (mlx4_is_mfunc(dev)) { - err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ, - RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); - if (err) - return err; - else { - *cqn = get_param_l(&out_param); - return 0; - } - } - return __mlx4_cq_alloc_icm(dev, cqn); -} - -void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_cq_table *cq_table = &priv->cq_table; - - mlx4_table_put(dev, &cq_table->cmpt_table, cqn); - mlx4_table_put(dev, &cq_table->table, cqn); - mlx4_bitmap_free(&cq_table->bitmap, cqn); -} - -static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) -{ - u64 in_param; - int err; - - if (mlx4_is_mfunc(dev)) { - set_param_l(&in_param, cqn); - err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP, - MLX4_CMD_FREE_RES, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); - if (err) - mlx4_warn(dev, "Failed freeing cq:%d\n", cqn); - } else - __mlx4_cq_free_icm(dev, cqn); -} - int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, unsigned vector, int collapsed) @@ -256,15 +204,23 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, cq->vector = vector; - err = mlx4_cq_alloc_icm(dev, &cq->cqn); + cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap); + if (cq->cqn == -1) + return -ENOMEM; + + err = mlx4_table_get(dev, &cq_table->table, cq->cqn); if (err) - return err; + goto err_out; + + err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn); + if (err) + goto err_put; spin_lock_irq(&cq_table->lock); err = radix_tree_insert(&cq_table->tree, cq->cqn, cq); spin_unlock_irq(&cq_table->lock); if (err) - goto err_icm; + goto err_cmpt_put; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) { @@ -303,8 +259,14 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, radix_tree_delete(&cq_table->tree, cq->cqn); spin_unlock_irq(&cq_table->lock); -err_icm: - mlx4_cq_free_icm(dev, cq->cqn); +err_cmpt_put: + mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn); + +err_put: + mlx4_table_put(dev, &cq_table->table, cq->cqn); + +err_out: + mlx4_bitmap_free(&cq_table->bitmap, cq->cqn); return err; } @@ -330,7 +292,8 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) complete(&cq->free); wait_for_completion(&cq->free); - mlx4_cq_free_icm(dev, cq->cqn); + mlx4_table_put(dev, &cq_table->table, cq->cqn); + mlx4_bitmap_free(&cq_table->bitmap, cq->cqn); } EXPORT_SYMBOL_GPL(mlx4_cq_free); @@ -341,8 +304,6 @@ int mlx4_init_cq_table(struct mlx4_dev *dev) spin_lock_init(&cq_table->lock); 
INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC); - if (mlx4_is_slave(dev)) - return 0; err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs, dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0); @@ -354,8 +315,6 @@ int mlx4_init_cq_table(struct mlx4_dev *dev) void mlx4_cleanup_cq_table(struct mlx4_dev *dev) { - if (mlx4_is_slave(dev)) - return; /* Nothing to do to clean up radix_tree */ mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap); } diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 00b81272e314..227997d775e8 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -51,7 +51,10 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv, int err; cq->size = entries; - cq->buf_size = cq->size * sizeof(struct mlx4_cqe); + if (mode == RX) + cq->buf_size = cq->size * sizeof(struct mlx4_cqe); + else + cq->buf_size = sizeof(struct mlx4_cqe); cq->ring = ring; cq->is_tx = mode; @@ -117,7 +120,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, cq->size = priv->rx_ring[cq->ring].actual_size; err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar, - cq->wqres.db.dma, &cq->mcq, cq->vector, 0); + cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx); if (err) return err; @@ -144,7 +147,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); if (priv->mdev->dev->caps.comp_pool && cq->vector) mlx4_release_eq(priv->mdev->dev, cq->vector); - cq->vector = 0; cq->buf_size = 0; cq->buf = NULL; } diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 7dbc6a230779..74e2a2a8a02b 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -45,16 +45,13 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; - strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", - sizeof(drvinfo->version)); - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%d.%d.%d", + strncpy(drvinfo->driver, DRV_NAME, 32); + strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32); + sprintf(drvinfo->fw_version, "%d.%d.%d", (u16) (mdev->dev->caps.fw_ver >> 32), (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff), (u16) (mdev->dev->caps.fw_ver & 0xffff)); - strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), - sizeof(drvinfo->bus_info)); + strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32); drvinfo->n_stats = 0; drvinfo->regdump_len = 0; drvinfo->eedump_len = 0; @@ -106,17 +103,8 @@ static void mlx4_en_get_wol(struct net_device *netdev, struct mlx4_en_priv *priv = netdev_priv(netdev); int err = 0; u64 config = 0; - u64 mask; - if ((priv->port < 1) || (priv->port > 2)) { - en_err(priv, "Failed to get WoL information\n"); - return; - } - - mask = (priv->port == 1) ? 
MLX4_DEV_CAP_FLAG_WOL_PORT1 : - MLX4_DEV_CAP_FLAG_WOL_PORT2; - - if (!(priv->mdev->dev->caps.flags & mask)) { + if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL)) { wol->supported = 0; wol->wolopts = 0; return; @@ -145,15 +133,8 @@ static int mlx4_en_set_wol(struct net_device *netdev, struct mlx4_en_priv *priv = netdev_priv(netdev); u64 config = 0; int err = 0; - u64 mask; - - if ((priv->port < 1) || (priv->port > 2)) - return -EOPNOTSUPP; - - mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 : - MLX4_DEV_CAP_FLAG_WOL_PORT2; - if (!(priv->mdev->dev->caps.flags & mask)) + if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL)) return -EOPNOTSUPP; if (wol->supported & ~WAKE_MAGIC) diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 72fa807b69ce..78d776bc355c 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -45,7 +45,7 @@ #include "mlx4_en.h" #include "en_port.h" -static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; @@ -67,10 +67,9 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) en_err(priv, "failed adding vlan %d\n", vid); mutex_unlock(&mdev->state_lock); - return 0; } -static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; @@ -94,8 +93,6 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) en_err(priv, "Failed configuring VLAN filter\n"); } mutex_unlock(&mdev->state_lock); - - return 0; } u64 mlx4_en_mac_to_u64(u8 *addr) @@ -136,7 +133,7 @@ static void mlx4_en_do_set_mac(struct work_struct *work) if (priv->port_up) { /* Remove old MAC and insert the new one */ err = mlx4_replace_mac(mdev->dev, priv->port, - priv->base_qpn, priv->mac); + priv->base_qpn, priv->mac, 0); if (err) en_err(priv, "Failed changing HW MAC address\n"); } else @@ -151,7 +148,6 @@ static void mlx4_en_clear_list(struct net_device *dev) struct mlx4_en_priv *priv = netdev_priv(dev); kfree(priv->mc_addrs); - priv->mc_addrs = NULL; priv->mc_addrs_cnt = 0; } @@ -171,7 +167,6 @@ static void mlx4_en_cache_mclist(struct net_device *dev) i = 0; netdev_for_each_mc_addr(ha, dev) memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN); - mlx4_en_clear_list(dev); priv->mc_addrs = mc_addrs; priv->mc_addrs_cnt = mc_addrs_cnt; } @@ -209,16 +204,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) goto out; } - if (!netif_carrier_ok(dev)) { - if (!mlx4_en_QUERY_PORT(mdev, priv->port)) { - if (priv->port_state.link_state) { - priv->last_link_state = MLX4_DEV_EVENT_PORT_UP; - netif_carrier_on(dev); - en_dbg(LINK, priv, "Link Up\n"); - } - } - } - /* * Promsicuous mode: disable all filters */ @@ -614,12 +599,12 @@ int mlx4_en_start_port(struct net_device *dev) ++rx_index; } - /* Set qp number */ - en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port); - err = mlx4_get_eth_qp(mdev->dev, priv->port, - priv->mac, &priv->base_qpn); + /* Set port mac number */ + en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port); + err = mlx4_register_mac(mdev->dev, priv->port, + priv->mac, &priv->base_qpn, 0); if 
(err) { - en_err(priv, "Failed getting eth qp\n"); + en_err(priv, "Failed setting port mac\n"); goto cq_err; } mdev->mac_removed[priv->port] = 0; @@ -714,7 +699,7 @@ int mlx4_en_start_port(struct net_device *dev) mlx4_en_release_rss_steer(priv); mac_err: - mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn); + mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn); cq_err: while (rx_index--) mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]); @@ -760,6 +745,10 @@ void mlx4_en_stop_port(struct net_device *dev) /* Flush multicast filter */ mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); + /* Unregister Mac address for the port */ + mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn); + mdev->mac_removed[priv->port] = 1; + /* Free TX Rings */ for (i = 0; i < priv->tx_ring_num; i++) { mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]); @@ -773,10 +762,6 @@ void mlx4_en_stop_port(struct net_device *dev) /* Free RSS qps */ mlx4_en_release_rss_steer(priv); - /* Unregister Mac address for the port */ - mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn); - mdev->mac_removed[priv->port] = 1; - /* Free RX Rings */ for (i = 0; i < priv->rx_ring_num; i++) { mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]); @@ -989,21 +974,6 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static int mlx4_en_set_features(struct net_device *netdev, - netdev_features_t features) -{ - struct mlx4_en_priv *priv = netdev_priv(netdev); - - if (features & NETIF_F_LOOPBACK) - priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); - else - priv->ctrl_flags &= - cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK); - - return 0; - -} - static const struct net_device_ops mlx4_netdev_ops = { .ndo_open = mlx4_en_open, .ndo_stop = mlx4_en_close, @@ -1020,7 +990,6 @@ static const struct net_device_ops mlx4_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mlx4_en_netpoll, #endif - .ndo_set_features = mlx4_en_set_features, }; int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, @@ -1053,8 +1022,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->port = port; priv->port_up = false; priv->flags = prof->flags; - priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | - MLX4_WQE_CTRL_SOLICITED); priv->tx_ring_num = prof->tx_ring_num; priv->rx_ring_num = prof->rx_ring_num; priv->mac_index = -1; @@ -1121,7 +1088,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, dev->features = dev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; - dev->hw_features |= NETIF_F_LOOPBACK; mdev->pndev[port] = dev; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_port.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_port.c index 331791467a22..03c84cd78cde 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -41,6 +41,13 @@ #include "mlx4_en.h" +int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, + u64 mac, u64 clear, u8 mode) +{ + return mlx4_cmd(dev, (mac | (clear << 63)), port, mode, + MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B); +} + int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv) { struct mlx4_cmd_mailbox *mailbox; @@ -65,7 +72,76 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv) filter->entry[i] = cpu_to_be32(entry); } err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR, - MLX4_CMD_TIME_CLASS_B, 
MLX4_CMD_WRAPPED); + MLX4_CMD_TIME_CLASS_B); + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + + +int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, + u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_general_context *context; + int err; + u32 in_mod; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + context = mailbox->buf; + memset(context, 0, sizeof *context); + + context->flags = SET_PORT_GEN_ALL_VALID; + context->mtu = cpu_to_be16(mtu); + context->pptx = (pptx * (!pfctx)) << 7; + context->pfctx = pfctx; + context->pprx = (pprx * (!pfcrx)) << 7; + context->pfcrx = pfcrx; + + in_mod = MLX4_SET_PORT_GENERAL << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, + u8 promisc) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_rqp_calc_context *context; + int err; + u32 in_mod; + u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ? + MCAST_DIRECT : MCAST_DEFAULT; + + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER && + dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) + return 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + context = mailbox->buf; + memset(context, 0, sizeof *context); + + context->base_qpn = cpu_to_be32(base_qpn); + context->n_mac = dev->caps.log_num_macs; + context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | + base_qpn); + context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT | + base_qpn); + context->intra_no_vlan = 0; + context->no_vlan = MLX4_NO_VLAN_IDX; + context->intra_vlan_miss = 0; + context->vlan_miss = MLX4_VLAN_MISS_IDX; + + in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B); + mlx4_free_cmd_mailbox(dev, mailbox); return err; } @@ -83,8 +159,7 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port) return PTR_ERR(mailbox); memset(mailbox->buf, 0, sizeof(*qport_context)); err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0, - MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, - MLX4_CMD_WRAPPED); + MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B); if (err) goto out; qport_context = mailbox->buf; @@ -129,8 +204,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) return PTR_ERR(mailbox); memset(mailbox->buf, 0, sizeof(*mlx4_en_stats)); err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0, - MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, - MLX4_CMD_WRAPPED); + MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B); if (err) goto out; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_port.h b/trunk/drivers/net/ethernet/mellanox/mlx4/en_port.h index 6934fd7e66ed..19eb244f5165 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_port.h +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_port.h @@ -39,6 +39,49 @@ #define SET_PORT_PROMISC_SHIFT 31 #define SET_PORT_MC_PROMISC_SHIFT 30 +enum { + MLX4_CMD_SET_VLAN_FLTR = 0x47, + MLX4_CMD_SET_MCAST_FLTR = 0x48, + MLX4_CMD_DUMP_ETH_STATS = 0x49, +}; + +enum { + MCAST_DIRECT_ONLY = 0, + MCAST_DIRECT = 1, + MCAST_DEFAULT = 2 +}; + +struct mlx4_set_port_general_context { + u8 reserved[3]; + u8 flags; + u16 reserved2; + __be16 mtu; + u8 pptx; + u8 pfctx; + u16 reserved3; + u8 pprx; + u8 pfcrx; + u16 reserved4; +}; 
+ +struct mlx4_set_port_rqp_calc_context { + __be32 base_qpn; + u8 rererved; + u8 n_mac; + u8 n_vlan; + u8 n_prio; + u8 reserved2[3]; + u8 mac_miss; + u8 intra_no_vlan; + u8 no_vlan; + u8 intra_vlan_miss; + u8 vlan_miss; + u8 reserved3[3]; + u8 no_vlan_prio; + __be32 promisc; + __be32 mcast; +}; + #define VLAN_FLTR_SIZE 128 struct mlx4_set_vlan_fltr_mbox { __be32 entry[VLAN_FLTR_SIZE]; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_resources.c index bcbc54c16947..0dfb4ec8a9dd 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_resources.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_resources.c @@ -44,7 +44,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, struct mlx4_en_dev *mdev = priv->mdev; memset(context, 0, sizeof *context); - context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET); + context->flags = cpu_to_be32(7 << 16 | rss << 13); context->pd = cpu_to_be32(mdev->priv_pdn); context->mtu_msgmax = 0xff; if (!is_tx && !rss) diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_rx.c index e8d6ad2dce0a..c2df6c358603 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -541,8 +541,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud unsigned int length; int polled = 0; int ip_summed; - struct ethhdr *ethh; - u64 s_mac; if (!priv->port_up) return 0; @@ -579,19 +577,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud goto next; } - /* Get pointer to first fragment since we haven't skb yet and - * cast it to ethhdr struct */ - ethh = (struct ethhdr *)(page_address(skb_frags[0].page) + - skb_frags[0].offset); - s_mac = mlx4_en_mac_to_u64(ethh->h_source); - - /* If source MAC is equal to our own MAC and not performing - * the selftest or flb disabled - drop the packet */ - if (s_mac == priv->mac && - (!(dev->features & NETIF_F_LOOPBACK) || - !priv->validate_loopback)) - goto next; - /* * Packet is OK - process it. 
*/ @@ -852,10 +837,9 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_rss_map *rss_map = &priv->rss_map; struct mlx4_qp_context context; - struct mlx4_rss_context *rss_context; + struct mlx4_en_rss_context *rss_context; void *ptr; - u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 | - MLX4_RSS_TCP_IPV6); + u8 rss_mask = 0x3f; int i, qpn; int err = 0; int good_qps = 0; @@ -893,21 +877,18 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, priv->rx_ring[0].cqn, &context); - ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path) - + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH; + ptr = ((void *) &context) + 0x3c; rss_context = ptr; rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 | (rss_map->base_qpn)); rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn); - if (priv->mdev->profile.udp_rss) { - rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6; - rss_context->base_qpn_udp = rss_context->default_qpn; - } rss_context->flags = rss_mask; - rss_context->hash_fn = MLX4_RSS_HASH_TOP; + rss_context->hash_fn = 1; for (i = 0; i < 10; i++) rss_context->rss_key[i] = rsskey[i]; + if (priv->mdev->profile.udp_rss) + rss_context->base_qpn_udp = rss_context->default_qpn; err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context, &rss_map->indir_qp, &rss_map->indir_state); if (err) diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index bf2e5d3f177c..9fdbcecd499d 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_selftest.c @@ -43,7 +43,7 @@ static int mlx4_en_test_registers(struct mlx4_en_priv *priv) { return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + MLX4_CMD_TIME_CLASS_A); } static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv) diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 9ef9038d0629..d901b4267537 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -307,60 +307,59 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) return cnt; } + static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_cq *mcq = &cq->mcq; struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; - struct mlx4_cqe *cqe; + struct mlx4_cqe *cqe = cq->buf; u16 index; - u16 new_index, ring_index; + u16 new_index; u32 txbbs_skipped = 0; - u32 cons_index = mcq->cons_index; - int size = cq->size; - u32 size_mask = ring->size_mask; - struct mlx4_cqe *buf = cq->buf; + u32 cq_last_sav; - if (!priv->port_up) + /* index always points to the first TXBB of the last polled descriptor */ + index = ring->cons & ring->size_mask; + new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask; + if (index == new_index) return; - index = cons_index & size_mask; - cqe = &buf[index]; - ring_index = ring->cons & size_mask; - - /* Process all completed CQEs */ - while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, - cons_index & size)) { - /* - * make sure we read the CQE after we read the - * ownership bit - */ - rmb(); - - /* Skip over last polled CQE */ - new_index = be16_to_cpu(cqe->wqe_index) & size_mask; + if (!priv->port_up) + 
return; + /* + * We use a two-stage loop: + * - the first samples the HW-updated CQE + * - the second frees TXBBs until the last sample + * This lets us amortize CQE cache misses, while still polling the CQ + * until is quiescent. + */ + cq_last_sav = mcq->cons_index; + do { do { + /* Skip over last polled CQE */ + index = (index + ring->last_nr_txbb) & ring->size_mask; txbbs_skipped += ring->last_nr_txbb; - ring_index = (ring_index + ring->last_nr_txbb) & size_mask; - /* free next descriptor */ + + /* Poll next CQE */ ring->last_nr_txbb = mlx4_en_free_tx_desc( - priv, ring, ring_index, - !!((ring->cons + txbbs_skipped) & - ring->size)); - } while (ring_index != new_index); - - ++cons_index; - index = cons_index & size_mask; - cqe = &buf[index]; - } + priv, ring, index, + !!((ring->cons + txbbs_skipped) & + ring->size)); + ++mcq->cons_index; + + } while (index != new_index); + new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask; + } while (index != new_index); + AVG_PERF_COUNTER(priv->pstats.tx_coal_avg, + (u32) (mcq->cons_index - cq_last_sav)); /* * To prevent CQ overflow we first update CQ consumer and only then * the ring consumer. */ - mcq->cons_index = cons_index; mlx4_cq_set_ci(mcq); wmb(); ring->cons += txbbs_skipped; @@ -566,8 +565,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc)); } tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag); - tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * - (!!vlan_tx_tag_present(skb)); + tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag); tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f; } @@ -678,25 +676,27 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) /* Prepare ctrl segement apart opcode+ownership, which depends on * whether LSO is used */ tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); - tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * - !!vlan_tx_tag_present(skb); + tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag; tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f; - tx_desc->ctrl.srcrb_flags = priv->ctrl_flags; + tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | + MLX4_WQE_CTRL_SOLICITED); if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | MLX4_WQE_CTRL_TCP_UDP_CSUM); ring->tx_csum++; } - /* Copy dst mac address to wqe */ - skb_reset_mac_header(skb); - ethh = eth_hdr(skb); - if (ethh && ethh->h_dest) { - mac = mlx4_en_mac_to_u64(ethh->h_dest); - mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16); - mac_l = (u32) (mac & 0xffffffff); - tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h); - tx_desc->ctrl.imm = cpu_to_be32(mac_l); + if (unlikely(priv->validate_loopback)) { + /* Copy dst mac address to wqe */ + skb_reset_mac_header(skb); + ethh = eth_hdr(skb); + if (ethh && ethh->h_dest) { + mac = mlx4_en_mac_to_u64(ethh->h_dest); + mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16); + mac_l = (u32) (mac & 0xffffffff); + tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h); + tx_desc->ctrl.imm = cpu_to_be32(mac_l); + } } /* Handle LSO (TSO) packets */ diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/eq.c b/trunk/drivers/net/ethernet/mellanox/mlx4/eq.c index 1e9b55eb7217..24ee96775996 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -31,7 +31,6 @@ * SOFTWARE. 
*/ -#include #include #include #include @@ -53,6 +52,30 @@ enum { MLX4_EQ_ENTRY_SIZE = 0x20 }; +/* + * Must be packed because start is 64 bits but only aligned to 32 bits. + */ +struct mlx4_eq_context { + __be32 flags; + u16 reserved1[3]; + __be16 page_offset; + u8 log_eq_size; + u8 reserved2[4]; + u8 eq_period; + u8 reserved3; + u8 eq_max_count; + u8 reserved4[3]; + u8 intr; + u8 log_page_size; + u8 reserved5[2]; + u8 mtt_base_addr_h; + __be32 mtt_base_addr_l; + u32 reserved6[2]; + __be32 consumer_index; + __be32 producer_index; + u32 reserved7[4]; +}; + #define MLX4_EQ_STATUS_OK ( 0 << 28) #define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28) #define MLX4_EQ_OWNER_SW ( 0 << 24) @@ -77,9 +100,46 @@ enum { (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \ (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \ (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \ - (1ull << MLX4_EVENT_TYPE_CMD) | \ - (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \ - (1ull << MLX4_EVENT_TYPE_FLR_EVENT)) + (1ull << MLX4_EVENT_TYPE_CMD)) + +struct mlx4_eqe { + u8 reserved1; + u8 type; + u8 reserved2; + u8 subtype; + union { + u32 raw[6]; + struct { + __be32 cqn; + } __packed comp; + struct { + u16 reserved1; + __be16 token; + u32 reserved2; + u8 reserved3[3]; + u8 status; + __be64 out_param; + } __packed cmd; + struct { + __be32 qpn; + } __packed qp; + struct { + __be32 srqn; + } __packed srq; + struct { + __be32 cqn; + u32 reserved1; + u8 reserved2[3]; + u8 syndrome; + } __packed cq_err; + struct { + u32 reserved1[2]; + __be32 port; + } __packed port_change; + } event; + u8 reserved3[3]; + u8 owner; +} __packed; static void eq_set_ci(struct mlx4_eq *eq, int req_not) { @@ -102,144 +162,13 @@ static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq) return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe; } -static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq) -{ - struct mlx4_eqe *eqe = - &slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)]; - return (!!(eqe->owner & 0x80) ^ - !!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ? 
- eqe : NULL; -} - -void mlx4_gen_slave_eqe(struct work_struct *work) -{ - struct mlx4_mfunc_master_ctx *master = - container_of(work, struct mlx4_mfunc_master_ctx, - slave_event_work); - struct mlx4_mfunc *mfunc = - container_of(master, struct mlx4_mfunc, master); - struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc); - struct mlx4_dev *dev = &priv->dev; - struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq; - struct mlx4_eqe *eqe; - u8 slave; - int i; - - for (eqe = next_slave_event_eqe(slave_eq); eqe; - eqe = next_slave_event_eqe(slave_eq)) { - slave = eqe->slave_id; - - /* All active slaves need to receive the event */ - if (slave == ALL_SLAVES) { - for (i = 0; i < dev->num_slaves; i++) { - if (i != dev->caps.function && - master->slave_state[i].active) - if (mlx4_GEN_EQE(dev, i, eqe)) - mlx4_warn(dev, "Failed to " - " generate event " - "for slave %d\n", i); - } - } else { - if (mlx4_GEN_EQE(dev, slave, eqe)) - mlx4_warn(dev, "Failed to generate event " - "for slave %d\n", slave); - } - ++slave_eq->cons; - } -} - - -static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq; - struct mlx4_eqe *s_eqe = - &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)]; - - if ((!!(s_eqe->owner & 0x80)) ^ - (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) { - mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. " - "No free EQE on slave events queue\n", slave); - return; - } - - memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1); - s_eqe->slave_id = slave; - /* ensure all information is written before setting the ownersip bit */ - wmb(); - s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 
0x0 : 0x80; - ++slave_eq->prod; - - queue_work(priv->mfunc.master.comm_wq, - &priv->mfunc.master.slave_event_work); -} - -static void mlx4_slave_event(struct mlx4_dev *dev, int slave, - struct mlx4_eqe *eqe) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_slave_state *s_slave = - &priv->mfunc.master.slave_state[slave]; - - if (!s_slave->active) { - /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/ - return; - } - - slave_event(dev, slave, eqe); -} - -void mlx4_master_handle_slave_flr(struct work_struct *work) -{ - struct mlx4_mfunc_master_ctx *master = - container_of(work, struct mlx4_mfunc_master_ctx, - slave_flr_event_work); - struct mlx4_mfunc *mfunc = - container_of(master, struct mlx4_mfunc, master); - struct mlx4_priv *priv = - container_of(mfunc, struct mlx4_priv, mfunc); - struct mlx4_dev *dev = &priv->dev; - struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; - int i; - int err; - - mlx4_dbg(dev, "mlx4_handle_slave_flr\n"); - - for (i = 0 ; i < dev->num_slaves; i++) { - - if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) { - mlx4_dbg(dev, "mlx4_handle_slave_flr: " - "clean slave: %d\n", i); - - mlx4_delete_all_resources_for_slave(dev, i); - /*return the slave to running mode*/ - spin_lock(&priv->mfunc.master.slave_state_lock); - slave_state[i].last_cmd = MLX4_COMM_CMD_RESET; - slave_state[i].is_slave_going_down = 0; - spin_unlock(&priv->mfunc.master.slave_state_lock); - /*notify the FW:*/ - err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); - if (err) - mlx4_warn(dev, "Failed to notify FW on " - "FLR done (slave:%d)\n", i); - } - } -} - static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) { - struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_eqe *eqe; int cqn; int eqes_found = 0; int set_ci = 0; int port; - int slave = 0; - int ret; - u32 flr_slave; - u8 update_slave_state; - int i; while ((eqe = next_eqe_sw(eq))) { /* @@ -262,68 +191,14 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) case MLX4_EVENT_TYPE_PATH_MIG_FAILED: case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR: case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR: - mlx4_dbg(dev, "event %d arrived\n", eqe->type); - if (mlx4_is_master(dev)) { - /* forward only to slave owning the QP */ - ret = mlx4_get_slave_from_resource_id(dev, - RES_QP, - be32_to_cpu(eqe->event.qp.qpn) - & 0xffffff, &slave); - if (ret && ret != -ENOENT) { - mlx4_dbg(dev, "QP event %02x(%02x) on " - "EQ %d at index %u: could " - "not get slave id (%d)\n", - eqe->type, eqe->subtype, - eq->eqn, eq->cons_index, ret); - break; - } - - if (!ret && slave != dev->caps.function) { - mlx4_slave_event(dev, slave, eqe); - break; - } - - } - mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & - 0xffffff, eqe->type); + mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, + eqe->type); break; case MLX4_EVENT_TYPE_SRQ_LIMIT: - mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n", - __func__); case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: - if (mlx4_is_master(dev)) { - /* forward only to slave owning the SRQ */ - ret = mlx4_get_slave_from_resource_id(dev, - RES_SRQ, - be32_to_cpu(eqe->event.srq.srqn) - & 0xffffff, - &slave); - if (ret && ret != -ENOENT) { - mlx4_warn(dev, "SRQ event %02x(%02x) " - "on EQ %d at index %u: could" - " not get slave id (%d)\n", - eqe->type, eqe->subtype, - eq->eqn, eq->cons_index, ret); - break; - } - mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x," - " event: %02x(%02x)\n", __func__, - slave, - be32_to_cpu(eqe->event.srq.srqn), - eqe->type, 
eqe->subtype); - - if (!ret && slave != dev->caps.function) { - mlx4_warn(dev, "%s: sending event " - "%02x(%02x) to slave:%d\n", - __func__, eqe->type, - eqe->subtype, slave); - mlx4_slave_event(dev, slave, eqe); - break; - } - } - mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & - 0xffffff, eqe->type); + mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff, + eqe->type); break; case MLX4_EVENT_TYPE_CMD: @@ -336,35 +211,13 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) case MLX4_EVENT_TYPE_PORT_CHANGE: port = be32_to_cpu(eqe->event.port_change.port) >> 28; if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) { - mlx4_dispatch_event(dev, - MLX4_DEV_EVENT_PORT_DOWN, + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN, port); mlx4_priv(dev)->sense.do_sense_port[port] = 1; - if (mlx4_is_master(dev)) - /*change the state of all slave's port - * to down:*/ - for (i = 0; i < dev->num_slaves; i++) { - mlx4_dbg(dev, "%s: Sending " - "MLX4_PORT_CHANGE_SUBTYPE_DOWN" - " to slave: %d, port:%d\n", - __func__, i, port); - if (i == dev->caps.function) - continue; - mlx4_slave_event(dev, i, eqe); - } } else { - mlx4_dispatch_event(dev, - MLX4_DEV_EVENT_PORT_UP, + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port); mlx4_priv(dev)->sense.do_sense_port[port] = 0; - - if (mlx4_is_master(dev)) { - for (i = 0; i < dev->num_slaves; i++) { - if (i == dev->caps.function) - continue; - mlx4_slave_event(dev, i, eqe); - } - } } break; @@ -373,28 +226,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) eqe->event.cq_err.syndrome == 1 ? "overrun" : "access violation", be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff); - if (mlx4_is_master(dev)) { - ret = mlx4_get_slave_from_resource_id(dev, - RES_CQ, - be32_to_cpu(eqe->event.cq_err.cqn) - & 0xffffff, &slave); - if (ret && ret != -ENOENT) { - mlx4_dbg(dev, "CQ event %02x(%02x) on " - "EQ %d at index %u: could " - "not get slave id (%d)\n", - eqe->type, eqe->subtype, - eq->eqn, eq->cons_index, ret); - break; - } - - if (!ret && slave != dev->caps.function) { - mlx4_slave_event(dev, slave, eqe); - break; - } - } - mlx4_cq_event(dev, - be32_to_cpu(eqe->event.cq_err.cqn) - & 0xffffff, + mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn), eqe->type); break; @@ -402,60 +234,13 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn); break; - case MLX4_EVENT_TYPE_COMM_CHANNEL: - if (!mlx4_is_master(dev)) { - mlx4_warn(dev, "Received comm channel event " - "for non master device\n"); - break; - } - memcpy(&priv->mfunc.master.comm_arm_bit_vector, - eqe->event.comm_channel_arm.bit_vec, - sizeof eqe->event.comm_channel_arm.bit_vec); - queue_work(priv->mfunc.master.comm_wq, - &priv->mfunc.master.comm_work); - break; - - case MLX4_EVENT_TYPE_FLR_EVENT: - flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id); - if (!mlx4_is_master(dev)) { - mlx4_warn(dev, "Non-master function received" - "FLR event\n"); - break; - } - - mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave); - - if (flr_slave > dev->num_slaves) { - mlx4_warn(dev, - "Got FLR for unknown function: %d\n", - flr_slave); - update_slave_state = 0; - } else - update_slave_state = 1; - - spin_lock(&priv->mfunc.master.slave_state_lock); - if (update_slave_state) { - priv->mfunc.master.slave_state[flr_slave].active = false; - priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR; - priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1; - } - 
spin_unlock(&priv->mfunc.master.slave_state_lock); - queue_work(priv->mfunc.master.comm_wq, - &priv->mfunc.master.slave_flr_event_work); - break; case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: case MLX4_EVENT_TYPE_ECC_DETECT: default: - mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at " - "index %u. owner=%x, nent=0x%x, slave=%x, " - "ownership=%s\n", - eqe->type, eqe->subtype, eq->eqn, - eq->cons_index, eqe->owner, eq->nent, - eqe->slave_id, - !!(eqe->owner & 0x80) ^ - !!(eq->cons_index & eq->nent) ? "HW" : "SW"); + mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n", + eqe->type, eqe->subtype, eq->eqn, eq->cons_index); break; - }; + } ++eq->cons_index; eqes_found = 1; @@ -505,58 +290,25 @@ static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr) return IRQ_HANDLED; } -int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_slave_event_eq_info *event_eq = - &priv->mfunc.master.slave_state[slave].event_eq; - u32 in_modifier = vhcr->in_modifier; - u32 eqn = in_modifier & 0x1FF; - u64 in_param = vhcr->in_param; - int err = 0; - - if (slave == dev->caps.function) - err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn, - 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B, - MLX4_CMD_NATIVE); - if (!err) { - if (in_modifier >> 31) { - /* unmap */ - event_eq->event_type &= ~in_param; - } else { - event_eq->eqn = eqn; - event_eq->event_type = in_param; - } - } - return err; -} - static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap, int eq_num) { return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num, - 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B, - MLX4_CMD_WRAPPED); + 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B); } static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int eq_num) { - return mlx4_cmd(dev, mailbox->dma | dev->caps.function, eq_num, 0, - MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_WRAPPED); + return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ, + MLX4_CMD_TIME_CLASS_A); } static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int eq_num) { - return mlx4_cmd_box(dev, dev->caps.function, mailbox->dma, eq_num, - 0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_WRAPPED); + return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ, + MLX4_CMD_TIME_CLASS_A); } static int mlx4_num_eq_uar(struct mlx4_dev *dev) @@ -833,16 +585,14 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) for (i = 0; i < mlx4_num_eq_uar(dev); ++i) priv->eq_table.uar_map[i] = NULL; - if (!mlx4_is_slave(dev)) { - err = mlx4_map_clr_int(dev); - if (err) - goto err_out_bitmap; + err = mlx4_map_clr_int(dev); + if (err) + goto err_out_bitmap; - priv->eq_table.clr_mask = - swab32(1 << (priv->eq_table.inta_pin & 31)); - priv->eq_table.clr_int = priv->clr_base + - (priv->eq_table.inta_pin < 32 ? 4 : 0); - } + priv->eq_table.clr_mask = + swab32(1 << (priv->eq_table.inta_pin & 31)); + priv->eq_table.clr_int = priv->clr_base + + (priv->eq_table.inta_pin < 32 ? 
4 : 0); priv->eq_table.irq_names = kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 + @@ -950,8 +700,7 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) mlx4_free_eq(dev, &priv->eq_table.eq[i]); --i; } - if (!mlx4_is_slave(dev)) - mlx4_unmap_clr_int(dev); + mlx4_unmap_clr_int(dev); mlx4_free_irqs(dev); err_out_bitmap: @@ -976,8 +725,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev) for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) mlx4_free_eq(dev, &priv->eq_table.eq[i]); - if (!mlx4_is_slave(dev)) - mlx4_unmap_clr_int(dev); + mlx4_unmap_clr_int(dev); for (i = 0; i < mlx4_num_eq_uar(dev); ++i) if (priv->eq_table.uar_map[i]) @@ -1000,7 +748,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev) err = mlx4_NOP(dev); /* When not in MSI_X, there is only one irq to check */ - if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev)) + if (!(dev->flags & MLX4_FLAG_MSI_X)) return err; /* A loop over all completion vectors, for each vector we will check diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c b/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c index a424a19280cc..435ca6e49734 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -32,7 +32,6 @@ * SOFTWARE. */ -#include #include #include #include @@ -49,7 +48,7 @@ enum { extern void __buggy_use_of_MLX4_GET(void); extern void __buggy_use_of_MLX4_PUT(void); -static bool enable_qos; +static int enable_qos; module_param(enable_qos, bool, 0444); MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)"); @@ -140,185 +139,12 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg) MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET); err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + MLX4_CMD_TIME_CLASS_A); mlx4_free_cmd_mailbox(dev, mailbox); return err; } -int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - u8 field; - u32 size; - int err = 0; - -#define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0 -#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 -#define QUERY_FUNC_CAP_FUNCTION_OFFSET 0x3 -#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4 -#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10 -#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14 -#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18 -#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x20 -#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x24 -#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x28 -#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c -#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0X30 - -#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3 -#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc - - if (vhcr->op_modifier == 1) { - field = vhcr->in_modifier; - MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); - - field = 0; /* ensure fvl bit is not set */ - MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET); - } else if (vhcr->op_modifier == 0) { - field = 1 << 7; /* enable only ethernet interface */ - MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); - - field = slave; - MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FUNCTION_OFFSET); - - field = dev->caps.num_ports; - MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); - - size = 0; /* no PF behavious is set for now */ - MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET); - - size = dev->caps.num_qps; - 
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); - - size = dev->caps.num_srqs; - MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); - - size = dev->caps.num_cqs; - MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); - - size = dev->caps.num_eqs; - MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); - - size = dev->caps.reserved_eqs; - MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); - - size = dev->caps.num_mpts; - MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); - - size = dev->caps.num_mtts; - MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); - - size = dev->caps.num_mgms + dev->caps.num_amgms; - MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); - - } else - err = -EINVAL; - - return err; -} - -int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap) -{ - struct mlx4_cmd_mailbox *mailbox; - u32 *outbox; - u8 field; - u32 size; - int i; - int err = 0; - - - mailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(mailbox)) - return PTR_ERR(mailbox); - - err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FUNC_CAP, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); - if (err) - goto out; - - outbox = mailbox->buf; - - MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET); - if (!(field & (1 << 7))) { - mlx4_err(dev, "The host doesn't support eth interface\n"); - err = -EPROTONOSUPPORT; - goto out; - } - - MLX4_GET(field, outbox, QUERY_FUNC_CAP_FUNCTION_OFFSET); - func_cap->function = field; - - MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); - func_cap->num_ports = field; - - MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET); - func_cap->pf_context_behaviour = size; - - MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); - func_cap->qp_quota = size & 0xFFFFFF; - - MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); - func_cap->srq_quota = size & 0xFFFFFF; - - MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); - func_cap->cq_quota = size & 0xFFFFFF; - - MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET); - func_cap->max_eq = size & 0xFFFFFF; - - MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); - func_cap->reserved_eq = size & 0xFFFFFF; - - MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); - func_cap->mpt_quota = size & 0xFFFFFF; - - MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); - func_cap->mtt_quota = size & 0xFFFFFF; - - MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); - func_cap->mcg_quota = size & 0xFFFFFF; - - for (i = 1; i <= func_cap->num_ports; ++i) { - err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 1, - MLX4_CMD_QUERY_FUNC_CAP, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); - if (err) - goto out; - - MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET); - if (field & (1 << 7)) { - mlx4_err(dev, "VLAN is enforced on this port\n"); - err = -EPROTONOSUPPORT; - goto out; - } - - if (field & (1 << 6)) { - mlx4_err(dev, "Force mac is enabled on this port\n"); - err = -EPROTONOSUPPORT; - goto out; - } - - MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); - func_cap->physical_port[i] = field; - } - - /* All other resources are allocated by the master, but we still report - * 'num' and 'reserved' capabilities as follows: - * - num remains the maximum resource index - * - 'num - reserved' is the total available objects of a resource, but - * resource indices may be less than 'reserved' - * TODO: set per-resource quotas */ - -out: - mlx4_free_cmd_mailbox(dev, mailbox); - - return err; -} - int 
mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { struct mlx4_cmd_mailbox *mailbox; @@ -403,7 +229,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) outbox = mailbox->buf; err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, - MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev)); + MLX4_CMD_TIME_CLASS_A); if (err) goto out; @@ -570,15 +396,12 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) for (i = 1; i <= dev_cap->num_ports; ++i) { err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT, - MLX4_CMD_TIME_CLASS_B, - !mlx4_is_slave(dev)); + MLX4_CMD_TIME_CLASS_B); if (err) goto out; MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET); dev_cap->supported_port_types[i] = field & 3; - dev_cap->suggested_type[i] = (field >> 3) & 1; - dev_cap->default_sense[i] = (field >> 4) & 1; MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); dev_cap->ib_mtu[i] = field & 0xf; MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); @@ -647,61 +470,6 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) return err; } -int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - u64 def_mac; - u8 port_type; - int err; - -#define MLX4_PORT_SUPPORT_IB (1 << 0) -#define MLX4_PORT_SUGGEST_TYPE (1 << 3) -#define MLX4_PORT_DEFAULT_SENSE (1 << 4) -#define MLX4_VF_PORT_ETH_ONLY_MASK (0xff & ~MLX4_PORT_SUPPORT_IB & \ - ~MLX4_PORT_SUGGEST_TYPE & \ - ~MLX4_PORT_DEFAULT_SENSE) - - err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, - MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, - MLX4_CMD_NATIVE); - - if (!err && dev->caps.function != slave) { - /* set slave default_mac address */ - MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET); - def_mac += slave << 8; - MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET); - - /* get port type - currently only eth is enabled */ - MLX4_GET(port_type, outbox->buf, - QUERY_PORT_SUPPORTED_TYPE_OFFSET); - - /* Allow only Eth port, no link sensing allowed */ - port_type &= MLX4_VF_PORT_ETH_ONLY_MASK; - - /* check eth is enabled for this port */ - if (!(port_type & 2)) - mlx4_dbg(dev, "QUERY PORT: eth not supported by host"); - - MLX4_PUT(outbox->buf, port_type, - QUERY_PORT_SUPPORTED_TYPE_OFFSET); - } - - return err; -} - -static int mlx4_QUERY_PORT(struct mlx4_dev *dev, void *ptr, u8 port) -{ - struct mlx4_cmd_mailbox *outbox = ptr; - - return mlx4_cmd_box(dev, 0, outbox->dma, port, 0, - MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, - MLX4_CMD_WRAPPED); -} -EXPORT_SYMBOL_GPL(mlx4_QUERY_PORT); - int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) { struct mlx4_cmd_mailbox *mailbox; @@ -751,8 +519,7 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) if (++nent == MLX4_MAILBOX_SIZE / 16) { err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, - MLX4_CMD_TIME_CLASS_B, - MLX4_CMD_NATIVE); + MLX4_CMD_TIME_CLASS_B); if (err) goto out; nent = 0; @@ -761,8 +528,7 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) } if (nent) - err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B); if (err) goto out; @@ -791,15 +557,13 @@ int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm) int mlx4_UNMAP_FA(struct mlx4_dev *dev) { - return mlx4_cmd(dev, 0, 0, 0, 
MLX4_CMD_UNMAP_FA, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B); } int mlx4_RUN_FW(struct mlx4_dev *dev) { - return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A); } int mlx4_QUERY_FW(struct mlx4_dev *dev) @@ -815,7 +579,6 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev) #define QUERY_FW_OUT_SIZE 0x100 #define QUERY_FW_VER_OFFSET 0x00 -#define QUERY_FW_PPF_ID 0x09 #define QUERY_FW_CMD_IF_REV_OFFSET 0x0a #define QUERY_FW_MAX_CMD_OFFSET 0x0f #define QUERY_FW_ERR_START_OFFSET 0x30 @@ -826,16 +589,13 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev) #define QUERY_FW_CLR_INT_BASE_OFFSET 0x20 #define QUERY_FW_CLR_INT_BAR_OFFSET 0x28 -#define QUERY_FW_COMM_BASE_OFFSET 0x40 -#define QUERY_FW_COMM_BAR_OFFSET 0x48 - mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); outbox = mailbox->buf; err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + MLX4_CMD_TIME_CLASS_A); if (err) goto out; @@ -848,9 +608,6 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev) ((fw_ver & 0xffff0000ull) >> 16) | ((fw_ver & 0x0000ffffull) << 16); - MLX4_GET(lg, outbox, QUERY_FW_PPF_ID); - dev->caps.function = lg; - MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) { @@ -892,11 +649,6 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev) MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET); fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2; - MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET); - MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET); - fw->comm_bar = (fw->comm_bar >> 6) * 2; - mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n", - fw->comm_bar, fw->comm_base); mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2); /* @@ -959,7 +711,7 @@ int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter) outbox = mailbox->buf; err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + MLX4_CMD_TIME_CLASS_A); if (err) goto out; @@ -991,7 +743,6 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) #define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f) #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30) #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37) -#define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38) #define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40) #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) @@ -1080,11 +831,10 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) /* UAR attributes */ - MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET); + MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET); MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET); - err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000, - MLX4_CMD_NATIVE); + err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000); if (err) mlx4_err(dev, "INIT_HCA returns %d\n", err); @@ -1093,101 +843,6 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) return err; } -int mlx4_QUERY_HCA(struct mlx4_dev *dev, - struct mlx4_init_hca_param *param) -{ - struct mlx4_cmd_mailbox 
*mailbox; - __be32 *outbox; - int err; - -#define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04 - - mailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(mailbox)) - return PTR_ERR(mailbox); - outbox = mailbox->buf; - - err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, - MLX4_CMD_QUERY_HCA, - MLX4_CMD_TIME_CLASS_B, - !mlx4_is_slave(dev)); - if (err) - goto out; - - MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET); - - /* QPC/EEC/CQC/EQC/RDMARC attributes */ - - MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET); - MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET); - MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET); - MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET); - MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET); - MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET); - MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET); - MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET); - MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET); - MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET); - MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); - MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); - - /* multicast attributes */ - - MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); - MLX4_GET(param->log_mc_entry_sz, outbox, - INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); - MLX4_GET(param->log_mc_hash_sz, outbox, - INIT_HCA_LOG_MC_HASH_SZ_OFFSET); - MLX4_GET(param->log_mc_table_sz, outbox, - INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); - - /* TPT attributes */ - - MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); - MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); - MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET); - MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET); - - /* UAR attributes */ - - MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET); - MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); - -out: - mlx4_free_cmd_mailbox(dev, mailbox); - - return err; -} - -int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - int port = vhcr->in_modifier; - int err; - - if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port)) - return 0; - - if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB) - return -ENODEV; - - /* Enable port only if it was previously disabled */ - if (!priv->mfunc.master.init_port_ref[port]) { - err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); - if (err) - return err; - priv->mfunc.master.slave_state[slave].init_port_mask |= - (1 << port); - } - ++priv->mfunc.master.init_port_ref[port]; - return 0; -} - int mlx4_INIT_PORT(struct mlx4_dev *dev, int port) { struct mlx4_cmd_mailbox *mailbox; @@ -1231,62 +886,33 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port) MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET); err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + MLX4_CMD_TIME_CLASS_A); mlx4_free_cmd_mailbox(dev, mailbox); } else err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + MLX4_CMD_TIME_CLASS_A); return err; } EXPORT_SYMBOL_GPL(mlx4_INIT_PORT); -int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct 
mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - int port = vhcr->in_modifier; - int err; - - if (!(priv->mfunc.master.slave_state[slave].init_port_mask & - (1 << port))) - return 0; - - if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB) - return -ENODEV; - if (priv->mfunc.master.init_port_ref[port] == 1) { - err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000, - MLX4_CMD_NATIVE); - if (err) - return err; - } - priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); - --priv->mfunc.master.init_port_ref[port]; - return 0; -} - int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port) { - return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000, - MLX4_CMD_WRAPPED); + return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000); } EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT); int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) { - return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000, - MLX4_CMD_NATIVE); + return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000); } int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) { int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0, MLX4_CMD_SET_ICM_SIZE, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + MLX4_CMD_TIME_CLASS_A); if (ret) return ret; @@ -1303,7 +929,7 @@ int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) int mlx4_NOP(struct mlx4_dev *dev) { /* Input modifier of 0x1f means "finish as soon as possible." */ - return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE); + return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100); } #define MLX4_WOL_SETUP_MODE (5 << 28) @@ -1312,8 +938,7 @@ int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port) u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3, - MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_NATIVE); + MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A); } EXPORT_SYMBOL_GPL(mlx4_wol_read); @@ -1322,6 +947,6 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port) u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + MLX4_CMD_TIME_CLASS_A); } EXPORT_SYMBOL_GPL(mlx4_wol_write); diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/fw.h b/trunk/drivers/net/ethernet/mellanox/mlx4/fw.h index 119e0cc9fab3..bf5ec2286528 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -111,30 +111,11 @@ struct mlx4_dev_cap { u64 max_icm_sz; int max_gso_sz; u8 supported_port_types[MLX4_MAX_PORTS + 1]; - u8 suggested_type[MLX4_MAX_PORTS + 1]; - u8 default_sense[MLX4_MAX_PORTS + 1]; u8 log_max_macs[MLX4_MAX_PORTS + 1]; u8 log_max_vlans[MLX4_MAX_PORTS + 1]; u32 max_counters; }; -struct mlx4_func_cap { - u8 function; - u8 num_ports; - u8 flags; - u32 pf_context_behaviour; - int qp_quota; - int cq_quota; - int srq_quota; - int mpt_quota; - int mtt_quota; - int max_eq; - int reserved_eq; - int mcg_quota; - u8 physical_port[MLX4_MAX_PORTS + 1]; - u8 port_flags[MLX4_MAX_PORTS + 1]; -}; - struct mlx4_adapter { char board_id[MLX4_BOARD_ID_LEN]; u8 inta_pin; @@ -152,7 +133,6 @@ struct mlx4_init_hca_param { u64 dmpt_base; u64 cmpt_base; u64 mtt_base; - u64 global_caps; u16 log_mc_entry_sz; u16 log_mc_hash_sz; u8 log_num_qps; @@ -163,7 +143,6 @@ struct mlx4_init_hca_param { u8 log_mc_table_sz; u8 log_mpt_sz; u8 log_uar_sz; - u8 uar_page_sz; /* log pg sz in 4k 
chunks */ }; struct mlx4_init_ib_param { @@ -188,19 +167,12 @@ struct mlx4_set_ib_param { }; int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap); -int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap); -int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm); int mlx4_UNMAP_FA(struct mlx4_dev *dev); int mlx4_RUN_FW(struct mlx4_dev *dev); int mlx4_QUERY_FW(struct mlx4_dev *dev); int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter); int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param); -int mlx4_QUERY_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param); int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic); int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt); int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages); diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/icm.c b/trunk/drivers/net/ethernet/mellanox/mlx4/icm.c index a9ade1c3cad5..02393fdf44c1 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -213,7 +213,7 @@ static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt) static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count) { return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + MLX4_CMD_TIME_CLASS_B); } int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm) @@ -223,8 +223,7 @@ int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm) int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev) { - return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B); } int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj) diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/intf.c b/trunk/drivers/net/ethernet/mellanox/mlx4/intf.c index b4e9f6f5cc04..ca6feb55bd94 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -142,8 +142,7 @@ int mlx4_register_device(struct mlx4_dev *dev) mlx4_add_device(intf, priv); mutex_unlock(&intf_mutex); - if (!mlx4_is_slave(dev)) - mlx4_start_catas_poll(dev); + mlx4_start_catas_poll(dev); return 0; } @@ -153,8 +152,7 @@ void mlx4_unregister_device(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_interface *intf; - if (!mlx4_is_slave(dev)) - mlx4_stop_catas_poll(dev); + mlx4_stop_catas_poll(dev); mutex_lock(&intf_mutex); list_for_each_entry(intf, &intf_list, list) diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/main.c b/trunk/drivers/net/ethernet/mellanox/mlx4/main.c index 6bb62c580e2d..94bbc85a532d 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/main.c @@ -40,7 +40,6 @@ #include #include #include -#include #include #include @@ -76,42 +75,21 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); #endif /* CONFIG_PCI_MSI */ -static int num_vfs; -module_param(num_vfs, int, 0444); -MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0"); - -static int probe_vf; -module_param(probe_vf, int, 0644); -MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver 
(num_vfs > 0)"); - -int mlx4_log_num_mgm_entry_size = 10; -module_param_named(log_num_mgm_entry_size, - mlx4_log_num_mgm_entry_size, int, 0444); -MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" - " of qp per mcg, for example:" - " 10 gives 248.range: 9<=" - " log_num_mgm_entry_size <= 12"); - -#define MLX4_VF (1 << 0) - -#define HCA_GLOBAL_CAP_MASK 0 -#define PF_CONTEXT_BEHAVIOUR_MASK 0 - static char mlx4_version[] __devinitdata = DRV_NAME ": Mellanox ConnectX core driver v" DRV_VERSION " (" DRV_RELDATE ")\n"; static struct mlx4_profile default_profile = { - .num_qp = 1 << 18, + .num_qp = 1 << 17, .num_srq = 1 << 16, .rdmarc_per_qp = 1 << 4, .num_cq = 1 << 16, .num_mcg = 1 << 13, - .num_mpt = 1 << 19, + .num_mpt = 1 << 17, .num_mtt = 1 << 20, }; -static int log_num_mac = 7; +static int log_num_mac = 2; module_param_named(log_num_mac, log_num_mac, int, 0444); MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)"); @@ -121,33 +99,15 @@ MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); /* Log2 max number of VLANs per ETH port (0-7) */ #define MLX4_LOG_NUM_VLANS 7 -static bool use_prio; +static int use_prio; module_param_named(use_prio, use_prio, bool, 0444); MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " "(0/1, default 0)"); -int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); +static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)"); -static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE}; -static int arr_argc = 2; -module_param_array(port_type_array, int, &arr_argc, 0444); -MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default " - "1 for IB, 2 for Ethernet"); - -struct mlx4_port_config { - struct list_head list; - enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; - struct pci_dev *pdev; -}; - -static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev) -{ - return dev->caps.reserved_eqs + - MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1); -} - int mlx4_check_port_params(struct mlx4_dev *dev, enum mlx4_port_type *port_type) { @@ -180,8 +140,10 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev) { int i; + dev->caps.port_mask = 0; for (i = 1; i <= dev->caps.num_ports; ++i) - dev->caps.port_mask[i] = dev->caps.port_type[i]; + if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB) + dev->caps.port_mask |= 1 << (i - 1); } static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) @@ -226,15 +188,12 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i]; dev->caps.def_mac[i] = dev_cap->def_mac[i]; dev->caps.supported_type[i] = dev_cap->supported_port_types[i]; - dev->caps.suggested_type[i] = dev_cap->suggested_type[i]; - dev->caps.default_sense[i] = dev_cap->default_sense[i]; dev->caps.trans_type[i] = dev_cap->trans_type[i]; dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i]; dev->caps.wavelength[i] = dev_cap->wavelength[i]; dev->caps.trans_code[i] = dev_cap->trans_code[i]; } - dev->caps.uar_page_size = PAGE_SIZE; dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay; dev->caps.bf_reg_size = dev_cap->bf_reg_size; @@ -248,7 +207,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.reserved_srqs = 
dev_cap->reserved_srqs; dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; - dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); + dev->caps.num_qp_per_mgm = MLX4_QP_PER_MGM; /* * Subtract 1 from the limit because we need to allocate a * spare CQE so the HCA HW can tell the difference between an @@ -257,18 +216,17 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.max_cqes = dev_cap->max_cq_sz - 1; dev->caps.reserved_cqs = dev_cap->reserved_cqs; dev->caps.reserved_eqs = dev_cap->reserved_eqs; - dev->caps.reserved_mtts = dev_cap->reserved_mtts; + dev->caps.mtts_per_seg = 1 << log_mtts_per_seg; + dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts, + dev->caps.mtts_per_seg); dev->caps.reserved_mrws = dev_cap->reserved_mrws; - - /* The first 128 UARs are used for EQ doorbells */ - dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars); + dev->caps.reserved_uars = dev_cap->reserved_uars; dev->caps.reserved_pds = dev_cap->reserved_pds; dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? dev_cap->reserved_xrcds : 0; dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? dev_cap->max_xrcds : 0; - dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz; - + dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz; dev->caps.max_msg_sz = dev_cap->max_msg_sz; dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); dev->caps.flags = dev_cap->flags; @@ -277,70 +235,18 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.stat_rate_support = dev_cap->stat_rate_support; dev->caps.max_gso_sz = dev_cap->max_gso_sz; - /* Sense port always allowed on supported devices for ConnectX1 and 2 */ - if (dev->pdev->device != 0x1003) - dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; - dev->caps.log_num_macs = log_num_mac; dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; dev->caps.log_num_prios = use_prio ? 3 : 0; for (i = 1; i <= dev->caps.num_ports; ++i) { - dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; - if (dev->caps.supported_type[i]) { - /* if only ETH is supported - assign ETH */ - if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH) - dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; - /* if only IB is supported, - * assign IB only if SRIOV is off*/ - else if (dev->caps.supported_type[i] == - MLX4_PORT_TYPE_IB) { - if (dev->flags & MLX4_FLAG_SRIOV) - dev->caps.port_type[i] = - MLX4_PORT_TYPE_NONE; - else - dev->caps.port_type[i] = - MLX4_PORT_TYPE_IB; - /* if IB and ETH are supported, - * first of all check if SRIOV is on */ - } else if (dev->flags & MLX4_FLAG_SRIOV) - dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; - else { - /* In non-SRIOV mode, we set the port type - * according to user selection of port type, - * if usere selected none, take the FW hint */ - if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE) - dev->caps.port_type[i] = dev->caps.suggested_type[i] ? - MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB; - else - dev->caps.port_type[i] = port_type_array[i-1]; - } - } - /* - * Link sensing is allowed on the port if 3 conditions are true: - * 1. Both protocols are supported on the port. - * 2. Different types are supported on the port - * 3. 
FW declared that it supports link sensing - */ + if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH) + dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; + else + dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; + dev->caps.possible_type[i] = dev->caps.port_type[i]; mlx4_priv(dev)->sense.sense_allowed[i] = - ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) && - (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && - (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)); - - /* - * If "default_sense" bit is set, we move the port to "AUTO" mode - * and perform sense_port FW command to try and set the correct - * port type from beginning - */ - if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) { - enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE; - dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO; - mlx4_SENSE_PORT(dev, i, &sensed_port); - if (sensed_port != MLX4_PORT_TYPE_NONE) - dev->caps.port_type[i] = sensed_port; - } else { - dev->caps.possible_type[i] = dev->caps.port_type[i]; - } + dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO; if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) { dev->caps.log_num_macs = dev_cap->log_max_macs[i]; @@ -356,6 +262,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) } } + mlx4_set_port_mask(dev); + dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters); dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps; @@ -374,149 +282,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) return 0; } -/*The function checks if there are live vf, return the num of them*/ -static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_slave_state *s_state; - int i; - int ret = 0; - - for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) { - s_state = &priv->mfunc.master.slave_state[i]; - if (s_state->active && s_state->last_cmd != - MLX4_COMM_CMD_RESET) { - mlx4_warn(dev, "%s: slave: %d is still active\n", - __func__, i); - ret++; - } - } - return ret; -} - -static int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_slave_state *s_slave; - - if (!mlx4_is_master(dev)) - return 0; - - s_slave = &priv->mfunc.master.slave_state[slave]; - return !!s_slave->active; -} -EXPORT_SYMBOL(mlx4_is_slave_active); - -static int mlx4_slave_cap(struct mlx4_dev *dev) -{ - int err; - u32 page_size; - struct mlx4_dev_cap dev_cap; - struct mlx4_func_cap func_cap; - struct mlx4_init_hca_param hca_param; - int i; - - memset(&hca_param, 0, sizeof(hca_param)); - err = mlx4_QUERY_HCA(dev, &hca_param); - if (err) { - mlx4_err(dev, "QUERY_HCA command failed, aborting.\n"); - return err; - } - - /*fail if the hca has an unknown capability */ - if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) != - HCA_GLOBAL_CAP_MASK) { - mlx4_err(dev, "Unknown hca global capabilities\n"); - return -ENOSYS; - } - - mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; - - memset(&dev_cap, 0, sizeof(dev_cap)); - err = mlx4_dev_cap(dev, &dev_cap); - if (err) { - mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); - return err; - } - - page_size = ~dev->caps.page_size_cap + 1; - mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); - if (page_size > PAGE_SIZE) { - mlx4_err(dev, "HCA minimum page size of %d bigger than " - "kernel PAGE_SIZE of %ld, aborting.\n", - page_size, PAGE_SIZE); - return -ENODEV; - } - - /* slave gets uar page size from QUERY_HCA fw command */ - 
dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12); - - /* TODO: relax this assumption */ - if (dev->caps.uar_page_size != PAGE_SIZE) { - mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n", - dev->caps.uar_page_size, PAGE_SIZE); - return -ENODEV; - } - - memset(&func_cap, 0, sizeof(func_cap)); - err = mlx4_QUERY_FUNC_CAP(dev, &func_cap); - if (err) { - mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n"); - return err; - } - - if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) != - PF_CONTEXT_BEHAVIOUR_MASK) { - mlx4_err(dev, "Unknown pf context behaviour\n"); - return -ENOSYS; - } - - dev->caps.function = func_cap.function; - dev->caps.num_ports = func_cap.num_ports; - dev->caps.num_qps = func_cap.qp_quota; - dev->caps.num_srqs = func_cap.srq_quota; - dev->caps.num_cqs = func_cap.cq_quota; - dev->caps.num_eqs = func_cap.max_eq; - dev->caps.reserved_eqs = func_cap.reserved_eq; - dev->caps.num_mpts = func_cap.mpt_quota; - dev->caps.num_mtts = func_cap.mtt_quota; - dev->caps.num_pds = MLX4_NUM_PDS; - dev->caps.num_mgms = 0; - dev->caps.num_amgms = 0; - - for (i = 1; i <= dev->caps.num_ports; ++i) - dev->caps.port_mask[i] = dev->caps.port_type[i]; - - if (dev->caps.num_ports > MLX4_MAX_PORTS) { - mlx4_err(dev, "HCA has %d ports, but we only support %d, " - "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS); - return -ENODEV; - } - - if (dev->caps.uar_page_size * (dev->caps.num_uars - - dev->caps.reserved_uars) > - pci_resource_len(dev->pdev, 2)) { - mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than " - "PCI resource 2 size of 0x%llx, aborting.\n", - dev->caps.uar_page_size * dev->caps.num_uars, - (unsigned long long) pci_resource_len(dev->pdev, 2)); - return -ENODEV; - } - -#if 0 - mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux); - mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n", - dev->caps.num_uars, dev->caps.reserved_uars, - dev->caps.uar_page_size * dev->caps.num_uars, - pci_resource_len(dev->pdev, 2)); - mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs, - dev->caps.reserved_eqs); - mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n", - dev->caps.num_pds, dev->caps.reserved_pds, - dev->caps.slave_pd_shift, dev->caps.pd_base); -#endif - return 0; -} /* * Change the port configuration of the device. @@ -612,8 +377,7 @@ static ssize_t set_port_type(struct device *dev, types[i] = mdev->caps.port_type[i+1]; } - if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && - !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) { + if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { for (i = 1; i <= mdev->caps.num_ports; i++) { if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { mdev->caps.possible_type[i] = mdev->caps.port_type[i]; @@ -687,7 +451,6 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, { struct mlx4_priv *priv = mlx4_priv(dev); int err; - int num_eqs; err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, cmpt_base + @@ -717,14 +480,12 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, if (err) goto err_srq; - num_eqs = (mlx4_is_master(dev)) ? 
- roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) : - dev->caps.num_eqs; err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, cmpt_base + ((u64) (MLX4_CMPT_TYPE_EQ * cmpt_entry_sz) << MLX4_CMPT_SHIFT), - cmpt_entry_sz, num_eqs, num_eqs, 0, 0); + cmpt_entry_sz, + dev->caps.num_eqs, dev->caps.num_eqs, 0, 0); if (err) goto err_cq; @@ -748,7 +509,6 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, { struct mlx4_priv *priv = mlx4_priv(dev); u64 aux_pages; - int num_eqs; int err; err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); @@ -780,13 +540,10 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, goto err_unmap_aux; } - - num_eqs = (mlx4_is_master(dev)) ? - roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) : - dev->caps.num_eqs; err = mlx4_init_icm_table(dev, &priv->eq_table.table, init_hca->eqc_base, dev_cap->eqc_entry_sz, - num_eqs, num_eqs, 0, 0); + dev->caps.num_eqs, dev->caps.num_eqs, + 0, 0); if (err) { mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); goto err_unmap_cmpt; @@ -806,7 +563,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, init_hca->mtt_base, dev->caps.mtt_entry_sz, - dev->caps.num_mtts, + dev->caps.num_mtt_segs, dev->caps.reserved_mtts, 1, 0); if (err) { mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); @@ -893,8 +650,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, * and it's a lot easier than trying to track ref counts. */ err = mlx4_init_icm_table(dev, &priv->mcg_table.table, - init_hca->mc_base, - mlx4_get_mgm_entry_size(dev), + init_hca->mc_base, MLX4_MGM_ENTRY_SIZE, dev->caps.num_mgms + dev->caps.num_amgms, dev->caps.num_mgms + dev->caps.num_amgms, 0, 0); @@ -970,16 +726,6 @@ static void mlx4_free_icms(struct mlx4_dev *dev) mlx4_free_icm(dev, priv->fw.aux_icm, 0); } -static void mlx4_slave_exit(struct mlx4_dev *dev) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - - down(&priv->cmd.slave_sem); - if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME)) - mlx4_warn(dev, "Failed to close slave function.\n"); - up(&priv->cmd.slave_sem); -} - static int map_bf_area(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -987,10 +733,8 @@ static int map_bf_area(struct mlx4_dev *dev) resource_size_t bf_len; int err = 0; - bf_start = pci_resource_start(dev->pdev, 2) + - (dev->caps.num_uars << PAGE_SHIFT); - bf_len = pci_resource_len(dev->pdev, 2) - - (dev->caps.num_uars << PAGE_SHIFT); + bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT); + bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT); priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); if (!priv->bf_mapping) err = -ENOMEM; @@ -1007,81 +751,10 @@ static void unmap_bf_area(struct mlx4_dev *dev) static void mlx4_close_hca(struct mlx4_dev *dev) { unmap_bf_area(dev); - if (mlx4_is_slave(dev)) - mlx4_slave_exit(dev); - else { - mlx4_CLOSE_HCA(dev, 0); - mlx4_free_icms(dev); - mlx4_UNMAP_FA(dev); - mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); - } -} - -static int mlx4_init_slave(struct mlx4_dev *dev) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - u64 dma = (u64) priv->mfunc.vhcr_dma; - int num_of_reset_retries = NUM_OF_RESET_RETRIES; - int ret_from_reset = 0; - u32 slave_read; - u32 cmd_channel_ver; - - down(&priv->cmd.slave_sem); - priv->cmd.max_cmds = 1; - mlx4_warn(dev, "Sending reset\n"); - ret_from_reset = 
mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, - MLX4_COMM_TIME); - /* if we are in the middle of flr the slave will try - * NUM_OF_RESET_RETRIES times before leaving.*/ - if (ret_from_reset) { - if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { - msleep(SLEEP_TIME_IN_RESET); - while (ret_from_reset && num_of_reset_retries) { - mlx4_warn(dev, "slave is currently in the" - "middle of FLR. retrying..." - "(try num:%d)\n", - (NUM_OF_RESET_RETRIES - - num_of_reset_retries + 1)); - ret_from_reset = - mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, - 0, MLX4_COMM_TIME); - num_of_reset_retries = num_of_reset_retries - 1; - } - } else - goto err; - } - - /* check the driver version - the slave I/F revision - * must match the master's */ - slave_read = swab32(readl(&priv->mfunc.comm->slave_read)); - cmd_channel_ver = mlx4_comm_get_version(); - - if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != - MLX4_COMM_GET_IF_REV(slave_read)) { - mlx4_err(dev, "slave driver version is not supported" - " by the master\n"); - goto err; - } - - mlx4_warn(dev, "Sending vhcr0\n"); - if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48, - MLX4_COMM_TIME)) - goto err; - if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32, - MLX4_COMM_TIME)) - goto err; - if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16, - MLX4_COMM_TIME)) - goto err; - if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME)) - goto err; - up(&priv->cmd.slave_sem); - return 0; - -err: - mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0); - up(&priv->cmd.slave_sem); - return -EIO; + mlx4_CLOSE_HCA(dev, 0); + mlx4_free_icms(dev); + mlx4_UNMAP_FA(dev); + mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); } static int mlx4_init_hca(struct mlx4_dev *dev) @@ -1095,75 +768,55 @@ static int mlx4_init_hca(struct mlx4_dev *dev) u64 icm_size; int err; - if (!mlx4_is_slave(dev)) { - err = mlx4_QUERY_FW(dev); - if (err) { - if (err == -EACCES) - mlx4_info(dev, "non-primary physical function, skipping.\n"); - else - mlx4_err(dev, "QUERY_FW command failed, aborting.\n"); - goto unmap_bf; - } - - err = mlx4_load_fw(dev); - if (err) { - mlx4_err(dev, "Failed to start FW, aborting.\n"); - goto unmap_bf; - } - - mlx4_cfg.log_pg_sz_m = 1; - mlx4_cfg.log_pg_sz = 0; - err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); - if (err) - mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); - - err = mlx4_dev_cap(dev, &dev_cap); - if (err) { - mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); - goto err_stop_fw; - } + err = mlx4_QUERY_FW(dev); + if (err) { + if (err == -EACCES) + mlx4_info(dev, "non-primary physical function, skipping.\n"); + else + mlx4_err(dev, "QUERY_FW command failed, aborting.\n"); + return err; + } - profile = default_profile; + err = mlx4_load_fw(dev); + if (err) { + mlx4_err(dev, "Failed to start FW, aborting.\n"); + return err; + } - icm_size = mlx4_make_profile(dev, &profile, &dev_cap, - &init_hca); - if ((long long) icm_size < 0) { - err = icm_size; - goto err_stop_fw; - } + mlx4_cfg.log_pg_sz_m = 1; + mlx4_cfg.log_pg_sz = 0; + err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); + if (err) + mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); - init_hca.log_uar_sz = ilog2(dev->caps.num_uars); - init_hca.uar_page_sz = PAGE_SHIFT - 12; + err = mlx4_dev_cap(dev, &dev_cap); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + goto err_stop_fw; + } - err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); - if (err) - goto err_stop_fw; + profile = default_profile; - err = mlx4_INIT_HCA(dev, &init_hca); - if (err) { - mlx4_err(dev, 
"INIT_HCA command failed, aborting.\n"); - goto err_free_icm; - } - } else { - err = mlx4_init_slave(dev); - if (err) { - mlx4_err(dev, "Failed to initialize slave\n"); - goto unmap_bf; - } - - err = mlx4_slave_cap(dev); - if (err) { - mlx4_err(dev, "Failed to obtain slave caps\n"); - goto err_close; - } + icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca); + if ((long long) icm_size < 0) { + err = icm_size; + goto err_stop_fw; } if (map_bf_area(dev)) mlx4_dbg(dev, "Failed to map blue flame area\n"); - /*Only the master set the ports, all the rest got it from it.*/ - if (!mlx4_is_slave(dev)) - mlx4_set_port_mask(dev); + init_hca.log_uar_sz = ilog2(dev->caps.num_uars); + + err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); + if (err) + goto err_stop_fw; + + err = mlx4_INIT_HCA(dev, &init_hca); + if (err) { + mlx4_err(dev, "INIT_HCA command failed, aborting.\n"); + goto err_free_icm; + } err = mlx4_QUERY_ADAPTER(dev, &adapter); if (err) { @@ -1177,19 +830,16 @@ static int mlx4_init_hca(struct mlx4_dev *dev) return 0; err_close: - mlx4_close_hca(dev); + mlx4_CLOSE_HCA(dev, 0); err_free_icm: - if (!mlx4_is_slave(dev)) - mlx4_free_icms(dev); + mlx4_free_icms(dev); err_stop_fw: - if (!mlx4_is_slave(dev)) { - mlx4_UNMAP_FA(dev); - mlx4_free_icm(dev, priv->fw.fw_icm, 0); - } -unmap_bf: unmap_bf_area(dev); + mlx4_UNMAP_FA(dev); + mlx4_free_icm(dev, priv->fw.fw_icm, 0); + return err; } @@ -1336,56 +986,55 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) goto err_srq_table_free; } - if (!mlx4_is_slave(dev)) { - err = mlx4_init_mcg_table(dev); - if (err) { - mlx4_err(dev, "Failed to initialize " - "multicast group table, aborting.\n"); - goto err_qp_table_free; - } + err = mlx4_init_mcg_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize " + "multicast group table, aborting.\n"); + goto err_qp_table_free; } err = mlx4_init_counters_table(dev); if (err && err != -ENOENT) { mlx4_err(dev, "Failed to initialize counters table, aborting.\n"); - goto err_mcg_table_free; + goto err_counters_table_free; } - if (!mlx4_is_slave(dev)) { - for (port = 1; port <= dev->caps.num_ports; port++) { - ib_port_default_caps = 0; - err = mlx4_get_port_ib_caps(dev, port, - &ib_port_default_caps); - if (err) - mlx4_warn(dev, "failed to get port %d default " - "ib capabilities (%d). Continuing " - "with caps = 0\n", port, err); - dev->caps.ib_port_def_cap[port] = ib_port_default_caps; - - err = mlx4_check_ext_port_caps(dev, port); - if (err) - mlx4_warn(dev, "failed to get port %d extended " - "port capabilities support info (%d)." - " Assuming not supported\n", - port, err); + for (port = 1; port <= dev->caps.num_ports; port++) { + enum mlx4_port_type port_type = 0; + mlx4_SENSE_PORT(dev, port, &port_type); + if (port_type) + dev->caps.port_type[port] = port_type; + ib_port_default_caps = 0; + err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps); + if (err) + mlx4_warn(dev, "failed to get port %d default " + "ib capabilities (%d). Continuing with " + "caps = 0\n", port, err); + dev->caps.ib_port_def_cap[port] = ib_port_default_caps; - err = mlx4_SET_PORT(dev, port); - if (err) { - mlx4_err(dev, "Failed to set port %d, aborting\n", - port); - goto err_counters_table_free; - } + err = mlx4_check_ext_port_caps(dev, port); + if (err) + mlx4_warn(dev, "failed to get port %d extended " + "port capabilities support info (%d)." 
+ " Assuming not supported\n", port, err); + + err = mlx4_SET_PORT(dev, port); + if (err) { + mlx4_err(dev, "Failed to set port %d, aborting\n", + port); + goto err_mcg_table_free; } } + mlx4_set_port_mask(dev); return 0; -err_counters_table_free: - mlx4_cleanup_counters_table(dev); - err_mcg_table_free: mlx4_cleanup_mcg_table(dev); +err_counters_table_free: + mlx4_cleanup_counters_table(dev); + err_qp_table_free: mlx4_cleanup_qp_table(dev); @@ -1432,16 +1081,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) int i; if (msi_x) { - /* In multifunction mode each function gets 2 msi-X vectors - * one for data path completions anf the other for asynch events - * or command completions */ - if (mlx4_is_mfunc(dev)) { - nreq = 2; - } else { - nreq = min_t(int, dev->caps.num_eqs - - dev->caps.reserved_eqs, nreq); - } - + nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, + nreq); entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); if (!entries) goto no_msi; @@ -1497,24 +1138,16 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) info->dev = dev; info->port = port; - if (!mlx4_is_slave(dev)) { - INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL); - mlx4_init_mac_table(dev, &info->mac_table); - mlx4_init_vlan_table(dev, &info->vlan_table); - info->base_qpn = - dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] + + mlx4_init_mac_table(dev, &info->mac_table); + mlx4_init_vlan_table(dev, &info->vlan_table); + info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] + (port - 1) * (1 << log_num_mac); - } sprintf(info->dev_name, "mlx4_port%d", port); info->port_attr.attr.name = info->dev_name; - if (mlx4_is_mfunc(dev)) - info->port_attr.attr.mode = S_IRUGO; - else { - info->port_attr.attr.mode = S_IRUGO | S_IWUSR; - info->port_attr.store = set_port_type; - } + info->port_attr.attr.mode = S_IRUGO | S_IWUSR; info->port_attr.show = show_port_type; + info->port_attr.store = set_port_type; sysfs_attr_init(&info->port_attr.attr); err = device_create_file(&dev->pdev->dev, &info->port_attr); @@ -1587,46 +1220,6 @@ static void mlx4_clear_steering(struct mlx4_dev *dev) kfree(priv->steer); } -static int extended_func_num(struct pci_dev *pdev) -{ - return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn); -} - -#define MLX4_OWNER_BASE 0x8069c -#define MLX4_OWNER_SIZE 4 - -static int mlx4_get_ownership(struct mlx4_dev *dev) -{ - void __iomem *owner; - u32 ret; - - owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, - MLX4_OWNER_SIZE); - if (!owner) { - mlx4_err(dev, "Failed to obtain ownership bit\n"); - return -ENOMEM; - } - - ret = readl(owner); - iounmap(owner); - return (int) !!ret; -} - -static void mlx4_free_ownership(struct mlx4_dev *dev) -{ - void __iomem *owner; - - owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, - MLX4_OWNER_SIZE); - if (!owner) { - mlx4_err(dev, "Failed to obtain ownership bit\n"); - return; - } - writel(0, owner); - msleep(1000); - iounmap(owner); -} - static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { struct mlx4_priv *priv; @@ -1642,20 +1235,13 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) "aborting.\n"); return err; } - if (num_vfs > MLX4_MAX_NUM_VF) { - printk(KERN_ERR "There are more VF's (%d) than allowed(%d)\n", - num_vfs, MLX4_MAX_NUM_VF); - return -EINVAL; - } + /* - * Check for BARs. + * Check for BARs. 
We expect 0: 1MB */ - if (((id == NULL) || !(id->driver_data & MLX4_VF)) && - !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { - dev_err(&pdev->dev, "Missing DCS, aborting." - "(id == 0X%p, id->driver_data: 0x%lx," - " pci_resource_flags(pdev, 0):0x%lx)\n", id, - id ? id->driver_data : 0, pci_resource_flags(pdev, 0)); + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || + pci_resource_len(pdev, 0) != 1 << 20) { + dev_err(&pdev->dev, "Missing DCS, aborting.\n"); err = -ENODEV; goto err_disable_pdev; } @@ -1719,132 +1305,42 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) mutex_init(&priv->bf_mutex); dev->rev_id = pdev->revision; - /* Detect if this device is a virtual function */ - if (id && id->driver_data & MLX4_VF) { - /* When acting as pf, we normally skip vfs unless explicitly - * requested to probe them. */ - if (num_vfs && extended_func_num(pdev) > probe_vf) { - mlx4_warn(dev, "Skipping virtual function:%d\n", - extended_func_num(pdev)); - err = -ENODEV; - goto err_free_dev; - } - mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); - dev->flags |= MLX4_FLAG_SLAVE; - } else { - /* We reset the device and enable SRIOV only for physical - * devices. Try to claim ownership on the device; - * if already taken, skip -- do not allow multiple PFs */ - err = mlx4_get_ownership(dev); - if (err) { - if (err < 0) - goto err_free_dev; - else { - mlx4_warn(dev, "Multiple PFs not yet supported." - " Skipping PF.\n"); - err = -EINVAL; - goto err_free_dev; - } - } - if (num_vfs) { - mlx4_warn(dev, "Enabling sriov with:%d vfs\n", num_vfs); - err = pci_enable_sriov(pdev, num_vfs); - if (err) { - mlx4_err(dev, "Failed to enable sriov," - "continuing without sriov enabled" - " (err = %d).\n", err); - num_vfs = 0; - err = 0; - } else { - mlx4_warn(dev, "Running in master mode\n"); - dev->flags |= MLX4_FLAG_SRIOV | - MLX4_FLAG_MASTER; - dev->num_vfs = num_vfs; - } - } - - /* - * Now reset the HCA before we touch the PCI capabilities or - * attempt a firmware command, since a boot ROM may have left - * the HCA in an undefined state. - */ - err = mlx4_reset(dev); - if (err) { - mlx4_err(dev, "Failed to reset HCA, aborting.\n"); - goto err_rel_own; - } + /* + * Now reset the HCA before we touch the PCI capabilities or + * attempt a firmware command, since a boot ROM may have left + * the HCA in an undefined state. + */ + err = mlx4_reset(dev); + if (err) { + mlx4_err(dev, "Failed to reset HCA, aborting.\n"); + goto err_free_dev; } -slave_start: if (mlx4_cmd_init(dev)) { mlx4_err(dev, "Failed to init command interface, aborting.\n"); - goto err_sriov; - } - - /* In slave functions, the communication channel must be initialized - * before posting commands. 
Also, init num_slaves before calling - * mlx4_init_hca */ - if (mlx4_is_mfunc(dev)) { - if (mlx4_is_master(dev)) - dev->num_slaves = MLX4_MAX_NUM_SLAVES; - else { - dev->num_slaves = 0; - if (mlx4_multi_func_init(dev)) { - mlx4_err(dev, "Failed to init slave mfunc" - " interface, aborting.\n"); - goto err_cmd; - } - } + goto err_free_dev; } err = mlx4_init_hca(dev); - if (err) { - if (err == -EACCES) { - /* Not primary Physical function - * Running in slave mode */ - mlx4_cmd_cleanup(dev); - dev->flags |= MLX4_FLAG_SLAVE; - dev->flags &= ~MLX4_FLAG_MASTER; - goto slave_start; - } else - goto err_mfunc; - } - - /* In master functions, the communication channel must be initialized - * after obtaining its address from fw */ - if (mlx4_is_master(dev)) { - if (mlx4_multi_func_init(dev)) { - mlx4_err(dev, "Failed to init master mfunc" - "interface, aborting.\n"); - goto err_close; - } - } + if (err) + goto err_cmd; err = mlx4_alloc_eq_table(dev); if (err) - goto err_master_mfunc; + goto err_close; priv->msix_ctl.pool_bm = 0; spin_lock_init(&priv->msix_ctl.pool_lock); mlx4_enable_msi_x(dev); - if ((mlx4_is_mfunc(dev)) && - !(dev->flags & MLX4_FLAG_MSI_X)) { - mlx4_err(dev, "INTx is not supported in multi-function mode." - " aborting.\n"); - goto err_free_eq; - } - if (!mlx4_is_slave(dev)) { - err = mlx4_init_steering(dev); - if (err) - goto err_free_eq; - } + err = mlx4_init_steering(dev); + if (err) + goto err_free_eq; err = mlx4_setup_hca(dev); - if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && - !mlx4_is_mfunc(dev)) { + if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) { dev->flags &= ~MLX4_FLAG_MSI_X; pci_disable_msix(pdev); err = mlx4_setup_hca(dev); @@ -1887,37 +1383,20 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) mlx4_cleanup_uar_table(dev); err_steer: - if (!mlx4_is_slave(dev)) - mlx4_clear_steering(dev); + mlx4_clear_steering(dev); err_free_eq: mlx4_free_eq_table(dev); -err_master_mfunc: - if (mlx4_is_master(dev)) - mlx4_multi_func_cleanup(dev); - err_close: if (dev->flags & MLX4_FLAG_MSI_X) pci_disable_msix(pdev); mlx4_close_hca(dev); -err_mfunc: - if (mlx4_is_slave(dev)) - mlx4_multi_func_cleanup(dev); - err_cmd: mlx4_cmd_cleanup(dev); -err_sriov: - if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) - pci_disable_sriov(pdev); - -err_rel_own: - if (!mlx4_is_slave(dev)) - mlx4_free_ownership(dev); - err_free_dev: kfree(priv); @@ -1945,12 +1424,6 @@ static void mlx4_remove_one(struct pci_dev *pdev) int p; if (dev) { - /* in SRIOV it is not allowed to unload the pf's - * driver while there are alive vf's */ - if (mlx4_is_master(dev)) { - if (mlx4_how_many_lives_vf(dev)) - printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n"); - } mlx4_stop_sense(dev); mlx4_unregister_device(dev); @@ -1970,31 +1443,17 @@ static void mlx4_remove_one(struct pci_dev *pdev) mlx4_cleanup_xrcd_table(dev); mlx4_cleanup_pd_table(dev); - if (mlx4_is_master(dev)) - mlx4_free_resource_tracker(dev); - iounmap(priv->kar); mlx4_uar_free(dev, &priv->driver_uar); mlx4_cleanup_uar_table(dev); - if (!mlx4_is_slave(dev)) - mlx4_clear_steering(dev); + mlx4_clear_steering(dev); mlx4_free_eq_table(dev); - if (mlx4_is_master(dev)) - mlx4_multi_func_cleanup(dev); mlx4_close_hca(dev); - if (mlx4_is_slave(dev)) - mlx4_multi_func_cleanup(dev); mlx4_cmd_cleanup(dev); if (dev->flags & MLX4_FLAG_MSI_X) pci_disable_msix(pdev); - if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) { - mlx4_warn(dev, "Disabling sriov\n"); - pci_disable_sriov(pdev); - } - if (!mlx4_is_slave(dev)) - 
mlx4_free_ownership(dev); kfree(priv); pci_release_regions(pdev); pci_disable_device(pdev); @@ -2009,48 +1468,33 @@ int mlx4_restart_one(struct pci_dev *pdev) } static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = { - /* MT25408 "Hermon" SDR */ - { PCI_VDEVICE(MELLANOX, 0x6340), 0 }, - /* MT25408 "Hermon" DDR */ - { PCI_VDEVICE(MELLANOX, 0x634a), 0 }, - /* MT25408 "Hermon" QDR */ - { PCI_VDEVICE(MELLANOX, 0x6354), 0 }, - /* MT25408 "Hermon" DDR PCIe gen2 */ - { PCI_VDEVICE(MELLANOX, 0x6732), 0 }, - /* MT25408 "Hermon" QDR PCIe gen2 */ - { PCI_VDEVICE(MELLANOX, 0x673c), 0 }, - /* MT25408 "Hermon" EN 10GigE */ - { PCI_VDEVICE(MELLANOX, 0x6368), 0 }, - /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ - { PCI_VDEVICE(MELLANOX, 0x6750), 0 }, - /* MT25458 ConnectX EN 10GBASE-T 10GigE */ - { PCI_VDEVICE(MELLANOX, 0x6372), 0 }, - /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ - { PCI_VDEVICE(MELLANOX, 0x675a), 0 }, - /* MT26468 ConnectX EN 10GigE PCIe gen2*/ - { PCI_VDEVICE(MELLANOX, 0x6764), 0 }, - /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ - { PCI_VDEVICE(MELLANOX, 0x6746), 0 }, - /* MT26478 ConnectX2 40GigE PCIe gen2 */ - { PCI_VDEVICE(MELLANOX, 0x676e), 0 }, - /* MT25400 Family [ConnectX-2 Virtual Function] */ - { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF }, - /* MT27500 Family [ConnectX-3] */ - { PCI_VDEVICE(MELLANOX, 0x1003), 0 }, - /* MT27500 Family [ConnectX-3 Virtual Function] */ - { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF }, - { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */ - { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */ - { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */ - { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */ - { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */ - { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */ - { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */ - { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */ - { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */ - { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */ - { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */ - { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */ + { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */ + { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */ + { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */ + { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */ + { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */ + { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */ + { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ + { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */ + { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ + { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/ + { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ + { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */ + { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */ + { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */ + { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */ + { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */ + { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */ + { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */ + { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */ + 
{ PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */ + { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */ + { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */ + { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */ + { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */ + { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */ + { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */ + { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */ { 0, } }; @@ -2079,12 +1523,6 @@ static int __init mlx4_verify_params(void) return -1; } - /* Check if module param for ports type has legal combination */ - if (port_type_array[0] == false && port_type_array[1] == true) { - printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n"); - port_type_array[0] = true; - } - return 0; } diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/mcg.c b/trunk/drivers/net/ethernet/mellanox/mlx4/mcg.c index 0785d9b2a265..978688c31046 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -44,47 +44,28 @@ static const u8 zero_gid[16]; /* automatically initialized to 0 */ -struct mlx4_mgm { - __be32 next_gid_index; - __be32 members_count; - u32 reserved[2]; - u8 gid[16]; - __be32 qp[MLX4_MAX_QP_PER_MGM]; -}; - -int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) -{ - return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE); -} - -int mlx4_get_qp_per_mgm(struct mlx4_dev *dev) -{ - return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2); -} - static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index, struct mlx4_cmd_mailbox *mailbox) { return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + MLX4_CMD_TIME_CLASS_A); } static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index, struct mlx4_cmd_mailbox *mailbox) { return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + MLX4_CMD_TIME_CLASS_A); } -static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer, +static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer, struct mlx4_cmd_mailbox *mailbox) { u32 in_mod; - in_mod = (u32) port << 16 | steer << 1; + in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1; return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1, - MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_NATIVE); + MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A); } static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, @@ -94,8 +75,7 @@ static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int err; err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod, - MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_NATIVE); + MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A); if (!err) *hash = imm; @@ -122,7 +102,7 @@ static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num, * Add new entry to steering data structure. * All promisc QPs should be added as well */ -static int new_steering_entry(struct mlx4_dev *dev, u8 port, +static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, enum mlx4_steer_type steer, unsigned int index, u32 qpn) { @@ -135,8 +115,10 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 port, struct mlx4_promisc_qp *dqp = NULL; u32 prot; int err; + u8 pf_num; - s_steer = &mlx4_priv(dev)->steer[0]; + pf_num = (dev->caps.num_ports == 1) ? 
vep_num : (vep_num << 1) | (port - 1); + s_steer = &mlx4_priv(dev)->steer[pf_num]; new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL); if (!new_entry) return -ENOMEM; @@ -148,7 +130,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 port, /* If the given qpn is also a promisc qp, * it should be inserted to duplicates list */ - pqp = get_promisc_qp(dev, 0, steer, qpn); + pqp = get_promisc_qp(dev, pf_num, steer, qpn); if (pqp) { dqp = kmalloc(sizeof *dqp, GFP_KERNEL); if (!dqp) { @@ -183,7 +165,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 port, /* don't add already existing qpn */ if (pqp->qpn == qpn) continue; - if (members_count == dev->caps.num_qp_per_mgm) { + if (members_count == MLX4_QP_PER_MGM) { /* out of space */ err = -ENOMEM; goto out_mailbox; @@ -211,7 +193,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 port, } /* update the data structures with existing steering entry */ -static int existing_steering_entry(struct mlx4_dev *dev, u8 port, +static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, enum mlx4_steer_type steer, unsigned int index, u32 qpn) { @@ -219,10 +201,12 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 port, struct mlx4_steer_index *tmp_entry, *entry = NULL; struct mlx4_promisc_qp *pqp; struct mlx4_promisc_qp *dqp; + u8 pf_num; - s_steer = &mlx4_priv(dev)->steer[0]; + pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); + s_steer = &mlx4_priv(dev)->steer[pf_num]; - pqp = get_promisc_qp(dev, 0, steer, qpn); + pqp = get_promisc_qp(dev, pf_num, steer, qpn); if (!pqp) return 0; /* nothing to do */ @@ -241,7 +225,7 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 port, * we need to add it as a duplicate to this entry * for future references */ list_for_each_entry(dqp, &entry->duplicates, list) { - if (qpn == pqp->qpn) + if (qpn == dqp->qpn) return 0; /* qp is already duplicated */ } @@ -257,18 +241,20 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 port, /* Check whether a qpn is a duplicate on steering entry * If so, it should not be removed from mgm */ -static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port, +static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, enum mlx4_steer_type steer, unsigned int index, u32 qpn) { struct mlx4_steer *s_steer; struct mlx4_steer_index *tmp_entry, *entry = NULL; struct mlx4_promisc_qp *dqp, *tmp_dqp; + u8 pf_num; - s_steer = &mlx4_priv(dev)->steer[0]; + pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); + s_steer = &mlx4_priv(dev)->steer[pf_num]; /* if qp is not promisc, it cannot be duplicated */ - if (!get_promisc_qp(dev, 0, steer, qpn)) + if (!get_promisc_qp(dev, pf_num, steer, qpn)) return false; /* The qp is promisc qp so it is a duplicate on this index @@ -293,7 +279,7 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port, } /* I a steering entry contains only promisc QPs, it can be removed. */ -static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port, +static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, enum mlx4_steer_type steer, unsigned int index, u32 tqpn) { @@ -305,8 +291,10 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port, u32 members_count; bool ret = false; int i; + u8 pf_num; - s_steer = &mlx4_priv(dev)->steer[0]; + pf_num = (dev->caps.num_ports == 1) ? 
vep_num : (vep_num << 1) | (port - 1); + s_steer = &mlx4_priv(dev)->steer[pf_num]; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) @@ -318,7 +306,7 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port, members_count = be32_to_cpu(mgm->members_count) & 0xffffff; for (i = 0; i < members_count; i++) { qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; - if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) { + if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) { /* the qp is not promisc, the entry can't be removed */ goto out; } @@ -344,7 +332,7 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port, return ret; } -static int add_promisc_qp(struct mlx4_dev *dev, u8 port, +static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port, enum mlx4_steer_type steer, u32 qpn) { struct mlx4_steer *s_steer; @@ -359,13 +347,14 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port, bool found; int last_index; int err; + u8 pf_num; struct mlx4_priv *priv = mlx4_priv(dev); - - s_steer = &mlx4_priv(dev)->steer[0]; + pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); + s_steer = &mlx4_priv(dev)->steer[pf_num]; mutex_lock(&priv->mcg_table.mutex); - if (get_promisc_qp(dev, 0, steer, qpn)) { + if (get_promisc_qp(dev, pf_num, steer, qpn)) { err = 0; /* Noting to do, already exists */ goto out_mutex; } @@ -408,7 +397,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port, } if (!found) { /* Need to add the qpn to mgm */ - if (members_count == dev->caps.num_qp_per_mgm) { + if (members_count == MLX4_QP_PER_MGM) { /* entry is full */ err = -ENOMEM; goto out_mailbox; @@ -431,7 +420,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port, mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); - err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); + err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox); if (err) goto out_list; @@ -450,7 +439,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port, return err; } -static int remove_promisc_qp(struct mlx4_dev *dev, u8 port, +static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port, enum mlx4_steer_type steer, u32 qpn) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -465,11 +454,13 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port, bool back_to_list = false; int loc, i; int err; + u8 pf_num; - s_steer = &mlx4_priv(dev)->steer[0]; + pf_num = (dev->caps.num_ports == 1) ? 
vep_num : (vep_num << 1) | (port - 1); + s_steer = &mlx4_priv(dev)->steer[pf_num]; mutex_lock(&priv->mcg_table.mutex); - pqp = get_promisc_qp(dev, 0, steer, qpn); + pqp = get_promisc_qp(dev, pf_num, steer, qpn); if (unlikely(!pqp)) { mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn); /* nothing to do */ @@ -488,13 +479,12 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port, goto out_list; } mgm = mailbox->buf; - memset(mgm, 0, sizeof *mgm); members_count = 0; list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); - err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); + err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox); if (err) goto out_mailbox; @@ -659,13 +649,12 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], } index += dev->caps.num_mgms; - new_entry = 1; memset(mgm, 0, sizeof *mgm); memcpy(mgm->gid, gid, 16); } members_count = be32_to_cpu(mgm->members_count) & 0xffffff; - if (members_count == dev->caps.num_qp_per_mgm) { + if (members_count == MLX4_QP_PER_MGM) { mlx4_err(dev, "MGM at index %x is full.\n", index); err = -ENOMEM; goto out; @@ -707,9 +696,9 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], if (prot == MLX4_PROT_ETH) { /* manage the steering entry for promisc mode */ if (new_entry) - new_steering_entry(dev, port, steer, index, qp->qpn); + new_steering_entry(dev, 0, port, steer, index, qp->qpn); else - existing_steering_entry(dev, port, steer, + existing_steering_entry(dev, 0, port, steer, index, qp->qpn); } if (err && link && index != -1) { @@ -760,7 +749,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], /* if this pq is also a promisc qp, it shouldn't be removed */ if (prot == MLX4_PROT_ETH && - check_duplicate_entry(dev, port, steer, index, qp->qpn)) + check_duplicate_entry(dev, 0, port, steer, index, qp->qpn)) goto out; members_count = be32_to_cpu(mgm->members_count) & 0xffffff; @@ -780,8 +769,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], mgm->qp[i - 1] = 0; if (prot == MLX4_PROT_ETH) - removed_entry = can_remove_steering_entry(dev, port, steer, - index, qp->qpn); + removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn); if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) { err = mlx4_WRITE_ENTRY(dev, index, mailbox); goto out; @@ -840,34 +828,6 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], return err; } -static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp, - u8 gid[16], u8 attach, u8 block_loopback, - enum mlx4_protocol prot) -{ - struct mlx4_cmd_mailbox *mailbox; - int err = 0; - int qpn; - - if (!mlx4_is_mfunc(dev)) - return -EBADF; - - mailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(mailbox)) - return PTR_ERR(mailbox); - - memcpy(mailbox->buf, gid, 16); - qpn = qp->qpn; - qpn |= (prot << 28); - if (attach && block_loopback) - qpn |= (1 << 31); - - err = mlx4_cmd(dev, mailbox->dma, qpn, attach, - MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_WRAPPED); - - mlx4_free_cmd_mailbox(dev, mailbox); - return err; -} int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], int block_mcast_loopback, enum mlx4_protocol prot) @@ -883,12 +843,9 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], if (prot == MLX4_PROT_ETH) gid[7] |= 
(steer << 1); - if (mlx4_is_mfunc(dev)) - return mlx4_QP_ATTACH(dev, qp, gid, 1, - block_mcast_loopback, prot); - - return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback, - prot, steer); + return mlx4_qp_attach_common(dev, qp, gid, + block_mcast_loopback, prot, + steer); } EXPORT_SYMBOL_GPL(mlx4_multicast_attach); @@ -903,90 +860,22 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) return 0; - if (prot == MLX4_PROT_ETH) + if (prot == MLX4_PROT_ETH) { gid[7] |= (steer << 1); - - if (mlx4_is_mfunc(dev)) - return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); + } return mlx4_qp_detach_common(dev, qp, gid, prot, steer); } EXPORT_SYMBOL_GPL(mlx4_multicast_detach); -int mlx4_unicast_attach(struct mlx4_dev *dev, - struct mlx4_qp *qp, u8 gid[16], - int block_mcast_loopback, enum mlx4_protocol prot) -{ - if (prot == MLX4_PROT_ETH && - !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) - return 0; - - if (prot == MLX4_PROT_ETH) - gid[7] |= (MLX4_UC_STEER << 1); - - if (mlx4_is_mfunc(dev)) - return mlx4_QP_ATTACH(dev, qp, gid, 1, - block_mcast_loopback, prot); - - return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback, - prot, MLX4_UC_STEER); -} -EXPORT_SYMBOL_GPL(mlx4_unicast_attach); - -int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, - u8 gid[16], enum mlx4_protocol prot) -{ - if (prot == MLX4_PROT_ETH && - !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) - return 0; - - if (prot == MLX4_PROT_ETH) - gid[7] |= (MLX4_UC_STEER << 1); - - if (mlx4_is_mfunc(dev)) - return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); - - return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER); -} -EXPORT_SYMBOL_GPL(mlx4_unicast_detach); - -int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - u32 qpn = (u32) vhcr->in_param & 0xffffffff; - u8 port = vhcr->in_param >> 62; - enum mlx4_steer_type steer = vhcr->in_modifier; - - /* Promiscuous unicast is not allowed in mfunc */ - if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER) - return 0; - - if (vhcr->op_modifier) - return add_promisc_qp(dev, port, steer, qpn); - else - return remove_promisc_qp(dev, port, steer, qpn); -} - -static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn, - enum mlx4_steer_type steer, u8 add, u8 port) -{ - return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add, - MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_WRAPPED); -} int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) { if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) return 0; - if (mlx4_is_mfunc(dev)) - return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port); - return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn); + return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn); } EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add); @@ -995,10 +884,8 @@ int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) return 0; - if (mlx4_is_mfunc(dev)) - return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port); - return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn); + return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn); } EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove); @@ -1007,10 +894,8 @@ int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) return 0; - if 
(mlx4_is_mfunc(dev)) - return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port); - return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn); + return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn); } EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add); @@ -1019,10 +904,7 @@ int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) return 0; - if (mlx4_is_mfunc(dev)) - return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port); - - return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn); + return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn); } EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove); diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4.h index a80121a2b519..5dfa68ffc11c 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -46,25 +46,21 @@ #include #include #include -#include #define DRV_NAME "mlx4_core" -#define PFX DRV_NAME ": " -#define DRV_VERSION "1.1" -#define DRV_RELDATE "Dec, 2011" +#define DRV_VERSION "1.0" +#define DRV_RELDATE "July 14, 2011" enum { MLX4_HCR_BASE = 0x80680, MLX4_HCR_SIZE = 0x0001c, - MLX4_CLR_INT_SIZE = 0x00008, - MLX4_SLAVE_COMM_BASE = 0x0, - MLX4_COMM_PAGESIZE = 0x1000 + MLX4_CLR_INT_SIZE = 0x00008 }; enum { - MLX4_MAX_MGM_ENTRY_SIZE = 0x1000, - MLX4_MAX_QP_PER_MGM = 4 * (MLX4_MAX_MGM_ENTRY_SIZE / 16 - 2), - MLX4_MTT_ENTRY_PER_SEG = 8, + MLX4_MGM_ENTRY_SIZE = 0x100, + MLX4_QP_PER_MGM = 4 * (MLX4_MGM_ENTRY_SIZE / 16 - 2), + MLX4_MTT_ENTRY_PER_SEG = 8 }; enum { @@ -84,94 +80,6 @@ enum { MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT }; -enum mlx4_mr_state { - MLX4_MR_DISABLED = 0, - MLX4_MR_EN_HW, - MLX4_MR_EN_SW -}; - -#define MLX4_COMM_TIME 10000 -enum { - MLX4_COMM_CMD_RESET, - MLX4_COMM_CMD_VHCR0, - MLX4_COMM_CMD_VHCR1, - MLX4_COMM_CMD_VHCR2, - MLX4_COMM_CMD_VHCR_EN, - MLX4_COMM_CMD_VHCR_POST, - MLX4_COMM_CMD_FLR = 254 -}; - -/*The flag indicates that the slave should delay the RESET cmd*/ -#define MLX4_DELAY_RESET_SLAVE 0xbbbbbbb -/*indicates how many retries will be done if we are in the middle of FLR*/ -#define NUM_OF_RESET_RETRIES 10 -#define SLEEP_TIME_IN_RESET (2 * 1000) -enum mlx4_resource { - RES_QP, - RES_CQ, - RES_SRQ, - RES_XRCD, - RES_MPT, - RES_MTT, - RES_MAC, - RES_VLAN, - RES_EQ, - RES_COUNTER, - MLX4_NUM_OF_RESOURCE_TYPE -}; - -enum mlx4_alloc_mode { - RES_OP_RESERVE, - RES_OP_RESERVE_AND_MAP, - RES_OP_MAP_ICM, -}; - - -/* - *Virtual HCR structures. - * mlx4_vhcr is the sw representation, in machine endianess - * - * mlx4_vhcr_cmd is the formalized structure, the one that is passed - * to FW to go through communication channel. 
- * It is big endian, and has the same structure as the physical HCR - * used by command interface - */ -struct mlx4_vhcr { - u64 in_param; - u64 out_param; - u32 in_modifier; - u32 errno; - u16 op; - u16 token; - u8 op_modifier; - u8 e_bit; -}; - -struct mlx4_vhcr_cmd { - __be64 in_param; - __be32 in_modifier; - __be64 out_param; - __be16 token; - u16 reserved; - u8 status; - u8 flags; - __be16 opcode; -}; - -struct mlx4_cmd_info { - u16 opcode; - bool has_inbox; - bool has_outbox; - bool out_is_imm; - bool encode_slave_id; - int (*verify)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox); - int (*wrapper)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -}; - #ifdef CONFIG_MLX4_DEBUG extern int mlx4_debug_level; #else /* CONFIG_MLX4_DEBUG */ @@ -191,12 +99,6 @@ do { \ #define mlx4_warn(mdev, format, arg...) \ dev_warn(&mdev->pdev->dev, format, ##arg) -extern int mlx4_log_num_mgm_entry_size; -extern int log_mtts_per_seg; - -#define MLX4_MAX_NUM_SLAVES (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF) -#define ALL_SLAVES 0xff - struct mlx4_bitmap { u32 last; u32 top; @@ -228,147 +130,6 @@ struct mlx4_icm_table { struct mlx4_icm **icm; }; -/* - * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. - */ -struct mlx4_mpt_entry { - __be32 flags; - __be32 qpn; - __be32 key; - __be32 pd_flags; - __be64 start; - __be64 length; - __be32 lkey; - __be32 win_cnt; - u8 reserved1[3]; - u8 mtt_rep; - __be64 mtt_addr; - __be32 mtt_sz; - __be32 entity_size; - __be32 first_byte_offset; -} __packed; - -/* - * Must be packed because start is 64 bits but only aligned to 32 bits. - */ -struct mlx4_eq_context { - __be32 flags; - u16 reserved1[3]; - __be16 page_offset; - u8 log_eq_size; - u8 reserved2[4]; - u8 eq_period; - u8 reserved3; - u8 eq_max_count; - u8 reserved4[3]; - u8 intr; - u8 log_page_size; - u8 reserved5[2]; - u8 mtt_base_addr_h; - __be32 mtt_base_addr_l; - u32 reserved6[2]; - __be32 consumer_index; - __be32 producer_index; - u32 reserved7[4]; -}; - -struct mlx4_cq_context { - __be32 flags; - u16 reserved1[3]; - __be16 page_offset; - __be32 logsize_usrpage; - __be16 cq_period; - __be16 cq_max_count; - u8 reserved2[3]; - u8 comp_eqn; - u8 log_page_size; - u8 reserved3[2]; - u8 mtt_base_addr_h; - __be32 mtt_base_addr_l; - __be32 last_notified_index; - __be32 solicit_producer_index; - __be32 consumer_index; - __be32 producer_index; - u32 reserved4[2]; - __be64 db_rec_addr; -}; - -struct mlx4_srq_context { - __be32 state_logsize_srqn; - u8 logstride; - u8 reserved1; - __be16 xrcd; - __be32 pg_offset_cqn; - u32 reserved2; - u8 log_page_size; - u8 reserved3[2]; - u8 mtt_base_addr_h; - __be32 mtt_base_addr_l; - __be32 pd; - __be16 limit_watermark; - __be16 wqe_cnt; - u16 reserved4; - __be16 wqe_counter; - u32 reserved5; - __be64 db_rec_addr; -}; - -struct mlx4_eqe { - u8 reserved1; - u8 type; - u8 reserved2; - u8 subtype; - union { - u32 raw[6]; - struct { - __be32 cqn; - } __packed comp; - struct { - u16 reserved1; - __be16 token; - u32 reserved2; - u8 reserved3[3]; - u8 status; - __be64 out_param; - } __packed cmd; - struct { - __be32 qpn; - } __packed qp; - struct { - __be32 srqn; - } __packed srq; - struct { - __be32 cqn; - u32 reserved1; - u8 reserved2[3]; - u8 syndrome; - } __packed cq_err; - struct { - u32 reserved1[2]; - __be32 port; - } __packed port_change; - struct { - #define COMM_CHANNEL_BIT_ARRAY_SIZE 4 - u32 reserved; - u32 
bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE]; - } __packed comm_channel_arm; - struct { - u8 port; - u8 reserved[3]; - __be64 mac; - } __packed mac_update; - struct { - u8 port; - } __packed sw_event; - struct { - __be32 slave_id; - } __packed flr_event; - } event; - u8 slave_id; - u8 reserved3[2]; - u8 owner; -} __packed; - struct mlx4_eq { struct mlx4_dev *dev; void __iomem *doorbell; @@ -381,18 +142,6 @@ struct mlx4_eq { struct mlx4_mtt mtt; }; -struct mlx4_slave_eqe { - u8 type; - u8 port; - u32 param; -}; - -struct mlx4_slave_event_eq_info { - u32 eqn; - u16 token; - u64 event_type; -}; - struct mlx4_profile { int num_qp; int rdmarc_per_qp; @@ -406,37 +155,16 @@ struct mlx4_profile { struct mlx4_fw { u64 clr_int_base; u64 catas_offset; - u64 comm_base; struct mlx4_icm *fw_icm; struct mlx4_icm *aux_icm; u32 catas_size; u16 fw_pages; u8 clr_int_bar; u8 catas_bar; - u8 comm_bar; }; -struct mlx4_comm { - u32 slave_write; - u32 slave_read; -}; - -enum { - MLX4_MCAST_CONFIG = 0, - MLX4_MCAST_DISABLE = 1, - MLX4_MCAST_ENABLE = 2, -}; - -#define VLAN_FLTR_SIZE 128 - -struct mlx4_vlan_fltr { - __be32 entry[VLAN_FLTR_SIZE]; -}; - -struct mlx4_mcast_entry { - struct list_head list; - u64 addr; -}; +#define MGM_QPN_MASK 0x00FFFFFF +#define MGM_BLCK_LB_BIT 30 struct mlx4_promisc_qp { struct list_head list; @@ -449,87 +177,19 @@ struct mlx4_steer_index { struct list_head duplicates; }; -struct mlx4_slave_state { - u8 comm_toggle; - u8 last_cmd; - u8 init_port_mask; - bool active; - u8 function; - dma_addr_t vhcr_dma; - u16 mtu[MLX4_MAX_PORTS + 1]; - __be32 ib_cap_mask[MLX4_MAX_PORTS + 1]; - struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES]; - struct list_head mcast_filters[MLX4_MAX_PORTS + 1]; - struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1]; - struct mlx4_slave_event_eq_info event_eq; - u16 eq_pi; - u16 eq_ci; - spinlock_t lock; - /*initialized via the kzalloc*/ - u8 is_slave_going_down; - u32 cookie; -}; - -struct slave_list { - struct mutex mutex; - struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE]; -}; - -struct mlx4_resource_tracker { - spinlock_t lock; - /* tree for each resources */ - struct radix_tree_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE]; - /* num_of_slave's lists, one per slave */ - struct slave_list *slave_list; -}; - -#define SLAVE_EVENT_EQ_SIZE 128 -struct mlx4_slave_event_eq { - u32 eqn; - u32 cons; - u32 prod; - struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE]; -}; - -struct mlx4_master_qp0_state { - int proxy_qp0_active; - int qp0_active; - int port_active; -}; - -struct mlx4_mfunc_master_ctx { - struct mlx4_slave_state *slave_state; - struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1]; - int init_port_ref[MLX4_MAX_PORTS + 1]; - u16 max_mtu[MLX4_MAX_PORTS + 1]; - int disable_mcast_ref[MLX4_MAX_PORTS + 1]; - struct mlx4_resource_tracker res_tracker; - struct workqueue_struct *comm_wq; - struct work_struct comm_work; - struct work_struct slave_event_work; - struct work_struct slave_flr_event_work; - spinlock_t slave_state_lock; - __be32 comm_arm_bit_vector[4]; - struct mlx4_eqe cmd_eqe; - struct mlx4_slave_event_eq slave_eq; - struct mutex gen_eqe_mutex[MLX4_MFUNC_MAX]; -}; - -struct mlx4_mfunc { - struct mlx4_comm __iomem *comm; - struct mlx4_vhcr_cmd *vhcr; - dma_addr_t vhcr_dma; - - struct mlx4_mfunc_master_ctx master; +struct mlx4_mgm { + __be32 next_gid_index; + __be32 members_count; + u32 reserved[2]; + u8 gid[16]; + __be32 qp[MLX4_QP_PER_MGM]; }; - struct mlx4_cmd { struct pci_pool *pool; void __iomem *hcr; struct mutex hcr_mutex; struct semaphore poll_sem; struct 
semaphore event_sem; - struct semaphore slave_sem; int max_cmds; spinlock_t context_lock; int free_head; @@ -537,7 +197,6 @@ struct mlx4_cmd { u16 token_mask; u8 use_events; u8 toggle; - u8 comm_toggle; }; struct mlx4_uar_table { @@ -628,48 +287,6 @@ struct mlx4_vlan_table { int max; }; -#define SET_PORT_GEN_ALL_VALID 0x7 -#define SET_PORT_PROMISC_SHIFT 31 -#define SET_PORT_MC_PROMISC_SHIFT 30 - -enum { - MCAST_DIRECT_ONLY = 0, - MCAST_DIRECT = 1, - MCAST_DEFAULT = 2 -}; - - -struct mlx4_set_port_general_context { - u8 reserved[3]; - u8 flags; - u16 reserved2; - __be16 mtu; - u8 pptx; - u8 pfctx; - u16 reserved3; - u8 pprx; - u8 pfcrx; - u16 reserved4; -}; - -struct mlx4_set_port_rqp_calc_context { - __be32 base_qpn; - u8 rererved; - u8 n_mac; - u8 n_vlan; - u8 n_prio; - u8 reserved2[3]; - u8 mac_miss; - u8 intra_no_vlan; - u8 no_vlan; - u8 intra_vlan_miss; - u8 vlan_miss; - u8 reserved3[3]; - u8 no_vlan_prio; - __be32 promisc; - __be32 mcast; -}; - struct mlx4_mac_entry { u64 mac; }; @@ -716,7 +333,6 @@ struct mlx4_priv { struct mlx4_fw fw; struct mlx4_cmd cmd; - struct mlx4_mfunc mfunc; struct mlx4_bitmap pd_bitmap; struct mlx4_bitmap xrcd_bitmap; @@ -743,7 +359,6 @@ struct mlx4_priv { struct list_head bf_list; struct mutex bf_mutex; struct io_mapping *bf_mapping; - int reserved_mtts; }; static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) @@ -788,62 +403,6 @@ void mlx4_cleanup_cq_table(struct mlx4_dev *dev); void mlx4_cleanup_qp_table(struct mlx4_dev *dev); void mlx4_cleanup_srq_table(struct mlx4_dev *dev); void mlx4_cleanup_mcg_table(struct mlx4_dev *dev); -int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn); -void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn); -int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn); -void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn); -int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn); -void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn); -int __mlx4_mr_reserve(struct mlx4_dev *dev); -void __mlx4_mr_release(struct mlx4_dev *dev, u32 index); -int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index); -void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index); -u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order); -void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order); - -int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_SYNC_TPT_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct 
mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, - int *base); -void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); -int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac); -void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac); -int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac); -int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, - int start_index, int npages, u64 *page_list); void mlx4_start_catas_poll(struct mlx4_dev *dev); void mlx4_stop_catas_poll(struct mlx4_dev *dev); @@ -860,113 +419,13 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, struct mlx4_profile *request, struct mlx4_dev_cap *dev_cap, struct mlx4_init_hca_param *init_hca); -void mlx4_master_comm_channel(struct work_struct *work); -void mlx4_gen_slave_eqe(struct work_struct *work); -void mlx4_master_handle_slave_flr(struct work_struct *work); - -int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_COMM_INT_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct 
mlx4_cmd_info *cmd); -int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); - -int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe); int mlx4_cmd_init(struct mlx4_dev *dev); void mlx4_cmd_cleanup(struct mlx4_dev *dev); -int mlx4_multi_func_init(struct mlx4_dev *dev); -void mlx4_multi_func_cleanup(struct mlx4_dev *dev); void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param); int mlx4_cmd_use_events(struct mlx4_dev *dev); void mlx4_cmd_use_polling(struct mlx4_dev *dev); -int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param, - unsigned long timeout); - void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn); void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type); @@ -993,113 +452,12 @@ void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); -/* resource tracker functions*/ -int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, - enum mlx4_resource resource_type, - int resource_id, int *slave); -void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id); -int mlx4_init_resource_tracker(struct mlx4_dev *dev); - -void mlx4_free_resource_tracker(struct mlx4_dev *dev); - -int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port); - -int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); - -int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], enum mlx4_protocol prot, enum mlx4_steer_type steer); int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], int block_mcast_loopback, enum mlx4_protocol prot, enum mlx4_steer_type steer); -int 
mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function, - int port, void *buf); -int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod, - struct mlx4_cmd_mailbox *outbox); -int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_PKEY_TABLE_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); -int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); - -int mlx4_get_mgm_entry_size(struct mlx4_dev *dev); -int mlx4_get_qp_per_mgm(struct mlx4_dev *dev); - -static inline void set_param_l(u64 *arg, u32 val) -{ - *((u32 *)arg) = val; -} - -static inline void set_param_h(u64 *arg, u32 val) -{ - *arg = (*arg & 0xffffffff) | ((u64) val << 32); -} - -static inline u32 get_param_l(u64 *arg) -{ - return (u32) (*arg & 0xffffffff); -} - -static inline u32 get_param_h(u64 *arg) -{ - return (u32)(*arg >> 32); -} - -static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev) -{ - return &mlx4_priv(dev)->mfunc.master.res_tracker.lock; -} - -#define NOT_MASKED_PD_BITS 17 - #endif /* MLX4_H */ diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index f2a8e65f5f88..207b5add3ca8 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -51,8 +51,8 @@ #include "en_port.h" #define DRV_NAME "mlx4_en" -#define DRV_VERSION "2.0" -#define DRV_RELDATE "Dec 2011" +#define DRV_VERSION "1.5.4.2" +#define DRV_RELDATE "October 2011" #define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) @@ -366,6 +366,16 @@ struct mlx4_en_rss_map { enum mlx4_qp_state indir_state; }; +struct mlx4_en_rss_context { + __be32 base_qpn; + __be32 default_qpn; + u16 reserved; + u8 hash_fn; + u8 flags; + __be32 rss_key[10]; + __be32 base_qpn_udp; +}; + struct mlx4_en_port_state { int link_state; int link_speed; @@ -453,7 +463,6 @@ struct mlx4_en_priv { int base_qpn; struct mlx4_en_rss_map rss_map; - u32 ctrl_flags; u32 flags; #define MLX4_EN_FLAG_PROMISC 0x1 #define MLX4_EN_FLAG_MC_PROMISC 0x2 @@ -486,9 +495,9 @@ struct mlx4_en_priv { enum mlx4_en_wol { MLX4_EN_WOL_MAGIC = (1ULL << 61), MLX4_EN_WOL_ENABLED = (1ULL << 62), + MLX4_EN_WOL_DO_MODIFY = (1ULL << 63), }; -#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63) void mlx4_en_destroy_netdev(struct net_device *dev); int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/mr.c b/trunk/drivers/net/ethernet/mellanox/mlx4/mr.c index 01df5567e16e..efa3e77355e4 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -32,17 +32,35 @@ * SOFTWARE. 
*/ -#include #include #include #include -#include #include #include "mlx4.h" #include "icm.h" +/* + * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. + */ +struct mlx4_mpt_entry { + __be32 flags; + __be32 qpn; + __be32 key; + __be32 pd_flags; + __be64 start; + __be64 length; + __be32 lkey; + __be32 win_cnt; + u8 reserved1[3]; + u8 mtt_rep; + __be64 mtt_seg; + __be32 mtt_sz; + __be32 entity_size; + __be32 first_byte_offset; +} __packed; + #define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28) #define MLX4_MPT_FLAG_FREE (0x3UL << 28) #define MLX4_MPT_FLAG_MIO (1 << 17) @@ -162,48 +180,22 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy) kfree(buddy->num_free); } -u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) +static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) { struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; u32 seg; - int seg_order; - u32 offset; - seg_order = max_t(int, order - log_mtts_per_seg, 0); - - seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order); + seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order); if (seg == -1) return -1; - offset = seg * (1 << log_mtts_per_seg); - - if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset, - offset + (1 << order) - 1)) { - mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order); + if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg, + seg + (1 << order) - 1)) { + mlx4_buddy_free(&mr_table->mtt_buddy, seg, order); return -1; } - return offset; -} - -static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) -{ - u64 in_param; - u64 out_param; - int err; - - if (mlx4_is_mfunc(dev)) { - set_param_l(&in_param, order); - err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT, - RES_OP_RESERVE_AND_MAP, - MLX4_CMD_ALLOC_RES, - MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_WRAPPED); - if (err) - return -1; - return get_param_l(&out_param); - } - return __mlx4_alloc_mtt_range(dev, order); + return seg; } int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, @@ -218,63 +210,33 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, } else mtt->page_shift = page_shift; - for (mtt->order = 0, i = 1; i < npages; i <<= 1) + for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1) ++mtt->order; - mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order); - if (mtt->offset == -1) + mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order); + if (mtt->first_seg == -1) return -ENOMEM; return 0; } EXPORT_SYMBOL_GPL(mlx4_mtt_init); -void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) +void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt) { - u32 first_seg; - int seg_order; struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; - seg_order = max_t(int, order - log_mtts_per_seg, 0); - first_seg = offset / (1 << log_mtts_per_seg); - - mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order); - mlx4_table_put_range(dev, &mr_table->mtt_table, offset, - offset + (1 << order) - 1); -} - -static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) -{ - u64 in_param; - int err; - - if (mlx4_is_mfunc(dev)) { - set_param_l(&in_param, offset); - set_param_h(&in_param, order); - err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP, - MLX4_CMD_FREE_RES, - MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_WRAPPED); - if (err) - mlx4_warn(dev, "Failed to free mtt range at:" - "%d order:%d\n", offset, order); - return; - } - __mlx4_free_mtt_range(dev, offset, order); -} - -void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt 
*mtt) -{ if (mtt->order < 0) return; - mlx4_free_mtt_range(dev, mtt->offset, mtt->order); + mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order); + mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg, + mtt->first_seg + (1 << mtt->order) - 1); } EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup); u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt) { - return (u64) mtt->offset * dev->caps.mtt_entry_sz; + return (u64) mtt->first_seg * dev->caps.mtt_entry_sz; } EXPORT_SYMBOL_GPL(mlx4_mtt_addr); @@ -291,205 +253,69 @@ static u32 key_to_hw_index(u32 key) static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int mpt_index) { - return mlx4_cmd(dev, mailbox->dma | dev->caps.function , mpt_index, - 0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B, - MLX4_CMD_WRAPPED); + return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT, + MLX4_CMD_TIME_CLASS_B); } static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int mpt_index) { return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index, - !mailbox, MLX4_CMD_HW2SW_MPT, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); + !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B); } -static int mlx4_mr_reserve_range(struct mlx4_dev *dev, int cnt, int align, - u32 *base_mridx) +int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, + int npages, int page_shift, struct mlx4_mr *mr) { struct mlx4_priv *priv = mlx4_priv(dev); - u32 mridx; + u32 index; + int err; - mridx = mlx4_bitmap_alloc_range(&priv->mr_table.mpt_bitmap, cnt, align); - if (mridx == -1) + index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap); + if (index == -1) return -ENOMEM; - *base_mridx = mridx; - return 0; - -} -EXPORT_SYMBOL_GPL(mlx4_mr_reserve_range); - -static void mlx4_mr_release_range(struct mlx4_dev *dev, u32 base_mridx, int cnt) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - mlx4_bitmap_free_range(&priv->mr_table.mpt_bitmap, base_mridx, cnt); -} -EXPORT_SYMBOL_GPL(mlx4_mr_release_range); - -static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd, - u64 iova, u64 size, u32 access, int npages, - int page_shift, struct mlx4_mr *mr) -{ mr->iova = iova; mr->size = size; mr->pd = pd; mr->access = access; - mr->enabled = MLX4_MR_DISABLED; - mr->key = hw_index_to_key(mridx); - - return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); -} -EXPORT_SYMBOL_GPL(mlx4_mr_alloc_reserved); - -static int mlx4_WRITE_MTT(struct mlx4_dev *dev, - struct mlx4_cmd_mailbox *mailbox, - int num_entries) -{ - return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); -} + mr->enabled = 0; + mr->key = hw_index_to_key(index); -int __mlx4_mr_reserve(struct mlx4_dev *dev) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - - return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap); -} - -static int mlx4_mr_reserve(struct mlx4_dev *dev) -{ - u64 out_param; - - if (mlx4_is_mfunc(dev)) { - if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE, - MLX4_CMD_ALLOC_RES, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED)) - return -1; - return get_param_l(&out_param); - } - return __mlx4_mr_reserve(dev); -} - -void __mlx4_mr_release(struct mlx4_dev *dev, u32 index) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - - mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index); -} - -static void mlx4_mr_release(struct mlx4_dev *dev, u32 index) -{ - u64 in_param; - - if (mlx4_is_mfunc(dev)) { - set_param_l(&in_param, index); - if (mlx4_cmd(dev, in_param, RES_MPT, 
RES_OP_RESERVE, - MLX4_CMD_FREE_RES, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED)) - mlx4_warn(dev, "Failed to release mr index:%d\n", - index); - return; - } - __mlx4_mr_release(dev, index); -} - -int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index) -{ - struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; - - return mlx4_table_get(dev, &mr_table->dmpt_table, index); -} - -static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index) -{ - u64 param; - - if (mlx4_is_mfunc(dev)) { - set_param_l(¶m, index); - return mlx4_cmd_imm(dev, param, ¶m, RES_MPT, RES_OP_MAP_ICM, - MLX4_CMD_ALLOC_RES, - MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_WRAPPED); - } - return __mlx4_mr_alloc_icm(dev, index); -} - -void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index) -{ - struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; - - mlx4_table_put(dev, &mr_table->dmpt_table, index); -} - -static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index) -{ - u64 in_param; - - if (mlx4_is_mfunc(dev)) { - set_param_l(&in_param, index); - if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM, - MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_WRAPPED)) - mlx4_warn(dev, "Failed to free icm of mr index:%d\n", - index); - return; - } - return __mlx4_mr_free_icm(dev, index); -} - -int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, - int npages, int page_shift, struct mlx4_mr *mr) -{ - u32 index; - int err; - - index = mlx4_mr_reserve(dev); - if (index == -1) - return -ENOMEM; - - err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size, - access, npages, page_shift, mr); + err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); if (err) - mlx4_mr_release(dev, index); + mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index); return err; } EXPORT_SYMBOL_GPL(mlx4_mr_alloc); -static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr) +void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr) { + struct mlx4_priv *priv = mlx4_priv(dev); int err; - if (mr->enabled == MLX4_MR_EN_HW) { + if (mr->enabled) { err = mlx4_HW2SW_MPT(dev, NULL, key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1)); if (err) - mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err); - - mr->enabled = MLX4_MR_EN_SW; + mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err); } - mlx4_mtt_cleanup(dev, &mr->mtt); -} -EXPORT_SYMBOL_GPL(mlx4_mr_free_reserved); -void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr) -{ - mlx4_mr_free_reserved(dev, mr); - if (mr->enabled) - mlx4_mr_free_icm(dev, key_to_hw_index(mr->key)); - mlx4_mr_release(dev, key_to_hw_index(mr->key)); + mlx4_mtt_cleanup(dev, &mr->mtt); + mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key)); } EXPORT_SYMBOL_GPL(mlx4_mr_free); int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) { + struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; struct mlx4_cmd_mailbox *mailbox; struct mlx4_mpt_entry *mpt_entry; int err; - err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key)); + err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key)); if (err) return err; @@ -514,10 +340,9 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) if (mr->mtt.order < 0) { mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); - mpt_entry->mtt_addr = 0; + mpt_entry->mtt_seg = 0; } else { - mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev, - &mr->mtt)); + mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt)); } if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) { @@ -525,7 +350,8 @@ int mlx4_mr_enable(struct 
mlx4_dev *dev, struct mlx4_mr *mr) mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG | MLX4_MPT_PD_FLAG_RAE); - mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order); + mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) * + dev->caps.mtts_per_seg); } else { mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS); } @@ -536,7 +362,8 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err); goto err_cmd; } - mr->enabled = MLX4_MR_EN_HW; + + mr->enabled = 1; mlx4_free_cmd_mailbox(dev, mailbox); @@ -546,7 +373,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) mlx4_free_cmd_mailbox(dev, mailbox); err_table: - mlx4_mr_free_icm(dev, key_to_hw_index(mr->key)); + mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key)); return err; } EXPORT_SYMBOL_GPL(mlx4_mr_enable); @@ -558,10 +385,18 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, __be64 *mtts; dma_addr_t dma_handle; int i; + int s = start_index * sizeof (u64); - mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset + - start_index, &dma_handle); + /* All MTTs must fit in the same page */ + if (start_index / (PAGE_SIZE / sizeof (u64)) != + (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64))) + return -EINVAL; + + if (start_index & (dev->caps.mtts_per_seg - 1)) + return -EINVAL; + mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg + + s / dev->caps.mtt_entry_sz, &dma_handle); if (!mtts) return -ENOMEM; @@ -577,75 +412,27 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, return 0; } -int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, - int start_index, int npages, u64 *page_list) +int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + int start_index, int npages, u64 *page_list) { - int err = 0; int chunk; - int mtts_per_page; - int max_mtts_first_page; - - /* compute how may mtts fit in the first page */ - mtts_per_page = PAGE_SIZE / sizeof(u64); - max_mtts_first_page = mtts_per_page - (mtt->offset + start_index) - % mtts_per_page; + int err; - chunk = min_t(int, max_mtts_first_page, npages); + if (mtt->order < 0) + return -EINVAL; while (npages > 0) { + chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages); err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list); if (err) return err; + npages -= chunk; start_index += chunk; page_list += chunk; - - chunk = min_t(int, mtts_per_page, npages); } - return err; -} -int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, - int start_index, int npages, u64 *page_list) -{ - struct mlx4_cmd_mailbox *mailbox = NULL; - __be64 *inbox = NULL; - int chunk; - int err = 0; - int i; - - if (mtt->order < 0) - return -EINVAL; - - if (mlx4_is_mfunc(dev)) { - mailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(mailbox)) - return PTR_ERR(mailbox); - inbox = mailbox->buf; - - while (npages > 0) { - chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2, - npages); - inbox[0] = cpu_to_be64(mtt->offset + start_index); - inbox[1] = 0; - for (i = 0; i < chunk; ++i) - inbox[i + 2] = cpu_to_be64(page_list[i] | - MLX4_MTT_FLAG_PRESENT); - err = mlx4_WRITE_MTT(dev, mailbox, chunk); - if (err) { - mlx4_free_cmd_mailbox(dev, mailbox); - return err; - } - - npages -= chunk; - start_index += chunk; - page_list += chunk; - } - mlx4_free_cmd_mailbox(dev, mailbox); - return err; - } - - return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list); + return 0; } 
EXPORT_SYMBOL_GPL(mlx4_write_mtt); @@ -675,34 +462,21 @@ EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt); int mlx4_init_mr_table(struct mlx4_dev *dev) { - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_mr_table *mr_table = &priv->mr_table; + struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; int err; - if (!is_power_of_2(dev->caps.num_mpts)) - return -EINVAL; - - /* Nothing to do for slaves - all MR handling is forwarded - * to the master */ - if (mlx4_is_slave(dev)) - return 0; - err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, ~0, dev->caps.reserved_mrws, 0); if (err) return err; err = mlx4_buddy_init(&mr_table->mtt_buddy, - ilog2(dev->caps.num_mtts / - (1 << log_mtts_per_seg))); + ilog2(dev->caps.num_mtt_segs)); if (err) goto err_buddy; if (dev->caps.reserved_mtts) { - priv->reserved_mtts = - mlx4_alloc_mtt_range(dev, - fls(dev->caps.reserved_mtts - 1)); - if (priv->reserved_mtts < 0) { + if (mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)) == -1) { mlx4_warn(dev, "MTT table of order %d is too small.\n", mr_table->mtt_buddy.max_order); err = -ENOMEM; @@ -723,14 +497,8 @@ int mlx4_init_mr_table(struct mlx4_dev *dev) void mlx4_cleanup_mr_table(struct mlx4_dev *dev) { - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_mr_table *mr_table = &priv->mr_table; + struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; - if (mlx4_is_slave(dev)) - return; - if (priv->reserved_mtts >= 0) - mlx4_free_mtt_range(dev, priv->reserved_mtts, - fls(dev->caps.reserved_mtts - 1)); mlx4_buddy_cleanup(&mr_table->mtt_buddy); mlx4_bitmap_cleanup(&mr_table->mpt_bitmap); } @@ -813,7 +581,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, int max_maps, u8 page_shift, struct mlx4_fmr *fmr) { struct mlx4_priv *priv = mlx4_priv(dev); - u64 mtt_offset; + u64 mtt_seg; int err = -ENOMEM; if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32) @@ -833,12 +601,11 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, if (err) return err; - mtt_offset = fmr->mr.mtt.offset * dev->caps.mtt_entry_sz; + mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz; fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table, - fmr->mr.mtt.offset, + fmr->mr.mtt.first_seg, &fmr->dma_handle); - if (!fmr->mtts) { err = -ENOMEM; goto err_free; @@ -852,46 +619,6 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, } EXPORT_SYMBOL_GPL(mlx4_fmr_alloc); -static int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, - u32 pd, u32 access, int max_pages, - int max_maps, u8 page_shift, struct mlx4_fmr *fmr) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - int err = -ENOMEM; - - if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32) - return -EINVAL; - - /* All MTTs must fit in the same page */ - if (max_pages * sizeof *fmr->mtts > PAGE_SIZE) - return -EINVAL; - - fmr->page_shift = page_shift; - fmr->max_pages = max_pages; - fmr->max_maps = max_maps; - fmr->maps = 0; - - err = mlx4_mr_alloc_reserved(dev, mridx, pd, 0, 0, access, max_pages, - page_shift, &fmr->mr); - if (err) - return err; - - fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table, - fmr->mr.mtt.offset, - &fmr->dma_handle); - if (!fmr->mtts) { - err = -ENOMEM; - goto err_free; - } - - return 0; - -err_free: - mlx4_mr_free_reserved(dev, &fmr->mr); - return err; -} -EXPORT_SYMBOL_GPL(mlx4_fmr_alloc_reserved); - int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -913,32 
+640,12 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable); void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u32 *lkey, u32 *rkey) { - struct mlx4_cmd_mailbox *mailbox; - int err; - if (!fmr->maps) return; fmr->maps = 0; - mailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(mailbox)) { - err = PTR_ERR(mailbox); - printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox" - " failed (%d)\n", err); - return; - } - - err = mlx4_HW2SW_MPT(dev, NULL, - key_to_hw_index(fmr->mr.key) & - (dev->caps.num_mpts - 1)); - mlx4_free_cmd_mailbox(dev, mailbox); - if (err) { - printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", - err); - return; - } - fmr->mr.enabled = MLX4_MR_EN_SW; + *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW; } EXPORT_SYMBOL_GPL(mlx4_fmr_unmap); @@ -947,28 +654,15 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr) if (fmr->maps) return -EBUSY; + fmr->mr.enabled = 0; mlx4_mr_free(dev, &fmr->mr); - fmr->mr.enabled = MLX4_MR_DISABLED; return 0; } EXPORT_SYMBOL_GPL(mlx4_fmr_free); -static int mlx4_fmr_free_reserved(struct mlx4_dev *dev, struct mlx4_fmr *fmr) -{ - if (fmr->maps) - return -EBUSY; - - mlx4_mr_free_reserved(dev, &fmr->mr); - fmr->mr.enabled = MLX4_MR_DISABLED; - - return 0; -} -EXPORT_SYMBOL_GPL(mlx4_fmr_free_reserved); - int mlx4_SYNC_TPT(struct mlx4_dev *dev) { - return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000, - MLX4_CMD_WRAPPED); + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000); } EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT); diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/pd.c b/trunk/drivers/net/ethernet/mellanox/mlx4/pd.c index 5c9a54df17ab..260ed259ce9b 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/pd.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/pd.c @@ -31,7 +31,6 @@ * SOFTWARE. */ -#include #include #include #include @@ -52,8 +51,7 @@ int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn) *pdn = mlx4_bitmap_alloc(&priv->pd_bitmap); if (*pdn == -1) return -ENOMEM; - if (mlx4_is_mfunc(dev)) - *pdn |= (dev->caps.function + 1) << NOT_MASKED_PD_BITS; + return 0; } EXPORT_SYMBOL_GPL(mlx4_pd_alloc); @@ -87,8 +85,7 @@ int mlx4_init_pd_table(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds, - (1 << NOT_MASKED_PD_BITS) - 1, - dev->caps.reserved_pds, 0); + (1 << 24) - 1, dev->caps.reserved_pds, 0); } void mlx4_cleanup_pd_table(struct mlx4_dev *dev) @@ -111,19 +108,13 @@ void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev) int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar) { - int offset; - uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap); if (uar->index == -1) return -ENOMEM; - if (mlx4_is_slave(dev)) - offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) / - dev->caps.uar_page_size); - else - offset = uar->index; - uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset; + uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index; uar->map = NULL; + return 0; } EXPORT_SYMBOL_GPL(mlx4_uar_alloc); @@ -241,7 +232,7 @@ int mlx4_init_uar_table(struct mlx4_dev *dev) return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap, dev->caps.num_uars, dev->caps.num_uars - 1, - dev->caps.reserved_uars, 0); + max(128, dev->caps.reserved_uars), 0); } void mlx4_cleanup_uar_table(struct mlx4_dev *dev) diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/port.c b/trunk/drivers/net/ethernet/mellanox/mlx4/port.c index 88b52e547524..d942aea4927b 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/port.c +++ 
b/trunk/drivers/net/ethernet/mellanox/mlx4/port.c @@ -70,12 +70,41 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table) table->total = 0; } -static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn) +static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port, + __be64 *entries) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 in_mod; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE); + + in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, + u64 mac, int *qpn, u8 reserve) { struct mlx4_qp qp; u8 gid[16] = {0}; int err; + if (reserve) { + err = mlx4_qp_reserve_range(dev, 1, 1, qpn); + if (err) { + mlx4_err(dev, "Failed to reserve qp for mac registration\n"); + return err; + } + } qp.qpn = *qpn; mac &= 0xffffffffffffULL; @@ -84,15 +113,16 @@ static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn) gid[5] = port; gid[7] = MLX4_UC_STEER << 1; - err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH); - if (err) - mlx4_warn(dev, "Failed Attaching Unicast\n"); + err = mlx4_qp_attach_common(dev, &qp, gid, 0, + MLX4_PROT_ETH, MLX4_UC_STEER); + if (err && reserve) + mlx4_qp_release_range(dev, *qpn, 1); return err; } static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port, - u64 mac, int qpn) + u64 mac, int qpn, u8 free) { struct mlx4_qp qp; u8 gid[16] = {0}; @@ -104,164 +134,60 @@ static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port, gid[5] = port; gid[7] = MLX4_UC_STEER << 1; - mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH); -} - -static int validate_index(struct mlx4_dev *dev, - struct mlx4_mac_table *table, int index) -{ - int err = 0; - - if (index < 0 || index >= table->max || !table->entries[index]) { - mlx4_warn(dev, "No valid Mac entry for the given index\n"); - err = -EINVAL; - } - return err; -} - -static int find_index(struct mlx4_dev *dev, - struct mlx4_mac_table *table, u64 mac) -{ - int i; - - for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { - if ((mac & MLX4_MAC_MASK) == - (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) - return i; - } - /* Mac not found */ - return -EINVAL; + mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER); + if (free) + mlx4_qp_release_range(dev, qpn, 1); } -int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn) +int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap) { struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; + struct mlx4_mac_table *table = &info->mac_table; struct mlx4_mac_entry *entry; - int index = 0; - int err = 0; - - mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n", - (unsigned long long) mac); - index = mlx4_register_mac(dev, port, mac); - if (index < 0) { - err = index; - mlx4_err(dev, "Failed adding MAC: 0x%llx\n", - (unsigned long long) mac); - return err; - } - - if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) { - *qpn = info->base_qpn + index; - return 0; - } - - err = mlx4_qp_reserve_range(dev, 1, 1, qpn); - mlx4_dbg(dev, "Reserved qp %d\n", *qpn); - if (err) { - mlx4_err(dev, "Failed to reserve qp for mac registration\n"); - goto qp_err; - } - - err = mlx4_uc_steer_add(dev, port, mac, qpn); - if (err) - goto steer_err; - - entry = kmalloc(sizeof *entry, 
GFP_KERNEL); - if (!entry) { - err = -ENOMEM; - goto alloc_err; - } - entry->mac = mac; - err = radix_tree_insert(&info->mac_tree, *qpn, entry); - if (err) - goto insert_err; - return 0; - -insert_err: - kfree(entry); - -alloc_err: - mlx4_uc_steer_release(dev, port, mac, *qpn); - -steer_err: - mlx4_qp_release_range(dev, *qpn, 1); - -qp_err: - mlx4_unregister_mac(dev, port, mac); - return err; -} -EXPORT_SYMBOL_GPL(mlx4_get_eth_qp); + int i, err = 0; + int free = -1; -void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn) -{ - struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; - struct mlx4_mac_entry *entry; + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { + err = mlx4_uc_steer_add(dev, port, mac, qpn, 1); + if (err) + return err; - mlx4_dbg(dev, "Registering MAC: 0x%llx for deleting\n", - (unsigned long long) mac); - mlx4_unregister_mac(dev, port, mac); + entry = kmalloc(sizeof *entry, GFP_KERNEL); + if (!entry) { + mlx4_uc_steer_release(dev, port, mac, *qpn, 1); + return -ENOMEM; + } - if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { - entry = radix_tree_lookup(&info->mac_tree, qpn); - if (entry) { - mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx," - " qpn %d\n", port, - (unsigned long long) mac, qpn); - mlx4_uc_steer_release(dev, port, entry->mac, qpn); - mlx4_qp_release_range(dev, qpn, 1); - radix_tree_delete(&info->mac_tree, qpn); + entry->mac = mac; + err = radix_tree_insert(&info->mac_tree, *qpn, entry); + if (err) { kfree(entry); + mlx4_uc_steer_release(dev, port, mac, *qpn, 1); + return err; } } -} -EXPORT_SYMBOL_GPL(mlx4_put_eth_qp); - -static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port, - __be64 *entries) -{ - struct mlx4_cmd_mailbox *mailbox; - u32 in_mod; - int err; - mailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(mailbox)) - return PTR_ERR(mailbox); - - memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE); - - in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port; - - err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); - - mlx4_free_cmd_mailbox(dev, mailbox); - return err; -} - -int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) -{ - struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; - struct mlx4_mac_table *table = &info->mac_table; - int i, err = 0; - int free = -1; - - mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n", - (unsigned long long) mac, port); + mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac); mutex_lock(&table->mutex); - for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { - if (free < 0 && !table->entries[i]) { + for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) { + if (free < 0 && !table->refs[i]) { free = i; continue; } if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { - /* MAC already registered, Must not have duplicates */ - err = -EEXIST; + /* MAC already registered, increase references count */ + ++table->refs[i]; goto out; } } + if (free < 0) { + err = -ENOMEM; + goto out; + } + mlx4_dbg(dev, "Free MAC index is %d\n", free); if (table->total == table->max) { @@ -271,103 +197,103 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) } /* Register new MAC */ + table->refs[free] = 1; table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID); err = mlx4_set_port_mac_table(dev, port, table->entries); if (unlikely(err)) { - mlx4_err(dev, "Failed adding MAC: 0x%llx\n", - (unsigned long long) mac); + mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac); + table->refs[free] = 0; 
table->entries[free] = 0; goto out; } - err = free; + if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) + *qpn = info->base_qpn + free; ++table->total; out: mutex_unlock(&table->mutex); return err; } -EXPORT_SYMBOL_GPL(__mlx4_register_mac); +EXPORT_SYMBOL_GPL(mlx4_register_mac); -int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) +static int validate_index(struct mlx4_dev *dev, + struct mlx4_mac_table *table, int index) { - u64 out_param; - int err; - - if (mlx4_is_mfunc(dev)) { - set_param_l(&out_param, port); - err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, - RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); - if (err) - return err; + int err = 0; - return get_param_l(&out_param); + if (index < 0 || index >= table->max || !table->entries[index]) { + mlx4_warn(dev, "No valid Mac entry for the given index\n"); + err = -EINVAL; } - return __mlx4_register_mac(dev, port, mac); + return err; } -EXPORT_SYMBOL_GPL(mlx4_register_mac); +static int find_index(struct mlx4_dev *dev, + struct mlx4_mac_table *table, u64 mac) +{ + int i; + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) + return i; + } + /* Mac not found */ + return -EINVAL; +} -void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) +void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn) { struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; struct mlx4_mac_table *table = &info->mac_table; - int index; + int index = qpn - info->base_qpn; + struct mlx4_mac_entry *entry; - index = find_index(dev, table, mac); + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { + entry = radix_tree_lookup(&info->mac_tree, qpn); + if (entry) { + mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1); + radix_tree_delete(&info->mac_tree, qpn); + index = find_index(dev, table, entry->mac); + kfree(entry); + } + } mutex_lock(&table->mutex); if (validate_index(dev, table, index)) goto out; - table->entries[index] = 0; - mlx4_set_port_mac_table(dev, port, table->entries); - --table->total; + /* Check whether this address has reference count */ + if (!(--table->refs[index])) { + table->entries[index] = 0; + mlx4_set_port_mac_table(dev, port, table->entries); + --table->total; + } out: mutex_unlock(&table->mutex); } -EXPORT_SYMBOL_GPL(__mlx4_unregister_mac); - -void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) -{ - u64 out_param; - int err; - - if (mlx4_is_mfunc(dev)) { - set_param_l(&out_param, port); - err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, - RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); - return; - } - __mlx4_unregister_mac(dev, port, mac); - return; -} EXPORT_SYMBOL_GPL(mlx4_unregister_mac); -int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac) +int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap) { struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; struct mlx4_mac_table *table = &info->mac_table; - struct mlx4_mac_entry *entry; int index = qpn - info->base_qpn; - int err = 0; + struct mlx4_mac_entry *entry; + int err; if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { entry = radix_tree_lookup(&info->mac_tree, qpn); if (!entry) return -EINVAL; - mlx4_uc_steer_release(dev, port, entry->mac, qpn); - mlx4_unregister_mac(dev, port, entry->mac); + index = find_index(dev, table, entry->mac); + mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0); entry->mac = new_mac; - mlx4_register_mac(dev, 
port, new_mac); - err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn); - return err; + err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0); + if (err || index < 0) + return err; } - /* CX1 doesn't support multi-functions */ mutex_lock(&table->mutex); err = validate_index(dev, table, index); @@ -378,8 +304,7 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac) err = mlx4_set_port_mac_table(dev, port, table->entries); if (unlikely(err)) { - mlx4_err(dev, "Failed adding MAC: 0x%llx\n", - (unsigned long long) new_mac); + mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac); table->entries[index] = 0; } out: @@ -387,7 +312,6 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac) return err; } EXPORT_SYMBOL_GPL(mlx4_replace_mac); - static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, __be32 *entries) { @@ -402,7 +326,7 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE); in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port; err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); + MLX4_CMD_TIME_CLASS_B); mlx4_free_cmd_mailbox(dev, mailbox); @@ -428,8 +352,7 @@ int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx) } EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan); -static int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, - int *index) +int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) { struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; int i, err = 0; @@ -464,7 +387,7 @@ static int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, goto out; } - /* Register new VLAN */ + /* Register new MAC */ table->refs[free] = 1; table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID); @@ -482,27 +405,9 @@ static int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, mutex_unlock(&table->mutex); return err; } - -int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) -{ - u64 out_param; - int err; - - if (mlx4_is_mfunc(dev)) { - set_param_l(&out_param, port); - err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN, - RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); - if (!err) - *index = get_param_l(&out_param); - - return err; - } - return __mlx4_register_vlan(dev, port, vlan, index); -} EXPORT_SYMBOL_GPL(mlx4_register_vlan); -static void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) +void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) { struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; @@ -527,25 +432,6 @@ static void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) out: mutex_unlock(&table->mutex); } - -void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) -{ - u64 in_param; - int err; - - if (mlx4_is_mfunc(dev)) { - set_param_l(&in_param, port); - err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP, - MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_WRAPPED); - if (!err) - mlx4_warn(dev, "Failed freeing vlan at index:%d\n", - index); - - return; - } - __mlx4_unregister_vlan(dev, port, index); -} EXPORT_SYMBOL_GPL(mlx4_unregister_vlan); int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps) @@ -576,8 +462,7 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps) *(__be32 *) (&inbuf[20]) = cpu_to_be32(port); err = 
mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3, - MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, - MLX4_CMD_NATIVE); + MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C); if (!err) *caps = *(__be32 *) (outbuf + 84); mlx4_free_cmd_mailbox(dev, inmailbox); @@ -614,8 +499,7 @@ int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port) *(__be32 *) (&inbuf[20]) = cpu_to_be32(port); err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3, - MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, - MLX4_CMD_NATIVE); + MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C); packet_error = be16_to_cpu(*(__be16 *) (outbuf + 4)); @@ -628,139 +512,6 @@ int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port) return err; } -static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, - u8 op_mod, struct mlx4_cmd_mailbox *inbox) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_port_info *port_info; - struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; - struct mlx4_slave_state *slave_st = &master->slave_state[slave]; - struct mlx4_set_port_rqp_calc_context *qpn_context; - struct mlx4_set_port_general_context *gen_context; - int reset_qkey_viols; - int port; - int is_eth; - u32 in_modifier; - u32 promisc; - u16 mtu, prev_mtu; - int err; - int i; - __be32 agg_cap_mask; - __be32 slave_cap_mask; - __be32 new_cap_mask; - - port = in_mod & 0xff; - in_modifier = in_mod >> 8; - is_eth = op_mod; - port_info = &priv->port[port]; - - /* Slaves cannot perform SET_PORT operations except changing MTU */ - if (is_eth) { - if (slave != dev->caps.function && - in_modifier != MLX4_SET_PORT_GENERAL) { - mlx4_warn(dev, "denying SET_PORT for slave:%d\n", - slave); - return -EINVAL; - } - switch (in_modifier) { - case MLX4_SET_PORT_RQP_CALC: - qpn_context = inbox->buf; - qpn_context->base_qpn = - cpu_to_be32(port_info->base_qpn); - qpn_context->n_mac = 0x7; - promisc = be32_to_cpu(qpn_context->promisc) >> - SET_PORT_PROMISC_SHIFT; - qpn_context->promisc = cpu_to_be32( - promisc << SET_PORT_PROMISC_SHIFT | - port_info->base_qpn); - promisc = be32_to_cpu(qpn_context->mcast) >> - SET_PORT_MC_PROMISC_SHIFT; - qpn_context->mcast = cpu_to_be32( - promisc << SET_PORT_MC_PROMISC_SHIFT | - port_info->base_qpn); - break; - case MLX4_SET_PORT_GENERAL: - gen_context = inbox->buf; - /* Mtu is configured as the max MTU among all the - * the functions on the port. */ - mtu = be16_to_cpu(gen_context->mtu); - mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port]); - prev_mtu = slave_st->mtu[port]; - slave_st->mtu[port] = mtu; - if (mtu > master->max_mtu[port]) - master->max_mtu[port] = mtu; - if (mtu < prev_mtu && prev_mtu == - master->max_mtu[port]) { - slave_st->mtu[port] = mtu; - master->max_mtu[port] = mtu; - for (i = 0; i < dev->num_slaves; i++) { - master->max_mtu[port] = - max(master->max_mtu[port], - master->slave_state[i].mtu[port]); - } - } - - gen_context->mtu = cpu_to_be16(master->max_mtu[port]); - break; - } - return mlx4_cmd(dev, inbox->dma, in_mod, op_mod, - MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, - MLX4_CMD_NATIVE); - } - - /* For IB, we only consider: - * - The capability mask, which is set to the aggregate of all - * slave function capabilities - * - The QKey violatin counter - reset according to each request. 
- */ - - if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { - reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40; - new_cap_mask = ((__be32 *) inbox->buf)[2]; - } else { - reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1; - new_cap_mask = ((__be32 *) inbox->buf)[1]; - } - - agg_cap_mask = 0; - slave_cap_mask = - priv->mfunc.master.slave_state[slave].ib_cap_mask[port]; - priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask; - for (i = 0; i < dev->num_slaves; i++) - agg_cap_mask |= - priv->mfunc.master.slave_state[i].ib_cap_mask[port]; - - /* only clear mailbox for guests. Master may be setting - * MTU or PKEY table size - */ - if (slave != dev->caps.function) - memset(inbox->buf, 0, 256); - if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { - *(u8 *) inbox->buf = !!reset_qkey_viols << 6; - ((__be32 *) inbox->buf)[2] = agg_cap_mask; - } else { - ((u8 *) inbox->buf)[3] = !!reset_qkey_viols; - ((__be32 *) inbox->buf)[1] = agg_cap_mask; - } - - err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); - if (err) - priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = - slave_cap_mask; - return err; -} - -int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - return mlx4_common_set_port(dev, slave, vhcr->in_modifier, - vhcr->op_modifier, inbox); -} - int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) { struct mlx4_cmd_mailbox *mailbox; @@ -777,127 +528,8 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); + MLX4_CMD_TIME_CLASS_B); mlx4_free_cmd_mailbox(dev, mailbox); return err; } - -int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, - u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx) -{ - struct mlx4_cmd_mailbox *mailbox; - struct mlx4_set_port_general_context *context; - int err; - u32 in_mod; - - mailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(mailbox)) - return PTR_ERR(mailbox); - context = mailbox->buf; - memset(context, 0, sizeof *context); - - context->flags = SET_PORT_GEN_ALL_VALID; - context->mtu = cpu_to_be16(mtu); - context->pptx = (pptx * (!pfctx)) << 7; - context->pfctx = pfctx; - context->pprx = (pprx * (!pfcrx)) << 7; - context->pfcrx = pfcrx; - - in_mod = MLX4_SET_PORT_GENERAL << 8 | port; - err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); - - mlx4_free_cmd_mailbox(dev, mailbox); - return err; -} -EXPORT_SYMBOL(mlx4_SET_PORT_general); - -int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, - u8 promisc) -{ - struct mlx4_cmd_mailbox *mailbox; - struct mlx4_set_port_rqp_calc_context *context; - int err; - u32 in_mod; - u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ? 
- MCAST_DIRECT : MCAST_DEFAULT; - - if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER && - dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) - return 0; - - mailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(mailbox)) - return PTR_ERR(mailbox); - context = mailbox->buf; - memset(context, 0, sizeof *context); - - context->base_qpn = cpu_to_be32(base_qpn); - context->n_mac = dev->caps.log_num_macs; - context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | - base_qpn); - context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT | - base_qpn); - context->intra_no_vlan = 0; - context->no_vlan = MLX4_NO_VLAN_IDX; - context->intra_vlan_miss = 0; - context->vlan_miss = MLX4_VLAN_MISS_IDX; - - in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port; - err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); - - mlx4_free_cmd_mailbox(dev, mailbox); - return err; -} -EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc); - -int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - int err = 0; - - return err; -} - -int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, - u64 mac, u64 clear, u8 mode) -{ - return mlx4_cmd(dev, (mac | (clear << 63)), port, mode, - MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B, - MLX4_CMD_WRAPPED); -} -EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR); - -int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - int err = 0; - - return err; -} - -int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, - u32 in_mod, struct mlx4_cmd_mailbox *outbox) -{ - return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0, - MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, - MLX4_CMD_NATIVE); -} - -int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - return mlx4_common_dump_eth_stats(dev, slave, - vhcr->in_modifier, outbox); -} diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/profile.c b/trunk/drivers/net/ethernet/mellanox/mlx4/profile.c index 66f91ca7a7c6..b967647d0c76 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/profile.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/profile.c @@ -98,8 +98,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz; profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz; profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz; - profile[MLX4_RES_MTT].size = dev_cap->mtt_entry_sz; - profile[MLX4_RES_MCG].size = mlx4_get_mgm_entry_size(dev); + profile[MLX4_RES_MTT].size = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz; + profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE; profile[MLX4_RES_QP].num = request->num_qp; profile[MLX4_RES_RDMARC].num = request->num_qp * request->rdmarc_per_qp; @@ -210,7 +210,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, init_hca->cmpt_base = profile[i].start; break; case MLX4_RES_MTT: - dev->caps.num_mtts = profile[i].num; + dev->caps.num_mtt_segs = profile[i].num; priv->mr_table.mtt_base = profile[i].start; init_hca->mtt_base = profile[i].start; break; @@ -218,8 +218,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, dev->caps.num_mgms = profile[i].num >> 1; dev->caps.num_amgms = profile[i].num >> 1; init_hca->mc_base = profile[i].start; 
- init_hca->log_mc_entry_sz = - ilog2(mlx4_get_mgm_entry_size(dev)); + init_hca->log_mc_entry_sz = ilog2(MLX4_MGM_ENTRY_SIZE); init_hca->log_mc_table_sz = profile[i].log_num; init_hca->log_mc_hash_sz = profile[i].log_num - 1; break; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/qp.c b/trunk/drivers/net/ethernet/mellanox/mlx4/qp.c index 6b03ac8b9002..15f870cb2590 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -35,8 +35,6 @@ #include #include -#include - #include #include @@ -57,7 +55,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) spin_unlock(&qp_table->lock); if (!qp) { - mlx4_dbg(dev, "Async event for none existent QP %08x\n", qpn); + mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn); return; } @@ -67,17 +65,10 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) complete(&qp->free); } -static int is_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp) -{ - return qp->qpn >= dev->caps.sqp_start && - qp->qpn <= dev->caps.sqp_start + 1; -} - -static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, - enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, - struct mlx4_qp_context *context, - enum mlx4_qp_optpar optpar, - int sqd_event, struct mlx4_qp *qp, int native) +int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, + struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, + int sqd_event, struct mlx4_qp *qp) { static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = { [MLX4_QP_STATE_RST] = { @@ -119,26 +110,16 @@ static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, } }; - struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_cmd_mailbox *mailbox; int ret = 0; - u8 port; if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE || !op[cur_state][new_state]) return -EINVAL; - if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) { - ret = mlx4_cmd(dev, 0, qp->qpn, 2, - MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native); - if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR && - cur_state != MLX4_QP_STATE_RST && - is_qp0(dev, qp)) { - port = (qp->qpn & 1) + 1; - priv->mfunc.master.qp0_state[port].qp0_active = 0; - } - return ret; - } + if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) + return mlx4_cmd(dev, 0, qp->qpn, 2, + MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A); mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) @@ -151,218 +132,107 @@ static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; } - port = ((context->pri_path.sched_queue >> 6) & 1) + 1; - if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) - context->pri_path.sched_queue = (context->pri_path.sched_queue & - 0xc3); - *(__be32 *) mailbox->buf = cpu_to_be32(optpar); memcpy(mailbox->buf + 8, context, sizeof *context); ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn = cpu_to_be32(qp->qpn); - ret = mlx4_cmd(dev, mailbox->dma | dev->caps.function, - qp->qpn | (!!sqd_event << 31), + ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31), new_state == MLX4_QP_STATE_RST ? 
2 : 0, - op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native); + op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C); mlx4_free_cmd_mailbox(dev, mailbox); return ret; } - -int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, - enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, - struct mlx4_qp_context *context, - enum mlx4_qp_optpar optpar, - int sqd_event, struct mlx4_qp *qp) -{ - return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context, - optpar, sqd_event, qp, 0); -} EXPORT_SYMBOL_GPL(mlx4_qp_modify); -int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, - int *base) +int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_qp_table *qp_table = &priv->qp_table; + int qpn; - *base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align); - if (*base == -1) + qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align); + if (qpn == -1) return -ENOMEM; + *base = qpn; return 0; } - -int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) -{ - u64 in_param; - u64 out_param; - int err; - - if (mlx4_is_mfunc(dev)) { - set_param_l(&in_param, cnt); - set_param_h(&in_param, align); - err = mlx4_cmd_imm(dev, in_param, &out_param, - RES_QP, RES_OP_RESERVE, - MLX4_CMD_ALLOC_RES, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); - if (err) - return err; - - *base = get_param_l(&out_param); - return 0; - } - return __mlx4_qp_reserve_range(dev, cnt, align, base); -} EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range); -void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) +void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_qp_table *qp_table = &priv->qp_table; - - if (mlx4_is_qp_reserved(dev, (u32) base_qpn)) + if (base_qpn < dev->caps.sqp_start + 8) return; - mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt); -} - -void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) -{ - u64 in_param; - int err; - if (mlx4_is_mfunc(dev)) { - set_param_l(&in_param, base_qpn); - set_param_h(&in_param, cnt); - err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE, - MLX4_CMD_FREE_RES, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); - if (err) { - mlx4_warn(dev, "Failed to release qp range" - " base:%d cnt:%d\n", base_qpn, cnt); - } - } else - __mlx4_qp_release_range(dev, base_qpn, cnt); + mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt); } EXPORT_SYMBOL_GPL(mlx4_qp_release_range); -int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) +int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_qp_table *qp_table = &priv->qp_table; int err; - err = mlx4_table_get(dev, &qp_table->qp_table, qpn); + if (!qpn) + return -EINVAL; + + qp->qpn = qpn; + + err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn); if (err) goto err_out; - err = mlx4_table_get(dev, &qp_table->auxc_table, qpn); + err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn); if (err) goto err_put_qp; - err = mlx4_table_get(dev, &qp_table->altc_table, qpn); + err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn); if (err) goto err_put_auxc; - err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn); + err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn); if (err) goto err_put_altc; - err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn); + err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn); if (err) goto err_put_rdmarc; + 
spin_lock_irq(&qp_table->lock); + err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp); + spin_unlock_irq(&qp_table->lock); + if (err) + goto err_put_cmpt; + + atomic_set(&qp->refcount, 1); + init_completion(&qp->free); + return 0; +err_put_cmpt: + mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn); + err_put_rdmarc: - mlx4_table_put(dev, &qp_table->rdmarc_table, qpn); + mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn); err_put_altc: - mlx4_table_put(dev, &qp_table->altc_table, qpn); + mlx4_table_put(dev, &qp_table->altc_table, qp->qpn); err_put_auxc: - mlx4_table_put(dev, &qp_table->auxc_table, qpn); + mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn); err_put_qp: - mlx4_table_put(dev, &qp_table->qp_table, qpn); + mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); err_out: return err; } - -static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) -{ - u64 param; - - if (mlx4_is_mfunc(dev)) { - set_param_l(¶m, qpn); - return mlx4_cmd_imm(dev, param, ¶m, RES_QP, RES_OP_MAP_ICM, - MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_WRAPPED); - } - return __mlx4_qp_alloc_icm(dev, qpn); -} - -void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_qp_table *qp_table = &priv->qp_table; - - mlx4_table_put(dev, &qp_table->cmpt_table, qpn); - mlx4_table_put(dev, &qp_table->rdmarc_table, qpn); - mlx4_table_put(dev, &qp_table->altc_table, qpn); - mlx4_table_put(dev, &qp_table->auxc_table, qpn); - mlx4_table_put(dev, &qp_table->qp_table, qpn); -} - -static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) -{ - u64 in_param; - - if (mlx4_is_mfunc(dev)) { - set_param_l(&in_param, qpn); - if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM, - MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_WRAPPED)) - mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn); - } else - __mlx4_qp_free_icm(dev, qpn); -} - -int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_qp_table *qp_table = &priv->qp_table; - int err; - - if (!qpn) - return -EINVAL; - - qp->qpn = qpn; - - err = mlx4_qp_alloc_icm(dev, qpn); - if (err) - return err; - - spin_lock_irq(&qp_table->lock); - err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & - (dev->caps.num_qps - 1), qp); - spin_unlock_irq(&qp_table->lock); - if (err) - goto err_icm; - - atomic_set(&qp->refcount, 1); - init_completion(&qp->free); - - return 0; - -err_icm: - mlx4_qp_free_icm(dev, qpn); - return err; -} - EXPORT_SYMBOL_GPL(mlx4_qp_alloc); void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) @@ -378,18 +248,24 @@ EXPORT_SYMBOL_GPL(mlx4_qp_remove); void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp) { + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; + if (atomic_dec_and_test(&qp->refcount)) complete(&qp->free); wait_for_completion(&qp->free); - mlx4_qp_free_icm(dev, qp->qpn); + mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn); + mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn); + mlx4_table_put(dev, &qp_table->altc_table, qp->qpn); + mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn); + mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); } EXPORT_SYMBOL_GPL(mlx4_qp_free); static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn) { return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + MLX4_CMD_TIME_CLASS_B); } int mlx4_init_qp_table(struct mlx4_dev *dev) @@ -400,8 +276,6 @@ int 
mlx4_init_qp_table(struct mlx4_dev *dev) spin_lock_init(&qp_table->lock); INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC); - if (mlx4_is_slave(dev)) - return 0; /* * We reserve 2 extra QPs per port for the special QPs. The @@ -453,9 +327,6 @@ int mlx4_init_qp_table(struct mlx4_dev *dev) void mlx4_cleanup_qp_table(struct mlx4_dev *dev) { - if (mlx4_is_slave(dev)) - return; - mlx4_CONF_SPECIAL_QP(dev, 0); mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap); } @@ -471,8 +342,7 @@ int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, return PTR_ERR(mailbox); err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0, - MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_WRAPPED); + MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A); if (!err) memcpy(context, mailbox->buf + 8, sizeof *context); diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/trunk/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c deleted file mode 100644 index ed20751a057d..000000000000 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ /dev/null @@ -1,3104 +0,0 @@ -/* - * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. - * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. - * All rights reserved. - * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "mlx4.h" -#include "fw.h" - -#define MLX4_MAC_VALID (1ull << 63) -#define MLX4_MAC_MASK 0x7fffffffffffffffULL -#define ETH_ALEN 6 - -struct mac_res { - struct list_head list; - u64 mac; - u8 port; -}; - -struct res_common { - struct list_head list; - u32 res_id; - int owner; - int state; - int from_state; - int to_state; - int removing; -}; - -enum { - RES_ANY_BUSY = 1 -}; - -struct res_gid { - struct list_head list; - u8 gid[16]; - enum mlx4_protocol prot; -}; - -enum res_qp_states { - RES_QP_BUSY = RES_ANY_BUSY, - - /* QP number was allocated */ - RES_QP_RESERVED, - - /* ICM memory for QP context was mapped */ - RES_QP_MAPPED, - - /* QP is in hw ownership */ - RES_QP_HW -}; - -static inline const char *qp_states_str(enum res_qp_states state) -{ - switch (state) { - case RES_QP_BUSY: return "RES_QP_BUSY"; - case RES_QP_RESERVED: return "RES_QP_RESERVED"; - case RES_QP_MAPPED: return "RES_QP_MAPPED"; - case RES_QP_HW: return "RES_QP_HW"; - default: return "Unknown"; - } -} - -struct res_qp { - struct res_common com; - struct res_mtt *mtt; - struct res_cq *rcq; - struct res_cq *scq; - struct res_srq *srq; - struct list_head mcg_list; - spinlock_t mcg_spl; - int local_qpn; -}; - -enum res_mtt_states { - RES_MTT_BUSY = RES_ANY_BUSY, - RES_MTT_ALLOCATED, -}; - -static inline const char *mtt_states_str(enum res_mtt_states state) -{ - switch (state) { - case RES_MTT_BUSY: return "RES_MTT_BUSY"; - case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED"; - default: return "Unknown"; - } -} - -struct res_mtt { - struct res_common com; - int order; - atomic_t ref_count; -}; - -enum res_mpt_states { - RES_MPT_BUSY = RES_ANY_BUSY, - RES_MPT_RESERVED, - RES_MPT_MAPPED, - RES_MPT_HW, -}; - -struct res_mpt { - struct res_common com; - struct res_mtt *mtt; - int key; -}; - -enum res_eq_states { - RES_EQ_BUSY = RES_ANY_BUSY, - RES_EQ_RESERVED, - RES_EQ_HW, -}; - -struct res_eq { - struct res_common com; - struct res_mtt *mtt; -}; - -enum res_cq_states { - RES_CQ_BUSY = RES_ANY_BUSY, - RES_CQ_ALLOCATED, - RES_CQ_HW, -}; - -struct res_cq { - struct res_common com; - struct res_mtt *mtt; - atomic_t ref_count; -}; - -enum res_srq_states { - RES_SRQ_BUSY = RES_ANY_BUSY, - RES_SRQ_ALLOCATED, - RES_SRQ_HW, -}; - -static inline const char *srq_states_str(enum res_srq_states state) -{ - switch (state) { - case RES_SRQ_BUSY: return "RES_SRQ_BUSY"; - case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED"; - case RES_SRQ_HW: return "RES_SRQ_HW"; - default: return "Unknown"; - } -} - -struct res_srq { - struct res_common com; - struct res_mtt *mtt; - struct res_cq *cq; - atomic_t ref_count; -}; - -enum res_counter_states { - RES_COUNTER_BUSY = RES_ANY_BUSY, - RES_COUNTER_ALLOCATED, -}; - -static inline const char *counter_states_str(enum res_counter_states state) -{ - switch (state) { - case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY"; - case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED"; - default: return "Unknown"; - } -} - -struct res_counter { - struct res_common com; - int port; -}; - -/* For Debug uses */ -static const char *ResourceType(enum mlx4_resource rt) -{ - switch (rt) { - case RES_QP: return "RES_QP"; - case RES_CQ: return "RES_CQ"; - case RES_SRQ: return "RES_SRQ"; - case RES_MPT: return "RES_MPT"; - case RES_MTT: return "RES_MTT"; - case RES_MAC: return "RES_MAC"; - case RES_EQ: return "RES_EQ"; - case RES_COUNTER: return "RES_COUNTER"; - default: return "Unknown resource type !!!"; - }; -} - -int 
mlx4_init_resource_tracker(struct mlx4_dev *dev) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - int i; - int t; - - priv->mfunc.master.res_tracker.slave_list = - kzalloc(dev->num_slaves * sizeof(struct slave_list), - GFP_KERNEL); - if (!priv->mfunc.master.res_tracker.slave_list) - return -ENOMEM; - - for (i = 0 ; i < dev->num_slaves; i++) { - for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t) - INIT_LIST_HEAD(&priv->mfunc.master.res_tracker. - slave_list[i].res_list[t]); - mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex); - } - - mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n", - dev->num_slaves); - for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) - INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i], - GFP_ATOMIC|__GFP_NOWARN); - - spin_lock_init(&priv->mfunc.master.res_tracker.lock); - return 0 ; -} - -void mlx4_free_resource_tracker(struct mlx4_dev *dev) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - int i; - - if (priv->mfunc.master.res_tracker.slave_list) { - for (i = 0 ; i < dev->num_slaves; i++) - mlx4_delete_all_resources_for_slave(dev, i); - - kfree(priv->mfunc.master.res_tracker.slave_list); - } -} - -static void update_ud_gid(struct mlx4_dev *dev, - struct mlx4_qp_context *qp_ctx, u8 slave) -{ - u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; - - if (MLX4_QP_ST_UD == ts) - qp_ctx->pri_path.mgid_index = 0x80 | slave; - - mlx4_dbg(dev, "slave %d, new gid index: 0x%x ", - slave, qp_ctx->pri_path.mgid_index); -} - -static int mpt_mask(struct mlx4_dev *dev) -{ - return dev->caps.num_mpts - 1; -} - -static void *find_res(struct mlx4_dev *dev, int res_id, - enum mlx4_resource type) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - - return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type], - res_id); -} - -static int get_res(struct mlx4_dev *dev, int slave, int res_id, - enum mlx4_resource type, - void *res) -{ - struct res_common *r; - int err = 0; - - spin_lock_irq(mlx4_tlock(dev)); - r = find_res(dev, res_id, type); - if (!r) { - err = -ENONET; - goto exit; - } - - if (r->state == RES_ANY_BUSY) { - err = -EBUSY; - goto exit; - } - - if (r->owner != slave) { - err = -EPERM; - goto exit; - } - - r->from_state = r->state; - r->state = RES_ANY_BUSY; - mlx4_dbg(dev, "res %s id 0x%x to busy\n", - ResourceType(type), r->res_id); - - if (res) - *((struct res_common **)res) = r; - -exit: - spin_unlock_irq(mlx4_tlock(dev)); - return err; -} - -int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, - enum mlx4_resource type, - int res_id, int *slave) -{ - - struct res_common *r; - int err = -ENOENT; - int id = res_id; - - if (type == RES_QP) - id &= 0x7fffff; - spin_lock(mlx4_tlock(dev)); - - r = find_res(dev, id, type); - if (r) { - *slave = r->owner; - err = 0; - } - spin_unlock(mlx4_tlock(dev)); - - return err; -} - -static void put_res(struct mlx4_dev *dev, int slave, int res_id, - enum mlx4_resource type) -{ - struct res_common *r; - - spin_lock_irq(mlx4_tlock(dev)); - r = find_res(dev, res_id, type); - if (r) - r->state = r->from_state; - spin_unlock_irq(mlx4_tlock(dev)); -} - -static struct res_common *alloc_qp_tr(int id) -{ - struct res_qp *ret; - - ret = kzalloc(sizeof *ret, GFP_KERNEL); - if (!ret) - return NULL; - - ret->com.res_id = id; - ret->com.state = RES_QP_RESERVED; - INIT_LIST_HEAD(&ret->mcg_list); - spin_lock_init(&ret->mcg_spl); - - return &ret->com; -} - -static struct res_common *alloc_mtt_tr(int id, int order) -{ - struct res_mtt *ret; - - ret = kzalloc(sizeof *ret, GFP_KERNEL); - if (!ret) - return NULL; - - 
ret->com.res_id = id; - ret->order = order; - ret->com.state = RES_MTT_ALLOCATED; - atomic_set(&ret->ref_count, 0); - - return &ret->com; -} - -static struct res_common *alloc_mpt_tr(int id, int key) -{ - struct res_mpt *ret; - - ret = kzalloc(sizeof *ret, GFP_KERNEL); - if (!ret) - return NULL; - - ret->com.res_id = id; - ret->com.state = RES_MPT_RESERVED; - ret->key = key; - - return &ret->com; -} - -static struct res_common *alloc_eq_tr(int id) -{ - struct res_eq *ret; - - ret = kzalloc(sizeof *ret, GFP_KERNEL); - if (!ret) - return NULL; - - ret->com.res_id = id; - ret->com.state = RES_EQ_RESERVED; - - return &ret->com; -} - -static struct res_common *alloc_cq_tr(int id) -{ - struct res_cq *ret; - - ret = kzalloc(sizeof *ret, GFP_KERNEL); - if (!ret) - return NULL; - - ret->com.res_id = id; - ret->com.state = RES_CQ_ALLOCATED; - atomic_set(&ret->ref_count, 0); - - return &ret->com; -} - -static struct res_common *alloc_srq_tr(int id) -{ - struct res_srq *ret; - - ret = kzalloc(sizeof *ret, GFP_KERNEL); - if (!ret) - return NULL; - - ret->com.res_id = id; - ret->com.state = RES_SRQ_ALLOCATED; - atomic_set(&ret->ref_count, 0); - - return &ret->com; -} - -static struct res_common *alloc_counter_tr(int id) -{ - struct res_counter *ret; - - ret = kzalloc(sizeof *ret, GFP_KERNEL); - if (!ret) - return NULL; - - ret->com.res_id = id; - ret->com.state = RES_COUNTER_ALLOCATED; - - return &ret->com; -} - -static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave, - int extra) -{ - struct res_common *ret; - - switch (type) { - case RES_QP: - ret = alloc_qp_tr(id); - break; - case RES_MPT: - ret = alloc_mpt_tr(id, extra); - break; - case RES_MTT: - ret = alloc_mtt_tr(id, extra); - break; - case RES_EQ: - ret = alloc_eq_tr(id); - break; - case RES_CQ: - ret = alloc_cq_tr(id); - break; - case RES_SRQ: - ret = alloc_srq_tr(id); - break; - case RES_MAC: - printk(KERN_ERR "implementation missing\n"); - return NULL; - case RES_COUNTER: - ret = alloc_counter_tr(id); - break; - - default: - return NULL; - } - if (ret) - ret->owner = slave; - - return ret; -} - -static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count, - enum mlx4_resource type, int extra) -{ - int i; - int err; - struct mlx4_priv *priv = mlx4_priv(dev); - struct res_common **res_arr; - struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; - struct radix_tree_root *root = &tracker->res_tree[type]; - - res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL); - if (!res_arr) - return -ENOMEM; - - for (i = 0; i < count; ++i) { - res_arr[i] = alloc_tr(base + i, type, slave, extra); - if (!res_arr[i]) { - for (--i; i >= 0; --i) - kfree(res_arr[i]); - - kfree(res_arr); - return -ENOMEM; - } - } - - spin_lock_irq(mlx4_tlock(dev)); - for (i = 0; i < count; ++i) { - if (find_res(dev, base + i, type)) { - err = -EEXIST; - goto undo; - } - err = radix_tree_insert(root, base + i, res_arr[i]); - if (err) - goto undo; - list_add_tail(&res_arr[i]->list, - &tracker->slave_list[slave].res_list[type]); - } - spin_unlock_irq(mlx4_tlock(dev)); - kfree(res_arr); - - return 0; - -undo: - for (--i; i >= base; --i) - radix_tree_delete(&tracker->res_tree[type], i); - - spin_unlock_irq(mlx4_tlock(dev)); - - for (i = 0; i < count; ++i) - kfree(res_arr[i]); - - kfree(res_arr); - - return err; -} - -static int remove_qp_ok(struct res_qp *res) -{ - if (res->com.state == RES_QP_BUSY) - return -EBUSY; - else if (res->com.state != RES_QP_RESERVED) - return -EPERM; - - return 0; -} - -static int 
remove_mtt_ok(struct res_mtt *res, int order) -{ - if (res->com.state == RES_MTT_BUSY || - atomic_read(&res->ref_count)) { - printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n", - __func__, __LINE__, - mtt_states_str(res->com.state), - atomic_read(&res->ref_count)); - return -EBUSY; - } else if (res->com.state != RES_MTT_ALLOCATED) - return -EPERM; - else if (res->order != order) - return -EINVAL; - - return 0; -} - -static int remove_mpt_ok(struct res_mpt *res) -{ - if (res->com.state == RES_MPT_BUSY) - return -EBUSY; - else if (res->com.state != RES_MPT_RESERVED) - return -EPERM; - - return 0; -} - -static int remove_eq_ok(struct res_eq *res) -{ - if (res->com.state == RES_MPT_BUSY) - return -EBUSY; - else if (res->com.state != RES_MPT_RESERVED) - return -EPERM; - - return 0; -} - -static int remove_counter_ok(struct res_counter *res) -{ - if (res->com.state == RES_COUNTER_BUSY) - return -EBUSY; - else if (res->com.state != RES_COUNTER_ALLOCATED) - return -EPERM; - - return 0; -} - -static int remove_cq_ok(struct res_cq *res) -{ - if (res->com.state == RES_CQ_BUSY) - return -EBUSY; - else if (res->com.state != RES_CQ_ALLOCATED) - return -EPERM; - - return 0; -} - -static int remove_srq_ok(struct res_srq *res) -{ - if (res->com.state == RES_SRQ_BUSY) - return -EBUSY; - else if (res->com.state != RES_SRQ_ALLOCATED) - return -EPERM; - - return 0; -} - -static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra) -{ - switch (type) { - case RES_QP: - return remove_qp_ok((struct res_qp *)res); - case RES_CQ: - return remove_cq_ok((struct res_cq *)res); - case RES_SRQ: - return remove_srq_ok((struct res_srq *)res); - case RES_MPT: - return remove_mpt_ok((struct res_mpt *)res); - case RES_MTT: - return remove_mtt_ok((struct res_mtt *)res, extra); - case RES_MAC: - return -ENOSYS; - case RES_EQ: - return remove_eq_ok((struct res_eq *)res); - case RES_COUNTER: - return remove_counter_ok((struct res_counter *)res); - default: - return -EINVAL; - } -} - -static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count, - enum mlx4_resource type, int extra) -{ - int i; - int err; - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; - struct res_common *r; - - spin_lock_irq(mlx4_tlock(dev)); - for (i = base; i < base + count; ++i) { - r = radix_tree_lookup(&tracker->res_tree[type], i); - if (!r) { - err = -ENOENT; - goto out; - } - if (r->owner != slave) { - err = -EPERM; - goto out; - } - err = remove_ok(r, type, extra); - if (err) - goto out; - } - - for (i = base; i < base + count; ++i) { - r = radix_tree_lookup(&tracker->res_tree[type], i); - radix_tree_delete(&tracker->res_tree[type], i); - list_del(&r->list); - kfree(r); - } - err = 0; - -out: - spin_unlock_irq(mlx4_tlock(dev)); - - return err; -} - -static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn, - enum res_qp_states state, struct res_qp **qp, - int alloc) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; - struct res_qp *r; - int err = 0; - - spin_lock_irq(mlx4_tlock(dev)); - r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn); - if (!r) - err = -ENOENT; - else if (r->com.owner != slave) - err = -EPERM; - else { - switch (state) { - case RES_QP_BUSY: - mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n", - __func__, r->com.res_id); - err = -EBUSY; - break; - - case RES_QP_RESERVED: - if (r->com.state == RES_QP_MAPPED && !alloc) - break; - - mlx4_dbg(dev, 
"failed RES_QP, 0x%x\n", r->com.res_id); - err = -EINVAL; - break; - - case RES_QP_MAPPED: - if ((r->com.state == RES_QP_RESERVED && alloc) || - r->com.state == RES_QP_HW) - break; - else { - mlx4_dbg(dev, "failed RES_QP, 0x%x\n", - r->com.res_id); - err = -EINVAL; - } - - break; - - case RES_QP_HW: - if (r->com.state != RES_QP_MAPPED) - err = -EINVAL; - break; - default: - err = -EINVAL; - } - - if (!err) { - r->com.from_state = r->com.state; - r->com.to_state = state; - r->com.state = RES_QP_BUSY; - if (qp) - *qp = (struct res_qp *)r; - } - } - - spin_unlock_irq(mlx4_tlock(dev)); - - return err; -} - -static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index, - enum res_mpt_states state, struct res_mpt **mpt) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; - struct res_mpt *r; - int err = 0; - - spin_lock_irq(mlx4_tlock(dev)); - r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index); - if (!r) - err = -ENOENT; - else if (r->com.owner != slave) - err = -EPERM; - else { - switch (state) { - case RES_MPT_BUSY: - err = -EINVAL; - break; - - case RES_MPT_RESERVED: - if (r->com.state != RES_MPT_MAPPED) - err = -EINVAL; - break; - - case RES_MPT_MAPPED: - if (r->com.state != RES_MPT_RESERVED && - r->com.state != RES_MPT_HW) - err = -EINVAL; - break; - - case RES_MPT_HW: - if (r->com.state != RES_MPT_MAPPED) - err = -EINVAL; - break; - default: - err = -EINVAL; - } - - if (!err) { - r->com.from_state = r->com.state; - r->com.to_state = state; - r->com.state = RES_MPT_BUSY; - if (mpt) - *mpt = (struct res_mpt *)r; - } - } - - spin_unlock_irq(mlx4_tlock(dev)); - - return err; -} - -static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, - enum res_eq_states state, struct res_eq **eq) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; - struct res_eq *r; - int err = 0; - - spin_lock_irq(mlx4_tlock(dev)); - r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index); - if (!r) - err = -ENOENT; - else if (r->com.owner != slave) - err = -EPERM; - else { - switch (state) { - case RES_EQ_BUSY: - err = -EINVAL; - break; - - case RES_EQ_RESERVED: - if (r->com.state != RES_EQ_HW) - err = -EINVAL; - break; - - case RES_EQ_HW: - if (r->com.state != RES_EQ_RESERVED) - err = -EINVAL; - break; - - default: - err = -EINVAL; - } - - if (!err) { - r->com.from_state = r->com.state; - r->com.to_state = state; - r->com.state = RES_EQ_BUSY; - if (eq) - *eq = r; - } - } - - spin_unlock_irq(mlx4_tlock(dev)); - - return err; -} - -static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn, - enum res_cq_states state, struct res_cq **cq) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; - struct res_cq *r; - int err; - - spin_lock_irq(mlx4_tlock(dev)); - r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn); - if (!r) - err = -ENOENT; - else if (r->com.owner != slave) - err = -EPERM; - else { - switch (state) { - case RES_CQ_BUSY: - err = -EBUSY; - break; - - case RES_CQ_ALLOCATED: - if (r->com.state != RES_CQ_HW) - err = -EINVAL; - else if (atomic_read(&r->ref_count)) - err = -EBUSY; - else - err = 0; - break; - - case RES_CQ_HW: - if (r->com.state != RES_CQ_ALLOCATED) - err = -EINVAL; - else - err = 0; - break; - - default: - err = -EINVAL; - } - - if (!err) { - r->com.from_state = r->com.state; - r->com.to_state = state; - r->com.state = RES_CQ_BUSY; - if 
(cq) - *cq = r; - } - } - - spin_unlock_irq(mlx4_tlock(dev)); - - return err; -} - -static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, - enum res_cq_states state, struct res_srq **srq) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; - struct res_srq *r; - int err = 0; - - spin_lock_irq(mlx4_tlock(dev)); - r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index); - if (!r) - err = -ENOENT; - else if (r->com.owner != slave) - err = -EPERM; - else { - switch (state) { - case RES_SRQ_BUSY: - err = -EINVAL; - break; - - case RES_SRQ_ALLOCATED: - if (r->com.state != RES_SRQ_HW) - err = -EINVAL; - else if (atomic_read(&r->ref_count)) - err = -EBUSY; - break; - - case RES_SRQ_HW: - if (r->com.state != RES_SRQ_ALLOCATED) - err = -EINVAL; - break; - - default: - err = -EINVAL; - } - - if (!err) { - r->com.from_state = r->com.state; - r->com.to_state = state; - r->com.state = RES_SRQ_BUSY; - if (srq) - *srq = r; - } - } - - spin_unlock_irq(mlx4_tlock(dev)); - - return err; -} - -static void res_abort_move(struct mlx4_dev *dev, int slave, - enum mlx4_resource type, int id) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; - struct res_common *r; - - spin_lock_irq(mlx4_tlock(dev)); - r = radix_tree_lookup(&tracker->res_tree[type], id); - if (r && (r->owner == slave)) - r->state = r->from_state; - spin_unlock_irq(mlx4_tlock(dev)); -} - -static void res_end_move(struct mlx4_dev *dev, int slave, - enum mlx4_resource type, int id) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; - struct res_common *r; - - spin_lock_irq(mlx4_tlock(dev)); - r = radix_tree_lookup(&tracker->res_tree[type], id); - if (r && (r->owner == slave)) - r->state = r->to_state; - spin_unlock_irq(mlx4_tlock(dev)); -} - -static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn) -{ - return mlx4_is_qp_reserved(dev, qpn); -} - -static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, - u64 in_param, u64 *out_param) -{ - int err; - int count; - int align; - int base; - int qpn; - - switch (op) { - case RES_OP_RESERVE: - count = get_param_l(&in_param); - align = get_param_h(&in_param); - err = __mlx4_qp_reserve_range(dev, count, align, &base); - if (err) - return err; - - err = add_res_range(dev, slave, base, count, RES_QP, 0); - if (err) { - __mlx4_qp_release_range(dev, base, count); - return err; - } - set_param_l(out_param, base); - break; - case RES_OP_MAP_ICM: - qpn = get_param_l(&in_param) & 0x7fffff; - if (valid_reserved(dev, slave, qpn)) { - err = add_res_range(dev, slave, qpn, 1, RES_QP, 0); - if (err) - return err; - } - - err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, - NULL, 1); - if (err) - return err; - - if (!valid_reserved(dev, slave, qpn)) { - err = __mlx4_qp_alloc_icm(dev, qpn); - if (err) { - res_abort_move(dev, slave, RES_QP, qpn); - return err; - } - } - - res_end_move(dev, slave, RES_QP, qpn); - break; - - default: - err = -EINVAL; - break; - } - return err; -} - -static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, - u64 in_param, u64 *out_param) -{ - int err = -EINVAL; - int base; - int order; - - if (op != RES_OP_RESERVE_AND_MAP) - return err; - - order = get_param_l(&in_param); - base = __mlx4_alloc_mtt_range(dev, order); - if (base == -1) - return -ENOMEM; - - err = add_res_range(dev, slave, base, 1, RES_MTT, order); 
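/* Note: if the tracker cannot record this MTT range for the slave, the
 * range just taken from the master allocator is freed again below so it
 * is not leaked; on success the base index is passed back to the slave. */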
- if (err) - __mlx4_free_mtt_range(dev, base, order); - else - set_param_l(out_param, base); - - return err; -} - -static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, - u64 in_param, u64 *out_param) -{ - int err = -EINVAL; - int index; - int id; - struct res_mpt *mpt; - - switch (op) { - case RES_OP_RESERVE: - index = __mlx4_mr_reserve(dev); - if (index == -1) - break; - id = index & mpt_mask(dev); - - err = add_res_range(dev, slave, id, 1, RES_MPT, index); - if (err) { - __mlx4_mr_release(dev, index); - break; - } - set_param_l(out_param, index); - break; - case RES_OP_MAP_ICM: - index = get_param_l(&in_param); - id = index & mpt_mask(dev); - err = mr_res_start_move_to(dev, slave, id, - RES_MPT_MAPPED, &mpt); - if (err) - return err; - - err = __mlx4_mr_alloc_icm(dev, mpt->key); - if (err) { - res_abort_move(dev, slave, RES_MPT, id); - return err; - } - - res_end_move(dev, slave, RES_MPT, id); - break; - } - return err; -} - -static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, - u64 in_param, u64 *out_param) -{ - int cqn; - int err; - - switch (op) { - case RES_OP_RESERVE_AND_MAP: - err = __mlx4_cq_alloc_icm(dev, &cqn); - if (err) - break; - - err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0); - if (err) { - __mlx4_cq_free_icm(dev, cqn); - break; - } - - set_param_l(out_param, cqn); - break; - - default: - err = -EINVAL; - } - - return err; -} - -static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, - u64 in_param, u64 *out_param) -{ - int srqn; - int err; - - switch (op) { - case RES_OP_RESERVE_AND_MAP: - err = __mlx4_srq_alloc_icm(dev, &srqn); - if (err) - break; - - err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0); - if (err) { - __mlx4_srq_free_icm(dev, srqn); - break; - } - - set_param_l(out_param, srqn); - break; - - default: - err = -EINVAL; - } - - return err; -} - -static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; - struct mac_res *res; - - res = kzalloc(sizeof *res, GFP_KERNEL); - if (!res) - return -ENOMEM; - res->mac = mac; - res->port = (u8) port; - list_add_tail(&res->list, - &tracker->slave_list[slave].res_list[RES_MAC]); - return 0; -} - -static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac, - int port) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; - struct list_head *mac_list = - &tracker->slave_list[slave].res_list[RES_MAC]; - struct mac_res *res, *tmp; - - list_for_each_entry_safe(res, tmp, mac_list, list) { - if (res->mac == mac && res->port == (u8) port) { - list_del(&res->list); - kfree(res); - break; - } - } -} - -static void rem_slave_macs(struct mlx4_dev *dev, int slave) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; - struct list_head *mac_list = - &tracker->slave_list[slave].res_list[RES_MAC]; - struct mac_res *res, *tmp; - - list_for_each_entry_safe(res, tmp, mac_list, list) { - list_del(&res->list); - __mlx4_unregister_mac(dev, res->port, res->mac); - kfree(res); - } -} - -static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, - u64 in_param, u64 *out_param) -{ - int err = -EINVAL; - int port; - u64 mac; - - if (op != RES_OP_RESERVE_AND_MAP) - return err; - - port = get_param_l(out_param); - mac = in_param; - - err = __mlx4_register_mac(dev, port, mac); - if 
(err >= 0) { - set_param_l(out_param, err); - err = 0; - } - - if (!err) { - err = mac_add_to_slave(dev, slave, mac, port); - if (err) - __mlx4_unregister_mac(dev, port, mac); - } - return err; -} - -static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, - u64 in_param, u64 *out_param) -{ - return 0; -} - -int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - int err; - int alop = vhcr->op_modifier; - - switch (vhcr->in_modifier) { - case RES_QP: - err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop, - vhcr->in_param, &vhcr->out_param); - break; - - case RES_MTT: - err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop, - vhcr->in_param, &vhcr->out_param); - break; - - case RES_MPT: - err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop, - vhcr->in_param, &vhcr->out_param); - break; - - case RES_CQ: - err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop, - vhcr->in_param, &vhcr->out_param); - break; - - case RES_SRQ: - err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop, - vhcr->in_param, &vhcr->out_param); - break; - - case RES_MAC: - err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop, - vhcr->in_param, &vhcr->out_param); - break; - - case RES_VLAN: - err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop, - vhcr->in_param, &vhcr->out_param); - break; - - default: - err = -EINVAL; - break; - } - - return err; -} - -static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, - u64 in_param) -{ - int err; - int count; - int base; - int qpn; - - switch (op) { - case RES_OP_RESERVE: - base = get_param_l(&in_param) & 0x7fffff; - count = get_param_h(&in_param); - err = rem_res_range(dev, slave, base, count, RES_QP, 0); - if (err) - break; - __mlx4_qp_release_range(dev, base, count); - break; - case RES_OP_MAP_ICM: - qpn = get_param_l(&in_param) & 0x7fffff; - err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED, - NULL, 0); - if (err) - return err; - - if (!valid_reserved(dev, slave, qpn)) - __mlx4_qp_free_icm(dev, qpn); - - res_end_move(dev, slave, RES_QP, qpn); - - if (valid_reserved(dev, slave, qpn)) - err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0); - break; - default: - err = -EINVAL; - break; - } - return err; -} - -static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, - u64 in_param, u64 *out_param) -{ - int err = -EINVAL; - int base; - int order; - - if (op != RES_OP_RESERVE_AND_MAP) - return err; - - base = get_param_l(&in_param); - order = get_param_h(&in_param); - err = rem_res_range(dev, slave, base, 1, RES_MTT, order); - if (!err) - __mlx4_free_mtt_range(dev, base, order); - return err; -} - -static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, - u64 in_param) -{ - int err = -EINVAL; - int index; - int id; - struct res_mpt *mpt; - - switch (op) { - case RES_OP_RESERVE: - index = get_param_l(&in_param); - id = index & mpt_mask(dev); - err = get_res(dev, slave, id, RES_MPT, &mpt); - if (err) - break; - index = mpt->key; - put_res(dev, slave, id, RES_MPT); - - err = rem_res_range(dev, slave, id, 1, RES_MPT, 0); - if (err) - break; - __mlx4_mr_release(dev, index); - break; - case RES_OP_MAP_ICM: - index = get_param_l(&in_param); - id = index & mpt_mask(dev); - err = mr_res_start_move_to(dev, slave, id, - RES_MPT_RESERVED, &mpt); - if (err) - return err; - - __mlx4_mr_free_icm(dev, mpt->key); - res_end_move(dev, slave, RES_MPT, id); - return err; - 
break; - default: - err = -EINVAL; - break; - } - return err; -} - -static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, - u64 in_param, u64 *out_param) -{ - int cqn; - int err; - - switch (op) { - case RES_OP_RESERVE_AND_MAP: - cqn = get_param_l(&in_param); - err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0); - if (err) - break; - - __mlx4_cq_free_icm(dev, cqn); - break; - - default: - err = -EINVAL; - break; - } - - return err; -} - -static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, - u64 in_param, u64 *out_param) -{ - int srqn; - int err; - - switch (op) { - case RES_OP_RESERVE_AND_MAP: - srqn = get_param_l(&in_param); - err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0); - if (err) - break; - - __mlx4_srq_free_icm(dev, srqn); - break; - - default: - err = -EINVAL; - break; - } - - return err; -} - -static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, - u64 in_param, u64 *out_param) -{ - int port; - int err = 0; - - switch (op) { - case RES_OP_RESERVE_AND_MAP: - port = get_param_l(out_param); - mac_del_from_slave(dev, slave, in_param, port); - __mlx4_unregister_mac(dev, port, in_param); - break; - default: - err = -EINVAL; - break; - } - - return err; - -} - -static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, - u64 in_param, u64 *out_param) -{ - return 0; -} - -int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - int err = -EINVAL; - int alop = vhcr->op_modifier; - - switch (vhcr->in_modifier) { - case RES_QP: - err = qp_free_res(dev, slave, vhcr->op_modifier, alop, - vhcr->in_param); - break; - - case RES_MTT: - err = mtt_free_res(dev, slave, vhcr->op_modifier, alop, - vhcr->in_param, &vhcr->out_param); - break; - - case RES_MPT: - err = mpt_free_res(dev, slave, vhcr->op_modifier, alop, - vhcr->in_param); - break; - - case RES_CQ: - err = cq_free_res(dev, slave, vhcr->op_modifier, alop, - vhcr->in_param, &vhcr->out_param); - break; - - case RES_SRQ: - err = srq_free_res(dev, slave, vhcr->op_modifier, alop, - vhcr->in_param, &vhcr->out_param); - break; - - case RES_MAC: - err = mac_free_res(dev, slave, vhcr->op_modifier, alop, - vhcr->in_param, &vhcr->out_param); - break; - - case RES_VLAN: - err = vlan_free_res(dev, slave, vhcr->op_modifier, alop, - vhcr->in_param, &vhcr->out_param); - break; - - default: - break; - } - return err; -} - -/* ugly but other choices are uglier */ -static int mr_phys_mpt(struct mlx4_mpt_entry *mpt) -{ - return (be32_to_cpu(mpt->flags) >> 9) & 1; -} - -static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt) -{ - return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8; -} - -static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt) -{ - return be32_to_cpu(mpt->mtt_sz); -} - -static int mr_get_pdn(struct mlx4_mpt_entry *mpt) -{ - return be32_to_cpu(mpt->pd_flags) & 0xffffff; -} - -static int qp_get_mtt_addr(struct mlx4_qp_context *qpc) -{ - return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8; -} - -static int srq_get_mtt_addr(struct mlx4_srq_context *srqc) -{ - return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8; -} - -static int qp_get_mtt_size(struct mlx4_qp_context *qpc) -{ - int page_shift = (qpc->log_page_size & 0x3f) + 12; - int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf; - int log_sq_sride = qpc->sq_size_stride & 7; - int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf; - int log_rq_stride = qpc->rq_size_stride & 7; - int srq = 
(be32_to_cpu(qpc->srqn) >> 24) & 1; - int rss = (be32_to_cpu(qpc->flags) >> 13) & 1; - int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1; - int sq_size; - int rq_size; - int total_pages; - int total_mem; - int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f; - - sq_size = 1 << (log_sq_size + log_sq_sride + 4); - rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4)); - total_mem = sq_size + rq_size; - total_pages = - roundup_pow_of_two((total_mem + (page_offset << 6)) >> - page_shift); - - return total_pages; -} - -static int qp_get_pdn(struct mlx4_qp_context *qpc) -{ - return be32_to_cpu(qpc->pd) & 0xffffff; -} - -static int pdn2slave(int pdn) -{ - return (pdn >> NOT_MASKED_PD_BITS) - 1; -} - -static int check_mtt_range(struct mlx4_dev *dev, int slave, int start, - int size, struct res_mtt *mtt) -{ - int res_start = mtt->com.res_id; - int res_size = (1 << mtt->order); - - if (start < res_start || start + size > res_start + res_size) - return -EPERM; - return 0; -} - -int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - int err; - int index = vhcr->in_modifier; - struct res_mtt *mtt; - struct res_mpt *mpt; - int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz; - int phys; - int id; - - id = index & mpt_mask(dev); - err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt); - if (err) - return err; - - phys = mr_phys_mpt(inbox->buf); - if (!phys) { - err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); - if (err) - goto ex_abort; - - err = check_mtt_range(dev, slave, mtt_base, - mr_get_mtt_size(inbox->buf), mtt); - if (err) - goto ex_put; - - mpt->mtt = mtt; - } - - if (pdn2slave(mr_get_pdn(inbox->buf)) != slave) { - err = -EPERM; - goto ex_put; - } - - err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); - if (err) - goto ex_put; - - if (!phys) { - atomic_inc(&mtt->ref_count); - put_res(dev, slave, mtt->com.res_id, RES_MTT); - } - - res_end_move(dev, slave, RES_MPT, id); - return 0; - -ex_put: - if (!phys) - put_res(dev, slave, mtt->com.res_id, RES_MTT); -ex_abort: - res_abort_move(dev, slave, RES_MPT, id); - - return err; -} - -int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - int err; - int index = vhcr->in_modifier; - struct res_mpt *mpt; - int id; - - id = index & mpt_mask(dev); - err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt); - if (err) - return err; - - err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); - if (err) - goto ex_abort; - - if (mpt->mtt) - atomic_dec(&mpt->mtt->ref_count); - - res_end_move(dev, slave, RES_MPT, id); - return 0; - -ex_abort: - res_abort_move(dev, slave, RES_MPT, id); - - return err; -} - -int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - int err; - int index = vhcr->in_modifier; - struct res_mpt *mpt; - int id; - - id = index & mpt_mask(dev); - err = get_res(dev, slave, id, RES_MPT, &mpt); - if (err) - return err; - - if (mpt->com.from_state != RES_MPT_HW) { - err = -EBUSY; - goto out; - } - - err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); - -out: - put_res(dev, slave, id, RES_MPT); - return err; -} - -static int qp_get_rcqn(struct mlx4_qp_context *qpc) -{ - 
return be32_to_cpu(qpc->cqn_recv) & 0xffffff; -} - -static int qp_get_scqn(struct mlx4_qp_context *qpc) -{ - return be32_to_cpu(qpc->cqn_send) & 0xffffff; -} - -static u32 qp_get_srqn(struct mlx4_qp_context *qpc) -{ - return be32_to_cpu(qpc->srqn) & 0x1ffffff; -} - -int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - int err; - int qpn = vhcr->in_modifier & 0x7fffff; - struct res_mtt *mtt; - struct res_qp *qp; - struct mlx4_qp_context *qpc = inbox->buf + 8; - int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz; - int mtt_size = qp_get_mtt_size(qpc); - struct res_cq *rcq; - struct res_cq *scq; - int rcqn = qp_get_rcqn(qpc); - int scqn = qp_get_scqn(qpc); - u32 srqn = qp_get_srqn(qpc) & 0xffffff; - int use_srq = (qp_get_srqn(qpc) >> 24) & 1; - struct res_srq *srq; - int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff; - - err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0); - if (err) - return err; - qp->local_qpn = local_qpn; - - err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); - if (err) - goto ex_abort; - - err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt); - if (err) - goto ex_put_mtt; - - if (pdn2slave(qp_get_pdn(qpc)) != slave) { - err = -EPERM; - goto ex_put_mtt; - } - - err = get_res(dev, slave, rcqn, RES_CQ, &rcq); - if (err) - goto ex_put_mtt; - - if (scqn != rcqn) { - err = get_res(dev, slave, scqn, RES_CQ, &scq); - if (err) - goto ex_put_rcq; - } else - scq = rcq; - - if (use_srq) { - err = get_res(dev, slave, srqn, RES_SRQ, &srq); - if (err) - goto ex_put_scq; - } - - err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); - if (err) - goto ex_put_srq; - atomic_inc(&mtt->ref_count); - qp->mtt = mtt; - atomic_inc(&rcq->ref_count); - qp->rcq = rcq; - atomic_inc(&scq->ref_count); - qp->scq = scq; - - if (scqn != rcqn) - put_res(dev, slave, scqn, RES_CQ); - - if (use_srq) { - atomic_inc(&srq->ref_count); - put_res(dev, slave, srqn, RES_SRQ); - qp->srq = srq; - } - put_res(dev, slave, rcqn, RES_CQ); - put_res(dev, slave, mtt_base, RES_MTT); - res_end_move(dev, slave, RES_QP, qpn); - - return 0; - -ex_put_srq: - if (use_srq) - put_res(dev, slave, srqn, RES_SRQ); -ex_put_scq: - if (scqn != rcqn) - put_res(dev, slave, scqn, RES_CQ); -ex_put_rcq: - put_res(dev, slave, rcqn, RES_CQ); -ex_put_mtt: - put_res(dev, slave, mtt_base, RES_MTT); -ex_abort: - res_abort_move(dev, slave, RES_QP, qpn); - - return err; -} - -static int eq_get_mtt_addr(struct mlx4_eq_context *eqc) -{ - return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8; -} - -static int eq_get_mtt_size(struct mlx4_eq_context *eqc) -{ - int log_eq_size = eqc->log_eq_size & 0x1f; - int page_shift = (eqc->log_page_size & 0x3f) + 12; - - if (log_eq_size + 5 < page_shift) - return 1; - - return 1 << (log_eq_size + 5 - page_shift); -} - -static int cq_get_mtt_addr(struct mlx4_cq_context *cqc) -{ - return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8; -} - -static int cq_get_mtt_size(struct mlx4_cq_context *cqc) -{ - int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f; - int page_shift = (cqc->log_page_size & 0x3f) + 12; - - if (log_cq_size + 5 < page_shift) - return 1; - - return 1 << (log_cq_size + 5 - page_shift); -} - -int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - int err; - int eqn = vhcr->in_modifier; 
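/* Note: event queues are numbered per slave, so the resource tracker keys
 * them as (slave << 8) | eqn; the same encoding is used again in the
 * HW2SW_EQ wrapper and in mlx4_GEN_EQE below. */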
- int res_id = (slave << 8) | eqn; - struct mlx4_eq_context *eqc = inbox->buf; - int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz; - int mtt_size = eq_get_mtt_size(eqc); - struct res_eq *eq; - struct res_mtt *mtt; - - err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0); - if (err) - return err; - err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq); - if (err) - goto out_add; - - err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); - if (err) - goto out_move; - - err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt); - if (err) - goto out_put; - - err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); - if (err) - goto out_put; - - atomic_inc(&mtt->ref_count); - eq->mtt = mtt; - put_res(dev, slave, mtt->com.res_id, RES_MTT); - res_end_move(dev, slave, RES_EQ, res_id); - return 0; - -out_put: - put_res(dev, slave, mtt->com.res_id, RES_MTT); -out_move: - res_abort_move(dev, slave, RES_EQ, res_id); -out_add: - rem_res_range(dev, slave, res_id, 1, RES_EQ, 0); - return err; -} - -static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start, - int len, struct res_mtt **res) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; - struct res_mtt *mtt; - int err = -EINVAL; - - spin_lock_irq(mlx4_tlock(dev)); - list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT], - com.list) { - if (!check_mtt_range(dev, slave, start, len, mtt)) { - *res = mtt; - mtt->com.from_state = mtt->com.state; - mtt->com.state = RES_MTT_BUSY; - err = 0; - break; - } - } - spin_unlock_irq(mlx4_tlock(dev)); - - return err; -} - -int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - struct mlx4_mtt mtt; - __be64 *page_list = inbox->buf; - u64 *pg_list = (u64 *)page_list; - int i; - struct res_mtt *rmtt = NULL; - int start = be64_to_cpu(page_list[0]); - int npages = vhcr->in_modifier; - int err; - - err = get_containing_mtt(dev, slave, start, npages, &rmtt); - if (err) - return err; - - /* Call the SW implementation of write_mtt: - * - Prepare a dummy mtt struct - * - Translate inbox contents to simple addresses in host endianess */ - mtt.offset = 0; /* TBD this is broken but I don't handle it since - we don't really use it */ - mtt.order = 0; - mtt.page_shift = 0; - for (i = 0; i < npages; ++i) - pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL); - - err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages, - ((u64 *)page_list + 2)); - - if (rmtt) - put_res(dev, slave, rmtt->com.res_id, RES_MTT); - - return err; -} - -int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - int eqn = vhcr->in_modifier; - int res_id = eqn | (slave << 8); - struct res_eq *eq; - int err; - - err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq); - if (err) - return err; - - err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL); - if (err) - goto ex_abort; - - err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); - if (err) - goto ex_put; - - atomic_dec(&eq->mtt->ref_count); - put_res(dev, slave, eq->mtt->com.res_id, RES_MTT); - res_end_move(dev, slave, RES_EQ, res_id); - rem_res_range(dev, slave, res_id, 1, RES_EQ, 0); - - return 0; - -ex_put: - put_res(dev, slave, eq->mtt->com.res_id, RES_MTT); -ex_abort: - 
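/* Note: reached when the EQ's MTT cannot be referenced or the HW2SW
 * command fails; the EQ is rolled back to its previous tracker state
 * instead of being left BUSY. */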
res_abort_move(dev, slave, RES_EQ, res_id); - - return err; -} - -int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_slave_event_eq_info *event_eq; - struct mlx4_cmd_mailbox *mailbox; - u32 in_modifier = 0; - int err; - int res_id; - struct res_eq *req; - - if (!priv->mfunc.master.slave_state) - return -EINVAL; - - event_eq = &priv->mfunc.master.slave_state[slave].event_eq; - - /* Create the event only if the slave is registered */ - if ((event_eq->event_type & (1 << eqe->type)) == 0) - return 0; - - mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]); - res_id = (slave << 8) | event_eq->eqn; - err = get_res(dev, slave, res_id, RES_EQ, &req); - if (err) - goto unlock; - - if (req->com.from_state != RES_EQ_HW) { - err = -EINVAL; - goto put; - } - - mailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(mailbox)) { - err = PTR_ERR(mailbox); - goto put; - } - - if (eqe->type == MLX4_EVENT_TYPE_CMD) { - ++event_eq->token; - eqe->event.cmd.token = cpu_to_be16(event_eq->token); - } - - memcpy(mailbox->buf, (u8 *) eqe, 28); - - in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16); - - err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0, - MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B, - MLX4_CMD_NATIVE); - - put_res(dev, slave, res_id, RES_EQ); - mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]); - mlx4_free_cmd_mailbox(dev, mailbox); - return err; - -put: - put_res(dev, slave, res_id, RES_EQ); - -unlock: - mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]); - return err; -} - -int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - int eqn = vhcr->in_modifier; - int res_id = eqn | (slave << 8); - struct res_eq *eq; - int err; - - err = get_res(dev, slave, res_id, RES_EQ, &eq); - if (err) - return err; - - if (eq->com.from_state != RES_EQ_HW) { - err = -EINVAL; - goto ex_put; - } - - err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); - -ex_put: - put_res(dev, slave, res_id, RES_EQ); - return err; -} - -int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - int err; - int cqn = vhcr->in_modifier; - struct mlx4_cq_context *cqc = inbox->buf; - int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; - struct res_cq *cq; - struct res_mtt *mtt; - - err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq); - if (err) - return err; - err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); - if (err) - goto out_move; - err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); - if (err) - goto out_put; - err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); - if (err) - goto out_put; - atomic_inc(&mtt->ref_count); - cq->mtt = mtt; - put_res(dev, slave, mtt->com.res_id, RES_MTT); - res_end_move(dev, slave, RES_CQ, cqn); - return 0; - -out_put: - put_res(dev, slave, mtt->com.res_id, RES_MTT); -out_move: - res_abort_move(dev, slave, RES_CQ, cqn); - return err; -} - -int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - int err; - int cqn = vhcr->in_modifier; - struct res_cq *cq; - - err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq); - if (err) - return err; - err = 
mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); - if (err) - goto out_move; - atomic_dec(&cq->mtt->ref_count); - res_end_move(dev, slave, RES_CQ, cqn); - return 0; - -out_move: - res_abort_move(dev, slave, RES_CQ, cqn); - return err; -} - -int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - int cqn = vhcr->in_modifier; - struct res_cq *cq; - int err; - - err = get_res(dev, slave, cqn, RES_CQ, &cq); - if (err) - return err; - - if (cq->com.from_state != RES_CQ_HW) - goto ex_put; - - err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); -ex_put: - put_res(dev, slave, cqn, RES_CQ); - - return err; -} - -static int handle_resize(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd, - struct res_cq *cq) -{ - int err; - struct res_mtt *orig_mtt; - struct res_mtt *mtt; - struct mlx4_cq_context *cqc = inbox->buf; - int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; - - err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt); - if (err) - return err; - - if (orig_mtt != cq->mtt) { - err = -EINVAL; - goto ex_put; - } - - err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); - if (err) - goto ex_put; - - err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); - if (err) - goto ex_put1; - err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); - if (err) - goto ex_put1; - atomic_dec(&orig_mtt->ref_count); - put_res(dev, slave, orig_mtt->com.res_id, RES_MTT); - atomic_inc(&mtt->ref_count); - cq->mtt = mtt; - put_res(dev, slave, mtt->com.res_id, RES_MTT); - return 0; - -ex_put1: - put_res(dev, slave, mtt->com.res_id, RES_MTT); -ex_put: - put_res(dev, slave, orig_mtt->com.res_id, RES_MTT); - - return err; - -} - -int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - int cqn = vhcr->in_modifier; - struct res_cq *cq; - int err; - - err = get_res(dev, slave, cqn, RES_CQ, &cq); - if (err) - return err; - - if (cq->com.from_state != RES_CQ_HW) - goto ex_put; - - if (vhcr->op_modifier == 0) { - err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq); - if (err) - goto ex_put; - } - - err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); -ex_put: - put_res(dev, slave, cqn, RES_CQ); - - return err; -} - -static int srq_get_pdn(struct mlx4_srq_context *srqc) -{ - return be32_to_cpu(srqc->pd) & 0xffffff; -} - -static int srq_get_mtt_size(struct mlx4_srq_context *srqc) -{ - int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf; - int log_rq_stride = srqc->logstride & 7; - int page_shift = (srqc->log_page_size & 0x3f) + 12; - - if (log_srq_size + log_rq_stride + 4 < page_shift) - return 1; - - return 1 << (log_srq_size + log_rq_stride + 4 - page_shift); -} - -int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - int err; - int srqn = vhcr->in_modifier; - struct res_mtt *mtt; - struct res_srq *srq; - struct mlx4_srq_context *srqc = inbox->buf; - int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz; - - if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff)) - return -EINVAL; - - err = srq_res_start_move_to(dev, 
slave, srqn, RES_SRQ_HW, &srq); - if (err) - return err; - err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); - if (err) - goto ex_abort; - err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc), - mtt); - if (err) - goto ex_put_mtt; - - if (pdn2slave(srq_get_pdn(srqc)) != slave) { - err = -EPERM; - goto ex_put_mtt; - } - - err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); - if (err) - goto ex_put_mtt; - - atomic_inc(&mtt->ref_count); - srq->mtt = mtt; - put_res(dev, slave, mtt->com.res_id, RES_MTT); - res_end_move(dev, slave, RES_SRQ, srqn); - return 0; - -ex_put_mtt: - put_res(dev, slave, mtt->com.res_id, RES_MTT); -ex_abort: - res_abort_move(dev, slave, RES_SRQ, srqn); - - return err; -} - -int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - int err; - int srqn = vhcr->in_modifier; - struct res_srq *srq; - - err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq); - if (err) - return err; - err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); - if (err) - goto ex_abort; - atomic_dec(&srq->mtt->ref_count); - if (srq->cq) - atomic_dec(&srq->cq->ref_count); - res_end_move(dev, slave, RES_SRQ, srqn); - - return 0; - -ex_abort: - res_abort_move(dev, slave, RES_SRQ, srqn); - - return err; -} - -int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - int err; - int srqn = vhcr->in_modifier; - struct res_srq *srq; - - err = get_res(dev, slave, srqn, RES_SRQ, &srq); - if (err) - return err; - if (srq->com.from_state != RES_SRQ_HW) { - err = -EBUSY; - goto out; - } - err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); -out: - put_res(dev, slave, srqn, RES_SRQ); - return err; -} - -int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - int err; - int srqn = vhcr->in_modifier; - struct res_srq *srq; - - err = get_res(dev, slave, srqn, RES_SRQ, &srq); - if (err) - return err; - - if (srq->com.from_state != RES_SRQ_HW) { - err = -EBUSY; - goto out; - } - - err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); -out: - put_res(dev, slave, srqn, RES_SRQ); - return err; -} - -int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - int err; - int qpn = vhcr->in_modifier & 0x7fffff; - struct res_qp *qp; - - err = get_res(dev, slave, qpn, RES_QP, &qp); - if (err) - return err; - if (qp->com.from_state != RES_QP_HW) { - err = -EBUSY; - goto out; - } - - err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); -out: - put_res(dev, slave, qpn, RES_QP); - return err; -} - -int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - struct mlx4_qp_context *qpc = inbox->buf + 8; - - update_ud_gid(dev, qpc, (u8)slave); - - return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); -} - -int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - int err; - int 
qpn = vhcr->in_modifier & 0x7fffff; - struct res_qp *qp; - - err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0); - if (err) - return err; - err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); - if (err) - goto ex_abort; - - atomic_dec(&qp->mtt->ref_count); - atomic_dec(&qp->rcq->ref_count); - atomic_dec(&qp->scq->ref_count); - if (qp->srq) - atomic_dec(&qp->srq->ref_count); - res_end_move(dev, slave, RES_QP, qpn); - return 0; - -ex_abort: - res_abort_move(dev, slave, RES_QP, qpn); - - return err; -} - -static struct res_gid *find_gid(struct mlx4_dev *dev, int slave, - struct res_qp *rqp, u8 *gid) -{ - struct res_gid *res; - - list_for_each_entry(res, &rqp->mcg_list, list) { - if (!memcmp(res->gid, gid, 16)) - return res; - } - return NULL; -} - -static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, - u8 *gid, enum mlx4_protocol prot) -{ - struct res_gid *res; - int err; - - res = kzalloc(sizeof *res, GFP_KERNEL); - if (!res) - return -ENOMEM; - - spin_lock_irq(&rqp->mcg_spl); - if (find_gid(dev, slave, rqp, gid)) { - kfree(res); - err = -EEXIST; - } else { - memcpy(res->gid, gid, 16); - res->prot = prot; - list_add_tail(&res->list, &rqp->mcg_list); - err = 0; - } - spin_unlock_irq(&rqp->mcg_spl); - - return err; -} - -static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, - u8 *gid, enum mlx4_protocol prot) -{ - struct res_gid *res; - int err; - - spin_lock_irq(&rqp->mcg_spl); - res = find_gid(dev, slave, rqp, gid); - if (!res || res->prot != prot) - err = -EINVAL; - else { - list_del(&res->list); - kfree(res); - err = 0; - } - spin_unlock_irq(&rqp->mcg_spl); - - return err; -} - -int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - struct mlx4_qp qp; /* dummy for calling attach/detach */ - u8 *gid = inbox->buf; - enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7; - int err, err1; - int qpn; - struct res_qp *rqp; - int attach = vhcr->op_modifier; - int block_loopback = vhcr->in_modifier >> 31; - u8 steer_type_mask = 2; - enum mlx4_steer_type type = gid[7] & steer_type_mask; - - qpn = vhcr->in_modifier & 0xffffff; - err = get_res(dev, slave, qpn, RES_QP, &rqp); - if (err) - return err; - - qp.qpn = qpn; - if (attach) { - err = add_mcg_res(dev, slave, rqp, gid, prot); - if (err) - goto ex_put; - - err = mlx4_qp_attach_common(dev, &qp, gid, - block_loopback, prot, type); - if (err) - goto ex_rem; - } else { - err = rem_mcg_res(dev, slave, rqp, gid, prot); - if (err) - goto ex_put; - err = mlx4_qp_detach_common(dev, &qp, gid, prot, type); - } - - put_res(dev, slave, qpn, RES_QP); - return 0; - -ex_rem: - /* ignore error return below, already in error */ - err1 = rem_mcg_res(dev, slave, rqp, gid, prot); -ex_put: - put_res(dev, slave, qpn, RES_QP); - - return err; -} - -enum { - BUSY_MAX_RETRIES = 10 -}; - -int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - int err; - int index = vhcr->in_modifier & 0xffff; - - err = get_res(dev, slave, index, RES_COUNTER, NULL); - if (err) - return err; - - err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); - put_res(dev, slave, index, RES_COUNTER); - return err; -} - -static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp) -{ - struct res_gid *rgid; - struct res_gid *tmp; - int err; 
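/* Note: walks the QP's remaining multicast attachments (recorded by the
 * QP_ATTACH wrapper above) and detaches each one before the QP itself is
 * torn down. */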
- struct mlx4_qp qp; /* dummy for calling attach/detach */ - - list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) { - qp.qpn = rqp->local_qpn; - err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot, - MLX4_MC_STEER); - list_del(&rgid->list); - kfree(rgid); - } -} - -static int _move_all_busy(struct mlx4_dev *dev, int slave, - enum mlx4_resource type, int print) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_resource_tracker *tracker = - &priv->mfunc.master.res_tracker; - struct list_head *rlist = &tracker->slave_list[slave].res_list[type]; - struct res_common *r; - struct res_common *tmp; - int busy; - - busy = 0; - spin_lock_irq(mlx4_tlock(dev)); - list_for_each_entry_safe(r, tmp, rlist, list) { - if (r->owner == slave) { - if (!r->removing) { - if (r->state == RES_ANY_BUSY) { - if (print) - mlx4_dbg(dev, - "%s id 0x%x is busy\n", - ResourceType(type), - r->res_id); - ++busy; - } else { - r->from_state = r->state; - r->state = RES_ANY_BUSY; - r->removing = 1; - } - } - } - } - spin_unlock_irq(mlx4_tlock(dev)); - - return busy; -} - -static int move_all_busy(struct mlx4_dev *dev, int slave, - enum mlx4_resource type) -{ - unsigned long begin; - int busy; - - begin = jiffies; - do { - busy = _move_all_busy(dev, slave, type, 0); - if (time_after(jiffies, begin + 5 * HZ)) - break; - if (busy) - cond_resched(); - } while (busy); - - if (busy) - busy = _move_all_busy(dev, slave, type, 1); - - return busy; -} -static void rem_slave_qps(struct mlx4_dev *dev, int slave) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; - struct list_head *qp_list = - &tracker->slave_list[slave].res_list[RES_QP]; - struct res_qp *qp; - struct res_qp *tmp; - int state; - u64 in_param; - int qpn; - int err; - - err = move_all_busy(dev, slave, RES_QP); - if (err) - mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy" - "for slave %d\n", slave); - - spin_lock_irq(mlx4_tlock(dev)); - list_for_each_entry_safe(qp, tmp, qp_list, com.list) { - spin_unlock_irq(mlx4_tlock(dev)); - if (qp->com.owner == slave) { - qpn = qp->com.res_id; - detach_qp(dev, slave, qp); - state = qp->com.from_state; - while (state != 0) { - switch (state) { - case RES_QP_RESERVED: - spin_lock_irq(mlx4_tlock(dev)); - radix_tree_delete(&tracker->res_tree[RES_QP], - qp->com.res_id); - list_del(&qp->com.list); - spin_unlock_irq(mlx4_tlock(dev)); - kfree(qp); - state = 0; - break; - case RES_QP_MAPPED: - if (!valid_reserved(dev, slave, qpn)) - __mlx4_qp_free_icm(dev, qpn); - state = RES_QP_RESERVED; - break; - case RES_QP_HW: - in_param = slave; - err = mlx4_cmd(dev, in_param, - qp->local_qpn, 2, - MLX4_CMD_2RST_QP, - MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_NATIVE); - if (err) - mlx4_dbg(dev, "rem_slave_qps: failed" - " to move slave %d qpn %d to" - " reset\n", slave, - qp->local_qpn); - atomic_dec(&qp->rcq->ref_count); - atomic_dec(&qp->scq->ref_count); - atomic_dec(&qp->mtt->ref_count); - if (qp->srq) - atomic_dec(&qp->srq->ref_count); - state = RES_QP_MAPPED; - break; - default: - state = 0; - } - } - } - spin_lock_irq(mlx4_tlock(dev)); - } - spin_unlock_irq(mlx4_tlock(dev)); -} - -static void rem_slave_srqs(struct mlx4_dev *dev, int slave) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; - struct list_head *srq_list = - &tracker->slave_list[slave].res_list[RES_SRQ]; - struct res_srq *srq; - struct res_srq *tmp; - int state; - u64 in_param; - LIST_HEAD(tlist); - int srqn; - int 
err; - - err = move_all_busy(dev, slave, RES_SRQ); - if (err) - mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to " - "busy for slave %d\n", slave); - - spin_lock_irq(mlx4_tlock(dev)); - list_for_each_entry_safe(srq, tmp, srq_list, com.list) { - spin_unlock_irq(mlx4_tlock(dev)); - if (srq->com.owner == slave) { - srqn = srq->com.res_id; - state = srq->com.from_state; - while (state != 0) { - switch (state) { - case RES_SRQ_ALLOCATED: - __mlx4_srq_free_icm(dev, srqn); - spin_lock_irq(mlx4_tlock(dev)); - radix_tree_delete(&tracker->res_tree[RES_SRQ], - srqn); - list_del(&srq->com.list); - spin_unlock_irq(mlx4_tlock(dev)); - kfree(srq); - state = 0; - break; - - case RES_SRQ_HW: - in_param = slave; - err = mlx4_cmd(dev, in_param, srqn, 1, - MLX4_CMD_HW2SW_SRQ, - MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_NATIVE); - if (err) - mlx4_dbg(dev, "rem_slave_srqs: failed" - " to move slave %d srq %d to" - " SW ownership\n", - slave, srqn); - - atomic_dec(&srq->mtt->ref_count); - if (srq->cq) - atomic_dec(&srq->cq->ref_count); - state = RES_SRQ_ALLOCATED; - break; - - default: - state = 0; - } - } - } - spin_lock_irq(mlx4_tlock(dev)); - } - spin_unlock_irq(mlx4_tlock(dev)); -} - -static void rem_slave_cqs(struct mlx4_dev *dev, int slave) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; - struct list_head *cq_list = - &tracker->slave_list[slave].res_list[RES_CQ]; - struct res_cq *cq; - struct res_cq *tmp; - int state; - u64 in_param; - LIST_HEAD(tlist); - int cqn; - int err; - - err = move_all_busy(dev, slave, RES_CQ); - if (err) - mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to " - "busy for slave %d\n", slave); - - spin_lock_irq(mlx4_tlock(dev)); - list_for_each_entry_safe(cq, tmp, cq_list, com.list) { - spin_unlock_irq(mlx4_tlock(dev)); - if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) { - cqn = cq->com.res_id; - state = cq->com.from_state; - while (state != 0) { - switch (state) { - case RES_CQ_ALLOCATED: - __mlx4_cq_free_icm(dev, cqn); - spin_lock_irq(mlx4_tlock(dev)); - radix_tree_delete(&tracker->res_tree[RES_CQ], - cqn); - list_del(&cq->com.list); - spin_unlock_irq(mlx4_tlock(dev)); - kfree(cq); - state = 0; - break; - - case RES_CQ_HW: - in_param = slave; - err = mlx4_cmd(dev, in_param, cqn, 1, - MLX4_CMD_HW2SW_CQ, - MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_NATIVE); - if (err) - mlx4_dbg(dev, "rem_slave_cqs: failed" - " to move slave %d cq %d to" - " SW ownership\n", - slave, cqn); - atomic_dec(&cq->mtt->ref_count); - state = RES_CQ_ALLOCATED; - break; - - default: - state = 0; - } - } - } - spin_lock_irq(mlx4_tlock(dev)); - } - spin_unlock_irq(mlx4_tlock(dev)); -} - -static void rem_slave_mrs(struct mlx4_dev *dev, int slave) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; - struct list_head *mpt_list = - &tracker->slave_list[slave].res_list[RES_MPT]; - struct res_mpt *mpt; - struct res_mpt *tmp; - int state; - u64 in_param; - LIST_HEAD(tlist); - int mptn; - int err; - - err = move_all_busy(dev, slave, RES_MPT); - if (err) - mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to " - "busy for slave %d\n", slave); - - spin_lock_irq(mlx4_tlock(dev)); - list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) { - spin_unlock_irq(mlx4_tlock(dev)); - if (mpt->com.owner == slave) { - mptn = mpt->com.res_id; - state = mpt->com.from_state; - while (state != 0) { - switch (state) { - case RES_MPT_RESERVED: - __mlx4_mr_release(dev, mpt->key); - 
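/* Note: the MPT index has already been returned to the master's bitmap;
 * what follows removes the tracker entry under the lock and frees the
 * bookkeeping structure. */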
spin_lock_irq(mlx4_tlock(dev)); - radix_tree_delete(&tracker->res_tree[RES_MPT], - mptn); - list_del(&mpt->com.list); - spin_unlock_irq(mlx4_tlock(dev)); - kfree(mpt); - state = 0; - break; - - case RES_MPT_MAPPED: - __mlx4_mr_free_icm(dev, mpt->key); - state = RES_MPT_RESERVED; - break; - - case RES_MPT_HW: - in_param = slave; - err = mlx4_cmd(dev, in_param, mptn, 0, - MLX4_CMD_HW2SW_MPT, - MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_NATIVE); - if (err) - mlx4_dbg(dev, "rem_slave_mrs: failed" - " to move slave %d mpt %d to" - " SW ownership\n", - slave, mptn); - if (mpt->mtt) - atomic_dec(&mpt->mtt->ref_count); - state = RES_MPT_MAPPED; - break; - default: - state = 0; - } - } - } - spin_lock_irq(mlx4_tlock(dev)); - } - spin_unlock_irq(mlx4_tlock(dev)); -} - -static void rem_slave_mtts(struct mlx4_dev *dev, int slave) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_resource_tracker *tracker = - &priv->mfunc.master.res_tracker; - struct list_head *mtt_list = - &tracker->slave_list[slave].res_list[RES_MTT]; - struct res_mtt *mtt; - struct res_mtt *tmp; - int state; - LIST_HEAD(tlist); - int base; - int err; - - err = move_all_busy(dev, slave, RES_MTT); - if (err) - mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to " - "busy for slave %d\n", slave); - - spin_lock_irq(mlx4_tlock(dev)); - list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) { - spin_unlock_irq(mlx4_tlock(dev)); - if (mtt->com.owner == slave) { - base = mtt->com.res_id; - state = mtt->com.from_state; - while (state != 0) { - switch (state) { - case RES_MTT_ALLOCATED: - __mlx4_free_mtt_range(dev, base, - mtt->order); - spin_lock_irq(mlx4_tlock(dev)); - radix_tree_delete(&tracker->res_tree[RES_MTT], - base); - list_del(&mtt->com.list); - spin_unlock_irq(mlx4_tlock(dev)); - kfree(mtt); - state = 0; - break; - - default: - state = 0; - } - } - } - spin_lock_irq(mlx4_tlock(dev)); - } - spin_unlock_irq(mlx4_tlock(dev)); -} - -static void rem_slave_eqs(struct mlx4_dev *dev, int slave) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; - struct list_head *eq_list = - &tracker->slave_list[slave].res_list[RES_EQ]; - struct res_eq *eq; - struct res_eq *tmp; - int err; - int state; - LIST_HEAD(tlist); - int eqn; - struct mlx4_cmd_mailbox *mailbox; - - err = move_all_busy(dev, slave, RES_EQ); - if (err) - mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to " - "busy for slave %d\n", slave); - - spin_lock_irq(mlx4_tlock(dev)); - list_for_each_entry_safe(eq, tmp, eq_list, com.list) { - spin_unlock_irq(mlx4_tlock(dev)); - if (eq->com.owner == slave) { - eqn = eq->com.res_id; - state = eq->com.from_state; - while (state != 0) { - switch (state) { - case RES_EQ_RESERVED: - spin_lock_irq(mlx4_tlock(dev)); - radix_tree_delete(&tracker->res_tree[RES_EQ], - eqn); - list_del(&eq->com.list); - spin_unlock_irq(mlx4_tlock(dev)); - kfree(eq); - state = 0; - break; - - case RES_EQ_HW: - mailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(mailbox)) { - cond_resched(); - continue; - } - err = mlx4_cmd_box(dev, slave, 0, - eqn & 0xff, 0, - MLX4_CMD_HW2SW_EQ, - MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_NATIVE); - mlx4_dbg(dev, "rem_slave_eqs: failed" - " to move slave %d eqs %d to" - " SW ownership\n", slave, eqn); - mlx4_free_cmd_mailbox(dev, mailbox); - if (!err) { - atomic_dec(&eq->mtt->ref_count); - state = RES_EQ_RESERVED; - } - break; - - default: - state = 0; - } - } - } - spin_lock_irq(mlx4_tlock(dev)); - } - spin_unlock_irq(mlx4_tlock(dev)); -} - -void 
mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - - mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); - /*VLAN*/ - rem_slave_macs(dev, slave); - rem_slave_qps(dev, slave); - rem_slave_srqs(dev, slave); - rem_slave_cqs(dev, slave); - rem_slave_mrs(dev, slave); - rem_slave_eqs(dev, slave); - rem_slave_mtts(dev, slave); - mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); -} diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/sense.c b/trunk/drivers/net/ethernet/mellanox/mlx4/sense.c index 802498293528..e2337a7411d9 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/sense.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/sense.c @@ -45,8 +45,7 @@ int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, int err = 0; err = mlx4_cmd_imm(dev, 0, &out_param, port, 0, - MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B, - MLX4_CMD_WRAPPED); + MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B); if (err) { mlx4_err(dev, "Sense command failed for port: %d\n", port); return err; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/srq.c b/trunk/drivers/net/ethernet/mellanox/mlx4/srq.c index 2823fffc6383..9cbf3fce0145 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/srq.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/srq.c @@ -31,8 +31,6 @@ * SOFTWARE. */ -#include - #include #include #include @@ -40,6 +38,26 @@ #include "mlx4.h" #include "icm.h" +struct mlx4_srq_context { + __be32 state_logsize_srqn; + u8 logstride; + u8 reserved1; + __be16 xrcd; + __be32 pg_offset_cqn; + u32 reserved2; + u8 log_page_size; + u8 reserved3[2]; + u8 mtt_base_addr_h; + __be32 mtt_base_addr_l; + __be32 pd; + __be16 limit_watermark; + __be16 wqe_cnt; + u16 reserved4; + __be16 wqe_counter; + u32 reserved5; + __be64 db_rec_addr; +}; + void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type) { struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; @@ -67,9 +85,8 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type) static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int srq_num) { - return mlx4_cmd(dev, mailbox->dma | dev->caps.function, srq_num, 0, - MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_WRAPPED); + return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ, + MLX4_CMD_TIME_CLASS_A); } static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, @@ -77,109 +94,48 @@ static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox { return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num, mailbox ? 
0 : 1, MLX4_CMD_HW2SW_SRQ, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + MLX4_CMD_TIME_CLASS_A); } static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark) { return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); + MLX4_CMD_TIME_CLASS_B); } static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int srq_num) { return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + MLX4_CMD_TIME_CLASS_A); } -int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn) +int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd, + struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq) { struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_srq_context *srq_context; + u64 mtt_addr; int err; - - *srqn = mlx4_bitmap_alloc(&srq_table->bitmap); - if (*srqn == -1) + srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap); + if (srq->srqn == -1) return -ENOMEM; - err = mlx4_table_get(dev, &srq_table->table, *srqn); + err = mlx4_table_get(dev, &srq_table->table, srq->srqn); if (err) goto err_out; - err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn); + err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn); if (err) goto err_put; - return 0; - -err_put: - mlx4_table_put(dev, &srq_table->table, *srqn); - -err_out: - mlx4_bitmap_free(&srq_table->bitmap, *srqn); - return err; -} - -static int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn) -{ - u64 out_param; - int err; - - if (mlx4_is_mfunc(dev)) { - err = mlx4_cmd_imm(dev, 0, &out_param, RES_SRQ, - RES_OP_RESERVE_AND_MAP, - MLX4_CMD_ALLOC_RES, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); - if (!err) - *srqn = get_param_l(&out_param); - - return err; - } - return __mlx4_srq_alloc_icm(dev, srqn); -} - -void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn) -{ - struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; - - mlx4_table_put(dev, &srq_table->cmpt_table, srqn); - mlx4_table_put(dev, &srq_table->table, srqn); - mlx4_bitmap_free(&srq_table->bitmap, srqn); -} - -static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn) -{ - u64 in_param; - - if (mlx4_is_mfunc(dev)) { - set_param_l(&in_param, srqn); - if (mlx4_cmd(dev, in_param, RES_SRQ, RES_OP_RESERVE_AND_MAP, - MLX4_CMD_FREE_RES, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED)) - mlx4_warn(dev, "Failed freeing cq:%d\n", srqn); - return; - } - __mlx4_srq_free_icm(dev, srqn); -} - -int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd, - struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq) -{ - struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; - struct mlx4_cmd_mailbox *mailbox; - struct mlx4_srq_context *srq_context; - u64 mtt_addr; - int err; - - err = mlx4_srq_alloc_icm(dev, &srq->srqn); - if (err) - return err; spin_lock_irq(&srq_table->lock); err = radix_tree_insert(&srq_table->tree, srq->srqn, srq); spin_unlock_irq(&srq_table->lock); if (err) - goto err_icm; + goto err_cmpt_put; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) { @@ -218,8 +174,15 @@ int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd, radix_tree_delete(&srq_table->tree, srq->srqn); spin_unlock_irq(&srq_table->lock); -err_icm: - mlx4_srq_free_icm(dev, srq->srqn); +err_cmpt_put: + mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn); + +err_put: + mlx4_table_put(dev, &srq_table->table, srq->srqn); + +err_out: + 
mlx4_bitmap_free(&srq_table->bitmap, srq->srqn); + return err; } EXPORT_SYMBOL_GPL(mlx4_srq_alloc); @@ -241,7 +204,8 @@ void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq) complete(&srq->free); wait_for_completion(&srq->free); - mlx4_srq_free_icm(dev, srq->srqn); + mlx4_table_put(dev, &srq_table->table, srq->srqn); + mlx4_bitmap_free(&srq_table->bitmap, srq->srqn); } EXPORT_SYMBOL_GPL(mlx4_srq_free); @@ -281,8 +245,6 @@ int mlx4_init_srq_table(struct mlx4_dev *dev) spin_lock_init(&srq_table->lock); INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC); - if (mlx4_is_slave(dev)) - return 0; err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs, dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0); @@ -294,7 +256,5 @@ int mlx4_init_srq_table(struct mlx4_dev *dev) void mlx4_cleanup_srq_table(struct mlx4_dev *dev) { - if (mlx4_is_slave(dev)) - return; mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap); } diff --git a/trunk/drivers/net/ethernet/micrel/Kconfig b/trunk/drivers/net/ethernet/micrel/Kconfig index 1ea811cf515b..d10c2e15f4ed 100644 --- a/trunk/drivers/net/ethernet/micrel/Kconfig +++ b/trunk/drivers/net/ethernet/micrel/Kconfig @@ -42,8 +42,6 @@ config KS8851 select NET_CORE select MII select CRC32 - select MISC_DEVICES - select EEPROM_93CX6 ---help--- SPI driver for Micrel KS8851 SPI attached network chip. diff --git a/trunk/drivers/net/ethernet/micrel/ks8842.c b/trunk/drivers/net/ethernet/micrel/ks8842.c index 75ec87a822b8..4a6ae057e3b1 100644 --- a/trunk/drivers/net/ethernet/micrel/ks8842.c +++ b/trunk/drivers/net/ethernet/micrel/ks8842.c @@ -1264,7 +1264,18 @@ static struct platform_driver ks8842_platform_driver = { .remove = ks8842_remove, }; -module_platform_driver(ks8842_platform_driver); +static int __init ks8842_init(void) +{ + return platform_driver_register(&ks8842_platform_driver); +} + +static void __exit ks8842_exit(void) +{ + platform_driver_unregister(&ks8842_platform_driver); +} + +module_init(ks8842_init); +module_exit(ks8842_exit); MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver"); MODULE_AUTHOR("Mocean Laboratories "); diff --git a/trunk/drivers/net/ethernet/micrel/ks8851.c b/trunk/drivers/net/ethernet/micrel/ks8851.c index 6b35e7da9a9c..f56743a28fc0 100644 --- a/trunk/drivers/net/ethernet/micrel/ks8851.c +++ b/trunk/drivers/net/ethernet/micrel/ks8851.c @@ -22,7 +22,6 @@ #include #include #include -#include #include @@ -83,7 +82,6 @@ union ks8851_tx_hdr { * @rc_ccr: Cached copy of KS_CCR. * @rc_rxqcr: Cached copy of KS_RXQCR. * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom - * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM. * * The @lock ensures that the chip is protected when certain operations are * in progress. When the read or write packet transfer is in progress, most @@ -130,8 +128,6 @@ struct ks8851_net { struct spi_message spi_msg2; struct spi_transfer spi_xfer1; struct spi_transfer spi_xfer2[2]; - - struct eeprom_93cx6 eeprom; }; static int msg_enable; @@ -346,26 +342,6 @@ static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op) mdelay(1); /* wait for condition to clear */ } -/** - * ks8851_set_powermode - set power mode of the device - * @ks: The device state - * @pwrmode: The power mode value to write to KS_PMECR. - * - * Change the power mode of the chip. 
- */ -static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode) -{ - unsigned pmecr; - - netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode); - - pmecr = ks8851_rdreg16(ks, KS_PMECR); - pmecr &= ~PMECR_PM_MASK; - pmecr |= pwrmode; - - ks8851_wrreg16(ks, KS_PMECR, pmecr); -} - /** * ks8851_write_mac_addr - write mac address to device registers * @dev: The network device @@ -382,63 +358,30 @@ static int ks8851_write_mac_addr(struct net_device *dev) mutex_lock(&ks->lock); - /* - * Wake up chip in case it was powered off when stopped; otherwise, - * the first write to the MAC address does not take effect. - */ - ks8851_set_powermode(ks, PMECR_PM_NORMAL); for (i = 0; i < ETH_ALEN; i++) ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]); - if (!netif_running(dev)) - ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN); mutex_unlock(&ks->lock); return 0; } -/** - * ks8851_read_mac_addr - read mac address from device registers - * @dev: The network device - * - * Update our copy of the KS8851 MAC address from the registers of @dev. -*/ -static void ks8851_read_mac_addr(struct net_device *dev) -{ - struct ks8851_net *ks = netdev_priv(dev); - int i; - - mutex_lock(&ks->lock); - - for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = ks8851_rdreg8(ks, KS_MAR(i)); - - mutex_unlock(&ks->lock); -} - /** * ks8851_init_mac - initialise the mac address * @ks: The device structure * * Get or create the initial mac address for the device and then set that - * into the station address register. If there is an EEPROM present, then - * we try that. If no valid mac address is found we use random_ether_addr() + * into the station address register. Currently we assume that the device + * does not have a valid mac address in it, and so we use random_ether_addr() * to create a new one. + * + * In future, the driver should check to see if the device has an EEPROM + * attached and whether that has a valid ethernet address in it. */ static void ks8851_init_mac(struct ks8851_net *ks) { struct net_device *dev = ks->netdev; - /* first, try reading what we've got already */ - if (ks->rc_ccr & CCR_EEPROM) { - ks8851_read_mac_addr(dev); - if (is_valid_ether_addr(dev->dev_addr)) - return; - - netdev_err(ks->netdev, "invalid mac address read %pM\n", - dev->dev_addr); - } - random_ether_addr(dev->dev_addr); ks8851_write_mac_addr(dev); } @@ -795,6 +738,26 @@ static void ks8851_tx_work(struct work_struct *work) mutex_unlock(&ks->lock); } +/** + * ks8851_set_powermode - set power mode of the device + * @ks: The device state + * @pwrmode: The power mode value to write to KS_PMECR. + * + * Change the power mode of the chip. + */ +static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode) +{ + unsigned pmecr; + + netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode); + + pmecr = ks8851_rdreg16(ks, KS_PMECR); + pmecr &= ~PMECR_PM_MASK; + pmecr |= pwrmode; + + ks8851_wrreg16(ks, KS_PMECR, pmecr); +} + /** * ks8851_net_open - open network device * @dev: The network device being opened. @@ -1075,6 +1038,234 @@ static const struct net_device_ops ks8851_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; +/* Companion eeprom access */ + +enum { /* EEPROM programming states */ + EEPROM_CONTROL, + EEPROM_ADDRESS, + EEPROM_DATA, + EEPROM_COMPLETE +}; + +/** + * ks8851_eeprom_read - read a 16bits word in ks8851 companion EEPROM + * @dev: The network device the PHY is on. + * @addr: EEPROM address to read + * + * eeprom_size: used to define the data coding length. 
Can be changed + * through debug-fs. + * + * Programs a read on the EEPROM using ks8851 EEPROM SW access feature. + * Warning: The READ feature is not supported on ks8851 revision 0. + * + * Rough programming model: + * - on period start: set clock high and read value on bus + * - on period / 2: set clock low and program value on bus + * - start on period / 2 + */ +unsigned int ks8851_eeprom_read(struct net_device *dev, unsigned int addr) +{ + struct ks8851_net *ks = netdev_priv(dev); + int eepcr; + int ctrl = EEPROM_OP_READ; + int state = EEPROM_CONTROL; + int bit_count = EEPROM_OP_LEN - 1; + unsigned int data = 0; + int dummy; + unsigned int addr_len; + + addr_len = (ks->eeprom_size == 128) ? 6 : 8; + + /* start transaction: chip select high, authorize write */ + mutex_lock(&ks->lock); + eepcr = EEPCR_EESA | EEPCR_EESRWA; + ks8851_wrreg16(ks, KS_EEPCR, eepcr); + eepcr |= EEPCR_EECS; + ks8851_wrreg16(ks, KS_EEPCR, eepcr); + mutex_unlock(&ks->lock); + + while (state != EEPROM_COMPLETE) { + /* falling clock period starts... */ + /* set EED_IO pin for control and address */ + eepcr &= ~EEPCR_EEDO; + switch (state) { + case EEPROM_CONTROL: + eepcr |= ((ctrl >> bit_count) & 1) << 2; + if (bit_count-- <= 0) { + bit_count = addr_len - 1; + state = EEPROM_ADDRESS; + } + break; + case EEPROM_ADDRESS: + eepcr |= ((addr >> bit_count) & 1) << 2; + bit_count--; + break; + case EEPROM_DATA: + /* Change to receive mode */ + eepcr &= ~EEPCR_EESRWA; + break; + } + + /* lower clock */ + eepcr &= ~EEPCR_EESCK; + + mutex_lock(&ks->lock); + ks8851_wrreg16(ks, KS_EEPCR, eepcr); + mutex_unlock(&ks->lock); + + /* waitread period / 2 */ + udelay(EEPROM_SK_PERIOD / 2); + + /* rising clock period starts... */ + + /* raise clock */ + mutex_lock(&ks->lock); + eepcr |= EEPCR_EESCK; + ks8851_wrreg16(ks, KS_EEPCR, eepcr); + mutex_unlock(&ks->lock); + + /* Manage read */ + switch (state) { + case EEPROM_ADDRESS: + if (bit_count < 0) { + bit_count = EEPROM_DATA_LEN - 1; + state = EEPROM_DATA; + } + break; + case EEPROM_DATA: + mutex_lock(&ks->lock); + dummy = ks8851_rdreg16(ks, KS_EEPCR); + mutex_unlock(&ks->lock); + data |= ((dummy >> EEPCR_EESB_OFFSET) & 1) << bit_count; + if (bit_count-- <= 0) + state = EEPROM_COMPLETE; + break; + } + + /* wait period / 2 */ + udelay(EEPROM_SK_PERIOD / 2); + } + + /* close transaction */ + mutex_lock(&ks->lock); + eepcr &= ~EEPCR_EECS; + ks8851_wrreg16(ks, KS_EEPCR, eepcr); + eepcr = 0; + ks8851_wrreg16(ks, KS_EEPCR, eepcr); + mutex_unlock(&ks->lock); + + return data; +} + +/** + * ks8851_eeprom_write - write a 16bits word in ks8851 companion EEPROM + * @dev: The network device the PHY is on. + * @op: operand (can be WRITE, EWEN, EWDS) + * @addr: EEPROM address to write + * @data: data to write + * + * eeprom_size: used to define the data coding length. Can be changed + * through debug-fs. + * + * Programs a write on the EEPROM using ks8851 EEPROM SW access feature. + * + * Note that a write enable is required before writing data. + * + * Rough programming model: + * - on period start: set clock high + * - on period / 2: set clock low and program value on bus + * - start on period / 2 + */ +void ks8851_eeprom_write(struct net_device *dev, unsigned int op, + unsigned int addr, unsigned int data) +{ + struct ks8851_net *ks = netdev_priv(dev); + int eepcr; + int state = EEPROM_CONTROL; + int bit_count = EEPROM_OP_LEN - 1; + unsigned int addr_len; + + addr_len = (ks->eeprom_size == 128) ? 
6 : 8; + + switch (op) { + case EEPROM_OP_EWEN: + addr = 0x30; + break; + case EEPROM_OP_EWDS: + addr = 0; + break; + } + + /* start transaction: chip select high, authorize write */ + mutex_lock(&ks->lock); + eepcr = EEPCR_EESA | EEPCR_EESRWA; + ks8851_wrreg16(ks, KS_EEPCR, eepcr); + eepcr |= EEPCR_EECS; + ks8851_wrreg16(ks, KS_EEPCR, eepcr); + mutex_unlock(&ks->lock); + + while (state != EEPROM_COMPLETE) { + /* falling clock period starts... */ + /* set EED_IO pin for control and address */ + eepcr &= ~EEPCR_EEDO; + switch (state) { + case EEPROM_CONTROL: + eepcr |= ((op >> bit_count) & 1) << 2; + if (bit_count-- <= 0) { + bit_count = addr_len - 1; + state = EEPROM_ADDRESS; + } + break; + case EEPROM_ADDRESS: + eepcr |= ((addr >> bit_count) & 1) << 2; + if (bit_count-- <= 0) { + if (op == EEPROM_OP_WRITE) { + bit_count = EEPROM_DATA_LEN - 1; + state = EEPROM_DATA; + } else { + state = EEPROM_COMPLETE; + } + } + break; + case EEPROM_DATA: + eepcr |= ((data >> bit_count) & 1) << 2; + if (bit_count-- <= 0) + state = EEPROM_COMPLETE; + break; + } + + /* lower clock */ + eepcr &= ~EEPCR_EESCK; + + mutex_lock(&ks->lock); + ks8851_wrreg16(ks, KS_EEPCR, eepcr); + mutex_unlock(&ks->lock); + + /* wait period / 2 */ + udelay(EEPROM_SK_PERIOD / 2); + + /* rising clock period starts... */ + + /* raise clock */ + eepcr |= EEPCR_EESCK; + mutex_lock(&ks->lock); + ks8851_wrreg16(ks, KS_EEPCR, eepcr); + mutex_unlock(&ks->lock); + + /* wait period / 2 */ + udelay(EEPROM_SK_PERIOD / 2); + } + + /* close transaction */ + mutex_lock(&ks->lock); + eepcr &= ~EEPCR_EECS; + ks8851_wrreg16(ks, KS_EEPCR, eepcr); + eepcr = 0; + ks8851_wrreg16(ks, KS_EEPCR, eepcr); + mutex_unlock(&ks->lock); + +} + /* ethtool support */ static void ks8851_get_drvinfo(struct net_device *dev, @@ -1121,141 +1312,115 @@ static int ks8851_nway_reset(struct net_device *dev) return mii_nway_restart(&ks->mii); } -/* EEPROM support */ +static int ks8851_get_eeprom_len(struct net_device *dev) +{ + struct ks8851_net *ks = netdev_priv(dev); + return ks->eeprom_size; +} -static void ks8851_eeprom_regread(struct eeprom_93cx6 *ee) +static int ks8851_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *bytes) { - struct ks8851_net *ks = ee->data; - unsigned val; + struct ks8851_net *ks = netdev_priv(dev); + u16 *eeprom_buff; + int first_word; + int last_word; + int ret_val = 0; + u16 i; - val = ks8851_rdreg16(ks, KS_EEPCR); + if (eeprom->len == 0) + return -EINVAL; - ee->reg_data_out = (val & EEPCR_EESB) ? 1 : 0; - ee->reg_data_clock = (val & EEPCR_EESCK) ? 1 : 0; - ee->reg_chip_select = (val & EEPCR_EECS) ? 1 : 0; -} + if (eeprom->len > ks->eeprom_size) + return -EINVAL; -static void ks8851_eeprom_regwrite(struct eeprom_93cx6 *ee) -{ - struct ks8851_net *ks = ee->data; - unsigned val = EEPCR_EESA; /* default - eeprom access on */ - - if (ee->drive_data) - val |= EEPCR_EESRWA; - if (ee->reg_data_in) - val |= EEPCR_EEDO; - if (ee->reg_data_clock) - val |= EEPCR_EESCK; - if (ee->reg_chip_select) - val |= EEPCR_EECS; - - ks8851_wrreg16(ks, KS_EEPCR, val); -} + eeprom->magic = ks8851_rdreg16(ks, KS_CIDER); -/** - * ks8851_eeprom_claim - claim device EEPROM and activate the interface - * @ks: The network device state. - * - * Check for the presence of an EEPROM, and then activate software access - * to the device. 
- */ -static int ks8851_eeprom_claim(struct ks8851_net *ks) -{ - if (!(ks->rc_ccr & CCR_EEPROM)) - return -ENOENT; + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; - mutex_lock(&ks->lock); + eeprom_buff = kmalloc(sizeof(u16) * + (last_word - first_word + 1), GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; - /* start with clock low, cs high */ - ks8851_wrreg16(ks, KS_EEPCR, EEPCR_EESA | EEPCR_EECS); - return 0; -} + for (i = 0; i < last_word - first_word + 1; i++) + eeprom_buff[i] = ks8851_eeprom_read(dev, first_word + 1); -/** - * ks8851_eeprom_release - release the EEPROM interface - * @ks: The device state - * - * Release the software access to the device EEPROM - */ -static void ks8851_eeprom_release(struct ks8851_net *ks) -{ - unsigned val = ks8851_rdreg16(ks, KS_EEPCR); + /* Device's eeprom is little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); - ks8851_wrreg16(ks, KS_EEPCR, val & ~EEPCR_EESA); - mutex_unlock(&ks->lock); -} + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); + kfree(eeprom_buff); -#define KS_EEPROM_MAGIC (0x00008851) + return ret_val; +} static int ks8851_set_eeprom(struct net_device *dev, - struct ethtool_eeprom *ee, u8 *data) + struct ethtool_eeprom *eeprom, u8 *bytes) { struct ks8851_net *ks = netdev_priv(dev); - int offset = ee->offset; - int len = ee->len; - u16 tmp; - - /* currently only support byte writing */ - if (len != 1) - return -EINVAL; - - if (ee->magic != KS_EEPROM_MAGIC) + u16 *eeprom_buff; + void *ptr; + int max_len; + int first_word; + int last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->len > ks->eeprom_size) return -EINVAL; - if (ks8851_eeprom_claim(ks)) - return -ENOENT; + if (eeprom->magic != ks8851_rdreg16(ks, KS_CIDER)) + return -EFAULT; - eeprom_93cx6_wren(&ks->eeprom, true); - - /* ethtool currently only supports writing bytes, which means - * we have to read/modify/write our 16bit EEPROMs */ + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + max_len = (last_word - first_word + 1) * 2; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; - eeprom_93cx6_read(&ks->eeprom, offset/2, &tmp); + ptr = (void *)eeprom_buff; - if (offset & 1) { - tmp &= 0xff; - tmp |= *data << 8; - } else { - tmp &= 0xff00; - tmp |= *data; + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word */ + /* only the second byte of the word is being modified */ + eeprom_buff[0] = ks8851_eeprom_read(dev, first_word); + ptr++; } + if ((eeprom->offset + eeprom->len) & 1) + /* need read/modify/write of last changed EEPROM word */ + /* only the first byte of the word is being modified */ + eeprom_buff[last_word - first_word] = + ks8851_eeprom_read(dev, last_word); - eeprom_93cx6_write(&ks->eeprom, offset/2, tmp); - eeprom_93cx6_wren(&ks->eeprom, false); - ks8851_eeprom_release(ks); + /* Device's eeprom is little-endian, word addressable */ + le16_to_cpus(&eeprom_buff[0]); + le16_to_cpus(&eeprom_buff[last_word - first_word]); - return 0; -} + memcpy(ptr, bytes, eeprom->len); -static int ks8851_get_eeprom(struct net_device *dev, - struct ethtool_eeprom *ee, u8 *data) -{ - struct ks8851_net *ks = netdev_priv(dev); - int offset = ee->offset; - int len = ee->len; + for (i = 0; i < last_word - first_word + 1; i++) + eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); - /* must be 2 byte aligned */ - if (len 
& 1 || offset & 1) - return -EINVAL; + ks8851_eeprom_write(dev, EEPROM_OP_EWEN, 0, 0); - if (ks8851_eeprom_claim(ks)) - return -ENOENT; - - ee->magic = KS_EEPROM_MAGIC; + for (i = 0; i < last_word - first_word + 1; i++) { + ks8851_eeprom_write(dev, EEPROM_OP_WRITE, first_word + i, + eeprom_buff[i]); + mdelay(EEPROM_WRITE_TIME); + } - eeprom_93cx6_multiread(&ks->eeprom, offset/2, (__le16 *)data, len/2); - ks8851_eeprom_release(ks); + ks8851_eeprom_write(dev, EEPROM_OP_EWDS, 0, 0); - return 0; -} - -static int ks8851_get_eeprom_len(struct net_device *dev) -{ - struct ks8851_net *ks = netdev_priv(dev); - - /* currently, we assume it is an 93C46 attached, so return 128 */ - return ks->rc_ccr & CCR_EEPROM ? 128 : 0; + kfree(eeprom_buff); + return ret_val; } static const struct ethtool_ops ks8851_ethtool_ops = { @@ -1448,13 +1613,6 @@ static int __devinit ks8851_probe(struct spi_device *spi) spi_message_add_tail(&ks->spi_xfer2[0], &ks->spi_msg2); spi_message_add_tail(&ks->spi_xfer2[1], &ks->spi_msg2); - /* setup EEPROM state */ - - ks->eeprom.data = ks; - ks->eeprom.width = PCI_EEPROM_WIDTH_93C46; - ks->eeprom.register_read = ks8851_eeprom_regread; - ks->eeprom.register_write = ks8851_eeprom_regwrite; - /* setup mii state */ ks->mii.dev = ndev; ks->mii.phy_id = 1, @@ -1516,10 +1674,9 @@ static int __devinit ks8851_probe(struct spi_device *spi) goto err_netdev; } - netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n", + netdev_info(ndev, "revision %d, MAC %pM, IRQ %d\n", CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)), - ndev->dev_addr, ndev->irq, - ks->rc_ccr & CCR_EEPROM ? "has" : "no"); + ndev->dev_addr, ndev->irq); return 0; diff --git a/trunk/drivers/net/ethernet/micrel/ks8851.h b/trunk/drivers/net/ethernet/micrel/ks8851.h index b0fae86aacad..537fb06e5932 100644 --- a/trunk/drivers/net/ethernet/micrel/ks8851.h +++ b/trunk/drivers/net/ethernet/micrel/ks8851.h @@ -16,7 +16,7 @@ #define CCR_32PIN (1 << 0) /* MAC address registers */ -#define KS_MAR(_m) (0x15 - (_m)) +#define KS_MAR(_m) 0x15 - (_m) #define KS_MARL 0x10 #define KS_MARM 0x12 #define KS_MARH 0x14 @@ -27,11 +27,22 @@ #define KS_EEPCR 0x22 #define EEPCR_EESRWA (1 << 5) #define EEPCR_EESA (1 << 4) -#define EEPCR_EESB (1 << 3) +#define EEPCR_EESB_OFFSET 3 +#define EEPCR_EESB (1 << EEPCR_EESB_OFFSET) #define EEPCR_EEDO (1 << 2) #define EEPCR_EESCK (1 << 1) #define EEPCR_EECS (1 << 0) +#define EEPROM_OP_LEN 3 /* bits:*/ +#define EEPROM_OP_READ 0x06 +#define EEPROM_OP_EWEN 0x04 +#define EEPROM_OP_WRITE 0x05 +#define EEPROM_OP_EWDS 0x14 + +#define EEPROM_DATA_LEN 16 /* 16 bits EEPROM */ +#define EEPROM_WRITE_TIME 4 /* wrt ack time in ms */ +#define EEPROM_SK_PERIOD 400 /* in us */ + #define KS_MBIR 0x24 #define MBIR_TXMBF (1 << 12) #define MBIR_TXMBFA (1 << 11) diff --git a/trunk/drivers/net/ethernet/micrel/ks8851_mll.c b/trunk/drivers/net/ethernet/micrel/ks8851_mll.c index e58e78e5c930..d19c849059d8 100644 --- a/trunk/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/trunk/drivers/net/ethernet/micrel/ks8851_mll.c @@ -1500,7 +1500,8 @@ static int ks_hw_init(struct ks_net *ks) ks->all_mcast = 0; ks->mcast_lst_size = 0; - ks->frame_head_info = kmalloc(MHEADER_SIZE, GFP_KERNEL); + ks->frame_head_info = (struct type_frame_head *) \ + kmalloc(MHEADER_SIZE, GFP_KERNEL); if (!ks->frame_head_info) { pr_err("Error: Fail to allocate frame memory\n"); return false; @@ -1658,7 +1659,18 @@ static struct platform_driver ks8851_platform_driver = { .remove = __devexit_p(ks8851_remove), }; -module_platform_driver(ks8851_platform_driver); +static int 
__init ks8851_init(void) +{ + return platform_driver_register(&ks8851_platform_driver); +} + +static void __exit ks8851_exit(void) +{ + platform_driver_unregister(&ks8851_platform_driver); +} + +module_init(ks8851_init); +module_exit(ks8851_exit); MODULE_DESCRIPTION("KS8851 MLL Network driver"); MODULE_AUTHOR("David Choi "); diff --git a/trunk/drivers/net/ethernet/micrel/ksz884x.c b/trunk/drivers/net/ethernet/micrel/ksz884x.c index 6ed09a85f035..7ece990381c8 100644 --- a/trunk/drivers/net/ethernet/micrel/ksz884x.c +++ b/trunk/drivers/net/ethernet/micrel/ksz884x.c @@ -743,7 +743,8 @@ /* Change default LED mode. */ #define SET_DEFAULT_LED LED_SPEED_DUPLEX_ACT -#define MAC_ADDR_ORDER(i) (ETH_ALEN - 1 - (i)) +#define MAC_ADDR_LEN 6 +#define MAC_ADDR_ORDER(i) (MAC_ADDR_LEN - 1 - (i)) #define MAX_ETHERNET_BODY_SIZE 1500 #define ETHERNET_HEADER_SIZE 14 @@ -1042,7 +1043,7 @@ enum { * @valid: Valid setting indicating the entry is being used. */ struct ksz_mac_table { - u8 mac_addr[ETH_ALEN]; + u8 mac_addr[MAC_ADDR_LEN]; u16 vid; u8 fid; u8 ports; @@ -1186,8 +1187,8 @@ struct ksz_switch { u8 diffserv[DIFFSERV_ENTRIES]; u8 p_802_1p[PRIO_802_1P_ENTRIES]; - u8 br_addr[ETH_ALEN]; - u8 other_addr[ETH_ALEN]; + u8 br_addr[MAC_ADDR_LEN]; + u8 other_addr[MAC_ADDR_LEN]; u8 broad_per; u8 member; @@ -1291,14 +1292,14 @@ struct ksz_hw { int tx_int_mask; int tx_size; - u8 perm_addr[ETH_ALEN]; - u8 override_addr[ETH_ALEN]; - u8 address[ADDITIONAL_ENTRIES][ETH_ALEN]; + u8 perm_addr[MAC_ADDR_LEN]; + u8 override_addr[MAC_ADDR_LEN]; + u8 address[ADDITIONAL_ENTRIES][MAC_ADDR_LEN]; u8 addr_list_size; u8 mac_override; u8 promiscuous; u8 all_multi; - u8 multi_list[MAX_MULTICAST_LIST][ETH_ALEN]; + u8 multi_list[MAX_MULTICAST_LIST][MAC_ADDR_LEN]; u8 multi_bits[HW_MULTICAST_SIZE]; u8 multi_list_size; @@ -3653,7 +3654,7 @@ static void hw_add_wol_bcast(struct ksz_hw *hw) static const u8 mask[] = { 0x3F }; static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; - hw_set_wol_frame(hw, 2, 1, mask, ETH_ALEN, pattern); + hw_set_wol_frame(hw, 2, 1, mask, MAC_ADDR_LEN, pattern); } /** @@ -3688,7 +3689,7 @@ static void hw_add_wol_ucast(struct ksz_hw *hw) { static const u8 mask[] = { 0x3F }; - hw_set_wol_frame(hw, 0, 1, mask, ETH_ALEN, hw->override_addr); + hw_set_wol_frame(hw, 0, 1, mask, MAC_ADDR_LEN, hw->override_addr); } /** @@ -4054,7 +4055,7 @@ static void hw_set_addr(struct ksz_hw *hw) { int i; - for (i = 0; i < ETH_ALEN; i++) + for (i = 0; i < MAC_ADDR_LEN; i++) writeb(hw->override_addr[MAC_ADDR_ORDER(i)], hw->io + KS884X_ADDR_0_OFFSET + i); @@ -4071,16 +4072,17 @@ static void hw_read_addr(struct ksz_hw *hw) { int i; - for (i = 0; i < ETH_ALEN; i++) + for (i = 0; i < MAC_ADDR_LEN; i++) hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io + KS884X_ADDR_0_OFFSET + i); if (!hw->mac_override) { - memcpy(hw->override_addr, hw->perm_addr, ETH_ALEN); + memcpy(hw->override_addr, hw->perm_addr, MAC_ADDR_LEN); if (empty_addr(hw->override_addr)) { - memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS, ETH_ALEN); + memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS, + MAC_ADDR_LEN); memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS, - ETH_ALEN); + MAC_ADDR_LEN); hw->override_addr[5] += hw->id; hw_set_addr(hw); } @@ -4128,16 +4130,16 @@ static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr) int i; int j = ADDITIONAL_ENTRIES; - if (!memcmp(hw->override_addr, mac_addr, ETH_ALEN)) + if (!memcmp(hw->override_addr, mac_addr, MAC_ADDR_LEN)) return 0; for (i = 0; i < hw->addr_list_size; i++) { - if (!memcmp(hw->address[i], mac_addr, ETH_ALEN)) + if 
(!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN)) return 0; if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i])) j = i; } if (j < ADDITIONAL_ENTRIES) { - memcpy(hw->address[j], mac_addr, ETH_ALEN); + memcpy(hw->address[j], mac_addr, MAC_ADDR_LEN); hw_ena_add_addr(hw, j, hw->address[j]); return 0; } @@ -4149,8 +4151,8 @@ static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr) int i; for (i = 0; i < hw->addr_list_size; i++) { - if (!memcmp(hw->address[i], mac_addr, ETH_ALEN)) { - memset(hw->address[i], 0, ETH_ALEN); + if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN)) { + memset(hw->address[i], 0, MAC_ADDR_LEN); writel(0, hw->io + ADD_ADDR_INCR * i + KS_ADD_ADDR_0_HI); return 0; @@ -4380,10 +4382,12 @@ static void ksz_update_timer(struct ksz_timer_info *info) */ static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit) { - desc_info->ring = kzalloc(sizeof(struct ksz_desc) * desc_info->alloc, - GFP_KERNEL); + desc_info->ring = kmalloc(sizeof(struct ksz_desc) * desc_info->alloc, + GFP_KERNEL); if (!desc_info->ring) return 1; + memset((void *) desc_info->ring, 0, + sizeof(struct ksz_desc) * desc_info->alloc); hw_init_desc(desc_info, transmit); return 0; } @@ -5672,7 +5676,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr) hw_del_addr(hw, dev->dev_addr); else { hw->mac_override = 1; - memcpy(hw->override_addr, mac->sa_data, ETH_ALEN); + memcpy(hw->override_addr, mac->sa_data, MAC_ADDR_LEN); } memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN); @@ -5782,7 +5786,7 @@ static void netdev_set_rx_mode(struct net_device *dev) netdev_for_each_mc_addr(ha, dev) { if (i >= MAX_MULTICAST_LIST) break; - memcpy(hw->multi_list[i++], ha->addr, ETH_ALEN); + memcpy(hw->multi_list[i++], ha->addr, MAC_ADDR_LEN); } hw->multi_list_size = (u8) i; hw_set_grp_addr(hw); @@ -6089,10 +6093,9 @@ static void netdev_get_drvinfo(struct net_device *dev, struct dev_priv *priv = netdev_priv(dev); struct dev_info *hw_priv = priv->adapter; - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(hw_priv->pdev), - sizeof(info->bus_info)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, pci_name(hw_priv->pdev)); } /** @@ -6584,8 +6587,7 @@ static void netdev_get_ethtool_stats(struct net_device *dev, * * Return 0 if successful; otherwise an error code. 
*/ -static int netdev_set_features(struct net_device *dev, - netdev_features_t features) +static int netdev_set_features(struct net_device *dev, u32 features) { struct dev_priv *priv = netdev_priv(dev); struct dev_info *hw_priv = priv->adapter; @@ -6607,7 +6609,7 @@ static int netdev_set_features(struct net_device *dev, return 0; } -static const struct ethtool_ops netdev_ethtool_ops = { +static struct ethtool_ops netdev_ethtool_ops = { .get_settings = netdev_get_settings, .set_settings = netdev_set_settings, .nway_reset = netdev_nway_reset, @@ -6858,7 +6860,7 @@ static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port) int num; i = j = num = got_num = 0; - while (j < ETH_ALEN) { + while (j < MAC_ADDR_LEN) { if (macaddr[i]) { int digit; @@ -6889,7 +6891,7 @@ static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port) } i++; } - if (ETH_ALEN == j) { + if (MAC_ADDR_LEN == j) { if (MAIN_PORT == port) hw_priv->hw.mac_override = 1; } @@ -7056,7 +7058,7 @@ static int __devinit pcidev_init(struct pci_dev *pdev, /* Multiple device interfaces mode requires a second MAC address. */ if (hw->dev_count > 1) { - memcpy(sw->other_addr, hw->override_addr, ETH_ALEN); + memcpy(sw->other_addr, hw->override_addr, MAC_ADDR_LEN); read_other_addr(hw); if (mac1addr[0] != ':') get_mac_addr(hw_priv, mac1addr, OTHER_PORT); @@ -7106,11 +7108,12 @@ static int __devinit pcidev_init(struct pci_dev *pdev, dev->irq = pdev->irq; if (MAIN_PORT == i) memcpy(dev->dev_addr, hw_priv->hw.override_addr, - ETH_ALEN); + MAC_ADDR_LEN); else { - memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN); + memcpy(dev->dev_addr, sw->other_addr, + MAC_ADDR_LEN); if (!memcmp(sw->other_addr, hw->override_addr, - ETH_ALEN)) + MAC_ADDR_LEN)) dev->dev_addr[5] += port->first_port; } diff --git a/trunk/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/trunk/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 20b72ecb020a..0778edcf7b9a 100644 --- a/trunk/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/trunk/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -1491,7 +1491,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) * access to avoid theoretical race condition with functions that * change NETIF_F_LRO flag at runtime. 
*/ - bool lro_enabled = !!(ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO); + bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO; while (rx_done->entry[idx].length != 0 && work_done < budget) { length = ntohs(rx_done->entry[idx].length); @@ -3149,8 +3149,7 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr) return 0; } -static netdev_features_t myri10ge_fix_features(struct net_device *dev, - netdev_features_t features) +static u32 myri10ge_fix_features(struct net_device *dev, u32 features) { if (!(features & NETIF_F_RXCSUM)) features &= ~NETIF_F_LRO; diff --git a/trunk/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h b/trunk/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h index b7fc26c4f738..11be150e4d67 100644 --- a/trunk/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h +++ b/trunk/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h @@ -356,7 +356,7 @@ enum myri10ge_mcp_cmd_type { MXGEFW_CMD_GET_DCA_OFFSET = 56, /* offset of dca control for WDMAs */ - /* VMware NetQueue commands */ + /* VMWare NetQueue commands */ MXGEFW_CMD_NETQ_GET_FILTERS_PER_QUEUE = 57, MXGEFW_CMD_NETQ_ADD_FILTER = 58, /* data0 = filter_id << 16 | queue << 8 | type */ diff --git a/trunk/drivers/net/ethernet/natsemi/jazzsonic.c b/trunk/drivers/net/ethernet/natsemi/jazzsonic.c index 5b89fd377ae3..fc7c6a932ad9 100644 --- a/trunk/drivers/net/ethernet/natsemi/jazzsonic.c +++ b/trunk/drivers/net/ethernet/natsemi/jazzsonic.c @@ -294,4 +294,15 @@ static struct platform_driver jazz_sonic_driver = { }, }; -module_platform_driver(jazz_sonic_driver); +static int __init jazz_sonic_init_module(void) +{ + return platform_driver_register(&jazz_sonic_driver); +} + +static void __exit jazz_sonic_cleanup_module(void) +{ + platform_driver_unregister(&jazz_sonic_driver); +} + +module_init(jazz_sonic_init_module); +module_exit(jazz_sonic_cleanup_module); diff --git a/trunk/drivers/net/ethernet/natsemi/macsonic.c b/trunk/drivers/net/ethernet/natsemi/macsonic.c index 70367d76fc8d..a2eacbfb4252 100644 --- a/trunk/drivers/net/ethernet/natsemi/macsonic.c +++ b/trunk/drivers/net/ethernet/natsemi/macsonic.c @@ -643,4 +643,15 @@ static struct platform_driver mac_sonic_driver = { }, }; -module_platform_driver(mac_sonic_driver); +static int __init mac_sonic_init_module(void) +{ + return platform_driver_register(&mac_sonic_driver); +} + +static void __exit mac_sonic_cleanup_module(void) +{ + platform_driver_unregister(&mac_sonic_driver); +} + +module_init(mac_sonic_init_module); +module_exit(mac_sonic_cleanup_module); diff --git a/trunk/drivers/net/ethernet/natsemi/natsemi.c b/trunk/drivers/net/ethernet/natsemi/natsemi.c index ac7b16b6e7af..6ca047aab793 100644 --- a/trunk/drivers/net/ethernet/natsemi/natsemi.c +++ b/trunk/drivers/net/ethernet/natsemi/natsemi.c @@ -2555,9 +2555,9 @@ static void set_rx_mode(struct net_device *dev) static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct netdev_private *np = netdev_priv(dev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); + strncpy(info->driver, DRV_NAME, ETHTOOL_BUSINFO_LEN); + strncpy(info->version, DRV_VERSION, ETHTOOL_BUSINFO_LEN); + strncpy(info->bus_info, pci_name(np->pci_dev), ETHTOOL_BUSINFO_LEN); } static int get_regs_len(struct net_device *dev) diff --git a/trunk/drivers/net/ethernet/natsemi/ns83820.c b/trunk/drivers/net/ethernet/natsemi/ns83820.c index 
c24b46cbfe27..2b8f64ddfb55 100644 --- a/trunk/drivers/net/ethernet/natsemi/ns83820.c +++ b/trunk/drivers/net/ethernet/natsemi/ns83820.c @@ -1364,9 +1364,9 @@ static int ns83820_set_settings(struct net_device *ndev, static void ns83820_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { struct ns83820 *dev = PRIV(ndev); - strlcpy(info->driver, "ns83820", sizeof(info->driver)); - strlcpy(info->version, VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(dev->pci_dev), sizeof(info->bus_info)); + strcpy(info->driver, "ns83820"); + strcpy(info->version, VERSION); + strcpy(info->bus_info, pci_name(dev->pci_dev)); } static u32 ns83820_get_link(struct net_device *ndev) diff --git a/trunk/drivers/net/ethernet/natsemi/xtsonic.c b/trunk/drivers/net/ethernet/natsemi/xtsonic.c index e01c0a07a93a..ccf61b9da8d1 100644 --- a/trunk/drivers/net/ethernet/natsemi/xtsonic.c +++ b/trunk/drivers/net/ethernet/natsemi/xtsonic.c @@ -319,4 +319,15 @@ static struct platform_driver xtsonic_driver = { }, }; -module_platform_driver(xtsonic_driver); +static int __init xtsonic_init(void) +{ + return platform_driver_register(&xtsonic_driver); +} + +static void __exit xtsonic_cleanup(void) +{ + platform_driver_unregister(&xtsonic_driver); +} + +module_init(xtsonic_init); +module_exit(xtsonic_cleanup); diff --git a/trunk/drivers/net/ethernet/neterion/s2io.c b/trunk/drivers/net/ethernet/neterion/s2io.c index 97f63e12d86e..c27fb3dda9f4 100644 --- a/trunk/drivers/net/ethernet/neterion/s2io.c +++ b/trunk/drivers/net/ethernet/neterion/s2io.c @@ -5391,9 +5391,10 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev, { struct s2io_nic *sp = netdev_priv(dev); - strlcpy(info->driver, s2io_driver_name, sizeof(info->driver)); - strlcpy(info->version, s2io_driver_version, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info)); + strncpy(info->driver, s2io_driver_name, sizeof(info->driver)); + strncpy(info->version, s2io_driver_version, sizeof(info->version)); + strncpy(info->fw_version, "", sizeof(info->fw_version)); + strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info)); info->regdump_len = XENA_REG_SPACE; info->eedump_len = XENA_EEPROM_SPACE; } @@ -6615,10 +6616,10 @@ static void s2io_ethtool_get_strings(struct net_device *dev, } } -static int s2io_set_features(struct net_device *dev, netdev_features_t features) +static int s2io_set_features(struct net_device *dev, u32 features) { struct s2io_nic *sp = netdev_priv(dev); - netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO; + u32 changed = (features ^ dev->features) & NETIF_F_LRO; if (changed && netif_running(dev)) { int rc; diff --git a/trunk/drivers/net/ethernet/neterion/vxge/vxge-main.c b/trunk/drivers/net/ethernet/neterion/vxge/vxge-main.c index ef76725454d2..a83197d757c1 100644 --- a/trunk/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/trunk/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -2662,10 +2662,9 @@ static void vxge_poll_vp_lockup(unsigned long data) mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000); } -static netdev_features_t vxge_fix_features(struct net_device *dev, - netdev_features_t features) +static u32 vxge_fix_features(struct net_device *dev, u32 features) { - netdev_features_t changed = dev->features ^ features; + u32 changed = dev->features ^ features; /* Enabling RTH requires some of the logic in vxge_device_register and a * vpath reset. 
Due to these restrictions, only allow modification @@ -2677,10 +2676,10 @@ static netdev_features_t vxge_fix_features(struct net_device *dev, return features; } -static int vxge_set_features(struct net_device *dev, netdev_features_t features) +static int vxge_set_features(struct net_device *dev, u32 features) { struct vxgedev *vdev = netdev_priv(dev); - netdev_features_t changed = dev->features ^ features; + u32 changed = dev->features ^ features; if (!(changed & NETIF_F_RXHASH)) return 0; @@ -3305,7 +3304,7 @@ static void vxge_tx_watchdog(struct net_device *dev) * * Add the vlan id to the devices vlan id table */ -static int +static void vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) { struct vxgedev *vdev = netdev_priv(dev); @@ -3320,7 +3319,6 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) vxge_hw_vpath_vid_add(vpath->handle, vid); } set_bit(vid, vdev->active_vlans); - return 0; } /** @@ -3330,7 +3328,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) * * Remove the vlan id from the device's vlan id table */ -static int +static void vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) { struct vxgedev *vdev = netdev_priv(dev); @@ -3349,7 +3347,6 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__); clear_bit(vid, vdev->active_vlans); - return 0; } static const struct net_device_ops vxge_netdev_ops = { diff --git a/trunk/drivers/net/ethernet/nuvoton/w90p910_ether.c b/trunk/drivers/net/ethernet/nuvoton/w90p910_ether.c index b75a0497d58d..f1bfb8f8fcf0 100644 --- a/trunk/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/trunk/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -1103,7 +1103,18 @@ static struct platform_driver w90p910_ether_driver = { }, }; -module_platform_driver(w90p910_ether_driver); +static int __init w90p910_ether_init(void) +{ + return platform_driver_register(&w90p910_ether_driver); +} + +static void __exit w90p910_ether_exit(void) +{ + platform_driver_unregister(&w90p910_ether_driver); +} + +module_init(w90p910_ether_init); +module_exit(w90p910_ether_exit); MODULE_AUTHOR("Wan ZongShun "); MODULE_DESCRIPTION("w90p910 MAC driver!"); diff --git a/trunk/drivers/net/ethernet/nvidia/forcedeth.c b/trunk/drivers/net/ethernet/nvidia/forcedeth.c index 4c4e7f458383..1c61d36e6570 100644 --- a/trunk/drivers/net/ethernet/nvidia/forcedeth.c +++ b/trunk/drivers/net/ethernet/nvidia/forcedeth.c @@ -65,8 +65,7 @@ #include #include #include -#include -#include +#include #include #include @@ -737,16 +736,6 @@ struct nv_skb_map { * - tx setup is lockless: it relies on netif_tx_lock. Actual submission * needs netdev_priv(dev)->lock :-( * - set_multicast_list: preparation lockless, relies on netif_tx_lock. - * - * Hardware stats updates are protected by hwstats_lock: - * - updated by nv_do_stats_poll (timer). This is meant to avoid - * integer wraparound in the NIC stats registers, at low frequency - * (0.1 Hz) - * - updated by nv_get_ethtool_stats + nv_get_stats64 - * - * Software stats are accessed only through 64b synchronization points - * and are not subject to other synchronization techniques (single - * update thread on the TX or RX paths). 
*/ /* in dev: base, irq */ @@ -756,10 +745,9 @@ struct fe_priv { struct net_device *dev; struct napi_struct napi; - /* hardware stats are updated in syscall and timer */ - spinlock_t hwstats_lock; + /* General data: + * Locking: spin_lock(&np->lock); */ struct nv_ethtool_stats estats; - int in_shutdown; u32 linkspeed; int duplex; @@ -810,13 +798,6 @@ struct fe_priv { u32 nic_poll_irq; int rx_ring_size; - /* RX software stats */ - struct u64_stats_sync swstats_rx_syncp; - u64 stat_rx_packets; - u64 stat_rx_bytes; /* not always available in HW */ - u64 stat_rx_missed_errors; - u64 stat_rx_dropped; - /* media detection workaround. * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); */ @@ -839,12 +820,6 @@ struct fe_priv { struct nv_skb_map *tx_end_flip; int tx_stop; - /* TX software stats */ - struct u64_stats_sync swstats_tx_syncp; - u64 stat_tx_packets; /* not always available in HW */ - u64 stat_tx_bytes; - u64 stat_tx_dropped; - /* msi/msi-x fields */ u32 msi_flags; struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS]; @@ -916,11 +891,6 @@ enum { }; static int dma_64bit = NV_DMA_64BIT_ENABLED; -/* - * Debug output control for tx_timeout - */ -static bool debug_tx_timeout = false; - /* * Crossover Detection * Realtek 8201 phy + some OEM boards do not work properly. @@ -1660,19 +1630,11 @@ static void nv_mac_reset(struct net_device *dev) pci_push(base); } -/* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */ -static void nv_update_stats(struct net_device *dev) +static void nv_get_hw_stats(struct net_device *dev) { struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); - /* If it happens that this is run in top-half context, then - * replace the spin_lock of hwstats_lock with - * spin_lock_irqsave() in calling functions. */ - WARN_ONCE(in_irq(), "forcedeth: estats spin_lock(_bh) from top-half"); - assert_spin_locked(&np->hwstats_lock); - - /* query hardware */ np->estats.tx_bytes += readl(base + NvRegTxCnt); np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); @@ -1731,73 +1693,40 @@ static void nv_update_stats(struct net_device *dev) } /* - * nv_get_stats64: dev->ndo_get_stats64 function + * nv_get_stats: dev->get_stats function * Get latest stats value from the nic. * Called with read_lock(&dev_base_lock) held for read - * only synchronized against unregister_netdevice. */ -static struct rtnl_link_stats64* -nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) - __acquires(&netdev_priv(dev)->hwstats_lock) - __releases(&netdev_priv(dev)->hwstats_lock) +static struct net_device_stats *nv_get_stats(struct net_device *dev) { struct fe_priv *np = netdev_priv(dev); - unsigned int syncp_start; - - /* - * Note: because HW stats are not always available and for - * consistency reasons, the following ifconfig stats are - * managed by software: rx_bytes, tx_bytes, rx_packets and - * tx_packets. The related hardware stats reported by ethtool - * should be equivalent to these ifconfig stats, with 4 - * additional bytes per packet (Ethernet FCS CRC), except for - * tx_packets when TSO kicks in. 
- */ - - /* software stats */ - do { - syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp); - storage->rx_packets = np->stat_rx_packets; - storage->rx_bytes = np->stat_rx_bytes; - storage->rx_dropped = np->stat_rx_dropped; - storage->rx_missed_errors = np->stat_rx_missed_errors; - } while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start)); - - do { - syncp_start = u64_stats_fetch_begin_bh(&np->swstats_tx_syncp); - storage->tx_packets = np->stat_tx_packets; - storage->tx_bytes = np->stat_tx_bytes; - storage->tx_dropped = np->stat_tx_dropped; - } while (u64_stats_fetch_retry_bh(&np->swstats_tx_syncp, syncp_start)); /* If the nic supports hw counters then retrieve latest values */ - if (np->driver_data & DEV_HAS_STATISTICS_V123) { - spin_lock_bh(&np->hwstats_lock); - - nv_update_stats(dev); - - /* generic stats */ - storage->rx_errors = np->estats.rx_errors_total; - storage->tx_errors = np->estats.tx_errors_total; - - /* meaningful only when NIC supports stats v3 */ - storage->multicast = np->estats.rx_multicast; + if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) { + nv_get_hw_stats(dev); - /* detailed rx_errors */ - storage->rx_length_errors = np->estats.rx_length_error; - storage->rx_over_errors = np->estats.rx_over_errors; - storage->rx_crc_errors = np->estats.rx_crc_errors; - storage->rx_frame_errors = np->estats.rx_frame_align_error; - storage->rx_fifo_errors = np->estats.rx_drop_frame; - - /* detailed tx_errors */ - storage->tx_carrier_errors = np->estats.tx_carrier_errors; - storage->tx_fifo_errors = np->estats.tx_fifo_errors; + /* + * Note: because HW stats are not always available and + * for consistency reasons, the following ifconfig + * stats are managed by software: rx_bytes, tx_bytes, + * rx_packets and tx_packets. The related hardware + * stats reported by ethtool should be equivalent to + * these ifconfig stats, with 4 additional bytes per + * packet (Ethernet FCS CRC). 
+ */ - spin_unlock_bh(&np->hwstats_lock); + /* copy to net_device stats */ + dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors; + dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors; + dev->stats.rx_crc_errors = np->estats.rx_crc_errors; + dev->stats.rx_over_errors = np->estats.rx_over_errors; + dev->stats.rx_fifo_errors = np->estats.rx_drop_frame; + dev->stats.rx_errors = np->estats.rx_errors_total; + dev->stats.tx_errors = np->estats.tx_errors_total; } - return storage; + return &dev->stats; } /* @@ -1830,12 +1759,8 @@ static int nv_alloc_rx(struct net_device *dev) np->put_rx.orig = np->first_rx.orig; if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) np->put_rx_ctx = np->first_rx_ctx; - } else { - u64_stats_update_begin(&np->swstats_rx_syncp); - np->stat_rx_dropped++; - u64_stats_update_end(&np->swstats_rx_syncp); + } else return 1; - } } return 0; } @@ -1866,12 +1791,8 @@ static int nv_alloc_rx_optimized(struct net_device *dev) np->put_rx.ex = np->first_rx.ex; if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) np->put_rx_ctx = np->first_rx_ctx; - } else { - u64_stats_update_begin(&np->swstats_rx_syncp); - np->stat_rx_dropped++; - u64_stats_update_end(&np->swstats_rx_syncp); + } else return 1; - } } return 0; } @@ -1928,7 +1849,6 @@ static void nv_init_tx(struct net_device *dev) np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb; np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1]; - netdev_reset_queue(np->dev); np->tx_pkts_in_progress = 0; np->tx_change_owner = NULL; np->tx_end_flip = NULL; @@ -2007,11 +1927,8 @@ static void nv_drain_tx(struct net_device *dev) np->tx_ring.ex[i].bufhigh = 0; np->tx_ring.ex[i].buflow = 0; } - if (nv_release_txskb(np, &np->tx_skb[i])) { - u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_dropped++; - u64_stats_update_end(&np->swstats_tx_syncp); - } + if (nv_release_txskb(np, &np->tx_skb[i])) + dev->stats.tx_dropped++; np->tx_skb[i].dma = 0; np->tx_skb[i].dma_len = 0; np->tx_skb[i].dma_single = 0; @@ -2277,9 +2194,6 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) /* set tx flags */ start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); - - netdev_sent_queue(np->dev, skb->len); - np->put_tx.orig = put_tx; spin_unlock_irqrestore(&np->lock, flags); @@ -2424,9 +2338,6 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, /* set tx flags */ start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); - - netdev_sent_queue(np->dev, skb->len); - np->put_tx.ex = put_tx; spin_unlock_irqrestore(&np->lock, flags); @@ -2464,7 +2375,6 @@ static int nv_tx_done(struct net_device *dev, int limit) u32 flags; int tx_work = 0; struct ring_desc *orig_get_tx = np->get_tx.orig; - unsigned int bytes_compl = 0; while ((np->get_tx.orig != np->put_tx.orig) && !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) && @@ -2475,16 +2385,12 @@ static int nv_tx_done(struct net_device *dev, int limit) if (np->desc_ver == DESC_VER_1) { if (flags & NV_TX_LASTPACKET) { if (flags & NV_TX_ERROR) { - if ((flags & NV_TX_RETRYERROR) - && !(flags & NV_TX_RETRYCOUNT_MASK)) + if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK)) nv_legacybackoff_reseed(dev); } else { - u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_packets++; - np->stat_tx_bytes += np->get_tx_ctx->skb->len; - u64_stats_update_end(&np->swstats_tx_syncp); + dev->stats.tx_packets++; + dev->stats.tx_bytes += np->get_tx_ctx->skb->len; } - bytes_compl += 
np->get_tx_ctx->skb->len; dev_kfree_skb_any(np->get_tx_ctx->skb); np->get_tx_ctx->skb = NULL; tx_work++; @@ -2492,16 +2398,12 @@ static int nv_tx_done(struct net_device *dev, int limit) } else { if (flags & NV_TX2_LASTPACKET) { if (flags & NV_TX2_ERROR) { - if ((flags & NV_TX2_RETRYERROR) - && !(flags & NV_TX2_RETRYCOUNT_MASK)) + if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) nv_legacybackoff_reseed(dev); } else { - u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_packets++; - np->stat_tx_bytes += np->get_tx_ctx->skb->len; - u64_stats_update_end(&np->swstats_tx_syncp); + dev->stats.tx_packets++; + dev->stats.tx_bytes += np->get_tx_ctx->skb->len; } - bytes_compl += np->get_tx_ctx->skb->len; dev_kfree_skb_any(np->get_tx_ctx->skb); np->get_tx_ctx->skb = NULL; tx_work++; @@ -2512,9 +2414,6 @@ static int nv_tx_done(struct net_device *dev, int limit) if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) np->get_tx_ctx = np->first_tx_ctx; } - - netdev_completed_queue(np->dev, tx_work, bytes_compl); - if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) { np->tx_stop = 0; netif_wake_queue(dev); @@ -2528,7 +2427,6 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit) u32 flags; int tx_work = 0; struct ring_desc_ex *orig_get_tx = np->get_tx.ex; - unsigned long bytes_cleaned = 0; while ((np->get_tx.ex != np->put_tx.ex) && !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) && @@ -2538,21 +2436,17 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit) if (flags & NV_TX2_LASTPACKET) { if (flags & NV_TX2_ERROR) { - if ((flags & NV_TX2_RETRYERROR) - && !(flags & NV_TX2_RETRYCOUNT_MASK)) { + if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) { if (np->driver_data & DEV_HAS_GEAR_MODE) nv_gear_backoff_reseed(dev); else nv_legacybackoff_reseed(dev); } } else { - u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_packets++; - np->stat_tx_bytes += np->get_tx_ctx->skb->len; - u64_stats_update_end(&np->swstats_tx_syncp); + dev->stats.tx_packets++; + dev->stats.tx_bytes += np->get_tx_ctx->skb->len; } - bytes_cleaned += np->get_tx_ctx->skb->len; dev_kfree_skb_any(np->get_tx_ctx->skb); np->get_tx_ctx->skb = NULL; tx_work++; @@ -2560,15 +2454,11 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit) if (np->tx_limit) nv_tx_flip_ownership(dev); } - if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) np->get_tx.ex = np->first_tx.ex; if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) np->get_tx_ctx = np->first_tx_ctx; } - - netdev_completed_queue(np->dev, tx_work, bytes_cleaned); - if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) { np->tx_stop = 0; netif_wake_queue(dev); @@ -2587,64 +2477,56 @@ static void nv_tx_timeout(struct net_device *dev) u32 status; union ring_type put_tx; int saved_tx_limit; + int i; if (np->msi_flags & NV_MSI_X_ENABLED) status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; else status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; - netdev_warn(dev, "Got tx_timeout. irq status: %08x\n", status); - - if (unlikely(debug_tx_timeout)) { - int i; + netdev_info(dev, "Got tx_timeout. 
irq: %08x\n", status); - netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr); - netdev_info(dev, "Dumping tx registers\n"); - for (i = 0; i <= np->register_size; i += 32) { + netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr); + netdev_info(dev, "Dumping tx registers\n"); + for (i = 0; i <= np->register_size; i += 32) { + netdev_info(dev, + "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", + i, + readl(base + i + 0), readl(base + i + 4), + readl(base + i + 8), readl(base + i + 12), + readl(base + i + 16), readl(base + i + 20), + readl(base + i + 24), readl(base + i + 28)); + } + netdev_info(dev, "Dumping tx ring\n"); + for (i = 0; i < np->tx_ring_size; i += 4) { + if (!nv_optimized(np)) { netdev_info(dev, - "%3x: %08x %08x %08x %08x " - "%08x %08x %08x %08x\n", + "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", i, - readl(base + i + 0), readl(base + i + 4), - readl(base + i + 8), readl(base + i + 12), - readl(base + i + 16), readl(base + i + 20), - readl(base + i + 24), readl(base + i + 28)); - } - netdev_info(dev, "Dumping tx ring\n"); - for (i = 0; i < np->tx_ring_size; i += 4) { - if (!nv_optimized(np)) { - netdev_info(dev, - "%03x: %08x %08x // %08x %08x " - "// %08x %08x // %08x %08x\n", - i, - le32_to_cpu(np->tx_ring.orig[i].buf), - le32_to_cpu(np->tx_ring.orig[i].flaglen), - le32_to_cpu(np->tx_ring.orig[i+1].buf), - le32_to_cpu(np->tx_ring.orig[i+1].flaglen), - le32_to_cpu(np->tx_ring.orig[i+2].buf), - le32_to_cpu(np->tx_ring.orig[i+2].flaglen), - le32_to_cpu(np->tx_ring.orig[i+3].buf), - le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); - } else { - netdev_info(dev, - "%03x: %08x %08x %08x " - "// %08x %08x %08x " - "// %08x %08x %08x " - "// %08x %08x %08x\n", - i, - le32_to_cpu(np->tx_ring.ex[i].bufhigh), - le32_to_cpu(np->tx_ring.ex[i].buflow), - le32_to_cpu(np->tx_ring.ex[i].flaglen), - le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), - le32_to_cpu(np->tx_ring.ex[i+1].buflow), - le32_to_cpu(np->tx_ring.ex[i+1].flaglen), - le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), - le32_to_cpu(np->tx_ring.ex[i+2].buflow), - le32_to_cpu(np->tx_ring.ex[i+2].flaglen), - le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), - le32_to_cpu(np->tx_ring.ex[i+3].buflow), - le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); - } + le32_to_cpu(np->tx_ring.orig[i].buf), + le32_to_cpu(np->tx_ring.orig[i].flaglen), + le32_to_cpu(np->tx_ring.orig[i+1].buf), + le32_to_cpu(np->tx_ring.orig[i+1].flaglen), + le32_to_cpu(np->tx_ring.orig[i+2].buf), + le32_to_cpu(np->tx_ring.orig[i+2].flaglen), + le32_to_cpu(np->tx_ring.orig[i+3].buf), + le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); + } else { + netdev_info(dev, + "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", + i, + le32_to_cpu(np->tx_ring.ex[i].bufhigh), + le32_to_cpu(np->tx_ring.ex[i].buflow), + le32_to_cpu(np->tx_ring.ex[i].flaglen), + le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), + le32_to_cpu(np->tx_ring.ex[i+1].buflow), + le32_to_cpu(np->tx_ring.ex[i+1].flaglen), + le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), + le32_to_cpu(np->tx_ring.ex[i+2].buflow), + le32_to_cpu(np->tx_ring.ex[i+2].flaglen), + le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), + le32_to_cpu(np->tx_ring.ex[i+3].buflow), + le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); } } @@ -2767,11 +2649,8 @@ static int nv_rx_process(struct net_device *dev, int limit) } /* the rest are hard errors */ else { - if (flags & NV_RX_MISSEDFRAME) { - u64_stats_update_begin(&np->swstats_rx_syncp); - np->stat_rx_missed_errors++; - u64_stats_update_end(&np->swstats_rx_syncp); - } + if (flags & 
NV_RX_MISSEDFRAME) + dev->stats.rx_missed_errors++; dev_kfree_skb(skb); goto next_pkt; } @@ -2814,10 +2693,8 @@ static int nv_rx_process(struct net_device *dev, int limit) skb_put(skb, len); skb->protocol = eth_type_trans(skb, dev); napi_gro_receive(&np->napi, skb); - u64_stats_update_begin(&np->swstats_rx_syncp); - np->stat_rx_packets++; - np->stat_rx_bytes += len; - u64_stats_update_end(&np->swstats_rx_syncp); + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; next_pkt: if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) np->get_rx.orig = np->first_rx.orig; @@ -2900,10 +2777,8 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit) __vlan_hwaccel_put_tag(skb, vid); } napi_gro_receive(&np->napi, skb); - u64_stats_update_begin(&np->swstats_rx_syncp); - np->stat_rx_packets++; - np->stat_rx_bytes += len; - u64_stats_update_end(&np->swstats_rx_syncp); + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; } else { dev_kfree_skb(skb); } @@ -3146,73 +3021,6 @@ static void nv_update_pause(struct net_device *dev, u32 pause_flags) } } -static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex) -{ - struct fe_priv *np = netdev_priv(dev); - u8 __iomem *base = get_hwbase(dev); - u32 phyreg, txreg; - int mii_status; - - np->linkspeed = NVREG_LINKSPEED_FORCE|speed; - np->duplex = duplex; - - /* see if gigabit phy */ - mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); - if (mii_status & PHY_GIGABIT) { - np->gigabit = PHY_GIGABIT; - phyreg = readl(base + NvRegSlotTime); - phyreg &= ~(0x3FF00); - if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) - phyreg |= NVREG_SLOTTIME_10_100_FULL; - else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) - phyreg |= NVREG_SLOTTIME_10_100_FULL; - else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) - phyreg |= NVREG_SLOTTIME_1000_FULL; - writel(phyreg, base + NvRegSlotTime); - } - - phyreg = readl(base + NvRegPhyInterface); - phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); - if (np->duplex == 0) - phyreg |= PHY_HALF; - if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) - phyreg |= PHY_100; - else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == - NVREG_LINKSPEED_1000) - phyreg |= PHY_1000; - writel(phyreg, base + NvRegPhyInterface); - - if (phyreg & PHY_RGMII) { - if ((np->linkspeed & NVREG_LINKSPEED_MASK) == - NVREG_LINKSPEED_1000) - txreg = NVREG_TX_DEFERRAL_RGMII_1000; - else - txreg = NVREG_TX_DEFERRAL_RGMII_10_100; - } else { - txreg = NVREG_TX_DEFERRAL_DEFAULT; - } - writel(txreg, base + NvRegTxDeferral); - - if (np->desc_ver == DESC_VER_1) { - txreg = NVREG_TX_WM_DESC1_DEFAULT; - } else { - if ((np->linkspeed & NVREG_LINKSPEED_MASK) == - NVREG_LINKSPEED_1000) - txreg = NVREG_TX_WM_DESC2_3_1000; - else - txreg = NVREG_TX_WM_DESC2_3_DEFAULT; - } - writel(txreg, base + NvRegTxWatermark); - - writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD), - base + NvRegMisc1); - pci_push(base); - writel(np->linkspeed, base + NvRegLinkSpeed); - pci_push(base); - - return; -} - /** * nv_update_linkspeed: Setup the MAC according to the link partner * @dev: Network device to be configured @@ -3234,25 +3042,11 @@ static int nv_update_linkspeed(struct net_device *dev) int newls = np->linkspeed; int newdup = np->duplex; int mii_status; - u32 bmcr; int retval = 0; u32 control_1000, status_1000, phyreg, pause_flags, txreg; u32 txrxFlags = 0; u32 phy_exp; - /* If device loopback is enabled, set carrier on and enable max link - * speed. 
- */ - bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); - if (bmcr & BMCR_LOOPBACK) { - if (netif_running(dev)) { - nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1); - if (!netif_carrier_ok(dev)) - netif_carrier_on(dev); - } - return 1; - } - /* BMSR_LSTATUS is latched, read it twice: * we want the current value. */ @@ -3935,7 +3729,6 @@ static int nv_request_irq(struct net_device *dev, int intr_test) writel(0, base + NvRegMSIXMap0); writel(0, base + NvRegMSIXMap1); } - netdev_info(dev, "MSI-X enabled\n"); } } if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { @@ -3957,7 +3750,6 @@ static int nv_request_irq(struct net_device *dev, int intr_test) writel(0, base + NvRegMSIMap1); /* enable msi vector 0 */ writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); - netdev_info(dev, "MSI enabled\n"); } } if (ret != 0) { @@ -4112,18 +3904,11 @@ static void nv_poll_controller(struct net_device *dev) #endif static void nv_do_stats_poll(unsigned long data) - __acquires(&netdev_priv(dev)->hwstats_lock) - __releases(&netdev_priv(dev)->hwstats_lock) { struct net_device *dev = (struct net_device *) data; struct fe_priv *np = netdev_priv(dev); - /* If lock is currently taken, the stats are being refreshed - * and hence fresh enough */ - if (spin_trylock(&np->hwstats_lock)) { - nv_update_stats(dev); - spin_unlock(&np->hwstats_lock); - } + nv_get_hw_stats(dev); if (!np->in_shutdown) mod_timer(&np->stats_poll, @@ -4133,9 +3918,9 @@ static void nv_do_stats_poll(unsigned long data) static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct fe_priv *np = netdev_priv(dev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, FORCEDETH_VERSION); + strcpy(info->bus_info, pci_name(np->pci_dev)); } static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) @@ -4688,63 +4473,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* return 0; } -static int nv_set_loopback(struct net_device *dev, netdev_features_t features) -{ - struct fe_priv *np = netdev_priv(dev); - unsigned long flags; - u32 miicontrol; - int err, retval = 0; - - spin_lock_irqsave(&np->lock, flags); - miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); - if (features & NETIF_F_LOOPBACK) { - if (miicontrol & BMCR_LOOPBACK) { - spin_unlock_irqrestore(&np->lock, flags); - netdev_info(dev, "Loopback already enabled\n"); - return 0; - } - nv_disable_irq(dev); - /* Turn on loopback mode */ - miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000; - err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol); - if (err) { - retval = PHY_ERROR; - spin_unlock_irqrestore(&np->lock, flags); - phy_init(dev); - } else { - if (netif_running(dev)) { - /* Force 1000 Mbps full-duplex */ - nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, - 1); - /* Force link up */ - netif_carrier_on(dev); - } - spin_unlock_irqrestore(&np->lock, flags); - netdev_info(dev, - "Internal PHY loopback mode enabled.\n"); - } - } else { - if (!(miicontrol & BMCR_LOOPBACK)) { - spin_unlock_irqrestore(&np->lock, flags); - netdev_info(dev, "Loopback already disabled\n"); - return 0; - } - nv_disable_irq(dev); - /* Turn off loopback */ - spin_unlock_irqrestore(&np->lock, flags); - netdev_info(dev, "Internal PHY loopback mode disabled.\n"); - phy_init(dev); - } - msleep(500); - spin_lock_irqsave(&np->lock, 
flags); - nv_enable_irq(dev); - spin_unlock_irqrestore(&np->lock, flags); - - return retval; -} - -static netdev_features_t nv_fix_features(struct net_device *dev, - netdev_features_t features) +static u32 nv_fix_features(struct net_device *dev, u32 features) { /* vlan is dependent on rx checksum offload */ if (features & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)) @@ -4753,7 +4482,7 @@ static netdev_features_t nv_fix_features(struct net_device *dev, return features; } -static void nv_vlan_mode(struct net_device *dev, netdev_features_t features) +static void nv_vlan_mode(struct net_device *dev, u32 features) { struct fe_priv *np = get_nvpriv(dev); @@ -4774,18 +4503,11 @@ static void nv_vlan_mode(struct net_device *dev, netdev_features_t features) spin_unlock_irq(&np->lock); } -static int nv_set_features(struct net_device *dev, netdev_features_t features) +static int nv_set_features(struct net_device *dev, u32 features) { struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); - netdev_features_t changed = dev->features ^ features; - int retval; - - if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) { - retval = nv_set_loopback(dev, features); - if (retval != 0) - return retval; - } + u32 changed = dev->features ^ features; if (changed & NETIF_F_RXCSUM) { spin_lock_irq(&np->lock); @@ -4831,18 +4553,14 @@ static int nv_get_sset_count(struct net_device *dev, int sset) } } -static void nv_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *estats, u64 *buffer) - __acquires(&netdev_priv(dev)->hwstats_lock) - __releases(&netdev_priv(dev)->hwstats_lock) +static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer) { struct fe_priv *np = netdev_priv(dev); - spin_lock_bh(&np->hwstats_lock); - nv_update_stats(dev); - memcpy(buffer, &np->estats, - nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); - spin_unlock_bh(&np->hwstats_lock); + /* update stats */ + nv_get_hw_stats(dev); + + memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); } static int nv_link_test(struct net_device *dev) @@ -5424,12 +5142,6 @@ static int nv_open(struct net_device *dev) spin_unlock_irq(&np->lock); - /* If the loopback feature was set while the device was down, make sure - * that it's set correctly now. 
- */ - if (dev->features & NETIF_F_LOOPBACK) - nv_set_loopback(dev, dev->features); - return 0; out_drain: nv_drain_rxtx(dev); @@ -5486,7 +5198,7 @@ static int nv_close(struct net_device *dev) static const struct net_device_ops nv_netdev_ops = { .ndo_open = nv_open, .ndo_stop = nv_close, - .ndo_get_stats64 = nv_get_stats64, + .ndo_get_stats = nv_get_stats, .ndo_start_xmit = nv_start_xmit, .ndo_tx_timeout = nv_tx_timeout, .ndo_change_mtu = nv_change_mtu, @@ -5503,7 +5215,7 @@ static const struct net_device_ops nv_netdev_ops = { static const struct net_device_ops nv_netdev_ops_optimized = { .ndo_open = nv_open, .ndo_stop = nv_close, - .ndo_get_stats64 = nv_get_stats64, + .ndo_get_stats = nv_get_stats, .ndo_start_xmit = nv_start_xmit_optimized, .ndo_tx_timeout = nv_tx_timeout, .ndo_change_mtu = nv_change_mtu, @@ -5542,7 +5254,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i np->dev = dev; np->pci_dev = pci_dev; spin_lock_init(&np->lock); - spin_lock_init(&np->hwstats_lock); SET_NETDEV_DEV(dev, &pci_dev->dev); init_timer(&np->oom_kick); @@ -5551,7 +5262,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i init_timer(&np->nic_poll); np->nic_poll.data = (unsigned long) dev; np->nic_poll.function = nv_do_nic_poll; /* timer handler */ - init_timer_deferrable(&np->stats_poll); + init_timer(&np->stats_poll); np->stats_poll.data = (unsigned long) dev; np->stats_poll.function = nv_do_stats_poll; /* timer handler */ @@ -5635,9 +5346,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i dev->features |= dev->hw_features; - /* Add loopback capability to the device. */ - dev->hw_features |= NETIF_F_LOOPBACK; - np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) || (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) || @@ -5913,14 +5621,12 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); - dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n", + dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n", dev->features & NETIF_F_HIGHDMA ? "highdma " : "", dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ? "csum " : "", dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ? "vlan " : "", - dev->features & (NETIF_F_LOOPBACK) ? - "loopback " : "", id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "", id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "", id->driver_data & DEV_NEED_TIMERIRQ ? 
"timirq " : "", @@ -6294,9 +6000,6 @@ module_param(phy_cross, int, 0); MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0."); module_param(phy_power_down, int, 0); MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0)."); -module_param(debug_tx_timeout, bool, 0); -MODULE_PARM_DESC(debug_tx_timeout, - "Dump tx related registers and ring when tx_timeout happens"); MODULE_AUTHOR("Manfred Spraul "); MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); diff --git a/trunk/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/trunk/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c index ac4e72d529e5..8c8027176bef 100644 --- a/trunk/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c +++ b/trunk/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c @@ -161,10 +161,10 @@ static void pch_gbe_get_drvinfo(struct net_device *netdev, { struct pch_gbe_adapter *adapter = netdev_priv(netdev); - strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, pch_driver_version, sizeof(drvinfo->version)); - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info)); + strcpy(drvinfo->driver, KBUILD_MODNAME); + strcpy(drvinfo->version, pch_driver_version); + strcpy(drvinfo->fw_version, "N/A"); + strcpy(drvinfo->bus_info, pci_name(adapter->pdev)); drvinfo->regdump_len = pch_gbe_get_regs_len(netdev); } diff --git a/trunk/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/trunk/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 964e9c0948bc..48406ca382f1 100644 --- a/trunk/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/trunk/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -2109,11 +2109,10 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu) * Returns * 0: HW state updated successfully */ -static int pch_gbe_set_features(struct net_device *netdev, - netdev_features_t features) +static int pch_gbe_set_features(struct net_device *netdev, u32 features) { struct pch_gbe_adapter *adapter = netdev_priv(netdev); - netdev_features_t changed = features ^ netdev->features; + u32 changed = features ^ netdev->features; if (!(changed & NETIF_F_RXCSUM)) return 0; diff --git a/trunk/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/trunk/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c index 8a371985319f..e09ea83b8c47 100644 --- a/trunk/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c +++ b/trunk/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c @@ -83,18 +83,14 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) u32 fw_minor = 0; u32 fw_build = 0; - strlcpy(drvinfo->driver, netxen_nic_driver_name, - sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, - sizeof(drvinfo->version)); + strncpy(drvinfo->driver, netxen_nic_driver_name, 32); + strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32); fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR); fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB); - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%d.%d.%d", fw_major, fw_minor, fw_build); + sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build); - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info)); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 
drvinfo->regdump_len = NETXEN_NIC_REGS_LEN; drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev); } diff --git a/trunk/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/trunk/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 7dd9a4b107e6..8cf3173ba488 100644 --- a/trunk/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/trunk/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -544,8 +544,7 @@ static void netxen_set_multicast_list(struct net_device *dev) adapter->set_multi(dev); } -static netdev_features_t netxen_fix_features(struct net_device *dev, - netdev_features_t features) +static u32 netxen_fix_features(struct net_device *dev, u32 features) { if (!(features & NETIF_F_RXCSUM)) { netdev_info(dev, "disabling LRO as RXCSUM is off\n"); @@ -556,8 +555,7 @@ static netdev_features_t netxen_fix_features(struct net_device *dev, return features; } -static int netxen_set_features(struct net_device *dev, - netdev_features_t features) +static int netxen_set_features(struct net_device *dev, u32 features) { struct netxen_adapter *adapter = netdev_priv(dev); int hw_lro; diff --git a/trunk/drivers/net/ethernet/qlogic/qla3xxx.c b/trunk/drivers/net/ethernet/qlogic/qla3xxx.c index 7931531c3a40..a4bdff438a5e 100644 --- a/trunk/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/trunk/drivers/net/ethernet/qlogic/qla3xxx.c @@ -1735,11 +1735,10 @@ static void ql_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *drvinfo) { struct ql3_adapter *qdev = netdev_priv(ndev); - strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, ql3xxx_driver_version, - sizeof(drvinfo->version)); - strlcpy(drvinfo->bus_info, pci_name(qdev->pdev), - sizeof(drvinfo->bus_info)); + strncpy(drvinfo->driver, ql3xxx_driver_name, 32); + strncpy(drvinfo->version, ql3xxx_driver_version, 32); + strncpy(drvinfo->fw_version, "N/A", 32); + strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32); drvinfo->regdump_len = 0; drvinfo->eedump_len = 0; } diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 60976fc4ccc6..7ed53dbb8646 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -1466,9 +1466,8 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup); int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu); int qlcnic_change_mtu(struct net_device *netdev, int new_mtu); -netdev_features_t qlcnic_fix_features(struct net_device *netdev, - netdev_features_t features); -int qlcnic_set_features(struct net_device *netdev, netdev_features_t features); +u32 qlcnic_fix_features(struct net_device *netdev, u32 features); +int qlcnic_set_features(struct net_device *netdev, u32 features); int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable); int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable); int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter); diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index cc228cf3d84b..8aa1c6e8667b 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -140,14 +140,11 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR); fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR); fw_build = QLCRD32(adapter, 
QLCNIC_FW_VERSION_SUB); - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%d.%d.%d", fw_major, fw_minor, fw_build); - - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info)); - strlcpy(drvinfo->driver, qlcnic_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, - sizeof(drvinfo->version)); + sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + strlcpy(drvinfo->driver, qlcnic_driver_name, 32); + strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32); } static int diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index b528e52a8ee1..bcb81e47543a 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -817,13 +817,12 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu) } -netdev_features_t qlcnic_fix_features(struct net_device *netdev, - netdev_features_t features) +u32 qlcnic_fix_features(struct net_device *netdev, u32 features) { struct qlcnic_adapter *adapter = netdev_priv(netdev); if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) { - netdev_features_t changed = features ^ netdev->features; + u32 changed = features ^ netdev->features; features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM); } @@ -834,10 +833,10 @@ netdev_features_t qlcnic_fix_features(struct net_device *netdev, } -int qlcnic_set_features(struct net_device *netdev, netdev_features_t features) +int qlcnic_set_features(struct net_device *netdev, u32 features) { struct qlcnic_adapter *adapter = netdev_priv(netdev); - netdev_features_t changed = netdev->features ^ features; + u32 changed = netdev->features ^ features; int hw_lro = (features & NETIF_F_LRO) ? 
QLCNIC_LRO_ENABLED : 0; if (!(changed & NETIF_F_LRO)) diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 69b8e4ef14d9..0bd163828e33 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -97,8 +97,8 @@ static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32); static int qlcnicvf_start_firmware(struct qlcnic_adapter *); static void qlcnic_set_netdev_features(struct qlcnic_adapter *, struct qlcnic_esw_func_cfg *); -static int qlcnic_vlan_rx_add(struct net_device *, u16); -static int qlcnic_vlan_rx_del(struct net_device *, u16); +static void qlcnic_vlan_rx_add(struct net_device *, u16); +static void qlcnic_vlan_rx_del(struct net_device *, u16); /* PCI Device ID Table */ #define ENTRY(device) \ @@ -735,22 +735,20 @@ qlcnic_set_vlan_config(struct qlcnic_adapter *adapter, adapter->pvid = 0; } -static int +static void qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid) { struct qlcnic_adapter *adapter = netdev_priv(netdev); set_bit(vid, adapter->vlans); - return 0; } -static int +static void qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid) { struct qlcnic_adapter *adapter = netdev_priv(netdev); qlcnic_restore_indev_addr(netdev, NETDEV_DOWN); clear_bit(vid, adapter->vlans); - return 0; } static void @@ -794,7 +792,7 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter, struct qlcnic_esw_func_cfg *esw_cfg) { struct net_device *netdev = adapter->netdev; - netdev_features_t features, vlan_features; + unsigned long features, vlan_features; features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO); diff --git a/trunk/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/trunk/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c index 8e2c2a74f3a5..9b67bfea035f 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c +++ b/trunk/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c @@ -366,16 +366,13 @@ static void ql_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *drvinfo) { struct ql_adapter *qdev = netdev_priv(ndev); - strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, qlge_driver_version, - sizeof(drvinfo->version)); - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "v%d.%d.%d", + strncpy(drvinfo->driver, qlge_driver_name, 32); + strncpy(drvinfo->version, qlge_driver_version, 32); + snprintf(drvinfo->fw_version, 32, "v%d.%d.%d", (qdev->fw_rev_id & 0x00ff0000) >> 16, (qdev->fw_rev_id & 0x0000ff00) >> 8, (qdev->fw_rev_id & 0x000000ff)); - strlcpy(drvinfo->bus_info, pci_name(qdev->pdev), - sizeof(drvinfo->bus_info)); + strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32); drvinfo->n_stats = 0; drvinfo->testinfo_len = 0; if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) diff --git a/trunk/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/trunk/drivers/net/ethernet/qlogic/qlge/qlge_main.c index b54898737284..c92afcd912e2 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/trunk/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2307,7 +2307,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget) return work_done; } -static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features) +static void qlge_vlan_mode(struct net_device *ndev, u32 features) { struct ql_adapter *qdev = netdev_priv(ndev); @@ -2323,8 +2323,7 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t 
features) } } -static netdev_features_t qlge_fix_features(struct net_device *ndev, - netdev_features_t features) +static u32 qlge_fix_features(struct net_device *ndev, u32 features) { /* * Since there is no support for separate rx/tx vlan accel @@ -2338,10 +2337,9 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev, return features; } -static int qlge_set_features(struct net_device *ndev, - netdev_features_t features) +static int qlge_set_features(struct net_device *ndev, u32 features) { - netdev_features_t changed = ndev->features ^ features; + u32 changed = ndev->features ^ features; if (changed & NETIF_F_HW_VLAN_RX) qlge_vlan_mode(ndev, features); @@ -2349,66 +2347,56 @@ static int qlge_set_features(struct net_device *ndev, return 0; } -static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid) +static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid) { u32 enable_bit = MAC_ADDR_E; - int err; - err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit, - MAC_ADDR_TYPE_VLAN, vid); - if (err) + if (ql_set_mac_addr_reg + (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { netif_err(qdev, ifup, qdev->ndev, "Failed to init vlan address.\n"); - return err; + } } -static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid) +static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid) { struct ql_adapter *qdev = netdev_priv(ndev); int status; - int err; status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); if (status) - return status; + return; - err = __qlge_vlan_rx_add_vid(qdev, vid); + __qlge_vlan_rx_add_vid(qdev, vid); set_bit(vid, qdev->active_vlans); ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); - - return err; } -static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid) +static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid) { u32 enable_bit = 0; - int err; - err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit, - MAC_ADDR_TYPE_VLAN, vid); - if (err) + if (ql_set_mac_addr_reg + (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { netif_err(qdev, ifup, qdev->ndev, "Failed to clear vlan address.\n"); - return err; + } } -static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) +static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) { struct ql_adapter *qdev = netdev_priv(ndev); int status; - int err; status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); if (status) - return status; + return; - err = __qlge_vlan_rx_kill_vid(qdev, vid); + __qlge_vlan_rx_kill_vid(qdev, vid); clear_bit(vid, qdev->active_vlans); ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); - - return err; } static void qlge_restore_vlan(struct ql_adapter *qdev) diff --git a/trunk/drivers/net/ethernet/rdc/r6040.c b/trunk/drivers/net/ethernet/rdc/r6040.c index 87aa43935070..4bf68cfef390 100644 --- a/trunk/drivers/net/ethernet/rdc/r6040.c +++ b/trunk/drivers/net/ethernet/rdc/r6040.c @@ -52,6 +52,12 @@ #define DRV_VERSION "0.28" #define DRV_RELDATE "07Oct2011" +/* PHY CHIP Address */ +#define PHY1_ADDR 1 /* For MAC1 */ +#define PHY2_ADDR 3 /* For MAC2 */ +#define PHY_MODE 0x3100 /* PHY CHIP Register 0 */ +#define PHY_CAP 0x01E1 /* PHY CHIP Register 4 */ + /* Time in jiffies before concluding the transmitter is hung. 
*/ #define TX_TIMEOUT (6000 * HZ / 1000) @@ -63,11 +69,8 @@ /* MAC registers */ #define MCR0 0x00 /* Control register 0 */ -#define MCR0_RCVEN 0x0002 /* Receive enable */ #define MCR0_PROMISC 0x0020 /* Promiscuous mode */ #define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */ -#define MCR0_XMTEN 0x1000 /* Transmission enable */ -#define MCR0_FD 0x8000 /* Full/Half duplex */ #define MCR1 0x04 /* Control register 1 */ #define MAC_RST 0x0001 /* Reset the MAC */ #define MBCR 0x08 /* Bus control */ @@ -126,7 +129,6 @@ #define PHY_CC 0x88 /* PHY status change configuration register */ #define PHY_ST 0x8A /* PHY status register */ #define MAC_SM 0xAC /* MAC status machine */ -#define MAC_SM_RST 0x0002 /* MAC status machine reset */ #define MAC_ID 0xBE /* Identifier register */ #define TX_DCNT 0x80 /* TX descriptor count */ @@ -152,6 +154,9 @@ #define DSC_RX_MIDH_HIT 0x0004 /* RX MID table hit (no error) */ #define DSC_RX_IDX_MID_MASK 3 /* RX mask for the index of matched MIDx */ +/* PHY settings */ +#define ICPLUS_PHY_ID 0x0243 + MODULE_AUTHOR("Sten Wang ," "Daniel Gimpelevich ," "Florian Fainelli "); @@ -173,7 +178,7 @@ struct r6040_descriptor { struct r6040_descriptor *vndescp; /* 14-17 */ struct sk_buff *skb_ptr; /* 18-1B */ u32 rev2; /* 1C-1F */ -} __aligned(32); +} __attribute__((aligned(32))); struct r6040_private { spinlock_t lock; /* driver lock */ @@ -186,7 +191,7 @@ struct r6040_private { struct r6040_descriptor *tx_ring; dma_addr_t rx_ring_dma; dma_addr_t tx_ring_dma; - u16 tx_free_desc; + u16 tx_free_desc, phy_addr; u16 mcr0, mcr1; struct net_device *dev; struct mii_bus *mii_bus; @@ -201,6 +206,8 @@ static char version[] __devinitdata = DRV_NAME ": RDC R6040 NAPI net driver," "version "DRV_VERSION " (" DRV_RELDATE ")"; +static int phy_table[] = { PHY1_ADDR, PHY2_ADDR }; + /* Read a word data from PHY Chip */ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg) { @@ -372,11 +379,11 @@ static void r6040_init_mac_regs(struct net_device *dev) iowrite16(MAC_RST, ioaddr + MCR1); while (limit--) { cmd = ioread16(ioaddr + MCR1); - if (cmd & MAC_RST) + if (cmd & 0x1) break; } /* Reset internal state machine */ - iowrite16(MAC_SM_RST, ioaddr + MAC_SM); + iowrite16(2, ioaddr + MAC_SM); iowrite16(0, ioaddr + MAC_SM); mdelay(5); @@ -402,7 +409,7 @@ static void r6040_init_mac_regs(struct net_device *dev) iowrite16(INT_MASK, ioaddr + MIER); /* Enable TX and RX */ - iowrite16(lp->mcr0 | MCR0_RCVEN, ioaddr); + iowrite16(lp->mcr0 | 0x0002, ioaddr); /* Let TX poll the descriptors * we may got called by r6040_tx_timeout which has left @@ -454,7 +461,7 @@ static void r6040_down(struct net_device *dev) iowrite16(MAC_RST, ioaddr + MCR1); /* Reset RDC MAC */ while (limit--) { cmd = ioread16(ioaddr + MCR1); - if (cmd & MAC_RST) + if (cmd & 0x1) break; } @@ -735,10 +742,9 @@ static void r6040_mac_address(struct net_device *dev) void __iomem *ioaddr = lp->base; u16 *adrp; - /* Reset MAC */ - iowrite16(MAC_RST, ioaddr + MCR1); - /* Reset internal state machine */ - iowrite16(MAC_SM_RST, ioaddr + MAC_SM); + /* MAC operation register */ + iowrite16(0x01, ioaddr + MCR1); /* Reset MAC */ + iowrite16(2, ioaddr + MAC_SM); /* Reset internal state machine */ iowrite16(0, ioaddr + MAC_SM); mdelay(5); @@ -1007,7 +1013,7 @@ static void r6040_adjust_link(struct net_device *dev) /* reflect duplex change */ if (phydev->link && (lp->old_duplex != phydev->duplex)) { - lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? MCR0_FD : 0); + lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? 
0x8000 : 0); iowrite16(lp->mcr0, ioaddr); status_changed = 1; @@ -1160,7 +1166,8 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, lp->dev = dev; /* Init RDC private data */ - lp->mcr0 = MCR0_XMTEN | MCR0; + lp->mcr0 = 0x1002; + lp->phy_addr = phy_table[card_idx]; /* The RDC-specific entries in the device structure. */ dev->netdev_ops = &r6040_netdev_ops; @@ -1181,8 +1188,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, lp->mii_bus->write = r6040_mdiobus_write; lp->mii_bus->reset = r6040_mdiobus_reset; lp->mii_bus->name = "r6040_eth_mii"; - snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", - dev_name(&pdev->dev), card_idx); + snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", card_idx); lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); if (!lp->mii_bus->irq) { dev_err(&pdev->dev, "mii_bus irq allocation failed\n"); diff --git a/trunk/drivers/net/ethernet/realtek/8139cp.c b/trunk/drivers/net/ethernet/realtek/8139cp.c index cc6b391479ca..ee5da9293ce0 100644 --- a/trunk/drivers/net/ethernet/realtek/8139cp.c +++ b/trunk/drivers/net/ethernet/realtek/8139cp.c @@ -859,6 +859,7 @@ static void __cp_set_rx_mode (struct net_device *dev) struct cp_private *cp = netdev_priv(dev); u32 mc_filter[2]; /* Multicast hash filter */ int rx_mode; + u32 tmp; /* Note: do not reorder, GCC is clever about common statements. */ if (dev->flags & IFF_PROMISC) { @@ -885,9 +886,11 @@ static void __cp_set_rx_mode (struct net_device *dev) } /* We can safely update without stopping the chip. */ - cp->rx_config = cp_rx_config | rx_mode; - cpw32_f(RxConfig, cp->rx_config); - + tmp = cp_rx_config | rx_mode; + if (cp->rx_config != tmp) { + cpw32_f (RxConfig, tmp); + cp->rx_config = tmp; + } cpw32_f (MAR0 + 0, mc_filter[0]); cpw32_f (MAR0 + 4, mc_filter[1]); } @@ -1316,9 +1319,9 @@ static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info { struct cp_private *cp = netdev_priv(dev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info)); + strcpy (info->driver, DRV_NAME); + strcpy (info->version, DRV_VERSION); + strcpy (info->bus_info, pci_name(cp->pdev)); } static void cp_get_ringparam(struct net_device *dev, @@ -1389,7 +1392,7 @@ static void cp_set_msglevel(struct net_device *dev, u32 value) cp->msg_enable = value; } -static int cp_set_features(struct net_device *dev, netdev_features_t features) +static int cp_set_features(struct net_device *dev, u32 features) { struct cp_private *cp = netdev_priv(dev); unsigned long flags; @@ -1586,7 +1589,7 @@ static int cp_set_mac_address(struct net_device *dev, void *p) No extra delay is needed with 33Mhz PCI, but 66Mhz may change this. */ -#define eeprom_delay() readb(ee_addr) +#define eeprom_delay() readl(ee_addr) /* The EEPROM commands include the alway-set leading bit. */ #define EE_EXTEND_CMD (4) diff --git a/trunk/drivers/net/ethernet/realtek/8139too.c b/trunk/drivers/net/ethernet/realtek/8139too.c index a8779bedb3d9..4d6b254fc6c1 100644 --- a/trunk/drivers/net/ethernet/realtek/8139too.c +++ b/trunk/drivers/net/ethernet/realtek/8139too.c @@ -1122,7 +1122,7 @@ static void __devexit rtl8139_remove_one (struct pci_dev *pdev) No extra delay is needed with 33Mhz PCI, but 66Mhz may change this. */ -#define eeprom_delay() (void)RTL_R8(Cfg9346) +#define eeprom_delay() (void)RTL_R32(Cfg9346) /* The EEPROM commands include the alway-set leading bit. 
*/ #define EE_WRITE_CMD (5) @@ -2330,9 +2330,9 @@ static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct rtl8139_private *tp = netdev_priv(dev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, pci_name(tp->pci_dev)); info->regdump_len = tp->regs_len; } diff --git a/trunk/drivers/net/ethernet/realtek/r8169.c b/trunk/drivers/net/ethernet/realtek/r8169.c index 7a0c800b50ad..c8f47f17186f 100644 --- a/trunk/drivers/net/ethernet/realtek/r8169.c +++ b/trunk/drivers/net/ethernet/realtek/r8169.c @@ -69,6 +69,9 @@ The RTL chips use a 64 element hash table based on the Ethernet CRC. */ static const int multicast_filter_limit = 32; +/* MAC address length */ +#define MAC_ADDR_LEN 6 + #define MAX_READ_REQUEST_SHIFT 12 #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ #define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */ @@ -1403,13 +1406,12 @@ static void rtl8169_get_drvinfo(struct net_device *dev, struct rtl8169_private *tp = netdev_priv(dev); struct rtl_fw *rtl_fw = tp->rtl_fw; - strlcpy(info->driver, MODULENAME, sizeof(info->driver)); - strlcpy(info->version, RTL8169_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); + strcpy(info->driver, MODULENAME); + strcpy(info->version, RTL8169_VERSION); + strcpy(info->bus_info, pci_name(tp->pci_dev)); BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); - if (!IS_ERR_OR_NULL(rtl_fw)) - strlcpy(info->fw_version, rtl_fw->version, - sizeof(info->fw_version)); + strcpy(info->fw_version, IS_ERR_OR_NULL(rtl_fw) ? 
"N/A" : + rtl_fw->version); } static int rtl8169_get_regs_len(struct net_device *dev) @@ -1553,8 +1555,7 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) return ret; } -static netdev_features_t rtl8169_fix_features(struct net_device *dev, - netdev_features_t features) +static u32 rtl8169_fix_features(struct net_device *dev, u32 features) { struct rtl8169_private *tp = netdev_priv(dev); @@ -1568,8 +1569,7 @@ static netdev_features_t rtl8169_fix_features(struct net_device *dev, return features; } -static int rtl8169_set_features(struct net_device *dev, - netdev_features_t features) +static int rtl8169_set_features(struct net_device *dev, u32 features) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; @@ -4101,7 +4101,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&tp->lock); /* Get MAC address */ - for (i = 0; i < ETH_ALEN; i++) + for (i = 0; i < MAC_ADDR_LEN; i++) dev->dev_addr[i] = RTL_R8(MAC0 + i); memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); diff --git a/trunk/drivers/net/ethernet/renesas/sh_eth.c b/trunk/drivers/net/ethernet/renesas/sh_eth.c index fc9bda9bc36c..9b230740c6ab 100644 --- a/trunk/drivers/net/ethernet/renesas/sh_eth.c +++ b/trunk/drivers/net/ethernet/renesas/sh_eth.c @@ -1369,13 +1369,13 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data) } } -static const struct ethtool_ops sh_eth_ethtool_ops = { +static struct ethtool_ops sh_eth_ethtool_ops = { .get_settings = sh_eth_get_settings, .set_settings = sh_eth_set_settings, - .nway_reset = sh_eth_nway_reset, + .nway_reset = sh_eth_nway_reset, .get_msglevel = sh_eth_get_msglevel, .set_msglevel = sh_eth_set_msglevel, - .get_link = ethtool_op_get_link, + .get_link = ethtool_op_get_link, .get_strings = sh_eth_get_strings, .get_ethtool_stats = sh_eth_get_ethtool_stats, .get_sset_count = sh_eth_get_sset_count, @@ -1957,7 +1957,18 @@ static struct platform_driver sh_eth_driver = { }, }; -module_platform_driver(sh_eth_driver); +static int __init sh_eth_init(void) +{ + return platform_driver_register(&sh_eth_driver); +} + +static void __exit sh_eth_cleanup(void) +{ + platform_driver_unregister(&sh_eth_driver); +} + +module_init(sh_eth_init); +module_exit(sh_eth_cleanup); MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda"); MODULE_DESCRIPTION("Renesas SuperH Ethernet driver"); diff --git a/trunk/drivers/net/ethernet/seeq/sgiseeq.c b/trunk/drivers/net/ethernet/seeq/sgiseeq.c index f955a19eb22f..c3673f151a41 100644 --- a/trunk/drivers/net/ethernet/seeq/sgiseeq.c +++ b/trunk/drivers/net/ethernet/seeq/sgiseeq.c @@ -834,7 +834,23 @@ static struct platform_driver sgiseeq_driver = { } }; -module_platform_driver(sgiseeq_driver); +static int __init sgiseeq_module_init(void) +{ + if (platform_driver_register(&sgiseeq_driver)) { + printk(KERN_ERR "Driver registration failed\n"); + return -ENODEV; + } + + return 0; +} + +static void __exit sgiseeq_module_exit(void) +{ + platform_driver_unregister(&sgiseeq_driver); +} + +module_init(sgiseeq_module_init); +module_exit(sgiseeq_module_exit); MODULE_DESCRIPTION("SGI Seeq 8003 driver"); MODULE_AUTHOR("Linux/MIPS Mailing List "); diff --git a/trunk/drivers/net/ethernet/sfc/efx.c b/trunk/drivers/net/ethernet/sfc/efx.c index e43702f33b62..d5731f1fe6d6 100644 --- a/trunk/drivers/net/ethernet/sfc/efx.c +++ b/trunk/drivers/net/ethernet/sfc/efx.c @@ -1336,8 +1336,7 @@ static int efx_probe_nic(struct efx_nic *efx) if (efx->n_channels > 1) 
get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) - efx->rx_indir_table[i] = - ethtool_rxfh_indir_default(i, efx->n_rx_channels); + efx->rx_indir_table[i] = i % efx->n_rx_channels; efx_set_channels(efx); netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); @@ -1901,7 +1900,7 @@ static void efx_set_multicast_list(struct net_device *net_dev) /* Otherwise efx_start_port() will do this */ } -static int efx_set_features(struct net_device *net_dev, netdev_features_t data) +static int efx_set_features(struct net_device *net_dev, u32 data) { struct efx_nic *efx = netdev_priv(net_dev); @@ -2236,9 +2235,9 @@ static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = { {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, PCI_DEVICE_ID_SOLARFLARE_SFC4000B), .driver_data = (unsigned long) &falcon_b0_nic_type}, - {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */ + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, BETHPAGE_A_P_DEVID), .driver_data = (unsigned long) &siena_a0_nic_type}, - {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */ + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, SIENA_A_P_DEVID), .driver_data = (unsigned long) &siena_a0_nic_type}, {0} /* end of list */ }; diff --git a/trunk/drivers/net/ethernet/sfc/efx.h b/trunk/drivers/net/ethernet/sfc/efx.h index a3541ac6ea01..4764793ed234 100644 --- a/trunk/drivers/net/ethernet/sfc/efx.h +++ b/trunk/drivers/net/ethernet/sfc/efx.h @@ -14,6 +14,10 @@ #include "net_driver.h" #include "filter.h" +/* PCI IDs */ +#define BETHPAGE_A_P_DEVID 0x0803 +#define SIENA_A_P_DEVID 0x0813 + /* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */ #define EFX_MEM_BAR 2 @@ -61,23 +65,13 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); extern int efx_probe_filters(struct efx_nic *efx); extern void efx_restore_filters(struct efx_nic *efx); extern void efx_remove_filters(struct efx_nic *efx); -extern s32 efx_filter_insert_filter(struct efx_nic *efx, +extern int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, bool replace); -extern int efx_filter_remove_id_safe(struct efx_nic *efx, - enum efx_filter_priority priority, - u32 filter_id); -extern int efx_filter_get_filter_safe(struct efx_nic *efx, - enum efx_filter_priority priority, - u32 filter_id, struct efx_filter_spec *); +extern int efx_filter_remove_filter(struct efx_nic *efx, + struct efx_filter_spec *spec); extern void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority); -extern u32 efx_filter_count_rx_used(struct efx_nic *efx, - enum efx_filter_priority priority); -extern u32 efx_filter_get_rx_id_limit(struct efx_nic *efx); -extern s32 efx_filter_get_rx_ids(struct efx_nic *efx, - enum efx_filter_priority priority, - u32 *buf, u32 size); #ifdef CONFIG_RFS_ACCEL extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, u16 rxq_index, u32 flow_id); diff --git a/trunk/drivers/net/ethernet/sfc/ethtool.c b/trunk/drivers/net/ethernet/sfc/ethtool.c index 29b2ebfef19f..f3cd96dfa398 100644 --- a/trunk/drivers/net/ethernet/sfc/ethtool.c +++ b/trunk/drivers/net/ethernet/sfc/ethtool.c @@ -818,58 +818,9 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) return efx_reset(efx, rc); } -static int efx_ethtool_get_class_rule(struct efx_nic *efx, - struct ethtool_rx_flow_spec *rule) -{ - struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; - struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; - struct efx_filter_spec 
spec; - u16 vid; - u8 proto; - int rc; - - rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL, - rule->location, &spec); - if (rc) - return rc; - - if (spec.dmaq_id == 0xfff) - rule->ring_cookie = RX_CLS_FLOW_DISC; - else - rule->ring_cookie = spec.dmaq_id; - - rc = efx_filter_get_eth_local(&spec, &vid, - rule->h_u.ether_spec.h_dest); - if (rc == 0) { - rule->flow_type = ETHER_FLOW; - memset(rule->m_u.ether_spec.h_dest, ~0, ETH_ALEN); - if (vid != EFX_FILTER_VID_UNSPEC) { - rule->flow_type |= FLOW_EXT; - rule->h_ext.vlan_tci = htons(vid); - rule->m_ext.vlan_tci = htons(0xfff); - } - return 0; - } - - rc = efx_filter_get_ipv4_local(&spec, &proto, - &ip_entry->ip4dst, &ip_entry->pdst); - if (rc != 0) { - rc = efx_filter_get_ipv4_full( - &spec, &proto, &ip_entry->ip4src, &ip_entry->psrc, - &ip_entry->ip4dst, &ip_entry->pdst); - EFX_WARN_ON_PARANOID(rc); - ip_mask->ip4src = ~0; - ip_mask->psrc = ~0; - } - rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW; - ip_mask->ip4dst = ~0; - ip_mask->pdst = ~0; - return rc; -} - static int efx_ethtool_get_rxnfc(struct net_device *net_dev, - struct ethtool_rxnfc *info, u32 *rule_locs) + struct ethtool_rxnfc *info, u32 *rules __always_unused) { struct efx_nic *efx = netdev_priv(net_dev); @@ -911,80 +862,42 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev, return 0; } - case ETHTOOL_GRXCLSRLCNT: - info->data = efx_filter_get_rx_id_limit(efx); - if (info->data == 0) - return -EOPNOTSUPP; - info->data |= RX_CLS_LOC_SPECIAL; - info->rule_cnt = - efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL); - return 0; - - case ETHTOOL_GRXCLSRULE: - if (efx_filter_get_rx_id_limit(efx) == 0) - return -EOPNOTSUPP; - return efx_ethtool_get_class_rule(efx, &info->fs); - - case ETHTOOL_GRXCLSRLALL: { - s32 rc; - info->data = efx_filter_get_rx_id_limit(efx); - if (info->data == 0) - return -EOPNOTSUPP; - rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL, - rule_locs, info->rule_cnt); - if (rc < 0) - return rc; - info->rule_cnt = rc; - return 0; - } - default: return -EOPNOTSUPP; } } -static int efx_ethtool_set_class_rule(struct efx_nic *efx, - struct ethtool_rx_flow_spec *rule) +static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev, + struct ethtool_rx_ntuple *ntuple) { - struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; - struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; - struct ethhdr *mac_entry = &rule->h_u.ether_spec; - struct ethhdr *mac_mask = &rule->m_u.ether_spec; - struct efx_filter_spec spec; + struct efx_nic *efx = netdev_priv(net_dev); + struct ethtool_tcpip4_spec *ip_entry = &ntuple->fs.h_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *ip_mask = &ntuple->fs.m_u.tcp_ip4_spec; + struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec; + struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec; + struct efx_filter_spec filter; int rc; - /* Check that user wants us to choose the location */ - if (rule->location != RX_CLS_LOC_ANY && - rule->location != RX_CLS_LOC_FIRST && - rule->location != RX_CLS_LOC_LAST) - return -EINVAL; - - /* Range-check ring_cookie */ - if (rule->ring_cookie >= efx->n_rx_channels && - rule->ring_cookie != RX_CLS_FLOW_DISC) + /* Range-check action */ + if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR || + ntuple->fs.action >= (s32)efx->n_rx_channels) return -EINVAL; - /* Check for unsupported extensions */ - if ((rule->flow_type & FLOW_EXT) && - (rule->m_ext.vlan_etype | rule->m_ext.data[0] | - rule->m_ext.data[1])) + if (~ntuple->fs.data_mask) return -EINVAL; - 
efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, - (rule->location == RX_CLS_LOC_FIRST) ? - EFX_FILTER_FLAG_RX_OVERRIDE_IP : 0, - (rule->ring_cookie == RX_CLS_FLOW_DISC) ? - 0xfff : rule->ring_cookie); + efx_filter_init_rx(&filter, EFX_FILTER_PRI_MANUAL, 0, + (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) ? + 0xfff : ntuple->fs.action); - switch (rule->flow_type) { + switch (ntuple->fs.flow_type) { case TCP_V4_FLOW: case UDP_V4_FLOW: { - u8 proto = (rule->flow_type == TCP_V4_FLOW ? + u8 proto = (ntuple->fs.flow_type == TCP_V4_FLOW ? IPPROTO_TCP : IPPROTO_UDP); /* Must match all of destination, */ - if ((__force u32)~ip_mask->ip4dst | - (__force u16)~ip_mask->pdst) + if (ip_mask->ip4dst | ip_mask->pdst) return -EINVAL; /* all or none of source, */ if ((ip_mask->ip4src | ip_mask->psrc) && @@ -992,17 +905,17 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx, (__force u16)~ip_mask->psrc)) return -EINVAL; /* and nothing else */ - if (ip_mask->tos | rule->m_ext.vlan_tci) + if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask) return -EINVAL; - if (ip_mask->ip4src) - rc = efx_filter_set_ipv4_full(&spec, proto, + if (!ip_mask->ip4src) + rc = efx_filter_set_ipv4_full(&filter, proto, ip_entry->ip4dst, ip_entry->pdst, ip_entry->ip4src, ip_entry->psrc); else - rc = efx_filter_set_ipv4_local(&spec, proto, + rc = efx_filter_set_ipv4_local(&filter, proto, ip_entry->ip4dst, ip_entry->pdst); if (rc) @@ -1010,24 +923,23 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx, break; } - case ETHER_FLOW | FLOW_EXT: - /* Must match all or none of VID */ - if (rule->m_ext.vlan_tci != htons(0xfff) && - rule->m_ext.vlan_tci != 0) - return -EINVAL; case ETHER_FLOW: - /* Must match all of destination */ - if (!is_broadcast_ether_addr(mac_mask->h_dest)) + /* Must match all of destination, */ + if (!is_zero_ether_addr(mac_mask->h_dest)) + return -EINVAL; + /* all or none of VID, */ + if (ntuple->fs.vlan_tag_mask != 0xf000 && + ntuple->fs.vlan_tag_mask != 0xffff) return -EINVAL; /* and nothing else */ - if (!is_zero_ether_addr(mac_mask->h_source) || - mac_mask->h_proto) + if (!is_broadcast_ether_addr(mac_mask->h_source) || + mac_mask->h_proto != htons(0xffff)) return -EINVAL; rc = efx_filter_set_eth_local( - &spec, - (rule->flow_type & FLOW_EXT && rule->m_ext.vlan_tci) ? - ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC, + &filter, + (ntuple->fs.vlan_tag_mask == 0xf000) ? + ntuple->fs.vlan_tag : EFX_FILTER_VID_UNSPEC, mac_entry->h_dest); if (rc) return rc; @@ -1037,57 +949,47 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx, return -EINVAL; } - rc = efx_filter_insert_filter(efx, &spec, true); - if (rc < 0) - return rc; + if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR) + return efx_filter_remove_filter(efx, &filter); - rule->location = rc; - return 0; + rc = efx_filter_insert_filter(efx, &filter, true); + return rc < 0 ? 
rc : 0; } -static int efx_ethtool_set_rxnfc(struct net_device *net_dev, - struct ethtool_rxnfc *info) +static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, + struct ethtool_rxfh_indir *indir) { struct efx_nic *efx = netdev_priv(net_dev); + size_t copy_size = + min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table)); - if (efx_filter_get_rx_id_limit(efx) == 0) + if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) return -EOPNOTSUPP; - switch (info->cmd) { - case ETHTOOL_SRXCLSRLINS: - return efx_ethtool_set_class_rule(efx, &info->fs); - - case ETHTOOL_SRXCLSRLDEL: - return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL, - info->fs.location); - - default: - return -EOPNOTSUPP; - } -} - -static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev) -{ - struct efx_nic *efx = netdev_priv(net_dev); - - return (efx_nic_rev(efx) < EFX_REV_FALCON_B0 ? - 0 : ARRAY_SIZE(efx->rx_indir_table)); -} - -static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir) -{ - struct efx_nic *efx = netdev_priv(net_dev); - - memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table)); + indir->size = ARRAY_SIZE(efx->rx_indir_table); + memcpy(indir->ring_index, efx->rx_indir_table, + copy_size * sizeof(indir->ring_index[0])); return 0; } static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev, - const u32 *indir) + const struct ethtool_rxfh_indir *indir) { struct efx_nic *efx = netdev_priv(net_dev); + size_t i; + + if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) + return -EOPNOTSUPP; + + /* Validate size and indices */ + if (indir->size != ARRAY_SIZE(efx->rx_indir_table)) + return -EINVAL; + for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) + if (indir->ring_index[i] >= efx->n_rx_channels) + return -EINVAL; - memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table)); + memcpy(efx->rx_indir_table, indir->ring_index, + sizeof(efx->rx_indir_table)); efx_nic_push_rx_indir_table(efx); return 0; } @@ -1117,8 +1019,7 @@ const struct ethtool_ops efx_ethtool_ops = { .set_wol = efx_ethtool_set_wol, .reset = efx_ethtool_reset, .get_rxnfc = efx_ethtool_get_rxnfc, - .set_rxnfc = efx_ethtool_set_rxnfc, - .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, + .set_rx_ntuple = efx_ethtool_set_rx_ntuple, .get_rxfh_indir = efx_ethtool_get_rxfh_indir, .set_rxfh_indir = efx_ethtool_set_rxfh_indir, }; diff --git a/trunk/drivers/net/ethernet/sfc/falcon.c b/trunk/drivers/net/ethernet/sfc/falcon.c index 8ae1ebd35397..97b606b92e88 100644 --- a/trunk/drivers/net/ethernet/sfc/falcon.c +++ b/trunk/drivers/net/ethernet/sfc/falcon.c @@ -610,7 +610,7 @@ static void falcon_stats_complete(struct efx_nic *efx) if (!nic_data->stats_pending) return; - nic_data->stats_pending = false; + nic_data->stats_pending = 0; if (*nic_data->stats_dma_done == FALCON_STATS_DONE) { rmb(); /* read the done flag before the stats */ efx->mac_op->update_stats(efx); diff --git a/trunk/drivers/net/ethernet/sfc/filter.c b/trunk/drivers/net/ethernet/sfc/filter.c index 1fbbbee7b1ae..2b9636f96e05 100644 --- a/trunk/drivers/net/ethernet/sfc/filter.c +++ b/trunk/drivers/net/ethernet/sfc/filter.c @@ -155,16 +155,6 @@ static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec, spec->data[2] = ntohl(host2); } -static inline void __efx_filter_get_ipv4(const struct efx_filter_spec *spec, - __be32 *host1, __be16 *port1, - __be32 *host2, __be16 *port2) -{ - *host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16); - *port1 = htons(spec->data[0]); - *host2 = htonl(spec->data[2]); - *port2 = htons(spec->data[1] >> 
16); -} - /** * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port * @spec: Specification to initialise @@ -215,26 +205,6 @@ int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto, return 0; } -int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec, - u8 *proto, __be32 *host, __be16 *port) -{ - __be32 host1; - __be16 port1; - - switch (spec->type) { - case EFX_FILTER_TCP_WILD: - *proto = IPPROTO_TCP; - __efx_filter_get_ipv4(spec, &host1, &port1, host, port); - return 0; - case EFX_FILTER_UDP_WILD: - *proto = IPPROTO_UDP; - __efx_filter_get_ipv4(spec, &host1, port, host, &port1); - return 0; - default: - return -EINVAL; - } -} - /** * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports * @spec: Specification to initialise @@ -272,25 +242,6 @@ int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto, return 0; } -int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec, - u8 *proto, __be32 *host, __be16 *port, - __be32 *rhost, __be16 *rport) -{ - switch (spec->type) { - case EFX_FILTER_TCP_FULL: - *proto = IPPROTO_TCP; - break; - case EFX_FILTER_UDP_FULL: - *proto = IPPROTO_UDP; - break; - default: - return -EINVAL; - } - - __efx_filter_get_ipv4(spec, rhost, rport, host, port); - return 0; -} - /** * efx_filter_set_eth_local - specify local Ethernet address and optional VID * @spec: Specification to initialise @@ -319,29 +270,6 @@ int efx_filter_set_eth_local(struct efx_filter_spec *spec, return 0; } -int efx_filter_get_eth_local(const struct efx_filter_spec *spec, - u16 *vid, u8 *addr) -{ - switch (spec->type) { - case EFX_FILTER_MAC_WILD: - *vid = EFX_FILTER_VID_UNSPEC; - break; - case EFX_FILTER_MAC_FULL: - *vid = spec->data[0]; - break; - default: - return -EINVAL; - } - - addr[0] = spec->data[2] >> 8; - addr[1] = spec->data[2]; - addr[2] = spec->data[1] >> 24; - addr[3] = spec->data[1] >> 16; - addr[4] = spec->data[1] >> 8; - addr[5] = spec->data[1]; - return 0; -} - /* Build a filter entry and return its n-tuple key. */ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec) { @@ -404,7 +332,7 @@ static bool efx_filter_equal(const struct efx_filter_spec *left, static int efx_filter_search(struct efx_filter_table *table, struct efx_filter_spec *spec, u32 key, - bool for_insert, unsigned int *depth_required) + bool for_insert, int *depth_required) { unsigned hash, incr, filter_idx, depth, depth_max; @@ -438,59 +366,12 @@ static int efx_filter_search(struct efx_filter_table *table, } } -/* - * Construct/deconstruct external filter IDs. These must be ordered - * by matching priority, for RX NFC semantics. - * - * Each RX MAC filter entry has a flag for whether it can override an - * RX IP filter that also matches. So we assign locations for MAC - * filters with overriding behaviour, then for IP filters, then for - * MAC filters without overriding behaviour. - */ - -#define EFX_FILTER_INDEX_WIDTH 13 -#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1) - -static inline u32 efx_filter_make_id(enum efx_filter_table_id table_id, - unsigned int index, u8 flags) -{ - return (table_id == EFX_FILTER_TABLE_RX_MAC && - flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP) ? - index : - (table_id + 1) << EFX_FILTER_INDEX_WIDTH | index; -} - -static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id) -{ - return (id <= EFX_FILTER_INDEX_MASK) ? 
- EFX_FILTER_TABLE_RX_MAC : - (id >> EFX_FILTER_INDEX_WIDTH) - 1; -} - -static inline unsigned int efx_filter_id_index(u32 id) -{ - return id & EFX_FILTER_INDEX_MASK; -} +/* Construct/deconstruct external filter IDs */ -static inline u8 efx_filter_id_flags(u32 id) +static inline int +efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index) { - return (id <= EFX_FILTER_INDEX_MASK) ? - EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_OVERRIDE_IP : - EFX_FILTER_FLAG_RX; -} - -u32 efx_filter_get_rx_id_limit(struct efx_nic *efx) -{ - struct efx_filter_state *state = efx->filter_state; - - if (state->table[EFX_FILTER_TABLE_RX_MAC].size != 0) - return ((EFX_FILTER_TABLE_RX_MAC + 1) << EFX_FILTER_INDEX_WIDTH) - + state->table[EFX_FILTER_TABLE_RX_MAC].size; - else if (state->table[EFX_FILTER_TABLE_RX_IP].size != 0) - return ((EFX_FILTER_TABLE_RX_IP + 1) << EFX_FILTER_INDEX_WIDTH) - + state->table[EFX_FILTER_TABLE_RX_IP].size; - else - return 0; + return table_id << 16 | index; } /** @@ -503,14 +384,14 @@ u32 efx_filter_get_rx_id_limit(struct efx_nic *efx) * On success, return the filter ID. * On failure, return a negative error code. */ -s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, +int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, bool replace) { struct efx_filter_state *state = efx->filter_state; struct efx_filter_table *table = efx_filter_spec_table(state, spec); struct efx_filter_spec *saved_spec; efx_oword_t filter; - unsigned int filter_idx, depth; + int filter_idx, depth; u32 key; int rc; @@ -558,7 +439,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, netif_vdbg(efx, hw, efx->net_dev, "%s: filter type %d index %d rxq %u set", __func__, spec->type, filter_idx, spec->dmaq_id); - rc = efx_filter_make_id(table->id, filter_idx, spec->flags); + rc = efx_filter_make_id(table->id, filter_idx); out: spin_unlock_bh(&state->lock); @@ -567,7 +448,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, static void efx_filter_table_clear_entry(struct efx_nic *efx, struct efx_filter_table *table, - unsigned int filter_idx) + int filter_idx) { static efx_oword_t filter; @@ -582,101 +463,48 @@ static void efx_filter_table_clear_entry(struct efx_nic *efx, } /** - * efx_filter_remove_id_safe - remove a filter by ID, carefully + * efx_filter_remove_filter - remove a filter by specification * @efx: NIC from which to remove the filter - * @priority: Priority of filter, as passed to @efx_filter_insert_filter - * @filter_id: ID of filter, as returned by @efx_filter_insert_filter + * @spec: Specification for the filter * - * This function will range-check @filter_id, so it is safe to call - * with a value passed from userland. + * On success, return zero. + * On failure, return a negative error code. 
*/ -int efx_filter_remove_id_safe(struct efx_nic *efx, - enum efx_filter_priority priority, - u32 filter_id) +int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec) { struct efx_filter_state *state = efx->filter_state; - enum efx_filter_table_id table_id; - struct efx_filter_table *table; - unsigned int filter_idx; - struct efx_filter_spec *spec; - u8 filter_flags; + struct efx_filter_table *table = efx_filter_spec_table(state, spec); + struct efx_filter_spec *saved_spec; + efx_oword_t filter; + int filter_idx, depth; + u32 key; int rc; - table_id = efx_filter_id_table_id(filter_id); - if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT) - return -ENOENT; - table = &state->table[table_id]; - - filter_idx = efx_filter_id_index(filter_id); - if (filter_idx >= table->size) - return -ENOENT; - spec = &table->spec[filter_idx]; + if (!table) + return -EINVAL; - filter_flags = efx_filter_id_flags(filter_id); + key = efx_filter_build(&filter, spec); spin_lock_bh(&state->lock); - if (test_bit(filter_idx, table->used_bitmap) && - spec->priority == priority && spec->flags == filter_flags) { - efx_filter_table_clear_entry(efx, table, filter_idx); - if (table->used == 0) - efx_filter_table_reset_search_depth(table); - rc = 0; - } else { - rc = -ENOENT; - } - - spin_unlock_bh(&state->lock); - - return rc; -} - -/** - * efx_filter_get_filter_safe - retrieve a filter by ID, carefully - * @efx: NIC from which to remove the filter - * @priority: Priority of filter, as passed to @efx_filter_insert_filter - * @filter_id: ID of filter, as returned by @efx_filter_insert_filter - * @spec: Buffer in which to store filter specification - * - * This function will range-check @filter_id, so it is safe to call - * with a value passed from userland. - */ -int efx_filter_get_filter_safe(struct efx_nic *efx, - enum efx_filter_priority priority, - u32 filter_id, struct efx_filter_spec *spec_buf) -{ - struct efx_filter_state *state = efx->filter_state; - enum efx_filter_table_id table_id; - struct efx_filter_table *table; - struct efx_filter_spec *spec; - unsigned int filter_idx; - u8 filter_flags; - int rc; - - table_id = efx_filter_id_table_id(filter_id); - if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT) - return -ENOENT; - table = &state->table[table_id]; - - filter_idx = efx_filter_id_index(filter_id); - if (filter_idx >= table->size) - return -ENOENT; - spec = &table->spec[filter_idx]; - - filter_flags = efx_filter_id_flags(filter_id); - - spin_lock_bh(&state->lock); + rc = efx_filter_search(table, spec, key, false, &depth); + if (rc < 0) + goto out; + filter_idx = rc; + saved_spec = &table->spec[filter_idx]; - if (test_bit(filter_idx, table->used_bitmap) && - spec->priority == priority && spec->flags == filter_flags) { - *spec_buf = *spec; - rc = 0; - } else { - rc = -ENOENT; + if (spec->priority < saved_spec->priority) { + rc = -EPERM; + goto out; } - spin_unlock_bh(&state->lock); + efx_filter_table_clear_entry(efx, table, filter_idx); + if (table->used == 0) + efx_filter_table_reset_search_depth(table); + rc = 0; +out: + spin_unlock_bh(&state->lock); return rc; } @@ -686,7 +514,7 @@ static void efx_filter_table_clear(struct efx_nic *efx, { struct efx_filter_state *state = efx->filter_state; struct efx_filter_table *table = &state->table[table_id]; - unsigned int filter_idx; + int filter_idx; spin_lock_bh(&state->lock); @@ -710,68 +538,6 @@ void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority) efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority); 
} -u32 efx_filter_count_rx_used(struct efx_nic *efx, - enum efx_filter_priority priority) -{ - struct efx_filter_state *state = efx->filter_state; - enum efx_filter_table_id table_id; - struct efx_filter_table *table; - unsigned int filter_idx; - u32 count = 0; - - spin_lock_bh(&state->lock); - - for (table_id = EFX_FILTER_TABLE_RX_IP; - table_id <= EFX_FILTER_TABLE_RX_MAC; - table_id++) { - table = &state->table[table_id]; - for (filter_idx = 0; filter_idx < table->size; filter_idx++) { - if (test_bit(filter_idx, table->used_bitmap) && - table->spec[filter_idx].priority == priority) - ++count; - } - } - - spin_unlock_bh(&state->lock); - - return count; -} - -s32 efx_filter_get_rx_ids(struct efx_nic *efx, - enum efx_filter_priority priority, - u32 *buf, u32 size) -{ - struct efx_filter_state *state = efx->filter_state; - enum efx_filter_table_id table_id; - struct efx_filter_table *table; - unsigned int filter_idx; - s32 count = 0; - - spin_lock_bh(&state->lock); - - for (table_id = EFX_FILTER_TABLE_RX_IP; - table_id <= EFX_FILTER_TABLE_RX_MAC; - table_id++) { - table = &state->table[table_id]; - for (filter_idx = 0; filter_idx < table->size; filter_idx++) { - if (test_bit(filter_idx, table->used_bitmap) && - table->spec[filter_idx].priority == priority) { - if (count == size) { - count = -EMSGSIZE; - goto out; - } - buf[count++] = efx_filter_make_id( - table_id, filter_idx, - table->spec[filter_idx].flags); - } - } - } -out: - spin_unlock_bh(&state->lock); - - return count; -} - /* Restore filter stater after reset */ void efx_restore_filters(struct efx_nic *efx) { @@ -779,7 +545,7 @@ void efx_restore_filters(struct efx_nic *efx) enum efx_filter_table_id table_id; struct efx_filter_table *table; efx_oword_t filter; - unsigned int filter_idx; + int filter_idx; spin_lock_bh(&state->lock); diff --git a/trunk/drivers/net/ethernet/sfc/filter.h b/trunk/drivers/net/ethernet/sfc/filter.h index 3d4108cd90ca..872f2132a496 100644 --- a/trunk/drivers/net/ethernet/sfc/filter.h +++ b/trunk/drivers/net/ethernet/sfc/filter.h @@ -78,11 +78,6 @@ enum efx_filter_flags { * * Use the efx_filter_set_*() functions to initialise the @type and * @data fields. - * - * The @priority field is used by software to determine whether a new - * filter may replace an old one. The hardware priority of a filter - * depends on the filter type and %EFX_FILTER_FLAG_RX_OVERRIDE_IP - * flag. 
*/ struct efx_filter_spec { u8 type:4; @@ -105,18 +100,11 @@ static inline void efx_filter_init_rx(struct efx_filter_spec *spec, extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto, __be32 host, __be16 port); -extern int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec, - u8 *proto, __be32 *host, __be16 *port); extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto, __be32 host, __be16 port, __be32 rhost, __be16 rport); -extern int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec, - u8 *proto, __be32 *host, __be16 *port, - __be32 *rhost, __be16 *rport); extern int efx_filter_set_eth_local(struct efx_filter_spec *spec, u16 vid, const u8 *addr); -extern int efx_filter_get_eth_local(const struct efx_filter_spec *spec, - u16 *vid, u8 *addr); enum { EFX_FILTER_VID_UNSPEC = 0xffff, }; diff --git a/trunk/drivers/net/ethernet/sfc/mtd.c b/trunk/drivers/net/ethernet/sfc/mtd.c index bc9dcd6b30d7..b6304486f244 100644 --- a/trunk/drivers/net/ethernet/sfc/mtd.c +++ b/trunk/drivers/net/ethernet/sfc/mtd.c @@ -496,7 +496,7 @@ static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type); if (rc) goto out; - part->mcdi.updating = true; + part->mcdi.updating = 1; } /* The MCDI interface can in fact do multiple erase blocks at once; @@ -528,7 +528,7 @@ static int siena_mtd_write(struct mtd_info *mtd, loff_t start, rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type); if (rc) goto out; - part->mcdi.updating = true; + part->mcdi.updating = 1; } while (offset < end) { @@ -553,7 +553,7 @@ static int siena_mtd_sync(struct mtd_info *mtd) int rc = 0; if (part->mcdi.updating) { - part->mcdi.updating = false; + part->mcdi.updating = 0; rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type); } diff --git a/trunk/drivers/net/ethernet/sfc/net_driver.h b/trunk/drivers/net/ethernet/sfc/net_driver.h index c49502bab6a3..b8e251a1ee48 100644 --- a/trunk/drivers/net/ethernet/sfc/net_driver.h +++ b/trunk/drivers/net/ethernet/sfc/net_driver.h @@ -908,7 +908,7 @@ struct efx_nic_type { unsigned int phys_addr_channels; unsigned int tx_dc_base; unsigned int rx_dc_base; - netdev_features_t offload_features; + u32 offload_features; }; /************************************************************************** diff --git a/trunk/drivers/net/ethernet/sfc/rx.c b/trunk/drivers/net/ethernet/sfc/rx.c index aca349861767..752d521c09b1 100644 --- a/trunk/drivers/net/ethernet/sfc/rx.c +++ b/trunk/drivers/net/ethernet/sfc/rx.c @@ -479,8 +479,11 @@ static void efx_rx_packet_gro(struct efx_channel *channel, if (efx->net_dev->features & NETIF_F_RXHASH) skb->rxhash = efx_rx_buf_hash(eh); - skb_fill_page_desc(skb, 0, page, - efx_rx_buf_offset(efx, rx_buf), rx_buf->len); + skb_frag_set_page(skb, 0, page); + skb_shinfo(skb)->frags[0].page_offset = + efx_rx_buf_offset(efx, rx_buf); + skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx_buf->len); + skb_shinfo(skb)->nr_frags = 1; skb->len = rx_buf->len; skb->data_len = rx_buf->len; @@ -666,7 +669,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) rx_queue->ptr_mask); /* Allocate RX buffers */ - rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer), + rx_queue->buffer = kzalloc(entries * sizeof(*rx_queue->buffer), GFP_KERNEL); if (!rx_queue->buffer) return -ENOMEM; diff --git a/trunk/drivers/net/ethernet/sfc/selftest.c b/trunk/drivers/net/ethernet/sfc/selftest.c index 52edd24fcde3..822f6c2a6a7c 100644 --- 
a/trunk/drivers/net/ethernet/sfc/selftest.c +++ b/trunk/drivers/net/ethernet/sfc/selftest.c @@ -503,8 +503,8 @@ efx_test_loopback(struct efx_tx_queue *tx_queue, /* Determine how many packets to send */ state->packet_count = efx->txq_entries / 3; state->packet_count = min(1 << (i << 2), state->packet_count); - state->skbs = kcalloc(state->packet_count, - sizeof(state->skbs[0]), GFP_KERNEL); + state->skbs = kzalloc(sizeof(state->skbs[0]) * + state->packet_count, GFP_KERNEL); if (!state->skbs) return -ENOMEM; state->flush = false; diff --git a/trunk/drivers/net/ethernet/sfc/siena.c b/trunk/drivers/net/ethernet/sfc/siena.c index 4d5d619feaa6..cc2549cb7076 100644 --- a/trunk/drivers/net/ethernet/sfc/siena.c +++ b/trunk/drivers/net/ethernet/sfc/siena.c @@ -232,7 +232,7 @@ static int siena_probe_nvconfig(struct efx_nic *efx) static int siena_probe_nic(struct efx_nic *efx) { struct siena_nic_data *nic_data; - bool already_attached = false; + bool already_attached = 0; efx_oword_t reg; int rc; diff --git a/trunk/drivers/net/ethernet/sfc/tx.c b/trunk/drivers/net/ethernet/sfc/tx.c index 72f0fbc73b1a..df88c5430f95 100644 --- a/trunk/drivers/net/ethernet/sfc/tx.c +++ b/trunk/drivers/net/ethernet/sfc/tx.c @@ -31,9 +31,7 @@ #define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u) static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, - struct efx_tx_buffer *buffer, - unsigned int *pkts_compl, - unsigned int *bytes_compl) + struct efx_tx_buffer *buffer) { if (buffer->unmap_len) { struct pci_dev *pci_dev = tx_queue->efx->pci_dev; @@ -50,8 +48,6 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, } if (buffer->skb) { - (*pkts_compl)++; - (*bytes_compl) += buffer->skb->len; dev_kfree_skb_any((struct sk_buff *) buffer->skb); buffer->skb = NULL; netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, @@ -254,8 +250,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) buffer->skb = skb; buffer->continuation = false; - netdev_tx_sent_queue(tx_queue->core_txq, skb->len); - /* Pass off to hardware */ efx_nic_push_buffers(tx_queue); @@ -273,11 +267,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) unwind: /* Work backwards until we hit the original insert pointer value */ while (tx_queue->insert_count != tx_queue->write_count) { - unsigned int pkts_compl = 0, bytes_compl = 0; --tx_queue->insert_count; insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; buffer = &tx_queue->buffer[insert_ptr]; - efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); + efx_dequeue_buffer(tx_queue, buffer); buffer->len = 0; } @@ -300,9 +293,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) * specified index. 
*/ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, - unsigned int index, - unsigned int *pkts_compl, - unsigned int *bytes_compl) + unsigned int index) { struct efx_nic *efx = tx_queue->efx; unsigned int stop_index, read_ptr; @@ -320,7 +311,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, return; } - efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl); + efx_dequeue_buffer(tx_queue, buffer); buffer->continuation = true; buffer->len = 0; @@ -431,12 +422,10 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) { unsigned fill_level; struct efx_nic *efx = tx_queue->efx; - unsigned int pkts_compl = 0, bytes_compl = 0; EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); - efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); - netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl); + efx_dequeue_buffers(tx_queue, index); /* See if we need to restart the netif queue. This barrier * separates the update of read_count from the test of the @@ -479,7 +468,7 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask); /* Allocate software ring */ - tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer), + tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer), GFP_KERNEL); if (!tx_queue->buffer) return -ENOMEM; @@ -526,15 +515,13 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) /* Free any buffers left in the ring */ while (tx_queue->read_count != tx_queue->write_count) { - unsigned int pkts_compl = 0, bytes_compl = 0; buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; - efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); + efx_dequeue_buffer(tx_queue, buffer); buffer->continuation = true; buffer->len = 0; ++tx_queue->read_count; } - netdev_tx_reset_queue(tx_queue->core_txq); } void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) @@ -1173,8 +1160,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, goto mem_err; } - netdev_tx_sent_queue(tx_queue->core_txq, skb->len); - /* Pass off to hardware */ efx_nic_push_buffers(tx_queue); diff --git a/trunk/drivers/net/ethernet/sgi/meth.c b/trunk/drivers/net/ethernet/sgi/meth.c index 53efe7c7b1c0..60135aa55802 100644 --- a/trunk/drivers/net/ethernet/sgi/meth.c +++ b/trunk/drivers/net/ethernet/sgi/meth.c @@ -28,7 +28,6 @@ #include /* struct tcphdr */ #include #include /* MII definitions */ -#include #include #include @@ -58,20 +57,13 @@ static const char *meth_str="SGI O2 Fast Ethernet"; static int timeout = TX_TIMEOUT; module_param(timeout, int, 0); -/* - * Maximum number of multicast addresses to filter (vs. Rx-all-multicast). - * MACE Ethernet uses a 64 element hash table based on the Ethernet CRC. - */ -#define METH_MCF_LIMIT 32 - /* * This structure is private to each device. It is used to pass * packets in and out, so there is place for a packet */ struct meth_private { /* in-memory copy of MAC Control register */ - u64 mac_ctrl; - + unsigned long mac_ctrl; /* in-memory copy of DMA Control register */ unsigned long dma_ctrl; /* address of PHY, used by mdio_* functions, initialized in mdio_probe */ @@ -87,9 +79,6 @@ struct meth_private { struct sk_buff *rx_skbs[RX_RING_ENTRIES]; unsigned long rx_write; - /* Multicast filter. 
*/ - u64 mcast_filter; - spinlock_t meth_lock; }; @@ -776,40 +765,6 @@ static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) } } -static void meth_set_rx_mode(struct net_device *dev) -{ - struct meth_private *priv = netdev_priv(dev); - unsigned long flags; - - netif_stop_queue(dev); - spin_lock_irqsave(&priv->meth_lock, flags); - priv->mac_ctrl &= ~METH_PROMISC; - - if (dev->flags & IFF_PROMISC) { - priv->mac_ctrl |= METH_PROMISC; - priv->mcast_filter = 0xffffffffffffffffUL; - } else if ((netdev_mc_count(dev) > METH_MCF_LIMIT) || - (dev->flags & IFF_ALLMULTI)) { - priv->mac_ctrl |= METH_ACCEPT_AMCAST; - priv->mcast_filter = 0xffffffffffffffffUL; - } else { - struct netdev_hw_addr *ha; - priv->mac_ctrl |= METH_ACCEPT_MCAST; - - netdev_for_each_mc_addr(ha, dev) - set_bit((ether_crc(ETH_ALEN, ha->addr) >> 26), - (volatile unsigned long *)&priv->mcast_filter); - } - - /* Write the changes to the chip registers. */ - mace->eth.mac_ctrl = priv->mac_ctrl; - mace->eth.mcast_filter = priv->mcast_filter; - - /* Done! */ - spin_unlock_irqrestore(&priv->meth_lock, flags); - netif_wake_queue(dev); -} - static const struct net_device_ops meth_netdev_ops = { .ndo_open = meth_open, .ndo_stop = meth_release, @@ -819,7 +774,6 @@ static const struct net_device_ops meth_netdev_ops = { .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, - .ndo_set_rx_mode = meth_set_rx_mode, }; /* @@ -876,7 +830,24 @@ static struct platform_driver meth_driver = { } }; -module_platform_driver(meth_driver); +static int __init meth_init_module(void) +{ + int err; + + err = platform_driver_register(&meth_driver); + if (err) + printk(KERN_ERR "Driver registration failed\n"); + + return err; +} + +static void __exit meth_exit_module(void) +{ + platform_driver_unregister(&meth_driver); +} + +module_init(meth_init_module); +module_exit(meth_exit_module); MODULE_AUTHOR("Ilya Volynets "); MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver"); diff --git a/trunk/drivers/net/ethernet/sis/sis190.c b/trunk/drivers/net/ethernet/sis/sis190.c index 5b118cd5bf94..1b4658c99391 100644 --- a/trunk/drivers/net/ethernet/sis/sis190.c +++ b/trunk/drivers/net/ethernet/sis/sis190.c @@ -47,6 +47,8 @@ #define sis190_rx_skb netif_rx #define sis190_rx_quota(count, quota) count +#define MAC_ADDR_LEN 6 + #define NUM_TX_DESC 64 /* [8..1024] */ #define NUM_RX_DESC 64 /* [8..8192] */ #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) @@ -1599,7 +1601,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev, } /* Get MAC address from EEPROM */ - for (i = 0; i < ETH_ALEN / 2; i++) { + for (i = 0; i < MAC_ADDR_LEN / 2; i++) { u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i); ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w); @@ -1651,7 +1653,7 @@ static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev, udelay(50); pci_read_config_byte(isa_bridge, 0x48, ®); - for (i = 0; i < ETH_ALEN; i++) { + for (i = 0; i < MAC_ADDR_LEN; i++) { outb(0x9 + i, 0x78); dev->dev_addr[i] = inb(0x79); } @@ -1690,7 +1692,7 @@ static inline void sis190_init_rxfilter(struct net_device *dev) */ SIS_W16(RxMacControl, ctl & ~0x0f00); - for (i = 0; i < ETH_ALEN; i++) + for (i = 0; i < MAC_ADDR_LEN; i++) SIS_W8(RxMacAddr + i, dev->dev_addr[i]); SIS_W16(RxMacControl, ctl); @@ -1758,10 +1760,9 @@ static void sis190_get_drvinfo(struct net_device *dev, { struct sis190_private *tp = netdev_priv(dev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - 
strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(tp->pci_dev), - sizeof(info->bus_info)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, pci_name(tp->pci_dev)); } static int sis190_get_regs_len(struct net_device *dev) diff --git a/trunk/drivers/net/ethernet/sis/sis900.c b/trunk/drivers/net/ethernet/sis/sis900.c index c8efc708c792..a184abc5ef11 100644 --- a/trunk/drivers/net/ethernet/sis/sis900.c +++ b/trunk/drivers/net/ethernet/sis/sis900.c @@ -1991,10 +1991,9 @@ static void sis900_get_drvinfo(struct net_device *net_dev, { struct sis900_private *sis_priv = netdev_priv(net_dev); - strlcpy(info->driver, SIS900_MODULE_NAME, sizeof(info->driver)); - strlcpy(info->version, SIS900_DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(sis_priv->pci_dev), - sizeof(info->bus_info)); + strcpy (info->driver, SIS900_MODULE_NAME); + strcpy (info->version, SIS900_DRV_VERSION); + strcpy (info->bus_info, pci_name(sis_priv->pci_dev)); } static u32 sis900_get_msglevel(struct net_device *net_dev) diff --git a/trunk/drivers/net/ethernet/smsc/epic100.c b/trunk/drivers/net/ethernet/smsc/epic100.c index 2c077ce0b6d6..0a5dfb814157 100644 --- a/trunk/drivers/net/ethernet/smsc/epic100.c +++ b/trunk/drivers/net/ethernet/smsc/epic100.c @@ -1414,9 +1414,9 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo * { struct epic_private *np = netdev_priv(dev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); + strcpy (info->driver, DRV_NAME); + strcpy (info->version, DRV_VERSION); + strcpy (info->bus_info, pci_name(np->pci_dev)); } static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) diff --git a/trunk/drivers/net/ethernet/smsc/smc911x.c b/trunk/drivers/net/ethernet/smsc/smc911x.c index 313ba3b32ab4..8f61fe9db1d0 100644 --- a/trunk/drivers/net/ethernet/smsc/smc911x.c +++ b/trunk/drivers/net/ethernet/smsc/smc911x.c @@ -2196,4 +2196,15 @@ static struct platform_driver smc911x_driver = { }, }; -module_platform_driver(smc911x_driver); +static int __init smc911x_init(void) +{ + return platform_driver_register(&smc911x_driver); +} + +static void __exit smc911x_cleanup(void) +{ + platform_driver_unregister(&smc911x_driver); +} + +module_init(smc911x_init); +module_exit(smc911x_cleanup); diff --git a/trunk/drivers/net/ethernet/smsc/smc91c92_cs.c b/trunk/drivers/net/ethernet/smsc/smc91c92_cs.c index ada927aba7a5..cbfa98187131 100644 --- a/trunk/drivers/net/ethernet/smsc/smc91c92_cs.c +++ b/trunk/drivers/net/ethernet/smsc/smc91c92_cs.c @@ -1909,8 +1909,8 @@ static int check_if_running(struct net_device *dev) static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); } static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) diff --git a/trunk/drivers/net/ethernet/smsc/smc91x.c b/trunk/drivers/net/ethernet/smsc/smc91x.c index 64ad3ed74495..f47f81e25322 100644 --- a/trunk/drivers/net/ethernet/smsc/smc91x.c +++ b/trunk/drivers/net/ethernet/smsc/smc91x.c @@ -2417,4 +2417,15 @@ static struct platform_driver smc_driver = { }, }; -module_platform_driver(smc_driver); +static int __init smc_init(void) 
+{ + return platform_driver_register(&smc_driver); +} + +static void __exit smc_cleanup(void) +{ + platform_driver_unregister(&smc_driver); +} + +module_init(smc_init); +module_exit(smc_cleanup); diff --git a/trunk/drivers/net/ethernet/smsc/smsc911x.c b/trunk/drivers/net/ethernet/smsc/smsc911x.c index 9d0b8ced0234..8843071fe987 100644 --- a/trunk/drivers/net/ethernet/smsc/smsc911x.c +++ b/trunk/drivers/net/ethernet/smsc/smsc911x.c @@ -44,7 +44,6 @@ #include #include #include -#include #include #include #include @@ -89,8 +88,6 @@ struct smsc911x_ops { unsigned int *buf, unsigned int wordcount); }; -#define SMSC911X_NUM_SUPPLIES 2 - struct smsc911x_data { void __iomem *ioaddr; @@ -141,9 +138,6 @@ struct smsc911x_data { /* register access functions */ const struct smsc911x_ops *ops; - - /* regulators */ - struct regulator_bulk_data supplies[SMSC911X_NUM_SUPPLIES]; }; /* Easy access to information */ @@ -368,76 +362,6 @@ smsc911x_rx_readfifo_shift(struct smsc911x_data *pdata, unsigned int *buf, spin_unlock_irqrestore(&pdata->dev_lock, flags); } -/* - * enable resources, currently just regulators. - */ -static int smsc911x_enable_resources(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct smsc911x_data *pdata = netdev_priv(ndev); - int ret = 0; - - ret = regulator_bulk_enable(ARRAY_SIZE(pdata->supplies), - pdata->supplies); - if (ret) - netdev_err(ndev, "failed to enable regulators %d\n", - ret); - return ret; -} - -/* - * disable resources, currently just regulators. - */ -static int smsc911x_disable_resources(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct smsc911x_data *pdata = netdev_priv(ndev); - int ret = 0; - - ret = regulator_bulk_disable(ARRAY_SIZE(pdata->supplies), - pdata->supplies); - return ret; -} - -/* - * Request resources, currently just regulators. - * - * The SMSC911x has two power pins: vddvario and vdd33a, in designs where - * these are not always-on we need to request regulators to be turned on - * before we can try to access the device registers. - */ -static int smsc911x_request_resources(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct smsc911x_data *pdata = netdev_priv(ndev); - int ret = 0; - - /* Request regulators */ - pdata->supplies[0].supply = "vdd33a"; - pdata->supplies[1].supply = "vddvario"; - ret = regulator_bulk_get(&pdev->dev, - ARRAY_SIZE(pdata->supplies), - pdata->supplies); - if (ret) - netdev_err(ndev, "couldn't get regulators %d\n", - ret); - return ret; -} - -/* - * Free resources, currently just regulators. - * - */ -static void smsc911x_free_resources(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct smsc911x_data *pdata = netdev_priv(ndev); - - /* Free regulators */ - regulator_bulk_free(ARRAY_SIZE(pdata->supplies), - pdata->supplies); -} - /* waits for MAC not busy, with timeout. 
Only called by smsc911x_mac_read * and smsc911x_mac_write, so assumes mac_lock is held */ static int smsc911x_mac_complete(struct smsc911x_data *pdata) @@ -1319,92 +1243,10 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata) spin_unlock(&pdata->mac_lock); } -static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) -{ - int rc = 0; - - if (!pdata->phy_dev) - return rc; - - rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS); - - if (rc < 0) { - SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); - return rc; - } - - /* - * If energy is detected the PHY is already awake so is not necessary - * to disable the energy detect power-down mode. - */ - if ((rc & MII_LAN83C185_EDPWRDOWN) && - !(rc & MII_LAN83C185_ENERGYON)) { - /* Disable energy detect mode for this SMSC Transceivers */ - rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, - rc & (~MII_LAN83C185_EDPWRDOWN)); - - if (rc < 0) { - SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); - return rc; - } - - mdelay(1); - } - - return 0; -} - -static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata) -{ - int rc = 0; - - if (!pdata->phy_dev) - return rc; - - rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS); - - if (rc < 0) { - SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); - return rc; - } - - /* Only enable if energy detect mode is already disabled */ - if (!(rc & MII_LAN83C185_EDPWRDOWN)) { - mdelay(100); - /* Enable energy detect mode for this SMSC Transceivers */ - rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, - rc | MII_LAN83C185_EDPWRDOWN); - - if (rc < 0) { - SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); - return rc; - } - - mdelay(1); - } - return 0; -} - static int smsc911x_soft_reset(struct smsc911x_data *pdata) { unsigned int timeout; unsigned int temp; - int ret; - - /* - * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that - * are initialized in a Energy Detect Power-Down mode that prevents - * the MAC chip to be software reseted. So we have to wakeup the PHY - * before. 
- */ - if (pdata->generation == 4) { - ret = smsc911x_phy_disable_energy_detect(pdata); - - if (ret) { - SMSC_WARN(pdata, drv, "Failed to wakeup the PHY chip"); - return ret; - } - } /* Reset the LAN911x */ smsc911x_reg_write(pdata, HW_CFG, HW_CFG_SRST_); @@ -1418,16 +1260,6 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata) SMSC_WARN(pdata, drv, "Failed to complete reset"); return -EIO; } - - if (pdata->generation == 4) { - ret = smsc911x_phy_enable_energy_detect(pdata); - - if (ret) { - SMSC_WARN(pdata, drv, "Failed to wakeup the PHY chip"); - return ret; - } - } - return 0; } @@ -2260,9 +2092,6 @@ static int __devexit smsc911x_drv_remove(struct platform_device *pdev) iounmap(pdata->ioaddr); - (void)smsc911x_disable_resources(pdev); - smsc911x_free_resources(pdev); - free_netdev(dev); return 0; @@ -2389,20 +2218,10 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) pdata->dev = dev; pdata->msg_enable = ((1 << debug) - 1); - platform_set_drvdata(pdev, dev); - - retval = smsc911x_request_resources(pdev); - if (retval) - goto out_return_resources; - - retval = smsc911x_enable_resources(pdev); - if (retval) - goto out_disable_resources; - if (pdata->ioaddr == NULL) { SMSC_WARN(pdata, probe, "Error smsc911x base address invalid"); retval = -ENOMEM; - goto out_disable_resources; + goto out_free_netdev_2; } retval = smsc911x_probe_config_dt(&pdata->config, np); @@ -2414,7 +2233,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) if (retval) { SMSC_WARN(pdata, probe, "Error smsc911x config not found"); - goto out_disable_resources; + goto out_unmap_io_3; } /* assume standard, non-shifted, access to HW registers */ @@ -2425,7 +2244,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) retval = smsc911x_init(dev); if (retval < 0) - goto out_disable_resources; + goto out_unmap_io_3; /* configure irq polarity and type before connecting isr */ if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH) @@ -2445,13 +2264,15 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) if (retval) { SMSC_WARN(pdata, probe, "Unable to claim requested irq: %d", dev->irq); - goto out_free_irq; + goto out_unmap_io_3; } + platform_set_drvdata(pdev, dev); + retval = register_netdev(dev); if (retval) { SMSC_WARN(pdata, probe, "Error %i registering device", retval); - goto out_free_irq; + goto out_unset_drvdata_4; } else { SMSC_TRACE(pdata, probe, "Network interface: \"%s\"", dev->name); @@ -2500,14 +2321,12 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) out_unregister_netdev_5: unregister_netdev(dev); -out_free_irq: - free_irq(dev->irq, dev); -out_disable_resources: - (void)smsc911x_disable_resources(pdev); -out_return_resources: - smsc911x_free_resources(pdev); +out_unset_drvdata_4: platform_set_drvdata(pdev, NULL); + free_irq(dev->irq, dev); +out_unmap_io_3: iounmap(pdata->ioaddr); +out_free_netdev_2: free_netdev(dev); out_release_io_1: release_mem_region(res->start, resource_size(res)); diff --git a/trunk/drivers/net/ethernet/smsc/smsc911x.h b/trunk/drivers/net/ethernet/smsc/smsc911x.h index 938ecf290813..8d67aacf8867 100644 --- a/trunk/drivers/net/ethernet/smsc/smsc911x.h +++ b/trunk/drivers/net/ethernet/smsc/smsc911x.h @@ -401,8 +401,4 @@ #include #endif -#ifdef CONFIG_SMSC_PHY -#include -#endif - #endif /* __SMSC911X_H__ */ diff --git a/trunk/drivers/net/ethernet/smsc/smsc9420.c b/trunk/drivers/net/ethernet/smsc/smsc9420.c index a9efbdfe5302..edb24b0e337b 100644 --- 
a/trunk/drivers/net/ethernet/smsc/smsc9420.c +++ b/trunk/drivers/net/ethernet/smsc/smsc9420.c @@ -279,10 +279,9 @@ static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev, { struct smsc9420_pdata *pd = netdev_priv(netdev); - strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); - strlcpy(drvinfo->bus_info, pci_name(pd->pdev), - sizeof(drvinfo->bus_info)); - strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); + strcpy(drvinfo->driver, DRV_NAME); + strcpy(drvinfo->bus_info, pci_name(pd->pdev)); + strcpy(drvinfo->version, DRV_VERSION); } static u32 smsc9420_ethtool_get_msglevel(struct net_device *netdev) diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/Kconfig b/trunk/drivers/net/ethernet/stmicro/stmmac/Kconfig index 036428348faa..22745d7bf530 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -12,36 +12,11 @@ config STMMAC_ETH if STMMAC_ETH -config STMMAC_PLATFORM - tristate "STMMAC platform bus support" - depends on STMMAC_ETH - default y - ---help--- - This selects the platform specific bus support for - the stmmac device driver. This is the driver used - on many embedded STM platforms based on ARM and SuperH - processors. - If you have a controller with this interface, say Y or M here. - - If unsure, say N. - -config STMMAC_PCI - tristate "STMMAC support on PCI bus (EXPERIMENTAL)" - depends on STMMAC_ETH && PCI && EXPERIMENTAL - ---help--- - This is to select the Synopsys DWMAC available on PCI devices, - if you have a controller with this interface, say Y or M here. - - This PCI support is tested on XLINX XC2V3000 FF1152AMT0221 - D1215994A VIRTEX FPGA board. - - If unsure, say N. - config STMMAC_DEBUG_FS bool "Enable monitoring via sysFS " default n depends on STMMAC_ETH && DEBUG_FS - ---help--- + -- help The stmmac entry in /sys reports DMA TX/RX rings or (if supported) the HW cap register. 
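Several get_drvinfo hunks in this series (sis190, sis900, epic100, smc91c92_cs and smsc9420 above, and the stmmac ethtool code further down) replace bounded strlcpy() calls with plain strcpy(). For reference, a minimal sketch of the bounded form those hunks move away from; the foo_* names and version string are placeholders, not symbols from the drivers touched here.

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* Hypothetical ethtool callback illustrating the bounded-copy pattern.
 * The ethtool_drvinfo fields are fixed-size char arrays, so strlcpy()
 * with sizeof() cannot overrun them, unlike strcpy(). */
static void foo_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "foo", sizeof(info->driver));
	strlcpy(info->version, "0.1", sizeof(info->version));
	if (dev->dev.parent)
		strlcpy(info->bus_info, dev_name(dev->dev.parent),
			sizeof(info->bus_info));
}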
diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/Makefile b/trunk/drivers/net/ethernet/stmicro/stmmac/Makefile index bc965ac9e025..d7c45164ea79 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -2,8 +2,6 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o stmmac-$(CONFIG_STMMAC_RING) += ring_mode.o stmmac-$(CONFIG_STMMAC_CHAINED) += chain_mode.o -stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o -stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \ dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/common.h b/trunk/drivers/net/ethernet/stmicro/stmmac/common.h index d0b814ef0675..2cc119295821 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/common.h @@ -22,11 +22,7 @@ Author: Giuseppe Cavallaro *******************************************************************************/ -#include #include -#include -#include -#include #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) #define STMMAC_VLAN_TAG_USED #include @@ -319,8 +315,5 @@ extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], unsigned int high, unsigned int low); extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, unsigned int high, unsigned int low); - -extern void stmmac_set_mac(void __iomem *ioaddr, bool enable); - extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); extern const struct stmmac_ring_mode_ops ring_mode_ops; diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index f20aa12931d0..e25093510b0c 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -238,19 +238,6 @@ void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], writel(data, ioaddr + low); } -/* Enable disable MAC RX/TX */ -void stmmac_set_mac(void __iomem *ioaddr, bool enable) -{ - u32 value = readl(ioaddr + MAC_CTRL_REG); - - if (enable) - value |= MAC_RNABLE_RX | MAC_ENABLE_TX; - else - value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX); - - writel(value, ioaddr + MAC_CTRL_REG); -} - void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, unsigned int high, unsigned int low) { diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 120740020e2c..a140a8fbf051 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -20,8 +20,7 @@ Author: Giuseppe Cavallaro *******************************************************************************/ -#define STMMAC_RESOURCE_NAME "stmmaceth" -#define DRV_MODULE_VERSION "Dec_2011" +#define DRV_MODULE_VERSION "Oct_2011" #include #include #include "common.h" @@ -83,18 +82,8 @@ struct stmmac_priv { int hw_cap_support; }; -extern int phyaddr; - extern int stmmac_mdio_unregister(struct net_device *ndev); extern int stmmac_mdio_register(struct net_device *ndev); extern void stmmac_set_ethtool_ops(struct net_device *netdev); extern const struct stmmac_desc_ops enh_desc_ops; extern const struct stmmac_desc_ops ndesc_ops; - -int stmmac_freeze(struct net_device *ndev); -int stmmac_restore(struct net_device *ndev); -int stmmac_resume(struct net_device *ndev); -int 
stmmac_suspend(struct net_device *ndev); -int stmmac_dvr_remove(struct net_device *ndev); -struct stmmac_priv *stmmac_dvr_probe(struct device *device, - struct plat_stmmacenet_data *plat_dat); diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 9573303a706b..0395f9eba801 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -185,10 +185,9 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev, struct stmmac_priv *priv = netdev_priv(dev); if (priv->plat->has_gmac) - strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver)); + strcpy(info->driver, GMAC_ETHTOOL_NAME); else - strlcpy(info->driver, MAC100_ETHTOOL_NAME, - sizeof(info->driver)); + strcpy(info->driver, MAC100_ETHTOOL_NAME); strcpy(info->version, DRV_MODULE_VERSION); info->fw_version[0] = '\0'; @@ -459,7 +458,7 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) return 0; } -static const struct ethtool_ops stmmac_ethtool_ops = { +static struct ethtool_ops stmmac_ethtool_ops = { .begin = stmmac_check_if_running, .get_drvinfo = stmmac_ethtool_getdrvinfo, .get_settings = stmmac_ethtool_getsettings, diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 3738b4700548..72cd190b9c1a 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -28,8 +28,12 @@ https://bugzilla.stlinux.com/ *******************************************************************************/ +#include +#include #include #include +#include +#include #include #include #include @@ -48,6 +52,8 @@ #endif #include "stmmac.h" +#define STMMAC_RESOURCE_NAME "stmmaceth" + #undef STMMAC_DEBUG /*#define STMMAC_DEBUG*/ #ifdef STMMAC_DEBUG @@ -87,7 +93,7 @@ static int debug = -1; /* -1: default, 0: no output, 16: all */ module_param(debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)"); -int phyaddr = -1; +static int phyaddr = -1; module_param(phyaddr, int, S_IRUGO); MODULE_PARM_DESC(phyaddr, "Physical device address"); @@ -135,11 +141,6 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | static irqreturn_t stmmac_interrupt(int irq, void *dev_id); -#ifdef CONFIG_STMMAC_DEBUG_FS -static int stmmac_init_fs(struct net_device *dev); -static void stmmac_exit_fs(void); -#endif - /** * stmmac_verify_args - verify the driver parameters. * Description: it verifies if some wrong parameter is passed to the driver. @@ -344,6 +345,22 @@ static int stmmac_init_phy(struct net_device *dev) return 0; } +static inline void stmmac_enable_mac(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + MAC_CTRL_REG); + + value |= MAC_RNABLE_RX | MAC_ENABLE_TX; + writel(value, ioaddr + MAC_CTRL_REG); +} + +static inline void stmmac_disable_mac(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + MAC_CTRL_REG); + + value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX); + writel(value, ioaddr + MAC_CTRL_REG); +} + /** * display_ring * @p: pointer to the ring. @@ -869,53 +886,6 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv) return hw_cap; } -/** - * stmmac_mac_device_setup - * @dev : device pointer - * Description: this is to attach the GMAC or MAC 10/100 - * main core structures that will be completed during the - * open step. 
- */ -static int stmmac_mac_device_setup(struct net_device *dev) -{ - struct stmmac_priv *priv = netdev_priv(dev); - - struct mac_device_info *device; - - if (priv->plat->has_gmac) - device = dwmac1000_setup(priv->ioaddr); - else - device = dwmac100_setup(priv->ioaddr); - - if (!device) - return -ENOMEM; - - priv->hw = device; - priv->hw->ring = &ring_mode_ops; - - if (device_can_wakeup(priv->device)) { - priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ - enable_irq_wake(priv->wol_irq); - } - - return 0; -} - -static void stmmac_check_ether_addr(struct stmmac_priv *priv) -{ - /* verify if the MAC address is valid, in case of failures it - * generates a random MAC address */ - if (!is_valid_ether_addr(priv->dev->dev_addr)) { - priv->hw->mac->get_umac_addr((void __iomem *) - priv->dev->base_addr, - priv->dev->dev_addr, 0); - if (!is_valid_ether_addr(priv->dev->dev_addr)) - random_ether_addr(priv->dev->dev_addr); - } - pr_warning("%s: device MAC address %pM\n", priv->dev->name, - priv->dev->dev_addr); -} - /** * stmmac_open - open entry point of the driver * @dev : pointer to the device structure. @@ -930,28 +900,18 @@ static int stmmac_open(struct net_device *dev) struct stmmac_priv *priv = netdev_priv(dev); int ret; - /* MAC HW device setup */ - ret = stmmac_mac_device_setup(dev); - if (ret < 0) - return ret; - - stmmac_check_ether_addr(priv); + /* Check that the MAC address is valid. If its not, refuse + * to bring the device up. The user must specify an + * address using the following linux command: + * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */ + if (!is_valid_ether_addr(dev->dev_addr)) { + random_ether_addr(dev->dev_addr); + pr_warning("%s: generated random MAC address %pM\n", dev->name, + dev->dev_addr); + } stmmac_verify_args(); - /* Override with kernel parameters if supplied XXX CRS XXX - * this needs to have multiple instances */ - if ((phyaddr >= 0) && (phyaddr <= 31)) - priv->plat->phy_addr = phyaddr; - - /* MDIO bus Registration */ - ret = stmmac_mdio_register(dev); - if (ret < 0) { - pr_debug("%s: MDIO bus (id: %d) registration failed", - __func__, priv->plat->bus_id); - return ret; - } - #ifdef CONFIG_STMMAC_TIMER priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL); if (unlikely(priv->tm == NULL)) { @@ -1048,7 +1008,7 @@ static int stmmac_open(struct net_device *dev) } /* Enable the MAC Rx/Tx */ - stmmac_set_mac(priv->ioaddr, true); + stmmac_enable_mac(priv->ioaddr); /* Set the HW DMA mode and the COE */ stmmac_dma_operation_mode(priv); @@ -1059,11 +1019,6 @@ static int stmmac_open(struct net_device *dev) stmmac_mmc_setup(priv); -#ifdef CONFIG_STMMAC_DEBUG_FS - ret = stmmac_init_fs(dev); - if (ret < 0) - pr_warning("\tFailed debugFS registration"); -#endif /* Start the ball rolling... 
*/ DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name); priv->hw->dma->start_tx(priv->ioaddr); @@ -1136,15 +1091,10 @@ static int stmmac_release(struct net_device *dev) free_dma_desc_resources(priv); /* Disable the MAC Rx/Tx */ - stmmac_set_mac(priv->ioaddr, false); + stmmac_disable_mac(priv->ioaddr); netif_carrier_off(dev); -#ifdef CONFIG_STMMAC_DEBUG_FS - stmmac_exit_fs(); -#endif - stmmac_mdio_unregister(dev); - return 0; } @@ -1520,8 +1470,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static netdev_features_t stmmac_fix_features(struct net_device *dev, - netdev_features_t features) +static u32 stmmac_fix_features(struct net_device *dev, u32 features) { struct stmmac_priv *priv = netdev_priv(dev); @@ -1789,41 +1738,28 @@ static const struct net_device_ops stmmac_netdev_ops = { }; /** - * stmmac_dvr_probe - * @device: device pointer - * Description: this is the main probe function used to - * call the alloc_etherdev, allocate the priv structure. + * stmmac_probe - Initialization of the adapter . + * @dev : device pointer + * Description: The function initializes the network device structure for + * the STMMAC driver. It also calls the low level routines + * in order to init the HW (i.e. the DMA engine) */ -struct stmmac_priv *stmmac_dvr_probe(struct device *device, - struct plat_stmmacenet_data *plat_dat) +static int stmmac_probe(struct net_device *dev) { int ret = 0; - struct net_device *ndev = NULL; - struct stmmac_priv *priv; - - ndev = alloc_etherdev(sizeof(struct stmmac_priv)); - if (!ndev) { - pr_err("%s: ERROR: allocating the device\n", __func__); - return NULL; - } - - SET_NETDEV_DEV(ndev, device); - - priv = netdev_priv(ndev); - priv->device = device; - priv->dev = ndev; + struct stmmac_priv *priv = netdev_priv(dev); - ether_setup(ndev); + ether_setup(dev); - ndev->netdev_ops = &stmmac_netdev_ops; - stmmac_set_ethtool_ops(ndev); + dev->netdev_ops = &stmmac_netdev_ops; + stmmac_set_ethtool_ops(dev); - ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; - ndev->watchdog_timeo = msecs_to_jiffies(watchdog); + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + dev->features |= dev->hw_features | NETIF_F_HIGHDMA; + dev->watchdog_timeo = msecs_to_jiffies(watchdog); #ifdef STMMAC_VLAN_TAG_USED /* Both mac100 and gmac support receive VLAN tag detection */ - ndev->features |= NETIF_F_HW_VLAN_RX; + dev->features |= NETIF_F_HW_VLAN_RX; #endif priv->msg_enable = netif_msg_init(debug, default_msg_level); @@ -1831,60 +1767,248 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ priv->pause = pause; - priv->plat = plat_dat; - netif_napi_add(ndev, &priv->napi, stmmac_poll, 64); + netif_napi_add(dev, &priv->napi, stmmac_poll, 64); + + /* Get the MAC address */ + priv->hw->mac->get_umac_addr((void __iomem *) dev->base_addr, + dev->dev_addr, 0); + + if (!is_valid_ether_addr(dev->dev_addr)) + pr_warning("\tno valid MAC address;" + "please, use ifconfig or nwhwconfig!\n"); spin_lock_init(&priv->lock); spin_lock_init(&priv->tx_lock); - ret = register_netdev(ndev); + ret = register_netdev(dev); if (ret) { pr_err("%s: ERROR %i registering the device\n", __func__, ret); - goto error; + return -ENODEV; } DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n", - ndev->name, (ndev->features & NETIF_F_SG) ? "on" : "off", - (ndev->features & NETIF_F_IP_CSUM) ? 
"on" : "off"); + dev->name, (dev->features & NETIF_F_SG) ? "on" : "off", + (dev->features & NETIF_F_IP_CSUM) ? "on" : "off"); - return priv; + return ret; +} -error: - netif_napi_del(&priv->napi); +/** + * stmmac_mac_device_setup + * @dev : device pointer + * Description: select and initialise the mac device (mac100 or Gmac). + */ +static int stmmac_mac_device_setup(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + struct mac_device_info *device; + + if (priv->plat->has_gmac) { + dev->priv_flags |= IFF_UNICAST_FLT; + device = dwmac1000_setup(priv->ioaddr); + } else { + device = dwmac100_setup(priv->ioaddr); + } + if (!device) + return -ENOMEM; + + priv->hw = device; + priv->hw->ring = &ring_mode_ops; + + if (device_can_wakeup(priv->device)) { + priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ + enable_irq_wake(priv->wol_irq); + } + + return 0; +} + +/** + * stmmac_dvr_probe + * @pdev: platform device pointer + * Description: the driver is initialized through platform_device. + */ +static int stmmac_dvr_probe(struct platform_device *pdev) +{ + int ret = 0; + struct resource *res; + void __iomem *addr = NULL; + struct net_device *ndev = NULL; + struct stmmac_priv *priv = NULL; + struct plat_stmmacenet_data *plat_dat; + + pr_info("STMMAC driver:\n\tplatform registration... "); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + pr_info("\tdone!\n"); + + if (!request_mem_region(res->start, resource_size(res), + pdev->name)) { + pr_err("%s: ERROR: memory allocation failed" + "cannot get the I/O addr 0x%x\n", + __func__, (unsigned int)res->start); + return -EBUSY; + } + + addr = ioremap(res->start, resource_size(res)); + if (!addr) { + pr_err("%s: ERROR: memory mapping failed\n", __func__); + ret = -ENOMEM; + goto out_release_region; + } + + ndev = alloc_etherdev(sizeof(struct stmmac_priv)); + if (!ndev) { + pr_err("%s: ERROR: allocating the device\n", __func__); + ret = -ENOMEM; + goto out_unmap; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + + /* Get the MAC information */ + ndev->irq = platform_get_irq_byname(pdev, "macirq"); + if (ndev->irq == -ENXIO) { + pr_err("%s: ERROR: MAC IRQ configuration " + "information not found\n", __func__); + ret = -ENXIO; + goto out_free_ndev; + } + + priv = netdev_priv(ndev); + priv->device = &(pdev->dev); + priv->dev = ndev; + plat_dat = pdev->dev.platform_data; + + priv->plat = plat_dat; + + priv->ioaddr = addr; + + /* + * On some platforms e.g. 
SPEAr the wake up irq differs from the mac irq + * The external wake up irq can be passed through the platform code + * named as "eth_wake_irq" + * + * In case the wake up interrupt is not passed from the platform + * so the driver will continue to use the mac irq (ndev->irq) + */ + priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); + if (priv->wol_irq == -ENXIO) + priv->wol_irq = ndev->irq; + + platform_set_drvdata(pdev, ndev); + + /* Set the I/O base addr */ + ndev->base_addr = (unsigned long)addr; + + /* Custom initialisation */ + if (priv->plat->init) { + ret = priv->plat->init(pdev); + if (unlikely(ret)) + goto out_free_ndev; + } + + /* MAC HW device detection */ + ret = stmmac_mac_device_setup(ndev); + if (ret < 0) + goto out_plat_exit; + + /* Network Device Registration */ + ret = stmmac_probe(ndev); + if (ret < 0) + goto out_plat_exit; + + /* Override with kernel parameters if supplied XXX CRS XXX + * this needs to have multiple instances */ + if ((phyaddr >= 0) && (phyaddr <= 31)) + priv->plat->phy_addr = phyaddr; + + pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n" + "\tIO base addr: 0x%p)\n", ndev->name, pdev->name, + pdev->id, ndev->irq, addr); + + /* MDIO bus Registration */ + pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id); + ret = stmmac_mdio_register(ndev); + if (ret < 0) + goto out_unregister; + pr_debug("registered!\n"); + +#ifdef CONFIG_STMMAC_DEBUG_FS + ret = stmmac_init_fs(ndev); + if (ret < 0) + pr_warning("\tFailed debugFS registration"); +#endif + + return 0; + +out_unregister: unregister_netdev(ndev); +out_plat_exit: + if (priv->plat->exit) + priv->plat->exit(pdev); +out_free_ndev: free_netdev(ndev); + platform_set_drvdata(pdev, NULL); +out_unmap: + iounmap(addr); +out_release_region: + release_mem_region(res->start, resource_size(res)); - return NULL; + return ret; } /** * stmmac_dvr_remove - * @ndev: net device pointer + * @pdev: platform device pointer * Description: this function resets the TX/RX processes, disables the MAC RX/TX - * changes the link status, releases the DMA descriptor rings. + * changes the link status, releases the DMA descriptor rings, + * unregisters the MDIO bus and unmaps the allocated memory. 
*/ -int stmmac_dvr_remove(struct net_device *ndev) +static int stmmac_dvr_remove(struct platform_device *pdev) { + struct net_device *ndev = platform_get_drvdata(pdev); struct stmmac_priv *priv = netdev_priv(ndev); + struct resource *res; pr_info("%s:\n\tremoving driver", __func__); priv->hw->dma->stop_rx(priv->ioaddr); priv->hw->dma->stop_tx(priv->ioaddr); - stmmac_set_mac(priv->ioaddr, false); + stmmac_disable_mac(priv->ioaddr); + netif_carrier_off(ndev); + + stmmac_mdio_unregister(ndev); + + if (priv->plat->exit) + priv->plat->exit(pdev); + + platform_set_drvdata(pdev, NULL); unregister_netdev(ndev); + + iounmap((void *)priv->ioaddr); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, resource_size(res)); + +#ifdef CONFIG_STMMAC_DEBUG_FS + stmmac_exit_fs(); +#endif + free_netdev(ndev); return 0; } #ifdef CONFIG_PM -int stmmac_suspend(struct net_device *ndev) +static int stmmac_suspend(struct device *dev) { + struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); int dis_ic = 0; @@ -1918,14 +2042,15 @@ int stmmac_suspend(struct net_device *ndev) if (device_may_wakeup(priv->device)) priv->hw->mac->pmt(priv->ioaddr, priv->wolopts); else - stmmac_set_mac(priv->ioaddr, false); + stmmac_disable_mac(priv->ioaddr); spin_unlock(&priv->lock); return 0; } -int stmmac_resume(struct net_device *ndev) +static int stmmac_resume(struct device *dev) { + struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); if (!netif_running(ndev)) @@ -1944,7 +2069,7 @@ int stmmac_resume(struct net_device *ndev) netif_device_attach(ndev); /* Enable the MAC and DMA */ - stmmac_set_mac(priv->ioaddr, true); + stmmac_enable_mac(priv->ioaddr); priv->hw->dma->start_tx(priv->ioaddr); priv->hw->dma->start_rx(priv->ioaddr); @@ -1964,23 +2089,68 @@ int stmmac_resume(struct net_device *ndev) return 0; } -int stmmac_freeze(struct net_device *ndev) +static int stmmac_freeze(struct device *dev) { + struct net_device *ndev = dev_get_drvdata(dev); + if (!ndev || !netif_running(ndev)) return 0; return stmmac_release(ndev); } -int stmmac_restore(struct net_device *ndev) +static int stmmac_restore(struct device *dev) { + struct net_device *ndev = dev_get_drvdata(dev); + if (!ndev || !netif_running(ndev)) return 0; return stmmac_open(ndev); } + +static const struct dev_pm_ops stmmac_pm_ops = { + .suspend = stmmac_suspend, + .resume = stmmac_resume, + .freeze = stmmac_freeze, + .thaw = stmmac_restore, + .restore = stmmac_restore, +}; +#else +static const struct dev_pm_ops stmmac_pm_ops; #endif /* CONFIG_PM */ +static struct platform_driver stmmac_driver = { + .probe = stmmac_dvr_probe, + .remove = stmmac_dvr_remove, + .driver = { + .name = STMMAC_RESOURCE_NAME, + .owner = THIS_MODULE, + .pm = &stmmac_pm_ops, + }, +}; + +/** + * stmmac_init_module - Entry point for the driver + * Description: This function is the entry point for the driver. + */ +static int __init stmmac_init_module(void) +{ + int ret; + + ret = platform_driver_register(&stmmac_driver); + return ret; +} + +/** + * stmmac_cleanup_module - Cleanup routine for the driver + * Description: This function is the cleanup routine for the driver. 
+ */ +static void __exit stmmac_cleanup_module(void) +{ + platform_driver_unregister(&stmmac_driver); +} + #ifndef MODULE static int __init stmmac_cmdline_opt(char *str) { @@ -2040,6 +2210,9 @@ static int __init stmmac_cmdline_opt(char *str) __setup("stmmaceth=", stmmac_cmdline_opt); #endif -MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver"); +module_init(stmmac_init_module); +module_exit(stmmac_cleanup_module); + +MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet driver"); MODULE_AUTHOR("Giuseppe Cavallaro "); MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 51f441233962..9c3b9d5c3411 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -109,7 +109,6 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, */ static int stmmac_mdio_reset(struct mii_bus *bus) { -#if defined(CONFIG_STMMAC_PLATFORM) struct net_device *ndev = bus->priv; struct stmmac_priv *priv = netdev_priv(ndev); unsigned int mii_address = priv->hw->mii.addr; @@ -124,7 +123,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus) * on MDC, so perform a dummy mdio read. */ writel(0, priv->ioaddr + mii_address); -#endif + return 0; } diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c deleted file mode 100644 index 54a819a36487..000000000000 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ /dev/null @@ -1,221 +0,0 @@ -/******************************************************************************* - This contains the functions to handle the pci driver. - - Copyright (C) 2011-2012 Vayavya Labs Pvt Ltd - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Author: Rayagond Kokatanur - Author: Giuseppe Cavallaro -*******************************************************************************/ - -#include -#include "stmmac.h" - -struct plat_stmmacenet_data plat_dat; -struct stmmac_mdio_bus_data mdio_data; - -static void stmmac_default_data(void) -{ - memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data)); - plat_dat.bus_id = 1; - plat_dat.phy_addr = 0; - plat_dat.interface = PHY_INTERFACE_MODE_GMII; - plat_dat.pbl = 32; - plat_dat.clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ - plat_dat.has_gmac = 1; - plat_dat.force_sf_dma_mode = 1; - - mdio_data.bus_id = 1; - mdio_data.phy_reset = NULL; - mdio_data.phy_mask = 0; - plat_dat.mdio_bus_data = &mdio_data; -} - -/** - * stmmac_pci_probe - * - * @pdev: pci device pointer - * @id: pointer to table of device id/id's. - * - * Description: This probing function gets called for all PCI devices which - * match the ID table and are not "owned" by other driver yet. 
This function - * gets passed a "struct pci_dev *" for each device whose entry in the ID table - * matches the device. The probe functions returns zero when the driver choose - * to take "ownership" of the device or an error code(-ve no) otherwise. - */ -static int __devinit stmmac_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *id) -{ - int ret = 0; - void __iomem *addr = NULL; - struct stmmac_priv *priv = NULL; - int i; - - /* Enable pci device */ - ret = pci_enable_device(pdev); - if (ret) { - pr_err("%s : ERROR: failed to enable %s device\n", __func__, - pci_name(pdev)); - return ret; - } - if (pci_request_regions(pdev, STMMAC_RESOURCE_NAME)) { - pr_err("%s: ERROR: failed to get PCI region\n", __func__); - ret = -ENODEV; - goto err_out_req_reg_failed; - } - - /* Get the base address of device */ - for (i = 0; i <= 5; i++) { - if (pci_resource_len(pdev, i) == 0) - continue; - addr = pci_iomap(pdev, i, 0); - if (addr == NULL) { - pr_err("%s: ERROR: cannot map regiser memory, aborting", - __func__); - ret = -EIO; - goto err_out_map_failed; - } - break; - } - pci_set_master(pdev); - - stmmac_default_data(); - - priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat); - if (!priv) { - pr_err("%s: main drivr probe failed", __func__); - goto err_out; - } - priv->ioaddr = addr; - priv->dev->base_addr = (unsigned long)addr; - priv->dev->irq = pdev->irq; - priv->wol_irq = pdev->irq; - - pci_set_drvdata(pdev, priv->dev); - - pr_debug("STMMAC platform driver registration completed"); - - return 0; - -err_out: - pci_clear_master(pdev); -err_out_map_failed: - pci_release_regions(pdev); -err_out_req_reg_failed: - pci_disable_device(pdev); - - return ret; -} - -/** - * stmmac_dvr_remove - * - * @pdev: platform device pointer - * Description: this function calls the main to free the net resources - * and releases the PCI resources. - */ -static void __devexit stmmac_pci_remove(struct pci_dev *pdev) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct stmmac_priv *priv = netdev_priv(ndev); - - stmmac_dvr_remove(ndev); - - pci_set_drvdata(pdev, NULL); - pci_iounmap(pdev, priv->ioaddr); - pci_release_regions(pdev); - pci_disable_device(pdev); -} - -#ifdef CONFIG_PM -static int stmmac_pci_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - int ret; - - ret = stmmac_suspend(ndev); - pci_save_state(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - - return ret; -} - -static int stmmac_pci_resume(struct pci_dev *pdev) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - - return stmmac_resume(ndev); -} -#endif - -#define STMMAC_VENDOR_ID 0x700 -#define STMMAC_DEVICE_ID 0x1108 - -static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = { - { - PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)}, { - } -}; - -MODULE_DEVICE_TABLE(pci, stmmac_id_table); - -static struct pci_driver stmmac_driver = { - .name = STMMAC_RESOURCE_NAME, - .id_table = stmmac_id_table, - .probe = stmmac_pci_probe, - .remove = __devexit_p(stmmac_pci_remove), -#ifdef CONFIG_PM - .suspend = stmmac_pci_suspend, - .resume = stmmac_pci_resume, -#endif -}; - -/** - * stmmac_init_module - Entry point for the driver - * Description: This function is the entry point for the driver. 
- */ -static int __init stmmac_init_module(void) -{ - int ret; - - ret = pci_register_driver(&stmmac_driver); - if (ret < 0) - pr_err("%s: ERROR: driver registration failed\n", __func__); - - return ret; -} - -/** - * stmmac_cleanup_module - Cleanup routine for the driver - * Description: This function is the cleanup routine for the driver. - */ -static void __exit stmmac_cleanup_module(void) -{ - pci_unregister_driver(&stmmac_driver); -} - -module_init(stmmac_init_module); -module_exit(stmmac_cleanup_module); - -MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver"); -MODULE_AUTHOR("Rayagond Kokatanur "); -MODULE_AUTHOR("Giuseppe Cavallaro "); -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c deleted file mode 100644 index 7b1594f4944e..000000000000 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ /dev/null @@ -1,198 +0,0 @@ -/******************************************************************************* - This contains the functions to handle the platform driver. - - Copyright (C) 2007-2011 STMicroelectronics Ltd - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Author: Giuseppe Cavallaro -*******************************************************************************/ - -#include -#include -#include "stmmac.h" - -/** - * stmmac_pltfr_probe - * @pdev: platform device pointer - * Description: platform_device probe function. It allocates - * the necessary resources and invokes the main to init - * the net device, register the mdio bus etc. 
- */ -static int stmmac_pltfr_probe(struct platform_device *pdev) -{ - int ret = 0; - struct resource *res; - void __iomem *addr = NULL; - struct stmmac_priv *priv = NULL; - struct plat_stmmacenet_data *plat_dat; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENODEV; - - if (!request_mem_region(res->start, resource_size(res), pdev->name)) { - pr_err("%s: ERROR: memory allocation failed" - "cannot get the I/O addr 0x%x\n", - __func__, (unsigned int)res->start); - return -EBUSY; - } - - addr = ioremap(res->start, resource_size(res)); - if (!addr) { - pr_err("%s: ERROR: memory mapping failed", __func__); - ret = -ENOMEM; - goto out_release_region; - } - plat_dat = pdev->dev.platform_data; - priv = stmmac_dvr_probe(&(pdev->dev), plat_dat); - if (!priv) { - pr_err("%s: main drivr probe failed", __func__); - goto out_release_region; - } - - priv->ioaddr = addr; - /* Set the I/O base addr */ - priv->dev->base_addr = (unsigned long)addr; - - /* Get the MAC information */ - priv->dev->irq = platform_get_irq_byname(pdev, "macirq"); - if (priv->dev->irq == -ENXIO) { - pr_err("%s: ERROR: MAC IRQ configuration " - "information not found\n", __func__); - ret = -ENXIO; - goto out_unmap; - } - - /* - * On some platforms e.g. SPEAr the wake up irq differs from the mac irq - * The external wake up irq can be passed through the platform code - * named as "eth_wake_irq" - * - * In case the wake up interrupt is not passed from the platform - * so the driver will continue to use the mac irq (ndev->irq) - */ - priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); - if (priv->wol_irq == -ENXIO) - priv->wol_irq = priv->dev->irq; - - platform_set_drvdata(pdev, priv->dev); - - /* Custom initialisation */ - if (priv->plat->init) { - ret = priv->plat->init(pdev); - if (unlikely(ret)) - goto out_unmap; - } - - pr_debug("STMMAC platform driver registration completed"); - - return 0; - -out_unmap: - iounmap(addr); - platform_set_drvdata(pdev, NULL); - -out_release_region: - release_mem_region(res->start, resource_size(res)); - - return ret; -} - -/** - * stmmac_pltfr_remove - * @pdev: platform device pointer - * Description: this function calls the main to free the net resources - * and calls the platforms hook and release the resources (e.g. mem). 
- */ -static int stmmac_pltfr_remove(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct stmmac_priv *priv = netdev_priv(ndev); - struct resource *res; - int ret = stmmac_dvr_remove(ndev); - - if (priv->plat->exit) - priv->plat->exit(pdev); - - if (priv->plat->exit) - priv->plat->exit(pdev); - - platform_set_drvdata(pdev, NULL); - - iounmap((void *)priv->ioaddr); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, resource_size(res)); - - return ret; -} - -#ifdef CONFIG_PM -static int stmmac_pltfr_suspend(struct device *dev) -{ - struct net_device *ndev = dev_get_drvdata(dev); - - return stmmac_suspend(ndev); -} - -static int stmmac_pltfr_resume(struct device *dev) -{ - struct net_device *ndev = dev_get_drvdata(dev); - - return stmmac_resume(ndev); -} - -int stmmac_pltfr_freeze(struct device *dev) -{ - struct net_device *ndev = dev_get_drvdata(dev); - - return stmmac_freeze(ndev); -} - -int stmmac_pltfr_restore(struct device *dev) -{ - struct net_device *ndev = dev_get_drvdata(dev); - - return stmmac_restore(ndev); -} - -static const struct dev_pm_ops stmmac_pltfr_pm_ops = { - .suspend = stmmac_pltfr_suspend, - .resume = stmmac_pltfr_resume, - .freeze = stmmac_pltfr_freeze, - .thaw = stmmac_pltfr_restore, - .restore = stmmac_pltfr_restore, -}; -#else -static const struct dev_pm_ops stmmac_pltfr_pm_ops; -#endif /* CONFIG_PM */ - -static struct platform_driver stmmac_driver = { - .probe = stmmac_pltfr_probe, - .remove = stmmac_pltfr_remove, - .driver = { - .name = STMMAC_RESOURCE_NAME, - .owner = THIS_MODULE, - .pm = &stmmac_pltfr_pm_ops, - }, -}; - -module_platform_driver(stmmac_driver); - -MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver"); -MODULE_AUTHOR("Giuseppe Cavallaro "); -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/net/ethernet/sun/cassini.c b/trunk/drivers/net/ethernet/sun/cassini.c index f10665f594c4..fd40988c19a6 100644 --- a/trunk/drivers/net/ethernet/sun/cassini.c +++ b/trunk/drivers/net/ethernet/sun/cassini.c @@ -4532,9 +4532,10 @@ static void cas_set_multicast(struct net_device *dev) static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct cas *cp = netdev_priv(dev); - strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info)); + strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN); + strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN); + info->fw_version[0] = '\0'; + strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN); info->regdump_len = cp->casreg_len < CAS_MAX_REGS ? 
cp->casreg_len : CAS_MAX_REGS; info->n_stats = CAS_NUM_STAT_KEYS; diff --git a/trunk/drivers/net/ethernet/sun/niu.c b/trunk/drivers/net/ethernet/sun/niu.c index cf433931304f..73c708107a37 100644 --- a/trunk/drivers/net/ethernet/sun/niu.c +++ b/trunk/drivers/net/ethernet/sun/niu.c @@ -1151,8 +1151,19 @@ static int link_status_mii(struct niu *np, int *link_up_p) supported |= SUPPORTED_1000baseT_Full; lp->supported = supported; - advertising = mii_adv_to_ethtool_adv_t(advert); - advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000); + advertising = 0; + if (advert & ADVERTISE_10HALF) + advertising |= ADVERTISED_10baseT_Half; + if (advert & ADVERTISE_10FULL) + advertising |= ADVERTISED_10baseT_Full; + if (advert & ADVERTISE_100HALF) + advertising |= ADVERTISED_100baseT_Half; + if (advert & ADVERTISE_100FULL) + advertising |= ADVERTISED_100baseT_Full; + if (ctrl1000 & ADVERTISE_1000HALF) + advertising |= ADVERTISED_1000baseT_Half; + if (ctrl1000 & ADVERTISE_1000FULL) + advertising |= ADVERTISED_1000baseT_Full; if (bmcr & BMCR_ANENABLE) { int neg, neg1000; @@ -3598,7 +3609,6 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx) static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) { struct netdev_queue *txq; - unsigned int tx_bytes; u16 pkt_cnt, tmp; int cons, index; u64 cs; @@ -3621,18 +3631,12 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) netif_printk(np, tx_done, KERN_DEBUG, np->dev, "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons); - tx_bytes = 0; - tmp = pkt_cnt; - while (tmp--) { - tx_bytes += rp->tx_buffs[cons].skb->len; + while (pkt_cnt--) cons = release_tx_packet(np, rp, cons); - } rp->cons = cons; smp_mb(); - netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes); - out: if (unlikely(netif_tx_queue_stopped(txq) && (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { @@ -4333,7 +4337,6 @@ static void niu_free_channels(struct niu *np) struct tx_ring_info *rp = &np->tx_rings[i]; niu_free_tx_ring_info(np, rp); - netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i)); } kfree(np->tx_rings); np->tx_rings = NULL; @@ -6739,8 +6742,6 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb, prod = NEXT_TX(rp, prod); } - netdev_tx_sent_queue(txq, skb->len); - if (prod < rp->prod) rp->wrap_bit ^= TX_RING_KICK_WRAP; rp->prod = prod; @@ -6822,13 +6823,12 @@ static void niu_get_drvinfo(struct net_device *dev, struct niu *np = netdev_priv(dev); struct niu_vpd *vpd = &np->vpd; - strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); - snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d", + strcpy(info->driver, DRV_MODULE_NAME); + strcpy(info->version, DRV_MODULE_VERSION); + sprintf(info->fw_version, "%d.%d", vpd->fcode_major, vpd->fcode_minor); if (np->parent->plat_type != PLAT_TYPE_NIU) - strlcpy(info->bus_info, pci_name(np->pdev), - sizeof(info->bus_info)); + strcpy(info->bus_info, pci_name(np->pdev)); } static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) @@ -8589,11 +8589,9 @@ static int __devinit phy_record(struct niu_parent *parent, if (dev_id_1 < 0 || dev_id_2 < 0) return 0; if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) { - /* Because of the NIU_PHY_ID_MASK being applied, the 8704 - * test covers the 8706 as well. 
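The niu_tx_work()/niu_start_xmit() hunks above strip out byte queue limits (BQL) accounting. A hedged sketch of the pairing they remove follows; the example_* helpers are hypothetical, only the netdev_tx_*_queue() calls are real kernel API:

/* Sketch only: bytes are reported when a frame is handed to the hardware and
 * credited back when its completion is reaped; the accounting is reset when
 * the ring is torn down.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_xmit_account(struct netdev_queue *txq, struct sk_buff *skb)
{
	netdev_tx_sent_queue(txq, skb->len);			/* in ndo_start_xmit */
}

static void example_tx_reclaim_account(struct netdev_queue *txq,
				       unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(txq, pkts, bytes);		/* in the TX clean path */
}

static void example_ring_teardown(struct net_device *dev, unsigned int ring)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev, ring));	/* when freeing the ring */
}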
- */ if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) && - ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011)) + ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) && + ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706)) return 0; } else { if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R) diff --git a/trunk/drivers/net/ethernet/sun/sunbmac.c b/trunk/drivers/net/ethernet/sun/sunbmac.c index 220f724c3377..0d8cfd9ea053 100644 --- a/trunk/drivers/net/ethernet/sun/sunbmac.c +++ b/trunk/drivers/net/ethernet/sun/sunbmac.c @@ -1293,4 +1293,15 @@ static struct platform_driver bigmac_sbus_driver = { .remove = __devexit_p(bigmac_sbus_remove), }; -module_platform_driver(bigmac_sbus_driver); +static int __init bigmac_init(void) +{ + return platform_driver_register(&bigmac_sbus_driver); +} + +static void __exit bigmac_exit(void) +{ + platform_driver_unregister(&bigmac_sbus_driver); +} + +module_init(bigmac_init); +module_exit(bigmac_exit); diff --git a/trunk/drivers/net/ethernet/sun/sungem.c b/trunk/drivers/net/ethernet/sun/sungem.c index 31441a870b0b..ceab215bb4a3 100644 --- a/trunk/drivers/net/ethernet/sun/sungem.c +++ b/trunk/drivers/net/ethernet/sun/sungem.c @@ -2517,9 +2517,9 @@ static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info { struct gem *gp = netdev_priv(dev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, pci_name(gp->pdev)); } static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) diff --git a/trunk/drivers/net/ethernet/sun/sunhme.c b/trunk/drivers/net/ethernet/sun/sunhme.c index 09c518655db2..cf14ab9db576 100644 --- a/trunk/drivers/net/ethernet/sun/sunhme.c +++ b/trunk/drivers/net/ethernet/sun/sunhme.c @@ -2457,11 +2457,11 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info { struct happy_meal *hp = netdev_priv(dev); - strlcpy(info->driver, "sunhme", sizeof(info->driver)); - strlcpy(info->version, "2.02", sizeof(info->version)); + strcpy(info->driver, "sunhme"); + strcpy(info->version, "2.02"); if (hp->happy_flags & HFLAG_PCI) { struct pci_dev *pdev = hp->happy_dev; - strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info)); + strcpy(info->bus_info, pci_name(pdev)); } #ifdef CONFIG_SBUS else { @@ -2469,8 +2469,7 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info struct platform_device *op = hp->happy_dev; regs = of_get_property(op->dev.of_node, "regs", NULL); if (regs) - snprintf(info->bus_info, sizeof(info->bus_info), - "SBUS:%d", + sprintf(info->bus_info, "SBUS:%d", regs->which_io); } #endif @@ -2850,7 +2849,7 @@ static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int i static int is_quattro_p(struct pci_dev *pdev) { struct pci_dev *busdev = pdev->bus->self; - struct pci_dev *this_pdev; + struct list_head *tmp; int n_hmes; if (busdev == NULL || @@ -2859,10 +2858,15 @@ static int is_quattro_p(struct pci_dev *pdev) return 0; n_hmes = 0; - list_for_each_entry(this_pdev, &pdev->bus->devices, bus_list) { + tmp = pdev->bus->devices.next; + while (tmp != &pdev->bus->devices) { + struct pci_dev *this_pdev = pci_dev_b(tmp); + if (this_pdev->vendor == PCI_VENDOR_ID_SUN && this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL) n_hmes++; + + tmp = tmp->next; } if (n_hmes != 4) diff --git 
a/trunk/drivers/net/ethernet/tehuti/tehuti.c b/trunk/drivers/net/ethernet/tehuti/tehuti.c index 4b19e9b0606b..3a90af6d111c 100644 --- a/trunk/drivers/net/ethernet/tehuti/tehuti.c +++ b/trunk/drivers/net/ethernet/tehuti/tehuti.c @@ -727,10 +727,9 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable) * @ndev network device * @vid VLAN vid to add */ -static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid) +static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid) { __bdx_vlan_rx_vid(ndev, vid, 1); - return 0; } /* @@ -738,10 +737,9 @@ static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid) * @ndev network device * @vid VLAN vid to kill */ -static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid) +static void bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid) { __bdx_vlan_rx_vid(ndev, vid, 0); - return 0; } /** diff --git a/trunk/drivers/net/ethernet/ti/davinci_emac.c b/trunk/drivers/net/ethernet/ti/davinci_emac.c index 794ac30a577b..815c7970261b 100644 --- a/trunk/drivers/net/ethernet/ti/davinci_emac.c +++ b/trunk/drivers/net/ethernet/ti/davinci_emac.c @@ -115,7 +115,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1"; #define EMAC_DEF_TX_CH (0) /* Default 0th channel */ #define EMAC_DEF_RX_CH (0) /* Default 0th channel */ #define EMAC_DEF_RX_NUM_DESC (128) -#define EMAC_DEF_TX_NUM_DESC (128) #define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */ #define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */ #define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */ @@ -337,7 +336,6 @@ struct emac_priv { u32 mac_hash2; u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS]; u32 rx_addr_type; - atomic_t cur_tx; const char *phy_id; struct phy_device *phydev; spinlock_t lock; @@ -1046,9 +1044,6 @@ static void emac_tx_handler(void *token, int len, int status) { struct sk_buff *skb = token; struct net_device *ndev = skb->dev; - struct emac_priv *priv = netdev_priv(ndev); - - atomic_dec(&priv->cur_tx); if (unlikely(netif_queue_stopped(ndev))) netif_start_queue(ndev); @@ -1097,9 +1092,6 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev) goto fail_tx; } - if (atomic_inc_return(&priv->cur_tx) >= EMAC_DEF_TX_NUM_DESC) - netif_stop_queue(ndev); - return NETDEV_TX_OK; fail_tx: diff --git a/trunk/drivers/net/ethernet/tile/tilepro.c b/trunk/drivers/net/ethernet/tile/tilepro.c index 6b75063988ec..1187a1169eb2 100644 --- a/trunk/drivers/net/ethernet/tile/tilepro.c +++ b/trunk/drivers/net/ethernet/tile/tilepro.c @@ -1256,7 +1256,7 @@ static void tile_net_stop_aux(struct net_device *dev) sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0) panic("Failed to stop LIPP/LEPP!\n"); - priv->partly_opened = false; + priv->partly_opened = 0; } @@ -1507,7 +1507,7 @@ static int tile_net_open(struct net_device *dev) priv->network_cpus_count, priv->network_cpus_credits); #endif - priv->partly_opened = true; + priv->partly_opened = 1; } else { /* FIXME: Is this possible? 
*/ diff --git a/trunk/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/trunk/drivers/net/ethernet/toshiba/ps3_gelic_net.c index 5ee82a77723b..7bf1e2015784 100644 --- a/trunk/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/trunk/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -640,7 +640,7 @@ static inline void gelic_card_disable_rxdmac(struct gelic_card *card) int status; /* this hvc blocks until the DMA in progress really stopped */ - status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card)); + status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card), 0); if (status) dev_err(ctodev(card), "lv1_net_stop_rx_dma failed, %d\n", status); @@ -658,7 +658,7 @@ static inline void gelic_card_disable_txdmac(struct gelic_card *card) int status; /* this hvc blocks until the DMA in progress really stopped */ - status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card)); + status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card), 0); if (status) dev_err(ctodev(card), "lv1_net_stop_tx_dma failed, status=%d\n", status); diff --git a/trunk/drivers/net/ethernet/tundra/tsi108_eth.c b/trunk/drivers/net/ethernet/tundra/tsi108_eth.c index a9ce01bafd20..a8df7eca0956 100644 --- a/trunk/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/trunk/drivers/net/ethernet/tundra/tsi108_eth.c @@ -1688,6 +1688,18 @@ static void tsi108_timed_checker(unsigned long dev_ptr) mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL); } +static int tsi108_ether_init(void) +{ + int ret; + ret = platform_driver_register (&tsi_eth_driver); + if (ret < 0){ + printk("tsi108_ether_init: error initializing ethernet " + "device\n"); + return ret; + } + return 0; +} + static int tsi108_ether_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); @@ -1702,7 +1714,13 @@ static int tsi108_ether_remove(struct platform_device *pdev) return 0; } -module_platform_driver(tsi_eth_driver); +static void tsi108_ether_exit(void) +{ + platform_driver_unregister(&tsi_eth_driver); +} + +module_init(tsi108_ether_init); +module_exit(tsi108_ether_exit); MODULE_AUTHOR("Tundra Semiconductor Corporation"); MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver"); diff --git a/trunk/drivers/net/ethernet/via/via-rhine.c b/trunk/drivers/net/ethernet/via/via-rhine.c index 5c4983b2870a..f34dd99fe579 100644 --- a/trunk/drivers/net/ethernet/via/via-rhine.c +++ b/trunk/drivers/net/ethernet/via/via-rhine.c @@ -35,7 +35,6 @@ #define DRV_VERSION "1.5.0" #define DRV_RELDATE "2010-10-09" -#include /* A few user-configurable values. These may be modified when a driver module is loaded. */ @@ -56,7 +55,7 @@ static int rx_copybreak; /* Work-around for broken BIOSes: they are unable to get the chip back out of power state D3 so PXE booting fails. 
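Several hunks in this patch (sunbmac, tsi108, and the Xilinx and IrDA drivers further down) replace module_platform_driver() with open-coded init/exit pairs. As a rough sketch, not the macro's exact definition, module_platform_driver() condenses exactly this boilerplate for a driver whose module hooks do nothing else; example_driver is hypothetical and would normally be fully defined:

#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver example_driver;	/* hypothetical, normally fully initialised */

static int __init example_driver_init(void)
{
	return platform_driver_register(&example_driver);
}
module_init(example_driver_init);

static void __exit example_driver_exit(void)
{
	platform_driver_unregister(&example_driver);
}
module_exit(example_driver_exit);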
bootparam(7): via-rhine.avoid_D3=1 */ -static bool avoid_D3; +static int avoid_D3; /* * In case you are looking for 'options[]' or 'full_duplex[]', they @@ -489,8 +488,8 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static const struct ethtool_ops netdev_ethtool_ops; static int rhine_close(struct net_device *dev); static void rhine_shutdown (struct pci_dev *pdev); -static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid); -static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid); +static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid); +static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid); static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr); static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr); static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask); @@ -1262,7 +1261,7 @@ static void rhine_update_vcam(struct net_device *dev) rhine_set_vlan_cam_mask(ioaddr, vCAMmask); } -static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) { struct rhine_private *rp = netdev_priv(dev); @@ -1270,10 +1269,9 @@ static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) set_bit(vid, rp->active_vlans); rhine_update_vcam(dev); spin_unlock_irq(&rp->lock); - return 0; } -static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) { struct rhine_private *rp = netdev_priv(dev); @@ -1281,7 +1279,6 @@ static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) clear_bit(vid, rp->active_vlans); rhine_update_vcam(dev); spin_unlock_irq(&rp->lock); - return 0; } static void init_registers(struct net_device *dev) @@ -2012,9 +2009,9 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i { struct rhine_private *rp = netdev_priv(dev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, pci_name(rp->pdev)); } static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) @@ -2323,7 +2320,7 @@ static int __init rhine_init(void) #endif if (dmi_check_system(rhine_dmi_table)) { /* these BIOSes fail at PXE boot if chip is in D3 */ - avoid_D3 = true; + avoid_D3 = 1; pr_warn("Broken BIOS detected, avoid_D3 enabled\n"); } else if (avoid_D3) diff --git a/trunk/drivers/net/ethernet/via/via-velocity.c b/trunk/drivers/net/ethernet/via/via-velocity.c index 4128d6b8cc28..4535d7cc848e 100644 --- a/trunk/drivers/net/ethernet/via/via-velocity.c +++ b/trunk/drivers/net/ethernet/via/via-velocity.c @@ -522,7 +522,7 @@ static void velocity_init_cam_filter(struct velocity_info *vptr) mac_set_vlan_cam_mask(regs, vptr->vCAMmask); } -static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) { struct velocity_info *vptr = netdev_priv(dev); @@ -530,10 +530,9 @@ static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) set_bit(vid, vptr->active_vlans); velocity_init_cam_filter(vptr); spin_unlock_irq(&vptr->lock); - return 0; } -static int 
velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) { struct velocity_info *vptr = netdev_priv(dev); @@ -541,7 +540,6 @@ static int velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) clear_bit(vid, vptr->active_vlans); velocity_init_cam_filter(vptr); spin_unlock_irq(&vptr->lock); - return 0; } static void velocity_init_rx_ring_indexes(struct velocity_info *vptr) @@ -3272,9 +3270,9 @@ static int velocity_set_settings(struct net_device *dev, static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct velocity_info *vptr = netdev_priv(dev); - strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver)); - strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(vptr->pdev), sizeof(info->bus_info)); + strcpy(info->driver, VELOCITY_NAME); + strcpy(info->version, VELOCITY_VERSION); + strcpy(info->bus_info, pci_name(vptr->pdev)); } static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) diff --git a/trunk/drivers/net/ethernet/xilinx/ll_temac_main.c b/trunk/drivers/net/ethernet/xilinx/ll_temac_main.c index f21addb1db95..2681b53820ee 100644 --- a/trunk/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/trunk/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -237,7 +237,7 @@ static int temac_dma_bd_init(struct net_device *ndev) struct sk_buff *skb; int i; - lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL); + lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL); if (!lp->rx_skb) { dev_err(&ndev->dev, "can't allocate memory for DMA RX buffer\n"); @@ -920,26 +920,12 @@ temac_poll_controller(struct net_device *ndev) } #endif -static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) -{ - struct temac_local *lp = netdev_priv(ndev); - - if (!netif_running(ndev)) - return -EINVAL; - - if (!lp->phy_dev) - return -EINVAL; - - return phy_mii_ioctl(lp->phy_dev, rq, cmd); -} - static const struct net_device_ops temac_netdev_ops = { .ndo_open = temac_open, .ndo_stop = temac_stop, .ndo_start_xmit = temac_start_xmit, .ndo_set_mac_address = netdev_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = temac_ioctl, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = temac_poll_controller, #endif @@ -1091,7 +1077,7 @@ static int __devinit temac_of_probe(struct platform_device *op) of_node_put(np); /* Finished with the DMA node; drop the reference */ - if (!lp->rx_irq || !lp->tx_irq) { + if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) { dev_err(&op->dev, "could not determine irqs\n"); rc = -ENOMEM; goto err_iounmap_2; @@ -1181,7 +1167,17 @@ static struct platform_driver temac_of_driver = { }, }; -module_platform_driver(temac_of_driver); +static int __init temac_init(void) +{ + return platform_driver_register(&temac_of_driver); +} +module_init(temac_init); + +static void __exit temac_exit(void) +{ + platform_driver_unregister(&temac_of_driver); +} +module_exit(temac_exit); MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver"); MODULE_AUTHOR("Yoshio Kashiwagi"); diff --git a/trunk/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/trunk/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 79013e5731a5..8018d7d045b0 100644 --- a/trunk/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/trunk/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -662,7 +662,7 @@ static void xemaclite_rx_handler(struct net_device *dev) */ static 
irqreturn_t xemaclite_interrupt(int irq, void *dev_id) { - bool tx_complete = false; + bool tx_complete = 0; struct net_device *dev = dev_id; struct net_local *lp = netdev_priv(dev); void __iomem *base_addr = lp->base_addr; @@ -683,7 +683,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id) tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; out_be32(base_addr + XEL_TSR_OFFSET, tx_status); - tx_complete = true; + tx_complete = 1; } /* Check if the Transmission for the second buffer is completed */ @@ -695,7 +695,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id) out_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET, tx_status); - tx_complete = true; + tx_complete = 1; } /* If there was a Tx interrupt, call the Tx Handler */ @@ -1129,7 +1129,7 @@ static int __devinit xemaclite_of_probe(struct platform_device *ofdev) /* Get IRQ for the device */ rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq); - if (!rc) { + if (rc == NO_IRQ) { dev_err(dev, "no IRQ found\n"); return rc; } @@ -1303,7 +1303,27 @@ static struct platform_driver xemaclite_of_driver = { .remove = __devexit_p(xemaclite_of_remove), }; -module_platform_driver(xemaclite_of_driver); +/** + * xgpiopss_init - Initial driver registration call + * + * Return: 0 upon success, or a negative error upon failure. + */ +static int __init xemaclite_init(void) +{ + /* No kernel boot options used, we just need to register the driver */ + return platform_driver_register(&xemaclite_of_driver); +} + +/** + * xemaclite_cleanup - Driver un-registration call + */ +static void __exit xemaclite_cleanup(void) +{ + platform_driver_unregister(&xemaclite_of_driver); +} + +module_init(xemaclite_init); +module_exit(xemaclite_cleanup); MODULE_AUTHOR("Xilinx, Inc."); MODULE_DESCRIPTION("Xilinx Ethernet MAC Lite driver"); diff --git a/trunk/drivers/net/ethernet/xircom/xirc2ps_cs.c b/trunk/drivers/net/ethernet/xircom/xirc2ps_cs.c index 33979c3ac943..bbe8b7dbf3f3 100644 --- a/trunk/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/trunk/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -1411,7 +1411,7 @@ do_open(struct net_device *dev) static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strlcpy(info->driver, "xirc2ps_cs", sizeof(info->driver)); + strcpy(info->driver, "xirc2ps_cs"); sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); } diff --git a/trunk/drivers/net/ifb.c b/trunk/drivers/net/ifb.c index e05b645bbc32..46b5f5fd686b 100644 --- a/trunk/drivers/net/ifb.c +++ b/trunk/drivers/net/ifb.c @@ -164,7 +164,7 @@ static const struct net_device_ops ifb_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; -#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \ +#define IFB_FEATURES (NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \ NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \ NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX) diff --git a/trunk/drivers/net/irda/bfin_sir.c b/trunk/drivers/net/irda/bfin_sir.c index a561ae44a9ac..9d4ce1aba10c 100644 --- a/trunk/drivers/net/irda/bfin_sir.c +++ b/trunk/drivers/net/irda/bfin_sir.c @@ -806,7 +806,18 @@ static struct platform_driver bfin_ir_driver = { }, }; -module_platform_driver(bfin_ir_driver); +static int __init bfin_sir_init(void) +{ + return platform_driver_register(&bfin_ir_driver); +} + +static void __exit bfin_sir_exit(void) +{ + platform_driver_unregister(&bfin_ir_driver); +} + +module_init(bfin_sir_init); +module_exit(bfin_sir_exit); module_param(max_rate, int, 0); MODULE_PARM_DESC(max_rate, "Maximum baud rate (115200, 57600, 38400, 19200, 
9600)"); diff --git a/trunk/drivers/net/irda/donauboe.c b/trunk/drivers/net/irda/donauboe.c index 64f403da101c..b45b2cc42804 100644 --- a/trunk/drivers/net/irda/donauboe.c +++ b/trunk/drivers/net/irda/donauboe.c @@ -197,7 +197,7 @@ static char *driver_name = DRIVER_NAME; static int max_baud = 4000000; #ifdef USE_PROBE -static bool do_probe = false; +static int do_probe = 0; #endif diff --git a/trunk/drivers/net/irda/pxaficp_ir.c b/trunk/drivers/net/irda/pxaficp_ir.c index 81d5275a15e2..d0851dfa0378 100644 --- a/trunk/drivers/net/irda/pxaficp_ir.c +++ b/trunk/drivers/net/irda/pxaficp_ir.c @@ -966,7 +966,18 @@ static struct platform_driver pxa_ir_driver = { .resume = pxa_irda_resume, }; -module_platform_driver(pxa_ir_driver); +static int __init pxa_irda_init(void) +{ + return platform_driver_register(&pxa_ir_driver); +} + +static void __exit pxa_irda_exit(void) +{ + platform_driver_unregister(&pxa_ir_driver); +} + +module_init(pxa_irda_init); +module_exit(pxa_irda_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pxa2xx-ir"); diff --git a/trunk/drivers/net/irda/sh_irda.c b/trunk/drivers/net/irda/sh_irda.c index 725d6b367822..d275e276e742 100644 --- a/trunk/drivers/net/irda/sh_irda.c +++ b/trunk/drivers/net/irda/sh_irda.c @@ -873,7 +873,18 @@ static struct platform_driver sh_irda_driver = { }, }; -module_platform_driver(sh_irda_driver); +static int __init sh_irda_init(void) +{ + return platform_driver_register(&sh_irda_driver); +} + +static void __exit sh_irda_exit(void) +{ + platform_driver_unregister(&sh_irda_driver); +} + +module_init(sh_irda_init); +module_exit(sh_irda_exit); MODULE_AUTHOR("Kuninori Morimoto "); MODULE_DESCRIPTION("SuperH IrDA driver"); diff --git a/trunk/drivers/net/irda/sh_sir.c b/trunk/drivers/net/irda/sh_sir.c index e6661b5c1f83..ed7d7d62bf68 100644 --- a/trunk/drivers/net/irda/sh_sir.c +++ b/trunk/drivers/net/irda/sh_sir.c @@ -808,7 +808,18 @@ static struct platform_driver sh_sir_driver = { }, }; -module_platform_driver(sh_sir_driver); +static int __init sh_sir_init(void) +{ + return platform_driver_register(&sh_sir_driver); +} + +static void __exit sh_sir_exit(void) +{ + platform_driver_unregister(&sh_sir_driver); +} + +module_init(sh_sir_init); +module_exit(sh_sir_exit); MODULE_AUTHOR("Kuninori Morimoto "); MODULE_DESCRIPTION("SuperH IrDA driver"); diff --git a/trunk/drivers/net/irda/smsc-ircc2.c b/trunk/drivers/net/irda/smsc-ircc2.c index 6c95d4087b2d..8b1c3484d271 100644 --- a/trunk/drivers/net/irda/smsc-ircc2.c +++ b/trunk/drivers/net/irda/smsc-ircc2.c @@ -79,7 +79,7 @@ MODULE_AUTHOR("Daniele Peri "); MODULE_DESCRIPTION("SMC IrCC SIR/FIR controller driver"); MODULE_LICENSE("GPL"); -static bool smsc_nopnp = true; +static int smsc_nopnp = 1; module_param_named(nopnp, smsc_nopnp, bool, 0); MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings, defaults to true"); diff --git a/trunk/drivers/net/loopback.c b/trunk/drivers/net/loopback.c index b71998d0b5b4..4ce9e5f2c069 100644 --- a/trunk/drivers/net/loopback.c +++ b/trunk/drivers/net/loopback.c @@ -169,7 +169,7 @@ static void loopback_setup(struct net_device *dev) dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | NETIF_F_UFO - | NETIF_F_HW_CSUM + | NETIF_F_NO_CSUM | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX diff --git a/trunk/drivers/net/macvlan.c b/trunk/drivers/net/macvlan.c index f2f820c4b40a..74134970b709 100644 --- a/trunk/drivers/net/macvlan.c +++ b/trunk/drivers/net/macvlan.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include @@ -521,23 
+520,26 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev, return stats; } -static int macvlan_vlan_rx_add_vid(struct net_device *dev, +static void macvlan_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; + const struct net_device_ops *ops = lowerdev->netdev_ops; - return vlan_vid_add(lowerdev, vid); + if (ops->ndo_vlan_rx_add_vid) + ops->ndo_vlan_rx_add_vid(lowerdev, vid); } -static int macvlan_vlan_rx_kill_vid(struct net_device *dev, +static void macvlan_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; + const struct net_device_ops *ops = lowerdev->netdev_ops; - vlan_vid_del(lowerdev, vid); - return 0; + if (ops->ndo_vlan_rx_kill_vid) + ops->ndo_vlan_rx_kill_vid(lowerdev, vid); } static void macvlan_ethtool_get_drvinfo(struct net_device *dev, diff --git a/trunk/drivers/net/macvtap.c b/trunk/drivers/net/macvtap.c index 58dc117a8d78..1b7082d08f33 100644 --- a/trunk/drivers/net/macvtap.c +++ b/trunk/drivers/net/macvtap.c @@ -145,8 +145,8 @@ static void macvtap_put_queue(struct macvtap_queue *q) if (vlan) { int index = get_slot(vlan, q); - RCU_INIT_POINTER(vlan->taps[index], NULL); - RCU_INIT_POINTER(q->vlan, NULL); + rcu_assign_pointer(vlan->taps[index], NULL); + rcu_assign_pointer(q->vlan, NULL); sock_put(&q->sk); --vlan->numvtaps; } @@ -175,14 +175,6 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev, if (!numvtaps) goto out; - /* Check if we can use flow to select a queue */ - rxq = skb_get_rxhash(skb); - if (rxq) { - tap = rcu_dereference(vlan->taps[rxq % numvtaps]); - if (tap) - goto out; - } - if (likely(skb_rx_queue_recorded(skb))) { rxq = skb_get_rx_queue(skb); @@ -194,6 +186,14 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev, goto out; } + /* Check if we can use flow to select a queue */ + rxq = skb_get_rxhash(skb); + if (rxq) { + tap = rcu_dereference(vlan->taps[rxq % numvtaps]); + if (tap) + goto out; + } + /* Everything failed - find first available queue */ for (rxq = 0; rxq < MAX_MACVTAP_QUEUES; rxq++) { tap = rcu_dereference(vlan->taps[rxq]); @@ -223,8 +223,8 @@ static void macvtap_del_queues(struct net_device *dev) lockdep_is_held(&macvtap_lock)); if (q) { qlist[j++] = q; - RCU_INIT_POINTER(vlan->taps[i], NULL); - RCU_INIT_POINTER(q->vlan, NULL); + rcu_assign_pointer(vlan->taps[i], NULL); + rcu_assign_pointer(q->vlan, NULL); vlan->numvtaps--; } } diff --git a/trunk/drivers/net/mii.c b/trunk/drivers/net/mii.c index c70c2332d15e..c62e7816d548 100644 --- a/trunk/drivers/net/mii.c +++ b/trunk/drivers/net/mii.c @@ -35,11 +35,26 @@ static u32 mii_get_an(struct mii_if_info *mii, u16 addr) { + u32 result = 0; int advert; advert = mii->mdio_read(mii->dev, mii->phy_id, addr); - - return mii_lpa_to_ethtool_lpa_t(advert); + if (advert & LPA_LPACK) + result |= ADVERTISED_Autoneg; + if (advert & ADVERTISE_10HALF) + result |= ADVERTISED_10baseT_Half; + if (advert & ADVERTISE_10FULL) + result |= ADVERTISED_10baseT_Full; + if (advert & ADVERTISE_100HALF) + result |= ADVERTISED_100baseT_Half; + if (advert & ADVERTISE_100FULL) + result |= ADVERTISED_100baseT_Full; + if (advert & ADVERTISE_PAUSE_CAP) + result |= ADVERTISED_Pause; + if (advert & ADVERTISE_PAUSE_ASYM) + result |= ADVERTISED_Asym_Pause; + + return result; } /** @@ -89,14 +104,19 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd 
*ecmd) ecmd->autoneg = AUTONEG_ENABLE; ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE); - if (mii->supports_gmii) - ecmd->advertising |= - mii_ctrl1000_to_ethtool_adv_t(ctrl1000); + if (ctrl1000 & ADVERTISE_1000HALF) + ecmd->advertising |= ADVERTISED_1000baseT_Half; + if (ctrl1000 & ADVERTISE_1000FULL) + ecmd->advertising |= ADVERTISED_1000baseT_Full; if (bmsr & BMSR_ANEGCOMPLETE) { ecmd->lp_advertising = mii_get_an(mii, MII_LPA); - ecmd->lp_advertising |= - mii_stat1000_to_ethtool_lpa_t(stat1000); + if (stat1000 & LPA_1000HALF) + ecmd->lp_advertising |= + ADVERTISED_1000baseT_Half; + if (stat1000 & LPA_1000FULL) + ecmd->lp_advertising |= + ADVERTISED_1000baseT_Full; } else { ecmd->lp_advertising = 0; } @@ -184,11 +204,20 @@ int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); } - tmp |= ethtool_adv_to_mii_adv_t(ecmd->advertising); - - if (mii->supports_gmii) - tmp2 |= - ethtool_adv_to_mii_ctrl1000_t(ecmd->advertising); + if (ecmd->advertising & ADVERTISED_10baseT_Half) + tmp |= ADVERTISE_10HALF; + if (ecmd->advertising & ADVERTISED_10baseT_Full) + tmp |= ADVERTISE_10FULL; + if (ecmd->advertising & ADVERTISED_100baseT_Half) + tmp |= ADVERTISE_100HALF; + if (ecmd->advertising & ADVERTISED_100baseT_Full) + tmp |= ADVERTISE_100FULL; + if (mii->supports_gmii) { + if (ecmd->advertising & ADVERTISED_1000baseT_Half) + tmp2 |= ADVERTISE_1000HALF; + if (ecmd->advertising & ADVERTISED_1000baseT_Full) + tmp2 |= ADVERTISE_1000FULL; + } if (advert != tmp) { mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp); mii->advertising = tmp; diff --git a/trunk/drivers/net/phy/Kconfig b/trunk/drivers/net/phy/Kconfig index fbdcdf83cbfd..a70244306c94 100644 --- a/trunk/drivers/net/phy/Kconfig +++ b/trunk/drivers/net/phy/Kconfig @@ -131,7 +131,3 @@ config MDIO_OCTEON If in doubt, say Y. 
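The mii.c hunks above (and the earlier niu.c one) open-code the translation between MII advertisement register bits and ethtool ADVERTISED_* flags. A hedged sketch of the <linux/mii.h> helpers they replace; the example_* wrappers are hypothetical, the helpers themselves are the ones named in the removed lines:

/* Sketch only: both directions exist; register values and the ethtool
 * advertising mask are plain bitmasks.
 */
#include <linux/mii.h>

static u32 example_reg_to_ethtool(u16 advert, u16 ctrl1000)
{
	return mii_adv_to_ethtool_adv_t(advert) |
	       mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
}

static u32 example_ethtool_to_reg(u32 advertising)
{
	return ethtool_adv_to_mii_adv_t(advertising);	/* MII_ADVERTISE bits */
}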
endif # PHYLIB - -config MICREL_KS8995MA - tristate "Micrel KS8995MA 5-ports 10/100 managed Ethernet switch" - depends on SPI diff --git a/trunk/drivers/net/phy/Makefile b/trunk/drivers/net/phy/Makefile index e15c83fecbe0..2333215bbb32 100644 --- a/trunk/drivers/net/phy/Makefile +++ b/trunk/drivers/net/phy/Makefile @@ -23,4 +23,3 @@ obj-$(CONFIG_DP83640_PHY) += dp83640.o obj-$(CONFIG_STE10XP) += ste10Xp.o obj-$(CONFIG_MICREL_PHY) += micrel.o obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o -obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o diff --git a/trunk/drivers/net/phy/mdio-bitbang.c b/trunk/drivers/net/phy/mdio-bitbang.c index daec9b05d168..65391891d8c4 100644 --- a/trunk/drivers/net/phy/mdio-bitbang.c +++ b/trunk/drivers/net/phy/mdio-bitbang.c @@ -202,14 +202,6 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val) return 0; } -static int mdiobb_reset(struct mii_bus *bus) -{ - struct mdiobb_ctrl *ctrl = bus->priv; - if (ctrl->reset) - ctrl->reset(bus); - return 0; -} - struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl) { struct mii_bus *bus; @@ -222,7 +214,6 @@ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl) bus->read = mdiobb_read; bus->write = mdiobb_write; - bus->reset = mdiobb_reset; bus->priv = ctrl; return bus; diff --git a/trunk/drivers/net/phy/mdio-gpio.c b/trunk/drivers/net/phy/mdio-gpio.c index 89c5a3eccc12..2843c90f712f 100644 --- a/trunk/drivers/net/phy/mdio-gpio.c +++ b/trunk/drivers/net/phy/mdio-gpio.c @@ -95,7 +95,6 @@ static struct mii_bus * __devinit mdio_gpio_bus_init(struct device *dev, goto out; bitbang->ctrl.ops = &mdio_gpio_ops; - bitbang->ctrl.reset = pdata->reset; bitbang->mdc = pdata->mdc; bitbang->mdio = pdata->mdio; diff --git a/trunk/drivers/net/phy/phy_device.c b/trunk/drivers/net/phy/phy_device.c index f320f466f03b..83a5a5afec67 100644 --- a/trunk/drivers/net/phy/phy_device.c +++ b/trunk/drivers/net/phy/phy_device.c @@ -563,9 +563,20 @@ static int genphy_config_advert(struct phy_device *phydev) if (adv < 0) return adv; - adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | + adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); - adv |= ethtool_adv_to_mii_adv_t(advertise); + if (advertise & ADVERTISED_10baseT_Half) + adv |= ADVERTISE_10HALF; + if (advertise & ADVERTISED_10baseT_Full) + adv |= ADVERTISE_10FULL; + if (advertise & ADVERTISED_100baseT_Half) + adv |= ADVERTISE_100HALF; + if (advertise & ADVERTISED_100baseT_Full) + adv |= ADVERTISE_100FULL; + if (advertise & ADVERTISED_Pause) + adv |= ADVERTISE_PAUSE_CAP; + if (advertise & ADVERTISED_Asym_Pause) + adv |= ADVERTISE_PAUSE_ASYM; if (adv != oldadv) { err = phy_write(phydev, MII_ADVERTISE, adv); @@ -584,7 +595,10 @@ static int genphy_config_advert(struct phy_device *phydev) return adv; adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); - adv |= ethtool_adv_to_mii_ctrl1000_t(advertise); + if (advertise & SUPPORTED_1000baseT_Half) + adv |= ADVERTISE_1000HALF; + if (advertise & SUPPORTED_1000baseT_Full) + adv |= ADVERTISE_1000FULL; if (adv != oldadv) { err = phy_write(phydev, MII_CTRL1000, adv); diff --git a/trunk/drivers/net/phy/smsc.c b/trunk/drivers/net/phy/smsc.c index fc3e7e96c88c..342505c976d6 100644 --- a/trunk/drivers/net/phy/smsc.c +++ b/trunk/drivers/net/phy/smsc.c @@ -22,7 +22,26 @@ #include #include #include -#include + +#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */ +#define MII_LAN83C185_IM 30 /* Interrupt Mask */ +#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */ + +#define 
MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */ +#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */ +#define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */ +#define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */ +#define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */ +#define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */ +#define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */ + +#define MII_LAN83C185_ISF_INT_ALL (0x0e) + +#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \ + (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \ + MII_LAN83C185_ISF_INT7) + +#define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */ static int smsc_phy_config_intr(struct phy_device *phydev) { diff --git a/trunk/drivers/net/phy/spi_ks8995.c b/trunk/drivers/net/phy/spi_ks8995.c deleted file mode 100644 index 116a2dd7c879..000000000000 --- a/trunk/drivers/net/phy/spi_ks8995.c +++ /dev/null @@ -1,375 +0,0 @@ -/* - * SPI driver for Micrel/Kendin KS8995M ethernet switch - * - * Copyright (C) 2008 Gabor Juhos - * - * This file was based on: drivers/spi/at25.c - * Copyright (C) 2006 David Brownell - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include - -#include - -#define DRV_VERSION "0.1.1" -#define DRV_DESC "Micrel KS8995 Ethernet switch SPI driver" - -/* ------------------------------------------------------------------------ */ - -#define KS8995_REG_ID0 0x00 /* Chip ID0 */ -#define KS8995_REG_ID1 0x01 /* Chip ID1 */ - -#define KS8995_REG_GC0 0x02 /* Global Control 0 */ -#define KS8995_REG_GC1 0x03 /* Global Control 1 */ -#define KS8995_REG_GC2 0x04 /* Global Control 2 */ -#define KS8995_REG_GC3 0x05 /* Global Control 3 */ -#define KS8995_REG_GC4 0x06 /* Global Control 4 */ -#define KS8995_REG_GC5 0x07 /* Global Control 5 */ -#define KS8995_REG_GC6 0x08 /* Global Control 6 */ -#define KS8995_REG_GC7 0x09 /* Global Control 7 */ -#define KS8995_REG_GC8 0x0a /* Global Control 8 */ -#define KS8995_REG_GC9 0x0b /* Global Control 9 */ - -#define KS8995_REG_PC(p, r) ((0x10 * p) + r) /* Port Control */ -#define KS8995_REG_PS(p, r) ((0x10 * p) + r + 0xe) /* Port Status */ - -#define KS8995_REG_TPC0 0x60 /* TOS Priority Control 0 */ -#define KS8995_REG_TPC1 0x61 /* TOS Priority Control 1 */ -#define KS8995_REG_TPC2 0x62 /* TOS Priority Control 2 */ -#define KS8995_REG_TPC3 0x63 /* TOS Priority Control 3 */ -#define KS8995_REG_TPC4 0x64 /* TOS Priority Control 4 */ -#define KS8995_REG_TPC5 0x65 /* TOS Priority Control 5 */ -#define KS8995_REG_TPC6 0x66 /* TOS Priority Control 6 */ -#define KS8995_REG_TPC7 0x67 /* TOS Priority Control 7 */ - -#define KS8995_REG_MAC0 0x68 /* MAC address 0 */ -#define KS8995_REG_MAC1 0x69 /* MAC address 1 */ -#define KS8995_REG_MAC2 0x6a /* MAC address 2 */ -#define KS8995_REG_MAC3 0x6b /* MAC address 3 */ -#define KS8995_REG_MAC4 0x6c /* MAC address 4 */ -#define KS8995_REG_MAC5 0x6d /* MAC address 5 */ - -#define KS8995_REG_IAC0 0x6e /* Indirect Access Control 0 */ -#define KS8995_REG_IAC1 0x6f /* Indirect Access Control 0 */ -#define KS8995_REG_IAD7 0x70 /* Indirect Access Data 7 */ -#define KS8995_REG_IAD6 0x71 /* Indirect Access Data 6 */ -#define KS8995_REG_IAD5 0x72 /* Indirect Access Data 5 */ -#define KS8995_REG_IAD4 0x73 /* Indirect Access Data 4 */ -#define KS8995_REG_IAD3 0x74 /* Indirect Access Data 3 */ 
-#define KS8995_REG_IAD2 0x75 /* Indirect Access Data 2 */ -#define KS8995_REG_IAD1 0x76 /* Indirect Access Data 1 */ -#define KS8995_REG_IAD0 0x77 /* Indirect Access Data 0 */ - -#define KS8995_REGS_SIZE 0x80 - -#define ID1_CHIPID_M 0xf -#define ID1_CHIPID_S 4 -#define ID1_REVISION_M 0x7 -#define ID1_REVISION_S 1 -#define ID1_START_SW 1 /* start the switch */ - -#define FAMILY_KS8995 0x95 -#define CHIPID_M 0 - -#define KS8995_CMD_WRITE 0x02U -#define KS8995_CMD_READ 0x03U - -#define KS8995_RESET_DELAY 10 /* usec */ - -struct ks8995_pdata { - /* not yet implemented */ -}; - -struct ks8995_switch { - struct spi_device *spi; - struct mutex lock; - struct ks8995_pdata *pdata; -}; - -static inline u8 get_chip_id(u8 val) -{ - return (val >> ID1_CHIPID_S) & ID1_CHIPID_M; -} - -static inline u8 get_chip_rev(u8 val) -{ - return (val >> ID1_REVISION_S) & ID1_REVISION_M; -} - -/* ------------------------------------------------------------------------ */ -static int ks8995_read(struct ks8995_switch *ks, char *buf, - unsigned offset, size_t count) -{ - u8 cmd[2]; - struct spi_transfer t[2]; - struct spi_message m; - int err; - - spi_message_init(&m); - - memset(&t, 0, sizeof(t)); - - t[0].tx_buf = cmd; - t[0].len = sizeof(cmd); - spi_message_add_tail(&t[0], &m); - - t[1].rx_buf = buf; - t[1].len = count; - spi_message_add_tail(&t[1], &m); - - cmd[0] = KS8995_CMD_READ; - cmd[1] = offset; - - mutex_lock(&ks->lock); - err = spi_sync(ks->spi, &m); - mutex_unlock(&ks->lock); - - return err ? err : count; -} - - -static int ks8995_write(struct ks8995_switch *ks, char *buf, - unsigned offset, size_t count) -{ - u8 cmd[2]; - struct spi_transfer t[2]; - struct spi_message m; - int err; - - spi_message_init(&m); - - memset(&t, 0, sizeof(t)); - - t[0].tx_buf = cmd; - t[0].len = sizeof(cmd); - spi_message_add_tail(&t[0], &m); - - t[1].tx_buf = buf; - t[1].len = count; - spi_message_add_tail(&t[1], &m); - - cmd[0] = KS8995_CMD_WRITE; - cmd[1] = offset; - - mutex_lock(&ks->lock); - err = spi_sync(ks->spi, &m); - mutex_unlock(&ks->lock); - - return err ? 
err : count; -} - -static inline int ks8995_read_reg(struct ks8995_switch *ks, u8 addr, u8 *buf) -{ - return (ks8995_read(ks, buf, addr, 1) != 1); -} - -static inline int ks8995_write_reg(struct ks8995_switch *ks, u8 addr, u8 val) -{ - char buf = val; - - return (ks8995_write(ks, &buf, addr, 1) != 1); -} - -/* ------------------------------------------------------------------------ */ - -static int ks8995_stop(struct ks8995_switch *ks) -{ - return ks8995_write_reg(ks, KS8995_REG_ID1, 0); -} - -static int ks8995_start(struct ks8995_switch *ks) -{ - return ks8995_write_reg(ks, KS8995_REG_ID1, 1); -} - -static int ks8995_reset(struct ks8995_switch *ks) -{ - int err; - - err = ks8995_stop(ks); - if (err) - return err; - - udelay(KS8995_RESET_DELAY); - - return ks8995_start(ks); -} - -/* ------------------------------------------------------------------------ */ - -static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) -{ - struct device *dev; - struct ks8995_switch *ks8995; - - dev = container_of(kobj, struct device, kobj); - ks8995 = dev_get_drvdata(dev); - - if (unlikely(off > KS8995_REGS_SIZE)) - return 0; - - if ((off + count) > KS8995_REGS_SIZE) - count = KS8995_REGS_SIZE - off; - - if (unlikely(!count)) - return count; - - return ks8995_read(ks8995, buf, off, count); -} - - -static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) -{ - struct device *dev; - struct ks8995_switch *ks8995; - - dev = container_of(kobj, struct device, kobj); - ks8995 = dev_get_drvdata(dev); - - if (unlikely(off >= KS8995_REGS_SIZE)) - return -EFBIG; - - if ((off + count) > KS8995_REGS_SIZE) - count = KS8995_REGS_SIZE - off; - - if (unlikely(!count)) - return count; - - return ks8995_write(ks8995, buf, off, count); -} - - -static struct bin_attribute ks8995_registers_attr = { - .attr = { - .name = "registers", - .mode = S_IRUSR | S_IWUSR, - }, - .size = KS8995_REGS_SIZE, - .read = ks8995_registers_read, - .write = ks8995_registers_write, -}; - -/* ------------------------------------------------------------------------ */ - -static int __devinit ks8995_probe(struct spi_device *spi) -{ - struct ks8995_switch *ks; - struct ks8995_pdata *pdata; - u8 ids[2]; - int err; - - /* Chip description */ - pdata = spi->dev.platform_data; - - ks = kzalloc(sizeof(*ks), GFP_KERNEL); - if (!ks) { - dev_err(&spi->dev, "no memory for private data\n"); - return -ENOMEM; - } - - mutex_init(&ks->lock); - ks->pdata = pdata; - ks->spi = spi_dev_get(spi); - dev_set_drvdata(&spi->dev, ks); - - spi->mode = SPI_MODE_0; - spi->bits_per_word = 8; - err = spi_setup(spi); - if (err) { - dev_err(&spi->dev, "spi_setup failed, err=%d\n", err); - goto err_drvdata; - } - - err = ks8995_read(ks, ids, KS8995_REG_ID0, sizeof(ids)); - if (err < 0) { - dev_err(&spi->dev, "unable to read id registers, err=%d\n", - err); - goto err_drvdata; - } - - switch (ids[0]) { - case FAMILY_KS8995: - break; - default: - dev_err(&spi->dev, "unknown family id:%02x\n", ids[0]); - err = -ENODEV; - goto err_drvdata; - } - - err = ks8995_reset(ks); - if (err) - goto err_drvdata; - - err = sysfs_create_bin_file(&spi->dev.kobj, &ks8995_registers_attr); - if (err) { - dev_err(&spi->dev, "unable to create sysfs file, err=%d\n", - err); - goto err_drvdata; - } - - dev_info(&spi->dev, "KS89%02X device found, Chip ID:%01x, " - "Revision:%01x\n", ids[0], - get_chip_id(ids[1]), 
get_chip_rev(ids[1])); - - return 0; - -err_drvdata: - dev_set_drvdata(&spi->dev, NULL); - kfree(ks); - return err; -} - -static int __devexit ks8995_remove(struct spi_device *spi) -{ - struct ks8995_data *ks8995; - - ks8995 = dev_get_drvdata(&spi->dev); - sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr); - - dev_set_drvdata(&spi->dev, NULL); - kfree(ks8995); - - return 0; -} - -/* ------------------------------------------------------------------------ */ - -static struct spi_driver ks8995_driver = { - .driver = { - .name = "spi-ks8995", - .bus = &spi_bus_type, - .owner = THIS_MODULE, - }, - .probe = ks8995_probe, - .remove = __devexit_p(ks8995_remove), -}; - -static int __init ks8995_init(void) -{ - printk(KERN_INFO DRV_DESC " version " DRV_VERSION"\n"); - - return spi_register_driver(&ks8995_driver); -} -module_init(ks8995_init); - -static void __exit ks8995_exit(void) -{ - spi_unregister_driver(&ks8995_driver); -} -module_exit(ks8995_exit); - -MODULE_DESCRIPTION(DRV_DESC); -MODULE_VERSION(DRV_VERSION); -MODULE_AUTHOR("Gabor Juhos "); -MODULE_LICENSE("GPL v2"); diff --git a/trunk/drivers/net/ppp/pptp.c b/trunk/drivers/net/ppp/pptp.c index c1c9293c2bbf..f8a6853b692e 100644 --- a/trunk/drivers/net/ppp/pptp.c +++ b/trunk/drivers/net/ppp/pptp.c @@ -162,7 +162,7 @@ static void del_chan(struct pppox_sock *sock) { spin_lock(&chan_lock); clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap); - RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL); + rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], NULL); spin_unlock(&chan_lock); synchronize_rcu(); } diff --git a/trunk/drivers/net/team/Kconfig b/trunk/drivers/net/team/Kconfig deleted file mode 100644 index 248a144033ca..000000000000 --- a/trunk/drivers/net/team/Kconfig +++ /dev/null @@ -1,43 +0,0 @@ -menuconfig NET_TEAM - tristate "Ethernet team driver support (EXPERIMENTAL)" - depends on EXPERIMENTAL - ---help--- - This allows one to create virtual interfaces that teams together - multiple ethernet devices. - - Team devices can be added using the "ip" command from the - iproute2 package: - - "ip link add link [ address MAC ] [ NAME ] type team" - - To compile this driver as a module, choose M here: the module - will be called team. - -if NET_TEAM - -config NET_TEAM_MODE_ROUNDROBIN - tristate "Round-robin mode support" - depends on NET_TEAM - ---help--- - Basic mode where port used for transmitting packets is selected in - round-robin fashion using packet counter. - - All added ports are setup to have bond's mac address. - - To compile this team mode as a module, choose M here: the module - will be called team_mode_roundrobin. - -config NET_TEAM_MODE_ACTIVEBACKUP - tristate "Active-backup mode support" - depends on NET_TEAM - ---help--- - Only one port is active at a time and the rest of ports are used - for backup. - - Mac addresses of ports are not modified. Userspace is responsible - to do so. - - To compile this team mode as a module, choose M here: the module - will be called team_mode_activebackup. 
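The macvtap and pptp hunks above swap RCU_INIT_POINTER() back to rcu_assign_pointer() when clearing pointers. A short sketch of the distinction, with hypothetical types:

/* Sketch only: rcu_assign_pointer() includes the release barrier readers
 * need to observe a fully initialised object, so it is required when
 * publishing; RCU_INIT_POINTER() omits the barrier and is sufficient when
 * the stored value is NULL or the pointer is not yet reachable by readers.
 */
#include <linux/rcupdate.h>

struct example_obj;

struct example_holder {
	struct example_obj __rcu *slot;
};

static void example_publish(struct example_holder *h, struct example_obj *obj)
{
	rcu_assign_pointer(h->slot, obj);	/* publish: barrier needed */
}

static void example_clear(struct example_holder *h)
{
	RCU_INIT_POINTER(h->slot, NULL);	/* clear: no barrier needed */
}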
- -endif # NET_TEAM diff --git a/trunk/drivers/net/team/Makefile b/trunk/drivers/net/team/Makefile deleted file mode 100644 index 85f2028a87af..000000000000 --- a/trunk/drivers/net/team/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# -# Makefile for the network team driver -# - -obj-$(CONFIG_NET_TEAM) += team.o -obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o -obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o diff --git a/trunk/drivers/net/team/team.c b/trunk/drivers/net/team/team.c deleted file mode 100644 index ed2a862b835d..000000000000 --- a/trunk/drivers/net/team/team.c +++ /dev/null @@ -1,1684 +0,0 @@ -/* - * net/drivers/team/team.c - Network team device driver - * Copyright (c) 2011 Jiri Pirko - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define DRV_NAME "team" - - -/********** - * Helpers - **********/ - -#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT) - -static struct team_port *team_port_get_rcu(const struct net_device *dev) -{ - struct team_port *port = rcu_dereference(dev->rx_handler_data); - - return team_port_exists(dev) ? port : NULL; -} - -static struct team_port *team_port_get_rtnl(const struct net_device *dev) -{ - struct team_port *port = rtnl_dereference(dev->rx_handler_data); - - return team_port_exists(dev) ? port : NULL; -} - -/* - * Since the ability to change mac address for open port device is tested in - * team_port_add, this function can be called without control of return value - */ -static int __set_port_mac(struct net_device *port_dev, - const unsigned char *dev_addr) -{ - struct sockaddr addr; - - memcpy(addr.sa_data, dev_addr, ETH_ALEN); - addr.sa_family = ARPHRD_ETHER; - return dev_set_mac_address(port_dev, &addr); -} - -int team_port_set_orig_mac(struct team_port *port) -{ - return __set_port_mac(port->dev, port->orig.dev_addr); -} - -int team_port_set_team_mac(struct team_port *port) -{ - return __set_port_mac(port->dev, port->team->dev->dev_addr); -} -EXPORT_SYMBOL(team_port_set_team_mac); - - -/******************* - * Options handling - *******************/ - -struct team_option *__team_find_option(struct team *team, const char *opt_name) -{ - struct team_option *option; - - list_for_each_entry(option, &team->option_list, list) { - if (strcmp(option->name, opt_name) == 0) - return option; - } - return NULL; -} - -int team_options_register(struct team *team, - const struct team_option *option, - size_t option_count) -{ - int i; - struct team_option **dst_opts; - int err; - - dst_opts = kzalloc(sizeof(struct team_option *) * option_count, - GFP_KERNEL); - if (!dst_opts) - return -ENOMEM; - for (i = 0; i < option_count; i++, option++) { - if (__team_find_option(team, option->name)) { - err = -EEXIST; - goto rollback; - } - dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL); - if (!dst_opts[i]) { - err = -ENOMEM; - goto rollback; - } - } - - for (i = 0; i < option_count; i++) - list_add_tail(&dst_opts[i]->list, &team->option_list); - - kfree(dst_opts); - return 0; - -rollback: - for (i = 0; i < option_count; i++) - kfree(dst_opts[i]); - - kfree(dst_opts); - return err; -} - 
-EXPORT_SYMBOL(team_options_register); - -static void __team_options_change_check(struct team *team, - struct team_option *changed_option); - -static void __team_options_unregister(struct team *team, - const struct team_option *option, - size_t option_count) -{ - int i; - - for (i = 0; i < option_count; i++, option++) { - struct team_option *del_opt; - - del_opt = __team_find_option(team, option->name); - if (del_opt) { - list_del(&del_opt->list); - kfree(del_opt); - } - } -} - -void team_options_unregister(struct team *team, - const struct team_option *option, - size_t option_count) -{ - __team_options_unregister(team, option, option_count); - __team_options_change_check(team, NULL); -} -EXPORT_SYMBOL(team_options_unregister); - -static int team_option_get(struct team *team, struct team_option *option, - void *arg) -{ - return option->getter(team, arg); -} - -static int team_option_set(struct team *team, struct team_option *option, - void *arg) -{ - int err; - - err = option->setter(team, arg); - if (err) - return err; - - __team_options_change_check(team, option); - return err; -} - -/**************** - * Mode handling - ****************/ - -static LIST_HEAD(mode_list); -static DEFINE_SPINLOCK(mode_list_lock); - -static struct team_mode *__find_mode(const char *kind) -{ - struct team_mode *mode; - - list_for_each_entry(mode, &mode_list, list) { - if (strcmp(mode->kind, kind) == 0) - return mode; - } - return NULL; -} - -static bool is_good_mode_name(const char *name) -{ - while (*name != '\0') { - if (!isalpha(*name) && !isdigit(*name) && *name != '_') - return false; - name++; - } - return true; -} - -int team_mode_register(struct team_mode *mode) -{ - int err = 0; - - if (!is_good_mode_name(mode->kind) || - mode->priv_size > TEAM_MODE_PRIV_SIZE) - return -EINVAL; - spin_lock(&mode_list_lock); - if (__find_mode(mode->kind)) { - err = -EEXIST; - goto unlock; - } - list_add_tail(&mode->list, &mode_list); -unlock: - spin_unlock(&mode_list_lock); - return err; -} -EXPORT_SYMBOL(team_mode_register); - -int team_mode_unregister(struct team_mode *mode) -{ - spin_lock(&mode_list_lock); - list_del_init(&mode->list); - spin_unlock(&mode_list_lock); - return 0; -} -EXPORT_SYMBOL(team_mode_unregister); - -static struct team_mode *team_mode_get(const char *kind) -{ - struct team_mode *mode; - - spin_lock(&mode_list_lock); - mode = __find_mode(kind); - if (!mode) { - spin_unlock(&mode_list_lock); - request_module("team-mode-%s", kind); - spin_lock(&mode_list_lock); - mode = __find_mode(kind); - } - if (mode) - if (!try_module_get(mode->owner)) - mode = NULL; - - spin_unlock(&mode_list_lock); - return mode; -} - -static void team_mode_put(const struct team_mode *mode) -{ - module_put(mode->owner); -} - -static bool team_dummy_transmit(struct team *team, struct sk_buff *skb) -{ - dev_kfree_skb_any(skb); - return false; -} - -rx_handler_result_t team_dummy_receive(struct team *team, - struct team_port *port, - struct sk_buff *skb) -{ - return RX_HANDLER_ANOTHER; -} - -static void team_adjust_ops(struct team *team) -{ - /* - * To avoid checks in rx/tx skb paths, ensure here that non-null and - * correct ops are always set. 
- */ - - if (list_empty(&team->port_list) || - !team->mode || !team->mode->ops->transmit) - team->ops.transmit = team_dummy_transmit; - else - team->ops.transmit = team->mode->ops->transmit; - - if (list_empty(&team->port_list) || - !team->mode || !team->mode->ops->receive) - team->ops.receive = team_dummy_receive; - else - team->ops.receive = team->mode->ops->receive; -} - -/* - * We can benefit from the fact that it's ensured no port is present - * at the time of mode change. Therefore no packets are in fly so there's no - * need to set mode operations in any special way. - */ -static int __team_change_mode(struct team *team, - const struct team_mode *new_mode) -{ - /* Check if mode was previously set and do cleanup if so */ - if (team->mode) { - void (*exit_op)(struct team *team) = team->ops.exit; - - /* Clear ops area so no callback is called any longer */ - memset(&team->ops, 0, sizeof(struct team_mode_ops)); - team_adjust_ops(team); - - if (exit_op) - exit_op(team); - team_mode_put(team->mode); - team->mode = NULL; - /* zero private data area */ - memset(&team->mode_priv, 0, - sizeof(struct team) - offsetof(struct team, mode_priv)); - } - - if (!new_mode) - return 0; - - if (new_mode->ops->init) { - int err; - - err = new_mode->ops->init(team); - if (err) - return err; - } - - team->mode = new_mode; - memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops)); - team_adjust_ops(team); - - return 0; -} - -static int team_change_mode(struct team *team, const char *kind) -{ - struct team_mode *new_mode; - struct net_device *dev = team->dev; - int err; - - if (!list_empty(&team->port_list)) { - netdev_err(dev, "No ports can be present during mode change\n"); - return -EBUSY; - } - - if (team->mode && strcmp(team->mode->kind, kind) == 0) { - netdev_err(dev, "Unable to change to the same mode the team is in\n"); - return -EINVAL; - } - - new_mode = team_mode_get(kind); - if (!new_mode) { - netdev_err(dev, "Mode \"%s\" not found\n", kind); - return -EINVAL; - } - - err = __team_change_mode(team, new_mode); - if (err) { - netdev_err(dev, "Failed to change to mode \"%s\"\n", kind); - team_mode_put(new_mode); - return err; - } - - netdev_info(dev, "Mode changed to \"%s\"\n", kind); - return 0; -} - - -/************************ - * Rx path frame handler - ************************/ - -/* note: already called with rcu_read_lock */ -static rx_handler_result_t team_handle_frame(struct sk_buff **pskb) -{ - struct sk_buff *skb = *pskb; - struct team_port *port; - struct team *team; - rx_handler_result_t res; - - skb = skb_share_check(skb, GFP_ATOMIC); - if (!skb) - return RX_HANDLER_CONSUMED; - - *pskb = skb; - - port = team_port_get_rcu(skb->dev); - team = port->team; - - res = team->ops.receive(team, port, skb); - if (res == RX_HANDLER_ANOTHER) { - struct team_pcpu_stats *pcpu_stats; - - pcpu_stats = this_cpu_ptr(team->pcpu_stats); - u64_stats_update_begin(&pcpu_stats->syncp); - pcpu_stats->rx_packets++; - pcpu_stats->rx_bytes += skb->len; - if (skb->pkt_type == PACKET_MULTICAST) - pcpu_stats->rx_multicast++; - u64_stats_update_end(&pcpu_stats->syncp); - - skb->dev = team->dev; - } else { - this_cpu_inc(team->pcpu_stats->rx_dropped); - } - - return res; -} - - -/**************** - * Port handling - ****************/ - -static bool team_port_find(const struct team *team, - const struct team_port *port) -{ - struct team_port *cur; - - list_for_each_entry(cur, &team->port_list, list) - if (cur == port) - return true; - return false; -} - -/* - * Add/delete port to the team port list. 
Write guarded by rtnl_lock. - * Takes care of correct port->index setup (might be racy). - */ -static void team_port_list_add_port(struct team *team, - struct team_port *port) -{ - port->index = team->port_count++; - hlist_add_head_rcu(&port->hlist, - team_port_index_hash(team, port->index)); - list_add_tail_rcu(&port->list, &team->port_list); -} - -static void __reconstruct_port_hlist(struct team *team, int rm_index) -{ - int i; - struct team_port *port; - - for (i = rm_index + 1; i < team->port_count; i++) { - port = team_get_port_by_index(team, i); - hlist_del_rcu(&port->hlist); - port->index--; - hlist_add_head_rcu(&port->hlist, - team_port_index_hash(team, port->index)); - } -} - -static void team_port_list_del_port(struct team *team, - struct team_port *port) -{ - int rm_index = port->index; - - hlist_del_rcu(&port->hlist); - list_del_rcu(&port->list); - __reconstruct_port_hlist(team, rm_index); - team->port_count--; -} - -#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ - NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ - NETIF_F_HIGHDMA | NETIF_F_LRO) - -static void __team_compute_features(struct team *team) -{ - struct team_port *port; - u32 vlan_features = TEAM_VLAN_FEATURES; - unsigned short max_hard_header_len = ETH_HLEN; - - list_for_each_entry(port, &team->port_list, list) { - vlan_features = netdev_increment_features(vlan_features, - port->dev->vlan_features, - TEAM_VLAN_FEATURES); - - if (port->dev->hard_header_len > max_hard_header_len) - max_hard_header_len = port->dev->hard_header_len; - } - - team->dev->vlan_features = vlan_features; - team->dev->hard_header_len = max_hard_header_len; - - netdev_change_features(team->dev); -} - -static void team_compute_features(struct team *team) -{ - mutex_lock(&team->lock); - __team_compute_features(team); - mutex_unlock(&team->lock); -} - -static int team_port_enter(struct team *team, struct team_port *port) -{ - int err = 0; - - dev_hold(team->dev); - port->dev->priv_flags |= IFF_TEAM_PORT; - if (team->ops.port_enter) { - err = team->ops.port_enter(team, port); - if (err) { - netdev_err(team->dev, "Device %s failed to enter team mode\n", - port->dev->name); - goto err_port_enter; - } - } - - return 0; - -err_port_enter: - port->dev->priv_flags &= ~IFF_TEAM_PORT; - dev_put(team->dev); - - return err; -} - -static void team_port_leave(struct team *team, struct team_port *port) -{ - if (team->ops.port_leave) - team->ops.port_leave(team, port); - port->dev->priv_flags &= ~IFF_TEAM_PORT; - dev_put(team->dev); -} - -static void __team_port_change_check(struct team_port *port, bool linkup); - -static int team_port_add(struct team *team, struct net_device *port_dev) -{ - struct net_device *dev = team->dev; - struct team_port *port; - char *portname = port_dev->name; - int err; - - if (port_dev->flags & IFF_LOOPBACK || - port_dev->type != ARPHRD_ETHER) { - netdev_err(dev, "Device %s is of an unsupported type\n", - portname); - return -EINVAL; - } - - if (team_port_exists(port_dev)) { - netdev_err(dev, "Device %s is already a port " - "of a team device\n", portname); - return -EBUSY; - } - - if (port_dev->flags & IFF_UP) { - netdev_err(dev, "Device %s is up. 
Set it down before adding it as a team port\n", - portname); - return -EBUSY; - } - - port = kzalloc(sizeof(struct team_port), GFP_KERNEL); - if (!port) - return -ENOMEM; - - port->dev = port_dev; - port->team = team; - - port->orig.mtu = port_dev->mtu; - err = dev_set_mtu(port_dev, dev->mtu); - if (err) { - netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err); - goto err_set_mtu; - } - - memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN); - - err = team_port_enter(team, port); - if (err) { - netdev_err(dev, "Device %s failed to enter team mode\n", - portname); - goto err_port_enter; - } - - err = dev_open(port_dev); - if (err) { - netdev_dbg(dev, "Device %s opening failed\n", - portname); - goto err_dev_open; - } - - err = vlan_vids_add_by_dev(port_dev, dev); - if (err) { - netdev_err(dev, "Failed to add vlan ids to device %s\n", - portname); - goto err_vids_add; - } - - err = netdev_set_master(port_dev, dev); - if (err) { - netdev_err(dev, "Device %s failed to set master\n", portname); - goto err_set_master; - } - - err = netdev_rx_handler_register(port_dev, team_handle_frame, - port); - if (err) { - netdev_err(dev, "Device %s failed to register rx_handler\n", - portname); - goto err_handler_register; - } - - team_port_list_add_port(team, port); - team_adjust_ops(team); - __team_compute_features(team); - __team_port_change_check(port, !!netif_carrier_ok(port_dev)); - - netdev_info(dev, "Port device %s added\n", portname); - - return 0; - -err_handler_register: - netdev_set_master(port_dev, NULL); - -err_set_master: - vlan_vids_del_by_dev(port_dev, dev); - -err_vids_add: - dev_close(port_dev); - -err_dev_open: - team_port_leave(team, port); - team_port_set_orig_mac(port); - -err_port_enter: - dev_set_mtu(port_dev, port->orig.mtu); - -err_set_mtu: - kfree(port); - - return err; -} - -static int team_port_del(struct team *team, struct net_device *port_dev) -{ - struct net_device *dev = team->dev; - struct team_port *port; - char *portname = port_dev->name; - - port = team_port_get_rtnl(port_dev); - if (!port || !team_port_find(team, port)) { - netdev_err(dev, "Device %s does not act as a port of this team\n", - portname); - return -ENOENT; - } - - __team_port_change_check(port, false); - team_port_list_del_port(team, port); - team_adjust_ops(team); - netdev_rx_handler_unregister(port_dev); - netdev_set_master(port_dev, NULL); - vlan_vids_del_by_dev(port_dev, dev); - dev_close(port_dev); - team_port_leave(team, port); - team_port_set_orig_mac(port); - dev_set_mtu(port_dev, port->orig.mtu); - synchronize_rcu(); - kfree(port); - netdev_info(dev, "Port device %s removed\n", portname); - __team_compute_features(team); - - return 0; -} - - -/***************** - * Net device ops - *****************/ - -static const char team_no_mode_kind[] = "*NOMODE*"; - -static int team_mode_option_get(struct team *team, void *arg) -{ - const char **str = arg; - - *str = team->mode ? 
team->mode->kind : team_no_mode_kind; - return 0; -} - -static int team_mode_option_set(struct team *team, void *arg) -{ - const char **str = arg; - - return team_change_mode(team, *str); -} - -static const struct team_option team_options[] = { - { - .name = "mode", - .type = TEAM_OPTION_TYPE_STRING, - .getter = team_mode_option_get, - .setter = team_mode_option_set, - }, -}; - -static int team_init(struct net_device *dev) -{ - struct team *team = netdev_priv(dev); - int i; - int err; - - team->dev = dev; - mutex_init(&team->lock); - - team->pcpu_stats = alloc_percpu(struct team_pcpu_stats); - if (!team->pcpu_stats) - return -ENOMEM; - - for (i = 0; i < TEAM_PORT_HASHENTRIES; i++) - INIT_HLIST_HEAD(&team->port_hlist[i]); - INIT_LIST_HEAD(&team->port_list); - - team_adjust_ops(team); - - INIT_LIST_HEAD(&team->option_list); - err = team_options_register(team, team_options, ARRAY_SIZE(team_options)); - if (err) - goto err_options_register; - netif_carrier_off(dev); - - return 0; - -err_options_register: - free_percpu(team->pcpu_stats); - - return err; -} - -static void team_uninit(struct net_device *dev) -{ - struct team *team = netdev_priv(dev); - struct team_port *port; - struct team_port *tmp; - - mutex_lock(&team->lock); - list_for_each_entry_safe(port, tmp, &team->port_list, list) - team_port_del(team, port->dev); - - __team_change_mode(team, NULL); /* cleanup */ - __team_options_unregister(team, team_options, ARRAY_SIZE(team_options)); - mutex_unlock(&team->lock); -} - -static void team_destructor(struct net_device *dev) -{ - struct team *team = netdev_priv(dev); - - free_percpu(team->pcpu_stats); - free_netdev(dev); -} - -static int team_open(struct net_device *dev) -{ - netif_carrier_on(dev); - return 0; -} - -static int team_close(struct net_device *dev) -{ - netif_carrier_off(dev); - return 0; -} - -/* - * note: already called with rcu_read_lock - */ -static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct team *team = netdev_priv(dev); - bool tx_success = false; - unsigned int len = skb->len; - - tx_success = team->ops.transmit(team, skb); - if (tx_success) { - struct team_pcpu_stats *pcpu_stats; - - pcpu_stats = this_cpu_ptr(team->pcpu_stats); - u64_stats_update_begin(&pcpu_stats->syncp); - pcpu_stats->tx_packets++; - pcpu_stats->tx_bytes += len; - u64_stats_update_end(&pcpu_stats->syncp); - } else { - this_cpu_inc(team->pcpu_stats->tx_dropped); - } - - return NETDEV_TX_OK; -} - -static void team_change_rx_flags(struct net_device *dev, int change) -{ - struct team *team = netdev_priv(dev); - struct team_port *port; - int inc; - - rcu_read_lock(); - list_for_each_entry_rcu(port, &team->port_list, list) { - if (change & IFF_PROMISC) { - inc = dev->flags & IFF_PROMISC ? 1 : -1; - dev_set_promiscuity(port->dev, inc); - } - if (change & IFF_ALLMULTI) { - inc = dev->flags & IFF_ALLMULTI ? 
1 : -1; - dev_set_allmulti(port->dev, inc); - } - } - rcu_read_unlock(); -} - -static void team_set_rx_mode(struct net_device *dev) -{ - struct team *team = netdev_priv(dev); - struct team_port *port; - - rcu_read_lock(); - list_for_each_entry_rcu(port, &team->port_list, list) { - dev_uc_sync(port->dev, dev); - dev_mc_sync(port->dev, dev); - } - rcu_read_unlock(); -} - -static int team_set_mac_address(struct net_device *dev, void *p) -{ - struct team *team = netdev_priv(dev); - struct team_port *port; - struct sockaddr *addr = p; - - memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); - rcu_read_lock(); - list_for_each_entry_rcu(port, &team->port_list, list) - if (team->ops.port_change_mac) - team->ops.port_change_mac(team, port); - rcu_read_unlock(); - return 0; -} - -static int team_change_mtu(struct net_device *dev, int new_mtu) -{ - struct team *team = netdev_priv(dev); - struct team_port *port; - int err; - - /* - * Alhough this is reader, it's guarded by team lock. It's not possible - * to traverse list in reverse under rcu_read_lock - */ - mutex_lock(&team->lock); - list_for_each_entry(port, &team->port_list, list) { - err = dev_set_mtu(port->dev, new_mtu); - if (err) { - netdev_err(dev, "Device %s failed to change mtu", - port->dev->name); - goto unwind; - } - } - mutex_unlock(&team->lock); - - dev->mtu = new_mtu; - - return 0; - -unwind: - list_for_each_entry_continue_reverse(port, &team->port_list, list) - dev_set_mtu(port->dev, dev->mtu); - mutex_unlock(&team->lock); - - return err; -} - -static struct rtnl_link_stats64 * -team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) -{ - struct team *team = netdev_priv(dev); - struct team_pcpu_stats *p; - u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes; - u32 rx_dropped = 0, tx_dropped = 0; - unsigned int start; - int i; - - for_each_possible_cpu(i) { - p = per_cpu_ptr(team->pcpu_stats, i); - do { - start = u64_stats_fetch_begin_bh(&p->syncp); - rx_packets = p->rx_packets; - rx_bytes = p->rx_bytes; - rx_multicast = p->rx_multicast; - tx_packets = p->tx_packets; - tx_bytes = p->tx_bytes; - } while (u64_stats_fetch_retry_bh(&p->syncp, start)); - - stats->rx_packets += rx_packets; - stats->rx_bytes += rx_bytes; - stats->multicast += rx_multicast; - stats->tx_packets += tx_packets; - stats->tx_bytes += tx_bytes; - /* - * rx_dropped & tx_dropped are u32, updated - * without syncp protection. - */ - rx_dropped += p->rx_dropped; - tx_dropped += p->tx_dropped; - } - stats->rx_dropped = rx_dropped; - stats->tx_dropped = tx_dropped; - return stats; -} - -static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid) -{ - struct team *team = netdev_priv(dev); - struct team_port *port; - int err; - - /* - * Alhough this is reader, it's guarded by team lock. 
It's not possible - * to traverse list in reverse under rcu_read_lock - */ - mutex_lock(&team->lock); - list_for_each_entry(port, &team->port_list, list) { - err = vlan_vid_add(port->dev, vid); - if (err) - goto unwind; - } - mutex_unlock(&team->lock); - - return 0; - -unwind: - list_for_each_entry_continue_reverse(port, &team->port_list, list) - vlan_vid_del(port->dev, vid); - mutex_unlock(&team->lock); - - return err; -} - -static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid) -{ - struct team *team = netdev_priv(dev); - struct team_port *port; - - rcu_read_lock(); - list_for_each_entry_rcu(port, &team->port_list, list) - vlan_vid_del(port->dev, vid); - rcu_read_unlock(); - - return 0; -} - -static int team_add_slave(struct net_device *dev, struct net_device *port_dev) -{ - struct team *team = netdev_priv(dev); - int err; - - mutex_lock(&team->lock); - err = team_port_add(team, port_dev); - mutex_unlock(&team->lock); - return err; -} - -static int team_del_slave(struct net_device *dev, struct net_device *port_dev) -{ - struct team *team = netdev_priv(dev); - int err; - - mutex_lock(&team->lock); - err = team_port_del(team, port_dev); - mutex_unlock(&team->lock); - return err; -} - -static netdev_features_t team_fix_features(struct net_device *dev, - netdev_features_t features) -{ - struct team_port *port; - struct team *team = netdev_priv(dev); - netdev_features_t mask; - - mask = features; - features &= ~NETIF_F_ONE_FOR_ALL; - features |= NETIF_F_ALL_FOR_ALL; - - rcu_read_lock(); - list_for_each_entry_rcu(port, &team->port_list, list) { - features = netdev_increment_features(features, - port->dev->features, - mask); - } - rcu_read_unlock(); - return features; -} - -static const struct net_device_ops team_netdev_ops = { - .ndo_init = team_init, - .ndo_uninit = team_uninit, - .ndo_open = team_open, - .ndo_stop = team_close, - .ndo_start_xmit = team_xmit, - .ndo_change_rx_flags = team_change_rx_flags, - .ndo_set_rx_mode = team_set_rx_mode, - .ndo_set_mac_address = team_set_mac_address, - .ndo_change_mtu = team_change_mtu, - .ndo_get_stats64 = team_get_stats64, - .ndo_vlan_rx_add_vid = team_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = team_vlan_rx_kill_vid, - .ndo_add_slave = team_add_slave, - .ndo_del_slave = team_del_slave, - .ndo_fix_features = team_fix_features, -}; - - -/*********************** - * rt netlink interface - ***********************/ - -static void team_setup(struct net_device *dev) -{ - ether_setup(dev); - - dev->netdev_ops = &team_netdev_ops; - dev->destructor = team_destructor; - dev->tx_queue_len = 0; - dev->flags |= IFF_MULTICAST; - dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); - - /* - * Indicate we support unicast address filtering. That way core won't - * bring us to promisc mode in case a unicast addr is added. - * Let this up to underlay drivers. 
- */ - dev->priv_flags |= IFF_UNICAST_FLT; - - dev->features |= NETIF_F_LLTX; - dev->features |= NETIF_F_GRO; - dev->hw_features = NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER; - - dev->features |= dev->hw_features; -} - -static int team_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[]) -{ - int err; - - if (tb[IFLA_ADDRESS] == NULL) - random_ether_addr(dev->dev_addr); - - err = register_netdevice(dev); - if (err) - return err; - - return 0; -} - -static int team_validate(struct nlattr *tb[], struct nlattr *data[]) -{ - if (tb[IFLA_ADDRESS]) { - if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) - return -EINVAL; - if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) - return -EADDRNOTAVAIL; - } - return 0; -} - -static struct rtnl_link_ops team_link_ops __read_mostly = { - .kind = DRV_NAME, - .priv_size = sizeof(struct team), - .setup = team_setup, - .newlink = team_newlink, - .validate = team_validate, -}; - - -/*********************************** - * Generic netlink custom interface - ***********************************/ - -static struct genl_family team_nl_family = { - .id = GENL_ID_GENERATE, - .name = TEAM_GENL_NAME, - .version = TEAM_GENL_VERSION, - .maxattr = TEAM_ATTR_MAX, - .netnsok = true, -}; - -static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = { - [TEAM_ATTR_UNSPEC] = { .type = NLA_UNSPEC, }, - [TEAM_ATTR_TEAM_IFINDEX] = { .type = NLA_U32 }, - [TEAM_ATTR_LIST_OPTION] = { .type = NLA_NESTED }, - [TEAM_ATTR_LIST_PORT] = { .type = NLA_NESTED }, -}; - -static const struct nla_policy -team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = { - [TEAM_ATTR_OPTION_UNSPEC] = { .type = NLA_UNSPEC, }, - [TEAM_ATTR_OPTION_NAME] = { - .type = NLA_STRING, - .len = TEAM_STRING_MAX_LEN, - }, - [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG }, - [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 }, - [TEAM_ATTR_OPTION_DATA] = { - .type = NLA_BINARY, - .len = TEAM_STRING_MAX_LEN, - }, -}; - -static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) -{ - struct sk_buff *msg; - void *hdr; - int err; - - msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); - if (!msg) - return -ENOMEM; - - hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, - &team_nl_family, 0, TEAM_CMD_NOOP); - if (IS_ERR(hdr)) { - err = PTR_ERR(hdr); - goto err_msg_put; - } - - genlmsg_end(msg, hdr); - - return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); - -err_msg_put: - nlmsg_free(msg); - - return err; -} - -/* - * Netlink cmd functions should be locked by following two functions. - * Since dev gets held here, that ensures dev won't disappear in between. 
- */ -static struct team *team_nl_team_get(struct genl_info *info) -{ - struct net *net = genl_info_net(info); - int ifindex; - struct net_device *dev; - struct team *team; - - if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX]) - return NULL; - - ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]); - dev = dev_get_by_index(net, ifindex); - if (!dev || dev->netdev_ops != &team_netdev_ops) { - if (dev) - dev_put(dev); - return NULL; - } - - team = netdev_priv(dev); - mutex_lock(&team->lock); - return team; -} - -static void team_nl_team_put(struct team *team) -{ - mutex_unlock(&team->lock); - dev_put(team->dev); -} - -static int team_nl_send_generic(struct genl_info *info, struct team *team, - int (*fill_func)(struct sk_buff *skb, - struct genl_info *info, - int flags, struct team *team)) -{ - struct sk_buff *skb; - int err; - - skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); - if (!skb) - return -ENOMEM; - - err = fill_func(skb, info, NLM_F_ACK, team); - if (err < 0) - goto err_fill; - - err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid); - return err; - -err_fill: - nlmsg_free(skb); - return err; -} - -static int team_nl_fill_options_get_changed(struct sk_buff *skb, - u32 pid, u32 seq, int flags, - struct team *team, - struct team_option *changed_option) -{ - struct nlattr *option_list; - void *hdr; - struct team_option *option; - - hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, - TEAM_CMD_OPTIONS_GET); - if (IS_ERR(hdr)) - return PTR_ERR(hdr); - - NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex); - option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION); - if (!option_list) - return -EMSGSIZE; - - list_for_each_entry(option, &team->option_list, list) { - struct nlattr *option_item; - long arg; - - option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION); - if (!option_item) - goto nla_put_failure; - NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name); - if (option == changed_option) - NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED); - switch (option->type) { - case TEAM_OPTION_TYPE_U32: - NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32); - team_option_get(team, option, &arg); - NLA_PUT_U32(skb, TEAM_ATTR_OPTION_DATA, arg); - break; - case TEAM_OPTION_TYPE_STRING: - NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING); - team_option_get(team, option, &arg); - NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_DATA, - (char *) arg); - break; - default: - BUG(); - } - nla_nest_end(skb, option_item); - } - - nla_nest_end(skb, option_list); - return genlmsg_end(skb, hdr); - -nla_put_failure: - genlmsg_cancel(skb, hdr); - return -EMSGSIZE; -} - -static int team_nl_fill_options_get(struct sk_buff *skb, - struct genl_info *info, int flags, - struct team *team) -{ - return team_nl_fill_options_get_changed(skb, info->snd_pid, - info->snd_seq, NLM_F_ACK, - team, NULL); -} - -static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info) -{ - struct team *team; - int err; - - team = team_nl_team_get(info); - if (!team) - return -EINVAL; - - err = team_nl_send_generic(info, team, team_nl_fill_options_get); - - team_nl_team_put(team); - - return err; -} - -static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) -{ - struct team *team; - int err = 0; - int i; - struct nlattr *nl_option; - - team = team_nl_team_get(info); - if (!team) - return -EINVAL; - - err = -EINVAL; - if (!info->attrs[TEAM_ATTR_LIST_OPTION]) { - err = -EINVAL; - goto team_put; - } - - nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) { - struct nlattr 
*mode_attrs[TEAM_ATTR_OPTION_MAX + 1]; - enum team_option_type opt_type; - struct team_option *option; - char *opt_name; - bool opt_found = false; - - if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) { - err = -EINVAL; - goto team_put; - } - err = nla_parse_nested(mode_attrs, TEAM_ATTR_OPTION_MAX, - nl_option, team_nl_option_policy); - if (err) - goto team_put; - if (!mode_attrs[TEAM_ATTR_OPTION_NAME] || - !mode_attrs[TEAM_ATTR_OPTION_TYPE] || - !mode_attrs[TEAM_ATTR_OPTION_DATA]) { - err = -EINVAL; - goto team_put; - } - switch (nla_get_u8(mode_attrs[TEAM_ATTR_OPTION_TYPE])) { - case NLA_U32: - opt_type = TEAM_OPTION_TYPE_U32; - break; - case NLA_STRING: - opt_type = TEAM_OPTION_TYPE_STRING; - break; - default: - goto team_put; - } - - opt_name = nla_data(mode_attrs[TEAM_ATTR_OPTION_NAME]); - list_for_each_entry(option, &team->option_list, list) { - long arg; - struct nlattr *opt_data_attr; - - if (option->type != opt_type || - strcmp(option->name, opt_name)) - continue; - opt_found = true; - opt_data_attr = mode_attrs[TEAM_ATTR_OPTION_DATA]; - switch (opt_type) { - case TEAM_OPTION_TYPE_U32: - arg = nla_get_u32(opt_data_attr); - break; - case TEAM_OPTION_TYPE_STRING: - arg = (long) nla_data(opt_data_attr); - break; - default: - BUG(); - } - err = team_option_set(team, option, &arg); - if (err) - goto team_put; - } - if (!opt_found) { - err = -ENOENT; - goto team_put; - } - } - -team_put: - team_nl_team_put(team); - - return err; -} - -static int team_nl_fill_port_list_get_changed(struct sk_buff *skb, - u32 pid, u32 seq, int flags, - struct team *team, - struct team_port *changed_port) -{ - struct nlattr *port_list; - void *hdr; - struct team_port *port; - - hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, - TEAM_CMD_PORT_LIST_GET); - if (IS_ERR(hdr)) - return PTR_ERR(hdr); - - NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex); - port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT); - if (!port_list) - return -EMSGSIZE; - - list_for_each_entry(port, &team->port_list, list) { - struct nlattr *port_item; - - port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT); - if (!port_item) - goto nla_put_failure; - NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex); - if (port == changed_port) - NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED); - if (port->linkup) - NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP); - NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed); - NLA_PUT_U8(skb, TEAM_ATTR_PORT_DUPLEX, port->duplex); - nla_nest_end(skb, port_item); - } - - nla_nest_end(skb, port_list); - return genlmsg_end(skb, hdr); - -nla_put_failure: - genlmsg_cancel(skb, hdr); - return -EMSGSIZE; -} - -static int team_nl_fill_port_list_get(struct sk_buff *skb, - struct genl_info *info, int flags, - struct team *team) -{ - return team_nl_fill_port_list_get_changed(skb, info->snd_pid, - info->snd_seq, NLM_F_ACK, - team, NULL); -} - -static int team_nl_cmd_port_list_get(struct sk_buff *skb, - struct genl_info *info) -{ - struct team *team; - int err; - - team = team_nl_team_get(info); - if (!team) - return -EINVAL; - - err = team_nl_send_generic(info, team, team_nl_fill_port_list_get); - - team_nl_team_put(team); - - return err; -} - -static struct genl_ops team_nl_ops[] = { - { - .cmd = TEAM_CMD_NOOP, - .doit = team_nl_cmd_noop, - .policy = team_nl_policy, - }, - { - .cmd = TEAM_CMD_OPTIONS_SET, - .doit = team_nl_cmd_options_set, - .policy = team_nl_policy, - .flags = GENL_ADMIN_PERM, - }, - { - .cmd = TEAM_CMD_OPTIONS_GET, - .doit = team_nl_cmd_options_get, - .policy = team_nl_policy, - .flags 
= GENL_ADMIN_PERM, - }, - { - .cmd = TEAM_CMD_PORT_LIST_GET, - .doit = team_nl_cmd_port_list_get, - .policy = team_nl_policy, - .flags = GENL_ADMIN_PERM, - }, -}; - -static struct genl_multicast_group team_change_event_mcgrp = { - .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, -}; - -static int team_nl_send_event_options_get(struct team *team, - struct team_option *changed_option) -{ - struct sk_buff *skb; - int err; - struct net *net = dev_net(team->dev); - - skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); - if (!skb) - return -ENOMEM; - - err = team_nl_fill_options_get_changed(skb, 0, 0, 0, team, - changed_option); - if (err < 0) - goto err_fill; - - err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id, - GFP_KERNEL); - return err; - -err_fill: - nlmsg_free(skb); - return err; -} - -static int team_nl_send_event_port_list_get(struct team_port *port) -{ - struct sk_buff *skb; - int err; - struct net *net = dev_net(port->team->dev); - - skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); - if (!skb) - return -ENOMEM; - - err = team_nl_fill_port_list_get_changed(skb, 0, 0, 0, - port->team, port); - if (err < 0) - goto err_fill; - - err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id, - GFP_KERNEL); - return err; - -err_fill: - nlmsg_free(skb); - return err; -} - -static int team_nl_init(void) -{ - int err; - - err = genl_register_family_with_ops(&team_nl_family, team_nl_ops, - ARRAY_SIZE(team_nl_ops)); - if (err) - return err; - - err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp); - if (err) - goto err_change_event_grp_reg; - - return 0; - -err_change_event_grp_reg: - genl_unregister_family(&team_nl_family); - - return err; -} - -static void team_nl_fini(void) -{ - genl_unregister_family(&team_nl_family); -} - - -/****************** - * Change checkers - ******************/ - -static void __team_options_change_check(struct team *team, - struct team_option *changed_option) -{ - int err; - - err = team_nl_send_event_options_get(team, changed_option); - if (err) - netdev_warn(team->dev, "Failed to send options change via netlink\n"); -} - -/* rtnl lock is held */ -static void __team_port_change_check(struct team_port *port, bool linkup) -{ - int err; - - if (port->linkup == linkup) - return; - - port->linkup = linkup; - if (linkup) { - struct ethtool_cmd ecmd; - - err = __ethtool_get_settings(port->dev, &ecmd); - if (!err) { - port->speed = ethtool_cmd_speed(&ecmd); - port->duplex = ecmd.duplex; - goto send_event; - } - } - port->speed = 0; - port->duplex = 0; - -send_event: - err = team_nl_send_event_port_list_get(port); - if (err) - netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n", - port->dev->name); - -} - -static void team_port_change_check(struct team_port *port, bool linkup) -{ - struct team *team = port->team; - - mutex_lock(&team->lock); - __team_port_change_check(port, linkup); - mutex_unlock(&team->lock); -} - -/************************************ - * Net device notifier event handler - ************************************/ - -static int team_device_event(struct notifier_block *unused, - unsigned long event, void *ptr) -{ - struct net_device *dev = (struct net_device *) ptr; - struct team_port *port; - - port = team_port_get_rtnl(dev); - if (!port) - return NOTIFY_DONE; - - switch (event) { - case NETDEV_UP: - if (netif_carrier_ok(dev)) - team_port_change_check(port, true); - case NETDEV_DOWN: - team_port_change_check(port, false); - case NETDEV_CHANGE: - if (netif_running(port->dev)) - 
team_port_change_check(port, - !!netif_carrier_ok(port->dev)); - break; - case NETDEV_UNREGISTER: - team_del_slave(port->team->dev, dev); - break; - case NETDEV_FEAT_CHANGE: - team_compute_features(port->team); - break; - case NETDEV_CHANGEMTU: - /* Forbid to change mtu of underlaying device */ - return NOTIFY_BAD; - case NETDEV_PRE_TYPE_CHANGE: - /* Forbid to change type of underlaying device */ - return NOTIFY_BAD; - } - return NOTIFY_DONE; -} - -static struct notifier_block team_notifier_block __read_mostly = { - .notifier_call = team_device_event, -}; - - -/*********************** - * Module init and exit - ***********************/ - -static int __init team_module_init(void) -{ - int err; - - register_netdevice_notifier(&team_notifier_block); - - err = rtnl_link_register(&team_link_ops); - if (err) - goto err_rtnl_reg; - - err = team_nl_init(); - if (err) - goto err_nl_init; - - return 0; - -err_nl_init: - rtnl_link_unregister(&team_link_ops); - -err_rtnl_reg: - unregister_netdevice_notifier(&team_notifier_block); - - return err; -} - -static void __exit team_module_exit(void) -{ - team_nl_fini(); - rtnl_link_unregister(&team_link_ops); - unregister_netdevice_notifier(&team_notifier_block); -} - -module_init(team_module_init); -module_exit(team_module_exit); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Jiri Pirko "); -MODULE_DESCRIPTION("Ethernet team device driver"); -MODULE_ALIAS_RTNL_LINK(DRV_NAME); diff --git a/trunk/drivers/net/team/team_mode_activebackup.c b/trunk/drivers/net/team/team_mode_activebackup.c deleted file mode 100644 index f4d960e82e29..000000000000 --- a/trunk/drivers/net/team/team_mode_activebackup.c +++ /dev/null @@ -1,136 +0,0 @@ -/* - * net/drivers/team/team_mode_activebackup.c - Active-backup mode for team - * Copyright (c) 2011 Jiri Pirko - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include - -struct ab_priv { - struct team_port __rcu *active_port; -}; - -static struct ab_priv *ab_priv(struct team *team) -{ - return (struct ab_priv *) &team->mode_priv; -} - -static rx_handler_result_t ab_receive(struct team *team, struct team_port *port, - struct sk_buff *skb) { - struct team_port *active_port; - - active_port = rcu_dereference(ab_priv(team)->active_port); - if (active_port != port) - return RX_HANDLER_EXACT; - return RX_HANDLER_ANOTHER; -} - -static bool ab_transmit(struct team *team, struct sk_buff *skb) -{ - struct team_port *active_port; - - active_port = rcu_dereference(ab_priv(team)->active_port); - if (unlikely(!active_port)) - goto drop; - skb->dev = active_port->dev; - if (dev_queue_xmit(skb)) - return false; - return true; - -drop: - dev_kfree_skb_any(skb); - return false; -} - -static void ab_port_leave(struct team *team, struct team_port *port) -{ - if (ab_priv(team)->active_port == port) - RCU_INIT_POINTER(ab_priv(team)->active_port, NULL); -} - -static int ab_active_port_get(struct team *team, void *arg) -{ - u32 *ifindex = arg; - - *ifindex = 0; - if (ab_priv(team)->active_port) - *ifindex = ab_priv(team)->active_port->dev->ifindex; - return 0; -} - -static int ab_active_port_set(struct team *team, void *arg) -{ - u32 *ifindex = arg; - struct team_port *port; - - list_for_each_entry_rcu(port, &team->port_list, list) { - if (port->dev->ifindex == *ifindex) { - rcu_assign_pointer(ab_priv(team)->active_port, port); - return 0; - } - } - return -ENOENT; -} - -static const struct team_option ab_options[] = { - { - .name = "activeport", - .type = TEAM_OPTION_TYPE_U32, - .getter = ab_active_port_get, - .setter = ab_active_port_set, - }, -}; - -int ab_init(struct team *team) -{ - return team_options_register(team, ab_options, ARRAY_SIZE(ab_options)); -} - -void ab_exit(struct team *team) -{ - team_options_unregister(team, ab_options, ARRAY_SIZE(ab_options)); -} - -static const struct team_mode_ops ab_mode_ops = { - .init = ab_init, - .exit = ab_exit, - .receive = ab_receive, - .transmit = ab_transmit, - .port_leave = ab_port_leave, -}; - -static struct team_mode ab_mode = { - .kind = "activebackup", - .owner = THIS_MODULE, - .priv_size = sizeof(struct ab_priv), - .ops = &ab_mode_ops, -}; - -static int __init ab_init_module(void) -{ - return team_mode_register(&ab_mode); -} - -static void __exit ab_cleanup_module(void) -{ - team_mode_unregister(&ab_mode); -} - -module_init(ab_init_module); -module_exit(ab_cleanup_module); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Jiri Pirko "); -MODULE_DESCRIPTION("Active-backup mode for team"); -MODULE_ALIAS("team-mode-activebackup"); diff --git a/trunk/drivers/net/team/team_mode_roundrobin.c b/trunk/drivers/net/team/team_mode_roundrobin.c deleted file mode 100644 index a0e8f806331a..000000000000 --- a/trunk/drivers/net/team/team_mode_roundrobin.c +++ /dev/null @@ -1,107 +0,0 @@ -/* - * net/drivers/team/team_mode_roundrobin.c - Round-robin mode for team - * Copyright (c) 2011 Jiri Pirko - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#include -#include -#include -#include -#include -#include -#include - -struct rr_priv { - unsigned int sent_packets; -}; - -static struct rr_priv *rr_priv(struct team *team) -{ - return (struct rr_priv *) &team->mode_priv; -} - -static struct team_port *__get_first_port_up(struct team *team, - struct team_port *port) -{ - struct team_port *cur; - - if (port->linkup) - return port; - cur = port; - list_for_each_entry_continue_rcu(cur, &team->port_list, list) - if (cur->linkup) - return cur; - list_for_each_entry_rcu(cur, &team->port_list, list) { - if (cur == port) - break; - if (cur->linkup) - return cur; - } - return NULL; -} - -static bool rr_transmit(struct team *team, struct sk_buff *skb) -{ - struct team_port *port; - int port_index; - - port_index = rr_priv(team)->sent_packets++ % team->port_count; - port = team_get_port_by_index_rcu(team, port_index); - port = __get_first_port_up(team, port); - if (unlikely(!port)) - goto drop; - skb->dev = port->dev; - if (dev_queue_xmit(skb)) - return false; - return true; - -drop: - dev_kfree_skb_any(skb); - return false; -} - -static int rr_port_enter(struct team *team, struct team_port *port) -{ - return team_port_set_team_mac(port); -} - -static void rr_port_change_mac(struct team *team, struct team_port *port) -{ - team_port_set_team_mac(port); -} - -static const struct team_mode_ops rr_mode_ops = { - .transmit = rr_transmit, - .port_enter = rr_port_enter, - .port_change_mac = rr_port_change_mac, -}; - -static struct team_mode rr_mode = { - .kind = "roundrobin", - .owner = THIS_MODULE, - .priv_size = sizeof(struct rr_priv), - .ops = &rr_mode_ops, -}; - -static int __init rr_init_module(void) -{ - return team_mode_register(&rr_mode); -} - -static void __exit rr_cleanup_module(void) -{ - team_mode_unregister(&rr_mode); -} - -module_init(rr_init_module); -module_exit(rr_cleanup_module); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Jiri Pirko "); -MODULE_DESCRIPTION("Round-robin mode for team"); -MODULE_ALIAS("team-mode-roundrobin"); diff --git a/trunk/drivers/net/tun.c b/trunk/drivers/net/tun.c index 93c5d72711b0..7bea9c65119e 100644 --- a/trunk/drivers/net/tun.c +++ b/trunk/drivers/net/tun.c @@ -123,7 +123,7 @@ struct tun_struct { gid_t group; struct net_device *dev; - netdev_features_t set_features; + u32 set_features; #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ NETIF_F_TSO6|NETIF_F_UFO) struct fasync_struct *fasync; @@ -454,8 +454,7 @@ tun_net_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static netdev_features_t tun_net_fix_features(struct net_device *dev, - netdev_features_t features) +static u32 tun_net_fix_features(struct net_device *dev, u32 features) { struct tun_struct *tun = netdev_priv(dev); @@ -1197,7 +1196,7 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun, * privs required. 
*/ static int set_offload(struct tun_struct *tun, unsigned long arg) { - netdev_features_t features = 0; + u32 features = 0; if (arg & TUN_F_CSUM) { features |= NETIF_F_HW_CSUM; @@ -1590,15 +1589,16 @@ static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info { struct tun_struct *tun = netdev_priv(dev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->fw_version, "N/A"); switch (tun->flags & TUN_TYPE_MASK) { case TUN_TUN_DEV: - strlcpy(info->bus_info, "tun", sizeof(info->bus_info)); + strcpy(info->bus_info, "tun"); break; case TUN_TAP_DEV: - strlcpy(info->bus_info, "tap", sizeof(info->bus_info)); + strcpy(info->bus_info, "tap"); break; } } diff --git a/trunk/drivers/net/usb/asix.c b/trunk/drivers/net/usb/asix.c index dbdca225b846..e95f0e60a9bc 100644 --- a/trunk/drivers/net/usb/asix.c +++ b/trunk/drivers/net/usb/asix.c @@ -36,7 +36,7 @@ #include #include -#define DRIVER_VERSION "22-Dec-2011" +#define DRIVER_VERSION "08-Nov-2011" #define DRIVER_NAME "asix" /* ASIX AX8817X based USB 2.0 Ethernet Devices */ @@ -689,10 +689,6 @@ asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo) } wolinfo->supported = WAKE_PHY | WAKE_MAGIC; wolinfo->wolopts = 0; - if (opt & AX_MONITOR_LINK) - wolinfo->wolopts |= WAKE_PHY; - if (opt & AX_MONITOR_MAGIC) - wolinfo->wolopts |= WAKE_MAGIC; } static int diff --git a/trunk/drivers/net/usb/cdc-phonet.c b/trunk/drivers/net/usb/cdc-phonet.c index 331e44056f5a..a60d0069cc45 100644 --- a/trunk/drivers/net/usb/cdc-phonet.c +++ b/trunk/drivers/net/usb/cdc-phonet.c @@ -130,7 +130,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags) struct page *page; int err; - page = alloc_page(gfp_flags); + page = __netdev_alloc_page(dev, gfp_flags); if (!page) return -ENOMEM; @@ -140,7 +140,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags) err = usb_submit_urb(req, gfp_flags); if (unlikely(err)) { dev_dbg(&dev->dev, "RX submit error (%d)\n", err); - put_page(page); + netdev_free_page(dev, page); } return err; } @@ -208,9 +208,9 @@ static void rx_complete(struct urb *req) dev->stats.rx_errors++; resubmit: if (page) - put_page(page); + netdev_free_page(dev, page); if (req) - rx_submit(pnd, req, GFP_ATOMIC | __GFP_COLD); + rx_submit(pnd, req, GFP_ATOMIC); } static int usbpn_close(struct net_device *dev); @@ -229,7 +229,7 @@ static int usbpn_open(struct net_device *dev) for (i = 0; i < rxq_size; i++) { struct urb *req = usb_alloc_urb(0, GFP_KERNEL); - if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) { + if (!req || rx_submit(pnd, req, GFP_KERNEL)) { usbpn_close(dev); return -ENOMEM; } diff --git a/trunk/drivers/net/usb/cdc_ncm.c b/trunk/drivers/net/usb/cdc_ncm.c index 009dd0f18535..f06fb78383a1 100644 --- a/trunk/drivers/net/usb/cdc_ncm.c +++ b/trunk/drivers/net/usb/cdc_ncm.c @@ -465,10 +465,12 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) int temp; u8 iface_no; - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); if (ctx == NULL) return -ENODEV; + memset(ctx, 0, sizeof(*ctx)); + init_timer(&ctx->tx_timer); spin_lock_init(&ctx->mtx); ctx->netdev = dev->net; diff --git a/trunk/drivers/net/usb/pegasus.c b/trunk/drivers/net/usb/pegasus.c index 5d99b8cacd7d..769f5090bda1 100644 --- a/trunk/drivers/net/usb/pegasus.c +++ b/trunk/drivers/net/usb/pegasus.c @@ -55,8 +55,8 @@ 
static const char driver_name[] = "pegasus"; #define BMSR_MEDIA (BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \ BMSR_100FULL | BMSR_ANEGCAPABLE) -static bool loopback; -static bool mii_mode; +static int loopback; +static int mii_mode; static char *devid; static struct usb_eth_dev usb_dev_id[] = { @@ -517,7 +517,7 @@ static inline int reset_mac(pegasus_t *pegasus) for (i = 0; i < REG_TIMEOUT; i++) { get_registers(pegasus, EthCtrl1, 1, &data); if (~data & 0x08) { - if (loopback) + if (loopback & 1) break; if (mii_mode && (pegasus->features & HAS_HOME_PNA)) set_register(pegasus, Gpio1, 0x34); @@ -561,7 +561,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb) data[1] |= 0x10; /* set 100 Mbps */ if (mii_mode) data[1] = 0; - data[2] = loopback ? 0x09 : 0x01; + data[2] = (loopback & 1) ? 0x09 : 0x01; memcpy(pegasus->eth_regs, data, sizeof(data)); ret = set_registers(pegasus, EthCtrl0, 3, data); diff --git a/trunk/drivers/net/usb/smsc75xx.c b/trunk/drivers/net/usb/smsc75xx.c index 0d5da82f0ff7..a5b9b12ef268 100644 --- a/trunk/drivers/net/usb/smsc75xx.c +++ b/trunk/drivers/net/usb/smsc75xx.c @@ -76,7 +76,7 @@ struct usb_context { struct usbnet *dev; }; -static bool turbo_mode = true; +static int turbo_mode = true; module_param(turbo_mode, bool, 0644); MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); @@ -728,8 +728,7 @@ static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu) } /* Enable or disable Rx checksum offload engine */ -static int smsc75xx_set_features(struct net_device *netdev, - netdev_features_t features) +static int smsc75xx_set_features(struct net_device *netdev, u32 features) { struct usbnet *dev = netdev_priv(netdev); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); diff --git a/trunk/drivers/net/usb/smsc95xx.c b/trunk/drivers/net/usb/smsc95xx.c index db217ad66f26..eff67678c5a6 100644 --- a/trunk/drivers/net/usb/smsc95xx.c +++ b/trunk/drivers/net/usb/smsc95xx.c @@ -59,7 +59,7 @@ struct usb_context { struct usbnet *dev; }; -static bool turbo_mode = true; +static int turbo_mode = true; module_param(turbo_mode, bool, 0644); MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); @@ -516,8 +516,7 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb) } /* Enable or disable Tx & Rx checksum offload engines */ -static int smsc95xx_set_features(struct net_device *netdev, - netdev_features_t features) +static int smsc95xx_set_features(struct net_device *netdev, u32 features) { struct usbnet *dev = netdev_priv(netdev); u32 read_buf; diff --git a/trunk/drivers/net/veth.c b/trunk/drivers/net/veth.c index 49f4667e1fa3..ef883e97cee0 100644 --- a/trunk/drivers/net/veth.c +++ b/trunk/drivers/net/veth.c @@ -27,8 +27,8 @@ struct veth_net_stats { u64 rx_packets; - u64 rx_bytes; u64 tx_packets; + u64 rx_bytes; u64 tx_bytes; u64 rx_dropped; struct u64_stats_sync syncp; @@ -66,8 +66,9 @@ static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->fw_version, "N/A"); } static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf) @@ -270,7 +271,7 @@ static void veth_setup(struct net_device *dev) dev->features |= NETIF_F_LLTX; dev->destructor = veth_dev_free; - 
dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM; + dev->hw_features = NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_RXCSUM; } /* diff --git a/trunk/drivers/net/virtio_net.c b/trunk/drivers/net/virtio_net.c index 76fe14efb2b5..6ee8410443c4 100644 --- a/trunk/drivers/net/virtio_net.c +++ b/trunk/drivers/net/virtio_net.c @@ -30,7 +30,7 @@ static int napi_weight = 128; module_param(napi_weight, int, 0444); -static bool csum = true, gso = true; +static int csum = 1, gso = 1; module_param(csum, bool, 0444); module_param(gso, bool, 0444); @@ -39,7 +39,6 @@ module_param(gso, bool, 0444); #define GOOD_COPY_LEN 128 #define VIRTNET_SEND_COMMAND_SG_MAX 2 -#define VIRTNET_DRIVER_VERSION "1.0.0" struct virtnet_stats { struct u64_stats_sync syncp; @@ -156,7 +155,6 @@ static void set_skb_frag(struct sk_buff *skb, struct page *page, *len -= size; } -/* Called from bottom half context */ static struct sk_buff *page_to_skb(struct virtnet_info *vi, struct page *page, unsigned int len) { @@ -359,7 +357,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp) struct skb_vnet_hdr *hdr; int err; - skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp); + skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); if (unlikely(!skb)) return -ENOMEM; @@ -441,13 +439,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp) return err; } -/* - * Returns false if we couldn't fill entirely (OOM). - * - * Normally run in the receive path, but can also be run from ndo_open - * before we're receiving packets, or from refill_work which is - * careful to disable receiving (using napi_disable). - */ +/* Returns false if we couldn't fill entirely (OOM). */ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp) { int err; @@ -509,7 +501,7 @@ static void refill_work(struct work_struct *work) /* In theory, this can happen: if we don't get any buffers in * we will *never* try to fill again. */ if (still_empty) - queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2); + schedule_delayed_work(&vi->refill, HZ/2); } static int virtnet_poll(struct napi_struct *napi, int budget) @@ -528,7 +520,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget) if (vi->num < vi->max / 2) { if (!try_fill_recv(vi, GFP_ATOMIC)) - queue_delayed_work(system_nrt_wq, &vi->refill, 0); + schedule_delayed_work(&vi->refill, 0); } /* Out of packets? */ @@ -707,7 +699,6 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, } tot->tx_dropped = dev->stats.tx_dropped; - tot->tx_fifo_errors = dev->stats.tx_fifo_errors; tot->rx_dropped = dev->stats.rx_dropped; tot->rx_length_errors = dev->stats.rx_length_errors; tot->rx_frame_errors = dev->stats.rx_frame_errors; @@ -728,10 +719,6 @@ static int virtnet_open(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); - /* Make sure we have some buffers: if oom use wq. */ - if (!try_fill_recv(vi, GFP_KERNEL)) - queue_delayed_work(system_nrt_wq, &vi->refill, 0); - virtnet_napi_enable(vi); return 0; } @@ -785,8 +772,6 @@ static int virtnet_close(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); - /* Make sure refill_work doesn't re-enable napi! 
*/ - cancel_delayed_work_sync(&vi->refill); napi_disable(&vi->napi); return 0; @@ -868,7 +853,7 @@ static void virtnet_set_rx_mode(struct net_device *dev) kfree(buf); } -static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) +static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; @@ -878,10 +863,9 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0)) dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); - return 0; } -static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) +static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; @@ -891,7 +875,6 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0)) dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); - return 0; } static void virtnet_get_ringparam(struct net_device *dev, @@ -906,21 +889,7 @@ static void virtnet_get_ringparam(struct net_device *dev, } - -static void virtnet_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - struct virtnet_info *vi = netdev_priv(dev); - struct virtio_device *vdev = vi->vdev; - - strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); - strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); - -} - static const struct ethtool_ops virtnet_ethtool_ops = { - .get_drvinfo = virtnet_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = virtnet_get_ringparam, }; @@ -1113,6 +1082,7 @@ static int virtnet_probe(struct virtio_device *vdev) unregister: unregister_netdev(dev); + cancel_delayed_work_sync(&vi->refill); free_vqs: vdev->config->del_vqs(vdev); free_stats: @@ -1151,7 +1121,9 @@ static void __devexit virtnet_remove(struct virtio_device *vdev) /* Stop all the virtqueues. */ vdev->config->reset(vdev); + unregister_netdev(vi->dev); + cancel_delayed_work_sync(&vi->refill); /* Free unused buffers in both send and recv, if any. 
*/ free_unused_bufs(vi); diff --git a/trunk/drivers/net/vmxnet3/vmxnet3_drv.c b/trunk/drivers/net/vmxnet3/vmxnet3_drv.c index de7fc345148a..d96bfb1ac20b 100644 --- a/trunk/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/trunk/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1926,7 +1926,7 @@ vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter) } -static int +static void vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); @@ -1943,12 +1943,10 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) } set_bit(vid, adapter->active_vlans); - - return 0; } -static int +static void vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); @@ -1965,8 +1963,6 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) } clear_bit(vid, adapter->active_vlans); - - return 0; } @@ -2167,8 +2163,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE; get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize); for (i = 0; i < rssConf->indTableSize; i++) - rssConf->indTable[i] = ethtool_rxfh_indir_default( - i, adapter->num_rx_queues); + rssConf->indTable[i] = i % adapter->num_rx_queues; devRead->rssConfDesc.confVer = 1; devRead->rssConfDesc.confLen = sizeof(*rssConf); diff --git a/trunk/drivers/net/vmxnet3/vmxnet3_ethtool.c b/trunk/drivers/net/vmxnet3/vmxnet3_ethtool.c index 587a218b2345..e662cbc8bfbd 100644 --- a/trunk/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/trunk/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -202,9 +202,14 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) struct vmxnet3_adapter *adapter = netdev_priv(netdev); strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver)); + drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0'; strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT, sizeof(drvinfo->version)); + drvinfo->driver[sizeof(drvinfo->version) - 1] = '\0'; + + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + drvinfo->fw_version[sizeof(drvinfo->fw_version) - 1] = '\0'; strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), ETHTOOL_BUSINFO_LEN); @@ -257,11 +262,11 @@ vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) } } -int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features) +int vmxnet3_set_features(struct net_device *netdev, u32 features) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); unsigned long flags; - netdev_features_t changed = features ^ netdev->features; + u32 changed = features ^ netdev->features; if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_RX)) { if (features & NETIF_F_RXCSUM) @@ -565,38 +570,44 @@ vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, } #ifdef VMXNET3_RSS -static u32 -vmxnet3_get_rss_indir_size(struct net_device *netdev) -{ - struct vmxnet3_adapter *adapter = netdev_priv(netdev); - struct UPT1_RSSConf *rssConf = adapter->rss_conf; - - return rssConf->indTableSize; -} - static int -vmxnet3_get_rss_indir(struct net_device *netdev, u32 *p) +vmxnet3_get_rss_indir(struct net_device *netdev, + struct ethtool_rxfh_indir *p) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); struct UPT1_RSSConf *rssConf = adapter->rss_conf; - unsigned int n = rssConf->indTableSize; + unsigned int n = min_t(unsigned int, p->size, rssConf->indTableSize); + p->size = rssConf->indTableSize; while (n--) - p[n] = rssConf->indTable[n]; + 
p->ring_index[n] = rssConf->indTable[n]; return 0; } static int -vmxnet3_set_rss_indir(struct net_device *netdev, const u32 *p) +vmxnet3_set_rss_indir(struct net_device *netdev, + const struct ethtool_rxfh_indir *p) { unsigned int i; unsigned long flags; struct vmxnet3_adapter *adapter = netdev_priv(netdev); struct UPT1_RSSConf *rssConf = adapter->rss_conf; + if (p->size != rssConf->indTableSize) + return -EINVAL; + for (i = 0; i < rssConf->indTableSize; i++) { + /* + * Return with error code if any of the queue indices + * is out of range + */ + if (p->ring_index[i] < 0 || + p->ring_index[i] >= adapter->num_rx_queues) + return -EINVAL; + } + for (i = 0; i < rssConf->indTableSize; i++) - rssConf->indTable[i] = p[i]; + rssConf->indTable[i] = p->ring_index[i]; spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, @@ -608,7 +619,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev, const u32 *p) } #endif -static const struct ethtool_ops vmxnet3_ethtool_ops = { +static struct ethtool_ops vmxnet3_ethtool_ops = { .get_settings = vmxnet3_get_settings, .get_drvinfo = vmxnet3_get_drvinfo, .get_regs_len = vmxnet3_get_regs_len, @@ -623,7 +634,6 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = { .set_ringparam = vmxnet3_set_ringparam, .get_rxnfc = vmxnet3_get_rxnfc, #ifdef VMXNET3_RSS - .get_rxfh_indir_size = vmxnet3_get_rss_indir_size, .get_rxfh_indir = vmxnet3_get_rss_indir, .set_rxfh_indir = vmxnet3_set_rss_indir, #endif diff --git a/trunk/drivers/net/vmxnet3/vmxnet3_int.h b/trunk/drivers/net/vmxnet3/vmxnet3_int.h index ed54797db191..b18eac1dccaa 100644 --- a/trunk/drivers/net/vmxnet3/vmxnet3_int.h +++ b/trunk/drivers/net/vmxnet3/vmxnet3_int.h @@ -401,7 +401,7 @@ void vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter); int -vmxnet3_set_features(struct net_device *netdev, netdev_features_t features); +vmxnet3_set_features(struct net_device *netdev, u32 features); int vmxnet3_create_queues(struct vmxnet3_adapter *adapter, diff --git a/trunk/drivers/net/wan/sbni.c b/trunk/drivers/net/wan/sbni.c index d43f4efd3e07..783168cce077 100644 --- a/trunk/drivers/net/wan/sbni.c +++ b/trunk/drivers/net/wan/sbni.c @@ -155,7 +155,7 @@ static int emancipate( struct net_device * ); static const char version[] = "Granch SBNI12 driver ver 5.0.1 Jun 22 2001 Denis I.Timofeev.\n"; -static bool skip_pci_probe __initdata = false; +static int skip_pci_probe __initdata = 0; static int scandone __initdata = 0; static int num __initdata = 0; diff --git a/trunk/drivers/net/wan/sealevel.c b/trunk/drivers/net/wan/sealevel.c index 4f7748478984..0b4fd05e1508 100644 --- a/trunk/drivers/net/wan/sealevel.c +++ b/trunk/drivers/net/wan/sealevel.c @@ -362,7 +362,7 @@ static int io=0x238; static int txdma=1; static int rxdma=3; static int irq=5; -static bool slow=false; +static int slow=0; module_param(io, int, 0); MODULE_PARM_DESC(io, "The I/O base of the Sealevel card"); diff --git a/trunk/drivers/net/wimax/i2400m/tx.c b/trunk/drivers/net/wimax/i2400m/tx.c index f20886ade1cc..4b9ecb20deec 100644 --- a/trunk/drivers/net/wimax/i2400m/tx.c +++ b/trunk/drivers/net/wimax/i2400m/tx.c @@ -562,7 +562,7 @@ void i2400m_tx_new(struct i2400m *i2400m) { struct device *dev = i2400m_dev(i2400m); struct i2400m_msg_hdr *tx_msg; - bool try_head = false; + bool try_head = 0; BUG_ON(i2400m->tx_msg != NULL); /* * In certain situations, TX queue might have enough space to @@ -580,7 +580,7 @@ void i2400m_tx_new(struct i2400m *i2400m) else if (tx_msg == TAIL_FULL) { i2400m_tx_skip_tail(i2400m); d_printf(2, 
dev, "new TX message: tail full, trying head\n"); - try_head = true; + try_head = 1; goto try_head; } memset(tx_msg, 0, I2400M_TX_PLD_SIZE); @@ -720,7 +720,7 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len, unsigned long flags; size_t padded_len; void *ptr; - bool try_head = false; + bool try_head = 0; unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM || pl_type == I2400M_PT_RESET_COLD; @@ -771,7 +771,7 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len, d_printf(2, dev, "pl append: tail full\n"); i2400m_tx_close(i2400m); i2400m_tx_skip_tail(i2400m); - try_head = true; + try_head = 1; goto try_new; } else if (ptr == NULL) { /* All full */ result = -ENOSPC; diff --git a/trunk/drivers/net/wimax/i2400m/usb-tx.c b/trunk/drivers/net/wimax/i2400m/usb-tx.c index 99ef81b3d5a5..ac357acfb3e9 100644 --- a/trunk/drivers/net/wimax/i2400m/usb-tx.c +++ b/trunk/drivers/net/wimax/i2400m/usb-tx.c @@ -177,6 +177,7 @@ int i2400mu_tx(struct i2400mu *i2400mu, struct i2400m_msg_hdr *tx_msg, static int i2400mu_txd(void *_i2400mu) { + int result = 0; struct i2400mu *i2400mu = _i2400mu; struct i2400m *i2400m = &i2400mu->i2400m; struct device *dev = &i2400mu->usb_iface->dev; @@ -207,14 +208,16 @@ int i2400mu_txd(void *_i2400mu) /* Yeah, we ignore errors ... not much we can do */ i2400mu_tx(i2400mu, tx_msg, tx_msg_size); i2400m_tx_msg_sent(i2400m); /* ack it, advance the FIFO */ + if (result < 0) + break; } spin_lock_irqsave(&i2400m->tx_lock, flags); i2400mu->tx_kthread = NULL; spin_unlock_irqrestore(&i2400m->tx_lock, flags); - d_fnend(4, dev, "(i2400mu %p)\n", i2400mu); - return 0; + d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result); + return result; } diff --git a/trunk/drivers/net/wireless/Makefile b/trunk/drivers/net/wireless/Makefile index 98db76196b59..0a304b060b6c 100644 --- a/trunk/drivers/net/wireless/Makefile +++ b/trunk/drivers/net/wireless/Makefile @@ -42,7 +42,7 @@ obj-$(CONFIG_ADM8211) += adm8211.o obj-$(CONFIG_MWL8K) += mwl8k.o obj-$(CONFIG_IWLWIFI) += iwlwifi/ -obj-$(CONFIG_IWLEGACY) += iwlegacy/ +obj-$(CONFIG_IWLWIFI_LEGACY) += iwlegacy/ obj-$(CONFIG_RT2X00) += rt2x00/ obj-$(CONFIG_P54_COMMON) += p54/ @@ -58,6 +58,6 @@ obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx/ obj-$(CONFIG_IWM) += iwmc3200wifi/ obj-$(CONFIG_MWIFIEX) += mwifiex/ - -obj-$(CONFIG_BRCMFMAC) += brcm80211/ -obj-$(CONFIG_BRCMSMAC) += brcm80211/ +obj-$(CONFIG_BRCMFMAC) += brcm80211/ +obj-$(CONFIG_BRCMUMAC) += brcm80211/ +obj-$(CONFIG_BRCMSMAC) += brcm80211/ diff --git a/trunk/drivers/net/wireless/airo.c b/trunk/drivers/net/wireless/airo.c index 1c008c61b95c..ac1176a4f465 100644 --- a/trunk/drivers/net/wireless/airo.c +++ b/trunk/drivers/net/wireless/airo.c @@ -1418,7 +1418,7 @@ static int encapsulate(struct airo_info *ai ,etherHead *frame, MICBuffer *mic, i emmh32_update(&context->seed,frame->da,ETH_ALEN * 2); // DA,SA emmh32_update(&context->seed,(u8*)&mic->typelen,10); // Type/Length and Snap emmh32_update(&context->seed,(u8*)&mic->seq,sizeof(mic->seq)); //SEQ - emmh32_update(&context->seed,(u8*)(frame + 1),payLen); //payload + emmh32_update(&context->seed,frame->da + ETH_ALEN * 2,payLen); //payload emmh32_final(&context->seed, (u8*)&mic->mic); /* New Type/length ?????????? 
*/ @@ -1506,7 +1506,7 @@ static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *eth, u16 emmh32_update(&context->seed, eth->da, ETH_ALEN*2); emmh32_update(&context->seed, (u8 *)&mic->typelen, sizeof(mic->typelen)+sizeof(mic->u.snap)); emmh32_update(&context->seed, (u8 *)&mic->seq,sizeof(mic->seq)); - emmh32_update(&context->seed, (u8 *)(eth + 1),payLen); + emmh32_update(&context->seed, eth->da + ETH_ALEN*2,payLen); //Calculate MIC emmh32_final(&context->seed, digest); diff --git a/trunk/drivers/net/wireless/ath/Makefile b/trunk/drivers/net/wireless/ath/Makefile index d716b748e574..d1214696a35b 100644 --- a/trunk/drivers/net/wireless/ath/Makefile +++ b/trunk/drivers/net/wireless/ath/Makefile @@ -11,4 +11,3 @@ ath-objs := main.o \ key.o ath-$(CONFIG_ATH_DEBUG) += debug.o -ccflags-y += -D__CHECK_ENDIAN__ diff --git a/trunk/drivers/net/wireless/ath/ath.h b/trunk/drivers/net/wireless/ath/ath.h index efc01110dc34..0f9ee46cfc97 100644 --- a/trunk/drivers/net/wireless/ath/ath.h +++ b/trunk/drivers/net/wireless/ath/ath.h @@ -152,7 +152,6 @@ struct ath_common { struct ath_cycle_counters cc_survey; struct ath_regulatory regulatory; - struct ath_regulatory reg_world_copy; const struct ath_ops *ops; const struct ath_bus_ops *bus_ops; @@ -215,10 +214,6 @@ do { \ * @ATH_DBG_HWTIMER: hardware timer handling * @ATH_DBG_BTCOEX: bluetooth coexistance * @ATH_DBG_BSTUCK: stuck beacons - * @ATH_DBG_MCI: Message Coexistence Interface, a private protocol - * used exclusively for WLAN-BT coexistence starting from - * AR9462. - * @ATH_DBG_DFS: radar datection * @ATH_DBG_ANY: enable all debugging * * The debug level is used to control the amount and type of debugging output @@ -244,8 +239,6 @@ enum ATH_DEBUG { ATH_DBG_BTCOEX = 0x00002000, ATH_DBG_WMI = 0x00004000, ATH_DBG_BSTUCK = 0x00008000, - ATH_DBG_MCI = 0x00010000, - ATH_DBG_DFS = 0x00020000, ATH_DBG_ANY = 0xffffffff }; @@ -255,7 +248,7 @@ enum ATH_DEBUG { #define ath_dbg(common, dbg_mask, fmt, ...) \ do { \ - if ((common)->debug_mask & ATH_DBG_##dbg_mask) \ + if ((common)->debug_mask & dbg_mask) \ _ath_printk(KERN_DEBUG, common, fmt, ##__VA_ARGS__); \ } while (0) @@ -265,13 +258,10 @@ do { \ #else static inline __attribute__ ((format (printf, 3, 4))) -void _ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask, +void ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask, const char *fmt, ...) { } -#define ath_dbg(common, dbg_mask, fmt, ...) \ - _ath_dbg(common, ATH_DBG_##dbg_mask, fmt, ##__VA_ARGS__) - #define ATH_DBG_WARN(foo, arg...) do {} while (0) #define ATH_DBG_WARN_ON_ONCE(foo) ({ \ int __ret_warn_once = !!(foo); \ diff --git a/trunk/drivers/net/wireless/ath/ath5k/ahb.c b/trunk/drivers/net/wireless/ath/ath5k/ahb.c index ee7ea572b065..e5be7e701816 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/ahb.c +++ b/trunk/drivers/net/wireless/ath/ath5k/ahb.c @@ -166,9 +166,7 @@ static int ath_ahb_probe(struct platform_device *pdev) if (to_platform_device(ah->dev)->id == 0 && (bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) == (BD_WLAN1 | BD_WLAN0)) - ah->ah_capabilities.cap_needs_2GHz_ovr = true; - else - ah->ah_capabilities.cap_needs_2GHz_ovr = false; + __set_bit(ATH_STAT_2G_DISABLED, ah->status); } ret = ath5k_init_ah(ah, &ath_ahb_bus_ops); diff --git a/trunk/drivers/net/wireless/ath/ath5k/ani.c b/trunk/drivers/net/wireless/ath/ath5k/ani.c index bf674161a217..bea90e6be70e 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/ani.c +++ b/trunk/drivers/net/wireless/ath/ath5k/ani.c @@ -27,21 +27,15 @@ * or reducing sensitivity as necessary. 
* * The parameters are: - * * - "noise immunity" - * * - "spur immunity" - * * - "firstep level" - * * - "OFDM weak signal detection" - * * - "CCK weak signal detection" * * Basically we look at the amount of ODFM and CCK timing errors we get and then * raise or lower immunity accordingly by setting one or more of these * parameters. - * * Newer chipsets have PHY error counters in hardware which will generate a MIB * interrupt when they overflow. Older hardware has too enable PHY error frames * by setting a RX flag and then count every single PHY error. When a specified @@ -51,13 +45,11 @@ */ -/***********************\ -* ANI parameter control * -\***********************/ +/*** ANI parameter control ***/ /** * ath5k_ani_set_noise_immunity_level() - Set noise immunity level - * @ah: The &struct ath5k_hw + * * @level: level between 0 and @ATH5K_ANI_MAX_NOISE_IMM_LVL */ void @@ -99,11 +91,12 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level) ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level); } + /** * ath5k_ani_set_spur_immunity_level() - Set spur immunity level - * @ah: The &struct ath5k_hw + * * @level: level between 0 and @max_spur_level (the maximum level is dependent - * on the chip revision). + * on the chip revision). */ void ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level) @@ -124,9 +117,10 @@ ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level) ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level); } + /** * ath5k_ani_set_firstep_level() - Set "firstep" level - * @ah: The &struct ath5k_hw + * * @level: level between 0 and @ATH5K_ANI_MAX_FIRSTEP_LVL */ void @@ -146,9 +140,11 @@ ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level) ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level); } + /** - * ath5k_ani_set_ofdm_weak_signal_detection() - Set OFDM weak signal detection - * @ah: The &struct ath5k_hw + * ath5k_ani_set_ofdm_weak_signal_detection() - Control OFDM weak signal + * detection + * * @on: turn on or off */ void @@ -186,9 +182,10 @@ ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on) on ? "on" : "off"); } + /** - * ath5k_ani_set_cck_weak_signal_detection() - Set CCK weak signal detection - * @ah: The &struct ath5k_hw + * ath5k_ani_set_cck_weak_signal_detection() - control CCK weak signal detection + * * @on: turn on or off */ void @@ -203,16 +200,13 @@ ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on) } -/***************\ -* ANI algorithm * -\***************/ +/*** ANI algorithm ***/ /** * ath5k_ani_raise_immunity() - Increase noise immunity - * @ah: The &struct ath5k_hw - * @as: The &struct ath5k_ani_state + * * @ofdm_trigger: If this is true we are called because of too many OFDM errors, - * the algorithm will tune more parameters then. + * the algorithm will tune more parameters then. * * Try to raise noise immunity (=decrease sensitivity) in several steps * depending on the average RSSI of the beacons we received. @@ -296,10 +290,9 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as, */ } + /** * ath5k_ani_lower_immunity() - Decrease noise immunity - * @ah: The &struct ath5k_hw - * @as: The &struct ath5k_ani_state * * Try to lower noise immunity (=increase sensitivity) in several steps * depending on the average RSSI of the beacons we received. 
@@ -359,10 +352,9 @@ ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as) } } + /** * ath5k_hw_ani_get_listen_time() - Update counters and return listening time - * @ah: The &struct ath5k_hw - * @as: The &struct ath5k_ani_state * * Return an approximation of the time spent "listening" in milliseconds (ms) * since the last call of this function. @@ -387,10 +379,9 @@ ath5k_hw_ani_get_listen_time(struct ath5k_hw *ah, struct ath5k_ani_state *as) return listen; } + /** * ath5k_ani_save_and_clear_phy_errors() - Clear and save PHY error counters - * @ah: The &struct ath5k_hw - * @as: The &struct ath5k_ani_state * * Clear the PHY error counters as soon as possible, since this might be called * from a MIB interrupt and we want to make sure we don't get interrupted again. @@ -438,14 +429,14 @@ ath5k_ani_save_and_clear_phy_errors(struct ath5k_hw *ah, return 1; } + /** * ath5k_ani_period_restart() - Restart ANI period - * @as: The &struct ath5k_ani_state * * Just reset counters, so they are clear for the next "ani period". */ static void -ath5k_ani_period_restart(struct ath5k_ani_state *as) +ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as) { /* keep last values for debugging */ as->last_ofdm_errors = as->ofdm_errors; @@ -457,9 +448,9 @@ ath5k_ani_period_restart(struct ath5k_ani_state *as) as->listen_time = 0; } + /** * ath5k_ani_calibration() - The main ANI calibration function - * @ah: The &struct ath5k_hw * * We count OFDM and CCK errors relative to the time where we did not send or * receive ("listen" time) and raise or lower immunity accordingly. @@ -501,7 +492,7 @@ ath5k_ani_calibration(struct ath5k_hw *ah) /* too many PHY errors - we have to raise immunity */ bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false; ath5k_ani_raise_immunity(ah, as, ofdm_flag); - ath5k_ani_period_restart(as); + ath5k_ani_period_restart(ah, as); } else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) { /* If more than 5 (TODO: why 5?) periods have passed and we got @@ -513,18 +504,15 @@ ath5k_ani_calibration(struct ath5k_hw *ah) if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low) ath5k_ani_lower_immunity(ah, as); - ath5k_ani_period_restart(as); + ath5k_ani_period_restart(ah, as); } } -/*******************\ -* Interrupt handler * -\*******************/ +/*** INTERRUPT HANDLER ***/ /** * ath5k_ani_mib_intr() - Interrupt handler for ANI MIB counters - * @ah: The &struct ath5k_hw * * Just read & reset the registers quickly, so they don't generate more * interrupts, save the counters and schedule the tasklet to decide whether @@ -561,11 +549,9 @@ ath5k_ani_mib_intr(struct ath5k_hw *ah) tasklet_schedule(&ah->ani_tasklet); } + /** - * ath5k_ani_phy_error_report - Used by older HW to report PHY errors - * - * @ah: The &struct ath5k_hw - * @phyerr: One of enum ath5k_phy_error_code + * ath5k_ani_phy_error_report() - Used by older HW to report PHY errors * * This is used by hardware without PHY error counters to report PHY errors * on a frame-by-frame basis, instead of the interrupt. @@ -588,13 +574,10 @@ ath5k_ani_phy_error_report(struct ath5k_hw *ah, } -/****************\ -* Initialization * -\****************/ +/*** INIT ***/ /** * ath5k_enable_phy_err_counters() - Enable PHY error counters - * @ah: The &struct ath5k_hw * * Enable PHY error counters for OFDM and CCK timing errors. 
*/ @@ -613,9 +596,9 @@ ath5k_enable_phy_err_counters(struct ath5k_hw *ah) ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT); } + /** * ath5k_disable_phy_err_counters() - Disable PHY error counters - * @ah: The &struct ath5k_hw * * Disable PHY error counters for OFDM and CCK timing errors. */ @@ -632,10 +615,10 @@ ath5k_disable_phy_err_counters(struct ath5k_hw *ah) ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT); } + /** * ath5k_ani_init() - Initialize ANI - * @ah: The &struct ath5k_hw - * @mode: One of enum ath5k_ani_mode + * @mode: Which mode to use (auto, manual high, manual low, off) * * Initialize ANI according to mode. */ @@ -712,18 +695,10 @@ ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode) } -/**************\ -* Debug output * -\**************/ +/*** DEBUG ***/ #ifdef CONFIG_ATH5K_DEBUG -/** - * ath5k_ani_print_counters() - Print ANI counters - * @ah: The &struct ath5k_hw - * - * Used for debugging ANI - */ void ath5k_ani_print_counters(struct ath5k_hw *ah) { diff --git a/trunk/drivers/net/wireless/ath/ath5k/ani.h b/trunk/drivers/net/wireless/ath/ath5k/ani.h index 21aa355460bb..7358b6c83c6c 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/ani.h +++ b/trunk/drivers/net/wireless/ath/ath5k/ani.h @@ -40,13 +40,13 @@ enum ath5k_phy_error_code; * enum ath5k_ani_mode - mode for ANI / noise sensitivity * * @ATH5K_ANI_MODE_OFF: Turn ANI off. This can be useful to just stop the ANI - * algorithm after it has been on auto mode. - * @ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low, - * maximizing sensitivity. ANI will not run. - * @ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high, - * minimizing sensitivity. ANI will not run. - * @ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the - * amount of OFDM and CCK frame errors (default). + * algorithm after it has been on auto mode. + * ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low, + * maximizing sensitivity. ANI will not run. + * ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high, + * minimizing sensitivity. ANI will not run. + * ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the + * amount of OFDM and CCK frame errors (default). 
*/ enum ath5k_ani_mode { ATH5K_ANI_MODE_OFF = 0, @@ -58,22 +58,8 @@ enum ath5k_ani_mode { /** * struct ath5k_ani_state - ANI state and associated counters - * @ani_mode: One of enum ath5k_ani_mode - * @noise_imm_level: Noise immunity level - * @spur_level: Spur immunity level - * @firstep_level: FIRstep level - * @ofdm_weak_sig: OFDM weak signal detection state (on/off) - * @cck_weak_sig: CCK weak signal detection state (on/off) - * @max_spur_level: Max spur immunity level (chip specific) - * @listen_time: Listen time - * @ofdm_errors: OFDM timing error count - * @cck_errors: CCK timing error count - * @last_cc: The &struct ath_cycle_counters (for stats) - * @last_listen: Listen time from previous run (for stats) - * @last_ofdm_errors: OFDM timing error count from previous run (for tats) - * @last_cck_errors: CCK timing error count from previous run (for stats) - * @sum_ofdm_errors: Sum of OFDM timing errors (for stats) - * @sum_cck_errors: Sum of all CCK timing errors (for stats) + * + * @max_spur_level: the maximum spur level is chip dependent */ struct ath5k_ani_state { enum ath5k_ani_mode ani_mode; diff --git a/trunk/drivers/net/wireless/ath/ath5k/ath5k.h b/trunk/drivers/net/wireless/ath/ath5k/ath5k.h index c2b2518c2ecd..fecbcd9a4259 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/ath5k.h +++ b/trunk/drivers/net/wireless/ath/ath5k/ath5k.h @@ -187,9 +187,10 @@ #define AR5K_TUNE_MAX_TXPOWER 63 #define AR5K_TUNE_DEFAULT_TXPOWER 25 #define AR5K_TUNE_TPC_TXPOWER false -#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 60000 /* 60 sec */ -#define ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT 10000 /* 10 sec */ +#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 10000 /* 10 sec */ #define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */ +#define ATH5K_TUNE_CALIBRATION_INTERVAL_NF 60000 /* 60 sec */ + #define ATH5K_TX_COMPLETE_POLL_INT 3000 /* 3 sec */ #define AR5K_INIT_CARR_SENSE_EN 1 @@ -261,34 +262,16 @@ #define AR5K_AGC_SETTLING_TURBO 37 +/* GENERIC CHIPSET DEFINITIONS */ -/*****************************\ -* GENERIC CHIPSET DEFINITIONS * -\*****************************/ - -/** - * enum ath5k_version - MAC Chips - * @AR5K_AR5210: AR5210 (Crete) - * @AR5K_AR5211: AR5211 (Oahu/Maui) - * @AR5K_AR5212: AR5212 (Venice) and newer - */ +/* MAC Chips */ enum ath5k_version { AR5K_AR5210 = 0, AR5K_AR5211 = 1, AR5K_AR5212 = 2, }; -/** - * enum ath5k_radio - PHY Chips - * @AR5K_RF5110: RF5110 (Fez) - * @AR5K_RF5111: RF5111 (Sombrero) - * @AR5K_RF5112: RF2112/5112(A) (Derby/Derby2) - * @AR5K_RF2413: RF2413/2414 (Griffin/Griffin-Lite) - * @AR5K_RF5413: RF5413/5414/5424 (Eagle/Condor) - * @AR5K_RF2316: RF2315/2316 (Cobra SoC) - * @AR5K_RF2317: RF2317 (Spider SoC) - * @AR5K_RF2425: RF2425/2417 (Swan/Nalla) - */ +/* PHY Chips */ enum ath5k_radio { AR5K_RF5110 = 0, AR5K_RF5111 = 1, @@ -320,11 +303,11 @@ enum ath5k_radio { #define AR5K_SREV_AR5213A 0x59 /* Hainan */ #define AR5K_SREV_AR2413 0x78 /* Griffin lite */ #define AR5K_SREV_AR2414 0x70 /* Griffin */ -#define AR5K_SREV_AR2315_R6 0x86 /* AP51-Light */ -#define AR5K_SREV_AR2315_R7 0x87 /* AP51-Full */ +#define AR5K_SREV_AR2315_R6 0x86 /* AP51-Light */ +#define AR5K_SREV_AR2315_R7 0x87 /* AP51-Full */ #define AR5K_SREV_AR5424 0x90 /* Condor */ -#define AR5K_SREV_AR2317_R1 0x90 /* AP61-Light */ -#define AR5K_SREV_AR2317_R2 0x91 /* AP61-Full */ +#define AR5K_SREV_AR2317_R1 0x90 /* AP61-Light */ +#define AR5K_SREV_AR2317_R2 0x91 /* AP61-Full */ #define AR5K_SREV_AR5413 0xa4 /* Eagle lite */ #define AR5K_SREV_AR5414 0xa0 /* Eagle */ #define AR5K_SREV_AR2415 0xb0 /* Talon 
*/ @@ -361,40 +344,32 @@ enum ath5k_radio { /* TODO add support to mac80211 for vendor-specific rates and modes */ -/** - * DOC: Atheros XR - * +/* * Some of this information is based on Documentation from: * * http://madwifi-project.org/wiki/ChipsetFeatures/SuperAG * - * Atheros' eXtended Range - range enhancing extension is a modulation scheme - * that is supposed to double the link distance between an Atheros XR-enabled - * client device with an Atheros XR-enabled access point. This is achieved - * by increasing the receiver sensitivity up to, -105dBm, which is about 20dB - * above what the 802.11 specifications demand. In addition, new (proprietary) - * data rates are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s. + * Modulation for Atheros' eXtended Range - range enhancing extension that is + * supposed to double the distance an Atheros client device can keep a + * connection with an Atheros access point. This is achieved by increasing + * the receiver sensitivity up to, -105dBm, which is about 20dB above what + * the 802.11 specifications demand. In addition, new (proprietary) data rates + * are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s. * * Please note that can you either use XR or TURBO but you cannot use both, * they are exclusive. * - * Also note that we do not plan to support XR mode at least for now. You can - * get a mode similar to XR by using 5MHz bwmode. */ - - -/** - * DOC: Atheros SuperAG - * - * In addition to XR we have another modulation scheme called TURBO mode - * that is supposed to provide a throughput transmission speed up to 40Mbit/s - * -60Mbit/s at a 108Mbit/s signaling rate achieved through the bonding of two - * 54Mbit/s 802.11g channels. To use this feature both ends must support it. +#define MODULATION_XR 0x00000200 +/* + * Modulation for Atheros' Turbo G and Turbo A, its supposed to provide a + * throughput transmission speed up to 40Mbit/s-60Mbit/s at a 108Mbit/s + * signaling rate achieved through the bonding of two 54Mbit/s 802.11g + * channels. To use this feature your Access Point must also support it. * There is also a distinction between "static" and "dynamic" turbo modes: * * - Static: is the dumb version: devices set to this mode stick to it until * the mode is turned off. - * * - Dynamic: is the intelligent version, the network decides itself if it * is ok to use turbo. As soon as traffic is detected on adjacent channels * (which would get used in turbo mode), or when a non-turbo station joins @@ -408,39 +383,24 @@ enum ath5k_radio { * * http://www.pcworld.com/article/id,113428-page,1/article.html * - * The channel bonding seems to be driver specific though. - * - * In addition to TURBO modes we also have the following features for even - * greater speed-up: + * The channel bonding seems to be driver specific though. In addition to + * deciding what channels will be used, these "Turbo" modes are accomplished + * by also enabling the following features: * * - Bursting: allows multiple frames to be sent at once, rather than pausing * after each frame. Bursting is a standards-compliant feature that can be * used with any Access Point. - * * - Fast frames: increases the amount of information that can be sent per * frame, also resulting in a reduction of transmission overhead. It is a * proprietary feature that needs to be supported by the Access Point. - * * - Compression: data frames are compressed in real time using a Lempel Ziv * algorithm. This is done transparently. 
Once this feature is enabled, * compression and decompression takes place inside the chipset, without * putting additional load on the host CPU. * - * As with XR we also don't plan to support SuperAG features for now. You can - * get a mode similar to TURBO by using 40MHz bwmode. */ +#define MODULATION_TURBO 0x00000080 - -/** - * enum ath5k_driver_mode - PHY operation mode - * @AR5K_MODE_11A: 802.11a - * @AR5K_MODE_11B: 802.11b - * @AR5K_MODE_11G: 801.11g - * @AR5K_MODE_MAX: Used for boundary checks - * - * Do not change the order here, we use these as - * array indices and it also maps EEPROM structures. - */ enum ath5k_driver_mode { AR5K_MODE_11A = 0, AR5K_MODE_11B = 1, @@ -448,64 +408,30 @@ enum ath5k_driver_mode { AR5K_MODE_MAX = 3 }; -/** - * enum ath5k_ant_mode - Antenna operation mode - * @AR5K_ANTMODE_DEFAULT: Default antenna setup - * @AR5K_ANTMODE_FIXED_A: Only antenna A is present - * @AR5K_ANTMODE_FIXED_B: Only antenna B is present - * @AR5K_ANTMODE_SINGLE_AP: STA locked on a single ap - * @AR5K_ANTMODE_SECTOR_AP: AP with tx antenna set on tx desc - * @AR5K_ANTMODE_SECTOR_STA: STA with tx antenna set on tx desc - * @AR5K_ANTMODE_DEBUG: Debug mode -A -> Rx, B-> Tx- - * @AR5K_ANTMODE_MAX: Used for boundary checks - * - * For more infos on antenna control check out phy.c - */ enum ath5k_ant_mode { - AR5K_ANTMODE_DEFAULT = 0, - AR5K_ANTMODE_FIXED_A = 1, - AR5K_ANTMODE_FIXED_B = 2, - AR5K_ANTMODE_SINGLE_AP = 3, - AR5K_ANTMODE_SECTOR_AP = 4, - AR5K_ANTMODE_SECTOR_STA = 5, - AR5K_ANTMODE_DEBUG = 6, + AR5K_ANTMODE_DEFAULT = 0, /* default antenna setup */ + AR5K_ANTMODE_FIXED_A = 1, /* only antenna A is present */ + AR5K_ANTMODE_FIXED_B = 2, /* only antenna B is present */ + AR5K_ANTMODE_SINGLE_AP = 3, /* sta locked on a single ap */ + AR5K_ANTMODE_SECTOR_AP = 4, /* AP with tx antenna set on tx desc */ + AR5K_ANTMODE_SECTOR_STA = 5, /* STA with tx antenna set on tx desc */ + AR5K_ANTMODE_DEBUG = 6, /* Debug mode -A -> Rx, B-> Tx- */ AR5K_ANTMODE_MAX, }; -/** - * enum ath5k_bw_mode - Bandwidth operation mode - * @AR5K_BWMODE_DEFAULT: 20MHz, default operation - * @AR5K_BWMODE_5MHZ: Quarter rate - * @AR5K_BWMODE_10MHZ: Half rate - * @AR5K_BWMODE_40MHZ: Turbo - */ enum ath5k_bw_mode { - AR5K_BWMODE_DEFAULT = 0, - AR5K_BWMODE_5MHZ = 1, - AR5K_BWMODE_10MHZ = 2, - AR5K_BWMODE_40MHZ = 3 + AR5K_BWMODE_DEFAULT = 0, /* 20MHz, default operation */ + AR5K_BWMODE_5MHZ = 1, /* Quarter rate */ + AR5K_BWMODE_10MHZ = 2, /* Half rate */ + AR5K_BWMODE_40MHZ = 3 /* Turbo */ }; - - /****************\ TX DEFINITIONS \****************/ -/** - * struct ath5k_tx_status - TX Status descriptor - * @ts_seqnum: Sequence number - * @ts_tstamp: Timestamp - * @ts_status: Status code - * @ts_final_idx: Final transmission series index - * @ts_final_retry: Final retry count - * @ts_rssi: RSSI for received ACK - * @ts_shortretry: Short retry count - * @ts_virtcol: Virtual collision count - * @ts_antenna: Antenna used - * - * TX status descriptor gets filled by the hw - * on each transmission attempt. +/* + * TX Status descriptor */ struct ath5k_tx_status { u16 ts_seqnum; @@ -528,6 +454,7 @@ struct ath5k_tx_status { * enum ath5k_tx_queue - Queue types used to classify tx queues. 
* @AR5K_TX_QUEUE_INACTIVE: q is unused -- see ath5k_hw_release_tx_queue * @AR5K_TX_QUEUE_DATA: A normal data queue + * @AR5K_TX_QUEUE_XR_DATA: An XR-data queue * @AR5K_TX_QUEUE_BEACON: The beacon queue * @AR5K_TX_QUEUE_CAB: The after-beacon queue * @AR5K_TX_QUEUE_UAPSD: Unscheduled Automatic Power Save Delivery queue @@ -535,6 +462,7 @@ struct ath5k_tx_status { enum ath5k_tx_queue { AR5K_TX_QUEUE_INACTIVE = 0, AR5K_TX_QUEUE_DATA, + AR5K_TX_QUEUE_XR_DATA, AR5K_TX_QUEUE_BEACON, AR5K_TX_QUEUE_CAB, AR5K_TX_QUEUE_UAPSD, @@ -543,46 +471,36 @@ enum ath5k_tx_queue { #define AR5K_NUM_TX_QUEUES 10 #define AR5K_NUM_TX_QUEUES_NOQCU 2 -/** - * enum ath5k_tx_queue_subtype - Queue sub-types to classify normal data queues - * @AR5K_WME_AC_BK: Background traffic - * @AR5K_WME_AC_BE: Best-effort (normal) traffic - * @AR5K_WME_AC_VI: Video traffic - * @AR5K_WME_AC_VO: Voice traffic - * +/* + * Queue syb-types to classify normal data queues. * These are the 4 Access Categories as defined in * WME spec. 0 is the lowest priority and 4 is the * highest. Normal data that hasn't been classified * goes to the Best Effort AC. */ enum ath5k_tx_queue_subtype { - AR5K_WME_AC_BK = 0, - AR5K_WME_AC_BE, - AR5K_WME_AC_VI, - AR5K_WME_AC_VO, + AR5K_WME_AC_BK = 0, /*Background traffic*/ + AR5K_WME_AC_BE, /*Best-effort (normal) traffic*/ + AR5K_WME_AC_VI, /*Video traffic*/ + AR5K_WME_AC_VO, /*Voice traffic*/ }; -/** - * enum ath5k_tx_queue_id - Queue ID numbers as returned by the hw functions - * @AR5K_TX_QUEUE_ID_NOQCU_DATA: Data queue on AR5210 (no QCU available) - * @AR5K_TX_QUEUE_ID_NOQCU_BEACON: Beacon queue on AR5210 (no QCU available) - * @AR5K_TX_QUEUE_ID_DATA_MIN: Data queue min index - * @AR5K_TX_QUEUE_ID_DATA_MAX: Data queue max index - * @AR5K_TX_QUEUE_ID_CAB: Content after beacon queue - * @AR5K_TX_QUEUE_ID_BEACON: Beacon queue - * @AR5K_TX_QUEUE_ID_UAPSD: Urgent Automatic Power Save Delivery, - * - * Each number represents a hw queue. If hw does not support hw queues - * (eg 5210) all data goes in one queue. +/* + * Queue ID numbers as returned by the hw functions, each number + * represents a hw queue. If hw does not support hw queues + * (eg 5210) all data goes in one queue. These match + * d80211 definitions (net80211/MadWiFi don't use them). 
*/ enum ath5k_tx_queue_id { AR5K_TX_QUEUE_ID_NOQCU_DATA = 0, AR5K_TX_QUEUE_ID_NOQCU_BEACON = 1, - AR5K_TX_QUEUE_ID_DATA_MIN = 0, - AR5K_TX_QUEUE_ID_DATA_MAX = 3, - AR5K_TX_QUEUE_ID_UAPSD = 7, - AR5K_TX_QUEUE_ID_CAB = 8, - AR5K_TX_QUEUE_ID_BEACON = 9, + AR5K_TX_QUEUE_ID_DATA_MIN = 0, /*IEEE80211_TX_QUEUE_DATA0*/ + AR5K_TX_QUEUE_ID_DATA_MAX = 3, /*IEEE80211_TX_QUEUE_DATA3*/ + AR5K_TX_QUEUE_ID_DATA_SVP = 5, /*IEEE80211_TX_QUEUE_SVP - Spectralink Voice Protocol*/ + AR5K_TX_QUEUE_ID_CAB = 6, /*IEEE80211_TX_QUEUE_AFTER_BEACON*/ + AR5K_TX_QUEUE_ID_BEACON = 7, /*IEEE80211_TX_QUEUE_BEACON*/ + AR5K_TX_QUEUE_ID_UAPSD = 8, + AR5K_TX_QUEUE_ID_XR_DATA = 9, }; /* @@ -603,70 +521,46 @@ enum ath5k_tx_queue_id { #define AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS 0x1000 /* Disable backoff while bursting */ #define AR5K_TXQ_FLAG_COMPRESSION_ENABLE 0x2000 /* Enable hw compression -not implemented-*/ -/** - * struct ath5k_txq - Transmit queue state - * @qnum: Hardware q number - * @link: Link ptr in last TX desc - * @q: Transmit queue (&struct list_head) - * @lock: Lock on q and link - * @setup: Is the queue configured - * @txq_len:Number of queued buffers - * @txq_max: Max allowed num of queued buffers - * @txq_poll_mark: Used to check if queue got stuck - * @txq_stuck: Queue stuck counter - * - * One of these exists for each hardware transmit queue. - * Packets sent to us from above are assigned to queues based - * on their priority. Not all devices support a complete set - * of hardware transmit queues. For those devices the array - * sc_ac2q will map multiple priorities to fewer hardware queues - * (typically all to one hardware queue). +/* + * Data transmit queue state. One of these exists for each + * hardware transmit queue. Packets sent to us from above + * are assigned to queues based on their priority. Not all + * devices support a complete set of hardware transmit queues. + * For those devices the array sc_ac2q will map multiple + * priorities to fewer hardware queues (typically all to one + * hardware queue). 
*/ struct ath5k_txq { - unsigned int qnum; - u32 *link; - struct list_head q; - spinlock_t lock; + unsigned int qnum; /* hardware q number */ + u32 *link; /* link ptr in last TX desc */ + struct list_head q; /* transmit queue */ + spinlock_t lock; /* lock on q and link */ bool setup; - int txq_len; - int txq_max; + int txq_len; /* number of queued buffers */ + int txq_max; /* max allowed num of queued buffers */ bool txq_poll_mark; - unsigned int txq_stuck; + unsigned int txq_stuck; /* informational counter */ }; -/** - * struct ath5k_txq_info - A struct to hold TX queue's parameters - * @tqi_type: One of enum ath5k_tx_queue - * @tqi_subtype: One of enum ath5k_tx_queue_subtype - * @tqi_flags: TX queue flags (see above) - * @tqi_aifs: Arbitrated Inter-frame Space - * @tqi_cw_min: Minimum Contention Window - * @tqi_cw_max: Maximum Contention Window - * @tqi_cbr_period: Constant bit rate period - * @tqi_ready_time: Time queue waits after an event when RDYTIME is enabled +/* + * A struct to hold tx queue's parameters */ struct ath5k_txq_info { enum ath5k_tx_queue tqi_type; enum ath5k_tx_queue_subtype tqi_subtype; - u16 tqi_flags; - u8 tqi_aifs; - u16 tqi_cw_min; - u16 tqi_cw_max; - u32 tqi_cbr_period; + u16 tqi_flags; /* Tx queue flags (see above) */ + u8 tqi_aifs; /* Arbitrated Interframe Space */ + u16 tqi_cw_min; /* Minimum Contention Window */ + u16 tqi_cw_max; /* Maximum Contention Window */ + u32 tqi_cbr_period; /* Constant bit rate period */ u32 tqi_cbr_overflow_limit; u32 tqi_burst_time; - u32 tqi_ready_time; + u32 tqi_ready_time; /* Time queue waits after an event */ }; -/** - * enum ath5k_pkt_type - Transmit packet types - * @AR5K_PKT_TYPE_NORMAL: Normal data - * @AR5K_PKT_TYPE_ATIM: ATIM - * @AR5K_PKT_TYPE_PSPOLL: PS-Poll - * @AR5K_PKT_TYPE_BEACON: Beacon - * @AR5K_PKT_TYPE_PROBE_RESP: Probe response - * @AR5K_PKT_TYPE_PIFS: PIFS - * Used on tx control descriptor +/* + * Transmit packet types. 
+ * used on tx control descriptor */ enum ath5k_pkt_type { AR5K_PKT_TYPE_NORMAL = 0, @@ -689,23 +583,27 @@ enum ath5k_pkt_type { (ah->ah_txpower.txp_rates_power_table[(_r)] & 0x3f) << (_v) \ ) +/* + * DMA size definitions (2^(n+2)) + */ +enum ath5k_dmasize { + AR5K_DMASIZE_4B = 0, + AR5K_DMASIZE_8B, + AR5K_DMASIZE_16B, + AR5K_DMASIZE_32B, + AR5K_DMASIZE_64B, + AR5K_DMASIZE_128B, + AR5K_DMASIZE_256B, + AR5K_DMASIZE_512B +}; /****************\ RX DEFINITIONS \****************/ -/** - * struct ath5k_rx_status - RX Status descriptor - * @rs_datalen: Data length - * @rs_tstamp: Timestamp - * @rs_status: Status code - * @rs_phyerr: PHY error mask - * @rs_rssi: RSSI in 0.5dbm units - * @rs_keyix: Index to the key used for decrypting - * @rs_rate: Rate used to decode the frame - * @rs_antenna: Antenna used to receive the frame - * @rs_more: Indicates this is a frame fragment (Fast frames) +/* + * RX Status descriptor */ struct ath5k_rx_status { u16 rs_datalen; @@ -747,18 +645,10 @@ struct ath5k_rx_status { #define TSF_TO_TU(_tsf) (u32)((_tsf) >> 10) - /*******************************\ GAIN OPTIMIZATION DEFINITIONS \*******************************/ -/** - * enum ath5k_rfgain - RF Gain optimization engine state - * @AR5K_RFGAIN_INACTIVE: Engine disabled - * @AR5K_RFGAIN_ACTIVE: Probe active - * @AR5K_RFGAIN_READ_REQUESTED: Probe requested - * @AR5K_RFGAIN_NEED_CHANGE: Gain_F needs change - */ enum ath5k_rfgain { AR5K_RFGAIN_INACTIVE = 0, AR5K_RFGAIN_ACTIVE, @@ -766,16 +656,6 @@ enum ath5k_rfgain { AR5K_RFGAIN_NEED_CHANGE, }; -/** - * struct ath5k_gain - RF Gain optimization engine state data - * @g_step_idx: Current step index - * @g_current: Current gain - * @g_target: Target gain - * @g_low: Low gain boundary - * @g_high: High gain boundary - * @g_f_corr: Gain_F correction - * @g_state: One of enum ath5k_rfgain - */ struct ath5k_gain { u8 g_step_idx; u8 g_current; @@ -786,8 +666,6 @@ struct ath5k_gain { u8 g_state; }; - - /********************\ COMMON DEFINITIONS \********************/ @@ -796,14 +674,9 @@ struct ath5k_gain { #define AR5K_SLOT_TIME_20 880 #define AR5K_SLOT_TIME_MAX 0xffff -/** - * struct ath5k_athchan_2ghz - 2GHz to 5GHZ map for RF5111 - * @a2_flags: Channel flags (internal) - * @a2_athchan: HW channel number (internal) - * - * This structure is used to map 2GHz channels to - * 5GHz Atheros channels on 2111 frequency converter - * that comes together with RF5111 +/* + * The following structure is used to map 2GHz channels to + * 5GHz Atheros channels. * TODO: Clean up */ struct ath5k_athchan_2ghz { @@ -811,80 +684,36 @@ struct ath5k_athchan_2ghz { u16 a2_athchan; }; -/** - * enum ath5k_dmasize - DMA size definitions (2^(n+2)) - * @AR5K_DMASIZE_4B: 4Bytes - * @AR5K_DMASIZE_8B: 8Bytes - * @AR5K_DMASIZE_16B: 16Bytes - * @AR5K_DMASIZE_32B: 32Bytes - * @AR5K_DMASIZE_64B: 64Bytes (Default) - * @AR5K_DMASIZE_128B: 128Bytes - * @AR5K_DMASIZE_256B: 256Bytes - * @AR5K_DMASIZE_512B: 512Bytes - * - * These are used to set DMA burst size on hw - * - * Note: Some platforms can't handle more than 4Bytes - * be careful on embedded boards. - */ -enum ath5k_dmasize { - AR5K_DMASIZE_4B = 0, - AR5K_DMASIZE_8B, - AR5K_DMASIZE_16B, - AR5K_DMASIZE_32B, - AR5K_DMASIZE_64B, - AR5K_DMASIZE_128B, - AR5K_DMASIZE_256B, - AR5K_DMASIZE_512B -}; - - /******************\ RATE DEFINITIONS \******************/ /** - * DOC: Rate codes - * * Seems the ar5xxx hardware supports up to 32 rates, indexed by 1-32. * * The rate code is used to get the RX rate or set the TX rate on the * hardware descriptors. 
It is also used for internal modulation control * and settings. * - * This is the hardware rate map we are aware of (html unfriendly): + * This is the hardware rate map we are aware of: + * + * rate_code 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 + * rate_kbps 3000 1000 ? ? ? 2000 500 48000 + * + * rate_code 0x09 0x0A 0x0B 0x0C 0x0D 0x0E 0x0F 0x10 + * rate_kbps 24000 12000 6000 54000 36000 18000 9000 ? * - * Rate code Rate (Kbps) - * --------- ----------- - * 0x01 3000 (XR) - * 0x02 1000 (XR) - * 0x03 250 (XR) - * 0x04 - 05 -Reserved- - * 0x06 2000 (XR) - * 0x07 500 (XR) - * 0x08 48000 (OFDM) - * 0x09 24000 (OFDM) - * 0x0A 12000 (OFDM) - * 0x0B 6000 (OFDM) - * 0x0C 54000 (OFDM) - * 0x0D 36000 (OFDM) - * 0x0E 18000 (OFDM) - * 0x0F 9000 (OFDM) - * 0x10 - 17 -Reserved- - * 0x18 11000L (CCK) - * 0x19 5500L (CCK) - * 0x1A 2000L (CCK) - * 0x1B 1000L (CCK) - * 0x1C 11000S (CCK) - * 0x1D 5500S (CCK) - * 0x1E 2000S (CCK) - * 0x1F -Reserved- + * rate_code 17 18 19 20 21 22 23 24 + * rate_kbps ? ? ? ? ? ? ? 11000 * - * "S" indicates CCK rates with short preamble and "L" with long preamble. + * rate_code 25 26 27 28 29 30 31 32 + * rate_kbps 5500 2000 1000 11000S 5500S 2000S ? ? + * + * "S" indicates CCK rates with short preamble. * * AR5211 has different rate codes for CCK (802.11B) rates. It only uses the - * lowest 4 bits, so they are the same as above with a 0xF mask. + * lowest 4 bits, so they are the same as below with a 0xF mask. * (0xB, 0xA, 0x9 and 0x8 for 1M, 2M, 5.5M and 11M). * We handle this in ath5k_setup_bands(). */ @@ -904,9 +733,13 @@ enum ath5k_dmasize { #define ATH5K_RATE_CODE_36M 0x0D #define ATH5K_RATE_CODE_48M 0x08 #define ATH5K_RATE_CODE_54M 0x0C +/* XR */ +#define ATH5K_RATE_CODE_XR_500K 0x07 +#define ATH5K_RATE_CODE_XR_1M 0x02 +#define ATH5K_RATE_CODE_XR_2M 0x06 +#define ATH5K_RATE_CODE_XR_3M 0x01 -/* Adding this flag to rate_code on B rates - * enables short preamble */ +/* adding this flag to rate_code enables short preamble */ #define AR5K_SET_SHORT_PREAMBLE 0x04 /* @@ -914,7 +747,7 @@ enum ath5k_dmasize { */ #define AR5K_KEYCACHE_SIZE 8 -extern bool ath5k_modparam_nohwcrypt; +extern int ath5k_modparam_nohwcrypt; /***********************\ HW RELATED DEFINITIONS @@ -936,65 +769,49 @@ extern bool ath5k_modparam_nohwcrypt; /** * enum ath5k_int - Hardware interrupt masks helpers - * @AR5K_INT_RXOK: Frame successfully received - * @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor - * @AR5K_INT_RXERR: Frame reception failed - * @AR5K_INT_RXNOFRM: No frame received within a specified time period - * @AR5K_INT_RXEOL: Reached "End Of List", means we need more RX descriptors - * @AR5K_INT_RXORN: Indicates we got RX FIFO overrun. Note that Rx overrun is - * not always fatal, on some chips we can continue operation - * without resetting the card, that's why %AR5K_INT_FATAL is not - * common for all chips. - * @AR5K_INT_RX_ALL: Mask to identify all RX related interrupts - * - * @AR5K_INT_TXOK: Frame transmission success - * @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor - * @AR5K_INT_TXERR: Frame transmission failure - * @AR5K_INT_TXEOL: Received End Of List for VEOL (Virtual End Of List). The - * Queue Control Unit (QCU) signals an EOL interrupt only if a - * descriptor's LinkPtr is NULL. For more details, refer to: - * "http://www.freepatentsonline.com/20030225739.html" - * @AR5K_INT_TXNOFRM: No frame was transmitted within a specified time period - * @AR5K_INT_TXURN: Indicates we got TX FIFO underrun. 
In such case we should - * increase the TX trigger threshold. - * @AR5K_INT_TX_ALL: Mask to identify all TX related interrupts * + * @AR5K_INT_RX: mask to identify received frame interrupts, of type + * AR5K_ISR_RXOK or AR5K_ISR_RXERR + * @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor (?) + * @AR5K_INT_RXNOFRM: No frame received (?) + * @AR5K_INT_RXEOL: received End Of List for VEOL (Virtual End Of List). The + * Queue Control Unit (QCU) signals an EOL interrupt only if a descriptor's + * LinkPtr is NULL. For more details, refer to: + * http://www.freepatentsonline.com/20030225739.html + * @AR5K_INT_RXORN: Indicates we got RX overrun (eg. no more descriptors). + * Note that Rx overrun is not always fatal, on some chips we can continue + * operation without resetting the card, that's why int_fatal is not + * common for all chips. + * @AR5K_INT_TX: mask to identify received frame interrupts, of type + * AR5K_ISR_TXOK or AR5K_ISR_TXERR + * @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor (?) + * @AR5K_INT_TXURN: received when we should increase the TX trigger threshold + * We currently do increments on interrupt by + * (AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2 * @AR5K_INT_MIB: Indicates the either Management Information Base counters or - * one of the PHY error counters reached the maximum value and - * should be read and cleared. - * @AR5K_INT_SWI: Software triggered interrupt. + * one of the PHY error counters reached the maximum value and should be + * read and cleared. * @AR5K_INT_RXPHY: RX PHY Error * @AR5K_INT_RXKCM: RX Key cache miss * @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a - * beacon that must be handled in software. The alternative is if - * you have VEOL support, in that case you let the hardware deal - * with things. - * @AR5K_INT_BRSSI: Beacon received with an RSSI value below our threshold + * beacon that must be handled in software. The alternative is if you + * have VEOL support, in that case you let the hardware deal with things. * @AR5K_INT_BMISS: If in STA mode this indicates we have stopped seeing - * beacons from the AP have associated with, we should probably - * try to reassociate. When in IBSS mode this might mean we have - * not received any beacons from any local stations. Note that - * every station in an IBSS schedules to send beacons at the - * Target Beacon Transmission Time (TBTT) with a random backoff. - * @AR5K_INT_BNR: Beacon queue got triggered (DMA beacon alert) while empty. - * @AR5K_INT_TIM: Beacon with local station's TIM bit set - * @AR5K_INT_DTIM: Beacon with DTIM bit and zero DTIM count received - * @AR5K_INT_DTIM_SYNC: DTIM sync lost - * @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill switches connected to - * our GPIO pins. - * @AR5K_INT_BCN_TIMEOUT: Beacon timeout, we waited after TBTT but got noting - * @AR5K_INT_CAB_TIMEOUT: We waited for CAB traffic after the beacon but got - * nothing or an incomplete CAB frame sequence. - * @AR5K_INT_QCBRORN: A queue got it's CBR counter expired - * @AR5K_INT_QCBRURN: A queue got triggered wile empty - * @AR5K_INT_QTRIG: A queue got triggered - * - * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by bus/DMA - * errors. Indicates we need to reset the card. + * beacons from the AP have associated with, we should probably try to + * reassociate. When in IBSS mode this might mean we have not received + * any beacons from any local stations. 
Note that every station in an + * IBSS schedules to send beacons at the Target Beacon Transmission Time + * (TBTT) with a random backoff. + * @AR5K_INT_BNR: Beacon Not Ready interrupt - ?? + * @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill, disabled for now + * until properly handled + * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by DMA + * errors. These types of errors we can enable seem to be of type + * AR5K_SIMR2_MCABT, AR5K_SIMR2_SSERR and AR5K_SIMR2_DPERR. * @AR5K_INT_GLOBAL: Used to clear and set the IER - * @AR5K_INT_NOCARD: Signals the card has been removed - * @AR5K_INT_COMMON: Common interrupts shared among MACs with the same - * bit value + * @AR5K_INT_NOCARD: signals the card has been removed + * @AR5K_INT_COMMON: common interrupts shared among MACs with the same + * bit value * * These are mapped to take advantage of some common bits * between the MACs, to be able to set intr properties @@ -1030,15 +847,15 @@ enum ath5k_int { AR5K_INT_GPIO = 0x01000000, AR5K_INT_BCN_TIMEOUT = 0x02000000, /* Non common */ AR5K_INT_CAB_TIMEOUT = 0x04000000, /* Non common */ - AR5K_INT_QCBRORN = 0x08000000, /* Non common */ - AR5K_INT_QCBRURN = 0x10000000, /* Non common */ - AR5K_INT_QTRIG = 0x20000000, /* Non common */ + AR5K_INT_RX_DOPPLER = 0x08000000, /* Non common */ + AR5K_INT_QCBRORN = 0x10000000, /* Non common */ + AR5K_INT_QCBRURN = 0x20000000, /* Non common */ + AR5K_INT_QTRIG = 0x40000000, /* Non common */ AR5K_INT_GLOBAL = 0x80000000, AR5K_INT_TX_ALL = AR5K_INT_TXOK | AR5K_INT_TXDESC | AR5K_INT_TXERR - | AR5K_INT_TXNOFRM | AR5K_INT_TXEOL | AR5K_INT_TXURN, @@ -1074,32 +891,15 @@ enum ath5k_int { AR5K_INT_NOCARD = 0xffffffff }; -/** - * enum ath5k_calibration_mask - Mask which calibration is active at the moment - * @AR5K_CALIBRATION_FULL: Full calibration (AGC + SHORT) - * @AR5K_CALIBRATION_SHORT: Short calibration (NF + I/Q) - * @AR5K_CALIBRATION_NF: Noise Floor calibration - * @AR5K_CALIBRATION_ANI: Adaptive Noise Immunity - */ +/* mask which calibration is active at the moment */ enum ath5k_calibration_mask { AR5K_CALIBRATION_FULL = 0x01, AR5K_CALIBRATION_SHORT = 0x02, - AR5K_CALIBRATION_NF = 0x04, - AR5K_CALIBRATION_ANI = 0x08, + AR5K_CALIBRATION_ANI = 0x04, }; -/** - * enum ath5k_power_mode - Power management modes - * @AR5K_PM_UNDEFINED: Undefined - * @AR5K_PM_AUTO: Allow card to sleep if possible - * @AR5K_PM_AWAKE: Force card to wake up - * @AR5K_PM_FULL_SLEEP: Force card to full sleep (DANGEROUS) - * @AR5K_PM_NETWORK_SLEEP: Allow to sleep for a specified duration - * - * Currently only PM_AWAKE is used, FULL_SLEEP and NETWORK_SLEEP/AUTO - * are also known to have problems on some cards. This is not a big - * problem though because we can have almost the same effect as - * FULL_SLEEP by putting card on warm reset (it's almost powered down). 
+/* + * Power management */ enum ath5k_power_mode { AR5K_PM_UNDEFINED = 0, @@ -1157,8 +957,6 @@ struct ath5k_capabilities { } cap_queues; bool cap_has_phyerr_counters; - bool cap_has_mrr_support; - bool cap_needs_2GHz_ovr; }; /* size of noise floor history (keep it a power of two) */ @@ -1274,11 +1072,13 @@ struct ath5k_hw { dma_addr_t desc_daddr; /* DMA (physical) address */ size_t desc_len; /* size of TX/RX descriptors */ - DECLARE_BITMAP(status, 4); + DECLARE_BITMAP(status, 6); #define ATH_STAT_INVALID 0 /* disable hardware accesses */ -#define ATH_STAT_PROMISC 1 -#define ATH_STAT_LEDSOFT 2 /* enable LED gpio status */ -#define ATH_STAT_STARTED 3 /* opened & irqs enabled */ +#define ATH_STAT_MRRETRY 1 /* multi-rate retry support */ +#define ATH_STAT_PROMISC 2 +#define ATH_STAT_LEDSOFT 3 /* enable LED gpio status */ +#define ATH_STAT_STARTED 4 /* opened & irqs enabled */ +#define ATH_STAT_2G_DISABLED 5 /* multiband radio without 2G */ unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */ struct ieee80211_channel *curchan; /* current h/w channel */ @@ -1297,7 +1097,6 @@ struct ath5k_hw { led_on; /* pin setting for LED on */ struct work_struct reset_work; /* deferred chip reset */ - struct work_struct calib_work; /* deferred phy calibration */ struct list_head rxbuf; /* receive buffer */ spinlock_t rxbuflock; @@ -1314,6 +1113,8 @@ struct ath5k_hw { struct ath5k_rfkill rf_kill; + struct tasklet_struct calib; /* calibration tasklet */ + spinlock_t block; /* protects beacon */ struct tasklet_struct beacontq; /* beacon intr tasklet */ struct list_head bcbuf; /* beacon buffer */ @@ -1343,7 +1144,7 @@ struct ath5k_hw { enum ath5k_int ah_imr; struct ieee80211_channel *ah_current_channel; - bool ah_iq_cal_needed; + bool ah_calibration; bool ah_single_chip; enum ath5k_version ah_version; @@ -1386,13 +1187,7 @@ struct ath5k_hw { u32 ah_txq_imr_cbrurn; u32 ah_txq_imr_qtrig; u32 ah_txq_imr_nofrm; - - u32 ah_txq_isr_txok_all; - u32 ah_txq_isr_txurn; - u32 ah_txq_isr_qcborn; - u32 ah_txq_isr_qcburn; - u32 ah_txq_isr_qtrig; - + u32 ah_txq_isr; u32 *ah_rf_banks; size_t ah_rf_banks_size; size_t ah_rf_regs_count; @@ -1433,8 +1228,8 @@ struct ath5k_hw { /* Calibration timestamp */ unsigned long ah_cal_next_full; - unsigned long ah_cal_next_short; unsigned long ah_cal_next_ani; + unsigned long ah_cal_next_nf; /* Calibration mask */ u8 ah_cal_mask; @@ -1543,11 +1338,11 @@ void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah); u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah); void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64); void ath5k_hw_reset_tsf(struct ath5k_hw *ah); -void ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, - u32 interval); +void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval); bool ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval); /* Init function */ -void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode); +void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode, + u8 mode); /* Queue Control Unit, DFS Control Unit Functions */ int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, diff --git a/trunk/drivers/net/wireless/ath/ath5k/attach.c b/trunk/drivers/net/wireless/ath/ath5k/attach.c index d7114c75fe9b..91627dd2c26a 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/attach.c +++ b/trunk/drivers/net/wireless/ath/ath5k/attach.c @@ -27,7 +27,8 @@ #include "debug.h" /** - * ath5k_hw_post() - Power On Self Test helper function + * ath5k_hw_post - Power On Self Test helper function + * * @ah: The 
&struct ath5k_hw */ static int ath5k_hw_post(struct ath5k_hw *ah) @@ -91,7 +92,8 @@ static int ath5k_hw_post(struct ath5k_hw *ah) } /** - * ath5k_hw_init() - Check if hw is supported and init the needed structs + * ath5k_hw_init - Check if hw is supported and init the needed structs + * * @ah: The &struct ath5k_hw associated with the device * * Check if the device is supported, perform a POST and initialize the needed @@ -296,7 +298,7 @@ int ath5k_hw_init(struct ath5k_hw *ah) /* Reset SERDES to load new settings */ ath5k_hw_reg_write(ah, 0x00000000, AR5K_PCIE_SERDES_RESET); - usleep_range(1000, 1500); + mdelay(1); } /* Get misc capabilities */ @@ -306,6 +308,11 @@ int ath5k_hw_init(struct ath5k_hw *ah) goto err; } + if (test_bit(ATH_STAT_2G_DISABLED, ah->status)) { + __clear_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode); + __clear_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode); + } + /* Crypto settings */ common->keymax = (ah->ah_version == AR5K_AR5210 ? AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211); @@ -342,7 +349,8 @@ int ath5k_hw_init(struct ath5k_hw *ah) } /** - * ath5k_hw_deinit() - Free the &struct ath5k_hw + * ath5k_hw_deinit - Free the ath5k_hw struct + * * @ah: The &struct ath5k_hw */ void ath5k_hw_deinit(struct ath5k_hw *ah) diff --git a/trunk/drivers/net/wireless/ath/ath5k/base.c b/trunk/drivers/net/wireless/ath/ath5k/base.c index d366dadcf86e..b346d0492001 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/base.c +++ b/trunk/drivers/net/wireless/ath/ath5k/base.c @@ -68,23 +68,18 @@ #define CREATE_TRACE_POINTS #include "trace.h" -bool ath5k_modparam_nohwcrypt; +int ath5k_modparam_nohwcrypt; module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); -static bool modparam_all_channels; +static int modparam_all_channels; module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO); MODULE_PARM_DESC(all_channels, "Expose all channels the device can use."); -static bool modparam_fastchanswitch; +static int modparam_fastchanswitch; module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO); MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios."); -static int ath5k_modparam_no_hw_rfkill_switch; -module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch, - bool, S_IRUGO); -MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state"); - /* Module info */ MODULE_AUTHOR("Jiri Slaby"); @@ -188,6 +183,7 @@ static const struct ieee80211_rate ath5k_rates[] = { { .bitrate = 540, .hw_value = ATH5K_RATE_CODE_54M, .flags = 0 }, + /* XR missing */ }; static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp) @@ -725,25 +721,22 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf, if (ret) goto err_unmap; - /* Set up MRR descriptor */ - if (ah->ah_capabilities.cap_has_mrr_support) { - memset(mrr_rate, 0, sizeof(mrr_rate)); - memset(mrr_tries, 0, sizeof(mrr_tries)); - for (i = 0; i < 3; i++) { - rate = ieee80211_get_alt_retry_rate(ah->hw, info, i); - if (!rate) - break; - - mrr_rate[i] = rate->hw_value; - mrr_tries[i] = info->control.rates[i + 1].count; - } + memset(mrr_rate, 0, sizeof(mrr_rate)); + memset(mrr_tries, 0, sizeof(mrr_tries)); + for (i = 0; i < 3; i++) { + rate = ieee80211_get_alt_retry_rate(ah->hw, info, i); + if (!rate) + break; - ath5k_hw_setup_mrr_tx_desc(ah, ds, - mrr_rate[0], mrr_tries[0], - mrr_rate[1], mrr_tries[1], - mrr_rate[2], mrr_tries[2]); + mrr_rate[i] = rate->hw_value; + 
mrr_tries[i] = info->control.rates[i + 1].count; } + ath5k_hw_setup_mrr_tx_desc(ah, ds, + mrr_rate[0], mrr_tries[0], + mrr_rate[1], mrr_tries[1], + mrr_rate[2], mrr_tries[2]); + ds->ds_link = 0; ds->ds_data = bf->skbaddr; @@ -1696,7 +1689,7 @@ ath5k_tasklet_tx(unsigned long data) struct ath5k_hw *ah = (void *)data; for (i = 0; i < AR5K_NUM_TX_QUEUES; i++) - if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i))) + if (ah->txqs[i].setup && (ah->ah_txq_isr & BIT(i))) ath5k_tx_processq(ah, &ah->txqs[i]); ah->tx_pending = false; @@ -2012,7 +2005,7 @@ ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf) ah->nexttbtt = nexttbtt; intval |= AR5K_BEACON_ENA; - ath5k_hw_init_beacon_timers(ah, nexttbtt, intval); + ath5k_hw_init_beacon(ah, nexttbtt, intval); /* * debugging output last in order to preserve the time critical aspect @@ -2119,29 +2112,16 @@ static void ath5k_intr_calibration_poll(struct ath5k_hw *ah) { if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) && - !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) && - !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) { - - /* Run ANI only when calibration is not active */ - + !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) { + /* run ANI only when full calibration is not active */ ah->ah_cal_next_ani = jiffies + msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI); tasklet_schedule(&ah->ani_tasklet); - } else if (time_is_before_eq_jiffies(ah->ah_cal_next_short) && - !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) && - !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) { - - /* Run calibration only when another calibration - * is not running. - * - * Note: This is for both full/short calibration, - * if it's time for a full one, ath5k_calibrate_work will deal - * with it. */ - - ah->ah_cal_next_short = jiffies + - msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT); - ieee80211_queue_work(ah->hw, &ah->calib_work); + } else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) { + ah->ah_cal_next_full = jiffies + + msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL); + tasklet_schedule(&ah->calib); } /* we could use SWI to generate enough interrupts to meet our * calibration interval requirements, if necessary: @@ -2169,110 +2149,69 @@ ath5k_intr(int irq, void *dev_id) enum ath5k_int status; unsigned int counter = 1000; - - /* - * If hw is not ready (or detached) and we get an - * interrupt, or if we have no interrupts pending - * (that means it's not for us) skip it. - * - * NOTE: Group 0/1 PCI interface registers are not - * supported on WiSOCs, so we can't check for pending - * interrupts (ISR belongs to another register group - * so we are ok). - */ if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) || - ((ath5k_get_bus_type(ah) != ATH_AHB) && - !ath5k_hw_is_intr_pending(ah)))) + ((ath5k_get_bus_type(ah) != ATH_AHB) && + !ath5k_hw_is_intr_pending(ah)))) return IRQ_NONE; - /** Main loop **/ do { - ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */ - + ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */ ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n", status, ah->imask); - - /* - * Fatal hw error -> Log and reset - * - * Fatal errors are unrecoverable so we have to - * reset the card. These errors include bus and - * dma errors. - */ if (unlikely(status & AR5K_INT_FATAL)) { - + /* + * Fatal errors are unrecoverable. + * Typically these are caused by DMA errors. 
+ */ ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "fatal int, resetting\n"); ieee80211_queue_work(ah->hw, &ah->reset_work); - - /* - * RX Overrun -> Count and reset if needed - * - * Receive buffers are full. Either the bus is busy or - * the CPU is not fast enough to process all received - * frames. - */ } else if (unlikely(status & AR5K_INT_RXORN)) { - /* + * Receive buffers are full. Either the bus is busy or + * the CPU is not fast enough to process all received + * frames. * Older chipsets need a reset to come out of this * condition, but we treat it as RX for newer chips. - * We don't know exactly which versions need a reset + * We don't know exactly which versions need a reset - * this guess is copied from the HAL. */ ah->stats.rxorn_intr++; - if (ah->ah_mac_srev < AR5K_SREV_AR5212) { ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "rx overrun, resetting\n"); ieee80211_queue_work(ah->hw, &ah->reset_work); } else ath5k_schedule_rx(ah); - } else { - - /* Software Beacon Alert -> Schedule beacon tasklet */ if (status & AR5K_INT_SWBA) tasklet_hi_schedule(&ah->beacontq); - /* - * No more RX descriptors -> Just count - * - * NB: the hardware should re-read the link when - * RXE bit is written, but it doesn't work at - * least on older hardware revs. - */ - if (status & AR5K_INT_RXEOL) + if (status & AR5K_INT_RXEOL) { + /* + * NB: the hardware should re-read the link when + * RXE bit is written, but it doesn't work at + * least on older hardware revs. + */ ah->stats.rxeol_intr++; - - - /* TX Underrun -> Bump tx trigger level */ - if (status & AR5K_INT_TXURN) + } + if (status & AR5K_INT_TXURN) { + /* bump tx trigger level */ ath5k_hw_update_tx_triglevel(ah, true); - - /* RX -> Schedule rx tasklet */ + } if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR)) ath5k_schedule_rx(ah); - - /* TX -> Schedule tx tasklet */ - if (status & (AR5K_INT_TXOK - | AR5K_INT_TXDESC - | AR5K_INT_TXERR - | AR5K_INT_TXEOL)) + if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC + | AR5K_INT_TXERR | AR5K_INT_TXEOL)) ath5k_schedule_tx(ah); - - /* Missed beacon -> TODO - if (status & AR5K_INT_BMISS) - */ - - /* MIB event -> Update counters and notify ANI */ + if (status & AR5K_INT_BMISS) { + /* TODO */ + } if (status & AR5K_INT_MIB) { ah->stats.mib_intr++; ath5k_hw_update_mib_counters(ah); ath5k_ani_mib_intr(ah); } - - /* GPIO -> Notify RFKill layer */ if (status & AR5K_INT_GPIO) tasklet_schedule(&ah->rf_kill.toggleq); @@ -2283,19 +2222,12 @@ ath5k_intr(int irq, void *dev_id) } while (ath5k_hw_is_intr_pending(ah) && --counter > 0); - /* - * Until we handle rx/tx interrupts mask them on IMR - * - * NOTE: ah->(rx/tx)_pending are set when scheduling the tasklets - * and unset after we 've handled the interrupts. - */ if (ah->rx_pending || ah->tx_pending) ath5k_set_current_imask(ah); if (unlikely(!counter)) ATH5K_WARN(ah, "too many interrupts, giving up for now\n"); - /* Fire up calibration poll */ ath5k_intr_calibration_poll(ah); return IRQ_HANDLED; @@ -2306,58 +2238,41 @@ ath5k_intr(int irq, void *dev_id) * for temperature/environment changes. */ static void -ath5k_calibrate_work(struct work_struct *work) +ath5k_tasklet_calibrate(unsigned long data) { - struct ath5k_hw *ah = container_of(work, struct ath5k_hw, - calib_work); - - /* Should we run a full calibration ? 
*/ - if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) { - - ah->ah_cal_next_full = jiffies + - msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL); - ah->ah_cal_mask |= AR5K_CALIBRATION_FULL; - - ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, - "running full calibration\n"); - - if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) { - /* - * Rfgain is out of bounds, reset the chip - * to load new gain values. - */ - ATH5K_DBG(ah, ATH5K_DEBUG_RESET, - "got new rfgain, resetting\n"); - ieee80211_queue_work(ah->hw, &ah->reset_work); - } - - /* TODO: On full calibration we should stop TX here, - * so that it doesn't interfere (mostly due to gain_f - * calibration that messes with tx packets -see phy.c). - * - * NOTE: Stopping the queues from above is not enough - * to stop TX but saves us from disconecting (at least - * we don't lose packets). */ - ieee80211_stop_queues(ah->hw); - } else - ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT; + struct ath5k_hw *ah = (void *)data; + /* Only full calibration for now */ + ah->ah_cal_mask |= AR5K_CALIBRATION_FULL; ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n", ieee80211_frequency_to_channel(ah->curchan->center_freq), ah->curchan->hw_value); + if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) { + /* + * Rfgain is out of bounds, reset the chip + * to load new gain values. + */ + ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "calibration, resetting\n"); + ieee80211_queue_work(ah->hw, &ah->reset_work); + } if (ath5k_hw_phy_calibrate(ah, ah->curchan)) ATH5K_ERR(ah, "calibration of channel %u failed\n", ieee80211_frequency_to_channel( ah->curchan->center_freq)); - /* Clear calibration flags */ - if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) { - ieee80211_wake_queues(ah->hw); - ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL; - } else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT) - ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT; + /* Noise floor calibration interrupts rx/tx path while I/Q calibration + * doesn't. + * TODO: We should stop TX here, so that it doesn't interfere. + * Note that stopping the queues is not enough to stop TX! */ + if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) { + ah->ah_cal_next_nf = jiffies + + msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF); + ath5k_hw_update_noise_floor(ah); + } + + ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL; } @@ -2492,8 +2407,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops) if (ret) goto err_irq; - /* Set up multi-rate retry capabilities */ - if (ah->ah_capabilities.cap_has_mrr_support) { + /* set up multi-rate retry capabilities */ + if (ah->ah_version == AR5K_AR5212) { hw->max_rates = 4; hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT, AR5K_INIT_RETRY_LONG); @@ -2629,22 +2544,15 @@ int ath5k_start(struct ieee80211_hw *hw) * and then setup of the interrupt mask. 
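
Both sides of the calibration hunks above poll deadlines with the jiffies helpers: a next-run timestamp is kept per calibration type, and when the interrupt path notices a deadline has passed it re-arms the timestamp and schedules the corresponding work. A small user-space sketch of the same deadline/re-arm pattern follows; the tick counter and interval values are made up for illustration and do not correspond to the driver's tuning constants.

/* cal_poll.c - sketch of the "deadline passed -> re-arm and schedule" pattern
 * used for the ANI/full calibration timers. Names and intervals are illustrative. */
#include <stdbool.h>
#include <stdio.h>

static unsigned long ticks;          /* stand-in for jiffies */
static unsigned long next_ani;       /* stand-in for the ANI deadline */
static unsigned long next_full;      /* stand-in for the full-calibration deadline */

#define ANI_INTERVAL   10            /* pretend ticks between ANI runs */
#define FULL_INTERVAL  60            /* pretend ticks between full calibrations */

static bool deadline_passed(unsigned long deadline)
{
	return (long)(ticks - deadline) >= 0;   /* wrap-safe comparison */
}

static void calibration_poll(void)
{
	if (deadline_passed(next_ani)) {
		next_ani = ticks + ANI_INTERVAL;    /* re-arm first ... */
		printf("tick %lu: schedule ANI calibration\n", ticks);
	} else if (deadline_passed(next_full)) {
		next_full = ticks + FULL_INTERVAL;
		printf("tick %lu: schedule full calibration\n", ticks);
	}
}

int main(void)
{
	for (ticks = 0; ticks < 30; ticks++)
		calibration_poll();
	return 0;
}
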
*/ ah->curchan = ah->hw->conf.channel; - ah->imask = AR5K_INT_RXOK - | AR5K_INT_RXERR - | AR5K_INT_RXEOL - | AR5K_INT_RXORN - | AR5K_INT_TXDESC - | AR5K_INT_TXEOL - | AR5K_INT_FATAL - | AR5K_INT_GLOBAL - | AR5K_INT_MIB; + ah->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL | + AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL | + AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB; ret = ath5k_reset(ah, NULL, false); if (ret) goto done; - if (!ath5k_modparam_no_hw_rfkill_switch) - ath5k_rfkill_hw_start(ah); + ath5k_rfkill_hw_start(ah); /* * Reset the key cache since some parts do not reset the @@ -2677,6 +2585,7 @@ static void ath5k_stop_tasklets(struct ath5k_hw *ah) ah->tx_pending = false; tasklet_kill(&ah->rxtq); tasklet_kill(&ah->txtq); + tasklet_kill(&ah->calib); tasklet_kill(&ah->beacontq); tasklet_kill(&ah->ani_tasklet); } @@ -2728,8 +2637,7 @@ void ath5k_stop(struct ieee80211_hw *hw) cancel_delayed_work_sync(&ah->tx_complete_work); - if (!ath5k_modparam_no_hw_rfkill_switch) - ath5k_rfkill_hw_stop(ah); + ath5k_rfkill_hw_stop(ah); } /* @@ -2781,24 +2689,9 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan, ath5k_ani_init(ah, ani_mode); - /* - * Set calibration intervals - * - * Note: We don't need to run calibration imediately - * since some initial calibration is done on reset - * even for fast channel switching. Also on scanning - * this will get set again and again and it won't get - * executed unless we connect somewhere and spend some - * time on the channel (that's what calibration needs - * anyway to be accurate). - */ - ah->ah_cal_next_full = jiffies + - msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL); - ah->ah_cal_next_ani = jiffies + - msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI); - ah->ah_cal_next_short = jiffies + - msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT); - + ah->ah_cal_next_full = jiffies + msecs_to_jiffies(100); + ah->ah_cal_next_ani = jiffies; + ah->ah_cal_next_nf = jiffies; ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8); /* clear survey data and cycle counters */ @@ -2851,6 +2744,20 @@ ath5k_init(struct ieee80211_hw *hw) int ret; + /* + * Check if the MAC has multi-rate retry support. + * We do this by trying to setup a fake extended + * descriptor. MACs that don't have support will + * return false w/o doing anything. MACs that do + * support it will return true w/o doing anything. + */ + ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0); + + if (ret < 0) + goto err; + if (ret > 0) + __set_bit(ATH_STAT_MRRETRY, ah->status); + /* * Collect the channel list. 
The 802.11 layer * is responsible for filtering this list based @@ -2934,11 +2841,11 @@ ath5k_init(struct ieee80211_hw *hw) tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah); tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah); + tasklet_init(&ah->calib, ath5k_tasklet_calibrate, (unsigned long)ah); tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah); tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah); INIT_WORK(&ah->reset_work, ath5k_reset_work); - INIT_WORK(&ah->calib_work, ath5k_calibrate_work); INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work); ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac); diff --git a/trunk/drivers/net/wireless/ath/ath5k/caps.c b/trunk/drivers/net/wireless/ath/ath5k/caps.c index 994169ad39cb..810fba96702b 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/caps.c +++ b/trunk/drivers/net/wireless/ath/ath5k/caps.c @@ -85,19 +85,12 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah) caps->cap_range.range_2ghz_min = 2412; caps->cap_range.range_2ghz_max = 2732; - /* Override 2GHz modes on SoCs that need it - * NOTE: cap_needs_2GHz_ovr gets set from - * ath_ahb_probe */ - if (!caps->cap_needs_2GHz_ovr) { - if (AR5K_EEPROM_HDR_11B(ee_header)) - __set_bit(AR5K_MODE_11B, - caps->cap_mode); - - if (AR5K_EEPROM_HDR_11G(ee_header) && - ah->ah_version != AR5K_AR5211) - __set_bit(AR5K_MODE_11G, - caps->cap_mode); - } + if (AR5K_EEPROM_HDR_11B(ee_header)) + __set_bit(AR5K_MODE_11B, caps->cap_mode); + + if (AR5K_EEPROM_HDR_11G(ee_header) && + ah->ah_version != AR5K_AR5211) + __set_bit(AR5K_MODE_11G, caps->cap_mode); } } @@ -110,18 +103,12 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah) else caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES; - /* Newer hardware has PHY error counters */ + /* newer hardware has PHY error counters */ if (ah->ah_mac_srev >= AR5K_SREV_AR5213A) caps->cap_has_phyerr_counters = true; else caps->cap_has_phyerr_counters = false; - /* MACs since AR5212 have MRR support */ - if (ah->ah_version == AR5K_AR5212) - caps->cap_has_mrr_support = true; - else - caps->cap_has_mrr_support = false; - return 0; } diff --git a/trunk/drivers/net/wireless/ath/ath5k/desc.c b/trunk/drivers/net/wireless/ath/ath5k/desc.c index f8bfa3ac2af0..7e88dda82221 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/desc.c +++ b/trunk/drivers/net/wireless/ath/ath5k/desc.c @@ -26,61 +26,20 @@ #include "debug.h" -/** - * DOC: Hardware descriptor functions - * - * Here we handle the processing of the low-level hw descriptors - * that hw reads and writes via DMA for each TX and RX attempt (that means - * we can also have descriptors for failed TX/RX tries). We have two kind of - * descriptors for RX and TX, control descriptors tell the hw how to send or - * receive a packet where to read/write it from/to etc and status descriptors - * that contain information about how the packet was sent or received (errors - * included). - * - * Descriptor format is not exactly the same for each MAC chip version so we - * have function pointers on &struct ath5k_hw we initialize at runtime based on - * the chip used. 
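
The DOC block touched above describes the runtime dispatch used for descriptor handling: each MAC generation has its own setup/process routines, and an init function wires the matching ones into function pointers on the hardware state so the rest of the driver stays version-agnostic. A condensed, self-contained sketch of that dispatch idea follows; the struct layout and version constants are simplified stand-ins, not the real ath5k definitions.

/* desc_dispatch.c - sketch of per-chip-version function-pointer dispatch.
 * Simplified stand-ins for the ath5k structures and constants. */
#include <stdio.h>

enum mac_version { MAC_5210, MAC_5211, MAC_5212 };

struct fake_hw {
	enum mac_version version;
	/* descriptor hook filled in once at attach time */
	int (*setup_tx_desc)(struct fake_hw *hw, unsigned int pkt_len);
};

static int setup_2word_tx_desc(struct fake_hw *hw, unsigned int pkt_len)
{
	(void)hw;
	printf("2-word control descriptor, len=%u\n", pkt_len);
	return 0;
}

static int setup_4word_tx_desc(struct fake_hw *hw, unsigned int pkt_len)
{
	(void)hw;
	printf("4-word control descriptor, len=%u\n", pkt_len);
	return 0;
}

/* Mirrors the role of an init_desc_functions() helper: pick the routines
 * that match the MAC generation once, then callers never check the version. */
static void init_desc_functions(struct fake_hw *hw)
{
	if (hw->version == MAC_5212)
		hw->setup_tx_desc = setup_4word_tx_desc;
	else
		hw->setup_tx_desc = setup_2word_tx_desc;
}

int main(void)
{
	struct fake_hw hw = { .version = MAC_5212 };

	init_desc_functions(&hw);
	hw.setup_tx_desc(&hw, 1500);
	return 0;
}
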
- */ - - /************************\ * TX Control descriptors * \************************/ -/** - * ath5k_hw_setup_2word_tx_desc() - Initialize a 2-word tx control descriptor - * @ah: The &struct ath5k_hw - * @desc: The &struct ath5k_desc - * @pkt_len: Frame length in bytes - * @hdr_len: Header length in bytes (only used on AR5210) - * @padsize: Any padding we've added to the frame length - * @type: One of enum ath5k_pkt_type - * @tx_power: Tx power in 0.5dB steps - * @tx_rate0: HW idx for transmission rate - * @tx_tries0: Max number of retransmissions - * @key_index: Index on key table to use for encryption - * @antenna_mode: Which antenna to use (0 for auto) - * @flags: One of AR5K_TXDESC_* flags (desc.h) - * @rtscts_rate: HW idx for RTS/CTS transmission rate - * @rtscts_duration: What to put on duration field on the header of RTS/CTS - * - * Internal function to initialize a 2-Word TX control descriptor - * found on AR5210 and AR5211 MACs chips. - * - * Returns 0 on success or -EINVAL on false input +/* + * Initialize the 2-word tx control descriptor on 5210/5211 */ static int -ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, - struct ath5k_desc *desc, - unsigned int pkt_len, unsigned int hdr_len, - int padsize, - enum ath5k_pkt_type type, - unsigned int tx_power, - unsigned int tx_rate0, unsigned int tx_tries0, - unsigned int key_index, - unsigned int antenna_mode, - unsigned int flags, - unsigned int rtscts_rate, unsigned int rtscts_duration) +ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, + unsigned int pkt_len, unsigned int hdr_len, int padsize, + enum ath5k_pkt_type type, + unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0, + unsigned int key_index, unsigned int antenna_mode, unsigned int flags, + unsigned int rtscts_rate, unsigned int rtscts_duration) { u32 frame_type; struct ath5k_hw_2w_tx_ctl *tx_ctl; @@ -213,40 +172,17 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, return 0; } -/** - * ath5k_hw_setup_4word_tx_desc() - Initialize a 4-word tx control descriptor - * @ah: The &struct ath5k_hw - * @desc: The &struct ath5k_desc - * @pkt_len: Frame length in bytes - * @hdr_len: Header length in bytes (only used on AR5210) - * @padsize: Any padding we've added to the frame length - * @type: One of enum ath5k_pkt_type - * @tx_power: Tx power in 0.5dB steps - * @tx_rate0: HW idx for transmission rate - * @tx_tries0: Max number of retransmissions - * @key_index: Index on key table to use for encryption - * @antenna_mode: Which antenna to use (0 for auto) - * @flags: One of AR5K_TXDESC_* flags (desc.h) - * @rtscts_rate: HW idx for RTS/CTS transmission rate - * @rtscts_duration: What to put on duration field on the header of RTS/CTS - * - * Internal function to initialize a 4-Word TX control descriptor - * found on AR5212 and later MACs chips. 
- * - * Returns 0 on success or -EINVAL on false input +/* + * Initialize the 4-word tx control descriptor on 5212 */ -static int -ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah, - struct ath5k_desc *desc, - unsigned int pkt_len, unsigned int hdr_len, - int padsize, - enum ath5k_pkt_type type, - unsigned int tx_power, - unsigned int tx_rate0, unsigned int tx_tries0, - unsigned int key_index, - unsigned int antenna_mode, - unsigned int flags, - unsigned int rtscts_rate, unsigned int rtscts_duration) +static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah, + struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len, + int padsize, + enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0, + unsigned int tx_tries0, unsigned int key_index, + unsigned int antenna_mode, unsigned int flags, + unsigned int rtscts_rate, + unsigned int rtscts_duration) { struct ath5k_hw_4w_tx_ctl *tx_ctl; unsigned int frame_len; @@ -356,29 +292,13 @@ ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah, return 0; } -/** - * ath5k_hw_setup_mrr_tx_desc() - Initialize an MRR tx control descriptor - * @ah: The &struct ath5k_hw - * @desc: The &struct ath5k_desc - * @tx_rate1: HW idx for rate used on transmission series 1 - * @tx_tries1: Max number of retransmissions for transmission series 1 - * @tx_rate2: HW idx for rate used on transmission series 2 - * @tx_tries2: Max number of retransmissions for transmission series 2 - * @tx_rate3: HW idx for rate used on transmission series 3 - * @tx_tries3: Max number of retransmissions for transmission series 3 - * - * Multi rate retry (MRR) tx control descriptors are available only on AR5212 - * MACs, they are part of the normal 4-word tx control descriptor (see above) - * but we handle them through a separate function for better abstraction. 
- * - * Returns 0 on success or -EINVAL on invalid input +/* + * Initialize a 4-word multi rate retry tx control descriptor on 5212 */ int -ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, - struct ath5k_desc *desc, - u_int tx_rate1, u_int tx_tries1, - u_int tx_rate2, u_int tx_tries2, - u_int tx_rate3, u_int tx_tries3) +ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, + unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2, + u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3) { struct ath5k_hw_4w_tx_ctl *tx_ctl; @@ -430,16 +350,11 @@ ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, * TX Status descriptors * \***********************/ -/** - * ath5k_hw_proc_2word_tx_status() - Process a tx status descriptor on 5210/1 - * @ah: The &struct ath5k_hw - * @desc: The &struct ath5k_desc - * @ts: The &struct ath5k_tx_status +/* + * Process the tx status descriptor on 5210/5211 */ -static int -ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah, - struct ath5k_desc *desc, - struct ath5k_tx_status *ts) +static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah, + struct ath5k_desc *desc, struct ath5k_tx_status *ts) { struct ath5k_hw_2w_tx_ctl *tx_ctl; struct ath5k_hw_tx_status *tx_status; @@ -484,16 +399,11 @@ ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah, return 0; } -/** - * ath5k_hw_proc_4word_tx_status() - Process a tx status descriptor on 5212 - * @ah: The &struct ath5k_hw - * @desc: The &struct ath5k_desc - * @ts: The &struct ath5k_tx_status +/* + * Process a tx status descriptor on 5212 */ -static int -ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah, - struct ath5k_desc *desc, - struct ath5k_tx_status *ts) +static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah, + struct ath5k_desc *desc, struct ath5k_tx_status *ts) { struct ath5k_hw_4w_tx_ctl *tx_ctl; struct ath5k_hw_tx_status *tx_status; @@ -550,17 +460,11 @@ ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah, * RX Descriptors * \****************/ -/** - * ath5k_hw_setup_rx_desc() - Initialize an rx control descriptor - * @ah: The &struct ath5k_hw - * @desc: The &struct ath5k_desc - * @size: RX buffer length in bytes - * @flags: One of AR5K_RXDESC_* flags +/* + * Initialize an rx control descriptor */ -int -ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, - struct ath5k_desc *desc, - u32 size, unsigned int flags) +int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, + u32 size, unsigned int flags) { struct ath5k_hw_rx_ctl *rx_ctl; @@ -587,22 +491,11 @@ ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, return 0; } -/** - * ath5k_hw_proc_5210_rx_status() - Process the rx status descriptor on 5210/1 - * @ah: The &struct ath5k_hw - * @desc: The &struct ath5k_desc - * @rs: The &struct ath5k_rx_status - * - * Internal function used to process an RX status descriptor - * on AR5210/5211 MAC. - * - * Returns 0 on success or -EINPROGRESS in case we haven't received the who;e - * frame yet. 
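
The kernel-doc removed just above notes that the RX status processors return -EINPROGRESS while the hardware is still writing the status words, so callers stop walking the descriptor ring at that point and resume on the next interrupt. Below is a small sketch of that stop-on-in-progress walk; the descriptor fields and the DONE flag are illustrative placeholders rather than the real status-word layout.

/* rx_walk.c - sketch of walking an RX descriptor ring until a descriptor
 * is still "in progress". Fields and flags are illustrative placeholders. */
#include <errno.h>
#include <stdio.h>

#define STATUS_DONE 0x1u

struct fake_rx_desc {
	unsigned int status;   /* set by "hardware" when reception finished */
	unsigned int len;
};

static int proc_rx_status(const struct fake_rx_desc *ds)
{
	if (!(ds->status & STATUS_DONE))
		return -EINPROGRESS;   /* hw still owns this descriptor */
	return 0;
}

int main(void)
{
	struct fake_rx_desc ring[4] = {
		{ STATUS_DONE, 64 }, { STATUS_DONE, 1500 }, { 0, 0 }, { 0, 0 },
	};

	for (int i = 0; i < 4; i++) {
		if (proc_rx_status(&ring[i]) < 0) {
			printf("descriptor %d not ready, stop here\n", i);
			break;
		}
		printf("descriptor %d: frame of %u bytes\n", i, ring[i].len);
	}
	return 0;
}
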
+/* + * Process the rx status descriptor on 5210/5211 */ -static int -ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah, - struct ath5k_desc *desc, - struct ath5k_rx_status *rs) +static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah, + struct ath5k_desc *desc, struct ath5k_rx_status *rs) { struct ath5k_hw_rx_status *rx_status; @@ -681,22 +574,12 @@ ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah, return 0; } -/** - * ath5k_hw_proc_5212_rx_status() - Process the rx status descriptor on 5212 - * @ah: The &struct ath5k_hw - * @desc: The &struct ath5k_desc - * @rs: The &struct ath5k_rx_status - * - * Internal function used to process an RX status descriptor - * on AR5212 and later MAC. - * - * Returns 0 on success or -EINPROGRESS in case we haven't received the who;e - * frame yet. +/* + * Process the rx status descriptor on 5212 */ -static int -ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, - struct ath5k_desc *desc, - struct ath5k_rx_status *rs) +static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, + struct ath5k_desc *desc, + struct ath5k_rx_status *rs) { struct ath5k_hw_rx_status *rx_status; u32 rxstat0, rxstat1; @@ -763,16 +646,10 @@ ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, * Attach * \********/ -/** - * ath5k_hw_init_desc_functions() - Init function pointers inside ah - * @ah: The &struct ath5k_hw - * - * Maps the internal descriptor functions to the function pointers on ah, used - * from above. This is used as an abstraction layer to handle the various chips - * the same way. +/* + * Init function pointers inside ath5k_hw struct */ -int -ath5k_hw_init_desc_functions(struct ath5k_hw *ah) +int ath5k_hw_init_desc_functions(struct ath5k_hw *ah) { if (ah->ah_version == AR5K_AR5212) { ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc; diff --git a/trunk/drivers/net/wireless/ath/ath5k/desc.h b/trunk/drivers/net/wireless/ath/ath5k/desc.h index 8d6c01a49ea3..cfd529b548f3 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/desc.h +++ b/trunk/drivers/net/wireless/ath/ath5k/desc.h @@ -20,30 +20,25 @@ * RX/TX descriptor structures */ -/** - * struct ath5k_hw_rx_ctl - Common hardware RX control descriptor - * @rx_control_0: RX control word 0 - * @rx_control_1: RX control word 1 +/* + * Common hardware RX control descriptor */ struct ath5k_hw_rx_ctl { - u32 rx_control_0; - u32 rx_control_1; + u32 rx_control_0; /* RX control word 0 */ + u32 rx_control_1; /* RX control word 1 */ } __packed __aligned(4); /* RX control word 1 fields/flags */ #define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff /* data buffer length */ #define AR5K_DESC_RX_CTL1_INTREQ 0x00002000 /* RX interrupt request */ -/** - * struct ath5k_hw_rx_status - Common hardware RX status descriptor - * @rx_status_0: RX status word 0 - * @rx_status_1: RX status word 1 - * +/* + * Common hardware RX status descriptor * 5210, 5211 and 5212 differ only in the fields and flags defined below */ struct ath5k_hw_rx_status { - u32 rx_status_0; - u32 rx_status_1; + u32 rx_status_0; /* RX status word 0 */ + u32 rx_status_1; /* RX status word 1 */ } __packed __aligned(4); /* 5210/5211 */ @@ -103,36 +98,17 @@ struct ath5k_hw_rx_status { /** * enum ath5k_phy_error_code - PHY Error codes - * @AR5K_RX_PHY_ERROR_UNDERRUN: Transmit underrun, [5210] No error - * @AR5K_RX_PHY_ERROR_TIMING: Timing error - * @AR5K_RX_PHY_ERROR_PARITY: Illegal parity - * @AR5K_RX_PHY_ERROR_RATE: Illegal rate - * @AR5K_RX_PHY_ERROR_LENGTH: Illegal length - * @AR5K_RX_PHY_ERROR_RADAR: Radar detect, [5210] 64 QAM rate - * @AR5K_RX_PHY_ERROR_SERVICE: Illegal service - 
* @AR5K_RX_PHY_ERROR_TOR: Transmit override receive - * @AR5K_RX_PHY_ERROR_OFDM_TIMING: OFDM Timing error [5212+] - * @AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY: OFDM Signal parity error [5212+] - * @AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL: OFDM Illegal rate [5212+] - * @AR5K_RX_PHY_ERROR_OFDM_LENGTH_ILLEGAL: OFDM Illegal length [5212+] - * @AR5K_RX_PHY_ERROR_OFDM_POWER_DROP: OFDM Power drop [5212+] - * @AR5K_RX_PHY_ERROR_OFDM_SERVICE: OFDM Service (?) [5212+] - * @AR5K_RX_PHY_ERROR_OFDM_RESTART: OFDM Restart (?) [5212+] - * @AR5K_RX_PHY_ERROR_CCK_TIMING: CCK Timing error [5212+] - * @AR5K_RX_PHY_ERROR_CCK_HEADER_CRC: Header CRC error [5212+] - * @AR5K_RX_PHY_ERROR_CCK_RATE_ILLEGAL: Illegal rate [5212+] - * @AR5K_RX_PHY_ERROR_CCK_SERVICE: CCK Service (?) [5212+] - * @AR5K_RX_PHY_ERROR_CCK_RESTART: CCK Restart (?) [5212+] */ enum ath5k_phy_error_code { - AR5K_RX_PHY_ERROR_UNDERRUN = 0, - AR5K_RX_PHY_ERROR_TIMING = 1, - AR5K_RX_PHY_ERROR_PARITY = 2, - AR5K_RX_PHY_ERROR_RATE = 3, - AR5K_RX_PHY_ERROR_LENGTH = 4, - AR5K_RX_PHY_ERROR_RADAR = 5, - AR5K_RX_PHY_ERROR_SERVICE = 6, - AR5K_RX_PHY_ERROR_TOR = 7, + AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun, [5210] No error */ + AR5K_RX_PHY_ERROR_TIMING = 1, /* Timing error */ + AR5K_RX_PHY_ERROR_PARITY = 2, /* Illegal parity */ + AR5K_RX_PHY_ERROR_RATE = 3, /* Illegal rate */ + AR5K_RX_PHY_ERROR_LENGTH = 4, /* Illegal length */ + AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect, [5210] 64 QAM rate */ + AR5K_RX_PHY_ERROR_SERVICE = 6, /* Illegal service */ + AR5K_RX_PHY_ERROR_TOR = 7, /* Transmit override receive */ + /* these are specific to the 5212 */ AR5K_RX_PHY_ERROR_OFDM_TIMING = 17, AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY = 18, AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL = 19, @@ -147,14 +123,12 @@ enum ath5k_phy_error_code { AR5K_RX_PHY_ERROR_CCK_RESTART = 31, }; -/** - * struct ath5k_hw_2w_tx_ctl - 5210/5211 hardware 2-word TX control descriptor - * @tx_control_0: TX control word 0 - * @tx_control_1: TX control word 1 +/* + * 5210/5211 hardware 2-word TX control descriptor */ struct ath5k_hw_2w_tx_ctl { - u32 tx_control_0; - u32 tx_control_1; + u32 tx_control_0; /* TX control word 0 */ + u32 tx_control_1; /* TX control word 1 */ } __packed __aligned(4); /* TX control word 0 fields/flags */ @@ -203,18 +177,14 @@ struct ath5k_hw_2w_tx_ctl { #define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS 4 #define AR5K_AR5211_TX_DESC_FRAME_TYPE_PRESP 4 -/** - * struct ath5k_hw_4w_tx_ctl - 5212 hardware 4-word TX control descriptor - * @tx_control_0: TX control word 0 - * @tx_control_1: TX control word 1 - * @tx_control_2: TX control word 2 - * @tx_control_3: TX control word 3 +/* + * 5212 hardware 4-word TX control descriptor */ struct ath5k_hw_4w_tx_ctl { - u32 tx_control_0; - u32 tx_control_1; - u32 tx_control_2; - u32 tx_control_3; + u32 tx_control_0; /* TX control word 0 */ + u32 tx_control_1; /* TX control word 1 */ + u32 tx_control_2; /* TX control word 2 */ + u32 tx_control_3; /* TX control word 3 */ } __packed __aligned(4); /* TX control word 0 fields/flags */ @@ -268,14 +238,12 @@ struct ath5k_hw_4w_tx_ctl { #define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE 0x01f00000 /* RTS or CTS rate */ #define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S 20 -/** - * struct ath5k_hw_tx_status - Common TX status descriptor - * @tx_status_0: TX status word 0 - * @tx_status_1: TX status word 1 +/* + * Common TX status descriptor */ struct ath5k_hw_tx_status { - u32 tx_status_0; - u32 tx_status_1; + u32 tx_status_0; /* TX status word 0 */ + u32 tx_status_1; /* TX status word 1 */ } __packed __aligned(4); /* 
TX status word 0 fields/flags */ @@ -308,47 +276,37 @@ struct ath5k_hw_tx_status { #define AR5K_DESC_TX_STATUS1_COMP_SUCCESS_5212 0x00800000 /* [5212] compression status */ #define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212 0x01000000 /* [5212] transmit antenna */ -/** - * struct ath5k_hw_5210_tx_desc - 5210/5211 hardware TX descriptor - * @tx_ctl: The &struct ath5k_hw_2w_tx_ctl - * @tx_stat: The &struct ath5k_hw_tx_status +/* + * 5210/5211 hardware TX descriptor */ struct ath5k_hw_5210_tx_desc { struct ath5k_hw_2w_tx_ctl tx_ctl; struct ath5k_hw_tx_status tx_stat; } __packed __aligned(4); -/** - * struct ath5k_hw_5212_tx_desc - 5212 hardware TX descriptor - * @tx_ctl: The &struct ath5k_hw_4w_tx_ctl - * @tx_stat: The &struct ath5k_hw_tx_status +/* + * 5212 hardware TX descriptor */ struct ath5k_hw_5212_tx_desc { struct ath5k_hw_4w_tx_ctl tx_ctl; struct ath5k_hw_tx_status tx_stat; } __packed __aligned(4); -/** - * struct ath5k_hw_all_rx_desc - Common hardware RX descriptor - * @rx_ctl: The &struct ath5k_hw_rx_ctl - * @rx_stat: The &struct ath5k_hw_rx_status +/* + * Common hardware RX descriptor */ struct ath5k_hw_all_rx_desc { struct ath5k_hw_rx_ctl rx_ctl; struct ath5k_hw_rx_status rx_stat; } __packed __aligned(4); -/** - * struct ath5k_desc - Atheros hardware DMA descriptor - * @ds_link: Physical address of the next descriptor - * @ds_data: Physical address of data buffer (skb) - * @ud: Union containing hw_5xxx_tx_desc structs and hw_all_rx_desc - * +/* + * Atheros hardware DMA descriptor * This is read and written to by the hardware */ struct ath5k_desc { - u32 ds_link; - u32 ds_data; + u32 ds_link; /* physical address of the next descriptor */ + u32 ds_data; /* physical address of data buffer (skb) */ union { struct ath5k_hw_5210_tx_desc ds_tx5210; diff --git a/trunk/drivers/net/wireless/ath/ath5k/dma.c b/trunk/drivers/net/wireless/ath/ath5k/dma.c index 5cc9aa814697..2481f9c7f4b6 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/dma.c +++ b/trunk/drivers/net/wireless/ath/ath5k/dma.c @@ -20,13 +20,16 @@ * DMA and interrupt masking functions * \*************************************/ -/** - * DOC: DMA and interrupt masking functions +/* + * dma.c - DMA and interrupt masking functions * * Here we setup descriptor pointers (rxdp/txdp) start/stop dma engine and * handle queue setup for 5210 chipset (rest are handled on qcu.c). * Also we setup interrupt mask register (IMR) and read the various interrupt * status registers (ISR). + * + * TODO: Handle SISR on 5211+ and introduce a function to return the queue + * number that resulted the interrupt. 
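
The TODO in the dma.c header restored above refers to the secondary interrupt status registers that report TX events per queue: the primary register only says "some queue finished", while a mask-and-shift of a secondary register yields a bitmap with one bit per hardware queue. A standalone sketch of that mask/shift extraction follows; the register value, field layout and queue count are invented purely for illustration.

/* sisr_bits.c - sketch of turning a packed per-queue field into a queue bitmap.
 * Register layout and values are invented for illustration only. */
#include <stdio.h>

/* pretend the low 10 bits of the secondary status register hold
 * one "TX ok" bit per hardware queue */
#define QCU_TXOK_MASK  0x000003ffu
#define QCU_TXOK_SHIFT 0
#define NUM_TX_QUEUES  10

static unsigned int field_get(unsigned int reg, unsigned int mask, int shift)
{
	return (reg & mask) >> shift;   /* same idea as the driver's mask/shift helper */
}

int main(void)
{
	unsigned int sisr0 = 0x00000005;   /* fake read: queues 0 and 2 completed */
	unsigned int txok_qmap = field_get(sisr0, QCU_TXOK_MASK, QCU_TXOK_SHIFT);

	for (int q = 0; q < NUM_TX_QUEUES; q++)
		if (txok_qmap & (1u << q))
			printf("queue %d has completed frames to reap\n", q);
	return 0;
}
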
*/ #include "ath5k.h" @@ -39,22 +42,22 @@ \*********/ /** - * ath5k_hw_start_rx_dma() - Start DMA receive + * ath5k_hw_start_rx_dma - Start DMA receive + * * @ah: The &struct ath5k_hw */ -void -ath5k_hw_start_rx_dma(struct ath5k_hw *ah) +void ath5k_hw_start_rx_dma(struct ath5k_hw *ah) { ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR); ath5k_hw_reg_read(ah, AR5K_CR); } /** - * ath5k_hw_stop_rx_dma() - Stop DMA receive + * ath5k_hw_stop_rx_dma - Stop DMA receive + * * @ah: The &struct ath5k_hw */ -static int -ath5k_hw_stop_rx_dma(struct ath5k_hw *ah) +static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah) { unsigned int i; @@ -76,24 +79,24 @@ ath5k_hw_stop_rx_dma(struct ath5k_hw *ah) } /** - * ath5k_hw_get_rxdp() - Get RX Descriptor's address + * ath5k_hw_get_rxdp - Get RX Descriptor's address + * * @ah: The &struct ath5k_hw */ -u32 -ath5k_hw_get_rxdp(struct ath5k_hw *ah) +u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah) { return ath5k_hw_reg_read(ah, AR5K_RXDP); } /** - * ath5k_hw_set_rxdp() - Set RX Descriptor's address + * ath5k_hw_set_rxdp - Set RX Descriptor's address + * * @ah: The &struct ath5k_hw * @phys_addr: RX descriptor address * * Returns -EIO if rx is active */ -int -ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr) +int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr) { if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) { ATH5K_DBG(ah, ATH5K_DEBUG_DMA, @@ -111,7 +114,8 @@ ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr) \**********/ /** - * ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue + * ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue + * * @ah: The &struct ath5k_hw * @queue: The hw queue number * @@ -124,8 +128,7 @@ ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr) * NOTE: Must be called after setting up tx control descriptor for that * queue (see below). */ -int -ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue) +int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue) { u32 tx_queue; @@ -174,16 +177,17 @@ ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue) } /** - * ath5k_hw_stop_tx_dma() - Stop DMA transmit on a specific queue + * ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue + * * @ah: The &struct ath5k_hw * @queue: The hw queue number * * Stop DMA transmit on a specific hw queue and drain queue so we don't * have any pending frames. Returns -EBUSY if we still have pending frames, * -EINVAL if queue number is out of range or inactive. 
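
ath5k_hw_stop_tx_dma() is documented above as "stop, then drain, and report -EBUSY if frames are still pending"; internally that is a bounded poll on the queue's pending counter. A minimal sketch of such a bounded drain loop is below; the pending_frames() helper and the retry budget are assumptions made for the example, not the driver's actual register accesses.

/* drain_queue.c - sketch of a bounded "stop and drain" poll.
 * pending_frames() and the retry budget are illustrative assumptions. */
#include <errno.h>
#include <stdio.h>

/* pretend hardware: number of frames still queued, drains by one per poll */
static int hw_pending = 3;

static int pending_frames(void)
{
	if (hw_pending > 0)
		hw_pending--;
	return hw_pending;
}

static int stop_tx_queue(void)
{
	int budget = 40;   /* same spirit as the driver's bounded retry count */

	/* (real code would first clear the queue-enable bit here) */
	while (budget-- > 0) {
		if (pending_frames() == 0)
			return 0;          /* queue drained, safe to proceed */
	}
	return -EBUSY;                     /* still busy after the budget expired */
}

int main(void)
{
	printf("stop_tx_queue() = %d\n", stop_tx_queue());
	return 0;
}
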
+ * */ -static int -ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue) +static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue) { unsigned int i = 40; u32 tx_queue, pending; @@ -316,14 +320,14 @@ ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue) } /** - * ath5k_hw_stop_beacon_queue() - Stop beacon queue - * @ah: The &struct ath5k_hw - * @queue: The queue number + * ath5k_hw_stop_beacon_queue - Stop beacon queue + * + * @ah The &struct ath5k_hw + * @queue The queue number * * Returns -EIO if queue didn't stop */ -int -ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue) +int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue) { int ret; ret = ath5k_hw_stop_tx_dma(ah, queue); @@ -336,7 +340,8 @@ ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue) } /** - * ath5k_hw_get_txdp() - Get TX Descriptor's address for a specific queue + * ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue + * * @ah: The &struct ath5k_hw * @queue: The hw queue number * @@ -347,8 +352,7 @@ ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue) * * XXX: Is TXDP read and clear ? */ -u32 -ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue) +u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue) { u16 tx_reg; @@ -378,10 +382,10 @@ ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue) } /** - * ath5k_hw_set_txdp() - Set TX Descriptor's address for a specific queue + * ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue + * * @ah: The &struct ath5k_hw * @queue: The hw queue number - * @phys_addr: The physical address * * Set TX descriptor's address for a specific queue. For 5210 we ignore * the queue number and we use tx queue type since we only have 2 queues @@ -390,8 +394,7 @@ ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue) * Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still * active. */ -int -ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr) +int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr) { u16 tx_reg; @@ -432,7 +435,8 @@ ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr) } /** - * ath5k_hw_update_tx_triglevel() - Update tx trigger level + * ath5k_hw_update_tx_triglevel - Update tx trigger level + * * @ah: The &struct ath5k_hw * @increase: Flag to force increase of trigger level * @@ -440,15 +444,15 @@ ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr) * buffer (aka FIFO threshold) that is used to indicate when PCU flushes * the buffer and transmits its data. Lowering this results sending small * frames more quickly but can lead to tx underruns, raising it a lot can - * result other problems. Right now we start with the lowest possible - * (64Bytes) and if we get tx underrun we increase it using the increase - * flag. Returns -EIO if we have reached maximum/minimum. + * result other problems (i think bmiss is related). Right now we start with + * the lowest possible (64Bytes) and if we get tx underrun we increase it using + * the increase flag. Returns -EIO if we have reached maximum/minimum. * * XXX: Link this with tx DMA size ? - * XXX2: Use it to save interrupts ? + * XXX: Use it to save interrupts ? + * TODO: Needs testing, i think it's related to bmiss... 
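
The trigger-level comment above describes the TX FIFO threshold as starting at the minimum (64 bytes) and being bumped up whenever a TX underrun interrupt fires, with the update failing once a limit is reached. A small sketch of that clamp-and-step adjustment follows; the step size and limits are illustrative numbers, not the chip's real bounds.

/* triglevel.c - sketch of bumping a FIFO trigger level on TX underrun.
 * Step size and limits are illustrative, not the chip's real bounds. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define TRIG_MIN   64
#define TRIG_MAX   2048
#define TRIG_STEP  64

static int trigger_level = TRIG_MIN;   /* start at the lowest threshold */

static int update_tx_triglevel(bool increase)
{
	int next = increase ? trigger_level + TRIG_STEP
			    : trigger_level - TRIG_STEP;

	if (next < TRIG_MIN || next > TRIG_MAX)
		return -EIO;               /* already at the boundary */

	trigger_level = next;
	return 0;
}

int main(void)
{
	/* every TX underrun interrupt nudges the threshold upwards */
	update_tx_triglevel(true);
	update_tx_triglevel(true);
	printf("trigger level is now %d bytes\n", trigger_level);
	return 0;
}
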
*/ -int -ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase) +int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase) { u32 trigger_level, imr; int ret = -EIO; @@ -494,20 +498,21 @@ ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase) \*******************/ /** - * ath5k_hw_is_intr_pending() - Check if we have pending interrupts + * ath5k_hw_is_intr_pending - Check if we have pending interrupts + * * @ah: The &struct ath5k_hw * * Check if we have pending interrupts to process. Returns 1 if we * have pending interrupts and 0 if we haven't. */ -bool -ath5k_hw_is_intr_pending(struct ath5k_hw *ah) +bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah) { return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0; } /** - * ath5k_hw_get_isr() - Get interrupt status + * ath5k_hw_get_isr - Get interrupt status + * * @ah: The @struct ath5k_hw * @interrupt_mask: Driver's interrupt mask used to filter out * interrupts in sw. @@ -518,162 +523,62 @@ ath5k_hw_is_intr_pending(struct ath5k_hw *ah) * being mapped on some standard non hw-specific positions * (check out &ath5k_int). * - * NOTE: We do write-to-clear, so the active PISR/SISR bits at the time this - * function gets called are cleared on return. + * NOTE: We use read-and-clear register, so after this function is called ISR + * is zeroed. */ -int -ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask) +int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask) { - u32 data = 0; + u32 data; /* - * Read interrupt status from Primary Interrupt - * Register. - * - * Note: PISR/SISR Not available on 5210 + * Read interrupt status from the Interrupt Status register + * on 5210 */ if (ah->ah_version == AR5K_AR5210) { - u32 isr = 0; - isr = ath5k_hw_reg_read(ah, AR5K_ISR); - if (unlikely(isr == AR5K_INT_NOCARD)) { - *interrupt_mask = isr; + data = ath5k_hw_reg_read(ah, AR5K_ISR); + if (unlikely(data == AR5K_INT_NOCARD)) { + *interrupt_mask = data; return -ENODEV; } - - /* - * Filter out the non-common bits from the interrupt - * status. - */ - *interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr; - - /* Hanlde INT_FATAL */ - if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT - | AR5K_ISR_DPERR))) - *interrupt_mask |= AR5K_INT_FATAL; - - /* - * XXX: BMISS interrupts may occur after association. - * I found this on 5210 code but it needs testing. If this is - * true we should disable them before assoc and re-enable them - * after a successful assoc + some jiffies. - interrupt_mask &= ~AR5K_INT_BMISS; - */ - - data = isr; } else { - u32 pisr = 0; - u32 pisr_clear = 0; - u32 sisr0 = 0; - u32 sisr1 = 0; - u32 sisr2 = 0; - u32 sisr3 = 0; - u32 sisr4 = 0; - - /* Read PISR and SISRs... 
*/ - pisr = ath5k_hw_reg_read(ah, AR5K_PISR); - if (unlikely(pisr == AR5K_INT_NOCARD)) { - *interrupt_mask = pisr; - return -ENODEV; - } - - sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0); - sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1); - sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2); - sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3); - sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4); - /* - * PISR holds the logical OR of interrupt bits - * from SISR registers: - * - * TXOK and TXDESC -> Logical OR of TXOK and TXDESC - * per-queue bits on SISR0 - * - * TXERR and TXEOL -> Logical OR of TXERR and TXEOL - * per-queue bits on SISR1 - * - * TXURN -> Logical OR of TXURN per-queue bits on SISR2 + * Read interrupt status from Interrupt + * Status Register shadow copy (Read And Clear) * - * HIUERR -> Logical OR of MCABT, SSERR and DPER bits on SISR2 - * - * BCNMISC -> Logical OR of TIM, CAB_END, DTIM_SYNC - * BCN_TIMEOUT, CAB_TIMEOUT and DTIM - * (and TSFOOR ?) bits on SISR2 - * - * QCBRORN and QCBRURN -> Logical OR of QCBRORN and - * QCBRURN per-queue bits on SISR3 - * QTRIG -> Logical OR of QTRIG per-queue bits on SISR4 - * - * If we clean these bits on PISR we 'll also clear all - * related bits from SISRs, e.g. if we write the TXOK bit on - * PISR we 'll clean all TXOK bits from SISR0 so if a new TXOK - * interrupt got fired for another queue while we were reading - * the interrupt registers and we write back the TXOK bit on - * PISR we 'll lose it. So make sure that we don't write back - * on PISR any bits that come from SISRs. Clearing them from - * SISRs will also clear PISR so no need to worry here. - */ - - pisr_clear = pisr & ~AR5K_ISR_BITS_FROM_SISRS; - - /* - * Write to clear them... - * Note: This means that each bit we write back - * to the registers will get cleared, leaving the - * rest unaffected. So this won't affect new interrupts - * we didn't catch while reading/processing, we 'll get - * them next time get_isr gets called. - */ - ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0); - ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1); - ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2); - ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3); - ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4); - ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR); - /* Flush previous write */ - ath5k_hw_reg_read(ah, AR5K_PISR); - - /* - * Filter out the non-common bits from the interrupt - * status. 
+ * Note: PISR/SISR Not available on 5210 */ - *interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr; - - - /* We treat TXOK,TXDESC, TXERR and TXEOL - * the same way (schedule the tx tasklet) - * so we track them all together per queue */ - if (pisr & AR5K_ISR_TXOK) - ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0, - AR5K_SISR0_QCU_TXOK); + data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR); + if (unlikely(data == AR5K_INT_NOCARD)) { + *interrupt_mask = data; + return -ENODEV; + } + } - if (pisr & AR5K_ISR_TXDESC) - ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0, - AR5K_SISR0_QCU_TXDESC); + /* + * Get abstract interrupt mask (driver-compatible) + */ + *interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr; - if (pisr & AR5K_ISR_TXERR) - ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1, - AR5K_SISR1_QCU_TXERR); + if (ah->ah_version != AR5K_AR5210) { + u32 sisr2 = ath5k_hw_reg_read(ah, AR5K_RAC_SISR2); - if (pisr & AR5K_ISR_TXEOL) - ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1, - AR5K_SISR1_QCU_TXEOL); + /*HIU = Host Interface Unit (PCI etc)*/ + if (unlikely(data & (AR5K_ISR_HIUERR))) + *interrupt_mask |= AR5K_INT_FATAL; - /* Currently this is not much usefull since we treat - * all queues the same way if we get a TXURN (update - * tx trigger level) but we might need it later on*/ - if (pisr & AR5K_ISR_TXURN) - ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2, - AR5K_SISR2_QCU_TXURN); + /*Beacon Not Ready*/ + if (unlikely(data & (AR5K_ISR_BNR))) + *interrupt_mask |= AR5K_INT_BNR; - /* Misc Beacon related interrupts */ + if (unlikely(sisr2 & (AR5K_SISR2_SSERR | + AR5K_SISR2_DPERR | + AR5K_SISR2_MCABT))) + *interrupt_mask |= AR5K_INT_FATAL; - /* For AR5211 */ - if (pisr & AR5K_ISR_TIM) + if (data & AR5K_ISR_TIM) *interrupt_mask |= AR5K_INT_TIM; - /* For AR5212+ */ - if (pisr & AR5K_ISR_BCNMISC) { + if (data & AR5K_ISR_BCNMISC) { if (sisr2 & AR5K_SISR2_TIM) *interrupt_mask |= AR5K_INT_TIM; if (sisr2 & AR5K_SISR2_DTIM) @@ -686,39 +591,63 @@ ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask) *interrupt_mask |= AR5K_INT_CAB_TIMEOUT; } - /* Below interrupts are unlikely to happen */ - - /* HIU = Host Interface Unit (PCI etc) - * Can be one of MCABT, SSERR, DPERR from SISR2 */ - if (unlikely(pisr & (AR5K_ISR_HIUERR))) - *interrupt_mask |= AR5K_INT_FATAL; - - /*Beacon Not Ready*/ - if (unlikely(pisr & (AR5K_ISR_BNR))) - *interrupt_mask |= AR5K_INT_BNR; - - /* A queue got CBR overrun */ - if (unlikely(pisr & (AR5K_ISR_QCBRORN))) { + if (data & AR5K_ISR_RXDOPPLER) + *interrupt_mask |= AR5K_INT_RX_DOPPLER; + if (data & AR5K_ISR_QCBRORN) { *interrupt_mask |= AR5K_INT_QCBRORN; - ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3, - AR5K_SISR3_QCBRORN); + ah->ah_txq_isr |= AR5K_REG_MS( + ath5k_hw_reg_read(ah, AR5K_RAC_SISR3), + AR5K_SISR3_QCBRORN); } - - /* A queue got CBR underrun */ - if (unlikely(pisr & (AR5K_ISR_QCBRURN))) { + if (data & AR5K_ISR_QCBRURN) { *interrupt_mask |= AR5K_INT_QCBRURN; - ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3, - AR5K_SISR3_QCBRURN); + ah->ah_txq_isr |= AR5K_REG_MS( + ath5k_hw_reg_read(ah, AR5K_RAC_SISR3), + AR5K_SISR3_QCBRURN); } - - /* A queue got triggered */ - if (unlikely(pisr & (AR5K_ISR_QTRIG))) { + if (data & AR5K_ISR_QTRIG) { *interrupt_mask |= AR5K_INT_QTRIG; - ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4, - AR5K_SISR4_QTRIG); + ah->ah_txq_isr |= AR5K_REG_MS( + ath5k_hw_reg_read(ah, AR5K_RAC_SISR4), + AR5K_SISR4_QTRIG); } - data = pisr; + if (data & AR5K_ISR_TXOK) + ah->ah_txq_isr |= AR5K_REG_MS( + ath5k_hw_reg_read(ah, AR5K_RAC_SISR0), + AR5K_SISR0_QCU_TXOK); + + if (data & 
AR5K_ISR_TXDESC) + ah->ah_txq_isr |= AR5K_REG_MS( + ath5k_hw_reg_read(ah, AR5K_RAC_SISR0), + AR5K_SISR0_QCU_TXDESC); + + if (data & AR5K_ISR_TXERR) + ah->ah_txq_isr |= AR5K_REG_MS( + ath5k_hw_reg_read(ah, AR5K_RAC_SISR1), + AR5K_SISR1_QCU_TXERR); + + if (data & AR5K_ISR_TXEOL) + ah->ah_txq_isr |= AR5K_REG_MS( + ath5k_hw_reg_read(ah, AR5K_RAC_SISR1), + AR5K_SISR1_QCU_TXEOL); + + if (data & AR5K_ISR_TXURN) + ah->ah_txq_isr |= AR5K_REG_MS( + ath5k_hw_reg_read(ah, AR5K_RAC_SISR2), + AR5K_SISR2_QCU_TXURN); + } else { + if (unlikely(data & (AR5K_ISR_SSERR | AR5K_ISR_MCABT + | AR5K_ISR_HIUERR | AR5K_ISR_DPERR))) + *interrupt_mask |= AR5K_INT_FATAL; + + /* + * XXX: BMISS interrupts may occur after association. + * I found this on 5210 code but it needs testing. If this is + * true we should disable them before assoc and re-enable them + * after a successful assoc + some jiffies. + interrupt_mask &= ~AR5K_INT_BMISS; + */ } /* @@ -732,7 +661,8 @@ ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask) } /** - * ath5k_hw_set_imr() - Set interrupt mask + * ath5k_hw_set_imr - Set interrupt mask + * * @ah: The &struct ath5k_hw * @new_mask: The new interrupt mask to be set * @@ -740,8 +670,7 @@ ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask) * ath5k_int bits to hw-specific bits to remove abstraction and writing * Interrupt Mask Register. */ -enum ath5k_int -ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask) +enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask) { enum ath5k_int old_mask, int_mask; @@ -768,14 +697,16 @@ ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask) u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2) & AR5K_SIMR2_QCU_TXURN; - /* Fatal interrupt abstraction for 5211+ */ if (new_mask & AR5K_INT_FATAL) { int_mask |= AR5K_IMR_HIUERR; simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR | AR5K_SIMR2_DPERR); } - /* Misc beacon related interrupts */ + /*Beacon Not Ready*/ + if (new_mask & AR5K_INT_BNR) + int_mask |= AR5K_INT_BNR; + if (new_mask & AR5K_INT_TIM) int_mask |= AR5K_IMR_TIM; @@ -790,9 +721,8 @@ ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask) if (new_mask & AR5K_INT_CAB_TIMEOUT) simr2 |= AR5K_SISR2_CAB_TIMEOUT; - /*Beacon Not Ready*/ - if (new_mask & AR5K_INT_BNR) - int_mask |= AR5K_INT_BNR; + if (new_mask & AR5K_INT_RX_DOPPLER) + int_mask |= AR5K_IMR_RXDOPPLER; /* Note: Per queue interrupt masks * are set via ath5k_hw_reset_tx_queue() (qcu.c) */ @@ -800,12 +730,10 @@ ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask) ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2); } else { - /* Fatal interrupt abstraction for 5210 */ if (new_mask & AR5K_INT_FATAL) int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT | AR5K_IMR_HIUERR | AR5K_IMR_DPERR); - /* Only common interrupts left for 5210 (no SIMRs) */ ath5k_hw_reg_write(ah, int_mask, AR5K_IMR); } @@ -832,7 +760,8 @@ ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask) \********************/ /** - * ath5k_hw_dma_init() - Initialize DMA unit + * ath5k_hw_dma_init - Initialize DMA unit + * * @ah: The &struct ath5k_hw * * Set DMA size and pre-enable interrupts @@ -841,8 +770,7 @@ ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask) * * XXX: Save/restore RXDP/TXDP registers ? 
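
The large comment deleted from ath5k_hw_get_isr() in the hunk above explains the write-to-clear discipline: after sampling the status registers you write back only the bits you actually observed, so events that arrive while you are processing stay set for the next pass. Below is a tiny behavioural simulation of that discipline with a fake status register; the register model is invented purely to show why writing back the stale snapshot is safe while a blanket clear is not.

/* write_to_clear.c - simulation of write-to-clear interrupt status handling.
 * The "register" here is a plain variable; this is only a behavioural model. */
#include <stdio.h>

static unsigned int fake_sisr;   /* bits set by "hardware" as events happen */

static unsigned int read_status(void)
{
	return fake_sisr;
}

/* writing a bit clears it; bits we do not write stay untouched */
static void write_to_clear(unsigned int bits)
{
	fake_sisr &= ~bits;
}

int main(void)
{
	fake_sisr = 0x3;                     /* queues 0 and 1 report TX done */

	unsigned int snapshot = read_status();

	fake_sisr |= 0x4;                    /* queue 2 completes while we process */

	write_to_clear(snapshot);            /* clear only what we saw ... */
	printf("still pending after ack: 0x%x\n", fake_sisr);  /* 0x4 survives */
	return 0;
}
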
*/ -void -ath5k_hw_dma_init(struct ath5k_hw *ah) +void ath5k_hw_dma_init(struct ath5k_hw *ah) { /* * Set Rx/Tx DMA Configuration @@ -871,7 +799,8 @@ ath5k_hw_dma_init(struct ath5k_hw *ah) } /** - * ath5k_hw_dma_stop() - stop DMA unit + * ath5k_hw_dma_stop - stop DMA unit + * * @ah: The &struct ath5k_hw * * Stop tx/rx DMA and interrupts. Returns @@ -881,8 +810,7 @@ ath5k_hw_dma_init(struct ath5k_hw *ah) * stuck frames on tx queues, only a reset * can fix that. */ -int -ath5k_hw_dma_stop(struct ath5k_hw *ah) +int ath5k_hw_dma_stop(struct ath5k_hw *ah) { int i, qmax, err; err = 0; diff --git a/trunk/drivers/net/wireless/ath/ath5k/gpio.c b/trunk/drivers/net/wireless/ath/ath5k/gpio.c index 73d3dd8a306a..859297811914 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/gpio.c +++ b/trunk/drivers/net/wireless/ath/ath5k/gpio.c @@ -24,33 +24,10 @@ #include "reg.h" #include "debug.h" - -/** - * DOC: GPIO/LED functions - * - * Here we control the 6 bidirectional GPIO pins provided by the hw. - * We can set a GPIO pin to be an input or an output pin on GPIO control - * register and then read or set its status from GPIO data input/output - * registers. - * - * We also control the two LED pins provided by the hw, LED_0 is our - * "power" LED and LED_1 is our "network activity" LED but many scenarios - * are available from hw. Vendors might also provide LEDs connected to the - * GPIO pins, we handle them through the LED subsystem on led.c - */ - - -/** - * ath5k_hw_set_ledstate() - Set led state - * @ah: The &struct ath5k_hw - * @state: One of AR5K_LED_* - * - * Used to set the LED blinking state. This only - * works for the LED connected to the LED_0, LED_1 pins, - * not the GPIO based. +/* + * Set led state */ -void -ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state) +void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state) { u32 led; /*5210 has different led mode handling*/ @@ -97,13 +74,10 @@ ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state) AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210); } -/** - * ath5k_hw_set_gpio_input() - Set GPIO inputs - * @ah: The &struct ath5k_hw - * @gpio: GPIO pin to set as input +/* + * Set GPIO inputs */ -int -ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio) +int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio) { if (gpio >= AR5K_NUM_GPIO) return -EINVAL; @@ -115,13 +89,10 @@ ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio) return 0; } -/** - * ath5k_hw_set_gpio_output() - Set GPIO outputs - * @ah: The &struct ath5k_hw - * @gpio: The GPIO pin to set as output +/* + * Set GPIO outputs */ -int -ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio) +int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio) { if (gpio >= AR5K_NUM_GPIO) return -EINVAL; @@ -133,13 +104,10 @@ ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio) return 0; } -/** - * ath5k_hw_get_gpio() - Get GPIO state - * @ah: The &struct ath5k_hw - * @gpio: The GPIO pin to read +/* + * Get GPIO state */ -u32 -ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio) +u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio) { if (gpio >= AR5K_NUM_GPIO) return 0xffffffff; @@ -149,14 +117,10 @@ ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio) 0x1; } -/** - * ath5k_hw_set_gpio() - Set GPIO state - * @ah: The &struct ath5k_hw - * @gpio: The GPIO pin to set - * @val: Value to set (boolean) +/* + * Set GPIO state */ -int -ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val) +int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val) { u32 data; @@ -174,19 +138,10 
@@ ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val) return 0; } -/** - * ath5k_hw_set_gpio_intr() - Initialize the GPIO interrupt (RFKill switch) - * @ah: The &struct ath5k_hw - * @gpio: The GPIO pin to use - * @interrupt_level: True to generate interrupt on active pin (high) - * - * This function is used to set up the GPIO interrupt for the hw RFKill switch. - * That switch is connected to a GPIO pin and it's number is stored on EEPROM. - * It can either open or close the circuit to indicate that we should disable - * RF/Wireless to save power (we also get that from EEPROM). +/* + * Initialize the GPIO interrupt (RFKill switch) */ -void -ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, +void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, u32 interrupt_level) { u32 data; diff --git a/trunk/drivers/net/wireless/ath/ath5k/initvals.c b/trunk/drivers/net/wireless/ath/ath5k/initvals.c index a1ea78e05b47..1ffecc0fd3ed 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/initvals.c +++ b/trunk/drivers/net/wireless/ath/ath5k/initvals.c @@ -23,27 +23,24 @@ #include "reg.h" #include "debug.h" -/** - * struct ath5k_ini - Mode-independent initial register writes - * @ini_register: Register address - * @ini_value: Default value - * @ini_mode: 0 to write 1 to read (and clear) +/* + * Mode-independent initial register writes */ + struct ath5k_ini { u16 ini_register; u32 ini_value; enum { AR5K_INI_WRITE = 0, /* Default */ - AR5K_INI_READ = 1, + AR5K_INI_READ = 1, /* Cleared on read */ } ini_mode; }; -/** - * struct ath5k_ini_mode - Mode specific initial register values - * @mode_register: Register address - * @mode_value: Set of values for each enum ath5k_driver_mode +/* + * Mode specific initial register values */ + struct ath5k_ini_mode { u16 mode_register; u32 mode_value[3]; @@ -389,10 +386,11 @@ static const struct ath5k_ini ar5211_ini[] = { /* Initial mode-specific settings for AR5211 * 5211 supports OFDM-only g (draft g) but we - * need to test it ! */ + * need to test it ! 
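
The comment removed from ath5k_hw_set_gpio_intr() above notes that the RFKill switch sits on a GPIO pin whose number and active polarity both come from the EEPROM. A short sketch of deciding "radio blocked?" from a pin value plus a polarity flag is below; the EEPROM fields and the gpio_read() helper are placeholders, not the driver's interfaces.

/* rfkill_gpio.c - sketch of interpreting an RFKill GPIO with EEPROM polarity.
 * gpio_read() and the eeprom fields are placeholders for illustration. */
#include <stdbool.h>
#include <stdio.h>

struct fake_eeprom {
	unsigned int rfkill_pin;   /* which GPIO the switch is wired to */
	bool rfkill_high_active;   /* true: pin high means "disable radio" */
};

static unsigned int gpio_read(unsigned int pin)
{
	(void)pin;
	return 1;                  /* pretend the switch is currently asserted */
}

static bool radio_blocked(const struct fake_eeprom *ee)
{
	unsigned int level = gpio_read(ee->rfkill_pin);

	/* the same pin level means opposite things depending on polarity */
	return ee->rfkill_high_active ? level == 1 : level == 0;
}

int main(void)
{
	struct fake_eeprom ee = { .rfkill_pin = 0, .rfkill_high_active = true };

	printf("radio %s by hardware switch\n",
	       radio_blocked(&ee) ? "blocked" : "enabled");
	return 0;
}
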
+ */ static const struct ath5k_ini_mode ar5211_ini_mode[] = { { AR5K_TXCFG, - /* A B G */ + /* A/XR B G */ { 0x00000015, 0x0000001d, 0x00000015 } }, { AR5K_QUEUE_DFS_LOCAL_IFS(0), { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, @@ -462,7 +460,7 @@ static const struct ath5k_ini_mode ar5211_ini_mode[] = { { 0x00000010, 0x00000010, 0x00000010 } }, }; -/* Initial register settings for AR5212 and newer chips */ +/* Initial register settings for AR5212 */ static const struct ath5k_ini ar5212_ini_common_start[] = { { AR5K_RXDP, 0x00000000 }, { AR5K_RXCFG, 0x00000005 }, @@ -726,8 +724,7 @@ static const struct ath5k_ini_mode ar5212_ini_mode_start[] = { { 0x00000000, 0x00000000, 0x00000108 } }, }; -/* Initial mode-specific settings for AR5212 + RF5111 - * (Written after ar5212_ini) */ +/* Initial mode-specific settings for AR5212 + RF5111 (Written after ar5212_ini) */ static const struct ath5k_ini_mode rf5111_ini_mode_end[] = { { AR5K_TXCFG, /* A/XR B G */ @@ -760,7 +757,6 @@ static const struct ath5k_ini_mode rf5111_ini_mode_end[] = { { 0x1883800a, 0x1873800a, 0x1883800a } }, }; -/* Common for all modes */ static const struct ath5k_ini rf5111_ini_common_end[] = { { AR5K_DCU_FP, 0x00000000 }, { AR5K_PHY_AGC, 0x00000000 }, @@ -778,9 +774,7 @@ static const struct ath5k_ini rf5111_ini_common_end[] = { { 0xa23c, 0x13c889af }, }; - -/* Initial mode-specific settings for AR5212 + RF5112 - * (Written after ar5212_ini) */ +/* Initial mode-specific settings for AR5212 + RF5112 (Written after ar5212_ini) */ static const struct ath5k_ini_mode rf5112_ini_mode_end[] = { { AR5K_TXCFG, /* A/XR B G */ @@ -831,9 +825,7 @@ static const struct ath5k_ini rf5112_ini_common_end[] = { { 0xa23c, 0x13c889af }, }; - -/* Initial mode-specific settings for RF5413/5414 - * (Written after ar5212_ini) */ +/* Initial mode-specific settings for RF5413/5414 (Written after ar5212_ini) */ static const struct ath5k_ini_mode rf5413_ini_mode_end[] = { { AR5K_TXCFG, /* A/XR B G */ @@ -971,8 +963,7 @@ static const struct ath5k_ini rf5413_ini_common_end[] = { { 0xa384, 0xf3307ff0 }, }; -/* Initial mode-specific settings for RF2413/2414 - * (Written after ar5212_ini) */ +/* Initial mode-specific settings for RF2413/2414 (Written after ar5212_ini) */ /* XXX: a mode ? */ static const struct ath5k_ini_mode rf2413_ini_mode_end[] = { { AR5K_TXCFG, @@ -1094,8 +1085,7 @@ static const struct ath5k_ini rf2413_ini_common_end[] = { { 0xa384, 0xf3307ff0 }, }; -/* Initial mode-specific settings for RF2425 - * (Written after ar5212_ini) */ +/* Initial mode-specific settings for RF2425 (Written after ar5212_ini) */ /* XXX: a mode ? 
*/ static const struct ath5k_ini_mode rf2425_ini_mode_end[] = { { AR5K_TXCFG, @@ -1367,15 +1357,10 @@ static const struct ath5k_ini rf5112_ini_bbgain[] = { }; -/** - * ath5k_hw_ini_registers() - Write initial register dump common for all modes - * @ah: The &struct ath5k_hw - * @size: Dump size - * @ini_regs: The array of &struct ath5k_ini - * @skip_pcu: Skip PCU registers +/* + * Write initial register dump */ -static void -ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size, +static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size, const struct ath5k_ini *ini_regs, bool skip_pcu) { unsigned int i; @@ -1403,15 +1388,7 @@ ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size, } } -/** - * ath5k_hw_ini_mode_registers() - Write initial mode-specific register dump - * @ah: The &struct ath5k_hw - * @size: Dump size - * @ini_mode: The array of &struct ath5k_ini_mode - * @mode: One of enum ath5k_driver_mode - */ -static void -ath5k_hw_ini_mode_registers(struct ath5k_hw *ah, +static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah, unsigned int size, const struct ath5k_ini_mode *ini_mode, u8 mode) { @@ -1425,17 +1402,7 @@ ath5k_hw_ini_mode_registers(struct ath5k_hw *ah, } -/** - * ath5k_hw_write_initvals() - Write initial chip-specific register dump - * @ah: The &struct ath5k_hw - * @mode: One of enum ath5k_driver_mode - * @skip_pcu: Skip PCU registers - * - * Write initial chip-specific register dump, to get the chipset on a - * clean and ready-to-work state after warm reset. - */ -int -ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu) +int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu) { /* * Write initial register settings diff --git a/trunk/drivers/net/wireless/ath/ath5k/pci.c b/trunk/drivers/net/wireless/ath/ath5k/pci.c index 849fa060ebc4..dfa48eb7d953 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/pci.c +++ b/trunk/drivers/net/wireless/ath/ath5k/pci.c @@ -98,7 +98,7 @@ ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data) 0xffff); return true; } - usleep_range(15, 20); + udelay(15); } return false; diff --git a/trunk/drivers/net/wireless/ath/ath5k/pcu.c b/trunk/drivers/net/wireless/ath/ath5k/pcu.c index cebfd6fd31d3..a7eafa3edc21 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/pcu.c +++ b/trunk/drivers/net/wireless/ath/ath5k/pcu.c @@ -30,47 +30,11 @@ #include "reg.h" #include "debug.h" -/** - * DOC: Protocol Control Unit (PCU) functions - * - * Protocol control unit is responsible to maintain various protocol - * properties before a frame is send and after a frame is received to/from - * baseband. To be more specific, PCU handles: - * - * - Buffering of RX and TX frames (after QCU/DCUs) - * - * - Encrypting and decrypting (using the built-in engine) - * - * - Generating ACKs, RTS/CTS frames - * - * - Maintaining TSF - * - * - FCS - * - * - Updating beacon data (with TSF etc) - * - * - Generating virtual CCA - * - * - RX/Multicast filtering - * - * - BSSID filtering - * - * - Various statistics - * - * -Different operating modes: AP, STA, IBSS - * - * Note: Most of these functions can be tweaked/bypassed so you can do - * them on sw above for debugging or research. For more infos check out PCU - * registers on reg.h. - */ - -/** - * DOC: ACK rates - * +/* * AR5212+ can use higher rates for ack transmission * based on current tx rate instead of the base rate. * It does this to better utilize channel usage. 
- * There is a mapping between G rates (that cover both + * This is a mapping between G rates (that cover both * CCK and OFDM) and ack rates that we use when setting * rate -> duration table. This mapping is hw-based so * don't change anything. @@ -99,18 +63,17 @@ static const unsigned int ack_rates_high[] = \*******************/ /** - * ath5k_hw_get_frame_duration() - Get tx time of a frame + * ath5k_hw_get_frame_duration - Get tx time of a frame + * * @ah: The &struct ath5k_hw * @len: Frame's length in bytes * @rate: The @struct ieee80211_rate - * @shortpre: Indicate short preample * * Calculate tx duration of a frame given it's rate and length * It extends ieee80211_generic_frame_duration for non standard * bwmodes. */ -int -ath5k_hw_get_frame_duration(struct ath5k_hw *ah, +int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, int len, struct ieee80211_rate *rate, bool shortpre) { int sifs, preamble, plcp_bits, sym_time; @@ -166,11 +129,11 @@ ath5k_hw_get_frame_duration(struct ath5k_hw *ah, } /** - * ath5k_hw_get_default_slottime() - Get the default slot time for current mode + * ath5k_hw_get_default_slottime - Get the default slot time for current mode + * * @ah: The &struct ath5k_hw */ -unsigned int -ath5k_hw_get_default_slottime(struct ath5k_hw *ah) +unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah) { struct ieee80211_channel *channel = ah->ah_current_channel; unsigned int slot_time; @@ -197,11 +160,11 @@ ath5k_hw_get_default_slottime(struct ath5k_hw *ah) } /** - * ath5k_hw_get_default_sifs() - Get the default SIFS for current mode + * ath5k_hw_get_default_sifs - Get the default SIFS for current mode + * * @ah: The &struct ath5k_hw */ -unsigned int -ath5k_hw_get_default_sifs(struct ath5k_hw *ah) +unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah) { struct ieee80211_channel *channel = ah->ah_current_channel; unsigned int sifs; @@ -228,17 +191,17 @@ ath5k_hw_get_default_sifs(struct ath5k_hw *ah) } /** - * ath5k_hw_update_mib_counters() - Update MIB counters (mac layer statistics) + * ath5k_hw_update_mib_counters - Update MIB counters (mac layer statistics) + * * @ah: The &struct ath5k_hw * * Reads MIB counters from PCU and updates sw statistics. Is called after a * MIB interrupt, because one of these counters might have reached their maximum * and triggered the MIB interrupt, to let us read and clear the counter. * - * NOTE: Is called in interrupt context! + * Is called in interrupt context! */ -void -ath5k_hw_update_mib_counters(struct ath5k_hw *ah) +void ath5k_hw_update_mib_counters(struct ath5k_hw *ah) { struct ath5k_statistics *stats = &ah->stats; @@ -256,8 +219,10 @@ ath5k_hw_update_mib_counters(struct ath5k_hw *ah) \******************/ /** - * ath5k_hw_write_rate_duration() - Fill rate code to duration table - * @ah: The &struct ath5k_hw + * ath5k_hw_write_rate_duration - fill rate code to duration table + * + * @ah: the &struct ath5k_hw + * @mode: one of enum ath5k_driver_mode * * Write the rate code to duration table upon hw reset. This is a helper for * ath5k_hw_pcu_init(). It seems all this is doing is setting an ACK timeout on @@ -271,8 +236,7 @@ ath5k_hw_update_mib_counters(struct ath5k_hw *ah) * that include all OFDM and CCK rates. 
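As a rough feel for the airtime math behind the rate/duration handling above: an OFDM transmission costs a SIFS, a fixed preamble, and one 4 us symbol per block of payload bits. A minimal sketch for the plain 20 MHz case (the example_ helper is hypothetical; the driver's real routine also covers short preamble, CCK and the non-standard bwmodes):

/* OFDM airtime in usec for 'len' bytes at 'bitrate' (units of 100 kbps) */
static int example_ofdm_airtime(int len, int bitrate)
{
	int sifs = 16, preamble = 20, sym_time = 4;
	int bits_per_symbol = bitrate * 100 * sym_time / 1000;
	/* 16 SERVICE bits + payload + 6 tail bits, rounded up to whole symbols */
	int symbols = (16 + 8 * len + 6 + bits_per_symbol - 1) / bits_per_symbol;

	return sifs + preamble + symbols * sym_time;
}

For example, a 1500 byte frame at 54 Mbps (bitrate 540) needs 56 symbols, i.e. 16 + 20 + 224 = 260 us on air.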
* */ -static inline void -ath5k_hw_write_rate_duration(struct ath5k_hw *ah) +static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah) { struct ieee80211_rate *rate; unsigned int i; @@ -316,12 +280,12 @@ ath5k_hw_write_rate_duration(struct ath5k_hw *ah) } /** - * ath5k_hw_set_ack_timeout() - Set ACK timeout on PCU + * ath5k_hw_set_ack_timeout - Set ACK timeout on PCU + * * @ah: The &struct ath5k_hw * @timeout: Timeout in usec */ -static int -ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout) +static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout) { if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK)) <= timeout) @@ -334,12 +298,12 @@ ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout) } /** - * ath5k_hw_set_cts_timeout() - Set CTS timeout on PCU + * ath5k_hw_set_cts_timeout - Set CTS timeout on PCU + * * @ah: The &struct ath5k_hw * @timeout: Timeout in usec */ -static int -ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout) +static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout) { if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS)) <= timeout) @@ -357,14 +321,14 @@ ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout) \*******************/ /** - * ath5k_hw_set_lladdr() - Set station id + * ath5k_hw_set_lladdr - Set station id + * * @ah: The &struct ath5k_hw - * @mac: The card's mac address (array of octets) + * @mac: The card's mac address * * Set station id on hw using the provided mac address */ -int -ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac) +int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac) { struct ath_common *common = ath5k_hw_common(ah); u32 low_id, high_id; @@ -385,14 +349,14 @@ ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac) } /** - * ath5k_hw_set_bssid() - Set current BSSID on hw + * ath5k_hw_set_bssid - Set current BSSID on hw + * * @ah: The &struct ath5k_hw * * Sets the current BSSID and BSSID mask we have from the * common struct into the hardware */ -void -ath5k_hw_set_bssid(struct ath5k_hw *ah) +void ath5k_hw_set_bssid(struct ath5k_hw *ah) { struct ath_common *common = ath5k_hw_common(ah); u16 tim_offset = 0; @@ -425,23 +389,7 @@ ath5k_hw_set_bssid(struct ath5k_hw *ah) ath5k_hw_enable_pspoll(ah, NULL, 0); } -/** - * ath5k_hw_set_bssid_mask() - Filter out bssids we listen - * @ah: The &struct ath5k_hw - * @mask: The BSSID mask to set (array of octets) - * - * BSSID masking is a method used by AR5212 and newer hardware to inform PCU - * which bits of the interface's MAC address should be looked at when trying - * to decide which packets to ACK. In station mode and AP mode with a single - * BSS every bit matters since we lock to only one BSS. In AP mode with - * multiple BSSes (virtual interfaces) not every bit matters because hw must - * accept frames for all BSSes and so we tweak some bits of our mac address - * in order to have multiple BSSes. - * - * For more information check out ../hw.c of the common ath module. 
- */ -void -ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask) +void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask) { struct ath_common *common = ath5k_hw_common(ah); @@ -452,21 +400,18 @@ ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask) ath_hw_setbssidmask(common); } -/** - * ath5k_hw_set_mcast_filter() - Set multicast filter - * @ah: The &struct ath5k_hw - * @filter0: Lower 32bits of muticast filter - * @filter1: Higher 16bits of multicast filter +/* + * Set multicast filter */ -void -ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1) +void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1) { ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0); ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1); } /** - * ath5k_hw_get_rx_filter() - Get current rx filter + * ath5k_hw_get_rx_filter - Get current rx filter + * * @ah: The &struct ath5k_hw * * Returns the RX filter by reading rx filter and @@ -475,8 +420,7 @@ ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1) * and pass to the driver. For a list of frame types * check out reg.h. */ -u32 -ath5k_hw_get_rx_filter(struct ath5k_hw *ah) +u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah) { u32 data, filter = 0; @@ -496,7 +440,8 @@ ath5k_hw_get_rx_filter(struct ath5k_hw *ah) } /** - * ath5k_hw_set_rx_filter() - Set rx filter + * ath5k_hw_set_rx_filter - Set rx filter + * * @ah: The &struct ath5k_hw * @filter: RX filter mask (see reg.h) * @@ -504,8 +449,7 @@ ath5k_hw_get_rx_filter(struct ath5k_hw *ah) * register on 5212 and newer chips so that we have proper PHY * error reporting. */ -void -ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter) +void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter) { u32 data = 0; @@ -549,13 +493,13 @@ ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter) #define ATH5K_MAX_TSF_READ 10 /** - * ath5k_hw_get_tsf64() - Get the full 64bit TSF + * ath5k_hw_get_tsf64 - Get the full 64bit TSF + * * @ah: The &struct ath5k_hw * * Returns the current TSF */ -u64 -ath5k_hw_get_tsf64(struct ath5k_hw *ah) +u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah) { u32 tsf_lower, tsf_upper1, tsf_upper2; int i; @@ -592,30 +536,28 @@ ath5k_hw_get_tsf64(struct ath5k_hw *ah) return ((u64)tsf_upper1 << 32) | tsf_lower; } -#undef ATH5K_MAX_TSF_READ - /** - * ath5k_hw_set_tsf64() - Set a new 64bit TSF + * ath5k_hw_set_tsf64 - Set a new 64bit TSF + * * @ah: The &struct ath5k_hw * @tsf64: The new 64bit TSF * * Sets the new TSF */ -void -ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64) +void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64) { ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32); ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32); } /** - * ath5k_hw_reset_tsf() - Force a TSF reset + * ath5k_hw_reset_tsf - Force a TSF reset + * * @ah: The &struct ath5k_hw * * Forces a TSF reset on PCU */ -void -ath5k_hw_reset_tsf(struct ath5k_hw *ah) +void ath5k_hw_reset_tsf(struct ath5k_hw *ah) { u32 val; @@ -631,17 +573,10 @@ ath5k_hw_reset_tsf(struct ath5k_hw *ah) ath5k_hw_reg_write(ah, val, AR5K_BEACON); } -/** - * ath5k_hw_init_beacon_timers() - Initialize beacon timers - * @ah: The &struct ath5k_hw - * @next_beacon: Next TBTT - * @interval: Current beacon interval - * - * This function is used to initialize beacon timers based on current - * operation mode and settings. 
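The 64-bit TSF read earlier in this hunk relies on the usual split-register trick: read the upper word, then the lower, then the upper again, and retry until the two upper reads agree, so a carry out of the low word cannot slip by unnoticed. A stripped-down sketch (the example_ name and the retry bound are illustrative; the register names are the ones used in this file):

static u64 example_read_tsf64(struct ath5k_hw *ah)
{
	u32 hi1, hi2, lo;
	int i;

	for (i = 0; i < 10; i++) {
		hi1 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
		lo  = ath5k_hw_reg_read(ah, AR5K_TSF_L32);
		hi2 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
		if (hi1 == hi2)
			break;	/* no carry raced with our reads */
	}

	return ((u64)hi1 << 32) | lo;
}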
+/* + * Initialize beacon timers */ -void -ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval) +void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval) { u32 timer1, timer2, timer3; @@ -720,7 +655,8 @@ ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval) } /** - * ath5k_check_timer_win() - Check if timer B is timer A + window + * ath5k_check_timer_win - Check if timer B is timer A + window + * * @a: timer a (before b) * @b: timer b (after a) * @window: difference between a and b @@ -750,11 +686,12 @@ ath5k_check_timer_win(int a, int b, int window, int intval) } /** - * ath5k_hw_check_beacon_timers() - Check if the beacon timers are correct + * ath5k_hw_check_beacon_timers - Check if the beacon timers are correct + * * @ah: The &struct ath5k_hw * @intval: beacon interval * - * This is a workaround for IBSS mode + * This is a workaround for IBSS mode: * * The need for this function arises from the fact that we have 4 separate * HW timer registers (TIMER0 - TIMER3), which are closely related to the @@ -809,14 +746,14 @@ ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval) } /** - * ath5k_hw_set_coverage_class() - Set IEEE 802.11 coverage class + * ath5k_hw_set_coverage_class - Set IEEE 802.11 coverage class + * * @ah: The &struct ath5k_hw * @coverage_class: IEEE 802.11 coverage class number * * Sets IFS intervals and ACK/CTS timeouts for given coverage class. */ -void -ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class) +void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class) { /* As defined by IEEE 802.11-2007 17.3.8.6 */ int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class; @@ -835,7 +772,8 @@ ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class) \***************************/ /** - * ath5k_hw_start_rx_pcu() - Start RX engine + * ath5k_hw_start_rx_pcu - Start RX engine + * * @ah: The &struct ath5k_hw * * Starts RX engine on PCU so that hw can process RXed frames @@ -843,33 +781,32 @@ ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class) * * NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma */ -void -ath5k_hw_start_rx_pcu(struct ath5k_hw *ah) +void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah) { AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX); } /** - * at5k_hw_stop_rx_pcu() - Stop RX engine + * at5k_hw_stop_rx_pcu - Stop RX engine + * * @ah: The &struct ath5k_hw * * Stops RX engine on PCU */ -void -ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah) +void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah) { AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX); } /** - * ath5k_hw_set_opmode() - Set PCU operating mode + * ath5k_hw_set_opmode - Set PCU operating mode + * * @ah: The &struct ath5k_hw - * @op_mode: One of enum nl80211_iftype + * @op_mode: &enum nl80211_iftype operating mode * * Configure PCU for the various operating modes (AP/STA etc) */ -int -ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode) +int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode) { struct ath_common *common = ath5k_hw_common(ah); u32 pcu_reg, beacon_reg, low_id, high_id; @@ -936,17 +873,8 @@ ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode) return 0; } -/** - * ath5k_hw_pcu_init() - Initialize PCU - * @ah: The &struct ath5k_hw - * @op_mode: One of enum nl80211_iftype - * @mode: One of enum ath5k_driver_mode - * - * This function is used to initialize PCU by setting current - * 
operation mode and various other settings. - */ -void -ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode) +void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode, + u8 mode) { /* Set bssid and bssid mask */ ath5k_hw_set_bssid(ah); diff --git a/trunk/drivers/net/wireless/ath/ath5k/phy.c b/trunk/drivers/net/wireless/ath/ath5k/phy.c index e1f8613426a9..01cb72de44cb 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/phy.c +++ b/trunk/drivers/net/wireless/ath/ath5k/phy.c @@ -1,4 +1,6 @@ /* + * PHY functions + * * Copyright (c) 2004-2007 Reyk Floeter * Copyright (c) 2006-2009 Nick Kossifidis * Copyright (c) 2007-2008 Jiri Slaby @@ -18,10 +20,6 @@ * */ -/***********************\ -* PHY related functions * -\***********************/ - #include #include #include @@ -33,53 +31,14 @@ #include "../regd.h" -/** - * DOC: PHY related functions - * - * Here we handle the low-level functions related to baseband - * and analog frontend (RF) parts. This is by far the most complex - * part of the hw code so make sure you know what you are doing. - * - * Here is a list of what this is all about: - * - * - Channel setting/switching - * - * - Automatic Gain Control (AGC) calibration - * - * - Noise Floor calibration - * - * - I/Q imbalance calibration (QAM correction) - * - * - Calibration due to thermal changes (gain_F) - * - * - Spur noise mitigation - * - * - RF/PHY initialization for the various operating modes and bwmodes - * - * - Antenna control - * - * - TX power control per channel/rate/packet type - * - * Also have in mind we never got documentation for most of these - * functions, what we have comes mostly from Atheros's code, reverse - * engineering and patent docs/presentations etc. - */ - - /******************\ * Helper functions * \******************/ -/** - * ath5k_hw_radio_revision() - Get the PHY Chip revision - * @ah: The &struct ath5k_hw - * @band: One of enum ieee80211_band - * - * Returns the revision number of a 2GHz, 5GHz or single chip - * radio. +/* + * Get the PHY Chip revision */ -u16 -ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band) +u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band) { unsigned int i; u32 srev; @@ -99,7 +58,7 @@ ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band) return 0; } - usleep_range(2000, 2500); + mdelay(2); /* ...wait until PHY is ready and read the selected radio revision */ ath5k_hw_reg_write(ah, 0x00001c16, AR5K_PHY(0x34)); @@ -122,16 +81,10 @@ ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band) return ret; } -/** - * ath5k_channel_ok() - Check if a channel is supported by the hw - * @ah: The &struct ath5k_hw - * @channel: The &struct ieee80211_channel - * - * Note: We don't do any regulatory domain checks here, it's just - * a sanity check. 
+/* + * Check if a channel is supported */ -bool -ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel) +bool ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel) { u16 freq = channel->center_freq; @@ -148,13 +101,7 @@ ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel) return false; } -/** - * ath5k_hw_chan_has_spur_noise() - Check if channel is sensitive to spur noise - * @ah: The &struct ath5k_hw - * @channel: The &struct ieee80211_channel - */ -bool -ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah, +bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah, struct ieee80211_channel *channel) { u8 refclk_freq; @@ -175,20 +122,11 @@ ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah, return false; } -/** - * ath5k_hw_rfb_op() - Perform an operation on the given RF Buffer - * @ah: The &struct ath5k_hw - * @rf_regs: The struct ath5k_rf_reg - * @val: New value - * @reg_id: RF register ID - * @set: Indicate we need to swap data - * - * This is an internal function used to modify RF Banks before - * writing them to AR5K_RF_BUFFER. Check out rfbuffer.h for more - * infos. +/* + * Used to modify RF Banks before writing them to AR5K_RF_BUFFER */ -static unsigned int -ath5k_hw_rfb_op(struct ath5k_hw *ah, const struct ath5k_rf_reg *rf_regs, +static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah, + const struct ath5k_rf_reg *rf_regs, u32 val, u8 reg_id, bool set) { const struct ath5k_rf_reg *rfreg = NULL; @@ -266,7 +204,8 @@ ath5k_hw_rfb_op(struct ath5k_hw *ah, const struct ath5k_rf_reg *rf_regs, } /** - * ath5k_hw_write_ofdm_timings() - set OFDM timings on AR5212 + * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212 + * * @ah: the &struct ath5k_hw * @channel: the currently set channel upon reset * @@ -277,11 +216,10 @@ ath5k_hw_rfb_op(struct ath5k_hw *ah, const struct ath5k_rf_reg *rf_regs, * mantissa and provide these values on hw. * * For more infos i think this patent is related - * "http://www.freepatentsonline.com/7184495.html" + * http://www.freepatentsonline.com/7184495.html */ -static inline int -ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah, - struct ieee80211_channel *channel) +static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah, + struct ieee80211_channel *channel) { /* Get exponent and mantissa and set it */ u32 coef_scaled, coef_exp, coef_man, @@ -340,10 +278,6 @@ ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah, return 0; } -/** - * ath5k_hw_phy_disable() - Disable PHY - * @ah: The &struct ath5k_hw - */ int ath5k_hw_phy_disable(struct ath5k_hw *ah) { /*Just a try M.F.*/ @@ -352,13 +286,10 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah) return 0; } -/** - * ath5k_hw_wait_for_synth() - Wait for synth to settle - * @ah: The &struct ath5k_hw - * @channel: The &struct ieee80211_channel +/* + * Wait for synth to settle */ -static void -ath5k_hw_wait_for_synth(struct ath5k_hw *ah, +static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah, struct ieee80211_channel *channel) { /* @@ -377,9 +308,9 @@ ath5k_hw_wait_for_synth(struct ath5k_hw *ah, delay = delay << 2; /* XXX: /2 on turbo ? 
Let's be safe * for now */ - usleep_range(100 + delay, 100 + (2 * delay)); + udelay(100 + delay); } else { - usleep_range(1000, 1500); + mdelay(1); } } @@ -388,9 +319,7 @@ ath5k_hw_wait_for_synth(struct ath5k_hw *ah, * RF Gain optimization * \**********************/ -/** - * DOC: RF Gain optimization - * +/* * This code is used to optimize RF gain on different environments * (temperature mostly) based on feedback from a power detector. * @@ -399,22 +328,22 @@ ath5k_hw_wait_for_synth(struct ath5k_hw *ah, * no gain optimization ladder-. * * For more infos check out this patent doc - * "http://www.freepatentsonline.com/7400691.html" + * http://www.freepatentsonline.com/7400691.html * * This paper describes power drops as seen on the receiver due to * probe packets - * "http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues - * %20of%20Power%20Control.pdf" + * http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues + * %20of%20Power%20Control.pdf * * And this is the MadWiFi bug entry related to the above - * "http://madwifi-project.org/ticket/1659" + * http://madwifi-project.org/ticket/1659 * with various measurements and diagrams + * + * TODO: Deal with power drops due to probes by setting an appropriate + * tx power on the probe packets ! Make this part of the calibration process. */ -/** - * ath5k_hw_rfgain_opt_init() - Initialize ah_gain during attach - * @ah: The &struct ath5k_hw - */ +/* Initialize ah_gain during attach */ int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah) { /* Initialize the gain optimization values */ @@ -438,21 +367,17 @@ int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah) return 0; } -/** - * ath5k_hw_request_rfgain_probe() - Request a PAPD probe packet - * @ah: The &struct ath5k_hw - * - * Schedules a gain probe check on the next transmitted packet. +/* Schedule a gain probe check on the next transmitted packet. * That means our next packet is going to be sent with lower * tx power and a Peak to Average Power Detector (PAPD) will try * to measure the gain. * - * TODO: Force a tx packet (bypassing PCU arbitrator etc) + * XXX: How about forcing a tx packet (bypassing PCU arbitrator etc) * just after we enable the probe so that we don't mess with - * standard traffic. + * standard traffic ? Maybe it's time to use sw interrupts and + * a probe tasklet !!! */ -static void -ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah) +static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah) { /* Skip if gain calibration is inactive or @@ -470,15 +395,9 @@ ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah) } -/** - * ath5k_hw_rf_gainf_corr() - Calculate Gain_F measurement correction - * @ah: The &struct ath5k_hw - * - * Calculate Gain_F measurement correction - * based on the current step for RF5112 rev. 2 - */ -static u32 -ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah) +/* Calculate gain_F measurement correction + * based on the current step for RF5112 rev. 2 */ +static u32 ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah) { u32 mix, step; u32 *rf; @@ -531,19 +450,11 @@ ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah) return ah->ah_gain.g_f_corr; } -/** - * ath5k_hw_rf_check_gainf_readback() - Validate Gain_F feedback from detector - * @ah: The &struct ath5k_hw - * - * Check if current gain_F measurement is in the range of our +/* Check if current gain_F measurement is in the range of our * power detector windows. If we get a measurement outside range * we know it's not accurate (detectors can't measure anything outside - * their detection window) so we must ignore it. 
- * - * Returns true if readback was O.K. or false on failure - */ -static bool -ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah) + * their detection window) so we must ignore it */ +static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah) { const struct ath5k_rf_reg *rf_regs; u32 step, mix_ovr, level[4]; @@ -595,15 +506,9 @@ ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah) ah->ah_gain.g_current <= level[3]); } -/** - * ath5k_hw_rf_gainf_adjust() - Perform Gain_F adjustment - * @ah: The &struct ath5k_hw - * - * Choose the right target gain based on current gain - * and RF gain optimization ladder - */ -static s8 -ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah) +/* Perform gain_F adjustment by choosing the right set + * of parameters from RF gain optimization ladder */ +static s8 ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah) { const struct ath5k_gain_opt *go; const struct ath5k_gain_opt_step *g_step; @@ -667,18 +572,13 @@ ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah) return ret; } -/** - * ath5k_hw_gainf_calibrate() - Do a gain_F calibration - * @ah: The &struct ath5k_hw - * - * Main callback for thermal RF gain calibration engine +/* Main callback for thermal RF gain calibration engine * Check for a new gain reading and schedule an adjustment * if needed. * - * Returns one of enum ath5k_rfgain codes - */ -enum ath5k_rfgain -ath5k_hw_gainf_calibrate(struct ath5k_hw *ah) + * TODO: Use sw interrupt to schedule reset if gain_F needs + * adjustment */ +enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah) { u32 data, type; struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; @@ -738,18 +638,10 @@ ath5k_hw_gainf_calibrate(struct ath5k_hw *ah) return ah->ah_gain.g_state; } -/** - * ath5k_hw_rfgain_init() - Write initial RF gain settings to hw - * @ah: The &struct ath5k_hw - * @band: One of enum ieee80211_band - * - * Write initial RF gain table to set the RF sensitivity. - * - * NOTE: This one works on all RF chips and has nothing to do - * with Gain_F calibration - */ -static int -ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band) +/* Write initial RF gain table to set the RF sensitivity + * this one works on all RF chips and has nothing to do + * with gain_F calibration */ +static int ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band) { const struct ath5k_ini_rfgain *ath5k_rfg; unsigned int i, size, index; @@ -796,23 +688,16 @@ ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band) } + /********************\ * RF Registers setup * \********************/ -/** - * ath5k_hw_rfregs_init() - Initialize RF register settings - * @ah: The &struct ath5k_hw - * @channel: The &struct ieee80211_channel - * @mode: One of enum ath5k_driver_mode - * - * Setup RF registers by writing RF buffer on hw. 
For - * more infos on this, check out rfbuffer.h +/* + * Setup RF registers by writing RF buffer on hw */ -static int -ath5k_hw_rfregs_init(struct ath5k_hw *ah, - struct ieee80211_channel *channel, - unsigned int mode) +static int ath5k_hw_rfregs_init(struct ath5k_hw *ah, + struct ieee80211_channel *channel, unsigned int mode) { const struct ath5k_rf_reg *rf_regs; const struct ath5k_ini_rfbuffer *ini_rfb; @@ -1170,18 +1055,19 @@ ath5k_hw_rfregs_init(struct ath5k_hw *ah, PHY/RF channel functions \**************************/ -/** - * ath5k_hw_rf5110_chan2athchan() - Convert channel freq on RF5110 - * @channel: The &struct ieee80211_channel - * - * Map channel frequency to IEEE channel number and convert it - * to an internal channel value used by the RF5110 chipset. +/* + * Conversion needed for RF5110 */ -static u32 -ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel) +static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel) { u32 athchan; + /* + * Convert IEEE channel/MHz to an internal channel value used + * by the AR5210 chipset. This has not been verified with + * newer chipsets like the AR5212A who have a completely + * different RF/PHY part. + */ athchan = (ath5k_hw_bitswap( (ieee80211_frequency_to_channel( channel->center_freq) - 24) / 2, 5) @@ -1189,13 +1075,10 @@ ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel) return athchan; } -/** - * ath5k_hw_rf5110_channel() - Set channel frequency on RF5110 - * @ah: The &struct ath5k_hw - * @channel: The &struct ieee80211_channel +/* + * Set channel on RF5110 */ -static int -ath5k_hw_rf5110_channel(struct ath5k_hw *ah, +static int ath5k_hw_rf5110_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel) { u32 data; @@ -1206,23 +1089,15 @@ ath5k_hw_rf5110_channel(struct ath5k_hw *ah, data = ath5k_hw_rf5110_chan2athchan(channel); ath5k_hw_reg_write(ah, data, AR5K_RF_BUFFER); ath5k_hw_reg_write(ah, 0, AR5K_RF_BUFFER_CONTROL_0); - usleep_range(1000, 1500); + mdelay(1); return 0; } -/** - * ath5k_hw_rf5111_chan2athchan() - Handle 2GHz channels on RF5111/2111 - * @ieee: IEEE channel number - * @athchan: The &struct ath5k_athchan_2ghz - * - * In order to enable the RF2111 frequency converter on RF5111/2111 setups - * we need to add some offsets and extra flags to the data values we pass - * on to the PHY. So for every 2GHz channel this function gets called - * to do the conversion. +/* + * Conversion needed for 5111 */ -static int -ath5k_hw_rf5111_chan2athchan(unsigned int ieee, +static int ath5k_hw_rf5111_chan2athchan(unsigned int ieee, struct ath5k_athchan_2ghz *athchan) { int channel; @@ -1248,13 +1123,10 @@ ath5k_hw_rf5111_chan2athchan(unsigned int ieee, return 0; } -/** - * ath5k_hw_rf5111_channel() - Set channel frequency on RF5111/2111 - * @ah: The &struct ath5k_hw - * @channel: The &struct ieee80211_channel +/* + * Set channel on 5111 */ -static int -ath5k_hw_rf5111_channel(struct ath5k_hw *ah, +static int ath5k_hw_rf5111_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel) { struct ath5k_athchan_2ghz ath5k_channel_2ghz; @@ -1299,20 +1171,10 @@ ath5k_hw_rf5111_channel(struct ath5k_hw *ah, return 0; } -/** - * ath5k_hw_rf5112_channel() - Set channel frequency on 5112 and newer - * @ah: The &struct ath5k_hw - * @channel: The &struct ieee80211_channel - * - * On RF5112/2112 and newer we don't need to do any conversion. - * We pass the frequency value after a few modifications to the - * chip directly. 
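To make the 2 GHz arithmetic in the RF5112 branches below concrete (using the 3040 = 2 x 1520 intermediate-frequency constant that appears in the code and its removed comment): for channel 1, c = 2412, (c - 2192) is divisible by 5, so data0 = ((2 * (2412 - 672)) - 3040) / 10 = 44 with data1 = 0; for channel 14, c = 2484, (c - 2224) is divisible by 5, so data0 = ((2 * (2484 - 704)) - 3040) / 10 = 52 with data1 = 1. In both cases data0 is then bit-swapped before being written to the RF buffer.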
- * - * NOTE: Make sure channel frequency given is within our range or else - * we might damage the chip ! Use ath5k_channel_ok before calling this one. +/* + * Set channel on 5112 and newer */ -static int -ath5k_hw_rf5112_channel(struct ath5k_hw *ah, +static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel) { u32 data, data0, data1, data2; @@ -1321,37 +1183,17 @@ ath5k_hw_rf5112_channel(struct ath5k_hw *ah, data = data0 = data1 = data2 = 0; c = channel->center_freq; - /* My guess based on code: - * 2GHz RF has 2 synth modes, one with a Local Oscillator - * at 2224Hz and one with a LO at 2192Hz. IF is 1520Hz - * (3040/2). data0 is used to set the PLL divider and data1 - * selects synth mode. */ if (c < 4800) { - /* Channel 14 and all frequencies with 2Hz spacing - * below/above (non-standard channels) */ if (!((c - 2224) % 5)) { - /* Same as (c - 2224) / 5 */ data0 = ((2 * (c - 704)) - 3040) / 10; data1 = 1; - /* Channel 1 and all frequencies with 5Hz spacing - * below/above (standard channels without channel 14) */ } else if (!((c - 2192) % 5)) { - /* Same as (c - 2192) / 5 */ data0 = ((2 * (c - 672)) - 3040) / 10; data1 = 0; } else return -EINVAL; data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8); - /* This is more complex, we have a single synthesizer with - * 4 reference clock settings (?) based on frequency spacing - * and set using data2. LO is at 4800Hz and data0 is again used - * to set some divider. - * - * NOTE: There is an old atheros presentation at Stanford - * that mentions a method called dual direct conversion - * with 1GHz sliding IF for RF5110. Maybe that's what we - * have here, or an updated version. */ } else if ((c % 5) != 2 || c > 5435) { if (!(c % 20) && c >= 5120) { data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8); @@ -1377,16 +1219,10 @@ ath5k_hw_rf5112_channel(struct ath5k_hw *ah, return 0; } -/** - * ath5k_hw_rf2425_channel() - Set channel frequency on RF2425 - * @ah: The &struct ath5k_hw - * @channel: The &struct ieee80211_channel - * - * AR2425/2417 have a different 2GHz RF so code changes - * a little bit from RF5112. +/* + * Set the channel on the RF2425 */ -static int -ath5k_hw_rf2425_channel(struct ath5k_hw *ah, +static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel) { u32 data, data0, data2; @@ -1422,16 +1258,10 @@ ath5k_hw_rf2425_channel(struct ath5k_hw *ah, return 0; } -/** - * ath5k_hw_channel() - Set a channel on the radio chip - * @ah: The &struct ath5k_hw - * @channel: The &struct ieee80211_channel - * - * This is the main function called to set a channel on the - * radio chip based on the radio chip version. +/* + * Set a channel on the radio chip */ -static int -ath5k_hw_channel(struct ath5k_hw *ah, +static int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel) { int ret; @@ -1483,46 +1313,11 @@ ath5k_hw_channel(struct ath5k_hw *ah, return 0; } - /*****************\ PHY calibration \*****************/ -/** - * DOC: PHY Calibration routines - * - * Noise floor calibration: When we tell the hardware to - * perform a noise floor calibration by setting the - * AR5K_PHY_AGCCTL_NF bit on AR5K_PHY_AGCCTL, it will periodically - * sample-and-hold the minimum noise level seen at the antennas. - * This value is then stored in a ring buffer of recently measured - * noise floor values so we have a moving window of the last few - * samples. The median of the values in the history is then loaded - * into the hardware for its own use for RSSI and CCA measurements. 
- * This type of calibration doesn't interfere with traffic. - * - * AGC calibration: When we tell the hardware to perform - * an AGC (Automatic Gain Control) calibration by setting the - * AR5K_PHY_AGCCTL_CAL, hw disconnects the antennas and does - * a calibration on the DC offsets of ADCs. During this period - * rx/tx gets disabled so we have to deal with it on the driver - * part. - * - * I/Q calibration: When we tell the hardware to perform - * an I/Q calibration, it tries to correct I/Q imbalance and - * fix QAM constellation by sampling data from rxed frames. - * It doesn't interfere with traffic. - * - * For more infos on AGC and I/Q calibration check out patent doc - * #03/094463. - */ - -/** - * ath5k_hw_read_measured_noise_floor() - Read measured NF from hw - * @ah: The &struct ath5k_hw - */ -static s32 -ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah) +static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah) { s32 val; @@ -1530,12 +1325,7 @@ ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah) return sign_extend32(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 8); } -/** - * ath5k_hw_init_nfcal_hist() - Initialize NF calibration history buffer - * @ah: The &struct ath5k_hw - */ -void -ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah) +void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah) { int i; @@ -1544,11 +1334,6 @@ ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah) ah->ah_nfcal_hist.nfval[i] = AR5K_TUNE_CCA_MAX_GOOD_VALUE; } -/** - * ath5k_hw_update_nfcal_hist() - Update NF calibration history buffer - * @ah: The &struct ath5k_hw - * @noise_floor: The NF we got from hw - */ static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor) { struct ath5k_nfcal_hist *hist = &ah->ah_nfcal_hist; @@ -1556,12 +1341,7 @@ static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor) hist->nfval[hist->index] = noise_floor; } -/** - * ath5k_hw_get_median_noise_floor() - Get median NF from history buffer - * @ah: The &struct ath5k_hw - */ -static s16 -ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah) +static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah) { s16 sort[ATH5K_NF_CAL_HIST_MAX]; s16 tmp; @@ -1584,16 +1364,18 @@ ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah) return sort[(ATH5K_NF_CAL_HIST_MAX - 1) / 2]; } -/** - * ath5k_hw_update_noise_floor() - Update NF on hardware - * @ah: The &struct ath5k_hw +/* + * When we tell the hardware to perform a noise floor calibration + * by setting the AR5K_PHY_AGCCTL_NF bit, it will periodically + * sample-and-hold the minimum noise level seen at the antennas. + * This value is then stored in a ring buffer of recently measured + * noise floor values so we have a moving window of the last few + * samples. * - * This is the main function we call to perform a NF calibration, - * it reads NF from hardware, calculates the median and updates - * NF on hw. + * The median of the values in the history is then loaded into the + * hardware for its own use for RSSI and CCA measurements. 
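A minimal sketch of the history-median step described above (hypothetical example_ helper; the driver's own code does the equivalent over its ATH5K_NF_CAL_HIST_MAX-entry ring buffer):

static s16 example_nf_median(const s16 *hist, int n)
{
	s16 sort[ATH5K_NF_CAL_HIST_MAX];
	int i, j;

	memcpy(sort, hist, n * sizeof(s16));

	/* insertion sort; n is tiny, so simplicity wins over speed */
	for (i = 1; i < n; i++) {
		s16 tmp = sort[i];

		for (j = i - 1; j >= 0 && sort[j] > tmp; j--)
			sort[j + 1] = sort[j];
		sort[j + 1] = tmp;
	}

	return sort[(n - 1) / 2];
}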
*/ -void -ath5k_hw_update_noise_floor(struct ath5k_hw *ah) +void ath5k_hw_update_noise_floor(struct ath5k_hw *ah) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; u32 val; @@ -1608,8 +1390,6 @@ ath5k_hw_update_noise_floor(struct ath5k_hw *ah) return; } - ah->ah_cal_mask |= AR5K_CALIBRATION_NF; - ee_mode = ath5k_eeprom_mode_from_channel(ah->ah_current_channel); /* completed NF calibration, test threshold */ @@ -1654,29 +1434,20 @@ ath5k_hw_update_noise_floor(struct ath5k_hw *ah) ah->ah_noise_floor = nf; - ah->ah_cal_mask &= ~AR5K_CALIBRATION_NF; - ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "noise floor calibrated: %d\n", nf); } -/** - * ath5k_hw_rf5110_calibrate() - Perform a PHY calibration on RF5110 - * @ah: The &struct ath5k_hw - * @channel: The &struct ieee80211_channel - * - * Do a complete PHY calibration (AGC + NF + I/Q) on RF5110 +/* + * Perform a PHY calibration on RF5110 + * -Fix BPSK/QAM Constellation (I/Q correction) */ -static int -ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah, +static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel) { u32 phy_sig, phy_agc, phy_sat, beacon; int ret; - if (!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) - return 0; - /* * Disable beacons and RX/TX queues, wait */ @@ -1685,7 +1456,7 @@ ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah, beacon = ath5k_hw_reg_read(ah, AR5K_BEACON_5210); ath5k_hw_reg_write(ah, beacon & ~AR5K_BEACON_ENABLE, AR5K_BEACON_5210); - usleep_range(2000, 2500); + mdelay(2); /* * Set the channel (with AGC turned off) @@ -1698,7 +1469,7 @@ ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah, * Activate PHY and wait */ ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT); - usleep_range(1000, 1500); + mdelay(1); AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE); @@ -1735,7 +1506,7 @@ ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah, ath5k_hw_reg_write(ah, AR5K_PHY_RFSTG_DISABLE, AR5K_PHY_RFSTG); AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE); - usleep_range(1000, 1500); + mdelay(1); /* * Enable calibration and wait until completion @@ -1766,9 +1537,8 @@ ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah, return 0; } -/** - * ath5k_hw_rf511x_iq_calibrate() - Perform I/Q calibration on RF5111 and newer - * @ah: The &struct ath5k_hw +/* + * Perform I/Q calibration on RF5111/5112 and newer chips */ static int ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah) @@ -1777,19 +1547,12 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah) s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd; int i; - /* Skip if I/Q calibration is not needed or if it's still running */ - if (!ah->ah_iq_cal_needed) - return -EINVAL; - else if (ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN) { - ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE, - "I/Q calibration still running"); - return -EBUSY; - } + if (!ah->ah_calibration || + ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN) + return 0; /* Calibration has finished, get the results and re-run */ - - /* Work around for empty results which can apparently happen on 5212: - * Read registers up to 10 times until we get both i_pr and q_pwr */ + /* work around empty results which can apparently happen on 5212 */ for (i = 0; i <= 10; i++) { iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR); i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I); @@ -1807,13 +1570,9 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah) else q_coffd = q_pwr >> 7; - /* In case i_coffd became zero, cancel calibration - * not only it's too small, it'll also result a divide 
- * by zero later on. */ + /* protect against divide by 0 and loss of sign bits */ if (i_coffd == 0 || q_coffd < 2) - return -ECANCELED; - - /* Protect against loss of sign bits */ + return 0; i_coff = (-iq_corr) / i_coffd; i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */ @@ -1842,17 +1601,10 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah) return 0; } -/** - * ath5k_hw_phy_calibrate() - Perform a PHY calibration - * @ah: The &struct ath5k_hw - * @channel: The &struct ieee80211_channel - * - * The main function we call from above to perform - * a short or full PHY calibration based on RF chip - * and current channel +/* + * Perform a PHY calibration */ -int -ath5k_hw_phy_calibrate(struct ath5k_hw *ah, +int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel) { int ret; @@ -1861,43 +1613,10 @@ ath5k_hw_phy_calibrate(struct ath5k_hw *ah, return ath5k_hw_rf5110_calibrate(ah, channel); ret = ath5k_hw_rf511x_iq_calibrate(ah); - if (ret) { - ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE, - "No I/Q correction performed (%uMHz)\n", - channel->center_freq); - - /* Happens all the time if there is not much - * traffic, consider it normal behaviour. */ - ret = 0; - } - - /* On full calibration do an AGC calibration and - * request a PAPD probe for gainf calibration if - * needed */ - if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) { - AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, - AR5K_PHY_AGCCTL_CAL); - - ret = ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL, - AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF, - 0, false); - if (ret) { - ATH5K_ERR(ah, - "gain calibration timeout (%uMHz)\n", - channel->center_freq); - } - - if ((ah->ah_radio == AR5K_RF5111 || - ah->ah_radio == AR5K_RF5112) - && (channel->hw_value != AR5K_MODE_11B)) - ath5k_hw_request_rfgain_probe(ah); - } - - /* Update noise floor - * XXX: Only do this after AGC calibration */ - if (!(ah->ah_cal_mask & AR5K_CALIBRATION_NF)) - ath5k_hw_update_noise_floor(ah); + if ((ah->ah_radio == AR5K_RF5111 || ah->ah_radio == AR5K_RF5112) && + (channel->hw_value != AR5K_MODE_11B)) + ath5k_hw_request_rfgain_probe(ah); return ret; } @@ -1907,16 +1626,6 @@ ath5k_hw_phy_calibrate(struct ath5k_hw *ah, * Spur mitigation functions * \***************************/ -/** - * ath5k_hw_set_spur_mitigation_filter() - Configure SPUR filter - * @ah: The &struct ath5k_hw - * @channel: The &struct ieee80211_channel - * - * This function gets called during PHY initialization to - * configure the spur filter for the given channel. Spur is noise - * generated due to "reflection" effects, for more information on this - * method check out patent US7643810 - */ static void ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah, struct ieee80211_channel *channel) @@ -2156,73 +1865,15 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah, * Antenna control * \*****************/ -/** - * DOC: Antenna control - * - * Hw supports up to 14 antennas ! I haven't found any card that implements - * that. The maximum number of antennas I've seen is up to 4 (2 for 2GHz and 2 - * for 5GHz). Antenna 1 (MAIN) should be omnidirectional, 2 (AUX) - * omnidirectional or sectorial and antennas 3-14 sectorial (or directional). - * - * We can have a single antenna for RX and multiple antennas for TX. - * RX antenna is our "default" antenna (usually antenna 1) set on - * DEFAULT_ANTENNA register and TX antenna is set on each TX control descriptor - * (0 for automatic selection, 1 - 14 antenna number). 
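The I/Q correction hunk above clamps the computed coefficients into narrow signed register fields (the visible comment notes the I coefficient is a signed 6-bit value, hence the [-32, 31] range). The general pattern, as a hedged sketch with a hypothetical helper name:

/* clamp 'val' into a signed 'bits'-wide field; bits = 6 gives [-32, 31] */
static s32 example_clamp_signed_field(s32 val, unsigned int bits)
{
	s32 lo = -(1 << (bits - 1));
	s32 hi = (1 << (bits - 1)) - 1;

	return clamp(val, lo, hi);
}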
- * - * We can let hw do all the work doing fast antenna diversity for both - * tx and rx or we can do things manually. Here are the options we have - * (all are bits of STA_ID1 register): - * - * AR5K_STA_ID1_DEFAULT_ANTENNA -> When 0 is set as the TX antenna on TX - * control descriptor, use the default antenna to transmit or else use the last - * antenna on which we received an ACK. - * - * AR5K_STA_ID1_DESC_ANTENNA -> Update default antenna after each TX frame to - * the antenna on which we got the ACK for that frame. - * - * AR5K_STA_ID1_RTS_DEF_ANTENNA -> Use default antenna for RTS or else use the - * one on the TX descriptor. - * - * AR5K_STA_ID1_SELFGEN_DEF_ANT -> Use default antenna for self generated frames - * (ACKs etc), or else use current antenna (the one we just used for TX). - * - * Using the above we support the following scenarios: - * - * AR5K_ANTMODE_DEFAULT -> Hw handles antenna diversity etc automatically - * - * AR5K_ANTMODE_FIXED_A -> Only antenna A (MAIN) is present - * - * AR5K_ANTMODE_FIXED_B -> Only antenna B (AUX) is present - * - * AR5K_ANTMODE_SINGLE_AP -> Sta locked on a single ap - * - * AR5K_ANTMODE_SECTOR_AP -> AP with tx antenna set on tx desc - * - * AR5K_ANTMODE_SECTOR_STA -> STA with tx antenna set on tx desc - * - * AR5K_ANTMODE_DEBUG Debug mode -A -> Rx, B-> Tx- - * - * Also note that when setting antenna to F on tx descriptor card inverts - * current tx antenna. - */ - -/** - * ath5k_hw_set_def_antenna() - Set default rx antenna on AR5211/5212 and newer - * @ah: The &struct ath5k_hw - * @ant: Antenna number - */ -static void +static void /*TODO:Boundary check*/ ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant) { if (ah->ah_version != AR5K_AR5210) ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA); } -/** - * ath5k_hw_set_fast_div() - Enable/disable fast rx antenna diversity - * @ah: The &struct ath5k_hw - * @ee_mode: One of enum ath5k_driver_mode - * @enable: True to enable, false to disable +/* + * Enable/disable fast rx antenna diversity */ static void ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable) @@ -2262,14 +1913,6 @@ ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable) } } -/** - * ath5k_hw_set_antenna_switch() - Set up antenna switch table - * @ah: The &struct ath5k_hw - * @ee_mode: One of enum ath5k_driver_mode - * - * Switch table comes from EEPROM and includes information on controlling - * the 2 antenna RX attenuators - */ void ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode) { @@ -2301,10 +1944,8 @@ ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode) AR5K_PHY_ANT_SWITCH_TABLE_1); } -/** - * ath5k_hw_set_antenna_mode() - Set antenna operating mode - * @ah: The &struct ath5k_hw - * @ant_mode: One of enum ath5k_ant_mode +/* + * Set antenna operating mode */ void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode) @@ -2427,13 +2068,8 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode) * Helper functions */ -/** - * ath5k_get_interpolated_value() - Get interpolated Y val between two points - * @target: X value of the middle point - * @x_left: X value of the left point - * @x_right: X value of the right point - * @y_left: Y value of the left point - * @y_right: Y value of the right point +/* + * Do linear interpolation between two given (x, y) points */ static s16 ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right, @@ -2460,18 +2096,13 @@ ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right, return result; } -/** - * 
ath5k_get_linear_pcdac_min() - Find vertical boundary (min pwr) for the - * linear PCDAC curve - * @stepL: Left array with y values (pcdac steps) - * @stepR: Right array with y values (pcdac steps) - * @pwrL: Left array with x values (power steps) - * @pwrR: Right array with x values (power steps) +/* + * Find vertical boundary (min pwr) for the linear PCDAC curve. * * Since we have the top of the curve and we draw the line below * until we reach 1 (1 pcdac step) we need to know which point - * (x value) that is so that we don't go below x axis and have negative - * pcdac values when creating the curve, or fill the table with zeros. + * (x value) that is so that we don't go below y axis and have negative + * pcdac values when creating the curve, or fill the table with zeroes. */ static s16 ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR, @@ -2517,16 +2148,7 @@ ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR, return max(min_pwrL, min_pwrR); } -/** - * ath5k_create_power_curve() - Create a Power to PDADC or PCDAC curve - * @pmin: Minimum power value (xmin) - * @pmax: Maximum power value (xmax) - * @pwr: Array of power steps (x values) - * @vpd: Array of matching PCDAC/PDADC steps (y values) - * @num_points: Number of provided points - * @vpd_table: Array to fill with the full PCDAC/PDADC values (y values) - * @type: One of enum ath5k_powertable_type (eeprom.h) - * +/* * Interpolate (pwr,vpd) points to create a Power to PDADC or a * Power to PCDAC curve. * @@ -2584,14 +2206,7 @@ ath5k_create_power_curve(s16 pmin, s16 pmax, } } -/** - * ath5k_get_chan_pcal_surrounding_piers() - Get surrounding calibration piers - * for a given channel. - * @ah: The &struct ath5k_hw - * @channel: The &struct ieee80211_channel - * @pcinfo_l: The &struct ath5k_chan_pcal_info to put the left cal. pier - * @pcinfo_r: The &struct ath5k_chan_pcal_info to put the right cal. pier - * +/* * Get the surrounding per-channel power calibration piers * for a given frequency so that we can interpolate between * them and come up with an appropriate dataset for our current @@ -2674,17 +2289,11 @@ ath5k_get_chan_pcal_surrounding_piers(struct ath5k_hw *ah, *pcinfo_r = &pcinfo[idx_r]; } -/** - * ath5k_get_rate_pcal_data() - Get the interpolated per-rate power - * calibration data - * @ah: The &struct ath5k_hw *ah, - * @channel: The &struct ieee80211_channel - * @rates: The &struct ath5k_rate_pcal_info to fill - * +/* * Get the surrounding per-rate power calibration data * for a given frequency and interpolate between power * values to set max target power supported by hw for - * each rate on this frequency. + * each rate. */ static void ath5k_get_rate_pcal_data(struct ath5k_hw *ah, @@ -2772,11 +2381,7 @@ ath5k_get_rate_pcal_data(struct ath5k_hw *ah, rpinfo[idx_r].target_power_54); } -/** - * ath5k_get_max_ctl_power() - Get max edge power for a given frequency - * @ah: the &struct ath5k_hw - * @channel: The &struct ieee80211_channel - * +/* * Get the max edge power for this channel if * we have such data from EEPROM's Conformance Test * Limits (CTL), and limit max power if needed. @@ -2856,39 +2461,8 @@ ath5k_get_max_ctl_power(struct ath5k_hw *ah, * Power to PCDAC table functions */ -/** - * DOC: Power to PCDAC table functions - * - * For RF5111 we have an XPD -eXternal Power Detector- curve - * for each calibrated channel. Each curve has 0,5dB Power steps - * on x axis and PCDAC steps (offsets) on y axis and looks like an - * exponential function. 
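Since every curve reconstruction in this power-table code leans on the two-point interpolation helper shown earlier, here is the underlying formula as a standalone sketch (integer math, so the division truncates; the example_ name is hypothetical):

/* y at 'target' on the line through (x_left, y_left) and (x_right, y_right) */
static s16 example_interpolate(s16 target, s16 x_left, s16 x_right,
			       s16 y_left, s16 y_right)
{
	if (x_right == x_left)
		return y_left;	/* degenerate pier pair */

	return y_left + ((target - x_left) * (y_right - y_left)) /
			(x_right - x_left);
}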
To recreate the curve we read 11 points - * from eeprom (eeprom.c) and interpolate here. - * - * For RF5112 we have 4 XPD -eXternal Power Detector- curves - * for each calibrated channel on 0, -6, -12 and -18dBm but we only - * use the higher (3) and the lower (0) curves. Each curve again has 0.5dB - * power steps on x axis and PCDAC steps on y axis and looks like a - * linear function. To recreate the curve and pass the power values - * on hw, we get 4 points for xpd 0 (lower gain -> max power) - * and 3 points for xpd 3 (higher gain -> lower power) from eeprom (eeprom.c) - * and interpolate here. - * - * For a given channel we get the calibrated points (piers) for it or - * -if we don't have calibration data for this specific channel- from the - * available surrounding channels we have calibration data for, after we do a - * linear interpolation between them. Then since we have our calibrated points - * for this channel, we do again a linear interpolation between them to get the - * whole curve. - * - * We finally write the Y values of the curve(s) (the PCDAC values) on hw - */ - -/** - * ath5k_fill_pwr_to_pcdac_table() - Fill Power to PCDAC table on RF5111 - * @ah: The &struct ath5k_hw - * @table_min: Minimum power (x min) - * @table_max: Maximum power (x max) +/* + * Fill Power to PCDAC table on RF5111 * * No further processing is needed for RF5111, the only thing we have to * do is fill the values below and above calibration range since eeprom data @@ -2929,14 +2503,10 @@ ath5k_fill_pwr_to_pcdac_table(struct ath5k_hw *ah, s16* table_min, } -/** - * ath5k_combine_linear_pcdac_curves() - Combine available PCDAC Curves - * @ah: The &struct ath5k_hw - * @table_min: Minimum power (x min) - * @table_max: Maximum power (x max) - * @pdcurves: Number of pd curves +/* + * Combine available XPD Curves and fill Linear Power to PCDAC table + * on RF5112 * - * Combine available XPD Curves and fill Linear Power to PCDAC table on RF5112 * RFX112 can have up to 2 curves (one for low txpower range and one for * higher txpower range). We need to put them both on pcdac_out and place * them in the correct location. In case we only have one curve available @@ -3038,10 +2608,7 @@ ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min, } } -/** - * ath5k_write_pcdac_table() - Write the PCDAC values on hw - * @ah: The &struct ath5k_hw - */ +/* Write PCDAC values on hw */ static void ath5k_write_pcdac_table(struct ath5k_hw *ah) { @@ -3064,32 +2631,9 @@ ath5k_write_pcdac_table(struct ath5k_hw *ah) * Power to PDADC table functions */ -/** - * DOC: Power to PDADC table functions - * - * For RF2413 and later we have a Power to PDADC table (Power Detector) - * instead of a PCDAC (Power Control) and 4 pd gain curves for each - * calibrated channel. Each curve has power on x axis in 0.5 db steps and - * PDADC steps on y axis and looks like an exponential function like the - * RF5111 curve. - * - * To recreate the curves we read the points from eeprom (eeprom.c) - * and interpolate here. Note that in most cases only 2 (higher and lower) - * curves are used (like RF5112) but vendors have the opportunity to include - * all 4 curves on eeprom. The final curve (higher power) has an extra - * point for better accuracy like RF5112. 
- * - * The process is similar to what we do above for RF5111/5112 - */ - -/** - * ath5k_combine_pwr_to_pdadc_curves() - Combine the various PDADC curves - * @ah: The &struct ath5k_hw - * @pwr_min: Minimum power (x min) - * @pwr_max: Maximum power (x max) - * @pdcurves: Number of available curves +/* + * Set the gain boundaries and create final Power to PDADC table * - * Combine the various pd curves and create the final Power to PDADC table * We can have up to 4 pd curves, we need to do a similar process * as we do for RF5112. This time we don't have an edge_flag but we * set the gain boundaries on a separate register. @@ -3213,11 +2757,7 @@ ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah, } -/** - * ath5k_write_pwr_to_pdadc_table() - Write the PDADC values on hw - * @ah: The &struct ath5k_hw - * @ee_mode: One of enum ath5k_driver_mode - */ +/* Write PDADC values on hw */ static void ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode) { @@ -3274,13 +2814,7 @@ ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode) * Common code for PCDAC/PDADC tables */ -/** - * ath5k_setup_channel_powertable() - Set up power table for this channel - * @ah: The &struct ath5k_hw - * @channel: The &struct ieee80211_channel - * @ee_mode: One of enum ath5k_driver_mode - * @type: One of enum ath5k_powertable_type (eeprom.h) - * +/* * This is the main function that uses all of the above * to set PCDAC/PDADC table on hw for the current channel. * This table is used for tx power calibration on the baseband, @@ -3478,12 +3012,7 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah, return 0; } -/** - * ath5k_write_channel_powertable() - Set power table for current channel on hw - * @ah: The &struct ath5k_hw - * @ee_mode: One of enum ath5k_driver_mode - * @type: One of enum ath5k_powertable_type (eeprom.h) - */ +/* Write power table for current channel to hw */ static void ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type) { @@ -3493,36 +3022,28 @@ ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type) ath5k_write_pcdac_table(ah); } - -/** - * DOC: Per-rate tx power setting +/* + * Per-rate tx power setting * - * This is the code that sets the desired tx power limit (below + * This is the code that sets the desired tx power (below * maximum) on hw for each rate (we also have TPC that sets - * power per packet type). We do that by providing an index on the - * PCDAC/PDADC table we set up above, for each rate. + * power per packet). We do that by providing an index on the + * PCDAC/PDADC table we set up. + */ + +/* + * Set rate power table * * For now we only limit txpower based on maximum tx power - * supported by hw (what's inside rate_info) + conformance test - * limits. We need to limit this even more, based on regulatory domain - * etc to be safe. Normally this is done from above so we don't care - * here, all we care is that the tx power we set will be O.K. - * for the hw (e.g. won't create noise on PA etc). + * supported by hw (what's inside rate_info). We need to limit + * this even more, based on regulatory domain etc. 
* - * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps - - * x values) and is indexed as follows: + * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps) + * and is indexed as follows: * rates[0] - rates[7] -> OFDM rates * rates[8] - rates[14] -> CCK rates * rates[15] -> XR rates (they all have the same power) */ - -/** - * ath5k_setup_rate_powertable() - Set up rate power table for a given tx power - * @ah: The &struct ath5k_hw - * @max_pwr: The maximum tx power requested in 0.5dB steps - * @rate_info: The &struct ath5k_rate_pcal_info to fill - * @ee_mode: One of enum ath5k_driver_mode - */ static void ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr, struct ath5k_rate_pcal_info *rate_info, @@ -3593,14 +3114,8 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr, } -/** - * ath5k_hw_txpower() - Set transmission power limit for a given channel - * @ah: The &struct ath5k_hw - * @channel: The &struct ieee80211_channel - * @txpower: Requested tx power in 0.5dB steps - * - * Combines all of the above to set the requested tx power limit - * on hw. +/* + * Set transmission power */ static int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, @@ -3718,16 +3233,7 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, return 0; } -/** - * ath5k_hw_set_txpower_limit() - Set txpower limit for the current channel - * @ah: The &struct ath5k_hw - * @txpower: The requested tx power limit in 0.5dB steps - * - * This function provides access to ath5k_hw_txpower to the driver in - * case user or an application changes it while PHY is running. - */ -int -ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower) +int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower) { ATH5K_DBG(ah, ATH5K_DEBUG_TXPOWER, "changing txpower to %d\n", txpower); @@ -3735,26 +3241,11 @@ ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower) return ath5k_hw_txpower(ah, ah->ah_current_channel, txpower); } - /*************\ Init function \*************/ -/** - * ath5k_hw_phy_init() - Initialize PHY - * @ah: The &struct ath5k_hw - * @channel: The @struct ieee80211_channel - * @mode: One of enum ath5k_driver_mode - * @fast: Try a fast channel switch instead - * - * This is the main function used during reset to initialize PHY - * or do a fast channel change if possible. - * - * NOTE: Do not call this one from the driver, it assumes PHY is in a - * warm reset state ! 
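Note that both max_pwr above and the limit passed to ath5k_hw_set_txpower_limit() are expressed in 0.5 dB steps, so capping output at, say, 15 dBm looks like the following hypothetical call site:

	/* request a 15 dBm limit: units are 0.5 dB steps */
	ath5k_hw_set_txpower_limit(ah, 15 * 2);

The per-rate table this feeds is indexed exactly as the comment in this hunk describes: entries 0-7 for OFDM, 8-14 for CCK, 15 for XR.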
- */ -int -ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, +int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, u8 mode, bool fast) { struct ieee80211_channel *curr_channel; @@ -3864,7 +3355,7 @@ ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, if (ret) return ret; - usleep_range(1000, 1500); + mdelay(1); /* * Write RF buffer @@ -3885,10 +3376,10 @@ ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, } } else if (ah->ah_version == AR5K_AR5210) { - usleep_range(1000, 1500); + mdelay(1); /* Disable phy and wait */ ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT); - usleep_range(1000, 1500); + mdelay(1); } /* Set channel on PHY */ @@ -3914,7 +3405,7 @@ ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, for (i = 0; i <= 20; i++) { if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10)) break; - usleep_range(200, 250); + udelay(200); } ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1); @@ -3942,9 +3433,9 @@ ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, /* At the same time start I/Q calibration for QAM constellation * -no need for CCK- */ - ah->ah_iq_cal_needed = false; + ah->ah_calibration = false; if (!(mode == AR5K_MODE_11B)) { - ah->ah_iq_cal_needed = true; + ah->ah_calibration = true; AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15); AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, diff --git a/trunk/drivers/net/wireless/ath/ath5k/qcu.c b/trunk/drivers/net/wireless/ath/ath5k/qcu.c index 30b50f934172..776654228eaa 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/qcu.c +++ b/trunk/drivers/net/wireless/ath/ath5k/qcu.c @@ -17,48 +17,23 @@ */ /********************************************\ -Queue Control Unit, DCF Control Unit Functions +Queue Control Unit, DFS Control Unit Functions \********************************************/ #include "ath5k.h" #include "reg.h" #include "debug.h" -#include - -/** - * DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions - * - * Here we setup parameters for the 12 available TX queues. Note that - * on the various registers we can usually only map the first 10 of them so - * basically we have 10 queues to play with. Each queue has a matching - * QCU that controls when the queue will get triggered and multiple QCUs - * can be mapped to a single DCU that controls the various DFS parameters - * for the various queues. In our setup we have a 1:1 mapping between QCUs - * and DCUs allowing us to have different DFS settings for each queue. - * - * When a frame goes into a TX queue, QCU decides when it'll trigger a - * transmission based on various criteria (such as how many data we have inside - * it's buffer or -if it's a beacon queue- if it's time to fire up the queue - * based on TSF etc), DCU adds backoff, IFSes etc and then a scheduler - * (arbitrator) decides the priority of each QCU based on it's configuration - * (e.g. beacons are always transmitted when they leave DCU bypassing all other - * frames from other queues waiting to be transmitted). After a frame leaves - * the DCU it goes to PCU for further processing and then to PHY for - * the actual transmission. 
- */ /******************\ * Helper functions * \******************/ -/** - * ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue - * @ah: The &struct ath5k_hw - * @queue: One of enum ath5k_tx_queue_id +/* + * Get number of pending frames + * for a specific queue [5211+] */ -u32 -ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue) +u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue) { u32 pending; AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); @@ -83,13 +58,10 @@ ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue) return pending; } -/** - * ath5k_hw_release_tx_queue() - Set a transmit queue inactive - * @ah: The &struct ath5k_hw - * @queue: One of enum ath5k_tx_queue_id +/* + * Set a transmit queue inactive */ -void -ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue) +void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue) { if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num)) return; @@ -100,56 +72,34 @@ ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue) AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue); } -/** - * ath5k_cw_validate() - Make sure the given cw is valid - * @cw_req: The contention window value to check - * +/* * Make sure cw is a power of 2 minus 1 and smaller than 1024 */ -static u16 -ath5k_cw_validate(u16 cw_req) +static u16 ath5k_cw_validate(u16 cw_req) { + u32 cw = 1; cw_req = min(cw_req, (u16)1023); - /* Check if cw_req + 1 a power of 2 */ - if (is_power_of_2(cw_req + 1)) - return cw_req; + while (cw < cw_req) + cw = (cw << 1) | 1; - /* Check if cw_req is a power of 2 */ - if (is_power_of_2(cw_req)) - return cw_req - 1; - - /* If none of the above is correct - * find the closest power of 2 */ - cw_req = (u16) roundup_pow_of_two(cw_req) - 1; - - return cw_req; + return cw; } -/** - * ath5k_hw_get_tx_queueprops() - Get properties for a transmit queue - * @ah: The &struct ath5k_hw - * @queue: One of enum ath5k_tx_queue_id - * @queue_info: The &struct ath5k_txq_info to fill +/* + * Get properties for a transmit queue */ -int -ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, +int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, struct ath5k_txq_info *queue_info) { memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info)); return 0; } -/** - * ath5k_hw_set_tx_queueprops() - Set properties for a transmit queue - * @ah: The &struct ath5k_hw - * @queue: One of enum ath5k_tx_queue_id - * @qinfo: The &struct ath5k_txq_info to use - * - * Returns 0 on success or -EIO if queue is inactive +/* + * Set properties for a transmit queue */ -int -ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue, +int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue, const struct ath5k_txq_info *qinfo) { struct ath5k_txq_info *qi; @@ -189,16 +139,10 @@ ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue, return 0; } -/** - * ath5k_hw_setup_tx_queue() - Initialize a transmit queue - * @ah: The &struct ath5k_hw - * @queue_type: One of enum ath5k_tx_queue - * @queue_info: The &struct ath5k_txq_info to use - * - * Returns 0 on success, -EINVAL on invalid arguments +/* + * Initialize a transmit queue */ -int -ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type, +int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type, struct ath5k_txq_info *queue_info) { unsigned int queue; @@ -273,16 +217,10 @@ ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type, 
* Single QCU/DCU initialization * \*******************************/ -/** - * ath5k_hw_set_tx_retry_limits() - Set tx retry limits on DCU - * @ah: The &struct ath5k_hw - * @queue: One of enum ath5k_tx_queue_id - * - * This function is used when initializing a queue, to set - * retry limits based on ah->ah_retry_* and the chipset used. +/* + * Set tx retry limits on DCU */ -void -ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah, +void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah, unsigned int queue) { /* Single data queue on AR5210 */ @@ -317,15 +255,15 @@ ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah, } /** - * ath5k_hw_reset_tx_queue() - Initialize a single hw queue - * @ah: The &struct ath5k_hw - * @queue: One of enum ath5k_tx_queue_id + * ath5k_hw_reset_tx_queue - Initialize a single hw queue * - * Set DCF properties for the given transmit queue on DCU + * @ah The &struct ath5k_hw + * @queue The hw queue number + * + * Set DFS properties for the given transmit queue on DCU * and configures all queue-specific parameters. */ -int -ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue) +int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue) { struct ath5k_txq_info *tq = &ah->ah_txq[queue]; @@ -553,9 +491,10 @@ ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue) \**************************/ /** - * ath5k_hw_set_ifs_intervals() - Set global inter-frame spaces on DCU - * @ah: The &struct ath5k_hw - * @slot_time: Slot time in us + * ath5k_hw_set_ifs_intervals - Set global inter-frame spaces on DCU + * + * @ah The &struct ath5k_hw + * @slot_time Slot time in us * * Sets the global IFS intervals on DCU (also works on AR5210) for * the given slot time and the current bwmode. @@ -658,15 +597,7 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time) } -/** - * ath5k_hw_init_queues() - Initialize tx queues - * @ah: The &struct ath5k_hw - * - * Initializes all tx queues based on information on - * ah->ah_txq* set by the driver - */ -int -ath5k_hw_init_queues(struct ath5k_hw *ah) +int ath5k_hw_init_queues(struct ath5k_hw *ah) { int i, ret; diff --git a/trunk/drivers/net/wireless/ath/ath5k/reg.h b/trunk/drivers/net/wireless/ath/ath5k/reg.h index 0ea1608b47fd..f5c1000045d3 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/reg.h +++ b/trunk/drivers/net/wireless/ath/ath5k/reg.h @@ -280,10 +280,6 @@ * 5211/5212 we have one primary and 4 secondary registers. * So we have AR5K_ISR for 5210 and AR5K_PISR /SISRx for 5211/5212. * Most of these bits are common for all chipsets. - * - * NOTE: On 5211+ TXOK, TXDESC, TXERR, TXEOL and TXURN contain - * the logical OR from per-queue interrupt bits found on SISR registers - * (see below). 
*/ #define AR5K_ISR 0x001c /* Register Address [5210] */ #define AR5K_PISR 0x0080 /* Register Address [5211+] */ @@ -296,10 +292,7 @@ #define AR5K_ISR_TXOK 0x00000040 /* Frame successfully transmitted */ #define AR5K_ISR_TXDESC 0x00000080 /* TX descriptor request */ #define AR5K_ISR_TXERR 0x00000100 /* Transmit error */ -#define AR5K_ISR_TXNOFRM 0x00000200 /* No frame transmitted (transmit timeout) - * NOTE: We don't have per-queue info for this - * one, but we can enable it per-queue through - * TXNOFRM_QCU field on TXNOFRM register */ +#define AR5K_ISR_TXNOFRM 0x00000200 /* No frame transmitted (transmit timeout) */ #define AR5K_ISR_TXEOL 0x00000400 /* Empty TX descriptor */ #define AR5K_ISR_TXURN 0x00000800 /* Transmit FIFO underrun */ #define AR5K_ISR_MIB 0x00001000 /* Update MIB counters */ @@ -309,29 +302,21 @@ #define AR5K_ISR_SWBA 0x00010000 /* Software beacon alert */ #define AR5K_ISR_BRSSI 0x00020000 /* Beacon rssi below threshold (?) */ #define AR5K_ISR_BMISS 0x00040000 /* Beacon missed */ -#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] - * 'or' of MCABT, SSERR, DPERR from SISR2 */ +#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */ #define AR5K_ISR_BNR 0x00100000 /* Beacon not ready [5211+] */ #define AR5K_ISR_MCABT 0x00100000 /* Master Cycle Abort [5210] */ #define AR5K_ISR_RXCHIRP 0x00200000 /* CHIRP Received [5212+] */ #define AR5K_ISR_SSERR 0x00200000 /* Signaled System Error [5210] */ -#define AR5K_ISR_DPERR 0x00400000 /* Bus parity error [5210] */ +#define AR5K_ISR_DPERR 0x00400000 /* Det par Error (?) [5210] */ #define AR5K_ISR_RXDOPPLER 0x00400000 /* Doppler chirp received [5212+] */ #define AR5K_ISR_TIM 0x00800000 /* [5211+] */ -#define AR5K_ISR_BCNMISC 0x00800000 /* Misc beacon related interrupt - * 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT, - * CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */ +#define AR5K_ISR_BCNMISC 0x00800000 /* 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT, + CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */ #define AR5K_ISR_GPIO 0x01000000 /* GPIO (rf kill) */ #define AR5K_ISR_QCBRORN 0x02000000 /* QCU CBR overrun [5211+] */ #define AR5K_ISR_QCBRURN 0x04000000 /* QCU CBR underrun [5211+] */ #define AR5K_ISR_QTRIG 0x08000000 /* QCU scheduling trigger [5211+] */ -#define AR5K_ISR_BITS_FROM_SISRS (AR5K_ISR_TXOK | AR5K_ISR_TXDESC |\ - AR5K_ISR_TXERR | AR5K_ISR_TXEOL |\ - AR5K_ISR_TXURN | AR5K_ISR_HIUERR |\ - AR5K_ISR_BCNMISC | AR5K_ISR_QCBRORN |\ - AR5K_ISR_QCBRURN | AR5K_ISR_QTRIG) - /* * Secondary status registers [5211+] (0 - 4) * @@ -362,7 +347,7 @@ #define AR5K_SISR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */ #define AR5K_SISR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */ #define AR5K_SISR2_DTIM 0x20000000 /* [5212+] */ -#define AR5K_SISR2_TSFOOR 0x80000000 /* TSF Out of range */ +#define AR5K_SISR2_TSFOOR 0x80000000 /* TSF OOR (?) 
*/ #define AR5K_SISR3 0x0090 /* Register Address [5211+] */ #define AR5K_SISR3_QCBRORN 0x000003ff /* Mask for QCBRORN */ diff --git a/trunk/drivers/net/wireless/ath/ath5k/reset.c b/trunk/drivers/net/wireless/ath/ath5k/reset.c index 250db40b751d..2abac257b4b4 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/reset.c +++ b/trunk/drivers/net/wireless/ath/ath5k/reset.c @@ -19,9 +19,9 @@ * */ -/****************************\ - Reset function and helpers -\****************************/ +/*****************************\ + Reset functions and helpers +\*****************************/ #include @@ -33,36 +33,14 @@ #include "debug.h" -/** - * DOC: Reset function and helpers - * - * Here we implement the main reset routine, used to bring the card - * to a working state and ready to receive. We also handle routines - * that don't fit on other places such as clock, sleep and power control - */ - - /******************\ * Helper functions * \******************/ -/** - * ath5k_hw_register_timeout() - Poll a register for a flag/field change - * @ah: The &struct ath5k_hw - * @reg: The register to read - * @flag: The flag/field to check on the register - * @val: The field value we expect (if we check a field) - * @is_set: Instead of checking if the flag got cleared, check if it got set - * - * Some registers contain flags that indicate that an operation is - * running. We use this function to poll these registers and check - * if these flags get cleared. We also use it to poll a register - * field (containing multiple flags) until it gets a specific value. - * - * Returns -EAGAIN if we exceeded AR5K_TUNE_REGISTER_TIMEOUT * 15us or 0 +/* + * Check if a register write has been completed */ -int -ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val, +int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val, bool is_set) { int i; @@ -86,48 +64,35 @@ ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val, \*************************/ /** - * ath5k_hw_htoclock() - Translate usec to hw clock units + * ath5k_hw_htoclock - Translate usec to hw clock units + * * @ah: The &struct ath5k_hw * @usec: value in microseconds - * - * Translate usecs to hw clock units based on the current - * hw clock rate. - * - * Returns number of clock units */ -unsigned int -ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec) +unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec) { struct ath_common *common = ath5k_hw_common(ah); return usec * common->clockrate; } /** - * ath5k_hw_clocktoh() - Translate hw clock units to usec - * @ah: The &struct ath5k_hw + * ath5k_hw_clocktoh - Translate hw clock units to usec * @clock: value in hw clock units - * - * Translate hw clock units to usecs based on the current - * hw clock rate. - * - * Returns number of usecs */ -unsigned int -ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock) +unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock) { struct ath_common *common = ath5k_hw_common(ah); return clock / common->clockrate; } /** - * ath5k_hw_init_core_clock() - Initialize core clock - * @ah: The &struct ath5k_hw + * ath5k_hw_init_core_clock - Initialize core clock + * + * @ah The &struct ath5k_hw * - * Initialize core clock parameters (usec, usec32, latencies etc), - * based on current bwmode and chipset properties. + * Initialize core clock parameters (usec, usec32, latencies etc). 
*/ -static void -ath5k_hw_init_core_clock(struct ath5k_hw *ah) +static void ath5k_hw_init_core_clock(struct ath5k_hw *ah) { struct ieee80211_channel *channel = ah->ah_current_channel; struct ath_common *common = ath5k_hw_common(ah); @@ -262,21 +227,16 @@ ath5k_hw_init_core_clock(struct ath5k_hw *ah) } } -/** - * ath5k_hw_set_sleep_clock() - Setup sleep clock operation - * @ah: The &struct ath5k_hw - * @enable: Enable sleep clock operation (false to disable) - * +/* * If there is an external 32KHz crystal available, use it * as ref. clock instead of 32/40MHz clock and baseband clocks * to save power during sleep or restore normal 32/40MHz * operation. * - * NOTE: When operating on 32KHz certain PHY registers (27 - 31, - * 123 - 127) require delay on access. + * XXX: When operating on 32KHz certain PHY registers (27 - 31, + * 123 - 127) require delay on access. */ -static void -ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable) +static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; u32 scal, spending, sclock; @@ -380,19 +340,10 @@ ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable) * Reset/Sleep control * \*********************/ -/** - * ath5k_hw_nic_reset() - Reset the various chipset units - * @ah: The &struct ath5k_hw - * @val: Mask to indicate what units to reset - * - * To reset the various chipset units we need to write - * the mask to AR5K_RESET_CTL and poll the register until - * all flags are cleared. - * - * Returns 0 if we are O.K. or -EAGAIN (from athk5_hw_register_timeout) +/* + * Reset chipset */ -static int -ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val) +static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val) { int ret; u32 mask = val ? val : ~0U; @@ -406,7 +357,7 @@ ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val) ath5k_hw_reg_write(ah, val, AR5K_RESET_CTL); /* Wait at least 128 PCI clocks */ - usleep_range(15, 20); + udelay(15); if (ah->ah_version == AR5K_AR5210) { val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA @@ -431,17 +382,12 @@ ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val) return ret; } -/** - * ath5k_hw_wisoc_reset() - Reset AHB chipset - * @ah: The &struct ath5k_hw - * @flags: Mask to indicate what units to reset - * - * Same as ath5k_hw_nic_reset but for AHB based devices - * - * Returns 0 if we are O.K. or -EAGAIN (from athk5_hw_register_timeout) +/* + * Reset AHB chipset + * AR5K_RESET_CTL_PCU flag resets WMAC + * AR5K_RESET_CTL_BASEBAND flag resets WBB */ -static int -ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags) +static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags) { u32 mask = flags ? flags : ~0U; u32 __iomem *reg; @@ -476,7 +422,7 @@ ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags) regval = __raw_readl(reg); __raw_writel(regval | val, reg); regval = __raw_readl(reg); - usleep_range(100, 150); + udelay(100); /* Bring BB/MAC out of reset */ __raw_writel(regval & ~val, reg); @@ -493,23 +439,11 @@ ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags) return 0; } -/** - * ath5k_hw_set_power_mode() - Set power mode - * @ah: The &struct ath5k_hw - * @mode: One of enum ath5k_power_mode - * @set_chip: Set to true to write sleep control register - * @sleep_duration: How much time the device is allowed to sleep - * when sleep logic is enabled (in 128 microsecond increments). - * - * This function is used to configure sleep policy and allowed - * sleep modes. For more information check out the sleep control - * register on reg.h and STA_ID1. 
- * - * Returns 0 on success, -EIO if chip didn't wake up or -EINVAL if an invalid - * mode is requested. + +/* + * Sleep control */ -static int -ath5k_hw_set_power_mode(struct ath5k_hw *ah, enum ath5k_power_mode mode, +static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode, bool set_chip, u16 sleep_duration) { unsigned int i; @@ -559,7 +493,7 @@ ath5k_hw_set_power_mode(struct ath5k_hw *ah, enum ath5k_power_mode mode, ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE, AR5K_SLEEP_CTL); - usleep_range(15, 20); + udelay(15); for (i = 200; i > 0; i--) { /* Check if the chip did wake up */ @@ -568,7 +502,7 @@ ath5k_hw_set_power_mode(struct ath5k_hw *ah, enum ath5k_power_mode mode, break; /* Wait a bit and retry */ - usleep_range(50, 75); + udelay(50); ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE, AR5K_SLEEP_CTL); } @@ -589,20 +523,17 @@ ath5k_hw_set_power_mode(struct ath5k_hw *ah, enum ath5k_power_mode mode, return 0; } -/** - * ath5k_hw_on_hold() - Put device on hold - * @ah: The &struct ath5k_hw +/* + * Put device on hold * - * Put MAC and Baseband on warm reset and keep that state - * (don't clean sleep control register). After this MAC - * and Baseband are disabled and a full reset is needed - * to come back. This way we save as much power as possible + * Put MAC and Baseband on warm reset and + * keep that state (don't clean sleep control + * register). After this MAC and Baseband are + * disabled and a full reset is needed to come + * back. This way we save as much power as possible * without putting the card on full sleep. - * - * Returns 0 on success or -EIO on error */ -int -ath5k_hw_on_hold(struct ath5k_hw *ah) +int ath5k_hw_on_hold(struct ath5k_hw *ah) { struct pci_dev *pdev = ah->pdev; u32 bus_flags; @@ -612,7 +543,7 @@ ath5k_hw_on_hold(struct ath5k_hw *ah) return 0; /* Make sure device is awake */ - ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0); + ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0); if (ret) { ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n"); return ret; @@ -632,7 +563,7 @@ ath5k_hw_on_hold(struct ath5k_hw *ah) ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA | AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI); - usleep_range(2000, 2500); + mdelay(2); } else { ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_BASEBAND | bus_flags); @@ -644,7 +575,7 @@ ath5k_hw_on_hold(struct ath5k_hw *ah) } /* ...wakeup again!*/ - ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0); + ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0); if (ret) { ATH5K_ERR(ah, "failed to put device on hold\n"); return ret; @@ -653,18 +584,11 @@ ath5k_hw_on_hold(struct ath5k_hw *ah) return ret; } -/** - * ath5k_hw_nic_wakeup() - Force card out of sleep - * @ah: The &struct ath5k_hw - * @channel: The &struct ieee80211_channel - * +/* * Bring up MAC + PHY Chips and program PLL - * NOTE: Channel is NULL for the initial wakeup. - * - * Returns 0 on success, -EIO on hw failure or -EINVAL for false channel infos + * Channel is NULL for the initial wakeup. 
*/ -int -ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel) +int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel) { struct pci_dev *pdev = ah->pdev; u32 turbo, mode, clock, bus_flags; @@ -676,7 +600,7 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel) if ((ath5k_get_bus_type(ah) != ATH_AHB) || channel) { /* Wakeup the device */ - ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0); + ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0); if (ret) { ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n"); return ret; @@ -697,7 +621,7 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel) ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA | AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI); - usleep_range(2000, 2500); + mdelay(2); } else { if (ath5k_get_bus_type(ah) == ATH_AHB) ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU | @@ -713,7 +637,7 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel) } /* ...wakeup again!...*/ - ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0); + ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0); if (ret) { ATH5K_ERR(ah, "failed to resume the MAC Chip\n"); return ret; @@ -815,7 +739,7 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel) /* ...update PLL if needed */ if (ath5k_hw_reg_read(ah, AR5K_PHY_PLL) != clock) { ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL); - usleep_range(300, 350); + udelay(300); } /* ...set the PHY operating mode */ @@ -831,19 +755,8 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel) * Post-initvals register modifications * \**************************************/ -/** - * ath5k_hw_tweak_initval_settings() - Tweak initial settings - * @ah: The &struct ath5k_hw - * @channel: The &struct ieee80211_channel - * - * Some settings are not handled on initvals, e.g. bwmode - * settings, some phy settings, workarounds etc that in general - * don't fit anywhere else or are too small to introduce a separate - * function for each one. So we have this function to handle - * them all during reset and complete card's initialization. - */ -static void -ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah, +/* TODO: Half/Quarter rate */ +static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah, struct ieee80211_channel *channel) { if (ah->ah_version == AR5K_AR5212 && @@ -962,16 +875,7 @@ ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah, } } -/** - * ath5k_hw_commit_eeprom_settings() - Commit settings from EEPROM - * @ah: The &struct ath5k_hw - * @channel: The &struct ieee80211_channel - * - * Use settings stored on EEPROM to properly initialize the card - * based on various infos and per-mode calibration data. - */ -static void -ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah, +static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah, struct ieee80211_channel *channel) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; @@ -1125,23 +1029,7 @@ ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah, * Main reset function * \*********************/ -/** - * ath5k_hw_reset() - The main reset function - * @ah: The &struct ath5k_hw - * @op_mode: One of enum nl80211_iftype - * @channel: The &struct ieee80211_channel - * @fast: Enable fast channel switching - * @skip_pcu: Skip pcu initialization - * - * This is the function we call each time we want to (re)initialize the - * card and pass new settings to hw. 
We also call it when hw runs into - * trouble to make it come back to a working state. - * - * Returns 0 on success, -EINVAL on false op_mode or channel infos, or -EIO - * on failure. - */ -int -ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, +int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, struct ieee80211_channel *channel, bool fast, bool skip_pcu) { u32 s_seq[10], s_led[3], tsf_up, tsf_lo; @@ -1159,7 +1047,7 @@ ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, */ if (fast && (ah->ah_radio != AR5K_RF2413) && (ah->ah_radio != AR5K_RF5413)) - fast = false; + fast = 0; /* Disable sleep clock operation * to avoid register access delay on certain @@ -1185,7 +1073,7 @@ ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, if (ret && fast) { ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "DMA didn't stop, falling back to normal reset\n"); - fast = false; + fast = 0; /* Non fatal, just continue with * normal reset */ ret = 0; @@ -1354,7 +1242,7 @@ ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, /* * Initialize PCU */ - ath5k_hw_pcu_init(ah, op_mode); + ath5k_hw_pcu_init(ah, op_mode, mode); /* * Initialize PHY diff --git a/trunk/drivers/net/wireless/ath/ath5k/rfbuffer.h b/trunk/drivers/net/wireless/ath/ath5k/rfbuffer.h index aed34d9954c0..5d11c23b4297 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/rfbuffer.h +++ b/trunk/drivers/net/wireless/ath/ath5k/rfbuffer.h @@ -18,9 +18,7 @@ */ -/** - * DOC: RF Buffer registers - * +/* * There are some special registers on the RF chip * that control various operation settings related mostly to * the analog parts (channel, gain adjustment etc). @@ -46,63 +44,40 @@ */ -/** - * struct ath5k_ini_rfbuffer - Initial RF Buffer settings - * @rfb_bank: RF Bank number - * @rfb_ctrl_register: RF Buffer control register - * @rfb_mode_data: RF Buffer data for each mode - * +/* * Struct to hold default mode specific RF - * register values (RF Banks) for each chip. + * register values (RF Banks) */ struct ath5k_ini_rfbuffer { - u8 rfb_bank; - u16 rfb_ctrl_register; - u32 rfb_mode_data[3]; + u8 rfb_bank; /* RF Bank number */ + u16 rfb_ctrl_register; /* RF Buffer control register */ + u32 rfb_mode_data[3]; /* RF Buffer data for each mode */ }; -/** - * struct ath5k_rfb_field - An RF Buffer field (register/value) - * @len: Field length - * @pos: Offset on the raw packet - * @col: Used for shifting - * +/* * Struct to hold RF Buffer field * infos used to access certain RF * analog registers */ struct ath5k_rfb_field { - u8 len; - u16 pos; - u8 col; + u8 len; /* Field length */ + u16 pos; /* Offset on the raw packet */ + u8 col; /* Column -used for shifting */ }; -/** - * struct ath5k_rf_reg - RF analog register definition - * @bank: RF Buffer Bank number - * @index: Register's index on ath5k_rf_regx_idx - * @field: The &struct ath5k_rfb_field - * - * We use this struct to define the set of RF registers - * on each chip that we want to tweak. Some RF registers - * are common between different chip versions so this saves - * us space and complexity because we can refer to an rf - * register by it's index no matter what chip we work with - * as long as it has that register. 
+/* + * RF analog register definition */ struct ath5k_rf_reg { - u8 bank; - u8 index; - struct ath5k_rfb_field field; + u8 bank; /* RF Buffer Bank number */ + u8 index; /* Register's index on rf_regs_idx */ + struct ath5k_rfb_field field; /* RF Buffer field for this register */ }; -/** - * enum ath5k_rf_regs_idx - Map RF registers to indexes - * +/* Map RF registers to indexes * We do this to handle common bits and make our * life easier by using an index for each register - * instead of a full rfb_field - */ + * instead of a full rfb_field */ enum ath5k_rf_regs_idx { /* BANK 2 */ AR5K_RF_TURBO = 0, diff --git a/trunk/drivers/net/wireless/ath/ath5k/rfgain.h b/trunk/drivers/net/wireless/ath/ath5k/rfgain.h index 4d21df0e5975..ebfae052d89e 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/rfgain.h +++ b/trunk/drivers/net/wireless/ath/ath5k/rfgain.h @@ -18,17 +18,13 @@ * */ -/** - * struct ath5k_ini_rfgain - RF Gain table - * @rfg_register: RF Gain register address - * @rfg_value: Register value for 5 and 2GHz - * +/* * Mode-specific RF Gain table (64bytes) for RF5111/5112 * (RF5110 only comes with AR5210 and only supports a/turbo a mode so initial * RF Gain values are included in AR5K_AR5210_INI) */ struct ath5k_ini_rfgain { - u16 rfg_register; + u16 rfg_register; /* RF Gain register address */ u32 rfg_value[2]; /* [freq (see below)] */ }; @@ -459,31 +455,18 @@ static const struct ath5k_ini_rfgain rfgain_2425[] = { #define AR5K_GAIN_CHECK_ADJUST(_g) \ ((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high) -/** - * struct ath5k_gain_opt_step - An RF gain optimization step - * @gos_param: Set of parameters - * @gos_gain: Gain - */ struct ath5k_gain_opt_step { s8 gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS]; s8 gos_gain; }; -/** - * struct ath5k_gain_opt - RF Gain optimization ladder - * @go_default: The default step - * @go_steps_count: How many optimization steps - * @go_step: Array of &struct ath5k_gain_opt_step - */ struct ath5k_gain_opt { u8 go_default; u8 go_steps_count; const struct ath5k_gain_opt_step go_step[AR5K_GAIN_STEP_COUNT]; }; - /* - * RF5111 * Parameters on gos_param: * 1) Tx clip PHY register * 2) PWD 90 RF register @@ -507,7 +490,6 @@ static const struct ath5k_gain_opt rfgain_opt_5111 = { }; /* - * RF5112 * Parameters on gos_param: * 1) Mixgain ovr RF register * 2) PWD 138 RF register diff --git a/trunk/drivers/net/wireless/ath/ath5k/trace.h b/trunk/drivers/net/wireless/ath/ath5k/trace.h index 00f015819344..39f002ed4a88 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/trace.h +++ b/trunk/drivers/net/wireless/ath/ath5k/trace.h @@ -3,8 +3,7 @@ #include - -#if !defined(CONFIG_ATH5K_TRACER) || defined(__CHECKER__) +#ifndef CONFIG_ATH5K_TRACER #undef TRACE_EVENT #define TRACE_EVENT(name, proto, ...) 
\ static inline void trace_ ## name(proto) {} @@ -94,7 +93,7 @@ TRACE_EVENT(ath5k_tx_complete, #endif /* __TRACE_ATH5K_H */ -#if defined(CONFIG_ATH5K_TRACER) && !defined(__CHECKER__) +#ifdef CONFIG_ATH5K_TRACER #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH ../../drivers/net/wireless/ath/ath5k diff --git a/trunk/drivers/net/wireless/ath/ath6kl/Makefile b/trunk/drivers/net/wireless/ath/ath6kl/Makefile index 707069303550..8f7a0d1c290c 100644 --- a/trunk/drivers/net/wireless/ath/ath6kl/Makefile +++ b/trunk/drivers/net/wireless/ath/ath6kl/Makefile @@ -23,7 +23,7 @@ obj-$(CONFIG_ATH6KL) := ath6kl.o ath6kl-y += debug.o -ath6kl-y += hif.o +ath6kl-y += htc_hif.o ath6kl-y += htc.o ath6kl-y += bmi.o ath6kl-y += cfg80211.o diff --git a/trunk/drivers/net/wireless/ath/ath6kl/bmi.c b/trunk/drivers/net/wireless/ath/ath6kl/bmi.c index bce3575c310a..c5d11cc536e0 100644 --- a/trunk/drivers/net/wireless/ath/ath6kl/bmi.c +++ b/trunk/drivers/net/wireless/ath/ath6kl/bmi.c @@ -19,6 +19,165 @@ #include "target.h" #include "debug.h" +static int ath6kl_get_bmi_cmd_credits(struct ath6kl *ar) +{ + u32 addr; + unsigned long timeout; + int ret; + + ar->bmi.cmd_credits = 0; + + /* Read the counter register to get the command credits */ + addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4; + + timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT); + while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) { + + /* + * Hit the credit counter with a 4-byte access, the first byte + * read will hit the counter and cause a decrement, while the + * remaining 3 bytes has no effect. The rationale behind this + * is to make all HIF accesses 4-byte aligned. + */ + ret = hif_read_write_sync(ar, addr, + (u8 *)&ar->bmi.cmd_credits, 4, + HIF_RD_SYNC_BYTE_INC); + if (ret) { + ath6kl_err("Unable to decrement the command credit count register: %d\n", + ret); + return ret; + } + + /* The counter is only 8 bits. + * Ignore anything in the upper 3 bytes + */ + ar->bmi.cmd_credits &= 0xFF; + } + + if (!ar->bmi.cmd_credits) { + ath6kl_err("bmi communication timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar) +{ + unsigned long timeout; + u32 rx_word = 0; + int ret = 0; + + timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT); + while (time_before(jiffies, timeout) && !rx_word) { + ret = hif_read_write_sync(ar, RX_LOOKAHEAD_VALID_ADDRESS, + (u8 *)&rx_word, sizeof(rx_word), + HIF_RD_SYNC_BYTE_INC); + if (ret) { + ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n"); + return ret; + } + + /* all we really want is one bit */ + rx_word &= (1 << ENDPOINT1); + } + + if (!rx_word) { + ath6kl_err("bmi_recv_buf FIFO empty\n"); + return -EINVAL; + } + + return ret; +} + +static int ath6kl_bmi_send_buf(struct ath6kl *ar, u8 *buf, u32 len) +{ + int ret; + u32 addr; + + ret = ath6kl_get_bmi_cmd_credits(ar); + if (ret) + return ret; + + addr = ar->mbox_info.htc_addr; + + ret = hif_read_write_sync(ar, addr, buf, len, + HIF_WR_SYNC_BYTE_INC); + if (ret) + ath6kl_err("unable to send the bmi data to the device\n"); + + return ret; +} + +static int ath6kl_bmi_recv_buf(struct ath6kl *ar, u8 *buf, u32 len) +{ + int ret; + u32 addr; + + /* + * During normal bootup, small reads may be required. + * Rather than issue an HIF Read and then wait as the Target + * adds successive bytes to the FIFO, we wait here until + * we know that response data is available. 
+ * + * This allows us to cleanly timeout on an unexpected + * Target failure rather than risk problems at the HIF level. + * In particular, this avoids SDIO timeouts and possibly garbage + * data on some host controllers. And on an interconnect + * such as Compact Flash (as well as some SDIO masters) which + * does not provide any indication on data timeout, it avoids + * a potential hang or garbage response. + * + * Synchronization is more difficult for reads larger than the + * size of the MBOX FIFO (128B), because the Target is unable + * to push the 129th byte of data until AFTER the Host posts an + * HIF Read and removes some FIFO data. So for large reads the + * Host proceeds to post an HIF Read BEFORE all the data is + * actually available to read. Fortunately, large BMI reads do + * not occur in practice -- they're supported for debug/development. + * + * So Host/Target BMI synchronization is divided into these cases: + * CASE 1: length < 4 + * Should not happen + * + * CASE 2: 4 <= length <= 128 + * Wait for first 4 bytes to be in FIFO + * If CONSERVATIVE_BMI_READ is enabled, also wait for + * a BMI command credit, which indicates that the ENTIRE + * response is available in the the FIFO + * + * CASE 3: length > 128 + * Wait for the first 4 bytes to be in FIFO + * + * For most uses, a small timeout should be sufficient and we will + * usually see a response quickly; but there may be some unusual + * (debug) cases of BMI_EXECUTE where we want an larger timeout. + * For now, we use an unbounded busy loop while waiting for + * BMI_EXECUTE. + * + * If BMI_EXECUTE ever needs to support longer-latency execution, + * especially in production, this code needs to be enhanced to sleep + * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently + * a function of Host processor speed. 
+ */ + if (len >= 4) { /* NB: Currently, always true */ + ret = ath6kl_bmi_get_rx_lkahd(ar); + if (ret) + return ret; + } + + addr = ar->mbox_info.htc_addr; + ret = hif_read_write_sync(ar, addr, buf, len, + HIF_RD_SYNC_BYTE_INC); + if (ret) { + ath6kl_err("Unable to read the bmi data from the device: %d\n", + ret); + return ret; + } + + return 0; +} + int ath6kl_bmi_done(struct ath6kl *ar) { int ret; @@ -31,12 +190,14 @@ int ath6kl_bmi_done(struct ath6kl *ar) ar->bmi.done_sent = true; - ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid)); + ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid)); if (ret) { ath6kl_err("Unable to send bmi done: %d\n", ret); return ret; } + ath6kl_bmi_cleanup(ar); + return 0; } @@ -51,13 +212,13 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar, return -EACCES; } - ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid)); + ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid)); if (ret) { ath6kl_err("Unable to send get target info: %d\n", ret); return ret; } - ret = ath6kl_hif_bmi_read(ar, (u8 *)&targ_info->version, + ret = ath6kl_bmi_recv_buf(ar, (u8 *)&targ_info->version, sizeof(targ_info->version)); if (ret) { ath6kl_err("Unable to recv target info: %d\n", ret); @@ -66,7 +227,7 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar, if (le32_to_cpu(targ_info->version) == TARGET_VERSION_SENTINAL) { /* Determine how many bytes are in the Target's targ_info */ - ret = ath6kl_hif_bmi_read(ar, + ret = ath6kl_bmi_recv_buf(ar, (u8 *)&targ_info->byte_count, sizeof(targ_info->byte_count)); if (ret) { @@ -85,7 +246,7 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar, } /* Read the remainder of the targ_info */ - ret = ath6kl_hif_bmi_read(ar, + ret = ath6kl_bmi_recv_buf(ar, ((u8 *)targ_info) + sizeof(targ_info->byte_count), sizeof(*targ_info) - @@ -117,8 +278,8 @@ int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) return -EACCES; } - size = ar->bmi.max_data_size + sizeof(cid) + sizeof(addr) + sizeof(len); - if (size > ar->bmi.max_cmd_size) { + size = BMI_DATASZ_MAX + sizeof(cid) + sizeof(addr) + sizeof(len); + if (size > MAX_BMI_CMDBUF_SZ) { WARN_ON(1); return -EINVAL; } @@ -131,8 +292,8 @@ int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) len_remain = len; while (len_remain) { - rx_len = (len_remain < ar->bmi.max_data_size) ? - len_remain : ar->bmi.max_data_size; + rx_len = (len_remain < BMI_DATASZ_MAX) ? 
+ len_remain : BMI_DATASZ_MAX; offset = 0; memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); offset += sizeof(cid); @@ -141,13 +302,13 @@ int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) memcpy(&(ar->bmi.cmd_buf[offset]), &rx_len, sizeof(rx_len)); offset += sizeof(len); - ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); + ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to write to the device: %d\n", ret); return ret; } - ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, rx_len); + ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, rx_len); if (ret) { ath6kl_err("Unable to read from the device: %d\n", ret); @@ -167,7 +328,7 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) u32 offset; u32 len_remain, tx_len; const u32 header = sizeof(cid) + sizeof(addr) + sizeof(len); - u8 aligned_buf[400]; + u8 aligned_buf[BMI_DATASZ_MAX]; u8 *src; if (ar->bmi.done_sent) { @@ -175,15 +336,12 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) return -EACCES; } - if ((ar->bmi.max_data_size + header) > ar->bmi.max_cmd_size) { + if ((BMI_DATASZ_MAX + header) > MAX_BMI_CMDBUF_SZ) { WARN_ON(1); return -EINVAL; } - if (WARN_ON(ar->bmi.max_data_size > sizeof(aligned_buf))) - return -E2BIG; - - memset(ar->bmi.cmd_buf, 0, ar->bmi.max_data_size + header); + memset(ar->bmi.cmd_buf, 0, BMI_DATASZ_MAX + header); ath6kl_dbg(ATH6KL_DBG_BMI, "bmi write memory: addr: 0x%x, len: %d\n", addr, len); @@ -192,7 +350,7 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) while (len_remain) { src = &buf[len - len_remain]; - if (len_remain < (ar->bmi.max_data_size - header)) { + if (len_remain < (BMI_DATASZ_MAX - header)) { if (len_remain & 3) { /* align it with 4 bytes */ len_remain = len_remain + @@ -202,7 +360,7 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) } tx_len = len_remain; } else { - tx_len = (ar->bmi.max_data_size - header); + tx_len = (BMI_DATASZ_MAX - header); } offset = 0; @@ -215,7 +373,7 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) memcpy(&(ar->bmi.cmd_buf[offset]), src, tx_len); offset += tx_len; - ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); + ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to write to the device: %d\n", ret); @@ -240,7 +398,7 @@ int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param) } size = sizeof(cid) + sizeof(addr) + sizeof(param); - if (size > ar->bmi.max_cmd_size) { + if (size > MAX_BMI_CMDBUF_SZ) { WARN_ON(1); return -EINVAL; } @@ -257,13 +415,13 @@ int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param) memcpy(&(ar->bmi.cmd_buf[offset]), param, sizeof(*param)); offset += sizeof(*param); - ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); + ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to write to the device: %d\n", ret); return ret; } - ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param)); + ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param)); if (ret) { ath6kl_err("Unable to read from the device: %d\n", ret); return ret; @@ -287,7 +445,7 @@ int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr) } size = sizeof(cid) + sizeof(addr); - if (size > ar->bmi.max_cmd_size) { + if (size > MAX_BMI_CMDBUF_SZ) { WARN_ON(1); return -EINVAL; } @@ -301,7 +459,7 @@ int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr) memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); offset += 
sizeof(addr); - ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); + ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to write to the device: %d\n", ret); return ret; @@ -323,7 +481,7 @@ int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param) } size = sizeof(cid) + sizeof(addr); - if (size > ar->bmi.max_cmd_size) { + if (size > MAX_BMI_CMDBUF_SZ) { WARN_ON(1); return -EINVAL; } @@ -337,13 +495,13 @@ int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param) memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); offset += sizeof(addr); - ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); + ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to write to the device: %d\n", ret); return ret; } - ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param)); + ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param)); if (ret) { ath6kl_err("Unable to read from the device: %d\n", ret); return ret; @@ -366,7 +524,7 @@ int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param) } size = sizeof(cid) + sizeof(addr) + sizeof(param); - if (size > ar->bmi.max_cmd_size) { + if (size > MAX_BMI_CMDBUF_SZ) { WARN_ON(1); return -EINVAL; } @@ -384,7 +542,7 @@ int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param) memcpy(&(ar->bmi.cmd_buf[offset]), ¶m, sizeof(param)); offset += sizeof(param); - ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); + ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to write to the device: %d\n", ret); return ret; @@ -407,8 +565,8 @@ int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len) return -EACCES; } - size = ar->bmi.max_data_size + header; - if (size > ar->bmi.max_cmd_size) { + size = BMI_DATASZ_MAX + header; + if (size > MAX_BMI_CMDBUF_SZ) { WARN_ON(1); return -EINVAL; } @@ -419,8 +577,8 @@ int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len) len_remain = len; while (len_remain) { - tx_len = (len_remain < (ar->bmi.max_data_size - header)) ? - len_remain : (ar->bmi.max_data_size - header); + tx_len = (len_remain < (BMI_DATASZ_MAX - header)) ? 
+ len_remain : (BMI_DATASZ_MAX - header); offset = 0; memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); @@ -431,7 +589,7 @@ int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len) tx_len); offset += tx_len; - ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); + ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to write to the device: %d\n", ret); @@ -457,7 +615,7 @@ int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr) } size = sizeof(cid) + sizeof(addr); - if (size > ar->bmi.max_cmd_size) { + if (size > MAX_BMI_CMDBUF_SZ) { WARN_ON(1); return -EINVAL; } @@ -473,7 +631,7 @@ int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr) memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); offset += sizeof(addr); - ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); + ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to start LZ stream to the device: %d\n", ret); @@ -514,20 +672,10 @@ int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) return ret; } -void ath6kl_bmi_reset(struct ath6kl *ar) -{ - ar->bmi.done_sent = false; -} - int ath6kl_bmi_init(struct ath6kl *ar) { - if (WARN_ON(ar->bmi.max_data_size == 0)) - return -EINVAL; - - /* cmd + addr + len + data_size */ - ar->bmi.max_cmd_size = ar->bmi.max_data_size + (sizeof(u32) * 3); + ar->bmi.cmd_buf = kzalloc(MAX_BMI_CMDBUF_SZ, GFP_ATOMIC); - ar->bmi.cmd_buf = kzalloc(ar->bmi.max_cmd_size, GFP_ATOMIC); if (!ar->bmi.cmd_buf) return -ENOMEM; diff --git a/trunk/drivers/net/wireless/ath/ath6kl/bmi.h b/trunk/drivers/net/wireless/ath/ath6kl/bmi.h index f1ca6812456d..96851d5df24b 100644 --- a/trunk/drivers/net/wireless/ath/ath6kl/bmi.h +++ b/trunk/drivers/net/wireless/ath/ath6kl/bmi.h @@ -44,6 +44,12 @@ * BMI handles all required Target-side cache flushing. 
*/ +#define MAX_BMI_CMDBUF_SZ (BMI_DATASZ_MAX + \ + (sizeof(u32) * 3 /* cmd + addr + len */)) + +/* Maximum data size used for BMI transfers */ +#define BMI_DATASZ_MAX 256 + /* BMI Commands */ #define BMI_NO_COMMAND 0 @@ -224,8 +230,6 @@ struct ath6kl_bmi_target_info { int ath6kl_bmi_init(struct ath6kl *ar); void ath6kl_bmi_cleanup(struct ath6kl *ar); -void ath6kl_bmi_reset(struct ath6kl *ar); - int ath6kl_bmi_done(struct ath6kl *ar); int ath6kl_bmi_get_target_info(struct ath6kl *ar, struct ath6kl_bmi_target_info *targ_info); diff --git a/trunk/drivers/net/wireless/ath/ath6kl/cfg80211.c b/trunk/drivers/net/wireless/ath/ath6kl/cfg80211.c index 6c59a217b1a1..f517eb8f7b44 100644 --- a/trunk/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/trunk/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -123,50 +123,17 @@ static struct ieee80211_supported_band ath6kl_band_5ghz = { .bitrates = ath6kl_a_rates, }; -#define CCKM_KRK_CIPHER_SUITE 0x004096ff /* use for KRK */ - -/* returns true if scheduled scan was stopped */ -static bool __ath6kl_cfg80211_sscan_stop(struct ath6kl_vif *vif) -{ - struct ath6kl *ar = vif->ar; - - if (ar->state != ATH6KL_STATE_SCHED_SCAN) - return false; - - del_timer_sync(&vif->sched_scan_timer); - - ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx, - ATH6KL_HOST_MODE_AWAKE); - - ar->state = ATH6KL_STATE_ON; - - return true; -} - -static void ath6kl_cfg80211_sscan_disable(struct ath6kl_vif *vif) -{ - struct ath6kl *ar = vif->ar; - bool stopped; - - stopped = __ath6kl_cfg80211_sscan_stop(vif); - - if (!stopped) - return; - - cfg80211_sched_scan_stopped(ar->wiphy); -} - -static int ath6kl_set_wpa_version(struct ath6kl_vif *vif, +static int ath6kl_set_wpa_version(struct ath6kl *ar, enum nl80211_wpa_versions wpa_version) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: %u\n", __func__, wpa_version); if (!wpa_version) { - vif->auth_mode = NONE_AUTH; + ar->auth_mode = NONE_AUTH; } else if (wpa_version & NL80211_WPA_VERSION_2) { - vif->auth_mode = WPA2_AUTH; + ar->auth_mode = WPA2_AUTH; } else if (wpa_version & NL80211_WPA_VERSION_1) { - vif->auth_mode = WPA_AUTH; + ar->auth_mode = WPA_AUTH; } else { ath6kl_err("%s: %u not supported\n", __func__, wpa_version); return -ENOTSUPP; @@ -175,24 +142,25 @@ static int ath6kl_set_wpa_version(struct ath6kl_vif *vif, return 0; } -static int ath6kl_set_auth_type(struct ath6kl_vif *vif, +static int ath6kl_set_auth_type(struct ath6kl *ar, enum nl80211_auth_type auth_type) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, auth_type); switch (auth_type) { case NL80211_AUTHTYPE_OPEN_SYSTEM: - vif->dot11_auth_mode = OPEN_AUTH; + ar->dot11_auth_mode = OPEN_AUTH; break; case NL80211_AUTHTYPE_SHARED_KEY: - vif->dot11_auth_mode = SHARED_AUTH; + ar->dot11_auth_mode = SHARED_AUTH; break; case NL80211_AUTHTYPE_NETWORK_EAP: - vif->dot11_auth_mode = LEAP_AUTH; + ar->dot11_auth_mode = LEAP_AUTH; break; case NL80211_AUTHTYPE_AUTOMATIC: - vif->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH; + ar->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH; break; default: @@ -203,11 +171,11 @@ static int ath6kl_set_auth_type(struct ath6kl_vif *vif, return 0; } -static int ath6kl_set_cipher(struct ath6kl_vif *vif, u32 cipher, bool ucast) +static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast) { - u8 *ar_cipher = ucast ? &vif->prwise_crypto : &vif->grp_crypto; - u8 *ar_cipher_len = ucast ? &vif->prwise_crypto_len : - &vif->grp_crypto_len; + u8 *ar_cipher = ucast ? &ar->prwise_crypto : &ar->grp_crypto; + u8 *ar_cipher_len = ucast ? 
&ar->prwise_crypto_len : + &ar->grp_crypto_len; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: cipher 0x%x, ucast %u\n", __func__, cipher, ucast); @@ -234,10 +202,6 @@ static int ath6kl_set_cipher(struct ath6kl_vif *vif, u32 cipher, bool ucast) *ar_cipher = AES_CRYPT; *ar_cipher_len = 0; break; - case WLAN_CIPHER_SUITE_SMS4: - *ar_cipher = WAPI_CRYPT; - *ar_cipher_len = 0; - break; default: ath6kl_err("cipher 0x%x not supported\n", cipher); return -ENOTSUPP; @@ -246,35 +210,28 @@ static int ath6kl_set_cipher(struct ath6kl_vif *vif, u32 cipher, bool ucast) return 0; } -static void ath6kl_set_key_mgmt(struct ath6kl_vif *vif, u32 key_mgmt) +static void ath6kl_set_key_mgmt(struct ath6kl *ar, u32 key_mgmt) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, key_mgmt); if (key_mgmt == WLAN_AKM_SUITE_PSK) { - if (vif->auth_mode == WPA_AUTH) - vif->auth_mode = WPA_PSK_AUTH; - else if (vif->auth_mode == WPA2_AUTH) - vif->auth_mode = WPA2_PSK_AUTH; - } else if (key_mgmt == 0x00409600) { - if (vif->auth_mode == WPA_AUTH) - vif->auth_mode = WPA_AUTH_CCKM; - else if (vif->auth_mode == WPA2_AUTH) - vif->auth_mode = WPA2_AUTH_CCKM; + if (ar->auth_mode == WPA_AUTH) + ar->auth_mode = WPA_PSK_AUTH; + else if (ar->auth_mode == WPA2_AUTH) + ar->auth_mode = WPA2_PSK_AUTH; } else if (key_mgmt != WLAN_AKM_SUITE_8021X) { - vif->auth_mode = NONE_AUTH; + ar->auth_mode = NONE_AUTH; } } -static bool ath6kl_cfg80211_ready(struct ath6kl_vif *vif) +static bool ath6kl_cfg80211_ready(struct ath6kl *ar) { - struct ath6kl *ar = vif->ar; - if (!test_bit(WMI_READY, &ar->flag)) { ath6kl_err("wmi is not ready\n"); return false; } - if (!test_bit(WLAN_ENABLED, &vif->flags)) { + if (!test_bit(WLAN_ENABLED, &ar->flag)) { ath6kl_err("wlan disabled\n"); return false; } @@ -282,146 +239,15 @@ static bool ath6kl_cfg80211_ready(struct ath6kl_vif *vif) return true; } -static bool ath6kl_is_wpa_ie(const u8 *pos) -{ - return pos[0] == WLAN_EID_WPA && pos[1] >= 4 && - pos[2] == 0x00 && pos[3] == 0x50 && - pos[4] == 0xf2 && pos[5] == 0x01; -} - -static bool ath6kl_is_rsn_ie(const u8 *pos) -{ - return pos[0] == WLAN_EID_RSN; -} - -static bool ath6kl_is_wps_ie(const u8 *pos) -{ - return (pos[0] == WLAN_EID_VENDOR_SPECIFIC && - pos[1] >= 4 && - pos[2] == 0x00 && pos[3] == 0x50 && pos[4] == 0xf2 && - pos[5] == 0x04); -} - -static int ath6kl_set_assoc_req_ies(struct ath6kl_vif *vif, const u8 *ies, - size_t ies_len) -{ - struct ath6kl *ar = vif->ar; - const u8 *pos; - u8 *buf = NULL; - size_t len = 0; - int ret; - - /* - * Clear previously set flag - */ - - ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG; - - /* - * Filter out RSN/WPA IE(s) - */ - - if (ies && ies_len) { - buf = kmalloc(ies_len, GFP_KERNEL); - if (buf == NULL) - return -ENOMEM; - pos = ies; - - while (pos + 1 < ies + ies_len) { - if (pos + 2 + pos[1] > ies + ies_len) - break; - if (!(ath6kl_is_wpa_ie(pos) || ath6kl_is_rsn_ie(pos))) { - memcpy(buf + len, pos, 2 + pos[1]); - len += 2 + pos[1]; - } - - if (ath6kl_is_wps_ie(pos)) - ar->connect_ctrl_flags |= CONNECT_WPS_FLAG; - - pos += 2 + pos[1]; - } - } - - ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, - WMI_FRAME_ASSOC_REQ, buf, len); - kfree(buf); - return ret; -} - -static int ath6kl_nliftype_to_drv_iftype(enum nl80211_iftype type, u8 *nw_type) -{ - switch (type) { - case NL80211_IFTYPE_STATION: - *nw_type = INFRA_NETWORK; - break; - case NL80211_IFTYPE_ADHOC: - *nw_type = ADHOC_NETWORK; - break; - case NL80211_IFTYPE_AP: - *nw_type = AP_NETWORK; - break; - case NL80211_IFTYPE_P2P_CLIENT: - *nw_type = INFRA_NETWORK; - break; 
- case NL80211_IFTYPE_P2P_GO: - *nw_type = AP_NETWORK; - break; - default: - ath6kl_err("invalid interface type %u\n", type); - return -ENOTSUPP; - } - - return 0; -} - -static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type, - u8 *if_idx, u8 *nw_type) -{ - int i; - - if (ath6kl_nliftype_to_drv_iftype(type, nw_type)) - return false; - - if (ar->ibss_if_active || ((type == NL80211_IFTYPE_ADHOC) && - ar->num_vif)) - return false; - - if (type == NL80211_IFTYPE_STATION || - type == NL80211_IFTYPE_AP || type == NL80211_IFTYPE_ADHOC) { - for (i = 0; i < ar->vif_max; i++) { - if ((ar->avail_idx_map >> i) & BIT(0)) { - *if_idx = i; - return true; - } - } - } - - if (type == NL80211_IFTYPE_P2P_CLIENT || - type == NL80211_IFTYPE_P2P_GO) { - for (i = ar->max_norm_iface; i < ar->vif_max; i++) { - if ((ar->avail_idx_map >> i) & BIT(0)) { - *if_idx = i; - return true; - } - } - } - - return false; -} - static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_connect_params *sme) { struct ath6kl *ar = ath6kl_priv(dev); - struct ath6kl_vif *vif = netdev_priv(dev); int status; - u8 nw_subtype = (ar->p2p) ? SUBTYPE_P2PDEV : SUBTYPE_NONE; - ath6kl_cfg80211_sscan_disable(vif); + ar->sme_state = SME_CONNECTING; - vif->sme_state = SME_CONNECTING; - - if (!ath6kl_cfg80211_ready(vif)) + if (!ath6kl_cfg80211_ready(ar)) return -EIO; if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) { @@ -461,22 +287,12 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, } } - if (sme->ie && (sme->ie_len > 0)) { - status = ath6kl_set_assoc_req_ies(vif, sme->ie, sme->ie_len); - if (status) { - up(&ar->sem); - return status; - } - } else - ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG; - - if (test_bit(CONNECTED, &vif->flags) && - vif->ssid_len == sme->ssid_len && - !memcmp(vif->ssid, sme->ssid, vif->ssid_len)) { - vif->reconnect_flag = true; - status = ath6kl_wmi_reconnect_cmd(ar->wmi, vif->fw_vif_idx, - vif->req_bssid, - vif->ch_hint); + if (test_bit(CONNECTED, &ar->flag) && + ar->ssid_len == sme->ssid_len && + !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) { + ar->reconnect_flag = true; + status = ath6kl_wmi_reconnect_cmd(ar->wmi, ar->req_bssid, + ar->ch_hint); up(&ar->sem); if (status) { @@ -484,43 +300,42 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, return -EIO; } return 0; - } else if (vif->ssid_len == sme->ssid_len && - !memcmp(vif->ssid, sme->ssid, vif->ssid_len)) { - ath6kl_disconnect(vif); + } else if (ar->ssid_len == sme->ssid_len && + !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) { + ath6kl_disconnect(ar); } - memset(vif->ssid, 0, sizeof(vif->ssid)); - vif->ssid_len = sme->ssid_len; - memcpy(vif->ssid, sme->ssid, sme->ssid_len); + memset(ar->ssid, 0, sizeof(ar->ssid)); + ar->ssid_len = sme->ssid_len; + memcpy(ar->ssid, sme->ssid, sme->ssid_len); if (sme->channel) - vif->ch_hint = sme->channel->center_freq; + ar->ch_hint = sme->channel->center_freq; - memset(vif->req_bssid, 0, sizeof(vif->req_bssid)); + memset(ar->req_bssid, 0, sizeof(ar->req_bssid)); if (sme->bssid && !is_broadcast_ether_addr(sme->bssid)) - memcpy(vif->req_bssid, sme->bssid, sizeof(vif->req_bssid)); + memcpy(ar->req_bssid, sme->bssid, sizeof(ar->req_bssid)); - ath6kl_set_wpa_version(vif, sme->crypto.wpa_versions); + ath6kl_set_wpa_version(ar, sme->crypto.wpa_versions); - status = ath6kl_set_auth_type(vif, sme->auth_type); + status = ath6kl_set_auth_type(ar, sme->auth_type); if (status) { up(&ar->sem); return status; } if 
(sme->crypto.n_ciphers_pairwise) - ath6kl_set_cipher(vif, sme->crypto.ciphers_pairwise[0], true); + ath6kl_set_cipher(ar, sme->crypto.ciphers_pairwise[0], true); else - ath6kl_set_cipher(vif, 0, true); + ath6kl_set_cipher(ar, 0, true); - ath6kl_set_cipher(vif, sme->crypto.cipher_group, false); + ath6kl_set_cipher(ar, sme->crypto.cipher_group, false); if (sme->crypto.n_akm_suites) - ath6kl_set_key_mgmt(vif, sme->crypto.akm_suites[0]); + ath6kl_set_key_mgmt(ar, sme->crypto.akm_suites[0]); if ((sme->key_len) && - (vif->auth_mode == NONE_AUTH) && - (vif->prwise_crypto == WEP_CRYPT)) { + (ar->auth_mode == NONE_AUTH) && (ar->prwise_crypto == WEP_CRYPT)) { struct ath6kl_key *key = NULL; if (sme->key_idx < WMI_MIN_KEY_INDEX || @@ -531,60 +346,56 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, return -ENOENT; } - key = &vif->keys[sme->key_idx]; + key = &ar->keys[sme->key_idx]; key->key_len = sme->key_len; memcpy(key->key, sme->key, key->key_len); - key->cipher = vif->prwise_crypto; - vif->def_txkey_index = sme->key_idx; + key->cipher = ar->prwise_crypto; + ar->def_txkey_index = sme->key_idx; - ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, sme->key_idx, - vif->prwise_crypto, + ath6kl_wmi_addkey_cmd(ar->wmi, sme->key_idx, + ar->prwise_crypto, GROUP_USAGE | TX_USAGE, key->key_len, - NULL, 0, + NULL, key->key, KEY_OP_INIT_VAL, NULL, NO_SYNC_WMIFLAG); } if (!ar->usr_bss_filter) { - clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags); - if (ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, - ALL_BSS_FILTER, 0) != 0) { + clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag); + if (ath6kl_wmi_bssfilter_cmd(ar->wmi, ALL_BSS_FILTER, 0) != 0) { ath6kl_err("couldn't set bss filtering\n"); up(&ar->sem); return -EIO; } } - vif->nw_type = vif->next_mode; - - if (vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) - nw_subtype = SUBTYPE_P2PCLIENT; + ar->nw_type = ar->next_mode; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: connect called with authmode %d dot11 auth %d" " PW crypto %d PW crypto len %d GRP crypto %d" " GRP crypto len %d channel hint %u\n", __func__, - vif->auth_mode, vif->dot11_auth_mode, vif->prwise_crypto, - vif->prwise_crypto_len, vif->grp_crypto, - vif->grp_crypto_len, vif->ch_hint); - - vif->reconnect_flag = 0; - status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type, - vif->dot11_auth_mode, vif->auth_mode, - vif->prwise_crypto, - vif->prwise_crypto_len, - vif->grp_crypto, vif->grp_crypto_len, - vif->ssid_len, vif->ssid, - vif->req_bssid, vif->ch_hint, - ar->connect_ctrl_flags, nw_subtype); + ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto, + ar->prwise_crypto_len, ar->grp_crypto, + ar->grp_crypto_len, ar->ch_hint); + + ar->reconnect_flag = 0; + status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type, + ar->dot11_auth_mode, ar->auth_mode, + ar->prwise_crypto, + ar->prwise_crypto_len, + ar->grp_crypto, ar->grp_crypto_len, + ar->ssid_len, ar->ssid, + ar->req_bssid, ar->ch_hint, + ar->connect_ctrl_flags); up(&ar->sem); if (status == -EINVAL) { - memset(vif->ssid, 0, sizeof(vif->ssid)); - vif->ssid_len = 0; + memset(ar->ssid, 0, sizeof(ar->ssid)); + ar->ssid_len = 0; ath6kl_err("invalid request\n"); return -ENOENT; } else if (status) { @@ -593,40 +404,28 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, } if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) && - ((vif->auth_mode == WPA_PSK_AUTH) - || (vif->auth_mode == WPA2_PSK_AUTH))) { - mod_timer(&vif->disconnect_timer, + ((ar->auth_mode == WPA_PSK_AUTH) + || (ar->auth_mode 
== WPA2_PSK_AUTH))) { + mod_timer(&ar->disconnect_timer, jiffies + msecs_to_jiffies(DISCON_TIMER_INTVAL)); } ar->connect_ctrl_flags &= ~CONNECT_DO_WPA_OFFLOAD; - set_bit(CONNECT_PEND, &vif->flags); + set_bit(CONNECT_PEND, &ar->flag); return 0; } -static int ath6kl_add_bss_if_needed(struct ath6kl_vif *vif, - enum network_type nw_type, - const u8 *bssid, +static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid, struct ieee80211_channel *chan, const u8 *beacon_ie, size_t beacon_ie_len) { - struct ath6kl *ar = vif->ar; struct cfg80211_bss *bss; - u16 cap_mask, cap_val; u8 *ie; - if (nw_type & ADHOC_NETWORK) { - cap_mask = WLAN_CAPABILITY_IBSS; - cap_val = WLAN_CAPABILITY_IBSS; - } else { - cap_mask = WLAN_CAPABILITY_ESS; - cap_val = WLAN_CAPABILITY_ESS; - } - - bss = cfg80211_get_bss(ar->wiphy, chan, bssid, - vif->ssid, vif->ssid_len, - cap_mask, cap_val); + bss = cfg80211_get_bss(ar->wdev->wiphy, chan, bssid, + ar->ssid, ar->ssid_len, WLAN_CAPABILITY_ESS, + WLAN_CAPABILITY_ESS); if (bss == NULL) { /* * Since cfg80211 may not yet know about the BSS, @@ -636,20 +435,21 @@ static int ath6kl_add_bss_if_needed(struct ath6kl_vif *vif, * Prepend SSID element since it is not included in the Beacon * IEs from the target. */ - ie = kmalloc(2 + vif->ssid_len + beacon_ie_len, GFP_KERNEL); + ie = kmalloc(2 + ar->ssid_len + beacon_ie_len, GFP_KERNEL); if (ie == NULL) return -ENOMEM; ie[0] = WLAN_EID_SSID; - ie[1] = vif->ssid_len; - memcpy(ie + 2, vif->ssid, vif->ssid_len); - memcpy(ie + 2 + vif->ssid_len, beacon_ie, beacon_ie_len); - bss = cfg80211_inform_bss(ar->wiphy, chan, - bssid, 0, cap_val, 100, - ie, 2 + vif->ssid_len + beacon_ie_len, + ie[1] = ar->ssid_len; + memcpy(ie + 2, ar->ssid, ar->ssid_len); + memcpy(ie + 2 + ar->ssid_len, beacon_ie, beacon_ie_len); + bss = cfg80211_inform_bss(ar->wdev->wiphy, chan, + bssid, 0, WLAN_CAPABILITY_ESS, 100, + ie, 2 + ar->ssid_len + beacon_ie_len, 0, GFP_KERNEL); if (bss) - ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "added bss %pM to " - "cfg80211\n", bssid); + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "added dummy bss for " + "%pM prior to indicating connect/roamed " + "event\n", bssid); kfree(ie); } else ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss " @@ -663,7 +463,7 @@ static int ath6kl_add_bss_if_needed(struct ath6kl_vif *vif, return 0; } -void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel, +void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel, u8 *bssid, u16 listen_intvl, u16 beacon_intvl, enum network_type nw_type, @@ -671,7 +471,6 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel, u8 assoc_resp_len, u8 *assoc_info) { struct ieee80211_channel *chan; - struct ath6kl *ar = vif->ar; /* capinfo + listen interval */ u8 assoc_req_ie_offset = sizeof(u16) + sizeof(u16); @@ -690,11 +489,11 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel, * Store Beacon interval here; DTIM period will be available only once * a Beacon frame from the AP is seen. 
*/ - vif->assoc_bss_beacon_int = beacon_intvl; - clear_bit(DTIM_PERIOD_AVAIL, &vif->flags); + ar->assoc_bss_beacon_int = beacon_intvl; + clear_bit(DTIM_PERIOD_AVAIL, &ar->flag); if (nw_type & ADHOC_NETWORK) { - if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) { + if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: ath6k not in ibss mode\n", __func__); return; @@ -702,39 +501,39 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel, } if (nw_type & INFRA_NETWORK) { - if (vif->wdev.iftype != NL80211_IFTYPE_STATION && - vif->wdev.iftype != NL80211_IFTYPE_P2P_CLIENT) { + if (ar->wdev->iftype != NL80211_IFTYPE_STATION && + ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: ath6k not in station mode\n", __func__); return; } } - chan = ieee80211_get_channel(ar->wiphy, (int) channel); + chan = ieee80211_get_channel(ar->wdev->wiphy, (int) channel); - if (ath6kl_add_bss_if_needed(vif, nw_type, bssid, chan, assoc_info, - beacon_ie_len) < 0) { - ath6kl_err("could not add cfg80211 bss entry\n"); + + if (nw_type & ADHOC_NETWORK) { + cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL); return; } - if (nw_type & ADHOC_NETWORK) { - ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n", - nw_type & ADHOC_CREATOR ? "creator" : "joiner"); - cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL); + if (ath6kl_add_bss_if_needed(ar, bssid, chan, assoc_info, + beacon_ie_len) < 0) { + ath6kl_err("could not add cfg80211 bss entry for " + "connect/roamed notification\n"); return; } - if (vif->sme_state == SME_CONNECTING) { + if (ar->sme_state == SME_CONNECTING) { /* inform connect result to cfg80211 */ - vif->sme_state = SME_CONNECTED; - cfg80211_connect_result(vif->ndev, bssid, + ar->sme_state = SME_CONNECTED; + cfg80211_connect_result(ar->net_dev, bssid, assoc_req_ie, assoc_req_len, assoc_resp_ie, assoc_resp_len, WLAN_STATUS_SUCCESS, GFP_KERNEL); - } else if (vif->sme_state == SME_CONNECTED) { + } else if (ar->sme_state == SME_CONNECTED) { /* inform roam event to cfg80211 */ - cfg80211_roamed(vif->ndev, chan, bssid, + cfg80211_roamed(ar->net_dev, chan, bssid, assoc_req_ie, assoc_req_len, assoc_resp_ie, assoc_resp_len, GFP_KERNEL); } @@ -743,15 +542,12 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel, static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, u16 reason_code) { - struct ath6kl *ar = ath6kl_priv(dev); - struct ath6kl_vif *vif = netdev_priv(dev); + struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev); ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: reason=%u\n", __func__, reason_code); - ath6kl_cfg80211_sscan_disable(vif); - - if (!ath6kl_cfg80211_ready(vif)) + if (!ath6kl_cfg80211_ready(ar)) return -EIO; if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) { @@ -764,46 +560,44 @@ static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy, return -ERESTARTSYS; } - vif->reconnect_flag = 0; - ath6kl_disconnect(vif); - memset(vif->ssid, 0, sizeof(vif->ssid)); - vif->ssid_len = 0; + ar->reconnect_flag = 0; + ath6kl_disconnect(ar); + memset(ar->ssid, 0, sizeof(ar->ssid)); + ar->ssid_len = 0; if (!test_bit(SKIP_SCAN, &ar->flag)) - memset(vif->req_bssid, 0, sizeof(vif->req_bssid)); + memset(ar->req_bssid, 0, sizeof(ar->req_bssid)); up(&ar->sem); - vif->sme_state = SME_DISCONNECTED; + ar->sme_state = SME_DISCONNECTED; return 0; } -void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason, +void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid, u8 
assoc_resp_len, u8 *assoc_info, u16 proto_reason) { - struct ath6kl *ar = vif->ar; - - if (vif->scan_req) { - cfg80211_scan_done(vif->scan_req, true); - vif->scan_req = NULL; + if (ar->scan_req) { + cfg80211_scan_done(ar->scan_req, true); + ar->scan_req = NULL; } - if (vif->nw_type & ADHOC_NETWORK) { - if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) { + if (ar->nw_type & ADHOC_NETWORK) { + if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: ath6k not in ibss mode\n", __func__); return; } memset(bssid, 0, ETH_ALEN); - cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL); + cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL); return; } - if (vif->nw_type & INFRA_NETWORK) { - if (vif->wdev.iftype != NL80211_IFTYPE_STATION && - vif->wdev.iftype != NL80211_IFTYPE_P2P_CLIENT) { + if (ar->nw_type & INFRA_NETWORK) { + if (ar->wdev->iftype != NL80211_IFTYPE_STATION && + ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: ath6k not in station mode\n", __func__); return; @@ -820,46 +614,42 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason, */ if (reason != DISCONNECT_CMD) { - ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx); + ath6kl_wmi_disconnect_cmd(ar->wmi); return; } - clear_bit(CONNECT_PEND, &vif->flags); + clear_bit(CONNECT_PEND, &ar->flag); - if (vif->sme_state == SME_CONNECTING) { - cfg80211_connect_result(vif->ndev, + if (ar->sme_state == SME_CONNECTING) { + cfg80211_connect_result(ar->net_dev, bssid, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL); - } else if (vif->sme_state == SME_CONNECTED) { - cfg80211_disconnected(vif->ndev, reason, + } else if (ar->sme_state == SME_CONNECTED) { + cfg80211_disconnected(ar->net_dev, reason, NULL, 0, GFP_KERNEL); } - vif->sme_state = SME_DISCONNECTED; + ar->sme_state = SME_DISCONNECTED; } static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_scan_request *request) { - struct ath6kl *ar = ath6kl_priv(ndev); - struct ath6kl_vif *vif = netdev_priv(ndev); + struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev); s8 n_channels = 0; u16 *channels = NULL; int ret = 0; - u32 force_fg_scan = 0; - if (!ath6kl_cfg80211_ready(vif)) + if (!ath6kl_cfg80211_ready(ar)) return -EIO; - ath6kl_cfg80211_sscan_disable(vif); - if (!ar->usr_bss_filter) { - clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags); + clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag); ret = ath6kl_wmi_bssfilter_cmd( - ar->wmi, vif->fw_vif_idx, - (test_bit(CONNECTED, &vif->flags) ? + ar->wmi, + (test_bit(CONNECTED, &ar->flag) ? 
ALL_BUT_BSS_FILTER : ALL_BSS_FILTER), 0); if (ret) { ath6kl_err("couldn't set bss filtering\n"); @@ -874,19 +664,14 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, request->n_ssids = MAX_PROBED_SSID_INDEX - 1; for (i = 0; i < request->n_ssids; i++) - ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, - i + 1, SPECIFIC_SSID_FLAG, + ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1, + SPECIFIC_SSID_FLAG, request->ssids[i].ssid_len, request->ssids[i].ssid); } - /* - * FIXME: we should clear the IE in fw if it's not set so just - * remove the check altogether - */ if (request->ie) { - ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, - WMI_FRAME_PROBE_REQ, + ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_REQ, request->ie, request->ie_len); if (ret) { ath6kl_err("failed to set Probe Request appie for " @@ -917,63 +702,44 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, channels[i] = request->channels[i]->center_freq; } - if (test_bit(CONNECTED, &vif->flags)) - force_fg_scan = 1; - - if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, - ar->fw_capabilities)) { - /* - * If capable of doing P2P mgmt operations using - * station interface, send additional information like - * supported rates to advertise and xmit rates for - * probe requests - */ - ret = ath6kl_wmi_beginscan_cmd(ar->wmi, vif->fw_vif_idx, - WMI_LONG_SCAN, force_fg_scan, - false, 0, 0, n_channels, - channels, request->no_cck, - request->rates); - } else { - ret = ath6kl_wmi_startscan_cmd(ar->wmi, vif->fw_vif_idx, - WMI_LONG_SCAN, force_fg_scan, - false, 0, 0, n_channels, - channels); - } + ret = ath6kl_wmi_startscan_cmd(ar->wmi, WMI_LONG_SCAN, 0, + false, 0, 0, n_channels, channels); if (ret) ath6kl_err("wmi_startscan_cmd failed\n"); else - vif->scan_req = request; + ar->scan_req = request; kfree(channels); return ret; } -void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted) +void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status) { - struct ath6kl *ar = vif->ar; int i; - ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status%s\n", __func__, - aborted ? 
" aborted" : ""); + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status %d\n", __func__, status); - if (!vif->scan_req) + if (!ar->scan_req) return; - if (aborted) + if ((status == -ECANCELED) || (status == -EBUSY)) { + cfg80211_scan_done(ar->scan_req, true); goto out; + } - if (vif->scan_req->n_ssids && vif->scan_req->ssids[0].ssid_len) { - for (i = 0; i < vif->scan_req->n_ssids; i++) { - ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, - i + 1, DISABLE_SSID_FLAG, + cfg80211_scan_done(ar->scan_req, false); + + if (ar->scan_req->n_ssids && ar->scan_req->ssids[0].ssid_len) { + for (i = 0; i < ar->scan_req->n_ssids; i++) { + ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1, + DISABLE_SSID_FLAG, 0, NULL); } } out: - cfg80211_scan_done(vif->scan_req, aborted); - vif->scan_req = NULL; + ar->scan_req = NULL; } static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, @@ -981,22 +747,15 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, const u8 *mac_addr, struct key_params *params) { - struct ath6kl *ar = ath6kl_priv(ndev); - struct ath6kl_vif *vif = netdev_priv(ndev); + struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev); struct ath6kl_key *key = NULL; u8 key_usage; u8 key_type; + int status = 0; - if (!ath6kl_cfg80211_ready(vif)) + if (!ath6kl_cfg80211_ready(ar)) return -EIO; - if (params->cipher == CCKM_KRK_CIPHER_SUITE) { - if (params->key_len != WMI_KRK_LEN) - return -EINVAL; - return ath6kl_wmi_add_krk_cmd(ar->wmi, vif->fw_vif_idx, - params->key); - } - if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: key index %d out of bounds\n", __func__, @@ -1004,7 +763,7 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, return -ENOENT; } - key = &vif->keys[key_index]; + key = &ar->keys[key_index]; memset(key, 0, sizeof(struct ath6kl_key)); if (pairwise) @@ -1013,19 +772,13 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, key_usage = GROUP_USAGE; if (params) { - int seq_len = params->seq_len; - if (params->cipher == WLAN_CIPHER_SUITE_SMS4 && - seq_len > ATH6KL_KEY_SEQ_LEN) { - /* Only first half of the WPI PN is configured */ - seq_len = ATH6KL_KEY_SEQ_LEN; - } if (params->key_len > WLAN_MAX_KEY_LEN || - seq_len > sizeof(key->seq)) + params->seq_len > sizeof(key->seq)) return -EINVAL; key->key_len = params->key_len; memcpy(key->key, params->key, key->key_len); - key->seq_len = seq_len; + key->seq_len = params->seq_len; memcpy(key->seq, params->seq, key->seq_len); key->cipher = params->cipher; } @@ -1043,33 +796,31 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, case WLAN_CIPHER_SUITE_CCMP: key_type = AES_CRYPT; break; - case WLAN_CIPHER_SUITE_SMS4: - key_type = WAPI_CRYPT; - break; default: return -ENOTSUPP; } - if (((vif->auth_mode == WPA_PSK_AUTH) - || (vif->auth_mode == WPA2_PSK_AUTH)) + if (((ar->auth_mode == WPA_PSK_AUTH) + || (ar->auth_mode == WPA2_PSK_AUTH)) && (key_usage & GROUP_USAGE)) - del_timer(&vif->disconnect_timer); + del_timer(&ar->disconnect_timer); ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d, key_len %d, key_type 0x%x, key_usage 0x%x, seq_len %d\n", __func__, key_index, key->key_len, key_type, key_usage, key->seq_len); - if (vif->nw_type == AP_NETWORK && !pairwise && - (key_type == TKIP_CRYPT || key_type == AES_CRYPT || - key_type == WAPI_CRYPT) && params) { + ar->def_txkey_index = key_index; + + if (ar->nw_type == AP_NETWORK && !pairwise && + (key_type == TKIP_CRYPT || 
key_type == AES_CRYPT) && params) { ar->ap_mode_bkey.valid = true; ar->ap_mode_bkey.key_index = key_index; ar->ap_mode_bkey.key_type = key_type; ar->ap_mode_bkey.key_len = key->key_len; memcpy(ar->ap_mode_bkey.key, key->key, key->key_len); - if (!test_bit(CONNECTED, &vif->flags)) { + if (!test_bit(CONNECTED, &ar->flag)) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay initial group " "key configuration until AP mode has been " "started\n"); @@ -1081,8 +832,8 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, } } - if (vif->next_mode == AP_NETWORK && key_type == WEP_CRYPT && - !test_bit(CONNECTED, &vif->flags)) { + if (ar->next_mode == AP_NETWORK && key_type == WEP_CRYPT && + !test_bit(CONNECTED, &ar->flag)) { /* * Store the key locally so that it can be re-configured after * the AP mode has properly started @@ -1090,29 +841,31 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, */ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay WEP key configuration " "until AP mode has been started\n"); - vif->wep_key_list[key_index].key_len = key->key_len; - memcpy(vif->wep_key_list[key_index].key, key->key, - key->key_len); + ar->wep_key_list[key_index].key_len = key->key_len; + memcpy(ar->wep_key_list[key_index].key, key->key, key->key_len); return 0; } - return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, key_index, - key_type, key_usage, key->key_len, - key->seq, key->seq_len, key->key, - KEY_OP_INIT_VAL, - (u8 *) mac_addr, SYNC_BOTH_WMIFLAG); + status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index, + key_type, key_usage, key->key_len, + key->seq, key->key, KEY_OP_INIT_VAL, + (u8 *) mac_addr, SYNC_BOTH_WMIFLAG); + + if (status) + return -EIO; + + return 0; } static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_index, bool pairwise, const u8 *mac_addr) { - struct ath6kl *ar = ath6kl_priv(ndev); - struct ath6kl_vif *vif = netdev_priv(ndev); + struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev); ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index); - if (!ath6kl_cfg80211_ready(vif)) + if (!ath6kl_cfg80211_ready(ar)) return -EIO; if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) { @@ -1122,15 +875,15 @@ static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, return -ENOENT; } - if (!vif->keys[key_index].key_len) { + if (!ar->keys[key_index].key_len) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d is empty\n", __func__, key_index); return 0; } - vif->keys[key_index].key_len = 0; + ar->keys[key_index].key_len = 0; - return ath6kl_wmi_deletekey_cmd(ar->wmi, vif->fw_vif_idx, key_index); + return ath6kl_wmi_deletekey_cmd(ar->wmi, key_index); } static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, @@ -1139,13 +892,13 @@ static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, void (*callback) (void *cookie, struct key_params *)) { - struct ath6kl_vif *vif = netdev_priv(ndev); + struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev); struct ath6kl_key *key = NULL; struct key_params params; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index); - if (!ath6kl_cfg80211_ready(vif)) + if (!ath6kl_cfg80211_ready(ar)) return -EIO; if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) { @@ -1155,7 +908,7 @@ static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, return -ENOENT; } - key = &vif->keys[key_index]; + key = &ar->keys[key_index]; memset(¶ms, 0, 
sizeof(params)); params.cipher = key->cipher; params.key_len = key->key_len; @@ -1173,15 +926,15 @@ static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy, u8 key_index, bool unicast, bool multicast) { - struct ath6kl *ar = ath6kl_priv(ndev); - struct ath6kl_vif *vif = netdev_priv(ndev); + struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev); struct ath6kl_key *key = NULL; + int status = 0; u8 key_usage; enum crypto_type key_type = NONE_CRYPT; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index); - if (!ath6kl_cfg80211_ready(vif)) + if (!ath6kl_cfg80211_ready(ar)) return -EIO; if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) { @@ -1191,41 +944,43 @@ static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy, return -ENOENT; } - if (!vif->keys[key_index].key_len) { + if (!ar->keys[key_index].key_len) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: invalid key index %d\n", __func__, key_index); return -EINVAL; } - vif->def_txkey_index = key_index; - key = &vif->keys[vif->def_txkey_index]; + ar->def_txkey_index = key_index; + key = &ar->keys[ar->def_txkey_index]; key_usage = GROUP_USAGE; - if (vif->prwise_crypto == WEP_CRYPT) + if (ar->prwise_crypto == WEP_CRYPT) key_usage |= TX_USAGE; if (unicast) - key_type = vif->prwise_crypto; + key_type = ar->prwise_crypto; if (multicast) - key_type = vif->grp_crypto; + key_type = ar->grp_crypto; - if (vif->next_mode == AP_NETWORK && !test_bit(CONNECTED, &vif->flags)) + if (ar->next_mode == AP_NETWORK && !test_bit(CONNECTED, &ar->flag)) return 0; /* Delay until AP mode has been started */ - return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, - vif->def_txkey_index, - key_type, key_usage, - key->key_len, key->seq, key->seq_len, - key->key, - KEY_OP_INIT_VAL, NULL, - SYNC_BOTH_WMIFLAG); + status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index, + key_type, key_usage, + key->key_len, key->seq, key->key, + KEY_OP_INIT_VAL, NULL, + SYNC_BOTH_WMIFLAG); + if (status) + return -EIO; + + return 0; } -void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, +void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: keyid %d, ismcast %d\n", __func__, keyid, ismcast); - cfg80211_michael_mic_failure(vif->ndev, vif->bssid, + cfg80211_michael_mic_failure(ar->net_dev, ar->bssid, (ismcast ? 
NL80211_KEYTYPE_GROUP : NL80211_KEYTYPE_PAIRWISE), keyid, NULL, GFP_KERNEL); @@ -1234,17 +989,12 @@ void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) { struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy); - struct ath6kl_vif *vif; int ret; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: changed 0x%x\n", __func__, changed); - vif = ath6kl_vif_first(ar); - if (!vif) - return -EIO; - - if (!ath6kl_cfg80211_ready(vif)) + if (!ath6kl_cfg80211_ready(ar)) return -EIO; if (changed & WIPHY_PARAM_RTS_THRESHOLD) { @@ -1264,21 +1014,15 @@ static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) */ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy, enum nl80211_tx_power_setting type, - int mbm) + int dbm) { struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy); - struct ath6kl_vif *vif; u8 ath6kl_dbm; - int dbm = MBM_TO_DBM(mbm); ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x, dbm %d\n", __func__, type, dbm); - vif = ath6kl_vif_first(ar); - if (!vif) - return -EIO; - - if (!ath6kl_cfg80211_ready(vif)) + if (!ath6kl_cfg80211_ready(ar)) return -EIO; switch (type) { @@ -1293,7 +1037,7 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy, return -EOPNOTSUPP; } - ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_dbm); + ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, ath6kl_dbm); return 0; } @@ -1301,19 +1045,14 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy, static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm) { struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy); - struct ath6kl_vif *vif; - - vif = ath6kl_vif_first(ar); - if (!vif) - return -EIO; - if (!ath6kl_cfg80211_ready(vif)) + if (!ath6kl_cfg80211_ready(ar)) return -EIO; - if (test_bit(CONNECTED, &vif->flags)) { + if (test_bit(CONNECTED, &ar->flag)) { ar->tx_pwr = 0; - if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx) != 0) { + if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi) != 0) { ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n"); return -EIO; } @@ -1337,12 +1076,11 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy, { struct ath6kl *ar = ath6kl_priv(dev); struct wmi_power_mode_cmd mode; - struct ath6kl_vif *vif = netdev_priv(dev); ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: pmgmt %d, timeout %d\n", __func__, pmgmt, timeout); - if (!ath6kl_cfg80211_ready(vif)) + if (!ath6kl_cfg80211_ready(ar)) return -EIO; if (pmgmt) { @@ -1353,8 +1091,7 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy, mode.pwr_mode = MAX_PERF_POWER; } - if (ath6kl_wmi_powermode_cmd(ar->wmi, vif->fw_vif_idx, - mode.pwr_mode) != 0) { + if (ath6kl_wmi_powermode_cmd(ar->wmi, mode.pwr_mode) != 0) { ath6kl_err("wmi_powermode_cmd failed\n"); return -EIO; } @@ -1362,83 +1099,41 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy, return 0; } -static struct net_device *ath6kl_cfg80211_add_iface(struct wiphy *wiphy, - char *name, - enum nl80211_iftype type, - u32 *flags, - struct vif_params *params) -{ - struct ath6kl *ar = wiphy_priv(wiphy); - struct net_device *ndev; - u8 if_idx, nw_type; - - if (ar->num_vif == ar->vif_max) { - ath6kl_err("Reached maximum number of supported vif\n"); - return ERR_PTR(-EINVAL); - } - - if (!ath6kl_is_valid_iftype(ar, type, &if_idx, &nw_type)) { - ath6kl_err("Not a supported interface type\n"); - return ERR_PTR(-EINVAL); - } - - ndev = ath6kl_interface_add(ar, name, type, if_idx, nw_type); - if (!ndev) - return ERR_PTR(-ENOMEM); - - ar->num_vif++; - - return 
ndev; -} - -static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy, - struct net_device *ndev) -{ - struct ath6kl *ar = wiphy_priv(wiphy); - struct ath6kl_vif *vif = netdev_priv(ndev); - - spin_lock_bh(&ar->list_lock); - list_del(&vif->list); - spin_unlock_bh(&ar->list_lock); - - ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag)); - - ath6kl_deinit_if_data(vif); - - return 0; -} - static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, enum nl80211_iftype type, u32 *flags, struct vif_params *params) { - struct ath6kl_vif *vif = netdev_priv(ndev); + struct ath6kl *ar = ath6kl_priv(ndev); + struct wireless_dev *wdev = ar->wdev; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type); + if (!ath6kl_cfg80211_ready(ar)) + return -EIO; + switch (type) { case NL80211_IFTYPE_STATION: - vif->next_mode = INFRA_NETWORK; + ar->next_mode = INFRA_NETWORK; break; case NL80211_IFTYPE_ADHOC: - vif->next_mode = ADHOC_NETWORK; + ar->next_mode = ADHOC_NETWORK; break; case NL80211_IFTYPE_AP: - vif->next_mode = AP_NETWORK; + ar->next_mode = AP_NETWORK; break; case NL80211_IFTYPE_P2P_CLIENT: - vif->next_mode = INFRA_NETWORK; + ar->next_mode = INFRA_NETWORK; break; case NL80211_IFTYPE_P2P_GO: - vif->next_mode = AP_NETWORK; + ar->next_mode = AP_NETWORK; break; default: ath6kl_err("invalid interface type %u\n", type); return -EOPNOTSUPP; } - vif->wdev.iftype = type; + wdev->iftype = type; return 0; } @@ -1448,17 +1143,16 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy, struct cfg80211_ibss_params *ibss_param) { struct ath6kl *ar = ath6kl_priv(dev); - struct ath6kl_vif *vif = netdev_priv(dev); int status; - if (!ath6kl_cfg80211_ready(vif)) + if (!ath6kl_cfg80211_ready(ar)) return -EIO; - vif->ssid_len = ibss_param->ssid_len; - memcpy(vif->ssid, ibss_param->ssid, vif->ssid_len); + ar->ssid_len = ibss_param->ssid_len; + memcpy(ar->ssid, ibss_param->ssid, ar->ssid_len); if (ibss_param->channel) - vif->ch_hint = ibss_param->channel->center_freq; + ar->ch_hint = ibss_param->channel->center_freq; if (ibss_param->channel_fixed) { /* @@ -1470,45 +1164,44 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy, return -EOPNOTSUPP; } - memset(vif->req_bssid, 0, sizeof(vif->req_bssid)); + memset(ar->req_bssid, 0, sizeof(ar->req_bssid)); if (ibss_param->bssid && !is_broadcast_ether_addr(ibss_param->bssid)) - memcpy(vif->req_bssid, ibss_param->bssid, - sizeof(vif->req_bssid)); + memcpy(ar->req_bssid, ibss_param->bssid, sizeof(ar->req_bssid)); - ath6kl_set_wpa_version(vif, 0); + ath6kl_set_wpa_version(ar, 0); - status = ath6kl_set_auth_type(vif, NL80211_AUTHTYPE_OPEN_SYSTEM); + status = ath6kl_set_auth_type(ar, NL80211_AUTHTYPE_OPEN_SYSTEM); if (status) return status; if (ibss_param->privacy) { - ath6kl_set_cipher(vif, WLAN_CIPHER_SUITE_WEP40, true); - ath6kl_set_cipher(vif, WLAN_CIPHER_SUITE_WEP40, false); + ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, true); + ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, false); } else { - ath6kl_set_cipher(vif, 0, true); - ath6kl_set_cipher(vif, 0, false); + ath6kl_set_cipher(ar, 0, true); + ath6kl_set_cipher(ar, 0, false); } - vif->nw_type = vif->next_mode; + ar->nw_type = ar->next_mode; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: connect called with authmode %d dot11 auth %d" " PW crypto %d PW crypto len %d GRP crypto %d" " GRP crypto len %d channel hint %u\n", __func__, - vif->auth_mode, vif->dot11_auth_mode, vif->prwise_crypto, - vif->prwise_crypto_len, vif->grp_crypto, - vif->grp_crypto_len, vif->ch_hint); - - status = 
ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type, - vif->dot11_auth_mode, vif->auth_mode, - vif->prwise_crypto, - vif->prwise_crypto_len, - vif->grp_crypto, vif->grp_crypto_len, - vif->ssid_len, vif->ssid, - vif->req_bssid, vif->ch_hint, - ar->connect_ctrl_flags, SUBTYPE_NONE); - set_bit(CONNECT_PEND, &vif->flags); + ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto, + ar->prwise_crypto_len, ar->grp_crypto, + ar->grp_crypto_len, ar->ch_hint); + + status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type, + ar->dot11_auth_mode, ar->auth_mode, + ar->prwise_crypto, + ar->prwise_crypto_len, + ar->grp_crypto, ar->grp_crypto_len, + ar->ssid_len, ar->ssid, + ar->req_bssid, ar->ch_hint, + ar->connect_ctrl_flags); + set_bit(CONNECT_PEND, &ar->flag); return 0; } @@ -1516,14 +1209,14 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy, static int ath6kl_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) { - struct ath6kl_vif *vif = netdev_priv(dev); + struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev); - if (!ath6kl_cfg80211_ready(vif)) + if (!ath6kl_cfg80211_ready(ar)) return -EIO; - ath6kl_disconnect(vif); - memset(vif->ssid, 0, sizeof(vif->ssid)); - vif->ssid_len = 0; + ath6kl_disconnect(ar); + memset(ar->ssid, 0, sizeof(ar->ssid)); + ar->ssid_len = 0; return 0; } @@ -1533,8 +1226,6 @@ static const u32 cipher_suites[] = { WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, - CCKM_KRK_CIPHER_SUITE, - WLAN_CIPHER_SUITE_SMS4, }; static bool is_rate_legacy(s32 rate) @@ -1602,22 +1293,21 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev, u8 *mac, struct station_info *sinfo) { struct ath6kl *ar = ath6kl_priv(dev); - struct ath6kl_vif *vif = netdev_priv(dev); long left; bool sgi; s32 rate; int ret; u8 mcs; - if (memcmp(mac, vif->bssid, ETH_ALEN) != 0) + if (memcmp(mac, ar->bssid, ETH_ALEN) != 0) return -ENOENT; if (down_interruptible(&ar->sem)) return -EBUSY; - set_bit(STATS_UPDATE_PEND, &vif->flags); + set_bit(STATS_UPDATE_PEND, &ar->flag); - ret = ath6kl_wmi_get_stats_cmd(ar->wmi, vif->fw_vif_idx); + ret = ath6kl_wmi_get_stats_cmd(ar->wmi); if (ret != 0) { up(&ar->sem); @@ -1626,7 +1316,7 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev, left = wait_event_interruptible_timeout(ar->event_wq, !test_bit(STATS_UPDATE_PEND, - &vif->flags), + &ar->flag), WMI_TIMEOUT); up(&ar->sem); @@ -1636,24 +1326,24 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev, else if (left < 0) return left; - if (vif->target_stats.rx_byte) { - sinfo->rx_bytes = vif->target_stats.rx_byte; + if (ar->target_stats.rx_byte) { + sinfo->rx_bytes = ar->target_stats.rx_byte; sinfo->filled |= STATION_INFO_RX_BYTES; - sinfo->rx_packets = vif->target_stats.rx_pkt; + sinfo->rx_packets = ar->target_stats.rx_pkt; sinfo->filled |= STATION_INFO_RX_PACKETS; } - if (vif->target_stats.tx_byte) { - sinfo->tx_bytes = vif->target_stats.tx_byte; + if (ar->target_stats.tx_byte) { + sinfo->tx_bytes = ar->target_stats.tx_byte; sinfo->filled |= STATION_INFO_TX_BYTES; - sinfo->tx_packets = vif->target_stats.tx_pkt; + sinfo->tx_packets = ar->target_stats.tx_pkt; sinfo->filled |= STATION_INFO_TX_PACKETS; } - sinfo->signal = vif->target_stats.cs_rssi; + sinfo->signal = ar->target_stats.cs_rssi; sinfo->filled |= STATION_INFO_SIGNAL; - rate = vif->target_stats.tx_ucast_rate; + rate = ar->target_stats.tx_ucast_rate; if (is_rate_legacy(rate)) { sinfo->txrate.legacy = rate / 100; @@ -1685,13 +1375,13 @@ static int 
ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev, sinfo->filled |= STATION_INFO_TX_BITRATE; - if (test_bit(CONNECTED, &vif->flags) && - test_bit(DTIM_PERIOD_AVAIL, &vif->flags) && - vif->nw_type == INFRA_NETWORK) { + if (test_bit(CONNECTED, &ar->flag) && + test_bit(DTIM_PERIOD_AVAIL, &ar->flag) && + ar->nw_type == INFRA_NETWORK) { sinfo->filled |= STATION_INFO_BSS_PARAM; sinfo->bss_param.flags = 0; - sinfo->bss_param.dtim_period = vif->assoc_bss_dtim_period; - sinfo->bss_param.beacon_interval = vif->assoc_bss_beacon_int; + sinfo->bss_param.dtim_period = ar->assoc_bss_dtim_period; + sinfo->bss_param.beacon_interval = ar->assoc_bss_beacon_int; } return 0; @@ -1701,9 +1391,7 @@ static int ath6kl_set_pmksa(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_pmksa *pmksa) { struct ath6kl *ar = ath6kl_priv(netdev); - struct ath6kl_vif *vif = netdev_priv(netdev); - - return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid, + return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid, pmksa->pmkid, true); } @@ -1711,302 +1399,25 @@ static int ath6kl_del_pmksa(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_pmksa *pmksa) { struct ath6kl *ar = ath6kl_priv(netdev); - struct ath6kl_vif *vif = netdev_priv(netdev); - - return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid, + return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid, pmksa->pmkid, false); } static int ath6kl_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev) { struct ath6kl *ar = ath6kl_priv(netdev); - struct ath6kl_vif *vif = netdev_priv(netdev); - - if (test_bit(CONNECTED, &vif->flags)) - return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, - vif->bssid, NULL, false); - return 0; -} - -static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow) -{ - struct ath6kl_vif *vif; - int ret, pos, left; - u32 filter = 0; - u16 i; - u8 mask[WOW_MASK_SIZE]; - - vif = ath6kl_vif_first(ar); - if (!vif) - return -EIO; - - if (!ath6kl_cfg80211_ready(vif)) - return -EIO; - - if (!test_bit(CONNECTED, &vif->flags)) - return -EINVAL; - - /* Clear existing WOW patterns */ - for (i = 0; i < WOW_MAX_FILTERS_PER_LIST; i++) - ath6kl_wmi_del_wow_pattern_cmd(ar->wmi, vif->fw_vif_idx, - WOW_LIST_ID, i); - /* Configure new WOW patterns */ - for (i = 0; i < wow->n_patterns; i++) { - - /* - * Convert given nl80211 specific mask value to equivalent - * driver specific mask value and send it to the chip along - * with patterns. For example, If the mask value defined in - * struct cfg80211_wowlan is 0xA (equivalent binary is 1010), - * then equivalent driver specific mask value is - * "0xFF 0x00 0xFF 0x00". - */ - memset(&mask, 0, sizeof(mask)); - for (pos = 0; pos < wow->patterns[i].pattern_len; pos++) { - if (wow->patterns[i].mask[pos / 8] & (0x1 << (pos % 8))) - mask[pos] = 0xFF; - } - /* - * Note: Pattern's offset is not passed as part of wowlan - * parameter from CFG layer. So it's always passed as ZERO - * to the firmware. It means, given WOW patterns are always - * matched from the first byte of received pkt in the firmware. 
- */ - ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi, - vif->fw_vif_idx, WOW_LIST_ID, - wow->patterns[i].pattern_len, - 0 /* pattern offset */, - wow->patterns[i].pattern, mask); - if (ret) - return ret; - } - - if (wow->disconnect) - filter |= WOW_FILTER_OPTION_NWK_DISASSOC; - - if (wow->magic_pkt) - filter |= WOW_FILTER_OPTION_MAGIC_PACKET; - - if (wow->gtk_rekey_failure) - filter |= WOW_FILTER_OPTION_GTK_ERROR; - - if (wow->eap_identity_req) - filter |= WOW_FILTER_OPTION_EAP_REQ; - - if (wow->four_way_handshake) - filter |= WOW_FILTER_OPTION_8021X_4WAYHS; - - ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx, - ATH6KL_WOW_MODE_ENABLE, - filter, - WOW_HOST_REQ_DELAY); - if (ret) - return ret; - - ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx, - ATH6KL_HOST_MODE_ASLEEP); - if (ret) - return ret; - - if (ar->tx_pending[ar->ctrl_ep]) { - left = wait_event_interruptible_timeout(ar->event_wq, - ar->tx_pending[ar->ctrl_ep] == 0, WMI_TIMEOUT); - if (left == 0) { - ath6kl_warn("clear wmi ctrl data timeout\n"); - ret = -ETIMEDOUT; - } else if (left < 0) { - ath6kl_warn("clear wmi ctrl data failed: %d\n", left); - ret = left; - } - } - - return ret; -} - -static int ath6kl_wow_resume(struct ath6kl *ar) -{ - struct ath6kl_vif *vif; - int ret; - - vif = ath6kl_vif_first(ar); - if (!vif) - return -EIO; - - ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx, - ATH6KL_HOST_MODE_AWAKE); - return ret; -} - -int ath6kl_cfg80211_suspend(struct ath6kl *ar, - enum ath6kl_cfg_suspend_mode mode, - struct cfg80211_wowlan *wow) -{ - int ret; - - switch (mode) { - case ATH6KL_CFG_SUSPEND_WOW: - - ath6kl_dbg(ATH6KL_DBG_SUSPEND, "wow mode suspend\n"); - - /* Flush all non control pkts in TX path */ - ath6kl_tx_data_cleanup(ar); - - ret = ath6kl_wow_suspend(ar, wow); - if (ret) { - ath6kl_err("wow suspend failed: %d\n", ret); - return ret; - } - ar->state = ATH6KL_STATE_WOW; - break; - - case ATH6KL_CFG_SUSPEND_DEEPSLEEP: - - ath6kl_cfg80211_stop_all(ar); - - /* save the current power mode before enabling power save */ - ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode; - - ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER); - if (ret) { - ath6kl_warn("wmi powermode command failed during suspend: %d\n", - ret); - } - - ar->state = ATH6KL_STATE_DEEPSLEEP; - - break; - - case ATH6KL_CFG_SUSPEND_CUTPOWER: - - ath6kl_cfg80211_stop_all(ar); - - if (ar->state == ATH6KL_STATE_OFF) { - ath6kl_dbg(ATH6KL_DBG_SUSPEND, - "suspend hw off, no action for cutpower\n"); - break; - } - - ath6kl_dbg(ATH6KL_DBG_SUSPEND, "suspend cutting power\n"); - - ret = ath6kl_init_hw_stop(ar); - if (ret) { - ath6kl_warn("failed to stop hw during suspend: %d\n", - ret); - } - - ar->state = ATH6KL_STATE_CUTPOWER; - - break; - - case ATH6KL_CFG_SUSPEND_SCHED_SCAN: - /* - * Nothing needed for schedule scan, firmware is already in - * wow mode and sleeping most of the time. 
- */ - break; - - default: - break; - } - - return 0; -} - -int ath6kl_cfg80211_resume(struct ath6kl *ar) -{ - int ret; - - switch (ar->state) { - case ATH6KL_STATE_WOW: - ath6kl_dbg(ATH6KL_DBG_SUSPEND, "wow mode resume\n"); - - ret = ath6kl_wow_resume(ar); - if (ret) { - ath6kl_warn("wow mode resume failed: %d\n", ret); - return ret; - } - - ar->state = ATH6KL_STATE_ON; - break; - - case ATH6KL_STATE_DEEPSLEEP: - if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) { - ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, - ar->wmi->saved_pwr_mode); - if (ret) { - ath6kl_warn("wmi powermode command failed during resume: %d\n", - ret); - } - } - - ar->state = ATH6KL_STATE_ON; - - break; - - case ATH6KL_STATE_CUTPOWER: - ath6kl_dbg(ATH6KL_DBG_SUSPEND, "resume restoring power\n"); - - ret = ath6kl_init_hw_start(ar); - if (ret) { - ath6kl_warn("Failed to boot hw in resume: %d\n", ret); - return ret; - } - break; - - case ATH6KL_STATE_SCHED_SCAN: - break; - - default: - break; - } - + if (test_bit(CONNECTED, &ar->flag)) + return ath6kl_wmi_setpmkid_cmd(ar->wmi, ar->bssid, NULL, false); return 0; } #ifdef CONFIG_PM - -/* hif layer decides what suspend mode to use */ -static int __ath6kl_cfg80211_suspend(struct wiphy *wiphy, +static int ar6k_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow) { struct ath6kl *ar = wiphy_priv(wiphy); - return ath6kl_hif_suspend(ar, wow); -} - -static int __ath6kl_cfg80211_resume(struct wiphy *wiphy) -{ - struct ath6kl *ar = wiphy_priv(wiphy); - - return ath6kl_hif_resume(ar); -} - -/* - * FIXME: WOW suspend mode is selected if the host sdio controller supports - * both sdio irq wake up and keep power. The target pulls sdio data line to - * wake up the host when WOW pattern matches. This causes sdio irq handler - * is being called in the host side which internally hits ath6kl's RX path. - * - * Since sdio interrupt is not disabled, RX path executes even before - * the host executes the actual resume operation from PM module. - * - * In the current scenario, WOW resume should happen before start processing - * any data from the target. So It's required to perform WOW resume in RX path. - * Ideally we should perform WOW resume only in the actual platform - * resume path. This area needs bit rework to avoid WOW resume in RX path. - * - * ath6kl_check_wow_status() is called from ath6kl_rx(). 
- */ -void ath6kl_check_wow_status(struct ath6kl *ar) -{ - if (ar->state == ATH6KL_STATE_WOW) - ath6kl_cfg80211_resume(ar); -} - -#else - -void ath6kl_check_wow_status(struct ath6kl *ar) -{ + return ath6kl_hif_suspend(ar); } #endif @@ -2014,14 +1425,14 @@ static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_channel *chan, enum nl80211_channel_type channel_type) { - struct ath6kl_vif *vif = netdev_priv(dev); + struct ath6kl *ar = ath6kl_priv(dev); - if (!ath6kl_cfg80211_ready(vif)) + if (!ath6kl_cfg80211_ready(ar)) return -EIO; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n", __func__, chan->center_freq, chan->hw_value); - vif->next_chan = chan->center_freq; + ar->next_chan = chan->center_freq; return 0; } @@ -2033,10 +1444,9 @@ static bool ath6kl_is_p2p_ie(const u8 *pos) pos[4] == 0x9a && pos[5] == 0x09; } -static int ath6kl_set_ap_probe_resp_ies(struct ath6kl_vif *vif, - const u8 *ies, size_t ies_len) +static int ath6kl_set_ap_probe_resp_ies(struct ath6kl *ar, const u8 *ies, + size_t ies_len) { - struct ath6kl *ar = vif->ar; const u8 *pos; u8 *buf = NULL; size_t len = 0; @@ -2063,8 +1473,8 @@ static int ath6kl_set_ap_probe_resp_ies(struct ath6kl_vif *vif, } } - ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, - WMI_FRAME_PROBE_RESP, buf, len); + ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_RESP, + buf, len); kfree(buf); return ret; } @@ -2073,39 +1483,36 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev, struct beacon_parameters *info, bool add) { struct ath6kl *ar = ath6kl_priv(dev); - struct ath6kl_vif *vif = netdev_priv(dev); struct ieee80211_mgmt *mgmt; u8 *ies; int ies_len; struct wmi_connect_cmd p; int res; - int i, ret; + int i; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: add=%d\n", __func__, add); - if (!ath6kl_cfg80211_ready(vif)) + if (!ath6kl_cfg80211_ready(ar)) return -EIO; - if (vif->next_mode != AP_NETWORK) + if (ar->next_mode != AP_NETWORK) return -EOPNOTSUPP; if (info->beacon_ies) { - res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, - WMI_FRAME_BEACON, + res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_BEACON, info->beacon_ies, info->beacon_ies_len); if (res) return res; } if (info->proberesp_ies) { - res = ath6kl_set_ap_probe_resp_ies(vif, info->proberesp_ies, + res = ath6kl_set_ap_probe_resp_ies(ar, info->proberesp_ies, info->proberesp_ies_len); if (res) return res; } if (info->assocresp_ies) { - res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, - WMI_FRAME_ASSOC_RESP, + res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_ASSOC_RESP, info->assocresp_ies, info->assocresp_ies_len); if (res) @@ -2132,14 +1539,12 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev, if (info->ssid == NULL) return -EINVAL; - memcpy(vif->ssid, info->ssid, info->ssid_len); - vif->ssid_len = info->ssid_len; + memcpy(ar->ssid, info->ssid, info->ssid_len); + ar->ssid_len = info->ssid_len; if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE) return -EOPNOTSUPP; /* TODO */ - ret = ath6kl_set_auth_type(vif, info->auth_type); - if (ret) - return ret; + ar->dot11_auth_mode = OPEN_AUTH; memset(&p, 0, sizeof(p)); @@ -2161,7 +1566,7 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev, } if (p.auth_mode == 0) p.auth_mode = NONE_AUTH; - vif->auth_mode = p.auth_mode; + ar->auth_mode = p.auth_mode; for (i = 0; i < info->crypto.n_ciphers_pairwise; i++) { switch (info->crypto.ciphers_pairwise[i]) { @@ -2175,16 +1580,13 @@ static int ath6kl_ap_beacon(struct 
wiphy *wiphy, struct net_device *dev, case WLAN_CIPHER_SUITE_CCMP: p.prwise_crypto_type |= AES_CRYPT; break; - case WLAN_CIPHER_SUITE_SMS4: - p.prwise_crypto_type |= WAPI_CRYPT; - break; } } if (p.prwise_crypto_type == 0) { p.prwise_crypto_type = NONE_CRYPT; - ath6kl_set_cipher(vif, 0, true); + ath6kl_set_cipher(ar, 0, true); } else if (info->crypto.n_ciphers_pairwise == 1) - ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true); + ath6kl_set_cipher(ar, info->crypto.ciphers_pairwise[0], true); switch (info->crypto.cipher_group) { case WLAN_CIPHER_SUITE_WEP40: @@ -2197,34 +1599,21 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev, case WLAN_CIPHER_SUITE_CCMP: p.grp_crypto_type = AES_CRYPT; break; - case WLAN_CIPHER_SUITE_SMS4: - p.grp_crypto_type = WAPI_CRYPT; - break; default: p.grp_crypto_type = NONE_CRYPT; break; } - ath6kl_set_cipher(vif, info->crypto.cipher_group, false); + ath6kl_set_cipher(ar, info->crypto.cipher_group, false); p.nw_type = AP_NETWORK; - vif->nw_type = vif->next_mode; + ar->nw_type = ar->next_mode; - p.ssid_len = vif->ssid_len; - memcpy(p.ssid, vif->ssid, vif->ssid_len); - p.dot11_auth_mode = vif->dot11_auth_mode; - p.ch = cpu_to_le16(vif->next_chan); + p.ssid_len = ar->ssid_len; + memcpy(p.ssid, ar->ssid, ar->ssid_len); + p.dot11_auth_mode = ar->dot11_auth_mode; + p.ch = cpu_to_le16(ar->next_chan); - if (vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) { - p.nw_subtype = SUBTYPE_P2PGO; - } else { - /* - * Due to firmware limitation, it is not possible to - * do P2P mgmt operations in AP mode - */ - p.nw_subtype = SUBTYPE_NONE; - } - - res = ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, &p); + res = ath6kl_wmi_ap_profile_commit(ar->wmi, &p); if (res < 0) return res; @@ -2246,15 +1635,14 @@ static int ath6kl_set_beacon(struct wiphy *wiphy, struct net_device *dev, static int ath6kl_del_beacon(struct wiphy *wiphy, struct net_device *dev) { struct ath6kl *ar = ath6kl_priv(dev); - struct ath6kl_vif *vif = netdev_priv(dev); - if (vif->nw_type != AP_NETWORK) + if (ar->nw_type != AP_NETWORK) return -EOPNOTSUPP; - if (!test_bit(CONNECTED, &vif->flags)) + if (!test_bit(CONNECTED, &ar->flag)) return -ENOTCONN; - ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx); - clear_bit(CONNECTED, &vif->flags); + ath6kl_wmi_disconnect_cmd(ar->wmi); + clear_bit(CONNECTED, &ar->flag); return 0; } @@ -2263,9 +1651,8 @@ static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev, u8 *mac, struct station_parameters *params) { struct ath6kl *ar = ath6kl_priv(dev); - struct ath6kl_vif *vif = netdev_priv(dev); - if (vif->nw_type != AP_NETWORK) + if (ar->nw_type != AP_NETWORK) return -EOPNOTSUPP; /* Use this only for authorizing/unauthorizing a station */ @@ -2273,10 +1660,10 @@ static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev, return -EOPNOTSUPP; if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED)) - return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx, - WMI_AP_MLME_AUTHORIZE, mac, 0); - return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx, - WMI_AP_MLME_UNAUTHORIZE, mac, 0); + return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_AUTHORIZE, + mac, 0); + return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_UNAUTHORIZE, mac, + 0); } static int ath6kl_remain_on_channel(struct wiphy *wiphy, @@ -2287,20 +1674,13 @@ static int ath6kl_remain_on_channel(struct wiphy *wiphy, u64 *cookie) { struct ath6kl *ar = ath6kl_priv(dev); - struct ath6kl_vif *vif = netdev_priv(dev); - u32 id; /* TODO: if already pending or 
ongoing remain-on-channel, * return -EBUSY */ - id = ++vif->last_roc_id; - if (id == 0) { - /* Do not use 0 as the cookie value */ - id = ++vif->last_roc_id; - } - *cookie = id; + *cookie = 1; /* only a single pending request is supported */ - return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx, - chan->center_freq, duration); + return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, chan->center_freq, + duration); } static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy, @@ -2308,20 +1688,16 @@ static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy, u64 cookie) { struct ath6kl *ar = ath6kl_priv(dev); - struct ath6kl_vif *vif = netdev_priv(dev); - if (cookie != vif->last_roc_id) + if (cookie != 1) return -ENOENT; - vif->last_cancel_roc_id = cookie; - return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx); + return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi); } -static int ath6kl_send_go_probe_resp(struct ath6kl_vif *vif, - const u8 *buf, size_t len, - unsigned int freq) +static int ath6kl_send_go_probe_resp(struct ath6kl *ar, const u8 *buf, + size_t len, unsigned int freq) { - struct ath6kl *ar = vif->ar; const u8 *pos; u8 *p2p; int p2p_len; @@ -2348,8 +1724,8 @@ static int ath6kl_send_go_probe_resp(struct ath6kl_vif *vif, pos += 2 + pos[1]; } - ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, vif->fw_vif_idx, freq, - mgmt->da, p2p, p2p_len); + ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, freq, mgmt->da, + p2p, p2p_len); kfree(p2p); return ret; } @@ -2358,61 +1734,44 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_channel *chan, bool offchan, enum nl80211_channel_type channel_type, bool channel_type_valid, unsigned int wait, - const u8 *buf, size_t len, bool no_cck, - bool dont_wait_for_ack, u64 *cookie) + const u8 *buf, size_t len, bool no_cck, u64 *cookie) { struct ath6kl *ar = ath6kl_priv(dev); - struct ath6kl_vif *vif = netdev_priv(dev); u32 id; const struct ieee80211_mgmt *mgmt; mgmt = (const struct ieee80211_mgmt *) buf; if (buf + len >= mgmt->u.probe_resp.variable && - vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) && + ar->nw_type == AP_NETWORK && test_bit(CONNECTED, &ar->flag) && ieee80211_is_probe_resp(mgmt->frame_control)) { /* * Send Probe Response frame in AP mode using a separate WMI * command to allow the target to fill in the generic IEs. */ *cookie = 0; /* TX status not supported */ - return ath6kl_send_go_probe_resp(vif, buf, len, + return ath6kl_send_go_probe_resp(ar, buf, len, chan->center_freq); } - id = vif->send_action_id++; + id = ar->send_action_id++; if (id == 0) { /* * 0 is a reserved value in the WMI command and shall not be * used for the command. 
*/ - id = vif->send_action_id++; + id = ar->send_action_id++; } *cookie = id; - - if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, - ar->fw_capabilities)) { - /* - * If capable of doing P2P mgmt operations using - * station interface, send additional information like - * supported rates to advertise and xmit rates for - * probe requests - */ - return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id, - chan->center_freq, wait, - buf, len, no_cck); - } else { - return ath6kl_wmi_send_action_cmd(ar->wmi, vif->fw_vif_idx, id, - chan->center_freq, wait, - buf, len); - } + return ath6kl_wmi_send_action_cmd(ar->wmi, id, chan->center_freq, wait, + buf, len); } static void ath6kl_mgmt_frame_register(struct wiphy *wiphy, struct net_device *dev, u16 frame_type, bool reg) { - struct ath6kl_vif *vif = netdev_priv(dev); + struct ath6kl *ar = ath6kl_priv(dev); ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: frame_type=0x%x reg=%d\n", __func__, frame_type, reg); @@ -2422,92 +1781,8 @@ static void ath6kl_mgmt_frame_register(struct wiphy *wiphy, * we cannot send WMI_PROBE_REQ_REPORT_CMD here. Instead, we * hardcode target to report Probe Request frames all the time. */ - vif->probe_req_report = reg; - } -} - -static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy, - struct net_device *dev, - struct cfg80211_sched_scan_request *request) -{ - struct ath6kl *ar = ath6kl_priv(dev); - struct ath6kl_vif *vif = netdev_priv(dev); - u16 interval; - int ret; - u8 i; - - if (ar->state != ATH6KL_STATE_ON) - return -EIO; - - if (vif->sme_state != SME_DISCONNECTED) - return -EBUSY; - - for (i = 0; i < ar->wiphy->max_sched_scan_ssids; i++) { - ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, - i, DISABLE_SSID_FLAG, - 0, NULL); + ar->probe_req_report = reg; } - - /* fw uses seconds, also make sure that it's >0 */ - interval = max_t(u16, 1, request->interval / 1000); - - ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, - interval, interval, - 10, 0, 0, 0, 3, 0, 0, 0); - - if (request->n_ssids && request->ssids[0].ssid_len) { - for (i = 0; i < request->n_ssids; i++) { - ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, - i, SPECIFIC_SSID_FLAG, - request->ssids[i].ssid_len, - request->ssids[i].ssid); - } - } - - ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx, - ATH6KL_WOW_MODE_ENABLE, - WOW_FILTER_SSID, - WOW_HOST_REQ_DELAY); - if (ret) { - ath6kl_warn("Failed to enable wow with ssid filter: %d\n", ret); - return ret; - } - - /* this also clears IE in fw if it's not set */ - ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, - WMI_FRAME_PROBE_REQ, - request->ie, request->ie_len); - if (ret) { - ath6kl_warn("Failed to set probe request IE for scheduled scan: %d", - ret); - return ret; - } - - ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx, - ATH6KL_HOST_MODE_ASLEEP); - if (ret) { - ath6kl_warn("Failed to enable host sleep mode for sched scan: %d\n", - ret); - return ret; - } - - ar->state = ATH6KL_STATE_SCHED_SCAN; - - return ret; -} - -static int ath6kl_cfg80211_sscan_stop(struct wiphy *wiphy, - struct net_device *dev) -{ - struct ath6kl_vif *vif = netdev_priv(dev); - bool stopped; - - stopped = __ath6kl_cfg80211_sscan_stop(vif); - - if (!stopped) - return -EIO; - - return 0; } static const struct ieee80211_txrx_stypes @@ -2533,8 +1808,6 @@ ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = { }; static struct cfg80211_ops ath6kl_cfg80211_ops = { - .add_virtual_intf = ath6kl_cfg80211_add_iface, - .del_virtual_intf = ath6kl_cfg80211_del_iface, .change_virtual_intf = 
ath6kl_cfg80211_change_iface, .scan = ath6kl_cfg80211_scan, .connect = ath6kl_cfg80211_connect, @@ -2555,8 +1828,7 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = { .flush_pmksa = ath6kl_flush_pmksa, CFG80211_TESTMODE_CMD(ath6kl_tm_cmd) #ifdef CONFIG_PM - .suspend = __ath6kl_cfg80211_suspend, - .resume = __ath6kl_cfg80211_resume, + .suspend = ar6k_cfg80211_suspend, #endif .set_channel = ath6kl_set_channel, .add_beacon = ath6kl_add_beacon, @@ -2567,277 +1839,78 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = { .cancel_remain_on_channel = ath6kl_cancel_remain_on_channel, .mgmt_tx = ath6kl_mgmt_tx, .mgmt_frame_register = ath6kl_mgmt_frame_register, - .sched_scan_start = ath6kl_cfg80211_sscan_start, - .sched_scan_stop = ath6kl_cfg80211_sscan_stop, }; -void ath6kl_cfg80211_stop(struct ath6kl_vif *vif) -{ - ath6kl_cfg80211_sscan_disable(vif); - - switch (vif->sme_state) { - case SME_DISCONNECTED: - break; - case SME_CONNECTING: - cfg80211_connect_result(vif->ndev, vif->bssid, NULL, 0, - NULL, 0, - WLAN_STATUS_UNSPECIFIED_FAILURE, - GFP_KERNEL); - break; - case SME_CONNECTED: - cfg80211_disconnected(vif->ndev, 0, NULL, 0, GFP_KERNEL); - break; - } - - if (test_bit(CONNECTED, &vif->flags) || - test_bit(CONNECT_PEND, &vif->flags)) - ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx); - - vif->sme_state = SME_DISCONNECTED; - clear_bit(CONNECTED, &vif->flags); - clear_bit(CONNECT_PEND, &vif->flags); - - /* disable scanning */ - if (ath6kl_wmi_scanparams_cmd(vif->ar->wmi, vif->fw_vif_idx, 0xFFFF, - 0, 0, 0, 0, 0, 0, 0, 0, 0) != 0) - ath6kl_warn("failed to disable scan during stop\n"); - - ath6kl_cfg80211_scan_complete_event(vif, true); -} - -void ath6kl_cfg80211_stop_all(struct ath6kl *ar) +struct wireless_dev *ath6kl_cfg80211_init(struct device *dev) { - struct ath6kl_vif *vif; - - vif = ath6kl_vif_first(ar); - if (!vif) { - /* save the current power mode before enabling power save */ - ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode; + int ret = 0; + struct wireless_dev *wdev; + struct ath6kl *ar; - if (ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER) != 0) - ath6kl_warn("ath6kl_deep_sleep_enable: " - "wmi_powermode_cmd failed\n"); - return; + wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); + if (!wdev) { + ath6kl_err("couldn't allocate wireless device\n"); + return NULL; } - /* - * FIXME: we should take ar->list_lock to protect changes in the - * vif_list, but that's not trivial to do as ath6kl_cfg80211_stop() - * sleeps. 
- */ - list_for_each_entry(vif, &ar->vif_list, list) - ath6kl_cfg80211_stop(vif); -} - -struct ath6kl *ath6kl_core_alloc(struct device *dev) -{ - struct ath6kl *ar; - struct wiphy *wiphy; - u8 ctr; - /* create a new wiphy for use with cfg80211 */ - wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl)); - - if (!wiphy) { + wdev->wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl)); + if (!wdev->wiphy) { ath6kl_err("couldn't allocate wiphy device\n"); + kfree(wdev); return NULL; } - ar = wiphy_priv(wiphy); + ar = wiphy_priv(wdev->wiphy); ar->p2p = !!ath6kl_p2p; - ar->wiphy = wiphy; - ar->dev = dev; - ar->vif_max = 1; + wdev->wiphy->mgmt_stypes = ath6kl_mgmt_stypes; - ar->max_norm_iface = 1; - - spin_lock_init(&ar->lock); - spin_lock_init(&ar->mcastpsq_lock); - spin_lock_init(&ar->list_lock); - - init_waitqueue_head(&ar->event_wq); - sema_init(&ar->sem, 1); - - INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue); - INIT_LIST_HEAD(&ar->vif_list); - - clear_bit(WMI_ENABLED, &ar->flag); - clear_bit(SKIP_SCAN, &ar->flag); - clear_bit(DESTROY_IN_PROGRESS, &ar->flag); - - ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL; - ar->listen_intvl_b = 0; - ar->tx_pwr = 0; - - ar->intra_bss = 1; - ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD; - - ar->state = ATH6KL_STATE_OFF; - - memset((u8 *)ar->sta_list, 0, - AP_MAX_NUM_STA * sizeof(struct ath6kl_sta)); - - /* Init the PS queues */ - for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) { - spin_lock_init(&ar->sta_list[ctr].psq_lock); - skb_queue_head_init(&ar->sta_list[ctr].psq); - } - - skb_queue_head_init(&ar->mcastpsq); - - memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3); - - return ar; -} - -int ath6kl_register_ieee80211_hw(struct ath6kl *ar) -{ - struct wiphy *wiphy = ar->wiphy; - int ret; - - wiphy->mgmt_stypes = ath6kl_mgmt_stypes; - - wiphy->max_remain_on_channel_duration = 5000; + wdev->wiphy->max_remain_on_channel_duration = 5000; /* set device pointer for wiphy */ - set_wiphy_dev(wiphy, ar->dev); + set_wiphy_dev(wdev->wiphy, dev); - wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_ADHOC) | - BIT(NL80211_IFTYPE_AP); + wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP); if (ar->p2p) { - wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) | - BIT(NL80211_IFTYPE_P2P_CLIENT); + wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) | + BIT(NL80211_IFTYPE_P2P_CLIENT); } - /* max num of ssids that can be probed during scanning */ - wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX; - wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */ - wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz; - wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz; - wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; - - wiphy->cipher_suites = cipher_suites; - wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); - - wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | - WIPHY_WOWLAN_DISCONNECT | - WIPHY_WOWLAN_GTK_REKEY_FAILURE | - WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | - WIPHY_WOWLAN_EAP_IDENTITY_REQ | - WIPHY_WOWLAN_4WAY_HANDSHAKE; - wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST; - wiphy->wowlan.pattern_min_len = 1; - wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE; - - wiphy->max_sched_scan_ssids = 10; - - ret = wiphy_register(wiphy); - if (ret < 0) { - ath6kl_err("couldn't register wiphy device\n"); - return ret; - } + wdev->wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX; + wdev->wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? 
*/ + wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz; + wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz; + wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; - return 0; -} + wdev->wiphy->cipher_suites = cipher_suites; + wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); -static int ath6kl_init_if_data(struct ath6kl_vif *vif) -{ - vif->aggr_cntxt = aggr_init(vif->ndev); - if (!vif->aggr_cntxt) { - ath6kl_err("failed to initialize aggr\n"); - return -ENOMEM; + ret = wiphy_register(wdev->wiphy); + if (ret < 0) { + ath6kl_err("couldn't register wiphy device\n"); + wiphy_free(wdev->wiphy); + kfree(wdev); + return NULL; } - setup_timer(&vif->disconnect_timer, disconnect_timer_handler, - (unsigned long) vif->ndev); - setup_timer(&vif->sched_scan_timer, ath6kl_wmi_sscan_timer, - (unsigned long) vif); - - set_bit(WMM_ENABLED, &vif->flags); - spin_lock_init(&vif->if_lock); - - return 0; -} - -void ath6kl_deinit_if_data(struct ath6kl_vif *vif) -{ - struct ath6kl *ar = vif->ar; - - aggr_module_destroy(vif->aggr_cntxt); - - ar->avail_idx_map |= BIT(vif->fw_vif_idx); - - if (vif->nw_type == ADHOC_NETWORK) - ar->ibss_if_active = false; - - unregister_netdevice(vif->ndev); - - ar->num_vif--; + return wdev; } -struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name, - enum nl80211_iftype type, u8 fw_vif_idx, - u8 nw_type) +void ath6kl_cfg80211_deinit(struct ath6kl *ar) { - struct net_device *ndev; - struct ath6kl_vif *vif; - - ndev = alloc_netdev(sizeof(*vif), name, ether_setup); - if (!ndev) - return NULL; - - vif = netdev_priv(ndev); - ndev->ieee80211_ptr = &vif->wdev; - vif->wdev.wiphy = ar->wiphy; - vif->ar = ar; - vif->ndev = ndev; - SET_NETDEV_DEV(ndev, wiphy_dev(vif->wdev.wiphy)); - vif->wdev.netdev = ndev; - vif->wdev.iftype = type; - vif->fw_vif_idx = fw_vif_idx; - vif->nw_type = vif->next_mode = nw_type; - - memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN); - if (fw_vif_idx != 0) - ndev->dev_addr[0] = (ndev->dev_addr[0] ^ (1 << fw_vif_idx)) | - 0x2; - - init_netdev(ndev); - - ath6kl_init_control_info(vif); - - /* TODO: Pass interface specific pointer instead of ar */ - if (ath6kl_init_if_data(vif)) - goto err; - - if (register_netdevice(ndev)) - goto err; + struct wireless_dev *wdev = ar->wdev; - ar->avail_idx_map &= ~BIT(fw_vif_idx); - vif->sme_state = SME_DISCONNECTED; - set_bit(WLAN_ENABLED, &vif->flags); - ar->wlan_pwr_state = WLAN_POWER_STATE_ON; - set_bit(NETDEV_REGISTERED, &vif->flags); - - if (type == NL80211_IFTYPE_ADHOC) - ar->ibss_if_active = true; - - spin_lock_bh(&ar->list_lock); - list_add_tail(&vif->list, &ar->vif_list); - spin_unlock_bh(&ar->list_lock); - - return ndev; + if (ar->scan_req) { + cfg80211_scan_done(ar->scan_req, true); + ar->scan_req = NULL; + } -err: - aggr_module_destroy(vif->aggr_cntxt); - free_netdev(ndev); - return NULL; -} + if (!wdev) + return; -void ath6kl_deinit_ieee80211_hw(struct ath6kl *ar) -{ - wiphy_unregister(ar->wiphy); - wiphy_free(ar->wiphy); + wiphy_unregister(wdev->wiphy); + wiphy_free(wdev->wiphy); + kfree(wdev); } diff --git a/trunk/drivers/net/wireless/ath/ath6kl/cfg80211.h b/trunk/drivers/net/wireless/ath/ath6kl/cfg80211.h index 81f20a572315..a84adc249c61 100644 --- a/trunk/drivers/net/wireless/ath/ath6kl/cfg80211.h +++ b/trunk/drivers/net/wireless/ath/ath6kl/cfg80211.h @@ -17,43 +17,23 @@ #ifndef ATH6KL_CFG80211_H #define ATH6KL_CFG80211_H -enum ath6kl_cfg_suspend_mode { - ATH6KL_CFG_SUSPEND_DEEPSLEEP, - ATH6KL_CFG_SUSPEND_CUTPOWER, - ATH6KL_CFG_SUSPEND_WOW, - ATH6KL_CFG_SUSPEND_SCHED_SCAN, -}; 
+struct wireless_dev *ath6kl_cfg80211_init(struct device *dev); +void ath6kl_cfg80211_deinit(struct ath6kl *ar); -struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name, - enum nl80211_iftype type, - u8 fw_vif_idx, u8 nw_type); -int ath6kl_register_ieee80211_hw(struct ath6kl *ar); -struct ath6kl *ath6kl_core_alloc(struct device *dev); -void ath6kl_deinit_ieee80211_hw(struct ath6kl *ar); +void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status); -void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted); - -void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel, +void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel, u8 *bssid, u16 listen_intvl, u16 beacon_intvl, enum network_type nw_type, u8 beacon_ie_len, u8 assoc_req_len, u8 assoc_resp_len, u8 *assoc_info); -void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason, +void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid, u8 assoc_resp_len, u8 *assoc_info, u16 proto_reason); -void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, +void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast); -int ath6kl_cfg80211_suspend(struct ath6kl *ar, - enum ath6kl_cfg_suspend_mode mode, - struct cfg80211_wowlan *wow); - -int ath6kl_cfg80211_resume(struct ath6kl *ar); - -void ath6kl_cfg80211_stop(struct ath6kl_vif *vif); -void ath6kl_cfg80211_stop_all(struct ath6kl *ar); - #endif /* ATH6KL_CFG80211_H */ diff --git a/trunk/drivers/net/wireless/ath/ath6kl/common.h b/trunk/drivers/net/wireless/ath/ath6kl/common.h index bfd6597763da..b92f0e5d2336 100644 --- a/trunk/drivers/net/wireless/ath/ath6kl/common.h +++ b/trunk/drivers/net/wireless/ath/ath6kl/common.h @@ -23,6 +23,8 @@ extern int ath6kl_printk(const char *level, const char *fmt, ...); +#define A_CACHE_LINE_PAD 128 + /* * Reflects the version of binary interface exposed by ATH6KL target * firmware. 
Needs to be incremented by 1 for any change in the firmware @@ -71,16 +73,25 @@ enum crypto_type { WEP_CRYPT = 0x02, TKIP_CRYPT = 0x04, AES_CRYPT = 0x08, - WAPI_CRYPT = 0x10, }; struct htc_endpoint_credit_dist; struct ath6kl; enum htc_credit_dist_reason; -struct ath6kl_htc_credit_info; +struct htc_credit_state_info; +int ath6k_setup_credit_dist(void *htc_handle, + struct htc_credit_state_info *cred_info); +void ath6k_credit_distribute(struct htc_credit_state_info *cred_inf, + struct list_head *epdist_list, + enum htc_credit_dist_reason reason); +void ath6k_credit_init(struct htc_credit_state_info *cred_inf, + struct list_head *ep_list, + int tot_credits); +void ath6k_seek_credits(struct htc_credit_state_info *cred_inf, + struct htc_endpoint_credit_dist *ep_dist); struct ath6kl *ath6kl_core_alloc(struct device *sdev); int ath6kl_core_init(struct ath6kl *ar); -void ath6kl_core_cleanup(struct ath6kl *ar); +int ath6kl_unavail_ev(struct ath6kl *ar); struct sk_buff *ath6kl_buf_alloc(int size); #endif /* COMMON_H */ diff --git a/trunk/drivers/net/wireless/ath/ath6kl/core.h b/trunk/drivers/net/wireless/ath/ath6kl/core.h index c863a28f2e0c..6d8a4845baaf 100644 --- a/trunk/drivers/net/wireless/ath/ath6kl/core.h +++ b/trunk/drivers/net/wireless/ath/ath6kl/core.h @@ -70,20 +70,10 @@ enum ath6kl_fw_ie_type { ATH6KL_FW_IE_RESERVED_RAM_SIZE = 5, ATH6KL_FW_IE_CAPABILITIES = 6, ATH6KL_FW_IE_PATCH_ADDR = 7, - ATH6KL_FW_IE_BOARD_ADDR = 8, - ATH6KL_FW_IE_VIF_MAX = 9, }; enum ath6kl_fw_capability { ATH6KL_FW_CAPABILITY_HOST_P2P = 0, - ATH6KL_FW_CAPABILITY_SCHED_SCAN = 1, - - /* - * Firmware is capable of supporting P2P mgmt operations on a - * station interface. After group formation, the station - * interface will become a P2P client/GO interface as the case may be - */ - ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, /* this needs to be last */ ATH6KL_FW_CAPABILITY_MAX, @@ -98,47 +88,37 @@ struct ath6kl_fw_ie { }; /* AR6003 1.0 definitions */ -#define AR6003_HW_1_0_VERSION 0x300002ba +#define AR6003_REV1_VERSION 0x300002ba /* AR6003 2.0 definitions */ -#define AR6003_HW_2_0_VERSION 0x30000384 -#define AR6003_HW_2_0_PATCH_DOWNLOAD_ADDRESS 0x57e910 -#define AR6003_HW_2_0_OTP_FILE "ath6k/AR6003/hw2.0/otp.bin.z77" -#define AR6003_HW_2_0_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athwlan.bin.z77" -#define AR6003_HW_2_0_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athtcmd_ram.bin" -#define AR6003_HW_2_0_PATCH_FILE "ath6k/AR6003/hw2.0/data.patch.bin" -#define AR6003_HW_2_0_FIRMWARE_2_FILE "ath6k/AR6003/hw2.0/fw-2.bin" -#define AR6003_HW_2_0_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.bin" -#define AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE \ - "ath6k/AR6003/hw2.0/bdata.SD31.bin" +#define AR6003_REV2_VERSION 0x30000384 +#define AR6003_REV2_PATCH_DOWNLOAD_ADDRESS 0x57e910 +#define AR6003_REV2_OTP_FILE "ath6k/AR6003/hw2.0/otp.bin.z77" +#define AR6003_REV2_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athwlan.bin.z77" +#define AR6003_REV2_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athtcmd_ram.bin" +#define AR6003_REV2_PATCH_FILE "ath6k/AR6003/hw2.0/data.patch.bin" +#define AR6003_REV2_FIRMWARE_2_FILE "ath6k/AR6003/hw2.0/fw-2.bin" +#define AR6003_REV2_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.bin" +#define AR6003_REV2_DEFAULT_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.SD31.bin" /* AR6003 3.0 definitions */ -#define AR6003_HW_2_1_1_VERSION 0x30000582 -#define AR6003_HW_2_1_1_OTP_FILE "ath6k/AR6003/hw2.1.1/otp.bin" -#define AR6003_HW_2_1_1_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athwlan.bin" -#define AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE \ - 
"ath6k/AR6003/hw2.1.1/athtcmd_ram.bin" -#define AR6003_HW_2_1_1_PATCH_FILE "ath6k/AR6003/hw2.1.1/data.patch.bin" -#define AR6003_HW_2_1_1_FIRMWARE_2_FILE "ath6k/AR6003/hw2.1.1/fw-2.bin" -#define AR6003_HW_2_1_1_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.bin" -#define AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE \ - "ath6k/AR6003/hw2.1.1/bdata.SD31.bin" +#define AR6003_REV3_VERSION 0x30000582 +#define AR6003_REV3_OTP_FILE "ath6k/AR6003/hw2.1.1/otp.bin" +#define AR6003_REV3_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athwlan.bin" +#define AR6003_REV3_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athtcmd_ram.bin" +#define AR6003_REV3_PATCH_FILE "ath6k/AR6003/hw2.1.1/data.patch.bin" +#define AR6003_REV3_FIRMWARE_2_FILE "ath6k/AR6003/hw2.1.1/fw-2.bin" +#define AR6003_REV3_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.bin" +#define AR6003_REV3_DEFAULT_BOARD_DATA_FILE \ + "ath6k/AR6003/hw2.1.1/bdata.SD31.bin" /* AR6004 1.0 definitions */ -#define AR6004_HW_1_0_VERSION 0x30000623 -#define AR6004_HW_1_0_FIRMWARE_2_FILE "ath6k/AR6004/hw1.0/fw-2.bin" -#define AR6004_HW_1_0_FIRMWARE_FILE "ath6k/AR6004/hw1.0/fw.ram.bin" -#define AR6004_HW_1_0_BOARD_DATA_FILE "ath6k/AR6004/hw1.0/bdata.bin" -#define AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE \ - "ath6k/AR6004/hw1.0/bdata.DB132.bin" - -/* AR6004 1.1 definitions */ -#define AR6004_HW_1_1_VERSION 0x30000001 -#define AR6004_HW_1_1_FIRMWARE_2_FILE "ath6k/AR6004/hw1.1/fw-2.bin" -#define AR6004_HW_1_1_FIRMWARE_FILE "ath6k/AR6004/hw1.1/fw.ram.bin" -#define AR6004_HW_1_1_BOARD_DATA_FILE "ath6k/AR6004/hw1.1/bdata.bin" -#define AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE \ - "ath6k/AR6004/hw1.1/bdata.DB132.bin" +#define AR6004_REV1_VERSION 0x30000623 +#define AR6004_REV1_FIRMWARE_FILE "ath6k/AR6004/hw6.1/fw.ram.bin" +#define AR6004_REV1_FIRMWARE_2_FILE "ath6k/AR6004/hw6.1/fw-2.bin" +#define AR6004_REV1_BOARD_DATA_FILE "ath6k/AR6004/hw6.1/bdata.bin" +#define AR6004_REV1_DEFAULT_BOARD_DATA_FILE "ath6k/AR6004/hw6.1/bdata.DB132.bin" +#define AR6004_REV1_EPPING_FIRMWARE_FILE "ath6k/AR6004/hw6.1/endpointping.bin" /* Per STA data, used in AP mode */ #define STA_PS_AWAKE BIT(0) @@ -186,7 +166,6 @@ struct ath6kl_fw_ie { #define ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN BIT(1) #define ATH6KL_CONF_ENABLE_11N BIT(2) #define ATH6KL_CONF_ENABLE_TX_BURST BIT(3) -#define ATH6KL_CONF_SUSPEND_CUTPOWER BIT(4) enum wlan_low_pwr_state { WLAN_POWER_STATE_ON, @@ -292,8 +271,6 @@ struct ath6kl_bmi { u32 cmd_credits; bool done_sent; u8 *cmd_buf; - u32 max_data_size; - u32 max_cmd_size; }; struct target_stats { @@ -403,42 +380,40 @@ struct ath6kl_req_key { u8 key_len; }; -enum ath6kl_hif_type { - ATH6KL_HIF_TYPE_SDIO, - ATH6KL_HIF_TYPE_USB, -}; - -/* - * Driver's maximum limit, note that some firmwares support only one vif - * and the runtime (current) limit must be checked from ar->vif_max. 
- */ -#define ATH6KL_VIF_MAX 3 - -/* vif flags info */ -enum ath6kl_vif_state { - CONNECTED, - CONNECT_PEND, - WMM_ENABLED, - NETQ_STOPPED, - DTIM_EXPIRED, - NETDEV_REGISTERED, - CLEAR_BSSFILTER_ON_BEACON, - DTIM_PERIOD_AVAIL, - WLAN_ENABLED, - STATS_UPDATE_PEND, -}; +/* Flag info */ +#define WMI_ENABLED 0 +#define WMI_READY 1 +#define CONNECTED 2 +#define STATS_UPDATE_PEND 3 +#define CONNECT_PEND 4 +#define WMM_ENABLED 5 +#define NETQ_STOPPED 6 +#define WMI_CTRL_EP_FULL 7 +#define DTIM_EXPIRED 8 +#define DESTROY_IN_PROGRESS 9 +#define NETDEV_REGISTERED 10 +#define SKIP_SCAN 11 +#define WLAN_ENABLED 12 +#define TESTMODE 13 +#define CLEAR_BSSFILTER_ON_BEACON 14 +#define DTIM_PERIOD_AVAIL 15 -struct ath6kl_vif { - struct list_head list; - struct wireless_dev wdev; - struct net_device *ndev; - struct ath6kl *ar; - /* Lock to protect vif specific net_stats and flags */ - spinlock_t if_lock; - u8 fw_vif_idx; - unsigned long flags; +struct ath6kl { + struct device *dev; + struct net_device *net_dev; + struct ath6kl_bmi bmi; + const struct ath6kl_hif_ops *hif_ops; + struct wmi *wmi; + int tx_pending[ENDPOINT_MAX]; + int total_tx_data_pend; + struct htc_target *htc_target; + void *hif_priv; + spinlock_t lock; + struct semaphore sem; int ssid_len; u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 next_mode; + u8 nw_type; u8 dot11_auth_mode; u8 auth_mode; u8 prwise_crypto; @@ -446,91 +421,21 @@ struct ath6kl_vif { u8 grp_crypto; u8 grp_crypto_len; u8 def_txkey_index; - u8 next_mode; - u8 nw_type; + struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1]; u8 bssid[ETH_ALEN]; u8 req_bssid[ETH_ALEN]; u16 ch_hint; u16 bss_ch; - struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1]; - struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1]; - struct aggr_info *aggr_cntxt; - - struct timer_list disconnect_timer; - struct timer_list sched_scan_timer; - - struct cfg80211_scan_request *scan_req; - enum sme_state sme_state; - int reconnect_flag; - u32 last_roc_id; - u32 last_cancel_roc_id; - u32 send_action_id; - bool probe_req_report; - u16 next_chan; - u16 assoc_bss_beacon_int; - u8 assoc_bss_dtim_period; - struct net_device_stats net_stats; - struct target_stats target_stats; -}; - -#define WOW_LIST_ID 0 -#define WOW_HOST_REQ_DELAY 500 /* ms */ - -#define ATH6KL_SCHED_SCAN_RESULT_DELAY 5000 /* ms */ - -/* Flag info */ -enum ath6kl_dev_state { - WMI_ENABLED, - WMI_READY, - WMI_CTRL_EP_FULL, - TESTMODE, - DESTROY_IN_PROGRESS, - SKIP_SCAN, - ROAM_TBL_PEND, - FIRST_BOOT, -}; - -enum ath6kl_state { - ATH6KL_STATE_OFF, - ATH6KL_STATE_ON, - ATH6KL_STATE_DEEPSLEEP, - ATH6KL_STATE_CUTPOWER, - ATH6KL_STATE_WOW, - ATH6KL_STATE_SCHED_SCAN, -}; - -struct ath6kl { - struct device *dev; - struct wiphy *wiphy; - - enum ath6kl_state state; - - struct ath6kl_bmi bmi; - const struct ath6kl_hif_ops *hif_ops; - struct wmi *wmi; - int tx_pending[ENDPOINT_MAX]; - int total_tx_data_pend; - struct htc_target *htc_target; - enum ath6kl_hif_type hif_type; - void *hif_priv; - struct list_head vif_list; - /* Lock to avoid race in vif_list entries among add/del/traverse */ - spinlock_t list_lock; - u8 num_vif; - unsigned int vif_max; - u8 max_norm_iface; - u8 avail_idx_map; - spinlock_t lock; - struct semaphore sem; u16 listen_intvl_b; u16 listen_intvl_t; u8 lrssi_roam_threshold; struct ath6kl_version version; u32 target_type; u8 tx_pwr; + struct net_device_stats net_stats; + struct target_stats target_stats; struct ath6kl_node_mapping node_map[MAX_NODE_NUM]; u8 ibss_ps_enable; - bool ibss_if_active; u8 node_num; u8 next_ep_id; struct ath6kl_cookie 
*cookie_list; @@ -541,7 +446,7 @@ struct ath6kl { u8 hiac_stream_active_pri; u8 ep2ac_map[ENDPOINT_MAX]; enum htc_endpoint_id ctrl_ep; - struct ath6kl_htc_credit_info credit_state_info; + struct htc_credit_state_info credit_state_info; u32 connect_ctrl_flags; u32 user_key_ctrl; u8 usr_bss_filter; @@ -551,37 +456,30 @@ struct ath6kl { struct sk_buff_head mcastpsq; spinlock_t mcastpsq_lock; u8 intra_bss; + struct aggr_info *aggr_cntxt; struct wmi_ap_mode_stat ap_stats; u8 ap_country_code[3]; struct list_head amsdu_rx_buffer_queue; + struct timer_list disconnect_timer; u8 rx_meta_ver; + struct wireless_dev *wdev; + struct cfg80211_scan_request *scan_req; + struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1]; + enum sme_state sme_state; enum wlan_low_pwr_state wlan_pwr_state; - u8 mac_addr[ETH_ALEN]; + struct wmi_scan_params_cmd sc_params; #define AR_MCAST_FILTER_MAC_ADDR_SIZE 4 struct { void *rx_report; size_t rx_report_len; } tm; - struct ath6kl_hw { - u32 id; - const char *name; + struct { u32 dataset_patch_addr; u32 app_load_addr; u32 app_start_override_addr; u32 board_ext_data_addr; u32 reserved_ram_size; - u32 board_addr; - u32 refclk_hz; - u32 uarttx_pin; - - const char *fw_otp; - const char *fw; - const char *fw_tcmd; - const char *fw_patch; - const char *fw_api2; - const char *fw_board; - const char *fw_default_board; } hw; u16 conf_flags; @@ -589,6 +487,7 @@ struct ath6kl { struct ath6kl_mbox_info mbox_info; struct ath6kl_cookie cookie_mem[MAX_COOKIE_NUM]; + int reconnect_flag; unsigned long flag; u8 *fw_board; @@ -609,7 +508,13 @@ struct ath6kl { struct dentry *debugfs_phy; + u32 send_action_id; + bool probe_req_report; + u16 next_chan; + bool p2p; + u16 assoc_bss_beacon_int; + u8 assoc_bss_dtim_period; #ifdef CONFIG_ATH6KL_DEBUG struct { @@ -624,19 +529,23 @@ struct ath6kl { struct { unsigned int invalid_rate; } war_stats; - - u8 *roam_tbl; - unsigned int roam_tbl_len; - - u8 keepalive; - u8 disc_timeout; } debug; #endif /* CONFIG_ATH6KL_DEBUG */ }; -static inline struct ath6kl *ath6kl_priv(struct net_device *dev) +static inline void *ath6kl_priv(struct net_device *dev) { - return ((struct ath6kl_vif *) netdev_priv(dev))->ar; + return wdev_priv(dev->ieee80211_ptr); +} + +static inline void ath6kl_deposit_credit_to_ep(struct htc_credit_state_info + *cred_info, + struct htc_endpoint_credit_dist + *ep_dist, int credits) +{ + ep_dist->credits += credits; + ep_dist->cred_assngd += credits; + cred_info->cur_free_credits -= credits; } static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar, @@ -652,6 +561,7 @@ static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar, return addr; } +void ath6kl_destroy(struct net_device *dev, unsigned int unregister); int ath6kl_configure_target(struct ath6kl *ar); void ath6kl_detect_error(unsigned long ptr); void disconnect_timer_handler(unsigned long ptr); @@ -669,8 +579,10 @@ int ath6kl_diag_write(struct ath6kl *ar, u32 address, void *data, u32 length); int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value); int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length); int ath6kl_read_fwlogs(struct ath6kl *ar); -void ath6kl_init_profile_info(struct ath6kl_vif *vif); +void ath6kl_init_profile_info(struct ath6kl *ar); void ath6kl_tx_data_cleanup(struct ath6kl *ar); +void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile, + bool get_dbglogs); struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar); void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie); @@ -686,49 +598,40 @@ struct 
htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target, void aggr_module_destroy(struct aggr_info *aggr_info); void aggr_reset_state(struct aggr_info *aggr_info); -struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 * node_addr); +struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 * node_addr); struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid); void ath6kl_ready_event(void *devt, u8 * datap, u32 sw_ver, u32 abi_ver); int ath6kl_control_tx(void *devt, struct sk_buff *skb, enum htc_endpoint_id eid); -void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, +void ath6kl_connect_event(struct ath6kl *ar, u16 channel, u8 *bssid, u16 listen_int, u16 beacon_int, enum network_type net_type, u8 beacon_ie_len, u8 assoc_req_len, u8 assoc_resp_len, u8 *assoc_info); -void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel); -void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr, +void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel); +void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr, u8 keymgmt, u8 ucipher, u8 auth, u8 assoc_req_len, u8 *assoc_info); -void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, +void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid, u8 assoc_resp_len, u8 *assoc_info, u16 prot_reason_status); -void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast); +void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast); void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr); -void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status); -void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len); +void ath6kl_scan_complete_evt(struct ath6kl *ar, int status); +void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len); void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active); enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac); -void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid); +void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid); -void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif); -void ath6kl_disconnect(struct ath6kl_vif *vif); -void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid); -void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no, +void ath6kl_dtimexpiry_event(struct ath6kl *ar); +void ath6kl_disconnect(struct ath6kl *ar); +void ath6kl_deep_sleep_enable(struct ath6kl *ar); +void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid); +void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no, u8 win_sz); void ath6kl_wakeup_event(void *dev); - -void ath6kl_reset_device(struct ath6kl *ar, u32 target_type, - bool wait_fot_compltn, bool cold_reset); -void ath6kl_init_control_info(struct ath6kl_vif *vif); -void ath6kl_deinit_if_data(struct ath6kl_vif *vif); -void ath6kl_core_free(struct ath6kl *ar); -struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar); -void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready); -int ath6kl_init_hw_start(struct ath6kl *ar); -int ath6kl_init_hw_stop(struct ath6kl *ar); -void ath6kl_check_wow_status(struct ath6kl *ar); +void ath6kl_target_failure(struct ath6kl *ar); #endif /* CORE_H */ diff --git a/trunk/drivers/net/wireless/ath/ath6kl/debug.c b/trunk/drivers/net/wireless/ath/ath6kl/debug.c index eb808b46f94c..7879b5314285 100644 --- a/trunk/drivers/net/wireless/ath/ath6kl/debug.c +++ b/trunk/drivers/net/wireless/ath/ath6kl/debug.c @@ -143,48 +143,49 @@ void 
ath6kl_dump_registers(struct ath6kl_device *dev, static void dump_cred_dist(struct htc_endpoint_credit_dist *ep_dist) { - ath6kl_dbg(ATH6KL_DBG_CREDIT, + ath6kl_dbg(ATH6KL_DBG_ANY, "--- endpoint: %d svc_id: 0x%X ---\n", ep_dist->endpoint, ep_dist->svc_id); - ath6kl_dbg(ATH6KL_DBG_CREDIT, " dist_flags : 0x%X\n", + ath6kl_dbg(ATH6KL_DBG_ANY, " dist_flags : 0x%X\n", ep_dist->dist_flags); - ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_norm : %d\n", + ath6kl_dbg(ATH6KL_DBG_ANY, " cred_norm : %d\n", ep_dist->cred_norm); - ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_min : %d\n", + ath6kl_dbg(ATH6KL_DBG_ANY, " cred_min : %d\n", ep_dist->cred_min); - ath6kl_dbg(ATH6KL_DBG_CREDIT, " credits : %d\n", + ath6kl_dbg(ATH6KL_DBG_ANY, " credits : %d\n", ep_dist->credits); - ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_assngd : %d\n", + ath6kl_dbg(ATH6KL_DBG_ANY, " cred_assngd : %d\n", ep_dist->cred_assngd); - ath6kl_dbg(ATH6KL_DBG_CREDIT, " seek_cred : %d\n", + ath6kl_dbg(ATH6KL_DBG_ANY, " seek_cred : %d\n", ep_dist->seek_cred); - ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_sz : %d\n", + ath6kl_dbg(ATH6KL_DBG_ANY, " cred_sz : %d\n", ep_dist->cred_sz); - ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_per_msg : %d\n", + ath6kl_dbg(ATH6KL_DBG_ANY, " cred_per_msg : %d\n", ep_dist->cred_per_msg); - ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_to_dist : %d\n", + ath6kl_dbg(ATH6KL_DBG_ANY, " cred_to_dist : %d\n", ep_dist->cred_to_dist); - ath6kl_dbg(ATH6KL_DBG_CREDIT, " txq_depth : %d\n", - get_queue_depth(&ep_dist->htc_ep->txq)); - ath6kl_dbg(ATH6KL_DBG_CREDIT, + ath6kl_dbg(ATH6KL_DBG_ANY, " txq_depth : %d\n", + get_queue_depth(&((struct htc_endpoint *) + ep_dist->htc_rsvd)->txq)); + ath6kl_dbg(ATH6KL_DBG_ANY, "----------------------------------\n"); } -/* FIXME: move to htc.c */ void dump_cred_dist_stats(struct htc_target *target) { struct htc_endpoint_credit_dist *ep_list; - if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_CREDIT)) + if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_TRC)) return; list_for_each_entry(ep_list, &target->cred_dist_list, list) dump_cred_dist(ep_list); - ath6kl_dbg(ATH6KL_DBG_CREDIT, - "credit distribution total %d free %d\n", - target->credit_info->total_avail_credits, - target->credit_info->cur_free_credits); + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:%p dist:%p\n", + target->cred_dist_cntxt, NULL); + ath6kl_dbg(ATH6KL_DBG_TRC, "credit distribution, total : %d, free : %d\n", + target->cred_dist_cntxt->total_avail_credits, + target->cred_dist_cntxt->cur_free_credits); } static int ath6kl_debugfs_open(struct inode *inode, struct file *file) @@ -396,20 +397,13 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; - struct ath6kl_vif *vif; - struct target_stats *tgt_stats; + struct target_stats *tgt_stats = &ar->target_stats; char *buf; unsigned int len = 0, buf_len = 1500; int i; long left; ssize_t ret_cnt; - vif = ath6kl_vif_first(ar); - if (!vif) - return -EIO; - - tgt_stats = &vif->target_stats; - buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -419,9 +413,9 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf, return -EBUSY; } - set_bit(STATS_UPDATE_PEND, &vif->flags); + set_bit(STATS_UPDATE_PEND, &ar->flag); - if (ath6kl_wmi_get_stats_cmd(ar->wmi, 0)) { + if (ath6kl_wmi_get_stats_cmd(ar->wmi)) { up(&ar->sem); kfree(buf); return -EIO; @@ -429,7 +423,7 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf, left = wait_event_interruptible_timeout(ar->event_wq, !test_bit(STATS_UPDATE_PEND, - 
&vif->flags), WMI_TIMEOUT); + &ar->flag), WMI_TIMEOUT); up(&ar->sem); @@ -561,10 +555,10 @@ static ssize_t read_file_credit_dist_stats(struct file *file, len += scnprintf(buf + len, buf_len - len, "%25s%5d\n", "Total Avail Credits: ", - target->credit_info->total_avail_credits); + target->cred_dist_cntxt->total_avail_credits); len += scnprintf(buf + len, buf_len - len, "%25s%5d\n", "Free credits :", - target->credit_info->cur_free_credits); + target->cred_dist_cntxt->cur_free_credits); len += scnprintf(buf + len, buf_len - len, " Epid Flags Cred_norm Cred_min Credits Cred_assngd" @@ -583,7 +577,8 @@ static ssize_t read_file_credit_dist_stats(struct file *file, print_credit_info("%9d", cred_per_msg); print_credit_info("%14d", cred_to_dist); len += scnprintf(buf + len, buf_len - len, "%12d\n", - get_queue_depth(&ep_list->htc_ep->txq)); + get_queue_depth(&((struct htc_endpoint *) + ep_list->htc_rsvd)->txq)); } if (len > buf_len) @@ -601,107 +596,6 @@ static const struct file_operations fops_credit_dist_stats = { .llseek = default_llseek, }; -static unsigned int print_endpoint_stat(struct htc_target *target, char *buf, - unsigned int buf_len, unsigned int len, - int offset, const char *name) -{ - int i; - struct htc_endpoint_stats *ep_st; - u32 *counter; - - len += scnprintf(buf + len, buf_len - len, "%s:", name); - for (i = 0; i < ENDPOINT_MAX; i++) { - ep_st = &target->endpoint[i].ep_st; - counter = ((u32 *) ep_st) + (offset / 4); - len += scnprintf(buf + len, buf_len - len, " %u", *counter); - } - len += scnprintf(buf + len, buf_len - len, "\n"); - - return len; -} - -static ssize_t ath6kl_endpoint_stats_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath6kl *ar = file->private_data; - struct htc_target *target = ar->htc_target; - char *buf; - unsigned int buf_len, len = 0; - ssize_t ret_cnt; - - buf_len = sizeof(struct htc_endpoint_stats) / sizeof(u32) * - (25 + ENDPOINT_MAX * 11); - buf = kmalloc(buf_len, GFP_KERNEL); - if (!buf) - return -ENOMEM; - -#define EPSTAT(name) \ - len = print_endpoint_stat(target, buf, buf_len, len, \ - offsetof(struct htc_endpoint_stats, name), \ - #name) - EPSTAT(cred_low_indicate); - EPSTAT(tx_issued); - EPSTAT(tx_pkt_bundled); - EPSTAT(tx_bundles); - EPSTAT(tx_dropped); - EPSTAT(tx_cred_rpt); - EPSTAT(cred_rpt_from_rx); - EPSTAT(cred_rpt_from_other); - EPSTAT(cred_rpt_ep0); - EPSTAT(cred_from_rx); - EPSTAT(cred_from_other); - EPSTAT(cred_from_ep0); - EPSTAT(cred_cosumd); - EPSTAT(cred_retnd); - EPSTAT(rx_pkts); - EPSTAT(rx_lkahds); - EPSTAT(rx_bundl); - EPSTAT(rx_bundle_lkahd); - EPSTAT(rx_bundle_from_hdr); - EPSTAT(rx_alloc_thresh_hit); - EPSTAT(rxalloc_thresh_byte); -#undef EPSTAT - - if (len > buf_len) - len = buf_len; - - ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); - kfree(buf); - return ret_cnt; -} - -static ssize_t ath6kl_endpoint_stats_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath6kl *ar = file->private_data; - struct htc_target *target = ar->htc_target; - int ret, i; - u32 val; - struct htc_endpoint_stats *ep_st; - - ret = kstrtou32_from_user(user_buf, count, 0, &val); - if (ret) - return ret; - if (val == 0) { - for (i = 0; i < ENDPOINT_MAX; i++) { - ep_st = &target->endpoint[i].ep_st; - memset(ep_st, 0, sizeof(*ep_st)); - } - } - - return count; -} - -static const struct file_operations fops_endpoint_stats = { - .open = ath6kl_debugfs_open, - .read = ath6kl_endpoint_stats_read, - .write = ath6kl_endpoint_stats_write, - .owner 
= THIS_MODULE, - .llseek = default_llseek, -}; - static unsigned long ath6kl_get_num_reg(void) { int i; @@ -974,660 +868,6 @@ static const struct file_operations fops_diag_reg_write = { .llseek = default_llseek, }; -int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf, - size_t len) -{ - const struct wmi_target_roam_tbl *tbl; - u16 num_entries; - - if (len < sizeof(*tbl)) - return -EINVAL; - - tbl = (const struct wmi_target_roam_tbl *) buf; - num_entries = le16_to_cpu(tbl->num_entries); - if (sizeof(*tbl) + num_entries * sizeof(struct wmi_bss_roam_info) > - len) - return -EINVAL; - - if (ar->debug.roam_tbl == NULL || - ar->debug.roam_tbl_len < (unsigned int) len) { - kfree(ar->debug.roam_tbl); - ar->debug.roam_tbl = kmalloc(len, GFP_ATOMIC); - if (ar->debug.roam_tbl == NULL) - return -ENOMEM; - } - - memcpy(ar->debug.roam_tbl, buf, len); - ar->debug.roam_tbl_len = len; - - if (test_bit(ROAM_TBL_PEND, &ar->flag)) { - clear_bit(ROAM_TBL_PEND, &ar->flag); - wake_up(&ar->event_wq); - } - - return 0; -} - -static ssize_t ath6kl_roam_table_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath6kl *ar = file->private_data; - int ret; - long left; - struct wmi_target_roam_tbl *tbl; - u16 num_entries, i; - char *buf; - unsigned int len, buf_len; - ssize_t ret_cnt; - - if (down_interruptible(&ar->sem)) - return -EBUSY; - - set_bit(ROAM_TBL_PEND, &ar->flag); - - ret = ath6kl_wmi_get_roam_tbl_cmd(ar->wmi); - if (ret) { - up(&ar->sem); - return ret; - } - - left = wait_event_interruptible_timeout( - ar->event_wq, !test_bit(ROAM_TBL_PEND, &ar->flag), WMI_TIMEOUT); - up(&ar->sem); - - if (left <= 0) - return -ETIMEDOUT; - - if (ar->debug.roam_tbl == NULL) - return -ENOMEM; - - tbl = (struct wmi_target_roam_tbl *) ar->debug.roam_tbl; - num_entries = le16_to_cpu(tbl->num_entries); - - buf_len = 100 + num_entries * 100; - buf = kzalloc(buf_len, GFP_KERNEL); - if (buf == NULL) - return -ENOMEM; - len = 0; - len += scnprintf(buf + len, buf_len - len, - "roam_mode=%u\n\n" - "# roam_util bssid rssi rssidt last_rssi util bias\n", - le16_to_cpu(tbl->roam_mode)); - - for (i = 0; i < num_entries; i++) { - struct wmi_bss_roam_info *info = &tbl->info[i]; - len += scnprintf(buf + len, buf_len - len, - "%d %pM %d %d %d %d %d\n", - a_sle32_to_cpu(info->roam_util), info->bssid, - info->rssi, info->rssidt, info->last_rssi, - info->util, info->bias); - } - - if (len > buf_len) - len = buf_len; - - ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); - - kfree(buf); - return ret_cnt; -} - -static const struct file_operations fops_roam_table = { - .read = ath6kl_roam_table_read, - .open = ath6kl_debugfs_open, - .owner = THIS_MODULE, - .llseek = default_llseek, -}; - -static ssize_t ath6kl_force_roam_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath6kl *ar = file->private_data; - int ret; - char buf[20]; - size_t len; - u8 bssid[ETH_ALEN]; - int i; - int addr[ETH_ALEN]; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - buf[len] = '\0'; - - if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x", - &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) - != ETH_ALEN) - return -EINVAL; - for (i = 0; i < ETH_ALEN; i++) - bssid[i] = addr[i]; - - ret = ath6kl_wmi_force_roam_cmd(ar->wmi, bssid); - if (ret) - return ret; - - return count; -} - -static const struct file_operations fops_force_roam = { - .write = ath6kl_force_roam_write, - .open = ath6kl_debugfs_open, - 
.owner = THIS_MODULE, - .llseek = default_llseek, -}; - -static ssize_t ath6kl_roam_mode_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath6kl *ar = file->private_data; - int ret; - char buf[20]; - size_t len; - enum wmi_roam_mode mode; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - buf[len] = '\0'; - if (len > 0 && buf[len - 1] == '\n') - buf[len - 1] = '\0'; - - if (strcasecmp(buf, "default") == 0) - mode = WMI_DEFAULT_ROAM_MODE; - else if (strcasecmp(buf, "bssbias") == 0) - mode = WMI_HOST_BIAS_ROAM_MODE; - else if (strcasecmp(buf, "lock") == 0) - mode = WMI_LOCK_BSS_MODE; - else - return -EINVAL; - - ret = ath6kl_wmi_set_roam_mode_cmd(ar->wmi, mode); - if (ret) - return ret; - - return count; -} - -static const struct file_operations fops_roam_mode = { - .write = ath6kl_roam_mode_write, - .open = ath6kl_debugfs_open, - .owner = THIS_MODULE, - .llseek = default_llseek, -}; - -void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive) -{ - ar->debug.keepalive = keepalive; -} - -static ssize_t ath6kl_keepalive_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath6kl *ar = file->private_data; - char buf[16]; - int len; - - len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.keepalive); - - return simple_read_from_buffer(user_buf, count, ppos, buf, len); -} - -static ssize_t ath6kl_keepalive_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath6kl *ar = file->private_data; - int ret; - u8 val; - - ret = kstrtou8_from_user(user_buf, count, 0, &val); - if (ret) - return ret; - - ret = ath6kl_wmi_set_keepalive_cmd(ar->wmi, 0, val); - if (ret) - return ret; - - return count; -} - -static const struct file_operations fops_keepalive = { - .open = ath6kl_debugfs_open, - .read = ath6kl_keepalive_read, - .write = ath6kl_keepalive_write, - .owner = THIS_MODULE, - .llseek = default_llseek, -}; - -void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout) -{ - ar->debug.disc_timeout = timeout; -} - -static ssize_t ath6kl_disconnect_timeout_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath6kl *ar = file->private_data; - char buf[16]; - int len; - - len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.disc_timeout); - - return simple_read_from_buffer(user_buf, count, ppos, buf, len); -} - -static ssize_t ath6kl_disconnect_timeout_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath6kl *ar = file->private_data; - int ret; - u8 val; - - ret = kstrtou8_from_user(user_buf, count, 0, &val); - if (ret) - return ret; - - ret = ath6kl_wmi_disctimeout_cmd(ar->wmi, 0, val); - if (ret) - return ret; - - return count; -} - -static const struct file_operations fops_disconnect_timeout = { - .open = ath6kl_debugfs_open, - .read = ath6kl_disconnect_timeout_read, - .write = ath6kl_disconnect_timeout_write, - .owner = THIS_MODULE, - .llseek = default_llseek, -}; - -static ssize_t ath6kl_create_qos_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - - struct ath6kl *ar = file->private_data; - struct ath6kl_vif *vif; - char buf[200]; - ssize_t len; - char *sptr, *token; - struct wmi_create_pstream_cmd pstream; - u32 val32; - u16 val16; - - vif = ath6kl_vif_first(ar); - if (!vif) - return -EIO; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return 
-EFAULT; - buf[len] = '\0'; - sptr = buf; - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou8(token, 0, &pstream.user_pri)) - return -EINVAL; - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou8(token, 0, &pstream.traffic_direc)) - return -EINVAL; - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou8(token, 0, &pstream.traffic_class)) - return -EINVAL; - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou8(token, 0, &pstream.traffic_type)) - return -EINVAL; - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou8(token, 0, &pstream.voice_psc_cap)) - return -EINVAL; - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou32(token, 0, &val32)) - return -EINVAL; - pstream.min_service_int = cpu_to_le32(val32); - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou32(token, 0, &val32)) - return -EINVAL; - pstream.max_service_int = cpu_to_le32(val32); - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou32(token, 0, &val32)) - return -EINVAL; - pstream.inactivity_int = cpu_to_le32(val32); - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou32(token, 0, &val32)) - return -EINVAL; - pstream.suspension_int = cpu_to_le32(val32); - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou32(token, 0, &val32)) - return -EINVAL; - pstream.service_start_time = cpu_to_le32(val32); - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou8(token, 0, &pstream.tsid)) - return -EINVAL; - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou16(token, 0, &val16)) - return -EINVAL; - pstream.nominal_msdu = cpu_to_le16(val16); - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou16(token, 0, &val16)) - return -EINVAL; - pstream.max_msdu = cpu_to_le16(val16); - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou32(token, 0, &val32)) - return -EINVAL; - pstream.min_data_rate = cpu_to_le32(val32); - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou32(token, 0, &val32)) - return -EINVAL; - pstream.mean_data_rate = cpu_to_le32(val32); - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou32(token, 0, &val32)) - return -EINVAL; - pstream.peak_data_rate = cpu_to_le32(val32); - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou32(token, 0, &val32)) - return -EINVAL; - pstream.max_burst_size = cpu_to_le32(val32); - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou32(token, 0, &val32)) - return -EINVAL; - pstream.delay_bound = cpu_to_le32(val32); - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou32(token, 0, &val32)) - return -EINVAL; - pstream.min_phy_rate = cpu_to_le32(val32); - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou32(token, 0, &val32)) - return -EINVAL; - pstream.sba = cpu_to_le32(val32); - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou32(token, 0, &val32)) - return -EINVAL; - pstream.medium_time = cpu_to_le32(val32); - - ath6kl_wmi_create_pstream_cmd(ar->wmi, vif->fw_vif_idx, &pstream); - - return count; -} - -static const struct file_operations fops_create_qos = { - .write = ath6kl_create_qos_write, - .open = ath6kl_debugfs_open, - .owner = THIS_MODULE, - .llseek = default_llseek, -}; - -static ssize_t 
ath6kl_delete_qos_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - - struct ath6kl *ar = file->private_data; - struct ath6kl_vif *vif; - char buf[100]; - ssize_t len; - char *sptr, *token; - u8 traffic_class; - u8 tsid; - - vif = ath6kl_vif_first(ar); - if (!vif) - return -EIO; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - buf[len] = '\0'; - sptr = buf; - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou8(token, 0, &traffic_class)) - return -EINVAL; - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou8(token, 0, &tsid)) - return -EINVAL; - - ath6kl_wmi_delete_pstream_cmd(ar->wmi, vif->fw_vif_idx, - traffic_class, tsid); - - return count; -} - -static const struct file_operations fops_delete_qos = { - .write = ath6kl_delete_qos_write, - .open = ath6kl_debugfs_open, - .owner = THIS_MODULE, - .llseek = default_llseek, -}; - -static ssize_t ath6kl_bgscan_int_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath6kl *ar = file->private_data; - u16 bgscan_int; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - - buf[len] = '\0'; - if (kstrtou16(buf, 0, &bgscan_int)) - return -EINVAL; - - if (bgscan_int == 0) - bgscan_int = 0xffff; - - ath6kl_wmi_scanparams_cmd(ar->wmi, 0, 0, 0, bgscan_int, 0, 0, 0, 3, - 0, 0, 0); - - return count; -} - -static const struct file_operations fops_bgscan_int = { - .write = ath6kl_bgscan_int_write, - .open = ath6kl_debugfs_open, - .owner = THIS_MODULE, - .llseek = default_llseek, -}; - -static ssize_t ath6kl_listen_int_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath6kl *ar = file->private_data; - u16 listen_int_t, listen_int_b; - char buf[32]; - char *sptr, *token; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - - buf[len] = '\0'; - sptr = buf; - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - - if (kstrtou16(token, 0, &listen_int_t)) - return -EINVAL; - - if (kstrtou16(sptr, 0, &listen_int_b)) - return -EINVAL; - - if ((listen_int_t < 15) || (listen_int_t > 5000)) - return -EINVAL; - - if ((listen_int_b < 1) || (listen_int_b > 50)) - return -EINVAL; - - ar->listen_intvl_t = listen_int_t; - ar->listen_intvl_b = listen_int_b; - - ath6kl_wmi_listeninterval_cmd(ar->wmi, 0, ar->listen_intvl_t, - ar->listen_intvl_b); - - return count; -} - -static ssize_t ath6kl_listen_int_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath6kl *ar = file->private_data; - char buf[32]; - int len; - - len = scnprintf(buf, sizeof(buf), "%u %u\n", ar->listen_intvl_t, - ar->listen_intvl_b); - - return simple_read_from_buffer(user_buf, count, ppos, buf, len); -} - -static const struct file_operations fops_listen_int = { - .read = ath6kl_listen_int_read, - .write = ath6kl_listen_int_write, - .open = ath6kl_debugfs_open, - .owner = THIS_MODULE, - .llseek = default_llseek, -}; - -static ssize_t ath6kl_power_params_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath6kl *ar = file->private_data; - u8 buf[100]; - unsigned int len = 0; - char *sptr, *token; - u16 idle_period, ps_poll_num, dtim, - tx_wakeup, num_tx; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return 
-EFAULT; - buf[len] = '\0'; - sptr = buf; - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou16(token, 0, &idle_period)) - return -EINVAL; - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou16(token, 0, &ps_poll_num)) - return -EINVAL; - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou16(token, 0, &dtim)) - return -EINVAL; - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou16(token, 0, &tx_wakeup)) - return -EINVAL; - - token = strsep(&sptr, " "); - if (!token) - return -EINVAL; - if (kstrtou16(token, 0, &num_tx)) - return -EINVAL; - - ath6kl_wmi_pmparams_cmd(ar->wmi, 0, idle_period, ps_poll_num, - dtim, tx_wakeup, num_tx, 0); - - return count; -} - -static const struct file_operations fops_power_params = { - .write = ath6kl_power_params_write, - .open = ath6kl_debugfs_open, - .owner = THIS_MODULE, - .llseek = default_llseek, -}; - int ath6kl_debug_init(struct ath6kl *ar) { ar->debug.fwlog_buf.buf = vmalloc(ATH6KL_FWLOG_SIZE); @@ -1649,7 +889,7 @@ int ath6kl_debug_init(struct ath6kl *ar) ar->debug.fwlog_mask = 0; ar->debugfs_phy = debugfs_create_dir("ath6kl", - ar->wiphy->debugfsdir); + ar->wdev->wiphy->debugfsdir); if (!ar->debugfs_phy) { vfree(ar->debug.fwlog_buf.buf); kfree(ar->debug.fwlog_tmp); @@ -1662,9 +902,6 @@ int ath6kl_debug_init(struct ath6kl *ar) debugfs_create_file("credit_dist_stats", S_IRUSR, ar->debugfs_phy, ar, &fops_credit_dist_stats); - debugfs_create_file("endpoint_stats", S_IRUSR | S_IWUSR, - ar->debugfs_phy, ar, &fops_endpoint_stats); - debugfs_create_file("fwlog", S_IRUSR, ar->debugfs_phy, ar, &fops_fwlog); @@ -1686,33 +923,6 @@ int ath6kl_debug_init(struct ath6kl *ar) debugfs_create_file("war_stats", S_IRUSR, ar->debugfs_phy, ar, &fops_war_stats); - debugfs_create_file("roam_table", S_IRUSR, ar->debugfs_phy, ar, - &fops_roam_table); - - debugfs_create_file("force_roam", S_IWUSR, ar->debugfs_phy, ar, - &fops_force_roam); - - debugfs_create_file("roam_mode", S_IWUSR, ar->debugfs_phy, ar, - &fops_roam_mode); - - debugfs_create_file("keepalive", S_IRUSR | S_IWUSR, ar->debugfs_phy, ar, - &fops_keepalive); - - debugfs_create_file("disconnect_timeout", S_IRUSR | S_IWUSR, - ar->debugfs_phy, ar, &fops_disconnect_timeout); - - debugfs_create_file("create_qos", S_IWUSR, ar->debugfs_phy, ar, - &fops_create_qos); - - debugfs_create_file("delete_qos", S_IWUSR, ar->debugfs_phy, ar, - &fops_delete_qos); - - debugfs_create_file("bgscan_interval", S_IWUSR, - ar->debugfs_phy, ar, &fops_bgscan_int); - - debugfs_create_file("power_params", S_IWUSR, ar->debugfs_phy, ar, - &fops_power_params); - return 0; } @@ -1720,7 +930,6 @@ void ath6kl_debug_cleanup(struct ath6kl *ar) { vfree(ar->debug.fwlog_buf.buf); kfree(ar->debug.fwlog_tmp); - kfree(ar->debug.roam_tbl); } #endif diff --git a/trunk/drivers/net/wireless/ath/ath6kl/debug.h b/trunk/drivers/net/wireless/ath/ath6kl/debug.h index 9853c9c125c1..7b7675f70a10 100644 --- a/trunk/drivers/net/wireless/ath/ath6kl/debug.h +++ b/trunk/drivers/net/wireless/ath/ath6kl/debug.h @@ -17,19 +17,19 @@ #ifndef DEBUG_H #define DEBUG_H -#include "hif.h" +#include "htc_hif.h" enum ATH6K_DEBUG_MASK { - ATH6KL_DBG_CREDIT = BIT(0), - /* hole */ + ATH6KL_DBG_WLAN_CONNECT = BIT(0), /* wlan connect */ + ATH6KL_DBG_WLAN_SCAN = BIT(1), /* wlan scan */ ATH6KL_DBG_WLAN_TX = BIT(2), /* wlan tx */ ATH6KL_DBG_WLAN_RX = BIT(3), /* wlan rx */ ATH6KL_DBG_BMI = BIT(4), /* bmi tracing */ - ATH6KL_DBG_HTC = BIT(5), - ATH6KL_DBG_HIF = BIT(6), + ATH6KL_DBG_HTC_SEND = BIT(5), 
/* htc send */ + ATH6KL_DBG_HTC_RECV = BIT(6), /* htc recv */ ATH6KL_DBG_IRQ = BIT(7), /* interrupt processing */ - /* hole */ - /* hole */ + ATH6KL_DBG_PM = BIT(8), /* power management */ + ATH6KL_DBG_WLAN_NODE = BIT(9), /* general wlan node tracing */ ATH6KL_DBG_WMI = BIT(10), /* wmi tracing */ ATH6KL_DBG_TRC = BIT(11), /* generic func tracing */ ATH6KL_DBG_SCATTER = BIT(12), /* hif scatter tracing */ @@ -40,7 +40,6 @@ enum ATH6K_DEBUG_MASK { ATH6KL_DBG_SDIO_DUMP = BIT(17), ATH6KL_DBG_BOOT = BIT(18), /* driver init and fw boot */ ATH6KL_DBG_WMI_DUMP = BIT(19), - ATH6KL_DBG_SUSPEND = BIT(20), ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */ }; @@ -91,10 +90,6 @@ void ath6kl_dump_registers(struct ath6kl_device *dev, void dump_cred_dist_stats(struct htc_target *target); void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len); void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war); -int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf, - size_t len); -void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive); -void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout); int ath6kl_debug_init(struct ath6kl *ar); void ath6kl_debug_cleanup(struct ath6kl *ar); @@ -130,21 +125,6 @@ static inline void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war) { } -static inline int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, - const void *buf, size_t len) -{ - return 0; -} - -static inline void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive) -{ -} - -static inline void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, - u8 timeout) -{ -} - static inline int ath6kl_debug_init(struct ath6kl *ar) { return 0; diff --git a/trunk/drivers/net/wireless/ath/ath6kl/hif-ops.h b/trunk/drivers/net/wireless/ath/ath6kl/hif-ops.h index 2fe1dadfc77a..d6c898f3d0b3 100644 --- a/trunk/drivers/net/wireless/ath/ath6kl/hif-ops.h +++ b/trunk/drivers/net/wireless/ath/ath6kl/hif-ops.h @@ -18,16 +18,10 @@ #define HIF_OPS_H #include "hif.h" -#include "debug.h" static inline int hif_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf, u32 len, u32 request) { - ath6kl_dbg(ATH6KL_DBG_HIF, - "hif %s sync addr 0x%x buf 0x%p len %d request 0x%x\n", - (request & HIF_WRITE) ? "write" : "read", - addr, buf, len, request); - return ar->hif_ops->read_write_sync(ar, addr, buf, len, request); } @@ -35,24 +29,16 @@ static inline int hif_write_async(struct ath6kl *ar, u32 address, u8 *buffer, u32 length, u32 request, struct htc_packet *packet) { - ath6kl_dbg(ATH6KL_DBG_HIF, - "hif write async addr 0x%x buf 0x%p len %d request 0x%x\n", - address, buffer, length, request); - return ar->hif_ops->write_async(ar, address, buffer, length, request, packet); } static inline void ath6kl_hif_irq_enable(struct ath6kl *ar) { - ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq enable\n"); - return ar->hif_ops->irq_enable(ar); } static inline void ath6kl_hif_irq_disable(struct ath6kl *ar) { - ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq disable\n"); - return ar->hif_ops->irq_disable(ar); } @@ -83,70 +69,9 @@ static inline void ath6kl_hif_cleanup_scatter(struct ath6kl *ar) return ar->hif_ops->cleanup_scatter(ar); } -static inline int ath6kl_hif_suspend(struct ath6kl *ar, - struct cfg80211_wowlan *wow) -{ - ath6kl_dbg(ATH6KL_DBG_HIF, "hif suspend\n"); - - return ar->hif_ops->suspend(ar, wow); -} - -/* - * Read from the ATH6KL through its diagnostic window. No cooperation from - * the Target is required for this. 
- */ -static inline int ath6kl_hif_diag_read32(struct ath6kl *ar, u32 address, - u32 *value) +static inline int ath6kl_hif_suspend(struct ath6kl *ar) { - return ar->hif_ops->diag_read32(ar, address, value); -} - -/* - * Write to the ATH6KL through its diagnostic window. No cooperation from - * the Target is required for this. - */ -static inline int ath6kl_hif_diag_write32(struct ath6kl *ar, u32 address, - __le32 value) -{ - return ar->hif_ops->diag_write32(ar, address, value); -} - -static inline int ath6kl_hif_bmi_read(struct ath6kl *ar, u8 *buf, u32 len) -{ - return ar->hif_ops->bmi_read(ar, buf, len); -} - -static inline int ath6kl_hif_bmi_write(struct ath6kl *ar, u8 *buf, u32 len) -{ - return ar->hif_ops->bmi_write(ar, buf, len); -} - -static inline int ath6kl_hif_resume(struct ath6kl *ar) -{ - ath6kl_dbg(ATH6KL_DBG_HIF, "hif resume\n"); - - return ar->hif_ops->resume(ar); -} - -static inline int ath6kl_hif_power_on(struct ath6kl *ar) -{ - ath6kl_dbg(ATH6KL_DBG_HIF, "hif power on\n"); - - return ar->hif_ops->power_on(ar); -} - -static inline int ath6kl_hif_power_off(struct ath6kl *ar) -{ - ath6kl_dbg(ATH6KL_DBG_HIF, "hif power off\n"); - - return ar->hif_ops->power_off(ar); -} - -static inline void ath6kl_hif_stop(struct ath6kl *ar) -{ - ath6kl_dbg(ATH6KL_DBG_HIF, "hif stop\n"); - - ar->hif_ops->stop(ar); + return ar->hif_ops->suspend(ar); } #endif diff --git a/trunk/drivers/net/wireless/ath/ath6kl/hif.h b/trunk/drivers/net/wireless/ath/ath6kl/hif.h index 699a036f3a44..797e2d1d9bf9 100644 --- a/trunk/drivers/net/wireless/ath/ath6kl/hif.h +++ b/trunk/drivers/net/wireless/ath/ath6kl/hif.h @@ -35,7 +35,6 @@ #define MAX_SCATTER_REQ_TRANSFER_SIZE (32 * 1024) #define MANUFACTURER_ID_AR6003_BASE 0x300 -#define MANUFACTURER_ID_AR6004_BASE 0x400 /* SDIO manufacturer ID and Codes */ #define MANUFACTURER_ID_ATH6KL_BASE_MASK 0xFF00 #define MANUFACTURER_CODE 0x271 /* Atheros */ @@ -60,18 +59,6 @@ /* mode to enable special 4-bit interrupt assertion without clock */ #define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ (1 << 0) -/* HTC runs over mailbox 0 */ -#define HTC_MAILBOX 0 - -#define ATH6KL_TARGET_DEBUG_INTR_MASK 0x01 - -/* FIXME: are these duplicates with MAX_SCATTER_ values in hif.h? 
*/ -#define ATH6KL_SCATTER_ENTRIES_PER_REQ 16 -#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER (16 * 1024) -#define ATH6KL_SCATTER_REQS 4 - -#define ATH6KL_HIF_COMMUNICATION_TIMEOUT 1000 - struct bus_request { struct list_head list; @@ -199,34 +186,6 @@ struct hif_scatter_req { struct hif_scatter_item scat_list[1]; }; -struct ath6kl_irq_proc_registers { - u8 host_int_status; - u8 cpu_int_status; - u8 error_int_status; - u8 counter_int_status; - u8 mbox_frame; - u8 rx_lkahd_valid; - u8 host_int_status2; - u8 gmbox_rx_avail; - __le32 rx_lkahd[2]; - __le32 rx_gmbox_lkahd_alias[2]; -} __packed; - -struct ath6kl_irq_enable_reg { - u8 int_status_en; - u8 cpu_int_status_en; - u8 err_int_status_en; - u8 cntr_int_status_en; -} __packed; - -struct ath6kl_device { - spinlock_t lock; - struct ath6kl_irq_proc_registers irq_proc_reg; - struct ath6kl_irq_enable_reg irq_en_reg; - struct htc_target *htc_cnxt; - struct ath6kl *ar; -}; - struct ath6kl_hif_ops { int (*read_write_sync)(struct ath6kl *ar, u32 addr, u8 *buf, u32 len, u32 request); @@ -243,30 +202,7 @@ struct ath6kl_hif_ops { int (*scat_req_rw) (struct ath6kl *ar, struct hif_scatter_req *scat_req); void (*cleanup_scatter)(struct ath6kl *ar); - int (*suspend)(struct ath6kl *ar, struct cfg80211_wowlan *wow); - int (*resume)(struct ath6kl *ar); - int (*diag_read32)(struct ath6kl *ar, u32 address, u32 *value); - int (*diag_write32)(struct ath6kl *ar, u32 address, __le32 value); - int (*bmi_read)(struct ath6kl *ar, u8 *buf, u32 len); - int (*bmi_write)(struct ath6kl *ar, u8 *buf, u32 len); - int (*power_on)(struct ath6kl *ar); - int (*power_off)(struct ath6kl *ar); - void (*stop)(struct ath6kl *ar); + int (*suspend)(struct ath6kl *ar); }; -int ath6kl_hif_setup(struct ath6kl_device *dev); -int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev); -int ath6kl_hif_mask_intrs(struct ath6kl_device *dev); -int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, - u32 *lk_ahd, int timeout); -int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx); -int ath6kl_hif_disable_intrs(struct ath6kl_device *dev); - -int ath6kl_hif_rw_comp_handler(void *context, int status); -int ath6kl_hif_intr_bh_handler(struct ath6kl *ar); - -/* Scatter Function and Definitions */ -int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev, - struct hif_scatter_req *scat_req, bool read); - #endif diff --git a/trunk/drivers/net/wireless/ath/ath6kl/htc.c b/trunk/drivers/net/wireless/ath/ath6kl/htc.c index f3b63ca25c7e..f88a7c9e4148 100644 --- a/trunk/drivers/net/wireless/ath/ath6kl/htc.c +++ b/trunk/drivers/net/wireless/ath/ath6kl/htc.c @@ -15,321 +15,13 @@ */ #include "core.h" -#include "hif.h" +#include "htc_hif.h" #include "debug.h" #include "hif-ops.h" #include #define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask)) -/* Functions for Tx credit handling */ -static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info, - struct htc_endpoint_credit_dist *ep_dist, - int credits) -{ - ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n", - ep_dist->endpoint, credits); - - ep_dist->credits += credits; - ep_dist->cred_assngd += credits; - cred_info->cur_free_credits -= credits; -} - -static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info, - struct list_head *ep_list, - int tot_credits) -{ - struct htc_endpoint_credit_dist *cur_ep_dist; - int count; - - ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits); - - cred_info->cur_free_credits = tot_credits; - cred_info->total_avail_credits = 
tot_credits; - - list_for_each_entry(cur_ep_dist, ep_list, list) { - if (cur_ep_dist->endpoint == ENDPOINT_0) - continue; - - cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg; - - if (tot_credits > 4) { - if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) || - (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) { - ath6kl_credit_deposit(cred_info, - cur_ep_dist, - cur_ep_dist->cred_min); - cur_ep_dist->dist_flags |= HTC_EP_ACTIVE; - } - } - - if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) { - ath6kl_credit_deposit(cred_info, cur_ep_dist, - cur_ep_dist->cred_min); - /* - * Control service is always marked active, it - * never goes inactive EVER. - */ - cur_ep_dist->dist_flags |= HTC_EP_ACTIVE; - } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC) - /* this is the lowest priority data endpoint */ - /* FIXME: this looks fishy, check */ - cred_info->lowestpri_ep_dist = cur_ep_dist->list; - - /* - * Streams have to be created (explicit | implicit) for all - * kinds of traffic. BE endpoints are also inactive in the - * beginning. When BE traffic starts it creates implicit - * streams that redistributes credits. - * - * Note: all other endpoints have minimums set but are - * initially given NO credits. credits will be distributed - * as traffic activity demands - */ - } - - WARN_ON(cred_info->cur_free_credits <= 0); - - list_for_each_entry(cur_ep_dist, ep_list, list) { - if (cur_ep_dist->endpoint == ENDPOINT_0) - continue; - - if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) - cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg; - else { - /* - * For the remaining data endpoints, we assume that - * each cred_per_msg are the same. We use a simple - * calculation here, we take the remaining credits - * and determine how many max messages this can - * cover and then set each endpoint's normal value - * equal to 3/4 this amount. 
- */ - count = (cred_info->cur_free_credits / - cur_ep_dist->cred_per_msg) - * cur_ep_dist->cred_per_msg; - count = (count * 3) >> 2; - count = max(count, cur_ep_dist->cred_per_msg); - cur_ep_dist->cred_norm = count; - - } - - ath6kl_dbg(ATH6KL_DBG_CREDIT, - "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n", - cur_ep_dist->endpoint, - cur_ep_dist->svc_id, - cur_ep_dist->credits, - cur_ep_dist->cred_per_msg, - cur_ep_dist->cred_norm, - cur_ep_dist->cred_min); - } -} - -/* initialize and setup credit distribution */ -int ath6kl_credit_setup(void *htc_handle, - struct ath6kl_htc_credit_info *cred_info) -{ - u16 servicepriority[5]; - - memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info)); - - servicepriority[0] = WMI_CONTROL_SVC; /* highest */ - servicepriority[1] = WMI_DATA_VO_SVC; - servicepriority[2] = WMI_DATA_VI_SVC; - servicepriority[3] = WMI_DATA_BE_SVC; - servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */ - - /* set priority list */ - ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5); - - return 0; -} - -/* reduce an ep's credits back to a set limit */ -static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info, - struct htc_endpoint_credit_dist *ep_dist, - int limit) -{ - int credits; - - ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n", - ep_dist->endpoint, limit); - - ep_dist->cred_assngd = limit; - - if (ep_dist->credits <= limit) - return; - - credits = ep_dist->credits - limit; - ep_dist->credits -= credits; - cred_info->cur_free_credits += credits; -} - -static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info, - struct list_head *epdist_list) -{ - struct htc_endpoint_credit_dist *cur_dist_list; - - list_for_each_entry(cur_dist_list, epdist_list, list) { - if (cur_dist_list->endpoint == ENDPOINT_0) - continue; - - if (cur_dist_list->cred_to_dist > 0) { - cur_dist_list->credits += - cur_dist_list->cred_to_dist; - cur_dist_list->cred_to_dist = 0; - if (cur_dist_list->credits > - cur_dist_list->cred_assngd) - ath6kl_credit_reduce(cred_info, - cur_dist_list, - cur_dist_list->cred_assngd); - - if (cur_dist_list->credits > - cur_dist_list->cred_norm) - ath6kl_credit_reduce(cred_info, cur_dist_list, - cur_dist_list->cred_norm); - - if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) { - if (cur_dist_list->txq_depth == 0) - ath6kl_credit_reduce(cred_info, - cur_dist_list, 0); - } - } - } -} - -/* - * HTC has an endpoint that needs credits, ep_dist is the endpoint in - * question. - */ -static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info, - struct htc_endpoint_credit_dist *ep_dist) -{ - struct htc_endpoint_credit_dist *curdist_list; - int credits = 0; - int need; - - if (ep_dist->svc_id == WMI_CONTROL_SVC) - goto out; - - if ((ep_dist->svc_id == WMI_DATA_VI_SVC) || - (ep_dist->svc_id == WMI_DATA_VO_SVC)) - if ((ep_dist->cred_assngd >= ep_dist->cred_norm)) - goto out; - - /* - * For all other services, we follow a simple algorithm of: - * - * 1. checking the free pool for credits - * 2. checking lower priority endpoints for credits to take - */ - - credits = min(cred_info->cur_free_credits, ep_dist->seek_cred); - - if (credits >= ep_dist->seek_cred) - goto out; - - /* - * We don't have enough in the free pool, try taking away from - * lower priority services The rule for taking away credits: - * - * 1. Only take from lower priority endpoints - * 2. Only take what is allocated above the minimum (never - * starve an endpoint completely) - * 3. Only take what you need. 
- */ - - list_for_each_entry_reverse(curdist_list, - &cred_info->lowestpri_ep_dist, - list) { - if (curdist_list == ep_dist) - break; - - need = ep_dist->seek_cred - cred_info->cur_free_credits; - - if ((curdist_list->cred_assngd - need) >= - curdist_list->cred_min) { - /* - * The current one has been allocated more than - * it's minimum and it has enough credits assigned - * above it's minimum to fulfill our need try to - * take away just enough to fulfill our need. - */ - ath6kl_credit_reduce(cred_info, curdist_list, - curdist_list->cred_assngd - need); - - if (cred_info->cur_free_credits >= - ep_dist->seek_cred) - break; - } - - if (curdist_list->endpoint == ENDPOINT_0) - break; - } - - credits = min(cred_info->cur_free_credits, ep_dist->seek_cred); - -out: - /* did we find some credits? */ - if (credits) - ath6kl_credit_deposit(cred_info, ep_dist, credits); - - ep_dist->seek_cred = 0; -} - -/* redistribute credits based on activity change */ -static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info, - struct list_head *ep_dist_list) -{ - struct htc_endpoint_credit_dist *curdist_list; - - list_for_each_entry(curdist_list, ep_dist_list, list) { - if (curdist_list->endpoint == ENDPOINT_0) - continue; - - if ((curdist_list->svc_id == WMI_DATA_BK_SVC) || - (curdist_list->svc_id == WMI_DATA_BE_SVC)) - curdist_list->dist_flags |= HTC_EP_ACTIVE; - - if ((curdist_list->svc_id != WMI_CONTROL_SVC) && - !(curdist_list->dist_flags & HTC_EP_ACTIVE)) { - if (curdist_list->txq_depth == 0) - ath6kl_credit_reduce(info, curdist_list, 0); - else - ath6kl_credit_reduce(info, - curdist_list, - curdist_list->cred_min); - } - } -} - -/* - * - * This function is invoked whenever endpoints require credit - * distributions. A lock is held while this function is invoked, this - * function shall NOT block. The ep_dist_list is a list of distribution - * structures in prioritized order as defined by the call to the - * htc_set_credit_dist() api. 
- */ -static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info, - struct list_head *ep_dist_list, - enum htc_credit_dist_reason reason) -{ - switch (reason) { - case HTC_CREDIT_DIST_SEND_COMPLETE: - ath6kl_credit_update(cred_info, ep_dist_list); - break; - case HTC_CREDIT_DIST_ACTIVITY_CHANGE: - ath6kl_credit_redistribute(cred_info, ep_dist_list); - break; - default: - break; - } - - WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits); - WARN_ON(cred_info->cur_free_credits < 0); -} - static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len) { u8 *align_addr; @@ -410,12 +102,12 @@ static void htc_tx_comp_update(struct htc_target *target, packet->info.tx.cred_used; endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq); - ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n", - target->credit_info, &target->cred_dist_list); + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n", + target->cred_dist_cntxt, &target->cred_dist_list); - ath6kl_credit_distribute(target->credit_info, - &target->cred_dist_list, - HTC_CREDIT_DIST_SEND_COMPLETE); + ath6k_credit_distribute(target->cred_dist_cntxt, + &target->cred_dist_list, + HTC_CREDIT_DIST_SEND_COMPLETE); spin_unlock_bh(&target->tx_lock); } @@ -426,8 +118,8 @@ static void htc_tx_complete(struct htc_endpoint *endpoint, if (list_empty(txq)) return; - ath6kl_dbg(ATH6KL_DBG_HTC, - "htc tx complete ep %d pkts %d\n", + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, + "send complete ep %d, (%d pkts)\n", endpoint->eid, get_queue_depth(txq)); ath6kl_tx_complete(endpoint->target->dev->ar, txq); @@ -439,9 +131,6 @@ static void htc_tx_comp_handler(struct htc_target *target, struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint]; struct list_head container; - ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n", - packet->info.tx.seqno); - htc_tx_comp_update(target, endpoint, packet); INIT_LIST_HEAD(&container); list_add_tail(&packet->list, &container); @@ -459,8 +148,8 @@ static void htc_async_tx_scat_complete(struct htc_target *target, INIT_LIST_HEAD(&tx_compq); - ath6kl_dbg(ATH6KL_DBG_HTC, - "htc tx scat complete len %d entries %d\n", + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, + "htc_async_tx_scat_complete total len: %d entries: %d\n", scat_req->len, scat_req->scat_entries); if (scat_req->status) @@ -501,13 +190,16 @@ static int ath6kl_htc_tx_issue(struct htc_target *target, send_len = packet->act_len + HTC_HDR_LENGTH; + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s: transmit len : %d (%s)\n", + __func__, send_len, sync ? "sync" : "async"); + padded_len = CALC_TXRX_PADDED_LEN(target, send_len); - ath6kl_dbg(ATH6KL_DBG_HTC, - "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n", - send_len, packet->info.tx.seqno, padded_len, - target->dev->ar->mbox_info.htc_addr, - sync ? "sync" : "async"); + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, + "DevSendPacket, padded len: %d mbox:0x%X (mode:%s)\n", + padded_len, + target->dev->ar->mbox_info.htc_addr, + sync ? "sync" : "async"); if (sync) { status = hif_read_write_sync(target->dev->ar, @@ -535,7 +227,7 @@ static int htc_check_credits(struct htc_target *target, *req_cred = (len > target->tgt_cred_sz) ? 
DIV_ROUND_UP(len, target->tgt_cred_sz) : 1; - ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n", + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "creds required:%d got:%d\n", *req_cred, ep->cred_dist.credits); if (ep->cred_dist.credits < *req_cred) { @@ -545,13 +237,16 @@ static int htc_check_credits(struct htc_target *target, /* Seek more credits */ ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits; - ath6kl_credit_seek(target->credit_info, &ep->cred_dist); + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n", + target->cred_dist_cntxt, &ep->cred_dist); + + ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist); ep->cred_dist.seek_cred = 0; if (ep->cred_dist.credits < *req_cred) { - ath6kl_dbg(ATH6KL_DBG_CREDIT, - "credit not found for ep %d\n", + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, + "not enough credits for ep %d - leaving packet in queue\n", eid); return -EINVAL; } @@ -565,15 +260,17 @@ static int htc_check_credits(struct htc_target *target, ep->cred_dist.seek_cred = ep->cred_dist.cred_per_msg - ep->cred_dist.credits; - ath6kl_credit_seek(target->credit_info, &ep->cred_dist); + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n", + target->cred_dist_cntxt, &ep->cred_dist); + + ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist); /* see if we were successful in getting more */ if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) { /* tell the target we need credits ASAP! */ *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE; ep->ep_st.cred_low_indicate += 1; - ath6kl_dbg(ATH6KL_DBG_CREDIT, - "credit we need credits asap\n"); + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "host needs credits\n"); } } @@ -598,8 +295,8 @@ static void ath6kl_htc_tx_pkts_get(struct htc_target *target, packet = list_first_entry(&endpoint->txq, struct htc_packet, list); - ath6kl_dbg(ATH6KL_DBG_HTC, - "htc tx got packet 0x%p queue depth %d\n", + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, + "got head pkt:0x%p , queue depth: %d\n", packet, get_queue_depth(&endpoint->txq)); len = CALC_TXRX_PADDED_LEN(target, @@ -707,9 +404,9 @@ static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target, scat_req->len += len; scat_req->scat_entries++; - ath6kl_dbg(ATH6KL_DBG_HTC, - "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n", - i, packet, packet->info.tx.seqno, len, rem_scat); + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, + "%d, adding pkt : 0x%p len:%d (remaining space:%d)\n", + i, packet, len, rem_scat); } /* Roll back scatter setup in case of any failure */ @@ -758,12 +455,12 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint, if (!scat_req) { /* no scatter resources */ - ath6kl_dbg(ATH6KL_DBG_HTC, - "htc tx no more scatter resources\n"); + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, + "no more scatter resources\n"); break; } - ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n", + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "pkts to scatter: %d\n", n_scat); scat_req->len = 0; @@ -782,10 +479,10 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint, n_sent_bundle++; tot_pkts_bundle += scat_req->scat_entries; - ath6kl_dbg(ATH6KL_DBG_HTC, - "htc tx scatter bytes %d entries %d\n", + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, + "send scatter total bytes: %d , entries: %d\n", scat_req->len, scat_req->scat_entries); - ath6kl_hif_submit_scat_req(target->dev, scat_req, false); + ath6kldev_submit_scat_req(target->dev, scat_req, false); if (status) break; @@ -793,8 +490,8 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint, *sent_bundle = n_sent_bundle; *n_bundle_pkts = tot_pkts_bundle; - ath6kl_dbg(ATH6KL_DBG_HTC, "htc 
tx bundle sent %d pkts\n", - n_sent_bundle); + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s (sent:%d)\n", + __func__, n_sent_bundle); return; } @@ -813,7 +510,7 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target, if (endpoint->tx_proc_cnt > 1) { endpoint->tx_proc_cnt--; spin_unlock_bh(&target->tx_lock); - ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n"); + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_try_send (busy)\n"); return; } @@ -891,12 +588,15 @@ static bool ath6kl_htc_tx_try(struct htc_target *target, overflow = true; if (overflow) - ath6kl_dbg(ATH6KL_DBG_HTC, - "htc tx overflow ep %d depth %d max %d\n", - endpoint->eid, txq_depth, + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, + "ep %d, tx queue will overflow :%d , tx depth:%d, max:%d\n", + endpoint->eid, overflow, txq_depth, endpoint->max_txq_depth); if (overflow && ep_cb.tx_full) { + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, + "indicating overflowed tx packet: 0x%p\n", tx_pkt); + if (ep_cb.tx_full(endpoint->target, tx_pkt) == HTC_SEND_FULL_DROP) { endpoint->ep_st.tx_dropped += 1; @@ -925,12 +625,12 @@ static void htc_chk_ep_txq(struct htc_target *target) * are not modifying any state. */ list_for_each_entry(cred_dist, &target->cred_dist_list, list) { - endpoint = cred_dist->htc_ep; + endpoint = (struct htc_endpoint *)cred_dist->htc_rsvd; spin_lock_bh(&target->tx_lock); if (!list_empty(&endpoint->txq)) { - ath6kl_dbg(ATH6KL_DBG_HTC, - "htc creds ep %d credits %d pkts %d\n", + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, + "ep %d has %d credits and %d packets in tx queue\n", cred_dist->endpoint, endpoint->cred_dist.credits, get_queue_depth(&endpoint->txq)); @@ -1004,13 +704,13 @@ static int htc_setup_tx_complete(struct htc_target *target) } void ath6kl_htc_set_credit_dist(struct htc_target *target, - struct ath6kl_htc_credit_info *credit_info, + struct htc_credit_state_info *cred_dist_cntxt, u16 srvc_pri_order[], int list_len) { struct htc_endpoint *endpoint; int i, ep; - target->credit_info = credit_info; + target->cred_dist_cntxt = cred_dist_cntxt; list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list, &target->cred_dist_list); @@ -1036,8 +736,8 @@ int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet) struct htc_endpoint *endpoint; struct list_head queue; - ath6kl_dbg(ATH6KL_DBG_HTC, - "htc tx ep id %d buf 0x%p len %d\n", + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, + "htc_tx: ep id: %d, buf: 0x%p, len: %d\n", packet->endpoint, packet->buf, packet->act_len); if (packet->endpoint >= ENDPOINT_MAX) { @@ -1087,8 +787,8 @@ void ath6kl_htc_flush_txep(struct htc_target *target, list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) { packet->status = -ECANCELED; list_del(&packet->list); - ath6kl_dbg(ATH6KL_DBG_HTC, - "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n", + ath6kl_dbg(ATH6KL_DBG_TRC, + "flushing tx pkt:0x%p, len:%d, ep:%d tag:0x%X\n", packet, packet->act_len, packet->endpoint, packet->info.tx.tag); @@ -1144,13 +844,12 @@ void ath6kl_htc_indicate_activity_change(struct htc_target *target, endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq); - ath6kl_dbg(ATH6KL_DBG_HTC, - "htc tx activity ctxt 0x%p dist 0x%p\n", - target->credit_info, &target->cred_dist_list); + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n", + target->cred_dist_cntxt, &target->cred_dist_list); - ath6kl_credit_distribute(target->credit_info, - &target->cred_dist_list, - HTC_CREDIT_DIST_ACTIVITY_CHANGE); + ath6k_credit_distribute(target->cred_dist_cntxt, + &target->cred_dist_list, + HTC_CREDIT_DIST_ACTIVITY_CHANGE); } spin_unlock_bh(&target->tx_lock); @@ -1220,15 
+919,15 @@ static int ath6kl_htc_rx_packet(struct htc_target *target, padded_len = CALC_TXRX_PADDED_LEN(target, rx_len); if (padded_len > packet->buf_len) { - ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n", + ath6kl_err("not enough receive space for packet - padlen:%d recvlen:%d bufferlen:%d\n", padded_len, rx_len, packet->buf_len); return -ENOMEM; } - ath6kl_dbg(ATH6KL_DBG_HTC, - "htc rx 0x%p hdr x%x len %d mbox 0x%x\n", + ath6kl_dbg(ATH6KL_DBG_HTC_RECV, + "dev_rx_pkt (0x%p : hdr:0x%X) padded len: %d mbox:0x%X (mode:%s)\n", packet, packet->info.rx.exp_hdr, - padded_len, dev->ar->mbox_info.htc_addr); + padded_len, dev->ar->mbox_info.htc_addr, "sync"); status = hif_read_write_sync(dev->ar, dev->ar->mbox_info.htc_addr, @@ -1438,8 +1137,8 @@ static int ath6kl_htc_rx_alloc(struct htc_target *target, } endpoint->ep_st.rx_bundle_from_hdr += 1; - ath6kl_dbg(ATH6KL_DBG_HTC, - "htc rx bundle pkts %d\n", + ath6kl_dbg(ATH6KL_DBG_HTC_RECV, + "htc hdr indicates :%d msg can be fetched as a bundle\n", n_msg); } else /* HTC header only indicates 1 message to fetch */ @@ -1492,8 +1191,8 @@ static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets) ath6kl_err("htc_ctrl_rx, got message with len:%zu\n", packets->act_len + HTC_HDR_LENGTH); - ath6kl_dbg_dump(ATH6KL_DBG_HTC, - "htc rx unexpected endpoint 0 message", "", + ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, + "Unexpected ENDPOINT 0 Message", "", packets->buf - HTC_HDR_LENGTH, packets->act_len + HTC_HDR_LENGTH); } @@ -1510,6 +1209,9 @@ static void htc_proc_cred_rpt(struct htc_target *target, int tot_credits = 0, i; bool dist = false; + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, + "htc_proc_cred_rpt, credit report entries:%d\n", n_entries); + spin_lock_bh(&target->tx_lock); for (i = 0; i < n_entries; i++, rpt++) { @@ -1521,9 +1223,8 @@ static void htc_proc_cred_rpt(struct htc_target *target, endpoint = &target->endpoint[rpt->eid]; - ath6kl_dbg(ATH6KL_DBG_CREDIT, - "credit report ep %d credits %d\n", - rpt->eid, rpt->credits); + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, " ep %d got %d credits\n", + rpt->eid, rpt->credits); endpoint->ep_st.tx_cred_rpt += 1; endpoint->ep_st.cred_retnd += rpt->credits; @@ -1563,14 +1264,21 @@ static void htc_proc_cred_rpt(struct htc_target *target, tot_credits += rpt->credits; } + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, + "report indicated %d credits to distribute\n", + tot_credits); + if (dist) { /* * This was a credit return based on a completed send * operations note, this is done with the lock held */ - ath6kl_credit_distribute(target->credit_info, - &target->cred_dist_list, - HTC_CREDIT_DIST_SEND_COMPLETE); + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n", + target->cred_dist_cntxt, &target->cred_dist_list); + + ath6k_credit_distribute(target->cred_dist_cntxt, + &target->cred_dist_list, + HTC_CREDIT_DIST_SEND_COMPLETE); } spin_unlock_bh(&target->tx_lock); @@ -1612,15 +1320,14 @@ static int htc_parse_trailer(struct htc_target *target, if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) && next_lk_ahds) { - ath6kl_dbg(ATH6KL_DBG_HTC, - "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n", + ath6kl_dbg(ATH6KL_DBG_HTC_RECV, + "lk_ahd report found (pre valid:0x%X, post valid:0x%X)\n", lk_ahd->pre_valid, lk_ahd->post_valid); /* look ahead bytes are valid, copy them over */ memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4); - ath6kl_dbg_dump(ATH6KL_DBG_HTC, - "htc rx next look ahead", + ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Next Look Ahead", "", next_lk_ahds, 4); *n_lk_ahds = 1; @@ 
-1639,7 +1346,7 @@ static int htc_parse_trailer(struct htc_target *target, bundle_lkahd_rpt = (struct htc_bundle_lkahd_rpt *) record_buf; - ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd", + ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Bundle lk_ahd", "", record_buf, record->len); for (i = 0; i < len; i++) { @@ -1671,8 +1378,10 @@ static int htc_proc_trailer(struct htc_target *target, u8 *record_buf; u8 *orig_buf; - ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len); - ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len); + ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "+htc_proc_trailer (len:%d)\n", len); + + ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Recv Trailer", "", + buf, len); orig_buf = buf; orig_len = len; @@ -1709,7 +1418,7 @@ static int htc_proc_trailer(struct htc_target *target, } if (status) - ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer", + ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD Recv Trailer", "", orig_buf, orig_len); return status; @@ -1727,6 +1436,9 @@ static int ath6kl_htc_rx_process_hdr(struct htc_target *target, if (n_lkahds != NULL) *n_lkahds = 0; + ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "HTC Recv PKT", "htc ", + packet->buf, packet->act_len); + /* * NOTE: we cannot assume the alignment of buf, so we use the safe * macros to retrieve 16 bit fields. @@ -1768,9 +1480,9 @@ static int ath6kl_htc_rx_process_hdr(struct htc_target *target, if (lk_ahd != packet->info.rx.exp_hdr) { ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n", __func__, packet, packet->info.rx.rx_flags); - ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd", + ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Expected Message lk_ahd", "", &packet->info.rx.exp_hdr, 4); - ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header", + ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Current Frame Header", "", (u8 *)&lk_ahd, sizeof(lk_ahd)); status = -ENOMEM; goto fail_rx; @@ -1806,8 +1518,15 @@ static int ath6kl_htc_rx_process_hdr(struct htc_target *target, fail_rx: if (status) - ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet", - "", packet->buf, packet->act_len); + ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD HTC Recv PKT", + "", packet->buf, + packet->act_len < 256 ? 
packet->act_len : 256); + else { + if (packet->act_len > 0) + ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, + "HTC - Application Msg", "", + packet->buf, packet->act_len); + } return status; } @@ -1815,8 +1534,8 @@ static int ath6kl_htc_rx_process_hdr(struct htc_target *target, static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint, struct htc_packet *packet) { - ath6kl_dbg(ATH6KL_DBG_HTC, - "htc rx complete ep %d packet 0x%p\n", + ath6kl_dbg(ATH6KL_DBG_HTC_RECV, + "htc calling ep %d recv callback on packet 0x%p\n", endpoint->eid, packet); endpoint->ep_cb.rx(endpoint->target, packet); } @@ -1852,9 +1571,9 @@ static int ath6kl_htc_rx_bundle(struct htc_target *target, len = 0; - ath6kl_dbg(ATH6KL_DBG_HTC, - "htc rx bundle depth %d pkts %d\n", - get_queue_depth(rxq), n_scat_pkt); + ath6kl_dbg(ATH6KL_DBG_HTC_RECV, + "%s(): (numpackets: %d , actual : %d)\n", + __func__, get_queue_depth(rxq), n_scat_pkt); scat_req = hif_scatter_req_get(target->dev->ar); @@ -1901,7 +1620,7 @@ static int ath6kl_htc_rx_bundle(struct htc_target *target, scat_req->len = len; scat_req->scat_entries = i; - status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true); + status = ath6kldev_submit_scat_req(target->dev, scat_req, true); if (!status) *n_pkt_fetched = i; @@ -1924,6 +1643,7 @@ static int ath6kl_htc_rx_process_packets(struct htc_target *target, int status = 0; list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) { + list_del(&packet->list); ep = &target->endpoint[packet->endpoint]; /* process header for each of the recv packet */ @@ -1932,8 +1652,6 @@ static int ath6kl_htc_rx_process_packets(struct htc_target *target, if (status) return status; - list_del(&packet->list); - if (list_empty(comp_pktq)) { /* * Last packet's more packet flag is set @@ -1968,15 +1686,11 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target, int fetched_pkts; bool part_bundle = false; int status = 0; - struct list_head tmp_rxq; - struct htc_packet *packet, *tmp_pkt; /* now go fetch the list of HTC packets */ while (!list_empty(rx_pktq)) { fetched_pkts = 0; - INIT_LIST_HEAD(&tmp_rxq); - if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) { /* * There are enough packets to attempt a @@ -1984,27 +1698,28 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target, * allowed. */ status = ath6kl_htc_rx_bundle(target, rx_pktq, - &tmp_rxq, + comp_pktq, &fetched_pkts, part_bundle); if (status) - goto fail_rx; + return status; if (!list_empty(rx_pktq)) part_bundle = true; - - list_splice_tail_init(&tmp_rxq, comp_pktq); } if (!fetched_pkts) { + struct htc_packet *packet; packet = list_first_entry(rx_pktq, struct htc_packet, list); + list_del(&packet->list); + /* fully synchronous */ packet->completion = NULL; - if (!list_is_singular(rx_pktq)) + if (!list_empty(rx_pktq)) /* * look_aheads in all packet * except the last one in the @@ -2016,42 +1731,18 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target, /* go fetch the packet */ status = ath6kl_htc_rx_packet(target, packet, packet->act_len); - - list_move_tail(&packet->list, &tmp_rxq); - if (status) - goto fail_rx; + return status; - list_splice_tail_init(&tmp_rxq, comp_pktq); + list_add_tail(&packet->list, comp_pktq); } } - return 0; - -fail_rx: - - /* - * Cleanup any packets we allocated but didn't use to - * actually fetch any packets. 
- */ - - list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) { - list_del(&packet->list); - htc_reclaim_rxbuf(target, packet, - &target->endpoint[packet->endpoint]); - } - - list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) { - list_del(&packet->list); - htc_reclaim_rxbuf(target, packet, - &target->endpoint[packet->endpoint]); - } - return status; } int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, - u32 msg_look_ahead, int *num_pkts) + u32 msg_look_ahead[], int *num_pkts) { struct htc_packet *packets, *tmp_pkt; struct htc_endpoint *endpoint; @@ -2068,7 +1759,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, * On first entry copy the look_aheads into our temp array for * processing */ - look_aheads[0] = msg_look_ahead; + memcpy(look_aheads, msg_look_ahead, sizeof(look_aheads)); while (true) { @@ -2136,6 +1827,15 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, if (status) { ath6kl_err("failed to get pending recv messages: %d\n", status); + /* + * Cleanup any packets we allocated but didn't use to + * actually fetch any packets. + */ + list_for_each_entry_safe(packets, tmp_pkt, &rx_pktq, list) { + list_del(&packets->list); + htc_reclaim_rxbuf(target, packets, + &target->endpoint[packets->endpoint]); + } /* cleanup any packets in sync completion queue */ list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) { @@ -2146,7 +1846,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, if (target->htc_flags & HTC_OP_STATE_STOPPING) { ath6kl_warn("host is going to stop blocking receiver for htc_stop\n"); - ath6kl_hif_rx_control(target->dev, false); + ath6kldev_rx_control(target->dev, false); } } @@ -2156,7 +1856,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, */ if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) { ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n"); - ath6kl_hif_rx_control(target->dev, false); + ath6kldev_rx_control(target->dev, false); } *num_pkts = n_fetched; @@ -2174,12 +1874,12 @@ static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target) struct htc_frame_hdr *htc_hdr; u32 look_ahead; - if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead, + if (ath6kldev_poll_mboxmsg_rx(target->dev, &look_ahead, HTC_TARGET_RESPONSE_TIMEOUT)) return NULL; - ath6kl_dbg(ATH6KL_DBG_HTC, - "htc rx wait ctrl look_ahead 0x%X\n", look_ahead); + ath6kl_dbg(ATH6KL_DBG_HTC_RECV, + "htc_wait_for_ctrl_msg: look_ahead : 0x%X\n", look_ahead); htc_hdr = (struct htc_frame_hdr *)&look_ahead; @@ -2243,8 +1943,8 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target, depth = get_queue_depth(pkt_queue); - ath6kl_dbg(ATH6KL_DBG_HTC, - "htc rx add multiple ep id %d cnt %d len %d\n", + ath6kl_dbg(ATH6KL_DBG_HTC_RECV, + "htc_add_rxbuf_multiple: ep id: %d, cnt:%d, len: %d\n", first_pkt->endpoint, depth, first_pkt->buf_len); endpoint = &target->endpoint[first_pkt->endpoint]; @@ -2269,8 +1969,8 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target, /* check if we are blocked waiting for a new buffer */ if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) { if (target->ep_waiting == first_pkt->endpoint) { - ath6kl_dbg(ATH6KL_DBG_HTC, - "htc rx blocked on ep %d, unblocking\n", + ath6kl_dbg(ATH6KL_DBG_HTC_RECV, + "receiver was blocked on ep:%d, unblocking.\n", target->ep_waiting); target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS; target->ep_waiting = ENDPOINT_MAX; @@ -2282,7 +1982,7 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target, if (rx_unblock && 
!(target->htc_flags & HTC_OP_STATE_STOPPING)) /* TODO : implement a buffer threshold count? */ - ath6kl_hif_rx_control(target->dev, true); + ath6kldev_rx_control(target->dev, true); return status; } @@ -2304,8 +2004,8 @@ void ath6kl_htc_flush_rx_buf(struct htc_target *target) &endpoint->rx_bufq, list) { list_del(&packet->list); spin_unlock_bh(&target->rx_lock); - ath6kl_dbg(ATH6KL_DBG_HTC, - "htc rx flush pkt 0x%p len %d ep %d\n", + ath6kl_dbg(ATH6KL_DBG_HTC_RECV, + "flushing rx pkt:0x%p, len:%d, ep:%d\n", packet, packet->buf_len, packet->endpoint); dev_kfree_skb(packet->pkt_cntxt); @@ -2328,8 +2028,8 @@ int ath6kl_htc_conn_service(struct htc_target *target, unsigned int max_msg_sz = 0; int status = 0; - ath6kl_dbg(ATH6KL_DBG_HTC, - "htc connect service target 0x%p service id 0x%x\n", + ath6kl_dbg(ATH6KL_DBG_TRC, + "htc_conn_service, target:0x%p service id:0x%X\n", target, conn_req->svc_id); if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) { @@ -2415,7 +2115,7 @@ int ath6kl_htc_conn_service(struct htc_target *target, endpoint->len_max = max_msg_sz; endpoint->ep_cb = conn_req->ep_cb; endpoint->cred_dist.svc_id = conn_req->svc_id; - endpoint->cred_dist.htc_ep = endpoint; + endpoint->cred_dist.htc_rsvd = endpoint; endpoint->cred_dist.endpoint = assigned_ep; endpoint->cred_dist.cred_sz = target->tgt_cred_sz; @@ -2472,7 +2172,6 @@ static void reset_ep_state(struct htc_target *target) } /* reset distribution list */ - /* FIXME: free existing entries */ INIT_LIST_HEAD(&target->cred_dist_list); } @@ -2502,8 +2201,8 @@ static void htc_setup_msg_bndl(struct htc_target *target) target->msg_per_bndl_max = min(target->max_scat_entries, target->msg_per_bndl_max); - ath6kl_dbg(ATH6KL_DBG_BOOT, - "htc bundling allowed msg_per_bndl_max %d\n", + ath6kl_dbg(ATH6KL_DBG_TRC, + "htc bundling allowed. max msg per htc bundle: %d\n", target->msg_per_bndl_max); /* Max rx bundle size is limited by the max tx bundle size */ @@ -2512,7 +2211,7 @@ static void htc_setup_msg_bndl(struct htc_target *target) target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH, target->max_xfer_szper_scatreq); - ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n", + ath6kl_dbg(ATH6KL_DBG_ANY, "max recv: %d max send: %d\n", target->max_rx_bndl_sz, target->max_tx_bndl_sz); if (target->max_tx_bndl_sz) @@ -2566,8 +2265,8 @@ int ath6kl_htc_wait_target(struct htc_target *target) target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt); target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz); - ath6kl_dbg(ATH6KL_DBG_BOOT, - "htc target ready credits %d size %d\n", + ath6kl_dbg(ATH6KL_DBG_HTC_RECV, + "target ready: credits: %d credit size: %d\n", target->tgt_creds, target->tgt_cred_sz); /* check if this is an extended ready message */ @@ -2581,7 +2280,7 @@ int ath6kl_htc_wait_target(struct htc_target *target) target->msg_per_bndl_max = 0; } - ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n", + ath6kl_dbg(ATH6KL_DBG_TRC, "using htc protocol version : %s (%d)\n", (target->htc_tgt_ver == HTC_VERSION_2P0) ? 
"2.0" : ">= 2.1", target->htc_tgt_ver); @@ -2601,10 +2300,6 @@ int ath6kl_htc_wait_target(struct htc_target *target) status = ath6kl_htc_conn_service((void *)target, &connect, &resp); if (status) - /* - * FIXME: this call doesn't make sense, the caller should - * call ath6kl_htc_cleanup() when it wants remove htc - */ ath6kl_hif_cleanup_scatter(target->dev->ar); fail_wait_target: @@ -2625,11 +2320,8 @@ int ath6kl_htc_start(struct htc_target *target) struct htc_packet *packet; int status; - memset(&target->dev->irq_proc_reg, 0, - sizeof(target->dev->irq_proc_reg)); - /* Disable interrupts at the chip level */ - ath6kl_hif_disable_intrs(target->dev); + ath6kldev_disable_intrs(target->dev); target->htc_flags = 0; target->rx_st_flags = 0; @@ -2642,8 +2334,8 @@ int ath6kl_htc_start(struct htc_target *target) } /* NOTE: the first entry in the distribution list is ENDPOINT_0 */ - ath6kl_credit_init(target->credit_info, &target->cred_dist_list, - target->tgt_creds); + ath6k_credit_init(target->cred_dist_cntxt, &target->cred_dist_list, + target->tgt_creds); dump_cred_dist_stats(target); @@ -2654,7 +2346,7 @@ int ath6kl_htc_start(struct htc_target *target) return status; /* unmask interrupts */ - status = ath6kl_hif_unmask_intrs(target->dev); + status = ath6kldev_unmask_intrs(target->dev); if (status) ath6kl_htc_stop(target); @@ -2662,44 +2354,6 @@ int ath6kl_htc_start(struct htc_target *target) return status; } -static int ath6kl_htc_reset(struct htc_target *target) -{ - u32 block_size, ctrl_bufsz; - struct htc_packet *packet; - int i; - - reset_ep_state(target); - - block_size = target->dev->ar->mbox_info.block_size; - - ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ? - (block_size + HTC_HDR_LENGTH) : - (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH); - - for (i = 0; i < NUM_CONTROL_BUFFERS; i++) { - packet = kzalloc(sizeof(*packet), GFP_KERNEL); - if (!packet) - return -ENOMEM; - - packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL); - if (!packet->buf_start) { - kfree(packet); - return -ENOMEM; - } - - packet->buf_len = ctrl_bufsz; - if (i < NUM_CONTROL_RX_BUFFERS) { - packet->act_len = 0; - packet->buf = packet->buf_start; - packet->endpoint = ENDPOINT_0; - list_add_tail(&packet->list, &target->free_ctrl_rxbuf); - } else - list_add_tail(&packet->list, &target->free_ctrl_txbuf); - } - - return 0; -} - /* htc_stop: stop interrupt reception, and flush all queued buffers */ void ath6kl_htc_stop(struct htc_target *target) { @@ -2712,19 +2366,21 @@ void ath6kl_htc_stop(struct htc_target *target) * function returns all pending HIF I/O has completed, we can * safely flush the queues. 
*/ - ath6kl_hif_mask_intrs(target->dev); + ath6kldev_mask_intrs(target->dev); ath6kl_htc_flush_txep_all(target); ath6kl_htc_flush_rx_buf(target); - ath6kl_htc_reset(target); + reset_ep_state(target); } void *ath6kl_htc_create(struct ath6kl *ar) { struct htc_target *target = NULL; - int status = 0; + struct htc_packet *packet; + int status = 0, i = 0; + u32 block_size, ctrl_bufsz; target = kzalloc(sizeof(*target), GFP_KERNEL); if (!target) { @@ -2736,7 +2392,7 @@ void *ath6kl_htc_create(struct ath6kl *ar) if (!target->dev) { ath6kl_err("unable to allocate memory\n"); status = -ENOMEM; - goto err_htc_cleanup; + goto fail_create_htc; } spin_lock_init(&target->htc_lock); @@ -2751,20 +2407,49 @@ void *ath6kl_htc_create(struct ath6kl *ar) target->dev->htc_cnxt = target; target->ep_waiting = ENDPOINT_MAX; - status = ath6kl_hif_setup(target->dev); - if (status) - goto err_htc_cleanup; + reset_ep_state(target); + + status = ath6kldev_setup(target->dev); - status = ath6kl_htc_reset(target); if (status) - goto err_htc_cleanup; + goto fail_create_htc; - return target; + block_size = ar->mbox_info.block_size; + + ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ? + (block_size + HTC_HDR_LENGTH) : + (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH); + + for (i = 0; i < NUM_CONTROL_BUFFERS; i++) { + packet = kzalloc(sizeof(*packet), GFP_KERNEL); + if (!packet) + break; -err_htc_cleanup: - ath6kl_htc_cleanup(target); + packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL); + if (!packet->buf_start) { + kfree(packet); + break; + } - return NULL; + packet->buf_len = ctrl_bufsz; + if (i < NUM_CONTROL_RX_BUFFERS) { + packet->act_len = 0; + packet->buf = packet->buf_start; + packet->endpoint = ENDPOINT_0; + list_add_tail(&packet->list, &target->free_ctrl_rxbuf); + } else + list_add_tail(&packet->list, &target->free_ctrl_txbuf); + } + +fail_create_htc: + if (i != NUM_CONTROL_BUFFERS || status) { + if (target) { + ath6kl_htc_cleanup(target); + target = NULL; + } + } + + return target; } /* cleanup the HTC instance */ diff --git a/trunk/drivers/net/wireless/ath/ath6kl/htc.h b/trunk/drivers/net/wireless/ath/ath6kl/htc.h index 57672e1ed1a6..8ce0c2c07ded 100644 --- a/trunk/drivers/net/wireless/ath/ath6kl/htc.h +++ b/trunk/drivers/net/wireless/ath/ath6kl/htc.h @@ -393,7 +393,7 @@ struct htc_endpoint_credit_dist { int cred_per_msg; /* reserved for HTC use */ - struct htc_endpoint *htc_ep; + void *htc_rsvd; /* * current depth of TX queue , i.e. 
messages waiting for credits @@ -414,11 +414,9 @@ enum htc_credit_dist_reason { HTC_CREDIT_DIST_SEEK_CREDITS, }; -struct ath6kl_htc_credit_info { +struct htc_credit_state_info { int total_avail_credits; int cur_free_credits; - - /* list of lowest priority endpoints */ struct list_head lowestpri_ep_dist; }; @@ -510,13 +508,10 @@ struct ath6kl_device; /* our HTC target state */ struct htc_target { struct htc_endpoint endpoint[ENDPOINT_MAX]; - - /* contains struct htc_endpoint_credit_dist */ struct list_head cred_dist_list; - struct list_head free_ctrl_txbuf; struct list_head free_ctrl_rxbuf; - struct ath6kl_htc_credit_info *credit_info; + struct htc_credit_state_info *cred_dist_cntxt; int tgt_creds; unsigned int tgt_cred_sz; spinlock_t htc_lock; @@ -547,7 +542,7 @@ struct htc_target { void *ath6kl_htc_create(struct ath6kl *ar); void ath6kl_htc_set_credit_dist(struct htc_target *target, - struct ath6kl_htc_credit_info *cred_info, + struct htc_credit_state_info *cred_info, u16 svc_pri_order[], int len); int ath6kl_htc_wait_target(struct htc_target *target); int ath6kl_htc_start(struct htc_target *target); @@ -568,10 +563,7 @@ int ath6kl_htc_get_rxbuf_num(struct htc_target *target, int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target, struct list_head *pktq); int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, - u32 msg_look_ahead, int *n_pkts); - -int ath6kl_credit_setup(void *htc_handle, - struct ath6kl_htc_credit_info *cred_info); + u32 msg_look_ahead[], int *n_pkts); static inline void set_htc_pkt_info(struct htc_packet *packet, void *context, u8 *buf, unsigned int len, diff --git a/trunk/drivers/net/wireless/ath/ath6kl/hif.c b/trunk/drivers/net/wireless/ath/ath6kl/htc_hif.c similarity index 80% rename from trunk/drivers/net/wireless/ath/ath6kl/hif.c rename to trunk/drivers/net/wireless/ath/ath6kl/htc_hif.c index e57da35e59fa..86b1cc7409c2 100644 --- a/trunk/drivers/net/wireless/ath/ath6kl/hif.c +++ b/trunk/drivers/net/wireless/ath/ath6kl/htc_hif.c @@ -13,19 +13,18 @@ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#include "hif.h" #include "core.h" #include "target.h" #include "hif-ops.h" +#include "htc_hif.h" #include "debug.h" #define MAILBOX_FOR_BLOCK_SIZE 1 #define ATH6KL_TIME_QUANTUM 10 /* in ms */ -static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req, - bool from_dma) +static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma) { u8 *buf; int i; @@ -47,11 +46,12 @@ static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req, return 0; } -int ath6kl_hif_rw_comp_handler(void *context, int status) +int ath6kldev_rw_comp_handler(void *context, int status) { struct htc_packet *packet = context; - ath6kl_dbg(ATH6KL_DBG_HIF, "hif rw completion pkt 0x%p status %d\n", + ath6kl_dbg(ATH6KL_DBG_HTC_RECV, + "ath6kldev_rw_comp_handler (pkt:0x%p , status: %d\n", packet, status); packet->status = status; @@ -59,83 +59,30 @@ int ath6kl_hif_rw_comp_handler(void *context, int status) return 0; } -#define REG_DUMP_COUNT_AR6003 60 -#define REGISTER_DUMP_LEN_MAX 60 -static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar) -{ - __le32 regdump_val[REGISTER_DUMP_LEN_MAX]; - u32 i, address, regdump_addr = 0; - int ret; - - if (ar->target_type != TARGET_TYPE_AR6003) - return; - - /* the reg dump pointer is copied to the host interest area */ - address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state)); - address = TARG_VTOP(ar->target_type, address); - - /* read RAM location through diagnostic window */ - ret = ath6kl_diag_read32(ar, address, ®dump_addr); - - if (ret || !regdump_addr) { - ath6kl_warn("failed to get ptr to register dump area: %d\n", - ret); - return; - } - - ath6kl_dbg(ATH6KL_DBG_IRQ, "register dump data address 0x%x\n", - regdump_addr); - regdump_addr = TARG_VTOP(ar->target_type, regdump_addr); - - /* fetch register dump data */ - ret = ath6kl_diag_read(ar, regdump_addr, (u8 *)®dump_val[0], - REG_DUMP_COUNT_AR6003 * (sizeof(u32))); - if (ret) { - ath6kl_warn("failed to get register dump: %d\n", ret); - return; - } - - ath6kl_info("crash dump:\n"); - ath6kl_info("hw 0x%x fw %s\n", ar->wiphy->hw_version, - ar->wiphy->fw_version); - - BUILD_BUG_ON(REG_DUMP_COUNT_AR6003 % 4); - - for (i = 0; i < REG_DUMP_COUNT_AR6003 / 4; i++) { - ath6kl_info("%d: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n", - 4 * i, - le32_to_cpu(regdump_val[i]), - le32_to_cpu(regdump_val[i + 1]), - le32_to_cpu(regdump_val[i + 2]), - le32_to_cpu(regdump_val[i + 3])); - } - -} - -static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev) +static int ath6kldev_proc_dbg_intr(struct ath6kl_device *dev) { u32 dummy; - int ret; + int status; + + ath6kl_err("target debug interrupt\n"); - ath6kl_warn("firmware crashed\n"); + ath6kl_target_failure(dev->ar); /* * read counter to clear the interrupt, the debug error interrupt is * counter 0. 
*/ - ret = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS, + status = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS, (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC); - if (ret) - ath6kl_warn("Failed to clear debug interrupt: %d\n", ret); - - ath6kl_hif_dump_fw_crash(dev->ar); + if (status) + WARN_ON(1); - return ret; + return status; } /* mailbox recv message polling */ -int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd, +int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd, int timeout) { struct ath6kl_irq_proc_registers *rg; @@ -171,7 +118,7 @@ int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd, /* delay a little */ mdelay(ATH6KL_TIME_QUANTUM); - ath6kl_dbg(ATH6KL_DBG_HIF, "hif retry mbox poll try %d\n", i); + ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "retry mbox poll : %d\n", i); } if (i == 0) { @@ -184,7 +131,7 @@ int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd, * Target failure handler will be called in case of * an assert. */ - ath6kl_hif_proc_dbg_intr(dev); + ath6kldev_proc_dbg_intr(dev); } return status; @@ -194,14 +141,11 @@ int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd, * Disable packet reception (used in case the host runs out of buffers) * using the interrupt enable registers through the host I/F */ -int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx) +int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx) { struct ath6kl_irq_enable_reg regs; int status = 0; - ath6kl_dbg(ATH6KL_DBG_HIF, "hif rx %s\n", - enable_rx ? "enable" : "disable"); - /* take the lock to protect interrupt enable shadows */ spin_lock_bh(&dev->lock); @@ -224,7 +168,7 @@ int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx) return status; } -int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev, +int ath6kldev_submit_scat_req(struct ath6kl_device *dev, struct hif_scatter_req *scat_req, bool read) { int status = 0; @@ -241,14 +185,14 @@ int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev, dev->ar->mbox_info.htc_addr; } - ath6kl_dbg(ATH6KL_DBG_HIF, - "hif submit scatter request entries %d len %d mbox 0x%x %s %s\n", + ath6kl_dbg((ATH6KL_DBG_HTC_RECV | ATH6KL_DBG_HTC_SEND), + "ath6kldev_submit_scat_req, entries: %d, total len: %d mbox:0x%X (mode: %s : %s)\n", scat_req->scat_entries, scat_req->len, scat_req->addr, !read ? "async" : "sync", (read) ? "rd" : "wr"); if (!read && scat_req->virt_scat) { - status = ath6kl_hif_cp_scat_dma_buf(scat_req, false); + status = ath6kldev_cp_scat_dma_buf(scat_req, false); if (status) { scat_req->status = status; scat_req->complete(dev->ar->htc_target, scat_req); @@ -263,13 +207,13 @@ int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev, scat_req->status = status; if (!status && scat_req->virt_scat) scat_req->status = - ath6kl_hif_cp_scat_dma_buf(scat_req, true); + ath6kldev_cp_scat_dma_buf(scat_req, true); } return status; } -static int ath6kl_hif_proc_counter_intr(struct ath6kl_device *dev) +static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev) { u8 counter_int_status; @@ -288,12 +232,12 @@ static int ath6kl_hif_proc_counter_intr(struct ath6kl_device *dev) * the debug assertion counter interrupt. 
*/ if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK) - return ath6kl_hif_proc_dbg_intr(dev); + return ath6kldev_proc_dbg_intr(dev); return 0; } -static int ath6kl_hif_proc_err_intr(struct ath6kl_device *dev) +static int ath6kldev_proc_err_intr(struct ath6kl_device *dev) { int status; u8 error_int_status; @@ -338,7 +282,7 @@ static int ath6kl_hif_proc_err_intr(struct ath6kl_device *dev) return status; } -static int ath6kl_hif_proc_cpu_intr(struct ath6kl_device *dev) +static int ath6kldev_proc_cpu_intr(struct ath6kl_device *dev) { int status; u8 cpu_int_status; @@ -473,7 +417,7 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done) * we rapidly pull packets. */ status = ath6kl_htc_rxmsg_pending_handler(dev->htc_cnxt, - lk_ahd, &fetched); + &lk_ahd, &fetched); if (status) goto out; @@ -492,21 +436,21 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done) if (MS(HOST_INT_STATUS_CPU, host_int_status)) { /* CPU Interrupt */ - status = ath6kl_hif_proc_cpu_intr(dev); + status = ath6kldev_proc_cpu_intr(dev); if (status) goto out; } if (MS(HOST_INT_STATUS_ERROR, host_int_status)) { /* Error Interrupt */ - status = ath6kl_hif_proc_err_intr(dev); + status = ath6kldev_proc_err_intr(dev); if (status) goto out; } if (MS(HOST_INT_STATUS_COUNTER, host_int_status)) /* Counter Interrupt */ - status = ath6kl_hif_proc_counter_intr(dev); + status = ath6kldev_proc_counter_intr(dev); out: /* @@ -535,10 +479,9 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done) } /* interrupt handler, kicks off all interrupt processing */ -int ath6kl_hif_intr_bh_handler(struct ath6kl *ar) +int ath6kldev_intr_bh_handler(struct ath6kl *ar) { struct ath6kl_device *dev = ar->htc_target->dev; - unsigned long timeout; int status = 0; bool done = false; @@ -552,8 +495,7 @@ int ath6kl_hif_intr_bh_handler(struct ath6kl *ar) * IRQ processing is synchronous, interrupt status registers can be * re-read. */ - timeout = jiffies + msecs_to_jiffies(ATH6KL_HIF_COMMUNICATION_TIMEOUT); - while (time_before(jiffies, timeout) && !done) { + while (!done) { status = proc_pending_irqs(dev, &done); if (status) break; @@ -562,7 +504,7 @@ int ath6kl_hif_intr_bh_handler(struct ath6kl *ar) return status; } -static int ath6kl_hif_enable_intrs(struct ath6kl_device *dev) +static int ath6kldev_enable_intrs(struct ath6kl_device *dev) { struct ath6kl_irq_enable_reg regs; int status; @@ -610,7 +552,7 @@ static int ath6kl_hif_enable_intrs(struct ath6kl_device *dev) return status; } -int ath6kl_hif_disable_intrs(struct ath6kl_device *dev) +int ath6kldev_disable_intrs(struct ath6kl_device *dev) { struct ath6kl_irq_enable_reg regs; @@ -629,7 +571,7 @@ int ath6kl_hif_disable_intrs(struct ath6kl_device *dev) } /* enable device interrupts */ -int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev) +int ath6kldev_unmask_intrs(struct ath6kl_device *dev) { int status = 0; @@ -641,29 +583,29 @@ int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev) * target "soft" resets. The ATH6KL interrupt enables reset back to an * "enabled" state when this happens. 
*/ - ath6kl_hif_disable_intrs(dev); + ath6kldev_disable_intrs(dev); /* unmask the host controller interrupts */ ath6kl_hif_irq_enable(dev->ar); - status = ath6kl_hif_enable_intrs(dev); + status = ath6kldev_enable_intrs(dev); return status; } /* disable all device interrupts */ -int ath6kl_hif_mask_intrs(struct ath6kl_device *dev) +int ath6kldev_mask_intrs(struct ath6kl_device *dev) { /* * Mask the interrupt at the HIF layer to avoid any stray interrupt * taken while we zero out our shadow registers in - * ath6kl_hif_disable_intrs(). + * ath6kldev_disable_intrs(). */ ath6kl_hif_irq_disable(dev->ar); - return ath6kl_hif_disable_intrs(dev); + return ath6kldev_disable_intrs(dev); } -int ath6kl_hif_setup(struct ath6kl_device *dev) +int ath6kldev_setup(struct ath6kl_device *dev) { int status = 0; @@ -679,17 +621,19 @@ int ath6kl_hif_setup(struct ath6kl_device *dev) /* must be a power of 2 */ if ((dev->htc_cnxt->block_sz & (dev->htc_cnxt->block_sz - 1)) != 0) { WARN_ON(1); - status = -EINVAL; goto fail_setup; } /* assemble mask, used for padding to a block */ dev->htc_cnxt->block_mask = dev->htc_cnxt->block_sz - 1; - ath6kl_dbg(ATH6KL_DBG_HIF, "hif block size %d mbox addr 0x%x\n", + ath6kl_dbg(ATH6KL_DBG_TRC, "block size: %d, mbox addr:0x%X\n", dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr); - status = ath6kl_hif_disable_intrs(dev); + ath6kl_dbg(ATH6KL_DBG_TRC, + "hif interrupt processing is sync only\n"); + + status = ath6kldev_disable_intrs(dev); fail_setup: return status; diff --git a/trunk/drivers/net/wireless/ath/ath6kl/htc_hif.h b/trunk/drivers/net/wireless/ath/ath6kl/htc_hif.h new file mode 100644 index 000000000000..171ad63d89b0 --- /dev/null +++ b/trunk/drivers/net/wireless/ath/ath6kl/htc_hif.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2007-2011 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef HTC_HIF_H +#define HTC_HIF_H + +#include "htc.h" +#include "hif.h" + +#define ATH6KL_MAILBOXES 4 + +/* HTC runs over mailbox 0 */ +#define HTC_MAILBOX 0 + +#define ATH6KL_TARGET_DEBUG_INTR_MASK 0x01 + +#define OTHER_INTS_ENABLED (INT_STATUS_ENABLE_ERROR_MASK | \ + INT_STATUS_ENABLE_CPU_MASK | \ + INT_STATUS_ENABLE_COUNTER_MASK) + +#define ATH6KL_REG_IO_BUFFER_SIZE 32 +#define ATH6KL_MAX_REG_IO_BUFFERS 8 +#define ATH6KL_SCATTER_ENTRIES_PER_REQ 16 +#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER (16 * 1024) +#define ATH6KL_SCATTER_REQS 4 + +#ifndef A_CACHE_LINE_PAD +#define A_CACHE_LINE_PAD 128 +#endif +#define ATH6KL_MIN_SCATTER_ENTRIES_PER_REQ 2 +#define ATH6KL_MIN_TRANSFER_SIZE_PER_SCATTER (4 * 1024) + +struct ath6kl_irq_proc_registers { + u8 host_int_status; + u8 cpu_int_status; + u8 error_int_status; + u8 counter_int_status; + u8 mbox_frame; + u8 rx_lkahd_valid; + u8 host_int_status2; + u8 gmbox_rx_avail; + __le32 rx_lkahd[2]; + __le32 rx_gmbox_lkahd_alias[2]; +} __packed; + +struct ath6kl_irq_enable_reg { + u8 int_status_en; + u8 cpu_int_status_en; + u8 err_int_status_en; + u8 cntr_int_status_en; +} __packed; + +struct ath6kl_device { + spinlock_t lock; + u8 pad1[A_CACHE_LINE_PAD]; + struct ath6kl_irq_proc_registers irq_proc_reg; + u8 pad2[A_CACHE_LINE_PAD]; + struct ath6kl_irq_enable_reg irq_en_reg; + u8 pad3[A_CACHE_LINE_PAD]; + struct htc_target *htc_cnxt; + struct ath6kl *ar; +}; + +int ath6kldev_setup(struct ath6kl_device *dev); +int ath6kldev_unmask_intrs(struct ath6kl_device *dev); +int ath6kldev_mask_intrs(struct ath6kl_device *dev); +int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, + u32 *lk_ahd, int timeout); +int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx); +int ath6kldev_disable_intrs(struct ath6kl_device *dev); + +int ath6kldev_rw_comp_handler(void *context, int status); +int ath6kldev_intr_bh_handler(struct ath6kl *ar); + +/* Scatter Function and Definitions */ +int ath6kldev_submit_scat_req(struct ath6kl_device *dev, + struct hif_scatter_req *scat_req, bool read); + +#endif /*ATH6KL_H_ */ diff --git a/trunk/drivers/net/wireless/ath/ath6kl/init.c b/trunk/drivers/net/wireless/ath/ath6kl/init.c index 7f55be3092d1..c1d2366704b5 100644 --- a/trunk/drivers/net/wireless/ath/ath6kl/init.c +++ b/trunk/drivers/net/wireless/ath/ath6kl/init.c @@ -16,7 +16,6 @@ */ #include -#include #include #include #include "core.h" @@ -27,85 +26,9 @@ unsigned int debug_mask; static unsigned int testmode; -static bool suspend_cutpower; module_param(debug_mask, uint, 0644); module_param(testmode, uint, 0644); -module_param(suspend_cutpower, bool, 0444); - -static const struct ath6kl_hw hw_list[] = { - { - .id = AR6003_HW_2_0_VERSION, - .name = "ar6003 hw 2.0", - .dataset_patch_addr = 0x57e884, - .app_load_addr = 0x543180, - .board_ext_data_addr = 0x57e500, - .reserved_ram_size = 6912, - .refclk_hz = 26000000, - .uarttx_pin = 8, - - /* hw2.0 needs override address hardcoded */ - .app_start_override_addr = 0x944C00, - - .fw_otp = AR6003_HW_2_0_OTP_FILE, - .fw = AR6003_HW_2_0_FIRMWARE_FILE, - .fw_tcmd = AR6003_HW_2_0_TCMD_FIRMWARE_FILE, - .fw_patch = AR6003_HW_2_0_PATCH_FILE, - .fw_api2 = AR6003_HW_2_0_FIRMWARE_2_FILE, - .fw_board = AR6003_HW_2_0_BOARD_DATA_FILE, - .fw_default_board = AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE, - }, - { - .id = AR6003_HW_2_1_1_VERSION, - .name = "ar6003 hw 2.1.1", - .dataset_patch_addr = 0x57ff74, - .app_load_addr = 0x1234, - .board_ext_data_addr = 0x542330, - .reserved_ram_size = 512, - .refclk_hz = 26000000, - .uarttx_pin = 8, - - 
.fw_otp = AR6003_HW_2_1_1_OTP_FILE, - .fw = AR6003_HW_2_1_1_FIRMWARE_FILE, - .fw_tcmd = AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE, - .fw_patch = AR6003_HW_2_1_1_PATCH_FILE, - .fw_api2 = AR6003_HW_2_1_1_FIRMWARE_2_FILE, - .fw_board = AR6003_HW_2_1_1_BOARD_DATA_FILE, - .fw_default_board = AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE, - }, - { - .id = AR6004_HW_1_0_VERSION, - .name = "ar6004 hw 1.0", - .dataset_patch_addr = 0x57e884, - .app_load_addr = 0x1234, - .board_ext_data_addr = 0x437000, - .reserved_ram_size = 19456, - .board_addr = 0x433900, - .refclk_hz = 26000000, - .uarttx_pin = 11, - - .fw = AR6004_HW_1_0_FIRMWARE_FILE, - .fw_api2 = AR6004_HW_1_0_FIRMWARE_2_FILE, - .fw_board = AR6004_HW_1_0_BOARD_DATA_FILE, - .fw_default_board = AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE, - }, - { - .id = AR6004_HW_1_1_VERSION, - .name = "ar6004 hw 1.1", - .dataset_patch_addr = 0x57e884, - .app_load_addr = 0x1234, - .board_ext_data_addr = 0x437000, - .reserved_ram_size = 11264, - .board_addr = 0x43d400, - .refclk_hz = 40000000, - .uarttx_pin = 11, - - .fw = AR6004_HW_1_1_FIRMWARE_FILE, - .fw_api2 = AR6004_HW_1_1_FIRMWARE_2_FILE, - .fw_board = AR6004_HW_1_1_BOARD_DATA_FILE, - .fw_default_board = AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE, - }, -}; /* * Include definitions here that can be used to tune the WLAN module @@ -132,6 +55,7 @@ static const struct ath6kl_hw hw_list[] = { */ #define WLAN_CONFIG_DISCONNECT_TIMEOUT 10 +#define CONFIG_AR600x_DEBUG_UART_TX_PIN 8 #define ATH6KL_DATA_OFFSET 64 struct sk_buff *ath6kl_buf_alloc(int size) @@ -149,21 +73,37 @@ struct sk_buff *ath6kl_buf_alloc(int size) return skb; } -void ath6kl_init_profile_info(struct ath6kl_vif *vif) +void ath6kl_init_profile_info(struct ath6kl *ar) { - vif->ssid_len = 0; - memset(vif->ssid, 0, sizeof(vif->ssid)); - - vif->dot11_auth_mode = OPEN_AUTH; - vif->auth_mode = NONE_AUTH; - vif->prwise_crypto = NONE_CRYPT; - vif->prwise_crypto_len = 0; - vif->grp_crypto = NONE_CRYPT; - vif->grp_crypto_len = 0; - memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list)); - memset(vif->req_bssid, 0, sizeof(vif->req_bssid)); - memset(vif->bssid, 0, sizeof(vif->bssid)); - vif->bss_ch = 0; + ar->ssid_len = 0; + memset(ar->ssid, 0, sizeof(ar->ssid)); + + ar->dot11_auth_mode = OPEN_AUTH; + ar->auth_mode = NONE_AUTH; + ar->prwise_crypto = NONE_CRYPT; + ar->prwise_crypto_len = 0; + ar->grp_crypto = NONE_CRYPT; + ar->grp_crypto_len = 0; + memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list)); + memset(ar->req_bssid, 0, sizeof(ar->req_bssid)); + memset(ar->bssid, 0, sizeof(ar->bssid)); + ar->bss_ch = 0; + ar->nw_type = ar->next_mode = INFRA_NETWORK; +} + +static u8 ath6kl_get_fw_iftype(struct ath6kl *ar) +{ + switch (ar->nw_type) { + case INFRA_NETWORK: + return HI_OPTION_FW_MODE_BSS_STA; + case ADHOC_NETWORK: + return HI_OPTION_FW_MODE_IBSS; + case AP_NETWORK: + return HI_OPTION_FW_MODE_AP; + default: + ath6kl_err("Unsupported interface type :%d\n", ar->nw_type); + return 0xff; + } } static int ath6kl_set_host_app_area(struct ath6kl *ar) @@ -180,7 +120,7 @@ static int ath6kl_set_host_app_area(struct ath6kl *ar) return -EIO; address = TARG_VTOP(ar->target_type, data); - host_app_area.wmi_protocol_ver = cpu_to_le32(WMI_PROTOCOL_VERSION); + host_app_area.wmi_protocol_ver = WMI_PROTOCOL_VERSION; if (ath6kl_diag_write(ar, address, (u8 *) &host_app_area, sizeof(struct host_app_area))) return -EIO; @@ -318,12 +258,40 @@ static int ath6kl_init_service_ep(struct ath6kl *ar) return 0; } -void ath6kl_init_control_info(struct ath6kl_vif *vif) +static void 
ath6kl_init_control_info(struct ath6kl *ar) { - ath6kl_init_profile_info(vif); - vif->def_txkey_index = 0; - memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list)); - vif->ch_hint = 0; + u8 ctr; + + clear_bit(WMI_ENABLED, &ar->flag); + ath6kl_init_profile_info(ar); + ar->def_txkey_index = 0; + memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list)); + ar->ch_hint = 0; + ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL; + ar->listen_intvl_b = 0; + ar->tx_pwr = 0; + clear_bit(SKIP_SCAN, &ar->flag); + set_bit(WMM_ENABLED, &ar->flag); + ar->intra_bss = 1; + memset(&ar->sc_params, 0, sizeof(ar->sc_params)); + ar->sc_params.short_scan_ratio = WMI_SHORTSCANRATIO_DEFAULT; + ar->sc_params.scan_ctrl_flags = DEFAULT_SCAN_CTRL_FLAGS; + ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD; + + memset((u8 *)ar->sta_list, 0, + AP_MAX_NUM_STA * sizeof(struct ath6kl_sta)); + + spin_lock_init(&ar->mcastpsq_lock); + + /* Init the PS queues */ + for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) { + spin_lock_init(&ar->sta_list[ctr].psq_lock); + skb_queue_head_init(&ar->sta_list[ctr].psq); + } + + skb_queue_head_init(&ar->mcastpsq); + + memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3); } /* @@ -373,7 +341,62 @@ static int ath6kl_set_htc_params(struct ath6kl *ar, u32 mbox_isr_yield_val, return status; } -static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx) +#define REG_DUMP_COUNT_AR6003 60 +#define REGISTER_DUMP_LEN_MAX 60 + +static void ath6kl_dump_target_assert_info(struct ath6kl *ar) +{ + u32 address; + u32 regdump_loc = 0; + int status; + u32 regdump_val[REGISTER_DUMP_LEN_MAX]; + u32 i; + + if (ar->target_type != TARGET_TYPE_AR6003) + return; + + /* the reg dump pointer is copied to the host interest area */ + address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state)); + address = TARG_VTOP(ar->target_type, address); + + /* read RAM location through diagnostic window */ + status = ath6kl_diag_read32(ar, address, ®dump_loc); + + if (status || !regdump_loc) { + ath6kl_err("failed to get ptr to register dump area\n"); + return; + } + + ath6kl_dbg(ATH6KL_DBG_TRC, "location of register dump data: 0x%X\n", + regdump_loc); + regdump_loc = TARG_VTOP(ar->target_type, regdump_loc); + + /* fetch register dump data */ + status = ath6kl_diag_read(ar, regdump_loc, (u8 *)®dump_val[0], + REG_DUMP_COUNT_AR6003 * (sizeof(u32))); + + if (status) { + ath6kl_err("failed to get register dump\n"); + return; + } + ath6kl_dbg(ATH6KL_DBG_TRC, "Register Dump:\n"); + + for (i = 0; i < REG_DUMP_COUNT_AR6003; i++) + ath6kl_dbg(ATH6KL_DBG_TRC, " %d : 0x%8.8X\n", + i, regdump_val[i]); + +} + +void ath6kl_target_failure(struct ath6kl *ar) +{ + ath6kl_err("target asserted\n"); + + /* try dumping target assertion information (if any) */ + ath6kl_dump_target_assert_info(ar); + +} + +static int ath6kl_target_config_wlan_params(struct ath6kl *ar) { int status = 0; int ret; @@ -383,46 +406,46 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx) * default values. Required if checksum offload is needed. Set * RxMetaVersion to 2. 
*/ - if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, idx, + if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, ar->rx_meta_ver, 0, 0)) { ath6kl_err("unable to set the rx frame format\n"); status = -EIO; } if (ar->conf_flags & ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN) - if ((ath6kl_wmi_pmparams_cmd(ar->wmi, idx, 0, 1, 0, 0, 1, + if ((ath6kl_wmi_pmparams_cmd(ar->wmi, 0, 1, 0, 0, 1, IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN)) != 0) { ath6kl_err("unable to set power save fail event policy\n"); status = -EIO; } if (!(ar->conf_flags & ATH6KL_CONF_IGNORE_ERP_BARKER)) - if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, idx, 0, + if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, 0, WMI_DONOT_IGNORE_BARKER_IN_ERP)) != 0) { ath6kl_err("unable to set barker preamble policy\n"); status = -EIO; } - if (ath6kl_wmi_set_keepalive_cmd(ar->wmi, idx, + if (ath6kl_wmi_set_keepalive_cmd(ar->wmi, WLAN_CONFIG_KEEP_ALIVE_INTERVAL)) { ath6kl_err("unable to set keep alive interval\n"); status = -EIO; } - if (ath6kl_wmi_disctimeout_cmd(ar->wmi, idx, + if (ath6kl_wmi_disctimeout_cmd(ar->wmi, WLAN_CONFIG_DISCONNECT_TIMEOUT)) { ath6kl_err("unable to set disconnect timeout\n"); status = -EIO; } if (!(ar->conf_flags & ATH6KL_CONF_ENABLE_TX_BURST)) - if (ath6kl_wmi_set_wmm_txop(ar->wmi, idx, WMI_TXOP_DISABLED)) { + if (ath6kl_wmi_set_wmm_txop(ar->wmi, WMI_TXOP_DISABLED)) { ath6kl_err("unable to set txop bursting\n"); status = -EIO; } - if (ar->p2p && (ar->vif_max == 1 || idx)) { - ret = ath6kl_wmi_info_req_cmd(ar->wmi, idx, + if (ar->p2p) { + ret = ath6kl_wmi_info_req_cmd(ar->wmi, P2P_FLAG_CAPABILITIES_REQ | P2P_FLAG_MACADDR_REQ | P2P_FLAG_HMODEL_REQ); @@ -430,13 +453,13 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx) ath6kl_dbg(ATH6KL_DBG_TRC, "failed to request P2P " "capabilities (%d) - assuming P2P not " "supported\n", ret); - ar->p2p = false; + ar->p2p = 0; } } - if (ar->p2p && (ar->vif_max == 1 || idx)) { + if (ar->p2p) { /* Enable Probe Request reporting for P2P */ - ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, idx, true); + ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, true); if (ret) { ath6kl_dbg(ATH6KL_DBG_TRC, "failed to enable Probe " "Request reporting (%d)\n", ret); @@ -449,40 +472,13 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx) int ath6kl_configure_target(struct ath6kl *ar) { u32 param, ram_reserved_size; - u8 fw_iftype, fw_mode = 0, fw_submode = 0; - int i, status; - - /* - * Note: Even though the firmware interface type is - * chosen as BSS_STA for all three interfaces, can - * be configured to IBSS/AP as long as the fw submode - * remains normal mode (0 - AP, STA and IBSS). But - * due to an target assert in firmware only one interface is - * configured for now. 
- */ - fw_iftype = HI_OPTION_FW_MODE_BSS_STA; - - for (i = 0; i < ar->vif_max; i++) - fw_mode |= fw_iftype << (i * HI_OPTION_FW_MODE_BITS); - - /* - * By default, submodes : - * vif[0] - AP/STA/IBSS - * vif[1] - "P2P dev"/"P2P GO"/"P2P Client" - * vif[2] - "P2P dev"/"P2P GO"/"P2P Client" - */ - - for (i = 0; i < ar->max_norm_iface; i++) - fw_submode |= HI_OPTION_FW_SUBMODE_NONE << - (i * HI_OPTION_FW_SUBMODE_BITS); + u8 fw_iftype; - for (i = ar->max_norm_iface; i < ar->vif_max; i++) - fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV << - (i * HI_OPTION_FW_SUBMODE_BITS); - - if (ar->p2p && ar->vif_max == 1) - fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV; + fw_iftype = ath6kl_get_fw_iftype(ar); + if (fw_iftype == 0xff) + return -EINVAL; + /* Tell target which HTC version it is used*/ param = HTC_PROTOCOL_VERSION; if (ath6kl_bmi_write(ar, ath6kl_get_hi_item_addr(ar, @@ -503,10 +499,12 @@ int ath6kl_configure_target(struct ath6kl *ar) return -EIO; } - param |= (ar->vif_max << HI_OPTION_NUM_DEV_SHIFT); - param |= fw_mode << HI_OPTION_FW_MODE_SHIFT; - param |= fw_submode << HI_OPTION_FW_SUBMODE_SHIFT; - + param |= (1 << HI_OPTION_NUM_DEV_SHIFT); + param |= (fw_iftype << HI_OPTION_FW_MODE_SHIFT); + if (ar->p2p && fw_iftype == HI_OPTION_FW_MODE_BSS_STA) { + param |= HI_OPTION_FW_SUBMODE_P2PDEV << + HI_OPTION_FW_SUBMODE_SHIFT; + } param |= (0 << HI_OPTION_MAC_ADDR_METHOD_SHIFT); param |= (0 << HI_OPTION_FW_BRIDGE_SHIFT); @@ -552,55 +550,71 @@ int ath6kl_configure_target(struct ath6kl *ar) /* use default number of control buffers */ return -EIO; - /* Configure GPIO AR600x UART */ - param = ar->hw.uarttx_pin; - status = ath6kl_bmi_write(ar, - ath6kl_get_hi_item_addr(ar, - HI_ITEM(hi_dbg_uart_txpin)), - (u8 *)¶m, 4); - if (status) - return status; - - /* Configure target refclk_hz */ - param = ar->hw.refclk_hz; - status = ath6kl_bmi_write(ar, - ath6kl_get_hi_item_addr(ar, - HI_ITEM(hi_refclk_hz)), - (u8 *)¶m, 4); - if (status) - return status; - return 0; } -void ath6kl_core_free(struct ath6kl *ar) +struct ath6kl *ath6kl_core_alloc(struct device *sdev) { - wiphy_free(ar->wiphy); -} + struct net_device *dev; + struct ath6kl *ar; + struct wireless_dev *wdev; + + wdev = ath6kl_cfg80211_init(sdev); + if (!wdev) { + ath6kl_err("ath6kl_cfg80211_init failed\n"); + return NULL; + } -void ath6kl_core_cleanup(struct ath6kl *ar) -{ - ath6kl_hif_power_off(ar); + ar = wdev_priv(wdev); + ar->dev = sdev; + ar->wdev = wdev; + wdev->iftype = NL80211_IFTYPE_STATION; - destroy_workqueue(ar->ath6kl_wq); + if (ath6kl_debug_init(ar)) { + ath6kl_err("Failed to initialize debugfs\n"); + ath6kl_cfg80211_deinit(ar); + return NULL; + } - if (ar->htc_target) - ath6kl_htc_cleanup(ar->htc_target); + dev = alloc_netdev(0, "wlan%d", ether_setup); + if (!dev) { + ath6kl_err("no memory for network device instance\n"); + ath6kl_cfg80211_deinit(ar); + return NULL; + } - ath6kl_cookie_cleanup(ar); + dev->ieee80211_ptr = wdev; + SET_NETDEV_DEV(dev, wiphy_dev(wdev->wiphy)); + wdev->netdev = dev; + ar->sme_state = SME_DISCONNECTED; - ath6kl_cleanup_amsdu_rxbufs(ar); + init_netdev(dev); - ath6kl_bmi_cleanup(ar); + ar->net_dev = dev; + set_bit(WLAN_ENABLED, &ar->flag); - ath6kl_debug_cleanup(ar); + ar->wlan_pwr_state = WLAN_POWER_STATE_ON; - kfree(ar->fw_board); - kfree(ar->fw_otp); - kfree(ar->fw); - kfree(ar->fw_patch); + spin_lock_init(&ar->lock); + + ath6kl_init_control_info(ar); + init_waitqueue_head(&ar->event_wq); + sema_init(&ar->sem, 1); + clear_bit(DESTROY_IN_PROGRESS, &ar->flag); + + INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue); + + 
setup_timer(&ar->disconnect_timer, disconnect_timer_handler, + (unsigned long) dev); - ath6kl_deinit_ieee80211_hw(ar); + return ar; +} + +int ath6kl_unavail_ev(struct ath6kl *ar) +{ + ath6kl_destroy(ar->net_dev, 1); + + return 0; } /* firmware upload */ @@ -629,11 +643,11 @@ static int ath6kl_get_fw(struct ath6kl *ar, const char *filename, static const char *get_target_ver_dir(const struct ath6kl *ar) { switch (ar->version.target_ver) { - case AR6003_HW_1_0_VERSION: + case AR6003_REV1_VERSION: return "ath6k/AR6003/hw1.0"; - case AR6003_HW_2_0_VERSION: + case AR6003_REV2_VERSION: return "ath6k/AR6003/hw2.0"; - case AR6003_HW_2_1_1_VERSION: + case AR6003_REV3_VERSION: return "ath6k/AR6003/hw2.1.1"; } ath6kl_warn("%s: unsupported target version 0x%x.\n", __func__, @@ -691,10 +705,17 @@ static int ath6kl_fetch_board_file(struct ath6kl *ar) if (ar->fw_board != NULL) return 0; - if (WARN_ON(ar->hw.fw_board == NULL)) - return -EINVAL; - - filename = ar->hw.fw_board; + switch (ar->version.target_ver) { + case AR6003_REV2_VERSION: + filename = AR6003_REV2_BOARD_DATA_FILE; + break; + case AR6004_REV1_VERSION: + filename = AR6004_REV1_BOARD_DATA_FILE; + break; + default: + filename = AR6003_REV3_BOARD_DATA_FILE; + break; + } ret = ath6kl_get_fw(ar, filename, &ar->fw_board, &ar->fw_board_len); @@ -712,7 +733,17 @@ static int ath6kl_fetch_board_file(struct ath6kl *ar) ath6kl_warn("Failed to get board file %s (%d), trying to find default board file.\n", filename, ret); - filename = ar->hw.fw_default_board; + switch (ar->version.target_ver) { + case AR6003_REV2_VERSION: + filename = AR6003_REV2_DEFAULT_BOARD_DATA_FILE; + break; + case AR6004_REV1_VERSION: + filename = AR6004_REV1_DEFAULT_BOARD_DATA_FILE; + break; + default: + filename = AR6003_REV3_DEFAULT_BOARD_DATA_FILE; + break; + } ret = ath6kl_get_fw(ar, filename, &ar->fw_board, &ar->fw_board_len); @@ -736,14 +767,19 @@ static int ath6kl_fetch_otp_file(struct ath6kl *ar) if (ar->fw_otp != NULL) return 0; - if (ar->hw.fw_otp == NULL) { - ath6kl_dbg(ATH6KL_DBG_BOOT, - "no OTP file configured for this hw\n"); + switch (ar->version.target_ver) { + case AR6003_REV2_VERSION: + filename = AR6003_REV2_OTP_FILE; + break; + case AR6004_REV1_VERSION: + ath6kl_dbg(ATH6KL_DBG_TRC, "AR6004 doesn't need OTP file\n"); return 0; + break; + default: + filename = AR6003_REV3_OTP_FILE; + break; } - filename = ar->hw.fw_otp; - ret = ath6kl_get_fw(ar, filename, &ar->fw_otp, &ar->fw_otp_len); if (ret) { @@ -764,22 +800,38 @@ static int ath6kl_fetch_fw_file(struct ath6kl *ar) return 0; if (testmode) { - if (ar->hw.fw_tcmd == NULL) { - ath6kl_warn("testmode not supported\n"); + switch (ar->version.target_ver) { + case AR6003_REV2_VERSION: + filename = AR6003_REV2_TCMD_FIRMWARE_FILE; + break; + case AR6003_REV3_VERSION: + filename = AR6003_REV3_TCMD_FIRMWARE_FILE; + break; + case AR6004_REV1_VERSION: + ath6kl_warn("testmode not supported with ar6004\n"); return -EOPNOTSUPP; + default: + ath6kl_warn("unknown target version: 0x%x\n", + ar->version.target_ver); + return -EINVAL; } - filename = ar->hw.fw_tcmd; - set_bit(TESTMODE, &ar->flag); goto get_fw; } - if (WARN_ON(ar->hw.fw == NULL)) - return -EINVAL; - - filename = ar->hw.fw; + switch (ar->version.target_ver) { + case AR6003_REV2_VERSION: + filename = AR6003_REV2_FIRMWARE_FILE; + break; + case AR6004_REV1_VERSION: + filename = AR6004_REV1_FIRMWARE_FILE; + break; + default: + filename = AR6003_REV3_FIRMWARE_FILE; + break; + } get_fw: ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len); @@ -797,20 +849,27 @@ static int 
ath6kl_fetch_patch_file(struct ath6kl *ar) const char *filename; int ret; - if (ar->fw_patch != NULL) - return 0; - - if (ar->hw.fw_patch == NULL) + switch (ar->version.target_ver) { + case AR6003_REV2_VERSION: + filename = AR6003_REV2_PATCH_FILE; + break; + case AR6004_REV1_VERSION: + /* FIXME: implement for AR6004 */ return 0; + break; + default: + filename = AR6003_REV3_PATCH_FILE; + break; + } - filename = ar->hw.fw_patch; - - ret = ath6kl_get_fw(ar, filename, &ar->fw_patch, - &ar->fw_patch_len); - if (ret) { - ath6kl_err("Failed to get patch file %s: %d\n", - filename, ret); - return ret; + if (ar->fw_patch == NULL) { + ret = ath6kl_get_fw(ar, filename, &ar->fw_patch, + &ar->fw_patch_len); + if (ret) { + ath6kl_err("Failed to get patch file %s: %d\n", + filename, ret); + return ret; + } } return 0; @@ -845,10 +904,19 @@ static int ath6kl_fetch_fw_api2(struct ath6kl *ar) int ret, ie_id, i, index, bit; __le32 *val; - if (ar->hw.fw_api2 == NULL) + switch (ar->version.target_ver) { + case AR6003_REV2_VERSION: + filename = AR6003_REV2_FIRMWARE_2_FILE; + break; + case AR6003_REV3_VERSION: + filename = AR6003_REV3_FIRMWARE_2_FILE; + break; + case AR6004_REV1_VERSION: + filename = AR6004_REV1_FIRMWARE_2_FILE; + break; + default: return -EOPNOTSUPP; - - filename = ar->hw.fw_api2; + } ret = request_firmware(&fw, filename, ar->dev); if (ret) @@ -938,15 +1006,12 @@ static int ath6kl_fetch_fw_api2(struct ath6kl *ar) ar->hw.reserved_ram_size); break; case ATH6KL_FW_IE_CAPABILITIES: - if (ie_len < DIV_ROUND_UP(ATH6KL_FW_CAPABILITY_MAX, 8)) - break; - ath6kl_dbg(ATH6KL_DBG_BOOT, "found firmware capabilities ie (%zd B)\n", ie_len); for (i = 0; i < ATH6KL_FW_CAPABILITY_MAX; i++) { - index = i / 8; + index = ALIGN(i, 8) / 8; bit = i % 8; if (data[index] & (1 << bit)) @@ -965,34 +1030,9 @@ static int ath6kl_fetch_fw_api2(struct ath6kl *ar) ar->hw.dataset_patch_addr = le32_to_cpup(val); ath6kl_dbg(ATH6KL_DBG_BOOT, - "found patch address ie 0x%x\n", + "found patch address ie 0x%d\n", ar->hw.dataset_patch_addr); break; - case ATH6KL_FW_IE_BOARD_ADDR: - if (ie_len != sizeof(*val)) - break; - - val = (__le32 *) data; - ar->hw.board_addr = le32_to_cpup(val); - - ath6kl_dbg(ATH6KL_DBG_BOOT, - "found board address ie 0x%x\n", - ar->hw.board_addr); - break; - case ATH6KL_FW_IE_VIF_MAX: - if (ie_len != sizeof(*val)) - break; - - val = (__le32 *) data; - ar->vif_max = min_t(unsigned int, le32_to_cpup(val), - ATH6KL_VIF_MAX); - - if (ar->vif_max > 1 && !ar->p2p) - ar->max_norm_iface = 2; - - ath6kl_dbg(ATH6KL_DBG_BOOT, - "found vif max ie %d\n", ar->vif_max); - break; default: ath6kl_dbg(ATH6KL_DBG_BOOT, "Unknown fw ie: %u\n", le32_to_cpup(&hdr->id)); @@ -1047,8 +1087,8 @@ static int ath6kl_upload_board_file(struct ath6kl *ar) * For AR6004, host determine Target RAM address for * writing board data. 
*/ - if (ar->hw.board_addr != 0) { - board_address = ar->hw.board_addr; + if (ar->target_type == TARGET_TYPE_AR6004) { + board_address = AR6004_REV1_BOARD_DATA_ADDRESS; ath6kl_bmi_write(ar, ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_board_data)), @@ -1066,8 +1106,7 @@ static int ath6kl_upload_board_file(struct ath6kl *ar) HI_ITEM(hi_board_ext_data)), (u8 *) &board_ext_address, 4); - if (ar->target_type == TARGET_TYPE_AR6003 && - board_ext_address == 0) { + if (board_ext_address == 0) { ath6kl_err("Failed to get board file target address.\n"); return -EINVAL; } @@ -1087,8 +1126,8 @@ static int ath6kl_upload_board_file(struct ath6kl *ar) break; } - if (board_ext_address && - ar->fw_board_len == (board_data_size + board_ext_data_size)) { + if (ar->fw_board_len == (board_data_size + + board_ext_data_size)) { /* write extended board data */ ath6kl_dbg(ATH6KL_DBG_BOOT, @@ -1143,11 +1182,10 @@ static int ath6kl_upload_board_file(struct ath6kl *ar) static int ath6kl_upload_otp(struct ath6kl *ar) { u32 address, param; - bool from_hw = false; int ret; - if (ar->fw_otp == NULL) - return 0; + if (WARN_ON(ar->fw_otp == NULL)) + return -ENOENT; address = ar->hw.app_load_addr; @@ -1172,20 +1210,15 @@ static int ath6kl_upload_otp(struct ath6kl *ar) return ret; } - if (ar->hw.app_start_override_addr == 0) { - ar->hw.app_start_override_addr = address; - from_hw = true; - } + ar->hw.app_start_override_addr = address; - ath6kl_dbg(ATH6KL_DBG_BOOT, "app_start_override_addr%s 0x%x\n", - from_hw ? " (from hw)" : "", + ath6kl_dbg(ATH6KL_DBG_BOOT, "app_start_override_addr 0x%x\n", ar->hw.app_start_override_addr); /* execute the OTP code */ - ath6kl_dbg(ATH6KL_DBG_BOOT, "executing OTP at 0x%x\n", - ar->hw.app_start_override_addr); + ath6kl_dbg(ATH6KL_DBG_BOOT, "executing OTP at 0x%x\n", address); param = 0; - ath6kl_bmi_execute(ar, ar->hw.app_start_override_addr, ¶m); + ath6kl_bmi_execute(ar, address, ¶m); return ret; } @@ -1196,7 +1229,7 @@ static int ath6kl_upload_firmware(struct ath6kl *ar) int ret; if (WARN_ON(ar->fw == NULL)) - return 0; + return -ENOENT; address = ar->hw.app_load_addr; @@ -1226,8 +1259,8 @@ static int ath6kl_upload_patch(struct ath6kl *ar) u32 address, param; int ret; - if (ar->fw_patch == NULL) - return 0; + if (WARN_ON(ar->fw_patch == NULL)) + return -ENOENT; address = ar->hw.dataset_patch_addr; @@ -1312,7 +1345,7 @@ static int ath6kl_init_upload(struct ath6kl *ar) return status; /* WAR to avoid SDIO CRC err */ - if (ar->version.target_ver == AR6003_HW_2_0_VERSION) { + if (ar->version.target_ver == AR6003_REV2_VERSION) { ath6kl_err("temporary war to avoid sdio crc error\n"); param = 0x20; @@ -1369,29 +1402,43 @@ static int ath6kl_init_upload(struct ath6kl *ar) if (status) return status; + /* Configure GPIO AR6003 UART */ + param = CONFIG_AR600x_DEBUG_UART_TX_PIN; + status = ath6kl_bmi_write(ar, + ath6kl_get_hi_item_addr(ar, + HI_ITEM(hi_dbg_uart_txpin)), + (u8 *)¶m, 4); + return status; } static int ath6kl_init_hw_params(struct ath6kl *ar) { - const struct ath6kl_hw *hw; - int i; - - for (i = 0; i < ARRAY_SIZE(hw_list); i++) { - hw = &hw_list[i]; - - if (hw->id == ar->version.target_ver) - break; - } - - if (i == ARRAY_SIZE(hw_list)) { + switch (ar->version.target_ver) { + case AR6003_REV2_VERSION: + ar->hw.dataset_patch_addr = AR6003_REV2_DATASET_PATCH_ADDRESS; + ar->hw.app_load_addr = AR6003_REV2_APP_LOAD_ADDRESS; + ar->hw.board_ext_data_addr = AR6003_REV2_BOARD_EXT_DATA_ADDRESS; + ar->hw.reserved_ram_size = AR6003_REV2_RAM_RESERVE_SIZE; + break; + case AR6003_REV3_VERSION: + 
ar->hw.dataset_patch_addr = AR6003_REV3_DATASET_PATCH_ADDRESS; + ar->hw.app_load_addr = 0x1234; + ar->hw.board_ext_data_addr = AR6003_REV3_BOARD_EXT_DATA_ADDRESS; + ar->hw.reserved_ram_size = AR6003_REV3_RAM_RESERVE_SIZE; + break; + case AR6004_REV1_VERSION: + ar->hw.dataset_patch_addr = AR6003_REV2_DATASET_PATCH_ADDRESS; + ar->hw.app_load_addr = AR6003_REV3_APP_LOAD_ADDRESS; + ar->hw.board_ext_data_addr = AR6004_REV1_BOARD_EXT_DATA_ADDRESS; + ar->hw.reserved_ram_size = AR6004_REV1_RAM_RESERVE_SIZE; + break; + default: ath6kl_err("Unsupported hardware version: 0x%x\n", ar->version.target_ver); return -EINVAL; } - ar->hw = *hw; - ath6kl_dbg(ATH6KL_DBG_BOOT, "target_ver 0x%x target_type 0x%x dataset_patch 0x%x app_load_addr 0x%x\n", ar->version.target_ver, ar->target_type, @@ -1400,75 +1447,75 @@ static int ath6kl_init_hw_params(struct ath6kl *ar) "app_start_override_addr 0x%x board_ext_data_addr 0x%x reserved_ram_size 0x%x", ar->hw.app_start_override_addr, ar->hw.board_ext_data_addr, ar->hw.reserved_ram_size); - ath6kl_dbg(ATH6KL_DBG_BOOT, - "refclk_hz %d uarttx_pin %d", - ar->hw.refclk_hz, ar->hw.uarttx_pin); return 0; } -static const char *ath6kl_init_get_hif_name(enum ath6kl_hif_type type) +static int ath6kl_init(struct net_device *dev) { - switch (type) { - case ATH6KL_HIF_TYPE_SDIO: - return "sdio"; - case ATH6KL_HIF_TYPE_USB: - return "usb"; - } - - return NULL; -} - -int ath6kl_init_hw_start(struct ath6kl *ar) -{ - long timeleft; - int ret, i; - - ath6kl_dbg(ATH6KL_DBG_BOOT, "hw start\n"); - - ret = ath6kl_hif_power_on(ar); - if (ret) - return ret; - - ret = ath6kl_configure_target(ar); - if (ret) - goto err_power_off; + struct ath6kl *ar = ath6kl_priv(dev); + int status = 0; + s32 timeleft; - ret = ath6kl_init_upload(ar); - if (ret) - goto err_power_off; + if (!ar) + return -EIO; /* Do we need to finish the BMI phase */ - /* FIXME: return error from ath6kl_bmi_done() */ if (ath6kl_bmi_done(ar)) { - ret = -EIO; - goto err_power_off; + status = -EIO; + goto ath6kl_init_done; + } + + /* Indicate that WMI is enabled (although not ready yet) */ + set_bit(WMI_ENABLED, &ar->flag); + ar->wmi = ath6kl_wmi_init(ar); + if (!ar->wmi) { + ath6kl_err("failed to initialize wmi\n"); + status = -EIO; + goto ath6kl_init_done; } + ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi); + /* * The reason we have to wait for the target here is that the * driver layer has to init BMI in order to set the host block * size. 
*/ if (ath6kl_htc_wait_target(ar->htc_target)) { - ret = -EIO; - goto err_power_off; + status = -EIO; + goto err_node_cleanup; } if (ath6kl_init_service_ep(ar)) { - ret = -EIO; + status = -EIO; goto err_cleanup_scatter; } + /* setup access class priority mappings */ + ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */ + ar->ac_stream_pri_map[WMM_AC_BE] = 1; + ar->ac_stream_pri_map[WMM_AC_VI] = 2; + ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */ + + /* give our connected endpoints some buffers */ + ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep); + ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]); + + /* allocate some buffers that handle larger AMSDU frames */ + ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS); + /* setup credit distribution */ - ath6kl_credit_setup(ar->htc_target, &ar->credit_state_info); + ath6k_setup_credit_dist(ar->htc_target, &ar->credit_state_info); + + ath6kl_cookie_init(ar); /* start HTC */ - ret = ath6kl_htc_start(ar->htc_target); - if (ret) { - /* FIXME: call this */ + status = ath6kl_htc_start(ar->htc_target); + + if (status) { ath6kl_cookie_cleanup(ar); - goto err_cleanup_scatter; + goto err_rxbuf_cleanup; } /* Wait for Wmi event to be ready */ @@ -1479,81 +1526,54 @@ int ath6kl_init_hw_start(struct ath6kl *ar) ath6kl_dbg(ATH6KL_DBG_BOOT, "firmware booted\n"); - - if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) { - ath6kl_info("%s %s fw %s%s\n", - ar->hw.name, - ath6kl_init_get_hif_name(ar->hif_type), - ar->wiphy->fw_version, - test_bit(TESTMODE, &ar->flag) ? " testmode" : ""); - } - if (ar->version.abi_ver != ATH6KL_ABI_VERSION) { ath6kl_err("abi version mismatch: host(0x%x), target(0x%x)\n", ATH6KL_ABI_VERSION, ar->version.abi_ver); - ret = -EIO; + status = -EIO; goto err_htc_stop; } if (!timeleft || signal_pending(current)) { ath6kl_err("wmi is not ready or wait was interrupted\n"); - ret = -EIO; + status = -EIO; goto err_htc_stop; } ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__); /* communicate the wmi protocol verision to the target */ - /* FIXME: return error */ if ((ath6kl_set_host_app_area(ar)) != 0) ath6kl_err("unable to set the host app area\n"); - for (i = 0; i < ar->vif_max; i++) { - ret = ath6kl_target_config_wlan_params(ar, i); - if (ret) - goto err_htc_stop; - } + ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER | + ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST; - ar->state = ATH6KL_STATE_ON; + ar->wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM; - return 0; + status = ath6kl_target_config_wlan_params(ar); + if (!status) + goto ath6kl_init_done; err_htc_stop: ath6kl_htc_stop(ar->htc_target); +err_rxbuf_cleanup: + ath6kl_htc_flush_rx_buf(ar->htc_target); + ath6kl_cleanup_amsdu_rxbufs(ar); err_cleanup_scatter: ath6kl_hif_cleanup_scatter(ar); -err_power_off: - ath6kl_hif_power_off(ar); - - return ret; -} - -int ath6kl_init_hw_stop(struct ath6kl *ar) -{ - int ret; - - ath6kl_dbg(ATH6KL_DBG_BOOT, "hw stop\n"); - - ath6kl_htc_stop(ar->htc_target); - - ath6kl_hif_stop(ar); - - ath6kl_bmi_reset(ar); - - ret = ath6kl_hif_power_off(ar); - if (ret) - ath6kl_warn("failed to power off hif: %d\n", ret); - - ar->state = ATH6KL_STATE_OFF; +err_node_cleanup: + ath6kl_wmi_shutdown(ar->wmi); + clear_bit(WMI_ENABLED, &ar->flag); + ar->wmi = NULL; - return 0; +ath6kl_init_done: + return status; } int ath6kl_core_init(struct ath6kl *ar) { + int ret = 0; struct ath6kl_bmi_target_info targ_info; - struct net_device *ndev; - int ret = 0, i; ar->ath6kl_wq = create_singlethread_workqueue("ath6kl"); if (!ar->ath6kl_wq) @@ -1563,236 
+1583,145 @@ int ath6kl_core_init(struct ath6kl *ar) if (ret) goto err_wq; - /* - * Turn on power to get hardware (target) version and leave power - * on delibrately as we will boot the hardware anyway within few - * seconds. - */ - ret = ath6kl_hif_power_on(ar); - if (ret) - goto err_bmi_cleanup; - ret = ath6kl_bmi_get_target_info(ar, &targ_info); if (ret) - goto err_power_off; + goto err_bmi_cleanup; ar->version.target_ver = le32_to_cpu(targ_info.version); ar->target_type = le32_to_cpu(targ_info.type); - ar->wiphy->hw_version = le32_to_cpu(targ_info.version); + ar->wdev->wiphy->hw_version = le32_to_cpu(targ_info.version); ret = ath6kl_init_hw_params(ar); if (ret) - goto err_power_off; + goto err_bmi_cleanup; + + ret = ath6kl_configure_target(ar); + if (ret) + goto err_bmi_cleanup; ar->htc_target = ath6kl_htc_create(ar); if (!ar->htc_target) { ret = -ENOMEM; - goto err_power_off; + goto err_bmi_cleanup; + } + + ar->aggr_cntxt = aggr_init(ar->net_dev); + if (!ar->aggr_cntxt) { + ath6kl_err("failed to initialize aggr\n"); + ret = -ENOMEM; + goto err_htc_cleanup; } ret = ath6kl_fetch_firmwares(ar); if (ret) goto err_htc_cleanup; - /* FIXME: we should free all firmwares in the error cases below */ - - /* Indicate that WMI is enabled (although not ready yet) */ - set_bit(WMI_ENABLED, &ar->flag); - ar->wmi = ath6kl_wmi_init(ar); - if (!ar->wmi) { - ath6kl_err("failed to initialize wmi\n"); - ret = -EIO; + ret = ath6kl_init_upload(ar); + if (ret) goto err_htc_cleanup; - } - - ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi); - ret = ath6kl_register_ieee80211_hw(ar); + ret = ath6kl_init(ar->net_dev); if (ret) - goto err_node_cleanup; + goto err_htc_cleanup; - ret = ath6kl_debug_init(ar); + /* This runs the init function if registered */ + ret = register_netdev(ar->net_dev); if (ret) { - wiphy_unregister(ar->wiphy); - goto err_node_cleanup; - } - - for (i = 0; i < ar->vif_max; i++) - ar->avail_idx_map |= BIT(i); - - rtnl_lock(); - - /* Add an initial station interface */ - ndev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION, 0, - INFRA_NETWORK); - - rtnl_unlock(); - - if (!ndev) { - ath6kl_err("Failed to instantiate a network device\n"); - ret = -ENOMEM; - wiphy_unregister(ar->wiphy); - goto err_debug_init; + ath6kl_err("register_netdev failed\n"); + ath6kl_destroy(ar->net_dev, 0); + return ret; } + set_bit(NETDEV_REGISTERED, &ar->flag); ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n", - __func__, ndev->name, ndev, ar); - - /* setup access class priority mappings */ - ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */ - ar->ac_stream_pri_map[WMM_AC_BE] = 1; - ar->ac_stream_pri_map[WMM_AC_VI] = 2; - ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */ - - /* give our connected endpoints some buffers */ - ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep); - ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]); - - /* allocate some buffers that handle larger AMSDU frames */ - ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS); - - ath6kl_cookie_init(ar); - - ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER | - ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST; - - if (suspend_cutpower) - ar->conf_flags |= ATH6KL_CONF_SUSPEND_CUTPOWER; - - ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM | - WIPHY_FLAG_HAVE_AP_SME | - WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | - WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; - - if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities)) - ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; - - ar->wiphy->probe_resp_offload 
= - NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | - NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | - NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P | - NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U; - - set_bit(FIRST_BOOT, &ar->flag); - - ret = ath6kl_init_hw_start(ar); - if (ret) { - ath6kl_err("Failed to start hardware: %d\n", ret); - goto err_rxbuf_cleanup; - } - - /* - * Set mac address which is received in ready event - * FIXME: Move to ath6kl_interface_add() - */ - memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN); + __func__, ar->net_dev->name, ar->net_dev, ar); return ret; -err_rxbuf_cleanup: - ath6kl_htc_flush_rx_buf(ar->htc_target); - ath6kl_cleanup_amsdu_rxbufs(ar); - rtnl_lock(); - ath6kl_deinit_if_data(netdev_priv(ndev)); - rtnl_unlock(); - wiphy_unregister(ar->wiphy); -err_debug_init: - ath6kl_debug_cleanup(ar); -err_node_cleanup: - ath6kl_wmi_shutdown(ar->wmi); - clear_bit(WMI_ENABLED, &ar->flag); - ar->wmi = NULL; err_htc_cleanup: ath6kl_htc_cleanup(ar->htc_target); -err_power_off: - ath6kl_hif_power_off(ar); err_bmi_cleanup: ath6kl_bmi_cleanup(ar); err_wq: destroy_workqueue(ar->ath6kl_wq); - return ret; } -void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready) +void ath6kl_stop_txrx(struct ath6kl *ar) { - static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; - bool discon_issued; - - netif_stop_queue(vif->ndev); + struct net_device *ndev = ar->net_dev; - clear_bit(WLAN_ENABLED, &vif->flags); + if (!ndev) + return; - if (wmi_ready) { - discon_issued = test_bit(CONNECTED, &vif->flags) || - test_bit(CONNECT_PEND, &vif->flags); - ath6kl_disconnect(vif); - del_timer(&vif->disconnect_timer); + set_bit(DESTROY_IN_PROGRESS, &ar->flag); - if (discon_issued) - ath6kl_disconnect_event(vif, DISCONNECT_CMD, - (vif->nw_type & AP_NETWORK) ? - bcast_mac : vif->bssid, - 0, NULL, 0); + if (down_interruptible(&ar->sem)) { + ath6kl_err("down_interruptible failed\n"); + return; } - if (vif->scan_req) { - cfg80211_scan_done(vif->scan_req, true); - vif->scan_req = NULL; - } + if (ar->wlan_pwr_state != WLAN_POWER_STATE_CUT_PWR) + ath6kl_stop_endpoint(ndev, false, true); + + clear_bit(WLAN_ENABLED, &ar->flag); } -void ath6kl_stop_txrx(struct ath6kl *ar) +/* + * We need to differentiate between the surprise and planned removal of the + * device because of the following consideration: + * + * - In case of surprise removal, the hcd already frees up the pending + * for the device and hence there is no need to unregister the function + * driver inorder to get these requests. For planned removal, the function + * driver has to explicitly unregister itself to have the hcd return all the + * pending requests before the data structures for the devices are freed up. + * Note that as per the current implementation, the function driver will + * end up releasing all the devices since there is no API to selectively + * release a particular device. + * + * - Certain commands issued to the target can be skipped for surprise + * removal since they will anyway not go through. 
+ */ +void ath6kl_destroy(struct net_device *dev, unsigned int unregister) { - struct ath6kl_vif *vif, *tmp_vif; + struct ath6kl *ar; - set_bit(DESTROY_IN_PROGRESS, &ar->flag); - - if (down_interruptible(&ar->sem)) { - ath6kl_err("down_interruptible failed\n"); + if (!dev || !ath6kl_priv(dev)) { + ath6kl_err("failed to get device structure\n"); return; } - spin_lock_bh(&ar->list_lock); - list_for_each_entry_safe(vif, tmp_vif, &ar->vif_list, list) { - list_del(&vif->list); - spin_unlock_bh(&ar->list_lock); - ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag)); - rtnl_lock(); - ath6kl_deinit_if_data(vif); - rtnl_unlock(); - spin_lock_bh(&ar->list_lock); - } - spin_unlock_bh(&ar->list_lock); + ar = ath6kl_priv(dev); - clear_bit(WMI_READY, &ar->flag); + destroy_workqueue(ar->ath6kl_wq); - /* - * After wmi_shudown all WMI events will be dropped. We - * need to cleanup the buffers allocated in AP mode and - * give disconnect notification to stack, which usually - * happens in the disconnect_event. Simulate the disconnect - * event by calling the function directly. Sometimes - * disconnect_event will be received when the debug logs - * are collected. - */ - ath6kl_wmi_shutdown(ar->wmi); + if (ar->htc_target) + ath6kl_htc_cleanup(ar->htc_target); - clear_bit(WMI_ENABLED, &ar->flag); - if (ar->htc_target) { - ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__); - ath6kl_htc_stop(ar->htc_target); + aggr_module_destroy(ar->aggr_cntxt); + + ath6kl_cookie_cleanup(ar); + + ath6kl_cleanup_amsdu_rxbufs(ar); + + ath6kl_bmi_cleanup(ar); + + ath6kl_debug_cleanup(ar); + + if (unregister && test_bit(NETDEV_REGISTERED, &ar->flag)) { + unregister_netdev(dev); + clear_bit(NETDEV_REGISTERED, &ar->flag); } - /* - * Try to reset the device if we can. The driver may have been - * configure NOT to reset the target during a debug session. - */ - ath6kl_dbg(ATH6KL_DBG_TRC, - "attempting to reset target on instance destroy\n"); - ath6kl_reset_device(ar, ar->target_type, true, true); + free_netdev(dev); - clear_bit(WLAN_ENABLED, &ar->flag); + kfree(ar->fw_board); + kfree(ar->fw_otp); + kfree(ar->fw); + kfree(ar->fw_patch); + + ath6kl_cfg80211_deinit(ar); } diff --git a/trunk/drivers/net/wireless/ath/ath6kl/main.c b/trunk/drivers/net/wireless/ath/ath6kl/main.c index eea3c747653e..30b5a53db9ed 100644 --- a/trunk/drivers/net/wireless/ath/ath6kl/main.c +++ b/trunk/drivers/net/wireless/ath/ath6kl/main.c @@ -20,13 +20,12 @@ #include "target.h" #include "debug.h" -struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr) +struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 *node_addr) { - struct ath6kl *ar = vif->ar; struct ath6kl_sta *conn = NULL; u8 i, max_conn; - max_conn = (vif->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0; + max_conn = (ar->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0; for (i = 0; i < max_conn; i++) { if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) { @@ -175,6 +174,64 @@ void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie) ar->cookie_count++; } +/* set the window address register (using 4-byte register access ). */ +static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr) +{ + int status; + s32 i; + __le32 addr_val; + + /* + * Write bytes 1,2,3 of the register to set the upper address bytes, + * the LSB is written last to initiate the access cycle + */ + + for (i = 1; i <= 3; i++) { + /* + * Fill the buffer with the address byte value we want to + * hit 4 times. 
No need to worry about endianness as the + * same byte is copied to all four bytes of addr_val at + * any time. + */ + memset((u8 *)&addr_val, ((u8 *)&addr)[i], 4); + + /* + * Hit each byte of the register address with a 4-byte + * write operation to the same address, this is a harmless + * operation. + */ + status = hif_read_write_sync(ar, reg_addr + i, (u8 *)&addr_val, + 4, HIF_WR_SYNC_BYTE_FIX); + if (status) + break; + } + + if (status) { + ath6kl_err("failed to write initial bytes of 0x%x to window reg: 0x%X\n", + addr, reg_addr); + return status; + } + + /* + * Write the address register again, this time write the whole + * 4-byte value. The effect here is that the LSB write causes the + * cycle to start, the extra 3 byte write to bytes 1,2,3 has no + * effect since we are writing the same values again + */ + addr_val = cpu_to_le32(addr); + status = hif_read_write_sync(ar, reg_addr, + (u8 *)&(addr_val), + 4, HIF_WR_SYNC_BYTE_INC); + + if (status) { + ath6kl_err("failed to write 0x%x to window reg: 0x%X\n", + addr, reg_addr); + return status; + } + + return 0; +} + /* * Read from the hardware through its diagnostic window. No cooperation * from the firmware is required for this. @@ -183,7 +240,14 @@ int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value) { int ret; - ret = ath6kl_hif_diag_read32(ar, address, value); + /* set window register to start read cycle */ + ret = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS, address); + if (ret) + return ret; + + /* read the data */ + ret = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *) value, + sizeof(*value), HIF_RD_SYNC_BYTE_INC); if (ret) { ath6kl_warn("failed to read32 through diagnose window: %d\n", ret); @@ -201,15 +265,18 @@ int ath6kl_diag_write32(struct ath6kl *ar, u32 address, __le32 value) { int ret; - ret = ath6kl_hif_diag_write32(ar, address, value); - + /* set write data */ + ret = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *) &value, + sizeof(value), HIF_WR_SYNC_BYTE_INC); if (ret) { ath6kl_err("failed to write 0x%x during diagnose window to 0x%d\n", address, value); return ret; } - return 0; + /* set window register, which starts the write cycle */ + return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS, + address); } int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length) @@ -326,8 +393,8 @@ int ath6kl_read_fwlogs(struct ath6kl *ar) #define AR6003_RESET_CONTROL_ADDRESS 0x00004000 #define AR6004_RESET_CONTROL_ADDRESS 0x00004000 -void ath6kl_reset_device(struct ath6kl *ar, u32 target_type, - bool wait_fot_compltn, bool cold_reset) +static void ath6kl_reset_device(struct ath6kl *ar, u32 target_type, + bool wait_fot_compltn, bool cold_reset) { int status = 0; u32 address; @@ -358,33 +425,102 @@ void ath6kl_reset_device(struct ath6kl *ar, u32 target_type, ath6kl_err("failed to reset target\n"); } -static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif) +void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile, + bool get_dbglogs) +{ + struct ath6kl *ar = ath6kl_priv(dev); + static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + bool discon_issued; + + netif_stop_queue(dev); + + /* disable the target and the interrupts associated with it */ + if (test_bit(WMI_READY, &ar->flag)) { + discon_issued = (test_bit(CONNECTED, &ar->flag) || + test_bit(CONNECT_PEND, &ar->flag)); + ath6kl_disconnect(ar); + if (!keep_profile) + ath6kl_init_profile_info(ar); + + del_timer(&ar->disconnect_timer); + + clear_bit(WMI_READY, &ar->flag); + 
ath6kl_wmi_shutdown(ar->wmi); + clear_bit(WMI_ENABLED, &ar->flag); + ar->wmi = NULL; + + /* + * After wmi_shudown all WMI events will be dropped. We + * need to cleanup the buffers allocated in AP mode and + * give disconnect notification to stack, which usually + * happens in the disconnect_event. Simulate the disconnect + * event by calling the function directly. Sometimes + * disconnect_event will be received when the debug logs + * are collected. + */ + if (discon_issued) + ath6kl_disconnect_event(ar, DISCONNECT_CMD, + (ar->nw_type & AP_NETWORK) ? + bcast_mac : ar->bssid, + 0, NULL, 0); + + ar->user_key_ctrl = 0; + + } else { + ath6kl_dbg(ATH6KL_DBG_TRC, + "%s: wmi is not ready 0x%p 0x%p\n", + __func__, ar, ar->wmi); + + /* Shut down WMI if we have started it */ + if (test_bit(WMI_ENABLED, &ar->flag)) { + ath6kl_dbg(ATH6KL_DBG_TRC, + "%s: shut down wmi\n", __func__); + ath6kl_wmi_shutdown(ar->wmi); + clear_bit(WMI_ENABLED, &ar->flag); + ar->wmi = NULL; + } + } + + if (ar->htc_target) { + ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__); + ath6kl_htc_stop(ar->htc_target); + } + + /* + * Try to reset the device if we can. The driver may have been + * configure NOT to reset the target during a debug session. + */ + ath6kl_dbg(ATH6KL_DBG_TRC, + "attempting to reset target on instance destroy\n"); + ath6kl_reset_device(ar, ar->target_type, true, true); +} + +static void ath6kl_install_static_wep_keys(struct ath6kl *ar) { u8 index; u8 keyusage; for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) { - if (vif->wep_key_list[index].key_len) { + if (ar->wep_key_list[index].key_len) { keyusage = GROUP_USAGE; - if (index == vif->def_txkey_index) + if (index == ar->def_txkey_index) keyusage |= TX_USAGE; - ath6kl_wmi_addkey_cmd(vif->ar->wmi, vif->fw_vif_idx, + ath6kl_wmi_addkey_cmd(ar->wmi, index, WEP_CRYPT, keyusage, - vif->wep_key_list[index].key_len, - NULL, 0, - vif->wep_key_list[index].key, + ar->wep_key_list[index].key_len, + NULL, + ar->wep_key_list[index].key, KEY_OP_INIT_VAL, NULL, NO_SYNC_WMIFLAG); } } } -void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel) +void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel) { - struct ath6kl *ar = vif->ar; struct ath6kl_req_key *ik; int res; u8 key_rsc[ATH6KL_KEY_SEQ_LEN]; @@ -393,13 +529,11 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel) ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "AP mode started on %u MHz\n", channel); - switch (vif->auth_mode) { + switch (ar->auth_mode) { case NONE_AUTH: - if (vif->prwise_crypto == WEP_CRYPT) - ath6kl_install_static_wep_keys(vif); - if (!ik->valid || ik->key_type != WAPI_CRYPT) - break; - /* for WAPI, we need to set the delayed group key, continue: */ + if (ar->prwise_crypto == WEP_CRYPT) + ath6kl_install_static_wep_keys(ar); + break; case WPA_PSK_AUTH: case WPA2_PSK_AUTH: case (WPA_PSK_AUTH | WPA2_PSK_AUTH): @@ -410,9 +544,8 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel) "the initial group key for AP mode\n"); memset(key_rsc, 0, sizeof(key_rsc)); res = ath6kl_wmi_addkey_cmd( - ar->wmi, vif->fw_vif_idx, ik->key_index, ik->key_type, - GROUP_USAGE, ik->key_len, key_rsc, ATH6KL_KEY_SEQ_LEN, - ik->key, + ar->wmi, ik->key_index, ik->key_type, + GROUP_USAGE, ik->key_len, key_rsc, ik->key, KEY_OP_INIT_VAL, NULL, SYNC_BOTH_WMIFLAG); if (res) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delayed " @@ -421,16 +554,15 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel) break; } - ath6kl_wmi_bssfilter_cmd(ar->wmi, 
vif->fw_vif_idx, NONE_BSS_FILTER, 0); - set_bit(CONNECTED, &vif->flags); - netif_carrier_on(vif->ndev); + ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0); + set_bit(CONNECTED, &ar->flag); + netif_carrier_on(ar->net_dev); } -void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr, +void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr, u8 keymgmt, u8 ucipher, u8 auth, u8 assoc_req_len, u8 *assoc_info) { - struct ath6kl *ar = vif->ar; u8 *ies = NULL, *wpa_ie = NULL, *pos; size_t ies_len = 0; struct station_info sinfo; @@ -468,18 +600,6 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr, wpa_ie = pos; /* WPS IE */ break; /* overrides WPA/RSN IE */ } - } else if (pos[0] == 0x44 && wpa_ie == NULL) { - /* - * Note: WAPI Parameter Set IE re-uses Element ID that - * was officially allocated for BSS AC Access Delay. As - * such, we need to be a bit more careful on when - * parsing the frame. However, BSS AC Access Delay - * element is not supposed to be included in - * (Re)Association Request frames, so this should not - * cause problems. - */ - wpa_ie = pos; /* WAPI IE */ - break; } pos += 2 + pos[1]; } @@ -497,49 +617,380 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr, sinfo.assoc_req_ies_len = ies_len; sinfo.filled |= STATION_INFO_ASSOC_REQ_IES; - cfg80211_new_sta(vif->ndev, mac_addr, &sinfo, GFP_KERNEL); + cfg80211_new_sta(ar->net_dev, mac_addr, &sinfo, GFP_KERNEL); + + netif_wake_queue(ar->net_dev); +} + +/* Functions for Tx credit handling */ +void ath6k_credit_init(struct htc_credit_state_info *cred_info, + struct list_head *ep_list, + int tot_credits) +{ + struct htc_endpoint_credit_dist *cur_ep_dist; + int count; + + cred_info->cur_free_credits = tot_credits; + cred_info->total_avail_credits = tot_credits; + + list_for_each_entry(cur_ep_dist, ep_list, list) { + if (cur_ep_dist->endpoint == ENDPOINT_0) + continue; + + cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg; + + if (tot_credits > 4) + if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) || + (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) { + ath6kl_deposit_credit_to_ep(cred_info, + cur_ep_dist, + cur_ep_dist->cred_min); + cur_ep_dist->dist_flags |= HTC_EP_ACTIVE; + } + + if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) { + ath6kl_deposit_credit_to_ep(cred_info, cur_ep_dist, + cur_ep_dist->cred_min); + /* + * Control service is always marked active, it + * never goes inactive EVER. + */ + cur_ep_dist->dist_flags |= HTC_EP_ACTIVE; + } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC) + /* this is the lowest priority data endpoint */ + cred_info->lowestpri_ep_dist = cur_ep_dist->list; + + /* + * Streams have to be created (explicit | implicit) for all + * kinds of traffic. BE endpoints are also inactive in the + * beginning. When BE traffic starts it creates implicit + * streams that redistributes credits. + * + * Note: all other endpoints have minimums set but are + * initially given NO credits. credits will be distributed + * as traffic activity demands + */ + } + + WARN_ON(cred_info->cur_free_credits <= 0); + + list_for_each_entry(cur_ep_dist, ep_list, list) { + if (cur_ep_dist->endpoint == ENDPOINT_0) + continue; + + if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) + cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg; + else { + /* + * For the remaining data endpoints, we assume that + * each cred_per_msg are the same. 
We use a simple + * calculation here, we take the remaining credits + * and determine how many max messages this can + * cover and then set each endpoint's normal value + * equal to 3/4 this amount. + */ + count = (cred_info->cur_free_credits / + cur_ep_dist->cred_per_msg) + * cur_ep_dist->cred_per_msg; + count = (count * 3) >> 2; + count = max(count, cur_ep_dist->cred_per_msg); + cur_ep_dist->cred_norm = count; + + } + } +} + +/* initialize and setup credit distribution */ +int ath6k_setup_credit_dist(void *htc_handle, + struct htc_credit_state_info *cred_info) +{ + u16 servicepriority[5]; + + memset(cred_info, 0, sizeof(struct htc_credit_state_info)); + + servicepriority[0] = WMI_CONTROL_SVC; /* highest */ + servicepriority[1] = WMI_DATA_VO_SVC; + servicepriority[2] = WMI_DATA_VI_SVC; + servicepriority[3] = WMI_DATA_BE_SVC; + servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */ + + /* set priority list */ + ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5); + + return 0; +} + +/* reduce an ep's credits back to a set limit */ +static void ath6k_reduce_credits(struct htc_credit_state_info *cred_info, + struct htc_endpoint_credit_dist *ep_dist, + int limit) +{ + int credits; + + ep_dist->cred_assngd = limit; + + if (ep_dist->credits <= limit) + return; - netif_wake_queue(vif->ndev); + credits = ep_dist->credits - limit; + ep_dist->credits -= credits; + cred_info->cur_free_credits += credits; +} + +static void ath6k_credit_update(struct htc_credit_state_info *cred_info, + struct list_head *epdist_list) +{ + struct htc_endpoint_credit_dist *cur_dist_list; + + list_for_each_entry(cur_dist_list, epdist_list, list) { + if (cur_dist_list->endpoint == ENDPOINT_0) + continue; + + if (cur_dist_list->cred_to_dist > 0) { + cur_dist_list->credits += + cur_dist_list->cred_to_dist; + cur_dist_list->cred_to_dist = 0; + if (cur_dist_list->credits > + cur_dist_list->cred_assngd) + ath6k_reduce_credits(cred_info, + cur_dist_list, + cur_dist_list->cred_assngd); + + if (cur_dist_list->credits > + cur_dist_list->cred_norm) + ath6k_reduce_credits(cred_info, cur_dist_list, + cur_dist_list->cred_norm); + + if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) { + if (cur_dist_list->txq_depth == 0) + ath6k_reduce_credits(cred_info, + cur_dist_list, 0); + } + } + } +} + +/* + * HTC has an endpoint that needs credits, ep_dist is the endpoint in + * question. + */ +void ath6k_seek_credits(struct htc_credit_state_info *cred_info, + struct htc_endpoint_credit_dist *ep_dist) +{ + struct htc_endpoint_credit_dist *curdist_list; + int credits = 0; + int need; + + if (ep_dist->svc_id == WMI_CONTROL_SVC) + goto out; + + if ((ep_dist->svc_id == WMI_DATA_VI_SVC) || + (ep_dist->svc_id == WMI_DATA_VO_SVC)) + if ((ep_dist->cred_assngd >= ep_dist->cred_norm)) + goto out; + + /* + * For all other services, we follow a simple algorithm of: + * + * 1. checking the free pool for credits + * 2. checking lower priority endpoints for credits to take + */ + + credits = min(cred_info->cur_free_credits, ep_dist->seek_cred); + + if (credits >= ep_dist->seek_cred) + goto out; + + /* + * We don't have enough in the free pool, try taking away from + * lower priority services The rule for taking away credits: + * + * 1. Only take from lower priority endpoints + * 2. Only take what is allocated above the minimum (never + * starve an endpoint completely) + * 3. Only take what you need. 
+ */ + + list_for_each_entry_reverse(curdist_list, + &cred_info->lowestpri_ep_dist, + list) { + if (curdist_list == ep_dist) + break; + + need = ep_dist->seek_cred - cred_info->cur_free_credits; + + if ((curdist_list->cred_assngd - need) >= + curdist_list->cred_min) { + /* + * The current one has been allocated more than + * it's minimum and it has enough credits assigned + * above it's minimum to fulfill our need try to + * take away just enough to fulfill our need. + */ + ath6k_reduce_credits(cred_info, curdist_list, + curdist_list->cred_assngd - need); + + if (cred_info->cur_free_credits >= + ep_dist->seek_cred) + break; + } + + if (curdist_list->endpoint == ENDPOINT_0) + break; + } + + credits = min(cred_info->cur_free_credits, ep_dist->seek_cred); + +out: + /* did we find some credits? */ + if (credits) + ath6kl_deposit_credit_to_ep(cred_info, ep_dist, credits); + + ep_dist->seek_cred = 0; +} + +/* redistribute credits based on activity change */ +static void ath6k_redistribute_credits(struct htc_credit_state_info *info, + struct list_head *ep_dist_list) +{ + struct htc_endpoint_credit_dist *curdist_list; + + list_for_each_entry(curdist_list, ep_dist_list, list) { + if (curdist_list->endpoint == ENDPOINT_0) + continue; + + if ((curdist_list->svc_id == WMI_DATA_BK_SVC) || + (curdist_list->svc_id == WMI_DATA_BE_SVC)) + curdist_list->dist_flags |= HTC_EP_ACTIVE; + + if ((curdist_list->svc_id != WMI_CONTROL_SVC) && + !(curdist_list->dist_flags & HTC_EP_ACTIVE)) { + if (curdist_list->txq_depth == 0) + ath6k_reduce_credits(info, + curdist_list, 0); + else + ath6k_reduce_credits(info, + curdist_list, + curdist_list->cred_min); + } + } +} + +/* + * + * This function is invoked whenever endpoints require credit + * distributions. A lock is held while this function is invoked, this + * function shall NOT block. The ep_dist_list is a list of distribution + * structures in prioritized order as defined by the call to the + * htc_set_credit_dist() api. + */ +void ath6k_credit_distribute(struct htc_credit_state_info *cred_info, + struct list_head *ep_dist_list, + enum htc_credit_dist_reason reason) +{ + switch (reason) { + case HTC_CREDIT_DIST_SEND_COMPLETE: + ath6k_credit_update(cred_info, ep_dist_list); + break; + case HTC_CREDIT_DIST_ACTIVITY_CHANGE: + ath6k_redistribute_credits(cred_info, ep_dist_list); + break; + default: + break; + } + + WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits); + WARN_ON(cred_info->cur_free_credits < 0); } void disconnect_timer_handler(unsigned long ptr) { struct net_device *dev = (struct net_device *)ptr; - struct ath6kl_vif *vif = netdev_priv(dev); + struct ath6kl *ar = ath6kl_priv(dev); - ath6kl_init_profile_info(vif); - ath6kl_disconnect(vif); + ath6kl_init_profile_info(ar); + ath6kl_disconnect(ar); } -void ath6kl_disconnect(struct ath6kl_vif *vif) +void ath6kl_disconnect(struct ath6kl *ar) { - if (test_bit(CONNECTED, &vif->flags) || - test_bit(CONNECT_PEND, &vif->flags)) { - ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx); + if (test_bit(CONNECTED, &ar->flag) || + test_bit(CONNECT_PEND, &ar->flag)) { + ath6kl_wmi_disconnect_cmd(ar->wmi); /* * Disconnect command is issued, clear the connect pending * flag. The connected flag will be cleared in * disconnect event notification. 
*/ - clear_bit(CONNECT_PEND, &vif->flags); + clear_bit(CONNECT_PEND, &ar->flag); } } +void ath6kl_deep_sleep_enable(struct ath6kl *ar) +{ + switch (ar->sme_state) { + case SME_CONNECTING: + cfg80211_connect_result(ar->net_dev, ar->bssid, NULL, 0, + NULL, 0, + WLAN_STATUS_UNSPECIFIED_FAILURE, + GFP_KERNEL); + break; + case SME_CONNECTED: + default: + /* + * FIXME: oddly enough smeState is in DISCONNECTED during + * suspend, why? Need to send disconnected event in that + * state. + */ + cfg80211_disconnected(ar->net_dev, 0, NULL, 0, GFP_KERNEL); + break; + } + + if (test_bit(CONNECTED, &ar->flag) || + test_bit(CONNECT_PEND, &ar->flag)) + ath6kl_wmi_disconnect_cmd(ar->wmi); + + ar->sme_state = SME_DISCONNECTED; + + /* disable scanning */ + if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0, 0, + 0, 0) != 0) + printk(KERN_WARNING "ath6kl: failed to disable scan " + "during suspend\n"); + + ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED); +} + /* WMI Event handlers */ +static const char *get_hw_id_string(u32 id) +{ + switch (id) { + case AR6003_REV1_VERSION: + return "1.0"; + case AR6003_REV2_VERSION: + return "2.0"; + case AR6003_REV3_VERSION: + return "2.1.1"; + default: + return "unknown"; + } +} + void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver) { struct ath6kl *ar = devt; + struct net_device *dev = ar->net_dev; - memcpy(ar->mac_addr, datap, ETH_ALEN); + memcpy(dev->dev_addr, datap, ETH_ALEN); ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n", - __func__, ar->mac_addr); + __func__, dev->dev_addr); ar->version.wlan_ver = sw_ver; ar->version.abi_ver = abi_ver; - snprintf(ar->wiphy->fw_version, - sizeof(ar->wiphy->fw_version), + snprintf(ar->wdev->wiphy->fw_version, + sizeof(ar->wdev->wiphy->fw_version), "%u.%u.%u.%u", (ar->version.wlan_ver & 0xf0000000) >> 28, (ar->version.wlan_ver & 0x0f000000) >> 24, @@ -549,85 +1000,79 @@ void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver) /* indicate to the waiting thread that the ready event was received */ set_bit(WMI_READY, &ar->flag); wake_up(&ar->event_wq); + + ath6kl_info("hw %s fw %s%s\n", + get_hw_id_string(ar->wdev->wiphy->hw_version), + ar->wdev->wiphy->fw_version, + test_bit(TESTMODE, &ar->flag) ? 
" testmode" : ""); } -void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status) +void ath6kl_scan_complete_evt(struct ath6kl *ar, int status) { - struct ath6kl *ar = vif->ar; - bool aborted = false; - - if (status != WMI_SCAN_STATUS_SUCCESS) - aborted = true; - - ath6kl_cfg80211_scan_complete_event(vif, aborted); + ath6kl_cfg80211_scan_complete_event(ar, status); if (!ar->usr_bss_filter) { - clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags); - ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, - NONE_BSS_FILTER, 0); + clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag); + ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0); } - ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "scan complete: %d\n", status); + ath6kl_dbg(ATH6KL_DBG_WLAN_SCAN, "scan complete: %d\n", status); } -void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid, +void ath6kl_connect_event(struct ath6kl *ar, u16 channel, u8 *bssid, u16 listen_int, u16 beacon_int, enum network_type net_type, u8 beacon_ie_len, u8 assoc_req_len, u8 assoc_resp_len, u8 *assoc_info) { - struct ath6kl *ar = vif->ar; + unsigned long flags; - ath6kl_cfg80211_connect_event(vif, channel, bssid, + ath6kl_cfg80211_connect_event(ar, channel, bssid, listen_int, beacon_int, net_type, beacon_ie_len, assoc_req_len, assoc_resp_len, assoc_info); - memcpy(vif->bssid, bssid, sizeof(vif->bssid)); - vif->bss_ch = channel; + memcpy(ar->bssid, bssid, sizeof(ar->bssid)); + ar->bss_ch = channel; - if ((vif->nw_type == INFRA_NETWORK)) - ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx, - ar->listen_intvl_t, + if ((ar->nw_type == INFRA_NETWORK)) + ath6kl_wmi_listeninterval_cmd(ar->wmi, ar->listen_intvl_t, ar->listen_intvl_b); - netif_wake_queue(vif->ndev); + netif_wake_queue(ar->net_dev); /* Update connect & link status atomically */ - spin_lock_bh(&vif->if_lock); - set_bit(CONNECTED, &vif->flags); - clear_bit(CONNECT_PEND, &vif->flags); - netif_carrier_on(vif->ndev); - spin_unlock_bh(&vif->if_lock); + spin_lock_irqsave(&ar->lock, flags); + set_bit(CONNECTED, &ar->flag); + clear_bit(CONNECT_PEND, &ar->flag); + netif_carrier_on(ar->net_dev); + spin_unlock_irqrestore(&ar->lock, flags); - aggr_reset_state(vif->aggr_cntxt); - vif->reconnect_flag = 0; + aggr_reset_state(ar->aggr_cntxt); + ar->reconnect_flag = 0; - if ((vif->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) { + if ((ar->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) { memset(ar->node_map, 0, sizeof(ar->node_map)); ar->node_num = 0; ar->next_ep_id = ENDPOINT_2; } if (!ar->usr_bss_filter) { - set_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags); - ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, - CURRENT_BSS_FILTER, 0); + set_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag); + ath6kl_wmi_bssfilter_cmd(ar->wmi, CURRENT_BSS_FILTER, 0); } } -void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast) +void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast) { struct ath6kl_sta *sta; - struct ath6kl *ar = vif->ar; u8 tsc[6]; - /* * For AP case, keyid will have aid of STA which sent pkt with * MIC error. Use this aid to get MAC & send it to hostapd. 
*/ - if (vif->nw_type == AP_NETWORK) { + if (ar->nw_type == AP_NETWORK) { sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2)); if (!sta) return; @@ -636,20 +1081,19 @@ void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast) "ap tkip mic error received from aid=%d\n", keyid); memset(tsc, 0, sizeof(tsc)); /* FIX: get correct TSC */ - cfg80211_michael_mic_failure(vif->ndev, sta->mac, + cfg80211_michael_mic_failure(ar->net_dev, sta->mac, NL80211_KEYTYPE_PAIRWISE, keyid, tsc, GFP_KERNEL); } else - ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast); + ath6kl_cfg80211_tkip_micerr_event(ar, keyid, ismcast); } -static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len) +static void ath6kl_update_target_stats(struct ath6kl *ar, u8 *ptr, u32 len) { struct wmi_target_stats *tgt_stats = (struct wmi_target_stats *) ptr; - struct ath6kl *ar = vif->ar; - struct target_stats *stats = &vif->target_stats; + struct target_stats *stats = &ar->target_stats; struct tkip_ccmp_stats *ccmp_stats; u8 ac; @@ -745,8 +1189,8 @@ static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len) stats->wow_evt_discarded += le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded); - if (test_bit(STATS_UPDATE_PEND, &vif->flags)) { - clear_bit(STATS_UPDATE_PEND, &vif->flags); + if (test_bit(STATS_UPDATE_PEND, &ar->flag)) { + clear_bit(STATS_UPDATE_PEND, &ar->flag); wake_up(&ar->event_wq); } } @@ -756,15 +1200,14 @@ static void ath6kl_add_le32(__le32 *var, __le32 val) *var = cpu_to_le32(le32_to_cpu(*var) + le32_to_cpu(val)); } -void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len) +void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len) { struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr; - struct ath6kl *ar = vif->ar; struct wmi_ap_mode_stat *ap = &ar->ap_stats; struct wmi_per_sta_stat *st_ap, *st_p; u8 ac; - if (vif->nw_type == AP_NETWORK) { + if (ar->nw_type == AP_NETWORK) { if (len < sizeof(*p)) return; @@ -783,7 +1226,7 @@ void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len) } } else { - ath6kl_update_target_stats(vif, ptr, len); + ath6kl_update_target_stats(ar, ptr, len); } } @@ -802,12 +1245,11 @@ void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr) wake_up(&ar->event_wq); } -void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid) +void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid) { struct ath6kl_sta *conn; struct sk_buff *skb; bool psq_empty = false; - struct ath6kl *ar = vif->ar; conn = ath6kl_find_sta_by_aid(ar, aid); @@ -830,7 +1272,7 @@ void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid) spin_unlock_bh(&conn->psq_lock); conn->sta_flags |= STA_PS_POLLED; - ath6kl_data_tx(skb, vif->ndev); + ath6kl_data_tx(skb, ar->net_dev); conn->sta_flags &= ~STA_PS_POLLED; spin_lock_bh(&conn->psq_lock); @@ -838,14 +1280,13 @@ void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid) spin_unlock_bh(&conn->psq_lock); if (psq_empty) - ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, conn->aid, 0); + ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0); } -void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif) +void ath6kl_dtimexpiry_event(struct ath6kl *ar) { bool mcastq_empty = false; struct sk_buff *skb; - struct ath6kl *ar = vif->ar; /* * If there are no associated STAs, ignore the DTIM expiry event. 
@@ -867,31 +1308,31 @@ void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif) return; /* set the STA flag to dtim_expired for the frame to go out */ - set_bit(DTIM_EXPIRED, &vif->flags); + set_bit(DTIM_EXPIRED, &ar->flag); spin_lock_bh(&ar->mcastpsq_lock); while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) { spin_unlock_bh(&ar->mcastpsq_lock); - ath6kl_data_tx(skb, vif->ndev); + ath6kl_data_tx(skb, ar->net_dev); spin_lock_bh(&ar->mcastpsq_lock); } spin_unlock_bh(&ar->mcastpsq_lock); - clear_bit(DTIM_EXPIRED, &vif->flags); + clear_bit(DTIM_EXPIRED, &ar->flag); /* clear the LSB of the BitMapCtl field of the TIM IE */ - ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, MCAST_AID, 0); + ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0); } -void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid, +void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid, u8 assoc_resp_len, u8 *assoc_info, u16 prot_reason_status) { - struct ath6kl *ar = vif->ar; + unsigned long flags; - if (vif->nw_type == AP_NETWORK) { + if (ar->nw_type == AP_NETWORK) { if (!ath6kl_remove_sta(ar, bssid, prot_reason_status)) return; @@ -903,31 +1344,31 @@ void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid, /* clear the LSB of the TIM IE's BitMapCtl field */ if (test_bit(WMI_READY, &ar->flag)) - ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, - MCAST_AID, 0); + ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0); } if (!is_broadcast_ether_addr(bssid)) { /* send event to application */ - cfg80211_del_sta(vif->ndev, bssid, GFP_KERNEL); + cfg80211_del_sta(ar->net_dev, bssid, GFP_KERNEL); } - if (memcmp(vif->ndev->dev_addr, bssid, ETH_ALEN) == 0) { - memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list)); - clear_bit(CONNECTED, &vif->flags); + if (memcmp(ar->net_dev->dev_addr, bssid, ETH_ALEN) == 0) { + memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list)); + clear_bit(CONNECTED, &ar->flag); } return; } - ath6kl_cfg80211_disconnect_event(vif, reason, bssid, + ath6kl_cfg80211_disconnect_event(ar, reason, bssid, assoc_resp_len, assoc_info, prot_reason_status); - aggr_reset_state(vif->aggr_cntxt); + aggr_reset_state(ar->aggr_cntxt); - del_timer(&vif->disconnect_timer); + del_timer(&ar->disconnect_timer); - ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "disconnect reason is %d\n", reason); + ath6kl_dbg(ATH6KL_DBG_WLAN_CONNECT, + "disconnect reason is %d\n", reason); /* * If the event is due to disconnect cmd from the host, only they @@ -936,88 +1377,83 @@ void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid, */ if (reason == DISCONNECT_CMD) { if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag)) - ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, - NONE_BSS_FILTER, 0); + ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0); } else { - set_bit(CONNECT_PEND, &vif->flags); + set_bit(CONNECT_PEND, &ar->flag); if (((reason == ASSOC_FAILED) && (prot_reason_status == 0x11)) || ((reason == ASSOC_FAILED) && (prot_reason_status == 0x0) - && (vif->reconnect_flag == 1))) { - set_bit(CONNECTED, &vif->flags); + && (ar->reconnect_flag == 1))) { + set_bit(CONNECTED, &ar->flag); return; } } /* update connect & link status atomically */ - spin_lock_bh(&vif->if_lock); - clear_bit(CONNECTED, &vif->flags); - netif_carrier_off(vif->ndev); - spin_unlock_bh(&vif->if_lock); + spin_lock_irqsave(&ar->lock, flags); + clear_bit(CONNECTED, &ar->flag); + netif_carrier_off(ar->net_dev); + spin_unlock_irqrestore(&ar->lock, flags); - if ((reason != CSERV_DISCONNECT) || (vif->reconnect_flag != 1)) - 
vif->reconnect_flag = 0; + if ((reason != CSERV_DISCONNECT) || (ar->reconnect_flag != 1)) + ar->reconnect_flag = 0; if (reason != CSERV_DISCONNECT) ar->user_key_ctrl = 0; - netif_stop_queue(vif->ndev); - memset(vif->bssid, 0, sizeof(vif->bssid)); - vif->bss_ch = 0; + netif_stop_queue(ar->net_dev); + memset(ar->bssid, 0, sizeof(ar->bssid)); + ar->bss_ch = 0; ath6kl_tx_data_cleanup(ar); } -struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar) -{ - struct ath6kl_vif *vif; - - spin_lock_bh(&ar->list_lock); - if (list_empty(&ar->vif_list)) { - spin_unlock_bh(&ar->list_lock); - return NULL; - } - - vif = list_first_entry(&ar->vif_list, struct ath6kl_vif, list); - - spin_unlock_bh(&ar->list_lock); - - return vif; -} - static int ath6kl_open(struct net_device *dev) { - struct ath6kl_vif *vif = netdev_priv(dev); + struct ath6kl *ar = ath6kl_priv(dev); + unsigned long flags; - set_bit(WLAN_ENABLED, &vif->flags); + spin_lock_irqsave(&ar->lock, flags); - if (test_bit(CONNECTED, &vif->flags)) { + set_bit(WLAN_ENABLED, &ar->flag); + + if (test_bit(CONNECTED, &ar->flag)) { netif_carrier_on(dev); netif_wake_queue(dev); } else netif_carrier_off(dev); + spin_unlock_irqrestore(&ar->lock, flags); + return 0; } static int ath6kl_close(struct net_device *dev) { - struct ath6kl_vif *vif = netdev_priv(dev); + struct ath6kl *ar = ath6kl_priv(dev); netif_stop_queue(dev); - ath6kl_cfg80211_stop(vif); + ath6kl_disconnect(ar); + + if (test_bit(WMI_READY, &ar->flag)) { + if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0, + 0, 0, 0)) + return -EIO; + + clear_bit(WLAN_ENABLED, &ar->flag); + } - clear_bit(WLAN_ENABLED, &vif->flags); + ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED); return 0; } static struct net_device_stats *ath6kl_get_stats(struct net_device *dev) { - struct ath6kl_vif *vif = netdev_priv(dev); + struct ath6kl *ar = ath6kl_priv(dev); - return &vif->net_stats; + return &ar->net_stats; } static struct net_device_ops ath6kl_netdev_ops = { @@ -1030,7 +1466,6 @@ static struct net_device_ops ath6kl_netdev_ops = { void init_netdev(struct net_device *dev) { dev->netdev_ops = &ath6kl_netdev_ops; - dev->destructor = free_netdev; dev->watchdog_timeo = ATH6KL_TX_TIMEOUT; dev->needed_headroom = ETH_HLEN; diff --git a/trunk/drivers/net/wireless/ath/ath6kl/sdio.c b/trunk/drivers/net/wireless/ath/ath6kl/sdio.c index 9475e2d0d0b7..066d4f88807f 100644 --- a/trunk/drivers/net/wireless/ath/ath6kl/sdio.c +++ b/trunk/drivers/net/wireless/ath/ath6kl/sdio.c @@ -22,7 +22,7 @@ #include #include #include -#include "hif.h" +#include "htc_hif.h" #include "hif-ops.h" #include "target.h" #include "debug.h" @@ -40,18 +40,12 @@ struct ath6kl_sdio { struct bus_request bus_req[BUS_REQUEST_MAX_NUM]; struct ath6kl *ar; - u8 *dma_buffer; - /* protects access to dma_buffer */ - struct mutex dma_buffer_mutex; - /* scatter request list head */ struct list_head scat_req; spinlock_t scat_lock; - bool scatter_enabled; - bool is_disabled; atomic_t irq_handling; const struct sdio_device_id *id; @@ -141,8 +135,6 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr, { int ret = 0; - sdio_claim_host(func); - if (request & HIF_WRITE) { /* FIXME: looks like ugly workaround for something */ if (addr >= HIF_MBOX_BASE_ADDR && @@ -164,8 +156,6 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr, ret = sdio_memcpy_fromio(func, buf, addr, len); } - sdio_release_host(func); - ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n", request & HIF_WRITE ? 
"wr" : "rd", addr, request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len); @@ -177,11 +167,12 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr, static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio) { struct bus_request *bus_req; + unsigned long flag; - spin_lock_bh(&ar_sdio->lock); + spin_lock_irqsave(&ar_sdio->lock, flag); if (list_empty(&ar_sdio->bus_req_freeq)) { - spin_unlock_bh(&ar_sdio->lock); + spin_unlock_irqrestore(&ar_sdio->lock, flag); return NULL; } @@ -189,7 +180,7 @@ static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio) struct bus_request, list); list_del(&bus_req->list); - spin_unlock_bh(&ar_sdio->lock); + spin_unlock_irqrestore(&ar_sdio->lock, flag); ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n", __func__, bus_req); @@ -199,12 +190,14 @@ static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio) static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio, struct bus_request *bus_req) { + unsigned long flag; + ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n", __func__, bus_req); - spin_lock_bh(&ar_sdio->lock); + spin_lock_irqsave(&ar_sdio->lock, flag); list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq); - spin_unlock_bh(&ar_sdio->lock); + spin_unlock_irqrestore(&ar_sdio->lock, flag); } static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req, @@ -298,14 +291,10 @@ static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio, mmc_req.cmd = &cmd; mmc_req.data = &data; - sdio_claim_host(ar_sdio->func); - mmc_set_data_timeout(&data, ar_sdio->func->card); /* synchronous call to process request */ mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req); - sdio_release_host(ar_sdio->func); - status = cmd.error ? 
cmd.error : data.error; scat_complete: @@ -400,19 +389,17 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf, if (buf_needs_bounce(buf)) { if (!ar_sdio->dma_buffer) return -ENOMEM; - mutex_lock(&ar_sdio->dma_buffer_mutex); tbuf = ar_sdio->dma_buffer; memcpy(tbuf, buf, len); bounced = true; } else tbuf = buf; + sdio_claim_host(ar_sdio->func); ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len); if ((request & HIF_READ) && bounced) memcpy(buf, tbuf, len); - - if (bounced) - mutex_unlock(&ar_sdio->dma_buffer_mutex); + sdio_release_host(ar_sdio->func); return ret; } @@ -431,25 +418,29 @@ static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio, req->request); context = req->packet; ath6kl_sdio_free_bus_req(ar_sdio, req); - ath6kl_hif_rw_comp_handler(context, status); + ath6kldev_rw_comp_handler(context, status); } } static void ath6kl_sdio_write_async_work(struct work_struct *work) { struct ath6kl_sdio *ar_sdio; + unsigned long flags; struct bus_request *req, *tmp_req; ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work); + sdio_claim_host(ar_sdio->func); - spin_lock_bh(&ar_sdio->wr_async_lock); + spin_lock_irqsave(&ar_sdio->wr_async_lock, flags); list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { list_del(&req->list); - spin_unlock_bh(&ar_sdio->wr_async_lock); + spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags); __ath6kl_sdio_write_async(ar_sdio, req); - spin_lock_bh(&ar_sdio->wr_async_lock); + spin_lock_irqsave(&ar_sdio->wr_async_lock, flags); } - spin_unlock_bh(&ar_sdio->wr_async_lock); + spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags); + + sdio_release_host(ar_sdio->func); } static void ath6kl_sdio_irq_handler(struct sdio_func *func) @@ -468,23 +459,20 @@ static void ath6kl_sdio_irq_handler(struct sdio_func *func) */ sdio_release_host(ar_sdio->func); - status = ath6kl_hif_intr_bh_handler(ar_sdio->ar); + status = ath6kldev_intr_bh_handler(ar_sdio->ar); sdio_claim_host(ar_sdio->func); atomic_set(&ar_sdio->irq_handling, 0); WARN_ON(status && status != -ECANCELED); } -static int ath6kl_sdio_power_on(struct ath6kl *ar) +static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio) { - struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); struct sdio_func *func = ar_sdio->func; int ret = 0; if (!ar_sdio->is_disabled) return 0; - ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n"); - sdio_claim_host(func); ret = sdio_enable_func(func); @@ -507,16 +495,13 @@ static int ath6kl_sdio_power_on(struct ath6kl *ar) return ret; } -static int ath6kl_sdio_power_off(struct ath6kl *ar) +static int ath6kl_sdio_power_off(struct ath6kl_sdio *ar_sdio) { - struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); int ret; if (ar_sdio->is_disabled) return 0; - ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n"); - /* Disable the card */ sdio_claim_host(ar_sdio->func); ret = sdio_disable_func(ar_sdio->func); @@ -536,6 +521,7 @@ static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer, { struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); struct bus_request *bus_req; + unsigned long flags; bus_req = ath6kl_sdio_alloc_busreq(ar_sdio); @@ -548,9 +534,9 @@ static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer, bus_req->request = request; bus_req->packet = packet; - spin_lock_bh(&ar_sdio->wr_async_lock); + spin_lock_irqsave(&ar_sdio->wr_async_lock, flags); list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq); - spin_unlock_bh(&ar_sdio->wr_async_lock); + spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags); 
queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); return 0; @@ -596,8 +582,9 @@ static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar) { struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); struct hif_scatter_req *node = NULL; + unsigned long flag; - spin_lock_bh(&ar_sdio->scat_lock); + spin_lock_irqsave(&ar_sdio->scat_lock, flag); if (!list_empty(&ar_sdio->scat_req)) { node = list_first_entry(&ar_sdio->scat_req, @@ -605,7 +592,7 @@ static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar) list_del(&node->list); } - spin_unlock_bh(&ar_sdio->scat_lock); + spin_unlock_irqrestore(&ar_sdio->scat_lock, flag); return node; } @@ -614,12 +601,13 @@ static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar, struct hif_scatter_req *s_req) { struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + unsigned long flag; - spin_lock_bh(&ar_sdio->scat_lock); + spin_lock_irqsave(&ar_sdio->scat_lock, flag); list_add_tail(&s_req->list, &ar_sdio->scat_req); - spin_unlock_bh(&ar_sdio->scat_lock); + spin_unlock_irqrestore(&ar_sdio->scat_lock, flag); } @@ -630,6 +618,7 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar, struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); u32 request = scat_req->req; int status = 0; + unsigned long flags; if (!scat_req->len) return -EINVAL; @@ -638,12 +627,14 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar, "hif-scatter: total len: %d scatter entries: %d\n", scat_req->len, scat_req->scat_entries); - if (request & HIF_SYNCHRONOUS) + if (request & HIF_SYNCHRONOUS) { + sdio_claim_host(ar_sdio->func); status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest); - else { - spin_lock_bh(&ar_sdio->wr_async_lock); + sdio_release_host(ar_sdio->func); + } else { + spin_lock_irqsave(&ar_sdio->wr_async_lock, flags); list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq); - spin_unlock_bh(&ar_sdio->wr_async_lock); + spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags); queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); } @@ -655,27 +646,23 @@ static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar) { struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); struct hif_scatter_req *s_req, *tmp_req; + unsigned long flag; /* empty the free list */ - spin_lock_bh(&ar_sdio->scat_lock); + spin_lock_irqsave(&ar_sdio->scat_lock, flag); list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) { list_del(&s_req->list); - spin_unlock_bh(&ar_sdio->scat_lock); + spin_unlock_irqrestore(&ar_sdio->scat_lock, flag); - /* - * FIXME: should we also call completion handler with - * ath6kl_hif_rw_comp_handler() with status -ECANCELED so - * that the packet is properly freed? 
- */ if (s_req->busrequest) ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest); kfree(s_req->virt_dma_buf); kfree(s_req->sgentries); kfree(s_req); - spin_lock_bh(&ar_sdio->scat_lock); + spin_lock_irqsave(&ar_sdio->scat_lock, flag); } - spin_unlock_bh(&ar_sdio->scat_lock); + spin_unlock_irqrestore(&ar_sdio->scat_lock, flag); } /* setup of HIF scatter resources */ @@ -686,11 +673,6 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar) int ret; bool virt_scat = false; - if (ar_sdio->scatter_enabled) - return 0; - - ar_sdio->scatter_enabled = true; - /* check if host supports scatter and it meets our requirements */ if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) { ath6kl_err("host only supports scatter of :%d entries, need: %d\n", @@ -705,8 +687,8 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar) MAX_SCATTER_REQUESTS, virt_scat); if (!ret) { - ath6kl_dbg(ATH6KL_DBG_BOOT, - "hif-scatter enabled requests %d entries %d\n", + ath6kl_dbg(ATH6KL_DBG_SCATTER, + "hif-scatter enabled: max scatter req : %d entries: %d\n", MAX_SCATTER_REQUESTS, MAX_SCATTER_ENTRIES_PER_REQ); @@ -730,8 +712,8 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar) return ret; } - ath6kl_dbg(ATH6KL_DBG_BOOT, - "virtual scatter enabled requests %d entries %d\n", + ath6kl_dbg(ATH6KL_DBG_SCATTER, + "Vitual scatter enabled, max_scat_req:%d, entries:%d\n", ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ); target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ; @@ -742,47 +724,7 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar) return 0; } -static int ath6kl_sdio_config(struct ath6kl *ar) -{ - struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); - struct sdio_func *func = ar_sdio->func; - int ret; - - sdio_claim_host(func); - - if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >= - MANUFACTURER_ID_AR6003_BASE) { - /* enable 4-bit ASYNC interrupt on AR6003 or later */ - ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card, - CCCR_SDIO_IRQ_MODE_REG, - SDIO_IRQ_MODE_ASYNC_4BIT_IRQ); - if (ret) { - ath6kl_err("Failed to enable 4-bit async irq mode %d\n", - ret); - goto out; - } - - ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n"); - } - - /* give us some time to enable, in ms */ - func->enable_timeout = 100; - - ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE); - if (ret) { - ath6kl_err("Set sdio block size %d failed: %d)\n", - HIF_MBOX_BLOCK_SIZE, ret); - sdio_release_host(func); - goto out; - } - -out: - sdio_release_host(func); - - return ret; -} - -static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow) +static int ath6kl_sdio_suspend(struct ath6kl *ar) { struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); struct sdio_func *func = ar_sdio->func; @@ -791,14 +733,12 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow) flags = sdio_get_host_pm_caps(func); - ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags); - - if (!(flags & MMC_PM_KEEP_POWER) || - (ar->conf_flags & ATH6KL_CONF_SUSPEND_CUTPOWER)) { - /* as host doesn't support keep power we need to cut power */ - return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, - NULL); - } + if (!(flags & MMC_PM_KEEP_POWER)) + /* as host doesn't support keep power we need to bail out */ + ath6kl_dbg(ATH6KL_DBG_SDIO, + "func %d doesn't support MMC_PM_KEEP_POWER\n", + func->num); + return -EINVAL; ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); if (ret) { @@ -807,367 +747,11 @@ static int ath6kl_sdio_suspend(struct ath6kl 
*ar, struct cfg80211_wowlan *wow) return ret; } - if (!(flags & MMC_PM_WAKE_SDIO_IRQ)) - goto deepsleep; - - /* sdio irq wakes up host */ - - if (ar->state == ATH6KL_STATE_SCHED_SCAN) { - ret = ath6kl_cfg80211_suspend(ar, - ATH6KL_CFG_SUSPEND_SCHED_SCAN, - NULL); - if (ret) { - ath6kl_warn("Schedule scan suspend failed: %d", ret); - return ret; - } - - ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ); - if (ret) - ath6kl_warn("set sdio wake irq flag failed: %d\n", ret); - - return ret; - } - - if (wow) { - /* - * The host sdio controller is capable of keep power and - * sdio irq wake up at this point. It's fine to continue - * wow suspend operation. - */ - ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow); - if (ret) - return ret; - - ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ); - if (ret) - ath6kl_err("set sdio wake irq flag failed: %d\n", ret); - - return ret; - } - -deepsleep: - return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP, NULL); -} - -static int ath6kl_sdio_resume(struct ath6kl *ar) -{ - switch (ar->state) { - case ATH6KL_STATE_OFF: - case ATH6KL_STATE_CUTPOWER: - ath6kl_dbg(ATH6KL_DBG_SUSPEND, - "sdio resume configuring sdio\n"); - - /* need to set sdio settings after power is cut from sdio */ - ath6kl_sdio_config(ar); - break; - - case ATH6KL_STATE_ON: - break; - - case ATH6KL_STATE_DEEPSLEEP: - break; - - case ATH6KL_STATE_WOW: - break; - case ATH6KL_STATE_SCHED_SCAN: - break; - } - - ath6kl_cfg80211_resume(ar); - - return 0; -} - -/* set the window address register (using 4-byte register access ). */ -static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr) -{ - int status; - u8 addr_val[4]; - s32 i; - - /* - * Write bytes 1,2,3 of the register to set the upper address bytes, - * the LSB is written last to initiate the access cycle - */ - - for (i = 1; i <= 3; i++) { - /* - * Fill the buffer with the address byte value we want to - * hit 4 times. - */ - memset(addr_val, ((u8 *)&addr)[i], 4); - - /* - * Hit each byte of the register address with a 4-byte - * write operation to the same address, this is a harmless - * operation. - */ - status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val, - 4, HIF_WR_SYNC_BYTE_FIX); - if (status) - break; - } - - if (status) { - ath6kl_err("%s: failed to write initial bytes of 0x%x " - "to window reg: 0x%X\n", __func__, - addr, reg_addr); - return status; - } - - /* - * Write the address register again, this time write the whole - * 4-byte value. 
The effect here is that the LSB write causes the - * cycle to start, the extra 3 byte write to bytes 1,2,3 has no - * effect since we are writing the same values again - */ - status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr), - 4, HIF_WR_SYNC_BYTE_INC); - - if (status) { - ath6kl_err("%s: failed to write 0x%x to window reg: 0x%X\n", - __func__, addr, reg_addr); - return status; - } - - return 0; -} - -static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data) -{ - int status; - - /* set window register to start read cycle */ - status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS, - address); - - if (status) - return status; - - /* read the data */ - status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS, - (u8 *)data, sizeof(u32), HIF_RD_SYNC_BYTE_INC); - if (status) { - ath6kl_err("%s: failed to read from window data addr\n", - __func__); - return status; - } - - return status; -} - -static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address, - __le32 data) -{ - int status; - u32 val = (__force u32) data; - - /* set write data */ - status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS, - (u8 *) &val, sizeof(u32), HIF_WR_SYNC_BYTE_INC); - if (status) { - ath6kl_err("%s: failed to write 0x%x to window data addr\n", - __func__, data); - return status; - } - - /* set window register, which starts the write cycle */ - return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS, - address); -} - -static int ath6kl_sdio_bmi_credits(struct ath6kl *ar) -{ - u32 addr; - unsigned long timeout; - int ret; - - ar->bmi.cmd_credits = 0; - - /* Read the counter register to get the command credits */ - addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4; - - timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT); - while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) { - - /* - * Hit the credit counter with a 4-byte access, the first byte - * read will hit the counter and cause a decrement, while the - * remaining 3 bytes has no effect. The rationale behind this - * is to make all HIF accesses 4-byte aligned. - */ - ret = ath6kl_sdio_read_write_sync(ar, addr, - (u8 *)&ar->bmi.cmd_credits, 4, - HIF_RD_SYNC_BYTE_INC); - if (ret) { - ath6kl_err("Unable to decrement the command credit " - "count register: %d\n", ret); - return ret; - } - - /* The counter is only 8 bits. 
- * Ignore anything in the upper 3 bytes - */ - ar->bmi.cmd_credits &= 0xFF; - } - - if (!ar->bmi.cmd_credits) { - ath6kl_err("bmi communication timeout\n"); - return -ETIMEDOUT; - } - - return 0; -} - -static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar) -{ - unsigned long timeout; - u32 rx_word = 0; - int ret = 0; - - timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT); - while ((time_before(jiffies, timeout)) && !rx_word) { - ret = ath6kl_sdio_read_write_sync(ar, - RX_LOOKAHEAD_VALID_ADDRESS, - (u8 *)&rx_word, sizeof(rx_word), - HIF_RD_SYNC_BYTE_INC); - if (ret) { - ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n"); - return ret; - } - - /* all we really want is one bit */ - rx_word &= (1 << ENDPOINT1); - } - - if (!rx_word) { - ath6kl_err("bmi_recv_buf FIFO empty\n"); - return -EINVAL; - } - - return ret; -} - -static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len) -{ - int ret; - u32 addr; - - ret = ath6kl_sdio_bmi_credits(ar); - if (ret) - return ret; - - addr = ar->mbox_info.htc_addr; - - ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len, - HIF_WR_SYNC_BYTE_INC); - if (ret) - ath6kl_err("unable to send the bmi data to the device\n"); - - return ret; -} - -static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len) -{ - int ret; - u32 addr; - - /* - * During normal bootup, small reads may be required. - * Rather than issue an HIF Read and then wait as the Target - * adds successive bytes to the FIFO, we wait here until - * we know that response data is available. - * - * This allows us to cleanly timeout on an unexpected - * Target failure rather than risk problems at the HIF level. - * In particular, this avoids SDIO timeouts and possibly garbage - * data on some host controllers. And on an interconnect - * such as Compact Flash (as well as some SDIO masters) which - * does not provide any indication on data timeout, it avoids - * a potential hang or garbage response. - * - * Synchronization is more difficult for reads larger than the - * size of the MBOX FIFO (128B), because the Target is unable - * to push the 129th byte of data until AFTER the Host posts an - * HIF Read and removes some FIFO data. So for large reads the - * Host proceeds to post an HIF Read BEFORE all the data is - * actually available to read. Fortunately, large BMI reads do - * not occur in practice -- they're supported for debug/development. - * - * So Host/Target BMI synchronization is divided into these cases: - * CASE 1: length < 4 - * Should not happen - * - * CASE 2: 4 <= length <= 128 - * Wait for first 4 bytes to be in FIFO - * If CONSERVATIVE_BMI_READ is enabled, also wait for - * a BMI command credit, which indicates that the ENTIRE - * response is available in the the FIFO - * - * CASE 3: length > 128 - * Wait for the first 4 bytes to be in FIFO - * - * For most uses, a small timeout should be sufficient and we will - * usually see a response quickly; but there may be some unusual - * (debug) cases of BMI_EXECUTE where we want an larger timeout. - * For now, we use an unbounded busy loop while waiting for - * BMI_EXECUTE. - * - * If BMI_EXECUTE ever needs to support longer-latency execution, - * especially in production, this code needs to be enhanced to sleep - * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently - * a function of Host processor speed. 
- */ - if (len >= 4) { /* NB: Currently, always true */ - ret = ath6kl_bmi_get_rx_lkahd(ar); - if (ret) - return ret; - } - - addr = ar->mbox_info.htc_addr; - ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len, - HIF_RD_SYNC_BYTE_INC); - if (ret) { - ath6kl_err("Unable to read the bmi data from the device: %d\n", - ret); - return ret; - } + ath6kl_deep_sleep_enable(ar); return 0; } -static void ath6kl_sdio_stop(struct ath6kl *ar) -{ - struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); - struct bus_request *req, *tmp_req; - void *context; - - /* FIXME: make sure that wq is not queued again */ - - cancel_work_sync(&ar_sdio->wr_async_work); - - spin_lock_bh(&ar_sdio->wr_async_lock); - - list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { - list_del(&req->list); - - if (req->scat_req) { - /* this is a scatter gather request */ - req->scat_req->status = -ECANCELED; - req->scat_req->complete(ar_sdio->ar->htc_target, - req->scat_req); - } else { - context = req->packet; - ath6kl_sdio_free_bus_req(ar_sdio, req); - ath6kl_hif_rw_comp_handler(context, -ECANCELED); - } - } - - spin_unlock_bh(&ar_sdio->wr_async_lock); - - WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4); -} - static const struct ath6kl_hif_ops ath6kl_sdio_ops = { .read_write_sync = ath6kl_sdio_read_write_sync, .write_async = ath6kl_sdio_write_async, @@ -1179,47 +763,8 @@ static const struct ath6kl_hif_ops ath6kl_sdio_ops = { .scat_req_rw = ath6kl_sdio_async_rw_scatter, .cleanup_scatter = ath6kl_sdio_cleanup_scatter, .suspend = ath6kl_sdio_suspend, - .resume = ath6kl_sdio_resume, - .diag_read32 = ath6kl_sdio_diag_read32, - .diag_write32 = ath6kl_sdio_diag_write32, - .bmi_read = ath6kl_sdio_bmi_read, - .bmi_write = ath6kl_sdio_bmi_write, - .power_on = ath6kl_sdio_power_on, - .power_off = ath6kl_sdio_power_off, - .stop = ath6kl_sdio_stop, }; -#ifdef CONFIG_PM_SLEEP - -/* - * Empty handlers so that mmc subsystem doesn't remove us entirely during - * suspend. We instead follow cfg80211 suspend/resume handlers. 
- */ -static int ath6kl_sdio_pm_suspend(struct device *device) -{ - ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n"); - - return 0; -} - -static int ath6kl_sdio_pm_resume(struct device *device) -{ - ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n"); - - return 0; -} - -static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend, - ath6kl_sdio_pm_resume); - -#define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops) - -#else - -#define ATH6KL_SDIO_PM_OPS NULL - -#endif /* CONFIG_PM_SLEEP */ - static int ath6kl_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) { @@ -1228,8 +773,8 @@ static int ath6kl_sdio_probe(struct sdio_func *func, struct ath6kl *ar; int count; - ath6kl_dbg(ATH6KL_DBG_BOOT, - "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n", + ath6kl_dbg(ATH6KL_DBG_SDIO, + "new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n", func->num, func->vendor, func->device, func->max_blksize, func->cur_blksize); @@ -1252,7 +797,6 @@ static int ath6kl_sdio_probe(struct sdio_func *func, spin_lock_init(&ar_sdio->lock); spin_lock_init(&ar_sdio->scat_lock); spin_lock_init(&ar_sdio->wr_async_lock); - mutex_init(&ar_sdio->dma_buffer_mutex); INIT_LIST_HEAD(&ar_sdio->scat_req); INIT_LIST_HEAD(&ar_sdio->bus_req_freeq); @@ -1271,29 +815,62 @@ static int ath6kl_sdio_probe(struct sdio_func *func, } ar_sdio->ar = ar; - ar->hif_type = ATH6KL_HIF_TYPE_SDIO; ar->hif_priv = ar_sdio; ar->hif_ops = &ath6kl_sdio_ops; - ar->bmi.max_data_size = 256; ath6kl_sdio_set_mbox_info(ar); - ret = ath6kl_sdio_config(ar); + sdio_claim_host(func); + + if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >= + MANUFACTURER_ID_AR6003_BASE) { + /* enable 4-bit ASYNC interrupt on AR6003 or later */ + ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card, + CCCR_SDIO_IRQ_MODE_REG, + SDIO_IRQ_MODE_ASYNC_4BIT_IRQ); + if (ret) { + ath6kl_err("Failed to enable 4-bit async irq mode %d\n", + ret); + sdio_release_host(func); + goto err_cfg80211; + } + + ath6kl_dbg(ATH6KL_DBG_SDIO, "4-bit async irq mode enabled\n"); + } + + /* give us some time to enable, in ms */ + func->enable_timeout = 100; + + sdio_release_host(func); + + ret = ath6kl_sdio_power_on(ar_sdio); + if (ret) + goto err_cfg80211; + + sdio_claim_host(func); + + ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE); if (ret) { - ath6kl_err("Failed to config sdio: %d\n", ret); - goto err_core_alloc; + ath6kl_err("Set sdio block size %d failed: %d)\n", + HIF_MBOX_BLOCK_SIZE, ret); + sdio_release_host(func); + goto err_off; } + sdio_release_host(func); + ret = ath6kl_core_init(ar); if (ret) { ath6kl_err("Failed to init ath6kl core\n"); - goto err_core_alloc; + goto err_off; } return ret; -err_core_alloc: - ath6kl_core_free(ar_sdio->ar); +err_off: + ath6kl_sdio_power_off(ar_sdio); +err_cfg80211: + ath6kl_cfg80211_deinit(ar_sdio->ar); err_dma: kfree(ar_sdio->dma_buffer); err_hif: @@ -1306,8 +883,8 @@ static void ath6kl_sdio_remove(struct sdio_func *func) { struct ath6kl_sdio *ar_sdio; - ath6kl_dbg(ATH6KL_DBG_BOOT, - "sdio removed func %d vendor 0x%x device 0x%x\n", + ath6kl_dbg(ATH6KL_DBG_SDIO, + "removed func %d vendor 0x%x device 0x%x\n", func->num, func->vendor, func->device); ar_sdio = sdio_get_drvdata(func); @@ -1315,7 +892,9 @@ static void ath6kl_sdio_remove(struct sdio_func *func) ath6kl_stop_txrx(ar_sdio->ar); cancel_work_sync(&ar_sdio->wr_async_work); - ath6kl_core_cleanup(ar_sdio->ar); + ath6kl_unavail_ev(ar_sdio->ar); + + ath6kl_sdio_power_off(ar_sdio); kfree(ar_sdio->dma_buffer); kfree(ar_sdio); @@ -1324,19 +903,16 @@ static void 
ath6kl_sdio_remove(struct sdio_func *func) static const struct sdio_device_id ath6kl_sdio_devices[] = { {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))}, {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))}, - {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))}, - {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))}, {}, }; MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices); static struct sdio_driver ath6kl_sdio_driver = { - .name = "ath6kl", + .name = "ath6kl_sdio", .id_table = ath6kl_sdio_devices, .probe = ath6kl_sdio_probe, .remove = ath6kl_sdio_remove, - .drv.pm = ATH6KL_SDIO_PM_OPS, }; static int __init ath6kl_sdio_init(void) @@ -1362,19 +938,13 @@ MODULE_AUTHOR("Atheros Communications, Inc."); MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices"); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_FIRMWARE(AR6003_HW_2_0_OTP_FILE); -MODULE_FIRMWARE(AR6003_HW_2_0_FIRMWARE_FILE); -MODULE_FIRMWARE(AR6003_HW_2_0_PATCH_FILE); -MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE); -MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE); -MODULE_FIRMWARE(AR6003_HW_2_1_1_OTP_FILE); -MODULE_FIRMWARE(AR6003_HW_2_1_1_FIRMWARE_FILE); -MODULE_FIRMWARE(AR6003_HW_2_1_1_PATCH_FILE); -MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE); -MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE); -MODULE_FIRMWARE(AR6004_HW_1_0_FIRMWARE_FILE); -MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE); -MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE); -MODULE_FIRMWARE(AR6004_HW_1_1_FIRMWARE_FILE); -MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE); -MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6003_REV2_OTP_FILE); +MODULE_FIRMWARE(AR6003_REV2_FIRMWARE_FILE); +MODULE_FIRMWARE(AR6003_REV2_PATCH_FILE); +MODULE_FIRMWARE(AR6003_REV2_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6003_REV2_DEFAULT_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6003_REV3_OTP_FILE); +MODULE_FIRMWARE(AR6003_REV3_FIRMWARE_FILE); +MODULE_FIRMWARE(AR6003_REV3_PATCH_FILE); +MODULE_FIRMWARE(AR6003_REV3_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6003_REV3_DEFAULT_BOARD_DATA_FILE); diff --git a/trunk/drivers/net/wireless/ath/ath6kl/target.h b/trunk/drivers/net/wireless/ath/ath6kl/target.h index 108a723a1085..c9a76051f042 100644 --- a/trunk/drivers/net/wireless/ath/ath6kl/target.h +++ b/trunk/drivers/net/wireless/ath/ath6kl/target.h @@ -20,7 +20,7 @@ #define AR6003_BOARD_DATA_SZ 1024 #define AR6003_BOARD_EXT_DATA_SZ 768 -#define AR6004_BOARD_DATA_SZ 6144 +#define AR6004_BOARD_DATA_SZ 7168 #define AR6004_BOARD_EXT_DATA_SZ 0 #define RESET_CONTROL_ADDRESS 0x00000000 @@ -320,10 +320,7 @@ struct host_interest { | (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) |------------------------------------------------------------------------------| */ -#define HI_OPTION_FW_MODE_BITS 0x2 #define HI_OPTION_FW_MODE_SHIFT 0xC - -#define HI_OPTION_FW_SUBMODE_BITS 0x2 #define HI_OPTION_FW_SUBMODE_SHIFT 0x14 /* Convert a Target virtual address into a Target physical address */ @@ -334,6 +331,20 @@ struct host_interest { (((target_type) == TARGET_TYPE_AR6003) ? AR6003_VTOP(vaddr) : \ (((target_type) == TARGET_TYPE_AR6004) ? 
AR6004_VTOP(vaddr) : 0)) +#define AR6003_REV2_APP_LOAD_ADDRESS 0x543180 +#define AR6003_REV2_BOARD_EXT_DATA_ADDRESS 0x57E500 +#define AR6003_REV2_DATASET_PATCH_ADDRESS 0x57e884 +#define AR6003_REV2_RAM_RESERVE_SIZE 6912 + +#define AR6003_REV3_APP_LOAD_ADDRESS 0x545000 +#define AR6003_REV3_BOARD_EXT_DATA_ADDRESS 0x542330 +#define AR6003_REV3_DATASET_PATCH_ADDRESS 0x57FF74 +#define AR6003_REV3_RAM_RESERVE_SIZE 512 + +#define AR6004_REV1_BOARD_DATA_ADDRESS 0x435400 +#define AR6004_REV1_BOARD_EXT_DATA_ADDRESS 0x437000 +#define AR6004_REV1_RAM_RESERVE_SIZE 11264 + #define ATH6KL_FWLOG_PAYLOAD_SIZE 1500 struct ath6kl_dbglog_buf { diff --git a/trunk/drivers/net/wireless/ath/ath6kl/txrx.c b/trunk/drivers/net/wireless/ath/ath6kl/txrx.c index 506a3031a885..a7117074f81c 100644 --- a/trunk/drivers/net/wireless/ath/ath6kl/txrx.c +++ b/trunk/drivers/net/wireless/ath/ath6kl/txrx.c @@ -77,13 +77,12 @@ static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev, return ar->node_map[ep_map].ep_id; } -static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb, +static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb, bool *more_data) { struct ethhdr *datap = (struct ethhdr *) skb->data; struct ath6kl_sta *conn = NULL; bool ps_queued = false, is_psq_empty = false; - struct ath6kl *ar = vif->ar; if (is_multicast_ether_addr(datap->h_dest)) { u8 ctr = 0; @@ -101,7 +100,7 @@ static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb, * If this transmit is not because of a Dtim Expiry * q it. */ - if (!test_bit(DTIM_EXPIRED, &vif->flags)) { + if (!test_bit(DTIM_EXPIRED, &ar->flag)) { bool is_mcastq_empty = false; spin_lock_bh(&ar->mcastpsq_lock); @@ -117,7 +116,6 @@ static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb, */ if (is_mcastq_empty) ath6kl_wmi_set_pvb_cmd(ar->wmi, - vif->fw_vif_idx, MCAST_AID, 1); ps_queued = true; @@ -133,7 +131,7 @@ static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb, } } } else { - conn = ath6kl_find_sta(vif, datap->h_dest); + conn = ath6kl_find_sta(ar, datap->h_dest); if (!conn) { dev_kfree_skb(skb); @@ -156,7 +154,6 @@ static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb, */ if (is_psq_empty) ath6kl_wmi_set_pvb_cmd(ar->wmi, - vif->fw_vif_idx, conn->aid, 1); ps_queued = true; @@ -238,7 +235,6 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev) struct ath6kl *ar = ath6kl_priv(dev); struct ath6kl_cookie *cookie = NULL; enum htc_endpoint_id eid = ENDPOINT_UNUSED; - struct ath6kl_vif *vif = netdev_priv(dev); u32 map_no = 0; u16 htc_tag = ATH6KL_DATA_PKT_TAG; u8 ac = 99 ; /* initialize to unmapped ac */ @@ -250,7 +246,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev) skb, skb->data, skb->len); /* If target is not associated */ - if (!test_bit(CONNECTED, &vif->flags)) { + if (!test_bit(CONNECTED, &ar->flag)) { dev_kfree_skb(skb); return 0; } @@ -259,21 +255,15 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev) goto fail_tx; /* AP mode Power saving processing */ - if (vif->nw_type == AP_NETWORK) { - if (ath6kl_powersave_ap(vif, skb, &more_data)) + if (ar->nw_type == AP_NETWORK) { + if (ath6kl_powersave_ap(ar, skb, &more_data)) return 0; } if (test_bit(WMI_ENABLED, &ar->flag)) { if (skb_headroom(skb) < dev->needed_headroom) { - struct sk_buff *tmp_skb = skb; - - skb = skb_realloc_headroom(skb, dev->needed_headroom); - kfree_skb(tmp_skb); - if (skb == NULL) { - vif->net_stats.tx_dropped++; - return 0; - } + 
WARN_ON(1); + goto fail_tx; } if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) { @@ -282,20 +272,18 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev) } if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE, - more_data, 0, 0, NULL, - vif->fw_vif_idx)) { + more_data, 0, 0, NULL)) { ath6kl_err("wmi_data_hdr_add failed\n"); goto fail_tx; } - if ((vif->nw_type == ADHOC_NETWORK) && - ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags)) + if ((ar->nw_type == ADHOC_NETWORK) && + ar->ibss_ps_enable && test_bit(CONNECTED, &ar->flag)) chk_adhoc_ps_mapping = true; else { /* get the stream mapping */ - ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, - vif->fw_vif_idx, skb, - 0, test_bit(WMM_ENABLED, &vif->flags), &ac); + ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, skb, + 0, test_bit(WMM_ENABLED, &ar->flag), &ac); if (ret) goto fail_tx; } @@ -366,8 +354,8 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev) fail_tx: dev_kfree_skb(skb); - vif->net_stats.tx_dropped++; - vif->net_stats.tx_aborted_errors++; + ar->net_stats.tx_dropped++; + ar->net_stats.tx_aborted_errors++; return 0; } @@ -438,9 +426,7 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target, struct htc_packet *packet) { struct ath6kl *ar = target->dev->ar; - struct ath6kl_vif *vif; enum htc_endpoint_id endpoint = packet->endpoint; - enum htc_send_full_action action = HTC_SEND_FULL_KEEP; if (endpoint == ar->ctrl_ep) { /* @@ -453,11 +439,19 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target, set_bit(WMI_CTRL_EP_FULL, &ar->flag); spin_unlock_bh(&ar->lock); ath6kl_err("wmi ctrl ep is full\n"); - return action; + return HTC_SEND_FULL_KEEP; } if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG) - return action; + return HTC_SEND_FULL_KEEP; + + if (ar->nw_type == ADHOC_NETWORK) + /* + * In adhoc mode, we cannot differentiate traffic + * priorities so there is no need to continue, however we + * should stop the network. + */ + goto stop_net_queues; /* * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for @@ -470,36 +464,24 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target, * Give preference to the highest priority stream by * dropping the packets which overflowed. 
*/ - action = HTC_SEND_FULL_DROP; + return HTC_SEND_FULL_DROP; - /* FIXME: Locking */ - spin_lock_bh(&ar->list_lock); - list_for_each_entry(vif, &ar->vif_list, list) { - if (vif->nw_type == ADHOC_NETWORK || - action != HTC_SEND_FULL_DROP) { - spin_unlock_bh(&ar->list_lock); - - spin_lock_bh(&vif->if_lock); - set_bit(NETQ_STOPPED, &vif->flags); - spin_unlock_bh(&vif->if_lock); - netif_stop_queue(vif->ndev); - - return action; - } - } - spin_unlock_bh(&ar->list_lock); +stop_net_queues: + spin_lock_bh(&ar->lock); + set_bit(NETQ_STOPPED, &ar->flag); + spin_unlock_bh(&ar->lock); + netif_stop_queue(ar->net_dev); - return action; + return HTC_SEND_FULL_KEEP; } /* TODO this needs to be looked at */ -static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif, +static void ath6kl_tx_clear_node_map(struct ath6kl *ar, enum htc_endpoint_id eid, u32 map_no) { - struct ath6kl *ar = vif->ar; u32 i; - if (vif->nw_type != ADHOC_NETWORK) + if (ar->nw_type != ADHOC_NETWORK) return; if (!ar->ibss_ps_enable) @@ -541,9 +523,7 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue) int status; enum htc_endpoint_id eid; bool wake_event = false; - bool flushing[ATH6KL_VIF_MAX] = {false}; - u8 if_idx; - struct ath6kl_vif *vif; + bool flushing = false; skb_queue_head_init(&skb_queue); @@ -569,6 +549,8 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue) if (!skb || !skb->data) goto fatal; + packet->buf = skb->data; + __skb_queue_tail(&skb_queue, skb); if (!status && (packet->act_len != skb->len)) @@ -587,30 +569,15 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue) wake_event = true; } - if (eid == ar->ctrl_ep) { - if_idx = wmi_cmd_hdr_get_if_idx( - (struct wmi_cmd_hdr *) packet->buf); - } else { - if_idx = wmi_data_hdr_get_if_idx( - (struct wmi_data_hdr *) packet->buf); - } - - vif = ath6kl_get_vif_by_index(ar, if_idx); - if (!vif) { - ath6kl_free_cookie(ar, ath6kl_cookie); - continue; - } - if (status) { if (status == -ECANCELED) /* a packet was flushed */ - flushing[if_idx] = true; - - vif->net_stats.tx_errors++; + flushing = true; - if (status != -ENOSPC && status != -ECANCELED) - ath6kl_warn("tx complete error: %d\n", status); + ar->net_stats.tx_errors++; + if (status != -ENOSPC) + ath6kl_err("tx error, status: 0x%x\n", status); ath6kl_dbg(ATH6KL_DBG_WLAN_TX, "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n", __func__, skb, packet->buf, packet->act_len, @@ -621,34 +588,27 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue) __func__, skb, packet->buf, packet->act_len, eid, "OK"); - flushing[if_idx] = false; - vif->net_stats.tx_packets++; - vif->net_stats.tx_bytes += skb->len; + flushing = false; + ar->net_stats.tx_packets++; + ar->net_stats.tx_bytes += skb->len; } - ath6kl_tx_clear_node_map(vif, eid, map_no); + ath6kl_tx_clear_node_map(ar, eid, map_no); ath6kl_free_cookie(ar, ath6kl_cookie); - if (test_bit(NETQ_STOPPED, &vif->flags)) - clear_bit(NETQ_STOPPED, &vif->flags); + if (test_bit(NETQ_STOPPED, &ar->flag)) + clear_bit(NETQ_STOPPED, &ar->flag); } spin_unlock_bh(&ar->lock); __skb_queue_purge(&skb_queue); - /* FIXME: Locking */ - spin_lock_bh(&ar->list_lock); - list_for_each_entry(vif, &ar->vif_list, list) { - if (test_bit(CONNECTED, &vif->flags) && - !flushing[vif->fw_vif_idx]) { - spin_unlock_bh(&ar->list_lock); - netif_wake_queue(vif->ndev); - spin_lock_bh(&ar->list_lock); - } + if (test_bit(CONNECTED, &ar->flag)) { + if (!flushing) + netif_wake_queue(ar->net_dev); } - spin_unlock_bh(&ar->list_lock); if (wake_event) 
wake_up(&ar->event_wq); @@ -1081,9 +1041,8 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) struct ath6kl_sta *conn = NULL; struct sk_buff *skb1 = NULL; struct ethhdr *datap = NULL; - struct ath6kl_vif *vif; u16 seq_no, offset; - u8 tid, if_idx; + u8 tid; ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d", @@ -1091,23 +1050,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) packet->act_len, status); if (status || !(skb->data + HTC_HDR_LENGTH)) { - dev_kfree_skb(skb); - return; - } - - skb_put(skb, packet->act_len + HTC_HDR_LENGTH); - skb_pull(skb, HTC_HDR_LENGTH); - - if (ept == ar->ctrl_ep) { - if_idx = - wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data); - } else { - if_idx = - wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data); - } - - vif = ath6kl_get_vif_by_index(ar, if_idx); - if (!vif) { + ar->net_stats.rx_errors++; dev_kfree_skb(skb); return; } @@ -1116,28 +1059,28 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) * Take lock to protect buffer counts and adaptive power throughput * state. */ - spin_lock_bh(&vif->if_lock); + spin_lock_bh(&ar->lock); - vif->net_stats.rx_packets++; - vif->net_stats.rx_bytes += packet->act_len; + ar->net_stats.rx_packets++; + ar->net_stats.rx_bytes += packet->act_len; - spin_unlock_bh(&vif->if_lock); + spin_unlock_bh(&ar->lock); + skb_put(skb, packet->act_len + HTC_HDR_LENGTH); + skb_pull(skb, HTC_HDR_LENGTH); ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ", skb->data, skb->len); - skb->dev = vif->ndev; + skb->dev = ar->net_dev; if (!test_bit(WMI_ENABLED, &ar->flag)) { if (EPPING_ALIGNMENT_PAD > 0) skb_pull(skb, EPPING_ALIGNMENT_PAD); - ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb); + ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb); return; } - ath6kl_check_wow_status(ar); - if (ept == ar->ctrl_ep) { ath6kl_wmi_control_rx(ar->wmi, skb); return; @@ -1153,18 +1096,18 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) * that do not have LLC hdr. They are 16 bytes in size. * Allow these frames in the AP mode. 
*/ - if (vif->nw_type != AP_NETWORK && + if (ar->nw_type != AP_NETWORK && ((packet->act_len < min_hdr_len) || (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) { ath6kl_info("frame len is too short or too long\n"); - vif->net_stats.rx_errors++; - vif->net_stats.rx_length_errors++; + ar->net_stats.rx_errors++; + ar->net_stats.rx_length_errors++; dev_kfree_skb(skb); return; } /* Get the Power save state of the STA */ - if (vif->nw_type == AP_NETWORK) { + if (ar->nw_type == AP_NETWORK) { meta_type = wmi_data_hdr_get_meta(dhdr); ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) & @@ -1186,7 +1129,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) } datap = (struct ethhdr *) (skb->data + offset); - conn = ath6kl_find_sta(vif, datap->h_source); + conn = ath6kl_find_sta(ar, datap->h_source); if (!conn) { dev_kfree_skb(skb); @@ -1217,13 +1160,12 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) while ((skbuff = skb_dequeue(&conn->psq)) != NULL) { spin_unlock_bh(&conn->psq_lock); - ath6kl_data_tx(skbuff, vif->ndev); + ath6kl_data_tx(skbuff, ar->net_dev); spin_lock_bh(&conn->psq_lock); } spin_unlock_bh(&conn->psq_lock); /* Clear the PVB for this STA */ - ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, - conn->aid, 0); + ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0); } } @@ -1273,12 +1215,12 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) return; } - if (!(vif->ndev->flags & IFF_UP)) { + if (!(ar->net_dev->flags & IFF_UP)) { dev_kfree_skb(skb); return; } - if (vif->nw_type == AP_NETWORK) { + if (ar->nw_type == AP_NETWORK) { datap = (struct ethhdr *) skb->data; if (is_multicast_ether_addr(datap->h_dest)) /* @@ -1293,7 +1235,8 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) * frame to it on the air else send the * frame up the stack. 
*/ - conn = ath6kl_find_sta(vif, datap->h_dest); + struct ath6kl_sta *conn = NULL; + conn = ath6kl_find_sta(ar, datap->h_dest); if (conn && ar->intra_bss) { skb1 = skb; @@ -1304,23 +1247,18 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) } } if (skb1) - ath6kl_data_tx(skb1, vif->ndev); - - if (skb == NULL) { - /* nothing to deliver up the stack */ - return; - } + ath6kl_data_tx(skb1, ar->net_dev); } datap = (struct ethhdr *) skb->data; if (is_unicast_ether_addr(datap->h_dest) && - aggr_process_recv_frm(vif->aggr_cntxt, tid, seq_no, + aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no, is_amsdu, skb)) /* aggregation code will handle the skb */ return; - ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb); + ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb); } static void aggr_timeout(unsigned long arg) @@ -1398,10 +1336,9 @@ static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid) memset(stats, 0, sizeof(struct rxtid_stats)); } -void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no, - u8 win_sz) +void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no, u8 win_sz) { - struct aggr_info *p_aggr = vif->aggr_cntxt; + struct aggr_info *p_aggr = ar->aggr_cntxt; struct rxtid *rxtid; struct rxtid_stats *stats; u16 hold_q_size; @@ -1468,9 +1405,9 @@ struct aggr_info *aggr_init(struct net_device *dev) return p_aggr; } -void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid) +void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid) { - struct aggr_info *p_aggr = vif->aggr_cntxt; + struct aggr_info *p_aggr = ar->aggr_cntxt; struct rxtid *rxtid; if (!p_aggr) diff --git a/trunk/drivers/net/wireless/ath/ath6kl/wmi.c b/trunk/drivers/net/wireless/ath/ath6kl/wmi.c index f6f2aa27fc20..a7de23cbd2c7 100644 --- a/trunk/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/trunk/drivers/net/wireless/ath/ath6kl/wmi.c @@ -21,7 +21,7 @@ #include "../regd.h" #include "../regd_common.h" -static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx); +static int ath6kl_wmi_sync_point(struct wmi *wmi); static const s32 wmi_rate_tbl[][2] = { /* {W/O SGI, with SGI} */ @@ -81,26 +81,6 @@ enum htc_endpoint_id ath6kl_wmi_get_control_ep(struct wmi *wmi) return wmi->ep_id; } -struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx) -{ - struct ath6kl_vif *vif, *found = NULL; - - if (WARN_ON(if_idx > (ar->vif_max - 1))) - return NULL; - - /* FIXME: Locking */ - spin_lock_bh(&ar->list_lock); - list_for_each_entry(vif, &ar->vif_list, list) { - if (vif->fw_vif_idx == if_idx) { - found = vif; - break; - } - } - spin_unlock_bh(&ar->list_lock); - - return found; -} - /* Performs DIX to 802.3 encapsulation for transmit packets. * Assumes the entire DIX header is contigous and that there is * enough room in the buffer for a 802.3 mac header and LLC+SNAP headers. 
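
The comment above describes the DIX (Ethernet II) to 802.3/LLC/SNAP conversion the WMI layer performs on transmit frames. As a rough, self-contained illustration of that header rewrite only — not the driver's skb-based ath6kl_wmi_dix_2_dot3() — the sketch below uses plain userspace C. All struct and function names are local to the example; the LLC/SNAP values (DSAP/SSAP 0xAA, control 0x03, zero OUI) are the standard IEEE 802.2 SNAP EtherType encapsulation, and the code assumes the caller reserved 8 bytes of headroom in front of the frame, mirroring the "enough room in the buffer" precondition stated in the comment.

/*
 * Illustrative sketch: rewrite a DIX (Ethernet II) header into an
 * 802.3 MAC header followed by an LLC/SNAP header.  The 2-byte
 * EtherType field becomes a length field, and the EtherType itself is
 * carried in the SNAP header instead.
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct eth_dix_hdr {                    /* DIX / Ethernet II header */
	uint8_t  dst[6];
	uint8_t  src[6];
	uint16_t ethertype;             /* big endian on the wire */
} __attribute__((packed));

struct llc_snap_hdr {                   /* 802.2 LLC + SNAP */
	uint8_t  dsap, ssap, ctrl;      /* 0xAA 0xAA 0x03 */
	uint8_t  oui[3];                /* 00:00:00 = EtherType encapsulation */
	uint16_t ethertype;
} __attribute__((packed));

struct eth_8023_hdr {                   /* 802.3 MAC header */
	uint8_t  dst[6];
	uint8_t  src[6];
	uint16_t length;                /* payload length incl. LLC/SNAP */
} __attribute__((packed));

/*
 * Convert in place.  'frame' points at the DIX header, 'payload_len'
 * is the number of payload bytes following it, and the caller must
 * have left sizeof(struct llc_snap_hdr) bytes of headroom in front of
 * 'frame'.  Returns the new start of the frame.
 */
uint8_t *dix_to_dot3(uint8_t *frame, size_t payload_len)
{
	struct eth_dix_hdr dix;
	struct eth_8023_hdr *mac;
	struct llc_snap_hdr *llc;
	uint8_t *new_start = frame - sizeof(*llc);

	/* Save the original header before overwriting it in place. */
	memcpy(&dix, frame, sizeof(dix));

	mac = (struct eth_8023_hdr *)new_start;
	memcpy(mac->dst, dix.dst, 6);
	memcpy(mac->src, dix.src, 6);
	mac->length = htons((uint16_t)(payload_len + sizeof(*llc)));

	llc = (struct llc_snap_hdr *)(new_start + sizeof(*mac));
	llc->dsap = 0xAA;
	llc->ssap = 0xAA;
	llc->ctrl = 0x03;
	memset(llc->oui, 0, sizeof(llc->oui));
	llc->ethertype = dix.ethertype; /* already in network byte order */

	return new_start;
}

The payload itself does not move: the new MAC plus LLC/SNAP headers occupy exactly the original 14-byte DIX header plus the 8 bytes of reserved headroom, which is why the driver only needs headroom rather than a full copy.
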
@@ -182,12 +162,12 @@ static int ath6kl_wmi_meta_add(struct wmi *wmi, struct sk_buff *skb, int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb, u8 msg_type, bool more_data, enum wmi_data_hdr_data_type data_type, - u8 meta_ver, void *tx_meta_info, u8 if_idx) + u8 meta_ver, void *tx_meta_info) { struct wmi_data_hdr *data_hdr; int ret; - if (WARN_ON(skb == NULL || (if_idx > wmi->parent_dev->vif_max - 1))) + if (WARN_ON(skb == NULL)) return -EINVAL; if (tx_meta_info) { @@ -209,7 +189,7 @@ int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb, WMI_DATA_HDR_MORE_MASK << WMI_DATA_HDR_MORE_SHIFT; data_hdr->info2 = cpu_to_le16(meta_ver << WMI_DATA_HDR_META_SHIFT); - data_hdr->info3 = cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK); + data_hdr->info3 = 0; return 0; } @@ -236,8 +216,7 @@ static u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri) return ip_pri; } -int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx, - struct sk_buff *skb, +int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb, u32 layer2_priority, bool wmm_enabled, u8 *ac) { @@ -283,12 +262,7 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx, usr_pri = layer2_priority & 0x7; } - /* - * workaround for WMM S5 - * - * FIXME: wmi->traffic_class is always 100 so this test doesn't - * make sense - */ + /* workaround for WMM S5 */ if ((wmi->traffic_class == WMM_AC_VI) && ((usr_pri == 5) || (usr_pri == 4))) usr_pri = 1; @@ -310,7 +284,7 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx, cpu_to_le32(WMI_IMPLICIT_PSTREAM_INACTIVITY_INT); /* Implicit streams are created with TSID 0xFF */ cmd.tsid = WMI_IMPLICIT_PSTREAM; - ath6kl_wmi_create_pstream_cmd(wmi, if_idx, &cmd); + ath6kl_wmi_create_pstream_cmd(wmi, &cmd); } *ac = traffic_class; @@ -436,14 +410,13 @@ static int ath6kl_wmi_tx_complete_event_rx(u8 *datap, int len) } static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap, - int len, struct ath6kl_vif *vif) + int len) { struct wmi_remain_on_chnl_event *ev; u32 freq; u32 dur; struct ieee80211_channel *chan; struct ath6kl *ar = wmi->parent_dev; - u32 id; if (len < sizeof(*ev)) return -EINVAL; @@ -453,29 +426,26 @@ static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap, dur = le32_to_cpu(ev->duration); ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: freq=%u dur=%u\n", freq, dur); - chan = ieee80211_get_channel(ar->wiphy, freq); + chan = ieee80211_get_channel(ar->wdev->wiphy, freq); if (!chan) { ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: Unknown channel " "(freq=%u)\n", freq); return -EINVAL; } - id = vif->last_roc_id; - cfg80211_ready_on_channel(vif->ndev, id, chan, NL80211_CHAN_NO_HT, + cfg80211_ready_on_channel(ar->net_dev, 1, chan, NL80211_CHAN_NO_HT, dur, GFP_ATOMIC); return 0; } static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi, - u8 *datap, int len, - struct ath6kl_vif *vif) + u8 *datap, int len) { struct wmi_cancel_remain_on_chnl_event *ev; u32 freq; u32 dur; struct ieee80211_channel *chan; struct ath6kl *ar = wmi->parent_dev; - u32 id; if (len < sizeof(*ev)) return -EINVAL; @@ -485,29 +455,23 @@ static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi, dur = le32_to_cpu(ev->duration); ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: freq=%u dur=%u " "status=%u\n", freq, dur, ev->status); - chan = ieee80211_get_channel(ar->wiphy, freq); + chan = ieee80211_get_channel(ar->wdev->wiphy, freq); if (!chan) { ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: Unknown " 
"channel (freq=%u)\n", freq); return -EINVAL; } - if (vif->last_cancel_roc_id && - vif->last_cancel_roc_id + 1 == vif->last_roc_id) - id = vif->last_cancel_roc_id; /* event for cancel command */ - else - id = vif->last_roc_id; /* timeout on uncanceled r-o-c */ - vif->last_cancel_roc_id = 0; - cfg80211_remain_on_channel_expired(vif->ndev, id, chan, + cfg80211_remain_on_channel_expired(ar->net_dev, 1, chan, NL80211_CHAN_NO_HT, GFP_ATOMIC); return 0; } -static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len, - struct ath6kl_vif *vif) +static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len) { struct wmi_tx_status_event *ev; u32 id; + struct ath6kl *ar = wmi->parent_dev; if (len < sizeof(*ev)) return -EINVAL; @@ -517,7 +481,7 @@ static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len, ath6kl_dbg(ATH6KL_DBG_WMI, "tx_status: id=%x ack_status=%u\n", id, ev->ack_status); if (wmi->last_mgmt_tx_frame) { - cfg80211_mgmt_tx_status(vif->ndev, id, + cfg80211_mgmt_tx_status(ar->net_dev, id, wmi->last_mgmt_tx_frame, wmi->last_mgmt_tx_frame_len, !!ev->ack_status, GFP_ATOMIC); @@ -529,12 +493,12 @@ static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len, return 0; } -static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len, - struct ath6kl_vif *vif) +static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len) { struct wmi_p2p_rx_probe_req_event *ev; u32 freq; u16 dlen; + struct ath6kl *ar = wmi->parent_dev; if (len < sizeof(*ev)) return -EINVAL; @@ -549,10 +513,10 @@ static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len, } ath6kl_dbg(ATH6KL_DBG_WMI, "rx_probe_req: len=%u freq=%u " "probe_req_report=%d\n", - dlen, freq, vif->probe_req_report); + dlen, freq, ar->probe_req_report); - if (vif->probe_req_report || vif->nw_type == AP_NETWORK) - cfg80211_rx_mgmt(vif->ndev, freq, ev->data, dlen, GFP_ATOMIC); + if (ar->probe_req_report || ar->nw_type == AP_NETWORK) + cfg80211_rx_mgmt(ar->net_dev, freq, ev->data, dlen, GFP_ATOMIC); return 0; } @@ -572,12 +536,12 @@ static int ath6kl_wmi_p2p_capabilities_event_rx(u8 *datap, int len) return 0; } -static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len, - struct ath6kl_vif *vif) +static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len) { struct wmi_rx_action_event *ev; u32 freq; u16 dlen; + struct ath6kl *ar = wmi->parent_dev; if (len < sizeof(*ev)) return -EINVAL; @@ -591,7 +555,7 @@ static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len, return -EINVAL; } ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq); - cfg80211_rx_mgmt(vif->ndev, freq, ev->data, dlen, GFP_ATOMIC); + cfg80211_rx_mgmt(ar->net_dev, freq, ev->data, dlen, GFP_ATOMIC); return 0; } @@ -656,8 +620,7 @@ static inline struct sk_buff *ath6kl_wmi_get_new_buf(u32 size) } /* Send a "simple" wmi command -- one with no arguments */ -static int ath6kl_wmi_simple_cmd(struct wmi *wmi, u8 if_idx, - enum wmi_cmd_id cmd_id) +static int ath6kl_wmi_simple_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id) { struct sk_buff *skb; int ret; @@ -666,7 +629,7 @@ static int ath6kl_wmi_simple_cmd(struct wmi *wmi, u8 if_idx, if (!skb) return -ENOMEM; - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, cmd_id, NO_SYNC_WMIFLAG); + ret = ath6kl_wmi_cmd_send(wmi, skb, cmd_id, NO_SYNC_WMIFLAG); return ret; } @@ -678,6 +641,7 @@ static int ath6kl_wmi_ready_event_rx(struct wmi *wmi, u8 *datap, int len) if (len < 
sizeof(struct wmi_ready_event_2)) return -EINVAL; + wmi->ready = true; ath6kl_ready_event(wmi->parent_dev, ev->mac_addr, le32_to_cpu(ev->sw_version), le32_to_cpu(ev->abi_version)); @@ -709,73 +673,32 @@ int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi) cmd->info.params.roam_rssi_floor = DEF_LRSSI_ROAM_FLOOR; cmd->roam_ctrl = WMI_SET_LRSSI_SCAN_PARAMS; - ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID, - NO_SYNC_WMIFLAG); + ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_ROAM_CTRL_CMDID, NO_SYNC_WMIFLAG); return 0; } -int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid) -{ - struct sk_buff *skb; - struct roam_ctrl_cmd *cmd; - - skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); - if (!skb) - return -ENOMEM; - - cmd = (struct roam_ctrl_cmd *) skb->data; - memset(cmd, 0, sizeof(*cmd)); - - memcpy(cmd->info.bssid, bssid, ETH_ALEN); - cmd->roam_ctrl = WMI_FORCE_ROAM; - - ath6kl_dbg(ATH6KL_DBG_WMI, "force roam to %pM\n", bssid); - return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID, - NO_SYNC_WMIFLAG); -} - -int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode) -{ - struct sk_buff *skb; - struct roam_ctrl_cmd *cmd; - - skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); - if (!skb) - return -ENOMEM; - - cmd = (struct roam_ctrl_cmd *) skb->data; - memset(cmd, 0, sizeof(*cmd)); - - cmd->info.roam_mode = mode; - cmd->roam_ctrl = WMI_SET_ROAM_MODE; - - ath6kl_dbg(ATH6KL_DBG_WMI, "set roam mode %d\n", mode); - return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID, - NO_SYNC_WMIFLAG); -} - -static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len, - struct ath6kl_vif *vif) +static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len) { struct wmi_connect_event *ev; u8 *pie, *peie; + struct ath6kl *ar = wmi->parent_dev; if (len < sizeof(struct wmi_connect_event)) return -EINVAL; ev = (struct wmi_connect_event *) datap; - if (vif->nw_type == AP_NETWORK) { + if (ar->nw_type == AP_NETWORK) { /* AP mode start/STA connected event */ - struct net_device *dev = vif->ndev; + struct net_device *dev = ar->net_dev; if (memcmp(dev->dev_addr, ev->u.ap_bss.bssid, ETH_ALEN) == 0) { ath6kl_dbg(ATH6KL_DBG_WMI, "%s: freq %d bssid %pM " "(AP started)\n", __func__, le16_to_cpu(ev->u.ap_bss.ch), ev->u.ap_bss.bssid); ath6kl_connect_ap_mode_bss( - vif, le16_to_cpu(ev->u.ap_bss.ch)); + ar, le16_to_cpu(ev->u.ap_bss.ch)); } else { ath6kl_dbg(ATH6KL_DBG_WMI, "%s: aid %u mac_addr %pM " "auth=%u keymgmt=%u cipher=%u apsd_info=%u " @@ -787,7 +710,7 @@ static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len, le16_to_cpu(ev->u.ap_sta.cipher), ev->u.ap_sta.apsd_info); ath6kl_connect_ap_mode_sta( - vif, ev->u.ap_sta.aid, ev->u.ap_sta.mac_addr, + ar, ev->u.ap_sta.aid, ev->u.ap_sta.mac_addr, ev->u.ap_sta.keymgmt, le16_to_cpu(ev->u.ap_sta.cipher), ev->u.ap_sta.auth, ev->assoc_req_len, @@ -832,7 +755,7 @@ static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len, pie += pie[1] + 2; } - ath6kl_connect_event(vif, le16_to_cpu(ev->u.sta.ch), + ath6kl_connect_event(wmi->parent_dev, le16_to_cpu(ev->u.sta.ch), ev->u.sta.bssid, le16_to_cpu(ev->u.sta.listen_intvl), le16_to_cpu(ev->u.sta.beacon_intvl), @@ -911,15 +834,14 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len) alpha2[0] = country->isoName[0]; alpha2[1] = country->isoName[1]; - regulatory_hint(wmi->parent_dev->wiphy, alpha2); + regulatory_hint(wmi->parent_dev->wdev->wiphy, alpha2); ath6kl_dbg(ATH6KL_DBG_WMI, "Country alpha2 being used: %c%c\n", 
alpha2[0], alpha2[1]); } } -static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len, - struct ath6kl_vif *vif) +static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len) { struct wmi_disconnect_event *ev; wmi->traffic_class = 100; @@ -935,8 +857,10 @@ static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len, ev->disconn_reason, ev->assoc_resp_len); wmi->is_wmm_enabled = false; + wmi->pair_crypto_type = NONE_CRYPT; + wmi->grp_crypto_type = NONE_CRYPT; - ath6kl_disconnect_event(vif, ev->disconn_reason, + ath6kl_disconnect_event(wmi->parent_dev, ev->disconn_reason, ev->bssid, ev->assoc_resp_len, ev->assoc_info, le16_to_cpu(ev->proto_reason_status)); @@ -962,8 +886,7 @@ static int ath6kl_wmi_peer_node_event_rx(struct wmi *wmi, u8 *datap, int len) return 0; } -static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len, - struct ath6kl_vif *vif) +static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len) { struct wmi_tkip_micerr_event *ev; @@ -972,20 +895,12 @@ static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len, ev = (struct wmi_tkip_micerr_event *) datap; - ath6kl_tkip_micerr_event(vif, ev->key_id, ev->is_mcast); + ath6kl_tkip_micerr_event(wmi->parent_dev, ev->key_id, ev->is_mcast); return 0; } -void ath6kl_wmi_sscan_timer(unsigned long ptr) -{ - struct ath6kl_vif *vif = (struct ath6kl_vif *) ptr; - - cfg80211_sched_scan_results(vif->ar->wiphy); -} - -static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len, - struct ath6kl_vif *vif) +static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len) { struct wmi_bss_info_hdr2 *bih; u8 *buf; @@ -1012,27 +927,26 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len, return 0; /* Only update BSS table for now */ if (bih->frame_type == BEACON_FTYPE && - test_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags)) { - clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags); - ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, - NONE_BSS_FILTER, 0); + test_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag)) { + clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag); + ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0); } - channel = ieee80211_get_channel(ar->wiphy, le16_to_cpu(bih->ch)); + channel = ieee80211_get_channel(ar->wdev->wiphy, le16_to_cpu(bih->ch)); if (channel == NULL) return -EINVAL; if (len < 8 + 2 + 2) return -EINVAL; - if (bih->frame_type == BEACON_FTYPE && test_bit(CONNECTED, &vif->flags) - && memcmp(bih->bssid, vif->bssid, ETH_ALEN) == 0) { + if (bih->frame_type == BEACON_FTYPE && test_bit(CONNECTED, &ar->flag) && + memcmp(bih->bssid, ar->bssid, ETH_ALEN) == 0) { const u8 *tim; tim = cfg80211_find_ie(WLAN_EID_TIM, buf + 8 + 2 + 2, len - 8 - 2 - 2); if (tim && tim[1] >= 2) { - vif->assoc_bss_dtim_period = tim[3]; - set_bit(DTIM_PERIOD_AVAIL, &vif->flags); + ar->assoc_bss_dtim_period = tim[3]; + set_bit(DTIM_PERIOD_AVAIL, &ar->flag); } } @@ -1052,7 +966,7 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len, IEEE80211_STYPE_BEACON); memset(mgmt->da, 0xff, ETH_ALEN); } else { - struct net_device *dev = vif->ndev; + struct net_device *dev = ar->net_dev; mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP); @@ -1065,7 +979,7 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len, memcpy(&mgmt->u.beacon, buf, len); - bss = cfg80211_inform_bss_frame(ar->wiphy, channel, mgmt, + bss = 
cfg80211_inform_bss_frame(ar->wdev->wiphy, channel, mgmt, 24 + len, (bih->snr - 95) * 100, GFP_ATOMIC); kfree(mgmt); @@ -1073,21 +987,6 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len, return -ENOMEM; cfg80211_put_bss(bss); - /* - * Firmware doesn't return any event when scheduled scan has - * finished, so we need to use a timer to find out when there are - * no more results. - * - * The timer is started from the first bss info received, otherwise - * the timer would not ever fire if the scan interval is short - * enough. - */ - if (ar->state == ATH6KL_STATE_SCHED_SCAN && - !timer_pending(&vif->sched_scan_timer)) { - mod_timer(&vif->sched_scan_timer, jiffies + - msecs_to_jiffies(ATH6KL_SCHED_SCAN_RESULT_DELAY)); - } - return 0; } @@ -1195,21 +1094,20 @@ static int ath6kl_wmi_keepalive_reply_rx(struct wmi *wmi, u8 *datap, int len) return 0; } -static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len, - struct ath6kl_vif *vif) +static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len) { struct wmi_scan_complete_event *ev; ev = (struct wmi_scan_complete_event *) datap; - ath6kl_scan_complete_evt(vif, a_sle32_to_cpu(ev->status)); + ath6kl_scan_complete_evt(wmi->parent_dev, a_sle32_to_cpu(ev->status)); wmi->is_probe_ssid = false; return 0; } static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap, - int len, struct ath6kl_vif *vif) + int len) { struct wmi_neighbor_report_event *ev; u8 i; @@ -1227,7 +1125,7 @@ static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap, ath6kl_dbg(ATH6KL_DBG_WMI, "neighbor %d/%d - %pM 0x%x\n", i + 1, ev->num_neighbors, ev->neighbor[i].bssid, ev->neighbor[i].bss_flags); - cfg80211_pmksa_candidate_notify(vif->ndev, i, + cfg80211_pmksa_candidate_notify(wmi->parent_dev->net_dev, i, ev->neighbor[i].bssid, !!(ev->neighbor[i].bss_flags & WMI_PREAUTH_CAPABLE_BSS), @@ -1268,10 +1166,9 @@ static int ath6kl_wmi_error_event_rx(struct wmi *wmi, u8 *datap, int len) return 0; } -static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len, - struct ath6kl_vif *vif) +static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len) { - ath6kl_tgt_stats_event(vif, datap, len); + ath6kl_tgt_stats_event(wmi->parent_dev, datap, len); return 0; } @@ -1325,7 +1222,7 @@ static int ath6kl_wmi_send_rssi_threshold_params(struct wmi *wmi, cmd = (struct wmi_rssi_threshold_params_cmd *) skb->data; memcpy(cmd, rssi_cmd, sizeof(struct wmi_rssi_threshold_params_cmd)); - return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID, + return ath6kl_wmi_cmd_send(wmi, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID, NO_SYNC_WMIFLAG); } @@ -1425,8 +1322,7 @@ static int ath6kl_wmi_rssi_threshold_event_rx(struct wmi *wmi, u8 *datap, return 0; } -static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len, - struct ath6kl_vif *vif) +static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len) { struct wmi_cac_event *reply; struct ieee80211_tspec_ie *ts; @@ -1447,8 +1343,7 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len, tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) & IEEE80211_WMM_IE_TSPEC_TID_MASK; - ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx, - reply->ac, tsid); + ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, tsid); } else if (reply->cac_indication == CAC_INDICATION_NO_RESP) { /* * Following assumes that there is only one outstanding @@ -1463,8 +1358,7 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int 
len, break; } if (index < (sizeof(active_tsids) * 8)) - ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx, - reply->ac, index); + ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, index); } /* @@ -1509,7 +1403,7 @@ static int ath6kl_wmi_send_snr_threshold_params(struct wmi *wmi, cmd = (struct wmi_snr_threshold_params_cmd *) skb->data; memcpy(cmd, snr_cmd, sizeof(struct wmi_snr_threshold_params_cmd)); - return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID, + return ath6kl_wmi_cmd_send(wmi, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID, NO_SYNC_WMIFLAG); } @@ -1634,15 +1528,14 @@ static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len) return 0; } -int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb, +int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb, enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag) { struct wmi_cmd_hdr *cmd_hdr; enum htc_endpoint_id ep_id = wmi->ep_id; int ret; - u16 info1; - if (WARN_ON(skb == NULL || (if_idx > (wmi->parent_dev->vif_max - 1)))) + if (WARN_ON(skb == NULL)) return -EINVAL; ath6kl_dbg(ATH6KL_DBG_WMI, "wmi tx id %d len %d flag %d\n", @@ -1661,20 +1554,19 @@ int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb, * Make sure all data currently queued is transmitted before * the cmd execution. Establish a new sync point. */ - ath6kl_wmi_sync_point(wmi, if_idx); + ath6kl_wmi_sync_point(wmi); } skb_push(skb, sizeof(struct wmi_cmd_hdr)); cmd_hdr = (struct wmi_cmd_hdr *) skb->data; cmd_hdr->cmd_id = cpu_to_le16(cmd_id); - info1 = if_idx & WMI_CMD_HDR_IF_ID_MASK; - cmd_hdr->info1 = cpu_to_le16(info1); + cmd_hdr->info1 = 0; /* added for virtual interface */ /* Only for OPT_TX_CMD, use BE endpoint. */ if (cmd_id == WMI_OPT_TX_FRAME_CMDID) { ret = ath6kl_wmi_data_hdr_add(wmi, skb, OPT_MSGTYPE, - false, false, 0, NULL, if_idx); + false, false, 0, NULL); if (ret) { dev_kfree_skb(skb); return ret; @@ -1690,22 +1582,20 @@ int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb, * Make sure all new data queued waits for the command to * execute. Establish a new sync point. 
*/ - ath6kl_wmi_sync_point(wmi, if_idx); + ath6kl_wmi_sync_point(wmi); } return 0; } -int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx, - enum network_type nw_type, +int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type, enum dot11_auth_mode dot11_auth_mode, enum auth_mode auth_mode, enum crypto_type pairwise_crypto, u8 pairwise_crypto_len, enum crypto_type group_crypto, u8 group_crypto_len, int ssid_len, u8 *ssid, - u8 *bssid, u16 channel, u32 ctrl_flags, - u8 nw_subtype) + u8 *bssid, u16 channel, u32 ctrl_flags) { struct sk_buff *skb; struct wmi_connect_cmd *cc; @@ -1745,19 +1635,19 @@ int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx, cc->grp_crypto_len = group_crypto_len; cc->ch = cpu_to_le16(channel); cc->ctrl_flags = cpu_to_le32(ctrl_flags); - cc->nw_subtype = nw_subtype; if (bssid != NULL) memcpy(cc->bssid, bssid, ETH_ALEN); - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CONNECT_CMDID, - NO_SYNC_WMIFLAG); + wmi->pair_crypto_type = pairwise_crypto; + wmi->grp_crypto_type = group_crypto; + + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CONNECT_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid, - u16 channel) +int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel) { struct sk_buff *skb; struct wmi_reconnect_cmd *cc; @@ -1778,13 +1668,13 @@ int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid, if (bssid != NULL) memcpy(cc->bssid, bssid, ETH_ALEN); - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_RECONNECT_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RECONNECT_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx) +int ath6kl_wmi_disconnect_cmd(struct wmi *wmi) { int ret; @@ -1793,79 +1683,12 @@ int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx) wmi->traffic_class = 100; /* Disconnect command does not need to do a SYNC before. 
*/ - ret = ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_DISCONNECT_CMDID); - - return ret; -} - -int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx, - enum wmi_scan_type scan_type, - u32 force_fgscan, u32 is_legacy, - u32 home_dwell_time, u32 force_scan_interval, - s8 num_chan, u16 *ch_list, u32 no_cck, u32 *rates) -{ - struct sk_buff *skb; - struct wmi_begin_scan_cmd *sc; - s8 size; - int i, band, ret; - struct ath6kl *ar = wmi->parent_dev; - int num_rates; - - size = sizeof(struct wmi_begin_scan_cmd); - - if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN)) - return -EINVAL; - - if (num_chan > WMI_MAX_CHANNELS) - return -EINVAL; - - if (num_chan) - size += sizeof(u16) * (num_chan - 1); - - skb = ath6kl_wmi_get_new_buf(size); - if (!skb) - return -ENOMEM; - - sc = (struct wmi_begin_scan_cmd *) skb->data; - sc->scan_type = scan_type; - sc->force_fg_scan = cpu_to_le32(force_fgscan); - sc->is_legacy = cpu_to_le32(is_legacy); - sc->home_dwell_time = cpu_to_le32(home_dwell_time); - sc->force_scan_intvl = cpu_to_le32(force_scan_interval); - sc->no_cck = cpu_to_le32(no_cck); - sc->num_ch = num_chan; - - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { - struct ieee80211_supported_band *sband = - ar->wiphy->bands[band]; - u32 ratemask = rates[band]; - u8 *supp_rates = sc->supp_rates[band].rates; - num_rates = 0; - - for (i = 0; i < sband->n_bitrates; i++) { - if ((BIT(i) & ratemask) == 0) - continue; /* skip rate */ - supp_rates[num_rates++] = - (u8) (sband->bitrates[i].bitrate / 5); - } - sc->supp_rates[band].nrates = num_rates; - } - - for (i = 0; i < num_chan; i++) - sc->ch_list[i] = cpu_to_le16(ch_list[i]); - - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_BEGIN_SCAN_CMDID, - NO_SYNC_WMIFLAG); + ret = ath6kl_wmi_simple_cmd(wmi, WMI_DISCONNECT_CMDID); return ret; } -/* ath6kl_wmi_start_scan_cmd is to be deprecated. Use - * ath6kl_wmi_begin_scan_cmd instead. The new function supports P2P - * mgmt operations using station interface. 
- */ -int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx, - enum wmi_scan_type scan_type, +int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type, u32 force_fgscan, u32 is_legacy, u32 home_dwell_time, u32 force_scan_interval, s8 num_chan, u16 *ch_list) @@ -1901,14 +1724,13 @@ int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx, for (i = 0; i < num_chan; i++) sc->ch_list[i] = cpu_to_le16(ch_list[i]); - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_START_SCAN_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_START_SCAN_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx, - u16 fg_start_sec, +int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec, u16 fg_end_sec, u16 bg_sec, u16 minact_chdw_msec, u16 maxact_chdw_msec, u16 pas_chdw_msec, u8 short_scan_ratio, @@ -1935,12 +1757,12 @@ int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx, sc->max_dfsch_act_time = cpu_to_le32(max_dfsch_act_time); sc->maxact_scan_per_ssid = cpu_to_le16(maxact_scan_per_ssid); - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_SCAN_PARAMS_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_SCAN_PARAMS_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 if_idx, u8 filter, u32 ie_mask) +int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask) { struct sk_buff *skb; struct wmi_bss_filter_cmd *cmd; @@ -1957,12 +1779,12 @@ int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 if_idx, u8 filter, u32 ie_mask) cmd->bss_filter = filter; cmd->ie_mask = cpu_to_le32(ie_mask); - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_BSS_FILTER_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_BSS_FILTER_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag, +int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag, u8 ssid_len, u8 *ssid) { struct sk_buff *skb; @@ -1994,13 +1816,12 @@ int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag, cmd->ssid_len = ssid_len; memcpy(cmd->ssid, ssid, ssid_len); - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_PROBED_SSID_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PROBED_SSID_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx, - u16 listen_interval, +int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval, u16 listen_beacons) { struct sk_buff *skb; @@ -2015,12 +1836,12 @@ int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx, cmd->listen_intvl = cpu_to_le16(listen_interval); cmd->num_beacons = cpu_to_le16(listen_beacons); - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LISTEN_INT_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LISTEN_INT_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode) +int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode) { struct sk_buff *skb; struct wmi_power_mode_cmd *cmd; @@ -2034,12 +1855,12 @@ int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode) cmd->pwr_mode = pwr_mode; wmi->pwr_mode = pwr_mode; - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_POWER_MODE_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_MODE_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period, +int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period, u16 ps_poll_num, u16 dtim_policy, u16 tx_wakeup_policy, u16 
num_tx_to_wakeup, u16 ps_fail_event_policy) @@ -2060,12 +1881,12 @@ int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period, pm->num_tx_to_wakeup = cpu_to_le16(num_tx_to_wakeup); pm->ps_fail_event_policy = cpu_to_le16(ps_fail_event_policy); - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_POWER_PARAMS_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_PARAMS_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout) +int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout) { struct sk_buff *skb; struct wmi_disc_timeout_cmd *cmd; @@ -2078,20 +1899,15 @@ int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout) cmd = (struct wmi_disc_timeout_cmd *) skb->data; cmd->discon_timeout = timeout; - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_DISC_TIMEOUT_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_DISC_TIMEOUT_CMDID, NO_SYNC_WMIFLAG); - - if (ret == 0) - ath6kl_debug_set_disconnect_timeout(wmi->parent_dev, timeout); - return ret; } -int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index, +int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index, enum crypto_type key_type, u8 key_usage, u8 key_len, - u8 *key_rsc, unsigned int key_rsc_len, - u8 *key_material, + u8 *key_rsc, u8 *key_material, u8 key_op_ctrl, u8 *mac_addr, enum wmi_sync_flag sync_flag) { @@ -2104,7 +1920,7 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index, key_index, key_type, key_usage, key_len, key_op_ctrl); if ((key_index > WMI_MAX_KEY_INDEX) || (key_len > WMI_MAX_KEY_LEN) || - (key_material == NULL) || key_rsc_len > 8) + (key_material == NULL)) return -EINVAL; if ((WEP_CRYPT != key_type) && (NULL == key_rsc)) @@ -2122,20 +1938,20 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index, memcpy(cmd->key, key_material, key_len); if (key_rsc != NULL) - memcpy(cmd->key_rsc, key_rsc, key_rsc_len); + memcpy(cmd->key_rsc, key_rsc, sizeof(cmd->key_rsc)); cmd->key_op_ctrl = key_op_ctrl; if (mac_addr) memcpy(cmd->key_mac_addr, mac_addr, ETH_ALEN); - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_CIPHER_KEY_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_CIPHER_KEY_CMDID, sync_flag); return ret; } -int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk) +int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk) { struct sk_buff *skb; struct wmi_add_krk_cmd *cmd; @@ -2148,13 +1964,12 @@ int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk) cmd = (struct wmi_add_krk_cmd *) skb->data; memcpy(cmd->krk, krk, WMI_KRK_LEN); - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_KRK_CMDID, - NO_SYNC_WMIFLAG); + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_KRK_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index) +int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index) { struct sk_buff *skb; struct wmi_delete_cipher_key_cmd *cmd; @@ -2170,13 +1985,13 @@ int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index) cmd = (struct wmi_delete_cipher_key_cmd *) skb->data; cmd->key_index = key_index; - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DELETE_CIPHER_KEY_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_CIPHER_KEY_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid, +int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid, const u8 *pmkid, bool set) { struct sk_buff *skb; @@ -2203,14 +2018,14 @@ int ath6kl_wmi_setpmkid_cmd(struct 
wmi *wmi, u8 if_idx, const u8 *bssid, cmd->enable = PMKID_DISABLE; } - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_PMKID_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PMKID_CMDID, NO_SYNC_WMIFLAG); return ret; } static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb, - enum htc_endpoint_id ep_id, u8 if_idx) + enum htc_endpoint_id ep_id) { struct wmi_data_hdr *data_hdr; int ret; @@ -2222,14 +2037,14 @@ static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb, data_hdr = (struct wmi_data_hdr *) skb->data; data_hdr->info = SYNC_MSGTYPE << WMI_DATA_HDR_MSG_TYPE_SHIFT; - data_hdr->info3 = cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK); + data_hdr->info3 = 0; ret = ath6kl_control_tx(wmi->parent_dev, skb, ep_id); return ret; } -static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx) +static int ath6kl_wmi_sync_point(struct wmi *wmi) { struct sk_buff *skb; struct wmi_sync_cmd *cmd; @@ -2285,7 +2100,7 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx) * Send sync cmd followed by sync data messages on all * endpoints being used */ - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SYNCHRONIZE_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SYNCHRONIZE_CMDID, NO_SYNC_WMIFLAG); if (ret) @@ -2304,7 +2119,7 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx) traffic_class); ret = ath6kl_wmi_data_sync_send(wmi, data_sync_bufs[index].skb, - ep_id, if_idx); + ep_id); if (ret) break; @@ -2327,7 +2142,7 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx) return ret; } -int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx, +int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, struct wmi_create_pstream_cmd *params) { struct sk_buff *skb; @@ -2416,13 +2231,12 @@ int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx, ath6kl_indicate_tx_activity(wmi->parent_dev, params->traffic_class, true); - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CREATE_PSTREAM_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CREATE_PSTREAM_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class, - u8 tsid) +int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid) { struct sk_buff *skb; struct wmi_delete_pstream_cmd *cmd; @@ -2458,7 +2272,7 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class, "sending delete_pstream_cmd: traffic class: %d tsid=%d\n", traffic_class, tsid); - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DELETE_PSTREAM_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_PSTREAM_CMDID, SYNC_BEFORE_WMIFLAG); spin_lock_bh(&wmi->lock); @@ -2497,173 +2311,17 @@ int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd) cmd = (struct wmi_set_ip_cmd *) skb->data; memcpy(cmd, ip_cmd, sizeof(struct wmi_set_ip_cmd)); - ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_IP_CMDID, - NO_SYNC_WMIFLAG); + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_IP_CMDID, NO_SYNC_WMIFLAG); return ret; } -static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi) +static int ath6kl_wmi_get_wow_list_event_rx(struct wmi *wmi, u8 * datap, + int len) { - u16 active_tsids; - u8 stream_exist; - int i; - - /* - * Relinquish credits from all implicitly created pstreams - * since when we go to sleep. If user created explicit - * thinstreams exists with in a fatpipe leave them intact - * for the user to delete. 
- */ - spin_lock_bh(&wmi->lock); - stream_exist = wmi->fat_pipe_exist; - spin_unlock_bh(&wmi->lock); - - for (i = 0; i < WMM_NUM_AC; i++) { - if (stream_exist & (1 << i)) { - - /* - * FIXME: Is this lock & unlock inside - * for loop correct? may need rework. - */ - spin_lock_bh(&wmi->lock); - active_tsids = wmi->stream_exist_for_ac[i]; - spin_unlock_bh(&wmi->lock); - - /* - * If there are no user created thin streams - * delete the fatpipe - */ - if (!active_tsids) { - stream_exist &= ~(1 << i); - /* - * Indicate inactivity to driver layer for - * this fatpipe (pstream) - */ - ath6kl_indicate_tx_activity(wmi->parent_dev, - i, false); - } - } - } - - /* FIXME: Can we do this assignment without locking ? */ - spin_lock_bh(&wmi->lock); - wmi->fat_pipe_exist = stream_exist; - spin_unlock_bh(&wmi->lock); -} - -int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx, - enum ath6kl_host_mode host_mode) -{ - struct sk_buff *skb; - struct wmi_set_host_sleep_mode_cmd *cmd; - int ret; - - if ((host_mode != ATH6KL_HOST_MODE_ASLEEP) && - (host_mode != ATH6KL_HOST_MODE_AWAKE)) { - ath6kl_err("invalid host sleep mode: %d\n", host_mode); + if (len < sizeof(struct wmi_get_wow_list_reply)) return -EINVAL; - } - - skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); - if (!skb) - return -ENOMEM; - - cmd = (struct wmi_set_host_sleep_mode_cmd *) skb->data; - - if (host_mode == ATH6KL_HOST_MODE_ASLEEP) { - ath6kl_wmi_relinquish_implicit_pstream_credits(wmi); - cmd->asleep = cpu_to_le32(1); - } else - cmd->awake = cpu_to_le32(1); - - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, - WMI_SET_HOST_SLEEP_MODE_CMDID, - NO_SYNC_WMIFLAG); - return ret; -} - -int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx, - enum ath6kl_wow_mode wow_mode, - u32 filter, u16 host_req_delay) -{ - struct sk_buff *skb; - struct wmi_set_wow_mode_cmd *cmd; - int ret; - - if ((wow_mode != ATH6KL_WOW_MODE_ENABLE) && - wow_mode != ATH6KL_WOW_MODE_DISABLE) { - ath6kl_err("invalid wow mode: %d\n", wow_mode); - return -EINVAL; - } - - skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); - if (!skb) - return -ENOMEM; - cmd = (struct wmi_set_wow_mode_cmd *) skb->data; - cmd->enable_wow = cpu_to_le32(wow_mode); - cmd->filter = cpu_to_le32(filter); - cmd->host_req_delay = cpu_to_le16(host_req_delay); - - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_WOW_MODE_CMDID, - NO_SYNC_WMIFLAG); - return ret; -} - -int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx, - u8 list_id, u8 filter_size, - u8 filter_offset, u8 *filter, u8 *mask) -{ - struct sk_buff *skb; - struct wmi_add_wow_pattern_cmd *cmd; - u16 size; - u8 *filter_mask; - int ret; - - /* - * Allocate additional memory in the buffer to hold - * filter and mask value, which is twice of filter_size. 
- */ - size = sizeof(*cmd) + (2 * filter_size); - - skb = ath6kl_wmi_get_new_buf(size); - if (!skb) - return -ENOMEM; - - cmd = (struct wmi_add_wow_pattern_cmd *) skb->data; - cmd->filter_list_id = list_id; - cmd->filter_size = filter_size; - cmd->filter_offset = filter_offset; - - memcpy(cmd->filter, filter, filter_size); - - filter_mask = (u8 *) (cmd->filter + filter_size); - memcpy(filter_mask, mask, filter_size); - - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_WOW_PATTERN_CMDID, - NO_SYNC_WMIFLAG); - - return ret; -} - -int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx, - u16 list_id, u16 filter_id) -{ - struct sk_buff *skb; - struct wmi_del_wow_pattern_cmd *cmd; - int ret; - - skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); - if (!skb) - return -ENOMEM; - - cmd = (struct wmi_del_wow_pattern_cmd *) skb->data; - cmd->filter_list_id = cpu_to_le16(list_id); - cmd->filter_id = cpu_to_le16(filter_id); - - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DEL_WOW_PATTERN_CMDID, - NO_SYNC_WMIFLAG); - return ret; + return 0; } static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb, @@ -2678,7 +2336,7 @@ static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb, cmd_hdr = (struct wmix_cmd_hdr *) skb->data; cmd_hdr->cmd_id = cpu_to_le32(cmd_id); - ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_EXTENSION_CMDID, sync_flag); + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_EXTENSION_CMDID, sync_flag); return ret; } @@ -2721,12 +2379,12 @@ int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config) return ret; } -int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx) +int ath6kl_wmi_get_stats_cmd(struct wmi *wmi) { - return ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_GET_STATISTICS_CMDID); + return ath6kl_wmi_simple_cmd(wmi, WMI_GET_STATISTICS_CMDID); } -int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 if_idx, u8 dbM) +int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM) { struct sk_buff *skb; struct wmi_set_tx_pwr_cmd *cmd; @@ -2739,24 +2397,18 @@ int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 if_idx, u8 dbM) cmd = (struct wmi_set_tx_pwr_cmd *) skb->data; cmd->dbM = dbM; - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_TX_PWR_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_TX_PWR_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi, u8 if_idx) -{ - return ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_GET_TX_PWR_CMDID); -} - -int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi) +int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi) { - return ath6kl_wmi_simple_cmd(wmi, 0, WMI_GET_ROAM_TBL_CMDID); + return ath6kl_wmi_simple_cmd(wmi, WMI_GET_TX_PWR_CMDID); } -int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 if_idx, u8 status, - u8 preamble_policy) +int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status, u8 preamble_policy) { struct sk_buff *skb; struct wmi_set_lpreamble_cmd *cmd; @@ -2770,7 +2422,7 @@ int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 if_idx, u8 status, cmd->status = status; cmd->preamble_policy = preamble_policy; - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LPREAMBLE_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LPREAMBLE_CMDID, NO_SYNC_WMIFLAG); return ret; } @@ -2788,12 +2440,11 @@ int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold) cmd = (struct wmi_set_rts_cmd *) skb->data; cmd->threshold = cpu_to_le16(threshold); - ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_RTS_CMDID, - NO_SYNC_WMIFLAG); + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_RTS_CMDID, 
NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg) +int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg) { struct sk_buff *skb; struct wmi_set_wmm_txop_cmd *cmd; @@ -2809,13 +2460,12 @@ int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg) cmd = (struct wmi_set_wmm_txop_cmd *) skb->data; cmd->txop_enable = cfg; - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_WMM_TXOP_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_WMM_TXOP_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx, - u8 keep_alive_intvl) +int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl) { struct sk_buff *skb; struct wmi_set_keepalive_cmd *cmd; @@ -2827,13 +2477,10 @@ int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx, cmd = (struct wmi_set_keepalive_cmd *) skb->data; cmd->keep_alive_intvl = keep_alive_intvl; + wmi->keep_alive_intvl = keep_alive_intvl; - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_KEEPALIVE_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_KEEPALIVE_CMDID, NO_SYNC_WMIFLAG); - - if (ret == 0) - ath6kl_debug_set_keepalive(wmi->parent_dev, keep_alive_intvl); - return ret; } @@ -2848,7 +2495,7 @@ int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len) memcpy(skb->data, buf, len); - ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_TEST_CMDID, NO_SYNC_WMIFLAG); + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_TEST_CMDID, NO_SYNC_WMIFLAG); return ret; } @@ -2881,31 +2528,28 @@ static int ath6kl_wmi_get_pmkid_list_event_rx(struct wmi *wmi, u8 *datap, return 0; } -static int ath6kl_wmi_addba_req_event_rx(struct wmi *wmi, u8 *datap, int len, - struct ath6kl_vif *vif) +static int ath6kl_wmi_addba_req_event_rx(struct wmi *wmi, u8 *datap, int len) { struct wmi_addba_req_event *cmd = (struct wmi_addba_req_event *) datap; - aggr_recv_addba_req_evt(vif, cmd->tid, + aggr_recv_addba_req_evt(wmi->parent_dev, cmd->tid, le16_to_cpu(cmd->st_seq_no), cmd->win_sz); return 0; } -static int ath6kl_wmi_delba_req_event_rx(struct wmi *wmi, u8 *datap, int len, - struct ath6kl_vif *vif) +static int ath6kl_wmi_delba_req_event_rx(struct wmi *wmi, u8 *datap, int len) { struct wmi_delba_event *cmd = (struct wmi_delba_event *) datap; - aggr_recv_delba_req_evt(vif, cmd->tid); + aggr_recv_delba_req_evt(wmi->parent_dev, cmd->tid); return 0; } /* AP mode functions */ -int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx, - struct wmi_connect_cmd *p) +int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p) { struct sk_buff *skb; struct wmi_connect_cmd *cm; @@ -2918,7 +2562,7 @@ int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx, cm = (struct wmi_connect_cmd *) skb->data; memcpy(cm, p, sizeof(*cm)); - res = ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_CONFIG_COMMIT_CMDID, + res = ath6kl_wmi_cmd_send(wmip, skb, WMI_AP_CONFIG_COMMIT_CMDID, NO_SYNC_WMIFLAG); ath6kl_dbg(ATH6KL_DBG_WMI, "%s: nw_type=%u auth_mode=%u ch=%u " "ctrl_flags=0x%x-> res=%d\n", @@ -2927,8 +2571,7 @@ int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx, return res; } -int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd, const u8 *mac, - u16 reason) +int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason) { struct sk_buff *skb; struct wmi_ap_set_mlme_cmd *cm; @@ -2942,12 +2585,11 @@ int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd, const u8 *mac, cm->reason = cpu_to_le16(reason); cm->cmd = cmd; - 
return ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_SET_MLME_CMDID, + return ath6kl_wmi_cmd_send(wmip, skb, WMI_AP_SET_MLME_CMDID, NO_SYNC_WMIFLAG); } -static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len, - struct ath6kl_vif *vif) +static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len) { struct wmi_pspoll_event *ev; @@ -2956,21 +2598,19 @@ static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len, ev = (struct wmi_pspoll_event *) datap; - ath6kl_pspoll_event(vif, le16_to_cpu(ev->aid)); + ath6kl_pspoll_event(wmi->parent_dev, le16_to_cpu(ev->aid)); return 0; } -static int ath6kl_wmi_dtimexpiry_event_rx(struct wmi *wmi, u8 *datap, int len, - struct ath6kl_vif *vif) +static int ath6kl_wmi_dtimexpiry_event_rx(struct wmi *wmi, u8 *datap, int len) { - ath6kl_dtimexpiry_event(vif); + ath6kl_dtimexpiry_event(wmi->parent_dev); return 0; } -int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid, - bool flag) +int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag) { struct sk_buff *skb; struct wmi_ap_set_pvb_cmd *cmd; @@ -2985,14 +2625,13 @@ int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid, cmd->rsvd = cpu_to_le16(0); cmd->flag = cpu_to_le32(flag); - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_SET_PVB_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_AP_SET_PVB_CMDID, NO_SYNC_WMIFLAG); return 0; } -int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx, - u8 rx_meta_ver, +int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_ver, bool rx_dot11_hdr, bool defrag_on_host) { struct sk_buff *skb; @@ -3009,14 +2648,14 @@ int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx, cmd->meta_ver = rx_meta_ver; /* Delete the local aggr state, on host */ - ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_RX_FRAME_FORMAT_CMDID, + ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RX_FRAME_FORMAT_CMDID, NO_SYNC_WMIFLAG); return ret; } -int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type, - const u8 *ie, u8 ie_len) +int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie, + u8 ie_len) { struct sk_buff *skb; struct wmi_set_appie_cmd *p; @@ -3030,11 +2669,8 @@ int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type, p = (struct wmi_set_appie_cmd *) skb->data; p->mgmt_frm_type = mgmt_frm_type; p->ie_len = ie_len; - - if (ie != NULL && ie_len > 0) - memcpy(p->ie_info, ie, ie_len); - - return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_APPIE_CMDID, + memcpy(p->ie_info, ie, ie_len); + return ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_APPIE_CMDID, NO_SYNC_WMIFLAG); } @@ -3052,11 +2688,11 @@ int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable) cmd = (struct wmi_disable_11b_rates_cmd *) skb->data; cmd->disable = disable ? 
1 : 0; - return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_DISABLE_11B_RATES_CMDID, + return ath6kl_wmi_cmd_send(wmi, skb, WMI_DISABLE_11B_RATES_CMDID, NO_SYNC_WMIFLAG); } -int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq, u32 dur) +int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur) { struct sk_buff *skb; struct wmi_remain_on_chnl_cmd *p; @@ -3070,16 +2706,12 @@ int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq, u32 dur) p = (struct wmi_remain_on_chnl_cmd *) skb->data; p->freq = cpu_to_le32(freq); p->duration = cpu_to_le32(dur); - return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_REMAIN_ON_CHNL_CMDID, + return ath6kl_wmi_cmd_send(wmi, skb, WMI_REMAIN_ON_CHNL_CMDID, NO_SYNC_WMIFLAG); } -/* ath6kl_wmi_send_action_cmd is to be deprecated. Use - * ath6kl_wmi_send_mgmt_cmd instead. The new function supports P2P - * mgmt operations using station interface. - */ -int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq, - u32 wait, const u8 *data, u16 data_len) +int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait, + const u8 *data, u16 data_len) { struct sk_buff *skb; struct wmi_send_action_cmd *p; @@ -3099,7 +2731,6 @@ int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq, } kfree(wmi->last_mgmt_tx_frame); - memcpy(buf, data, data_len); wmi->last_mgmt_tx_frame = buf; wmi->last_mgmt_tx_frame_len = data_len; @@ -3111,61 +2742,18 @@ int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq, p->wait = cpu_to_le32(wait); p->len = cpu_to_le16(data_len); memcpy(p->data, data, data_len); - return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SEND_ACTION_CMDID, + return ath6kl_wmi_cmd_send(wmi, skb, WMI_SEND_ACTION_CMDID, NO_SYNC_WMIFLAG); } -int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq, - u32 wait, const u8 *data, u16 data_len, - u32 no_cck) -{ - struct sk_buff *skb; - struct wmi_send_mgmt_cmd *p; - u8 *buf; - - if (wait) - return -EINVAL; /* Offload for wait not supported */ - - buf = kmalloc(data_len, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - skb = ath6kl_wmi_get_new_buf(sizeof(*p) + data_len); - if (!skb) { - kfree(buf); - return -ENOMEM; - } - - kfree(wmi->last_mgmt_tx_frame); - memcpy(buf, data, data_len); - wmi->last_mgmt_tx_frame = buf; - wmi->last_mgmt_tx_frame_len = data_len; - - ath6kl_dbg(ATH6KL_DBG_WMI, "send_action_cmd: id=%u freq=%u wait=%u " - "len=%u\n", id, freq, wait, data_len); - p = (struct wmi_send_mgmt_cmd *) skb->data; - p->id = cpu_to_le32(id); - p->freq = cpu_to_le32(freq); - p->wait = cpu_to_le32(wait); - p->no_cck = cpu_to_le32(no_cck); - p->len = cpu_to_le16(data_len); - memcpy(p->data, data, data_len); - return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SEND_MGMT_CMDID, - NO_SYNC_WMIFLAG); -} - -int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq, - const u8 *dst, const u8 *data, - u16 data_len) +int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq, + const u8 *dst, + const u8 *data, u16 data_len) { struct sk_buff *skb; struct wmi_p2p_probe_response_cmd *p; - size_t cmd_len = sizeof(*p) + data_len; - if (data_len == 0) - cmd_len++; /* work around target minimum length requirement */ - - skb = ath6kl_wmi_get_new_buf(cmd_len); + skb = ath6kl_wmi_get_new_buf(sizeof(*p) + data_len); if (!skb) return -ENOMEM; @@ -3176,12 +2764,11 @@ int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq, memcpy(p->destination_addr, dst, ETH_ALEN); p->len = 
cpu_to_le16(data_len); memcpy(p->data, data, data_len); - return ath6kl_wmi_cmd_send(wmi, if_idx, skb, - WMI_SEND_PROBE_RESPONSE_CMDID, + return ath6kl_wmi_cmd_send(wmi, skb, WMI_SEND_PROBE_RESPONSE_CMDID, NO_SYNC_WMIFLAG); } -int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, u8 if_idx, bool enable) +int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable) { struct sk_buff *skb; struct wmi_probe_req_report_cmd *p; @@ -3194,11 +2781,11 @@ int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, u8 if_idx, bool enable) enable); p = (struct wmi_probe_req_report_cmd *) skb->data; p->enable = enable ? 1 : 0; - return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_PROBE_REQ_REPORT_CMDID, + return ath6kl_wmi_cmd_send(wmi, skb, WMI_PROBE_REQ_REPORT_CMDID, NO_SYNC_WMIFLAG); } -int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u8 if_idx, u32 info_req_flags) +int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags) { struct sk_buff *skb; struct wmi_get_p2p_info *p; @@ -3211,15 +2798,14 @@ int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u8 if_idx, u32 info_req_flags) info_req_flags); p = (struct wmi_get_p2p_info *) skb->data; p->info_req_flags = cpu_to_le32(info_req_flags); - return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_GET_P2P_INFO_CMDID, + return ath6kl_wmi_cmd_send(wmi, skb, WMI_GET_P2P_INFO_CMDID, NO_SYNC_WMIFLAG); } -int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx) +int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi) { ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl_cmd\n"); - return ath6kl_wmi_simple_cmd(wmi, if_idx, - WMI_CANCEL_REMAIN_ON_CHNL_CMDID); + return ath6kl_wmi_simple_cmd(wmi, WMI_CANCEL_REMAIN_ON_CHNL_CMDID); } static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb) @@ -3232,6 +2818,7 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb) if (skb->len < sizeof(struct wmix_cmd_hdr)) { ath6kl_err("bad packet 1\n"); + wmi->stat.cmd_len_err++; return -EINVAL; } @@ -3253,6 +2840,7 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb) break; default: ath6kl_warn("unknown cmd id 0x%x\n", id); + wmi->stat.cmd_id_err++; ret = -EINVAL; break; } @@ -3260,19 +2848,12 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb) return ret; } -static int ath6kl_wmi_roam_tbl_event_rx(struct wmi *wmi, u8 *datap, int len) -{ - return ath6kl_debug_roam_tbl_event(wmi->parent_dev, datap, len); -} - /* Control Path */ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) { struct wmi_cmd_hdr *cmd; - struct ath6kl_vif *vif; u32 len; u16 id; - u8 if_idx; u8 *datap; int ret = 0; @@ -3282,12 +2863,12 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) if (skb->len < sizeof(struct wmi_cmd_hdr)) { ath6kl_err("bad packet 1\n"); dev_kfree_skb(skb); + wmi->stat.cmd_len_err++; return -EINVAL; } cmd = (struct wmi_cmd_hdr *) skb->data; id = le16_to_cpu(cmd->cmd_id); - if_idx = le16_to_cpu(cmd->info1) & WMI_CMD_HDR_IF_ID_MASK; skb_pull(skb, sizeof(struct wmi_cmd_hdr)); @@ -3298,15 +2879,6 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) ath6kl_dbg_dump(ATH6KL_DBG_WMI_DUMP, NULL, "wmi rx ", datap, len); - vif = ath6kl_get_vif_by_index(wmi->parent_dev, if_idx); - if (!vif) { - ath6kl_dbg(ATH6KL_DBG_WMI, - "Wmi event for unavailable vif, vif_index:%d\n", - if_idx); - dev_kfree_skb(skb); - return -EINVAL; - } - switch (id) { case WMI_GET_BITRATE_CMDID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_BITRATE_CMDID\n"); @@ -3326,11 +2898,11 @@ int 
ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) break; case WMI_CONNECT_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CONNECT_EVENTID\n"); - ret = ath6kl_wmi_connect_event_rx(wmi, datap, len, vif); + ret = ath6kl_wmi_connect_event_rx(wmi, datap, len); break; case WMI_DISCONNECT_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DISCONNECT_EVENTID\n"); - ret = ath6kl_wmi_disconnect_event_rx(wmi, datap, len, vif); + ret = ath6kl_wmi_disconnect_event_rx(wmi, datap, len); break; case WMI_PEER_NODE_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PEER_NODE_EVENTID\n"); @@ -3338,11 +2910,11 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) break; case WMI_TKIP_MICERR_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TKIP_MICERR_EVENTID\n"); - ret = ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len, vif); + ret = ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len); break; case WMI_BSSINFO_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_BSSINFO_EVENTID\n"); - ret = ath6kl_wmi_bssinfo_event_rx(wmi, datap, len, vif); + ret = ath6kl_wmi_bssinfo_event_rx(wmi, datap, len); break; case WMI_REGDOMAIN_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REGDOMAIN_EVENTID\n"); @@ -3354,12 +2926,11 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) break; case WMI_NEIGHBOR_REPORT_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_NEIGHBOR_REPORT_EVENTID\n"); - ret = ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len, - vif); + ret = ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len); break; case WMI_SCAN_COMPLETE_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SCAN_COMPLETE_EVENTID\n"); - ret = ath6kl_wmi_scan_complete_rx(wmi, datap, len, vif); + ret = ath6kl_wmi_scan_complete_rx(wmi, datap, len); break; case WMI_CMDERROR_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CMDERROR_EVENTID\n"); @@ -3367,7 +2938,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) break; case WMI_REPORT_STATISTICS_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_STATISTICS_EVENTID\n"); - ret = ath6kl_wmi_stats_event_rx(wmi, datap, len, vif); + ret = ath6kl_wmi_stats_event_rx(wmi, datap, len); break; case WMI_RSSI_THRESHOLD_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RSSI_THRESHOLD_EVENTID\n"); @@ -3382,7 +2953,6 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) break; case WMI_REPORT_ROAM_TBL_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_ROAM_TBL_EVENTID\n"); - ret = ath6kl_wmi_roam_tbl_event_rx(wmi, datap, len); break; case WMI_EXTENSION_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_EXTENSION_EVENTID\n"); @@ -3390,7 +2960,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) break; case WMI_CAC_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CAC_EVENTID\n"); - ret = ath6kl_wmi_cac_event_rx(wmi, datap, len, vif); + ret = ath6kl_wmi_cac_event_rx(wmi, datap, len); break; case WMI_CHANNEL_CHANGE_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CHANNEL_CHANGE_EVENTID\n"); @@ -3426,6 +2996,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) break; case WMI_GET_WOW_LIST_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_WOW_LIST_EVENTID\n"); + ret = ath6kl_wmi_get_wow_list_event_rx(wmi, datap, len); break; case WMI_GET_PMKID_LIST_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_PMKID_LIST_EVENTID\n"); @@ -3433,25 +3004,25 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) break; case WMI_PSPOLL_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSPOLL_EVENTID\n"); - ret = ath6kl_wmi_pspoll_event_rx(wmi, datap, len, vif); + ret = ath6kl_wmi_pspoll_event_rx(wmi, datap, len); break; case WMI_DTIMEXPIRY_EVENTID: 
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DTIMEXPIRY_EVENTID\n"); - ret = ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len, vif); + ret = ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len); break; case WMI_SET_PARAMS_REPLY_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SET_PARAMS_REPLY_EVENTID\n"); break; case WMI_ADDBA_REQ_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_REQ_EVENTID\n"); - ret = ath6kl_wmi_addba_req_event_rx(wmi, datap, len, vif); + ret = ath6kl_wmi_addba_req_event_rx(wmi, datap, len); break; case WMI_ADDBA_RESP_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_RESP_EVENTID\n"); break; case WMI_DELBA_REQ_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DELBA_REQ_EVENTID\n"); - ret = ath6kl_wmi_delba_req_event_rx(wmi, datap, len, vif); + ret = ath6kl_wmi_delba_req_event_rx(wmi, datap, len); break; case WMI_REPORT_BTCOEX_CONFIG_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, @@ -3467,21 +3038,21 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) break; case WMI_REMAIN_ON_CHNL_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REMAIN_ON_CHNL_EVENTID\n"); - ret = ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len, vif); + ret = ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len); break; case WMI_CANCEL_REMAIN_ON_CHNL_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CANCEL_REMAIN_ON_CHNL_EVENTID\n"); ret = ath6kl_wmi_cancel_remain_on_chnl_event_rx(wmi, datap, - len, vif); + len); break; case WMI_TX_STATUS_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_STATUS_EVENTID\n"); - ret = ath6kl_wmi_tx_status_event_rx(wmi, datap, len, vif); + ret = ath6kl_wmi_tx_status_event_rx(wmi, datap, len); break; case WMI_RX_PROBE_REQ_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_PROBE_REQ_EVENTID\n"); - ret = ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len, vif); + ret = ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len); break; case WMI_P2P_CAPABILITIES_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_CAPABILITIES_EVENTID\n"); @@ -3489,7 +3060,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) break; case WMI_RX_ACTION_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_ACTION_EVENTID\n"); - ret = ath6kl_wmi_rx_action_event_rx(wmi, datap, len, vif); + ret = ath6kl_wmi_rx_action_event_rx(wmi, datap, len); break; case WMI_P2P_INFO_EVENTID: ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_INFO_EVENTID\n"); @@ -3497,6 +3068,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) break; default: ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", id); + wmi->stat.cmd_id_err++; ret = -EINVAL; break; } @@ -3506,8 +3078,11 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) return ret; } -void ath6kl_wmi_reset(struct wmi *wmi) +static void ath6kl_wmi_qos_state_init(struct wmi *wmi) { + if (!wmi) + return; + spin_lock_bh(&wmi->lock); wmi->fat_pipe_exist = 0; @@ -3528,9 +3103,16 @@ void *ath6kl_wmi_init(struct ath6kl *dev) wmi->parent_dev = dev; + ath6kl_wmi_qos_state_init(wmi); + wmi->pwr_mode = REC_POWER; + wmi->phy_mode = WMI_11G_MODE; + + wmi->pair_crypto_type = NONE_CRYPT; + wmi->grp_crypto_type = NONE_CRYPT; - ath6kl_wmi_reset(wmi); + wmi->ht_allowed[A_BAND_24GHZ] = 1; + wmi->ht_allowed[A_BAND_5GHZ] = 1; return wmi; } diff --git a/trunk/drivers/net/wireless/ath/ath6kl/wmi.h b/trunk/drivers/net/wireless/ath/ath6kl/wmi.h index 42ac311eda4e..f8e644d54aa7 100644 --- a/trunk/drivers/net/wireless/ath/ath6kl/wmi.h +++ b/trunk/drivers/net/wireless/ath/ath6kl/wmi.h @@ -93,6 +93,11 @@ struct sq_threshold_params { u8 last_rssi_poll_event; }; +struct wmi_stats { + u32 cmd_len_err; + u32 cmd_id_err; +}; + struct wmi_data_sync_bufs { u8 
traffic_class; struct sk_buff *skb; @@ -106,26 +111,32 @@ struct wmi_data_sync_bufs { #define WMM_AC_VO 3 /* voice */ struct wmi { + bool ready; u16 stream_exist_for_ac[WMM_NUM_AC]; u8 fat_pipe_exist; struct ath6kl *parent_dev; + struct wmi_stats stat; u8 pwr_mode; + u8 phy_mode; + u8 keep_alive_intvl; spinlock_t lock; enum htc_endpoint_id ep_id; struct sq_threshold_params sq_threshld[SIGNAL_QUALITY_METRICS_NUM_MAX]; + enum crypto_type pair_crypto_type; + enum crypto_type grp_crypto_type; bool is_wmm_enabled; + u8 ht_allowed[A_NUM_BANDS]; u8 traffic_class; bool is_probe_ssid; u8 *last_mgmt_tx_frame; size_t last_mgmt_tx_frame_len; - u8 saved_pwr_mode; }; struct host_app_area { - __le32 wmi_protocol_ver; -} __packed; + u32 wmi_protocol_ver; +}; enum wmi_msg_type { DATA_MSGTYPE = 0x0, @@ -173,8 +184,6 @@ enum wmi_data_hdr_data_type { #define WMI_DATA_HDR_META_MASK 0x7 #define WMI_DATA_HDR_META_SHIFT 13 -#define WMI_DATA_HDR_IF_IDX_MASK 0xF - struct wmi_data_hdr { s8 rssi; @@ -199,12 +208,6 @@ struct wmi_data_hdr { * b15:b13 - META_DATA_VERSION 0 - 7 */ __le16 info2; - - /* - * usage of info3, 16-bit: - * b3:b0 - Interface index - * b15:b4 - Reserved - */ __le16 info3; } __packed; @@ -247,11 +250,6 @@ static inline u8 wmi_data_hdr_get_meta(struct wmi_data_hdr *dhdr) WMI_DATA_HDR_META_MASK; } -static inline u8 wmi_data_hdr_get_if_idx(struct wmi_data_hdr *dhdr) -{ - return le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_IF_IDX_MASK; -} - /* Tx meta version definitions */ #define WMI_MAX_TX_META_SZ 12 #define WMI_META_VERSION_1 0x01 @@ -301,8 +299,6 @@ struct wmi_rx_meta_v2 { u8 csum_flags; } __packed; -#define WMI_CMD_HDR_IF_ID_MASK 0xF - /* Control Path */ struct wmi_cmd_hdr { __le16 cmd_id; @@ -316,11 +312,6 @@ struct wmi_cmd_hdr { __le16 reserved; } __packed; -static inline u8 wmi_cmd_hdr_get_if_idx(struct wmi_cmd_hdr *chdr) -{ - return le16_to_cpu(chdr->info1) & WMI_CMD_HDR_IF_ID_MASK; -} - /* List of WMI commands */ enum wmi_cmd_id { WMI_CONNECT_CMDID = 0x0001, @@ -329,10 +320,6 @@ enum wmi_cmd_id { WMI_SYNCHRONIZE_CMDID, WMI_CREATE_PSTREAM_CMDID, WMI_DELETE_PSTREAM_CMDID, - /* WMI_START_SCAN_CMDID is to be deprecated. Use - * WMI_BEGIN_SCAN_CMDID instead. The new cmd supports P2P mgmt - * operations using station interface. - */ WMI_START_SCAN_CMDID, WMI_SET_SCAN_PARAMS_CMDID, WMI_SET_BSS_FILTER_CMDID, @@ -546,61 +533,12 @@ enum wmi_cmd_id { WMI_GTK_OFFLOAD_OP_CMDID, WMI_REMAIN_ON_CHNL_CMDID, WMI_CANCEL_REMAIN_ON_CHNL_CMDID, - /* WMI_SEND_ACTION_CMDID is to be deprecated. Use - * WMI_SEND_MGMT_CMDID instead. The new cmd supports P2P mgmt - * operations using station interface. 
- */ WMI_SEND_ACTION_CMDID, WMI_PROBE_REQ_REPORT_CMDID, WMI_DISABLE_11B_RATES_CMDID, WMI_SEND_PROBE_RESPONSE_CMDID, WMI_GET_P2P_INFO_CMDID, WMI_AP_JOIN_BSS_CMDID, - - WMI_SMPS_ENABLE_CMDID, - WMI_SMPS_CONFIG_CMDID, - WMI_SET_RATECTRL_PARM_CMDID, - /* LPL specific commands*/ - WMI_LPL_FORCE_ENABLE_CMDID, - WMI_LPL_SET_POLICY_CMDID, - WMI_LPL_GET_POLICY_CMDID, - WMI_LPL_GET_HWSTATE_CMDID, - WMI_LPL_SET_PARAMS_CMDID, - WMI_LPL_GET_PARAMS_CMDID, - - WMI_SET_BUNDLE_PARAM_CMDID, - - /*GreenTx specific commands*/ - - WMI_GREENTX_PARAMS_CMDID, - - WMI_RTT_MEASREQ_CMDID, - WMI_RTT_CAPREQ_CMDID, - WMI_RTT_STATUSREQ_CMDID, - - /* WPS Commands */ - WMI_WPS_START_CMDID, - WMI_GET_WPS_STATUS_CMDID, - - /* More P2P commands */ - WMI_SET_NOA_CMDID, - WMI_GET_NOA_CMDID, - WMI_SET_OPPPS_CMDID, - WMI_GET_OPPPS_CMDID, - WMI_ADD_PORT_CMDID, - WMI_DEL_PORT_CMDID, - - /* 802.11w cmd */ - WMI_SET_RSN_CAP_CMDID, - WMI_GET_RSN_CAP_CMDID, - WMI_SET_IGTK_CMDID, - - WMI_RX_FILTER_COALESCE_FILTER_OP_CMDID, - WMI_RX_FILTER_SET_FRAME_TEST_LIST_CMDID, - - WMI_SEND_MGMT_CMDID, - WMI_BEGIN_SCAN_CMDID, - }; enum wmi_mgmt_frame_type { @@ -620,14 +558,6 @@ enum network_type { AP_NETWORK = 0x10, }; -enum network_subtype { - SUBTYPE_NONE, - SUBTYPE_BT, - SUBTYPE_P2PDEV, - SUBTYPE_P2PCLIENT, - SUBTYPE_P2PGO, -}; - enum dot11_auth_mode { OPEN_AUTH = 0x01, SHARED_AUTH = 0x02, @@ -646,6 +576,9 @@ enum auth_mode { WPA2_AUTH_CCKM = 0x40, }; +#define WMI_MIN_CRYPTO_TYPE NONE_CRYPT +#define WMI_MAX_CRYPTO_TYPE (AES_CRYPT + 1) + #define WMI_MIN_KEY_INDEX 0 #define WMI_MAX_KEY_INDEX 3 @@ -684,7 +617,6 @@ enum wmi_connect_ctrl_flags_bits { CONNECT_CSA_FOLLOW_BSS = 0x0020, CONNECT_DO_WPA_OFFLOAD = 0x0040, CONNECT_DO_NOT_DEAUTH = 0x0080, - CONNECT_WPS_FLAG = 0x0100, }; struct wmi_connect_cmd { @@ -700,7 +632,6 @@ struct wmi_connect_cmd { __le16 ch; u8 bssid[ETH_ALEN]; __le32 ctrl_flags; - u8 nw_subtype; } __packed; /* WMI_RECONNECT_CMDID */ @@ -788,43 +719,6 @@ enum wmi_scan_type { WMI_SHORT_SCAN = 1, }; -struct wmi_supp_rates { - u8 nrates; - u8 rates[ATH6KL_RATE_MAXSIZE]; -}; - -struct wmi_begin_scan_cmd { - __le32 force_fg_scan; - - /* for legacy cisco AP compatibility */ - __le32 is_legacy; - - /* max duration in the home channel(msec) */ - __le32 home_dwell_time; - - /* time interval between scans (msec) */ - __le32 force_scan_intvl; - - /* no CCK rates */ - __le32 no_cck; - - /* enum wmi_scan_type */ - u8 scan_type; - - /* Supported rates to advertise in the probe request frames */ - struct wmi_supp_rates supp_rates[IEEE80211_NUM_BANDS]; - - /* how many channels follow */ - u8 num_ch; - - /* channels in Mhz */ - __le16 ch_list[1]; -} __packed; - -/* wmi_start_scan_cmd is to be deprecated. Use - * wmi_begin_scan_cmd instead. The new structure supports P2P mgmt - * operations using station interface. - */ struct wmi_start_scan_cmd { __le32 force_fg_scan; @@ -847,6 +741,9 @@ struct wmi_start_scan_cmd { __le16 ch_list[1]; } __packed; +/* WMI_SET_SCAN_PARAMS_CMDID */ +#define WMI_SHORTSCANRATIO_DEFAULT 3 + /* * Warning: scan control flag value of 0xFF is used to disable * all flags in WMI_SCAN_PARAMS_CMD. 
Do not add any more @@ -879,6 +776,13 @@ enum wmi_scan_ctrl_flags_bits { ENABLE_SCAN_ABORT_EVENT = 0x40 }; +#define DEFAULT_SCAN_CTRL_FLAGS \ + (CONNECT_SCAN_CTRL_FLAGS | \ + SCAN_CONNECTED_CTRL_FLAGS | \ + ACTIVE_SCAN_CTRL_FLAGS | \ + ROAM_SCAN_CTRL_FLAGS | \ + ENABLE_AUTO_CTRL_FLAGS) + struct wmi_scan_params_cmd { /* sec */ __le16 fg_start_period; @@ -1461,20 +1365,14 @@ enum wmi_roam_ctrl { WMI_SET_LRSSI_SCAN_PARAMS, }; -enum wmi_roam_mode { - WMI_DEFAULT_ROAM_MODE = 1, /* RSSI based roam */ - WMI_HOST_BIAS_ROAM_MODE = 2, /* Host bias based roam */ - WMI_LOCK_BSS_MODE = 3, /* Lock to the current BSS */ -}; - struct bss_bias { u8 bssid[ETH_ALEN]; - s8 bias; + u8 bias; } __packed; struct bss_bias_info { u8 num_bss; - struct bss_bias bss_bias[0]; + struct bss_bias bss_bias[1]; } __packed; struct low_rssi_scan_params { @@ -1487,11 +1385,10 @@ struct low_rssi_scan_params { struct roam_ctrl_cmd { union { - u8 bssid[ETH_ALEN]; /* WMI_FORCE_ROAM */ - u8 roam_mode; /* WMI_SET_ROAM_MODE */ - struct bss_bias_info bss; /* WMI_SET_HOST_BIAS */ - struct low_rssi_scan_params params; /* WMI_SET_LRSSI_SCAN_PARAMS - */ + u8 bssid[ETH_ALEN]; + u8 roam_mode; + struct bss_bias_info bss; + struct low_rssi_scan_params params; } __packed info; u8 roam_ctrl; } __packed; @@ -1558,10 +1455,6 @@ struct wmi_tkip_micerr_event { u8 is_mcast; } __packed; -enum wmi_scan_status { - WMI_SCAN_STATUS_SUCCESS = 0, -}; - /* WMI_SCAN_COMPLETE_EVENTID */ struct wmi_scan_complete_event { a_sle32 status; @@ -1742,12 +1635,6 @@ struct wmi_bss_roam_info { u8 reserved; } __packed; -struct wmi_target_roam_tbl { - __le16 roam_mode; - __le16 num_entries; - struct wmi_bss_roam_info info[]; -} __packed; - /* WMI_CAC_EVENTID */ enum cac_indication { CAC_INDICATION_ADMISSION = 0x00, @@ -1884,6 +1771,7 @@ struct wmi_set_appie_cmd { #define WSC_REG_ACTIVE 1 #define WSC_REG_INACTIVE 0 +#define WOW_MAX_FILTER_LISTS 1 #define WOW_MAX_FILTERS_PER_LIST 4 #define WOW_PATTERN_SIZE 64 #define WOW_MASK_SIZE 64 @@ -1906,52 +1794,17 @@ struct wmi_set_ip_cmd { __le32 ips[MAX_IP_ADDRS]; } __packed; -enum ath6kl_wow_filters { - WOW_FILTER_SSID = BIT(1), - WOW_FILTER_OPTION_MAGIC_PACKET = BIT(2), - WOW_FILTER_OPTION_EAP_REQ = BIT(3), - WOW_FILTER_OPTION_PATTERNS = BIT(4), - WOW_FILTER_OPTION_OFFLOAD_ARP = BIT(5), - WOW_FILTER_OPTION_OFFLOAD_NS = BIT(6), - WOW_FILTER_OPTION_OFFLOAD_GTK = BIT(7), - WOW_FILTER_OPTION_8021X_4WAYHS = BIT(8), - WOW_FILTER_OPTION_NLO_DISCVRY = BIT(9), - WOW_FILTER_OPTION_NWK_DISASSOC = BIT(10), - WOW_FILTER_OPTION_GTK_ERROR = BIT(11), - WOW_FILTER_OPTION_TEST_MODE = BIT(15), -}; - -enum ath6kl_host_mode { - ATH6KL_HOST_MODE_AWAKE, - ATH6KL_HOST_MODE_ASLEEP, -}; - -struct wmi_set_host_sleep_mode_cmd { - __le32 awake; - __le32 asleep; -} __packed; - -enum ath6kl_wow_mode { - ATH6KL_WOW_MODE_DISABLE, - ATH6KL_WOW_MODE_ENABLE, -}; - -struct wmi_set_wow_mode_cmd { - __le32 enable_wow; - __le32 filter; - __le16 host_req_delay; -} __packed; +/* WMI_GET_WOW_LIST_CMD reply */ +struct wmi_get_wow_list_reply { + /* number of patterns in reply */ + u8 num_filters; -struct wmi_add_wow_pattern_cmd { - u8 filter_list_id; - u8 filter_size; - u8 filter_offset; - u8 filter[0]; -} __packed; + /* this is filter # x of total num_filters */ + u8 this_filter_num; -struct wmi_del_wow_pattern_cmd { - __le16 filter_list_id; - __le16 filter_id; + u8 wow_mode; + u8 host_mode; + struct wow_filter wow_filters[1]; } __packed; /* WMI_SET_AKMP_PARAMS_CMD */ @@ -2052,7 +1905,7 @@ struct wmi_tx_complete_event { * !!! Warning !!! 
* -Changing the following values needs compilation of both driver and firmware */ -#define AP_MAX_NUM_STA 10 +#define AP_MAX_NUM_STA 8 /* Spl. AID used to set DTIM flag in the beacons */ #define MCAST_AID 0xFF @@ -2135,10 +1988,6 @@ struct wmi_remain_on_chnl_cmd { __le32 duration; } __packed; -/* wmi_send_action_cmd is to be deprecated. Use - * wmi_send_mgmt_cmd instead. The new structure supports P2P mgmt - * operations using station interface. - */ struct wmi_send_action_cmd { __le32 id; __le32 freq; @@ -2147,15 +1996,6 @@ struct wmi_send_action_cmd { u8 data[0]; } __packed; -struct wmi_send_mgmt_cmd { - __le32 id; - __le32 freq; - __le32 wait; - __le32 no_cck; - __le16 len; - u8 data[0]; -} __packed; - struct wmi_tx_status_event { __le32 id; u8 ack_status; @@ -2323,162 +2163,120 @@ int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb); int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb, u8 msg_type, bool more_data, enum wmi_data_hdr_data_type data_type, - u8 meta_ver, void *tx_meta_info, u8 if_idx); + u8 meta_ver, void *tx_meta_info); int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb); int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb); -int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx, - struct sk_buff *skb, u32 layer2_priority, - bool wmm_enabled, u8 *ac); +int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb, + u32 layer2_priority, bool wmm_enabled, + u8 *ac); int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb); -int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb, +int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb, enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag); -int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx, - enum network_type nw_type, +int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type, enum dot11_auth_mode dot11_auth_mode, enum auth_mode auth_mode, enum crypto_type pairwise_crypto, u8 pairwise_crypto_len, enum crypto_type group_crypto, u8 group_crypto_len, int ssid_len, u8 *ssid, - u8 *bssid, u16 channel, u32 ctrl_flags, - u8 nw_subtype); - -int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid, - u16 channel); -int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx); -int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx, - enum wmi_scan_type scan_type, - u32 force_fgscan, u32 is_legacy, - u32 home_dwell_time, u32 force_scan_interval, - s8 num_chan, u16 *ch_list); + u8 *bssid, u16 channel, u32 ctrl_flags); -int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx, - enum wmi_scan_type scan_type, +int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel); +int ath6kl_wmi_disconnect_cmd(struct wmi *wmi); +int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type, u32 force_fgscan, u32 is_legacy, u32 home_dwell_time, u32 force_scan_interval, - s8 num_chan, u16 *ch_list, u32 no_cck, - u32 *rates); - -int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx, u16 fg_start_sec, + s8 num_chan, u16 *ch_list); +int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec, u16 fg_end_sec, u16 bg_sec, u16 minact_chdw_msec, u16 maxact_chdw_msec, u16 pas_chdw_msec, u8 short_scan_ratio, u8 scan_ctrl_flag, u32 max_dfsch_act_time, u16 maxact_scan_per_ssid); -int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 if_idx, u8 filter, - u32 ie_mask); -int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag, +int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask); +int 
ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag, u8 ssid_len, u8 *ssid); -int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx, - u16 listen_interval, +int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval, u16 listen_beacons); -int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode); -int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period, +int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode); +int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period, u16 ps_poll_num, u16 dtim_policy, u16 tx_wakup_policy, u16 num_tx_to_wakeup, u16 ps_fail_event_policy); -int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx, +int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout); +int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, struct wmi_create_pstream_cmd *pstream); -int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class, - u8 tsid); -int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout); +int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid); int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold); -int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 if_idx, u8 status, +int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status, u8 preamble_policy); int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source); int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config); -int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx); -int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index, +int ath6kl_wmi_get_stats_cmd(struct wmi *wmi); +int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index, enum crypto_type key_type, u8 key_usage, u8 key_len, - u8 *key_rsc, unsigned int key_rsc_len, - u8 *key_material, + u8 *key_rsc, u8 *key_material, u8 key_op_ctrl, u8 *mac_addr, enum wmi_sync_flag sync_flag); -int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk); -int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index); -int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid, +int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk); +int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index); +int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid, const u8 *pmkid, bool set); -int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 if_idx, u8 dbM); -int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi, u8 if_idx); -int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi); +int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM); +int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi); -int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg); -int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx, - u8 keep_alive_intvl); +int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg); +int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl); int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len); s32 ath6kl_wmi_get_rate(s8 rate_index); int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd); -int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx, - enum ath6kl_host_mode host_mode); -int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx, - enum ath6kl_wow_mode wow_mode, - u32 filter, u16 host_req_delay); -int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx, - u8 list_id, u8 filter_size, - u8 filter_offset, u8 *filter, u8 *mask); -int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx, - 
u16 list_id, u16 filter_id); int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi); -int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid); -int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode); /* AP mode */ -int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx, - struct wmi_connect_cmd *p); +int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p); -int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd, - const u8 *mac, u16 reason); +int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason); -int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid, bool flag); +int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag); -int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx, - u8 rx_meta_version, +int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_version, bool rx_dot11_hdr, bool defrag_on_host); -int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type, - const u8 *ie, u8 ie_len); +int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie, + u8 ie_len); /* P2P */ int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable); -int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq, - u32 dur); - -int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq, - u32 wait, const u8 *data, u16 data_len); - -int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq, - u32 wait, const u8 *data, u16 data_len, - u32 no_cck); +int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur); -int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq, - const u8 *dst, const u8 *data, - u16 data_len); +int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait, + const u8 *data, u16 data_len); -int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, u8 if_idx, bool enable); +int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq, + const u8 *dst, + const u8 *data, u16 data_len); -int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u8 if_idx, u32 info_req_flags); +int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable); -int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx); +int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags); -int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type, - const u8 *ie, u8 ie_len); +int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi); -void ath6kl_wmi_sscan_timer(unsigned long ptr); +int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie, + u8 ie_len); -struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx); void *ath6kl_wmi_init(struct ath6kl *devt); void ath6kl_wmi_shutdown(struct wmi *wmi); -void ath6kl_wmi_reset(struct wmi *wmi); #endif /* WMI_H */ diff --git a/trunk/drivers/net/wireless/ath/ath9k/Kconfig b/trunk/drivers/net/wireless/ath/ath9k/Kconfig index dc6be4afe8eb..d9c08c619a3a 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/Kconfig +++ b/trunk/drivers/net/wireless/ath/ath9k/Kconfig @@ -2,9 +2,6 @@ config ATH9K_HW tristate config ATH9K_COMMON tristate -config ATH9K_DFS_DEBUGFS - def_bool y - depends on ATH9K_DEBUGFS && ATH9K_DFS_CERTIFIED config ATH9K tristate "Atheros 802.11n wireless cards support" @@ -28,7 +25,6 @@ config ATH9K config ATH9K_PCI bool "Atheros ath9k PCI/PCIe bus support" - default y depends on ATH9K && PCI ---help--- This option enables the PCI bus support in ath9k. 
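/*
 * Editor's illustrative sketch, not part of the patch above: with
 * "default y" dropped from ATH9K_PCI, the symbol behaves like any other
 * Kconfig bool and is consumed at build time.  The helper below is
 * hypothetical (ath9k_bus_name() does not exist in the driver) and only
 * demonstrates the CONFIG_ATH9K_PCI gating pattern that the Kconfig and
 * Makefile hunks in this patch rely on.
 */
static const char *ath9k_bus_name(void)
{
#ifdef CONFIG_ATH9K_PCI
	return "pci";	/* pci.o is built, see the Makefile hunk below */
#else
	return "ahb";	/* PCI support compiled out of the driver */
#endif
}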
@@ -54,25 +50,6 @@ config ATH9K_DEBUGFS Also required for changing debug message flags at run time. -config ATH9K_DFS_CERTIFIED - bool "Atheros DFS support for certified platforms" - depends on ATH9K && EXPERT - default n - ---help--- - This option enables DFS support for initiating radiation on - ath9k. There is no way to dynamically detect if a card was DFS - certified and as such this is left as a build time option. This - option should only be enabled by system integrators that can - guarantee that all the platforms that their kernel will run on - have obtained appropriate regulatory body certification for a - respective Atheros card by using ath9k on the target shipping - platforms. - - This is currently only a placeholder for future DFS support, - as DFS support requires more components that still need to be - developed. At this point enabling this option won't do anything - except increase code size. - config ATH9K_RATE_CONTROL bool "Atheros ath9k rate control" depends on ATH9K @@ -81,14 +58,6 @@ config ATH9K_RATE_CONTROL Say Y, if you want to use the ath9k specific rate control module instead of minstrel_ht. -config ATH9K_BTCOEX_SUPPORT - bool "Atheros ath9k bluetooth coexistence support" - depends on ATH9K - default y - ---help--- - Say Y, if you want to use the ath9k radios together with - Bluetooth modules in the same system. - config ATH9K_HTC tristate "Atheros HTC based wireless cards support" depends on USB && MAC80211 diff --git a/trunk/drivers/net/wireless/ath/ath9k/Makefile b/trunk/drivers/net/wireless/ath/ath9k/Makefile index da02242499af..36ed3c46fec6 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/Makefile +++ b/trunk/drivers/net/wireless/ath/ath9k/Makefile @@ -4,14 +4,11 @@ ath9k-y += beacon.o \ main.o \ recv.o \ xmit.o \ - mci.o \ ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o ath9k-$(CONFIG_ATH9K_PCI) += pci.o ath9k-$(CONFIG_ATH9K_AHB) += ahb.o ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o -ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o -ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o obj-$(CONFIG_ATH9K) += ath9k.o @@ -36,8 +33,7 @@ ath9k_hw-y:= \ ar9002_mac.o \ ar9003_mac.o \ ar9003_eeprom.o \ - ar9003_paprd.o \ - ar9003_mci.o + ar9003_paprd.o obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o diff --git a/trunk/drivers/net/wireless/ath/ath9k/ani.c b/trunk/drivers/net/wireless/ath/ath9k/ani.c index bc56f57b393b..a639b94f7643 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ani.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ani.c @@ -136,8 +136,8 @@ static void ath9k_ani_restart(struct ath_hw *ah) cck_base = AR_PHY_COUNTMAX - ah->config.cck_trig_high; } - ath_dbg(common, ANI, "Writing ofdmbase=%u cckbase=%u\n", - ofdm_base, cck_base); + ath_dbg(common, ATH_DBG_ANI, + "Writing ofdmbase=%u cckbase=%u\n", ofdm_base, cck_base); ENABLE_REGWRITE_BUFFER(ah); @@ -268,7 +268,8 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel) aniState->noiseFloor = BEACON_RSSI(ah); - ath_dbg(common, ANI, "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n", + ath_dbg(common, ATH_DBG_ANI, + "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n", aniState->ofdmNoiseImmunityLevel, immunityLevel, aniState->noiseFloor, aniState->rssiThrLow, aniState->rssiThrHigh); @@ -335,7 +336,8 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel) const struct ani_cck_level_entry *entry_cck; aniState->noiseFloor = BEACON_RSSI(ah); - ath_dbg(common, ANI, "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n", + ath_dbg(common, ATH_DBG_ANI, + "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n", 
aniState->cckNoiseImmunityLevel, immunityLevel, aniState->noiseFloor, aniState->rssiThrLow, aniState->rssiThrHigh); @@ -479,7 +481,8 @@ static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning) if (ah->opmode != NL80211_IFTYPE_STATION && ah->opmode != NL80211_IFTYPE_ADHOC) { - ath_dbg(common, ANI, "Reset ANI state opmode %u\n", ah->opmode); + ath_dbg(common, ATH_DBG_ANI, + "Reset ANI state opmode %u\n", ah->opmode); ah->stats.ast_ani_reset++; if (ah->opmode == NL80211_IFTYPE_AP) { @@ -579,7 +582,7 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning) ATH9K_ANI_OFDM_DEF_LEVEL || aniState->cckNoiseImmunityLevel != ATH9K_ANI_CCK_DEF_LEVEL) { - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "Restore defaults: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n", ah->opmode, chan->channel, @@ -596,7 +599,7 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning) /* * restore historical levels for this channel */ - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "Restore history: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n", ah->opmode, chan->channel, @@ -659,7 +662,7 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah) if (!use_new_ani(ah) && (phyCnt1 < ofdm_base || phyCnt2 < cck_base)) { if (phyCnt1 < ofdm_base) { - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "phyCnt1 0x%x, resetting counter value to 0x%x\n", phyCnt1, ofdm_base); REG_WRITE(ah, AR_PHY_ERR_1, ofdm_base); @@ -667,7 +670,7 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah) AR_PHY_ERR_OFDM_TIMING); } if (phyCnt2 < cck_base) { - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "phyCnt2 0x%x, resetting counter value to 0x%x\n", phyCnt2, cck_base); REG_WRITE(ah, AR_PHY_ERR_2, cck_base); @@ -710,7 +713,7 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan) cckPhyErrRate = aniState->cckPhyErrCount * 1000 / aniState->listenTime; - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "listenTime=%d OFDM:%d errs=%d/s CCK:%d errs=%d/s ofdm_turn=%d\n", aniState->listenTime, aniState->ofdmNoiseImmunityLevel, @@ -745,7 +748,7 @@ void ath9k_enable_mib_counters(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); - ath_dbg(common, ANI, "Enable MIB counters\n"); + ath_dbg(common, ATH_DBG_ANI, "Enable MIB counters\n"); ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); @@ -767,7 +770,7 @@ void ath9k_hw_disable_mib_counters(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); - ath_dbg(common, ANI, "Disable MIB counters\n"); + ath_dbg(common, ATH_DBG_ANI, "Disable MIB counters\n"); REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC); ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); @@ -842,7 +845,7 @@ void ath9k_hw_ani_init(struct ath_hw *ah) struct ath_common *common = ath9k_hw_common(ah); int i; - ath_dbg(common, ANI, "Initialize ANI\n"); + ath_dbg(common, ATH_DBG_ANI, "Initialize ANI\n"); if (use_new_ani(ah)) { ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_NEW; diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/trunk/drivers/net/wireless/ath/ath9k/ar5008_phy.c index f901a17f76ba..f199e9e25149 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ -158,7 +158,7 @@ static void ar5008_hw_force_bias(struct ath_hw *ah, u16 synth_freq) /* pre-reverse this field */ tmp_reg = ath9k_hw_reverse_bits(new_bias, 3); - ath_dbg(common, CONFIG, "Force rf_pwd_icsyndiv to %1d on %4d\n", + ath_dbg(common, ATH_DBG_CONFIG, "Force rf_pwd_icsyndiv to %1d on %4d\n", 
new_bias, synth_freq); /* swizzle rf_pwd_icsyndiv */ @@ -1053,7 +1053,8 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah, u32 level = param; if (level >= ARRAY_SIZE(ah->totalSizeDesired)) { - ath_dbg(common, ANI, "level out of range (%u > %zu)\n", + ath_dbg(common, ATH_DBG_ANI, + "level out of range (%u > %zu)\n", level, ARRAY_SIZE(ah->totalSizeDesired)); return false; } @@ -1156,7 +1157,8 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah, u32 level = param; if (level >= ARRAY_SIZE(firstep)) { - ath_dbg(common, ANI, "level out of range (%u > %zu)\n", + ath_dbg(common, ATH_DBG_ANI, + "level out of range (%u > %zu)\n", level, ARRAY_SIZE(firstep)); return false; } @@ -1175,7 +1177,8 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah, u32 level = param; if (level >= ARRAY_SIZE(cycpwrThr1)) { - ath_dbg(common, ANI, "level out of range (%u > %zu)\n", + ath_dbg(common, ATH_DBG_ANI, + "level out of range (%u > %zu)\n", level, ARRAY_SIZE(cycpwrThr1)); return false; } @@ -1192,22 +1195,23 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah, case ATH9K_ANI_PRESENT: break; default: - ath_dbg(common, ANI, "invalid cmd %u\n", cmd); + ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd); return false; } - ath_dbg(common, ANI, "ANI parameters:\n"); - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "ANI parameters:\n"); + ath_dbg(common, ATH_DBG_ANI, "noiseImmunityLevel=%d, spurImmunityLevel=%d, ofdmWeakSigDetectOff=%d\n", aniState->noiseImmunityLevel, aniState->spurImmunityLevel, !aniState->ofdmWeakSigDetectOff); - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "cckWeakSigThreshold=%d, firstepLevel=%d, listenTime=%d\n", aniState->cckWeakSigThreshold, aniState->firstepLevel, aniState->listenTime); - ath_dbg(common, ANI, "ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n", + ath_dbg(common, ATH_DBG_ANI, + "ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n", aniState->ofdmPhyErrCount, aniState->cckPhyErrCount); @@ -1291,7 +1295,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah, AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); if (!on != aniState->ofdmWeakSigDetectOff) { - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "** ch %d: ofdm weak signal: %s=>%s\n", chan->channel, !aniState->ofdmWeakSigDetectOff ? 
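/*
 * Editor's illustrative sketch, not part of the patch: the ath_dbg()
 * rewrites in these hunks only rename the debug-mask argument back to its
 * ATH_DBG_* spelling.  The macro below is a simplified stand-in for that
 * mask-filtering mechanism -- the real ath_dbg() lives in ath/ath.h and
 * differs in detail, and the bit values used here are placeholders.
 */
#include <stdio.h>

#define ATH_DBG_CONFIG		0x00000001u	/* placeholder bit value */
#define ATH_DBG_ANI		0x00000002u	/* placeholder bit value */
#define ATH_DBG_CALIBRATE	0x00000004u	/* placeholder bit value */

static unsigned int ath_debug_mask = ATH_DBG_ANI | ATH_DBG_CALIBRATE;

#define ath_dbg_sketch(mask, fmt, ...)					\
	do {								\
		if (ath_debug_mask & (mask))				\
			printf(fmt, ##__VA_ARGS__);			\
	} while (0)

/* usage mirroring the hunks above:
 *	ath_dbg_sketch(ATH_DBG_ANI, "Initialize ANI\n");
 *	ath_dbg_sketch(ATH_DBG_CONFIG,
 *		       "Force rf_pwd_icsyndiv to %1d on %4d\n",
 *		       new_bias, synth_freq);
 */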
@@ -1309,7 +1313,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah, u32 level = param; if (level >= ARRAY_SIZE(firstep_table)) { - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n", level, ARRAY_SIZE(firstep_table)); return false; @@ -1346,7 +1350,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah, AR_PHY_FIND_SIG_FIRSTEP_LOW, value2); if (level != aniState->firstepLevel) { - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "** ch %d: level %d=>%d[def:%d] firstep[level]=%d ini=%d\n", chan->channel, aniState->firstepLevel, @@ -1354,7 +1358,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah, ATH9K_ANI_FIRSTEP_LVL_NEW, value, aniState->iniDef.firstep); - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "** ch %d: level %d=>%d[def:%d] firstep_low[level]=%d ini=%d\n", chan->channel, aniState->firstepLevel, @@ -1374,7 +1378,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah, u32 level = param; if (level >= ARRAY_SIZE(cycpwrThr1_table)) { - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n", level, ARRAY_SIZE(cycpwrThr1_table)); return false; @@ -1410,7 +1414,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah, AR_PHY_EXT_TIMING5_CYCPWR_THR1, value2); if (level != aniState->spurImmunityLevel) { - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "** ch %d: level %d=>%d[def:%d] cycpwrThr1[level]=%d ini=%d\n", chan->channel, aniState->spurImmunityLevel, @@ -1418,7 +1422,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah, ATH9K_ANI_SPUR_IMMUNE_LVL_NEW, value, aniState->iniDef.cycpwrThr1); - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "** ch %d: level %d=>%d[def:%d] cycpwrThr1Ext[level]=%d ini=%d\n", chan->channel, aniState->spurImmunityLevel, @@ -1444,11 +1448,11 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah, case ATH9K_ANI_PRESENT: break; default: - ath_dbg(common, ANI, "invalid cmd %u\n", cmd); + ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd); return false; } - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n", aniState->spurImmunityLevel, !aniState->ofdmWeakSigDetectOff ? 
"on" : "off", @@ -1502,7 +1506,7 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah) iniDef = &aniState->iniDef; - ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n", + ath_dbg(common, ATH_DBG_ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n", ah->hw_version.macVersion, ah->hw_version.macRev, ah->opmode, diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/trunk/drivers/net/wireless/ath/ath9k/ar9002_calib.c index c55e5bbafc46..88279e325dca 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9002_calib.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9002_calib.c @@ -61,16 +61,18 @@ static void ar9002_hw_setup_calibration(struct ath_hw *ah, switch (currCal->calData->calType) { case IQ_MISMATCH_CAL: REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ); - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "starting IQ Mismatch Calibration\n"); break; case ADC_GAIN_CAL: REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN); - ath_dbg(common, CALIBRATE, "starting ADC Gain Calibration\n"); + ath_dbg(common, ATH_DBG_CALIBRATE, + "starting ADC Gain Calibration\n"); break; case ADC_DC_CAL: REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER); - ath_dbg(common, CALIBRATE, "starting ADC DC Calibration\n"); + ath_dbg(common, ATH_DBG_CALIBRATE, + "starting ADC DC Calibration\n"); break; } @@ -127,7 +129,7 @@ static void ar9002_hw_iqcal_collect(struct ath_hw *ah) REG_READ(ah, AR_PHY_CAL_MEAS_1(i)); ah->totalIqCorrMeas[i] += (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i)); - ath_dbg(ath9k_hw_common(ah), CALIBRATE, + ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n", ah->cal_samples, i, ah->totalPowerMeasI[i], ah->totalPowerMeasQ[i], @@ -149,7 +151,7 @@ static void ar9002_hw_adc_gaincal_collect(struct ath_hw *ah) ah->totalAdcQEvenPhase[i] += REG_READ(ah, AR_PHY_CAL_MEAS_3(i)); - ath_dbg(ath9k_hw_common(ah), CALIBRATE, + ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, "%d: Chn %d oddi=0x%08x; eveni=0x%08x; oddq=0x%08x; evenq=0x%08x;\n", ah->cal_samples, i, ah->totalAdcIOddPhase[i], @@ -173,7 +175,7 @@ static void ar9002_hw_adc_dccal_collect(struct ath_hw *ah) ah->totalAdcDcOffsetQEvenPhase[i] += (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i)); - ath_dbg(ath9k_hw_common(ah), CALIBRATE, + ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, "%d: Chn %d oddi=0x%08x; eveni=0x%08x; oddq=0x%08x; evenq=0x%08x;\n", ah->cal_samples, i, ah->totalAdcDcOffsetIOddPhase[i], @@ -196,12 +198,12 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) powerMeasQ = ah->totalPowerMeasQ[i]; iqCorrMeas = ah->totalIqCorrMeas[i]; - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "Starting IQ Cal and Correction for Chain %d\n", i); - ath_dbg(common, CALIBRATE, - "Original: Chn %d iq_corr_meas = 0x%08x\n", + ath_dbg(common, ATH_DBG_CALIBRATE, + "Orignal: Chn %diq_corr_meas = 0x%08x\n", i, ah->totalIqCorrMeas[i]); iqCorrNeg = 0; @@ -211,11 +213,12 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) iqCorrNeg = 1; } - ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_i = 0x%08x\n", - i, powerMeasI); - ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_q = 0x%08x\n", - i, powerMeasQ); - ath_dbg(common, CALIBRATE, "iqCorrNeg is 0x%08x\n", iqCorrNeg); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ); + ath_dbg(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n", + iqCorrNeg); iCoffDenom = (powerMeasI / 
2 + powerMeasQ / 2) / 128; qCoffDenom = powerMeasQ / 64; @@ -224,13 +227,13 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) (qCoffDenom != 0)) { iCoff = iqCorrMeas / iCoffDenom; qCoff = powerMeasI / qCoffDenom - 64; - ath_dbg(common, CALIBRATE, "Chn %d iCoff = 0x%08x\n", - i, iCoff); - ath_dbg(common, CALIBRATE, "Chn %d qCoff = 0x%08x\n", - i, qCoff); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Chn %d iCoff = 0x%08x\n", i, iCoff); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Chn %d qCoff = 0x%08x\n", i, qCoff); iCoff = iCoff & 0x3f; - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "New: Chn %d iCoff = 0x%08x\n", i, iCoff); if (iqCorrNeg == 0x0) iCoff = 0x40 - iCoff; @@ -240,7 +243,7 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) else if (qCoff <= -16) qCoff = -16; - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "Chn %d : iCoff = 0x%x qCoff = 0x%x\n", i, iCoff, qCoff); @@ -250,7 +253,7 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i), AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF, qCoff); - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "IQ Cal and Correction done for Chain %d\n", i); } @@ -272,17 +275,21 @@ static void ar9002_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains) qOddMeasOffset = ah->totalAdcQOddPhase[i]; qEvenMeasOffset = ah->totalAdcQEvenPhase[i]; - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "Starting ADC Gain Cal for Chain %d\n", i); - ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_i = 0x%08x\n", - i, iOddMeasOffset); - ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_i = 0x%08x\n", - i, iEvenMeasOffset); - ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_q = 0x%08x\n", - i, qOddMeasOffset); - ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_q = 0x%08x\n", - i, qEvenMeasOffset); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_odd_i = 0x%08x\n", i, + iOddMeasOffset); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_even_i = 0x%08x\n", i, + iEvenMeasOffset); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_odd_q = 0x%08x\n", i, + qOddMeasOffset); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_even_q = 0x%08x\n", i, + qEvenMeasOffset); if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) { iGainMismatch = @@ -292,19 +299,19 @@ static void ar9002_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains) ((qOddMeasOffset * 32) / qEvenMeasOffset) & 0x3f; - ath_dbg(common, CALIBRATE, - "Chn %d gain_mismatch_i = 0x%08x\n", - i, iGainMismatch); - ath_dbg(common, CALIBRATE, - "Chn %d gain_mismatch_q = 0x%08x\n", - i, qGainMismatch); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Chn %d gain_mismatch_i = 0x%08x\n", i, + iGainMismatch); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Chn %d gain_mismatch_q = 0x%08x\n", i, + qGainMismatch); val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i)); val &= 0xfffff000; val |= (qGainMismatch) | (iGainMismatch << 6); REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val); - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "ADC Gain Cal done for Chain %d\n", i); } } @@ -330,36 +337,40 @@ static void ar9002_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains) qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i]; qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i]; - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "Starting ADC DC Offset Cal for Chain %d\n", i); - ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_i = %d\n", - i, iOddMeasOffset); - 
ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_i = %d\n", - i, iEvenMeasOffset); - ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_q = %d\n", - i, qOddMeasOffset); - ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_q = %d\n", - i, qEvenMeasOffset); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_odd_i = %d\n", i, + iOddMeasOffset); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_even_i = %d\n", i, + iEvenMeasOffset); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_odd_q = %d\n", i, + qOddMeasOffset); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_even_q = %d\n", i, + qEvenMeasOffset); iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) / numSamples) & 0x1ff; qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) / numSamples) & 0x1ff; - ath_dbg(common, CALIBRATE, - "Chn %d dc_offset_mismatch_i = 0x%08x\n", - i, iDcMismatch); - ath_dbg(common, CALIBRATE, - "Chn %d dc_offset_mismatch_q = 0x%08x\n", - i, qDcMismatch); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Chn %d dc_offset_mismatch_i = 0x%08x\n", i, + iDcMismatch); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Chn %d dc_offset_mismatch_q = 0x%08x\n", i, + qDcMismatch); val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i)); val &= 0xc0000fff; val |= (qDcMismatch << 12) | (iDcMismatch << 21); REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val); - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "ADC DC Offset Cal done for Chain %d\n", i); } @@ -549,7 +560,7 @@ static inline void ar9285_hw_pa_cal(struct ath_hw *ah, bool is_reset) { 0x7838, 0 }, }; - ath_dbg(common, CALIBRATE, "Running PA Calibration\n"); + ath_dbg(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n"); /* PA CAL is not needed for high power solution */ if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) == @@ -730,7 +741,7 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan) REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL); if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) { - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "offset calibration failed to complete in 1ms; noisy environment?\n"); return false; } @@ -744,7 +755,7 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan) REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL); if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) { - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "offset calibration failed to complete in 1ms; noisy environment?\n"); return false; } @@ -840,7 +851,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan) if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) { - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "offset calibration failed to complete in 1ms; noisy environment?\n"); return false; } @@ -875,21 +886,22 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan) if (ar9002_hw_is_cal_supported(ah, chan, ADC_GAIN_CAL)) { INIT_CAL(&ah->adcgain_caldata); INSERT_CAL(ah, &ah->adcgain_caldata); - ath_dbg(common, CALIBRATE, - "enabling ADC Gain Calibration\n"); + ath_dbg(common, ATH_DBG_CALIBRATE, + "enabling ADC Gain Calibration.\n"); } if (ar9002_hw_is_cal_supported(ah, chan, ADC_DC_CAL)) { INIT_CAL(&ah->adcdc_caldata); INSERT_CAL(ah, &ah->adcdc_caldata); - ath_dbg(common, CALIBRATE, - "enabling ADC DC Calibration\n"); + ath_dbg(common, ATH_DBG_CALIBRATE, + "enabling ADC DC 
Calibration.\n"); } if (ar9002_hw_is_cal_supported(ah, chan, IQ_MISMATCH_CAL)) { INIT_CAL(&ah->iq_caldata); INSERT_CAL(ah, &ah->iq_caldata); - ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n"); + ath_dbg(common, ATH_DBG_CALIBRATE, + "enabling IQ Calibration.\n"); } ah->cal_list_curr = ah->cal_list; diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/trunk/drivers/net/wireless/ath/ath9k/ar9002_mac.c index 7b6417b5212e..b5920168606d 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9002_mac.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9002_mac.c @@ -107,7 +107,7 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) } if (isr & AR_ISR_RXORN) { - ath_dbg(common, INTERRUPT, + ath_dbg(common, ATH_DBG_INTERRUPT, "receive FIFO overrun interrupt\n"); } @@ -143,24 +143,24 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) if (fatal_int) { if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) { - ath_dbg(common, ANY, + ath_dbg(common, ATH_DBG_ANY, "received PCI FATAL interrupt\n"); } if (sync_cause & AR_INTR_SYNC_HOST1_PERR) { - ath_dbg(common, ANY, + ath_dbg(common, ATH_DBG_ANY, "received PCI PERR interrupt\n"); } *masked |= ATH9K_INT_FATAL; } if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) { - ath_dbg(common, INTERRUPT, + ath_dbg(common, ATH_DBG_INTERRUPT, "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n"); REG_WRITE(ah, AR_RC, AR_RC_HOSTIF); REG_WRITE(ah, AR_RC, 0); *masked |= ATH9K_INT_FATAL; } if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) { - ath_dbg(common, INTERRUPT, + ath_dbg(common, ATH_DBG_INTERRUPT, "AR_INTR_SYNC_LOCAL_TIMEOUT\n"); } diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/trunk/drivers/net/wireless/ath/ath9k/ar9003_calib.c index 8e70f0bc073e..12a730dcb500 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -18,7 +18,6 @@ #include "hw-ops.h" #include "ar9003_phy.h" #include "ar9003_rtt.h" -#include "ar9003_mci.h" #define MAX_MEASUREMENT MAX_IQCAL_MEASUREMENT #define MAX_MAG_DELTA 11 @@ -52,7 +51,7 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah, currCal->calData->calCountMax); REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ); - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "starting IQ Mismatch Calibration\n"); /* Kick-off cal */ @@ -64,7 +63,7 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah, REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM, AR_PHY_65NM_CH0_THERM_START, 1); - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "starting Temperature Compensation Calibration\n"); break; } @@ -194,7 +193,7 @@ static void ar9003_hw_iqcal_collect(struct ath_hw *ah) REG_READ(ah, AR_PHY_CAL_MEAS_1(i)); ah->totalIqCorrMeas[i] += (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i)); - ath_dbg(ath9k_hw_common(ah), CALIBRATE, + ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n", ah->cal_samples, i, ah->totalPowerMeasI[i], ah->totalPowerMeasQ[i], @@ -221,11 +220,12 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) powerMeasQ = ah->totalPowerMeasQ[i]; iqCorrMeas = ah->totalIqCorrMeas[i]; - ath_dbg(common, CALIBRATE, - "Starting IQ Cal and Correction for Chain %d\n", i); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Starting IQ Cal and Correction for Chain %d\n", + i); - ath_dbg(common, CALIBRATE, - "Original: Chn %d iq_corr_meas = 0x%08x\n", + ath_dbg(common, ATH_DBG_CALIBRATE, + "Orignal: Chn %diq_corr_meas = 0x%08x\n", i, ah->totalIqCorrMeas[i]); iqCorrNeg = 0; 
@@ -235,11 +235,12 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) iqCorrNeg = 1; } - ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_i = 0x%08x\n", - i, powerMeasI); - ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_q = 0x%08x\n", - i, powerMeasQ); - ath_dbg(common, CALIBRATE, "iqCorrNeg is 0x%08x\n", iqCorrNeg); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ); + ath_dbg(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n", + iqCorrNeg); iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 256; qCoffDenom = powerMeasQ / 64; @@ -247,10 +248,10 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) if ((iCoffDenom != 0) && (qCoffDenom != 0)) { iCoff = iqCorrMeas / iCoffDenom; qCoff = powerMeasI / qCoffDenom - 64; - ath_dbg(common, CALIBRATE, "Chn %d iCoff = 0x%08x\n", - i, iCoff); - ath_dbg(common, CALIBRATE, "Chn %d qCoff = 0x%08x\n", - i, qCoff); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Chn %d iCoff = 0x%08x\n", i, iCoff); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Chn %d qCoff = 0x%08x\n", i, qCoff); /* Force bounds on iCoff */ if (iCoff >= 63) @@ -271,10 +272,10 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) iCoff = iCoff & 0x7f; qCoff = qCoff & 0x7f; - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "Chn %d : iCoff = 0x%x qCoff = 0x%x\n", i, iCoff, qCoff); - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "Register offset (0x%04x) before update = 0x%x\n", offset_array[i], REG_READ(ah, offset_array[i])); @@ -285,25 +286,25 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) REG_RMW_FIELD(ah, offset_array[i], AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF, qCoff); - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "Register offset (0x%04x) QI COFF (bitfields 0x%08x) after update = 0x%x\n", offset_array[i], AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF, REG_READ(ah, offset_array[i])); - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "Register offset (0x%04x) QQ COFF (bitfields 0x%08x) after update = 0x%x\n", offset_array[i], AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF, REG_READ(ah, offset_array[i])); - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "IQ Cal and Correction done for Chain %d\n", i); } } REG_SET_BIT(ah, AR_PHY_RX_IQCAL_CORR_B0, AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE); - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "IQ Cal and Correction (offset 0x%04x) enabled (bit position 0x%08x). 
New Value 0x%08x\n", (unsigned) (AR_PHY_RX_IQCAL_CORR_B0), AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE, @@ -347,7 +348,7 @@ static bool ar9003_hw_solve_iq_cal(struct ath_hw *ah, f2 = (f1 * f1 + f3 * f3) / result_shift; if (!f2) { - ath_dbg(common, CALIBRATE, "Divide by 0\n"); + ath_dbg(common, ATH_DBG_CALIBRATE, "Divide by 0\n"); return false; } @@ -468,7 +469,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah, if ((i2_p_q2_a0_d0 == 0) || (i2_p_q2_a0_d1 == 0) || (i2_p_q2_a1_d0 == 0) || (i2_p_q2_a1_d1 == 0)) { - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "Divide by 0:\n" "a0_d0=%d\n" "a0_d1=%d\n" @@ -508,7 +509,8 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah, mag2 = ar9003_hw_find_mag_approx(ah, cos_2phi_2, sin_2phi_2); if ((mag1 == 0) || (mag2 == 0)) { - ath_dbg(common, CALIBRATE, "Divide by 0: mag1=%d, mag2=%d\n", + ath_dbg(common, ATH_DBG_CALIBRATE, + "Divide by 0: mag1=%d, mag2=%d\n", mag1, mag2); return false; } @@ -526,8 +528,8 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah, mag_a0_d0, phs_a0_d0, mag_a1_d0, phs_a1_d0, solved_eq)) { - ath_dbg(common, CALIBRATE, - "Call to ar9003_hw_solve_iq_cal() failed\n"); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Call to ar9003_hw_solve_iq_cal() failed.\n"); return false; } @@ -536,12 +538,12 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah, mag_rx = solved_eq[2]; phs_rx = solved_eq[3]; - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "chain %d: mag mismatch=%d phase mismatch=%d\n", chain_idx, mag_tx/res_scale, phs_tx/res_scale); if (res_scale == mag_tx) { - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "Divide by 0: mag_tx=%d, res_scale=%d\n", mag_tx, res_scale); return false; @@ -554,7 +556,8 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah, q_q_coff = (mag_corr_tx * 128 / res_scale); q_i_coff = (phs_corr_tx * 256 / res_scale); - ath_dbg(common, CALIBRATE, "tx chain %d: mag corr=%d phase corr=%d\n", + ath_dbg(common, ATH_DBG_CALIBRATE, + "tx chain %d: mag corr=%d phase corr=%d\n", chain_idx, q_q_coff, q_i_coff); if (q_i_coff < -63) @@ -568,11 +571,12 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah, iqc_coeff[0] = (q_q_coff * 128) + q_i_coff; - ath_dbg(common, CALIBRATE, "tx chain %d: iq corr coeff=%x\n", + ath_dbg(common, ATH_DBG_CALIBRATE, + "tx chain %d: iq corr coeff=%x\n", chain_idx, iqc_coeff[0]); if (-mag_rx == res_scale) { - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "Divide by 0: mag_rx=%d, res_scale=%d\n", mag_rx, res_scale); return false; @@ -585,7 +589,8 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah, q_q_coff = (mag_corr_rx * 128 / res_scale); q_i_coff = (phs_corr_rx * 256 / res_scale); - ath_dbg(common, CALIBRATE, "rx chain %d: mag corr=%d phase corr=%d\n", + ath_dbg(common, ATH_DBG_CALIBRATE, + "rx chain %d: mag corr=%d phase corr=%d\n", chain_idx, q_q_coff, q_i_coff); if (q_i_coff < -63) @@ -599,7 +604,8 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah, iqc_coeff[1] = (q_q_coff * 128) + q_i_coff; - ath_dbg(common, CALIBRATE, "rx chain %d: iq corr coeff=%x\n", + ath_dbg(common, ATH_DBG_CALIBRATE, + "rx chain %d: iq corr coeff=%x\n", chain_idx, iqc_coeff[1]); return true; @@ -746,7 +752,8 @@ static bool ar9003_hw_tx_iq_cal_run(struct ath_hw *ah) if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START, AR_PHY_TX_IQCAL_START_DO_CAL, 0, AH_WAIT_TIMEOUT)) { - ath_dbg(common, CALIBRATE, "Tx IQ Cal is not completed\n"); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Tx IQ Cal is not completed.\n"); return false; } return true; @@ 
-784,13 +791,13 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable) nmeasurement = MAX_MEASUREMENT; for (im = 0; im < nmeasurement; im++) { - ath_dbg(common, CALIBRATE, - "Doing Tx IQ Cal for chain %d\n", i); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Doing Tx IQ Cal for chain %d.\n", i); if (REG_READ(ah, txiqcal_status[i]) & AR_PHY_TX_IQCAL_STATUS_FAILED) { - ath_dbg(common, CALIBRATE, - "Tx IQ Cal failed for chain %d\n", i); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Tx IQ Cal failed for chain %d.\n", i); goto tx_iqcal_fail; } @@ -816,16 +823,18 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable) iq_res[idx + 1] = 0xffff & REG_READ(ah, chan_info_tab[i] + offset); - ath_dbg(common, CALIBRATE, - "IQ_RES[%d]=0x%x IQ_RES[%d]=0x%x\n", + ath_dbg(common, ATH_DBG_CALIBRATE, + "IQ RES[%d]=0x%x" + "IQ_RES[%d]=0x%x\n", idx, iq_res[idx], idx + 1, iq_res[idx + 1]); } if (!ar9003_hw_calc_iq_corr(ah, i, iq_res, coeff.iqc_coeff)) { - ath_dbg(common, CALIBRATE, - "Failed in calculation of IQ correction\n"); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Failed in calculation of \ + IQ correction.\n"); goto tx_iqcal_fail; } @@ -845,7 +854,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable) return; tx_iqcal_fail: - ath_dbg(common, CALIBRATE, "Tx IQ Cal failed\n"); + ath_dbg(common, ATH_DBG_CALIBRATE, "Tx IQ Cal failed\n"); return; } @@ -925,12 +934,10 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah, { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_hw_cal_data *caldata = ah->caldata; - struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci; bool txiqcal_done = false, txclcal_done = false; bool is_reusable = true, status = true; bool run_rtt_cal = false, run_agc_cal; bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT); - bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI); u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL | AR_PHY_AGC_CONTROL_FLTR_CAL | AR_PHY_AGC_CONTROL_PKDET_CAL; @@ -943,7 +950,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah, if (!ar9003_hw_rtt_restore(ah, chan)) run_rtt_cal = true; - ath_dbg(common, CALIBRATE, "RTT restore %s\n", + ath_dbg(common, ATH_DBG_CALIBRATE, "RTT restore %s\n", run_rtt_cal ? "failed" : "succeed"); } run_agc_cal = run_rtt_cal; @@ -998,31 +1005,6 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah, } else if (caldata && !caldata->done_txiqcal_once) run_agc_cal = true; - if (mci && IS_CHAN_2GHZ(chan) && - (mci_hw->bt_state == MCI_BT_AWAKE) && - run_agc_cal && - !(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) { - - u32 pld[4] = {0, 0, 0, 0}; - - /* send CAL_REQ only when BT is AWAKE. 
*/ - ath_dbg(common, MCI, "MCI send WLAN_CAL_REQ 0x%x\n", - mci_hw->wlan_cal_seq); - MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_REQ); - pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_seq++; - ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false); - - /* Wait BT_CAL_GRANT for 50ms */ - ath_dbg(common, MCI, "MCI wait for BT_CAL_GRANT\n"); - - if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000)) - ath_dbg(common, MCI, "MCI got BT_CAL_GRANT\n"); - else { - is_reusable = false; - ath_dbg(common, MCI, "\nMCI BT is not responding\n"); - } - } - txiqcal_done = ar9003_hw_tx_iq_cal_run(ah); REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS); udelay(5); @@ -1040,21 +1022,6 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah, AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT); } - - if (mci && IS_CHAN_2GHZ(chan) && - (mci_hw->bt_state == MCI_BT_AWAKE) && - run_agc_cal && - !(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) { - - u32 pld[4] = {0, 0, 0, 0}; - - ath_dbg(common, MCI, "MCI Send WLAN_CAL_DONE 0x%x\n", - mci_hw->wlan_cal_done); - MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_DONE); - pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_done++; - ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false); - } - if (rtt && !run_rtt_cal) { agc_ctrl |= agc_supp_cals; REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl); @@ -1064,8 +1031,9 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah, if (run_rtt_cal) ar9003_hw_rtt_disable(ah); - ath_dbg(common, CALIBRATE, - "offset calibration failed to complete in 1ms; noisy environment?\n"); + ath_dbg(common, ATH_DBG_CALIBRATE, + "offset calibration failed to complete in 1ms;" + "noisy environment?\n"); return false; } @@ -1124,14 +1092,15 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah, if (ah->supp_cals & IQ_MISMATCH_CAL) { INIT_CAL(&ah->iq_caldata); INSERT_CAL(ah, &ah->iq_caldata); - ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n"); + ath_dbg(common, ATH_DBG_CALIBRATE, + "enabling IQ Calibration.\n"); } if (ah->supp_cals & TEMP_COMP_CAL) { INIT_CAL(&ah->tempCompCalData); INSERT_CAL(ah, &ah->tempCompCalData); - ath_dbg(common, CALIBRATE, - "enabling Temperature Compensation Calibration\n"); + ath_dbg(common, ATH_DBG_CALIBRATE, + "enabling Temperature Compensation Calibration.\n"); } /* Initialize current pointer to first element in list */ diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 9fbcbddea165..3b262ba6b172 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -121,8 +121,10 @@ static const struct ar9300_eeprom ar9300_default = { * if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, - .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - .quick_drop = 0, + .ob = {1, 1, 1},/* 3 chain */ + .db_stage2 = {1, 1, 1}, /* 3 chain */ + .db_stage3 = {0, 0, 0}, + .db_stage4 = {0, 0, 0}, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, @@ -142,7 +144,7 @@ static const struct ar9300_eeprom ar9300_default = { }, .base_ext1 = { .ant_div_control = 0, - .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), @@ -321,8 +323,10 @@ static const struct ar9300_eeprom ar9300_default = { .spurChans = {0, 0, 0, 0, 0}, /* noiseFloorThreshCh Check if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, - .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - .quick_drop = 0, + .ob = {3, 3, 3}, 
/* 3 chain */ + .db_stage2 = {3, 3, 3}, /* 3 chain */ + .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */ + .db_stage4 = {3, 3, 3}, /* don't exist for 2G */ .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, @@ -694,8 +698,10 @@ static const struct ar9300_eeprom ar9300_x113 = { * if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, - .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - .quick_drop = 0, + .ob = {1, 1, 1},/* 3 chain */ + .db_stage2 = {1, 1, 1}, /* 3 chain */ + .db_stage3 = {0, 0, 0}, + .db_stage4 = {0, 0, 0}, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, @@ -715,7 +721,7 @@ static const struct ar9300_eeprom ar9300_x113 = { }, .base_ext1 = { .ant_div_control = 0, - .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), @@ -894,8 +900,10 @@ static const struct ar9300_eeprom ar9300_x113 = { .spurChans = {FREQ2FBIN(5500, 0), 0, 0, 0, 0}, /* noiseFloorThreshCh Check if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, - .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - .quick_drop = 0, + .ob = {3, 3, 3}, /* 3 chain */ + .db_stage2 = {3, 3, 3}, /* 3 chain */ + .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */ + .db_stage4 = {3, 3, 3}, /* don't exist for 2G */ .xpaBiasLvl = 0xf, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, @@ -1268,8 +1276,10 @@ static const struct ar9300_eeprom ar9300_h112 = { * if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, - .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - .quick_drop = 0, + .ob = {1, 1, 1},/* 3 chain */ + .db_stage2 = {1, 1, 1}, /* 3 chain */ + .db_stage3 = {0, 0, 0}, + .db_stage4 = {0, 0, 0}, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, @@ -1281,20 +1291,20 @@ static const struct ar9300_eeprom ar9300_h112 = { .txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, - .papdRateMaskHt20 = LE32(0x0c80c080), - .papdRateMaskHt40 = LE32(0x0080c080), + .papdRateMaskHt20 = LE32(0x80c080), + .papdRateMaskHt40 = LE32(0x80c080), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, }, }, .base_ext1 = { .ant_div_control = 0, - .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), - FREQ2FBIN(2462, 1), + FREQ2FBIN(2472, 1), }, /* ar9300_cal_data_per_freq_op_loop 2g */ .calPierData2G = { @@ -1304,7 +1314,7 @@ static const struct ar9300_eeprom ar9300_h112 = { }, .calTarget_freqbin_Cck = { FREQ2FBIN(2412, 1), - FREQ2FBIN(2472, 1), + FREQ2FBIN(2484, 1), }, .calTarget_freqbin_2G = { FREQ2FBIN(2412, 1), @@ -1468,8 +1478,10 @@ static const struct ar9300_eeprom ar9300_h112 = { .spurChans = {0, 0, 0, 0, 0}, /* noiseFloorThreshCh Check if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, - .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - .quick_drop = 0, + .ob = {3, 3, 3}, /* 3 chain */ + .db_stage2 = {3, 3, 3}, /* 3 chain */ + .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */ + .db_stage4 = {3, 3, 3}, /* don't exist for 2G */ .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, @@ -1503,7 +1515,7 @@ static const struct ar9300_eeprom ar9300_h112 = { FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5700, 0), - FREQ2FBIN(5785, 0) + FREQ2FBIN(5825, 0) }, .calPierData5G = { { @@ -1842,8 +1854,10 @@ static const struct ar9300_eeprom ar9300_x112 = { * if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, - .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0}, - .quick_drop = 0, + .ob = {1, 1, 1},/* 3 chain */ + .db_stage2 = {1, 1, 1}, /* 3 chain */ + .db_stage3 = {0, 0, 0}, + .db_stage4 = {0, 0, 0}, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, @@ -1863,7 +1877,7 @@ static const struct ar9300_eeprom ar9300_x112 = { }, .base_ext1 = { .ant_div_control = 0, - .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), @@ -2042,8 +2056,10 @@ static const struct ar9300_eeprom ar9300_x112 = { .spurChans = {0, 0, 0, 0, 0}, /* noiseFloorThreshch check if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, - .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - .quick_drop = 0, + .ob = {3, 3, 3}, /* 3 chain */ + .db_stage2 = {3, 3, 3}, /* 3 chain */ + .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */ + .db_stage4 = {3, 3, 3}, /* don't exist for 2G */ .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, @@ -2415,8 +2431,10 @@ static const struct ar9300_eeprom ar9300_h116 = { * if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, - .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - .quick_drop = 0, + .ob = {1, 1, 1},/* 3 chain */ + .db_stage2 = {1, 1, 1}, /* 3 chain */ + .db_stage3 = {0, 0, 0}, + .db_stage4 = {0, 0, 0}, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, @@ -2436,12 +2454,12 @@ static const struct ar9300_eeprom ar9300_h116 = { }, .base_ext1 = { .ant_div_control = 0, - .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), - FREQ2FBIN(2462, 1), + FREQ2FBIN(2472, 1), }, /* ar9300_cal_data_per_freq_op_loop 2g */ .calPierData2G = { @@ -2615,8 +2633,10 @@ static const struct ar9300_eeprom ar9300_h116 = { .spurChans = {0, 0, 0, 0, 0}, /* noiseFloorThreshCh Check if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, - .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - .quick_drop = 0, + .ob = {3, 3, 3}, /* 3 chain */ + .db_stage2 = {3, 3, 3}, /* 3 chain */ + .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */ + .db_stage4 = {3, 3, 3}, /* don't exist for 2G */ .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, @@ -2643,7 +2663,7 @@ static const struct ar9300_eeprom ar9300_h116 = { .xatten1MarginHigh = {0, 0, 0} }, .calFreqPier5G = { - FREQ2FBIN(5160, 0), + FREQ2FBIN(5180, 0), FREQ2FBIN(5220, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), @@ -3003,8 +3023,6 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah, return eep->modalHeader5G.antennaGain; case EEP_ANTENNA_GAIN_2G: return eep->modalHeader2G.antennaGain; - case EEP_QUICK_DROP: - return pBase->miscConfiguration & BIT(1); default: return 0; } @@ -3043,7 +3061,8 @@ static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer, int i; if ((address < 0) || ((address + count) / 2 > AR9300_EEPROM_SIZE - 1)) { - ath_dbg(common, EEPROM, "eeprom address not in range\n"); + ath_dbg(common, ATH_DBG_EEPROM, + "eeprom address not in range\n"); return false; } @@ -3074,8 +3093,8 @@ static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer, return true; error: - ath_dbg(common, EEPROM, "unable to read eeprom region at offset %d\n", - address); + ath_dbg(common, ATH_DBG_EEPROM, + "unable to read eeprom region at offset %d\n", address); return false; } @@ -3159,13 +3178,13 @@ static bool ar9300_uncompress_block(struct ath_hw *ah, length &= 0xff; if (length > 0 && spot >= 0 && spot+length <= 
mdataSize) { - ath_dbg(common, EEPROM, + ath_dbg(common, ATH_DBG_EEPROM, "Restore at %d: spot=%d offset=%d length=%d\n", it, spot, offset, length); memcpy(&mptr[spot], &block[it+2], length); spot += length; } else if (length > 0) { - ath_dbg(common, EEPROM, + ath_dbg(common, ATH_DBG_EEPROM, "Bad restore at %d: spot=%d offset=%d length=%d\n", it, spot, offset, length); return false; @@ -3187,13 +3206,13 @@ static int ar9300_compress_decision(struct ath_hw *ah, switch (code) { case _CompressNone: if (length != mdata_size) { - ath_dbg(common, EEPROM, + ath_dbg(common, ATH_DBG_EEPROM, "EEPROM structure size mismatch memory=%d eeprom=%d\n", mdata_size, length); return -1; } memcpy(mptr, (u8 *) (word + COMP_HDR_LEN), length); - ath_dbg(common, EEPROM, + ath_dbg(common, ATH_DBG_EEPROM, "restored eeprom %d: uncompressed, length %d\n", it, length); break; @@ -3202,21 +3221,22 @@ static int ar9300_compress_decision(struct ath_hw *ah, } else { eep = ar9003_eeprom_struct_find_by_id(reference); if (eep == NULL) { - ath_dbg(common, EEPROM, + ath_dbg(common, ATH_DBG_EEPROM, "can't find reference eeprom struct %d\n", reference); return -1; } memcpy(mptr, eep, mdata_size); } - ath_dbg(common, EEPROM, + ath_dbg(common, ATH_DBG_EEPROM, "restore eeprom %d: block, reference %d, length %d\n", it, reference, length); ar9300_uncompress_block(ah, mptr, mdata_size, (u8 *) (word + COMP_HDR_LEN), length); break; default: - ath_dbg(common, EEPROM, "unknown compression code %d\n", code); + ath_dbg(common, ATH_DBG_EEPROM, + "unknown compression code %d\n", code); return -1; } return 0; @@ -3292,32 +3312,34 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah, cptr = AR9300_BASE_ADDR_512; else cptr = AR9300_BASE_ADDR; - ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n", - cptr); + ath_dbg(common, ATH_DBG_EEPROM, + "Trying EEPROM access at Address 0x%04x\n", cptr); if (ar9300_check_eeprom_header(ah, read, cptr)) goto found; cptr = AR9300_BASE_ADDR_512; - ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n", - cptr); + ath_dbg(common, ATH_DBG_EEPROM, + "Trying EEPROM access at Address 0x%04x\n", cptr); if (ar9300_check_eeprom_header(ah, read, cptr)) goto found; read = ar9300_read_otp; cptr = AR9300_BASE_ADDR; - ath_dbg(common, EEPROM, "Trying OTP access at Address 0x%04x\n", cptr); + ath_dbg(common, ATH_DBG_EEPROM, + "Trying OTP access at Address 0x%04x\n", cptr); if (ar9300_check_eeprom_header(ah, read, cptr)) goto found; cptr = AR9300_BASE_ADDR_512; - ath_dbg(common, EEPROM, "Trying OTP access at Address 0x%04x\n", cptr); + ath_dbg(common, ATH_DBG_EEPROM, + "Trying OTP access at Address 0x%04x\n", cptr); if (ar9300_check_eeprom_header(ah, read, cptr)) goto found; goto fail; found: - ath_dbg(common, EEPROM, "Found valid EEPROM data\n"); + ath_dbg(common, ATH_DBG_EEPROM, "Found valid EEPROM data\n"); for (it = 0; it < MSTATE; it++) { if (!read(ah, cptr, word, COMP_HDR_LEN)) @@ -3328,12 +3350,13 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah, ar9300_comp_hdr_unpack(word, &code, &reference, &length, &major, &minor); - ath_dbg(common, EEPROM, + ath_dbg(common, ATH_DBG_EEPROM, "Found block at %x: code=%d ref=%d length=%d major=%d minor=%d\n", cptr, code, reference, length, major, minor); if ((!AR_SREV_9485(ah) && length >= 1024) || (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485)) { - ath_dbg(common, EEPROM, "Skipping bad header\n"); + ath_dbg(common, ATH_DBG_EEPROM, + "Skipping bad header\n"); cptr -= COMP_HDR_LEN; continue; } @@ -3342,13 +3365,13 @@ static int 
ar9300_eeprom_restore_internal(struct ath_hw *ah, read(ah, cptr, word, COMP_HDR_LEN + osize + COMP_CKSUM_LEN); checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length); mchecksum = get_unaligned_le16(&word[COMP_HDR_LEN + osize]); - ath_dbg(common, EEPROM, "checksum %x %x\n", - checksum, mchecksum); + ath_dbg(common, ATH_DBG_EEPROM, + "checksum %x %x\n", checksum, mchecksum); if (checksum == mchecksum) { ar9300_compress_decision(ah, it, code, reference, mptr, word, length, mdata_size); } else { - ath_dbg(common, EEPROM, + ath_dbg(common, ATH_DBG_EEPROM, "skipping block with bad checksum\n"); } cptr -= (COMP_HDR_LEN + osize + COMP_CKSUM_LEN); @@ -3405,14 +3428,25 @@ static u32 ar9003_dump_modal_eeprom(char *buf, u32 len, u32 size, PR_EEP("Chain0 NF Threshold", modal_hdr->noiseFloorThreshCh[0]); PR_EEP("Chain1 NF Threshold", modal_hdr->noiseFloorThreshCh[1]); PR_EEP("Chain2 NF Threshold", modal_hdr->noiseFloorThreshCh[2]); - PR_EEP("Quick Drop", modal_hdr->quick_drop); - PR_EEP("txEndToXpaOff", modal_hdr->txEndToXpaOff); PR_EEP("xPA Bias Level", modal_hdr->xpaBiasLvl); PR_EEP("txFrameToDataStart", modal_hdr->txFrameToDataStart); PR_EEP("txFrameToPaOn", modal_hdr->txFrameToPaOn); PR_EEP("txFrameToXpaOn", modal_hdr->txFrameToXpaOn); PR_EEP("txClip", modal_hdr->txClip); PR_EEP("ADC Desired size", modal_hdr->adcDesiredSize); + PR_EEP("Chain0 ob", modal_hdr->ob[0]); + PR_EEP("Chain1 ob", modal_hdr->ob[1]); + PR_EEP("Chain2 ob", modal_hdr->ob[2]); + + PR_EEP("Chain0 db_stage2", modal_hdr->db_stage2[0]); + PR_EEP("Chain1 db_stage2", modal_hdr->db_stage2[1]); + PR_EEP("Chain2 db_stage2", modal_hdr->db_stage2[2]); + PR_EEP("Chain0 db_stage3", modal_hdr->db_stage3[0]); + PR_EEP("Chain1 db_stage3", modal_hdr->db_stage3[1]); + PR_EEP("Chain2 db_stage3", modal_hdr->db_stage3[2]); + PR_EEP("Chain0 db_stage4", modal_hdr->db_stage4[0]); + PR_EEP("Chain1 db_stage4", modal_hdr->db_stage4[1]); + PR_EEP("Chain2 db_stage4", modal_hdr->db_stage4[2]); return len; } @@ -3469,7 +3503,6 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, PR_EEP("Internal regulator", !!(pBase->featureEnable & BIT(4))); PR_EEP("Enable Paprd", !!(pBase->featureEnable & BIT(5))); PR_EEP("Driver Strength", !!(pBase->miscConfiguration & BIT(0))); - PR_EEP("Quick Drop", !!(pBase->miscConfiguration & BIT(1))); PR_EEP("Chain mask Reduce", (pBase->miscConfiguration >> 0x3) & 0x1); PR_EEP("Write enable Gpio", pBase->eepromWriteEnableGpio); PR_EEP("WLAN Disable Gpio", pBase->wlanDisableGpio); @@ -3538,13 +3571,13 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz) static u16 ar9003_switch_com_spdt_get(struct ath_hw *ah, bool is_2ghz) { struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; - __le16 val; + __le32 val; if (is_2ghz) val = eep->modalHeader2G.switchcomspdt; else val = eep->modalHeader5G.switchcomspdt; - return le16_to_cpu(val); + return le32_to_cpu(val); } @@ -3932,40 +3965,6 @@ static void ar9003_hw_apply_tuning_caps(struct ath_hw *ah) } } -static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq) -{ - struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; - int quick_drop = ath9k_hw_ar9300_get_eeprom(ah, EEP_QUICK_DROP); - s32 t[3], f[3] = {5180, 5500, 5785}; - - if (!quick_drop) - return; - - if (freq < 4000) - quick_drop = eep->modalHeader2G.quick_drop; - else { - t[0] = eep->base_ext1.quick_drop_low; - t[1] = eep->modalHeader5G.quick_drop; - t[2] = eep->base_ext1.quick_drop_high; - quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3); - } - REG_RMW_FIELD(ah, 
AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop); -} - -static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, u16 freq) -{ - struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; - u32 value; - - value = (freq < 4000) ? eep->modalHeader2G.txEndToXpaOff : - eep->modalHeader5G.txEndToXpaOff; - - REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL, - AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF, value); - REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL, - AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF, value); -} - static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah, struct ath9k_channel *chan) { @@ -3973,12 +3972,10 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah, ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan)); ar9003_hw_drive_strength_apply(ah); ar9003_hw_atten_apply(ah, chan); - ar9003_hw_quick_drop_apply(ah, chan->channel); if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah)) ar9003_hw_internal_regulator_apply(ah); if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah)) ar9003_hw_apply_tuning_caps(ah); - ar9003_hw_txend_to_xpa_off_apply(ah, chan->channel); } static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah, @@ -4419,8 +4416,8 @@ static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq, is2GHz) + ht40PowerIncForPdadc; for (i = 0; i < ar9300RateSize; i++) { - ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n", - i, targetPowerValT2[i]); + ath_dbg(common, ATH_DBG_EEPROM, + "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]); } } @@ -4439,7 +4436,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah, struct ath_common *common = ath9k_hw_common(ah); if (ichain >= AR9300_MAX_CHAINS) { - ath_dbg(common, EEPROM, + ath_dbg(common, ATH_DBG_EEPROM, "Invalid chain index, must be less than %d\n", AR9300_MAX_CHAINS); return -1; @@ -4447,7 +4444,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah, if (mode) { /* 5GHz */ if (ipier >= AR9300_NUM_5G_CAL_PIERS) { - ath_dbg(common, EEPROM, + ath_dbg(common, ATH_DBG_EEPROM, "Invalid 5GHz cal pier index, must be less than %d\n", AR9300_NUM_5G_CAL_PIERS); return -1; @@ -4457,7 +4454,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah, is2GHz = 0; } else { if (ipier >= AR9300_NUM_2G_CAL_PIERS) { - ath_dbg(common, EEPROM, + ath_dbg(common, ATH_DBG_EEPROM, "Invalid 2GHz cal pier index, must be less than %d\n", AR9300_NUM_2G_CAL_PIERS); return -1; @@ -4619,7 +4616,8 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) /* interpolate */ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) { - ath_dbg(common, EEPROM, "ch=%d f=%d low=%d %d h=%d %d\n", + ath_dbg(common, ATH_DBG_EEPROM, + "ch=%d f=%d low=%d %d h=%d %d\n", ichain, frequency, lfrequency[ichain], lcorrection[ichain], hfrequency[ichain], hcorrection[ichain]); @@ -4674,7 +4672,7 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) ar9003_hw_power_control_override(ah, frequency, correction, voltage, temperature); - ath_dbg(common, EEPROM, + ath_dbg(common, ATH_DBG_EEPROM, "for frequency=%d, calibration correction = %d %d %d\n", frequency, correction[0], correction[1], correction[2]); @@ -4773,7 +4771,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah, { struct ath_common *common = ath9k_hw_common(ah); struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep; - u16 twiceMaxEdgePower; + u16 twiceMaxEdgePower = MAX_RATE_POWER; int i; u16 scaledPower = 0, minCtlPower; static const u16 ctlModesFor11a[] = { @@ -4860,7 +4858,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah, else freq = centers.ctl_center; - 
ath_dbg(common, REGULATORY, + ath_dbg(common, ATH_DBG_REGULATORY, "LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, EXT_ADDITIVE %d\n", ctlMode, numCtlModes, isHt40CtlMode, (pCtlMode[ctlMode] & EXT_ADDITIVE)); @@ -4874,9 +4872,8 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah, ctlNum = AR9300_NUM_CTLS_5G; } - twiceMaxEdgePower = MAX_RATE_POWER; for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) { - ath_dbg(common, REGULATORY, + ath_dbg(common, ATH_DBG_REGULATORY, "LOOP-Ctlidx %d: cfgCtl 0x%2.2x pCtlMode 0x%2.2x ctlIndex 0x%2.2x chan %d\n", i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i], chan->channel); @@ -4918,7 +4915,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah, minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower); - ath_dbg(common, REGULATORY, + ath_dbg(common, ATH_DBG_REGULATORY, "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n", ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower, scaledPower, minCtlPower); @@ -5042,7 +5039,7 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah, target_power_val_t2_eep[i]) > paprd_scale_factor)) { ah->paprd_ratemask &= ~(1 << i); - ath_dbg(common, EEPROM, + ath_dbg(common, ATH_DBG_EEPROM, "paprd disabled for mcs %d\n", i); } } @@ -5054,14 +5051,12 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah, regulatory->max_power_level = targetPowerValT2[i]; } - ath9k_hw_update_regulatory_maxpower(ah); - if (test) return; for (i = 0; i < ar9300RateSize; i++) { - ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n", - i, targetPowerValT2[i]); + ath_dbg(common, ATH_DBG_EEPROM, + "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]); } ah->txpower_limit = regulatory->max_power_level; diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h index bb223fe82816..6335a867527e 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h @@ -216,8 +216,10 @@ struct ar9300_modal_eep_header { u8 spurChans[AR_EEPROM_MODAL_SPURS]; /* 3 Check if the register is per chain */ int8_t noiseFloorThreshCh[AR9300_MAX_CHAINS]; - u8 reserved[11]; - int8_t quick_drop; + u8 ob[AR9300_MAX_CHAINS]; + u8 db_stage2[AR9300_MAX_CHAINS]; + u8 db_stage3[AR9300_MAX_CHAINS]; + u8 db_stage4[AR9300_MAX_CHAINS]; u8 xpaBiasLvl; u8 txFrameToDataStart; u8 txFrameToPaOn; @@ -267,9 +269,7 @@ struct cal_ctl_data_5g { struct ar9300_BaseExtension_1 { u8 ant_div_control; - u8 future[11]; - int8_t quick_drop_low; - int8_t quick_drop_high; + u8 future[13]; } __packed; struct ar9300_BaseExtension_2 { diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/trunk/drivers/net/wireless/ath/ath9k/ar9003_mac.c index 88c81c5706b2..ccde784a842f 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -175,24 +175,20 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) u32 isr = 0; u32 mask2 = 0; struct ath9k_hw_capabilities *pCap = &ah->caps; + u32 sync_cause = 0; struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - u32 sync_cause = 0, async_cause; - async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE); - - if (async_cause & (AR_INTR_MAC_IRQ | AR_INTR_ASYNC_MASK_MCI)) { + if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) { if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) == AR_RTC_STATUS_ON) isr = REG_READ(ah, AR_ISR); } - sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT; *masked = 
0; - if (!isr && !sync_cause && !async_cause) + if (!isr && !sync_cause) return false; if (isr) { @@ -298,33 +294,6 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) ar9003_hw_bb_watchdog_read(ah); } - if (async_cause & AR_INTR_ASYNC_MASK_MCI) { - u32 raw_intr, rx_msg_intr; - - rx_msg_intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW); - raw_intr = REG_READ(ah, AR_MCI_INTERRUPT_RAW); - - if ((raw_intr == 0xdeadbeef) || (rx_msg_intr == 0xdeadbeef)) - ath_dbg(common, MCI, - "MCI gets 0xdeadbeef during MCI int processing new raw_intr=0x%08x, new rx_msg_raw=0x%08x, raw_intr=0x%08x, rx_msg_raw=0x%08x\n", - raw_intr, rx_msg_intr, mci->raw_intr, - mci->rx_msg_intr); - else { - mci->rx_msg_intr |= rx_msg_intr; - mci->raw_intr |= raw_intr; - *masked |= ATH9K_INT_MCI; - - if (rx_msg_intr & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) - mci->cont_status = - REG_READ(ah, AR_MCI_CONT_STATUS); - - REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, rx_msg_intr); - REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, raw_intr); - ath_dbg(common, MCI, "AR_INTR_SYNC_MCI\n"); - - } - } - if (sync_cause) { if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) { REG_WRITE(ah, AR_RC, AR_RC_HOSTIF); @@ -333,7 +302,7 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) } if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) - ath_dbg(common, INTERRUPT, + ath_dbg(common, ATH_DBG_INTERRUPT, "AR_INTR_SYNC_LOCAL_TIMEOUT\n"); REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause); @@ -364,7 +333,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds, if ((MS(ads->ds_info, AR_DescId) != ATHEROS_VENDOR_ID) || (MS(ads->ds_info, AR_TxRxDesc) != 1)) { - ath_dbg(ath9k_hw_common(ah), XMIT, + ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT, "Tx Descriptor error %x\n", ads->ds_info); memset(ads, 0, sizeof(*ads)); return -EIO; @@ -572,7 +541,7 @@ void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah) memset((void *) ah->ts_ring, 0, ah->ts_size * sizeof(struct ar9003_txs)); - ath_dbg(ath9k_hw_common(ah), XMIT, + ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT, "TS Start 0x%x End 0x%x Virt %p, Size %d\n", ah->ts_paddr_start, ah->ts_paddr_end, ah->ts_ring, ah->ts_size); @@ -583,7 +552,7 @@ void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah) void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start, u32 ts_paddr_start, - u16 size) + u8 size) { ah->ts_paddr_start = ts_paddr_start; diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/trunk/drivers/net/wireless/ath/ath9k/ar9003_mac.h index e203b51e968b..c50449387bf1 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_mac.h +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_mac.h @@ -118,5 +118,5 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah); void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start, u32 ts_paddr_start, - u16 size); + u8 size); #endif diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/trunk/drivers/net/wireless/ath/ath9k/ar9003_mci.c deleted file mode 100644 index 709520c6835b..000000000000 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_mci.c +++ /dev/null @@ -1,1493 +0,0 @@ -/* - * Copyright (c) 2008-2011 Atheros Communications Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#include -#include "hw.h" -#include "ar9003_phy.h" -#include "ar9003_mci.h" - -static void ar9003_mci_reset_req_wakeup(struct ath_hw *ah) -{ - if (!AR_SREV_9462_20(ah)) - return; - - REG_RMW_FIELD(ah, AR_MCI_COMMAND2, - AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 1); - udelay(1); - REG_RMW_FIELD(ah, AR_MCI_COMMAND2, - AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 0); -} - -static int ar9003_mci_wait_for_interrupt(struct ath_hw *ah, u32 address, - u32 bit_position, int time_out) -{ - struct ath_common *common = ath9k_hw_common(ah); - - while (time_out) { - - if (REG_READ(ah, address) & bit_position) { - - REG_WRITE(ah, address, bit_position); - - if (address == AR_MCI_INTERRUPT_RX_MSG_RAW) { - - if (bit_position & - AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) - ar9003_mci_reset_req_wakeup(ah); - - if (bit_position & - (AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING | - AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) - REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, - AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE); - - REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, - AR_MCI_INTERRUPT_RX_MSG); - } - break; - } - - udelay(10); - time_out -= 10; - - if (time_out < 0) - break; - } - - if (time_out <= 0) { - ath_dbg(common, MCI, - "MCI Wait for Reg 0x%08x = 0x%08x timeout\n", - address, bit_position); - ath_dbg(common, MCI, - "MCI INT_RAW = 0x%08x, RX_MSG_RAW = 0x%08x\n", - REG_READ(ah, AR_MCI_INTERRUPT_RAW), - REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW)); - time_out = 0; - } - - return time_out; -} - -void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done) -{ - u32 payload[4] = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffff00}; - - if (!ATH9K_HW_CAP_MCI) - return; - - ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0, payload, 16, - wait_done, false); - udelay(5); -} - -void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done) -{ - u32 payload = 0x00000000; - - if (!ATH9K_HW_CAP_MCI) - return; - - ar9003_mci_send_message(ah, MCI_LNA_TRANS, 0, &payload, 1, - wait_done, false); -} - -static void ar9003_mci_send_req_wake(struct ath_hw *ah, bool wait_done) -{ - ar9003_mci_send_message(ah, MCI_REQ_WAKE, MCI_FLAG_DISABLE_TIMESTAMP, - NULL, 0, wait_done, false); - udelay(5); -} - -void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done) -{ - if (!ATH9K_HW_CAP_MCI) - return; - - ar9003_mci_send_message(ah, MCI_SYS_WAKING, MCI_FLAG_DISABLE_TIMESTAMP, - NULL, 0, wait_done, false); -} - -static void ar9003_mci_send_lna_take(struct ath_hw *ah, bool wait_done) -{ - u32 payload = 0x70000000; - - ar9003_mci_send_message(ah, MCI_LNA_TAKE, 0, &payload, 1, - wait_done, false); -} - -static void ar9003_mci_send_sys_sleeping(struct ath_hw *ah, bool wait_done) -{ - ar9003_mci_send_message(ah, MCI_SYS_SLEEPING, - MCI_FLAG_DISABLE_TIMESTAMP, - NULL, 0, wait_done, false); -} - -static void ar9003_mci_send_coex_version_query(struct ath_hw *ah, - bool wait_done) -{ - struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - u32 payload[4] = {0, 0, 0, 0}; - - if (!mci->bt_version_known && - (mci->bt_state != MCI_BT_SLEEP)) 
{ - ath_dbg(common, MCI, "MCI Send Coex version query\n"); - MCI_GPM_SET_TYPE_OPCODE(payload, - MCI_GPM_COEX_AGENT, MCI_GPM_COEX_VERSION_QUERY); - ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, - wait_done, true); - } -} - -static void ar9003_mci_send_coex_version_response(struct ath_hw *ah, - bool wait_done) -{ - struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - u32 payload[4] = {0, 0, 0, 0}; - - ath_dbg(common, MCI, "MCI Send Coex version response\n"); - MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT, - MCI_GPM_COEX_VERSION_RESPONSE); - *(((u8 *)payload) + MCI_GPM_COEX_B_MAJOR_VERSION) = - mci->wlan_ver_major; - *(((u8 *)payload) + MCI_GPM_COEX_B_MINOR_VERSION) = - mci->wlan_ver_minor; - ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true); -} - -static void ar9003_mci_send_coex_wlan_channels(struct ath_hw *ah, - bool wait_done) -{ - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - u32 *payload = &mci->wlan_channels[0]; - - if ((mci->wlan_channels_update == true) && - (mci->bt_state != MCI_BT_SLEEP)) { - MCI_GPM_SET_TYPE_OPCODE(payload, - MCI_GPM_COEX_AGENT, MCI_GPM_COEX_WLAN_CHANNELS); - ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, - wait_done, true); - MCI_GPM_SET_TYPE_OPCODE(payload, 0xff, 0xff); - } -} - -static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah, - bool wait_done, u8 query_type) -{ - struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - u32 payload[4] = {0, 0, 0, 0}; - bool query_btinfo = !!(query_type & (MCI_GPM_COEX_QUERY_BT_ALL_INFO | - MCI_GPM_COEX_QUERY_BT_TOPOLOGY)); - - if (mci->bt_state != MCI_BT_SLEEP) { - - ath_dbg(common, MCI, "MCI Send Coex BT Status Query 0x%02X\n", - query_type); - - MCI_GPM_SET_TYPE_OPCODE(payload, - MCI_GPM_COEX_AGENT, MCI_GPM_COEX_STATUS_QUERY); - - *(((u8 *)payload) + MCI_GPM_COEX_B_BT_BITMAP) = query_type; - /* - * If bt_status_query message is not sent successfully, - * then need_flush_btinfo should be set again. - */ - if (!ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, - wait_done, true)) { - if (query_btinfo) { - mci->need_flush_btinfo = true; - - ath_dbg(common, MCI, - "MCI send bt_status_query fail, set flush flag again\n"); - } - } - - if (query_btinfo) - mci->query_bt = false; - } -} - -void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt, - bool wait_done) -{ - struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - u32 payload[4] = {0, 0, 0, 0}; - - if (!ATH9K_HW_CAP_MCI) - return; - - ath_dbg(common, MCI, "MCI Send Coex %s BT GPM\n", - (halt) ? 
"halt" : "unhalt"); - - MCI_GPM_SET_TYPE_OPCODE(payload, - MCI_GPM_COEX_AGENT, MCI_GPM_COEX_HALT_BT_GPM); - - if (halt) { - mci->query_bt = true; - /* Send next unhalt no matter halt sent or not */ - mci->unhalt_bt_gpm = true; - mci->need_flush_btinfo = true; - *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) = - MCI_GPM_COEX_BT_GPM_HALT; - } else - *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) = - MCI_GPM_COEX_BT_GPM_UNHALT; - - ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true); -} - - -static void ar9003_mci_prep_interface(struct ath_hw *ah) -{ - struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - u32 saved_mci_int_en; - u32 mci_timeout = 150; - - mci->bt_state = MCI_BT_SLEEP; - saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN); - - REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0); - REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, - REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW)); - REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, - REG_READ(ah, AR_MCI_INTERRUPT_RAW)); - - /* Remote Reset */ - ath_dbg(common, MCI, "MCI Reset sequence start\n"); - ath_dbg(common, MCI, "MCI send REMOTE_RESET\n"); - ar9003_mci_remote_reset(ah, true); - - /* - * This delay is required for the reset delay worst case value 255 in - * MCI_COMMAND2 register - */ - - if (AR_SREV_9462_10(ah)) - udelay(252); - - ath_dbg(common, MCI, "MCI Send REQ_WAKE to remoter(BT)\n"); - ar9003_mci_send_req_wake(ah, true); - - if (ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, - AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500)) { - - ath_dbg(common, MCI, "MCI SYS_WAKING from remote(BT)\n"); - mci->bt_state = MCI_BT_AWAKE; - - if (AR_SREV_9462_10(ah)) - udelay(10); - /* - * we don't need to send more remote_reset at this moment. - * If BT receive first remote_reset, then BT HW will - * be cleaned up and will be able to receive req_wake - * and BT HW will respond sys_waking. - * In this case, WLAN will receive BT's HW sys_waking. - * Otherwise, if BT SW missed initial remote_reset, - * that remote_reset will still clean up BT MCI RX, - * and the req_wake will wake BT up, - * and BT SW will respond this req_wake with a remote_reset and - * sys_waking. In this case, WLAN will receive BT's SW - * sys_waking. In either case, BT's RX is cleaned up. So we - * don't need to reply BT's remote_reset now, if any. - * Similarly, if in any case, WLAN can receive BT's sys_waking, - * that means WLAN's RX is also fine. - */ - - /* Send SYS_WAKING to BT */ - - ath_dbg(common, MCI, "MCI send SW SYS_WAKING to remote BT\n"); - - ar9003_mci_send_sys_waking(ah, true); - udelay(10); - - /* - * Set BT priority interrupt value to be 0xff to - * avoid having too many BT PRIORITY interrupts. - */ - - REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF); - REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF); - REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF); - REG_WRITE(ah, AR_MCI_BT_PRI3, 0xFFFFFFFF); - REG_WRITE(ah, AR_MCI_BT_PRI, 0X000000FF); - - /* - * A contention reset will be received after send out - * sys_waking. Also BT priority interrupt bits will be set. - * Clear those bits before the next step. 
- */ - - REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, - AR_MCI_INTERRUPT_RX_MSG_CONT_RST); - REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, - AR_MCI_INTERRUPT_BT_PRI); - - if (AR_SREV_9462_10(ah) || mci->is_2g) { - /* Send LNA_TRANS */ - ath_dbg(common, MCI, "MCI send LNA_TRANS to BT\n"); - ar9003_mci_send_lna_transfer(ah, true); - udelay(5); - } - - if (AR_SREV_9462_10(ah) || (mci->is_2g && - !mci->update_2g5g)) { - if (ar9003_mci_wait_for_interrupt(ah, - AR_MCI_INTERRUPT_RX_MSG_RAW, - AR_MCI_INTERRUPT_RX_MSG_LNA_INFO, - mci_timeout)) - ath_dbg(common, MCI, - "MCI WLAN has control over the LNA & BT obeys it\n"); - else - ath_dbg(common, MCI, - "MCI BT didn't respond to LNA_TRANS\n"); - } - - if (AR_SREV_9462_10(ah)) { - /* Send another remote_reset to deassert BT clk_req. */ - ath_dbg(common, MCI, - "MCI another remote_reset to deassert clk_req\n"); - ar9003_mci_remote_reset(ah, true); - udelay(252); - } - } - - /* Clear the extra redundant SYS_WAKING from BT */ - if ((mci->bt_state == MCI_BT_AWAKE) && - (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, - AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) && - (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, - AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) == 0)) { - - REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, - AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING); - REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, - AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE); - } - - REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en); -} - -void ar9003_mci_disable_interrupt(struct ath_hw *ah) -{ - if (!ATH9K_HW_CAP_MCI) - return; - - REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0); - REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0); -} - -void ar9003_mci_enable_interrupt(struct ath_hw *ah) -{ - if (!ATH9K_HW_CAP_MCI) - return; - - REG_WRITE(ah, AR_MCI_INTERRUPT_EN, AR_MCI_INTERRUPT_DEFAULT); - REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, - AR_MCI_INTERRUPT_RX_MSG_DEFAULT); -} - -bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints) -{ - u32 intr; - - if (!ATH9K_HW_CAP_MCI) - return false; - - intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW); - return ((intr & ints) == ints); -} - -void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr, - u32 *rx_msg_intr) -{ - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - - if (!ATH9K_HW_CAP_MCI) - return; - - *raw_intr = mci->raw_intr; - *rx_msg_intr = mci->rx_msg_intr; - - /* Clean int bits after the values are read. 
*/ - mci->raw_intr = 0; - mci->rx_msg_intr = 0; -} -EXPORT_SYMBOL(ar9003_mci_get_interrupt); - -void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g) -{ - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - - if (!ATH9K_HW_CAP_MCI) - return; - - if (!mci->update_2g5g && - (mci->is_2g != is_2g)) - mci->update_2g5g = true; - - mci->is_2g = is_2g; -} - -static bool ar9003_mci_is_gpm_valid(struct ath_hw *ah, u32 msg_index) -{ - struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - u32 *payload; - u32 recv_type, offset; - - if (msg_index == MCI_GPM_INVALID) - return false; - - offset = msg_index << 4; - - payload = (u32 *)(mci->gpm_buf + offset); - recv_type = MCI_GPM_TYPE(payload); - - if (recv_type == MCI_GPM_RSVD_PATTERN) { - ath_dbg(common, MCI, "MCI Skip RSVD GPM\n"); - return false; - } - - return true; -} - -static void ar9003_mci_observation_set_up(struct ath_hw *ah) -{ - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MCI) { - - ath9k_hw_cfg_output(ah, 3, - AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA); - ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK); - ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA); - ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK); - - } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_TXRX) { - - ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX); - ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX); - ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX); - ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX); - ath9k_hw_cfg_output(ah, 5, AR_GPIO_OUTPUT_MUX_AS_OUTPUT); - - } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_BT) { - - ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX); - ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX); - ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA); - ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK); - - } else - return; - - REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE); - - if (AR_SREV_9462_20_OR_LATER(ah)) { - REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, - AR_GLB_DS_JTAG_DISABLE, 1); - REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, - AR_GLB_WLAN_UART_INTF_EN, 0); - REG_SET_BIT(ah, AR_GLB_GPIO_CONTROL, - ATH_MCI_CONFIG_MCI_OBS_GPIO); - } - - REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_GPIO_OBS_SEL, 0); - REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL, 1); - REG_WRITE(ah, AR_OBS, 0x4b); - REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL1, 0x03); - REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL2, 0x01); - REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_LSB, 0x02); - REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_MSB, 0x03); - REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS, - AR_PHY_TEST_CTL_DEBUGPORT_SEL, 0x07); -} - -static bool ar9003_mci_send_coex_bt_flags(struct ath_hw *ah, bool wait_done, - u8 opcode, u32 bt_flags) -{ - struct ath_common *common = ath9k_hw_common(ah); - u32 pld[4] = {0, 0, 0, 0}; - - MCI_GPM_SET_TYPE_OPCODE(pld, - MCI_GPM_COEX_AGENT, MCI_GPM_COEX_BT_UPDATE_FLAGS); - - *(((u8 *)pld) + MCI_GPM_COEX_B_BT_FLAGS_OP) = opcode; - *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 0) = bt_flags & 0xFF; - *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 1) = (bt_flags >> 8) & 0xFF; - *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 2) = (bt_flags >> 16) & 0xFF; - *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 3) = (bt_flags >> 24) & 0xFF; - - ath_dbg(common, MCI, - "MCI BT_MCI_FLAGS: Send Coex BT Update Flags %s 0x%08x\n", - 
opcode == MCI_GPM_COEX_BT_FLAGS_READ ? "READ" : - opcode == MCI_GPM_COEX_BT_FLAGS_SET ? "SET" : "CLEAR", - bt_flags); - - return ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, - wait_done, true); -} - -void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, - bool is_full_sleep) -{ - struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - u32 regval, thresh; - - if (!ATH9K_HW_CAP_MCI) - return; - - ath_dbg(common, MCI, "MCI full_sleep = %d, is_2g = %d\n", - is_full_sleep, is_2g); - - /* - * GPM buffer and scheduling message buffer are not allocated - */ - - if (!mci->gpm_addr && !mci->sched_addr) { - ath_dbg(common, MCI, - "MCI GPM and schedule buffers are not allocated\n"); - return; - } - - if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) { - ath_dbg(common, MCI, "MCI it's deadbeef, quit mci_reset\n"); - return; - } - - /* Program MCI DMA related registers */ - REG_WRITE(ah, AR_MCI_GPM_0, mci->gpm_addr); - REG_WRITE(ah, AR_MCI_GPM_1, mci->gpm_len); - REG_WRITE(ah, AR_MCI_SCHD_TABLE_0, mci->sched_addr); - - /* - * To avoid MCI state machine be affected by incoming remote MCI msgs, - * MCI mode will be enabled later, right before reset the MCI TX and RX. - */ - - regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) | - SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) | - SM(1, AR_BTCOEX_CTRL_PA_SHARED) | - SM(1, AR_BTCOEX_CTRL_LNA_SHARED) | - SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) | - SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK) | - SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) | - SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) | - SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); - - if (is_2g && (AR_SREV_9462_20(ah)) && - !(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) { - - regval |= SM(1, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); - ath_dbg(common, MCI, "MCI sched one step look ahead\n"); - - if (!(mci->config & - ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) { - - thresh = MS(mci->config, - ATH_MCI_CONFIG_AGGR_THRESH); - thresh &= 7; - regval |= SM(1, - AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN); - regval |= SM(thresh, AR_BTCOEX_CTRL_AGGR_THRESH); - - REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2, - AR_MCI_SCHD_TABLE_2_HW_BASED, 1); - REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2, - AR_MCI_SCHD_TABLE_2_MEM_BASED, 1); - - } else - ath_dbg(common, MCI, "MCI sched aggr thresh: off\n"); - } else - ath_dbg(common, MCI, "MCI SCHED one step look ahead off\n"); - - if (AR_SREV_9462_10(ah)) - regval |= SM(1, AR_BTCOEX_CTRL_SPDT_ENABLE_10); - - REG_WRITE(ah, AR_BTCOEX_CTRL, regval); - - if (AR_SREV_9462_20(ah)) { - REG_SET_BIT(ah, AR_PHY_GLB_CONTROL, - AR_BTCOEX_CTRL_SPDT_ENABLE); - REG_RMW_FIELD(ah, AR_BTCOEX_CTRL3, - AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT, 20); - } - - REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 1); - REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0); - - thresh = MS(mci->config, ATH_MCI_CONFIG_CLK_DIV); - REG_RMW_FIELD(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_CLK_DIV, thresh); - REG_SET_BIT(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_MCI_MODE_EN); - - /* Resetting the Rx and Tx paths of MCI */ - regval = REG_READ(ah, AR_MCI_COMMAND2); - regval |= SM(1, AR_MCI_COMMAND2_RESET_TX); - REG_WRITE(ah, AR_MCI_COMMAND2, regval); - - udelay(1); - - regval &= ~SM(1, AR_MCI_COMMAND2_RESET_TX); - REG_WRITE(ah, AR_MCI_COMMAND2, regval); - - if (is_full_sleep) { - ar9003_mci_mute_bt(ah); - udelay(100); - } - - regval |= SM(1, AR_MCI_COMMAND2_RESET_RX); - REG_WRITE(ah, AR_MCI_COMMAND2, regval); - udelay(1); - regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX); - REG_WRITE(ah, AR_MCI_COMMAND2, regval); - - ar9003_mci_state(ah, 
MCI_STATE_INIT_GPM_OFFSET, NULL); - REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, - (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) | - SM(0x0000, AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM))); - - REG_CLR_BIT(ah, AR_MCI_TX_CTRL, - AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); - - if (AR_SREV_9462_20_OR_LATER(ah)) - ar9003_mci_observation_set_up(ah); - - mci->ready = true; - ar9003_mci_prep_interface(ah); - - if (en_int) - ar9003_mci_enable_interrupt(ah); -} - -void ar9003_mci_mute_bt(struct ath_hw *ah) -{ - struct ath_common *common = ath9k_hw_common(ah); - - if (!ATH9K_HW_CAP_MCI) - return; - - /* disable all MCI messages */ - REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000); - REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff); - REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff); - REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff); - REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff); - REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); - - /* wait pending HW messages to flush out */ - udelay(10); - - /* - * Send LNA_TAKE and SYS_SLEEPING when - * 1. reset not after resuming from full sleep - * 2. before reset MCI RX, to quiet BT and avoid MCI RX misalignment - */ - - ath_dbg(common, MCI, "MCI Send LNA take\n"); - ar9003_mci_send_lna_take(ah, true); - - udelay(5); - - ath_dbg(common, MCI, "MCI Send sys sleeping\n"); - ar9003_mci_send_sys_sleeping(ah, true); -} - -void ar9003_mci_sync_bt_state(struct ath_hw *ah) -{ - struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - u32 cur_bt_state; - - if (!ATH9K_HW_CAP_MCI) - return; - - cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL); - - if (mci->bt_state != cur_bt_state) { - ath_dbg(common, MCI, - "MCI BT state mismatches. old: %d, new: %d\n", - mci->bt_state, cur_bt_state); - mci->bt_state = cur_bt_state; - } - - if (mci->bt_state != MCI_BT_SLEEP) { - - ar9003_mci_send_coex_version_query(ah, true); - ar9003_mci_send_coex_wlan_channels(ah, true); - - if (mci->unhalt_bt_gpm == true) { - ath_dbg(common, MCI, "MCI unhalt BT GPM\n"); - ar9003_mci_send_coex_halt_bt_gpm(ah, false, true); - } - } -} - -static void ar9003_mci_send_2g5g_status(struct ath_hw *ah, bool wait_done) -{ - struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - u32 new_flags, to_set, to_clear; - - if (AR_SREV_9462_20(ah) && - mci->update_2g5g && - (mci->bt_state != MCI_BT_SLEEP)) { - - if (mci->is_2g) { - new_flags = MCI_2G_FLAGS; - to_clear = MCI_2G_FLAGS_CLEAR_MASK; - to_set = MCI_2G_FLAGS_SET_MASK; - } else { - new_flags = MCI_5G_FLAGS; - to_clear = MCI_5G_FLAGS_CLEAR_MASK; - to_set = MCI_5G_FLAGS_SET_MASK; - } - - ath_dbg(common, MCI, - "MCI BT_MCI_FLAGS: %s 0x%08x clr=0x%08x, set=0x%08x\n", - mci->is_2g ? 
"2G" : "5G", new_flags, to_clear, to_set); - - if (to_clear) - ar9003_mci_send_coex_bt_flags(ah, wait_done, - MCI_GPM_COEX_BT_FLAGS_CLEAR, to_clear); - - if (to_set) - ar9003_mci_send_coex_bt_flags(ah, wait_done, - MCI_GPM_COEX_BT_FLAGS_SET, to_set); - } - - if (AR_SREV_9462_10(ah) && (mci->bt_state != MCI_BT_SLEEP)) - mci->update_2g5g = false; -} - -static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header, - u32 *payload, bool queue) -{ - struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - u8 type, opcode; - - if (queue) { - - if (payload) - ath_dbg(common, MCI, - "MCI ERROR: Send fail: %02x: %02x %02x %02x\n", - header, - *(((u8 *)payload) + 4), - *(((u8 *)payload) + 5), - *(((u8 *)payload) + 6)); - else - ath_dbg(common, MCI, "MCI ERROR: Send fail: %02x\n", - header); - } - - /* check if the message is to be queued */ - if (header != MCI_GPM) - return; - - type = MCI_GPM_TYPE(payload); - opcode = MCI_GPM_OPCODE(payload); - - if (type != MCI_GPM_COEX_AGENT) - return; - - switch (opcode) { - case MCI_GPM_COEX_BT_UPDATE_FLAGS: - - if (AR_SREV_9462_10(ah)) - break; - - if (*(((u8 *)payload) + MCI_GPM_COEX_B_BT_FLAGS_OP) == - MCI_GPM_COEX_BT_FLAGS_READ) - break; - - mci->update_2g5g = queue; - - if (queue) - ath_dbg(common, MCI, - "MCI BT_MCI_FLAGS: 2G5G status %s\n", - mci->is_2g ? "2G" : "5G"); - else - ath_dbg(common, MCI, - "MCI BT_MCI_FLAGS: 2G5G status %s\n", - mci->is_2g ? "2G" : "5G"); - - break; - - case MCI_GPM_COEX_WLAN_CHANNELS: - - mci->wlan_channels_update = queue; - if (queue) - ath_dbg(common, MCI, "MCI WLAN channel map \n"); - else - ath_dbg(common, MCI, "MCI WLAN channel map \n"); - break; - - case MCI_GPM_COEX_HALT_BT_GPM: - - if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) == - MCI_GPM_COEX_BT_GPM_UNHALT) { - - mci->unhalt_bt_gpm = queue; - - if (queue) - ath_dbg(common, MCI, - "MCI UNHALT BT GPM \n"); - else { - mci->halted_bt_gpm = false; - ath_dbg(common, MCI, - "MCI UNHALT BT GPM \n"); - } - } - - if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) == - MCI_GPM_COEX_BT_GPM_HALT) { - - mci->halted_bt_gpm = !queue; - - if (queue) - ath_dbg(common, MCI, - "MCI HALT BT GPM \n"); - else - ath_dbg(common, MCI, - "MCI UNHALT BT GPM \n"); - } - - break; - default: - break; - } -} - -void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done) -{ - struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - - if (!ATH9K_HW_CAP_MCI) - return; - - if (mci->update_2g5g) { - if (mci->is_2g) { - - ar9003_mci_send_2g5g_status(ah, true); - ath_dbg(common, MCI, "MCI Send LNA trans\n"); - ar9003_mci_send_lna_transfer(ah, true); - udelay(5); - - REG_CLR_BIT(ah, AR_MCI_TX_CTRL, - AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); - - if (AR_SREV_9462_20(ah)) { - REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL, - AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL); - if (!(mci->config & - ATH_MCI_CONFIG_DISABLE_OSLA)) { - REG_SET_BIT(ah, AR_BTCOEX_CTRL, - AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); - } - } - } else { - ath_dbg(common, MCI, "MCI Send LNA take\n"); - ar9003_mci_send_lna_take(ah, true); - udelay(5); - - REG_SET_BIT(ah, AR_MCI_TX_CTRL, - AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); - - if (AR_SREV_9462_20(ah)) { - REG_SET_BIT(ah, AR_PHY_GLB_CONTROL, - AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL); - REG_CLR_BIT(ah, AR_BTCOEX_CTRL, - AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); - } - - ar9003_mci_send_2g5g_status(ah, true); - } - } -} - -bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag, - u32 *payload, u8 len, bool wait_done, - bool 
check_bt) -{ - struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - bool msg_sent = false; - u32 regval; - u32 saved_mci_int_en; - int i; - - if (!ATH9K_HW_CAP_MCI) - return false; - - saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN); - regval = REG_READ(ah, AR_BTCOEX_CTRL); - - if ((regval == 0xdeadbeef) || !(regval & AR_BTCOEX_CTRL_MCI_MODE_EN)) { - - ath_dbg(common, MCI, - "MCI Not sending 0x%x. MCI is not enabled. full_sleep = %d\n", - header, - (ah->power_mode == ATH9K_PM_FULL_SLEEP) ? 1 : 0); - - ar9003_mci_queue_unsent_gpm(ah, header, payload, true); - return false; - - } else if (check_bt && (mci->bt_state == MCI_BT_SLEEP)) { - - ath_dbg(common, MCI, - "MCI Don't send message 0x%x. BT is in sleep state\n", - header); - - ar9003_mci_queue_unsent_gpm(ah, header, payload, true); - return false; - } - - if (wait_done) - REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0); - - /* Need to clear SW_MSG_DONE raw bit before wait */ - - REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, - (AR_MCI_INTERRUPT_SW_MSG_DONE | - AR_MCI_INTERRUPT_MSG_FAIL_MASK)); - - if (payload) { - for (i = 0; (i * 4) < len; i++) - REG_WRITE(ah, (AR_MCI_TX_PAYLOAD0 + i * 4), - *(payload + i)); - } - - REG_WRITE(ah, AR_MCI_COMMAND0, - (SM((flag & MCI_FLAG_DISABLE_TIMESTAMP), - AR_MCI_COMMAND0_DISABLE_TIMESTAMP) | - SM(len, AR_MCI_COMMAND0_LEN) | - SM(header, AR_MCI_COMMAND0_HEADER))); - - if (wait_done && - !(ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RAW, - AR_MCI_INTERRUPT_SW_MSG_DONE, 500))) - ar9003_mci_queue_unsent_gpm(ah, header, payload, true); - else { - ar9003_mci_queue_unsent_gpm(ah, header, payload, false); - msg_sent = true; - } - - if (wait_done) - REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en); - - return msg_sent; -} -EXPORT_SYMBOL(ar9003_mci_send_message); - -void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf, - u16 len, u32 sched_addr) -{ - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - void *sched_buf = (void *)((char *) gpm_buf + (sched_addr - gpm_addr)); - - if (!ATH9K_HW_CAP_MCI) - return; - - mci->gpm_addr = gpm_addr; - mci->gpm_buf = gpm_buf; - mci->gpm_len = len; - mci->sched_addr = sched_addr; - mci->sched_buf = sched_buf; - - ar9003_mci_reset(ah, true, true, true); -} -EXPORT_SYMBOL(ar9003_mci_setup); - -void ar9003_mci_cleanup(struct ath_hw *ah) -{ - struct ath_common *common = ath9k_hw_common(ah); - - if (!ATH9K_HW_CAP_MCI) - return; - - /* Turn off MCI and Jupiter mode. 
*/ - REG_WRITE(ah, AR_BTCOEX_CTRL, 0x00); - ath_dbg(common, MCI, "MCI ar9003_mci_cleanup\n"); - ar9003_mci_disable_interrupt(ah); -} -EXPORT_SYMBOL(ar9003_mci_cleanup); - -static void ar9003_mci_process_gpm_extra(struct ath_hw *ah, u8 gpm_type, - u8 gpm_opcode, u32 *p_gpm) -{ - struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - u8 *p_data = (u8 *) p_gpm; - - if (gpm_type != MCI_GPM_COEX_AGENT) - return; - - switch (gpm_opcode) { - case MCI_GPM_COEX_VERSION_QUERY: - ath_dbg(common, MCI, "MCI Recv GPM COEX Version Query\n"); - ar9003_mci_send_coex_version_response(ah, true); - break; - case MCI_GPM_COEX_VERSION_RESPONSE: - ath_dbg(common, MCI, "MCI Recv GPM COEX Version Response\n"); - mci->bt_ver_major = - *(p_data + MCI_GPM_COEX_B_MAJOR_VERSION); - mci->bt_ver_minor = - *(p_data + MCI_GPM_COEX_B_MINOR_VERSION); - mci->bt_version_known = true; - ath_dbg(common, MCI, "MCI BT Coex version: %d.%d\n", - mci->bt_ver_major, mci->bt_ver_minor); - break; - case MCI_GPM_COEX_STATUS_QUERY: - ath_dbg(common, MCI, - "MCI Recv GPM COEX Status Query = 0x%02X\n", - *(p_data + MCI_GPM_COEX_B_WLAN_BITMAP)); - mci->wlan_channels_update = true; - ar9003_mci_send_coex_wlan_channels(ah, true); - break; - case MCI_GPM_COEX_BT_PROFILE_INFO: - mci->query_bt = true; - ath_dbg(common, MCI, "MCI Recv GPM COEX BT_Profile_Info\n"); - break; - case MCI_GPM_COEX_BT_STATUS_UPDATE: - mci->query_bt = true; - ath_dbg(common, MCI, - "MCI Recv GPM COEX BT_Status_Update SEQ=%d (drop&query)\n", - *(p_gpm + 3)); - break; - default: - break; - } -} - -u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type, - u8 gpm_opcode, int time_out) -{ - struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - u32 *p_gpm = NULL, mismatch = 0, more_data; - u32 offset; - u8 recv_type = 0, recv_opcode = 0; - bool b_is_bt_cal_done = (gpm_type == MCI_GPM_BT_CAL_DONE); - - if (!ATH9K_HW_CAP_MCI) - return 0; - - more_data = time_out ? MCI_GPM_NOMORE : MCI_GPM_MORE; - - while (time_out > 0) { - if (p_gpm) { - MCI_GPM_RECYCLE(p_gpm); - p_gpm = NULL; - } - - if (more_data != MCI_GPM_MORE) - time_out = ar9003_mci_wait_for_interrupt(ah, - AR_MCI_INTERRUPT_RX_MSG_RAW, - AR_MCI_INTERRUPT_RX_MSG_GPM, - time_out); - - if (!time_out) - break; - - offset = ar9003_mci_state(ah, - MCI_STATE_NEXT_GPM_OFFSET, &more_data); - - if (offset == MCI_GPM_INVALID) - continue; - - p_gpm = (u32 *) (mci->gpm_buf + offset); - recv_type = MCI_GPM_TYPE(p_gpm); - recv_opcode = MCI_GPM_OPCODE(p_gpm); - - if (MCI_GPM_IS_CAL_TYPE(recv_type)) { - - if (recv_type == gpm_type) { - - if ((gpm_type == MCI_GPM_BT_CAL_DONE) && - !b_is_bt_cal_done) { - gpm_type = MCI_GPM_BT_CAL_GRANT; - ath_dbg(common, MCI, - "MCI Recv BT_CAL_DONE wait BT_CAL_GRANT\n"); - continue; - } - - break; - } - } else if ((recv_type == gpm_type) && - (recv_opcode == gpm_opcode)) - break; - - /* not expected message */ - - /* - * check if it's cal_grant - * - * When we're waiting for cal_grant in reset routine, - * it's possible that BT sends out cal_request at the - * same time. Since BT's calibration doesn't happen - * that often, we'll let BT completes calibration then - * we continue to wait for cal_grant from BT. - * Orginal: Wait BT_CAL_GRANT. - * New: Receive BT_CAL_REQ -> send WLAN_CAL_GRANT->wait - * BT_CAL_DONE -> Wait BT_CAL_GRANT. 
- */ - - if ((gpm_type == MCI_GPM_BT_CAL_GRANT) && - (recv_type == MCI_GPM_BT_CAL_REQ)) { - - u32 payload[4] = {0, 0, 0, 0}; - - gpm_type = MCI_GPM_BT_CAL_DONE; - ath_dbg(common, MCI, - "MCI Rcv BT_CAL_REQ, send WLAN_CAL_GRANT\n"); - - MCI_GPM_SET_CAL_TYPE(payload, - MCI_GPM_WLAN_CAL_GRANT); - - ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, - false, false); - - ath_dbg(common, MCI, "MCI now wait for BT_CAL_DONE\n"); - - continue; - } else { - ath_dbg(common, MCI, "MCI GPM subtype not match 0x%x\n", - *(p_gpm + 1)); - mismatch++; - ar9003_mci_process_gpm_extra(ah, recv_type, - recv_opcode, p_gpm); - } - } - if (p_gpm) { - MCI_GPM_RECYCLE(p_gpm); - p_gpm = NULL; - } - - if (time_out <= 0) { - time_out = 0; - ath_dbg(common, MCI, - "MCI GPM received timeout, mismatch = %d\n", mismatch); - } else - ath_dbg(common, MCI, "MCI Receive GPM type=0x%x, code=0x%x\n", - gpm_type, gpm_opcode); - - while (more_data == MCI_GPM_MORE) { - - ath_dbg(common, MCI, "MCI discard remaining GPM\n"); - offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET, - &more_data); - - if (offset == MCI_GPM_INVALID) - break; - - p_gpm = (u32 *) (mci->gpm_buf + offset); - recv_type = MCI_GPM_TYPE(p_gpm); - recv_opcode = MCI_GPM_OPCODE(p_gpm); - - if (!MCI_GPM_IS_CAL_TYPE(recv_type)) - ar9003_mci_process_gpm_extra(ah, recv_type, - recv_opcode, p_gpm); - - MCI_GPM_RECYCLE(p_gpm); - } - - return time_out; -} - -u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data) -{ - struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - u32 value = 0, more_gpm = 0, gpm_ptr; - u8 query_type; - - if (!ATH9K_HW_CAP_MCI) - return 0; - - switch (state_type) { - case MCI_STATE_ENABLE: - if (mci->ready) { - - value = REG_READ(ah, AR_BTCOEX_CTRL); - - if ((value == 0xdeadbeef) || (value == 0xffffffff)) - value = 0; - } - value &= AR_BTCOEX_CTRL_MCI_MODE_EN; - break; - case MCI_STATE_INIT_GPM_OFFSET: - value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR); - ath_dbg(common, MCI, "MCI GPM initial WRITE_PTR=%d\n", value); - mci->gpm_idx = value; - break; - case MCI_STATE_NEXT_GPM_OFFSET: - case MCI_STATE_LAST_GPM_OFFSET: - /* - * This could be useful to avoid new GPM message interrupt which - * may lead to spurious interrupt after power sleep, or multiple - * entry of ath_mci_intr(). 
- * Adding empty GPM check by returning HAL_MCI_GPM_INVALID can - * alleviate this effect, but clearing GPM RX interrupt bit is - * safe, because whether this is called from hw or driver code - * there must be an interrupt bit set/triggered initially - */ - REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, - AR_MCI_INTERRUPT_RX_MSG_GPM); - - gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR); - value = gpm_ptr; - - if (value == 0) - value = mci->gpm_len - 1; - else if (value >= mci->gpm_len) { - if (value != 0xFFFF) { - value = 0; - ath_dbg(common, MCI, - "MCI GPM offset out of range\n"); - } - } else - value--; - - if (value == 0xFFFF) { - value = MCI_GPM_INVALID; - more_gpm = MCI_GPM_NOMORE; - ath_dbg(common, MCI, - "MCI GPM ptr invalid @ptr=%d, offset=%d, more=GPM_NOMORE\n", - gpm_ptr, value); - } else if (state_type == MCI_STATE_NEXT_GPM_OFFSET) { - - if (gpm_ptr == mci->gpm_idx) { - value = MCI_GPM_INVALID; - more_gpm = MCI_GPM_NOMORE; - - ath_dbg(common, MCI, - "MCI GPM message not available @ptr=%d, @offset=%d, more=GPM_NOMORE\n", - gpm_ptr, value); - } else { - for (;;) { - - u32 temp_index; - - /* skip reserved GPM if any */ - - if (value != mci->gpm_idx) - more_gpm = MCI_GPM_MORE; - else - more_gpm = MCI_GPM_NOMORE; - - temp_index = mci->gpm_idx; - mci->gpm_idx++; - - if (mci->gpm_idx >= - mci->gpm_len) - mci->gpm_idx = 0; - - ath_dbg(common, MCI, - "MCI GPM message got ptr=%d, @offset=%d, more=%d\n", - gpm_ptr, temp_index, - (more_gpm == MCI_GPM_MORE)); - - if (ar9003_mci_is_gpm_valid(ah, - temp_index)) { - value = temp_index; - break; - } - - if (more_gpm == MCI_GPM_NOMORE) { - value = MCI_GPM_INVALID; - break; - } - } - } - if (p_data) - *p_data = more_gpm; - } - - if (value != MCI_GPM_INVALID) - value <<= 4; - - break; - case MCI_STATE_LAST_SCHD_MSG_OFFSET: - value = MS(REG_READ(ah, AR_MCI_RX_STATUS), - AR_MCI_RX_LAST_SCHD_MSG_INDEX); - /* Make it in bytes */ - value <<= 4; - break; - - case MCI_STATE_REMOTE_SLEEP: - value = MS(REG_READ(ah, AR_MCI_RX_STATUS), - AR_MCI_RX_REMOTE_SLEEP) ? 
- MCI_BT_SLEEP : MCI_BT_AWAKE; - break; - - case MCI_STATE_CONT_RSSI_POWER: - value = MS(mci->cont_status, AR_MCI_CONT_RSSI_POWER); - break; - - case MCI_STATE_CONT_PRIORITY: - value = MS(mci->cont_status, AR_MCI_CONT_RRIORITY); - break; - - case MCI_STATE_CONT_TXRX: - value = MS(mci->cont_status, AR_MCI_CONT_TXRX); - break; - - case MCI_STATE_BT: - value = mci->bt_state; - break; - - case MCI_STATE_SET_BT_SLEEP: - mci->bt_state = MCI_BT_SLEEP; - break; - - case MCI_STATE_SET_BT_AWAKE: - mci->bt_state = MCI_BT_AWAKE; - ar9003_mci_send_coex_version_query(ah, true); - ar9003_mci_send_coex_wlan_channels(ah, true); - - if (mci->unhalt_bt_gpm) { - - ath_dbg(common, MCI, "MCI unhalt BT GPM\n"); - ar9003_mci_send_coex_halt_bt_gpm(ah, false, true); - } - - ar9003_mci_2g5g_switch(ah, true); - break; - - case MCI_STATE_SET_BT_CAL_START: - mci->bt_state = MCI_BT_CAL_START; - break; - - case MCI_STATE_SET_BT_CAL: - mci->bt_state = MCI_BT_CAL; - break; - - case MCI_STATE_RESET_REQ_WAKE: - ar9003_mci_reset_req_wakeup(ah); - mci->update_2g5g = true; - - if ((AR_SREV_9462_20_OR_LATER(ah)) && - (mci->config & ATH_MCI_CONFIG_MCI_OBS_MASK)) { - /* Check if we still have control of the GPIOs */ - if ((REG_READ(ah, AR_GLB_GPIO_CONTROL) & - ATH_MCI_CONFIG_MCI_OBS_GPIO) != - ATH_MCI_CONFIG_MCI_OBS_GPIO) { - - ath_dbg(common, MCI, - "MCI reconfigure observation\n"); - ar9003_mci_observation_set_up(ah); - } - } - break; - - case MCI_STATE_SEND_WLAN_COEX_VERSION: - ar9003_mci_send_coex_version_response(ah, true); - break; - - case MCI_STATE_SET_BT_COEX_VERSION: - - if (!p_data) - ath_dbg(common, MCI, - "MCI Set BT Coex version with NULL data!!\n"); - else { - mci->bt_ver_major = (*p_data >> 8) & 0xff; - mci->bt_ver_minor = (*p_data) & 0xff; - mci->bt_version_known = true; - ath_dbg(common, MCI, "MCI BT version set: %d.%d\n", - mci->bt_ver_major, mci->bt_ver_minor); - } - break; - - case MCI_STATE_SEND_WLAN_CHANNELS: - if (p_data) { - if (((mci->wlan_channels[1] & 0xffff0000) == - (*(p_data + 1) & 0xffff0000)) && - (mci->wlan_channels[2] == *(p_data + 2)) && - (mci->wlan_channels[3] == *(p_data + 3))) - break; - - mci->wlan_channels[0] = *p_data++; - mci->wlan_channels[1] = *p_data++; - mci->wlan_channels[2] = *p_data++; - mci->wlan_channels[3] = *p_data++; - } - mci->wlan_channels_update = true; - ar9003_mci_send_coex_wlan_channels(ah, true); - break; - - case MCI_STATE_SEND_VERSION_QUERY: - ar9003_mci_send_coex_version_query(ah, true); - break; - - case MCI_STATE_SEND_STATUS_QUERY: - query_type = (AR_SREV_9462_10(ah)) ? - MCI_GPM_COEX_QUERY_BT_ALL_INFO : - MCI_GPM_COEX_QUERY_BT_TOPOLOGY; - - ar9003_mci_send_coex_bt_status_query(ah, true, query_type); - break; - - case MCI_STATE_NEED_FLUSH_BT_INFO: - /* - * btcoex_hw.mci.unhalt_bt_gpm means whether it's - * needed to send UNHALT message. It's set whenever - * there's a request to send HALT message. - * mci_halted_bt_gpm means whether HALT message is sent - * out successfully. - * - * Checking (mci_unhalt_bt_gpm == false) instead of - * checking (ah->mci_halted_bt_gpm == false) will make - * sure currently is in UNHALT-ed mode and BT can - * respond to status query. - */ - value = (!mci->unhalt_bt_gpm && - mci->need_flush_btinfo) ? 1 : 0; - if (p_data) - mci->need_flush_btinfo = - (*p_data != 0) ? 
true : false; - break; - - case MCI_STATE_RECOVER_RX: - - ath_dbg(common, MCI, "MCI hw RECOVER_RX\n"); - ar9003_mci_prep_interface(ah); - mci->query_bt = true; - mci->need_flush_btinfo = true; - ar9003_mci_send_coex_wlan_channels(ah, true); - ar9003_mci_2g5g_switch(ah, true); - break; - - case MCI_STATE_NEED_FTP_STOMP: - value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP); - break; - - case MCI_STATE_NEED_TUNING: - value = !(mci->config & ATH_MCI_CONFIG_DISABLE_TUNING); - break; - - default: - break; - - } - - return value; -} -EXPORT_SYMBOL(ar9003_mci_state); diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/trunk/drivers/net/wireless/ath/ath9k/ar9003_mci.h deleted file mode 100644 index 798da116a44c..000000000000 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_mci.h +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright (c) 2010-2011 Atheros Communications Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#ifndef AR9003_MCI_H -#define AR9003_MCI_H - -#define MCI_FLAG_DISABLE_TIMESTAMP 0x00000001 /* Disable time stamp */ - -/* Default remote BT device MCI COEX version */ -#define MCI_GPM_COEX_MAJOR_VERSION_DEFAULT 3 -#define MCI_GPM_COEX_MINOR_VERSION_DEFAULT 0 - -/* Local WLAN MCI COEX version */ -#define MCI_GPM_COEX_MAJOR_VERSION_WLAN 3 -#define MCI_GPM_COEX_MINOR_VERSION_WLAN 0 - -enum mci_gpm_coex_query_type { - MCI_GPM_COEX_QUERY_BT_ALL_INFO = BIT(0), - MCI_GPM_COEX_QUERY_BT_TOPOLOGY = BIT(1), - MCI_GPM_COEX_QUERY_BT_DEBUG = BIT(2), -}; - -enum mci_gpm_coex_halt_bt_gpm { - MCI_GPM_COEX_BT_GPM_UNHALT, - MCI_GPM_COEX_BT_GPM_HALT -}; - -enum mci_gpm_coex_bt_update_flags_op { - MCI_GPM_COEX_BT_FLAGS_READ, - MCI_GPM_COEX_BT_FLAGS_SET, - MCI_GPM_COEX_BT_FLAGS_CLEAR -}; - -#define MCI_NUM_BT_CHANNELS 79 - -#define MCI_BT_MCI_FLAGS_UPDATE_CORR 0x00000002 -#define MCI_BT_MCI_FLAGS_UPDATE_HDR 0x00000004 -#define MCI_BT_MCI_FLAGS_UPDATE_PLD 0x00000008 -#define MCI_BT_MCI_FLAGS_LNA_CTRL 0x00000010 -#define MCI_BT_MCI_FLAGS_DEBUG 0x00000020 -#define MCI_BT_MCI_FLAGS_SCHED_MSG 0x00000040 -#define MCI_BT_MCI_FLAGS_CONT_MSG 0x00000080 -#define MCI_BT_MCI_FLAGS_COEX_GPM 0x00000100 -#define MCI_BT_MCI_FLAGS_CPU_INT_MSG 0x00000200 -#define MCI_BT_MCI_FLAGS_MCI_MODE 0x00000400 -#define MCI_BT_MCI_FLAGS_AR9462_MODE 0x00001000 -#define MCI_BT_MCI_FLAGS_OTHER 0x00010000 - -#define MCI_DEFAULT_BT_MCI_FLAGS 0x00011dde - -#define MCI_TOGGLE_BT_MCI_FLAGS (MCI_BT_MCI_FLAGS_UPDATE_CORR | \ - MCI_BT_MCI_FLAGS_UPDATE_HDR | \ - MCI_BT_MCI_FLAGS_UPDATE_PLD | \ - MCI_BT_MCI_FLAGS_MCI_MODE) - -#define MCI_2G_FLAGS_CLEAR_MASK 0x00000000 -#define MCI_2G_FLAGS_SET_MASK MCI_TOGGLE_BT_MCI_FLAGS -#define MCI_2G_FLAGS MCI_DEFAULT_BT_MCI_FLAGS - -#define MCI_5G_FLAGS_CLEAR_MASK MCI_TOGGLE_BT_MCI_FLAGS -#define MCI_5G_FLAGS_SET_MASK 0x00000000 -#define MCI_5G_FLAGS (MCI_DEFAULT_BT_MCI_FLAGS & \ - 
~MCI_TOGGLE_BT_MCI_FLAGS) - -/* - * Default value for AR9462 is 0x00002201 - */ -#define ATH_MCI_CONFIG_CONCUR_TX 0x00000003 -#define ATH_MCI_CONFIG_MCI_OBS_MCI 0x00000004 -#define ATH_MCI_CONFIG_MCI_OBS_TXRX 0x00000008 -#define ATH_MCI_CONFIG_MCI_OBS_BT 0x00000010 -#define ATH_MCI_CONFIG_DISABLE_MCI_CAL 0x00000020 -#define ATH_MCI_CONFIG_DISABLE_OSLA 0x00000040 -#define ATH_MCI_CONFIG_DISABLE_FTP_STOMP 0x00000080 -#define ATH_MCI_CONFIG_AGGR_THRESH 0x00000700 -#define ATH_MCI_CONFIG_AGGR_THRESH_S 8 -#define ATH_MCI_CONFIG_DISABLE_AGGR_THRESH 0x00000800 -#define ATH_MCI_CONFIG_CLK_DIV 0x00003000 -#define ATH_MCI_CONFIG_CLK_DIV_S 12 -#define ATH_MCI_CONFIG_DISABLE_TUNING 0x00004000 -#define ATH_MCI_CONFIG_MCI_WEIGHT_DBG 0x40000000 -#define ATH_MCI_CONFIG_DISABLE_MCI 0x80000000 - -#define ATH_MCI_CONFIG_MCI_OBS_MASK (ATH_MCI_CONFIG_MCI_OBS_MCI | \ - ATH_MCI_CONFIG_MCI_OBS_TXRX | \ - ATH_MCI_CONFIG_MCI_OBS_BT) -#define ATH_MCI_CONFIG_MCI_OBS_GPIO 0x0000002F - -#endif diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/trunk/drivers/net/wireless/ath/ath9k/ar9003_paprd.c index 59647a3ceb7f..a4450cba0653 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_paprd.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_paprd.c @@ -119,8 +119,8 @@ static int ar9003_get_training_power_5g(struct ath_hw *ah) break; default: delta = 0; - ath_dbg(common, CALIBRATE, "Invalid tx-chainmask: %u\n", - ah->txchainmask); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Invalid tx-chainmask: %u\n", ah->txchainmask); } power += delta; @@ -148,12 +148,13 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah) else training_power = ar9003_get_training_power_5g(ah); - ath_dbg(common, CALIBRATE, "Training power: %d, Target power: %d\n", + ath_dbg(common, ATH_DBG_CALIBRATE, + "Training power: %d, Target power: %d\n", training_power, ah->paprd_target_power); if (training_power < 0) { - ath_dbg(common, CALIBRATE, - "PAPRD target power delta out of range\n"); + ath_dbg(common, ATH_DBG_CALIBRATE, + "PAPRD target power delta out of range"); return -ERANGE; } ah->paprd_training_power = training_power; @@ -310,8 +311,8 @@ static unsigned int ar9003_get_desired_gain(struct ath_hw *ah, int chain, reg_cl_gain = AR_PHY_CL_TAB_2; break; default: - ath_dbg(ath9k_hw_common(ah), CALIBRATE, - "Invalid chainmask: %d\n", chain); + ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, + "Invalid chainmask: %d\n", chain); break; } @@ -849,7 +850,7 @@ bool ar9003_paprd_is_done(struct ath_hw *ah) agc2_pwr = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1, AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR); - ath_dbg(ath9k_hw_common(ah), CALIBRATE, + ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, "AGC2_PWR = 0x%x training done = 0x%x\n", agc2_pwr, paprd_done); /* diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 2589b38b689a..2330e7ede199 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -199,14 +199,12 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah, synth_freq = chan->channel; } } else { - range = AR_SREV_9462(ah) ? 
5 : 10; + range = 10; max_spur_cnts = 4; synth_freq = chan->channel; } for (i = 0; i < max_spur_cnts; i++) { - if (AR_SREV_9462(ah) && (i == 0 || i == 3)) - continue; negative = 0; if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah)) cur_bb_spur = FBIN2FREQ(spur_fbin_ptr[i], @@ -882,7 +880,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); if (!on != aniState->ofdmWeakSigDetectOff) { - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "** ch %d: ofdm weak signal: %s=>%s\n", chan->channel, !aniState->ofdmWeakSigDetectOff ? @@ -900,7 +898,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, u32 level = param; if (level >= ARRAY_SIZE(firstep_table)) { - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n", level, ARRAY_SIZE(firstep_table)); return false; @@ -937,7 +935,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW, value2); if (level != aniState->firstepLevel) { - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "** ch %d: level %d=>%d[def:%d] firstep[level]=%d ini=%d\n", chan->channel, aniState->firstepLevel, @@ -945,7 +943,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, ATH9K_ANI_FIRSTEP_LVL_NEW, value, aniState->iniDef.firstep); - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "** ch %d: level %d=>%d[def:%d] firstep_low[level]=%d ini=%d\n", chan->channel, aniState->firstepLevel, @@ -965,7 +963,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, u32 level = param; if (level >= ARRAY_SIZE(cycpwrThr1_table)) { - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n", level, ARRAY_SIZE(cycpwrThr1_table)); return false; @@ -1001,7 +999,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, AR_PHY_EXT_CYCPWR_THR1, value2); if (level != aniState->spurImmunityLevel) { - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "** ch %d: level %d=>%d[def:%d] cycpwrThr1[level]=%d ini=%d\n", chan->channel, aniState->spurImmunityLevel, @@ -1009,7 +1007,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, ATH9K_ANI_SPUR_IMMUNE_LVL_NEW, value, aniState->iniDef.cycpwrThr1); - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "** ch %d: level %d=>%d[def:%d] cycpwrThr1Ext[level]=%d ini=%d\n", chan->channel, aniState->spurImmunityLevel, @@ -1036,7 +1034,8 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL, AR_PHY_MRC_CCK_MUX_REG, is_on); if (!is_on != aniState->mrcCCKOff) { - ath_dbg(common, ANI, "** ch %d: MRC CCK: %s=>%s\n", + ath_dbg(common, ATH_DBG_ANI, + "** ch %d: MRC CCK: %s=>%s\n", chan->channel, !aniState->mrcCCKOff ? "on" : "off", is_on ? "on" : "off"); @@ -1051,11 +1050,11 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, case ATH9K_ANI_PRESENT: break; default: - ath_dbg(common, ANI, "invalid cmd %u\n", cmd); + ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd); return false; } - ath_dbg(common, ANI, + ath_dbg(common, ATH_DBG_ANI, "ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n", aniState->spurImmunityLevel, !aniState->ofdmWeakSigDetectOff ? 
"on" : "off", @@ -1124,7 +1123,8 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah) aniState = &ah->curchan->ani; iniDef = &aniState->iniDef; - ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n", + ath_dbg(common, ATH_DBG_ANI, + "ver %d.%d opmode %u chan %d Mhz/0x%x\n", ah->hw_version.macVersion, ah->hw_version.macRev, ah->opmode, @@ -1386,7 +1386,7 @@ void ar9003_hw_bb_watchdog_config(struct ath_hw *ah) ~(AR_PHY_WATCHDOG_NON_IDLE_ENABLE | AR_PHY_WATCHDOG_IDLE_ENABLE)); - ath_dbg(common, RESET, "Disabled BB Watchdog\n"); + ath_dbg(common, ATH_DBG_RESET, "Disabled BB Watchdog\n"); return; } @@ -1422,7 +1422,8 @@ void ar9003_hw_bb_watchdog_config(struct ath_hw *ah) AR_PHY_WATCHDOG_IDLE_MASK | (AR_PHY_WATCHDOG_NON_IDLE_MASK & (idle_count << 2))); - ath_dbg(common, RESET, "Enabled BB Watchdog timeout (%u ms)\n", + ath_dbg(common, ATH_DBG_RESET, + "Enabled BB Watchdog timeout (%u ms)\n", idle_tmo_ms); } @@ -1451,9 +1452,9 @@ void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah) return; status = ah->bb_watchdog_last_status; - ath_dbg(common, RESET, + ath_dbg(common, ATH_DBG_RESET, "\n==== BB update: BB status=0x%08x ====\n", status); - ath_dbg(common, RESET, + ath_dbg(common, ATH_DBG_RESET, "** BB state: wd=%u det=%u rdar=%u rOFDM=%d rCCK=%u tOFDM=%u tCCK=%u agc=%u src=%u **\n", MS(status, AR_PHY_WATCHDOG_INFO), MS(status, AR_PHY_WATCHDOG_DET_HANG), @@ -1465,19 +1466,22 @@ void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah) MS(status, AR_PHY_WATCHDOG_AGC_SM), MS(status, AR_PHY_WATCHDOG_SRCH_SM)); - ath_dbg(common, RESET, "** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n", + ath_dbg(common, ATH_DBG_RESET, + "** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n", REG_READ(ah, AR_PHY_WATCHDOG_CTL_1), REG_READ(ah, AR_PHY_WATCHDOG_CTL_2)); - ath_dbg(common, RESET, "** BB mode: BB_gen_controls=0x%08x **\n", + ath_dbg(common, ATH_DBG_RESET, + "** BB mode: BB_gen_controls=0x%08x **\n", REG_READ(ah, AR_PHY_GEN_CTRL)); #define PCT(_field) (common->cc_survey._field * 100 / common->cc_survey.cycles) if (common->cc_survey.cycles) - ath_dbg(common, RESET, + ath_dbg(common, ATH_DBG_RESET, "** BB busy times: rx_clear=%d%%, rx_frame=%d%%, tx_frame=%d%% **\n", PCT(rx_busy), PCT(rx_frame), PCT(tx_frame)); - ath_dbg(common, RESET, "==== BB update: done ====\n\n"); + ath_dbg(common, ATH_DBG_RESET, + "==== BB update: done ====\n\n"); } EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info); diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.h index ed64114571fc..4114fe752c6b 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.h @@ -389,8 +389,6 @@ #define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10 #define AR_PHY_RIFS_INIT_DELAY 0x3ff0000 -#define AR_PHY_AGC_QUICK_DROP 0x03c00000 -#define AR_PHY_AGC_QUICK_DROP_S 22 #define AR_PHY_AGC_COARSE_LOW 0x00007F80 #define AR_PHY_AGC_COARSE_LOW_S 7 #define AR_PHY_AGC_COARSE_HIGH 0x003F8000 @@ -490,8 +488,6 @@ #define AR_PHY_TEST_CTL_TSTADC_EN_S 8 #define AR_PHY_TEST_CTL_RX_OBS_SEL 0x3C00 #define AR_PHY_TEST_CTL_RX_OBS_SEL_S 10 -#define AR_PHY_TEST_CTL_DEBUGPORT_SEL 0xe0000000 -#define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S 29 #define AR_PHY_TSTDAC (AR_SM_BASE + 0x168) @@ -1003,7 +999,6 @@ /* GLB Registers */ #define AR_GLB_BASE 0x20000 -#define AR_GLB_GPIO_CONTROL (AR_GLB_BASE) #define AR_PHY_GLB_CONTROL (AR_GLB_BASE + 0x44) #define AR_GLB_SCRATCH(_ah) (AR_GLB_BASE + \ (AR_SREV_9462_20(_ah) ? 
0x4c : 0x50)) diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_rtt.c b/trunk/drivers/net/wireless/ath/ath9k/ar9003_rtt.c index 458bedf0b0ae..48803ee9c0d6 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_rtt.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_rtt.c @@ -16,7 +16,6 @@ #include "hw.h" #include "ar9003_phy.h" -#include "ar9003_rtt.h" #define RTT_RESTORE_TIMEOUT 1000 #define RTT_ACCESS_TIMEOUT 100 diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/trunk/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h index dc2054f0378e..9c51b395b4ff 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h @@ -41,24 +41,24 @@ static const u32 ar9462_pciephy_clkreq_enable_L1_2p0[][2] = { static const u32 ar9462_2p0_baseband_postamble[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ - {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d}, - {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae}, - {0x00009824, 0x5ac640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da}, - {0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x09143e81}, + {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011}, + {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e}, + {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0}, + {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881}, {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4}, {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c}, {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4}, {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0}, {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020}, - {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8}, - {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e}, - {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x33795d5e}, + {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2}, + {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e}, + {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3039605e, 0x33795d5e}, {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, - {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282}, - {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, + {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c782}, + {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27}, {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, {0x0000a204, 0x013187c0, 0x013187c4, 0x013187c4, 0x013187c0}, @@ -81,15 +81,6 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = { {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982}, {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, - {0x0000a3a4, 0x00000010, 0x00000010, 0x00000000, 0x00000000}, - {0x0000a3a8, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa}, - {0x0000a3ac, 0xaaaaaa00, 0xaaaaaa30, 0xaaaaaa00, 0xaaaaaa00}, - {0x0000a41c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce}, - {0x0000a420, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce}, - {0x0000a424, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce}, - {0x0000a428, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce}, - {0x0000a42c, 0x1ce739ce, 
0x1ce739ce, 0x1ce739ce, 0x1ce739ce}, - {0x0000a430, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce}, {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000}, {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, @@ -697,8 +688,8 @@ static const u32 ar9462_2p0_mac_postamble_emulation[][5] = { static const u32 ar9462_2p0_radio_postamble_sys3ant[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ {0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808}, - {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008}, - {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008}, + {0x00016140, 0x10804008, 0x10804008, 0x90804008, 0x90804008}, + {0x00016540, 0x10804008, 0x10804008, 0x90804008, 0x90804008}, }; static const u32 ar9462_2p0_baseband_postamble_emulation[][5] = { @@ -726,8 +717,8 @@ static const u32 ar9462_2p0_baseband_postamble_emulation[][5] = { static const u32 ar9462_2p0_radio_postamble_sys2ant[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ {0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808}, - {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008}, - {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008}, + {0x00016140, 0x10804008, 0x10804008, 0x90804008, 0x90804008}, + {0x00016540, 0x10804008, 0x10804008, 0x90804008, 0x90804008}, }; static const u32 ar9462_common_wo_xlna_rx_gain_table_2p0[][2] = { @@ -1068,7 +1059,7 @@ static const u32 ar9462_modes_low_ob_db_tx_gain_table_2p0[][5] = { static const u32 ar9462_2p0_soc_postamble[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ - {0x00007010, 0x00000033, 0x00000033, 0x00000033, 0x00000033}, + {0x00007010, 0x00002233, 0x00002233, 0x00002233, 0x00002233}, }; static const u32 ar9462_2p0_baseband_core[][2] = { @@ -1116,11 +1107,11 @@ static const u32 ar9462_2p0_baseband_core[][2] = { {0x00009e30, 0x06336f77}, {0x00009e34, 0x6af6532f}, {0x00009e38, 0x0cc80c00}, - {0x00009e40, 0x15262820}, + {0x00009e40, 0x0d261820}, {0x00009e4c, 0x00001004}, {0x00009e50, 0x00ff03f1}, - {0x00009e54, 0xe4c555c2}, - {0x00009e58, 0xfd857722}, + {0x00009e54, 0xe4c355c7}, + {0x00009e58, 0xfd897735}, {0x00009e5c, 0xe9198724}, {0x00009fc0, 0x803e4788}, {0x00009fc4, 0x0001efb5}, @@ -1151,6 +1142,9 @@ static const u32 ar9462_2p0_baseband_core[][2] = { {0x0000a398, 0x001f0e0f}, {0x0000a39c, 0x0075393f}, {0x0000a3a0, 0xb79f6427}, + {0x0000a3a4, 0x00000000}, + {0x0000a3a8, 0xaaaaaaaa}, + {0x0000a3ac, 0x3c466478}, {0x0000a3c0, 0x20202020}, {0x0000a3c4, 0x22222220}, {0x0000a3c8, 0x20200020}, @@ -1173,6 +1167,12 @@ static const u32 ar9462_2p0_baseband_core[][2] = { {0x0000a40c, 0x00820820}, {0x0000a414, 0x1ce739ce}, {0x0000a418, 0x2d001dce}, + {0x0000a41c, 0x1ce739ce}, + {0x0000a420, 0x000001ce}, + {0x0000a424, 0x1ce739ce}, + {0x0000a428, 0x000001ce}, + {0x0000a42c, 0x1ce739ce}, + {0x0000a430, 0x1ce739ce}, {0x0000a434, 0x00000000}, {0x0000a438, 0x00001801}, {0x0000a43c, 0x00100000}, @@ -1257,8 +1257,8 @@ static const u32 ar9462_modes_high_ob_db_tx_gain_table_2p0[][5] = { {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660}, {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861}, {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81}, - {0x0000a54c, 0x59025eb6, 0x59025eb6, 0x42001a83, 0x42001a83}, - {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001c84, 0x44001c84}, + {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83}, + {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84}, {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 
0x48001ce3}, {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5}, {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9}, @@ -1850,8 +1850,8 @@ static const u32 ar9462_modes_green_ob_db_tx_gain_table_2p0[][5] = { {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660}, {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861}, {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81}, - {0x0000a54c, 0x59025eb6, 0x59025eb6, 0x42001a83, 0x42001a83}, - {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001c84, 0x44001c84}, + {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83}, + {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84}, {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3}, {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5}, {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9}, diff --git a/trunk/drivers/net/wireless/ath/ath9k/ath9k.h b/trunk/drivers/net/wireless/ath/ath9k/ath9k.h index b30e9fc6433f..1c269f50822b 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/trunk/drivers/net/wireless/ath/ath9k/ath9k.h @@ -25,7 +25,6 @@ #include "debug.h" #include "common.h" -#include "mci.h" /* * Header for the ath9k.ko driver core *only* -- hw code nor any other driver @@ -97,7 +96,7 @@ enum buffer_type { #define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU) #define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR) -#define ATH_TXSTATUS_RING_SIZE 512 +#define ATH_TXSTATUS_RING_SIZE 64 #define DS2PHYS(_dd, _ds) \ ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) @@ -159,9 +158,6 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd, /* return block-ack bitmap index given sequence and starting sequence */ #define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1)) -/* return the seqno for _start + _offset */ -#define ATH_BA_INDEX2SEQ(_seq, _offset) (((_seq) + (_offset)) & (IEEE80211_SEQ_MAX - 1)) - /* returns delimiter padding required given the packet length */ #define ATH_AGGR_GET_NDELIM(_len) \ (((_len) >= ATH_AGGR_MINPLEN) ? 0 : \ @@ -196,7 +192,6 @@ struct ath_txq { u8 txq_headidx; u8 txq_tailidx; int pending_frames; - struct sk_buff_head complete_q; }; struct ath_atx_ac { @@ -242,7 +237,6 @@ struct ath_atx_tid { struct ath_node *an; struct ath_atx_ac *ac; unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)]; - int bar_index; u16 seq_start; u16 seq_next; u16 baw_size; @@ -257,9 +251,8 @@ struct ath_atx_tid { struct ath_node { #ifdef CONFIG_ATH9K_DEBUGFS struct list_head list; /* for sc->nodes */ -#endif struct ieee80211_sta *sta; /* station struct we're part of */ - struct ieee80211_vif *vif; /* interface with which we're associated */ +#endif struct ath_atx_tid tid[WME_NUM_TID]; struct ath_atx_ac ac[WME_NUM_AC]; int ps_key; @@ -281,6 +274,7 @@ struct ath_tx_control { }; #define ATH_TX_ERROR 0x01 +#define ATH_TX_BAR 0x02 /** * @txq_map: Index is mac80211 queue number. 
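The ATH_BA_INDEX() macro above is plain modular arithmetic on 12-bit sequence numbers; a small standalone sketch shows how it locates a frame inside the block-ack window (IEEE80211_SEQ_MAX is 4096 in this driver; the window size and sequence numbers below are arbitrary examples):

	#include <stdio.h>

	#define IEEE80211_SEQ_MAX	4096	/* 802.11 sequence numbers are 12 bits wide */

	/* Offset of sequence 'seq' from window start 'st', with wrap-around. */
	#define ATH_BA_INDEX(_st, _seq)	(((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))

	int main(void)
	{
		unsigned int baw_size = 64;	/* negotiated block-ack window size */
		unsigned int seq_start = 4090;	/* window start close to the wrap point */
		unsigned int seq = 5;		/* a frame that wrapped past 4095 */
		unsigned int index = ATH_BA_INDEX(seq_start, seq);

		/* (5 - 4090) & 4095 == 11, so the frame is still in the window */
		printf("index %u, %s the window\n", index,
		       index < baw_size ? "inside" : "outside");
		return 0;
	}
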
This is @@ -449,9 +443,7 @@ struct ath_btcoex { u32 btcoex_no_stomp; /* in usec */ u32 btcoex_period; /* in usec */ u32 btscan_no_stomp; /* in usec */ - u32 duty_cycle; struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */ - struct ath_mci_profile mci; }; int ath_init_btcoex_timer(struct ath_softc *sc); @@ -466,7 +458,7 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc); #define ATH_LED_PIN_9287 8 #define ATH_LED_PIN_9300 10 #define ATH_LED_PIN_9485 6 -#define ATH_LED_PIN_9462 4 +#define ATH_LED_PIN_9462 0 #ifdef CONFIG_MAC80211_LEDS void ath_init_leds(struct ath_softc *sc); @@ -546,7 +538,7 @@ struct ath_ant_comb { #define DEFAULT_CACHELINE 32 #define ATH_REGCLASSIDS_MAX 10 #define ATH_CABQ_READY_TIME 80 /* % of beacon interval */ -#define ATH_MAX_SW_RETRIES 30 +#define ATH_MAX_SW_RETRIES 10 #define ATH_CHAN_MAX 255 #define ATH_TXPOWER_MAX 100 /* .5 dBm units */ @@ -651,7 +643,6 @@ struct ath_softc { struct delayed_work tx_complete_work; struct delayed_work hw_pll_work; struct ath_btcoex btcoex; - struct ath_mci_coex mci_coex; struct ath_descdma txsdma; diff --git a/trunk/drivers/net/wireless/ath/ath9k/beacon.c b/trunk/drivers/net/wireless/ath/ath9k/beacon.c index b8967e482e6e..a13cabb95435 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/beacon.c +++ b/trunk/drivers/net/wireless/ath/ath9k/beacon.c @@ -117,10 +117,11 @@ static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb) memset(&txctl, 0, sizeof(struct ath_tx_control)); txctl.txq = sc->beacon.cabq; - ath_dbg(common, XMIT, "transmitting CABQ packet, skb: %p\n", skb); + ath_dbg(common, ATH_DBG_XMIT, + "transmitting CABQ packet, skb: %p\n", skb); if (ath_tx_start(hw, skb, &txctl) != 0) { - ath_dbg(common, XMIT, "CABQ TX failed\n"); + ath_dbg(common, ATH_DBG_XMIT, "CABQ TX failed\n"); dev_kfree_skb_any(skb); } } @@ -203,7 +204,7 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw, if (skb && cabq_depth) { if (sc->nvifs > 1) { - ath_dbg(common, BEACON, + ath_dbg(common, ATH_DBG_BEACON, "Flushing previous cabq traffic\n"); ath_draintxq(sc, cabq, false); } @@ -296,7 +297,7 @@ int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif) tsfadjust = TU_TO_USEC(intval * avp->av_bslot) / ATH_BCBUF; avp->tsf_adjust = cpu_to_le64(tsfadjust); - ath_dbg(common, BEACON, + ath_dbg(common, ATH_DBG_BEACON, "stagger beacons, bslot %d intval %u tsfadjust %llu\n", avp->av_bslot, intval, (unsigned long long)tsfadjust); @@ -356,7 +357,6 @@ void ath_beacon_tasklet(unsigned long data) struct ath_buf *bf = NULL; struct ieee80211_vif *vif; struct ath_tx_status ts; - bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); int slot; u32 bfaddr, bc = 0; @@ -371,14 +371,15 @@ void ath_beacon_tasklet(unsigned long data) sc->beacon.bmisscnt++; if (sc->beacon.bmisscnt < BSTUCK_THRESH * sc->nbcnvifs) { - ath_dbg(common, BSTUCK, + ath_dbg(common, ATH_DBG_BSTUCK, "missed %u consecutive beacons\n", sc->beacon.bmisscnt); ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq); if (sc->beacon.bmisscnt > 3) ath9k_hw_bstuck_nfcal(ah); } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) { - ath_dbg(common, BSTUCK, "beacon is officially stuck\n"); + ath_dbg(common, ATH_DBG_BSTUCK, + "beacon is officially stuck\n"); sc->sc_flags |= SC_OP_TSF_RESET; ieee80211_queue_work(sc->hw, &sc->hw_reset_work); } @@ -405,7 +406,7 @@ void ath_beacon_tasklet(unsigned long data) slot = (tsftu % (intval * ATH_BCBUF)) / intval; vif = sc->beacon.bslot[slot]; - ath_dbg(common, BEACON, + ath_dbg(common, ATH_DBG_BEACON, "slot %d [tsf %llu tsftu %u intval 
%u] vif %p\n", slot, tsf, tsftu / ATH_BCBUF, intval, vif); } else { @@ -423,7 +424,7 @@ void ath_beacon_tasklet(unsigned long data) } if (sc->beacon.bmisscnt != 0) { - ath_dbg(common, BSTUCK, + ath_dbg(common, ATH_DBG_BSTUCK, "resume beacon xmit after %u misses\n", sc->beacon.bmisscnt); sc->beacon.bmisscnt = 0; @@ -457,12 +458,10 @@ void ath_beacon_tasklet(unsigned long data) if (bfaddr != 0) { /* NB: cabq traffic should already be queued and primed */ ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bfaddr); - - if (!edma) - ath9k_hw_txstart(ah, sc->beacon.beaconq); + ath9k_hw_txstart(ah, sc->beacon.beaconq); sc->beacon.ast_be_xmit += bc; /* XXX per-vif? */ - if (edma) { + if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { spin_lock_bh(&sc->sc_pcu_lock); ath9k_hw_txprocdesc(ah, bf->bf_desc, (void *)&ts); spin_unlock_bh(&sc->sc_pcu_lock); @@ -542,7 +541,7 @@ static void ath_beacon_config_sta(struct ath_softc *sc, /* No need to configure beacon if we are not associated */ if (!common->curaid) { - ath_dbg(common, BEACON, + ath_dbg(common, ATH_DBG_BEACON, "STA is not yet associated..skipping beacon config\n"); return; } @@ -632,8 +631,8 @@ static void ath_beacon_config_sta(struct ath_softc *sc, /* TSF out of range threshold fixed at 1 second */ bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD; - ath_dbg(common, BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu); - ath_dbg(common, BEACON, + ath_dbg(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu); + ath_dbg(common, ATH_DBG_BEACON, "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n", bs.bs_bmissthreshold, bs.bs_sleepduration, bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext); @@ -661,7 +660,8 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc, tsf = roundup(ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE), intval); nexttbtt = tsf + intval; - ath_dbg(common, BEACON, "IBSS nexttbtt %u intval %u (%u)\n", + ath_dbg(common, ATH_DBG_BEACON, + "IBSS nexttbtt %u intval %u (%u)\n", nexttbtt, intval, conf->beacon_interval); /* @@ -699,8 +699,9 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc, (sc->nbcnvifs > 1) && (vif->type == NL80211_IFTYPE_AP) && (cur_conf->beacon_interval != bss_conf->beacon_int)) { - ath_dbg(common, CONFIG, - "Changing beacon interval of multiple AP interfaces !\n"); + ath_dbg(common, ATH_DBG_CONFIG, + "Changing beacon interval of multiple \ + AP interfaces !\n"); return false; } /* @@ -709,7 +710,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc, */ if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) && (vif->type != NL80211_IFTYPE_AP)) { - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "STA vif's beacon not allowed on AP mode\n"); return false; } @@ -721,7 +722,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc, (vif->type == NL80211_IFTYPE_STATION) && (sc->sc_flags & SC_OP_BEACONS) && !avp->primary_sta_vif) { - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "Beacon already configured for a station interface\n"); return false; } @@ -801,7 +802,8 @@ void ath_set_beacon(struct ath_softc *sc) ath_beacon_config_sta(sc, cur_conf); break; default: - ath_dbg(common, CONFIG, "Unsupported beaconing mode\n"); + ath_dbg(common, ATH_DBG_CONFIG, + "Unsupported beaconing mode\n"); return; } diff --git a/trunk/drivers/net/wireless/ath/ath9k/btcoex.c b/trunk/drivers/net/wireless/ath/ath9k/btcoex.c index a6712a95d76a..012263968d64 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/btcoex.c +++ b/trunk/drivers/net/wireless/ath/ath9k/btcoex.c @@ -21,7 +21,7 @@ enum ath_bt_mode { 
ATH_BT_COEX_MODE_LEGACY, /* legacy rx_clear mode */ ATH_BT_COEX_MODE_UNSLOTTED, /* untimed/unslotted mode */ ATH_BT_COEX_MODE_SLOTTED, /* slotted mode */ - ATH_BT_COEX_MODE_DISABLED, /* coexistence disabled */ + ATH_BT_COEX_MODE_DISALBED, /* coexistence disabled */ }; struct ath_btcoex_config { @@ -36,20 +36,6 @@ struct ath_btcoex_config { bool bt_hold_rx_clear; }; -static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX] - [AR9300_NUM_WLAN_WEIGHTS] = { - { 0xfffffff0, 0xfffffff0, 0xfffffff0, 0xfffffff0 }, /* STOMP_ALL */ - { 0x88888880, 0x88888880, 0x88888880, 0x88888880 }, /* STOMP_LOW */ - { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* STOMP_NONE */ -}; - -static const u32 ar9462_wlan_weights[ATH_BTCOEX_STOMP_MAX] - [AR9300_NUM_WLAN_WEIGHTS] = { - { 0x01017d01, 0x41414101, 0x41414101, 0x41414141 }, /* STOMP_ALL */ - { 0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b }, /* STOMP_LOW */ - { 0x01017d01, 0x01010101, 0x01010101, 0x01010101 }, /* STOMP_NONE */ - { 0x01017d01, 0x013b0101, 0x3b3b0101, 0x3b3b013b }, /* STOMP_LOW_FTP */ -}; void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum) { @@ -68,9 +54,6 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum) u32 i, idx; bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity; - if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE) - return; - if (AR_SREV_9300_20_OR_LATER(ah)) rxclear_polarity = !ath_bt_config.bt_rxclear_polarity; @@ -102,9 +85,6 @@ void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah) { struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; - if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE) - return; - /* connect bt_active to baseband */ REG_CLR_BIT(ah, AR_GPIO_INPUT_EN_VAL, (AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF | @@ -127,9 +107,6 @@ void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah) { struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; - if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE) - return; - /* btcoex 3-wire */ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, (AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB | @@ -156,9 +133,6 @@ static void ath9k_hw_btcoex_enable_2wire(struct ath_hw *ah) { struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; - if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE) - return; - /* Configure the desired GPIO port for TX_FRAME output */ ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio, AR_GPIO_OUTPUT_MUX_AS_TX_FRAME); @@ -170,9 +144,6 @@ void ath9k_hw_btcoex_set_weight(struct ath_hw *ah, { struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; - if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE) - return; - btcoex_hw->bt_coex_weights = SM(bt_weight, AR_BTCOEX_BT_WGHT) | SM(wlan_weight, AR_BTCOEX_WL_WGHT); } @@ -181,26 +152,27 @@ EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight); static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah) { - struct ath_btcoex_hw *btcoex = &ah->btcoex_hw; + struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; u32 val; - int i; /* * Program coex mode and weight registers to * enable coex 3-wire */ - REG_WRITE(ah, AR_BT_COEX_MODE, btcoex->bt_coex_mode); - REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2); + REG_WRITE(ah, AR_BT_COEX_MODE, btcoex_hw->bt_coex_mode); + REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_hw->bt_coex_mode2); if (AR_SREV_9300_20_OR_LATER(ah)) { - REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, btcoex->wlan_weight[0]); - REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, btcoex->wlan_weight[1]); - for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++) - REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS(i), - btcoex->bt_weight[i]); + REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, 
ah->bt_coex_wlan_weight[0]); + REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, ah->bt_coex_wlan_weight[1]); + REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS0, ah->bt_coex_bt_weight[0]); + REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS1, ah->bt_coex_bt_weight[1]); + REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS2, ah->bt_coex_bt_weight[2]); + REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS3, ah->bt_coex_bt_weight[3]); + } else - REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex->bt_coex_weights); + REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_hw->bt_coex_weights); @@ -213,39 +185,23 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah) REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1); REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0); - ath9k_hw_cfg_output(ah, btcoex->wlanactive_gpio, + ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio, AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL); } -static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah) -{ - struct ath_btcoex_hw *btcoex = &ah->btcoex_hw; - int i; - - for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++) - REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i), - btcoex->wlan_weight[i]); - - REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1); - btcoex->enabled = true; -} - void ath9k_hw_btcoex_enable(struct ath_hw *ah) { struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; - switch (ath9k_hw_get_btcoex_scheme(ah)) { + switch (btcoex_hw->scheme) { case ATH_BTCOEX_CFG_NONE: - return; + break; case ATH_BTCOEX_CFG_2WIRE: ath9k_hw_btcoex_enable_2wire(ah); break; case ATH_BTCOEX_CFG_3WIRE: ath9k_hw_btcoex_enable_3wire(ah); break; - case ATH_BTCOEX_CFG_MCI: - ath9k_hw_btcoex_enable_mci(ah); - return; } REG_RMW(ah, AR_GPIO_PDPU, @@ -259,18 +215,7 @@ EXPORT_SYMBOL(ath9k_hw_btcoex_enable); void ath9k_hw_btcoex_disable(struct ath_hw *ah) { struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; - int i; - - if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE) - return; - btcoex_hw->enabled = false; - if (btcoex_hw->scheme == ATH_BTCOEX_CFG_MCI) { - ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE); - for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++) - REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i), - btcoex_hw->wlan_weight[i]); - } ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0); ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio, @@ -283,27 +228,49 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah) if (AR_SREV_9300_20_OR_LATER(ah)) { REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, 0); REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, 0); - for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++) - REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS(i), 0); + REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS0, 0); + REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS1, 0); + REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS2, 0); + REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS3, 0); } else REG_WRITE(ah, AR_BT_COEX_WEIGHT, 0); } + + ah->btcoex_hw.enabled = false; } EXPORT_SYMBOL(ath9k_hw_btcoex_disable); static void ar9003_btcoex_bt_stomp(struct ath_hw *ah, enum ath_stomp_type stomp_type) { - struct ath_btcoex_hw *btcoex = &ah->btcoex_hw; - const u32 *weight = AR_SREV_9462(ah) ? 
ar9003_wlan_weights[stomp_type] : - ar9462_wlan_weights[stomp_type]; - int i; - - for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) { - btcoex->bt_weight[i] = AR9300_BT_WGHT; - btcoex->wlan_weight[i] = weight[i]; + ah->bt_coex_bt_weight[0] = AR9300_BT_WGHT; + ah->bt_coex_bt_weight[1] = AR9300_BT_WGHT; + ah->bt_coex_bt_weight[2] = AR9300_BT_WGHT; + ah->bt_coex_bt_weight[3] = AR9300_BT_WGHT; + + + switch (stomp_type) { + case ATH_BTCOEX_STOMP_ALL: + ah->bt_coex_wlan_weight[0] = AR9300_STOMP_ALL_WLAN_WGHT0; + ah->bt_coex_wlan_weight[1] = AR9300_STOMP_ALL_WLAN_WGHT1; + break; + case ATH_BTCOEX_STOMP_LOW: + ah->bt_coex_wlan_weight[0] = AR9300_STOMP_LOW_WLAN_WGHT0; + ah->bt_coex_wlan_weight[1] = AR9300_STOMP_LOW_WLAN_WGHT1; + break; + case ATH_BTCOEX_STOMP_NONE: + ah->bt_coex_wlan_weight[0] = AR9300_STOMP_NONE_WLAN_WGHT0; + ah->bt_coex_wlan_weight[1] = AR9300_STOMP_NONE_WLAN_WGHT1; + break; + + default: + ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX, + "Invalid Stomptype\n"); + break; } + + ath9k_hw_btcoex_enable(ah); } /* @@ -312,9 +279,6 @@ static void ar9003_btcoex_bt_stomp(struct ath_hw *ah, void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah, enum ath_stomp_type stomp_type) { - if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE) - return; - if (AR_SREV_9300_20_OR_LATER(ah)) { ar9003_btcoex_bt_stomp(ah, stomp_type); return; @@ -334,8 +298,11 @@ void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah, AR_STOMP_NONE_WLAN_WGHT); break; default: - ath_dbg(ath9k_hw_common(ah), BTCOEX, "Invalid Stomptype\n"); + ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX, + "Invalid Stomptype\n"); break; } + + ath9k_hw_btcoex_enable(ah); } EXPORT_SYMBOL(ath9k_hw_btcoex_bt_stomp); diff --git a/trunk/drivers/net/wireless/ath/ath9k/btcoex.h b/trunk/drivers/net/wireless/ath/ath9k/btcoex.h index 278361c867ca..234f77689b14 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/btcoex.h +++ b/trunk/drivers/net/wireless/ath/ath9k/btcoex.h @@ -36,57 +36,22 @@ #define ATH_BT_CNT_THRESHOLD 3 #define ATH_BT_CNT_SCAN_THRESHOLD 15 -#define AR9300_NUM_BT_WEIGHTS 4 -#define AR9300_NUM_WLAN_WEIGHTS 4 /* Defines the BT AR_BT_COEX_WGHT used */ enum ath_stomp_type { + ATH_BTCOEX_NO_STOMP, ATH_BTCOEX_STOMP_ALL, ATH_BTCOEX_STOMP_LOW, - ATH_BTCOEX_STOMP_NONE, - ATH_BTCOEX_STOMP_LOW_FTP, - ATH_BTCOEX_STOMP_MAX + ATH_BTCOEX_STOMP_NONE }; enum ath_btcoex_scheme { ATH_BTCOEX_CFG_NONE, ATH_BTCOEX_CFG_2WIRE, ATH_BTCOEX_CFG_3WIRE, - ATH_BTCOEX_CFG_MCI, -}; - -struct ath9k_hw_mci { - u32 raw_intr; - u32 rx_msg_intr; - u32 cont_status; - u32 gpm_addr; - u32 gpm_len; - u32 gpm_idx; - u32 sched_addr; - u32 wlan_channels[4]; - u32 wlan_cal_seq; - u32 wlan_cal_done; - u32 config; - u8 *gpm_buf; - u8 *sched_buf; - bool ready; - bool update_2g5g; - bool is_2g; - bool query_bt; - bool unhalt_bt_gpm; /* need send UNHALT */ - bool halted_bt_gpm; /* HALT sent */ - bool need_flush_btinfo; - bool bt_version_known; - bool wlan_channels_update; - u8 wlan_ver_major; - u8 wlan_ver_minor; - u8 bt_ver_major; - u8 bt_ver_minor; - u8 bt_state; }; struct ath_btcoex_hw { enum ath_btcoex_scheme scheme; - struct ath9k_hw_mci mci; bool enabled; u8 wlanactive_gpio; u8 btactive_gpio; @@ -94,8 +59,6 @@ struct ath_btcoex_hw { u32 bt_coex_mode; /* Register setting for AR_BT_COEX_MODE */ u32 bt_coex_weights; /* Register setting for AR_BT_COEX_WEIGHT */ u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */ - u32 bt_weight[AR9300_NUM_BT_WEIGHTS]; - u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS]; }; void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah); diff --git 
a/trunk/drivers/net/wireless/ath/ath9k/calib.c b/trunk/drivers/net/wireless/ath/ath9k/calib.c index 172e33db7f4c..99538810a312 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/calib.c +++ b/trunk/drivers/net/wireless/ath/ath9k/calib.c @@ -116,7 +116,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah, if (h[i].privNF > limit->max) { high_nf_mid = true; - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "NFmid[%d] (%d) > MAX (%d), %s\n", i, h[i].privNF, limit->max, (cal->nfcal_interference ? @@ -199,7 +199,8 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah) return true; if (currCal->calState != CAL_DONE) { - ath_dbg(common, CALIBRATE, "Calibration state incorrect, %d\n", + ath_dbg(common, ATH_DBG_CALIBRATE, + "Calibration state incorrect, %d\n", currCal->calState); return true; } @@ -207,7 +208,8 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah) if (!(ah->supp_cals & currCal->calData->calType)) return true; - ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n", + ath_dbg(common, ATH_DBG_CALIBRATE, + "Resetting Cal %d state for channel %u\n", currCal->calData->calType, conf->channel->center_freq); ah->caldata->CalValid &= ~currCal->calData->calType; @@ -300,7 +302,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) * noisefloor until the next calibration timer. */ if (j == 10000) { - ath_dbg(common, ANY, + ath_dbg(common, ATH_DBG_ANY, "Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n", REG_READ(ah, AR_PHY_AGC_CONTROL)); return; @@ -342,17 +344,17 @@ static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf) if (!nf[i]) continue; - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "NF calibrated [%s] [chain %d] is %d\n", (i >= 3 ? "ext" : "ctl"), i % 3, nf[i]); if (nf[i] > ATH9K_NF_TOO_HIGH) { - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "NF[%d] (%d) > MAX (%d), correcting to MAX\n", i, nf[i], ATH9K_NF_TOO_HIGH); nf[i] = limit->max; } else if (nf[i] < limit->min) { - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "NF[%d] (%d) < MIN (%d), correcting to NOM\n", i, nf[i], limit->min); nf[i] = limit->nominal; @@ -371,7 +373,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan) chan->channelFlags &= (~CHANNEL_CW_INT); if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) { - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "NF did not complete in calibration window\n"); return false; } @@ -381,7 +383,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan) nf = nfarray[0]; if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh) && nf > nfThresh) { - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "noise floor failed detected; detected %d, threshold %d\n", nf, nfThresh); chan->channelFlags |= CHANNEL_CW_INT; diff --git a/trunk/drivers/net/wireless/ath/ath9k/debug.c b/trunk/drivers/net/wireless/ath/ath9k/debug.c index 68d972bf232d..2741203e803f 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/debug.c +++ b/trunk/drivers/net/wireless/ath/ath9k/debug.c @@ -709,29 +709,24 @@ static ssize_t read_file_stations(struct file *file, char __user *user_buf, len += snprintf(buf + len, size - len, "Stations:\n" - " tid: addr sched paused buf_q-empty an ac baw\n" + " tid: addr sched paused buf_q-empty an ac\n" " ac: addr sched tid_q-empty txq\n"); spin_lock(&sc->nodes_lock); list_for_each_entry(an, &sc->nodes, list) { - unsigned short ma = an->maxampdu; - if (ma == 0) - ma = 65535; /* see ath_lookup_rate */ 
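The debugfs handlers in this file all follow the same append-with-snprintf pattern; the stripped-down user-space sketch below mirrors that shape, with the buffer size and station fields chosen purely for illustration:

	#include <stdio.h>
	#include <string.h>

	struct sta_info {
		const char *addr;
		unsigned int max_ampdu;
	};

	/*
	 * Append formatted lines into 'buf' of 'size' bytes, tracking 'len'
	 * and stopping once the buffer is full -- the same shape as the
	 * read_file_*() helpers above.
	 */
	static size_t format_stations(char *buf, size_t size,
				      const struct sta_info *sta, size_t nsta)
	{
		size_t len = 0, i;

		len += snprintf(buf + len, size - len, "Stations:\n");
		for (i = 0; i < nsta && len < size; i++)
			len += snprintf(buf + len, size - len,
					" sta: %s max-ampdu: %u\n",
					sta[i].addr, sta[i].max_ampdu);
		return len < size ? len : size;
	}

	int main(void)
	{
		struct sta_info stations[] = {
			{ "00:11:22:33:44:55", 65535 },
			{ "66:77:88:99:aa:bb", 32768 },
		};
		char buf[256];

		format_stations(buf, sizeof(buf), stations, 2);
		fputs(buf, stdout);
		return 0;
	}
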
len += snprintf(buf + len, size - len, - "iface: %pM sta: %pM max-ampdu: %hu mpdu-density: %uus\n", - an->vif->addr, an->sta->addr, ma, - (unsigned int)(an->mpdudensity)); + "%pM\n", an->sta->addr); if (len >= size) goto done; for (q = 0; q < WME_NUM_TID; q++) { struct ath_atx_tid *tid = &(an->tid[q]); len += snprintf(buf + len, size - len, - " tid: %p %s %s %i %p %p %hu\n", + " tid: %p %s %s %i %p %p\n", tid, tid->sched ? "sched" : "idle", tid->paused ? "paused" : "running", skb_queue_empty(&tid->buf_q), - tid->an, tid->ac, tid->baw_size); + tid->an, tid->ac); if (len >= size) goto done; } @@ -856,7 +851,7 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len; if (bf_isampdu(bf)) { - if (flags & ATH_TX_ERROR) + if (flags & ATH_TX_BAR) TX_STAT_INC(qnum, a_xretries); else TX_STAT_INC(qnum, a_completed); @@ -1630,9 +1625,6 @@ int ath9k_init_debug(struct ath_hw *ah) debugfs_create_file("debug", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_debug); #endif - - ath9k_dfs_init_debug(sc); - debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_dma); debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy, sc, diff --git a/trunk/drivers/net/wireless/ath/ath9k/debug.h b/trunk/drivers/net/wireless/ath/ath9k/debug.h index 776a24ada600..356352ac2d6e 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/debug.h +++ b/trunk/drivers/net/wireless/ath/ath9k/debug.h @@ -19,7 +19,6 @@ #include "hw.h" #include "rc.h" -#include "dfs_debug.h" struct ath_txq; struct ath_buf; @@ -188,7 +187,6 @@ struct ath_stats { struct ath_interrupt_stats istats; struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES]; struct ath_rx_stats rxstats; - struct ath_dfs_stats dfs_stats; u32 reset[__RESET_TYPE_MAX]; }; diff --git a/trunk/drivers/net/wireless/ath/ath9k/dfs.c b/trunk/drivers/net/wireless/ath/ath9k/dfs.c deleted file mode 100644 index f4f56aff1e9d..000000000000 --- a/trunk/drivers/net/wireless/ath/ath9k/dfs.c +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Copyright (c) 2008-2011 Atheros Communications Inc. - * Copyright (c) 2011 Neratec Solutions AG - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#include "hw.h" -#include "hw-ops.h" -#include "ath9k.h" -#include "dfs.h" -#include "dfs_debug.h" - -/* - * TODO: move into or synchronize this with generic header - * as soon as IF is defined - */ -struct dfs_radar_pulse { - u16 freq; - u64 ts; - u32 width; - u8 rssi; -}; - -/* internal struct to pass radar data */ -struct ath_radar_data { - u8 pulse_bw_info; - u8 rssi; - u8 ext_rssi; - u8 pulse_length_ext; - u8 pulse_length_pri; -}; - -/* convert pulse duration to usecs, considering clock mode */ -static u32 dur_to_usecs(struct ath_hw *ah, u32 dur) -{ - const u32 AR93X_NSECS_PER_DUR = 800; - const u32 AR93X_NSECS_PER_DUR_FAST = (8000 / 11); - u32 nsecs; - - if (IS_CHAN_A_FAST_CLOCK(ah, ah->curchan)) - nsecs = dur * AR93X_NSECS_PER_DUR_FAST; - else - nsecs = dur * AR93X_NSECS_PER_DUR; - - return (nsecs + 500) / 1000; -} - -#define PRI_CH_RADAR_FOUND 0x01 -#define EXT_CH_RADAR_FOUND 0x02 -static bool -ath9k_postprocess_radar_event(struct ath_softc *sc, - struct ath_radar_data *are, - struct dfs_radar_pulse *drp) -{ - u8 rssi; - u16 dur; - - ath_dbg(ath9k_hw_common(sc->sc_ah), DFS, - "pulse_bw_info=0x%x, pri,ext len/rssi=(%u/%u, %u/%u)\n", - are->pulse_bw_info, - are->pulse_length_pri, are->rssi, - are->pulse_length_ext, are->ext_rssi); - - /* - * Only the last 2 bits of the BW info are relevant, they indicate - * which channel the radar was detected in. - */ - are->pulse_bw_info &= 0x03; - - switch (are->pulse_bw_info) { - case PRI_CH_RADAR_FOUND: - /* radar in ctrl channel */ - dur = are->pulse_length_pri; - DFS_STAT_INC(sc, pri_phy_errors); - /* - * cannot use ctrl channel RSSI - * if extension channel is stronger - */ - rssi = (are->ext_rssi >= (are->rssi + 3)) ? 0 : are->rssi; - break; - case EXT_CH_RADAR_FOUND: - /* radar in extension channel */ - dur = are->pulse_length_ext; - DFS_STAT_INC(sc, ext_phy_errors); - /* - * cannot use extension channel RSSI - * if control channel is stronger - */ - rssi = (are->rssi >= (are->ext_rssi + 12)) ? 0 : are->ext_rssi; - break; - case (PRI_CH_RADAR_FOUND | EXT_CH_RADAR_FOUND): - /* - * Conducted testing, when pulse is on DC, both pri and ext - * durations are reported to be same - * - * Radiated testing, when pulse is on DC, different pri and - * ext durations are reported, so take the larger of the two - */ - if (are->pulse_length_ext >= are->pulse_length_pri) - dur = are->pulse_length_ext; - else - dur = are->pulse_length_pri; - DFS_STAT_INC(sc, dc_phy_errors); - - /* when both are present use stronger one */ - rssi = (are->rssi < are->ext_rssi) ? 
are->ext_rssi : are->rssi; - break; - default: - /* - * Bogus bandwidth info was received in descriptor, - * so ignore this PHY error - */ - DFS_STAT_INC(sc, bwinfo_discards); - return false; - } - - if (rssi == 0) { - DFS_STAT_INC(sc, rssi_discards); - return false; - } - - /* - * TODO: check chirping pulses - * checks for chirping are dependent on the DFS regulatory domain - * used, which is yet TBD - */ - - /* convert duration to usecs */ - drp->width = dur_to_usecs(sc->sc_ah, dur); - drp->rssi = rssi; - - DFS_STAT_INC(sc, pulses_detected); - return true; -} -#undef PRI_CH_RADAR_FOUND -#undef EXT_CH_RADAR_FOUND - -/* - * DFS: check PHY-error for radar pulse and feed the detector - */ -void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data, - struct ath_rx_status *rs, u64 mactime) -{ - struct ath_radar_data ard; - u16 datalen; - char *vdata_end; - struct dfs_radar_pulse drp; - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); - - if ((!(rs->rs_phyerr != ATH9K_PHYERR_RADAR)) && - (!(rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT))) { - ath_dbg(common, DFS, - "Error: rs_phyer=0x%x not a radar error\n", - rs->rs_phyerr); - return; - } - - datalen = rs->rs_datalen; - if (datalen == 0) { - DFS_STAT_INC(sc, datalen_discards); - return; - } - - ard.rssi = rs->rs_rssi_ctl0; - ard.ext_rssi = rs->rs_rssi_ext0; - - /* - * hardware stores this as 8 bit signed value. - * we will cap it at 0 if it is a negative number - */ - if (ard.rssi & 0x80) - ard.rssi = 0; - if (ard.ext_rssi & 0x80) - ard.ext_rssi = 0; - - vdata_end = (char *)data + datalen; - ard.pulse_bw_info = vdata_end[-1]; - ard.pulse_length_ext = vdata_end[-2]; - ard.pulse_length_pri = vdata_end[-3]; - - ath_dbg(common, DFS, - "bw_info=%d, length_pri=%d, length_ext=%d, " - "rssi_pri=%d, rssi_ext=%d\n", - ard.pulse_bw_info, ard.pulse_length_pri, ard.pulse_length_ext, - ard.rssi, ard.ext_rssi); - - drp.freq = ah->curchan->channel; - drp.ts = mactime; - if (ath9k_postprocess_radar_event(sc, &ard, &drp)) { - static u64 last_ts; - ath_dbg(common, DFS, - "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, " - "width=%d, rssi=%d, delta_ts=%llu\n", - drp.freq, drp.ts, drp.width, drp.rssi, drp.ts-last_ts); - last_ts = drp.ts; - /* - * TODO: forward pulse to pattern detector - * - * ieee80211_add_radar_pulse(drp.freq, drp.ts, - * drp.width, drp.rssi); - */ - } -} diff --git a/trunk/drivers/net/wireless/ath/ath9k/dfs.h b/trunk/drivers/net/wireless/ath/ath9k/dfs.h deleted file mode 100644 index c2412857f122..000000000000 --- a/trunk/drivers/net/wireless/ath/ath9k/dfs.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2008-2011 Atheros Communications Inc. - * Copyright (c) 2011 Neratec Solutions AG - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
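The PHY-error handler above pulls the radar descriptor out of the last three bytes of the RX payload and clamps negative RSSI readings to zero; a minimal sketch of that unpacking follows, with the struct and field names chosen only for illustration:

	#include <stdint.h>
	#include <stdio.h>

	struct radar_info {
		uint8_t pulse_bw_info;
		uint8_t pulse_length_ext;
		uint8_t pulse_length_pri;
	};

	/*
	 * The hardware reports RSSI as an 8-bit signed value; readings with
	 * the sign bit set are treated as zero.
	 */
	static uint8_t clamp_rssi(uint8_t raw)
	{
		return (raw & 0x80) ? 0 : raw;
	}

	/* Unpack the trailing three bytes of the PHY-error payload. */
	static int parse_radar_tail(const uint8_t *data, size_t datalen,
				    struct radar_info *out)
	{
		if (datalen < 3)
			return -1;
		out->pulse_bw_info    = data[datalen - 1];
		out->pulse_length_ext = data[datalen - 2];
		out->pulse_length_pri = data[datalen - 3];
		return 0;
	}

	int main(void)
	{
		uint8_t payload[] = { 0x00, 0x00, 0x19, 0x22, 0x01 };
		struct radar_info ri;

		if (!parse_radar_tail(payload, sizeof(payload), &ri))
			printf("bw=%u pri=%u ext=%u rssi=%u\n",
			       ri.pulse_bw_info, ri.pulse_length_pri,
			       ri.pulse_length_ext, clamp_rssi(0x85));
		return 0;
	}
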
- */ - -#ifndef ATH9K_DFS_H -#define ATH9K_DFS_H - -#if defined(CONFIG_ATH9K_DFS_CERTIFIED) -/** - * ath9k_dfs_process_phyerr - process radar PHY error - * @sc: ath_softc - * @data: RX payload data - * @rs: RX status after processing descriptor - * @mactime: receive time - * - * This function is called whenever the HW DFS module detects a radar - * pulse and reports it as a PHY error. - * - * The radar information provided as raw payload data is validated and - * filtered for false pulses. Events passing all tests are forwarded to - * the upper layer for pattern detection. - */ -void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data, - struct ath_rx_status *rs, u64 mactime); -#else -static inline void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data, - struct ath_rx_status *rs, u64 mactime) { } -#endif - -#endif /* ATH9K_DFS_H */ diff --git a/trunk/drivers/net/wireless/ath/ath9k/dfs_debug.c b/trunk/drivers/net/wireless/ath/ath9k/dfs_debug.c deleted file mode 100644 index 106d031d834a..000000000000 --- a/trunk/drivers/net/wireless/ath/ath9k/dfs_debug.c +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (c) 2008-2011 Atheros Communications Inc. - * Copyright (c) 2011 Neratec Solutions AG - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#include -#include - -#include "ath9k.h" -#include "dfs_debug.h" - -#define ATH9K_DFS_STAT(s, p) \ - len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \ - sc->debug.stats.dfs_stats.p); - -static ssize_t read_file_dfs(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath_softc *sc = file->private_data; - struct ath9k_hw_version *hw_ver = &sc->sc_ah->hw_version; - char *buf; - unsigned int len = 0, size = 8000; - ssize_t retval = 0; - - buf = kzalloc(size, GFP_KERNEL); - if (buf == NULL) - return -ENOMEM; - - len += snprintf(buf + len, size - len, "DFS support for " - "macVersion = 0x%x, macRev = 0x%x: %s\n", - hw_ver->macVersion, hw_ver->macRev, - (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ? 
- "enabled" : "disabled"); - ATH9K_DFS_STAT("DFS pulses detected ", pulses_detected); - ATH9K_DFS_STAT("Datalen discards ", datalen_discards); - ATH9K_DFS_STAT("RSSI discards ", rssi_discards); - ATH9K_DFS_STAT("BW info discards ", bwinfo_discards); - ATH9K_DFS_STAT("Primary channel pulses ", pri_phy_errors); - ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors); - ATH9K_DFS_STAT("Dual channel pulses ", dc_phy_errors); - - if (len > size) - len = size; - - retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); - kfree(buf); - - return retval; -} - -static int ath9k_dfs_debugfs_open(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - - return 0; -} - -static const struct file_operations fops_dfs_stats = { - .read = read_file_dfs, - .open = ath9k_dfs_debugfs_open, - .owner = THIS_MODULE, - .llseek = default_llseek, -}; - -void ath9k_dfs_init_debug(struct ath_softc *sc) -{ - debugfs_create_file("dfs_stats", S_IRUSR, - sc->debug.debugfs_phy, sc, &fops_dfs_stats); -} diff --git a/trunk/drivers/net/wireless/ath/ath9k/dfs_debug.h b/trunk/drivers/net/wireless/ath/ath9k/dfs_debug.h deleted file mode 100644 index 4911724cb445..000000000000 --- a/trunk/drivers/net/wireless/ath/ath9k/dfs_debug.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright (c) 2008-2011 Atheros Communications Inc. - * Copyright (c) 2011 Neratec Solutions AG - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - - -#ifndef ATH9K_DFS_DEBUG_H -#define ATH9K_DFS_DEBUG_H - -#include "hw.h" - -/** - * struct ath_dfs_stats - DFS Statistics - * - * @pulses_detected: No. of pulses detected so far - * @datalen_discards: No. of pulses discarded due to invalid datalen - * @rssi_discards: No. of pulses discarded due to invalid RSSI - * @bwinfo_discards: No. of pulses discarded due to invalid BW info - * @pri_phy_errors: No. of pulses reported for primary channel - * @ext_phy_errors: No. of pulses reported for extension channel - * @dc_phy_errors: No. 
of pulses reported for primary + extension channel - */ -struct ath_dfs_stats { - u32 pulses_detected; - u32 datalen_discards; - u32 rssi_discards; - u32 bwinfo_discards; - u32 pri_phy_errors; - u32 ext_phy_errors; - u32 dc_phy_errors; -}; - -#if defined(CONFIG_ATH9K_DFS_DEBUGFS) - -#define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++) -void ath9k_dfs_init_debug(struct ath_softc *sc); - -#else - -#define DFS_STAT_INC(sc, c) do { } while (0) -static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { } - -#endif /* CONFIG_ATH9K_DFS_DEBUGFS */ - -#endif /* ATH9K_DFS_DEBUG_H */ diff --git a/trunk/drivers/net/wireless/ath/ath9k/eeprom.c b/trunk/drivers/net/wireless/ath/ath9k/eeprom.c index c43523233319..e46f751ab508 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/eeprom.c +++ b/trunk/drivers/net/wireless/ath/ath9k/eeprom.c @@ -305,7 +305,8 @@ void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah) regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN; break; default: - ath_dbg(common, EEPROM, "Invalid chainmask configuration\n"); + ath_dbg(common, ATH_DBG_EEPROM, + "Invalid chainmask configuration\n"); break; } } diff --git a/trunk/drivers/net/wireless/ath/ath9k/eeprom.h b/trunk/drivers/net/wireless/ath/ath9k/eeprom.h index 5ff7ab965120..49abd34be741 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/eeprom.h +++ b/trunk/drivers/net/wireless/ath/ath9k/eeprom.h @@ -249,8 +249,7 @@ enum eeprom_param { EEP_ANT_DIV_CTL1, EEP_CHAIN_MASK_REDUCE, EEP_ANTENNA_GAIN_2G, - EEP_ANTENNA_GAIN_5G, - EEP_QUICK_DROP + EEP_ANTENNA_GAIN_5G }; enum ar5416_rates { diff --git a/trunk/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/trunk/drivers/net/wireless/ath/ath9k/eeprom_4k.c index 4322ac80c203..9a7520f987f0 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/eeprom_4k.c +++ b/trunk/drivers/net/wireless/ath/ath9k/eeprom_4k.c @@ -38,7 +38,7 @@ static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) for (addr = 0; addr < SIZE_EEPROM_4K; addr++) { if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) { - ath_dbg(common, EEPROM, + ath_dbg(common, ATH_DBG_EEPROM, "Unable to read eeprom region\n"); return false; } @@ -62,7 +62,8 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) struct ath_common *common = ath9k_hw_common(ah); if (!ath9k_hw_use_flash(ah)) { - ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n"); + ath_dbg(common, ATH_DBG_EEPROM, + "Reading from EEPROM, not flash\n"); } if (common->bus_ops->ath_bus_type == ATH_USB) @@ -203,7 +204,8 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) return false; } - ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic); + ath_dbg(common, ATH_DBG_EEPROM, + "Read Magic = 0x%04X\n", magic); if (magic != AR5416_EEPROM_MAGIC) { magic2 = swab16(magic); @@ -225,7 +227,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) } } - ath_dbg(common, EEPROM, "need_swap = %s\n", + ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n", need_swap ? "True" : "False"); if (need_swap) @@ -247,7 +249,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) u32 integer; u16 word; - ath_dbg(common, EEPROM, + ath_dbg(common, ATH_DBG_EEPROM, "EEPROM Endianness is not native.. 
Changing\n"); word = swab16(eep->baseEepHeader.length); @@ -433,11 +435,11 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah, reg32 = get_unaligned_le32(&pdadcValues[4 * j]); REG_WRITE(ah, regOffset, reg32); - ath_dbg(common, EEPROM, + ath_dbg(common, ATH_DBG_EEPROM, "PDADC (%d,%4x): %4.4x %8.8x\n", i, regChainOffset, regOffset, reg32); - ath_dbg(common, EEPROM, + ath_dbg(common, ATH_DBG_EEPROM, "PDADC: Chain %d | " "PDADC %3d Value %3d | " "PDADC %3d Value %3d | " @@ -471,7 +473,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah, int i; u16 twiceMinEdgePower; - u16 twiceMaxEdgePower; + u16 twiceMaxEdgePower = MAX_RATE_POWER; u16 scaledPower = 0, minCtlPower; u16 numCtlModes; const u16 *pCtlMode; @@ -540,7 +542,9 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah, else freq = centers.ctl_center; - twiceMaxEdgePower = MAX_RATE_POWER; + if (ah->eep_ops->get_eeprom_ver(ah) == 14 && + ah->eep_ops->get_eeprom_rev(ah) <= 2) + twiceMaxEdgePower = MAX_RATE_POWER; for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) && pEepData->ctlIndex[i]; i++) { @@ -1077,7 +1081,8 @@ static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) u16 spur_val = AR_NO_SPUR; - ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n", + ath_dbg(common, ATH_DBG_ANI, + "Getting spur idx:%d is2Ghz:%d val:%x\n", i, is2GHz, ah->config.spurchans[i][is2GHz]); switch (ah->config.spurmode) { @@ -1085,8 +1090,8 @@ static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) break; case SPUR_ENABLE_IOCTL: spur_val = ah->config.spurchans[i][is2GHz]; - ath_dbg(common, ANI, "Getting spur val from new loc. %d\n", - spur_val); + ath_dbg(common, ATH_DBG_ANI, + "Getting spur val from new loc. %d\n", spur_val); break; case SPUR_ENABLE_EEPROM: spur_val = EEP_MAP4K_SPURCHAN; diff --git a/trunk/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/trunk/drivers/net/wireless/ath/ath9k/eeprom_9287.c index f272236d8053..4f5c50a87ce3 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/trunk/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -41,7 +41,7 @@ static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) { if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) { - ath_dbg(common, EEPROM, + ath_dbg(common, ATH_DBG_EEPROM, "Unable to read eeprom region\n"); return false; } @@ -66,7 +66,8 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) struct ath_common *common = ath9k_hw_common(ah); if (!ath9k_hw_use_flash(ah)) { - ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n"); + ath_dbg(common, ATH_DBG_EEPROM, + "Reading from EEPROM, not flash\n"); } if (common->bus_ops->ath_bus_type == ATH_USB) @@ -196,7 +197,8 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah) return false; } - ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic); + ath_dbg(common, ATH_DBG_EEPROM, + "Read Magic = 0x%04X\n", magic); if (magic != AR5416_EEPROM_MAGIC) { magic2 = swab16(magic); @@ -218,7 +220,7 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah) } } - ath_dbg(common, EEPROM, "need_swap = %s\n", + ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n", need_swap ? 
"True" : "False"); if (need_swap) @@ -567,7 +569,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah, #define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 #define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10 - u16 twiceMaxEdgePower; + u16 twiceMaxEdgePower = MAX_RATE_POWER; int i; struct cal_ctl_data_ar9287 *rep; struct cal_target_power_leg targetPowerOfdm = {0, {0, 0, 0, 0} }, @@ -667,7 +669,6 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah, else freq = centers.ctl_center; - twiceMaxEdgePower = MAX_RATE_POWER; /* Walk through the CTL indices stored in EEPROM */ for (i = 0; (i < AR9287_NUM_CTLS) && pEepData->ctlIndex[i]; i++) { struct cal_ctl_edges *pRdEdgesPower; @@ -1039,7 +1040,8 @@ static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah, struct ath_common *common = ath9k_hw_common(ah); u16 spur_val = AR_NO_SPUR; - ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n", + ath_dbg(common, ATH_DBG_ANI, + "Getting spur idx:%d is2Ghz:%d val:%x\n", i, is2GHz, ah->config.spurchans[i][is2GHz]); switch (ah->config.spurmode) { @@ -1047,8 +1049,8 @@ static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah, break; case SPUR_ENABLE_IOCTL: spur_val = ah->config.spurchans[i][is2GHz]; - ath_dbg(common, ANI, "Getting spur val from new loc. %d\n", - spur_val); + ath_dbg(common, ATH_DBG_ANI, + "Getting spur val from new loc. %d\n", spur_val); break; case SPUR_ENABLE_EEPROM: spur_val = EEP_MAP9287_SPURCHAN; diff --git a/trunk/drivers/net/wireless/ath/ath9k/eeprom_def.c b/trunk/drivers/net/wireless/ath/ath9k/eeprom_def.c index 619b95d764ff..81e629671679 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/trunk/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -121,7 +121,8 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah) struct ath_common *common = ath9k_hw_common(ah); if (!ath9k_hw_use_flash(ah)) { - ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n"); + ath_dbg(common, ATH_DBG_EEPROM, + "Reading from EEPROM, not flash\n"); } if (common->bus_ops->ath_bus_type == ATH_USB) @@ -278,7 +279,8 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) } if (!ath9k_hw_use_flash(ah)) { - ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic); + ath_dbg(common, ATH_DBG_EEPROM, + "Read Magic = 0x%04X\n", magic); if (magic != AR5416_EEPROM_MAGIC) { magic2 = swab16(magic); @@ -301,7 +303,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) } } - ath_dbg(common, EEPROM, "need_swap = %s\n", + ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n", need_swap ? "True" : "False"); if (need_swap) @@ -323,7 +325,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) u32 integer, j; u16 word; - ath_dbg(common, EEPROM, + ath_dbg(common, ATH_DBG_EEPROM, "EEPROM Endianness is not native.. 
Changing.\n"); word = swab16(eep->baseEepHeader.length); @@ -383,7 +385,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) if ((ah->hw_version.devid == AR9280_DEVID_PCI) && ((eep->baseEepHeader.version & 0xff) > 0x0a) && (eep->baseEepHeader.pwdclkind == 0)) - ah->need_an_top2_fixup = true; + ah->need_an_top2_fixup = 1; if ((common->bus_ops->ath_bus_type == ATH_USB) && (AR_SREV_9280(ah))) @@ -963,12 +965,15 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah, reg32 = get_unaligned_le32(&pdadcValues[4 * j]); REG_WRITE(ah, regOffset, reg32); - ath_dbg(common, EEPROM, + ath_dbg(common, ATH_DBG_EEPROM, "PDADC (%d,%4x): %4.4x %8.8x\n", i, regChainOffset, regOffset, reg32); - ath_dbg(common, EEPROM, - "PDADC: Chain %d | PDADC %3d Value %3d | PDADC %3d Value %3d | PDADC %3d Value %3d | PDADC %3d Value %3d |\n", + ath_dbg(common, ATH_DBG_EEPROM, + "PDADC: Chain %d | PDADC %3d " + "Value %3d | PDADC %3d Value %3d | " + "PDADC %3d Value %3d | PDADC %3d " + "Value %3d |\n", i, 4 * j, pdadcValues[4 * j], 4 * j + 1, pdadcValues[4 * j + 1], 4 * j + 2, pdadcValues[4 * j + 2], @@ -995,7 +1000,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah, #define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */ struct ar5416_eeprom_def *pEepData = &ah->eeprom.def; - u16 twiceMaxEdgePower; + u16 twiceMaxEdgePower = MAX_RATE_POWER; int i; struct cal_ctl_data *rep; struct cal_target_power_leg targetPowerOfdm, targetPowerCck = { @@ -1116,7 +1121,9 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah, else freq = centers.ctl_center; - twiceMaxEdgePower = MAX_RATE_POWER; + if (ah->eep_ops->get_eeprom_ver(ah) == 14 && + ah->eep_ops->get_eeprom_rev(ah) <= 2) + twiceMaxEdgePower = MAX_RATE_POWER; for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; i++) { if ((((cfgCtl & ~CTL_MODE_M) | @@ -1273,7 +1280,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah, regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN; break; default: - ath_dbg(ath9k_hw_common(ah), EEPROM, + ath_dbg(ath9k_hw_common(ah), ATH_DBG_EEPROM, "Invalid chainmask configuration\n"); break; } @@ -1391,7 +1398,8 @@ static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) u16 spur_val = AR_NO_SPUR; - ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n", + ath_dbg(common, ATH_DBG_ANI, + "Getting spur idx:%d is2Ghz:%d val:%x\n", i, is2GHz, ah->config.spurchans[i][is2GHz]); switch (ah->config.spurmode) { @@ -1399,8 +1407,8 @@ static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) break; case SPUR_ENABLE_IOCTL: spur_val = ah->config.spurchans[i][is2GHz]; - ath_dbg(common, ANI, "Getting spur val from new loc. %d\n", - spur_val); + ath_dbg(common, ATH_DBG_ANI, + "Getting spur val from new loc. 
%d\n", spur_val); break; case SPUR_ENABLE_EEPROM: spur_val = EEP_DEF_SPURCHAN; diff --git a/trunk/drivers/net/wireless/ath/ath9k/gpio.c b/trunk/drivers/net/wireless/ath/ath9k/gpio.c index 597c84e31adb..655576c8fdab 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/gpio.c +++ b/trunk/drivers/net/wireless/ath/ath9k/gpio.c @@ -130,12 +130,12 @@ static void ath_detect_bt_priority(struct ath_softc *sc) sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN); /* Detect if colocated bt started scanning */ if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) { - ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX, + ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX, "BT scan detected\n"); sc->sc_flags |= (SC_OP_BT_SCAN | SC_OP_BT_PRIORITY_DETECTED); } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) { - ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX, + ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX, "BT priority traffic detected\n"); sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED; } @@ -189,8 +189,8 @@ static void ath_btcoex_period_timer(unsigned long data) bool is_btscan; ath9k_ps_wakeup(sc); - if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI)) - ath_detect_bt_priority(sc); + ath_detect_bt_priority(sc); + is_btscan = sc->sc_flags & SC_OP_BT_SCAN; spin_lock_bh(&btcoex->btcoex_lock); @@ -198,7 +198,6 @@ static void ath_btcoex_period_timer(unsigned long data) ath9k_hw_btcoex_bt_stomp(ah, is_btscan ? ATH_BTCOEX_STOMP_ALL : btcoex->bt_stomp_type); - ath9k_hw_btcoex_enable(ah); spin_unlock_bh(&btcoex->btcoex_lock); if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) { @@ -213,9 +212,8 @@ static void ath_btcoex_period_timer(unsigned long data) } ath9k_ps_restore(sc); - timer_period = btcoex->btcoex_period / 1000; mod_timer(&btcoex->period_timer, jiffies + - msecs_to_jiffies(timer_period)); + msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD)); } /* @@ -230,7 +228,8 @@ static void ath_btcoex_no_stomp_timer(void *arg) struct ath_common *common = ath9k_hw_common(ah); bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN; - ath_dbg(common, BTCOEX, "no stomp timer running\n"); + ath_dbg(common, ATH_DBG_BTCOEX, + "no stomp timer running\n"); ath9k_ps_wakeup(sc); spin_lock_bh(&btcoex->btcoex_lock); @@ -240,7 +239,6 @@ static void ath_btcoex_no_stomp_timer(void *arg) else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL) ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW); - ath9k_hw_btcoex_enable(ah); spin_unlock_bh(&btcoex->btcoex_lock); ath9k_ps_restore(sc); } @@ -249,9 +247,6 @@ int ath_init_btcoex_timer(struct ath_softc *sc) { struct ath_btcoex *btcoex = &sc->btcoex; - if (ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_NONE) - return 0; - btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000; btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * btcoex->btcoex_period / 100; @@ -282,10 +277,8 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc) struct ath_btcoex *btcoex = &sc->btcoex; struct ath_hw *ah = sc->sc_ah; - ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n"); - - if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE) - return; + ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX, + "Starting btcoex timers\n"); /* make sure duty cycle timer is also stopped when resuming */ if (btcoex->hw_timer_enabled) @@ -307,9 +300,6 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc) struct ath_btcoex *btcoex = &sc->btcoex; struct ath_hw *ah = sc->sc_ah; - if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE) - return; - del_timer_sync(&btcoex->period_timer); if 
(btcoex->hw_timer_enabled) diff --git a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c index 2eadffb7971c..57fe22b24247 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c +++ b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c @@ -167,9 +167,9 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv, /* TSF out of range threshold fixed at 1 second */ bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD; - ath_dbg(common, CONFIG, "intval: %u tsf: %llu tsftu: %u\n", + ath_dbg(common, ATH_DBG_CONFIG, "intval: %u tsf: %llu tsftu: %u\n", intval, tsf, tsftu); - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n", bs.bs_bmissthreshold, bs.bs_sleepduration, bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext); @@ -224,8 +224,9 @@ static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv, if (priv->op_flags & OP_ENABLE_BEACON) imask |= ATH9K_INT_SWBA; - ath_dbg(common, CONFIG, - "AP Beacon config, intval: %d, nexttbtt: %u, resp_time: %d imask: 0x%x\n", + ath_dbg(common, ATH_DBG_CONFIG, + "AP Beacon config, intval: %d, nexttbtt: %u, resp_time: %d " + "imask: 0x%x\n", bss_conf->beacon_interval, nexttbtt, priv->ah->config.sw_beacon_response_time, imask); @@ -272,8 +273,9 @@ static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv, if (priv->op_flags & OP_ENABLE_BEACON) imask |= ATH9K_INT_SWBA; - ath_dbg(common, CONFIG, - "IBSS Beacon config, intval: %d, nexttbtt: %u, resp_time: %d, imask: 0x%x\n", + ath_dbg(common, ATH_DBG_CONFIG, + "IBSS Beacon config, intval: %d, nexttbtt: %u, " + "resp_time: %d, imask: 0x%x\n", bss_conf->beacon_interval, nexttbtt, priv->ah->config.sw_beacon_response_time, imask); @@ -321,7 +323,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv, tx_slot = ath9k_htc_tx_get_slot(priv); if (tx_slot < 0) { - ath_dbg(common, XMIT, "No free CAB slot\n"); + ath_dbg(common, ATH_DBG_XMIT, "No free CAB slot\n"); dev_kfree_skb_any(skb); goto next; } @@ -331,7 +333,8 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv, ath9k_htc_tx_clear_slot(priv, tx_slot); dev_kfree_skb_any(skb); - ath_dbg(common, XMIT, "Failed to send CAB frame\n"); + ath_dbg(common, ATH_DBG_XMIT, + "Failed to send CAB frame\n"); } else { spin_lock_bh(&priv->tx.tx_lock); priv->tx.queued_cnt++; @@ -406,7 +409,7 @@ static void ath9k_htc_send_beacon(struct ath9k_htc_priv *priv, ret = htc_send(priv->htc, beacon); if (ret != 0) { if (ret == -ENOMEM) { - ath_dbg(common, BSTUCK, + ath_dbg(common, ATH_DBG_BSTUCK, "Failed to send beacon, no free TX buffer\n"); } dev_kfree_skb_any(beacon); @@ -431,7 +434,7 @@ static int ath9k_htc_choose_bslot(struct ath9k_htc_priv *priv, slot = ((tsftu % intval) * ATH9K_HTC_MAX_BCN_VIF) / intval; slot = ATH9K_HTC_MAX_BCN_VIF - slot - 1; - ath_dbg(common, BEACON, + ath_dbg(common, ATH_DBG_BEACON, "Choose slot: %d, tsf: %llu, tsftu: %u, intval: %u\n", slot, tsf, tsftu, intval); @@ -447,7 +450,8 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv, if (swba->beacon_pending != 0) { priv->cur_beacon_conf.bmiss_cnt++; if (priv->cur_beacon_conf.bmiss_cnt > BSTUCK_THRESHOLD) { - ath_dbg(common, BSTUCK, "Beacon stuck, HW reset\n"); + ath_dbg(common, ATH_DBG_BSTUCK, + "Beacon stuck, HW reset\n"); ieee80211_queue_work(priv->hw, &priv->fatal_work); } @@ -455,7 +459,7 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv, } if (priv->cur_beacon_conf.bmiss_cnt) { - ath_dbg(common, BSTUCK, + ath_dbg(common, 
ATH_DBG_BSTUCK, "Resuming beacon xmit after %u misses\n", priv->cur_beacon_conf.bmiss_cnt); priv->cur_beacon_conf.bmiss_cnt = 0; @@ -491,8 +495,8 @@ void ath9k_htc_assign_bslot(struct ath9k_htc_priv *priv, priv->cur_beacon_conf.bslot[avp->bslot] = vif; spin_unlock_bh(&priv->beacon_lock); - ath_dbg(common, CONFIG, "Added interface at beacon slot: %d\n", - avp->bslot); + ath_dbg(common, ATH_DBG_CONFIG, + "Added interface at beacon slot: %d\n", avp->bslot); } void ath9k_htc_remove_bslot(struct ath9k_htc_priv *priv, @@ -505,8 +509,8 @@ void ath9k_htc_remove_bslot(struct ath9k_htc_priv *priv, priv->cur_beacon_conf.bslot[avp->bslot] = NULL; spin_unlock_bh(&priv->beacon_lock); - ath_dbg(common, CONFIG, "Removed interface at beacon slot: %d\n", - avp->bslot); + ath_dbg(common, ATH_DBG_CONFIG, + "Removed interface at beacon slot: %d\n", avp->bslot); } /* @@ -532,7 +536,8 @@ void ath9k_htc_set_tsfadjust(struct ath9k_htc_priv *priv, tsfadjust = cur_conf->beacon_interval * avp->bslot / ATH9K_HTC_MAX_BCN_VIF; avp->tsfadjust = cpu_to_le64(TU_TO_USEC(tsfadjust)); - ath_dbg(common, CONFIG, "tsfadjust is: %llu for bslot: %d\n", + ath_dbg(common, ATH_DBG_CONFIG, + "tsfadjust is: %llu for bslot: %d\n", (unsigned long long)tsfadjust, avp->bslot); } @@ -563,7 +568,7 @@ static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv, (priv->num_ap_vif > 1) && (vif->type == NL80211_IFTYPE_AP) && (cur_conf->beacon_interval != bss_conf->beacon_int)) { - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "Changing beacon interval of multiple AP interfaces !\n"); return false; } @@ -574,7 +579,7 @@ static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv, */ if (priv->num_ap_vif && (vif->type != NL80211_IFTYPE_AP)) { - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "HW in AP mode, cannot set STA beacon parameters\n"); return false; } @@ -592,7 +597,7 @@ static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv, &beacon_configured); if (beacon_configured) { - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "Beacon already configured for a station interface\n"); return false; } @@ -632,7 +637,8 @@ void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv, ath9k_htc_beacon_config_ap(priv, cur_conf); break; default: - ath_dbg(common, CONFIG, "Unsupported beaconing mode\n"); + ath_dbg(common, ATH_DBG_CONFIG, + "Unsupported beaconing mode\n"); return; } } @@ -653,7 +659,8 @@ void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv) ath9k_htc_beacon_config_ap(priv, cur_conf); break; default: - ath_dbg(common, CONFIG, "Unsupported beaconing mode\n"); + ath_dbg(common, ATH_DBG_CONFIG, + "Unsupported beaconing mode\n"); return; } } diff --git a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c index 6506e1fd5036..e3a02eb8e0cc 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c +++ b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c @@ -36,12 +36,12 @@ static void ath_detect_bt_priority(struct ath9k_htc_priv *priv) priv->op_flags &= ~(OP_BT_PRIORITY_DETECTED | OP_BT_SCAN); /* Detect if colocated bt started scanning */ if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) { - ath_dbg(ath9k_hw_common(ah), BTCOEX, + ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX, "BT scan detected\n"); priv->op_flags |= (OP_BT_SCAN | OP_BT_PRIORITY_DETECTED); } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) { - ath_dbg(ath9k_hw_common(ah), BTCOEX, + ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX, "BT priority 
traffic detected\n"); priv->op_flags |= OP_BT_PRIORITY_DETECTED; } @@ -80,7 +80,6 @@ static void ath_btcoex_period_work(struct work_struct *work) ath9k_hw_btcoex_bt_stomp(priv->ah, is_btscan ? ATH_BTCOEX_STOMP_ALL : btcoex->bt_stomp_type); - ath9k_hw_btcoex_enable(priv->ah); timer_period = is_btscan ? btcoex->btscan_no_stomp : btcoex->btcoex_no_stomp; ieee80211_queue_delayed_work(priv->hw, &priv->duty_cycle_work, @@ -102,22 +101,19 @@ static void ath_btcoex_duty_cycle_work(struct work_struct *work) struct ath_common *common = ath9k_hw_common(ah); bool is_btscan = priv->op_flags & OP_BT_SCAN; - ath_dbg(common, BTCOEX, "time slice work for bt and wlan\n"); + ath_dbg(common, ATH_DBG_BTCOEX, + "time slice work for bt and wlan\n"); if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan) ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE); else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL) ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW); - ath9k_hw_btcoex_enable(priv->ah); } void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv) { struct ath_btcoex *btcoex = &priv->btcoex; - if (ath9k_hw_get_btcoex_scheme(priv->ah) == ATH_BTCOEX_CFG_NONE) - return; - btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD; btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * btcoex->btcoex_period / 100; @@ -136,10 +132,7 @@ void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv) struct ath_btcoex *btcoex = &priv->btcoex; struct ath_hw *ah = priv->ah; - if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE) - return; - - ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex work\n"); + ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX, "Starting btcoex work\n"); btcoex->bt_priority_cnt = 0; btcoex->bt_priority_time = jiffies; @@ -153,9 +146,6 @@ void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv) */ void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv) { - if (ath9k_hw_get_btcoex_scheme(priv->ah) == ATH_BTCOEX_CFG_NONE) - return; - cancel_delayed_work_sync(&priv->coex_period_work); cancel_delayed_work_sync(&priv->duty_cycle_work); } diff --git a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 9be10a2da1c2..966661c9e586 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -299,7 +299,8 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset) (u8 *) &val, sizeof(val), 100); if (unlikely(r)) { - ath_dbg(common, WMI, "REGISTER READ FAILED: (0x%04x, %d)\n", + ath_dbg(common, ATH_DBG_WMI, + "REGISTER READ FAILED: (0x%04x, %d)\n", reg_offset, r); return -EIO; } @@ -326,7 +327,7 @@ static void ath9k_multi_regread(void *hw_priv, u32 *addr, (u8 *)tmpval, sizeof(u32) * count, 100); if (unlikely(ret)) { - ath_dbg(common, WMI, + ath_dbg(common, ATH_DBG_WMI, "Multiple REGISTER READ FAILED (count: %d)\n", count); } @@ -351,7 +352,8 @@ static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset) (u8 *) &val, sizeof(val), 100); if (unlikely(r)) { - ath_dbg(common, WMI, "REGISTER WRITE FAILED:(0x%04x, %d)\n", + ath_dbg(common, ATH_DBG_WMI, + "REGISTER WRITE FAILED:(0x%04x, %d)\n", reg_offset, r); } } @@ -382,7 +384,7 @@ static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset) (u8 *) &rsp_status, sizeof(rsp_status), 100); if (unlikely(r)) { - ath_dbg(common, WMI, + ath_dbg(common, ATH_DBG_WMI, "REGISTER WRITE FAILED, multi len: %d\n", priv->wmi->multi_write_idx); } @@ -432,7 +434,7 @@ static void 
ath9k_regwrite_flush(void *hw_priv) (u8 *) &rsp_status, sizeof(rsp_status), 100); if (unlikely(r)) { - ath_dbg(common, WMI, + ath_dbg(common, ATH_DBG_WMI, "REGISTER WRITE FAILED, multi len: %d\n", priv->wmi->multi_write_idx); } @@ -510,7 +512,8 @@ static void setup_ht_cap(struct ath9k_htc_priv *priv, tx_streams = ath9k_cmn_count_streams(priv->ah->txchainmask, 2); rx_streams = ath9k_cmn_count_streams(priv->ah->rxchainmask, 2); - ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n", + ath_dbg(common, ATH_DBG_CONFIG, + "TX streams %d, RX streams: %d\n", tx_streams, rx_streams); if (tx_streams != rx_streams) { @@ -607,7 +610,7 @@ static void ath9k_init_btcoex(struct ath9k_htc_priv *priv) { int qnum; - switch (ath9k_hw_get_btcoex_scheme(priv->ah)) { + switch (priv->ah->btcoex_hw.scheme) { case ATH_BTCOEX_CFG_NONE: break; case ATH_BTCOEX_CFG_3WIRE: @@ -701,8 +704,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv, if (product && strncmp(product, ATH_HTC_BTCOEX_PRODUCT_ID, 5) == 0) { ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_3WIRE; - if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) - ath9k_init_btcoex(priv); + ath9k_init_btcoex(priv); } return 0; @@ -874,8 +876,9 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv, goto err_world; } - ath_dbg(common, CONFIG, - "WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, BE:%d, BK:%d, VI:%d, VO:%d\n", + ath_dbg(common, ATH_DBG_CONFIG, + "WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, " + "BE:%d, BK:%d, VI:%d, VO:%d\n", priv->wmi_cmd_ep, priv->beacon_ep, priv->cab_ep, diff --git a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_main.c index ef4c60661290..0b9a0e8a4958 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -266,7 +266,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv, ath9k_wmi_event_drain(priv); - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "(%u MHz) -> (%u MHz), HT: %d, HT40: %d fastcc: %d\n", priv->ah->curchan->channel, channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf), @@ -415,7 +415,7 @@ static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv) priv->vif_sta_pos[priv->mon_vif_idx] = sta_idx; priv->ah->is_monitoring = true; - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "Attached a monitor interface at idx: %d, sta idx: %d\n", priv->mon_vif_idx, sta_idx); @@ -427,7 +427,7 @@ static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv) */ __ath9k_htc_remove_monitor_interface(priv); err_vif: - ath_dbg(common, FATAL, "Unable to attach a monitor interface\n"); + ath_dbg(common, ATH_DBG_FATAL, "Unable to attach a monitor interface\n"); return ret; } @@ -452,7 +452,7 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv) priv->nstations--; priv->ah->is_monitoring = false; - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "Removed a monitor interface at idx: %d, sta idx: %d\n", priv->mon_vif_idx, sta_idx); @@ -512,11 +512,11 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv, } if (sta) { - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "Added a station entry for: %pM (idx: %d)\n", sta->addr, tsta.sta_index); } else { - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "Added a station entry for VIF %d (idx: %d)\n", avp->index, tsta.sta_index); } @@ -556,11 +556,11 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv, } if (sta) { - ath_dbg(common, CONFIG, 
+ ath_dbg(common, ATH_DBG_CONFIG, "Removed a station entry for: %pM (idx: %d)\n", sta->addr, sta_idx); } else { - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "Removed a station entry for VIF %d (idx: %d)\n", avp->index, sta_idx); } @@ -665,7 +665,7 @@ static void ath9k_htc_init_rate(struct ath9k_htc_priv *priv, ath9k_htc_setup_rate(priv, sta, &trate); ret = ath9k_htc_send_rate_cmd(priv, &trate); if (!ret) - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "Updated target sta: %pM, rate caps: 0x%X\n", sta->addr, be32_to_cpu(trate.capflags)); } @@ -692,7 +692,7 @@ static void ath9k_htc_update_rate(struct ath9k_htc_priv *priv, ret = ath9k_htc_send_rate_cmd(priv, &trate); if (!ret) - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "Updated target sta: %pM, rate caps: 0x%X\n", bss_conf->bssid, be32_to_cpu(trate.capflags)); } @@ -721,11 +721,11 @@ static int ath9k_htc_tx_aggr_oper(struct ath9k_htc_priv *priv, WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr); if (ret) - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "Unable to %s TX aggregation for (%pM, %d)\n", (aggr.aggr_enable) ? "start" : "stop", sta->addr, tid); else - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "%s TX aggregation for (%pM, %d)\n", (aggr.aggr_enable) ? "Starting" : "Stopping", sta->addr, tid); @@ -784,7 +784,7 @@ void ath9k_htc_ani_work(struct work_struct *work) /* Long calibration runs independently of short calibration. */ if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) { longcal = true; - ath_dbg(common, ANI, "longcal @%lu\n", jiffies); + ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies); common->ani.longcal_timer = timestamp; } @@ -793,7 +793,8 @@ void ath9k_htc_ani_work(struct work_struct *work) if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) { shortcal = true; - ath_dbg(common, ANI, "shortcal @%lu\n", jiffies); + ath_dbg(common, ATH_DBG_ANI, + "shortcal @%lu\n", jiffies); common->ani.shortcal_timer = timestamp; common->ani.resetcal_timer = timestamp; } @@ -807,8 +808,7 @@ void ath9k_htc_ani_work(struct work_struct *work) } /* Verify whether we must check ANI */ - if (ah->config.enable_ani && - (timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) { + if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) { aniflag = true; common->ani.checkani_timer = timestamp; } @@ -838,7 +838,7 @@ void ath9k_htc_ani_work(struct work_struct *work) * short calibration and long calibration. 
*/ cal_interval = ATH_LONG_CALINTERVAL; - if (ah->config.enable_ani) + if (priv->ah->config.enable_ani) cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL); if (!common->ani.caldone) cal_interval = min(cal_interval, (u32)short_cal_interval); @@ -865,7 +865,7 @@ static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb) padsize = padpos & 3; if (padsize && skb->len > padpos) { if (skb_headroom(skb) < padsize) { - ath_dbg(common, XMIT, "No room for padding\n"); + ath_dbg(common, ATH_DBG_XMIT, "No room for padding\n"); goto fail_tx; } skb_push(skb, padsize); @@ -874,13 +874,13 @@ static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb) slot = ath9k_htc_tx_get_slot(priv); if (slot < 0) { - ath_dbg(common, XMIT, "No free TX slot\n"); + ath_dbg(common, ATH_DBG_XMIT, "No free TX slot\n"); goto fail_tx; } ret = ath9k_htc_tx_start(priv, skb, slot, false); if (ret != 0) { - ath_dbg(common, XMIT, "Tx failed\n"); + ath_dbg(common, ATH_DBG_XMIT, "Tx failed\n"); goto clear_slot; } @@ -908,7 +908,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw) mutex_lock(&priv->mutex); - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "Starting driver with initial channel: %d MHz\n", curchan->center_freq); @@ -942,7 +942,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw) ret = ath9k_htc_update_cap_target(priv, 0); if (ret) - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "Failed to update capability in target\n"); priv->op_flags &= ~OP_INVALID; @@ -957,7 +957,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw) mod_timer(&priv->tx.cleanup_timer, jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL)); - if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) { + if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) { ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, AR_STOMP_LOW_WLAN_WGHT); ath9k_hw_btcoex_enable(ah); @@ -979,7 +979,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw) mutex_lock(&priv->mutex); if (priv->op_flags & OP_INVALID) { - ath_dbg(common, ANY, "Device not present\n"); + ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); mutex_unlock(&priv->mutex); return; } @@ -1009,8 +1009,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw) mutex_lock(&priv->mutex); - if (ah->btcoex_hw.enabled && - ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) { + if (ah->btcoex_hw.enabled) { ath9k_hw_btcoex_disable(ah); if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) ath_htc_cancel_btcoex_work(priv); @@ -1027,7 +1026,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw) priv->op_flags |= OP_INVALID; - ath_dbg(common, CONFIG, "Driver halt\n"); + ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n"); mutex_unlock(&priv->mutex); } @@ -1120,8 +1119,8 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw, ath9k_htc_start_ani(priv); } - ath_dbg(common, CONFIG, "Attach a VIF of type: %d at idx: %d\n", - vif->type, avp->index); + ath_dbg(common, ATH_DBG_CONFIG, + "Attach a VIF of type: %d at idx: %d\n", vif->type, avp->index); out: ath9k_htc_ps_restore(priv); @@ -1177,7 +1176,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw, ath9k_htc_stop_ani(priv); } - ath_dbg(common, CONFIG, "Detach Interface at idx: %d\n", avp->index); + ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface at idx: %d\n", avp->index); ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); @@ -1202,7 +1201,8 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed) mutex_unlock(&priv->htc_pm_lock); if (enable_radio) { - ath_dbg(common, 
CONFIG, "not-idle: enabling radio\n"); + ath_dbg(common, ATH_DBG_CONFIG, + "not-idle: enabling radio\n"); ath9k_htc_setpower(priv, ATH9K_PM_AWAKE); ath9k_htc_radio_enable(hw); } @@ -1224,7 +1224,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed) struct ieee80211_channel *curchan = hw->conf.channel; int pos = curchan->hw_value; - ath_dbg(common, CONFIG, "Set channel: %d MHz\n", + ath_dbg(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n", curchan->center_freq); ath9k_cmn_update_ichannel(&priv->ah->channels[pos], @@ -1264,7 +1264,8 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed) } mutex_unlock(&priv->htc_pm_lock); - ath_dbg(common, CONFIG, "idle: disabling radio\n"); + ath_dbg(common, ATH_DBG_CONFIG, + "idle: disabling radio\n"); ath9k_htc_radio_disable(hw); } @@ -1296,7 +1297,7 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw, *total_flags &= SUPPORTED_FILTERS; if (priv->op_flags & OP_INVALID) { - ath_dbg(ath9k_hw_common(priv->ah), ANY, + ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_ANY, "Unable to configure filter on invalid state\n"); mutex_unlock(&priv->mutex); return; @@ -1307,8 +1308,8 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw, rfilt = ath9k_htc_calcrxfilter(priv); ath9k_hw_setrxfilter(priv->ah, rfilt); - ath_dbg(ath9k_hw_common(priv->ah), CONFIG, "Set HW RX filter: 0x%x\n", - rfilt); + ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_CONFIG, + "Set HW RX filter: 0x%x\n", rfilt); ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); @@ -1375,7 +1376,7 @@ static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, qnum = get_hw_qnum(queue, priv->hwq_map); - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "Configure tx [queue/hwq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n", queue, qnum, params->aifs, params->cw_min, params->cw_max, params->txop); @@ -1410,7 +1411,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw, return -ENOSPC; mutex_lock(&priv->mutex); - ath_dbg(common, CONFIG, "Set HW Key\n"); + ath_dbg(common, ATH_DBG_CONFIG, "Set HW Key\n"); ath9k_htc_ps_wakeup(priv); switch (cmd) { @@ -1446,7 +1447,8 @@ static void ath9k_htc_set_bssid(struct ath9k_htc_priv *priv) struct ath_common *common = ath9k_hw_common(priv->ah); ath9k_hw_write_associd(priv->ah); - ath_dbg(common, CONFIG, "BSSID: %pM aid: 0x%x\n", + ath_dbg(common, ATH_DBG_CONFIG, + "BSSID: %pM aid: 0x%x\n", common->curbssid, common->curaid); } @@ -1484,7 +1486,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw, ath9k_htc_ps_wakeup(priv); if (changed & BSS_CHANGED_ASSOC) { - ath_dbg(common, CONFIG, "BSS Changed ASSOC %d\n", + ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n", bss_conf->assoc); bss_conf->assoc ? @@ -1509,8 +1511,8 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw, } if ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon) { - ath_dbg(common, CONFIG, "Beacon enabled for BSS: %pM\n", - bss_conf->bssid); + ath_dbg(common, ATH_DBG_CONFIG, + "Beacon enabled for BSS: %pM\n", bss_conf->bssid); ath9k_htc_set_tsfadjust(priv, vif); priv->op_flags |= OP_ENABLE_BEACON; ath9k_htc_beacon_config(priv, vif); @@ -1522,7 +1524,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw, * AP/IBSS interfaces. 
*/ if ((priv->num_ap_vif <= 1) || priv->num_ibss_vif) { - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "Beacon disabled for BSS: %pM\n", bss_conf->bssid); priv->op_flags &= ~OP_ENABLE_BEACON; @@ -1540,7 +1542,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw, (vif->type == NL80211_IFTYPE_AP)) { priv->op_flags |= OP_TSF_RESET; } - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "Beacon interval changed for BSS: %pM\n", bss_conf->bssid); ath9k_htc_beacon_config(priv, vif); @@ -1730,7 +1732,8 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw, goto out; } - ath_dbg(common, CONFIG, "Set bitrate masks: 0x%x, 0x%x\n", + ath_dbg(common, ATH_DBG_CONFIG, + "Set bitrate masks: 0x%x, 0x%x\n", mask->control[IEEE80211_BAND_2GHZ].legacy, mask->control[IEEE80211_BAND_5GHZ].legacy); out: diff --git a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 3e40a6461512..2d81c700e201 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -355,7 +355,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, vif_idx = avp->index; } else { if (!priv->ah->is_monitoring) { - ath_dbg(ath9k_hw_common(priv->ah), XMIT, + ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT, "VIF is null, but no monitor interface !\n"); return -EINVAL; } @@ -620,7 +620,8 @@ static struct sk_buff* ath9k_htc_tx_get_packet(struct ath9k_htc_priv *priv, } spin_unlock_irqrestore(&epid_queue->lock, flags); - ath_dbg(common, XMIT, "No matching packet for cookie: %d, epid: %d\n", + ath_dbg(common, ATH_DBG_XMIT, + "No matching packet for cookie: %d, epid: %d\n", txs->cookie, epid); return NULL; @@ -704,7 +705,8 @@ static inline bool check_packet(struct ath9k_htc_priv *priv, struct sk_buff *skb if (time_after(jiffies, tx_ctl->timestamp + msecs_to_jiffies(ATH9K_HTC_TX_TIMEOUT_INTERVAL))) { - ath_dbg(common, XMIT, "Dropping a packet due to TX timeout\n"); + ath_dbg(common, ATH_DBG_XMIT, + "Dropping a packet due to TX timeout\n"); return true; } @@ -751,7 +753,7 @@ void ath9k_htc_tx_cleanup_timer(unsigned long data) skb = ath9k_htc_tx_get_packet(priv, &event->txs); if (skb) { - ath_dbg(common, XMIT, + ath_dbg(common, ATH_DBG_XMIT, "Found packet for cookie: %d, epid: %d\n", event->txs.cookie, MS(event->txs.ts_rate, ATH9K_HTC_TXSTAT_EPID)); @@ -1165,7 +1167,8 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb, spin_unlock(&priv->rx.rxbuflock); if (rxbuf == NULL) { - ath_dbg(common, ANY, "No free RX buffer\n"); + ath_dbg(common, ATH_DBG_ANY, + "No free RX buffer\n"); goto err; } diff --git a/trunk/drivers/net/wireless/ath/ath9k/hw-ops.h b/trunk/drivers/net/wireless/ath/ath9k/hw-ops.h index c4ad0b06bdbc..e74c233757a2 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/hw-ops.h +++ b/trunk/drivers/net/wireless/ath/ath9k/hw-ops.h @@ -212,13 +212,4 @@ static inline int ath9k_hw_fast_chan_change(struct ath_hw *ah, return ath9k_hw_private_ops(ah)->fast_chan_change(ah, chan, ini_reloaded); } - -static inline void ath9k_hw_set_radar_params(struct ath_hw *ah) -{ - if (!ath9k_hw_private_ops(ah)->set_radar_params) - return; - - ath9k_hw_private_ops(ah)->set_radar_params(ah, &ah->radar_conf); -} - #endif /* ATH9K_HW_OPS_H */ diff --git a/trunk/drivers/net/wireless/ath/ath9k/hw.c b/trunk/drivers/net/wireless/ath/ath9k/hw.c index ee7759575050..8873c6e6fb96 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/hw.c +++ b/trunk/drivers/net/wireless/ath/ath9k/hw.c @@ -133,7 +133,7 @@ bool 
ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout) udelay(AH_TIME_QUANTUM); } - ath_dbg(ath9k_hw_common(ah), ANY, + ath_dbg(ath9k_hw_common(ah), ATH_DBG_ANY, "timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n", timeout, reg, REG_READ(ah, reg), mask, val); @@ -491,7 +491,8 @@ static int ath9k_hw_post_init(struct ath_hw *ah) if (ecode != 0) return ecode; - ath_dbg(ath9k_hw_common(ah), CONFIG, "Eeprom VER: %d, REV: %d\n", + ath_dbg(ath9k_hw_common(ah), ATH_DBG_CONFIG, + "Eeprom VER: %d, REV: %d\n", ah->eep_ops->get_eeprom_ver(ah), ah->eep_ops->get_eeprom_rev(ah)); @@ -503,7 +504,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah) return ecode; } - if (ah->config.enable_ani) { + if (!AR_SREV_9100(ah) && !AR_SREV_9340(ah)) { ath9k_hw_ani_setup(ah); ath9k_hw_ani_init(ah); } @@ -566,7 +567,7 @@ static int __ath9k_hw_init(struct ath_hw *ah) } } - ath_dbg(common, RESET, "serialize_regmode is %d\n", + ath_dbg(common, ATH_DBG_RESET, "serialize_regmode is %d\n", ah->config.serialize_regmode); if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) @@ -609,10 +610,6 @@ static int __ath9k_hw_init(struct ath_hw *ah) if (!AR_SREV_9300_20_OR_LATER(ah)) ah->ani_function &= ~ATH9K_ANI_MRC_CCK; - /* disable ANI for 9340 */ - if (AR_SREV_9340(ah)) - ah->config.enable_ani = false; - ath9k_hw_init_mode_regs(ah); if (!ah->is_pciexpress) @@ -957,8 +954,8 @@ static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us) static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu) { if (tu > 0xFFFF) { - ath_dbg(ath9k_hw_common(ah), XMIT, "bad global tx timeout %u\n", - tu); + ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT, + "bad global tx timeout %u\n", tu); ah->globaltxtimeout = (u32) -1; return false; } else { @@ -979,7 +976,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah) int rx_lat = 0, tx_lat = 0, eifs = 0; u32 reg; - ath_dbg(ath9k_hw_common(ah), RESET, "ah->misc_mode 0x%x\n", + ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n", ah->misc_mode); if (!chan) @@ -1274,7 +1271,7 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type) (npend || type == ATH9K_RESET_COLD)) { int reset_err = 0; - ath_dbg(ath9k_hw_common(ah), RESET, + ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET, "reset MAC via external reset\n"); reset_err = ah->external_reset(); @@ -1297,7 +1294,8 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type) REG_WRITE(ah, AR_RTC_RC, 0); if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) { - ath_dbg(ath9k_hw_common(ah), RESET, "RTC stuck in MAC reset\n"); + ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET, + "RTC stuck in MAC reset\n"); return false; } @@ -1342,7 +1340,8 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) AR_RTC_STATUS_M, AR_RTC_STATUS_ON, AH_WAIT_TIMEOUT)) { - ath_dbg(ath9k_hw_common(ah), RESET, "RTC not waking up\n"); + ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET, + "RTC not waking up\n"); return false; } @@ -1351,7 +1350,6 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type) { - bool ret = false; if (AR_SREV_9300_20_OR_LATER(ah)) { REG_WRITE(ah, AR_WA, ah->WARegVal); @@ -1363,20 +1361,13 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type) switch (type) { case ATH9K_RESET_POWER_ON: - ret = ath9k_hw_set_reset_power_on(ah); - break; + return ath9k_hw_set_reset_power_on(ah); case ATH9K_RESET_WARM: case ATH9K_RESET_COLD: - ret = ath9k_hw_set_reset(ah, type); - break; + return ath9k_hw_set_reset(ah, type); default: - break; + 
return false; } - - if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) - REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2); - - return ret; } static bool ath9k_hw_chip_reset(struct ath_hw *ah, @@ -1415,7 +1406,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah, for (qnum = 0; qnum < AR_NUM_QCU; qnum++) { if (ath9k_hw_numtxpending(ah, qnum)) { - ath_dbg(common, QUEUE, + ath_dbg(common, ATH_DBG_QUEUE, "Transmit frames pending on queue %d\n", qnum); return false; } @@ -1515,7 +1506,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, struct ath9k_hw_cal_data *caldata, bool bChannelChange) { struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci; u32 saveLedState; struct ath9k_channel *curchan = ah->curchan; u32 saveDefAntenna; @@ -1523,52 +1513,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, u64 tsf = 0; int i, r; bool allow_fbs = false; - bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI); - bool save_fullsleep = ah->chip_fullsleep; - - if (mci) { - - ar9003_mci_2g5g_changed(ah, IS_CHAN_2GHZ(chan)); - - if (mci_hw->bt_state == MCI_BT_CAL_START) { - u32 payload[4] = {0, 0, 0, 0}; - - ath_dbg(common, MCI, "MCI stop rx for BT CAL\n"); - - mci_hw->bt_state = MCI_BT_CAL; - - /* - * MCI FIX: disable mci interrupt here. This is to avoid - * SW_MSG_DONE or RX_MSG bits to trigger MCI_INT and - * lead to mci_intr reentry. - */ - - ar9003_mci_disable_interrupt(ah); - - ath_dbg(common, MCI, "send WLAN_CAL_GRANT\n"); - MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_GRANT); - ar9003_mci_send_message(ah, MCI_GPM, 0, payload, - 16, true, false); - - ath_dbg(common, MCI, "\nMCI BT is calibrating\n"); - - /* Wait BT calibration to be completed for 25ms */ - - if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_DONE, - 0, 25000)) - ath_dbg(common, MCI, - "MCI got BT_CAL_DONE\n"); - else - ath_dbg(common, MCI, - "MCI ### BT cal takes to long, force bt_state to be bt_awake\n"); - mci_hw->bt_state = MCI_BT_AWAKE; - /* MCI FIX: enable mci interrupt here */ - ar9003_mci_enable_interrupt(ah); - - return true; - } - } - if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) return -EIO; @@ -1606,29 +1550,12 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, if (ath9k_hw_channel_change(ah, chan)) { ath9k_hw_loadnf(ah, ah->curchan); ath9k_hw_start_nfcal(ah, true); - if (mci && mci_hw->ready) - ar9003_mci_2g5g_switch(ah, true); - if (AR_SREV_9271(ah)) ar9002_hw_load_ani_reg(ah, chan); return 0; } } - if (mci) { - ar9003_mci_disable_interrupt(ah); - - if (mci_hw->ready && !save_fullsleep) { - ar9003_mci_mute_bt(ah); - udelay(20); - REG_WRITE(ah, AR_BTCOEX_CTRL, 0); - } - - mci_hw->bt_state = MCI_BT_SLEEP; - mci_hw->ready = false; - } - - saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA); if (saveDefAntenna == 0) saveDefAntenna = 1; @@ -1684,9 +1611,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, if (r) return r; - if (mci) - ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep); - /* * Some AR91xx SoC devices frequently fail to accept TSF writes * right after the chip reset. When that happens, write a new @@ -1804,54 +1728,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, ath9k_hw_loadnf(ah, chan); ath9k_hw_start_nfcal(ah, true); - if (mci && mci_hw->ready) { - - if (IS_CHAN_2GHZ(chan) && - (mci_hw->bt_state == MCI_BT_SLEEP)) { - - if (ar9003_mci_check_int(ah, - AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) || - ar9003_mci_check_int(ah, - AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)) { - - /* - * BT is sleeping. 
Check if BT wakes up during - * WLAN calibration. If BT wakes up during - * WLAN calibration, need to go through all - * message exchanges again and recal. - */ - - ath_dbg(common, MCI, - "MCI BT wakes up during WLAN calibration\n"); - - REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, - AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET | - AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE); - ath_dbg(common, MCI, "MCI send REMOTE_RESET\n"); - ar9003_mci_remote_reset(ah, true); - ar9003_mci_send_sys_waking(ah, true); - udelay(1); - if (IS_CHAN_2GHZ(chan)) - ar9003_mci_send_lna_transfer(ah, true); - - mci_hw->bt_state = MCI_BT_AWAKE; - - ath_dbg(common, MCI, "MCI re-cal\n"); - - if (caldata) { - caldata->done_txiqcal_once = false; - caldata->done_txclcal_once = false; - caldata->rtt_hist.num_readings = 0; - } - - if (!ath9k_hw_init_cal(ah, chan)) - return -EIO; - - } - } - ar9003_mci_enable_interrupt(ah); - } - ENABLE_REGWRITE_BUFFER(ah); ath9k_hw_restore_chainmask(ah); @@ -1866,14 +1742,14 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, u32 mask; mask = REG_READ(ah, AR_CFG); if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) { - ath_dbg(common, RESET, "CFG Byte Swap Set 0x%x\n", - mask); + ath_dbg(common, ATH_DBG_RESET, + "CFG Byte Swap Set 0x%x\n", mask); } else { mask = INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB; REG_WRITE(ah, AR_CFG, mask); - ath_dbg(common, RESET, "Setting CFG 0x%x\n", - REG_READ(ah, AR_CFG)); + ath_dbg(common, ATH_DBG_RESET, + "Setting CFG 0x%x\n", REG_READ(ah, AR_CFG)); } } else { if (common->bus_ops->ath_bus_type == ATH_USB) { @@ -1891,25 +1767,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, #endif } - if (ah->btcoex_hw.enabled && - ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) + if (ah->btcoex_hw.enabled) ath9k_hw_btcoex_enable(ah); - if (mci && mci_hw->ready) { - /* - * check BT state again to make - * sure it's not changed. 
- */ - - ar9003_mci_sync_bt_state(ah); - ar9003_mci_2g5g_switch(ah, true); - - if ((mci_hw->bt_state == MCI_BT_AWAKE) && - (mci_hw->query_bt == true)) { - mci_hw->need_flush_btinfo = true; - } - } - if (AR_SREV_9300_20_OR_LATER(ah)) { ar9003_hw_bb_watchdog_config(ah); @@ -2074,7 +1934,6 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip) bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode) { struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; int status = true, setChip = true; static const char *modes[] = { "AWAKE", @@ -2086,41 +1945,18 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode) if (ah->power_mode == mode) return status; - ath_dbg(common, RESET, "%s -> %s\n", + ath_dbg(common, ATH_DBG_RESET, "%s -> %s\n", modes[ah->power_mode], modes[mode]); switch (mode) { case ATH9K_PM_AWAKE: status = ath9k_hw_set_power_awake(ah, setChip); - - if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) - REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2); - break; case ATH9K_PM_FULL_SLEEP: - - if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) { - if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) && - (mci->bt_state != MCI_BT_SLEEP) && - !mci->halted_bt_gpm) { - ath_dbg(common, MCI, - "MCI halt BT GPM (full_sleep)\n"); - ar9003_mci_send_coex_halt_bt_gpm(ah, - true, true); - } - - mci->ready = false; - REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2); - } - ath9k_set_power_sleep(ah, setChip); ah->chip_fullsleep = true; break; case ATH9K_PM_NETWORK_SLEEP: - - if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) - REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2); - ath9k_set_power_network_sleep(ah, setChip); break; default: @@ -2170,8 +2006,9 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period) AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN; break; default: - ath_dbg(ath9k_hw_common(ah), BEACON, - "%s: unsupported opmode: %d\n", __func__, ah->opmode); + ath_dbg(ath9k_hw_common(ah), ATH_DBG_BEACON, + "%s: unsupported opmode: %d\n", + __func__, ah->opmode); return; break; } @@ -2222,10 +2059,10 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah, else nextTbtt = bs->bs_nexttbtt; - ath_dbg(common, BEACON, "next DTIM %d\n", bs->bs_nextdtim); - ath_dbg(common, BEACON, "next beacon %d\n", nextTbtt); - ath_dbg(common, BEACON, "beacon period %d\n", beaconintval); - ath_dbg(common, BEACON, "DTIM period %d\n", dtimperiod); + ath_dbg(common, ATH_DBG_BEACON, "next DTIM %d\n", bs->bs_nextdtim); + ath_dbg(common, ATH_DBG_BEACON, "next beacon %d\n", nextTbtt); + ath_dbg(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval); + ath_dbg(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod); ENABLE_REGWRITE_BUFFER(ah); @@ -2272,30 +2109,6 @@ static u8 fixup_chainmask(u8 chip_chainmask, u8 eeprom_chainmask) return chip_chainmask; } -/** - * ath9k_hw_dfs_tested - checks if DFS has been tested with used chipset - * @ah: the atheros hardware data structure - * - * We enable DFS support upstream on chipsets which have passed a series - * of tests. The testing requirements are going to be documented. Desired - * test requirements are documented at: - * - * http://wireless.kernel.org/en/users/Drivers/ath9k/dfs - * - * Once a new chipset gets properly tested an individual commit can be used - * to document the testing for DFS for that chipset. 
- */ -static bool ath9k_hw_dfs_tested(struct ath_hw *ah) -{ - - switch (ah->hw_version.macVersion) { - /* AR9580 will likely be our first target to get testing on */ - case AR_SREV_VERSION_9580: - default: - return false; - } -} - int ath9k_hw_fill_cap_info(struct ath_hw *ah) { struct ath9k_hw_capabilities *pCap = &ah->caps; @@ -2317,8 +2130,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) regulatory->current_rd += 5; else if (regulatory->current_rd == 0x41) regulatory->current_rd = 0x43; - ath_dbg(common, REGULATORY, "regdomain mapped to 0x%x\n", - regulatory->current_rd); + ath_dbg(common, ATH_DBG_REGULATORY, + "regdomain mapped to 0x%x\n", regulatory->current_rd); } eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE); @@ -2336,8 +2149,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah)) chip_chainmask = 1; - else if (AR_SREV_9462(ah)) - chip_chainmask = 3; else if (!AR_SREV_9280_20_OR_LATER(ah)) chip_chainmask = 7; else if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9340(ah)) @@ -2394,10 +2205,12 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) else pCap->num_gpio_pins = AR_NUM_GPIO; - if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) + if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) { + pCap->hw_caps |= ATH9K_HW_CAP_CST; pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX; - else + } else { pCap->rts_aggr_limit = (8 * 1024); + } #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT); @@ -2421,9 +2234,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS; if (common->btcoex_enabled) { - if (AR_SREV_9462(ah)) - btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI; - else if (AR_SREV_9300_20_OR_LATER(ah)) { + if (AR_SREV_9300_20_OR_LATER(ah)) { btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE; btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300; btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300; @@ -2507,9 +2318,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) pCap->pcie_lcr_offset = 0x80; } - if (ath9k_hw_dfs_tested(ah)) - pCap->hw_caps |= ATH9K_HW_CAP_DFS; - tx_chainmask = pCap->tx_chainmask; rx_chainmask = pCap->rx_chainmask; while (tx_chainmask || rx_chainmask) { @@ -2524,11 +2332,11 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) if (AR_SREV_9300_20_OR_LATER(ah)) { ah->enabled_cals |= TX_IQ_CAL; - if (AR_SREV_9485_OR_LATER(ah)) + if (!AR_SREV_9330(ah)) ah->enabled_cals |= TX_IQ_ON_AGC_CAL; } if (AR_SREV_9462(ah)) - pCap->hw_caps |= ATH9K_HW_CAP_RTT | ATH9K_HW_CAP_MCI; + pCap->hw_caps |= ATH9K_HW_CAP_RTT; return 0; } @@ -2776,7 +2584,7 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test) struct ath9k_channel *chan = ah->curchan; struct ieee80211_channel *channel = chan->chan; - reg->power_limit = min_t(u32, limit, MAX_RATE_POWER); + reg->power_limit = min_t(int, limit, MAX_RATE_POWER); if (test) channel->max_power = MAX_RATE_POWER / 2; @@ -2843,7 +2651,7 @@ void ath9k_hw_reset_tsf(struct ath_hw *ah) { if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0, AH_TSF_WRITE_TIMEOUT)) - ath_dbg(ath9k_hw_common(ah), RESET, + ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET, "AR_SLP32_TSF_WRITE_STATUS limit exceeded\n"); REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE); @@ -2968,7 +2776,7 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah, timer_next = tsf + trig_timeout; - ath_dbg(ath9k_hw_common(ah), HWTIMER, + ath_dbg(ath9k_hw_common(ah), ATH_DBG_HWTIMER, "current tsf %x period %x timer_next %x\n", tsf, timer_period, 
timer_next); @@ -3057,8 +2865,8 @@ void ath_gen_timer_isr(struct ath_hw *ah) index = rightmost_index(timer_table, &thresh_mask); timer = timer_table->timers[index]; BUG_ON(!timer); - ath_dbg(common, HWTIMER, "TSF overflow for Gen timer %d\n", - index); + ath_dbg(common, ATH_DBG_HWTIMER, + "TSF overflow for Gen timer %d\n", index); timer->overflow(timer->arg); } @@ -3066,7 +2874,7 @@ void ath_gen_timer_isr(struct ath_hw *ah) index = rightmost_index(timer_table, &trigger_mask); timer = timer_table->timers[index]; BUG_ON(!timer); - ath_dbg(common, HWTIMER, + ath_dbg(common, ATH_DBG_HWTIMER, "Gen timer[%d] trigger\n", index); timer->trigger(timer->arg); } diff --git a/trunk/drivers/net/wireless/ath/ath9k/hw.h b/trunk/drivers/net/wireless/ath/ath9k/hw.h index 6a29004a71b0..f389b3c93cf3 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/hw.h +++ b/trunk/drivers/net/wireless/ath/ath9k/hw.h @@ -59,6 +59,9 @@ #define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa #define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab +#define AR9300_NUM_BT_WEIGHTS 4 +#define AR9300_NUM_WLAN_WEIGHTS 4 + #define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1) #define ATH_DEFAULT_NOISE_FLOOR -95 @@ -126,16 +129,6 @@ #define AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL 4 #define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED 5 #define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED 6 -#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA 0x16 -#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK 0x17 -#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA 0x18 -#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK 0x19 -#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX 0x14 -#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX 0x13 -#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX 9 -#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX 8 -#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_STROBE 0x1d -#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_DATA 0x1e #define AR_GPIOD_MASK 0x00001FFF #define AR_GPIO_BIT(_gpio) (1 << (_gpio)) @@ -196,25 +189,20 @@ enum ath_ini_subsys { enum ath9k_hw_caps { ATH9K_HW_CAP_HT = BIT(0), ATH9K_HW_CAP_RFSILENT = BIT(1), - ATH9K_HW_CAP_AUTOSLEEP = BIT(2), - ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(3), - ATH9K_HW_CAP_EDMA = BIT(4), - ATH9K_HW_CAP_RAC_SUPPORTED = BIT(5), - ATH9K_HW_CAP_LDPC = BIT(6), - ATH9K_HW_CAP_FASTCLOCK = BIT(7), - ATH9K_HW_CAP_SGI_20 = BIT(8), - ATH9K_HW_CAP_PAPRD = BIT(9), - ATH9K_HW_CAP_ANT_DIV_COMB = BIT(10), - ATH9K_HW_CAP_2GHZ = BIT(11), - ATH9K_HW_CAP_5GHZ = BIT(12), - ATH9K_HW_CAP_APM = BIT(13), - ATH9K_HW_CAP_RTT = BIT(14), -#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT - ATH9K_HW_CAP_MCI = BIT(15), -#else - ATH9K_HW_CAP_MCI = 0, -#endif - ATH9K_HW_CAP_DFS = BIT(16), + ATH9K_HW_CAP_CST = BIT(2), + ATH9K_HW_CAP_AUTOSLEEP = BIT(4), + ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(5), + ATH9K_HW_CAP_EDMA = BIT(6), + ATH9K_HW_CAP_RAC_SUPPORTED = BIT(7), + ATH9K_HW_CAP_LDPC = BIT(8), + ATH9K_HW_CAP_FASTCLOCK = BIT(9), + ATH9K_HW_CAP_SGI_20 = BIT(10), + ATH9K_HW_CAP_PAPRD = BIT(11), + ATH9K_HW_CAP_ANT_DIV_COMB = BIT(12), + ATH9K_HW_CAP_2GHZ = BIT(13), + ATH9K_HW_CAP_5GHZ = BIT(14), + ATH9K_HW_CAP_APM = BIT(15), + ATH9K_HW_CAP_RTT = BIT(16), }; struct ath9k_hw_capabilities { @@ -280,7 +268,6 @@ enum ath9k_int { ATH9K_INT_TX = 0x00000040, ATH9K_INT_TXDESC = 0x00000080, ATH9K_INT_TIM_TIMER = 0x00000100, - ATH9K_INT_MCI = 0x00000200, ATH9K_INT_BB_WATCHDOG = 0x00000400, ATH9K_INT_TXURN = 0x00000800, ATH9K_INT_MIB = 0x00001000, @@ -432,161 +419,6 @@ enum ath9k_rx_qtype { ATH9K_RX_QUEUE_MAX, }; -enum mci_message_header { /* length of payload */ - MCI_LNA_CTRL = 0x10, /* len = 0 */ - MCI_CONT_NACK = 0x20, /* len = 0 */ - MCI_CONT_INFO = 0x30, /* len = 4 */ - MCI_CONT_RST = 0x40, 
/* len = 0 */ - MCI_SCHD_INFO = 0x50, /* len = 16 */ - MCI_CPU_INT = 0x60, /* len = 4 */ - MCI_SYS_WAKING = 0x70, /* len = 0 */ - MCI_GPM = 0x80, /* len = 16 */ - MCI_LNA_INFO = 0x90, /* len = 1 */ - MCI_LNA_STATE = 0x94, - MCI_LNA_TAKE = 0x98, - MCI_LNA_TRANS = 0x9c, - MCI_SYS_SLEEPING = 0xa0, /* len = 0 */ - MCI_REQ_WAKE = 0xc0, /* len = 0 */ - MCI_DEBUG_16 = 0xfe, /* len = 2 */ - MCI_REMOTE_RESET = 0xff /* len = 16 */ -}; - -enum ath_mci_gpm_coex_profile_type { - MCI_GPM_COEX_PROFILE_UNKNOWN, - MCI_GPM_COEX_PROFILE_RFCOMM, - MCI_GPM_COEX_PROFILE_A2DP, - MCI_GPM_COEX_PROFILE_HID, - MCI_GPM_COEX_PROFILE_BNEP, - MCI_GPM_COEX_PROFILE_VOICE, - MCI_GPM_COEX_PROFILE_MAX -}; - -/* MCI GPM/Coex opcode/type definitions */ -enum { - MCI_GPM_COEX_W_GPM_PAYLOAD = 1, - MCI_GPM_COEX_B_GPM_TYPE = 4, - MCI_GPM_COEX_B_GPM_OPCODE = 5, - /* MCI_GPM_WLAN_CAL_REQ, MCI_GPM_WLAN_CAL_DONE */ - MCI_GPM_WLAN_CAL_W_SEQUENCE = 2, - - /* MCI_GPM_COEX_VERSION_QUERY */ - /* MCI_GPM_COEX_VERSION_RESPONSE */ - MCI_GPM_COEX_B_MAJOR_VERSION = 6, - MCI_GPM_COEX_B_MINOR_VERSION = 7, - /* MCI_GPM_COEX_STATUS_QUERY */ - MCI_GPM_COEX_B_BT_BITMAP = 6, - MCI_GPM_COEX_B_WLAN_BITMAP = 7, - /* MCI_GPM_COEX_HALT_BT_GPM */ - MCI_GPM_COEX_B_HALT_STATE = 6, - /* MCI_GPM_COEX_WLAN_CHANNELS */ - MCI_GPM_COEX_B_CHANNEL_MAP = 6, - /* MCI_GPM_COEX_BT_PROFILE_INFO */ - MCI_GPM_COEX_B_PROFILE_TYPE = 6, - MCI_GPM_COEX_B_PROFILE_LINKID = 7, - MCI_GPM_COEX_B_PROFILE_STATE = 8, - MCI_GPM_COEX_B_PROFILE_ROLE = 9, - MCI_GPM_COEX_B_PROFILE_RATE = 10, - MCI_GPM_COEX_B_PROFILE_VOTYPE = 11, - MCI_GPM_COEX_H_PROFILE_T = 12, - MCI_GPM_COEX_B_PROFILE_W = 14, - MCI_GPM_COEX_B_PROFILE_A = 15, - /* MCI_GPM_COEX_BT_STATUS_UPDATE */ - MCI_GPM_COEX_B_STATUS_TYPE = 6, - MCI_GPM_COEX_B_STATUS_LINKID = 7, - MCI_GPM_COEX_B_STATUS_STATE = 8, - /* MCI_GPM_COEX_BT_UPDATE_FLAGS */ - MCI_GPM_COEX_W_BT_FLAGS = 6, - MCI_GPM_COEX_B_BT_FLAGS_OP = 10 -}; - -enum mci_gpm_subtype { - MCI_GPM_BT_CAL_REQ = 0, - MCI_GPM_BT_CAL_GRANT = 1, - MCI_GPM_BT_CAL_DONE = 2, - MCI_GPM_WLAN_CAL_REQ = 3, - MCI_GPM_WLAN_CAL_GRANT = 4, - MCI_GPM_WLAN_CAL_DONE = 5, - MCI_GPM_COEX_AGENT = 0x0c, - MCI_GPM_RSVD_PATTERN = 0xfe, - MCI_GPM_RSVD_PATTERN32 = 0xfefefefe, - MCI_GPM_BT_DEBUG = 0xff -}; - -enum mci_bt_state { - MCI_BT_SLEEP, - MCI_BT_AWAKE, - MCI_BT_CAL_START, - MCI_BT_CAL -}; - -/* Type of state query */ -enum mci_state_type { - MCI_STATE_ENABLE, - MCI_STATE_INIT_GPM_OFFSET, - MCI_STATE_NEXT_GPM_OFFSET, - MCI_STATE_LAST_GPM_OFFSET, - MCI_STATE_BT, - MCI_STATE_SET_BT_SLEEP, - MCI_STATE_SET_BT_AWAKE, - MCI_STATE_SET_BT_CAL_START, - MCI_STATE_SET_BT_CAL, - MCI_STATE_LAST_SCHD_MSG_OFFSET, - MCI_STATE_REMOTE_SLEEP, - MCI_STATE_CONT_RSSI_POWER, - MCI_STATE_CONT_PRIORITY, - MCI_STATE_CONT_TXRX, - MCI_STATE_RESET_REQ_WAKE, - MCI_STATE_SEND_WLAN_COEX_VERSION, - MCI_STATE_SET_BT_COEX_VERSION, - MCI_STATE_SEND_WLAN_CHANNELS, - MCI_STATE_SEND_VERSION_QUERY, - MCI_STATE_SEND_STATUS_QUERY, - MCI_STATE_NEED_FLUSH_BT_INFO, - MCI_STATE_SET_CONCUR_TX_PRI, - MCI_STATE_RECOVER_RX, - MCI_STATE_NEED_FTP_STOMP, - MCI_STATE_NEED_TUNING, - MCI_STATE_DEBUG, - MCI_STATE_MAX -}; - -enum mci_gpm_coex_opcode { - MCI_GPM_COEX_VERSION_QUERY, - MCI_GPM_COEX_VERSION_RESPONSE, - MCI_GPM_COEX_STATUS_QUERY, - MCI_GPM_COEX_HALT_BT_GPM, - MCI_GPM_COEX_WLAN_CHANNELS, - MCI_GPM_COEX_BT_PROFILE_INFO, - MCI_GPM_COEX_BT_STATUS_UPDATE, - MCI_GPM_COEX_BT_UPDATE_FLAGS -}; - -#define MCI_GPM_NOMORE 0 -#define MCI_GPM_MORE 1 -#define MCI_GPM_INVALID 0xffffffff - -#define MCI_GPM_RECYCLE(_p_gpm) do { \ - *(((u32 *)_p_gpm) + 
MCI_GPM_COEX_W_GPM_PAYLOAD) = \ - MCI_GPM_RSVD_PATTERN32; \ -} while (0) - -#define MCI_GPM_TYPE(_p_gpm) \ - (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) & 0xff) - -#define MCI_GPM_OPCODE(_p_gpm) \ - (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) & 0xff) - -#define MCI_GPM_SET_CAL_TYPE(_p_gpm, _cal_type) do { \ - *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_cal_type) & 0xff;\ -} while (0) - -#define MCI_GPM_SET_TYPE_OPCODE(_p_gpm, _type, _opcode) do { \ - *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_type) & 0xff; \ - *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) = (_opcode) & 0xff;\ -} while (0) - -#define MCI_GPM_IS_CAL_TYPE(_type) ((_type) <= MCI_GPM_WLAN_CAL_DONE) - struct ath9k_beacon_state { u32 bs_nexttbtt; u32 bs_nextdtim; @@ -959,6 +791,8 @@ struct ath_hw { /* Bluetooth coexistance */ struct ath_btcoex_hw btcoex_hw; + u32 bt_coex_bt_weight[AR9300_NUM_BT_WEIGHTS]; + u32 bt_coex_wlan_weight[AR9300_NUM_WLAN_WEIGHTS]; u32 intr_txqs; u8 txchainmask; @@ -1016,7 +850,7 @@ struct ath_hw { u32 ts_paddr_start; u32 ts_paddr_end; u16 ts_tail; - u16 ts_size; + u8 ts_size; u32 bb_watchdog_last_status; u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */ @@ -1114,6 +948,7 @@ bool ath9k_hw_disable(struct ath_hw *ah); void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test); void ath9k_hw_setopmode(struct ath_hw *ah); void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1); +void ath9k_hw_setbssidmask(struct ath_hw *ah); void ath9k_hw_write_associd(struct ath_hw *ah); u32 ath9k_hw_gettsf32(struct ath_hw *ah); u64 ath9k_hw_gettsf64(struct ath_hw *ah); @@ -1206,42 +1041,6 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning); void ath9k_hw_proc_mib_event(struct ath_hw *ah); void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan); -bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag, - u32 *payload, u8 len, bool wait_done, - bool check_bt); -void ar9003_mci_mute_bt(struct ath_hw *ah); -u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data); -void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf, - u16 len, u32 sched_addr); -void ar9003_mci_cleanup(struct ath_hw *ah); -void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt, - bool wait_done); -u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type, - u8 gpm_opcode, int time_out); -void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g); -void ar9003_mci_disable_interrupt(struct ath_hw *ah); -void ar9003_mci_enable_interrupt(struct ath_hw *ah); -void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done); -void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, - bool is_full_sleep); -bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints); -void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done); -void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done); -void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done); -void ar9003_mci_sync_bt_state(struct ath_hw *ah); -void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr, - u32 *rx_msg_intr); - -#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT -static inline enum ath_btcoex_scheme -ath9k_hw_get_btcoex_scheme(struct ath_hw *ah) -{ - return ah->btcoex_hw.scheme; -} -#else -#define ath9k_hw_get_btcoex_scheme(...) 
ATH_BTCOEX_CFG_NONE -#endif - #define ATH9K_CLOCK_RATE_CCK 22 #define ATH9K_CLOCK_RATE_5GHZ_OFDM 40 #define ATH9K_CLOCK_RATE_2GHZ_OFDM 44 diff --git a/trunk/drivers/net/wireless/ath/ath9k/init.c b/trunk/drivers/net/wireless/ath/ath9k/init.c index abf943557dee..d4c909f8e474 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/init.c +++ b/trunk/drivers/net/wireless/ath/ath9k/init.c @@ -258,8 +258,6 @@ static void setup_ht_cap(struct ath_softc *sc, if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) max_streams = 1; - else if (AR_SREV_9462(ah)) - max_streams = 2; else if (AR_SREV_9300_20_OR_LATER(ah)) max_streams = 3; else @@ -276,7 +274,8 @@ static void setup_ht_cap(struct ath_softc *sc, tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams); rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams); - ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n", + ath_dbg(common, ATH_DBG_CONFIG, + "TX streams %d, RX streams: %d\n", tx_streams, rx_streams); if (tx_streams != rx_streams) { @@ -296,22 +295,9 @@ static int ath9k_reg_notifier(struct wiphy *wiphy, { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct ath_softc *sc = hw->priv; - struct ath_hw *ah = sc->sc_ah; - struct ath_regulatory *reg = ath9k_hw_regulatory(ah); - int ret; - - ret = ath_reg_notifier_apply(wiphy, request, reg); - - /* Set tx power */ - if (ah->curchan) { - sc->config.txpowlimit = 2 * ah->curchan->chan->max_power; - ath9k_ps_wakeup(sc); - ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false); - sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit; - ath9k_ps_restore(sc); - } + struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah); - return ret; + return ath_reg_notifier_apply(wiphy, request, reg); } /* @@ -328,7 +314,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, struct ath_buf *bf; int i, bsize, error, desc_len; - ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n", + ath_dbg(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n", name, nbuf, ndesc); INIT_LIST_HEAD(head); @@ -374,7 +360,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, goto fail; } ds = (u8 *) dd->dd_desc; - ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n", + ath_dbg(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n", name, ds, (u32) dd->dd_desc_len, ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len); @@ -422,10 +408,9 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, static int ath9k_init_btcoex(struct ath_softc *sc) { struct ath_txq *txq; - struct ath_hw *ah = sc->sc_ah; int r; - switch (ath9k_hw_get_btcoex_scheme(sc->sc_ah)) { + switch (sc->sc_ah->btcoex_hw.scheme) { case ATH_BTCOEX_CFG_NONE: break; case ATH_BTCOEX_CFG_2WIRE: @@ -440,37 +425,6 @@ static int ath9k_init_btcoex(struct ath_softc *sc) ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum); sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; break; - case ATH_BTCOEX_CFG_MCI: - sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; - sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE; - INIT_LIST_HEAD(&sc->btcoex.mci.info); - - r = ath_mci_setup(sc); - if (r) - return r; - - if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) { - ah->btcoex_hw.mci.ready = false; - ah->btcoex_hw.mci.bt_state = 0; - ah->btcoex_hw.mci.bt_ver_major = 3; - ah->btcoex_hw.mci.bt_ver_minor = 0; - ah->btcoex_hw.mci.bt_version_known = false; - ah->btcoex_hw.mci.update_2g5g = true; - ah->btcoex_hw.mci.is_2g = true; - ah->btcoex_hw.mci.wlan_channels_update = false; - 
ah->btcoex_hw.mci.wlan_channels[0] = 0x00000000; - ah->btcoex_hw.mci.wlan_channels[1] = 0xffffffff; - ah->btcoex_hw.mci.wlan_channels[2] = 0xffffffff; - ah->btcoex_hw.mci.wlan_channels[3] = 0x7fffffff; - ah->btcoex_hw.mci.query_bt = true; - ah->btcoex_hw.mci.unhalt_bt_gpm = true; - ah->btcoex_hw.mci.halted_bt_gpm = false; - ah->btcoex_hw.mci.need_flush_btinfo = false; - ah->btcoex_hw.mci.wlan_cal_seq = 0; - ah->btcoex_hw.mci.wlan_cal_done = 0; - ah->btcoex_hw.mci.config = 0x2201; - } - break; default: WARN_ON(1); break; @@ -741,7 +695,6 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; - hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; hw->queues = 4; hw->max_rates = 4; @@ -880,12 +833,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc) kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels); if ((sc->btcoex.no_stomp_timer) && - ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE) + sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer); - if (ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_MCI) - ath_mci_cleanup(sc); - for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) if (ATH_TXQ_SETUP(sc, i)) ath_tx_cleanupq(sc, &sc->tx.txq[i]); diff --git a/trunk/drivers/net/wireless/ath/ath9k/mac.c b/trunk/drivers/net/wireless/ath/ath9k/mac.c index fd3f19c2e550..ecdb6fd29079 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/mac.c +++ b/trunk/drivers/net/wireless/ath/ath9k/mac.c @@ -21,7 +21,7 @@ static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah, struct ath9k_tx_queue_info *qi) { - ath_dbg(ath9k_hw_common(ah), INTERRUPT, + ath_dbg(ath9k_hw_common(ah), ATH_DBG_INTERRUPT, "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", ah->txok_interrupt_mask, ah->txerr_interrupt_mask, ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask, @@ -57,7 +57,8 @@ EXPORT_SYMBOL(ath9k_hw_puttxbuf); void ath9k_hw_txstart(struct ath_hw *ah, u32 q) { - ath_dbg(ath9k_hw_common(ah), QUEUE, "Enable TXE on queue: %u\n", q); + ath_dbg(ath9k_hw_common(ah), ATH_DBG_QUEUE, + "Enable TXE on queue: %u\n", q); REG_WRITE(ah, AR_Q_TXE, 1 << q); } EXPORT_SYMBOL(ath9k_hw_txstart); @@ -201,12 +202,12 @@ bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q, qi = &ah->txq[q]; if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { - ath_dbg(common, QUEUE, + ath_dbg(common, ATH_DBG_QUEUE, "Set TXQ properties, inactive queue: %u\n", q); return false; } - ath_dbg(common, QUEUE, "Set queue properties for: %u\n", q); + ath_dbg(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q); qi->tqi_ver = qinfo->tqi_ver; qi->tqi_subtype = qinfo->tqi_subtype; @@ -265,7 +266,7 @@ bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q, qi = &ah->txq[q]; if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { - ath_dbg(common, QUEUE, + ath_dbg(common, ATH_DBG_QUEUE, "Get TXQ properties, inactive queue: %u\n", q); return false; } @@ -324,7 +325,7 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type, return -1; } - ath_dbg(common, QUEUE, "Setup TX queue: %u\n", q); + ath_dbg(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q); qi = &ah->txq[q]; if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) { @@ -347,11 +348,12 @@ bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q) qi = &ah->txq[q]; if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { - ath_dbg(common, QUEUE, "Release TXQ, inactive queue: %u\n", q); + ath_dbg(common, ATH_DBG_QUEUE, + "Release TXQ, inactive queue: %u\n", q); return false; } 
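The mac.c hunks around this point swap ath_dbg() calls between two calling conventions: one passes a bare debug-level name (QUEUE, RESET, ...) and relies on a wrapper macro to paste on the ATH_DBG_ prefix, the other spells out the ATH_DBG_* constant directly. A minimal stand-alone sketch of that token-pasting idiom, using hypothetical names and mask values rather than the real ath.h definitions:

#include <stdio.h>

/* Hypothetical debug flags and enabled-mask, standing in for the ATH_DBG_* set. */
#define ATH_DBG_QUEUE	0x00000008
#define ATH_DBG_RESET	0x00000020

static unsigned int example_debug_mask = ATH_DBG_QUEUE;

/* Low-level helper: print only when the given level is enabled in the mask. */
#define _example_dbg(mask, fmt, ...)					\
	do {								\
		if (example_debug_mask & (mask))			\
			printf(fmt, ##__VA_ARGS__);			\
	} while (0)

/* Wrapper: paste the ATH_DBG_ prefix onto a bare level name at compile time. */
#define example_ath_dbg(dbg, fmt, ...)					\
	_example_dbg(ATH_DBG_##dbg, fmt, ##__VA_ARGS__)

int main(void)
{
	example_ath_dbg(QUEUE, "Release TX queue: %u\n", 3U);	    /* bare-name form */
	_example_dbg(ATH_DBG_QUEUE, "Release TX queue: %u\n", 3U); /* explicit form  */
	return 0;
}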
- ath_dbg(common, QUEUE, "Release TX queue: %u\n", q); + ath_dbg(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q); qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE; ah->txok_interrupt_mask &= ~(1 << q); @@ -374,11 +376,12 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q) qi = &ah->txq[q]; if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { - ath_dbg(common, QUEUE, "Reset TXQ, inactive queue: %u\n", q); + ath_dbg(common, ATH_DBG_QUEUE, + "Reset TXQ, inactive queue: %u\n", q); return true; } - ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q); + ath_dbg(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q); if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) { if (chan && IS_CHAN_B(chan)) @@ -757,10 +760,7 @@ bool ath9k_hw_intrpend(struct ath_hw *ah) return true; host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE); - - if (((host_isr & AR_INTR_MAC_IRQ) || - (host_isr & AR_INTR_ASYNC_MASK_MCI)) && - (host_isr != AR_INTR_SPURIOUS)) + if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS)) return true; host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE); @@ -781,7 +781,7 @@ void ath9k_hw_disable_interrupts(struct ath_hw *ah) else atomic_dec(&ah->intr_ref_cnt); - ath_dbg(common, INTERRUPT, "disable IER\n"); + ath_dbg(common, ATH_DBG_INTERRUPT, "disable IER\n"); REG_WRITE(ah, AR_IER, AR_IER_DISABLE); (void) REG_READ(ah, AR_IER); if (!AR_SREV_9100(ah)) { @@ -798,13 +798,13 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); u32 sync_default = AR_INTR_SYNC_DEFAULT; - u32 async_mask; if (!(ah->imask & ATH9K_INT_GLOBAL)) return; if (!atomic_inc_and_test(&ah->intr_ref_cnt)) { - ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n", + ath_dbg(common, ATH_DBG_INTERRUPT, + "Do not enable IER ref count %d\n", atomic_read(&ah->intr_ref_cnt)); return; } @@ -812,21 +812,18 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah) if (AR_SREV_9340(ah)) sync_default &= ~AR_INTR_SYNC_HOST1_FATAL; - async_mask = AR_INTR_MAC_IRQ; - - if (ah->imask & ATH9K_INT_MCI) - async_mask |= AR_INTR_ASYNC_MASK_MCI; - - ath_dbg(common, INTERRUPT, "enable IER\n"); + ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n"); REG_WRITE(ah, AR_IER, AR_IER_ENABLE); if (!AR_SREV_9100(ah)) { - REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask); - REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask); + REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, + AR_INTR_MAC_IRQ); + REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ); + REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default); REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default); } - ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n", + ath_dbg(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n", REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER)); } EXPORT_SYMBOL(ath9k_hw_enable_interrupts); @@ -841,7 +838,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah) if (!(ints & ATH9K_INT_GLOBAL)) ath9k_hw_disable_interrupts(ah); - ath_dbg(common, INTERRUPT, "New interrupt mask 0x%x\n", ints); + ath_dbg(common, ATH_DBG_INTERRUPT, "New interrupt mask 0x%x\n", ints); mask = ints & ATH9K_INT_COMMON; mask2 = 0; @@ -904,7 +901,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah) mask2 |= AR_IMR_S2_CST; } - ath_dbg(common, INTERRUPT, "new IMR 0x%x\n", mask); + ath_dbg(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask); REG_WRITE(ah, AR_IMR, mask); ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC | AR_IMR_S2_CABEND | AR_IMR_S2_CABTO | diff --git a/trunk/drivers/net/wireless/ath/ath9k/main.c b/trunk/drivers/net/wireless/ath/ath9k/main.c index e267c92dbfb8..d2348a5a7809 100644 --- 
a/trunk/drivers/net/wireless/ath/ath9k/main.c +++ b/trunk/drivers/net/wireless/ath/ath9k/main.c @@ -118,7 +118,7 @@ void ath9k_ps_restore(struct ath_softc *sc) if (--sc->ps_usecount != 0) goto unlock; - if (sc->ps_idle && (sc->ps_flags & PS_WAIT_FOR_TX_ACK)) + if (sc->ps_idle) mode = ATH9K_PM_FULL_SLEEP; else if (sc->ps_enabled && !(sc->ps_flags & (PS_WAIT_FOR_BEACON | @@ -332,14 +332,14 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan, hchan = ah->curchan; } - if (fastcc && (ah->chip_fullsleep || - !ath9k_hw_check_alive(ah))) + if (fastcc && !ath9k_hw_check_alive(ah)) fastcc = false; if (!ath_prepare_reset(sc, retry_tx, flush)) fastcc = false; - ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n", + ath_dbg(common, ATH_DBG_CONFIG, + "Reset to %u MHz, HT40: %d fastcc: %d\n", hchan->channel, !!(hchan->channelFlags & (CHANNEL_HT40MINUS | CHANNEL_HT40PLUS)), fastcc); @@ -428,7 +428,7 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int txctl.paprd = BIT(chain); if (ath_tx_start(hw, skb, &txctl) != 0) { - ath_dbg(common, CALIBRATE, "PAPRD TX failed\n"); + ath_dbg(common, ATH_DBG_CALIBRATE, "PAPRD TX failed\n"); dev_kfree_skb_any(skb); return false; } @@ -437,7 +437,7 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int msecs_to_jiffies(ATH_PAPRD_TIMEOUT)); if (!time_left) - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "Timeout waiting for paprd training on TX chain %d\n", chain); @@ -486,27 +486,27 @@ void ath_paprd_calibrate(struct work_struct *work) chain_ok = 0; - ath_dbg(common, CALIBRATE, - "Sending PAPRD frame for thermal measurement on chain %d\n", - chain); + ath_dbg(common, ATH_DBG_CALIBRATE, + "Sending PAPRD frame for thermal measurement " + "on chain %d\n", chain); if (!ath_paprd_send_frame(sc, skb, chain)) goto fail_paprd; ar9003_paprd_setup_gain_table(ah, chain); - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "Sending PAPRD training frame on chain %d\n", chain); if (!ath_paprd_send_frame(sc, skb, chain)) goto fail_paprd; if (!ar9003_paprd_is_done(ah)) { - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "PAPRD not yet done on chain %d\n", chain); break; } if (ar9003_paprd_create_curve(ah, caldata, chain)) { - ath_dbg(common, CALIBRATE, + ath_dbg(common, ATH_DBG_CALIBRATE, "PAPRD create curve failed on chain %d\n", chain); break; @@ -561,6 +561,7 @@ void ath_ani_calibrate(unsigned long data) /* Long calibration runs independently of short calibration. 
*/ if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) { longcal = true; + ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies); common->ani.longcal_timer = timestamp; } @@ -568,6 +569,8 @@ void ath_ani_calibrate(unsigned long data) if (!common->ani.caldone) { if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) { shortcal = true; + ath_dbg(common, ATH_DBG_ANI, + "shortcal @%lu\n", jiffies); common->ani.shortcal_timer = timestamp; common->ani.resetcal_timer = timestamp; } @@ -581,9 +584,8 @@ void ath_ani_calibrate(unsigned long data) } /* Verify whether we must check ANI */ - if (sc->sc_ah->config.enable_ani - && (timestamp - common->ani.checkani_timer) >= - ah->config.ani_poll_interval) { + if ((timestamp - common->ani.checkani_timer) >= + ah->config.ani_poll_interval) { aniflag = true; common->ani.checkani_timer = timestamp; } @@ -603,12 +605,6 @@ void ath_ani_calibrate(unsigned long data) ah->rxchainmask, longcal); } - ath_dbg(common, ANI, - "Calibration @%lu finished: %s %s %s, caldone: %s\n", - jiffies, - longcal ? "long" : "", shortcal ? "short" : "", - aniflag ? "ani" : "", common->ani.caldone ? "true" : "false"); - ath9k_ps_restore(sc); set_timer: @@ -634,8 +630,7 @@ void ath_ani_calibrate(unsigned long data) } } -static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta, - struct ieee80211_vif *vif) +static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta) { struct ath_node *an; an = (struct ath_node *)sta->drv_priv; @@ -644,9 +639,8 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta, spin_lock(&sc->nodes_lock); list_add(&an->list, &sc->nodes); spin_unlock(&sc->nodes_lock); -#endif an->sta = sta; - an->vif = vif; +#endif if (sc->sc_flags & SC_OP_TXAGGR) { ath_tx_node_init(sc, an); an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + @@ -715,7 +709,8 @@ void ath9k_tasklet(unsigned long data) * TSF sync does not look correct; remain awake to sync with * the next Beacon. 
*/ - ath_dbg(common, PS, "TSFOOR - Sync with next Beacon\n"); + ath_dbg(common, ATH_DBG_PS, + "TSFOOR - Sync with next Beacon\n"); sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC; } @@ -741,13 +736,10 @@ void ath9k_tasklet(unsigned long data) ath_tx_tasklet(sc); } - if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) + if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) if (status & ATH9K_INT_GENTIMER) ath_gen_timer_isr(sc->sc_ah); - if ((status & ATH9K_INT_MCI) && ATH9K_HW_CAP_MCI) - ath_mci_intr(sc); - out: /* re-enable hardware interrupt */ ath9k_hw_enable_interrupts(ah); @@ -770,8 +762,7 @@ irqreturn_t ath_isr(int irq, void *dev) ATH9K_INT_BMISS | \ ATH9K_INT_CST | \ ATH9K_INT_TSFOOR | \ - ATH9K_INT_GENTIMER | \ - ATH9K_INT_MCI) + ATH9K_INT_GENTIMER) struct ath_softc *sc = dev; struct ath_hw *ah = sc->sc_ah; @@ -889,6 +880,82 @@ irqreturn_t ath_isr(int irq, void *dev) #undef SCHED_INTR } +static void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + struct ieee80211_channel *channel = hw->conf.channel; + int r; + + ath9k_ps_wakeup(sc); + spin_lock_bh(&sc->sc_pcu_lock); + atomic_set(&ah->intr_ref_cnt, -1); + + ath9k_hw_configpcipowersave(ah, false); + + if (!ah->curchan) + ah->curchan = ath9k_cmn_get_curchannel(sc->hw, ah); + + r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); + if (r) { + ath_err(common, + "Unable to reset channel (%u MHz), reset status %d\n", + channel->center_freq, r); + } + + ath_complete_reset(sc, true); + + /* Enable LED */ + ath9k_hw_cfg_output(ah, ah->led_pin, + AR_GPIO_OUTPUT_MUX_AS_OUTPUT); + ath9k_hw_set_gpio(ah, ah->led_pin, 0); + + spin_unlock_bh(&sc->sc_pcu_lock); + + ath9k_ps_restore(sc); +} + +void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw) +{ + struct ath_hw *ah = sc->sc_ah; + struct ieee80211_channel *channel = hw->conf.channel; + int r; + + ath9k_ps_wakeup(sc); + + ath_cancel_work(sc); + + spin_lock_bh(&sc->sc_pcu_lock); + + /* + * Keep the LED on when the radio is disabled + * during idle unassociated state. + */ + if (!sc->ps_idle) { + ath9k_hw_set_gpio(ah, ah->led_pin, 1); + ath9k_hw_cfg_gpio_input(ah, ah->led_pin); + } + + ath_prepare_reset(sc, false, true); + + if (!ah->curchan) + ah->curchan = ath9k_cmn_get_curchannel(hw, ah); + + r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); + if (r) { + ath_err(ath9k_hw_common(sc->sc_ah), + "Unable to reset channel (%u MHz), reset status %d\n", + channel->center_freq, r); + } + + ath9k_hw_phy_disable(ah); + + ath9k_hw_configpcipowersave(ah, true); + + spin_unlock_bh(&sc->sc_pcu_lock); + ath9k_ps_restore(sc); +} + static int ath_reset(struct ath_softc *sc, bool retry_tx) { int r; @@ -935,8 +1002,8 @@ void ath_hw_check(struct work_struct *work) busy = ath_update_survey_stats(sc); spin_unlock_irqrestore(&common->cc_lock, flags); - ath_dbg(common, RESET, "Possible baseband hang, busy=%d (try %d)\n", - busy, sc->hw_busy_count + 1); + ath_dbg(common, ATH_DBG_RESET, "Possible baseband hang, " + "busy=%d (try %d)\n", busy, sc->hw_busy_count + 1); if (busy >= 99) { if (++sc->hw_busy_count >= 3) { RESET_STAT_INC(sc, RESET_TYPE_BB_HANG); @@ -959,7 +1026,8 @@ static void ath_hw_pll_rx_hang_check(struct ath_softc *sc, u32 pll_sqsum) count++; if (count == 3) { /* Rx is hung for more than 500ms. 
Reset it */ - ath_dbg(common, RESET, "Possible RX hang, resetting\n"); + ath_dbg(common, ATH_DBG_RESET, + "Possible RX hang, resetting"); RESET_STAT_INC(sc, RESET_TYPE_PLL_HANG); ieee80211_queue_work(sc->hw, &sc->hw_reset_work); count = 0; @@ -999,7 +1067,7 @@ static int ath9k_start(struct ieee80211_hw *hw) struct ath9k_channel *init_channel; int r; - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "Starting driver with initial channel: %d MHz\n", curchan->center_freq); @@ -1023,9 +1091,6 @@ static int ath9k_start(struct ieee80211_hw *hw) * and then setup of the interrupt mask. */ spin_lock_bh(&sc->sc_pcu_lock); - - atomic_set(&ah->intr_ref_cnt, -1); - r = ath9k_hw_reset(ah, init_channel, ah->caldata, false); if (r) { ath_err(common, @@ -1052,9 +1117,6 @@ static int ath9k_start(struct ieee80211_hw *hw) if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) ah->imask |= ATH9K_INT_CST; - if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) - ah->imask |= ATH9K_INT_MCI; - sc->sc_flags &= ~SC_OP_INVALID; sc->sc_ah->is_monitoring = false; @@ -1067,28 +1129,15 @@ static int ath9k_start(struct ieee80211_hw *hw) goto mutex_unlock; } - if (ah->led_pin >= 0) { - ath9k_hw_cfg_output(ah, ah->led_pin, - AR_GPIO_OUTPUT_MUX_AS_OUTPUT); - ath9k_hw_set_gpio(ah, ah->led_pin, 0); - } - - /* - * Reset key cache to sane defaults (all entries cleared) instead of - * semi-random values after suspend/resume. - */ - ath9k_cmn_init_crypto(sc->sc_ah); - spin_unlock_bh(&sc->sc_pcu_lock); - if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) && + if ((ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) && !ah->btcoex_hw.enabled) { - if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI)) - ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, - AR_STOMP_LOW_WLAN_WGHT); + ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, + AR_STOMP_LOW_WLAN_WGHT); ath9k_hw_btcoex_enable(ah); - if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) + if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) ath9k_btcoex_timer_resume(sc); } @@ -1118,19 +1167,12 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) if (ieee80211_is_data(hdr->frame_control) && !ieee80211_is_nullfunc(hdr->frame_control) && !ieee80211_has_pm(hdr->frame_control)) { - ath_dbg(common, PS, + ath_dbg(common, ATH_DBG_PS, "Add PM=1 for a TX frame while in PS mode\n"); hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); } } - /* - * Cannot tx while the hardware is in full sleep, it first needs a full - * chip reset to recover from that - */ - if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP)) - goto exit; - if (unlikely(sc->sc_ah->power_mode != ATH9K_PM_AWAKE)) { /* * We are using PS-Poll and mac80211 can request TX while in @@ -1141,11 +1183,12 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) ath9k_hw_setrxabort(sc->sc_ah, 0); if (ieee80211_is_pspoll(hdr->frame_control)) { - ath_dbg(common, PS, + ath_dbg(common, ATH_DBG_PS, "Sending PS-Poll to pick a buffered frame\n"); sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA; } else { - ath_dbg(common, PS, "Wake up to complete TX\n"); + ath_dbg(common, ATH_DBG_PS, + "Wake up to complete TX\n"); sc->ps_flags |= PS_WAIT_FOR_TX_ACK; } /* @@ -1159,10 +1202,10 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) memset(&txctl, 0, sizeof(struct ath_tx_control)); txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)]; - ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb); + ath_dbg(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", 
skb); if (ath_tx_start(hw, skb, &txctl) != 0) { - ath_dbg(common, XMIT, "TX failed\n"); + ath_dbg(common, ATH_DBG_XMIT, "TX failed\n"); goto exit; } @@ -1176,14 +1219,13 @@ static void ath9k_stop(struct ieee80211_hw *hw) struct ath_softc *sc = hw->priv; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); - bool prev_idle; mutex_lock(&sc->mutex); ath_cancel_work(sc); if (sc->sc_flags & SC_OP_INVALID) { - ath_dbg(common, ANY, "Device not present\n"); + ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); mutex_unlock(&sc->mutex); return; } @@ -1191,12 +1233,10 @@ static void ath9k_stop(struct ieee80211_hw *hw) /* Ensure HW is awake when we try to shut it down. */ ath9k_ps_wakeup(sc); - if (ah->btcoex_hw.enabled && - ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) { + if (ah->btcoex_hw.enabled) { ath9k_hw_btcoex_disable(ah); - if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) + if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) ath9k_btcoex_timer_pause(sc); - ath_mci_flush_profile(&sc->btcoex.mci); } spin_lock_bh(&sc->sc_pcu_lock); @@ -1208,49 +1248,39 @@ static void ath9k_stop(struct ieee80211_hw *hw) * before setting the invalid flag. */ ath9k_hw_disable_interrupts(ah); - spin_unlock_bh(&sc->sc_pcu_lock); - - /* we can now sync irq and kill any running tasklets, since we already - * disabled interrupts and not holding a spin lock */ - synchronize_irq(sc->irq); - tasklet_kill(&sc->intr_tq); - tasklet_kill(&sc->bcon_tasklet); - - prev_idle = sc->ps_idle; - sc->ps_idle = true; - - spin_lock_bh(&sc->sc_pcu_lock); - - if (ah->led_pin >= 0) { - ath9k_hw_set_gpio(ah, ah->led_pin, 1); - ath9k_hw_cfg_gpio_input(ah, ah->led_pin); - } - - ath_prepare_reset(sc, false, true); + if (!(sc->sc_flags & SC_OP_INVALID)) { + ath_drain_all_txq(sc, false); + ath_stoprecv(sc); + ath9k_hw_phy_disable(ah); + } else + sc->rx.rxlink = NULL; if (sc->rx.frag) { dev_kfree_skb_any(sc->rx.frag); sc->rx.frag = NULL; } - if (!ah->curchan) - ah->curchan = ath9k_cmn_get_curchannel(hw, ah); - - ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); - ath9k_hw_phy_disable(ah); - - ath9k_hw_configpcipowersave(ah, true); + /* disable HAL and put h/w to sleep */ + ath9k_hw_disable(ah); spin_unlock_bh(&sc->sc_pcu_lock); + /* we can now sync irq and kill any running tasklets, since we already + * disabled interrupts and not holding a spin lock */ + synchronize_irq(sc->irq); + tasklet_kill(&sc->intr_tq); + tasklet_kill(&sc->bcon_tasklet); + ath9k_ps_restore(sc); + sc->ps_idle = true; + ath_radio_disable(sc, hw); + sc->sc_flags |= SC_OP_INVALID; - sc->ps_idle = prev_idle; mutex_unlock(&sc->mutex); - ath_dbg(common, CONFIG, "Driver halt\n"); + ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n"); } bool ath9k_uses_beacons(int type) @@ -1465,7 +1495,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, goto out; } - ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type); + ath_dbg(common, ATH_DBG_CONFIG, + "Attach a VIF of type: %d\n", vif->type); sc->nvifs++; @@ -1485,7 +1516,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw, struct ath_common *common = ath9k_hw_common(sc->sc_ah); int ret = 0; - ath_dbg(common, CONFIG, "Change Interface\n"); + ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n"); mutex_lock(&sc->mutex); ath9k_ps_wakeup(sc); @@ -1528,7 +1559,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, struct ath_softc *sc = hw->priv; struct ath_common *common = ath9k_hw_common(sc->sc_ah); - ath_dbg(common, CONFIG, "Detach Interface\n"); + 
ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n"); ath9k_ps_wakeup(sc); mutex_lock(&sc->mutex); @@ -1585,8 +1616,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_conf *conf = &hw->conf; + bool disable_radio = false; - ath9k_ps_wakeup(sc); mutex_lock(&sc->mutex); /* @@ -1597,8 +1628,13 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) */ if (changed & IEEE80211_CONF_CHANGE_IDLE) { sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE); - if (sc->ps_idle) - ath_cancel_work(sc); + if (!sc->ps_idle) { + ath_radio_enable(sc, hw); + ath_dbg(common, ATH_DBG_CONFIG, + "not-idle: enabling radio\n"); + } else { + disable_radio = true; + } } /* @@ -1619,10 +1655,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) if (changed & IEEE80211_CONF_CHANGE_MONITOR) { if (conf->flags & IEEE80211_CONF_MONITOR) { - ath_dbg(common, CONFIG, "Monitor mode is enabled\n"); + ath_dbg(common, ATH_DBG_CONFIG, + "Monitor mode is enabled\n"); sc->sc_ah->is_monitoring = true; } else { - ath_dbg(common, CONFIG, "Monitor mode is disabled\n"); + ath_dbg(common, ATH_DBG_CONFIG, + "Monitor mode is disabled\n"); sc->sc_ah->is_monitoring = false; } } @@ -1642,7 +1680,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) else sc->sc_flags &= ~SC_OP_OFFCHANNEL; - ath_dbg(common, CONFIG, "Set channel: %d MHz type: %d\n", + ath_dbg(common, ATH_DBG_CONFIG, + "Set channel: %d MHz type: %d\n", curchan->center_freq, conf->channel_type); /* update survey stats for the old channel before switching */ @@ -1699,14 +1738,21 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) } if (changed & IEEE80211_CONF_CHANGE_POWER) { - ath_dbg(common, CONFIG, "Set power: %d\n", conf->power_level); + ath_dbg(common, ATH_DBG_CONFIG, + "Set power: %d\n", conf->power_level); sc->config.txpowlimit = 2 * conf->power_level; + ath9k_ps_wakeup(sc); ath9k_cmn_update_txpow(ah, sc->curtxpow, sc->config.txpowlimit, &sc->curtxpow); + ath9k_ps_restore(sc); + } + + if (disable_radio) { + ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n"); + ath_radio_disable(sc, hw); } mutex_unlock(&sc->mutex); - ath9k_ps_restore(sc); return 0; } @@ -1739,8 +1785,8 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw, ath9k_hw_setrxfilter(sc->sc_ah, rfilt); ath9k_ps_restore(sc); - ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG, "Set HW RX filter: 0x%x\n", - rfilt); + ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG, + "Set HW RX filter: 0x%x\n", rfilt); } static int ath9k_sta_add(struct ieee80211_hw *hw, @@ -1752,7 +1798,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw, struct ath_node *an = (struct ath_node *) sta->drv_priv; struct ieee80211_key_conf ps_key = { }; - ath_node_attach(sc, sta, vif); + ath_node_attach(sc, sta); if (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_AP_VLAN) @@ -1797,9 +1843,6 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw, struct ath_softc *sc = hw->priv; struct ath_node *an = (struct ath_node *) sta->drv_priv; - if (!(sc->sc_flags & SC_OP_TXAGGR)) - return; - switch (cmd) { case STA_NOTIFY_SLEEP: an->sleeping = true; @@ -1837,7 +1880,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, qi.tqi_cwmax = params->cw_max; qi.tqi_burstTime = params->txop; - ath_dbg(common, CONFIG, + ath_dbg(common, ATH_DBG_CONFIG, "Configure tx [queue/halq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n", queue, txq->axq_qnum, params->aifs, params->cw_min, 
params->cw_max, params->txop); @@ -1869,8 +1912,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw, if (ath9k_modparam_nohwcrypt) return -ENOSPC; - if ((vif->type == NL80211_IFTYPE_ADHOC || - vif->type == NL80211_IFTYPE_MESH_POINT) && + if (vif->type == NL80211_IFTYPE_ADHOC && (key->cipher == WLAN_CIPHER_SUITE_TKIP || key->cipher == WLAN_CIPHER_SUITE_CCMP) && !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { @@ -1886,7 +1928,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw, mutex_lock(&sc->mutex); ath9k_ps_wakeup(sc); - ath_dbg(common, CONFIG, "Set HW Key\n"); + ath_dbg(common, ATH_DBG_CONFIG, "Set HW Key\n"); switch (cmd) { case SET_KEY: @@ -1938,8 +1980,9 @@ static void ath9k_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif) memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); common->curaid = bss_conf->aid; ath9k_hw_write_associd(sc->sc_ah); - ath_dbg(common, CONFIG, "Bss Info ASSOC %d, bssid: %pM\n", - bss_conf->aid, common->curbssid); + ath_dbg(common, ATH_DBG_CONFIG, + "Bss Info ASSOC %d, bssid: %pM\n", + bss_conf->aid, common->curbssid); ath_beacon_config(sc, vif); /* * Request a re-configuration of Beacon related timers @@ -1970,7 +2013,8 @@ static void ath9k_config_bss(struct ath_softc *sc, struct ieee80211_vif *vif) /* Reconfigure bss info */ if (avp->primary_sta_vif && !bss_conf->assoc) { - ath_dbg(common, CONFIG, "Bss Info DISASSOC %d, bssid %pM\n", + ath_dbg(common, ATH_DBG_CONFIG, + "Bss Info DISASSOC %d, bssid %pM\n", common->curaid, common->curbssid); sc->sc_flags &= ~(SC_OP_PRIM_STA_VIF | SC_OP_BEACONS); avp->primary_sta_vif = false; @@ -2012,7 +2056,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_BSSID) { ath9k_config_bss(sc, vif); - ath_dbg(common, CONFIG, "BSSID: %pM aid: 0x%x\n", + ath_dbg(common, ATH_DBG_CONFIG, "BSSID: %pM aid: 0x%x\n", common->curbssid, common->curaid); } @@ -2090,7 +2134,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, } if (changed & BSS_CHANGED_ERP_PREAMBLE) { - ath_dbg(common, CONFIG, "BSS Changed PREAMBLE %d\n", + ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n", bss_conf->use_short_preamble); if (bss_conf->use_short_preamble) sc->sc_flags |= SC_OP_PREAMBLE_SHORT; @@ -2099,7 +2143,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, } if (changed & BSS_CHANGED_ERP_CTS_PROT) { - ath_dbg(common, CONFIG, "BSS Changed CTS PROT %d\n", + ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n", bss_conf->use_cts_prot); if (bss_conf->use_cts_prot && hw->conf.channel->band != IEEE80211_BAND_5GHZ) @@ -2265,17 +2309,20 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop) cancel_delayed_work_sync(&sc->tx_complete_work); if (ah->ah_flags & AH_UNPLUGGED) { - ath_dbg(common, ANY, "Device has been unplugged!\n"); + ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n"); mutex_unlock(&sc->mutex); return; } if (sc->sc_flags & SC_OP_INVALID) { - ath_dbg(common, ANY, "Device not present\n"); + ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); mutex_unlock(&sc->mutex); return; } + if (drop) + timeout = 1; + for (j = 0; j < timeout; j++) { bool npend = false; @@ -2293,22 +2340,21 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop) } if (!npend) - break; + goto out; } - if (drop) { - ath9k_ps_wakeup(sc); - spin_lock_bh(&sc->sc_pcu_lock); - drain_txq = ath_drain_all_txq(sc, false); - spin_unlock_bh(&sc->sc_pcu_lock); + ath9k_ps_wakeup(sc); + spin_lock_bh(&sc->sc_pcu_lock); + drain_txq = ath_drain_all_txq(sc, false); + 
spin_unlock_bh(&sc->sc_pcu_lock); - if (!drain_txq) - ath_reset(sc, false); + if (!drain_txq) + ath_reset(sc, false); - ath9k_ps_restore(sc); - ieee80211_wake_queues(hw); - } + ath9k_ps_restore(sc); + ieee80211_wake_queues(hw); +out: ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0); mutex_unlock(&sc->mutex); } diff --git a/trunk/drivers/net/wireless/ath/ath9k/mci.c b/trunk/drivers/net/wireless/ath/ath9k/mci.c deleted file mode 100644 index 05c23ea4c633..000000000000 --- a/trunk/drivers/net/wireless/ath/ath9k/mci.c +++ /dev/null @@ -1,668 +0,0 @@ -/* - * Copyright (c) 2010-2011 Atheros Communications Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#include -#include - -#include "ath9k.h" -#include "mci.h" - -static const u8 ath_mci_duty_cycle[] = { 0, 50, 60, 70, 80, 85, 90, 95, 98 }; - -static struct ath_mci_profile_info* -ath_mci_find_profile(struct ath_mci_profile *mci, - struct ath_mci_profile_info *info) -{ - struct ath_mci_profile_info *entry; - - list_for_each_entry(entry, &mci->info, list) { - if (entry->conn_handle == info->conn_handle) - break; - } - return entry; -} - -static bool ath_mci_add_profile(struct ath_common *common, - struct ath_mci_profile *mci, - struct ath_mci_profile_info *info) -{ - struct ath_mci_profile_info *entry; - - if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) && - (info->type == MCI_GPM_COEX_PROFILE_VOICE)) { - ath_dbg(common, MCI, - "Too many SCO profile, failed to add new profile\n"); - return false; - } - - if (((NUM_PROF(mci) - mci->num_sco) == ATH_MCI_MAX_ACL_PROFILE) && - (info->type != MCI_GPM_COEX_PROFILE_VOICE)) { - ath_dbg(common, MCI, - "Too many ACL profile, failed to add new profile\n"); - return false; - } - - entry = ath_mci_find_profile(mci, info); - - if (entry) - memcpy(entry, info, 10); - else { - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return false; - - memcpy(entry, info, 10); - INC_PROF(mci, info); - list_add_tail(&info->list, &mci->info); - } - return true; -} - -static void ath_mci_del_profile(struct ath_common *common, - struct ath_mci_profile *mci, - struct ath_mci_profile_info *info) -{ - struct ath_mci_profile_info *entry; - - entry = ath_mci_find_profile(mci, info); - - if (!entry) { - ath_dbg(common, MCI, "Profile to be deleted not found\n"); - return; - } - DEC_PROF(mci, entry); - list_del(&entry->list); - kfree(entry); -} - -void ath_mci_flush_profile(struct ath_mci_profile *mci) -{ - struct ath_mci_profile_info *info, *tinfo; - - list_for_each_entry_safe(info, tinfo, &mci->info, list) { - list_del(&info->list); - DEC_PROF(mci, info); - kfree(info); - } - mci->aggr_limit = 0; -} - -static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex) -{ - struct ath_mci_profile *mci = &btcoex->mci; - u32 wlan_airtime = btcoex->btcoex_period * - (100 - btcoex->duty_cycle) / 100; - - /* - * Scale: 
wlan_airtime is in ms, aggr_limit is in 0.25 ms. - * When wlan_airtime is less than 4ms, aggregation limit has to be - * adjusted half of wlan_airtime to ensure that the aggregation can fit - * without collision with BT traffic. - */ - if ((wlan_airtime <= 4) && - (!mci->aggr_limit || (mci->aggr_limit > (2 * wlan_airtime)))) - mci->aggr_limit = 2 * wlan_airtime; -} - -static void ath_mci_update_scheme(struct ath_softc *sc) -{ - struct ath_common *common = ath9k_hw_common(sc->sc_ah); - struct ath_btcoex *btcoex = &sc->btcoex; - struct ath_mci_profile *mci = &btcoex->mci; - struct ath_mci_profile_info *info; - u32 num_profile = NUM_PROF(mci); - - if (num_profile == 1) { - info = list_first_entry(&mci->info, - struct ath_mci_profile_info, - list); - if (mci->num_sco && info->T == 12) { - mci->aggr_limit = 8; - ath_dbg(common, MCI, - "Single SCO, aggregation limit 2 ms\n"); - } else if ((info->type == MCI_GPM_COEX_PROFILE_BNEP) && - !info->master) { - btcoex->btcoex_period = 60; - ath_dbg(common, MCI, - "Single slave PAN/FTP, bt period 60 ms\n"); - } else if ((info->type == MCI_GPM_COEX_PROFILE_HID) && - (info->T > 0 && info->T < 50) && - (info->A > 1 || info->W > 1)) { - btcoex->duty_cycle = 30; - mci->aggr_limit = 8; - ath_dbg(common, MCI, - "Multiple attempt/timeout single HID " - "aggregation limit 2 ms dutycycle 30%%\n"); - } - } else if ((num_profile == 2) && (mci->num_hid == 2)) { - btcoex->duty_cycle = 30; - mci->aggr_limit = 8; - ath_dbg(common, MCI, - "Two HIDs aggregation limit 2 ms dutycycle 30%%\n"); - } else if (num_profile > 3) { - mci->aggr_limit = 6; - ath_dbg(common, MCI, - "Three or more profiles aggregation limit 1.5 ms\n"); - } - - if (IS_CHAN_2GHZ(sc->sc_ah->curchan)) { - if (IS_CHAN_HT(sc->sc_ah->curchan)) - ath_mci_adjust_aggr_limit(btcoex); - else - btcoex->btcoex_period >>= 1; - } - - ath9k_hw_btcoex_disable(sc->sc_ah); - ath9k_btcoex_timer_pause(sc); - - if (IS_CHAN_5GHZ(sc->sc_ah->curchan)) - return; - - btcoex->duty_cycle += (mci->num_bdr ? 
ATH_MCI_MAX_DUTY_CYCLE : 0); - if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE) - btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE; - - btcoex->btcoex_period *= 1000; - btcoex->btcoex_no_stomp = btcoex->btcoex_period * - (100 - btcoex->duty_cycle) / 100; - - ath9k_hw_btcoex_enable(sc->sc_ah); - ath9k_btcoex_timer_resume(sc); -} - - -static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload) -{ - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); - u32 payload[4] = {0, 0, 0, 0}; - - switch (opcode) { - case MCI_GPM_BT_CAL_REQ: - - ath_dbg(common, MCI, "MCI received BT_CAL_REQ\n"); - - if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) { - ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START, NULL); - ieee80211_queue_work(sc->hw, &sc->hw_reset_work); - } else - ath_dbg(common, MCI, "MCI State mismatches: %d\n", - ar9003_mci_state(ah, MCI_STATE_BT, NULL)); - - break; - - case MCI_GPM_BT_CAL_DONE: - - ath_dbg(common, MCI, "MCI received BT_CAL_DONE\n"); - - if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_CAL) - ath_dbg(common, MCI, "MCI error illegal!\n"); - else - ath_dbg(common, MCI, "MCI BT not in CAL state\n"); - - break; - - case MCI_GPM_BT_CAL_GRANT: - - ath_dbg(common, MCI, "MCI received BT_CAL_GRANT\n"); - - /* Send WLAN_CAL_DONE for now */ - ath_dbg(common, MCI, "MCI send WLAN_CAL_DONE\n"); - MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE); - ar9003_mci_send_message(sc->sc_ah, MCI_GPM, 0, payload, - 16, false, true); - break; - - default: - ath_dbg(common, MCI, "MCI Unknown GPM CAL message\n"); - break; - } -} - -static void ath_mci_process_profile(struct ath_softc *sc, - struct ath_mci_profile_info *info) -{ - struct ath_common *common = ath9k_hw_common(sc->sc_ah); - struct ath_btcoex *btcoex = &sc->btcoex; - struct ath_mci_profile *mci = &btcoex->mci; - - if (info->start) { - if (!ath_mci_add_profile(common, mci, info)) - return; - } else - ath_mci_del_profile(common, mci, info); - - btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD; - mci->aggr_limit = mci->num_sco ? 6 : 0; - if (NUM_PROF(mci)) { - btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW; - btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)]; - } else { - btcoex->bt_stomp_type = mci->num_mgmt ? 
ATH_BTCOEX_STOMP_ALL : - ATH_BTCOEX_STOMP_LOW; - btcoex->duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE; - } - - ath_mci_update_scheme(sc); -} - -static void ath_mci_process_status(struct ath_softc *sc, - struct ath_mci_profile_status *status) -{ - struct ath_common *common = ath9k_hw_common(sc->sc_ah); - struct ath_btcoex *btcoex = &sc->btcoex; - struct ath_mci_profile *mci = &btcoex->mci; - struct ath_mci_profile_info info; - int i = 0, old_num_mgmt = mci->num_mgmt; - - /* Link status type are not handled */ - if (status->is_link) { - ath_dbg(common, MCI, "Skip link type status update\n"); - return; - } - - memset(&info, 0, sizeof(struct ath_mci_profile_info)); - - info.conn_handle = status->conn_handle; - if (ath_mci_find_profile(mci, &info)) { - ath_dbg(common, MCI, - "Skip non link state update for existing profile %d\n", - status->conn_handle); - return; - } - if (status->conn_handle >= ATH_MCI_MAX_PROFILE) { - ath_dbg(common, MCI, "Ignore too many non-link update\n"); - return; - } - if (status->is_critical) - __set_bit(status->conn_handle, mci->status); - else - __clear_bit(status->conn_handle, mci->status); - - mci->num_mgmt = 0; - do { - if (test_bit(i, mci->status)) - mci->num_mgmt++; - } while (++i < ATH_MCI_MAX_PROFILE); - - if (old_num_mgmt != mci->num_mgmt) - ath_mci_update_scheme(sc); -} - -static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload) -{ - struct ath_hw *ah = sc->sc_ah; - struct ath_mci_profile_info profile_info; - struct ath_mci_profile_status profile_status; - struct ath_common *common = ath9k_hw_common(sc->sc_ah); - u32 version; - u8 major; - u8 minor; - u32 seq_num; - - switch (opcode) { - - case MCI_GPM_COEX_VERSION_QUERY: - ath_dbg(common, MCI, "MCI Recv GPM COEX Version Query\n"); - version = ar9003_mci_state(ah, - MCI_STATE_SEND_WLAN_COEX_VERSION, NULL); - break; - - case MCI_GPM_COEX_VERSION_RESPONSE: - ath_dbg(common, MCI, "MCI Recv GPM COEX Version Response\n"); - major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION); - minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION); - ath_dbg(common, MCI, "MCI BT Coex version: %d.%d\n", - major, minor); - version = (major << 8) + minor; - version = ar9003_mci_state(ah, - MCI_STATE_SET_BT_COEX_VERSION, &version); - break; - - case MCI_GPM_COEX_STATUS_QUERY: - ath_dbg(common, MCI, - "MCI Recv GPM COEX Status Query = 0x%02x\n", - *(rx_payload + MCI_GPM_COEX_B_WLAN_BITMAP)); - ar9003_mci_state(ah, - MCI_STATE_SEND_WLAN_CHANNELS, NULL); - break; - - case MCI_GPM_COEX_BT_PROFILE_INFO: - ath_dbg(common, MCI, "MCI Recv GPM Coex BT profile info\n"); - memcpy(&profile_info, - (rx_payload + MCI_GPM_COEX_B_PROFILE_TYPE), 10); - - if ((profile_info.type == MCI_GPM_COEX_PROFILE_UNKNOWN) - || (profile_info.type >= - MCI_GPM_COEX_PROFILE_MAX)) { - - ath_dbg(common, MCI, - "illegal profile type = %d, state = %d\n", - profile_info.type, - profile_info.start); - break; - } - - ath_mci_process_profile(sc, &profile_info); - break; - - case MCI_GPM_COEX_BT_STATUS_UPDATE: - profile_status.is_link = *(rx_payload + - MCI_GPM_COEX_B_STATUS_TYPE); - profile_status.conn_handle = *(rx_payload + - MCI_GPM_COEX_B_STATUS_LINKID); - profile_status.is_critical = *(rx_payload + - MCI_GPM_COEX_B_STATUS_STATE); - - seq_num = *((u32 *)(rx_payload + 12)); - ath_dbg(common, MCI, - "MCI Recv GPM COEX BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%d\n", - profile_status.is_link, profile_status.conn_handle, - profile_status.is_critical, seq_num); - - ath_mci_process_status(sc, &profile_status); - break; - - default: - ath_dbg(common, 
MCI, "MCI Unknown GPM COEX message = 0x%02x\n", - opcode); - break; - } -} - -static int ath_mci_buf_alloc(struct ath_softc *sc, struct ath_mci_buf *buf) -{ - int error = 0; - - buf->bf_addr = dma_alloc_coherent(sc->dev, buf->bf_len, - &buf->bf_paddr, GFP_KERNEL); - - if (buf->bf_addr == NULL) { - error = -ENOMEM; - goto fail; - } - - return 0; - -fail: - memset(buf, 0, sizeof(*buf)); - return error; -} - -static void ath_mci_buf_free(struct ath_softc *sc, struct ath_mci_buf *buf) -{ - if (buf->bf_addr) { - dma_free_coherent(sc->dev, buf->bf_len, buf->bf_addr, - buf->bf_paddr); - memset(buf, 0, sizeof(*buf)); - } -} - -int ath_mci_setup(struct ath_softc *sc) -{ - struct ath_common *common = ath9k_hw_common(sc->sc_ah); - struct ath_mci_coex *mci = &sc->mci_coex; - int error = 0; - - if (!ATH9K_HW_CAP_MCI) - return 0; - - mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE; - - if (ath_mci_buf_alloc(sc, &mci->sched_buf)) { - ath_dbg(common, FATAL, "MCI buffer alloc failed\n"); - error = -ENOMEM; - goto fail; - } - - mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE; - - memset(mci->sched_buf.bf_addr, MCI_GPM_RSVD_PATTERN, - mci->sched_buf.bf_len); - - mci->gpm_buf.bf_len = ATH_MCI_GPM_BUF_SIZE; - mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr + - mci->sched_buf.bf_len; - mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len; - - /* initialize the buffer */ - memset(mci->gpm_buf.bf_addr, MCI_GPM_RSVD_PATTERN, mci->gpm_buf.bf_len); - - ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr, - mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4), - mci->sched_buf.bf_paddr); -fail: - return error; -} - -void ath_mci_cleanup(struct ath_softc *sc) -{ - struct ath_hw *ah = sc->sc_ah; - struct ath_mci_coex *mci = &sc->mci_coex; - - if (!ATH9K_HW_CAP_MCI) - return; - - /* - * both schedule and gpm buffers will be released - */ - ath_mci_buf_free(sc, &mci->sched_buf); - ar9003_mci_cleanup(ah); -} - -void ath_mci_intr(struct ath_softc *sc) -{ - struct ath_mci_coex *mci = &sc->mci_coex; - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); - u32 mci_int, mci_int_rxmsg; - u32 offset, subtype, opcode; - u32 *pgpm; - u32 more_data = MCI_GPM_MORE; - bool skip_gpm = false; - - if (!ATH9K_HW_CAP_MCI) - return; - - ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg); - - if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) == 0) { - - ar9003_mci_state(sc->sc_ah, MCI_STATE_INIT_GPM_OFFSET, NULL); - ath_dbg(common, MCI, "MCI interrupt but MCI disabled\n"); - - ath_dbg(common, MCI, - "MCI interrupt: intr = 0x%x, intr_rxmsg = 0x%x\n", - mci_int, mci_int_rxmsg); - return; - } - - if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) { - u32 payload[4] = { 0xffffffff, 0xffffffff, - 0xffffffff, 0xffffff00}; - - /* - * The following REMOTE_RESET and SYS_WAKING used to sent - * only when BT wake up. Now they are always sent, as a - * recovery method to reset BT MCI's RX alignment. 
- */ - ath_dbg(common, MCI, "MCI interrupt send REMOTE_RESET\n"); - - ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0, - payload, 16, true, false); - ath_dbg(common, MCI, "MCI interrupt send SYS_WAKING\n"); - ar9003_mci_send_message(ah, MCI_SYS_WAKING, 0, - NULL, 0, true, false); - - mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE; - ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE, NULL); - - /* - * always do this for recovery and 2G/5G toggling and LNA_TRANS - */ - ath_dbg(common, MCI, "MCI Set BT state to AWAKE\n"); - ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, NULL); - } - - /* Processing SYS_WAKING/SYS_SLEEPING */ - if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) { - mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING; - - if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_SLEEP) { - - if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL) - == MCI_BT_SLEEP) - ath_dbg(common, MCI, - "MCI BT stays in sleep mode\n"); - else { - ath_dbg(common, MCI, - "MCI Set BT state to AWAKE\n"); - ar9003_mci_state(ah, - MCI_STATE_SET_BT_AWAKE, NULL); - } - } else - ath_dbg(common, MCI, "MCI BT stays in AWAKE mode\n"); - } - - if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) { - - mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING; - - if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) { - - if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL) - == MCI_BT_AWAKE) - ath_dbg(common, MCI, - "MCI BT stays in AWAKE mode\n"); - else { - ath_dbg(common, MCI, - "MCI SetBT state to SLEEP\n"); - ar9003_mci_state(ah, MCI_STATE_SET_BT_SLEEP, - NULL); - } - } else - ath_dbg(common, MCI, "MCI BT stays in SLEEP mode\n"); - } - - if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) || - (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) { - - ath_dbg(common, MCI, "MCI RX broken, skip GPM msgs\n"); - ar9003_mci_state(ah, MCI_STATE_RECOVER_RX, NULL); - skip_gpm = true; - } - - if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) { - - mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO; - offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET, - NULL); - } - - if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) { - - mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM; - - while (more_data == MCI_GPM_MORE) { - - pgpm = mci->gpm_buf.bf_addr; - offset = ar9003_mci_state(ah, - MCI_STATE_NEXT_GPM_OFFSET, &more_data); - - if (offset == MCI_GPM_INVALID) - break; - - pgpm += (offset >> 2); - - /* - * The first dword is timer. - * The real data starts from 2nd dword. 
- */ - - subtype = MCI_GPM_TYPE(pgpm); - opcode = MCI_GPM_OPCODE(pgpm); - - if (!skip_gpm) { - - if (MCI_GPM_IS_CAL_TYPE(subtype)) - ath_mci_cal_msg(sc, subtype, - (u8 *) pgpm); - else { - switch (subtype) { - case MCI_GPM_COEX_AGENT: - ath_mci_msg(sc, opcode, - (u8 *) pgpm); - break; - default: - break; - } - } - } - MCI_GPM_RECYCLE(pgpm); - } - } - - if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_HW_MSG_MASK) { - - if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL) - mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL; - - if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_INFO) { - mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO; - ath_dbg(common, MCI, "MCI LNA_INFO\n"); - } - - if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) { - - int value_dbm = ar9003_mci_state(ah, - MCI_STATE_CONT_RSSI_POWER, NULL); - - mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO; - - if (ar9003_mci_state(ah, MCI_STATE_CONT_TXRX, NULL)) - ath_dbg(common, MCI, - "MCI CONT_INFO: (tx) pri = %d, pwr = %d dBm\n", - ar9003_mci_state(ah, - MCI_STATE_CONT_PRIORITY, NULL), - value_dbm); - else - ath_dbg(common, MCI, - "MCI CONT_INFO: (rx) pri = %d,pwr = %d dBm\n", - ar9003_mci_state(ah, - MCI_STATE_CONT_PRIORITY, NULL), - value_dbm); - } - - if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK) { - mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_NACK; - ath_dbg(common, MCI, "MCI CONT_NACK\n"); - } - - if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_RST) { - mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_RST; - ath_dbg(common, MCI, "MCI CONT_RST\n"); - } - } - - if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) || - (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) - mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR | - AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT); - - if (mci_int_rxmsg & 0xfffffffe) - ath_dbg(common, MCI, "MCI not processed mci_int_rxmsg = 0x%x\n", - mci_int_rxmsg); -} diff --git a/trunk/drivers/net/wireless/ath/ath9k/mci.h b/trunk/drivers/net/wireless/ath/ath9k/mci.h deleted file mode 100644 index 29e3e51d078f..000000000000 --- a/trunk/drivers/net/wireless/ath/ath9k/mci.h +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright (c) 2010-2011 Atheros Communications Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#ifndef MCI_H -#define MCI_H - -#define ATH_MCI_SCHED_BUF_SIZE (16 * 16) /* 16 entries, 4 dword each */ -#define ATH_MCI_GPM_MAX_ENTRY 16 -#define ATH_MCI_GPM_BUF_SIZE (ATH_MCI_GPM_MAX_ENTRY * 16) -#define ATH_MCI_DEF_BT_PERIOD 40 -#define ATH_MCI_BDR_DUTY_CYCLE 20 -#define ATH_MCI_MAX_DUTY_CYCLE 90 - -#define ATH_MCI_DEF_AGGR_LIMIT 6 /* in 0.24 ms */ -#define ATH_MCI_MAX_ACL_PROFILE 7 -#define ATH_MCI_MAX_SCO_PROFILE 1 -#define ATH_MCI_MAX_PROFILE (ATH_MCI_MAX_ACL_PROFILE +\ - ATH_MCI_MAX_SCO_PROFILE) - -#define INC_PROF(_mci, _info) do { \ - switch (_info->type) { \ - case MCI_GPM_COEX_PROFILE_RFCOMM:\ - _mci->num_other_acl++; \ - break; \ - case MCI_GPM_COEX_PROFILE_A2DP: \ - _mci->num_a2dp++; \ - if (!_info->edr) \ - _mci->num_bdr++; \ - break; \ - case MCI_GPM_COEX_PROFILE_HID: \ - _mci->num_hid++; \ - break; \ - case MCI_GPM_COEX_PROFILE_BNEP: \ - _mci->num_pan++; \ - break; \ - case MCI_GPM_COEX_PROFILE_VOICE: \ - _mci->num_sco++; \ - break; \ - default: \ - break; \ - } \ - } while (0) - -#define DEC_PROF(_mci, _info) do { \ - switch (_info->type) { \ - case MCI_GPM_COEX_PROFILE_RFCOMM:\ - _mci->num_other_acl--; \ - break; \ - case MCI_GPM_COEX_PROFILE_A2DP: \ - _mci->num_a2dp--; \ - if (!_info->edr) \ - _mci->num_bdr--; \ - break; \ - case MCI_GPM_COEX_PROFILE_HID: \ - _mci->num_hid--; \ - break; \ - case MCI_GPM_COEX_PROFILE_BNEP: \ - _mci->num_pan--; \ - break; \ - case MCI_GPM_COEX_PROFILE_VOICE: \ - _mci->num_sco--; \ - break; \ - default: \ - break; \ - } \ - } while (0) - -#define NUM_PROF(_mci) (_mci->num_other_acl + _mci->num_a2dp + \ - _mci->num_hid + _mci->num_pan + _mci->num_sco) - -struct ath_mci_profile_info { - u8 type; - u8 conn_handle; - bool start; - bool master; - bool edr; - u8 voice_type; - u16 T; /* Voice: Tvoice, HID: Tsniff, in slots */ - u8 W; /* Voice: Wvoice, HID: Sniff timeout, in slots */ - u8 A; /* HID: Sniff attempt, in slots */ - struct list_head list; -}; - -struct ath_mci_profile_status { - bool is_critical; - bool is_link; - u8 conn_handle; -}; - -struct ath_mci_profile { - struct list_head info; - DECLARE_BITMAP(status, ATH_MCI_MAX_PROFILE); - u16 aggr_limit; - u8 num_mgmt; - u8 num_sco; - u8 num_a2dp; - u8 num_hid; - u8 num_pan; - u8 num_other_acl; - u8 num_bdr; -}; - - -struct ath_mci_buf { - void *bf_addr; /* virtual addr of desc */ - dma_addr_t bf_paddr; /* physical addr of buffer */ - u32 bf_len; /* len of data */ -}; - -struct ath_mci_coex { - atomic_t mci_cal_flag; - struct ath_mci_buf sched_buf; - struct ath_mci_buf gpm_buf; - u32 bt_cal_start; -}; - -void ath_mci_flush_profile(struct ath_mci_profile *mci); -int ath_mci_setup(struct ath_softc *sc); -void ath_mci_cleanup(struct ath_softc *sc); -void ath_mci_intr(struct ath_softc *sc); -#endif diff --git a/trunk/drivers/net/wireless/ath/ath9k/pci.c b/trunk/drivers/net/wireless/ath/ath9k/pci.c index 77dc327def8d..2dcdf63cb390 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/pci.c +++ b/trunk/drivers/net/wireless/ath/ath9k/pci.c @@ -121,7 +121,7 @@ static void ath_pci_aspm_init(struct ath_common *common) if (!parent) return; - if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) { + if (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) { /* Bluetooth coexistance requires disabling ASPM. 
*/ pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &aspm); aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); @@ -307,11 +307,12 @@ static int ath_pci_suspend(struct device *device) struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct ath_softc *sc = hw->priv; + ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1); + /* The device has to be moved to FULLSLEEP forcibly. * Otherwise the chip never moved to full sleep, * when no interface is up. */ - ath9k_hw_disable(sc->sc_ah); ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP); return 0; @@ -320,6 +321,8 @@ static int ath_pci_suspend(struct device *device) static int ath_pci_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); + struct ieee80211_hw *hw = pci_get_drvdata(pdev); + struct ath_softc *sc = hw->priv; u32 val; /* @@ -331,6 +334,22 @@ static int ath_pci_resume(struct device *device) if ((val & 0x0000ff00) != 0) pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); + ath9k_ps_wakeup(sc); + /* Enable LED */ + ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin, + AR_GPIO_OUTPUT_MUX_AS_OUTPUT); + ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0); + + /* + * Reset key cache to sane defaults (all entries cleared) instead of + * semi-random values after suspend/resume. + */ + ath9k_cmn_init_crypto(sc->sc_ah); + ath9k_ps_restore(sc); + + sc->ps_idle = true; + ath_radio_disable(sc, hw); + return 0; } diff --git a/trunk/drivers/net/wireless/ath/ath9k/rc.c b/trunk/drivers/net/wireless/ath/ath9k/rc.c index b3c3798fe513..528d5f3e868c 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/rc.c +++ b/trunk/drivers/net/wireless/ath/ath9k/rc.c @@ -1199,7 +1199,7 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc, return &ar5416_11na_ratetable; return &ar5416_11a_ratetable; default: - ath_dbg(common, CONFIG, "Invalid band\n"); + ath_dbg(common, ATH_DBG_CONFIG, "Invalid band\n"); return NULL; } } @@ -1276,7 +1276,8 @@ static void ath_rc_init(struct ath_softc *sc, ath_rc_priv->valid_rate_index[k-1]; ath_rc_priv->rate_table = rate_table; - ath_dbg(common, CONFIG, "RC Initialized with capabilities: 0x%x\n", + ath_dbg(common, ATH_DBG_CONFIG, + "RC Initialized with capabilities: 0x%x\n", ath_rc_priv->ht_cap); } @@ -1473,7 +1474,7 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband, oper_cw40, oper_sgi); ath_rc_init(sc, priv_sta, sband, sta, rate_table); - ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG, + ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG, "Operating HT Bandwidth changed to: %d\n", sc->hw->conf.channel_type); } diff --git a/trunk/drivers/net/wireless/ath/ath9k/recv.c b/trunk/drivers/net/wireless/ath/ath9k/recv.c index 0e666fbe0842..67b862cdae6d 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/recv.c +++ b/trunk/drivers/net/wireless/ath/ath9k/recv.c @@ -172,7 +172,7 @@ static void ath_rx_addbuffer_edma(struct ath_softc *sc, u32 nbuf = 0; if (list_empty(&sc->rx.rxbuf)) { - ath_dbg(common, QUEUE, "No free rx buf available\n"); + ath_dbg(common, ATH_DBG_QUEUE, "No free rx buf available\n"); return; } @@ -337,7 +337,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs) if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { return ath_rx_edma_init(sc, nbufs); } else { - ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n", + ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n", common->cachelsz, common->rx_bufsize); /* Initialize rx descriptors */ @@ -475,6 +475,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc) return rfilt; +#undef RX_FILTER_PRESERVE } int ath_startrecv(struct 
ath_softc *sc) @@ -591,7 +592,7 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) if (sc->ps_flags & PS_BEACON_SYNC) { sc->ps_flags &= ~PS_BEACON_SYNC; - ath_dbg(common, PS, + ath_dbg(common, ATH_DBG_PS, "Reconfigure Beacon timers based on timestamp from the AP\n"); ath_set_beacon(sc); } @@ -604,7 +605,7 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) * a backup trigger for returning into NETWORK SLEEP state, * so we are waiting for it as well. */ - ath_dbg(common, PS, + ath_dbg(common, ATH_DBG_PS, "Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n"); sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON; return; @@ -617,7 +618,8 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) * been delivered. */ sc->ps_flags &= ~PS_WAIT_FOR_CAB; - ath_dbg(common, PS, "PS wait for CAB frames timed out\n"); + ath_dbg(common, ATH_DBG_PS, + "PS wait for CAB frames timed out\n"); } } @@ -642,13 +644,13 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon) * point. */ sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON); - ath_dbg(common, PS, + ath_dbg(common, ATH_DBG_PS, "All PS CAB frames received, back to sleep\n"); } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) && !is_multicast_ether_addr(hdr->addr1) && !ieee80211_has_morefrags(hdr->frame_control)) { sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA; - ath_dbg(common, PS, + ath_dbg(common, ATH_DBG_PS, "Going back to sleep after having received PS-Poll data (0x%lx)\n", sc->ps_flags & (PS_WAIT_FOR_BEACON | PS_WAIT_FOR_CAB | @@ -931,7 +933,7 @@ static int ath9k_process_rate(struct ath_common *common, * No valid hardware bitrate found -- we should not get here * because hardware has already validated this frame as OK. */ - ath_dbg(common, ANY, + ath_dbg(common, ATH_DBG_ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n", rx_stats->rs_rate); @@ -1822,7 +1824,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len); rxs = IEEE80211_SKB_RXCB(hdr_skb); if (ieee80211_is_beacon(hdr->frame_control) && - !is_zero_ether_addr(common->curbssid) && !compare_ether_addr(hdr->addr3, common->curbssid)) rs.is_mybeacon = true; else @@ -1837,6 +1838,11 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) if (sc->sc_flags & SC_OP_RXFLUSH) goto requeue_drop_frag; + retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs, + rxs, &decrypt_error); + if (retval) + goto requeue_drop_frag; + rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; if (rs.rs_tstamp > tsf_lower && unlikely(rs.rs_tstamp - tsf_lower > 0x10000000)) @@ -1846,11 +1852,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) unlikely(tsf_lower - rs.rs_tstamp > 0x10000000)) rxs->mactime += 0x100000000ULL; - retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs, - rxs, &decrypt_error); - if (retval) - goto requeue_drop_frag; - /* Ensure we always have an skb to requeue once we are done * processing the current buffer's skb */ requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC); @@ -1922,20 +1923,15 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) skb = hdr_skb; } - - if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) { - - /* - * change the default rx antenna if rx diversity - * chooses the other antenna 3 times in a row. 
- */ - if (sc->rx.defant != rs.rs_antenna) { - if (++sc->rx.rxotherant >= 3) - ath_setdefantenna(sc, rs.rs_antenna); - } else { - sc->rx.rxotherant = 0; - } - + /* + * change the default rx antenna if rx diversity chooses the + * other antenna 3 times in a row. + */ + if (sc->rx.defant != rs.rs_antenna) { + if (++sc->rx.rxotherant >= 3) + ath_setdefantenna(sc, rs.rs_antenna); + } else { + sc->rx.rxotherant = 0; } if (rxs->flag & RX_FLAG_MMIC_STRIPPED) diff --git a/trunk/drivers/net/wireless/ath/ath9k/reg.h b/trunk/drivers/net/wireless/ath/ath9k/reg.h index 6e2f18861f5d..8fcb7e9e8399 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/reg.h +++ b/trunk/drivers/net/wireless/ath/ath9k/reg.h @@ -1006,8 +1006,6 @@ enum { #define AR_INTR_ASYNC_MASK (AR_SREV_9340(ah) ? 0x4018 : 0x4030) #define AR_INTR_ASYNC_MASK_GPIO 0xFFFC0000 #define AR_INTR_ASYNC_MASK_GPIO_S 18 -#define AR_INTR_ASYNC_MASK_MCI 0x00000080 -#define AR_INTR_ASYNC_MASK_MCI_S 7 #define AR_INTR_SYNC_MASK (AR_SREV_9340(ah) ? 0x401c : 0x4034) #define AR_INTR_SYNC_MASK_GPIO 0xFFFC0000 @@ -1015,14 +1013,6 @@ enum { #define AR_INTR_ASYNC_CAUSE_CLR (AR_SREV_9340(ah) ? 0x4020 : 0x4038) #define AR_INTR_ASYNC_CAUSE (AR_SREV_9340(ah) ? 0x4020 : 0x4038) -#define AR_INTR_ASYNC_CAUSE_MCI 0x00000080 -#define AR_INTR_ASYNC_USED (AR_INTR_MAC_IRQ | \ - AR_INTR_ASYNC_CAUSE_MCI) - -/* Asynchronous Interrupt Enable Register */ -#define AR_INTR_ASYNC_ENABLE_MCI 0x00000080 -#define AR_INTR_ASYNC_ENABLE_MCI_S 7 - #define AR_INTR_ASYNC_ENABLE (AR_SREV_9340(ah) ? 0x4024 : 0x403c) #define AR_INTR_ASYNC_ENABLE_GPIO 0xFFFC0000 @@ -1279,8 +1269,6 @@ enum { #define AR_RTC_INTR_MASK \ ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0058) : 0x7058) -#define AR_RTC_KEEP_AWAKE 0x7034 - /* RTC_DERIVED_* - only for AR9100 */ #define AR_RTC_DERIVED_CLK \ @@ -1567,8 +1555,6 @@ enum { #define AR_DIAG_FRAME_NV0 0x00020000 #define AR_DIAG_OBS_PT_SEL1 0x000C0000 #define AR_DIAG_OBS_PT_SEL1_S 18 -#define AR_DIAG_OBS_PT_SEL2 0x08000000 -#define AR_DIAG_OBS_PT_SEL2_S 27 #define AR_DIAG_FORCE_RX_CLEAR 0x00100000 /* force rx_clear high */ #define AR_DIAG_IGNORE_VIRT_CS 0x00200000 #define AR_DIAG_FORCE_CH_IDLE_HIGH 0x00400000 @@ -1766,10 +1752,19 @@ enum { #define AR_BT_COEX_WL_WEIGHTS0 0x8174 #define AR_BT_COEX_WL_WEIGHTS1 0x81c4 -#define AR_MCI_COEX_WL_WEIGHTS(_i) (0x18b0 + (_i << 2)) -#define AR_BT_COEX_BT_WEIGHTS(_i) (0x83ac + (_i << 2)) -#define AR9300_BT_WGHT 0xcccc4444 +#define AR_BT_COEX_BT_WEIGHTS0 0x83ac +#define AR_BT_COEX_BT_WEIGHTS1 0x83b0 +#define AR_BT_COEX_BT_WEIGHTS2 0x83b4 +#define AR_BT_COEX_BT_WEIGHTS3 0x83b8 + +#define AR9300_BT_WGHT 0xcccc4444 +#define AR9300_STOMP_ALL_WLAN_WGHT0 0xfffffff0 +#define AR9300_STOMP_ALL_WLAN_WGHT1 0xfffffff0 +#define AR9300_STOMP_LOW_WLAN_WGHT0 0x88888880 +#define AR9300_STOMP_LOW_WLAN_WGHT1 0x88888880 +#define AR9300_STOMP_NONE_WLAN_WGHT0 0x00000000 +#define AR9300_STOMP_NONE_WLAN_WGHT1 0x00000000 #define AR_BT_COEX_MODE2 0x817c #define AR_BT_BCN_MISS_THRESH 0x000000ff @@ -1943,277 +1938,37 @@ enum { #define AR_PHY_AGC_CONTROL_YCOK_MAX_S 6 /* MCI Registers */ - -#define AR_MCI_COMMAND0 0x1800 -#define AR_MCI_COMMAND0_HEADER 0xFF -#define AR_MCI_COMMAND0_HEADER_S 0 -#define AR_MCI_COMMAND0_LEN 0x1f00 -#define AR_MCI_COMMAND0_LEN_S 8 -#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP 0x2000 -#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP_S 13 - -#define AR_MCI_COMMAND1 0x1804 - -#define AR_MCI_COMMAND2 0x1808 -#define AR_MCI_COMMAND2_RESET_TX 0x01 -#define AR_MCI_COMMAND2_RESET_TX_S 0 -#define AR_MCI_COMMAND2_RESET_RX 0x02 -#define 
AR_MCI_COMMAND2_RESET_RX_S 1 -#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES 0x3FC -#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES_S 2 -#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP 0x400 -#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP_S 10 - -#define AR_MCI_RX_CTRL 0x180c - -#define AR_MCI_TX_CTRL 0x1810 -/* 0 = no division, 1 = divide by 2, 2 = divide by 4, 3 = divide by 8 */ -#define AR_MCI_TX_CTRL_CLK_DIV 0x03 -#define AR_MCI_TX_CTRL_CLK_DIV_S 0 -#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE 0x04 -#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE_S 2 -#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ 0xFFFFF8 -#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ_S 3 -#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM 0xF000000 -#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM_S 24 - -#define AR_MCI_MSG_ATTRIBUTES_TABLE 0x1814 -#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM 0xFFFF -#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM_S 0 -#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR 0xFFFF0000 -#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR_S 16 - -#define AR_MCI_SCHD_TABLE_0 0x1818 -#define AR_MCI_SCHD_TABLE_1 0x181c -#define AR_MCI_GPM_0 0x1820 -#define AR_MCI_GPM_1 0x1824 -#define AR_MCI_GPM_WRITE_PTR 0xFFFF0000 -#define AR_MCI_GPM_WRITE_PTR_S 16 -#define AR_MCI_GPM_BUF_LEN 0x0000FFFF -#define AR_MCI_GPM_BUF_LEN_S 0 - -#define AR_MCI_INTERRUPT_RAW 0x1828 -#define AR_MCI_INTERRUPT_EN 0x182c -#define AR_MCI_INTERRUPT_SW_MSG_DONE 0x00000001 -#define AR_MCI_INTERRUPT_SW_MSG_DONE_S 0 -#define AR_MCI_INTERRUPT_CPU_INT_MSG 0x00000002 -#define AR_MCI_INTERRUPT_CPU_INT_MSG_S 1 -#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL 0x00000004 -#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL_S 2 -#define AR_MCI_INTERRUPT_RX_INVALID_HDR 0x00000008 -#define AR_MCI_INTERRUPT_RX_INVALID_HDR_S 3 -#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL 0x00000010 -#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL_S 4 -#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL 0x00000020 -#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL_S 5 -#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL 0x00000080 -#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL_S 7 -#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL 0x00000100 -#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL_S 8 -#define AR_MCI_INTERRUPT_RX_MSG 0x00000200 -#define AR_MCI_INTERRUPT_RX_MSG_S 9 -#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE 0x00000400 -#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE_S 10 -#define AR_MCI_INTERRUPT_BT_PRI 0x07fff800 -#define AR_MCI_INTERRUPT_BT_PRI_S 11 -#define AR_MCI_INTERRUPT_BT_PRI_THRESH 0x08000000 -#define AR_MCI_INTERRUPT_BT_PRI_THRESH_S 27 -#define AR_MCI_INTERRUPT_BT_FREQ 0x10000000 -#define AR_MCI_INTERRUPT_BT_FREQ_S 28 -#define AR_MCI_INTERRUPT_BT_STOMP 0x20000000 -#define AR_MCI_INTERRUPT_BT_STOMP_S 29 -#define AR_MCI_INTERRUPT_BB_AIC_IRQ 0x40000000 -#define AR_MCI_INTERRUPT_BB_AIC_IRQ_S 30 -#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT 0x80000000 -#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT_S 31 - -#define AR_MCI_INTERRUPT_DEFAULT (AR_MCI_INTERRUPT_SW_MSG_DONE | \ - AR_MCI_INTERRUPT_RX_INVALID_HDR | \ - AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \ - AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \ - AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \ - AR_MCI_INTERRUPT_TX_SW_MSG_FAIL | \ - AR_MCI_INTERRUPT_RX_MSG | \ - AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE | \ - AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT) - -#define AR_MCI_INTERRUPT_MSG_FAIL_MASK (AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \ - AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \ - AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \ - AR_MCI_INTERRUPT_TX_SW_MSG_FAIL) - -#define AR_MCI_REMOTE_CPU_INT 0x1830 -#define AR_MCI_REMOTE_CPU_INT_EN 0x1834 -#define AR_MCI_INTERRUPT_RX_MSG_RAW 0x1838 -#define AR_MCI_INTERRUPT_RX_MSG_EN 
0x183c -#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001 -#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0 -#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002 -#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1 -#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004 -#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2 -#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008 -#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3 -#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010 -#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4 -#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020 -#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5 -#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040 -#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6 -#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100 -#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8 -#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200 -#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9 -#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400 -#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10 -#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800 -#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11 -#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000 -#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12 -#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \ +#define AR_MCI_INTERRUPT_RX_MSG_EN 0x183c +#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001 +#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0 +#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002 +#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1 +#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004 +#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2 +#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008 +#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3 +#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010 +#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4 +#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020 +#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5 +#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040 +#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6 +#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100 +#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8 +#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200 +#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9 +#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400 +#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10 +#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800 +#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11 +#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000 +#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12 +#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \ AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL| \ AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \ AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \ AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \ AR_MCI_INTERRUPT_RX_MSG_CONT_RST) -#define AR_MCI_INTERRUPT_RX_MSG_DEFAULT (AR_MCI_INTERRUPT_RX_MSG_GPM | \ - AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET| \ - AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING | \ - AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING| \ - AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \ - AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL | \ - AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \ - AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \ - AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \ - AR_MCI_INTERRUPT_RX_MSG_CONT_RST | \ - AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) - -#define AR_MCI_CPU_INT 0x1840 - -#define AR_MCI_RX_STATUS 0x1844 -#define AR_MCI_RX_LAST_SCHD_MSG_INDEX 0x00000F00 -#define AR_MCI_RX_LAST_SCHD_MSG_INDEX_S 8 -#define AR_MCI_RX_REMOTE_SLEEP 0x00001000 -#define AR_MCI_RX_REMOTE_SLEEP_S 12 -#define 
AR_MCI_RX_MCI_CLK_REQ 0x00002000 -#define AR_MCI_RX_MCI_CLK_REQ_S 13 - -#define AR_MCI_CONT_STATUS 0x1848 -#define AR_MCI_CONT_RSSI_POWER 0x000000FF -#define AR_MCI_CONT_RSSI_POWER_S 0 -#define AR_MCI_CONT_RRIORITY 0x0000FF00 -#define AR_MCI_CONT_RRIORITY_S 8 -#define AR_MCI_CONT_TXRX 0x00010000 -#define AR_MCI_CONT_TXRX_S 16 - -#define AR_MCI_BT_PRI0 0x184c -#define AR_MCI_BT_PRI1 0x1850 -#define AR_MCI_BT_PRI2 0x1854 -#define AR_MCI_BT_PRI3 0x1858 -#define AR_MCI_BT_PRI 0x185c -#define AR_MCI_WL_FREQ0 0x1860 -#define AR_MCI_WL_FREQ1 0x1864 -#define AR_MCI_WL_FREQ2 0x1868 -#define AR_MCI_GAIN 0x186c -#define AR_MCI_WBTIMER1 0x1870 -#define AR_MCI_WBTIMER2 0x1874 -#define AR_MCI_WBTIMER3 0x1878 -#define AR_MCI_WBTIMER4 0x187c -#define AR_MCI_MAXGAIN 0x1880 -#define AR_MCI_HW_SCHD_TBL_CTL 0x1884 -#define AR_MCI_HW_SCHD_TBL_D0 0x1888 -#define AR_MCI_HW_SCHD_TBL_D1 0x188c -#define AR_MCI_HW_SCHD_TBL_D2 0x1890 -#define AR_MCI_HW_SCHD_TBL_D3 0x1894 -#define AR_MCI_TX_PAYLOAD0 0x1898 -#define AR_MCI_TX_PAYLOAD1 0x189c -#define AR_MCI_TX_PAYLOAD2 0x18a0 -#define AR_MCI_TX_PAYLOAD3 0x18a4 -#define AR_BTCOEX_WBTIMER 0x18a8 - -#define AR_BTCOEX_CTRL 0x18ac -#define AR_BTCOEX_CTRL_AR9462_MODE 0x00000001 -#define AR_BTCOEX_CTRL_AR9462_MODE_S 0 -#define AR_BTCOEX_CTRL_WBTIMER_EN 0x00000002 -#define AR_BTCOEX_CTRL_WBTIMER_EN_S 1 -#define AR_BTCOEX_CTRL_MCI_MODE_EN 0x00000004 -#define AR_BTCOEX_CTRL_MCI_MODE_EN_S 2 -#define AR_BTCOEX_CTRL_LNA_SHARED 0x00000008 -#define AR_BTCOEX_CTRL_LNA_SHARED_S 3 -#define AR_BTCOEX_CTRL_PA_SHARED 0x00000010 -#define AR_BTCOEX_CTRL_PA_SHARED_S 4 -#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN 0x00000020 -#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN_S 5 -#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN 0x00000040 -#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN_S 6 -#define AR_BTCOEX_CTRL_NUM_ANTENNAS 0x00000180 -#define AR_BTCOEX_CTRL_NUM_ANTENNAS_S 7 -#define AR_BTCOEX_CTRL_RX_CHAIN_MASK 0x00000E00 -#define AR_BTCOEX_CTRL_RX_CHAIN_MASK_S 9 -#define AR_BTCOEX_CTRL_AGGR_THRESH 0x00007000 -#define AR_BTCOEX_CTRL_AGGR_THRESH_S 12 -#define AR_BTCOEX_CTRL_1_CHAIN_BCN 0x00080000 -#define AR_BTCOEX_CTRL_1_CHAIN_BCN_S 19 -#define AR_BTCOEX_CTRL_1_CHAIN_ACK 0x00100000 -#define AR_BTCOEX_CTRL_1_CHAIN_ACK_S 20 -#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN 0x1FE00000 -#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN_S 28 -#define AR_BTCOEX_CTRL_REDUCE_TXPWR 0x20000000 -#define AR_BTCOEX_CTRL_REDUCE_TXPWR_S 29 -#define AR_BTCOEX_CTRL_SPDT_ENABLE_10 0x40000000 -#define AR_BTCOEX_CTRL_SPDT_ENABLE_10_S 30 -#define AR_BTCOEX_CTRL_SPDT_POLARITY 0x80000000 -#define AR_BTCOEX_CTRL_SPDT_POLARITY_S 31 - -#define AR_BTCOEX_WL_WEIGHTS0 0x18b0 -#define AR_BTCOEX_WL_WEIGHTS1 0x18b4 -#define AR_BTCOEX_WL_WEIGHTS2 0x18b8 -#define AR_BTCOEX_WL_WEIGHTS3 0x18bc -#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2)) -#define AR_BTCOEX_WL_LNA 0x1940 -#define AR_BTCOEX_RFGAIN_CTRL 0x1944 - -#define AR_BTCOEX_CTRL2 0x1948 -#define AR_BTCOEX_CTRL2_TXPWR_THRESH 0x0007F800 -#define AR_BTCOEX_CTRL2_TXPWR_THRESH_S 11 -#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK 0x00380000 -#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK_S 19 -#define AR_BTCOEX_CTRL2_RX_DEWEIGHT 0x00400000 -#define AR_BTCOEX_CTRL2_RX_DEWEIGHT_S 22 -#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL 0x00800000 -#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL_S 23 -#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL 0x01000000 -#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL_S 24 -#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE 0x02000000 -#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE_S 25 - -#define 
AR_BTCOEX_CTRL_SPDT_ENABLE 0x00000001 -#define AR_BTCOEX_CTRL_SPDT_ENABLE_S 0 -#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL 0x00000002 -#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL_S 1 -#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT 0x00000004 -#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT_S 2 -#define AR_GLB_WLAN_UART_INTF_EN 0x00020000 -#define AR_GLB_WLAN_UART_INTF_EN_S 17 -#define AR_GLB_DS_JTAG_DISABLE 0x00040000 -#define AR_GLB_DS_JTAG_DISABLE_S 18 - -#define AR_BTCOEX_RC 0x194c -#define AR_BTCOEX_MAX_RFGAIN(_x) (0x1950 + ((_x) << 2)) -#define AR_BTCOEX_DBG 0x1a50 -#define AR_MCI_LAST_HW_MSG_HDR 0x1a54 -#define AR_MCI_LAST_HW_MSG_BDY 0x1a58 - -#define AR_MCI_SCHD_TABLE_2 0x1a5c -#define AR_MCI_SCHD_TABLE_2_MEM_BASED 0x00000001 -#define AR_MCI_SCHD_TABLE_2_MEM_BASED_S 0 -#define AR_MCI_SCHD_TABLE_2_HW_BASED 0x00000002 -#define AR_MCI_SCHD_TABLE_2_HW_BASED_S 1 - -#define AR_BTCOEX_CTRL3 0x1a60 -#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT 0x00000fff -#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S 0 - #endif diff --git a/trunk/drivers/net/wireless/ath/ath9k/wmi.c b/trunk/drivers/net/wireless/ath/ath9k/wmi.c index 65c8894c5f81..35422fc1f2ce 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/wmi.c +++ b/trunk/drivers/net/wireless/ath/ath9k/wmi.c @@ -187,7 +187,7 @@ void ath9k_fatal_work(struct work_struct *work) fatal_work); struct ath_common *common = ath9k_hw_common(priv->ah); - ath_dbg(common, FATAL, "FATAL Event received, resetting device\n"); + ath_dbg(common, ATH_DBG_FATAL, "FATAL Event received, resetting device\n"); ath9k_htc_reset(priv); } @@ -330,7 +330,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout); if (!time_left) { - ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n", + ath_dbg(common, ATH_DBG_WMI, + "Timeout waiting for WMI command: %s\n", wmi_cmd_to_name(cmd_id)); mutex_unlock(&wmi->op_mutex); return -ETIMEDOUT; @@ -341,7 +342,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, return 0; out: - ath_dbg(common, WMI, "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id)); + ath_dbg(common, ATH_DBG_WMI, + "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id)); mutex_unlock(&wmi->op_mutex); kfree_skb(skb); diff --git a/trunk/drivers/net/wireless/ath/ath9k/xmit.c b/trunk/drivers/net/wireless/ath/ath9k/xmit.c index 3182408ffe35..03b0a651a591 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/xmit.c +++ b/trunk/drivers/net/wireless/ath/ath9k/xmit.c @@ -53,7 +53,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, int tx_flags, struct ath_txq *txq); static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, struct ath_txq *txq, struct list_head *bf_q, - struct ath_tx_status *ts, int txok); + struct ath_tx_status *ts, int txok, int sendbar); static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, struct list_head *head, bool internal); static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, @@ -104,32 +104,6 @@ static int ath_max_4ms_framelen[4][32] = { /* Aggregation logic */ /*********************/ -static void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq) - __acquires(&txq->axq_lock) -{ - spin_lock_bh(&txq->axq_lock); -} - -static void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq) - __releases(&txq->axq_lock) -{ - spin_unlock_bh(&txq->axq_lock); -} - -static void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq) - __releases(&txq->axq_lock) -{ - struct sk_buff_head q; - struct sk_buff *skb; - - 
__skb_queue_head_init(&q); - skb_queue_splice_init(&txq->complete_q, &q); - spin_unlock_bh(&txq->axq_lock); - - while ((skb = __skb_dequeue(&q))) - ieee80211_tx_status(sc->hw, skb); -} - static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid) { struct ath_atx_ac *ac = tid->ac; @@ -156,7 +130,7 @@ static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) WARN_ON(!tid->paused); - ath_txq_lock(sc, txq); + spin_lock_bh(&txq->axq_lock); tid->paused = false; if (skb_queue_empty(&tid->buf_q)) @@ -165,7 +139,7 @@ static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) ath_tx_queue_tid(txq, tid); ath_txq_schedule(sc, txq); unlock: - ath_txq_unlock_complete(sc, txq); + spin_unlock_bh(&txq->axq_lock); } static struct ath_frame_info *get_frame_info(struct sk_buff *skb) @@ -176,12 +150,6 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb) return (struct ath_frame_info *) &tx_info->rate_driver_data[0]; } -static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno) -{ - ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno, - seqno << IEEE80211_SEQ_SEQ_SHIFT); -} - static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) { struct ath_txq *txq = tid->ac->txq; @@ -190,36 +158,28 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) struct list_head bf_head; struct ath_tx_status ts; struct ath_frame_info *fi; - bool sendbar = false; INIT_LIST_HEAD(&bf_head); memset(&ts, 0, sizeof(ts)); + spin_lock_bh(&txq->axq_lock); while ((skb = __skb_dequeue(&tid->buf_q))) { fi = get_frame_info(skb); bf = fi->bf; + spin_unlock_bh(&txq->axq_lock); if (bf && fi->retries) { list_add_tail(&bf->list, &bf_head); ath_tx_update_baw(sc, tid, bf->bf_state.seqno); - ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); - sendbar = true; + ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1); } else { ath_tx_send_normal(sc, txq, NULL, skb); } + spin_lock_bh(&txq->axq_lock); } - if (tid->baw_head == tid->baw_tail) { - tid->state &= ~AGGR_ADDBA_COMPLETE; - tid->state &= ~AGGR_CLEANUP; - } - - if (sendbar) { - ath_txq_unlock(sc, txq); - ath_send_bar(tid, tid->seq_start); - ath_txq_lock(sc, txq); - } + spin_unlock_bh(&txq->axq_lock); } static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, @@ -235,8 +195,6 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) { INCR(tid->seq_start, IEEE80211_SEQ_MAX); INCR(tid->baw_head, ATH_TID_MAX_BUFS); - if (tid->bar_index >= 0) - tid->bar_index--; } } @@ -280,7 +238,9 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, bf = fi->bf; if (!bf) { + spin_unlock(&txq->axq_lock); ath_tx_complete(sc, skb, ATH_TX_ERROR, txq); + spin_lock(&txq->axq_lock); continue; } @@ -289,26 +249,24 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, if (fi->retries) ath_tx_update_baw(sc, tid, bf->bf_state.seqno); - ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); + spin_unlock(&txq->axq_lock); + ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0); + spin_lock(&txq->axq_lock); } tid->seq_next = tid->seq_start; tid->baw_tail = tid->baw_head; - tid->bar_index = -1; } static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq, - struct sk_buff *skb, int count) + struct sk_buff *skb) { struct ath_frame_info *fi = get_frame_info(skb); struct ath_buf *bf = fi->bf; struct ieee80211_hdr *hdr; - int prev = fi->retries; 
TX_STAT_INC(txq->axq_qnum, a_retries); - fi->retries += count; - - if (prev > 0) + if (fi->retries++ > 0) return; hdr = (struct ieee80211_hdr *)skb->data; @@ -407,7 +365,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf_next, *bf_last = bf->bf_lastbf; struct list_head bf_head; struct sk_buff_head bf_pending; - u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first; + u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0; u32 ba[WME_BA_BMP_SIZE >> 5]; int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0; bool rc_update = true; @@ -416,8 +374,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, int nframes; u8 tidno; bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH); - int i, retries; - int bar_index = -1; skb = bf->bf_mpdu; hdr = (struct ieee80211_hdr *)skb->data; @@ -426,10 +382,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, memcpy(rates, tx_info->control.rates, sizeof(rates)); - retries = ts->ts_longretry + 1; - for (i = 0; i < ts->ts_rateindex; i++) - retries += rates[i].count; - rcu_read_lock(); sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2); @@ -443,7 +395,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, if (!bf->bf_stale || bf_next != NULL) list_move_tail(&bf->list, &bf_head); - ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0); + ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, + 0, 0); bf = bf_next; } @@ -453,7 +406,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, an = (struct ath_node *)sta->drv_priv; tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; tid = ATH_AN_2_TID(an, tidno); - seq_first = tid->seq_start; /* * The hardware occasionally sends a tx status for the wrong TID. 
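The aggregation-completion path in this hunk keys everything off 802.11 sequence numbers that wrap at 4096, so "is this frame inside the block-ack window" must be computed modulo the sequence space. Below is a minimal standalone sketch of that wrap-safe check, with illustrative names only; the driver's own BAW_WITHIN/ATH_BA_INDEX helpers referenced in the diff may differ in detail.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEQ_SPACE 4096 /* 802.11 sequence numbers are 12 bits wide */

/* Distance from the window start to seqno, modulo the sequence space. */
static uint16_t seq_index(uint16_t start, uint16_t seqno)
{
	return (uint16_t)((seqno - start) & (SEQ_SPACE - 1));
}

/* True if seqno falls inside a block-ack window of baw_size frames
 * beginning at start; the masking keeps the test correct across the
 * 4095 -> 0 wrap point. */
static bool seq_within_baw(uint16_t start, uint16_t baw_size, uint16_t seqno)
{
	return seq_index(start, seqno) < baw_size;
}

int main(void)
{
	/* Window of 64 frames starting just before the wrap point. */
	printf("%d\n", seq_within_baw(4090, 64, 10));   /* 1: inside, after wrap */
	printf("%d\n", seq_within_baw(4090, 64, 2000)); /* 0: outside the window */
	return 0;
}
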
@@ -503,25 +455,25 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, } else if (!isaggr && txok) { /* transmit completion */ acked_cnt++; - } else if ((tid->state & AGGR_CLEANUP) || !retry) { - /* - * cleanup in progress, just fail - * the un-acked sub-frames - */ - txfail = 1; - } else if (flush) { - txpending = 1; - } else if (fi->retries < ATH_MAX_SW_RETRIES) { - if (txok || !an->sleeping) - ath_tx_set_retry(sc, txq, bf->bf_mpdu, - retries); - - txpending = 1; } else { - txfail = 1; - txfail_cnt++; - bar_index = max_t(int, bar_index, - ATH_BA_INDEX(seq_first, seqno)); + if ((tid->state & AGGR_CLEANUP) || !retry) { + /* + * cleanup in progress, just fail + * the un-acked sub-frames + */ + txfail = 1; + } else if (flush) { + txpending = 1; + } else if (fi->retries < ATH_MAX_SW_RETRIES) { + if (txok || !an->sleeping) + ath_tx_set_retry(sc, txq, bf->bf_mpdu); + + txpending = 1; + } else { + txfail = 1; + sendbar = 1; + txfail_cnt++; + } } /* @@ -538,7 +490,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, * complete the acked-ones/xretried ones; update * block-ack window */ + spin_lock_bh(&txq->axq_lock); ath_tx_update_baw(sc, tid, seqno); + spin_unlock_bh(&txq->axq_lock); if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { memcpy(tx_info->control.rates, rates, sizeof(rates)); @@ -547,30 +501,33 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, } ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, - !txfail); + !txfail, sendbar); } else { /* retry the un-acked ones */ - if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) && - bf->bf_next == NULL && bf_last->bf_stale) { - struct ath_buf *tbf; - - tbf = ath_clone_txbuf(sc, bf_last); - /* - * Update tx baw and complete the - * frame with failed status if we - * run out of tx buf. - */ - if (!tbf) { - ath_tx_update_baw(sc, tid, seqno); - - ath_tx_complete_buf(sc, bf, txq, - &bf_head, ts, 0); - bar_index = max_t(int, bar_index, - ATH_BA_INDEX(seq_first, seqno)); - break; + if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) { + if (bf->bf_next == NULL && bf_last->bf_stale) { + struct ath_buf *tbf; + + tbf = ath_clone_txbuf(sc, bf_last); + /* + * Update tx baw and complete the + * frame with failed status if we + * run out of tx buf. 
+ */ + if (!tbf) { + spin_lock_bh(&txq->axq_lock); + ath_tx_update_baw(sc, tid, seqno); + spin_unlock_bh(&txq->axq_lock); + + ath_tx_complete_buf(sc, bf, txq, + &bf_head, + ts, 0, + !flush); + break; + } + + fi->bf = tbf; } - - fi->bf = tbf; } /* @@ -588,6 +545,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, if (an->sleeping) ieee80211_sta_set_buffered(sta, tid->tidno, true); + spin_lock_bh(&txq->axq_lock); skb_queue_splice(&bf_pending, &tid->buf_q); if (!an->sleeping) { ath_tx_queue_tid(txq, tid); @@ -595,22 +553,18 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, if (ts->ts_status & ATH9K_TXERR_FILT) tid->ac->clear_ps_filter = true; } + spin_unlock_bh(&txq->axq_lock); } - if (bar_index >= 0) { - u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index); - - if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq)) - tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq); + if (tid->state & AGGR_CLEANUP) { + ath_tx_flush_tid(sc, tid); - ath_txq_unlock(sc, txq); - ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1)); - ath_txq_lock(sc, txq); + if (tid->baw_head == tid->baw_tail) { + tid->state &= ~AGGR_ADDBA_COMPLETE; + tid->state &= ~AGGR_CLEANUP; + } } - if (tid->state & AGGR_CLEANUP) - ath_tx_flush_tid(sc, tid); - rcu_read_unlock(); if (needreset) { @@ -647,7 +601,6 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, struct sk_buff *skb; struct ieee80211_tx_info *tx_info; struct ieee80211_tx_rate *rates; - struct ath_mci_profile *mci = &sc->btcoex.mci; u32 max_4ms_framelen, frmlen; u16 aggr_limit, legacy = 0; int i; @@ -664,26 +617,24 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, max_4ms_framelen = ATH_AMPDU_LIMIT_MAX; for (i = 0; i < 4; i++) { - int modeidx; - - if (!rates[i].count) - continue; - - if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) { - legacy = 1; - break; - } + if (rates[i].count) { + int modeidx; + if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) { + legacy = 1; + break; + } - if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) - modeidx = MCS_HT40; - else - modeidx = MCS_HT20; + if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + modeidx = MCS_HT40; + else + modeidx = MCS_HT20; - if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI) - modeidx++; + if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI) + modeidx++; - frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx]; - max_4ms_framelen = min(max_4ms_framelen, frmlen); + frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx]; + max_4ms_framelen = min(max_4ms_framelen, frmlen); + } } /* @@ -694,9 +645,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy) return 0; - if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit) - aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4; - else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED) + if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED) aggr_limit = min((max_4ms_framelen * 3) / 8, (u32)ATH_AMPDU_LIMIT_MAX); else @@ -819,6 +768,8 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR; seqno = bf->bf_state.seqno; + if (!bf_first) + bf_first = bf; /* do not step over block-ack window */ if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) { @@ -826,21 +777,6 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, break; } - if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) { - struct ath_tx_status ts = {}; - struct 
list_head bf_head; - - INIT_LIST_HEAD(&bf_head); - list_add(&bf->list, &bf_head); - __skb_unlink(skb, &tid->buf_q); - ath_tx_update_baw(sc, tid, seqno); - ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); - continue; - } - - if (!bf_first) - bf_first = bf; - if (!rl) { aggr_limit = ath_lookup_rate(sc, bf, tid); rl = 1; @@ -1183,7 +1119,6 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, txtid->state |= AGGR_ADDBA_PROGRESS; txtid->paused = true; *ssn = txtid->seq_start = txtid->seq_next; - txtid->bar_index = -1; memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf)); txtid->baw_head = txtid->baw_tail = 0; @@ -1205,7 +1140,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) return; } - ath_txq_lock(sc, txq); + spin_lock_bh(&txq->axq_lock); txtid->paused = true; /* @@ -1218,9 +1153,9 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) txtid->state |= AGGR_CLEANUP; else txtid->state &= ~AGGR_ADDBA_COMPLETE; + spin_unlock_bh(&txq->axq_lock); ath_tx_flush_tid(sc, txtid); - ath_txq_unlock_complete(sc, txq); } void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, @@ -1241,7 +1176,7 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, ac = tid->ac; txq = ac->txq; - ath_txq_lock(sc, txq); + spin_lock_bh(&txq->axq_lock); buffered = !skb_queue_empty(&tid->buf_q); @@ -1253,7 +1188,7 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, list_del(&ac->list); } - ath_txq_unlock(sc, txq); + spin_unlock_bh(&txq->axq_lock); ieee80211_sta_set_buffered(sta, tidno, buffered); } @@ -1272,7 +1207,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) ac = tid->ac; txq = ac->txq; - ath_txq_lock(sc, txq); + spin_lock_bh(&txq->axq_lock); ac->clear_ps_filter = true; if (!skb_queue_empty(&tid->buf_q) && !tid->paused) { @@ -1280,7 +1215,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) ath_txq_schedule(sc, txq); } - ath_txq_unlock_complete(sc, txq); + spin_unlock_bh(&txq->axq_lock); } } @@ -1380,7 +1315,6 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) txq->axq_qnum = axq_qnum; txq->mac80211_qnum = -1; txq->axq_link = NULL; - __skb_queue_head_init(&txq->complete_q); INIT_LIST_HEAD(&txq->axq_q); INIT_LIST_HEAD(&txq->axq_acq); spin_lock_init(&txq->axq_lock); @@ -1463,6 +1397,8 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf) static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq, struct list_head *list, bool retry_tx) + __releases(txq->axq_lock) + __acquires(txq->axq_lock) { struct ath_buf *bf, *lastbf; struct list_head bf_head; @@ -1489,11 +1425,13 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq, if (bf_is_ampdu_not_probing(bf)) txq->axq_ampdu_depth--; + spin_unlock_bh(&txq->axq_lock); if (bf_isampdu(bf)) ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0, retry_tx); else - ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); + ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0); + spin_lock_bh(&txq->axq_lock); } } @@ -1505,8 +1443,7 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq, */ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx) { - ath_txq_lock(sc, txq); - + spin_lock_bh(&txq->axq_lock); if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { int idx = txq->txq_tailidx; @@ -1527,7 +1464,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx) if ((sc->sc_flags & SC_OP_TXAGGR) 
&& !retry_tx) ath_txq_drain_pending_buffers(sc, txq); - ath_txq_unlock_complete(sc, txq); + spin_unlock_bh(&txq->axq_lock); } bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) @@ -1621,9 +1558,11 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) break; } - if (!list_empty(&ac->tid_q) && !ac->sched) { - ac->sched = true; - list_add_tail(&ac->list, &txq->axq_acq); + if (!list_empty(&ac->tid_q)) { + if (!ac->sched) { + ac->sched = true; + list_add_tail(&ac->list, &txq->axq_acq); + } } if (ac == last_ac || @@ -1661,8 +1600,8 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, bf = list_first_entry(head, struct ath_buf, list); bf_last = list_entry(head->prev, struct ath_buf, list); - ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n", - txq->axq_qnum, txq->axq_depth); + ath_dbg(common, ATH_DBG_QUEUE, + "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth); if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) { list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]); @@ -1673,7 +1612,8 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, if (txq->axq_link) { ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr); - ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n", + ath_dbg(common, ATH_DBG_XMIT, + "link[%u] (%p)=%llx (%p)\n", txq->axq_qnum, txq->axq_link, ito64(bf->bf_daddr), bf->bf_desc); } else if (!edma) @@ -1685,7 +1625,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, if (puttxbuf) { TX_STAT_INC(txq->axq_qnum, puttxbuf); ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); - ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n", + ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n", txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); } @@ -1765,6 +1705,10 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, list_add_tail(&bf->list, &bf_head); bf->bf_state.bf_type = 0; + /* update starting sequence number for subsequent ADDBA request */ + if (tid) + INCR(tid->seq_start, IEEE80211_SEQ_MAX); + bf->bf_lastbf = bf; ath_tx_fill_desc(sc, bf, txq, fi->framelen); ath_tx_txqaddbuf(sc, txq, &bf_head, false); @@ -1827,7 +1771,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, bf = ath_tx_get_buffer(sc); if (!bf) { - ath_dbg(common, XMIT, "TX buffers are full\n"); + ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n"); goto error; } @@ -1872,6 +1816,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb, struct ath_buf *bf; u8 tidno; + spin_lock_bh(&txctl->txq->axq_lock); if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an && ieee80211_is_data_qos(hdr->frame_control)) { tidno = ieee80211_get_qos_ctl(hdr)[0] & @@ -1890,7 +1835,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb, } else { bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); if (!bf) - return; + goto out; bf->bf_state.bfs_paprd = txctl->paprd; @@ -1899,6 +1844,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb, ath_tx_send_normal(sc, txctl->txq, tid, skb); } + +out: + spin_unlock_bh(&txctl->txq->axq_lock); } /* Upon failure caller should free skb */ @@ -1959,18 +1907,15 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, */ q = skb_get_queue_mapping(skb); - - ath_txq_lock(sc, txq); + spin_lock_bh(&txq->axq_lock); if (txq == sc->tx.txq_map[q] && ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) { ieee80211_stop_queue(sc->hw, q); - txq->stopped = true; + txq->stopped = 1; } + 
spin_unlock_bh(&txq->axq_lock); ath_tx_start_dma(sc, skb, txctl); - - ath_txq_unlock(sc, txq); - return 0; } @@ -1981,12 +1926,16 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, int tx_flags, struct ath_txq *txq) { + struct ieee80211_hw *hw = sc->hw; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data; int q, padpos, padsize; - ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb); + ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb); + + if (tx_flags & ATH_TX_BAR) + tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; if (!(tx_flags & ATH_TX_ERROR)) /* Frame was ACKed */ @@ -2003,9 +1952,9 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, skb_pull(skb, padsize); } - if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) { + if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) { sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK; - ath_dbg(common, PS, + ath_dbg(common, ATH_DBG_PS, "Going back to sleep after having received TX status (0x%lx)\n", sc->ps_flags & (PS_WAIT_FOR_BEACON | PS_WAIT_FOR_CAB | @@ -2015,27 +1964,32 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, q = skb_get_queue_mapping(skb); if (txq == sc->tx.txq_map[q]) { + spin_lock_bh(&txq->axq_lock); if (WARN_ON(--txq->pending_frames < 0)) txq->pending_frames = 0; if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) { ieee80211_wake_queue(sc->hw, q); - txq->stopped = false; + txq->stopped = 0; } + spin_unlock_bh(&txq->axq_lock); } - __skb_queue_tail(&txq->complete_q, skb); + ieee80211_tx_status(hw, skb); } static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, struct ath_txq *txq, struct list_head *bf_q, - struct ath_tx_status *ts, int txok) + struct ath_tx_status *ts, int txok, int sendbar) { struct sk_buff *skb = bf->bf_mpdu; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); unsigned long flags; int tx_flags = 0; + if (sendbar) + tx_flags = ATH_TX_BAR; + if (!txok) tx_flags |= ATH_TX_ERROR; @@ -2127,6 +2081,8 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, struct ath_tx_status *ts, struct ath_buf *bf, struct list_head *bf_head) + __releases(txq->axq_lock) + __acquires(txq->axq_lock) { int txok; @@ -2136,12 +2092,16 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, if (bf_is_ampdu_not_probing(bf)) txq->axq_ampdu_depth--; + spin_unlock_bh(&txq->axq_lock); + if (!bf_isampdu(bf)) { ath_tx_rc_status(sc, bf, ts, 1, txok ? 
0 : 1, txok); - ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok); + ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0); } else ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true); + spin_lock_bh(&txq->axq_lock); + if (sc->sc_flags & SC_OP_TXAGGR) ath_txq_schedule(sc, txq); } @@ -2156,11 +2116,11 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) struct ath_tx_status ts; int status; - ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n", + ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n", txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum), txq->axq_link); - ath_txq_lock(sc, txq); + spin_lock_bh(&txq->axq_lock); for (;;) { if (work_pending(&sc->hw_reset_work)) break; @@ -2219,7 +2179,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head); } - ath_txq_unlock_complete(sc, txq); + spin_unlock_bh(&txq->axq_lock); } static void ath_tx_complete_poll_work(struct work_struct *work) @@ -2236,21 +2196,21 @@ static void ath_tx_complete_poll_work(struct work_struct *work) for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) if (ATH_TXQ_SETUP(sc, i)) { txq = &sc->tx.txq[i]; - ath_txq_lock(sc, txq); + spin_lock_bh(&txq->axq_lock); if (txq->axq_depth) { if (txq->axq_tx_inprogress) { needreset = true; - ath_txq_unlock(sc, txq); + spin_unlock_bh(&txq->axq_lock); break; } else { txq->axq_tx_inprogress = true; } } - ath_txq_unlock_complete(sc, txq); + spin_unlock_bh(&txq->axq_lock); } if (needreset) { - ath_dbg(ath9k_hw_common(sc->sc_ah), RESET, + ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET, "tx hung, resetting the chip\n"); RESET_STAT_INC(sc, RESET_TYPE_TX_HANG); ieee80211_queue_work(sc->hw, &sc->hw_reset_work); @@ -2293,7 +2253,8 @@ void ath_tx_edma_tasklet(struct ath_softc *sc) if (status == -EINPROGRESS) break; if (status == -EIO) { - ath_dbg(common, XMIT, "Error processing tx status\n"); + ath_dbg(common, ATH_DBG_XMIT, + "Error processing tx status\n"); break; } @@ -2303,10 +2264,10 @@ void ath_tx_edma_tasklet(struct ath_softc *sc) txq = &sc->tx.txq[ts.qid]; - ath_txq_lock(sc, txq); + spin_lock_bh(&txq->axq_lock); if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) { - ath_txq_unlock(sc, txq); + spin_unlock_bh(&txq->axq_lock); return; } @@ -2332,7 +2293,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc) } ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head); - ath_txq_unlock_complete(sc, txq); + spin_unlock_bh(&txq->axq_lock); } } @@ -2470,7 +2431,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) ac = tid->ac; txq = ac->txq; - ath_txq_lock(sc, txq); + spin_lock_bh(&txq->axq_lock); if (tid->sched) { list_del(&tid->list); @@ -2486,6 +2447,6 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) tid->state &= ~AGGR_ADDBA_COMPLETE; tid->state &= ~AGGR_CLEANUP; - ath_txq_unlock(sc, txq); + spin_unlock_bh(&txq->axq_lock); } } diff --git a/trunk/drivers/net/wireless/ath/carl9170/fw.c b/trunk/drivers/net/wireless/ath/carl9170/fw.c index 3de61adacd34..cba9d0435dc4 100644 --- a/trunk/drivers/net/wireless/ath/carl9170/fw.c +++ b/trunk/drivers/net/wireless/ath/carl9170/fw.c @@ -146,15 +146,13 @@ static bool valid_cpu_addr(const u32 address) return false; } -static int carl9170_fw_checksum(struct ar9170 *ar, const __u8 *data, - size_t len) +static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) { const struct carl9170fw_otus_desc *otus_desc; - const struct carl9170fw_last_desc *last_desc; const struct carl9170fw_chk_desc *chk_desc; - unsigned 
long fin, diff; - unsigned int dsc_len; - u32 crc32; + const struct carl9170fw_last_desc *last_desc; + const struct carl9170fw_txsq_desc *txsq_desc; + u16 if_comb_types; last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC, sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER); @@ -172,68 +170,36 @@ static int carl9170_fw_checksum(struct ar9170 *ar, const __u8 *data, chk_desc = carl9170_fw_find_desc(ar, CHK_MAGIC, sizeof(*chk_desc), CARL9170FW_CHK_DESC_CUR_VER); - if (!chk_desc) { - dev_warn(&ar->udev->dev, "Unprotected firmware image.\n"); - return 0; - } + if (chk_desc) { + unsigned long fin, diff; + unsigned int dsc_len; + u32 crc32; - dsc_len = min_t(unsigned int, len, + dsc_len = min_t(unsigned int, len, (unsigned long)chk_desc - (unsigned long)otus_desc); - fin = (unsigned long) last_desc + sizeof(*last_desc); - diff = fin - (unsigned long) otus_desc; - - if (diff < len) - len -= diff; - - if (len < 256) - return -EIO; + fin = (unsigned long) last_desc + sizeof(*last_desc); + diff = fin - (unsigned long) otus_desc; - crc32 = crc32_le(~0, data, len); - if (cpu_to_le32(crc32) != chk_desc->fw_crc32) { - dev_err(&ar->udev->dev, "fw checksum test failed.\n"); - return -ENOEXEC; - } + if (diff < len) + len -= diff; - crc32 = crc32_le(crc32, (void *)otus_desc, dsc_len); - if (cpu_to_le32(crc32) != chk_desc->hdr_crc32) { - dev_err(&ar->udev->dev, "descriptor check failed.\n"); - return -EINVAL; - } - return 0; -} + if (len < 256) + return -EIO; -static int carl9170_fw_tx_sequence(struct ar9170 *ar) -{ - const struct carl9170fw_txsq_desc *txsq_desc; + crc32 = crc32_le(~0, data, len); + if (cpu_to_le32(crc32) != chk_desc->fw_crc32) { + dev_err(&ar->udev->dev, "fw checksum test failed.\n"); + return -ENOEXEC; + } - txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC, sizeof(*txsq_desc), - CARL9170FW_TXSQ_DESC_CUR_VER); - if (txsq_desc) { - ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr); - if (!valid_cpu_addr(ar->fw.tx_seq_table)) + crc32 = crc32_le(crc32, (void *)otus_desc, dsc_len); + if (cpu_to_le32(crc32) != chk_desc->hdr_crc32) { + dev_err(&ar->udev->dev, "descriptor check failed.\n"); return -EINVAL; + } } else { - ar->fw.tx_seq_table = 0; - } - - return 0; -} - -static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) -{ - const struct carl9170fw_otus_desc *otus_desc; - int err; - u16 if_comb_types; - - err = carl9170_fw_checksum(ar, data, len); - if (err) - return err; - - otus_desc = carl9170_fw_find_desc(ar, OTUS_MAGIC, - sizeof(*otus_desc), CARL9170FW_OTUS_DESC_CUR_VER); - if (!otus_desc) { - return -ENODATA; + dev_warn(&ar->udev->dev, "Unprotected firmware image.\n"); } #define SUPP(feat) \ @@ -355,8 +321,19 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) ar->hw->wiphy->interface_modes |= if_comb_types; + txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC, + sizeof(*txsq_desc), CARL9170FW_TXSQ_DESC_CUR_VER); + + if (txsq_desc) { + ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr); + if (!valid_cpu_addr(ar->fw.tx_seq_table)) + return -EINVAL; + } else { + ar->fw.tx_seq_table = 0; + } + #undef SUPPORTED - return carl9170_fw_tx_sequence(ar); + return 0; } static struct carl9170fw_desc_head * diff --git a/trunk/drivers/net/wireless/ath/carl9170/main.c b/trunk/drivers/net/wireless/ath/carl9170/main.c index db774212161b..f06e0695d412 100644 --- a/trunk/drivers/net/wireless/ath/carl9170/main.c +++ b/trunk/drivers/net/wireless/ath/carl9170/main.c @@ -48,7 +48,7 @@ #include "carl9170.h" #include "cmd.h" -static bool modparam_nohwcrypt; 
+static int modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload."); @@ -446,7 +446,7 @@ static void carl9170_op_stop(struct ieee80211_hw *hw) mutex_lock(&ar->mutex); if (IS_ACCEPTING_CMD(ar)) { - RCU_INIT_POINTER(ar->beacon_iter, NULL); + rcu_assign_pointer(ar->beacon_iter, NULL); carl9170_led_set_state(ar, 0); @@ -678,7 +678,7 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw, vif_priv->active = false; bitmap_release_region(&ar->vif_bitmap, vif_id, 0); ar->vifs--; - RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL); + rcu_assign_pointer(ar->vif_priv[vif_id].vif, NULL); list_del_rcu(&vif_priv->list); mutex_unlock(&ar->mutex); synchronize_rcu(); @@ -716,7 +716,7 @@ static void carl9170_op_remove_interface(struct ieee80211_hw *hw, WARN_ON(vif_priv->enable_beacon); vif_priv->enable_beacon = false; list_del_rcu(&vif_priv->list); - RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL); + rcu_assign_pointer(ar->vif_priv[id].vif, NULL); if (vif == main_vif) { rcu_read_unlock(); @@ -1258,7 +1258,7 @@ static int carl9170_op_sta_add(struct ieee80211_hw *hw, } for (i = 0; i < CARL9170_NUM_TID; i++) - RCU_INIT_POINTER(sta_info->agg[i], NULL); + rcu_assign_pointer(sta_info->agg[i], NULL); sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor); sta_info->ht_sta = true; @@ -1285,7 +1285,7 @@ static int carl9170_op_sta_remove(struct ieee80211_hw *hw, struct carl9170_sta_tid *tid_info; tid_info = rcu_dereference(sta_info->agg[i]); - RCU_INIT_POINTER(sta_info->agg[i], NULL); + rcu_assign_pointer(sta_info->agg[i], NULL); if (!tid_info) continue; @@ -1398,7 +1398,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw, spin_unlock_bh(&ar->tx_ampdu_list_lock); } - RCU_INIT_POINTER(sta_info->agg[tid], NULL); + rcu_assign_pointer(sta_info->agg[tid], NULL); rcu_read_unlock(); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); diff --git a/trunk/drivers/net/wireless/ath/carl9170/tx.c b/trunk/drivers/net/wireless/ath/carl9170/tx.c index d19a9ee9d057..59472e1605cd 100644 --- a/trunk/drivers/net/wireless/ath/carl9170/tx.c +++ b/trunk/drivers/net/wireless/ath/carl9170/tx.c @@ -314,7 +314,7 @@ static void carl9170_tx_release(struct kref *ref) * feedback either [CTL_REQ_TX_STATUS not set] */ - ieee80211_free_txskb(ar->hw, skb); + dev_kfree_skb_any(skb); return; } else { /* @@ -1432,7 +1432,7 @@ void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) err_free: ar->tx_dropped++; - ieee80211_free_txskb(ar->hw, skb); + dev_kfree_skb_any(skb); } void carl9170_tx_scheduler(struct ar9170 *ar) diff --git a/trunk/drivers/net/wireless/ath/key.c b/trunk/drivers/net/wireless/ath/key.c index 0e81904956cf..4cf7c5eb4813 100644 --- a/trunk/drivers/net/wireless/ath/key.c +++ b/trunk/drivers/net/wireless/ath/key.c @@ -143,7 +143,7 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry, break; case ATH_CIPHER_AES_CCM: if (!(common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)) { - ath_dbg(common, ANY, + ath_dbg(common, ATH_DBG_ANY, "AES-CCM not supported by this mac rev\n"); return false; } @@ -152,15 +152,15 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry, case ATH_CIPHER_TKIP: keyType = AR_KEYTABLE_TYPE_TKIP; if (entry + 64 >= common->keymax) { - ath_dbg(common, ANY, + ath_dbg(common, ATH_DBG_ANY, "entry %u inappropriate for TKIP\n", entry); return false; } break; case ATH_CIPHER_WEP: if (k->kv_len < WLAN_KEY_LEN_WEP40) { - ath_dbg(common, ANY, "WEP key length 
%u too small\n", - k->kv_len); + ath_dbg(common, ATH_DBG_ANY, + "WEP key length %u too small\n", k->kv_len); return false; } if (k->kv_len <= WLAN_KEY_LEN_WEP40) diff --git a/trunk/drivers/net/wireless/ath/regd.c b/trunk/drivers/net/wireless/ath/regd.c index 10dea37431b3..65ecb5bab25a 100644 --- a/trunk/drivers/net/wireless/ath/regd.c +++ b/trunk/drivers/net/wireless/ath/regd.c @@ -21,8 +21,6 @@ #include "regd.h" #include "regd_common.h" -static int __ath_regd_init(struct ath_regulatory *reg); - /* * This is a set of common rules used by our world regulatory domains. * We have 12 world regulatory domains. To save space we consolidate @@ -349,26 +347,10 @@ static void ath_reg_apply_world_flags(struct wiphy *wiphy, } } -static u16 ath_regd_find_country_by_name(char *alpha2) -{ - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(allCountries); i++) { - if (!memcmp(allCountries[i].isoName, alpha2, 2)) - return allCountries[i].countryCode; - } - - return -1; -} - int ath_reg_notifier_apply(struct wiphy *wiphy, struct regulatory_request *request, struct ath_regulatory *reg) { - struct ath_common *common = container_of(reg, struct ath_common, - regulatory); - u16 country_code; - /* We always apply this */ ath_reg_apply_radar_flags(wiphy); @@ -381,37 +363,14 @@ int ath_reg_notifier_apply(struct wiphy *wiphy, return 0; switch (request->initiator) { - case NL80211_REGDOM_SET_BY_CORE: - /* - * If common->reg_world_copy is world roaming it means we *were* - * world roaming... so we now have to restore that data. - */ - if (!ath_is_world_regd(&common->reg_world_copy)) - break; - - memcpy(reg, &common->reg_world_copy, - sizeof(struct ath_regulatory)); - break; case NL80211_REGDOM_SET_BY_DRIVER: + case NL80211_REGDOM_SET_BY_CORE: case NL80211_REGDOM_SET_BY_USER: break; case NL80211_REGDOM_SET_BY_COUNTRY_IE: - if (!ath_is_world_regd(reg)) - break; - - country_code = ath_regd_find_country_by_name(request->alpha2); - if (country_code == (u16) -1) - break; - - reg->current_rd = COUNTRY_ERD_FLAG; - reg->current_rd |= country_code; - - printk(KERN_DEBUG "ath: regdomain 0x%0x updated by CountryIE\n", - reg->current_rd); - __ath_regd_init(reg); - - ath_reg_apply_world_flags(wiphy, request->initiator, reg); - + if (ath_is_world_regd(reg)) + ath_reg_apply_world_flags(wiphy, request->initiator, + reg); break; } @@ -549,7 +508,11 @@ static void ath_regd_sanitize(struct ath_regulatory *reg) reg->current_rd = 0x64; } -static int __ath_regd_init(struct ath_regulatory *reg) +int +ath_regd_init(struct ath_regulatory *reg, + struct wiphy *wiphy, + int (*reg_notifier)(struct wiphy *wiphy, + struct regulatory_request *request)) { struct country_code_to_enum_rd *country = NULL; u16 regdmn; @@ -620,29 +583,7 @@ static int __ath_regd_init(struct ath_regulatory *reg) printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n", reg->regpair->regDmnEnum); - return 0; -} - -int -ath_regd_init(struct ath_regulatory *reg, - struct wiphy *wiphy, - int (*reg_notifier)(struct wiphy *wiphy, - struct regulatory_request *request)) -{ - struct ath_common *common = container_of(reg, struct ath_common, - regulatory); - int r; - - r = __ath_regd_init(reg); - if (r) - return r; - - if (ath_is_world_regd(reg)) - memcpy(&common->reg_world_copy, reg, - sizeof(struct ath_regulatory)); - ath_regd_init_wiphy(reg, wiphy, reg_notifier); - return 0; } EXPORT_SYMBOL(ath_regd_init); diff --git a/trunk/drivers/net/wireless/b43/b43.h b/trunk/drivers/net/wireless/b43/b43.h index 16e8f8058155..37110dfd2c96 100644 --- a/trunk/drivers/net/wireless/b43/b43.h +++ 
b/trunk/drivers/net/wireless/b43/b43.h @@ -191,9 +191,6 @@ #define B43_BFH_BUCKBOOST 0x0020 /* has buck/booster */ #define B43_BFH_FEM_BT 0x0040 /* has FEM and switch to share antenna * with bluetooth */ -#define B43_BFH_NOCBUCK 0x0080 -#define B43_BFH_PALDO 0x0200 -#define B43_BFH_EXTLNA_5GHZ 0x1000 /* has an external LNA (5GHz mode) */ /* SPROM boardflags2_lo values */ #define B43_BFL2_RXBB_INT_REG_DIS 0x0001 /* external RX BB regulator present */ @@ -207,14 +204,6 @@ #define B43_BFL2_SKWRKFEM_BRD 0x0100 /* 4321mcm93 uses Skyworks FEM */ #define B43_BFL2_SPUR_WAR 0x0200 /* has a workaround for clock-harmonic spurs */ #define B43_BFL2_GPLL_WAR 0x0400 /* altenative G-band PLL settings implemented */ -#define B43_BFL2_SINGLEANT_CCK 0x1000 -#define B43_BFL2_2G_SPUR_WAR 0x2000 - -/* SPROM boardflags2_hi values */ -#define B43_BFH2_GPLL_WAR2 0x0001 -#define B43_BFH2_IPALVLSHIFT_3P3 0x0002 -#define B43_BFH2_INTERNDET_TXIQCAL 0x0004 -#define B43_BFH2_XTALBUFOUTEN 0x0008 /* GPIO register offset, in both ChipCommon and PCI core. */ #define B43_GPIO_CONTROL 0x6c @@ -678,7 +667,6 @@ struct b43_key { }; /* SHM offsets to the QOS data structures for the 4 different queues. */ -#define B43_QOS_QUEUE_NUM 4 #define B43_QOS_PARAMS(queue) (B43_SHM_SH_EDCFQ + \ (B43_NR_QOSPARAMS * sizeof(u16) * (queue))) #define B43_QOS_BACKGROUND B43_QOS_PARAMS(0) @@ -916,7 +904,7 @@ struct b43_wl { struct work_struct beacon_update_trigger; /* The current QOS parameters for the 4 queues. */ - struct b43_qos_params qos_params[B43_QOS_QUEUE_NUM]; + struct b43_qos_params qos_params[4]; /* Work for adjustment of the transmission power. * This is scheduled when we determine that the actual TX output @@ -925,12 +913,8 @@ struct b43_wl { /* Packet transmit work */ struct work_struct tx_work; - /* Queue of packets to be transmitted. */ - struct sk_buff_head tx_queue[B43_QOS_QUEUE_NUM]; - - /* Flag that implement the queues stopping. */ - bool tx_queue_stopped[B43_QOS_QUEUE_NUM]; + struct sk_buff_head tx_queue; /* The device LEDs. */ struct b43_leds leds; diff --git a/trunk/drivers/net/wireless/b43/dma.c b/trunk/drivers/net/wireless/b43/dma.c index b5f1b91002bb..5e45604f0f5d 100644 --- a/trunk/drivers/net/wireless/b43/dma.c +++ b/trunk/drivers/net/wireless/b43/dma.c @@ -890,7 +890,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, else ring->ops = &dma32_ops; if (for_tx) { - ring->tx = true; + ring->tx = 1; ring->current_slot = -1; } else { if (ring->index == 0) { @@ -1061,7 +1061,7 @@ void b43_dma_free(struct b43_wldev *dev) static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask) { u64 orig_mask = mask; - bool fallback = false; + bool fallback = 0; int err; /* Try to set the DMA mask. 
If it fails, try falling back to a @@ -1075,12 +1075,12 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask) } if (mask == DMA_BIT_MASK(64)) { mask = DMA_BIT_MASK(32); - fallback = true; + fallback = 1; continue; } if (mask == DMA_BIT_MASK(32)) { mask = DMA_BIT_MASK(30); - fallback = true; + fallback = 1; continue; } b43err(dev->wl, "The machine/kernel does not support " @@ -1307,7 +1307,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring, memset(meta, 0, sizeof(*meta)); meta->skb = skb; - meta->is_last_fragment = true; + meta->is_last_fragment = 1; priv_info->bouncebuffer = NULL; meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); @@ -1465,10 +1465,8 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb) if ((free_slots(ring) < TX_SLOTS_PER_FRAME) || should_inject_overflow(ring)) { /* This TX ring is full. */ - unsigned int skb_mapping = skb_get_queue_mapping(skb); - ieee80211_stop_queue(dev->wl->hw, skb_mapping); - dev->wl->tx_queue_stopped[skb_mapping] = 1; - ring->stopped = true; + ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb)); + ring->stopped = 1; if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index); } @@ -1586,21 +1584,12 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, } if (ring->stopped) { B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME); - ring->stopped = false; - } - - if (dev->wl->tx_queue_stopped[ring->queue_prio]) { - dev->wl->tx_queue_stopped[ring->queue_prio] = 0; - } else { - /* If the driver queue is running wake the corresponding - * mac80211 queue. */ ieee80211_wake_queue(dev->wl->hw, ring->queue_prio); + ring->stopped = 0; if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index); } } - /* Add work to the queue. */ - ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work); } static void dma_rx(struct b43_dmaring *ring, int *slot) diff --git a/trunk/drivers/net/wireless/b43/leds.c b/trunk/drivers/net/wireless/b43/leds.c index d79ab2a227e1..a38c1c6446ad 100644 --- a/trunk/drivers/net/wireless/b43/leds.c +++ b/trunk/drivers/net/wireless/b43/leds.c @@ -74,7 +74,7 @@ static void b43_led_update(struct b43_wldev *dev, if (radio_enabled) turn_on = atomic_read(&led->state) != LED_OFF; else - turn_on = false; + turn_on = 0; if (turn_on == led->hw_state) return; led->hw_state = turn_on; @@ -225,11 +225,11 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev, if (sprom[led_index] == 0xFF) { /* There is no LED information in the SPROM * for this LED. Hardcode it here. 
*/ - *activelow = false; + *activelow = 0; switch (led_index) { case 0: *behaviour = B43_LED_ACTIVITY; - *activelow = true; + *activelow = 1; if (dev->dev->board_vendor == PCI_VENDOR_ID_COMPAQ) *behaviour = B43_LED_RADIO_ALL; break; @@ -267,11 +267,11 @@ void b43_leds_init(struct b43_wldev *dev) if (led->wl) { if (dev->phy.radio_on && b43_is_hw_radio_enabled(dev)) { b43_led_turn_on(dev, led->index, led->activelow); - led->hw_state = true; + led->hw_state = 1; atomic_set(&led->state, 1); } else { b43_led_turn_off(dev, led->index, led->activelow); - led->hw_state = false; + led->hw_state = 0; atomic_set(&led->state, 0); } } @@ -280,19 +280,19 @@ void b43_leds_init(struct b43_wldev *dev) led = &dev->wl->leds.led_tx; if (led->wl) { b43_led_turn_off(dev, led->index, led->activelow); - led->hw_state = false; + led->hw_state = 0; atomic_set(&led->state, 0); } led = &dev->wl->leds.led_rx; if (led->wl) { b43_led_turn_off(dev, led->index, led->activelow); - led->hw_state = false; + led->hw_state = 0; atomic_set(&led->state, 0); } led = &dev->wl->leds.led_assoc; if (led->wl) { b43_led_turn_off(dev, led->index, led->activelow); - led->hw_state = false; + led->hw_state = 0; atomic_set(&led->state, 0); } diff --git a/trunk/drivers/net/wireless/b43/lo.c b/trunk/drivers/net/wireless/b43/lo.c index 916123a3d74e..4c82d582a524 100644 --- a/trunk/drivers/net/wireless/b43/lo.c +++ b/trunk/drivers/net/wireless/b43/lo.c @@ -826,7 +826,7 @@ void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all) const struct b43_rfatt *rfatt; const struct b43_bbatt *bbatt; u64 power_vector; - bool table_changed = false; + bool table_changed = 0; BUILD_BUG_ON(B43_DC_LT_SIZE != 32); B43_WARN_ON(lo->rfatt_list.len * lo->bbatt_list.len > 64); @@ -876,7 +876,7 @@ void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all) lo->dc_lt[idx] = (lo->dc_lt[idx] & 0xFF00) | (val & 0x00FF); } - table_changed = true; + table_changed = 1; } if (table_changed) { /* The table changed in memory. Update the hardware table. 
*/ @@ -938,7 +938,7 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev) unsigned long now; unsigned long expire; struct b43_lo_calib *cal, *tmp; - bool current_item_expired = false; + bool current_item_expired = 0; bool hwpctl; if (!lo) @@ -968,7 +968,7 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev) if (b43_compare_bbatt(&cal->bbatt, &gphy->bbatt) && b43_compare_rfatt(&cal->rfatt, &gphy->rfatt)) { B43_WARN_ON(current_item_expired); - current_item_expired = true; + current_item_expired = 1; } if (b43_debug(dev, B43_DBG_LO)) { b43dbg(dev->wl, "LO: Item BB(%u), RF(%u,%u), " diff --git a/trunk/drivers/net/wireless/b43/main.c b/trunk/drivers/net/wireless/b43/main.c index 1c6f19393efa..5634d9a9965b 100644 --- a/trunk/drivers/net/wireless/b43/main.c +++ b/trunk/drivers/net/wireless/b43/main.c @@ -1122,17 +1122,17 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags) B43_WARN_ON((ps_flags & B43_PS_AWAKE) && (ps_flags & B43_PS_ASLEEP)); if (ps_flags & B43_PS_ENABLED) { - hwps = true; + hwps = 1; } else if (ps_flags & B43_PS_DISABLED) { - hwps = false; + hwps = 0; } else { //TODO: If powersave is not off and FIXME is not set and we are not in adhoc // and thus is not an AP and we are associated, set bit 25 } if (ps_flags & B43_PS_AWAKE) { - awake = true; + awake = 1; } else if (ps_flags & B43_PS_ASLEEP) { - awake = false; + awake = 0; } else { //TODO: If the device is awake or this is an AP, or we are scanning, or FIXME, // or we are associated, or FIXME, or the latest PS-Poll packet sent was @@ -1140,8 +1140,8 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags) } /* FIXME: For now we force awake-on and hwps-off */ - hwps = false; - awake = true; + hwps = 0; + awake = 1; macctl = b43_read32(dev, B43_MMIO_MACCTL); if (hwps) @@ -1339,7 +1339,7 @@ static void b43_calculate_link_quality(struct b43_wldev *dev) return; if (dev->noisecalc.calculation_running) return; - dev->noisecalc.calculation_running = true; + dev->noisecalc.calculation_running = 1; dev->noisecalc.nr_samples = 0; b43_generate_noise_sample(dev); @@ -1408,7 +1408,7 @@ static void handle_irq_noise(struct b43_wldev *dev) average -= 48; dev->stats.link_noise = average; - dev->noisecalc.calculation_running = false; + dev->noisecalc.calculation_running = 0; return; } generate_new: @@ -1424,7 +1424,7 @@ static void handle_irq_tbtt_indication(struct b43_wldev *dev) b43_power_saving_ctl_bits(dev, 0); } if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC)) - dev->dfq_valid = true; + dev->dfq_valid = 1; } static void handle_irq_atim_end(struct b43_wldev *dev) @@ -1433,7 +1433,7 @@ static void handle_irq_atim_end(struct b43_wldev *dev) b43_write32(dev, B43_MMIO_MACCMD, b43_read32(dev, B43_MMIO_MACCMD) | B43_MACCMD_DFQ_VALID); - dev->dfq_valid = false; + dev->dfq_valid = 0; } } @@ -1539,7 +1539,7 @@ static void b43_write_beacon_template(struct b43_wldev *dev, unsigned int i, len, variable_len; const struct ieee80211_mgmt *bcn; const u8 *ie; - bool tim_found = false; + bool tim_found = 0; unsigned int rate; u16 ctl; int antenna; @@ -1588,7 +1588,7 @@ static void b43_write_beacon_template(struct b43_wldev *dev, /* A valid TIM is at least 4 bytes long. 
*/ if (ie_len < 4) break; - tim_found = true; + tim_found = 1; tim_position = sizeof(struct b43_plcp_hdr6); tim_position += offsetof(struct ieee80211_mgmt, u.beacon.variable); @@ -1625,7 +1625,7 @@ static void b43_upload_beacon0(struct b43_wldev *dev) if (wl->beacon0_uploaded) return; b43_write_beacon_template(dev, 0x68, 0x18); - wl->beacon0_uploaded = true; + wl->beacon0_uploaded = 1; } static void b43_upload_beacon1(struct b43_wldev *dev) @@ -1635,7 +1635,7 @@ static void b43_upload_beacon1(struct b43_wldev *dev) if (wl->beacon1_uploaded) return; b43_write_beacon_template(dev, 0x468, 0x1A); - wl->beacon1_uploaded = true; + wl->beacon1_uploaded = 1; } static void handle_irq_beacon(struct b43_wldev *dev) @@ -1667,7 +1667,7 @@ static void handle_irq_beacon(struct b43_wldev *dev) if (unlikely(wl->beacon_templates_virgin)) { /* We never uploaded a beacon before. * Upload both templates now, but only mark one valid. */ - wl->beacon_templates_virgin = false; + wl->beacon_templates_virgin = 0; b43_upload_beacon0(dev); b43_upload_beacon1(dev); cmd = b43_read32(dev, B43_MMIO_MACCMD); @@ -1755,8 +1755,8 @@ static void b43_update_templates(struct b43_wl *wl) if (wl->current_beacon) dev_kfree_skb_any(wl->current_beacon); wl->current_beacon = beacon; - wl->beacon0_uploaded = false; - wl->beacon1_uploaded = false; + wl->beacon0_uploaded = 0; + wl->beacon1_uploaded = 0; ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger); } @@ -1913,7 +1913,7 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev) b43err(dev->wl, "This device does not support DMA " "on your system. It will now be switched to PIO.\n"); /* Fall back to PIO transfers if we get fatal DMA errors! */ - dev->use_pio = true; + dev->use_pio = 1; b43_controller_restart(dev, "DMA error"); return; } @@ -2240,12 +2240,12 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx) filename = NULL; else goto err_no_pcm; - fw->pcm_request_failed = false; + fw->pcm_request_failed = 0; err = b43_do_request_fw(ctx, filename, &fw->pcm); if (err == -ENOENT) { /* We did not find a PCM file? Not fatal, but * core rev <= 10 must do without hwcrypto then. */ - fw->pcm_request_failed = true; + fw->pcm_request_failed = 1; } else if (err) goto err_load; @@ -2535,7 +2535,7 @@ static int b43_upload_microcode(struct b43_wldev *dev) dev->wl->hw->queues = dev->wl->mac80211_initially_registered_queues; dev->qos_enabled = !!modparam_qos; /* Default to firmware/hardware crypto acceleration. */ - dev->hwcrypto_enabled = true; + dev->hwcrypto_enabled = 1; if (dev->fw.opensource) { u16 fwcapa; @@ -2549,7 +2549,7 @@ static int b43_upload_microcode(struct b43_wldev *dev) if (!(fwcapa & B43_FWCAPA_HWCRYPTO) || dev->fw.pcm_request_failed) { b43info(dev->wl, "Hardware crypto acceleration not supported by firmware\n"); /* Disable hardware crypto and fall back to software crypto. */ - dev->hwcrypto_enabled = false; + dev->hwcrypto_enabled = 0; } if (!(fwcapa & B43_FWCAPA_QOS)) { b43info(dev->wl, "QoS not supported by firmware\n"); @@ -2557,7 +2557,7 @@ static int b43_upload_microcode(struct b43_wldev *dev) * ieee80211_unregister to make sure the networking core can * properly free possible resources. 
*/ dev->wl->hw->queues = 1; - dev->qos_enabled = false; + dev->qos_enabled = 0; } } else { b43info(dev->wl, "Loading firmware version %u.%u " @@ -3361,10 +3361,10 @@ static int b43_rng_init(struct b43_wl *wl) wl->rng.name = wl->rng_name; wl->rng.data_read = b43_rng_read; wl->rng.priv = (unsigned long)wl; - wl->rng_initialized = true; + wl->rng_initialized = 1; err = hwrng_register(&wl->rng); if (err) { - wl->rng_initialized = false; + wl->rng_initialized = 0; b43err(wl, "Failed to register the random " "number generator (%d)\n", err); } @@ -3378,7 +3378,6 @@ static void b43_tx_work(struct work_struct *work) struct b43_wl *wl = container_of(work, struct b43_wl, tx_work); struct b43_wldev *dev; struct sk_buff *skb; - int queue_num; int err = 0; mutex_lock(&wl->mutex); @@ -3388,26 +3387,15 @@ static void b43_tx_work(struct work_struct *work) return; } - for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) { - while (skb_queue_len(&wl->tx_queue[queue_num])) { - skb = skb_dequeue(&wl->tx_queue[queue_num]); - if (b43_using_pio_transfers(dev)) - err = b43_pio_tx(dev, skb); - else - err = b43_dma_tx(dev, skb); - if (err == -ENOSPC) { - wl->tx_queue_stopped[queue_num] = 1; - ieee80211_stop_queue(wl->hw, queue_num); - skb_queue_head(&wl->tx_queue[queue_num], skb); - break; - } - if (unlikely(err)) - dev_kfree_skb(skb); /* Drop it */ - err = 0; - } + while (skb_queue_len(&wl->tx_queue)) { + skb = skb_dequeue(&wl->tx_queue); - if (!err) - wl->tx_queue_stopped[queue_num] = 0; + if (b43_using_pio_transfers(dev)) + err = b43_pio_tx(dev, skb); + else + err = b43_dma_tx(dev, skb); + if (unlikely(err)) + dev_kfree_skb(skb); /* Drop it */ } #if B43_DEBUG @@ -3428,12 +3416,8 @@ static void b43_op_tx(struct ieee80211_hw *hw, } B43_WARN_ON(skb_shinfo(skb)->nr_frags); - skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb); - if (!wl->tx_queue_stopped[skb->queue_mapping]) { - ieee80211_queue_work(wl->hw, &wl->tx_work); - } else { - ieee80211_stop_queue(wl->hw, skb->queue_mapping); - } + skb_queue_tail(&wl->tx_queue, skb); + ieee80211_queue_work(wl->hw, &wl->tx_work); } static void b43_qos_params_upload(struct b43_wldev *dev, @@ -3718,13 +3702,13 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan) case IEEE80211_BAND_5GHZ: if (d->phy.supports_5ghz) { up_dev = d; - gmode = false; + gmode = 0; } break; case IEEE80211_BAND_2GHZ: if (d->phy.supports_2ghz) { up_dev = d; - gmode = true; + gmode = 1; } break; default: @@ -4163,7 +4147,6 @@ static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev) struct b43_wl *wl; struct b43_wldev *orig_dev; u32 mask; - int queue_num; if (!dev) return NULL; @@ -4216,11 +4199,9 @@ static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev) mask = b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); B43_WARN_ON(mask != 0xFFFFFFFF && mask); - /* Drain all TX queues. 
*/ - for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) { - while (skb_queue_len(&wl->tx_queue[queue_num])) - dev_kfree_skb(skb_dequeue(&wl->tx_queue[queue_num])); - } + /* Drain the TX queue */ + while (skb_queue_len(&wl->tx_queue)) + dev_kfree_skb(skb_dequeue(&wl->tx_queue)); b43_mac_suspend(dev); b43_leds_exit(dev); @@ -4444,18 +4425,18 @@ static void setup_struct_phy_for_init(struct b43_wldev *dev, atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT); #if B43_DEBUG - phy->phy_locked = false; - phy->radio_locked = false; + phy->phy_locked = 0; + phy->radio_locked = 0; #endif } static void setup_struct_wldev_for_init(struct b43_wldev *dev) { - dev->dfq_valid = false; + dev->dfq_valid = 0; /* Assume the radio is enabled. If it's not enabled, the state will * immediately get fixed on the first periodic work run. */ - dev->radio_hw_enable = true; + dev->radio_hw_enable = 1; /* Stats */ memset(&dev->stats, 0, sizeof(dev->stats)); @@ -4689,16 +4670,16 @@ static int b43_wireless_core_init(struct b43_wldev *dev) if (b43_bus_host_is_pcmcia(dev->dev) || b43_bus_host_is_sdio(dev->dev)) { - dev->__using_pio_transfers = true; + dev->__using_pio_transfers = 1; err = b43_pio_init(dev); } else if (dev->use_pio) { b43warn(dev->wl, "Forced PIO by use_pio module parameter. " "This should not be needed and will result in lower " "performance.\n"); - dev->__using_pio_transfers = true; + dev->__using_pio_transfers = 1; err = b43_pio_init(dev); } else { - dev->__using_pio_transfers = false; + dev->__using_pio_transfers = 0; err = b43_dma_init(dev); } if (err) @@ -4752,7 +4733,7 @@ static int b43_op_add_interface(struct ieee80211_hw *hw, b43dbg(wl, "Adding Interface type %d\n", vif->type); dev = wl->current_dev; - wl->operating = true; + wl->operating = 1; wl->vif = vif; wl->if_type = vif->type; memcpy(wl->mac_addr, vif->addr, ETH_ALEN); @@ -4786,7 +4767,7 @@ static void b43_op_remove_interface(struct ieee80211_hw *hw, B43_WARN_ON(wl->vif != vif); wl->vif = NULL; - wl->operating = false; + wl->operating = 0; b43_adjust_opmode(dev); memset(wl->mac_addr, 0, ETH_ALEN); @@ -4808,12 +4789,12 @@ static int b43_op_start(struct ieee80211_hw *hw) memset(wl->bssid, 0, ETH_ALEN); memset(wl->mac_addr, 0, ETH_ALEN); wl->filter_flags = 0; - wl->radiotap_enabled = false; + wl->radiotap_enabled = 0; b43_qos_clear(wl); - wl->beacon0_uploaded = false; - wl->beacon1_uploaded = false; - wl->beacon_templates_virgin = true; - wl->radio_enabled = true; + wl->beacon0_uploaded = 0; + wl->beacon1_uploaded = 0; + wl->beacon_templates_virgin = 1; + wl->radio_enabled = 1; mutex_lock(&wl->mutex); @@ -4859,7 +4840,7 @@ static void b43_op_stop(struct ieee80211_hw *hw) goto out_unlock; } b43_wireless_core_exit(dev); - wl->radio_enabled = false; + wl->radio_enabled = 0; out_unlock: mutex_unlock(&wl->mutex); @@ -5047,7 +5028,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev) struct pci_dev *pdev = NULL; int err; u32 tmp; - bool have_2ghz_phy = false, have_5ghz_phy = false; + bool have_2ghz_phy = 0, have_5ghz_phy = 0; /* Do NOT do any device initialization here. * Do it in wireless_core_init() instead. 
@@ -5090,7 +5071,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev) } dev->phy.gmode = have_2ghz_phy; - dev->phy.radio_on = true; + dev->phy.radio_on = 1; b43_wireless_core_reset(dev, dev->phy.gmode); err = b43_phy_versioning(dev); @@ -5101,11 +5082,11 @@ static int b43_wireless_core_attach(struct b43_wldev *dev) (pdev->device != 0x4312 && pdev->device != 0x4319 && pdev->device != 0x4324)) { /* No multiband support. */ - have_2ghz_phy = false; - have_5ghz_phy = false; + have_2ghz_phy = 0; + have_5ghz_phy = 0; switch (dev->phy.type) { case B43_PHYTYPE_A: - have_5ghz_phy = true; + have_5ghz_phy = 1; break; case B43_PHYTYPE_LP: //FIXME not always! #if 0 //FIXME enabling 5GHz causes a NULL pointer dereference @@ -5115,7 +5096,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev) case B43_PHYTYPE_N: case B43_PHYTYPE_HT: case B43_PHYTYPE_LCN: - have_2ghz_phy = true; + have_2ghz_phy = 1; break; default: B43_WARN_ON(1); @@ -5131,8 +5112,8 @@ static int b43_wireless_core_attach(struct b43_wldev *dev) /* FIXME: For now we disable the A-PHY on multi-PHY devices. */ if (dev->phy.type != B43_PHYTYPE_N && dev->phy.type != B43_PHYTYPE_LP) { - have_2ghz_phy = true; - have_5ghz_phy = false; + have_2ghz_phy = 1; + have_5ghz_phy = 0; } } @@ -5264,7 +5245,6 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev) struct ieee80211_hw *hw; struct b43_wl *wl; char chip_name[6]; - int queue_num; hw = ieee80211_alloc_hw(sizeof(*wl), &b43_hw_ops); if (!hw) { @@ -5284,7 +5264,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev) BIT(NL80211_IFTYPE_WDS) | BIT(NL80211_IFTYPE_ADHOC); - hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1; + hw->queues = modparam_qos ? 4 : 1; wl->mac80211_initially_registered_queues = hw->queues; hw->max_rates = 2; SET_IEEE80211_DEV(hw, dev->dev); @@ -5301,12 +5281,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev) INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work); INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work); INIT_WORK(&wl->tx_work, b43_tx_work); - - /* Initialize queues and flags. */ - for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) { - skb_queue_head_init(&wl->tx_queue[queue_num]); - wl->tx_queue_stopped[queue_num] = 0; - } + skb_queue_head_init(&wl->tx_queue); snprintf(chip_name, ARRAY_SIZE(chip_name), (dev->chip_id > 0x9999) ? 
"%d" : "%04X", dev->chip_id); diff --git a/trunk/drivers/net/wireless/b43/phy_common.c b/trunk/drivers/net/wireless/b43/phy_common.c index 3f8883b14d9c..3ea44bb03684 100644 --- a/trunk/drivers/net/wireless/b43/phy_common.c +++ b/trunk/drivers/net/wireless/b43/phy_common.c @@ -145,7 +145,7 @@ void b43_radio_lock(struct b43_wldev *dev) #if B43_DEBUG B43_WARN_ON(dev->phy.radio_locked); - dev->phy.radio_locked = true; + dev->phy.radio_locked = 1; #endif macctl = b43_read32(dev, B43_MMIO_MACCTL); @@ -163,7 +163,7 @@ void b43_radio_unlock(struct b43_wldev *dev) #if B43_DEBUG B43_WARN_ON(!dev->phy.radio_locked); - dev->phy.radio_locked = false; + dev->phy.radio_locked = 0; #endif /* Commit any write */ @@ -178,7 +178,7 @@ void b43_phy_lock(struct b43_wldev *dev) { #if B43_DEBUG B43_WARN_ON(dev->phy.phy_locked); - dev->phy.phy_locked = true; + dev->phy.phy_locked = 1; #endif B43_WARN_ON(dev->dev->core_rev < 3); @@ -190,7 +190,7 @@ void b43_phy_unlock(struct b43_wldev *dev) { #if B43_DEBUG B43_WARN_ON(!dev->phy.phy_locked); - dev->phy.phy_locked = false; + dev->phy.phy_locked = 0; #endif B43_WARN_ON(dev->dev->core_rev < 3); diff --git a/trunk/drivers/net/wireless/b43/phy_g.c b/trunk/drivers/net/wireless/b43/phy_g.c index 12f467b8d564..8e157bc213f3 100644 --- a/trunk/drivers/net/wireless/b43/phy_g.c +++ b/trunk/drivers/net/wireless/b43/phy_g.c @@ -897,7 +897,7 @@ b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode) if (b43_phy_read(dev, 0x0033) & 0x0800) break; - gphy->aci_enable = true; + gphy->aci_enable = 1; phy_stacksave(B43_PHY_RADIO_BITFIELD); phy_stacksave(B43_PHY_G_CRS); @@ -1038,7 +1038,7 @@ b43_radio_interference_mitigation_disable(struct b43_wldev *dev, int mode) if (!(b43_phy_read(dev, 0x0033) & 0x0800)) break; - gphy->aci_enable = false; + gphy->aci_enable = 0; phy_stackrestore(B43_PHY_RADIO_BITFIELD); phy_stackrestore(B43_PHY_G_CRS); @@ -1956,10 +1956,10 @@ static void b43_phy_init_pctl(struct b43_wldev *dev) bbatt.att = 11; if (phy->radio_rev == 8) { rfatt.att = 15; - rfatt.with_padmix = true; + rfatt.with_padmix = 1; } else { rfatt.att = 9; - rfatt.with_padmix = false; + rfatt.with_padmix = 0; } b43_set_txpower_g(dev, &bbatt, &rfatt, 0); } @@ -2137,7 +2137,7 @@ static void default_radio_attenuation(struct b43_wldev *dev, struct b43_bus_dev *bdev = dev->dev; struct b43_phy *phy = &dev->phy; - rf->with_padmix = false; + rf->with_padmix = 0; if (dev->dev->board_vendor == SSB_BOARDVENDOR_BCM && dev->dev->board_type == SSB_BOARD_BCM4309G) { @@ -2221,7 +2221,7 @@ static void default_radio_attenuation(struct b43_wldev *dev, return; case 8: rf->att = 0xA; - rf->with_padmix = true; + rf->with_padmix = 1; return; case 9: default: @@ -2389,7 +2389,7 @@ static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev) B43_WARN_ON((dev->dev->chip_id == 0x4301) && (phy->radio_ver != 0x2050)); /* Not supported anymore */ - gphy->dyn_tssi_tbl = false; + gphy->dyn_tssi_tbl = 0; if (pab0 != 0 && pab1 != 0 && pab2 != 0 && pab0 != -1 && pab1 != -1 && pab2 != -1) { @@ -2404,7 +2404,7 @@ static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev) pab1, pab2); if (!gphy->tssi2dbm) return -ENOMEM; - gphy->dyn_tssi_tbl = true; + gphy->dyn_tssi_tbl = 1; } else { /* pabX values not set in SPROM. 
*/ gphy->tgt_idle_tssi = 52; @@ -2504,7 +2504,7 @@ static void b43_gphy_op_free(struct b43_wldev *dev) if (gphy->dyn_tssi_tbl) kfree(gphy->tssi2dbm); - gphy->dyn_tssi_tbl = false; + gphy->dyn_tssi_tbl = 0; gphy->tssi2dbm = NULL; kfree(gphy); @@ -2531,10 +2531,10 @@ static int b43_gphy_op_prepare_hardware(struct b43_wldev *dev) if (phy->rev == 1) { /* Workaround: Temporarly disable gmode through the early init * phase, as the gmode stuff is not needed for phy rev 1 */ - phy->gmode = false; + phy->gmode = 0; b43_wireless_core_reset(dev, 0); b43_phy_initg(dev); - phy->gmode = true; + phy->gmode = 1; b43_wireless_core_reset(dev, 1); } @@ -2613,7 +2613,7 @@ static void b43_gphy_op_software_rfkill(struct b43_wldev *dev, gphy->radio_off_context.rfover); b43_phy_write(dev, B43_PHY_RFOVERVAL, gphy->radio_off_context.rfoverval); - gphy->radio_off_context.valid = false; + gphy->radio_off_context.valid = 0; } channel = phy->channel; b43_gphy_channel_switch(dev, 6, 1); @@ -2626,7 +2626,7 @@ static void b43_gphy_op_software_rfkill(struct b43_wldev *dev, rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL); gphy->radio_off_context.rfover = rfover; gphy->radio_off_context.rfoverval = rfoverval; - gphy->radio_off_context.valid = true; + gphy->radio_off_context.valid = 1; b43_phy_write(dev, B43_PHY_RFOVER, rfover | 0x008C); b43_phy_write(dev, B43_PHY_RFOVERVAL, rfoverval & 0xFF73); } @@ -2711,10 +2711,10 @@ static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev, if ((phy->rev == 0) || (!phy->gmode)) return -ENODEV; - gphy->aci_wlan_automatic = false; + gphy->aci_wlan_automatic = 0; switch (mode) { case B43_INTERFMODE_AUTOWLAN: - gphy->aci_wlan_automatic = true; + gphy->aci_wlan_automatic = 1; if (gphy->aci_enable) mode = B43_INTERFMODE_MANUALWLAN; else @@ -2735,8 +2735,8 @@ static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev, b43_radio_interference_mitigation_disable(dev, currentmode); if (mode == B43_INTERFMODE_NONE) { - gphy->aci_enable = false; - gphy->aci_hw_rssi = false; + gphy->aci_enable = 0; + gphy->aci_hw_rssi = 0; } else b43_radio_interference_mitigation_enable(dev, mode); gphy->interfmode = mode; diff --git a/trunk/drivers/net/wireless/b43/phy_lp.c b/trunk/drivers/net/wireless/b43/phy_lp.c index 3ae28561f7a4..f93d66b1817b 100644 --- a/trunk/drivers/net/wireless/b43/phy_lp.c +++ b/trunk/drivers/net/wireless/b43/phy_lp.c @@ -736,9 +736,9 @@ static void lpphy_set_deaf(struct b43_wldev *dev, bool user) struct b43_phy_lp *lpphy = dev->phy.lp; if (user) - lpphy->crs_usr_disable = true; + lpphy->crs_usr_disable = 1; else - lpphy->crs_sys_disable = true; + lpphy->crs_sys_disable = 1; b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFF1F, 0x80); } @@ -747,9 +747,9 @@ static void lpphy_clear_deaf(struct b43_wldev *dev, bool user) struct b43_phy_lp *lpphy = dev->phy.lp; if (user) - lpphy->crs_usr_disable = false; + lpphy->crs_usr_disable = 0; else - lpphy->crs_sys_disable = false; + lpphy->crs_sys_disable = 0; if (!lpphy->crs_usr_disable && !lpphy->crs_sys_disable) { if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) diff --git a/trunk/drivers/net/wireless/b43/phy_n.c b/trunk/drivers/net/wireless/b43/phy_n.c index bf5a43855358..b17d9b6c33a5 100644 --- a/trunk/drivers/net/wireless/b43/phy_n.c +++ b/trunk/drivers/net/wireless/b43/phy_n.c @@ -78,6 +78,19 @@ enum b43_nphy_rssi_type { B43_NPHY_RSSI_TBD, }; +/* TODO: reorder functions */ +static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, + bool enable); +static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd, + u8 
*events, u8 *delays, u8 length); +static void b43_nphy_force_rf_sequence(struct b43_wldev *dev, + enum b43_nphy_rf_sequence seq); +static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field, + u16 value, u8 core, bool off); +static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field, + u16 value, u8 core); +static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev); + static inline bool b43_nphy_ipa(struct b43_wldev *dev) { enum ieee80211_band band = b43_current_band(dev->wl); @@ -85,662 +98,438 @@ static inline bool b43_nphy_ipa(struct b43_wldev *dev) (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ)); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */ -static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev) -{ - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { - if (dev->phy.rev >= 6) { - if (dev->dev->chip_id == 47162) - return txpwrctrl_tx_gain_ipa_rev5; - return txpwrctrl_tx_gain_ipa_rev6; - } else if (dev->phy.rev >= 5) { - return txpwrctrl_tx_gain_ipa_rev5; - } else { - return txpwrctrl_tx_gain_ipa; - } - } else { - return txpwrctrl_tx_gain_ipa_5g; - } +void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna) +{//TODO } -/************************************************** - * RF (just without b43_nphy_rf_control_intc_override) - **************************************************/ - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */ -static void b43_nphy_force_rf_sequence(struct b43_wldev *dev, - enum b43_nphy_rf_sequence seq) -{ - static const u16 trigger[] = { - [B43_RFSEQ_RX2TX] = B43_NPHY_RFSEQTR_RX2TX, - [B43_RFSEQ_TX2RX] = B43_NPHY_RFSEQTR_TX2RX, - [B43_RFSEQ_RESET2RX] = B43_NPHY_RFSEQTR_RST2RX, - [B43_RFSEQ_UPDATE_GAINH] = B43_NPHY_RFSEQTR_UPGH, - [B43_RFSEQ_UPDATE_GAINL] = B43_NPHY_RFSEQTR_UPGL, - [B43_RFSEQ_UPDATE_GAINU] = B43_NPHY_RFSEQTR_UPGU, - }; - int i; - u16 seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE); - - B43_WARN_ON(seq >= ARRAY_SIZE(trigger)); +static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev) +{//TODO +} - b43_phy_set(dev, B43_NPHY_RFSEQMODE, - B43_NPHY_RFSEQMODE_CAOVER | B43_NPHY_RFSEQMODE_TROVER); - b43_phy_set(dev, B43_NPHY_RFSEQTR, trigger[seq]); - for (i = 0; i < 200; i++) { - if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & trigger[seq])) - goto ok; - msleep(1); - } - b43err(dev->wl, "RF sequence status timeout\n"); -ok: - b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode); +static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev, + bool ignore_tssi) +{//TODO + return B43_TXPWR_RES_DONE; } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */ -static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field, - u16 value, u8 core, bool off) +static void b43_chantab_radio_upload(struct b43_wldev *dev, + const struct b43_nphy_channeltab_entry_rev2 *e) { - int i; - u8 index = fls(field); - u8 addr, en_addr, val_addr; - /* we expect only one bit set */ - B43_WARN_ON(field & (~(1 << (index - 1)))); + b43_radio_write(dev, B2055_PLL_REF, e->radio_pll_ref); + b43_radio_write(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0); + b43_radio_write(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1); + b43_radio_write(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail); + b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ - if (dev->phy.rev >= 3) { - const struct nphy_rf_control_override_rev3 *rf_ctrl; - for (i = 0; i < 2; i++) { - if (index == 0 || index == 16) { - b43err(dev->wl, - "Unsupported RF Ctrl Override call\n"); - return; - } + 
b43_radio_write(dev, B2055_VCO_CAL1, e->radio_vco_cal1); + b43_radio_write(dev, B2055_VCO_CAL2, e->radio_vco_cal2); + b43_radio_write(dev, B2055_PLL_LFC1, e->radio_pll_lfc1); + b43_radio_write(dev, B2055_PLL_LFR1, e->radio_pll_lfr1); + b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ - rf_ctrl = &tbl_rf_control_override_rev3[index - 1]; - en_addr = B43_PHY_N((i == 0) ? - rf_ctrl->en_addr0 : rf_ctrl->en_addr1); - val_addr = B43_PHY_N((i == 0) ? - rf_ctrl->val_addr0 : rf_ctrl->val_addr1); + b43_radio_write(dev, B2055_PLL_LFC2, e->radio_pll_lfc2); + b43_radio_write(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf); + b43_radio_write(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1); + b43_radio_write(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2); + b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ - if (off) { - b43_phy_mask(dev, en_addr, ~(field)); - b43_phy_mask(dev, val_addr, - ~(rf_ctrl->val_mask)); - } else { - if (core == 0 || ((1 << i) & core)) { - b43_phy_set(dev, en_addr, field); - b43_phy_maskset(dev, val_addr, - ~(rf_ctrl->val_mask), - (value << rf_ctrl->val_shift)); - } - } - } - } else { - const struct nphy_rf_control_override_rev2 *rf_ctrl; - if (off) { - b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~(field)); - value = 0; - } else { - b43_phy_set(dev, B43_NPHY_RFCTL_OVER, field); - } + b43_radio_write(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune); + b43_radio_write(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune); + b43_radio_write(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1); + b43_radio_write(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn); + b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ - for (i = 0; i < 2; i++) { - if (index <= 1 || index == 16) { - b43err(dev->wl, - "Unsupported RF Ctrl Override call\n"); - return; - } + b43_radio_write(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim); + b43_radio_write(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune); + b43_radio_write(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune); + b43_radio_write(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1); + b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ - if (index == 2 || index == 10 || - (index >= 13 && index <= 15)) { - core = 1; - } + b43_radio_write(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn); + b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim); +} - rf_ctrl = &tbl_rf_control_override_rev2[index - 2]; - addr = B43_PHY_N((i == 0) ? 
- rf_ctrl->addr0 : rf_ctrl->addr1); +static void b43_chantab_radio_2056_upload(struct b43_wldev *dev, + const struct b43_nphy_channeltab_entry_rev3 *e) +{ + b43_radio_write(dev, B2056_SYN_PLL_VCOCAL1, e->radio_syn_pll_vcocal1); + b43_radio_write(dev, B2056_SYN_PLL_VCOCAL2, e->radio_syn_pll_vcocal2); + b43_radio_write(dev, B2056_SYN_PLL_REFDIV, e->radio_syn_pll_refdiv); + b43_radio_write(dev, B2056_SYN_PLL_MMD2, e->radio_syn_pll_mmd2); + b43_radio_write(dev, B2056_SYN_PLL_MMD1, e->radio_syn_pll_mmd1); + b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, + e->radio_syn_pll_loopfilter1); + b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, + e->radio_syn_pll_loopfilter2); + b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER3, + e->radio_syn_pll_loopfilter3); + b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, + e->radio_syn_pll_loopfilter4); + b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER5, + e->radio_syn_pll_loopfilter5); + b43_radio_write(dev, B2056_SYN_RESERVED_ADDR27, + e->radio_syn_reserved_addr27); + b43_radio_write(dev, B2056_SYN_RESERVED_ADDR28, + e->radio_syn_reserved_addr28); + b43_radio_write(dev, B2056_SYN_RESERVED_ADDR29, + e->radio_syn_reserved_addr29); + b43_radio_write(dev, B2056_SYN_LOGEN_VCOBUF1, + e->radio_syn_logen_vcobuf1); + b43_radio_write(dev, B2056_SYN_LOGEN_MIXER2, e->radio_syn_logen_mixer2); + b43_radio_write(dev, B2056_SYN_LOGEN_BUF3, e->radio_syn_logen_buf3); + b43_radio_write(dev, B2056_SYN_LOGEN_BUF4, e->radio_syn_logen_buf4); - if ((1 << i) & core) - b43_phy_maskset(dev, addr, ~(rf_ctrl->bmask), - (value << rf_ctrl->shift)); + b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA_TUNE, + e->radio_rx0_lnaa_tune); + b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG_TUNE, + e->radio_rx0_lnag_tune); - b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1); - b43_phy_set(dev, B43_NPHY_RFCTL_CMD, - B43_NPHY_RFCTL_CMD_START); - udelay(1); - b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, 0xFFFE); - } - } + b43_radio_write(dev, B2056_TX0 | B2056_TX_INTPAA_BOOST_TUNE, + e->radio_tx0_intpaa_boost_tune); + b43_radio_write(dev, B2056_TX0 | B2056_TX_INTPAG_BOOST_TUNE, + e->radio_tx0_intpag_boost_tune); + b43_radio_write(dev, B2056_TX0 | B2056_TX_PADA_BOOST_TUNE, + e->radio_tx0_pada_boost_tune); + b43_radio_write(dev, B2056_TX0 | B2056_TX_PADG_BOOST_TUNE, + e->radio_tx0_padg_boost_tune); + b43_radio_write(dev, B2056_TX0 | B2056_TX_PGAA_BOOST_TUNE, + e->radio_tx0_pgaa_boost_tune); + b43_radio_write(dev, B2056_TX0 | B2056_TX_PGAG_BOOST_TUNE, + e->radio_tx0_pgag_boost_tune); + b43_radio_write(dev, B2056_TX0 | B2056_TX_MIXA_BOOST_TUNE, + e->radio_tx0_mixa_boost_tune); + b43_radio_write(dev, B2056_TX0 | B2056_TX_MIXG_BOOST_TUNE, + e->radio_tx0_mixg_boost_tune); + + b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA_TUNE, + e->radio_rx1_lnaa_tune); + b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG_TUNE, + e->radio_rx1_lnag_tune); + + b43_radio_write(dev, B2056_TX1 | B2056_TX_INTPAA_BOOST_TUNE, + e->radio_tx1_intpaa_boost_tune); + b43_radio_write(dev, B2056_TX1 | B2056_TX_INTPAG_BOOST_TUNE, + e->radio_tx1_intpag_boost_tune); + b43_radio_write(dev, B2056_TX1 | B2056_TX_PADA_BOOST_TUNE, + e->radio_tx1_pada_boost_tune); + b43_radio_write(dev, B2056_TX1 | B2056_TX_PADG_BOOST_TUNE, + e->radio_tx1_padg_boost_tune); + b43_radio_write(dev, B2056_TX1 | B2056_TX_PGAA_BOOST_TUNE, + e->radio_tx1_pgaa_boost_tune); + b43_radio_write(dev, B2056_TX1 | B2056_TX_PGAG_BOOST_TUNE, + e->radio_tx1_pgag_boost_tune); + b43_radio_write(dev, B2056_TX1 | B2056_TX_MIXA_BOOST_TUNE, + e->radio_tx1_mixa_boost_tune); + b43_radio_write(dev, B2056_TX1 | 
B2056_TX_MIXG_BOOST_TUNE, + e->radio_tx1_mixg_boost_tune); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */ -static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field, - u16 value, u8 core) +/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2056Setup */ +static void b43_radio_2056_setup(struct b43_wldev *dev, + const struct b43_nphy_channeltab_entry_rev3 *e) { - u8 i, j; - u16 reg, tmp, val; - B43_WARN_ON(dev->phy.rev < 3); - B43_WARN_ON(field > 4); - for (i = 0; i < 2; i++) { - if ((core == 1 && i == 1) || (core == 2 && !i)) - continue; - - reg = (i == 0) ? - B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2; - b43_phy_mask(dev, reg, 0xFBFF); + b43_chantab_radio_2056_upload(dev, e); + /* TODO */ + udelay(50); + /* VCO calibration */ + b43_radio_write(dev, B2056_SYN_PLL_VCOCAL12, 0x00); + b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x38); + b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x18); + b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x38); + b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x39); + udelay(300); +} - switch (field) { - case 0: - b43_phy_write(dev, reg, 0); - b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); - break; - case 1: - if (!i) { - b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC1, - 0xFC3F, (value << 6)); - b43_phy_maskset(dev, B43_NPHY_TXF_40CO_B1S1, - 0xFFFE, 1); - b43_phy_set(dev, B43_NPHY_RFCTL_CMD, - B43_NPHY_RFCTL_CMD_START); - for (j = 0; j < 100; j++) { - if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_START) { - j = 0; - break; - } - udelay(10); - } - if (j) - b43err(dev->wl, - "intc override timeout\n"); - b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1, - 0xFFFE); - } else { - b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC2, - 0xFC3F, (value << 6)); - b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, - 0xFFFE, 1); - b43_phy_set(dev, B43_NPHY_RFCTL_CMD, - B43_NPHY_RFCTL_CMD_RXTX); - for (j = 0; j < 100; j++) { - if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_RXTX) { - j = 0; - break; - } - udelay(10); - } - if (j) - b43err(dev->wl, - "intc override timeout\n"); - b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, - 0xFFFE); - } - break; - case 2: - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { - tmp = 0x0020; - val = value << 5; - } else { - tmp = 0x0010; - val = value << 4; - } - b43_phy_maskset(dev, reg, ~tmp, val); - break; - case 3: - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { - tmp = 0x0001; - val = value; - } else { - tmp = 0x0004; - val = value << 2; - } - b43_phy_maskset(dev, reg, ~tmp, val); - break; - case 4: - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { - tmp = 0x0002; - val = value << 1; - } else { - tmp = 0x0008; - val = value << 3; - } - b43_phy_maskset(dev, reg, ~tmp, val); - break; - } - } -} - -/************************************************** - * Various PHY ops - **************************************************/ - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */ -static void b43_nphy_write_clip_detection(struct b43_wldev *dev, - const u16 *clip_st) +static void b43_chantab_phy_upload(struct b43_wldev *dev, + const struct b43_phy_n_sfo_cfg *e) { - b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]); - b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]); + b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a); + b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2); + b43_phy_write(dev, B43_NPHY_BW3, e->phy_bw3); + b43_phy_write(dev, B43_NPHY_BW4, e->phy_bw4); + b43_phy_write(dev, B43_NPHY_BW5, e->phy_bw5); + b43_phy_write(dev, B43_NPHY_BW6, e->phy_bw6); } 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */ -static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st) +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */ +static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable) { - clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES); - clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES); -} + struct b43_phy_n *nphy = dev->phy.n; + u8 i; + u16 bmask, val, tmp; + enum ieee80211_band band = b43_current_band(dev->wl); -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */ -static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val) -{ - u16 tmp; + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 1); - if (dev->dev->core_rev == 16) - b43_mac_suspend(dev); + nphy->txpwrctrl = enable; + if (!enable) { + if (dev->phy.rev >= 3 && + (b43_phy_read(dev, B43_NPHY_TXPCTL_CMD) & + (B43_NPHY_TXPCTL_CMD_COEFF | + B43_NPHY_TXPCTL_CMD_HWPCTLEN | + B43_NPHY_TXPCTL_CMD_PCTLEN))) { + /* We disable enabled TX pwr ctl, save it's state */ + nphy->tx_pwr_idx[0] = b43_phy_read(dev, + B43_NPHY_C1_TXPCTL_STAT) & 0x7f; + nphy->tx_pwr_idx[1] = b43_phy_read(dev, + B43_NPHY_C2_TXPCTL_STAT) & 0x7f; + } - tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL); - tmp &= (B43_NPHY_CLASSCTL_CCKEN | B43_NPHY_CLASSCTL_OFDMEN | - B43_NPHY_CLASSCTL_WAITEDEN); - tmp &= ~mask; - tmp |= (val & mask); - b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp); + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6840); + for (i = 0; i < 84; i++) + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0); - if (dev->dev->core_rev == 16) - b43_mac_enable(dev); + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6C40); + for (i = 0; i < 84; i++) + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0); - return tmp; -} + tmp = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN; + if (dev->phy.rev >= 3) + tmp |= B43_NPHY_TXPCTL_CMD_PCTLEN; + b43_phy_mask(dev, B43_NPHY_TXPCTL_CMD, ~tmp); -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */ -static void b43_nphy_reset_cca(struct b43_wldev *dev) -{ - u16 bbcfg; + if (dev->phy.rev >= 3) { + b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100); + b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100); + } else { + b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000); + } - b43_phy_force_clock(dev, 1); - bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG); - b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA); - udelay(1); - b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA); - b43_phy_force_clock(dev, 0); - b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); -} + if (dev->phy.rev == 2) + b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, + ~B43_NPHY_BPHY_CTL3_SCALE, 0x53); + else if (dev->phy.rev < 2) + b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, + ~B43_NPHY_BPHY_CTL3_SCALE, 0x5A); -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */ -static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable) -{ - struct b43_phy *phy = &dev->phy; - struct b43_phy_n *nphy = phy->n; + if (dev->phy.rev < 2 && dev->phy.is_40mhz) + b43_hf_write(dev, b43_hf_read(dev) | B43_HF_TSSIRPSMW); + } else { + b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84, + nphy->adj_pwr_tbl); + b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84, + nphy->adj_pwr_tbl); - if (enable) { - static const u16 clip[] = { 0xFFFF, 0xFFFF }; - if (nphy->deaf_count++ == 0) { - nphy->classifier_state = b43_nphy_classifier(dev, 0, 0); - b43_nphy_classifier(dev, 0x7, 0); - b43_nphy_read_clip_detection(dev, nphy->clip_state); - 
b43_nphy_write_clip_detection(dev, clip); + bmask = B43_NPHY_TXPCTL_CMD_COEFF | + B43_NPHY_TXPCTL_CMD_HWPCTLEN; + /* wl does useless check for "enable" param here */ + val = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN; + if (dev->phy.rev >= 3) { + bmask |= B43_NPHY_TXPCTL_CMD_PCTLEN; + if (val) + val |= B43_NPHY_TXPCTL_CMD_PCTLEN; } - b43_nphy_reset_cca(dev); - } else { - if (--nphy->deaf_count == 0) { - b43_nphy_classifier(dev, 0x7, nphy->classifier_state); - b43_nphy_write_clip_detection(dev, nphy->clip_state); + b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val); + + if (band == IEEE80211_BAND_5GHZ) { + b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, + ~B43_NPHY_TXPCTL_CMD_INIT, 0x64); + if (dev->phy.rev > 1) + b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT, + ~B43_NPHY_TXPCTL_INIT_PIDXI1, + 0x64); + } + + if (dev->phy.rev >= 3) { + if (nphy->tx_pwr_idx[0] != 128 && + nphy->tx_pwr_idx[1] != 128) { + /* Recover TX pwr ctl state */ + b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, + ~B43_NPHY_TXPCTL_CMD_INIT, + nphy->tx_pwr_idx[0]); + if (dev->phy.rev > 1) + b43_phy_maskset(dev, + B43_NPHY_TXPCTL_INIT, + ~0xff, nphy->tx_pwr_idx[1]); + } + } + + if (dev->phy.rev >= 3) { + b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x100); + b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x100); + } else { + b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4000); + } + + if (dev->phy.rev == 2) + b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x3b); + else if (dev->phy.rev < 2) + b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x40); + + if (dev->phy.rev < 2 && dev->phy.is_40mhz) + b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_TSSIRPSMW); + + if (b43_nphy_ipa(dev)) { + b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x4); + b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x4); } } + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 0); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */ -static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev) +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */ +static void b43_nphy_tx_power_fix(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; + struct ssb_sprom *sprom = dev->dev->bus_sprom; - u8 i; - s16 tmp; - u16 data[4]; - s16 gain[2]; - u16 minmax[2]; - static const u16 lna_gain[4] = { -2, 10, 19, 25 }; - - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 1); + u8 txpi[2], bbmult, i; + u16 tmp, radio_gain, dac_gain; + u16 freq = dev->phy.channel_freq; + u32 txgain; + /* u32 gaintbl; rev3+ */ - if (nphy->gain_boost) { + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 1); + + if (dev->phy.rev >= 3) { + txpi[0] = 40; + txpi[1] = 40; + } else if (sprom->revision < 4) { + txpi[0] = 72; + txpi[1] = 72; + } else { if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { - gain[0] = 6; - gain[1] = 6; + txpi[0] = sprom->txpid2g[0]; + txpi[1] = sprom->txpid2g[1]; + } else if (freq >= 4900 && freq < 5100) { + txpi[0] = sprom->txpid5gl[0]; + txpi[1] = sprom->txpid5gl[1]; + } else if (freq >= 5100 && freq < 5500) { + txpi[0] = sprom->txpid5g[0]; + txpi[1] = sprom->txpid5g[1]; + } else if (freq >= 5500) { + txpi[0] = sprom->txpid5gh[0]; + txpi[1] = sprom->txpid5gh[1]; } else { - tmp = 40370 - 315 * dev->phy.channel; - gain[0] = ((tmp >> 13) + ((tmp >> 12) & 1)); - tmp = 23242 - 224 * dev->phy.channel; - gain[1] = ((tmp >> 13) + ((tmp >> 12) & 1)); + txpi[0] = 91; + txpi[1] = 91; } - } else { - gain[0] = 0; - gain[1] = 0; } + /* for (i = 0; i < 2; i++) { - if (nphy->elna_gain_config) { - data[0] = 19 + gain[i]; - data[1] = 25 + 
gain[i]; - data[2] = 25 + gain[i]; - data[3] = 25 + gain[i]; + nphy->txpwrindex[i].index_internal = txpi[i]; + nphy->txpwrindex[i].index_internal_save = txpi[i]; + } + */ + + for (i = 0; i < 2; i++) { + if (dev->phy.rev >= 3) { + /* FIXME: support 5GHz */ + txgain = b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]]; + radio_gain = (txgain >> 16) & 0x1FFFF; } else { - data[0] = lna_gain[0] + gain[i]; - data[1] = lna_gain[1] + gain[i]; - data[2] = lna_gain[2] + gain[i]; - data[3] = lna_gain[3] + gain[i]; + txgain = b43_ntab_tx_gain_rev0_1_2[txpi[i]]; + radio_gain = (txgain >> 16) & 0x1FFF; } - b43_ntab_write_bulk(dev, B43_NTAB16(i, 8), 4, data); - - minmax[i] = 23 + gain[i]; - } - b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN, ~B43_NPHY_C1_MINGAIN, - minmax[0] << B43_NPHY_C1_MINGAIN_SHIFT); - b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN, ~B43_NPHY_C2_MINGAIN, - minmax[1] << B43_NPHY_C2_MINGAIN_SHIFT); + dac_gain = (txgain >> 8) & 0x3F; + bbmult = txgain & 0xFF; - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 0); -} + if (dev->phy.rev >= 3) { + if (i == 0) + b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100); + else + b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100); + } else { + b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000); + } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */ -static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd, - u8 *events, u8 *delays, u8 length) -{ - struct b43_phy_n *nphy = dev->phy.n; - u8 i; - u8 end = (dev->phy.rev >= 3) ? 0x1F : 0x0F; - u16 offset1 = cmd << 4; - u16 offset2 = offset1 + 0x80; + if (i == 0) + b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN1, dac_gain); + else + b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN2, dac_gain); - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, true); + b43_ntab_write(dev, B43_NTAB16(0x7, 0x110 + i), radio_gain); - b43_ntab_write_bulk(dev, B43_NTAB8(7, offset1), length, events); - b43_ntab_write_bulk(dev, B43_NTAB8(7, offset2), length, delays); + tmp = b43_ntab_read(dev, B43_NTAB16(0xF, 0x57)); + if (i == 0) + tmp = (tmp & 0x00FF) | (bbmult << 8); + else + tmp = (tmp & 0xFF00) | bbmult; + b43_ntab_write(dev, B43_NTAB16(0xF, 0x57), tmp); - for (i = length; i < 16; i++) { - b43_ntab_write(dev, B43_NTAB8(7, offset1 + i), end); - b43_ntab_write(dev, B43_NTAB8(7, offset2 + i), 1); + if (b43_nphy_ipa(dev)) { + u32 tmp32; + u16 reg = (i == 0) ? 
+ B43_NPHY_PAPD_EN0 : B43_NPHY_PAPD_EN1; + tmp32 = b43_ntab_read(dev, B43_NTAB32(26 + i, txpi[i])); + b43_phy_maskset(dev, reg, 0xE00F, (u32) tmp32 << 4); + b43_phy_set(dev, reg, 0x4); + } } - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, false); -} - -/************************************************** - * Radio 0x2056 - **************************************************/ - -static void b43_chantab_radio_2056_upload(struct b43_wldev *dev, - const struct b43_nphy_channeltab_entry_rev3 *e) -{ - b43_radio_write(dev, B2056_SYN_PLL_VCOCAL1, e->radio_syn_pll_vcocal1); - b43_radio_write(dev, B2056_SYN_PLL_VCOCAL2, e->radio_syn_pll_vcocal2); - b43_radio_write(dev, B2056_SYN_PLL_REFDIV, e->radio_syn_pll_refdiv); - b43_radio_write(dev, B2056_SYN_PLL_MMD2, e->radio_syn_pll_mmd2); - b43_radio_write(dev, B2056_SYN_PLL_MMD1, e->radio_syn_pll_mmd1); - b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, - e->radio_syn_pll_loopfilter1); - b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, - e->radio_syn_pll_loopfilter2); - b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER3, - e->radio_syn_pll_loopfilter3); - b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, - e->radio_syn_pll_loopfilter4); - b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER5, - e->radio_syn_pll_loopfilter5); - b43_radio_write(dev, B2056_SYN_RESERVED_ADDR27, - e->radio_syn_reserved_addr27); - b43_radio_write(dev, B2056_SYN_RESERVED_ADDR28, - e->radio_syn_reserved_addr28); - b43_radio_write(dev, B2056_SYN_RESERVED_ADDR29, - e->radio_syn_reserved_addr29); - b43_radio_write(dev, B2056_SYN_LOGEN_VCOBUF1, - e->radio_syn_logen_vcobuf1); - b43_radio_write(dev, B2056_SYN_LOGEN_MIXER2, e->radio_syn_logen_mixer2); - b43_radio_write(dev, B2056_SYN_LOGEN_BUF3, e->radio_syn_logen_buf3); - b43_radio_write(dev, B2056_SYN_LOGEN_BUF4, e->radio_syn_logen_buf4); - - b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA_TUNE, - e->radio_rx0_lnaa_tune); - b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG_TUNE, - e->radio_rx0_lnag_tune); - - b43_radio_write(dev, B2056_TX0 | B2056_TX_INTPAA_BOOST_TUNE, - e->radio_tx0_intpaa_boost_tune); - b43_radio_write(dev, B2056_TX0 | B2056_TX_INTPAG_BOOST_TUNE, - e->radio_tx0_intpag_boost_tune); - b43_radio_write(dev, B2056_TX0 | B2056_TX_PADA_BOOST_TUNE, - e->radio_tx0_pada_boost_tune); - b43_radio_write(dev, B2056_TX0 | B2056_TX_PADG_BOOST_TUNE, - e->radio_tx0_padg_boost_tune); - b43_radio_write(dev, B2056_TX0 | B2056_TX_PGAA_BOOST_TUNE, - e->radio_tx0_pgaa_boost_tune); - b43_radio_write(dev, B2056_TX0 | B2056_TX_PGAG_BOOST_TUNE, - e->radio_tx0_pgag_boost_tune); - b43_radio_write(dev, B2056_TX0 | B2056_TX_MIXA_BOOST_TUNE, - e->radio_tx0_mixa_boost_tune); - b43_radio_write(dev, B2056_TX0 | B2056_TX_MIXG_BOOST_TUNE, - e->radio_tx0_mixg_boost_tune); - - b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA_TUNE, - e->radio_rx1_lnaa_tune); - b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG_TUNE, - e->radio_rx1_lnag_tune); + b43_phy_mask(dev, B43_NPHY_BPHY_CTL2, ~B43_NPHY_BPHY_CTL2_LUT); - b43_radio_write(dev, B2056_TX1 | B2056_TX_INTPAA_BOOST_TUNE, - e->radio_tx1_intpaa_boost_tune); - b43_radio_write(dev, B2056_TX1 | B2056_TX_INTPAG_BOOST_TUNE, - e->radio_tx1_intpag_boost_tune); - b43_radio_write(dev, B2056_TX1 | B2056_TX_PADA_BOOST_TUNE, - e->radio_tx1_pada_boost_tune); - b43_radio_write(dev, B2056_TX1 | B2056_TX_PADG_BOOST_TUNE, - e->radio_tx1_padg_boost_tune); - b43_radio_write(dev, B2056_TX1 | B2056_TX_PGAA_BOOST_TUNE, - e->radio_tx1_pgaa_boost_tune); - b43_radio_write(dev, B2056_TX1 | B2056_TX_PGAG_BOOST_TUNE, - e->radio_tx1_pgag_boost_tune); 
- b43_radio_write(dev, B2056_TX1 | B2056_TX_MIXA_BOOST_TUNE, - e->radio_tx1_mixa_boost_tune); - b43_radio_write(dev, B2056_TX1 | B2056_TX_MIXG_BOOST_TUNE, - e->radio_tx1_mixg_boost_tune); + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 0); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2056Setup */ -static void b43_radio_2056_setup(struct b43_wldev *dev, - const struct b43_nphy_channeltab_entry_rev3 *e) +static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev) { - struct ssb_sprom *sprom = dev->dev->bus_sprom; - enum ieee80211_band band = b43_current_band(dev->wl); - u16 offset; - u8 i; - u16 bias, cbias, pag_boost, pgag_boost, mixg_boost, padg_boost; + struct b43_phy *phy = &dev->phy; - B43_WARN_ON(dev->phy.rev < 3); + const u32 *table = NULL; +#if 0 + TODO: b43_ntab_papd_pga_gain_delta_ipa_2* + u32 rfpwr_offset; + u8 pga_gain; + int i; +#endif - b43_chantab_radio_2056_upload(dev, e); - b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ); - - if (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR && - b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { - b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F); - b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F); - if (dev->dev->chip_id == 0x4716) { - b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x14); - b43_radio_write(dev, B2056_SYN_PLL_CP2, 0); + if (phy->rev >= 3) { + if (b43_nphy_ipa(dev)) { + table = b43_nphy_get_ipa_gain_table(dev); } else { - b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0B); - b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x14); + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + if (phy->rev == 3) + table = b43_ntab_tx_gain_rev3_5ghz; + if (phy->rev == 4) + table = b43_ntab_tx_gain_rev4_5ghz; + else + table = b43_ntab_tx_gain_rev5plus_5ghz; + } else { + table = b43_ntab_tx_gain_rev3plus_2ghz; + } } + } else { + table = b43_ntab_tx_gain_rev0_1_2; } - if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR && - b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { - b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F); - b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F); - b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x05); - b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x0C); - } + b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128, table); + b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128, table); - if (dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) { - for (i = 0; i < 2; i++) { - offset = i ? B2056_TX1 : B2056_TX0; - if (dev->phy.rev >= 5) { - b43_radio_write(dev, - offset | B2056_TX_PADG_IDAC, 0xcc); - - if (dev->dev->chip_id == 0x4716) { - bias = 0x40; - cbias = 0x45; - pag_boost = 0x5; - pgag_boost = 0x33; - mixg_boost = 0x55; - } else { - bias = 0x25; - cbias = 0x20; - pag_boost = 0x4; - pgag_boost = 0x03; - mixg_boost = 0x65; - } - padg_boost = 0x77; - - b43_radio_write(dev, - offset | B2056_TX_INTPAG_IMAIN_STAT, - bias); - b43_radio_write(dev, - offset | B2056_TX_INTPAG_IAUX_STAT, - bias); - b43_radio_write(dev, - offset | B2056_TX_INTPAG_CASCBIAS, - cbias); - b43_radio_write(dev, - offset | B2056_TX_INTPAG_BOOST_TUNE, - pag_boost); - b43_radio_write(dev, - offset | B2056_TX_PGAG_BOOST_TUNE, - pgag_boost); - b43_radio_write(dev, - offset | B2056_TX_PADG_BOOST_TUNE, - padg_boost); - b43_radio_write(dev, - offset | B2056_TX_MIXG_BOOST_TUNE, - mixg_boost); - } else { - bias = dev->phy.is_40mhz ? 
0x40 : 0x20; - b43_radio_write(dev, - offset | B2056_TX_INTPAG_IMAIN_STAT, - bias); - b43_radio_write(dev, - offset | B2056_TX_INTPAG_IAUX_STAT, - bias); - b43_radio_write(dev, - offset | B2056_TX_INTPAG_CASCBIAS, - 0x30); - } - b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee); + if (phy->rev >= 3) { +#if 0 + nphy->gmval = (table[0] >> 16) & 0x7000; + + for (i = 0; i < 128; i++) { + pga_gain = (table[i] >> 24) & 0xF; + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain]; + else + rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_5g[pga_gain]; + b43_ntab_write(dev, B43_NTAB32(26, 576 + i), + rfpwr_offset); + b43_ntab_write(dev, B43_NTAB32(27, 576 + i), + rfpwr_offset); } - } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) { - /* TODO */ +#endif } +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */ +static void b43_radio_2055_setup(struct b43_wldev *dev, + const struct b43_nphy_channeltab_entry_rev2 *e) +{ + B43_WARN_ON(dev->phy.rev >= 3); - udelay(50); - /* VCO calibration */ - b43_radio_write(dev, B2056_SYN_PLL_VCOCAL12, 0x00); - b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x38); - b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x18); - b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x38); - b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x39); - udelay(300); -} - -static void b43_radio_init2056_pre(struct b43_wldev *dev) -{ - b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, - ~B43_NPHY_RFCTL_CMD_CHIP0PU); - /* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */ - b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, - B43_NPHY_RFCTL_CMD_OEPORFORCE); - b43_phy_set(dev, B43_NPHY_RFCTL_CMD, - ~B43_NPHY_RFCTL_CMD_OEPORFORCE); - b43_phy_set(dev, B43_NPHY_RFCTL_CMD, - B43_NPHY_RFCTL_CMD_CHIP0PU); -} - -static void b43_radio_init2056_post(struct b43_wldev *dev) -{ - b43_radio_set(dev, B2056_SYN_COM_CTRL, 0xB); - b43_radio_set(dev, B2056_SYN_COM_PU, 0x2); - b43_radio_set(dev, B2056_SYN_COM_RESET, 0x2); - msleep(1); - b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2); - b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC); - b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1); - /* - if (nphy->init_por) - Call Radio 2056 Recalibrate - */ -} - -/* - * Initialize a Broadcom 2056 N-radio - * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init - */ -static void b43_radio_init2056(struct b43_wldev *dev) -{ - b43_radio_init2056_pre(dev); - b2056_upload_inittabs(dev, 0, 0); - b43_radio_init2056_post(dev); -} - -/************************************************** - * Radio 0x2055 - **************************************************/ - -static void b43_chantab_radio_upload(struct b43_wldev *dev, - const struct b43_nphy_channeltab_entry_rev2 *e) -{ - b43_radio_write(dev, B2055_PLL_REF, e->radio_pll_ref); - b43_radio_write(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0); - b43_radio_write(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1); - b43_radio_write(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail); - b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ - - b43_radio_write(dev, B2055_VCO_CAL1, e->radio_vco_cal1); - b43_radio_write(dev, B2055_VCO_CAL2, e->radio_vco_cal2); - b43_radio_write(dev, B2055_PLL_LFC1, e->radio_pll_lfc1); - b43_radio_write(dev, B2055_PLL_LFR1, e->radio_pll_lfr1); - b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ - - b43_radio_write(dev, B2055_PLL_LFC2, e->radio_pll_lfc2); - b43_radio_write(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf); - b43_radio_write(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1); - 
b43_radio_write(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2); - b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ - - b43_radio_write(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune); - b43_radio_write(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune); - b43_radio_write(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1); - b43_radio_write(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn); - b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ - - b43_radio_write(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim); - b43_radio_write(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune); - b43_radio_write(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune); - b43_radio_write(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1); - b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ - - b43_radio_write(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn); - b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */ -static void b43_radio_2055_setup(struct b43_wldev *dev, - const struct b43_nphy_channeltab_entry_rev2 *e) -{ - B43_WARN_ON(dev->phy.rev >= 3); - - b43_chantab_radio_upload(dev, e); + b43_chantab_radio_upload(dev, e); udelay(50); b43_radio_write(dev, B2055_VCO_CAL10, 0x05); b43_radio_write(dev, B2055_VCO_CAL10, 0x45); @@ -833,873 +622,869 @@ static void b43_radio_init2055(struct b43_wldev *dev) b43_radio_init2055_post(dev); } -/************************************************** - * Samples - **************************************************/ - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */ -static int b43_nphy_load_samples(struct b43_wldev *dev, - struct b43_c32 *samples, u16 len) { - struct b43_phy_n *nphy = dev->phy.n; - u16 i; - u32 *data; - - data = kzalloc(len * sizeof(u32), GFP_KERNEL); - if (!data) { - b43err(dev->wl, "allocation for samples loading failed\n"); - return -ENOMEM; - } - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 1); - - for (i = 0; i < len; i++) { - data[i] = (samples[i].i & 0x3FF << 10); - data[i] |= samples[i].q & 0x3FF; - } - b43_ntab_write_bulk(dev, B43_NTAB32(17, 0), len, data); - - kfree(data); - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 0); - return 0; +static void b43_radio_init2056_pre(struct b43_wldev *dev) +{ + b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, + ~B43_NPHY_RFCTL_CMD_CHIP0PU); + /* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */ + b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, + B43_NPHY_RFCTL_CMD_OEPORFORCE); + b43_phy_set(dev, B43_NPHY_RFCTL_CMD, + ~B43_NPHY_RFCTL_CMD_OEPORFORCE); + b43_phy_set(dev, B43_NPHY_RFCTL_CMD, + B43_NPHY_RFCTL_CMD_CHIP0PU); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GenLoadSamples */ -static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max, - bool test) +static void b43_radio_init2056_post(struct b43_wldev *dev) { - int i; - u16 bw, len, rot, angle; - struct b43_c32 *samples; - - - bw = (dev->phy.is_40mhz) ? 
40 : 20; - len = bw << 3; - - if (test) { - if (b43_phy_read(dev, B43_NPHY_BBCFG) & B43_NPHY_BBCFG_RSTRX) - bw = 82; - else - bw = 80; + b43_radio_set(dev, B2056_SYN_COM_CTRL, 0xB); + b43_radio_set(dev, B2056_SYN_COM_PU, 0x2); + b43_radio_set(dev, B2056_SYN_COM_RESET, 0x2); + msleep(1); + b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2); + b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC); + b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1); + /* + if (nphy->init_por) + Call Radio 2056 Recalibrate + */ +} - if (dev->phy.is_40mhz) - bw <<= 1; +/* + * Initialize a Broadcom 2056 N-radio + * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init + */ +static void b43_radio_init2056(struct b43_wldev *dev) +{ + b43_radio_init2056_pre(dev); + b2056_upload_inittabs(dev, 0, 0); + b43_radio_init2056_post(dev); +} - len = bw << 1; - } +/* + * Upload the N-PHY tables. + * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables + */ +static void b43_nphy_tables_init(struct b43_wldev *dev) +{ + if (dev->phy.rev < 3) + b43_nphy_rev0_1_2_tables_init(dev); + else + b43_nphy_rev3plus_tables_init(dev); +} - samples = kcalloc(len, sizeof(struct b43_c32), GFP_KERNEL); - if (!samples) { - b43err(dev->wl, "allocation for samples generation failed\n"); - return 0; - } - rot = (((freq * 36) / bw) << 16) / 100; - angle = 0; +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */ +static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable) +{ + struct b43_phy_n *nphy = dev->phy.n; + enum ieee80211_band band; + u16 tmp; - for (i = 0; i < len; i++) { - samples[i] = b43_cordic(angle); - angle += rot; - samples[i].q = CORDIC_CONVERT(samples[i].q * max); - samples[i].i = CORDIC_CONVERT(samples[i].i * max); + if (!enable) { + nphy->rfctrl_intc1_save = b43_phy_read(dev, + B43_NPHY_RFCTL_INTC1); + nphy->rfctrl_intc2_save = b43_phy_read(dev, + B43_NPHY_RFCTL_INTC2); + band = b43_current_band(dev->wl); + if (dev->phy.rev >= 3) { + if (band == IEEE80211_BAND_5GHZ) + tmp = 0x600; + else + tmp = 0x480; + } else { + if (band == IEEE80211_BAND_5GHZ) + tmp = 0x180; + else + tmp = 0x120; + } + b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp); + b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp); + } else { + b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, + nphy->rfctrl_intc1_save); + b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, + nphy->rfctrl_intc2_save); } - - i = b43_nphy_load_samples(dev, samples, len); - kfree(samples); - return (i < 0) ? 
0 : len; } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RunSamples */ -static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops, - u16 wait, bool iqmode, bool dac_test) +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */ +static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev) { - struct b43_phy_n *nphy = dev->phy.n; - int i; - u16 seq_mode; - u32 tmp; + u16 tmp; - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, true); + if (dev->phy.rev >= 3) { + if (b43_nphy_ipa(dev)) { + tmp = 4; + b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2, + (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp); + } - if ((nphy->bb_mult_save & 0x80000000) == 0) { - tmp = b43_ntab_read(dev, B43_NTAB16(15, 87)); - nphy->bb_mult_save = (tmp & 0xFFFF) | 0x80000000; + tmp = 1; + b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2, + (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp); } +} - if (!dev->phy.is_40mhz) - tmp = 0x6464; - else - tmp = 0x4747; - b43_ntab_write(dev, B43_NTAB16(15, 87), tmp); +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */ +static void b43_nphy_reset_cca(struct b43_wldev *dev) +{ + u16 bbcfg; - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, false); + b43_phy_force_clock(dev, 1); + bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG); + b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA); + udelay(1); + b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA); + b43_phy_force_clock(dev, 0); + b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); +} - b43_phy_write(dev, B43_NPHY_SAMP_DEPCNT, (samps - 1)); +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */ +static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble) +{ + u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG); - if (loops != 0xFFFF) - b43_phy_write(dev, B43_NPHY_SAMP_LOOPCNT, (loops - 1)); + mimocfg |= B43_NPHY_MIMOCFG_AUTO; + if (preamble == 1) + mimocfg |= B43_NPHY_MIMOCFG_GFMIX; else - b43_phy_write(dev, B43_NPHY_SAMP_LOOPCNT, loops); + mimocfg &= ~B43_NPHY_MIMOCFG_GFMIX; - b43_phy_write(dev, B43_NPHY_SAMP_WAITCNT, wait); + b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg); +} - seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE); +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */ +static void b43_nphy_update_txrx_chain(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy = dev->phy.n; - b43_phy_set(dev, B43_NPHY_RFSEQMODE, B43_NPHY_RFSEQMODE_CAOVER); - if (iqmode) { - b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF); - b43_phy_set(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8000); - } else { - if (dac_test) - b43_phy_write(dev, B43_NPHY_SAMP_CMD, 5); - else - b43_phy_write(dev, B43_NPHY_SAMP_CMD, 1); - } - for (i = 0; i < 100; i++) { - if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & 1)) { - i = 0; - break; - } - udelay(10); + bool override = false; + u16 chain = 0x33; + + if (nphy->txrx_chain == 0) { + chain = 0x11; + override = true; + } else if (nphy->txrx_chain == 1) { + chain = 0x22; + override = true; } - if (i) - b43err(dev->wl, "run samples timeout\n"); - b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode); -} + b43_phy_maskset(dev, B43_NPHY_RFSEQCA, + ~(B43_NPHY_RFSEQCA_TXEN | B43_NPHY_RFSEQCA_RXEN), + chain); -/************************************************** - * RSSI - **************************************************/ + if (override) + b43_phy_set(dev, B43_NPHY_RFSEQMODE, + B43_NPHY_RFSEQMODE_CAOVER); + else + b43_phy_mask(dev, B43_NPHY_RFSEQMODE, + ~B43_NPHY_RFSEQMODE_CAOVER); +} -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */ -static void 
b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale, - s8 offset, u8 core, u8 rail, - enum b43_nphy_rssi_type type) +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */ +static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est, + u16 samps, u8 time, bool wait) { + int i; u16 tmp; - bool core1or5 = (core == 1) || (core == 5); - bool core2or5 = (core == 2) || (core == 5); - - offset = clamp_val(offset, -32, 31); - tmp = ((scale & 0x3F) << 8) | (offset & 0x3F); - - if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_Z)) - b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, tmp); - if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_Z)) - b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, tmp); - if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_Z)) - b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, tmp); - if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_Z)) - b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, tmp); - - if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_X)) - b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, tmp); - if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_X)) - b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, tmp); - if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_X)) - b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, tmp); - if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_X)) - b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, tmp); - if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_Y)) - b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, tmp); - if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_Y)) - b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, tmp); - if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_Y)) - b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, tmp); - if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_Y)) - b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, tmp); + b43_phy_write(dev, B43_NPHY_IQEST_SAMCNT, samps); + b43_phy_maskset(dev, B43_NPHY_IQEST_WT, ~B43_NPHY_IQEST_WT_VAL, time); + if (wait) + b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_MODE); + else + b43_phy_mask(dev, B43_NPHY_IQEST_CMD, ~B43_NPHY_IQEST_CMD_MODE); - if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_TBD)) - b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TBD, tmp); - if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_TBD)) - b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TBD, tmp); - if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_TBD)) - b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TBD, tmp); - if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_TBD)) - b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TBD, tmp); + b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_START); - if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_PWRDET)) - b43_phy_write(dev, B43_NPHY_RSSIMC_0I_PWRDET, tmp); - if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_PWRDET)) - b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_PWRDET, tmp); - if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_PWRDET)) - b43_phy_write(dev, B43_NPHY_RSSIMC_1I_PWRDET, tmp); - if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_PWRDET)) - b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_PWRDET, tmp); + for (i = 1000; i; i--) { + tmp = b43_phy_read(dev, B43_NPHY_IQEST_CMD); + if (!(tmp & B43_NPHY_IQEST_CMD_START)) { + est->i0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI0) << 16) | + b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO0); + est->q0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI0) << 16) | + b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO0); + est->iq0_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI0) 
<< 16) | + b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO0); - if (core1or5 && (type == B43_NPHY_RSSI_TSSI_I)) - b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TSSI, tmp); - if (core2or5 && (type == B43_NPHY_RSSI_TSSI_I)) - b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TSSI, tmp); + est->i1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI1) << 16) | + b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO1); + est->q1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI1) << 16) | + b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO1); + est->iq1_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI1) << 16) | + b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO1); + return; + } + udelay(10); + } + memset(est, 0, sizeof(*est)); +} - if (core1or5 && (type == B43_NPHY_RSSI_TSSI_Q)) - b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TSSI, tmp); - if (core2or5 && (type == B43_NPHY_RSSI_TSSI_Q)) - b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TSSI, tmp); +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */ +static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write, + struct b43_phy_n_iq_comp *pcomp) +{ + if (write) { + b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPA0, pcomp->a0); + b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPB0, pcomp->b0); + b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPA1, pcomp->a1); + b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPB1, pcomp->b1); + } else { + pcomp->a0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPA0); + pcomp->b0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPB0); + pcomp->a1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPA1); + pcomp->b1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPB1); + } } -static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type) +#if 0 +/* Ready but not used anywhere */ +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */ +static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core) { - u8 i; - u16 reg, val; + u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs; - if (code == 0) { - b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, 0xFDFF); - b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, 0xFDFF); - b43_phy_mask(dev, B43_NPHY_AFECTL_C1, 0xFCFF); - b43_phy_mask(dev, B43_NPHY_AFECTL_C2, 0xFCFF); - b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S0, 0xFFDF); - b43_phy_mask(dev, B43_NPHY_TXF_40CO_B32S1, 0xFFDF); - b43_phy_mask(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0xFFC3); - b43_phy_mask(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0xFFC3); + b43_phy_write(dev, B43_NPHY_RFSEQCA, regs[0]); + if (core == 0) { + b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[1]); + b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]); } else { - for (i = 0; i < 2; i++) { - if ((code == 1 && i == 1) || (code == 2 && !i)) - continue; + b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]); + b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]); + } + b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[3]); + b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[4]); + b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, regs[5]); + b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, regs[6]); + b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, regs[7]); + b43_phy_write(dev, B43_NPHY_RFCTL_OVER, regs[8]); + b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]); + b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]); +} - reg = (i == 0) ? - B43_NPHY_AFECTL_OVER1 : B43_NPHY_AFECTL_OVER; - b43_phy_maskset(dev, reg, 0xFDFF, 0x0200); +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */ +static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core) +{ + u8 rxval, txval; + u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs; - if (type < 3) { - reg = (i == 0) ? 
- B43_NPHY_AFECTL_C1 : - B43_NPHY_AFECTL_C2; - b43_phy_maskset(dev, reg, 0xFCFF, 0); + regs[0] = b43_phy_read(dev, B43_NPHY_RFSEQCA); + if (core == 0) { + regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C1); + regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1); + } else { + regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2); + regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); + } + regs[3] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1); + regs[4] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2); + regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1); + regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2); + regs[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S1); + regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER); + regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0); + regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1); - reg = (i == 0) ? - B43_NPHY_RFCTL_LUT_TRSW_UP1 : - B43_NPHY_RFCTL_LUT_TRSW_UP2; - b43_phy_maskset(dev, reg, 0xFFC3, 0); + b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001); + b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001); - if (type == 0) - val = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 4 : 8; - else if (type == 1) - val = 16; - else - val = 32; - b43_phy_set(dev, reg, val); - - reg = (i == 0) ? - B43_NPHY_TXF_40CO_B1S0 : - B43_NPHY_TXF_40CO_B32S1; - b43_phy_set(dev, reg, 0x0020); - } else { - if (type == 6) - val = 0x0100; - else if (type == 3) - val = 0x0200; - else - val = 0x0300; - - reg = (i == 0) ? - B43_NPHY_AFECTL_C1 : - B43_NPHY_AFECTL_C2; - - b43_phy_maskset(dev, reg, 0xFCFF, val); - b43_phy_maskset(dev, reg, 0xF3FF, val << 2); + b43_phy_maskset(dev, B43_NPHY_RFSEQCA, + ~B43_NPHY_RFSEQCA_RXDIS & 0xFFFF, + ((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT)); + b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN, + ((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT)); + b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN, + (core << B43_NPHY_RFSEQCA_RXEN_SHIFT)); + b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXDIS, + (core << B43_NPHY_RFSEQCA_TXDIS_SHIFT)); - if (type != 3 && type != 6) { - enum ieee80211_band band = - b43_current_band(dev->wl); + if (core == 0) { + b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x0007); + b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0007); + } else { + b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x0007); + b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007); + } - if (b43_nphy_ipa(dev)) - val = (band == IEEE80211_BAND_5GHZ) ? 0xC : 0xE; - else - val = 0x11; - reg = (i == 0) ? 0x2000 : 0x3000; - reg |= B2055_PADDRV; - b43_radio_write16(dev, reg, val); + b43_nphy_rf_control_intc_override(dev, 2, 0, 3); + b43_nphy_rf_control_override(dev, 8, 0, 3, false); + b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX); - reg = (i == 0) ? 
- B43_NPHY_AFECTL_OVER1 : - B43_NPHY_AFECTL_OVER; - b43_phy_set(dev, reg, 0x0200); - } - } - } + if (core == 0) { + rxval = 1; + txval = 8; + } else { + rxval = 4; + txval = 2; } + b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1)); + b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core)); } +#endif -static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type) +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */ +static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask) { - u16 val; + int i; + s32 iq; + u32 ii; + u32 qq; + int iq_nbits, qq_nbits; + int arsh, brsh; + u16 tmp, a, b; - if (type < 3) - val = 0; - else if (type == 6) - val = 1; - else if (type == 3) - val = 2; - else - val = 3; + struct nphy_iq_est est; + struct b43_phy_n_iq_comp old; + struct b43_phy_n_iq_comp new = { }; + bool error = false; - val = (val << 12) | (val << 14); - b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val); - b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val); + if (mask == 0) + return; - if (type < 3) { - b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF, - (type + 1) << 4); - b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF, - (type + 1) << 4); - } + b43_nphy_rx_iq_coeffs(dev, false, &old); + b43_nphy_rx_iq_coeffs(dev, true, &new); + b43_nphy_rx_iq_est(dev, &est, 0x4000, 32, false); + new = old; - if (code == 0) { - b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x3000); - if (type < 3) { - b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, - ~(B43_NPHY_RFCTL_CMD_RXEN | - B43_NPHY_RFCTL_CMD_CORESEL)); - b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, - ~(0x1 << 12 | - 0x1 << 5 | - 0x1 << 1 | - 0x1)); - b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, - ~B43_NPHY_RFCTL_CMD_START); - udelay(20); - b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1); + for (i = 0; i < 2; i++) { + if (i == 0 && (mask & 1)) { + iq = est.iq0_prod; + ii = est.i0_pwr; + qq = est.q0_pwr; + } else if (i == 1 && (mask & 2)) { + iq = est.iq1_prod; + ii = est.i1_pwr; + qq = est.q1_pwr; + } else { + continue; } - } else { - b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x3000); - if (type < 3) { - b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, - ~(B43_NPHY_RFCTL_CMD_RXEN | - B43_NPHY_RFCTL_CMD_CORESEL), - (B43_NPHY_RFCTL_CMD_RXEN | - code << B43_NPHY_RFCTL_CMD_CORESEL_SHIFT)); - b43_phy_set(dev, B43_NPHY_RFCTL_OVER, - (0x1 << 12 | - 0x1 << 5 | - 0x1 << 1 | - 0x1)); - b43_phy_set(dev, B43_NPHY_RFCTL_CMD, - B43_NPHY_RFCTL_CMD_START); - udelay(20); - b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1); + + if (ii + qq < 2) { + error = true; + break; } - } -} -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */ -static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code, u8 type) -{ - if (dev->phy.rev >= 3) - b43_nphy_rev3_rssi_select(dev, code, type); - else - b43_nphy_rev2_rssi_select(dev, code, type); -} + iq_nbits = fls(abs(iq)); + qq_nbits = fls(qq); -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRssi2055Vcm */ -static void b43_nphy_set_rssi_2055_vcm(struct b43_wldev *dev, u8 type, u8 *buf) -{ - int i; - for (i = 0; i < 2; i++) { - if (type == 2) { - if (i == 0) { - b43_radio_maskset(dev, B2055_C1_B0NB_RSSIVCM, - 0xFC, buf[0]); - b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5, - 0xFC, buf[1]); + arsh = iq_nbits - 20; + if (arsh >= 0) { + a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh))); + tmp = ii >> arsh; + } else { + a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh))); + tmp = ii << -arsh; + } + if (tmp == 0) { + error = true; + break; + } + a /= tmp; + + brsh = qq_nbits - 11; + if (brsh >= 0) { + b = (qq << (31 
- qq_nbits)); + tmp = ii >> brsh; + } else { + b = (qq << (31 - qq_nbits)); + tmp = ii << -brsh; + } + if (tmp == 0) { + error = true; + break; + } + b = int_sqrt(b / tmp - a * a) - (1 << 10); + + if (i == 0 && (mask & 0x1)) { + if (dev->phy.rev >= 3) { + new.a0 = a & 0x3FF; + new.b0 = b & 0x3FF; } else { - b43_radio_maskset(dev, B2055_C2_B0NB_RSSIVCM, - 0xFC, buf[2 * i]); - b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5, - 0xFC, buf[2 * i + 1]); + new.a0 = b & 0x3FF; + new.b0 = a & 0x3FF; + } + } else if (i == 1 && (mask & 0x2)) { + if (dev->phy.rev >= 3) { + new.a1 = a & 0x3FF; + new.b1 = b & 0x3FF; + } else { + new.a1 = b & 0x3FF; + new.b1 = a & 0x3FF; } - } else { - if (i == 0) - b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5, - 0xF3, buf[0] << 2); - else - b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5, - 0xF3, buf[2 * i + 1] << 2); } } + + if (error) + new = old; + + b43_nphy_rx_iq_coeffs(dev, true, &new); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PollRssi */ -static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf, - u8 nsamp) +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */ +static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev) { - int i; - int out; - u16 save_regs_phy[9]; - u16 s[2]; + u16 array[4]; + b43_ntab_read_bulk(dev, B43_NTAB16(0xF, 0x50), 4, array); - if (dev->phy.rev >= 3) { - save_regs_phy[0] = b43_phy_read(dev, - B43_NPHY_RFCTL_LUT_TRSW_UP1); - save_regs_phy[1] = b43_phy_read(dev, - B43_NPHY_RFCTL_LUT_TRSW_UP2); - save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_C1); - save_regs_phy[3] = b43_phy_read(dev, B43_NPHY_AFECTL_C2); - save_regs_phy[4] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1); - save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); - save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0); - save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1); - save_regs_phy[8] = 0; - } else { - save_regs_phy[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1); - save_regs_phy[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2); - save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); - save_regs_phy[3] = b43_phy_read(dev, B43_NPHY_RFCTL_CMD); - save_regs_phy[4] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER); - save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1); - save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2); - save_regs_phy[7] = 0; - save_regs_phy[8] = 0; - } + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW1, array[1]); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW2, array[2]); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]); +} - b43_nphy_rssi_select(dev, 5, type); +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */ +static void b43_nphy_write_clip_detection(struct b43_wldev *dev, + const u16 *clip_st) +{ + b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]); + b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]); +} - if (dev->phy.rev < 2) { - save_regs_phy[8] = b43_phy_read(dev, B43_NPHY_GPIO_SEL); - b43_phy_write(dev, B43_NPHY_GPIO_SEL, 5); - } +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */ +static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st) +{ + clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES); + clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES); +} - for (i = 0; i < 4; i++) - buf[i] = 0; +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */ +static void b43_nphy_superswitch_init(struct 
b43_wldev *dev, bool init) +{ + if (dev->phy.rev >= 3) { + if (!init) + return; + if (0 /* FIXME */) { + b43_ntab_write(dev, B43_NTAB16(9, 2), 0x211); + b43_ntab_write(dev, B43_NTAB16(9, 3), 0x222); + b43_ntab_write(dev, B43_NTAB16(9, 8), 0x144); + b43_ntab_write(dev, B43_NTAB16(9, 12), 0x188); + } + } else { + b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0); + b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0); - for (i = 0; i < nsamp; i++) { - if (dev->phy.rev < 2) { - s[0] = b43_phy_read(dev, B43_NPHY_GPIO_LOOUT); - s[1] = b43_phy_read(dev, B43_NPHY_GPIO_HIOUT); - } else { - s[0] = b43_phy_read(dev, B43_NPHY_RSSI1); - s[1] = b43_phy_read(dev, B43_NPHY_RSSI2); + switch (dev->dev->bus_type) { +#ifdef CONFIG_B43_BCMA + case B43_BUS_BCMA: + bcma_chipco_gpio_control(&dev->dev->bdev->bus->drv_cc, + 0xFC00, 0xFC00); + break; +#endif +#ifdef CONFIG_B43_SSB + case B43_BUS_SSB: + ssb_chipco_gpio_control(&dev->dev->sdev->bus->chipco, + 0xFC00, 0xFC00); + break; +#endif } - buf[0] += ((s8)((s[0] & 0x3F) << 2)) >> 2; - buf[1] += ((s8)(((s[0] >> 8) & 0x3F) << 2)) >> 2; - buf[2] += ((s8)((s[1] & 0x3F) << 2)) >> 2; - buf[3] += ((s8)(((s[1] >> 8) & 0x3F) << 2)) >> 2; + b43_write32(dev, B43_MMIO_MACCTL, + b43_read32(dev, B43_MMIO_MACCTL) & + ~B43_MACCTL_GPOUTSMSK); + b43_write16(dev, B43_MMIO_GPIO_MASK, + b43_read16(dev, B43_MMIO_GPIO_MASK) | 0xFC00); + b43_write16(dev, B43_MMIO_GPIO_CONTROL, + b43_read16(dev, B43_MMIO_GPIO_CONTROL) & ~0xFC00); + + if (init) { + b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8); + b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301); + b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8); + b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301); + } } - out = (buf[0] & 0xFF) << 24 | (buf[1] & 0xFF) << 16 | - (buf[2] & 0xFF) << 8 | (buf[3] & 0xFF); +} - if (dev->phy.rev < 2) - b43_phy_write(dev, B43_NPHY_GPIO_SEL, save_regs_phy[8]); +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */ +static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val) +{ + u16 tmp; - if (dev->phy.rev >= 3) { - b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, - save_regs_phy[0]); - b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, - save_regs_phy[1]); - b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[2]); - b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[3]); - b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, save_regs_phy[4]); - b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]); - b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]); - b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]); + if (dev->dev->core_rev == 16) + b43_mac_suspend(dev); + + tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL); + tmp &= (B43_NPHY_CLASSCTL_CCKEN | B43_NPHY_CLASSCTL_OFDMEN | + B43_NPHY_CLASSCTL_WAITEDEN); + tmp &= ~mask; + tmp |= (val & mask); + b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp); + + if (dev->dev->core_rev == 16) + b43_mac_enable(dev); + + return tmp; +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */ +static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_n *nphy = phy->n; + + if (enable) { + static const u16 clip[] = { 0xFFFF, 0xFFFF }; + if (nphy->deaf_count++ == 0) { + nphy->classifier_state = b43_nphy_classifier(dev, 0, 0); + b43_nphy_classifier(dev, 0x7, 0); + b43_nphy_read_clip_detection(dev, nphy->clip_state); + b43_nphy_write_clip_detection(dev, clip); + } + b43_nphy_reset_cca(dev); } else { - b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[0]); 
- b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[1]); - b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[2]); - b43_phy_write(dev, B43_NPHY_RFCTL_CMD, save_regs_phy[3]); - b43_phy_write(dev, B43_NPHY_RFCTL_OVER, save_regs_phy[4]); - b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, save_regs_phy[5]); - b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, save_regs_phy[6]); + if (--nphy->deaf_count == 0) { + b43_nphy_classifier(dev, 0x7, nphy->classifier_state); + b43_nphy_write_clip_detection(dev, nphy->clip_state); + } } - - return out; } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */ -static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type) +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */ +static void b43_nphy_stop_playback(struct b43_wldev *dev) { - int i, j; - u8 state[4]; - u8 code, val; - u16 class, override; - u8 regs_save_radio[2]; - u16 regs_save_phy[2]; + struct b43_phy_n *nphy = dev->phy.n; + u16 tmp; - s8 offset[4]; - u8 core; - u8 rail; + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 1); - u16 clip_state[2]; - u16 clip_off[2] = { 0xFFFF, 0xFFFF }; - s32 results_min[4] = { }; - u8 vcm_final[4] = { }; - s32 results[4][4] = { }; - s32 miniq[4][2] = { }; + tmp = b43_phy_read(dev, B43_NPHY_SAMP_STAT); + if (tmp & 0x1) + b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP); + else if (tmp & 0x2) + b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF); - if (type == 2) { - code = 0; - val = 6; - } else if (type < 2) { - code = 25; - val = 4; - } else { - B43_WARN_ON(1); - return; - } + b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004); - class = b43_nphy_classifier(dev, 0, 0); - b43_nphy_classifier(dev, 7, 4); - b43_nphy_read_clip_detection(dev, clip_state); - b43_nphy_write_clip_detection(dev, clip_off); + if (nphy->bb_mult_save & 0x80000000) { + tmp = nphy->bb_mult_save & 0xFFFF; + b43_ntab_write(dev, B43_NTAB16(15, 87), tmp); + nphy->bb_mult_save = 0; + } - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) - override = 0x140; - else - override = 0x110; + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 0); +} - regs_save_phy[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1); - regs_save_radio[0] = b43_radio_read16(dev, B2055_C1_PD_RXTX); - b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, override); - b43_radio_write16(dev, B2055_C1_PD_RXTX, val); +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */ +static void b43_nphy_spur_workaround(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy = dev->phy.n; - regs_save_phy[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2); - regs_save_radio[1] = b43_radio_read16(dev, B2055_C2_PD_RXTX); - b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, override); - b43_radio_write16(dev, B2055_C2_PD_RXTX, val); + u8 channel = dev->phy.channel; + int tone[2] = { 57, 58 }; + u32 noise[2] = { 0x3FF, 0x3FF }; - state[0] = b43_radio_read16(dev, B2055_C1_PD_RSSIMISC) & 0x07; - state[1] = b43_radio_read16(dev, B2055_C2_PD_RSSIMISC) & 0x07; - b43_radio_mask(dev, B2055_C1_PD_RSSIMISC, 0xF8); - b43_radio_mask(dev, B2055_C2_PD_RSSIMISC, 0xF8); - state[2] = b43_radio_read16(dev, B2055_C1_SP_RSSI) & 0x07; - state[3] = b43_radio_read16(dev, B2055_C2_SP_RSSI) & 0x07; + B43_WARN_ON(dev->phy.rev < 3); - b43_nphy_rssi_select(dev, 5, type); - b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 0, type); - b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 1, type); + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 1); - for (i = 0; i < 4; i++) { - u8 tmp[4]; - for (j = 0; j < 4; j++) - tmp[j] = i; - if (type != 1) - 
b43_nphy_set_rssi_2055_vcm(dev, type, tmp); - b43_nphy_poll_rssi(dev, type, results[i], 8); - if (type < 2) - for (j = 0; j < 2; j++) - miniq[i][j] = min(results[i][2 * j], - results[i][2 * j + 1]); + if (nphy->gband_spurwar_en) { + /* TODO: N PHY Adjust Analog Pfbw (7) */ + if (channel == 11 && dev->phy.is_40mhz) + ; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/ + else + ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/ + /* TODO: N PHY Adjust CRS Min Power (0x1E) */ } - for (i = 0; i < 4; i++) { - s32 mind = 40; - u8 minvcm = 0; - s32 minpoll = 249; - s32 curr; - for (j = 0; j < 4; j++) { - if (type == 2) - curr = abs(results[j][i]); - else - curr = abs(miniq[j][i / 2] - code * 8); - - if (curr < mind) { - mind = curr; - minvcm = j; + if (nphy->aband_spurwar_en) { + if (channel == 54) { + tone[0] = 0x20; + noise[0] = 0x25F; + } else if (channel == 38 || channel == 102 || channel == 118) { + if (0 /* FIXME */) { + tone[0] = 0x20; + noise[0] = 0x21F; + } else { + tone[0] = 0; + noise[0] = 0; } - - if (results[j][i] < minpoll) - minpoll = results[j][i]; + } else if (channel == 134) { + tone[0] = 0x20; + noise[0] = 0x21F; + } else if (channel == 151) { + tone[0] = 0x10; + noise[0] = 0x23F; + } else if (channel == 153 || channel == 161) { + tone[0] = 0x30; + noise[0] = 0x23F; + } else { + tone[0] = 0; + noise[0] = 0; } - results_min[i] = minpoll; - vcm_final[i] = minvcm; - } - - if (type != 1) - b43_nphy_set_rssi_2055_vcm(dev, type, vcm_final); - - for (i = 0; i < 4; i++) { - offset[i] = (code * 8) - results[vcm_final[i]][i]; - if (offset[i] < 0) - offset[i] = -((abs(offset[i]) + 4) / 8); + if (!tone[0] && !noise[0]) + ; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/ else - offset[i] = (offset[i] + 4) / 8; - - if (results_min[i] == 248) - offset[i] = code - 32; - - core = (i / 2) ? 2 : 1; - rail = (i % 2) ? 
1 : 0; - - b43_nphy_scale_offset_rssi(dev, 0, offset[i], core, rail, - type); - } - - b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[0]); - b43_radio_maskset(dev, B2055_C2_PD_RSSIMISC, 0xF8, state[1]); - - switch (state[2]) { - case 1: - b43_nphy_rssi_select(dev, 1, 2); - break; - case 4: - b43_nphy_rssi_select(dev, 1, 0); - break; - case 2: - b43_nphy_rssi_select(dev, 1, 1); - break; - default: - b43_nphy_rssi_select(dev, 1, 1); - break; - } - - switch (state[3]) { - case 1: - b43_nphy_rssi_select(dev, 2, 2); - break; - case 4: - b43_nphy_rssi_select(dev, 2, 0); - break; - default: - b43_nphy_rssi_select(dev, 2, 1); - break; + ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/ } - b43_nphy_rssi_select(dev, 0, type); - - b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs_save_phy[0]); - b43_radio_write16(dev, B2055_C1_PD_RXTX, regs_save_radio[0]); - b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs_save_phy[1]); - b43_radio_write16(dev, B2055_C2_PD_RXTX, regs_save_radio[1]); - - b43_nphy_classifier(dev, 7, class); - b43_nphy_write_clip_detection(dev, clip_state); - /* Specs don't say about reset here, but it makes wl and b43 dumps - identical, it really seems wl performs this */ - b43_nphy_reset_cca(dev); + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 0); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */ -static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */ +static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev) { - /* TODO */ -} + struct b43_phy_n *nphy = dev->phy.n; -/* - * RSSI Calibration - * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal - */ -static void b43_nphy_rssi_cal(struct b43_wldev *dev) -{ - if (dev->phy.rev >= 3) { - b43_nphy_rev3_rssi_cal(dev); + u8 i; + s16 tmp; + u16 data[4]; + s16 gain[2]; + u16 minmax[2]; + static const u16 lna_gain[4] = { -2, 10, 19, 25 }; + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 1); + + if (nphy->gain_boost) { + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + gain[0] = 6; + gain[1] = 6; + } else { + tmp = 40370 - 315 * dev->phy.channel; + gain[0] = ((tmp >> 13) + ((tmp >> 12) & 1)); + tmp = 23242 - 224 * dev->phy.channel; + gain[1] = ((tmp >> 13) + ((tmp >> 12) & 1)); + } } else { - b43_nphy_rev2_rssi_cal(dev, B43_NPHY_RSSI_Z); - b43_nphy_rev2_rssi_cal(dev, B43_NPHY_RSSI_X); - b43_nphy_rev2_rssi_cal(dev, B43_NPHY_RSSI_Y); + gain[0] = 0; + gain[1] = 0; } -} -/************************************************** - * Workarounds - **************************************************/ + for (i = 0; i < 2; i++) { + if (nphy->elna_gain_config) { + data[0] = 19 + gain[i]; + data[1] = 25 + gain[i]; + data[2] = 25 + gain[i]; + data[3] = 25 + gain[i]; + } else { + data[0] = lna_gain[0] + gain[i]; + data[1] = lna_gain[1] + gain[i]; + data[2] = lna_gain[2] + gain[i]; + data[3] = lna_gain[3] + gain[i]; + } + b43_ntab_write_bulk(dev, B43_NTAB16(i, 8), 4, data); -static void b43_nphy_gain_ctl_workarounds_rev3plus(struct b43_wldev *dev) -{ - struct ssb_sprom *sprom = dev->dev->bus_sprom; + minmax[i] = 23 + gain[i]; + } - bool ghz5; - bool ext_lna; - u16 rssi_gain; - struct nphy_gain_ctl_workaround_entry *e; - u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 }; - u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 }; + b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN, ~B43_NPHY_C1_MINGAIN, + minmax[0] << B43_NPHY_C1_MINGAIN_SHIFT); + b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN, ~B43_NPHY_C2_MINGAIN, + minmax[1] << 
B43_NPHY_C2_MINGAIN_SHIFT); - /* Prepare values */ - ghz5 = b43_phy_read(dev, B43_NPHY_BANDCTL) - & B43_NPHY_BANDCTL_5GHZ; - ext_lna = ghz5 ? sprom->boardflags_hi & B43_BFH_EXTLNA_5GHZ : - sprom->boardflags_lo & B43_BFL_EXTLNA; - e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna); - if (ghz5 && dev->phy.rev >= 5) - rssi_gain = 0x90; - else - rssi_gain = 0x50; - - b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040); - - /* Set Clip 2 detect */ - b43_phy_set(dev, B43_NPHY_C1_CGAINI, - B43_NPHY_C1_CGAINI_CL2DETECT); - b43_phy_set(dev, B43_NPHY_C2_CGAINI, - B43_NPHY_C2_CGAINI_CL2DETECT); - - b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC, - 0x17); - b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAG1_IDAC, - 0x17); - b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG2_IDAC, 0xF0); - b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG2_IDAC, 0xF0); - b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_POLE, 0x00); - b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_POLE, 0x00); - b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_GAIN, - rssi_gain); - b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_GAIN, - rssi_gain); - b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAA1_IDAC, - 0x17); - b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAA1_IDAC, - 0x17); - b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA2_IDAC, 0xFF); - b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA2_IDAC, 0xFF); - - b43_ntab_write_bulk(dev, B43_NTAB8(0, 8), 4, e->lna1_gain); - b43_ntab_write_bulk(dev, B43_NTAB8(1, 8), 4, e->lna1_gain); - b43_ntab_write_bulk(dev, B43_NTAB8(0, 16), 4, e->lna2_gain); - b43_ntab_write_bulk(dev, B43_NTAB8(1, 16), 4, e->lna2_gain); - b43_ntab_write_bulk(dev, B43_NTAB8(0, 32), 10, e->gain_db); - b43_ntab_write_bulk(dev, B43_NTAB8(1, 32), 10, e->gain_db); - b43_ntab_write_bulk(dev, B43_NTAB8(2, 32), 10, e->gain_bits); - b43_ntab_write_bulk(dev, B43_NTAB8(3, 32), 10, e->gain_bits); - b43_ntab_write_bulk(dev, B43_NTAB8(0, 0x40), 6, lpf_gain); - b43_ntab_write_bulk(dev, B43_NTAB8(1, 0x40), 6, lpf_gain); - b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits); - b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits); - - b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain); - b43_phy_write(dev, 0x2A7, e->init_gain); - b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2, - e->rfseq_init); - - /* TODO: check defines. 
Do not match variables names */ - b43_phy_write(dev, B43_NPHY_C1_CLIP1_MEDGAIN, e->cliphi_gain); - b43_phy_write(dev, 0x2A9, e->cliphi_gain); - b43_phy_write(dev, B43_NPHY_C1_CLIP2_GAIN, e->clipmd_gain); - b43_phy_write(dev, 0x2AB, e->clipmd_gain); - b43_phy_write(dev, B43_NPHY_C2_CLIP1_HIGAIN, e->cliplo_gain); - b43_phy_write(dev, 0x2AD, e->cliplo_gain); - - b43_phy_maskset(dev, 0x27D, 0xFF00, e->crsmin); - b43_phy_maskset(dev, 0x280, 0xFF00, e->crsminl); - b43_phy_maskset(dev, 0x283, 0xFF00, e->crsminu); - b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip); - b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip); - b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES, - ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip); - b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES, - ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip); - b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C); + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 0); } -static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev) +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */ +static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; + struct ssb_sprom *sprom = dev->dev->bus_sprom; + /* PHY rev 0, 1, 2 */ u8 i, j; u8 code; u16 tmp; u8 rfseq_events[3] = { 6, 8, 7 }; u8 rfseq_delays[3] = { 10, 30, 1 }; - /* Set Clip 2 detect */ - b43_phy_set(dev, B43_NPHY_C1_CGAINI, B43_NPHY_C1_CGAINI_CL2DETECT); - b43_phy_set(dev, B43_NPHY_C2_CGAINI, B43_NPHY_C2_CGAINI_CL2DETECT); - - /* Set narrowband clip threshold */ - b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84); - b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84); - - if (!dev->phy.is_40mhz) { - /* Set dwell lengths */ - b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B); - b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B); - b43_phy_write(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009); - b43_phy_write(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009); - } - - /* Set wideband clip 2 threshold */ - b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES, - ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, 21); - b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES, - ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, 21); - - if (!dev->phy.is_40mhz) { - b43_phy_maskset(dev, B43_NPHY_C1_CGAINI, - ~B43_NPHY_C1_CGAINI_GAINBKOFF, 0x1); - b43_phy_maskset(dev, B43_NPHY_C2_CGAINI, - ~B43_NPHY_C2_CGAINI_GAINBKOFF, 0x1); - b43_phy_maskset(dev, B43_NPHY_C1_CCK_CGAINI, - ~B43_NPHY_C1_CCK_CGAINI_GAINBKOFF, 0x1); - b43_phy_maskset(dev, B43_NPHY_C2_CCK_CGAINI, - ~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1); - } - - b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C); + /* PHY rev >= 3 */ + bool ghz5; + bool ext_lna; + u16 rssi_gain; + struct nphy_gain_ctl_workaround_entry *e; + u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 }; + u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 }; - if (nphy->gain_boost) { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ && - dev->phy.is_40mhz) - code = 4; + if (dev->phy.rev >= 3) { + /* Prepare values */ + ghz5 = b43_phy_read(dev, B43_NPHY_BANDCTL) + & B43_NPHY_BANDCTL_5GHZ; + ext_lna = sprom->boardflags_lo & B43_BFL_EXTLNA; + e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna); + if (ghz5 && dev->phy.rev >= 5) + rssi_gain = 0x90; else - code = 5; + rssi_gain = 0x50; + + b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040); + + /* Set Clip 2 detect */ + b43_phy_set(dev, B43_NPHY_C1_CGAINI, + B43_NPHY_C1_CGAINI_CL2DETECT); + b43_phy_set(dev, B43_NPHY_C2_CGAINI, + B43_NPHY_C2_CGAINI_CL2DETECT); + + b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC, + 
0x17); + b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAG1_IDAC, + 0x17); + b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG2_IDAC, 0xF0); + b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG2_IDAC, 0xF0); + b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_POLE, 0x00); + b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_POLE, 0x00); + b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_GAIN, + rssi_gain); + b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_GAIN, + rssi_gain); + b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAA1_IDAC, + 0x17); + b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAA1_IDAC, + 0x17); + b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA2_IDAC, 0xFF); + b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA2_IDAC, 0xFF); + + b43_ntab_write_bulk(dev, B43_NTAB8(0, 8), 4, e->lna1_gain); + b43_ntab_write_bulk(dev, B43_NTAB8(1, 8), 4, e->lna1_gain); + b43_ntab_write_bulk(dev, B43_NTAB8(0, 16), 4, e->lna2_gain); + b43_ntab_write_bulk(dev, B43_NTAB8(1, 16), 4, e->lna2_gain); + b43_ntab_write_bulk(dev, B43_NTAB8(0, 32), 10, e->gain_db); + b43_ntab_write_bulk(dev, B43_NTAB8(1, 32), 10, e->gain_db); + b43_ntab_write_bulk(dev, B43_NTAB8(2, 32), 10, e->gain_bits); + b43_ntab_write_bulk(dev, B43_NTAB8(3, 32), 10, e->gain_bits); + b43_ntab_write_bulk(dev, B43_NTAB8(0, 0x40), 6, lpf_gain); + b43_ntab_write_bulk(dev, B43_NTAB8(1, 0x40), 6, lpf_gain); + b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits); + b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits); + + b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain); + b43_phy_write(dev, 0x2A7, e->init_gain); + b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2, + e->rfseq_init); + b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain); + + /* TODO: check defines. Do not match variables names */ + b43_phy_write(dev, B43_NPHY_C1_CLIP1_MEDGAIN, e->cliphi_gain); + b43_phy_write(dev, 0x2A9, e->cliphi_gain); + b43_phy_write(dev, B43_NPHY_C1_CLIP2_GAIN, e->clipmd_gain); + b43_phy_write(dev, 0x2AB, e->clipmd_gain); + b43_phy_write(dev, B43_NPHY_C2_CLIP1_HIGAIN, e->cliplo_gain); + b43_phy_write(dev, 0x2AD, e->cliplo_gain); + + b43_phy_maskset(dev, 0x27D, 0xFF00, e->crsmin); + b43_phy_maskset(dev, 0x280, 0xFF00, e->crsminl); + b43_phy_maskset(dev, 0x283, 0xFF00, e->crsminu); + b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip); + b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip); + b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES, + ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip); + b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES, + ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip); + b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C); } else { - code = dev->phy.is_40mhz ? 
6 : 7; - } - - /* Set HPVGA2 index */ - b43_phy_maskset(dev, B43_NPHY_C1_INITGAIN, ~B43_NPHY_C1_INITGAIN_HPVGA2, - code << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT); - b43_phy_maskset(dev, B43_NPHY_C2_INITGAIN, ~B43_NPHY_C2_INITGAIN_HPVGA2, - code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT); + /* Set Clip 2 detect */ + b43_phy_set(dev, B43_NPHY_C1_CGAINI, + B43_NPHY_C1_CGAINI_CL2DETECT); + b43_phy_set(dev, B43_NPHY_C2_CGAINI, + B43_NPHY_C2_CGAINI_CL2DETECT); + + /* Set narrowband clip threshold */ + b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84); + b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84); + + if (!dev->phy.is_40mhz) { + /* Set dwell lengths */ + b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B); + b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B); + b43_phy_write(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009); + b43_phy_write(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009); + } - b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06); - /* specs say about 2 loops, but wl does 4 */ - for (i = 0; i < 4; i++) - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, (code << 8 | 0x7C)); + /* Set wideband clip 2 threshold */ + b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES, + ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, + 21); + b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES, + ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, + 21); + + if (!dev->phy.is_40mhz) { + b43_phy_maskset(dev, B43_NPHY_C1_CGAINI, + ~B43_NPHY_C1_CGAINI_GAINBKOFF, 0x1); + b43_phy_maskset(dev, B43_NPHY_C2_CGAINI, + ~B43_NPHY_C2_CGAINI_GAINBKOFF, 0x1); + b43_phy_maskset(dev, B43_NPHY_C1_CCK_CGAINI, + ~B43_NPHY_C1_CCK_CGAINI_GAINBKOFF, 0x1); + b43_phy_maskset(dev, B43_NPHY_C2_CCK_CGAINI, + ~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1); + } - b43_nphy_adjust_lna_gain_table(dev); + b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C); - if (nphy->elna_gain_config) { - b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808); - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0); - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); + if (nphy->gain_boost) { + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ && + dev->phy.is_40mhz) + code = 4; + else + code = 5; + } else { + code = dev->phy.is_40mhz ? 6 : 7; + } - b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0C08); - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0); - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); + /* Set HPVGA2 index */ + b43_phy_maskset(dev, B43_NPHY_C1_INITGAIN, + ~B43_NPHY_C1_INITGAIN_HPVGA2, + code << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT); + b43_phy_maskset(dev, B43_NPHY_C2_INITGAIN, + ~B43_NPHY_C2_INITGAIN_HPVGA2, + code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT); b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06); /* specs say about 2 loops, but wl does 4 */ for (i = 0; i < 4; i++) b43_phy_write(dev, B43_NPHY_TABLE_DATALO, - (code << 8 | 0x74)); - } + (code << 8 | 0x7C)); - if (dev->phy.rev == 2) { - for (i = 0; i < 4; i++) { - b43_phy_write(dev, B43_NPHY_TABLE_ADDR, - (0x0400 * i) + 0x0020); - for (j = 0; j < 21; j++) { - tmp = j * (i < 2 ? 
3 : 1); - b43_phy_write(dev, - B43_NPHY_TABLE_DATALO, tmp); - } + b43_nphy_adjust_lna_gain_table(dev); + + if (nphy->elna_gain_config) { + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); + + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0C08); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); + + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06); + /* specs say about 2 loops, but wl does 4 */ + for (i = 0; i < 4; i++) + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, + (code << 8 | 0x74)); } - } - b43_nphy_set_rf_sequence(dev, 5, rfseq_events, rfseq_delays, 3); - b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1, - ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF, - 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT); + if (dev->phy.rev == 2) { + for (i = 0; i < 4; i++) { + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, + (0x0400 * i) + 0x0020); + for (j = 0; j < 21; j++) { + tmp = j * (i < 2 ? 3 : 1); + b43_phy_write(dev, + B43_NPHY_TABLE_DATALO, tmp); + } + } + } - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) - b43_phy_maskset(dev, B43_PHY_N(0xC5D), 0xFF80, 4); -} + b43_nphy_set_rf_sequence(dev, 5, + rfseq_events, rfseq_delays, 3); + b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1, + ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF, + 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT); -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */ -static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev) -{ - if (dev->phy.rev >= 3) - b43_nphy_gain_ctl_workarounds_rev3plus(dev); - else - b43_nphy_gain_ctl_workarounds_rev1_2(dev); + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + b43_phy_maskset(dev, B43_PHY_N(0xC5D), + 0xFF80, 4); + } } static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) @@ -1708,8 +1493,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) struct ssb_sprom *sprom = dev->dev->bus_sprom; /* TX to RX */ - u8 tx2rx_events[8] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F }; - u8 tx2rx_delays[8] = { 8, 4, 2, 2, 4, 4, 6, 1 }; + u8 tx2rx_events[9] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F }; + u8 tx2rx_delays[9] = { 8, 4, 2, 2, 4, 4, 6, 1 }; /* RX to TX */ u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3, 0x1F }; @@ -1720,9 +1505,6 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) u16 tmp16; u32 tmp32; - b43_phy_write(dev, 0x23f, 0x1f8); - b43_phy_write(dev, 0x240, 0x1f8); - tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0)); tmp32 &= 0xffffff; b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32); @@ -1738,13 +1520,12 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) b43_phy_write(dev, 0x2AE, 0x000C); /* TX to RX */ - b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays, - ARRAY_SIZE(tx2rx_events)); + b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays, 9); /* RX to TX */ if (b43_nphy_ipa(dev)) - b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa, - rx2tx_delays_ipa, ARRAY_SIZE(rx2tx_events_ipa)); + b43_nphy_set_rf_sequence(dev, 1, rx2tx_events_ipa, + rx2tx_delays_ipa, 9); if (nphy->hw_phyrxchain != 3 && nphy->hw_phyrxchain != nphy->hw_phytxchain) { if (b43_nphy_ipa(dev)) { @@ -1752,8 +1533,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) rx2tx_delays[6] = 1; 
rx2tx_events[7] = 0x1F; } - b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays, - ARRAY_SIZE(rx2tx_events)); + b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays, 9); } tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ? @@ -1765,10 +1545,10 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D); b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D); - b43_nphy_gain_ctl_workarounds(dev); + b43_nphy_gain_ctrl_workarounds(dev); - b43_ntab_write(dev, B43_NTAB16(8, 0), 2); - b43_ntab_write(dev, B43_NTAB16(8, 16), 2); + b43_ntab_write(dev, B43_NTAB32(8, 0), 2); + b43_ntab_write(dev, B43_NTAB32(8, 16), 2); /* TODO */ @@ -1780,8 +1560,6 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07); b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88); b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88); - b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_CMFB_IDAC, 0x00); - b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_CMFB_IDAC, 0x00); b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00); b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00); @@ -1806,18 +1584,18 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) 0x70); } - b43_phy_write(dev, 0x224, 0x03eb); - b43_phy_write(dev, 0x225, 0x03eb); - b43_phy_write(dev, 0x226, 0x0341); - b43_phy_write(dev, 0x227, 0x0341); - b43_phy_write(dev, 0x228, 0x042b); - b43_phy_write(dev, 0x229, 0x042b); - b43_phy_write(dev, 0x22a, 0x0381); - b43_phy_write(dev, 0x22b, 0x0381); - b43_phy_write(dev, 0x22c, 0x042b); - b43_phy_write(dev, 0x22d, 0x042b); - b43_phy_write(dev, 0x22e, 0x0381); - b43_phy_write(dev, 0x22f, 0x0381); + b43_phy_write(dev, 0x224, 0x039C); + b43_phy_write(dev, 0x225, 0x0357); + b43_phy_write(dev, 0x226, 0x0317); + b43_phy_write(dev, 0x227, 0x02D7); + b43_phy_write(dev, 0x228, 0x039C); + b43_phy_write(dev, 0x229, 0x0357); + b43_phy_write(dev, 0x22A, 0x0317); + b43_phy_write(dev, 0x22B, 0x02D7); + b43_phy_write(dev, 0x22C, 0x039C); + b43_phy_write(dev, 0x22D, 0x0357); + b43_phy_write(dev, 0x22E, 0x0317); + b43_phy_write(dev, 0x22F, 0x02D7); } static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev) @@ -1868,7 +1646,7 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev) b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7); b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7); - b43_nphy_gain_ctl_workarounds(dev); + b43_nphy_gain_ctrl_workarounds(dev); if (dev->phy.rev < 2) { if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2) @@ -1928,976 +1706,984 @@ static void b43_nphy_workarounds(struct b43_wldev *dev) b43_nphy_stay_in_carrier_search(dev, 0); } -/************************************************** - * Tx/Rx common - **************************************************/ +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */ +static int b43_nphy_load_samples(struct b43_wldev *dev, + struct b43_c32 *samples, u16 len) { + struct b43_phy_n *nphy = dev->phy.n; + u16 i; + u32 *data; + + data = kzalloc(len * sizeof(u32), GFP_KERNEL); + if (!data) { + b43err(dev->wl, "allocation for samples loading failed\n"); + return -ENOMEM; + } + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 1); -/* - * Transmits a known value for LO calibration - * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone - */ -static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val, - bool iqmode, bool dac_test) -{ - u16 samp = 
b43_nphy_gen_load_samples(dev, freq, max_val, dac_test); - if (samp == 0) - return -1; - b43_nphy_run_samples(dev, samp, 0xFFFF, 0, iqmode, dac_test); + for (i = 0; i < len; i++) { + data[i] = (samples[i].i & 0x3FF << 10); + data[i] |= samples[i].q & 0x3FF; + } + b43_ntab_write_bulk(dev, B43_NTAB32(17, 0), len, data); + + kfree(data); + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 0); return 0; } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */ -static void b43_nphy_update_txrx_chain(struct b43_wldev *dev) +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GenLoadSamples */ +static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max, + bool test) { - struct b43_phy_n *nphy = dev->phy.n; + int i; + u16 bw, len, rot, angle; + struct b43_c32 *samples; - bool override = false; - u16 chain = 0x33; - if (nphy->txrx_chain == 0) { - chain = 0x11; - override = true; - } else if (nphy->txrx_chain == 1) { - chain = 0x22; - override = true; + bw = (dev->phy.is_40mhz) ? 40 : 20; + len = bw << 3; + + if (test) { + if (b43_phy_read(dev, B43_NPHY_BBCFG) & B43_NPHY_BBCFG_RSTRX) + bw = 82; + else + bw = 80; + + if (dev->phy.is_40mhz) + bw <<= 1; + + len = bw << 1; } - b43_phy_maskset(dev, B43_NPHY_RFSEQCA, - ~(B43_NPHY_RFSEQCA_TXEN | B43_NPHY_RFSEQCA_RXEN), - chain); + samples = kcalloc(len, sizeof(struct b43_c32), GFP_KERNEL); + if (!samples) { + b43err(dev->wl, "allocation for samples generation failed\n"); + return 0; + } + rot = (((freq * 36) / bw) << 16) / 100; + angle = 0; - if (override) - b43_phy_set(dev, B43_NPHY_RFSEQMODE, - B43_NPHY_RFSEQMODE_CAOVER); - else - b43_phy_mask(dev, B43_NPHY_RFSEQMODE, - ~B43_NPHY_RFSEQMODE_CAOVER); + for (i = 0; i < len; i++) { + samples[i] = b43_cordic(angle); + angle += rot; + samples[i].q = CORDIC_CONVERT(samples[i].q * max); + samples[i].i = CORDIC_CONVERT(samples[i].i * max); + } + + i = b43_nphy_load_samples(dev, samples, len); + kfree(samples); + return (i < 0) ? 
0 : len; } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */ -static void b43_nphy_stop_playback(struct b43_wldev *dev) +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RunSamples */ +static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops, + u16 wait, bool iqmode, bool dac_test) { struct b43_phy_n *nphy = dev->phy.n; - u16 tmp; + int i; + u16 seq_mode; + u32 tmp; if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 1); - - tmp = b43_phy_read(dev, B43_NPHY_SAMP_STAT); - if (tmp & 0x1) - b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP); - else if (tmp & 0x2) - b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF); - - b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004); + b43_nphy_stay_in_carrier_search(dev, true); - if (nphy->bb_mult_save & 0x80000000) { - tmp = nphy->bb_mult_save & 0xFFFF; - b43_ntab_write(dev, B43_NTAB16(15, 87), tmp); - nphy->bb_mult_save = 0; + if ((nphy->bb_mult_save & 0x80000000) == 0) { + tmp = b43_ntab_read(dev, B43_NTAB16(15, 87)); + nphy->bb_mult_save = (tmp & 0xFFFF) | 0x80000000; } - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 0); -} + if (!dev->phy.is_40mhz) + tmp = 0x6464; + else + tmp = 0x4747; + b43_ntab_write(dev, B43_NTAB16(15, 87), tmp); -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */ -static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core, - struct nphy_txgains target, - struct nphy_iqcal_params *params) -{ - int i, j, indx; - u16 gain; + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, false); - if (dev->phy.rev >= 3) { - params->txgm = target.txgm[core]; - params->pga = target.pga[core]; - params->pad = target.pad[core]; - params->ipa = target.ipa[core]; - params->cal_gain = (params->txgm << 12) | (params->pga << 8) | - (params->pad << 4) | (params->ipa); - for (j = 0; j < 5; j++) - params->ncorr[j] = 0x79; - } else { - gain = (target.pad[core]) | (target.pga[core] << 4) | - (target.txgm[core] << 8); + b43_phy_write(dev, B43_NPHY_SAMP_DEPCNT, (samps - 1)); - indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 
- 1 : 0; - for (i = 0; i < 9; i++) - if (tbl_iqcal_gainparams[indx][i][0] == gain) - break; - i = min(i, 8); + if (loops != 0xFFFF) + b43_phy_write(dev, B43_NPHY_SAMP_LOOPCNT, (loops - 1)); + else + b43_phy_write(dev, B43_NPHY_SAMP_LOOPCNT, loops); - params->txgm = tbl_iqcal_gainparams[indx][i][1]; - params->pga = tbl_iqcal_gainparams[indx][i][2]; - params->pad = tbl_iqcal_gainparams[indx][i][3]; - params->cal_gain = (params->txgm << 7) | (params->pga << 4) | - (params->pad << 2); - for (j = 0; j < 4; j++) - params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j]; - } -} + b43_phy_write(dev, B43_NPHY_SAMP_WAITCNT, wait); -/************************************************** - * Tx and Rx - **************************************************/ + seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE); -void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna) -{//TODO -} + b43_phy_set(dev, B43_NPHY_RFSEQMODE, B43_NPHY_RFSEQMODE_CAOVER); + if (iqmode) { + b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF); + b43_phy_set(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8000); + } else { + if (dac_test) + b43_phy_write(dev, B43_NPHY_SAMP_CMD, 5); + else + b43_phy_write(dev, B43_NPHY_SAMP_CMD, 1); + } + for (i = 0; i < 100; i++) { + if (b43_phy_read(dev, B43_NPHY_RFSEQST) & 1) { + i = 0; + break; + } + udelay(10); + } + if (i) + b43err(dev->wl, "run samples timeout\n"); -static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev) -{//TODO + b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode); } -static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev, - bool ignore_tssi) -{//TODO - return B43_TXPWR_RES_DONE; +/* + * Transmits a known value for LO calibration + * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone + */ +static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val, + bool iqmode, bool dac_test) +{ + u16 samp = b43_nphy_gen_load_samples(dev, freq, max_val, dac_test); + if (samp == 0) + return -1; + b43_nphy_run_samples(dev, samp, 0xFFFF, 0, iqmode, dac_test); + return 0; } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */ -static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable) +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */ +static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; - u8 i; - u16 bmask, val, tmp; - enum ieee80211_band band = b43_current_band(dev->wl); + int i, j; + u32 tmp; + u32 cur_real, cur_imag, real_part, imag_part; - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 1); + u16 buffer[7]; - nphy->txpwrctrl = enable; - if (!enable) { - if (dev->phy.rev >= 3 && - (b43_phy_read(dev, B43_NPHY_TXPCTL_CMD) & - (B43_NPHY_TXPCTL_CMD_COEFF | - B43_NPHY_TXPCTL_CMD_HWPCTLEN | - B43_NPHY_TXPCTL_CMD_PCTLEN))) { - /* We disable enabled TX pwr ctl, save it's state */ - nphy->tx_pwr_idx[0] = b43_phy_read(dev, - B43_NPHY_C1_TXPCTL_STAT) & 0x7f; - nphy->tx_pwr_idx[1] = b43_phy_read(dev, - B43_NPHY_C2_TXPCTL_STAT) & 0x7f; - } + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, true); - b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6840); - for (i = 0; i < 84; i++) - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0); + b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer); - b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6C40); - for (i = 0; i < 84; i++) - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0); + for (i = 0; i < 2; i++) { + tmp = ((buffer[i * 2] & 0x3FF) << 10) | + (buffer[i * 2 + 1] & 0x3FF); + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, + (((i + 26) << 
10) | 320)); + for (j = 0; j < 128; j++) { + b43_phy_write(dev, B43_NPHY_TABLE_DATAHI, + ((tmp >> 16) & 0xFFFF)); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, + (tmp & 0xFFFF)); + } + } - tmp = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN; - if (dev->phy.rev >= 3) - tmp |= B43_NPHY_TXPCTL_CMD_PCTLEN; - b43_phy_mask(dev, B43_NPHY_TXPCTL_CMD, ~tmp); + for (i = 0; i < 2; i++) { + tmp = buffer[5 + i]; + real_part = (tmp >> 8) & 0xFF; + imag_part = (tmp & 0xFF); + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, + (((i + 26) << 10) | 448)); if (dev->phy.rev >= 3) { - b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100); - b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100); - } else { - b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000); + cur_real = real_part; + cur_imag = imag_part; + tmp = ((cur_real & 0xFF) << 8) | (cur_imag & 0xFF); } - if (dev->phy.rev == 2) - b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, - ~B43_NPHY_BPHY_CTL3_SCALE, 0x53); - else if (dev->phy.rev < 2) - b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, - ~B43_NPHY_BPHY_CTL3_SCALE, 0x5A); + for (j = 0; j < 128; j++) { + if (dev->phy.rev < 3) { + cur_real = (real_part * loscale[j] + 128) >> 8; + cur_imag = (imag_part * loscale[j] + 128) >> 8; + tmp = ((cur_real & 0xFF) << 8) | + (cur_imag & 0xFF); + } + b43_phy_write(dev, B43_NPHY_TABLE_DATAHI, + ((tmp >> 16) & 0xFFFF)); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, + (tmp & 0xFFFF)); + } + } - if (dev->phy.rev < 2 && dev->phy.is_40mhz) - b43_hf_write(dev, b43_hf_read(dev) | B43_HF_TSSIRPSMW); - } else { - b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84, - nphy->adj_pwr_tbl); - b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84, - nphy->adj_pwr_tbl); - - bmask = B43_NPHY_TXPCTL_CMD_COEFF | - B43_NPHY_TXPCTL_CMD_HWPCTLEN; - /* wl does useless check for "enable" param here */ - val = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN; - if (dev->phy.rev >= 3) { - bmask |= B43_NPHY_TXPCTL_CMD_PCTLEN; - if (val) - val |= B43_NPHY_TXPCTL_CMD_PCTLEN; - } - b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val); - - if (band == IEEE80211_BAND_5GHZ) { - b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, - ~B43_NPHY_TXPCTL_CMD_INIT, 0x64); - if (dev->phy.rev > 1) - b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT, - ~B43_NPHY_TXPCTL_INIT_PIDXI1, - 0x64); - } + if (dev->phy.rev >= 3) { + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF); + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF); + } - if (dev->phy.rev >= 3) { - if (nphy->tx_pwr_idx[0] != 128 && - nphy->tx_pwr_idx[1] != 128) { - /* Recover TX pwr ctl state */ - b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, - ~B43_NPHY_TXPCTL_CMD_INIT, - nphy->tx_pwr_idx[0]); - if (dev->phy.rev > 1) - b43_phy_maskset(dev, - B43_NPHY_TXPCTL_INIT, - ~0xff, nphy->tx_pwr_idx[1]); - } - } + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, false); +} - if (dev->phy.rev >= 3) { - b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x100); - b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x100); - } else { - b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4000); - } +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */ +static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd, + u8 *events, u8 *delays, u8 length) +{ + struct b43_phy_n *nphy = dev->phy.n; + u8 i; + u8 end = (dev->phy.rev >= 3) ? 
0x1F : 0x0F; + u16 offset1 = cmd << 4; + u16 offset2 = offset1 + 0x80; - if (dev->phy.rev == 2) - b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x3b); - else if (dev->phy.rev < 2) - b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x40); + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, true); - if (dev->phy.rev < 2 && dev->phy.is_40mhz) - b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_TSSIRPSMW); + b43_ntab_write_bulk(dev, B43_NTAB8(7, offset1), length, events); + b43_ntab_write_bulk(dev, B43_NTAB8(7, offset2), length, delays); - if (b43_nphy_ipa(dev)) { - b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x4); - b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x4); - } + for (i = length; i < 16; i++) { + b43_ntab_write(dev, B43_NTAB8(7, offset1 + i), end); + b43_ntab_write(dev, B43_NTAB8(7, offset2 + i), 1); } if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 0); + b43_nphy_stay_in_carrier_search(dev, false); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */ -static void b43_nphy_tx_power_fix(struct b43_wldev *dev) +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */ +static void b43_nphy_force_rf_sequence(struct b43_wldev *dev, + enum b43_nphy_rf_sequence seq) { - struct b43_phy_n *nphy = dev->phy.n; - struct ssb_sprom *sprom = dev->dev->bus_sprom; - - u8 txpi[2], bbmult, i; - u16 tmp, radio_gain, dac_gain; - u16 freq = dev->phy.channel_freq; - u32 txgain; - /* u32 gaintbl; rev3+ */ + static const u16 trigger[] = { + [B43_RFSEQ_RX2TX] = B43_NPHY_RFSEQTR_RX2TX, + [B43_RFSEQ_TX2RX] = B43_NPHY_RFSEQTR_TX2RX, + [B43_RFSEQ_RESET2RX] = B43_NPHY_RFSEQTR_RST2RX, + [B43_RFSEQ_UPDATE_GAINH] = B43_NPHY_RFSEQTR_UPGH, + [B43_RFSEQ_UPDATE_GAINL] = B43_NPHY_RFSEQTR_UPGL, + [B43_RFSEQ_UPDATE_GAINU] = B43_NPHY_RFSEQTR_UPGU, + }; + int i; + u16 seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE); - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 1); + B43_WARN_ON(seq >= ARRAY_SIZE(trigger)); - if (dev->phy.rev >= 7) { - txpi[0] = txpi[1] = 30; - } else if (dev->phy.rev >= 3) { - txpi[0] = 40; - txpi[1] = 40; - } else if (sprom->revision < 4) { - txpi[0] = 72; - txpi[1] = 72; - } else { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { - txpi[0] = sprom->txpid2g[0]; - txpi[1] = sprom->txpid2g[1]; - } else if (freq >= 4900 && freq < 5100) { - txpi[0] = sprom->txpid5gl[0]; - txpi[1] = sprom->txpid5gl[1]; - } else if (freq >= 5100 && freq < 5500) { - txpi[0] = sprom->txpid5g[0]; - txpi[1] = sprom->txpid5g[1]; - } else if (freq >= 5500) { - txpi[0] = sprom->txpid5gh[0]; - txpi[1] = sprom->txpid5gh[1]; - } else { - txpi[0] = 91; - txpi[1] = 91; - } + b43_phy_set(dev, B43_NPHY_RFSEQMODE, + B43_NPHY_RFSEQMODE_CAOVER | B43_NPHY_RFSEQMODE_TROVER); + b43_phy_set(dev, B43_NPHY_RFSEQTR, trigger[seq]); + for (i = 0; i < 200; i++) { + if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & trigger[seq])) + goto ok; + msleep(1); } - if (dev->phy.rev < 7 && - (txpi[0] < 40 || txpi[0] > 100 || txpi[1] < 40 || txpi[1] > 100)) - txpi[0] = txpi[1] = 91; + b43err(dev->wl, "RF sequence status timeout\n"); +ok: + b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode); +} - /* - for (i = 0; i < 2; i++) { - nphy->txpwrindex[i].index_internal = txpi[i]; - nphy->txpwrindex[i].index_internal_save = txpi[i]; - } - */ +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */ +static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field, + u16 value, u8 core, bool off) +{ + int i; + u8 index = fls(field); + u8 addr, en_addr, val_addr; + /* we expect only one bit set */ + B43_WARN_ON(field & 
(~(1 << (index - 1)))); - for (i = 0; i < 2; i++) { - if (dev->phy.rev >= 3) { - if (b43_nphy_ipa(dev)) { - txgain = *(b43_nphy_get_ipa_gain_table(dev) + - txpi[i]); - } else if (b43_current_band(dev->wl) == - IEEE80211_BAND_5GHZ) { - /* FIXME: use 5GHz tables */ - txgain = - b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]]; - } else { - if (dev->phy.rev >= 5 && - sprom->fem.ghz5.extpa_gain == 3) - ; /* FIXME: 5GHz_txgain_HiPwrEPA */ - txgain = - b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]]; + if (dev->phy.rev >= 3) { + const struct nphy_rf_control_override_rev3 *rf_ctrl; + for (i = 0; i < 2; i++) { + if (index == 0 || index == 16) { + b43err(dev->wl, + "Unsupported RF Ctrl Override call\n"); + return; } - radio_gain = (txgain >> 16) & 0x1FFFF; - } else { - txgain = b43_ntab_tx_gain_rev0_1_2[txpi[i]]; - radio_gain = (txgain >> 16) & 0x1FFF; - } - if (dev->phy.rev >= 7) - dac_gain = (txgain >> 8) & 0x7; - else - dac_gain = (txgain >> 8) & 0x3F; - bbmult = txgain & 0xFF; + rf_ctrl = &tbl_rf_control_override_rev3[index - 1]; + en_addr = B43_PHY_N((i == 0) ? + rf_ctrl->en_addr0 : rf_ctrl->en_addr1); + val_addr = B43_PHY_N((i == 0) ? + rf_ctrl->val_addr0 : rf_ctrl->val_addr1); - if (dev->phy.rev >= 3) { - if (i == 0) - b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100); - else - b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100); + if (off) { + b43_phy_mask(dev, en_addr, ~(field)); + b43_phy_mask(dev, val_addr, + ~(rf_ctrl->val_mask)); + } else { + if (core == 0 || ((1 << core) & i) != 0) { + b43_phy_set(dev, en_addr, field); + b43_phy_maskset(dev, val_addr, + ~(rf_ctrl->val_mask), + (value << rf_ctrl->val_shift)); + } + } + } + } else { + const struct nphy_rf_control_override_rev2 *rf_ctrl; + if (off) { + b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~(field)); + value = 0; } else { - b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000); + b43_phy_set(dev, B43_NPHY_RFCTL_OVER, field); } - if (i == 0) - b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN1, dac_gain); - else - b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN2, dac_gain); + for (i = 0; i < 2; i++) { + if (index <= 1 || index == 16) { + b43err(dev->wl, + "Unsupported RF Ctrl Override call\n"); + return; + } - b43_ntab_write(dev, B43_NTAB16(0x7, 0x110 + i), radio_gain); + if (index == 2 || index == 10 || + (index >= 13 && index <= 15)) { + core = 1; + } - tmp = b43_ntab_read(dev, B43_NTAB16(0xF, 0x57)); - if (i == 0) - tmp = (tmp & 0x00FF) | (bbmult << 8); - else - tmp = (tmp & 0xFF00) | bbmult; - b43_ntab_write(dev, B43_NTAB16(0xF, 0x57), tmp); + rf_ctrl = &tbl_rf_control_override_rev2[index - 2]; + addr = B43_PHY_N((i == 0) ? + rf_ctrl->addr0 : rf_ctrl->addr1); - if (b43_nphy_ipa(dev)) { - u32 tmp32; - u16 reg = (i == 0) ? 
- B43_NPHY_PAPD_EN0 : B43_NPHY_PAPD_EN1; - tmp32 = b43_ntab_read(dev, B43_NTAB32(26 + i, - 576 + txpi[i])); - b43_phy_maskset(dev, reg, 0xE00F, (u32) tmp32 << 4); - b43_phy_set(dev, reg, 0x4); + if ((core & (1 << i)) != 0) + b43_phy_maskset(dev, addr, ~(rf_ctrl->bmask), + (value << rf_ctrl->shift)); + + b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1); + b43_phy_set(dev, B43_NPHY_RFCTL_CMD, + B43_NPHY_RFCTL_CMD_START); + udelay(1); + b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, 0xFFFE); } } - - b43_phy_mask(dev, B43_NPHY_BPHY_CTL2, ~B43_NPHY_BPHY_CTL2_LUT); - - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 0); } -static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev) +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */ +static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field, + u16 value, u8 core) { - struct b43_phy *phy = &dev->phy; + u8 i, j; + u16 reg, tmp, val; - u8 core; - u16 r; /* routing */ - - if (phy->rev >= 7) { - for (core = 0; core < 2; core++) { - r = core ? 0x190 : 0x170; - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { - b43_radio_write(dev, r + 0x5, 0x5); - b43_radio_write(dev, r + 0x9, 0xE); - if (phy->rev != 5) - b43_radio_write(dev, r + 0xA, 0); - if (phy->rev != 7) - b43_radio_write(dev, r + 0xB, 1); - else - b43_radio_write(dev, r + 0xB, 0x31); - } else { - b43_radio_write(dev, r + 0x5, 0x9); - b43_radio_write(dev, r + 0x9, 0xC); - b43_radio_write(dev, r + 0xB, 0x0); - if (phy->rev != 5) - b43_radio_write(dev, r + 0xA, 1); - else - b43_radio_write(dev, r + 0xA, 0x31); - } - b43_radio_write(dev, r + 0x6, 0); - b43_radio_write(dev, r + 0x7, 0); - b43_radio_write(dev, r + 0x8, 3); - b43_radio_write(dev, r + 0xC, 0); - } - } else { - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) - b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x128); - else - b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x80); - b43_radio_write(dev, B2056_SYN_RESERVED_ADDR30, 0); - b43_radio_write(dev, B2056_SYN_GPIO_MASTER1, 0x29); - - for (core = 0; core < 2; core++) { - r = core ? B2056_TX1 : B2056_TX0; - - b43_radio_write(dev, r | B2056_TX_IQCAL_VCM_HG, 0); - b43_radio_write(dev, r | B2056_TX_IQCAL_IDAC, 0); - b43_radio_write(dev, r | B2056_TX_TSSI_VCM, 3); - b43_radio_write(dev, r | B2056_TX_TX_AMP_DET, 0); - b43_radio_write(dev, r | B2056_TX_TSSI_MISC1, 8); - b43_radio_write(dev, r | B2056_TX_TSSI_MISC2, 0); - b43_radio_write(dev, r | B2056_TX_TSSI_MISC3, 0); - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { - b43_radio_write(dev, r | B2056_TX_TX_SSI_MASTER, - 0x5); - if (phy->rev != 5) - b43_radio_write(dev, r | B2056_TX_TSSIA, - 0x00); - if (phy->rev >= 5) - b43_radio_write(dev, r | B2056_TX_TSSIG, - 0x31); - else - b43_radio_write(dev, r | B2056_TX_TSSIG, - 0x11); - b43_radio_write(dev, r | B2056_TX_TX_SSI_MUX, - 0xE); + B43_WARN_ON(dev->phy.rev < 3); + B43_WARN_ON(field > 4); + + for (i = 0; i < 2; i++) { + if ((core == 1 && i == 1) || (core == 2 && !i)) + continue; + + reg = (i == 0) ? 
+ B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2; + b43_phy_mask(dev, reg, 0xFBFF); + + switch (field) { + case 0: + b43_phy_write(dev, reg, 0); + b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); + break; + case 1: + if (!i) { + b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC1, + 0xFC3F, (value << 6)); + b43_phy_maskset(dev, B43_NPHY_TXF_40CO_B1S1, + 0xFFFE, 1); + b43_phy_set(dev, B43_NPHY_RFCTL_CMD, + B43_NPHY_RFCTL_CMD_START); + for (j = 0; j < 100; j++) { + if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_START) { + j = 0; + break; + } + udelay(10); + } + if (j) + b43err(dev->wl, + "intc override timeout\n"); + b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1, + 0xFFFE); + } else { + b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC2, + 0xFC3F, (value << 6)); + b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, + 0xFFFE, 1); + b43_phy_set(dev, B43_NPHY_RFCTL_CMD, + B43_NPHY_RFCTL_CMD_RXTX); + for (j = 0; j < 100; j++) { + if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_RXTX) { + j = 0; + break; + } + udelay(10); + } + if (j) + b43err(dev->wl, + "intc override timeout\n"); + b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, + 0xFFFE); + } + break; + case 2: + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + tmp = 0x0020; + val = value << 5; + } else { + tmp = 0x0010; + val = value << 4; + } + b43_phy_maskset(dev, reg, ~tmp, val); + break; + case 3: + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + tmp = 0x0001; + val = value; + } else { + tmp = 0x0004; + val = value << 2; + } + b43_phy_maskset(dev, reg, ~tmp, val); + break; + case 4: + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + tmp = 0x0002; + val = value << 1; } else { - b43_radio_write(dev, r | B2056_TX_TX_SSI_MASTER, - 0x9); - b43_radio_write(dev, r | B2056_TX_TSSIA, 0x31); - b43_radio_write(dev, r | B2056_TX_TSSIG, 0x0); - b43_radio_write(dev, r | B2056_TX_TX_SSI_MUX, - 0xC); + tmp = 0x0008; + val = value << 3; } + b43_phy_maskset(dev, reg, ~tmp, val); + break; } } } -/* - * Stop radio and transmit known signal. Then check received signal strength to - * get TSSI (Transmit Signal Strength Indicator). 
- * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlIdleTssi - */ -static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev) +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */ +static void b43_nphy_bphy_init(struct b43_wldev *dev) { - struct b43_phy *phy = &dev->phy; - struct b43_phy_n *nphy = dev->phy.n; + unsigned int i; + u16 val; - u32 tmp; - s32 rssi[4] = { }; + val = 0x1E1F; + for (i = 0; i < 16; i++) { + b43_phy_write(dev, B43_PHY_N_BMODE(0x88 + i), val); + val -= 0x202; + } + val = 0x3E3F; + for (i = 0; i < 16; i++) { + b43_phy_write(dev, B43_PHY_N_BMODE(0x98 + i), val); + val -= 0x202; + } + b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */ +static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale, + s8 offset, u8 core, u8 rail, + enum b43_nphy_rssi_type type) +{ + u16 tmp; + bool core1or5 = (core == 1) || (core == 5); + bool core2or5 = (core == 2) || (core == 5); - /* TODO: check if we can transmit */ + offset = clamp_val(offset, -32, 31); + tmp = ((scale & 0x3F) << 8) | (offset & 0x3F); - if (b43_nphy_ipa(dev)) - b43_nphy_ipa_internal_tssi_setup(dev); + if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_Z)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, tmp); + if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_Z)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, tmp); + if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_Z)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, tmp); + if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_Z)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, tmp); - if (phy->rev >= 7) - ; /* TODO: Override Rev7 with 0x2000, 0, 3, 0, 0 as arguments */ - else if (phy->rev >= 3) - b43_nphy_rf_control_override(dev, 0x2000, 0, 3, false); + if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_X)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, tmp); + if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_X)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, tmp); + if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_X)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, tmp); + if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_X)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, tmp); - b43_nphy_stop_playback(dev); - b43_nphy_tx_tone(dev, 0xFA0, 0, false, false); - udelay(20); - tmp = b43_nphy_poll_rssi(dev, 4, rssi, 1); - b43_nphy_stop_playback(dev); - b43_nphy_rssi_select(dev, 0, 0); + if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_Y)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, tmp); + if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_Y)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, tmp); + if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_Y)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, tmp); + if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_Y)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, tmp); - if (phy->rev >= 7) - ; /* TODO: Override Rev7 with 0x2000, 0, 3, 1, 0 as arguments */ - else if (phy->rev >= 3) - b43_nphy_rf_control_override(dev, 0x2000, 0, 3, true); + if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_TBD)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TBD, tmp); + if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_TBD)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TBD, tmp); + if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_TBD)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TBD, tmp); + if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_TBD)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TBD, 
tmp); - if (phy->rev >= 3) { - nphy->pwr_ctl_info[0].idle_tssi_5g = (tmp >> 24) & 0xFF; - nphy->pwr_ctl_info[1].idle_tssi_5g = (tmp >> 8) & 0xFF; - } else { - nphy->pwr_ctl_info[0].idle_tssi_5g = (tmp >> 16) & 0xFF; - nphy->pwr_ctl_info[1].idle_tssi_5g = tmp & 0xFF; - } - nphy->pwr_ctl_info[0].idle_tssi_2g = (tmp >> 24) & 0xFF; - nphy->pwr_ctl_info[1].idle_tssi_2g = (tmp >> 8) & 0xFF; + if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_PWRDET)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0I_PWRDET, tmp); + if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_PWRDET)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_PWRDET, tmp); + if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_PWRDET)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1I_PWRDET, tmp); + if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_PWRDET)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_PWRDET, tmp); + + if (core1or5 && (type == B43_NPHY_RSSI_TSSI_I)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TSSI, tmp); + if (core2or5 && (type == B43_NPHY_RSSI_TSSI_I)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TSSI, tmp); + + if (core1or5 && (type == B43_NPHY_RSSI_TSSI_Q)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TSSI, tmp); + if (core2or5 && (type == B43_NPHY_RSSI_TSSI_Q)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TSSI, tmp); } -static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev) +static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type) { - struct b43_phy *phy = &dev->phy; + u16 val; - const u32 *table = NULL; -#if 0 - TODO: b43_ntab_papd_pga_gain_delta_ipa_2* - u32 rfpwr_offset; - u8 pga_gain; - int i; -#endif + if (type < 3) + val = 0; + else if (type == 6) + val = 1; + else if (type == 3) + val = 2; + else + val = 3; - if (phy->rev >= 3) { - if (b43_nphy_ipa(dev)) { - table = b43_nphy_get_ipa_gain_table(dev); - } else { - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { - if (phy->rev == 3) - table = b43_ntab_tx_gain_rev3_5ghz; - if (phy->rev == 4) - table = b43_ntab_tx_gain_rev4_5ghz; - else - table = b43_ntab_tx_gain_rev5plus_5ghz; - } else { - table = b43_ntab_tx_gain_rev3plus_2ghz; - } + val = (val << 12) | (val << 14); + b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val); + b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val); + + if (type < 3) { + b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF, + (type + 1) << 4); + b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF, + (type + 1) << 4); + } + + if (code == 0) { + b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x3000); + if (type < 3) { + b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, + ~(B43_NPHY_RFCTL_CMD_RXEN | + B43_NPHY_RFCTL_CMD_CORESEL)); + b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, + ~(0x1 << 12 | + 0x1 << 5 | + 0x1 << 1 | + 0x1)); + b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, + ~B43_NPHY_RFCTL_CMD_START); + udelay(20); + b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1); } } else { - table = b43_ntab_tx_gain_rev0_1_2; + b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x3000); + if (type < 3) { + b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, + ~(B43_NPHY_RFCTL_CMD_RXEN | + B43_NPHY_RFCTL_CMD_CORESEL), + (B43_NPHY_RFCTL_CMD_RXEN | + code << B43_NPHY_RFCTL_CMD_CORESEL_SHIFT)); + b43_phy_set(dev, B43_NPHY_RFCTL_OVER, + (0x1 << 12 | + 0x1 << 5 | + 0x1 << 1 | + 0x1)); + b43_phy_set(dev, B43_NPHY_RFCTL_CMD, + B43_NPHY_RFCTL_CMD_START); + udelay(20); + b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1); + } } - b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128, table); - b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128, table); +} - if (phy->rev >= 3) { -#if 0 - nphy->gmval = (table[0] >> 16) & 
0x7000; - - for (i = 0; i < 128; i++) { - pga_gain = (table[i] >> 24) & 0xF; - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) - rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain]; - else - rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_5g[pga_gain]; - b43_ntab_write(dev, B43_NTAB32(26, 576 + i), - rfpwr_offset); - b43_ntab_write(dev, B43_NTAB32(27, 576 + i), - rfpwr_offset); - } -#endif - } -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */ -static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable) +static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type) { - struct b43_phy_n *nphy = dev->phy.n; - enum ieee80211_band band; - u16 tmp; + u8 i; + u16 reg, val; - if (!enable) { - nphy->rfctrl_intc1_save = b43_phy_read(dev, - B43_NPHY_RFCTL_INTC1); - nphy->rfctrl_intc2_save = b43_phy_read(dev, - B43_NPHY_RFCTL_INTC2); - band = b43_current_band(dev->wl); - if (dev->phy.rev >= 3) { - if (band == IEEE80211_BAND_5GHZ) - tmp = 0x600; - else - tmp = 0x480; - } else { - if (band == IEEE80211_BAND_5GHZ) - tmp = 0x180; - else - tmp = 0x120; - } - b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp); - b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp); + if (code == 0) { + b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, 0xFDFF); + b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, 0xFDFF); + b43_phy_mask(dev, B43_NPHY_AFECTL_C1, 0xFCFF); + b43_phy_mask(dev, B43_NPHY_AFECTL_C2, 0xFCFF); + b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S0, 0xFFDF); + b43_phy_mask(dev, B43_NPHY_TXF_40CO_B32S1, 0xFFDF); + b43_phy_mask(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0xFFC3); + b43_phy_mask(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0xFFC3); } else { - b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, - nphy->rfctrl_intc1_save); - b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, - nphy->rfctrl_intc2_save); - } -} + for (i = 0; i < 2; i++) { + if ((code == 1 && i == 1) || (code == 2 && !i)) + continue; -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */ -static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev) -{ - u16 tmp; + reg = (i == 0) ? + B43_NPHY_AFECTL_OVER1 : B43_NPHY_AFECTL_OVER; + b43_phy_maskset(dev, reg, 0xFDFF, 0x0200); - if (dev->phy.rev >= 3) { - if (b43_nphy_ipa(dev)) { - tmp = 4; - b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2, - (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp); - } + if (type < 3) { + reg = (i == 0) ? + B43_NPHY_AFECTL_C1 : + B43_NPHY_AFECTL_C2; + b43_phy_maskset(dev, reg, 0xFCFF, 0); - tmp = 1; - b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2, - (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp); - } -} + reg = (i == 0) ? + B43_NPHY_RFCTL_LUT_TRSW_UP1 : + B43_NPHY_RFCTL_LUT_TRSW_UP2; + b43_phy_maskset(dev, reg, 0xFFC3, 0); -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */ -static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est, - u16 samps, u8 time, bool wait) -{ - int i; - u16 tmp; + if (type == 0) + val = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 4 : 8; + else if (type == 1) + val = 16; + else + val = 32; + b43_phy_set(dev, reg, val); - b43_phy_write(dev, B43_NPHY_IQEST_SAMCNT, samps); - b43_phy_maskset(dev, B43_NPHY_IQEST_WT, ~B43_NPHY_IQEST_WT_VAL, time); - if (wait) - b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_MODE); - else - b43_phy_mask(dev, B43_NPHY_IQEST_CMD, ~B43_NPHY_IQEST_CMD_MODE); + reg = (i == 0) ? 
+ B43_NPHY_TXF_40CO_B1S0 : + B43_NPHY_TXF_40CO_B32S1; + b43_phy_set(dev, reg, 0x0020); + } else { + if (type == 6) + val = 0x0100; + else if (type == 3) + val = 0x0200; + else + val = 0x0300; - b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_START); + reg = (i == 0) ? + B43_NPHY_AFECTL_C1 : + B43_NPHY_AFECTL_C2; - for (i = 1000; i; i--) { - tmp = b43_phy_read(dev, B43_NPHY_IQEST_CMD); - if (!(tmp & B43_NPHY_IQEST_CMD_START)) { - est->i0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI0) << 16) | - b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO0); - est->q0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI0) << 16) | - b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO0); - est->iq0_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI0) << 16) | - b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO0); + b43_phy_maskset(dev, reg, 0xFCFF, val); + b43_phy_maskset(dev, reg, 0xF3FF, val << 2); - est->i1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI1) << 16) | - b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO1); - est->q1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI1) << 16) | - b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO1); - est->iq1_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI1) << 16) | - b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO1); - return; + if (type != 3 && type != 6) { + enum ieee80211_band band = + b43_current_band(dev->wl); + + if (b43_nphy_ipa(dev)) + val = (band == IEEE80211_BAND_5GHZ) ? 0xC : 0xE; + else + val = 0x11; + reg = (i == 0) ? 0x2000 : 0x3000; + reg |= B2055_PADDRV; + b43_radio_write16(dev, reg, val); + + reg = (i == 0) ? + B43_NPHY_AFECTL_OVER1 : + B43_NPHY_AFECTL_OVER; + b43_phy_set(dev, reg, 0x0200); + } + } } - udelay(10); } - memset(est, 0, sizeof(*est)); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */ -static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write, - struct b43_phy_n_iq_comp *pcomp) +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */ +static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code, u8 type) { - if (write) { - b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPA0, pcomp->a0); - b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPB0, pcomp->b0); - b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPA1, pcomp->a1); - b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPB1, pcomp->b1); - } else { - pcomp->a0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPA0); - pcomp->b0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPB0); - pcomp->a1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPA1); - pcomp->b1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPB1); - } + if (dev->phy.rev >= 3) + b43_nphy_rev3_rssi_select(dev, code, type); + else + b43_nphy_rev2_rssi_select(dev, code, type); } -#if 0 -/* Ready but not used anywhere */ -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */ -static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core) +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRssi2055Vcm */ +static void b43_nphy_set_rssi_2055_vcm(struct b43_wldev *dev, u8 type, u8 *buf) { - u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs; - - b43_phy_write(dev, B43_NPHY_RFSEQCA, regs[0]); - if (core == 0) { - b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[1]); - b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]); - } else { - b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]); - b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]); + int i; + for (i = 0; i < 2; i++) { + if (type == 2) { + if (i == 0) { + b43_radio_maskset(dev, B2055_C1_B0NB_RSSIVCM, + 0xFC, buf[0]); + b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5, + 0xFC, buf[1]); + } else { + b43_radio_maskset(dev, B2055_C2_B0NB_RSSIVCM, + 
0xFC, buf[2 * i]); + b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5, + 0xFC, buf[2 * i + 1]); + } + } else { + if (i == 0) + b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5, + 0xF3, buf[0] << 2); + else + b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5, + 0xF3, buf[2 * i + 1] << 2); + } } - b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[3]); - b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[4]); - b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, regs[5]); - b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, regs[6]); - b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, regs[7]); - b43_phy_write(dev, B43_NPHY_RFCTL_OVER, regs[8]); - b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]); - b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */ -static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core) +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PollRssi */ +static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf, + u8 nsamp) { - u8 rxval, txval; - u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs; - - regs[0] = b43_phy_read(dev, B43_NPHY_RFSEQCA); - if (core == 0) { - regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C1); - regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1); - } else { - regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2); - regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); - } - regs[3] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1); - regs[4] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2); - regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1); - regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2); - regs[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S1); - regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER); - regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0); - regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1); - - b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001); - b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001); - - b43_phy_maskset(dev, B43_NPHY_RFSEQCA, - ~B43_NPHY_RFSEQCA_RXDIS & 0xFFFF, - ((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT)); - b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN, - ((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT)); - b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN, - (core << B43_NPHY_RFSEQCA_RXEN_SHIFT)); - b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXDIS, - (core << B43_NPHY_RFSEQCA_TXDIS_SHIFT)); + int i; + int out; + u16 save_regs_phy[9]; + u16 s[2]; - if (core == 0) { - b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x0007); - b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0007); + if (dev->phy.rev >= 3) { + save_regs_phy[0] = b43_phy_read(dev, + B43_NPHY_RFCTL_LUT_TRSW_UP1); + save_regs_phy[1] = b43_phy_read(dev, + B43_NPHY_RFCTL_LUT_TRSW_UP2); + save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_C1); + save_regs_phy[3] = b43_phy_read(dev, B43_NPHY_AFECTL_C2); + save_regs_phy[4] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1); + save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); + save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0); + save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1); + save_regs_phy[8] = 0; } else { - b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x0007); - b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007); + save_regs_phy[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1); + save_regs_phy[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2); + save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); + save_regs_phy[3] = b43_phy_read(dev, B43_NPHY_RFCTL_CMD); + save_regs_phy[4] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER); + save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1); 
+ save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2); + save_regs_phy[7] = 0; + save_regs_phy[8] = 0; } - b43_nphy_rf_control_intc_override(dev, 2, 0, 3); - b43_nphy_rf_control_override(dev, 8, 0, 3, false); - b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX); + b43_nphy_rssi_select(dev, 5, type); - if (core == 0) { - rxval = 1; - txval = 8; - } else { - rxval = 4; - txval = 2; + if (dev->phy.rev < 2) { + save_regs_phy[8] = b43_phy_read(dev, B43_NPHY_GPIO_SEL); + b43_phy_write(dev, B43_NPHY_GPIO_SEL, 5); } - b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1)); - b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core)); -} -#endif - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */ -static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask) -{ - int i; - s32 iq; - u32 ii; - u32 qq; - int iq_nbits, qq_nbits; - int arsh, brsh; - u16 tmp, a, b; - - struct nphy_iq_est est; - struct b43_phy_n_iq_comp old; - struct b43_phy_n_iq_comp new = { }; - bool error = false; - - if (mask == 0) - return; - - b43_nphy_rx_iq_coeffs(dev, false, &old); - b43_nphy_rx_iq_coeffs(dev, true, &new); - b43_nphy_rx_iq_est(dev, &est, 0x4000, 32, false); - new = old; - - for (i = 0; i < 2; i++) { - if (i == 0 && (mask & 1)) { - iq = est.iq0_prod; - ii = est.i0_pwr; - qq = est.q0_pwr; - } else if (i == 1 && (mask & 2)) { - iq = est.iq1_prod; - ii = est.i1_pwr; - qq = est.q1_pwr; - } else { - continue; - } - - if (ii + qq < 2) { - error = true; - break; - } - - iq_nbits = fls(abs(iq)); - qq_nbits = fls(qq); - - arsh = iq_nbits - 20; - if (arsh >= 0) { - a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh))); - tmp = ii >> arsh; - } else { - a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh))); - tmp = ii << -arsh; - } - if (tmp == 0) { - error = true; - break; - } - a /= tmp; - - brsh = qq_nbits - 11; - if (brsh >= 0) { - b = (qq << (31 - qq_nbits)); - tmp = ii >> brsh; - } else { - b = (qq << (31 - qq_nbits)); - tmp = ii << -brsh; - } - if (tmp == 0) { - error = true; - break; - } - b = int_sqrt(b / tmp - a * a) - (1 << 10); - if (i == 0 && (mask & 0x1)) { - if (dev->phy.rev >= 3) { - new.a0 = a & 0x3FF; - new.b0 = b & 0x3FF; - } else { - new.a0 = b & 0x3FF; - new.b0 = a & 0x3FF; - } - } else if (i == 1 && (mask & 0x2)) { - if (dev->phy.rev >= 3) { - new.a1 = a & 0x3FF; - new.b1 = b & 0x3FF; - } else { - new.a1 = b & 0x3FF; - new.b1 = a & 0x3FF; - } + for (i = 0; i < 4; i++) + buf[i] = 0; + + for (i = 0; i < nsamp; i++) { + if (dev->phy.rev < 2) { + s[0] = b43_phy_read(dev, B43_NPHY_GPIO_LOOUT); + s[1] = b43_phy_read(dev, B43_NPHY_GPIO_HIOUT); + } else { + s[0] = b43_phy_read(dev, B43_NPHY_RSSI1); + s[1] = b43_phy_read(dev, B43_NPHY_RSSI2); } + + buf[0] += ((s8)((s[0] & 0x3F) << 2)) >> 2; + buf[1] += ((s8)(((s[0] >> 8) & 0x3F) << 2)) >> 2; + buf[2] += ((s8)((s[1] & 0x3F) << 2)) >> 2; + buf[3] += ((s8)(((s[1] >> 8) & 0x3F) << 2)) >> 2; } + out = (buf[0] & 0xFF) << 24 | (buf[1] & 0xFF) << 16 | + (buf[2] & 0xFF) << 8 | (buf[3] & 0xFF); - if (error) - new = old; + if (dev->phy.rev < 2) + b43_phy_write(dev, B43_NPHY_GPIO_SEL, save_regs_phy[8]); - b43_nphy_rx_iq_coeffs(dev, true, &new); + if (dev->phy.rev >= 3) { + b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, + save_regs_phy[0]); + b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, + save_regs_phy[1]); + b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[2]); + b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[3]); + b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, save_regs_phy[4]); + b43_phy_write(dev, B43_NPHY_AFECTL_OVER, 
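
/*
 * Editor's aside -- illustrative sketch, not part of the diff above.
 * b43_nphy_poll_rssi() pulls two 6-bit signed RSSI samples out of each
 * 16-bit status word with "((s8)((x & 0x3F) << 2)) >> 2": shift the
 * 6-bit field up so its sign bit lands in bit 7 of an 8-bit value, then
 * arithmetic-shift back down to sign-extend.  Like the driver, this
 * relies on the usual wrap-on-cast and arithmetic-shift behavior.
 */
#include <stdint.h>
#include <stdio.h>

static int8_t rssi_field(uint16_t word, unsigned int shift)
{
	uint8_t field = (word >> shift) & 0x3F;	/* 6-bit signed sample */

	return (int8_t)(field << 2) >> 2;	/* sign-extend to [-32, 31] */
}

int main(void)
{
	uint16_t sample = 0x2FE1;	/* two packed rails: 0x2F and 0x21 */

	printf("low rail  = %d\n", rssi_field(sample, 0));	/* -31 */
	printf("high rail = %d\n", rssi_field(sample, 8));	/* -17 */
	return 0;
}
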
save_regs_phy[5]); + b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]); + b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]); + } else { + b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[0]); + b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[1]); + b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[2]); + b43_phy_write(dev, B43_NPHY_RFCTL_CMD, save_regs_phy[3]); + b43_phy_write(dev, B43_NPHY_RFCTL_OVER, save_regs_phy[4]); + b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, save_regs_phy[5]); + b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, save_regs_phy[6]); + } + + return out; } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */ -static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev) +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */ +static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type) { - u16 array[4]; - b43_ntab_read_bulk(dev, B43_NTAB16(0xF, 0x50), 4, array); + int i, j; + u8 state[4]; + u8 code, val; + u16 class, override; + u8 regs_save_radio[2]; + u16 regs_save_phy[2]; - b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]); - b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW1, array[1]); - b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW2, array[2]); - b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]); -} + s8 offset[4]; + u8 core; + u8 rail; -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */ -static void b43_nphy_spur_workaround(struct b43_wldev *dev) -{ - struct b43_phy_n *nphy = dev->phy.n; + u16 clip_state[2]; + u16 clip_off[2] = { 0xFFFF, 0xFFFF }; + s32 results_min[4] = { }; + u8 vcm_final[4] = { }; + s32 results[4][4] = { }; + s32 miniq[4][2] = { }; - u8 channel = dev->phy.channel; - int tone[2] = { 57, 58 }; - u32 noise[2] = { 0x3FF, 0x3FF }; + if (type == 2) { + code = 0; + val = 6; + } else if (type < 2) { + code = 25; + val = 4; + } else { + B43_WARN_ON(1); + return; + } - B43_WARN_ON(dev->phy.rev < 3); + class = b43_nphy_classifier(dev, 0, 0); + b43_nphy_classifier(dev, 7, 4); + b43_nphy_read_clip_detection(dev, clip_state); + b43_nphy_write_clip_detection(dev, clip_off); - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 1); + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) + override = 0x140; + else + override = 0x110; - if (nphy->gband_spurwar_en) { - /* TODO: N PHY Adjust Analog Pfbw (7) */ - if (channel == 11 && dev->phy.is_40mhz) - ; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/ - else - ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/ - /* TODO: N PHY Adjust CRS Min Power (0x1E) */ + regs_save_phy[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1); + regs_save_radio[0] = b43_radio_read16(dev, B2055_C1_PD_RXTX); + b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, override); + b43_radio_write16(dev, B2055_C1_PD_RXTX, val); + + regs_save_phy[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2); + regs_save_radio[1] = b43_radio_read16(dev, B2055_C2_PD_RXTX); + b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, override); + b43_radio_write16(dev, B2055_C2_PD_RXTX, val); + + state[0] = b43_radio_read16(dev, B2055_C1_PD_RSSIMISC) & 0x07; + state[1] = b43_radio_read16(dev, B2055_C2_PD_RSSIMISC) & 0x07; + b43_radio_mask(dev, B2055_C1_PD_RSSIMISC, 0xF8); + b43_radio_mask(dev, B2055_C2_PD_RSSIMISC, 0xF8); + state[2] = b43_radio_read16(dev, B2055_C1_SP_RSSI) & 0x07; + state[3] = b43_radio_read16(dev, B2055_C2_SP_RSSI) & 0x07; + + b43_nphy_rssi_select(dev, 5, type); + b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 0, type); + b43_nphy_scale_offset_rssi(dev, 0, 
0, 5, 1, type); + + for (i = 0; i < 4; i++) { + u8 tmp[4]; + for (j = 0; j < 4; j++) + tmp[j] = i; + if (type != 1) + b43_nphy_set_rssi_2055_vcm(dev, type, tmp); + b43_nphy_poll_rssi(dev, type, results[i], 8); + if (type < 2) + for (j = 0; j < 2; j++) + miniq[i][j] = min(results[i][2 * j], + results[i][2 * j + 1]); } - if (nphy->aband_spurwar_en) { - if (channel == 54) { - tone[0] = 0x20; - noise[0] = 0x25F; - } else if (channel == 38 || channel == 102 || channel == 118) { - if (0 /* FIXME */) { - tone[0] = 0x20; - noise[0] = 0x21F; - } else { - tone[0] = 0; - noise[0] = 0; + for (i = 0; i < 4; i++) { + s32 mind = 40; + u8 minvcm = 0; + s32 minpoll = 249; + s32 curr; + for (j = 0; j < 4; j++) { + if (type == 2) + curr = abs(results[j][i]); + else + curr = abs(miniq[j][i / 2] - code * 8); + + if (curr < mind) { + mind = curr; + minvcm = j; } - } else if (channel == 134) { - tone[0] = 0x20; - noise[0] = 0x21F; - } else if (channel == 151) { - tone[0] = 0x10; - noise[0] = 0x23F; - } else if (channel == 153 || channel == 161) { - tone[0] = 0x30; - noise[0] = 0x23F; - } else { - tone[0] = 0; - noise[0] = 0; - } - if (!tone[0] && !noise[0]) - ; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/ - else - ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/ + if (results[j][i] < minpoll) + minpoll = results[j][i]; + } + results_min[i] = minpoll; + vcm_final[i] = minvcm; } - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, 0); -} + if (type != 1) + b43_nphy_set_rssi_2055_vcm(dev, type, vcm_final); -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */ -static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev) -{ - struct b43_phy_n *nphy = dev->phy.n; - int i, j; - u32 tmp; - u32 cur_real, cur_imag, real_part, imag_part; + for (i = 0; i < 4; i++) { + offset[i] = (code * 8) - results[vcm_final[i]][i]; - u16 buffer[7]; + if (offset[i] < 0) + offset[i] = -((abs(offset[i]) + 4) / 8); + else + offset[i] = (offset[i] + 4) / 8; - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, true); + if (results_min[i] == 248) + offset[i] = code - 32; - b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer); + core = (i / 2) ? 2 : 1; + rail = (i % 2) ? 
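
/*
 * Editor's aside -- illustrative sketch, not part of the diff above.
 * The rev2 RSSI calibration sweeps four VCM codes per rail, keeps the
 * code whose averaged reading is closest to the target (code * 8), and
 * then rounds the remaining error to a step count of 8, rounding half
 * away from zero.  The readings below are made up; only the selection
 * and rounding logic mirrors the hunk above.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const int code = 25;			/* target, as for type < 2 */
	int readings[4] = { 230, 214, 196, 178 };	/* one rail, 4 VCM codes */
	int best_vcm = 0, best_dist = 40;	/* 40: same cap as the driver */
	int i, offset;

	for (i = 0; i < 4; i++) {
		int dist = abs(readings[i] - code * 8);

		if (dist < best_dist) {
			best_dist = dist;
			best_vcm = i;
		}
	}

	offset = code * 8 - readings[best_vcm];
	if (offset < 0)
		offset = -((abs(offset) + 4) / 8);	/* round to nearest step */
	else
		offset = (offset + 4) / 8;

	printf("best VCM %d, offset %d steps\n", best_vcm, offset);
	return 0;
}
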
1 : 0; - for (i = 0; i < 2; i++) { - tmp = ((buffer[i * 2] & 0x3FF) << 10) | - (buffer[i * 2 + 1] & 0x3FF); - b43_phy_write(dev, B43_NPHY_TABLE_ADDR, - (((i + 26) << 10) | 320)); - for (j = 0; j < 128; j++) { - b43_phy_write(dev, B43_NPHY_TABLE_DATAHI, - ((tmp >> 16) & 0xFFFF)); - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, - (tmp & 0xFFFF)); - } + b43_nphy_scale_offset_rssi(dev, 0, offset[i], core, rail, + type); } - for (i = 0; i < 2; i++) { - tmp = buffer[5 + i]; - real_part = (tmp >> 8) & 0xFF; - imag_part = (tmp & 0xFF); - b43_phy_write(dev, B43_NPHY_TABLE_ADDR, - (((i + 26) << 10) | 448)); + b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[0]); + b43_radio_maskset(dev, B2055_C2_PD_RSSIMISC, 0xF8, state[1]); + + switch (state[2]) { + case 1: + b43_nphy_rssi_select(dev, 1, 2); + break; + case 4: + b43_nphy_rssi_select(dev, 1, 0); + break; + case 2: + b43_nphy_rssi_select(dev, 1, 1); + break; + default: + b43_nphy_rssi_select(dev, 1, 1); + break; + } + + switch (state[3]) { + case 1: + b43_nphy_rssi_select(dev, 2, 2); + break; + case 4: + b43_nphy_rssi_select(dev, 2, 0); + break; + default: + b43_nphy_rssi_select(dev, 2, 1); + break; + } - if (dev->phy.rev >= 3) { - cur_real = real_part; - cur_imag = imag_part; - tmp = ((cur_real & 0xFF) << 8) | (cur_imag & 0xFF); - } + b43_nphy_rssi_select(dev, 0, type); - for (j = 0; j < 128; j++) { - if (dev->phy.rev < 3) { - cur_real = (real_part * loscale[j] + 128) >> 8; - cur_imag = (imag_part * loscale[j] + 128) >> 8; - tmp = ((cur_real & 0xFF) << 8) | - (cur_imag & 0xFF); - } - b43_phy_write(dev, B43_NPHY_TABLE_DATAHI, - ((tmp >> 16) & 0xFFFF)); - b43_phy_write(dev, B43_NPHY_TABLE_DATALO, - (tmp & 0xFFFF)); - } - } + b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs_save_phy[0]); + b43_radio_write16(dev, B2055_C1_PD_RXTX, regs_save_radio[0]); + b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs_save_phy[1]); + b43_radio_write16(dev, B2055_C2_PD_RXTX, regs_save_radio[1]); + + b43_nphy_classifier(dev, 7, class); + b43_nphy_write_clip_detection(dev, clip_state); + /* Specs don't say about reset here, but it makes wl and b43 dumps + identical, it really seems wl performs this */ + b43_nphy_reset_cca(dev); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */ +static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) +{ + /* TODO */ +} +/* + * RSSI Calibration + * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal + */ +static void b43_nphy_rssi_cal(struct b43_wldev *dev) +{ if (dev->phy.rev >= 3) { - b43_shm_write16(dev, B43_SHM_SHARED, - B43_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF); - b43_shm_write16(dev, B43_SHM_SHARED, - B43_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF); + b43_nphy_rev3_rssi_cal(dev); + } else { + b43_nphy_rev2_rssi_cal(dev, B43_NPHY_RSSI_Z); + b43_nphy_rev2_rssi_cal(dev, B43_NPHY_RSSI_X); + b43_nphy_rev2_rssi_cal(dev, B43_NPHY_RSSI_Y); } - - if (nphy->hang_avoid) - b43_nphy_stay_in_carrier_search(dev, false); } /* @@ -2943,6 +2729,24 @@ static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev) b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, rssical_phy_regs[11]); } +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */ +static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev) +{ + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (dev->phy.rev >= 6) { + if (dev->dev->chip_id == 47162) + return txpwrctrl_tx_gain_ipa_rev5; + return txpwrctrl_tx_gain_ipa_rev6; + } else if (dev->phy.rev >= 5) { + return txpwrctrl_tx_gain_ipa_rev5; + } else { + return txpwrctrl_tx_gain_ipa; + } + } else { + return 
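
/*
 * Editor's aside -- illustrative sketch, not part of the diff above.
 * The (removed) TX power control coefficient setup scales 8-bit I/Q
 * parts by a Q8 fixed-point factor with "(x * scale + 128) >> 8":
 * multiply, add half an LSB, shift down, which rounds to nearest.
 * The scale values here are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t scale_q8_round(uint32_t x, uint32_t scale_q8)
{
	/* adding 128 (0.5 in Q8) before the shift rounds to nearest */
	return (x * scale_q8 + 128) >> 8;
}

int main(void)
{
	printf("%u\n", scale_q8_round(100, 192));	/* 192/256 = 0.75   -> 75 */
	printf("%u\n", scale_q8_round(100, 193));	/* 193/256 ~ 0.7539 -> 75 */
	return 0;
}
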
txpwrctrl_tx_gain_ipa_5g; + } +} + /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */ static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev) { @@ -3037,6 +2841,44 @@ static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev) } } +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */ +static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core, + struct nphy_txgains target, + struct nphy_iqcal_params *params) +{ + int i, j, indx; + u16 gain; + + if (dev->phy.rev >= 3) { + params->txgm = target.txgm[core]; + params->pga = target.pga[core]; + params->pad = target.pad[core]; + params->ipa = target.ipa[core]; + params->cal_gain = (params->txgm << 12) | (params->pga << 8) | + (params->pad << 4) | (params->ipa); + for (j = 0; j < 5; j++) + params->ncorr[j] = 0x79; + } else { + gain = (target.pad[core]) | (target.pga[core] << 4) | + (target.txgm[core] << 8); + + indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? + 1 : 0; + for (i = 0; i < 9; i++) + if (tbl_iqcal_gainparams[indx][i][0] == gain) + break; + i = min(i, 8); + + params->txgm = tbl_iqcal_gainparams[indx][i][1]; + params->pga = tbl_iqcal_gainparams[indx][i][2]; + params->pad = tbl_iqcal_gainparams[indx][i][3]; + params->cal_gain = (params->txgm << 7) | (params->pga << 4) | + (params->pad << 2); + for (j = 0; j < 4; j++) + params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j]; + } +} + /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */ static void b43_nphy_update_tx_cal_ladder(struct b43_wldev *dev, u16 core) { @@ -3416,7 +3258,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev, if (dev->phy.rev >= 4) { avoid = nphy->hang_avoid; - nphy->hang_avoid = false; + nphy->hang_avoid = 0; } b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, save); @@ -3526,7 +3368,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev, if (phy6or5x && updated[core] == 0) { b43_nphy_update_tx_cal_ladder(dev, core); - updated[core] = true; + updated[core] = 1; } tmp = (params[core].ncorr[type] << 8) | 0x66; @@ -3888,104 +3730,10 @@ static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask) b43_mac_enable(dev); } -/************************************************** - * N-PHY init - **************************************************/ - /* - * Upload the N-PHY tables. 
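
/*
 * Editor's aside -- illustrative sketch, not part of the diff above.
 * On PHY rev >= 3, b43_nphy_iq_cal_gain_params() packs the per-core TX
 * gain components into one 16-bit cal-gain word, one nibble per stage
 * (older revisions instead look the gain up in tbl_iqcal_gainparams and
 * clamp the search index with min(i, 8)).  A tiny pack/unpack demo of
 * that nibble layout; the field values are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t txgm = 0x3, pga = 0xA, pad = 0x7, ipa = 0x5;
	uint16_t cal_gain = (txgm << 12) | (pga << 8) | (pad << 4) | ipa;

	printf("cal_gain = 0x%04X\n", cal_gain);	/* 0x3A75 */
	printf("txgm=%u pga=%u pad=%u ipa=%u\n",
	       (unsigned)((cal_gain >> 12) & 0xF),
	       (unsigned)((cal_gain >> 8) & 0xF),
	       (unsigned)((cal_gain >> 4) & 0xF),
	       (unsigned)(cal_gain & 0xF));
	return 0;
}
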
- * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables + * Init N-PHY + * http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N */ -static void b43_nphy_tables_init(struct b43_wldev *dev) -{ - if (dev->phy.rev < 3) - b43_nphy_rev0_1_2_tables_init(dev); - else - b43_nphy_rev3plus_tables_init(dev); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */ -static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble) -{ - u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG); - - mimocfg |= B43_NPHY_MIMOCFG_AUTO; - if (preamble == 1) - mimocfg |= B43_NPHY_MIMOCFG_GFMIX; - else - mimocfg &= ~B43_NPHY_MIMOCFG_GFMIX; - - b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */ -static void b43_nphy_bphy_init(struct b43_wldev *dev) -{ - unsigned int i; - u16 val; - - val = 0x1E1F; - for (i = 0; i < 16; i++) { - b43_phy_write(dev, B43_PHY_N_BMODE(0x88 + i), val); - val -= 0x202; - } - val = 0x3E3F; - for (i = 0; i < 16; i++) { - b43_phy_write(dev, B43_PHY_N_BMODE(0x98 + i), val); - val -= 0x202; - } - b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */ -static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init) -{ - if (dev->phy.rev >= 3) { - if (!init) - return; - if (0 /* FIXME */) { - b43_ntab_write(dev, B43_NTAB16(9, 2), 0x211); - b43_ntab_write(dev, B43_NTAB16(9, 3), 0x222); - b43_ntab_write(dev, B43_NTAB16(9, 8), 0x144); - b43_ntab_write(dev, B43_NTAB16(9, 12), 0x188); - } - } else { - b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0); - b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0); - - switch (dev->dev->bus_type) { -#ifdef CONFIG_B43_BCMA - case B43_BUS_BCMA: - bcma_chipco_gpio_control(&dev->dev->bdev->bus->drv_cc, - 0xFC00, 0xFC00); - break; -#endif -#ifdef CONFIG_B43_SSB - case B43_BUS_SSB: - ssb_chipco_gpio_control(&dev->dev->sdev->bus->chipco, - 0xFC00, 0xFC00); - break; -#endif - } - - b43_write32(dev, B43_MMIO_MACCTL, - b43_read32(dev, B43_MMIO_MACCTL) & - ~B43_MACCTL_GPOUTSMSK); - b43_write16(dev, B43_MMIO_GPIO_MASK, - b43_read16(dev, B43_MMIO_GPIO_MASK) | 0xFC00); - b43_write16(dev, B43_MMIO_GPIO_CONTROL, - b43_read16(dev, B43_MMIO_GPIO_CONTROL) & ~0xFC00); - - if (init) { - b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8); - b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301); - b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8); - b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301); - } - } -} - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N */ int b43_phy_initn(struct b43_wldev *dev) { struct ssb_sprom *sprom = dev->dev->bus_sprom; @@ -4109,7 +3857,7 @@ int b43_phy_initn(struct b43_wldev *dev) tx_pwr_state = nphy->txpwrctrl; b43_nphy_tx_power_ctrl(dev, false); b43_nphy_tx_power_fix(dev); - b43_nphy_tx_power_ctl_idle_tssi(dev); + /* TODO N PHY TX Power Control Idle TSSI */ /* TODO N PHY TX Power Control Setup */ b43_nphy_tx_gain_table_upload(dev); @@ -4180,91 +3928,6 @@ int b43_phy_initn(struct b43_wldev *dev) return 0; } -/************************************************** - * Channel switching ops. 
- **************************************************/ - -static void b43_chantab_phy_upload(struct b43_wldev *dev, - const struct b43_phy_n_sfo_cfg *e) -{ - b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a); - b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2); - b43_phy_write(dev, B43_NPHY_BW3, e->phy_bw3); - b43_phy_write(dev, B43_NPHY_BW4, e->phy_bw4); - b43_phy_write(dev, B43_NPHY_BW5, e->phy_bw5); - b43_phy_write(dev, B43_NPHY_BW6, e->phy_bw6); -} - -/* http://bcm-v4.sipsolutions.net/802.11/PmuSpurAvoid */ -static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid) -{ - struct bcma_drv_cc __maybe_unused *cc; - u32 __maybe_unused pmu_ctl; - - switch (dev->dev->bus_type) { -#ifdef CONFIG_B43_BCMA - case B43_BUS_BCMA: - cc = &dev->dev->bdev->bus->drv_cc; - if (dev->dev->chip_id == 43224 || dev->dev->chip_id == 43225) { - if (avoid) { - bcma_chipco_pll_write(cc, 0x0, 0x11500010); - bcma_chipco_pll_write(cc, 0x1, 0x000C0C06); - bcma_chipco_pll_write(cc, 0x2, 0x0F600a08); - bcma_chipco_pll_write(cc, 0x3, 0x00000000); - bcma_chipco_pll_write(cc, 0x4, 0x2001E920); - bcma_chipco_pll_write(cc, 0x5, 0x88888815); - } else { - bcma_chipco_pll_write(cc, 0x0, 0x11100010); - bcma_chipco_pll_write(cc, 0x1, 0x000c0c06); - bcma_chipco_pll_write(cc, 0x2, 0x03000a08); - bcma_chipco_pll_write(cc, 0x3, 0x00000000); - bcma_chipco_pll_write(cc, 0x4, 0x200005c0); - bcma_chipco_pll_write(cc, 0x5, 0x88888815); - } - pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD; - } else if (dev->dev->chip_id == 0x4716) { - if (avoid) { - bcma_chipco_pll_write(cc, 0x0, 0x11500060); - bcma_chipco_pll_write(cc, 0x1, 0x080C0C06); - bcma_chipco_pll_write(cc, 0x2, 0x0F600000); - bcma_chipco_pll_write(cc, 0x3, 0x00000000); - bcma_chipco_pll_write(cc, 0x4, 0x2001E924); - bcma_chipco_pll_write(cc, 0x5, 0x88888815); - } else { - bcma_chipco_pll_write(cc, 0x0, 0x11100060); - bcma_chipco_pll_write(cc, 0x1, 0x080c0c06); - bcma_chipco_pll_write(cc, 0x2, 0x03000000); - bcma_chipco_pll_write(cc, 0x3, 0x00000000); - bcma_chipco_pll_write(cc, 0x4, 0x200005c0); - bcma_chipco_pll_write(cc, 0x5, 0x88888815); - } - pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD | - BCMA_CC_PMU_CTL_NOILPONW; - } else if (dev->dev->chip_id == 0x4322 || - dev->dev->chip_id == 0x4340 || - dev->dev->chip_id == 0x4341) { - bcma_chipco_pll_write(cc, 0x0, 0x11100070); - bcma_chipco_pll_write(cc, 0x1, 0x1014140a); - bcma_chipco_pll_write(cc, 0x5, 0x88888854); - if (avoid) - bcma_chipco_pll_write(cc, 0x2, 0x05201828); - else - bcma_chipco_pll_write(cc, 0x2, 0x05001828); - pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD; - } else { - return; - } - bcma_cc_set32(cc, BCMA_CC_PMU_CTL, pmu_ctl); - break; -#endif -#ifdef CONFIG_B43_SSB - case B43_BUS_SSB: - /* FIXME */ - break; -#endif - } -} - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */ static void b43_nphy_channel_setup(struct b43_wldev *dev, const struct b43_phy_n_sfo_cfg *e, @@ -4272,7 +3935,6 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev, { struct b43_phy *phy = &dev->phy; struct b43_phy_n *nphy = dev->phy.n; - int ch = new_channel->hw_value; u16 old_band_5ghz; u32 tmp32; @@ -4312,41 +3974,8 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev, b43_nphy_tx_lp_fbw(dev); - if (dev->phy.rev >= 3 && - dev->phy.n->spur_avoid != B43_SPUR_AVOID_DISABLE) { - bool avoid = false; - if (dev->phy.n->spur_avoid == B43_SPUR_AVOID_FORCE) { - avoid = true; - } else if (!b43_channel_type_is_40mhz(phy->channel_type)) { - if ((ch >= 5 && ch <= 8) || ch == 13 || ch == 14) - avoid = true; - } else { /* 40MHz */ - if (nphy->aband_spurwar_en && 
- (ch == 38 || ch == 102 || ch == 118)) - avoid = dev->dev->chip_id == 0x4716; - } - - b43_nphy_pmu_spur_avoid(dev, avoid); - - if (dev->dev->chip_id == 43222 || dev->dev->chip_id == 43224 || - dev->dev->chip_id == 43225) { - b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, - avoid ? 0x5341 : 0x8889); - b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8); - } - - if (dev->phy.rev == 3 || dev->phy.rev == 4) - ; /* TODO: reset PLL */ - - if (avoid) - b43_phy_set(dev, B43_NPHY_BBCFG, B43_NPHY_BBCFG_RSTRX); - else - b43_phy_mask(dev, B43_NPHY_BBCFG, - ~B43_NPHY_BBCFG_RSTRX & 0xFFFF); - - b43_nphy_reset_cca(dev); - - /* wl sets useless phy_isspuravoid here */ + if (dev->phy.rev >= 3 && 0) { + /* TODO */ } b43_phy_write(dev, B43_NPHY_NDATAT_DUP40, 0x3830); @@ -4410,10 +4039,6 @@ static int b43_nphy_set_channel(struct b43_wldev *dev, return 0; } -/************************************************** - * Basic PHY ops. - **************************************************/ - static int b43_nphy_op_allocate(struct b43_wldev *dev) { struct b43_phy_n *nphy; @@ -4430,13 +4055,10 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; struct b43_phy_n *nphy = phy->n; - struct ssb_sprom *sprom = dev->dev->bus_sprom; memset(nphy, 0, sizeof(*nphy)); nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4); - nphy->spur_avoid = (phy->rev >= 3) ? - B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE; nphy->gain_boost = true; /* this way we follow wl, assume it is true */ nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */ nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */ @@ -4445,38 +4067,6 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev) * 0x7f == 127 and we check for 128 when restoring TX pwr ctl. 
*/ nphy->tx_pwr_idx[0] = 128; nphy->tx_pwr_idx[1] = 128; - - /* Hardware TX power control and 5GHz power gain */ - nphy->txpwrctrl = false; - nphy->pwg_gain_5ghz = false; - if (dev->phy.rev >= 3 || - (dev->dev->board_vendor == PCI_VENDOR_ID_APPLE && - (dev->dev->core_rev == 11 || dev->dev->core_rev == 12))) { - nphy->txpwrctrl = true; - nphy->pwg_gain_5ghz = true; - } else if (sprom->revision >= 4) { - if (dev->phy.rev >= 2 && - (sprom->boardflags2_lo & B43_BFL2_TXPWRCTRL_EN)) { - nphy->txpwrctrl = true; -#ifdef CONFIG_B43_SSB - if (dev->dev->bus_type == B43_BUS_SSB && - dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI) { - struct pci_dev *pdev = - dev->dev->sdev->bus->host_pci; - if (pdev->device == 0x4328 || - pdev->device == 0x432a) - nphy->pwg_gain_5ghz = true; - } -#endif - } else if (sprom->boardflags2_lo & B43_BFL2_5G_PWRGAIN) { - nphy->pwg_gain_5ghz = true; - } - } - - if (dev->phy.rev >= 3) { - nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2; - nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2; - } } static void b43_nphy_op_free(struct b43_wldev *dev) diff --git a/trunk/drivers/net/wireless/b43/phy_n.h b/trunk/drivers/net/wireless/b43/phy_n.h index 5de8f74cc02f..fbf520285bd1 100644 --- a/trunk/drivers/net/wireless/b43/phy_n.h +++ b/trunk/drivers/net/wireless/b43/phy_n.h @@ -716,12 +716,6 @@ struct b43_wldev; -enum b43_nphy_spur_avoid { - B43_SPUR_AVOID_DISABLE, - B43_SPUR_AVOID_AUTO, - B43_SPUR_AVOID_FORCE, -}; - struct b43_chanspec { u16 center_freq; enum nl80211_channel_type channel_type; @@ -765,11 +759,6 @@ struct b43_phy_n_txpwrindex { u16 locomp; }; -struct b43_phy_n_pwr_ctl_info { - u8 idle_tssi_2g; - u8 idle_tssi_5g; -}; - struct b43_phy_n { u8 antsel_type; u8 cal_orig_pwr_idx[2]; @@ -796,14 +785,12 @@ struct b43_phy_n { u16 mphase_txcal_bestcoeffs[11]; bool txpwrctrl; - bool pwg_gain_5ghz; u8 tx_pwr_idx[2]; u16 adj_pwr_tbl[84]; u16 txcal_bbmult; u16 txiqlocal_bestc[11]; bool txiqlocal_coeffsvalid; struct b43_phy_n_txpwrindex txpwrindex[2]; - struct b43_phy_n_pwr_ctl_info pwr_ctl_info[2]; struct b43_chanspec txiqlocal_chanspec; u8 txrx_chain; @@ -816,7 +803,6 @@ struct b43_phy_n { u16 classifier_state; u16 clip_state[2]; - enum b43_nphy_spur_avoid spur_avoid; bool aband_spurwar_en; bool gband_spurwar_en; diff --git a/trunk/drivers/net/wireless/b43/pio.c b/trunk/drivers/net/wireless/b43/pio.c index 3533ab86bd36..fcff923b3c18 100644 --- a/trunk/drivers/net/wireless/b43/pio.c +++ b/trunk/drivers/net/wireless/b43/pio.c @@ -539,7 +539,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb) /* Not enough memory on the queue. */ err = -EBUSY; ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb)); - q->stopped = true; + q->stopped = 1; goto out; } @@ -566,7 +566,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb) (q->free_packet_slots == 0)) { /* The queue is full. 
*/ ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb)); - q->stopped = true; + q->stopped = 1; } out: @@ -601,7 +601,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev, if (q->stopped) { ieee80211_wake_queue(dev->wl->hw, q->queue_prio); - q->stopped = false; + q->stopped = 0; } } @@ -617,19 +617,9 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q) const char *err_msg = NULL; struct b43_rxhdr_fw4 *rxhdr = (struct b43_rxhdr_fw4 *)wl->pio_scratchspace; - size_t rxhdr_size = sizeof(*rxhdr); BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr)); - switch (dev->fw.hdr_format) { - case B43_FW_HDR_410: - case B43_FW_HDR_351: - rxhdr_size -= sizeof(rxhdr->format_598) - - sizeof(rxhdr->format_351); - break; - case B43_FW_HDR_598: - break; - } - memset(rxhdr, 0, rxhdr_size); + memset(rxhdr, 0, sizeof(*rxhdr)); /* Check if we have data and wait for it to get ready. */ if (q->rev >= 8) { @@ -667,11 +657,11 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q) /* Get the preamble (RX header) */ if (q->rev >= 8) { - b43_block_read(dev, rxhdr, rxhdr_size, + b43_block_read(dev, rxhdr, sizeof(*rxhdr), q->mmio_base + B43_PIO8_RXDATA, sizeof(u32)); } else { - b43_block_read(dev, rxhdr, rxhdr_size, + b43_block_read(dev, rxhdr, sizeof(*rxhdr), q->mmio_base + B43_PIO_RXDATA, sizeof(u16)); } diff --git a/trunk/drivers/net/wireless/b43/radio_2056.c b/trunk/drivers/net/wireless/b43/radio_2056.c index ce037fb6789a..a01f776ca4de 100644 --- a/trunk/drivers/net/wireless/b43/radio_2056.c +++ b/trunk/drivers/net/wireless/b43/radio_2056.c @@ -1572,14 +1572,14 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = { [B2056_SYN_PLL_XTAL5] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, }, [B2056_SYN_PLL_XTAL6] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, }, [B2056_SYN_PLL_REFDIV] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, }, - [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, }, + [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, }, [B2056_SYN_PLL_CP1] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, }, - [B2056_SYN_PLL_CP2] = { .ghz5 = 0x003f, .ghz2 = 0x003f, UPLOAD, }, + [B2056_SYN_PLL_CP2] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, }, [B2056_SYN_PLL_CP3] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, }, - [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, }, - [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, }, + [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, }, + [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, }, [B2056_SYN_PLL_LOOPFILTER3] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, }, - [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x002b, .ghz2 = 0x002b, UPLOAD, }, + [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, }, [B2056_SYN_PLL_LOOPFILTER5] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, }, [B2056_SYN_PLL_MMD1] = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, }, [B2056_SYN_PLL_MMD2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, }, @@ -9055,21 +9055,6 @@ void b2056_upload_inittabs(struct b43_wldev *dev, B2056_RX1, pts->rx, pts->rx_length); } -void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5) -{ - struct b2056_inittabs_pts *pts; - const struct b2056_inittab_entry *e; - - if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) { - B43_WARN_ON(1); - return; - } - pts = &b2056_inittabs[dev->phy.rev]; - e = &pts->syn[B2056_SYN_PLL_CP2]; - - b43_radio_write(dev, B2056_SYN_PLL_CP2, ghz5 ? 
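
/*
 * Editor's aside -- illustrative sketch, not part of the diff above.
 * The removed pio.c hunk sized the RX header read by firmware header
 * format: older firmware sends a shorter trailing block, so the driver
 * subtracted the size difference of the two layouts from sizeof(*rxhdr).
 * The structs below are invented purely to show that sizeof-difference
 * pattern; they do not reflect the real b43_rxhdr_fw4 layout.
 */
#include <stddef.h>
#include <stdio.h>

struct demo_trailer_old { unsigned short a, b; };		/*  4 bytes */
struct demo_trailer_new { unsigned short a, b, c, d, e; };	/* 10 bytes */

struct demo_rxhdr {
	unsigned short frame_len;
	struct demo_trailer_new trailer;	/* big enough for either format */
};

int main(void)
{
	size_t rxhdr_size = sizeof(struct demo_rxhdr);
	int old_firmware = 1;

	if (old_firmware)
		rxhdr_size -= sizeof(struct demo_trailer_new) -
			      sizeof(struct demo_trailer_old);

	printf("bytes to read for the RX header: %zu\n", rxhdr_size);
	return 0;
}
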
e->ghz5 : e->ghz2); -} - const struct b43_nphy_channeltab_entry_rev3 * b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq) { diff --git a/trunk/drivers/net/wireless/b43/radio_2056.h b/trunk/drivers/net/wireless/b43/radio_2056.h index 5b86673459fa..a7159d8578be 100644 --- a/trunk/drivers/net/wireless/b43/radio_2056.h +++ b/trunk/drivers/net/wireless/b43/radio_2056.h @@ -1090,7 +1090,6 @@ struct b43_nphy_channeltab_entry_rev3 { void b2056_upload_inittabs(struct b43_wldev *dev, bool ghz5, bool ignore_uploadflag); -void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5); /* Get the NPHY Channel Switch Table entry for a channel. * Returns NULL on failure to find an entry. */ diff --git a/trunk/drivers/net/wireless/b43/tables_nphy.c b/trunk/drivers/net/wireless/b43/tables_nphy.c index f7def13524dd..7b326f2efdc9 100644 --- a/trunk/drivers/net/wireless/b43/tables_nphy.c +++ b/trunk/drivers/net/wireless/b43/tables_nphy.c @@ -2171,48 +2171,6 @@ static const u16 b43_ntab_loftlt1_r3[] = { 0x0000, 0x0000, }; -/* volatile tables, PHY revision >= 3 */ - -/* indexed by antswctl2g */ -static const u16 b43_ntab_antswctl2g_r3[4][32] = { - { - 0x0082, 0x0082, 0x0211, 0x0222, 0x0328, - 0x0000, 0x0000, 0x0000, 0x0144, 0x0000, - 0x0000, 0x0000, 0x0188, 0x0000, 0x0000, - 0x0000, 0x0082, 0x0082, 0x0211, 0x0222, - 0x0328, 0x0000, 0x0000, 0x0000, 0x0144, - 0x0000, 0x0000, 0x0000, 0x0188, 0x0000, - 0x0000, 0x0000, - }, - { - 0x0022, 0x0022, 0x0011, 0x0022, 0x0022, - 0x0000, 0x0000, 0x0000, 0x0011, 0x0000, - 0x0000, 0x0000, 0x0022, 0x0000, 0x0000, - 0x0000, 0x0022, 0x0022, 0x0011, 0x0022, - 0x0022, 0x0000, 0x0000, 0x0000, 0x0011, - 0x0000, 0x0000, 0x0000, 0x0022, 0x0000, - 0x0000, 0x0000, - }, - { - 0x0088, 0x0088, 0x0044, 0x0088, 0x0088, - 0x0000, 0x0000, 0x0000, 0x0044, 0x0000, - 0x0000, 0x0000, 0x0088, 0x0000, 0x0000, - 0x0000, 0x0088, 0x0088, 0x0044, 0x0088, - 0x0088, 0x0000, 0x0000, 0x0000, 0x0044, - 0x0000, 0x0000, 0x0000, 0x0088, 0x0000, - 0x0000, 0x0000, - }, - { - 0x0022, 0x0022, 0x0011, 0x0022, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0011, 0x0000, - 0x0000, 0x0000, 0x0022, 0x0000, 0x0000, - 0x03cc, 0x0022, 0x0022, 0x0011, 0x0022, - 0x0000, 0x0000, 0x0000, 0x0000, 0x0011, - 0x0000, 0x0000, 0x0000, 0x0022, 0x0000, - 0x0000, 0x03cc, - } -}; - /* TX gain tables */ const u32 b43_ntab_tx_gain_rev0_1_2[] = { 0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42, @@ -2694,7 +2652,7 @@ const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = { const s16 tbl_tx_filter_coef_rev4[7][15] = { { -377, 137, -407, 208, -1527, 956, 93, 186, 93, 230, - -44, 230, 201, -191, 201 }, + -44, 230, 20, -191, 201 }, { -77, 20, -98, 49, -93, 60, 56, 111, 56, 26, -5, 26, 34, -32, 34 }, @@ -2752,18 +2710,7 @@ const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = { { 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */ }; -struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_wa_phy6_radio11_ghz2 = { - { 10, 14, 19, 27 }, - { -5, 6, 10, 15 }, - { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA }, - { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }, - 0x427E, - { 0x413F, 0x413F, 0x413F, 0x413F }, - 0x007E, 0x0066, 0x1074, - 0x18, 0x18, 0x18, - 0x01D0, 0x5, -}; -struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][4] = { +struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = { { /* 2GHz */ { /* PHY rev 3 */ { 7, 11, 16, 23 }, @@ -2787,26 +2734,15 @@ struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][4] = { 0x18, 0x18, 0x18, 0x01A1, 0x5, }, - { /* PHY rev 5 */ + { /* PHY rev 5+ 
*/ { 9, 13, 18, 26 }, { -3, 7, 11, 16 }, { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA }, { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }, 0x427E, /* invalid for external LNA! */ { 0x413F, 0x413F, 0x413F, 0x413F }, /* invalid for external LNA! */ - 0x1076, 0x0066, 0x0000, /* low is invalid (the last one) */ - 0x18, 0x18, 0x18, - 0x01D0, 0x9, - }, - { /* PHY rev 6+ */ - { 8, 13, 18, 25 }, - { -5, 6, 10, 14 }, - { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA }, - { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }, - 0x527E, /* invalid for external LNA! */ - { 0x513F, 0x513F, 0x513F, 0x513F }, /* invalid for external LNA! */ - 0x1076, 0x0066, 0x0000, /* low is invalid (the last one) */ - 0x18, 0x18, 0x18, + 0x1076, 0x0066, 0x106A, + 0xC, 0xC, 0xC, 0x01D0, 0x5, }, }, @@ -2833,7 +2769,7 @@ struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][4] = { 0x24, 0x24, 0x24, 0x0107, 25, }, - { /* PHY rev 5 */ + { /* PHY rev 5+ */ { 6, 10, 16, 21 }, { -7, 0, 4, 8 }, { 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD }, @@ -2844,17 +2780,6 @@ struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][4] = { 0x24, 0x24, 0x24, 0x00A9, 25, }, - { /* PHY rev 6+ */ - { 6, 10, 16, 21 }, - { -7, 0, 4, 8 }, - { 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD }, - { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }, - 0x729E, - { 0x714F, 0x714F, 0x714F, 0x714F }, - 0x029E, 0x2084, 0x2086, - 0x24, 0x24, 0x24, /* low is invalid for radio rev 11! */ - 0x00F0, 25, - }, }, }; @@ -2913,8 +2838,9 @@ u32 b43_ntab_read(struct b43_wldev *dev, u32 offset) break; case B43_NTAB_32BIT: b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset); - value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO); - value |= b43_phy_read(dev, B43_NPHY_TABLE_DATAHI) << 16; + value = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI); + value <<= 16; + value |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO); break; default: B43_WARN_ON(1); @@ -2938,12 +2864,6 @@ void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset, b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset); for (i = 0; i < nr_elements; i++) { - /* Auto increment broken + caching issue on BCM43224? */ - if (dev->dev->chip_id == 43224 && dev->dev->chip_rev == 1) { - b43_phy_read(dev, B43_NPHY_TABLE_DATALO); - b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset + i); - } - switch (type) { case B43_NTAB_8BIT: *data = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF; @@ -2954,10 +2874,9 @@ void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset, data += 2; break; case B43_NTAB_32BIT: - *((u32 *)data) = - b43_phy_read(dev, B43_NPHY_TABLE_DATALO); - *((u32 *)data) |= - b43_phy_read(dev, B43_NPHY_TABLE_DATAHI) << 16; + *((u32 *)data) = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI); + *((u32 *)data) <<= 16; + *((u32 *)data) |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO); data += 4; break; default: @@ -3013,13 +2932,6 @@ void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset, b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset); for (i = 0; i < nr_elements; i++) { - /* Auto increment broken + caching issue on BCM43224? 
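
/*
 * Editor's aside -- illustrative sketch, not part of the diff above.
 * The 32-bit N-PHY table accessors split each entry across the DATALO
 * and DATAHI registers; the b43_ntab_read() hunk only changes which
 * half is read first (the auto-incrementing hardware cares about the
 * order, the arithmetic does not).  Composing and splitting such a
 * value in plain C, using the first entry of the rev0-2 TX gain table:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t lo = 0x2B44, hi = 0x03CC;
	uint32_t value = ((uint32_t)hi << 16) | lo;

	printf("value = 0x%08X\n", (unsigned)value);	/* 0x03CC2B44 */
	printf("hi = 0x%04X, lo = 0x%04X\n",
	       (unsigned)(value >> 16), (unsigned)(value & 0xFFFF));
	return 0;
}
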
*/ - if ((offset >> 10) == 9 && dev->dev->chip_id == 43224 && - dev->dev->chip_rev == 1) { - b43_phy_read(dev, B43_NPHY_TABLE_DATALO); - b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset + i); - } - switch (type) { case B43_NTAB_8BIT: value = *data; @@ -3087,8 +2999,6 @@ void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev) } while (0) void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev) { - struct ssb_sprom *sprom = dev->dev->bus_sprom; - /* Static tables */ ntab_upload_r3(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3); ntab_upload_r3(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3); @@ -3119,11 +3029,7 @@ void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev) ntab_upload_r3(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3); /* Volatile tables */ - if (sprom->fem.ghz2.antswlut < ARRAY_SIZE(b43_ntab_antswctl2g_r3)) - ntab_upload_r3(dev, B43_NTAB_ANT_SW_CTL_R3, - b43_ntab_antswctl2g_r3[sprom->fem.ghz2.antswlut]); - else - B43_WARN_ON(1); + /* TODO */ } struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent( @@ -3131,67 +3037,26 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent( { struct nphy_gain_ctl_workaround_entry *e; u8 phy_idx; - u8 tr_iso = ghz5 ? dev->dev->bus_sprom->fem.ghz5.tr_iso : - dev->dev->bus_sprom->fem.ghz2.tr_iso; - - if (!ghz5 && dev->phy.rev >= 6 && dev->phy.radio_rev == 11) - return &nphy_gain_ctl_wa_phy6_radio11_ghz2; B43_WARN_ON(dev->phy.rev < 3); - if (dev->phy.rev >= 6) - phy_idx = 3; - else if (dev->phy.rev == 5) + if (dev->phy.rev >= 5) phy_idx = 2; else if (dev->phy.rev == 4) phy_idx = 1; else phy_idx = 0; + e = &nphy_gain_ctl_workaround[ghz5][phy_idx]; - /* Some workarounds to the workarounds... */ - if (ghz5 && dev->phy.rev >= 6) { - if (dev->phy.radio_rev == 11 && - !b43_channel_type_is_40mhz(dev->phy.channel_type)) - e->cliplo_gain = 0x2d; - } else if (!ghz5 && dev->phy.rev >= 5) { - if (ext_lna) { - e->rfseq_init[0] &= ~0x4000; - e->rfseq_init[1] &= ~0x4000; - e->rfseq_init[2] &= ~0x4000; - e->rfseq_init[3] &= ~0x4000; - e->init_gain &= ~0x4000; - } - switch (tr_iso) { - case 0: - e->cliplo_gain = 0x0062; - case 1: - e->cliplo_gain = 0x0064; - case 2: - e->cliplo_gain = 0x006a; - case 3: - e->cliplo_gain = 0x106a; - case 4: - e->cliplo_gain = 0x106c; - case 5: - e->cliplo_gain = 0x1074; - case 6: - e->cliplo_gain = 0x107c; - case 7: - e->cliplo_gain = 0x207c; - default: - e->cliplo_gain = 0x106a; - } - } else if (ghz5 && dev->phy.rev == 4 && ext_lna) { - e->rfseq_init[0] &= ~0x4000; - e->rfseq_init[1] &= ~0x4000; - e->rfseq_init[2] &= ~0x4000; - e->rfseq_init[3] &= ~0x4000; - e->init_gain &= ~0x4000; - e->rfseq_init[0] |= 0x1000; - e->rfseq_init[1] |= 0x1000; - e->rfseq_init[2] |= 0x1000; - e->rfseq_init[3] |= 0x1000; - e->init_gain |= 0x1000; + /* Only one entry differs for external LNA, so instead making whole + * table 2 times bigger, hack is here + */ + if (!ghz5 && dev->phy.rev >= 5 && ext_lna) { + e->rfseq_init[0] &= 0x0FFF; + e->rfseq_init[1] &= 0x0FFF; + e->rfseq_init[2] &= 0x0FFF; + e->rfseq_init[3] &= 0x0FFF; + e->init_gain &= 0x0FFF; } return e; diff --git a/trunk/drivers/net/wireless/b43/tables_nphy.h b/trunk/drivers/net/wireless/b43/tables_nphy.h index 97038c481930..a81696bff0ed 100644 --- a/trunk/drivers/net/wireless/b43/tables_nphy.h +++ b/trunk/drivers/net/wireless/b43/tables_nphy.h @@ -126,29 +126,26 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent( #define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 
1 */ #define B43_NTAB_C1_LOFEEDTH_SIZE 128 -/* Volatile N-PHY tables, PHY revision >= 3 */ -#define B43_NTAB_ANT_SW_CTL_R3 B43_NTAB16( 9, 0) /* antenna software control */ - /* Static N-PHY tables, PHY revision >= 3 */ -#define B43_NTAB_FRAMESTRUCT_R3 B43_NTAB32(10, 0) /* frame struct */ -#define B43_NTAB_PILOT_R3 B43_NTAB16(11, 0) /* pilot */ -#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 0) /* TM AP */ -#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 0) /* INT LV */ -#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 0) /* TD TRN */ -#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 0) /* noise variance 0 */ +#define B43_NTAB_FRAMESTRUCT_R3 B43_NTAB32(10, 000) /* frame struct */ +#define B43_NTAB_PILOT_R3 B43_NTAB16(11, 000) /* pilot */ +#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 000) /* TM AP */ +#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 000) /* INT LV */ +#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 000) /* TD TRN */ +#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 000) /* noise variance 0 */ #define B43_NTAB_NOISEVAR1_R3 B43_NTAB32(16, 128) /* noise variance 1 */ -#define B43_NTAB_MCS_R3 B43_NTAB16(18, 0) /* MCS */ +#define B43_NTAB_MCS_R3 B43_NTAB16(18, 000) /* MCS */ #define B43_NTAB_TDI20A0_R3 B43_NTAB32(19, 128) /* TDI 20/0 */ #define B43_NTAB_TDI20A1_R3 B43_NTAB32(19, 256) /* TDI 20/1 */ #define B43_NTAB_TDI40A0_R3 B43_NTAB32(19, 640) /* TDI 40/0 */ #define B43_NTAB_TDI40A1_R3 B43_NTAB32(19, 768) /* TDI 40/1 */ -#define B43_NTAB_PILOTLT_R3 B43_NTAB32(20, 0) /* PLT lookup */ -#define B43_NTAB_CHANEST_R3 B43_NTAB32(22, 0) /* channel estimate */ -#define B43_NTAB_FRAMELT_R3 B43_NTAB8(24, 0) /* frame lookup */ -#define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8(26, 0) /* estimated power lookup 0 */ -#define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8(27, 0) /* estimated power lookup 1 */ -#define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8(26, 64) /* adjusted power lookup 0 */ -#define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8(27, 64) /* adjusted power lookup 1 */ +#define B43_NTAB_PILOTLT_R3 B43_NTAB32(20, 000) /* PLT lookup */ +#define B43_NTAB_CHANEST_R3 B43_NTAB32(22, 000) /* channel estimate */ +#define B43_NTAB_FRAMELT_R3 B43_NTAB8 (24, 000) /* frame lookup */ +#define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8 (26, 000) /* estimated power lookup 0 */ +#define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8 (27, 000) /* estimated power lookup 1 */ +#define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8 (26, 064) /* adjusted power lookup 0 */ +#define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8 (27, 064) /* adjusted power lookup 1 */ #define B43_NTAB_C0_GAINCTL_R3 B43_NTAB32(26, 192) /* gain control lookup 0 */ #define B43_NTAB_C1_GAINCTL_R3 B43_NTAB32(27, 192) /* gain control lookup 1 */ #define B43_NTAB_C0_IQLT_R3 B43_NTAB32(26, 320) /* I/Q lookup 0 */ diff --git a/trunk/drivers/net/wireless/b43/xmit.c b/trunk/drivers/net/wireless/b43/xmit.c index 2c5367884b3f..5f77cbe0b6aa 100644 --- a/trunk/drivers/net/wireless/b43/xmit.c +++ b/trunk/drivers/net/wireless/b43/xmit.c @@ -874,7 +874,7 @@ bool b43_fill_txstatus_report(struct b43_wldev *dev, struct ieee80211_tx_info *report, const struct b43_txstatus *status) { - bool frame_success = true; + bool frame_success = 1; int retry_limit; /* preserve the confiured retry limit before clearing the status @@ -890,7 +890,7 @@ bool b43_fill_txstatus_report(struct b43_wldev *dev, /* The frame was not ACKed... */ if (!(report->flags & IEEE80211_TX_CTL_NO_ACK)) { /* ...but we expected an ACK. 
*/ - frame_success = false; + frame_success = 0; } } if (status->frame_count == 0) { diff --git a/trunk/drivers/net/wireless/b43legacy/b43legacy.h b/trunk/drivers/net/wireless/b43legacy/b43legacy.h index 98e3d44400c6..1d4fc9db7f5e 100644 --- a/trunk/drivers/net/wireless/b43legacy/b43legacy.h +++ b/trunk/drivers/net/wireless/b43legacy/b43legacy.h @@ -560,16 +560,8 @@ struct b43legacy_key { u8 algorithm; }; -#define B43legacy_QOS_QUEUE_NUM 4 - struct b43legacy_wldev; -/* QOS parameters for a queue. */ -struct b43legacy_qos_params { - /* The QOS parameters */ - struct ieee80211_tx_queue_params p; -}; - /* Data structure for the WLAN parts (802.11 cores) of the b43legacy chip. */ struct b43legacy_wl { /* Pointer to the active wireless device on this chip */ @@ -619,18 +611,6 @@ struct b43legacy_wl { bool beacon1_uploaded; bool beacon_templates_virgin; /* Never wrote the templates? */ struct work_struct beacon_update_trigger; - /* The current QOS parameters for the 4 queues. */ - struct b43legacy_qos_params qos_params[B43legacy_QOS_QUEUE_NUM]; - - /* Packet transmit work */ - struct work_struct tx_work; - - /* Queue of packets to be transmitted. */ - struct sk_buff_head tx_queue[B43legacy_QOS_QUEUE_NUM]; - - /* Flag that implement the queues stopping. */ - bool tx_queue_stopped[B43legacy_QOS_QUEUE_NUM]; - }; /* Pointers to the firmware data and meta information about it. */ diff --git a/trunk/drivers/net/wireless/b43legacy/dma.c b/trunk/drivers/net/wireless/b43legacy/dma.c index f1f8bd09bd87..c5535adf6991 100644 --- a/trunk/drivers/net/wireless/b43legacy/dma.c +++ b/trunk/drivers/net/wireless/b43legacy/dma.c @@ -715,7 +715,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev, ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index); ring->index = controller_index; if (for_tx) { - ring->tx = true; + ring->tx = 1; ring->current_slot = -1; } else { if (ring->index == 0) { @@ -727,6 +727,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev, } else B43legacy_WARN_ON(1); } + spin_lock_init(&ring->lock); #ifdef CONFIG_B43LEGACY_DEBUG ring->last_injected_overflow = jiffies; #endif @@ -805,7 +806,7 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev) static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask) { u64 orig_mask = mask; - bool fallback = false; + bool fallback = 0; int err; /* Try to set the DMA mask. If it fails, try falling back to a @@ -819,12 +820,12 @@ static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask) } if (mask == DMA_BIT_MASK(64)) { mask = DMA_BIT_MASK(32); - fallback = true; + fallback = 1; continue; } if (mask == DMA_BIT_MASK(32)) { mask = DMA_BIT_MASK(30); - fallback = true; + fallback = 1; continue; } b43legacyerr(dev->wl, "The machine/kernel does not support " @@ -857,7 +858,7 @@ int b43legacy_dma_init(struct b43legacy_wldev *dev) #ifdef CONFIG_B43LEGACY_PIO b43legacywarn(dev->wl, "DMA for this device not supported. " "Falling back to PIO\n"); - dev->__using_pio = true; + dev->__using_pio = 1; return -EAGAIN; #else b43legacyerr(dev->wl, "DMA for this device not supported and " @@ -1067,7 +1068,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring, memset(meta, 0, sizeof(*meta)); meta->skb = skb; - meta->is_last_fragment = true; + meta->is_last_fragment = 1; meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); /* create a bounce buffer in zone_dma on mapping failure. 
*/ @@ -1143,8 +1144,10 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev, { struct b43legacy_dmaring *ring; int err = 0; + unsigned long flags; ring = priority_to_txring(dev, skb_get_queue_mapping(skb)); + spin_lock_irqsave(&ring->lock, flags); B43legacy_WARN_ON(!ring->tx); if (unlikely(ring->stopped)) { @@ -1154,14 +1157,16 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev, * For now, just refuse the transmit. */ if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE)) b43legacyerr(dev->wl, "Packet after queue stopped\n"); - return -ENOSPC; + err = -ENOSPC; + goto out_unlock; } if (unlikely(WARN_ON(free_slots(ring) < SLOTS_PER_PACKET))) { /* If we get here, we have a real error with the queue * full, but queues not stopped. */ b43legacyerr(dev->wl, "DMA queue overflow\n"); - return -ENOSPC; + err = -ENOSPC; + goto out_unlock; } /* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing @@ -1171,23 +1176,25 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev, /* Drop this packet, as we don't have the encryption key * anymore and must not transmit it unencrypted. */ dev_kfree_skb_any(skb); - return 0; + err = 0; + goto out_unlock; } if (unlikely(err)) { b43legacyerr(dev->wl, "DMA tx mapping failure\n"); - return err; + goto out_unlock; } if ((free_slots(ring) < SLOTS_PER_PACKET) || should_inject_overflow(ring)) { /* This TX ring is full. */ - unsigned int skb_mapping = skb_get_queue_mapping(skb); - ieee80211_stop_queue(dev->wl->hw, skb_mapping); - dev->wl->tx_queue_stopped[skb_mapping] = 1; - ring->stopped = true; + ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring)); + ring->stopped = 1; if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE)) b43legacydbg(dev->wl, "Stopped TX ring %d\n", ring->index); } +out_unlock: + spin_unlock_irqrestore(&ring->lock, flags); + return err; } @@ -1198,29 +1205,14 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev, struct b43legacy_dmadesc_meta *meta; int retry_limit; int slot; - int firstused; ring = parse_cookie(dev, status->cookie, &slot); if (unlikely(!ring)) return; - B43legacy_WARN_ON(!ring->tx); - - /* Sanity check: TX packets are processed in-order on one ring. - * Check if the slot deduced from the cookie really is the first - * used slot. */ - firstused = ring->current_slot - ring->used_slots + 1; - if (firstused < 0) - firstused = ring->nr_slots + firstused; - if (unlikely(slot != firstused)) { - /* This possibly is a firmware bug and will result in - * malfunction, memory leaks and/or stall of DMA functionality. - */ - b43legacydbg(dev->wl, "Out of order TX status report on DMA " - "ring %d. Expected %d, but got %d\n", - ring->index, firstused, slot); - return; - } + B43legacy_WARN_ON(!irqs_disabled()); + spin_lock(&ring->lock); + B43legacy_WARN_ON(!ring->tx); while (1) { B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); op32_idx2desc(ring, slot, &meta); @@ -1293,21 +1285,14 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev, dev->stats.last_tx = jiffies; if (ring->stopped) { B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET); - ring->stopped = false; - } - - if (dev->wl->tx_queue_stopped[ring->queue_prio]) { - dev->wl->tx_queue_stopped[ring->queue_prio] = 0; - } else { - /* If the driver queue is running wake the corresponding - * mac80211 queue. 
*/ - ieee80211_wake_queue(dev->wl->hw, ring->queue_prio); + ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring)); + ring->stopped = 0; if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE)) b43legacydbg(dev->wl, "Woke up TX ring %d\n", - ring->index); + ring->index); } - /* Add work to the queue. */ - ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work); + + spin_unlock(&ring->lock); } static void dma_rx(struct b43legacy_dmaring *ring, @@ -1430,14 +1415,22 @@ void b43legacy_dma_rx(struct b43legacy_dmaring *ring) static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring) { + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); B43legacy_WARN_ON(!ring->tx); op32_tx_suspend(ring); + spin_unlock_irqrestore(&ring->lock, flags); } static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring) { + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); B43legacy_WARN_ON(!ring->tx); op32_tx_resume(ring); + spin_unlock_irqrestore(&ring->lock, flags); } void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev) diff --git a/trunk/drivers/net/wireless/b43legacy/dma.h b/trunk/drivers/net/wireless/b43legacy/dma.h index c3282f906bc7..504a58767e95 100644 --- a/trunk/drivers/net/wireless/b43legacy/dma.h +++ b/trunk/drivers/net/wireless/b43legacy/dma.h @@ -150,9 +150,8 @@ struct b43legacy_dmaring { enum b43legacy_dmatype type; /* Boolean. Is this ring stopped at ieee80211 level? */ bool stopped; - /* The QOS priority assigned to this ring. Only used for TX rings. - * This is the mac80211 "queue" value. */ - u8 queue_prio; + /* Lock, only used for TX. */ + spinlock_t lock; struct b43legacy_wldev *dev; #ifdef CONFIG_B43LEGACY_DEBUG /* Maximum number of used slots. */ diff --git a/trunk/drivers/net/wireless/b43legacy/leds.c b/trunk/drivers/net/wireless/b43legacy/leds.c index fd4565389c77..2f1bfdc44f94 100644 --- a/trunk/drivers/net/wireless/b43legacy/leds.c +++ b/trunk/drivers/net/wireless/b43legacy/leds.c @@ -203,11 +203,11 @@ void b43legacy_leds_init(struct b43legacy_wldev *dev) if (sprom[i] == 0xFF) { /* There is no LED information in the SPROM * for this LED. Hardcode it here. 
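
/*
 * Editor's aside -- illustrative sketch, not part of the diff above.
 * The b43legacy dma.c hunks implement the usual TX flow-control
 * handshake: stop the mac80211 queue when the descriptor ring cannot
 * take another full packet, remember that in ring->stopped, and wake
 * the queue from the TX-status path once slots are free again.  A toy
 * single-threaded model of just that stop/wake bookkeeping:
 */
#include <stdbool.h>
#include <stdio.h>

#define RING_SLOTS		8
#define SLOTS_PER_PACKET	2

static int used_slots;
static bool ring_stopped;

static void tx_one_packet(void)
{
	used_slots += SLOTS_PER_PACKET;
	if (RING_SLOTS - used_slots < SLOTS_PER_PACKET) {
		ring_stopped = true;		/* ieee80211_stop_queue() */
		printf("ring full (%d slots used) -> queue stopped\n",
		       used_slots);
	}
}

static void tx_status_one_packet(void)
{
	used_slots -= SLOTS_PER_PACKET;
	if (ring_stopped && RING_SLOTS - used_slots >= SLOTS_PER_PACKET) {
		ring_stopped = false;		/* ieee80211_wake_queue() */
		printf("slots free again -> queue woken\n");
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		tx_one_packet();
	tx_status_one_packet();
	return 0;
}
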
*/ - activelow = false; + activelow = 0; switch (i) { case 0: behaviour = B43legacy_LED_ACTIVITY; - activelow = true; + activelow = 1; if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ) behaviour = B43legacy_LED_RADIO_ALL; break; diff --git a/trunk/drivers/net/wireless/b43legacy/main.c b/trunk/drivers/net/wireless/b43legacy/main.c index 75e70bce40f6..20f02437af8c 100644 --- a/trunk/drivers/net/wireless/b43legacy/main.c +++ b/trunk/drivers/net/wireless/b43legacy/main.c @@ -722,9 +722,9 @@ void b43legacy_wireless_core_reset(struct b43legacy_wldev *dev, u32 flags) macctl &= ~B43legacy_MACCTL_GMODE; if (flags & B43legacy_TMSLOW_GMODE) { macctl |= B43legacy_MACCTL_GMODE; - dev->phy.gmode = true; + dev->phy.gmode = 1; } else - dev->phy.gmode = false; + dev->phy.gmode = 0; macctl |= B43legacy_MACCTL_IHR_ENABLED; b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl); } @@ -811,7 +811,7 @@ static void b43legacy_calculate_link_quality(struct b43legacy_wldev *dev) if (dev->noisecalc.calculation_running) return; dev->noisecalc.channel_at_start = dev->phy.channel; - dev->noisecalc.calculation_running = true; + dev->noisecalc.calculation_running = 1; dev->noisecalc.nr_samples = 0; b43legacy_generate_noise_sample(dev); @@ -873,7 +873,7 @@ static void handle_irq_noise(struct b43legacy_wldev *dev) dev->stats.link_noise = average; drop_calculation: - dev->noisecalc.calculation_running = false; + dev->noisecalc.calculation_running = 0; return; } generate_new: @@ -889,7 +889,7 @@ static void handle_irq_tbtt_indication(struct b43legacy_wldev *dev) b43legacy_power_saving_ctl_bits(dev, -1, -1); } if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC)) - dev->dfq_valid = true; + dev->dfq_valid = 1; } static void handle_irq_atim_end(struct b43legacy_wldev *dev) @@ -898,7 +898,7 @@ static void handle_irq_atim_end(struct b43legacy_wldev *dev) b43legacy_write32(dev, B43legacy_MMIO_MACCMD, b43legacy_read32(dev, B43legacy_MMIO_MACCMD) | B43legacy_MACCMD_DFQ_VALID); - dev->dfq_valid = false; + dev->dfq_valid = 0; } } @@ -971,7 +971,7 @@ static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev, unsigned int i, len, variable_len; const struct ieee80211_mgmt *bcn; const u8 *ie; - bool tim_found = false; + bool tim_found = 0; unsigned int rate; u16 ctl; int antenna; @@ -1019,7 +1019,7 @@ static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev, /* A valid TIM is at least 4 bytes long. */ if (ie_len < 4) break; - tim_found = true; + tim_found = 1; tim_position = sizeof(struct b43legacy_plcp_hdr6); tim_position += offsetof(struct ieee80211_mgmt, @@ -1172,7 +1172,7 @@ static void b43legacy_upload_beacon0(struct b43legacy_wldev *dev) * but we don't use that feature anyway. */ b43legacy_write_probe_resp_template(dev, 0x268, 0x4A, &__b43legacy_ratetable[3]); - wl->beacon0_uploaded = true; + wl->beacon0_uploaded = 1; } static void b43legacy_upload_beacon1(struct b43legacy_wldev *dev) @@ -1182,7 +1182,7 @@ static void b43legacy_upload_beacon1(struct b43legacy_wldev *dev) if (wl->beacon1_uploaded) return; b43legacy_write_beacon_template(dev, 0x468, 0x1A); - wl->beacon1_uploaded = true; + wl->beacon1_uploaded = 1; } static void handle_irq_beacon(struct b43legacy_wldev *dev) @@ -1212,7 +1212,7 @@ static void handle_irq_beacon(struct b43legacy_wldev *dev) if (unlikely(wl->beacon_templates_virgin)) { /* We never uploaded a beacon before. * Upload both templates now, but only mark one valid. 
*/ - wl->beacon_templates_virgin = false; + wl->beacon_templates_virgin = 0; b43legacy_upload_beacon0(dev); b43legacy_upload_beacon1(dev); cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD); @@ -1275,8 +1275,8 @@ static void b43legacy_update_templates(struct b43legacy_wl *wl) if (wl->current_beacon) dev_kfree_skb_any(wl->current_beacon); wl->current_beacon = beacon; - wl->beacon0_uploaded = false; - wl->beacon1_uploaded = false; + wl->beacon0_uploaded = 0; + wl->beacon1_uploaded = 0; ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger); } @@ -2440,64 +2440,30 @@ static int b43legacy_rng_init(struct b43legacy_wl *wl) return err; } -static void b43legacy_tx_work(struct work_struct *work) -{ - struct b43legacy_wl *wl = container_of(work, struct b43legacy_wl, - tx_work); - struct b43legacy_wldev *dev; - struct sk_buff *skb; - int queue_num; - int err = 0; - - mutex_lock(&wl->mutex); - dev = wl->current_dev; - if (unlikely(!dev || b43legacy_status(dev) < B43legacy_STAT_STARTED)) { - mutex_unlock(&wl->mutex); - return; - } - - for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) { - while (skb_queue_len(&wl->tx_queue[queue_num])) { - skb = skb_dequeue(&wl->tx_queue[queue_num]); - if (b43legacy_using_pio(dev)) - err = b43legacy_pio_tx(dev, skb); - else - err = b43legacy_dma_tx(dev, skb); - if (err == -ENOSPC) { - wl->tx_queue_stopped[queue_num] = 1; - ieee80211_stop_queue(wl->hw, queue_num); - skb_queue_head(&wl->tx_queue[queue_num], skb); - break; - } - if (unlikely(err)) - dev_kfree_skb(skb); /* Drop it */ - err = 0; - } - - if (!err) - wl->tx_queue_stopped[queue_num] = 0; - } - - mutex_unlock(&wl->mutex); -} - static void b43legacy_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + int err = -ENODEV; + unsigned long flags; - if (unlikely(skb->len < 2 + 2 + 6)) { - /* Too short, this can't be a valid frame. */ + if (unlikely(!dev)) + goto out; + if (unlikely(b43legacy_status(dev) < B43legacy_STAT_STARTED)) + goto out; + /* DMA-TX is done without a global lock. */ + if (b43legacy_using_pio(dev)) { + spin_lock_irqsave(&wl->irq_lock, flags); + err = b43legacy_pio_tx(dev, skb); + spin_unlock_irqrestore(&wl->irq_lock, flags); + } else + err = b43legacy_dma_tx(dev, skb); +out: + if (unlikely(err)) { + /* Drop the packet. */ dev_kfree_skb_any(skb); - return; } - B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags); - - skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb); - if (!wl->tx_queue_stopped[skb->queue_mapping]) - ieee80211_queue_work(wl->hw, &wl->tx_work); - else - ieee80211_stop_queue(wl->hw, skb->queue_mapping); } static int b43legacy_op_conf_tx(struct ieee80211_hw *hw, @@ -2544,7 +2510,7 @@ static int find_wldev_for_phymode(struct b43legacy_wl *wl, if (d->phy.possible_phymodes & phymode) { /* Ok, this device supports the PHY-mode. * Set the gmode bit. 
*/ - *gmode = true; + *gmode = 1; *dev = d; return 0; @@ -2580,7 +2546,7 @@ static int b43legacy_switch_phymode(struct b43legacy_wl *wl, struct b43legacy_wldev *uninitialized_var(up_dev); struct b43legacy_wldev *down_dev; int err; - bool gmode = false; + bool gmode = 0; int prev_status; err = find_wldev_for_phymode(wl, new_mode, &up_dev, &gmode); @@ -2913,7 +2879,6 @@ static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev) { struct b43legacy_wl *wl = dev->wl; unsigned long flags; - int queue_num; if (b43legacy_status(dev) < B43legacy_STAT_STARTED) return; @@ -2933,16 +2898,11 @@ static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev) /* Must unlock as it would otherwise deadlock. No races here. * Cancel the possibly running self-rearming periodic work. */ cancel_delayed_work_sync(&dev->periodic_work); - cancel_work_sync(&wl->tx_work); mutex_lock(&wl->mutex); - /* Drain all TX queues. */ - for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) { - while (skb_queue_len(&wl->tx_queue[queue_num])) - dev_kfree_skb(skb_dequeue(&wl->tx_queue[queue_num])); - } + ieee80211_stop_queues(wl->hw); /* FIXME this could cause a deadlock */ -b43legacy_mac_suspend(dev); + b43legacy_mac_suspend(dev); free_irq(dev->dev->irq, dev); b43legacydbg(wl, "Wireless interface stopped\n"); } @@ -3084,12 +3044,12 @@ static void setup_struct_phy_for_init(struct b43legacy_wldev *dev, /* Assume the radio is enabled. If it's not enabled, the state will * immediately get fixed on the first periodic work run. */ - dev->radio_hw_enable = true; + dev->radio_hw_enable = 1; phy->savedpctlreg = 0xFFFF; - phy->aci_enable = false; - phy->aci_wlan_automatic = false; - phy->aci_hw_rssi = false; + phy->aci_enable = 0; + phy->aci_wlan_automatic = 0; + phy->aci_hw_rssi = 0; lo = phy->_lo_pairs; if (lo) @@ -3121,7 +3081,7 @@ static void setup_struct_phy_for_init(struct b43legacy_wldev *dev, static void setup_struct_wldev_for_init(struct b43legacy_wldev *dev) { /* Flags */ - dev->dfq_valid = false; + dev->dfq_valid = 0; /* Stats */ memset(&dev->stats, 0, sizeof(dev->stats)); @@ -3227,9 +3187,9 @@ static void prepare_phy_data_for_init(struct b43legacy_wldev *dev) phy->lofcal = 0xFFFF; phy->initval = 0xFFFF; - phy->aci_enable = false; - phy->aci_wlan_automatic = false; - phy->aci_hw_rssi = false; + phy->aci_enable = 0; + phy->aci_wlan_automatic = 0; + phy->aci_hw_rssi = 0; phy->antenna_diversity = 0xFFFF; memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig)); @@ -3395,7 +3355,7 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw, b43legacydbg(wl, "Adding Interface type %d\n", vif->type); dev = wl->current_dev; - wl->operating = true; + wl->operating = 1; wl->vif = vif; wl->if_type = vif->type; memcpy(wl->mac_addr, vif->addr, ETH_ALEN); @@ -3429,7 +3389,7 @@ static void b43legacy_op_remove_interface(struct ieee80211_hw *hw, B43legacy_WARN_ON(wl->vif != vif); wl->vif = NULL; - wl->operating = false; + wl->operating = 0; spin_lock_irqsave(&wl->irq_lock, flags); b43legacy_adjust_opmode(dev); @@ -3453,10 +3413,10 @@ static int b43legacy_op_start(struct ieee80211_hw *hw) memset(wl->bssid, 0, ETH_ALEN); memset(wl->mac_addr, 0, ETH_ALEN); wl->filter_flags = 0; - wl->beacon0_uploaded = false; - wl->beacon1_uploaded = false; - wl->beacon_templates_virgin = true; - wl->radio_enabled = true; + wl->beacon0_uploaded = 0; + wl->beacon1_uploaded = 0; + wl->beacon_templates_virgin = 1; + wl->radio_enabled = 1; mutex_lock(&wl->mutex); @@ -3495,7 +3455,7 @@ static void b43legacy_op_stop(struct ieee80211_hw 
*hw) if (b43legacy_status(dev) >= B43legacy_STAT_STARTED) b43legacy_wireless_core_stop(dev); b43legacy_wireless_core_exit(dev); - wl->radio_enabled = false; + wl->radio_enabled = 0; mutex_unlock(&wl->mutex); } @@ -3654,7 +3614,7 @@ static int b43legacy_wireless_core_attach(struct b43legacy_wldev *dev) have_bphy = 1; dev->phy.gmode = (have_gphy || have_bphy); - dev->phy.radio_on = true; + dev->phy.radio_on = 1; tmp = dev->phy.gmode ? B43legacy_TMSLOW_GMODE : 0; b43legacy_wireless_core_reset(dev, tmp); @@ -3745,7 +3705,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev, (void (*)(unsigned long))b43legacy_interrupt_tasklet, (unsigned long)wldev); if (modparam_pio) - wldev->__using_pio = true; + wldev->__using_pio = 1; INIT_LIST_HEAD(&wldev->list); err = b43legacy_wireless_core_attach(wldev); @@ -3788,7 +3748,6 @@ static int b43legacy_wireless_init(struct ssb_device *dev) struct ieee80211_hw *hw; struct b43legacy_wl *wl; int err = -ENOMEM; - int queue_num; b43legacy_sprom_fixup(dev->bus); @@ -3823,13 +3782,6 @@ static int b43legacy_wireless_init(struct ssb_device *dev) mutex_init(&wl->mutex); INIT_LIST_HEAD(&wl->devlist); INIT_WORK(&wl->beacon_update_trigger, b43legacy_beacon_update_trigger_work); - INIT_WORK(&wl->tx_work, b43legacy_tx_work); - - /* Initialize queues and flags. */ - for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) { - skb_queue_head_init(&wl->tx_queue[queue_num]); - wl->tx_queue_stopped[queue_num] = 0; - } ssb_set_devtypedata(dev, wl); b43legacyinfo(wl, "Broadcom %04X WLAN found (core revision %u)\n", diff --git a/trunk/drivers/net/wireless/b43legacy/radio.c b/trunk/drivers/net/wireless/b43legacy/radio.c index fcbafcd603cc..475eb14e665b 100644 --- a/trunk/drivers/net/wireless/b43legacy/radio.c +++ b/trunk/drivers/net/wireless/b43legacy/radio.c @@ -1067,7 +1067,7 @@ b43legacy_radio_interference_mitigation_enable(struct b43legacy_wldev *dev, if (b43legacy_phy_read(dev, 0x0033) & 0x0800) break; - phy->aci_enable = true; + phy->aci_enable = 1; phy_stacksave(B43legacy_PHY_RADIO_BITFIELD); phy_stacksave(B43legacy_PHY_G_CRS); @@ -1279,7 +1279,7 @@ b43legacy_radio_interference_mitigation_disable(struct b43legacy_wldev *dev, if (!(b43legacy_phy_read(dev, 0x0033) & 0x0800)) break; - phy->aci_enable = false; + phy->aci_enable = 0; phy_stackrestore(B43legacy_PHY_RADIO_BITFIELD); phy_stackrestore(B43legacy_PHY_G_CRS); @@ -1346,10 +1346,10 @@ int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev, (phy->rev == 0) || (!phy->gmode)) return -ENODEV; - phy->aci_wlan_automatic = false; + phy->aci_wlan_automatic = 0; switch (mode) { case B43legacy_RADIO_INTERFMODE_AUTOWLAN: - phy->aci_wlan_automatic = true; + phy->aci_wlan_automatic = 1; if (phy->aci_enable) mode = B43legacy_RADIO_INTERFMODE_MANUALWLAN; else @@ -1371,8 +1371,8 @@ int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev, currentmode); if (mode == B43legacy_RADIO_INTERFMODE_NONE) { - phy->aci_enable = false; - phy->aci_hw_rssi = false; + phy->aci_enable = 0; + phy->aci_hw_rssi = 0; } else b43legacy_radio_interference_mitigation_enable(dev, mode); phy->interfmode = mode; @@ -2102,7 +2102,7 @@ void b43legacy_radio_turn_on(struct b43legacy_wldev *dev) phy->radio_off_context.rfover); b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL, phy->radio_off_context.rfoverval); - phy->radio_off_context.valid = false; + phy->radio_off_context.valid = 0; } channel = phy->channel; err = b43legacy_radio_selectchannel(dev, @@ -2113,7 +2113,7 @@ void 
b43legacy_radio_turn_on(struct b43legacy_wldev *dev) default: B43legacy_BUG_ON(1); } - phy->radio_on = true; + phy->radio_on = 1; } void b43legacy_radio_turn_off(struct b43legacy_wldev *dev, bool force) @@ -2131,14 +2131,14 @@ void b43legacy_radio_turn_off(struct b43legacy_wldev *dev, bool force) if (!force) { phy->radio_off_context.rfover = rfover; phy->radio_off_context.rfoverval = rfoverval; - phy->radio_off_context.valid = true; + phy->radio_off_context.valid = 1; } b43legacy_phy_write(dev, B43legacy_PHY_RFOVER, rfover | 0x008C); b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL, rfoverval & 0xFF73); } else b43legacy_phy_write(dev, 0x0015, 0xAA00); - phy->radio_on = false; + phy->radio_on = 0; b43legacydbg(dev->wl, "Radio initialized\n"); } diff --git a/trunk/drivers/net/wireless/brcm80211/Kconfig b/trunk/drivers/net/wireless/brcm80211/Kconfig index cd6375de2a60..2069fc8f7ad1 100644 --- a/trunk/drivers/net/wireless/brcm80211/Kconfig +++ b/trunk/drivers/net/wireless/brcm80211/Kconfig @@ -3,8 +3,9 @@ config BRCMUTIL config BRCMSMAC tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver" + depends on PCI depends on MAC80211 - depends on BCMA + depends on BCMA=n select BRCMUTIL select FW_LOADER select CRC_CCITT @@ -17,26 +18,16 @@ config BRCMSMAC config BRCMFMAC tristate "Broadcom IEEE802.11n embedded FullMAC WLAN driver" + depends on MMC depends on CFG80211 select BRCMUTIL + select FW_LOADER ---help--- This module adds support for embedded wireless adapters based on - Broadcom IEEE802.11n FullMAC chipsets. It has to work with at least - one of the bus interface support. If you choose to build a module, + Broadcom IEEE802.11n FullMAC chipsets. This driver uses the kernel's + wireless extensions subsystem. If you choose to build a module, it'll be called brcmfmac.ko. -config BRCMFMAC_SDIO - bool "SDIO bus interface support for FullMAC" - depends on MMC - depends on BRCMFMAC - select FW_LOADER - default y - ---help--- - This option enables the SDIO bus interface support for Broadcom - FullMAC WLAN driver. - Say Y if you want to use brcmfmac for a compatible SDIO interface - wireless card. 
- config BRCMDBG bool "Broadcom driver debug functions" depends on BRCMSMAC || BRCMFMAC diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/trunk/drivers/net/wireless/brcm80211/brcmfmac/Makefile index 9ca9ea1135ea..b44e3094588a 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/Makefile +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/Makefile @@ -19,16 +19,15 @@ ccflags-y += \ -Idrivers/net/wireless/brcm80211/brcmfmac \ -Idrivers/net/wireless/brcm80211/include -obj-$(CONFIG_BRCMFMAC) += brcmfmac.o -brcmfmac-objs += \ - wl_cfg80211.o \ - dhd_cdc.o \ - dhd_common.o \ - dhd_linux.o -brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \ - dhd_sdio.o \ - bcmsdh.o \ - bcmsdh_sdmmc.o \ - sdio_chip.o +DHDOFILES = \ + wl_cfg80211.o \ + dhd_cdc.o \ + dhd_common.o \ + dhd_sdio.o \ + dhd_linux.o \ + bcmsdh.o \ + bcmsdh_sdmmc.o +obj-$(CONFIG_BRCMFMAC) += brcmfmac.o +brcmfmac-objs += $(DHDOFILES) ccflags-y += -D__CHECK_ENDIAN__ diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h b/trunk/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h new file mode 100644 index 000000000000..d7d3afd5a10f --- /dev/null +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2011 Broadcom Corporation + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _bcmchip_h_ +#define _bcmchip_h_ + +/* bcm4329 */ +/* SDIO device core, ID 0x829 */ +#define BCM4329_CORE_BUS_BASE 0x18011000 +/* internal memory core, ID 0x80e */ +#define BCM4329_CORE_SOCRAM_BASE 0x18003000 +/* ARM Cortex M3 core, ID 0x82a */ +#define BCM4329_CORE_ARM_BASE 0x18002000 +#define BCM4329_RAMSIZE 0x48000 +/* firmware name */ +#define BCM4329_FW_NAME "brcm/bcm4329-fullmac-4.bin" +#define BCM4329_NV_NAME "brcm/bcm4329-fullmac-4.txt" + +#endif /* _bcmchip_h_ */ diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c index 4bc8d251acf8..89ff94da556a 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c @@ -31,6 +31,7 @@ #include #include #include +#include "dhd.h" #include "dhd_bus.h" #include "dhd_dbg.h" #include "sdio_host.h" @@ -50,18 +51,12 @@ static void brcmf_sdioh_irqhandler(struct sdio_func *func) sdio_claim_host(func); } -/* dummy handler for SDIO function 2 interrupt */ -static void brcmf_sdioh_dummy_irq_handler(struct sdio_func *func) -{ -} - int brcmf_sdcard_intr_reg(struct brcmf_sdio_dev *sdiodev) { brcmf_dbg(TRACE, "Entering\n"); sdio_claim_host(sdiodev->func[1]); sdio_claim_irq(sdiodev->func[1], brcmf_sdioh_irqhandler); - sdio_claim_irq(sdiodev->func[2], brcmf_sdioh_dummy_irq_handler); sdio_release_host(sdiodev->func[1]); return 0; @@ -72,7 +67,6 @@ int brcmf_sdcard_intr_dereg(struct brcmf_sdio_dev *sdiodev) brcmf_dbg(TRACE, "Entering\n"); sdio_claim_host(sdiodev->func[1]); - sdio_release_irq(sdiodev->func[2]); sdio_release_irq(sdiodev->func[1]); sdio_release_host(sdiodev->func[1]); @@ -228,12 +222,19 @@ bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev) return sdiodev->regfail; } -static int brcmf_sdcard_recv_prepare(struct brcmf_sdio_dev *sdiodev, uint fn, - uint flags, uint width, u32 *addr) +int +brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, + uint flags, + u8 *buf, uint nbytes, struct sk_buff *pkt) { - uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK; + int status; + uint incr_fix; + uint width; + uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK; int err = 0; + brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes); + /* Async not implemented yet */ if (flags & SDIO_REQ_ASYNC) return -ENOTSUPP; @@ -246,114 +247,29 @@ static int brcmf_sdcard_recv_prepare(struct brcmf_sdio_dev *sdiodev, uint fn, sdiodev->sbwad = bar0; } - *addr &= SBSDIO_SB_OFT_ADDR_MASK; - - if (width == 4) - *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; - - return 0; -} - -int -brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, - uint flags, u8 *buf, uint nbytes) -{ - struct sk_buff *mypkt; - int err; - - mypkt = brcmu_pkt_buf_get_skb(nbytes); - if (!mypkt) { - brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n", - nbytes); - return -EIO; - } - - err = brcmf_sdcard_recv_pkt(sdiodev, addr, fn, flags, mypkt); - if (!err) - memcpy(buf, mypkt->data, nbytes); - - brcmu_pkt_buf_free_skb(mypkt); - return err; -} - -int -brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, - uint flags, struct sk_buff *pkt) -{ - uint incr_fix; - uint width; - int err = 0; - - brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", - fn, addr, pkt->len); - - width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; - err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr); - if (err) - return err; + addr &= SBSDIO_SB_OFT_ADDR_MASK; incr_fix = (flags & SDIO_REQ_FIXED) ? 
SDIOH_DATA_FIX : SDIOH_DATA_INC; - err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ, - fn, addr, pkt); - - return err; -} - -int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, - uint flags, struct sk_buff_head *pktq) -{ - uint incr_fix; - uint width; - int err = 0; - - brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", - fn, addr, pktq->qlen); - width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; - err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr); - if (err) - return err; + if (width == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; - incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; - err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr, - pktq); + status = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ, + fn, addr, width, nbytes, buf, pkt); - return err; + return status; } int brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, - uint flags, u8 *buf, uint nbytes) -{ - struct sk_buff *mypkt; - int err; - - mypkt = brcmu_pkt_buf_get_skb(nbytes); - if (!mypkt) { - brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n", - nbytes); - return -EIO; - } - - memcpy(mypkt->data, buf, nbytes); - err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, mypkt); - - brcmu_pkt_buf_free_skb(mypkt); - return err; - -} - -int -brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, - uint flags, struct sk_buff *pkt) + uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt) { uint incr_fix; uint width; uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK; int err = 0; - brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", - fn, addr, pkt->len); + brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes); /* Async not implemented yet */ if (flags & SDIO_REQ_ASYNC) @@ -375,39 +291,18 @@ brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; return brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn, - addr, pkt); + addr, width, nbytes, buf, pkt); } int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr, u8 *buf, uint nbytes) { - struct sk_buff *mypkt; - bool write = rw ? SDIOH_WRITE : SDIOH_READ; - int err; - addr &= SBSDIO_SB_OFT_ADDR_MASK; addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; - mypkt = brcmu_pkt_buf_get_skb(nbytes); - if (!mypkt) { - brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n", - nbytes); - return -EIO; - } - - /* For a write, copy the buffer data into the packet. */ - if (write) - memcpy(mypkt->data, buf, nbytes); - - err = brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC, write, - SDIO_FUNC_1, addr, mypkt); - - /* For a read, copy the packet data back to the buffer. */ - if (!err && !write) - memcpy(buf, mypkt->data, nbytes); - - brcmu_pkt_buf_free_skb(mypkt); - return err; + return brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC, + (rw ? 
SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1, + addr, 4, nbytes, buf, NULL); } int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn) @@ -438,7 +333,7 @@ int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) sdiodev->sbwad = SI_ENUM_BASE; /* try to attach to the target device */ - sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev); + sdiodev->bus = brcmf_sdbrcm_probe(0, 0, 0, 0, regs, sdiodev); if (!sdiodev->bus) { brcmf_dbg(ERROR, "device attach failed\n"); ret = -ENODEV; diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c index 9b8c0ed833d4..bbaeb2d5c93a 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c @@ -31,15 +31,15 @@ #include #include #include "sdio_host.h" +#include "dhd.h" #include "dhd_dbg.h" -#include "dhd_bus.h" +#include "wl_cfg80211.h" #define SDIO_VENDOR_ID_BROADCOM 0x02d0 #define DMA_ALIGN_MASK 0x03 #define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 -#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330 #define SDIO_FUNC1_BLOCKSIZE 64 #define SDIO_FUNC2_BLOCKSIZE 512 @@ -47,7 +47,6 @@ /* devices we support, null terminated */ static const struct sdio_device_id brcmf_sdmmc_ids[] = { {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)}, - {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)}, { /* end: all zeroes */ }, }; MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids); @@ -205,75 +204,62 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev, return err_ret; } -/* precondition: host controller is claimed */ static int -brcmf_sdioh_request_data(struct brcmf_sdio_dev *sdiodev, uint write, bool fifo, - uint func, uint addr, struct sk_buff *pkt, uint pktlen) -{ - int err_ret = 0; - - if ((write) && (!fifo)) { - err_ret = sdio_memcpy_toio(sdiodev->func[func], addr, - ((u8 *) (pkt->data)), pktlen); - } else if (write) { - err_ret = sdio_memcpy_toio(sdiodev->func[func], addr, - ((u8 *) (pkt->data)), pktlen); - } else if (fifo) { - err_ret = sdio_readsb(sdiodev->func[func], - ((u8 *) (pkt->data)), addr, pktlen); - } else { - err_ret = sdio_memcpy_fromio(sdiodev->func[func], - ((u8 *) (pkt->data)), - addr, pktlen); - } - - return err_ret; -} - -/* - * This function takes a queue of packets. The packets on the queue - * are assumed to be properly aligned by the caller. 
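/*
 * The send/recv helpers in the hunks above all follow the same backplane
 * windowing pattern: the SDIO function exposes only a window of the chip's
 * backplane, so the high bits of the 32-bit address select the window
 * (set once and cached in sbwad) and the low bits are the in-window
 * offset. A generic sketch of that address preparation; the mask value and
 * the set_window() callback are illustrative assumptions, not the driver's
 * constants.
 */
#define EXAMPLE_OFT_ADDR_MASK	0x7fff	/* assumed in-window offset bits */

static int example_prep_backplane_addr(unsigned int *addr,
				       unsigned int *cached_window,
				       int (*set_window)(unsigned int base))
{
	unsigned int base = *addr & ~EXAMPLE_OFT_ADDR_MASK;
	int err = 0;

	if (base != *cached_window) {		/* move the window if needed */
		err = set_window(base);
		if (!err)
			*cached_window = base;
	}
	*addr &= EXAMPLE_OFT_ADDR_MASK;		/* offset inside the window */
	return err;
}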
- */ -int -brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc, - uint write, uint func, uint addr, - struct sk_buff_head *pktq) +brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc, + uint write, uint func, uint addr, + struct sk_buff *pkt) { bool fifo = (fix_inc == SDIOH_DATA_FIX); u32 SGCount = 0; int err_ret = 0; - struct sk_buff *pkt; + struct sk_buff *pnext; brcmf_dbg(TRACE, "Enter\n"); - brcmf_pm_resume_wait(sdiodev, &sdiodev->request_chain_wait); + brcmf_pm_resume_wait(sdiodev, &sdiodev->request_packet_wait); if (brcmf_pm_resume_error(sdiodev)) return -EIO; /* Claim host controller */ sdio_claim_host(sdiodev->func[func]); - - skb_queue_walk(pktq, pkt) { - uint pkt_len = pkt->len; + for (pnext = pkt; pnext; pnext = pnext->next) { + uint pkt_len = pnext->len; pkt_len += 3; pkt_len &= 0xFFFFFFFC; - err_ret = brcmf_sdioh_request_data(sdiodev, write, fifo, func, - addr, pkt, pkt_len); + if ((write) && (!fifo)) { + err_ret = sdio_memcpy_toio(sdiodev->func[func], addr, + ((u8 *) (pnext->data)), + pkt_len); + } else if (write) { + err_ret = sdio_memcpy_toio(sdiodev->func[func], addr, + ((u8 *) (pnext->data)), + pkt_len); + } else if (fifo) { + err_ret = sdio_readsb(sdiodev->func[func], + ((u8 *) (pnext->data)), + addr, pkt_len); + } else { + err_ret = sdio_memcpy_fromio(sdiodev->func[func], + ((u8 *) (pnext->data)), + addr, pkt_len); + } + if (err_ret) { brcmf_dbg(ERROR, "%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n", - write ? "TX" : "RX", pkt, SGCount, addr, + write ? "TX" : "RX", pnext, SGCount, addr, pkt_len, err_ret); } else { brcmf_dbg(TRACE, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n", - write ? "TX" : "RX", pkt, SGCount, addr, + write ? "TX" : "RX", pnext, SGCount, addr, pkt_len); } + if (!fifo) addr += pkt_len; - SGCount++; + } /* Release host controller */ @@ -284,45 +270,91 @@ brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc, } /* - * This function takes a single DMA-able packet. + * This function takes a buffer or packet, and fixes everything up + * so that in the end, a DMA-able packet is created. + * + * A buffer does not have an associated packet pointer, + * and may or may not be aligned. + * A packet may consist of a single packet, or a packet chain. + * If it is a packet chain, then all the packets in the chain + * must be properly aligned. + * + * If the packet data is not aligned, then there may only be + * one packet, and in this case, it is copied to a new + * aligned packet. + * */ int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev, uint fix_inc, uint write, uint func, uint addr, + uint reg_width, uint buflen_u, u8 *buffer, struct sk_buff *pkt) { - int status; - uint pkt_len = pkt->len; - bool fifo = (fix_inc == SDIOH_DATA_FIX); + int Status; + struct sk_buff *mypkt = NULL; brcmf_dbg(TRACE, "Enter\n"); - if (pkt == NULL) - return -EINVAL; - brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait); if (brcmf_pm_resume_error(sdiodev)) return -EIO; + /* Case 1: we don't have a packet. */ + if (pkt == NULL) { + brcmf_dbg(DATA, "Creating new %s Packet, len=%d\n", + write ? "TX" : "RX", buflen_u); + mypkt = brcmu_pkt_buf_get_skb(buflen_u); + if (!mypkt) { + brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n", + buflen_u); + return -EIO; + } - /* Claim host controller */ - sdio_claim_host(sdiodev->func[func]); + /* For a write, copy the buffer data into the packet. 
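/*
 * The comment added above brcmf_sdioh_request_buffer() describes the
 * bounce-buffer strategy: data that is not suitably aligned for DMA is
 * copied through a freshly allocated, aligned packet and copied back for
 * reads. A self-contained sketch of that strategy; the alignment mask, the
 * use of alloc_skb() instead of brcmu_pkt_buf_get_skb(), and the do_xfer()
 * callback are simplifying assumptions.
 */
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/errno.h>

#define EXAMPLE_DMA_ALIGN_MASK	0x03	/* assumed 4-byte DMA alignment */

static int example_xfer_aligned(struct sk_buff *pkt, bool write,
				int (*do_xfer)(void *buf, unsigned int len))
{
	struct sk_buff *bounce;
	int err;

	if (((unsigned long)pkt->data & EXAMPLE_DMA_ALIGN_MASK) == 0)
		return do_xfer(pkt->data, pkt->len);	/* already DMA-able */

	bounce = alloc_skb(pkt->len, GFP_KERNEL);
	if (!bounce)
		return -ENOMEM;
	skb_put(bounce, pkt->len);

	if (write)
		memcpy(bounce->data, pkt->data, pkt->len);
	err = do_xfer(bounce->data, bounce->len);
	if (!err && !write)
		memcpy(pkt->data, bounce->data, bounce->len);

	kfree_skb(bounce);
	return err;
}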
*/ + if (write) + memcpy(mypkt->data, buffer, buflen_u); + + Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write, + func, addr, mypkt); + + /* For a read, copy the packet data back to the buffer. */ + if (!write) + memcpy(buffer, mypkt->data, buflen_u); + + brcmu_pkt_buf_free_skb(mypkt); + } else if (((ulong) (pkt->data) & DMA_ALIGN_MASK) != 0) { + /* + * Case 2: We have a packet, but it is unaligned. + * In this case, we cannot have a chain (pkt->next == NULL) + */ + brcmf_dbg(DATA, "Creating aligned %s Packet, len=%d\n", + write ? "TX" : "RX", pkt->len); + mypkt = brcmu_pkt_buf_get_skb(pkt->len); + if (!mypkt) { + brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n", + pkt->len); + return -EIO; + } - pkt_len += 3; - pkt_len &= (uint)~3; + /* For a write, copy the buffer data into the packet. */ + if (write) + memcpy(mypkt->data, pkt->data, pkt->len); - status = brcmf_sdioh_request_data(sdiodev, write, fifo, func, - addr, pkt, pkt_len); - if (status) { - brcmf_dbg(ERROR, "%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n", - write ? "TX" : "RX", pkt, addr, pkt_len, status); - } else { - brcmf_dbg(TRACE, "%s xfr'd %p, addr=0x%05x, len=%d\n", - write ? "TX" : "RX", pkt, addr, pkt_len); - } + Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write, + func, addr, mypkt); - /* Release host controller */ - sdio_release_host(sdiodev->func[func]); + /* For a read, copy the packet data back to the buffer. */ + if (!write) + memcpy(pkt->data, mypkt->data, mypkt->len); + + brcmu_pkt_buf_free_skb(mypkt); + } else { /* case 3: We have a packet and + it is aligned. */ + brcmf_dbg(DATA, "Aligned %s Packet, direct DMA\n", + write ? "Tx" : "Rx"); + Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write, + func, addr, pkt); + } - return status; + return Status; } /* Read client card reg */ @@ -462,7 +494,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, { int ret = 0; struct brcmf_sdio_dev *sdiodev; - struct brcmf_bus *bus_if; brcmf_dbg(TRACE, "Enter\n"); brcmf_dbg(TRACE, "func->class=%x\n", func->class); brcmf_dbg(TRACE, "sdio_vendor: 0x%04x\n", func->vendor); @@ -474,26 +505,17 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, brcmf_dbg(ERROR, "card private drvdata occupied\n"); return -ENXIO; } - bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL); - if (!bus_if) - return -ENOMEM; sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL); - if (!sdiodev) { - kfree(bus_if); + if (!sdiodev) return -ENOMEM; - } sdiodev->func[0] = func->card->sdio_func[0]; sdiodev->func[1] = func; - sdiodev->bus_if = bus_if; - bus_if->bus_priv = sdiodev; - bus_if->type = SDIO_BUS; - bus_if->align = BRCMF_SDALIGN; dev_set_drvdata(&func->card->dev, sdiodev); atomic_set(&sdiodev->suspend, false); init_waitqueue_head(&sdiodev->request_byte_wait); init_waitqueue_head(&sdiodev->request_word_wait); - init_waitqueue_head(&sdiodev->request_chain_wait); + init_waitqueue_head(&sdiodev->request_packet_wait); init_waitqueue_head(&sdiodev->request_buffer_wait); } @@ -503,10 +525,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, return -ENODEV; sdiodev->func[2] = func; - bus_if = sdiodev->bus_if; - sdiodev->dev = &func->dev; - dev_set_drvdata(&func->dev, bus_if); - brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_probe...\n"); ret = brcmf_sdio_probe(sdiodev); } @@ -516,7 +534,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, static void brcmf_ops_sdio_remove(struct sdio_func *func) { - struct brcmf_bus *bus_if; struct brcmf_sdio_dev *sdiodev; brcmf_dbg(TRACE, 
"Enter\n"); brcmf_dbg(INFO, "func->class=%x\n", func->class); @@ -525,13 +542,10 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func) brcmf_dbg(INFO, "Function#: 0x%04x\n", func->num); if (func->num == 2) { - bus_if = dev_get_drvdata(&func->dev); - sdiodev = bus_if->bus_priv; + sdiodev = dev_get_drvdata(&func->card->dev); brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_remove...\n"); brcmf_sdio_remove(sdiodev); dev_set_drvdata(&func->card->dev, NULL); - dev_set_drvdata(&func->dev, NULL); - kfree(bus_if); kfree(sdiodev); } } @@ -540,12 +554,14 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func) static int brcmf_sdio_suspend(struct device *dev) { mmc_pm_flag_t sdio_flags; + struct brcmf_sdio_dev *sdiodev; struct sdio_func *func = dev_to_sdio_func(dev); - struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev); int ret = 0; brcmf_dbg(TRACE, "\n"); + sdiodev = dev_get_drvdata(&func->card->dev); + atomic_set(&sdiodev->suspend, true); sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]); @@ -567,9 +583,10 @@ static int brcmf_sdio_suspend(struct device *dev) static int brcmf_sdio_resume(struct device *dev) { + struct brcmf_sdio_dev *sdiodev; struct sdio_func *func = dev_to_sdio_func(dev); - struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev); + sdiodev = dev_get_drvdata(&func->card->dev); brcmf_sdio_wdtmr_enable(sdiodev, true); atomic_set(&sdiodev->suspend, false); return 0; @@ -593,26 +610,17 @@ static struct sdio_driver brcmf_sdmmc_driver = { #endif /* CONFIG_PM_SLEEP */ }; -static void __exit brcmf_sdio_exit(void) +/* bus register interface */ +int brcmf_bus_register(void) { brcmf_dbg(TRACE, "Enter\n"); - sdio_unregister_driver(&brcmf_sdmmc_driver); + return sdio_register_driver(&brcmf_sdmmc_driver); } -static int __init brcmf_sdio_init(void) +void brcmf_bus_unregister(void) { - int ret; - brcmf_dbg(TRACE, "Enter\n"); - ret = sdio_register_driver(&brcmf_sdmmc_driver); - - if (ret) - brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret); - - return ret; + sdio_unregister_driver(&brcmf_sdmmc_driver); } - -module_init(brcmf_sdio_init); -module_exit(brcmf_sdio_exit); diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd.h index e58ea40a75b0..4645766b4070 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd.h @@ -87,7 +87,7 @@ #define TOE_TX_CSUM_OL 0x00000001 #define TOE_RX_CSUM_OL 0x00000002 -#define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */ +#define BRCMF_BSS_INFO_VERSION 108 /* current ver of brcmf_bss_info struct */ /* size of brcmf_scan_params not including variable length array */ #define BRCMF_SCAN_PARAMS_FIXED_SIZE 64 @@ -122,6 +122,8 @@ /* For supporting multiple interfaces */ #define BRCMF_MAX_IFS 16 +#define BRCMF_DEL_IF -0xe +#define BRCMF_BAD_IF -0xf #define DOT11_BSSTYPE_ANY 2 #define DOT11_MAX_DEFAULT_KEYS 4 @@ -156,6 +158,18 @@ struct brcmf_event { struct brcmf_event_msg msg; } __packed; +struct dngl_stats { + unsigned long rx_packets; /* total packets received */ + unsigned long tx_packets; /* total packets transmitted */ + unsigned long rx_bytes; /* total bytes received */ + unsigned long tx_bytes; /* total bytes transmitted */ + unsigned long rx_errors; /* bad packets received */ + unsigned long tx_errors; /* packet transmit problems */ + unsigned long rx_dropped; /* packets dropped by dongle */ + unsigned long tx_dropped; /* packets dropped by dongle */ + unsigned long 
multicast; /* multicast packets received */ +}; + /* event codes sent by the dongle to this driver */ #define BRCMF_E_SET_SSID 0 #define BRCMF_E_JOIN 1 @@ -305,6 +319,13 @@ struct brcmf_event { #define BRCMF_E_LINK_ASSOC_REC 3 #define BRCMF_E_LINK_BSSCFG_DIS 4 +/* The level of bus communication with the dongle */ +enum brcmf_bus_state { + BRCMF_BUS_DOWN, /* Not ready for frame transfers */ + BRCMF_BUS_LOAD, /* Download access only (CPU reset) */ + BRCMF_BUS_DATA /* Ready for frame transfers */ +}; + /* Pattern matching filter. Specifies an offset within received packets to * start matching, the pattern to match, the size of the pattern, and a bitmask * that indicates which bits within the pattern should be matched. @@ -344,7 +365,7 @@ struct brcmf_pkt_filter_enable_le { * Applications MUST CHECK ie_offset field and length field to access IEs and * next bss_info structure in a vector (in struct brcmf_scan_results) */ -struct brcmf_bss_info_le { +struct brcmf_bss_info { __le32 version; /* version field */ __le32 length; /* byte length of data in this record, * starting at version and including IEs @@ -445,13 +466,14 @@ struct brcmf_scan_results { u32 buflen; u32 version; u32 count; - struct brcmf_bss_info_le bss_info_le[]; + struct brcmf_bss_info bss_info[1]; }; struct brcmf_scan_results_le { __le32 buflen; __le32 version; __le32 count; + struct brcmf_bss_info bss_info[1]; }; /* used for association with a specific BSSID and chanspec list */ @@ -471,6 +493,10 @@ struct brcmf_join_params { struct brcmf_assoc_params_le params_le; }; +/* size of brcmf_scan_results not including variable length array */ +#define BRCMF_SCAN_RESULTS_FIXED_SIZE \ + (sizeof(struct brcmf_scan_results) - sizeof(struct brcmf_bss_info)) + /* incremental scan results struct */ struct brcmf_iscan_results { union { @@ -485,7 +511,7 @@ struct brcmf_iscan_results { /* size of brcmf_iscan_results not including variable length array */ #define BRCMF_ISCAN_RESULTS_FIXED_SIZE \ - (sizeof(struct brcmf_scan_results) + \ + (BRCMF_SCAN_RESULTS_FIXED_SIZE + \ offsetof(struct brcmf_iscan_results, results)) struct brcmf_wsec_key { @@ -553,19 +579,25 @@ struct brcmf_dcmd { }; /* Forward decls for struct brcmf_pub (see below) */ +struct brcmf_bus; /* device bus info */ struct brcmf_proto; /* device communication protocol info */ +struct brcmf_info; /* device driver info */ struct brcmf_cfg80211_dev; /* cfg80211 device info */ /* Common structure for module and instance linkage */ struct brcmf_pub { /* Linkage ponters */ - struct brcmf_bus *bus_if; + struct brcmf_bus *bus; struct brcmf_proto *prot; + struct brcmf_info *info; struct brcmf_cfg80211_dev *config; - struct device *dev; /* fullmac dongle device pointer */ /* Internal brcmf items */ + bool up; /* Driver up/down (to OS) */ + bool txoff; /* Transmit flow-controlled */ + enum brcmf_bus_state busstate; uint hdrlen; /* Total BRCMF header length (proto + bus) */ + uint maxctl; /* Max size rxctl request from proto to bus */ uint rxsz; /* Rx buffer size bus module should use */ u8 wme_dp; /* wme discard priority */ @@ -573,21 +605,48 @@ struct brcmf_pub { bool iswl; /* Dongle-resident driver is wl */ unsigned long drv_version; /* Version of dongle-resident driver */ u8 mac[ETH_ALEN]; /* MAC address obtained from dongle */ + struct dngl_stats dstats; /* Stats for dongle-based data */ /* Additional stats for the bus level */ + /* Data packets sent to dongle */ + unsigned long tx_packets; /* Multicast data packets sent to dongle */ unsigned long tx_multicast; + /* Errors in sending data to 
dongle */ + unsigned long tx_errors; + /* Control packets sent to dongle */ + unsigned long tx_ctlpkts; + /* Errors sending control frames to dongle */ + unsigned long tx_ctlerrs; + /* Packets sent up the network interface */ + unsigned long rx_packets; + /* Multicast packets sent up the network interface */ + unsigned long rx_multicast; + /* Errors processing rx data packets */ + unsigned long rx_errors; + /* Control frames processed from dongle */ + unsigned long rx_ctlpkts; + + /* Errors in processing rx control frames */ + unsigned long rx_ctlerrs; + /* Packets dropped locally (no memory) */ + unsigned long rx_dropped; /* Packets flushed due to unscheduled sendup thread */ unsigned long rx_flushed; /* Number of times dpc scheduled by watchdog timer */ unsigned long wd_dpc_sched; + /* Number of packets where header read-ahead was used. */ + unsigned long rx_readahead_cnt; + /* Number of tx packets we had to realloc for headroom */ + unsigned long tx_realloc; /* Number of flow control pkts recvd */ unsigned long fc_packets; /* Last error return */ int bcmerror; + uint tickcnt; /* Last error from dongle */ int dongle_error; @@ -605,14 +664,6 @@ struct brcmf_pub { u8 country_code[BRCM_CNTRY_BUF_SZ]; char eventmask[BRCMF_EVENTING_MASK_LEN]; - struct brcmf_if *iflist[BRCMF_MAX_IFS]; - - struct mutex proto_block; - - struct work_struct setmacaddr_work; - struct work_struct multicast_work; - u8 macvalue[ETH_ALEN]; - atomic_t pend_8021x_cnt; }; struct brcmf_if_event { @@ -632,33 +683,67 @@ extern const struct bcmevent_name bcmevent_names[]; extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint len); +/* Indication from bus module regarding presence/insertion of dongle. + * Return struct brcmf_pub pointer, used as handle to OS module in later calls. + * Returned structure should have bus and prot pointers filled in. + * bus_hdrlen specifies required headroom for bus module header. + */ +extern struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, + uint bus_hdrlen); extern int brcmf_net_attach(struct brcmf_pub *drvr, int idx); extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev); extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len); +/* Indication from bus module regarding removal/absence of dongle */ +extern void brcmf_detach(struct brcmf_pub *drvr); + +/* Indication from bus module to change flow-control state */ +extern void brcmf_txflowcontrol(struct brcmf_pub *drvr, int ifidx, bool on); + +extern bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q, + struct sk_buff *pkt, int prec); + +/* Receive frame for delivery to OS. Callee disposes of rxp. 
*/ +extern void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, + struct sk_buff *rxp, int numpkt); + /* Return pointer to interface name */ extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx); +/* Notify tx completion */ +extern void brcmf_txcomplete(struct brcmf_pub *drvr, struct sk_buff *txp, + bool success); + /* Query dongle */ extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf, uint len); +/* OS independent layer functions */ +extern int brcmf_os_proto_block(struct brcmf_pub *drvr); +extern int brcmf_os_proto_unblock(struct brcmf_pub *drvr); #ifdef BCMDBG extern int brcmf_write_to_file(struct brcmf_pub *drvr, const u8 *buf, int size); #endif /* BCMDBG */ -extern int brcmf_ifname2idx(struct brcmf_pub *drvr, char *name); -extern int brcmf_c_host_event(struct brcmf_pub *drvr, int *idx, +extern int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name); +extern int brcmf_c_host_event(struct brcmf_info *drvr_priv, int *idx, void *pktdata, struct brcmf_event_msg *, void **data_ptr); -extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx); +extern void brcmf_c_init(void); + +extern int brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx, + struct net_device *ndev, char *name, u8 *mac_addr, + u32 flags, u8 bssidx); +extern void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx); /* Send packet to dongle via data channel */ extern int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx,\ struct sk_buff *pkt); +extern int brcmf_bus_start(struct brcmf_pub *drvr); + extern void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg); extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg, int enable, int master_mode); @@ -667,4 +752,25 @@ extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg, #define BRCMF_DCMD_MEDLEN 1536 /* "med" cmd buffer required */ #define BRCMF_DCMD_MAXLEN 8192 /* max length cmd buffer required */ +/* message levels */ +#define BRCMF_ERROR_VAL 0x0001 +#define BRCMF_TRACE_VAL 0x0002 +#define BRCMF_INFO_VAL 0x0004 +#define BRCMF_DATA_VAL 0x0008 +#define BRCMF_CTL_VAL 0x0010 +#define BRCMF_TIMER_VAL 0x0020 +#define BRCMF_HDRS_VAL 0x0040 +#define BRCMF_BYTES_VAL 0x0080 +#define BRCMF_INTR_VAL 0x0100 +#define BRCMF_GLOM_VAL 0x0400 +#define BRCMF_EVENT_VAL 0x0800 +#define BRCMF_BTA_VAL 0x1000 +#define BRCMF_ISCAN_VAL 0x2000 + +/* Enter idle immediately (no timeout) */ +#define BRCMF_IDLE_IMMEDIATE (-1) +#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change + when idle */ +#define BRCMF_IDLE_INTERVAL 1 + #endif /* _BRCMF_H_ */ diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h index ad9be2410b59..a249407c9a1b 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h @@ -17,89 +17,41 @@ #ifndef _BRCMF_BUS_H_ #define _BRCMF_BUS_H_ -/* The level of bus communication with the dongle */ -enum brcmf_bus_state { - BRCMF_BUS_DOWN, /* Not ready for frame transfers */ - BRCMF_BUS_LOAD, /* Download access only (CPU reset) */ - BRCMF_BUS_DATA /* Ready for frame transfers */ -}; +/* Packet alignment for most efficient SDIO (can change based on platform) */ +#define BRCMF_SDALIGN (1 << 6) -struct dngl_stats { - unsigned long rx_packets; /* total packets received */ - unsigned long tx_packets; /* total packets transmitted */ - unsigned long rx_bytes; /* total bytes received */ - unsigned long tx_bytes; /* total bytes 
transmitted */ - unsigned long rx_errors; /* bad packets received */ - unsigned long tx_errors; /* packet transmit problems */ - unsigned long rx_dropped; /* packets dropped by dongle */ - unsigned long tx_dropped; /* packets dropped by dongle */ - unsigned long multicast; /* multicast packets received */ -}; - -/* interface structure between common and bus layer */ -struct brcmf_bus { - u8 type; /* bus type */ - void *bus_priv; /* pointer to bus private structure */ - void *drvr; /* pointer to driver pub structure brcmf_pub */ - enum brcmf_bus_state state; - uint maxctl; /* Max size rxctl request from proto to bus */ - bool drvr_up; /* Status flag of driver up/down */ - unsigned long tx_realloc; /* Tx packets realloced for headroom */ - struct dngl_stats dstats; /* Stats for dongle-based data */ - u8 align; /* bus alignment requirement */ - - /* interface functions pointers */ - /* Stop bus module: clear pending frames, disable data flow */ - void (*brcmf_bus_stop)(struct device *); - /* Initialize bus module: prepare for communication w/dongle */ - int (*brcmf_bus_init)(struct device *); - /* Send a data frame to the dongle. Callee disposes of txp. */ - int (*brcmf_bus_txdata)(struct device *, struct sk_buff *); - /* Send/receive a control message to/from the dongle. - * Expects caller to enforce a single outstanding transaction. - */ - int (*brcmf_bus_txctl)(struct device *, unsigned char *, uint); - int (*brcmf_bus_rxctl)(struct device *, unsigned char *, uint); -}; +/* watchdog polling interval in ms */ +#define BRCMF_WD_POLL_MS 10 /* - * interface functions from common layer + * Exported from brcmf bus module (brcmf_usb, brcmf_sdio) */ -/* Remove any protocol-specific data header. */ -extern int brcmf_proto_hdrpull(struct device *dev, int *ifidx, - struct sk_buff *rxp); +/* Indicate (dis)interest in finding dongles. */ +extern int brcmf_bus_register(void); +extern void brcmf_bus_unregister(void); -extern bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, - struct sk_buff *pkt, int prec); +/* obtain linux device object providing bus function */ +extern struct device *brcmf_bus_get_device(struct brcmf_bus *bus); -/* Receive frame for delivery to OS. Callee disposes of rxp. */ -extern void brcmf_rx_frame(struct device *dev, int ifidx, - struct sk_buff_head *rxlist); -static inline void brcmf_rx_packet(struct device *dev, int ifidx, - struct sk_buff *pkt) -{ - struct sk_buff_head q; +/* Stop bus module: clear pending frames, disable data flow */ +extern void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus); - skb_queue_head_init(&q); - skb_queue_tail(&q, pkt); - brcmf_rx_frame(dev, ifidx, &q); -} +/* Initialize bus module: prepare for communication w/dongle */ +extern int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr); -/* Indication from bus module regarding presence/insertion of dongle. */ -extern int brcmf_attach(uint bus_hdrlen, struct device *dev); -/* Indication from bus module regarding removal/absence of dongle */ -extern void brcmf_detach(struct device *dev); +/* Send a data frame to the dongle. Callee disposes of txp. */ +extern int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *txp); -/* Indication from bus module to change flow-control state */ -extern void brcmf_txflowcontrol(struct device *dev, int ifidx, bool on); +/* Send/receive a control message to/from the dongle. + * Expects caller to enforce a single outstanding transaction. 
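/*
 * The struct brcmf_bus on the '-' side of the dhd_bus.h hunk above hides
 * the host bus behind a small table of function pointers so the common
 * layer never calls an SDIO- or USB-specific symbol directly. A condensed
 * sketch of that ops-table pattern, with member and helper names
 * simplified for illustration:
 */
#include <linux/device.h>
#include <linux/skbuff.h>

struct example_bus_ops {
	int  (*init)(struct device *dev);
	void (*stop)(struct device *dev);
	int  (*txdata)(struct device *dev, struct sk_buff *skb);
	int  (*txctl)(struct device *dev, unsigned char *msg, unsigned int len);
	int  (*rxctl)(struct device *dev, unsigned char *msg, unsigned int len);
};

/* The common layer only ever goes through the table: */
static int example_bus_send(const struct example_bus_ops *ops,
			    struct device *dev, struct sk_buff *skb)
{
	return ops->txdata(dev, skb);
}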
+ */ +extern int +brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen); -/* Notify tx completion */ -extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, - bool success); +extern int +brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen); -extern int brcmf_bus_start(struct device *dev); +extern void brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick); -extern int brcmf_add_if(struct device *dev, int ifidx, - char *name, u8 *mac_addr); #endif /* _BRCMF_BUS_H_ */ diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c index ac8d1f437888..e34c5c3d1d55 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c @@ -58,7 +58,7 @@ struct brcmf_proto_cdc_dcmd { * Used on data packets to convey priority across USB. */ #define BDC_HEADER_LEN 4 -#define BDC_PROTO_VER 2 /* Protocol version */ +#define BDC_PROTO_VER 1 /* Protocol version */ #define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */ #define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */ #define BDC_FLAG_SUM_GOOD 0x04 /* Good RX checksums */ @@ -77,19 +77,18 @@ struct brcmf_proto_bdc_header { u8 flags; u8 priority; /* 802.1d Priority, 4:7 flow control info for usb */ u8 flags2; - u8 data_offset; + u8 rssi; }; #define RETRIES 2 /* # of retries to retrieve matching dcmd response */ -#define BUS_HEADER_LEN (16+64) /* Must be atleast SDPCM_RESERVE +#define BUS_HEADER_LEN (16+BRCMF_SDALIGN) /* Must be atleast SDPCM_RESERVE * (amount of header tha might be added) * plus any space that might be needed - * for bus alignment padding. + * for alignment padding. */ -#define ROUND_UP_MARGIN 2048 /* Biggest bus block size possible for +#define ROUND_UP_MARGIN 2048 /* Biggest SDIO block size possible for * round off at the end of buffer - * Currently is SDIO */ struct brcmf_proto { @@ -117,9 +116,8 @@ static int brcmf_proto_cdc_msg(struct brcmf_pub *drvr) len = CDC_MAX_MSG_SIZE; /* Send request */ - return drvr->bus_if->brcmf_bus_txctl(drvr->dev, - (unsigned char *)&prot->msg, - len); + return brcmf_sdbrcm_bus_txctl(drvr->bus, (unsigned char *)&prot->msg, + len); } static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len) @@ -130,7 +128,7 @@ static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len) brcmf_dbg(TRACE, "Enter\n"); do { - ret = drvr->bus_if->brcmf_bus_rxctl(drvr->dev, + ret = brcmf_sdbrcm_bus_rxctl(drvr->bus, (unsigned char *)&prot->msg, len + sizeof(struct brcmf_proto_cdc_dcmd)); if (ret < 0) @@ -282,11 +280,11 @@ brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx, struct brcmf_dcmd *dcmd, struct brcmf_proto *prot = drvr->prot; int ret = -1; - if (drvr->bus_if->state == BRCMF_BUS_DOWN) { + if (drvr->busstate == BRCMF_BUS_DOWN) { brcmf_dbg(ERROR, "bus is down. 
we have nothing to do.\n"); return ret; } - mutex_lock(&drvr->proto_block); + brcmf_os_proto_block(drvr); brcmf_dbg(TRACE, "Enter\n"); @@ -340,7 +338,7 @@ brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx, struct brcmf_dcmd *dcmd, prot->pending = false; done: - mutex_unlock(&drvr->proto_block); + brcmf_os_proto_unblock(drvr); return ret; } @@ -374,16 +372,14 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx, h->priority = (pktbuf->priority & BDC_PRIORITY_MASK); h->flags2 = 0; - h->data_offset = 0; + h->rssi = 0; BDC_SET_IF_IDX(h, ifidx); } -int brcmf_proto_hdrpull(struct device *dev, int *ifidx, +int brcmf_proto_hdrpull(struct brcmf_pub *drvr, int *ifidx, struct sk_buff *pktbuf) { struct brcmf_proto_bdc_header *h; - struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_pub *drvr = bus_if->drvr; brcmf_dbg(TRACE, "Enter\n"); @@ -439,7 +435,7 @@ int brcmf_proto_attach(struct brcmf_pub *drvr) drvr->prot = cdc; drvr->hdrlen += BDC_HEADER_LEN; - drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN + + drvr->maxctl = BRCMF_DCMD_MAXLEN + sizeof(struct brcmf_proto_cdc_dcmd) + ROUND_UP_MARGIN; return 0; @@ -455,6 +451,18 @@ void brcmf_proto_detach(struct brcmf_pub *drvr) drvr->prot = NULL; } +void brcmf_proto_dstats(struct brcmf_pub *drvr) +{ + /* No stats from dongle added yet, copy bus stats */ + drvr->dstats.tx_packets = drvr->tx_packets; + drvr->dstats.tx_errors = drvr->tx_errors; + drvr->dstats.rx_packets = drvr->rx_packets; + drvr->dstats.rx_errors = drvr->rx_errors; + drvr->dstats.rx_dropped = drvr->rx_dropped; + drvr->dstats.multicast = drvr->rx_multicast; + return; +} + int brcmf_proto_init(struct brcmf_pub *drvr) { int ret = 0; @@ -462,19 +470,19 @@ int brcmf_proto_init(struct brcmf_pub *drvr) brcmf_dbg(TRACE, "Enter\n"); - mutex_lock(&drvr->proto_block); + brcmf_os_proto_block(drvr); /* Get the device MAC address */ strcpy(buf, "cur_etheraddr"); ret = brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR, buf, sizeof(buf)); if (ret < 0) { - mutex_unlock(&drvr->proto_block); + brcmf_os_proto_unblock(drvr); return ret; } memcpy(drvr->mac, buf, ETH_ALEN); - mutex_unlock(&drvr->proto_block); + brcmf_os_proto_unblock(drvr); ret = brcmf_c_preinit_dcmds(drvr); diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c index a51d8f5d36fc..891826197f96 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c @@ -32,6 +32,8 @@ #define PKTFILTER_BUF_SIZE 2048 #define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */ +int brcmf_msg_level; + #define MSGTRACE_VERSION 1 #define BRCMF_PKT_FILTER_FIXED_LEN offsetof(struct brcmf_pkt_filter_le, u) @@ -83,14 +85,25 @@ brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen) return len; } -bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, +void brcmf_c_init(void) +{ + /* Init global variables at run-time, not as part of the declaration. + * This is required to support init/de-init of the driver. + * Initialization + * of globals as part of the declaration results in non-deterministic + * behaviour since the value of the globals may be different on the + * first time that the driver is initialized vs subsequent + * initializations. 
+ */ + brcmf_msg_level = BRCMF_ERROR_VAL; +} + +bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q, struct sk_buff *pkt, int prec) { struct sk_buff *p; int eprec = -1; /* precedence to evict from */ bool discard_oldest; - struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_pub *drvr = bus_if->drvr; /* Fast case, precedence queue is not full and we are also not * exceeding total queue length @@ -433,7 +446,7 @@ brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data) #endif /* BCMDBG */ int -brcmf_c_host_event(struct brcmf_pub *drvr, int *ifidx, void *pktdata, +brcmf_c_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata, struct brcmf_event_msg *event, void **data_ptr) { /* check whether packet is a BRCM event pkt */ @@ -475,18 +488,19 @@ brcmf_c_host_event(struct brcmf_pub *drvr, int *ifidx, void *pktdata, if (ifevent->ifidx > 0 && ifevent->ifidx < BRCMF_MAX_IFS) { if (ifevent->action == BRCMF_E_IF_ADD) - brcmf_add_if(drvr->dev, ifevent->ifidx, + brcmf_add_if(drvr_priv, ifevent->ifidx, NULL, event->ifname, - pvt_data->eth.h_dest); + pvt_data->eth.h_dest, + ifevent->flags, ifevent->bssidx); else - brcmf_del_if(drvr, ifevent->ifidx); + brcmf_del_if(drvr_priv, ifevent->ifidx); } else { brcmf_dbg(ERROR, "Invalid ifidx %d for %s\n", ifevent->ifidx, event->ifname); } /* send up the if event: btamp user needs it */ - *ifidx = brcmf_ifname2idx(drvr, event->ifname); + *ifidx = brcmf_ifname2idx(drvr_priv, event->ifname); break; /* These are what external supplicant/authenticator wants */ @@ -498,7 +512,7 @@ brcmf_c_host_event(struct brcmf_pub *drvr, int *ifidx, void *pktdata, default: /* Fall through: this should get _everything_ */ - *ifidx = brcmf_ifname2idx(drvr, event->ifname); + *ifidx = brcmf_ifname2idx(drvr_priv, event->ifname); brcmf_dbg(TRACE, "MAC event %d, flags %x, status %x\n", type, flags, status); @@ -798,7 +812,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr) "event_msgs" + '\0' + bitvec */ uint up = 0; char buf[128], *ptr; - u32 dongle_align = drvr->bus_if->align; + u32 dongle_align = BRCMF_SDALIGN; u32 glom = 0; u32 roaming = 1; uint bcn_timeout = 3; @@ -806,7 +820,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr) int scan_unassoc_time = 40; int i; - mutex_lock(&drvr->proto_block); + brcmf_os_proto_block(drvr); /* Set Country code */ if (drvr->country_code[0] != 0) { @@ -875,7 +889,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr) 0, true); } - mutex_unlock(&drvr->proto_block); + brcmf_os_proto_unblock(drvr); return 0; } diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h index bb26ee36bc68..7467922f0536 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h @@ -17,21 +17,6 @@ #ifndef _BRCMF_DBG_H_ #define _BRCMF_DBG_H_ -/* message levels */ -#define BRCMF_ERROR_VAL 0x0001 -#define BRCMF_TRACE_VAL 0x0002 -#define BRCMF_INFO_VAL 0x0004 -#define BRCMF_DATA_VAL 0x0008 -#define BRCMF_CTL_VAL 0x0010 -#define BRCMF_TIMER_VAL 0x0020 -#define BRCMF_HDRS_VAL 0x0040 -#define BRCMF_BYTES_VAL 0x0080 -#define BRCMF_INTR_VAL 0x0100 -#define BRCMF_GLOM_VAL 0x0400 -#define BRCMF_EVENT_VAL 0x0800 -#define BRCMF_BTA_VAL 0x1000 -#define BRCMF_ISCAN_VAL 0x2000 - #if defined(BCMDBG) #define brcmf_dbg(level, fmt, ...) 
\ diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c index eb9eb766ac27..4acbac5a74c6 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c @@ -43,6 +43,7 @@ #include "dhd_proto.h" #include "dhd_dbg.h" #include "wl_cfg80211.h" +#include "bcmchip.h" MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac driver."); @@ -52,19 +53,48 @@ MODULE_LICENSE("Dual BSD/GPL"); /* Interface control information */ struct brcmf_if { - struct brcmf_pub *drvr; /* back pointer to brcmf_pub */ + struct brcmf_info *info; /* back pointer to brcmf_info */ /* OS/stack specifics */ struct net_device *ndev; struct net_device_stats stats; int idx; /* iface idx in dongle */ + int state; /* interface state */ u8 mac_addr[ETH_ALEN]; /* assigned MAC address */ }; +/* Local private structure (extension of pub) */ +struct brcmf_info { + struct brcmf_pub pub; + + /* OS/stack specifics */ + struct brcmf_if *iflist[BRCMF_MAX_IFS]; + + struct mutex proto_block; + + struct work_struct setmacaddr_work; + struct work_struct multicast_work; + u8 macvalue[ETH_ALEN]; + atomic_t pend_8021x_cnt; +}; + /* Error bits */ -int brcmf_msg_level = BRCMF_ERROR_VAL; module_param(brcmf_msg_level, int, 0); -int brcmf_ifname2idx(struct brcmf_pub *drvr, char *name) + +static int brcmf_net2idx(struct brcmf_info *drvr_priv, struct net_device *ndev) +{ + int i = 0; + + while (i < BRCMF_MAX_IFS) { + if (drvr_priv->iflist[i] && drvr_priv->iflist[i]->ndev == ndev) + return i; + i++; + } + + return BRCMF_BAD_IF; +} + +int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name) { int i = BRCMF_MAX_IFS; struct brcmf_if *ifp; @@ -73,7 +103,7 @@ int brcmf_ifname2idx(struct brcmf_pub *drvr, char *name) return 0; while (--i > 0) { - ifp = drvr->iflist[i]; + ifp = drvr_priv->iflist[i]; if (ifp && !strncmp(ifp->ndev->name, name, IFNAMSIZ)) break; } @@ -85,18 +115,20 @@ int brcmf_ifname2idx(struct brcmf_pub *drvr, char *name) char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx) { + struct brcmf_info *drvr_priv = drvr->info; + if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) { brcmf_dbg(ERROR, "ifidx %d out of range\n", ifidx); return ""; } - if (drvr->iflist[ifidx] == NULL) { + if (drvr_priv->iflist[ifidx] == NULL) { brcmf_dbg(ERROR, "null i/f %d\n", ifidx); return ""; } - if (drvr->iflist[ifidx]->ndev) - return drvr->iflist[ifidx]->ndev->name; + if (drvr_priv->iflist[ifidx]->ndev) + return drvr_priv->iflist[ifidx]->ndev->name; return ""; } @@ -114,10 +146,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work) uint buflen; int ret; - struct brcmf_pub *drvr = container_of(work, struct brcmf_pub, + struct brcmf_info *drvr_priv = container_of(work, struct brcmf_info, multicast_work); - ndev = drvr->iflist[0]->ndev; + ndev = drvr_priv->iflist[0]->ndev; cnt = netdev_mc_count(ndev); /* Determine initial value of allmulti flag */ @@ -151,10 +183,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work) dcmd.len = buflen; dcmd.set = true; - ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len); + ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len); if (ret < 0) { brcmf_dbg(ERROR, "%s: set mcast_list failed, cnt %d\n", - brcmf_ifname(drvr, 0), cnt); + brcmf_ifname(&drvr_priv->pub, 0), cnt); dcmd_value = cnt ? 
true : dcmd_value; } @@ -176,7 +208,7 @@ static void _brcmf_set_multicast_list(struct work_struct *work) ("allmulti", (void *)&dcmd_le_value, sizeof(dcmd_le_value), buf, buflen)) { brcmf_dbg(ERROR, "%s: mkiovar failed for allmulti, datalen %d buflen %u\n", - brcmf_ifname(drvr, 0), + brcmf_ifname(&drvr_priv->pub, 0), (int)sizeof(dcmd_value), buflen); kfree(buf); return; @@ -188,10 +220,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work) dcmd.len = buflen; dcmd.set = true; - ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len); + ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len); if (ret < 0) { brcmf_dbg(ERROR, "%s: set allmulti %d failed\n", - brcmf_ifname(drvr, 0), + brcmf_ifname(&drvr_priv->pub, 0), le32_to_cpu(dcmd_le_value)); } @@ -209,10 +241,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work) dcmd.len = sizeof(dcmd_le_value); dcmd.set = true; - ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len); + ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len); if (ret < 0) { brcmf_dbg(ERROR, "%s: set promisc %d failed\n", - brcmf_ifname(drvr, 0), + brcmf_ifname(&drvr_priv->pub, 0), le32_to_cpu(dcmd_le_value)); } } @@ -224,14 +256,14 @@ _brcmf_set_mac_address(struct work_struct *work) struct brcmf_dcmd dcmd; int ret; - struct brcmf_pub *drvr = container_of(work, struct brcmf_pub, + struct brcmf_info *drvr_priv = container_of(work, struct brcmf_info, setmacaddr_work); brcmf_dbg(TRACE, "enter\n"); - if (!brcmf_c_mkiovar("cur_etheraddr", (char *)drvr->macvalue, + if (!brcmf_c_mkiovar("cur_etheraddr", (char *)drvr_priv->macvalue, ETH_ALEN, buf, 32)) { brcmf_dbg(ERROR, "%s: mkiovar failed for cur_etheraddr\n", - brcmf_ifname(drvr, 0)); + brcmf_ifname(&drvr_priv->pub, 0)); return; } memset(&dcmd, 0, sizeof(dcmd)); @@ -240,40 +272,52 @@ _brcmf_set_mac_address(struct work_struct *work) dcmd.len = 32; dcmd.set = true; - ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len); + ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len); if (ret < 0) brcmf_dbg(ERROR, "%s: set cur_etheraddr failed\n", - brcmf_ifname(drvr, 0)); + brcmf_ifname(&drvr_priv->pub, 0)); else - memcpy(drvr->iflist[0]->ndev->dev_addr, - drvr->macvalue, ETH_ALEN); + memcpy(drvr_priv->iflist[0]->ndev->dev_addr, + drvr_priv->macvalue, ETH_ALEN); return; } static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr) { - struct brcmf_if *ifp = netdev_priv(ndev); - struct brcmf_pub *drvr = ifp->drvr; + struct brcmf_info *drvr_priv = *(struct brcmf_info **) + netdev_priv(ndev); struct sockaddr *sa = (struct sockaddr *)addr; + int ifidx; + + ifidx = brcmf_net2idx(drvr_priv, ndev); + if (ifidx == BRCMF_BAD_IF) + return -1; - memcpy(&drvr->macvalue, sa->sa_data, ETH_ALEN); - schedule_work(&drvr->setmacaddr_work); + memcpy(&drvr_priv->macvalue, sa->sa_data, ETH_ALEN); + schedule_work(&drvr_priv->setmacaddr_work); return 0; } static void brcmf_netdev_set_multicast_list(struct net_device *ndev) { - struct brcmf_if *ifp = netdev_priv(ndev); - struct brcmf_pub *drvr = ifp->drvr; + struct brcmf_info *drvr_priv = *(struct brcmf_info **) + netdev_priv(ndev); + int ifidx; + + ifidx = brcmf_net2idx(drvr_priv, ndev); + if (ifidx == BRCMF_BAD_IF) + return; - schedule_work(&drvr->multicast_work); + schedule_work(&drvr_priv->multicast_work); } int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf) { + struct brcmf_info *drvr_priv = drvr->info; + /* Reject if down */ - if (!drvr->bus_if->drvr_up || (drvr->bus_if->state == BRCMF_BUS_DOWN)) + if (!drvr->up || (drvr->busstate == 
BRCMF_BUS_DOWN)) return -ENODEV; /* Update multicast statistic */ @@ -284,118 +328,122 @@ int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf) if (is_multicast_ether_addr(eh->h_dest)) drvr->tx_multicast++; if (ntohs(eh->h_proto) == ETH_P_PAE) - atomic_inc(&drvr->pend_8021x_cnt); + atomic_inc(&drvr_priv->pend_8021x_cnt); } /* If the protocol uses a data header, apply it */ brcmf_proto_hdrpush(drvr, ifidx, pktbuf); /* Use bus module to send data frame */ - return drvr->bus_if->brcmf_bus_txdata(drvr->dev, pktbuf); + return brcmf_sdbrcm_bus_txdata(drvr->bus, pktbuf); } static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev) { int ret; - struct brcmf_if *ifp = netdev_priv(ndev); - struct brcmf_pub *drvr = ifp->drvr; + struct brcmf_info *drvr_priv = *(struct brcmf_info **) + netdev_priv(ndev); + int ifidx; brcmf_dbg(TRACE, "Enter\n"); /* Reject if down */ - if (!drvr->bus_if->drvr_up || - (drvr->bus_if->state == BRCMF_BUS_DOWN)) { - brcmf_dbg(ERROR, "xmit rejected drvup=%d state=%d\n", - drvr->bus_if->drvr_up, - drvr->bus_if->state); + if (!drvr_priv->pub.up || (drvr_priv->pub.busstate == BRCMF_BUS_DOWN)) { + brcmf_dbg(ERROR, "xmit rejected pub.up=%d busstate=%d\n", + drvr_priv->pub.up, drvr_priv->pub.busstate); netif_stop_queue(ndev); return -ENODEV; } - if (!drvr->iflist[ifp->idx]) { - brcmf_dbg(ERROR, "bad ifidx %d\n", ifp->idx); + ifidx = brcmf_net2idx(drvr_priv, ndev); + if (ifidx == BRCMF_BAD_IF) { + brcmf_dbg(ERROR, "bad ifidx %d\n", ifidx); netif_stop_queue(ndev); return -ENODEV; } /* Make sure there's enough room for any header */ - if (skb_headroom(skb) < drvr->hdrlen) { + if (skb_headroom(skb) < drvr_priv->pub.hdrlen) { struct sk_buff *skb2; brcmf_dbg(INFO, "%s: insufficient headroom\n", - brcmf_ifname(drvr, ifp->idx)); - drvr->bus_if->tx_realloc++; - skb2 = skb_realloc_headroom(skb, drvr->hdrlen); + brcmf_ifname(&drvr_priv->pub, ifidx)); + drvr_priv->pub.tx_realloc++; + skb2 = skb_realloc_headroom(skb, drvr_priv->pub.hdrlen); dev_kfree_skb(skb); skb = skb2; if (skb == NULL) { brcmf_dbg(ERROR, "%s: skb_realloc_headroom failed\n", - brcmf_ifname(drvr, ifp->idx)); + brcmf_ifname(&drvr_priv->pub, ifidx)); ret = -ENOMEM; goto done; } } - ret = brcmf_sendpkt(drvr, ifp->idx, skb); + ret = brcmf_sendpkt(&drvr_priv->pub, ifidx, skb); done: if (ret) - drvr->bus_if->dstats.tx_dropped++; + drvr_priv->pub.dstats.tx_dropped++; else - drvr->bus_if->dstats.tx_packets++; + drvr_priv->pub.tx_packets++; /* Return ok: we always eat the packet */ return 0; } -void brcmf_txflowcontrol(struct device *dev, int ifidx, bool state) +void brcmf_txflowcontrol(struct brcmf_pub *drvr, int ifidx, bool state) { struct net_device *ndev; - struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_pub *drvr = bus_if->drvr; + struct brcmf_info *drvr_priv = drvr->info; brcmf_dbg(TRACE, "Enter\n"); - ndev = drvr->iflist[ifidx]->ndev; + drvr->txoff = state; + ndev = drvr_priv->iflist[ifidx]->ndev; if (state == ON) netif_stop_queue(ndev); else netif_wake_queue(ndev); } -static int brcmf_host_event(struct brcmf_pub *drvr, int *ifidx, +static int brcmf_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata, struct brcmf_event_msg *event, void **data) { int bcmerror = 0; - bcmerror = brcmf_c_host_event(drvr, ifidx, pktdata, event, data); + bcmerror = brcmf_c_host_event(drvr_priv, ifidx, pktdata, event, data); if (bcmerror != 0) return bcmerror; - if (drvr->iflist[*ifidx]->ndev) - brcmf_cfg80211_event(drvr->iflist[*ifidx]->ndev, + if 
(drvr_priv->iflist[*ifidx]->ndev) + brcmf_cfg80211_event(drvr_priv->iflist[*ifidx]->ndev, event, *data); return bcmerror; } -void brcmf_rx_frame(struct device *dev, int ifidx, - struct sk_buff_head *skb_list) +void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb, + int numpkt) { + struct brcmf_info *drvr_priv = drvr->info; unsigned char *eth; uint len; void *data; - struct sk_buff *skb, *pnext; + struct sk_buff *pnext, *save_pktbuf; + int i; struct brcmf_if *ifp; struct brcmf_event_msg event; - struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_pub *drvr = bus_if->drvr; brcmf_dbg(TRACE, "Enter\n"); - skb_queue_walk_safe(skb_list, skb, pnext) { - skb_unlink(skb, skb_list); + save_pktbuf = skb; + + for (i = 0; skb && i < numpkt; i++, skb = pnext) { + + pnext = skb->next; + skb->next = NULL; /* Get the protocol, maintain skb around eth_type_trans() * The main reason for this hack is for the limitation of @@ -412,21 +460,15 @@ void brcmf_rx_frame(struct device *dev, int ifidx, eth = skb->data; len = skb->len; - ifp = drvr->iflist[ifidx]; + ifp = drvr_priv->iflist[ifidx]; if (ifp == NULL) - ifp = drvr->iflist[0]; - - if (!ifp || !ifp->ndev || - ifp->ndev->reg_state != NETREG_REGISTERED) { - brcmu_pkt_buf_free_skb(skb); - continue; - } + ifp = drvr_priv->iflist[0]; skb->dev = ifp->ndev; skb->protocol = eth_type_trans(skb, skb->dev); if (skb->pkt_type == PACKET_MULTICAST) - bus_if->dstats.multicast++; + drvr_priv->pub.rx_multicast++; skb->data = eth; skb->len = len; @@ -436,17 +478,19 @@ void brcmf_rx_frame(struct device *dev, int ifidx, /* Process special event packets and then discard them */ if (ntohs(skb->protocol) == ETH_P_LINK_CTL) - brcmf_host_event(drvr, &ifidx, + brcmf_host_event(drvr_priv, &ifidx, skb_mac_header(skb), &event, &data); - if (drvr->iflist[ifidx]) { - ifp = drvr->iflist[ifidx]; + if (drvr_priv->iflist[ifidx] && + !drvr_priv->iflist[ifidx]->state) + ifp = drvr_priv->iflist[ifidx]; + + if (ifp->ndev) ifp->ndev->last_rx = jiffies; - } - bus_if->dstats.rx_bytes += skb->len; - bus_if->dstats.rx_packets++; /* Local count */ + drvr->dstats.rx_bytes += skb->len; + drvr->rx_packets++; /* Local count */ if (in_interrupt()) netif_rx(skb); @@ -461,48 +505,59 @@ void brcmf_rx_frame(struct device *dev, int ifidx, } } -void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success) +void brcmf_txcomplete(struct brcmf_pub *drvr, struct sk_buff *txp, bool success) { uint ifidx; + struct brcmf_info *drvr_priv = drvr->info; struct ethhdr *eh; u16 type; - struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_pub *drvr = bus_if->drvr; - brcmf_proto_hdrpull(dev, &ifidx, txp); + brcmf_proto_hdrpull(drvr, &ifidx, txp); eh = (struct ethhdr *)(txp->data); type = ntohs(eh->h_proto); if (type == ETH_P_PAE) - atomic_dec(&drvr->pend_8021x_cnt); + atomic_dec(&drvr_priv->pend_8021x_cnt); } static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev) { - struct brcmf_if *ifp = netdev_priv(ndev); - struct brcmf_bus *bus_if = ifp->drvr->bus_if; + struct brcmf_info *drvr_priv = *(struct brcmf_info **) + netdev_priv(ndev); + struct brcmf_if *ifp; + int ifidx; brcmf_dbg(TRACE, "Enter\n"); + ifidx = brcmf_net2idx(drvr_priv, ndev); + if (ifidx == BRCMF_BAD_IF) + return NULL; + + ifp = drvr_priv->iflist[ifidx]; + + if (drvr_priv->pub.up) + /* Use the protocol to get dongle stats */ + brcmf_proto_dstats(&drvr_priv->pub); + /* Copy dongle stats to net device stats */ - ifp->stats.rx_packets = bus_if->dstats.rx_packets; - 
ifp->stats.tx_packets = bus_if->dstats.tx_packets; - ifp->stats.rx_bytes = bus_if->dstats.rx_bytes; - ifp->stats.tx_bytes = bus_if->dstats.tx_bytes; - ifp->stats.rx_errors = bus_if->dstats.rx_errors; - ifp->stats.tx_errors = bus_if->dstats.tx_errors; - ifp->stats.rx_dropped = bus_if->dstats.rx_dropped; - ifp->stats.tx_dropped = bus_if->dstats.tx_dropped; - ifp->stats.multicast = bus_if->dstats.multicast; + ifp->stats.rx_packets = drvr_priv->pub.dstats.rx_packets; + ifp->stats.tx_packets = drvr_priv->pub.dstats.tx_packets; + ifp->stats.rx_bytes = drvr_priv->pub.dstats.rx_bytes; + ifp->stats.tx_bytes = drvr_priv->pub.dstats.tx_bytes; + ifp->stats.rx_errors = drvr_priv->pub.dstats.rx_errors; + ifp->stats.tx_errors = drvr_priv->pub.dstats.tx_errors; + ifp->stats.rx_dropped = drvr_priv->pub.dstats.rx_dropped; + ifp->stats.tx_dropped = drvr_priv->pub.dstats.tx_dropped; + ifp->stats.multicast = drvr_priv->pub.dstats.multicast; return &ifp->stats; } /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */ -static int brcmf_toe_get(struct brcmf_pub *drvr, int ifidx, u32 *toe_ol) +static int brcmf_toe_get(struct brcmf_info *drvr_priv, int ifidx, u32 *toe_ol) { struct brcmf_dcmd dcmd; __le32 toe_le; @@ -517,17 +572,17 @@ static int brcmf_toe_get(struct brcmf_pub *drvr, int ifidx, u32 *toe_ol) dcmd.set = false; strcpy(buf, "toe_ol"); - ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len); + ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len); if (ret < 0) { /* Check for older dongle image that doesn't support toe_ol */ if (ret == -EIO) { brcmf_dbg(ERROR, "%s: toe not supported by device\n", - brcmf_ifname(drvr, ifidx)); + brcmf_ifname(&drvr_priv->pub, ifidx)); return -EOPNOTSUPP; } brcmf_dbg(INFO, "%s: could not get toe_ol: ret=%d\n", - brcmf_ifname(drvr, ifidx), ret); + brcmf_ifname(&drvr_priv->pub, ifidx), ret); return ret; } @@ -538,7 +593,7 @@ static int brcmf_toe_get(struct brcmf_pub *drvr, int ifidx, u32 *toe_ol) /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */ -static int brcmf_toe_set(struct brcmf_pub *drvr, int ifidx, u32 toe_ol) +static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol) { struct brcmf_dcmd dcmd; char buf[32]; @@ -556,10 +611,10 @@ static int brcmf_toe_set(struct brcmf_pub *drvr, int ifidx, u32 toe_ol) strcpy(buf, "toe_ol"); memcpy(&buf[sizeof("toe_ol")], &toe_le, sizeof(u32)); - ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len); + ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len); if (ret < 0) { brcmf_dbg(ERROR, "%s: could not set toe_ol: ret=%d\n", - brcmf_ifname(drvr, ifidx), ret); + brcmf_ifname(&drvr_priv->pub, ifidx), ret); return ret; } @@ -569,10 +624,10 @@ static int brcmf_toe_set(struct brcmf_pub *drvr, int ifidx, u32 toe_ol) strcpy(buf, "toe"); memcpy(&buf[sizeof("toe")], &toe_le, sizeof(u32)); - ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len); + ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len); if (ret < 0) { brcmf_dbg(ERROR, "%s: could not set toe: ret=%d\n", - brcmf_ifname(drvr, ifidx), ret); + brcmf_ifname(&drvr_priv->pub, ifidx), ret); return ret; } @@ -582,19 +637,21 @@ static int brcmf_toe_set(struct brcmf_pub *drvr, int ifidx, u32 toe_ol) static void brcmf_ethtool_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { - struct brcmf_if *ifp = netdev_priv(ndev); - struct brcmf_pub *drvr = ifp->drvr; + struct brcmf_info *drvr_priv = *(struct brcmf_info **) + netdev_priv(ndev); sprintf(info->driver, 
KBUILD_MODNAME); - sprintf(info->version, "%lu", drvr->drv_version); - sprintf(info->bus_info, "%s", dev_name(drvr->dev)); + sprintf(info->version, "%lu", drvr_priv->pub.drv_version); + sprintf(info->fw_version, "%s", BCM4329_FW_NAME); + sprintf(info->bus_info, "%s", + dev_name(brcmf_bus_get_device(drvr_priv->pub.bus))); } static struct ethtool_ops brcmf_ethtool_ops = { .get_drvinfo = brcmf_ethtool_get_drvinfo }; -static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr) +static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr) { struct ethtool_drvinfo info; char drvname[sizeof(info.driver)]; @@ -628,18 +685,18 @@ static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr) } /* otherwise, require dongle to be up */ - else if (!drvr->bus_if->drvr_up) { + else if (!drvr_priv->pub.up) { brcmf_dbg(ERROR, "dongle is not up\n"); return -ENODEV; } /* finally, report dongle driver type */ - else if (drvr->iswl) + else if (drvr_priv->pub.iswl) sprintf(info.driver, "wl"); else sprintf(info.driver, "xx"); - sprintf(info.version, "%lu", drvr->drv_version); + sprintf(info.version, "%lu", drvr_priv->pub.drv_version); if (copy_to_user(uaddr, &info, sizeof(info))) return -EFAULT; brcmf_dbg(CTL, "given %*s, returning %s\n", @@ -649,7 +706,7 @@ static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr) /* Get toe offload components from dongle */ case ETHTOOL_GRXCSUM: case ETHTOOL_GTXCSUM: - ret = brcmf_toe_get(drvr, 0, &toe_cmpnt); + ret = brcmf_toe_get(drvr_priv, 0, &toe_cmpnt); if (ret < 0) return ret; @@ -670,7 +727,7 @@ static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr) return -EFAULT; /* Read the current settings, update and write back */ - ret = brcmf_toe_get(drvr, 0, &toe_cmpnt); + ret = brcmf_toe_get(drvr_priv, 0, &toe_cmpnt); if (ret < 0) return ret; @@ -682,17 +739,17 @@ static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr) else toe_cmpnt &= ~csum_dir; - ret = brcmf_toe_set(drvr, 0, toe_cmpnt); + ret = brcmf_toe_set(drvr_priv, 0, toe_cmpnt); if (ret < 0) return ret; /* If setting TX checksum mode, tell Linux the new mode */ if (cmd == ETHTOOL_STXCSUM) { if (edata.data) - drvr->iflist[0]->ndev->features |= + drvr_priv->iflist[0]->ndev->features |= NETIF_F_IP_CSUM; else - drvr->iflist[0]->ndev->features &= + drvr_priv->iflist[0]->ndev->features &= ~NETIF_F_IP_CSUM; } @@ -708,16 +765,18 @@ static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr) static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr, int cmd) { - struct brcmf_if *ifp = netdev_priv(ndev); - struct brcmf_pub *drvr = ifp->drvr; + struct brcmf_info *drvr_priv = *(struct brcmf_info **) + netdev_priv(ndev); + int ifidx; - brcmf_dbg(TRACE, "ifidx %d, cmd 0x%04x\n", ifp->idx, cmd); + ifidx = brcmf_net2idx(drvr_priv, ndev); + brcmf_dbg(TRACE, "ifidx %d, cmd 0x%04x\n", ifidx, cmd); - if (!drvr->iflist[ifp->idx]) + if (ifidx == BRCMF_BAD_IF) return -1; if (cmd == SIOCETHTOOL) - return brcmf_ethtool(drvr, ifr->ifr_data); + return brcmf_ethtool(drvr_priv, ifr->ifr_data); return -EOPNOTSUPP; } @@ -729,25 +788,28 @@ s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len) s32 err = 0; int buflen = 0; bool is_set_key_cmd; - struct brcmf_if *ifp = netdev_priv(ndev); - struct brcmf_pub *drvr = ifp->drvr; + struct brcmf_info *drvr_priv = *(struct brcmf_info **) + netdev_priv(ndev); + int ifidx; memset(&dcmd, 0, sizeof(dcmd)); dcmd.cmd = cmd; dcmd.buf = arg; dcmd.len = len; + ifidx = brcmf_net2idx(drvr_priv, ndev); + if 
(dcmd.buf != NULL) buflen = min_t(uint, dcmd.len, BRCMF_DCMD_MAXLEN); /* send to dongle (must be up, and wl) */ - if ((drvr->bus_if->state != BRCMF_BUS_DATA)) { + if ((drvr_priv->pub.busstate != BRCMF_BUS_DATA)) { brcmf_dbg(ERROR, "DONGLE_DOWN\n"); err = -EIO; goto done; } - if (!drvr->iswl) { + if (!drvr_priv->pub.iswl) { err = -EIO; goto done; } @@ -764,7 +826,7 @@ s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len) if (is_set_key_cmd) brcmf_netdev_wait_pend8021x(ndev); - err = brcmf_proto_dcmd(drvr, ifp->idx, &dcmd, buflen); + err = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, buflen); done: if (err > 0) @@ -775,16 +837,15 @@ s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len) static int brcmf_netdev_stop(struct net_device *ndev) { - struct brcmf_if *ifp = netdev_priv(ndev); - struct brcmf_pub *drvr = ifp->drvr; + struct brcmf_pub *drvr = *(struct brcmf_pub **) netdev_priv(ndev); brcmf_dbg(TRACE, "Enter\n"); brcmf_cfg80211_down(drvr->config); - if (drvr->bus_if->drvr_up == 0) + if (drvr->up == 0) return 0; /* Set state and stop OS transmissions */ - drvr->bus_if->drvr_up = false; + drvr->up = 0; netif_stop_queue(ndev); return 0; @@ -792,37 +853,39 @@ static int brcmf_netdev_stop(struct net_device *ndev) static int brcmf_netdev_open(struct net_device *ndev) { - struct brcmf_if *ifp = netdev_priv(ndev); - struct brcmf_pub *drvr = ifp->drvr; + struct brcmf_info *drvr_priv = *(struct brcmf_info **) + netdev_priv(ndev); u32 toe_ol; + int ifidx = brcmf_net2idx(drvr_priv, ndev); s32 ret = 0; - brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx); + brcmf_dbg(TRACE, "ifidx %d\n", ifidx); + + if (ifidx == 0) { /* do it only for primary eth0 */ - if (ifp->idx == 0) { /* do it only for primary eth0 */ /* try to bring up bus */ - ret = brcmf_bus_start(drvr->dev); + ret = brcmf_bus_start(&drvr_priv->pub); if (ret != 0) { brcmf_dbg(ERROR, "failed with code %d\n", ret); return -1; } - atomic_set(&drvr->pend_8021x_cnt, 0); + atomic_set(&drvr_priv->pend_8021x_cnt, 0); - memcpy(ndev->dev_addr, drvr->mac, ETH_ALEN); + memcpy(ndev->dev_addr, drvr_priv->pub.mac, ETH_ALEN); /* Get current TOE mode from dongle */ - if (brcmf_toe_get(drvr, ifp->idx, &toe_ol) >= 0 + if (brcmf_toe_get(drvr_priv, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) - drvr->iflist[ifp->idx]->ndev->features |= + drvr_priv->iflist[ifidx]->ndev->features |= NETIF_F_IP_CSUM; else - drvr->iflist[ifp->idx]->ndev->features &= + drvr_priv->iflist[ifidx]->ndev->features &= ~NETIF_F_IP_CSUM; } /* Allow transmit calls */ netif_start_queue(ndev); - drvr->bus_if->drvr_up = true; - if (brcmf_cfg80211_up(drvr->config)) { + drvr_priv->pub.up = 1; + if (brcmf_cfg80211_up(drvr_priv->pub.config)) { brcmf_dbg(ERROR, "failed to bring up cfg80211\n"); return -1; } @@ -830,155 +893,193 @@ static int brcmf_netdev_open(struct net_device *ndev) return ret; } -static const struct net_device_ops brcmf_netdev_ops_pri = { - .ndo_open = brcmf_netdev_open, - .ndo_stop = brcmf_netdev_stop, - .ndo_get_stats = brcmf_netdev_get_stats, - .ndo_do_ioctl = brcmf_netdev_ioctl_entry, - .ndo_start_xmit = brcmf_netdev_start_xmit, - .ndo_set_mac_address = brcmf_netdev_set_mac_address, - .ndo_set_rx_mode = brcmf_netdev_set_multicast_list -}; - int -brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr) +brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx, struct net_device *ndev, + char *name, u8 *mac_addr, u32 flags, u8 bssidx) { struct brcmf_if *ifp; - struct net_device *ndev; - struct brcmf_bus *bus_if = 
dev_get_drvdata(dev); - struct brcmf_pub *drvr = bus_if->drvr; - - brcmf_dbg(TRACE, "idx %d\n", ifidx); + int ret = 0, err = 0; - ifp = drvr->iflist[ifidx]; - /* - * Delete the existing interface before overwriting it - * in case we missed the BRCMF_E_IF_DEL event. - */ - if (ifp) { - brcmf_dbg(ERROR, "ERROR: netdev:%s already exists, try free & unregister\n", - ifp->ndev->name); - netif_stop_queue(ifp->ndev); - unregister_netdev(ifp->ndev); - free_netdev(ifp->ndev); - drvr->iflist[ifidx] = NULL; - } + brcmf_dbg(TRACE, "idx %d, handle->%p\n", ifidx, ndev); - /* Allocate netdev, including space for private structure */ - ndev = alloc_netdev(sizeof(struct brcmf_if), name, ether_setup); - if (!ndev) { - brcmf_dbg(ERROR, "OOM - alloc_netdev\n"); - return -ENOMEM; + ifp = drvr_priv->iflist[ifidx]; + if (!ifp) { + ifp = kmalloc(sizeof(struct brcmf_if), GFP_ATOMIC); + if (!ifp) + return -ENOMEM; } - ifp = netdev_priv(ndev); - ifp->ndev = ndev; - ifp->drvr = drvr; - drvr->iflist[ifidx] = ifp; - ifp->idx = ifidx; + memset(ifp, 0, sizeof(struct brcmf_if)); + ifp->info = drvr_priv; + drvr_priv->iflist[ifidx] = ifp; if (mac_addr != NULL) memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN); - if (brcmf_net_attach(drvr, ifp->idx)) { - brcmf_dbg(ERROR, "brcmf_net_attach failed"); - free_netdev(ifp->ndev); - drvr->iflist[ifidx] = NULL; - return -EOPNOTSUPP; - } + if (ndev == NULL) { + ifp->state = BRCMF_E_IF_ADD; + ifp->idx = ifidx; + /* + * Delete the existing interface before overwriting it + * in case we missed the BRCMF_E_IF_DEL event. + */ + if (ifp->ndev != NULL) { + brcmf_dbg(ERROR, "ERROR: netdev:%s already exists, try free & unregister\n", + ifp->ndev->name); + netif_stop_queue(ifp->ndev); + unregister_netdev(ifp->ndev); + free_netdev(ifp->ndev); + } + + /* Allocate netdev, including space for private structure */ + ifp->ndev = alloc_netdev(sizeof(drvr_priv), "wlan%d", + ether_setup); + if (!ifp->ndev) { + brcmf_dbg(ERROR, "OOM - alloc_netdev\n"); + ret = -ENOMEM; + } - brcmf_dbg(TRACE, " ==== pid:%x, net_device for if:%s created ===\n", - current->pid, ifp->ndev->name); + if (ret == 0) { + memcpy(netdev_priv(ifp->ndev), &drvr_priv, + sizeof(drvr_priv)); + err = brcmf_net_attach(&drvr_priv->pub, ifp->idx); + if (err != 0) { + brcmf_dbg(ERROR, "brcmf_net_attach failed, err %d\n", + err); + ret = -EOPNOTSUPP; + } else { + brcmf_dbg(TRACE, " ==== pid:%x, net_device for if:%s created ===\n", + current->pid, ifp->ndev->name); + ifp->state = 0; + } + } + + if (ret < 0) { + if (ifp->ndev) + free_netdev(ifp->ndev); + + drvr_priv->iflist[ifp->idx] = NULL; + kfree(ifp); + } + } else + ifp->ndev = ndev; return 0; } -void brcmf_del_if(struct brcmf_pub *drvr, int ifidx) +void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx) { struct brcmf_if *ifp; brcmf_dbg(TRACE, "idx %d\n", ifidx); - ifp = drvr->iflist[ifidx]; + ifp = drvr_priv->iflist[ifidx]; if (!ifp) { brcmf_dbg(ERROR, "Null interface\n"); return; } - if (ifp->ndev) { - if (ifidx == 0) { - if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) { - rtnl_lock(); - brcmf_netdev_stop(ifp->ndev); - rtnl_unlock(); - } - } else { - netif_stop_queue(ifp->ndev); - } + ifp->state = BRCMF_E_IF_DEL; + ifp->idx = ifidx; + if (ifp->ndev != NULL) { + netif_stop_queue(ifp->ndev); unregister_netdev(ifp->ndev); - drvr->iflist[ifidx] = NULL; - if (ifidx == 0) - brcmf_cfg80211_detach(drvr->config); free_netdev(ifp->ndev); + drvr_priv->iflist[ifidx] = NULL; + kfree(ifp); } } -int brcmf_attach(uint bus_hdrlen, struct device *dev) +struct brcmf_pub *brcmf_attach(struct brcmf_bus 
*bus, uint bus_hdrlen) { - struct brcmf_pub *drvr = NULL; - int ret = 0; + struct brcmf_info *drvr_priv = NULL; + struct net_device *ndev; brcmf_dbg(TRACE, "Enter\n"); + /* Allocate netdev, including space for private structure */ + ndev = alloc_netdev(sizeof(drvr_priv), "wlan%d", ether_setup); + if (!ndev) { + brcmf_dbg(ERROR, "OOM - alloc_netdev\n"); + goto fail; + } + /* Allocate primary brcmf_info */ - drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC); - if (!drvr) - return -ENOMEM; + drvr_priv = kzalloc(sizeof(struct brcmf_info), GFP_ATOMIC); + if (!drvr_priv) + goto fail; + + /* + * Save the brcmf_info into the priv + */ + memcpy(netdev_priv(ndev), &drvr_priv, sizeof(drvr_priv)); + + if (brcmf_add_if(drvr_priv, 0, ndev, ndev->name, NULL, 0, 0) == + BRCMF_BAD_IF) + goto fail; + + ndev->netdev_ops = NULL; + mutex_init(&drvr_priv->proto_block); - mutex_init(&drvr->proto_block); + /* Link to info module */ + drvr_priv->pub.info = drvr_priv; /* Link to bus module */ - drvr->hdrlen = bus_hdrlen; - drvr->bus_if = dev_get_drvdata(dev); - drvr->bus_if->drvr = drvr; - drvr->dev = dev; + drvr_priv->pub.bus = bus; + drvr_priv->pub.hdrlen = bus_hdrlen; /* Attach and link in the protocol */ - ret = brcmf_proto_attach(drvr); - if (ret != 0) { + if (brcmf_proto_attach(&drvr_priv->pub) != 0) { brcmf_dbg(ERROR, "brcmf_prot_attach failed\n"); goto fail; } - INIT_WORK(&drvr->setmacaddr_work, _brcmf_set_mac_address); - INIT_WORK(&drvr->multicast_work, _brcmf_set_multicast_list); + /* Attach and link in the cfg80211 */ + drvr_priv->pub.config = + brcmf_cfg80211_attach(ndev, + brcmf_bus_get_device(bus), + &drvr_priv->pub); + if (drvr_priv->pub.config == NULL) { + brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n"); + goto fail; + } - return ret; + INIT_WORK(&drvr_priv->setmacaddr_work, _brcmf_set_mac_address); + INIT_WORK(&drvr_priv->multicast_work, _brcmf_set_multicast_list); + + /* + * Save the brcmf_info into the priv + */ + memcpy(netdev_priv(ndev), &drvr_priv, sizeof(drvr_priv)); + + return &drvr_priv->pub; fail: - brcmf_detach(dev); + if (ndev) + free_netdev(ndev); + if (drvr_priv) + brcmf_detach(&drvr_priv->pub); - return ret; + return NULL; } -int brcmf_bus_start(struct device *dev) +int brcmf_bus_start(struct brcmf_pub *drvr) { int ret = -1; + struct brcmf_info *drvr_priv = drvr->info; /* Room for "event_msgs" + '\0' + bitvec */ char iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; - struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_pub *drvr = bus_if->drvr; brcmf_dbg(TRACE, "\n"); /* Bring up the bus */ - ret = bus_if->brcmf_bus_init(dev); + ret = brcmf_sdbrcm_bus_init(&drvr_priv->pub); if (ret != 0) { brcmf_dbg(ERROR, "brcmf_sdbrcm_bus_init failed %d\n", ret); return ret; } /* If bus is not ready, can't come up */ - if (bus_if->state != BRCMF_BUS_DATA) { + if (drvr_priv->pub.busstate != BRCMF_BUS_DATA) { brcmf_dbg(ERROR, "failed bus is not ready\n"); return -ENODEV; } @@ -1015,22 +1116,33 @@ int brcmf_bus_start(struct device *dev) drvr->pktfilter[0] = "100 0 0 0 0x01 0x00"; /* Bus is ready, do any protocol initialization */ - ret = brcmf_proto_init(drvr); + ret = brcmf_proto_init(&drvr_priv->pub); if (ret < 0) return ret; return 0; } +static struct net_device_ops brcmf_netdev_ops_pri = { + .ndo_open = brcmf_netdev_open, + .ndo_stop = brcmf_netdev_stop, + .ndo_get_stats = brcmf_netdev_get_stats, + .ndo_do_ioctl = brcmf_netdev_ioctl_entry, + .ndo_start_xmit = brcmf_netdev_start_xmit, + .ndo_set_mac_address = brcmf_netdev_set_mac_address, + .ndo_set_rx_mode = brcmf_netdev_set_multicast_list +}; + 
int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx) { + struct brcmf_info *drvr_priv = drvr->info; struct net_device *ndev; u8 temp_addr[ETH_ALEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33}; brcmf_dbg(TRACE, "ifidx %d\n", ifidx); - ndev = drvr->iflist[ifidx]->ndev; + ndev = drvr_priv->iflist[ifidx]->ndev; ndev->netdev_ops = &brcmf_netdev_ops_pri; /* @@ -1038,7 +1150,7 @@ int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx) */ if (ifidx != 0) { /* for virtual interfaces use the primary MAC */ - memcpy(temp_addr, drvr->mac, ETH_ALEN); + memcpy(temp_addr, drvr_priv->pub.mac, ETH_ALEN); } @@ -1049,23 +1161,14 @@ int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx) - Locally Administered address */ } - ndev->hard_header_len = ETH_HLEN + drvr->hdrlen; + ndev->hard_header_len = ETH_HLEN + drvr_priv->pub.hdrlen; ndev->ethtool_ops = &brcmf_ethtool_ops; - drvr->rxsz = ndev->mtu + ndev->hard_header_len + - drvr->hdrlen; + drvr_priv->pub.rxsz = ndev->mtu + ndev->hard_header_len + + drvr_priv->pub.hdrlen; memcpy(ndev->dev_addr, temp_addr, ETH_ALEN); - /* attach to cfg80211 for primary interface */ - if (!ifidx) { - drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr); - if (drvr->config == NULL) { - brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n"); - goto fail; - } - } - if (register_netdev(ndev) != 0) { brcmf_dbg(ERROR, "couldn't register the net device\n"); goto fail; @@ -1082,57 +1185,127 @@ int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx) static void brcmf_bus_detach(struct brcmf_pub *drvr) { + struct brcmf_info *drvr_priv; + brcmf_dbg(TRACE, "Enter\n"); if (drvr) { - /* Stop the protocol module */ - brcmf_proto_stop(drvr); + drvr_priv = drvr->info; + if (drvr_priv) { + /* Stop the protocol module */ + brcmf_proto_stop(&drvr_priv->pub); - /* Stop the bus module */ - drvr->bus_if->brcmf_bus_stop(drvr->dev); + /* Stop the bus module */ + brcmf_sdbrcm_bus_stop(drvr_priv->pub.bus); + } } } -void brcmf_detach(struct device *dev) +void brcmf_detach(struct brcmf_pub *drvr) { - int i; - struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_pub *drvr = bus_if->drvr; + struct brcmf_info *drvr_priv; brcmf_dbg(TRACE, "Enter\n"); + if (drvr) { + drvr_priv = drvr->info; + if (drvr_priv) { + struct brcmf_if *ifp; + int i; + + for (i = 1; i < BRCMF_MAX_IFS; i++) + if (drvr_priv->iflist[i]) + brcmf_del_if(drvr_priv, i); + + ifp = drvr_priv->iflist[0]; + if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) { + rtnl_lock(); + brcmf_netdev_stop(ifp->ndev); + rtnl_unlock(); + unregister_netdev(ifp->ndev); + } + + cancel_work_sync(&drvr_priv->setmacaddr_work); + cancel_work_sync(&drvr_priv->multicast_work); + + brcmf_bus_detach(drvr); + + if (drvr->prot) + brcmf_proto_detach(drvr); + + brcmf_cfg80211_detach(drvr->config); + + free_netdev(ifp->ndev); + kfree(ifp); + kfree(drvr_priv); + } + } +} + +static void __exit brcmf_module_cleanup(void) +{ + brcmf_dbg(TRACE, "Enter\n"); - /* make sure primary interface removed last */ - for (i = BRCMF_MAX_IFS-1; i > -1; i--) - if (drvr->iflist[i]) - brcmf_del_if(drvr, i); + brcmf_bus_unregister(); +} + +static int __init brcmf_module_init(void) +{ + int error; + + brcmf_dbg(TRACE, "Enter\n"); + + error = brcmf_bus_register(); + + if (error) { + brcmf_dbg(ERROR, "brcmf_bus_register failed\n"); + goto failed; + } + return 0; + +failed: + return -EINVAL; +} + +module_init(brcmf_module_init); +module_exit(brcmf_module_cleanup); - cancel_work_sync(&drvr->setmacaddr_work); - cancel_work_sync(&drvr->multicast_work); +int brcmf_os_proto_block(struct brcmf_pub 
*drvr) +{ + struct brcmf_info *drvr_priv = drvr->info; + + if (drvr_priv) { + mutex_lock(&drvr_priv->proto_block); + return 1; + } + return 0; +} - brcmf_bus_detach(drvr); +int brcmf_os_proto_unblock(struct brcmf_pub *drvr) +{ + struct brcmf_info *drvr_priv = drvr->info; - if (drvr->prot) - brcmf_proto_detach(drvr); + if (drvr_priv) { + mutex_unlock(&drvr_priv->proto_block); + return 1; + } - bus_if->drvr = NULL; - kfree(drvr); + return 0; } -static int brcmf_get_pend_8021x_cnt(struct brcmf_pub *drvr) +static int brcmf_get_pend_8021x_cnt(struct brcmf_info *drvr_priv) { - return atomic_read(&drvr->pend_8021x_cnt); + return atomic_read(&drvr_priv->pend_8021x_cnt); } #define MAX_WAIT_FOR_8021X_TX 10 int brcmf_netdev_wait_pend8021x(struct net_device *ndev) { - struct brcmf_if *ifp = netdev_priv(ndev); - struct brcmf_pub *drvr = ifp->drvr; + struct brcmf_info *drvr_priv = *(struct brcmf_info **)netdev_priv(ndev); int timeout = 10 * HZ / 1000; int ntimes = MAX_WAIT_FOR_8021X_TX; - int pend = brcmf_get_pend_8021x_cnt(drvr); + int pend = brcmf_get_pend_8021x_cnt(drvr_priv); while (ntimes && pend) { if (pend) { @@ -1141,7 +1314,7 @@ int brcmf_netdev_wait_pend8021x(struct net_device *ndev) set_current_state(TASK_RUNNING); ntimes--; } - pend = brcmf_get_pend_8021x_cnt(drvr); + pend = brcmf_get_pend_8021x_cnt(drvr_priv); } return pend; } diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h index 6bc4425a8b0f..4ee1ea846f6d 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h @@ -41,10 +41,17 @@ extern void brcmf_proto_stop(struct brcmf_pub *drvr); extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, struct sk_buff *txp); +/* Remove any protocol-specific data header. 
*/ +extern int brcmf_proto_hdrpull(struct brcmf_pub *, int *ifidx, + struct sk_buff *rxp); + /* Use protocol to issue command to dongle */ extern int brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx, struct brcmf_dcmd *dcmd, int len); +/* Update local copy of dongle statistics */ +extern void brcmf_proto_dstats(struct brcmf_pub *drvr); + extern int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr); extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 5a002a21f108..313b8bf592d1 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include #include @@ -36,7 +35,6 @@ #include #include #include "sdio_host.h" -#include "sdio_chip.h" #define DCMD_RESP_TIMEOUT 2000 /* In milli second */ @@ -87,8 +85,11 @@ struct rte_console { #endif /* BCMDBG */ #include +#include "dhd.h" #include "dhd_bus.h" +#include "dhd_proto.h" #include "dhd_dbg.h" +#include #define TXQLEN 2048 /* bulk tx queue length */ #define TXHI (TXQLEN - 256) /* turn on flow control above TXHI */ @@ -133,6 +134,33 @@ struct rte_console { /* Force no backplane reset */ #define SBSDIO_DEVCTL_RST_NOBPRESET 0x20 +/* SBSDIO_FUNC1_CHIPCLKCSR */ + +/* Force ALP request to backplane */ +#define SBSDIO_FORCE_ALP 0x01 +/* Force HT request to backplane */ +#define SBSDIO_FORCE_HT 0x02 +/* Force ILP request to backplane */ +#define SBSDIO_FORCE_ILP 0x04 +/* Make ALP ready (power up xtal) */ +#define SBSDIO_ALP_AVAIL_REQ 0x08 +/* Make HT ready (power up PLL) */ +#define SBSDIO_HT_AVAIL_REQ 0x10 +/* Squelch clock requests from HW */ +#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20 +/* Status: ALP is ready */ +#define SBSDIO_ALP_AVAIL 0x40 +/* Status: HT is ready */ +#define SBSDIO_HT_AVAIL 0x80 + +#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL) +#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS) +#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS) +#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval)) + +#define SBSDIO_CLKAV(regval, alponly) \ + (SBSDIO_ALPAV(regval) && (alponly ? 
1 : SBSDIO_HTAV(regval))) + /* direct(mapped) cis space */ /* MAPPED common CIS address */ @@ -307,16 +335,49 @@ struct rte_console { /* Flags for SDH calls */ #define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED) -#define BRCMFMAC_FW_NAME "brcm/brcmfmac.bin" -#define BRCMFMAC_NV_NAME "brcm/brcmfmac.txt" -MODULE_FIRMWARE(BRCMFMAC_FW_NAME); -MODULE_FIRMWARE(BRCMFMAC_NV_NAME); - -#define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */ -#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change - * when idle - */ -#define BRCMF_IDLE_INTERVAL 1 +/* sbimstate */ +#define SBIM_IBE 0x20000 /* inbanderror */ +#define SBIM_TO 0x40000 /* timeout */ +#define SBIM_BY 0x01800000 /* busy (sonics >= 2.3) */ +#define SBIM_RJ 0x02000000 /* reject (sonics >= 2.3) */ + +/* sbtmstatelow */ + +/* reset */ +#define SBTML_RESET 0x0001 +/* reject field */ +#define SBTML_REJ_MASK 0x0006 +/* reject */ +#define SBTML_REJ 0x0002 +/* temporary reject, for error recovery */ +#define SBTML_TMPREJ 0x0004 + +/* Shift to locate the SI control flags in sbtml */ +#define SBTML_SICF_SHIFT 16 + +/* sbtmstatehigh */ +#define SBTMH_SERR 0x0001 /* serror */ +#define SBTMH_INT 0x0002 /* interrupt */ +#define SBTMH_BUSY 0x0004 /* busy */ +#define SBTMH_TO 0x0020 /* timeout (sonics >= 2.3) */ + +/* Shift to locate the SI status flags in sbtmh */ +#define SBTMH_SISF_SHIFT 16 + +/* sbidlow */ +#define SBIDL_INIT 0x80 /* initiator */ + +/* sbidhigh */ +#define SBIDH_RC_MASK 0x000f /* revision code */ +#define SBIDH_RCE_MASK 0x7000 /* revision code extension field */ +#define SBIDH_RCE_SHIFT 8 +#define SBCOREREV(sbidh) \ + ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | \ + ((sbidh) & SBIDH_RC_MASK)) +#define SBIDH_CC_MASK 0x8ff0 /* core code */ +#define SBIDH_CC_SHIFT 4 +#define SBIDH_VC_MASK 0xffff0000 /* vendor code */ +#define SBIDH_VC_SHIFT 16 /* * Conversion of 802.1D priority to precedence level @@ -327,6 +388,17 @@ static uint prio2prec(u32 prio) (prio^2) : prio; } +/* + * Core reg address translation. + * Both macro's returns a 32 bits byte address on the backplane bus. 
+ */ +#define CORE_CC_REG(base, field) \ + (base + offsetof(struct chipcregs, field)) +#define CORE_BUS_REG(base, field) \ + (base + offsetof(struct sdpcmd_regs, field)) +#define CORE_SB(base, field) \ + (base + SBCONFIGOFF + offsetof(struct sbconfig, field)) + /* core registers */ struct sdpcmd_regs { u32 corecontrol; /* 0x00, rev8 */ @@ -452,8 +524,25 @@ struct sdpcm_shared_le { /* misc chip info needed by some of the routines */ +struct chip_info { + u32 chip; + u32 chiprev; + u32 cccorebase; + u32 ccrev; + u32 cccaps; + u32 buscorebase; /* 32 bits backplane bus address */ + u32 buscorerev; + u32 buscoretype; + u32 ramcorebase; + u32 armcorebase; + u32 pmurev; + u32 ramsize; +}; + /* Private data for SDIO bus interaction */ -struct brcmf_sdio { +struct brcmf_bus { + struct brcmf_pub *drvr; + struct brcmf_sdio_dev *sdiodev; /* sdio device handler */ struct chip_info *ci; /* Chip info struct */ char *vars; /* Variables (from CIS and/or other) */ @@ -485,7 +574,7 @@ struct brcmf_sdio { uint txminmax; struct sk_buff *glomd; /* Packet containing glomming descriptor */ - struct sk_buff_head glom; /* Packet list for glommed superframe */ + struct sk_buff *glom; /* Packet chain for glommed superframe */ uint glomerr; /* Glom packet read errors */ u8 *rxbuf; /* Buffer for receiving control packets */ @@ -548,13 +637,6 @@ struct brcmf_sdio { uint f2rxdata; /* Number of frame data reads */ uint f2txdata; /* Number of f2 frame writes */ uint f1regdata; /* Number of f1 register accesses */ - uint tickcnt; /* Number of watchdog been schedule */ - unsigned long tx_ctlerrs; /* Err of sending ctrl frames */ - unsigned long tx_ctlpkts; /* Ctrl frames sent to dongle */ - unsigned long rx_ctlerrs; /* Err of processing rx ctrl frames */ - unsigned long rx_ctlpkts; /* Ctrl frames processed from dongle */ - unsigned long rx_readahead_cnt; /* Number of packets where header - * read-ahead was used. 
*/ u8 *ctrl_frame_buf; u32 ctrl_frame_len; @@ -575,10 +657,50 @@ struct brcmf_sdio { struct semaphore sdsem; + const char *fw_name; const struct firmware *firmware; + const char *nv_name; u32 fw_ptr; +}; - bool txoff; /* Transmit flow-controlled */ +struct sbconfig { + u32 PAD[2]; + u32 sbipsflag; /* initiator port ocp slave flag */ + u32 PAD[3]; + u32 sbtpsflag; /* target port ocp slave flag */ + u32 PAD[11]; + u32 sbtmerrloga; /* (sonics >= 2.3) */ + u32 PAD; + u32 sbtmerrlog; /* (sonics >= 2.3) */ + u32 PAD[3]; + u32 sbadmatch3; /* address match3 */ + u32 PAD; + u32 sbadmatch2; /* address match2 */ + u32 PAD; + u32 sbadmatch1; /* address match1 */ + u32 PAD[7]; + u32 sbimstate; /* initiator agent state */ + u32 sbintvec; /* interrupt mask */ + u32 sbtmstatelow; /* target state */ + u32 sbtmstatehigh; /* target state */ + u32 sbbwa0; /* bandwidth allocation table0 */ + u32 PAD; + u32 sbimconfiglow; /* initiator configuration */ + u32 sbimconfighigh; /* initiator configuration */ + u32 sbadmatch0; /* address match0 */ + u32 PAD; + u32 sbtmconfiglow; /* target configuration */ + u32 sbtmconfighigh; /* target configuration */ + u32 sbbconfig; /* broadcast configuration */ + u32 PAD; + u32 sbbstate; /* broadcast state */ + u32 PAD[3]; + u32 sbactcnfg; /* activate configuration */ + u32 PAD[3]; + u32 sbflagst; /* current sbflags */ + u32 PAD[3]; + u32 sbidlow; /* identification */ + u32 sbidhigh; /* identification */ }; /* clkstate */ @@ -615,7 +737,7 @@ static void pkt_align(struct sk_buff *p, int len, int align) } /* To check if there's window offered */ -static bool data_ok(struct brcmf_sdio *bus) +static bool data_ok(struct brcmf_bus *bus) { return (u8)(bus->tx_max - bus->tx_seq) != 0 && ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0; @@ -626,14 +748,12 @@ static bool data_ok(struct brcmf_sdio *bus) * adresses on the 32 bit backplane bus. */ static void -r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 reg_offset, u32 *retryvar) +r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar) { - u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV); *retryvar = 0; do { *regvar = brcmf_sdcard_reg_read(bus->sdiodev, - bus->ci->c_inf[idx].base + reg_offset, - sizeof(u32)); + bus->ci->buscorebase + reg_offset, sizeof(u32)); } while (brcmf_sdcard_regfail(bus->sdiodev) && (++(*retryvar) <= retry_limit)); if (*retryvar) { @@ -646,13 +766,12 @@ r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 reg_offset, u32 *retryvar) } static void -w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset, u32 *retryvar) +w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar) { - u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV); *retryvar = 0; do { brcmf_sdcard_reg_write(bus->sdiodev, - bus->ci->c_inf[idx].base + reg_offset, + bus->ci->buscorebase + reg_offset, sizeof(u32), regval); } while (brcmf_sdcard_regfail(bus->sdiodev) && (++(*retryvar) <= retry_limit)); @@ -671,14 +790,14 @@ w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset, u32 *retryvar) /* Packet free applicable unconditionally for sdio and sdspi. * Conditional if bufpool was present for gspi bus. 
*/ -static void brcmf_sdbrcm_pktfree2(struct brcmf_sdio *bus, struct sk_buff *pkt) +static void brcmf_sdbrcm_pktfree2(struct brcmf_bus *bus, struct sk_buff *pkt) { if (bus->usebufpool) brcmu_pkt_buf_free_skb(pkt); } /* Turn backplane clock on or off */ -static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok) +static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok) { int err; u8 clkctl, clkreq, devctl; @@ -693,6 +812,10 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok) clkreq = bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ; + if ((bus->ci->chip == BCM4329_CHIP_ID) + && (bus->ci->chiprev == 0)) + clkreq |= SBSDIO_FORCE_ALP; + brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err); if (err) { @@ -700,6 +823,14 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok) return -EBADE; } + if (pendok && ((bus->ci->buscoretype == PCMCIA_CORE_ID) + && (bus->ci->buscorerev == 9))) { + u32 dummy, retries; + r_sdreg32(bus, &dummy, + offsetof(struct sdpcmd_regs, clockctlstatus), + &retries); + } + /* Check current status */ clkctl = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); @@ -799,7 +930,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok) } /* Change idle/active SD state */ -static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on) +static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on) { brcmf_dbg(TRACE, "Enter\n"); @@ -812,7 +943,7 @@ static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on) } /* Transition SD and backplane clock readiness */ -static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok) +static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok) { #ifdef BCMDBG uint oldstate = bus->clkstate; @@ -868,7 +999,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok) return 0; } -static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep) +static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep) { uint retries = 0; @@ -903,9 +1034,11 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep) SBSDIO_FORCE_HW_CLKREQ_OFF, NULL); /* Isolate the bus */ - brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, - SBSDIO_DEVICE_CTL, - SBSDIO_DEVCTL_PADS_ISO, NULL); + if (bus->ci->chip != BCM4329_CHIP_ID) { + brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, + SBSDIO_DEVICE_CTL, + SBSDIO_DEVCTL_PADS_ISO, NULL); + } /* Change state */ bus->sleeping = true; @@ -916,6 +1049,13 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep) brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL); + /* Force pad isolation off if possible + (in case power never toggled) */ + if ((bus->ci->buscoretype == PCMCIA_CORE_ID) + && (bus->ci->buscorerev >= 10)) + brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, + SBSDIO_DEVICE_CTL, 0, NULL); + /* Make sure the controller has the bus up */ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false); @@ -940,13 +1080,13 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep) return 0; } -static void bus_wake(struct brcmf_sdio *bus) +static void bus_wake(struct brcmf_bus *bus) { if (bus->sleeping) brcmf_sdbrcm_bussleep(bus, false); } -static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus) +static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus) { u32 intstatus = 0; u32 hmb_data; @@ -1022,7 +1162,7 @@ static u32 
brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus) return intstatus; } -static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx) +static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx) { uint retries = 0; u16 lastrbc; @@ -1079,61 +1219,16 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx) /* If we can't reach the device, signal failure */ if (err || brcmf_sdcard_regfail(bus->sdiodev)) - bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; -} - -/* copy a buffer into a pkt buffer chain */ -static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_sdio *bus, uint len) -{ - uint n, ret = 0; - struct sk_buff *p; - u8 *buf; - - buf = bus->dataptr; - - /* copy the data */ - skb_queue_walk(&bus->glom, p) { - n = min_t(uint, p->len, len); - memcpy(p->data, buf, n); - buf += n; - len -= n; - ret += n; - if (!len) - break; - } - - return ret; -} - -/* return total length of buffer chain */ -static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus) -{ - struct sk_buff *p; - uint total; - - total = 0; - skb_queue_walk(&bus->glom, p) - total += p->len; - return total; + bus->drvr->busstate = BRCMF_BUS_DOWN; } -static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus) -{ - struct sk_buff *cur, *next; - - skb_queue_walk_safe(&bus->glom, cur, next) { - skb_unlink(cur, &bus->glom); - brcmu_pkt_buf_free_skb(cur); - } -} - -static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) +static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq) { u16 dlen, totlen; u8 *dptr, num = 0; u16 sublen, check; - struct sk_buff *pfirst, *pnext; + struct sk_buff *pfirst, *plast, *pnext, *save_pfirst; int errcode; u8 chan, seq, doff, sfdoff; @@ -1145,12 +1240,11 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) /* If packets, issue read(s) and send up packet chain */ /* Return sequence numbers consumed? 
*/ - brcmf_dbg(TRACE, "start: glomd %p glom %p\n", - bus->glomd, skb_peek(&bus->glom)); + brcmf_dbg(TRACE, "start: glomd %p glom %p\n", bus->glomd, bus->glom); /* If there's a descriptor, generate the packet chain */ if (bus->glomd) { - pfirst = pnext = NULL; + pfirst = plast = pnext = NULL; dlen = (u16) (bus->glomd->len); dptr = bus->glomd->data; if (!dlen || (dlen & 1)) { @@ -1193,7 +1287,12 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) num, sublen); break; } - skb_queue_tail(&bus->glom, pnext); + if (!pfirst) { + pfirst = plast = pnext; + } else { + plast->next = pnext; + plast = pnext; + } /* Adhere to start alignment requirements */ pkt_align(pnext, sublen, BRCMF_SDALIGN); @@ -1209,9 +1308,12 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n", bus->nextlen, totlen, rxseq); } + bus->glom = pfirst; pfirst = pnext = NULL; } else { - brcmf_sdbrcm_free_glom(bus); + if (pfirst) + brcmu_pkt_buf_free_skb(pfirst); + bus->glom = NULL; num = 0; } @@ -1223,33 +1325,37 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) /* Ok -- either we just generated a packet chain, or had one from before */ - if (!skb_queue_empty(&bus->glom)) { + if (bus->glom) { if (BRCMF_GLOM_ON()) { brcmf_dbg(GLOM, "try superframe read, packet chain:\n"); - skb_queue_walk(&bus->glom, pnext) { + for (pnext = bus->glom; pnext; pnext = pnext->next) { brcmf_dbg(GLOM, " %p: %p len 0x%04x (%d)\n", pnext, (u8 *) (pnext->data), pnext->len, pnext->len); } } - pfirst = skb_peek(&bus->glom); - dlen = (u16) brcmf_sdbrcm_glom_len(bus); + pfirst = bus->glom; + dlen = (u16) brcmu_pkttotlen(pfirst); /* Do an SDIO read for the superframe. Configurable iovar to * read directly into the chained packet, or allocate a large * packet and and copy into the chain. 
*/ if (usechain) { - errcode = brcmf_sdcard_recv_chain(bus->sdiodev, + errcode = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad, - SDIO_FUNC_2, F2SYNC, &bus->glom); + SDIO_FUNC_2, + F2SYNC, (u8 *) pfirst->data, dlen, + pfirst); } else if (bus->dataptr) { errcode = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad, - SDIO_FUNC_2, F2SYNC, - bus->dataptr, dlen); - sublen = (u16) brcmf_sdbrcm_glom_from_buf(bus, dlen); + SDIO_FUNC_2, + F2SYNC, bus->dataptr, dlen, + NULL); + sublen = (u16) brcmu_pktfrombuf(pfirst, 0, dlen, + bus->dataptr); if (sublen != dlen) { brcmf_dbg(ERROR, "FAILED TO COPY, dlen %d sublen %d\n", dlen, sublen); @@ -1267,15 +1373,16 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) if (errcode < 0) { brcmf_dbg(ERROR, "glom read of %d bytes failed: %d\n", dlen, errcode); - bus->sdiodev->bus_if->dstats.rx_errors++; + bus->drvr->rx_errors++; if (bus->glomerr++ < 3) { brcmf_sdbrcm_rxfail(bus, true, true); } else { bus->glomerr = 0; brcmf_sdbrcm_rxfail(bus, true, false); + brcmu_pkt_buf_free_skb(bus->glom); bus->rxglomfail++; - brcmf_sdbrcm_free_glom(bus); + bus->glom = NULL; } return 0; } @@ -1348,14 +1455,10 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) /* Remove superframe header, remember offset */ skb_pull(pfirst, doff); sfdoff = doff; - num = 0; /* Validate all the subframe headers */ - skb_queue_walk(&bus->glom, pnext) { - /* leave when invalid subframe is found */ - if (errcode) - break; - + for (num = 0, pnext = pfirst; pnext && !errcode; + num++, pnext = pnext->next) { dptr = (u8 *) (pnext->data); dlen = (u16) (pnext->len); sublen = get_unaligned_le16(dptr); @@ -1388,8 +1491,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) num, doff, sublen, SDPCM_HDRLEN); errcode = -1; } - /* increase the subframe count */ - num++; } if (errcode) { @@ -1402,16 +1503,23 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) } else { bus->glomerr = 0; brcmf_sdbrcm_rxfail(bus, true, false); + brcmu_pkt_buf_free_skb(bus->glom); bus->rxglomfail++; - brcmf_sdbrcm_free_glom(bus); + bus->glom = NULL; } bus->nextlen = 0; return 0; } /* Basic SD framing looks ok - process each packet (header) */ + save_pfirst = pfirst; + bus->glom = NULL; + plast = NULL; + + for (num = 0; pfirst; rxseq++, pfirst = pnext) { + pnext = pfirst->next; + pfirst->next = NULL; - skb_queue_walk_safe(&bus->glom, pfirst, pnext) { dptr = (u8 *) (pfirst->data); sublen = get_unaligned_le16(dptr); chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); @@ -1431,8 +1539,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) bus->rx_badseq++; rxseq = seq; } - rxseq++; - #ifdef BCMDBG if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) { printk(KERN_DEBUG "Rx Subframe Data:\n"); @@ -1445,22 +1551,36 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) skb_pull(pfirst, doff); if (pfirst->len == 0) { - skb_unlink(pfirst, &bus->glom); brcmu_pkt_buf_free_skb(pfirst); + if (plast) + plast->next = pnext; + else + save_pfirst = pnext; + continue; - } else if (brcmf_proto_hdrpull(bus->sdiodev->dev, - &ifidx, pfirst) != 0) { + } else if (brcmf_proto_hdrpull(bus->drvr, &ifidx, + pfirst) != 0) { brcmf_dbg(ERROR, "rx protocol error\n"); - bus->sdiodev->bus_if->dstats.rx_errors++; - skb_unlink(pfirst, &bus->glom); + bus->drvr->rx_errors++; brcmu_pkt_buf_free_skb(pfirst); + if (plast) + plast->next = pnext; + else + save_pfirst = pnext; + continue; } + /* this packet will go up, link back into + chain and count it */ + pfirst->next = pnext; + 
plast = pfirst; + num++; + #ifdef BCMDBG if (BRCMF_GLOM_ON()) { brcmf_dbg(GLOM, "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n", - bus->glom.qlen, pfirst, pfirst->data, + num, pfirst, pfirst->data, pfirst->len, pfirst->next, pfirst->prev); print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, @@ -1469,20 +1589,19 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) } #endif /* BCMDBG */ } - /* sent any remaining packets up */ - if (bus->glom.qlen) { + if (num) { up(&bus->sdsem); - brcmf_rx_frame(bus->sdiodev->dev, ifidx, &bus->glom); + brcmf_rx_frame(bus->drvr, ifidx, save_pfirst, num); down(&bus->sdsem); } bus->rxglomframes++; - bus->rxglompkts += bus->glom.qlen; + bus->rxglompkts += num; } return num; } -static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition, +static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_bus *bus, uint *condition, bool *pending) { DECLARE_WAITQUEUE(wait, current); @@ -1504,7 +1623,7 @@ static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition, return timeout; } -static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus) +static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_bus *bus) { if (waitqueue_active(&bus->dcmd_resp_wait)) wake_up_interruptible(&bus->dcmd_resp_wait); @@ -1512,7 +1631,7 @@ static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus) return 0; } static void -brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff) +brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff) { uint rdlen, pad; @@ -1538,7 +1657,7 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff) if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) { pad = bus->blocksize - (rdlen % bus->blocksize); if ((pad <= bus->roundup) && (pad < bus->blocksize) && - ((len + pad) < bus->sdiodev->bus_if->maxctl)) + ((len + pad) < bus->drvr->maxctl)) rdlen += pad; } else if (rdlen % BRCMF_SDALIGN) { rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN); @@ -1549,18 +1668,18 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff) rdlen = roundup(rdlen, ALIGNMENT); /* Drop if the read is too big or it exceeds our maximum */ - if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) { + if ((rdlen + BRCMF_FIRSTREAD) > bus->drvr->maxctl) { brcmf_dbg(ERROR, "%d-byte control read exceeds %d-byte buffer\n", - rdlen, bus->sdiodev->bus_if->maxctl); - bus->sdiodev->bus_if->dstats.rx_errors++; + rdlen, bus->drvr->maxctl); + bus->drvr->rx_errors++; brcmf_sdbrcm_rxfail(bus, false, false); goto done; } - if ((len - doff) > bus->sdiodev->bus_if->maxctl) { + if ((len - doff) > bus->drvr->maxctl) { brcmf_dbg(ERROR, "%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n", - len, len - doff, bus->sdiodev->bus_if->maxctl); - bus->sdiodev->bus_if->dstats.rx_errors++; + len, len - doff, bus->drvr->maxctl); + bus->drvr->rx_errors++; bus->rx_toolong++; brcmf_sdbrcm_rxfail(bus, false, false); goto done; @@ -1570,7 +1689,8 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff) sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad, SDIO_FUNC_2, - F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen); + F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen, + NULL); bus->f2rxdata++; /* Control frame failures need retransmission */ @@ -1601,7 +1721,7 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff) } /* Pad read to blocksize for efficiency */ -static void brcmf_pad(struct brcmf_sdio *bus, u16 
*pad, u16 *rdlen) +static void brcmf_pad(struct brcmf_bus *bus, u16 *pad, u16 *rdlen) { if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) { *pad = bus->blocksize - (*rdlen % bus->blocksize); @@ -1614,7 +1734,7 @@ static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen) } static void -brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen, +brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen, struct sk_buff **pkt, u8 **rxbuf) { int sdret; /* Return code from calls */ @@ -1626,15 +1746,16 @@ brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen, pkt_align(*pkt, rdlen, BRCMF_SDALIGN); *rxbuf = (u8 *) ((*pkt)->data); /* Read the entire frame */ - sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad, - SDIO_FUNC_2, F2SYNC, *pkt); + sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad, + SDIO_FUNC_2, F2SYNC, + *rxbuf, rdlen, *pkt); bus->f2rxdata++; if (sdret < 0) { brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n", rdlen, sdret); brcmu_pkt_buf_free_skb(*pkt); - bus->sdiodev->bus_if->dstats.rx_errors++; + bus->drvr->rx_errors++; /* Force retry w/normal header read. * Don't attempt NAK for * gSPI @@ -1646,7 +1767,7 @@ brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen, /* Checks the header */ static int -brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf, +brcmf_check_rxbuf(struct brcmf_bus *bus, struct sk_buff *pkt, u8 *rxbuf, u8 rxseq, u16 nextlen, u16 *len) { u16 check; @@ -1702,7 +1823,7 @@ brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf, /* Return true if there may be more frames to read */ static uint -brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) +brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished) { u16 len, check; /* Extracted hardware header fields */ u8 chan, seq, doff; /* Extracted software header fields */ @@ -1725,15 +1846,14 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) *finished = false; for (rxseq = bus->rx_seq, rxleft = maxframes; - !bus->rxskip && rxleft && - bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN; + !bus->rxskip && rxleft && bus->drvr->busstate != BRCMF_BUS_DOWN; rxseq++, rxleft--) { /* Handle glomming separately */ - if (bus->glomd || !skb_queue_empty(&bus->glom)) { + if (bus->glom || bus->glomd) { u8 cnt; brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n", - bus->glomd, skb_peek(&bus->glom)); + bus->glomd, bus->glom); cnt = brcmf_sdbrcm_rxglom(bus, rxseq); brcmf_dbg(GLOM, "rxglom returned %d\n", cnt); rxseq += cnt - 1; @@ -1785,7 +1905,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) bus->nextlen = 0; } - bus->rx_readahead_cnt++; + bus->drvr->rx_readahead_cnt++; /* Handle Flow Control */ fcbits = SDPCM_FCMASK_VALUE( @@ -1856,7 +1976,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) /* Read frame header (hardware and software) */ sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad, SDIO_FUNC_2, F2SYNC, bus->rxhdr, - BRCMF_FIRSTREAD); + BRCMF_FIRSTREAD, NULL); bus->f2rxhdrs++; if (sdret < 0) { @@ -1983,7 +2103,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) /* Too long -- skip this frame */ brcmf_dbg(ERROR, "too long: len %d rdlen %d\n", len, rdlen); - bus->sdiodev->bus_if->dstats.rx_errors++; + bus->drvr->rx_errors++; bus->rx_toolong++; brcmf_sdbrcm_rxfail(bus, false, false); continue; @@ -1995,7 +2115,7 @@ 
brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) /* Give up on data, request rtx of events */ brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: rdlen %d chan %d\n", rdlen, chan); - bus->sdiodev->bus_if->dstats.rx_dropped++; + bus->drvr->rx_dropped++; brcmf_sdbrcm_rxfail(bus, false, RETRYCHAN(chan)); continue; } @@ -2005,8 +2125,9 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) pkt_align(pkt, rdlen, BRCMF_SDALIGN); /* Read the remaining frame data */ - sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad, - SDIO_FUNC_2, F2SYNC, pkt); + sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad, + SDIO_FUNC_2, F2SYNC, ((u8 *) (pkt->data)), + rdlen, pkt); bus->f2rxdata++; if (sdret < 0) { @@ -2015,7 +2136,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) : ((chan == SDPCM_DATA_CHANNEL) ? "data" : "test")), sdret); brcmu_pkt_buf_free_skb(pkt); - bus->sdiodev->bus_if->dstats.rx_errors++; + bus->drvr->rx_errors++; brcmf_sdbrcm_rxfail(bus, true, RETRYCHAN(chan)); continue; } @@ -2064,17 +2185,16 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) if (pkt->len == 0) { brcmu_pkt_buf_free_skb(pkt); continue; - } else if (brcmf_proto_hdrpull(bus->sdiodev->dev, &ifidx, - pkt) != 0) { + } else if (brcmf_proto_hdrpull(bus->drvr, &ifidx, pkt) != 0) { brcmf_dbg(ERROR, "rx protocol error\n"); brcmu_pkt_buf_free_skb(pkt); - bus->sdiodev->bus_if->dstats.rx_errors++; + bus->drvr->rx_errors++; continue; } /* Unlock during rx call */ up(&bus->sdsem); - brcmf_rx_packet(bus->sdiodev->dev, ifidx, pkt); + brcmf_rx_frame(bus->drvr, ifidx, pkt, 1); down(&bus->sdsem); } rxcount = maxframes - rxleft; @@ -2094,8 +2214,16 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) return rxcount; } +static int +brcmf_sdbrcm_send_buf(struct brcmf_bus *bus, u32 addr, uint fn, uint flags, + u8 *buf, uint nbytes, struct sk_buff *pkt) +{ + return brcmf_sdcard_send_buf + (bus->sdiodev, addr, fn, flags, buf, nbytes, pkt); +} + static void -brcmf_sdbrcm_wait_for_event(struct brcmf_sdio *bus, bool *lockvar) +brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar) { up(&bus->sdsem); wait_event_interruptible_timeout(bus->ctrl_wait, @@ -2105,7 +2233,7 @@ brcmf_sdbrcm_wait_for_event(struct brcmf_sdio *bus, bool *lockvar) } static void -brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus) +brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus) { if (waitqueue_active(&bus->ctrl_wait)) wake_up_interruptible(&bus->ctrl_wait); @@ -2114,7 +2242,7 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus) /* Writes a HW/SW header into the packet and sends it. 
*/ /* Assumes: (a) header space already there, (b) caller holds lock */ -static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt, +static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt, uint chan, bool free_pkt) { int ret; @@ -2134,7 +2262,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt, if (skb_headroom(pkt) < pad) { brcmf_dbg(INFO, "insufficient headroom %d for %d pad\n", skb_headroom(pkt), pad); - bus->sdiodev->bus_if->tx_realloc++; + bus->drvr->tx_realloc++; new = brcmu_pkt_buf_get_skb(pkt->len + BRCMF_SDALIGN); if (!new) { brcmf_dbg(ERROR, "couldn't allocate new %d-byte packet\n", @@ -2203,8 +2331,9 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt, if (len & (ALIGNMENT - 1)) len = roundup(len, ALIGNMENT); - ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad, - SDIO_FUNC_2, F2SYNC, pkt); + ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad, + SDIO_FUNC_2, F2SYNC, frame, + len, pkt); bus->f2txdata++; if (ret < 0) { @@ -2242,7 +2371,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt, /* restore pkt buffer pointer before calling tx complete routine */ skb_pull(pkt, SDPCM_HDRLEN + pad); up(&bus->sdsem); - brcmf_txcomplete(bus->sdiodev->dev, pkt, ret != 0); + brcmf_txcomplete(bus->drvr, pkt, ret != 0); down(&bus->sdsem); if (free_pkt) @@ -2251,7 +2380,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt, return ret; } -static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes) +static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes) { struct sk_buff *pkt; u32 intstatus = 0; @@ -2261,6 +2390,8 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes) uint datalen; u8 tx_prec_map; + struct brcmf_pub *drvr = bus->drvr; + brcmf_dbg(TRACE, "Enter\n"); tx_prec_map = ~bus->flowcontrol; @@ -2278,9 +2409,9 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes) ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true); if (ret) - bus->sdiodev->bus_if->dstats.tx_errors++; + bus->drvr->tx_errors++; else - bus->sdiodev->bus_if->dstats.tx_bytes += datalen; + bus->drvr->dstats.tx_bytes += datalen; /* In poll mode, need to check for other events */ if (!bus->intr && cnt) { @@ -2297,98 +2428,14 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes) } /* Deflow-control stack if needed */ - if (bus->sdiodev->bus_if->drvr_up && - (bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) && - bus->txoff && (pktq_len(&bus->txq) < TXLOW)) { - bus->txoff = OFF; - brcmf_txflowcontrol(bus->sdiodev->dev, 0, OFF); - } + if (drvr->up && (drvr->busstate == BRCMF_BUS_DATA) && + drvr->txoff && (pktq_len(&bus->txq) < TXLOW)) + brcmf_txflowcontrol(drvr, 0, OFF); return cnt; } -static void brcmf_sdbrcm_bus_stop(struct device *dev) -{ - u32 local_hostintmask; - u8 saveclk; - uint retries; - int err; - struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv; - struct brcmf_sdio *bus = sdiodev->bus; - - brcmf_dbg(TRACE, "Enter\n"); - - if (bus->watchdog_tsk) { - send_sig(SIGTERM, bus->watchdog_tsk, 1); - kthread_stop(bus->watchdog_tsk); - bus->watchdog_tsk = NULL; - } - - if (bus->dpc_tsk && bus->dpc_tsk != current) { - send_sig(SIGTERM, bus->dpc_tsk, 1); - kthread_stop(bus->dpc_tsk); - bus->dpc_tsk = NULL; - } - - down(&bus->sdsem); - - bus_wake(bus); - - /* Enable clock for device interrupts */ - brcmf_sdbrcm_clkctl(bus, 
CLK_AVAIL, false); - - /* Disable and clear interrupts at the chip level also */ - w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask), &retries); - local_hostintmask = bus->hostintmask; - bus->hostintmask = 0; - - /* Change our idea of bus state */ - bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; - - /* Force clocks on backplane to be sure F2 interrupt propagates */ - saveclk = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1, - SBSDIO_FUNC1_CHIPCLKCSR, &err); - if (!err) { - brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, - SBSDIO_FUNC1_CHIPCLKCSR, - (saveclk | SBSDIO_FORCE_HT), &err); - } - if (err) - brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err); - - /* Turn off the bus (F2), free any pending packets */ - brcmf_dbg(INTR, "disable SDIO interrupts\n"); - brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx, - SDIO_FUNC_ENABLE_1, NULL); - - /* Clear any pending interrupts now that F2 is disabled */ - w_sdreg32(bus, local_hostintmask, - offsetof(struct sdpcmd_regs, intstatus), &retries); - - /* Turn off the backplane clock (only) */ - brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false); - - /* Clear the data packet queues */ - brcmu_pktq_flush(&bus->txq, true, NULL, NULL); - - /* Clear any held glomming stuff */ - if (bus->glomd) - brcmu_pkt_buf_free_skb(bus->glomd); - brcmf_sdbrcm_free_glom(bus); - - /* Clear rx control and wake any waiters */ - bus->rxlen = 0; - brcmf_sdbrcm_dcmd_resp_wake(bus); - - /* Reset some F2 state stuff */ - bus->rxskip = false; - bus->tx_seq = bus->rx_seq = 0; - - up(&bus->sdsem); -} - -static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) +static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus) { u32 intstatus, newstatus = 0; uint retries = 0; @@ -2416,7 +2463,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) SBSDIO_DEVICE_CTL, &err); if (err) { brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", err); - bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; + bus->drvr->busstate = BRCMF_BUS_DOWN; } #endif /* BCMDBG */ @@ -2426,7 +2473,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) if (err) { brcmf_dbg(ERROR, "error reading CSR: %d\n", err); - bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; + bus->drvr->busstate = BRCMF_BUS_DOWN; } brcmf_dbg(INFO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", @@ -2439,7 +2486,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) if (err) { brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", err); - bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; + bus->drvr->busstate = BRCMF_BUS_DOWN; } devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, @@ -2447,7 +2494,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) if (err) { brcmf_dbg(ERROR, "error writing DEVCTL: %d\n", err); - bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; + bus->drvr->busstate = BRCMF_BUS_DOWN; } bus->clkstate = CLK_AVAIL; } else { @@ -2549,9 +2596,9 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) (bus->clkstate == CLK_AVAIL)) { int ret, i; - ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad, + ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad, SDIO_FUNC_2, F2SYNC, (u8 *) bus->ctrl_frame_buf, - (u32) bus->ctrl_frame_len); + (u32) bus->ctrl_frame_len, NULL); if (ret < 0) { /* On failure, abort the command and @@ -2603,11 +2650,11 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) else await next interrupt */ /* On failed register access, all bets are off: no resched or interrupts */ - if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || + if ((bus->drvr->busstate == 
BRCMF_BUS_DOWN) || brcmf_sdcard_regfail(bus->sdiodev)) { brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation %d\n", brcmf_sdcard_regfail(bus->sdiodev)); - bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; + bus->drvr->busstate = BRCMF_BUS_DOWN; bus->intstatus = 0; } else if (bus->clkstate == CLK_PENDING) { brcmf_dbg(INFO, "rescheduled due to CLK_PENDING awaiting I_CHIPACTIVE interrupt\n"); @@ -2634,7 +2681,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) static int brcmf_sdbrcm_dpc_thread(void *data) { - struct brcmf_sdio *bus = (struct brcmf_sdio *) data; + struct brcmf_bus *bus = (struct brcmf_bus *) data; allow_signal(SIGTERM); /* Run until signal received */ @@ -2644,12 +2691,12 @@ static int brcmf_sdbrcm_dpc_thread(void *data) if (!wait_for_completion_interruptible(&bus->dpc_wait)) { /* Call bus dpc unless it indicated down (then clean stop) */ - if (bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN) { + if (bus->drvr->busstate != BRCMF_BUS_DOWN) { if (brcmf_sdbrcm_dpc(bus)) complete(&bus->dpc_wait); } else { /* after stopping the bus, exit thread */ - brcmf_sdbrcm_bus_stop(bus->sdiodev->dev); + brcmf_sdbrcm_bus_stop(bus); bus->dpc_tsk = NULL; break; } @@ -2659,13 +2706,10 @@ static int brcmf_sdbrcm_dpc_thread(void *data) return 0; } -static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt) +int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt) { int ret = -EBADE; uint datalen, prec; - struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv; - struct brcmf_sdio *bus = sdiodev->bus; brcmf_dbg(TRACE, "Enter\n"); @@ -2684,10 +2728,9 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt) /* Priority based enq */ spin_lock_bh(&bus->txqlock); - if (brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec) == - false) { + if (brcmf_c_prec_enq(bus->drvr, &bus->txq, pkt, prec) == false) { skb_pull(pkt, SDPCM_HDRLEN); - brcmf_txcomplete(bus->sdiodev->dev, pkt, false); + brcmf_txcomplete(bus->drvr, pkt, false); brcmu_pkt_buf_free_skb(pkt); brcmf_dbg(ERROR, "out of bus->txq !!!\n"); ret = -ENOSR; @@ -2696,10 +2739,8 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt) } spin_unlock_bh(&bus->txqlock); - if (pktq_len(&bus->txq) >= TXHI) { - bus->txoff = ON; - brcmf_txflowcontrol(bus->sdiodev->dev, 0, ON); - } + if (pktq_len(&bus->txq) >= TXHI) + brcmf_txflowcontrol(bus->drvr, 0, ON); #ifdef BCMDBG if (pktq_plen(&bus->txq, prec) > qcount[prec]) @@ -2716,7 +2757,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt) } static int -brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data, +brcmf_sdbrcm_membytes(struct brcmf_bus *bus, bool write, u32 address, u8 *data, uint size) { int bcmerror = 0; @@ -2777,7 +2818,7 @@ brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data, #ifdef BCMDBG #define CONSOLE_LINE_MAX 192 -static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus) +static int brcmf_sdbrcm_readconsole(struct brcmf_bus *bus) { struct brcmf_console *c = &bus->console; u8 line[CONSOLE_LINE_MAX], ch; @@ -2854,14 +2895,14 @@ static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus) } #endif /* BCMDBG */ -static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len) +static int brcmf_tx_frame(struct brcmf_bus *bus, u8 *frame, u16 len) { int i; int ret; bus->ctrl_frame_stat = false; - ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad, - SDIO_FUNC_2, 
F2SYNC, frame, len); + ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad, + SDIO_FUNC_2, F2SYNC, frame, len, NULL); if (ret < 0) { /* On failure, abort the command and terminate the frame */ @@ -2896,8 +2937,8 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len) return ret; } -static int -brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen) +int +brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen) { u8 *frame; u16 len; @@ -2905,9 +2946,6 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen) uint retries = 0; u8 doff = 0; int ret = -1; - struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv; - struct brcmf_sdio *bus = sdiodev->bus; brcmf_dbg(TRACE, "Enter\n"); @@ -3007,22 +3045,19 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen) up(&bus->sdsem); if (ret) - bus->tx_ctlerrs++; + bus->drvr->tx_ctlerrs++; else - bus->tx_ctlpkts++; + bus->drvr->tx_ctlpkts++; return ret ? -EIO : 0; } -static int -brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen) +int +brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen) { int timeleft; uint rxlen = 0; bool pending; - struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv; - struct brcmf_sdio *bus = sdiodev->bus; brcmf_dbg(TRACE, "Enter\n"); @@ -3048,21 +3083,21 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen) } if (rxlen) - bus->rx_ctlpkts++; + bus->drvr->rx_ctlpkts++; else - bus->rx_ctlerrs++; + bus->drvr->rx_ctlerrs++; return rxlen ? (int)rxlen : -ETIMEDOUT; } -static int brcmf_sdbrcm_downloadvars(struct brcmf_sdio *bus, void *arg, int len) +static int brcmf_sdbrcm_downloadvars(struct brcmf_bus *bus, void *arg, int len) { int bcmerror = 0; brcmf_dbg(TRACE, "Enter\n"); /* Basic sanity checks */ - if (bus->sdiodev->bus_if->drvr_up) { + if (bus->drvr->up) { bcmerror = -EISCONN; goto err; } @@ -3088,7 +3123,7 @@ static int brcmf_sdbrcm_downloadvars(struct brcmf_sdio *bus, void *arg, int len) return bcmerror; } -static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus) +static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus) { int bcmerror = 0; u32 varsize; @@ -3175,11 +3210,135 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus) return bcmerror; } -static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter) +static void +brcmf_sdbrcm_chip_disablecore(struct brcmf_sdio_dev *sdiodev, u32 corebase) +{ + u32 regdata; + + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(corebase, sbtmstatelow), 4); + if (regdata & SBTML_RESET) + return; + + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(corebase, sbtmstatelow), 4); + if ((regdata & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) != 0) { + /* + * set target reject and spin until busy is clear + * (preserve core-specific bits) + */ + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(corebase, sbtmstatelow), 4); + brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), + 4, regdata | SBTML_REJ); + + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(corebase, sbtmstatelow), 4); + udelay(1); + SPINWAIT((brcmf_sdcard_reg_read(sdiodev, + CORE_SB(corebase, sbtmstatehigh), 4) & + SBTMH_BUSY), 100000); + + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(corebase, sbtmstatehigh), 4); + if (regdata & SBTMH_BUSY) + brcmf_dbg(ERROR, "ARM core still busy\n"); + + regdata = 
brcmf_sdcard_reg_read(sdiodev, + CORE_SB(corebase, sbidlow), 4); + if (regdata & SBIDL_INIT) { + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(corebase, sbimstate), 4) | + SBIM_RJ; + brcmf_sdcard_reg_write(sdiodev, + CORE_SB(corebase, sbimstate), 4, + regdata); + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(corebase, sbimstate), 4); + udelay(1); + SPINWAIT((brcmf_sdcard_reg_read(sdiodev, + CORE_SB(corebase, sbimstate), 4) & + SBIM_BY), 100000); + } + + /* set reset and reject while enabling the clocks */ + brcmf_sdcard_reg_write(sdiodev, + CORE_SB(corebase, sbtmstatelow), 4, + (((SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) | + SBTML_REJ | SBTML_RESET)); + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(corebase, sbtmstatelow), 4); + udelay(10); + + /* clear the initiator reject bit */ + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(corebase, sbidlow), 4); + if (regdata & SBIDL_INIT) { + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(corebase, sbimstate), 4) & + ~SBIM_RJ; + brcmf_sdcard_reg_write(sdiodev, + CORE_SB(corebase, sbimstate), 4, + regdata); + } + } + + /* leave reset and reject asserted */ + brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4, + (SBTML_REJ | SBTML_RESET)); + udelay(1); +} + +static void +brcmf_sdbrcm_chip_resetcore(struct brcmf_sdio_dev *sdiodev, u32 corebase) +{ + u32 regdata; + + /* + * Must do the disable sequence first to work for + * arbitrary current core state. + */ + brcmf_sdbrcm_chip_disablecore(sdiodev, corebase); + + /* + * Now do the initialization sequence. + * set reset while enabling the clock and + * forcing them on throughout the core + */ + brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4, + ((SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) | + SBTML_RESET); + udelay(1); + + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(corebase, sbtmstatehigh), 4); + if (regdata & SBTMH_SERR) + brcmf_sdcard_reg_write(sdiodev, + CORE_SB(corebase, sbtmstatehigh), 4, 0); + + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(corebase, sbimstate), 4); + if (regdata & (SBIM_IBE | SBIM_TO)) + brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbimstate), 4, + regdata & ~(SBIM_IBE | SBIM_TO)); + + /* clear reset and allow it to propagate throughout the core */ + brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4, + (SICF_FGC << SBTML_SICF_SHIFT) | + (SICF_CLOCK_EN << SBTML_SICF_SHIFT)); + udelay(1); + + /* leave clock enabled */ + brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4, + (SICF_CLOCK_EN << SBTML_SICF_SHIFT)); + udelay(1); +} + +static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter) { uint retries; + u32 regdata; int bcmerror = 0; - struct chip_info *ci = bus->ci; /* To enter download state, disable ARM and reset SOCRAM. * To exit download state, simply reset ARM (default is RAM boot). 
@@ -3187,9 +3346,10 @@ static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter) if (enter) { bus->alp_only = true; - ci->coredisable(bus->sdiodev, ci, BCMA_CORE_ARM_CM3); + brcmf_sdbrcm_chip_disablecore(bus->sdiodev, + bus->ci->armcorebase); - ci->resetcore(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM); + brcmf_sdbrcm_chip_resetcore(bus->sdiodev, bus->ci->ramcorebase); /* Clear the top bit of memory */ if (bus->ramsize) { @@ -3198,7 +3358,11 @@ static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter) (u8 *)&zeros, 4); } } else { - if (!ci->iscoreup(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) { + regdata = brcmf_sdcard_reg_read(bus->sdiodev, + CORE_SB(bus->ci->ramcorebase, sbtmstatelow), 4); + regdata &= (SBTML_RESET | SBTML_REJ_MASK | + (SICF_CLOCK_EN << SBTML_SICF_SHIFT)); + if ((SICF_CLOCK_EN << SBTML_SICF_SHIFT) != regdata) { brcmf_dbg(ERROR, "SOCRAM core is down after reset?\n"); bcmerror = -EBADE; goto fail; @@ -3213,18 +3377,18 @@ static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter) w_sdreg32(bus, 0xFFFFFFFF, offsetof(struct sdpcmd_regs, intstatus), &retries); - ci->resetcore(bus->sdiodev, ci, BCMA_CORE_ARM_CM3); + brcmf_sdbrcm_chip_resetcore(bus->sdiodev, bus->ci->armcorebase); /* Allow HT Clock now that the ARM is running. */ bus->alp_only = false; - bus->sdiodev->bus_if->state = BRCMF_BUS_LOAD; + bus->drvr->busstate = BRCMF_BUS_LOAD; } fail: return bcmerror; } -static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus) +static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus) { if (bus->firmware->size < bus->fw_ptr + len) len = bus->firmware->size - bus->fw_ptr; @@ -3234,7 +3398,10 @@ static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus) return len; } -static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus) +MODULE_FIRMWARE(BCM4329_FW_NAME); +MODULE_FIRMWARE(BCM4329_NV_NAME); + +static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus) { int offset = 0; uint len; @@ -3243,7 +3410,8 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus) brcmf_dbg(INFO, "Enter\n"); - ret = request_firmware(&bus->firmware, BRCMFMAC_FW_NAME, + bus->fw_name = BCM4329_FW_NAME; + ret = request_firmware(&bus->firmware, bus->fw_name, &bus->sdiodev->func[2]->dev); if (ret) { brcmf_dbg(ERROR, "Fail to request firmware %d\n", ret); @@ -3333,14 +3501,15 @@ static uint brcmf_process_nvram_vars(char *varbuf, uint len) return buf_len; } -static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus) +static int brcmf_sdbrcm_download_nvram(struct brcmf_bus *bus) { uint len; char *memblock = NULL; char *bufp; int ret; - ret = request_firmware(&bus->firmware, BRCMFMAC_NV_NAME, + bus->nv_name = BCM4329_NV_NAME; + ret = request_firmware(&bus->firmware, bus->nv_name, &bus->sdiodev->func[2]->dev); if (ret) { brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret); @@ -3380,7 +3549,7 @@ static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus) return ret; } -static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus) +static int _brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus) { int bcmerror = -1; @@ -3413,7 +3582,7 @@ static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus) } static bool -brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus) +brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus) { bool ret; @@ -3427,11 +3596,91 @@ brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus) return ret; } -static int brcmf_sdbrcm_bus_init(struct device 
*dev) +void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus) { - struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv; - struct brcmf_sdio *bus = sdiodev->bus; + u32 local_hostintmask; + u8 saveclk; + uint retries; + int err; + + brcmf_dbg(TRACE, "Enter\n"); + + if (bus->watchdog_tsk) { + send_sig(SIGTERM, bus->watchdog_tsk, 1); + kthread_stop(bus->watchdog_tsk); + bus->watchdog_tsk = NULL; + } + + if (bus->dpc_tsk && bus->dpc_tsk != current) { + send_sig(SIGTERM, bus->dpc_tsk, 1); + kthread_stop(bus->dpc_tsk); + bus->dpc_tsk = NULL; + } + + down(&bus->sdsem); + + bus_wake(bus); + + /* Enable clock for device interrupts */ + brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false); + + /* Disable and clear interrupts at the chip level also */ + w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask), &retries); + local_hostintmask = bus->hostintmask; + bus->hostintmask = 0; + + /* Change our idea of bus state */ + bus->drvr->busstate = BRCMF_BUS_DOWN; + + /* Force clocks on backplane to be sure F2 interrupt propagates */ + saveclk = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (!err) { + brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, + (saveclk | SBSDIO_FORCE_HT), &err); + } + if (err) + brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err); + + /* Turn off the bus (F2), free any pending packets */ + brcmf_dbg(INTR, "disable SDIO interrupts\n"); + brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx, + SDIO_FUNC_ENABLE_1, NULL); + + /* Clear any pending interrupts now that F2 is disabled */ + w_sdreg32(bus, local_hostintmask, + offsetof(struct sdpcmd_regs, intstatus), &retries); + + /* Turn off the backplane clock (only) */ + brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false); + + /* Clear the data packet queues */ + brcmu_pktq_flush(&bus->txq, true, NULL, NULL); + + /* Clear any held glomming stuff */ + if (bus->glomd) + brcmu_pkt_buf_free_skb(bus->glomd); + + if (bus->glom) + brcmu_pkt_buf_free_skb(bus->glom); + + bus->glom = bus->glomd = NULL; + + /* Clear rx control and wake any waiters */ + bus->rxlen = 0; + brcmf_sdbrcm_dcmd_resp_wake(bus); + + /* Reset some F2 state stuff */ + bus->rxskip = false; + bus->tx_seq = bus->rx_seq = 0; + + up(&bus->sdsem); +} + +int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr) +{ + struct brcmf_bus *bus = drvr->bus; unsigned long timeout; uint retries = 0; u8 ready, enable; @@ -3441,16 +3690,16 @@ static int brcmf_sdbrcm_bus_init(struct device *dev) brcmf_dbg(TRACE, "Enter\n"); /* try to download image and nvram to the dongle */ - if (bus_if->state == BRCMF_BUS_DOWN) { + if (drvr->busstate == BRCMF_BUS_DOWN) { if (!(brcmf_sdbrcm_download_firmware(bus))) return -1; } - if (!bus->sdiodev->bus_if->drvr) + if (!bus->drvr) return 0; /* Start the watchdog timer */ - bus->tickcnt = 0; + bus->drvr->tickcnt = 0; brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS); down(&bus->sdsem); @@ -3507,7 +3756,7 @@ static int brcmf_sdbrcm_bus_init(struct device *dev) SBSDIO_WATERMARK, 8, &err); /* Set bus state according to enable result */ - bus_if->state = BRCMF_BUS_DATA; + drvr->busstate = BRCMF_BUS_DATA; } else { @@ -3522,7 +3771,7 @@ static int brcmf_sdbrcm_bus_init(struct device *dev) SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err); /* If we didn't come up, turn off backplane clock */ - if (bus_if->state != BRCMF_BUS_DATA) + if (drvr->busstate != BRCMF_BUS_DATA) brcmf_sdbrcm_clkctl(bus, CLK_NONE, false); exit: @@ -3533,7 +3782,7 @@ static int 
brcmf_sdbrcm_bus_init(struct device *dev) void brcmf_sdbrcm_isr(void *arg) { - struct brcmf_sdio *bus = (struct brcmf_sdio *) arg; + struct brcmf_bus *bus = (struct brcmf_bus *) arg; brcmf_dbg(TRACE, "Enter\n"); @@ -3542,7 +3791,7 @@ void brcmf_sdbrcm_isr(void *arg) return; } - if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) { + if (bus->drvr->busstate == BRCMF_BUS_DOWN) { brcmf_dbg(ERROR, "bus is down. we have nothing to do\n"); return; } @@ -3565,14 +3814,14 @@ void brcmf_sdbrcm_isr(void *arg) complete(&bus->dpc_wait); } -static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus) +static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr) { -#ifdef BCMDBG - struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev); -#endif /* BCMDBG */ + struct brcmf_bus *bus; brcmf_dbg(TIMER, "Enter\n"); + bus = drvr->bus; + /* Ignore the timer if simulating bus down */ if (bus->sleeping) return false; @@ -3616,8 +3865,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus) } #ifdef BCMDBG /* Poll for console output periodically */ - if (bus_if->state == BRCMF_BUS_DATA && - bus->console_interval != 0) { + if (drvr->busstate == BRCMF_BUS_DATA && bus->console_interval != 0) { bus->console.count += BRCMF_WD_POLL_MS; if (bus->console.count >= bus->console_interval) { bus->console.count -= bus->console_interval; @@ -3652,12 +3900,10 @@ static bool brcmf_sdbrcm_chipmatch(u16 chipid) { if (chipid == BCM4329_CHIP_ID) return true; - if (chipid == BCM4330_CHIP_ID) - return true; return false; } -static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus) +static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus) { brcmf_dbg(TRACE, "Enter\n"); @@ -3669,13 +3915,13 @@ static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus) bus->databuf = NULL; } -static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus) +static bool brcmf_sdbrcm_probe_malloc(struct brcmf_bus *bus) { brcmf_dbg(TRACE, "Enter\n"); - if (bus->sdiodev->bus_if->maxctl) { + if (bus->drvr->maxctl) { bus->rxblen = - roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN), + roundup((bus->drvr->maxctl + SDPCM_HDRLEN), ALIGNMENT) + BRCMF_SDALIGN; bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC); if (!(bus->rxbuf)) @@ -3704,14 +3950,276 @@ static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus) return false; } +/* SDIO Pad drive strength to select value mappings */ +struct sdiod_drive_str { + u8 strength; /* Pad Drive Strength in mA */ + u8 sel; /* Chip-specific select value */ +}; + +/* SDIO Drive Strength to sel value table for PMU Rev 1 */ +static const struct sdiod_drive_str sdiod_drive_strength_tab1[] = { + { + 4, 0x2}, { + 2, 0x3}, { + 1, 0x0}, { + 0, 0x0} + }; + +/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */ +static const struct sdiod_drive_str sdiod_drive_strength_tab2[] = { + { + 12, 0x7}, { + 10, 0x6}, { + 8, 0x5}, { + 6, 0x4}, { + 4, 0x2}, { + 2, 0x1}, { + 0, 0x0} + }; + +/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */ +static const struct sdiod_drive_str sdiod_drive_strength_tab3[] = { + { + 32, 0x7}, { + 26, 0x6}, { + 22, 0x5}, { + 16, 0x4}, { + 12, 0x3}, { + 8, 0x2}, { + 4, 0x1}, { + 0, 0x0} + }; + +#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu)) + +static char *brcmf_chipname(uint chipid, char *buf, uint len) +{ + const char *fmt; + + fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? 
"%d" : "%x"; + snprintf(buf, len, fmt, chipid); + return buf; +} + +static void brcmf_sdbrcm_sdiod_drive_strength_init(struct brcmf_bus *bus, + u32 drivestrength) { + struct sdiod_drive_str *str_tab = NULL; + u32 str_mask = 0; + u32 str_shift = 0; + char chn[8]; + + if (!(bus->ci->cccaps & CC_CAP_PMU)) + return; + + switch (SDIOD_DRVSTR_KEY(bus->ci->chip, bus->ci->pmurev)) { + case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1): + str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab1; + str_mask = 0x30000000; + str_shift = 28; + break; + case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2): + case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3): + str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab2; + str_mask = 0x00003800; + str_shift = 11; + break; + case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8): + str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab3; + str_mask = 0x00003800; + str_shift = 11; + break; + default: + brcmf_dbg(ERROR, "No SDIO Drive strength init done for chip %s rev %d pmurev %d\n", + brcmf_chipname(bus->ci->chip, chn, 8), + bus->ci->chiprev, bus->ci->pmurev); + break; + } + + if (str_tab != NULL) { + u32 drivestrength_sel = 0; + u32 cc_data_temp; + int i; + + for (i = 0; str_tab[i].strength != 0; i++) { + if (drivestrength >= str_tab[i].strength) { + drivestrength_sel = str_tab[i].sel; + break; + } + } + + brcmf_sdcard_reg_write(bus->sdiodev, + CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr), + 4, 1); + cc_data_temp = brcmf_sdcard_reg_read(bus->sdiodev, + CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr), 4); + cc_data_temp &= ~str_mask; + drivestrength_sel <<= str_shift; + cc_data_temp |= drivestrength_sel; + brcmf_sdcard_reg_write(bus->sdiodev, + CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr), + 4, cc_data_temp); + + brcmf_dbg(INFO, "SDIO: %dmA drive strength selected, set to 0x%08x\n", + drivestrength, cc_data_temp); + } +} + +static int +brcmf_sdbrcm_chip_recognition(struct brcmf_sdio_dev *sdiodev, + struct chip_info *ci, u32 regs) +{ + u32 regdata; + + /* + * Get CC core rev + * Chipid is assume to be at offset 0 from regs arg + * For different chiptypes or old sdio hosts w/o chipcommon, + * other ways of recognition should be added here. 
+ */ + ci->cccorebase = regs; + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_CC_REG(ci->cccorebase, chipid), 4); + ci->chip = regdata & CID_ID_MASK; + ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT; + + brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev); + + /* Address of cores for new chips should be added here */ + switch (ci->chip) { + case BCM4329_CHIP_ID: + ci->buscorebase = BCM4329_CORE_BUS_BASE; + ci->ramcorebase = BCM4329_CORE_SOCRAM_BASE; + ci->armcorebase = BCM4329_CORE_ARM_BASE; + ci->ramsize = BCM4329_RAMSIZE; + break; + default: + brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip); + return -ENODEV; + } + + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(ci->cccorebase, sbidhigh), 4); + ci->ccrev = SBCOREREV(regdata); + + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_CC_REG(ci->cccorebase, pmucapabilities), 4); + ci->pmurev = regdata & PCAP_REV_MASK; + + regdata = brcmf_sdcard_reg_read(sdiodev, + CORE_SB(ci->buscorebase, sbidhigh), 4); + ci->buscorerev = SBCOREREV(regdata); + ci->buscoretype = (regdata & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT; + + brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n", + ci->ccrev, ci->pmurev, ci->buscorerev, ci->buscoretype); + + /* get chipcommon capabilites */ + ci->cccaps = brcmf_sdcard_reg_read(sdiodev, + CORE_CC_REG(ci->cccorebase, capabilities), 4); + + return 0; +} + +static int +brcmf_sdbrcm_chip_attach(struct brcmf_bus *bus, u32 regs) +{ + struct chip_info *ci; + int err; + u8 clkval, clkset; + + brcmf_dbg(TRACE, "Enter\n"); + + /* alloc chip_info_t */ + ci = kzalloc(sizeof(struct chip_info), GFP_ATOMIC); + if (NULL == ci) + return -ENOMEM; + + /* bus/core/clk setup for register access */ + /* Try forcing SDIO core to do ALPAvail request only */ + clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ; + brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err); + if (err) { + brcmf_dbg(ERROR, "error writing for HT off\n"); + goto fail; + } + + /* If register supported, wait for ALPAvail and then force ALP */ + /* This may take up to 15 milliseconds */ + clkval = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, NULL); + if ((clkval & ~SBSDIO_AVBITS) == clkset) { + SPINWAIT(((clkval = + brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, + NULL)), + !SBSDIO_ALPAV(clkval)), + PMU_MAX_TRANSITION_DLY); + if (!SBSDIO_ALPAV(clkval)) { + brcmf_dbg(ERROR, "timeout on ALPAV wait, clkval 0x%02x\n", + clkval); + err = -EBUSY; + goto fail; + } + clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | + SBSDIO_FORCE_ALP; + brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, + clkset, &err); + udelay(65); + } else { + brcmf_dbg(ERROR, "ChipClkCSR access: wrote 0x%02x read 0x%02x\n", + clkset, clkval); + err = -EACCES; + goto fail; + } + + /* Also, disable the extra SDIO pull-ups */ + brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, + SBSDIO_FUNC1_SDIOPULLUP, 0, NULL); + + err = brcmf_sdbrcm_chip_recognition(bus->sdiodev, ci, regs); + if (err) + goto fail; + + /* + * Make sure any on-chip ARM is off (in case strapping is wrong), + * or downloaded code was already running. 
+ */ + brcmf_sdbrcm_chip_disablecore(bus->sdiodev, ci->armcorebase); + + brcmf_sdcard_reg_write(bus->sdiodev, + CORE_CC_REG(ci->cccorebase, gpiopullup), 4, 0); + brcmf_sdcard_reg_write(bus->sdiodev, + CORE_CC_REG(ci->cccorebase, gpiopulldown), 4, 0); + + /* Disable F2 to clear any intermediate frame state on the dongle */ + brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx, + SDIO_FUNC_ENABLE_1, NULL); + + /* WAR: cmd52 backplane read so core HW will drop ALPReq */ + clkval = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1, + 0, NULL); + + /* Done with backplane-dependent accesses, can drop clock... */ + brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL); + + bus->ci = ci; + return 0; +fail: + bus->ci = NULL; + kfree(ci); + return err; +} + static bool -brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva) +brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva) { u8 clkctl = 0; int err = 0; int reg_addr; u32 reg_val; - u8 idx; bus->alp_only = true; @@ -3726,7 +4234,7 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva) #endif /* BCMDBG */ /* - * Force PLL off until brcmf_sdio_chip_attach() + * Force PLL off until brcmf_sdbrcm_chip_attach() * programs PLL control regs */ @@ -3744,8 +4252,8 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva) goto fail; } - if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci, regsva)) { - brcmf_dbg(ERROR, "brcmf_sdio_chip_attach failed!\n"); + if (brcmf_sdbrcm_chip_attach(bus, regsva)) { + brcmf_dbg(ERROR, "brcmf_sdbrcm_chip_attach failed!\n"); goto fail; } @@ -3754,10 +4262,11 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva) goto fail; } - brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci, - SDIO_DRIVE_STRENGTH); + brcmf_sdbrcm_sdiod_drive_strength_init(bus, SDIO_DRIVE_STRENGTH); - /* Get info on the SOCRAM cores... */ + /* Get info on the ARM and SOCRAM cores... 
*/ + brcmf_sdcard_reg_read(bus->sdiodev, + CORE_SB(bus->ci->armcorebase, sbidhigh), 4); bus->ramsize = bus->ci->ramsize; if (!(bus->ramsize)) { brcmf_dbg(ERROR, "failed to find SOCRAM memory!\n"); @@ -3765,8 +4274,7 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva) } /* Set core control so an SDIO reset does a backplane reset */ - idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV); - reg_addr = bus->ci->c_inf[idx].base + + reg_addr = bus->ci->buscorebase + offsetof(struct sdpcmd_regs, corecontrol); reg_val = brcmf_sdcard_reg_read(bus->sdiodev, reg_addr, sizeof(u32)); brcmf_sdcard_reg_write(bus->sdiodev, reg_addr, sizeof(u32), @@ -3790,7 +4298,7 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva) return false; } -static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus) +static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus) { brcmf_dbg(TRACE, "Enter\n"); @@ -3798,7 +4306,7 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus) brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx, SDIO_FUNC_ENABLE_1, NULL); - bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; + bus->drvr->busstate = BRCMF_BUS_DOWN; bus->sleeping = false; bus->rxflow = false; @@ -3825,7 +4333,7 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus) static int brcmf_sdbrcm_watchdog_thread(void *data) { - struct brcmf_sdio *bus = (struct brcmf_sdio *)data; + struct brcmf_bus *bus = (struct brcmf_bus *)data; allow_signal(SIGTERM); /* Run until signal received */ @@ -3833,9 +4341,9 @@ brcmf_sdbrcm_watchdog_thread(void *data) if (kthread_should_stop()) break; if (!wait_for_completion_interruptible(&bus->watchdog_wait)) { - brcmf_sdbrcm_bus_watchdog(bus); + brcmf_sdbrcm_bus_watchdog(bus->drvr); /* Count the tick for reference */ - bus->tickcnt++; + bus->drvr->tickcnt++; } else break; } @@ -3845,7 +4353,7 @@ brcmf_sdbrcm_watchdog_thread(void *data) static void brcmf_sdbrcm_watchdog(unsigned long data) { - struct brcmf_sdio *bus = (struct brcmf_sdio *)data; + struct brcmf_bus *bus = (struct brcmf_bus *)data; if (bus->watchdog_tsk) { complete(&bus->watchdog_wait); @@ -3856,14 +4364,23 @@ brcmf_sdbrcm_watchdog(unsigned long data) } } -static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus) +static void +brcmf_sdbrcm_chip_detach(struct brcmf_bus *bus) +{ + brcmf_dbg(TRACE, "Enter\n"); + + kfree(bus->ci); + bus->ci = NULL; +} + +static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus) { brcmf_dbg(TRACE, "Enter\n"); if (bus->ci) { brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false); brcmf_sdbrcm_clkctl(bus, CLK_NONE, false); - brcmf_sdio_chip_detach(&bus->ci); + brcmf_sdbrcm_chip_detach(bus); if (bus->vars && bus->varsz) kfree(bus->vars); bus->vars = NULL; @@ -3873,7 +4390,7 @@ static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus) } /* Detach and free everything */ -static void brcmf_sdbrcm_release(struct brcmf_sdio *bus) +static void brcmf_sdbrcm_release(struct brcmf_bus *bus) { brcmf_dbg(TRACE, "Enter\n"); @@ -3881,9 +4398,10 @@ static void brcmf_sdbrcm_release(struct brcmf_sdio *bus) /* De-register interrupt handler */ brcmf_sdcard_intr_dereg(bus->sdiodev); - if (bus->sdiodev->bus_if->drvr) { - brcmf_detach(bus->sdiodev->dev); + if (bus->drvr) { + brcmf_detach(bus->drvr); brcmf_sdbrcm_release_dongle(bus); + bus->drvr = NULL; } brcmf_sdbrcm_release_malloc(bus); @@ -3894,10 +4412,21 @@ static void brcmf_sdbrcm_release(struct brcmf_sdio *bus) brcmf_dbg(TRACE, "Disconnected\n"); } -void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev) +void 
*brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype, + u32 regsva, struct brcmf_sdio_dev *sdiodev) { int ret; - struct brcmf_sdio *bus; + struct brcmf_bus *bus; + + /* Init global variables at run-time, not as part of the declaration. + * This is required to support init/de-init of the driver. + * Initialization + * of globals as part of the declaration results in non-deterministic + * behavior since the value of the globals may be different on the + * first time that the driver is initialized vs subsequent + * initializations. + */ + brcmf_c_init(); brcmf_dbg(TRACE, "Enter\n"); @@ -3905,13 +4434,12 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev) * regsva == SI_ENUM_BASE*/ /* Allocate private bus interface state */ - bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC); + bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC); if (!bus) goto fail; bus->sdiodev = sdiodev; sdiodev->bus = bus; - skb_queue_head_init(&bus->glom); bus->txbound = BRCMF_TXBOUND; bus->rxbound = BRCMF_RXBOUND; bus->txminmax = BRCMF_TXMINMAX; @@ -3956,15 +4484,9 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev) bus->dpc_tsk = NULL; } - /* Assign bus interface call back */ - bus->sdiodev->bus_if->brcmf_bus_stop = brcmf_sdbrcm_bus_stop; - bus->sdiodev->bus_if->brcmf_bus_init = brcmf_sdbrcm_bus_init; - bus->sdiodev->bus_if->brcmf_bus_txdata = brcmf_sdbrcm_bus_txdata; - bus->sdiodev->bus_if->brcmf_bus_txctl = brcmf_sdbrcm_bus_txctl; - bus->sdiodev->bus_if->brcmf_bus_rxctl = brcmf_sdbrcm_bus_rxctl; /* Attach to the brcmf/OS/network interface */ - ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev); - if (ret != 0) { + bus->drvr = brcmf_attach(bus, SDPCM_RESERVE); + if (!bus->drvr) { brcmf_dbg(ERROR, "brcmf_attach failed\n"); goto fail; } @@ -3992,17 +4514,16 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev) brcmf_dbg(INFO, "completed!!\n"); /* if firmware path present try to download and bring up bus */ - ret = brcmf_bus_start(bus->sdiodev->dev); + ret = brcmf_bus_start(bus->drvr); if (ret != 0) { if (ret == -ENOLINK) { brcmf_dbg(ERROR, "dongle is not responding\n"); goto fail; } } - - /* add interface and open for business */ - if (brcmf_add_if(bus->sdiodev->dev, 0, "wlan%d", NULL)) { - brcmf_dbg(ERROR, "Add primary net device interface failed!!\n"); + /* Ok, have the per-port tell the stack we're open for business */ + if (brcmf_net_attach(bus->drvr, 0) != 0) { + brcmf_dbg(ERROR, "Net attach failed!!\n"); goto fail; } @@ -4015,7 +4536,7 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev) void brcmf_sdbrcm_disconnect(void *ptr) { - struct brcmf_sdio *bus = (struct brcmf_sdio *)ptr; + struct brcmf_bus *bus = (struct brcmf_bus *)ptr; brcmf_dbg(TRACE, "Enter\n"); @@ -4025,9 +4546,18 @@ void brcmf_sdbrcm_disconnect(void *ptr) brcmf_dbg(TRACE, "Disconnected\n"); } +struct device *brcmf_bus_get_device(struct brcmf_bus *bus) +{ + return &bus->sdiodev->func[2]->dev; +} + void -brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick) +brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick) { + /* don't start the wd until fw is loaded */ + if (bus->drvr->busstate == BRCMF_BUS_DOWN) + return; + /* Totally stop the timer */ if (!wdtick && bus->wd_timer_valid == true) { del_timer_sync(&bus->timer); @@ -4036,10 +4566,6 @@ brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick) return; } - /* don't start the wd until fw is loaded */ - if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) - return; - if (wdtick) { if (bus->save_ms != 
BRCMF_WD_POLL_MS) { if (bus->wd_timer_valid == true) diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c deleted file mode 100644 index 11b2d7c97ba2..000000000000 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c +++ /dev/null @@ -1,607 +0,0 @@ -/* - * Copyright (c) 2011 Broadcom Corporation - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ -/* ***** SDIO interface chip backplane handle functions ***** */ - -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include "dhd_dbg.h" -#include "sdio_host.h" -#include "sdio_chip.h" - -/* chip core base & ramsize */ -/* bcm4329 */ -/* SDIO device core, ID 0x829 */ -#define BCM4329_CORE_BUS_BASE 0x18011000 -/* internal memory core, ID 0x80e */ -#define BCM4329_CORE_SOCRAM_BASE 0x18003000 -/* ARM Cortex M3 core, ID 0x82a */ -#define BCM4329_CORE_ARM_BASE 0x18002000 -#define BCM4329_RAMSIZE 0x48000 - -#define SBCOREREV(sbidh) \ - ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \ - ((sbidh) & SSB_IDHIGH_RCLO)) - -/* SOC Interconnect types (aka chip types) */ -#define SOCI_SB 0 -#define SOCI_AI 1 - -/* EROM CompIdentB */ -#define CIB_REV_MASK 0xff000000 -#define CIB_REV_SHIFT 24 - -#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu)) -/* SDIO Pad drive strength to select value mappings */ -struct sdiod_drive_str { - u8 strength; /* Pad Drive Strength in mA */ - u8 sel; /* Chip-specific select value */ -}; -/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */ -static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = { - {32, 0x6}, - {26, 0x7}, - {22, 0x4}, - {16, 0x5}, - {12, 0x2}, - {8, 0x3}, - {4, 0x0}, - {0, 0x1} -}; - -u8 -brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid) -{ - u8 idx; - - for (idx = 0; idx < BRCMF_MAX_CORENUM; idx++) - if (coreid == ci->c_inf[idx].id) - return idx; - - return BRCMF_MAX_CORENUM; -} - -static u32 -brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev, - struct chip_info *ci, u16 coreid) -{ - u32 regdata; - u8 idx; - - idx = brcmf_sdio_chip_getinfidx(ci, coreid); - - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbidhigh), 4); - return SBCOREREV(regdata); -} - -static u32 -brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev, - struct chip_info *ci, u16 coreid) -{ - u8 idx; - - idx = brcmf_sdio_chip_getinfidx(ci, coreid); - - return (ci->c_inf[idx].cib & CIB_REV_MASK) >> CIB_REV_SHIFT; -} - -static bool -brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev, - struct chip_info *ci, u16 coreid) -{ - u32 regdata; - u8 idx; - - idx = brcmf_sdio_chip_getinfidx(ci, coreid); - - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4); - regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT | - 
SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK); - return (SSB_TMSLOW_CLOCK == regdata); -} - -static bool -brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev, - struct chip_info *ci, u16 coreid) -{ - u32 regdata; - u8 idx; - bool ret; - - idx = brcmf_sdio_chip_getinfidx(ci, coreid); - - regdata = brcmf_sdcard_reg_read(sdiodev, - ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4); - ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK; - - regdata = brcmf_sdcard_reg_read(sdiodev, - ci->c_inf[idx].wrapbase+BCMA_RESET_CTL, - 4); - ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0); - - return ret; -} - -static void -brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev, - struct chip_info *ci, u16 coreid) -{ - u32 regdata; - u8 idx; - - idx = brcmf_sdio_chip_getinfidx(ci, coreid); - - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4); - if (regdata & SSB_TMSLOW_RESET) - return; - - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4); - if ((regdata & SSB_TMSLOW_CLOCK) != 0) { - /* - * set target reject and spin until busy is clear - * (preserve core-specific bits) - */ - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4); - brcmf_sdcard_reg_write(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbtmstatelow), - 4, regdata | SSB_TMSLOW_REJECT); - - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4); - udelay(1); - SPINWAIT((brcmf_sdcard_reg_read(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4) & - SSB_TMSHIGH_BUSY), 100000); - - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4); - if (regdata & SSB_TMSHIGH_BUSY) - brcmf_dbg(ERROR, "core state still busy\n"); - - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbidlow), 4); - if (regdata & SSB_IDLOW_INITIATOR) { - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbimstate), 4) | - SSB_IMSTATE_REJECT; - brcmf_sdcard_reg_write(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbimstate), 4, - regdata); - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbimstate), 4); - udelay(1); - SPINWAIT((brcmf_sdcard_reg_read(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbimstate), 4) & - SSB_IMSTATE_BUSY), 100000); - } - - /* set reset and reject while enabling the clocks */ - brcmf_sdcard_reg_write(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4, - (SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | - SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET)); - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4); - udelay(10); - - /* clear the initiator reject bit */ - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbidlow), 4); - if (regdata & SSB_IDLOW_INITIATOR) { - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbimstate), 4) & - ~SSB_IMSTATE_REJECT; - brcmf_sdcard_reg_write(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbimstate), 4, - regdata); - } - } - - /* leave reset and reject asserted */ - brcmf_sdcard_reg_write(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4, - (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET)); - udelay(1); -} - -static void -brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev, - struct chip_info *ci, u16 coreid) -{ - u8 idx; - u32 regdata; - - idx = brcmf_sdio_chip_getinfidx(ci, coreid); - - /* if core is already in reset, just return */ - regdata = brcmf_sdcard_reg_read(sdiodev, - 
ci->c_inf[idx].wrapbase+BCMA_RESET_CTL, - 4); - if ((regdata & BCMA_RESET_CTL_RESET) != 0) - return; - - brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, - 4, 0); - regdata = brcmf_sdcard_reg_read(sdiodev, - ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4); - udelay(10); - - brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL, - 4, BCMA_RESET_CTL_RESET); - udelay(1); -} - -static void -brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev, - struct chip_info *ci, u16 coreid) -{ - u32 regdata; - u8 idx; - - idx = brcmf_sdio_chip_getinfidx(ci, coreid); - - /* - * Must do the disable sequence first to work for - * arbitrary current core state. - */ - brcmf_sdio_sb_coredisable(sdiodev, ci, coreid); - - /* - * Now do the initialization sequence. - * set reset while enabling the clock and - * forcing them on throughout the core - */ - brcmf_sdcard_reg_write(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4, - SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET); - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4); - udelay(1); - - /* clear any serror */ - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4); - if (regdata & SSB_TMSHIGH_SERR) - brcmf_sdcard_reg_write(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4, 0); - - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbimstate), 4); - if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO)) - brcmf_sdcard_reg_write(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbimstate), 4, - regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO)); - - /* clear reset and allow it to propagate throughout the core */ - brcmf_sdcard_reg_write(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4, - SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK); - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4); - udelay(1); - - /* leave clock enabled */ - brcmf_sdcard_reg_write(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbtmstatelow), - 4, SSB_TMSLOW_CLOCK); - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4); - udelay(1); -} - -static void -brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev, - struct chip_info *ci, u16 coreid) -{ - u8 idx; - u32 regdata; - - idx = brcmf_sdio_chip_getinfidx(ci, coreid); - - /* must disable first to work for arbitrary current core state */ - brcmf_sdio_ai_coredisable(sdiodev, ci, coreid); - - /* now do initialization sequence */ - brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, - 4, BCMA_IOCTL_FGC | BCMA_IOCTL_CLK); - regdata = brcmf_sdcard_reg_read(sdiodev, - ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4); - brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL, - 4, 0); - udelay(1); - - brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, - 4, BCMA_IOCTL_CLK); - regdata = brcmf_sdcard_reg_read(sdiodev, - ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4); - udelay(1); -} - -static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev, - struct chip_info *ci, u32 regs) -{ - u32 regdata; - - /* - * Get CC core rev - * Chipid is assume to be at offset 0 from regs arg - * For different chiptypes or old sdio hosts w/o chipcommon, - * other ways of recognition should be added here. 
- */ - ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON; - ci->c_inf[0].base = regs; - regdata = brcmf_sdcard_reg_read(sdiodev, - CORE_CC_REG(ci->c_inf[0].base, chipid), 4); - ci->chip = regdata & CID_ID_MASK; - ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT; - ci->socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT; - - brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev); - - /* Address of cores for new chips should be added here */ - switch (ci->chip) { - case BCM4329_CHIP_ID: - ci->c_inf[1].id = BCMA_CORE_SDIO_DEV; - ci->c_inf[1].base = BCM4329_CORE_BUS_BASE; - ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM; - ci->c_inf[2].base = BCM4329_CORE_SOCRAM_BASE; - ci->c_inf[3].id = BCMA_CORE_ARM_CM3; - ci->c_inf[3].base = BCM4329_CORE_ARM_BASE; - ci->ramsize = BCM4329_RAMSIZE; - break; - case BCM4330_CHIP_ID: - ci->c_inf[0].wrapbase = 0x18100000; - ci->c_inf[0].cib = 0x27004211; - ci->c_inf[1].id = BCMA_CORE_SDIO_DEV; - ci->c_inf[1].base = 0x18002000; - ci->c_inf[1].wrapbase = 0x18102000; - ci->c_inf[1].cib = 0x07004211; - ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM; - ci->c_inf[2].base = 0x18004000; - ci->c_inf[2].wrapbase = 0x18104000; - ci->c_inf[2].cib = 0x0d080401; - ci->c_inf[3].id = BCMA_CORE_ARM_CM3; - ci->c_inf[3].base = 0x18003000; - ci->c_inf[3].wrapbase = 0x18103000; - ci->c_inf[3].cib = 0x03004211; - ci->ramsize = 0x48000; - break; - default: - brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip); - return -ENODEV; - } - - switch (ci->socitype) { - case SOCI_SB: - ci->iscoreup = brcmf_sdio_sb_iscoreup; - ci->corerev = brcmf_sdio_sb_corerev; - ci->coredisable = brcmf_sdio_sb_coredisable; - ci->resetcore = brcmf_sdio_sb_resetcore; - break; - case SOCI_AI: - ci->iscoreup = brcmf_sdio_ai_iscoreup; - ci->corerev = brcmf_sdio_ai_corerev; - ci->coredisable = brcmf_sdio_ai_coredisable; - ci->resetcore = brcmf_sdio_ai_resetcore; - break; - default: - brcmf_dbg(ERROR, "socitype %u not supported\n", ci->socitype); - return -ENODEV; - } - - return 0; -} - -static int -brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev) -{ - int err = 0; - u8 clkval, clkset; - - /* Try forcing SDIO core to do ALPAvail request only */ - clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ; - brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1, - SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err); - if (err) { - brcmf_dbg(ERROR, "error writing for HT off\n"); - return err; - } - - /* If register supported, wait for ALPAvail and then force ALP */ - /* This may take up to 15 milliseconds */ - clkval = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_1, - SBSDIO_FUNC1_CHIPCLKCSR, NULL); - - if ((clkval & ~SBSDIO_AVBITS) != clkset) { - brcmf_dbg(ERROR, "ChipClkCSR access: wrote 0x%02x read 0x%02x\n", - clkset, clkval); - return -EACCES; - } - - SPINWAIT(((clkval = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_1, - SBSDIO_FUNC1_CHIPCLKCSR, NULL)), - !SBSDIO_ALPAV(clkval)), - PMU_MAX_TRANSITION_DLY); - if (!SBSDIO_ALPAV(clkval)) { - brcmf_dbg(ERROR, "timeout on ALPAV wait, clkval 0x%02x\n", - clkval); - return -EBUSY; - } - - clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP; - brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1, - SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err); - udelay(65); - - /* Also, disable the extra SDIO pull-ups */ - brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1, - SBSDIO_FUNC1_SDIOPULLUP, 0, NULL); - - return 0; -} - -static void -brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev, - struct chip_info *ci) -{ - /* get chipcommon rev */ - ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id); - 
- /* get chipcommon capabilites */ - ci->c_inf[0].caps = - brcmf_sdcard_reg_read(sdiodev, - CORE_CC_REG(ci->c_inf[0].base, capabilities), 4); - - /* get pmu caps & rev */ - if (ci->c_inf[0].caps & CC_CAP_PMU) { - ci->pmucaps = brcmf_sdcard_reg_read(sdiodev, - CORE_CC_REG(ci->c_inf[0].base, pmucapabilities), 4); - ci->pmurev = ci->pmucaps & PCAP_REV_MASK; - } - - ci->c_inf[1].rev = ci->corerev(sdiodev, ci, ci->c_inf[1].id); - - brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n", - ci->c_inf[0].rev, ci->pmurev, - ci->c_inf[1].rev, ci->c_inf[1].id); - - /* - * Make sure any on-chip ARM is off (in case strapping is wrong), - * or downloaded code was already running. - */ - ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3); -} - -int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev, - struct chip_info **ci_ptr, u32 regs) -{ - int ret; - struct chip_info *ci; - - brcmf_dbg(TRACE, "Enter\n"); - - /* alloc chip_info_t */ - ci = kzalloc(sizeof(struct chip_info), GFP_ATOMIC); - if (!ci) - return -ENOMEM; - - ret = brcmf_sdio_chip_buscoreprep(sdiodev); - if (ret != 0) - goto err; - - ret = brcmf_sdio_chip_recognition(sdiodev, ci, regs); - if (ret != 0) - goto err; - - brcmf_sdio_chip_buscoresetup(sdiodev, ci); - - brcmf_sdcard_reg_write(sdiodev, - CORE_CC_REG(ci->c_inf[0].base, gpiopullup), 4, 0); - brcmf_sdcard_reg_write(sdiodev, - CORE_CC_REG(ci->c_inf[0].base, gpiopulldown), 4, 0); - - *ci_ptr = ci; - return 0; - -err: - kfree(ci); - return ret; -} - -void -brcmf_sdio_chip_detach(struct chip_info **ci_ptr) -{ - brcmf_dbg(TRACE, "Enter\n"); - - kfree(*ci_ptr); - *ci_ptr = NULL; -} - -static char *brcmf_sdio_chip_name(uint chipid, char *buf, uint len) -{ - const char *fmt; - - fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x"; - snprintf(buf, len, fmt, chipid); - return buf; -} - -void -brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev, - struct chip_info *ci, u32 drivestrength) -{ - struct sdiod_drive_str *str_tab = NULL; - u32 str_mask = 0; - u32 str_shift = 0; - char chn[8]; - - if (!(ci->c_inf[0].caps & CC_CAP_PMU)) - return; - - switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) { - case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12): - str_tab = (struct sdiod_drive_str *)&sdiod_drvstr_tab1_1v8; - str_mask = 0x00003800; - str_shift = 11; - break; - default: - brcmf_dbg(ERROR, "No SDIO Drive strength init done for chip %s rev %d pmurev %d\n", - brcmf_sdio_chip_name(ci->chip, chn, 8), - ci->chiprev, ci->pmurev); - break; - } - - if (str_tab != NULL) { - u32 drivestrength_sel = 0; - u32 cc_data_temp; - int i; - - for (i = 0; str_tab[i].strength != 0; i++) { - if (drivestrength >= str_tab[i].strength) { - drivestrength_sel = str_tab[i].sel; - break; - } - } - - brcmf_sdcard_reg_write(sdiodev, - CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr), - 4, 1); - cc_data_temp = brcmf_sdcard_reg_read(sdiodev, - CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr), 4); - cc_data_temp &= ~str_mask; - drivestrength_sel <<= str_shift; - cc_data_temp |= drivestrength_sel; - brcmf_sdcard_reg_write(sdiodev, - CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr), - 4, cc_data_temp); - - brcmf_dbg(INFO, "SDIO: %dmA drive strength selected, set to 0x%08x\n", - drivestrength, cc_data_temp); - } -} diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/trunk/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h deleted file mode 100644 index ce974d76bd92..000000000000 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright 
(c) 2011 Broadcom Corporation - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#ifndef _BRCMFMAC_SDIO_CHIP_H_ -#define _BRCMFMAC_SDIO_CHIP_H_ - -/* - * Core reg address translation. - * Both macro's returns a 32 bits byte address on the backplane bus. - */ -#define CORE_CC_REG(base, field) \ - (base + offsetof(struct chipcregs, field)) -#define CORE_BUS_REG(base, field) \ - (base + offsetof(struct sdpcmd_regs, field)) -#define CORE_SB(base, field) \ - (base + SBCONFIGOFF + offsetof(struct sbconfig, field)) - -/* SDIO function 1 register CHIPCLKCSR */ -/* Force ALP request to backplane */ -#define SBSDIO_FORCE_ALP 0x01 -/* Force HT request to backplane */ -#define SBSDIO_FORCE_HT 0x02 -/* Force ILP request to backplane */ -#define SBSDIO_FORCE_ILP 0x04 -/* Make ALP ready (power up xtal) */ -#define SBSDIO_ALP_AVAIL_REQ 0x08 -/* Make HT ready (power up PLL) */ -#define SBSDIO_HT_AVAIL_REQ 0x10 -/* Squelch clock requests from HW */ -#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20 -/* Status: ALP is ready */ -#define SBSDIO_ALP_AVAIL 0x40 -/* Status: HT is ready */ -#define SBSDIO_HT_AVAIL 0x80 -#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL) -#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS) -#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS) -#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval)) -#define SBSDIO_CLKAV(regval, alponly) \ - (SBSDIO_ALPAV(regval) && (alponly ? 
1 : SBSDIO_HTAV(regval))) - -#define BRCMF_MAX_CORENUM 6 - -struct chip_core_info { - u16 id; - u16 rev; - u32 base; - u32 wrapbase; - u32 caps; - u32 cib; -}; - -struct chip_info { - u32 chip; - u32 chiprev; - u32 socitype; - /* core info */ - /* always put chipcommon core at 0, bus core at 1 */ - struct chip_core_info c_inf[BRCMF_MAX_CORENUM]; - u32 pmurev; - u32 pmucaps; - u32 ramsize; - - bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci, - u16 coreid); - u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci, - u16 coreid); - void (*coredisable)(struct brcmf_sdio_dev *sdiodev, - struct chip_info *ci, u16 coreid); - void (*resetcore)(struct brcmf_sdio_dev *sdiodev, - struct chip_info *ci, u16 coreid); -}; - -struct sbconfig { - u32 PAD[2]; - u32 sbipsflag; /* initiator port ocp slave flag */ - u32 PAD[3]; - u32 sbtpsflag; /* target port ocp slave flag */ - u32 PAD[11]; - u32 sbtmerrloga; /* (sonics >= 2.3) */ - u32 PAD; - u32 sbtmerrlog; /* (sonics >= 2.3) */ - u32 PAD[3]; - u32 sbadmatch3; /* address match3 */ - u32 PAD; - u32 sbadmatch2; /* address match2 */ - u32 PAD; - u32 sbadmatch1; /* address match1 */ - u32 PAD[7]; - u32 sbimstate; /* initiator agent state */ - u32 sbintvec; /* interrupt mask */ - u32 sbtmstatelow; /* target state */ - u32 sbtmstatehigh; /* target state */ - u32 sbbwa0; /* bandwidth allocation table0 */ - u32 PAD; - u32 sbimconfiglow; /* initiator configuration */ - u32 sbimconfighigh; /* initiator configuration */ - u32 sbadmatch0; /* address match0 */ - u32 PAD; - u32 sbtmconfiglow; /* target configuration */ - u32 sbtmconfighigh; /* target configuration */ - u32 sbbconfig; /* broadcast configuration */ - u32 PAD; - u32 sbbstate; /* broadcast state */ - u32 PAD[3]; - u32 sbactcnfg; /* activate configuration */ - u32 PAD[3]; - u32 sbflagst; /* current sbflags */ - u32 PAD[3]; - u32 sbidlow; /* identification */ - u32 sbidhigh; /* identification */ -}; - -extern int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev, - struct chip_info **ci_ptr, u32 regs); -extern void brcmf_sdio_chip_detach(struct chip_info **ci_ptr); -extern void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev, - struct chip_info *ci, - u32 drivestrength); -extern u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid); - - -#endif /* _BRCMFMAC_SDIO_CHIP_H_ */ diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/trunk/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h index 0281d207d998..726fa8981113 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h @@ -116,20 +116,12 @@ #define SUCCESS 0 #define ERROR 1 -/* Packet alignment for most efficient SDIO (can change based on platform) */ -#define BRCMF_SDALIGN (1 << 6) - -/* watchdog polling interval in ms */ -#define BRCMF_WD_POLL_MS 10 - struct brcmf_sdreg { int func; int offset; int value; }; -struct brcmf_sdio; - struct brcmf_sdio_dev { struct sdio_func *func[SDIO_MAX_FUNCS]; u8 num_funcs; /* Supported funcs on client */ @@ -140,10 +132,9 @@ struct brcmf_sdio_dev { atomic_t suspend; /* suspend flag */ wait_queue_head_t request_byte_wait; wait_queue_head_t request_word_wait; - wait_queue_head_t request_chain_wait; + wait_queue_head_t request_packet_wait; wait_queue_head_t request_buffer_wait; - struct device *dev; - struct brcmf_bus *bus_if; + }; /* Register/deregister device interrupt handler. 
*/ @@ -191,21 +182,11 @@ extern bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev); * NOTE: Async operation is not currently supported. */ extern int -brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, - uint flags, struct sk_buff *pkt); -extern int brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, - uint flags, u8 *buf, uint nbytes); - -extern int -brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, - uint flags, struct sk_buff *pkt); + uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt); extern int brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, - uint flags, u8 *buf, uint nbytes); -extern int -brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, - uint flags, struct sk_buff_head *pktq); + uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt); /* Flags bits */ @@ -256,20 +237,16 @@ brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev, /* read or write any buffer using cmd53 */ extern int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev, - uint fix_inc, uint rw, uint fnc_num, u32 addr, - struct sk_buff *pkt); -extern int -brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc, - uint write, uint func, uint addr, - struct sk_buff_head *pktq); + uint fix_inc, uint rw, uint fnc_num, + u32 addr, uint regwidth, + u32 buflen, u8 *buffer, struct sk_buff *pkt); /* Watchdog timer interface for pm ops */ extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable); -extern void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev); +extern void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype, + u32 regsva, struct brcmf_sdio_dev *sdiodev); extern void brcmf_sdbrcm_disconnect(void *ptr); extern void brcmf_sdbrcm_isr(void *arg); - -extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick); #endif /* _BRCM_SDH_H_ */ diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index f23b0c3e4ea3..5eddabe5228a 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -1429,7 +1429,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev, static s32 brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, - enum nl80211_tx_power_setting type, s32 mbm) + enum nl80211_tx_power_setting type, s32 dbm) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); @@ -1437,7 +1437,6 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, u16 txpwrmw; s32 err = 0; s32 disable = 0; - s32 dbm = MBM_TO_DBM(mbm); WL_TRACE("Enter\n"); if (!check_sys_up(wiphy)) @@ -1447,6 +1446,12 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, case NL80211_TX_POWER_AUTOMATIC: break; case NL80211_TX_POWER_LIMITED: + if (dbm < 0) { + WL_ERR("TX_POWER_LIMITED - dbm is negative\n"); + err = -EINVAL; + goto done; + } + break; case NL80211_TX_POWER_FIXED: if (dbm < 0) { WL_ERR("TX_POWER_FIXED - dbm is negative\n"); @@ -1992,7 +1997,7 @@ brcmf_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *ndev, } static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv, - struct brcmf_bss_info_le *bi) + struct brcmf_bss_info *bi) { struct wiphy *wiphy = cfg_to_wiphy(cfg_priv); struct ieee80211_channel *notify_channel; @@ -2044,27 +2049,18 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv, notify_timestamp, notify_capability, 
notify_interval, notify_ie, notify_ielen, notify_signal, GFP_KERNEL); - if (!bss) - return -ENOMEM; - - cfg80211_put_bss(bss); + if (!bss) { + WL_ERR("cfg80211_inform_bss_frame error\n"); + return -EINVAL; + } return err; } -static struct brcmf_bss_info_le * -next_bss_le(struct brcmf_scan_results *list, struct brcmf_bss_info_le *bss) -{ - if (bss == NULL) - return list->bss_info_le; - return (struct brcmf_bss_info_le *)((unsigned long)bss + - le32_to_cpu(bss->length)); -} - static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv) { struct brcmf_scan_results *bss_list; - struct brcmf_bss_info_le *bi = NULL; /* must be initialized */ + struct brcmf_bss_info *bi = NULL; /* must be initialized */ s32 err = 0; int i; @@ -2076,7 +2072,7 @@ static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv) } WL_SCAN("scanned AP count (%d)\n", bss_list->count); for (i = 0; i < bss_list->count && i < WL_AP_MAX; i++) { - bi = next_bss_le(bss_list, bi); + bi = next_bss(bss_list, bi); err = brcmf_inform_single_bss(cfg_priv, bi); if (err) break; @@ -2089,9 +2085,8 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv, { struct wiphy *wiphy = cfg_to_wiphy(cfg_priv); struct ieee80211_channel *notify_channel; - struct brcmf_bss_info_le *bi = NULL; + struct brcmf_bss_info *bi = NULL; struct ieee80211_supported_band *band; - struct cfg80211_bss *bss; u8 *buf = NULL; s32 err = 0; u16 channel; @@ -2119,7 +2114,7 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv, goto CleanUp; } - bi = (struct brcmf_bss_info_le *)(buf + 4); + bi = (struct brcmf_bss_info *)(buf + 4); channel = bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec)); @@ -2145,17 +2140,10 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv, WL_CONN("signal: %d\n", notify_signal); WL_CONN("notify_timestamp: %#018llx\n", notify_timestamp); - bss = cfg80211_inform_bss(wiphy, notify_channel, bssid, + cfg80211_inform_bss(wiphy, notify_channel, bssid, notify_timestamp, notify_capability, notify_interval, notify_ie, notify_ielen, notify_signal, GFP_KERNEL); - if (!bss) { - err = -ENOMEM; - goto CleanUp; - } - - cfg80211_put_bss(bss); - CleanUp: kfree(buf); @@ -2200,7 +2188,7 @@ static struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key) static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv) { - struct brcmf_bss_info_le *bi; + struct brcmf_bss_info *bi; struct brcmf_ssid *ssid; struct brcmf_tlv *tim; u16 beacon_interval; @@ -2223,7 +2211,7 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv) goto update_bss_info_out; } - bi = (struct brcmf_bss_info_le *)(cfg_priv->extra_buf + 4); + bi = (struct brcmf_bss_info *)(cfg_priv->extra_buf + 4); err = brcmf_inform_single_bss(cfg_priv, bi); if (err) goto update_bss_info_out; diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h index a613b49cb13f..62dc46144ede 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h @@ -352,6 +352,15 @@ brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_priv *cfg) return &cfg->conn_info; } +static inline struct brcmf_bss_info *next_bss(struct brcmf_scan_results *list, + struct brcmf_bss_info *bss) +{ + return bss = bss ? 
+ (struct brcmf_bss_info *)((unsigned long)bss + + le32_to_cpu(bss->length)) : + list->bss_info; +} + extern struct brcmf_cfg80211_dev *brcmf_cfg80211_attach(struct net_device *ndev, struct device *busdev, void *data); diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c index ab9bb11abfbb..025fa0eb6f47 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c @@ -16,8 +16,6 @@ * File contents: support functions for PCI/PCIe */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include @@ -318,24 +316,51 @@ #define BADIDX (SI_MAXCORES + 1) +/* Newer chips can access PCI/PCIE and CC core without requiring to change + * PCI BAR0 WIN + */ +#define SI_FAST(si) (((si)->pub.buscoretype == PCIE_CORE_ID) || \ + (((si)->pub.buscoretype == PCI_CORE_ID) && \ + (si)->pub.buscorerev >= 13)) + +#define CCREGS_FAST(si) (((char __iomem *)((si)->curmap) + \ + PCI_16KB0_CCREGS_OFFSET)) + #define IS_SIM(chippkg) \ ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID)) -#define PCI(sih) (ai_get_buscoretype(sih) == PCI_CORE_ID) -#define PCIE(sih) (ai_get_buscoretype(sih) == PCIE_CORE_ID) +/* + * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts + * before after core switching to avoid invalid register accesss inside ISR. + */ +#define INTR_OFF(si, intr_val) \ + if ((si)->intrsoff_fn && \ + (si)->coreid[(si)->curidx] == (si)->dev_coreid) \ + intr_val = (*(si)->intrsoff_fn)((si)->intr_arg) -#define PCI_FORCEHT(sih) (PCIE(sih) && (ai_get_chip_id(sih) == BCM4716_CHIP_ID)) +#define INTR_RESTORE(si, intr_val) \ + if ((si)->intrsrestore_fn && \ + (si)->coreid[(si)->curidx] == (si)->dev_coreid) \ + (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val) + +#define PCI(si) ((si)->pub.buscoretype == PCI_CORE_ID) +#define PCIE(si) ((si)->pub.buscoretype == PCIE_CORE_ID) + +#define PCI_FORCEHT(si) (PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID)) #ifdef BCMDBG -#define SI_MSG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__) +#define SI_MSG(args) printk args #else -#define SI_MSG(fmt, ...) 
no_printk(fmt, ##__VA_ARGS__) +#define SI_MSG(args) #endif /* BCMDBG */ #define GOODCOREADDR(x, b) \ (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \ IS_ALIGNED((x), SI_CORE_SIZE)) +#define PCIEREGS(si) ((__iomem char *)((si)->curmap) + \ + PCI_16KB0_PCIREGS_OFFSET) + struct aidmp { u32 oobselina30; /* 0x000 */ u32 oobselina74; /* 0x004 */ @@ -454,13 +479,406 @@ struct aidmp { u32 componentid3; /* 0xffc */ }; +/* EROM parsing */ + +static u32 +get_erom_ent(struct si_pub *sih, u32 __iomem **eromptr, u32 mask, u32 match) +{ + u32 ent; + uint inv = 0, nom = 0; + + while (true) { + ent = R_REG(*eromptr); + (*eromptr)++; + + if (mask == 0) + break; + + if ((ent & ER_VALID) == 0) { + inv++; + continue; + } + + if (ent == (ER_END | ER_VALID)) + break; + + if ((ent & mask) == match) + break; + + nom++; + } + + return ent; +} + +static u32 +get_asd(struct si_pub *sih, u32 __iomem **eromptr, uint sp, uint ad, uint st, + u32 *addrl, u32 *addrh, u32 *sizel, u32 *sizeh) +{ + u32 asd, sz, szd; + + asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID); + if (((asd & ER_TAG1) != ER_ADD) || + (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) || + ((asd & AD_ST_MASK) != st)) { + /* This is not what we want, "push" it back */ + (*eromptr)--; + return 0; + } + *addrl = asd & AD_ADDR_MASK; + if (asd & AD_AG32) + *addrh = get_erom_ent(sih, eromptr, 0, 0); + else + *addrh = 0; + *sizeh = 0; + sz = asd & AD_SZ_MASK; + if (sz == AD_SZ_SZD) { + szd = get_erom_ent(sih, eromptr, 0, 0); + *sizel = szd & SD_SZ_MASK; + if (szd & SD_SG32) + *sizeh = get_erom_ent(sih, eromptr, 0, 0); + } else + *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT); + + return asd; +} + +static void ai_hwfixup(struct si_info *sii) +{ +} + +/* parse the enumeration rom to identify all cores */ +static void ai_scan(struct si_pub *sih, struct chipcregs __iomem *cc) +{ + struct si_info *sii = (struct si_info *)sih; + + u32 erombase; + u32 __iomem *eromptr, *eromlim; + void __iomem *regs = cc; + + erombase = R_REG(&cc->eromptr); + + /* Set wrappers address */ + sii->curwrap = (void *)((unsigned long)cc + SI_CORE_SIZE); + + /* Now point the window at the erom */ + pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, erombase); + eromptr = regs; + eromlim = eromptr + (ER_REMAPCONTROL / sizeof(u32)); + + while (eromptr < eromlim) { + u32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp; + u32 mpd, asd, addrl, addrh, sizel, sizeh; + u32 __iomem *base; + uint i, j, idx; + bool br; + + br = false; + + /* Grok a component */ + cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI); + if (cia == (ER_END | ER_VALID)) { + /* Found END of erom */ + ai_hwfixup(sii); + return; + } + base = eromptr - 1; + cib = get_erom_ent(sih, &eromptr, 0, 0); + + if ((cib & ER_TAG) != ER_CI) { + /* CIA not followed by CIB */ + goto error; + } + + cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT; + mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT; + crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT; + nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT; + nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT; + nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT; + nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT; + + if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0)) + continue; + if ((nmw + nsw == 0)) { + /* A component which is not a core */ + if (cid == OOB_ROUTER_CORE_ID) { + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, + &addrl, &addrh, &sizel, &sizeh); + if (asd != 0) + sii->oob_router = addrl; + } + continue; + } + + idx = sii->numcores; +/* sii->eromptr[idx] = base; */ + sii->cia[idx] = cia; + sii->cib[idx] = 
cib; + sii->coreid[idx] = cid; + + for (i = 0; i < nmp; i++) { + mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID); + if ((mpd & ER_TAG) != ER_MP) { + /* Not enough MP entries for component */ + goto error; + } + } + + /* First Slave Address Descriptor should be port 0: + * the main register space for the core + */ + asd = + get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, + &sizel, &sizeh); + if (asd == 0) { + /* Try again to see if it is a bridge */ + asd = + get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, + &addrh, &sizel, &sizeh); + if (asd != 0) + br = true; + else if ((addrh != 0) || (sizeh != 0) + || (sizel != SI_CORE_SIZE)) { + /* First Slave ASD for core malformed */ + goto error; + } + } + sii->coresba[idx] = addrl; + sii->coresba_size[idx] = sizel; + /* Get any more ASDs in port 0 */ + j = 1; + do { + asd = + get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, + &addrh, &sizel, &sizeh); + if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) { + sii->coresba2[idx] = addrl; + sii->coresba2_size[idx] = sizel; + } + j++; + } while (asd != 0); + + /* Go through the ASDs for other slave ports */ + for (i = 1; i < nsp; i++) { + j = 0; + do { + asd = + get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE, + &addrl, &addrh, &sizel, &sizeh); + } while (asd != 0); + if (j == 0) { + /* SP has no address descriptors */ + goto error; + } + } + + /* Now get master wrappers */ + for (i = 0; i < nmw; i++) { + asd = + get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, + &addrh, &sizel, &sizeh); + if (asd == 0) { + /* Missing descriptor for MW */ + goto error; + } + if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) { + /* Master wrapper %d is not 4KB */ + goto error; + } + if (i == 0) + sii->wrapba[idx] = addrl; + } + + /* And finally slave wrappers */ + for (i = 0; i < nsw; i++) { + uint fwp = (nsp == 1) ? 0 : 1; + asd = + get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, + &addrl, &addrh, &sizel, &sizeh); + if (asd == 0) { + /* Missing descriptor for SW */ + goto error; + } + if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) { + /* Slave wrapper is not 4KB */ + goto error; + } + if ((nmw == 0) && (i == 0)) + sii->wrapba[idx] = addrl; + } + + /* Don't record bridges */ + if (br) + continue; + + /* Done with core */ + sii->numcores++; + } + + error: + /* Reached end of erom without finding END */ + sii->numcores = 0; + return; +} + +/* + * This function changes the logical "focus" to the indicated core. + * Return the current core's virtual address. Since each core starts with the + * same set of registers (BIST, clock control, etc), the returned address + * contains the first register of this 'common' register block (not to be + * confused with 'common core'). 
+ */ +void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx) +{ + struct si_info *sii = (struct si_info *)sih; + u32 addr = sii->coresba[coreidx]; + u32 wrap = sii->wrapba[coreidx]; + + if (coreidx >= sii->numcores) + return NULL; + + /* point bar0 window */ + pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, addr); + /* point bar0 2nd 4KB window */ + pci_write_config_dword(sii->pbus, PCI_BAR0_WIN2, wrap); + sii->curidx = coreidx; + + return sii->curmap; +} + +/* Return the number of address spaces in current core */ +int ai_numaddrspaces(struct si_pub *sih) +{ + return 2; +} + +/* Return the address of the nth address space in the current core */ +u32 ai_addrspace(struct si_pub *sih, uint asidx) +{ + struct si_info *sii; + uint cidx; + + sii = (struct si_info *)sih; + cidx = sii->curidx; + + if (asidx == 0) + return sii->coresba[cidx]; + else if (asidx == 1) + return sii->coresba2[cidx]; + else { + /* Need to parse the erom again to find addr space */ + return 0; + } +} + +/* Return the size of the nth address space in the current core */ +u32 ai_addrspacesize(struct si_pub *sih, uint asidx) +{ + struct si_info *sii; + uint cidx; + + sii = (struct si_info *)sih; + cidx = sii->curidx; + + if (asidx == 0) + return sii->coresba_size[cidx]; + else if (asidx == 1) + return sii->coresba2_size[cidx]; + else { + /* Need to parse the erom again to find addr */ + return 0; + } +} + +uint ai_flag(struct si_pub *sih) +{ + struct si_info *sii; + struct aidmp *ai; + + sii = (struct si_info *)sih; + ai = sii->curwrap; + + return R_REG(&ai->oobselouta30) & 0x1f; +} + +void ai_setint(struct si_pub *sih, int siflag) +{ +} + +uint ai_corevendor(struct si_pub *sih) +{ + struct si_info *sii; + u32 cia; + + sii = (struct si_info *)sih; + cia = sii->cia[sii->curidx]; + return (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT; +} + +uint ai_corerev(struct si_pub *sih) +{ + struct si_info *sii; + u32 cib; + + sii = (struct si_info *)sih; + cib = sii->cib[sii->curidx]; + return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT; +} + +bool ai_iscoreup(struct si_pub *sih) +{ + struct si_info *sii; + struct aidmp *ai; + + sii = (struct si_info *)sih; + ai = sii->curwrap; + + return (((R_REG(&ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == + SICF_CLOCK_EN) + && ((R_REG(&ai->resetctrl) & AIRC_RESET) == 0)); +} + +void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val) +{ + struct si_info *sii; + struct aidmp *ai; + u32 w; + + sii = (struct si_info *)sih; + + ai = sii->curwrap; + + if (mask || val) { + w = ((R_REG(&ai->ioctrl) & ~mask) | val); + W_REG(&ai->ioctrl, w); + } +} + +u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val) +{ + struct si_info *sii; + struct aidmp *ai; + u32 w; + + sii = (struct si_info *)sih; + ai = sii->curwrap; + + if (mask || val) { + w = ((R_REG(&ai->ioctrl) & ~mask) | val); + W_REG(&ai->ioctrl, w); + } + + return R_REG(&ai->ioctrl); +} + /* return true if PCIE capability exists in the pci config space */ static bool ai_ispcie(struct si_info *sii) { u8 cap_ptr; cap_ptr = - pcicore_find_pci_capability(sii->pcibus, PCI_CAP_ID_EXP, NULL, + pcicore_find_pci_capability(sii->pbus, PCI_CAP_ID_EXP, NULL, NULL); if (!cap_ptr) return false; @@ -476,69 +894,117 @@ static bool ai_buscore_prep(struct si_info *sii) return true; } -static bool -ai_buscore_setup(struct si_info *sii, struct bcma_device *cc) +u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val) { - struct bcma_device *pci = NULL; - struct bcma_device *pcie = NULL; - struct bcma_device *core; + struct si_info *sii; + struct aidmp *ai; + u32 w; + sii 
= (struct si_info *)sih; + ai = sii->curwrap; - /* no cores found, bail out */ - if (cc->bus->nr_cores == 0) - return false; + if (mask || val) { + w = ((R_REG(&ai->iostatus) & ~mask) | val); + W_REG(&ai->iostatus, w); + } + + return R_REG(&ai->iostatus); +} + +static bool +ai_buscore_setup(struct si_info *sii, u32 savewin, uint *origidx) +{ + bool pci, pcie; + uint i; + uint pciidx, pcieidx, pcirev, pcierev; + struct chipcregs __iomem *cc; + + cc = ai_setcoreidx(&sii->pub, SI_CC_IDX); /* get chipcommon rev */ - sii->pub.ccrev = cc->id.rev; + sii->pub.ccrev = (int)ai_corerev(&sii->pub); /* get chipcommon chipstatus */ - if (ai_get_ccrev(&sii->pub) >= 11) - sii->chipst = bcma_read32(cc, CHIPCREGOFFS(chipstatus)); + if (sii->pub.ccrev >= 11) + sii->pub.chipst = R_REG(&cc->chipstatus); /* get chipcommon capabilites */ - sii->pub.cccaps = bcma_read32(cc, CHIPCREGOFFS(capabilities)); + sii->pub.cccaps = R_REG(&cc->capabilities); + /* get chipcommon extended capabilities */ + + if (sii->pub.ccrev >= 35) + sii->pub.cccaps_ext = R_REG(&cc->capabilities_ext); /* get pmu rev and caps */ - if (ai_get_cccaps(&sii->pub) & CC_CAP_PMU) { - sii->pub.pmucaps = bcma_read32(cc, - CHIPCREGOFFS(pmucapabilities)); + if (sii->pub.cccaps & CC_CAP_PMU) { + sii->pub.pmucaps = R_REG(&cc->pmucapabilities); sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK; } - /* figure out buscore */ - list_for_each_entry(core, &cc->bus->cores, list) { + /* figure out bus/orignal core idx */ + sii->pub.buscoretype = NODEV_CORE_ID; + sii->pub.buscorerev = NOREV; + sii->pub.buscoreidx = BADIDX; + + pci = pcie = false; + pcirev = pcierev = NOREV; + pciidx = pcieidx = BADIDX; + + for (i = 0; i < sii->numcores; i++) { uint cid, crev; - cid = core->id.id; - crev = core->id.rev; + ai_setcoreidx(&sii->pub, i); + cid = ai_coreid(&sii->pub); + crev = ai_corerev(&sii->pub); if (cid == PCI_CORE_ID) { - pci = core; + pciidx = i; + pcirev = crev; + pci = true; } else if (cid == PCIE_CORE_ID) { - pcie = core; + pcieidx = i; + pcierev = crev; + pcie = true; } + + /* find the core idx before entering this func. 
*/ + if ((savewin && (savewin == sii->coresba[i])) || + (cc == sii->regs[i])) + *origidx = i; } if (pci && pcie) { if (ai_ispcie(sii)) - pci = NULL; + pci = false; else - pcie = NULL; + pcie = false; } if (pci) { - sii->buscore = pci; + sii->pub.buscoretype = PCI_CORE_ID; + sii->pub.buscorerev = pcirev; + sii->pub.buscoreidx = pciidx; } else if (pcie) { - sii->buscore = pcie; + sii->pub.buscoretype = PCIE_CORE_ID; + sii->pub.buscorerev = pcierev; + sii->pub.buscoreidx = pcieidx; } /* fixup necessary chip/core configurations */ - if (!sii->pch) { - sii->pch = pcicore_init(&sii->pub, sii->icbus->drv_pci.core); - if (sii->pch == NULL) - return false; + if (SI_FAST(sii)) { + if (!sii->pch) { + sii->pch = pcicore_init(&sii->pub, sii->pbus, + (__iomem void *)PCIEREGS(sii)); + if (sii->pch == NULL) + return false; + } } - if (ai_pci_fixcfg(&sii->pub)) + if (ai_pci_fixcfg(&sii->pub)) { + /* si_doattach: si_pci_fixcfg failed */ return false; + } + + /* return to the original core */ + ai_setcoreidx(&sii->pub, *origidx); return true; } @@ -551,27 +1017,39 @@ static __used void ai_nvram_process(struct si_info *sii) uint w = 0; /* do a pci config read to get subsystem id and subvendor id */ - pci_read_config_dword(sii->pcibus, PCI_SUBSYSTEM_VENDOR_ID, &w); + pci_read_config_dword(sii->pbus, PCI_SUBSYSTEM_VENDOR_ID, &w); sii->pub.boardvendor = w & 0xffff; sii->pub.boardtype = (w >> 16) & 0xffff; + sii->pub.boardflags = getintvar(&sii->pub, BRCMS_SROM_BOARDFLAGS); } static struct si_info *ai_doattach(struct si_info *sii, - struct bcma_bus *pbus) + void __iomem *regs, struct pci_dev *pbus) { struct si_pub *sih = &sii->pub; u32 w, savewin; - struct bcma_device *cc; + struct chipcregs __iomem *cc; uint socitype; + uint origidx; + + memset((unsigned char *) sii, 0, sizeof(struct si_info)); savewin = 0; - sii->icbus = pbus; - sii->pcibus = pbus->host_pci; + sih->buscoreidx = BADIDX; + + sii->curmap = regs; + sii->pbus = pbus; + + /* find Chipcommon address */ + pci_read_config_dword(sii->pbus, PCI_BAR0_WIN, &savewin); + if (!GOODCOREADDR(savewin, SI_ENUM_BASE)) + savewin = SI_ENUM_BASE; - /* switch to Chipcommon core */ - cc = pbus->drv_cc.core; + pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, + SI_ENUM_BASE); + cc = (struct chipcregs __iomem *) regs; /* bus/core/clk setup for register access */ if (!ai_buscore_prep(sii)) @@ -584,74 +1062,94 @@ static struct si_info *ai_doattach(struct si_info *sii, * hosts w/o chipcommon), some way of recognizing them needs to * be added here. 
*/ - w = bcma_read32(cc, CHIPCREGOFFS(chipid)); + w = R_REG(&cc->chipid); socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT; /* Might as wll fill in chip id rev & pkg */ sih->chip = w & CID_ID_MASK; sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT; sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT; + sih->issim = false; + /* scan for cores */ - if (socitype != SOCI_AI) + if (socitype == SOCI_AI) { + SI_MSG(("Found chip type AI (0x%08x)\n", w)); + /* pass chipc address instead of original core base */ + ai_scan(&sii->pub, cc); + } else { + /* Found chip of unknown type */ + return NULL; + } + /* no cores found, bail out */ + if (sii->numcores == 0) return NULL; - SI_MSG("Found chip type AI (0x%08x)\n", w); - if (!ai_buscore_setup(sii, cc)) + /* bus/core/clk setup */ + origidx = SI_CC_IDX; + if (!ai_buscore_setup(sii, savewin, &origidx)) goto exit; /* Init nvram from sprom/otp if they exist */ - if (srom_var_init(&sii->pub)) + if (srom_var_init(&sii->pub, cc)) goto exit; ai_nvram_process(sii); /* === NVRAM, clock is ready === */ - bcma_write32(cc, CHIPCREGOFFS(gpiopullup), 0); - bcma_write32(cc, CHIPCREGOFFS(gpiopulldown), 0); + cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0); + W_REG(&cc->gpiopullup, 0); + W_REG(&cc->gpiopulldown, 0); + ai_setcoreidx(sih, origidx); /* PMU specific initializations */ - if (ai_get_cccaps(sih) & CC_CAP_PMU) { + if (sih->cccaps & CC_CAP_PMU) { + u32 xtalfreq; si_pmu_init(sih); - (void)si_pmu_measure_alpclk(sih); + si_pmu_chip_init(sih); + + xtalfreq = si_pmu_measure_alpclk(sih); + si_pmu_pll_init(sih, xtalfreq); si_pmu_res_init(sih); + si_pmu_swreg_init(sih); } /* setup the GPIO based LED powersave register */ w = getintvar(sih, BRCMS_SROM_LEDDC); if (w == 0) w = DEFAULT_GPIOTIMERVAL; - ai_cc_reg(sih, offsetof(struct chipcregs, gpiotimerval), - ~0, w); + ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, gpiotimerval), + ~0, w); - if (PCIE(sih)) + if (PCIE(sii)) pcicore_attach(sii->pch, SI_DOATTACH); - if (ai_get_chip_id(sih) == BCM43224_CHIP_ID) { + if (sih->chip == BCM43224_CHIP_ID) { /* * enable 12 mA drive strenth for 43224 and * set chipControl register bit 15 */ - if (ai_get_chiprev(sih) == 0) { - SI_MSG("Applying 43224A0 WARs\n"); - ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol), - CCTRL43224_GPIO_TOGGLE, - CCTRL43224_GPIO_TOGGLE); + if (sih->chiprev == 0) { + SI_MSG(("Applying 43224A0 WARs\n")); + ai_corereg(sih, SI_CC_IDX, + offsetof(struct chipcregs, chipcontrol), + CCTRL43224_GPIO_TOGGLE, + CCTRL43224_GPIO_TOGGLE); si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE, CCTRL_43224A0_12MA_LED_DRIVE); } - if (ai_get_chiprev(sih) >= 1) { - SI_MSG("Applying 43224B0+ WARs\n"); + if (sih->chiprev >= 1) { + SI_MSG(("Applying 43224B0+ WARs\n")); si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE, CCTRL_43224B0_12MA_LED_DRIVE); } } - if (ai_get_chip_id(sih) == BCM4313_CHIP_ID) { + if (sih->chip == BCM4313_CHIP_ID) { /* * enable 12 mA drive strenth for 4313 and * set chipControl register bit 1 */ - SI_MSG("Applying 4313 WARs\n"); + SI_MSG(("Applying 4313 WARs\n")); si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE, CCTRL_4313_12MA_LED_DRIVE); } @@ -667,19 +1165,22 @@ static struct si_info *ai_doattach(struct si_info *sii, } /* - * Allocate a si handle and do the attach. + * Allocate a si handle. 
+ * devid - pci device id (used to determine chip#) + * osh - opaque OS handle + * regs - virtual address of initial core registers */ struct si_pub * -ai_attach(struct bcma_bus *pbus) +ai_attach(void __iomem *regs, struct pci_dev *sdh) { struct si_info *sii; /* alloc struct si_info */ - sii = kzalloc(sizeof(struct si_info), GFP_ATOMIC); + sii = kmalloc(sizeof(struct si_info), GFP_ATOMIC); if (sii == NULL) return NULL; - if (ai_doattach(sii, pbus) == NULL) { + if (ai_doattach(sii, regs, sdh) == NULL) { kfree(sii); return NULL; } @@ -708,66 +1209,292 @@ void ai_detach(struct si_pub *sih) kfree(sii); } +/* register driver interrupt disabling and restoring callback functions */ +void +ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn, + void *intrsrestore_fn, + void *intrsenabled_fn, void *intr_arg) +{ + struct si_info *sii; + + sii = (struct si_info *)sih; + sii->intr_arg = intr_arg; + sii->intrsoff_fn = (u32 (*)(void *)) intrsoff_fn; + sii->intrsrestore_fn = (void (*) (void *, u32)) intrsrestore_fn; + sii->intrsenabled_fn = (bool (*)(void *)) intrsenabled_fn; + /* save current core id. when this function called, the current core + * must be the core which provides driver functions(il, et, wl, etc.) + */ + sii->dev_coreid = sii->coreid[sii->curidx]; +} + +void ai_deregister_intr_callback(struct si_pub *sih) +{ + struct si_info *sii; + + sii = (struct si_info *)sih; + sii->intrsoff_fn = NULL; +} + +uint ai_coreid(struct si_pub *sih) +{ + struct si_info *sii; + + sii = (struct si_info *)sih; + return sii->coreid[sii->curidx]; +} + +uint ai_coreidx(struct si_pub *sih) +{ + struct si_info *sii; + + sii = (struct si_info *)sih; + return sii->curidx; +} + +bool ai_backplane64(struct si_pub *sih) +{ + return (sih->cccaps & CC_CAP_BKPLN64) != 0; +} + /* return index of coreid or BADIDX if not found */ -struct bcma_device *ai_findcore(struct si_pub *sih, u16 coreid, u16 coreunit) +uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit) { - struct bcma_device *core; struct si_info *sii; uint found; + uint i; sii = (struct si_info *)sih; found = 0; - list_for_each_entry(core, &sii->icbus->cores, list) - if (core->id.id == coreid) { + for (i = 0; i < sii->numcores; i++) + if (sii->coreid[i] == coreid) { if (found == coreunit) - return core; + return i; found++; } - return NULL; + return BADIDX; } /* - * read/modify chipcommon core register. + * This function changes logical "focus" to the indicated core; + * must be called with interrupts off. + * Moreover, callers should keep interrupts off during switching + * out of and back to d11 core. */ -uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val) +void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit) { - struct bcma_device *cc; - u32 w; + uint idx; + + idx = ai_findcoreidx(sih, coreid, coreunit); + if (idx >= SI_MAXCORES) + return NULL; + + return ai_setcoreidx(sih, idx); +} + +/* Turn off interrupt as required by ai_setcore, before switch core */ +void __iomem *ai_switch_core(struct si_pub *sih, uint coreid, uint *origidx, + uint *intr_val) +{ + void __iomem *cc; struct si_info *sii; sii = (struct si_info *)sih; - cc = sii->icbus->drv_cc.core; + + if (SI_FAST(sii)) { + /* Overloading the origidx variable to remember the coreid, + * this works because the core ids cannot be confused with + * core indices. 
+ */ + *origidx = coreid; + if (coreid == CC_CORE_ID) + return CCREGS_FAST(sii); + else if (coreid == sih->buscoretype) + return PCIEREGS(sii); + } + INTR_OFF(sii, *intr_val); + *origidx = sii->curidx; + cc = ai_setcore(sih, coreid, 0); + return cc; +} + +/* restore coreidx and restore interrupt */ +void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val) +{ + struct si_info *sii; + + sii = (struct si_info *)sih; + if (SI_FAST(sii) + && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype))) + return; + + ai_setcoreidx(sih, coreid); + INTR_RESTORE(sii, intr_val); +} + +void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val) +{ + struct si_info *sii = (struct si_info *)sih; + u32 *w = (u32 *) sii->curwrap; + W_REG(w + (offset / 4), val); + return; +} + +/* + * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set + * operation, switch back to the original core, and return the new value. + * + * When using the silicon backplane, no fiddling with interrupts or core + * switches is needed. + * + * Also, when using pci/pcie, we can optimize away the core switching for pci + * registers and (on newer pci cores) chipcommon registers. + */ +uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask, + uint val) +{ + uint origidx = 0; + u32 __iomem *r = NULL; + uint w; + uint intr_val = 0; + bool fast = false; + struct si_info *sii; + + sii = (struct si_info *)sih; + + if (coreidx >= SI_MAXCORES) + return 0; + + /* + * If pci/pcie, we can get at pci/pcie regs + * and on newer cores to chipc + */ + if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { + /* Chipc registers are mapped at 12KB */ + fast = true; + r = (u32 __iomem *)((__iomem char *)sii->curmap + + PCI_16KB0_CCREGS_OFFSET + regoff); + } else if (sii->pub.buscoreidx == coreidx) { + /* + * pci registers are at either in the last 2KB of + * an 8KB window or, in pcie and pci rev 13 at 8KB + */ + fast = true; + if (SI_FAST(sii)) + r = (u32 __iomem *)((__iomem char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + else + r = (u32 __iomem *)((__iomem char *)sii->curmap + + ((regoff >= SBCONFIGOFF) ? 
+ PCI_BAR0_PCISBR_OFFSET : + PCI_BAR0_PCIREGS_OFFSET) + regoff); + } + + if (!fast) { + INTR_OFF(sii, intr_val); + + /* save current core index */ + origidx = ai_coreidx(&sii->pub); + + /* switch core */ + r = (u32 __iomem *) ((unsigned char __iomem *) + ai_setcoreidx(&sii->pub, coreidx) + regoff); + } /* mask and set */ if (mask || val) { - bcma_maskset32(cc, regoff, ~mask, val); + w = (R_REG(r) & ~mask) | val; + W_REG(r, w); } /* readback */ - w = bcma_read32(cc, regoff); + w = R_REG(r); + + if (!fast) { + /* restore core index */ + if (origidx != coreidx) + ai_setcoreidx(&sii->pub, origidx); + + INTR_RESTORE(sii, intr_val); + } return w; } -/* return the slow clock source - LPO, XTAL, or PCI */ -static uint ai_slowclk_src(struct si_pub *sih, struct bcma_device *cc) +void ai_core_disable(struct si_pub *sih, u32 bits) { struct si_info *sii; - u32 val; + u32 dummy; + struct aidmp *ai; sii = (struct si_info *)sih; - if (ai_get_ccrev(&sii->pub) < 6) { - pci_read_config_dword(sii->pcibus, PCI_GPIO_OUT, + + ai = sii->curwrap; + + /* if core is already in reset, just return */ + if (R_REG(&ai->resetctrl) & AIRC_RESET) + return; + + W_REG(&ai->ioctrl, bits); + dummy = R_REG(&ai->ioctrl); + udelay(10); + + W_REG(&ai->resetctrl, AIRC_RESET); + udelay(1); +} + +/* reset and re-enable a core + * inputs: + * bits - core specific bits that are set during and after reset sequence + * resetbits - core specific bits that are set only during reset sequence + */ +void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits) +{ + struct si_info *sii; + struct aidmp *ai; + u32 dummy; + + sii = (struct si_info *)sih; + ai = sii->curwrap; + + /* + * Must do the disable sequence first to work + * for arbitrary current core state. + */ + ai_core_disable(sih, (bits | resetbits)); + + /* + * Now do the initialization sequence. + */ + W_REG(&ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN)); + dummy = R_REG(&ai->ioctrl); + W_REG(&ai->resetctrl, 0); + udelay(1); + + W_REG(&ai->ioctrl, (bits | SICF_CLOCK_EN)); + dummy = R_REG(&ai->ioctrl); + udelay(1); +} + +/* return the slow clock source - LPO, XTAL, or PCI */ +static uint ai_slowclk_src(struct si_info *sii) +{ + struct chipcregs __iomem *cc; + u32 val; + + if (sii->pub.ccrev < 6) { + pci_read_config_dword(sii->pbus, PCI_GPIO_OUT, &val); if (val & PCI_CFG_GPIO_SCS) return SCC_SS_PCI; return SCC_SS_XTAL; - } else if (ai_get_ccrev(&sii->pub) < 10) { - return bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)) & - SCC_SS_MASK; + } else if (sii->pub.ccrev < 10) { + cc = (struct chipcregs __iomem *) + ai_setcoreidx(&sii->pub, sii->curidx); + return R_REG(&cc->slow_clk_ctl) & SCC_SS_MASK; } else /* Insta-clock */ return SCC_SS_XTAL; } @@ -776,24 +1503,24 @@ static uint ai_slowclk_src(struct si_pub *sih, struct bcma_device *cc) * return the ILP (slowclock) min or max frequency * precondition: we've established the chip has dynamic clk control */ -static uint ai_slowclk_freq(struct si_pub *sih, bool max_freq, - struct bcma_device *cc) +static uint ai_slowclk_freq(struct si_info *sii, bool max_freq, + struct chipcregs __iomem *cc) { u32 slowclk; uint div; - slowclk = ai_slowclk_src(sih, cc); - if (ai_get_ccrev(sih) < 6) { + slowclk = ai_slowclk_src(sii); + if (sii->pub.ccrev < 6) { if (slowclk == SCC_SS_PCI) return max_freq ? (PCIMAXFREQ / 64) : (PCIMINFREQ / 64); else return max_freq ? 
(XTALMAXFREQ / 32) : (XTALMINFREQ / 32); - } else if (ai_get_ccrev(sih) < 10) { + } else if (sii->pub.ccrev < 10) { div = 4 * - (((bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)) & - SCC_CD_MASK) >> SCC_CD_SHIFT) + 1); + (((R_REG(&cc->slow_clk_ctl) & SCC_CD_MASK) >> + SCC_CD_SHIFT) + 1); if (slowclk == SCC_SS_LPO) return max_freq ? LPOMAXFREQ : LPOMINFREQ; else if (slowclk == SCC_SS_XTAL) @@ -804,15 +1531,15 @@ static uint ai_slowclk_freq(struct si_pub *sih, bool max_freq, : (PCIMINFREQ / div); } else { /* Chipc rev 10 is InstaClock */ - div = bcma_read32(cc, CHIPCREGOFFS(system_clk_ctl)); - div = 4 * ((div >> SYCC_CD_SHIFT) + 1); + div = R_REG(&cc->system_clk_ctl) >> SYCC_CD_SHIFT; + div = 4 * (div + 1); return max_freq ? XTALMAXFREQ : (XTALMINFREQ / div); } return 0; } static void -ai_clkctl_setdelay(struct si_pub *sih, struct bcma_device *cc) +ai_clkctl_setdelay(struct si_info *sii, struct chipcregs __iomem *cc) { uint slowmaxfreq, pll_delay, slowclk; uint pll_on_delay, fref_sel_delay; @@ -825,40 +1552,55 @@ ai_clkctl_setdelay(struct si_pub *sih, struct bcma_device *cc) * powered down by dynamic clk control logic. */ - slowclk = ai_slowclk_src(sih, cc); + slowclk = ai_slowclk_src(sii); if (slowclk != SCC_SS_XTAL) pll_delay += XTAL_ON_DELAY; /* Starting with 4318 it is ILP that is used for the delays */ slowmaxfreq = - ai_slowclk_freq(sih, - (ai_get_ccrev(sih) >= 10) ? false : true, cc); + ai_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? false : true, cc); pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000; fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000; - bcma_write32(cc, CHIPCREGOFFS(pll_on_delay), pll_on_delay); - bcma_write32(cc, CHIPCREGOFFS(fref_sel_delay), fref_sel_delay); + W_REG(&cc->pll_on_delay, pll_on_delay); + W_REG(&cc->fref_sel_delay, fref_sel_delay); } /* initialize power control delay registers */ void ai_clkctl_init(struct si_pub *sih) { - struct bcma_device *cc; + struct si_info *sii; + uint origidx = 0; + struct chipcregs __iomem *cc; + bool fast; - if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL)) + if (!(sih->cccaps & CC_CAP_PWR_CTL)) return; - cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0); - if (cc == NULL) - return; + sii = (struct si_info *)sih; + fast = SI_FAST(sii); + if (!fast) { + origidx = sii->curidx; + cc = (struct chipcregs __iomem *) + ai_setcore(sih, CC_CORE_ID, 0); + if (cc == NULL) + return; + } else { + cc = (struct chipcregs __iomem *) CCREGS_FAST(sii); + if (cc == NULL) + return; + } /* set all Instaclk chip ILP to 1 MHz */ - if (ai_get_ccrev(sih) >= 10) - bcma_maskset32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_CD_MASK, - (ILP_DIV_1MHZ << SYCC_CD_SHIFT)); + if (sih->ccrev >= 10) + SET_REG(&cc->system_clk_ctl, SYCC_CD_MASK, + (ILP_DIV_1MHZ << SYCC_CD_SHIFT)); - ai_clkctl_setdelay(sih, cc); + ai_clkctl_setdelay(sii, cc); + + if (!fast) + ai_setcoreidx(sih, origidx); } /* @@ -868,25 +1610,47 @@ void ai_clkctl_init(struct si_pub *sih) u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih) { struct si_info *sii; - struct bcma_device *cc; + uint origidx = 0; + struct chipcregs __iomem *cc; uint slowminfreq; u16 fpdelay; + uint intr_val = 0; + bool fast; sii = (struct si_info *)sih; - if (ai_get_cccaps(sih) & CC_CAP_PMU) { + if (sih->cccaps & CC_CAP_PMU) { + INTR_OFF(sii, intr_val); fpdelay = si_pmu_fast_pwrup_delay(sih); + INTR_RESTORE(sii, intr_val); return fpdelay; } - if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL)) + if (!(sih->cccaps & CC_CAP_PWR_CTL)) return 0; + fast = SI_FAST(sii); fpdelay = 0; - cc = ai_findcore(sih, CC_CORE_ID, 0); - if 
(cc) { - slowminfreq = ai_slowclk_freq(sih, false, cc); - fpdelay = (((bcma_read32(cc, CHIPCREGOFFS(pll_on_delay)) + 2) - * 1000000) + (slowminfreq - 1)) / slowminfreq; + if (!fast) { + origidx = sii->curidx; + INTR_OFF(sii, intr_val); + cc = (struct chipcregs __iomem *) + ai_setcore(sih, CC_CORE_ID, 0); + if (cc == NULL) + goto done; + } else { + cc = (struct chipcregs __iomem *) CCREGS_FAST(sii); + if (cc == NULL) + goto done; + } + + slowminfreq = ai_slowclk_freq(sii, false, cc); + fpdelay = (((R_REG(&cc->pll_on_delay) + 2) * 1000000) + + (slowminfreq - 1)) / slowminfreq; + + done: + if (!fast) { + ai_setcoreidx(sih, origidx); + INTR_RESTORE(sii, intr_val); } return fpdelay; } @@ -900,12 +1664,12 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on) sii = (struct si_info *)sih; /* pcie core doesn't have any mapping to control the xtal pu */ - if (PCIE(sih)) + if (PCIE(sii)) return -1; - pci_read_config_dword(sii->pcibus, PCI_GPIO_IN, &in); - pci_read_config_dword(sii->pcibus, PCI_GPIO_OUT, &out); - pci_read_config_dword(sii->pcibus, PCI_GPIO_OUTEN, &outen); + pci_read_config_dword(sii->pbus, PCI_GPIO_IN, &in); + pci_read_config_dword(sii->pbus, PCI_GPIO_OUT, &out); + pci_read_config_dword(sii->pbus, PCI_GPIO_OUTEN, &outen); /* * Avoid glitching the clock if GPRS is already using it. @@ -926,9 +1690,9 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on) out |= PCI_CFG_GPIO_XTAL; if (what & PLL) out |= PCI_CFG_GPIO_PLL; - pci_write_config_dword(sii->pcibus, + pci_write_config_dword(sii->pbus, PCI_GPIO_OUT, out); - pci_write_config_dword(sii->pcibus, + pci_write_config_dword(sii->pbus, PCI_GPIO_OUTEN, outen); udelay(XTAL_ON_DELAY); } @@ -936,7 +1700,7 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on) /* turn pll on */ if (what & PLL) { out &= ~PCI_CFG_GPIO_PLL; - pci_write_config_dword(sii->pcibus, + pci_write_config_dword(sii->pbus, PCI_GPIO_OUT, out); mdelay(2); } @@ -945,9 +1709,9 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on) out &= ~PCI_CFG_GPIO_XTAL; if (what & PLL) out |= PCI_CFG_GPIO_PLL; - pci_write_config_dword(sii->pcibus, + pci_write_config_dword(sii->pbus, PCI_GPIO_OUT, out); - pci_write_config_dword(sii->pcibus, + pci_write_config_dword(sii->pbus, PCI_GPIO_OUTEN, outen); } @@ -957,52 +1721,63 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on) /* clk control mechanism through chipcommon, no policy checking */ static bool _ai_clkctl_cc(struct si_info *sii, uint mode) { - struct bcma_device *cc; + uint origidx = 0; + struct chipcregs __iomem *cc; u32 scc; + uint intr_val = 0; + bool fast = SI_FAST(sii); /* chipcommon cores prior to rev6 don't support dynamic clock control */ - if (ai_get_ccrev(&sii->pub) < 6) + if (sii->pub.ccrev < 6) return false; - cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0); + if (!fast) { + INTR_OFF(sii, intr_val); + origidx = sii->curidx; + cc = (struct chipcregs __iomem *) + ai_setcore(&sii->pub, CC_CORE_ID, 0); + } else { + cc = (struct chipcregs __iomem *) CCREGS_FAST(sii); + if (cc == NULL) + goto done; + } - if (!(ai_get_cccaps(&sii->pub) & CC_CAP_PWR_CTL) && - (ai_get_ccrev(&sii->pub) < 20)) - return mode == CLK_FAST; + if (!(sii->pub.cccaps & CC_CAP_PWR_CTL) && (sii->pub.ccrev < 20)) + goto done; switch (mode) { case CLK_FAST: /* FORCEHT, fast (pll) clock */ - if (ai_get_ccrev(&sii->pub) < 10) { + if (sii->pub.ccrev < 10) { /* * don't forget to force xtal back * on before we clear SCC_DYN_XTAL.. 
*/ ai_clkctl_xtal(&sii->pub, XTAL, ON); - bcma_maskset32(cc, CHIPCREGOFFS(slow_clk_ctl), - (SCC_XC | SCC_FS | SCC_IP), SCC_IP); - } else if (ai_get_ccrev(&sii->pub) < 20) { - bcma_set32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_HR); + SET_REG(&cc->slow_clk_ctl, + (SCC_XC | SCC_FS | SCC_IP), SCC_IP); + } else if (sii->pub.ccrev < 20) { + OR_REG(&cc->system_clk_ctl, SYCC_HR); } else { - bcma_set32(cc, CHIPCREGOFFS(clk_ctl_st), CCS_FORCEHT); + OR_REG(&cc->clk_ctl_st, CCS_FORCEHT); } /* wait for the PLL */ - if (ai_get_cccaps(&sii->pub) & CC_CAP_PMU) { + if (sii->pub.cccaps & CC_CAP_PMU) { u32 htavail = CCS_HTAVAIL; - SPINWAIT(((bcma_read32(cc, CHIPCREGOFFS(clk_ctl_st)) & - htavail) == 0), PMU_MAX_TRANSITION_DLY); + SPINWAIT(((R_REG(&cc->clk_ctl_st) & htavail) + == 0), PMU_MAX_TRANSITION_DLY); } else { udelay(PLL_DELAY); } break; case CLK_DYNAMIC: /* enable dynamic clock control */ - if (ai_get_ccrev(&sii->pub) < 10) { - scc = bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)); + if (sii->pub.ccrev < 10) { + scc = R_REG(&cc->slow_clk_ctl); scc &= ~(SCC_FS | SCC_IP | SCC_XC); if ((scc & SCC_SS_MASK) != SCC_SS_XTAL) scc |= SCC_XC; - bcma_write32(cc, CHIPCREGOFFS(slow_clk_ctl), scc); + W_REG(&cc->slow_clk_ctl, scc); /* * for dynamic control, we have to @@ -1010,11 +1785,11 @@ static bool _ai_clkctl_cc(struct si_info *sii, uint mode) */ if (scc & SCC_XC) ai_clkctl_xtal(&sii->pub, XTAL, OFF); - } else if (ai_get_ccrev(&sii->pub) < 20) { + } else if (sii->pub.ccrev < 20) { /* Instaclock */ - bcma_mask32(cc, CHIPCREGOFFS(system_clk_ctl), ~SYCC_HR); + AND_REG(&cc->system_clk_ctl, ~SYCC_HR); } else { - bcma_mask32(cc, CHIPCREGOFFS(clk_ctl_st), ~CCS_FORCEHT); + AND_REG(&cc->clk_ctl_st, ~CCS_FORCEHT); } break; @@ -1022,6 +1797,11 @@ static bool _ai_clkctl_cc(struct si_info *sii, uint mode) break; } + done: + if (!fast) { + ai_setcoreidx(&sii->pub, origidx); + INTR_RESTORE(sii, intr_val); + } return mode == CLK_FAST; } @@ -1040,25 +1820,46 @@ bool ai_clkctl_cc(struct si_pub *sih, uint mode) sii = (struct si_info *)sih; /* chipcommon cores prior to rev6 don't support dynamic clock control */ - if (ai_get_ccrev(sih) < 6) + if (sih->ccrev < 6) return false; - if (PCI_FORCEHT(sih)) + if (PCI_FORCEHT(sii)) return mode == CLK_FAST; return _ai_clkctl_cc(sii, mode); } +/* Build device path */ +int ai_devpath(struct si_pub *sih, char *path, int size) +{ + int slen; + + if (!path || size <= 0) + return -1; + + slen = snprintf(path, (size_t) size, "pci/%u/%u/", + ((struct si_info *)sih)->pbus->bus->number, + PCI_SLOT(((struct pci_dev *) + (((struct si_info *)(sih))->pbus))->devfn)); + + if (slen < 0 || slen >= size) { + path[0] = '\0'; + return -1; + } + + return 0; +} + void ai_pci_up(struct si_pub *sih) { struct si_info *sii; sii = (struct si_info *)sih; - if (PCI_FORCEHT(sih)) + if (PCI_FORCEHT(sii)) _ai_clkctl_cc(sii, CLK_FAST); - if (PCIE(sih)) + if (PCIE(sii)) pcicore_up(sii->pch, SI_PCIUP); } @@ -1081,7 +1882,7 @@ void ai_pci_down(struct si_pub *sih) sii = (struct si_info *)sih; /* release FORCEHT since chip is going to "down" state */ - if (PCI_FORCEHT(sih)) + if (PCI_FORCEHT(sii)) _ai_clkctl_cc(sii, CLK_DYNAMIC); pcicore_down(sii->pch, SI_PCIDOWN); @@ -1094,23 +1895,42 @@ void ai_pci_down(struct si_pub *sih) void ai_pci_setup(struct si_pub *sih, uint coremask) { struct si_info *sii; - u32 w; + struct sbpciregs __iomem *regs = NULL; + u32 siflag = 0, w; + uint idx = 0; sii = (struct si_info *)sih; + if (PCI(sii)) { + /* get current core index */ + idx = sii->curidx; + + /* we interrupt on this backplane flag number */ 
+ siflag = ai_flag(sih); + + /* switch over to pci core */ + regs = ai_setcoreidx(sih, sii->pub.buscoreidx); + } + /* * Enable sb->pci interrupts. Assume * PCI rev 2.3 support was added in pci core rev 6 and things changed.. */ - if (PCIE(sih) || (PCI(sih) && (ai_get_buscorerev(sih) >= 6))) { + if (PCIE(sii) || (PCI(sii) && ((sii->pub.buscorerev) >= 6))) { /* pci config write to set this core bit in PCIIntMask */ - pci_read_config_dword(sii->pcibus, PCI_INT_MASK, &w); + pci_read_config_dword(sii->pbus, PCI_INT_MASK, &w); w |= (coremask << PCI_SBIM_SHIFT); - pci_write_config_dword(sii->pcibus, PCI_INT_MASK, w); + pci_write_config_dword(sii->pbus, PCI_INT_MASK, w); + } else { + /* set sbintvec bit for our flag number */ + ai_setint(sih, siflag); } - if (PCI(sih)) { - pcicore_pci_setup(sii->pch); + if (PCI(sii)) { + pcicore_pci_setup(sii->pch, regs); + + /* switch back to previous core */ + ai_setcoreidx(sih, idx); } } @@ -1120,11 +1940,25 @@ void ai_pci_setup(struct si_pub *sih, uint coremask) */ int ai_pci_fixcfg(struct si_pub *sih) { + uint origidx; + void __iomem *regs = NULL; struct si_info *sii = (struct si_info *)sih; /* Fixup PI in SROM shadow area to enable the correct PCI core access */ + /* save the current index */ + origidx = ai_coreidx(&sii->pub); + /* check 'pi' is correct and fix it if not */ - pcicore_fixcfg(sii->pch); + regs = ai_setcore(&sii->pub, sii->pub.buscoretype, 0); + if (sii->pub.buscoretype == PCIE_CORE_ID) + pcicore_fixcfg_pcie(sii->pch, + (struct sbpcieregs __iomem *)regs); + else if (sii->pub.buscoretype == PCI_CORE_ID) + pcicore_fixcfg_pci(sii->pch, (struct sbpciregs __iomem *)regs); + + /* restore the original index */ + ai_setcoreidx(&sii->pub, origidx); + pcicore_hwup(sii->pch); return 0; } @@ -1135,42 +1969,58 @@ u32 ai_gpiocontrol(struct si_pub *sih, u32 mask, u32 val, u8 priority) uint regoff; regoff = offsetof(struct chipcregs, gpiocontrol); - return ai_cc_reg(sih, regoff, mask, val); + return ai_corereg(sih, SI_CC_IDX, regoff, mask, val); } void ai_chipcontrl_epa4331(struct si_pub *sih, bool on) { - struct bcma_device *cc; + struct si_info *sii; + struct chipcregs __iomem *cc; + uint origidx; u32 val; - cc = ai_findcore(sih, CC_CORE_ID, 0); + sii = (struct si_info *)sih; + origidx = ai_coreidx(sih); + + cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0); + + val = R_REG(&cc->chipcontrol); if (on) { - if (ai_get_chippkg(sih) == 9 || ai_get_chippkg(sih) == 0xb) + if (sih->chippkg == 9 || sih->chippkg == 0xb) /* Ext PA Controls for 4331 12x9 Package */ - bcma_set32(cc, CHIPCREGOFFS(chipcontrol), - CCTRL4331_EXTPA_EN | - CCTRL4331_EXTPA_ON_GPIO2_5); + W_REG(&cc->chipcontrol, val | + CCTRL4331_EXTPA_EN | + CCTRL4331_EXTPA_ON_GPIO2_5); else /* Ext PA Controls for 4331 12x12 Package */ - bcma_set32(cc, CHIPCREGOFFS(chipcontrol), - CCTRL4331_EXTPA_EN); + W_REG(&cc->chipcontrol, + val | CCTRL4331_EXTPA_EN); } else { val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5); - bcma_mask32(cc, CHIPCREGOFFS(chipcontrol), - ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5)); + W_REG(&cc->chipcontrol, val); } + + ai_setcoreidx(sih, origidx); } /* Enable BT-COEX & Ex-PA for 4313 */ void ai_epa_4313war(struct si_pub *sih) { - struct bcma_device *cc; + struct si_info *sii; + struct chipcregs __iomem *cc; + uint origidx; - cc = ai_findcore(sih, CC_CORE_ID, 0); + sii = (struct si_info *)sih; + origidx = ai_coreidx(sih); + + cc = ai_setcore(sih, CC_CORE_ID, 0); /* EPA Fix */ - bcma_set32(cc, CHIPCREGOFFS(gpiocontrol), GPIO_CTRL_EPA_EN_MASK); + 
W_REG(&cc->gpiocontrol, + R_REG(&cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK); + + ai_setcoreidx(sih, origidx); } /* check if the device is removed */ @@ -1181,7 +2031,7 @@ bool ai_deviceremoved(struct si_pub *sih) sii = (struct si_info *)sih; - pci_read_config_dword(sii->pcibus, PCI_VENDOR_ID, &w); + pci_read_config_dword(sii->pbus, PCI_VENDOR_ID, &w); if ((w & 0xFFFF) != PCI_VENDOR_ID_BROADCOM) return true; @@ -1190,23 +2040,26 @@ bool ai_deviceremoved(struct si_pub *sih) bool ai_is_sprom_available(struct si_pub *sih) { - struct si_info *sii = (struct si_info *)sih; - - if (ai_get_ccrev(sih) >= 31) { - struct bcma_device *cc; + if (sih->ccrev >= 31) { + struct si_info *sii; + uint origidx; + struct chipcregs __iomem *cc; u32 sromctrl; - if ((ai_get_cccaps(sih) & CC_CAP_SROM) == 0) + if ((sih->cccaps & CC_CAP_SROM) == 0) return false; - cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0); - sromctrl = bcma_read32(cc, CHIPCREGOFFS(sromcontrol)); + sii = (struct si_info *)sih; + origidx = sii->curidx; + cc = ai_setcoreidx(sih, SI_CC_IDX); + sromctrl = R_REG(&cc->sromcontrol); + ai_setcoreidx(sih, origidx); return sromctrl & SRC_PRESENT; } - switch (ai_get_chip_id(sih)) { + switch (sih->chip) { case BCM4313_CHIP_ID: - return (sii->chipst & CST4313_SPROM_PRESENT) != 0; + return (sih->chipst & CST4313_SPROM_PRESENT) != 0; default: return true; } @@ -1214,11 +2067,9 @@ bool ai_is_sprom_available(struct si_pub *sih) bool ai_is_otp_disabled(struct si_pub *sih) { - struct si_info *sii = (struct si_info *)sih; - - switch (ai_get_chip_id(sih)) { + switch (sih->chip) { case BCM4313_CHIP_ID: - return (sii->chipst & CST4313_OTP_PRESENT) == 0; + return (sih->chipst & CST4313_OTP_PRESENT) == 0; /* These chips always have their OTP on */ case BCM43224_CHIP_ID: case BCM43225_CHIP_ID: @@ -1226,15 +2077,3 @@ bool ai_is_otp_disabled(struct si_pub *sih) return false; } } - -uint ai_get_buscoretype(struct si_pub *sih) -{ - struct si_info *sii = (struct si_info *)sih; - return sii->buscore->id.id; -} - -uint ai_get_buscorerev(struct si_pub *sih) -{ - struct si_info *sii = (struct si_info *)sih; - return sii->buscore->id.rev; -} diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/trunk/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h index f84c6f781692..106a7424a7cd 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h @@ -17,8 +17,6 @@ #ifndef _BRCM_AIUTILS_H_ #define _BRCM_AIUTILS_H_ -#include - #include "types.h" /* @@ -40,12 +38,88 @@ /* PCIE Client Mode sb2pcitranslation2 (2 ZettaBytes), high 32 bits */ #define SI_PCIE_DMA_H32 0x80000000 +/* core codes */ +#define NODEV_CORE_ID 0x700 /* Invalid coreid */ +#define CC_CORE_ID 0x800 /* chipcommon core */ +#define ILINE20_CORE_ID 0x801 /* iline20 core */ +#define SRAM_CORE_ID 0x802 /* sram core */ +#define SDRAM_CORE_ID 0x803 /* sdram core */ +#define PCI_CORE_ID 0x804 /* pci core */ +#define MIPS_CORE_ID 0x805 /* mips core */ +#define ENET_CORE_ID 0x806 /* enet mac core */ +#define CODEC_CORE_ID 0x807 /* v90 codec core */ +#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */ +#define ADSL_CORE_ID 0x809 /* ADSL core */ +#define ILINE100_CORE_ID 0x80a /* iline100 core */ +#define IPSEC_CORE_ID 0x80b /* ipsec core */ +#define UTOPIA_CORE_ID 0x80c /* utopia core */ +#define PCMCIA_CORE_ID 0x80d /* pcmcia core */ +#define SOCRAM_CORE_ID 0x80e /* internal memory core */ +#define MEMC_CORE_ID 0x80f /* memc sdram core */ +#define OFDM_CORE_ID 0x810 /* OFDM phy core */ +#define 
EXTIF_CORE_ID 0x811 /* external interface core */ +#define D11_CORE_ID 0x812 /* 802.11 MAC core */ +#define APHY_CORE_ID 0x813 /* 802.11a phy core */ +#define BPHY_CORE_ID 0x814 /* 802.11b phy core */ +#define GPHY_CORE_ID 0x815 /* 802.11g phy core */ +#define MIPS33_CORE_ID 0x816 /* mips3302 core */ +#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */ +#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */ +#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */ +#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */ +#define SDIOH_CORE_ID 0x81b /* sdio host core */ +#define ROBO_CORE_ID 0x81c /* roboswitch core */ +#define ATA100_CORE_ID 0x81d /* parallel ATA core */ +#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */ +#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */ +#define PCIE_CORE_ID 0x820 /* pci express core */ +#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */ +#define SRAMC_CORE_ID 0x822 /* SRAM controller core */ +#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */ +#define ARM11_CORE_ID 0x824 /* ARM 1176 core */ +#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */ +#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */ +#define PMU_CORE_ID 0x827 /* PMU core */ +#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */ +#define SDIOD_CORE_ID 0x829 /* SDIO device core */ +#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */ +#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */ +#define MIPS74K_CORE_ID 0x82c /* mips 74k core */ +#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */ +#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */ +#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */ +#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */ +#define SC_CORE_ID 0x831 /* shared common core */ +#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */ +#define SPIH_CORE_ID 0x833 /* SPI host core */ +#define I2S_CORE_ID 0x834 /* I2S core */ +#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */ +#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */ +#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */ +#define DEF_AI_COMP 0xfff /* Default component, in ai chips it + * maps all unused address ranges + */ + /* chipcommon being the first core: */ #define SI_CC_IDX 0 /* SOC Interconnect types (aka chip types) */ #define SOCI_AI 1 +/* Common core control flags */ +#define SICF_BIST_EN 0x8000 +#define SICF_PME_EN 0x4000 +#define SICF_CORE_BITS 0x3ffc +#define SICF_FGC 0x0002 +#define SICF_CLOCK_EN 0x0001 + +/* Common core status flags */ +#define SISF_BIST_DONE 0x8000 +#define SISF_BIST_ERROR 0x4000 +#define SISF_GATED_CLK 0x2000 +#define SISF_DMA64 0x1000 +#define SISF_CORE_BITS 0x0fff + /* A register that is common to all cores to * communicate w/PMU regarding clock control. 
*/ @@ -146,15 +220,26 @@ * public (read-only) portion of aiutils handle returned by si_attach() */ struct si_pub { + uint buscoretype; /* PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */ + uint buscorerev; /* buscore rev */ + uint buscoreidx; /* buscore index */ int ccrev; /* chip common core rev */ u32 cccaps; /* chip common capabilities */ + u32 cccaps_ext; /* chip common capabilities extension */ int pmurev; /* pmu core rev */ u32 pmucaps; /* pmu capabilities */ uint boardtype; /* board type */ uint boardvendor; /* board vendor */ + uint boardflags; /* board flags */ + uint boardflags2; /* board flags2 */ uint chip; /* chip number */ uint chiprev; /* chip revision */ uint chippkg; /* chip package option */ + u32 chipst; /* chip status */ + bool issim; /* chip is in simulation or emulation */ + uint socirev; /* SOC interconnect rev */ + bool pci_pr32414; + }; struct pci_dev; @@ -170,13 +255,38 @@ struct gpioh_item { /* misc si info needed by some of the routines */ struct si_info { struct si_pub pub; /* back plane public state (must be first) */ - struct bcma_bus *icbus; /* handle to soc interconnect bus */ - struct pci_dev *pcibus; /* handle to pci bus */ + struct pci_dev *pbus; /* handle to pci bus */ + uint dev_coreid; /* the core provides driver functions */ + void *intr_arg; /* interrupt callback function arg */ + u32 (*intrsoff_fn) (void *intr_arg); /* turns chip interrupts off */ + /* restore chip interrupts */ + void (*intrsrestore_fn) (void *intr_arg, u32 arg); + /* check if interrupts are enabled */ + bool (*intrsenabled_fn) (void *intr_arg); + struct pcicore_info *pch; /* PCI/E core handle */ - struct bcma_device *buscore; + struct list_head var_list; /* list of srom variables */ - u32 chipst; /* chip status */ + void __iomem *curmap; /* current regs va */ + void __iomem *regs[SI_MAXCORES]; /* other regs va */ + + uint curidx; /* current core index */ + uint numcores; /* # discovered cores */ + uint coreid[SI_MAXCORES]; /* id of each core */ + u32 coresba[SI_MAXCORES]; /* backplane address of each core */ + void *regs2[SI_MAXCORES]; /* 2nd virtual address per core (usbh20) */ + u32 coresba2[SI_MAXCORES]; /* 2nd phys address per core (usbh20) */ + u32 coresba_size[SI_MAXCORES]; /* backplane address space size */ + u32 coresba2_size[SI_MAXCORES]; /* second address space size */ + + void *curwrap; /* current wrapper va */ + void *wrappers[SI_MAXCORES]; /* other cores wrapper va */ + u32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */ + + u32 cia[SI_MAXCORES]; /* erom cia entry for each core */ + u32 cib[SI_MAXCORES]; /* erom cia entry for each core */ + u32 oob_router; /* oob router registers for axi */ }; /* @@ -189,15 +299,52 @@ struct si_info { /* AMBA Interconnect exported externs */ -extern struct bcma_device *ai_findcore(struct si_pub *sih, - u16 coreid, u16 coreunit); -extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val); +extern uint ai_flag(struct si_pub *sih); +extern void ai_setint(struct si_pub *sih, int siflag); +extern uint ai_coreidx(struct si_pub *sih); +extern uint ai_corevendor(struct si_pub *sih); +extern uint ai_corerev(struct si_pub *sih); +extern bool ai_iscoreup(struct si_pub *sih); +extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val); +extern void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val); +extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val); +extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask, + uint val); +extern void ai_core_reset(struct si_pub *sih, u32 bits, 
u32 resetbits); +extern void ai_core_disable(struct si_pub *sih, u32 bits); +extern int ai_numaddrspaces(struct si_pub *sih); +extern u32 ai_addrspace(struct si_pub *sih, uint asidx); +extern u32 ai_addrspacesize(struct si_pub *sih, uint asidx); +extern void ai_write_wrap_reg(struct si_pub *sih, u32 offset, u32 val); /* === exported functions === */ -extern struct si_pub *ai_attach(struct bcma_bus *pbus); +extern struct si_pub *ai_attach(void __iomem *regs, struct pci_dev *sdh); extern void ai_detach(struct si_pub *sih); -extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val); +extern uint ai_coreid(struct si_pub *sih); +extern uint ai_corerev(struct si_pub *sih); +extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask, + uint val); +extern void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val); +extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val); +extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val); +extern bool ai_iscoreup(struct si_pub *sih); +extern uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit); +extern void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx); +extern void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit); +extern void __iomem *ai_switch_core(struct si_pub *sih, uint coreid, + uint *origidx, uint *intr_val); +extern void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val); +extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits); +extern void ai_core_disable(struct si_pub *sih, u32 bits); +extern u32 ai_alp_clock(struct si_pub *sih); +extern u32 ai_ilp_clock(struct si_pub *sih); extern void ai_pci_setup(struct si_pub *sih, uint coremask); +extern void ai_setint(struct si_pub *sih, int siflag); +extern bool ai_backplane64(struct si_pub *sih); +extern void ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn, + void *intrsrestore_fn, + void *intrsenabled_fn, void *intr_arg); +extern void ai_deregister_intr_callback(struct si_pub *sih); extern void ai_clkctl_init(struct si_pub *sih); extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih); extern bool ai_clkctl_cc(struct si_pub *sih, uint mode); @@ -212,6 +359,13 @@ extern bool ai_is_otp_disabled(struct si_pub *sih); /* SPROM availability */ extern bool ai_is_sprom_available(struct si_pub *sih); +/* + * Build device path. Path size must be >= SI_DEVPATH_BUFSZ. + * The returned path is NULL terminated and has trailing '/'. + * Return 0 on success, nonzero otherwise. 
+ */ +extern int ai_devpath(struct si_pub *sih, char *path, int size); + extern void ai_pci_sleep(struct si_pub *sih); extern void ai_pci_down(struct si_pub *sih); extern void ai_pci_up(struct si_pub *sih); @@ -221,52 +375,4 @@ extern void ai_chipcontrl_epa4331(struct si_pub *sih, bool on); /* Enable Ex-PA for 4313 */ extern void ai_epa_4313war(struct si_pub *sih); -extern uint ai_get_buscoretype(struct si_pub *sih); -extern uint ai_get_buscorerev(struct si_pub *sih); - -static inline int ai_get_ccrev(struct si_pub *sih) -{ - return sih->ccrev; -} - -static inline u32 ai_get_cccaps(struct si_pub *sih) -{ - return sih->cccaps; -} - -static inline int ai_get_pmurev(struct si_pub *sih) -{ - return sih->pmurev; -} - -static inline u32 ai_get_pmucaps(struct si_pub *sih) -{ - return sih->pmucaps; -} - -static inline uint ai_get_boardtype(struct si_pub *sih) -{ - return sih->boardtype; -} - -static inline uint ai_get_boardvendor(struct si_pub *sih) -{ - return sih->boardvendor; -} - -static inline uint ai_get_chip_id(struct si_pub *sih) -{ - return sih->chip; -} - -static inline uint ai_get_chiprev(struct si_pub *sih) -{ - return sih->chiprev; -} - -static inline uint ai_get_chippkg(struct si_pub *sih) -{ - return sih->chippkg; -} - #endif /* _BRCM_AIUTILS_H_ */ diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c index 90911eec0cf5..7f27dbdb6b60 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c @@ -649,7 +649,7 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi, len = roundup(len, 4); ampdu_len += (len + (ndelim + 1) * AMPDU_DELIMITER_LEN); - dma_len += (u16) p->len; + dma_len += (u16) brcmu_pkttotlen(p); BCMMSG(wlc->wiphy, "wl%d: ampdu_len %d" " seg_cnt %d null delim %d\n", @@ -741,7 +741,9 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi, if (p) { if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && ((u8) (p->priority) == tid)) { - plen = p->len + AMPDU_MAX_MPDU_OVERHEAD; + + plen = brcmu_pkttotlen(p) + + AMPDU_MAX_MPDU_OVERHEAD; plen = max(scb_ampdu->min_len, plen); if ((plen + ampdu_len) > max_ampdu_bytes) { @@ -1118,17 +1120,14 @@ brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb, u8 status_delay = 0; /* wait till the next 8 bytes of txstatus is available */ - s1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus)); - while ((s1 & TXS_V) == 0) { + while (((s1 = R_REG(&wlc->regs->frmtxstatus)) & TXS_V) == 0) { udelay(1); status_delay++; if (status_delay > 10) return; /* error condition */ - s1 = bcma_read32(wlc->hw->d11core, - D11REGOFFS(frmtxstatus)); } - s2 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus2)); + s2 = R_REG(&wlc->regs->frmtxstatus2); } if (scb) { diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/channel.c index 55e9f45fce22..89ad1b7dab8f 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/channel.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/channel.c @@ -1153,6 +1153,121 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec, &txpwr); } +#ifdef POWER_DBG +static void wlc_phy_txpower_limits_dump(struct txpwr_limits *txpwr) +{ + int i; + char buf[80]; + char fraction[4][4] = { " ", ".25", ".5 ", ".75" }; + + sprintf(buf, "CCK "); + for (i = 0; i < BRCMS_NUM_RATES_CCK; i++) + sprintf(buf[strlen(buf)], " %2d%s", + txpwr->cck[i] / BRCMS_TXPWR_DB_FACTOR, + 
fraction[txpwr->cck[i] % BRCMS_TXPWR_DB_FACTOR]); + printk(KERN_DEBUG "%s\n", buf); + + sprintf(buf, "20 MHz OFDM SISO "); + for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) + sprintf(buf[strlen(buf)], " %2d%s", + txpwr->ofdm[i] / BRCMS_TXPWR_DB_FACTOR, + fraction[txpwr->ofdm[i] % BRCMS_TXPWR_DB_FACTOR]); + printk(KERN_DEBUG "%s\n", buf); + + sprintf(buf, "20 MHz OFDM CDD "); + for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) + sprintf(buf[strlen(buf)], " %2d%s", + txpwr->ofdm_cdd[i] / BRCMS_TXPWR_DB_FACTOR, + fraction[txpwr->ofdm_cdd[i] % BRCMS_TXPWR_DB_FACTOR]); + printk(KERN_DEBUG "%s\n", buf); + + sprintf(buf, "40 MHz OFDM SISO "); + for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) + sprintf(buf[strlen(buf)], " %2d%s", + txpwr->ofdm_40_siso[i] / BRCMS_TXPWR_DB_FACTOR, + fraction[txpwr->ofdm_40_siso[i] % + BRCMS_TXPWR_DB_FACTOR]); + printk(KERN_DEBUG "%s\n", buf); + + sprintf(buf, "40 MHz OFDM CDD "); + for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) + sprintf(buf[strlen(buf)], " %2d%s", + txpwr->ofdm_40_cdd[i] / BRCMS_TXPWR_DB_FACTOR, + fraction[txpwr->ofdm_40_cdd[i] % + BRCMS_TXPWR_DB_FACTOR]); + printk(KERN_DEBUG "%s\n", buf); + + sprintf(buf, "20 MHz MCS0-7 SISO "); + for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) + sprintf(buf[strlen(buf)], " %2d%s", + txpwr->mcs_20_siso[i] / BRCMS_TXPWR_DB_FACTOR, + fraction[txpwr->mcs_20_siso[i] % + BRCMS_TXPWR_DB_FACTOR]); + printk(KERN_DEBUG "%s\n", buf); + + sprintf(buf, "20 MHz MCS0-7 CDD "); + for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) + sprintf(buf[strlen(buf)], " %2d%s", + txpwr->mcs_20_cdd[i] / BRCMS_TXPWR_DB_FACTOR, + fraction[txpwr->mcs_20_cdd[i] % + BRCMS_TXPWR_DB_FACTOR]); + printk(KERN_DEBUG "%s\n", buf); + + sprintf(buf, "20 MHz MCS0-7 STBC "); + for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) + sprintf(buf[strlen(buf)], " %2d%s", + txpwr->mcs_20_stbc[i] / BRCMS_TXPWR_DB_FACTOR, + fraction[txpwr->mcs_20_stbc[i] % + BRCMS_TXPWR_DB_FACTOR]); + printk(KERN_DEBUG "%s\n", buf); + + sprintf(buf, "20 MHz MCS8-15 SDM "); + for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++) + sprintf(buf[strlen(buf)], " %2d%s", + txpwr->mcs_20_mimo[i] / BRCMS_TXPWR_DB_FACTOR, + fraction[txpwr->mcs_20_mimo[i] % + BRCMS_TXPWR_DB_FACTOR]); + printk(KERN_DEBUG "%s\n", buf); + + sprintf(buf, "40 MHz MCS0-7 SISO "); + for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) + sprintf(buf[strlen(buf)], " %2d%s", + txpwr->mcs_40_siso[i] / BRCMS_TXPWR_DB_FACTOR, + fraction[txpwr->mcs_40_siso[i] % + BRCMS_TXPWR_DB_FACTOR]); + printk(KERN_DEBUG "%s\n", buf); + + sprintf(buf, "40 MHz MCS0-7 CDD "); + for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) + sprintf(buf[strlen(buf)], " %2d%s", + txpwr->mcs_40_cdd[i] / BRCMS_TXPWR_DB_FACTOR, + fraction[txpwr->mcs_40_cdd[i] % + BRCMS_TXPWR_DB_FACTOR]); + printk(KERN_DEBUG "%s\n", buf); + + sprintf(buf, "40 MHz MCS0-7 STBC "); + for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) + sprintf(buf[strlen(buf)], " %2d%s", + txpwr->mcs_40_stbc[i] / BRCMS_TXPWR_DB_FACTOR, + fraction[txpwr->mcs_40_stbc[i] % + BRCMS_TXPWR_DB_FACTOR]); + printk(KERN_DEBUG "%s\n", buf); + + sprintf(buf, "40 MHz MCS8-15 SDM "); + for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++) + sprintf(buf[strlen(buf)], " %2d%s", + txpwr->mcs_40_mimo[i] / BRCMS_TXPWR_DB_FACTOR, + fraction[txpwr->mcs_40_mimo[i] % + BRCMS_TXPWR_DB_FACTOR]); + } + printk(KERN_DEBUG "%s\n", buf); + + printk(KERN_DEBUG "MCS32 %2d%s\n", + txpwr->mcs32 / BRCMS_TXPWR_DB_FACTOR, + fraction[txpwr->mcs32 % BRCMS_TXPWR_DB_FACTOR]); +} +#endif /* POWER_DBG */ + void brcms_c_channel_reg_limits(struct brcms_cm_info 
*wlc_cm, u16 chanspec, struct txpwr_limits *txpwr) @@ -1363,6 +1478,9 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec, txpwr->mcs_40_stbc[i] = txpwr->mcs_40_cdd[i]; } +#ifdef POWER_DBG + wlc_phy_txpower_limits_dump(txpwr); +#endif return; } diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/d11.h b/trunk/drivers/net/wireless/brcm80211/brcmsmac/d11.h index 1948cb2771e9..ed51616abc85 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/d11.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/d11.h @@ -430,9 +430,6 @@ struct d11regs { u16 PAD[0x380]; /* 0x800 - 0xEFE */ }; -/* d11 register field offset */ -#define D11REGOFFS(field) offsetof(struct d11regs, field) - #define PIHR_BASE 0x0400 /* byte address of packed IHR region */ /* biststatus */ diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/dma.c index 2e90a9a16ed6..6ebec8f42846 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/dma.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/dma.c @@ -13,10 +13,8 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include +#include #include #include @@ -24,14 +22,6 @@ #include #include "types.h" #include "dma.h" -#include "soc.h" - -/* - * dma register field offset calculation - */ -#define DMA64REGOFFS(field) offsetof(struct dma64regs, field) -#define DMA64TXREGOFFS(di, field) (di->d64txregbase + DMA64REGOFFS(field)) -#define DMA64RXREGOFFS(di, field) (di->d64rxregbase + DMA64REGOFFS(field)) /* * DMA hardware requires each descriptor ring to be 8kB aligned, and fit within @@ -178,25 +168,26 @@ /* debug/trace */ #ifdef BCMDBG -#define DMA_ERROR(fmt, ...) \ -do { \ - if (*di->msg_level & 1) \ - pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \ -} while (0) -#define DMA_TRACE(fmt, ...) \ -do { \ - if (*di->msg_level & 2) \ - pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \ -} while (0) +#define DMA_ERROR(args) \ + do { \ + if (!(*di->msg_level & 1)) \ + ; \ + else \ + printk args; \ + } while (0) +#define DMA_TRACE(args) \ + do { \ + if (!(*di->msg_level & 2)) \ + ; \ + else \ + printk args; \ + } while (0) #else -#define DMA_ERROR(fmt, ...) \ - no_printk(fmt, ##__VA_ARGS__) -#define DMA_TRACE(fmt, ...) \ - no_printk(fmt, ##__VA_ARGS__) +#define DMA_ERROR(args) +#define DMA_TRACE(args) #endif /* BCMDBG */ -#define DMA_NONE(fmt, ...) 
\ - no_printk(fmt, ##__VA_ARGS__) +#define DMA_NONE(args) #define MAXNAMEL 8 /* 8 char names */ @@ -227,16 +218,15 @@ struct dma_info { uint *msg_level; /* message level pointer */ char name[MAXNAMEL]; /* callers name for diag msgs */ - struct bcma_device *core; - struct device *dmadev; + struct pci_dev *pbus; /* bus handle */ bool dma64; /* this dma engine is operating in 64-bit mode */ bool addrext; /* this dma engine supports DmaExtendedAddrChanges */ /* 64-bit dma tx engine registers */ - uint d64txregbase; + struct dma64regs __iomem *d64txregs; /* 64-bit dma rx engine registers */ - uint d64rxregbase; + struct dma64regs __iomem *d64rxregs; /* pointer to dma64 tx descriptor ring */ struct dma64desc *txd64; /* pointer to dma64 rx descriptor ring */ @@ -371,7 +361,7 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags) uint dmactrlflags; if (di == NULL) { - DMA_ERROR("NULL dma handle\n"); + DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n")); return 0; } @@ -383,16 +373,15 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags) if (dmactrlflags & DMA_CTRL_PEN) { u32 control; - control = bcma_read32(di->core, DMA64TXREGOFFS(di, control)); - bcma_write32(di->core, DMA64TXREGOFFS(di, control), + control = R_REG(&di->d64txregs->control); + W_REG(&di->d64txregs->control, control | D64_XC_PD); - if (bcma_read32(di->core, DMA64TXREGOFFS(di, control)) & - D64_XC_PD) + if (R_REG(&di->d64txregs->control) & D64_XC_PD) /* We *can* disable it so it is supported, * restore control register */ - bcma_write32(di->core, DMA64TXREGOFFS(di, control), - control); + W_REG(&di->d64txregs->control, + control); else /* Not supported, don't allow it to be enabled */ dmactrlflags &= ~DMA_CTRL_PEN; @@ -403,12 +392,12 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags) return dmactrlflags; } -static bool _dma64_addrext(struct dma_info *di, uint ctrl_offset) +static bool _dma64_addrext(struct dma64regs __iomem *dma64regs) { u32 w; - bcma_set32(di->core, ctrl_offset, D64_XC_AE); - w = bcma_read32(di->core, ctrl_offset); - bcma_mask32(di->core, ctrl_offset, ~D64_XC_AE); + OR_REG(&dma64regs->control, D64_XC_AE); + w = R_REG(&dma64regs->control); + AND_REG(&dma64regs->control, ~D64_XC_AE); return (w & D64_XC_AE) == D64_XC_AE; } @@ -421,15 +410,15 @@ static bool _dma_isaddrext(struct dma_info *di) /* DMA64 supports full 32- or 64-bit operation. 
AE is always valid */ /* not all tx or rx channel are available */ - if (di->d64txregbase != 0) { - if (!_dma64_addrext(di, DMA64TXREGOFFS(di, control))) - DMA_ERROR("%s: DMA64 tx doesn't have AE set\n", - di->name); + if (di->d64txregs != NULL) { + if (!_dma64_addrext(di->d64txregs)) + DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have " + "AE set\n", di->name)); return true; - } else if (di->d64rxregbase != 0) { - if (!_dma64_addrext(di, DMA64RXREGOFFS(di, control))) - DMA_ERROR("%s: DMA64 rx doesn't have AE set\n", - di->name); + } else if (di->d64rxregs != NULL) { + if (!_dma64_addrext(di->d64rxregs)) + DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have " + "AE set\n", di->name)); return true; } @@ -441,14 +430,14 @@ static bool _dma_descriptor_align(struct dma_info *di) u32 addrl; /* Check to see if the descriptors need to be aligned on 4K/8K or not */ - if (di->d64txregbase != 0) { - bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow), 0xff0); - addrl = bcma_read32(di->core, DMA64TXREGOFFS(di, addrlow)); + if (di->d64txregs != NULL) { + W_REG(&di->d64txregs->addrlow, 0xff0); + addrl = R_REG(&di->d64txregs->addrlow); if (addrl != 0) return false; - } else if (di->d64rxregbase != 0) { - bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow), 0xff0); - addrl = bcma_read32(di->core, DMA64RXREGOFFS(di, addrlow)); + } else if (di->d64rxregs != NULL) { + W_REG(&di->d64rxregs->addrlow, 0xff0); + addrl = R_REG(&di->d64rxregs->addrlow); if (addrl != 0) return false; } @@ -459,7 +448,7 @@ static bool _dma_descriptor_align(struct dma_info *di) * Descriptor table must start at the DMA hardware dictated alignment, so * allocated memory must be large enough to support this requirement. */ -static void *dma_alloc_consistent(struct dma_info *di, uint size, +static void *dma_alloc_consistent(struct pci_dev *pdev, uint size, u16 align_bits, uint *alloced, dma_addr_t *pap) { @@ -469,7 +458,7 @@ static void *dma_alloc_consistent(struct dma_info *di, uint size, size += align; *alloced = size; } - return dma_alloc_coherent(di->dmadev, size, pap, GFP_ATOMIC); + return pci_alloc_consistent(pdev, size, pap); } static @@ -495,7 +484,7 @@ static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size, u32 desc_strtaddr; u32 alignbytes = 1 << *alignbits; - va = dma_alloc_consistent(di, size, *alignbits, alloced, descpa); + va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa); if (NULL == va) return NULL; @@ -504,8 +493,8 @@ static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size, if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr & boundary)) { *alignbits = dma_align_sizetobits(size); - dma_free_coherent(di->dmadev, size, va, *descpa); - va = dma_alloc_consistent(di, size, *alignbits, + pci_free_consistent(di->pbus, size, va, *descpa); + va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa); } return va; @@ -530,8 +519,8 @@ static bool dma64_alloc(struct dma_info *di, uint direction) va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits, &alloced, &di->txdpaorig); if (va == NULL) { - DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n", - di->name); + DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd)" + " failed\n", di->name)); return false; } align = (1 << align_bits); @@ -544,8 +533,8 @@ static bool dma64_alloc(struct dma_info *di, uint direction) va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits, &alloced, &di->rxdpaorig); if (va == NULL) { - DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n", - di->name); + DMA_ERROR(("%s: 
dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd)" + " failed\n", di->name)); return false; } align = (1 << align_bits); @@ -565,13 +554,12 @@ static bool _dma_alloc(struct dma_info *di, uint direction) } struct dma_pub *dma_attach(char *name, struct si_pub *sih, - struct bcma_device *core, - uint txregbase, uint rxregbase, uint ntxd, uint nrxd, - uint rxbufsize, int rxextheadroom, - uint nrxpost, uint rxoffset, uint *msg_level) + void __iomem *dmaregstx, void __iomem *dmaregsrx, + uint ntxd, uint nrxd, + uint rxbufsize, int rxextheadroom, + uint nrxpost, uint rxoffset, uint *msg_level) { struct dma_info *di; - u8 rev = core->id.rev; uint size; /* allocate private info structure */ @@ -582,13 +570,11 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih, di->msg_level = msg_level ? msg_level : &dma_msg_level; - di->dma64 = - ((bcma_aread32(core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64); + di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64); - /* init dma reg info */ - di->core = core; - di->d64txregbase = txregbase; - di->d64rxregbase = rxregbase; + /* init dma reg pointer */ + di->d64txregs = (struct dma64regs __iomem *) dmaregstx; + di->d64rxregs = (struct dma64regs __iomem *) dmaregsrx; /* * Default flags (which can be changed by the driver calling @@ -597,17 +583,17 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih, */ _dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0); - DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d " - "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d " - "txregbase %u rxregbase %u\n", name, "DMA64", - di->dma.dmactrlflags, ntxd, nrxd, rxbufsize, - rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase); + DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d " + "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d " + "dmaregstx %p dmaregsrx %p\n", name, "DMA64", + di->dma.dmactrlflags, ntxd, nrxd, rxbufsize, + rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx)); /* make a private copy of our callers name */ strncpy(di->name, name, MAXNAMEL); di->name[MAXNAMEL - 1] = '\0'; - di->dmadev = core->dma_dev; + di->pbus = ((struct si_info *)sih)->pbus; /* save tunables */ di->ntxd = (u16) ntxd; @@ -639,12 +625,12 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih, di->dataoffsetlow = di->ddoffsetlow; di->dataoffsethigh = di->ddoffsethigh; /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. 
*/ - if ((core->id.id == SDIOD_CORE_ID) - && ((rev > 0) && (rev <= 2))) - di->addrext = false; - else if ((core->id.id == I2S_CORE_ID) && - ((rev == 0) || (rev == 1))) - di->addrext = false; + if ((ai_coreid(sih) == SDIOD_CORE_ID) + && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2))) + di->addrext = 0; + else if ((ai_coreid(sih) == I2S_CORE_ID) && + ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1))) + di->addrext = 0; else di->addrext = _dma_isaddrext(di); @@ -659,8 +645,8 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih, di->dmadesc_align = 4; /* 16 byte alignment */ } - DMA_NONE("DMA descriptor align_needed %d, align %d\n", - di->aligndesc_4k, di->dmadesc_align); + DMA_NONE(("DMA descriptor align_needed %d, align %d\n", + di->aligndesc_4k, di->dmadesc_align)); /* allocate tx packet pointer vector */ if (ntxd) { @@ -698,21 +684,21 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih, if ((di->ddoffsetlow != 0) && !di->addrext) { if (di->txdpa > SI_PCI_DMA_SZ) { - DMA_ERROR("%s: txdpa 0x%x: addrext not supported\n", - di->name, (u32)di->txdpa); + DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not " + "supported\n", di->name, (u32)di->txdpa)); goto fail; } if (di->rxdpa > SI_PCI_DMA_SZ) { - DMA_ERROR("%s: rxdpa 0x%x: addrext not supported\n", - di->name, (u32)di->rxdpa); + DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not " + "supported\n", di->name, (u32)di->rxdpa)); goto fail; } } - DMA_TRACE("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n", - di->ddoffsetlow, di->ddoffsethigh, - di->dataoffsetlow, di->dataoffsethigh, - di->addrext); + DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x " + "dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow, + di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh, + di->addrext)); return (struct dma_pub *) di; @@ -758,17 +744,17 @@ void dma_detach(struct dma_pub *pub) { struct dma_info *di = (struct dma_info *)pub; - DMA_TRACE("%s:\n", di->name); + DMA_TRACE(("%s: dma_detach\n", di->name)); /* free dma descriptor rings */ if (di->txd64) - dma_free_coherent(di->dmadev, di->txdalloc, - ((s8 *)di->txd64 - di->txdalign), - (di->txdpaorig)); + pci_free_consistent(di->pbus, di->txdalloc, + ((s8 *)di->txd64 - di->txdalign), + (di->txdpaorig)); if (di->rxd64) - dma_free_coherent(di->dmadev, di->rxdalloc, - ((s8 *)di->rxd64 - di->rxdalign), - (di->rxdpaorig)); + pci_free_consistent(di->pbus, di->rxdalloc, + ((s8 *)di->rxd64 - di->rxdalign), + (di->rxdpaorig)); /* free packet pointer vectors */ kfree(di->txp); @@ -793,15 +779,11 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa) if ((di->ddoffsetlow == 0) || !(pa & PCI32ADDR_HIGH)) { if (direction == DMA_TX) { - bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow), - pa + di->ddoffsetlow); - bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh), - di->ddoffsethigh); + W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow); + W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh); } else { - bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow), - pa + di->ddoffsetlow); - bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh), - di->ddoffsethigh); + W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow); + W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh); } } else { /* DMA64 32bits address extension */ @@ -812,19 +794,15 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa) pa &= ~PCI32ADDR_HIGH; if (direction == DMA_TX) { - bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow), - pa + di->ddoffsetlow); - 
bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh), - di->ddoffsethigh); - bcma_maskset32(di->core, DMA64TXREGOFFS(di, control), - D64_XC_AE, (ae << D64_XC_AE_SHIFT)); + W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow); + W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh); + SET_REG(&di->d64txregs->control, + D64_XC_AE, (ae << D64_XC_AE_SHIFT)); } else { - bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow), - pa + di->ddoffsetlow); - bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh), - di->ddoffsethigh); - bcma_maskset32(di->core, DMA64RXREGOFFS(di, control), - D64_RC_AE, (ae << D64_RC_AE_SHIFT)); + W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow); + W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh); + SET_REG(&di->d64rxregs->control, + D64_RC_AE, (ae << D64_RC_AE_SHIFT)); } } } @@ -834,11 +812,11 @@ static void _dma_rxenable(struct dma_info *di) uint dmactrlflags = di->dma.dmactrlflags; u32 control; - DMA_TRACE("%s:\n", di->name); + DMA_TRACE(("%s: dma_rxenable\n", di->name)); - control = D64_RC_RE | (bcma_read32(di->core, - DMA64RXREGOFFS(di, control)) & - D64_RC_AE); + control = + (R_REG(&di->d64rxregs->control) & D64_RC_AE) | + D64_RC_RE; if ((dmactrlflags & DMA_CTRL_PEN) == 0) control |= D64_RC_PD; @@ -846,7 +824,7 @@ static void _dma_rxenable(struct dma_info *di) if (dmactrlflags & DMA_CTRL_ROC) control |= D64_RC_OC; - bcma_write32(di->core, DMA64RXREGOFFS(di, control), + W_REG(&di->d64rxregs->control, ((di->rxoffset << D64_RC_RO_SHIFT) | control)); } @@ -854,7 +832,7 @@ void dma_rxinit(struct dma_pub *pub) { struct dma_info *di = (struct dma_info *)pub; - DMA_TRACE("%s:\n", di->name); + DMA_TRACE(("%s: dma_rxinit\n", di->name)); if (di->nrxd == 0) return; @@ -889,8 +867,7 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall) return NULL; curr = - B2I(((bcma_read32(di->core, - DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) - + B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) - di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc); /* ignore curr if forceall */ @@ -904,7 +881,7 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall) pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow; /* clear this packet from the descriptor ring */ - dma_unmap_single(di->dmadev, pa, di->rxbufsize, DMA_FROM_DEVICE); + pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE); di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef); di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef); @@ -924,7 +901,7 @@ static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall) /* * !! rx entry routine - * returns the number packages in the next frame, or 0 if there are no more + * returns a pointer to the next frame received, or NULL if there are no more * if DMA_CTRL_RXMULTI is defined, DMA scattering(multiple buffers) is * supported with pkts chain * otherwise, it's treated as giant pkt and will be tossed. @@ -932,83 +909,74 @@ static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall) * buffer data. 
After it reaches the max size of buffer, the data continues * in next DMA descriptor buffer WITHOUT DMA header */ -int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list) +struct sk_buff *dma_rx(struct dma_pub *pub) { struct dma_info *di = (struct dma_info *)pub; - struct sk_buff_head dma_frames; - struct sk_buff *p, *next; + struct sk_buff *p, *head, *tail; uint len; uint pkt_len; int resid = 0; - int pktcnt = 1; - skb_queue_head_init(&dma_frames); next_frame: - p = _dma_getnextrxp(di, false); - if (p == NULL) - return 0; + head = _dma_getnextrxp(di, false); + if (head == NULL) + return NULL; - len = le16_to_cpu(*(__le16 *) (p->data)); - DMA_TRACE("%s: dma_rx len %d\n", di->name, len); - dma_spin_for_len(len, p); + len = le16_to_cpu(*(__le16 *) (head->data)); + DMA_TRACE(("%s: dma_rx len %d\n", di->name, len)); + dma_spin_for_len(len, head); /* set actual length */ pkt_len = min((di->rxoffset + len), di->rxbufsize); - __skb_trim(p, pkt_len); - skb_queue_tail(&dma_frames, p); + __skb_trim(head, pkt_len); resid = len - (di->rxbufsize - di->rxoffset); /* check for single or multi-buffer rx */ if (resid > 0) { + tail = head; while ((resid > 0) && (p = _dma_getnextrxp(di, false))) { + tail->next = p; pkt_len = min_t(uint, resid, di->rxbufsize); __skb_trim(p, pkt_len); - skb_queue_tail(&dma_frames, p); + + tail = p; resid -= di->rxbufsize; - pktcnt++; } #ifdef BCMDBG if (resid > 0) { uint cur; cur = - B2I(((bcma_read32(di->core, - DMA64RXREGOFFS(di, status0)) & - D64_RS0_CD_MASK) - di->rcvptrbase) & - D64_RS0_CD_MASK, struct dma64desc); - DMA_ERROR("rxin %d rxout %d, hw_curr %d\n", - di->rxin, di->rxout, cur); + B2I(((R_REG(&di->d64rxregs->status0) & + D64_RS0_CD_MASK) - + di->rcvptrbase) & D64_RS0_CD_MASK, + struct dma64desc); + DMA_ERROR(("dma_rx, rxin %d rxout %d, hw_curr %d\n", + di->rxin, di->rxout, cur)); } #endif /* BCMDBG */ if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) { - DMA_ERROR("%s: bad frame length (%d)\n", - di->name, len); - skb_queue_walk_safe(&dma_frames, p, next) { - skb_unlink(p, &dma_frames); - brcmu_pkt_buf_free_skb(p); - } + DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", + di->name, len)); + brcmu_pkt_buf_free_skb(head); di->dma.rxgiants++; - pktcnt = 1; goto next_frame; } } - skb_queue_splice_tail(&dma_frames, skb_list); - return pktcnt; + return head; } static bool dma64_rxidle(struct dma_info *di) { - DMA_TRACE("%s:\n", di->name); + DMA_TRACE(("%s: dma_rxidle\n", di->name)); if (di->nrxd == 0) return true; - return ((bcma_read32(di->core, - DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) == - (bcma_read32(di->core, DMA64RXREGOFFS(di, ptr)) & - D64_RS0_CD_MASK)); + return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) == + (R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK)); } /* @@ -1042,7 +1010,7 @@ bool dma_rxfill(struct dma_pub *pub) n = di->nrxpost - nrxdactive(di, rxin, rxout); - DMA_TRACE("%s: post %d\n", di->name, n); + DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n)); if (di->rxbufsize > BCMEXTRAHDROOM) extra_offset = di->rxextrahdrroom; @@ -1055,9 +1023,11 @@ bool dma_rxfill(struct dma_pub *pub) p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset); if (p == NULL) { - DMA_ERROR("%s: out of rxbufs\n", di->name); + DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", + di->name)); if (i == 0 && dma64_rxidle(di)) { - DMA_ERROR("%s: ring is empty !\n", di->name); + DMA_ERROR(("%s: rxfill64: ring is empty !\n", + di->name)); ring_empty = true; } di->dma.rxnobuf++; @@ -1072,8 +1042,8 @@ bool dma_rxfill(struct dma_pub *pub) */ *(u32 *) (p->data) = 
0; - pa = dma_map_single(di->dmadev, p->data, di->rxbufsize, - DMA_FROM_DEVICE); + pa = pci_map_single(di->pbus, p->data, + di->rxbufsize, PCI_DMA_FROMDEVICE); /* save the free packet pointer */ di->rxp[rxout] = p; @@ -1091,7 +1061,7 @@ bool dma_rxfill(struct dma_pub *pub) di->rxout = rxout; /* update the chip lastdscr pointer */ - bcma_write32(di->core, DMA64RXREGOFFS(di, ptr), + W_REG(&di->d64rxregs->ptr, di->rcvptrbase + I2B(rxout, struct dma64desc)); return ring_empty; @@ -1102,7 +1072,7 @@ void dma_rxreclaim(struct dma_pub *pub) struct dma_info *di = (struct dma_info *)pub; struct sk_buff *p; - DMA_TRACE("%s:\n", di->name); + DMA_TRACE(("%s: dma_rxreclaim\n", di->name)); while ((p = _dma_getnextrxp(di, true))) brcmu_pkt_buf_free_skb(p); @@ -1133,7 +1103,7 @@ void dma_txinit(struct dma_pub *pub) struct dma_info *di = (struct dma_info *)pub; u32 control = D64_XC_XE; - DMA_TRACE("%s:\n", di->name); + DMA_TRACE(("%s: dma_txinit\n", di->name)); if (di->ntxd == 0) return; @@ -1152,7 +1122,7 @@ void dma_txinit(struct dma_pub *pub) if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0) control |= D64_XC_PD; - bcma_set32(di->core, DMA64TXREGOFFS(di, control), control); + OR_REG(&di->d64txregs->control, control); /* DMA engine with alignment requirement requires table to be inited * before enabling the engine @@ -1165,24 +1135,24 @@ void dma_txsuspend(struct dma_pub *pub) { struct dma_info *di = (struct dma_info *)pub; - DMA_TRACE("%s:\n", di->name); + DMA_TRACE(("%s: dma_txsuspend\n", di->name)); if (di->ntxd == 0) return; - bcma_set32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE); + OR_REG(&di->d64txregs->control, D64_XC_SE); } void dma_txresume(struct dma_pub *pub) { struct dma_info *di = (struct dma_info *)pub; - DMA_TRACE("%s:\n", di->name); + DMA_TRACE(("%s: dma_txresume\n", di->name)); if (di->ntxd == 0) return; - bcma_mask32(di->core, DMA64TXREGOFFS(di, control), ~D64_XC_SE); + AND_REG(&di->d64txregs->control, ~D64_XC_SE); } bool dma_txsuspended(struct dma_pub *pub) @@ -1190,9 +1160,8 @@ bool dma_txsuspended(struct dma_pub *pub) struct dma_info *di = (struct dma_info *)pub; return (di->ntxd == 0) || - ((bcma_read32(di->core, - DMA64TXREGOFFS(di, control)) & D64_XC_SE) == - D64_XC_SE); + ((R_REG(&di->d64txregs->control) & D64_XC_SE) == + D64_XC_SE); } void dma_txreclaim(struct dma_pub *pub, enum txd_range range) @@ -1200,11 +1169,11 @@ void dma_txreclaim(struct dma_pub *pub, enum txd_range range) struct dma_info *di = (struct dma_info *)pub; struct sk_buff *p; - DMA_TRACE("%s: %s\n", - di->name, - range == DMA_RANGE_ALL ? "all" : - range == DMA_RANGE_TRANSMITTED ? "transmitted" : - "transferred"); + DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, + (range == DMA_RANGE_ALL) ? "all" : + ((range == + DMA_RANGE_TRANSMITTED) ? 
"transmitted" : + "transferred"))); if (di->txin == di->txout) return; @@ -1225,17 +1194,16 @@ bool dma_txreset(struct dma_pub *pub) return true; /* suspend tx DMA first */ - bcma_write32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE); + W_REG(&di->d64txregs->control, D64_XC_SE); SPINWAIT(((status = - (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) & - D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED) && - (status != D64_XS0_XS_IDLE) && (status != D64_XS0_XS_STOPPED), - 10000); + (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) + != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE) + && (status != D64_XS0_XS_STOPPED), 10000); - bcma_write32(di->core, DMA64TXREGOFFS(di, control), 0); + W_REG(&di->d64txregs->control, 0); SPINWAIT(((status = - (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) & - D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED), 10000); + (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) + != D64_XS0_XS_DISABLED), 10000); /* wait for the last transaction to complete */ udelay(300); @@ -1251,10 +1219,10 @@ bool dma_rxreset(struct dma_pub *pub) if (di->nrxd == 0) return true; - bcma_write32(di->core, DMA64RXREGOFFS(di, control), 0); + W_REG(&di->d64rxregs->control, 0); SPINWAIT(((status = - (bcma_read32(di->core, DMA64RXREGOFFS(di, status0)) & - D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED), 10000); + (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK)) + != D64_RS0_RS_DISABLED), 10000); return status == D64_RS0_RS_DISABLED; } @@ -1265,58 +1233,72 @@ bool dma_rxreset(struct dma_pub *pub) * the error(toss frames) could be fatal and cause many subsequent hard * to debug problems */ -int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit) +int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit) { struct dma_info *di = (struct dma_info *)pub; + struct sk_buff *p, *next; unsigned char *data; uint len; u16 txout; u32 flags = 0; dma_addr_t pa; - DMA_TRACE("%s:\n", di->name); + DMA_TRACE(("%s: dma_txfast\n", di->name)); txout = di->txout; /* - * obtain and initialize transmit descriptor entry. + * Walk the chain of packet buffers + * allocating and initializing transmit descriptor entries. */ - data = p->data; - len = p->len; + for (p = p0; p; p = next) { + data = p->data; + len = p->len; + next = p->next; - /* no use to transmit a zero length packet */ - if (len == 0) - return 0; + /* return nonzero if out of tx descriptors */ + if (nexttxd(di, txout) == di->txin) + goto outoftxd; - /* return nonzero if out of tx descriptors */ - if (nexttxd(di, txout) == di->txin) - goto outoftxd; + if (len == 0) + continue; - /* get physical address of buffer start */ - pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE); + /* get physical address of buffer start */ + pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE); - /* With a DMA segment list, Descriptor table is filled - * using the segment list instead of looping over - * buffers in multi-chain DMA. Therefore, EOF for SGLIST - * is when end of segment list is reached. - */ - flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF; - if (txout == (di->ntxd - 1)) - flags |= D64_CTRL1_EOT; + flags = 0; + if (p == p0) + flags |= D64_CTRL1_SOF; - dma64_dd_upd(di, di->txd64, pa, txout, &flags, len); + /* With a DMA segment list, Descriptor table is filled + * using the segment list instead of looping over + * buffers in multi-chain DMA. Therefore, EOF for SGLIST + * is when end of segment list is reached. 
+ */ + if (next == NULL) + flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF); + if (txout == (di->ntxd - 1)) + flags |= D64_CTRL1_EOT; - txout = nexttxd(di, txout); + dma64_dd_upd(di, di->txd64, pa, txout, &flags, len); + + txout = nexttxd(di, txout); + } + + /* if last txd eof not set, fix it */ + if (!(flags & D64_CTRL1_EOF)) + di->txd64[prevtxd(di, txout)].ctrl1 = + cpu_to_le32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF); /* save the packet */ - di->txp[prevtxd(di, txout)] = p; + di->txp[prevtxd(di, txout)] = p0; /* bump the tx descriptor index */ di->txout = txout; /* kick the chip */ if (commit) - bcma_write32(di->core, DMA64TXREGOFFS(di, ptr), + W_REG(&di->d64txregs->ptr, di->xmtptrbase + I2B(txout, struct dma64desc)); /* tx flow control */ @@ -1325,8 +1307,8 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit) return 0; outoftxd: - DMA_ERROR("%s: out of txds !!!\n", di->name); - brcmu_pkt_buf_free_skb(p); + DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name)); + brcmu_pkt_buf_free_skb(p0); di->dma.txavail = 0; di->dma.txnobuf++; return -1; @@ -1349,11 +1331,11 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range) u16 active_desc; struct sk_buff *txp; - DMA_TRACE("%s: %s\n", - di->name, - range == DMA_RANGE_ALL ? "all" : - range == DMA_RANGE_TRANSMITTED ? "transmitted" : - "transferred"); + DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, + (range == DMA_RANGE_ALL) ? "all" : + ((range == + DMA_RANGE_TRANSMITTED) ? "transmitted" : + "transferred"))); if (di->ntxd == 0) return NULL; @@ -1364,15 +1346,16 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range) if (range == DMA_RANGE_ALL) end = di->txout; else { - end = (u16) (B2I(((bcma_read32(di->core, - DMA64TXREGOFFS(di, status0)) & - D64_XS0_CD_MASK) - di->xmtptrbase) & - D64_XS0_CD_MASK, struct dma64desc)); + struct dma64regs __iomem *dregs = di->d64txregs; + + end = (u16) (B2I(((R_REG(&dregs->status0) & + D64_XS0_CD_MASK) - + di->xmtptrbase) & D64_XS0_CD_MASK, + struct dma64desc)); if (range == DMA_RANGE_TRANSFERED) { active_desc = - (u16)(bcma_read32(di->core, - DMA64TXREGOFFS(di, status1)) & + (u16) (R_REG(&dregs->status1) & D64_XS1_AD_MASK); active_desc = (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK; @@ -1401,7 +1384,7 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range) txp = di->txp[i]; di->txp[i] = NULL; - dma_unmap_single(di->dmadev, pa, size, DMA_TO_DEVICE); + pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE); } di->txin = i; @@ -1412,8 +1395,8 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range) return txp; bogus: - DMA_NONE("bogus curr: start %d end %d txout %d\n", - start, end, di->txout); + DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d " + "force %d\n", start, end, di->txout, forceall)); return NULL; } diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/dma.h b/trunk/drivers/net/wireless/brcm80211/brcmsmac/dma.h index cc269ee5c499..ebc5bc546f3b 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/dma.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/dma.h @@ -18,7 +18,6 @@ #define _BRCM_DMA_H_ #include -#include #include "types.h" /* forward structure declarations */ /* map/unmap direction */ @@ -75,14 +74,13 @@ struct dma_pub { }; extern struct dma_pub *dma_attach(char *name, struct si_pub *sih, - struct bcma_device *d11core, - uint txregbase, uint rxregbase, - uint ntxd, uint nrxd, - uint rxbufsize, int rxextheadroom, - uint nrxpost, uint rxoffset, uint *msg_level); + void 
__iomem *dmaregstx, void __iomem *dmaregsrx, + uint ntxd, uint nrxd, + uint rxbufsize, int rxextheadroom, + uint nrxpost, uint rxoffset, uint *msg_level); void dma_rxinit(struct dma_pub *pub); -int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list); +struct sk_buff *dma_rx(struct dma_pub *pub); bool dma_rxfill(struct dma_pub *pub); bool dma_rxreset(struct dma_pub *pub); bool dma_txreset(struct dma_pub *pub); diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index d106576ce338..0d8a9cdf897a 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -17,11 +17,11 @@ #define __UNDEF_NO_VERSION__ #include +#include #include #include #include #include -#include #include #include #include "nicpci.h" @@ -40,10 +40,10 @@ #define MAC_FILTERS (FIF_PROMISC_IN_BSS | \ FIF_ALLMULTI | \ FIF_FCSFAIL | \ + FIF_PLCPFAIL | \ FIF_CONTROL | \ FIF_OTHER_BSS | \ - FIF_BCN_PRBRESP_PROMISC | \ - FIF_PSPOLL) + FIF_BCN_PRBRESP_PROMISC) #define CHAN2GHZ(channel, freqency, chflags) { \ .band = IEEE80211_BAND_2GHZ, \ @@ -87,14 +87,16 @@ MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver."); MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards"); MODULE_LICENSE("Dual BSD/GPL"); - -/* recognized BCMA Core IDs */ -static struct bcma_device_id brcms_coreid_table[] = { - BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 23, BCMA_ANY_CLASS), - BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 24, BCMA_ANY_CLASS), - BCMA_CORETABLE_END +/* recognized PCI IDs */ +static DEFINE_PCI_DEVICE_TABLE(brcms_pci_id_table) = { + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, /* 43225 2G */ + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, /* 43224 DUAL */ + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, /* 4313 DUAL */ + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) }, /* 43224 Ven */ + {0} }; -MODULE_DEVICE_TABLE(bcma, brcms_coreid_table); + +MODULE_DEVICE_TABLE(pci, brcms_pci_id_table); #ifdef BCMDBG static int msglevel = 0xdeadbeef; @@ -214,7 +216,8 @@ static const struct ieee80211_supported_band brcms_band_2GHz_nphy_template = { .ht_cap = { /* from include/linux/ieee80211.h */ .cap = IEEE80211_HT_CAP_GRN_FLD | - IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40, + IEEE80211_HT_CAP_SGI_20 | + IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT, .ht_supported = true, .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, .ampdu_density = AMPDU_DEF_MPDU_DENSITY, @@ -235,7 +238,8 @@ static const struct ieee80211_supported_band brcms_band_5GHz_nphy_template = { BRCMS_LEGACY_5G_RATE_OFFSET, .ht_cap = { .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | - IEEE80211_HT_CAP_SGI_40, + IEEE80211_HT_CAP_SGI_40 | + IEEE80211_HT_CAP_40MHZ_INTOLERANT, /* No 40 mhz yet */ .ht_supported = true, .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, .ampdu_density = AMPDU_DEF_MPDU_DENSITY, @@ -283,7 +287,6 @@ static int brcms_ops_start(struct ieee80211_hw *hw) { struct brcms_info *wl = hw->priv; bool blocked; - int err; ieee80211_wake_queues(hw); spin_lock_bh(&wl->lock); @@ -292,69 +295,57 @@ static int brcms_ops_start(struct ieee80211_hw *hw) if (!blocked) wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); - spin_lock_bh(&wl->lock); - /* avoid acknowledging frames before a non-monitor device is added */ - wl->mute_tx = true; - - if (!wl->pub->up) - err = brcms_up(wl); - else - err = -ENODEV; - spin_unlock_bh(&wl->lock); - - if (err != 0) - wiphy_err(hw->wiphy, "%s: brcms_up() returned 
%d\n", __func__, - err); - return err; + return 0; } static void brcms_ops_stop(struct ieee80211_hw *hw) { - struct brcms_info *wl = hw->priv; - int status; - ieee80211_stop_queues(hw); - - if (wl->wlc == NULL) - return; - - spin_lock_bh(&wl->lock); - status = brcms_c_chipmatch(wl->wlc->hw->vendorid, - wl->wlc->hw->deviceid); - spin_unlock_bh(&wl->lock); - if (!status) { - wiphy_err(wl->wiphy, - "wl: brcms_ops_stop: chipmatch failed\n"); - return; - } - - /* put driver in down state */ - spin_lock_bh(&wl->lock); - brcms_down(wl); - spin_unlock_bh(&wl->lock); } static int brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct brcms_info *wl = hw->priv; + struct brcms_info *wl; + int err; /* Just STA for now */ - if (vif->type != NL80211_IFTYPE_STATION) { + if (vif->type != NL80211_IFTYPE_AP && + vif->type != NL80211_IFTYPE_MESH_POINT && + vif->type != NL80211_IFTYPE_STATION && + vif->type != NL80211_IFTYPE_WDS && + vif->type != NL80211_IFTYPE_ADHOC) { wiphy_err(hw->wiphy, "%s: Attempt to add type %d, only" " STA for now\n", __func__, vif->type); return -EOPNOTSUPP; } - wl->mute_tx = false; - brcms_c_mute(wl->wlc, false); + wl = hw->priv; + spin_lock_bh(&wl->lock); + if (!wl->pub->up) + err = brcms_up(wl); + else + err = -ENODEV; + spin_unlock_bh(&wl->lock); - return 0; + if (err != 0) + wiphy_err(hw->wiphy, "%s: brcms_up() returned %d\n", __func__, + err); + + return err; } static void brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { + struct brcms_info *wl; + + wl = hw->priv; + + /* put driver in down state */ + spin_lock_bh(&wl->lock); + brcms_down(wl); + spin_unlock_bh(&wl->lock); } static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed) @@ -371,7 +362,7 @@ static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed) conf->listen_interval); } if (changed & IEEE80211_CONF_CHANGE_MONITOR) - wiphy_dbg(wiphy, "%s: change monitor mode: %s\n", + wiphy_err(wiphy, "%s: change monitor mode: %s (implement)\n", __func__, conf->flags & IEEE80211_CONF_MONITOR ? 
"true" : "false"); if (changed & IEEE80211_CONF_CHANGE_PS) @@ -548,25 +539,29 @@ brcms_ops_configure_filter(struct ieee80211_hw *hw, changed_flags &= MAC_FILTERS; *total_flags &= MAC_FILTERS; - if (changed_flags & FIF_PROMISC_IN_BSS) - wiphy_dbg(wiphy, "FIF_PROMISC_IN_BSS\n"); + wiphy_err(wiphy, "FIF_PROMISC_IN_BSS\n"); if (changed_flags & FIF_ALLMULTI) - wiphy_dbg(wiphy, "FIF_ALLMULTI\n"); + wiphy_err(wiphy, "FIF_ALLMULTI\n"); if (changed_flags & FIF_FCSFAIL) - wiphy_dbg(wiphy, "FIF_FCSFAIL\n"); + wiphy_err(wiphy, "FIF_FCSFAIL\n"); + if (changed_flags & FIF_PLCPFAIL) + wiphy_err(wiphy, "FIF_PLCPFAIL\n"); if (changed_flags & FIF_CONTROL) - wiphy_dbg(wiphy, "FIF_CONTROL\n"); + wiphy_err(wiphy, "FIF_CONTROL\n"); if (changed_flags & FIF_OTHER_BSS) - wiphy_dbg(wiphy, "FIF_OTHER_BSS\n"); - if (changed_flags & FIF_PSPOLL) - wiphy_dbg(wiphy, "FIF_PSPOLL\n"); - if (changed_flags & FIF_BCN_PRBRESP_PROMISC) - wiphy_dbg(wiphy, "FIF_BCN_PRBRESP_PROMISC\n"); - - spin_lock_bh(&wl->lock); - brcms_c_mac_promisc(wl->wlc, *total_flags); - spin_unlock_bh(&wl->lock); + wiphy_err(wiphy, "FIF_OTHER_BSS\n"); + if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { + spin_lock_bh(&wl->lock); + if (*total_flags & FIF_BCN_PRBRESP_PROMISC) { + wl->pub->mac80211_state |= MAC80211_PROMISC_BCNS; + brcms_c_mac_bcn_promisc_change(wl->wlc, 1); + } else { + brcms_c_mac_bcn_promisc_change(wl->wlc, 0); + wl->pub->mac80211_state &= ~MAC80211_PROMISC_BCNS; + } + spin_unlock_bh(&wl->lock); + } return; } @@ -614,6 +609,13 @@ brcms_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, wl->pub->global_ampdu->scb = scb; wl->pub->global_ampdu->max_pdu = 16; + sta->ht_cap.ht_supported = true; + sta->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; + sta->ht_cap.ampdu_density = AMPDU_DEF_MPDU_DENSITY; + sta->ht_cap.cap = IEEE80211_HT_CAP_GRN_FLD | + IEEE80211_HT_CAP_SGI_20 | + IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT; + /* * minstrel_ht initiates addBA on our behalf by calling * ieee80211_start_tx_ba_session() @@ -722,7 +724,7 @@ static const struct ieee80211_ops brcms_ops = { }; /* - * is called in brcms_bcma_probe() context, therefore no locking required. + * is called in brcms_pci_probe() context, therefore no locking required. */ static int brcms_set_hint(struct brcms_info *wl, char *abbrev) { @@ -862,26 +864,56 @@ static void brcms_free(struct brcms_info *wl) #endif kfree(t); } + + /* + * unregister_netdev() calls get_stats() which may read chip + * registers so we cannot unmap the chip registers until + * after calling unregister_netdev() . + */ + if (wl->regsva) + iounmap(wl->regsva); + + wl->regsva = NULL; } /* -* called from both kernel as from this kernel module (error flow on attach) +* called from both kernel as from this kernel module. * precondition: perimeter lock is not acquired. 
*/ -static void brcms_remove(struct bcma_device *pdev) +static void brcms_remove(struct pci_dev *pdev) { - struct ieee80211_hw *hw = bcma_get_drvdata(pdev); - struct brcms_info *wl = hw->priv; + struct brcms_info *wl; + struct ieee80211_hw *hw; + int status; + hw = pci_get_drvdata(pdev); + wl = hw->priv; + if (!wl) { + pr_err("wl: brcms_remove: pci_get_drvdata failed\n"); + return; + } + + spin_lock_bh(&wl->lock); + status = brcms_c_chipmatch(pdev->vendor, pdev->device); + spin_unlock_bh(&wl->lock); + if (!status) { + wiphy_err(wl->wiphy, "wl: brcms_remove: chipmatch " + "failed\n"); + return; + } if (wl->wlc) { wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false); wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); ieee80211_unregister_hw(hw); + spin_lock_bh(&wl->lock); + brcms_down(wl); + spin_unlock_bh(&wl->lock); } + pci_disable_device(pdev); brcms_free(wl); - bcma_set_drvdata(pdev, NULL); + pci_set_drvdata(pdev, NULL); ieee80211_free_hw(hw); } @@ -989,9 +1021,11 @@ static int ieee_hw_init(struct ieee80211_hw *hw) * it as static. * * - * is called in brcms_bcma_probe() context, therefore no locking required. + * is called in brcms_pci_probe() context, therefore no locking required. */ -static struct brcms_info *brcms_attach(struct bcma_device *pdev) +static struct brcms_info *brcms_attach(u16 vendor, u16 device, + resource_size_t regs, + struct pci_dev *btparam, uint irq) { struct brcms_info *wl = NULL; int unit, err; @@ -1005,7 +1039,7 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev) return NULL; /* allocate private info */ - hw = bcma_get_drvdata(pdev); + hw = pci_get_drvdata(btparam); /* btparam == pdev */ if (hw != NULL) wl = hw->priv; if (WARN_ON(hw == NULL) || WARN_ON(wl == NULL)) @@ -1017,20 +1051,26 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev) /* setup the bottom half handler */ tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl); + wl->regsva = ioremap_nocache(regs, PCI_BAR0_WINSZ); + if (wl->regsva == NULL) { + wiphy_err(wl->wiphy, "wl%d: ioremap() failed\n", unit); + goto fail; + } spin_lock_init(&wl->lock); spin_lock_init(&wl->isr_lock); /* prepare ucode */ - if (brcms_request_fw(wl, pdev->bus->host_pci) < 0) { + if (brcms_request_fw(wl, btparam) < 0) { wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in " "%s\n", KBUILD_MODNAME, "/lib/firmware/brcm"); brcms_release_fw(wl); - brcms_remove(pdev); + brcms_remove(btparam); return NULL; } /* common load-time initialization */ - wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err); + wl->wlc = brcms_c_attach(wl, vendor, device, unit, false, + wl->regsva, btparam, &err); brcms_release_fw(wl); if (!wl->wlc) { wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n", @@ -1041,13 +1081,15 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev) wl->pub->ieee_hw = hw; + /* disable mpc */ + brcms_c_set_radio_mpc(wl->wlc, false); + /* register our interrupt handler */ - if (request_irq(pdev->bus->host_pci->irq, brcms_isr, - IRQF_SHARED, KBUILD_MODNAME, wl)) { + if (request_irq(irq, brcms_isr, IRQF_SHARED, KBUILD_MODNAME, wl)) { wiphy_err(wl->wiphy, "wl%d: request_irq() failed\n", unit); goto fail; } - wl->irq = pdev->bus->host_pci->irq; + wl->irq = irq; /* register module */ brcms_c_module_register(wl->pub, "linux", wl, NULL); @@ -1094,19 +1136,38 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev) * * Perimeter lock is initialized in the course of this function. 
*/ -static int __devinit brcms_bcma_probe(struct bcma_device *pdev) +static int __devinit +brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + int rc; struct brcms_info *wl; struct ieee80211_hw *hw; + u32 val; - dev_info(&pdev->dev, "mfg %x core %x rev %d class %d irq %d\n", - pdev->id.manuf, pdev->id.id, pdev->id.rev, pdev->id.class, - pdev->bus->host_pci->irq); + dev_info(&pdev->dev, "bus %d slot %d func %d irq %d\n", + pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn), pdev->irq); - if ((pdev->id.manuf != BCMA_MANUF_BCM) || - (pdev->id.id != BCMA_CORE_80211)) + if ((pdev->vendor != PCI_VENDOR_ID_BROADCOM) || + ((pdev->device != 0x0576) && + ((pdev->device & 0xff00) != 0x4300) && + ((pdev->device & 0xff00) != 0x4700) && + ((pdev->device < 43000) || (pdev->device > 43999)))) return -ENODEV; + rc = pci_enable_device(pdev); + if (rc) { + pr_err("%s: Cannot enable device %d-%d_%d\n", + __func__, pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + return -ENODEV; + } + pci_set_master(pdev); + + pci_read_config_dword(pdev, 0x40, &val); + if ((val & 0x0000ff00) != 0) + pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); + hw = ieee80211_alloc_hw(sizeof(struct brcms_info), &brcms_ops); if (!hw) { pr_err("%s: ieee80211_alloc_hw failed\n", __func__); @@ -1115,11 +1176,14 @@ static int __devinit brcms_bcma_probe(struct bcma_device *pdev) SET_IEEE80211_DEV(hw, &pdev->dev); - bcma_set_drvdata(pdev, hw); + pci_set_drvdata(pdev, hw); memset(hw->priv, 0, sizeof(*wl)); - wl = brcms_attach(pdev); + wl = brcms_attach(pdev->vendor, pdev->device, + pci_resource_start(pdev, 0), pdev, + pdev->irq); + if (!wl) { pr_err("%s: %s: brcms_attach failed!\n", KBUILD_MODNAME, __func__); @@ -1128,23 +1192,16 @@ static int __devinit brcms_bcma_probe(struct bcma_device *pdev) return 0; } -static int brcms_pci_suspend(struct pci_dev *pdev) -{ - pci_save_state(pdev); - pci_disable_device(pdev); - return pci_set_power_state(pdev, PCI_D3hot); -} - -static int brcms_suspend(struct bcma_device *pdev, pm_message_t state) +static int brcms_suspend(struct pci_dev *pdev, pm_message_t state) { struct brcms_info *wl; struct ieee80211_hw *hw; - hw = bcma_get_drvdata(pdev); + hw = pci_get_drvdata(pdev); wl = hw->priv; if (!wl) { wiphy_err(wl->wiphy, - "brcms_suspend: bcma_get_drvdata failed\n"); + "brcms_suspend: pci_get_drvdata failed\n"); return -ENODEV; } @@ -1153,14 +1210,25 @@ static int brcms_suspend(struct bcma_device *pdev, pm_message_t state) wl->pub->hw_up = false; spin_unlock_bh(&wl->lock); - /* temporarily do suspend ourselves */ - return brcms_pci_suspend(pdev->bus->host_pci); + pci_save_state(pdev); + pci_disable_device(pdev); + return pci_set_power_state(pdev, PCI_D3hot); } -static int brcms_pci_resume(struct pci_dev *pdev) +static int brcms_resume(struct pci_dev *pdev) { + struct brcms_info *wl; + struct ieee80211_hw *hw; int err = 0; - uint val; + u32 val; + + hw = pci_get_drvdata(pdev); + wl = hw->priv; + if (!wl) { + wiphy_err(wl->wiphy, + "wl: brcms_resume: pci_get_drvdata failed\n"); + return -ENODEV; + } err = pci_set_power_state(pdev, PCI_D0); if (err) @@ -1178,28 +1246,24 @@ static int brcms_pci_resume(struct pci_dev *pdev) if ((val & 0x0000ff00) != 0) pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); - return 0; -} - -static int brcms_resume(struct bcma_device *pdev) -{ /* - * just do pci resume for now until bcma supports it. + * done. driver will be put in up state + * in brcms_ops_add_interface() call. 
*/ - return brcms_pci_resume(pdev->bus->host_pci); + return err; } -static struct bcma_driver brcms_bcma_driver = { +static struct pci_driver brcms_pci_driver = { .name = KBUILD_MODNAME, - .probe = brcms_bcma_probe, + .probe = brcms_pci_probe, .suspend = brcms_suspend, .resume = brcms_resume, .remove = __devexit_p(brcms_remove), - .id_table = brcms_coreid_table, + .id_table = brcms_pci_id_table, }; /** - * This is the main entry point for the brcmsmac driver. + * This is the main entry point for the WL driver. * * This function determines if a device pointed to by pdev is a WL device, * and if so, performs a brcms_attach() on it. @@ -1214,24 +1278,26 @@ static int __init brcms_module_init(void) brcm_msg_level = msglevel; #endif /* BCMDBG */ - error = bcma_driver_register(&brcms_bcma_driver); - printk(KERN_ERR "%s: register returned %d\n", __func__, error); + error = pci_register_driver(&brcms_pci_driver); if (!error) return 0; + + return error; } /** - * This function unloads the brcmsmac driver from the system. + * This function unloads the WL driver from the system. * - * This function unconditionally unloads the brcmsmac driver module from the + * This function unconditionally unloads the WL driver module from the * system. * */ static void __exit brcms_module_exit(void) { - bcma_driver_unregister(&brcms_bcma_driver); + pci_unregister_driver(&brcms_pci_driver); + } module_init(brcms_module_init); @@ -1253,7 +1319,8 @@ void brcms_init(struct brcms_info *wl) { BCMMSG(wl->pub->ieee_hw->wiphy, "wl%d\n", wl->pub->unit); brcms_reset(wl); - brcms_c_init(wl->wlc, wl->mute_tx); + + brcms_c_init(wl->wlc); } /* @@ -1265,19 +1332,11 @@ uint brcms_reset(struct brcms_info *wl) brcms_c_reset(wl->wlc); /* dpc will not be rescheduled */ - wl->resched = false; + wl->resched = 0; return 0; } -void brcms_fatal_error(struct brcms_info *wl) -{ - wiphy_err(wl->wlc->wiphy, "wl%d: fatal error, reinitializing\n", - wl->wlc->pub->unit); - brcms_reset(wl); - ieee80211_restart_hw(wl->pub->ieee_hw); -} - /* * These are interrupt on/off entry points. Disable interrupts * during interrupt state transition. @@ -1502,10 +1561,11 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx) if (le32_to_cpu(hdr->idx) == idx) { pdata = wl->fw.fw_bin[i]->data + le32_to_cpu(hdr->offset); - *pbuf = kmemdup(pdata, len, GFP_ATOMIC); + *pbuf = kmalloc(len, GFP_ATOMIC); if (*pbuf == NULL) goto fail; + memcpy(*pbuf, pdata, len); return 0; } } @@ -1518,7 +1578,7 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx) } /* - * Precondition: Since this function is called in brcms_bcma_probe() context, + * Precondition: Since this function is called in brcms_pci_probe() context, * no locking is required. */ int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes, u32 idx) @@ -1558,7 +1618,7 @@ void brcms_ucode_free_buf(void *p) /* * checks validity of all firmware images loaded from user space * - * Precondition: Since this function is called in brcms_bcma_probe() context, + * Precondition: Since this function is called in brcms_pci_probe() context, * no locking is required. 
*/ int brcms_check_firmwares(struct brcms_info *wl) diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h index 8f60419c37bf..177f0e44e4b6 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h @@ -68,6 +68,8 @@ struct brcms_info { spinlock_t lock; /* per-device perimeter lock */ spinlock_t isr_lock; /* per-device ISR synchronization lock */ + /* regsva for unmap in brcms_free() */ + void __iomem *regsva; /* opaque chip registers virtual address */ /* timer related fields */ atomic_t callbacks; /* # outstanding callback functions */ @@ -78,7 +80,6 @@ struct brcms_info { struct brcms_firmware fw; struct wiphy *wiphy; struct brcms_ucode ucode; - bool mute_tx; }; /* misc callbacks */ @@ -103,6 +104,5 @@ extern bool brcms_del_timer(struct brcms_timer *timer); extern void brcms_msleep(struct brcms_info *wl, uint ms); extern void brcms_dpc(unsigned long data); extern void brcms_timer(struct brcms_timer *t); -extern void brcms_fatal_error(struct brcms_info *wl); #endif /* _BRCM_MAC80211_IF_H_ */ diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/main.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/main.c index f7ed34034f88..510e9bb52287 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/main.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/main.c @@ -30,21 +30,44 @@ #include "mac80211_if.h" #include "ucode_loader.h" #include "main.h" -#include "soc.h" /* * Indication for txflowcontrol that all priority bits in * TXQ_STOP_FOR_PRIOFC_MASK are to be considered. */ -#define ALLPRIO -1 +#define ALLPRIO -1 + +/* + * 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL. 
+ */ +#define SSID_FMT_BUF_LEN ((4 * IEEE80211_MAX_SSID_LEN) + 1) /* watchdog timer, in unit of ms */ -#define TIMER_INTERVAL_WATCHDOG 1000 +#define TIMER_INTERVAL_WATCHDOG 1000 /* radio monitor timer, in unit of ms */ -#define TIMER_INTERVAL_RADIOCHK 800 +#define TIMER_INTERVAL_RADIOCHK 800 + +/* Max MPC timeout, in unit of watchdog */ +#ifndef BRCMS_MPC_MAX_DELAYCNT +#define BRCMS_MPC_MAX_DELAYCNT 10 +#endif + +/* Min MPC timeout, in unit of watchdog */ +#define BRCMS_MPC_MIN_DELAYCNT 1 +#define BRCMS_MPC_THRESHOLD 3 /* MPC count threshold level */ /* beacon interval, in unit of 1024TU */ -#define BEACON_INTERVAL_DEFAULT 100 +#define BEACON_INTERVAL_DEFAULT 100 +/* DTIM interval, in unit of beacon interval */ +#define DTIM_INTERVAL_DEFAULT 3 + +/* Scale down delays to accommodate QT slow speed */ +/* beacon interval, in unit of 1024TU */ +#define BEACON_INTERVAL_DEF_QT 20 +/* DTIM interval, in unit of beacon interval */ +#define DTIM_INTERVAL_DEF_QT 1 + +#define TBTT_ALIGN_LEEWAY_US 100 /* min leeway before first TBTT in us */ /* n-mode support capability */ /* 2x2 includes both 1x1 & 2x2 devices @@ -55,71 +78,113 @@ #define WL_11N_3x3 3 #define WL_11N_4x4 4 -#define EDCF_ACI_MASK 0x60 -#define EDCF_ACI_SHIFT 5 -#define EDCF_ECWMIN_MASK 0x0f -#define EDCF_ECWMAX_SHIFT 4 -#define EDCF_AIFSN_MASK 0x0f -#define EDCF_AIFSN_MAX 15 -#define EDCF_ECWMAX_MASK 0xf0 - -#define EDCF_AC_BE_TXOP_STA 0x0000 -#define EDCF_AC_BK_TXOP_STA 0x0000 -#define EDCF_AC_VO_ACI_STA 0x62 -#define EDCF_AC_VO_ECW_STA 0x32 -#define EDCF_AC_VI_ACI_STA 0x42 -#define EDCF_AC_VI_ECW_STA 0x43 -#define EDCF_AC_BK_ECW_STA 0xA4 -#define EDCF_AC_VI_TXOP_STA 0x005e -#define EDCF_AC_VO_TXOP_STA 0x002f -#define EDCF_AC_BE_ACI_STA 0x03 -#define EDCF_AC_BE_ECW_STA 0xA4 -#define EDCF_AC_BK_ACI_STA 0x27 -#define EDCF_AC_VO_TXOP_AP 0x002f - -#define EDCF_TXOP2USEC(txop) ((txop) << 5) -#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1) - -#define APHY_SYMBOL_TIME 4 -#define APHY_PREAMBLE_TIME 16 -#define APHY_SIGNAL_TIME 4 -#define APHY_SIFS_TIME 16 -#define APHY_SERVICE_NBITS 16 -#define APHY_TAIL_NBITS 6 -#define BPHY_SIFS_TIME 10 -#define BPHY_PLCP_SHORT_TIME 96 - -#define PREN_PREAMBLE 24 -#define PREN_MM_EXT 12 -#define PREN_PREAMBLE_EXT 4 +/* define 11n feature disable flags */ +#define WLFEATURE_DISABLE_11N 0x00000001 +#define WLFEATURE_DISABLE_11N_STBC_TX 0x00000002 +#define WLFEATURE_DISABLE_11N_STBC_RX 0x00000004 +#define WLFEATURE_DISABLE_11N_SGI_TX 0x00000008 +#define WLFEATURE_DISABLE_11N_SGI_RX 0x00000010 +#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020 +#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040 +#define WLFEATURE_DISABLE_11N_GF 0x00000080 + +#define EDCF_ACI_MASK 0x60 +#define EDCF_ACI_SHIFT 5 +#define EDCF_ECWMIN_MASK 0x0f +#define EDCF_ECWMAX_SHIFT 4 +#define EDCF_AIFSN_MASK 0x0f +#define EDCF_AIFSN_MAX 15 +#define EDCF_ECWMAX_MASK 0xf0 + +#define EDCF_AC_BE_TXOP_STA 0x0000 +#define EDCF_AC_BK_TXOP_STA 0x0000 +#define EDCF_AC_VO_ACI_STA 0x62 +#define EDCF_AC_VO_ECW_STA 0x32 +#define EDCF_AC_VI_ACI_STA 0x42 +#define EDCF_AC_VI_ECW_STA 0x43 +#define EDCF_AC_BK_ECW_STA 0xA4 +#define EDCF_AC_VI_TXOP_STA 0x005e +#define EDCF_AC_VO_TXOP_STA 0x002f +#define EDCF_AC_BE_ACI_STA 0x03 +#define EDCF_AC_BE_ECW_STA 0xA4 +#define EDCF_AC_BK_ACI_STA 0x27 +#define EDCF_AC_VO_TXOP_AP 0x002f + +#define EDCF_TXOP2USEC(txop) ((txop) << 5) +#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1) + +#define APHY_SYMBOL_TIME 4 +#define APHY_PREAMBLE_TIME 16 +#define APHY_SIGNAL_TIME 4 +#define APHY_SIFS_TIME 16 +#define APHY_SERVICE_NBITS 16 +#define 
APHY_TAIL_NBITS 6 +#define BPHY_SIFS_TIME 10 +#define BPHY_PLCP_SHORT_TIME 96 + +#define PREN_PREAMBLE 24 +#define PREN_MM_EXT 12 +#define PREN_PREAMBLE_EXT 4 #define DOT11_MAC_HDR_LEN 24 -#define DOT11_ACK_LEN 10 -#define DOT11_BA_LEN 4 +#define DOT11_ACK_LEN 10 +#define DOT11_BA_LEN 4 #define DOT11_OFDM_SIGNAL_EXTENSION 6 #define DOT11_MIN_FRAG_LEN 256 -#define DOT11_RTS_LEN 16 -#define DOT11_CTS_LEN 10 +#define DOT11_RTS_LEN 16 +#define DOT11_CTS_LEN 10 #define DOT11_BA_BITMAP_LEN 128 #define DOT11_MIN_BEACON_PERIOD 1 #define DOT11_MAX_BEACON_PERIOD 0xFFFF -#define DOT11_MAXNUMFRAGS 16 +#define DOT11_MAXNUMFRAGS 16 #define DOT11_MAX_FRAG_LEN 2346 -#define BPHY_PLCP_TIME 192 -#define RIFS_11N_TIME 2 +#define BPHY_PLCP_TIME 192 +#define RIFS_11N_TIME 2 + +#define WME_VER 1 +#define WME_SUBTYPE_PARAM_IE 1 +#define WME_TYPE 2 +#define WME_OUI "\x00\x50\xf2" -/* length of the BCN template area */ -#define BCN_TMPL_LEN 512 +#define AC_BE 0 +#define AC_BK 1 +#define AC_VI 2 +#define AC_VO 3 + +#define BCN_TMPL_LEN 512 /* length of the BCN template area */ /* brcms_bss_info flag bit values */ -#define BRCMS_BSS_HT 0x0020 /* BSS is HT (MIMO) capable */ +#define BRCMS_BSS_HT 0x0020 /* BSS is HT (MIMO) capable */ + +/* Flags used in brcms_c_txq_info.stopped */ +/* per prio flow control bits */ +#define TXQ_STOP_FOR_PRIOFC_MASK 0x000000FF +/* stop txq enqueue for packet drain */ +#define TXQ_STOP_FOR_PKT_DRAIN 0x00000100 +/* stop txq enqueue for ampdu flow control */ +#define TXQ_STOP_FOR_AMPDU_FLOW_CNTRL 0x00000200 + +#define BRCMS_HWRXOFF 38 /* chip rx buffer offset */ -/* chip rx buffer offset */ -#define BRCMS_HWRXOFF 38 +/* Find basic rate for a given rate */ +static u8 brcms_basic_rate(struct brcms_c_info *wlc, u32 rspec) +{ + if (is_mcs_rate(rspec)) + return wlc->band->basic_rate[mcs_table[rspec & RSPEC_RATE_MASK] + .leg_ofdm]; + return wlc->band->basic_rate[rspec & RSPEC_RATE_MASK]; +} + +static u16 frametype(u32 rspec, u8 mimoframe) +{ + if (is_mcs_rate(rspec)) + return mimoframe; + return is_cck_rate(rspec) ? FT_CCK : FT_OFDM; +} /* rfdisable delay timer 500 ms, runs of ALP clock */ -#define RFDISABLE_DEFAULT 10000000 +#define RFDISABLE_DEFAULT 10000000 #define BRCMS_TEMPSENSE_PERIOD 10 /* 10 second timeout */ @@ -129,83 +194,87 @@ * These constants are used ONLY by wlc_prio2prec_map. Do not use them * elsewhere. 
*/ -#define _BRCMS_PREC_NONE 0 /* None = - */ -#define _BRCMS_PREC_BK 2 /* BK - Background */ -#define _BRCMS_PREC_BE 4 /* BE - Best-effort */ -#define _BRCMS_PREC_EE 6 /* EE - Excellent-effort */ -#define _BRCMS_PREC_CL 8 /* CL - Controlled Load */ -#define _BRCMS_PREC_VI 10 /* Vi - Video */ -#define _BRCMS_PREC_VO 12 /* Vo - Voice */ -#define _BRCMS_PREC_NC 14 /* NC - Network Control */ - -/* synthpu_dly times in us */ -#define SYNTHPU_DLY_APHY_US 3700 -#define SYNTHPU_DLY_BPHY_US 1050 -#define SYNTHPU_DLY_NPHY_US 2048 -#define SYNTHPU_DLY_LPPHY_US 300 - -#define ANTCNT 10 /* vanilla M_MAX_ANTCNT val */ +#define _BRCMS_PREC_NONE 0 /* None = - */ +#define _BRCMS_PREC_BK 2 /* BK - Background */ +#define _BRCMS_PREC_BE 4 /* BE - Best-effort */ +#define _BRCMS_PREC_EE 6 /* EE - Excellent-effort */ +#define _BRCMS_PREC_CL 8 /* CL - Controlled Load */ +#define _BRCMS_PREC_VI 10 /* Vi - Video */ +#define _BRCMS_PREC_VO 12 /* Vo - Voice */ +#define _BRCMS_PREC_NC 14 /* NC - Network Control */ + +/* The BSS is generating beacons in HW */ +#define BRCMS_BSSCFG_HW_BCN 0x20 + +#define SYNTHPU_DLY_APHY_US 3700 /* a phy synthpu_dly time in us */ +#define SYNTHPU_DLY_BPHY_US 1050 /* b/g phy synthpu_dly time in us */ +#define SYNTHPU_DLY_NPHY_US 2048 /* n phy REV3 synthpu_dly time in us */ +#define SYNTHPU_DLY_LPPHY_US 300 /* lpphy synthpu_dly time in us */ + +#define SYNTHPU_DLY_PHY_US_QT 100 /* QT synthpu_dly time in us */ + +#define ANTCNT 10 /* vanilla M_MAX_ANTCNT value */ /* Per-AC retry limit register definitions; uses defs.h bitfield macros */ -#define EDCF_SHORT_S 0 -#define EDCF_SFB_S 4 -#define EDCF_LONG_S 8 -#define EDCF_LFB_S 12 -#define EDCF_SHORT_M BITFIELD_MASK(4) -#define EDCF_SFB_M BITFIELD_MASK(4) -#define EDCF_LONG_M BITFIELD_MASK(4) -#define EDCF_LFB_M BITFIELD_MASK(4) +#define EDCF_SHORT_S 0 +#define EDCF_SFB_S 4 +#define EDCF_LONG_S 8 +#define EDCF_LFB_S 12 +#define EDCF_SHORT_M BITFIELD_MASK(4) +#define EDCF_SFB_M BITFIELD_MASK(4) +#define EDCF_LONG_M BITFIELD_MASK(4) +#define EDCF_LFB_M BITFIELD_MASK(4) -#define RETRY_SHORT_DEF 7 /* Default Short retry Limit */ -#define RETRY_SHORT_MAX 255 /* Maximum Short retry Limit */ -#define RETRY_LONG_DEF 4 /* Default Long retry count */ -#define RETRY_SHORT_FB 3 /* Short count for fb rate */ -#define RETRY_LONG_FB 2 /* Long count for fb rate */ +#define RETRY_SHORT_DEF 7 /* Default Short retry Limit */ +#define RETRY_SHORT_MAX 255 /* Maximum Short retry Limit */ +#define RETRY_LONG_DEF 4 /* Default Long retry count */ +#define RETRY_SHORT_FB 3 /* Short count for fallback rate */ +#define RETRY_LONG_FB 2 /* Long count for fallback rate */ -#define APHY_CWMIN 15 -#define PHY_CWMAX 1023 +#define APHY_CWMIN 15 +#define PHY_CWMAX 1023 -#define EDCF_AIFSN_MIN 1 +#define EDCF_AIFSN_MIN 1 -#define FRAGNUM_MASK 0xF +#define FRAGNUM_MASK 0xF -#define APHY_SLOT_TIME 9 -#define BPHY_SLOT_TIME 20 +#define APHY_SLOT_TIME 9 +#define BPHY_SLOT_TIME 20 -#define WL_SPURAVOID_OFF 0 -#define WL_SPURAVOID_ON1 1 -#define WL_SPURAVOID_ON2 2 +#define WL_SPURAVOID_OFF 0 +#define WL_SPURAVOID_ON1 1 +#define WL_SPURAVOID_ON2 2 /* invalid core flags, use the saved coreflags */ -#define BRCMS_USE_COREFLAGS 0xffffffff +#define BRCMS_USE_COREFLAGS 0xffffffff /* values for PLCPHdr_override */ -#define BRCMS_PLCP_AUTO -1 -#define BRCMS_PLCP_SHORT 0 -#define BRCMS_PLCP_LONG 1 +#define BRCMS_PLCP_AUTO -1 +#define BRCMS_PLCP_SHORT 0 +#define BRCMS_PLCP_LONG 1 /* values for g_protection_override and n_protection_override */ #define BRCMS_PROTECTION_AUTO -1 #define 
BRCMS_PROTECTION_OFF 0 #define BRCMS_PROTECTION_ON 1 #define BRCMS_PROTECTION_MMHDR_ONLY 2 -#define BRCMS_PROTECTION_CTS_ONLY 3 +#define BRCMS_PROTECTION_CTS_ONLY 3 /* values for g_protection_control and n_protection_control */ -#define BRCMS_PROTECTION_CTL_OFF 0 +#define BRCMS_PROTECTION_CTL_OFF 0 #define BRCMS_PROTECTION_CTL_LOCAL 1 #define BRCMS_PROTECTION_CTL_OVERLAP 2 /* values for n_protection */ #define BRCMS_N_PROTECTION_OFF 0 #define BRCMS_N_PROTECTION_OPTIONAL 1 -#define BRCMS_N_PROTECTION_20IN40 2 +#define BRCMS_N_PROTECTION_20IN40 2 #define BRCMS_N_PROTECTION_MIXEDMODE 3 /* values for band specific 40MHz capabilities */ -#define BRCMS_N_BW_20ALL 0 -#define BRCMS_N_BW_40ALL 1 -#define BRCMS_N_BW_20IN2G_40IN5G 2 +#define BRCMS_N_BW_20ALL 0 +#define BRCMS_N_BW_40ALL 1 +#define BRCMS_N_BW_20IN2G_40IN5G 2 /* bitflags for SGI support (sgi_rx iovar) */ #define BRCMS_N_SGI_20 0x01 @@ -213,42 +282,48 @@ /* defines used by the nrate iovar */ /* MSC in use,indicates b0-6 holds an mcs */ -#define NRATE_MCS_INUSE 0x00000080 +#define NRATE_MCS_INUSE 0x00000080 /* rate/mcs value */ -#define NRATE_RATE_MASK 0x0000007f +#define NRATE_RATE_MASK 0x0000007f /* stf mode mask: siso, cdd, stbc, sdm */ -#define NRATE_STF_MASK 0x0000ff00 +#define NRATE_STF_MASK 0x0000ff00 /* stf mode shift */ -#define NRATE_STF_SHIFT 8 +#define NRATE_STF_SHIFT 8 +/* bit indicates override both rate & mode */ +#define NRATE_OVERRIDE 0x80000000 /* bit indicate to override mcs only */ -#define NRATE_OVERRIDE_MCS_ONLY 0x40000000 -#define NRATE_SGI_MASK 0x00800000 /* sgi mode */ -#define NRATE_SGI_SHIFT 23 /* sgi mode */ -#define NRATE_LDPC_CODING 0x00400000 /* adv coding in use */ -#define NRATE_LDPC_SHIFT 22 /* ldpc shift */ +#define NRATE_OVERRIDE_MCS_ONLY 0x40000000 +#define NRATE_SGI_MASK 0x00800000 /* sgi mode */ +#define NRATE_SGI_SHIFT 23 /* sgi mode */ +#define NRATE_LDPC_CODING 0x00400000 /* bit indicates adv coding in use */ +#define NRATE_LDPC_SHIFT 22 /* ldpc shift */ -#define NRATE_STF_SISO 0 /* stf mode SISO */ -#define NRATE_STF_CDD 1 /* stf mode CDD */ -#define NRATE_STF_STBC 2 /* stf mode STBC */ -#define NRATE_STF_SDM 3 /* stf mode SDM */ +#define NRATE_STF_SISO 0 /* stf mode SISO */ +#define NRATE_STF_CDD 1 /* stf mode CDD */ +#define NRATE_STF_STBC 2 /* stf mode STBC */ +#define NRATE_STF_SDM 3 /* stf mode SDM */ -#define MAX_DMA_SEGS 4 +#define MAX_DMA_SEGS 4 /* Max # of entries in Tx FIFO based on 4kb page size */ -#define NTXD 256 +#define NTXD 256 /* Max # of entries in Rx FIFO based on 4kb page size */ -#define NRXD 256 +#define NRXD 256 /* try to keep this # rbufs posted to the chip */ -#define NRXBUFPOST 32 +#define NRXBUFPOST 32 /* data msg txq hiwat mark */ -#define BRCMS_DATAHIWAT 50 +#define BRCMS_DATAHIWAT 50 -/* max # frames to process in brcms_c_recv() */ -#define RXBND 8 -/* max # tx status to process in wlc_txstatus() */ -#define TXSBND 8 +/* bounded rx loops */ +#define RXBND 8 /* max # frames to process in brcms_c_recv() */ +#define TXSBND 8 /* max # tx status to process in wlc_txstatus() */ + +/* + * 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL. 
+ */ +#define SSID_FMT_BUF_LEN ((4 * IEEE80211_MAX_SSID_LEN) + 1) /* brcmu_format_flags() bit description structure */ struct brcms_c_bit_desc { @@ -300,22 +375,10 @@ uint brcm_msg_level = #endif /* BCMDBG */ /* TX FIFO number to WME/802.1E Access Category */ -static const u8 wme_fifo2ac[] = { - IEEE80211_AC_BK, - IEEE80211_AC_BE, - IEEE80211_AC_VI, - IEEE80211_AC_VO, - IEEE80211_AC_BE, - IEEE80211_AC_BE -}; +static const u8 wme_fifo2ac[] = { AC_BK, AC_BE, AC_VI, AC_VO, AC_BE, AC_BE }; -/* ieee80211 Access Category to TX FIFO number */ -static const u8 wme_ac2fifo[] = { - TX_AC_VO_FIFO, - TX_AC_VI_FIFO, - TX_AC_BE_FIFO, - TX_AC_BK_FIFO -}; +/* WME/802.1E Access Category to TX FIFO number */ +static const u8 wme_ac2fifo[] = { 1, 0, 2, 3 }; /* 802.1D Priority to precedence queue mapping */ const u8 wlc_prio2prec_map[] = { @@ -342,6 +405,13 @@ static const u16 xmtfifo_sz[][NFIFO] = { {9, 58, 22, 14, 14, 5}, }; +static const u8 acbitmap2maxprio[] = { + PRIO_8021D_BE, PRIO_8021D_BE, PRIO_8021D_BK, PRIO_8021D_BK, + PRIO_8021D_VI, PRIO_8021D_VI, PRIO_8021D_VI, PRIO_8021D_VI, + PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO, + PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO +}; + #ifdef BCMDBG static const char * const fifo_names[] = { "AC_BK", "AC_BE", "AC_VI", "AC_VO", "BCMC", "ATIM" }; @@ -354,22 +424,6 @@ static const char fifo_names[6][0]; static struct brcms_c_info *wlc_info_dbg = (struct brcms_c_info *) (NULL); #endif -/* Find basic rate for a given rate */ -static u8 brcms_basic_rate(struct brcms_c_info *wlc, u32 rspec) -{ - if (is_mcs_rate(rspec)) - return wlc->band->basic_rate[mcs_table[rspec & RSPEC_RATE_MASK] - .leg_ofdm]; - return wlc->band->basic_rate[rspec & RSPEC_RATE_MASK]; -} - -static u16 frametype(u32 rspec, u8 mimoframe) -{ - if (is_mcs_rate(rspec)) - return mimoframe; - return is_cck_rate(rspec) ? 
FT_CCK : FT_OFDM; -} - /* currently the best mechanism for determining SIFS is the band in use */ static u16 get_sifs(struct brcms_band *band) { @@ -388,13 +442,10 @@ static u16 get_sifs(struct brcms_band *band) */ static bool brcms_deviceremoved(struct brcms_c_info *wlc) { - u32 macctrl; - if (!wlc->hw->clk) return ai_deviceremoved(wlc->hw->sih); - macctrl = bcma_read32(wlc->hw->d11core, - D11REGOFFS(maccontrol)); - return (macctrl & (MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN; + return (R_REG(&wlc->hw->regs->maccontrol) & + (MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN; } /* sum the individual fifo tx pending packet counts */ @@ -419,6 +470,20 @@ static int brcms_chspec_bw(u16 chanspec) return BRCMS_10_MHZ; } +/* + * return true if Minimum Power Consumption should + * be entered, false otherwise + */ +static bool brcms_c_is_non_delay_mpc(struct brcms_c_info *wlc) +{ + return false; +} + +static bool brcms_c_ismpc(struct brcms_c_info *wlc) +{ + return (wlc->mpc_delay_off == 0) && (brcms_c_is_non_delay_mpc(wlc)); +} + static void brcms_c_bsscfg_mfree(struct brcms_bss_cfg *cfg) { if (cfg == NULL) @@ -585,15 +650,17 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid) static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw, bool shortslot) { - struct bcma_device *core = wlc_hw->d11core; + struct d11regs __iomem *regs; + + regs = wlc_hw->regs; if (shortslot) { /* 11g short slot: 11a timing */ - bcma_write16(core, D11REGOFFS(ifs_slot), 0x0207); + W_REG(®s->ifs_slot, 0x0207); /* APHY_SLOT_TIME */ brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, APHY_SLOT_TIME); } else { /* 11g long slot: 11b timing */ - bcma_write16(core, D11REGOFFS(ifs_slot), 0x0212); + W_REG(®s->ifs_slot, 0x0212); /* BPHY_SLOT_TIME */ brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, BPHY_SLOT_TIME); } } @@ -602,8 +669,9 @@ static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw, * calculate frame duration of a given rate and length, return * time in usec unit */ -static uint brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec, - u8 preamble_type, uint mac_len) +uint +brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec, + u8 preamble_type, uint mac_len) { uint nsyms, dur = 0, Ndps, kNdps; uint rate = rspec2rate(ratespec); @@ -673,22 +741,24 @@ static uint brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec, static void brcms_c_write_inits(struct brcms_hardware *wlc_hw, const struct d11init *inits) { - struct bcma_device *core = wlc_hw->d11core; int i; - uint offset; + u8 __iomem *base; + u8 __iomem *addr; u16 size; u32 value; BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); + base = (u8 __iomem *)wlc_hw->regs; + for (i = 0; inits[i].addr != cpu_to_le16(0xffff); i++) { size = le16_to_cpu(inits[i].size); - offset = le16_to_cpu(inits[i].addr); + addr = base + le16_to_cpu(inits[i].addr); value = le32_to_cpu(inits[i].value); if (size == 2) - bcma_write16(core, offset, value); + W_REG((u16 __iomem *)addr, value); else if (size == 4) - bcma_write32(core, offset, value); + W_REG((u32 __iomem *)addr, value); else break; } @@ -738,14 +808,6 @@ static void brcms_c_ucode_bsinit(struct brcms_hardware *wlc_hw) } } -static void brcms_b_core_ioctl(struct brcms_hardware *wlc_hw, u32 m, u32 v) -{ - struct bcma_device *core = wlc_hw->d11core; - u32 ioctl = bcma_aread32(core, BCMA_IOCTL) & ~m; - - bcma_awrite32(core, BCMA_IOCTL, ioctl | v); -} - static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk) { BCMMSG(wlc_hw->wlc->wiphy, "wl%d: clk %d\n", wlc_hw->unit, clk); @@ 
-754,17 +816,17 @@ static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk) if (OFF == clk) { /* clear gmode bit, put phy into reset */ - brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC | SICF_GMODE), - (SICF_PRST | SICF_FGC)); + ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC | SICF_GMODE), + (SICF_PRST | SICF_FGC)); udelay(1); - brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC), SICF_PRST); + ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC), SICF_PRST); udelay(1); } else { /* take phy out of reset */ - brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC), SICF_FGC); + ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC), SICF_FGC); udelay(1); - brcms_b_core_ioctl(wlc_hw, SICF_FGC, 0); + ai_core_cflags(wlc_hw->sih, (SICF_FGC), 0); udelay(1); } @@ -785,14 +847,9 @@ static void brcms_c_setxband(struct brcms_hardware *wlc_hw, uint bandunit) wlc_hw->wlc->band = wlc_hw->wlc->bandstate[bandunit]; /* set gmode core flag */ - if (wlc_hw->sbclk && !wlc_hw->noreset) { - u32 gmode = 0; - - if (bandunit == 0) - gmode = SICF_GMODE; - - brcms_b_core_ioctl(wlc_hw, SICF_GMODE, gmode); - } + if (wlc_hw->sbclk && !wlc_hw->noreset) + ai_core_cflags(wlc_hw->sih, SICF_GMODE, + ((bandunit == 0) ? SICF_GMODE : 0)); } /* switch to new band but leave it inactive */ @@ -800,12 +857,10 @@ static u32 brcms_c_setband_inact(struct brcms_c_info *wlc, uint bandunit) { struct brcms_hardware *wlc_hw = wlc->hw; u32 macintmask; - u32 macctrl; BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit); - macctrl = bcma_read32(wlc_hw->d11core, - D11REGOFFS(maccontrol)); - WARN_ON((macctrl & MCTL_EN_MAC) != 0); + + WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0); /* disable interrupts */ macintmask = brcms_intrsoff(wlc->wl); @@ -914,7 +969,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs) lfbl, /* Long Frame Rate Fallback Limit */ fbl; - if (queue < IEEE80211_NUM_ACS) { + if (queue < AC_COUNT) { sfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]], EDCF_SFB); lfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]], @@ -963,12 +1018,14 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs) tx_info->flags |= IEEE80211_TX_STAT_ACK; } - totlen = p->len; + totlen = brcmu_pkttotlen(p); free_pdu = true; brcms_c_txfifo_complete(wlc, queue, 1); if (lastframe) { + p->next = NULL; + p->prev = NULL; /* remove PLCP & Broadcom tx descriptor header */ skb_pull(p, D11_PHY_HDR_LEN); skb_pull(p, D11_TXH_LEN); @@ -996,7 +1053,7 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal) { bool morepending = false; struct brcms_c_info *wlc = wlc_hw->wlc; - struct bcma_device *core; + struct d11regs __iomem *regs; struct tx_status txstatus, *txs; u32 s1, s2; uint n = 0; @@ -1009,18 +1066,18 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal) BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit); txs = &txstatus; - core = wlc_hw->d11core; + regs = wlc_hw->regs; *fatal = false; - s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); while (!(*fatal) - && (s1 & TXS_V)) { + && (s1 = R_REG(®s->frmtxstatus)) & TXS_V) { if (s1 == 0xffffffff) { wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit, __func__); return morepending; } - s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2)); + + s2 = R_REG(®s->frmtxstatus2); txs->status = s1 & TXS_STATUS_MASK; txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT; @@ -1033,7 +1090,6 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal) /* !give others some time to run! 
*/ if (++n >= max_tx_num) break; - s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); } if (*fatal) @@ -1078,12 +1134,12 @@ brcms_c_mhfdef(struct brcms_c_info *wlc, u16 *mhfs, u16 mhf2_init) } } -static uint -dmareg(uint direction, uint fifonum) +static struct dma64regs __iomem * +dmareg(struct brcms_hardware *hw, uint direction, uint fifonum) { if (direction == DMA_TX) - return offsetof(struct d11regs, fifo64regs[fifonum].dmaxmt); - return offsetof(struct d11regs, fifo64regs[fifonum].dmarcv); + return &(hw->regs->fifo64regs[fifonum].dmaxmt); + return &(hw->regs->fifo64regs[fifonum].dmarcv); } static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme) @@ -1109,9 +1165,9 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme) * TX: TX_AC_BK_FIFO (TX AC Background data packets) * RX: RX_FIFO (RX data packets) */ - wlc_hw->di[0] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core, - (wme ? dmareg(DMA_TX, 0) : 0), - dmareg(DMA_RX, 0), + wlc_hw->di[0] = dma_attach(name, wlc_hw->sih, + (wme ? dmareg(wlc_hw, DMA_TX, 0) : + NULL), dmareg(wlc_hw, DMA_RX, 0), (wme ? NTXD : 0), NRXD, RXBUFSZ, -1, NRXBUFPOST, BRCMS_HWRXOFF, &brcm_msg_level); @@ -1123,8 +1179,8 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme) * (legacy) TX_DATA_FIFO (TX data packets) * RX: UNUSED */ - wlc_hw->di[1] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core, - dmareg(DMA_TX, 1), 0, + wlc_hw->di[1] = dma_attach(name, wlc_hw->sih, + dmareg(wlc_hw, DMA_TX, 1), NULL, NTXD, 0, 0, -1, 0, 0, &brcm_msg_level); dma_attach_err |= (NULL == wlc_hw->di[1]); @@ -1134,8 +1190,8 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme) * TX: TX_AC_VI_FIFO (TX AC Video data packets) * RX: UNUSED */ - wlc_hw->di[2] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core, - dmareg(DMA_TX, 2), 0, + wlc_hw->di[2] = dma_attach(name, wlc_hw->sih, + dmareg(wlc_hw, DMA_TX, 2), NULL, NTXD, 0, 0, -1, 0, 0, &brcm_msg_level); dma_attach_err |= (NULL == wlc_hw->di[2]); @@ -1144,9 +1200,9 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme) * TX: TX_AC_VO_FIFO (TX AC Voice data packets) * (legacy) TX_CTL_FIFO (TX control & mgmt packets) */ - wlc_hw->di[3] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core, - dmareg(DMA_TX, 3), - 0, NTXD, 0, 0, -1, + wlc_hw->di[3] = dma_attach(name, wlc_hw->sih, + dmareg(wlc_hw, DMA_TX, 3), + NULL, NTXD, 0, 0, -1, 0, 0, &brcm_msg_level); dma_attach_err |= (NULL == wlc_hw->di[3]); /* Cleaner to leave this as if with AP defined */ @@ -1220,7 +1276,7 @@ static void brcms_b_wait_for_wake(struct brcms_hardware *wlc_hw) /* control chip clock to save power, enable dynamic clock or force fast clock */ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode) { - if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU) { + if (wlc_hw->sih->cccaps & CC_CAP_PMU) { /* new chips with PMU, CCS_FORCEHT will distribute the HT clock * on backplane, but mac core will still run on ALP(not HT) when * it enters powersave mode, which means the FCA bit may not be @@ -1229,33 +1285,29 @@ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode) if (wlc_hw->clk) { if (mode == CLK_FAST) { - bcma_set32(wlc_hw->d11core, - D11REGOFFS(clk_ctl_st), - CCS_FORCEHT); + OR_REG(&wlc_hw->regs->clk_ctl_st, + CCS_FORCEHT); udelay(64); - SPINWAIT( - ((bcma_read32(wlc_hw->d11core, - D11REGOFFS(clk_ctl_st)) & - CCS_HTAVAIL) == 0), - PMU_MAX_TRANSITION_DLY); - WARN_ON(!(bcma_read32(wlc_hw->d11core, - D11REGOFFS(clk_ctl_st)) & - CCS_HTAVAIL)); + 
SPINWAIT(((R_REG + (&wlc_hw->regs-> + clk_ctl_st) & CCS_HTAVAIL) == 0), + PMU_MAX_TRANSITION_DLY); + WARN_ON(!(R_REG + (&wlc_hw->regs-> + clk_ctl_st) & CCS_HTAVAIL)); } else { - if ((ai_get_pmurev(wlc_hw->sih) == 0) && - (bcma_read32(wlc_hw->d11core, - D11REGOFFS(clk_ctl_st)) & - (CCS_FORCEHT | CCS_HTAREQ))) - SPINWAIT( - ((bcma_read32(wlc_hw->d11core, - offsetof(struct d11regs, - clk_ctl_st)) & - CCS_HTAVAIL) == 0), - PMU_MAX_TRANSITION_DLY); - bcma_mask32(wlc_hw->d11core, - D11REGOFFS(clk_ctl_st), + if ((wlc_hw->sih->pmurev == 0) && + (R_REG + (&wlc_hw->regs-> + clk_ctl_st) & (CCS_FORCEHT | CCS_HTAREQ))) + SPINWAIT(((R_REG + (&wlc_hw->regs-> + clk_ctl_st) & CCS_HTAVAIL) + == 0), + PMU_MAX_TRANSITION_DLY); + AND_REG(&wlc_hw->regs->clk_ctl_st, ~CCS_FORCEHT); } } @@ -1270,7 +1322,7 @@ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode) /* check fast clock is available (if core is not in reset) */ if (wlc_hw->forcefastclk && wlc_hw->clk) - WARN_ON(!(bcma_aread32(wlc_hw->d11core, BCMA_IOST) & + WARN_ON(!(ai_core_sflags(wlc_hw->sih, 0, 0) & SISF_FCLKA)); /* @@ -1387,8 +1439,7 @@ static void brcms_c_mctrl_write(struct brcms_hardware *wlc_hw) maccontrol |= MCTL_INFRA; } - bcma_write32(wlc_hw->d11core, D11REGOFFS(maccontrol), - maccontrol); + W_REG(&wlc_hw->regs->maccontrol, maccontrol); } /* set or clear maccontrol bits */ @@ -1482,7 +1533,7 @@ static void brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset, const u8 *addr) { - struct bcma_device *core = wlc_hw->d11core; + struct d11regs __iomem *regs; u16 mac_l; u16 mac_m; u16 mac_h; @@ -1490,36 +1541,38 @@ brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset, BCMMSG(wlc_hw->wlc->wiphy, "wl%d: brcms_b_set_addrmatch\n", wlc_hw->unit); + regs = wlc_hw->regs; mac_l = addr[0] | (addr[1] << 8); mac_m = addr[2] | (addr[3] << 8); mac_h = addr[4] | (addr[5] << 8); /* enter the MAC addr into the RXE match registers */ - bcma_write16(core, D11REGOFFS(rcm_ctl), - RCM_INC_DATA | match_reg_offset); - bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_l); - bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_m); - bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_h); + W_REG(®s->rcm_ctl, RCM_INC_DATA | match_reg_offset); + W_REG(®s->rcm_mat_data, mac_l); + W_REG(®s->rcm_mat_data, mac_m); + W_REG(®s->rcm_mat_data, mac_h); + } void brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len, void *buf) { - struct bcma_device *core = wlc_hw->d11core; + struct d11regs __iomem *regs; u32 word; __le32 word_le; __be32 word_be; bool be_bit; BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); - bcma_write32(core, D11REGOFFS(tplatewrptr), offset); + regs = wlc_hw->regs; + W_REG(®s->tplatewrptr, offset); /* if MCTL_BIGEND bit set in mac control register, * the chip swaps data in fifo, as well as data in * template ram */ - be_bit = (bcma_read32(core, D11REGOFFS(maccontrol)) & MCTL_BIGEND) != 0; + be_bit = (R_REG(®s->maccontrol) & MCTL_BIGEND) != 0; while (len > 0) { memcpy(&word, buf, sizeof(u32)); @@ -1532,7 +1585,7 @@ brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len, word = *(u32 *)&word_le; } - bcma_write32(core, D11REGOFFS(tplatewrdata), word); + W_REG(®s->tplatewrdata, word); buf = (u8 *) buf + sizeof(u32); len -= sizeof(u32); @@ -1543,20 +1596,18 @@ static void brcms_b_set_cwmin(struct brcms_hardware *wlc_hw, u16 newmin) { wlc_hw->band->CWmin = newmin; - bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr), - OBJADDR_SCR_SEL | S_DOT11_CWMIN); - 
(void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr)); - bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), newmin); + W_REG(&wlc_hw->regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_CWMIN); + (void)R_REG(&wlc_hw->regs->objaddr); + W_REG(&wlc_hw->regs->objdata, newmin); } static void brcms_b_set_cwmax(struct brcms_hardware *wlc_hw, u16 newmax) { wlc_hw->band->CWmax = newmax; - bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr), - OBJADDR_SCR_SEL | S_DOT11_CWMAX); - (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr)); - bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), newmax); + W_REG(&wlc_hw->regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_CWMAX); + (void)R_REG(&wlc_hw->regs->objaddr); + W_REG(&wlc_hw->regs->objdata, newmax); } void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw) @@ -1722,17 +1773,17 @@ void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw) { BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); - ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_addr), - ~0, 0); + ai_corereg(wlc_hw->sih, SI_CC_IDX, + offsetof(struct chipcregs, chipcontrol_addr), ~0, 0); udelay(1); - ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data), - 0x4, 0); + ai_corereg(wlc_hw->sih, SI_CC_IDX, + offsetof(struct chipcregs, chipcontrol_data), 0x4, 0); udelay(1); - ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data), - 0x4, 4); + ai_corereg(wlc_hw->sih, SI_CC_IDX, + offsetof(struct chipcregs, chipcontrol_data), 0x4, 4); udelay(1); - ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data), - 0x4, 0); + ai_corereg(wlc_hw->sih, SI_CC_IDX, + offsetof(struct chipcregs, chipcontrol_data), 0x4, 0); udelay(1); } @@ -1746,18 +1797,18 @@ void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk) return; if (ON == clk) - brcms_b_core_ioctl(wlc_hw, SICF_FGC, SICF_FGC); + ai_core_cflags(wlc_hw->sih, SICF_FGC, SICF_FGC); else - brcms_b_core_ioctl(wlc_hw, SICF_FGC, 0); + ai_core_cflags(wlc_hw->sih, SICF_FGC, 0); } void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk) { if (ON == clk) - brcms_b_core_ioctl(wlc_hw, SICF_MPCLKE, SICF_MPCLKE); + ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, SICF_MPCLKE); else - brcms_b_core_ioctl(wlc_hw, SICF_MPCLKE, 0); + ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, 0); } void brcms_b_phy_reset(struct brcms_hardware *wlc_hw) @@ -1777,7 +1828,7 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw) if (BRCMS_ISNPHY(wlc_hw->band) && NREV_GE(wlc_hw->band->phyrev, 3) && NREV_LE(wlc_hw->band->phyrev, 4)) { /* Set the PHY bandwidth */ - brcms_b_core_ioctl(wlc_hw, SICF_BWMASK, phy_bw_clkbits); + ai_core_cflags(wlc_hw->sih, SICF_BWMASK, phy_bw_clkbits); udelay(1); @@ -1785,13 +1836,13 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw) brcms_b_core_phypll_reset(wlc_hw); /* reset the PHY */ - brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_PCLKE), - (SICF_PRST | SICF_PCLKE)); + ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_PCLKE), + (SICF_PRST | SICF_PCLKE)); phy_in_reset = true; } else { - brcms_b_core_ioctl(wlc_hw, - (SICF_PRST | SICF_PCLKE | SICF_BWMASK), - (SICF_PRST | SICF_PCLKE | phy_bw_clkbits)); + ai_core_cflags(wlc_hw->sih, + (SICF_PRST | SICF_PCLKE | SICF_BWMASK), + (SICF_PRST | SICF_PCLKE | phy_bw_clkbits)); } udelay(2); @@ -1808,8 +1859,8 @@ static void brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit, u32 macintmask; /* Enable the d11 core before accessing it */ - if (!bcma_core_is_enabled(wlc_hw->d11core)) { - bcma_core_enable(wlc_hw->d11core, 0); + if (!ai_iscoreup(wlc_hw->sih)) { + 
ai_core_reset(wlc_hw->sih, 0, 0); brcms_c_mctrl_reset(wlc_hw); } @@ -1835,8 +1886,7 @@ static void brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit, brcms_intrsrestore(wlc->wl, macintmask); /* ucode should still be suspended.. */ - WARN_ON((bcma_read32(wlc_hw->d11core, D11REGOFFS(maccontrol)) & - MCTL_EN_MAC) != 0); + WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0); } static bool brcms_c_isgoodchip(struct brcms_hardware *wlc_hw) @@ -1864,7 +1914,7 @@ static bool brcms_c_validboardtype(struct brcms_hardware *wlc_hw) uint b2 = boardrev & 0xf; /* voards from other vendors are always considered valid */ - if (ai_get_boardvendor(wlc_hw->sih) != PCI_VENDOR_ID_BROADCOM) + if (wlc_hw->sih->boardvendor != PCI_VENDOR_ID_BROADCOM) return true; /* do some boardrev sanity checks when boardvendor is Broadcom */ @@ -1936,7 +1986,7 @@ static void brcms_b_xtal(struct brcms_hardware *wlc_hw, bool want) static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw) { bool v, clk, xtal; - u32 flags = 0; + u32 resetbits = 0, flags = 0; xtal = wlc_hw->sbclk; if (!xtal) @@ -1953,22 +2003,22 @@ static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw) flags |= SICF_PCLKE; /* - * TODO: test suspend/resume - * * AI chip doesn't restore bar0win2 on * hibernation/resume, need sw fixup */ - - bcma_core_enable(wlc_hw->d11core, flags); + if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) || + (wlc_hw->sih->chip == BCM43225_CHIP_ID)) + wlc_hw->regs = (struct d11regs __iomem *) + ai_setcore(wlc_hw->sih, D11_CORE_ID, 0); + ai_core_reset(wlc_hw->sih, flags, resetbits); brcms_c_mctrl_reset(wlc_hw); } - v = ((bcma_read32(wlc_hw->d11core, - D11REGOFFS(phydebug)) & PDBG_RFD) != 0); + v = ((R_REG(&wlc_hw->regs->phydebug) & PDBG_RFD) != 0); /* put core back into reset */ if (!clk) - bcma_core_disable(wlc_hw->d11core, 0); + ai_core_disable(wlc_hw->sih, 0); if (!xtal) brcms_b_xtal(wlc_hw, OFF); @@ -1992,21 +2042,25 @@ static bool wlc_dma_rxreset(struct brcms_hardware *wlc_hw, uint fifo) */ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags) { + struct d11regs __iomem *regs; uint i; bool fastclk; + u32 resetbits = 0; if (flags == BRCMS_USE_COREFLAGS) flags = (wlc_hw->band->pi ? wlc_hw->band->core_flags : 0); BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); + regs = wlc_hw->regs; + /* request FAST clock if not on */ fastclk = wlc_hw->forcefastclk; if (!fastclk) brcms_b_clkctl_clk(wlc_hw, CLK_FAST); /* reset the dma engines except first time thru */ - if (bcma_core_is_enabled(wlc_hw->d11core)) { + if (ai_iscoreup(wlc_hw->sih)) { for (i = 0; i < NFIFO; i++) if ((wlc_hw->di[i]) && (!dma_txreset(wlc_hw->di[i]))) wiphy_err(wlc_hw->wlc->wiphy, "wl%d: %s: " @@ -2044,14 +2098,14 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags) * they may touch chipcommon as well. 
*/ wlc_hw->clk = false; - bcma_core_enable(wlc_hw->d11core, flags); + ai_core_reset(wlc_hw->sih, flags, resetbits); wlc_hw->clk = true; if (wlc_hw->band && wlc_hw->band->pi) wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, true); brcms_c_mctrl_reset(wlc_hw); - if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU) + if (wlc_hw->sih->cccaps & CC_CAP_PMU) brcms_b_clkctl_clk(wlc_hw, CLK_FAST); brcms_b_phy_reset(wlc_hw); @@ -2072,7 +2126,7 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags) */ static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw) { - struct bcma_device *core = wlc_hw->d11core; + struct d11regs __iomem *regs = wlc_hw->regs; u16 fifo_nu; u16 txfifo_startblk = TXFIFO_START_BLK, txfifo_endblk; u16 txfifo_def, txfifo_def1; @@ -2093,11 +2147,11 @@ static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw) txfifo_cmd = TXFIFOCMD_RESET_MASK | (fifo_nu << TXFIFOCMD_FIFOSEL_SHIFT); - bcma_write16(core, D11REGOFFS(xmtfifocmd), txfifo_cmd); - bcma_write16(core, D11REGOFFS(xmtfifodef), txfifo_def); - bcma_write16(core, D11REGOFFS(xmtfifodef1), txfifo_def1); + W_REG(®s->xmtfifocmd, txfifo_cmd); + W_REG(®s->xmtfifodef, txfifo_def); + W_REG(®s->xmtfifodef1, txfifo_def1); - bcma_write16(core, D11REGOFFS(xmtfifocmd), txfifo_cmd); + W_REG(®s->xmtfifocmd, txfifo_cmd); txfifo_startblk += wlc_hw->xmtfifo_sz[fifo_nu]; } @@ -2132,27 +2186,27 @@ static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw) void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode) { - struct bcma_device *core = wlc_hw->d11core; + struct d11regs __iomem *regs = wlc_hw->regs; - if ((ai_get_chip_id(wlc_hw->sih) == BCM43224_CHIP_ID) || - (ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID)) { + if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) || + (wlc_hw->sih->chip == BCM43225_CHIP_ID)) { if (spurmode == WL_SPURAVOID_ON2) { /* 126Mhz */ - bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x2082); - bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8); + W_REG(®s->tsf_clk_frac_l, 0x2082); + W_REG(®s->tsf_clk_frac_h, 0x8); } else if (spurmode == WL_SPURAVOID_ON1) { /* 123Mhz */ - bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x5341); - bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8); + W_REG(®s->tsf_clk_frac_l, 0x5341); + W_REG(®s->tsf_clk_frac_h, 0x8); } else { /* 120Mhz */ - bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x8889); - bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8); + W_REG(®s->tsf_clk_frac_l, 0x8889); + W_REG(®s->tsf_clk_frac_h, 0x8); } } else if (BRCMS_ISLCNPHY(wlc_hw->band)) { if (spurmode == WL_SPURAVOID_ON1) { /* 82Mhz */ - bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x7CE0); - bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0xC); + W_REG(®s->tsf_clk_frac_l, 0x7CE0); + W_REG(®s->tsf_clk_frac_h, 0xC); } else { /* 80Mhz */ - bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0xCCCD); - bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0xC); + W_REG(®s->tsf_clk_frac_l, 0xCCCD); + W_REG(®s->tsf_clk_frac_h, 0xC); } } } @@ -2161,8 +2215,11 @@ void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode) static void brcms_c_gpio_init(struct brcms_c_info *wlc) { struct brcms_hardware *wlc_hw = wlc->hw; + struct d11regs __iomem *regs; u32 gc, gm; + regs = wlc_hw->regs; + /* use GPIO select 0 to get all gpio signals from the gpio out reg */ brcms_b_mctrl(wlc_hw, MCTL_GPOUT_SEL_MASK, 0); @@ -2193,10 +2250,10 @@ static void brcms_c_gpio_init(struct brcms_c_info *wlc) * The board itself is powered by these GPIOs * (when not sending pattern) so set them high */ - 
bcma_set16(wlc_hw->d11core, D11REGOFFS(psm_gpio_oe), - (BOARD_GPIO_12 | BOARD_GPIO_13)); - bcma_set16(wlc_hw->d11core, D11REGOFFS(psm_gpio_out), - (BOARD_GPIO_12 | BOARD_GPIO_13)); + OR_REG(®s->psm_gpio_oe, + (BOARD_GPIO_12 | BOARD_GPIO_13)); + OR_REG(®s->psm_gpio_out, + (BOARD_GPIO_12 | BOARD_GPIO_13)); /* Enable antenna diversity, use 2x4 mode */ brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN, @@ -2223,7 +2280,7 @@ static void brcms_c_gpio_init(struct brcms_c_info *wlc) static void brcms_ucode_write(struct brcms_hardware *wlc_hw, const __le32 ucode[], const size_t nbytes) { - struct bcma_device *core = wlc_hw->d11core; + struct d11regs __iomem *regs = wlc_hw->regs; uint i; uint count; @@ -2231,11 +2288,10 @@ static void brcms_ucode_write(struct brcms_hardware *wlc_hw, count = (nbytes / sizeof(u32)); - bcma_write32(core, D11REGOFFS(objaddr), - OBJADDR_AUTO_INC | OBJADDR_UCM_SEL); - (void)bcma_read32(core, D11REGOFFS(objaddr)); + W_REG(®s->objaddr, (OBJADDR_AUTO_INC | OBJADDR_UCM_SEL)); + (void)R_REG(®s->objaddr); for (i = 0; i < count; i++) - bcma_write32(core, D11REGOFFS(objdata), le32_to_cpu(ucode[i])); + W_REG(®s->objdata, le32_to_cpu(ucode[i])); } @@ -2296,12 +2352,19 @@ void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw, u8 antsel_type) wlc_phy_antsel_type_set(wlc_hw->band->pi, antsel_type); } +static void brcms_c_fatal_error(struct brcms_c_info *wlc) +{ + wiphy_err(wlc->wiphy, "wl%d: fatal error, reinitializing\n", + wlc->pub->unit); + brcms_init(wlc->wl); +} + static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw) { bool fatal = false; uint unit; uint intstatus, idx; - struct bcma_device *core = wlc_hw->d11core; + struct d11regs __iomem *regs = wlc_hw->regs; struct wiphy *wiphy = wlc_hw->wlc->wiphy; unit = wlc_hw->unit; @@ -2309,9 +2372,7 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw) for (idx = 0; idx < NFIFO; idx++) { /* read intstatus register and ignore any non-error bits */ intstatus = - bcma_read32(core, - D11REGOFFS(intctrlregs[idx].intstatus)) & - I_ERRORS; + R_REG(®s->intctrlregs[idx].intstatus) & I_ERRORS; if (!intstatus) continue; @@ -2353,12 +2414,11 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw) } if (fatal) { - brcms_fatal_error(wlc_hw->wlc->wl); /* big hammer */ + brcms_c_fatal_error(wlc_hw->wlc); /* big hammer */ break; } else - bcma_write32(core, - D11REGOFFS(intctrlregs[idx].intstatus), - intstatus); + W_REG(®s->intctrlregs[idx].intstatus, + intstatus); } } @@ -2366,7 +2426,28 @@ void brcms_c_intrson(struct brcms_c_info *wlc) { struct brcms_hardware *wlc_hw = wlc->hw; wlc->macintmask = wlc->defmacintmask; - bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), wlc->macintmask); + W_REG(&wlc_hw->regs->macintmask, wlc->macintmask); +} + +/* + * callback for siutils.c, which has only wlc handler, no wl they both check + * up, not only because there is no need to off/restore d11 interrupt but also + * because per-port code may require sync with valid interrupt. 
+ */ +static u32 brcms_c_wlintrsoff(struct brcms_c_info *wlc) +{ + if (!wlc->hw->up) + return 0; + + return brcms_intrsoff(wlc->wl); +} + +static void brcms_c_wlintrsrestore(struct brcms_c_info *wlc, u32 macintmask) +{ + if (!wlc->hw->up) + return; + + brcms_intrsrestore(wlc->wl, macintmask); } u32 brcms_c_intrsoff(struct brcms_c_info *wlc) @@ -2379,8 +2460,8 @@ u32 brcms_c_intrsoff(struct brcms_c_info *wlc) macintmask = wlc->macintmask; /* isr can still happen */ - bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), 0); - (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(macintmask)); + W_REG(&wlc_hw->regs->macintmask, 0); + (void)R_REG(&wlc_hw->regs->macintmask); /* sync readback */ udelay(1); /* ensure int line is no longer driven */ wlc->macintmask = 0; @@ -2395,10 +2476,9 @@ void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask) return; wlc->macintmask = macintmask; - bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), wlc->macintmask); + W_REG(&wlc_hw->regs->macintmask, wlc->macintmask); } -/* assumes that the d11 MAC is enabled */ static void brcms_b_tx_fifo_suspend(struct brcms_hardware *wlc_hw, uint tx_fifo) { @@ -2455,12 +2535,11 @@ static void brcms_b_tx_fifo_resume(struct brcms_hardware *wlc_hw, } } -/* precondition: requires the mac core to be enabled */ -static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool mute_tx) +static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool on, u32 flags) { static const u8 null_ether_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; - if (mute_tx) { + if (on) { /* suspend tx fifos */ brcms_b_tx_fifo_suspend(wlc_hw, TX_DATA_FIFO); brcms_b_tx_fifo_suspend(wlc_hw, TX_CTL_FIFO); @@ -2482,20 +2561,14 @@ static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool mute_tx) wlc_hw->etheraddr); } - wlc_phy_mute_upd(wlc_hw->band->pi, mute_tx, 0); + wlc_phy_mute_upd(wlc_hw->band->pi, on, flags); - if (mute_tx) + if (on) brcms_c_ucode_mute_override_set(wlc_hw); else brcms_c_ucode_mute_override_clear(wlc_hw); } -void -brcms_c_mute(struct brcms_c_info *wlc, bool mute_tx) -{ - brcms_b_mute(wlc->hw, mute_tx); -} - /* * Read and clear macintmask and macintstatus and intstatus registers. * This routine should be called with interrupts off @@ -2507,11 +2580,11 @@ brcms_c_mute(struct brcms_c_info *wlc, bool mute_tx) static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr) { struct brcms_hardware *wlc_hw = wlc->hw; - struct bcma_device *core = wlc_hw->d11core; + struct d11regs __iomem *regs = wlc_hw->regs; u32 macintstatus; /* macintstatus includes a DMA interrupt summary bit */ - macintstatus = bcma_read32(core, D11REGOFFS(macintstatus)); + macintstatus = R_REG(®s->macintstatus); BCMMSG(wlc->wiphy, "wl%d: macintstatus: 0x%x\n", wlc_hw->unit, macintstatus); @@ -2538,12 +2611,12 @@ static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr) * consequences */ /* turn off the interrupts */ - bcma_write32(core, D11REGOFFS(macintmask), 0); - (void)bcma_read32(core, D11REGOFFS(macintmask)); + W_REG(®s->macintmask, 0); + (void)R_REG(®s->macintmask); /* sync readback */ wlc->macintmask = 0; /* clear device interrupts */ - bcma_write32(core, D11REGOFFS(macintstatus), macintstatus); + W_REG(®s->macintstatus, macintstatus); /* MI_DMAINT is indication of non-zero intstatus */ if (macintstatus & MI_DMAINT) @@ -2552,8 +2625,8 @@ static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr) * RX_FIFO. If MI_DMAINT is set, assume it * is set and clear the interrupt. 
*/ - bcma_write32(core, D11REGOFFS(intctrlregs[RX_FIFO].intstatus), - DEF_RXINTMASK); + W_REG(&regs->intctrlregs[RX_FIFO].intstatus, + DEF_RXINTMASK); return macintstatus; } @@ -2616,7 +2689,7 @@ bool brcms_c_isr(struct brcms_c_info *wlc, bool *wantdpc) void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc) { struct brcms_hardware *wlc_hw = wlc->hw; - struct bcma_device *core = wlc_hw->d11core; + struct d11regs __iomem *regs = wlc_hw->regs; u32 mc, mi; struct wiphy *wiphy = wlc->wiphy; @@ -2633,7 +2706,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc) /* force the core awake */ brcms_c_ucode_wake_override_set(wlc_hw, BRCMS_WAKE_OVERRIDE_MACSUSPEND); - mc = bcma_read32(core, D11REGOFFS(maccontrol)); + mc = R_REG(&regs->maccontrol); if (mc == 0xffffffff) { wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit, @@ -2645,7 +2718,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc) WARN_ON(!(mc & MCTL_PSM_RUN)); WARN_ON(!(mc & MCTL_EN_MAC)); - mi = bcma_read32(core, D11REGOFFS(macintstatus)); + mi = R_REG(&regs->macintstatus); if (mi == 0xffffffff) { wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit, __func__); @@ -2656,21 +2729,21 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc) brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, 0); - SPINWAIT(!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD), + SPINWAIT(!(R_REG(&regs->macintstatus) & MI_MACSSPNDD), BRCMS_MAX_MAC_SUSPEND); - if (!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD)) { + if (!(R_REG(&regs->macintstatus) & MI_MACSSPNDD)) { wiphy_err(wiphy, "wl%d: wlc_suspend_mac_and_wait: waited %d uS" " and MI_MACSSPNDD is still not on.\n", wlc_hw->unit, BRCMS_MAX_MAC_SUSPEND); wiphy_err(wiphy, "wl%d: psmdebug 0x%08x, phydebug 0x%08x, " "psm_brc 0x%04x\n", wlc_hw->unit, - bcma_read32(core, D11REGOFFS(psmdebug)), - bcma_read32(core, D11REGOFFS(phydebug)), - bcma_read16(core, D11REGOFFS(psm_brc))); + R_REG(&regs->psmdebug), + R_REG(&regs->phydebug), + R_REG(&regs->psm_brc)); } - mc = bcma_read32(core, D11REGOFFS(maccontrol)); + mc = R_REG(&regs->maccontrol); if (mc == 0xffffffff) { wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit, __func__); @@ -2685,7 +2758,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc) void brcms_c_enable_mac(struct brcms_c_info *wlc) { struct brcms_hardware *wlc_hw = wlc->hw; - struct bcma_device *core = wlc_hw->d11core; + struct d11regs __iomem *regs = wlc_hw->regs; u32 mc, mi; BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit, @@ -2698,20 +2771,20 @@ void brcms_c_enable_mac(struct brcms_c_info *wlc) if (wlc_hw->mac_suspend_depth > 0) return; - mc = bcma_read32(core, D11REGOFFS(maccontrol)); + mc = R_REG(&regs->maccontrol); WARN_ON(mc & MCTL_PSM_JMP_0); WARN_ON(mc & MCTL_EN_MAC); WARN_ON(!(mc & MCTL_PSM_RUN)); brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, MCTL_EN_MAC); - bcma_write32(core, D11REGOFFS(macintstatus), MI_MACSSPNDD); + W_REG(&regs->macintstatus, MI_MACSSPNDD); - mc = bcma_read32(core, D11REGOFFS(maccontrol)); + mc = R_REG(&regs->maccontrol); WARN_ON(mc & MCTL_PSM_JMP_0); WARN_ON(!(mc & MCTL_EN_MAC)); WARN_ON(!(mc & MCTL_PSM_RUN)); - mi = bcma_read32(core, D11REGOFFS(macintstatus)); + mi = R_REG(&regs->macintstatus); WARN_ON(mi & MI_MACSSPNDD); brcms_c_ucode_wake_override_clear(wlc_hw, @@ -2728,53 +2801,55 @@ void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw, u8 stf_mode) static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw) { - struct bcma_device *core = wlc_hw->d11core; + struct d11regs __iomem *regs; u32 w, val; struct wiphy *wiphy =
wlc_hw->wlc->wiphy; BCMMSG(wiphy, "wl%d\n", wlc_hw->unit); + regs = wlc_hw->regs; + /* Validate dchip register access */ - bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0); - (void)bcma_read32(core, D11REGOFFS(objaddr)); - w = bcma_read32(core, D11REGOFFS(objdata)); + W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0); + (void)R_REG(&regs->objaddr); + w = R_REG(&regs->objdata); /* Can we write and read back a 32bit register? */ - bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0); - (void)bcma_read32(core, D11REGOFFS(objaddr)); - bcma_write32(core, D11REGOFFS(objdata), (u32) 0xaa5555aa); + W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0); + (void)R_REG(&regs->objaddr); + W_REG(&regs->objdata, (u32) 0xaa5555aa); - bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0); - (void)bcma_read32(core, D11REGOFFS(objaddr)); - val = bcma_read32(core, D11REGOFFS(objdata)); + W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0); + (void)R_REG(&regs->objaddr); + val = R_REG(&regs->objdata); if (val != (u32) 0xaa5555aa) { wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, " "expected 0xaa5555aa\n", wlc_hw->unit, val); return false; } - bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0); - (void)bcma_read32(core, D11REGOFFS(objaddr)); - bcma_write32(core, D11REGOFFS(objdata), (u32) 0x55aaaa55); + W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0); + (void)R_REG(&regs->objaddr); + W_REG(&regs->objdata, (u32) 0x55aaaa55); - bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0); - (void)bcma_read32(core, D11REGOFFS(objaddr)); - val = bcma_read32(core, D11REGOFFS(objdata)); + W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0); + (void)R_REG(&regs->objaddr); + val = R_REG(&regs->objdata); if (val != (u32) 0x55aaaa55) { wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, " "expected 0x55aaaa55\n", wlc_hw->unit, val); return false; } - bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0); - (void)bcma_read32(core, D11REGOFFS(objaddr)); - bcma_write32(core, D11REGOFFS(objdata), w); + W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0); + (void)R_REG(&regs->objaddr); + W_REG(&regs->objdata, w); /* clear CFPStart */ - bcma_write32(core, D11REGOFFS(tsf_cfpstart), 0); + W_REG(&regs->tsf_cfpstart, 0); - w = bcma_read32(core, D11REGOFFS(maccontrol)); + w = R_REG(&regs->maccontrol); if ((w != (MCTL_IHR_EN | MCTL_WAKE)) && (w != (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE))) { wiphy_err(wiphy, "wl%d: validate_chip_access: maccontrol = " @@ -2791,38 +2866,38 @@ static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw) void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on) { - struct bcma_device *core = wlc_hw->d11core; + struct d11regs __iomem *regs; u32 tmp; BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); tmp = 0; + regs = wlc_hw->regs; if (on) { - if ((ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) { - bcma_set32(core, D11REGOFFS(clk_ctl_st), - CCS_ERSRC_REQ_HT | - CCS_ERSRC_REQ_D11PLL | - CCS_ERSRC_REQ_PHYPLL); - SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) & - CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT, + if ((wlc_hw->sih->chip == BCM4313_CHIP_ID)) { + OR_REG(&regs->clk_ctl_st, + (CCS_ERSRC_REQ_HT | CCS_ERSRC_REQ_D11PLL | + CCS_ERSRC_REQ_PHYPLL)); + SPINWAIT((R_REG(&regs->clk_ctl_st) & + (CCS_ERSRC_AVAIL_HT)) != (CCS_ERSRC_AVAIL_HT), PHYPLL_WAIT_US); - tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st)); - if ((tmp & CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT) + tmp = R_REG(&regs->clk_ctl_st); + if ((tmp & (CCS_ERSRC_AVAIL_HT)) != + (CCS_ERSRC_AVAIL_HT)) wiphy_err(wlc_hw->wlc->wiphy, "%s: turn on PHY" " PLL failed\n", __func__); } else { -
bcma_set32(core, D11REGOFFS(clk_ctl_st), - tmp | CCS_ERSRC_REQ_D11PLL | - CCS_ERSRC_REQ_PHYPLL); - SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) & + OR_REG(®s->clk_ctl_st, + (CCS_ERSRC_REQ_D11PLL | CCS_ERSRC_REQ_PHYPLL)); + SPINWAIT((R_REG(®s->clk_ctl_st) & (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL)) != (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL), PHYPLL_WAIT_US); - tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st)); + tmp = R_REG(®s->clk_ctl_st); if ((tmp & (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL)) != @@ -2836,9 +2911,8 @@ void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on) * be requesting it; so we'll deassert the request but * not wait for status to comply. */ - bcma_mask32(core, D11REGOFFS(clk_ctl_st), - ~CCS_ERSRC_REQ_PHYPLL); - (void)bcma_read32(core, D11REGOFFS(clk_ctl_st)); + AND_REG(®s->clk_ctl_st, ~CCS_ERSRC_REQ_PHYPLL); + tmp = R_REG(®s->clk_ctl_st); } } @@ -2866,7 +2940,7 @@ static void brcms_c_coredisable(struct brcms_hardware *wlc_hw) brcms_b_core_phypll_ctl(wlc_hw, false); wlc_hw->clk = false; - bcma_core_disable(wlc_hw->d11core, 0); + ai_core_disable(wlc_hw->sih, 0); wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false); } @@ -2890,31 +2964,35 @@ static void brcms_c_flushqueues(struct brcms_c_info *wlc) static u16 brcms_b_read_objmem(struct brcms_hardware *wlc_hw, uint offset, u32 sel) { - struct bcma_device *core = wlc_hw->d11core; - u16 objoff = D11REGOFFS(objdata); + struct d11regs __iomem *regs = wlc_hw->regs; + u16 __iomem *objdata_lo = (u16 __iomem *)®s->objdata; + u16 __iomem *objdata_hi = objdata_lo + 1; + u16 v; - bcma_write32(core, D11REGOFFS(objaddr), sel | (offset >> 2)); - (void)bcma_read32(core, D11REGOFFS(objaddr)); + W_REG(®s->objaddr, sel | (offset >> 2)); + (void)R_REG(®s->objaddr); if (offset & 2) - objoff += 2; + v = R_REG(objdata_hi); + else + v = R_REG(objdata_lo); - return bcma_read16(core, objoff); -; + return v; } static void brcms_b_write_objmem(struct brcms_hardware *wlc_hw, uint offset, u16 v, u32 sel) { - struct bcma_device *core = wlc_hw->d11core; - u16 objoff = D11REGOFFS(objdata); + struct d11regs __iomem *regs = wlc_hw->regs; + u16 __iomem *objdata_lo = (u16 __iomem *)®s->objdata; + u16 __iomem *objdata_hi = objdata_lo + 1; - bcma_write32(core, D11REGOFFS(objaddr), sel | (offset >> 2)); - (void)bcma_read32(core, D11REGOFFS(objaddr)); + W_REG(®s->objaddr, sel | (offset >> 2)); + (void)R_REG(®s->objaddr); if (offset & 2) - objoff += 2; - - bcma_write16(core, objoff, v); + W_REG(objdata_hi, v); + else + W_REG(objdata_lo, v); } /* @@ -3000,14 +3078,14 @@ static void brcms_b_retrylimit_upd(struct brcms_hardware *wlc_hw, /* write retry limit to SCR, shouldn't need to suspend */ if (wlc_hw->up) { - bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr), - OBJADDR_SCR_SEL | S_DOT11_SRC_LMT); - (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr)); - bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), wlc_hw->SRL); - bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr), - OBJADDR_SCR_SEL | S_DOT11_LRC_LMT); - (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr)); - bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), wlc_hw->LRL); + W_REG(&wlc_hw->regs->objaddr, + OBJADDR_SCR_SEL | S_DOT11_SRC_LMT); + (void)R_REG(&wlc_hw->regs->objaddr); + W_REG(&wlc_hw->regs->objdata, wlc_hw->SRL); + W_REG(&wlc_hw->regs->objaddr, + OBJADDR_SCR_SEL | S_DOT11_LRC_LMT); + (void)R_REG(&wlc_hw->regs->objaddr); + W_REG(&wlc_hw->regs->objdata, wlc_hw->LRL); } } @@ -3054,7 +3132,7 @@ static bool brcms_c_ps_allowed(struct brcms_c_info *wlc) 
return false; /* disallow PS when one of these meets when not scanning */ - if (wlc->filter_flags & FIF_PROMISC_IN_BSS) + if (wlc->monitor) return false; if (cfg->associated) { @@ -3189,9 +3267,9 @@ void brcms_c_init_scb(struct scb *scb) static void brcms_b_coreinit(struct brcms_c_info *wlc) { struct brcms_hardware *wlc_hw = wlc->hw; - struct bcma_device *core = wlc_hw->d11core; + struct d11regs __iomem *regs; u32 sflags; - u32 bcnint_us; + uint bcnint_us; uint i = 0; bool fifosz_fixup = false; int err = 0; @@ -3199,6 +3277,8 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc) struct wiphy *wiphy = wlc->wiphy; struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode; + regs = wlc_hw->regs; + BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit); /* reset PSM */ @@ -3211,20 +3291,20 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc) fifosz_fixup = true; /* let the PSM run to the suspended state, set mode to BSS STA */ - bcma_write32(core, D11REGOFFS(macintstatus), -1); + W_REG(®s->macintstatus, -1); brcms_b_mctrl(wlc_hw, ~0, (MCTL_IHR_EN | MCTL_INFRA | MCTL_PSM_RUN | MCTL_WAKE)); /* wait for ucode to self-suspend after auto-init */ - SPINWAIT(((bcma_read32(core, D11REGOFFS(macintstatus)) & - MI_MACSSPNDD) == 0), 1000 * 1000); - if ((bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD) == 0) + SPINWAIT(((R_REG(®s->macintstatus) & MI_MACSSPNDD) == 0), + 1000 * 1000); + if ((R_REG(®s->macintstatus) & MI_MACSSPNDD) == 0) wiphy_err(wiphy, "wl%d: wlc_coreinit: ucode did not self-" "suspend!\n", wlc_hw->unit); brcms_c_gpio_init(wlc); - sflags = bcma_aread32(core, BCMA_IOST); + sflags = ai_core_sflags(wlc_hw->sih, 0, 0); if (D11REV_IS(wlc_hw->corerev, 23)) { if (BRCMS_ISNPHY(wlc_hw->band)) @@ -3288,7 +3368,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc) wlc_hw->xmtfifo_sz[i], i); /* make sure we can still talk to the mac */ - WARN_ON(bcma_read32(core, D11REGOFFS(maccontrol)) == 0xffffffff); + WARN_ON(R_REG(®s->maccontrol) == 0xffffffff); /* band-specific inits done by wlc_bsinit() */ @@ -3297,7 +3377,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc) brcms_b_write_shm(wlc_hw, M_MAX_ANTCNT, ANTCNT); /* enable one rx interrupt per received frame */ - bcma_write32(core, D11REGOFFS(intrcvlazy[0]), (1 << IRL_FC_SHIFT)); + W_REG(®s->intrcvlazy[0], (1 << IRL_FC_SHIFT)); /* set the station mode (BSS STA) */ brcms_b_mctrl(wlc_hw, @@ -3306,21 +3386,19 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc) /* set up Beacon interval */ bcnint_us = 0x8000 << 10; - bcma_write32(core, D11REGOFFS(tsf_cfprep), - (bcnint_us << CFPREP_CBI_SHIFT)); - bcma_write32(core, D11REGOFFS(tsf_cfpstart), bcnint_us); - bcma_write32(core, D11REGOFFS(macintstatus), MI_GP1); + W_REG(®s->tsf_cfprep, (bcnint_us << CFPREP_CBI_SHIFT)); + W_REG(®s->tsf_cfpstart, bcnint_us); + W_REG(®s->macintstatus, MI_GP1); /* write interrupt mask */ - bcma_write32(core, D11REGOFFS(intctrlregs[RX_FIFO].intmask), - DEF_RXINTMASK); + W_REG(®s->intctrlregs[RX_FIFO].intmask, DEF_RXINTMASK); /* allow the MAC to control the PHY clock (dynamic on/off) */ brcms_b_macphyclk_set(wlc_hw, ON); /* program dynamic clock control fast powerup delay register */ wlc->fastpwrup_dly = ai_clkctl_fast_pwrup_delay(wlc_hw->sih); - bcma_write16(core, D11REGOFFS(scc_fastpwrup_dly), wlc->fastpwrup_dly); + W_REG(®s->scc_fastpwrup_dly, wlc->fastpwrup_dly); /* tell the ucode the corerev */ brcms_b_write_shm(wlc_hw, M_MACHW_VER, (u16) wlc_hw->corerev); @@ -3333,21 +3411,19 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc) machwcap >> 16) & 
0xffff)); /* write retry limits to SCR, this done after PSM init */ - bcma_write32(core, D11REGOFFS(objaddr), - OBJADDR_SCR_SEL | S_DOT11_SRC_LMT); - (void)bcma_read32(core, D11REGOFFS(objaddr)); - bcma_write32(core, D11REGOFFS(objdata), wlc_hw->SRL); - bcma_write32(core, D11REGOFFS(objaddr), - OBJADDR_SCR_SEL | S_DOT11_LRC_LMT); - (void)bcma_read32(core, D11REGOFFS(objaddr)); - bcma_write32(core, D11REGOFFS(objdata), wlc_hw->LRL); + W_REG(®s->objaddr, OBJADDR_SCR_SEL | S_DOT11_SRC_LMT); + (void)R_REG(®s->objaddr); + W_REG(®s->objdata, wlc_hw->SRL); + W_REG(®s->objaddr, OBJADDR_SCR_SEL | S_DOT11_LRC_LMT); + (void)R_REG(®s->objaddr); + W_REG(®s->objdata, wlc_hw->LRL); /* write rate fallback retry limits */ brcms_b_write_shm(wlc_hw, M_SFRMTXCNTFBRTHSD, wlc_hw->SFBL); brcms_b_write_shm(wlc_hw, M_LFRMTXCNTFBRTHSD, wlc_hw->LFBL); - bcma_mask16(core, D11REGOFFS(ifs_ctl), 0x0FFF); - bcma_write16(core, D11REGOFFS(ifs_aifsn), EDCF_AIFSN_MIN); + AND_REG(®s->ifs_ctl, 0x0FFF); + W_REG(®s->ifs_aifsn, EDCF_AIFSN_MIN); /* init the tx dma engines */ for (i = 0; i < NFIFO; i++) { @@ -3361,7 +3437,8 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc) } void -static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec) { +static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec, + bool mute) { u32 macintmask; bool fastclk; struct brcms_c_info *wlc = wlc_hw->wlc; @@ -3386,6 +3463,10 @@ static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec) { /* core-specific initialization */ brcms_b_coreinit(wlc); + /* suspend the tx fifos and mute the phy for preism cac time */ + if (mute) + brcms_b_mute(wlc_hw, ON, PHY_MUTE_FOR_PREISM); + /* band-specific inits */ brcms_b_bsinit(wlc, chanspec); @@ -3575,32 +3656,42 @@ static void brcms_c_bandinit_ordered(struct brcms_c_info *wlc, brcms_c_set_phy_chanspec(wlc, chanspec); } -/* - * Set or clear filtering related maccontrol bits based on - * specified filter flags - */ -void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags) +static void brcms_c_mac_bcn_promisc(struct brcms_c_info *wlc) { - u32 promisc_bits = 0; - - wlc->filter_flags = filter_flags; + if (wlc->bcnmisc_monitor) + brcms_b_mctrl(wlc->hw, MCTL_BCNS_PROMISC, MCTL_BCNS_PROMISC); + else + brcms_b_mctrl(wlc->hw, MCTL_BCNS_PROMISC, 0); +} - if (filter_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) - promisc_bits |= MCTL_PROMISC; +void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc, bool promisc) +{ + wlc->bcnmisc_monitor = promisc; + brcms_c_mac_bcn_promisc(wlc); +} - if (filter_flags & FIF_BCN_PRBRESP_PROMISC) - promisc_bits |= MCTL_BCNS_PROMISC; +/* set or clear maccontrol bits MCTL_PROMISC and MCTL_KEEPCONTROL */ +static void brcms_c_mac_promisc(struct brcms_c_info *wlc) +{ + u32 promisc_bits = 0; - if (filter_flags & FIF_FCSFAIL) - promisc_bits |= MCTL_KEEPBADFCS; + /* + * promiscuous mode just sets MCTL_PROMISC + * Note: APs get all BSS traffic without the need to set + * the MCTL_PROMISC bit since all BSS data traffic is + * directed at the AP + */ + if (wlc->pub->promisc) + promisc_bits |= MCTL_PROMISC; - if (filter_flags & (FIF_CONTROL | FIF_PSPOLL)) - promisc_bits |= MCTL_KEEPCONTROL; + /* monitor mode needs both MCTL_PROMISC and MCTL_KEEPCONTROL + * Note: monitor mode also needs MCTL_BCNS_PROMISC, but that is + * handled in brcms_c_mac_bcn_promisc() + */ + if (wlc->monitor) + promisc_bits |= MCTL_PROMISC | MCTL_KEEPCONTROL; - brcms_b_mctrl(wlc->hw, - MCTL_PROMISC | MCTL_BCNS_PROMISC | - MCTL_KEEPCONTROL | MCTL_KEEPBADFCS, - promisc_bits); + 
brcms_b_mctrl(wlc->hw, MCTL_PROMISC | MCTL_KEEPCONTROL, promisc_bits); } /* @@ -3630,6 +3721,10 @@ static void brcms_c_ucode_mac_upd(struct brcms_c_info *wlc) } else { /* disable an active IBSS if we are not on the home channel */ } + + /* update the various promisc bits */ + brcms_c_mac_bcn_promisc(wlc); + brcms_c_mac_promisc(wlc); } static void brcms_c_write_rate_shm(struct brcms_c_info *wlc, u8 rate, @@ -3804,7 +3899,7 @@ static void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc) BCMMSG(wlc->wiphy, "wl%d: hps %d\n", wlc->pub->unit, hps); - v1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(maccontrol)); + v1 = R_REG(&wlc->regs->maccontrol); v2 = MCTL_WAKE; if (hps) v2 |= MCTL_HPS; @@ -3884,7 +3979,7 @@ static void brcms_c_set_home_chanspec(struct brcms_c_info *wlc, u16 chanspec) void brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec, - bool mute_tx, struct txpwr_limits *txpwr) + bool mute, struct txpwr_limits *txpwr) { uint bandunit; @@ -3910,7 +4005,7 @@ brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec, } } - wlc_phy_initcal_enable(wlc_hw->band->pi, !mute_tx); + wlc_phy_initcal_enable(wlc_hw->band->pi, !mute); if (!wlc_hw->up) { if (wlc_hw->clk) @@ -3922,7 +4017,7 @@ brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec, wlc_phy_txpower_limit_set(wlc_hw->band->pi, txpwr, chanspec); /* Update muting of the channel */ - brcms_b_mute(wlc_hw, mute_tx); + brcms_b_mute(wlc_hw, mute, 0); } } @@ -4110,7 +4205,7 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci, EDCF_TXOP2USEC(acp_shm.txop); acp_shm.aifs = (params->aifs & EDCF_AIFSN_MASK); - if (aci == IEEE80211_AC_VI && acp_shm.txop == 0 + if (aci == AC_VI && acp_shm.txop == 0 && acp_shm.aifs < EDCF_AIFSN_MAX) acp_shm.aifs++; @@ -4123,8 +4218,7 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci, acp_shm.cwmax = params->cw_max; acp_shm.cwcur = acp_shm.cwmin; acp_shm.bslots = - bcma_read16(wlc->hw->d11core, D11REGOFFS(tsf_random)) & - acp_shm.cwcur; + R_REG(&wlc->regs->tsf_random) & acp_shm.cwcur; acp_shm.reggap = acp_shm.bslots + acp_shm.aifs; /* Indicate the new params to the ucode */ acp_shm.status = brcms_b_read_shm(wlc->hw, (M_EDCF_QINFO + @@ -4148,7 +4242,7 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci, } } -static void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend) +void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend) { u16 aci; int i_ac; @@ -4161,7 +4255,7 @@ static void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend) }; /* ucode needs these parameters during its initialization */ const struct edcf_acparam *edcf_acp = &default_edcf_acparams[0]; - for (i_ac = 0; i_ac < IEEE80211_NUM_ACS; i_ac++, edcf_acp++) { + for (i_ac = 0; i_ac < AC_COUNT; i_ac++, edcf_acp++) { /* find out which ac this set of params applies to */ aci = (edcf_acp->ACI & EDCF_ACI_MASK) >> EDCF_ACI_SHIFT; @@ -4183,6 +4277,17 @@ static void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend) } } +/* maintain LED behavior in down state */ +static void brcms_c_down_led_upd(struct brcms_c_info *wlc) +{ + /* + * maintain LEDs while in down state, turn on sbclk if + * not available yet. 
Turn on sbclk if necessary + */ + brcms_b_pllreq(wlc->hw, true, BRCMS_PLLREQ_FLIP); + brcms_b_pllreq(wlc->hw, false, BRCMS_PLLREQ_FLIP); +} + static void brcms_c_radio_monitor_start(struct brcms_c_info *wlc) { /* Don't start the timer if HWRADIO feature is disabled */ @@ -4194,6 +4299,28 @@ static void brcms_c_radio_monitor_start(struct brcms_c_info *wlc) brcms_add_timer(wlc->radio_timer, TIMER_INTERVAL_RADIOCHK, true); } +static void brcms_c_radio_disable(struct brcms_c_info *wlc) +{ + if (!wlc->pub->up) { + brcms_c_down_led_upd(wlc); + return; + } + + brcms_c_radio_monitor_start(wlc); + brcms_down(wlc->wl); +} + +static void brcms_c_radio_enable(struct brcms_c_info *wlc) +{ + if (wlc->pub->up) + return; + + if (brcms_deviceremoved(wlc)) + return; + + brcms_up(wlc->wl); +} + static bool brcms_c_radio_monitor_stop(struct brcms_c_info *wlc) { if (!wlc->radio_monitor) @@ -4216,6 +4343,18 @@ static void brcms_c_radio_hwdisable_upd(struct brcms_c_info *wlc) mboolclr(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE); } +/* + * centralized radio disable/enable function, + * invoke radio enable/disable after updating hwradio status + */ +static void brcms_c_radio_upd(struct brcms_c_info *wlc) +{ + if (wlc->pub->radio_disabled) + brcms_c_radio_disable(wlc); + else + brcms_c_radio_enable(wlc); +} + /* update hwradio status and return it */ bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc) { @@ -4237,7 +4376,12 @@ static void brcms_c_radio_timer(void *arg) return; } + /* cap mpc off count */ + if (wlc->mpc_offcnt < BRCMS_MPC_MAX_DELAYCNT) + wlc->mpc_offcnt++; + brcms_c_radio_hwdisable_upd(wlc); + brcms_c_radio_upd(wlc); } /* common low-level watchdog code */ @@ -4263,6 +4407,60 @@ static void brcms_b_watchdog(void *arg) wlc_phy_watchdog(wlc_hw->band->pi); } +static void brcms_c_radio_mpc_upd(struct brcms_c_info *wlc) +{ + bool mpc_radio, radio_state; + + /* + * Clear the WL_RADIO_MPC_DISABLE bit when mpc feature is disabled + * in case the WL_RADIO_MPC_DISABLE bit was set. Stop the radio + * monitor also when WL_RADIO_MPC_DISABLE is the only reason that + * the radio is going down. + */ + if (!wlc->mpc) { + if (!wlc->pub->radio_disabled) + return; + mboolclr(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE); + brcms_c_radio_upd(wlc); + if (!wlc->pub->radio_disabled) + brcms_c_radio_monitor_stop(wlc); + return; + } + + /* + * sync ismpc logic with WL_RADIO_MPC_DISABLE bit in + * wlc->pub->radio_disabled to go ON, always call radio_upd + * synchronously to go OFF, postpone radio_upd to later when + * context is safe(e.g. watchdog) + */ + radio_state = + (mboolisset(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE) ? OFF : + ON); + mpc_radio = (brcms_c_ismpc(wlc) == true) ? OFF : ON; + + if (radio_state == ON && mpc_radio == OFF) + wlc->mpc_delay_off = wlc->mpc_dlycnt; + else if (radio_state == OFF && mpc_radio == ON) { + mboolclr(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE); + brcms_c_radio_upd(wlc); + if (wlc->mpc_offcnt < BRCMS_MPC_THRESHOLD) + wlc->mpc_dlycnt = BRCMS_MPC_MAX_DELAYCNT; + else + wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT; + } + /* + * Below logic is meant to capture the transition from mpc off + * to mpc on for reasons other than wlc->mpc_delay_off keeping + * the mpc off. 
In that case reset wlc->mpc_delay_off to + * wlc->mpc_dlycnt, so that we restart the countdown of mpc_delay_off + */ + if ((wlc->prev_non_delay_mpc == false) && + (brcms_c_is_non_delay_mpc(wlc) == true) && wlc->mpc_delay_off) + wlc->mpc_delay_off = wlc->mpc_dlycnt; + + wlc->prev_non_delay_mpc = brcms_c_is_non_delay_mpc(wlc); +} + /* common watchdog code */ static void brcms_c_watchdog(void *arg) { @@ -4283,7 +4481,21 @@ static void brcms_c_watchdog(void *arg) /* increment second count */ wlc->pub->now++; + /* delay radio disable */ + if (wlc->mpc_delay_off) { + if (--wlc->mpc_delay_off == 0) { + mboolset(wlc->pub->radio_disabled, + WL_RADIO_MPC_DISABLE); + if (wlc->mpc && brcms_c_ismpc(wlc)) + wlc->mpc_offcnt = 0; + } + } + + /* mpc sync */ + brcms_c_radio_mpc_upd(wlc); + /* radio sync: sw/hw/mpc --> radio_disable/radio_enable */ brcms_c_radio_hwdisable_upd(wlc); + brcms_c_radio_upd(wlc); /* if radio is disable, driver may be down, quit here */ if (wlc->pub->radio_disabled) return; @@ -4387,6 +4599,9 @@ static void brcms_c_info_init(struct brcms_c_info *wlc, int unit) /* WME QoS mode is Auto by default */ wlc->pub->_ampdu = AMPDU_AGG_HOST; wlc->pub->bcmerror = 0; + + /* initialize mpc delay */ + wlc->mpc_delay_off = wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT; } static uint brcms_c_attach_module(struct brcms_c_info *wlc) @@ -4432,21 +4647,21 @@ struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc) * initialize software state for each core and band * put the whole chip in reset(driver down state), no clock */ -static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core, - uint unit, bool piomode) +static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device, + uint unit, bool piomode, void __iomem *regsva, + struct pci_dev *btparam) { struct brcms_hardware *wlc_hw; + struct d11regs __iomem *regs; char *macaddr = NULL; uint err = 0; uint j; bool wme = false; struct shared_phy_params sha_params; struct wiphy *wiphy = wlc->wiphy; - struct pci_dev *pcidev = core->bus->host_pci; - BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit, - pcidev->vendor, - pcidev->device); + BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit, vendor, + device); wme = true; @@ -4463,7 +4678,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core, * Do the hardware portion of the attach. Also initialize software * state that depends on the particular hardware we are running. 
*/ - wlc_hw->sih = ai_attach(core->bus); + wlc_hw->sih = ai_attach(regsva, btparam); if (wlc_hw->sih == NULL) { wiphy_err(wiphy, "wl%d: brcms_b_attach: si_attach failed\n", unit); @@ -4472,19 +4687,25 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core, } /* verify again the device is supported */ - if (!brcms_c_chipmatch(pcidev->vendor, pcidev->device)) { + if (!brcms_c_chipmatch(vendor, device)) { wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported " "vendor/device (0x%x/0x%x)\n", - unit, pcidev->vendor, pcidev->device); + unit, vendor, device); err = 12; goto fail; } - wlc_hw->vendorid = pcidev->vendor; - wlc_hw->deviceid = pcidev->device; + wlc_hw->vendorid = vendor; + wlc_hw->deviceid = device; + + /* set bar0 window to point at D11 core */ + wlc_hw->regs = (struct d11regs __iomem *) + ai_setcore(wlc_hw->sih, D11_CORE_ID, 0); + wlc_hw->corerev = ai_corerev(wlc_hw->sih); - wlc_hw->d11core = core; - wlc_hw->corerev = core->id.rev; + regs = wlc_hw->regs; + + wlc->regs = wlc_hw->regs; /* validate chip, chiprev and corerev */ if (!brcms_c_isgoodchip(wlc_hw)) { @@ -4519,9 +4740,8 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core, wlc_hw->boardrev = (u16) j; if (!brcms_c_validboardtype(wlc_hw)) { wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported Broadcom " - "board type (0x%x)" " or revision level (0x%x)\n", - unit, ai_get_boardtype(wlc_hw->sih), - wlc_hw->boardrev); + "board type (0x%x)" " or revision level (0x%x)\n", + unit, wlc_hw->sih->boardtype, wlc_hw->boardrev); err = 15; goto fail; } @@ -4542,7 +4762,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core, else wlc_hw->_nbands = 1; - if ((ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID)) + if ((wlc_hw->sih->chip == BCM43225_CHIP_ID)) wlc_hw->_nbands = 1; /* BMAC_NOTE: remove init of pub values when brcms_c_attach() @@ -4574,14 +4794,16 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core, sha_params.corerev = wlc_hw->corerev; sha_params.vid = wlc_hw->vendorid; sha_params.did = wlc_hw->deviceid; - sha_params.chip = ai_get_chip_id(wlc_hw->sih); - sha_params.chiprev = ai_get_chiprev(wlc_hw->sih); - sha_params.chippkg = ai_get_chippkg(wlc_hw->sih); + sha_params.chip = wlc_hw->sih->chip; + sha_params.chiprev = wlc_hw->sih->chiprev; + sha_params.chippkg = wlc_hw->sih->chippkg; sha_params.sromrev = wlc_hw->sromrev; - sha_params.boardtype = ai_get_boardtype(wlc_hw->sih); + sha_params.boardtype = wlc_hw->sih->boardtype; sha_params.boardrev = wlc_hw->boardrev; + sha_params.boardvendor = wlc_hw->sih->boardvendor; sha_params.boardflags = wlc_hw->boardflags; sha_params.boardflags2 = wlc_hw->boardflags2; + sha_params.buscorerev = wlc_hw->sih->buscorerev; /* alloc and save pointer to shared phy state area */ wlc_hw->phy_sh = wlc_phy_shared_attach(&sha_params); @@ -4603,9 +4825,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core, wlc_hw->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G; wlc->band->bandunit = j; wlc->band->bandtype = j ? 
BRCM_BAND_5G : BRCM_BAND_2G; - wlc->core->coreidx = core->core_index; + wlc->core->coreidx = ai_coreidx(wlc_hw->sih); - wlc_hw->machwcap = bcma_read32(core, D11REGOFFS(machwcap)); + wlc_hw->machwcap = R_REG(®s->machwcap); wlc_hw->machwcap_backup = wlc_hw->machwcap; /* init tx fifo size */ @@ -4614,7 +4836,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core, /* Get a phy for this band */ wlc_hw->band->pi = - wlc_phy_attach(wlc_hw->phy_sh, core, + wlc_phy_attach(wlc_hw->phy_sh, regs, wlc_hw->band->bandtype, wlc->wiphy); if (wlc_hw->band->pi == NULL) { @@ -4688,6 +4910,10 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core, /* Match driver "down" state */ ai_pci_down(wlc_hw->sih); + /* register sb interrupt callback functions */ + ai_register_intr_callback(wlc_hw->sih, (void *)brcms_c_wlintrsoff, + (void *)brcms_c_wlintrsrestore, NULL, wlc); + /* turn off pll and xtal to match driver "down" state */ brcms_b_xtal(wlc_hw, OFF); @@ -4718,9 +4944,10 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core, goto fail; } - BCMMSG(wlc->wiphy, "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n", - wlc_hw->deviceid, wlc_hw->_nbands, ai_get_boardtype(wlc_hw->sih), - macaddr); + BCMMSG(wlc->wiphy, + "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n", + wlc_hw->deviceid, wlc_hw->_nbands, + wlc_hw->sih->boardtype, macaddr); return err; @@ -4958,6 +5185,7 @@ static int brcms_b_detach(struct brcms_c_info *wlc) * and per-port interrupt object may has been freed. this must * be done before sb core switch */ + ai_deregister_intr_callback(wlc_hw->sih); ai_pci_sleep(wlc_hw->sih); } @@ -5031,6 +5259,9 @@ static void brcms_c_ap_upd(struct brcms_c_info *wlc) { /* STA-BSS; short capable */ wlc->PLCPHdr_override = BRCMS_PLCP_SHORT; + + /* fixup mpc */ + wlc->mpc = true; } /* Initialize just the hardware when coming out of POR or S3/S5 system states */ @@ -5052,11 +5283,13 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw) ai_pci_fixcfg(wlc_hw->sih); /* - * TODO: test suspend/resume - * * AI chip doesn't restore bar0win2 on * hibernation/resume, need sw fixup */ + if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) || + (wlc_hw->sih->chip == BCM43225_CHIP_ID)) + wlc_hw->regs = (struct d11regs __iomem *) + ai_setcore(wlc_hw->sih, D11_CORE_ID, 0); /* * Inform phy that a POR reset has occurred so @@ -5068,7 +5301,7 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw) wlc_hw->wlc->pub->hw_up = true; if ((wlc_hw->boardflags & BFL_FEM) - && (ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) { + && (wlc_hw->sih->chip == BCM4313_CHIP_ID)) { if (! 
(wlc_hw->boardrev >= 0x1250 && (wlc_hw->boardflags & BFL_FEM_BT))) @@ -5143,7 +5376,7 @@ static void brcms_c_wme_retries_write(struct brcms_c_info *wlc) if (!wlc->clk) return; - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + for (ac = 0; ac < AC_COUNT; ac++) brcms_b_write_shm(wlc->hw, M_AC_TXLMT_ADDR(ac), wlc->wme_retries[ac]); } @@ -5163,7 +5396,7 @@ int brcms_c_up(struct brcms_c_info *wlc) } if ((wlc->pub->boardflags & BFL_FEM) - && (ai_get_chip_id(wlc->hw->sih) == BCM4313_CHIP_ID)) { + && (wlc->pub->sih->chip == BCM4313_CHIP_ID)) { if (wlc->pub->boardrev >= 0x1250 && (wlc->pub->boardflags & BFL_FEM_BT)) brcms_b_mhf(wlc->hw, MHF5, MHF5_4313_GPIOCTRL, @@ -5300,9 +5533,9 @@ static int brcms_b_down_finish(struct brcms_hardware *wlc_hw) } else { /* Reset and disable the core */ - if (bcma_core_is_enabled(wlc_hw->d11core)) { - if (bcma_read32(wlc_hw->d11core, - D11REGOFFS(maccontrol)) & MCTL_EN_MAC) + if (ai_iscoreup(wlc_hw->sih)) { + if (R_REG(&wlc_hw->regs->maccontrol) & + MCTL_EN_MAC) brcms_c_suspend_mac_and_wait(wlc_hw->wlc); callbacks += brcms_reset(wlc_hw->wlc->wl); brcms_c_coredisable(wlc_hw); @@ -5342,6 +5575,7 @@ uint brcms_c_down(struct brcms_c_info *wlc) if (!wlc->pub->up) return callbacks; + /* in between, mpc could try to bring down again.. */ wlc->going_down = true; callbacks += brcms_b_bmac_down_prep(wlc->hw); @@ -5618,7 +5852,7 @@ int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl) brcms_b_retrylimit_upd(wlc->hw, wlc->SRL, wlc->LRL); - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + for (ac = 0; ac < AC_COUNT; ac++) { wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac], EDCF_SHORT, wlc->SRL); wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac], @@ -5869,6 +6103,7 @@ void brcms_c_print_txdesc(struct d11txh *txh) u8 *rtsph = txh->RTSPhyHeader; struct ieee80211_rts rts = txh->rts_frame; + char hexbuf[256]; /* add plcp header along with txh descriptor */ printk(KERN_DEBUG "Raw TxDesc + plcp header:\n"); @@ -5889,16 +6124,17 @@ void brcms_c_print_txdesc(struct d11txh *txh) printk(KERN_DEBUG "XtraFrameTypes: %04x ", xtraft); printk(KERN_DEBUG "\n"); - print_hex_dump_bytes("SecIV:", DUMP_PREFIX_OFFSET, iv, sizeof(txh->IV)); - print_hex_dump_bytes("RA:", DUMP_PREFIX_OFFSET, - ra, sizeof(txh->TxFrameRA)); + brcmu_format_hex(hexbuf, iv, sizeof(txh->IV)); + printk(KERN_DEBUG "SecIV: %s\n", hexbuf); + brcmu_format_hex(hexbuf, ra, sizeof(txh->TxFrameRA)); + printk(KERN_DEBUG "RA: %s\n", hexbuf); printk(KERN_DEBUG "Fb FES Time: %04x ", tfestfb); - print_hex_dump_bytes("Fb RTS PLCP:", DUMP_PREFIX_OFFSET, - rtspfb, sizeof(txh->RTSPLCPFallback)); + brcmu_format_hex(hexbuf, rtspfb, sizeof(txh->RTSPLCPFallback)); + printk(KERN_DEBUG "RTS PLCP: %s ", hexbuf); printk(KERN_DEBUG "RTS DUR: %04x ", rtsdfb); - print_hex_dump_bytes("PLCP:", DUMP_PREFIX_OFFSET, - fragpfb, sizeof(txh->FragPLCPFallback)); + brcmu_format_hex(hexbuf, fragpfb, sizeof(txh->FragPLCPFallback)); + printk(KERN_DEBUG "PLCP: %s ", hexbuf); printk(KERN_DEBUG "DUR: %04x", fragdfb); printk(KERN_DEBUG "\n"); @@ -5913,18 +6149,18 @@ void brcms_c_print_txdesc(struct d11txh *txh) printk(KERN_DEBUG "MaxAggbyte_fb: %04x\n", mabyte_f); printk(KERN_DEBUG "MinByte: %04x\n", mmbyte); - print_hex_dump_bytes("RTS PLCP:", DUMP_PREFIX_OFFSET, - rtsph, sizeof(txh->RTSPhyHeader)); - print_hex_dump_bytes("RTS Frame:", DUMP_PREFIX_OFFSET, - (u8 *)&rts, sizeof(txh->rts_frame)); + brcmu_format_hex(hexbuf, rtsph, sizeof(txh->RTSPhyHeader)); + printk(KERN_DEBUG "RTS PLCP: %s ", hexbuf); + brcmu_format_hex(hexbuf, (u8 *) &rts, 
sizeof(txh->rts_frame)); + printk(KERN_DEBUG "RTS Frame: %s", hexbuf); printk(KERN_DEBUG "\n"); } #endif /* defined(BCMDBG) */ #if defined(BCMDBG) -static int +int brcms_c_format_flags(const struct brcms_c_bit_desc *bd, u32 flags, char *buf, - int len) + int len) { int i; char *p = buf; @@ -6680,7 +6916,7 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw, qos = ieee80211_is_data_qos(h->frame_control); /* compute length of frame in bytes for use in PLCP computations */ - len = p->len; + len = brcmu_pkttotlen(p); phylen = len + FCS_LEN; /* Get tx_info */ @@ -7459,11 +7695,11 @@ static void brcms_b_read_tsf(struct brcms_hardware *wlc_hw, u32 *tsf_l_ptr, u32 *tsf_h_ptr) { - struct bcma_device *core = wlc_hw->d11core; + struct d11regs __iomem *regs = wlc_hw->regs; /* read the tsf timer low, then high to get an atomic read */ - *tsf_l_ptr = bcma_read32(core, D11REGOFFS(tsf_timerlow)); - *tsf_h_ptr = bcma_read32(core, D11REGOFFS(tsf_timerhigh)); + *tsf_l_ptr = R_REG(®s->tsf_timerlow); + *tsf_h_ptr = R_REG(®s->tsf_timerhigh); } /* @@ -8017,6 +8253,12 @@ int brcms_c_get_tx_power(struct brcms_c_info *wlc) return (int)(qdbm / BRCMS_TXPWR_DB_FACTOR); } +void brcms_c_set_radio_mpc(struct brcms_c_info *wlc, bool mpc) +{ + wlc->mpc = mpc; + brcms_c_radio_mpc_upd(wlc); +} + /* Process received frames */ /* * Return true if more frames need to be processed. false otherwise. @@ -8051,8 +8293,14 @@ static void brcms_c_recv(struct brcms_c_info *wlc, struct sk_buff *p) len = p->len; if (rxh->RxStatus1 & RXS_FCSERR) { - if (!(wlc->filter_flags & FIF_FCSFAIL)) + if (wlc->pub->mac80211_state & MAC80211_PROMISC_BCNS) { + wiphy_err(wlc->wiphy, "FCSERR while scanning******* -" + " tossing\n"); goto toss; + } else { + wiphy_err(wlc->wiphy, "RCSERR!!!\n"); + goto toss; + } } /* check received pkt has at least frame control field */ @@ -8080,17 +8328,21 @@ static bool brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound) { struct sk_buff *p; - struct sk_buff *next = NULL; - struct sk_buff_head recv_frames; - + struct sk_buff *head = NULL; + struct sk_buff *tail = NULL; uint n = 0; uint bound_limit = bound ? RXBND : -1; BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); - skb_queue_head_init(&recv_frames); - /* gather received frames */ - while (dma_rx(wlc_hw->di[fifo], &recv_frames)) { + while ((p = dma_rx(wlc_hw->di[fifo]))) { + + if (!tail) + head = tail = p; + else { + tail->prev = p; + tail = p; + } /* !give others some time to run! 
*/ if (++n >= bound_limit) @@ -8101,11 +8353,12 @@ brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound) dma_rxfill(wlc_hw->di[fifo]); /* process each frame */ - skb_queue_walk_safe(&recv_frames, p, next) { + while ((p = head) != NULL) { struct d11rxhdr_le *rxh_le; struct d11rxhdr *rxh; + head = head->prev; + p->prev = NULL; - skb_unlink(p, &recv_frames); rxh_le = (struct d11rxhdr_le *)p->data; rxh = (struct d11rxhdr *)p->data; @@ -8136,7 +8389,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded) { u32 macintstatus; struct brcms_hardware *wlc_hw = wlc->hw; - struct bcma_device *core = wlc_hw->d11core; + struct d11regs __iomem *regs = wlc_hw->regs; struct wiphy *wiphy = wlc->wiphy; if (brcms_deviceremoved(wlc)) { @@ -8172,7 +8425,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded) /* ATIM window end */ if (macintstatus & MI_ATIMWINEND) { BCMMSG(wlc->wiphy, "end of ATIM window\n"); - bcma_set32(core, D11REGOFFS(maccommand), wlc->qvalid); + OR_REG(®s->maccommand, wlc->qvalid); wlc->qvalid = 0; } @@ -8190,17 +8443,18 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded) if (macintstatus & MI_GP0) { wiphy_err(wiphy, "wl%d: PSM microcode watchdog fired at %d " - "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now); + "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now); printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n", - __func__, ai_get_chip_id(wlc_hw->sih), - ai_get_chiprev(wlc_hw->sih)); - brcms_fatal_error(wlc_hw->wlc->wl); + __func__, wlc_hw->sih->chip, + wlc_hw->sih->chiprev); + /* big hammer */ + brcms_init(wlc->wl); } /* gptimer timeout */ if (macintstatus & MI_TO) - bcma_write32(core, D11REGOFFS(gptimer), 0); + W_REG(®s->gptimer, 0); if (macintstatus & MI_RFDISABLE) { BCMMSG(wlc->wiphy, "wl%d: BMAC Detected a change on the" @@ -8216,17 +8470,20 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded) return wlc->macintstatus != 0; fatal: - brcms_fatal_error(wlc_hw->wlc->wl); + brcms_init(wlc->wl); return wlc->macintstatus != 0; } -void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx) +void brcms_c_init(struct brcms_c_info *wlc) { - struct bcma_device *core = wlc->hw->d11core; + struct d11regs __iomem *regs; u16 chanspec; + bool mute = false; BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit); + regs = wlc->regs; + /* * This will happen if a big-hammer was executed. 
In * that case, we want to go back to the channel that @@ -8237,7 +8494,7 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx) else chanspec = brcms_c_init_chanspec(wlc); - brcms_b_init(wlc->hw, chanspec); + brcms_b_init(wlc->hw, chanspec, mute); /* update beacon listen interval */ brcms_c_bcn_li_upd(wlc); @@ -8256,8 +8513,8 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx) * update since init path would reset * to default value */ - bcma_write32(core, D11REGOFFS(tsf_cfprep), - bi << CFPREP_CBI_SHIFT); + W_REG(®s->tsf_cfprep, + (bi << CFPREP_CBI_SHIFT)); /* Update maccontrol PM related bits */ brcms_c_set_ps_ctrl(wlc); @@ -8287,7 +8544,7 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx) brcms_c_bsinit(wlc); /* Enable EDCF mode (while the MAC is suspended) */ - bcma_set16(core, D11REGOFFS(ifs_ctl), IFS_USEEDCF); + OR_REG(®s->ifs_ctl, IFS_USEEDCF); brcms_c_edcf_setparams(wlc, false); /* Init precedence maps for empty FIFOs */ @@ -8303,15 +8560,14 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx) /* ..now really unleash hell (allow the MAC out of suspend) */ brcms_c_enable_mac(wlc); - /* suspend the tx fifos and mute the phy for preism cac time */ - if (mute_tx) - brcms_b_mute(wlc->hw, true); - /* clear tx flow control */ brcms_c_txflowcontrol_reset(wlc); /* enable the RF Disable Delay timer */ - bcma_write32(core, D11REGOFFS(rfdisabledly), RFDISABLE_DEFAULT); + W_REG(&wlc->regs->rfdisabledly, RFDISABLE_DEFAULT); + + /* initialize mpc delay */ + wlc->mpc_delay_off = wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT; /* * Initialize WME parameters; if they haven't been set by some other @@ -8321,7 +8577,7 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx) /* Uninitialized; read from HW */ int ac; - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + for (ac = 0; ac < AC_COUNT; ac++) wlc->wme_retries[ac] = brcms_b_read_shm(wlc->hw, M_AC_TXLMT_ADDR(ac)); } @@ -8331,8 +8587,9 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx) * The common driver entry routine. 
Error codes should be unique */ struct brcms_c_info * -brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit, - bool piomode, uint *perr) +brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit, + bool piomode, void __iomem *regsva, struct pci_dev *btparam, + uint *perr) { struct brcms_c_info *wlc; uint err = 0; @@ -8340,7 +8597,7 @@ brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit, struct brcms_pub *pub; /* allocate struct brcms_c_info state and its substructures */ - wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, 0); + wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, device); if (wlc == NULL) goto fail; wlc->wiphy = wl->wiphy; @@ -8367,7 +8624,8 @@ brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit, * low level attach steps(all hw accesses go * inside, no more in rest of the attach) */ - err = brcms_b_attach(wlc, core, unit, piomode); + err = brcms_b_attach(wlc, vendor, device, unit, piomode, regsva, + btparam); if (err) goto fail; @@ -8496,6 +8754,8 @@ brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit, brcms_c_ht_update_sgi_rx(wlc, 0); } + /* initialize radio_mpc_disable according to wlc->mpc */ + brcms_c_radio_mpc_upd(wlc); brcms_b_antsel_set(wlc->hw, wlc->asi->antsel_avail); if (perr) diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/main.h b/trunk/drivers/net/wireless/brcm80211/brcmsmac/main.h index adb136ec1f04..c0e0fcfdfaf8 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/main.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/main.h @@ -44,6 +44,8 @@ /* transmit buffer max headroom for protocol headers */ #define TXOFF (D11_TXH_LEN + D11_PHY_HDR_LEN) +#define AC_COUNT 4 + /* Macros for doing definition and get/set of bitfields * Usage example, e.g. a three-bit field (bits 4-6): * #define _M BITFIELD_MASK(3) @@ -334,7 +336,7 @@ struct brcms_hardware { u32 machwcap_backup; /* backup of machwcap */ struct si_pub *sih; /* SI handle (cookie for siutils calls) */ - struct bcma_device *d11core; /* pointer to 802.11 core */ + struct d11regs __iomem *regs; /* pointer to device registers */ struct phy_shim_info *physhim; /* phy shim layer handler */ struct shared_phy *phy_sh; /* pointer to shared phy state */ struct brcms_hw_band *band;/* pointer to active per-band state */ @@ -400,6 +402,7 @@ struct brcms_txq_info { * * pub: pointer to driver public state. * wl: pointer to specific private state. + * regs: pointer to device registers. * hw: HW related state. * clkreq_override: setting for clkreq for PCIE : Auto, 0, 1. * fastpwrup_dly: time in us needed to bring up d11 fast clock. @@ -424,6 +427,11 @@ struct brcms_txq_info { * bandinit_pending: track band init in auto band. * radio_monitor: radio timer is running. * going_down: down path intermediate variable. + * mpc: enable minimum power consumption. + * mpc_dlycnt: # of watchdog cnt before turn disable radio. + * mpc_offcnt: # of watchdog cnt that radio is disabled. + * mpc_delay_off: delay radio disable by # of watchdog cnt. + * prev_non_delay_mpc: prev state brcms_c_is_non_delay_mpc. * wdtimer: timer for watchdog routine. * radio_timer: timer for hw radio button monitor routine. * monitor: monitor (MPDU sniffing) mode. @@ -433,7 +441,7 @@ struct brcms_txq_info { * bcn_li_dtim: beacon listen interval in # dtims. * WDarmed: watchdog timer is armed. * WDlast: last time wlc_watchdog() was called. - * edcf_txop[IEEE80211_NUM_ACS]: current txop for each ac. 
+ * edcf_txop[AC_COUNT]: current txop for each ac. * wme_retries: per-AC retry limits. * tx_prec_map: Precedence map based on HW FIFO space. * fifo2prec_map[NFIFO]: pointer to fifo2_prec map based on WME. @@ -476,6 +484,7 @@ struct brcms_txq_info { struct brcms_c_info { struct brcms_pub *pub; struct brcms_info *wl; + struct d11regs __iomem *regs; struct brcms_hardware *hw; /* clock */ @@ -513,11 +522,18 @@ struct brcms_c_info { bool radio_monitor; bool going_down; + bool mpc; + u8 mpc_dlycnt; + u8 mpc_offcnt; + u8 mpc_delay_off; + u8 prev_non_delay_mpc; + struct brcms_timer *wdtimer; struct brcms_timer *radio_timer; /* promiscuous */ - uint filter_flags; + bool monitor; + bool bcnmisc_monitor; /* driver feature */ bool _rifs; @@ -530,9 +546,9 @@ struct brcms_c_info { u32 WDlast; /* WME */ - u16 edcf_txop[IEEE80211_NUM_ACS]; + u16 edcf_txop[AC_COUNT]; - u16 wme_retries[IEEE80211_NUM_ACS]; + u16 wme_retries[AC_COUNT]; u16 tx_prec_map; u16 fifo2prec_map[NFIFO]; @@ -655,7 +671,8 @@ extern void brcms_c_print_txdesc(struct d11txh *txh); #endif extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config); -extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags); +extern void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc, + bool promisc); extern void brcms_c_send_q(struct brcms_c_info *wlc); extern int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu, uint *fifo); diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c index 7fad6dc19258..0bcb26792046 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c @@ -139,9 +139,6 @@ #define SRSH_PI_MASK 0xf000 /* bit 15:12 */ #define SRSH_PI_SHIFT 12 /* bit 15:12 */ -#define PCIREGOFFS(field) offsetof(struct sbpciregs, field) -#define PCIEREGOFFS(field) offsetof(struct sbpcieregs, field) - /* Sonics side: PCI core and host control registers */ struct sbpciregs { u32 control; /* PCI control */ @@ -208,7 +205,11 @@ struct sbpcieregs { }; struct pcicore_info { - struct bcma_device *core; + union { + struct sbpcieregs __iomem *pcieregs; + struct sbpciregs __iomem *pciregs; + } regs; /* Memory mapped register to the core */ + struct si_pub *sih; /* System interconnect handle */ struct pci_dev *dev; u8 pciecap_lcreg_offset;/* PCIE capability LCreg offset @@ -223,9 +224,9 @@ struct pcicore_info { }; #define PCIE_ASPM(sih) \ - ((ai_get_buscoretype(sih) == PCIE_CORE_ID) && \ - ((ai_get_buscorerev(sih) >= 3) && \ - (ai_get_buscorerev(sih) <= 5))) + (((sih)->buscoretype == PCIE_CORE_ID) && \ + (((sih)->buscorerev >= 3) && \ + ((sih)->buscorerev <= 5))) /* delay needed between the mdio control/ mdiodata register data access */ @@ -237,7 +238,8 @@ static void pr28829_delay(void) /* Initialize the PCI core. 
* It's caller's responsibility to make sure that this is done only once */ -struct pcicore_info *pcicore_init(struct si_pub *sih, struct bcma_device *core) +struct pcicore_info *pcicore_init(struct si_pub *sih, struct pci_dev *pdev, + void __iomem *regs) { struct pcicore_info *pi; @@ -247,15 +249,17 @@ struct pcicore_info *pcicore_init(struct si_pub *sih, struct bcma_device *core) return NULL; pi->sih = sih; - pi->dev = core->bus->host_pci; - pi->core = core; + pi->dev = pdev; - if (core->id.id == PCIE_CORE_ID) { + if (sih->buscoretype == PCIE_CORE_ID) { u8 cap_ptr; + pi->regs.pcieregs = regs; cap_ptr = pcicore_find_pci_capability(pi->dev, PCI_CAP_ID_EXP, NULL, NULL); pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET; - } + } else + pi->regs.pciregs = regs; + return pi; } @@ -330,37 +334,37 @@ pcicore_find_pci_capability(struct pci_dev *dev, u8 req_cap_id, /* ***** Register Access API */ static uint -pcie_readreg(struct bcma_device *core, uint addrtype, uint offset) +pcie_readreg(struct sbpcieregs __iomem *pcieregs, uint addrtype, uint offset) { uint retval = 0xFFFFFFFF; switch (addrtype) { case PCIE_CONFIGREGS: - bcma_write32(core, PCIEREGOFFS(configaddr), offset); - (void)bcma_read32(core, PCIEREGOFFS(configaddr)); - retval = bcma_read32(core, PCIEREGOFFS(configdata)); + W_REG(&pcieregs->configaddr, offset); + (void)R_REG((&pcieregs->configaddr)); + retval = R_REG(&pcieregs->configdata); break; case PCIE_PCIEREGS: - bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset); - (void)bcma_read32(core, PCIEREGOFFS(pcieindaddr)); - retval = bcma_read32(core, PCIEREGOFFS(pcieinddata)); + W_REG(&pcieregs->pcieindaddr, offset); + (void)R_REG(&pcieregs->pcieindaddr); + retval = R_REG(&pcieregs->pcieinddata); break; } return retval; } -static uint pcie_writereg(struct bcma_device *core, uint addrtype, +static uint pcie_writereg(struct sbpcieregs __iomem *pcieregs, uint addrtype, uint offset, uint val) { switch (addrtype) { case PCIE_CONFIGREGS: - bcma_write32(core, PCIEREGOFFS(configaddr), offset); - bcma_write32(core, PCIEREGOFFS(configdata), val); + W_REG((&pcieregs->configaddr), offset); + W_REG((&pcieregs->configdata), val); break; case PCIE_PCIEREGS: - bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset); - bcma_write32(core, PCIEREGOFFS(pcieinddata), val); + W_REG((&pcieregs->pcieindaddr), offset); + W_REG((&pcieregs->pcieinddata), val); break; default: break; @@ -370,6 +374,7 @@ static uint pcie_writereg(struct bcma_device *core, uint addrtype, static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk) { + struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs; uint mdiodata, i = 0; uint pcie_serdes_spinwait = 200; @@ -377,13 +382,12 @@ static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk) (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) | (MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) | (blk << 4)); - bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata); + W_REG(&pcieregs->mdiodata, mdiodata); pr28829_delay(); /* retry till the transaction is complete */ while (i < pcie_serdes_spinwait) { - if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) & - MDIOCTL_ACCESS_DONE) + if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE) break; udelay(1000); @@ -400,15 +404,15 @@ static int pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write, uint *val) { + struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs; uint mdiodata; uint i = 0; uint pcie_serdes_spinwait = 10; /* enable mdio access to SERDES */ - bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), - 
MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL); + W_REG(&pcieregs->mdiocontrol, MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL); - if (ai_get_buscorerev(pi->sih) >= 10) { + if (pi->sih->buscorerev >= 10) { /* new serdes is slower in rw, * using two layers of reg address mapping */ @@ -428,22 +432,20 @@ pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write, mdiodata |= (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA | *val); - bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata); + W_REG(&pcieregs->mdiodata, mdiodata); pr28829_delay(); /* retry till the transaction is complete */ while (i < pcie_serdes_spinwait) { - if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) & - MDIOCTL_ACCESS_DONE) { + if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE) { if (!write) { pr28829_delay(); - *val = (bcma_read32(pi->core, - PCIEREGOFFS(mdiodata)) & + *val = (R_REG(&pcieregs->mdiodata) & MDIODATA_MASK); } /* Disable mdio access to SERDES */ - bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0); + W_REG(&pcieregs->mdiocontrol, 0); return 0; } udelay(1000); @@ -451,7 +453,7 @@ pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write, } /* Timed out. Disable mdio access to SERDES. */ - bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0); + W_REG(&pcieregs->mdiocontrol, 0); return 1; } @@ -500,18 +502,18 @@ static void pcie_extendL1timer(struct pcicore_info *pi, bool extend) { u32 w; struct si_pub *sih = pi->sih; + struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs; - if (ai_get_buscoretype(sih) != PCIE_CORE_ID || - ai_get_buscorerev(sih) < 7) + if (sih->buscoretype != PCIE_CORE_ID || sih->buscorerev < 7) return; - w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG); + w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG); if (extend) w |= PCIE_ASPMTIMER_EXTEND; else w &= ~PCIE_ASPMTIMER_EXTEND; - pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w); - w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG); + pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w); + w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG); } /* centralized clkreq control policy */ @@ -525,27 +527,25 @@ static void pcie_clkreq_upd(struct pcicore_info *pi, uint state) pcie_clkreq(pi, 1, 0); break; case SI_PCIDOWN: - /* turn on serdes PLL down */ - if (ai_get_buscorerev(sih) == 6) { - ai_cc_reg(sih, - offsetof(struct chipcregs, chipcontrol_addr), - ~0, 0); - ai_cc_reg(sih, - offsetof(struct chipcregs, chipcontrol_data), - ~0x40, 0); + if (sih->buscorerev == 6) { /* turn on serdes PLL down */ + ai_corereg(sih, SI_CC_IDX, + offsetof(struct chipcregs, chipcontrol_addr), + ~0, 0); + ai_corereg(sih, SI_CC_IDX, + offsetof(struct chipcregs, chipcontrol_data), + ~0x40, 0); } else if (pi->pcie_pr42767) { pcie_clkreq(pi, 1, 1); } break; case SI_PCIUP: - /* turn off serdes PLL down */ - if (ai_get_buscorerev(sih) == 6) { - ai_cc_reg(sih, - offsetof(struct chipcregs, chipcontrol_addr), - ~0, 0); - ai_cc_reg(sih, - offsetof(struct chipcregs, chipcontrol_data), - ~0x40, 0x40); + if (sih->buscorerev == 6) { /* turn off serdes PLL down */ + ai_corereg(sih, SI_CC_IDX, + offsetof(struct chipcregs, chipcontrol_addr), + ~0, 0); + ai_corereg(sih, SI_CC_IDX, + offsetof(struct chipcregs, chipcontrol_data), + ~0x40, 0x40); } else if (PCIE_ASPM(sih)) { /* disable clkreq */ pcie_clkreq(pi, 1, 0); } @@ -562,7 +562,7 @@ static void pcie_war_polarity(struct pcicore_info *pi) if (pi->pcie_polarity != 0) return; - w = pcie_readreg(pi->core, 
PCIE_PCIEREGS, PCIE_PLP_STATUSREG); + w = pcie_readreg(pi->regs.pcieregs, PCIE_PCIEREGS, PCIE_PLP_STATUSREG); /* Detect the current polarity at attach and force that polarity and * disable changing the polarity @@ -581,15 +581,18 @@ static void pcie_war_polarity(struct pcicore_info *pi) */ static void pcie_war_aspm_clkreq(struct pcicore_info *pi) { + struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs; struct si_pub *sih = pi->sih; u16 val16; + u16 __iomem *reg16; u32 w; if (!PCIE_ASPM(sih)) return; /* bypass this on QT or VSIM */ - val16 = bcma_read16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET])); + reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET]; + val16 = R_REG(reg16); val16 &= ~SRSH_ASPM_ENB; if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB) @@ -599,15 +602,15 @@ static void pcie_war_aspm_clkreq(struct pcicore_info *pi) else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB) val16 |= SRSH_ASPM_L0s_ENB; - bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]), val16); + W_REG(reg16, val16); pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset, &w); w &= ~PCIE_ASPM_ENAB; w |= pi->pcie_war_aspm_ovr; pci_write_config_dword(pi->dev, pi->pciecap_lcreg_offset, w); - val16 = bcma_read16(pi->core, - PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5])); + reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV5]; + val16 = R_REG(reg16); if (pi->pcie_war_aspm_ovr != PCIE_ASPM_DISAB) { val16 |= SRSH_CLKREQ_ENB; @@ -615,8 +618,7 @@ static void pcie_war_aspm_clkreq(struct pcicore_info *pi) } else val16 &= ~SRSH_CLKREQ_ENB; - bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5]), - val16); + W_REG(reg16, val16); } /* Apply the polarity determined at the start */ @@ -640,15 +642,16 @@ static void pcie_war_serdes(struct pcicore_info *pi) /* Needs to happen when coming out of 'standby'/'hibernate' */ static void pcie_misc_config_fixup(struct pcicore_info *pi) { + struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs; u16 val16; + u16 __iomem *reg16; - val16 = bcma_read16(pi->core, - PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG])); + reg16 = &pcieregs->sprom[SRSH_PCIE_MISC_CONFIG]; + val16 = R_REG(reg16); if ((val16 & SRSH_L23READY_EXIT_NOPERST) == 0) { val16 |= SRSH_L23READY_EXIT_NOPERST; - bcma_write16(pi->core, - PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG]), val16); + W_REG(reg16, val16); } } @@ -656,57 +659,62 @@ static void pcie_misc_config_fixup(struct pcicore_info *pi) /* Needs to happen when coming out of 'standby'/'hibernate' */ static void pcie_war_noplldown(struct pcicore_info *pi) { + struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs; + u16 __iomem *reg16; + /* turn off serdes PLL down */ - ai_cc_reg(pi->sih, offsetof(struct chipcregs, chipcontrol), - CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN); + ai_corereg(pi->sih, SI_CC_IDX, offsetof(struct chipcregs, chipcontrol), + CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN); /* clear srom shadow backdoor */ - bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_BD_OFFSET]), 0); + reg16 = &pcieregs->sprom[SRSH_BD_OFFSET]; + W_REG(reg16, 0); } /* Needs to happen when coming out of 'standby'/'hibernate' */ static void pcie_war_pci_setup(struct pcicore_info *pi) { struct si_pub *sih = pi->sih; + struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs; u32 w; - if (ai_get_buscorerev(sih) == 0 || ai_get_buscorerev(sih) == 1) { - w = pcie_readreg(pi->core, PCIE_PCIEREGS, + if (sih->buscorerev == 0 || sih->buscorerev == 1) { + w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_TLP_WORKAROUNDSREG); w |= 0x8; - pcie_writereg(pi->core, PCIE_PCIEREGS, + 
pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_TLP_WORKAROUNDSREG, w); } - if (ai_get_buscorerev(sih) == 1) { - w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG); + if (sih->buscorerev == 1) { + w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG); w |= 0x40; - pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w); + pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w); } - if (ai_get_buscorerev(sih) == 0) { + if (sih->buscorerev == 0) { pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128); pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100); pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466); } else if (PCIE_ASPM(sih)) { /* Change the L1 threshold for better performance */ - w = pcie_readreg(pi->core, PCIE_PCIEREGS, + w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG); w &= ~PCIE_L1THRESHOLDTIME_MASK; w |= PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT; - pcie_writereg(pi->core, PCIE_PCIEREGS, + pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w); pcie_war_serdes(pi); pcie_war_aspm_clkreq(pi); - } else if (ai_get_buscorerev(pi->sih) == 7) + } else if (pi->sih->buscorerev == 7) pcie_war_noplldown(pi); /* Note that the fix is actually in the SROM, * that's why this is open-ended */ - if (ai_get_buscorerev(pi->sih) >= 6) + if (pi->sih->buscorerev >= 6) pcie_misc_config_fixup(pi); } @@ -737,7 +745,7 @@ void pcicore_attach(struct pcicore_info *pi, int state) void pcicore_hwup(struct pcicore_info *pi) { - if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID) + if (!pi || pi->sih->buscoretype != PCIE_CORE_ID) return; pcie_war_pci_setup(pi); @@ -745,7 +753,7 @@ void pcicore_hwup(struct pcicore_info *pi) void pcicore_up(struct pcicore_info *pi, int state) { - if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID) + if (!pi || pi->sih->buscoretype != PCIE_CORE_ID) return; /* Restore L1 timer for better performance */ @@ -773,7 +781,7 @@ void pcicore_sleep(struct pcicore_info *pi) void pcicore_down(struct pcicore_info *pi, int state) { - if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID) + if (!pi || pi->sih->buscoretype != PCIE_CORE_ID) return; pcie_clkreq_upd(pi, state); @@ -782,45 +790,46 @@ void pcicore_down(struct pcicore_info *pi, int state) pcie_extendL1timer(pi, false); } -void pcicore_fixcfg(struct pcicore_info *pi) +/* precondition: current core is sii->buscoretype */ +static void pcicore_fixcfg(struct pcicore_info *pi, u16 __iomem *reg16) { - struct bcma_device *core = pi->core; + struct si_info *sii = (struct si_info *)(pi->sih); u16 val16; - uint regoff; - - switch (pi->core->id.id) { - case BCMA_CORE_PCI: - regoff = PCIREGOFFS(sprom[SRSH_PI_OFFSET]); - break; - - case BCMA_CORE_PCIE: - regoff = PCIEREGOFFS(sprom[SRSH_PI_OFFSET]); - break; - - default: - return; - } + uint pciidx; - val16 = bcma_read16(pi->core, regoff); - if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != - (u16)core->core_index) { - val16 = ((u16)core->core_index << SRSH_PI_SHIFT) | + pciidx = ai_coreidx(&sii->pub); + val16 = R_REG(reg16); + if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (u16)pciidx) { + val16 = (u16)(pciidx << SRSH_PI_SHIFT) | (val16 & ~SRSH_PI_MASK); - bcma_write16(pi->core, regoff, val16); + W_REG(reg16, val16); } } +void +pcicore_fixcfg_pci(struct pcicore_info *pi, struct sbpciregs __iomem *pciregs) +{ + pcicore_fixcfg(pi, &pciregs->sprom[SRSH_PI_OFFSET]); +} + +void pcicore_fixcfg_pcie(struct pcicore_info *pi, + struct sbpcieregs __iomem *pcieregs) +{ + pcicore_fixcfg(pi, &pcieregs->sprom[SRSH_PI_OFFSET]); 
+} + /* precondition: current core is pci core */ void -pcicore_pci_setup(struct pcicore_info *pi) +pcicore_pci_setup(struct pcicore_info *pi, struct sbpciregs __iomem *pciregs) { - bcma_set32(pi->core, PCIREGOFFS(sbtopci2), - SBTOPCI_PREF | SBTOPCI_BURST); - - if (pi->core->id.rev >= 11) { - bcma_set32(pi->core, PCIREGOFFS(sbtopci2), - SBTOPCI_RC_READMULTI); - bcma_set32(pi->core, PCIREGOFFS(clkrun), PCI_CLKRUN_DSBL); - (void)bcma_read32(pi->core, PCIREGOFFS(clkrun)); + u32 w; + + OR_REG(&pciregs->sbtopci2, SBTOPCI_PREF | SBTOPCI_BURST); + + if (((struct si_info *)(pi->sih))->pub.buscorerev >= 11) { + OR_REG(&pciregs->sbtopci2, SBTOPCI_RC_READMULTI); + w = R_REG(&pciregs->clkrun); + W_REG(&pciregs->clkrun, w | PCI_CLKRUN_DSBL); + w = R_REG(&pciregs->clkrun); } } diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h b/trunk/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h index 9fc3ead540a8..58aa80dc3329 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h @@ -62,7 +62,8 @@ struct sbpciregs; struct sbpcieregs; extern struct pcicore_info *pcicore_init(struct si_pub *sih, - struct bcma_device *core); + struct pci_dev *pdev, + void __iomem *regs); extern void pcicore_deinit(struct pcicore_info *pch); extern void pcicore_attach(struct pcicore_info *pch, int state); extern void pcicore_hwup(struct pcicore_info *pch); @@ -71,7 +72,11 @@ extern void pcicore_sleep(struct pcicore_info *pch); extern void pcicore_down(struct pcicore_info *pch, int state); extern u8 pcicore_find_pci_capability(struct pci_dev *dev, u8 req_cap_id, unsigned char *buf, u32 *buflen); -extern void pcicore_fixcfg(struct pcicore_info *pch); -extern void pcicore_pci_setup(struct pcicore_info *pch); +extern void pcicore_fixcfg_pci(struct pcicore_info *pch, + struct sbpciregs __iomem *pciregs); +extern void pcicore_fixcfg_pcie(struct pcicore_info *pch, + struct sbpcieregs __iomem *pciregs); +extern void pcicore_pci_setup(struct pcicore_info *pch, + struct sbpciregs __iomem *pciregs); #endif /* _BRCM_NICPCI_H_ */ diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/otp.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/otp.c index f1ca12625860..edf551561fd8 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/otp.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/otp.c @@ -77,7 +77,7 @@ struct otp_fn_s { }; struct otpinfo { - struct bcma_device *core; /* chipc core */ + uint ccrev; /* chipc revision */ const struct otp_fn_s *fn; /* OTP functions */ struct si_pub *sih; /* Saved sb handle */ @@ -133,10 +133,9 @@ struct otpinfo { #define OTP_SZ_FU_144 (144/8) /* 144 bits */ static u16 -ipxotp_otpr(struct otpinfo *oi, uint wn) +ipxotp_otpr(struct otpinfo *oi, struct chipcregs __iomem *cc, uint wn) { - return bcma_read16(oi->core, - CHIPCREGOFFS(sromotp[wn])); + return R_REG(&cc->sromotp[wn]); } /* @@ -147,7 +146,7 @@ static int ipxotp_max_rgnsz(struct si_pub *sih, int osizew) { int ret = 0; - switch (ai_get_chip_id(sih)) { + switch (sih->chip) { case BCM43224_CHIP_ID: case BCM43225_CHIP_ID: ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM; @@ -162,21 +161,19 @@ static int ipxotp_max_rgnsz(struct si_pub *sih, int osizew) return ret; } -static void _ipxotp_init(struct otpinfo *oi) +static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc) { uint k; u32 otpp, st; - int ccrev = ai_get_ccrev(oi->sih); - /* * record word offset of General Use Region * for various chipcommon revs */ - if (ccrev == 21 || ccrev == 24 - || 
ccrev == 27) { + if (oi->sih->ccrev == 21 || oi->sih->ccrev == 24 + || oi->sih->ccrev == 27) { oi->otpgu_base = REVA4_OTPGU_BASE; - } else if (ccrev == 36) { + } else if (oi->sih->ccrev == 36) { /* * OTP size greater than equal to 2KB (128 words), * otpgu_base is similar to rev23 @@ -185,7 +182,7 @@ static void _ipxotp_init(struct otpinfo *oi) oi->otpgu_base = REVB8_OTPGU_BASE; else oi->otpgu_base = REV36_OTPGU_BASE; - } else if (ccrev == 23 || ccrev >= 25) { + } else if (oi->sih->ccrev == 23 || oi->sih->ccrev >= 25) { oi->otpgu_base = REVB8_OTPGU_BASE; } @@ -193,21 +190,24 @@ static void _ipxotp_init(struct otpinfo *oi) otpp = OTPP_START_BUSY | ((OTPPOC_INIT << OTPP_OC_SHIFT) & OTPP_OC_MASK); - bcma_write32(oi->core, CHIPCREGOFFS(otpprog), otpp); - st = bcma_read32(oi->core, CHIPCREGOFFS(otpprog)); - for (k = 0; (st & OTPP_START_BUSY) && (k < OTPP_TRIES); k++) - st = bcma_read32(oi->core, CHIPCREGOFFS(otpprog)); + W_REG(&cc->otpprog, otpp); + for (k = 0; + ((st = R_REG(&cc->otpprog)) & OTPP_START_BUSY) + && (k < OTPP_TRIES); k++) + ; if (k >= OTPP_TRIES) return; /* Read OTP lock bits and subregion programmed indication bits */ - oi->status = bcma_read32(oi->core, CHIPCREGOFFS(otpstatus)); + oi->status = R_REG(&cc->otpstatus); - if ((ai_get_chip_id(oi->sih) == BCM43224_CHIP_ID) - || (ai_get_chip_id(oi->sih) == BCM43225_CHIP_ID)) { + if ((oi->sih->chip == BCM43224_CHIP_ID) + || (oi->sih->chip == BCM43225_CHIP_ID)) { u32 p_bits; - p_bits = (ipxotp_otpr(oi, oi->otpgu_base + OTPGU_P_OFF) & - OTPGU_P_MSK) >> OTPGU_P_SHIFT; + p_bits = + (ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_P_OFF) & + OTPGU_P_MSK) + >> OTPGU_P_SHIFT; oi->status |= (p_bits << OTPS_GUP_SHIFT); } @@ -220,7 +220,7 @@ static void _ipxotp_init(struct otpinfo *oi) oi->hwlim = oi->wsize; if (oi->status & OTPS_GUP_HW) { oi->hwlim = - ipxotp_otpr(oi, oi->otpgu_base + OTPGU_HSB_OFF) / 16; + ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_HSB_OFF) / 16; oi->swbase = oi->hwlim; } else oi->swbase = oi->hwbase; @@ -230,7 +230,7 @@ static void _ipxotp_init(struct otpinfo *oi) if (oi->status & OTPS_GUP_SW) { oi->swlim = - ipxotp_otpr(oi, oi->otpgu_base + OTPGU_SFB_OFF) / 16; + ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_SFB_OFF) / 16; oi->fbase = oi->swlim; } else oi->fbase = oi->swbase; @@ -240,8 +240,11 @@ static void _ipxotp_init(struct otpinfo *oi) static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi) { + uint idx; + struct chipcregs __iomem *cc; + /* Make sure we're running IPX OTP */ - if (!OTPTYPE_IPX(ai_get_ccrev(sih))) + if (!OTPTYPE_IPX(sih->ccrev)) return -EBADE; /* Make sure OTP is not disabled */ @@ -249,7 +252,7 @@ static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi) return -EBADE; /* Check for otp size */ - switch ((ai_get_cccaps(sih) & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) { + switch ((sih->cccaps & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) { case 0: /* Nothing there */ return -EBADE; @@ -279,13 +282,21 @@ static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi) } /* Retrieve OTP region info */ - _ipxotp_init(oi); + idx = ai_coreidx(sih); + cc = ai_setcoreidx(sih, SI_CC_IDX); + + _ipxotp_init(oi, cc); + + ai_setcoreidx(sih, idx); + return 0; } static int ipxotp_read_region(struct otpinfo *oi, int region, u16 *data, uint *wlen) { + uint idx; + struct chipcregs __iomem *cc; uint base, i, sz; /* Validate region selection */ @@ -354,10 +365,14 @@ ipxotp_read_region(struct otpinfo *oi, int region, u16 *data, uint *wlen) return -EINVAL; } + idx = ai_coreidx(oi->sih); + cc = ai_setcoreidx(oi->sih, SI_CC_IDX); 
+ /* Read the data */ for (i = 0; i < sz; i++) - data[i] = ipxotp_otpr(oi, base + i); + data[i] = ipxotp_otpr(oi, cc, base + i); + ai_setcoreidx(oi->sih, idx); *wlen = sz; return 0; } @@ -369,13 +384,14 @@ static const struct otp_fn_s ipxotp_fn = { static int otp_init(struct si_pub *sih, struct otpinfo *oi) { + int ret; memset(oi, 0, sizeof(struct otpinfo)); - oi->core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0); + oi->ccrev = sih->ccrev; - if (OTPTYPE_IPX(ai_get_ccrev(sih))) + if (OTPTYPE_IPX(oi->ccrev)) oi->fn = &ipxotp_fn; if (oi->fn == NULL) @@ -383,7 +399,7 @@ static int otp_init(struct si_pub *sih, struct otpinfo *oi) oi->sih = sih; - ret = (oi->fn->init)(sih, oi); + ret = (oi->fn->init) (sih, oi); return ret; } diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c index 264f8c4c703d..a3149254cbcd 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c @@ -109,10 +109,10 @@ static const struct chan_info_basic chan_info_all[] = { {204, 5020}, {208, 5040}, {212, 5060}, - {216, 5080} + {216, 50800} }; -static const u8 ofdm_rate_lookup[] = { +const u8 ofdm_rate_lookup[] = { BRCM_RATE_48M, BRCM_RATE_24M, @@ -149,8 +149,9 @@ void wlc_radioreg_enter(struct brcms_phy_pub *pih) void wlc_radioreg_exit(struct brcms_phy_pub *pih) { struct brcms_phy *pi = (struct brcms_phy *) pih; + u16 dummy; - (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion)); + dummy = R_REG(&pi->regs->phyversion); pi->phy_wreg = 0; wlapi_bmac_mctrl(pi->sh->physhim, MCTL_LOCK_RADIO, 0); } @@ -185,11 +186,19 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr) if ((D11REV_GE(pi->sh->corerev, 24)) || (D11REV_IS(pi->sh->corerev, 22) && (pi->pubpi.phy_type != PHY_TYPE_SSN))) { - bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), addr); - data = bcma_read16(pi->d11core, D11REGOFFS(radioregdata)); + W_REG_FLUSH(&pi->regs->radioregaddr, addr); + data = R_REG(&pi->regs->radioregdata); } else { - bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), addr); - data = bcma_read16(pi->d11core, D11REGOFFS(phy4wdatalo)); + W_REG_FLUSH(&pi->regs->phy4waddr, addr); + +#ifdef __ARM_ARCH_4T__ + __asm__(" .align 4 "); + __asm__(" nop "); + data = R_REG(&pi->regs->phy4wdatalo); +#else + data = R_REG(&pi->regs->phy4wdatalo); +#endif + } pi->phy_wreg = 0; @@ -202,15 +211,15 @@ void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val) (D11REV_IS(pi->sh->corerev, 22) && (pi->pubpi.phy_type != PHY_TYPE_SSN))) { - bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), addr); - bcma_write16(pi->d11core, D11REGOFFS(radioregdata), val); + W_REG_FLUSH(&pi->regs->radioregaddr, addr); + W_REG(&pi->regs->radioregdata, val); } else { - bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), addr); - bcma_write16(pi->d11core, D11REGOFFS(phy4wdatalo), val); + W_REG_FLUSH(&pi->regs->phy4waddr, addr); + W_REG(&pi->regs->phy4wdatalo, val); } if (++pi->phy_wreg >= pi->phy_wreg_limit) { - (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol)); + (void)R_REG(&pi->regs->maccontrol); pi->phy_wreg = 0; } } @@ -222,20 +231,19 @@ static u32 read_radio_id(struct brcms_phy *pi) if (D11REV_GE(pi->sh->corerev, 24)) { u32 b0, b1, b2; - bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 0); - b0 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata)); - bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 1); - b1 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata)); - bcma_wflush16(pi->d11core, 
D11REGOFFS(radioregaddr), 2); - b2 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata)); + W_REG_FLUSH(&pi->regs->radioregaddr, 0); + b0 = (u32) R_REG(&pi->regs->radioregdata); + W_REG_FLUSH(&pi->regs->radioregaddr, 1); + b1 = (u32) R_REG(&pi->regs->radioregdata); + W_REG_FLUSH(&pi->regs->radioregaddr, 2); + b2 = (u32) R_REG(&pi->regs->radioregdata); id = ((b0 & 0xf) << 28) | (((b2 << 8) | b1) << 12) | ((b0 >> 4) & 0xf); } else { - bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), RADIO_IDCODE); - id = (u32) bcma_read16(pi->d11core, D11REGOFFS(phy4wdatalo)); - id |= (u32) bcma_read16(pi->d11core, - D11REGOFFS(phy4wdatahi)) << 16; + W_REG_FLUSH(&pi->regs->phy4waddr, RADIO_IDCODE); + id = (u32) R_REG(&pi->regs->phy4wdatalo); + id |= (u32) R_REG(&pi->regs->phy4wdatahi) << 16; } pi->phy_wreg = 0; return id; @@ -275,52 +283,75 @@ void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val) void write_phy_channel_reg(struct brcms_phy *pi, uint val) { - bcma_write16(pi->d11core, D11REGOFFS(phychannel), val); + W_REG(&pi->regs->phychannel, val); } u16 read_phy_reg(struct brcms_phy *pi, u16 addr) { - bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr); + struct d11regs __iomem *regs; + + regs = pi->regs; + + W_REG_FLUSH(®s->phyregaddr, addr); pi->phy_wreg = 0; - return bcma_read16(pi->d11core, D11REGOFFS(phyregdata)); + return R_REG(®s->phyregdata); } void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val) { + struct d11regs __iomem *regs; + + regs = pi->regs; + #ifdef CONFIG_BCM47XX - bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr); - bcma_write16(pi->d11core, D11REGOFFS(phyregdata), val); + W_REG_FLUSH(®s->phyregaddr, addr); + W_REG(®s->phyregdata, val); if (addr == 0x72) - (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion)); + (void)R_REG(®s->phyregdata); #else - bcma_write32(pi->d11core, D11REGOFFS(phyregaddr), addr | (val << 16)); + W_REG((u32 __iomem *)(®s->phyregaddr), addr | (val << 16)); if (++pi->phy_wreg >= pi->phy_wreg_limit) { pi->phy_wreg = 0; - (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion)); + (void)R_REG(®s->phyversion); } #endif } void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val) { - bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr); - bcma_mask16(pi->d11core, D11REGOFFS(phyregdata), val); + struct d11regs __iomem *regs; + + regs = pi->regs; + + W_REG_FLUSH(®s->phyregaddr, addr); + + W_REG(®s->phyregdata, (R_REG(®s->phyregdata) & val)); pi->phy_wreg = 0; } void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val) { - bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr); - bcma_set16(pi->d11core, D11REGOFFS(phyregdata), val); + struct d11regs __iomem *regs; + + regs = pi->regs; + + W_REG_FLUSH(®s->phyregaddr, addr); + + W_REG(®s->phyregdata, (R_REG(®s->phyregdata) | val)); pi->phy_wreg = 0; } void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val) { - val &= mask; - bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr); - bcma_maskset16(pi->d11core, D11REGOFFS(phyregdata), ~mask, val); + struct d11regs __iomem *regs; + + regs = pi->regs; + + W_REG_FLUSH(®s->phyregaddr, addr); + + W_REG(®s->phyregdata, + ((R_REG(®s->phyregdata) & ~mask) | (val & mask))); pi->phy_wreg = 0; } @@ -381,8 +412,10 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp) sh->sromrev = shp->sromrev; sh->boardtype = shp->boardtype; sh->boardrev = shp->boardrev; + sh->boardvendor = shp->boardvendor; sh->boardflags = shp->boardflags; sh->boardflags2 = shp->boardflags2; + sh->buscorerev = shp->buscorerev; 
sh->fast_timer = PHY_SW_TIMER_FAST; sh->slow_timer = PHY_SW_TIMER_SLOW; @@ -425,7 +458,7 @@ static u32 wlc_phy_get_radio_ver(struct brcms_phy *pi) } struct brcms_phy_pub * -wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core, +wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs, int bandtype, struct wiphy *wiphy) { struct brcms_phy *pi; @@ -437,7 +470,7 @@ wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core, if (D11REV_IS(sh->corerev, 4)) sflags = SISF_2G_PHY | SISF_5G_PHY; else - sflags = bcma_aread32(d11core, BCMA_IOST); + sflags = ai_core_sflags(sh->sih, 0, 0); if (bandtype == BRCM_BAND_5G) { if ((sflags & (SISF_5G_PHY | SISF_DB_PHY)) == 0) @@ -455,7 +488,7 @@ wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core, if (pi == NULL) return NULL; pi->wiphy = wiphy; - pi->d11core = d11core; + pi->regs = regs; pi->sh = sh; pi->phy_init_por = true; pi->phy_wreg_limit = PHY_WREG_LIMIT; @@ -470,7 +503,7 @@ wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core, pi->pubpi.coreflags = SICF_GMODE; wlapi_bmac_corereset(pi->sh->physhim, pi->pubpi.coreflags); - phyversion = bcma_read16(pi->d11core, D11REGOFFS(phyversion)); + phyversion = R_REG(&pi->regs->phyversion); pi->pubpi.phy_type = PHY_TYPE(phyversion); pi->pubpi.phy_rev = phyversion & PV_PV_MASK; @@ -482,8 +515,8 @@ wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core, pi->pubpi.phy_corenum = PHY_CORE_NUM_2; pi->pubpi.ana_rev = (phyversion & PV_AV_MASK) >> PV_AV_SHIFT; - if (pi->pubpi.phy_type != PHY_TYPE_N && - pi->pubpi.phy_type != PHY_TYPE_LCN) + if (!pi->pubpi.phy_type == PHY_TYPE_N && + !pi->pubpi.phy_type == PHY_TYPE_LCN) goto err; if (bandtype == BRCM_BAND_5G) { @@ -754,14 +787,14 @@ void wlc_phy_init(struct brcms_phy_pub *pih, u16 chanspec) pi->radio_chanspec = chanspec; - mc = bcma_read32(pi->d11core, D11REGOFFS(maccontrol)); + mc = R_REG(&pi->regs->maccontrol); if (WARN(mc & MCTL_EN_MAC, "HW error MAC running on init")) return; if (!(pi->measure_hold & PHY_HOLD_FOR_SCAN)) pi->measure_hold |= PHY_HOLD_FOR_NOT_ASSOC; - if (WARN(!(bcma_aread32(pi->d11core, BCMA_IOST) & SISF_FCLKA), + if (WARN(!(ai_core_sflags(pi->sh->sih, 0, 0) & SISF_FCLKA), "HW error SISF_FCLKA\n")) return; @@ -800,8 +833,8 @@ void wlc_phy_cal_init(struct brcms_phy_pub *pih) struct brcms_phy *pi = (struct brcms_phy *) pih; void (*cal_init)(struct brcms_phy *) = NULL; - if (WARN((bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & - MCTL_EN_MAC) != 0, "HW error: MAC enabled during phy cal\n")) + if (WARN((R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) != 0, + "HW error: MAC enabled during phy cal\n")) return; if (!pi->initialized) { @@ -992,7 +1025,7 @@ wlc_phy_init_radio_regs(struct brcms_phy *pi, void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on) { #define DUMMY_PKT_LEN 20 - struct bcma_device *core = pi->d11core; + struct d11regs __iomem *regs = pi->regs; int i, count; u8 ofdmpkt[DUMMY_PKT_LEN] = { 0xcc, 0x01, 0x02, 0x00, 0x00, 0x00, 0xd4, 0x00, 0x00, 0x00, @@ -1008,28 +1041,26 @@ void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on) wlapi_bmac_write_template_ram(pi->sh->physhim, 0, DUMMY_PKT_LEN, dummypkt); - bcma_write16(core, D11REGOFFS(xmtsel), 0); + W_REG(®s->xmtsel, 0); if (D11REV_GE(pi->sh->corerev, 11)) - bcma_write16(core, D11REGOFFS(wepctl), 0x100); + W_REG(®s->wepctl, 0x100); else - bcma_write16(core, D11REGOFFS(wepctl), 0); + W_REG(®s->wepctl, 0); - bcma_write16(core, D11REGOFFS(txe_phyctl), - (ofdm ? 
1 : 0) | PHY_TXC_ANT_0); + W_REG(®s->txe_phyctl, (ofdm ? 1 : 0) | PHY_TXC_ANT_0); if (ISNPHY(pi) || ISLCNPHY(pi)) - bcma_write16(core, D11REGOFFS(txe_phyctl1), 0x1A02); + W_REG(®s->txe_phyctl1, 0x1A02); - bcma_write16(core, D11REGOFFS(txe_wm_0), 0); - bcma_write16(core, D11REGOFFS(txe_wm_1), 0); + W_REG(®s->txe_wm_0, 0); + W_REG(®s->txe_wm_1, 0); - bcma_write16(core, D11REGOFFS(xmttplatetxptr), 0); - bcma_write16(core, D11REGOFFS(xmttxcnt), DUMMY_PKT_LEN); + W_REG(®s->xmttplatetxptr, 0); + W_REG(®s->xmttxcnt, DUMMY_PKT_LEN); - bcma_write16(core, D11REGOFFS(xmtsel), - ((8 << 8) | (1 << 5) | (1 << 2) | 2)); + W_REG(®s->xmtsel, ((8 << 8) | (1 << 5) | (1 << 2) | 2)); - bcma_write16(core, D11REGOFFS(txe_ctl), 0); + W_REG(®s->txe_ctl, 0); if (!pa_on) { if (ISNPHY(pi)) @@ -1037,28 +1068,27 @@ void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on) } if (ISNPHY(pi) || ISLCNPHY(pi)) - bcma_write16(core, D11REGOFFS(txe_aux), 0xD0); + W_REG(®s->txe_aux, 0xD0); else - bcma_write16(core, D11REGOFFS(txe_aux), ((1 << 5) | (1 << 4))); + W_REG(®s->txe_aux, ((1 << 5) | (1 << 4))); - (void)bcma_read16(core, D11REGOFFS(txe_aux)); + (void)R_REG(®s->txe_aux); i = 0; count = ofdm ? 30 : 250; while ((i++ < count) - && (bcma_read16(core, D11REGOFFS(txe_status)) & (1 << 7))) + && (R_REG(®s->txe_status) & (1 << 7))) udelay(10); i = 0; - while ((i++ < 10) && - ((bcma_read16(core, D11REGOFFS(txe_status)) & (1 << 10)) == 0)) + while ((i++ < 10) + && ((R_REG(®s->txe_status) & (1 << 10)) == 0)) udelay(10); i = 0; - while ((i++ < 10) && - ((bcma_read16(core, D11REGOFFS(ifsstat)) & (1 << 8)))) + while ((i++ < 10) && ((R_REG(®s->ifsstat) & (1 << 8)))) udelay(10); if (!pa_on) { @@ -1115,7 +1145,7 @@ static bool wlc_phy_cal_txpower_recalc_sw(struct brcms_phy *pi) void wlc_phy_switch_radio(struct brcms_phy_pub *pih, bool on) { struct brcms_phy *pi = (struct brcms_phy *) pih; - (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol)); + (void)R_REG(&pi->regs->maccontrol); if (ISNPHY(pi)) { wlc_phy_switch_radio_nphy(pi, on); @@ -1355,7 +1385,7 @@ void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi, memcpy(&pi->tx_user_target[TXP_FIRST_MCS_40_SDM], &txpwr->mcs_40_mimo[0], BRCMS_NUM_RATES_MCS_2_STREAM); - if (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & MCTL_EN_MAC) + if (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) mac_enabled = true; if (mac_enabled) @@ -1385,8 +1415,7 @@ int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, bool override) if (!SCAN_INPROG_PHY(pi)) { bool suspend; - suspend = (0 == (bcma_read32(pi->d11core, - D11REGOFFS(maccontrol)) & + suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); if (!suspend) @@ -1839,17 +1868,18 @@ void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end) if (NREV_IS(pi->pubpi.phy_rev, 3) || NREV_IS(pi->pubpi.phy_rev, 4)) { - bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), - 0xa0); - bcma_set16(pi->d11core, D11REGOFFS(phyregdata), - 0x1 << 15); + W_REG(&pi->regs->phyregaddr, 0xa0); + (void)R_REG(&pi->regs->phyregaddr); + rxc = R_REG(&pi->regs->phyregdata); + W_REG(&pi->regs->phyregdata, + (0x1 << 15) | rxc); } } else { if (NREV_IS(pi->pubpi.phy_rev, 3) || NREV_IS(pi->pubpi.phy_rev, 4)) { - bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), - 0xa0); - bcma_write16(pi->d11core, D11REGOFFS(phyregdata), rxc); + W_REG(&pi->regs->phyregaddr, 0xa0); + (void)R_REG(&pi->regs->phyregaddr); + W_REG(&pi->regs->phyregdata, rxc); } wlc_phy_por_inform(ppi); @@ -1969,9 +1999,7 @@ void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi, bool 
hwpwrctrl) pi->txpwrctrl = hwpwrctrl; if (ISNPHY(pi)) { - suspend = (0 == (bcma_read32(pi->d11core, - D11REGOFFS(maccontrol)) & - MCTL_EN_MAC)); + suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); if (!suspend) wlapi_suspend_mac_and_wait(pi->sh->physhim); @@ -2173,8 +2201,7 @@ void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val) if (!pi->sh->clk) return; - suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & - MCTL_EN_MAC)); + suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); if (!suspend) wlapi_suspend_mac_and_wait(pi->sh->physhim); @@ -2392,8 +2419,8 @@ wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch) wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0); wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0); - bcma_set32(pi->d11core, D11REGOFFS(maccommand), - MCMD_BG_NOISE); + OR_REG(&pi->regs->maccommand, + MCMD_BG_NOISE); } else { wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_lcnphy_deaf_mode(pi, (bool) 0); @@ -2411,8 +2438,8 @@ wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch) wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0); wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0); - bcma_set32(pi->d11core, D11REGOFFS(maccommand), - MCMD_BG_NOISE); + OR_REG(&pi->regs->maccommand, + MCMD_BG_NOISE); } else { struct phy_iq_est est[PHY_CORE_MAX]; u32 cmplx_pwr[PHY_CORE_MAX]; @@ -2905,29 +2932,29 @@ void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode) mod_phy_reg(pi, 0x44c, (0x1 << 2), (1) << 2); } - ai_cc_reg(pi->sh->sih, - offsetof(struct chipcregs, gpiocontrol), - ~0x0, 0x0); - ai_cc_reg(pi->sh->sih, - offsetof(struct chipcregs, gpioout), - 0x40, 0x40); - ai_cc_reg(pi->sh->sih, - offsetof(struct chipcregs, gpioouten), - 0x40, 0x40); + ai_corereg(pi->sh->sih, SI_CC_IDX, + offsetof(struct chipcregs, gpiocontrol), + ~0x0, 0x0); + ai_corereg(pi->sh->sih, SI_CC_IDX, + offsetof(struct chipcregs, gpioout), 0x40, + 0x40); + ai_corereg(pi->sh->sih, SI_CC_IDX, + offsetof(struct chipcregs, gpioouten), 0x40, + 0x40); } else { mod_phy_reg(pi, 0x44c, (0x1 << 2), (0) << 2); mod_phy_reg(pi, 0x44d, (0x1 << 2), (0) << 2); - ai_cc_reg(pi->sh->sih, - offsetof(struct chipcregs, gpioout), - 0x40, 0x00); - ai_cc_reg(pi->sh->sih, - offsetof(struct chipcregs, gpioouten), - 0x40, 0x0); - ai_cc_reg(pi->sh->sih, - offsetof(struct chipcregs, gpiocontrol), - ~0x0, 0x40); + ai_corereg(pi->sh->sih, SI_CC_IDX, + offsetof(struct chipcregs, gpioout), 0x40, + 0x00); + ai_corereg(pi->sh->sih, SI_CC_IDX, + offsetof(struct chipcregs, gpioouten), 0x40, + 0x0); + ai_corereg(pi->sh->sih, SI_CC_IDX, + offsetof(struct chipcregs, gpiocontrol), + ~0x0, 0x40); } } } diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h b/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h index e34a71e7d242..96e15163222b 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h @@ -166,6 +166,7 @@ struct shared_phy_params { struct phy_shim_info *physhim; uint unit; uint corerev; + uint buscorerev; u16 vid; u16 did; uint chip; @@ -174,6 +175,7 @@ struct shared_phy_params { uint sromrev; uint boardtype; uint boardrev; + uint boardvendor; u32 boardflags; u32 boardflags2; }; @@ -181,7 +183,7 @@ struct shared_phy_params { extern struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp); extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh, - struct bcma_device *d11core, + struct d11regs __iomem *regs, 
int bandtype, struct wiphy *wiphy); extern void wlc_phy_detach(struct brcms_phy_pub *ppi); diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h index af00e2c2b266..bea85241a244 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h @@ -503,8 +503,10 @@ struct shared_phy { uint sromrev; uint boardtype; uint boardrev; + uint boardvendor; u32 boardflags; u32 boardflags2; + uint buscorerev; uint fast_timer; uint slow_timer; uint glacial_timer; @@ -557,7 +559,7 @@ struct brcms_phy { } u; bool user_txpwr_at_rfport; - struct bcma_device *d11core; + struct d11regs __iomem *regs; struct brcms_phy *next; struct brcms_phy_pub pubpi; @@ -772,6 +774,11 @@ struct brcms_phy { s16 nphy_noise_win[PHY_CORE_MAX][PHY_NOISE_WINDOW_SZ]; u8 nphy_noise_index; + u8 nphy_txpid2g[PHY_CORE_NUM_2]; + u8 nphy_txpid5g[PHY_CORE_NUM_2]; + u8 nphy_txpid5gl[PHY_CORE_NUM_2]; + u8 nphy_txpid5gh[PHY_CORE_NUM_2]; + bool nphy_gain_boost; bool nphy_elna_gain_config; u16 old_bphy_test; @@ -1088,7 +1095,7 @@ extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32, #define BRCMS_PHY_WAR_PR51571(pi) \ if (NREV_LT((pi)->pubpi.phy_rev, 3)) \ - (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) + (void)R_REG(&(pi)->regs->maccontrol) extern void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype); extern void wlc_phy_aci_reset_nphy(struct brcms_phy *pi); diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c index ce8562aa5db0..a63aa99d9810 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c @@ -1603,7 +1603,7 @@ wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi, u16 chanspec) si_pmu_pllupd(pi->sh->sih); write_phy_reg(pi, 0x942, 0); wlc_lcnphy_txrx_spur_avoidance_mode(pi, false); - pi_lcn->lcnphy_spurmod = false; + pi_lcn->lcnphy_spurmod = 0; mod_phy_reg(pi, 0x424, (0xff << 8), (0x1b) << 8); write_phy_reg(pi, 0x425, 0x5907); @@ -1616,7 +1616,7 @@ wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi, u16 chanspec) write_phy_reg(pi, 0x942, 0); wlc_lcnphy_txrx_spur_avoidance_mode(pi, true); - pi_lcn->lcnphy_spurmod = false; + pi_lcn->lcnphy_spurmod = 0; mod_phy_reg(pi, 0x424, (0xff << 8), (0x1f) << 8); write_phy_reg(pi, 0x425, 0x590a); @@ -2325,7 +2325,7 @@ static s8 wlc_lcnphy_tempcompensated_txpwrctrl(struct brcms_phy *pi) { s8 index, delta_brd, delta_temp, new_index, tempcorrx; s16 manp, meas_temp, temp_diff; - bool neg = false; + bool neg = 0; u16 temp; struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy; @@ -2348,7 +2348,7 @@ static s8 wlc_lcnphy_tempcompensated_txpwrctrl(struct brcms_phy *pi) manp = LCNPHY_TEMPSENSE(pi_lcn->lcnphy_rawtempsense); temp_diff = manp - meas_temp; if (temp_diff < 0) { - neg = true; + neg = 1; temp_diff = -temp_diff; } @@ -2813,8 +2813,10 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10; u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4; idleTssi = read_phy_reg(pi, 0x4ab); - suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & - MCTL_EN_MAC)); + suspend = + (0 == + (R_REG(&((struct brcms_phy *) pi)->regs->maccontrol) & + MCTL_EN_MAC)); if (!suspend) wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_lcnphy_set_tx_pwr_ctrl(pi, 
LCNPHY_TX_PWR_CTRL_OFF); @@ -2888,8 +2890,7 @@ static void wlc_lcnphy_vbat_temp_sense_setup(struct brcms_phy *pi, u8 mode) for (i = 0; i < 14; i++) values_to_save[i] = read_phy_reg(pi, tempsense_phy_regs[i]); - suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & - MCTL_EN_MAC)); + suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); if (!suspend) wlapi_suspend_mac_and_wait(pi->sh->physhim); save_txpwrCtrlEn = read_radio_reg(pi, 0x4a4); @@ -3015,8 +3016,8 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi) bool suspend; struct brcms_phy *pi = (struct brcms_phy *) ppi; - suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & - MCTL_EN_MAC)); + suspend = + (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); if (!suspend) wlapi_suspend_mac_and_wait(pi->sh->physhim); @@ -3534,17 +3535,15 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh, timer = 0; old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); - curval1 = bcma_read16(pi->d11core, D11REGOFFS(psm_corectlsts)); + curval1 = R_REG(&pi->regs->psm_corectlsts); ptr[130] = 0; - bcma_write16(pi->d11core, D11REGOFFS(psm_corectlsts), - ((1 << 6) | curval1)); + W_REG(&pi->regs->psm_corectlsts, ((1 << 6) | curval1)); - bcma_write16(pi->d11core, D11REGOFFS(smpl_clct_strptr), 0x7E00); - bcma_write16(pi->d11core, D11REGOFFS(smpl_clct_stpptr), 0x8000); + W_REG(&pi->regs->smpl_clct_strptr, 0x7E00); + W_REG(&pi->regs->smpl_clct_stpptr, 0x8000); udelay(20); - curval2 = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param)); - bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), - curval2 | 0x30); + curval2 = R_REG(&pi->regs->psm_phy_hdr_param); + W_REG(&pi->regs->psm_phy_hdr_param, curval2 | 0x30); write_phy_reg(pi, 0x555, 0x0); write_phy_reg(pi, 0x5a6, 0x5); @@ -3561,19 +3560,19 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh, sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); write_phy_reg(pi, 0x6da, (u32) (sslpnCalibClkEnCtrl | 0x2008)); - stpptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_stpptr)); - curptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_curptr)); + stpptr = R_REG(&pi->regs->smpl_clct_stpptr); + curptr = R_REG(&pi->regs->smpl_clct_curptr); do { udelay(10); - curptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_curptr)); + curptr = R_REG(&pi->regs->smpl_clct_curptr); timer++; } while ((curptr != stpptr) && (timer < 500)); - bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), 0x2); + W_REG(&pi->regs->psm_phy_hdr_param, 0x2); strptr = 0x7E00; - bcma_write32(pi->d11core, D11REGOFFS(tplatewrptr), strptr); + W_REG(&pi->regs->tplatewrptr, strptr); while (strptr < 0x8000) { - val = bcma_read32(pi->d11core, D11REGOFFS(tplatewrdata)); + val = R_REG(&pi->regs->tplatewrdata); imag = ((val >> 16) & 0x3ff); real = ((val) & 0x3ff); if (imag > 511) @@ -3598,8 +3597,8 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh, } write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); - bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), curval2); - bcma_write16(pi->d11core, D11REGOFFS(psm_corectlsts), curval1); + W_REG(&pi->regs->psm_phy_hdr_param, curval2); + W_REG(&pi->regs->psm_corectlsts, curval1); } static void @@ -3682,8 +3681,8 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels, wlc_lcnphy_set_cc(pi, cal_type, phy_c15, phy_c16); udelay(20); for (phy_c8 = 0; phy_c7 != 0 && phy_c8 < num_levels; phy_c8++) { - phy_c23 = true; - phy_c22 = false; + phy_c23 = 1; + phy_c22 = 0; switch (cal_type) 
{ case 0: phy_c10 = 511; @@ -3701,18 +3700,18 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels, phy_c9 = read_phy_reg(pi, 0x93d); phy_c9 = 2 * phy_c9; - phy_c24 = false; + phy_c24 = 0; phy_c5 = 7; - phy_c25 = true; + phy_c25 = 1; while (1) { write_radio_reg(pi, RADIO_2064_REG026, (phy_c5 & 0x7) | ((phy_c5 & 0x7) << 4)); udelay(50); - phy_c22 = false; + phy_c22 = 0; ptr[130] = 0; wlc_lcnphy_samp_cap(pi, 1, phy_c9, &ptr[0], 2); if (ptr[130] == 1) - phy_c22 = true; + phy_c22 = 1; if (phy_c22) phy_c5 -= 1; if ((phy_c22 != phy_c24) && (!phy_c25)) @@ -3722,7 +3721,7 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels, if (phy_c5 <= 0 || phy_c5 >= 7) break; phy_c24 = phy_c22; - phy_c25 = false; + phy_c25 = 0; } if (phy_c5 < 0) @@ -3773,10 +3772,10 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels, phy_c13 = phy_c11; phy_c14 = phy_c12; } - phy_c23 = false; + phy_c23 = 0; } } - phy_c23 = true; + phy_c23 = 1; phy_c15 = phy_c13; phy_c16 = phy_c14; phy_c7 = phy_c7 >> 1; @@ -3966,12 +3965,12 @@ s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode) { u16 tempsenseval1, tempsenseval2; s16 avg = 0; - bool suspend = false; + bool suspend = 0; if (mode == 1) { - suspend = (0 == (bcma_read32(pi->d11core, - D11REGOFFS(maccontrol)) & - MCTL_EN_MAC)); + suspend = + (0 == + (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); if (!suspend) wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE); @@ -4008,14 +4007,14 @@ u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode) { u16 tempsenseval1, tempsenseval2; s32 avg = 0; - bool suspend = false; + bool suspend = 0; u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy; if (mode == 1) { - suspend = (0 == (bcma_read32(pi->d11core, - D11REGOFFS(maccontrol)) & - MCTL_EN_MAC)); + suspend = + (0 == + (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); if (!suspend) wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE); @@ -4076,12 +4075,12 @@ s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode) { u16 vbatsenseval; s32 avg = 0; - bool suspend = false; + bool suspend = 0; if (mode == 1) { - suspend = (0 == (bcma_read32(pi->d11core, - D11REGOFFS(maccontrol)) & - MCTL_EN_MAC)); + suspend = + (0 == + (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); if (!suspend) wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_lcnphy_vbat_temp_sense_setup(pi, VBATSENSE); @@ -4128,8 +4127,8 @@ static void wlc_lcnphy_glacial_timer_based_cal(struct brcms_phy *pi) s8 index; u16 SAVE_pwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy; - suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & - MCTL_EN_MAC)); + suspend = + (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); if (!suspend) wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_lcnphy_deaf_mode(pi, true); @@ -4167,8 +4166,8 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi) pi_lcn->lcnphy_full_cal_channel = CHSPEC_CHANNEL(pi->radio_chanspec); index = pi_lcn->lcnphy_current_index; - suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & - MCTL_EN_MAC)); + suspend = + (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); if (!suspend) { wlapi_bmac_write_shm(pi->sh->physhim, M_CTS_DURATION, 10000); wlapi_suspend_mac_and_wait(pi->sh->physhim); diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c index 
a16f1ab292fd..cd19c2f7a347 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c @@ -29,7 +29,6 @@ #include "phy_radio.h" #include "phyreg_n.h" #include "phytbl_n.h" -#include "soc.h" #define READ_RADIO_REG2(pi, radio_type, jspace, core, reg_name) \ read_radio_reg(pi, radio_type##_##jspace##_##reg_name | \ @@ -14418,6 +14417,12 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi) switch (band_num) { case 0: + pi->nphy_txpid2g[PHY_CORE_0] = + (u8) wlapi_getintvar(shim, + BRCMS_SROM_TXPID2GA0); + pi->nphy_txpid2g[PHY_CORE_1] = + (u8) wlapi_getintvar(shim, + BRCMS_SROM_TXPID2GA1); pi->nphy_pwrctrl_info[PHY_CORE_0].max_pwr_2g = (s8) wlapi_getintvar(shim, BRCMS_SROM_MAXP2GA0); @@ -14481,6 +14486,12 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi) break; case 1: + pi->nphy_txpid5g[PHY_CORE_0] = + (u8) wlapi_getintvar(shim, + BRCMS_SROM_TXPID5GA0); + pi->nphy_txpid5g[PHY_CORE_1] = + (u8) wlapi_getintvar(shim, + BRCMS_SROM_TXPID5GA1); pi->nphy_pwrctrl_info[PHY_CORE_0].max_pwr_5gm = (s8) wlapi_getintvar(shim, BRCMS_SROM_MAXP5GA0); pi->nphy_pwrctrl_info[PHY_CORE_1].max_pwr_5gm = @@ -14540,6 +14551,12 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi) break; case 2: + pi->nphy_txpid5gl[0] = + (u8) wlapi_getintvar(shim, + BRCMS_SROM_TXPID5GLA0); + pi->nphy_txpid5gl[1] = + (u8) wlapi_getintvar(shim, + BRCMS_SROM_TXPID5GLA1); pi->nphy_pwrctrl_info[0].max_pwr_5gl = (s8) wlapi_getintvar(shim, BRCMS_SROM_MAXP5GLA0); @@ -14598,6 +14615,12 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi) break; case 3: + pi->nphy_txpid5gh[0] = + (u8) wlapi_getintvar(shim, + BRCMS_SROM_TXPID5GHA0); + pi->nphy_txpid5gh[1] = + (u8) wlapi_getintvar(shim, + BRCMS_SROM_TXPID5GHA1); pi->nphy_pwrctrl_info[0].max_pwr_5gh = (s8) wlapi_getintvar(shim, BRCMS_SROM_MAXP5GHA0); @@ -17802,7 +17825,7 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi) if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) { wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK); - (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol)); + (void)R_REG(&pi->regs->maccontrol); udelay(1); } @@ -17953,7 +17976,7 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi) if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) { wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK); - (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol)); + (void)R_REG(&pi->regs->maccontrol); udelay(1); } @@ -19447,6 +19470,8 @@ void wlc_phy_init_nphy(struct brcms_phy *pi) u8 tx_pwr_ctrl_state; bool do_nphy_cal = false; uint core; + uint origidx, intr_val; + struct d11regs __iomem *regs; u32 d11_clk_ctl_st; bool do_rssi_cal = false; @@ -19460,21 +19485,25 @@ void wlc_phy_init_nphy(struct brcms_phy *pi) (pi->sh->chippkg == BCM4718_PKG_ID))) { if ((pi->sh->boardflags & BFL_EXTLNA) && (CHSPEC_IS2G(pi->radio_chanspec))) - ai_cc_reg(pi->sh->sih, - offsetof(struct chipcregs, chipcontrol), - 0x40, 0x40); + ai_corereg(pi->sh->sih, SI_CC_IDX, + offsetof(struct chipcregs, chipcontrol), + 0x40, 0x40); } if ((pi->nphy_gband_spurwar2_en) && CHSPEC_IS2G(pi->radio_chanspec) && CHSPEC_IS40(pi->radio_chanspec)) { - d11_clk_ctl_st = bcma_read32(pi->d11core, - D11REGOFFS(clk_ctl_st)); - bcma_mask32(pi->d11core, D11REGOFFS(clk_ctl_st), - ~(CCS_FORCEHT | CCS_HTAREQ)); + regs = (struct d11regs __iomem *) + ai_switch_core(pi->sh->sih, + D11_CORE_ID, &origidx, + &intr_val); + 
d11_clk_ctl_st = R_REG(®s->clk_ctl_st); + AND_REG(®s->clk_ctl_st, + ~(CCS_FORCEHT | CCS_HTAREQ)); - bcma_write32(pi->d11core, D11REGOFFS(clk_ctl_st), - d11_clk_ctl_st); + W_REG(®s->clk_ctl_st, d11_clk_ctl_st); + + ai_restore_core(pi->sh->sih, origidx, intr_val); } pi->use_int_tx_iqlo_cal_nphy = @@ -19879,8 +19908,7 @@ void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih, u8 rxcore_bitmask) if (!pi->sh->clk) return; - suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & - MCTL_EN_MAC)); + suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); if (!suspend) wlapi_suspend_mac_and_wait(pi->sh->physhim); @@ -21258,28 +21286,28 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec, val = read_phy_reg(pi, 0x09) & NPHY_BandControl_currentBand; if (CHSPEC_IS5G(chanspec) && !val) { - val = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param)); - bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), + val = R_REG(&pi->regs->psm_phy_hdr_param); + W_REG(&pi->regs->psm_phy_hdr_param, (val | MAC_PHY_FORCE_CLK)); or_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG), (BBCFG_RESETCCA | BBCFG_RESETRX)); - bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), val); + W_REG(&pi->regs->psm_phy_hdr_param, val); or_phy_reg(pi, 0x09, NPHY_BandControl_currentBand); } else if (!CHSPEC_IS5G(chanspec) && val) { and_phy_reg(pi, 0x09, ~NPHY_BandControl_currentBand); - val = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param)); - bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), + val = R_REG(&pi->regs->psm_phy_hdr_param); + W_REG(&pi->regs->psm_phy_hdr_param, (val | MAC_PHY_FORCE_CLK)); and_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG), (u16) (~(BBCFG_RESETCCA | BBCFG_RESETRX))); - bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), val); + W_REG(&pi->regs->psm_phy_hdr_param, val); } write_phy_reg(pi, 0x1ce, ci->PHY_BW1a); @@ -21337,23 +21365,24 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec, spuravoid = 1; wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false); - si_pmu_spuravoid_pllupdate(pi->sh->sih, spuravoid); + si_pmu_spuravoid(pi->sh->sih, spuravoid); wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true); if ((pi->sh->chip == BCM43224_CHIP_ID) || (pi->sh->chip == BCM43225_CHIP_ID)) { + if (spuravoid == 1) { - bcma_write16(pi->d11core, - D11REGOFFS(tsf_clk_frac_l), - 0x5341); - bcma_write16(pi->d11core, - D11REGOFFS(tsf_clk_frac_h), 0x8); + + W_REG(&pi->regs->tsf_clk_frac_l, + 0x5341); + W_REG(&pi->regs->tsf_clk_frac_h, + 0x8); } else { - bcma_write16(pi->d11core, - D11REGOFFS(tsf_clk_frac_l), - 0x8889); - bcma_write16(pi->d11core, - D11REGOFFS(tsf_clk_frac_h), 0x8); + + W_REG(&pi->regs->tsf_clk_frac_l, + 0x8889); + W_REG(&pi->regs->tsf_clk_frac_h, + 0x8); } } @@ -21493,13 +21522,13 @@ void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init) ai_gpiocontrol(pi->sh->sih, mask, mask, GPIO_DRV_PRIORITY); - mc = bcma_read32(pi->d11core, D11REGOFFS(maccontrol)); + mc = R_REG(&pi->regs->maccontrol); mc &= ~MCTL_GPOUT_SEL_MASK; - bcma_write32(pi->d11core, D11REGOFFS(maccontrol), mc); + W_REG(&pi->regs->maccontrol, mc); - bcma_set16(pi->d11core, D11REGOFFS(psm_gpio_oe), mask); + OR_REG(&pi->regs->psm_gpio_oe, mask); - bcma_mask16(pi->d11core, D11REGOFFS(psm_gpio_out), ~mask); + AND_REG(&pi->regs->psm_gpio_out, ~mask); if (lut_init) { write_phy_reg(pi, 0xf8, 0x02d8); @@ -21516,8 +21545,9 @@ u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val) bool suspended = false; if (D11REV_IS(pi->sh->corerev, 16)) { - suspended = 
(bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & - MCTL_EN_MAC) ? false : true; + suspended = + (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) ? + false : true; if (!suspended) wlapi_suspend_mac_and_wait(pi->sh->physhim); } @@ -25376,8 +25406,7 @@ static void wlc_phy_a4(struct brcms_phy *pi, bool full_cal) if (pi->nphy_papd_skip == 1) return; - phy_b3 = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & - MCTL_EN_MAC)); + phy_b3 = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); if (!phy_b3) wlapi_suspend_mac_and_wait(pi->sh->physhim); @@ -27965,11 +27994,20 @@ void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi) chan_freq_range = wlc_phy_get_chan_freq_range_nphy(pi, 0); switch (chan_freq_range) { case WL_CHAN_FREQ_RANGE_2G: + txpi[0] = pi->nphy_txpid2g[0]; + txpi[1] = pi->nphy_txpid2g[1]; + break; case WL_CHAN_FREQ_RANGE_5GL: + txpi[0] = pi->nphy_txpid5gl[0]; + txpi[1] = pi->nphy_txpid5gl[1]; + break; case WL_CHAN_FREQ_RANGE_5GM: + txpi[0] = pi->nphy_txpid5g[0]; + txpi[1] = pi->nphy_txpid5g[1]; + break; case WL_CHAN_FREQ_RANGE_5GH: - txpi[0] = 0; - txpi[1] = 0; + txpi[0] = pi->nphy_txpid5gh[0]; + txpi[1] = pi->nphy_txpid5gh[1]; break; default: txpi[0] = txpi[1] = 91; @@ -28351,7 +28389,7 @@ void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi) if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) { wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK); - (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol)); + (void)R_REG(&pi->regs->maccontrol); udelay(1); } diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/pmu.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/pmu.c index 4931d29d077b..3b36e3acfd74 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/pmu.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/pmu.c @@ -23,7 +23,6 @@ #include "pub.h" #include "aiutils.h" #include "pmu.h" -#include "soc.h" /* * external LPO crystal frequency @@ -115,10 +114,10 @@ static void si_pmu_res_masks(struct si_pub *sih, u32 * pmin, u32 * pmax) uint rsrcs; /* # resources */ - rsrcs = (ai_get_pmucaps(sih) & PCAP_RC_MASK) >> PCAP_RC_SHIFT; + rsrcs = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT; /* determine min/max rsrc masks */ - switch (ai_get_chip_id(sih)) { + switch (sih->chip) { case BCM43224_CHIP_ID: case BCM43225_CHIP_ID: /* ??? 
*/ @@ -139,84 +138,75 @@ static void si_pmu_res_masks(struct si_pub *sih, u32 * pmin, u32 * pmax) *pmax = max_mask; } -void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid) +static void +si_pmu_spuravoid_pllupdate(struct si_pub *sih, struct chipcregs __iomem *cc, + u8 spuravoid) { u32 tmp = 0; - struct bcma_device *core; - /* switch to chipc */ - core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0); - - switch (ai_get_chip_id(sih)) { + switch (sih->chip) { case BCM43224_CHIP_ID: case BCM43225_CHIP_ID: if (spuravoid == 1) { - bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), - PMU1_PLL0_PLLCTL0); - bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), - 0x11500010); - bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), - PMU1_PLL0_PLLCTL1); - bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), - 0x000C0C06); - bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), - PMU1_PLL0_PLLCTL2); - bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), - 0x0F600a08); - bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), - PMU1_PLL0_PLLCTL3); - bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), - 0x00000000); - bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), - PMU1_PLL0_PLLCTL4); - bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), - 0x2001E920); - bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), - PMU1_PLL0_PLLCTL5); - bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), - 0x88888815); + W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0); + W_REG(&cc->pllcontrol_data, 0x11500010); + W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1); + W_REG(&cc->pllcontrol_data, 0x000C0C06); + W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2); + W_REG(&cc->pllcontrol_data, 0x0F600a08); + W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3); + W_REG(&cc->pllcontrol_data, 0x00000000); + W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4); + W_REG(&cc->pllcontrol_data, 0x2001E920); + W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5); + W_REG(&cc->pllcontrol_data, 0x88888815); } else { - bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), - PMU1_PLL0_PLLCTL0); - bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), - 0x11100010); - bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), - PMU1_PLL0_PLLCTL1); - bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), - 0x000c0c06); - bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), - PMU1_PLL0_PLLCTL2); - bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), - 0x03000a08); - bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), - PMU1_PLL0_PLLCTL3); - bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), - 0x00000000); - bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), - PMU1_PLL0_PLLCTL4); - bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), - 0x200005c0); - bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), - PMU1_PLL0_PLLCTL5); - bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), - 0x88888815); + W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0); + W_REG(&cc->pllcontrol_data, 0x11100010); + W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1); + W_REG(&cc->pllcontrol_data, 0x000c0c06); + W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2); + W_REG(&cc->pllcontrol_data, 0x03000a08); + W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3); + W_REG(&cc->pllcontrol_data, 0x00000000); + W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4); + W_REG(&cc->pllcontrol_data, 0x200005c0); + W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5); + W_REG(&cc->pllcontrol_data, 0x88888815); } tmp = 1 << 10; break; + W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0); + W_REG(&cc->pllcontrol_data, 0x11100008); + W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1); + W_REG(&cc->pllcontrol_data, 
0x0c000c06); + W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2); + W_REG(&cc->pllcontrol_data, 0x03000a08); + W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3); + W_REG(&cc->pllcontrol_data, 0x00000000); + W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4); + W_REG(&cc->pllcontrol_data, 0x200005c0); + W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5); + W_REG(&cc->pllcontrol_data, 0x88888855); + + tmp = 1 << 10; + break; + default: /* bail out */ return; } - bcma_set32(core, CHIPCREGOFFS(pmucontrol), tmp); + tmp |= R_REG(&cc->pmucontrol); + W_REG(&cc->pmucontrol, tmp); } u16 si_pmu_fast_pwrup_delay(struct si_pub *sih) { uint delay = PMU_MAX_TRANSITION_DLY; - switch (ai_get_chip_id(sih)) { + switch (sih->chip) { case BCM43224_CHIP_ID: case BCM43225_CHIP_ID: case BCM4313_CHIP_ID: @@ -229,35 +219,54 @@ u16 si_pmu_fast_pwrup_delay(struct si_pub *sih) return (u16) delay; } +void si_pmu_sprom_enable(struct si_pub *sih, bool enable) +{ + struct chipcregs __iomem *cc; + uint origidx; + + /* Remember original core before switch to chipc */ + origidx = ai_coreidx(sih); + cc = ai_setcoreidx(sih, SI_CC_IDX); + + /* Return to original core */ + ai_setcoreidx(sih, origidx); +} + /* Read/write a chipcontrol reg */ u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val) { - ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol_addr), ~0, reg); - return ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol_data), - mask, val); + ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, chipcontrol_addr), + ~0, reg); + return ai_corereg(sih, SI_CC_IDX, + offsetof(struct chipcregs, chipcontrol_data), mask, + val); } /* Read/write a regcontrol reg */ u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val) { - ai_cc_reg(sih, offsetof(struct chipcregs, regcontrol_addr), ~0, reg); - return ai_cc_reg(sih, offsetof(struct chipcregs, regcontrol_data), - mask, val); + ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, regcontrol_addr), + ~0, reg); + return ai_corereg(sih, SI_CC_IDX, + offsetof(struct chipcregs, regcontrol_data), mask, + val); } /* Read/write a pllcontrol reg */ u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val) { - ai_cc_reg(sih, offsetof(struct chipcregs, pllcontrol_addr), ~0, reg); - return ai_cc_reg(sih, offsetof(struct chipcregs, pllcontrol_data), - mask, val); + ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, pllcontrol_addr), + ~0, reg); + return ai_corereg(sih, SI_CC_IDX, + offsetof(struct chipcregs, pllcontrol_data), mask, + val); } /* PMU PLL update */ void si_pmu_pllupd(struct si_pub *sih) { - ai_cc_reg(sih, offsetof(struct chipcregs, pmucontrol), - PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD); + ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, pmucontrol), + PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD); } /* query alp/xtal clock frequency */ @@ -266,10 +275,10 @@ u32 si_pmu_alp_clock(struct si_pub *sih) u32 clock = ALP_CLOCK; /* bail out with default */ - if (!(ai_get_cccaps(sih) & CC_CAP_PMU)) + if (!(sih->cccaps & CC_CAP_PMU)) return clock; - switch (ai_get_chip_id(sih)) { + switch (sih->chip) { case BCM43224_CHIP_ID: case BCM43225_CHIP_ID: case BCM4313_CHIP_ID: @@ -283,29 +292,95 @@ u32 si_pmu_alp_clock(struct si_pub *sih) return clock; } +void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid) +{ + struct chipcregs __iomem *cc; + uint origidx, intr_val; + + /* Remember original core before switch to chipc */ + cc = (struct chipcregs __iomem *) + ai_switch_core(sih, CC_CORE_ID, &origidx, &intr_val); + + /* update the pll changes */ + 
si_pmu_spuravoid_pllupdate(sih, cc, spuravoid); + + /* Return to original core */ + ai_restore_core(sih, origidx, intr_val); +} + /* initialize PMU */ void si_pmu_init(struct si_pub *sih) { - struct bcma_device *core; + struct chipcregs __iomem *cc; + uint origidx; - /* select chipc */ - core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0); + /* Remember original core before switch to chipc */ + origidx = ai_coreidx(sih); + cc = ai_setcoreidx(sih, SI_CC_IDX); + + if (sih->pmurev == 1) + AND_REG(&cc->pmucontrol, ~PCTL_NOILP_ON_WAIT); + else if (sih->pmurev >= 2) + OR_REG(&cc->pmucontrol, PCTL_NOILP_ON_WAIT); - if (ai_get_pmurev(sih) == 1) - bcma_mask32(core, CHIPCREGOFFS(pmucontrol), - ~PCTL_NOILP_ON_WAIT); - else if (ai_get_pmurev(sih) >= 2) - bcma_set32(core, CHIPCREGOFFS(pmucontrol), PCTL_NOILP_ON_WAIT); + /* Return to original core */ + ai_setcoreidx(sih, origidx); +} + +/* initialize PMU chip controls and other chip level stuff */ +void si_pmu_chip_init(struct si_pub *sih) +{ + uint origidx; + + /* Gate off SPROM clock and chip select signals */ + si_pmu_sprom_enable(sih, false); + + /* Remember original core */ + origidx = ai_coreidx(sih); + + /* Return to original core */ + ai_setcoreidx(sih, origidx); +} + +/* initialize PMU switch/regulators */ +void si_pmu_swreg_init(struct si_pub *sih) +{ +} + +/* initialize PLL */ +void si_pmu_pll_init(struct si_pub *sih, uint xtalfreq) +{ + struct chipcregs __iomem *cc; + uint origidx; + + /* Remember original core before switch to chipc */ + origidx = ai_coreidx(sih); + cc = ai_setcoreidx(sih, SI_CC_IDX); + + switch (sih->chip) { + case BCM4313_CHIP_ID: + case BCM43224_CHIP_ID: + case BCM43225_CHIP_ID: + /* ??? */ + break; + default: + break; + } + + /* Return to original core */ + ai_setcoreidx(sih, origidx); } /* initialize PMU resources */ void si_pmu_res_init(struct si_pub *sih) { - struct bcma_device *core; + struct chipcregs __iomem *cc; + uint origidx; u32 min_mask = 0, max_mask = 0; - /* select to chipc */ - core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0); + /* Remember original core before switch to chipc */ + origidx = ai_coreidx(sih); + cc = ai_setcoreidx(sih, SI_CC_IDX); /* Determine min/max rsrc masks */ si_pmu_res_masks(sih, &min_mask, &max_mask); @@ -315,50 +390,55 @@ void si_pmu_res_init(struct si_pub *sih) /* Program max resource mask */ if (max_mask) - bcma_write32(core, CHIPCREGOFFS(max_res_mask), max_mask); + W_REG(&cc->max_res_mask, max_mask); /* Program min resource mask */ if (min_mask) - bcma_write32(core, CHIPCREGOFFS(min_res_mask), min_mask); + W_REG(&cc->min_res_mask, min_mask); /* Add some delay; allow resources to come up and settle. 
*/ mdelay(2); + + /* Return to original core */ + ai_setcoreidx(sih, origidx); } u32 si_pmu_measure_alpclk(struct si_pub *sih) { - struct bcma_device *core; + struct chipcregs __iomem *cc; + uint origidx; u32 alp_khz; - if (ai_get_pmurev(sih) < 10) + if (sih->pmurev < 10) return 0; /* Remember original core before switch to chipc */ - core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0); + origidx = ai_coreidx(sih); + cc = ai_setcoreidx(sih, SI_CC_IDX); - if (bcma_read32(core, CHIPCREGOFFS(pmustatus)) & PST_EXTLPOAVAIL) { + if (R_REG(&cc->pmustatus) & PST_EXTLPOAVAIL) { u32 ilp_ctr, alp_hz; /* * Enable the reg to measure the freq, * in case it was disabled before */ - bcma_write32(core, CHIPCREGOFFS(pmu_xtalfreq), - 1U << PMU_XTALFREQ_REG_MEASURE_SHIFT); + W_REG(&cc->pmu_xtalfreq, + 1U << PMU_XTALFREQ_REG_MEASURE_SHIFT); /* Delay for well over 4 ILP clocks */ udelay(1000); /* Read the latched number of ALP ticks per 4 ILP ticks */ - ilp_ctr = bcma_read32(core, CHIPCREGOFFS(pmu_xtalfreq)) & - PMU_XTALFREQ_REG_ILPCTR_MASK; + ilp_ctr = + R_REG(&cc->pmu_xtalfreq) & PMU_XTALFREQ_REG_ILPCTR_MASK; /* * Turn off the PMU_XTALFREQ_REG_MEASURE_SHIFT * bit to save power */ - bcma_write32(core, CHIPCREGOFFS(pmu_xtalfreq), 0); + W_REG(&cc->pmu_xtalfreq, 0); /* Calculate ALP frequency */ alp_hz = (ilp_ctr * EXT_ILP_HZ) / 4; @@ -371,5 +451,8 @@ u32 si_pmu_measure_alpclk(struct si_pub *sih) } else alp_khz = 0; + /* Return to original core */ + ai_setcoreidx(sih, origidx); + return alp_khz; } diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/pmu.h b/trunk/drivers/net/wireless/brcm80211/brcmsmac/pmu.h index 3e39c5e0f9ff..3a08c620640e 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/pmu.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/pmu.h @@ -26,10 +26,13 @@ extern u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val); extern u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val); extern u32 si_pmu_alp_clock(struct si_pub *sih); extern void si_pmu_pllupd(struct si_pub *sih); -extern void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid); +extern void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid); extern u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val); extern void si_pmu_init(struct si_pub *sih); +extern void si_pmu_chip_init(struct si_pub *sih); +extern void si_pmu_pll_init(struct si_pub *sih, u32 xtalfreq); extern void si_pmu_res_init(struct si_pub *sih); +extern void si_pmu_swreg_init(struct si_pub *sih); extern u32 si_pmu_measure_alpclk(struct si_pub *sih); #endif /* _BRCM_PMU_H_ */ diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/trunk/drivers/net/wireless/brcm80211/brcmsmac/pub.h index f0038ad7d7bf..37bb2dcc113f 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/pub.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/pub.h @@ -17,7 +17,6 @@ #ifndef _BRCM_PUB_H_ #define _BRCM_PUB_H_ -#include #include #include "types.h" #include "defs.h" @@ -171,6 +170,22 @@ enum brcms_srom_id { BRCMS_SROM_TSSIPOS2G, BRCMS_SROM_TSSIPOS5G, BRCMS_SROM_TXCHAIN, + BRCMS_SROM_TXPID2GA0, + BRCMS_SROM_TXPID2GA1, + BRCMS_SROM_TXPID2GA2, + BRCMS_SROM_TXPID2GA3, + BRCMS_SROM_TXPID5GA0, + BRCMS_SROM_TXPID5GA1, + BRCMS_SROM_TXPID5GA2, + BRCMS_SROM_TXPID5GA3, + BRCMS_SROM_TXPID5GHA0, + BRCMS_SROM_TXPID5GHA1, + BRCMS_SROM_TXPID5GHA2, + BRCMS_SROM_TXPID5GHA3, + BRCMS_SROM_TXPID5GLA0, + BRCMS_SROM_TXPID5GLA1, + BRCMS_SROM_TXPID5GLA2, + BRCMS_SROM_TXPID5GLA3, /* * per-path identifiers (see srom.c) */ @@ -210,6 +225,10 
@@ enum brcms_srom_id { BRCMS_SROM_PA2GW2A1, BRCMS_SROM_PA2GW2A2, BRCMS_SROM_PA2GW2A3, + BRCMS_SROM_PA2GW3A0, + BRCMS_SROM_PA2GW3A1, + BRCMS_SROM_PA2GW3A2, + BRCMS_SROM_PA2GW3A3, BRCMS_SROM_PA5GHW0A0, BRCMS_SROM_PA5GHW0A1, BRCMS_SROM_PA5GHW0A2, @@ -222,6 +241,10 @@ enum brcms_srom_id { BRCMS_SROM_PA5GHW2A1, BRCMS_SROM_PA5GHW2A2, BRCMS_SROM_PA5GHW2A3, + BRCMS_SROM_PA5GHW3A0, + BRCMS_SROM_PA5GHW3A1, + BRCMS_SROM_PA5GHW3A2, + BRCMS_SROM_PA5GHW3A3, BRCMS_SROM_PA5GLW0A0, BRCMS_SROM_PA5GLW0A1, BRCMS_SROM_PA5GLW0A2, @@ -234,6 +257,10 @@ enum brcms_srom_id { BRCMS_SROM_PA5GLW2A1, BRCMS_SROM_PA5GLW2A2, BRCMS_SROM_PA5GLW2A3, + BRCMS_SROM_PA5GLW3A0, + BRCMS_SROM_PA5GLW3A1, + BRCMS_SROM_PA5GLW3A2, + BRCMS_SROM_PA5GLW3A3, BRCMS_SROM_PA5GW0A0, BRCMS_SROM_PA5GW0A1, BRCMS_SROM_PA5GW0A2, @@ -246,9 +273,14 @@ enum brcms_srom_id { BRCMS_SROM_PA5GW2A1, BRCMS_SROM_PA5GW2A2, BRCMS_SROM_PA5GW2A3, + BRCMS_SROM_PA5GW3A0, + BRCMS_SROM_PA5GW3A1, + BRCMS_SROM_PA5GW3A2, + BRCMS_SROM_PA5GW3A3, }; #define BRCMS_NUMRATES 16 /* max # of rates in a rateset */ +#define D11_PHY_HDR_LEN 6 /* Phy header length - 6 bytes */ /* phy types */ #define PHY_TYPE_A 0 /* Phy type A */ @@ -382,6 +414,7 @@ struct brcms_pub { uint _nbands; /* # bands supported */ uint now; /* # elapsed seconds */ + bool promisc; /* promiscuous destination address */ bool delayed_down; /* down delayed */ bool associated; /* true:part of [I]BSS, false: not */ /* (union of stas_associated, aps_associated) */ @@ -531,14 +564,15 @@ struct brcms_antselcfg { /* common functions for every port */ extern struct brcms_c_info * -brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit, - bool piomode, uint *perr); +brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit, + bool piomode, void __iomem *regsva, struct pci_dev *btparam, + uint *perr); extern uint brcms_c_detach(struct brcms_c_info *wlc); extern int brcms_c_up(struct brcms_c_info *wlc); extern uint brcms_c_down(struct brcms_c_info *wlc); extern bool brcms_c_chipmatch(u16 vendor, u16 device); -extern void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx); +extern void brcms_c_init(struct brcms_c_info *wlc); extern void brcms_c_reset(struct brcms_c_info *wlc); extern void brcms_c_intrson(struct brcms_c_info *wlc); @@ -594,7 +628,7 @@ extern void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval); extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr); extern int brcms_c_get_tx_power(struct brcms_c_info *wlc); +extern void brcms_c_set_radio_mpc(struct brcms_c_info *wlc, bool mpc); extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc); -extern void brcms_c_mute(struct brcms_c_info *wlc, bool on); #endif /* _BRCM_PUB_H_ */ diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/rate.h b/trunk/drivers/net/wireless/brcm80211/brcmsmac/rate.h index 980d578825cc..e7b9dc2f2731 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/rate.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/rate.h @@ -19,7 +19,6 @@ #include "types.h" #include "d11.h" -#include "phy_hal.h" extern const u8 rate_info[]; extern const struct brcms_c_rateset cck_ofdm_mimo_rates; @@ -199,9 +198,11 @@ static inline u8 cck_rspec(u8 cck) /* Convert encoded rate value in plcp header to numerical rates in 500 KHz * increments */ +extern const u8 ofdm_rate_lookup[]; + static inline u8 ofdm_phy2mac_rate(u8 rlpt) { - return wlc_phy_get_ofdm_rate_lookup()[rlpt & 0x7]; + return ofdm_rate_lookup[rlpt & 0x7]; } static inline u8 cck_phy2mac_rate(u8 signal) diff 
--git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/srom.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/srom.c index 61092156755e..99f791048e84 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/srom.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/srom.c @@ -28,7 +28,6 @@ #include "aiutils.h" #include "otp.h" #include "srom.h" -#include "soc.h" /* * SROM CRC8 polynomial value: @@ -63,6 +62,9 @@ #define SROM_MACHI_ET1 42 #define SROM_MACMID_ET1 43 #define SROM_MACLO_ET1 44 +#define SROM3_MACHI 37 +#define SROM3_MACMID 38 +#define SROM3_MACLO 39 #define SROM_BXARSSI2G 40 #define SROM_BXARSSI5G 41 @@ -99,6 +101,7 @@ #define SROM_BFL 57 #define SROM_BFL2 28 +#define SROM3_BFL2 61 #define SROM_AG10 58 @@ -106,16 +109,99 @@ #define SROM_OPO 60 +#define SROM3_LEDDC 62 + #define SROM_CRCREV 63 +/* SROM Rev 4: Reallocate the software part of the srom to accommodate + * MIMO features. It assumes up to two PCIE functions and 440 bytes + * of usable srom i.e. the usable storage in chips with OTP that + * implements hardware redundancy. + */ + #define SROM4_WORDS 220 +#define SROM4_SIGN 32 +#define SROM4_SIGNATURE 0x5372 + +#define SROM4_BREV 33 + +#define SROM4_BFL0 34 +#define SROM4_BFL1 35 +#define SROM4_BFL2 36 +#define SROM4_BFL3 37 +#define SROM5_BFL0 37 +#define SROM5_BFL1 38 +#define SROM5_BFL2 39 +#define SROM5_BFL3 40 + +#define SROM4_MACHI 38 +#define SROM4_MACMID 39 +#define SROM4_MACLO 40 +#define SROM5_MACHI 41 +#define SROM5_MACMID 42 +#define SROM5_MACLO 43 + +#define SROM4_CCODE 41 +#define SROM4_REGREV 42 +#define SROM5_CCODE 34 +#define SROM5_REGREV 35 + +#define SROM4_LEDBH10 43 +#define SROM4_LEDBH32 44 +#define SROM5_LEDBH10 59 +#define SROM5_LEDBH32 60 + +#define SROM4_LEDDC 45 +#define SROM5_LEDDC 45 + +#define SROM4_AA 46 + +#define SROM4_AG10 47 +#define SROM4_AG32 48 + +#define SROM4_TXPID2G 49 +#define SROM4_TXPID5G 51 +#define SROM4_TXPID5GL 53 +#define SROM4_TXPID5GH 55 + +#define SROM4_TXRXC 61 #define SROM4_TXCHAIN_MASK 0x000f +#define SROM4_TXCHAIN_SHIFT 0 #define SROM4_RXCHAIN_MASK 0x00f0 +#define SROM4_RXCHAIN_SHIFT 4 #define SROM4_SWITCH_MASK 0xff00 +#define SROM4_SWITCH_SHIFT 8 /* Per-path fields */ #define MAX_PATH_SROM 4 +#define SROM4_PATH0 64 +#define SROM4_PATH1 87 +#define SROM4_PATH2 110 +#define SROM4_PATH3 133 + +#define SROM4_2G_ITT_MAXP 0 +#define SROM4_2G_PA 1 +#define SROM4_5G_ITT_MAXP 5 +#define SROM4_5GLH_MAXP 6 +#define SROM4_5G_PA 7 +#define SROM4_5GL_PA 11 +#define SROM4_5GH_PA 15 + +/* All the miriad power offsets */ +#define SROM4_2G_CCKPO 156 +#define SROM4_2G_OFDMPO 157 +#define SROM4_5G_OFDMPO 159 +#define SROM4_5GL_OFDMPO 161 +#define SROM4_5GH_OFDMPO 163 +#define SROM4_2G_MCSPO 165 +#define SROM4_5G_MCSPO 173 +#define SROM4_5GL_MCSPO 181 +#define SROM4_5GH_MCSPO 189 +#define SROM4_CDDPO 197 +#define SROM4_STBCPO 198 +#define SROM4_BW40PO 199 +#define SROM4_BWDUPPO 200 #define SROM4_CRCREV 219 @@ -338,32 +424,103 @@ struct brcms_varbuf { static const struct brcms_sromvar pci_sromvars[] = { {BRCMS_SROM_DEVID, 0xffffff00, SRFL_PRHEX | SRFL_NOVAR, PCI_F0DEVID, 0xffff}, + {BRCMS_SROM_BOARDREV, 0x0000000e, SRFL_PRHEX, SROM_AABREV, + SROM_BR_MASK}, + {BRCMS_SROM_BOARDREV, 0x000000f0, SRFL_PRHEX, SROM4_BREV, 0xffff}, {BRCMS_SROM_BOARDREV, 0xffffff00, SRFL_PRHEX, SROM8_BREV, 0xffff}, + {BRCMS_SROM_BOARDFLAGS, 0x00000002, SRFL_PRHEX, SROM_BFL, 0xffff}, + {BRCMS_SROM_BOARDFLAGS, 0x00000004, SRFL_PRHEX | SRFL_MORE, SROM_BFL, + 0xffff}, + {BRCMS_SROM_CONT, 0, 0, SROM_BFL2, 0xffff}, + {BRCMS_SROM_BOARDFLAGS, 0x00000008, SRFL_PRHEX | 
SRFL_MORE, SROM_BFL, + 0xffff}, + {BRCMS_SROM_CONT, 0, 0, SROM3_BFL2, 0xffff}, + {BRCMS_SROM_BOARDFLAGS, 0x00000010, SRFL_PRHEX | SRFL_MORE, SROM4_BFL0, + 0xffff}, + {BRCMS_SROM_CONT, 0, 0, SROM4_BFL1, 0xffff}, + {BRCMS_SROM_BOARDFLAGS, 0x000000e0, SRFL_PRHEX | SRFL_MORE, SROM5_BFL0, + 0xffff}, + {BRCMS_SROM_CONT, 0, 0, SROM5_BFL1, 0xffff}, {BRCMS_SROM_BOARDFLAGS, 0xffffff00, SRFL_PRHEX | SRFL_MORE, SROM8_BFL0, 0xffff}, {BRCMS_SROM_CONT, 0, 0, SROM8_BFL1, 0xffff}, + {BRCMS_SROM_BOARDFLAGS2, 0x00000010, SRFL_PRHEX | SRFL_MORE, SROM4_BFL2, + 0xffff}, + {BRCMS_SROM_CONT, 0, 0, SROM4_BFL3, 0xffff}, + {BRCMS_SROM_BOARDFLAGS2, 0x000000e0, SRFL_PRHEX | SRFL_MORE, SROM5_BFL2, + 0xffff}, + {BRCMS_SROM_CONT, 0, 0, SROM5_BFL3, 0xffff}, {BRCMS_SROM_BOARDFLAGS2, 0xffffff00, SRFL_PRHEX | SRFL_MORE, SROM8_BFL2, 0xffff}, {BRCMS_SROM_CONT, 0, 0, SROM8_BFL3, 0xffff}, {BRCMS_SROM_BOARDTYPE, 0xfffffffc, SRFL_PRHEX, SROM_SSID, 0xffff}, + {BRCMS_SROM_BOARDNUM, 0x00000006, 0, SROM_MACLO_IL0, 0xffff}, + {BRCMS_SROM_BOARDNUM, 0x00000008, 0, SROM3_MACLO, 0xffff}, + {BRCMS_SROM_BOARDNUM, 0x00000010, 0, SROM4_MACLO, 0xffff}, + {BRCMS_SROM_BOARDNUM, 0x000000e0, 0, SROM5_MACLO, 0xffff}, {BRCMS_SROM_BOARDNUM, 0xffffff00, 0, SROM8_MACLO, 0xffff}, + {BRCMS_SROM_CC, 0x00000002, 0, SROM_AABREV, SROM_CC_MASK}, + {BRCMS_SROM_REGREV, 0x00000008, 0, SROM_OPO, 0xff00}, + {BRCMS_SROM_REGREV, 0x00000010, 0, SROM4_REGREV, 0x00ff}, + {BRCMS_SROM_REGREV, 0x000000e0, 0, SROM5_REGREV, 0x00ff}, {BRCMS_SROM_REGREV, 0xffffff00, 0, SROM8_REGREV, 0x00ff}, + {BRCMS_SROM_LEDBH0, 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0x00ff}, + {BRCMS_SROM_LEDBH1, 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0xff00}, + {BRCMS_SROM_LEDBH2, 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0x00ff}, + {BRCMS_SROM_LEDBH3, 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0xff00}, + {BRCMS_SROM_LEDBH0, 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0x00ff}, + {BRCMS_SROM_LEDBH1, 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0xff00}, + {BRCMS_SROM_LEDBH2, 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0x00ff}, + {BRCMS_SROM_LEDBH3, 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0xff00}, + {BRCMS_SROM_LEDBH0, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0x00ff}, + {BRCMS_SROM_LEDBH1, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0xff00}, + {BRCMS_SROM_LEDBH2, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0x00ff}, + {BRCMS_SROM_LEDBH3, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0xff00}, {BRCMS_SROM_LEDBH0, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH10, 0x00ff}, {BRCMS_SROM_LEDBH1, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH10, 0xff00}, {BRCMS_SROM_LEDBH2, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH32, 0x00ff}, {BRCMS_SROM_LEDBH3, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH32, 0xff00}, + {BRCMS_SROM_PA0B0, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB0, 0xffff}, + {BRCMS_SROM_PA0B1, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB1, 0xffff}, + {BRCMS_SROM_PA0B2, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB2, 0xffff}, + {BRCMS_SROM_PA0ITSSIT, 0x0000000e, 0, SROM_ITT, 0x00ff}, + {BRCMS_SROM_PA0MAXPWR, 0x0000000e, 0, SROM_WL10MAXP, 0x00ff}, {BRCMS_SROM_PA0B0, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB0, 0xffff}, {BRCMS_SROM_PA0B1, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB1, 0xffff}, {BRCMS_SROM_PA0B2, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB2, 0xffff}, {BRCMS_SROM_PA0ITSSIT, 0xffffff00, 0, SROM8_W0_ITTMAXP, 0xff00}, {BRCMS_SROM_PA0MAXPWR, 0xffffff00, 0, SROM8_W0_ITTMAXP, 0x00ff}, + {BRCMS_SROM_OPO, 0x0000000c, 0, SROM_OPO, 0x00ff}, {BRCMS_SROM_OPO, 0xffffff00, 0, SROM8_2G_OFDMPO, 0x00ff}, + {BRCMS_SROM_AA2G, 0x0000000e, 0, SROM_AABREV, SROM_AA0_MASK}, + {BRCMS_SROM_AA2G, 0x000000f0, 0, SROM4_AA, 0x00ff}, {BRCMS_SROM_AA2G, 0xffffff00, 
0, SROM8_AA, 0x00ff}, + {BRCMS_SROM_AA5G, 0x0000000e, 0, SROM_AABREV, SROM_AA1_MASK}, + {BRCMS_SROM_AA5G, 0x000000f0, 0, SROM4_AA, 0xff00}, {BRCMS_SROM_AA5G, 0xffffff00, 0, SROM8_AA, 0xff00}, + {BRCMS_SROM_AG0, 0x0000000e, 0, SROM_AG10, 0x00ff}, + {BRCMS_SROM_AG1, 0x0000000e, 0, SROM_AG10, 0xff00}, + {BRCMS_SROM_AG0, 0x000000f0, 0, SROM4_AG10, 0x00ff}, + {BRCMS_SROM_AG1, 0x000000f0, 0, SROM4_AG10, 0xff00}, + {BRCMS_SROM_AG2, 0x000000f0, 0, SROM4_AG32, 0x00ff}, + {BRCMS_SROM_AG3, 0x000000f0, 0, SROM4_AG32, 0xff00}, {BRCMS_SROM_AG0, 0xffffff00, 0, SROM8_AG10, 0x00ff}, {BRCMS_SROM_AG1, 0xffffff00, 0, SROM8_AG10, 0xff00}, {BRCMS_SROM_AG2, 0xffffff00, 0, SROM8_AG32, 0x00ff}, {BRCMS_SROM_AG3, 0xffffff00, 0, SROM8_AG32, 0xff00}, + {BRCMS_SROM_PA1B0, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB0, 0xffff}, + {BRCMS_SROM_PA1B1, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB1, 0xffff}, + {BRCMS_SROM_PA1B2, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB2, 0xffff}, + {BRCMS_SROM_PA1LOB0, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB0, 0xffff}, + {BRCMS_SROM_PA1LOB1, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB1, 0xffff}, + {BRCMS_SROM_PA1LOB2, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB2, 0xffff}, + {BRCMS_SROM_PA1HIB0, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB0, 0xffff}, + {BRCMS_SROM_PA1HIB1, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB1, 0xffff}, + {BRCMS_SROM_PA1HIB2, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB2, 0xffff}, + {BRCMS_SROM_PA1ITSSIT, 0x0000000e, 0, SROM_ITT, 0xff00}, + {BRCMS_SROM_PA1MAXPWR, 0x0000000e, 0, SROM_WL10MAXP, 0xff00}, + {BRCMS_SROM_PA1LOMAXPWR, 0x0000000c, 0, SROM_WL1LHMAXP, 0xff00}, + {BRCMS_SROM_PA1HIMAXPWR, 0x0000000c, 0, SROM_WL1LHMAXP, 0x00ff}, {BRCMS_SROM_PA1B0, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB0, 0xffff}, {BRCMS_SROM_PA1B1, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB1, 0xffff}, {BRCMS_SROM_PA1B2, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB2, 0xffff}, @@ -377,20 +534,40 @@ static const struct brcms_sromvar pci_sromvars[] = { {BRCMS_SROM_PA1MAXPWR, 0xffffff00, 0, SROM8_W1_ITTMAXP, 0x00ff}, {BRCMS_SROM_PA1LOMAXPWR, 0xffffff00, 0, SROM8_W1_MAXP_LCHC, 0xff00}, {BRCMS_SROM_PA1HIMAXPWR, 0xffffff00, 0, SROM8_W1_MAXP_LCHC, 0x00ff}, + {BRCMS_SROM_BXA2G, 0x00000008, 0, SROM_BXARSSI2G, 0x1800}, + {BRCMS_SROM_RSSISAV2G, 0x00000008, 0, SROM_BXARSSI2G, 0x0700}, + {BRCMS_SROM_RSSISMC2G, 0x00000008, 0, SROM_BXARSSI2G, 0x00f0}, + {BRCMS_SROM_RSSISMF2G, 0x00000008, 0, SROM_BXARSSI2G, 0x000f}, {BRCMS_SROM_BXA2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x1800}, {BRCMS_SROM_RSSISAV2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x0700}, {BRCMS_SROM_RSSISMC2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x00f0}, {BRCMS_SROM_RSSISMF2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x000f}, + {BRCMS_SROM_BXA5G, 0x00000008, 0, SROM_BXARSSI5G, 0x1800}, + {BRCMS_SROM_RSSISAV5G, 0x00000008, 0, SROM_BXARSSI5G, 0x0700}, + {BRCMS_SROM_RSSISMC5G, 0x00000008, 0, SROM_BXARSSI5G, 0x00f0}, + {BRCMS_SROM_RSSISMF5G, 0x00000008, 0, SROM_BXARSSI5G, 0x000f}, {BRCMS_SROM_BXA5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x1800}, {BRCMS_SROM_RSSISAV5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x0700}, {BRCMS_SROM_RSSISMC5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x00f0}, {BRCMS_SROM_RSSISMF5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x000f}, + {BRCMS_SROM_TRI2G, 0x00000008, 0, SROM_TRI52G, 0x00ff}, + {BRCMS_SROM_TRI5G, 0x00000008, 0, SROM_TRI52G, 0xff00}, + {BRCMS_SROM_TRI5GL, 0x00000008, 0, SROM_TRI5GHL, 0x00ff}, + {BRCMS_SROM_TRI5GH, 0x00000008, 0, SROM_TRI5GHL, 0xff00}, {BRCMS_SROM_TRI2G, 0xffffff00, 0, SROM8_TRI52G, 0x00ff}, {BRCMS_SROM_TRI5G, 0xffffff00, 0, SROM8_TRI52G, 0xff00}, {BRCMS_SROM_TRI5GL, 0xffffff00, 0, SROM8_TRI5GHL, 0x00ff}, {BRCMS_SROM_TRI5GH, 0xffffff00, 
0, SROM8_TRI5GHL, 0xff00}, + {BRCMS_SROM_RXPO2G, 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0x00ff}, + {BRCMS_SROM_RXPO5G, 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0xff00}, {BRCMS_SROM_RXPO2G, 0xffffff00, SRFL_PRSIGN, SROM8_RXPO52G, 0x00ff}, {BRCMS_SROM_RXPO5G, 0xffffff00, SRFL_PRSIGN, SROM8_RXPO52G, 0xff00}, + {BRCMS_SROM_TXCHAIN, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, + SROM4_TXCHAIN_MASK}, + {BRCMS_SROM_RXCHAIN, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, + SROM4_RXCHAIN_MASK}, + {BRCMS_SROM_ANTSWITCH, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, + SROM4_SWITCH_MASK}, {BRCMS_SROM_TXCHAIN, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC, SROM4_TXCHAIN_MASK}, {BRCMS_SROM_RXCHAIN, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC, @@ -417,11 +594,43 @@ static const struct brcms_sromvar pci_sromvars[] = { SROM8_FEM_ANTSWLUT_MASK}, {BRCMS_SROM_TEMPTHRESH, 0xffffff00, 0, SROM8_THERMAL, 0xff00}, {BRCMS_SROM_TEMPOFFSET, 0xffffff00, 0, SROM8_THERMAL, 0x00ff}, - + {BRCMS_SROM_TXPID2GA0, 0x000000f0, 0, SROM4_TXPID2G, 0x00ff}, + {BRCMS_SROM_TXPID2GA1, 0x000000f0, 0, SROM4_TXPID2G, 0xff00}, + {BRCMS_SROM_TXPID2GA2, 0x000000f0, 0, SROM4_TXPID2G + 1, 0x00ff}, + {BRCMS_SROM_TXPID2GA3, 0x000000f0, 0, SROM4_TXPID2G + 1, 0xff00}, + {BRCMS_SROM_TXPID5GA0, 0x000000f0, 0, SROM4_TXPID5G, 0x00ff}, + {BRCMS_SROM_TXPID5GA1, 0x000000f0, 0, SROM4_TXPID5G, 0xff00}, + {BRCMS_SROM_TXPID5GA2, 0x000000f0, 0, SROM4_TXPID5G + 1, 0x00ff}, + {BRCMS_SROM_TXPID5GA3, 0x000000f0, 0, SROM4_TXPID5G + 1, 0xff00}, + {BRCMS_SROM_TXPID5GLA0, 0x000000f0, 0, SROM4_TXPID5GL, 0x00ff}, + {BRCMS_SROM_TXPID5GLA1, 0x000000f0, 0, SROM4_TXPID5GL, 0xff00}, + {BRCMS_SROM_TXPID5GLA2, 0x000000f0, 0, SROM4_TXPID5GL + 1, 0x00ff}, + {BRCMS_SROM_TXPID5GLA3, 0x000000f0, 0, SROM4_TXPID5GL + 1, 0xff00}, + {BRCMS_SROM_TXPID5GHA0, 0x000000f0, 0, SROM4_TXPID5GH, 0x00ff}, + {BRCMS_SROM_TXPID5GHA1, 0x000000f0, 0, SROM4_TXPID5GH, 0xff00}, + {BRCMS_SROM_TXPID5GHA2, 0x000000f0, 0, SROM4_TXPID5GH + 1, 0x00ff}, + {BRCMS_SROM_TXPID5GHA3, 0x000000f0, 0, SROM4_TXPID5GH + 1, 0xff00}, + + {BRCMS_SROM_CCODE, 0x0000000f, SRFL_CCODE, SROM_CCODE, 0xffff}, + {BRCMS_SROM_CCODE, 0x00000010, SRFL_CCODE, SROM4_CCODE, 0xffff}, + {BRCMS_SROM_CCODE, 0x000000e0, SRFL_CCODE, SROM5_CCODE, 0xffff}, {BRCMS_SROM_CCODE, 0xffffff00, SRFL_CCODE, SROM8_CCODE, 0xffff}, {BRCMS_SROM_MACADDR, 0xffffff00, SRFL_ETHADDR, SROM8_MACHI, 0xffff}, + {BRCMS_SROM_MACADDR, 0x000000e0, SRFL_ETHADDR, SROM5_MACHI, 0xffff}, + {BRCMS_SROM_MACADDR, 0x00000010, SRFL_ETHADDR, SROM4_MACHI, 0xffff}, + {BRCMS_SROM_MACADDR, 0x00000008, SRFL_ETHADDR, SROM3_MACHI, 0xffff}, + {BRCMS_SROM_IL0MACADDR, 0x00000007, SRFL_ETHADDR, SROM_MACHI_IL0, + 0xffff}, + {BRCMS_SROM_ET1MACADDR, 0x00000007, SRFL_ETHADDR, SROM_MACHI_ET1, + 0xffff}, {BRCMS_SROM_LEDDC, 0xffffff00, SRFL_NOFFS | SRFL_LEDDC, SROM8_LEDDC, 0xffff}, + {BRCMS_SROM_LEDDC, 0x000000e0, SRFL_NOFFS | SRFL_LEDDC, SROM5_LEDDC, + 0xffff}, + {BRCMS_SROM_LEDDC, 0x00000010, SRFL_NOFFS | SRFL_LEDDC, SROM4_LEDDC, + 0xffff}, + {BRCMS_SROM_LEDDC, 0x00000008, SRFL_NOFFS | SRFL_LEDDC, SROM3_LEDDC, + 0xffff}, {BRCMS_SROM_RAWTEMPSENSE, 0xffffff00, SRFL_PRHEX, SROM8_MPWR_RAWTS, 0x01ff}, {BRCMS_SROM_MEASPOWER, 0xffffff00, SRFL_PRHEX, SROM8_MPWR_RAWTS, @@ -441,7 +650,16 @@ static const struct brcms_sromvar pci_sromvars[] = { {BRCMS_SROM_PHYCAL_TEMPDELTA, 0xffffff00, 0, SROM8_PHYCAL_TEMPDELTA, 0x00ff}, + {BRCMS_SROM_CCK2GPO, 0x000000f0, 0, SROM4_2G_CCKPO, 0xffff}, {BRCMS_SROM_CCK2GPO, 0x00000100, 0, SROM8_2G_CCKPO, 0xffff}, + {BRCMS_SROM_OFDM2GPO, 0x000000f0, SRFL_MORE, SROM4_2G_OFDMPO, 0xffff}, + {BRCMS_SROM_CONT, 0, 0, 
SROM4_2G_OFDMPO + 1, 0xffff}, + {BRCMS_SROM_OFDM5GPO, 0x000000f0, SRFL_MORE, SROM4_5G_OFDMPO, 0xffff}, + {BRCMS_SROM_CONT, 0, 0, SROM4_5G_OFDMPO + 1, 0xffff}, + {BRCMS_SROM_OFDM5GLPO, 0x000000f0, SRFL_MORE, SROM4_5GL_OFDMPO, 0xffff}, + {BRCMS_SROM_CONT, 0, 0, SROM4_5GL_OFDMPO + 1, 0xffff}, + {BRCMS_SROM_OFDM5GHPO, 0x000000f0, SRFL_MORE, SROM4_5GH_OFDMPO, 0xffff}, + {BRCMS_SROM_CONT, 0, 0, SROM4_5GH_OFDMPO + 1, 0xffff}, {BRCMS_SROM_OFDM2GPO, 0x00000100, SRFL_MORE, SROM8_2G_OFDMPO, 0xffff}, {BRCMS_SROM_CONT, 0, 0, SROM8_2G_OFDMPO + 1, 0xffff}, {BRCMS_SROM_OFDM5GPO, 0x00000100, SRFL_MORE, SROM8_5G_OFDMPO, 0xffff}, @@ -450,6 +668,38 @@ static const struct brcms_sromvar pci_sromvars[] = { {BRCMS_SROM_CONT, 0, 0, SROM8_5GL_OFDMPO + 1, 0xffff}, {BRCMS_SROM_OFDM5GHPO, 0x00000100, SRFL_MORE, SROM8_5GH_OFDMPO, 0xffff}, {BRCMS_SROM_CONT, 0, 0, SROM8_5GH_OFDMPO + 1, 0xffff}, + {BRCMS_SROM_MCS2GPO0, 0x000000f0, 0, SROM4_2G_MCSPO, 0xffff}, + {BRCMS_SROM_MCS2GPO1, 0x000000f0, 0, SROM4_2G_MCSPO + 1, 0xffff}, + {BRCMS_SROM_MCS2GPO2, 0x000000f0, 0, SROM4_2G_MCSPO + 2, 0xffff}, + {BRCMS_SROM_MCS2GPO3, 0x000000f0, 0, SROM4_2G_MCSPO + 3, 0xffff}, + {BRCMS_SROM_MCS2GPO4, 0x000000f0, 0, SROM4_2G_MCSPO + 4, 0xffff}, + {BRCMS_SROM_MCS2GPO5, 0x000000f0, 0, SROM4_2G_MCSPO + 5, 0xffff}, + {BRCMS_SROM_MCS2GPO6, 0x000000f0, 0, SROM4_2G_MCSPO + 6, 0xffff}, + {BRCMS_SROM_MCS2GPO7, 0x000000f0, 0, SROM4_2G_MCSPO + 7, 0xffff}, + {BRCMS_SROM_MCS5GPO0, 0x000000f0, 0, SROM4_5G_MCSPO, 0xffff}, + {BRCMS_SROM_MCS5GPO1, 0x000000f0, 0, SROM4_5G_MCSPO + 1, 0xffff}, + {BRCMS_SROM_MCS5GPO2, 0x000000f0, 0, SROM4_5G_MCSPO + 2, 0xffff}, + {BRCMS_SROM_MCS5GPO3, 0x000000f0, 0, SROM4_5G_MCSPO + 3, 0xffff}, + {BRCMS_SROM_MCS5GPO4, 0x000000f0, 0, SROM4_5G_MCSPO + 4, 0xffff}, + {BRCMS_SROM_MCS5GPO5, 0x000000f0, 0, SROM4_5G_MCSPO + 5, 0xffff}, + {BRCMS_SROM_MCS5GPO6, 0x000000f0, 0, SROM4_5G_MCSPO + 6, 0xffff}, + {BRCMS_SROM_MCS5GPO7, 0x000000f0, 0, SROM4_5G_MCSPO + 7, 0xffff}, + {BRCMS_SROM_MCS5GLPO0, 0x000000f0, 0, SROM4_5GL_MCSPO, 0xffff}, + {BRCMS_SROM_MCS5GLPO1, 0x000000f0, 0, SROM4_5GL_MCSPO + 1, 0xffff}, + {BRCMS_SROM_MCS5GLPO2, 0x000000f0, 0, SROM4_5GL_MCSPO + 2, 0xffff}, + {BRCMS_SROM_MCS5GLPO3, 0x000000f0, 0, SROM4_5GL_MCSPO + 3, 0xffff}, + {BRCMS_SROM_MCS5GLPO4, 0x000000f0, 0, SROM4_5GL_MCSPO + 4, 0xffff}, + {BRCMS_SROM_MCS5GLPO5, 0x000000f0, 0, SROM4_5GL_MCSPO + 5, 0xffff}, + {BRCMS_SROM_MCS5GLPO6, 0x000000f0, 0, SROM4_5GL_MCSPO + 6, 0xffff}, + {BRCMS_SROM_MCS5GLPO7, 0x000000f0, 0, SROM4_5GL_MCSPO + 7, 0xffff}, + {BRCMS_SROM_MCS5GHPO0, 0x000000f0, 0, SROM4_5GH_MCSPO, 0xffff}, + {BRCMS_SROM_MCS5GHPO1, 0x000000f0, 0, SROM4_5GH_MCSPO + 1, 0xffff}, + {BRCMS_SROM_MCS5GHPO2, 0x000000f0, 0, SROM4_5GH_MCSPO + 2, 0xffff}, + {BRCMS_SROM_MCS5GHPO3, 0x000000f0, 0, SROM4_5GH_MCSPO + 3, 0xffff}, + {BRCMS_SROM_MCS5GHPO4, 0x000000f0, 0, SROM4_5GH_MCSPO + 4, 0xffff}, + {BRCMS_SROM_MCS5GHPO5, 0x000000f0, 0, SROM4_5GH_MCSPO + 5, 0xffff}, + {BRCMS_SROM_MCS5GHPO6, 0x000000f0, 0, SROM4_5GH_MCSPO + 6, 0xffff}, + {BRCMS_SROM_MCS5GHPO7, 0x000000f0, 0, SROM4_5GH_MCSPO + 7, 0xffff}, {BRCMS_SROM_MCS2GPO0, 0x00000100, 0, SROM8_2G_MCSPO, 0xffff}, {BRCMS_SROM_MCS2GPO1, 0x00000100, 0, SROM8_2G_MCSPO + 1, 0xffff}, {BRCMS_SROM_MCS2GPO2, 0x00000100, 0, SROM8_2G_MCSPO + 2, 0xffff}, @@ -482,6 +732,10 @@ static const struct brcms_sromvar pci_sromvars[] = { {BRCMS_SROM_MCS5GHPO5, 0x00000100, 0, SROM8_5GH_MCSPO + 5, 0xffff}, {BRCMS_SROM_MCS5GHPO6, 0x00000100, 0, SROM8_5GH_MCSPO + 6, 0xffff}, {BRCMS_SROM_MCS5GHPO7, 0x00000100, 0, SROM8_5GH_MCSPO + 7, 0xffff}, + 
{BRCMS_SROM_CDDPO, 0x000000f0, 0, SROM4_CDDPO, 0xffff}, + {BRCMS_SROM_STBCPO, 0x000000f0, 0, SROM4_STBCPO, 0xffff}, + {BRCMS_SROM_BW40PO, 0x000000f0, 0, SROM4_BW40PO, 0xffff}, + {BRCMS_SROM_BWDUPPO, 0x000000f0, 0, SROM4_BWDUPPO, 0xffff}, {BRCMS_SROM_CDDPO, 0x00000100, 0, SROM8_CDDPO, 0xffff}, {BRCMS_SROM_STBCPO, 0x00000100, 0, SROM8_STBCPO, 0xffff}, {BRCMS_SROM_BW40PO, 0x00000100, 0, SROM8_BW40PO, 0xffff}, @@ -557,6 +811,34 @@ static const struct brcms_sromvar pci_sromvars[] = { }; static const struct brcms_sromvar perpath_pci_sromvars[] = { + {BRCMS_SROM_MAXP2GA0, 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0x00ff}, + {BRCMS_SROM_ITT2GA0, 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0xff00}, + {BRCMS_SROM_ITT5GA0, 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0xff00}, + {BRCMS_SROM_PA2GW0A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA, 0xffff}, + {BRCMS_SROM_PA2GW1A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 1, 0xffff}, + {BRCMS_SROM_PA2GW2A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 2, 0xffff}, + {BRCMS_SROM_PA2GW3A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 3, 0xffff}, + {BRCMS_SROM_MAXP5GA0, 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0x00ff}, + {BRCMS_SROM_MAXP5GHA0, 0x000000f0, 0, SROM4_5GLH_MAXP, 0x00ff}, + {BRCMS_SROM_MAXP5GLA0, 0x000000f0, 0, SROM4_5GLH_MAXP, 0xff00}, + {BRCMS_SROM_PA5GW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA, 0xffff}, + {BRCMS_SROM_PA5GW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 1, 0xffff}, + {BRCMS_SROM_PA5GW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 2, 0xffff}, + {BRCMS_SROM_PA5GW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 3, 0xffff}, + {BRCMS_SROM_PA5GLW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA, 0xffff}, + {BRCMS_SROM_PA5GLW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 1, + 0xffff}, + {BRCMS_SROM_PA5GLW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 2, + 0xffff}, + {BRCMS_SROM_PA5GLW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 3, + 0xffff}, + {BRCMS_SROM_PA5GHW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA, 0xffff}, + {BRCMS_SROM_PA5GHW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 1, + 0xffff}, + {BRCMS_SROM_PA5GHW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 2, + 0xffff}, + {BRCMS_SROM_PA5GHW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 3, + 0xffff}, {BRCMS_SROM_MAXP2GA0, 0xffffff00, 0, SROM8_2G_ITT_MAXP, 0x00ff}, {BRCMS_SROM_ITT2GA0, 0xffffff00, 0, SROM8_2G_ITT_MAXP, 0xff00}, {BRCMS_SROM_ITT5GA0, 0xffffff00, 0, SROM8_5G_ITT_MAXP, 0xff00}, @@ -586,6 +868,24 @@ static const struct brcms_sromvar perpath_pci_sromvars[] = { * shared between devices. */ static u8 brcms_srom_crc8_table[CRC8_TABLE_SIZE]; +static u16 __iomem * +srom_window_address(struct si_pub *sih, u8 __iomem *curmap) +{ + if (sih->ccrev < 32) + return (u16 __iomem *)(curmap + PCI_BAR0_SPROM_OFFSET); + if (sih->cccaps & CC_CAP_SROM) + return (u16 __iomem *) + (curmap + PCI_16KB0_CCREGS_OFFSET + CC_SROM_OTP); + + return NULL; +} + +/* Parse SROM and create name=value pairs. 'srom' points to + * the SROM word array. 'off' specifies the offset of the + * first word 'srom' points to, which should be either 0 or + * SROM3_SWRG_OFF (full SROM or software region). 
+ */ + static uint mask_shift(u16 mask) { uint i; @@ -606,16 +906,18 @@ static uint mask_width(u16 mask) return 0; } -static inline void le16_to_cpu_buf(u16 *buf, uint nwords) +static inline void ltoh16_buf(u16 *buf, unsigned int size) { - while (nwords--) - *(buf + nwords) = le16_to_cpu(*(__le16 *)(buf + nwords)); + size /= 2; + while (size--) + *(buf + size) = le16_to_cpu(*(__le16 *)(buf + size)); } -static inline void cpu_to_le16_buf(u16 *buf, uint nwords) +static inline void htol16_buf(u16 *buf, unsigned int size) { - while (nwords--) - *(__le16 *)(buf + nwords) = cpu_to_le16(*(buf + nwords)); + size /= 2; + while (size--) + *(__le16 *)(buf + size) = cpu_to_le16(*(buf + size)); } /* @@ -627,14 +929,11 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list) struct brcms_srom_list_head *entry; enum brcms_srom_id id; u16 w; - u32 val = 0; + u32 val; const struct brcms_sromvar *srv; uint width; uint flags; u32 sr = (1 << sromrev); - uint p; - uint pb = SROM8_PATH0; - const uint psz = SROM8_PATH1 - SROM8_PATH0; /* first store the srom revision */ entry = kzalloc(sizeof(struct brcms_srom_list_head), GFP_KERNEL); @@ -732,34 +1031,47 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list) list_add(&entry->var_list, var_list); } - for (p = 0; p < MAX_PATH_SROM; p++) { - for (srv = perpath_pci_sromvars; - srv->varid != BRCMS_SROM_NULL; srv++) { - if ((srv->revmask & sr) == 0) - continue; + if (sromrev >= 4) { + /* Do per-path variables */ + uint p, pb, psz; - if (srv->flags & SRFL_NOVAR) - continue; + if (sromrev >= 8) { + pb = SROM8_PATH0; + psz = SROM8_PATH1 - SROM8_PATH0; + } else { + pb = SROM4_PATH0; + psz = SROM4_PATH1 - SROM4_PATH0; + } - w = srom[pb + srv->off]; - val = (w & srv->mask) >> mask_shift(srv->mask); - width = mask_width(srv->mask); + for (p = 0; p < MAX_PATH_SROM; p++) { + for (srv = perpath_pci_sromvars; + srv->varid != BRCMS_SROM_NULL; srv++) { + if ((srv->revmask & sr) == 0) + continue; - /* Cheating: no per-path var is more than - * 1 word */ - if ((srv->flags & SRFL_NOFFS) - && ((int)val == (1 << width) - 1)) - continue; + if (srv->flags & SRFL_NOVAR) + continue; - entry = - kzalloc(sizeof(struct brcms_srom_list_head), - GFP_KERNEL); - entry->varid = srv->varid+p; - entry->var_type = BRCMS_SROM_UNUMBER; - entry->uval = val; - list_add(&entry->var_list, var_list); + w = srom[pb + srv->off]; + val = (w & srv->mask) >> mask_shift(srv->mask); + width = mask_width(srv->mask); + + /* Cheating: no per-path var is more than + * 1 word */ + if ((srv->flags & SRFL_NOFFS) + && ((int)val == (1 << width) - 1)) + continue; + + entry = + kzalloc(sizeof(struct brcms_srom_list_head), + GFP_KERNEL); + entry->varid = srv->varid+p; + entry->var_type = BRCMS_SROM_UNUMBER; + entry->uval = val; + list_add(&entry->var_list, var_list); + } + pb += psz; } - pb += psz; } } @@ -768,48 +1080,41 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list) * Return 0 on success, nonzero on error. 
*/ static int -sprom_read_pci(struct si_pub *sih, u16 *buf, uint nwords, bool check_crc) +sprom_read_pci(struct si_pub *sih, u16 __iomem *sprom, uint wordoff, + u16 *buf, uint nwords, bool check_crc) { int err = 0; uint i; - u8 *bbuf = (u8 *)buf; /* byte buffer */ - uint nbytes = nwords << 1; - struct bcma_device *core; - uint sprom_offset; - - /* determine core to read */ - if (ai_get_ccrev(sih) < 32) { - core = ai_findcore(sih, BCMA_CORE_80211, 0); - sprom_offset = PCI_BAR0_SPROM_OFFSET; - } else { - core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0); - sprom_offset = CHIPCREGOFFS(sromotp); - } - /* read the sprom in bytes */ - for (i = 0; i < nbytes; i++) - bbuf[i] = bcma_read8(core, sprom_offset+i); + /* read the sprom */ + for (i = 0; i < nwords; i++) + buf[i] = R_REG(&sprom[wordoff + i]); - if (buf[0] == 0xffff) - /* - * The hardware thinks that an srom that starts with - * 0xffff is blank, regardless of the rest of the - * content, so declare it bad. - */ - return -ENODATA; + if (check_crc) { - if (check_crc && - crc8(brcms_srom_crc8_table, bbuf, nbytes, CRC8_INIT_VALUE) != - CRC8_GOOD_VALUE(brcms_srom_crc8_table)) - err = -EIO; - else - /* now correct the endianness of the byte array */ - le16_to_cpu_buf(buf, nwords); + if (buf[0] == 0xffff) + /* + * The hardware thinks that an srom that starts with + * 0xffff is blank, regardless of the rest of the + * content, so declare it bad. + */ + return -ENODATA; + + /* fixup the endianness so crc8 will pass */ + htol16_buf(buf, nwords * 2); + if (crc8(brcms_srom_crc8_table, (u8 *) buf, nwords * 2, + CRC8_INIT_VALUE) != + CRC8_GOOD_VALUE(brcms_srom_crc8_table)) + /* DBG only pci always read srom4 first, then srom8/9 */ + err = -EIO; + /* now correct the endianness of the byte array */ + ltoh16_buf(buf, nwords * 2); + } return err; } -static int otp_read_pci(struct si_pub *sih, u16 *buf, uint nwords) +static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz) { u8 *otp; uint sz = OTP_SZ_MAX / 2; /* size in words */ @@ -821,8 +1126,7 @@ static int otp_read_pci(struct si_pub *sih, u16 *buf, uint nwords) err = otp_read_region(sih, OTP_HW_RGN, (u16 *) otp, &sz); - sz = min_t(uint, sz, nwords); - memcpy(buf, otp, sz * 2); + memcpy(buf, otp, bufsz); kfree(otp); @@ -835,13 +1139,13 @@ static int otp_read_pci(struct si_pub *sih, u16 *buf, uint nwords) return -ENODATA; /* fixup the endianness so crc8 will pass */ - cpu_to_le16_buf(buf, sz); - if (crc8(brcms_srom_crc8_table, (u8 *) buf, sz * 2, + htol16_buf(buf, bufsz); + if (crc8(brcms_srom_crc8_table, (u8 *) buf, SROM4_WORDS * 2, CRC8_INIT_VALUE) != CRC8_GOOD_VALUE(brcms_srom_crc8_table)) err = -EIO; - else - /* now correct the endianness of the byte array */ - le16_to_cpu_buf(buf, sz); + + /* now correct the endianness of the byte array */ + ltoh16_buf(buf, bufsz); return err; } @@ -850,9 +1154,10 @@ static int otp_read_pci(struct si_pub *sih, u16 *buf, uint nwords) * Initialize nonvolatile variable table from sprom. * Return 0 on success, nonzero on error. 
*/ -int srom_var_init(struct si_pub *sih) +static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap) { u16 *srom; + u16 __iomem *sromwindow; u8 sromrev = 0; u32 sr; int err = 0; @@ -864,17 +1169,33 @@ int srom_var_init(struct si_pub *sih) if (!srom) return -ENOMEM; + sromwindow = srom_window_address(sih, curmap); + crc8_populate_lsb(brcms_srom_crc8_table, SROM_CRC8_POLY); if (ai_is_sprom_available(sih)) { - err = sprom_read_pci(sih, srom, SROM4_WORDS, true); - - if (err == 0) - /* srom read and passed crc */ - /* top word of sprom contains version and crc8 */ + err = sprom_read_pci(sih, sromwindow, 0, srom, SROM_WORDS, + true); + + if ((srom[SROM4_SIGN] == SROM4_SIGNATURE) || + (((sih->buscoretype == PCIE_CORE_ID) + && (sih->buscorerev >= 6)) + || ((sih->buscoretype == PCI_CORE_ID) + && (sih->buscorerev >= 0xe)))) { + /* sromrev >= 4, read more */ + err = sprom_read_pci(sih, sromwindow, 0, srom, + SROM4_WORDS, true); sromrev = srom[SROM4_CRCREV] & 0xff; + } else if (err == 0) { + /* srom is good and is rev < 4 */ + /* top word of sprom contains version and crc8 */ + sromrev = srom[SROM_CRCREV] & 0xff; + /* bcm4401 sroms misprogrammed */ + if (sromrev == 0x10) + sromrev = 1; + } } else { /* Use OTP if SPROM not available */ - err = otp_read_pci(sih, srom, SROM4_WORDS); + err = otp_read_pci(sih, srom, SROM_MAX); if (err == 0) /* OTP only contain SROM rev8/rev9 for now */ sromrev = srom[SROM4_CRCREV] & 0xff; @@ -887,9 +1208,10 @@ int srom_var_init(struct si_pub *sih) sr = 1 << sromrev; /* - * srom version check: Current valid versions: 8, 9 + * srom version check: Current valid versions: 1, 2, 3, 4, 5, 8, + * 9 */ - if ((sr & 0x300) == 0) { + if ((sr & 0x33e) == 0) { err = -EINVAL; goto errout; } @@ -916,6 +1238,21 @@ void srom_free_vars(struct si_pub *sih) kfree(entry); } } +/* + * Initialize local vars from the right source for this platform. + * Return 0 on success, nonzero on error. + */ +int srom_var_init(struct si_pub *sih, void __iomem *curmap) +{ + uint len; + + len = 0; + + if (curmap != NULL) + return initvars_srom_pci(sih, curmap); + + return -EINVAL; +} /* * Search the name=value vars for a specific one and return its value. diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/srom.h b/trunk/drivers/net/wireless/brcm80211/brcmsmac/srom.h index f2a58f262c99..708c43ff51cc 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/srom.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/srom.h @@ -20,10 +20,15 @@ #include "types.h" /* Prototypes */ -extern int srom_var_init(struct si_pub *sih); +extern int srom_var_init(struct si_pub *sih, void __iomem *curmap); extern void srom_free_vars(struct si_pub *sih); extern int srom_read(struct si_pub *sih, uint bus, void *curmap, uint byteoff, uint nbytes, u16 *buf, bool check_crc); +/* parse standard PCMCIA cis, normally used by SB/PCMCIA/SDIO/SPI/OTP + * and extract from it into name=value pairs + */ +extern int srom_parsecis(u8 **pcis, uint ciscnt, + char **vars, uint *count); #endif /* _BRCM_SROM_H_ */ diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/types.h b/trunk/drivers/net/wireless/brcm80211/brcmsmac/types.h index e11ae83111e4..27a814b07462 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/types.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/types.h @@ -250,18 +250,66 @@ do { \ wiphy_err(dev, "%s: " fmt, __func__, ##args); \ } while (0) +/* + * Register access macros. + * + * These macro's take a pointer to the address to read as one of their + * arguments. 
The macro itself deduces the size of the IO transaction (u8, u16 + * or u32). Advantage of this approach in combination with using a struct to + * define the registers in a register block, is that access size and access + * location are defined in only one spot. This reduces the risk of the + * programmer trying to use an unsupported transaction size on a register. + * + */ + +#define R_REG(r) \ + ({ \ + __typeof(*(r)) __osl_v; \ + switch (sizeof(*(r))) { \ + case sizeof(u8): \ + __osl_v = readb((u8 __iomem *)(r)); \ + break; \ + case sizeof(u16): \ + __osl_v = readw((u16 __iomem *)(r)); \ + break; \ + case sizeof(u32): \ + __osl_v = readl((u32 __iomem *)(r)); \ + break; \ + } \ + __osl_v; \ + }) + +#define W_REG(r, v) do { \ + switch (sizeof(*(r))) { \ + case sizeof(u8): \ + writeb((u8)((v) & 0xFF), (u8 __iomem *)(r)); \ + break; \ + case sizeof(u16): \ + writew((u16)((v) & 0xFFFF), (u16 __iomem *)(r)); \ + break; \ + case sizeof(u32): \ + writel((u32)(v), (u32 __iomem *)(r)); \ + break; \ + } \ + } while (0) + #ifdef CONFIG_BCM47XX /* * bcm4716 (which includes 4717 & 4718), plus 4706 on PCIe can reorder * transactions. As a fix, a read after write is performed on certain places * in the code. Older chips and the newer 5357 family don't require this fix. */ -#define bcma_wflush16(c, o, v) \ - ({ bcma_write16(c, o, v); (void)bcma_read16(c, o); }) +#define W_REG_FLUSH(r, v) ({ W_REG((r), (v)); (void)R_REG(r); }) #else -#define bcma_wflush16(c, o, v) bcma_write16(c, o, v) +#define W_REG_FLUSH(r, v) W_REG((r), (v)) #endif /* CONFIG_BCM47XX */ +#define AND_REG(r, v) W_REG((r), R_REG(r) & (v)) +#define OR_REG(r, v) W_REG((r), R_REG(r) | (v)) + +#define SET_REG(r, mask, val) \ + W_REG((r), ((R_REG(r) & ~(mask)) | (val))) + /* multi-bool data type: set of bools, mbool is true if any is set */ /* set one bool */ diff --git a/trunk/drivers/net/wireless/brcm80211/brcmutil/utils.c b/trunk/drivers/net/wireless/brcm80211/brcmutil/utils.c index b7537f70a795..f27c48910827 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmutil/utils.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmutil/utils.c @@ -16,7 +16,6 @@ #include #include - #include MODULE_AUTHOR("Broadcom Corporation"); @@ -41,20 +40,74 @@ EXPORT_SYMBOL(brcmu_pkt_buf_get_skb); /* Free the driver packet. 
Free the tag if present */ void brcmu_pkt_buf_free_skb(struct sk_buff *skb) { - WARN_ON(skb->next); - if (skb->destructor) - /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if - * destructor exists - */ - dev_kfree_skb_any(skb); - else - /* can free immediately (even in_irq()) if destructor - * does not exist - */ - dev_kfree_skb(skb); + struct sk_buff *nskb; + int nest = 0; + + /* perversion: we use skb->next to chain multi-skb packets */ + while (skb) { + nskb = skb->next; + skb->next = NULL; + + if (skb->destructor) + /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if + * destructor exists + */ + dev_kfree_skb_any(skb); + else + /* can free immediately (even in_irq()) if destructor + * does not exist + */ + dev_kfree_skb(skb); + + nest++; + skb = nskb; + } } EXPORT_SYMBOL(brcmu_pkt_buf_free_skb); + +/* copy a buffer into a pkt buffer chain */ +uint brcmu_pktfrombuf(struct sk_buff *p, uint offset, int len, + unsigned char *buf) +{ + uint n, ret = 0; + + /* skip 'offset' bytes */ + for (; p && offset; p = p->next) { + if (offset < (uint) (p->len)) + break; + offset -= p->len; + } + + if (!p) + return 0; + + /* copy the data */ + for (; p && len; p = p->next) { + n = min((uint) (p->len) - offset, (uint) len); + memcpy(p->data + offset, buf, n); + buf += n; + len -= n; + ret += n; + offset = 0; + } + + return ret; +} +EXPORT_SYMBOL(brcmu_pktfrombuf); + +/* return total length of buffer chain */ +uint brcmu_pkttotlen(struct sk_buff *p) +{ + uint total; + + total = 0; + for (; p; p = p->next) + total += p->len; + return total; +} +EXPORT_SYMBOL(brcmu_pkttotlen); + /* * osl multiple-precedence packet queue * hi_prec is always >= the number of the highest non-empty precedence @@ -62,13 +115,21 @@ EXPORT_SYMBOL(brcmu_pkt_buf_free_skb); struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec, struct sk_buff *p) { - struct sk_buff_head *q; + struct pktq_prec *q; if (pktq_full(pq) || pktq_pfull(pq, prec)) return NULL; - q = &pq->q[prec].skblist; - skb_queue_tail(q, p); + q = &pq->q[prec]; + + if (q->head) + q->tail->prev = p; + else + q->head = p; + + q->tail = p; + q->len++; + pq->len++; if (pq->hi_prec < prec) @@ -81,13 +142,20 @@ EXPORT_SYMBOL(brcmu_pktq_penq); struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec, struct sk_buff *p) { - struct sk_buff_head *q; + struct pktq_prec *q; if (pktq_full(pq) || pktq_pfull(pq, prec)) return NULL; - q = &pq->q[prec].skblist; - skb_queue_head(q, p); + q = &pq->q[prec]; + + if (q->head == NULL) + q->tail = p; + + p->prev = q->head; + q->head = p; + q->len++; + pq->len++; if (pq->hi_prec < prec) @@ -99,30 +167,53 @@ EXPORT_SYMBOL(brcmu_pktq_penq_head); struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec) { - struct sk_buff_head *q; + struct pktq_prec *q; struct sk_buff *p; - q = &pq->q[prec].skblist; - p = skb_dequeue(q); + q = &pq->q[prec]; + + p = q->head; if (p == NULL) return NULL; + q->head = p->prev; + if (q->head == NULL) + q->tail = NULL; + + q->len--; + pq->len--; + + p->prev = NULL; + return p; } EXPORT_SYMBOL(brcmu_pktq_pdeq); struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec) { - struct sk_buff_head *q; - struct sk_buff *p; + struct pktq_prec *q; + struct sk_buff *p, *prev; + + q = &pq->q[prec]; - q = &pq->q[prec].skblist; - p = skb_dequeue_tail(q); + p = q->head; if (p == NULL) return NULL; + for (prev = NULL; p != q->tail; p = p->prev) + prev = p; + + if (prev) + prev->prev = NULL; + else + q->head = NULL; + + q->tail = prev; + q->len--; + pq->len--; + return p; } EXPORT_SYMBOL(brcmu_pktq_pdeq_tail); 
@@ -131,17 +222,31 @@ void brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir, bool (*fn)(struct sk_buff *, void *), void *arg) { - struct sk_buff_head *q; - struct sk_buff *p, *next; + struct pktq_prec *q; + struct sk_buff *p, *prev = NULL; - q = &pq->q[prec].skblist; - skb_queue_walk_safe(q, p, next) { + q = &pq->q[prec]; + p = q->head; + while (p) { if (fn == NULL || (*fn) (p, arg)) { - skb_unlink(p, q); + bool head = (p == q->head); + if (head) + q->head = p->prev; + else + prev->prev = p->prev; + p->prev = NULL; brcmu_pkt_buf_free_skb(p); + q->len--; pq->len--; + p = (head ? q->head : prev->prev); + } else { + prev = p; + p = p->prev; } } + + if (q->head == NULL) + q->tail = NULL; } EXPORT_SYMBOL(brcmu_pktq_pflush); @@ -166,10 +271,8 @@ void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len) pq->max = (u16) max_len; - for (prec = 0; prec < num_prec; prec++) { + for (prec = 0; prec < num_prec; prec++) pq->q[prec].max = pq->max; - skb_queue_head_init(&pq->q[prec].skblist); - } } EXPORT_SYMBOL(brcmu_pktq_init); @@ -181,13 +284,13 @@ struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out) return NULL; for (prec = 0; prec < pq->hi_prec; prec++) - if (!skb_queue_empty(&pq->q[prec].skblist)) + if (pq->q[prec].head) break; if (prec_out) *prec_out = prec; - return skb_peek_tail(&pq->q[prec].skblist); + return pq->q[prec].tail; } EXPORT_SYMBOL(brcmu_pktq_peek_tail); @@ -200,7 +303,7 @@ int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp) for (prec = 0; prec <= pq->hi_prec; prec++) if (prec_bmp & (1 << prec)) - len += pq->q[prec].skblist.qlen; + len += pq->q[prec].len; return len; } @@ -210,32 +313,39 @@ EXPORT_SYMBOL(brcmu_pktq_mlen); struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out) { - struct sk_buff_head *q; + struct pktq_prec *q; struct sk_buff *p; int prec; if (pq->len == 0) return NULL; - while ((prec = pq->hi_prec) > 0 && - skb_queue_empty(&pq->q[prec].skblist)) + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) pq->hi_prec--; - while ((prec_bmp & (1 << prec)) == 0 || - skb_queue_empty(&pq->q[prec].skblist)) + while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL) if (prec-- == 0) return NULL; - q = &pq->q[prec].skblist; - p = skb_dequeue(q); + q = &pq->q[prec]; + + p = q->head; if (p == NULL) return NULL; - pq->len--; + q->head = p->prev; + if (q->head == NULL) + q->tail = NULL; + + q->len--; if (prec_out) *prec_out = prec; + pq->len--; + + p->prev = NULL; + return p; } EXPORT_SYMBOL(brcmu_pktq_mdeq); @@ -254,3 +364,23 @@ void brcmu_prpkt(const char *msg, struct sk_buff *p0) } EXPORT_SYMBOL(brcmu_prpkt); #endif /* defined(BCMDBG) */ + +#if defined(BCMDBG) +/* + * print bytes formatted as hex to a string. 
return the resulting + * string length + */ +int brcmu_format_hex(char *str, const void *bytes, int len) +{ + int i; + char *p = str; + const u8 *src = (const u8 *)bytes; + + for (i = 0; i < len; i++) { + p += snprintf(p, 3, "%02X", *src); + src++; + } + return (int)(p - str); +} +EXPORT_SYMBOL(brcmu_format_hex); +#endif /* defined(BCMDBG) */ diff --git a/trunk/drivers/net/wireless/brcm80211/include/brcmu_utils.h b/trunk/drivers/net/wireless/brcm80211/include/brcmu_utils.h index ad249a0b4730..7d0f46e0eb95 100644 --- a/trunk/drivers/net/wireless/brcm80211/include/brcmu_utils.h +++ b/trunk/drivers/net/wireless/brcm80211/include/brcmu_utils.h @@ -65,7 +65,9 @@ #define ETHER_ADDR_STR_LEN 18 struct pktq_prec { - struct sk_buff_head skblist; + struct sk_buff *head; /* first packet to dequeue */ + struct sk_buff *tail; /* last packet to dequeue */ + u16 len; /* number of queued packets */ u16 max; /* maximum number of queued packets */ }; @@ -86,32 +88,32 @@ struct pktq { static inline int pktq_plen(struct pktq *pq, int prec) { - return pq->q[prec].skblist.qlen; + return pq->q[prec].len; } static inline int pktq_pavail(struct pktq *pq, int prec) { - return pq->q[prec].max - pq->q[prec].skblist.qlen; + return pq->q[prec].max - pq->q[prec].len; } static inline bool pktq_pfull(struct pktq *pq, int prec) { - return pq->q[prec].skblist.qlen >= pq->q[prec].max; + return pq->q[prec].len >= pq->q[prec].max; } static inline bool pktq_pempty(struct pktq *pq, int prec) { - return skb_queue_empty(&pq->q[prec].skblist); + return pq->q[prec].len == 0; } static inline struct sk_buff *pktq_ppeek(struct pktq *pq, int prec) { - return skb_peek(&pq->q[prec].skblist); + return pq->q[prec].head; } static inline struct sk_buff *pktq_ppeek_tail(struct pktq *pq, int prec) { - return skb_peek_tail(&pq->q[prec].skblist); + return pq->q[prec].tail; } extern struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec, @@ -170,16 +172,24 @@ extern void brcmu_pktq_flush(struct pktq *pq, bool dir, bool (*fn)(struct sk_buff *, void *), void *arg); /* externs */ +/* packet */ +extern uint brcmu_pktfrombuf(struct sk_buff *p, + uint offset, int len, unsigned char *buf); +extern uint brcmu_pkttotlen(struct sk_buff *p); + /* ip address */ struct ipv4_addr; - -/* externs */ -/* format/print */ #ifdef BCMDBG extern void brcmu_prpkt(const char *msg, struct sk_buff *p0); #else #define brcmu_prpkt(a, b) #endif /* BCMDBG */ +/* externs */ +/* format/print */ +#if defined(BCMDBG) +extern int brcmu_format_hex(char *str, const void *bytes, int len); +#endif + #endif /* _BRCMU_UTILS_H_ */ diff --git a/trunk/drivers/net/wireless/brcm80211/include/chipcommon.h b/trunk/drivers/net/wireless/brcm80211/include/chipcommon.h index f96834a7c055..fefabc39e646 100644 --- a/trunk/drivers/net/wireless/brcm80211/include/chipcommon.h +++ b/trunk/drivers/net/wireless/brcm80211/include/chipcommon.h @@ -19,8 +19,6 @@ #include "defs.h" /* for PAD macro */ -#define CHIPCREGOFFS(field) offsetof(struct chipcregs, field) - struct chipcregs { u32 chipid; /* 0x0 */ u32 capabilities; diff --git a/trunk/drivers/net/wireless/brcm80211/include/defs.h b/trunk/drivers/net/wireless/brcm80211/include/defs.h index f0d8c04a9c8c..1e5f310af1e7 100644 --- a/trunk/drivers/net/wireless/brcm80211/include/defs.h +++ b/trunk/drivers/net/wireless/brcm80211/include/defs.h @@ -62,6 +62,7 @@ #define WL_RADIO_SW_DISABLE (1<<0) #define WL_RADIO_HW_DISABLE (1<<1) +#define WL_RADIO_MPC_DISABLE (1<<2) /* some countries don't support any channel */ #define WL_RADIO_COUNTRY_DISABLE (1<<3) diff 
--git a/trunk/drivers/net/wireless/brcm80211/include/soc.h b/trunk/drivers/net/wireless/brcm80211/include/soc.h index 4e9b7e4827ea..4fcb956ad9e0 100644 --- a/trunk/drivers/net/wireless/brcm80211/include/soc.h +++ b/trunk/drivers/net/wireless/brcm80211/include/soc.h @@ -77,9 +77,8 @@ #define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */ #define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */ #define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */ -#define DEF_AI_COMP 0xfff /* Default component, in ai chips it - * maps all unused address ranges - */ +/* Default component, in ai chips it maps all unused address ranges */ +#define DEF_AI_COMP 0xfff /* Common core control flags */ #define SICF_BIST_EN 0x8000 @@ -88,11 +87,4 @@ #define SICF_FGC 0x0002 #define SICF_CLOCK_EN 0x0001 -/* Common core status flags */ -#define SISF_BIST_DONE 0x8000 -#define SISF_BIST_ERROR 0x4000 -#define SISF_GATED_CLK 0x2000 -#define SISF_DMA64 0x1000 -#define SISF_CORE_BITS 0x0fff - #endif /* _BRCM_SOC_H */ diff --git a/trunk/drivers/net/wireless/hostap/hostap_cs.c b/trunk/drivers/net/wireless/hostap/hostap_cs.c index 89e9d3a78c3c..5441ad195119 100644 --- a/trunk/drivers/net/wireless/hostap/hostap_cs.c +++ b/trunk/drivers/net/wireless/hostap/hostap_cs.c @@ -655,9 +655,6 @@ static const struct pcmcia_device_id hostap_cs_ids[] = { PCMCIA_DEVICE_PROD_ID123( "Addtron", "AWP-100 Wireless PCMCIA", "Version 01.02", 0xe6ec52ce, 0x08649af2, 0x4b74baa0), - PCMCIA_DEVICE_PROD_ID123( - "Canon", "Wireless LAN CF Card K30225", "Version 01.00", - 0x96ef6fe2, 0x263fcbab, 0xa57adb8c), PCMCIA_DEVICE_PROD_ID123( "D", "Link DWL-650 11Mbps WLAN Card", "Version 01.02", 0x71b18589, 0xb6f1b0ab, 0x4b74baa0), diff --git a/trunk/drivers/net/wireless/hostap/hostap_ioctl.c b/trunk/drivers/net/wireless/hostap/hostap_ioctl.c index 18054d9c6688..045a93645a3d 100644 --- a/trunk/drivers/net/wireless/hostap/hostap_ioctl.c +++ b/trunk/drivers/net/wireless/hostap/hostap_ioctl.c @@ -3872,8 +3872,8 @@ static void prism2_get_drvinfo(struct net_device *dev, iface = netdev_priv(dev); local = iface->local; - strlcpy(info->driver, "hostap", sizeof(info->driver)); - snprintf(info->fw_version, sizeof(info->fw_version), + strncpy(info->driver, "hostap", sizeof(info->driver) - 1); + snprintf(info->fw_version, sizeof(info->fw_version) - 1, "%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff, (local->sta_fw_ver >> 8) & 0xff, local->sta_fw_ver & 0xff); diff --git a/trunk/drivers/net/wireless/ipw2x00/ipw2100.c b/trunk/drivers/net/wireless/ipw2x00/ipw2100.c index a0e5c21d3657..127e9c63beaf 100644 --- a/trunk/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/trunk/drivers/net/wireless/ipw2x00/ipw2100.c @@ -5981,8 +5981,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev, struct ipw2100_priv *priv = libipw_priv(dev); char fw_ver[64], ucode_ver[64]; - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver)); ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver)); @@ -5990,8 +5990,7 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev, snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s", fw_ver, priv->eeprom_version, ucode_ver); - strlcpy(info->bus_info, pci_name(priv->pci_dev), - sizeof(info->bus_info)); + strcpy(info->bus_info, pci_name(priv->pci_dev)); } static u32 ipw2100_ethtool_get_link(struct net_device *dev) diff --git 
a/trunk/drivers/net/wireless/ipw2x00/ipw2200.c b/trunk/drivers/net/wireless/ipw2x00/ipw2200.c index 018a8deb88a8..99a710dfe771 100644 --- a/trunk/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/trunk/drivers/net/wireless/ipw2x00/ipw2200.c @@ -131,14 +131,6 @@ static struct ieee80211_rate ipw2200_rates[] = { #define ipw2200_bg_rates (ipw2200_rates + 0) #define ipw2200_num_bg_rates 12 -/* Ugly macro to convert literal channel numbers into their mhz equivalents - * There are certianly some conditions that will break this (like feeding it '30') - * but they shouldn't arise since nothing talks on channel 30. */ -#define ieee80211chan2mhz(x) \ - (((x) <= 14) ? \ - (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \ - ((x) + 1000) * 5) - #ifdef CONFIG_IPW2200_QOS static int qos_enable = 0; static int qos_burst_enable = 0; @@ -10548,8 +10540,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev, char date[32]; u32 len; - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); len = sizeof(vers); ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len); @@ -10558,8 +10550,7 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev, snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)", vers, date); - strlcpy(info->bus_info, pci_name(p->pci_dev), - sizeof(info->bus_info)); + strcpy(info->bus_info, pci_name(p->pci_dev)); info->eedump_len = IPW_EEPROM_IMAGE_SIZE; } diff --git a/trunk/drivers/net/wireless/ipw2x00/libipw.h b/trunk/drivers/net/wireless/ipw2x00/libipw.h index 8874588fb929..70f5586d96bd 100644 --- a/trunk/drivers/net/wireless/ipw2x00/libipw.h +++ b/trunk/drivers/net/wireless/ipw2x00/libipw.h @@ -66,8 +66,16 @@ extern u32 libipw_debug_level; do { if (libipw_debug_level & (level)) \ printk(KERN_DEBUG "libipw: %c %s " fmt, \ in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0) +static inline bool libipw_ratelimit_debug(u32 level) +{ + return (libipw_debug_level & level) && net_ratelimit(); +} #else #define LIBIPW_DEBUG(level, fmt, args...) do {} while (0) +static inline bool libipw_ratelimit_debug(u32 level) +{ + return false; +} #endif /* CONFIG_LIBIPW_DEBUG */ /* @@ -805,6 +813,9 @@ struct libipw_device { /* WEP and other encryption related settings at the device level */ int open_wep; /* Set to 1 to allow unencrypted frames */ + int reset_on_keychange; /* Set to 1 if the HW needs to be reset on + * WEP key changes */ + /* If the host performs {en,de}cryption, then set to 1 */ int host_encrypt; int host_encrypt_msdu; @@ -857,6 +868,7 @@ struct libipw_device { struct libipw_security * sec); netdev_tx_t (*hard_start_xmit) (struct libipw_txb * txb, struct net_device * dev, int pri); + int (*reset_port) (struct net_device * dev); int (*is_queue_full) (struct net_device * dev, int pri); int (*handle_management) (struct net_device * dev, diff --git a/trunk/drivers/net/wireless/ipw2x00/libipw_wx.c b/trunk/drivers/net/wireless/ipw2x00/libipw_wx.c index 1571505b1a38..6623e5052254 100644 --- a/trunk/drivers/net/wireless/ipw2x00/libipw_wx.c +++ b/trunk/drivers/net/wireless/ipw2x00/libipw_wx.c @@ -474,6 +474,17 @@ int libipw_wx_set_encode(struct libipw_device *ieee, if (ieee->set_security) ieee->set_security(dev, &sec); + /* Do not reset port if card is in Managed mode since resetting will + * generate new IEEE 802.11 authentication which may end up in looping + * with IEEE 802.1X. 
If your hardware requires a reset after WEP + * configuration (for example... Prism2), implement the reset_port in + * the callbacks structures used to initialize the 802.11 stack. */ + if (ieee->reset_on_keychange && + ieee->iw_mode != IW_MODE_INFRA && + ieee->reset_port && ieee->reset_port(dev)) { + printk(KERN_DEBUG "%s: reset_port failed\n", dev->name); + return -EINVAL; + } return 0; } @@ -677,6 +688,20 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee, if (ieee->set_security) ieee->set_security(ieee->dev, &sec); + /* + * Do not reset port if card is in Managed mode since resetting will + * generate new IEEE 802.11 authentication which may end up in looping + * with IEEE 802.1X. If your hardware requires a reset after WEP + * configuration (for example... Prism2), implement the reset_port in + * the callbacks structures used to initialize the 802.11 stack. + */ + if (ieee->reset_on_keychange && + ieee->iw_mode != IW_MODE_INFRA && + ieee->reset_port && ieee->reset_port(dev)) { + LIBIPW_DEBUG_WX("%s: reset_port failed\n", dev->name); + return -EINVAL; + } + return ret; } diff --git a/trunk/drivers/net/wireless/iwlegacy/3945-debug.c b/trunk/drivers/net/wireless/iwlegacy/3945-debug.c deleted file mode 100644 index 5e1a19fd354d..000000000000 --- a/trunk/drivers/net/wireless/iwlegacy/3945-debug.c +++ /dev/null @@ -1,505 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - *****************************************************************************/ - -#include "common.h" -#include "3945.h" - -static int -il3945_stats_flag(struct il_priv *il, char *buf, int bufsz) -{ - int p = 0; - - p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", - le32_to_cpu(il->_3945.stats.flag)); - if (le32_to_cpu(il->_3945.stats.flag) & UCODE_STATS_CLEAR_MSK) - p += scnprintf(buf + p, bufsz - p, - "\tStatistics have been cleared\n"); - p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n", - (le32_to_cpu(il->_3945.stats.flag) & - UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" : "5.2 GHz"); - p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n", - (le32_to_cpu(il->_3945.stats.flag) & - UCODE_STATS_NARROW_BAND_MSK) ? 
"enabled" : "disabled"); - return p; -} - -ssize_t -il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct il_priv *il = file->private_data; - int pos = 0; - char *buf; - int bufsz = - sizeof(struct iwl39_stats_rx_phy) * 40 + - sizeof(struct iwl39_stats_rx_non_phy) * 40 + 400; - ssize_t ret; - struct iwl39_stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm; - struct iwl39_stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck; - struct iwl39_stats_rx_non_phy *general, *accum_general; - struct iwl39_stats_rx_non_phy *delta_general, *max_general; - - if (!il_is_alive(il)) - return -EAGAIN; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IL_ERR("Can not allocate Buffer\n"); - return -ENOMEM; - } - - /* - * The statistic information display here is based on - * the last stats notification from uCode - * might not reflect the current uCode activity - */ - ofdm = &il->_3945.stats.rx.ofdm; - cck = &il->_3945.stats.rx.cck; - general = &il->_3945.stats.rx.general; - accum_ofdm = &il->_3945.accum_stats.rx.ofdm; - accum_cck = &il->_3945.accum_stats.rx.cck; - accum_general = &il->_3945.accum_stats.rx.general; - delta_ofdm = &il->_3945.delta_stats.rx.ofdm; - delta_cck = &il->_3945.delta_stats.rx.cck; - delta_general = &il->_3945.delta_stats.rx.general; - max_ofdm = &il->_3945.max_delta.rx.ofdm; - max_cck = &il->_3945.max_delta.rx.cck; - max_general = &il->_3945.max_delta.rx.general; - - pos += il3945_stats_flag(il, buf, bufsz); - pos += - scnprintf(buf + pos, bufsz - pos, - "%-32s current" - "acumulative delta max\n", - "Statistics_Rx - OFDM:"); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "ina_cnt:", - le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt, - delta_ofdm->ina_cnt, max_ofdm->ina_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "fina_cnt:", - le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt, - delta_ofdm->fina_cnt, max_ofdm->fina_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "plcp_err:", - le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err, - delta_ofdm->plcp_err, max_ofdm->plcp_err); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "crc32_err:", - le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err, - delta_ofdm->crc32_err, max_ofdm->crc32_err); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "overrun_err:", - le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err, - delta_ofdm->overrun_err, max_ofdm->overrun_err); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "early_overrun_err:", - le32_to_cpu(ofdm->early_overrun_err), - accum_ofdm->early_overrun_err, - delta_ofdm->early_overrun_err, - max_ofdm->early_overrun_err); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "crc32_good:", - le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good, - delta_ofdm->crc32_good, max_ofdm->crc32_good); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:", - le32_to_cpu(ofdm->false_alarm_cnt), - accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt, - max_ofdm->false_alarm_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:", - le32_to_cpu(ofdm->fina_sync_err_cnt), - accum_ofdm->fina_sync_err_cnt, - delta_ofdm->fina_sync_err_cnt, - max_ofdm->fina_sync_err_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u 
%10u %10u %10u\n", "sfd_timeout:", - le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout, - delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "fina_timeout:", - le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout, - delta_ofdm->fina_timeout, max_ofdm->fina_timeout); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "unresponded_rts:", - le32_to_cpu(ofdm->unresponded_rts), - accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts, - max_ofdm->unresponded_rts); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "rxe_frame_lmt_ovrun:", - le32_to_cpu(ofdm->rxe_frame_limit_overrun), - accum_ofdm->rxe_frame_limit_overrun, - delta_ofdm->rxe_frame_limit_overrun, - max_ofdm->rxe_frame_limit_overrun); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:", - le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt, - delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:", - le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt, - delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt); - - pos += - scnprintf(buf + pos, bufsz - pos, - "%-32s current" - "acumulative delta max\n", - "Statistics_Rx - CCK:"); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "ina_cnt:", - le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt, - delta_cck->ina_cnt, max_cck->ina_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "fina_cnt:", - le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt, - delta_cck->fina_cnt, max_cck->fina_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "plcp_err:", - le32_to_cpu(cck->plcp_err), accum_cck->plcp_err, - delta_cck->plcp_err, max_cck->plcp_err); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "crc32_err:", - le32_to_cpu(cck->crc32_err), accum_cck->crc32_err, - delta_cck->crc32_err, max_cck->crc32_err); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "overrun_err:", - le32_to_cpu(cck->overrun_err), accum_cck->overrun_err, - delta_cck->overrun_err, max_cck->overrun_err); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "early_overrun_err:", - le32_to_cpu(cck->early_overrun_err), - accum_cck->early_overrun_err, - delta_cck->early_overrun_err, max_cck->early_overrun_err); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "crc32_good:", - le32_to_cpu(cck->crc32_good), accum_cck->crc32_good, - delta_cck->crc32_good, max_cck->crc32_good); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:", - le32_to_cpu(cck->false_alarm_cnt), - accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt, - max_cck->false_alarm_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:", - le32_to_cpu(cck->fina_sync_err_cnt), - accum_cck->fina_sync_err_cnt, - delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "sfd_timeout:", - le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout, - delta_cck->sfd_timeout, max_cck->sfd_timeout); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "fina_timeout:", - le32_to_cpu(cck->fina_timeout), 
accum_cck->fina_timeout, - delta_cck->fina_timeout, max_cck->fina_timeout); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "unresponded_rts:", - le32_to_cpu(cck->unresponded_rts), - accum_cck->unresponded_rts, delta_cck->unresponded_rts, - max_cck->unresponded_rts); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "rxe_frame_lmt_ovrun:", - le32_to_cpu(cck->rxe_frame_limit_overrun), - accum_cck->rxe_frame_limit_overrun, - delta_cck->rxe_frame_limit_overrun, - max_cck->rxe_frame_limit_overrun); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:", - le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt, - delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:", - le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt, - delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt); - - pos += - scnprintf(buf + pos, bufsz - pos, - "%-32s current" - "acumulative delta max\n", - "Statistics_Rx - GENERAL:"); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "bogus_cts:", - le32_to_cpu(general->bogus_cts), accum_general->bogus_cts, - delta_general->bogus_cts, max_general->bogus_cts); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "bogus_ack:", - le32_to_cpu(general->bogus_ack), accum_general->bogus_ack, - delta_general->bogus_ack, max_general->bogus_ack); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "non_bssid_frames:", - le32_to_cpu(general->non_bssid_frames), - accum_general->non_bssid_frames, - delta_general->non_bssid_frames, - max_general->non_bssid_frames); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "filtered_frames:", - le32_to_cpu(general->filtered_frames), - accum_general->filtered_frames, - delta_general->filtered_frames, - max_general->filtered_frames); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", - "non_channel_beacons:", - le32_to_cpu(general->non_channel_beacons), - accum_general->non_channel_beacons, - delta_general->non_channel_beacons, - max_general->non_channel_beacons); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -ssize_t -il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct il_priv *il = file->private_data; - int pos = 0; - char *buf; - int bufsz = (sizeof(struct iwl39_stats_tx) * 48) + 250; - ssize_t ret; - struct iwl39_stats_tx *tx, *accum_tx, *delta_tx, *max_tx; - - if (!il_is_alive(il)) - return -EAGAIN; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IL_ERR("Can not allocate Buffer\n"); - return -ENOMEM; - } - - /* - * The statistic information display here is based on - * the last stats notification from uCode - * might not reflect the current uCode activity - */ - tx = &il->_3945.stats.tx; - accum_tx = &il->_3945.accum_stats.tx; - delta_tx = &il->_3945.delta_stats.tx; - max_tx = &il->_3945.max_delta.tx; - pos += il3945_stats_flag(il, buf, bufsz); - pos += - scnprintf(buf + pos, bufsz - pos, - "%-32s current" - "acumulative delta max\n", - "Statistics_Tx:"); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "preamble:", - le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt, - delta_tx->preamble_cnt, max_tx->preamble_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u 
%10u %10u %10u\n", "rx_detected_cnt:", - le32_to_cpu(tx->rx_detected_cnt), - accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt, - max_tx->rx_detected_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "bt_prio_defer_cnt:", - le32_to_cpu(tx->bt_prio_defer_cnt), - accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt, - max_tx->bt_prio_defer_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "bt_prio_kill_cnt:", - le32_to_cpu(tx->bt_prio_kill_cnt), - accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt, - max_tx->bt_prio_kill_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "few_bytes_cnt:", - le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt, - delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "cts_timeout:", - le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout, - delta_tx->cts_timeout, max_tx->cts_timeout); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "ack_timeout:", - le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout, - delta_tx->ack_timeout, max_tx->ack_timeout); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "expected_ack_cnt:", - le32_to_cpu(tx->expected_ack_cnt), - accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt, - max_tx->expected_ack_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "actual_ack_cnt:", - le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt, - delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -ssize_t -il3945_ucode_general_stats_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct il_priv *il = file->private_data; - int pos = 0; - char *buf; - int bufsz = sizeof(struct iwl39_stats_general) * 10 + 300; - ssize_t ret; - struct iwl39_stats_general *general, *accum_general; - struct iwl39_stats_general *delta_general, *max_general; - struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; - struct iwl39_stats_div *div, *accum_div, *delta_div, *max_div; - - if (!il_is_alive(il)) - return -EAGAIN; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IL_ERR("Can not allocate Buffer\n"); - return -ENOMEM; - } - - /* - * The statistic information display here is based on - * the last stats notification from uCode - * might not reflect the current uCode activity - */ - general = &il->_3945.stats.general; - dbg = &il->_3945.stats.general.dbg; - div = &il->_3945.stats.general.div; - accum_general = &il->_3945.accum_stats.general; - delta_general = &il->_3945.delta_stats.general; - max_general = &il->_3945.max_delta.general; - accum_dbg = &il->_3945.accum_stats.general.dbg; - delta_dbg = &il->_3945.delta_stats.general.dbg; - max_dbg = &il->_3945.max_delta.general.dbg; - accum_div = &il->_3945.accum_stats.general.div; - delta_div = &il->_3945.delta_stats.general.div; - max_div = &il->_3945.max_delta.general.div; - pos += il3945_stats_flag(il, buf, bufsz); - pos += - scnprintf(buf + pos, bufsz - pos, - "%-32s current" - "acumulative delta max\n", - "Statistics_General:"); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "burst_check:", - le32_to_cpu(dbg->burst_check), accum_dbg->burst_check, - delta_dbg->burst_check, max_dbg->burst_check); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u 
%10u\n", "burst_count:", - le32_to_cpu(dbg->burst_count), accum_dbg->burst_count, - delta_dbg->burst_count, max_dbg->burst_count); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "sleep_time:", - le32_to_cpu(general->sleep_time), - accum_general->sleep_time, delta_general->sleep_time, - max_general->sleep_time); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "slots_out:", - le32_to_cpu(general->slots_out), accum_general->slots_out, - delta_general->slots_out, max_general->slots_out); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "slots_idle:", - le32_to_cpu(general->slots_idle), - accum_general->slots_idle, delta_general->slots_idle, - max_general->slots_idle); - pos += - scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n", - le32_to_cpu(general->ttl_timestamp)); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "tx_on_a:", - le32_to_cpu(div->tx_on_a), accum_div->tx_on_a, - delta_div->tx_on_a, max_div->tx_on_a); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "tx_on_b:", - le32_to_cpu(div->tx_on_b), accum_div->tx_on_b, - delta_div->tx_on_b, max_div->tx_on_b); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "exec_time:", - le32_to_cpu(div->exec_time), accum_div->exec_time, - delta_div->exec_time, max_div->exec_time); - pos += - scnprintf(buf + pos, bufsz - pos, - " %-30s %10u %10u %10u %10u\n", "probe_time:", - le32_to_cpu(div->probe_time), accum_div->probe_time, - delta_div->probe_time, max_div->probe_time); - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} diff --git a/trunk/drivers/net/wireless/iwlegacy/3945-mac.c b/trunk/drivers/net/wireless/iwlegacy/3945-mac.c deleted file mode 100644 index 54b2d391e91a..000000000000 --- a/trunk/drivers/net/wireless/iwlegacy/3945-mac.c +++ /dev/null @@ -1,3976 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include - -#define DRV_NAME "iwl3945" - -#include "commands.h" -#include "common.h" -#include "3945.h" -#include "iwl-spectrum.h" - -/* - * module name, copyright, version, etc. - */ - -#define DRV_DESCRIPTION \ -"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux" - -#ifdef CONFIG_IWLEGACY_DEBUG -#define VD "d" -#else -#define VD -#endif - -/* - * add "s" to indicate spectrum measurement included. - * we add it here to be consistent with previous releases in which - * this was configurable. - */ -#define DRV_VERSION IWLWIFI_VERSION VD "s" -#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation" -#define DRV_AUTHOR "" - -MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_VERSION(DRV_VERSION); -MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); -MODULE_LICENSE("GPL"); - - /* module parameters */ -struct il_mod_params il3945_mod_params = { - .sw_crypto = 1, - .restart_fw = 1, - .disable_hw_scan = 1, - /* the rest are 0 by default */ -}; - -/** - * il3945_get_antenna_flags - Get antenna flags for RXON command - * @il: eeprom and antenna fields are used to determine antenna flags - * - * il->eeprom39 is used to determine if antenna AUX/MAIN are reversed - * il3945_mod_params.antenna specifies the antenna diversity mode: - * - * IL_ANTENNA_DIVERSITY - NIC selects best antenna by itself - * IL_ANTENNA_MAIN - Force MAIN antenna - * IL_ANTENNA_AUX - Force AUX antenna - */ -__le32 -il3945_get_antenna_flags(const struct il_priv *il) -{ - struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; - - switch (il3945_mod_params.antenna) { - case IL_ANTENNA_DIVERSITY: - return 0; - - case IL_ANTENNA_MAIN: - if (eeprom->antenna_switch_type) - return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; - return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; - - case IL_ANTENNA_AUX: - if (eeprom->antenna_switch_type) - return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; - return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; - } - - /* bad antenna selector value */ - IL_ERR("Bad antenna selector value (0x%x)\n", - il3945_mod_params.antenna); - - return 0; /* "diversity" is default if error */ -} - -static int -il3945_set_ccmp_dynamic_key_info(struct il_priv *il, - struct ieee80211_key_conf *keyconf, u8 sta_id) -{ - unsigned long flags; - __le16 key_flags = 0; - int ret; - - key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); - key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); - - if (sta_id == il->ctx.bcast_sta_id) - key_flags |= STA_KEY_MULTICAST_MSK; - - keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; - keyconf->hw_key_idx = keyconf->keyidx; - key_flags &= ~STA_KEY_FLG_INVALID; - - spin_lock_irqsave(&il->sta_lock, flags); - il->stations[sta_id].keyinfo.cipher = keyconf->cipher; - il->stations[sta_id].keyinfo.keylen = keyconf->keylen; - memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen); - - memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen); - - if ((il->stations[sta_id].sta.key. 
- key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC) - il->stations[sta_id].sta.key.key_offset = - il_get_free_ucode_key_idx(il); - /* else, we are overriding an existing key => no need to allocated room - * in uCode. */ - - WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, - "no space for a new key"); - - il->stations[sta_id].sta.key.key_flags = key_flags; - il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; - il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - - D_INFO("hwcrypto: modify ucode station key info\n"); - - ret = il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC); - - spin_unlock_irqrestore(&il->sta_lock, flags); - - return ret; -} - -static int -il3945_set_tkip_dynamic_key_info(struct il_priv *il, - struct ieee80211_key_conf *keyconf, u8 sta_id) -{ - return -EOPNOTSUPP; -} - -static int -il3945_set_wep_dynamic_key_info(struct il_priv *il, - struct ieee80211_key_conf *keyconf, u8 sta_id) -{ - return -EOPNOTSUPP; -} - -static int -il3945_clear_sta_key_info(struct il_priv *il, u8 sta_id) -{ - unsigned long flags; - struct il_addsta_cmd sta_cmd; - - spin_lock_irqsave(&il->sta_lock, flags); - memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key)); - memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo)); - il->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; - il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; - il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - memcpy(&sta_cmd, &il->stations[sta_id].sta, - sizeof(struct il_addsta_cmd)); - spin_unlock_irqrestore(&il->sta_lock, flags); - - D_INFO("hwcrypto: clear ucode station key info\n"); - return il_send_add_sta(il, &sta_cmd, CMD_SYNC); -} - -static int -il3945_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf, - u8 sta_id) -{ - int ret = 0; - - keyconf->hw_key_idx = HW_KEY_DYNAMIC; - - switch (keyconf->cipher) { - case WLAN_CIPHER_SUITE_CCMP: - ret = il3945_set_ccmp_dynamic_key_info(il, keyconf, sta_id); - break; - case WLAN_CIPHER_SUITE_TKIP: - ret = il3945_set_tkip_dynamic_key_info(il, keyconf, sta_id); - break; - case WLAN_CIPHER_SUITE_WEP40: - case WLAN_CIPHER_SUITE_WEP104: - ret = il3945_set_wep_dynamic_key_info(il, keyconf, sta_id); - break; - default: - IL_ERR("Unknown alg: %s alg=%x\n", __func__, keyconf->cipher); - ret = -EINVAL; - } - - D_WEP("Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n", - keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret); - - return ret; -} - -static int -il3945_remove_static_key(struct il_priv *il) -{ - int ret = -EOPNOTSUPP; - - return ret; -} - -static int -il3945_set_static_key(struct il_priv *il, struct ieee80211_key_conf *key) -{ - if (key->cipher == WLAN_CIPHER_SUITE_WEP40 || - key->cipher == WLAN_CIPHER_SUITE_WEP104) - return -EOPNOTSUPP; - - IL_ERR("Static key invalid: cipher %x\n", key->cipher); - return -EINVAL; -} - -static void -il3945_clear_free_frames(struct il_priv *il) -{ - struct list_head *element; - - D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count); - - while (!list_empty(&il->free_frames)) { - element = il->free_frames.next; - list_del(element); - kfree(list_entry(element, struct il3945_frame, list)); - il->frames_count--; - } - - if (il->frames_count) { - IL_WARN("%d frames still in use. 
Did we lose one?\n", - il->frames_count); - il->frames_count = 0; - } -} - -static struct il3945_frame * -il3945_get_free_frame(struct il_priv *il) -{ - struct il3945_frame *frame; - struct list_head *element; - if (list_empty(&il->free_frames)) { - frame = kzalloc(sizeof(*frame), GFP_KERNEL); - if (!frame) { - IL_ERR("Could not allocate frame!\n"); - return NULL; - } - - il->frames_count++; - return frame; - } - - element = il->free_frames.next; - list_del(element); - return list_entry(element, struct il3945_frame, list); -} - -static void -il3945_free_frame(struct il_priv *il, struct il3945_frame *frame) -{ - memset(frame, 0, sizeof(*frame)); - list_add(&frame->list, &il->free_frames); -} - -unsigned int -il3945_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr, - int left) -{ - - if (!il_is_associated(il) || !il->beacon_skb) - return 0; - - if (il->beacon_skb->len > left) - return 0; - - memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len); - - return il->beacon_skb->len; -} - -static int -il3945_send_beacon_cmd(struct il_priv *il) -{ - struct il3945_frame *frame; - unsigned int frame_size; - int rc; - u8 rate; - - frame = il3945_get_free_frame(il); - - if (!frame) { - IL_ERR("Could not obtain free frame buffer for beacon " - "command.\n"); - return -ENOMEM; - } - - rate = il_get_lowest_plcp(il, &il->ctx); - - frame_size = il3945_hw_get_beacon_cmd(il, frame, rate); - - rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]); - - il3945_free_frame(il, frame); - - return rc; -} - -static void -il3945_unset_hw_params(struct il_priv *il) -{ - if (il->_3945.shared_virt) - dma_free_coherent(&il->pci_dev->dev, - sizeof(struct il3945_shared), - il->_3945.shared_virt, il->_3945.shared_phys); -} - -static void -il3945_build_tx_cmd_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info, - struct il_device_cmd *cmd, - struct sk_buff *skb_frag, int sta_id) -{ - struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload; - struct il_hw_key *keyinfo = &il->stations[sta_id].keyinfo; - - tx_cmd->sec_ctl = 0; - - switch (keyinfo->cipher) { - case WLAN_CIPHER_SUITE_CCMP: - tx_cmd->sec_ctl = TX_CMD_SEC_CCM; - memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen); - D_TX("tx_cmd with AES hwcrypto\n"); - break; - - case WLAN_CIPHER_SUITE_TKIP: - break; - - case WLAN_CIPHER_SUITE_WEP104: - tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; - /* fall through */ - case WLAN_CIPHER_SUITE_WEP40: - tx_cmd->sec_ctl |= - TX_CMD_SEC_WEP | (info->control.hw_key-> - hw_key_idx & TX_CMD_SEC_MSK) << - TX_CMD_SEC_SHIFT; - - memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen); - - D_TX("Configuring packet for WEP encryption " "with key %d\n", - info->control.hw_key->hw_key_idx); - break; - - default: - IL_ERR("Unknown encode cipher %x\n", keyinfo->cipher); - break; - } -} - -/* - * handle build C_TX command notification. 
- */ -static void -il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd, - struct ieee80211_tx_info *info, - struct ieee80211_hdr *hdr, u8 std_id) -{ - struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload; - __le32 tx_flags = tx_cmd->tx_flags; - __le16 fc = hdr->frame_control; - - tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; - if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { - tx_flags |= TX_CMD_FLG_ACK_MSK; - if (ieee80211_is_mgmt(fc)) - tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; - if (ieee80211_is_probe_resp(fc) && - !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) - tx_flags |= TX_CMD_FLG_TSF_MSK; - } else { - tx_flags &= (~TX_CMD_FLG_ACK_MSK); - tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; - } - - tx_cmd->sta_id = std_id; - if (ieee80211_has_morefrags(fc)) - tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; - - if (ieee80211_is_data_qos(fc)) { - u8 *qc = ieee80211_get_qos_ctl(hdr); - tx_cmd->tid_tspec = qc[0] & 0xf; - tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; - } else { - tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; - } - - il_tx_cmd_protection(il, info, fc, &tx_flags); - - tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); - if (ieee80211_is_mgmt(fc)) { - if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) - tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); - else - tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); - } else { - tx_cmd->timeout.pm_frame_timeout = 0; - } - - tx_cmd->driver_txop = 0; - tx_cmd->tx_flags = tx_flags; - tx_cmd->next_frame_len = 0; -} - -/* - * start C_TX command process - */ -static int -il3945_tx_skb(struct il_priv *il, struct sk_buff *skb) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct il3945_tx_cmd *tx_cmd; - struct il_tx_queue *txq = NULL; - struct il_queue *q = NULL; - struct il_device_cmd *out_cmd; - struct il_cmd_meta *out_meta; - dma_addr_t phys_addr; - dma_addr_t txcmd_phys; - int txq_id = skb_get_queue_mapping(skb); - u16 len, idx, hdr_len; - u8 id; - u8 unicast; - u8 sta_id; - u8 tid = 0; - __le16 fc; - u8 wait_write_ptr = 0; - unsigned long flags; - - spin_lock_irqsave(&il->lock, flags); - if (il_is_rfkill(il)) { - D_DROP("Dropping - RF KILL\n"); - goto drop_unlock; - } - - if ((ieee80211_get_tx_rate(il->hw, info)->hw_value & 0xFF) == - IL_INVALID_RATE) { - IL_ERR("ERROR: No TX rate available.\n"); - goto drop_unlock; - } - - unicast = !is_multicast_ether_addr(hdr->addr1); - id = 0; - - fc = hdr->frame_control; - -#ifdef CONFIG_IWLEGACY_DEBUG - if (ieee80211_is_auth(fc)) - D_TX("Sending AUTH frame\n"); - else if (ieee80211_is_assoc_req(fc)) - D_TX("Sending ASSOC frame\n"); - else if (ieee80211_is_reassoc_req(fc)) - D_TX("Sending REASSOC frame\n"); -#endif - - spin_unlock_irqrestore(&il->lock, flags); - - hdr_len = ieee80211_hdrlen(fc); - - /* Find idx into station table for destination station */ - sta_id = il_sta_id_or_broadcast(il, &il->ctx, info->control.sta); - if (sta_id == IL_INVALID_STATION) { - D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1); - goto drop; - } - - D_RATE("station Id %d\n", sta_id); - - if (ieee80211_is_data_qos(fc)) { - u8 *qc = ieee80211_get_qos_ctl(hdr); - tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; - if (unlikely(tid >= MAX_TID_COUNT)) - goto drop; - } - - /* Descriptor for chosen Tx queue */ - txq = &il->txq[txq_id]; - q = &txq->q; - - if ((il_queue_space(q) < q->high_mark)) - goto drop; - - spin_lock_irqsave(&il->lock, flags); - - idx = il_get_cmd_idx(q, q->write_ptr, 0); - - /* Set up driver data for this TFD */ - 
memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info)); - txq->txb[q->write_ptr].skb = skb; - txq->txb[q->write_ptr].ctx = &il->ctx; - - /* Init first empty entry in queue's array of Tx/cmd buffers */ - out_cmd = txq->cmd[idx]; - out_meta = &txq->meta[idx]; - tx_cmd = (struct il3945_tx_cmd *)out_cmd->cmd.payload; - memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); - memset(tx_cmd, 0, sizeof(*tx_cmd)); - - /* - * Set up the Tx-command (not MAC!) header. - * Store the chosen Tx queue and TFD idx within the sequence field; - * after Tx, uCode's Tx response will return this value so driver can - * locate the frame within the tx queue and do post-tx processing. - */ - out_cmd->hdr.cmd = C_TX; - out_cmd->hdr.sequence = - cpu_to_le16((u16) - (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr))); - - /* Copy MAC header from skb into command buffer */ - memcpy(tx_cmd->hdr, hdr, hdr_len); - - if (info->control.hw_key) - il3945_build_tx_cmd_hwcrypto(il, info, out_cmd, skb, sta_id); - - /* TODO need this for burst mode later on */ - il3945_build_tx_cmd_basic(il, out_cmd, info, hdr, sta_id); - - il3945_hw_build_tx_cmd_rate(il, out_cmd, info, hdr, sta_id); - - /* Total # bytes to be transmitted */ - len = (u16) skb->len; - tx_cmd->len = cpu_to_le16(len); - - il_dbg_log_tx_data_frame(il, len, hdr); - il_update_stats(il, true, fc, len); - tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; - tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; - - if (!ieee80211_has_morefrags(hdr->frame_control)) { - txq->need_update = 1; - } else { - wait_write_ptr = 1; - txq->need_update = 0; - } - - D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence)); - D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); - il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd)); - il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, - ieee80211_hdrlen(fc)); - - /* - * Use the first empty entry in this queue's command buffer array - * to contain the Tx command and MAC header concatenated together - * (payload data will be in another buffer). - * Size of this varies, due to varying MAC header length. - * If end is not dword aligned, we'll have 2 extra bytes at the end - * of the MAC header (device reads on dword boundaries). - * We'll tell device about this padding later. - */ - len = - sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) + - hdr_len; - len = (len + 3) & ~3; - - /* Physical address of this Tx command's header (not MAC header!), - * within command buffer array. */ - txcmd_phys = - pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE); - /* we do not map meta data ... so we can safely access address to - * provide to unmap command*/ - dma_unmap_addr_set(out_meta, mapping, txcmd_phys); - dma_unmap_len_set(out_meta, len, len); - - /* Add buffer containing Tx command and MAC(!) header to TFD's - * first entry */ - il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1, - 0); - - /* Set up TFD's 2nd entry to point directly to remainder of skb, - * if any (802.11 null frames have no payload). 
*/ - len = skb->len - hdr_len; - if (len) { - phys_addr = - pci_map_single(il->pci_dev, skb->data + hdr_len, len, - PCI_DMA_TODEVICE); - il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr, - len, 0, U32_PAD(len)); - } - - /* Tell device the write idx *just past* this latest filled TFD */ - q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd); - il_txq_update_write_ptr(il, txq); - spin_unlock_irqrestore(&il->lock, flags); - - if (il_queue_space(q) < q->high_mark && il->mac80211_registered) { - if (wait_write_ptr) { - spin_lock_irqsave(&il->lock, flags); - txq->need_update = 1; - il_txq_update_write_ptr(il, txq); - spin_unlock_irqrestore(&il->lock, flags); - } - - il_stop_queue(il, txq); - } - - return 0; - -drop_unlock: - spin_unlock_irqrestore(&il->lock, flags); -drop: - return -1; -} - -static int -il3945_get_measurement(struct il_priv *il, - struct ieee80211_measurement_params *params, u8 type) -{ - struct il_spectrum_cmd spectrum; - struct il_rx_pkt *pkt; - struct il_host_cmd cmd = { - .id = C_SPECTRUM_MEASUREMENT, - .data = (void *)&spectrum, - .flags = CMD_WANT_SKB, - }; - u32 add_time = le64_to_cpu(params->start_time); - int rc; - int spectrum_resp_status; - int duration = le16_to_cpu(params->duration); - struct il_rxon_context *ctx = &il->ctx; - - if (il_is_associated(il)) - add_time = - il_usecs_to_beacons(il, - le64_to_cpu(params->start_time) - - il->_3945.last_tsf, - le16_to_cpu(ctx->timing. - beacon_interval)); - - memset(&spectrum, 0, sizeof(spectrum)); - - spectrum.channel_count = cpu_to_le16(1); - spectrum.flags = - RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK; - spectrum.filter_flags = MEASUREMENT_FILTER_FLAG; - cmd.len = sizeof(spectrum); - spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); - - if (il_is_associated(il)) - spectrum.start_time = - il_add_beacon_time(il, il->_3945.last_beacon_time, add_time, - le16_to_cpu(ctx->timing. 
- beacon_interval)); - else - spectrum.start_time = 0; - - spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT); - spectrum.channels[0].channel = params->channel; - spectrum.channels[0].type = type; - if (ctx->active.flags & RXON_FLG_BAND_24G_MSK) - spectrum.flags |= - RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK | - RXON_FLG_TGG_PROTECT_MSK; - - rc = il_send_cmd_sync(il, &cmd); - if (rc) - return rc; - - pkt = (struct il_rx_pkt *)cmd.reply_page; - if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { - IL_ERR("Bad return from N_RX_ON_ASSOC command\n"); - rc = -EIO; - } - - spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status); - switch (spectrum_resp_status) { - case 0: /* Command will be handled */ - if (pkt->u.spectrum.id != 0xff) { - D_INFO("Replaced existing measurement: %d\n", - pkt->u.spectrum.id); - il->measurement_status &= ~MEASUREMENT_READY; - } - il->measurement_status |= MEASUREMENT_ACTIVE; - rc = 0; - break; - - case 1: /* Command will not be handled */ - rc = -EAGAIN; - break; - } - - il_free_pages(il, cmd.reply_page); - - return rc; -} - -static void -il3945_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb) -{ - struct il_rx_pkt *pkt = rxb_addr(rxb); - struct il_alive_resp *palive; - struct delayed_work *pwork; - - palive = &pkt->u.alive_frame; - - D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n", - palive->is_valid, palive->ver_type, palive->ver_subtype); - - if (palive->ver_subtype == INITIALIZE_SUBTYPE) { - D_INFO("Initialization Alive received.\n"); - memcpy(&il->card_alive_init, &pkt->u.alive_frame, - sizeof(struct il_alive_resp)); - pwork = &il->init_alive_start; - } else { - D_INFO("Runtime Alive received.\n"); - memcpy(&il->card_alive, &pkt->u.alive_frame, - sizeof(struct il_alive_resp)); - pwork = &il->alive_start; - il3945_disable_events(il); - } - - /* We delay the ALIVE response by 5ms to - * give the HW RF Kill time to activate... */ - if (palive->is_valid == UCODE_VALID_OK) - queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5)); - else - IL_WARN("uCode did not respond OK.\n"); -} - -static void -il3945_hdl_add_sta(struct il_priv *il, struct il_rx_buf *rxb) -{ -#ifdef CONFIG_IWLEGACY_DEBUG - struct il_rx_pkt *pkt = rxb_addr(rxb); -#endif - - D_RX("Received C_ADD_STA: 0x%02X\n", pkt->u.status); -} - -static void -il3945_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb) -{ - struct il_rx_pkt *pkt = rxb_addr(rxb); - struct il3945_beacon_notif *beacon = &(pkt->u.beacon_status); -#ifdef CONFIG_IWLEGACY_DEBUG - u8 rate = beacon->beacon_notify_hdr.rate; - - D_RX("beacon status %x retries %d iss %d " "tsf %d %d rate %d\n", - le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK, - beacon->beacon_notify_hdr.failure_frame, - le32_to_cpu(beacon->ibss_mgr_status), - le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate); -#endif - - il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); - -} - -/* Handle notification from uCode that card's power state is changing - * due to software, hardware, or critical temperature RFKILL */ -static void -il3945_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb) -{ - struct il_rx_pkt *pkt = rxb_addr(rxb); - u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); - unsigned long status = il->status; - - IL_WARN("Card state received: HW:%s SW:%s\n", - (flags & HW_CARD_DISABLED) ? "Kill" : "On", - (flags & SW_CARD_DISABLED) ? 
"Kill" : "On"); - - _il_wr(il, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); - - if (flags & HW_CARD_DISABLED) - set_bit(S_RF_KILL_HW, &il->status); - else - clear_bit(S_RF_KILL_HW, &il->status); - - il_scan_cancel(il); - - if ((test_bit(S_RF_KILL_HW, &status) != - test_bit(S_RF_KILL_HW, &il->status))) - wiphy_rfkill_set_hw_state(il->hw->wiphy, - test_bit(S_RF_KILL_HW, &il->status)); - else - wake_up(&il->wait_command_queue); -} - -/** - * il3945_setup_handlers - Initialize Rx handler callbacks - * - * Setup the RX handlers for each of the reply types sent from the uCode - * to the host. - * - * This function chains into the hardware specific files for them to setup - * any hardware specific handlers as well. - */ -static void -il3945_setup_handlers(struct il_priv *il) -{ - il->handlers[N_ALIVE] = il3945_hdl_alive; - il->handlers[C_ADD_STA] = il3945_hdl_add_sta; - il->handlers[N_ERROR] = il_hdl_error; - il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa; - il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement; - il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep; - il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats; - il->handlers[N_BEACON] = il3945_hdl_beacon; - - /* - * The same handler is used for both the REPLY to a discrete - * stats request from the host as well as for the periodic - * stats notifications (after received beacons) from the uCode. - */ - il->handlers[C_STATS] = il3945_hdl_c_stats; - il->handlers[N_STATS] = il3945_hdl_stats; - - il_setup_rx_scan_handlers(il); - il->handlers[N_CARD_STATE] = il3945_hdl_card_state; - - /* Set up hardware specific Rx handlers */ - il3945_hw_handler_setup(il); -} - -/************************** RX-FUNCTIONS ****************************/ -/* - * Rx theory of operation - * - * The host allocates 32 DMA target addresses and passes the host address - * to the firmware at register IL_RFDS_TBL_LOWER + N * RFD_SIZE where N is - * 0 to 31 - * - * Rx Queue Indexes - * The host/firmware share two idx registers for managing the Rx buffers. - * - * The READ idx maps to the first position that the firmware may be writing - * to -- the driver can read up to (but not including) this position and get - * good data. - * The READ idx is managed by the firmware once the card is enabled. - * - * The WRITE idx maps to the last position the driver has read from -- the - * position preceding WRITE is the last slot the firmware can place a packet. - * - * The queue is empty (no good data) if WRITE = READ - 1, and is full if - * WRITE = READ. - * - * During initialization, the host sets up the READ queue position to the first - * IDX position, and WRITE to the last (READ - 1 wrapped) - * - * When the firmware places a packet in a buffer, it will advance the READ idx - * and fire the RX interrupt. The driver can then query the READ idx and - * process as many packets as possible, moving the WRITE idx forward as it - * resets the Rx queue buffers with new memory. - * - * The management in the driver is as follows: - * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When - * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled - * to replenish the iwl->rxq->rx_free. - * + In il3945_rx_replenish (scheduled) if 'processed' != 'read' then the - * iwl->rxq is replenished and the READ IDX is updated (updating the - * 'processed' and 'read' driver idxes as well) - * + A received packet is processed and handed to the kernel network stack, - * detached from the iwl->rxq. The driver 'processed' idx is updated. 
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free - * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ - * IDX is not incremented and iwl->status(RX_STALLED) is set. If there - * were enough free buffers and RX_STALLED is set it is cleared. - * - * - * Driver sequence: - * - * il3945_rx_replenish() Replenishes rx_free list from rx_used, and calls - * il3945_rx_queue_restock - * il3945_rx_queue_restock() Moves available buffers from rx_free into Rx - * queue, updates firmware pointers, and updates - * the WRITE idx. If insufficient rx_free buffers - * are available, schedules il3945_rx_replenish - * - * -- enable interrupts -- - * ISR - il3945_rx() Detach il_rx_bufs from pool up to the - * READ IDX, detaching the SKB from the pool. - * Moves the packet buffer from queue to rx_used. - * Calls il3945_rx_queue_restock to refill any empty - * slots. - * ... - * - */ - -/** - * il3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr - */ -static inline __le32 -il3945_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr) -{ - return cpu_to_le32((u32) dma_addr); -} - -/** - * il3945_rx_queue_restock - refill RX queue from pre-allocated pool - * - * If there are slots in the RX queue that need to be restocked, - * and we have free pre-allocated buffers, fill the ranks as much - * as we can, pulling from rx_free. - * - * This moves the 'write' idx forward to catch up with 'processed', and - * also updates the memory address in the firmware to reference the new - * target buffer. - */ -static void -il3945_rx_queue_restock(struct il_priv *il) -{ - struct il_rx_queue *rxq = &il->rxq; - struct list_head *element; - struct il_rx_buf *rxb; - unsigned long flags; - int write; - - spin_lock_irqsave(&rxq->lock, flags); - write = rxq->write & ~0x7; - while (il_rx_queue_space(rxq) > 0 && rxq->free_count) { - /* Get next free Rx buffer, remove from free list */ - element = rxq->rx_free.next; - rxb = list_entry(element, struct il_rx_buf, list); - list_del(element); - - /* Point to Rx buffer via next RBD in circular buffer */ - rxq->bd[rxq->write] = - il3945_dma_addr2rbd_ptr(il, rxb->page_dma); - rxq->queue[rxq->write] = rxb; - rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; - rxq->free_count--; - } - spin_unlock_irqrestore(&rxq->lock, flags); - /* If the pre-allocated buffer pool is dropping low, schedule to - * refill it */ - if (rxq->free_count <= RX_LOW_WATERMARK) - queue_work(il->workqueue, &il->rx_replenish); - - /* If we've added more space for the firmware to place data, tell it. - * Increment device's write pointer in multiples of 8. */ - if (rxq->write_actual != (rxq->write & ~0x7) || - abs(rxq->write - rxq->read) > 7) { - spin_lock_irqsave(&rxq->lock, flags); - rxq->need_update = 1; - spin_unlock_irqrestore(&rxq->lock, flags); - il_rx_queue_update_write_ptr(il, rxq); - } -} - -/** - * il3945_rx_replenish - Move all used packet from rx_used to rx_free - * - * When moving to rx_free an SKB is allocated for the slot. - * - * Also restock the Rx queue via il3945_rx_queue_restock. 
- * This is called as a scheduled work item (except for during initialization) - */ -static void -il3945_rx_allocate(struct il_priv *il, gfp_t priority) -{ - struct il_rx_queue *rxq = &il->rxq; - struct list_head *element; - struct il_rx_buf *rxb; - struct page *page; - unsigned long flags; - gfp_t gfp_mask = priority; - - while (1) { - spin_lock_irqsave(&rxq->lock, flags); - - if (list_empty(&rxq->rx_used)) { - spin_unlock_irqrestore(&rxq->lock, flags); - return; - } - spin_unlock_irqrestore(&rxq->lock, flags); - - if (rxq->free_count > RX_LOW_WATERMARK) - gfp_mask |= __GFP_NOWARN; - - if (il->hw_params.rx_page_order > 0) - gfp_mask |= __GFP_COMP; - - /* Alloc a new receive buffer */ - page = alloc_pages(gfp_mask, il->hw_params.rx_page_order); - if (!page) { - if (net_ratelimit()) - D_INFO("Failed to allocate SKB buffer.\n"); - if (rxq->free_count <= RX_LOW_WATERMARK && - net_ratelimit()) - IL_ERR("Failed to allocate SKB buffer with %0x." - "Only %u free buffers remaining.\n", - priority, rxq->free_count); - /* We don't reschedule replenish work here -- we will - * call the restock method and if it still needs - * more buffers it will schedule replenish */ - break; - } - - spin_lock_irqsave(&rxq->lock, flags); - if (list_empty(&rxq->rx_used)) { - spin_unlock_irqrestore(&rxq->lock, flags); - __free_pages(page, il->hw_params.rx_page_order); - return; - } - element = rxq->rx_used.next; - rxb = list_entry(element, struct il_rx_buf, list); - list_del(element); - spin_unlock_irqrestore(&rxq->lock, flags); - - rxb->page = page; - /* Get physical address of RB/SKB */ - rxb->page_dma = - pci_map_page(il->pci_dev, page, 0, - PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - - spin_lock_irqsave(&rxq->lock, flags); - - list_add_tail(&rxb->list, &rxq->rx_free); - rxq->free_count++; - il->alloc_rxb_page++; - - spin_unlock_irqrestore(&rxq->lock, flags); - } -} - -void -il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq) -{ - unsigned long flags; - int i; - spin_lock_irqsave(&rxq->lock, flags); - INIT_LIST_HEAD(&rxq->rx_free); - INIT_LIST_HEAD(&rxq->rx_used); - /* Fill the rx_used queue with _all_ of the Rx buffers */ - for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { - /* In the reset function, these buffers may have been allocated - * to an SKB, so we need to unmap and free potential storage */ - if (rxq->pool[i].page != NULL) { - pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, - PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - __il_free_pages(il, rxq->pool[i].page); - rxq->pool[i].page = NULL; - } - list_add_tail(&rxq->pool[i].list, &rxq->rx_used); - } - - /* Set us so that we have processed and used all buffers, but have - * not restocked the Rx queue with fresh buffers */ - rxq->read = rxq->write = 0; - rxq->write_actual = 0; - rxq->free_count = 0; - spin_unlock_irqrestore(&rxq->lock, flags); -} - -void -il3945_rx_replenish(void *data) -{ - struct il_priv *il = data; - unsigned long flags; - - il3945_rx_allocate(il, GFP_KERNEL); - - spin_lock_irqsave(&il->lock, flags); - il3945_rx_queue_restock(il); - spin_unlock_irqrestore(&il->lock, flags); -} - -static void -il3945_rx_replenish_now(struct il_priv *il) -{ - il3945_rx_allocate(il, GFP_ATOMIC); - - il3945_rx_queue_restock(il); -} - -/* Assumes that the skb field of the buffers in 'pool' is kept accurate. 
- * If an SKB has been detached, the POOL needs to have its SKB set to NULL - * This free routine walks the list of POOL entries and if SKB is set to - * non NULL it is unmapped and freed - */ -static void -il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq) -{ - int i; - for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { - if (rxq->pool[i].page != NULL) { - pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, - PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - __il_free_pages(il, rxq->pool[i].page); - rxq->pool[i].page = NULL; - } - } - - dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, - rxq->bd_dma); - dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status), - rxq->rb_stts, rxq->rb_stts_dma); - rxq->bd = NULL; - rxq->rb_stts = NULL; -} - -/* Convert linear signal-to-noise ratio into dB */ -static u8 ratio2dB[100] = { -/* 0 1 2 3 4 5 6 7 8 9 */ - 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */ - 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */ - 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */ - 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */ - 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */ - 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */ - 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */ - 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */ - 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */ - 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */ -}; - -/* Calculates a relative dB value from a ratio of linear - * (i.e. not dB) signal levels. - * Conversion assumes that levels are voltages (20*log), not powers (10*log). */ -int -il3945_calc_db_from_ratio(int sig_ratio) -{ - /* 1000:1 or higher just report as 60 dB */ - if (sig_ratio >= 1000) - return 60; - - /* 100:1 or higher, divide by 10 and use table, - * add 20 dB to make up for divide by 10 */ - if (sig_ratio >= 100) - return 20 + (int)ratio2dB[sig_ratio / 10]; - - /* We shouldn't see this */ - if (sig_ratio < 1) - return 0; - - /* Use table for ratios 1:1 - 99:1 */ - return (int)ratio2dB[sig_ratio]; -} - -/** - * il3945_rx_handle - Main entry function for receiving responses from uCode - * - * Uses the il->handlers callback function array to invoke - * the appropriate handlers, including command responses, - * frame-received notifications, and other notifications. - */ -static void -il3945_rx_handle(struct il_priv *il) -{ - struct il_rx_buf *rxb; - struct il_rx_pkt *pkt; - struct il_rx_queue *rxq = &il->rxq; - u32 r, i; - int reclaim; - unsigned long flags; - u8 fill_rx = 0; - u32 count = 8; - int total_empty = 0; - - /* uCode's read idx (stored in shared DRAM) indicates the last Rx - * buffer that the driver may process (last buffer filled by ucode). 
*/ - r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; - i = rxq->read; - - /* calculate total frames need to be restock after handling RX */ - total_empty = r - rxq->write_actual; - if (total_empty < 0) - total_empty += RX_QUEUE_SIZE; - - if (total_empty > (RX_QUEUE_SIZE / 2)) - fill_rx = 1; - /* Rx interrupt, but nothing sent from uCode */ - if (i == r) - D_RX("r = %d, i = %d\n", r, i); - - while (i != r) { - int len; - - rxb = rxq->queue[i]; - - /* If an RXB doesn't have a Rx queue slot associated with it, - * then a bug has been introduced in the queue refilling - * routines -- catch it here */ - BUG_ON(rxb == NULL); - - rxq->queue[i] = NULL; - - pci_unmap_page(il->pci_dev, rxb->page_dma, - PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - pkt = rxb_addr(rxb); - - len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK; - len += sizeof(u32); /* account for status word */ - - /* Reclaim a command buffer only if this packet is a response - * to a (driver-originated) command. - * If the packet (e.g. Rx frame) originated from uCode, - * there is no command buffer to reclaim. - * Ucode should set SEQ_RX_FRAME bit if ucode-originated, - * but apparently a few don't get set; catch them here. */ - reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && - pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX; - - /* Based on type of command response or notification, - * handle those that need handling via function in - * handlers table. See il3945_setup_handlers() */ - if (il->handlers[pkt->hdr.cmd]) { - D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i, - il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); - il->isr_stats.handlers[pkt->hdr.cmd]++; - il->handlers[pkt->hdr.cmd] (il, rxb); - } else { - /* No handling needed */ - D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r, - i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); - } - - /* - * XXX: After here, we should always check rxb->page - * against NULL before touching it or its virtual - * memory (pkt). Because some handler might have - * already taken or freed the pages. - */ - - if (reclaim) { - /* Invoke any callbacks, transfer the buffer to caller, - * and fire off the (possibly) blocking il_send_cmd() - * as we reclaim the driver command queue */ - if (rxb->page) - il_tx_cmd_complete(il, rxb); - else - IL_WARN("Claim null rxb?\n"); - } - - /* Reuse the page if possible. For notification packets and - * SKBs that fail to Rx correctly, add them back into the - * rx_free list for reuse later. */ - spin_lock_irqsave(&rxq->lock, flags); - if (rxb->page != NULL) { - rxb->page_dma = - pci_map_page(il->pci_dev, rxb->page, 0, - PAGE_SIZE << il->hw_params. - rx_page_order, PCI_DMA_FROMDEVICE); - list_add_tail(&rxb->list, &rxq->rx_free); - rxq->free_count++; - } else - list_add_tail(&rxb->list, &rxq->rx_used); - - spin_unlock_irqrestore(&rxq->lock, flags); - - i = (i + 1) & RX_QUEUE_MASK; - /* If there are a lot of unused frames, - * restock the Rx queue so ucode won't assert. 
*/ - if (fill_rx) { - count++; - if (count >= 8) { - rxq->read = i; - il3945_rx_replenish_now(il); - count = 0; - } - } - } - - /* Backtrack one entry */ - rxq->read = i; - if (fill_rx) - il3945_rx_replenish_now(il); - else - il3945_rx_queue_restock(il); -} - -/* call this function to flush any scheduled tasklet */ -static inline void -il3945_synchronize_irq(struct il_priv *il) -{ - /* wait to make sure we flush pending tasklet */ - synchronize_irq(il->pci_dev->irq); - tasklet_kill(&il->irq_tasklet); -} - -static const char * -il3945_desc_lookup(int i) -{ - switch (i) { - case 1: - return "FAIL"; - case 2: - return "BAD_PARAM"; - case 3: - return "BAD_CHECKSUM"; - case 4: - return "NMI_INTERRUPT"; - case 5: - return "SYSASSERT"; - case 6: - return "FATAL_ERROR"; - } - - return "UNKNOWN"; -} - -#define ERROR_START_OFFSET (1 * sizeof(u32)) -#define ERROR_ELEM_SIZE (7 * sizeof(u32)) - -void -il3945_dump_nic_error_log(struct il_priv *il) -{ - u32 i; - u32 desc, time, count, base, data1; - u32 blink1, blink2, ilink1, ilink2; - - base = le32_to_cpu(il->card_alive.error_event_table_ptr); - - if (!il3945_hw_valid_rtc_data_addr(base)) { - IL_ERR("Not valid error log pointer 0x%08X\n", base); - return; - } - - count = il_read_targ_mem(il, base); - - if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { - IL_ERR("Start IWL Error Log Dump:\n"); - IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count); - } - - IL_ERR("Desc Time asrtPC blink2 " - "ilink1 nmiPC Line\n"); - for (i = ERROR_START_OFFSET; - i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET; - i += ERROR_ELEM_SIZE) { - desc = il_read_targ_mem(il, base + i); - time = il_read_targ_mem(il, base + i + 1 * sizeof(u32)); - blink1 = il_read_targ_mem(il, base + i + 2 * sizeof(u32)); - blink2 = il_read_targ_mem(il, base + i + 3 * sizeof(u32)); - ilink1 = il_read_targ_mem(il, base + i + 4 * sizeof(u32)); - ilink2 = il_read_targ_mem(il, base + i + 5 * sizeof(u32)); - data1 = il_read_targ_mem(il, base + i + 6 * sizeof(u32)); - - IL_ERR("%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", - il3945_desc_lookup(desc), desc, time, blink1, blink2, - ilink1, ilink2, data1); - } -} - -static void -il3945_irq_tasklet(struct il_priv *il) -{ - u32 inta, handled = 0; - u32 inta_fh; - unsigned long flags; -#ifdef CONFIG_IWLEGACY_DEBUG - u32 inta_mask; -#endif - - spin_lock_irqsave(&il->lock, flags); - - /* Ack/clear/reset pending uCode interrupts. - * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, - * and will clear only when CSR_FH_INT_STATUS gets cleared. */ - inta = _il_rd(il, CSR_INT); - _il_wr(il, CSR_INT, inta); - - /* Ack/clear/reset pending flow-handler (DMA) interrupts. - * Any new interrupts that happen after this, either while we're - * in this tasklet, or later, will show up in next ISR/tasklet. */ - inta_fh = _il_rd(il, CSR_FH_INT_STATUS); - _il_wr(il, CSR_FH_INT_STATUS, inta_fh); - -#ifdef CONFIG_IWLEGACY_DEBUG - if (il_get_debug_level(il) & IL_DL_ISR) { - /* just for debug */ - inta_mask = _il_rd(il, CSR_INT_MASK); - D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, - inta_mask, inta_fh); - } -#endif - - spin_unlock_irqrestore(&il->lock, flags); - - /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not - * atomic, make sure that inta covers all the interrupts that - * we've discovered, even if FH interrupt came in just after - * reading CSR_INT. 
*/ - if (inta_fh & CSR39_FH_INT_RX_MASK) - inta |= CSR_INT_BIT_FH_RX; - if (inta_fh & CSR39_FH_INT_TX_MASK) - inta |= CSR_INT_BIT_FH_TX; - - /* Now service all interrupt bits discovered above. */ - if (inta & CSR_INT_BIT_HW_ERR) { - IL_ERR("Hardware error detected. Restarting.\n"); - - /* Tell the device to stop sending interrupts */ - il_disable_interrupts(il); - - il->isr_stats.hw++; - il_irq_handle_error(il); - - handled |= CSR_INT_BIT_HW_ERR; - - return; - } -#ifdef CONFIG_IWLEGACY_DEBUG - if (il_get_debug_level(il) & (IL_DL_ISR)) { - /* NIC fires this, but we don't use it, redundant with WAKEUP */ - if (inta & CSR_INT_BIT_SCD) { - D_ISR("Scheduler finished to transmit " - "the frame/frames.\n"); - il->isr_stats.sch++; - } - - /* Alive notification via Rx interrupt will do the real work */ - if (inta & CSR_INT_BIT_ALIVE) { - D_ISR("Alive interrupt\n"); - il->isr_stats.alive++; - } - } -#endif - /* Safely ignore these bits for debug checks below */ - inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); - - /* Error detected by uCode */ - if (inta & CSR_INT_BIT_SW_ERR) { - IL_ERR("Microcode SW error detected. " "Restarting 0x%X.\n", - inta); - il->isr_stats.sw++; - il_irq_handle_error(il); - handled |= CSR_INT_BIT_SW_ERR; - } - - /* uCode wakes up after power-down sleep */ - if (inta & CSR_INT_BIT_WAKEUP) { - D_ISR("Wakeup interrupt\n"); - il_rx_queue_update_write_ptr(il, &il->rxq); - il_txq_update_write_ptr(il, &il->txq[0]); - il_txq_update_write_ptr(il, &il->txq[1]); - il_txq_update_write_ptr(il, &il->txq[2]); - il_txq_update_write_ptr(il, &il->txq[3]); - il_txq_update_write_ptr(il, &il->txq[4]); - il_txq_update_write_ptr(il, &il->txq[5]); - - il->isr_stats.wakeup++; - handled |= CSR_INT_BIT_WAKEUP; - } - - /* All uCode command responses, including Tx command responses, - * Rx "responses" (frame-received notification), and other - * notifications from uCode come through here*/ - if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { - il3945_rx_handle(il); - il->isr_stats.rx++; - handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); - } - - if (inta & CSR_INT_BIT_FH_TX) { - D_ISR("Tx interrupt\n"); - il->isr_stats.tx++; - - _il_wr(il, CSR_FH_INT_STATUS, (1 << 6)); - il_wr(il, FH39_TCSR_CREDIT(FH39_SRVC_CHNL), 0x0); - handled |= CSR_INT_BIT_FH_TX; - } - - if (inta & ~handled) { - IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled); - il->isr_stats.unhandled++; - } - - if (inta & ~il->inta_mask) { - IL_WARN("Disabled INTA bits 0x%08x were pending\n", - inta & ~il->inta_mask); - IL_WARN(" with inta_fh = 0x%08x\n", inta_fh); - } - - /* Re-enable all interrupts */ - /* only Re-enable if disabled by irq */ - if (test_bit(S_INT_ENABLED, &il->status)) - il_enable_interrupts(il); - -#ifdef CONFIG_IWLEGACY_DEBUG - if (il_get_debug_level(il) & (IL_DL_ISR)) { - inta = _il_rd(il, CSR_INT); - inta_mask = _il_rd(il, CSR_INT_MASK); - inta_fh = _il_rd(il, CSR_FH_INT_STATUS); - D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " - "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); - } -#endif -} - -static int -il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band, - u8 is_active, u8 n_probes, - struct il3945_scan_channel *scan_ch, - struct ieee80211_vif *vif) -{ - struct ieee80211_channel *chan; - const struct ieee80211_supported_band *sband; - const struct il_channel_info *ch_info; - u16 passive_dwell = 0; - u16 active_dwell = 0; - int added, i; - - sband = il_get_hw_mode(il, band); - if (!sband) - return 0; - - active_dwell = il_get_active_dwell_time(il, band, n_probes); - 
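il3945_irq_tasklet() above follows a simple discipline: read and ack the interrupt cause once, service each known cause, OR the serviced bits into a local handled mask, and finally warn about any bits that were pending but never serviced or were not even enabled. A compressed, userspace sketch of that bookkeeping; the INT_BIT_* values below are placeholders, not the real CSR_INT_BIT_* definitions.

/*
 * Sketch of the "accumulate handled bits, then complain about the
 * rest" pattern used by il3945_irq_tasklet().  Bit values are
 * placeholders.
 */
#include <stdio.h>
#include <stdint.h>

#define INT_BIT_HW_ERR	(1u << 0)
#define INT_BIT_WAKEUP	(1u << 1)
#define INT_BIT_RX	(1u << 2)

static void service_interrupts(uint32_t inta, uint32_t enabled_mask)
{
	uint32_t handled = 0;

	if (inta & INT_BIT_HW_ERR)		/* fatal path in the driver */
		handled |= INT_BIT_HW_ERR;
	if (inta & INT_BIT_WAKEUP)
		handled |= INT_BIT_WAKEUP;
	if (inta & INT_BIT_RX)
		handled |= INT_BIT_RX;

	if (inta & ~handled)
		printf("unhandled bits 0x%08x\n",
		       (unsigned int)(inta & ~handled));
	if (inta & ~enabled_mask)
		printf("disabled bits 0x%08x were pending\n",
		       (unsigned int)(inta & ~enabled_mask));
}

int main(void)
{
	/* RX plus an unknown bit 3; bit 3 is not even enabled */
	service_interrupts(INT_BIT_RX | (1u << 3),
			   INT_BIT_RX | INT_BIT_WAKEUP);
	return 0;
}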
passive_dwell = il_get_passive_dwell_time(il, band, vif); - - if (passive_dwell <= active_dwell) - passive_dwell = active_dwell + 1; - - for (i = 0, added = 0; i < il->scan_request->n_channels; i++) { - chan = il->scan_request->channels[i]; - - if (chan->band != band) - continue; - - scan_ch->channel = chan->hw_value; - - ch_info = il_get_channel_info(il, band, scan_ch->channel); - if (!il_is_channel_valid(ch_info)) { - D_SCAN("Channel %d is INVALID for this band.\n", - scan_ch->channel); - continue; - } - - scan_ch->active_dwell = cpu_to_le16(active_dwell); - scan_ch->passive_dwell = cpu_to_le16(passive_dwell); - /* If passive , set up for auto-switch - * and use long active_dwell time. - */ - if (!is_active || il_is_channel_passive(ch_info) || - (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) { - scan_ch->type = 0; /* passive */ - if (IL_UCODE_API(il->ucode_ver) == 1) - scan_ch->active_dwell = - cpu_to_le16(passive_dwell - 1); - } else { - scan_ch->type = 1; /* active */ - } - - /* Set direct probe bits. These may be used both for active - * scan channels (probes gets sent right away), - * or for passive channels (probes get se sent only after - * hearing clear Rx packet).*/ - if (IL_UCODE_API(il->ucode_ver) >= 2) { - if (n_probes) - scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes); - } else { - /* uCode v1 does not allow setting direct probe bits on - * passive channel. */ - if ((scan_ch->type & 1) && n_probes) - scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes); - } - - /* Set txpower levels to defaults */ - scan_ch->tpc.dsp_atten = 110; - /* scan_pwr_info->tpc.dsp_atten; */ - - /*scan_pwr_info->tpc.tx_gain; */ - if (band == IEEE80211_BAND_5GHZ) - scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; - else { - scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); - /* NOTE: if we were doing 6Mb OFDM for scans we'd use - * power level: - * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3; - */ - } - - D_SCAN("Scanning %d [%s %d]\n", scan_ch->channel, - (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE", - (scan_ch->type & 1) ? active_dwell : passive_dwell); - - scan_ch++; - added++; - } - - D_SCAN("total channels to scan %d\n", added); - return added; -} - -static void -il3945_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates) -{ - int i; - - for (i = 0; i < RATE_COUNT_LEGACY; i++) { - rates[i].bitrate = il3945_rates[i].ieee * 5; - rates[i].hw_value = i; /* Rate scaling will work on idxes */ - rates[i].hw_value_short = i; - rates[i].flags = 0; - if (i > IL39_LAST_OFDM_RATE || i < IL_FIRST_OFDM_RATE) { - /* - * If CCK != 1M then set short preamble rate flag. - */ - rates[i].flags |= - (il3945_rates[i].plcp == - 10) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE; - } - } -} - -/****************************************************************************** - * - * uCode download functions - * - ******************************************************************************/ - -static void -il3945_dealloc_ucode_pci(struct il_priv *il) -{ - il_free_fw_desc(il->pci_dev, &il->ucode_code); - il_free_fw_desc(il->pci_dev, &il->ucode_data); - il_free_fw_desc(il->pci_dev, &il->ucode_data_backup); - il_free_fw_desc(il->pci_dev, &il->ucode_init); - il_free_fw_desc(il->pci_dev, &il->ucode_init_data); - il_free_fw_desc(il->pci_dev, &il->ucode_boot); -} - -/** - * il3945_verify_inst_full - verify runtime uCode image in card vs. host, - * looking at all data. 
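In il3945_init_hw_rates() the multiply by five is a unit conversion: the driver table's .ieee field stores each legacy rate in the 802.11 supported-rates unit of 500 kb/s, while mac80211's ieee80211_rate.bitrate is expressed in 100 kb/s, and every CCK rate except 1 Mb/s (PLCP value 10) is marked short-preamble capable. A minimal sketch of that conversion; the three-entry table below is illustrative, not the full RATE_COUNT_LEGACY set.

/*
 * Sketch of the 500 kb/s -> 100 kb/s rate conversion done in
 * il3945_init_hw_rates().  Only a few table entries are shown.
 */
#include <stdio.h>

struct legacy_rate {
	int ieee;	/* 802.11 rate in 500 kb/s units */
	int plcp;	/* PLCP signal value; 10 means 1 Mb/s CCK */
};

static const struct legacy_rate rates[] = {
	{  2,  10 },	/*  1 Mb/s */
	{  4,  20 },	/*  2 Mb/s */
	{ 22, 110 },	/* 11 Mb/s */
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		int bitrate = rates[i].ieee * 5;	/* 100 kb/s units */
		int short_preamble = rates[i].plcp != 10;

		printf("%d.%d Mb/s%s\n", bitrate / 10, bitrate % 10,
		       short_preamble ? " (short preamble capable)" : "");
	}
	return 0;
}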
- */ -static int -il3945_verify_inst_full(struct il_priv *il, __le32 * image, u32 len) -{ - u32 val; - u32 save_len = len; - int rc = 0; - u32 errcnt; - - D_INFO("ucode inst image size is %u\n", len); - - il_wr(il, HBUS_TARG_MEM_RADDR, IL39_RTC_INST_LOWER_BOUND); - - errcnt = 0; - for (; len > 0; len -= sizeof(u32), image++) { - /* read data comes through single port, auto-incr addr */ - /* NOTE: Use the debugless read so we don't flood kernel log - * if IL_DL_IO is set */ - val = _il_rd(il, HBUS_TARG_MEM_RDAT); - if (val != le32_to_cpu(*image)) { - IL_ERR("uCode INST section is invalid at " - "offset 0x%x, is 0x%x, s/b 0x%x\n", - save_len - len, val, le32_to_cpu(*image)); - rc = -EIO; - errcnt++; - if (errcnt >= 20) - break; - } - } - - if (!errcnt) - D_INFO("ucode image in INSTRUCTION memory is good\n"); - - return rc; -} - -/** - * il3945_verify_inst_sparse - verify runtime uCode image in card vs. host, - * using sample data 100 bytes apart. If these sample points are good, - * it's a pretty good bet that everything between them is good, too. - */ -static int -il3945_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len) -{ - u32 val; - int rc = 0; - u32 errcnt = 0; - u32 i; - - D_INFO("ucode inst image size is %u\n", len); - - for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) { - /* read data comes through single port, auto-incr addr */ - /* NOTE: Use the debugless read so we don't flood kernel log - * if IL_DL_IO is set */ - il_wr(il, HBUS_TARG_MEM_RADDR, i + IL39_RTC_INST_LOWER_BOUND); - val = _il_rd(il, HBUS_TARG_MEM_RDAT); - if (val != le32_to_cpu(*image)) { -#if 0 /* Enable this if you want to see details */ - IL_ERR("uCode INST section is invalid at " - "offset 0x%x, is 0x%x, s/b 0x%x\n", i, val, - *image); -#endif - rc = -EIO; - errcnt++; - if (errcnt >= 3) - break; - } - } - - return rc; -} - -/** - * il3945_verify_ucode - determine which instruction image is in SRAM, - * and verify its contents - */ -static int -il3945_verify_ucode(struct il_priv *il) -{ - __le32 *image; - u32 len; - int rc = 0; - - /* Try bootstrap */ - image = (__le32 *) il->ucode_boot.v_addr; - len = il->ucode_boot.len; - rc = il3945_verify_inst_sparse(il, image, len); - if (rc == 0) { - D_INFO("Bootstrap uCode is good in inst SRAM\n"); - return 0; - } - - /* Try initialize */ - image = (__le32 *) il->ucode_init.v_addr; - len = il->ucode_init.len; - rc = il3945_verify_inst_sparse(il, image, len); - if (rc == 0) { - D_INFO("Initialize uCode is good in inst SRAM\n"); - return 0; - } - - /* Try runtime/protocol */ - image = (__le32 *) il->ucode_code.v_addr; - len = il->ucode_code.len; - rc = il3945_verify_inst_sparse(il, image, len); - if (rc == 0) { - D_INFO("Runtime uCode is good in inst SRAM\n"); - return 0; - } - - IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); - - /* Since nothing seems to match, show first several data entries in - * instruction SRAM, so maybe visual inspection will give a clue. - * Selection of bootstrap image (vs. other images) is arbitrary. 
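il3945_verify_inst_sparse() trades coverage for speed: rather than comparing every word of the instruction image against the host copy, as il3945_verify_inst_full() does, it samples one 32-bit word every 100 bytes and gives up after three mismatches. A host-side sketch of that sampling stride over plain buffers; the real code reads each word back through the HBUS_TARG_MEM_RADDR/RDAT indirection instead of a direct array access.

/*
 * Sketch of the sparse comparison stride used by
 * il3945_verify_inst_sparse(): check one u32 every 100 bytes, give
 * up after three mismatches.
 */
#include <stdio.h>
#include <stdint.h>

static int verify_sparse(const uint32_t *device, const uint32_t *host,
			 size_t len_bytes)
{
	int errcnt = 0;

	for (size_t i = 0; i < len_bytes; i += 100) {
		size_t w = i / sizeof(uint32_t);

		if (device[w] != host[w] && ++errcnt >= 3)
			break;			/* image is clearly bad */
	}
	return errcnt ? -1 : 0;
}

int main(void)
{
	uint32_t host[1024], dev[1024];

	for (int i = 0; i < 1024; i++)
		host[i] = dev[i] = (uint32_t)i;
	dev[125] ^= 0xdeadbeefu;		/* corrupt a sampled word */

	printf("sparse check: %s\n",
	       verify_sparse(dev, host, sizeof(host)) ? "mismatch" : "ok");
	return 0;
}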
*/ - image = (__le32 *) il->ucode_boot.v_addr; - len = il->ucode_boot.len; - rc = il3945_verify_inst_full(il, image, len); - - return rc; -} - -static void -il3945_nic_start(struct il_priv *il) -{ - /* Remove all resets to allow NIC to operate */ - _il_wr(il, CSR_RESET, 0); -} - -#define IL3945_UCODE_GET(item) \ -static u32 il3945_ucode_get_##item(const struct il_ucode_header *ucode)\ -{ \ - return le32_to_cpu(ucode->v1.item); \ -} - -static u32 -il3945_ucode_get_header_size(u32 api_ver) -{ - return 24; -} - -static u8 * -il3945_ucode_get_data(const struct il_ucode_header *ucode) -{ - return (u8 *) ucode->v1.data; -} - -IL3945_UCODE_GET(inst_size); -IL3945_UCODE_GET(data_size); -IL3945_UCODE_GET(init_size); -IL3945_UCODE_GET(init_data_size); -IL3945_UCODE_GET(boot_size); - -/** - * il3945_read_ucode - Read uCode images from disk file. - * - * Copy into buffers for card to fetch via bus-mastering - */ -static int -il3945_read_ucode(struct il_priv *il) -{ - const struct il_ucode_header *ucode; - int ret = -EINVAL, idx; - const struct firmware *ucode_raw; - /* firmware file name contains uCode/driver compatibility version */ - const char *name_pre = il->cfg->fw_name_pre; - const unsigned int api_max = il->cfg->ucode_api_max; - const unsigned int api_min = il->cfg->ucode_api_min; - char buf[25]; - u8 *src; - size_t len; - u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size; - - /* Ask kernel firmware_class module to get the boot firmware off disk. - * request_firmware() is synchronous, file is in memory on return. */ - for (idx = api_max; idx >= api_min; idx--) { - sprintf(buf, "%s%u%s", name_pre, idx, ".ucode"); - ret = request_firmware(&ucode_raw, buf, &il->pci_dev->dev); - if (ret < 0) { - IL_ERR("%s firmware file req failed: %d\n", buf, ret); - if (ret == -ENOENT) - continue; - else - goto error; - } else { - if (idx < api_max) - IL_ERR("Loaded firmware %s, " - "which is deprecated. " - " Please use API v%u instead.\n", buf, - api_max); - D_INFO("Got firmware '%s' file " - "(%zd bytes) from disk\n", buf, ucode_raw->size); - break; - } - } - - if (ret < 0) - goto error; - - /* Make sure that we got at least our header! */ - if (ucode_raw->size < il3945_ucode_get_header_size(1)) { - IL_ERR("File size way too small!\n"); - ret = -EINVAL; - goto err_release; - } - - /* Data from ucode file: header followed by uCode images */ - ucode = (struct il_ucode_header *)ucode_raw->data; - - il->ucode_ver = le32_to_cpu(ucode->ver); - api_ver = IL_UCODE_API(il->ucode_ver); - inst_size = il3945_ucode_get_inst_size(ucode); - data_size = il3945_ucode_get_data_size(ucode); - init_size = il3945_ucode_get_init_size(ucode); - init_data_size = il3945_ucode_get_init_data_size(ucode); - boot_size = il3945_ucode_get_boot_size(ucode); - src = il3945_ucode_get_data(ucode); - - /* api_ver should match the api version forming part of the - * firmware filename ... but we don't check for that and only rely - * on the API version read from firmware header from here on forward */ - - if (api_ver < api_min || api_ver > api_max) { - IL_ERR("Driver unable to support your firmware API. " - "Driver supports v%u, firmware is v%u.\n", api_max, - api_ver); - il->ucode_ver = 0; - ret = -EINVAL; - goto err_release; - } - if (api_ver != api_max) - IL_ERR("Firmware has old API version. Expected %u, " - "got %u. 
New firmware can be obtained " - "from http://www.intellinuxwireless.org.\n", api_max, - api_ver); - - IL_INFO("loaded firmware version %u.%u.%u.%u\n", - IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver), - IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver)); - - snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version), - "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver), - IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver), - IL_UCODE_SERIAL(il->ucode_ver)); - - D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver); - D_INFO("f/w package hdr runtime inst size = %u\n", inst_size); - D_INFO("f/w package hdr runtime data size = %u\n", data_size); - D_INFO("f/w package hdr init inst size = %u\n", init_size); - D_INFO("f/w package hdr init data size = %u\n", init_data_size); - D_INFO("f/w package hdr boot inst size = %u\n", boot_size); - - /* Verify size of file vs. image size info in file's header */ - if (ucode_raw->size != - il3945_ucode_get_header_size(api_ver) + inst_size + data_size + - init_size + init_data_size + boot_size) { - - D_INFO("uCode file size %zd does not match expected size\n", - ucode_raw->size); - ret = -EINVAL; - goto err_release; - } - - /* Verify that uCode images will fit in card's SRAM */ - if (inst_size > IL39_MAX_INST_SIZE) { - D_INFO("uCode instr len %d too large to fit in\n", inst_size); - ret = -EINVAL; - goto err_release; - } - - if (data_size > IL39_MAX_DATA_SIZE) { - D_INFO("uCode data len %d too large to fit in\n", data_size); - ret = -EINVAL; - goto err_release; - } - if (init_size > IL39_MAX_INST_SIZE) { - D_INFO("uCode init instr len %d too large to fit in\n", - init_size); - ret = -EINVAL; - goto err_release; - } - if (init_data_size > IL39_MAX_DATA_SIZE) { - D_INFO("uCode init data len %d too large to fit in\n", - init_data_size); - ret = -EINVAL; - goto err_release; - } - if (boot_size > IL39_MAX_BSM_SIZE) { - D_INFO("uCode boot instr len %d too large to fit in\n", - boot_size); - ret = -EINVAL; - goto err_release; - } - - /* Allocate ucode buffers for card's bus-master loading ... */ - - /* Runtime instructions and 2 copies of data: - * 1) unmodified from disk - * 2) backup cache for save/restore during power-downs */ - il->ucode_code.len = inst_size; - il_alloc_fw_desc(il->pci_dev, &il->ucode_code); - - il->ucode_data.len = data_size; - il_alloc_fw_desc(il->pci_dev, &il->ucode_data); - - il->ucode_data_backup.len = data_size; - il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup); - - if (!il->ucode_code.v_addr || !il->ucode_data.v_addr || - !il->ucode_data_backup.v_addr) - goto err_pci_alloc; - - /* Initialization instructions and data */ - if (init_size && init_data_size) { - il->ucode_init.len = init_size; - il_alloc_fw_desc(il->pci_dev, &il->ucode_init); - - il->ucode_init_data.len = init_data_size; - il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data); - - if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr) - goto err_pci_alloc; - } - - /* Bootstrap (instructions only, no data) */ - if (boot_size) { - il->ucode_boot.len = boot_size; - il_alloc_fw_desc(il->pci_dev, &il->ucode_boot); - - if (!il->ucode_boot.v_addr) - goto err_pci_alloc; - } - - /* Copy images into buffers for card's bus-master reads ... 
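The checks above act as a consistency gate on the firmware blob: the file must be exactly the v1 header (24 bytes for this hardware) plus the five section lengths the header advertises, and each section must additionally fit its SRAM limit. A sketch of the first of those checks; the section sizes used below are made up for the example.

/*
 * Sketch of the "file size must equal header plus the advertised
 * sections" gate in il3945_read_ucode().  Section sizes are
 * invented; 24 is the v1 header size used above.
 */
#include <stdio.h>
#include <stddef.h>

struct ucode_sizes {
	size_t inst, data, init, init_data, boot;
};

static int check_file_size(size_t file_size, size_t hdr_size,
			   const struct ucode_sizes *s)
{
	size_t expected = hdr_size + s->inst + s->data +
			  s->init + s->init_data + s->boot;

	if (file_size != expected) {
		printf("size %zu does not match expected %zu\n",
		       file_size, expected);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct ucode_sizes s = {
		.inst = 77856, .data = 13224,
		.init = 0, .init_data = 0, .boot = 1024,
	};
	size_t file = 24 + s.inst + s.data + s.init + s.init_data + s.boot;

	printf("layout check: %s\n",
	       check_file_size(file, 24, &s) ? "bad" : "ok");
	return 0;
}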
*/ - - /* Runtime instructions (first block of data in file) */ - len = inst_size; - D_INFO("Copying (but not loading) uCode instr len %zd\n", len); - memcpy(il->ucode_code.v_addr, src, len); - src += len; - - D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", - il->ucode_code.v_addr, (u32) il->ucode_code.p_addr); - - /* Runtime data (2nd block) - * NOTE: Copy into backup buffer will be done in il3945_up() */ - len = data_size; - D_INFO("Copying (but not loading) uCode data len %zd\n", len); - memcpy(il->ucode_data.v_addr, src, len); - memcpy(il->ucode_data_backup.v_addr, src, len); - src += len; - - /* Initialization instructions (3rd block) */ - if (init_size) { - len = init_size; - D_INFO("Copying (but not loading) init instr len %zd\n", len); - memcpy(il->ucode_init.v_addr, src, len); - src += len; - } - - /* Initialization data (4th block) */ - if (init_data_size) { - len = init_data_size; - D_INFO("Copying (but not loading) init data len %zd\n", len); - memcpy(il->ucode_init_data.v_addr, src, len); - src += len; - } - - /* Bootstrap instructions (5th block) */ - len = boot_size; - D_INFO("Copying (but not loading) boot instr len %zd\n", len); - memcpy(il->ucode_boot.v_addr, src, len); - - /* We have our copies now, allow OS release its copies */ - release_firmware(ucode_raw); - return 0; - -err_pci_alloc: - IL_ERR("failed to allocate pci memory\n"); - ret = -ENOMEM; - il3945_dealloc_ucode_pci(il); - -err_release: - release_firmware(ucode_raw); - -error: - return ret; -} - -/** - * il3945_set_ucode_ptrs - Set uCode address location - * - * Tell initialization uCode where to find runtime uCode. - * - * BSM registers initially contain pointers to initialization uCode. - * We need to replace them to load runtime uCode inst and data, - * and to save runtime data when powering down. - */ -static int -il3945_set_ucode_ptrs(struct il_priv *il) -{ - dma_addr_t pinst; - dma_addr_t pdata; - - /* bits 31:0 for 3945 */ - pinst = il->ucode_code.p_addr; - pdata = il->ucode_data_backup.p_addr; - - /* Tell bootstrap uCode where to find image to load */ - il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst); - il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata); - il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len); - - /* Inst byte count must be last to set up, bit 31 signals uCode - * that all new ptr/size info is in place */ - il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, - il->ucode_code.len | BSM_DRAM_INST_LOAD); - - D_INFO("Runtime uCode pointers are set.\n"); - - return 0; -} - -/** - * il3945_init_alive_start - Called after N_ALIVE notification received - * - * Called after N_ALIVE notification received from "initialize" uCode. - * - * Tell "initialize" uCode to go ahead and load the runtime uCode. - */ -static void -il3945_init_alive_start(struct il_priv *il) -{ - /* Check alive response for "valid" sign from uCode */ - if (il->card_alive_init.is_valid != UCODE_VALID_OK) { - /* We had an error bringing up the hardware, so take it - * all the way back down so we can try again */ - D_INFO("Initialize Alive failed.\n"); - goto restart; - } - - /* Bootstrap uCode has loaded initialize uCode ... verify inst image. - * This is a paranoid check, because we would not have gotten the - * "initialize" alive if code weren't properly loaded. */ - if (il3945_verify_ucode(il)) { - /* Runtime instruction load was bad; - * take it all the way back down so we can try again */ - D_INFO("Bad \"initialize\" uCode load.\n"); - goto restart; - } - - /* Send pointers to protocol/runtime uCode image ... 
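Firmware selection earlier in il3945_read_ucode() tries filenames built as "<prefix><api>.ucode" from the highest supported API version down to the lowest, skipping versions that are simply not installed (-ENOENT) but aborting on any other error, and warning when it has to settle for a deprecated version. A sketch of that fallback loop; try_load() and the "fw-3945-" prefix are stand-ins for request_firmware() and the real cfg->fw_name_pre.

/*
 * Sketch of the "newest API first, fall back on -ENOENT" firmware
 * lookup loop in il3945_read_ucode().  try_load() is a stand-in.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define API_MAX	5
#define API_MIN	1

static int try_load(const char *name)
{
	/* pretend only the API v2 image is installed on this system */
	return strstr(name, "-2.ucode") ? 0 : -ENOENT;
}

int main(void)
{
	char buf[32];
	int ret = -EINVAL;

	for (int api = API_MAX; api >= API_MIN; api--) {
		snprintf(buf, sizeof(buf), "%s%d%s", "fw-3945-", api, ".ucode");
		ret = try_load(buf);
		if (ret == 0) {
			if (api < API_MAX)
				printf("loaded deprecated %s, prefer v%d\n",
				       buf, API_MAX);
			break;
		}
		if (ret != -ENOENT)
			break;			/* real error: stop probing */
	}
	printf("result: %d (%s)\n", ret, ret ? "no usable firmware" : buf);
	return 0;
}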
init code will - * load and launch runtime uCode, which will send us another "Alive" - * notification. */ - D_INFO("Initialization Alive received.\n"); - if (il3945_set_ucode_ptrs(il)) { - /* Runtime instruction load won't happen; - * take it all the way back down so we can try again */ - D_INFO("Couldn't set up uCode pointers.\n"); - goto restart; - } - return; - -restart: - queue_work(il->workqueue, &il->restart); -} - -/** - * il3945_alive_start - called after N_ALIVE notification received - * from protocol/runtime uCode (initialization uCode's - * Alive gets handled by il3945_init_alive_start()). - */ -static void -il3945_alive_start(struct il_priv *il) -{ - int thermal_spin = 0; - u32 rfkill; - struct il_rxon_context *ctx = &il->ctx; - - D_INFO("Runtime Alive received.\n"); - - if (il->card_alive.is_valid != UCODE_VALID_OK) { - /* We had an error bringing up the hardware, so take it - * all the way back down so we can try again */ - D_INFO("Alive failed.\n"); - goto restart; - } - - /* Initialize uCode has loaded Runtime uCode ... verify inst image. - * This is a paranoid check, because we would not have gotten the - * "runtime" alive if code weren't properly loaded. */ - if (il3945_verify_ucode(il)) { - /* Runtime instruction load was bad; - * take it all the way back down so we can try again */ - D_INFO("Bad runtime uCode load.\n"); - goto restart; - } - - rfkill = il_rd_prph(il, APMG_RFKILL_REG); - D_INFO("RFKILL status: 0x%x\n", rfkill); - - if (rfkill & 0x1) { - clear_bit(S_RF_KILL_HW, &il->status); - /* if RFKILL is not on, then wait for thermal - * sensor in adapter to kick in */ - while (il3945_hw_get_temperature(il) == 0) { - thermal_spin++; - udelay(10); - } - - if (thermal_spin) - D_INFO("Thermal calibration took %dus\n", - thermal_spin * 10); - } else - set_bit(S_RF_KILL_HW, &il->status); - - /* After the ALIVE response, we can send commands to 3945 uCode */ - set_bit(S_ALIVE, &il->status); - - /* Enable watchdog to monitor the driver tx queues */ - il_setup_watchdog(il); - - if (il_is_rfkill(il)) - return; - - ieee80211_wake_queues(il->hw); - - il->active_rate = RATES_MASK_3945; - - il_power_update_mode(il, true); - - if (il_is_associated(il)) { - struct il3945_rxon_cmd *active_rxon = - (struct il3945_rxon_cmd *)(&ctx->active); - - ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; - active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; - } else { - /* Initialize our rx_config data */ - il_connection_init_rx_config(il, ctx); - } - - /* Configure Bluetooth device coexistence support */ - il_send_bt_config(il); - - set_bit(S_READY, &il->status); - - /* Configure the adapter for unassociated operation */ - il3945_commit_rxon(il, ctx); - - il3945_reg_txpower_periodic(il); - - D_INFO("ALIVE processing complete.\n"); - wake_up(&il->wait_command_queue); - - return; - -restart: - queue_work(il->workqueue, &il->restart); -} - -static void il3945_cancel_deferred_work(struct il_priv *il); - -static void -__il3945_down(struct il_priv *il) -{ - unsigned long flags; - int exit_pending; - - D_INFO(DRV_NAME " is going down\n"); - - il_scan_cancel_timeout(il, 200); - - exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status); - - /* Stop TX queues watchdog. 
We need to have S_EXIT_PENDING bit set - * to prevent rearm timer */ - del_timer_sync(&il->watchdog); - - /* Station information will now be cleared in device */ - il_clear_ucode_stations(il, NULL); - il_dealloc_bcast_stations(il); - il_clear_driver_stations(il); - - /* Unblock any waiting calls */ - wake_up_all(&il->wait_command_queue); - - /* Wipe out the EXIT_PENDING status bit if we are not actually - * exiting the module */ - if (!exit_pending) - clear_bit(S_EXIT_PENDING, &il->status); - - /* stop and reset the on-board processor */ - _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); - - /* tell the device to stop sending interrupts */ - spin_lock_irqsave(&il->lock, flags); - il_disable_interrupts(il); - spin_unlock_irqrestore(&il->lock, flags); - il3945_synchronize_irq(il); - - if (il->mac80211_registered) - ieee80211_stop_queues(il->hw); - - /* If we have not previously called il3945_init() then - * clear all bits but the RF Kill bits and return */ - if (!il_is_init(il)) { - il->status = - test_bit(S_RF_KILL_HW, - &il-> - status) << S_RF_KILL_HW | - test_bit(S_GEO_CONFIGURED, - &il-> - status) << S_GEO_CONFIGURED | - test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING; - goto exit; - } - - /* ...otherwise clear out all the status bits but the RF Kill - * bit and continue taking the NIC down. */ - il->status &= - test_bit(S_RF_KILL_HW, - &il->status) << S_RF_KILL_HW | test_bit(S_GEO_CONFIGURED, - &il-> - status) << - S_GEO_CONFIGURED | test_bit(S_FW_ERROR, - &il-> - status) << S_FW_ERROR | - test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING; - - il3945_hw_txq_ctx_stop(il); - il3945_hw_rxq_stop(il); - - /* Power-down device's busmaster DMA clocks */ - il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); - udelay(5); - - /* Stop the device, and put it in low power state */ - il_apm_stop(il); - -exit: - memset(&il->card_alive, 0, sizeof(struct il_alive_resp)); - - if (il->beacon_skb) - dev_kfree_skb(il->beacon_skb); - il->beacon_skb = NULL; - - /* clear out any free frames */ - il3945_clear_free_frames(il); -} - -static void -il3945_down(struct il_priv *il) -{ - mutex_lock(&il->mutex); - __il3945_down(il); - mutex_unlock(&il->mutex); - - il3945_cancel_deferred_work(il); -} - -#define MAX_HW_RESTARTS 5 - -static int -il3945_alloc_bcast_station(struct il_priv *il) -{ - struct il_rxon_context *ctx = &il->ctx; - unsigned long flags; - u8 sta_id; - - spin_lock_irqsave(&il->sta_lock, flags); - sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL); - if (sta_id == IL_INVALID_STATION) { - IL_ERR("Unable to prepare broadcast station\n"); - spin_unlock_irqrestore(&il->sta_lock, flags); - - return -EINVAL; - } - - il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE; - il->stations[sta_id].used |= IL_STA_BCAST; - spin_unlock_irqrestore(&il->sta_lock, flags); - - return 0; -} - -static int -__il3945_up(struct il_priv *il) -{ - int rc, i; - - rc = il3945_alloc_bcast_station(il); - if (rc) - return rc; - - if (test_bit(S_EXIT_PENDING, &il->status)) { - IL_WARN("Exit pending; will not bring the NIC up\n"); - return -EIO; - } - - if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) { - IL_ERR("ucode not available for device bring up\n"); - return -EIO; - } - - /* If platform's RF_KILL switch is NOT set to KILL */ - if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) - clear_bit(S_RF_KILL_HW, &il->status); - else { - set_bit(S_RF_KILL_HW, &il->status); - IL_WARN("Radio disabled by HW RF Kill switch\n"); - return -ENODEV; - } - - _il_wr(il, CSR_INT, 
0xFFFFFFFF); - - rc = il3945_hw_nic_init(il); - if (rc) { - IL_ERR("Unable to int nic\n"); - return rc; - } - - /* make sure rfkill handshake bits are cleared */ - _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); - _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); - - /* clear (again), then enable host interrupts */ - _il_wr(il, CSR_INT, 0xFFFFFFFF); - il_enable_interrupts(il); - - /* really make sure rfkill handshake bits are cleared */ - _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); - _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); - - /* Copy original ucode data image from disk into backup cache. - * This will be used to initialize the on-board processor's - * data SRAM for a clean start when the runtime program first loads. */ - memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr, - il->ucode_data.len); - - /* We return success when we resume from suspend and rf_kill is on. */ - if (test_bit(S_RF_KILL_HW, &il->status)) - return 0; - - for (i = 0; i < MAX_HW_RESTARTS; i++) { - - /* load bootstrap state machine, - * load bootstrap program into processor's memory, - * prepare to load the "initialize" uCode */ - rc = il->cfg->ops->lib->load_ucode(il); - - if (rc) { - IL_ERR("Unable to set up bootstrap uCode: %d\n", rc); - continue; - } - - /* start card; "initialize" will load runtime ucode */ - il3945_nic_start(il); - - D_INFO(DRV_NAME " is coming up\n"); - - return 0; - } - - set_bit(S_EXIT_PENDING, &il->status); - __il3945_down(il); - clear_bit(S_EXIT_PENDING, &il->status); - - /* tried to restart and config the device for as long as our - * patience could withstand */ - IL_ERR("Unable to initialize device after %d attempts.\n", i); - return -EIO; -} - -/***************************************************************************** - * - * Workqueue callbacks - * - *****************************************************************************/ - -static void -il3945_bg_init_alive_start(struct work_struct *data) -{ - struct il_priv *il = - container_of(data, struct il_priv, init_alive_start.work); - - mutex_lock(&il->mutex); - if (test_bit(S_EXIT_PENDING, &il->status)) - goto out; - - il3945_init_alive_start(il); -out: - mutex_unlock(&il->mutex); -} - -static void -il3945_bg_alive_start(struct work_struct *data) -{ - struct il_priv *il = - container_of(data, struct il_priv, alive_start.work); - - mutex_lock(&il->mutex); - if (test_bit(S_EXIT_PENDING, &il->status)) - goto out; - - il3945_alive_start(il); -out: - mutex_unlock(&il->mutex); -} - -/* - * 3945 cannot interrupt driver when hardware rf kill switch toggles; - * driver must poll CSR_GP_CNTRL_REG register for change. This register - * *is* readable even when device has been SW_RESET into low power mode - * (e.g. during RF KILL). - */ -static void -il3945_rfkill_poll(struct work_struct *data) -{ - struct il_priv *il = - container_of(data, struct il_priv, _3945.rfkill_poll.work); - bool old_rfkill = test_bit(S_RF_KILL_HW, &il->status); - bool new_rfkill = - !(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); - - if (new_rfkill != old_rfkill) { - if (new_rfkill) - set_bit(S_RF_KILL_HW, &il->status); - else - clear_bit(S_RF_KILL_HW, &il->status); - - wiphy_rfkill_set_hw_state(il->hw->wiphy, new_rfkill); - - D_RF_KILL("RF_KILL bit toggled to %s.\n", - new_rfkill ? "disable radio" : "enable radio"); - } - - /* Keep this running, even if radio now enabled. 
This will be - * cancelled in mac_start() if system decides to start again */ - queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, - round_jiffies_relative(2 * HZ)); - -} - -int -il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif) -{ - struct il_host_cmd cmd = { - .id = C_SCAN, - .len = sizeof(struct il3945_scan_cmd), - .flags = CMD_SIZE_HUGE, - }; - struct il3945_scan_cmd *scan; - u8 n_probes = 0; - enum ieee80211_band band; - bool is_active = false; - int ret; - u16 len; - - lockdep_assert_held(&il->mutex); - - if (!il->scan_cmd) { - il->scan_cmd = - kmalloc(sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE, - GFP_KERNEL); - if (!il->scan_cmd) { - D_SCAN("Fail to allocate scan memory\n"); - return -ENOMEM; - } - } - scan = il->scan_cmd; - memset(scan, 0, sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE); - - scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH; - scan->quiet_time = IL_ACTIVE_QUIET_TIME; - - if (il_is_associated(il)) { - u16 interval; - u32 extra; - u32 suspend_time = 100; - u32 scan_suspend_time = 100; - - D_INFO("Scanning while associated...\n"); - - interval = vif->bss_conf.beacon_int; - - scan->suspend_time = 0; - scan->max_out_time = cpu_to_le32(200 * 1024); - if (!interval) - interval = suspend_time; - /* - * suspend time format: - * 0-19: beacon interval in usec (time before exec.) - * 20-23: 0 - * 24-31: number of beacons (suspend between channels) - */ - - extra = (suspend_time / interval) << 24; - scan_suspend_time = - 0xFF0FFFFF & (extra | ((suspend_time % interval) * 1024)); - - scan->suspend_time = cpu_to_le32(scan_suspend_time); - D_SCAN("suspend_time 0x%X beacon interval %d\n", - scan_suspend_time, interval); - } - - if (il->scan_request->n_ssids) { - int i, p = 0; - D_SCAN("Kicking off active scan\n"); - for (i = 0; i < il->scan_request->n_ssids; i++) { - /* always does wildcard anyway */ - if (!il->scan_request->ssids[i].ssid_len) - continue; - scan->direct_scan[p].id = WLAN_EID_SSID; - scan->direct_scan[p].len = - il->scan_request->ssids[i].ssid_len; - memcpy(scan->direct_scan[p].ssid, - il->scan_request->ssids[i].ssid, - il->scan_request->ssids[i].ssid_len); - n_probes++; - p++; - } - is_active = true; - } else - D_SCAN("Kicking off passive scan.\n"); - - /* We don't build a direct scan probe request; the uCode will do - * that based on the direct_mask added to each channel entry */ - scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; - scan->tx_cmd.sta_id = il->ctx.bcast_sta_id; - scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; - - /* flags + rate selection */ - - switch (il->scan_band) { - case IEEE80211_BAND_2GHZ: - scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; - scan->tx_cmd.rate = RATE_1M_PLCP; - band = IEEE80211_BAND_2GHZ; - break; - case IEEE80211_BAND_5GHZ: - scan->tx_cmd.rate = RATE_6M_PLCP; - band = IEEE80211_BAND_5GHZ; - break; - default: - IL_WARN("Invalid scan band\n"); - return -EIO; - } - - /* - * If active scaning is requested but a certain channel is marked - * passive, we can do active scanning if we detect transmissions. For - * passive only scanning disable switching to active on any channel. - */ - scan->good_CRC_th = - is_active ? 
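The scan suspend_time built above packs two quantities into one 32-bit word exactly as the in-line comment describes: bits 24-31 hold the whole number of beacon intervals to stay away, bits 0-19 hold the leftover time within one interval converted to microseconds (TU * 1024), and the 0xFF0FFFFF mask forces bits 20-23 to zero. A worked example of that encoding with arbitrarily chosen values.

/*
 * Worked example of the scan suspend_time encoding described by the
 * comment in il3945_request_scan(): bits 24-31 = whole beacon
 * intervals, bits 0-19 = leftover time in usec, bits 20-23 = 0.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t encode_suspend(uint32_t suspend_time, uint32_t beacon_int)
{
	uint32_t extra = (suspend_time / beacon_int) << 24;
	uint32_t rest  = (suspend_time % beacon_int) * 1024;

	return 0xFF0FFFFF & (extra | rest);
}

int main(void)
{
	/*
	 * Suspend for 100 TU with a 40 TU beacon interval: two whole
	 * beacons plus 20 TU (20480 usec) left over -> 0x02005000.
	 */
	printf("0x%08X\n", (unsigned int)encode_suspend(100, 40));
	return 0;
}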
IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER; - - len = - il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data, - vif->addr, il->scan_request->ie, - il->scan_request->ie_len, - IL_MAX_SCAN_SIZE - sizeof(*scan)); - scan->tx_cmd.len = cpu_to_le16(len); - - /* select Rx antennas */ - scan->flags |= il3945_get_antenna_flags(il); - - scan->channel_count = - il3945_get_channels_for_scan(il, band, is_active, n_probes, - (void *)&scan->data[len], vif); - if (scan->channel_count == 0) { - D_SCAN("channel count %d\n", scan->channel_count); - return -EIO; - } - - cmd.len += - le16_to_cpu(scan->tx_cmd.len) + - scan->channel_count * sizeof(struct il3945_scan_channel); - cmd.data = scan; - scan->len = cpu_to_le16(cmd.len); - - set_bit(S_SCAN_HW, &il->status); - ret = il_send_cmd_sync(il, &cmd); - if (ret) - clear_bit(S_SCAN_HW, &il->status); - return ret; -} - -void -il3945_post_scan(struct il_priv *il) -{ - struct il_rxon_context *ctx = &il->ctx; - - /* - * Since setting the RXON may have been deferred while - * performing the scan, fire one off if needed - */ - if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) - il3945_commit_rxon(il, ctx); -} - -static void -il3945_bg_restart(struct work_struct *data) -{ - struct il_priv *il = container_of(data, struct il_priv, restart); - - if (test_bit(S_EXIT_PENDING, &il->status)) - return; - - if (test_and_clear_bit(S_FW_ERROR, &il->status)) { - mutex_lock(&il->mutex); - il->ctx.vif = NULL; - il->is_open = 0; - mutex_unlock(&il->mutex); - il3945_down(il); - ieee80211_restart_hw(il->hw); - } else { - il3945_down(il); - - mutex_lock(&il->mutex); - if (test_bit(S_EXIT_PENDING, &il->status)) { - mutex_unlock(&il->mutex); - return; - } - - __il3945_up(il); - mutex_unlock(&il->mutex); - } -} - -static void -il3945_bg_rx_replenish(struct work_struct *data) -{ - struct il_priv *il = container_of(data, struct il_priv, rx_replenish); - - mutex_lock(&il->mutex); - if (test_bit(S_EXIT_PENDING, &il->status)) - goto out; - - il3945_rx_replenish(il); -out: - mutex_unlock(&il->mutex); -} - -void -il3945_post_associate(struct il_priv *il) -{ - int rc = 0; - struct ieee80211_conf *conf = NULL; - struct il_rxon_context *ctx = &il->ctx; - - if (!ctx->vif || !il->is_open) - return; - - D_ASSOC("Associated as %d to: %pM\n", ctx->vif->bss_conf.aid, - ctx->active.bssid_addr); - - if (test_bit(S_EXIT_PENDING, &il->status)) - return; - - il_scan_cancel_timeout(il, 200); - - conf = &il->hw->conf; - - ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - il3945_commit_rxon(il, ctx); - - rc = il_send_rxon_timing(il, ctx); - if (rc) - IL_WARN("C_RXON_TIMING failed - " "Attempting to continue.\n"); - - ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; - - ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid); - - D_ASSOC("assoc id %d beacon interval %d\n", ctx->vif->bss_conf.aid, - ctx->vif->bss_conf.beacon_int); - - if (ctx->vif->bss_conf.use_short_preamble) - ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; - else - ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; - - if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { - if (ctx->vif->bss_conf.use_short_slot) - ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; - else - ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; - } - - il3945_commit_rxon(il, ctx); - - switch (ctx->vif->type) { - case NL80211_IFTYPE_STATION: - il3945_rate_scale_init(il->hw, IL_AP_ID); - break; - case NL80211_IFTYPE_ADHOC: - il3945_send_beacon_cmd(il); - break; - default: - IL_ERR("%s Should not be called in %d mode\n", __func__, - 
ctx->vif->type); - break; - } -} - -/***************************************************************************** - * - * mac80211 entry point functions - * - *****************************************************************************/ - -#define UCODE_READY_TIMEOUT (2 * HZ) - -static int -il3945_mac_start(struct ieee80211_hw *hw) -{ - struct il_priv *il = hw->priv; - int ret; - - D_MAC80211("enter\n"); - - /* we should be verifying the device is ready to be opened */ - mutex_lock(&il->mutex); - - /* fetch ucode file from disk, alloc and copy to bus-master buffers ... - * ucode filename and max sizes are card-specific. */ - - if (!il->ucode_code.len) { - ret = il3945_read_ucode(il); - if (ret) { - IL_ERR("Could not read microcode: %d\n", ret); - mutex_unlock(&il->mutex); - goto out_release_irq; - } - } - - ret = __il3945_up(il); - - mutex_unlock(&il->mutex); - - if (ret) - goto out_release_irq; - - D_INFO("Start UP work.\n"); - - /* Wait for START_ALIVE from ucode. Otherwise callbacks from - * mac80211 will not be run successfully. */ - ret = wait_event_timeout(il->wait_command_queue, - test_bit(S_READY, &il->status), - UCODE_READY_TIMEOUT); - if (!ret) { - if (!test_bit(S_READY, &il->status)) { - IL_ERR("Wait for START_ALIVE timeout after %dms.\n", - jiffies_to_msecs(UCODE_READY_TIMEOUT)); - ret = -ETIMEDOUT; - goto out_release_irq; - } - } - - /* ucode is running and will send rfkill notifications, - * no need to poll the killswitch state anymore */ - cancel_delayed_work(&il->_3945.rfkill_poll); - - il->is_open = 1; - D_MAC80211("leave\n"); - return 0; - -out_release_irq: - il->is_open = 0; - D_MAC80211("leave - failed\n"); - return ret; -} - -static void -il3945_mac_stop(struct ieee80211_hw *hw) -{ - struct il_priv *il = hw->priv; - - D_MAC80211("enter\n"); - - if (!il->is_open) { - D_MAC80211("leave - skip\n"); - return; - } - - il->is_open = 0; - - il3945_down(il); - - flush_workqueue(il->workqueue); - - /* start polling the killswitch state again */ - queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, - round_jiffies_relative(2 * HZ)); - - D_MAC80211("leave\n"); -} - -static void -il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) -{ - struct il_priv *il = hw->priv; - - D_MAC80211("enter\n"); - - D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, - ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); - - if (il3945_tx_skb(il, skb)) - dev_kfree_skb_any(skb); - - D_MAC80211("leave\n"); -} - -void -il3945_config_ap(struct il_priv *il) -{ - struct il_rxon_context *ctx = &il->ctx; - struct ieee80211_vif *vif = ctx->vif; - int rc = 0; - - if (test_bit(S_EXIT_PENDING, &il->status)) - return; - - /* The following should be done only at AP bring up */ - if (!(il_is_associated(il))) { - - /* RXON - unassoc (to set timing command) */ - ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - il3945_commit_rxon(il, ctx); - - /* RXON Timing */ - rc = il_send_rxon_timing(il, ctx); - if (rc) - IL_WARN("C_RXON_TIMING failed - " - "Attempting to continue.\n"); - - ctx->staging.assoc_id = 0; - - if (vif->bss_conf.use_short_preamble) - ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; - else - ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; - - if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { - if (vif->bss_conf.use_short_slot) - ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; - else - ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; - } - /* restore RXON assoc */ - ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; - il3945_commit_rxon(il, ctx); - } - 
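il3945_post_scan(), il3945_post_associate() and il3945_config_ap() all follow the same shadow-configuration discipline: changes accumulate in ctx->staging and are only pushed to the device through il3945_commit_rxon(), in post_scan's case only when staging actually differs from ctx->active. A stripped-down sketch of that compare-then-commit idea; the struct layout and commit() stub are invented for the example.

/*
 * Sketch of the staging/active RXON discipline: mutate a staging
 * copy, push it to the device only when it differs from the active
 * copy.  Types and commit() are stand-ins.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct rxon { uint32_t flags, filter_flags; };

struct ctx { struct rxon staging, active; };

static void commit(struct ctx *c)
{
	printf("committing RXON (flags 0x%08x)\n",
	       (unsigned int)c->staging.flags);
	c->active = c->staging;		/* device now runs the staged config */
}

static void commit_if_changed(struct ctx *c)
{
	if (memcmp(&c->staging, &c->active, sizeof(c->staging)))
		commit(c);
}

int main(void)
{
	struct ctx c = { { 0, 0 }, { 0, 0 } };

	c.staging.flags |= 0x4;		/* stage a change */
	commit_if_changed(&c);		/* pushed once */
	commit_if_changed(&c);		/* no-op: staging == active */
	return 0;
}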
il3945_send_beacon_cmd(il); -} - -static int -il3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, - struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) -{ - struct il_priv *il = hw->priv; - int ret = 0; - u8 sta_id = IL_INVALID_STATION; - u8 static_key; - - D_MAC80211("enter\n"); - - if (il3945_mod_params.sw_crypto) { - D_MAC80211("leave - hwcrypto disabled\n"); - return -EOPNOTSUPP; - } - - /* - * To support IBSS RSN, don't program group keys in IBSS, the - * hardware will then not attempt to decrypt the frames. - */ - if (vif->type == NL80211_IFTYPE_ADHOC && - !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) - return -EOPNOTSUPP; - - static_key = !il_is_associated(il); - - if (!static_key) { - sta_id = il_sta_id_or_broadcast(il, &il->ctx, sta); - if (sta_id == IL_INVALID_STATION) - return -EINVAL; - } - - mutex_lock(&il->mutex); - il_scan_cancel_timeout(il, 100); - - switch (cmd) { - case SET_KEY: - if (static_key) - ret = il3945_set_static_key(il, key); - else - ret = il3945_set_dynamic_key(il, key, sta_id); - D_MAC80211("enable hwcrypto key\n"); - break; - case DISABLE_KEY: - if (static_key) - ret = il3945_remove_static_key(il); - else - ret = il3945_clear_sta_key_info(il, sta_id); - D_MAC80211("disable hwcrypto key\n"); - break; - default: - ret = -EINVAL; - } - - mutex_unlock(&il->mutex); - D_MAC80211("leave\n"); - - return ret; -} - -static int -il3945_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct il_priv *il = hw->priv; - struct il3945_sta_priv *sta_priv = (void *)sta->drv_priv; - int ret; - bool is_ap = vif->type == NL80211_IFTYPE_STATION; - u8 sta_id; - - D_INFO("received request to add station %pM\n", sta->addr); - mutex_lock(&il->mutex); - D_INFO("proceeding to add station %pM\n", sta->addr); - sta_priv->common.sta_id = IL_INVALID_STATION; - - ret = - il_add_station_common(il, &il->ctx, sta->addr, is_ap, sta, &sta_id); - if (ret) { - IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret); - /* Should we return success if return code is EEXIST ? */ - mutex_unlock(&il->mutex); - return ret; - } - - sta_priv->common.sta_id = sta_id; - - /* Initialize rate scaling */ - D_INFO("Initializing rate scaling for station %pM\n", sta->addr); - il3945_rs_rate_init(il, sta, sta_id); - mutex_unlock(&il->mutex); - - return 0; -} - -static void -il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, - unsigned int *total_flags, u64 multicast) -{ - struct il_priv *il = hw->priv; - __le32 filter_or = 0, filter_nand = 0; - struct il_rxon_context *ctx = &il->ctx; - -#define CHK(test, flag) do { \ - if (*total_flags & (test)) \ - filter_or |= (flag); \ - else \ - filter_nand |= (flag); \ - } while (0) - - D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags, - *total_flags); - - CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); - CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); - CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); - -#undef CHK - - mutex_lock(&il->mutex); - - ctx->staging.filter_flags &= ~filter_nand; - ctx->staging.filter_flags |= filter_or; - - /* - * Not committing directly because hardware can perform a scan, - * but even if hw is ready, committing here breaks for some reason, - * we'll eventually commit the filter flags change anyway. 
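il3945_configure_filter() above translates mac80211's FIF_* request bits with the CHK() macro into two masks, bits to OR into the RXON filter when a flag is requested and bits to clear when it is not, so the staging filter word is updated with one AND-NOT followed by one OR. A compact sketch of that folding; the flag values here are placeholders for the real FIF_* and RXON_FILTER_* definitions.

/*
 * Sketch of the CHK() or/nand mask folding used by
 * il3945_configure_filter().  Bit values are placeholders.
 */
#include <stdio.h>
#include <stdint.h>

#define FIF_PROMISC		(1u << 0)
#define FIF_CONTROL		(1u << 1)

#define FILTER_PROMISC_MSK	(1u << 8)
#define FILTER_CTL2HOST_MSK	(1u << 9)

int main(void)
{
	uint32_t total_flags = FIF_CONTROL;	/* what mac80211 asked for */
	uint32_t filter_or = 0, filter_nand = 0;
	uint32_t staging = FILTER_PROMISC_MSK;	/* currently promiscuous */

#define CHK(test, flag) do {			\
	if (total_flags & (test))		\
		filter_or |= (flag);		\
	else					\
		filter_nand |= (flag);		\
} while (0)

	CHK(FIF_PROMISC, FILTER_PROMISC_MSK);
	CHK(FIF_CONTROL, FILTER_CTL2HOST_MSK);
#undef CHK

	staging &= ~filter_nand;	/* drop what was not requested */
	staging |= filter_or;		/* add what was requested */
	printf("staging filter: 0x%08x\n", (unsigned int)staging); /* 0x200 */
	return 0;
}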
- */ - - mutex_unlock(&il->mutex); - - /* - * Receiving all multicast frames is always enabled by the - * default flags setup in il_connection_init_rx_config() - * since we currently do not support programming multicast - * filters into the device. - */ - *total_flags &= - FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | - FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; -} - -/***************************************************************************** - * - * sysfs attributes - * - *****************************************************************************/ - -#ifdef CONFIG_IWLEGACY_DEBUG - -/* - * The following adds a new attribute to the sysfs representation - * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/) - * used for controlling the debug level. - * - * See the level definitions in iwl for details. - * - * The debug_level being managed using sysfs below is a per device debug - * level that is used instead of the global debug level if it (the per - * device debug level) is set. - */ -static ssize_t -il3945_show_debug_level(struct device *d, struct device_attribute *attr, - char *buf) -{ - struct il_priv *il = dev_get_drvdata(d); - return sprintf(buf, "0x%08X\n", il_get_debug_level(il)); -} - -static ssize_t -il3945_store_debug_level(struct device *d, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct il_priv *il = dev_get_drvdata(d); - unsigned long val; - int ret; - - ret = strict_strtoul(buf, 0, &val); - if (ret) - IL_INFO("%s is not in hex or decimal form.\n", buf); - else { - il->debug_level = val; - if (il_alloc_traffic_mem(il)) - IL_ERR("Not enough memory to generate traffic log\n"); - } - return strnlen(buf, count); -} - -static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il3945_show_debug_level, - il3945_store_debug_level); - -#endif /* CONFIG_IWLEGACY_DEBUG */ - -static ssize_t -il3945_show_temperature(struct device *d, struct device_attribute *attr, - char *buf) -{ - struct il_priv *il = dev_get_drvdata(d); - - if (!il_is_alive(il)) - return -EAGAIN; - - return sprintf(buf, "%d\n", il3945_hw_get_temperature(il)); -} - -static DEVICE_ATTR(temperature, S_IRUGO, il3945_show_temperature, NULL); - -static ssize_t -il3945_show_tx_power(struct device *d, struct device_attribute *attr, char *buf) -{ - struct il_priv *il = dev_get_drvdata(d); - return sprintf(buf, "%d\n", il->tx_power_user_lmt); -} - -static ssize_t -il3945_store_tx_power(struct device *d, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct il_priv *il = dev_get_drvdata(d); - char *p = (char *)buf; - u32 val; - - val = simple_strtoul(p, &p, 10); - if (p == buf) - IL_INFO(": %s is not in decimal form.\n", buf); - else - il3945_hw_reg_set_txpower(il, val); - - return count; -} - -static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il3945_show_tx_power, - il3945_store_tx_power); - -static ssize_t -il3945_show_flags(struct device *d, struct device_attribute *attr, char *buf) -{ - struct il_priv *il = dev_get_drvdata(d); - struct il_rxon_context *ctx = &il->ctx; - - return sprintf(buf, "0x%04X\n", ctx->active.flags); -} - -static ssize_t -il3945_store_flags(struct device *d, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct il_priv *il = dev_get_drvdata(d); - u32 flags = simple_strtoul(buf, NULL, 0); - struct il_rxon_context *ctx = &il->ctx; - - mutex_lock(&il->mutex); - if (le32_to_cpu(ctx->staging.flags) != flags) { - /* Cancel any currently running scans... 
*/ - if (il_scan_cancel_timeout(il, 100)) - IL_WARN("Could not cancel scan.\n"); - else { - D_INFO("Committing rxon.flags = 0x%04X\n", flags); - ctx->staging.flags = cpu_to_le32(flags); - il3945_commit_rxon(il, ctx); - } - } - mutex_unlock(&il->mutex); - - return count; -} - -static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, il3945_show_flags, - il3945_store_flags); - -static ssize_t -il3945_show_filter_flags(struct device *d, struct device_attribute *attr, - char *buf) -{ - struct il_priv *il = dev_get_drvdata(d); - struct il_rxon_context *ctx = &il->ctx; - - return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.filter_flags)); -} - -static ssize_t -il3945_store_filter_flags(struct device *d, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct il_priv *il = dev_get_drvdata(d); - struct il_rxon_context *ctx = &il->ctx; - u32 filter_flags = simple_strtoul(buf, NULL, 0); - - mutex_lock(&il->mutex); - if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) { - /* Cancel any currently running scans... */ - if (il_scan_cancel_timeout(il, 100)) - IL_WARN("Could not cancel scan.\n"); - else { - D_INFO("Committing rxon.filter_flags = " "0x%04X\n", - filter_flags); - ctx->staging.filter_flags = cpu_to_le32(filter_flags); - il3945_commit_rxon(il, ctx); - } - } - mutex_unlock(&il->mutex); - - return count; -} - -static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, il3945_show_filter_flags, - il3945_store_filter_flags); - -static ssize_t -il3945_show_measurement(struct device *d, struct device_attribute *attr, - char *buf) -{ - struct il_priv *il = dev_get_drvdata(d); - struct il_spectrum_notification measure_report; - u32 size = sizeof(measure_report), len = 0, ofs = 0; - u8 *data = (u8 *) &measure_report; - unsigned long flags; - - spin_lock_irqsave(&il->lock, flags); - if (!(il->measurement_status & MEASUREMENT_READY)) { - spin_unlock_irqrestore(&il->lock, flags); - return 0; - } - memcpy(&measure_report, &il->measure_report, size); - il->measurement_status = 0; - spin_unlock_irqrestore(&il->lock, flags); - - while (size && PAGE_SIZE - len) { - hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, - PAGE_SIZE - len, 1); - len = strlen(buf); - if (PAGE_SIZE - len) - buf[len++] = '\n'; - - ofs += 16; - size -= min(size, 16U); - } - - return len; -} - -static ssize_t -il3945_store_measurement(struct device *d, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct il_priv *il = dev_get_drvdata(d); - struct il_rxon_context *ctx = &il->ctx; - struct ieee80211_measurement_params params = { - .channel = le16_to_cpu(ctx->active.channel), - .start_time = cpu_to_le64(il->_3945.last_tsf), - .duration = cpu_to_le16(1), - }; - u8 type = IL_MEASURE_BASIC; - u8 buffer[32]; - u8 channel; - - if (count) { - char *p = buffer; - strncpy(buffer, buf, min(sizeof(buffer), count)); - channel = simple_strtoul(p, NULL, 0); - if (channel) - params.channel = channel; - - p = buffer; - while (*p && *p != ' ') - p++; - if (*p) - type = simple_strtoul(p + 1, NULL, 0); - } - - D_INFO("Invoking measurement of type %d on " "channel %d (for '%s')\n", - type, params.channel, buf); - il3945_get_measurement(il, ¶ms, type); - - return count; -} - -static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, il3945_show_measurement, - il3945_store_measurement); - -static ssize_t -il3945_store_retry_rate(struct device *d, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct il_priv *il = dev_get_drvdata(d); - - il->retry_rate = simple_strtoul(buf, NULL, 0); - if (il->retry_rate 
<= 0) - il->retry_rate = 1; - - return count; -} - -static ssize_t -il3945_show_retry_rate(struct device *d, struct device_attribute *attr, - char *buf) -{ - struct il_priv *il = dev_get_drvdata(d); - return sprintf(buf, "%d", il->retry_rate); -} - -static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, il3945_show_retry_rate, - il3945_store_retry_rate); - -static ssize_t -il3945_show_channels(struct device *d, struct device_attribute *attr, char *buf) -{ - /* all this shit doesn't belong into sysfs anyway */ - return 0; -} - -static DEVICE_ATTR(channels, S_IRUSR, il3945_show_channels, NULL); - -static ssize_t -il3945_show_antenna(struct device *d, struct device_attribute *attr, char *buf) -{ - struct il_priv *il = dev_get_drvdata(d); - - if (!il_is_alive(il)) - return -EAGAIN; - - return sprintf(buf, "%d\n", il3945_mod_params.antenna); -} - -static ssize_t -il3945_store_antenna(struct device *d, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct il_priv *il __maybe_unused = dev_get_drvdata(d); - int ant; - - if (count == 0) - return 0; - - if (sscanf(buf, "%1i", &ant) != 1) { - D_INFO("not in hex or decimal form.\n"); - return count; - } - - if (ant >= 0 && ant <= 2) { - D_INFO("Setting antenna select to %d.\n", ant); - il3945_mod_params.antenna = (enum il3945_antenna)ant; - } else - D_INFO("Bad antenna select value %d.\n", ant); - - return count; -} - -static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, il3945_show_antenna, - il3945_store_antenna); - -static ssize_t -il3945_show_status(struct device *d, struct device_attribute *attr, char *buf) -{ - struct il_priv *il = dev_get_drvdata(d); - if (!il_is_alive(il)) - return -EAGAIN; - return sprintf(buf, "0x%08x\n", (int)il->status); -} - -static DEVICE_ATTR(status, S_IRUGO, il3945_show_status, NULL); - -static ssize_t -il3945_dump_error_log(struct device *d, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct il_priv *il = dev_get_drvdata(d); - char *p = (char *)buf; - - if (p[0] == '1') - il3945_dump_nic_error_log(il); - - return strnlen(buf, count); -} - -static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, il3945_dump_error_log); - -/***************************************************************************** - * - * driver setup and tear down - * - *****************************************************************************/ - -static void -il3945_setup_deferred_work(struct il_priv *il) -{ - il->workqueue = create_singlethread_workqueue(DRV_NAME); - - init_waitqueue_head(&il->wait_command_queue); - - INIT_WORK(&il->restart, il3945_bg_restart); - INIT_WORK(&il->rx_replenish, il3945_bg_rx_replenish); - INIT_DELAYED_WORK(&il->init_alive_start, il3945_bg_init_alive_start); - INIT_DELAYED_WORK(&il->alive_start, il3945_bg_alive_start); - INIT_DELAYED_WORK(&il->_3945.rfkill_poll, il3945_rfkill_poll); - - il_setup_scan_deferred_work(il); - - il3945_hw_setup_deferred_work(il); - - init_timer(&il->watchdog); - il->watchdog.data = (unsigned long)il; - il->watchdog.function = il_bg_watchdog; - - tasklet_init(&il->irq_tasklet, - (void (*)(unsigned long))il3945_irq_tasklet, - (unsigned long)il); -} - -static void -il3945_cancel_deferred_work(struct il_priv *il) -{ - il3945_hw_cancel_deferred_work(il); - - cancel_delayed_work_sync(&il->init_alive_start); - cancel_delayed_work(&il->alive_start); - - il_cancel_scan_deferred_work(il); -} - -static struct attribute *il3945_sysfs_entries[] = { - &dev_attr_antenna.attr, - &dev_attr_channels.attr, - &dev_attr_dump_errors.attr, - &dev_attr_flags.attr, - 
&dev_attr_filter_flags.attr, - &dev_attr_measurement.attr, - &dev_attr_retry_rate.attr, - &dev_attr_status.attr, - &dev_attr_temperature.attr, - &dev_attr_tx_power.attr, -#ifdef CONFIG_IWLEGACY_DEBUG - &dev_attr_debug_level.attr, -#endif - NULL -}; - -static struct attribute_group il3945_attribute_group = { - .name = NULL, /* put in device directory */ - .attrs = il3945_sysfs_entries, -}; - -struct ieee80211_ops il3945_hw_ops = { - .tx = il3945_mac_tx, - .start = il3945_mac_start, - .stop = il3945_mac_stop, - .add_interface = il_mac_add_interface, - .remove_interface = il_mac_remove_interface, - .change_interface = il_mac_change_interface, - .config = il_mac_config, - .configure_filter = il3945_configure_filter, - .set_key = il3945_mac_set_key, - .conf_tx = il_mac_conf_tx, - .reset_tsf = il_mac_reset_tsf, - .bss_info_changed = il_mac_bss_info_changed, - .hw_scan = il_mac_hw_scan, - .sta_add = il3945_mac_sta_add, - .sta_remove = il_mac_sta_remove, - .tx_last_beacon = il_mac_tx_last_beacon, -}; - -static int -il3945_init_drv(struct il_priv *il) -{ - int ret; - struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; - - il->retry_rate = 1; - il->beacon_skb = NULL; - - spin_lock_init(&il->sta_lock); - spin_lock_init(&il->hcmd_lock); - - INIT_LIST_HEAD(&il->free_frames); - - mutex_init(&il->mutex); - - il->ieee_channels = NULL; - il->ieee_rates = NULL; - il->band = IEEE80211_BAND_2GHZ; - - il->iw_mode = NL80211_IFTYPE_STATION; - il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF; - - /* initialize force reset */ - il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD; - - if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { - IL_WARN("Unsupported EEPROM version: 0x%04X\n", - eeprom->version); - ret = -EINVAL; - goto err; - } - ret = il_init_channel_map(il); - if (ret) { - IL_ERR("initializing regulatory failed: %d\n", ret); - goto err; - } - - /* Set up txpower settings in driver for all channels */ - if (il3945_txpower_set_from_eeprom(il)) { - ret = -EIO; - goto err_free_channel_map; - } - - ret = il_init_geos(il); - if (ret) { - IL_ERR("initializing geos failed: %d\n", ret); - goto err_free_channel_map; - } - il3945_init_hw_rates(il, il->ieee_rates); - - return 0; - -err_free_channel_map: - il_free_channel_map(il); -err: - return ret; -} - -#define IL3945_MAX_PROBE_REQUEST 200 - -static int -il3945_setup_mac(struct il_priv *il) -{ - int ret; - struct ieee80211_hw *hw = il->hw; - - hw->rate_control_algorithm = "iwl-3945-rs"; - hw->sta_data_size = sizeof(struct il3945_sta_priv); - hw->vif_data_size = sizeof(struct il_vif_priv); - - /* Tell mac80211 our characteristics */ - hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT; - - hw->wiphy->interface_modes = il->ctx.interface_modes; - - hw->wiphy->flags |= - WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS | - WIPHY_FLAG_IBSS_RSN; - - hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; - /* we create the 802.11 header and a zero-length SSID element */ - hw->wiphy->max_scan_ie_len = IL3945_MAX_PROBE_REQUEST - 24 - 2; - - /* Default value; 4 EDCA QOS priorities */ - hw->queues = 4; - - if (il->bands[IEEE80211_BAND_2GHZ].n_channels) - il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = - &il->bands[IEEE80211_BAND_2GHZ]; - - if (il->bands[IEEE80211_BAND_5GHZ].n_channels) - il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = - &il->bands[IEEE80211_BAND_5GHZ]; - - il_leds_init(il); - - ret = ieee80211_register_hw(il->hw); - if (ret) { - IL_ERR("Failed to register hw (error %d)\n", ret); - return ret; - } - 
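The max_scan_ie_len advertised in il3945_setup_mac() is simple budgeting: of the 200-byte IL3945_MAX_PROBE_REQUEST frame, 24 bytes go to the 802.11 management header and 2 bytes to the zero-length wildcard SSID element the driver writes itself, leaving the rest for caller-supplied IEs. The arithmetic spelled out below; the 200, 24 and 2 come from the code above, the two helper macro names are local to this sketch.

/*
 * The probe-request IE budget advertised in il3945_setup_mac():
 * total frame budget minus the management header and the empty
 * SSID element the driver adds itself.
 */
#include <stdio.h>

#define IL3945_MAX_PROBE_REQUEST	200
#define MGMT_HDR_LEN			24	/* 3-address 802.11 header */
#define EMPTY_SSID_IE_LEN		2	/* element ID + zero length */

int main(void)
{
	printf("max_scan_ie_len = %d\n",
	       IL3945_MAX_PROBE_REQUEST - MGMT_HDR_LEN - EMPTY_SSID_IE_LEN);
	return 0;
}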
il->mac80211_registered = 1; - - return 0; -} - -static int -il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - int err = 0; - struct il_priv *il; - struct ieee80211_hw *hw; - struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data); - struct il3945_eeprom *eeprom; - unsigned long flags; - - /*********************** - * 1. Allocating HW data - * ********************/ - - /* mac80211 allocates memory for this device instance, including - * space for this driver's ilate structure */ - hw = il_alloc_all(cfg); - if (hw == NULL) { - pr_err("Can not allocate network device\n"); - err = -ENOMEM; - goto out; - } - il = hw->priv; - SET_IEEE80211_DEV(hw, &pdev->dev); - - il->cmd_queue = IL39_CMD_QUEUE_NUM; - - il->ctx.ctxid = 0; - - il->ctx.rxon_cmd = C_RXON; - il->ctx.rxon_timing_cmd = C_RXON_TIMING; - il->ctx.rxon_assoc_cmd = C_RXON_ASSOC; - il->ctx.qos_cmd = C_QOS_PARAM; - il->ctx.ap_sta_id = IL_AP_ID; - il->ctx.wep_key_cmd = C_WEPKEY; - il->ctx.interface_modes = - BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); - il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS; - il->ctx.station_devtype = RXON_DEV_TYPE_ESS; - il->ctx.unused_devtype = RXON_DEV_TYPE_ESS; - - /* - * Disabling hardware scan means that mac80211 will perform scans - * "the hard way", rather than using device's scan. - */ - if (il3945_mod_params.disable_hw_scan) { - D_INFO("Disabling hw_scan\n"); - il3945_hw_ops.hw_scan = NULL; - } - - D_INFO("*** LOAD DRIVER ***\n"); - il->cfg = cfg; - il->pci_dev = pdev; - il->inta_mask = CSR_INI_SET_MASK; - - if (il_alloc_traffic_mem(il)) - IL_ERR("Not enough memory to generate traffic log\n"); - - /*************************** - * 2. Initializing PCI bus - * *************************/ - pci_disable_link_state(pdev, - PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | - PCIE_LINK_STATE_CLKPM); - - if (pci_enable_device(pdev)) { - err = -ENODEV; - goto out_ieee80211_free_hw; - } - - pci_set_master(pdev); - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - IL_WARN("No suitable DMA available.\n"); - goto out_pci_disable_device; - } - - pci_set_drvdata(pdev, il); - err = pci_request_regions(pdev, DRV_NAME); - if (err) - goto out_pci_disable_device; - - /*********************** - * 3. Read REV Register - * ********************/ - il->hw_base = pci_iomap(pdev, 0, 0); - if (!il->hw_base) { - err = -ENODEV; - goto out_pci_release_regions; - } - - D_INFO("pci_resource_len = 0x%08llx\n", - (unsigned long long)pci_resource_len(pdev, 0)); - D_INFO("pci_resource_base = %p\n", il->hw_base); - - /* We disable the RETRY_TIMEOUT register (0x41) to keep - * PCI Tx retries from interfering with C3 CPU state */ - pci_write_config_byte(pdev, 0x41, 0x00); - - /* these spin locks will be used in apm_ops.init and EEPROM access - * we should init now - */ - spin_lock_init(&il->reg_lock); - spin_lock_init(&il->lock); - - /* - * stop and reset the on-board processor just in case it is in a - * strange state ... like being left stranded by a primary kernel - * and this is now the kdump kernel trying to start up - */ - _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); - - /*********************** - * 4. 
Read EEPROM - * ********************/ - - /* Read the EEPROM */ - err = il_eeprom_init(il); - if (err) { - IL_ERR("Unable to init EEPROM\n"); - goto out_iounmap; - } - /* MAC Address location in EEPROM same for 3945/4965 */ - eeprom = (struct il3945_eeprom *)il->eeprom; - D_INFO("MAC address: %pM\n", eeprom->mac_address); - SET_IEEE80211_PERM_ADDR(il->hw, eeprom->mac_address); - - /*********************** - * 5. Setup HW Constants - * ********************/ - /* Device-specific setup */ - if (il3945_hw_set_hw_params(il)) { - IL_ERR("failed to set hw settings\n"); - goto out_eeprom_free; - } - - /*********************** - * 6. Setup il - * ********************/ - - err = il3945_init_drv(il); - if (err) { - IL_ERR("initializing driver failed\n"); - goto out_unset_hw_params; - } - - IL_INFO("Detected Intel Wireless WiFi Link %s\n", il->cfg->name); - - /*********************** - * 7. Setup Services - * ********************/ - - spin_lock_irqsave(&il->lock, flags); - il_disable_interrupts(il); - spin_unlock_irqrestore(&il->lock, flags); - - pci_enable_msi(il->pci_dev); - - err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il); - if (err) { - IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq); - goto out_disable_msi; - } - - err = sysfs_create_group(&pdev->dev.kobj, &il3945_attribute_group); - if (err) { - IL_ERR("failed to create sysfs device attributes\n"); - goto out_release_irq; - } - - il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5], - &il->ctx); - il3945_setup_deferred_work(il); - il3945_setup_handlers(il); - il_power_initialize(il); - - /********************************* - * 8. Setup and Register mac80211 - * *******************************/ - - il_enable_interrupts(il); - - err = il3945_setup_mac(il); - if (err) - goto out_remove_sysfs; - - err = il_dbgfs_register(il, DRV_NAME); - if (err) - IL_ERR("failed to create debugfs files. Ignoring error: %d\n", - err); - - /* Start monitoring the killswitch */ - queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2 * HZ); - - return 0; - -out_remove_sysfs: - destroy_workqueue(il->workqueue); - il->workqueue = NULL; - sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group); -out_release_irq: - free_irq(il->pci_dev->irq, il); -out_disable_msi: - pci_disable_msi(il->pci_dev); - il_free_geos(il); - il_free_channel_map(il); -out_unset_hw_params: - il3945_unset_hw_params(il); -out_eeprom_free: - il_eeprom_free(il); -out_iounmap: - pci_iounmap(pdev, il->hw_base); -out_pci_release_regions: - pci_release_regions(pdev); -out_pci_disable_device: - pci_set_drvdata(pdev, NULL); - pci_disable_device(pdev); -out_ieee80211_free_hw: - il_free_traffic_mem(il); - ieee80211_free_hw(il->hw); -out: - return err; -} - -static void __devexit -il3945_pci_remove(struct pci_dev *pdev) -{ - struct il_priv *il = pci_get_drvdata(pdev); - unsigned long flags; - - if (!il) - return; - - D_INFO("*** UNLOAD DRIVER ***\n"); - - il_dbgfs_unregister(il); - - set_bit(S_EXIT_PENDING, &il->status); - - il_leds_exit(il); - - if (il->mac80211_registered) { - ieee80211_unregister_hw(il->hw); - il->mac80211_registered = 0; - } else { - il3945_down(il); - } - - /* - * Make sure device is reset to low power before unloading driver. - * This may be redundant with il_down(), but there are paths to - * run il_down() without calling apm_ops.stop(), and there are - * paths to avoid running il_down() at all before leaving driver. - * This (inexpensive) call *makes sure* device is reset. 
- */ - il_apm_stop(il); - - /* make sure we flush any pending irq or - * tasklet for the driver - */ - spin_lock_irqsave(&il->lock, flags); - il_disable_interrupts(il); - spin_unlock_irqrestore(&il->lock, flags); - - il3945_synchronize_irq(il); - - sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group); - - cancel_delayed_work_sync(&il->_3945.rfkill_poll); - - il3945_dealloc_ucode_pci(il); - - if (il->rxq.bd) - il3945_rx_queue_free(il, &il->rxq); - il3945_hw_txq_ctx_free(il); - - il3945_unset_hw_params(il); - - /*netif_stop_queue(dev); */ - flush_workqueue(il->workqueue); - - /* ieee80211_unregister_hw calls il3945_mac_stop, which flushes - * il->workqueue... so we can't take down the workqueue - * until now... */ - destroy_workqueue(il->workqueue); - il->workqueue = NULL; - il_free_traffic_mem(il); - - free_irq(pdev->irq, il); - pci_disable_msi(pdev); - - pci_iounmap(pdev, il->hw_base); - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - - il_free_channel_map(il); - il_free_geos(il); - kfree(il->scan_cmd); - if (il->beacon_skb) - dev_kfree_skb(il->beacon_skb); - - ieee80211_free_hw(il->hw); -} - -/***************************************************************************** - * - * driver and module entry point - * - *****************************************************************************/ - -static struct pci_driver il3945_driver = { - .name = DRV_NAME, - .id_table = il3945_hw_card_ids, - .probe = il3945_pci_probe, - .remove = __devexit_p(il3945_pci_remove), - .driver.pm = IL_LEGACY_PM_OPS, -}; - -static int __init -il3945_init(void) -{ - - int ret; - pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); - pr_info(DRV_COPYRIGHT "\n"); - - ret = il3945_rate_control_register(); - if (ret) { - pr_err("Unable to register rate control algorithm: %d\n", ret); - return ret; - } - - ret = pci_register_driver(&il3945_driver); - if (ret) { - pr_err("Unable to initialize PCI module\n"); - goto error_register; - } - - return ret; - -error_register: - il3945_rate_control_unregister(); - return ret; -} - -static void __exit -il3945_exit(void) -{ - pci_unregister_driver(&il3945_driver); - il3945_rate_control_unregister(); -} - -MODULE_FIRMWARE(IL3945_MODULE_FIRMWARE(IL3945_UCODE_API_MAX)); - -module_param_named(antenna, il3945_mod_params.antenna, int, S_IRUGO); -MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); -module_param_named(swcrypto, il3945_mod_params.sw_crypto, int, S_IRUGO); -MODULE_PARM_DESC(swcrypto, "using software crypto (default 1 [software])"); -module_param_named(disable_hw_scan, il3945_mod_params.disable_hw_scan, int, - S_IRUGO); -MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)"); -#ifdef CONFIG_IWLEGACY_DEBUG -module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(debug, "debug output mask"); -#endif -module_param_named(fw_restart, il3945_mod_params.restart_fw, int, S_IRUGO); -MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); - -module_exit(il3945_exit); -module_init(il3945_init); diff --git a/trunk/drivers/net/wireless/iwlegacy/3945-rs.c b/trunk/drivers/net/wireless/iwlegacy/3945-rs.c deleted file mode 100644 index d7a83f229190..000000000000 --- a/trunk/drivers/net/wireless/iwlegacy/3945-rs.c +++ /dev/null @@ -1,986 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include -#include -#include -#include -#include - -#include -#include -#include - -#include - -#include "commands.h" -#include "3945.h" - -#define RS_NAME "iwl-3945-rs" - -static s32 il3945_expected_tpt_g[RATE_COUNT_3945] = { - 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202 -}; - -static s32 il3945_expected_tpt_g_prot[RATE_COUNT_3945] = { - 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125 -}; - -static s32 il3945_expected_tpt_a[RATE_COUNT_3945] = { - 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186 -}; - -static s32 il3945_expected_tpt_b[RATE_COUNT_3945] = { - 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0 -}; - -struct il3945_tpt_entry { - s8 min_rssi; - u8 idx; -}; - -static struct il3945_tpt_entry il3945_tpt_table_a[] = { - {-60, RATE_54M_IDX}, - {-64, RATE_48M_IDX}, - {-72, RATE_36M_IDX}, - {-80, RATE_24M_IDX}, - {-84, RATE_18M_IDX}, - {-85, RATE_12M_IDX}, - {-87, RATE_9M_IDX}, - {-89, RATE_6M_IDX} -}; - -static struct il3945_tpt_entry il3945_tpt_table_g[] = { - {-60, RATE_54M_IDX}, - {-64, RATE_48M_IDX}, - {-68, RATE_36M_IDX}, - {-80, RATE_24M_IDX}, - {-84, RATE_18M_IDX}, - {-85, RATE_12M_IDX}, - {-86, RATE_11M_IDX}, - {-88, RATE_5M_IDX}, - {-90, RATE_2M_IDX}, - {-92, RATE_1M_IDX} -}; - -#define RATE_MAX_WINDOW 62 -#define RATE_FLUSH (3*HZ) -#define RATE_WIN_FLUSH (HZ/2) -#define IL39_RATE_HIGH_TH 11520 -#define IL_SUCCESS_UP_TH 8960 -#define IL_SUCCESS_DOWN_TH 10880 -#define RATE_MIN_FAILURE_TH 6 -#define RATE_MIN_SUCCESS_TH 8 -#define RATE_DECREASE_TH 1920 -#define RATE_RETRY_TH 15 - -static u8 -il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band) -{ - u32 idx = 0; - u32 table_size = 0; - struct il3945_tpt_entry *tpt_table = NULL; - - if (rssi < IL_MIN_RSSI_VAL || rssi > IL_MAX_RSSI_VAL) - rssi = IL_MIN_RSSI_VAL; - - switch (band) { - case IEEE80211_BAND_2GHZ: - tpt_table = il3945_tpt_table_g; - table_size = ARRAY_SIZE(il3945_tpt_table_g); - break; - case IEEE80211_BAND_5GHZ: - tpt_table = il3945_tpt_table_a; - table_size = ARRAY_SIZE(il3945_tpt_table_a); - break; - default: - BUG(); - break; - } - - while (idx < table_size && rssi < tpt_table[idx].min_rssi) - idx++; - - idx = min(idx, table_size - 1); - - return tpt_table[idx].idx; -} - -static void -il3945_clear_win(struct il3945_rate_scale_data *win) -{ - win->data = 0; - win->success_counter = 0; - win->success_ratio = -1; - win->counter = 0; - win->average_tpt = IL_INVALID_VALUE; - win->stamp = 0; -} - -/** - * il3945_rate_scale_flush_wins - flush out the rate scale wins - * - * Returns the number of wins that have gathered data but were - * not 
flushed. If there were any that were not flushed, then - * reschedule the rate flushing routine. - */ -static int -il3945_rate_scale_flush_wins(struct il3945_rs_sta *rs_sta) -{ - int unflushed = 0; - int i; - unsigned long flags; - struct il_priv *il __maybe_unused = rs_sta->il; - - /* - * For each rate, if we have collected data on that rate - * and it has been more than RATE_WIN_FLUSH - * since we flushed, clear out the gathered stats - */ - for (i = 0; i < RATE_COUNT_3945; i++) { - if (!rs_sta->win[i].counter) - continue; - - spin_lock_irqsave(&rs_sta->lock, flags); - if (time_after(jiffies, rs_sta->win[i].stamp + RATE_WIN_FLUSH)) { - D_RATE("flushing %d samples of rate " "idx %d\n", - rs_sta->win[i].counter, i); - il3945_clear_win(&rs_sta->win[i]); - } else - unflushed++; - spin_unlock_irqrestore(&rs_sta->lock, flags); - } - - return unflushed; -} - -#define RATE_FLUSH_MAX 5000 /* msec */ -#define RATE_FLUSH_MIN 50 /* msec */ -#define IL_AVERAGE_PACKETS 1500 - -static void -il3945_bg_rate_scale_flush(unsigned long data) -{ - struct il3945_rs_sta *rs_sta = (void *)data; - struct il_priv *il __maybe_unused = rs_sta->il; - int unflushed = 0; - unsigned long flags; - u32 packet_count, duration, pps; - - D_RATE("enter\n"); - - unflushed = il3945_rate_scale_flush_wins(rs_sta); - - spin_lock_irqsave(&rs_sta->lock, flags); - - /* Number of packets Rx'd since last time this timer ran */ - packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1; - - rs_sta->last_tx_packets = rs_sta->tx_packets + 1; - - if (unflushed) { - duration = - jiffies_to_msecs(jiffies - rs_sta->last_partial_flush); - - D_RATE("Tx'd %d packets in %dms\n", packet_count, duration); - - /* Determine packets per second */ - if (duration) - pps = (packet_count * 1000) / duration; - else - pps = 0; - - if (pps) { - duration = (IL_AVERAGE_PACKETS * 1000) / pps; - if (duration < RATE_FLUSH_MIN) - duration = RATE_FLUSH_MIN; - else if (duration > RATE_FLUSH_MAX) - duration = RATE_FLUSH_MAX; - } else - duration = RATE_FLUSH_MAX; - - rs_sta->flush_time = msecs_to_jiffies(duration); - - D_RATE("new flush period: %d msec ave %d\n", duration, - packet_count); - - mod_timer(&rs_sta->rate_scale_flush, - jiffies + rs_sta->flush_time); - - rs_sta->last_partial_flush = jiffies; - } else { - rs_sta->flush_time = RATE_FLUSH; - rs_sta->flush_pending = 0; - } - /* If there weren't any unflushed entries, we don't schedule the timer - * to run again */ - - rs_sta->last_flush = jiffies; - - spin_unlock_irqrestore(&rs_sta->lock, flags); - - D_RATE("leave\n"); -} - -/** - * il3945_collect_tx_data - Update the success/failure sliding win - * - * We keep a sliding win of the last 64 packets transmitted - * at this rate. win->data contains the bitmask of successful - * packets. - */ -static void -il3945_collect_tx_data(struct il3945_rs_sta *rs_sta, - struct il3945_rate_scale_data *win, int success, - int retries, int idx) -{ - unsigned long flags; - s32 fail_count; - struct il_priv *il __maybe_unused = rs_sta->il; - - if (!retries) { - D_RATE("leave: retries == 0 -- should be at least 1\n"); - return; - } - - spin_lock_irqsave(&rs_sta->lock, flags); - - /* - * Keep track of only the latest 62 tx frame attempts in this rate's - * history win; anything older isn't really relevant any more. - * If we have filled up the sliding win, drop the oldest attempt; - * if the oldest attempt (highest bit in bitmap) shows "success", - * subtract "1" from the success counter (this is the main reason - * we keep these bitmaps!). 
- * */ - while (retries > 0) { - if (win->counter >= RATE_MAX_WINDOW) { - - /* remove earliest */ - win->counter = RATE_MAX_WINDOW - 1; - - if (win->data & (1ULL << (RATE_MAX_WINDOW - 1))) { - win->data &= ~(1ULL << (RATE_MAX_WINDOW - 1)); - win->success_counter--; - } - } - - /* Increment frames-attempted counter */ - win->counter++; - - /* Shift bitmap by one frame (throw away oldest history), - * OR in "1", and increment "success" if this - * frame was successful. */ - win->data <<= 1; - if (success > 0) { - win->success_counter++; - win->data |= 0x1; - success--; - } - - retries--; - } - - /* Calculate current success ratio, avoid divide-by-0! */ - if (win->counter > 0) - win->success_ratio = - 128 * (100 * win->success_counter) / win->counter; - else - win->success_ratio = IL_INVALID_VALUE; - - fail_count = win->counter - win->success_counter; - - /* Calculate average throughput, if we have enough history. */ - if (fail_count >= RATE_MIN_FAILURE_TH || - win->success_counter >= RATE_MIN_SUCCESS_TH) - win->average_tpt = - ((win->success_ratio * rs_sta->expected_tpt[idx] + - 64) / 128); - else - win->average_tpt = IL_INVALID_VALUE; - - /* Tag this win as having been updated */ - win->stamp = jiffies; - - spin_unlock_irqrestore(&rs_sta->lock, flags); -} - -/* - * Called after adding a new station to initialize rate scaling - */ -void -il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id) -{ - struct ieee80211_hw *hw = il->hw; - struct ieee80211_conf *conf = &il->hw->conf; - struct il3945_sta_priv *psta; - struct il3945_rs_sta *rs_sta; - struct ieee80211_supported_band *sband; - int i; - - D_INFO("enter\n"); - if (sta_id == il->ctx.bcast_sta_id) - goto out; - - psta = (struct il3945_sta_priv *)sta->drv_priv; - rs_sta = &psta->rs_sta; - sband = hw->wiphy->bands[conf->channel->band]; - - rs_sta->il = il; - - rs_sta->start_rate = RATE_INVALID; - - /* default to just 802.11b */ - rs_sta->expected_tpt = il3945_expected_tpt_b; - - rs_sta->last_partial_flush = jiffies; - rs_sta->last_flush = jiffies; - rs_sta->flush_time = RATE_FLUSH; - rs_sta->last_tx_packets = 0; - - rs_sta->rate_scale_flush.data = (unsigned long)rs_sta; - rs_sta->rate_scale_flush.function = il3945_bg_rate_scale_flush; - - for (i = 0; i < RATE_COUNT_3945; i++) - il3945_clear_win(&rs_sta->win[i]); - - /* TODO: what is a good starting rate for STA? About middle? Maybe not - * the lowest or the highest rate.. Could consider using RSSI from - * previous packets? Need to have IEEE 802.1X auth succeed immediately - * after assoc.. 
*/ - - for (i = sband->n_bitrates - 1; i >= 0; i--) { - if (sta->supp_rates[sband->band] & (1 << i)) { - rs_sta->last_txrate_idx = i; - break; - } - } - - il->_3945.sta_supp_rates = sta->supp_rates[sband->band]; - /* For 5 GHz band it start at IL_FIRST_OFDM_RATE */ - if (sband->band == IEEE80211_BAND_5GHZ) { - rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE; - il->_3945.sta_supp_rates <<= IL_FIRST_OFDM_RATE; - } - -out: - il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS; - - D_INFO("leave\n"); -} - -static void * -il3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) -{ - return hw->priv; -} - -/* rate scale requires free function to be implemented */ -static void -il3945_rs_free(void *il) -{ -} - -static void * -il3945_rs_alloc_sta(void *il_priv, struct ieee80211_sta *sta, gfp_t gfp) -{ - struct il3945_rs_sta *rs_sta; - struct il3945_sta_priv *psta = (void *)sta->drv_priv; - struct il_priv *il __maybe_unused = il_priv; - - D_RATE("enter\n"); - - rs_sta = &psta->rs_sta; - - spin_lock_init(&rs_sta->lock); - init_timer(&rs_sta->rate_scale_flush); - - D_RATE("leave\n"); - - return rs_sta; -} - -static void -il3945_rs_free_sta(void *il_priv, struct ieee80211_sta *sta, void *il_sta) -{ - struct il3945_rs_sta *rs_sta = il_sta; - - /* - * Be careful not to use any members of il3945_rs_sta (like trying - * to use il_priv to print out debugging) since it may not be fully - * initialized at this point. - */ - del_timer_sync(&rs_sta->rate_scale_flush); -} - -/** - * il3945_rs_tx_status - Update rate control values based on Tx results - * - * NOTE: Uses il_priv->retry_rate for the # of retries attempted by - * the hardware for each rate. - */ -static void -il3945_rs_tx_status(void *il_rate, struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, void *il_sta, - struct sk_buff *skb) -{ - s8 retries = 0, current_count; - int scale_rate_idx, first_idx, last_idx; - unsigned long flags; - struct il_priv *il = (struct il_priv *)il_rate; - struct il3945_rs_sta *rs_sta = il_sta; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - - D_RATE("enter\n"); - - retries = info->status.rates[0].count; - /* Sanity Check for retries */ - if (retries > RATE_RETRY_TH) - retries = RATE_RETRY_TH; - - first_idx = sband->bitrates[info->status.rates[0].idx].hw_value; - if (first_idx < 0 || first_idx >= RATE_COUNT_3945) { - D_RATE("leave: Rate out of bounds: %d\n", first_idx); - return; - } - - if (!il_sta) { - D_RATE("leave: No STA il data to update!\n"); - return; - } - - /* Treat uninitialized rate scaling data same as non-existing. */ - if (!rs_sta->il) { - D_RATE("leave: STA il data uninitialized!\n"); - return; - } - - rs_sta->tx_packets++; - - scale_rate_idx = first_idx; - last_idx = first_idx; - - /* - * Update the win for each rate. We determine which rates - * were Tx'd based on the total number of retries vs. the number - * of retries configured for each rate -- currently set to the - * il value 'retry_rate' vs. 
rate specific - * - * On exit from this while loop last_idx indicates the rate - * at which the frame was finally transmitted (or failed if no - * ACK) - */ - while (retries > 1) { - if ((retries - 1) < il->retry_rate) { - current_count = (retries - 1); - last_idx = scale_rate_idx; - } else { - current_count = il->retry_rate; - last_idx = il3945_rs_next_rate(il, scale_rate_idx); - } - - /* Update this rate accounting for as many retries - * as was used for it (per current_count) */ - il3945_collect_tx_data(rs_sta, &rs_sta->win[scale_rate_idx], 0, - current_count, scale_rate_idx); - D_RATE("Update rate %d for %d retries.\n", scale_rate_idx, - current_count); - - retries -= current_count; - - scale_rate_idx = last_idx; - } - - /* Update the last idx win with success/failure based on ACK */ - D_RATE("Update rate %d with %s.\n", last_idx, - (info->flags & IEEE80211_TX_STAT_ACK) ? "success" : "failure"); - il3945_collect_tx_data(rs_sta, &rs_sta->win[last_idx], - info->flags & IEEE80211_TX_STAT_ACK, 1, - last_idx); - - /* We updated the rate scale win -- if its been more than - * flush_time since the last run, schedule the flush - * again */ - spin_lock_irqsave(&rs_sta->lock, flags); - - if (!rs_sta->flush_pending && - time_after(jiffies, rs_sta->last_flush + rs_sta->flush_time)) { - - rs_sta->last_partial_flush = jiffies; - rs_sta->flush_pending = 1; - mod_timer(&rs_sta->rate_scale_flush, - jiffies + rs_sta->flush_time); - } - - spin_unlock_irqrestore(&rs_sta->lock, flags); - - D_RATE("leave\n"); -} - -static u16 -il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask, - enum ieee80211_band band) -{ - u8 high = RATE_INVALID; - u8 low = RATE_INVALID; - struct il_priv *il __maybe_unused = rs_sta->il; - - /* 802.11A walks to the next literal adjacent rate in - * the rate table */ - if (unlikely(band == IEEE80211_BAND_5GHZ)) { - int i; - u32 mask; - - /* Find the previous rate that is in the rate mask */ - i = idx - 1; - for (mask = (1 << i); i >= 0; i--, mask >>= 1) { - if (rate_mask & mask) { - low = i; - break; - } - } - - /* Find the next rate that is in the rate mask */ - i = idx + 1; - for (mask = (1 << i); i < RATE_COUNT_3945; i++, mask <<= 1) { - if (rate_mask & mask) { - high = i; - break; - } - } - - return (high << 8) | low; - } - - low = idx; - while (low != RATE_INVALID) { - if (rs_sta->tgg) - low = il3945_rates[low].prev_rs_tgg; - else - low = il3945_rates[low].prev_rs; - if (low == RATE_INVALID) - break; - if (rate_mask & (1 << low)) - break; - D_RATE("Skipping masked lower rate: %d\n", low); - } - - high = idx; - while (high != RATE_INVALID) { - if (rs_sta->tgg) - high = il3945_rates[high].next_rs_tgg; - else - high = il3945_rates[high].next_rs; - if (high == RATE_INVALID) - break; - if (rate_mask & (1 << high)) - break; - D_RATE("Skipping masked higher rate: %d\n", high); - } - - return (high << 8) | low; -} - -/** - * il3945_rs_get_rate - find the rate for the requested packet - * - * Returns the ieee80211_rate structure allocated by the driver. - * - * The rate control algorithm has no internal mapping between hw_mode's - * rate ordering and the rate ordering used by the rate control algorithm. - * - * The rate control algorithm uses a single table of rates that goes across - * the entire A/B/G spectrum vs. being limited to just one particular - * hw_mode. 
- * - * As such, we can't convert the idx obtained below into the hw_mode's - * rate table and must reference the driver allocated rate table - * - */ -static void -il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta, - struct ieee80211_tx_rate_control *txrc) -{ - struct ieee80211_supported_band *sband = txrc->sband; - struct sk_buff *skb = txrc->skb; - u8 low = RATE_INVALID; - u8 high = RATE_INVALID; - u16 high_low; - int idx; - struct il3945_rs_sta *rs_sta = il_sta; - struct il3945_rate_scale_data *win = NULL; - int current_tpt = IL_INVALID_VALUE; - int low_tpt = IL_INVALID_VALUE; - int high_tpt = IL_INVALID_VALUE; - u32 fail_count; - s8 scale_action = 0; - unsigned long flags; - u16 rate_mask; - s8 max_rate_idx = -1; - struct il_priv *il __maybe_unused = (struct il_priv *)il_r; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - - D_RATE("enter\n"); - - /* Treat uninitialized rate scaling data same as non-existing. */ - if (rs_sta && !rs_sta->il) { - D_RATE("Rate scaling information not initialized yet.\n"); - il_sta = NULL; - } - - if (rate_control_send_low(sta, il_sta, txrc)) - return; - - rate_mask = sta->supp_rates[sband->band]; - - /* get user max rate if set */ - max_rate_idx = txrc->max_rate_idx; - if (sband->band == IEEE80211_BAND_5GHZ && max_rate_idx != -1) - max_rate_idx += IL_FIRST_OFDM_RATE; - if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT) - max_rate_idx = -1; - - idx = min(rs_sta->last_txrate_idx & 0xffff, RATE_COUNT_3945 - 1); - - if (sband->band == IEEE80211_BAND_5GHZ) - rate_mask = rate_mask << IL_FIRST_OFDM_RATE; - - spin_lock_irqsave(&rs_sta->lock, flags); - - /* for recent assoc, choose best rate regarding - * to rssi value - */ - if (rs_sta->start_rate != RATE_INVALID) { - if (rs_sta->start_rate < idx && - (rate_mask & (1 << rs_sta->start_rate))) - idx = rs_sta->start_rate; - rs_sta->start_rate = RATE_INVALID; - } - - /* force user max rate if set by user */ - if (max_rate_idx != -1 && max_rate_idx < idx) { - if (rate_mask & (1 << max_rate_idx)) - idx = max_rate_idx; - } - - win = &(rs_sta->win[idx]); - - fail_count = win->counter - win->success_counter; - - if (fail_count < RATE_MIN_FAILURE_TH && - win->success_counter < RATE_MIN_SUCCESS_TH) { - spin_unlock_irqrestore(&rs_sta->lock, flags); - - D_RATE("Invalid average_tpt on rate %d: " - "counter: %d, success_counter: %d, " - "expected_tpt is %sNULL\n", idx, win->counter, - win->success_counter, - rs_sta->expected_tpt ? 
"not " : ""); - - /* Can't calculate this yet; not enough history */ - win->average_tpt = IL_INVALID_VALUE; - goto out; - - } - - current_tpt = win->average_tpt; - - high_low = - il3945_get_adjacent_rate(rs_sta, idx, rate_mask, sband->band); - low = high_low & 0xff; - high = (high_low >> 8) & 0xff; - - /* If user set max rate, dont allow higher than user constrain */ - if (max_rate_idx != -1 && max_rate_idx < high) - high = RATE_INVALID; - - /* Collect Measured throughputs of adjacent rates */ - if (low != RATE_INVALID) - low_tpt = rs_sta->win[low].average_tpt; - - if (high != RATE_INVALID) - high_tpt = rs_sta->win[high].average_tpt; - - spin_unlock_irqrestore(&rs_sta->lock, flags); - - scale_action = 0; - - /* Low success ratio , need to drop the rate */ - if (win->success_ratio < RATE_DECREASE_TH || !current_tpt) { - D_RATE("decrease rate because of low success_ratio\n"); - scale_action = -1; - /* No throughput measured yet for adjacent rates, - * try increase */ - } else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) { - - if (high != RATE_INVALID && - win->success_ratio >= RATE_INCREASE_TH) - scale_action = 1; - else if (low != RATE_INVALID) - scale_action = 0; - - /* Both adjacent throughputs are measured, but neither one has - * better throughput; we're using the best rate, don't change - * it! */ - } else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE - && low_tpt < current_tpt && high_tpt < current_tpt) { - - D_RATE("No action -- low [%d] & high [%d] < " - "current_tpt [%d]\n", low_tpt, high_tpt, current_tpt); - scale_action = 0; - - /* At least one of the rates has better throughput */ - } else { - if (high_tpt != IL_INVALID_VALUE) { - - /* High rate has better throughput, Increase - * rate */ - if (high_tpt > current_tpt && - win->success_ratio >= RATE_INCREASE_TH) - scale_action = 1; - else { - D_RATE("decrease rate because of high tpt\n"); - scale_action = 0; - } - } else if (low_tpt != IL_INVALID_VALUE) { - if (low_tpt > current_tpt) { - D_RATE("decrease rate because of low tpt\n"); - scale_action = -1; - } else if (win->success_ratio >= RATE_INCREASE_TH) { - /* Lower rate has better - * throughput,decrease rate */ - scale_action = 1; - } - } - } - - /* Sanity check; asked for decrease, but success rate or throughput - * has been good at old rate. Don't change it. 
*/ - if (scale_action == -1 && low != RATE_INVALID && - (win->success_ratio > RATE_HIGH_TH || - current_tpt > 100 * rs_sta->expected_tpt[low])) - scale_action = 0; - - switch (scale_action) { - case -1: - /* Decrese rate */ - if (low != RATE_INVALID) - idx = low; - break; - case 1: - /* Increase rate */ - if (high != RATE_INVALID) - idx = high; - - break; - case 0: - default: - /* No change */ - break; - } - - D_RATE("Selected %d (action %d) - low %d high %d\n", idx, scale_action, - low, high); - -out: - - if (sband->band == IEEE80211_BAND_5GHZ) { - if (WARN_ON_ONCE(idx < IL_FIRST_OFDM_RATE)) - idx = IL_FIRST_OFDM_RATE; - rs_sta->last_txrate_idx = idx; - info->control.rates[0].idx = idx - IL_FIRST_OFDM_RATE; - } else { - rs_sta->last_txrate_idx = idx; - info->control.rates[0].idx = rs_sta->last_txrate_idx; - } - - D_RATE("leave: %d\n", idx); -} - -#ifdef CONFIG_MAC80211_DEBUGFS -static int -il3945_open_file_generic(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - return 0; -} - -static ssize_t -il3945_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - char *buff; - int desc = 0; - int j; - ssize_t ret; - struct il3945_rs_sta *lq_sta = file->private_data; - - buff = kmalloc(1024, GFP_KERNEL); - if (!buff) - return -ENOMEM; - - desc += - sprintf(buff + desc, - "tx packets=%d last rate idx=%d\n" - "rate=0x%X flush time %d\n", lq_sta->tx_packets, - lq_sta->last_txrate_idx, lq_sta->start_rate, - jiffies_to_msecs(lq_sta->flush_time)); - for (j = 0; j < RATE_COUNT_3945; j++) { - desc += - sprintf(buff + desc, "counter=%d success=%d %%=%d\n", - lq_sta->win[j].counter, - lq_sta->win[j].success_counter, - lq_sta->win[j].success_ratio); - } - ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); - kfree(buff); - return ret; -} - -static const struct file_operations rs_sta_dbgfs_stats_table_ops = { - .read = il3945_sta_dbgfs_stats_table_read, - .open = il3945_open_file_generic, - .llseek = default_llseek, -}; - -static void -il3945_add_debugfs(void *il, void *il_sta, struct dentry *dir) -{ - struct il3945_rs_sta *lq_sta = il_sta; - - lq_sta->rs_sta_dbgfs_stats_table_file = - debugfs_create_file("rate_stats_table", 0600, dir, lq_sta, - &rs_sta_dbgfs_stats_table_ops); - -} - -static void -il3945_remove_debugfs(void *il, void *il_sta) -{ - struct il3945_rs_sta *lq_sta = il_sta; - debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); -} -#endif - -/* - * Initialization of rate scaling information is done by driver after - * the station is added. Since mac80211 calls this function before a - * station is added we ignore it. 
- */ -static void -il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, void *il_sta) -{ -} - -static struct rate_control_ops rs_ops = { - .module = NULL, - .name = RS_NAME, - .tx_status = il3945_rs_tx_status, - .get_rate = il3945_rs_get_rate, - .rate_init = il3945_rs_rate_init_stub, - .alloc = il3945_rs_alloc, - .free = il3945_rs_free, - .alloc_sta = il3945_rs_alloc_sta, - .free_sta = il3945_rs_free_sta, -#ifdef CONFIG_MAC80211_DEBUGFS - .add_sta_debugfs = il3945_add_debugfs, - .remove_sta_debugfs = il3945_remove_debugfs, -#endif - -}; - -void -il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id) -{ - struct il_priv *il = hw->priv; - s32 rssi = 0; - unsigned long flags; - struct il3945_rs_sta *rs_sta; - struct ieee80211_sta *sta; - struct il3945_sta_priv *psta; - - D_RATE("enter\n"); - - rcu_read_lock(); - - sta = - ieee80211_find_sta(il->ctx.vif, il->stations[sta_id].sta.sta.addr); - if (!sta) { - D_RATE("Unable to find station to initialize rate scaling.\n"); - rcu_read_unlock(); - return; - } - - psta = (void *)sta->drv_priv; - rs_sta = &psta->rs_sta; - - spin_lock_irqsave(&rs_sta->lock, flags); - - rs_sta->tgg = 0; - switch (il->band) { - case IEEE80211_BAND_2GHZ: - /* TODO: this always does G, not a regression */ - if (il->ctx.active.flags & RXON_FLG_TGG_PROTECT_MSK) { - rs_sta->tgg = 1; - rs_sta->expected_tpt = il3945_expected_tpt_g_prot; - } else - rs_sta->expected_tpt = il3945_expected_tpt_g; - break; - case IEEE80211_BAND_5GHZ: - rs_sta->expected_tpt = il3945_expected_tpt_a; - break; - case IEEE80211_NUM_BANDS: - BUG(); - break; - } - - spin_unlock_irqrestore(&rs_sta->lock, flags); - - rssi = il->_3945.last_rx_rssi; - if (rssi == 0) - rssi = IL_MIN_RSSI_VAL; - - D_RATE("Network RSSI: %d\n", rssi); - - rs_sta->start_rate = il3945_get_rate_idx_by_rssi(rssi, il->band); - - D_RATE("leave: rssi %d assign rate idx: " "%d (plcp 0x%x)\n", rssi, - rs_sta->start_rate, il3945_rates[rs_sta->start_rate].plcp); - rcu_read_unlock(); -} - -int -il3945_rate_control_register(void) -{ - return ieee80211_rate_control_register(&rs_ops); -} - -void -il3945_rate_control_unregister(void) -{ - ieee80211_rate_control_unregister(&rs_ops); -} diff --git a/trunk/drivers/net/wireless/iwlegacy/3945.c b/trunk/drivers/net/wireless/iwlegacy/3945.c deleted file mode 100644 index 1489b1573a6a..000000000000 --- a/trunk/drivers/net/wireless/iwlegacy/3945.c +++ /dev/null @@ -1,2743 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "common.h" -#include "3945.h" - -/* Send led command */ -static int -il3945_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd) -{ - struct il_host_cmd cmd = { - .id = C_LEDS, - .len = sizeof(struct il_led_cmd), - .data = led_cmd, - .flags = CMD_ASYNC, - .callback = NULL, - }; - - return il_send_cmd(il, &cmd); -} - -const struct il_led_ops il3945_led_ops = { - .cmd = il3945_send_led_cmd, -}; - -#define IL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \ - [RATE_##r##M_IDX] = { RATE_##r##M_PLCP, \ - RATE_##r##M_IEEE, \ - RATE_##ip##M_IDX, \ - RATE_##in##M_IDX, \ - RATE_##rp##M_IDX, \ - RATE_##rn##M_IDX, \ - RATE_##pp##M_IDX, \ - RATE_##np##M_IDX, \ - RATE_##r##M_IDX_TBL, \ - RATE_##ip##M_IDX_TBL } - -/* - * Parameter order: - * rate, prev rate, next rate, prev tgg rate, next tgg rate - * - * If there isn't a valid next or previous rate then INV is used which - * maps to RATE_INVALID - * - */ -const struct il3945_rate_info il3945_rates[RATE_COUNT_3945] = { - IL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */ - IL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */ - IL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /*5.5mbps */ - IL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */ - IL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */ - IL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */ - IL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */ - IL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */ - IL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */ - IL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */ - IL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */ - IL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV), /* 54mbps */ -}; - -static inline u8 -il3945_get_prev_ieee_rate(u8 rate_idx) -{ - u8 rate = il3945_rates[rate_idx].prev_ieee; - - if (rate == RATE_INVALID) - rate = rate_idx; - return rate; -} - -/* 1 = enable the il3945_disable_events() function */ -#define IL_EVT_DISABLE (0) -#define IL_EVT_DISABLE_SIZE (1532/32) - -/** - * il3945_disable_events - Disable selected events in uCode event log - * - * Disable an event by writing "1"s into "disable" - * bitmap in SRAM. Bit position corresponds to Event # (id/type). - * Default values of 0 enable uCode events to be logged. - * Use for only special debugging. This function is just a placeholder as-is, - * you'll need to provide the special bits! ... - * ... and set IL_EVT_DISABLE to 1. 
*/ -void -il3945_disable_events(struct il_priv *il) -{ - int i; - u32 base; /* SRAM address of event log header */ - u32 disable_ptr; /* SRAM address of event-disable bitmap array */ - u32 array_size; /* # of u32 entries in array */ - static const u32 evt_disable[IL_EVT_DISABLE_SIZE] = { - 0x00000000, /* 31 - 0 Event id numbers */ - 0x00000000, /* 63 - 32 */ - 0x00000000, /* 95 - 64 */ - 0x00000000, /* 127 - 96 */ - 0x00000000, /* 159 - 128 */ - 0x00000000, /* 191 - 160 */ - 0x00000000, /* 223 - 192 */ - 0x00000000, /* 255 - 224 */ - 0x00000000, /* 287 - 256 */ - 0x00000000, /* 319 - 288 */ - 0x00000000, /* 351 - 320 */ - 0x00000000, /* 383 - 352 */ - 0x00000000, /* 415 - 384 */ - 0x00000000, /* 447 - 416 */ - 0x00000000, /* 479 - 448 */ - 0x00000000, /* 511 - 480 */ - 0x00000000, /* 543 - 512 */ - 0x00000000, /* 575 - 544 */ - 0x00000000, /* 607 - 576 */ - 0x00000000, /* 639 - 608 */ - 0x00000000, /* 671 - 640 */ - 0x00000000, /* 703 - 672 */ - 0x00000000, /* 735 - 704 */ - 0x00000000, /* 767 - 736 */ - 0x00000000, /* 799 - 768 */ - 0x00000000, /* 831 - 800 */ - 0x00000000, /* 863 - 832 */ - 0x00000000, /* 895 - 864 */ - 0x00000000, /* 927 - 896 */ - 0x00000000, /* 959 - 928 */ - 0x00000000, /* 991 - 960 */ - 0x00000000, /* 1023 - 992 */ - 0x00000000, /* 1055 - 1024 */ - 0x00000000, /* 1087 - 1056 */ - 0x00000000, /* 1119 - 1088 */ - 0x00000000, /* 1151 - 1120 */ - 0x00000000, /* 1183 - 1152 */ - 0x00000000, /* 1215 - 1184 */ - 0x00000000, /* 1247 - 1216 */ - 0x00000000, /* 1279 - 1248 */ - 0x00000000, /* 1311 - 1280 */ - 0x00000000, /* 1343 - 1312 */ - 0x00000000, /* 1375 - 1344 */ - 0x00000000, /* 1407 - 1376 */ - 0x00000000, /* 1439 - 1408 */ - 0x00000000, /* 1471 - 1440 */ - 0x00000000, /* 1503 - 1472 */ - }; - - base = le32_to_cpu(il->card_alive.log_event_table_ptr); - if (!il3945_hw_valid_rtc_data_addr(base)) { - IL_ERR("Invalid event log pointer 0x%08X\n", base); - return; - } - - disable_ptr = il_read_targ_mem(il, base + (4 * sizeof(u32))); - array_size = il_read_targ_mem(il, base + (5 * sizeof(u32))); - - if (IL_EVT_DISABLE && array_size == IL_EVT_DISABLE_SIZE) { - D_INFO("Disabling selected uCode log events at 0x%x\n", - disable_ptr); - for (i = 0; i < IL_EVT_DISABLE_SIZE; i++) - il_write_targ_mem(il, disable_ptr + (i * sizeof(u32)), - evt_disable[i]); - - } else { - D_INFO("Selected uCode log events may be disabled\n"); - D_INFO(" by writing \"1\"s into disable bitmap\n"); - D_INFO(" in SRAM at 0x%x, size %d u32s\n", disable_ptr, - array_size); - } - -} - -static int -il3945_hwrate_to_plcp_idx(u8 plcp) -{ - int idx; - - for (idx = 0; idx < RATE_COUNT_3945; idx++) - if (il3945_rates[idx].plcp == plcp) - return idx; - return -1; -} - -#ifdef CONFIG_IWLEGACY_DEBUG -#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x - -static const char * -il3945_get_tx_fail_reason(u32 status) -{ - switch (status & TX_STATUS_MSK) { - case TX_3945_STATUS_SUCCESS: - return "SUCCESS"; - TX_STATUS_ENTRY(SHORT_LIMIT); - TX_STATUS_ENTRY(LONG_LIMIT); - TX_STATUS_ENTRY(FIFO_UNDERRUN); - TX_STATUS_ENTRY(MGMNT_ABORT); - TX_STATUS_ENTRY(NEXT_FRAG); - TX_STATUS_ENTRY(LIFE_EXPIRE); - TX_STATUS_ENTRY(DEST_PS); - TX_STATUS_ENTRY(ABORTED); - TX_STATUS_ENTRY(BT_RETRY); - TX_STATUS_ENTRY(STA_INVALID); - TX_STATUS_ENTRY(FRAG_DROPPED); - TX_STATUS_ENTRY(TID_DISABLE); - TX_STATUS_ENTRY(FRAME_FLUSHED); - TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL); - TX_STATUS_ENTRY(TX_LOCKED); - TX_STATUS_ENTRY(NO_BEACON_ON_RADAR); - } - - return "UNKNOWN"; -} -#else -static inline const char * 
-il3945_get_tx_fail_reason(u32 status) -{ - return ""; -} -#endif - -/* - * get ieee prev rate from rate scale table. - * for A and B mode we need to overright prev - * value - */ -int -il3945_rs_next_rate(struct il_priv *il, int rate) -{ - int next_rate = il3945_get_prev_ieee_rate(rate); - - switch (il->band) { - case IEEE80211_BAND_5GHZ: - if (rate == RATE_12M_IDX) - next_rate = RATE_9M_IDX; - else if (rate == RATE_6M_IDX) - next_rate = RATE_6M_IDX; - break; - case IEEE80211_BAND_2GHZ: - if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) && - il_is_associated(il)) { - if (rate == RATE_11M_IDX) - next_rate = RATE_5M_IDX; - } - break; - - default: - break; - } - - return next_rate; -} - -/** - * il3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd - * - * When FW advances 'R' idx, all entries between old and new 'R' idx - * need to be reclaimed. As result, some free space forms. If there is - * enough free space (> low mark), wake the stack that feeds us. - */ -static void -il3945_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx) -{ - struct il_tx_queue *txq = &il->txq[txq_id]; - struct il_queue *q = &txq->q; - struct il_tx_info *tx_info; - - BUG_ON(txq_id == IL39_CMD_QUEUE_NUM); - - for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; - q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) { - - tx_info = &txq->txb[txq->q.read_ptr]; - ieee80211_tx_status_irqsafe(il->hw, tx_info->skb); - tx_info->skb = NULL; - il->cfg->ops->lib->txq_free_tfd(il, txq); - } - - if (il_queue_space(q) > q->low_mark && txq_id >= 0 && - txq_id != IL39_CMD_QUEUE_NUM && il->mac80211_registered) - il_wake_queue(il, txq); -} - -/** - * il3945_hdl_tx - Handle Tx response - */ -static void -il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb) -{ - struct il_rx_pkt *pkt = rxb_addr(rxb); - u16 sequence = le16_to_cpu(pkt->hdr.sequence); - int txq_id = SEQ_TO_QUEUE(sequence); - int idx = SEQ_TO_IDX(sequence); - struct il_tx_queue *txq = &il->txq[txq_id]; - struct ieee80211_tx_info *info; - struct il3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; - u32 status = le32_to_cpu(tx_resp->status); - int rate_idx; - int fail; - - if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) { - IL_ERR("Read idx for DMA queue txq_id (%d) idx %d " - "is out of range [0-%d] %d %d\n", txq_id, idx, - txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr); - return; - } - - txq->time_stamp = jiffies; - info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); - ieee80211_tx_info_clear_status(info); - - /* Fill the MRR chain with some info about on-chip retransmissions */ - rate_idx = il3945_hwrate_to_plcp_idx(tx_resp->rate); - if (info->band == IEEE80211_BAND_5GHZ) - rate_idx -= IL_FIRST_OFDM_RATE; - - fail = tx_resp->failure_frame; - - info->status.rates[0].idx = rate_idx; - info->status.rates[0].count = fail + 1; /* add final attempt */ - - /* tx_status->rts_retry_count = tx_resp->failure_rts; */ - info->flags |= - ((status & TX_STATUS_MSK) == - TX_STATUS_SUCCESS) ? 
IEEE80211_TX_STAT_ACK : 0; - - D_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", txq_id, - il3945_get_tx_fail_reason(status), status, tx_resp->rate, - tx_resp->failure_frame); - - D_TX_REPLY("Tx queue reclaim %d\n", idx); - il3945_tx_queue_reclaim(il, txq_id, idx); - - if (status & TX_ABORT_REQUIRED_MSK) - IL_ERR("TODO: Implement Tx ABORT REQUIRED!!!\n"); -} - -/***************************************************************************** - * - * Intel PRO/Wireless 3945ABG/BG Network Connection - * - * RX handler implementations - * - *****************************************************************************/ -#ifdef CONFIG_IWLEGACY_DEBUGFS -static void -il3945_accumulative_stats(struct il_priv *il, __le32 * stats) -{ - int i; - __le32 *prev_stats; - u32 *accum_stats; - u32 *delta, *max_delta; - - prev_stats = (__le32 *) &il->_3945.stats; - accum_stats = (u32 *) &il->_3945.accum_stats; - delta = (u32 *) &il->_3945.delta_stats; - max_delta = (u32 *) &il->_3945.max_delta; - - for (i = sizeof(__le32); i < sizeof(struct il3945_notif_stats); - i += - sizeof(__le32), stats++, prev_stats++, delta++, max_delta++, - accum_stats++) { - if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) { - *delta = - (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats)); - *accum_stats += *delta; - if (*delta > *max_delta) - *max_delta = *delta; - } - } - - /* reset accumulative stats for "no-counter" type stats */ - il->_3945.accum_stats.general.temperature = - il->_3945.stats.general.temperature; - il->_3945.accum_stats.general.ttl_timestamp = - il->_3945.stats.general.ttl_timestamp; -} -#endif - -void -il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb) -{ - struct il_rx_pkt *pkt = rxb_addr(rxb); - - D_RX("Statistics notification received (%d vs %d).\n", - (int)sizeof(struct il3945_notif_stats), - le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK); -#ifdef CONFIG_IWLEGACY_DEBUGFS - il3945_accumulative_stats(il, (__le32 *) &pkt->u.raw); -#endif - - memcpy(&il->_3945.stats, pkt->u.raw, sizeof(il->_3945.stats)); -} - -void -il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb) -{ - struct il_rx_pkt *pkt = rxb_addr(rxb); - __le32 *flag = (__le32 *) &pkt->u.raw; - - if (le32_to_cpu(*flag) & UCODE_STATS_CLEAR_MSK) { -#ifdef CONFIG_IWLEGACY_DEBUGFS - memset(&il->_3945.accum_stats, 0, - sizeof(struct il3945_notif_stats)); - memset(&il->_3945.delta_stats, 0, - sizeof(struct il3945_notif_stats)); - memset(&il->_3945.max_delta, 0, - sizeof(struct il3945_notif_stats)); -#endif - D_RX("Statistics have been cleared\n"); - } - il3945_hdl_stats(il, rxb); -} - -/****************************************************************************** - * - * Misc. internal state and helper functions - * - ******************************************************************************/ - -/* This is necessary only for a number of stats, see the caller. */ -static int -il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header) -{ - /* Filter incoming packets to determine if they are targeted toward - * this network, discarding packets coming from ourselves */ - switch (il->iw_mode) { - case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */ - /* packets to our IBSS update information */ - return !compare_ether_addr(header->addr3, il->bssid); - case NL80211_IFTYPE_STATION: /* Header: Dest. 
| AP{BSSID} | Source */ - /* packets to our IBSS update information */ - return !compare_ether_addr(header->addr2, il->bssid); - default: - return 1; - } -} - -static void -il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb, - struct ieee80211_rx_status *stats) -{ - struct il_rx_pkt *pkt = rxb_addr(rxb); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt); - struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt); - struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt); - u16 len = le16_to_cpu(rx_hdr->len); - struct sk_buff *skb; - __le16 fc = hdr->frame_control; - - /* We received data from the HW, so stop the watchdog */ - if (unlikely - (len + IL39_RX_FRAME_SIZE > - PAGE_SIZE << il->hw_params.rx_page_order)) { - D_DROP("Corruption detected!\n"); - return; - } - - /* We only process data packets if the interface is open */ - if (unlikely(!il->is_open)) { - D_DROP("Dropping packet while interface is not open.\n"); - return; - } - - skb = dev_alloc_skb(128); - if (!skb) { - IL_ERR("dev_alloc_skb failed\n"); - return; - } - - if (!il3945_mod_params.sw_crypto) - il_set_decrypted_flag(il, (struct ieee80211_hdr *)rxb_addr(rxb), - le32_to_cpu(rx_end->status), stats); - - skb_add_rx_frag(skb, 0, rxb->page, - (void *)rx_hdr->payload - (void *)pkt, len); - - il_update_stats(il, false, fc, len); - memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); - - ieee80211_rx(il->hw, skb); - il->alloc_rxb_page--; - rxb->page = NULL; -} - -#define IL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6) - -static void -il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb) -{ - struct ieee80211_hdr *header; - struct ieee80211_rx_status rx_status; - struct il_rx_pkt *pkt = rxb_addr(rxb); - struct il3945_rx_frame_stats *rx_stats = IL_RX_STATS(pkt); - struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt); - struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt); - u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg); - u16 rx_stats_noise_diff __maybe_unused = - le16_to_cpu(rx_stats->noise_diff); - u8 network_packet; - - rx_status.flag = 0; - rx_status.mactime = le64_to_cpu(rx_end->timestamp); - rx_status.band = - (rx_hdr-> - phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ : - IEEE80211_BAND_5GHZ; - rx_status.freq = - ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel), - rx_status.band); - - rx_status.rate_idx = il3945_hwrate_to_plcp_idx(rx_hdr->rate); - if (rx_status.band == IEEE80211_BAND_5GHZ) - rx_status.rate_idx -= IL_FIRST_OFDM_RATE; - - rx_status.antenna = - (le16_to_cpu(rx_hdr->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> - 4; - - /* set the preamble flag if appropriate */ - if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) - rx_status.flag |= RX_FLAG_SHORTPRE; - - if ((unlikely(rx_stats->phy_count > 20))) { - D_DROP("dsp size out of range [0,20]: %d/n", - rx_stats->phy_count); - return; - } - - if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) || - !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { - D_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status); - return; - } - - /* Convert 3945's rssi indicator to dBm */ - rx_status.signal = rx_stats->rssi - IL39_RSSI_OFFSET; - - D_STATS("Rssi %d sig_avg %d noise_diff %d\n", rx_status.signal, - rx_stats_sig_avg, rx_stats_noise_diff); - - header = (struct ieee80211_hdr *)IL_RX_DATA(pkt); - - network_packet = il3945_is_network_packet(il, header); - - D_STATS("[%c] %d RSSI:%d Signal:%u, Rate:%u\n", - network_packet ? 
'*' : ' ', le16_to_cpu(rx_hdr->channel), - rx_status.signal, rx_status.signal, rx_status.rate_idx); - - il_dbg_log_rx_data_frame(il, le16_to_cpu(rx_hdr->len), header); - - if (network_packet) { - il->_3945.last_beacon_time = - le32_to_cpu(rx_end->beacon_timestamp); - il->_3945.last_tsf = le64_to_cpu(rx_end->timestamp); - il->_3945.last_rx_rssi = rx_status.signal; - } - - il3945_pass_packet_to_mac80211(il, rxb, &rx_status); -} - -int -il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq, - dma_addr_t addr, u16 len, u8 reset, u8 pad) -{ - int count; - struct il_queue *q; - struct il3945_tfd *tfd, *tfd_tmp; - - q = &txq->q; - tfd_tmp = (struct il3945_tfd *)txq->tfds; - tfd = &tfd_tmp[q->write_ptr]; - - if (reset) - memset(tfd, 0, sizeof(*tfd)); - - count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags)); - - if (count >= NUM_TFD_CHUNKS || count < 0) { - IL_ERR("Error can not send more than %d chunks\n", - NUM_TFD_CHUNKS); - return -EINVAL; - } - - tfd->tbs[count].addr = cpu_to_le32(addr); - tfd->tbs[count].len = cpu_to_le32(len); - - count++; - - tfd->control_flags = - cpu_to_le32(TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad)); - - return 0; -} - -/** - * il3945_hw_txq_free_tfd - Free one TFD, those at idx [txq->q.read_ptr] - * - * Does NOT advance any idxes - */ -void -il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq) -{ - struct il3945_tfd *tfd_tmp = (struct il3945_tfd *)txq->tfds; - int idx = txq->q.read_ptr; - struct il3945_tfd *tfd = &tfd_tmp[idx]; - struct pci_dev *dev = il->pci_dev; - int i; - int counter; - - /* sanity check */ - counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags)); - if (counter > NUM_TFD_CHUNKS) { - IL_ERR("Too many chunks: %i\n", counter); - /* @todo issue fatal error, it is quite serious situation */ - return; - } - - /* Unmap tx_cmd */ - if (counter) - pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping), - dma_unmap_len(&txq->meta[idx], len), - PCI_DMA_TODEVICE); - - /* unmap chunks if any */ - - for (i = 1; i < counter; i++) - pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr), - le32_to_cpu(tfd->tbs[i].len), - PCI_DMA_TODEVICE); - - /* free SKB */ - if (txq->txb) { - struct sk_buff *skb; - - skb = txq->txb[txq->q.read_ptr].skb; - - /* can be called from irqs-disabled context */ - if (skb) { - dev_kfree_skb_any(skb); - txq->txb[txq->q.read_ptr].skb = NULL; - } - } -} - -/** - * il3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD: - * -*/ -void -il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd, - struct ieee80211_tx_info *info, - struct ieee80211_hdr *hdr, int sta_id) -{ - u16 hw_value = ieee80211_get_tx_rate(il->hw, info)->hw_value; - u16 rate_idx = min(hw_value & 0xffff, RATE_COUNT_3945 - 1); - u16 rate_mask; - int rate; - const u8 rts_retry_limit = 7; - u8 data_retry_limit; - __le32 tx_flags; - __le16 fc = hdr->frame_control; - struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload; - - rate = il3945_rates[rate_idx].plcp; - tx_flags = tx_cmd->tx_flags; - - /* We need to figure out how to get the sta->supp_rates while - * in this running context */ - rate_mask = RATES_MASK_3945; - - /* Set retry limit on DATA packets and Probe Responses */ - if (ieee80211_is_probe_resp(fc)) - data_retry_limit = 3; - else - data_retry_limit = IL_DEFAULT_TX_RETRY; - tx_cmd->data_retry_limit = data_retry_limit; - /* Set retry limit on RTS packets */ - tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit); - - tx_cmd->rate = rate; - tx_cmd->tx_flags = 
tx_flags; - - /* OFDM */ - tx_cmd->supp_rates[0] = - ((rate_mask & IL_OFDM_RATES_MASK) >> IL_FIRST_OFDM_RATE) & 0xFF; - - /* CCK */ - tx_cmd->supp_rates[1] = (rate_mask & 0xF); - - D_RATE("Tx sta id: %d, rate: %d (plcp), flags: 0x%4X " - "cck/ofdm mask: 0x%x/0x%x\n", sta_id, tx_cmd->rate, - le32_to_cpu(tx_cmd->tx_flags), tx_cmd->supp_rates[1], - tx_cmd->supp_rates[0]); -} - -static u8 -il3945_sync_sta(struct il_priv *il, int sta_id, u16 tx_rate) -{ - unsigned long flags_spin; - struct il_station_entry *station; - - if (sta_id == IL_INVALID_STATION) - return IL_INVALID_STATION; - - spin_lock_irqsave(&il->sta_lock, flags_spin); - station = &il->stations[sta_id]; - - station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK; - station->sta.rate_n_flags = cpu_to_le16(tx_rate); - station->sta.mode = STA_CONTROL_MODIFY_MSK; - il_send_add_sta(il, &station->sta, CMD_ASYNC); - spin_unlock_irqrestore(&il->sta_lock, flags_spin); - - D_RATE("SCALE sync station %d to rate %d\n", sta_id, tx_rate); - return sta_id; -} - -static void -il3945_set_pwr_vmain(struct il_priv *il) -{ -/* - * (for documentation purposes) - * to set power to V_AUX, do - - if (pci_pme_capable(il->pci_dev, PCI_D3cold)) { - il_set_bits_mask_prph(il, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_PWR_SRC_VAUX, - ~APMG_PS_CTRL_MSK_PWR_SRC); - - _il_poll_bit(il, CSR_GPIO_IN, - CSR_GPIO_IN_VAL_VAUX_PWR_SRC, - CSR_GPIO_IN_BIT_AUX_POWER, 5000); - } - */ - - il_set_bits_mask_prph(il, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, - ~APMG_PS_CTRL_MSK_PWR_SRC); - - _il_poll_bit(il, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC, - CSR_GPIO_IN_BIT_AUX_POWER, 5000); -} - -static int -il3945_rx_init(struct il_priv *il, struct il_rx_queue *rxq) -{ - il_wr(il, FH39_RCSR_RBD_BASE(0), rxq->bd_dma); - il_wr(il, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma); - il_wr(il, FH39_RCSR_WPTR(0), 0); - il_wr(il, FH39_RCSR_CONFIG(0), - FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE | - FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE | - FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN | - FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 | (RX_QUEUE_SIZE_LOG - << - FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) - | FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST | (1 << - FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) - | FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH); - - /* fake read to flush all prev I/O */ - il_rd(il, FH39_RSSR_CTRL); - - return 0; -} - -static int -il3945_tx_reset(struct il_priv *il) -{ - - /* bypass mode */ - il_wr_prph(il, ALM_SCD_MODE_REG, 0x2); - - /* RA 0 is active */ - il_wr_prph(il, ALM_SCD_ARASTAT_REG, 0x01); - - /* all 6 fifo are active */ - il_wr_prph(il, ALM_SCD_TXFACT_REG, 0x3f); - - il_wr_prph(il, ALM_SCD_SBYP_MODE_1_REG, 0x010000); - il_wr_prph(il, ALM_SCD_SBYP_MODE_2_REG, 0x030002); - il_wr_prph(il, ALM_SCD_TXF4MF_REG, 0x000004); - il_wr_prph(il, ALM_SCD_TXF5MF_REG, 0x000005); - - il_wr(il, FH39_TSSR_CBB_BASE, il->_3945.shared_phys); - - il_wr(il, FH39_TSSR_MSG_CONFIG, - FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON | - FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON | - FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B | - FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON | - FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON | - FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH | - FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH); - - return 0; -} - -/** - * il3945_txq_ctx_reset - Reset TX queue context - * - * Destroys all DMA structures and initialize them again - */ -static int -il3945_txq_ctx_reset(struct il_priv *il) -{ - int rc; - int txq_id, slots_num; - - 
il3945_hw_txq_ctx_free(il); - - /* allocate tx queue structure */ - rc = il_alloc_txq_mem(il); - if (rc) - return rc; - - /* Tx CMD queue */ - rc = il3945_tx_reset(il); - if (rc) - goto error; - - /* Tx queue(s) */ - for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) { - slots_num = - (txq_id == - IL39_CMD_QUEUE_NUM) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; - rc = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id); - if (rc) { - IL_ERR("Tx %d queue init failed\n", txq_id); - goto error; - } - } - - return rc; - -error: - il3945_hw_txq_ctx_free(il); - return rc; -} - -/* - * Start up 3945's basic functionality after it has been reset - * (e.g. after platform boot, or shutdown via il_apm_stop()) - * NOTE: This does not load uCode nor start the embedded processor - */ -static int -il3945_apm_init(struct il_priv *il) -{ - int ret = il_apm_init(il); - - /* Clear APMG (NIC's internal power management) interrupts */ - il_wr_prph(il, APMG_RTC_INT_MSK_REG, 0x0); - il_wr_prph(il, APMG_RTC_INT_STT_REG, 0xFFFFFFFF); - - /* Reset radio chip */ - il_set_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ); - udelay(5); - il_clear_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ); - - return ret; -} - -static void -il3945_nic_config(struct il_priv *il) -{ - struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; - unsigned long flags; - u8 rev_id = il->pci_dev->revision; - - spin_lock_irqsave(&il->lock, flags); - - /* Determine HW type */ - D_INFO("HW Revision ID = 0x%X\n", rev_id); - - if (rev_id & PCI_CFG_REV_ID_BIT_RTP) - D_INFO("RTP type\n"); - else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) { - D_INFO("3945 RADIO-MB type\n"); - il_set_bit(il, CSR_HW_IF_CONFIG_REG, - CSR39_HW_IF_CONFIG_REG_BIT_3945_MB); - } else { - D_INFO("3945 RADIO-MM type\n"); - il_set_bit(il, CSR_HW_IF_CONFIG_REG, - CSR39_HW_IF_CONFIG_REG_BIT_3945_MM); - } - - if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) { - D_INFO("SKU OP mode is mrc\n"); - il_set_bit(il, CSR_HW_IF_CONFIG_REG, - CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC); - } else - D_INFO("SKU OP mode is basic\n"); - - if ((eeprom->board_revision & 0xF0) == 0xD0) { - D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision); - il_set_bit(il, CSR_HW_IF_CONFIG_REG, - CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); - } else { - D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision); - il_clear_bit(il, CSR_HW_IF_CONFIG_REG, - CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); - } - - if (eeprom->almgor_m_version <= 1) { - il_set_bit(il, CSR_HW_IF_CONFIG_REG, - CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A); - D_INFO("Card M type A version is 0x%X\n", - eeprom->almgor_m_version); - } else { - D_INFO("Card M type B version is 0x%X\n", - eeprom->almgor_m_version); - il_set_bit(il, CSR_HW_IF_CONFIG_REG, - CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B); - } - spin_unlock_irqrestore(&il->lock, flags); - - if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE) - D_RF_KILL("SW RF KILL supported in EEPROM.\n"); - - if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE) - D_RF_KILL("HW RF KILL supported in EEPROM.\n"); -} - -int -il3945_hw_nic_init(struct il_priv *il) -{ - int rc; - unsigned long flags; - struct il_rx_queue *rxq = &il->rxq; - - spin_lock_irqsave(&il->lock, flags); - il->cfg->ops->lib->apm_ops.init(il); - spin_unlock_irqrestore(&il->lock, flags); - - il3945_set_pwr_vmain(il); - - il->cfg->ops->lib->apm_ops.config(il); - - /* Allocate the RX queue, or reset if it is already allocated */ - if (!rxq->bd) { - rc = il_rx_queue_alloc(il); - if (rc) { - 
IL_ERR("Unable to initialize Rx queue\n"); - return -ENOMEM; - } - } else - il3945_rx_queue_reset(il, rxq); - - il3945_rx_replenish(il); - - il3945_rx_init(il, rxq); - - /* Look at using this instead: - rxq->need_update = 1; - il_rx_queue_update_write_ptr(il, rxq); - */ - - il_wr(il, FH39_RCSR_WPTR(0), rxq->write & ~7); - - rc = il3945_txq_ctx_reset(il); - if (rc) - return rc; - - set_bit(S_INIT, &il->status); - - return 0; -} - -/** - * il3945_hw_txq_ctx_free - Free TXQ Context - * - * Destroy all TX DMA queues and structures - */ -void -il3945_hw_txq_ctx_free(struct il_priv *il) -{ - int txq_id; - - /* Tx queues */ - if (il->txq) - for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) - if (txq_id == IL39_CMD_QUEUE_NUM) - il_cmd_queue_free(il); - else - il_tx_queue_free(il, txq_id); - - /* free tx queue structure */ - il_txq_mem(il); -} - -void -il3945_hw_txq_ctx_stop(struct il_priv *il) -{ - int txq_id; - - /* stop SCD */ - il_wr_prph(il, ALM_SCD_MODE_REG, 0); - il_wr_prph(il, ALM_SCD_TXFACT_REG, 0); - - /* reset TFD queues */ - for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) { - il_wr(il, FH39_TCSR_CONFIG(txq_id), 0x0); - il_poll_bit(il, FH39_TSSR_TX_STATUS, - FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id), - 1000); - } - - il3945_hw_txq_ctx_free(il); -} - -/** - * il3945_hw_reg_adjust_power_by_temp - * return idx delta into power gain settings table -*/ -static int -il3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading) -{ - return (new_reading - old_reading) * (-11) / 100; -} - -/** - * il3945_hw_reg_temp_out_of_range - Keep temperature in sane range - */ -static inline int -il3945_hw_reg_temp_out_of_range(int temperature) -{ - return (temperature < -260 || temperature > 25) ? 1 : 0; -} - -int -il3945_hw_get_temperature(struct il_priv *il) -{ - return _il_rd(il, CSR_UCODE_DRV_GP2); -} - -/** - * il3945_hw_reg_txpower_get_temperature - * get the current temperature by reading from NIC -*/ -static int -il3945_hw_reg_txpower_get_temperature(struct il_priv *il) -{ - struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; - int temperature; - - temperature = il3945_hw_get_temperature(il); - - /* driver's okay range is -260 to +25. - * human readable okay range is 0 to +285 */ - D_INFO("Temperature: %d\n", temperature + IL_TEMP_CONVERT); - - /* handle insane temp reading */ - if (il3945_hw_reg_temp_out_of_range(temperature)) { - IL_ERR("Error bad temperature value %d\n", temperature); - - /* if really really hot(?), - * substitute the 3rd band/group's temp measured at factory */ - if (il->last_temperature > 100) - temperature = eeprom->groups[2].temperature; - else /* else use most recent "sane" value from driver */ - temperature = il->last_temperature; - } - - return temperature; /* raw, not "human readable" */ -} - -/* Adjust Txpower only if temperature variance is greater than threshold. - * - * Both are lower than older versions' 9 degrees */ -#define IL_TEMPERATURE_LIMIT_TIMER 6 - -/** - * il3945_is_temp_calib_needed - determines if new calibration is needed - * - * records new temperature in tx_mgr->temperature. - * replaces tx_mgr->last_temperature *only* if calib needed - * (assumes caller will actually do the calibration!). 
*/ -static int -il3945_is_temp_calib_needed(struct il_priv *il) -{ - int temp_diff; - - il->temperature = il3945_hw_reg_txpower_get_temperature(il); - temp_diff = il->temperature - il->last_temperature; - - /* get absolute value */ - if (temp_diff < 0) { - D_POWER("Getting cooler, delta %d,\n", temp_diff); - temp_diff = -temp_diff; - } else if (temp_diff == 0) - D_POWER("Same temp,\n"); - else - D_POWER("Getting warmer, delta %d,\n", temp_diff); - - /* if we don't need calibration, *don't* update last_temperature */ - if (temp_diff < IL_TEMPERATURE_LIMIT_TIMER) { - D_POWER("Timed thermal calib not needed\n"); - return 0; - } - - D_POWER("Timed thermal calib needed\n"); - - /* assume that caller will actually do calib ... - * update the "last temperature" value */ - il->last_temperature = il->temperature; - return 1; -} - -#define IL_MAX_GAIN_ENTRIES 78 -#define IL_CCK_FROM_OFDM_POWER_DIFF -5 -#define IL_CCK_FROM_OFDM_IDX_DIFF (10) - -/* radio and DSP power table, each step is 1/2 dB. - * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */ -static struct il3945_tx_power power_gain_table[2][IL_MAX_GAIN_ENTRIES] = { - { - {251, 127}, /* 2.4 GHz, highest power */ - {251, 127}, - {251, 127}, - {251, 127}, - {251, 125}, - {251, 110}, - {251, 105}, - {251, 98}, - {187, 125}, - {187, 115}, - {187, 108}, - {187, 99}, - {243, 119}, - {243, 111}, - {243, 105}, - {243, 97}, - {243, 92}, - {211, 106}, - {211, 100}, - {179, 120}, - {179, 113}, - {179, 107}, - {147, 125}, - {147, 119}, - {147, 112}, - {147, 106}, - {147, 101}, - {147, 97}, - {147, 91}, - {115, 107}, - {235, 121}, - {235, 115}, - {235, 109}, - {203, 127}, - {203, 121}, - {203, 115}, - {203, 108}, - {203, 102}, - {203, 96}, - {203, 92}, - {171, 110}, - {171, 104}, - {171, 98}, - {139, 116}, - {227, 125}, - {227, 119}, - {227, 113}, - {227, 107}, - {227, 101}, - {227, 96}, - {195, 113}, - {195, 106}, - {195, 102}, - {195, 95}, - {163, 113}, - {163, 106}, - {163, 102}, - {163, 95}, - {131, 113}, - {131, 106}, - {131, 102}, - {131, 95}, - {99, 113}, - {99, 106}, - {99, 102}, - {99, 95}, - {67, 113}, - {67, 106}, - {67, 102}, - {67, 95}, - {35, 113}, - {35, 106}, - {35, 102}, - {35, 95}, - {3, 113}, - {3, 106}, - {3, 102}, - {3, 95} /* 2.4 GHz, lowest power */ - }, - { - {251, 127}, /* 5.x GHz, highest power */ - {251, 120}, - {251, 114}, - {219, 119}, - {219, 101}, - {187, 113}, - {187, 102}, - {155, 114}, - {155, 103}, - {123, 117}, - {123, 107}, - {123, 99}, - {123, 92}, - {91, 108}, - {59, 125}, - {59, 118}, - {59, 109}, - {59, 102}, - {59, 96}, - {59, 90}, - {27, 104}, - {27, 98}, - {27, 92}, - {115, 118}, - {115, 111}, - {115, 104}, - {83, 126}, - {83, 121}, - {83, 113}, - {83, 105}, - {83, 99}, - {51, 118}, - {51, 111}, - {51, 104}, - {51, 98}, - {19, 116}, - {19, 109}, - {19, 102}, - {19, 98}, - {19, 93}, - {171, 113}, - {171, 107}, - {171, 99}, - {139, 120}, - {139, 113}, - {139, 107}, - {139, 99}, - {107, 120}, - {107, 113}, - {107, 107}, - {107, 99}, - {75, 120}, - {75, 113}, - {75, 107}, - {75, 99}, - {43, 120}, - {43, 113}, - {43, 107}, - {43, 99}, - {11, 120}, - {11, 113}, - {11, 107}, - {11, 99}, - {131, 107}, - {131, 99}, - {99, 120}, - {99, 113}, - {99, 107}, - {99, 99}, - {67, 120}, - {67, 113}, - {67, 107}, - {67, 99}, - {35, 120}, - {35, 113}, - {35, 107}, - {35, 99}, - {3, 120} /* 5.x GHz, lowest power */ - } -}; - -static inline u8 -il3945_hw_reg_fix_power_idx(int idx) -{ - if (idx < 0) - return 0; - if (idx >= IL_MAX_GAIN_ENTRIES) - return IL_MAX_GAIN_ENTRIES - 1; - return (u8) idx; -} - -/* Kick off 
thermal recalibration check every 60 seconds */ -#define REG_RECALIB_PERIOD (60) - -/** - * il3945_hw_reg_set_scan_power - Set Tx power for scan probe requests - * - * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK) - * or 6 Mbit (OFDM) rates. - */ -static void -il3945_hw_reg_set_scan_power(struct il_priv *il, u32 scan_tbl_idx, s32 rate_idx, - const s8 *clip_pwrs, - struct il_channel_info *ch_info, int band_idx) -{ - struct il3945_scan_power_info *scan_power_info; - s8 power; - u8 power_idx; - - scan_power_info = &ch_info->scan_pwr_info[scan_tbl_idx]; - - /* use this channel group's 6Mbit clipping/saturation pwr, - * but cap at regulatory scan power restriction (set during init - * based on eeprom channel data) for this channel. */ - power = min(ch_info->scan_power, clip_pwrs[RATE_6M_IDX_TBL]); - - power = min(power, il->tx_power_user_lmt); - scan_power_info->requested_power = power; - - /* find difference between new scan *power* and current "normal" - * Tx *power* for 6Mb. Use this difference (x2) to adjust the - * current "normal" temperature-compensated Tx power *idx* for - * this rate (1Mb or 6Mb) to yield new temp-compensated scan power - * *idx*. */ - power_idx = - ch_info->power_info[rate_idx].power_table_idx - (power - - ch_info-> - power_info - [RATE_6M_IDX_TBL]. - requested_power) * - 2; - - /* store reference idx that we use when adjusting *all* scan - * powers. So we can accommodate user (all channel) or spectrum - * management (single channel) power changes "between" temperature - * feedback compensation procedures. - * don't force fit this reference idx into gain table; it may be a - * negative number. This will help avoid errors when we're at - * the lower bounds (highest gains, for warmest temperatures) - * of the table. */ - - /* don't exceed table bounds for "real" setting */ - power_idx = il3945_hw_reg_fix_power_idx(power_idx); - - scan_power_info->power_table_idx = power_idx; - scan_power_info->tpc.tx_gain = - power_gain_table[band_idx][power_idx].tx_gain; - scan_power_info->tpc.dsp_atten = - power_gain_table[band_idx][power_idx].dsp_atten; -} - -/** - * il3945_send_tx_power - fill in Tx Power command with gain settings - * - * Configures power settings for all rates for the current channel, - * using values from channel info struct, and send to NIC - */ -static int -il3945_send_tx_power(struct il_priv *il) -{ - int rate_idx, i; - const struct il_channel_info *ch_info = NULL; - struct il3945_txpowertable_cmd txpower = { - .channel = il->ctx.active.channel, - }; - u16 chan; - - if (WARN_ONCE - (test_bit(S_SCAN_HW, &il->status), - "TX Power requested while scanning!\n")) - return -EAGAIN; - - chan = le16_to_cpu(il->ctx.active.channel); - - txpower.band = (il->band == IEEE80211_BAND_5GHZ) ? 
0 : 1; - ch_info = il_get_channel_info(il, il->band, chan); - if (!ch_info) { - IL_ERR("Failed to get channel info for channel %d [%d]\n", chan, - il->band); - return -EINVAL; - } - - if (!il_is_channel_valid(ch_info)) { - D_POWER("Not calling TX_PWR_TBL_CMD on " "non-Tx channel.\n"); - return 0; - } - - /* fill cmd with power settings for all rates for current channel */ - /* Fill OFDM rate */ - for (rate_idx = IL_FIRST_OFDM_RATE, i = 0; - rate_idx <= IL39_LAST_OFDM_RATE; rate_idx++, i++) { - - txpower.power[i].tpc = ch_info->power_info[i].tpc; - txpower.power[i].rate = il3945_rates[rate_idx].plcp; - - D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n", - le16_to_cpu(txpower.channel), txpower.band, - txpower.power[i].tpc.tx_gain, - txpower.power[i].tpc.dsp_atten, txpower.power[i].rate); - } - /* Fill CCK rates */ - for (rate_idx = IL_FIRST_CCK_RATE; rate_idx <= IL_LAST_CCK_RATE; - rate_idx++, i++) { - txpower.power[i].tpc = ch_info->power_info[i].tpc; - txpower.power[i].rate = il3945_rates[rate_idx].plcp; - - D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n", - le16_to_cpu(txpower.channel), txpower.band, - txpower.power[i].tpc.tx_gain, - txpower.power[i].tpc.dsp_atten, txpower.power[i].rate); - } - - return il_send_cmd_pdu(il, C_TX_PWR_TBL, - sizeof(struct il3945_txpowertable_cmd), - &txpower); - -} - -/** - * il3945_hw_reg_set_new_power - Configures power tables at new levels - * @ch_info: Channel to update. Uses power_info.requested_power. - * - * Replace requested_power and base_power_idx ch_info fields for - * one channel. - * - * Called if user or spectrum management changes power preferences. - * Takes into account h/w and modulation limitations (clip power). - * - * This does *not* send anything to NIC, just sets up ch_info for one channel. - * - * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to - * properly fill out the scan powers, and actual h/w gain settings, - * and send changes to NIC - */ -static int -il3945_hw_reg_set_new_power(struct il_priv *il, struct il_channel_info *ch_info) -{ - struct il3945_channel_power_info *power_info; - int power_changed = 0; - int i; - const s8 *clip_pwrs; - int power; - - /* Get this chnlgrp's rate-to-max/clip-powers table */ - clip_pwrs = il->_3945.clip_groups[ch_info->group_idx].clip_powers; - - /* Get this channel's rate-to-current-power settings table */ - power_info = ch_info->power_info; - - /* update OFDM Txpower settings */ - for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++, ++power_info) { - int delta_idx; - - /* limit new power to be no more than h/w capability */ - power = min(ch_info->curr_txpow, clip_pwrs[i]); - if (power == power_info->requested_power) - continue; - - /* find difference between old and new requested powers, - * update base (non-temp-compensated) power idx */ - delta_idx = (power - power_info->requested_power) * 2; - power_info->base_power_idx -= delta_idx; - - /* save new requested power value */ - power_info->requested_power = power; - - power_changed = 1; - } - - /* update CCK Txpower settings, based on OFDM 12M setting ... - * ... all CCK power settings for a given channel are the *same*. */ - if (power_changed) { - power = - ch_info->power_info[RATE_12M_IDX_TBL].requested_power + - IL_CCK_FROM_OFDM_POWER_DIFF; - - /* do all CCK rates' il3945_channel_power_info structures */ - for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++) { - power_info->requested_power = power; - power_info->base_power_idx = - ch_info->power_info[RATE_12M_IDX_TBL]. 
- base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF; - ++power_info; - } - } - - return 0; -} - -/** - * il3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel - * - * NOTE: Returned power limit may be less (but not more) than requested, - * based strictly on regulatory (eeprom and spectrum mgt) limitations - * (no consideration for h/w clipping limitations). - */ -static int -il3945_hw_reg_get_ch_txpower_limit(struct il_channel_info *ch_info) -{ - s8 max_power; - -#if 0 - /* if we're using TGd limits, use lower of TGd or EEPROM */ - if (ch_info->tgd_data.max_power != 0) - max_power = - min(ch_info->tgd_data.max_power, - ch_info->eeprom.max_power_avg); - - /* else just use EEPROM limits */ - else -#endif - max_power = ch_info->eeprom.max_power_avg; - - return min(max_power, ch_info->max_power_avg); -} - -/** - * il3945_hw_reg_comp_txpower_temp - Compensate for temperature - * - * Compensate txpower settings of *all* channels for temperature. - * This only accounts for the difference between current temperature - * and the factory calibration temperatures, and bases the new settings - * on the channel's base_power_idx. - * - * If RxOn is "associated", this sends the new Txpower to NIC! - */ -static int -il3945_hw_reg_comp_txpower_temp(struct il_priv *il) -{ - struct il_channel_info *ch_info = NULL; - struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; - int delta_idx; - const s8 *clip_pwrs; /* array of h/w max power levels for each rate */ - u8 a_band; - u8 rate_idx; - u8 scan_tbl_idx; - u8 i; - int ref_temp; - int temperature = il->temperature; - - if (il->disable_tx_power_cal || test_bit(S_SCANNING, &il->status)) { - /* do not perform tx power calibration */ - return 0; - } - /* set up new Tx power info for each and every channel, 2.4 and 5.x */ - for (i = 0; i < il->channel_count; i++) { - ch_info = &il->channel_info[i]; - a_band = il_is_channel_a_band(ch_info); - - /* Get this chnlgrp's factory calibration temperature */ - ref_temp = (s16) eeprom->groups[ch_info->group_idx].temperature; - - /* get power idx adjustment based on current and factory - * temps */ - delta_idx = - il3945_hw_reg_adjust_power_by_temp(temperature, ref_temp); - - /* set tx power value for all rates, OFDM and CCK */ - for (rate_idx = 0; rate_idx < RATE_COUNT_3945; rate_idx++) { - int power_idx = - ch_info->power_info[rate_idx].base_power_idx; - - /* temperature compensate */ - power_idx += delta_idx; - - /* stay within table range */ - power_idx = il3945_hw_reg_fix_power_idx(power_idx); - ch_info->power_info[rate_idx].power_table_idx = - (u8) power_idx; - ch_info->power_info[rate_idx].tpc = - power_gain_table[a_band][power_idx]; - } - - /* Get this chnlgrp's rate-to-max/clip-powers table */ - clip_pwrs = - il->_3945.clip_groups[ch_info->group_idx].clip_powers; - - /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */ - for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES; - scan_tbl_idx++) { - s32 actual_idx = - (scan_tbl_idx == - 0) ? 
RATE_1M_IDX_TBL : RATE_6M_IDX_TBL; - il3945_hw_reg_set_scan_power(il, scan_tbl_idx, - actual_idx, clip_pwrs, - ch_info, a_band); - } - } - - /* send Txpower command for current channel to ucode */ - return il->cfg->ops->lib->send_tx_power(il); -} - -int -il3945_hw_reg_set_txpower(struct il_priv *il, s8 power) -{ - struct il_channel_info *ch_info; - s8 max_power; - u8 a_band; - u8 i; - - if (il->tx_power_user_lmt == power) { - D_POWER("Requested Tx power same as current " "limit: %ddBm.\n", - power); - return 0; - } - - D_POWER("Setting upper limit clamp to %ddBm.\n", power); - il->tx_power_user_lmt = power; - - /* set up new Tx powers for each and every channel, 2.4 and 5.x */ - - for (i = 0; i < il->channel_count; i++) { - ch_info = &il->channel_info[i]; - a_band = il_is_channel_a_band(ch_info); - - /* find minimum power of all user and regulatory constraints - * (does not consider h/w clipping limitations) */ - max_power = il3945_hw_reg_get_ch_txpower_limit(ch_info); - max_power = min(power, max_power); - if (max_power != ch_info->curr_txpow) { - ch_info->curr_txpow = max_power; - - /* this considers the h/w clipping limitations */ - il3945_hw_reg_set_new_power(il, ch_info); - } - } - - /* update txpower settings for all channels, - * send to NIC if associated. */ - il3945_is_temp_calib_needed(il); - il3945_hw_reg_comp_txpower_temp(il); - - return 0; -} - -static int -il3945_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx) -{ - int rc = 0; - struct il_rx_pkt *pkt; - struct il3945_rxon_assoc_cmd rxon_assoc; - struct il_host_cmd cmd = { - .id = C_RXON_ASSOC, - .len = sizeof(rxon_assoc), - .flags = CMD_WANT_SKB, - .data = &rxon_assoc, - }; - const struct il_rxon_cmd *rxon1 = &ctx->staging; - const struct il_rxon_cmd *rxon2 = &ctx->active; - - if (rxon1->flags == rxon2->flags && - rxon1->filter_flags == rxon2->filter_flags && - rxon1->cck_basic_rates == rxon2->cck_basic_rates && - rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) { - D_INFO("Using current RXON_ASSOC. Not resending.\n"); - return 0; - } - - rxon_assoc.flags = ctx->staging.flags; - rxon_assoc.filter_flags = ctx->staging.filter_flags; - rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates; - rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates; - rxon_assoc.reserved = 0; - - rc = il_send_cmd_sync(il, &cmd); - if (rc) - return rc; - - pkt = (struct il_rx_pkt *)cmd.reply_page; - if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { - IL_ERR("Bad return from C_RXON_ASSOC command\n"); - rc = -EIO; - } - - il_free_pages(il, cmd.reply_page); - - return rc; -} - -/** - * il3945_commit_rxon - commit staging_rxon to hardware - * - * The RXON command in staging_rxon is committed to the hardware and - * the active_rxon structure is updated with the new data. This - * function correctly transitions out of the RXON_ASSOC_MSK state if - * a HW tune is required based on the RXON structure changes. 
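Note: before the RXON path below, the power-limit bookkeeping in il3945_hw_reg_set_txpower() and il3945_hw_reg_set_new_power() above boils down to a chain of min() clamps plus a half-dB index shift. A compressed, standalone walk-through follows; every dBm value in it is invented for illustration.

#include <stdio.h>

#define IL_CCK_FROM_OFDM_POWER_DIFF (-5)

static int min_i(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int eeprom_max = 15;	/* hypothetical regulatory max for the channel */
	int chan_max   = 16;	/* hypothetical channel max_power_avg */
	int user_limit = 12;	/* hypothetical user request */
	int clip_12m   = 14;	/* hypothetical h/w clip power for 12M OFDM */
	int old_req    = 14;	/* previously requested 12M power */
	int base_idx   = 40;	/* previously stored base gain index */

	/* regulatory limit, then user clamp, then h/w clipping */
	int limit = min_i(user_limit, min_i(eeprom_max, chan_max));
	int power = min_i(limit, clip_12m);

	/* gain table steps are 1/2 dB: base_power_idx -= (new - old) * 2 */
	base_idx -= (power - old_req) * 2;

	/* every CCK rate tracks the OFDM 12M setting, 5 dB lower */
	int cck_power = power + IL_CCK_FROM_OFDM_POWER_DIFF;

	printf("12M: %d dBm (base idx %d), CCK: %d dBm\n",
	       power, base_idx, cck_power);
	return 0;
}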
- */ -int -il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx) -{ - /* cast away the const for active_rxon in this function */ - struct il3945_rxon_cmd *active_rxon = (void *)&ctx->active; - struct il3945_rxon_cmd *staging_rxon = (void *)&ctx->staging; - int rc = 0; - bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK); - - if (test_bit(S_EXIT_PENDING, &il->status)) - return -EINVAL; - - if (!il_is_alive(il)) - return -1; - - /* always get timestamp with Rx frame */ - staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK; - - /* select antenna */ - staging_rxon->flags &= ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK); - staging_rxon->flags |= il3945_get_antenna_flags(il); - - rc = il_check_rxon_cmd(il, ctx); - if (rc) { - IL_ERR("Invalid RXON configuration. Not committing.\n"); - return -EINVAL; - } - - /* If we don't need to send a full RXON, we can use - * il3945_rxon_assoc_cmd which is used to reconfigure filter - * and other flags for the current radio configuration. */ - if (!il_full_rxon_required(il, &il->ctx)) { - rc = il_send_rxon_assoc(il, &il->ctx); - if (rc) { - IL_ERR("Error setting RXON_ASSOC " - "configuration (%d).\n", rc); - return rc; - } - - memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); - /* - * We do not commit tx power settings while channel changing, - * do it now if tx power changed. - */ - il_set_tx_power(il, il->tx_power_next, false); - return 0; - } - - /* If we are currently associated and the new config requires - * an RXON_ASSOC and the new config wants the associated mask enabled, - * we must clear the associated from the active configuration - * before we apply the new config */ - if (il_is_associated(il) && new_assoc) { - D_INFO("Toggling associated bit on current RXON\n"); - active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; - - /* - * reserved4 and 5 could have been filled by the iwlcore code. - * Let's clear them before pushing to the 3945. - */ - active_rxon->reserved4 = 0; - active_rxon->reserved5 = 0; - rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd), - &il->ctx.active); - - /* If the mask clearing failed then we set - * active_rxon back to what it was previously */ - if (rc) { - active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; - IL_ERR("Error clearing ASSOC_MSK on current " - "configuration (%d).\n", rc); - return rc; - } - il_clear_ucode_stations(il, &il->ctx); - il_restore_stations(il, &il->ctx); - } - - D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n" - "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"), - le16_to_cpu(staging_rxon->channel), staging_rxon->bssid_addr); - - /* - * reserved4 and 5 could have been filled by the iwlcore code. - * Let's clear them before pushing to the 3945. 
- */ - staging_rxon->reserved4 = 0; - staging_rxon->reserved5 = 0; - - il_set_rxon_hwcrypto(il, ctx, !il3945_mod_params.sw_crypto); - - /* Apply the new configuration */ - rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd), - staging_rxon); - if (rc) { - IL_ERR("Error setting new configuration (%d).\n", rc); - return rc; - } - - memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); - - if (!new_assoc) { - il_clear_ucode_stations(il, &il->ctx); - il_restore_stations(il, &il->ctx); - } - - /* If we issue a new RXON command which required a tune then we must - * send a new TXPOWER command or we won't be able to Tx any frames */ - rc = il_set_tx_power(il, il->tx_power_next, true); - if (rc) { - IL_ERR("Error setting Tx power (%d).\n", rc); - return rc; - } - - /* Init the hardware's rate fallback order based on the band */ - rc = il3945_init_hw_rate_table(il); - if (rc) { - IL_ERR("Error setting HW rate table: %02X\n", rc); - return -EIO; - } - - return 0; -} - -/** - * il3945_reg_txpower_periodic - called when time to check our temperature. - * - * -- reset periodic timer - * -- see if temp has changed enough to warrant re-calibration ... if so: - * -- correct coeffs for temp (can reset temp timer) - * -- save this temp as "last", - * -- send new set of gain settings to NIC - * NOTE: This should continue working, even when we're not associated, - * so we can keep our internal table of scan powers current. */ -void -il3945_reg_txpower_periodic(struct il_priv *il) -{ - /* This will kick in the "brute force" - * il3945_hw_reg_comp_txpower_temp() below */ - if (!il3945_is_temp_calib_needed(il)) - goto reschedule; - - /* Set up a new set of temp-adjusted TxPowers, send to NIC. - * This is based *only* on current temperature, - * ignoring any previous power measurements */ - il3945_hw_reg_comp_txpower_temp(il); - -reschedule: - queue_delayed_work(il->workqueue, &il->_3945.thermal_periodic, - REG_RECALIB_PERIOD * HZ); -} - -static void -il3945_bg_reg_txpower_periodic(struct work_struct *work) -{ - struct il_priv *il = container_of(work, struct il_priv, - _3945.thermal_periodic.work); - - if (test_bit(S_EXIT_PENDING, &il->status)) - return; - - mutex_lock(&il->mutex); - il3945_reg_txpower_periodic(il); - mutex_unlock(&il->mutex); -} - -/** - * il3945_hw_reg_get_ch_grp_idx - find the channel-group idx (0-4) for channel. - * - * This function is used when initializing channel-info structs. - * - * NOTE: These channel groups do *NOT* match the bands above! - * These channel groups are based on factory-tested channels; - * on A-band, EEPROM's "group frequency" entries represent the top - * channel in each group 1-4. Group 5 All B/G channels are in group 0. - */ -static u16 -il3945_hw_reg_get_ch_grp_idx(struct il_priv *il, - const struct il_channel_info *ch_info) -{ - struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; - struct il3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0]; - u8 group; - u16 group_idx = 0; /* based on factory calib frequencies */ - u8 grp_channel; - - /* Find the group idx for the channel ... don't use idx 1(?) 
*/ - if (il_is_channel_a_band(ch_info)) { - for (group = 1; group < 5; group++) { - grp_channel = ch_grp[group].group_channel; - if (ch_info->channel <= grp_channel) { - group_idx = group; - break; - } - } - /* group 4 has a few channels *above* its factory cal freq */ - if (group == 5) - group_idx = 4; - } else - group_idx = 0; /* 2.4 GHz, group 0 */ - - D_POWER("Chnl %d mapped to grp %d\n", ch_info->channel, group_idx); - return group_idx; -} - -/** - * il3945_hw_reg_get_matched_power_idx - Interpolate to get nominal idx - * - * Interpolate to get nominal (i.e. at factory calibration temperature) idx - * into radio/DSP gain settings table for requested power. - */ -static int -il3945_hw_reg_get_matched_power_idx(struct il_priv *il, s8 requested_power, - s32 setting_idx, s32 *new_idx) -{ - const struct il3945_eeprom_txpower_group *chnl_grp = NULL; - struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; - s32 idx0, idx1; - s32 power = 2 * requested_power; - s32 i; - const struct il3945_eeprom_txpower_sample *samples; - s32 gains0, gains1; - s32 res; - s32 denominator; - - chnl_grp = &eeprom->groups[setting_idx]; - samples = chnl_grp->samples; - for (i = 0; i < 5; i++) { - if (power == samples[i].power) { - *new_idx = samples[i].gain_idx; - return 0; - } - } - - if (power > samples[1].power) { - idx0 = 0; - idx1 = 1; - } else if (power > samples[2].power) { - idx0 = 1; - idx1 = 2; - } else if (power > samples[3].power) { - idx0 = 2; - idx1 = 3; - } else { - idx0 = 3; - idx1 = 4; - } - - denominator = (s32) samples[idx1].power - (s32) samples[idx0].power; - if (denominator == 0) - return -EINVAL; - gains0 = (s32) samples[idx0].gain_idx * (1 << 19); - gains1 = (s32) samples[idx1].gain_idx * (1 << 19); - res = - gains0 + (gains1 - gains0) * ((s32) power - - (s32) samples[idx0].power) / - denominator + (1 << 18); - *new_idx = res >> 19; - return 0; -} - -static void -il3945_hw_reg_init_channel_groups(struct il_priv *il) -{ - u32 i; - s32 rate_idx; - struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; - const struct il3945_eeprom_txpower_group *group; - - D_POWER("Initializing factory calib info from EEPROM\n"); - - for (i = 0; i < IL_NUM_TX_CALIB_GROUPS; i++) { - s8 *clip_pwrs; /* table of power levels for each rate */ - s8 satur_pwr; /* saturation power for each chnl group */ - group = &eeprom->groups[i]; - - /* sanity check on factory saturation power value */ - if (group->saturation_power < 40) { - IL_WARN("Error: saturation power is %d, " - "less than minimum expected 40\n", - group->saturation_power); - return; - } - - /* - * Derive requested power levels for each rate, based on - * hardware capabilities (saturation power for band). - * Basic value is 3dB down from saturation, with further - * power reductions for highest 3 data rates. These - * backoffs provide headroom for high rate modulation - * power peaks, without too much distortion (clipping). 
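Note: il3945_hw_reg_get_matched_power_idx() above is a small fixed-point interpolator over the five factory calibration samples of a channel group (power is handled in half-dBm steps, hence the doubling). A runnable sketch with invented sample data follows; the real numbers come from the 3945 EEPROM.

#include <stdio.h>

struct sample { int power; int gain_idx; };

static int matched_power_idx(const struct sample s[5], int requested_dbm)
{
	int power = 2 * requested_dbm;	/* half-dBm units, like the EEPROM */
	int idx0, idx1, i;
	long long gains0, gains1, res;

	for (i = 0; i < 5; i++)
		if (power == s[i].power)
			return s[i].gain_idx;

	/* pick the bracketing pair; samples run from highest to lowest power */
	if (power > s[1].power)       { idx0 = 0; idx1 = 1; }
	else if (power > s[2].power)  { idx0 = 1; idx1 = 2; }
	else if (power > s[3].power)  { idx0 = 2; idx1 = 3; }
	else                          { idx0 = 3; idx1 = 4; }

	/* 19-bit fixed point; adding (1 << 18) rounds to nearest */
	gains0 = (long long)s[idx0].gain_idx << 19;
	gains1 = (long long)s[idx1].gain_idx << 19;
	res = gains0 + (gains1 - gains0) * (power - s[idx0].power) /
	      (s[idx1].power - s[idx0].power) + (1 << 18);
	return (int)(res >> 19);
}

int main(void)
{
	/* hypothetical factory samples, highest power first */
	struct sample s[5] = {
		{ 30, 10 }, { 24, 24 }, { 16, 40 }, { 8, 57 }, { -2, 75 }
	};
	printf("gain idx for 10 dBm: %d\n", matched_power_idx(s, 10));
	return 0;
}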
- */ - /* we'll fill in this array with h/w max power levels */ - clip_pwrs = (s8 *) il->_3945.clip_groups[i].clip_powers; - - /* divide factory saturation power by 2 to find -3dB level */ - satur_pwr = (s8) (group->saturation_power >> 1); - - /* fill in channel group's nominal powers for each rate */ - for (rate_idx = 0; rate_idx < RATE_COUNT_3945; - rate_idx++, clip_pwrs++) { - switch (rate_idx) { - case RATE_36M_IDX_TBL: - if (i == 0) /* B/G */ - *clip_pwrs = satur_pwr; - else /* A */ - *clip_pwrs = satur_pwr - 5; - break; - case RATE_48M_IDX_TBL: - if (i == 0) - *clip_pwrs = satur_pwr - 7; - else - *clip_pwrs = satur_pwr - 10; - break; - case RATE_54M_IDX_TBL: - if (i == 0) - *clip_pwrs = satur_pwr - 9; - else - *clip_pwrs = satur_pwr - 12; - break; - default: - *clip_pwrs = satur_pwr; - break; - } - } - } -} - -/** - * il3945_txpower_set_from_eeprom - Set channel power info based on EEPROM - * - * Second pass (during init) to set up il->channel_info - * - * Set up Tx-power settings in our channel info database for each VALID - * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values - * and current temperature. - * - * Since this is based on current temperature (at init time), these values may - * not be valid for very long, but it gives us a starting/default point, - * and allows us to active (i.e. using Tx) scan. - * - * This does *not* write values to NIC, just sets up our internal table. - */ -int -il3945_txpower_set_from_eeprom(struct il_priv *il) -{ - struct il_channel_info *ch_info = NULL; - struct il3945_channel_power_info *pwr_info; - struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; - int delta_idx; - u8 rate_idx; - u8 scan_tbl_idx; - const s8 *clip_pwrs; /* array of power levels for each rate */ - u8 gain, dsp_atten; - s8 power; - u8 pwr_idx, base_pwr_idx, a_band; - u8 i; - int temperature; - - /* save temperature reference, - * so we can determine next time to calibrate */ - temperature = il3945_hw_reg_txpower_get_temperature(il); - il->last_temperature = temperature; - - il3945_hw_reg_init_channel_groups(il); - - /* initialize Tx power info for each and every channel, 2.4 and 5.x */ - for (i = 0, ch_info = il->channel_info; i < il->channel_count; - i++, ch_info++) { - a_band = il_is_channel_a_band(ch_info); - if (!il_is_channel_valid(ch_info)) - continue; - - /* find this channel's channel group (*not* "band") idx */ - ch_info->group_idx = il3945_hw_reg_get_ch_grp_idx(il, ch_info); - - /* Get this chnlgrp's rate->max/clip-powers table */ - clip_pwrs = - il->_3945.clip_groups[ch_info->group_idx].clip_powers; - - /* calculate power idx *adjustment* value according to - * diff between current temperature and factory temperature */ - delta_idx = - il3945_hw_reg_adjust_power_by_temp(temperature, - eeprom->groups[ch_info-> - group_idx]. - temperature); - - D_POWER("Delta idx for channel %d: %d [%d]\n", ch_info->channel, - delta_idx, temperature + IL_TEMP_CONVERT); - - /* set tx power value for all OFDM rates */ - for (rate_idx = 0; rate_idx < IL_OFDM_RATES; rate_idx++) { - s32 uninitialized_var(power_idx); - int rc; - - /* use channel group's clip-power table, - * but don't exceed channel's max power */ - s8 pwr = min(ch_info->max_power_avg, - clip_pwrs[rate_idx]); - - pwr_info = &ch_info->power_info[rate_idx]; - - /* get base (i.e. 
at factory-measured temperature) - * power table idx for this rate's power */ - rc = il3945_hw_reg_get_matched_power_idx(il, pwr, - ch_info-> - group_idx, - &power_idx); - if (rc) { - IL_ERR("Invalid power idx\n"); - return rc; - } - pwr_info->base_power_idx = (u8) power_idx; - - /* temperature compensate */ - power_idx += delta_idx; - - /* stay within range of gain table */ - power_idx = il3945_hw_reg_fix_power_idx(power_idx); - - /* fill 1 OFDM rate's il3945_channel_power_info struct */ - pwr_info->requested_power = pwr; - pwr_info->power_table_idx = (u8) power_idx; - pwr_info->tpc.tx_gain = - power_gain_table[a_band][power_idx].tx_gain; - pwr_info->tpc.dsp_atten = - power_gain_table[a_band][power_idx].dsp_atten; - } - - /* set tx power for CCK rates, based on OFDM 12 Mbit settings */ - pwr_info = &ch_info->power_info[RATE_12M_IDX_TBL]; - power = pwr_info->requested_power + IL_CCK_FROM_OFDM_POWER_DIFF; - pwr_idx = pwr_info->power_table_idx + IL_CCK_FROM_OFDM_IDX_DIFF; - base_pwr_idx = - pwr_info->base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF; - - /* stay within table range */ - pwr_idx = il3945_hw_reg_fix_power_idx(pwr_idx); - gain = power_gain_table[a_band][pwr_idx].tx_gain; - dsp_atten = power_gain_table[a_band][pwr_idx].dsp_atten; - - /* fill each CCK rate's il3945_channel_power_info structure - * NOTE: All CCK-rate Txpwrs are the same for a given chnl! - * NOTE: CCK rates start at end of OFDM rates! */ - for (rate_idx = 0; rate_idx < IL_CCK_RATES; rate_idx++) { - pwr_info = - &ch_info->power_info[rate_idx + IL_OFDM_RATES]; - pwr_info->requested_power = power; - pwr_info->power_table_idx = pwr_idx; - pwr_info->base_power_idx = base_pwr_idx; - pwr_info->tpc.tx_gain = gain; - pwr_info->tpc.dsp_atten = dsp_atten; - } - - /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */ - for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES; - scan_tbl_idx++) { - s32 actual_idx = - (scan_tbl_idx == - 0) ? RATE_1M_IDX_TBL : RATE_6M_IDX_TBL; - il3945_hw_reg_set_scan_power(il, scan_tbl_idx, - actual_idx, clip_pwrs, - ch_info, a_band); - } - } - - return 0; -} - -int -il3945_hw_rxq_stop(struct il_priv *il) -{ - int rc; - - il_wr(il, FH39_RCSR_CONFIG(0), 0); - rc = il_poll_bit(il, FH39_RSSR_STATUS, - FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); - if (rc < 0) - IL_ERR("Can't stop Rx DMA.\n"); - - return 0; -} - -int -il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq) -{ - int txq_id = txq->q.id; - - struct il3945_shared *shared_data = il->_3945.shared_virt; - - shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32) txq->q.dma_addr); - - il_wr(il, FH39_CBCC_CTRL(txq_id), 0); - il_wr(il, FH39_CBCC_BASE(txq_id), 0); - - il_wr(il, FH39_TCSR_CONFIG(txq_id), - FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT | - FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF | - FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD | - FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL | - FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE); - - /* fake read to flush all prev. 
writes */ - _il_rd(il, FH39_TSSR_CBB_BASE); - - return 0; -} - -/* - * HCMD utils - */ -static u16 -il3945_get_hcmd_size(u8 cmd_id, u16 len) -{ - switch (cmd_id) { - case C_RXON: - return sizeof(struct il3945_rxon_cmd); - case C_POWER_TBL: - return sizeof(struct il3945_powertable_cmd); - default: - return len; - } -} - -static u16 -il3945_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data) -{ - struct il3945_addsta_cmd *addsta = (struct il3945_addsta_cmd *)data; - addsta->mode = cmd->mode; - memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify)); - memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo)); - addsta->station_flags = cmd->station_flags; - addsta->station_flags_msk = cmd->station_flags_msk; - addsta->tid_disable_tx = cpu_to_le16(0); - addsta->rate_n_flags = cmd->rate_n_flags; - addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid; - addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid; - addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn; - - return (u16) sizeof(struct il3945_addsta_cmd); -} - -static int -il3945_add_bssid_station(struct il_priv *il, const u8 * addr, u8 * sta_id_r) -{ - struct il_rxon_context *ctx = &il->ctx; - int ret; - u8 sta_id; - unsigned long flags; - - if (sta_id_r) - *sta_id_r = IL_INVALID_STATION; - - ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id); - if (ret) { - IL_ERR("Unable to add station %pM\n", addr); - return ret; - } - - if (sta_id_r) - *sta_id_r = sta_id; - - spin_lock_irqsave(&il->sta_lock, flags); - il->stations[sta_id].used |= IL_STA_LOCAL; - spin_unlock_irqrestore(&il->sta_lock, flags); - - return 0; -} - -static int -il3945_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif, - bool add) -{ - struct il_vif_priv *vif_priv = (void *)vif->drv_priv; - int ret; - - if (add) { - ret = - il3945_add_bssid_station(il, vif->bss_conf.bssid, - &vif_priv->ibss_bssid_sta_id); - if (ret) - return ret; - - il3945_sync_sta(il, vif_priv->ibss_bssid_sta_id, - (il->band == - IEEE80211_BAND_5GHZ) ? 
RATE_6M_PLCP : - RATE_1M_PLCP); - il3945_rate_scale_init(il->hw, vif_priv->ibss_bssid_sta_id); - - return 0; - } - - return il_remove_station(il, vif_priv->ibss_bssid_sta_id, - vif->bss_conf.bssid); -} - -/** - * il3945_init_hw_rate_table - Initialize the hardware rate fallback table - */ -int -il3945_init_hw_rate_table(struct il_priv *il) -{ - int rc, i, idx, prev_idx; - struct il3945_rate_scaling_cmd rate_cmd = { - .reserved = {0, 0, 0}, - }; - struct il3945_rate_scaling_info *table = rate_cmd.table; - - for (i = 0; i < ARRAY_SIZE(il3945_rates); i++) { - idx = il3945_rates[i].table_rs_idx; - - table[idx].rate_n_flags = cpu_to_le16(il3945_rates[i].plcp); - table[idx].try_cnt = il->retry_rate; - prev_idx = il3945_get_prev_ieee_rate(i); - table[idx].next_rate_idx = il3945_rates[prev_idx].table_rs_idx; - } - - switch (il->band) { - case IEEE80211_BAND_5GHZ: - D_RATE("Select A mode rate scale\n"); - /* If one of the following CCK rates is used, - * have it fall back to the 6M OFDM rate */ - for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++) - table[i].next_rate_idx = - il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx; - - /* Don't fall back to CCK rates */ - table[RATE_12M_IDX_TBL].next_rate_idx = RATE_9M_IDX_TBL; - - /* Don't drop out of OFDM rates */ - table[RATE_6M_IDX_TBL].next_rate_idx = - il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx; - break; - - case IEEE80211_BAND_2GHZ: - D_RATE("Select B/G mode rate scale\n"); - /* If an OFDM rate is used, have it fall back to the - * 1M CCK rates */ - - if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) && - il_is_associated(il)) { - - idx = IL_FIRST_CCK_RATE; - for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++) - table[i].next_rate_idx = - il3945_rates[idx].table_rs_idx; - - idx = RATE_11M_IDX_TBL; - /* CCK shouldn't fall back to OFDM... 
*/ - table[idx].next_rate_idx = RATE_5M_IDX_TBL; - } - break; - - default: - WARN_ON(1); - break; - } - - /* Update the rate scaling for control frame Tx */ - rate_cmd.table_id = 0; - rc = il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd); - if (rc) - return rc; - - /* Update the rate scaling for data frame Tx */ - rate_cmd.table_id = 1; - return il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd); -} - -/* Called when initializing driver */ -int -il3945_hw_set_hw_params(struct il_priv *il) -{ - memset((void *)&il->hw_params, 0, sizeof(struct il_hw_params)); - - il->_3945.shared_virt = - dma_alloc_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared), - &il->_3945.shared_phys, GFP_KERNEL); - if (!il->_3945.shared_virt) { - IL_ERR("failed to allocate pci memory\n"); - return -ENOMEM; - } - - /* Assign number of Usable TX queues */ - il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues; - - il->hw_params.tfd_size = sizeof(struct il3945_tfd); - il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_3K); - il->hw_params.max_rxq_size = RX_QUEUE_SIZE; - il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; - il->hw_params.max_stations = IL3945_STATION_COUNT; - il->ctx.bcast_sta_id = IL3945_BROADCAST_ID; - - il->sta_key_max_num = STA_KEY_MAX_NUM; - - il->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR; - il->hw_params.max_beacon_itrvl = IL39_MAX_UCODE_BEACON_INTERVAL; - il->hw_params.beacon_time_tsf_bits = IL3945_EXT_BEACON_TIME_POS; - - return 0; -} - -unsigned int -il3945_hw_get_beacon_cmd(struct il_priv *il, struct il3945_frame *frame, - u8 rate) -{ - struct il3945_tx_beacon_cmd *tx_beacon_cmd; - unsigned int frame_size; - - tx_beacon_cmd = (struct il3945_tx_beacon_cmd *)&frame->u; - memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); - - tx_beacon_cmd->tx.sta_id = il->ctx.bcast_sta_id; - tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; - - frame_size = - il3945_fill_beacon_frame(il, tx_beacon_cmd->frame, - sizeof(frame->u) - sizeof(*tx_beacon_cmd)); - - BUG_ON(frame_size > MAX_MPDU_SIZE); - tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size); - - tx_beacon_cmd->tx.rate = rate; - tx_beacon_cmd->tx.tx_flags = - (TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK); - - /* supp_rates[0] == OFDM start at IL_FIRST_OFDM_RATE */ - tx_beacon_cmd->tx.supp_rates[0] = - (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF; - - tx_beacon_cmd->tx.supp_rates[1] = (IL_CCK_BASIC_RATES_MASK & 0xF); - - return sizeof(struct il3945_tx_beacon_cmd) + frame_size; -} - -void -il3945_hw_handler_setup(struct il_priv *il) -{ - il->handlers[C_TX] = il3945_hdl_tx; - il->handlers[N_3945_RX] = il3945_hdl_rx; -} - -void -il3945_hw_setup_deferred_work(struct il_priv *il) -{ - INIT_DELAYED_WORK(&il->_3945.thermal_periodic, - il3945_bg_reg_txpower_periodic); -} - -void -il3945_hw_cancel_deferred_work(struct il_priv *il) -{ - cancel_delayed_work(&il->_3945.thermal_periodic); -} - -/* check contents of special bootstrap uCode SRAM */ -static int -il3945_verify_bsm(struct il_priv *il) -{ - __le32 *image = il->ucode_boot.v_addr; - u32 len = il->ucode_boot.len; - u32 reg; - u32 val; - - D_INFO("Begin verify bsm\n"); - - /* verify BSM SRAM contents */ - val = il_rd_prph(il, BSM_WR_DWCOUNT_REG); - for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len; - reg += sizeof(u32), image++) { - val = il_rd_prph(il, reg); - if (val != le32_to_cpu(*image)) { - IL_ERR("BSM uCode verification failed at " - "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", - BSM_SRAM_LOWER_BOUND, reg - 
BSM_SRAM_LOWER_BOUND, - len, val, le32_to_cpu(*image)); - return -EIO; - } - } - - D_INFO("BSM bootstrap uCode image OK\n"); - - return 0; -} - -/****************************************************************************** - * - * EEPROM related functions - * - ******************************************************************************/ - -/* - * Clear the OWNER_MSK, to establish driver (instead of uCode running on - * embedded controller) as EEPROM reader; each read is a series of pulses - * to/from the EEPROM chip, not a single event, so even reads could conflict - * if they weren't arbitrated by some ownership mechanism. Here, the driver - * simply claims ownership, which should be safe when this function is called - * (i.e. before loading uCode!). - */ -static int -il3945_eeprom_acquire_semaphore(struct il_priv *il) -{ - _il_clear_bit(il, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK); - return 0; -} - -static void -il3945_eeprom_release_semaphore(struct il_priv *il) -{ - return; -} - - /** - * il3945_load_bsm - Load bootstrap instructions - * - * BSM operation: - * - * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program - * in special SRAM that does not power down during RFKILL. When powering back - * up after power-saving sleeps (or during initial uCode load), the BSM loads - * the bootstrap program into the on-board processor, and starts it. - * - * The bootstrap program loads (via DMA) instructions and data for a new - * program from host DRAM locations indicated by the host driver in the - * BSM_DRAM_* registers. Once the new program is loaded, it starts - * automatically. - * - * When initializing the NIC, the host driver points the BSM to the - * "initialize" uCode image. This uCode sets up some internal data, then - * notifies host via "initialize alive" that it is complete. - * - * The host then replaces the BSM_DRAM_* pointer values to point to the - * normal runtime uCode instructions and a backup uCode data cache buffer - * (filled initially with starting data values for the on-board processor), - * then triggers the "initialize" uCode to load and launch the runtime uCode, - * which begins normal operation. - * - * When doing a power-save shutdown, runtime uCode saves data SRAM into - * the backup data cache in DRAM before SRAM is powered down. - * - * When powering back up, the BSM loads the bootstrap program. This reloads - * the runtime uCode instructions and the backup data cache into SRAM, - * and re-launches the runtime uCode from where it left off. - */ -static int -il3945_load_bsm(struct il_priv *il) -{ - __le32 *image = il->ucode_boot.v_addr; - u32 len = il->ucode_boot.len; - dma_addr_t pinst; - dma_addr_t pdata; - u32 inst_len; - u32 data_len; - int rc; - int i; - u32 done; - u32 reg_offset; - - D_INFO("Begin load bsm\n"); - - /* make sure bootstrap program is no larger than BSM's SRAM size */ - if (len > IL39_MAX_BSM_SIZE) - return -EINVAL; - - /* Tell bootstrap uCode where to find the "Initialize" uCode - * in host DRAM ... host DRAM physical address bits 31:0 for 3945. - * NOTE: il3945_initialize_alive_start() will replace these values, - * after the "initialize" uCode has run, to point to - * runtime/protocol instructions and backup data cache. 
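Note: the block comment above compresses the whole boot story into prose; as a conceptual model only (none of this is driver code), the sequence it describes is sketched below.

#include <stdio.h>

enum bsm_stage {
	BSM_BOOTSTRAP_LOADED,	/* host fills BSM SRAM; BSM copies it to the CPU */
	INIT_UCODE_RUNNING,	/* "initialize" image sets up internal data */
	INIT_ALIVE,		/* host re-points BSM_DRAM_* at the runtime image */
	RUNTIME_RUNNING,	/* normal operation; data SRAM is backed up to DRAM
				 * before power-save and restored by the BSM on wake */
};

int main(void)
{
	static const char *const names[] = {
		"bootstrap loaded into BSM SRAM",
		"\"initialize\" uCode running",
		"initialize alive received, BSM re-pointed at runtime image",
		"runtime uCode running",
	};
	for (int s = BSM_BOOTSTRAP_LOADED; s <= RUNTIME_RUNNING; s++)
		printf("stage %d: %s\n", s, names[s]);
	return 0;
}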
*/ - pinst = il->ucode_init.p_addr; - pdata = il->ucode_init_data.p_addr; - inst_len = il->ucode_init.len; - data_len = il->ucode_init_data.len; - - il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst); - il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata); - il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); - il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); - - /* Fill BSM memory with bootstrap instructions */ - for (reg_offset = BSM_SRAM_LOWER_BOUND; - reg_offset < BSM_SRAM_LOWER_BOUND + len; - reg_offset += sizeof(u32), image++) - _il_wr_prph(il, reg_offset, le32_to_cpu(*image)); - - rc = il3945_verify_bsm(il); - if (rc) - return rc; - - /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ - il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0); - il_wr_prph(il, BSM_WR_MEM_DST_REG, IL39_RTC_INST_LOWER_BOUND); - il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); - - /* Load bootstrap code into instruction SRAM now, - * to prepare to load "initialize" uCode */ - il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START); - - /* Wait for load of bootstrap uCode to finish */ - for (i = 0; i < 100; i++) { - done = il_rd_prph(il, BSM_WR_CTRL_REG); - if (!(done & BSM_WR_CTRL_REG_BIT_START)) - break; - udelay(10); - } - if (i < 100) - D_INFO("BSM write complete, poll %d iterations\n", i); - else { - IL_ERR("BSM write did not complete!\n"); - return -EIO; - } - - /* Enable future boot loads whenever power management unit triggers it - * (e.g. when powering back up after power-save shutdown) */ - il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN); - - return 0; -} - -static struct il_hcmd_ops il3945_hcmd = { - .rxon_assoc = il3945_send_rxon_assoc, - .commit_rxon = il3945_commit_rxon, -}; - -static struct il_lib_ops il3945_lib = { - .txq_attach_buf_to_tfd = il3945_hw_txq_attach_buf_to_tfd, - .txq_free_tfd = il3945_hw_txq_free_tfd, - .txq_init = il3945_hw_tx_queue_init, - .load_ucode = il3945_load_bsm, - .dump_nic_error_log = il3945_dump_nic_error_log, - .apm_ops = { - .init = il3945_apm_init, - .config = il3945_nic_config, - }, - .eeprom_ops = { - .regulatory_bands = { - EEPROM_REGULATORY_BAND_1_CHANNELS, - EEPROM_REGULATORY_BAND_2_CHANNELS, - EEPROM_REGULATORY_BAND_3_CHANNELS, - EEPROM_REGULATORY_BAND_4_CHANNELS, - EEPROM_REGULATORY_BAND_5_CHANNELS, - EEPROM_REGULATORY_BAND_NO_HT40, - EEPROM_REGULATORY_BAND_NO_HT40, - }, - .acquire_semaphore = il3945_eeprom_acquire_semaphore, - .release_semaphore = il3945_eeprom_release_semaphore, - }, - .send_tx_power = il3945_send_tx_power, - .is_valid_rtc_data_addr = il3945_hw_valid_rtc_data_addr, - -#ifdef CONFIG_IWLEGACY_DEBUGFS - .debugfs_ops = { - .rx_stats_read = il3945_ucode_rx_stats_read, - .tx_stats_read = il3945_ucode_tx_stats_read, - .general_stats_read = il3945_ucode_general_stats_read, - }, -#endif -}; - -static const struct il_legacy_ops il3945_legacy_ops = { - .post_associate = il3945_post_associate, - .config_ap = il3945_config_ap, - .manage_ibss_station = il3945_manage_ibss_station, -}; - -static struct il_hcmd_utils_ops il3945_hcmd_utils = { - .get_hcmd_size = il3945_get_hcmd_size, - .build_addsta_hcmd = il3945_build_addsta_hcmd, - .request_scan = il3945_request_scan, - .post_scan = il3945_post_scan, -}; - -static const struct il_ops il3945_ops = { - .lib = &il3945_lib, - .hcmd = &il3945_hcmd, - .utils = &il3945_hcmd_utils, - .led = &il3945_led_ops, - .legacy = &il3945_legacy_ops, - .ieee80211_ops = &il3945_hw_ops, -}; - -static struct il_base_params il3945_base_params = { - .eeprom_size = IL3945_EEPROM_IMG_SIZE, - 
.num_of_queues = IL39_NUM_QUEUES, - .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL, - .set_l0s = false, - .use_bsm = true, - .led_compensation = 64, - .wd_timeout = IL_DEF_WD_TIMEOUT, -}; - -static struct il_cfg il3945_bg_cfg = { - .name = "3945BG", - .fw_name_pre = IL3945_FW_PRE, - .ucode_api_max = IL3945_UCODE_API_MAX, - .ucode_api_min = IL3945_UCODE_API_MIN, - .sku = IL_SKU_G, - .eeprom_ver = EEPROM_3945_EEPROM_VERSION, - .ops = &il3945_ops, - .mod_params = &il3945_mod_params, - .base_params = &il3945_base_params, - .led_mode = IL_LED_BLINK, -}; - -static struct il_cfg il3945_abg_cfg = { - .name = "3945ABG", - .fw_name_pre = IL3945_FW_PRE, - .ucode_api_max = IL3945_UCODE_API_MAX, - .ucode_api_min = IL3945_UCODE_API_MIN, - .sku = IL_SKU_A | IL_SKU_G, - .eeprom_ver = EEPROM_3945_EEPROM_VERSION, - .ops = &il3945_ops, - .mod_params = &il3945_mod_params, - .base_params = &il3945_base_params, - .led_mode = IL_LED_BLINK, -}; - -DEFINE_PCI_DEVICE_TABLE(il3945_hw_card_ids) = { - {IL_PCI_DEVICE(0x4222, 0x1005, il3945_bg_cfg)}, - {IL_PCI_DEVICE(0x4222, 0x1034, il3945_bg_cfg)}, - {IL_PCI_DEVICE(0x4222, 0x1044, il3945_bg_cfg)}, - {IL_PCI_DEVICE(0x4227, 0x1014, il3945_bg_cfg)}, - {IL_PCI_DEVICE(0x4222, PCI_ANY_ID, il3945_abg_cfg)}, - {IL_PCI_DEVICE(0x4227, PCI_ANY_ID, il3945_abg_cfg)}, - {0} -}; - -MODULE_DEVICE_TABLE(pci, il3945_hw_card_ids); diff --git a/trunk/drivers/net/wireless/iwlegacy/3945.h b/trunk/drivers/net/wireless/iwlegacy/3945.h deleted file mode 100644 index 9f42f79f8778..000000000000 --- a/trunk/drivers/net/wireless/iwlegacy/3945.h +++ /dev/null @@ -1,607 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#ifndef __il_3945_h__ -#define __il_3945_h__ - -#include /* for struct pci_device_id */ -#include -#include - -/* Hardware specific file defines the PCI IDs table for that hardware module */ -extern const struct pci_device_id il3945_hw_card_ids[]; - -#include "common.h" - -/* Highest firmware API version supported */ -#define IL3945_UCODE_API_MAX 2 - -/* Lowest firmware API version supported */ -#define IL3945_UCODE_API_MIN 1 - -#define IL3945_FW_PRE "iwlwifi-3945-" -#define _IL3945_MODULE_FIRMWARE(api) IL3945_FW_PRE #api ".ucode" -#define IL3945_MODULE_FIRMWARE(api) _IL3945_MODULE_FIRMWARE(api) - -/* Default noise level to report when noise measurement is not available. 
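Note: stepping back to the firmware-name macros a few lines up, the two-level _IL3945_MODULE_FIRMWARE() indirection makes the preprocessor expand the API number before '#' stringizes it. A compilable demo of the expansion:

#include <stdio.h>

#define IL3945_UCODE_API_MAX 2
#define IL3945_FW_PRE "iwlwifi-3945-"
#define _IL3945_MODULE_FIRMWARE(api) IL3945_FW_PRE #api ".ucode"
#define IL3945_MODULE_FIRMWARE(api) _IL3945_MODULE_FIRMWARE(api)

int main(void)
{
	/* prints "iwlwifi-3945-2.ucode" */
	puts(IL3945_MODULE_FIRMWARE(IL3945_UCODE_API_MAX));
	return 0;
}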
- * This may be because we're: - * 1) Not associated (4965, no beacon stats being sent to driver) - * 2) Scanning (noise measurement does not apply to associated channel) - * 3) Receiving CCK (3945 delivers noise info only for OFDM frames) - * Use default noise value of -127 ... this is below the range of measurable - * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user. - * Also, -127 works better than 0 when averaging frames with/without - * noise info (e.g. averaging might be done in app); measured dBm values are - * always negative ... using a negative value as the default keeps all - * averages within an s8's (used in some apps) range of negative values. */ -#define IL_NOISE_MEAS_NOT_AVAILABLE (-127) - -/* Module parameters accessible from iwl-*.c */ -extern struct il_mod_params il3945_mod_params; - -struct il3945_rate_scale_data { - u64 data; - s32 success_counter; - s32 success_ratio; - s32 counter; - s32 average_tpt; - unsigned long stamp; -}; - -struct il3945_rs_sta { - spinlock_t lock; - struct il_priv *il; - s32 *expected_tpt; - unsigned long last_partial_flush; - unsigned long last_flush; - u32 flush_time; - u32 last_tx_packets; - u32 tx_packets; - u8 tgg; - u8 flush_pending; - u8 start_rate; - struct timer_list rate_scale_flush; - struct il3945_rate_scale_data win[RATE_COUNT_3945]; -#ifdef CONFIG_MAC80211_DEBUGFS - struct dentry *rs_sta_dbgfs_stats_table_file; -#endif - - /* used to be in sta_info */ - int last_txrate_idx; -}; - -/* - * The common struct MUST be first because it is shared between - * 3945 and 4965! - */ -struct il3945_sta_priv { - struct il_station_priv_common common; - struct il3945_rs_sta rs_sta; -}; - -enum il3945_antenna { - IL_ANTENNA_DIVERSITY, - IL_ANTENNA_MAIN, - IL_ANTENNA_AUX -}; - -/* - * RTS threshold here is total size [2347] minus 4 FCS bytes - * Per spec: - * a value of 0 means RTS on all data/management packets - * a value > max MSDU size means no RTS - * else RTS for data/management frames where MPDU is larger - * than RTS value. 
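Note: the RTS rule spelled out in this comment maps onto a three-way check. The sketch below is just that rule, using the MAX_MSDU_SIZE value defined right after the comment and arbitrary example frame sizes; it is not the driver's actual decision path.

#include <stdbool.h>
#include <stdio.h>

#define MAX_MSDU_SIZE 2304U

static bool needs_rts(unsigned int mpdu_len, unsigned int rts_threshold)
{
	if (rts_threshold == 0)			/* 0: RTS before everything */
		return true;
	if (rts_threshold > MAX_MSDU_SIZE)	/* above max MSDU: never RTS */
		return false;
	return mpdu_len > rts_threshold;	/* otherwise: only large MPDUs */
}

int main(void)
{
	printf("%d %d %d\n",
	       needs_rts(1500, 0),	/* 1: always */
	       needs_rts(1500, 2500),	/* 0: threshold above max MSDU */
	       needs_rts(1500, 500));	/* 1: MPDU larger than threshold */
	return 0;
}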
- */ -#define DEFAULT_RTS_THRESHOLD 2347U -#define MIN_RTS_THRESHOLD 0U -#define MAX_RTS_THRESHOLD 2347U -#define MAX_MSDU_SIZE 2304U -#define MAX_MPDU_SIZE 2346U -#define DEFAULT_BEACON_INTERVAL 100U -#define DEFAULT_SHORT_RETRY_LIMIT 7U -#define DEFAULT_LONG_RETRY_LIMIT 4U - -#define IL_TX_FIFO_AC0 0 -#define IL_TX_FIFO_AC1 1 -#define IL_TX_FIFO_AC2 2 -#define IL_TX_FIFO_AC3 3 -#define IL_TX_FIFO_HCCA_1 5 -#define IL_TX_FIFO_HCCA_2 6 -#define IL_TX_FIFO_NONE 7 - -#define IEEE80211_DATA_LEN 2304 -#define IEEE80211_4ADDR_LEN 30 -#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) -#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) - -struct il3945_frame { - union { - struct ieee80211_hdr frame; - struct il3945_tx_beacon_cmd beacon; - u8 raw[IEEE80211_FRAME_LEN]; - u8 cmd[360]; - } u; - struct list_head list; -}; - -#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) -#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) -#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) - -#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 -#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 -#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 - -#define IL_SUPPORTED_RATES_IE_LEN 8 - -#define SCAN_INTERVAL 100 - -#define MAX_TID_COUNT 9 - -#define IL_INVALID_RATE 0xFF -#define IL_INVALID_VALUE -1 - -#define STA_PS_STATUS_WAKE 0 -#define STA_PS_STATUS_SLEEP 1 - -struct il3945_ibss_seq { - u8 mac[ETH_ALEN]; - u16 seq_num; - u16 frag_num; - unsigned long packet_time; - struct list_head list; -}; - -#define IL_RX_HDR(x) ((struct il3945_rx_frame_hdr *)(\ - x->u.rx_frame.stats.payload + \ - x->u.rx_frame.stats.phy_count)) -#define IL_RX_END(x) ((struct il3945_rx_frame_end *)(\ - IL_RX_HDR(x)->payload + \ - le16_to_cpu(IL_RX_HDR(x)->len))) -#define IL_RX_STATS(x) (&x->u.rx_frame.stats) -#define IL_RX_DATA(x) (IL_RX_HDR(x)->payload) - -/****************************************************************************** - * - * Functions implemented in iwl3945-base.c which are forward declared here - * for use by iwl-*.c - * - *****************************************************************************/ -extern int il3945_calc_db_from_ratio(int sig_ratio); -extern void il3945_rx_replenish(void *data); -extern void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq); -extern unsigned int il3945_fill_beacon_frame(struct il_priv *il, - struct ieee80211_hdr *hdr, - int left); -extern int il3945_dump_nic_event_log(struct il_priv *il, bool full_log, - char **buf, bool display); -extern void il3945_dump_nic_error_log(struct il_priv *il); - -/****************************************************************************** - * - * Functions implemented in iwl-[34]*.c which are forward declared here - * for use by iwl3945-base.c - * - * NOTE: The implementation of these functions are hardware specific - * which is why they are in the hardware specific files (vs. 
iwl-base.c) - * - * Naming convention -- - * il3945_ <-- Its part of iwlwifi (should be changed to il3945_) - * il3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW) - * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX) - * il3945_bg_ <-- Called from work queue context - * il3945_mac_ <-- mac80211 callback - * - ****************************************************************************/ -extern void il3945_hw_handler_setup(struct il_priv *il); -extern void il3945_hw_setup_deferred_work(struct il_priv *il); -extern void il3945_hw_cancel_deferred_work(struct il_priv *il); -extern int il3945_hw_rxq_stop(struct il_priv *il); -extern int il3945_hw_set_hw_params(struct il_priv *il); -extern int il3945_hw_nic_init(struct il_priv *il); -extern int il3945_hw_nic_stop_master(struct il_priv *il); -extern void il3945_hw_txq_ctx_free(struct il_priv *il); -extern void il3945_hw_txq_ctx_stop(struct il_priv *il); -extern int il3945_hw_nic_reset(struct il_priv *il); -extern int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, - struct il_tx_queue *txq, - dma_addr_t addr, u16 len, u8 reset, - u8 pad); -extern void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq); -extern int il3945_hw_get_temperature(struct il_priv *il); -extern int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq); -extern unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il, - struct il3945_frame *frame, - u8 rate); -void il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd, - struct ieee80211_tx_info *info, - struct ieee80211_hdr *hdr, int sta_id); -extern int il3945_hw_reg_send_txpower(struct il_priv *il); -extern int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power); -extern void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb); -void il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb); -extern void il3945_disable_events(struct il_priv *il); -extern int il4965_get_temperature(const struct il_priv *il); -extern void il3945_post_associate(struct il_priv *il); -extern void il3945_config_ap(struct il_priv *il); - -extern int il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx); - -/** - * il3945_hw_find_station - Find station id for a given BSSID - * @bssid: MAC address of station ID to find - * - * NOTE: This should not be hardware specific but the code has - * not yet been merged into a single common layer for managing the - * station tables. - */ -extern u8 il3945_hw_find_station(struct il_priv *il, const u8 * bssid); - -extern struct ieee80211_ops il3945_hw_ops; - -extern __le32 il3945_get_antenna_flags(const struct il_priv *il); -extern int il3945_init_hw_rate_table(struct il_priv *il); -extern void il3945_reg_txpower_periodic(struct il_priv *il); -extern int il3945_txpower_set_from_eeprom(struct il_priv *il); - -extern int il3945_rs_next_rate(struct il_priv *il, int rate); - -/* scanning */ -int il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif); -void il3945_post_scan(struct il_priv *il); - -/* rates */ -extern const struct il3945_rate_info il3945_rates[RATE_COUNT_3945]; - -/* RSSI to dBm */ -#define IL39_RSSI_OFFSET 95 - -/* - * EEPROM related constants, enums, and structures. - */ -#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7) - -/* - * Mapping of a Tx power level, at factory calibration temperature, - * to a radio/DSP gain table idx. - * One for each of 5 "sample" power levels in each band. 
- * v_det is measured at the factory, using the 3945's built-in power amplifier - * (PA) output voltage detector. This same detector is used during Tx of - * long packets in normal operation to provide feedback as to proper output - * level. - * Data copied from EEPROM. - * DO NOT ALTER THIS STRUCTURE!!! - */ -struct il3945_eeprom_txpower_sample { - u8 gain_idx; /* idx into power (gain) setup table ... */ - s8 power; /* ... for this pwr level for this chnl group */ - u16 v_det; /* PA output voltage */ -} __packed; - -/* - * Mappings of Tx power levels -> nominal radio/DSP gain table idxes. - * One for each channel group (a.k.a. "band") (1 for BG, 4 for A). - * Tx power setup code interpolates between the 5 "sample" power levels - * to determine the nominal setup for a requested power level. - * Data copied from EEPROM. - * DO NOT ALTER THIS STRUCTURE!!! - */ -struct il3945_eeprom_txpower_group { - struct il3945_eeprom_txpower_sample samples[5]; /* 5 power levels */ - s32 a, b, c, d, e; /* coefficients for voltage->power - * formula (signed) */ - s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on - * frequency (signed) */ - s8 saturation_power; /* highest power possible by h/w in this - * band */ - u8 group_channel; /* "representative" channel # in this band */ - s16 temperature; /* h/w temperature at factory calib this band - * (signed) */ -} __packed; - -/* - * Temperature-based Tx-power compensation data, not band-specific. - * These coefficients are use to modify a/b/c/d/e coeffs based on - * difference between current temperature and factory calib temperature. - * Data copied from EEPROM. - */ -struct il3945_eeprom_temperature_corr { - u32 Ta; - u32 Tb; - u32 Tc; - u32 Td; - u32 Te; -} __packed; - -/* - * EEPROM map - */ -struct il3945_eeprom { - u8 reserved0[16]; - u16 device_id; /* abs.ofs: 16 */ - u8 reserved1[2]; - u16 pmc; /* abs.ofs: 20 */ - u8 reserved2[20]; - u8 mac_address[6]; /* abs.ofs: 42 */ - u8 reserved3[58]; - u16 board_revision; /* abs.ofs: 106 */ - u8 reserved4[11]; - u8 board_pba_number[9]; /* abs.ofs: 119 */ - u8 reserved5[8]; - u16 version; /* abs.ofs: 136 */ - u8 sku_cap; /* abs.ofs: 138 */ - u8 leds_mode; /* abs.ofs: 139 */ - u16 oem_mode; - u16 wowlan_mode; /* abs.ofs: 142 */ - u16 leds_time_interval; /* abs.ofs: 144 */ - u8 leds_off_time; /* abs.ofs: 146 */ - u8 leds_on_time; /* abs.ofs: 147 */ - u8 almgor_m_version; /* abs.ofs: 148 */ - u8 antenna_switch_type; /* abs.ofs: 149 */ - u8 reserved6[42]; - u8 sku_id[4]; /* abs.ofs: 192 */ - -/* - * Per-channel regulatory data. - * - * Each channel that *might* be supported by 3945 has a fixed location - * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory - * txpower (MSB). - * - * Entries immediately below are for 20 MHz channel width. 
- * - * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 - */ - u16 band_1_count; /* abs.ofs: 196 */ - struct il_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */ - -/* - * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196, - * 5.0 GHz channels 7, 8, 11, 12, 16 - * (4915-5080MHz) (none of these is ever supported) - */ - u16 band_2_count; /* abs.ofs: 226 */ - struct il_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */ - -/* - * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 - * (5170-5320MHz) - */ - u16 band_3_count; /* abs.ofs: 254 */ - struct il_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */ - -/* - * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 - * (5500-5700MHz) - */ - u16 band_4_count; /* abs.ofs: 280 */ - struct il_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */ - -/* - * 5.7 GHz channels 145, 149, 153, 157, 161, 165 - * (5725-5825MHz) - */ - u16 band_5_count; /* abs.ofs: 304 */ - struct il_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */ - - u8 reserved9[194]; - -/* - * 3945 Txpower calibration data. - */ -#define IL_NUM_TX_CALIB_GROUPS 5 - struct il3945_eeprom_txpower_group groups[IL_NUM_TX_CALIB_GROUPS]; -/* abs.ofs: 512 */ - struct il3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */ - u8 reserved16[172]; /* fill out to full 1024 byte block */ -} __packed; - -#define IL3945_EEPROM_IMG_SIZE 1024 - -/* End of EEPROM */ - -#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */ -#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */ - -/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */ -#define IL39_NUM_QUEUES 5 -#define IL39_CMD_QUEUE_NUM 4 - -#define IL_DEFAULT_TX_RETRY 15 - -/*********************************************/ - -#define RFD_SIZE 4 -#define NUM_TFD_CHUNKS 4 - -#define TFD_CTL_COUNT_SET(n) (n << 24) -#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7) -#define TFD_CTL_PAD_SET(n) (n << 28) -#define TFD_CTL_PAD_GET(ctl) (ctl >> 28) - -/* Sizes and addresses for instruction and data memory (SRAM) in - * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */ -#define IL39_RTC_INST_LOWER_BOUND (0x000000) -#define IL39_RTC_INST_UPPER_BOUND (0x014000) - -#define IL39_RTC_DATA_LOWER_BOUND (0x800000) -#define IL39_RTC_DATA_UPPER_BOUND (0x808000) - -#define IL39_RTC_INST_SIZE (IL39_RTC_INST_UPPER_BOUND - \ - IL39_RTC_INST_LOWER_BOUND) -#define IL39_RTC_DATA_SIZE (IL39_RTC_DATA_UPPER_BOUND - \ - IL39_RTC_DATA_LOWER_BOUND) - -#define IL39_MAX_INST_SIZE IL39_RTC_INST_SIZE -#define IL39_MAX_DATA_SIZE IL39_RTC_DATA_SIZE - -/* Size of uCode instruction memory in bootstrap state machine */ -#define IL39_MAX_BSM_SIZE IL39_RTC_INST_SIZE - -static inline int -il3945_hw_valid_rtc_data_addr(u32 addr) -{ - return (addr >= IL39_RTC_DATA_LOWER_BOUND && - addr < IL39_RTC_DATA_UPPER_BOUND); -} - -/* Base physical address of il3945_shared is provided to FH39_TSSR_CBB_BASE - * and &il3945_shared.rx_read_ptr[0] is provided to FH39_RCSR_RPTR_ADDR(0) */ -struct il3945_shared { - __le32 tx_base_ptr[8]; -} __packed; - -/************************************/ -/* iwl3945 Flow Handler Definitions */ -/************************************/ - -/** - * This I/O area is directly read/writable by driver (e.g. Linux uses writel()) - * Addresses are offsets from device's PCI hardware base address. 
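
The flow-handler register map that follows is built from nested offset macros. As a worked example (offsets copied from the definitions just below; illustrative only), channel 0's RBD read-pointer register resolves to 0x0C24:

#include <stdio.h>

/* Copies of the offsets defined below, repeated so this compiles alone. */
#define FH39_MEM_LOWER_BOUND            0x0800
#define FH39_RCSR_TBL                   (FH39_MEM_LOWER_BOUND + 0x400)
#define FH39_RCSR(_ch)                  (FH39_RCSR_TBL + (_ch) * 0x40)
#define FH39_RCSR_RPTR_ADDR(_ch)        (FH39_RCSR(_ch) + 0x24)

int main(void)
{
        /* 0x0800 + 0x400 + 0 * 0x40 + 0x24 = 0x0c24 */
        printf("FH39_RCSR_RPTR_ADDR(0) = 0x%04x\n", FH39_RCSR_RPTR_ADDR(0));
        return 0;
}
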
- */ -#define FH39_MEM_LOWER_BOUND (0x0800) -#define FH39_MEM_UPPER_BOUND (0x1000) - -#define FH39_CBCC_TBL (FH39_MEM_LOWER_BOUND + 0x140) -#define FH39_TFDB_TBL (FH39_MEM_LOWER_BOUND + 0x180) -#define FH39_RCSR_TBL (FH39_MEM_LOWER_BOUND + 0x400) -#define FH39_RSSR_TBL (FH39_MEM_LOWER_BOUND + 0x4c0) -#define FH39_TCSR_TBL (FH39_MEM_LOWER_BOUND + 0x500) -#define FH39_TSSR_TBL (FH39_MEM_LOWER_BOUND + 0x680) - -/* TFDB (Transmit Frame Buffer Descriptor) */ -#define FH39_TFDB(_ch, buf) (FH39_TFDB_TBL + \ - ((_ch) * 2 + (buf)) * 0x28) -#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TBL + 0x50 * (_ch)) - -/* CBCC channel is [0,2] */ -#define FH39_CBCC(_ch) (FH39_CBCC_TBL + (_ch) * 0x8) -#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00) -#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04) - -/* RCSR channel is [0,2] */ -#define FH39_RCSR(_ch) (FH39_RCSR_TBL + (_ch) * 0x40) -#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00) -#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04) -#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20) -#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24) - -#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0)) - -/* RSSR */ -#define FH39_RSSR_CTRL (FH39_RSSR_TBL + 0x000) -#define FH39_RSSR_STATUS (FH39_RSSR_TBL + 0x004) - -/* TCSR */ -#define FH39_TCSR(_ch) (FH39_TCSR_TBL + (_ch) * 0x20) -#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00) -#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04) -#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08) - -/* TSSR */ -#define FH39_TSSR_CBB_BASE (FH39_TSSR_TBL + 0x000) -#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TBL + 0x008) -#define FH39_TSSR_TX_STATUS (FH39_TSSR_TBL + 0x010) - -/* DBM */ - -#define FH39_SRVC_CHNL (6) - -#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20) -#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4) - -#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000) - -#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000) - -#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000) - -#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000) - -#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000) - -#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000) - -#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000) -#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001) - -#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000) -#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008) - -#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000) - -#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000) - -#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000) -#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000) - -#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000) - -#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001) - -#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000) -#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000) - -#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400) - -#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100) -#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080) - -#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020) -#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005) - -#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24) -#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) 
(BIT(_ch) << 16) - -#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \ - (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \ - FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch)) - -#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000) - -struct il3945_tfd_tb { - __le32 addr; - __le32 len; -} __packed; - -struct il3945_tfd { - __le32 control_flags; - struct il3945_tfd_tb tbs[4]; - u8 __pad[28]; -} __packed; - -#ifdef CONFIG_IWLEGACY_DEBUGFS -ssize_t il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos); -ssize_t il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos); -ssize_t il3945_ucode_general_stats_read(struct file *file, - char __user *user_buf, size_t count, - loff_t *ppos); -#endif - -#endif diff --git a/trunk/drivers/net/wireless/iwlegacy/4965-debug.c b/trunk/drivers/net/wireless/iwlegacy/4965-debug.c deleted file mode 100644 index 98ec39f56ba3..000000000000 --- a/trunk/drivers/net/wireless/iwlegacy/4965-debug.c +++ /dev/null @@ -1,746 +0,0 @@ -/****************************************************************************** -* -* GPL LICENSE SUMMARY -* -* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. -* -* This program is free software; you can redistribute it and/or modify -* it under the terms of version 2 of the GNU General Public License as -* published by the Free Software Foundation. -* -* This program is distributed in the hope that it will be useful, but -* WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -* General Public License for more details. -* -* You should have received a copy of the GNU General Public License -* along with this program; if not, write to the Free Software -* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, -* USA -* -* The full GNU General Public License is included in this distribution -* in the file called LICENSE.GPL. -* -* Contact Information: -* Intel Linux Wireless -* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -*****************************************************************************/ -#include "common.h" -#include "4965.h" - -static const char *fmt_value = " %-30s %10u\n"; -static const char *fmt_table = " %-30s %10u %10u %10u %10u\n"; -static const char *fmt_header = - "%-32s current cumulative delta max\n"; - -static int -il4965_stats_flag(struct il_priv *il, char *buf, int bufsz) -{ - int p = 0; - u32 flag; - - flag = le32_to_cpu(il->_4965.stats.flag); - - p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag); - if (flag & UCODE_STATS_CLEAR_MSK) - p += scnprintf(buf + p, bufsz - p, - "\tStatistics have been cleared\n"); - p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n", - (flag & UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" : - "5.2 GHz"); - p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n", - (flag & UCODE_STATS_NARROW_BAND_MSK) ? 
"enabled" : - "disabled"); - - return p; -} - -ssize_t -il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct il_priv *il = file->private_data; - int pos = 0; - char *buf; - int bufsz = - sizeof(struct stats_rx_phy) * 40 + - sizeof(struct stats_rx_non_phy) * 40 + - sizeof(struct stats_rx_ht_phy) * 40 + 400; - ssize_t ret; - struct stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm; - struct stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck; - struct stats_rx_non_phy *general, *accum_general; - struct stats_rx_non_phy *delta_general, *max_general; - struct stats_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht; - - if (!il_is_alive(il)) - return -EAGAIN; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IL_ERR("Can not allocate Buffer\n"); - return -ENOMEM; - } - - /* - * the statistic information display here is based on - * the last stats notification from uCode - * might not reflect the current uCode activity - */ - ofdm = &il->_4965.stats.rx.ofdm; - cck = &il->_4965.stats.rx.cck; - general = &il->_4965.stats.rx.general; - ht = &il->_4965.stats.rx.ofdm_ht; - accum_ofdm = &il->_4965.accum_stats.rx.ofdm; - accum_cck = &il->_4965.accum_stats.rx.cck; - accum_general = &il->_4965.accum_stats.rx.general; - accum_ht = &il->_4965.accum_stats.rx.ofdm_ht; - delta_ofdm = &il->_4965.delta_stats.rx.ofdm; - delta_cck = &il->_4965.delta_stats.rx.cck; - delta_general = &il->_4965.delta_stats.rx.general; - delta_ht = &il->_4965.delta_stats.rx.ofdm_ht; - max_ofdm = &il->_4965.max_delta.rx.ofdm; - max_cck = &il->_4965.max_delta.rx.cck; - max_general = &il->_4965.max_delta.rx.general; - max_ht = &il->_4965.max_delta.rx.ofdm_ht; - - pos += il4965_stats_flag(il, buf, bufsz); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_header, - "Statistics_Rx - OFDM:"); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:", - le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt, - delta_ofdm->ina_cnt, max_ofdm->ina_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:", - le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt, - delta_ofdm->fina_cnt, max_ofdm->fina_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:", - le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err, - delta_ofdm->plcp_err, max_ofdm->plcp_err); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:", - le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err, - delta_ofdm->crc32_err, max_ofdm->crc32_err); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:", - le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err, - delta_ofdm->overrun_err, max_ofdm->overrun_err); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:", - le32_to_cpu(ofdm->early_overrun_err), - accum_ofdm->early_overrun_err, - delta_ofdm->early_overrun_err, - max_ofdm->early_overrun_err); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:", - le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good, - delta_ofdm->crc32_good, max_ofdm->crc32_good); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:", - le32_to_cpu(ofdm->false_alarm_cnt), - accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt, - max_ofdm->false_alarm_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:", - le32_to_cpu(ofdm->fina_sync_err_cnt), - accum_ofdm->fina_sync_err_cnt, - delta_ofdm->fina_sync_err_cnt, - max_ofdm->fina_sync_err_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, 
"sfd_timeout:", - le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout, - delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:", - le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout, - delta_ofdm->fina_timeout, max_ofdm->fina_timeout); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:", - le32_to_cpu(ofdm->unresponded_rts), - accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts, - max_ofdm->unresponded_rts); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:", - le32_to_cpu(ofdm->rxe_frame_limit_overrun), - accum_ofdm->rxe_frame_limit_overrun, - delta_ofdm->rxe_frame_limit_overrun, - max_ofdm->rxe_frame_limit_overrun); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:", - le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt, - delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:", - le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt, - delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:", - le32_to_cpu(ofdm->sent_ba_rsp_cnt), - accum_ofdm->sent_ba_rsp_cnt, delta_ofdm->sent_ba_rsp_cnt, - max_ofdm->sent_ba_rsp_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:", - le32_to_cpu(ofdm->dsp_self_kill), - accum_ofdm->dsp_self_kill, delta_ofdm->dsp_self_kill, - max_ofdm->dsp_self_kill); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:", - le32_to_cpu(ofdm->mh_format_err), - accum_ofdm->mh_format_err, delta_ofdm->mh_format_err, - max_ofdm->mh_format_err); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, - "re_acq_main_rssi_sum:", - le32_to_cpu(ofdm->re_acq_main_rssi_sum), - accum_ofdm->re_acq_main_rssi_sum, - delta_ofdm->re_acq_main_rssi_sum, - max_ofdm->re_acq_main_rssi_sum); - - pos += - scnprintf(buf + pos, bufsz - pos, fmt_header, - "Statistics_Rx - CCK:"); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:", - le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt, - delta_cck->ina_cnt, max_cck->ina_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:", - le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt, - delta_cck->fina_cnt, max_cck->fina_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:", - le32_to_cpu(cck->plcp_err), accum_cck->plcp_err, - delta_cck->plcp_err, max_cck->plcp_err); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:", - le32_to_cpu(cck->crc32_err), accum_cck->crc32_err, - delta_cck->crc32_err, max_cck->crc32_err); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:", - le32_to_cpu(cck->overrun_err), accum_cck->overrun_err, - delta_cck->overrun_err, max_cck->overrun_err); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:", - le32_to_cpu(cck->early_overrun_err), - accum_cck->early_overrun_err, - delta_cck->early_overrun_err, max_cck->early_overrun_err); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:", - le32_to_cpu(cck->crc32_good), accum_cck->crc32_good, - delta_cck->crc32_good, max_cck->crc32_good); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:", - le32_to_cpu(cck->false_alarm_cnt), - accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt, - max_cck->false_alarm_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:", - 
le32_to_cpu(cck->fina_sync_err_cnt), - accum_cck->fina_sync_err_cnt, - delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:", - le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout, - delta_cck->sfd_timeout, max_cck->sfd_timeout); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:", - le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout, - delta_cck->fina_timeout, max_cck->fina_timeout); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:", - le32_to_cpu(cck->unresponded_rts), - accum_cck->unresponded_rts, delta_cck->unresponded_rts, - max_cck->unresponded_rts); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:", - le32_to_cpu(cck->rxe_frame_limit_overrun), - accum_cck->rxe_frame_limit_overrun, - delta_cck->rxe_frame_limit_overrun, - max_cck->rxe_frame_limit_overrun); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:", - le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt, - delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:", - le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt, - delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:", - le32_to_cpu(cck->sent_ba_rsp_cnt), - accum_cck->sent_ba_rsp_cnt, delta_cck->sent_ba_rsp_cnt, - max_cck->sent_ba_rsp_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:", - le32_to_cpu(cck->dsp_self_kill), accum_cck->dsp_self_kill, - delta_cck->dsp_self_kill, max_cck->dsp_self_kill); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:", - le32_to_cpu(cck->mh_format_err), accum_cck->mh_format_err, - delta_cck->mh_format_err, max_cck->mh_format_err); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, - "re_acq_main_rssi_sum:", - le32_to_cpu(cck->re_acq_main_rssi_sum), - accum_cck->re_acq_main_rssi_sum, - delta_cck->re_acq_main_rssi_sum, - max_cck->re_acq_main_rssi_sum); - - pos += - scnprintf(buf + pos, bufsz - pos, fmt_header, - "Statistics_Rx - GENERAL:"); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_cts:", - le32_to_cpu(general->bogus_cts), accum_general->bogus_cts, - delta_general->bogus_cts, max_general->bogus_cts); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_ack:", - le32_to_cpu(general->bogus_ack), accum_general->bogus_ack, - delta_general->bogus_ack, max_general->bogus_ack); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "non_bssid_frames:", - le32_to_cpu(general->non_bssid_frames), - accum_general->non_bssid_frames, - delta_general->non_bssid_frames, - max_general->non_bssid_frames); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "filtered_frames:", - le32_to_cpu(general->filtered_frames), - accum_general->filtered_frames, - delta_general->filtered_frames, - max_general->filtered_frames); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "non_channel_beacons:", - le32_to_cpu(general->non_channel_beacons), - accum_general->non_channel_beacons, - delta_general->non_channel_beacons, - max_general->non_channel_beacons); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_beacons:", - le32_to_cpu(general->channel_beacons), - accum_general->channel_beacons, - delta_general->channel_beacons, - max_general->channel_beacons); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "num_missed_bcon:", - 
le32_to_cpu(general->num_missed_bcon), - accum_general->num_missed_bcon, - delta_general->num_missed_bcon, - max_general->num_missed_bcon); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, - "adc_rx_saturation_time:", - le32_to_cpu(general->adc_rx_saturation_time), - accum_general->adc_rx_saturation_time, - delta_general->adc_rx_saturation_time, - max_general->adc_rx_saturation_time); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, - "ina_detect_search_tm:", - le32_to_cpu(general->ina_detection_search_time), - accum_general->ina_detection_search_time, - delta_general->ina_detection_search_time, - max_general->ina_detection_search_time); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, - "beacon_silence_rssi_a:", - le32_to_cpu(general->beacon_silence_rssi_a), - accum_general->beacon_silence_rssi_a, - delta_general->beacon_silence_rssi_a, - max_general->beacon_silence_rssi_a); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, - "beacon_silence_rssi_b:", - le32_to_cpu(general->beacon_silence_rssi_b), - accum_general->beacon_silence_rssi_b, - delta_general->beacon_silence_rssi_b, - max_general->beacon_silence_rssi_b); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, - "beacon_silence_rssi_c:", - le32_to_cpu(general->beacon_silence_rssi_c), - accum_general->beacon_silence_rssi_c, - delta_general->beacon_silence_rssi_c, - max_general->beacon_silence_rssi_c); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, - "interference_data_flag:", - le32_to_cpu(general->interference_data_flag), - accum_general->interference_data_flag, - delta_general->interference_data_flag, - max_general->interference_data_flag); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_load:", - le32_to_cpu(general->channel_load), - accum_general->channel_load, delta_general->channel_load, - max_general->channel_load); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_false_alarms:", - le32_to_cpu(general->dsp_false_alarms), - accum_general->dsp_false_alarms, - delta_general->dsp_false_alarms, - max_general->dsp_false_alarms); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_a:", - le32_to_cpu(general->beacon_rssi_a), - accum_general->beacon_rssi_a, - delta_general->beacon_rssi_a, max_general->beacon_rssi_a); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_b:", - le32_to_cpu(general->beacon_rssi_b), - accum_general->beacon_rssi_b, - delta_general->beacon_rssi_b, max_general->beacon_rssi_b); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_c:", - le32_to_cpu(general->beacon_rssi_c), - accum_general->beacon_rssi_c, - delta_general->beacon_rssi_c, max_general->beacon_rssi_c); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_a:", - le32_to_cpu(general->beacon_energy_a), - accum_general->beacon_energy_a, - delta_general->beacon_energy_a, - max_general->beacon_energy_a); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_b:", - le32_to_cpu(general->beacon_energy_b), - accum_general->beacon_energy_b, - delta_general->beacon_energy_b, - max_general->beacon_energy_b); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_c:", - le32_to_cpu(general->beacon_energy_c), - accum_general->beacon_energy_c, - delta_general->beacon_energy_c, - max_general->beacon_energy_c); - - pos += - scnprintf(buf + pos, bufsz - pos, fmt_header, - "Statistics_Rx - OFDM_HT:"); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:", - le32_to_cpu(ht->plcp_err), 
accum_ht->plcp_err, - delta_ht->plcp_err, max_ht->plcp_err); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:", - le32_to_cpu(ht->overrun_err), accum_ht->overrun_err, - delta_ht->overrun_err, max_ht->overrun_err); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:", - le32_to_cpu(ht->early_overrun_err), - accum_ht->early_overrun_err, delta_ht->early_overrun_err, - max_ht->early_overrun_err); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:", - le32_to_cpu(ht->crc32_good), accum_ht->crc32_good, - delta_ht->crc32_good, max_ht->crc32_good); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:", - le32_to_cpu(ht->crc32_err), accum_ht->crc32_err, - delta_ht->crc32_err, max_ht->crc32_err); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:", - le32_to_cpu(ht->mh_format_err), accum_ht->mh_format_err, - delta_ht->mh_format_err, max_ht->mh_format_err); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_crc32_good:", - le32_to_cpu(ht->agg_crc32_good), accum_ht->agg_crc32_good, - delta_ht->agg_crc32_good, max_ht->agg_crc32_good); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_mpdu_cnt:", - le32_to_cpu(ht->agg_mpdu_cnt), accum_ht->agg_mpdu_cnt, - delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_cnt:", - le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt, - delta_ht->agg_cnt, max_ht->agg_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "unsupport_mcs:", - le32_to_cpu(ht->unsupport_mcs), accum_ht->unsupport_mcs, - delta_ht->unsupport_mcs, max_ht->unsupport_mcs); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -ssize_t -il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct il_priv *il = file->private_data; - int pos = 0; - char *buf; - int bufsz = (sizeof(struct stats_tx) * 48) + 250; - ssize_t ret; - struct stats_tx *tx, *accum_tx, *delta_tx, *max_tx; - - if (!il_is_alive(il)) - return -EAGAIN; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IL_ERR("Can not allocate Buffer\n"); - return -ENOMEM; - } - - /* the statistic information display here is based on - * the last stats notification from uCode - * might not reflect the current uCode activity - */ - tx = &il->_4965.stats.tx; - accum_tx = &il->_4965.accum_stats.tx; - delta_tx = &il->_4965.delta_stats.tx; - max_tx = &il->_4965.max_delta.tx; - - pos += il4965_stats_flag(il, buf, bufsz); - pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Tx:"); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "preamble:", - le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt, - delta_tx->preamble_cnt, max_tx->preamble_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_detected_cnt:", - le32_to_cpu(tx->rx_detected_cnt), - accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt, - max_tx->rx_detected_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_defer_cnt:", - le32_to_cpu(tx->bt_prio_defer_cnt), - accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt, - max_tx->bt_prio_defer_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_kill_cnt:", - le32_to_cpu(tx->bt_prio_kill_cnt), - accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt, - max_tx->bt_prio_kill_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "few_bytes_cnt:", - le32_to_cpu(tx->few_bytes_cnt), 
accum_tx->few_bytes_cnt, - delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "cts_timeout:", - le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout, - delta_tx->cts_timeout, max_tx->cts_timeout); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "ack_timeout:", - le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout, - delta_tx->ack_timeout, max_tx->ack_timeout); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "expected_ack_cnt:", - le32_to_cpu(tx->expected_ack_cnt), - accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt, - max_tx->expected_ack_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "actual_ack_cnt:", - le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt, - delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "dump_msdu_cnt:", - le32_to_cpu(tx->dump_msdu_cnt), accum_tx->dump_msdu_cnt, - delta_tx->dump_msdu_cnt, max_tx->dump_msdu_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, - "abort_nxt_frame_mismatch:", - le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt), - accum_tx->burst_abort_next_frame_mismatch_cnt, - delta_tx->burst_abort_next_frame_mismatch_cnt, - max_tx->burst_abort_next_frame_mismatch_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, - "abort_missing_nxt_frame:", - le32_to_cpu(tx->burst_abort_missing_next_frame_cnt), - accum_tx->burst_abort_missing_next_frame_cnt, - delta_tx->burst_abort_missing_next_frame_cnt, - max_tx->burst_abort_missing_next_frame_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, - "cts_timeout_collision:", - le32_to_cpu(tx->cts_timeout_collision), - accum_tx->cts_timeout_collision, - delta_tx->cts_timeout_collision, - max_tx->cts_timeout_collision); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, - "ack_ba_timeout_collision:", - le32_to_cpu(tx->ack_or_ba_timeout_collision), - accum_tx->ack_or_ba_timeout_collision, - delta_tx->ack_or_ba_timeout_collision, - max_tx->ack_or_ba_timeout_collision); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "agg ba_timeout:", - le32_to_cpu(tx->agg.ba_timeout), accum_tx->agg.ba_timeout, - delta_tx->agg.ba_timeout, max_tx->agg.ba_timeout); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, - "agg ba_resched_frames:", - le32_to_cpu(tx->agg.ba_reschedule_frames), - accum_tx->agg.ba_reschedule_frames, - delta_tx->agg.ba_reschedule_frames, - max_tx->agg.ba_reschedule_frames); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, - "agg scd_query_agg_frame:", - le32_to_cpu(tx->agg.scd_query_agg_frame_cnt), - accum_tx->agg.scd_query_agg_frame_cnt, - delta_tx->agg.scd_query_agg_frame_cnt, - max_tx->agg.scd_query_agg_frame_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, - "agg scd_query_no_agg:", - le32_to_cpu(tx->agg.scd_query_no_agg), - accum_tx->agg.scd_query_no_agg, - delta_tx->agg.scd_query_no_agg, - max_tx->agg.scd_query_no_agg); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "agg scd_query_agg:", - le32_to_cpu(tx->agg.scd_query_agg), - accum_tx->agg.scd_query_agg, delta_tx->agg.scd_query_agg, - max_tx->agg.scd_query_agg); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, - "agg scd_query_mismatch:", - le32_to_cpu(tx->agg.scd_query_mismatch), - accum_tx->agg.scd_query_mismatch, - delta_tx->agg.scd_query_mismatch, - max_tx->agg.scd_query_mismatch); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "agg frame_not_ready:", - le32_to_cpu(tx->agg.frame_not_ready), - 
accum_tx->agg.frame_not_ready, - delta_tx->agg.frame_not_ready, - max_tx->agg.frame_not_ready); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "agg underrun:", - le32_to_cpu(tx->agg.underrun), accum_tx->agg.underrun, - delta_tx->agg.underrun, max_tx->agg.underrun); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "agg bt_prio_kill:", - le32_to_cpu(tx->agg.bt_prio_kill), - accum_tx->agg.bt_prio_kill, delta_tx->agg.bt_prio_kill, - max_tx->agg.bt_prio_kill); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "agg rx_ba_rsp_cnt:", - le32_to_cpu(tx->agg.rx_ba_rsp_cnt), - accum_tx->agg.rx_ba_rsp_cnt, delta_tx->agg.rx_ba_rsp_cnt, - max_tx->agg.rx_ba_rsp_cnt); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -ssize_t -il4965_ucode_general_stats_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct il_priv *il = file->private_data; - int pos = 0; - char *buf; - int bufsz = sizeof(struct stats_general) * 10 + 300; - ssize_t ret; - struct stats_general_common *general, *accum_general; - struct stats_general_common *delta_general, *max_general; - struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; - struct stats_div *div, *accum_div, *delta_div, *max_div; - - if (!il_is_alive(il)) - return -EAGAIN; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IL_ERR("Can not allocate Buffer\n"); - return -ENOMEM; - } - - /* the statistic information display here is based on - * the last stats notification from uCode - * might not reflect the current uCode activity - */ - general = &il->_4965.stats.general.common; - dbg = &il->_4965.stats.general.common.dbg; - div = &il->_4965.stats.general.common.div; - accum_general = &il->_4965.accum_stats.general.common; - accum_dbg = &il->_4965.accum_stats.general.common.dbg; - accum_div = &il->_4965.accum_stats.general.common.div; - delta_general = &il->_4965.delta_stats.general.common; - max_general = &il->_4965.max_delta.general.common; - delta_dbg = &il->_4965.delta_stats.general.common.dbg; - max_dbg = &il->_4965.max_delta.general.common.dbg; - delta_div = &il->_4965.delta_stats.general.common.div; - max_div = &il->_4965.max_delta.general.common.div; - - pos += il4965_stats_flag(il, buf, bufsz); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_header, - "Statistics_General:"); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_value, "temperature:", - le32_to_cpu(general->temperature)); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_value, "ttl_timestamp:", - le32_to_cpu(general->ttl_timestamp)); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_check:", - le32_to_cpu(dbg->burst_check), accum_dbg->burst_check, - delta_dbg->burst_check, max_dbg->burst_check); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_count:", - le32_to_cpu(dbg->burst_count), accum_dbg->burst_count, - delta_dbg->burst_count, max_dbg->burst_count); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, - "wait_for_silence_timeout_count:", - le32_to_cpu(dbg->wait_for_silence_timeout_cnt), - accum_dbg->wait_for_silence_timeout_cnt, - delta_dbg->wait_for_silence_timeout_cnt, - max_dbg->wait_for_silence_timeout_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "sleep_time:", - le32_to_cpu(general->sleep_time), - accum_general->sleep_time, delta_general->sleep_time, - max_general->sleep_time); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_out:", - le32_to_cpu(general->slots_out), accum_general->slots_out, - delta_general->slots_out, 
max_general->slots_out); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_idle:", - le32_to_cpu(general->slots_idle), - accum_general->slots_idle, delta_general->slots_idle, - max_general->slots_idle); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_a:", - le32_to_cpu(div->tx_on_a), accum_div->tx_on_a, - delta_div->tx_on_a, max_div->tx_on_a); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_b:", - le32_to_cpu(div->tx_on_b), accum_div->tx_on_b, - delta_div->tx_on_b, max_div->tx_on_b); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "exec_time:", - le32_to_cpu(div->exec_time), accum_div->exec_time, - delta_div->exec_time, max_div->exec_time); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "probe_time:", - le32_to_cpu(div->probe_time), accum_div->probe_time, - delta_div->probe_time, max_div->probe_time); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_enable_counter:", - le32_to_cpu(general->rx_enable_counter), - accum_general->rx_enable_counter, - delta_general->rx_enable_counter, - max_general->rx_enable_counter); - pos += - scnprintf(buf + pos, bufsz - pos, fmt_table, "num_of_sos_states:", - le32_to_cpu(general->num_of_sos_states), - accum_general->num_of_sos_states, - delta_general->num_of_sos_states, - max_general->num_of_sos_states); - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} diff --git a/trunk/drivers/net/wireless/iwlegacy/4965-mac.c b/trunk/drivers/net/wireless/iwlegacy/4965-mac.c deleted file mode 100644 index 1667232af647..000000000000 --- a/trunk/drivers/net/wireless/iwlegacy/4965-mac.c +++ /dev/null @@ -1,6515 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include - -#define DRV_NAME "iwl4965" - -#include "common.h" -#include "4965.h" - -/****************************************************************************** - * - * module boiler plate - * - ******************************************************************************/ - -/* - * module name, copyright, version, etc. 
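
The three debugfs read handlers deleted above (in 4965-debug.c) all follow the same formatting pattern: output is accumulated with pos += scnprintf(buf + pos, bufsz - pos, ...), which never writes past the end of the buffer, and the finished text is handed to simple_read_from_buffer(). A condensed kernel-style skeleton of that pattern is sketched below; it is illustrative, not the removed code.

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* Illustrative skeleton of the debugfs read pattern, not driver code. */
static ssize_t example_stats_read(struct file *file, char __user *user_buf,
                                  size_t count, loff_t *ppos)
{
        const int bufsz = 4096;         /* sized for the worst-case output */
        char *buf;
        int pos = 0;
        ssize_t ret;

        buf = kzalloc(bufsz, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* scnprintf() returns the number of bytes actually stored and never
         * exceeds the remaining space, so "pos" cannot overrun the buffer. */
        pos += scnprintf(buf + pos, bufsz - pos, "%-30s %10u\n",
                         "some_counter:", 42u);

        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
        kfree(buf);
        return ret;
}
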
- */ -#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux" - -#ifdef CONFIG_IWLEGACY_DEBUG -#define VD "d" -#else -#define VD -#endif - -#define DRV_VERSION IWLWIFI_VERSION VD - -MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_VERSION(DRV_VERSION); -MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("iwl4965"); - -void -il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status) -{ - if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) { - IL_ERR("Tx flush command to flush out all frames\n"); - if (!test_bit(S_EXIT_PENDING, &il->status)) - queue_work(il->workqueue, &il->tx_flush); - } -} - -/* - * EEPROM - */ -struct il_mod_params il4965_mod_params = { - .amsdu_size_8K = 1, - .restart_fw = 1, - /* the rest are 0 by default */ -}; - -void -il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq) -{ - unsigned long flags; - int i; - spin_lock_irqsave(&rxq->lock, flags); - INIT_LIST_HEAD(&rxq->rx_free); - INIT_LIST_HEAD(&rxq->rx_used); - /* Fill the rx_used queue with _all_ of the Rx buffers */ - for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { - /* In the reset function, these buffers may have been allocated - * to an SKB, so we need to unmap and free potential storage */ - if (rxq->pool[i].page != NULL) { - pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, - PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - __il_free_pages(il, rxq->pool[i].page); - rxq->pool[i].page = NULL; - } - list_add_tail(&rxq->pool[i].list, &rxq->rx_used); - } - - for (i = 0; i < RX_QUEUE_SIZE; i++) - rxq->queue[i] = NULL; - - /* Set us so that we have processed and used all buffers, but have - * not restocked the Rx queue with fresh buffers */ - rxq->read = rxq->write = 0; - rxq->write_actual = 0; - rxq->free_count = 0; - spin_unlock_irqrestore(&rxq->lock, flags); -} - -int -il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq) -{ - u32 rb_size; - const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ - u32 rb_timeout = 0; - - if (il->cfg->mod_params->amsdu_size_8K) - rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; - else - rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; - - /* Stop Rx DMA */ - il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0); - - /* Reset driver's Rx queue write idx */ - il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); - - /* Tell device where to find RBD circular buffer in DRAM */ - il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8)); - - /* Tell device where in DRAM to update its Rx status */ - il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4); - - /* Enable Rx DMA - * Direct rx interrupts to hosts - * Rx buffer size 4 or 8k - * RB timeout 0x10 - * 256 RBDs - */ - il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, - FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | - FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | - FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK | - rb_size | - (rb_timeout << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) | - (rfdnlog << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); - - /* Set interrupt coalescing timer to default (2048 usecs) */ - il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF); - - return 0; -} - -static void -il4965_set_pwr_vmain(struct il_priv *il) -{ -/* - * (for documentation purposes) - * to set power to V_AUX, do: - - if (pci_pme_capable(il->pci_dev, PCI_D3cold)) - il_set_bits_mask_prph(il, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_PWR_SRC_VAUX, - ~APMG_PS_CTRL_MSK_PWR_SRC); - */ - - il_set_bits_mask_prph(il, APMG_PS_CTRL_REG, - 
APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, - ~APMG_PS_CTRL_MSK_PWR_SRC); -} - -int -il4965_hw_nic_init(struct il_priv *il) -{ - unsigned long flags; - struct il_rx_queue *rxq = &il->rxq; - int ret; - - /* nic_init */ - spin_lock_irqsave(&il->lock, flags); - il->cfg->ops->lib->apm_ops.init(il); - - /* Set interrupt coalescing calibration timer to default (512 usecs) */ - il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF); - - spin_unlock_irqrestore(&il->lock, flags); - - il4965_set_pwr_vmain(il); - - il->cfg->ops->lib->apm_ops.config(il); - - /* Allocate the RX queue, or reset if it is already allocated */ - if (!rxq->bd) { - ret = il_rx_queue_alloc(il); - if (ret) { - IL_ERR("Unable to initialize Rx queue\n"); - return -ENOMEM; - } - } else - il4965_rx_queue_reset(il, rxq); - - il4965_rx_replenish(il); - - il4965_rx_init(il, rxq); - - spin_lock_irqsave(&il->lock, flags); - - rxq->need_update = 1; - il_rx_queue_update_write_ptr(il, rxq); - - spin_unlock_irqrestore(&il->lock, flags); - - /* Allocate or reset and init all Tx and Command queues */ - if (!il->txq) { - ret = il4965_txq_ctx_alloc(il); - if (ret) - return ret; - } else - il4965_txq_ctx_reset(il); - - set_bit(S_INIT, &il->status); - - return 0; -} - -/** - * il4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr - */ -static inline __le32 -il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr) -{ - return cpu_to_le32((u32) (dma_addr >> 8)); -} - -/** - * il4965_rx_queue_restock - refill RX queue from pre-allocated pool - * - * If there are slots in the RX queue that need to be restocked, - * and we have free pre-allocated buffers, fill the ranks as much - * as we can, pulling from rx_free. - * - * This moves the 'write' idx forward to catch up with 'processed', and - * also updates the memory address in the firmware to reference the new - * target buffer. - */ -void -il4965_rx_queue_restock(struct il_priv *il) -{ - struct il_rx_queue *rxq = &il->rxq; - struct list_head *element; - struct il_rx_buf *rxb; - unsigned long flags; - - spin_lock_irqsave(&rxq->lock, flags); - while (il_rx_queue_space(rxq) > 0 && rxq->free_count) { - /* The overwritten rxb must be a used one */ - rxb = rxq->queue[rxq->write]; - BUG_ON(rxb && rxb->page); - - /* Get next free Rx buffer, remove from free list */ - element = rxq->rx_free.next; - rxb = list_entry(element, struct il_rx_buf, list); - list_del(element); - - /* Point to Rx buffer via next RBD in circular buffer */ - rxq->bd[rxq->write] = - il4965_dma_addr2rbd_ptr(il, rxb->page_dma); - rxq->queue[rxq->write] = rxb; - rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; - rxq->free_count--; - } - spin_unlock_irqrestore(&rxq->lock, flags); - /* If the pre-allocated buffer pool is dropping low, schedule to - * refill it */ - if (rxq->free_count <= RX_LOW_WATERMARK) - queue_work(il->workqueue, &il->rx_replenish); - - /* If we've added more space for the firmware to place data, tell it. - * Increment device's write pointer in multiples of 8. */ - if (rxq->write_actual != (rxq->write & ~0x7)) { - spin_lock_irqsave(&rxq->lock, flags); - rxq->need_update = 1; - spin_unlock_irqrestore(&rxq->lock, flags); - il_rx_queue_update_write_ptr(il, rxq); - } -} - -/** - * il4965_rx_replenish - Move all used packet from rx_used to rx_free - * - * When moving to rx_free an SKB is allocated for the slot. - * - * Also restock the Rx queue via il_rx_queue_restock. 
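
A note on il4965_rx_queue_restock() above: the software write index steps through the 256-entry RBD ring one buffer at a time, but the device's write pointer is only advanced in whole groups of 8 RBDs, which is what the write & ~0x7 masking implements. The tiny stand-alone program below (assumed 256-entry ring, matching the "256 RBDs" comment in il4965_rx_init(); illustration only) prints how the two indices relate.

#include <stdio.h>

#define RX_QUEUE_SIZE 256u
#define RX_QUEUE_MASK (RX_QUEUE_SIZE - 1)       /* ring index wraps at 256 */

int main(void)
{
        unsigned int write;

        for (write = 0; write <= 17; write++) {
                /* The device is only told about completed groups of 8 RBDs:
                 * 0..7 -> 0, 8..15 -> 8, 16..23 -> 16, ... */
                printf("sw write=%2u -> hw write_actual=%2u (ring idx %u)\n",
                       write, write & ~0x7u, write & RX_QUEUE_MASK);
        }
        return 0;
}
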
- * This is called as a scheduled work item (except for during initialization) - */ -static void -il4965_rx_allocate(struct il_priv *il, gfp_t priority) -{ - struct il_rx_queue *rxq = &il->rxq; - struct list_head *element; - struct il_rx_buf *rxb; - struct page *page; - unsigned long flags; - gfp_t gfp_mask = priority; - - while (1) { - spin_lock_irqsave(&rxq->lock, flags); - if (list_empty(&rxq->rx_used)) { - spin_unlock_irqrestore(&rxq->lock, flags); - return; - } - spin_unlock_irqrestore(&rxq->lock, flags); - - if (rxq->free_count > RX_LOW_WATERMARK) - gfp_mask |= __GFP_NOWARN; - - if (il->hw_params.rx_page_order > 0) - gfp_mask |= __GFP_COMP; - - /* Alloc a new receive buffer */ - page = alloc_pages(gfp_mask, il->hw_params.rx_page_order); - if (!page) { - if (net_ratelimit()) - D_INFO("alloc_pages failed, " "order: %d\n", - il->hw_params.rx_page_order); - - if (rxq->free_count <= RX_LOW_WATERMARK && - net_ratelimit()) - IL_ERR("Failed to alloc_pages with %s. " - "Only %u free buffers remaining.\n", - priority == - GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL", - rxq->free_count); - /* We don't reschedule replenish work here -- we will - * call the restock method and if it still needs - * more buffers it will schedule replenish */ - return; - } - - spin_lock_irqsave(&rxq->lock, flags); - - if (list_empty(&rxq->rx_used)) { - spin_unlock_irqrestore(&rxq->lock, flags); - __free_pages(page, il->hw_params.rx_page_order); - return; - } - element = rxq->rx_used.next; - rxb = list_entry(element, struct il_rx_buf, list); - list_del(element); - - spin_unlock_irqrestore(&rxq->lock, flags); - - BUG_ON(rxb->page); - rxb->page = page; - /* Get physical address of the RB */ - rxb->page_dma = - pci_map_page(il->pci_dev, page, 0, - PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - /* dma address must be no more than 36 bits */ - BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); - /* and also 256 byte aligned! */ - BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); - - spin_lock_irqsave(&rxq->lock, flags); - - list_add_tail(&rxb->list, &rxq->rx_free); - rxq->free_count++; - il->alloc_rxb_page++; - - spin_unlock_irqrestore(&rxq->lock, flags); - } -} - -void -il4965_rx_replenish(struct il_priv *il) -{ - unsigned long flags; - - il4965_rx_allocate(il, GFP_KERNEL); - - spin_lock_irqsave(&il->lock, flags); - il4965_rx_queue_restock(il); - spin_unlock_irqrestore(&il->lock, flags); -} - -void -il4965_rx_replenish_now(struct il_priv *il) -{ - il4965_rx_allocate(il, GFP_ATOMIC); - - il4965_rx_queue_restock(il); -} - -/* Assumes that the skb field of the buffers in 'pool' is kept accurate. 
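
On the two BUG_ON() checks in il4965_rx_allocate() above: ~DMA_BIT_MASK(36) is non-zero only for address bits above bit 35 (the address must fit in 36 bits), and DMA_BIT_MASK(8) is non-zero only when any of the low 8 bits are set (the address must be 256-byte aligned). A stand-alone restatement, for illustration only:

#include <stdbool.h>
#include <stdint.h>

/* Same shape as DMA_BIT_MASK(n) for n < 64: the low n bits set. */
#define BIT_MASK_LOW(n) ((1ULL << (n)) - 1)

/* Mirrors the two BUG_ON() conditions above -- illustrative only. */
static bool rx_dma_addr_ok(uint64_t addr)
{
        if (addr & ~BIT_MASK_LOW(36))   /* any bit above bit 35 set? */
                return false;
        if (addr & BIT_MASK_LOW(8))     /* not 256-byte aligned? */
                return false;
        return true;
}
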
- * If an SKB has been detached, the POOL needs to have its SKB set to NULL - * This free routine walks the list of POOL entries and if SKB is set to - * non NULL it is unmapped and freed - */ -void -il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq) -{ - int i; - for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { - if (rxq->pool[i].page != NULL) { - pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, - PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - __il_free_pages(il, rxq->pool[i].page); - rxq->pool[i].page = NULL; - } - } - - dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, - rxq->bd_dma); - dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status), - rxq->rb_stts, rxq->rb_stts_dma); - rxq->bd = NULL; - rxq->rb_stts = NULL; -} - -int -il4965_rxq_stop(struct il_priv *il) -{ - - /* stop Rx DMA */ - il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0); - il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG, - FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); - - return 0; -} - -int -il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) -{ - int idx = 0; - int band_offset = 0; - - /* HT rate format: mac80211 wants an MCS number, which is just LSB */ - if (rate_n_flags & RATE_MCS_HT_MSK) { - idx = (rate_n_flags & 0xff); - return idx; - /* Legacy rate format, search for match in table */ - } else { - if (band == IEEE80211_BAND_5GHZ) - band_offset = IL_FIRST_OFDM_RATE; - for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++) - if (il_rates[idx].plcp == (rate_n_flags & 0xFF)) - return idx - band_offset; - } - - return -1; -} - -static int -il4965_calc_rssi(struct il_priv *il, struct il_rx_phy_res *rx_resp) -{ - /* data from PHY/DSP regarding signal strength, etc., - * contents are always there, not configurable by host. */ - struct il4965_rx_non_cfg_phy *ncphy = - (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf; - u32 agc = - (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >> - IL49_AGC_DB_POS; - - u32 valid_antennae = - (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK) - >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET; - u8 max_rssi = 0; - u32 i; - - /* Find max rssi among 3 possible receivers. - * These values are measured by the digital signal processor (DSP). - * They should stay fairly constant even as the signal strength varies, - * if the radio's automatic gain control (AGC) is working right. - * AGC value (see below) will provide the "interesting" info. */ - for (i = 0; i < 3; i++) - if (valid_antennae & (1 << i)) - max_rssi = max(ncphy->rssi_info[i << 1], max_rssi); - - D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n", - ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4], - max_rssi, agc); - - /* dBm = max_rssi dB - agc dB - constant. - * Higher AGC (higher radio gain) means lower signal. 
*/ - return max_rssi - agc - IL4965_RSSI_OFFSET; -} - -static u32 -il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in) -{ - u32 decrypt_out = 0; - - if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) == - RX_RES_STATUS_STATION_FOUND) - decrypt_out |= - (RX_RES_STATUS_STATION_FOUND | - RX_RES_STATUS_NO_STATION_INFO_MISMATCH); - - decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK); - - /* packet was not encrypted */ - if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == - RX_RES_STATUS_SEC_TYPE_NONE) - return decrypt_out; - - /* packet was encrypted with unknown alg */ - if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == - RX_RES_STATUS_SEC_TYPE_ERR) - return decrypt_out; - - /* decryption was not done in HW */ - if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) != - RX_MPDU_RES_STATUS_DEC_DONE_MSK) - return decrypt_out; - - switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) { - - case RX_RES_STATUS_SEC_TYPE_CCMP: - /* alg is CCM: check MIC only */ - if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK)) - /* Bad MIC */ - decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; - else - decrypt_out |= RX_RES_STATUS_DECRYPT_OK; - - break; - - case RX_RES_STATUS_SEC_TYPE_TKIP: - if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) { - /* Bad TTAK */ - decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK; - break; - } - /* fall through if TTAK OK */ - default: - if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK)) - decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; - else - decrypt_out |= RX_RES_STATUS_DECRYPT_OK; - break; - } - - D_RX("decrypt_in:0x%x decrypt_out = 0x%x\n", decrypt_in, decrypt_out); - - return decrypt_out; -} - -static void -il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr, - u16 len, u32 ampdu_status, struct il_rx_buf *rxb, - struct ieee80211_rx_status *stats) -{ - struct sk_buff *skb; - __le16 fc = hdr->frame_control; - - /* We only process data packets if the interface is open */ - if (unlikely(!il->is_open)) { - D_DROP("Dropping packet while interface is not open.\n"); - return; - } - - /* In case of HW accelerated crypto and bad decryption, drop */ - if (!il->cfg->mod_params->sw_crypto && - il_set_decrypted_flag(il, hdr, ampdu_status, stats)) - return; - - skb = dev_alloc_skb(128); - if (!skb) { - IL_ERR("dev_alloc_skb failed\n"); - return; - } - - skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len); - - il_update_stats(il, false, fc, len); - memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); - - ieee80211_rx(il->hw, skb); - il->alloc_rxb_page--; - rxb->page = NULL; -} - -/* Called for N_RX (legacy ABG frames), or - * N_RX_MPDU (HT high-throughput N frames). */ -void -il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb) -{ - struct ieee80211_hdr *header; - struct ieee80211_rx_status rx_status; - struct il_rx_pkt *pkt = rxb_addr(rxb); - struct il_rx_phy_res *phy_res; - __le32 rx_pkt_status; - struct il_rx_mpdu_res_start *amsdu; - u32 len; - u32 ampdu_status; - u32 rate_n_flags; - - /** - * N_RX and N_RX_MPDU are handled differently. - * N_RX: physical layer info is in this buffer - * N_RX_MPDU: physical layer info was sent in separate - * command and cached in il->last_phy_res - * - * Here we set up local variables depending on which command is - * received. 
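The signal-strength computation just above boils down to the strongest chain minus the AGC gain minus a fixed offset. A simplified standalone version follows; the real code reads the per-chain values from an interleaved DSP buffer, and the names and types here are illustrative:

	#include <stdint.h>

	static int calc_rssi_dbm(const uint8_t rssi[3], uint8_t valid_ant_mask,
				 uint32_t agc, int rssi_offset)
	{
		uint8_t max_rssi = 0;
		int i;

		/* Keep the strongest of the (up to three) connected receivers. */
		for (i = 0; i < 3; i++)
			if (valid_ant_mask & (1u << i))
				max_rssi = rssi[i] > max_rssi ? rssi[i] : max_rssi;

		/* dBm = max_rssi dB - agc dB - constant (IL4965_RSSI_OFFSET). */
		return (int)max_rssi - (int)agc - rssi_offset;
	}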
- */ - if (pkt->hdr.cmd == N_RX) { - phy_res = (struct il_rx_phy_res *)pkt->u.raw; - header = - (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) + - phy_res->cfg_phy_cnt); - - len = le16_to_cpu(phy_res->byte_count); - rx_pkt_status = - *(__le32 *) (pkt->u.raw + sizeof(*phy_res) + - phy_res->cfg_phy_cnt + len); - ampdu_status = le32_to_cpu(rx_pkt_status); - } else { - if (!il->_4965.last_phy_res_valid) { - IL_ERR("MPDU frame without cached PHY data\n"); - return; - } - phy_res = &il->_4965.last_phy_res; - amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw; - header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu)); - len = le16_to_cpu(amsdu->byte_count); - rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len); - ampdu_status = - il4965_translate_rx_status(il, le32_to_cpu(rx_pkt_status)); - } - - if ((unlikely(phy_res->cfg_phy_cnt > 20))) { - D_DROP("dsp size out of range [0,20]: %d/n", - phy_res->cfg_phy_cnt); - return; - } - - if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) || - !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { - D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status)); - return; - } - - /* This will be used in several places later */ - rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); - - /* rx_status carries information about the packet to mac80211 */ - rx_status.mactime = le64_to_cpu(phy_res->timestamp); - rx_status.band = - (phy_res-> - phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ : - IEEE80211_BAND_5GHZ; - rx_status.freq = - ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel), - rx_status.band); - rx_status.rate_idx = - il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); - rx_status.flag = 0; - - /* TSF isn't reliable. In order to allow smooth user experience, - * this W/A doesn't propagate it to the mac80211 */ - /*rx_status.flag |= RX_FLAG_MACTIME_MPDU; */ - - il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); - - /* Find max signal strength (dBm) among 3 antenna/receiver chains */ - rx_status.signal = il4965_calc_rssi(il, phy_res); - - il_dbg_log_rx_data_frame(il, len, header); - D_STATS("Rssi %d, TSF %llu\n", rx_status.signal, - (unsigned long long)rx_status.mactime); - - /* - * "antenna number" - * - * It seems that the antenna field in the phy flags value - * is actually a bit field. This is undefined by radiotap, - * it wants an actual antenna number but I always get "7" - * for most legacy frames I receive indicating that the - * same frame was received on all three RX chains. - * - * I think this field should be removed in favor of a - * new 802.11n radiotap field "RX chains" that is defined - * as a bitmask. - */ - rx_status.antenna = - (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> - RX_RES_PHY_FLAGS_ANTENNA_POS; - - /* set the preamble flag if appropriate */ - if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) - rx_status.flag |= RX_FLAG_SHORTPRE; - - /* Set up the HT phy flags */ - if (rate_n_flags & RATE_MCS_HT_MSK) - rx_status.flag |= RX_FLAG_HT; - if (rate_n_flags & RATE_MCS_HT40_MSK) - rx_status.flag |= RX_FLAG_40MHZ; - if (rate_n_flags & RATE_MCS_SGI_MSK) - rx_status.flag |= RX_FLAG_SHORT_GI; - - il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb, - &rx_status); -} - -/* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY). - * This will be used later in il_hdl_rx() for N_RX_MPDU. 
*/ -void -il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb) -{ - struct il_rx_pkt *pkt = rxb_addr(rxb); - il->_4965.last_phy_res_valid = true; - memcpy(&il->_4965.last_phy_res, pkt->u.raw, - sizeof(struct il_rx_phy_res)); -} - -static int -il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif, - enum ieee80211_band band, u8 is_active, - u8 n_probes, struct il_scan_channel *scan_ch) -{ - struct ieee80211_channel *chan; - const struct ieee80211_supported_band *sband; - const struct il_channel_info *ch_info; - u16 passive_dwell = 0; - u16 active_dwell = 0; - int added, i; - u16 channel; - - sband = il_get_hw_mode(il, band); - if (!sband) - return 0; - - active_dwell = il_get_active_dwell_time(il, band, n_probes); - passive_dwell = il_get_passive_dwell_time(il, band, vif); - - if (passive_dwell <= active_dwell) - passive_dwell = active_dwell + 1; - - for (i = 0, added = 0; i < il->scan_request->n_channels; i++) { - chan = il->scan_request->channels[i]; - - if (chan->band != band) - continue; - - channel = chan->hw_value; - scan_ch->channel = cpu_to_le16(channel); - - ch_info = il_get_channel_info(il, band, channel); - if (!il_is_channel_valid(ch_info)) { - D_SCAN("Channel %d is INVALID for this band.\n", - channel); - continue; - } - - if (!is_active || il_is_channel_passive(ch_info) || - (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) - scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; - else - scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE; - - if (n_probes) - scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes); - - scan_ch->active_dwell = cpu_to_le16(active_dwell); - scan_ch->passive_dwell = cpu_to_le16(passive_dwell); - - /* Set txpower levels to defaults */ - scan_ch->dsp_atten = 110; - - /* NOTE: if we were doing 6Mb OFDM for scans we'd use - * power level: - * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3; - */ - if (band == IEEE80211_BAND_5GHZ) - scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; - else - scan_ch->tx_gain = ((1 << 5) | (5 << 3)); - - D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", channel, - le32_to_cpu(scan_ch->type), - (scan_ch-> - type & SCAN_CHANNEL_TYPE_ACTIVE) ? "ACTIVE" : "PASSIVE", - (scan_ch-> - type & SCAN_CHANNEL_TYPE_ACTIVE) ? active_dwell : - passive_dwell); - - scan_ch++; - added++; - } - - D_SCAN("total channels to scan %d\n", added); - return added; -} - -static void -il4965_toggle_tx_ant(struct il_priv *il, u8 *ant, u8 valid) -{ - int i; - u8 ind = *ant; - - for (i = 0; i < RATE_ANT_NUM - 1; i++) { - ind = (ind + 1) < RATE_ANT_NUM ? 
ind + 1 : 0; - if (valid & BIT(ind)) { - *ant = ind; - return; - } - } -} - -int -il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif) -{ - struct il_host_cmd cmd = { - .id = C_SCAN, - .len = sizeof(struct il_scan_cmd), - .flags = CMD_SIZE_HUGE, - }; - struct il_scan_cmd *scan; - struct il_rxon_context *ctx = &il->ctx; - u32 rate_flags = 0; - u16 cmd_len; - u16 rx_chain = 0; - enum ieee80211_band band; - u8 n_probes = 0; - u8 rx_ant = il->hw_params.valid_rx_ant; - u8 rate; - bool is_active = false; - int chan_mod; - u8 active_chains; - u8 scan_tx_antennas = il->hw_params.valid_tx_ant; - int ret; - - lockdep_assert_held(&il->mutex); - - ctx = il_rxon_ctx_from_vif(vif); - - if (!il->scan_cmd) { - il->scan_cmd = - kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE, - GFP_KERNEL); - if (!il->scan_cmd) { - D_SCAN("fail to allocate memory for scan\n"); - return -ENOMEM; - } - } - scan = il->scan_cmd; - memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE); - - scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH; - scan->quiet_time = IL_ACTIVE_QUIET_TIME; - - if (il_is_any_associated(il)) { - u16 interval; - u32 extra; - u32 suspend_time = 100; - u32 scan_suspend_time = 100; - - D_INFO("Scanning while associated...\n"); - interval = vif->bss_conf.beacon_int; - - scan->suspend_time = 0; - scan->max_out_time = cpu_to_le32(200 * 1024); - if (!interval) - interval = suspend_time; - - extra = (suspend_time / interval) << 22; - scan_suspend_time = - (extra | ((suspend_time % interval) * 1024)); - scan->suspend_time = cpu_to_le32(scan_suspend_time); - D_SCAN("suspend_time 0x%X beacon interval %d\n", - scan_suspend_time, interval); - } - - if (il->scan_request->n_ssids) { - int i, p = 0; - D_SCAN("Kicking off active scan\n"); - for (i = 0; i < il->scan_request->n_ssids; i++) { - /* always does wildcard anyway */ - if (!il->scan_request->ssids[i].ssid_len) - continue; - scan->direct_scan[p].id = WLAN_EID_SSID; - scan->direct_scan[p].len = - il->scan_request->ssids[i].ssid_len; - memcpy(scan->direct_scan[p].ssid, - il->scan_request->ssids[i].ssid, - il->scan_request->ssids[i].ssid_len); - n_probes++; - p++; - } - is_active = true; - } else - D_SCAN("Start passive scan.\n"); - - scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; - scan->tx_cmd.sta_id = ctx->bcast_sta_id; - scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; - - switch (il->scan_band) { - case IEEE80211_BAND_2GHZ: - scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; - chan_mod = - le32_to_cpu(il->ctx.active. - flags & RXON_FLG_CHANNEL_MODE_MSK) >> - RXON_FLG_CHANNEL_MODE_POS; - if (chan_mod == CHANNEL_MODE_PURE_40) { - rate = RATE_6M_PLCP; - } else { - rate = RATE_1M_PLCP; - rate_flags = RATE_MCS_CCK_MSK; - } - break; - case IEEE80211_BAND_5GHZ: - rate = RATE_6M_PLCP; - break; - default: - IL_WARN("Invalid scan band\n"); - return -EIO; - } - - /* - * If active scanning is requested but a certain channel is - * marked passive, we can do active scanning if we detect - * transmissions. - * - * There is an issue with some firmware versions that triggers - * a sysassert on a "good CRC threshold" of zero (== disabled), - * on a radar channel even though this means that we should NOT - * send probes. - * - * The "good CRC threshold" is the number of frames that we - * need to receive during our dwell time on a channel before - * sending out probes -- setting this to a huge value will - * mean we never reach it, but at the same time work around - * the aforementioned issue. 
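The suspend-time packing computed earlier in il4965_request_scan() is compact enough to restate on its own: the upper bits carry the number of whole beacon intervals, the lower bits the remainder scaled by 1024. A sketch with an illustrative name, assuming a nonzero suspend time as the driver uses (100):

	#include <stdint.h>

	static uint32_t encode_scan_suspend_time(uint32_t suspend_time,
						 uint16_t beacon_int)
	{
		/* Fall back to the suspend time itself when no beacon
		 * interval is known, as the driver does. */
		uint32_t interval = beacon_int ? beacon_int : suspend_time;
		uint32_t extra = (suspend_time / interval) << 22;

		return extra | ((suspend_time % interval) * 1024);
	}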
Thus use IL_GOOD_CRC_TH_NEVER - * here instead of IL_GOOD_CRC_TH_DISABLED. - */ - scan->good_CRC_th = - is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER; - - band = il->scan_band; - - if (il->cfg->scan_rx_antennas[band]) - rx_ant = il->cfg->scan_rx_antennas[band]; - - il4965_toggle_tx_ant(il, &il->scan_tx_ant[band], scan_tx_antennas); - rate_flags |= BIT(il->scan_tx_ant[band]) << RATE_MCS_ANT_POS; - scan->tx_cmd.rate_n_flags = cpu_to_le32(rate | rate_flags); - - /* In power save mode use one chain, otherwise use all chains */ - if (test_bit(S_POWER_PMI, &il->status)) { - /* rx_ant has been set to all valid chains previously */ - active_chains = - rx_ant & ((u8) (il->chain_noise_data.active_chains)); - if (!active_chains) - active_chains = rx_ant; - - D_SCAN("chain_noise_data.active_chains: %u\n", - il->chain_noise_data.active_chains); - - rx_ant = il4965_first_antenna(active_chains); - } - - /* MIMO is not used here, but value is required */ - rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS; - rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; - rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; - rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; - scan->rx_chain = cpu_to_le16(rx_chain); - - cmd_len = - il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data, - vif->addr, il->scan_request->ie, - il->scan_request->ie_len, - IL_MAX_SCAN_SIZE - sizeof(*scan)); - scan->tx_cmd.len = cpu_to_le16(cmd_len); - - scan->filter_flags |= - (RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK); - - scan->channel_count = - il4965_get_channels_for_scan(il, vif, band, is_active, n_probes, - (void *)&scan->data[cmd_len]); - if (scan->channel_count == 0) { - D_SCAN("channel count %d\n", scan->channel_count); - return -EIO; - } - - cmd.len += - le16_to_cpu(scan->tx_cmd.len) + - scan->channel_count * sizeof(struct il_scan_channel); - cmd.data = scan; - scan->len = cpu_to_le16(cmd.len); - - set_bit(S_SCAN_HW, &il->status); - - ret = il_send_cmd_sync(il, &cmd); - if (ret) - clear_bit(S_SCAN_HW, &il->status); - - return ret; -} - -int -il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif, - bool add) -{ - struct il_vif_priv *vif_priv = (void *)vif->drv_priv; - - if (add) - return il4965_add_bssid_station(il, vif_priv->ctx, - vif->bss_conf.bssid, - &vif_priv->ibss_bssid_sta_id); - return il_remove_station(il, vif_priv->ibss_bssid_sta_id, - vif->bss_conf.bssid); -} - -void -il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, int freed) -{ - lockdep_assert_held(&il->sta_lock); - - if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed) - il->stations[sta_id].tid[tid].tfds_in_queue -= freed; - else { - D_TX("free more than tfds_in_queue (%u:%d)\n", - il->stations[sta_id].tid[tid].tfds_in_queue, freed); - il->stations[sta_id].tid[tid].tfds_in_queue = 0; - } -} - -#define IL_TX_QUEUE_MSK 0xfffff - -static bool -il4965_is_single_rx_stream(struct il_priv *il) -{ - return il->current_ht_config.smps == IEEE80211_SMPS_STATIC || - il->current_ht_config.single_chain_sufficient; -} - -#define IL_NUM_RX_CHAINS_MULTIPLE 3 -#define IL_NUM_RX_CHAINS_SINGLE 2 -#define IL_NUM_IDLE_CHAINS_DUAL 2 -#define IL_NUM_IDLE_CHAINS_SINGLE 1 - -/* - * Determine how many receiver/antenna chains to use. - * - * More provides better reception via diversity. Fewer saves power - * at the expense of throughput, but only when not in powersave to - * start with. - * - * MIMO (dual stream) requires at least 2, but works better with 3. 
- * This does not determine *which* chains to use, just how many. - */ -static int -il4965_get_active_rx_chain_count(struct il_priv *il) -{ - /* # of Rx chains to use when expecting MIMO. */ - if (il4965_is_single_rx_stream(il)) - return IL_NUM_RX_CHAINS_SINGLE; - else - return IL_NUM_RX_CHAINS_MULTIPLE; -} - -/* - * When we are in power saving mode, unless device support spatial - * multiplexing power save, use the active count for rx chain count. - */ -static int -il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt) -{ - /* # Rx chains when idling, depending on SMPS mode */ - switch (il->current_ht_config.smps) { - case IEEE80211_SMPS_STATIC: - case IEEE80211_SMPS_DYNAMIC: - return IL_NUM_IDLE_CHAINS_SINGLE; - case IEEE80211_SMPS_OFF: - return active_cnt; - default: - WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps); - return active_cnt; - } -} - -/* up to 4 chains */ -static u8 -il4965_count_chain_bitmap(u32 chain_bitmap) -{ - u8 res; - res = (chain_bitmap & BIT(0)) >> 0; - res += (chain_bitmap & BIT(1)) >> 1; - res += (chain_bitmap & BIT(2)) >> 2; - res += (chain_bitmap & BIT(3)) >> 3; - return res; -} - -/** - * il4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image - * - * Selects how many and which Rx receivers/antennas/chains to use. - * This should not be used for scan command ... it puts data in wrong place. - */ -void -il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx) -{ - bool is_single = il4965_is_single_rx_stream(il); - bool is_cam = !test_bit(S_POWER_PMI, &il->status); - u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt; - u32 active_chains; - u16 rx_chain; - - /* Tell uCode which antennas are actually connected. - * Before first association, we assume all antennas are connected. - * Just after first association, il4965_chain_noise_calibration() - * checks which antennas actually *are* connected. */ - if (il->chain_noise_data.active_chains) - active_chains = il->chain_noise_data.active_chains; - else - active_chains = il->hw_params.valid_rx_ant; - - rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS; - - /* How many receivers should we use? 
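The chain-count decisions above reduce to two small lookups plus a four-bit popcount. A standalone sketch with the driver's IL_NUM_* values inlined and a local SMPS enum standing in for mac80211's:

	enum smps_mode { SMPS_STATIC, SMPS_DYNAMIC, SMPS_OFF };

	/* Two chains are enough for a single stream, MIMO wants all three. */
	static int active_rx_chains(int single_stream)
	{
		return single_stream ? 2 : 3;
	}

	/* When idle, static/dynamic SMPS drop to one chain; SMPS off keeps
	 * the active count. */
	static int idle_rx_chains(enum smps_mode smps, int active_cnt)
	{
		switch (smps) {
		case SMPS_STATIC:
		case SMPS_DYNAMIC:
			return 1;
		case SMPS_OFF:
		default:
			return active_cnt;
		}
	}

	/* Count connected antennas: up to four bits in the chain bitmap. */
	static unsigned int count_chains(unsigned int bitmap)
	{
		return (bitmap & 1) + ((bitmap >> 1) & 1) +
		       ((bitmap >> 2) & 1) + ((bitmap >> 3) & 1);
	}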
*/ - active_rx_cnt = il4965_get_active_rx_chain_count(il); - idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt); - - /* correct rx chain count according hw settings - * and chain noise calibration - */ - valid_rx_cnt = il4965_count_chain_bitmap(active_chains); - if (valid_rx_cnt < active_rx_cnt) - active_rx_cnt = valid_rx_cnt; - - if (valid_rx_cnt < idle_rx_cnt) - idle_rx_cnt = valid_rx_cnt; - - rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS; - rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS; - - ctx->staging.rx_chain = cpu_to_le16(rx_chain); - - if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam) - ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK; - else - ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK; - - D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", ctx->staging.rx_chain, - active_rx_cnt, idle_rx_cnt); - - WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 || - active_rx_cnt < idle_rx_cnt); -} - -static const char * -il4965_get_fh_string(int cmd) -{ - switch (cmd) { - IL_CMD(FH49_RSCSR_CHNL0_STTS_WPTR_REG); - IL_CMD(FH49_RSCSR_CHNL0_RBDCB_BASE_REG); - IL_CMD(FH49_RSCSR_CHNL0_WPTR); - IL_CMD(FH49_MEM_RCSR_CHNL0_CONFIG_REG); - IL_CMD(FH49_MEM_RSSR_SHARED_CTRL_REG); - IL_CMD(FH49_MEM_RSSR_RX_STATUS_REG); - IL_CMD(FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV); - IL_CMD(FH49_TSSR_TX_STATUS_REG); - IL_CMD(FH49_TSSR_TX_ERROR_REG); - default: - return "UNKNOWN"; - } -} - -int -il4965_dump_fh(struct il_priv *il, char **buf, bool display) -{ - int i; -#ifdef CONFIG_IWLEGACY_DEBUG - int pos = 0; - size_t bufsz = 0; -#endif - static const u32 fh_tbl[] = { - FH49_RSCSR_CHNL0_STTS_WPTR_REG, - FH49_RSCSR_CHNL0_RBDCB_BASE_REG, - FH49_RSCSR_CHNL0_WPTR, - FH49_MEM_RCSR_CHNL0_CONFIG_REG, - FH49_MEM_RSSR_SHARED_CTRL_REG, - FH49_MEM_RSSR_RX_STATUS_REG, - FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV, - FH49_TSSR_TX_STATUS_REG, - FH49_TSSR_TX_ERROR_REG - }; -#ifdef CONFIG_IWLEGACY_DEBUG - if (display) { - bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; - *buf = kmalloc(bufsz, GFP_KERNEL); - if (!*buf) - return -ENOMEM; - pos += - scnprintf(*buf + pos, bufsz - pos, "FH register values:\n"); - for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { - pos += - scnprintf(*buf + pos, bufsz - pos, - " %34s: 0X%08x\n", - il4965_get_fh_string(fh_tbl[i]), - il_rd(il, fh_tbl[i])); - } - return pos; - } -#endif - IL_ERR("FH register values:\n"); - for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { - IL_ERR(" %34s: 0X%08x\n", il4965_get_fh_string(fh_tbl[i]), - il_rd(il, fh_tbl[i])); - } - return 0; -} - -void -il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb) -{ - struct il_rx_pkt *pkt = rxb_addr(rxb); - struct il_missed_beacon_notif *missed_beacon; - - missed_beacon = &pkt->u.missed_beacon; - if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) > - il->missed_beacon_threshold) { - D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n", - le32_to_cpu(missed_beacon->consecutive_missed_beacons), - le32_to_cpu(missed_beacon->total_missed_becons), - le32_to_cpu(missed_beacon->num_recvd_beacons), - le32_to_cpu(missed_beacon->num_expected_beacons)); - if (!test_bit(S_SCANNING, &il->status)) - il4965_init_sensitivity(il); - } -} - -/* Calculate noise level, based on measurements during network silence just - * before arriving beacon. This measurement can be done only if we know - * exactly when to expect beacons, therefore only when we're associated. 
*/ -static void -il4965_rx_calc_noise(struct il_priv *il) -{ - struct stats_rx_non_phy *rx_info; - int num_active_rx = 0; - int total_silence = 0; - int bcn_silence_a, bcn_silence_b, bcn_silence_c; - int last_rx_noise; - - rx_info = &(il->_4965.stats.rx.general); - bcn_silence_a = - le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; - bcn_silence_b = - le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; - bcn_silence_c = - le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; - - if (bcn_silence_a) { - total_silence += bcn_silence_a; - num_active_rx++; - } - if (bcn_silence_b) { - total_silence += bcn_silence_b; - num_active_rx++; - } - if (bcn_silence_c) { - total_silence += bcn_silence_c; - num_active_rx++; - } - - /* Average among active antennas */ - if (num_active_rx) - last_rx_noise = (total_silence / num_active_rx) - 107; - else - last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE; - - D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", bcn_silence_a, - bcn_silence_b, bcn_silence_c, last_rx_noise); -} - -#ifdef CONFIG_IWLEGACY_DEBUGFS -/* - * based on the assumption of all stats counter are in DWORD - * FIXME: This function is for debugging, do not deal with - * the case of counters roll-over. - */ -static void -il4965_accumulative_stats(struct il_priv *il, __le32 * stats) -{ - int i, size; - __le32 *prev_stats; - u32 *accum_stats; - u32 *delta, *max_delta; - struct stats_general_common *general, *accum_general; - struct stats_tx *tx, *accum_tx; - - prev_stats = (__le32 *) &il->_4965.stats; - accum_stats = (u32 *) &il->_4965.accum_stats; - size = sizeof(struct il_notif_stats); - general = &il->_4965.stats.general.common; - accum_general = &il->_4965.accum_stats.general.common; - tx = &il->_4965.stats.tx; - accum_tx = &il->_4965.accum_stats.tx; - delta = (u32 *) &il->_4965.delta_stats; - max_delta = (u32 *) &il->_4965.max_delta; - - for (i = sizeof(__le32); i < size; - i += - sizeof(__le32), stats++, prev_stats++, delta++, max_delta++, - accum_stats++) { - if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) { - *delta = - (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats)); - *accum_stats += *delta; - if (*delta > *max_delta) - *max_delta = *delta; - } - } - - /* reset accumulative stats for "no-counter" type stats */ - accum_general->temperature = general->temperature; - accum_general->ttl_timestamp = general->ttl_timestamp; -} -#endif - -#define REG_RECALIB_PERIOD (60) - -void -il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb) -{ - int change; - struct il_rx_pkt *pkt = rxb_addr(rxb); - - D_RX("Statistics notification received (%d vs %d).\n", - (int)sizeof(struct il_notif_stats), - le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK); - - change = - ((il->_4965.stats.general.common.temperature != - pkt->u.stats.general.common.temperature) || - ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) != - (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK))); -#ifdef CONFIG_IWLEGACY_DEBUGFS - il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats); -#endif - - /* TODO: reading some of stats is unneeded */ - memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats)); - - set_bit(S_STATS, &il->status); - - /* Reschedule the stats timer to occur in - * REG_RECALIB_PERIOD seconds to ensure we get a - * thermal update even if the uCode doesn't give - * us one */ - mod_timer(&il->stats_periodic, - jiffies + msecs_to_jiffies(REG_RECALIB_PERIOD * 1000)); - - if (unlikely(!test_bit(S_SCANNING, &il->status)) && - (pkt->hdr.cmd == N_STATS)) { - 
il4965_rx_calc_noise(il); - queue_work(il->workqueue, &il->run_time_calib_work); - } - if (il->cfg->ops->lib->temp_ops.temperature && change) - il->cfg->ops->lib->temp_ops.temperature(il); -} - -void -il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb) -{ - struct il_rx_pkt *pkt = rxb_addr(rxb); - - if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) { -#ifdef CONFIG_IWLEGACY_DEBUGFS - memset(&il->_4965.accum_stats, 0, - sizeof(struct il_notif_stats)); - memset(&il->_4965.delta_stats, 0, - sizeof(struct il_notif_stats)); - memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats)); -#endif - D_RX("Statistics have been cleared\n"); - } - il4965_hdl_stats(il, rxb); -} - - -/* - * mac80211 queues, ACs, hardware queues, FIFOs. - * - * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues - * - * Mac80211 uses the following numbers, which we get as from it - * by way of skb_get_queue_mapping(skb): - * - * VO 0 - * VI 1 - * BE 2 - * BK 3 - * - * - * Regular (not A-MPDU) frames are put into hardware queues corresponding - * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their - * own queue per aggregation session (RA/TID combination), such queues are - * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In - * order to map frames to the right queue, we also need an AC->hw queue - * mapping. This is implemented here. - * - * Due to the way hw queues are set up (by the hw specific modules like - * 4965.c), the AC->hw queue mapping is the identity - * mapping. - */ - -static const u8 tid_to_ac[] = { - IEEE80211_AC_BE, - IEEE80211_AC_BK, - IEEE80211_AC_BK, - IEEE80211_AC_BE, - IEEE80211_AC_VI, - IEEE80211_AC_VI, - IEEE80211_AC_VO, - IEEE80211_AC_VO -}; - -static inline int -il4965_get_ac_from_tid(u16 tid) -{ - if (likely(tid < ARRAY_SIZE(tid_to_ac))) - return tid_to_ac[tid]; - - /* no support for TIDs 8-15 yet */ - return -EINVAL; -} - -static inline int -il4965_get_fifo_from_tid(struct il_rxon_context *ctx, u16 tid) -{ - if (likely(tid < ARRAY_SIZE(tid_to_ac))) - return ctx->ac_to_fifo[tid_to_ac[tid]]; - - /* no support for TIDs 8-15 yet */ - return -EINVAL; -} - -/* - * handle build C_TX command notification. 
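The TID to AC to FIFO translation above is a plain table lookup with a bounds check that rejects TIDs 8-15. A standalone sketch; the ac_to_fifo argument stands in for the per-context table the driver reads:

	/* mac80211 access-category order: VO=0, VI=1, BE=2, BK=3. */
	enum ac { AC_VO, AC_VI, AC_BE, AC_BK, AC_NUM };

	static const enum ac tid_to_ac_map[8] = {
		AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO
	};

	static int tid_to_fifo(unsigned int tid, const int ac_to_fifo[AC_NUM])
	{
		if (tid >= 8)
			return -1;	/* -EINVAL in the driver: TIDs 8-15 unsupported */

		return ac_to_fifo[tid_to_ac_map[tid]];
	}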
- */ -static void -il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb, - struct il_tx_cmd *tx_cmd, - struct ieee80211_tx_info *info, - struct ieee80211_hdr *hdr, u8 std_id) -{ - __le16 fc = hdr->frame_control; - __le32 tx_flags = tx_cmd->tx_flags; - - tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; - if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { - tx_flags |= TX_CMD_FLG_ACK_MSK; - if (ieee80211_is_mgmt(fc)) - tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; - if (ieee80211_is_probe_resp(fc) && - !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) - tx_flags |= TX_CMD_FLG_TSF_MSK; - } else { - tx_flags &= (~TX_CMD_FLG_ACK_MSK); - tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; - } - - if (ieee80211_is_back_req(fc)) - tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; - - tx_cmd->sta_id = std_id; - if (ieee80211_has_morefrags(fc)) - tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; - - if (ieee80211_is_data_qos(fc)) { - u8 *qc = ieee80211_get_qos_ctl(hdr); - tx_cmd->tid_tspec = qc[0] & 0xf; - tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; - } else { - tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; - } - - il_tx_cmd_protection(il, info, fc, &tx_flags); - - tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); - if (ieee80211_is_mgmt(fc)) { - if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) - tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); - else - tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); - } else { - tx_cmd->timeout.pm_frame_timeout = 0; - } - - tx_cmd->driver_txop = 0; - tx_cmd->tx_flags = tx_flags; - tx_cmd->next_frame_len = 0; -} - -static void -il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd, - struct ieee80211_tx_info *info, __le16 fc) -{ - const u8 rts_retry_limit = 60; - u32 rate_flags; - int rate_idx; - u8 data_retry_limit; - u8 rate_plcp; - - /* Set retry limit on DATA packets and Probe Responses */ - if (ieee80211_is_probe_resp(fc)) - data_retry_limit = 3; - else - data_retry_limit = IL4965_DEFAULT_TX_RETRY; - tx_cmd->data_retry_limit = data_retry_limit; - /* Set retry limit on RTS packets */ - tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit); - - /* DATA packets will use the uCode station table for rate/antenna - * selection */ - if (ieee80211_is_data(fc)) { - tx_cmd->initial_rate_idx = 0; - tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; - return; - } - - /** - * If the current TX rate stored in mac80211 has the MCS bit set, it's - * not really a TX rate. Thus, we use the lowest supported rate for - * this band. Also use the lowest supported rate if the stored rate - * idx is invalid. 
- */ - rate_idx = info->control.rates[0].idx; - if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0 - || rate_idx > RATE_COUNT_LEGACY) - rate_idx = - rate_lowest_index(&il->bands[info->band], - info->control.sta); - /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ - if (info->band == IEEE80211_BAND_5GHZ) - rate_idx += IL_FIRST_OFDM_RATE; - /* Get PLCP rate for tx_cmd->rate_n_flags */ - rate_plcp = il_rates[rate_idx].plcp; - /* Zero out flags for this packet */ - rate_flags = 0; - - /* Set CCK flag as needed */ - if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE) - rate_flags |= RATE_MCS_CCK_MSK; - - /* Set up antennas */ - il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant); - rate_flags |= BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS; - - /* Set the rate in the TX cmd */ - tx_cmd->rate_n_flags = cpu_to_le32(rate_plcp | rate_flags); -} - -static void -il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info, - struct il_tx_cmd *tx_cmd, struct sk_buff *skb_frag, - int sta_id) -{ - struct ieee80211_key_conf *keyconf = info->control.hw_key; - - switch (keyconf->cipher) { - case WLAN_CIPHER_SUITE_CCMP: - tx_cmd->sec_ctl = TX_CMD_SEC_CCM; - memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); - if (info->flags & IEEE80211_TX_CTL_AMPDU) - tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK; - D_TX("tx_cmd with AES hwcrypto\n"); - break; - - case WLAN_CIPHER_SUITE_TKIP: - tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; - ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key); - D_TX("tx_cmd with tkip hwcrypto\n"); - break; - - case WLAN_CIPHER_SUITE_WEP104: - tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; - /* fall through */ - case WLAN_CIPHER_SUITE_WEP40: - tx_cmd->sec_ctl |= - (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) << - TX_CMD_SEC_SHIFT); - - memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); - - D_TX("Configuring packet for WEP encryption " "with key %d\n", - keyconf->keyidx); - break; - - default: - IL_ERR("Unknown encode cipher %x\n", keyconf->cipher); - break; - } -} - -/* - * start C_TX command process - */ -int -il4965_tx_skb(struct il_priv *il, struct sk_buff *skb) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_sta *sta = info->control.sta; - struct il_station_priv *sta_priv = NULL; - struct il_tx_queue *txq; - struct il_queue *q; - struct il_device_cmd *out_cmd; - struct il_cmd_meta *out_meta; - struct il_tx_cmd *tx_cmd; - struct il_rxon_context *ctx = &il->ctx; - int txq_id; - dma_addr_t phys_addr; - dma_addr_t txcmd_phys; - dma_addr_t scratch_phys; - u16 len, firstlen, secondlen; - u16 seq_number = 0; - __le16 fc; - u8 hdr_len; - u8 sta_id; - u8 wait_write_ptr = 0; - u8 tid = 0; - u8 *qc = NULL; - unsigned long flags; - bool is_agg = false; - - if (info->control.vif) - ctx = il_rxon_ctx_from_vif(info->control.vif); - - spin_lock_irqsave(&il->lock, flags); - if (il_is_rfkill(il)) { - D_DROP("Dropping - RF KILL\n"); - goto drop_unlock; - } - - fc = hdr->frame_control; - -#ifdef CONFIG_IWLEGACY_DEBUG - if (ieee80211_is_auth(fc)) - D_TX("Sending AUTH frame\n"); - else if (ieee80211_is_assoc_req(fc)) - D_TX("Sending ASSOC frame\n"); - else if (ieee80211_is_reassoc_req(fc)) - D_TX("Sending REASSOC frame\n"); -#endif - - hdr_len = ieee80211_hdrlen(fc); - - /* For management frames use broadcast id to do not break aggregation */ - if (!ieee80211_is_data(fc)) - sta_id = ctx->bcast_sta_id; - else { - /* Find idx 
into station table for destination station */ - sta_id = il_sta_id_or_broadcast(il, ctx, info->control.sta); - - if (sta_id == IL_INVALID_STATION) { - D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1); - goto drop_unlock; - } - } - - D_TX("station Id %d\n", sta_id); - - if (sta) - sta_priv = (void *)sta->drv_priv; - - if (sta_priv && sta_priv->asleep && - (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) { - /* - * This sends an asynchronous command to the device, - * but we can rely on it being processed before the - * next frame is processed -- and the next frame to - * this station is the one that will consume this - * counter. - * For now set the counter to just 1 since we do not - * support uAPSD yet. - */ - il4965_sta_modify_sleep_tx_count(il, sta_id, 1); - } - - /* - * Send this frame after DTIM -- there's a special queue - * reserved for this for contexts that support AP mode. - */ - if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { - txq_id = ctx->mcast_queue; - /* - * The microcode will clear the more data - * bit in the last frame it transmits. - */ - hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); - } else - txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)]; - - /* irqs already disabled/saved above when locking il->lock */ - spin_lock(&il->sta_lock); - - if (ieee80211_is_data_qos(fc)) { - qc = ieee80211_get_qos_ctl(hdr); - tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; - if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) { - spin_unlock(&il->sta_lock); - goto drop_unlock; - } - seq_number = il->stations[sta_id].tid[tid].seq_number; - seq_number &= IEEE80211_SCTL_SEQ; - hdr->seq_ctrl = - hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG); - hdr->seq_ctrl |= cpu_to_le16(seq_number); - seq_number += 0x10; - /* aggregation is on for this */ - if (info->flags & IEEE80211_TX_CTL_AMPDU && - il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) { - txq_id = il->stations[sta_id].tid[tid].agg.txq_id; - is_agg = true; - } - } - - txq = &il->txq[txq_id]; - q = &txq->q; - - if (unlikely(il_queue_space(q) < q->high_mark)) { - spin_unlock(&il->sta_lock); - goto drop_unlock; - } - - if (ieee80211_is_data_qos(fc)) { - il->stations[sta_id].tid[tid].tfds_in_queue++; - if (!ieee80211_has_morefrags(fc)) - il->stations[sta_id].tid[tid].seq_number = seq_number; - } - - spin_unlock(&il->sta_lock); - - /* Set up driver data for this TFD */ - memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info)); - txq->txb[q->write_ptr].skb = skb; - txq->txb[q->write_ptr].ctx = ctx; - - /* Set up first empty entry in queue's array of Tx/cmd buffers */ - out_cmd = txq->cmd[q->write_ptr]; - out_meta = &txq->meta[q->write_ptr]; - tx_cmd = &out_cmd->cmd.tx; - memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); - memset(tx_cmd, 0, sizeof(struct il_tx_cmd)); - - /* - * Set up the Tx-command (not MAC!) header. - * Store the chosen Tx queue and TFD idx within the sequence field; - * after Tx, uCode's Tx response will return this value so driver can - * locate the frame within the tx queue and do post-tx processing. 
- */ - out_cmd->hdr.cmd = C_TX; - out_cmd->hdr.sequence = - cpu_to_le16((u16) - (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr))); - - /* Copy MAC header from skb into command buffer */ - memcpy(tx_cmd->hdr, hdr, hdr_len); - - /* Total # bytes to be transmitted */ - len = (u16) skb->len; - tx_cmd->len = cpu_to_le16(len); - - if (info->control.hw_key) - il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id); - - /* TODO need this for burst mode later on */ - il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id); - il_dbg_log_tx_data_frame(il, len, hdr); - - il4965_tx_cmd_build_rate(il, tx_cmd, info, fc); - - il_update_stats(il, true, fc, len); - /* - * Use the first empty entry in this queue's command buffer array - * to contain the Tx command and MAC header concatenated together - * (payload data will be in another buffer). - * Size of this varies, due to varying MAC header length. - * If end is not dword aligned, we'll have 2 extra bytes at the end - * of the MAC header (device reads on dword boundaries). - * We'll tell device about this padding later. - */ - len = sizeof(struct il_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len; - firstlen = (len + 3) & ~3; - - /* Tell NIC about any 2-byte padding after MAC header */ - if (firstlen != len) - tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; - - /* Physical address of this Tx command's header (not MAC header!), - * within command buffer array. */ - txcmd_phys = - pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen, - PCI_DMA_BIDIRECTIONAL); - dma_unmap_addr_set(out_meta, mapping, txcmd_phys); - dma_unmap_len_set(out_meta, len, firstlen); - /* Add buffer containing Tx command and MAC(!) header to TFD's - * first entry */ - il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, - 1, 0); - - if (!ieee80211_has_morefrags(hdr->frame_control)) { - txq->need_update = 1; - } else { - wait_write_ptr = 1; - txq->need_update = 0; - } - - /* Set up TFD's 2nd entry to point directly to remainder of skb, - * if any (802.11 null frames have no payload). 
*/ - secondlen = skb->len - hdr_len; - if (secondlen > 0) { - phys_addr = - pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen, - PCI_DMA_TODEVICE); - il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr, - secondlen, 0, 0); - } - - scratch_phys = - txcmd_phys + sizeof(struct il_cmd_header) + - offsetof(struct il_tx_cmd, scratch); - - /* take back ownership of DMA buffer to enable update */ - pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen, - PCI_DMA_BIDIRECTIONAL); - tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); - tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys); - - D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence)); - D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); - il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd)); - il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len); - - /* Set up entry for this TFD in Tx byte-count array */ - if (info->flags & IEEE80211_TX_CTL_AMPDU) - il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq, - le16_to_cpu(tx_cmd-> - len)); - - pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen, - PCI_DMA_BIDIRECTIONAL); - - /* Tell device the write idx *just past* this latest filled TFD */ - q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd); - il_txq_update_write_ptr(il, txq); - spin_unlock_irqrestore(&il->lock, flags); - - /* - * At this point the frame is "transmitted" successfully - * and we will get a TX status notification eventually, - * regardless of the value of ret. "ret" only indicates - * whether or not we should update the write pointer. - */ - - /* - * Avoid atomic ops if it isn't an associated client. - * Also, if this is a packet for aggregation, don't - * increase the counter because the ucode will stop - * aggregation queues when their respective station - * goes to sleep. 
- */ - if (sta_priv && sta_priv->client && !is_agg) - atomic_inc(&sta_priv->pending_frames); - - if (il_queue_space(q) < q->high_mark && il->mac80211_registered) { - if (wait_write_ptr) { - spin_lock_irqsave(&il->lock, flags); - txq->need_update = 1; - il_txq_update_write_ptr(il, txq); - spin_unlock_irqrestore(&il->lock, flags); - } else { - il_stop_queue(il, txq); - } - } - - return 0; - -drop_unlock: - spin_unlock_irqrestore(&il->lock, flags); - return -1; -} - -static inline int -il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size) -{ - ptr->addr = - dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma, GFP_KERNEL); - if (!ptr->addr) - return -ENOMEM; - ptr->size = size; - return 0; -} - -static inline void -il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr) -{ - if (unlikely(!ptr->addr)) - return; - - dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma); - memset(ptr, 0, sizeof(*ptr)); -} - -/** - * il4965_hw_txq_ctx_free - Free TXQ Context - * - * Destroy all TX DMA queues and structures - */ -void -il4965_hw_txq_ctx_free(struct il_priv *il) -{ - int txq_id; - - /* Tx queues */ - if (il->txq) { - for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) - if (txq_id == il->cmd_queue) - il_cmd_queue_free(il); - else - il_tx_queue_free(il, txq_id); - } - il4965_free_dma_ptr(il, &il->kw); - - il4965_free_dma_ptr(il, &il->scd_bc_tbls); - - /* free tx queue structure */ - il_txq_mem(il); -} - -/** - * il4965_txq_ctx_alloc - allocate TX queue context - * Allocate all Tx DMA structures and initialize them - * - * @param il - * @return error code - */ -int -il4965_txq_ctx_alloc(struct il_priv *il) -{ - int ret; - int txq_id, slots_num; - unsigned long flags; - - /* Free all tx/cmd queues and keep-warm buffer */ - il4965_hw_txq_ctx_free(il); - - ret = - il4965_alloc_dma_ptr(il, &il->scd_bc_tbls, - il->hw_params.scd_bc_tbls_size); - if (ret) { - IL_ERR("Scheduler BC Table allocation failed\n"); - goto error_bc_tbls; - } - /* Alloc keep-warm buffer */ - ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE); - if (ret) { - IL_ERR("Keep Warm allocation failed\n"); - goto error_kw; - } - - /* allocate tx queue structure */ - ret = il_alloc_txq_mem(il); - if (ret) - goto error; - - spin_lock_irqsave(&il->lock, flags); - - /* Turn off all Tx DMA fifos */ - il4965_txq_set_sched(il, 0); - - /* Tell NIC where to find the "keep warm" buffer */ - il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4); - - spin_unlock_irqrestore(&il->lock, flags); - - /* Alloc and init all Tx queues, including the command queue (#4/#9) */ - for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) { - slots_num = - (txq_id == - il->cmd_queue) ? 
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; - ret = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id); - if (ret) { - IL_ERR("Tx %d queue init failed\n", txq_id); - goto error; - } - } - - return ret; - -error: - il4965_hw_txq_ctx_free(il); - il4965_free_dma_ptr(il, &il->kw); -error_kw: - il4965_free_dma_ptr(il, &il->scd_bc_tbls); -error_bc_tbls: - return ret; -} - -void -il4965_txq_ctx_reset(struct il_priv *il) -{ - int txq_id, slots_num; - unsigned long flags; - - spin_lock_irqsave(&il->lock, flags); - - /* Turn off all Tx DMA fifos */ - il4965_txq_set_sched(il, 0); - - /* Tell NIC where to find the "keep warm" buffer */ - il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4); - - spin_unlock_irqrestore(&il->lock, flags); - - /* Alloc and init all Tx queues, including the command queue (#4) */ - for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) { - slots_num = - txq_id == il->cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; - il_tx_queue_reset(il, &il->txq[txq_id], slots_num, txq_id); - } -} - -/** - * il4965_txq_ctx_stop - Stop all Tx DMA channels - */ -void -il4965_txq_ctx_stop(struct il_priv *il) -{ - int ch, txq_id; - unsigned long flags; - - /* Turn off all Tx DMA fifos */ - spin_lock_irqsave(&il->lock, flags); - - il4965_txq_set_sched(il, 0); - - /* Stop each Tx DMA channel, and wait for it to be idle */ - for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) { - il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); - if (il_poll_bit - (il, FH49_TSSR_TX_STATUS_REG, - FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000)) - IL_ERR("Failing on timeout while stopping" - " DMA channel %d [0x%08x]", ch, - il_rd(il, FH49_TSSR_TX_STATUS_REG)); - } - spin_unlock_irqrestore(&il->lock, flags); - - if (!il->txq) - return; - - /* Unmap DMA from host system and free skb's */ - for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) - if (txq_id == il->cmd_queue) - il_cmd_queue_unmap(il); - else - il_tx_queue_unmap(il, txq_id); -} - -/* - * Find first available (lowest unused) Tx Queue, mark it "active". - * Called only when finding queue for aggregation. - * Should never return anything < 7, because they should already - * be in use as EDCA AC (0-3), Command (4), reserved (5, 6) - */ -static int -il4965_txq_ctx_activate_free(struct il_priv *il) -{ - int txq_id; - - for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) - if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk)) - return txq_id; - return -1; -} - -/** - * il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration - */ -static void -il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id) -{ - /* Simply stop the queue, but don't change any configuration; - * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. 
*/ - il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id), - (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) | - (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); -} - -/** - * il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue - */ -static int -il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id) -{ - u32 tbl_dw_addr; - u32 tbl_dw; - u16 scd_q2ratid; - - scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK; - - tbl_dw_addr = - il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); - - tbl_dw = il_read_targ_mem(il, tbl_dw_addr); - - if (txq_id & 0x1) - tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); - else - tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); - - il_write_targ_mem(il, tbl_dw_addr, tbl_dw); - - return 0; -} - -/** - * il4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue - * - * NOTE: txq_id must be greater than IL49_FIRST_AMPDU_QUEUE, - * i.e. it must be one of the higher queues used for aggregation - */ -static int -il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id, - int tid, u16 ssn_idx) -{ - unsigned long flags; - u16 ra_tid; - int ret; - - if ((IL49_FIRST_AMPDU_QUEUE > txq_id) || - (IL49_FIRST_AMPDU_QUEUE + - il->cfg->base_params->num_of_ampdu_queues <= txq_id)) { - IL_WARN("queue number out of range: %d, must be %d to %d\n", - txq_id, IL49_FIRST_AMPDU_QUEUE, - IL49_FIRST_AMPDU_QUEUE + - il->cfg->base_params->num_of_ampdu_queues - 1); - return -EINVAL; - } - - ra_tid = BUILD_RAxTID(sta_id, tid); - - /* Modify device's station table to Tx this TID */ - ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid); - if (ret) - return ret; - - spin_lock_irqsave(&il->lock, flags); - - /* Stop this Tx queue before configuring it */ - il4965_tx_queue_stop_scheduler(il, txq_id); - - /* Map receiver-address / traffic-ID to this queue */ - il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id); - - /* Set this queue as a chain-building queue */ - il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id)); - - /* Place first TFD at idx corresponding to start sequence number. 
- * Assumes that ssn_idx is valid (!= 0xFFF) */ - il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); - il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); - il4965_set_wr_ptrs(il, txq_id, ssn_idx); - - /* Set up Tx win size and frame limit for this queue */ - il_write_targ_mem(il, - il->scd_base_addr + - IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id), - (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) - & IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); - - il_write_targ_mem(il, - il->scd_base_addr + - IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), - (SCD_FRAME_LIMIT << - IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & - IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); - - il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id)); - - /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ - il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1); - - spin_unlock_irqrestore(&il->lock, flags); - - return 0; -} - -int -il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid, u16 * ssn) -{ - int sta_id; - int tx_fifo; - int txq_id; - int ret; - unsigned long flags; - struct il_tid_data *tid_data; - - tx_fifo = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid); - if (unlikely(tx_fifo < 0)) - return tx_fifo; - - D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid); - - sta_id = il_sta_id(sta); - if (sta_id == IL_INVALID_STATION) { - IL_ERR("Start AGG on invalid station\n"); - return -ENXIO; - } - if (unlikely(tid >= MAX_TID_COUNT)) - return -EINVAL; - - if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) { - IL_ERR("Start AGG when state is not IL_AGG_OFF !\n"); - return -ENXIO; - } - - txq_id = il4965_txq_ctx_activate_free(il); - if (txq_id == -1) { - IL_ERR("No free aggregation queue available\n"); - return -ENXIO; - } - - spin_lock_irqsave(&il->sta_lock, flags); - tid_data = &il->stations[sta_id].tid[tid]; - *ssn = SEQ_TO_SN(tid_data->seq_number); - tid_data->agg.txq_id = txq_id; - il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id); - spin_unlock_irqrestore(&il->sta_lock, flags); - - ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, sta_id, tid, *ssn); - if (ret) - return ret; - - spin_lock_irqsave(&il->sta_lock, flags); - tid_data = &il->stations[sta_id].tid[tid]; - if (tid_data->tfds_in_queue == 0) { - D_HT("HW queue is empty\n"); - tid_data->agg.state = IL_AGG_ON; - ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); - } else { - D_HT("HW queue is NOT empty: %d packets in HW queue\n", - tid_data->tfds_in_queue); - tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA; - } - spin_unlock_irqrestore(&il->sta_lock, flags); - return ret; -} - -/** - * txq_id must be greater than IL49_FIRST_AMPDU_QUEUE - * il->lock must be held by the caller - */ -static int -il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo) -{ - if ((IL49_FIRST_AMPDU_QUEUE > txq_id) || - (IL49_FIRST_AMPDU_QUEUE + - il->cfg->base_params->num_of_ampdu_queues <= txq_id)) { - IL_WARN("queue number out of range: %d, must be %d to %d\n", - txq_id, IL49_FIRST_AMPDU_QUEUE, - IL49_FIRST_AMPDU_QUEUE + - il->cfg->base_params->num_of_ampdu_queues - 1); - return -EINVAL; - } - - il4965_tx_queue_stop_scheduler(il, txq_id); - - il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id)); - - il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); - il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); - /* supposes that ssn_idx is valid (!= 0xFFF) */ - il4965_set_wr_ptrs(il, txq_id, ssn_idx); - - il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, 
(1 << txq_id)); - il_txq_ctx_deactivate(il, txq_id); - il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0); - - return 0; -} - -int -il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid) -{ - int tx_fifo_id, txq_id, sta_id, ssn; - struct il_tid_data *tid_data; - int write_ptr, read_ptr; - unsigned long flags; - - tx_fifo_id = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid); - if (unlikely(tx_fifo_id < 0)) - return tx_fifo_id; - - sta_id = il_sta_id(sta); - - if (sta_id == IL_INVALID_STATION) { - IL_ERR("Invalid station for AGG tid %d\n", tid); - return -ENXIO; - } - - spin_lock_irqsave(&il->sta_lock, flags); - - tid_data = &il->stations[sta_id].tid[tid]; - ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; - txq_id = tid_data->agg.txq_id; - - switch (il->stations[sta_id].tid[tid].agg.state) { - case IL_EMPTYING_HW_QUEUE_ADDBA: - /* - * This can happen if the peer stops aggregation - * again before we've had a chance to drain the - * queue we selected previously, i.e. before the - * session was really started completely. - */ - D_HT("AGG stop before setup done\n"); - goto turn_off; - case IL_AGG_ON: - break; - default: - IL_WARN("Stopping AGG while state not ON or starting\n"); - } - - write_ptr = il->txq[txq_id].q.write_ptr; - read_ptr = il->txq[txq_id].q.read_ptr; - - /* The queue is not empty */ - if (write_ptr != read_ptr) { - D_HT("Stopping a non empty AGG HW QUEUE\n"); - il->stations[sta_id].tid[tid].agg.state = - IL_EMPTYING_HW_QUEUE_DELBA; - spin_unlock_irqrestore(&il->sta_lock, flags); - return 0; - } - - D_HT("HW queue is empty\n"); -turn_off: - il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF; - - /* do not restore/save irqs */ - spin_unlock(&il->sta_lock); - spin_lock(&il->lock); - - /* - * the only reason this call can fail is queue number out of range, - * which can happen if uCode is reloaded and all the station - * information are lost. if it is outside the range, there is no need - * to deactivate the uCode queue, just return "success" to allow - * mac80211 to clean up it own data. 
- */ - il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id); - spin_unlock_irqrestore(&il->lock, flags); - - ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); - - return 0; -} - -int -il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id) -{ - struct il_queue *q = &il->txq[txq_id].q; - u8 *addr = il->stations[sta_id].sta.sta.addr; - struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid]; - struct il_rxon_context *ctx; - - ctx = &il->ctx; - - lockdep_assert_held(&il->sta_lock); - - switch (il->stations[sta_id].tid[tid].agg.state) { - case IL_EMPTYING_HW_QUEUE_DELBA: - /* We are reclaiming the last packet of the */ - /* aggregated HW queue */ - if (txq_id == tid_data->agg.txq_id && - q->read_ptr == q->write_ptr) { - u16 ssn = SEQ_TO_SN(tid_data->seq_number); - int tx_fifo = il4965_get_fifo_from_tid(ctx, tid); - D_HT("HW queue empty: continue DELBA flow\n"); - il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo); - tid_data->agg.state = IL_AGG_OFF; - ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid); - } - break; - case IL_EMPTYING_HW_QUEUE_ADDBA: - /* We are reclaiming the last packet of the queue */ - if (tid_data->tfds_in_queue == 0) { - D_HT("HW queue empty: continue ADDBA flow\n"); - tid_data->agg.state = IL_AGG_ON; - ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid); - } - break; - } - - return 0; -} - -static void -il4965_non_agg_tx_status(struct il_priv *il, struct il_rxon_context *ctx, - const u8 *addr1) -{ - struct ieee80211_sta *sta; - struct il_station_priv *sta_priv; - - rcu_read_lock(); - sta = ieee80211_find_sta(ctx->vif, addr1); - if (sta) { - sta_priv = (void *)sta->drv_priv; - /* avoid atomic ops if this isn't a client */ - if (sta_priv->client && - atomic_dec_return(&sta_priv->pending_frames) == 0) - ieee80211_sta_block_awake(il->hw, sta, false); - } - rcu_read_unlock(); -} - -static void -il4965_tx_status(struct il_priv *il, struct il_tx_info *tx_info, bool is_agg) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data; - - if (!is_agg) - il4965_non_agg_tx_status(il, tx_info->ctx, hdr->addr1); - - ieee80211_tx_status_irqsafe(il->hw, tx_info->skb); -} - -int -il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx) -{ - struct il_tx_queue *txq = &il->txq[txq_id]; - struct il_queue *q = &txq->q; - struct il_tx_info *tx_info; - int nfreed = 0; - struct ieee80211_hdr *hdr; - - if (idx >= q->n_bd || il_queue_used(q, idx) == 0) { - IL_ERR("Read idx for DMA queue txq id (%d), idx %d, " - "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd, - q->write_ptr, q->read_ptr); - return 0; - } - - for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; - q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) { - - tx_info = &txq->txb[txq->q.read_ptr]; - - if (WARN_ON_ONCE(tx_info->skb == NULL)) - continue; - - hdr = (struct ieee80211_hdr *)tx_info->skb->data; - if (ieee80211_is_data_qos(hdr->frame_control)) - nfreed++; - - il4965_tx_status(il, tx_info, - txq_id >= IL4965_FIRST_AMPDU_QUEUE); - tx_info->skb = NULL; - - il->cfg->ops->lib->txq_free_tfd(il, txq); - } - return nfreed; -} - -/** - * il4965_tx_status_reply_compressed_ba - Update tx status from block-ack - * - * Go through block-ack's bitmap of ACK'd frames, update driver's record of - * ACK vs. not. This gets sent to mac80211, then to rate scaling algo. 
- */ -static int -il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg, - struct il_compressed_ba_resp *ba_resp) -{ - int i, sh, ack; - u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl); - u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); - int successes = 0; - struct ieee80211_tx_info *info; - u64 bitmap, sent_bitmap; - - if (unlikely(!agg->wait_for_ba)) { - if (unlikely(ba_resp->bitmap)) - IL_ERR("Received BA when not expected\n"); - return -EINVAL; - } - - /* Mark that the expected block-ack response arrived */ - agg->wait_for_ba = 0; - D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl); - - /* Calculate shift to align block-ack bits with our Tx win bits */ - sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4); - if (sh < 0) /* tbw something is wrong with indices */ - sh += 0x100; - - if (agg->frame_count > (64 - sh)) { - D_TX_REPLY("more frames than bitmap size"); - return -1; - } - - /* don't use 64-bit values for now */ - bitmap = le64_to_cpu(ba_resp->bitmap) >> sh; - - /* check for success or failure according to the - * transmitted bitmap and block-ack bitmap */ - sent_bitmap = bitmap & agg->bitmap; - - /* For each frame attempted in aggregation, - * update driver's record of tx frame's status. */ - i = 0; - while (sent_bitmap) { - ack = sent_bitmap & 1ULL; - successes += ack; - D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK", - i, (agg->start_idx + i) & 0xff, agg->start_idx + i); - sent_bitmap >>= 1; - ++i; - } - - D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap); - - info = IEEE80211_SKB_CB(il->txq[scd_flow].txb[agg->start_idx].skb); - memset(&info->status, 0, sizeof(info->status)); - info->flags |= IEEE80211_TX_STAT_ACK; - info->flags |= IEEE80211_TX_STAT_AMPDU; - info->status.ampdu_ack_len = successes; - info->status.ampdu_len = agg->frame_count; - il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info); - - return 0; -} - -/** - * translate ucode response to mac80211 tx status control values - */ -void -il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags, - struct ieee80211_tx_info *info) -{ - struct ieee80211_tx_rate *r = &info->control.rates[0]; - - info->antenna_sel_tx = - ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); - if (rate_n_flags & RATE_MCS_HT_MSK) - r->flags |= IEEE80211_TX_RC_MCS; - if (rate_n_flags & RATE_MCS_GF_MSK) - r->flags |= IEEE80211_TX_RC_GREEN_FIELD; - if (rate_n_flags & RATE_MCS_HT40_MSK) - r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; - if (rate_n_flags & RATE_MCS_DUP_MSK) - r->flags |= IEEE80211_TX_RC_DUP_DATA; - if (rate_n_flags & RATE_MCS_SGI_MSK) - r->flags |= IEEE80211_TX_RC_SHORT_GI; - r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band); -} - -/** - * il4965_hdl_compressed_ba - Handler for N_COMPRESSED_BA - * - * Handles block-acknowledge notification from device, which reports success - * of frames sent via aggregation. 
- */
-void
-il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
-{
-	struct il_rx_pkt *pkt = rxb_addr(rxb);
-	struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
-	struct il_tx_queue *txq = NULL;
-	struct il_ht_agg *agg;
-	int idx;
-	int sta_id;
-	int tid;
-	unsigned long flags;
-
-	/* "flow" corresponds to Tx queue */
-	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
-
-	/* "ssn" is start of block-ack Tx win, corresponds to idx
-	 * (in Tx queue's circular buffer) of first TFD/frame in win */
-	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
-
-	if (scd_flow >= il->hw_params.max_txq_num) {
-		IL_ERR("BUG_ON scd_flow is bigger than number of queues\n");
-		return;
-	}
-
-	txq = &il->txq[scd_flow];
-	sta_id = ba_resp->sta_id;
-	tid = ba_resp->tid;
-	agg = &il->stations[sta_id].tid[tid].agg;
-	if (unlikely(agg->txq_id != scd_flow)) {
-		/*
-		 * FIXME: this is a uCode bug which need to be addressed,
-		 * log the information and return for now!
-		 * since it is possible happen very often and in order
-		 * not to fill the syslog, don't enable the logging by default
-		 */
-		D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n",
-			   scd_flow, agg->txq_id);
-		return;
-	}
-
-	/* Find idx just before block-ack win */
-	idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
-
-	spin_lock_irqsave(&il->sta_lock, flags);
-
-	D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n",
-		   agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32,
-		   ba_resp->sta_id);
-	D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx," "scd_flow = "
-		   "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl,
-		   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
-		   ba_resp->scd_flow, ba_resp->scd_ssn);
-	D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx,
-		   (unsigned long long)agg->bitmap);
-
-	/* Update driver's record of ACK vs. not for each frame in win */
-	il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);
-
-	/* Release all TFDs before the SSN, i.e. all TFDs in front of
-	 * block-ack win (we assume that they've been successfully
-	 * transmitted ... if not, it's too late anyway.
*/ - if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { - /* calculate mac80211 ampdu sw queue to wake */ - int freed = il4965_tx_queue_reclaim(il, scd_flow, idx); - il4965_free_tfds_in_queue(il, sta_id, tid, freed); - - if (il_queue_space(&txq->q) > txq->q.low_mark && - il->mac80211_registered && - agg->state != IL_EMPTYING_HW_QUEUE_DELBA) - il_wake_queue(il, txq); - - il4965_txq_check_empty(il, sta_id, tid, scd_flow); - } - - spin_unlock_irqrestore(&il->sta_lock, flags); -} - -#ifdef CONFIG_IWLEGACY_DEBUG -const char * -il4965_get_tx_fail_reason(u32 status) -{ -#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x -#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x - - switch (status & TX_STATUS_MSK) { - case TX_STATUS_SUCCESS: - return "SUCCESS"; - TX_STATUS_POSTPONE(DELAY); - TX_STATUS_POSTPONE(FEW_BYTES); - TX_STATUS_POSTPONE(QUIET_PERIOD); - TX_STATUS_POSTPONE(CALC_TTAK); - TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY); - TX_STATUS_FAIL(SHORT_LIMIT); - TX_STATUS_FAIL(LONG_LIMIT); - TX_STATUS_FAIL(FIFO_UNDERRUN); - TX_STATUS_FAIL(DRAIN_FLOW); - TX_STATUS_FAIL(RFKILL_FLUSH); - TX_STATUS_FAIL(LIFE_EXPIRE); - TX_STATUS_FAIL(DEST_PS); - TX_STATUS_FAIL(HOST_ABORTED); - TX_STATUS_FAIL(BT_RETRY); - TX_STATUS_FAIL(STA_INVALID); - TX_STATUS_FAIL(FRAG_DROPPED); - TX_STATUS_FAIL(TID_DISABLE); - TX_STATUS_FAIL(FIFO_FLUSHED); - TX_STATUS_FAIL(INSUFFICIENT_CF_POLL); - TX_STATUS_FAIL(PASSIVE_NO_RX); - TX_STATUS_FAIL(NO_BEACON_ON_RADAR); - } - - return "UNKNOWN"; - -#undef TX_STATUS_FAIL -#undef TX_STATUS_POSTPONE -} -#endif /* CONFIG_IWLEGACY_DEBUG */ - -static struct il_link_quality_cmd * -il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id) -{ - int i, r; - struct il_link_quality_cmd *link_cmd; - u32 rate_flags = 0; - __le32 rate_n_flags; - - link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL); - if (!link_cmd) { - IL_ERR("Unable to allocate memory for LQ cmd.\n"); - return NULL; - } - /* Set up the rate scaling to start at selected rate, fall back - * all the way down to 1M in IEEE order, and then spin on 1M */ - if (il->band == IEEE80211_BAND_5GHZ) - r = RATE_6M_IDX; - else - r = RATE_1M_IDX; - - if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE) - rate_flags |= RATE_MCS_CCK_MSK; - - rate_flags |= - il4965_first_antenna(il->hw_params. - valid_tx_ant) << RATE_MCS_ANT_POS; - rate_n_flags = cpu_to_le32(il_rates[r].plcp | rate_flags); - for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) - link_cmd->rs_table[i].rate_n_flags = rate_n_flags; - - link_cmd->general_params.single_stream_ant_msk = - il4965_first_antenna(il->hw_params.valid_tx_ant); - - link_cmd->general_params.dual_stream_ant_msk = - il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params. - valid_tx_ant); - if (!link_cmd->general_params.dual_stream_ant_msk) { - link_cmd->general_params.dual_stream_ant_msk = ANT_AB; - } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) { - link_cmd->general_params.dual_stream_ant_msk = - il->hw_params.valid_tx_ant; - } - - link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; - link_cmd->agg_params.agg_time_limit = - cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); - - link_cmd->sta_id = sta_id; - - return link_cmd; -} - -/* - * il4965_add_bssid_station - Add the special IBSS BSSID station - * - * Function sleeps. 
- */ -int -il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx, - const u8 *addr, u8 *sta_id_r) -{ - int ret; - u8 sta_id; - struct il_link_quality_cmd *link_cmd; - unsigned long flags; - - if (sta_id_r) - *sta_id_r = IL_INVALID_STATION; - - ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id); - if (ret) { - IL_ERR("Unable to add station %pM\n", addr); - return ret; - } - - if (sta_id_r) - *sta_id_r = sta_id; - - spin_lock_irqsave(&il->sta_lock, flags); - il->stations[sta_id].used |= IL_STA_LOCAL; - spin_unlock_irqrestore(&il->sta_lock, flags); - - /* Set up default rate scaling table in device's station table */ - link_cmd = il4965_sta_alloc_lq(il, sta_id); - if (!link_cmd) { - IL_ERR("Unable to initialize rate scaling for station %pM.\n", - addr); - return -ENOMEM; - } - - ret = il_send_lq_cmd(il, ctx, link_cmd, CMD_SYNC, true); - if (ret) - IL_ERR("Link quality command failed (%d)\n", ret); - - spin_lock_irqsave(&il->sta_lock, flags); - il->stations[sta_id].lq = link_cmd; - spin_unlock_irqrestore(&il->sta_lock, flags); - - return 0; -} - -static int -il4965_static_wepkey_cmd(struct il_priv *il, struct il_rxon_context *ctx, - bool send_if_empty) -{ - int i, not_empty = 0; - u8 buff[sizeof(struct il_wep_cmd) + - sizeof(struct il_wep_key) * WEP_KEYS_MAX]; - struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff; - size_t cmd_size = sizeof(struct il_wep_cmd); - struct il_host_cmd cmd = { - .id = ctx->wep_key_cmd, - .data = wep_cmd, - .flags = CMD_SYNC, - }; - - might_sleep(); - - memset(wep_cmd, 0, - cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX)); - - for (i = 0; i < WEP_KEYS_MAX; i++) { - wep_cmd->key[i].key_idx = i; - if (ctx->wep_keys[i].key_size) { - wep_cmd->key[i].key_offset = i; - not_empty = 1; - } else { - wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET; - } - - wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size; - memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key, - ctx->wep_keys[i].key_size); - } - - wep_cmd->global_key_type = WEP_KEY_WEP_TYPE; - wep_cmd->num_keys = WEP_KEYS_MAX; - - cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX; - - cmd.len = cmd_size; - - if (not_empty || send_if_empty) - return il_send_cmd(il, &cmd); - else - return 0; -} - -int -il4965_restore_default_wep_keys(struct il_priv *il, struct il_rxon_context *ctx) -{ - lockdep_assert_held(&il->mutex); - - return il4965_static_wepkey_cmd(il, ctx, false); -} - -int -il4965_remove_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx, - struct ieee80211_key_conf *keyconf) -{ - int ret; - - lockdep_assert_held(&il->mutex); - - D_WEP("Removing default WEP key: idx=%d\n", keyconf->keyidx); - - memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0])); - if (il_is_rfkill(il)) { - D_WEP("Not sending C_WEPKEY command due to RFKILL.\n"); - /* but keys in device are clear anyway so return success */ - return 0; - } - ret = il4965_static_wepkey_cmd(il, ctx, 1); - D_WEP("Remove default WEP key: idx=%d ret=%d\n", keyconf->keyidx, ret); - - return ret; -} - -int -il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx, - struct ieee80211_key_conf *keyconf) -{ - int ret; - - lockdep_assert_held(&il->mutex); - - if (keyconf->keylen != WEP_KEY_LEN_128 && - keyconf->keylen != WEP_KEY_LEN_64) { - D_WEP("Bad WEP key length %d\n", keyconf->keylen); - return -EINVAL; - } - - keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; - keyconf->hw_key_idx = HW_KEY_DEFAULT; - il->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher; - - 
ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen; - memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key, - keyconf->keylen); - - ret = il4965_static_wepkey_cmd(il, ctx, false); - D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", keyconf->keylen, - keyconf->keyidx, ret); - - return ret; -} - -static int -il4965_set_wep_dynamic_key_info(struct il_priv *il, struct il_rxon_context *ctx, - struct ieee80211_key_conf *keyconf, u8 sta_id) -{ - unsigned long flags; - __le16 key_flags = 0; - struct il_addsta_cmd sta_cmd; - - lockdep_assert_held(&il->mutex); - - keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; - - key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK); - key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); - key_flags &= ~STA_KEY_FLG_INVALID; - - if (keyconf->keylen == WEP_KEY_LEN_128) - key_flags |= STA_KEY_FLG_KEY_SIZE_MSK; - - if (sta_id == ctx->bcast_sta_id) - key_flags |= STA_KEY_MULTICAST_MSK; - - spin_lock_irqsave(&il->sta_lock, flags); - - il->stations[sta_id].keyinfo.cipher = keyconf->cipher; - il->stations[sta_id].keyinfo.keylen = keyconf->keylen; - il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx; - - memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen); - - memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key, - keyconf->keylen); - - if ((il->stations[sta_id].sta.key. - key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC) - il->stations[sta_id].sta.key.key_offset = - il_get_free_ucode_key_idx(il); - /* else, we are overriding an existing key => no need to allocated room - * in uCode. */ - - WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, - "no space for a new key"); - - il->stations[sta_id].sta.key.key_flags = key_flags; - il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; - il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - - memcpy(&sta_cmd, &il->stations[sta_id].sta, - sizeof(struct il_addsta_cmd)); - spin_unlock_irqrestore(&il->sta_lock, flags); - - return il_send_add_sta(il, &sta_cmd, CMD_SYNC); -} - -static int -il4965_set_ccmp_dynamic_key_info(struct il_priv *il, - struct il_rxon_context *ctx, - struct ieee80211_key_conf *keyconf, u8 sta_id) -{ - unsigned long flags; - __le16 key_flags = 0; - struct il_addsta_cmd sta_cmd; - - lockdep_assert_held(&il->mutex); - - key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); - key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); - key_flags &= ~STA_KEY_FLG_INVALID; - - if (sta_id == ctx->bcast_sta_id) - key_flags |= STA_KEY_MULTICAST_MSK; - - keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; - - spin_lock_irqsave(&il->sta_lock, flags); - il->stations[sta_id].keyinfo.cipher = keyconf->cipher; - il->stations[sta_id].keyinfo.keylen = keyconf->keylen; - - memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen); - - memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen); - - if ((il->stations[sta_id].sta.key. - key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC) - il->stations[sta_id].sta.key.key_offset = - il_get_free_ucode_key_idx(il); - /* else, we are overriding an existing key => no need to allocated room - * in uCode. 
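The *_dynamic_key_info helpers in this area all build a 16-bit key_flags word by OR-ing a cipher type, a MAP_KEY bit, the key index shifted into place, and a multicast bit when the target is the broadcast station. A stand-alone sketch of that packing; the bit positions and names below are invented for illustration and are not the device's layout:

#include <stdio.h>
#include <stdint.h>

#define KEY_FLG_CIPHER_WEP  (1u << 0)    /* hypothetical values */
#define KEY_FLG_MAP_KEY     (1u << 3)
#define KEY_FLG_MULTICAST   (1u << 14)
#define KEY_FLG_KEYID_POS   8

static uint16_t build_key_flags(unsigned int keyidx, int multicast)
{
	uint16_t flags = KEY_FLG_CIPHER_WEP | KEY_FLG_MAP_KEY;

	flags |= (uint16_t)(keyidx << KEY_FLG_KEYID_POS);
	if (multicast)
		flags |= KEY_FLG_MULTICAST;
	return flags;
}

int main(void)
{
	printf("key 2, unicast:   0x%04x\n", build_key_flags(2, 0));
	printf("key 0, multicast: 0x%04x\n", build_key_flags(0, 1));
	return 0;
}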
*/ - - WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, - "no space for a new key"); - - il->stations[sta_id].sta.key.key_flags = key_flags; - il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; - il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - - memcpy(&sta_cmd, &il->stations[sta_id].sta, - sizeof(struct il_addsta_cmd)); - spin_unlock_irqrestore(&il->sta_lock, flags); - - return il_send_add_sta(il, &sta_cmd, CMD_SYNC); -} - -static int -il4965_set_tkip_dynamic_key_info(struct il_priv *il, - struct il_rxon_context *ctx, - struct ieee80211_key_conf *keyconf, u8 sta_id) -{ - unsigned long flags; - int ret = 0; - __le16 key_flags = 0; - - key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK); - key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); - key_flags &= ~STA_KEY_FLG_INVALID; - - if (sta_id == ctx->bcast_sta_id) - key_flags |= STA_KEY_MULTICAST_MSK; - - keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; - keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; - - spin_lock_irqsave(&il->sta_lock, flags); - - il->stations[sta_id].keyinfo.cipher = keyconf->cipher; - il->stations[sta_id].keyinfo.keylen = 16; - - if ((il->stations[sta_id].sta.key. - key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC) - il->stations[sta_id].sta.key.key_offset = - il_get_free_ucode_key_idx(il); - /* else, we are overriding an existing key => no need to allocated room - * in uCode. */ - - WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, - "no space for a new key"); - - il->stations[sta_id].sta.key.key_flags = key_flags; - - /* This copy is acutally not needed: we get the key with each TX */ - memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16); - - memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16); - - spin_unlock_irqrestore(&il->sta_lock, flags); - - return ret; -} - -void -il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx, - struct ieee80211_key_conf *keyconf, - struct ieee80211_sta *sta, u32 iv32, u16 * phase1key) -{ - u8 sta_id; - unsigned long flags; - int i; - - if (il_scan_cancel(il)) { - /* cancel scan failed, just live w/ bad key and rely - briefly on SW decryption */ - return; - } - - sta_id = il_sta_id_or_broadcast(il, ctx, sta); - if (sta_id == IL_INVALID_STATION) - return; - - spin_lock_irqsave(&il->sta_lock, flags); - - il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32; - - for (i = 0; i < 5; i++) - il->stations[sta_id].sta.key.tkip_rx_ttak[i] = - cpu_to_le16(phase1key[i]); - - il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; - il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - - il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC); - - spin_unlock_irqrestore(&il->sta_lock, flags); - -} - -int -il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx, - struct ieee80211_key_conf *keyconf, u8 sta_id) -{ - unsigned long flags; - u16 key_flags; - u8 keyidx; - struct il_addsta_cmd sta_cmd; - - lockdep_assert_held(&il->mutex); - - ctx->key_mapping_keys--; - - spin_lock_irqsave(&il->sta_lock, flags); - key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags); - keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3; - - D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id); - - if (keyconf->keyidx != keyidx) { - /* We need to remove a key with idx different that the one - * in the uCode. This means that the key we need to remove has - * been replaced by another one with different idx. 
- * Don't do anything and return ok - */ - spin_unlock_irqrestore(&il->sta_lock, flags); - return 0; - } - - if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) { - IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx, - key_flags); - spin_unlock_irqrestore(&il->sta_lock, flags); - return 0; - } - - if (!test_and_clear_bit - (il->stations[sta_id].sta.key.key_offset, &il->ucode_key_table)) - IL_ERR("idx %d not used in uCode key table.\n", - il->stations[sta_id].sta.key.key_offset); - memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key)); - memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo)); - il->stations[sta_id].sta.key.key_flags = - STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID; - il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET; - il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; - il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - - if (il_is_rfkill(il)) { - D_WEP - ("Not sending C_ADD_STA command because RFKILL enabled.\n"); - spin_unlock_irqrestore(&il->sta_lock, flags); - return 0; - } - memcpy(&sta_cmd, &il->stations[sta_id].sta, - sizeof(struct il_addsta_cmd)); - spin_unlock_irqrestore(&il->sta_lock, flags); - - return il_send_add_sta(il, &sta_cmd, CMD_SYNC); -} - -int -il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx, - struct ieee80211_key_conf *keyconf, u8 sta_id) -{ - int ret; - - lockdep_assert_held(&il->mutex); - - ctx->key_mapping_keys++; - keyconf->hw_key_idx = HW_KEY_DYNAMIC; - - switch (keyconf->cipher) { - case WLAN_CIPHER_SUITE_CCMP: - ret = - il4965_set_ccmp_dynamic_key_info(il, ctx, keyconf, sta_id); - break; - case WLAN_CIPHER_SUITE_TKIP: - ret = - il4965_set_tkip_dynamic_key_info(il, ctx, keyconf, sta_id); - break; - case WLAN_CIPHER_SUITE_WEP40: - case WLAN_CIPHER_SUITE_WEP104: - ret = il4965_set_wep_dynamic_key_info(il, ctx, keyconf, sta_id); - break; - default: - IL_ERR("Unknown alg: %s cipher = %x\n", __func__, - keyconf->cipher); - ret = -EINVAL; - } - - D_WEP("Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n", - keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret); - - return ret; -} - -/** - * il4965_alloc_bcast_station - add broadcast station into driver's station table. - * - * This adds the broadcast station into the driver's station table - * and marks it driver active, so that it will be restored to the - * device at the next best time. - */ -int -il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx) -{ - struct il_link_quality_cmd *link_cmd; - unsigned long flags; - u8 sta_id; - - spin_lock_irqsave(&il->sta_lock, flags); - sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL); - if (sta_id == IL_INVALID_STATION) { - IL_ERR("Unable to prepare broadcast station\n"); - spin_unlock_irqrestore(&il->sta_lock, flags); - - return -EINVAL; - } - - il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE; - il->stations[sta_id].used |= IL_STA_BCAST; - spin_unlock_irqrestore(&il->sta_lock, flags); - - link_cmd = il4965_sta_alloc_lq(il, sta_id); - if (!link_cmd) { - IL_ERR - ("Unable to initialize rate scaling for bcast station.\n"); - return -ENOMEM; - } - - spin_lock_irqsave(&il->sta_lock, flags); - il->stations[sta_id].lq = link_cmd; - spin_unlock_irqrestore(&il->sta_lock, flags); - - return 0; -} - -/** - * il4965_update_bcast_station - update broadcast station's LQ command - * - * Only used by iwl4965. Placed here to have all bcast station management - * code together. 
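il4965_remove_dynamic_key() above, and the station helpers that follow, share one locking pattern: modify the shared ADD_STA command and memcpy() a snapshot of it while holding il->sta_lock, then send the private copy after unlocking so the possibly sleeping send never runs under the spinlock. A user-space sketch of the pattern, with invented types and a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct sta_cmd { int sta_id; int modify_mask; };

static pthread_mutex_t sta_lock = PTHREAD_MUTEX_INITIALIZER;
static struct sta_cmd shared_cmd;

static void send_cmd(const struct sta_cmd *cmd)   /* may block */
{
	printf("sending cmd for sta %d, mask 0x%x\n", cmd->sta_id, cmd->modify_mask);
}

static void modify_and_send(int sta_id, int mask)
{
	struct sta_cmd snapshot;

	pthread_mutex_lock(&sta_lock);
	shared_cmd.sta_id = sta_id;
	shared_cmd.modify_mask |= mask;
	memcpy(&snapshot, &shared_cmd, sizeof(snapshot)); /* copy under lock */
	pthread_mutex_unlock(&sta_lock);

	send_cmd(&snapshot);                              /* no lock held */
}

int main(void) { modify_and_send(3, 0x20); return 0; }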
- */ -static int -il4965_update_bcast_station(struct il_priv *il, struct il_rxon_context *ctx) -{ - unsigned long flags; - struct il_link_quality_cmd *link_cmd; - u8 sta_id = ctx->bcast_sta_id; - - link_cmd = il4965_sta_alloc_lq(il, sta_id); - if (!link_cmd) { - IL_ERR("Unable to initialize rate scaling for bcast sta.\n"); - return -ENOMEM; - } - - spin_lock_irqsave(&il->sta_lock, flags); - if (il->stations[sta_id].lq) - kfree(il->stations[sta_id].lq); - else - D_INFO("Bcast sta rate scaling has not been initialized.\n"); - il->stations[sta_id].lq = link_cmd; - spin_unlock_irqrestore(&il->sta_lock, flags); - - return 0; -} - -int -il4965_update_bcast_stations(struct il_priv *il) -{ - return il4965_update_bcast_station(il, &il->ctx); -} - -/** - * il4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table - */ -int -il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid) -{ - unsigned long flags; - struct il_addsta_cmd sta_cmd; - - lockdep_assert_held(&il->mutex); - - /* Remove "disable" flag, to enable Tx for this TID */ - spin_lock_irqsave(&il->sta_lock, flags); - il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX; - il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid)); - il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - memcpy(&sta_cmd, &il->stations[sta_id].sta, - sizeof(struct il_addsta_cmd)); - spin_unlock_irqrestore(&il->sta_lock, flags); - - return il_send_add_sta(il, &sta_cmd, CMD_SYNC); -} - -int -il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, int tid, - u16 ssn) -{ - unsigned long flags; - int sta_id; - struct il_addsta_cmd sta_cmd; - - lockdep_assert_held(&il->mutex); - - sta_id = il_sta_id(sta); - if (sta_id == IL_INVALID_STATION) - return -ENXIO; - - spin_lock_irqsave(&il->sta_lock, flags); - il->stations[sta_id].sta.station_flags_msk = 0; - il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK; - il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid; - il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn); - il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - memcpy(&sta_cmd, &il->stations[sta_id].sta, - sizeof(struct il_addsta_cmd)); - spin_unlock_irqrestore(&il->sta_lock, flags); - - return il_send_add_sta(il, &sta_cmd, CMD_SYNC); -} - -int -il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, int tid) -{ - unsigned long flags; - int sta_id; - struct il_addsta_cmd sta_cmd; - - lockdep_assert_held(&il->mutex); - - sta_id = il_sta_id(sta); - if (sta_id == IL_INVALID_STATION) { - IL_ERR("Invalid station for AGG tid %d\n", tid); - return -ENXIO; - } - - spin_lock_irqsave(&il->sta_lock, flags); - il->stations[sta_id].sta.station_flags_msk = 0; - il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK; - il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid; - il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - memcpy(&sta_cmd, &il->stations[sta_id].sta, - sizeof(struct il_addsta_cmd)); - spin_unlock_irqrestore(&il->sta_lock, flags); - - return il_send_add_sta(il, &sta_cmd, CMD_SYNC); -} - -void -il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt) -{ - unsigned long flags; - - spin_lock_irqsave(&il->sta_lock, flags); - il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK; - il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK; - il->stations[sta_id].sta.sta.modify_mask = - STA_MODIFY_SLEEP_TX_COUNT_MSK; - il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt); - 
il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC); - spin_unlock_irqrestore(&il->sta_lock, flags); - -} - -void -il4965_update_chain_flags(struct il_priv *il) -{ - if (il->cfg->ops->hcmd->set_rxon_chain) { - il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx); - if (il->ctx.active.rx_chain != il->ctx.staging.rx_chain) - il_commit_rxon(il, &il->ctx); - } -} - -static void -il4965_clear_free_frames(struct il_priv *il) -{ - struct list_head *element; - - D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count); - - while (!list_empty(&il->free_frames)) { - element = il->free_frames.next; - list_del(element); - kfree(list_entry(element, struct il_frame, list)); - il->frames_count--; - } - - if (il->frames_count) { - IL_WARN("%d frames still in use. Did we lose one?\n", - il->frames_count); - il->frames_count = 0; - } -} - -static struct il_frame * -il4965_get_free_frame(struct il_priv *il) -{ - struct il_frame *frame; - struct list_head *element; - if (list_empty(&il->free_frames)) { - frame = kzalloc(sizeof(*frame), GFP_KERNEL); - if (!frame) { - IL_ERR("Could not allocate frame!\n"); - return NULL; - } - - il->frames_count++; - return frame; - } - - element = il->free_frames.next; - list_del(element); - return list_entry(element, struct il_frame, list); -} - -static void -il4965_free_frame(struct il_priv *il, struct il_frame *frame) -{ - memset(frame, 0, sizeof(*frame)); - list_add(&frame->list, &il->free_frames); -} - -static u32 -il4965_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr, - int left) -{ - lockdep_assert_held(&il->mutex); - - if (!il->beacon_skb) - return 0; - - if (il->beacon_skb->len > left) - return 0; - - memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len); - - return il->beacon_skb->len; -} - -/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */ -static void -il4965_set_beacon_tim(struct il_priv *il, - struct il_tx_beacon_cmd *tx_beacon_cmd, u8 * beacon, - u32 frame_size) -{ - u16 tim_idx; - struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon; - - /* - * The idx is relative to frame start but we start looking at the - * variable-length part of the beacon. - */ - tim_idx = mgmt->u.beacon.variable - beacon; - - /* Parse variable-length elements of beacon to find WLAN_EID_TIM */ - while ((tim_idx < (frame_size - 2)) && - (beacon[tim_idx] != WLAN_EID_TIM)) - tim_idx += beacon[tim_idx + 1] + 2; - - /* If TIM field was found, set variables */ - if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) { - tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx); - tx_beacon_cmd->tim_size = beacon[tim_idx + 1]; - } else - IL_WARN("Unable to find TIM Element in beacon\n"); -} - -static unsigned int -il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame) -{ - struct il_tx_beacon_cmd *tx_beacon_cmd; - u32 frame_size; - u32 rate_flags; - u32 rate; - /* - * We have to set up the TX command, the TX Beacon command, and the - * beacon contents. 
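il4965_set_beacon_tim() above finds the TIM element by walking the beacon's information elements, each laid out as <id, length, payload>, so skipping one advances the cursor by length + 2. A self-contained sketch of the same walk over an invented element buffer:

#include <stdio.h>
#include <stdint.h>

#define EID_TIM 5   /* same value as WLAN_EID_TIM */

static int find_ie(const uint8_t *ies, int len, uint8_t eid)
{
	int idx = 0;

	while (idx < len - 2 && ies[idx] != eid)
		idx += ies[idx + 1] + 2;   /* skip id, length, payload */

	return (idx < len - 1 && ies[idx] == eid) ? idx : -1;
}

int main(void)
{
	/* SSID "ab", one supported rate, then a 4-byte TIM element */
	const uint8_t ies[] = { 0, 2, 'a', 'b',  1, 1, 0x82,  5, 4, 0, 2, 0, 0 };

	printf("TIM at offset %d\n", find_ie(ies, sizeof(ies), EID_TIM));
	return 0;
}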
- */ - - lockdep_assert_held(&il->mutex); - - if (!il->beacon_ctx) { - IL_ERR("trying to build beacon w/o beacon context!\n"); - return 0; - } - - /* Initialize memory */ - tx_beacon_cmd = &frame->u.beacon; - memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); - - /* Set up TX beacon contents */ - frame_size = - il4965_fill_beacon_frame(il, tx_beacon_cmd->frame, - sizeof(frame->u) - sizeof(*tx_beacon_cmd)); - if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE)) - return 0; - if (!frame_size) - return 0; - - /* Set up TX command fields */ - tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size); - tx_beacon_cmd->tx.sta_id = il->beacon_ctx->bcast_sta_id; - tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; - tx_beacon_cmd->tx.tx_flags = - TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK | - TX_CMD_FLG_STA_RATE_MSK; - - /* Set up TX beacon command fields */ - il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame, - frame_size); - - /* Set up packet rate and flags */ - rate = il_get_lowest_plcp(il, il->beacon_ctx); - il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant); - rate_flags = BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS; - if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE)) - rate_flags |= RATE_MCS_CCK_MSK; - tx_beacon_cmd->tx.rate_n_flags = cpu_to_le32(rate | rate_flags); - - return sizeof(*tx_beacon_cmd) + frame_size; -} - -int -il4965_send_beacon_cmd(struct il_priv *il) -{ - struct il_frame *frame; - unsigned int frame_size; - int rc; - - frame = il4965_get_free_frame(il); - if (!frame) { - IL_ERR("Could not obtain free frame buffer for beacon " - "command.\n"); - return -ENOMEM; - } - - frame_size = il4965_hw_get_beacon_cmd(il, frame); - if (!frame_size) { - IL_ERR("Error configuring the beacon command\n"); - il4965_free_frame(il, frame); - return -EINVAL; - } - - rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]); - - il4965_free_frame(il, frame); - - return rc; -} - -static inline dma_addr_t -il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx) -{ - struct il_tfd_tb *tb = &tfd->tbs[idx]; - - dma_addr_t addr = get_unaligned_le32(&tb->lo); - if (sizeof(dma_addr_t) > sizeof(u32)) - addr |= - ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << - 16; - - return addr; -} - -static inline u16 -il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx) -{ - struct il_tfd_tb *tb = &tfd->tbs[idx]; - - return le16_to_cpu(tb->hi_n_len) >> 4; -} - -static inline void -il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx, dma_addr_t addr, u16 len) -{ - struct il_tfd_tb *tb = &tfd->tbs[idx]; - u16 hi_n_len = len << 4; - - put_unaligned_le32(addr, &tb->lo); - if (sizeof(dma_addr_t) > sizeof(u32)) - hi_n_len |= ((addr >> 16) >> 16) & 0xF; - - tb->hi_n_len = cpu_to_le16(hi_n_len); - - tfd->num_tbs = idx + 1; -} - -static inline u8 -il4965_tfd_get_num_tbs(struct il_tfd *tfd) -{ - return tfd->num_tbs & 0x1f; -} - -/** - * il4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] - * @il - driver ilate data - * @txq - tx queue - * - * Does NOT advance any TFD circular buffer read/write idxes - * Does NOT free the TFD itself (which is within circular buffer) - */ -void -il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq) -{ - struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds; - struct il_tfd *tfd; - struct pci_dev *dev = il->pci_dev; - int idx = txq->q.read_ptr; - int i; - int num_tbs; - - tfd = &tfd_tmp[idx]; - - /* Sanity check on number of chunks */ - num_tbs = il4965_tfd_get_num_tbs(tfd); - - if (num_tbs >= 
IL_NUM_OF_TBS) { - IL_ERR("Too many chunks: %i\n", num_tbs); - /* @todo issue fatal error, it is quite serious situation */ - return; - } - - /* Unmap tx_cmd */ - if (num_tbs) - pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping), - dma_unmap_len(&txq->meta[idx], len), - PCI_DMA_BIDIRECTIONAL); - - /* Unmap chunks, if any. */ - for (i = 1; i < num_tbs; i++) - pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i), - il4965_tfd_tb_get_len(tfd, i), - PCI_DMA_TODEVICE); - - /* free SKB */ - if (txq->txb) { - struct sk_buff *skb; - - skb = txq->txb[txq->q.read_ptr].skb; - - /* can be called from irqs-disabled context */ - if (skb) { - dev_kfree_skb_any(skb); - txq->txb[txq->q.read_ptr].skb = NULL; - } - } -} - -int -il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq, - dma_addr_t addr, u16 len, u8 reset, u8 pad) -{ - struct il_queue *q; - struct il_tfd *tfd, *tfd_tmp; - u32 num_tbs; - - q = &txq->q; - tfd_tmp = (struct il_tfd *)txq->tfds; - tfd = &tfd_tmp[q->write_ptr]; - - if (reset) - memset(tfd, 0, sizeof(*tfd)); - - num_tbs = il4965_tfd_get_num_tbs(tfd); - - /* Each TFD can point to a maximum 20 Tx buffers */ - if (num_tbs >= IL_NUM_OF_TBS) { - IL_ERR("Error can not send more than %d chunks\n", - IL_NUM_OF_TBS); - return -EINVAL; - } - - BUG_ON(addr & ~DMA_BIT_MASK(36)); - if (unlikely(addr & ~IL_TX_DMA_MASK)) - IL_ERR("Unaligned address = %llx\n", (unsigned long long)addr); - - il4965_tfd_set_tb(tfd, num_tbs, addr, len); - - return 0; -} - -/* - * Tell nic where to find circular buffer of Tx Frame Descriptors for - * given Tx queue, and enable the DMA channel used for that queue. - * - * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA - * channels supported in hardware. - */ -int -il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq) -{ - int txq_id = txq->q.id; - - /* Circular buffer (TFD queue in DRAM) physical base address */ - il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8); - - return 0; -} - -/****************************************************************************** - * - * Generic RX handler implementations - * - ******************************************************************************/ -static void -il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb) -{ - struct il_rx_pkt *pkt = rxb_addr(rxb); - struct il_alive_resp *palive; - struct delayed_work *pwork; - - palive = &pkt->u.alive_frame; - - D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n", - palive->is_valid, palive->ver_type, palive->ver_subtype); - - if (palive->ver_subtype == INITIALIZE_SUBTYPE) { - D_INFO("Initialization Alive received.\n"); - memcpy(&il->card_alive_init, &pkt->u.alive_frame, - sizeof(struct il_init_alive_resp)); - pwork = &il->init_alive_start; - } else { - D_INFO("Runtime Alive received.\n"); - memcpy(&il->card_alive, &pkt->u.alive_frame, - sizeof(struct il_alive_resp)); - pwork = &il->alive_start; - } - - /* We delay the ALIVE response by 5ms to - * give the HW RF Kill time to activate... */ - if (palive->is_valid == UCODE_VALID_OK) - queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5)); - else - IL_WARN("uCode did not respond OK.\n"); -} - -/** - * il4965_bg_stats_periodic - Timer callback to queue stats - * - * This callback is provided in order to send a stats request. - * - * This timer function is continually reset to execute within - * REG_RECALIB_PERIOD seconds since the last N_STATS - * was received. 
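The il4965_tfd_set_tb()/il4965_tfd_tb_get_addr()/il4965_tfd_tb_get_len() helpers above pack a 36-bit DMA address and a 12-bit length into a 32-bit word plus a 16-bit word: the low 32 address bits fill the first word, and the second word carries address bits 32..35 in its low nibble with the length in the upper 12 bits. A stand-alone sketch of that packing (the cpu_to_le16/le32 conversions are omitted and the sample address is invented):

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

struct tb { uint32_t lo; uint16_t hi_n_len; };

static void tb_set(struct tb *tb, uint64_t addr, uint16_t len)
{
	tb->lo = (uint32_t)addr;                          /* address bits 0..31  */
	tb->hi_n_len = (uint16_t)((len << 4) |
				  ((addr >> 32) & 0xF));  /* bits 32..35 + length */
}

static uint64_t tb_addr(const struct tb *tb)
{
	return tb->lo | ((uint64_t)(tb->hi_n_len & 0xF) << 32);
}

static uint16_t tb_len(const struct tb *tb)
{
	return tb->hi_n_len >> 4;
}

int main(void)
{
	struct tb tb;

	tb_set(&tb, 0xABCDEF012ULL, 0x123);   /* example 36-bit address */
	assert(tb_addr(&tb) == 0xABCDEF012ULL && tb_len(&tb) == 0x123);
	printf("addr=0x%llx len=0x%x\n",
	       (unsigned long long)tb_addr(&tb), tb_len(&tb));
	return 0;
}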
We need to ensure we receive the stats in order - * to update the temperature used for calibrating the TXPOWER. - */ -static void -il4965_bg_stats_periodic(unsigned long data) -{ - struct il_priv *il = (struct il_priv *)data; - - if (test_bit(S_EXIT_PENDING, &il->status)) - return; - - /* dont send host command if rf-kill is on */ - if (!il_is_ready_rf(il)) - return; - - il_send_stats_request(il, CMD_ASYNC, false); -} - -static void -il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb) -{ - struct il_rx_pkt *pkt = rxb_addr(rxb); - struct il4965_beacon_notif *beacon = - (struct il4965_beacon_notif *)pkt->u.raw; -#ifdef CONFIG_IWLEGACY_DEBUG - u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); - - D_RX("beacon status %x retries %d iss %d tsf:0x%.8x%.8x rate %d\n", - le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK, - beacon->beacon_notify_hdr.failure_frame, - le32_to_cpu(beacon->ibss_mgr_status), - le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate); -#endif - il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); -} - -static void -il4965_perform_ct_kill_task(struct il_priv *il) -{ - unsigned long flags; - - D_POWER("Stop all queues\n"); - - if (il->mac80211_registered) - ieee80211_stop_queues(il->hw); - - _il_wr(il, CSR_UCODE_DRV_GP1_SET, - CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); - _il_rd(il, CSR_UCODE_DRV_GP1); - - spin_lock_irqsave(&il->reg_lock, flags); - if (!_il_grab_nic_access(il)) - _il_release_nic_access(il); - spin_unlock_irqrestore(&il->reg_lock, flags); -} - -/* Handle notification from uCode that card's power state is changing - * due to software, hardware, or critical temperature RFKILL */ -static void -il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb) -{ - struct il_rx_pkt *pkt = rxb_addr(rxb); - u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); - unsigned long status = il->status; - - D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n", - (flags & HW_CARD_DISABLED) ? "Kill" : "On", - (flags & SW_CARD_DISABLED) ? "Kill" : "On", - (flags & CT_CARD_DISABLED) ? "Reached" : "Not reached"); - - if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) { - - _il_wr(il, CSR_UCODE_DRV_GP1_SET, - CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); - - il_wr(il, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); - - if (!(flags & RXON_CARD_DISABLED)) { - _il_wr(il, CSR_UCODE_DRV_GP1_CLR, - CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); - il_wr(il, HBUS_TARG_MBX_C, - HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); - } - } - - if (flags & CT_CARD_DISABLED) - il4965_perform_ct_kill_task(il); - - if (flags & HW_CARD_DISABLED) - set_bit(S_RF_KILL_HW, &il->status); - else - clear_bit(S_RF_KILL_HW, &il->status); - - if (!(flags & RXON_CARD_DISABLED)) - il_scan_cancel(il); - - if ((test_bit(S_RF_KILL_HW, &status) != - test_bit(S_RF_KILL_HW, &il->status))) - wiphy_rfkill_set_hw_state(il->hw->wiphy, - test_bit(S_RF_KILL_HW, &il->status)); - else - wake_up(&il->wait_command_queue); -} - -/** - * il4965_setup_handlers - Initialize Rx handler callbacks - * - * Setup the RX handlers for each of the reply types sent from the uCode - * to the host. - * - * This function chains into the hardware specific files for them to setup - * any hardware specific handlers as well. 
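il4965_setup_handlers(), which follows, fills an array of function pointers indexed by notification id; il4965_rx_handle() later dispatches through that table and falls back to a "no handler needed" path. A minimal sketch of the same dispatch scheme with invented ids and handlers:

#include <stdio.h>

#define MAX_CMD 256

typedef void (*handler_t)(int id);

static handler_t handlers[MAX_CMD];

static void handle_alive(int id)  { printf("alive (0x%02x)\n", id); }
static void handle_beacon(int id) { printf("beacon (0x%02x)\n", id); }

static void dispatch(int id)
{
	if (handlers[id])
		handlers[id](id);
	else
		printf("no handler needed for 0x%02x\n", id);
}

int main(void)
{
	handlers[0x01] = handle_alive;
	handlers[0x90] = handle_beacon;

	dispatch(0x01);
	dispatch(0x90);
	dispatch(0x42);   /* falls through to the default path */
	return 0;
}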
- */ -static void -il4965_setup_handlers(struct il_priv *il) -{ - il->handlers[N_ALIVE] = il4965_hdl_alive; - il->handlers[N_ERROR] = il_hdl_error; - il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa; - il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement; - il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep; - il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats; - il->handlers[N_BEACON] = il4965_hdl_beacon; - - /* - * The same handler is used for both the REPLY to a discrete - * stats request from the host as well as for the periodic - * stats notifications (after received beacons) from the uCode. - */ - il->handlers[C_STATS] = il4965_hdl_c_stats; - il->handlers[N_STATS] = il4965_hdl_stats; - - il_setup_rx_scan_handlers(il); - - /* status change handler */ - il->handlers[N_CARD_STATE] = il4965_hdl_card_state; - - il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon; - /* Rx handlers */ - il->handlers[N_RX_PHY] = il4965_hdl_rx_phy; - il->handlers[N_RX_MPDU] = il4965_hdl_rx; - /* block ack */ - il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba; - /* Set up hardware specific Rx handlers */ - il->cfg->ops->lib->handler_setup(il); -} - -/** - * il4965_rx_handle - Main entry function for receiving responses from uCode - * - * Uses the il->handlers callback function array to invoke - * the appropriate handlers, including command responses, - * frame-received notifications, and other notifications. - */ -void -il4965_rx_handle(struct il_priv *il) -{ - struct il_rx_buf *rxb; - struct il_rx_pkt *pkt; - struct il_rx_queue *rxq = &il->rxq; - u32 r, i; - int reclaim; - unsigned long flags; - u8 fill_rx = 0; - u32 count = 8; - int total_empty; - - /* uCode's read idx (stored in shared DRAM) indicates the last Rx - * buffer that the driver may process (last buffer filled by ucode). */ - r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; - i = rxq->read; - - /* Rx interrupt, but nothing sent from uCode */ - if (i == r) - D_RX("r = %d, i = %d\n", r, i); - - /* calculate total frames need to be restock after handling RX */ - total_empty = r - rxq->write_actual; - if (total_empty < 0) - total_empty += RX_QUEUE_SIZE; - - if (total_empty > (RX_QUEUE_SIZE / 2)) - fill_rx = 1; - - while (i != r) { - int len; - - rxb = rxq->queue[i]; - - /* If an RXB doesn't have a Rx queue slot associated with it, - * then a bug has been introduced in the queue refilling - * routines -- catch it here */ - BUG_ON(rxb == NULL); - - rxq->queue[i] = NULL; - - pci_unmap_page(il->pci_dev, rxb->page_dma, - PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - pkt = rxb_addr(rxb); - - len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK; - len += sizeof(u32); /* account for status word */ - - /* Reclaim a command buffer only if this packet is a response - * to a (driver-originated) command. - * If the packet (e.g. Rx frame) originated from uCode, - * there is no command buffer to reclaim. - * Ucode should set SEQ_RX_FRAME bit if ucode-originated, - * but apparently a few don't get set; catch them here. */ - reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && - (pkt->hdr.cmd != N_RX_PHY) && (pkt->hdr.cmd != N_RX) && - (pkt->hdr.cmd != N_RX_MPDU) && - (pkt->hdr.cmd != N_COMPRESSED_BA) && - (pkt->hdr.cmd != N_STATS) && (pkt->hdr.cmd != C_TX); - - /* Based on type of command response or notification, - * handle those that need handling via function in - * handlers table. 
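The loop in il4965_rx_handle() around this point consumes ring entries from the driver's read index up to the index last written by the hardware, wrapping with a power-of-two mask. A stand-alone sketch of that walk over an invented ring:

#include <stdio.h>

#define RING_SIZE 8                    /* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

int main(void)
{
	int buf[RING_SIZE] = { 10, 11, 12, 13, 14, 15, 16, 17 };
	unsigned int read = 6;             /* driver's next slot to process  */
	unsigned int write = 2;            /* last slot filled by "hardware" */
	unsigned int pending = (write - read) & RING_MASK;

	printf("%u entries to process\n", pending);

	while (read != write) {
		printf("processing slot %u -> %d\n", read, buf[read]);
		read = (read + 1) & RING_MASK; /* wrap like RX_QUEUE_MASK */
	}
	return 0;
}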
See il4965_setup_handlers() */ - if (il->handlers[pkt->hdr.cmd]) { - D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i, - il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); - il->isr_stats.handlers[pkt->hdr.cmd]++; - il->handlers[pkt->hdr.cmd] (il, rxb); - } else { - /* No handling needed */ - D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r, - i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); - } - - /* - * XXX: After here, we should always check rxb->page - * against NULL before touching it or its virtual - * memory (pkt). Because some handler might have - * already taken or freed the pages. - */ - - if (reclaim) { - /* Invoke any callbacks, transfer the buffer to caller, - * and fire off the (possibly) blocking il_send_cmd() - * as we reclaim the driver command queue */ - if (rxb->page) - il_tx_cmd_complete(il, rxb); - else - IL_WARN("Claim null rxb?\n"); - } - - /* Reuse the page if possible. For notification packets and - * SKBs that fail to Rx correctly, add them back into the - * rx_free list for reuse later. */ - spin_lock_irqsave(&rxq->lock, flags); - if (rxb->page != NULL) { - rxb->page_dma = - pci_map_page(il->pci_dev, rxb->page, 0, - PAGE_SIZE << il->hw_params. - rx_page_order, PCI_DMA_FROMDEVICE); - list_add_tail(&rxb->list, &rxq->rx_free); - rxq->free_count++; - } else - list_add_tail(&rxb->list, &rxq->rx_used); - - spin_unlock_irqrestore(&rxq->lock, flags); - - i = (i + 1) & RX_QUEUE_MASK; - /* If there are a lot of unused frames, - * restock the Rx queue so ucode wont assert. */ - if (fill_rx) { - count++; - if (count >= 8) { - rxq->read = i; - il4965_rx_replenish_now(il); - count = 0; - } - } - } - - /* Backtrack one entry */ - rxq->read = i; - if (fill_rx) - il4965_rx_replenish_now(il); - else - il4965_rx_queue_restock(il); -} - -/* call this function to flush any scheduled tasklet */ -static inline void -il4965_synchronize_irq(struct il_priv *il) -{ - /* wait to make sure we flush pending tasklet */ - synchronize_irq(il->pci_dev->irq); - tasklet_kill(&il->irq_tasklet); -} - -static void -il4965_irq_tasklet(struct il_priv *il) -{ - u32 inta, handled = 0; - u32 inta_fh; - unsigned long flags; - u32 i; -#ifdef CONFIG_IWLEGACY_DEBUG - u32 inta_mask; -#endif - - spin_lock_irqsave(&il->lock, flags); - - /* Ack/clear/reset pending uCode interrupts. - * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, - * and will clear only when CSR_FH_INT_STATUS gets cleared. */ - inta = _il_rd(il, CSR_INT); - _il_wr(il, CSR_INT, inta); - - /* Ack/clear/reset pending flow-handler (DMA) interrupts. - * Any new interrupts that happen after this, either while we're - * in this tasklet, or later, will show up in next ISR/tasklet. */ - inta_fh = _il_rd(il, CSR_FH_INT_STATUS); - _il_wr(il, CSR_FH_INT_STATUS, inta_fh); - -#ifdef CONFIG_IWLEGACY_DEBUG - if (il_get_debug_level(il) & IL_DL_ISR) { - /* just for debug */ - inta_mask = _il_rd(il, CSR_INT_MASK); - D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, - inta_mask, inta_fh); - } -#endif - - spin_unlock_irqrestore(&il->lock, flags); - - /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not - * atomic, make sure that inta covers all the interrupts that - * we've discovered, even if FH interrupt came in just after - * reading CSR_INT. */ - if (inta_fh & CSR49_FH_INT_RX_MASK) - inta |= CSR_INT_BIT_FH_RX; - if (inta_fh & CSR49_FH_INT_TX_MASK) - inta |= CSR_INT_BIT_FH_TX; - - /* Now service all interrupt bits discovered above. */ - if (inta & CSR_INT_BIT_HW_ERR) { - IL_ERR("Hardware error detected. 
Restarting.\n"); - - /* Tell the device to stop sending interrupts */ - il_disable_interrupts(il); - - il->isr_stats.hw++; - il_irq_handle_error(il); - - handled |= CSR_INT_BIT_HW_ERR; - - return; - } -#ifdef CONFIG_IWLEGACY_DEBUG - if (il_get_debug_level(il) & (IL_DL_ISR)) { - /* NIC fires this, but we don't use it, redundant with WAKEUP */ - if (inta & CSR_INT_BIT_SCD) { - D_ISR("Scheduler finished to transmit " - "the frame/frames.\n"); - il->isr_stats.sch++; - } - - /* Alive notification via Rx interrupt will do the real work */ - if (inta & CSR_INT_BIT_ALIVE) { - D_ISR("Alive interrupt\n"); - il->isr_stats.alive++; - } - } -#endif - /* Safely ignore these bits for debug checks below */ - inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); - - /* HW RF KILL switch toggled */ - if (inta & CSR_INT_BIT_RF_KILL) { - int hw_rf_kill = 0; - if (! - (_il_rd(il, CSR_GP_CNTRL) & - CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) - hw_rf_kill = 1; - - IL_WARN("RF_KILL bit toggled to %s.\n", - hw_rf_kill ? "disable radio" : "enable radio"); - - il->isr_stats.rfkill++; - - /* driver only loads ucode once setting the interface up. - * the driver allows loading the ucode even if the radio - * is killed. Hence update the killswitch state here. The - * rfkill handler will care about restarting if needed. - */ - if (!test_bit(S_ALIVE, &il->status)) { - if (hw_rf_kill) - set_bit(S_RF_KILL_HW, &il->status); - else - clear_bit(S_RF_KILL_HW, &il->status); - wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill); - } - - handled |= CSR_INT_BIT_RF_KILL; - } - - /* Chip got too hot and stopped itself */ - if (inta & CSR_INT_BIT_CT_KILL) { - IL_ERR("Microcode CT kill error detected.\n"); - il->isr_stats.ctkill++; - handled |= CSR_INT_BIT_CT_KILL; - } - - /* Error detected by uCode */ - if (inta & CSR_INT_BIT_SW_ERR) { - IL_ERR("Microcode SW error detected. " " Restarting 0x%X.\n", - inta); - il->isr_stats.sw++; - il_irq_handle_error(il); - handled |= CSR_INT_BIT_SW_ERR; - } - - /* - * uCode wakes up after power-down sleep. - * Tell device about any new tx or host commands enqueued, - * and about any Rx buffers made available while asleep. 
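The tasklet around this point services each pending interrupt cause and ORs every cause it handles into a `handled` mask, so leftover bits can be reported at the end. A small illustrative sketch of that bookkeeping with invented bit values:

#include <stdio.h>
#include <stdint.h>

#define INT_RX      (1u << 0)
#define INT_TX      (1u << 1)
#define INT_WAKEUP  (1u << 2)

int main(void)
{
	uint32_t inta = INT_RX | INT_WAKEUP | (1u << 7);   /* pending causes */
	uint32_t handled = 0;

	if (inta & INT_WAKEUP) {
		printf("wakeup serviced\n");
		handled |= INT_WAKEUP;
	}
	if (inta & INT_RX) {
		printf("rx serviced\n");
		handled |= INT_RX;
	}
	if (inta & ~handled)
		printf("unhandled bits 0x%08x\n", inta & ~handled);
	return 0;
}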
- */ - if (inta & CSR_INT_BIT_WAKEUP) { - D_ISR("Wakeup interrupt\n"); - il_rx_queue_update_write_ptr(il, &il->rxq); - for (i = 0; i < il->hw_params.max_txq_num; i++) - il_txq_update_write_ptr(il, &il->txq[i]); - il->isr_stats.wakeup++; - handled |= CSR_INT_BIT_WAKEUP; - } - - /* All uCode command responses, including Tx command responses, - * Rx "responses" (frame-received notification), and other - * notifications from uCode come through here*/ - if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { - il4965_rx_handle(il); - il->isr_stats.rx++; - handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); - } - - /* This "Tx" DMA channel is used only for loading uCode */ - if (inta & CSR_INT_BIT_FH_TX) { - D_ISR("uCode load interrupt\n"); - il->isr_stats.tx++; - handled |= CSR_INT_BIT_FH_TX; - /* Wake up uCode load routine, now that load is complete */ - il->ucode_write_complete = 1; - wake_up(&il->wait_command_queue); - } - - if (inta & ~handled) { - IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled); - il->isr_stats.unhandled++; - } - - if (inta & ~(il->inta_mask)) { - IL_WARN("Disabled INTA bits 0x%08x were pending\n", - inta & ~il->inta_mask); - IL_WARN(" with FH49_INT = 0x%08x\n", inta_fh); - } - - /* Re-enable all interrupts */ - /* only Re-enable if disabled by irq */ - if (test_bit(S_INT_ENABLED, &il->status)) - il_enable_interrupts(il); - /* Re-enable RF_KILL if it occurred */ - else if (handled & CSR_INT_BIT_RF_KILL) - il_enable_rfkill_int(il); - -#ifdef CONFIG_IWLEGACY_DEBUG - if (il_get_debug_level(il) & (IL_DL_ISR)) { - inta = _il_rd(il, CSR_INT); - inta_mask = _il_rd(il, CSR_INT_MASK); - inta_fh = _il_rd(il, CSR_FH_INT_STATUS); - D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " - "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); - } -#endif -} - -/***************************************************************************** - * - * sysfs attributes - * - *****************************************************************************/ - -#ifdef CONFIG_IWLEGACY_DEBUG - -/* - * The following adds a new attribute to the sysfs representation - * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/) - * used for controlling the debug level. - * - * See the level definitions in iwl for details. - * - * The debug_level being managed using sysfs below is a per device debug - * level that is used instead of the global debug level if it (the per - * device debug level) is set. 
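The sysfs attribute that follows combines two ideas: the written string is parsed as hex or decimal (base 0, as strict_strtoul() does), and a non-zero per-device debug_level overrides the global one. A user-space sketch of both, with an invented device structure:

#include <stdio.h>
#include <stdlib.h>

static unsigned long global_debug_level = 0x1;

struct dev { unsigned long debug_level; };

static unsigned long effective_level(const struct dev *d)
{
	return d->debug_level ? d->debug_level : global_debug_level;
}

int main(void)
{
	struct dev d = { 0 };
	char *end;
	unsigned long val = strtoul("0x43fff", &end, 0);   /* hex or decimal */

	if (*end == '\0' || *end == '\n')
		d.debug_level = val;
	else
		printf("not a number\n");

	printf("effective debug level 0x%08lX\n", effective_level(&d));
	return 0;
}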
- */ -static ssize_t -il4965_show_debug_level(struct device *d, struct device_attribute *attr, - char *buf) -{ - struct il_priv *il = dev_get_drvdata(d); - return sprintf(buf, "0x%08X\n", il_get_debug_level(il)); -} - -static ssize_t -il4965_store_debug_level(struct device *d, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct il_priv *il = dev_get_drvdata(d); - unsigned long val; - int ret; - - ret = strict_strtoul(buf, 0, &val); - if (ret) - IL_ERR("%s is not in hex or decimal form.\n", buf); - else { - il->debug_level = val; - if (il_alloc_traffic_mem(il)) - IL_ERR("Not enough memory to generate traffic log\n"); - } - return strnlen(buf, count); -} - -static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il4965_show_debug_level, - il4965_store_debug_level); - -#endif /* CONFIG_IWLEGACY_DEBUG */ - -static ssize_t -il4965_show_temperature(struct device *d, struct device_attribute *attr, - char *buf) -{ - struct il_priv *il = dev_get_drvdata(d); - - if (!il_is_alive(il)) - return -EAGAIN; - - return sprintf(buf, "%d\n", il->temperature); -} - -static DEVICE_ATTR(temperature, S_IRUGO, il4965_show_temperature, NULL); - -static ssize_t -il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf) -{ - struct il_priv *il = dev_get_drvdata(d); - - if (!il_is_ready_rf(il)) - return sprintf(buf, "off\n"); - else - return sprintf(buf, "%d\n", il->tx_power_user_lmt); -} - -static ssize_t -il4965_store_tx_power(struct device *d, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct il_priv *il = dev_get_drvdata(d); - unsigned long val; - int ret; - - ret = strict_strtoul(buf, 10, &val); - if (ret) - IL_INFO("%s is not in decimal form.\n", buf); - else { - ret = il_set_tx_power(il, val, false); - if (ret) - IL_ERR("failed setting tx power (0x%d).\n", ret); - else - ret = count; - } - return ret; -} - -static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il4965_show_tx_power, - il4965_store_tx_power); - -static struct attribute *il_sysfs_entries[] = { - &dev_attr_temperature.attr, - &dev_attr_tx_power.attr, -#ifdef CONFIG_IWLEGACY_DEBUG - &dev_attr_debug_level.attr, -#endif - NULL -}; - -static struct attribute_group il_attribute_group = { - .name = NULL, /* put in device directory */ - .attrs = il_sysfs_entries, -}; - -/****************************************************************************** - * - * uCode download functions - * - ******************************************************************************/ - -static void -il4965_dealloc_ucode_pci(struct il_priv *il) -{ - il_free_fw_desc(il->pci_dev, &il->ucode_code); - il_free_fw_desc(il->pci_dev, &il->ucode_data); - il_free_fw_desc(il->pci_dev, &il->ucode_data_backup); - il_free_fw_desc(il->pci_dev, &il->ucode_init); - il_free_fw_desc(il->pci_dev, &il->ucode_init_data); - il_free_fw_desc(il->pci_dev, &il->ucode_boot); -} - -static void -il4965_nic_start(struct il_priv *il) -{ - /* Remove all resets to allow NIC to operate */ - _il_wr(il, CSR_RESET, 0); -} - -static void il4965_ucode_callback(const struct firmware *ucode_raw, - void *context); -static int il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length); - -static int __must_check -il4965_request_firmware(struct il_priv *il, bool first) -{ - const char *name_pre = il->cfg->fw_name_pre; - char tag[8]; - - if (first) { - il->fw_idx = il->cfg->ucode_api_max; - sprintf(tag, "%d", il->fw_idx); - } else { - il->fw_idx--; - sprintf(tag, "%d", il->fw_idx); - } - - if (il->fw_idx < il->cfg->ucode_api_min) { - IL_ERR("no 
suitable firmware found!\n"); - return -ENOENT; - } - - sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode"); - - D_INFO("attempting to load firmware '%s'\n", il->firmware_name); - - return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name, - &il->pci_dev->dev, GFP_KERNEL, il, - il4965_ucode_callback); -} - -struct il4965_firmware_pieces { - const void *inst, *data, *init, *init_data, *boot; - size_t inst_size, data_size, init_size, init_data_size, boot_size; -}; - -static int -il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw, - struct il4965_firmware_pieces *pieces) -{ - struct il_ucode_header *ucode = (void *)ucode_raw->data; - u32 api_ver, hdr_size; - const u8 *src; - - il->ucode_ver = le32_to_cpu(ucode->ver); - api_ver = IL_UCODE_API(il->ucode_ver); - - switch (api_ver) { - default: - case 0: - case 1: - case 2: - hdr_size = 24; - if (ucode_raw->size < hdr_size) { - IL_ERR("File size too small!\n"); - return -EINVAL; - } - pieces->inst_size = le32_to_cpu(ucode->v1.inst_size); - pieces->data_size = le32_to_cpu(ucode->v1.data_size); - pieces->init_size = le32_to_cpu(ucode->v1.init_size); - pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size); - pieces->boot_size = le32_to_cpu(ucode->v1.boot_size); - src = ucode->v1.data; - break; - } - - /* Verify size of file vs. image size info in file's header */ - if (ucode_raw->size != - hdr_size + pieces->inst_size + pieces->data_size + - pieces->init_size + pieces->init_data_size + pieces->boot_size) { - - IL_ERR("uCode file size %d does not match expected size\n", - (int)ucode_raw->size); - return -EINVAL; - } - - pieces->inst = src; - src += pieces->inst_size; - pieces->data = src; - src += pieces->data_size; - pieces->init = src; - src += pieces->init_size; - pieces->init_data = src; - src += pieces->init_data_size; - pieces->boot = src; - src += pieces->boot_size; - - return 0; -} - -/** - * il4965_ucode_callback - callback when firmware was loaded - * - * If loaded successfully, copies the firmware into buffers - * for the card to fetch (via DMA). - */ -static void -il4965_ucode_callback(const struct firmware *ucode_raw, void *context) -{ - struct il_priv *il = context; - struct il_ucode_header *ucode; - int err; - struct il4965_firmware_pieces pieces; - const unsigned int api_max = il->cfg->ucode_api_max; - const unsigned int api_min = il->cfg->ucode_api_min; - u32 api_ver; - - u32 max_probe_length = 200; - u32 standard_phy_calibration_size = - IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; - - memset(&pieces, 0, sizeof(pieces)); - - if (!ucode_raw) { - if (il->fw_idx <= il->cfg->ucode_api_max) - IL_ERR("request for firmware file '%s' failed.\n", - il->firmware_name); - goto try_again; - } - - D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name, - ucode_raw->size); - - /* Make sure that we got at least the API version number */ - if (ucode_raw->size < 4) { - IL_ERR("File size way too small!\n"); - goto try_again; - } - - /* Data from ucode file: header followed by uCode images */ - ucode = (struct il_ucode_header *)ucode_raw->data; - - err = il4965_load_firmware(il, ucode_raw, &pieces); - - if (err) - goto try_again; - - api_ver = IL_UCODE_API(il->ucode_ver); - - /* - * api_ver should match the api version forming part of the - * firmware filename ... but we don't check for that and only rely - * on the API version read from firmware header from here on forward - */ - if (api_ver < api_min || api_ver > api_max) { - IL_ERR("Driver unable to support your firmware API. 
" - "Driver supports v%u, firmware is v%u.\n", api_max, - api_ver); - goto try_again; - } - - if (api_ver != api_max) - IL_ERR("Firmware has old API version. Expected v%u, " - "got v%u. New firmware can be obtained " - "from http://www.intellinuxwireless.org.\n", api_max, - api_ver); - - IL_INFO("loaded firmware version %u.%u.%u.%u\n", - IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver), - IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver)); - - snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version), - "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver), - IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver), - IL_UCODE_SERIAL(il->ucode_ver)); - - /* - * For any of the failures below (before allocating pci memory) - * we will try to load a version with a smaller API -- maybe the - * user just got a corrupted version of the latest API. - */ - - D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver); - D_INFO("f/w package hdr runtime inst size = %Zd\n", pieces.inst_size); - D_INFO("f/w package hdr runtime data size = %Zd\n", pieces.data_size); - D_INFO("f/w package hdr init inst size = %Zd\n", pieces.init_size); - D_INFO("f/w package hdr init data size = %Zd\n", pieces.init_data_size); - D_INFO("f/w package hdr boot inst size = %Zd\n", pieces.boot_size); - - /* Verify that uCode images will fit in card's SRAM */ - if (pieces.inst_size > il->hw_params.max_inst_size) { - IL_ERR("uCode instr len %Zd too large to fit in\n", - pieces.inst_size); - goto try_again; - } - - if (pieces.data_size > il->hw_params.max_data_size) { - IL_ERR("uCode data len %Zd too large to fit in\n", - pieces.data_size); - goto try_again; - } - - if (pieces.init_size > il->hw_params.max_inst_size) { - IL_ERR("uCode init instr len %Zd too large to fit in\n", - pieces.init_size); - goto try_again; - } - - if (pieces.init_data_size > il->hw_params.max_data_size) { - IL_ERR("uCode init data len %Zd too large to fit in\n", - pieces.init_data_size); - goto try_again; - } - - if (pieces.boot_size > il->hw_params.max_bsm_size) { - IL_ERR("uCode boot instr len %Zd too large to fit in\n", - pieces.boot_size); - goto try_again; - } - - /* Allocate ucode buffers for card's bus-master loading ... 
*/ - - /* Runtime instructions and 2 copies of data: - * 1) unmodified from disk - * 2) backup cache for save/restore during power-downs */ - il->ucode_code.len = pieces.inst_size; - il_alloc_fw_desc(il->pci_dev, &il->ucode_code); - - il->ucode_data.len = pieces.data_size; - il_alloc_fw_desc(il->pci_dev, &il->ucode_data); - - il->ucode_data_backup.len = pieces.data_size; - il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup); - - if (!il->ucode_code.v_addr || !il->ucode_data.v_addr || - !il->ucode_data_backup.v_addr) - goto err_pci_alloc; - - /* Initialization instructions and data */ - if (pieces.init_size && pieces.init_data_size) { - il->ucode_init.len = pieces.init_size; - il_alloc_fw_desc(il->pci_dev, &il->ucode_init); - - il->ucode_init_data.len = pieces.init_data_size; - il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data); - - if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr) - goto err_pci_alloc; - } - - /* Bootstrap (instructions only, no data) */ - if (pieces.boot_size) { - il->ucode_boot.len = pieces.boot_size; - il_alloc_fw_desc(il->pci_dev, &il->ucode_boot); - - if (!il->ucode_boot.v_addr) - goto err_pci_alloc; - } - - /* Now that we can no longer fail, copy information */ - - il->sta_key_max_num = STA_KEY_MAX_NUM; - - /* Copy images into buffers for card's bus-master reads ... */ - - /* Runtime instructions (first block of data in file) */ - D_INFO("Copying (but not loading) uCode instr len %Zd\n", - pieces.inst_size); - memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size); - - D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", - il->ucode_code.v_addr, (u32) il->ucode_code.p_addr); - - /* - * Runtime data - * NOTE: Copy into backup buffer will be done in il_up() - */ - D_INFO("Copying (but not loading) uCode data len %Zd\n", - pieces.data_size); - memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size); - memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size); - - /* Initialization instructions */ - if (pieces.init_size) { - D_INFO("Copying (but not loading) init instr len %Zd\n", - pieces.init_size); - memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size); - } - - /* Initialization data */ - if (pieces.init_data_size) { - D_INFO("Copying (but not loading) init data len %Zd\n", - pieces.init_data_size); - memcpy(il->ucode_init_data.v_addr, pieces.init_data, - pieces.init_data_size); - } - - /* Bootstrap instructions */ - D_INFO("Copying (but not loading) boot instr len %Zd\n", - pieces.boot_size); - memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size); - - /* - * figure out the offset of chain noise reset and gain commands - * base on the size of standard phy calibration commands table size - */ - il->_4965.phy_calib_chain_noise_reset_cmd = - standard_phy_calibration_size; - il->_4965.phy_calib_chain_noise_gain_cmd = - standard_phy_calibration_size + 1; - - /************************************************** - * This is still part of probe() in a sense... - * - * 9. Setup and register with mac80211 and debugfs - **************************************************/ - err = il4965_mac_setup_register(il, max_probe_length); - if (err) - goto out_unbind; - - err = il_dbgfs_register(il, DRV_NAME); - if (err) - IL_ERR("failed to create debugfs files. 
Ignoring error: %d\n", - err); - - err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group); - if (err) { - IL_ERR("failed to create sysfs device attributes\n"); - goto out_unbind; - } - - /* We have our copies now, allow OS release its copies */ - release_firmware(ucode_raw); - complete(&il->_4965.firmware_loading_complete); - return; - -try_again: - /* try next, if any */ - if (il4965_request_firmware(il, false)) - goto out_unbind; - release_firmware(ucode_raw); - return; - -err_pci_alloc: - IL_ERR("failed to allocate pci memory\n"); - il4965_dealloc_ucode_pci(il); -out_unbind: - complete(&il->_4965.firmware_loading_complete); - device_release_driver(&il->pci_dev->dev); - release_firmware(ucode_raw); -} - -static const char *const desc_lookup_text[] = { - "OK", - "FAIL", - "BAD_PARAM", - "BAD_CHECKSUM", - "NMI_INTERRUPT_WDG", - "SYSASSERT", - "FATAL_ERROR", - "BAD_COMMAND", - "HW_ERROR_TUNE_LOCK", - "HW_ERROR_TEMPERATURE", - "ILLEGAL_CHAN_FREQ", - "VCC_NOT_STBL", - "FH49_ERROR", - "NMI_INTERRUPT_HOST", - "NMI_INTERRUPT_ACTION_PT", - "NMI_INTERRUPT_UNKNOWN", - "UCODE_VERSION_MISMATCH", - "HW_ERROR_ABS_LOCK", - "HW_ERROR_CAL_LOCK_FAIL", - "NMI_INTERRUPT_INST_ACTION_PT", - "NMI_INTERRUPT_DATA_ACTION_PT", - "NMI_TRM_HW_ER", - "NMI_INTERRUPT_TRM", - "NMI_INTERRUPT_BREAK_POINT", - "DEBUG_0", - "DEBUG_1", - "DEBUG_2", - "DEBUG_3", -}; - -static struct { - char *name; - u8 num; -} advanced_lookup[] = { - { - "NMI_INTERRUPT_WDG", 0x34}, { - "SYSASSERT", 0x35}, { - "UCODE_VERSION_MISMATCH", 0x37}, { - "BAD_COMMAND", 0x38}, { - "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C}, { - "FATAL_ERROR", 0x3D}, { - "NMI_TRM_HW_ERR", 0x46}, { - "NMI_INTERRUPT_TRM", 0x4C}, { - "NMI_INTERRUPT_BREAK_POINT", 0x54}, { - "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C}, { - "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64}, { - "NMI_INTERRUPT_HOST", 0x66}, { - "NMI_INTERRUPT_ACTION_PT", 0x7C}, { - "NMI_INTERRUPT_UNKNOWN", 0x84}, { - "NMI_INTERRUPT_INST_ACTION_PT", 0x86}, { -"ADVANCED_SYSASSERT", 0},}; - -static const char * -il4965_desc_lookup(u32 num) -{ - int i; - int max = ARRAY_SIZE(desc_lookup_text); - - if (num < max) - return desc_lookup_text[num]; - - max = ARRAY_SIZE(advanced_lookup) - 1; - for (i = 0; i < max; i++) { - if (advanced_lookup[i].num == num) - break; - } - return advanced_lookup[i].name; -} - -#define ERROR_START_OFFSET (1 * sizeof(u32)) -#define ERROR_ELEM_SIZE (7 * sizeof(u32)) - -void -il4965_dump_nic_error_log(struct il_priv *il) -{ - u32 data2, line; - u32 desc, time, count, base, data1; - u32 blink1, blink2, ilink1, ilink2; - u32 pc, hcmd; - - if (il->ucode_type == UCODE_INIT) - base = le32_to_cpu(il->card_alive_init.error_event_table_ptr); - else - base = le32_to_cpu(il->card_alive.error_event_table_ptr); - - if (!il->cfg->ops->lib->is_valid_rtc_data_addr(base)) { - IL_ERR("Not valid error log pointer 0x%08X for %s uCode\n", - base, (il->ucode_type == UCODE_INIT) ? 
"Init" : "RT"); - return; - } - - count = il_read_targ_mem(il, base); - - if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { - IL_ERR("Start IWL Error Log Dump:\n"); - IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count); - } - - desc = il_read_targ_mem(il, base + 1 * sizeof(u32)); - il->isr_stats.err_code = desc; - pc = il_read_targ_mem(il, base + 2 * sizeof(u32)); - blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32)); - blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32)); - ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32)); - ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32)); - data1 = il_read_targ_mem(il, base + 7 * sizeof(u32)); - data2 = il_read_targ_mem(il, base + 8 * sizeof(u32)); - line = il_read_targ_mem(il, base + 9 * sizeof(u32)); - time = il_read_targ_mem(il, base + 11 * sizeof(u32)); - hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32)); - - IL_ERR("Desc Time " - "data1 data2 line\n"); - IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n", - il4965_desc_lookup(desc), desc, time, data1, data2, line); - IL_ERR("pc blink1 blink2 ilink1 ilink2 hcmd\n"); - IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1, - blink2, ilink1, ilink2, hcmd); -} - -static void -il4965_rf_kill_ct_config(struct il_priv *il) -{ - struct il_ct_kill_config cmd; - unsigned long flags; - int ret = 0; - - spin_lock_irqsave(&il->lock, flags); - _il_wr(il, CSR_UCODE_DRV_GP1_CLR, - CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); - spin_unlock_irqrestore(&il->lock, flags); - - cmd.critical_temperature_R = - cpu_to_le32(il->hw_params.ct_kill_threshold); - - ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG, sizeof(cmd), &cmd); - if (ret) - IL_ERR("C_CT_KILL_CONFIG failed\n"); - else - D_INFO("C_CT_KILL_CONFIG " "succeeded, " - "critical temperature is %d\n", - il->hw_params.ct_kill_threshold); -} - -static const s8 default_queue_to_tx_fifo[] = { - IL_TX_FIFO_VO, - IL_TX_FIFO_VI, - IL_TX_FIFO_BE, - IL_TX_FIFO_BK, - IL49_CMD_FIFO_NUM, - IL_TX_FIFO_UNUSED, - IL_TX_FIFO_UNUSED, -}; - -#define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo)))) - -static int -il4965_alive_notify(struct il_priv *il) -{ - u32 a; - unsigned long flags; - int i, chan; - u32 reg_val; - - spin_lock_irqsave(&il->lock, flags); - - /* Clear 4965's internal Tx Scheduler data base */ - il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR); - a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET; - for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4) - il_write_targ_mem(il, a, 0); - for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4) - il_write_targ_mem(il, a, 0); - for (; - a < - il->scd_base_addr + - IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num); - a += 4) - il_write_targ_mem(il, a, 0); - - /* Tel 4965 where to find Tx byte count tables */ - il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10); - - /* Enable DMA channel */ - for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++) - il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chan), - FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | - FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); - - /* Update FH chicken bits */ - reg_val = il_rd(il, FH49_TX_CHICKEN_BITS_REG); - il_wr(il, FH49_TX_CHICKEN_BITS_REG, - reg_val | FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); - - /* Disable chain mode for all queues */ - il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0); - - /* Initialize each Tx queue (including the command queue) */ - for (i = 0; i < il->hw_params.max_txq_num; i++) { - - /* TFD circular buffer read/write idxes */ - 
il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0); - il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8)); - - /* Max Tx Window size for Scheduler-ACK mode */ - il_write_targ_mem(il, - il->scd_base_addr + - IL49_SCD_CONTEXT_QUEUE_OFFSET(i), - (SCD_WIN_SIZE << - IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & - IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); - - /* Frame limit */ - il_write_targ_mem(il, - il->scd_base_addr + - IL49_SCD_CONTEXT_QUEUE_OFFSET(i) + - sizeof(u32), - (SCD_FRAME_LIMIT << - IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & - IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); - - } - il_wr_prph(il, IL49_SCD_INTERRUPT_MASK, - (1 << il->hw_params.max_txq_num) - 1); - - /* Activate all Tx DMA/FIFO channels */ - il4965_txq_set_sched(il, IL_MASK(0, 6)); - - il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0); - - /* make sure all queue are not stopped */ - memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped)); - for (i = 0; i < 4; i++) - atomic_set(&il->queue_stop_count[i], 0); - - /* reset to 0 to enable all the queue first */ - il->txq_ctx_active_msk = 0; - /* Map each Tx/cmd queue to its corresponding fifo */ - BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7); - - for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { - int ac = default_queue_to_tx_fifo[i]; - - il_txq_ctx_activate(il, i); - - if (ac == IL_TX_FIFO_UNUSED) - continue; - - il4965_tx_queue_set_status(il, &il->txq[i], ac, 0); - } - - spin_unlock_irqrestore(&il->lock, flags); - - return 0; -} - -/** - * il4965_alive_start - called after N_ALIVE notification received - * from protocol/runtime uCode (initialization uCode's - * Alive gets handled by il_init_alive_start()). - */ -static void -il4965_alive_start(struct il_priv *il) -{ - int ret = 0; - struct il_rxon_context *ctx = &il->ctx; - - D_INFO("Runtime Alive received.\n"); - - if (il->card_alive.is_valid != UCODE_VALID_OK) { - /* We had an error bringing up the hardware, so take it - * all the way back down so we can try again */ - D_INFO("Alive failed.\n"); - goto restart; - } - - /* Initialize uCode has loaded Runtime uCode ... verify inst image. - * This is a paranoid check, because we would not have gotten the - * "runtime" alive if code weren't properly loaded. 
*/ - if (il4965_verify_ucode(il)) { - /* Runtime instruction load was bad; - * take it all the way back down so we can try again */ - D_INFO("Bad runtime uCode load.\n"); - goto restart; - } - - ret = il4965_alive_notify(il); - if (ret) { - IL_WARN("Could not complete ALIVE transition [ntf]: %d\n", ret); - goto restart; - } - - /* After the ALIVE response, we can send host commands to the uCode */ - set_bit(S_ALIVE, &il->status); - - /* Enable watchdog to monitor the driver tx queues */ - il_setup_watchdog(il); - - if (il_is_rfkill(il)) - return; - - ieee80211_wake_queues(il->hw); - - il->active_rate = RATES_MASK; - - if (il_is_associated_ctx(ctx)) { - struct il_rxon_cmd *active_rxon = - (struct il_rxon_cmd *)&ctx->active; - /* apply any changes in staging */ - ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; - active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; - } else { - /* Initialize our rx_config data */ - il_connection_init_rx_config(il, &il->ctx); - - if (il->cfg->ops->hcmd->set_rxon_chain) - il->cfg->ops->hcmd->set_rxon_chain(il, ctx); - } - - /* Configure bluetooth coexistence if enabled */ - il_send_bt_config(il); - - il4965_reset_run_time_calib(il); - - set_bit(S_READY, &il->status); - - /* Configure the adapter for unassociated operation */ - il_commit_rxon(il, ctx); - - /* At this point, the NIC is initialized and operational */ - il4965_rf_kill_ct_config(il); - - D_INFO("ALIVE processing complete.\n"); - wake_up(&il->wait_command_queue); - - il_power_update_mode(il, true); - D_INFO("Updated power mode\n"); - - return; - -restart: - queue_work(il->workqueue, &il->restart); -} - -static void il4965_cancel_deferred_work(struct il_priv *il); - -static void -__il4965_down(struct il_priv *il) -{ - unsigned long flags; - int exit_pending; - - D_INFO(DRV_NAME " is going down\n"); - - il_scan_cancel_timeout(il, 200); - - exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status); - - /* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set - * to prevent rearm timer */ - del_timer_sync(&il->watchdog); - - il_clear_ucode_stations(il, NULL); - il_dealloc_bcast_stations(il); - il_clear_driver_stations(il); - - /* Unblock any waiting calls */ - wake_up_all(&il->wait_command_queue); - - /* Wipe out the EXIT_PENDING status bit if we are not actually - * exiting the module */ - if (!exit_pending) - clear_bit(S_EXIT_PENDING, &il->status); - - /* stop and reset the on-board processor */ - _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); - - /* tell the device to stop sending interrupts */ - spin_lock_irqsave(&il->lock, flags); - il_disable_interrupts(il); - spin_unlock_irqrestore(&il->lock, flags); - il4965_synchronize_irq(il); - - if (il->mac80211_registered) - ieee80211_stop_queues(il->hw); - - /* If we have not previously called il_init() then - * clear all bits but the RF Kill bit and return */ - if (!il_is_init(il)) { - il->status = - test_bit(S_RF_KILL_HW, - &il-> - status) << S_RF_KILL_HW | - test_bit(S_GEO_CONFIGURED, - &il-> - status) << S_GEO_CONFIGURED | - test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING; - goto exit; - } - - /* ...otherwise clear out all the status bits but the RF Kill - * bit and continue taking the NIC down. 
*/ - il->status &= - test_bit(S_RF_KILL_HW, - &il->status) << S_RF_KILL_HW | test_bit(S_GEO_CONFIGURED, - &il-> - status) << - S_GEO_CONFIGURED | test_bit(S_FW_ERROR, - &il-> - status) << S_FW_ERROR | - test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING; - - il4965_txq_ctx_stop(il); - il4965_rxq_stop(il); - - /* Power-down device's busmaster DMA clocks */ - il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); - udelay(5); - - /* Make sure (redundant) we've released our request to stay awake */ - il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - - /* Stop the device, and put it in low power state */ - il_apm_stop(il); - -exit: - memset(&il->card_alive, 0, sizeof(struct il_alive_resp)); - - dev_kfree_skb(il->beacon_skb); - il->beacon_skb = NULL; - - /* clear out any free frames */ - il4965_clear_free_frames(il); -} - -static void -il4965_down(struct il_priv *il) -{ - mutex_lock(&il->mutex); - __il4965_down(il); - mutex_unlock(&il->mutex); - - il4965_cancel_deferred_work(il); -} - -#define HW_READY_TIMEOUT (50) - -static int -il4965_set_hw_ready(struct il_priv *il) -{ - int ret = 0; - - il_set_bit(il, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); - - /* See if we got it */ - ret = - _il_poll_bit(il, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, - CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, HW_READY_TIMEOUT); - if (ret != -ETIMEDOUT) - il->hw_ready = true; - else - il->hw_ready = false; - - D_INFO("hardware %s\n", (il->hw_ready == 1) ? "ready" : "not ready"); - return ret; -} - -static int -il4965_prepare_card_hw(struct il_priv *il) -{ - int ret = 0; - - D_INFO("il4965_prepare_card_hw enter\n"); - - ret = il4965_set_hw_ready(il); - if (il->hw_ready) - return ret; - - /* If HW is not ready, prepare the conditions to check again */ - il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE); - - ret = - _il_poll_bit(il, CSR_HW_IF_CONFIG_REG, - ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, - CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); - - /* HW should be ready by now, check again. 
*/ - if (ret != -ETIMEDOUT) - il4965_set_hw_ready(il); - - return ret; -} - -#define MAX_HW_RESTARTS 5 - -static int -__il4965_up(struct il_priv *il) -{ - int i; - int ret; - - if (test_bit(S_EXIT_PENDING, &il->status)) { - IL_WARN("Exit pending; will not bring the NIC up\n"); - return -EIO; - } - - if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) { - IL_ERR("ucode not available for device bringup\n"); - return -EIO; - } - - ret = il4965_alloc_bcast_station(il, &il->ctx); - if (ret) { - il_dealloc_bcast_stations(il); - return ret; - } - - il4965_prepare_card_hw(il); - - if (!il->hw_ready) { - IL_WARN("Exit HW not ready\n"); - return -EIO; - } - - /* If platform's RF_KILL switch is NOT set to KILL */ - if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) - clear_bit(S_RF_KILL_HW, &il->status); - else - set_bit(S_RF_KILL_HW, &il->status); - - if (il_is_rfkill(il)) { - wiphy_rfkill_set_hw_state(il->hw->wiphy, true); - - il_enable_interrupts(il); - IL_WARN("Radio disabled by HW RF Kill switch\n"); - return 0; - } - - _il_wr(il, CSR_INT, 0xFFFFFFFF); - - /* must be initialised before il_hw_nic_init */ - il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM; - - ret = il4965_hw_nic_init(il); - if (ret) { - IL_ERR("Unable to init nic\n"); - return ret; - } - - /* make sure rfkill handshake bits are cleared */ - _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); - _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); - - /* clear (again), then enable host interrupts */ - _il_wr(il, CSR_INT, 0xFFFFFFFF); - il_enable_interrupts(il); - - /* really make sure rfkill handshake bits are cleared */ - _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); - _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); - - /* Copy original ucode data image from disk into backup cache. - * This will be used to initialize the on-board processor's - * data SRAM for a clean start when the runtime program first loads. 
*/ - memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr, - il->ucode_data.len); - - for (i = 0; i < MAX_HW_RESTARTS; i++) { - - /* load bootstrap state machine, - * load bootstrap program into processor's memory, - * prepare to load the "initialize" uCode */ - ret = il->cfg->ops->lib->load_ucode(il); - - if (ret) { - IL_ERR("Unable to set up bootstrap uCode: %d\n", ret); - continue; - } - - /* start card; "initialize" will load runtime ucode */ - il4965_nic_start(il); - - D_INFO(DRV_NAME " is coming up\n"); - - return 0; - } - - set_bit(S_EXIT_PENDING, &il->status); - __il4965_down(il); - clear_bit(S_EXIT_PENDING, &il->status); - - /* tried to restart and config the device for as long as our - * patience could withstand */ - IL_ERR("Unable to initialize device after %d attempts.\n", i); - return -EIO; -} - -/***************************************************************************** - * - * Workqueue callbacks - * - *****************************************************************************/ - -static void -il4965_bg_init_alive_start(struct work_struct *data) -{ - struct il_priv *il = - container_of(data, struct il_priv, init_alive_start.work); - - mutex_lock(&il->mutex); - if (test_bit(S_EXIT_PENDING, &il->status)) - goto out; - - il->cfg->ops->lib->init_alive_start(il); -out: - mutex_unlock(&il->mutex); -} - -static void -il4965_bg_alive_start(struct work_struct *data) -{ - struct il_priv *il = - container_of(data, struct il_priv, alive_start.work); - - mutex_lock(&il->mutex); - if (test_bit(S_EXIT_PENDING, &il->status)) - goto out; - - il4965_alive_start(il); -out: - mutex_unlock(&il->mutex); -} - -static void -il4965_bg_run_time_calib_work(struct work_struct *work) -{ - struct il_priv *il = container_of(work, struct il_priv, - run_time_calib_work); - - mutex_lock(&il->mutex); - - if (test_bit(S_EXIT_PENDING, &il->status) || - test_bit(S_SCANNING, &il->status)) { - mutex_unlock(&il->mutex); - return; - } - - if (il->start_calib) { - il4965_chain_noise_calibration(il, (void *)&il->_4965.stats); - il4965_sensitivity_calibration(il, (void *)&il->_4965.stats); - } - - mutex_unlock(&il->mutex); -} - -static void -il4965_bg_restart(struct work_struct *data) -{ - struct il_priv *il = container_of(data, struct il_priv, restart); - - if (test_bit(S_EXIT_PENDING, &il->status)) - return; - - if (test_and_clear_bit(S_FW_ERROR, &il->status)) { - mutex_lock(&il->mutex); - il->ctx.vif = NULL; - il->is_open = 0; - - __il4965_down(il); - - mutex_unlock(&il->mutex); - il4965_cancel_deferred_work(il); - ieee80211_restart_hw(il->hw); - } else { - il4965_down(il); - - mutex_lock(&il->mutex); - if (test_bit(S_EXIT_PENDING, &il->status)) { - mutex_unlock(&il->mutex); - return; - } - - __il4965_up(il); - mutex_unlock(&il->mutex); - } -} - -static void -il4965_bg_rx_replenish(struct work_struct *data) -{ - struct il_priv *il = container_of(data, struct il_priv, rx_replenish); - - if (test_bit(S_EXIT_PENDING, &il->status)) - return; - - mutex_lock(&il->mutex); - il4965_rx_replenish(il); - mutex_unlock(&il->mutex); -} - -/***************************************************************************** - * - * mac80211 entry point functions - * - *****************************************************************************/ - -#define UCODE_READY_TIMEOUT (4 * HZ) - -/* - * Not a mac80211 entry point function, but it fits in with all the - * other mac80211 functions grouped here. 
- */ -static int -il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length) -{ - int ret; - struct ieee80211_hw *hw = il->hw; - - hw->rate_control_algorithm = "iwl-4965-rs"; - - /* Tell mac80211 our characteristics */ - hw->flags = - IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION | - IEEE80211_HW_NEED_DTIM_PERIOD | IEEE80211_HW_SPECTRUM_MGMT | - IEEE80211_HW_REPORTS_TX_ACK_STATUS; - - if (il->cfg->sku & IL_SKU_N) - hw->flags |= - IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | - IEEE80211_HW_SUPPORTS_STATIC_SMPS; - - hw->sta_data_size = sizeof(struct il_station_priv); - hw->vif_data_size = sizeof(struct il_vif_priv); - - hw->wiphy->interface_modes |= il->ctx.interface_modes; - hw->wiphy->interface_modes |= il->ctx.exclusive_interface_modes; - - hw->wiphy->flags |= - WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS; - - /* - * For now, disable PS by default because it affects - * RX performance significantly. - */ - hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; - - hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; - /* we create the 802.11 header and a zero-length SSID element */ - hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2; - - /* Default value; 4 EDCA QOS priorities */ - hw->queues = 4; - - hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL; - - if (il->bands[IEEE80211_BAND_2GHZ].n_channels) - il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = - &il->bands[IEEE80211_BAND_2GHZ]; - if (il->bands[IEEE80211_BAND_5GHZ].n_channels) - il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = - &il->bands[IEEE80211_BAND_5GHZ]; - - il_leds_init(il); - - ret = ieee80211_register_hw(il->hw); - if (ret) { - IL_ERR("Failed to register hw (error %d)\n", ret); - return ret; - } - il->mac80211_registered = 1; - - return 0; -} - -int -il4965_mac_start(struct ieee80211_hw *hw) -{ - struct il_priv *il = hw->priv; - int ret; - - D_MAC80211("enter\n"); - - /* we should be verifying the device is ready to be opened */ - mutex_lock(&il->mutex); - ret = __il4965_up(il); - mutex_unlock(&il->mutex); - - if (ret) - return ret; - - if (il_is_rfkill(il)) - goto out; - - D_INFO("Start UP work done.\n"); - - /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from - * mac80211 will not be run successfully. 
*/ - ret = wait_event_timeout(il->wait_command_queue, - test_bit(S_READY, &il->status), - UCODE_READY_TIMEOUT); - if (!ret) { - if (!test_bit(S_READY, &il->status)) { - IL_ERR("START_ALIVE timeout after %dms.\n", - jiffies_to_msecs(UCODE_READY_TIMEOUT)); - return -ETIMEDOUT; - } - } - - il4965_led_enable(il); - -out: - il->is_open = 1; - D_MAC80211("leave\n"); - return 0; -} - -void -il4965_mac_stop(struct ieee80211_hw *hw) -{ - struct il_priv *il = hw->priv; - - D_MAC80211("enter\n"); - - if (!il->is_open) - return; - - il->is_open = 0; - - il4965_down(il); - - flush_workqueue(il->workqueue); - - /* User space software may expect getting rfkill changes - * even if interface is down */ - _il_wr(il, CSR_INT, 0xFFFFFFFF); - il_enable_rfkill_int(il); - - D_MAC80211("leave\n"); -} - -void -il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) -{ - struct il_priv *il = hw->priv; - - D_MACDUMP("enter\n"); - - D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, - ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); - - if (il4965_tx_skb(il, skb)) - dev_kfree_skb_any(skb); - - D_MACDUMP("leave\n"); -} - -void -il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_key_conf *keyconf, - struct ieee80211_sta *sta, u32 iv32, u16 * phase1key) -{ - struct il_priv *il = hw->priv; - struct il_vif_priv *vif_priv = (void *)vif->drv_priv; - - D_MAC80211("enter\n"); - - il4965_update_tkip_key(il, vif_priv->ctx, keyconf, sta, iv32, - phase1key); - - D_MAC80211("leave\n"); -} - -int -il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, - struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) -{ - struct il_priv *il = hw->priv; - struct il_vif_priv *vif_priv = (void *)vif->drv_priv; - struct il_rxon_context *ctx = vif_priv->ctx; - int ret; - u8 sta_id; - bool is_default_wep_key = false; - - D_MAC80211("enter\n"); - - if (il->cfg->mod_params->sw_crypto) { - D_MAC80211("leave - hwcrypto disabled\n"); - return -EOPNOTSUPP; - } - - sta_id = il_sta_id_or_broadcast(il, vif_priv->ctx, sta); - if (sta_id == IL_INVALID_STATION) - return -EINVAL; - - mutex_lock(&il->mutex); - il_scan_cancel_timeout(il, 100); - - /* - * If we are getting WEP group key and we didn't receive any key mapping - * so far, we are in legacy wep mode (group key only), otherwise we are - * in 1X mode. - * In legacy wep mode, we use another host command to the uCode. 
- */ - if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || - key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) { - if (cmd == SET_KEY) - is_default_wep_key = !ctx->key_mapping_keys; - else - is_default_wep_key = - (key->hw_key_idx == HW_KEY_DEFAULT); - } - - switch (cmd) { - case SET_KEY: - if (is_default_wep_key) - ret = - il4965_set_default_wep_key(il, vif_priv->ctx, key); - else - ret = - il4965_set_dynamic_key(il, vif_priv->ctx, key, - sta_id); - - D_MAC80211("enable hwcrypto key\n"); - break; - case DISABLE_KEY: - if (is_default_wep_key) - ret = il4965_remove_default_wep_key(il, ctx, key); - else - ret = il4965_remove_dynamic_key(il, ctx, key, sta_id); - - D_MAC80211("disable hwcrypto key\n"); - break; - default: - ret = -EINVAL; - } - - mutex_unlock(&il->mutex); - D_MAC80211("leave\n"); - - return ret; -} - -int -il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 * ssn, - u8 buf_size) -{ - struct il_priv *il = hw->priv; - int ret = -EINVAL; - - D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid); - - if (!(il->cfg->sku & IL_SKU_N)) - return -EACCES; - - mutex_lock(&il->mutex); - - switch (action) { - case IEEE80211_AMPDU_RX_START: - D_HT("start Rx\n"); - ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn); - break; - case IEEE80211_AMPDU_RX_STOP: - D_HT("stop Rx\n"); - ret = il4965_sta_rx_agg_stop(il, sta, tid); - if (test_bit(S_EXIT_PENDING, &il->status)) - ret = 0; - break; - case IEEE80211_AMPDU_TX_START: - D_HT("start Tx\n"); - ret = il4965_tx_agg_start(il, vif, sta, tid, ssn); - break; - case IEEE80211_AMPDU_TX_STOP: - D_HT("stop Tx\n"); - ret = il4965_tx_agg_stop(il, vif, sta, tid); - if (test_bit(S_EXIT_PENDING, &il->status)) - ret = 0; - break; - case IEEE80211_AMPDU_TX_OPERATIONAL: - ret = 0; - break; - } - mutex_unlock(&il->mutex); - - return ret; -} - -int -il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct il_priv *il = hw->priv; - struct il_station_priv *sta_priv = (void *)sta->drv_priv; - struct il_vif_priv *vif_priv = (void *)vif->drv_priv; - bool is_ap = vif->type == NL80211_IFTYPE_STATION; - int ret; - u8 sta_id; - - D_INFO("received request to add station %pM\n", sta->addr); - mutex_lock(&il->mutex); - D_INFO("proceeding to add station %pM\n", sta->addr); - sta_priv->common.sta_id = IL_INVALID_STATION; - - atomic_set(&sta_priv->pending_frames, 0); - - ret = - il_add_station_common(il, vif_priv->ctx, sta->addr, is_ap, sta, - &sta_id); - if (ret) { - IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret); - /* Should we return success if return code is EEXIST ? 
*/ - mutex_unlock(&il->mutex); - return ret; - } - - sta_priv->common.sta_id = sta_id; - - /* Initialize rate scaling */ - D_INFO("Initializing rate scaling for station %pM\n", sta->addr); - il4965_rs_rate_init(il, sta, sta_id); - mutex_unlock(&il->mutex); - - return 0; -} - -void -il4965_mac_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_channel_switch *ch_switch) -{ - struct il_priv *il = hw->priv; - const struct il_channel_info *ch_info; - struct ieee80211_conf *conf = &hw->conf; - struct ieee80211_channel *channel = ch_switch->channel; - struct il_ht_config *ht_conf = &il->current_ht_config; - - struct il_rxon_context *ctx = &il->ctx; - u16 ch; - - D_MAC80211("enter\n"); - - mutex_lock(&il->mutex); - - if (il_is_rfkill(il)) - goto out; - - if (test_bit(S_EXIT_PENDING, &il->status) || - test_bit(S_SCANNING, &il->status) || - test_bit(S_CHANNEL_SWITCH_PENDING, &il->status)) - goto out; - - if (!il_is_associated_ctx(ctx)) - goto out; - - if (!il->cfg->ops->lib->set_channel_switch) - goto out; - - ch = channel->hw_value; - if (le16_to_cpu(ctx->active.channel) == ch) - goto out; - - ch_info = il_get_channel_info(il, channel->band, ch); - if (!il_is_channel_valid(ch_info)) { - D_MAC80211("invalid channel\n"); - goto out; - } - - spin_lock_irq(&il->lock); - - il->current_ht_config.smps = conf->smps_mode; - - /* Configure HT40 channels */ - ctx->ht.enabled = conf_is_ht(conf); - if (ctx->ht.enabled) { - if (conf_is_ht40_minus(conf)) { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_BELOW; - ctx->ht.is_40mhz = true; - } else if (conf_is_ht40_plus(conf)) { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_ABOVE; - ctx->ht.is_40mhz = true; - } else { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_NONE; - ctx->ht.is_40mhz = false; - } - } else - ctx->ht.is_40mhz = false; - - if ((le16_to_cpu(ctx->staging.channel) != ch)) - ctx->staging.flags = 0; - - il_set_rxon_channel(il, channel, ctx); - il_set_rxon_ht(il, ht_conf); - il_set_flags_for_band(il, ctx, channel->band, ctx->vif); - - spin_unlock_irq(&il->lock); - - il_set_rate(il); - /* - * at this point, staging_rxon has the - * configuration for channel switch - */ - set_bit(S_CHANNEL_SWITCH_PENDING, &il->status); - il->switch_channel = cpu_to_le16(ch); - if (il->cfg->ops->lib->set_channel_switch(il, ch_switch)) { - clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status); - il->switch_channel = 0; - ieee80211_chswitch_done(ctx->vif, false); - } - -out: - mutex_unlock(&il->mutex); - D_MAC80211("leave\n"); -} - -void -il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, - unsigned int *total_flags, u64 multicast) -{ - struct il_priv *il = hw->priv; - __le32 filter_or = 0, filter_nand = 0; - -#define CHK(test, flag) do { \ - if (*total_flags & (test)) \ - filter_or |= (flag); \ - else \ - filter_nand |= (flag); \ - } while (0) - - D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags, - *total_flags); - - CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); - /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */ - CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK); - CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); - -#undef CHK - - mutex_lock(&il->mutex); - - il->ctx.staging.filter_flags &= ~filter_nand; - il->ctx.staging.filter_flags |= filter_or; - - /* - * Not committing directly because hardware can perform a scan, - * but we'll eventually commit the filter flags change anyway. 
- */ - - mutex_unlock(&il->mutex); - - /* - * Receiving all multicast frames is always enabled by the - * default flags setup in il_connection_init_rx_config() - * since we currently do not support programming multicast - * filters into the device. - */ - *total_flags &= - FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | - FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; -} - -/***************************************************************************** - * - * driver setup and teardown - * - *****************************************************************************/ - -static void -il4965_bg_txpower_work(struct work_struct *work) -{ - struct il_priv *il = container_of(work, struct il_priv, - txpower_work); - - mutex_lock(&il->mutex); - - /* If a scan happened to start before we got here - * then just return; the stats notification will - * kick off another scheduled work to compensate for - * any temperature delta we missed here. */ - if (test_bit(S_EXIT_PENDING, &il->status) || - test_bit(S_SCANNING, &il->status)) - goto out; - - /* Regardless of if we are associated, we must reconfigure the - * TX power since frames can be sent on non-radar channels while - * not associated */ - il->cfg->ops->lib->send_tx_power(il); - - /* Update last_temperature to keep is_calib_needed from running - * when it isn't needed... */ - il->last_temperature = il->temperature; -out: - mutex_unlock(&il->mutex); -} - -static void -il4965_setup_deferred_work(struct il_priv *il) -{ - il->workqueue = create_singlethread_workqueue(DRV_NAME); - - init_waitqueue_head(&il->wait_command_queue); - - INIT_WORK(&il->restart, il4965_bg_restart); - INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish); - INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work); - INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start); - INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start); - - il_setup_scan_deferred_work(il); - - INIT_WORK(&il->txpower_work, il4965_bg_txpower_work); - - init_timer(&il->stats_periodic); - il->stats_periodic.data = (unsigned long)il; - il->stats_periodic.function = il4965_bg_stats_periodic; - - init_timer(&il->watchdog); - il->watchdog.data = (unsigned long)il; - il->watchdog.function = il_bg_watchdog; - - tasklet_init(&il->irq_tasklet, - (void (*)(unsigned long))il4965_irq_tasklet, - (unsigned long)il); -} - -static void -il4965_cancel_deferred_work(struct il_priv *il) -{ - cancel_work_sync(&il->txpower_work); - cancel_delayed_work_sync(&il->init_alive_start); - cancel_delayed_work(&il->alive_start); - cancel_work_sync(&il->run_time_calib_work); - - il_cancel_scan_deferred_work(il); - - del_timer_sync(&il->stats_periodic); -} - -static void -il4965_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates) -{ - int i; - - for (i = 0; i < RATE_COUNT_LEGACY; i++) { - rates[i].bitrate = il_rates[i].ieee * 5; - rates[i].hw_value = i; /* Rate scaling will work on idxes */ - rates[i].hw_value_short = i; - rates[i].flags = 0; - if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) { - /* - * If CCK != 1M then set short preamble rate flag. - */ - rates[i].flags |= - (il_rates[i].plcp == - RATE_1M_PLCP) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE; - } - } -} - -/* - * Acquire il->lock before calling this function ! 
- */ -void -il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx) -{ - il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8)); - il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx); -} - -void -il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq, - int tx_fifo_id, int scd_retry) -{ - int txq_id = txq->q.id; - - /* Find out whether to activate Tx queue */ - int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0; - - /* Set up and activate */ - il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id), - (active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) | - (tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) | - (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) | - (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) | - IL49_SCD_QUEUE_STTS_REG_MSK); - - txq->sched_retry = scd_retry; - - D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate", - scd_retry ? "BA" : "AC", txq_id, tx_fifo_id); -} - -static int -il4965_init_drv(struct il_priv *il) -{ - int ret; - - spin_lock_init(&il->sta_lock); - spin_lock_init(&il->hcmd_lock); - - INIT_LIST_HEAD(&il->free_frames); - - mutex_init(&il->mutex); - - il->ieee_channels = NULL; - il->ieee_rates = NULL; - il->band = IEEE80211_BAND_2GHZ; - - il->iw_mode = NL80211_IFTYPE_STATION; - il->current_ht_config.smps = IEEE80211_SMPS_STATIC; - il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF; - - /* initialize force reset */ - il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD; - - /* Choose which receivers/antennas to use */ - if (il->cfg->ops->hcmd->set_rxon_chain) - il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx); - - il_init_scan_params(il); - - ret = il_init_channel_map(il); - if (ret) { - IL_ERR("initializing regulatory failed: %d\n", ret); - goto err; - } - - ret = il_init_geos(il); - if (ret) { - IL_ERR("initializing geos failed: %d\n", ret); - goto err_free_channel_map; - } - il4965_init_hw_rates(il, il->ieee_rates); - - return 0; - -err_free_channel_map: - il_free_channel_map(il); -err: - return ret; -} - -static void -il4965_uninit_drv(struct il_priv *il) -{ - il4965_calib_free_results(il); - il_free_geos(il); - il_free_channel_map(il); - kfree(il->scan_cmd); -} - -static void -il4965_hw_detect(struct il_priv *il) -{ - il->hw_rev = _il_rd(il, CSR_HW_REV); - il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG); - il->rev_id = il->pci_dev->revision; - D_INFO("HW Revision ID = 0x%X\n", il->rev_id); -} - -static int -il4965_set_hw_params(struct il_priv *il) -{ - il->hw_params.max_rxq_size = RX_QUEUE_SIZE; - il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; - if (il->cfg->mod_params->amsdu_size_8K) - il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K); - else - il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K); - - il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL; - - if (il->cfg->mod_params->disable_11n) - il->cfg->sku &= ~IL_SKU_N; - - /* Device-specific setup */ - return il->cfg->ops->lib->set_hw_params(il); -} - -static const u8 il4965_bss_ac_to_fifo[] = { - IL_TX_FIFO_VO, - IL_TX_FIFO_VI, - IL_TX_FIFO_BE, - IL_TX_FIFO_BK, -}; - -static const u8 il4965_bss_ac_to_queue[] = { - 0, 1, 2, 3, -}; - -static int -il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - int err = 0; - struct il_priv *il; - struct ieee80211_hw *hw; - struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data); - unsigned long flags; - u16 pci_cmd; - - /************************ - * 1. 
Allocating HW data - ************************/ - - hw = il_alloc_all(cfg); - if (!hw) { - err = -ENOMEM; - goto out; - } - il = hw->priv; - /* At this point both hw and il are allocated. */ - - il->ctx.ctxid = 0; - - il->ctx.always_active = true; - il->ctx.is_active = true; - il->ctx.rxon_cmd = C_RXON; - il->ctx.rxon_timing_cmd = C_RXON_TIMING; - il->ctx.rxon_assoc_cmd = C_RXON_ASSOC; - il->ctx.qos_cmd = C_QOS_PARAM; - il->ctx.ap_sta_id = IL_AP_ID; - il->ctx.wep_key_cmd = C_WEPKEY; - il->ctx.ac_to_fifo = il4965_bss_ac_to_fifo; - il->ctx.ac_to_queue = il4965_bss_ac_to_queue; - il->ctx.exclusive_interface_modes = BIT(NL80211_IFTYPE_ADHOC); - il->ctx.interface_modes = BIT(NL80211_IFTYPE_STATION); - il->ctx.ap_devtype = RXON_DEV_TYPE_AP; - il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS; - il->ctx.station_devtype = RXON_DEV_TYPE_ESS; - il->ctx.unused_devtype = RXON_DEV_TYPE_ESS; - - SET_IEEE80211_DEV(hw, &pdev->dev); - - D_INFO("*** LOAD DRIVER ***\n"); - il->cfg = cfg; - il->pci_dev = pdev; - il->inta_mask = CSR_INI_SET_MASK; - - if (il_alloc_traffic_mem(il)) - IL_ERR("Not enough memory to generate traffic log\n"); - - /************************** - * 2. Initializing PCI bus - **************************/ - pci_disable_link_state(pdev, - PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | - PCIE_LINK_STATE_CLKPM); - - if (pci_enable_device(pdev)) { - err = -ENODEV; - goto out_ieee80211_free_hw; - } - - pci_set_master(pdev); - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); - if (err) { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (!err) - err = - pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - /* both attempts failed: */ - if (err) { - IL_WARN("No suitable DMA available.\n"); - goto out_pci_disable_device; - } - } - - err = pci_request_regions(pdev, DRV_NAME); - if (err) - goto out_pci_disable_device; - - pci_set_drvdata(pdev, il); - - /*********************** - * 3. Read REV register - ***********************/ - il->hw_base = pci_iomap(pdev, 0, 0); - if (!il->hw_base) { - err = -ENODEV; - goto out_pci_release_regions; - } - - D_INFO("pci_resource_len = 0x%08llx\n", - (unsigned long long)pci_resource_len(pdev, 0)); - D_INFO("pci_resource_base = %p\n", il->hw_base); - - /* these spin locks will be used in apm_ops.init and EEPROM access - * we should init now - */ - spin_lock_init(&il->reg_lock); - spin_lock_init(&il->lock); - - /* - * stop and reset the on-board processor just in case it is in a - * strange state ... like being left stranded by a primary kernel - * and this is now the kdump kernel trying to start up - */ - _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); - - il4965_hw_detect(il); - IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev); - - /* We disable the RETRY_TIMEOUT register (0x41) to keep - * PCI Tx retries from interfering with C3 CPU state */ - pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); - - il4965_prepare_card_hw(il); - if (!il->hw_ready) { - IL_WARN("Failed, HW not ready\n"); - goto out_iounmap; - } - - /***************** - * 4. 
Read EEPROM - *****************/ - /* Read the EEPROM */ - err = il_eeprom_init(il); - if (err) { - IL_ERR("Unable to init EEPROM\n"); - goto out_iounmap; - } - err = il4965_eeprom_check_version(il); - if (err) - goto out_free_eeprom; - - if (err) - goto out_free_eeprom; - - /* extract MAC Address */ - il4965_eeprom_get_mac(il, il->addresses[0].addr); - D_INFO("MAC address: %pM\n", il->addresses[0].addr); - il->hw->wiphy->addresses = il->addresses; - il->hw->wiphy->n_addresses = 1; - - /************************ - * 5. Setup HW constants - ************************/ - if (il4965_set_hw_params(il)) { - IL_ERR("failed to set hw parameters\n"); - goto out_free_eeprom; - } - - /******************* - * 6. Setup il - *******************/ - - err = il4965_init_drv(il); - if (err) - goto out_free_eeprom; - /* At this point both hw and il are initialized. */ - - /******************** - * 7. Setup services - ********************/ - spin_lock_irqsave(&il->lock, flags); - il_disable_interrupts(il); - spin_unlock_irqrestore(&il->lock, flags); - - pci_enable_msi(il->pci_dev); - - err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il); - if (err) { - IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq); - goto out_disable_msi; - } - - il4965_setup_deferred_work(il); - il4965_setup_handlers(il); - - /********************************************* - * 8. Enable interrupts and read RFKILL state - *********************************************/ - - /* enable rfkill interrupt: hw bug w/a */ - pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd); - if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { - pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; - pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd); - } - - il_enable_rfkill_int(il); - - /* If platform's RF_KILL switch is NOT set to KILL */ - if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) - clear_bit(S_RF_KILL_HW, &il->status); - else - set_bit(S_RF_KILL_HW, &il->status); - - wiphy_rfkill_set_hw_state(il->hw->wiphy, - test_bit(S_RF_KILL_HW, &il->status)); - - il_power_initialize(il); - - init_completion(&il->_4965.firmware_loading_complete); - - err = il4965_request_firmware(il, true); - if (err) - goto out_destroy_workqueue; - - return 0; - -out_destroy_workqueue: - destroy_workqueue(il->workqueue); - il->workqueue = NULL; - free_irq(il->pci_dev->irq, il); -out_disable_msi: - pci_disable_msi(il->pci_dev); - il4965_uninit_drv(il); -out_free_eeprom: - il_eeprom_free(il); -out_iounmap: - pci_iounmap(pdev, il->hw_base); -out_pci_release_regions: - pci_set_drvdata(pdev, NULL); - pci_release_regions(pdev); -out_pci_disable_device: - pci_disable_device(pdev); -out_ieee80211_free_hw: - il_free_traffic_mem(il); - ieee80211_free_hw(il->hw); -out: - return err; -} - -static void __devexit -il4965_pci_remove(struct pci_dev *pdev) -{ - struct il_priv *il = pci_get_drvdata(pdev); - unsigned long flags; - - if (!il) - return; - - wait_for_completion(&il->_4965.firmware_loading_complete); - - D_INFO("*** UNLOAD DRIVER ***\n"); - - il_dbgfs_unregister(il); - sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group); - - /* ieee80211_unregister_hw call wil cause il_mac_stop to - * to be called and il4965_down since we are removing the device - * we need to set S_EXIT_PENDING bit. - */ - set_bit(S_EXIT_PENDING, &il->status); - - il_leds_exit(il); - - if (il->mac80211_registered) { - ieee80211_unregister_hw(il->hw); - il->mac80211_registered = 0; - } else { - il4965_down(il); - } - - /* - * Make sure device is reset to low power before unloading driver. 
- * This may be redundant with il4965_down(), but there are paths to - * run il4965_down() without calling apm_ops.stop(), and there are - * paths to avoid running il4965_down() at all before leaving driver. - * This (inexpensive) call *makes sure* device is reset. - */ - il_apm_stop(il); - - /* make sure we flush any pending irq or - * tasklet for the driver - */ - spin_lock_irqsave(&il->lock, flags); - il_disable_interrupts(il); - spin_unlock_irqrestore(&il->lock, flags); - - il4965_synchronize_irq(il); - - il4965_dealloc_ucode_pci(il); - - if (il->rxq.bd) - il4965_rx_queue_free(il, &il->rxq); - il4965_hw_txq_ctx_free(il); - - il_eeprom_free(il); - - /*netif_stop_queue(dev); */ - flush_workqueue(il->workqueue); - - /* ieee80211_unregister_hw calls il_mac_stop, which flushes - * il->workqueue... so we can't take down the workqueue - * until now... */ - destroy_workqueue(il->workqueue); - il->workqueue = NULL; - il_free_traffic_mem(il); - - free_irq(il->pci_dev->irq, il); - pci_disable_msi(il->pci_dev); - pci_iounmap(pdev, il->hw_base); - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - - il4965_uninit_drv(il); - - dev_kfree_skb(il->beacon_skb); - - ieee80211_free_hw(il->hw); -} - -/* - * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask - * must be called under il->lock and mac access - */ -void -il4965_txq_set_sched(struct il_priv *il, u32 mask) -{ - il_wr_prph(il, IL49_SCD_TXFACT, mask); -} - -/***************************************************************************** - * - * driver and module entry point - * - *****************************************************************************/ - -/* Hardware specific file defines the PCI IDs table for that hardware module */ -static DEFINE_PCI_DEVICE_TABLE(il4965_hw_card_ids) = { - {IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)}, - {IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)}, - {0} -}; -MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids); - -static struct pci_driver il4965_driver = { - .name = DRV_NAME, - .id_table = il4965_hw_card_ids, - .probe = il4965_pci_probe, - .remove = __devexit_p(il4965_pci_remove), - .driver.pm = IL_LEGACY_PM_OPS, -}; - -static int __init -il4965_init(void) -{ - - int ret; - pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); - pr_info(DRV_COPYRIGHT "\n"); - - ret = il4965_rate_control_register(); - if (ret) { - pr_err("Unable to register rate control algorithm: %d\n", ret); - return ret; - } - - ret = pci_register_driver(&il4965_driver); - if (ret) { - pr_err("Unable to initialize PCI module\n"); - goto error_register; - } - - return ret; - -error_register: - il4965_rate_control_unregister(); - return ret; -} - -static void __exit -il4965_exit(void) -{ - pci_unregister_driver(&il4965_driver); - il4965_rate_control_unregister(); -} - -module_exit(il4965_exit); -module_init(il4965_init); - -#ifdef CONFIG_IWLEGACY_DEBUG -module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(debug, "debug output mask"); -#endif - -module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, S_IRUGO); -MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); -module_param_named(queues_num, il4965_mod_params.num_of_queues, int, S_IRUGO); -MODULE_PARM_DESC(queues_num, "number of hw queues."); -module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO); -MODULE_PARM_DESC(11n_disable, "disable 11n functionality"); -module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int, - S_IRUGO); 
-MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); -module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO); -MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); diff --git a/trunk/drivers/net/wireless/iwlegacy/4965-rs.c b/trunk/drivers/net/wireless/iwlegacy/4965-rs.c deleted file mode 100644 index 467d0cb14ecd..000000000000 --- a/trunk/drivers/net/wireless/iwlegacy/4965-rs.c +++ /dev/null @@ -1,2860 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ -#include -#include -#include -#include -#include - -#include -#include -#include - -#include - -#include "common.h" -#include "4965.h" - -#define IL4965_RS_NAME "iwl-4965-rs" - -#define NUM_TRY_BEFORE_ANT_TOGGLE 1 -#define IL_NUMBER_TRY 1 -#define IL_HT_NUMBER_TRY 3 - -#define RATE_MAX_WINDOW 62 /* # tx in history win */ -#define RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */ -#define RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */ - -/* max allowed rate miss before sync LQ cmd */ -#define IL_MISSED_RATE_MAX 15 -/* max time to accum history 2 seconds */ -#define RATE_SCALE_FLUSH_INTVL (3*HZ) - -static u8 rs_ht_to_legacy[] = { - RATE_6M_IDX, RATE_6M_IDX, - RATE_6M_IDX, RATE_6M_IDX, - RATE_6M_IDX, - RATE_6M_IDX, RATE_9M_IDX, - RATE_12M_IDX, RATE_18M_IDX, - RATE_24M_IDX, RATE_36M_IDX, - RATE_48M_IDX, RATE_54M_IDX -}; - -static const u8 ant_toggle_lookup[] = { - /*ANT_NONE -> */ ANT_NONE, - /*ANT_A -> */ ANT_B, - /*ANT_B -> */ ANT_C, - /*ANT_AB -> */ ANT_BC, - /*ANT_C -> */ ANT_A, - /*ANT_AC -> */ ANT_AB, - /*ANT_BC -> */ ANT_AC, - /*ANT_ABC -> */ ANT_ABC, -}; - -#define IL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \ - [RATE_##r##M_IDX] = { RATE_##r##M_PLCP, \ - RATE_SISO_##s##M_PLCP, \ - RATE_MIMO2_##s##M_PLCP,\ - RATE_##r##M_IEEE, \ - RATE_##ip##M_IDX, \ - RATE_##in##M_IDX, \ - RATE_##rp##M_IDX, \ - RATE_##rn##M_IDX, \ - RATE_##pp##M_IDX, \ - RATE_##np##M_IDX } - -/* - * Parameter order: - * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate - * - * If there isn't a valid next or previous rate then INV is used which - * maps to RATE_INVALID - * - */ -const struct il_rate_info il_rates[RATE_COUNT] = { - IL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */ - IL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */ - IL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */ - IL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */ - IL_DECLARE_RATE_INFO(6, 6, 5, 9, 
5, 11, 5, 11), /* 6mbps */ - IL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */ - IL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */ - IL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */ - IL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */ - IL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */ - IL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */ - IL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */ - IL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */ -}; - -static int -il4965_hwrate_to_plcp_idx(u32 rate_n_flags) -{ - int idx = 0; - - /* HT rate format */ - if (rate_n_flags & RATE_MCS_HT_MSK) { - idx = (rate_n_flags & 0xff); - - if (idx >= RATE_MIMO2_6M_PLCP) - idx = idx - RATE_MIMO2_6M_PLCP; - - idx += IL_FIRST_OFDM_RATE; - /* skip 9M not supported in ht */ - if (idx >= RATE_9M_IDX) - idx += 1; - if (idx >= IL_FIRST_OFDM_RATE && idx <= IL_LAST_OFDM_RATE) - return idx; - - /* legacy rate format, search for match in table */ - } else { - for (idx = 0; idx < ARRAY_SIZE(il_rates); idx++) - if (il_rates[idx].plcp == (rate_n_flags & 0xFF)) - return idx; - } - - return -1; -} - -static void il4965_rs_rate_scale_perform(struct il_priv *il, - struct sk_buff *skb, - struct ieee80211_sta *sta, - struct il_lq_sta *lq_sta); -static void il4965_rs_fill_link_cmd(struct il_priv *il, - struct il_lq_sta *lq_sta, u32 rate_n_flags); -static void il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, - bool force_search); - -#ifdef CONFIG_MAC80211_DEBUGFS -static void il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, - u32 *rate_n_flags, int idx); -#else -static void -il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx) -{ -} -#endif - -/** - * The following tables contain the expected throughput metrics for all rates - * - * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits - * - * where invalid entries are zeros. - * - * CCK rates are only valid in legacy table and will only be used in G - * (2.4 GHz) band. 
- */ - -static s32 expected_tpt_legacy[RATE_COUNT] = { - 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0 -}; - -static s32 expected_tpt_siso20MHz[4][RATE_COUNT] = { - {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */ - {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */ - {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */ - {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */ -}; - -static s32 expected_tpt_siso40MHz[4][RATE_COUNT] = { - {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */ - {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */ - {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */ - {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */ -}; - -static s32 expected_tpt_mimo2_20MHz[4][RATE_COUNT] = { - {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */ - {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */ - {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */ - {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI */ -}; - -static s32 expected_tpt_mimo2_40MHz[4][RATE_COUNT] = { - {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */ - {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */ - {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */ - {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */ -}; - -/* mbps, mcs */ -static const struct il_rate_mcs_info il_rate_mcs[RATE_COUNT] = { - {"1", "BPSK DSSS"}, - {"2", "QPSK DSSS"}, - {"5.5", "BPSK CCK"}, - {"11", "QPSK CCK"}, - {"6", "BPSK 1/2"}, - {"9", "BPSK 1/2"}, - {"12", "QPSK 1/2"}, - {"18", "QPSK 3/4"}, - {"24", "16QAM 1/2"}, - {"36", "16QAM 3/4"}, - {"48", "64QAM 2/3"}, - {"54", "64QAM 3/4"}, - {"60", "64QAM 5/6"}, -}; - -#define MCS_IDX_PER_STREAM (8) - -static inline u8 -il4965_rs_extract_rate(u32 rate_n_flags) -{ - return (u8) (rate_n_flags & 0xFF); -} - -static void -il4965_rs_rate_scale_clear_win(struct il_rate_scale_data *win) -{ - win->data = 0; - win->success_counter = 0; - win->success_ratio = IL_INVALID_VALUE; - win->counter = 0; - win->average_tpt = IL_INVALID_VALUE; - win->stamp = 0; -} - -static inline u8 -il4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type) -{ - return (ant_type & valid_antenna) == ant_type; -} - -/* - * removes the old data from the stats. All data that is older than - * TID_MAX_TIME_DIFF, will be deleted. 
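The aging step described above (and implemented just below in il4965_rs_tl_rm_old_stats()) can be modelled in isolation: a ring of fixed-width time buckets, where anything older than the window is dropped from the running total. The demo_* names and bucket constants here are illustrative stand-ins for TID_QUEUE_MAX_SIZE, TID_QUEUE_CELL_SPACING and TID_MAX_TIME_DIFF, whose real values live in the driver headers:

#include <stdio.h>

#define DEMO_QUEUE_MAX_SIZE   20        /* number of time buckets */
#define DEMO_CELL_SPACING_MS  500       /* width of one bucket */
#define DEMO_MAX_TIME_DIFF    (DEMO_QUEUE_MAX_SIZE * DEMO_CELL_SPACING_MS)

struct demo_traffic_load {
        unsigned long time_stamp;       /* start time of oldest bucket (ms) */
        unsigned int packet_count[DEMO_QUEUE_MAX_SIZE];
        unsigned int total;             /* packets summed over all buckets */
        unsigned int queue_count;       /* buckets currently in use */
        unsigned int head;              /* index of the oldest bucket */
};

/* Drop every bucket whose start time is older than the allowed window. */
static void demo_tl_rm_old_stats(struct demo_traffic_load *tl,
                                 unsigned long curr_time)
{
        unsigned long oldest_time = curr_time - DEMO_MAX_TIME_DIFF;

        while (tl->queue_count && tl->time_stamp < oldest_time) {
                tl->total -= tl->packet_count[tl->head];
                tl->packet_count[tl->head] = 0;
                tl->time_stamp += DEMO_CELL_SPACING_MS;
                tl->queue_count--;
                tl->head = (tl->head + 1) % DEMO_QUEUE_MAX_SIZE;
        }
}

int main(void)
{
        struct demo_traffic_load tl = {
                .time_stamp = 0, .packet_count = { [0] = 7 },
                .total = 7, .queue_count = 1, .head = 0,
        };

        demo_tl_rm_old_stats(&tl, 2 * DEMO_MAX_TIME_DIFF);
        printf("total after aging: %u\n", tl.total);    /* prints 0 */
        return 0;
}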
- */ -static void -il4965_rs_tl_rm_old_stats(struct il_traffic_load *tl, u32 curr_time) -{ - /* The oldest age we want to keep */ - u32 oldest_time = curr_time - TID_MAX_TIME_DIFF; - - while (tl->queue_count && tl->time_stamp < oldest_time) { - tl->total -= tl->packet_count[tl->head]; - tl->packet_count[tl->head] = 0; - tl->time_stamp += TID_QUEUE_CELL_SPACING; - tl->queue_count--; - tl->head++; - if (tl->head >= TID_QUEUE_MAX_SIZE) - tl->head = 0; - } -} - -/* - * increment traffic load value for tid and also remove - * any old values if passed the certain time period - */ -static u8 -il4965_rs_tl_add_packet(struct il_lq_sta *lq_data, struct ieee80211_hdr *hdr) -{ - u32 curr_time = jiffies_to_msecs(jiffies); - u32 time_diff; - s32 idx; - struct il_traffic_load *tl = NULL; - u8 tid; - - if (ieee80211_is_data_qos(hdr->frame_control)) { - u8 *qc = ieee80211_get_qos_ctl(hdr); - tid = qc[0] & 0xf; - } else - return MAX_TID_COUNT; - - if (unlikely(tid >= TID_MAX_LOAD_COUNT)) - return MAX_TID_COUNT; - - tl = &lq_data->load[tid]; - - curr_time -= curr_time % TID_ROUND_VALUE; - - /* Happens only for the first packet. Initialize the data */ - if (!(tl->queue_count)) { - tl->total = 1; - tl->time_stamp = curr_time; - tl->queue_count = 1; - tl->head = 0; - tl->packet_count[0] = 1; - return MAX_TID_COUNT; - } - - time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); - idx = time_diff / TID_QUEUE_CELL_SPACING; - - /* The history is too long: remove data that is older than */ - /* TID_MAX_TIME_DIFF */ - if (idx >= TID_QUEUE_MAX_SIZE) - il4965_rs_tl_rm_old_stats(tl, curr_time); - - idx = (tl->head + idx) % TID_QUEUE_MAX_SIZE; - tl->packet_count[idx] = tl->packet_count[idx] + 1; - tl->total = tl->total + 1; - - if ((idx + 1) > tl->queue_count) - tl->queue_count = idx + 1; - - return tid; -} - -/* - get the traffic load value for tid -*/ -static u32 -il4965_rs_tl_get_load(struct il_lq_sta *lq_data, u8 tid) -{ - u32 curr_time = jiffies_to_msecs(jiffies); - u32 time_diff; - s32 idx; - struct il_traffic_load *tl = NULL; - - if (tid >= TID_MAX_LOAD_COUNT) - return 0; - - tl = &(lq_data->load[tid]); - - curr_time -= curr_time % TID_ROUND_VALUE; - - if (!(tl->queue_count)) - return 0; - - time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); - idx = time_diff / TID_QUEUE_CELL_SPACING; - - /* The history is too long: remove data that is older than */ - /* TID_MAX_TIME_DIFF */ - if (idx >= TID_QUEUE_MAX_SIZE) - il4965_rs_tl_rm_old_stats(tl, curr_time); - - return tl->total; -} - -static int -il4965_rs_tl_turn_on_agg_for_tid(struct il_priv *il, struct il_lq_sta *lq_data, - u8 tid, struct ieee80211_sta *sta) -{ - int ret = -EAGAIN; - u32 load; - - load = il4965_rs_tl_get_load(lq_data, tid); - - if (load > IL_AGG_LOAD_THRESHOLD) { - D_HT("Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid); - ret = ieee80211_start_tx_ba_session(sta, tid, 5000); - if (ret == -EAGAIN) { - /* - * driver and mac80211 is out of sync - * this might be cause by reloading firmware - * stop the tx ba session here - */ - IL_ERR("Fail start Tx agg on tid: %d\n", tid); - ieee80211_stop_tx_ba_session(sta, tid); - } - } else - D_HT("Aggregation not enabled for tid %d because load = %u\n", - tid, load); - - return ret; -} - -static void -il4965_rs_tl_turn_on_agg(struct il_priv *il, u8 tid, struct il_lq_sta *lq_data, - struct ieee80211_sta *sta) -{ - if (tid < TID_MAX_LOAD_COUNT) - il4965_rs_tl_turn_on_agg_for_tid(il, lq_data, tid, sta); - else - IL_ERR("tid exceeds max load count: %d/%d\n", tid, - TID_MAX_LOAD_COUNT); -} - -static inline 
int -il4965_get_il4965_num_of_ant_from_rate(u32 rate_n_flags) -{ - return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) + - !!(rate_n_flags & RATE_MCS_ANT_B_MSK) + - !!(rate_n_flags & RATE_MCS_ANT_C_MSK); -} - -/* - * Static function to get the expected throughput from an il_scale_tbl_info - * that wraps a NULL pointer check - */ -static s32 -il4965_get_expected_tpt(struct il_scale_tbl_info *tbl, int rs_idx) -{ - if (tbl->expected_tpt) - return tbl->expected_tpt[rs_idx]; - return 0; -} - -/** - * il4965_rs_collect_tx_data - Update the success/failure sliding win - * - * We keep a sliding win of the last 62 packets transmitted - * at this rate. win->data contains the bitmask of successful - * packets. - */ -static int -il4965_rs_collect_tx_data(struct il_scale_tbl_info *tbl, int scale_idx, - int attempts, int successes) -{ - struct il_rate_scale_data *win = NULL; - static const u64 mask = (((u64) 1) << (RATE_MAX_WINDOW - 1)); - s32 fail_count, tpt; - - if (scale_idx < 0 || scale_idx >= RATE_COUNT) - return -EINVAL; - - /* Select win for current tx bit rate */ - win = &(tbl->win[scale_idx]); - - /* Get expected throughput */ - tpt = il4965_get_expected_tpt(tbl, scale_idx); - - /* - * Keep track of only the latest 62 tx frame attempts in this rate's - * history win; anything older isn't really relevant any more. - * If we have filled up the sliding win, drop the oldest attempt; - * if the oldest attempt (highest bit in bitmap) shows "success", - * subtract "1" from the success counter (this is the main reason - * we keep these bitmaps!). - */ - while (attempts > 0) { - if (win->counter >= RATE_MAX_WINDOW) { - - /* remove earliest */ - win->counter = RATE_MAX_WINDOW - 1; - - if (win->data & mask) { - win->data &= ~mask; - win->success_counter--; - } - } - - /* Increment frames-attempted counter */ - win->counter++; - - /* Shift bitmap by one frame to throw away oldest history */ - win->data <<= 1; - - /* Mark the most recent #successes attempts as successful */ - if (successes > 0) { - win->success_counter++; - win->data |= 0x1; - successes--; - } - - attempts--; - } - - /* Calculate current success ratio, avoid divide-by-0! */ - if (win->counter > 0) - win->success_ratio = - 128 * (100 * win->success_counter) / win->counter; - else - win->success_ratio = IL_INVALID_VALUE; - - fail_count = win->counter - win->success_counter; - - /* Calculate average throughput, if we have enough history. */ - if (fail_count >= RATE_MIN_FAILURE_TH || - win->success_counter >= RATE_MIN_SUCCESS_TH) - win->average_tpt = (win->success_ratio * tpt + 64) / 128; - else - win->average_tpt = IL_INVALID_VALUE; - - /* Tag this win as having been updated */ - win->stamp = jiffies; - - return 0; -} - -/* - * Fill uCode API rate_n_flags field, based on "search" or "active" table. 
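Stepping back to the success-window bookkeeping in il4965_rs_collect_tx_data() above: the last 62 transmissions are kept as a bitmask, and the success ratio and average throughput are fixed-point values scaled by 128 so the kernel code never needs floating point. A compilable userspace sketch of just that arithmetic; the demo_* names and the sample numbers in main() are made up, while the 62-slot window and the 128/100/64 scaling follow the code above:

#include <stdio.h>
#include <stdint.h>

#define DEMO_MAX_WINDOW 62

struct demo_win {
        uint64_t data;          /* bitmask, bit 0 = most recent frame */
        int counter;            /* frames recorded, capped at 62 */
        int success_counter;    /* bits set in data */
        int success_ratio;      /* percent, scaled by 128 */
};

/* Record one tx attempt; 'ok' is nonzero if it was acked. */
static void demo_win_add(struct demo_win *w, int ok)
{
        const uint64_t oldest = 1ULL << (DEMO_MAX_WINDOW - 1);

        if (w->counter >= DEMO_MAX_WINDOW) {
                w->counter = DEMO_MAX_WINDOW - 1;
                if (w->data & oldest) {         /* oldest slot falls out */
                        w->data &= ~oldest;
                        w->success_counter--;
                }
        }
        w->counter++;
        w->data <<= 1;
        if (ok) {
                w->data |= 1;
                w->success_counter++;
        }
        w->success_ratio = 128 * (100 * w->success_counter) / w->counter;
}

int main(void)
{
        struct demo_win w = { 0 };
        int i, expected_tpt = 102;      /* arbitrary expected throughput */

        for (i = 0; i < 10; i++)
                demo_win_add(&w, i % 4 != 0);   /* 3 of 4 frames succeed */

        /* scaled-percent ratio folded back into an average throughput */
        printf("ratio=%d avg_tpt=%d\n", w.success_ratio,
               (w.success_ratio * expected_tpt + 64) / 128);
        return 0;
}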
- */ -static u32 -il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl, - int idx, u8 use_green) -{ - u32 rate_n_flags = 0; - - if (is_legacy(tbl->lq_type)) { - rate_n_flags = il_rates[idx].plcp; - if (idx >= IL_FIRST_CCK_RATE && idx <= IL_LAST_CCK_RATE) - rate_n_flags |= RATE_MCS_CCK_MSK; - - } else if (is_Ht(tbl->lq_type)) { - if (idx > IL_LAST_OFDM_RATE) { - IL_ERR("Invalid HT rate idx %d\n", idx); - idx = IL_LAST_OFDM_RATE; - } - rate_n_flags = RATE_MCS_HT_MSK; - - if (is_siso(tbl->lq_type)) - rate_n_flags |= il_rates[idx].plcp_siso; - else - rate_n_flags |= il_rates[idx].plcp_mimo2; - } else { - IL_ERR("Invalid tbl->lq_type %d\n", tbl->lq_type); - } - - rate_n_flags |= - ((tbl->ant_type << RATE_MCS_ANT_POS) & RATE_MCS_ANT_ABC_MSK); - - if (is_Ht(tbl->lq_type)) { - if (tbl->is_ht40) { - if (tbl->is_dup) - rate_n_flags |= RATE_MCS_DUP_MSK; - else - rate_n_flags |= RATE_MCS_HT40_MSK; - } - if (tbl->is_SGI) - rate_n_flags |= RATE_MCS_SGI_MSK; - - if (use_green) { - rate_n_flags |= RATE_MCS_GF_MSK; - if (is_siso(tbl->lq_type) && tbl->is_SGI) { - rate_n_flags &= ~RATE_MCS_SGI_MSK; - IL_ERR("GF was set with SGI:SISO\n"); - } - } - } - return rate_n_flags; -} - -/* - * Interpret uCode API's rate_n_flags format, - * fill "search" or "active" tx mode table. - */ -static int -il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags, - enum ieee80211_band band, - struct il_scale_tbl_info *tbl, int *rate_idx) -{ - u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK); - u8 il4965_num_of_ant = - il4965_get_il4965_num_of_ant_from_rate(rate_n_flags); - u8 mcs; - - memset(tbl, 0, sizeof(struct il_scale_tbl_info)); - *rate_idx = il4965_hwrate_to_plcp_idx(rate_n_flags); - - if (*rate_idx == RATE_INVALID) { - *rate_idx = -1; - return -EINVAL; - } - tbl->is_SGI = 0; /* default legacy setup */ - tbl->is_ht40 = 0; - tbl->is_dup = 0; - tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS); - tbl->lq_type = LQ_NONE; - tbl->max_search = IL_MAX_SEARCH; - - /* legacy rate format */ - if (!(rate_n_flags & RATE_MCS_HT_MSK)) { - if (il4965_num_of_ant == 1) { - if (band == IEEE80211_BAND_5GHZ) - tbl->lq_type = LQ_A; - else - tbl->lq_type = LQ_G; - } - /* HT rate format */ - } else { - if (rate_n_flags & RATE_MCS_SGI_MSK) - tbl->is_SGI = 1; - - if ((rate_n_flags & RATE_MCS_HT40_MSK) || - (rate_n_flags & RATE_MCS_DUP_MSK)) - tbl->is_ht40 = 1; - - if (rate_n_flags & RATE_MCS_DUP_MSK) - tbl->is_dup = 1; - - mcs = il4965_rs_extract_rate(rate_n_flags); - - /* SISO */ - if (mcs <= RATE_SISO_60M_PLCP) { - if (il4965_num_of_ant == 1) - tbl->lq_type = LQ_SISO; /*else NONE */ - /* MIMO2 */ - } else { - if (il4965_num_of_ant == 2) - tbl->lq_type = LQ_MIMO2; - } - } - return 0; -} - -/* switch to another antenna/antennas and return 1 */ -/* if no other valid antenna found, return 0 */ -static int -il4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags, - struct il_scale_tbl_info *tbl) -{ - u8 new_ant_type; - - if (!tbl->ant_type || tbl->ant_type > ANT_ABC) - return 0; - - if (!il4965_rs_is_valid_ant(valid_ant, tbl->ant_type)) - return 0; - - new_ant_type = ant_toggle_lookup[tbl->ant_type]; - - while (new_ant_type != tbl->ant_type && - !il4965_rs_is_valid_ant(valid_ant, new_ant_type)) - new_ant_type = ant_toggle_lookup[new_ant_type]; - - if (new_ant_type == tbl->ant_type) - return 0; - - tbl->ant_type = new_ant_type; - *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK; - *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS; - return 1; -} - -/** - * Green-field mode is valid if the station supports it and - * there are no 
non-GF stations present in the BSS. - */ -static bool -il4965_rs_use_green(struct ieee80211_sta *sta) -{ - struct il_station_priv *sta_priv = (void *)sta->drv_priv; - struct il_rxon_context *ctx = sta_priv->common.ctx; - - return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && - !(ctx->ht.non_gf_sta_present); -} - -/** - * il4965_rs_get_supported_rates - get the available rates - * - * if management frame or broadcast frame only return - * basic available rates. - * - */ -static u16 -il4965_rs_get_supported_rates(struct il_lq_sta *lq_sta, - struct ieee80211_hdr *hdr, - enum il_table_type rate_type) -{ - if (is_legacy(rate_type)) { - return lq_sta->active_legacy_rate; - } else { - if (is_siso(rate_type)) - return lq_sta->active_siso_rate; - else - return lq_sta->active_mimo2_rate; - } -} - -static u16 -il4965_rs_get_adjacent_rate(struct il_priv *il, u8 idx, u16 rate_mask, - int rate_type) -{ - u8 high = RATE_INVALID; - u8 low = RATE_INVALID; - - /* 802.11A or ht walks to the next literal adjacent rate in - * the rate table */ - if (is_a_band(rate_type) || !is_legacy(rate_type)) { - int i; - u32 mask; - - /* Find the previous rate that is in the rate mask */ - i = idx - 1; - for (mask = (1 << i); i >= 0; i--, mask >>= 1) { - if (rate_mask & mask) { - low = i; - break; - } - } - - /* Find the next rate that is in the rate mask */ - i = idx + 1; - for (mask = (1 << i); i < RATE_COUNT; i++, mask <<= 1) { - if (rate_mask & mask) { - high = i; - break; - } - } - - return (high << 8) | low; - } - - low = idx; - while (low != RATE_INVALID) { - low = il_rates[low].prev_rs; - if (low == RATE_INVALID) - break; - if (rate_mask & (1 << low)) - break; - D_RATE("Skipping masked lower rate: %d\n", low); - } - - high = idx; - while (high != RATE_INVALID) { - high = il_rates[high].next_rs; - if (high == RATE_INVALID) - break; - if (rate_mask & (1 << high)) - break; - D_RATE("Skipping masked higher rate: %d\n", high); - } - - return (high << 8) | low; -} - -static u32 -il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta, - struct il_scale_tbl_info *tbl, u8 scale_idx, - u8 ht_possible) -{ - s32 low; - u16 rate_mask; - u16 high_low; - u8 switch_to_legacy = 0; - u8 is_green = lq_sta->is_green; - struct il_priv *il = lq_sta->drv; - - /* check if we need to switch from HT to legacy rates. 
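Referring back to il4965_rs_get_adjacent_rate() just above: for A-band and HT tables it simply scans the allowed-rate bitmask for the nearest enabled neighbours and packs them as (high << 8) | low, while the legacy path walks the prev_rs/next_rs links instead. A standalone sketch of the bitmask variant, with illustrative demo_* names:

#include <stdio.h>

#define DEMO_RATE_INVALID 0xff
#define DEMO_RATE_COUNT   13

/*
 * Return the nearest enabled rates below and above idx, packed as
 * (high << 8) | low, with 0xff marking "no such rate".
 */
static unsigned int demo_adjacent_rates(unsigned int rate_mask, int idx)
{
        unsigned int low = DEMO_RATE_INVALID, high = DEMO_RATE_INVALID;
        int i;

        for (i = idx - 1; i >= 0; i--)
                if (rate_mask & (1u << i)) {
                        low = i;
                        break;
                }
        for (i = idx + 1; i < DEMO_RATE_COUNT; i++)
                if (rate_mask & (1u << i)) {
                        high = i;
                        break;
                }
        return (high << 8) | low;
}

int main(void)
{
        /* only rate indexes 4, 6 and 8 enabled; look around index 6 */
        unsigned int hl = demo_adjacent_rates((1 << 4) | (1 << 6) | (1 << 8), 6);

        printf("low=%u high=%u\n", hl & 0xff, (hl >> 8) & 0xff);
        return 0;
}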
- * assumption is that mandatory rates (1Mbps or 6Mbps) - * are always supported (spec demand) */ - if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_idx)) { - switch_to_legacy = 1; - scale_idx = rs_ht_to_legacy[scale_idx]; - if (lq_sta->band == IEEE80211_BAND_5GHZ) - tbl->lq_type = LQ_A; - else - tbl->lq_type = LQ_G; - - if (il4965_num_of_ant(tbl->ant_type) > 1) - tbl->ant_type = - il4965_first_antenna(il->hw_params.valid_tx_ant); - - tbl->is_ht40 = 0; - tbl->is_SGI = 0; - tbl->max_search = IL_MAX_SEARCH; - } - - rate_mask = il4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type); - - /* Mask with station rate restriction */ - if (is_legacy(tbl->lq_type)) { - /* supp_rates has no CCK bits in A mode */ - if (lq_sta->band == IEEE80211_BAND_5GHZ) - rate_mask = - (u16) (rate_mask & - (lq_sta->supp_rates << IL_FIRST_OFDM_RATE)); - else - rate_mask = (u16) (rate_mask & lq_sta->supp_rates); - } - - /* If we switched from HT to legacy, check current rate */ - if (switch_to_legacy && (rate_mask & (1 << scale_idx))) { - low = scale_idx; - goto out; - } - - high_low = - il4965_rs_get_adjacent_rate(lq_sta->drv, scale_idx, rate_mask, - tbl->lq_type); - low = high_low & 0xff; - - if (low == RATE_INVALID) - low = scale_idx; - -out: - return il4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green); -} - -/* - * Simple function to compare two rate scale table types - */ -static bool -il4965_table_type_matches(struct il_scale_tbl_info *a, - struct il_scale_tbl_info *b) -{ - return (a->lq_type == b->lq_type && a->ant_type == b->ant_type && - a->is_SGI == b->is_SGI); -} - -/* - * mac80211 sends us Tx status - */ -static void -il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, void *il_sta, - struct sk_buff *skb) -{ - int legacy_success; - int retries; - int rs_idx, mac_idx, i; - struct il_lq_sta *lq_sta = il_sta; - struct il_link_quality_cmd *table; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct il_priv *il = (struct il_priv *)il_r; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - enum mac80211_rate_control_flags mac_flags; - u32 tx_rate; - struct il_scale_tbl_info tbl_type; - struct il_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; - struct il_station_priv *sta_priv = (void *)sta->drv_priv; - struct il_rxon_context *ctx = sta_priv->common.ctx; - - D_RATE("get frame ack response, update rate scale win\n"); - - /* Treat uninitialized rate scaling data same as non-existing. */ - if (!lq_sta) { - D_RATE("Station rate scaling not created yet.\n"); - return; - } else if (!lq_sta->drv) { - D_RATE("Rate scaling not initialized yet.\n"); - return; - } - - if (!ieee80211_is_data(hdr->frame_control) || - (info->flags & IEEE80211_TX_CTL_NO_ACK)) - return; - - /* This packet was aggregated but doesn't carry status info */ - if ((info->flags & IEEE80211_TX_CTL_AMPDU) && - !(info->flags & IEEE80211_TX_STAT_AMPDU)) - return; - - /* - * Ignore this Tx frame response if its initial rate doesn't match - * that of latest Link Quality command. There may be stragglers - * from a previous Link Quality command, but we're no longer interested - * in those; they're either from the "active" mode while we're trying - * to check "search" mode, or a prior "search" mode after we've moved - * to a new "search" mode (which might become the new "active" mode). 
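The staleness check explained above boils down to a compare-and-count: if the rate reported back by mac80211 does not match entry 0 of the most recent link-quality command the status is ignored, and after IL_MISSED_RATE_MAX (15, per the define near the top of this file) consecutive mismatches the rate table is pushed to the uCode again. A reduced sketch; the real comparison also checks the SGI, HT40, duplicate, antenna and greenfield bits, and the demo_* names and values here are illustrative:

#include <stdio.h>

#define DEMO_MISSED_RATE_MAX 15

struct demo_lq_state {
        unsigned int current_rate;      /* rate_n_flags the driver believes is active */
        int missed_rate_counter;
};

/*
 * Returns 1 if the status report should be used for statistics,
 * 0 if it refers to a stale rate table and must be ignored.
 */
static int demo_check_reported_rate(struct demo_lq_state *lq,
                                    unsigned int reported_rate)
{
        if (reported_rate != lq->current_rate) {
                if (++lq->missed_rate_counter > DEMO_MISSED_RATE_MAX) {
                        lq->missed_rate_counter = 0;
                        /* the real driver re-sends the link quality cmd here */
                        printf("resync: pushing rate table to firmware again\n");
                }
                return 0;
        }
        lq->missed_rate_counter = 0;
        return 1;
}

int main(void)
{
        struct demo_lq_state lq = { .current_rate = 0x4103 };
        int i;

        for (i = 0; i < 20; i++)        /* 20 stale reports in a row */
                demo_check_reported_rate(&lq, 0x4102);
        return 0;
}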
- */ - table = &lq_sta->lq; - tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); - il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type, &rs_idx); - if (il->band == IEEE80211_BAND_5GHZ) - rs_idx -= IL_FIRST_OFDM_RATE; - mac_flags = info->status.rates[0].flags; - mac_idx = info->status.rates[0].idx; - /* For HT packets, map MCS to PLCP */ - if (mac_flags & IEEE80211_TX_RC_MCS) { - mac_idx &= RATE_MCS_CODE_MSK; /* Remove # of streams */ - if (mac_idx >= (RATE_9M_IDX - IL_FIRST_OFDM_RATE)) - mac_idx++; - /* - * mac80211 HT idx is always zero-idxed; we need to move - * HT OFDM rates after CCK rates in 2.4 GHz band - */ - if (il->band == IEEE80211_BAND_2GHZ) - mac_idx += IL_FIRST_OFDM_RATE; - } - /* Here we actually compare this rate to the latest LQ command */ - if (mac_idx < 0 || - tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI) || - tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH) || - tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA) || - tbl_type.ant_type != info->antenna_sel_tx || - !!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS) - || !!(tx_rate & RATE_MCS_GF_MSK) != - !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD) || rs_idx != mac_idx) { - D_RATE("initial rate %d does not match %d (0x%x)\n", mac_idx, - rs_idx, tx_rate); - /* - * Since rates mis-match, the last LQ command may have failed. - * After IL_MISSED_RATE_MAX mis-matches, resync the uCode with - * ... driver. - */ - lq_sta->missed_rate_counter++; - if (lq_sta->missed_rate_counter > IL_MISSED_RATE_MAX) { - lq_sta->missed_rate_counter = 0; - il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false); - } - /* Regardless, ignore this status info for outdated rate */ - return; - } else - /* Rate did match, so reset the missed_rate_counter */ - lq_sta->missed_rate_counter = 0; - - /* Figure out if rate scale algorithm is in active or search table */ - if (il4965_table_type_matches - (&tbl_type, &(lq_sta->lq_info[lq_sta->active_tbl]))) { - curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); - } else - if (il4965_table_type_matches - (&tbl_type, &lq_sta->lq_info[1 - lq_sta->active_tbl])) { - curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); - other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - } else { - D_RATE("Neither active nor search matches tx rate\n"); - tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - D_RATE("active- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type, - tmp_tbl->ant_type, tmp_tbl->is_SGI); - tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); - D_RATE("search- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type, - tmp_tbl->ant_type, tmp_tbl->is_SGI); - D_RATE("actual- lq:%x, ant:%x, SGI:%d\n", tbl_type.lq_type, - tbl_type.ant_type, tbl_type.is_SGI); - /* - * no matching table found, let's by-pass the data collection - * and continue to perform rate scale to find the rate table - */ - il4965_rs_stay_in_table(lq_sta, true); - goto done; - } - - /* - * Updating the frame history depends on whether packets were - * aggregated. - * - * For aggregation, all packets were transmitted at the same rate, the - * first idx into rate scale table. 
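In other words, an A-MPDU feeds the window once with (ampdu_len, ampdu_ack_len) at the first rate-table entry, while a non-aggregated frame feeds it once per retry step, crediting a success only to the final attempt. A minimal sketch of that split; demo_collect() is a stand-in for il4965_rs_collect_tx_data(), the per-retry rate lookup and active/search table matching are omitted, and the numbers in main() are arbitrary:

#include <stdio.h>

/* stand-in for il4965_rs_collect_tx_data(): just print what would be recorded */
static void demo_collect(int rate_idx, int attempts, int successes)
{
        printf("rate %d: %d attempts, %d successes\n",
               rate_idx, attempts, successes);
}

/* Aggregated frames: one call covering the whole A-MPDU at rs_table[0]. */
static void demo_feed_ampdu(int rate_idx, int ampdu_len, int ampdu_ack_len)
{
        demo_collect(rate_idx, ampdu_len, ampdu_ack_len);
}

/*
 * Non-aggregated frame: one call per retry step; only the final attempt
 * can have been the acked one.
 */
static void demo_feed_retries(const int *rate_by_attempt, int retries, int acked)
{
        int i;

        for (i = 0; i <= retries; i++)
                demo_collect(rate_by_attempt[i], 1, i < retries ? 0 : acked);
}

int main(void)
{
        const int rates[] = { 10, 10, 9, 8 };   /* fallback chain by attempt */

        demo_feed_ampdu(10, 24, 21);            /* 24-frame A-MPDU, 21 acked */
        demo_feed_retries(rates, 3, 1);         /* 3 retries, last attempt acked */
        return 0;
}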
- */ - if (info->flags & IEEE80211_TX_STAT_AMPDU) { - tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); - il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type, - &rs_idx); - il4965_rs_collect_tx_data(curr_tbl, rs_idx, - info->status.ampdu_len, - info->status.ampdu_ack_len); - - /* Update success/fail counts if not searching for new mode */ - if (lq_sta->stay_in_tbl) { - lq_sta->total_success += info->status.ampdu_ack_len; - lq_sta->total_failed += - (info->status.ampdu_len - - info->status.ampdu_ack_len); - } - } else { - /* - * For legacy, update frame history with for each Tx retry. - */ - retries = info->status.rates[0].count - 1; - /* HW doesn't send more than 15 retries */ - retries = min(retries, 15); - - /* The last transmission may have been successful */ - legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK); - /* Collect data for each rate used during failed TX attempts */ - for (i = 0; i <= retries; ++i) { - tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags); - il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, - &tbl_type, &rs_idx); - /* - * Only collect stats if retried rate is in the same RS - * table as active/search. - */ - if (il4965_table_type_matches(&tbl_type, curr_tbl)) - tmp_tbl = curr_tbl; - else if (il4965_table_type_matches - (&tbl_type, other_tbl)) - tmp_tbl = other_tbl; - else - continue; - il4965_rs_collect_tx_data(tmp_tbl, rs_idx, 1, - i < - retries ? 0 : legacy_success); - } - - /* Update success/fail counts if not searching for new mode */ - if (lq_sta->stay_in_tbl) { - lq_sta->total_success += legacy_success; - lq_sta->total_failed += retries + (1 - legacy_success); - } - } - /* The last TX rate is cached in lq_sta; it's set in if/else above */ - lq_sta->last_rate_n_flags = tx_rate; -done: - /* See if there's a better rate or modulation mode to try. */ - if (sta->supp_rates[sband->band]) - il4965_rs_rate_scale_perform(il, skb, sta, lq_sta); -} - -/* - * Begin a period of staying with a selected modulation mode. - * Set "stay_in_tbl" flag to prevent any mode switches. - * Set frame tx success limits according to legacy vs. high-throughput, - * and reset overall (spanning all rates) tx success history stats. - * These control how long we stay using same modulation mode before - * searching for a new mode. 
- */ -static void -il4965_rs_set_stay_in_table(struct il_priv *il, u8 is_legacy, - struct il_lq_sta *lq_sta) -{ - D_RATE("we are staying in the same table\n"); - lq_sta->stay_in_tbl = 1; /* only place this gets set */ - if (is_legacy) { - lq_sta->table_count_limit = IL_LEGACY_TBL_COUNT; - lq_sta->max_failure_limit = IL_LEGACY_FAILURE_LIMIT; - lq_sta->max_success_limit = IL_LEGACY_SUCCESS_LIMIT; - } else { - lq_sta->table_count_limit = IL_NONE_LEGACY_TBL_COUNT; - lq_sta->max_failure_limit = IL_NONE_LEGACY_FAILURE_LIMIT; - lq_sta->max_success_limit = IL_NONE_LEGACY_SUCCESS_LIMIT; - } - lq_sta->table_count = 0; - lq_sta->total_failed = 0; - lq_sta->total_success = 0; - lq_sta->flush_timer = jiffies; - lq_sta->action_counter = 0; -} - -/* - * Find correct throughput table for given mode of modulation - */ -static void -il4965_rs_set_expected_tpt_table(struct il_lq_sta *lq_sta, - struct il_scale_tbl_info *tbl) -{ - /* Used to choose among HT tables */ - s32(*ht_tbl_pointer)[RATE_COUNT]; - - /* Check for invalid LQ type */ - if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) { - tbl->expected_tpt = expected_tpt_legacy; - return; - } - - /* Legacy rates have only one table */ - if (is_legacy(tbl->lq_type)) { - tbl->expected_tpt = expected_tpt_legacy; - return; - } - - /* Choose among many HT tables depending on number of streams - * (SISO/MIMO2), channel width (20/40), SGI, and aggregation - * status */ - if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup)) - ht_tbl_pointer = expected_tpt_siso20MHz; - else if (is_siso(tbl->lq_type)) - ht_tbl_pointer = expected_tpt_siso40MHz; - else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup)) - ht_tbl_pointer = expected_tpt_mimo2_20MHz; - else /* if (is_mimo2(tbl->lq_type)) <-- must be true */ - ht_tbl_pointer = expected_tpt_mimo2_40MHz; - - if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */ - tbl->expected_tpt = ht_tbl_pointer[0]; - else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */ - tbl->expected_tpt = ht_tbl_pointer[1]; - else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */ - tbl->expected_tpt = ht_tbl_pointer[2]; - else /* AGG+SGI */ - tbl->expected_tpt = ht_tbl_pointer[3]; -} - -/* - * Find starting rate for new "search" high-throughput mode of modulation. - * Goal is to find lowest expected rate (under perfect conditions) that is - * above the current measured throughput of "active" mode, to give new mode - * a fair chance to prove itself without too many challenges. - * - * This gets called when transitioning to more aggressive modulation - * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive - * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need - * to decrease to match "active" throughput. When moving from MIMO to SISO, - * bit rate will typically need to increase, but not if performance was bad. 
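A deliberately simplified reading of that search: pick the slowest candidate whose ideal-conditions throughput still beats what the active mode actually measured. The real il4965_rs_get_best_rate() below walks up and down from the current index and also weighs the active success ratio, so this is only the core idea. The demo_* names, the rate mask and the 90 Mbit/s figure in main() are illustrative; the table row is the SISO 20 MHz "Norm" row from earlier in this file:

#include <stdio.h>

#define DEMO_RATE_COUNT 13
#define DEMO_INVALID    (-1)

/*
 * Pick the slowest candidate whose expected throughput (ideal conditions,
 * same units as the tables above, scaled by 100 to match the measured
 * value) still beats what the active mode really delivered.  Falls back
 * to the fastest candidate tried if none qualifies.
 */
static int demo_search_start_rate(const int *tpt_tbl, unsigned int rate_mask,
                                  int measured_tpt)
{
        int i, best = DEMO_INVALID;

        for (i = 0; i < DEMO_RATE_COUNT; i++) {
                if (!(rate_mask & (1u << i)) || !tpt_tbl[i])
                        continue;
                if (100 * tpt_tbl[i] > measured_tpt)
                        return i;       /* lowest qualifying rate */
                best = i;               /* remember fastest seen so far */
        }
        return best;
}

int main(void)
{
        /* SISO 20 MHz "Norm" row from the tables in this file */
        static const int tpt[DEMO_RATE_COUNT] =
                {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202};

        printf("start at rate idx %d\n",
               demo_search_start_rate(tpt, 0x1ff0, 9000 /* 90.00 Mbit/s */));
        return 0;
}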
- */ -static s32 -il4965_rs_get_best_rate(struct il_priv *il, struct il_lq_sta *lq_sta, - struct il_scale_tbl_info *tbl, /* "search" */ - u16 rate_mask, s8 idx) -{ - /* "active" values */ - struct il_scale_tbl_info *active_tbl = - &(lq_sta->lq_info[lq_sta->active_tbl]); - s32 active_sr = active_tbl->win[idx].success_ratio; - s32 active_tpt = active_tbl->expected_tpt[idx]; - - /* expected "search" throughput */ - s32 *tpt_tbl = tbl->expected_tpt; - - s32 new_rate, high, low, start_hi; - u16 high_low; - s8 rate = idx; - - new_rate = high = low = start_hi = RATE_INVALID; - - for (;;) { - high_low = - il4965_rs_get_adjacent_rate(il, rate, rate_mask, - tbl->lq_type); - - low = high_low & 0xff; - high = (high_low >> 8) & 0xff; - - /* - * Lower the "search" bit rate, to give new "search" mode - * approximately the same throughput as "active" if: - * - * 1) "Active" mode has been working modestly well (but not - * great), and expected "search" throughput (under perfect - * conditions) at candidate rate is above the actual - * measured "active" throughput (but less than expected - * "active" throughput under perfect conditions). - * OR - * 2) "Active" mode has been working perfectly or very well - * and expected "search" throughput (under perfect - * conditions) at candidate rate is above expected - * "active" throughput (under perfect conditions). - */ - if ((100 * tpt_tbl[rate] > lq_sta->last_tpt && - (active_sr > RATE_DECREASE_TH && active_sr <= RATE_HIGH_TH - && tpt_tbl[rate] <= active_tpt)) || - (active_sr >= RATE_SCALE_SWITCH && - tpt_tbl[rate] > active_tpt)) { - - /* (2nd or later pass) - * If we've already tried to raise the rate, and are - * now trying to lower it, use the higher rate. */ - if (start_hi != RATE_INVALID) { - new_rate = start_hi; - break; - } - - new_rate = rate; - - /* Loop again with lower rate */ - if (low != RATE_INVALID) - rate = low; - - /* Lower rate not available, use the original */ - else - break; - - /* Else try to raise the "search" rate to match "active" */ - } else { - /* (2nd or later pass) - * If we've already tried to lower the rate, and are - * now trying to raise it, use the lower rate. 
*/ - if (new_rate != RATE_INVALID) - break; - - /* Loop again with higher rate */ - else if (high != RATE_INVALID) { - start_hi = high; - rate = high; - - /* Higher rate not available, use the original */ - } else { - new_rate = rate; - break; - } - } - } - - return new_rate; -} - -/* - * Set up search table for MIMO2 - */ -static int -il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, - struct il_scale_tbl_info *tbl, int idx) -{ - u16 rate_mask; - s32 rate; - s8 is_green = lq_sta->is_green; - struct il_station_priv *sta_priv = (void *)sta->drv_priv; - struct il_rxon_context *ctx = sta_priv->common.ctx; - - if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) - return -1; - - if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) == - WLAN_HT_CAP_SM_PS_STATIC) - return -1; - - /* Need both Tx chains/antennas to support MIMO */ - if (il->hw_params.tx_chains_num < 2) - return -1; - - D_RATE("LQ: try to switch to MIMO2\n"); - - tbl->lq_type = LQ_MIMO2; - tbl->is_dup = lq_sta->is_dup; - tbl->action = 0; - tbl->max_search = IL_MAX_SEARCH; - rate_mask = lq_sta->active_mimo2_rate; - - if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap)) - tbl->is_ht40 = 1; - else - tbl->is_ht40 = 0; - - il4965_rs_set_expected_tpt_table(lq_sta, tbl); - - rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx); - - D_RATE("LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask); - if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) { - D_RATE("Can't switch with idx %d rate mask %x\n", rate, - rate_mask); - return -1; - } - tbl->current_rate = - il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green); - - D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate, - is_green); - return 0; -} - -/* - * Set up search table for SISO - */ -static int -il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta, - struct ieee80211_conf *conf, struct ieee80211_sta *sta, - struct il_scale_tbl_info *tbl, int idx) -{ - u16 rate_mask; - u8 is_green = lq_sta->is_green; - s32 rate; - struct il_station_priv *sta_priv = (void *)sta->drv_priv; - struct il_rxon_context *ctx = sta_priv->common.ctx; - - if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) - return -1; - - D_RATE("LQ: try to switch to SISO\n"); - - tbl->is_dup = lq_sta->is_dup; - tbl->lq_type = LQ_SISO; - tbl->action = 0; - tbl->max_search = IL_MAX_SEARCH; - rate_mask = lq_sta->active_siso_rate; - - if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap)) - tbl->is_ht40 = 1; - else - tbl->is_ht40 = 0; - - if (is_green) - tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield */ - - il4965_rs_set_expected_tpt_table(lq_sta, tbl); - rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx); - - D_RATE("LQ: get best rate %d mask %X\n", rate, rate_mask); - if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) { - D_RATE("can not switch with idx %d rate mask %x\n", rate, - rate_mask); - return -1; - } - tbl->current_rate = - il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green); - D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate, - is_green); - return 0; -} - -/* - * Try to switch to new modulation mode from legacy - */ -static int -il4965_rs_move_legacy_other(struct il_priv *il, struct il_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, int idx) -{ - struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - struct il_scale_tbl_info *search_tbl = - &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); - struct 
il_rate_scale_data *win = &(tbl->win[idx]); - u32 sz = - (sizeof(struct il_scale_tbl_info) - - (sizeof(struct il_rate_scale_data) * RATE_COUNT)); - u8 start_action; - u8 valid_tx_ant = il->hw_params.valid_tx_ant; - u8 tx_chains_num = il->hw_params.tx_chains_num; - int ret = 0; - u8 update_search_tbl_counter = 0; - - tbl->action = IL_LEGACY_SWITCH_SISO; - - start_action = tbl->action; - for (;;) { - lq_sta->action_counter++; - switch (tbl->action) { - case IL_LEGACY_SWITCH_ANTENNA1: - case IL_LEGACY_SWITCH_ANTENNA2: - D_RATE("LQ: Legacy toggle Antenna\n"); - - if ((tbl->action == IL_LEGACY_SWITCH_ANTENNA1 && - tx_chains_num <= 1) || - (tbl->action == IL_LEGACY_SWITCH_ANTENNA2 && - tx_chains_num <= 2)) - break; - - /* Don't change antenna if success has been great */ - if (win->success_ratio >= IL_RS_GOOD_RATIO) - break; - - /* Set up search table to try other antenna */ - memcpy(search_tbl, tbl, sz); - - if (il4965_rs_toggle_antenna - (valid_tx_ant, &search_tbl->current_rate, - search_tbl)) { - update_search_tbl_counter = 1; - il4965_rs_set_expected_tpt_table(lq_sta, - search_tbl); - goto out; - } - break; - case IL_LEGACY_SWITCH_SISO: - D_RATE("LQ: Legacy switch to SISO\n"); - - /* Set up search table to try SISO */ - memcpy(search_tbl, tbl, sz); - search_tbl->is_SGI = 0; - ret = - il4965_rs_switch_to_siso(il, lq_sta, conf, sta, - search_tbl, idx); - if (!ret) { - lq_sta->action_counter = 0; - goto out; - } - - break; - case IL_LEGACY_SWITCH_MIMO2_AB: - case IL_LEGACY_SWITCH_MIMO2_AC: - case IL_LEGACY_SWITCH_MIMO2_BC: - D_RATE("LQ: Legacy switch to MIMO2\n"); - - /* Set up search table to try MIMO */ - memcpy(search_tbl, tbl, sz); - search_tbl->is_SGI = 0; - - if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AB) - search_tbl->ant_type = ANT_AB; - else if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AC) - search_tbl->ant_type = ANT_AC; - else - search_tbl->ant_type = ANT_BC; - - if (!il4965_rs_is_valid_ant - (valid_tx_ant, search_tbl->ant_type)) - break; - - ret = - il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta, - search_tbl, idx); - if (!ret) { - lq_sta->action_counter = 0; - goto out; - } - break; - } - tbl->action++; - if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC) - tbl->action = IL_LEGACY_SWITCH_ANTENNA1; - - if (tbl->action == start_action) - break; - - } - search_tbl->lq_type = LQ_NONE; - return 0; - -out: - lq_sta->search_better_tbl = 1; - tbl->action++; - if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC) - tbl->action = IL_LEGACY_SWITCH_ANTENNA1; - if (update_search_tbl_counter) - search_tbl->action = tbl->action; - return 0; - -} - -/* - * Try to switch to new modulation mode from SISO - */ -static int -il4965_rs_move_siso_to_other(struct il_priv *il, struct il_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, int idx) -{ - u8 is_green = lq_sta->is_green; - struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - struct il_scale_tbl_info *search_tbl = - &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); - struct il_rate_scale_data *win = &(tbl->win[idx]); - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - u32 sz = - (sizeof(struct il_scale_tbl_info) - - (sizeof(struct il_rate_scale_data) * RATE_COUNT)); - u8 start_action; - u8 valid_tx_ant = il->hw_params.valid_tx_ant; - u8 tx_chains_num = il->hw_params.tx_chains_num; - u8 update_search_tbl_counter = 0; - int ret; - - start_action = tbl->action; - - for (;;) { - lq_sta->action_counter++; - switch (tbl->action) { - case IL_SISO_SWITCH_ANTENNA1: - case IL_SISO_SWITCH_ANTENNA2: - D_RATE("LQ: SISO toggle 
Antenna\n"); - if ((tbl->action == IL_SISO_SWITCH_ANTENNA1 && - tx_chains_num <= 1) || - (tbl->action == IL_SISO_SWITCH_ANTENNA2 && - tx_chains_num <= 2)) - break; - - if (win->success_ratio >= IL_RS_GOOD_RATIO) - break; - - memcpy(search_tbl, tbl, sz); - if (il4965_rs_toggle_antenna - (valid_tx_ant, &search_tbl->current_rate, - search_tbl)) { - update_search_tbl_counter = 1; - goto out; - } - break; - case IL_SISO_SWITCH_MIMO2_AB: - case IL_SISO_SWITCH_MIMO2_AC: - case IL_SISO_SWITCH_MIMO2_BC: - D_RATE("LQ: SISO switch to MIMO2\n"); - memcpy(search_tbl, tbl, sz); - search_tbl->is_SGI = 0; - - if (tbl->action == IL_SISO_SWITCH_MIMO2_AB) - search_tbl->ant_type = ANT_AB; - else if (tbl->action == IL_SISO_SWITCH_MIMO2_AC) - search_tbl->ant_type = ANT_AC; - else - search_tbl->ant_type = ANT_BC; - - if (!il4965_rs_is_valid_ant - (valid_tx_ant, search_tbl->ant_type)) - break; - - ret = - il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta, - search_tbl, idx); - if (!ret) - goto out; - break; - case IL_SISO_SWITCH_GI: - if (!tbl->is_ht40 && - !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20)) - break; - if (tbl->is_ht40 && - !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40)) - break; - - D_RATE("LQ: SISO toggle SGI/NGI\n"); - - memcpy(search_tbl, tbl, sz); - if (is_green) { - if (!tbl->is_SGI) - break; - else - IL_ERR("SGI was set in GF+SISO\n"); - } - search_tbl->is_SGI = !tbl->is_SGI; - il4965_rs_set_expected_tpt_table(lq_sta, search_tbl); - if (tbl->is_SGI) { - s32 tpt = lq_sta->last_tpt / 100; - if (tpt >= search_tbl->expected_tpt[idx]) - break; - } - search_tbl->current_rate = - il4965_rate_n_flags_from_tbl(il, search_tbl, idx, - is_green); - update_search_tbl_counter = 1; - goto out; - } - tbl->action++; - if (tbl->action > IL_SISO_SWITCH_GI) - tbl->action = IL_SISO_SWITCH_ANTENNA1; - - if (tbl->action == start_action) - break; - } - search_tbl->lq_type = LQ_NONE; - return 0; - -out: - lq_sta->search_better_tbl = 1; - tbl->action++; - if (tbl->action > IL_SISO_SWITCH_GI) - tbl->action = IL_SISO_SWITCH_ANTENNA1; - if (update_search_tbl_counter) - search_tbl->action = tbl->action; - - return 0; -} - -/* - * Try to switch to new modulation mode from MIMO2 - */ -static int -il4965_rs_move_mimo2_to_other(struct il_priv *il, struct il_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, int idx) -{ - s8 is_green = lq_sta->is_green; - struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - struct il_scale_tbl_info *search_tbl = - &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); - struct il_rate_scale_data *win = &(tbl->win[idx]); - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - u32 sz = - (sizeof(struct il_scale_tbl_info) - - (sizeof(struct il_rate_scale_data) * RATE_COUNT)); - u8 start_action; - u8 valid_tx_ant = il->hw_params.valid_tx_ant; - u8 tx_chains_num = il->hw_params.tx_chains_num; - u8 update_search_tbl_counter = 0; - int ret; - - start_action = tbl->action; - for (;;) { - lq_sta->action_counter++; - switch (tbl->action) { - case IL_MIMO2_SWITCH_ANTENNA1: - case IL_MIMO2_SWITCH_ANTENNA2: - D_RATE("LQ: MIMO2 toggle Antennas\n"); - - if (tx_chains_num <= 2) - break; - - if (win->success_ratio >= IL_RS_GOOD_RATIO) - break; - - memcpy(search_tbl, tbl, sz); - if (il4965_rs_toggle_antenna - (valid_tx_ant, &search_tbl->current_rate, - search_tbl)) { - update_search_tbl_counter = 1; - goto out; - } - break; - case IL_MIMO2_SWITCH_SISO_A: - case IL_MIMO2_SWITCH_SISO_B: - case IL_MIMO2_SWITCH_SISO_C: - D_RATE("LQ: MIMO2 switch to SISO\n"); - - /* Set up new search table for 
SISO */ - memcpy(search_tbl, tbl, sz); - - if (tbl->action == IL_MIMO2_SWITCH_SISO_A) - search_tbl->ant_type = ANT_A; - else if (tbl->action == IL_MIMO2_SWITCH_SISO_B) - search_tbl->ant_type = ANT_B; - else - search_tbl->ant_type = ANT_C; - - if (!il4965_rs_is_valid_ant - (valid_tx_ant, search_tbl->ant_type)) - break; - - ret = - il4965_rs_switch_to_siso(il, lq_sta, conf, sta, - search_tbl, idx); - if (!ret) - goto out; - - break; - - case IL_MIMO2_SWITCH_GI: - if (!tbl->is_ht40 && - !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20)) - break; - if (tbl->is_ht40 && - !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40)) - break; - - D_RATE("LQ: MIMO2 toggle SGI/NGI\n"); - - /* Set up new search table for MIMO2 */ - memcpy(search_tbl, tbl, sz); - search_tbl->is_SGI = !tbl->is_SGI; - il4965_rs_set_expected_tpt_table(lq_sta, search_tbl); - /* - * If active table already uses the fastest possible - * modulation (dual stream with short guard interval), - * and it's working well, there's no need to look - * for a better type of modulation! - */ - if (tbl->is_SGI) { - s32 tpt = lq_sta->last_tpt / 100; - if (tpt >= search_tbl->expected_tpt[idx]) - break; - } - search_tbl->current_rate = - il4965_rate_n_flags_from_tbl(il, search_tbl, idx, - is_green); - update_search_tbl_counter = 1; - goto out; - - } - tbl->action++; - if (tbl->action > IL_MIMO2_SWITCH_GI) - tbl->action = IL_MIMO2_SWITCH_ANTENNA1; - - if (tbl->action == start_action) - break; - } - search_tbl->lq_type = LQ_NONE; - return 0; -out: - lq_sta->search_better_tbl = 1; - tbl->action++; - if (tbl->action > IL_MIMO2_SWITCH_GI) - tbl->action = IL_MIMO2_SWITCH_ANTENNA1; - if (update_search_tbl_counter) - search_tbl->action = tbl->action; - - return 0; - -} - -/* - * Check whether we should continue using same modulation mode, or - * begin search for a new mode, based on: - * 1) # tx successes or failures while using this mode - * 2) # times calling this function - * 3) elapsed time in this mode (not used, for now) - */ -static void -il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, bool force_search) -{ - struct il_scale_tbl_info *tbl; - int i; - int active_tbl; - int flush_interval_passed = 0; - struct il_priv *il; - - il = lq_sta->drv; - active_tbl = lq_sta->active_tbl; - - tbl = &(lq_sta->lq_info[active_tbl]); - - /* If we've been disallowing search, see if we should now allow it */ - if (lq_sta->stay_in_tbl) { - - /* Elapsed time using current modulation mode */ - if (lq_sta->flush_timer) - flush_interval_passed = - time_after(jiffies, - (unsigned long)(lq_sta->flush_timer + - RATE_SCALE_FLUSH_INTVL)); - - /* - * Check if we should allow search for new modulation mode. - * If many frames have failed or succeeded, or we've used - * this same modulation for a long time, allow search, and - * reset history stats that keep track of whether we should - * allow a new search. Also (below) reset all bitmaps and - * stats in active history. 
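The decision just described reduces to a handful of comparisons. A trimmed sketch, which drops the extra flush_timer/search_better_tbl guards around the timer test; the demo_* names and the numbers in main() are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct demo_stay_state {
        int total_failed, total_success;
        int max_failure_limit, max_success_limit;
        bool flush_interval_passed;     /* time in this mode has expired */
};

/* Decide whether to leave the current modulation mode and search again. */
static bool demo_allow_search(const struct demo_stay_state *s, bool force)
{
        return force ||
               s->total_failed > s->max_failure_limit ||
               s->total_success > s->max_success_limit ||
               s->flush_interval_passed;
}

int main(void)
{
        struct demo_stay_state s = {
                .total_failed = 12, .total_success = 40,
                .max_failure_limit = 30, .max_success_limit = 48,
                .flush_interval_passed = true,
        };

        printf("allow search: %d\n", demo_allow_search(&s, false));
        return 0;
}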
- */ - if (force_search || - lq_sta->total_failed > lq_sta->max_failure_limit || - lq_sta->total_success > lq_sta->max_success_limit || - (!lq_sta->search_better_tbl && lq_sta->flush_timer && - flush_interval_passed)) { - D_RATE("LQ: stay is expired %d %d %d\n:", - lq_sta->total_failed, lq_sta->total_success, - flush_interval_passed); - - /* Allow search for new mode */ - lq_sta->stay_in_tbl = 0; /* only place reset */ - lq_sta->total_failed = 0; - lq_sta->total_success = 0; - lq_sta->flush_timer = 0; - - /* - * Else if we've used this modulation mode enough repetitions - * (regardless of elapsed time or success/failure), reset - * history bitmaps and rate-specific stats for all rates in - * active table. - */ - } else { - lq_sta->table_count++; - if (lq_sta->table_count >= lq_sta->table_count_limit) { - lq_sta->table_count = 0; - - D_RATE("LQ: stay in table clear win\n"); - for (i = 0; i < RATE_COUNT; i++) - il4965_rs_rate_scale_clear_win(& - (tbl-> - win - [i])); - } - } - - /* If transitioning to allow "search", reset all history - * bitmaps and stats in active table (this will become the new - * "search" table). */ - if (!lq_sta->stay_in_tbl) { - for (i = 0; i < RATE_COUNT; i++) - il4965_rs_rate_scale_clear_win(&(tbl->win[i])); - } - } -} - -/* - * setup rate table in uCode - */ -static void -il4965_rs_update_rate_tbl(struct il_priv *il, struct il_rxon_context *ctx, - struct il_lq_sta *lq_sta, - struct il_scale_tbl_info *tbl, int idx, u8 is_green) -{ - u32 rate; - - /* Update uCode's rate table. */ - rate = il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green); - il4965_rs_fill_link_cmd(il, lq_sta, rate); - il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false); -} - -/* - * Do rate scaling and search for new modulation mode. - */ -static void -il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb, - struct ieee80211_sta *sta, - struct il_lq_sta *lq_sta) -{ - struct ieee80211_hw *hw = il->hw; - struct ieee80211_conf *conf = &hw->conf; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - int low = RATE_INVALID; - int high = RATE_INVALID; - int idx; - int i; - struct il_rate_scale_data *win = NULL; - int current_tpt = IL_INVALID_VALUE; - int low_tpt = IL_INVALID_VALUE; - int high_tpt = IL_INVALID_VALUE; - u32 fail_count; - s8 scale_action = 0; - u16 rate_mask; - u8 update_lq = 0; - struct il_scale_tbl_info *tbl, *tbl1; - u16 rate_scale_idx_msk = 0; - u8 is_green = 0; - u8 active_tbl = 0; - u8 done_search = 0; - u16 high_low; - s32 sr; - u8 tid = MAX_TID_COUNT; - struct il_tid_data *tid_data; - struct il_station_priv *sta_priv = (void *)sta->drv_priv; - struct il_rxon_context *ctx = sta_priv->common.ctx; - - D_RATE("rate scale calculate new rate for skb\n"); - - /* Send management frames and NO_ACK data using lowest rate. */ - /* TODO: this could probably be improved.. 
*/ - if (!ieee80211_is_data(hdr->frame_control) || - (info->flags & IEEE80211_TX_CTL_NO_ACK)) - return; - - lq_sta->supp_rates = sta->supp_rates[lq_sta->band]; - - tid = il4965_rs_tl_add_packet(lq_sta, hdr); - if (tid != MAX_TID_COUNT && (lq_sta->tx_agg_tid_en & (1 << tid))) { - tid_data = &il->stations[lq_sta->lq.sta_id].tid[tid]; - if (tid_data->agg.state == IL_AGG_OFF) - lq_sta->is_agg = 0; - else - lq_sta->is_agg = 1; - } else - lq_sta->is_agg = 0; - - /* - * Select rate-scale / modulation-mode table to work with in - * the rest of this function: "search" if searching for better - * modulation mode, or "active" if doing rate scaling within a mode. - */ - if (!lq_sta->search_better_tbl) - active_tbl = lq_sta->active_tbl; - else - active_tbl = 1 - lq_sta->active_tbl; - - tbl = &(lq_sta->lq_info[active_tbl]); - if (is_legacy(tbl->lq_type)) - lq_sta->is_green = 0; - else - lq_sta->is_green = il4965_rs_use_green(sta); - is_green = lq_sta->is_green; - - /* current tx rate */ - idx = lq_sta->last_txrate_idx; - - D_RATE("Rate scale idx %d for type %d\n", idx, tbl->lq_type); - - /* rates available for this association, and for modulation mode */ - rate_mask = il4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type); - - D_RATE("mask 0x%04X\n", rate_mask); - - /* mask with station rate restriction */ - if (is_legacy(tbl->lq_type)) { - if (lq_sta->band == IEEE80211_BAND_5GHZ) - /* supp_rates has no CCK bits in A mode */ - rate_scale_idx_msk = - (u16) (rate_mask & - (lq_sta->supp_rates << IL_FIRST_OFDM_RATE)); - else - rate_scale_idx_msk = - (u16) (rate_mask & lq_sta->supp_rates); - - } else - rate_scale_idx_msk = rate_mask; - - if (!rate_scale_idx_msk) - rate_scale_idx_msk = rate_mask; - - if (!((1 << idx) & rate_scale_idx_msk)) { - IL_ERR("Current Rate is not valid\n"); - if (lq_sta->search_better_tbl) { - /* revert to active table if search table is not valid */ - tbl->lq_type = LQ_NONE; - lq_sta->search_better_tbl = 0; - tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - /* get "active" rate info */ - idx = il4965_hwrate_to_plcp_idx(tbl->current_rate); - il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx, - is_green); - } - return; - } - - /* Get expected throughput table and history win for current rate */ - if (!tbl->expected_tpt) { - IL_ERR("tbl->expected_tpt is NULL\n"); - return; - } - - /* force user max rate if set by user */ - if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < idx) { - idx = lq_sta->max_rate_idx; - update_lq = 1; - win = &(tbl->win[idx]); - goto lq_update; - } - - win = &(tbl->win[idx]); - - /* - * If there is not enough history to calculate actual average - * throughput, keep analyzing results of more tx frames, without - * changing rate or mode (bypass most of the rest of this function). - * Set up new rate table in uCode only if old rate is not supported - * in current association (use new rate found above). - */ - fail_count = win->counter - win->success_counter; - if (fail_count < RATE_MIN_FAILURE_TH && - win->success_counter < RATE_MIN_SUCCESS_TH) { - D_RATE("LQ: still below TH. succ=%d total=%d " "for idx %d\n", - win->success_counter, win->counter, idx); - - /* Can't calculate this yet; not enough history */ - win->average_tpt = IL_INVALID_VALUE; - - /* Should we stay with this modulation mode, - * or search for a new one? 
*/ - il4965_rs_stay_in_table(lq_sta, false); - - goto out; - } - /* Else we have enough samples; calculate estimate of - * actual average throughput */ - if (win->average_tpt != - ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128)) { - IL_ERR("expected_tpt should have been calculated by now\n"); - win->average_tpt = - ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128); - } - - /* If we are searching for better modulation mode, check success. */ - if (lq_sta->search_better_tbl) { - /* If good success, continue using the "search" mode; - * no need to send new link quality command, since we're - * continuing to use the setup that we've been trying. */ - if (win->average_tpt > lq_sta->last_tpt) { - - D_RATE("LQ: SWITCHING TO NEW TBL " - "suc=%d cur-tpt=%d old-tpt=%d\n", - win->success_ratio, win->average_tpt, - lq_sta->last_tpt); - - if (!is_legacy(tbl->lq_type)) - lq_sta->enable_counter = 1; - - /* Swap tables; "search" becomes "active" */ - lq_sta->active_tbl = active_tbl; - current_tpt = win->average_tpt; - - /* Else poor success; go back to mode in "active" table */ - } else { - - D_RATE("LQ: GOING BACK TO THE OLD TBL " - "suc=%d cur-tpt=%d old-tpt=%d\n", - win->success_ratio, win->average_tpt, - lq_sta->last_tpt); - - /* Nullify "search" table */ - tbl->lq_type = LQ_NONE; - - /* Revert to "active" table */ - active_tbl = lq_sta->active_tbl; - tbl = &(lq_sta->lq_info[active_tbl]); - - /* Revert to "active" rate and throughput info */ - idx = il4965_hwrate_to_plcp_idx(tbl->current_rate); - current_tpt = lq_sta->last_tpt; - - /* Need to set up a new rate table in uCode */ - update_lq = 1; - } - - /* Either way, we've made a decision; modulation mode - * search is done, allow rate adjustment next time. */ - lq_sta->search_better_tbl = 0; - done_search = 1; /* Don't switch modes below! */ - goto lq_update; - } - - /* (Else) not in search of better modulation mode, try for better - * starting rate, while staying in this mode. */ - high_low = - il4965_rs_get_adjacent_rate(il, idx, rate_scale_idx_msk, - tbl->lq_type); - low = high_low & 0xff; - high = (high_low >> 8) & 0xff; - - /* If user set max rate, dont allow higher than user constrain */ - if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < high) - high = RATE_INVALID; - - sr = win->success_ratio; - - /* Collect measured throughputs for current and adjacent rates */ - current_tpt = win->average_tpt; - if (low != RATE_INVALID) - low_tpt = tbl->win[low].average_tpt; - if (high != RATE_INVALID) - high_tpt = tbl->win[high].average_tpt; - - scale_action = 0; - - /* Too many failures, decrease rate */ - if (sr <= RATE_DECREASE_TH || current_tpt == 0) { - D_RATE("decrease rate because of low success_ratio\n"); - scale_action = -1; - - /* No throughput measured yet for adjacent rates; try increase. */ - } else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) { - - if (high != RATE_INVALID && sr >= RATE_INCREASE_TH) - scale_action = 1; - else if (low != RATE_INVALID) - scale_action = 0; - } - - /* Both adjacent throughputs are measured, but neither one has better - * throughput; we're using the best rate, don't change it! */ - else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE && - low_tpt < current_tpt && high_tpt < current_tpt) - scale_action = 0; - - /* At least one adjacent rate's throughput is measured, - * and may have better performance. 
*/ - else { - /* Higher adjacent rate's throughput is measured */ - if (high_tpt != IL_INVALID_VALUE) { - /* Higher rate has better throughput */ - if (high_tpt > current_tpt && sr >= RATE_INCREASE_TH) - scale_action = 1; - else - scale_action = 0; - - /* Lower adjacent rate's throughput is measured */ - } else if (low_tpt != IL_INVALID_VALUE) { - /* Lower rate has better throughput */ - if (low_tpt > current_tpt) { - D_RATE("decrease rate because of low tpt\n"); - scale_action = -1; - } else if (sr >= RATE_INCREASE_TH) { - scale_action = 1; - } - } - } - - /* Sanity check; asked for decrease, but success rate or throughput - * has been good at old rate. Don't change it. */ - if (scale_action == -1 && low != RATE_INVALID && - (sr > RATE_HIGH_TH || current_tpt > 100 * tbl->expected_tpt[low])) - scale_action = 0; - - switch (scale_action) { - case -1: - /* Decrease starting rate, update uCode's rate table */ - if (low != RATE_INVALID) { - update_lq = 1; - idx = low; - } - - break; - case 1: - /* Increase starting rate, update uCode's rate table */ - if (high != RATE_INVALID) { - update_lq = 1; - idx = high; - } - - break; - case 0: - /* No change */ - default: - break; - } - - D_RATE("choose rate scale idx %d action %d low %d " "high %d type %d\n", - idx, scale_action, low, high, tbl->lq_type); - -lq_update: - /* Replace uCode's rate table for the destination station. */ - if (update_lq) - il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx, - is_green); - - /* Should we stay with this modulation mode, - * or search for a new one? */ - il4965_rs_stay_in_table(lq_sta, false); - - /* - * Search for new modulation mode if we're: - * 1) Not changing rates right now - * 2) Not just finishing up a search - * 3) Allowing a new search - */ - if (!update_lq && !done_search && !lq_sta->stay_in_tbl && win->counter) { - /* Save current throughput to compare with "search" throughput */ - lq_sta->last_tpt = current_tpt; - - /* Select a new "search" modulation mode to try. - * If one is found, set up the new "search" table. */ - if (is_legacy(tbl->lq_type)) - il4965_rs_move_legacy_other(il, lq_sta, conf, sta, idx); - else if (is_siso(tbl->lq_type)) - il4965_rs_move_siso_to_other(il, lq_sta, conf, sta, - idx); - else /* (is_mimo2(tbl->lq_type)) */ - il4965_rs_move_mimo2_to_other(il, lq_sta, conf, sta, - idx); - - /* If new "search" mode was selected, set up in uCode table */ - if (lq_sta->search_better_tbl) { - /* Access the "search" table, clear its history. */ - tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); - for (i = 0; i < RATE_COUNT; i++) - il4965_rs_rate_scale_clear_win(&(tbl->win[i])); - - /* Use new "search" start rate */ - idx = il4965_hwrate_to_plcp_idx(tbl->current_rate); - - D_RATE("Switch current mcs: %X idx: %d\n", - tbl->current_rate, idx); - il4965_rs_fill_link_cmd(il, lq_sta, tbl->current_rate); - il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false); - } else - done_search = 1; - } - - if (done_search && !lq_sta->stay_in_tbl) { - /* If the "active" (non-search) mode was legacy, - * and we've tried switching antennas, - * but we haven't been able to try HT modes (not available), - * stay with best antenna legacy modulation for a while - * before next round of mode comparisons. 
*/ - tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); - if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) && - lq_sta->action_counter > tbl1->max_search) { - D_RATE("LQ: STAY in legacy table\n"); - il4965_rs_set_stay_in_table(il, 1, lq_sta); - } - - /* If we're in an HT mode, and all 3 mode switch actions - * have been tried and compared, stay in this best modulation - * mode for a while before next round of mode comparisons. */ - if (lq_sta->enable_counter && - lq_sta->action_counter >= tbl1->max_search) { - if (lq_sta->last_tpt > IL_AGG_TPT_THREHOLD && - (lq_sta->tx_agg_tid_en & (1 << tid)) && - tid != MAX_TID_COUNT) { - tid_data = - &il->stations[lq_sta->lq.sta_id].tid[tid]; - if (tid_data->agg.state == IL_AGG_OFF) { - D_RATE("try to aggregate tid %d\n", - tid); - il4965_rs_tl_turn_on_agg(il, tid, - lq_sta, sta); - } - } - il4965_rs_set_stay_in_table(il, 0, lq_sta); - } - } - -out: - tbl->current_rate = - il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green); - i = idx; - lq_sta->last_txrate_idx = i; -} - -/** - * il4965_rs_initialize_lq - Initialize a station's hardware rate table - * - * The uCode's station table contains a table of fallback rates - * for automatic fallback during transmission. - * - * NOTE: This sets up a default set of values. These will be replaced later - * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of - * rc80211_simple. - * - * NOTE: Run C_ADD_STA command to set up station table entry, before - * calling this function (which runs C_TX_LINK_QUALITY_CMD, - * which requires station table entry to exist). - */ -static void -il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf, - struct ieee80211_sta *sta, struct il_lq_sta *lq_sta) -{ - struct il_scale_tbl_info *tbl; - int rate_idx; - int i; - u32 rate; - u8 use_green = il4965_rs_use_green(sta); - u8 active_tbl = 0; - u8 valid_tx_ant; - struct il_station_priv *sta_priv; - struct il_rxon_context *ctx; - - if (!sta || !lq_sta) - return; - - sta_priv = (void *)sta->drv_priv; - ctx = sta_priv->common.ctx; - - i = lq_sta->last_txrate_idx; - - valid_tx_ant = il->hw_params.valid_tx_ant; - - if (!lq_sta->search_better_tbl) - active_tbl = lq_sta->active_tbl; - else - active_tbl = 1 - lq_sta->active_tbl; - - tbl = &(lq_sta->lq_info[active_tbl]); - - if (i < 0 || i >= RATE_COUNT) - i = 0; - - rate = il_rates[i].plcp; - tbl->ant_type = il4965_first_antenna(valid_tx_ant); - rate |= tbl->ant_type << RATE_MCS_ANT_POS; - - if (i >= IL_FIRST_CCK_RATE && i <= IL_LAST_CCK_RATE) - rate |= RATE_MCS_CCK_MSK; - - il4965_rs_get_tbl_info_from_mcs(rate, il->band, tbl, &rate_idx); - if (!il4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type)) - il4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl); - - rate = il4965_rate_n_flags_from_tbl(il, tbl, rate_idx, use_green); - tbl->current_rate = rate; - il4965_rs_set_expected_tpt_table(lq_sta, tbl); - il4965_rs_fill_link_cmd(NULL, lq_sta, rate); - il->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq; - il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_SYNC, true); -} - -static void -il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta, - struct ieee80211_tx_rate_control *txrc) -{ - - struct sk_buff *skb = txrc->skb; - struct ieee80211_supported_band *sband = txrc->sband; - struct il_priv *il __maybe_unused = (struct il_priv *)il_r; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct il_lq_sta *lq_sta = il_sta; - int rate_idx; - - D_RATE("rate scale calculate new rate for skb\n"); - - /* Get max rate if user set max rate */ - if (lq_sta) { - 
lq_sta->max_rate_idx = txrc->max_rate_idx; - if (sband->band == IEEE80211_BAND_5GHZ && - lq_sta->max_rate_idx != -1) - lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE; - if (lq_sta->max_rate_idx < 0 || - lq_sta->max_rate_idx >= RATE_COUNT) - lq_sta->max_rate_idx = -1; - } - - /* Treat uninitialized rate scaling data same as non-existing. */ - if (lq_sta && !lq_sta->drv) { - D_RATE("Rate scaling not initialized yet.\n"); - il_sta = NULL; - } - - /* Send management frames and NO_ACK data using lowest rate. */ - if (rate_control_send_low(sta, il_sta, txrc)) - return; - - if (!lq_sta) - return; - - rate_idx = lq_sta->last_txrate_idx; - - if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) { - rate_idx -= IL_FIRST_OFDM_RATE; - /* 6M and 9M shared same MCS idx */ - rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0; - if (il4965_rs_extract_rate(lq_sta->last_rate_n_flags) >= - RATE_MIMO2_6M_PLCP) - rate_idx = rate_idx + MCS_IDX_PER_STREAM; - info->control.rates[0].flags = IEEE80211_TX_RC_MCS; - if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK) - info->control.rates[0].flags |= - IEEE80211_TX_RC_SHORT_GI; - if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK) - info->control.rates[0].flags |= - IEEE80211_TX_RC_DUP_DATA; - if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK) - info->control.rates[0].flags |= - IEEE80211_TX_RC_40_MHZ_WIDTH; - if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK) - info->control.rates[0].flags |= - IEEE80211_TX_RC_GREEN_FIELD; - } else { - /* Check for invalid rates */ - if (rate_idx < 0 || rate_idx >= RATE_COUNT_LEGACY || - (sband->band == IEEE80211_BAND_5GHZ && - rate_idx < IL_FIRST_OFDM_RATE)) - rate_idx = rate_lowest_index(sband, sta); - /* On valid 5 GHz rate, adjust idx */ - else if (sband->band == IEEE80211_BAND_5GHZ) - rate_idx -= IL_FIRST_OFDM_RATE; - info->control.rates[0].flags = 0; - } - info->control.rates[0].idx = rate_idx; - -} - -static void * -il4965_rs_alloc_sta(void *il_rate, struct ieee80211_sta *sta, gfp_t gfp) -{ - struct il_station_priv *sta_priv = - (struct il_station_priv *)sta->drv_priv; - struct il_priv *il; - - il = (struct il_priv *)il_rate; - D_RATE("create station rate scale win\n"); - - return &sta_priv->lq_sta; -} - -/* - * Called after adding a new station to initialize rate scaling - */ -void -il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id) -{ - int i, j; - struct ieee80211_hw *hw = il->hw; - struct ieee80211_conf *conf = &il->hw->conf; - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - struct il_station_priv *sta_priv; - struct il_lq_sta *lq_sta; - struct ieee80211_supported_band *sband; - - sta_priv = (struct il_station_priv *)sta->drv_priv; - lq_sta = &sta_priv->lq_sta; - sband = hw->wiphy->bands[conf->channel->band]; - - lq_sta->lq.sta_id = sta_id; - - for (j = 0; j < LQ_SIZE; j++) - for (i = 0; i < RATE_COUNT; i++) - il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j]. - win[i]); - - lq_sta->flush_timer = 0; - lq_sta->supp_rates = sta->supp_rates[sband->band]; - for (j = 0; j < LQ_SIZE; j++) - for (i = 0; i < RATE_COUNT; i++) - il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j]. - win[i]); - - D_RATE("LQ:" "*** rate scale station global init for station %d ***\n", - sta_id); - /* TODO: what is a good starting rate for STA? About middle? Maybe not - * the lowest or the highest rate.. Could consider using RSSI from - * previous packets? Need to have IEEE 802.1X auth succeed immediately - * after assoc.. 
*/ - - lq_sta->is_dup = 0; - lq_sta->max_rate_idx = -1; - lq_sta->missed_rate_counter = IL_MISSED_RATE_MAX; - lq_sta->is_green = il4965_rs_use_green(sta); - lq_sta->active_legacy_rate = il->active_rate & ~(0x1000); - lq_sta->band = il->band; - /* - * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3), - * supp_rates[] does not; shift to convert format, force 9 MBits off. - */ - lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1; - lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1; - lq_sta->active_siso_rate &= ~((u16) 0x2); - lq_sta->active_siso_rate <<= IL_FIRST_OFDM_RATE; - - /* Same here */ - lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1; - lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1; - lq_sta->active_mimo2_rate &= ~((u16) 0x2); - lq_sta->active_mimo2_rate <<= IL_FIRST_OFDM_RATE; - - /* These values will be overridden later */ - lq_sta->lq.general_params.single_stream_ant_msk = - il4965_first_antenna(il->hw_params.valid_tx_ant); - lq_sta->lq.general_params.dual_stream_ant_msk = - il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params. - valid_tx_ant); - if (!lq_sta->lq.general_params.dual_stream_ant_msk) { - lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB; - } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) { - lq_sta->lq.general_params.dual_stream_ant_msk = - il->hw_params.valid_tx_ant; - } - - /* as default allow aggregation for all tids */ - lq_sta->tx_agg_tid_en = IL_AGG_ALL_TID; - lq_sta->drv = il; - - /* Set last_txrate_idx to lowest rate */ - lq_sta->last_txrate_idx = rate_lowest_index(sband, sta); - if (sband->band == IEEE80211_BAND_5GHZ) - lq_sta->last_txrate_idx += IL_FIRST_OFDM_RATE; - lq_sta->is_agg = 0; - -#ifdef CONFIG_MAC80211_DEBUGFS - lq_sta->dbg_fixed_rate = 0; -#endif - - il4965_rs_initialize_lq(il, conf, sta, lq_sta); -} - -static void -il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta, - u32 new_rate) -{ - struct il_scale_tbl_info tbl_type; - int idx = 0; - int rate_idx; - int repeat_rate = 0; - u8 ant_toggle_cnt = 0; - u8 use_ht_possible = 1; - u8 valid_tx_ant = 0; - struct il_link_quality_cmd *lq_cmd = &lq_sta->lq; - - /* Override starting rate (idx 0) if needed for debug purposes */ - il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx); - - /* Interpret new_rate (rate_n_flags) */ - il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type, - &rate_idx); - - /* How many times should we repeat the initial rate? */ - if (is_legacy(tbl_type.lq_type)) { - ant_toggle_cnt = 1; - repeat_rate = IL_NUMBER_TRY; - } else { - repeat_rate = IL_HT_NUMBER_TRY; - } - - lq_cmd->general_params.mimo_delimiter = - is_mimo(tbl_type.lq_type) ? 1 : 0; - - /* Fill 1st table entry (idx 0) */ - lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate); - - if (il4965_num_of_ant(tbl_type.ant_type) == 1) { - lq_cmd->general_params.single_stream_ant_msk = - tbl_type.ant_type; - } else if (il4965_num_of_ant(tbl_type.ant_type) == 2) { - lq_cmd->general_params.dual_stream_ant_msk = tbl_type.ant_type; - } - /* otherwise we don't modify the existing value */ - idx++; - repeat_rate--; - if (il) - valid_tx_ant = il->hw_params.valid_tx_ant; - - /* Fill rest of rate table */ - while (idx < LINK_QUAL_MAX_RETRY_NUM) { - /* Repeat initial/next rate. - * For legacy IL_NUMBER_TRY == 1, this loop will not execute. - * For HT IL_HT_NUMBER_TRY == 3, this executes twice. 
*/ - while (repeat_rate > 0 && idx < LINK_QUAL_MAX_RETRY_NUM) { - if (is_legacy(tbl_type.lq_type)) { - if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) - ant_toggle_cnt++; - else if (il && - il4965_rs_toggle_antenna(valid_tx_ant, - &new_rate, - &tbl_type)) - ant_toggle_cnt = 1; - } - - /* Override next rate if needed for debug purposes */ - il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx); - - /* Fill next table entry */ - lq_cmd->rs_table[idx].rate_n_flags = - cpu_to_le32(new_rate); - repeat_rate--; - idx++; - } - - il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, - &tbl_type, &rate_idx); - - /* Indicate to uCode which entries might be MIMO. - * If initial rate was MIMO, this will finally end up - * as (IL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */ - if (is_mimo(tbl_type.lq_type)) - lq_cmd->general_params.mimo_delimiter = idx; - - /* Get next rate */ - new_rate = - il4965_rs_get_lower_rate(lq_sta, &tbl_type, rate_idx, - use_ht_possible); - - /* How many times should we repeat the next rate? */ - if (is_legacy(tbl_type.lq_type)) { - if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) - ant_toggle_cnt++; - else if (il && - il4965_rs_toggle_antenna(valid_tx_ant, - &new_rate, &tbl_type)) - ant_toggle_cnt = 1; - - repeat_rate = IL_NUMBER_TRY; - } else { - repeat_rate = IL_HT_NUMBER_TRY; - } - - /* Don't allow HT rates after next pass. - * il4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */ - use_ht_possible = 0; - - /* Override next rate if needed for debug purposes */ - il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx); - - /* Fill next table entry */ - lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate); - - idx++; - repeat_rate--; - } - - lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; - lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; - - lq_cmd->agg_params.agg_time_limit = - cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); -} - -static void * -il4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) -{ - return hw->priv; -} - -/* rate scale requires free function to be implemented */ -static void -il4965_rs_free(void *il_rate) -{ - return; -} - -static void -il4965_rs_free_sta(void *il_r, struct ieee80211_sta *sta, void *il_sta) -{ - struct il_priv *il __maybe_unused = il_r; - - D_RATE("enter\n"); - D_RATE("leave\n"); -} - -#ifdef CONFIG_MAC80211_DEBUGFS -static int -il4965_open_file_generic(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - return 0; -} - -static void -il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx) -{ - struct il_priv *il; - u8 valid_tx_ant; - u8 ant_sel_tx; - - il = lq_sta->drv; - valid_tx_ant = il->hw_params.valid_tx_ant; - if (lq_sta->dbg_fixed_rate) { - ant_sel_tx = - ((lq_sta-> - dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >> - RATE_MCS_ANT_POS); - if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) { - *rate_n_flags = lq_sta->dbg_fixed_rate; - D_RATE("Fixed rate ON\n"); - } else { - lq_sta->dbg_fixed_rate = 0; - IL_ERR - ("Invalid antenna selection 0x%X, Valid is 0x%X\n", - ant_sel_tx, valid_tx_ant); - D_RATE("Fixed rate OFF\n"); - } - } else { - D_RATE("Fixed rate OFF\n"); - } -} - -static ssize_t -il4965_rs_sta_dbgfs_scale_table_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct il_lq_sta *lq_sta = file->private_data; - struct il_priv *il; - char buf[64]; - size_t buf_size; - u32 parsed_rate; - struct il_station_priv *sta_priv = - container_of(lq_sta, struct il_station_priv, lq_sta); 
- struct il_rxon_context *ctx = sta_priv->common.ctx; - - il = lq_sta->drv; - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - - if (sscanf(buf, "%x", &parsed_rate) == 1) - lq_sta->dbg_fixed_rate = parsed_rate; - else - lq_sta->dbg_fixed_rate = 0; - - lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */ - lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ - lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ - - D_RATE("sta_id %d rate 0x%X\n", lq_sta->lq.sta_id, - lq_sta->dbg_fixed_rate); - - if (lq_sta->dbg_fixed_rate) { - il4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate); - il_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC, false); - } - - return count; -} - -static ssize_t -il4965_rs_sta_dbgfs_scale_table_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - char *buff; - int desc = 0; - int i = 0; - int idx = 0; - ssize_t ret; - - struct il_lq_sta *lq_sta = file->private_data; - struct il_priv *il; - struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - - il = lq_sta->drv; - buff = kmalloc(1024, GFP_KERNEL); - if (!buff) - return -ENOMEM; - - desc += sprintf(buff + desc, "sta_id %d\n", lq_sta->lq.sta_id); - desc += - sprintf(buff + desc, "failed=%d success=%d rate=0%X\n", - lq_sta->total_failed, lq_sta->total_success, - lq_sta->active_legacy_rate); - desc += - sprintf(buff + desc, "fixed rate 0x%X\n", lq_sta->dbg_fixed_rate); - desc += - sprintf(buff + desc, "valid_tx_ant %s%s%s\n", - (il->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "", - (il->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "", - (il->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : ""); - desc += - sprintf(buff + desc, "lq type %s\n", - (is_legacy(tbl->lq_type)) ? "legacy" : "HT"); - if (is_Ht(tbl->lq_type)) { - desc += - sprintf(buff + desc, " %s", - (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2"); - desc += - sprintf(buff + desc, " %s", - (tbl->is_ht40) ? "40MHz" : "20MHz"); - desc += - sprintf(buff + desc, " %s %s %s\n", - (tbl->is_SGI) ? "SGI" : "", - (lq_sta->is_green) ? "GF enabled" : "", - (lq_sta->is_agg) ? "AGG on" : ""); - } - desc += - sprintf(buff + desc, "last tx rate=0x%X\n", - lq_sta->last_rate_n_flags); - desc += - sprintf(buff + desc, - "general:" "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n", - lq_sta->lq.general_params.flags, - lq_sta->lq.general_params.mimo_delimiter, - lq_sta->lq.general_params.single_stream_ant_msk, - lq_sta->lq.general_params.dual_stream_ant_msk); - - desc += - sprintf(buff + desc, - "agg:" - "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n", - le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit), - lq_sta->lq.agg_params.agg_dis_start_th, - lq_sta->lq.agg_params.agg_frame_cnt_limit); - - desc += - sprintf(buff + desc, - "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", - lq_sta->lq.general_params.start_rate_idx[0], - lq_sta->lq.general_params.start_rate_idx[1], - lq_sta->lq.general_params.start_rate_idx[2], - lq_sta->lq.general_params.start_rate_idx[3]); - - for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { - idx = - il4965_hwrate_to_plcp_idx(le32_to_cpu - (lq_sta->lq.rs_table[i]. - rate_n_flags)); - if (is_legacy(tbl->lq_type)) { - desc += - sprintf(buff + desc, " rate[%d] 0x%X %smbps\n", i, - le32_to_cpu(lq_sta->lq.rs_table[i]. 
- rate_n_flags), - il_rate_mcs[idx].mbps); - } else { - desc += - sprintf(buff + desc, " rate[%d] 0x%X %smbps (%s)\n", - i, - le32_to_cpu(lq_sta->lq.rs_table[i]. - rate_n_flags), - il_rate_mcs[idx].mbps, - il_rate_mcs[idx].mcs); - } - } - - ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); - kfree(buff); - return ret; -} - -static const struct file_operations rs_sta_dbgfs_scale_table_ops = { - .write = il4965_rs_sta_dbgfs_scale_table_write, - .read = il4965_rs_sta_dbgfs_scale_table_read, - .open = il4965_open_file_generic, - .llseek = default_llseek, -}; - -static ssize_t -il4965_rs_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - char *buff; - int desc = 0; - int i, j; - ssize_t ret; - - struct il_lq_sta *lq_sta = file->private_data; - - buff = kmalloc(1024, GFP_KERNEL); - if (!buff) - return -ENOMEM; - - for (i = 0; i < LQ_SIZE; i++) { - desc += - sprintf(buff + desc, - "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n" - "rate=0x%X\n", lq_sta->active_tbl == i ? "*" : "x", - lq_sta->lq_info[i].lq_type, - lq_sta->lq_info[i].is_SGI, - lq_sta->lq_info[i].is_ht40, - lq_sta->lq_info[i].is_dup, lq_sta->is_green, - lq_sta->lq_info[i].current_rate); - for (j = 0; j < RATE_COUNT; j++) { - desc += - sprintf(buff + desc, - "counter=%d success=%d %%=%d\n", - lq_sta->lq_info[i].win[j].counter, - lq_sta->lq_info[i].win[j].success_counter, - lq_sta->lq_info[i].win[j].success_ratio); - } - } - ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); - kfree(buff); - return ret; -} - -static const struct file_operations rs_sta_dbgfs_stats_table_ops = { - .read = il4965_rs_sta_dbgfs_stats_table_read, - .open = il4965_open_file_generic, - .llseek = default_llseek, -}; - -static ssize_t -il4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file, - char __user *user_buf, size_t count, - loff_t *ppos) -{ - char buff[120]; - int desc = 0; - struct il_lq_sta *lq_sta = file->private_data; - struct il_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl]; - - if (is_Ht(tbl->lq_type)) - desc += - sprintf(buff + desc, "Bit Rate= %d Mb/s\n", - tbl->expected_tpt[lq_sta->last_txrate_idx]); - else - desc += - sprintf(buff + desc, "Bit Rate= %d Mb/s\n", - il_rates[lq_sta->last_txrate_idx].ieee >> 1); - - return simple_read_from_buffer(user_buf, count, ppos, buff, desc); -} - -static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = { - .read = il4965_rs_sta_dbgfs_rate_scale_data_read, - .open = il4965_open_file_generic, - .llseek = default_llseek, -}; - -static void -il4965_rs_add_debugfs(void *il, void *il_sta, struct dentry *dir) -{ - struct il_lq_sta *lq_sta = il_sta; - lq_sta->rs_sta_dbgfs_scale_table_file = - debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, - lq_sta, &rs_sta_dbgfs_scale_table_ops); - lq_sta->rs_sta_dbgfs_stats_table_file = - debugfs_create_file("rate_stats_table", S_IRUSR, dir, lq_sta, - &rs_sta_dbgfs_stats_table_ops); - lq_sta->rs_sta_dbgfs_rate_scale_data_file = - debugfs_create_file("rate_scale_data", S_IRUSR, dir, lq_sta, - &rs_sta_dbgfs_rate_scale_data_ops); - lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file = - debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir, - &lq_sta->tx_agg_tid_en); - -} - -static void -il4965_rs_remove_debugfs(void *il, void *il_sta) -{ - struct il_lq_sta *lq_sta = il_sta; - debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file); - debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); - debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file); - 
debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file); -} -#endif - -/* - * Initialization of rate scaling information is done by driver after - * the station is added. Since mac80211 calls this function before a - * station is added we ignore it. - */ -static void -il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, void *il_sta) -{ -} - -static struct rate_control_ops rs_4965_ops = { - .module = NULL, - .name = IL4965_RS_NAME, - .tx_status = il4965_rs_tx_status, - .get_rate = il4965_rs_get_rate, - .rate_init = il4965_rs_rate_init_stub, - .alloc = il4965_rs_alloc, - .free = il4965_rs_free, - .alloc_sta = il4965_rs_alloc_sta, - .free_sta = il4965_rs_free_sta, -#ifdef CONFIG_MAC80211_DEBUGFS - .add_sta_debugfs = il4965_rs_add_debugfs, - .remove_sta_debugfs = il4965_rs_remove_debugfs, -#endif -}; - -int -il4965_rate_control_register(void) -{ - return ieee80211_rate_control_register(&rs_4965_ops); -} - -void -il4965_rate_control_unregister(void) -{ - ieee80211_rate_control_unregister(&rs_4965_ops); -} diff --git a/trunk/drivers/net/wireless/iwlegacy/4965.c b/trunk/drivers/net/wireless/iwlegacy/4965.c deleted file mode 100644 index cacbc03880b0..000000000000 --- a/trunk/drivers/net/wireless/iwlegacy/4965.c +++ /dev/null @@ -1,2402 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "common.h" -#include "4965.h" - -/** - * il_verify_inst_sparse - verify runtime uCode image in card vs. host, - * using sample data 100 bytes apart. If these sample points are good, - * it's a pretty good bet that everything between them is good, too. - */ -static int -il4965_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len) -{ - u32 val; - int ret = 0; - u32 errcnt = 0; - u32 i; - - D_INFO("ucode inst image size is %u\n", len); - - for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) { - /* read data comes through single port, auto-incr addr */ - /* NOTE: Use the debugless read so we don't flood kernel log - * if IL_DL_IO is set */ - il_wr(il, HBUS_TARG_MEM_RADDR, i + IL4965_RTC_INST_LOWER_BOUND); - val = _il_rd(il, HBUS_TARG_MEM_RDAT); - if (val != le32_to_cpu(*image)) { - ret = -EIO; - errcnt++; - if (errcnt >= 3) - break; - } - } - - return ret; -} - -/** - * il4965_verify_inst_full - verify runtime uCode image in card vs. 
host, - * looking at all data. - */ -static int -il4965_verify_inst_full(struct il_priv *il, __le32 * image, u32 len) -{ - u32 val; - u32 save_len = len; - int ret = 0; - u32 errcnt; - - D_INFO("ucode inst image size is %u\n", len); - - il_wr(il, HBUS_TARG_MEM_RADDR, IL4965_RTC_INST_LOWER_BOUND); - - errcnt = 0; - for (; len > 0; len -= sizeof(u32), image++) { - /* read data comes through single port, auto-incr addr */ - /* NOTE: Use the debugless read so we don't flood kernel log - * if IL_DL_IO is set */ - val = _il_rd(il, HBUS_TARG_MEM_RDAT); - if (val != le32_to_cpu(*image)) { - IL_ERR("uCode INST section is invalid at " - "offset 0x%x, is 0x%x, s/b 0x%x\n", - save_len - len, val, le32_to_cpu(*image)); - ret = -EIO; - errcnt++; - if (errcnt >= 20) - break; - } - } - - if (!errcnt) - D_INFO("ucode image in INSTRUCTION memory is good\n"); - - return ret; -} - -/** - * il4965_verify_ucode - determine which instruction image is in SRAM, - * and verify its contents - */ -int -il4965_verify_ucode(struct il_priv *il) -{ - __le32 *image; - u32 len; - int ret; - - /* Try bootstrap */ - image = (__le32 *) il->ucode_boot.v_addr; - len = il->ucode_boot.len; - ret = il4965_verify_inst_sparse(il, image, len); - if (!ret) { - D_INFO("Bootstrap uCode is good in inst SRAM\n"); - return 0; - } - - /* Try initialize */ - image = (__le32 *) il->ucode_init.v_addr; - len = il->ucode_init.len; - ret = il4965_verify_inst_sparse(il, image, len); - if (!ret) { - D_INFO("Initialize uCode is good in inst SRAM\n"); - return 0; - } - - /* Try runtime/protocol */ - image = (__le32 *) il->ucode_code.v_addr; - len = il->ucode_code.len; - ret = il4965_verify_inst_sparse(il, image, len); - if (!ret) { - D_INFO("Runtime uCode is good in inst SRAM\n"); - return 0; - } - - IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); - - /* Since nothing seems to match, show first several data entries in - * instruction SRAM, so maybe visual inspection will give a clue. - * Selection of bootstrap image (vs. other images) is arbitrary. */ - image = (__le32 *) il->ucode_boot.v_addr; - len = il->ucode_boot.len; - ret = il4965_verify_inst_full(il, image, len); - - return ret; -} - -/****************************************************************************** - * - * EEPROM related functions - * -******************************************************************************/ - -/* - * The device's EEPROM semaphore prevents conflicts between driver and uCode - * when accessing the EEPROM; each access is a series of pulses to/from the - * EEPROM chip, not a single event, so even reads could conflict if they - * weren't arbitrated by the semaphore. 
- */ -int -il4965_eeprom_acquire_semaphore(struct il_priv *il) -{ - u16 count; - int ret; - - for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) { - /* Request semaphore */ - il_set_bit(il, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); - - /* See if we got it */ - ret = - _il_poll_bit(il, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, - CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, - EEPROM_SEM_TIMEOUT); - if (ret >= 0) - return ret; - } - - return ret; -} - -void -il4965_eeprom_release_semaphore(struct il_priv *il) -{ - il_clear_bit(il, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); - -} - -int -il4965_eeprom_check_version(struct il_priv *il) -{ - u16 eeprom_ver; - u16 calib_ver; - - eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION); - calib_ver = il_eeprom_query16(il, EEPROM_4965_CALIB_VERSION_OFFSET); - - if (eeprom_ver < il->cfg->eeprom_ver || - calib_ver < il->cfg->eeprom_calib_ver) - goto err; - - IL_INFO("device EEPROM VER=0x%x, CALIB=0x%x\n", eeprom_ver, calib_ver); - - return 0; -err: - IL_ERR("Unsupported (too old) EEPROM VER=0x%x < 0x%x " - "CALIB=0x%x < 0x%x\n", eeprom_ver, il->cfg->eeprom_ver, - calib_ver, il->cfg->eeprom_calib_ver); - return -EINVAL; - -} - -void -il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac) -{ - const u8 *addr = il_eeprom_query_addr(il, - EEPROM_MAC_ADDRESS); - memcpy(mac, addr, ETH_ALEN); -} - -/* Send led command */ -static int -il4965_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd) -{ - struct il_host_cmd cmd = { - .id = C_LEDS, - .len = sizeof(struct il_led_cmd), - .data = led_cmd, - .flags = CMD_ASYNC, - .callback = NULL, - }; - u32 reg; - - reg = _il_rd(il, CSR_LED_REG); - if (reg != (reg & CSR_LED_BSM_CTRL_MSK)) - _il_wr(il, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK); - - return il_send_cmd(il, &cmd); -} - -/* Set led register off */ -void -il4965_led_enable(struct il_priv *il) -{ - _il_wr(il, CSR_LED_REG, CSR_LED_REG_TRUN_ON); -} - -const struct il_led_ops il4965_led_ops = { - .cmd = il4965_send_led_cmd, -}; - -static int il4965_send_tx_power(struct il_priv *il); -static int il4965_hw_get_temperature(struct il_priv *il); - -/* Highest firmware API version supported */ -#define IL4965_UCODE_API_MAX 2 - -/* Lowest firmware API version supported */ -#define IL4965_UCODE_API_MIN 2 - -#define IL4965_FW_PRE "iwlwifi-4965-" -#define _IL4965_MODULE_FIRMWARE(api) IL4965_FW_PRE #api ".ucode" -#define IL4965_MODULE_FIRMWARE(api) _IL4965_MODULE_FIRMWARE(api) - -/* check contents of special bootstrap uCode SRAM */ -static int -il4965_verify_bsm(struct il_priv *il) -{ - __le32 *image = il->ucode_boot.v_addr; - u32 len = il->ucode_boot.len; - u32 reg; - u32 val; - - D_INFO("Begin verify bsm\n"); - - /* verify BSM SRAM contents */ - val = il_rd_prph(il, BSM_WR_DWCOUNT_REG); - for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len; - reg += sizeof(u32), image++) { - val = il_rd_prph(il, reg); - if (val != le32_to_cpu(*image)) { - IL_ERR("BSM uCode verification failed at " - "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", - BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND, - len, val, le32_to_cpu(*image)); - return -EIO; - } - } - - D_INFO("BSM bootstrap uCode image OK\n"); - - return 0; -} - -/** - * il4965_load_bsm - Load bootstrap instructions - * - * BSM operation: - * - * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program - * in special SRAM that does not power down during RFKILL. 
When powering back - * up after power-saving sleeps (or during initial uCode load), the BSM loads - * the bootstrap program into the on-board processor, and starts it. - * - * The bootstrap program loads (via DMA) instructions and data for a new - * program from host DRAM locations indicated by the host driver in the - * BSM_DRAM_* registers. Once the new program is loaded, it starts - * automatically. - * - * When initializing the NIC, the host driver points the BSM to the - * "initialize" uCode image. This uCode sets up some internal data, then - * notifies host via "initialize alive" that it is complete. - * - * The host then replaces the BSM_DRAM_* pointer values to point to the - * normal runtime uCode instructions and a backup uCode data cache buffer - * (filled initially with starting data values for the on-board processor), - * then triggers the "initialize" uCode to load and launch the runtime uCode, - * which begins normal operation. - * - * When doing a power-save shutdown, runtime uCode saves data SRAM into - * the backup data cache in DRAM before SRAM is powered down. - * - * When powering back up, the BSM loads the bootstrap program. This reloads - * the runtime uCode instructions and the backup data cache into SRAM, - * and re-launches the runtime uCode from where it left off. - */ -static int -il4965_load_bsm(struct il_priv *il) -{ - __le32 *image = il->ucode_boot.v_addr; - u32 len = il->ucode_boot.len; - dma_addr_t pinst; - dma_addr_t pdata; - u32 inst_len; - u32 data_len; - int i; - u32 done; - u32 reg_offset; - int ret; - - D_INFO("Begin load bsm\n"); - - il->ucode_type = UCODE_RT; - - /* make sure bootstrap program is no larger than BSM's SRAM size */ - if (len > IL49_MAX_BSM_SIZE) - return -EINVAL; - - /* Tell bootstrap uCode where to find the "Initialize" uCode - * in host DRAM ... host DRAM physical address bits 35:4 for 4965. - * NOTE: il_init_alive_start() will replace these values, - * after the "initialize" uCode has run, to point to - * runtime/protocol instructions and backup data cache. - */ - pinst = il->ucode_init.p_addr >> 4; - pdata = il->ucode_init_data.p_addr >> 4; - inst_len = il->ucode_init.len; - data_len = il->ucode_init_data.len; - - il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst); - il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata); - il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); - il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); - - /* Fill BSM memory with bootstrap instructions */ - for (reg_offset = BSM_SRAM_LOWER_BOUND; - reg_offset < BSM_SRAM_LOWER_BOUND + len; - reg_offset += sizeof(u32), image++) - _il_wr_prph(il, reg_offset, le32_to_cpu(*image)); - - ret = il4965_verify_bsm(il); - if (ret) - return ret; - - /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ - il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0); - il_wr_prph(il, BSM_WR_MEM_DST_REG, IL49_RTC_INST_LOWER_BOUND); - il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); - - /* Load bootstrap code into instruction SRAM now, - * to prepare to load "initialize" uCode */ - il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START); - - /* Wait for load of bootstrap uCode to finish */ - for (i = 0; i < 100; i++) { - done = il_rd_prph(il, BSM_WR_CTRL_REG); - if (!(done & BSM_WR_CTRL_REG_BIT_START)) - break; - udelay(10); - } - if (i < 100) - D_INFO("BSM write complete, poll %d iterations\n", i); - else { - IL_ERR("BSM write did not complete!\n"); - return -EIO; - } - - /* Enable future boot loads whenever power management unit triggers it - * (e.g. 
when powering back up after power-save shutdown) */ - il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN); - - return 0; -} - -/** - * il4965_set_ucode_ptrs - Set uCode address location - * - * Tell initialization uCode where to find runtime uCode. - * - * BSM registers initially contain pointers to initialization uCode. - * We need to replace them to load runtime uCode inst and data, - * and to save runtime data when powering down. - */ -static int -il4965_set_ucode_ptrs(struct il_priv *il) -{ - dma_addr_t pinst; - dma_addr_t pdata; - int ret = 0; - - /* bits 35:4 for 4965 */ - pinst = il->ucode_code.p_addr >> 4; - pdata = il->ucode_data_backup.p_addr >> 4; - - /* Tell bootstrap uCode where to find image to load */ - il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst); - il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata); - il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len); - - /* Inst byte count must be last to set up, bit 31 signals uCode - * that all new ptr/size info is in place */ - il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, - il->ucode_code.len | BSM_DRAM_INST_LOAD); - D_INFO("Runtime uCode pointers are set.\n"); - - return ret; -} - -/** - * il4965_init_alive_start - Called after N_ALIVE notification received - * - * Called after N_ALIVE notification received from "initialize" uCode. - * - * The 4965 "initialize" ALIVE reply contains calibration data for: - * Voltage, temperature, and MIMO tx gain correction, now stored in il - * (3945 does not contain this data). - * - * Tell "initialize" uCode to go ahead and load the runtime uCode. -*/ -static void -il4965_init_alive_start(struct il_priv *il) -{ - /* Bootstrap uCode has loaded initialize uCode ... verify inst image. - * This is a paranoid check, because we would not have gotten the - * "initialize" alive if code weren't properly loaded. */ - if (il4965_verify_ucode(il)) { - /* Runtime instruction load was bad; - * take it all the way back down so we can try again */ - D_INFO("Bad \"initialize\" uCode load.\n"); - goto restart; - } - - /* Calculate temperature */ - il->temperature = il4965_hw_get_temperature(il); - - /* Send pointers to protocol/runtime uCode image ... init code will - * load and launch runtime uCode, which will send us another "Alive" - * notification. 
*/ - D_INFO("Initialization Alive received.\n"); - if (il4965_set_ucode_ptrs(il)) { - /* Runtime instruction load won't happen; - * take it all the way back down so we can try again */ - D_INFO("Couldn't set up uCode pointers.\n"); - goto restart; - } - return; - -restart: - queue_work(il->workqueue, &il->restart); -} - -static bool -iw4965_is_ht40_channel(__le32 rxon_flags) -{ - int chan_mod = - le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK) >> - RXON_FLG_CHANNEL_MODE_POS; - return (chan_mod == CHANNEL_MODE_PURE_40 || - chan_mod == CHANNEL_MODE_MIXED); -} - -static void -il4965_nic_config(struct il_priv *il) -{ - unsigned long flags; - u16 radio_cfg; - - spin_lock_irqsave(&il->lock, flags); - - radio_cfg = il_eeprom_query16(il, EEPROM_RADIO_CONFIG); - - /* write radio config values to register */ - if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX) - il_set_bit(il, CSR_HW_IF_CONFIG_REG, - EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | - EEPROM_RF_CFG_STEP_MSK(radio_cfg) | - EEPROM_RF_CFG_DASH_MSK(radio_cfg)); - - /* set CSR_HW_CONFIG_REG for uCode use */ - il_set_bit(il, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | - CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); - - il->calib_info = - (struct il_eeprom_calib_info *) - il_eeprom_query_addr(il, EEPROM_4965_CALIB_TXPOWER_OFFSET); - - spin_unlock_irqrestore(&il->lock, flags); -} - -/* Reset differential Rx gains in NIC to prepare for chain noise calibration. - * Called after every association, but this runs only once! - * ... once chain noise is calibrated the first time, it's good forever. */ -static void -il4965_chain_noise_reset(struct il_priv *il) -{ - struct il_chain_noise_data *data = &(il->chain_noise_data); - - if (data->state == IL_CHAIN_NOISE_ALIVE && il_is_any_associated(il)) { - struct il_calib_diff_gain_cmd cmd; - - /* clear data for chain noise calibration algorithm */ - data->chain_noise_a = 0; - data->chain_noise_b = 0; - data->chain_noise_c = 0; - data->chain_signal_a = 0; - data->chain_signal_b = 0; - data->chain_signal_c = 0; - data->beacon_count = 0; - - memset(&cmd, 0, sizeof(cmd)); - cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD; - cmd.diff_gain_a = 0; - cmd.diff_gain_b = 0; - cmd.diff_gain_c = 0; - if (il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd)) - IL_ERR("Could not send C_PHY_CALIBRATION\n"); - data->state = IL_CHAIN_NOISE_ACCUMULATE; - D_CALIB("Run chain_noise_calibrate\n"); - } -} - -static struct il_sensitivity_ranges il4965_sensitivity = { - .min_nrg_cck = 97, - .max_nrg_cck = 0, /* not used, set to 0 */ - - .auto_corr_min_ofdm = 85, - .auto_corr_min_ofdm_mrc = 170, - .auto_corr_min_ofdm_x1 = 105, - .auto_corr_min_ofdm_mrc_x1 = 220, - - .auto_corr_max_ofdm = 120, - .auto_corr_max_ofdm_mrc = 210, - .auto_corr_max_ofdm_x1 = 140, - .auto_corr_max_ofdm_mrc_x1 = 270, - - .auto_corr_min_cck = 125, - .auto_corr_max_cck = 200, - .auto_corr_min_cck_mrc = 200, - .auto_corr_max_cck_mrc = 400, - - .nrg_th_cck = 100, - .nrg_th_ofdm = 100, - - .barker_corr_th_min = 190, - .barker_corr_th_min_mrc = 390, - .nrg_th_cca = 62, -}; - -static void -il4965_set_ct_threshold(struct il_priv *il) -{ - /* want Kelvin */ - il->hw_params.ct_kill_threshold = - CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY); -} - -/** - * il4965_hw_set_hw_params - * - * Called when initializing driver - */ -static int -il4965_hw_set_hw_params(struct il_priv *il) -{ - if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES && - il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES) - il->cfg->base_params->num_of_queues = - 
il->cfg->mod_params->num_of_queues; - - il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues; - il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM; - il->hw_params.scd_bc_tbls_size = - il->cfg->base_params->num_of_queues * - sizeof(struct il4965_scd_bc_tbl); - il->hw_params.tfd_size = sizeof(struct il_tfd); - il->hw_params.max_stations = IL4965_STATION_COUNT; - il->ctx.bcast_sta_id = IL4965_BROADCAST_ID; - il->hw_params.max_data_size = IL49_RTC_DATA_SIZE; - il->hw_params.max_inst_size = IL49_RTC_INST_SIZE; - il->hw_params.max_bsm_size = BSM_SRAM_SIZE; - il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ); - - il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR; - - il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant); - il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant); - il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant; - il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant; - - il4965_set_ct_threshold(il); - - il->hw_params.sens = &il4965_sensitivity; - il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS; - - return 0; -} - -static s32 -il4965_math_div_round(s32 num, s32 denom, s32 * res) -{ - s32 sign = 1; - - if (num < 0) { - sign = -sign; - num = -num; - } - if (denom < 0) { - sign = -sign; - denom = -denom; - } - *res = 1; - *res = ((num * 2 + denom) / (denom * 2)) * sign; - - return 1; -} - -/** - * il4965_get_voltage_compensation - Power supply voltage comp for txpower - * - * Determines power supply voltage compensation for txpower calculations. - * Returns number of 1/2-dB steps to subtract from gain table idx, - * to compensate for difference between power supply voltage during - * factory measurements, vs. current power supply voltage. - * - * Voltage indication is higher for lower voltage. - * Lower voltage requires more gain (lower gain table idx). 
- */ -static s32 -il4965_get_voltage_compensation(s32 eeprom_voltage, s32 current_voltage) -{ - s32 comp = 0; - - if (TX_POWER_IL_ILLEGAL_VOLTAGE == eeprom_voltage || - TX_POWER_IL_ILLEGAL_VOLTAGE == current_voltage) - return 0; - - il4965_math_div_round(current_voltage - eeprom_voltage, - TX_POWER_IL_VOLTAGE_CODES_PER_03V, &comp); - - if (current_voltage > eeprom_voltage) - comp *= 2; - if ((comp < -2) || (comp > 2)) - comp = 0; - - return comp; -} - -static s32 -il4965_get_tx_atten_grp(u16 channel) -{ - if (channel >= CALIB_IL_TX_ATTEN_GR5_FCH && - channel <= CALIB_IL_TX_ATTEN_GR5_LCH) - return CALIB_CH_GROUP_5; - - if (channel >= CALIB_IL_TX_ATTEN_GR1_FCH && - channel <= CALIB_IL_TX_ATTEN_GR1_LCH) - return CALIB_CH_GROUP_1; - - if (channel >= CALIB_IL_TX_ATTEN_GR2_FCH && - channel <= CALIB_IL_TX_ATTEN_GR2_LCH) - return CALIB_CH_GROUP_2; - - if (channel >= CALIB_IL_TX_ATTEN_GR3_FCH && - channel <= CALIB_IL_TX_ATTEN_GR3_LCH) - return CALIB_CH_GROUP_3; - - if (channel >= CALIB_IL_TX_ATTEN_GR4_FCH && - channel <= CALIB_IL_TX_ATTEN_GR4_LCH) - return CALIB_CH_GROUP_4; - - return -EINVAL; -} - -static u32 -il4965_get_sub_band(const struct il_priv *il, u32 channel) -{ - s32 b = -1; - - for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) { - if (il->calib_info->band_info[b].ch_from == 0) - continue; - - if (channel >= il->calib_info->band_info[b].ch_from && - channel <= il->calib_info->band_info[b].ch_to) - break; - } - - return b; -} - -static s32 -il4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2) -{ - s32 val; - - if (x2 == x1) - return y1; - else { - il4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val); - return val + y2; - } -} - -/** - * il4965_interpolate_chan - Interpolate factory measurements for one channel - * - * Interpolates factory measurements from the two sample channels within a - * sub-band, to apply to channel of interest. Interpolation is proportional to - * differences in channel frequencies, which is proportional to differences - * in channel number. - */ -static int -il4965_interpolate_chan(struct il_priv *il, u32 channel, - struct il_eeprom_calib_ch_info *chan_info) -{ - s32 s = -1; - u32 c; - u32 m; - const struct il_eeprom_calib_measure *m1; - const struct il_eeprom_calib_measure *m2; - struct il_eeprom_calib_measure *omeas; - u32 ch_i1; - u32 ch_i2; - - s = il4965_get_sub_band(il, channel); - if (s >= EEPROM_TX_POWER_BANDS) { - IL_ERR("Tx Power can not find channel %d\n", channel); - return -1; - } - - ch_i1 = il->calib_info->band_info[s].ch1.ch_num; - ch_i2 = il->calib_info->band_info[s].ch2.ch_num; - chan_info->ch_num = (u8) channel; - - D_TXPOWER("channel %d subband %d factory cal ch %d & %d\n", channel, s, - ch_i1, ch_i2); - - for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) { - for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) { - m1 = &(il->calib_info->band_info[s].ch1. - measurements[c][m]); - m2 = &(il->calib_info->band_info[s].ch2. 
- measurements[c][m]); - omeas = &(chan_info->measurements[c][m]); - - omeas->actual_pow = - (u8) il4965_interpolate_value(channel, ch_i1, - m1->actual_pow, ch_i2, - m2->actual_pow); - omeas->gain_idx = - (u8) il4965_interpolate_value(channel, ch_i1, - m1->gain_idx, ch_i2, - m2->gain_idx); - omeas->temperature = - (u8) il4965_interpolate_value(channel, ch_i1, - m1->temperature, - ch_i2, - m2->temperature); - omeas->pa_det = - (s8) il4965_interpolate_value(channel, ch_i1, - m1->pa_det, ch_i2, - m2->pa_det); - - D_TXPOWER("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, - m, m1->actual_pow, m2->actual_pow, - omeas->actual_pow); - D_TXPOWER("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, - m, m1->gain_idx, m2->gain_idx, - omeas->gain_idx); - D_TXPOWER("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, - m, m1->pa_det, m2->pa_det, omeas->pa_det); - D_TXPOWER("chain %d meas %d T1=%d T2=%d T=%d\n", c, - m, m1->temperature, m2->temperature, - omeas->temperature); - } - } - - return 0; -} - -/* bit-rate-dependent table to prevent Tx distortion, in half-dB units, - * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */ -static s32 back_off_table[] = { - 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */ - 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */ - 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */ - 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */ - 10 /* CCK */ -}; - -/* Thermal compensation values for txpower for various frequency ranges ... - * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */ -static struct il4965_txpower_comp_entry { - s32 degrees_per_05db_a; - s32 degrees_per_05db_a_denom; -} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = { - { - 9, 2}, /* group 0 5.2, ch 34-43 */ - { - 4, 1}, /* group 1 5.2, ch 44-70 */ - { - 4, 1}, /* group 2 5.2, ch 71-124 */ - { - 4, 1}, /* group 3 5.2, ch 125-200 */ - { - 3, 1} /* group 4 2.4, ch all */ -}; - -static s32 -get_min_power_idx(s32 rate_power_idx, u32 band) -{ - if (!band) { - if ((rate_power_idx & 7) <= 4) - return MIN_TX_GAIN_IDX_52GHZ_EXT; - } - return MIN_TX_GAIN_IDX; -} - -struct gain_entry { - u8 dsp; - u8 radio; -}; - -static const struct gain_entry gain_table[2][108] = { - /* 5.2GHz power gain idx table */ - { - {123, 0x3F}, /* highest txpower */ - {117, 0x3F}, - {110, 0x3F}, - {104, 0x3F}, - {98, 0x3F}, - {110, 0x3E}, - {104, 0x3E}, - {98, 0x3E}, - {110, 0x3D}, - {104, 0x3D}, - {98, 0x3D}, - {110, 0x3C}, - {104, 0x3C}, - {98, 0x3C}, - {110, 0x3B}, - {104, 0x3B}, - {98, 0x3B}, - {110, 0x3A}, - {104, 0x3A}, - {98, 0x3A}, - {110, 0x39}, - {104, 0x39}, - {98, 0x39}, - {110, 0x38}, - {104, 0x38}, - {98, 0x38}, - {110, 0x37}, - {104, 0x37}, - {98, 0x37}, - {110, 0x36}, - {104, 0x36}, - {98, 0x36}, - {110, 0x35}, - {104, 0x35}, - {98, 0x35}, - {110, 0x34}, - {104, 0x34}, - {98, 0x34}, - {110, 0x33}, - {104, 0x33}, - {98, 0x33}, - {110, 0x32}, - {104, 0x32}, - {98, 0x32}, - {110, 0x31}, - {104, 0x31}, - {98, 0x31}, - {110, 0x30}, - {104, 0x30}, - {98, 0x30}, - {110, 0x25}, - {104, 0x25}, - {98, 0x25}, - {110, 0x24}, - {104, 0x24}, - {98, 0x24}, - {110, 0x23}, - {104, 0x23}, - {98, 0x23}, - {110, 0x22}, - {104, 0x18}, - {98, 0x18}, - {110, 0x17}, - {104, 0x17}, - {98, 0x17}, - {110, 0x16}, - {104, 0x16}, - {98, 0x16}, - {110, 0x15}, - {104, 0x15}, - {98, 0x15}, - {110, 0x14}, - {104, 0x14}, - {98, 0x14}, - {110, 0x13}, - {104, 0x13}, - {98, 0x13}, - {110, 0x12}, - {104, 0x08}, - {98, 0x08}, - {110, 0x07}, - {104, 0x07}, - {98, 0x07}, - {110, 0x06}, - {104, 0x06}, - {98, 0x06}, - {110, 0x05}, - 
{104, 0x05}, - {98, 0x05}, - {110, 0x04}, - {104, 0x04}, - {98, 0x04}, - {110, 0x03}, - {104, 0x03}, - {98, 0x03}, - {110, 0x02}, - {104, 0x02}, - {98, 0x02}, - {110, 0x01}, - {104, 0x01}, - {98, 0x01}, - {110, 0x00}, - {104, 0x00}, - {98, 0x00}, - {93, 0x00}, - {88, 0x00}, - {83, 0x00}, - {78, 0x00}, - }, - /* 2.4GHz power gain idx table */ - { - {110, 0x3f}, /* highest txpower */ - {104, 0x3f}, - {98, 0x3f}, - {110, 0x3e}, - {104, 0x3e}, - {98, 0x3e}, - {110, 0x3d}, - {104, 0x3d}, - {98, 0x3d}, - {110, 0x3c}, - {104, 0x3c}, - {98, 0x3c}, - {110, 0x3b}, - {104, 0x3b}, - {98, 0x3b}, - {110, 0x3a}, - {104, 0x3a}, - {98, 0x3a}, - {110, 0x39}, - {104, 0x39}, - {98, 0x39}, - {110, 0x38}, - {104, 0x38}, - {98, 0x38}, - {110, 0x37}, - {104, 0x37}, - {98, 0x37}, - {110, 0x36}, - {104, 0x36}, - {98, 0x36}, - {110, 0x35}, - {104, 0x35}, - {98, 0x35}, - {110, 0x34}, - {104, 0x34}, - {98, 0x34}, - {110, 0x33}, - {104, 0x33}, - {98, 0x33}, - {110, 0x32}, - {104, 0x32}, - {98, 0x32}, - {110, 0x31}, - {104, 0x31}, - {98, 0x31}, - {110, 0x30}, - {104, 0x30}, - {98, 0x30}, - {110, 0x6}, - {104, 0x6}, - {98, 0x6}, - {110, 0x5}, - {104, 0x5}, - {98, 0x5}, - {110, 0x4}, - {104, 0x4}, - {98, 0x4}, - {110, 0x3}, - {104, 0x3}, - {98, 0x3}, - {110, 0x2}, - {104, 0x2}, - {98, 0x2}, - {110, 0x1}, - {104, 0x1}, - {98, 0x1}, - {110, 0x0}, - {104, 0x0}, - {98, 0x0}, - {97, 0}, - {96, 0}, - {95, 0}, - {94, 0}, - {93, 0}, - {92, 0}, - {91, 0}, - {90, 0}, - {89, 0}, - {88, 0}, - {87, 0}, - {86, 0}, - {85, 0}, - {84, 0}, - {83, 0}, - {82, 0}, - {81, 0}, - {80, 0}, - {79, 0}, - {78, 0}, - {77, 0}, - {76, 0}, - {75, 0}, - {74, 0}, - {73, 0}, - {72, 0}, - {71, 0}, - {70, 0}, - {69, 0}, - {68, 0}, - {67, 0}, - {66, 0}, - {65, 0}, - {64, 0}, - {63, 0}, - {62, 0}, - {61, 0}, - {60, 0}, - {59, 0}, - } -}; - -static int -il4965_fill_txpower_tbl(struct il_priv *il, u8 band, u16 channel, u8 is_ht40, - u8 ctrl_chan_high, - struct il4965_tx_power_db *tx_power_tbl) -{ - u8 saturation_power; - s32 target_power; - s32 user_target_power; - s32 power_limit; - s32 current_temp; - s32 reg_limit; - s32 current_regulatory; - s32 txatten_grp = CALIB_CH_GROUP_MAX; - int i; - int c; - const struct il_channel_info *ch_info = NULL; - struct il_eeprom_calib_ch_info ch_eeprom_info; - const struct il_eeprom_calib_measure *measurement; - s16 voltage; - s32 init_voltage; - s32 voltage_compensation; - s32 degrees_per_05db_num; - s32 degrees_per_05db_denom; - s32 factory_temp; - s32 temperature_comp[2]; - s32 factory_gain_idx[2]; - s32 factory_actual_pwr[2]; - s32 power_idx; - - /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units - * are used for idxing into txpower table) */ - user_target_power = 2 * il->tx_power_user_lmt; - - /* Get current (RXON) channel, band, width */ - D_TXPOWER("chan %d band %d is_ht40 %d\n", channel, band, is_ht40); - - ch_info = il_get_channel_info(il, il->band, channel); - - if (!il_is_channel_valid(ch_info)) - return -EINVAL; - - /* get txatten group, used to select 1) thermal txpower adjustment - * and 2) mimo txpower balance between Tx chains. */ - txatten_grp = il4965_get_tx_atten_grp(channel); - if (txatten_grp < 0) { - IL_ERR("Can't find txatten group for channel %d.\n", channel); - return txatten_grp; - } - - D_TXPOWER("channel %d belongs to txatten group %d\n", channel, - txatten_grp); - - if (is_ht40) { - if (ctrl_chan_high) - channel -= 2; - else - channel += 2; - } - - /* hardware txpower limits ... 
- * saturation (clipping distortion) txpowers are in half-dBm */ - if (band) - saturation_power = il->calib_info->saturation_power24; - else - saturation_power = il->calib_info->saturation_power52; - - if (saturation_power < IL_TX_POWER_SATURATION_MIN || - saturation_power > IL_TX_POWER_SATURATION_MAX) { - if (band) - saturation_power = IL_TX_POWER_DEFAULT_SATURATION_24; - else - saturation_power = IL_TX_POWER_DEFAULT_SATURATION_52; - } - - /* regulatory txpower limits ... reg_limit values are in half-dBm, - * max_power_avg values are in dBm, convert * 2 */ - if (is_ht40) - reg_limit = ch_info->ht40_max_power_avg * 2; - else - reg_limit = ch_info->max_power_avg * 2; - - if ((reg_limit < IL_TX_POWER_REGULATORY_MIN) || - (reg_limit > IL_TX_POWER_REGULATORY_MAX)) { - if (band) - reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_24; - else - reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_52; - } - - /* Interpolate txpower calibration values for this channel, - * based on factory calibration tests on spaced channels. */ - il4965_interpolate_chan(il, channel, &ch_eeprom_info); - - /* calculate tx gain adjustment based on power supply voltage */ - voltage = le16_to_cpu(il->calib_info->voltage); - init_voltage = (s32) le32_to_cpu(il->card_alive_init.voltage); - voltage_compensation = - il4965_get_voltage_compensation(voltage, init_voltage); - - D_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n", init_voltage, - voltage, voltage_compensation); - - /* get current temperature (Celsius) */ - current_temp = max(il->temperature, IL_TX_POWER_TEMPERATURE_MIN); - current_temp = min(il->temperature, IL_TX_POWER_TEMPERATURE_MAX); - current_temp = KELVIN_TO_CELSIUS(current_temp); - - /* select thermal txpower adjustment params, based on channel group - * (same frequency group used for mimo txatten adjustment) */ - degrees_per_05db_num = - tx_power_cmp_tble[txatten_grp].degrees_per_05db_a; - degrees_per_05db_denom = - tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom; - - /* get per-chain txpower values from factory measurements */ - for (c = 0; c < 2; c++) { - measurement = &ch_eeprom_info.measurements[c][1]; - - /* txgain adjustment (in half-dB steps) based on difference - * between factory and current temperature */ - factory_temp = measurement->temperature; - il4965_math_div_round((current_temp - - factory_temp) * degrees_per_05db_denom, - degrees_per_05db_num, - &temperature_comp[c]); - - factory_gain_idx[c] = measurement->gain_idx; - factory_actual_pwr[c] = measurement->actual_pow; - - D_TXPOWER("chain = %d\n", c); - D_TXPOWER("fctry tmp %d, " "curr tmp %d, comp %d steps\n", - factory_temp, current_temp, temperature_comp[c]); - - D_TXPOWER("fctry idx %d, fctry pwr %d\n", factory_gain_idx[c], - factory_actual_pwr[c]); - } - - /* for each of 33 bit-rates (including 1 for CCK) */ - for (i = 0; i < POWER_TBL_NUM_ENTRIES; i++) { - u8 is_mimo_rate; - union il4965_tx_power_dual_stream tx_power; - - /* for mimo, reduce each chain's txpower by half - * (3dB, 6 steps), so total output power is regulatory - * compliant. 
*/ - if (i & 0x8) { - current_regulatory = - reg_limit - - IL_TX_POWER_MIMO_REGULATORY_COMPENSATION; - is_mimo_rate = 1; - } else { - current_regulatory = reg_limit; - is_mimo_rate = 0; - } - - /* find txpower limit, either hardware or regulatory */ - power_limit = saturation_power - back_off_table[i]; - if (power_limit > current_regulatory) - power_limit = current_regulatory; - - /* reduce user's txpower request if necessary - * for this rate on this channel */ - target_power = user_target_power; - if (target_power > power_limit) - target_power = power_limit; - - D_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n", i, - saturation_power - back_off_table[i], - current_regulatory, user_target_power, target_power); - - /* for each of 2 Tx chains (radio transmitters) */ - for (c = 0; c < 2; c++) { - s32 atten_value; - - if (is_mimo_rate) - atten_value = - (s32) le32_to_cpu(il->card_alive_init. - tx_atten[txatten_grp][c]); - else - atten_value = 0; - - /* calculate idx; higher idx means lower txpower */ - power_idx = - (u8) (factory_gain_idx[c] - - (target_power - factory_actual_pwr[c]) - - temperature_comp[c] - voltage_compensation + - atten_value); - -/* D_TXPOWER("calculated txpower idx %d\n", - power_idx); */ - - if (power_idx < get_min_power_idx(i, band)) - power_idx = get_min_power_idx(i, band); - - /* adjust 5 GHz idx to support negative idxes */ - if (!band) - power_idx += 9; - - /* CCK, rate 32, reduce txpower for CCK */ - if (i == POWER_TBL_CCK_ENTRY) - power_idx += - IL_TX_POWER_CCK_COMPENSATION_C_STEP; - - /* stay within the table! */ - if (power_idx > 107) { - IL_WARN("txpower idx %d > 107\n", power_idx); - power_idx = 107; - } - if (power_idx < 0) { - IL_WARN("txpower idx %d < 0\n", power_idx); - power_idx = 0; - } - - /* fill txpower command for this rate/chain */ - tx_power.s.radio_tx_gain[c] = - gain_table[band][power_idx].radio; - tx_power.s.dsp_predis_atten[c] = - gain_table[band][power_idx].dsp; - - D_TXPOWER("chain %d mimo %d idx %d " - "gain 0x%02x dsp %d\n", c, atten_value, - power_idx, tx_power.s.radio_tx_gain[c], - tx_power.s.dsp_predis_atten[c]); - } /* for each chain */ - - tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw); - - } /* for each rate */ - - return 0; -} - -/** - * il4965_send_tx_power - Configure the TXPOWER level user limit - * - * Uses the active RXON for channel, band, and characteristics (ht40, high) - * The power limit is taken from il->tx_power_user_lmt. 
- */ -static int -il4965_send_tx_power(struct il_priv *il) -{ - struct il4965_txpowertable_cmd cmd = { 0 }; - int ret; - u8 band = 0; - bool is_ht40 = false; - u8 ctrl_chan_high = 0; - struct il_rxon_context *ctx = &il->ctx; - - if (WARN_ONCE - (test_bit(S_SCAN_HW, &il->status), - "TX Power requested while scanning!\n")) - return -EAGAIN; - - band = il->band == IEEE80211_BAND_2GHZ; - - is_ht40 = iw4965_is_ht40_channel(ctx->active.flags); - - if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) - ctrl_chan_high = 1; - - cmd.band = band; - cmd.channel = ctx->active.channel; - - ret = - il4965_fill_txpower_tbl(il, band, le16_to_cpu(ctx->active.channel), - is_ht40, ctrl_chan_high, &cmd.tx_power); - if (ret) - goto out; - - ret = il_send_cmd_pdu(il, C_TX_PWR_TBL, sizeof(cmd), &cmd); - -out: - return ret; -} - -static int -il4965_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx) -{ - int ret = 0; - struct il4965_rxon_assoc_cmd rxon_assoc; - const struct il_rxon_cmd *rxon1 = &ctx->staging; - const struct il_rxon_cmd *rxon2 = &ctx->active; - - if (rxon1->flags == rxon2->flags && - rxon1->filter_flags == rxon2->filter_flags && - rxon1->cck_basic_rates == rxon2->cck_basic_rates && - rxon1->ofdm_ht_single_stream_basic_rates == - rxon2->ofdm_ht_single_stream_basic_rates && - rxon1->ofdm_ht_dual_stream_basic_rates == - rxon2->ofdm_ht_dual_stream_basic_rates && - rxon1->rx_chain == rxon2->rx_chain && - rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) { - D_INFO("Using current RXON_ASSOC. Not resending.\n"); - return 0; - } - - rxon_assoc.flags = ctx->staging.flags; - rxon_assoc.filter_flags = ctx->staging.filter_flags; - rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates; - rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates; - rxon_assoc.reserved = 0; - rxon_assoc.ofdm_ht_single_stream_basic_rates = - ctx->staging.ofdm_ht_single_stream_basic_rates; - rxon_assoc.ofdm_ht_dual_stream_basic_rates = - ctx->staging.ofdm_ht_dual_stream_basic_rates; - rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain; - - ret = - il_send_cmd_pdu_async(il, C_RXON_ASSOC, sizeof(rxon_assoc), - &rxon_assoc, NULL); - - return ret; -} - -static int -il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx) -{ - /* cast away the const for active_rxon in this function */ - struct il_rxon_cmd *active_rxon = (void *)&ctx->active; - int ret; - bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK); - - if (!il_is_alive(il)) - return -EBUSY; - - if (!ctx->is_active) - return 0; - - /* always get timestamp with Rx frame */ - ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; - - ret = il_check_rxon_cmd(il, ctx); - if (ret) { - IL_ERR("Invalid RXON configuration. Not committing.\n"); - return -EINVAL; - } - - /* - * receive commit_rxon request - * abort any previous channel switch if still in process - */ - if (test_bit(S_CHANNEL_SWITCH_PENDING, &il->status) && - il->switch_channel != ctx->staging.channel) { - D_11H("abort channel switch on %d\n", - le16_to_cpu(il->switch_channel)); - il_chswitch_done(il, false); - } - - /* If we don't need to send a full RXON, we can use - * il_rxon_assoc_cmd which is used to reconfigure filter - * and other flags for the current radio configuration. 
*/ - if (!il_full_rxon_required(il, ctx)) { - ret = il_send_rxon_assoc(il, ctx); - if (ret) { - IL_ERR("Error setting RXON_ASSOC (%d)\n", ret); - return ret; - } - - memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); - il_print_rx_config_cmd(il, ctx); - /* - * We do not commit tx power settings while channel changing, - * do it now if tx power changed. - */ - il_set_tx_power(il, il->tx_power_next, false); - return 0; - } - - /* If we are currently associated and the new config requires - * an RXON_ASSOC and the new config wants the associated mask enabled, - * we must clear the associated from the active configuration - * before we apply the new config */ - if (il_is_associated_ctx(ctx) && new_assoc) { - D_INFO("Toggling associated bit on current RXON\n"); - active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; - - ret = - il_send_cmd_pdu(il, ctx->rxon_cmd, - sizeof(struct il_rxon_cmd), active_rxon); - - /* If the mask clearing failed then we set - * active_rxon back to what it was previously */ - if (ret) { - active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; - IL_ERR("Error clearing ASSOC_MSK (%d)\n", ret); - return ret; - } - il_clear_ucode_stations(il, ctx); - il_restore_stations(il, ctx); - ret = il4965_restore_default_wep_keys(il, ctx); - if (ret) { - IL_ERR("Failed to restore WEP keys (%d)\n", ret); - return ret; - } - } - - D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n" - "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"), - le16_to_cpu(ctx->staging.channel), ctx->staging.bssid_addr); - - il_set_rxon_hwcrypto(il, ctx, !il->cfg->mod_params->sw_crypto); - - /* Apply the new configuration - * RXON unassoc clears the station table in uCode so restoration of - * stations is needed after it (the RXON command) completes - */ - if (!new_assoc) { - ret = - il_send_cmd_pdu(il, ctx->rxon_cmd, - sizeof(struct il_rxon_cmd), &ctx->staging); - if (ret) { - IL_ERR("Error setting new RXON (%d)\n", ret); - return ret; - } - D_INFO("Return from !new_assoc RXON.\n"); - memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); - il_clear_ucode_stations(il, ctx); - il_restore_stations(il, ctx); - ret = il4965_restore_default_wep_keys(il, ctx); - if (ret) { - IL_ERR("Failed to restore WEP keys (%d)\n", ret); - return ret; - } - } - if (new_assoc) { - il->start_calib = 0; - /* Apply the new configuration - * RXON assoc doesn't clear the station table in uCode, - */ - ret = - il_send_cmd_pdu(il, ctx->rxon_cmd, - sizeof(struct il_rxon_cmd), &ctx->staging); - if (ret) { - IL_ERR("Error setting new RXON (%d)\n", ret); - return ret; - } - memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); - } - il_print_rx_config_cmd(il, ctx); - - il4965_init_sensitivity(il); - - /* If we issue a new RXON command which required a tune then we must - * send a new TXPOWER command or we won't be able to Tx any frames */ - ret = il_set_tx_power(il, il->tx_power_next, true); - if (ret) { - IL_ERR("Error sending TX power (%d)\n", ret); - return ret; - } - - return 0; -} - -static int -il4965_hw_channel_switch(struct il_priv *il, - struct ieee80211_channel_switch *ch_switch) -{ - struct il_rxon_context *ctx = &il->ctx; - int rc; - u8 band = 0; - bool is_ht40 = false; - u8 ctrl_chan_high = 0; - struct il4965_channel_switch_cmd cmd; - const struct il_channel_info *ch_info; - u32 switch_time_in_usec, ucode_switch_time; - u16 ch; - u32 tsf_low; - u8 switch_count; - u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval); - struct ieee80211_vif *vif = ctx->vif; - band = il->band == 
IEEE80211_BAND_2GHZ; - - is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags); - - if (is_ht40 && (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) - ctrl_chan_high = 1; - - cmd.band = band; - cmd.expect_beacon = 0; - ch = ch_switch->channel->hw_value; - cmd.channel = cpu_to_le16(ch); - cmd.rxon_flags = ctx->staging.flags; - cmd.rxon_filter_flags = ctx->staging.filter_flags; - switch_count = ch_switch->count; - tsf_low = ch_switch->timestamp & 0x0ffffffff; - /* - * calculate the ucode channel switch time - * adding TSF as one of the factor for when to switch - */ - if (il->ucode_beacon_time > tsf_low && beacon_interval) { - if (switch_count > - ((il->ucode_beacon_time - tsf_low) / beacon_interval)) { - switch_count -= - (il->ucode_beacon_time - tsf_low) / beacon_interval; - } else - switch_count = 0; - } - if (switch_count <= 1) - cmd.switch_time = cpu_to_le32(il->ucode_beacon_time); - else { - switch_time_in_usec = - vif->bss_conf.beacon_int * switch_count * TIME_UNIT; - ucode_switch_time = - il_usecs_to_beacons(il, switch_time_in_usec, - beacon_interval); - cmd.switch_time = - il_add_beacon_time(il, il->ucode_beacon_time, - ucode_switch_time, beacon_interval); - } - D_11H("uCode time for the switch is 0x%x\n", cmd.switch_time); - ch_info = il_get_channel_info(il, il->band, ch); - if (ch_info) - cmd.expect_beacon = il_is_channel_radar(ch_info); - else { - IL_ERR("invalid channel switch from %u to %u\n", - ctx->active.channel, ch); - return -EFAULT; - } - - rc = il4965_fill_txpower_tbl(il, band, ch, is_ht40, ctrl_chan_high, - &cmd.tx_power); - if (rc) { - D_11H("error:%d fill txpower_tbl\n", rc); - return rc; - } - - return il_send_cmd_pdu(il, C_CHANNEL_SWITCH, sizeof(cmd), &cmd); -} - -/** - * il4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array - */ -static void -il4965_txq_update_byte_cnt_tbl(struct il_priv *il, struct il_tx_queue *txq, - u16 byte_cnt) -{ - struct il4965_scd_bc_tbl *scd_bc_tbl = il->scd_bc_tbls.addr; - int txq_id = txq->q.id; - int write_ptr = txq->q.write_ptr; - int len = byte_cnt + IL_TX_CRC_SIZE + IL_TX_DELIMITER_SIZE; - __le16 bc_ent; - - WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); - - bc_ent = cpu_to_le16(len & 0xFFF); - /* Set up byte count within first 256 entries */ - scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; - - /* If within first 64 entries, duplicate at end */ - if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) - scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = - bc_ent; -} - -/** - * il4965_hw_get_temperature - return the calibrated temperature (in Kelvin) - * @stats: Provides the temperature reading from the uCode - * - * A return of <0 indicates bogus data in the stats - */ -static int -il4965_hw_get_temperature(struct il_priv *il) -{ - s32 temperature; - s32 vt; - s32 R1, R2, R3; - u32 R4; - - if (test_bit(S_TEMPERATURE, &il->status) && - (il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)) { - D_TEMP("Running HT40 temperature calibration\n"); - R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[1]); - R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[1]); - R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[1]); - R4 = le32_to_cpu(il->card_alive_init.therm_r4[1]); - } else { - D_TEMP("Running temperature calibration\n"); - R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[0]); - R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[0]); - R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[0]); - R4 = le32_to_cpu(il->card_alive_init.therm_r4[0]); - } - - /* - * Temperature is only 23 bits, so sign 
extend out to 32. - * - * NOTE If we haven't received a stats notification yet - * with an updated temperature, use R4 provided to us in the - * "initialize" ALIVE response. - */ - if (!test_bit(S_TEMPERATURE, &il->status)) - vt = sign_extend32(R4, 23); - else - vt = sign_extend32(le32_to_cpu - (il->_4965.stats.general.common.temperature), - 23); - - D_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt); - - if (R3 == R1) { - IL_ERR("Calibration conflict R1 == R3\n"); - return -1; - } - - /* Calculate temperature in degrees Kelvin, adjust by 97%. - * Add offset to center the adjustment around 0 degrees Centigrade. */ - temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2); - temperature /= (R3 - R1); - temperature = - (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET; - - D_TEMP("Calibrated temperature: %dK, %dC\n", temperature, - KELVIN_TO_CELSIUS(temperature)); - - return temperature; -} - -/* Adjust Txpower only if temperature variance is greater than threshold. */ -#define IL_TEMPERATURE_THRESHOLD 3 - -/** - * il4965_is_temp_calib_needed - determines if new calibration is needed - * - * If the temperature changed has changed sufficiently, then a recalibration - * is needed. - * - * Assumes caller will replace il->last_temperature once calibration - * executed. - */ -static int -il4965_is_temp_calib_needed(struct il_priv *il) -{ - int temp_diff; - - if (!test_bit(S_STATS, &il->status)) { - D_TEMP("Temperature not updated -- no stats.\n"); - return 0; - } - - temp_diff = il->temperature - il->last_temperature; - - /* get absolute value */ - if (temp_diff < 0) { - D_POWER("Getting cooler, delta %d\n", temp_diff); - temp_diff = -temp_diff; - } else if (temp_diff == 0) - D_POWER("Temperature unchanged\n"); - else - D_POWER("Getting warmer, delta %d\n", temp_diff); - - if (temp_diff < IL_TEMPERATURE_THRESHOLD) { - D_POWER(" => thermal txpower calib not needed\n"); - return 0; - } - - D_POWER(" => thermal txpower calib needed\n"); - - return 1; -} - -static void -il4965_temperature_calib(struct il_priv *il) -{ - s32 temp; - - temp = il4965_hw_get_temperature(il); - if (IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp)) - return; - - if (il->temperature != temp) { - if (il->temperature) - D_TEMP("Temperature changed " "from %dC to %dC\n", - KELVIN_TO_CELSIUS(il->temperature), - KELVIN_TO_CELSIUS(temp)); - else - D_TEMP("Temperature " "initialized to %dC\n", - KELVIN_TO_CELSIUS(temp)); - } - - il->temperature = temp; - set_bit(S_TEMPERATURE, &il->status); - - if (!il->disable_tx_power_cal && - unlikely(!test_bit(S_SCANNING, &il->status)) && - il4965_is_temp_calib_needed(il)) - queue_work(il->workqueue, &il->txpower_work); -} - -static u16 -il4965_get_hcmd_size(u8 cmd_id, u16 len) -{ - switch (cmd_id) { - case C_RXON: - return (u16) sizeof(struct il4965_rxon_cmd); - default: - return len; - } -} - -static u16 -il4965_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data) -{ - struct il4965_addsta_cmd *addsta = (struct il4965_addsta_cmd *)data; - addsta->mode = cmd->mode; - memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify)); - memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo)); - addsta->station_flags = cmd->station_flags; - addsta->station_flags_msk = cmd->station_flags_msk; - addsta->tid_disable_tx = cmd->tid_disable_tx; - addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid; - addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid; - addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn; - addsta->sleep_tx_count = cmd->sleep_tx_count; - 
addsta->reserved1 = cpu_to_le16(0); - addsta->reserved2 = cpu_to_le16(0); - - return (u16) sizeof(struct il4965_addsta_cmd); -} - -static inline u32 -il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp) -{ - return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN; -} - -static inline u32 -il4965_tx_status_to_mac80211(u32 status) -{ - status &= TX_STATUS_MSK; - - switch (status) { - case TX_STATUS_SUCCESS: - case TX_STATUS_DIRECT_DONE: - return IEEE80211_TX_STAT_ACK; - case TX_STATUS_FAIL_DEST_PS: - return IEEE80211_TX_STAT_TX_FILTERED; - default: - return 0; - } -} - -static inline bool -il4965_is_tx_success(u32 status) -{ - status &= TX_STATUS_MSK; - return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE); -} - -/** - * il4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue - */ -static int -il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg, - struct il4965_tx_resp *tx_resp, int txq_id, - u16 start_idx) -{ - u16 status; - struct agg_tx_status *frame_status = tx_resp->u.agg_status; - struct ieee80211_tx_info *info = NULL; - struct ieee80211_hdr *hdr = NULL; - u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); - int i, sh, idx; - u16 seq; - if (agg->wait_for_ba) - D_TX_REPLY("got tx response w/o block-ack\n"); - - agg->frame_count = tx_resp->frame_count; - agg->start_idx = start_idx; - agg->rate_n_flags = rate_n_flags; - agg->bitmap = 0; - - /* num frames attempted by Tx command */ - if (agg->frame_count == 1) { - /* Only one frame was attempted; no block-ack will arrive */ - status = le16_to_cpu(frame_status[0].status); - idx = start_idx; - - D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n", - agg->frame_count, agg->start_idx, idx); - - info = IEEE80211_SKB_CB(il->txq[txq_id].txb[idx].skb); - info->status.rates[0].count = tx_resp->failure_frame + 1; - info->flags &= ~IEEE80211_TX_CTL_AMPDU; - info->flags |= il4965_tx_status_to_mac80211(status); - il4965_hwrate_to_tx_control(il, rate_n_flags, info); - - D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff, - tx_resp->failure_frame); - D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags); - - agg->wait_for_ba = 0; - } else { - /* Two or more frames were attempted; expect block-ack */ - u64 bitmap = 0; - int start = agg->start_idx; - - /* Construct bit-map of pending frames within Tx win */ - for (i = 0; i < agg->frame_count; i++) { - u16 sc; - status = le16_to_cpu(frame_status[i].status); - seq = le16_to_cpu(frame_status[i].sequence); - idx = SEQ_TO_IDX(seq); - txq_id = SEQ_TO_QUEUE(seq); - - if (status & - (AGG_TX_STATE_FEW_BYTES_MSK | - AGG_TX_STATE_ABORT_MSK)) - continue; - - D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n", - agg->frame_count, txq_id, idx); - - hdr = il_tx_queue_get_hdr(il, txq_id, idx); - if (!hdr) { - IL_ERR("BUG_ON idx doesn't point to valid skb" - " idx=%d, txq_id=%d\n", idx, txq_id); - return -1; - } - - sc = le16_to_cpu(hdr->seq_ctrl); - if (idx != (SEQ_TO_SN(sc) & 0xff)) { - IL_ERR("BUG_ON idx doesn't match seq control" - " idx=%d, seq_idx=%d, seq=%d\n", idx, - SEQ_TO_SN(sc), hdr->seq_ctrl); - return -1; - } - - D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx, - SEQ_TO_SN(sc)); - - sh = idx - start; - if (sh > 64) { - sh = (start - idx) + 0xff; - bitmap = bitmap << sh; - sh = 0; - start = idx; - } else if (sh < -64) - sh = 0xff - (start - idx); - else if (sh < 0) { - sh = start - idx; - start = idx; - bitmap = bitmap << sh; - sh = 0; - } - bitmap |= 1ULL << sh; - D_TX_REPLY("start=%d bitmap=0x%llx\n", start, - (unsigned long 
long)bitmap); - } - - agg->bitmap = bitmap; - agg->start_idx = start; - D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n", - agg->frame_count, agg->start_idx, - (unsigned long long)agg->bitmap); - - if (bitmap) - agg->wait_for_ba = 1; - } - return 0; -} - -static u8 -il4965_find_station(struct il_priv *il, const u8 * addr) -{ - int i; - int start = 0; - int ret = IL_INVALID_STATION; - unsigned long flags; - - if ((il->iw_mode == NL80211_IFTYPE_ADHOC)) - start = IL_STA_ID; - - if (is_broadcast_ether_addr(addr)) - return il->ctx.bcast_sta_id; - - spin_lock_irqsave(&il->sta_lock, flags); - for (i = start; i < il->hw_params.max_stations; i++) - if (il->stations[i].used && - (!compare_ether_addr(il->stations[i].sta.sta.addr, addr))) { - ret = i; - goto out; - } - - D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations); - -out: - /* - * It may be possible that more commands interacting with stations - * arrive before we completed processing the adding of - * station - */ - if (ret != IL_INVALID_STATION && - (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) || - ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) && - (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) { - IL_ERR("Requested station info for sta %d before ready.\n", - ret); - ret = IL_INVALID_STATION; - } - spin_unlock_irqrestore(&il->sta_lock, flags); - return ret; -} - -static int -il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr) -{ - if (il->iw_mode == NL80211_IFTYPE_STATION) { - return IL_AP_ID; - } else { - u8 *da = ieee80211_get_DA(hdr); - return il4965_find_station(il, da); - } -} - -/** - * il4965_hdl_tx - Handle standard (non-aggregation) Tx response - */ -static void -il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb) -{ - struct il_rx_pkt *pkt = rxb_addr(rxb); - u16 sequence = le16_to_cpu(pkt->hdr.sequence); - int txq_id = SEQ_TO_QUEUE(sequence); - int idx = SEQ_TO_IDX(sequence); - struct il_tx_queue *txq = &il->txq[txq_id]; - struct ieee80211_hdr *hdr; - struct ieee80211_tx_info *info; - struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; - u32 status = le32_to_cpu(tx_resp->u.status); - int uninitialized_var(tid); - int sta_id; - int freed; - u8 *qc = NULL; - unsigned long flags; - - if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) { - IL_ERR("Read idx for DMA queue txq_id (%d) idx %d " - "is out of range [0-%d] %d %d\n", txq_id, idx, - txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr); - return; - } - - txq->time_stamp = jiffies; - info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); - memset(&info->status, 0, sizeof(info->status)); - - hdr = il_tx_queue_get_hdr(il, txq_id, idx); - if (ieee80211_is_data_qos(hdr->frame_control)) { - qc = ieee80211_get_qos_ctl(hdr); - tid = qc[0] & 0xf; - } - - sta_id = il4965_get_ra_sta_id(il, hdr); - if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) { - IL_ERR("Station not known\n"); - return; - } - - spin_lock_irqsave(&il->sta_lock, flags); - if (txq->sched_retry) { - const u32 scd_ssn = il4965_get_scd_ssn(tx_resp); - struct il_ht_agg *agg = NULL; - WARN_ON(!qc); - - agg = &il->stations[sta_id].tid[tid].agg; - - il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx); - - /* check if BAR is needed */ - if ((tx_resp->frame_count == 1) && - !il4965_is_tx_success(status)) - info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; - - if (txq->q.read_ptr != (scd_ssn & 0xff)) { - idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); - D_TX_REPLY("Retry scheduler reclaim scd_ssn " - "%d idx %d\n", scd_ssn, idx); - freed = 
il4965_tx_queue_reclaim(il, txq_id, idx); - if (qc) - il4965_free_tfds_in_queue(il, sta_id, tid, - freed); - - if (il->mac80211_registered && - il_queue_space(&txq->q) > txq->q.low_mark && - agg->state != IL_EMPTYING_HW_QUEUE_DELBA) - il_wake_queue(il, txq); - } - } else { - info->status.rates[0].count = tx_resp->failure_frame + 1; - info->flags |= il4965_tx_status_to_mac80211(status); - il4965_hwrate_to_tx_control(il, - le32_to_cpu(tx_resp->rate_n_flags), - info); - - D_TX_REPLY("TXQ %d status %s (0x%08x) " - "rate_n_flags 0x%x retries %d\n", txq_id, - il4965_get_tx_fail_reason(status), status, - le32_to_cpu(tx_resp->rate_n_flags), - tx_resp->failure_frame); - - freed = il4965_tx_queue_reclaim(il, txq_id, idx); - if (qc && likely(sta_id != IL_INVALID_STATION)) - il4965_free_tfds_in_queue(il, sta_id, tid, freed); - else if (sta_id == IL_INVALID_STATION) - D_TX_REPLY("Station not known\n"); - - if (il->mac80211_registered && - il_queue_space(&txq->q) > txq->q.low_mark) - il_wake_queue(il, txq); - } - if (qc && likely(sta_id != IL_INVALID_STATION)) - il4965_txq_check_empty(il, sta_id, tid, txq_id); - - il4965_check_abort_status(il, tx_resp->frame_count, status); - - spin_unlock_irqrestore(&il->sta_lock, flags); -} - -/* Set up 4965-specific Rx frame reply handlers */ -static void -il4965_handler_setup(struct il_priv *il) -{ - /* Legacy Rx frames */ - il->handlers[N_RX] = il4965_hdl_rx; - /* Tx response */ - il->handlers[C_TX] = il4965_hdl_tx; -} - -static struct il_hcmd_ops il4965_hcmd = { - .rxon_assoc = il4965_send_rxon_assoc, - .commit_rxon = il4965_commit_rxon, - .set_rxon_chain = il4965_set_rxon_chain, -}; - -static void -il4965_post_scan(struct il_priv *il) -{ - struct il_rxon_context *ctx = &il->ctx; - - /* - * Since setting the RXON may have been deferred while - * performing the scan, fire one off if needed - */ - if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) - il_commit_rxon(il, ctx); -} - -static void -il4965_post_associate(struct il_priv *il) -{ - struct il_rxon_context *ctx = &il->ctx; - struct ieee80211_vif *vif = ctx->vif; - struct ieee80211_conf *conf = NULL; - int ret = 0; - - if (!vif || !il->is_open) - return; - - if (test_bit(S_EXIT_PENDING, &il->status)) - return; - - il_scan_cancel_timeout(il, 200); - - conf = &il->hw->conf; - - ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - il_commit_rxon(il, ctx); - - ret = il_send_rxon_timing(il, ctx); - if (ret) - IL_WARN("RXON timing - " "Attempting to continue.\n"); - - ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; - - il_set_rxon_ht(il, &il->current_ht_config); - - if (il->cfg->ops->hcmd->set_rxon_chain) - il->cfg->ops->hcmd->set_rxon_chain(il, ctx); - - ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid); - - D_ASSOC("assoc id %d beacon interval %d\n", vif->bss_conf.aid, - vif->bss_conf.beacon_int); - - if (vif->bss_conf.use_short_preamble) - ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; - else - ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; - - if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { - if (vif->bss_conf.use_short_slot) - ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; - else - ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; - } - - il_commit_rxon(il, ctx); - - D_ASSOC("Associated as %d to: %pM\n", vif->bss_conf.aid, - ctx->active.bssid_addr); - - switch (vif->type) { - case NL80211_IFTYPE_STATION: - break; - case NL80211_IFTYPE_ADHOC: - il4965_send_beacon_cmd(il); - break; - default: - IL_ERR("%s Should not be called in %d mode\n", __func__, - vif->type); - break; - } - - /* 
the chain noise calibration will enabled PM upon completion - * If chain noise has already been run, then we need to enable - * power management here */ - if (il->chain_noise_data.state == IL_CHAIN_NOISE_DONE) - il_power_update_mode(il, false); - - /* Enable Rx differential gain and sensitivity calibrations */ - il4965_chain_noise_reset(il); - il->start_calib = 1; -} - -static void -il4965_config_ap(struct il_priv *il) -{ - struct il_rxon_context *ctx = &il->ctx; - struct ieee80211_vif *vif = ctx->vif; - int ret = 0; - - lockdep_assert_held(&il->mutex); - - if (test_bit(S_EXIT_PENDING, &il->status)) - return; - - /* The following should be done only at AP bring up */ - if (!il_is_associated_ctx(ctx)) { - - /* RXON - unassoc (to set timing command) */ - ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - il_commit_rxon(il, ctx); - - /* RXON Timing */ - ret = il_send_rxon_timing(il, ctx); - if (ret) - IL_WARN("RXON timing failed - " - "Attempting to continue.\n"); - - /* AP has all antennas */ - il->chain_noise_data.active_chains = il->hw_params.valid_rx_ant; - il_set_rxon_ht(il, &il->current_ht_config); - if (il->cfg->ops->hcmd->set_rxon_chain) - il->cfg->ops->hcmd->set_rxon_chain(il, ctx); - - ctx->staging.assoc_id = 0; - - if (vif->bss_conf.use_short_preamble) - ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; - else - ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; - - if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { - if (vif->bss_conf.use_short_slot) - ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; - else - ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; - } - /* need to send beacon cmd before committing assoc RXON! */ - il4965_send_beacon_cmd(il); - /* restore RXON assoc */ - ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; - il_commit_rxon(il, ctx); - } - il4965_send_beacon_cmd(il); -} - -static struct il_hcmd_utils_ops il4965_hcmd_utils = { - .get_hcmd_size = il4965_get_hcmd_size, - .build_addsta_hcmd = il4965_build_addsta_hcmd, - .request_scan = il4965_request_scan, - .post_scan = il4965_post_scan, -}; - -static struct il_lib_ops il4965_lib = { - .set_hw_params = il4965_hw_set_hw_params, - .txq_update_byte_cnt_tbl = il4965_txq_update_byte_cnt_tbl, - .txq_attach_buf_to_tfd = il4965_hw_txq_attach_buf_to_tfd, - .txq_free_tfd = il4965_hw_txq_free_tfd, - .txq_init = il4965_hw_tx_queue_init, - .handler_setup = il4965_handler_setup, - .is_valid_rtc_data_addr = il4965_hw_valid_rtc_data_addr, - .init_alive_start = il4965_init_alive_start, - .load_ucode = il4965_load_bsm, - .dump_nic_error_log = il4965_dump_nic_error_log, - .dump_fh = il4965_dump_fh, - .set_channel_switch = il4965_hw_channel_switch, - .apm_ops = { - .init = il_apm_init, - .config = il4965_nic_config, - }, - .eeprom_ops = { - .regulatory_bands = { - EEPROM_REGULATORY_BAND_1_CHANNELS, - EEPROM_REGULATORY_BAND_2_CHANNELS, - EEPROM_REGULATORY_BAND_3_CHANNELS, - EEPROM_REGULATORY_BAND_4_CHANNELS, - EEPROM_REGULATORY_BAND_5_CHANNELS, - EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS, - EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS}, - .acquire_semaphore = il4965_eeprom_acquire_semaphore, - .release_semaphore = il4965_eeprom_release_semaphore, - }, - .send_tx_power = il4965_send_tx_power, - .update_chain_flags = il4965_update_chain_flags, - .temp_ops = { - .temperature = il4965_temperature_calib, - }, -#ifdef CONFIG_IWLEGACY_DEBUGFS - .debugfs_ops = { - .rx_stats_read = il4965_ucode_rx_stats_read, - .tx_stats_read = il4965_ucode_tx_stats_read, - .general_stats_read = il4965_ucode_general_stats_read, - }, -#endif -}; - 
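Editor's aside (not part of the patch): the shared iwlegacy core never calls these 4965 routines by name; it reaches them through per-device ops tables such as il_lib_ops above (compare the il->cfg->ops->hcmd->set_rxon_chain(il, ctx) calls in il4965_post_associate() and il4965_config_ap()). A minimal, self-contained sketch of that dispatch style follows, using invented demo_* names rather than real driver symbols.

#include <stdio.h>

/* Invented miniature of the ops-table dispatch pattern used by the core. */
struct demo_priv;

struct demo_lib_ops {
	int (*send_tx_power)(struct demo_priv *il);
};

struct demo_priv {
	const struct demo_lib_ops *lib;	/* filled in per hardware type */
	int tx_power;
};

static int demo_4965_send_tx_power(struct demo_priv *il)
{
	printf("4965-specific handler: tx power %d\n", il->tx_power);
	return 0;
}

static const struct demo_lib_ops demo_4965_lib = {
	.send_tx_power = demo_4965_send_tx_power,
};

int main(void)
{
	struct demo_priv il = { .lib = &demo_4965_lib, .tx_power = 14 };

	/* core code knows only the ops table, not the 4965 symbol itself */
	return il.lib->send_tx_power(&il);
}
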
-static const struct il_legacy_ops il4965_legacy_ops = { - .post_associate = il4965_post_associate, - .config_ap = il4965_config_ap, - .manage_ibss_station = il4965_manage_ibss_station, - .update_bcast_stations = il4965_update_bcast_stations, -}; - -struct ieee80211_ops il4965_hw_ops = { - .tx = il4965_mac_tx, - .start = il4965_mac_start, - .stop = il4965_mac_stop, - .add_interface = il_mac_add_interface, - .remove_interface = il_mac_remove_interface, - .change_interface = il_mac_change_interface, - .config = il_mac_config, - .configure_filter = il4965_configure_filter, - .set_key = il4965_mac_set_key, - .update_tkip_key = il4965_mac_update_tkip_key, - .conf_tx = il_mac_conf_tx, - .reset_tsf = il_mac_reset_tsf, - .bss_info_changed = il_mac_bss_info_changed, - .ampdu_action = il4965_mac_ampdu_action, - .hw_scan = il_mac_hw_scan, - .sta_add = il4965_mac_sta_add, - .sta_remove = il_mac_sta_remove, - .channel_switch = il4965_mac_channel_switch, - .tx_last_beacon = il_mac_tx_last_beacon, -}; - -static const struct il_ops il4965_ops = { - .lib = &il4965_lib, - .hcmd = &il4965_hcmd, - .utils = &il4965_hcmd_utils, - .led = &il4965_led_ops, - .legacy = &il4965_legacy_ops, - .ieee80211_ops = &il4965_hw_ops, -}; - -static struct il_base_params il4965_base_params = { - .eeprom_size = IL4965_EEPROM_IMG_SIZE, - .num_of_queues = IL49_NUM_QUEUES, - .num_of_ampdu_queues = IL49_NUM_AMPDU_QUEUES, - .pll_cfg_val = 0, - .set_l0s = true, - .use_bsm = true, - .led_compensation = 61, - .chain_noise_num_beacons = IL4965_CAL_NUM_BEACONS, - .wd_timeout = IL_DEF_WD_TIMEOUT, - .temperature_kelvin = true, - .ucode_tracing = true, - .sensitivity_calib_by_driver = true, - .chain_noise_calib_by_driver = true, -}; - -struct il_cfg il4965_cfg = { - .name = "Intel(R) Wireless WiFi Link 4965AGN", - .fw_name_pre = IL4965_FW_PRE, - .ucode_api_max = IL4965_UCODE_API_MAX, - .ucode_api_min = IL4965_UCODE_API_MIN, - .sku = IL_SKU_A | IL_SKU_G | IL_SKU_N, - .valid_tx_ant = ANT_AB, - .valid_rx_ant = ANT_ABC, - .eeprom_ver = EEPROM_4965_EEPROM_VERSION, - .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION, - .ops = &il4965_ops, - .mod_params = &il4965_mod_params, - .base_params = &il4965_base_params, - .led_mode = IL_LED_BLINK, - /* - * Force use of chains B and C for scan RX on 5 GHz band - * because the device has off-channel reception on chain A. - */ - .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC, -}; - -/* Module firmware */ -MODULE_FIRMWARE(IL4965_MODULE_FIRMWARE(IL4965_UCODE_API_MAX)); diff --git a/trunk/drivers/net/wireless/iwlegacy/4965.h b/trunk/drivers/net/wireless/iwlegacy/4965.h deleted file mode 100644 index f280e0161b17..000000000000 --- a/trunk/drivers/net/wireless/iwlegacy/4965.h +++ /dev/null @@ -1,1301 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#ifndef __il_4965_h__ -#define __il_4965_h__ - -struct il_rx_queue; -struct il_rx_buf; -struct il_rx_pkt; -struct il_tx_queue; -struct il_rxon_context; - -/* configuration for the _4965 devices */ -extern struct il_cfg il4965_cfg; - -extern struct il_mod_params il4965_mod_params; - -extern struct ieee80211_ops il4965_hw_ops; - -/* tx queue */ -void il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, - int freed); - -/* RXON */ -void il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx); - -/* uCode */ -int il4965_verify_ucode(struct il_priv *il); - -/* lib */ -void il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status); - -void il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq); -int il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq); -int il4965_hw_nic_init(struct il_priv *il); -int il4965_dump_fh(struct il_priv *il, char **buf, bool display); - -/* rx */ -void il4965_rx_queue_restock(struct il_priv *il); -void il4965_rx_replenish(struct il_priv *il); -void il4965_rx_replenish_now(struct il_priv *il); -void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq); -int il4965_rxq_stop(struct il_priv *il); -int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); -void il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb); -void il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb); -void il4965_rx_handle(struct il_priv *il); - -/* tx */ -void il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq); -int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq, - dma_addr_t addr, u16 len, u8 reset, u8 pad); -int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq); -void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags, - struct ieee80211_tx_info *info); -int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb); -int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid, u16 * ssn); -int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid); -int il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id); -void il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb); -int il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx); -void il4965_hw_txq_ctx_free(struct il_priv *il); -int il4965_txq_ctx_alloc(struct il_priv *il); -void il4965_txq_ctx_reset(struct il_priv *il); -void il4965_txq_ctx_stop(struct il_priv *il); -void il4965_txq_set_sched(struct il_priv *il, u32 mask); - -/* - * Acquire il->lock before calling this function ! 
- */ -void il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx); -/** - * il4965_tx_queue_set_status - (optionally) start Tx/Cmd queue - * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed - * @scd_retry: (1) Indicates queue will be used in aggregation mode - * - * NOTE: Acquire il->lock before calling this function ! - */ -void il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq, - int tx_fifo_id, int scd_retry); - -/* rx */ -void il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb); -bool il4965_good_plcp_health(struct il_priv *il, struct il_rx_pkt *pkt); -void il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb); -void il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb); - -/* scan */ -int il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif); - -/* station mgmt */ -int il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif, - bool add); - -/* hcmd */ -int il4965_send_beacon_cmd(struct il_priv *il); - -#ifdef CONFIG_IWLEGACY_DEBUG -const char *il4965_get_tx_fail_reason(u32 status); -#else -static inline const char * -il4965_get_tx_fail_reason(u32 status) -{ - return ""; -} -#endif - -/* station management */ -int il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx); -int il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx, - const u8 *addr, u8 *sta_id_r); -int il4965_remove_default_wep_key(struct il_priv *il, - struct il_rxon_context *ctx, - struct ieee80211_key_conf *key); -int il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx, - struct ieee80211_key_conf *key); -int il4965_restore_default_wep_keys(struct il_priv *il, - struct il_rxon_context *ctx); -int il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx, - struct ieee80211_key_conf *key, u8 sta_id); -int il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx, - struct ieee80211_key_conf *key, u8 sta_id); -void il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx, - struct ieee80211_key_conf *keyconf, - struct ieee80211_sta *sta, u32 iv32, - u16 *phase1key); -int il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid); -int il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, - int tid, u16 ssn); -int il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, - int tid); -void il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt); -int il4965_update_bcast_stations(struct il_priv *il); - -/* rate */ -static inline u8 -il4965_hw_get_rate(__le32 rate_n_flags) -{ - return le32_to_cpu(rate_n_flags) & 0xFF; -} - -/* eeprom */ -void il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac); -int il4965_eeprom_acquire_semaphore(struct il_priv *il); -void il4965_eeprom_release_semaphore(struct il_priv *il); -int il4965_eeprom_check_version(struct il_priv *il); - -/* mac80211 handlers (for 4965) */ -void il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); -int il4965_mac_start(struct ieee80211_hw *hw); -void il4965_mac_stop(struct ieee80211_hw *hw); -void il4965_configure_filter(struct ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *total_flags, u64 multicast); -int il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, - struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_key_conf *key); -void il4965_mac_update_tkip_key(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct 
ieee80211_key_conf *keyconf, - struct ieee80211_sta *sta, u32 iv32, - u16 *phase1key); -int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 * ssn, - u8 buf_size); -int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); -void il4965_mac_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_channel_switch *ch_switch); - -void il4965_led_enable(struct il_priv *il); - -/* EEPROM */ -#define IL4965_EEPROM_IMG_SIZE 1024 - -/* - * uCode queue management definitions ... - * The first queue used for block-ack aggregation is #7 (4965 only). - * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7. - */ -#define IL49_FIRST_AMPDU_QUEUE 7 - -/* Sizes and addresses for instruction and data memory (SRAM) in - * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */ -#define IL49_RTC_INST_LOWER_BOUND (0x000000) -#define IL49_RTC_INST_UPPER_BOUND (0x018000) - -#define IL49_RTC_DATA_LOWER_BOUND (0x800000) -#define IL49_RTC_DATA_UPPER_BOUND (0x80A000) - -#define IL49_RTC_INST_SIZE (IL49_RTC_INST_UPPER_BOUND - \ - IL49_RTC_INST_LOWER_BOUND) -#define IL49_RTC_DATA_SIZE (IL49_RTC_DATA_UPPER_BOUND - \ - IL49_RTC_DATA_LOWER_BOUND) - -#define IL49_MAX_INST_SIZE IL49_RTC_INST_SIZE -#define IL49_MAX_DATA_SIZE IL49_RTC_DATA_SIZE - -/* Size of uCode instruction memory in bootstrap state machine */ -#define IL49_MAX_BSM_SIZE BSM_SRAM_SIZE - -static inline int -il4965_hw_valid_rtc_data_addr(u32 addr) -{ - return (addr >= IL49_RTC_DATA_LOWER_BOUND && - addr < IL49_RTC_DATA_UPPER_BOUND); -} - -/********************* START TEMPERATURE *************************************/ - -/** - * 4965 temperature calculation. - * - * The driver must calculate the device temperature before calculating - * a txpower setting (amplifier gain is temperature dependent). The - * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration - * values used for the life of the driver, and one of which (R4) is the - * real-time temperature indicator. - * - * uCode provides all 4 values to the driver via the "initialize alive" - * notification (see struct il4965_init_alive_resp). After the runtime uCode - * image loads, uCode updates the R4 value via stats notifications - * (see N_STATS), which occur after each received beacon - * when associated, or can be requested via C_STATS. - * - * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver - * must sign-extend to 32 bits before applying formula below. - * - * Formula: - * - * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8 - * - * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is - * an additional correction, which should be centered around 0 degrees - * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for - * centering the 97/100 correction around 0 degrees K. - * - * Add 273 to Kelvin value to find degrees Celsius, for comparing current - * temperature with factory-measured temperatures when calculating txpower - * settings. 
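Editor's aside (not part of the patch): for readers following the formula above, a standalone sketch of the same arithmetic may help. It uses made-up calibration numbers (no real EEPROM or "initialize alive" data) and hard-codes 259 and 8, which correspond to TEMPERATURE_CALIB_A_VAL and TEMPERATURE_CALIB_KELVIN_OFFSET defined just below. Celsius is obtained by subtracting 273 from the Kelvin result, as the driver's KELVIN_TO_CELSIUS() helper does.

#include <stdio.h>

/* Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8, with R4 already
 * sign-extended from 23 bits, as the driver does before applying the formula.
 */
static int temp_kelvin(int r1, int r2, int r3, int r4)
{
	if (r3 == r1)	/* the driver reports this as a calibration conflict */
		return -1;
	return (97 * 259 * (r4 - r2) / (r3 - r1)) / 100 + 8;
}

int main(void)
{
	/* made-up calibration values, chosen only to land in a plausible range */
	int k = temp_kelvin(25, -100, 284, 200);

	printf("%d K = %d C\n", k, k - 273);	/* prints 299 K = 26 C */
	return 0;
}
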
- */ -#define TEMPERATURE_CALIB_KELVIN_OFFSET 8 -#define TEMPERATURE_CALIB_A_VAL 259 - -/* Limit range of calculated temperature to be between these Kelvin values */ -#define IL_TX_POWER_TEMPERATURE_MIN (263) -#define IL_TX_POWER_TEMPERATURE_MAX (410) - -#define IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \ - ((t) < IL_TX_POWER_TEMPERATURE_MIN || \ - (t) > IL_TX_POWER_TEMPERATURE_MAX) - -/********************* END TEMPERATURE ***************************************/ - -/********************* START TXPOWER *****************************************/ - -/** - * 4965 txpower calculations rely on information from three sources: - * - * 1) EEPROM - * 2) "initialize" alive notification - * 3) stats notifications - * - * EEPROM data consists of: - * - * 1) Regulatory information (max txpower and channel usage flags) is provided - * separately for each channel that can possibly supported by 4965. - * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz - * (legacy) channels. - * - * See struct il4965_eeprom_channel for format, and struct il4965_eeprom - * for locations in EEPROM. - * - * 2) Factory txpower calibration information is provided separately for - * sub-bands of contiguous channels. 2.4GHz has just one sub-band, - * but 5 GHz has several sub-bands. - * - * In addition, per-band (2.4 and 5 Ghz) saturation txpowers are provided. - * - * See struct il4965_eeprom_calib_info (and the tree of structures - * contained within it) for format, and struct il4965_eeprom for - * locations in EEPROM. - * - * "Initialization alive" notification (see struct il4965_init_alive_resp) - * consists of: - * - * 1) Temperature calculation parameters. - * - * 2) Power supply voltage measurement. - * - * 3) Tx gain compensation to balance 2 transmitters for MIMO use. - * - * Statistics notifications deliver: - * - * 1) Current values for temperature param R4. - */ - -/** - * To calculate a txpower setting for a given desired target txpower, channel, - * modulation bit rate, and transmitter chain (4965 has 2 transmitters to - * support MIMO and transmit diversity), driver must do the following: - * - * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel. - * Do not exceed regulatory limit; reduce target txpower if necessary. - * - * If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31), - * 2 transmitters will be used simultaneously; driver must reduce the - * regulatory limit by 3 dB (half-power) for each transmitter, so the - * combined total output of the 2 transmitters is within regulatory limits. - * - * - * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by - * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]); - * reduce target txpower if necessary. - * - * Backoff values below are in 1/2 dB units (equivalent to steps in - * txpower gain tables): - * - * OFDM 6 - 36 MBit: 10 steps (5 dB) - * OFDM 48 MBit: 15 steps (7.5 dB) - * OFDM 54 MBit: 17 steps (8.5 dB) - * OFDM 60 MBit: 20 steps (10 dB) - * CCK all rates: 10 steps (5 dB) - * - * Backoff values apply to saturation txpower on a per-transmitter basis; - * when using MIMO (2 transmitters), each transmitter uses the same - * saturation level provided in EEPROM, and the same backoff values; - * no reduction (such as with regulatory txpower limits) is required. - * - * Saturation and Backoff values apply equally to 20 Mhz (legacy) channel - * widths and 40 Mhz (.11n HT40) channel widths; there is no separate - * factory measurement for ht40 channels. 
- * - * The result of this step is the final target txpower. The rest of - * the steps figure out the proper settings for the device to achieve - * that target txpower. - * - * - * 3) Determine (EEPROM) calibration sub band for the target channel, by - * comparing against first and last channels in each sub band - * (see struct il4965_eeprom_calib_subband_info). - * - * - * 4) Linearly interpolate (EEPROM) factory calibration measurement sets, - * referencing the 2 factory-measured (sample) channels within the sub band. - * - * Interpolation is based on difference between target channel's frequency - * and the sample channels' frequencies. Since channel numbers are based - * on frequency (5 MHz between each channel number), this is equivalent - * to interpolating based on channel number differences. - * - * Note that the sample channels may or may not be the channels at the - * edges of the sub band. The target channel may be "outside" of the - * span of the sampled channels. - * - * Driver may choose the pair (for 2 Tx chains) of measurements (see - * struct il4965_eeprom_calib_ch_info) for which the actual measured - * txpower comes closest to the desired txpower. Usually, though, - * the middle set of measurements is closest to the regulatory limits, - * and is therefore a good choice for all txpower calculations (this - * assumes that high accuracy is needed for maximizing legal txpower, - * while lower txpower configurations do not need as much accuracy). - * - * Driver should interpolate both members of the chosen measurement pair, - * i.e. for both Tx chains (radio transmitters), unless the driver knows - * that only one of the chains will be used (e.g. only one tx antenna - * connected, but this should be unusual). The rate scaling algorithm - * switches antennas to find best performance, so both Tx chains will - * be used (although only one at a time) even for non-MIMO transmissions. - * - * Driver should interpolate factory values for temperature, gain table - * idx, and actual power. The power amplifier detector values are - * not used by the driver. - * - * Sanity check: If the target channel happens to be one of the sample - * channels, the results should agree with the sample channel's - * measurements! - * - * - * 5) Find difference between desired txpower and (interpolated) - * factory-measured txpower. Using (interpolated) factory gain table idx - * (shown elsewhere) as a starting point, adjust this idx lower to - * increase txpower, or higher to decrease txpower, until the target - * txpower is reached. Each step in the gain table is 1/2 dB. - * - * For example, if factory measured txpower is 16 dBm, and target txpower - * is 13 dBm, add 6 steps to the factory gain idx to reduce txpower - * by 3 dB. - * - * - * 6) Find difference between current device temperature and (interpolated) - * factory-measured temperature for sub-band. Factory values are in - * degrees Celsius. To calculate current temperature, see comments for - * "4965 temperature calculation". - * - * If current temperature is higher than factory temperature, driver must - * increase gain (lower gain table idx), and vice verse. - * - * Temperature affects gain differently for different channels: - * - * 2.4 GHz all channels: 3.5 degrees per half-dB step - * 5 GHz channels 34-43: 4.5 degrees per half-dB step - * 5 GHz channels >= 44: 4.0 degrees per half-dB step - * - * NOTE: Temperature can increase rapidly when transmitting, especially - * with heavy traffic at high txpowers. 
Driver should update - * temperature calculations often under these conditions to - * maintain strong txpower in the face of rising temperature. - * - * - * 7) Find difference between current power supply voltage indicator - * (from "initialize alive") and factory-measured power supply voltage - * indicator (EEPROM). - * - * If the current voltage is higher (indicator is lower) than factory - * voltage, gain should be reduced (gain table idx increased) by: - * - * (eeprom - current) / 7 - * - * If the current voltage is lower (indicator is higher) than factory - * voltage, gain should be increased (gain table idx decreased) by: - * - * 2 * (current - eeprom) / 7 - * - * If number of idx steps in either direction turns out to be > 2, - * something is wrong ... just use 0. - * - * NOTE: Voltage compensation is independent of band/channel. - * - * NOTE: "Initialize" uCode measures current voltage, which is assumed - * to be constant after this initial measurement. Voltage - * compensation for txpower (number of steps in gain table) - * may be calculated once and used until the next uCode bootload. - * - * - * 8) If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31), - * adjust txpower for each transmitter chain, so txpower is balanced - * between the two chains. There are 5 pairs of tx_atten[group][chain] - * values in "initialize alive", one pair for each of 5 channel ranges: - * - * Group 0: 5 GHz channel 34-43 - * Group 1: 5 GHz channel 44-70 - * Group 2: 5 GHz channel 71-124 - * Group 3: 5 GHz channel 125-200 - * Group 4: 2.4 GHz all channels - * - * Add the tx_atten[group][chain] value to the idx for the target chain. - * The values are signed, but are in pairs of 0 and a non-negative number, - * so as to reduce gain (if necessary) of the "hotter" channel. This - * avoids any need to double-check for regulatory compliance after - * this step. - * - * - * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation - * value to the idx: - * - * Hardware rev B: 9 steps (4.5 dB) - * Hardware rev C: 5 steps (2.5 dB) - * - * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG, - * bits [3:2], 1 = B, 2 = C. - * - * NOTE: This compensation is in addition to any saturation backoff that - * might have been applied in an earlier step. - * - * - * 10) Select the gain table, based on band (2.4 vs 5 GHz). - * - * Limit the adjusted idx to stay within the table! - * - * - * 11) Read gain table entries for DSP and radio gain, place into appropriate - * location(s) in command (struct il4965_txpowertable_cmd). - */ - -/** - * When MIMO is used (2 transmitters operating simultaneously), driver should - * limit each transmitter to deliver a max of 3 dB below the regulatory limit - * for the device. That is, use half power for each transmitter, so total - * txpower is within regulatory limits. - * - * The value "6" represents number of steps in gain table to reduce power 3 dB. - * Each step is 1/2 dB. - */ -#define IL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6) - -/** - * CCK gain compensation. - * - * When calculating txpowers for CCK, after making sure that the target power - * is within regulatory and saturation limits, driver must additionally - * back off gain by adding these values to the gain table idx. - * - * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG, - * bits [3:2], 1 = B, 2 = C. 
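Editor's aside (not part of the patch): the steps above boil down to a short piece of integer arithmetic. The sketch below is a standalone condensation of what il4965_fill_txpower_tbl(), removed earlier in this patch, does per rate and per Tx chain. The inputs are hypothetical and given in half-dB units (one gain-table step), and literal constants (6 steps for MIMO, +9 for the 5 GHz table extension, 5 steps for CCK on hardware rev C, 0..107 table bounds) stand in for the defines near this point. The real driver additionally enforces a per-rate minimum idx via get_min_power_idx(); here the clamp is simply 0..107.

#include <stdio.h>

/* All values in half-dB steps. Returns the gain-table idx to program. */
static int tx_gain_idx(int user_target, int reg_limit, int saturation,
		       int backoff, int factory_idx, int factory_pwr,
		       int temp_comp, int volt_comp, int tx_atten,
		       int is_mimo, int is_cck, int is_5ghz)
{
	int limit, target, idx;

	/* step 1: regulatory limit, minus 3 dB (6 steps) per chain for MIMO */
	limit = is_mimo ? reg_limit - 6 : reg_limit;

	/* step 2: never exceed saturation power minus the per-rate backoff */
	if (limit > saturation - backoff)
		limit = saturation - backoff;

	/* reduce the user's request to the limit if necessary */
	target = user_target < limit ? user_target : limit;

	/* steps 5-8: start from the (interpolated) factory gain idx, then
	 * compensate for temperature, voltage and MIMO chain imbalance */
	idx = factory_idx - (target - factory_pwr)
	      - temp_comp - volt_comp + tx_atten;

	/* the 5 GHz table has 9 extra "negative" entries at its top */
	if (is_5ghz)
		idx += 9;

	/* step 9: CCK needs extra attenuation (5 steps, hardware rev C) */
	if (is_cck)
		idx += 5;

	/* step 10: stay within the gain table */
	if (idx < 0)
		idx = 0;
	if (idx > 107)
		idx = 107;
	return idx;
}

int main(void)
{
	/* hypothetical half-dB inputs, not real EEPROM or calibration data */
	printf("gain idx = %d\n",
	       tx_gain_idx(28, 34, 38, 10, 30, 32, 2, 0, 0, 0, 0, 1));
	return 0;	/* prints gain idx = 41 */
}
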
- */ -#define IL_TX_POWER_CCK_COMPENSATION_B_STEP (9) -#define IL_TX_POWER_CCK_COMPENSATION_C_STEP (5) - -/* - * 4965 power supply voltage compensation for txpower - */ -#define TX_POWER_IL_VOLTAGE_CODES_PER_03V (7) - -/** - * Gain tables. - * - * The following tables contain pair of values for setting txpower, i.e. - * gain settings for the output of the device's digital signal processor (DSP), - * and for the analog gain structure of the transmitter. - * - * Each entry in the gain tables represents a step of 1/2 dB. Note that these - * are *relative* steps, not indications of absolute output power. Output - * power varies with temperature, voltage, and channel frequency, and also - * requires consideration of average power (to satisfy regulatory constraints), - * and peak power (to avoid distortion of the output signal). - * - * Each entry contains two values: - * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained - * linear value that multiplies the output of the digital signal processor, - * before being sent to the analog radio. - * 2) Radio gain. This sets the analog gain of the radio Tx path. - * It is a coarser setting, and behaves in a logarithmic (dB) fashion. - * - * EEPROM contains factory calibration data for txpower. This maps actual - * measured txpower levels to gain settings in the "well known" tables - * below ("well-known" means here that both factory calibration *and* the - * driver work with the same table). - * - * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table - * has an extension (into negative idxes), in case the driver needs to - * boost power setting for high device temperatures (higher than would be - * present during factory calibration). A 5 Ghz EEPROM idx of "40" - * corresponds to the 49th entry in the table used by the driver. 
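Editor's aside (not part of the patch): concretely, each entry in the two tables that follow is just a (DSP gain, radio gain) pair, and the idx computed above selects one pair per chain; the removed il4965_fill_txpower_tbl() copies entry.radio and entry.dsp into the txpower command. A toy illustration, with the first six 2.4 GHz entries transcribed from the listing below and otherwise invented names:

#include <stdio.h>

struct gain_entry {
	unsigned char dsp;	/* fine-grained DSP gain/attenuation */
	unsigned char radio;	/* coarse analog radio gain */
};

/* entries 0-5 of the 2.4 GHz table listed below */
static const struct gain_entry gain_24ghz[] = {
	{ 110, 0x3f }, { 104, 0x3f }, { 98, 0x3f },
	{ 110, 0x3e }, { 104, 0x3e }, { 98, 0x3e },
};

int main(void)
{
	int eeprom_idx = 40;

	/* 5 GHz idxes run -9..98; the driver's table slot is idx + 9, so an
	 * EEPROM idx of 40 lands at slot 49, as noted above. */
	printf("5 GHz EEPROM idx %d -> table slot %d\n",
	       eeprom_idx, eeprom_idx + 9);
	printf("2.4 GHz slot 3: dsp %u, radio 0x%02x\n",
	       gain_24ghz[3].dsp, gain_24ghz[3].radio);
	return 0;
}
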
- */ -#define MIN_TX_GAIN_IDX (0) /* highest gain, lowest idx, 2.4 */ -#define MIN_TX_GAIN_IDX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */ - -/** - * 2.4 GHz gain table - * - * Index Dsp gain Radio gain - * 0 110 0x3f (highest gain) - * 1 104 0x3f - * 2 98 0x3f - * 3 110 0x3e - * 4 104 0x3e - * 5 98 0x3e - * 6 110 0x3d - * 7 104 0x3d - * 8 98 0x3d - * 9 110 0x3c - * 10 104 0x3c - * 11 98 0x3c - * 12 110 0x3b - * 13 104 0x3b - * 14 98 0x3b - * 15 110 0x3a - * 16 104 0x3a - * 17 98 0x3a - * 18 110 0x39 - * 19 104 0x39 - * 20 98 0x39 - * 21 110 0x38 - * 22 104 0x38 - * 23 98 0x38 - * 24 110 0x37 - * 25 104 0x37 - * 26 98 0x37 - * 27 110 0x36 - * 28 104 0x36 - * 29 98 0x36 - * 30 110 0x35 - * 31 104 0x35 - * 32 98 0x35 - * 33 110 0x34 - * 34 104 0x34 - * 35 98 0x34 - * 36 110 0x33 - * 37 104 0x33 - * 38 98 0x33 - * 39 110 0x32 - * 40 104 0x32 - * 41 98 0x32 - * 42 110 0x31 - * 43 104 0x31 - * 44 98 0x31 - * 45 110 0x30 - * 46 104 0x30 - * 47 98 0x30 - * 48 110 0x6 - * 49 104 0x6 - * 50 98 0x6 - * 51 110 0x5 - * 52 104 0x5 - * 53 98 0x5 - * 54 110 0x4 - * 55 104 0x4 - * 56 98 0x4 - * 57 110 0x3 - * 58 104 0x3 - * 59 98 0x3 - * 60 110 0x2 - * 61 104 0x2 - * 62 98 0x2 - * 63 110 0x1 - * 64 104 0x1 - * 65 98 0x1 - * 66 110 0x0 - * 67 104 0x0 - * 68 98 0x0 - * 69 97 0 - * 70 96 0 - * 71 95 0 - * 72 94 0 - * 73 93 0 - * 74 92 0 - * 75 91 0 - * 76 90 0 - * 77 89 0 - * 78 88 0 - * 79 87 0 - * 80 86 0 - * 81 85 0 - * 82 84 0 - * 83 83 0 - * 84 82 0 - * 85 81 0 - * 86 80 0 - * 87 79 0 - * 88 78 0 - * 89 77 0 - * 90 76 0 - * 91 75 0 - * 92 74 0 - * 93 73 0 - * 94 72 0 - * 95 71 0 - * 96 70 0 - * 97 69 0 - * 98 68 0 - */ - -/** - * 5 GHz gain table - * - * Index Dsp gain Radio gain - * -9 123 0x3F (highest gain) - * -8 117 0x3F - * -7 110 0x3F - * -6 104 0x3F - * -5 98 0x3F - * -4 110 0x3E - * -3 104 0x3E - * -2 98 0x3E - * -1 110 0x3D - * 0 104 0x3D - * 1 98 0x3D - * 2 110 0x3C - * 3 104 0x3C - * 4 98 0x3C - * 5 110 0x3B - * 6 104 0x3B - * 7 98 0x3B - * 8 110 0x3A - * 9 104 0x3A - * 10 98 0x3A - * 11 110 0x39 - * 12 104 0x39 - * 13 98 0x39 - * 14 110 0x38 - * 15 104 0x38 - * 16 98 0x38 - * 17 110 0x37 - * 18 104 0x37 - * 19 98 0x37 - * 20 110 0x36 - * 21 104 0x36 - * 22 98 0x36 - * 23 110 0x35 - * 24 104 0x35 - * 25 98 0x35 - * 26 110 0x34 - * 27 104 0x34 - * 28 98 0x34 - * 29 110 0x33 - * 30 104 0x33 - * 31 98 0x33 - * 32 110 0x32 - * 33 104 0x32 - * 34 98 0x32 - * 35 110 0x31 - * 36 104 0x31 - * 37 98 0x31 - * 38 110 0x30 - * 39 104 0x30 - * 40 98 0x30 - * 41 110 0x25 - * 42 104 0x25 - * 43 98 0x25 - * 44 110 0x24 - * 45 104 0x24 - * 46 98 0x24 - * 47 110 0x23 - * 48 104 0x23 - * 49 98 0x23 - * 50 110 0x22 - * 51 104 0x18 - * 52 98 0x18 - * 53 110 0x17 - * 54 104 0x17 - * 55 98 0x17 - * 56 110 0x16 - * 57 104 0x16 - * 58 98 0x16 - * 59 110 0x15 - * 60 104 0x15 - * 61 98 0x15 - * 62 110 0x14 - * 63 104 0x14 - * 64 98 0x14 - * 65 110 0x13 - * 66 104 0x13 - * 67 98 0x13 - * 68 110 0x12 - * 69 104 0x08 - * 70 98 0x08 - * 71 110 0x07 - * 72 104 0x07 - * 73 98 0x07 - * 74 110 0x06 - * 75 104 0x06 - * 76 98 0x06 - * 77 110 0x05 - * 78 104 0x05 - * 79 98 0x05 - * 80 110 0x04 - * 81 104 0x04 - * 82 98 0x04 - * 83 110 0x03 - * 84 104 0x03 - * 85 98 0x03 - * 86 110 0x02 - * 87 104 0x02 - * 88 98 0x02 - * 89 110 0x01 - * 90 104 0x01 - * 91 98 0x01 - * 92 110 0x00 - * 93 104 0x00 - * 94 98 0x00 - * 95 93 0x00 - * 96 88 0x00 - * 97 83 0x00 - * 98 78 0x00 - */ - -/** - * Sanity checks and default values for EEPROM regulatory levels. - * If EEPROM values fall outside MIN/MAX range, use default values. 
- * - * Regulatory limits refer to the maximum average txpower allowed by - * regulatory agencies in the geographies in which the device is meant - * to be operated. These limits are SKU-specific (i.e. geography-specific), - * and channel-specific; each channel has an individual regulatory limit - * listed in the EEPROM. - * - * Units are in half-dBm (i.e. "34" means 17 dBm). - */ -#define IL_TX_POWER_DEFAULT_REGULATORY_24 (34) -#define IL_TX_POWER_DEFAULT_REGULATORY_52 (34) -#define IL_TX_POWER_REGULATORY_MIN (0) -#define IL_TX_POWER_REGULATORY_MAX (34) - -/** - * Sanity checks and default values for EEPROM saturation levels. - * If EEPROM values fall outside MIN/MAX range, use default values. - * - * Saturation is the highest level that the output power amplifier can produce - * without significant clipping distortion. This is a "peak" power level. - * Different types of modulation (i.e. various "rates", and OFDM vs. CCK) - * require differing amounts of backoff, relative to their average power output, - * in order to avoid clipping distortion. - * - * Driver must make sure that it is violating neither the saturation limit, - * nor the regulatory limit, when calculating Tx power settings for various - * rates. - * - * Units are in half-dBm (i.e. "38" means 19 dBm). - */ -#define IL_TX_POWER_DEFAULT_SATURATION_24 (38) -#define IL_TX_POWER_DEFAULT_SATURATION_52 (38) -#define IL_TX_POWER_SATURATION_MIN (20) -#define IL_TX_POWER_SATURATION_MAX (50) - -/** - * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance) - * and thermal Txpower calibration. - * - * When calculating txpower, driver must compensate for current device - * temperature; higher temperature requires higher gain. Driver must calculate - * current temperature (see "4965 temperature calculation"), then compare vs. - * factory calibration temperature in EEPROM; if current temperature is higher - * than factory temperature, driver must *increase* gain by proportions shown - * in table below. If current temperature is lower than factory, driver must - * *decrease* gain. - * - * Different frequency ranges require different compensation, as shown below. - */ -/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */ -#define CALIB_IL_TX_ATTEN_GR1_FCH 34 -#define CALIB_IL_TX_ATTEN_GR1_LCH 43 - -/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */ -#define CALIB_IL_TX_ATTEN_GR2_FCH 44 -#define CALIB_IL_TX_ATTEN_GR2_LCH 70 - -/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */ -#define CALIB_IL_TX_ATTEN_GR3_FCH 71 -#define CALIB_IL_TX_ATTEN_GR3_LCH 124 - -/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */ -#define CALIB_IL_TX_ATTEN_GR4_FCH 125 -#define CALIB_IL_TX_ATTEN_GR4_LCH 200 - -/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. */ -#define CALIB_IL_TX_ATTEN_GR5_FCH 1 -#define CALIB_IL_TX_ATTEN_GR5_LCH 20 - -enum { - CALIB_CH_GROUP_1 = 0, - CALIB_CH_GROUP_2 = 1, - CALIB_CH_GROUP_3 = 2, - CALIB_CH_GROUP_4 = 3, - CALIB_CH_GROUP_5 = 4, - CALIB_CH_GROUP_MAX -}; - -/********************* END TXPOWER *****************************************/ - -/** - * Tx/Rx Queues - * - * Most communication between driver and 4965 is via queues of data buffers. - * For example, all commands that the driver issues to device's embedded - * controller (uCode) are via the command queue (one of the Tx queues). All - * uCode command responses/replies/notifications, including Rx frames, are - * conveyed from uCode to driver via the Rx queue. 
- * - * Most support for these queues, including handshake support, resides in - * structures in host DRAM, shared between the driver and the device. When - * allocating this memory, the driver must make sure that data written by - * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's - * cache memory), so DRAM and cache are consistent, and the device can - * immediately see changes made by the driver. - * - * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via - * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array - * in DRAM containing 256 Transmit Frame Descriptors (TFDs). - */ -#define IL49_NUM_FIFOS 7 -#define IL49_CMD_FIFO_NUM 4 -#define IL49_NUM_QUEUES 16 -#define IL49_NUM_AMPDU_QUEUES 8 - -/** - * struct il4965_schedq_bc_tbl - * - * Byte Count table - * - * Each Tx queue uses a byte-count table containing 320 entries: - * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that - * duplicate the first 64 entries (to avoid wrap-around within a Tx win; - * max Tx win is 64 TFDs). - * - * When driver sets up a new TFD, it must also enter the total byte count - * of the frame to be transmitted into the corresponding entry in the byte - * count table for the chosen Tx queue. If the TFD idx is 0-63, the driver - * must duplicate the byte count entry in corresponding idx 256-319. - * - * padding puts each byte count table on a 1024-byte boundary; - * 4965 assumes tables are separated by 1024 bytes. - */ -struct il4965_scd_bc_tbl { - __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; - u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)]; -} __packed; - -#define IL4965_RTC_INST_LOWER_BOUND (0x000000) - -/* RSSI to dBm */ -#define IL4965_RSSI_OFFSET 44 - -/* PCI registers */ -#define PCI_CFG_RETRY_TIMEOUT 0x041 - -/* PCI register values */ -#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01 -#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02 - -#define IL4965_DEFAULT_TX_RETRY 15 - -/* EEPROM */ -#define IL4965_FIRST_AMPDU_QUEUE 10 - -/* Calibration */ -void il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp); -void il4965_sensitivity_calibration(struct il_priv *il, void *resp); -void il4965_init_sensitivity(struct il_priv *il); -void il4965_reset_run_time_calib(struct il_priv *il); -void il4965_calib_free_results(struct il_priv *il); - -/* Debug */ -#ifdef CONFIG_IWLEGACY_DEBUGFS -ssize_t il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos); -ssize_t il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos); -ssize_t il4965_ucode_general_stats_read(struct file *file, - char __user *user_buf, size_t count, - loff_t *ppos); -#endif - -/****************************/ -/* Flow Handler Definitions */ -/****************************/ - -/** - * This I/O area is directly read/writable by driver (e.g. Linux uses writel()) - * Addresses are offsets from device's PCI hardware base address. - */ -#define FH49_MEM_LOWER_BOUND (0x1000) -#define FH49_MEM_UPPER_BOUND (0x2000) - -/** - * Keep-Warm (KW) buffer base address. - * - * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the - * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency - * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host - * from going into a power-savings mode that would cause higher DRAM latency, - * and possible data over/under-runs, before all Tx/Rx is complete. 
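The duplication rule described for struct il4965_scd_bc_tbl above can be shown with a minimal sketch; the function name is hypothetical, and the driver's real byte-count routine may fold additional fields into the 16-bit entry, but the mirroring of indices 0-63 into 256-319 is the part being illustrated.

	/* Sketch: record a frame's byte count for TFD index 'idx' (0..255),
	 * duplicating entries 0..63 at 256..319 as required above. */
	static void scd_bc_tbl_set(struct il4965_scd_bc_tbl *bc_tbl, int idx,
				   u16 byte_cnt)
	{
		bc_tbl->tfd_offset[idx] = cpu_to_le16(byte_cnt);
		if (idx < 64)
			bc_tbl->tfd_offset[idx + 256] = cpu_to_le16(byte_cnt);
	}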
- * - * Driver loads FH49_KW_MEM_ADDR_REG with the physical address (bits 35:4) - * of the buffer, which must be 4K aligned. Once this is set up, the 4965 - * automatically invokes keep-warm accesses when normal accesses might not - * be sufficient to maintain fast DRAM response. - * - * Bit fields: - * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned - */ -#define FH49_KW_MEM_ADDR_REG (FH49_MEM_LOWER_BOUND + 0x97C) - -/** - * TFD Circular Buffers Base (CBBC) addresses - * - * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident - * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs) - * (see struct il_tfd_frame). These 16 pointer registers are offset by 0x04 - * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte - * aligned (address bits 0-7 must be 0). - * - * Bit fields in each pointer register: - * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned - */ -#define FH49_MEM_CBBC_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x9D0) -#define FH49_MEM_CBBC_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xA10) - -/* Find TFD CB base pointer for given queue (range 0-15). */ -#define FH49_MEM_CBBC_QUEUE(x) (FH49_MEM_CBBC_LOWER_BOUND + (x) * 0x4) - -/** - * Rx SRAM Control and Status Registers (RSCSR) - * - * These registers provide handshake between driver and 4965 for the Rx queue - * (this queue handles *all* command responses, notifications, Rx data, etc. - * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx - * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can - * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer - * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1 - * mapping between RBDs and RBs. - * - * Driver must allocate host DRAM memory for the following, and set the - * physical address of each into 4965 registers: - * - * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256 - * entries (although any power of 2, up to 4096, is selectable by driver). - * Each entry (1 dword) points to a receive buffer (RB) of consistent size - * (typically 4K, although 8K or 16K are also selectable by driver). - * Driver sets up RB size and number of RBDs in the CB via Rx config - * register FH49_MEM_RCSR_CHNL0_CONFIG_REG. - * - * Bit fields within one RBD: - * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned - * - * Driver sets physical address [35:8] of base of RBD circular buffer - * into FH49_RSCSR_CHNL0_RBDCB_BASE_REG [27:0]. - * - * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers - * (RBs) have been filled, via a "write pointer", actually the idx of - * the RB's corresponding RBD within the circular buffer. Driver sets - * physical address [35:4] into FH49_RSCSR_CHNL0_STTS_WPTR_REG [31:0]. - * - * Bit fields in lower dword of Rx status buffer (upper dword not used - * by driver; see struct il4965_shared, val0): - * 31-12: Not used by driver - * 11- 0: Index of last filled Rx buffer descriptor - * (4965 writes, driver reads this value) - * - * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must - * enter pointers to these RBs into contiguous RBD circular buffer entries, - * and update the 4965's "write" idx register, - * FH49_RSCSR_CHNL0_RBDCB_WPTR_REG. - * - * This "write" idx corresponds to the *next* RBD that the driver will make - * available, i.e. one RBD past the tail of the ready-to-fill RBDs within - * the circular buffer. 
This value should initially be 0 (before preparing any - * RBs), should be 8 after preparing the first 8 RBs (for example), and must - * wrap back to 0 at the end of the circular buffer (but don't wrap before - * "read" idx has advanced past 1! See below). - * NOTE: 4965 EXPECTS THE WRITE IDX TO BE INCREMENTED IN MULTIPLES OF 8. - * - * As the 4965 fills RBs (referenced from contiguous RBDs within the circular - * buffer), it updates the Rx status buffer in host DRAM, 2) described above, - * to tell the driver the idx of the latest filled RBD. The driver must - * read this "read" idx from DRAM after receiving an Rx interrupt from 4965. - * - * The driver must also internally keep track of a third idx, which is the - * next RBD to process. When receiving an Rx interrupt, driver should process - * all filled but unprocessed RBs up to, but not including, the RB - * corresponding to the "read" idx. For example, if "read" idx becomes "1", - * driver may process the RB pointed to by RBD 0. Depending on volume of - * traffic, there may be many RBs to process. - * - * If read idx == write idx, 4965 thinks there is no room to put new data. - * Due to this, the maximum number of filled RBs is 255, instead of 256. To - * be safe, make sure that there is a gap of at least 2 RBDs between "write" - * and "read" idxes; that is, make sure that there are no more than 254 - * buffers waiting to be filled. - */ -#define FH49_MEM_RSCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xBC0) -#define FH49_MEM_RSCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xC00) -#define FH49_MEM_RSCSR_CHNL0 (FH49_MEM_RSCSR_LOWER_BOUND) - -/** - * Physical base address of 8-byte Rx Status buffer. - * Bit fields: - * 31-0: Rx status buffer physical base address [35:4], must 16-byte aligned. - */ -#define FH49_RSCSR_CHNL0_STTS_WPTR_REG (FH49_MEM_RSCSR_CHNL0) - -/** - * Physical base address of Rx Buffer Descriptor Circular Buffer. - * Bit fields: - * 27-0: RBD CD physical base address [35:8], must be 256-byte aligned. - */ -#define FH49_RSCSR_CHNL0_RBDCB_BASE_REG (FH49_MEM_RSCSR_CHNL0 + 0x004) - -/** - * Rx write pointer (idx, really!). - * Bit fields: - * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1. - * NOTE: For 256-entry circular buffer, use only bits [7:0]. - */ -#define FH49_RSCSR_CHNL0_RBDCB_WPTR_REG (FH49_MEM_RSCSR_CHNL0 + 0x008) -#define FH49_RSCSR_CHNL0_WPTR (FH49_RSCSR_CHNL0_RBDCB_WPTR_REG) - -/** - * Rx Config/Status Registers (RCSR) - * Rx Config Reg for channel 0 (only channel used) - * - * Driver must initialize FH49_MEM_RCSR_CHNL0_CONFIG_REG as follows for - * normal operation (see bit fields). - * - * Clearing FH49_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA. - * Driver should poll FH49_MEM_RSSR_RX_STATUS_REG for - * FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing. - * - * Bit fields: - * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame, - * '10' operate normally - * 29-24: reserved - * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal), - * min "5" for 32 RBDs, max "12" for 4096 RBDs. - * 19-18: reserved - * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K, - * '10' 12K, '11' 16K. 
- * 15-14: reserved - * 13-12: IRQ destination; '00' none, '01' host driver (normal operation) - * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec) - * typical value 0x10 (about 1/2 msec) - * 3- 0: reserved - */ -#define FH49_MEM_RCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xC00) -#define FH49_MEM_RCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xCC0) -#define FH49_MEM_RCSR_CHNL0 (FH49_MEM_RCSR_LOWER_BOUND) - -#define FH49_MEM_RCSR_CHNL0_CONFIG_REG (FH49_MEM_RCSR_CHNL0) - -#define FH49_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */ -#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */ -#define FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */ -#define FH49_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */ -#define FH49_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */ -#define FH49_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31 */ - -#define FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20) -#define FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4) -#define RX_RB_TIMEOUT (0x10) - -#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000) -#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000) -#define FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000) - -#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000) -#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000) -#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000) -#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000) - -#define FH49_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004) -#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000) -#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000) - -/** - * Rx Shared Status Registers (RSSR) - * - * After stopping Rx DMA channel (writing 0 to - * FH49_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll - * FH49_MEM_RSSR_RX_STATUS_REG until Rx channel is idle. - * - * Bit fields: - * 24: 1 = Channel 0 is idle - * - * FH49_MEM_RSSR_SHARED_CTRL_REG and FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV - * contain default values that should not be altered by the driver. - */ -#define FH49_MEM_RSSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xC40) -#define FH49_MEM_RSSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xD00) - -#define FH49_MEM_RSSR_SHARED_CTRL_REG (FH49_MEM_RSSR_LOWER_BOUND) -#define FH49_MEM_RSSR_RX_STATUS_REG (FH49_MEM_RSSR_LOWER_BOUND + 0x004) -#define FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\ - (FH49_MEM_RSSR_LOWER_BOUND + 0x008) - -#define FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000) - -#define FH49_MEM_TFDIB_REG1_ADDR_BITSHIFT 28 - -/* TFDB Area - TFDs buffer table */ -#define FH49_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF) -#define FH49_TFDIB_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x900) -#define FH49_TFDIB_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0x958) -#define FH49_TFDIB_CTRL0_REG(_chnl) (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl)) -#define FH49_TFDIB_CTRL1_REG(_chnl) (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4) - -/** - * Transmit DMA Channel Control/Status Registers (TCSR) - * - * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels - * supported in hardware (don't confuse these with the 16 Tx queues in DRAM, - * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes. - * - * To use a Tx DMA channel, driver must initialize its - * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl) with: - * - * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | - * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL - * - * All other bits should be 0. 
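Pulling the channel-0 Rx register descriptions above together, the sequence below sketches how the Rx DMA channel would typically be configured and later quiesced. The register and bit constants are the ones defined above; il_wr() and il_poll_bit() are the driver's register helpers (il_poll_bit() appears later in this patch), and the exact flag combination and the timeout value are illustrative assumptions rather than the driver's verbatim code.

	/* Enable Rx DMA: 256 RBDs (2^8), 4 KB buffers, interrupts to the host,
	 * and the documented RB-close timeout. */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
	      FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	      FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	      FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	      (RX_RB_TIMEOUT << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	      (8 << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* To stop Rx DMA: clear the config register, then wait for the
	 * channel-idle bit described in the RSSR comment above. */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
		    FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);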
- * - * Bit fields: - * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame, - * '10' operate normally - * 29- 4: Reserved, set to "0" - * 3: Enable internal DMA requests (1, normal operation), disable (0) - * 2- 0: Reserved, set to "0" - */ -#define FH49_TCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xD00) -#define FH49_TCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xE60) - -/* Find Control/Status reg for given Tx DMA/FIFO channel */ -#define FH49_TCSR_CHNL_NUM (7) -#define FH50_TCSR_CHNL_NUM (8) - -/* TCSR: tx_config register values */ -#define FH49_TCSR_CHNL_TX_CONFIG_REG(_chnl) \ - (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl)) -#define FH49_TCSR_CHNL_TX_CREDIT_REG(_chnl) \ - (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4) -#define FH49_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \ - (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8) - -#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000) -#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001) - -#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000) -#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008) - -#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000) -#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000) -#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000) - -#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000) -#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000) -#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000) - -#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000) -#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000) -#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000) - -#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000) -#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000) -#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003) - -#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20) -#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12) - -/** - * Tx Shared Status Registers (TSSR) - * - * After stopping Tx DMA channel (writing 0 to - * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll - * FH49_TSSR_TX_STATUS_REG until selected Tx channel is idle - * (channel's buffers empty | no pending requests). - * - * Bit fields: - * 31-24: 1 = Channel buffers empty (channel 7:0) - * 23-16: 1 = No pending requests (channel 7:0) - */ -#define FH49_TSSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xEA0) -#define FH49_TSSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xEC0) - -#define FH49_TSSR_TX_STATUS_REG (FH49_TSSR_LOWER_BOUND + 0x010) - -/** - * Bit fields for TSSR(Tx Shared Status & Control) error status register: - * 31: Indicates an address error when accessed to internal memory - * uCode/driver must write "1" in order to clear this flag - * 30: Indicates that Host did not send the expected number of dwords to FH - * uCode/driver must write "1" in order to clear this flag - * 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA - * command was received from the scheduler while the TRB was already full - * with previous command - * uCode/driver must write "1" in order to clear this flag - * 7-0: Each status bit indicates a channel's TxCredit error. When an error - * bit is set, it indicates that the FH has received a full indication - * from the RTC TxFIFO and the current value of the TxCredit counter was - * not equal to zero. 
This mean that the credit mechanism was not - * synchronized to the TxFIFO status - * uCode/driver must write "1" in order to clear this flag - */ -#define FH49_TSSR_TX_ERROR_REG (FH49_TSSR_LOWER_BOUND + 0x018) - -#define FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16) - -/* Tx service channels */ -#define FH49_SRVC_CHNL (9) -#define FH49_SRVC_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x9C8) -#define FH49_SRVC_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0x9D0) -#define FH49_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \ - (FH49_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4) - -#define FH49_TX_CHICKEN_BITS_REG (FH49_MEM_LOWER_BOUND + 0xE98) -/* Instruct FH to increment the retry count of a packet when - * it is brought from the memory to TX-FIFO - */ -#define FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002) - -/* Keep Warm Size */ -#define IL_KW_SIZE 0x1000 /* 4k */ - -#endif /* __il_4965_h__ */ diff --git a/trunk/drivers/net/wireless/iwlegacy/Kconfig b/trunk/drivers/net/wireless/iwlegacy/Kconfig index 05bd375cb845..aef65cd47661 100644 --- a/trunk/drivers/net/wireless/iwlegacy/Kconfig +++ b/trunk/drivers/net/wireless/iwlegacy/Kconfig @@ -1,4 +1,4 @@ -config IWLEGACY +config IWLWIFI_LEGACY tristate select FW_LOADER select NEW_LEDS @@ -7,13 +7,13 @@ config IWLEGACY select MAC80211_LEDS menu "Debugging Options" - depends on IWLEGACY + depends on IWLWIFI_LEGACY -config IWLEGACY_DEBUG - bool "Enable full debugging output in iwlegacy (iwl 3945/4965) drivers" - depends on IWLEGACY +config IWLWIFI_LEGACY_DEBUG + bool "Enable full debugging output in 4965 and 3945 drivers" + depends on IWLWIFI_LEGACY ---help--- - This option will enable debug tracing output for the iwlegacy + This option will enable debug tracing output for the iwlwifilegacy drivers. This will result in the kernel module being ~100k larger. You can @@ -29,26 +29,43 @@ config IWLEGACY_DEBUG % echo 0x43fff > /sys/class/net/wlan0/device/debug_level You can find the list of debug mask values in: - drivers/net/wireless/iwlegacy/common.h + drivers/net/wireless/iwlwifilegacy/iwl-debug.h If this is your first time using this driver, you should say Y here as the debug information can assist others in helping you resolve any problems you may encounter. -config IWLEGACY_DEBUGFS - bool "iwlegacy (iwl 3945/4965) debugfs support" - depends on IWLEGACY && MAC80211_DEBUGFS +config IWLWIFI_LEGACY_DEBUGFS + bool "4965 and 3945 debugfs support" + depends on IWLWIFI_LEGACY && MAC80211_DEBUGFS ---help--- - Enable creation of debugfs files for the iwlegacy drivers. This + Enable creation of debugfs files for the iwlwifilegacy drivers. This is a low-impact option that allows getting insight into the driver's state at runtime. +config IWLWIFI_LEGACY_DEVICE_TRACING + bool "iwlwifilegacy legacy device access tracing" + depends on IWLWIFI_LEGACY + depends on EVENT_TRACING + help + Say Y here to trace all commands, including TX frames and IO + accesses, sent to the device. If you say yes, iwlwifilegacy will + register with the ftrace framework for event tracing and dump + all this information to the ringbuffer, you may need to + increase the ringbuffer size. See the ftrace documentation + for more information. + + When tracing is not enabled, this option still has some + (though rather small) overhead. + + If unsure, say Y so we can help you better when problems + occur. 
endmenu config IWL4965 tristate "Intel Wireless WiFi 4965AGN (iwl4965)" depends on PCI && MAC80211 - select IWLEGACY + select IWLWIFI_LEGACY ---help--- This option enables support for @@ -76,7 +93,7 @@ config IWL4965 config IWL3945 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)" depends on PCI && MAC80211 - select IWLEGACY + select IWLWIFI_LEGACY ---help--- Select to build the driver supporting the: diff --git a/trunk/drivers/net/wireless/iwlegacy/Makefile b/trunk/drivers/net/wireless/iwlegacy/Makefile index c985a01a0731..d56aeb38c211 100644 --- a/trunk/drivers/net/wireless/iwlegacy/Makefile +++ b/trunk/drivers/net/wireless/iwlegacy/Makefile @@ -1,17 +1,25 @@ -obj-$(CONFIG_IWLEGACY) += iwlegacy.o -iwlegacy-objs := common.o -iwlegacy-$(CONFIG_IWLEGACY_DEBUGFS) += debug.o +obj-$(CONFIG_IWLWIFI_LEGACY) += iwl-legacy.o +iwl-legacy-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o +iwl-legacy-objs += iwl-rx.o iwl-tx.o iwl-sta.o +iwl-legacy-objs += iwl-scan.o iwl-led.o +iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-debugfs.o +iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) += iwl-devtrace.o -iwlegacy-objs += $(iwlegacy-m) +iwl-legacy-objs += $(iwl-legacy-m) + +CFLAGS_iwl-devtrace.o := -I$(src) # 4965 obj-$(CONFIG_IWL4965) += iwl4965.o -iwl4965-objs := 4965.o 4965-mac.o 4965-rs.o 4965-calib.o -iwl4965-$(CONFIG_IWLEGACY_DEBUGFS) += 4965-debug.o +iwl4965-objs := iwl-4965.o iwl4965-base.o iwl-4965-rs.o iwl-4965-led.o +iwl4965-objs += iwl-4965-ucode.o iwl-4965-tx.o +iwl4965-objs += iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o +iwl4965-objs += iwl-4965-sta.o iwl-4965-eeprom.o +iwl4965-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-4965-debugfs.o # 3945 obj-$(CONFIG_IWL3945) += iwl3945.o -iwl3945-objs := 3945-mac.o 3945.o 3945-rs.o -iwl3945-$(CONFIG_IWLEGACY_DEBUGFS) += 3945-debug.o +iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o +iwl3945-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-3945-debugfs.o ccflags-y += -D__CHECK_ENDIAN__ diff --git a/trunk/drivers/net/wireless/iwlegacy/common.c b/trunk/drivers/net/wireless/iwlegacy/common.c deleted file mode 100644 index 36454d0bbeed..000000000000 --- a/trunk/drivers/net/wireless/iwlegacy/common.c +++ /dev/null @@ -1,5867 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - *****************************************************************************/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "common.h" - -int -_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout) -{ - const int interval = 10; /* microseconds */ - int t = 0; - - do { - if ((_il_rd(il, addr) & mask) == (bits & mask)) - return t; - udelay(interval); - t += interval; - } while (t < timeout); - - return -ETIMEDOUT; -} -EXPORT_SYMBOL(_il_poll_bit); - -void -il_set_bit(struct il_priv *p, u32 r, u32 m) -{ - unsigned long reg_flags; - - spin_lock_irqsave(&p->reg_lock, reg_flags); - _il_set_bit(p, r, m); - spin_unlock_irqrestore(&p->reg_lock, reg_flags); -} -EXPORT_SYMBOL(il_set_bit); - -void -il_clear_bit(struct il_priv *p, u32 r, u32 m) -{ - unsigned long reg_flags; - - spin_lock_irqsave(&p->reg_lock, reg_flags); - _il_clear_bit(p, r, m); - spin_unlock_irqrestore(&p->reg_lock, reg_flags); -} -EXPORT_SYMBOL(il_clear_bit); - -int -_il_grab_nic_access(struct il_priv *il) -{ - int ret; - u32 val; - - /* this bit wakes up the NIC */ - _il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - - /* - * These bits say the device is running, and should keep running for - * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), - * but they do not indicate that embedded SRAM is restored yet; - * 3945 and 4965 have volatile SRAM, and must save/restore contents - * to/from host DRAM when sleeping/waking for power-saving. - * Each direction takes approximately 1/4 millisecond; with this - * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a - * series of register accesses are expected (e.g. reading Event Log), - * to keep device from sleeping. - * - * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that - * SRAM is okay/restored. We don't check that here because this call - * is just for hardware register access; but GP1 MAC_SLEEP check is a - * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log). - * - */ - ret = - _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, - (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | - CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); - if (ret < 0) { - val = _il_rd(il, CSR_GP_CNTRL); - IL_ERR("MAC is in deep sleep!. 
CSR_GP_CNTRL = 0x%08X\n", val); - _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); - return -EIO; - } - - return 0; -} -EXPORT_SYMBOL_GPL(_il_grab_nic_access); - -int -il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout) -{ - const int interval = 10; /* microseconds */ - int t = 0; - - do { - if ((il_rd(il, addr) & mask) == mask) - return t; - udelay(interval); - t += interval; - } while (t < timeout); - - return -ETIMEDOUT; -} -EXPORT_SYMBOL(il_poll_bit); - -u32 -il_rd_prph(struct il_priv *il, u32 reg) -{ - unsigned long reg_flags; - u32 val; - - spin_lock_irqsave(&il->reg_lock, reg_flags); - _il_grab_nic_access(il); - val = _il_rd_prph(il, reg); - _il_release_nic_access(il); - spin_unlock_irqrestore(&il->reg_lock, reg_flags); - return val; -} -EXPORT_SYMBOL(il_rd_prph); - -void -il_wr_prph(struct il_priv *il, u32 addr, u32 val) -{ - unsigned long reg_flags; - - spin_lock_irqsave(&il->reg_lock, reg_flags); - if (!_il_grab_nic_access(il)) { - _il_wr_prph(il, addr, val); - _il_release_nic_access(il); - } - spin_unlock_irqrestore(&il->reg_lock, reg_flags); -} -EXPORT_SYMBOL(il_wr_prph); - -u32 -il_read_targ_mem(struct il_priv *il, u32 addr) -{ - unsigned long reg_flags; - u32 value; - - spin_lock_irqsave(&il->reg_lock, reg_flags); - _il_grab_nic_access(il); - - _il_wr(il, HBUS_TARG_MEM_RADDR, addr); - rmb(); - value = _il_rd(il, HBUS_TARG_MEM_RDAT); - - _il_release_nic_access(il); - spin_unlock_irqrestore(&il->reg_lock, reg_flags); - return value; -} -EXPORT_SYMBOL(il_read_targ_mem); - -void -il_write_targ_mem(struct il_priv *il, u32 addr, u32 val) -{ - unsigned long reg_flags; - - spin_lock_irqsave(&il->reg_lock, reg_flags); - if (!_il_grab_nic_access(il)) { - _il_wr(il, HBUS_TARG_MEM_WADDR, addr); - wmb(); - _il_wr(il, HBUS_TARG_MEM_WDAT, val); - _il_release_nic_access(il); - } - spin_unlock_irqrestore(&il->reg_lock, reg_flags); -} -EXPORT_SYMBOL(il_write_targ_mem); - -const char * -il_get_cmd_string(u8 cmd) -{ - switch (cmd) { - IL_CMD(N_ALIVE); - IL_CMD(N_ERROR); - IL_CMD(C_RXON); - IL_CMD(C_RXON_ASSOC); - IL_CMD(C_QOS_PARAM); - IL_CMD(C_RXON_TIMING); - IL_CMD(C_ADD_STA); - IL_CMD(C_REM_STA); - IL_CMD(C_WEPKEY); - IL_CMD(N_3945_RX); - IL_CMD(C_TX); - IL_CMD(C_RATE_SCALE); - IL_CMD(C_LEDS); - IL_CMD(C_TX_LINK_QUALITY_CMD); - IL_CMD(C_CHANNEL_SWITCH); - IL_CMD(N_CHANNEL_SWITCH); - IL_CMD(C_SPECTRUM_MEASUREMENT); - IL_CMD(N_SPECTRUM_MEASUREMENT); - IL_CMD(C_POWER_TBL); - IL_CMD(N_PM_SLEEP); - IL_CMD(N_PM_DEBUG_STATS); - IL_CMD(C_SCAN); - IL_CMD(C_SCAN_ABORT); - IL_CMD(N_SCAN_START); - IL_CMD(N_SCAN_RESULTS); - IL_CMD(N_SCAN_COMPLETE); - IL_CMD(N_BEACON); - IL_CMD(C_TX_BEACON); - IL_CMD(C_TX_PWR_TBL); - IL_CMD(C_BT_CONFIG); - IL_CMD(C_STATS); - IL_CMD(N_STATS); - IL_CMD(N_CARD_STATE); - IL_CMD(N_MISSED_BEACONS); - IL_CMD(C_CT_KILL_CONFIG); - IL_CMD(C_SENSITIVITY); - IL_CMD(C_PHY_CALIBRATION); - IL_CMD(N_RX_PHY); - IL_CMD(N_RX_MPDU); - IL_CMD(N_RX); - IL_CMD(N_COMPRESSED_BA); - default: - return "UNKNOWN"; - - } -} -EXPORT_SYMBOL(il_get_cmd_string); - -#define HOST_COMPLETE_TIMEOUT (HZ / 2) - -static void -il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd, - struct il_rx_pkt *pkt) -{ - if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { - IL_ERR("Bad return from %s (0x%08X)\n", - il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); - return; - } -#ifdef CONFIG_IWLEGACY_DEBUG - switch (cmd->hdr.cmd) { - case C_TX_LINK_QUALITY_CMD: - case C_SENSITIVITY: - D_HC_DUMP("back from %s (0x%08X)\n", - il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); - break; - default: - 
D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd), - pkt->hdr.flags); - } -#endif -} - -static int -il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd) -{ - int ret; - - BUG_ON(!(cmd->flags & CMD_ASYNC)); - - /* An asynchronous command can not expect an SKB to be set. */ - BUG_ON(cmd->flags & CMD_WANT_SKB); - - /* Assign a generic callback if one is not provided */ - if (!cmd->callback) - cmd->callback = il_generic_cmd_callback; - - if (test_bit(S_EXIT_PENDING, &il->status)) - return -EBUSY; - - ret = il_enqueue_hcmd(il, cmd); - if (ret < 0) { - IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n", - il_get_cmd_string(cmd->id), ret); - return ret; - } - return 0; -} - -int -il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd) -{ - int cmd_idx; - int ret; - - lockdep_assert_held(&il->mutex); - - BUG_ON(cmd->flags & CMD_ASYNC); - - /* A synchronous command can not have a callback set. */ - BUG_ON(cmd->callback); - - D_INFO("Attempting to send sync command %s\n", - il_get_cmd_string(cmd->id)); - - set_bit(S_HCMD_ACTIVE, &il->status); - D_INFO("Setting HCMD_ACTIVE for command %s\n", - il_get_cmd_string(cmd->id)); - - cmd_idx = il_enqueue_hcmd(il, cmd); - if (cmd_idx < 0) { - ret = cmd_idx; - IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n", - il_get_cmd_string(cmd->id), ret); - goto out; - } - - ret = wait_event_timeout(il->wait_command_queue, - !test_bit(S_HCMD_ACTIVE, &il->status), - HOST_COMPLETE_TIMEOUT); - if (!ret) { - if (test_bit(S_HCMD_ACTIVE, &il->status)) { - IL_ERR("Error sending %s: time out after %dms.\n", - il_get_cmd_string(cmd->id), - jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); - - clear_bit(S_HCMD_ACTIVE, &il->status); - D_INFO("Clearing HCMD_ACTIVE for command %s\n", - il_get_cmd_string(cmd->id)); - ret = -ETIMEDOUT; - goto cancel; - } - } - - if (test_bit(S_RF_KILL_HW, &il->status)) { - IL_ERR("Command %s aborted: RF KILL Switch\n", - il_get_cmd_string(cmd->id)); - ret = -ECANCELED; - goto fail; - } - if (test_bit(S_FW_ERROR, &il->status)) { - IL_ERR("Command %s failed: FW Error\n", - il_get_cmd_string(cmd->id)); - ret = -EIO; - goto fail; - } - if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) { - IL_ERR("Error: Response NULL in '%s'\n", - il_get_cmd_string(cmd->id)); - ret = -EIO; - goto cancel; - } - - ret = 0; - goto out; - -cancel: - if (cmd->flags & CMD_WANT_SKB) { - /* - * Cancel the CMD_WANT_SKB flag for the cmd in the - * TX cmd queue. Otherwise in case the cmd comes - * in later, it will possibly set an invalid - * address (cmd->meta.source). 
- */ - il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB; - } -fail: - if (cmd->reply_page) { - il_free_pages(il, cmd->reply_page); - cmd->reply_page = 0; - } -out: - return ret; -} -EXPORT_SYMBOL(il_send_cmd_sync); - -int -il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd) -{ - if (cmd->flags & CMD_ASYNC) - return il_send_cmd_async(il, cmd); - - return il_send_cmd_sync(il, cmd); -} -EXPORT_SYMBOL(il_send_cmd); - -int -il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data) -{ - struct il_host_cmd cmd = { - .id = id, - .len = len, - .data = data, - }; - - return il_send_cmd_sync(il, &cmd); -} -EXPORT_SYMBOL(il_send_cmd_pdu); - -int -il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data, - void (*callback) (struct il_priv *il, - struct il_device_cmd *cmd, - struct il_rx_pkt *pkt)) -{ - struct il_host_cmd cmd = { - .id = id, - .len = len, - .data = data, - }; - - cmd.flags |= CMD_ASYNC; - cmd.callback = callback; - - return il_send_cmd_async(il, &cmd); -} -EXPORT_SYMBOL(il_send_cmd_pdu_async); - -/* default: IL_LED_BLINK(0) using blinking idx table */ -static int led_mode; -module_param(led_mode, int, S_IRUGO); -MODULE_PARM_DESC(led_mode, - "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking"); - -/* Throughput OFF time(ms) ON time (ms) - * >300 25 25 - * >200 to 300 40 40 - * >100 to 200 55 55 - * >70 to 100 65 65 - * >50 to 70 75 75 - * >20 to 50 85 85 - * >10 to 20 95 95 - * >5 to 10 110 110 - * >1 to 5 130 130 - * >0 to 1 167 167 - * <=0 SOLID ON - */ -static const struct ieee80211_tpt_blink il_blink[] = { - {.throughput = 0, .blink_time = 334}, - {.throughput = 1 * 1024 - 1, .blink_time = 260}, - {.throughput = 5 * 1024 - 1, .blink_time = 220}, - {.throughput = 10 * 1024 - 1, .blink_time = 190}, - {.throughput = 20 * 1024 - 1, .blink_time = 170}, - {.throughput = 50 * 1024 - 1, .blink_time = 150}, - {.throughput = 70 * 1024 - 1, .blink_time = 130}, - {.throughput = 100 * 1024 - 1, .blink_time = 110}, - {.throughput = 200 * 1024 - 1, .blink_time = 80}, - {.throughput = 300 * 1024 - 1, .blink_time = 50}, -}; - -/* - * Adjust led blink rate to compensate on a MAC Clock difference on every HW - * Led blink rate analysis showed an average deviation of 0% on 3945, - * 5% on 4965 HW. 
- * Need to compensate on the led on/off time per HW according to the deviation - * to achieve the desired led frequency - * The calculation is: (100-averageDeviation)/100 * blinkTime - * For code efficiency the calculation will be: - * compensation = (100 - averageDeviation) * 64 / 100 - * NewBlinkTime = (compensation * BlinkTime) / 64 - */ -static inline u8 -il_blink_compensation(struct il_priv *il, u8 time, u16 compensation) -{ - if (!compensation) { - IL_ERR("undefined blink compensation: " - "use pre-defined blinking time\n"); - return time; - } - - return (u8) ((time * compensation) >> 6); -} - -/* Set led pattern command */ -static int -il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off) -{ - struct il_led_cmd led_cmd = { - .id = IL_LED_LINK, - .interval = IL_DEF_LED_INTRVL - }; - int ret; - - if (!test_bit(S_READY, &il->status)) - return -EBUSY; - - if (il->blink_on == on && il->blink_off == off) - return 0; - - if (off == 0) { - /* led is SOLID_ON */ - on = IL_LED_SOLID; - } - - D_LED("Led blink time compensation=%u\n", - il->cfg->base_params->led_compensation); - led_cmd.on = - il_blink_compensation(il, on, - il->cfg->base_params->led_compensation); - led_cmd.off = - il_blink_compensation(il, off, - il->cfg->base_params->led_compensation); - - ret = il->cfg->ops->led->cmd(il, &led_cmd); - if (!ret) { - il->blink_on = on; - il->blink_off = off; - } - return ret; -} - -static void -il_led_brightness_set(struct led_classdev *led_cdev, - enum led_brightness brightness) -{ - struct il_priv *il = container_of(led_cdev, struct il_priv, led); - unsigned long on = 0; - - if (brightness > 0) - on = IL_LED_SOLID; - - il_led_cmd(il, on, 0); -} - -static int -il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, - unsigned long *delay_off) -{ - struct il_priv *il = container_of(led_cdev, struct il_priv, led); - - return il_led_cmd(il, *delay_on, *delay_off); -} - -void -il_leds_init(struct il_priv *il) -{ - int mode = led_mode; - int ret; - - if (mode == IL_LED_DEFAULT) - mode = il->cfg->led_mode; - - il->led.name = - kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy)); - il->led.brightness_set = il_led_brightness_set; - il->led.blink_set = il_led_blink_set; - il->led.max_brightness = 1; - - switch (mode) { - case IL_LED_DEFAULT: - WARN_ON(1); - break; - case IL_LED_BLINK: - il->led.default_trigger = - ieee80211_create_tpt_led_trigger(il->hw, - IEEE80211_TPT_LEDTRIG_FL_CONNECTED, - il_blink, - ARRAY_SIZE(il_blink)); - break; - case IL_LED_RF_STATE: - il->led.default_trigger = ieee80211_get_radio_led_name(il->hw); - break; - } - - ret = led_classdev_register(&il->pci_dev->dev, &il->led); - if (ret) { - kfree(il->led.name); - return; - } - - il->led_registered = true; -} -EXPORT_SYMBOL(il_leds_init); - -void -il_leds_exit(struct il_priv *il) -{ - if (!il->led_registered) - return; - - led_classdev_unregister(&il->led); - kfree(il->led.name); -} -EXPORT_SYMBOL(il_leds_exit); - -/************************** EEPROM BANDS **************************** - * - * The il_eeprom_band definitions below provide the mapping from the - * EEPROM contents to the specific channel number supported for each - * band. - * - * For example, il_priv->eeprom.band_3_channels[4] from the band_3 - * definition below maps to physical channel 42 in the 5.2GHz spectrum. - * The specific geography and calibration information for that channel - * is contained in the eeprom map itself. 
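As a worked instance of the blink-compensation formula above, using the 5% deviation quoted for 4965 hardware (purely illustrative):

	/* compensation = (100 - 5) * 64 / 100 = 60 in integer arithmetic, so a
	 * 260 ms entry from il_blink[] becomes (260 * 60) >> 6 = 243 ms. */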
- * - * During init, we copy the eeprom information and channel map - * information into il->channel_info_24/52 and il->channel_map_24/52 - * - * channel_map_24/52 provides the idx in the channel_info array for a - * given channel. We have to have two separate maps as there is channel - * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and - * band_2 - * - * A value of 0xff stored in the channel_map indicates that the channel - * is not supported by the hardware at all. - * - * A value of 0xfe in the channel_map indicates that the channel is not - * valid for Tx with the current hardware. This means that - * while the system can tune and receive on a given channel, it may not - * be able to associate or transmit any frames on that - * channel. There is no corresponding channel information for that - * entry. - * - *********************************************************************/ - -/* 2.4 GHz */ -const u8 il_eeprom_band_1[14] = { - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 -}; - -/* 5.2 GHz bands */ -static const u8 il_eeprom_band_2[] = { /* 4915-5080MHz */ - 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 -}; - -static const u8 il_eeprom_band_3[] = { /* 5170-5320MHz */ - 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 -}; - -static const u8 il_eeprom_band_4[] = { /* 5500-5700MHz */ - 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 -}; - -static const u8 il_eeprom_band_5[] = { /* 5725-5825MHz */ - 145, 149, 153, 157, 161, 165 -}; - -static const u8 il_eeprom_band_6[] = { /* 2.4 ht40 channel */ - 1, 2, 3, 4, 5, 6, 7 -}; - -static const u8 il_eeprom_band_7[] = { /* 5.2 ht40 channel */ - 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 -}; - -/****************************************************************************** - * - * EEPROM related functions - * -******************************************************************************/ - -static int -il_eeprom_verify_signature(struct il_priv *il) -{ - u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; - int ret = 0; - - D_EEPROM("EEPROM signature=0x%08x\n", gp); - switch (gp) { - case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K: - case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K: - break; - default: - IL_ERR("bad EEPROM signature," "EEPROM_GP=0x%08x\n", gp); - ret = -ENOENT; - break; - } - return ret; -} - -const u8 * -il_eeprom_query_addr(const struct il_priv *il, size_t offset) -{ - BUG_ON(offset >= il->cfg->base_params->eeprom_size); - return &il->eeprom[offset]; -} -EXPORT_SYMBOL(il_eeprom_query_addr); - -u16 -il_eeprom_query16(const struct il_priv *il, size_t offset) -{ - if (!il->eeprom) - return 0; - return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8); -} -EXPORT_SYMBOL(il_eeprom_query16); - -/** - * il_eeprom_init - read EEPROM contents - * - * Load the EEPROM contents from adapter into il->eeprom - * - * NOTE: This routine uses the non-debug IO access functions. 
- */ -int -il_eeprom_init(struct il_priv *il) -{ - __le16 *e; - u32 gp = _il_rd(il, CSR_EEPROM_GP); - int sz; - int ret; - u16 addr; - - /* allocate eeprom */ - sz = il->cfg->base_params->eeprom_size; - D_EEPROM("NVM size = %d\n", sz); - il->eeprom = kzalloc(sz, GFP_KERNEL); - if (!il->eeprom) { - ret = -ENOMEM; - goto alloc_err; - } - e = (__le16 *) il->eeprom; - - il->cfg->ops->lib->apm_ops.init(il); - - ret = il_eeprom_verify_signature(il); - if (ret < 0) { - IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp); - ret = -ENOENT; - goto err; - } - - /* Make sure driver (instead of uCode) is allowed to read EEPROM */ - ret = il->cfg->ops->lib->eeprom_ops.acquire_semaphore(il); - if (ret < 0) { - IL_ERR("Failed to acquire EEPROM semaphore.\n"); - ret = -ENOENT; - goto err; - } - - /* eeprom is an array of 16bit values */ - for (addr = 0; addr < sz; addr += sizeof(u16)) { - u32 r; - - _il_wr(il, CSR_EEPROM_REG, - CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); - - ret = - _il_poll_bit(il, CSR_EEPROM_REG, - CSR_EEPROM_REG_READ_VALID_MSK, - CSR_EEPROM_REG_READ_VALID_MSK, - IL_EEPROM_ACCESS_TIMEOUT); - if (ret < 0) { - IL_ERR("Time out reading EEPROM[%d]\n", addr); - goto done; - } - r = _il_rd(il, CSR_EEPROM_REG); - e[addr / 2] = cpu_to_le16(r >> 16); - } - - D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM", - il_eeprom_query16(il, EEPROM_VERSION)); - - ret = 0; -done: - il->cfg->ops->lib->eeprom_ops.release_semaphore(il); - -err: - if (ret) - il_eeprom_free(il); - /* Reset chip to save power until we load uCode during "up". */ - il_apm_stop(il); -alloc_err: - return ret; -} -EXPORT_SYMBOL(il_eeprom_init); - -void -il_eeprom_free(struct il_priv *il) -{ - kfree(il->eeprom); - il->eeprom = NULL; -} -EXPORT_SYMBOL(il_eeprom_free); - -static void -il_init_band_reference(const struct il_priv *il, int eep_band, - int *eeprom_ch_count, - const struct il_eeprom_channel **eeprom_ch_info, - const u8 **eeprom_ch_idx) -{ - u32 offset = - il->cfg->ops->lib->eeprom_ops.regulatory_bands[eep_band - 1]; - switch (eep_band) { - case 1: /* 2.4GHz band */ - *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1); - *eeprom_ch_info = - (struct il_eeprom_channel *)il_eeprom_query_addr(il, - offset); - *eeprom_ch_idx = il_eeprom_band_1; - break; - case 2: /* 4.9GHz band */ - *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2); - *eeprom_ch_info = - (struct il_eeprom_channel *)il_eeprom_query_addr(il, - offset); - *eeprom_ch_idx = il_eeprom_band_2; - break; - case 3: /* 5.2GHz band */ - *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3); - *eeprom_ch_info = - (struct il_eeprom_channel *)il_eeprom_query_addr(il, - offset); - *eeprom_ch_idx = il_eeprom_band_3; - break; - case 4: /* 5.5GHz band */ - *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4); - *eeprom_ch_info = - (struct il_eeprom_channel *)il_eeprom_query_addr(il, - offset); - *eeprom_ch_idx = il_eeprom_band_4; - break; - case 5: /* 5.7GHz band */ - *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5); - *eeprom_ch_info = - (struct il_eeprom_channel *)il_eeprom_query_addr(il, - offset); - *eeprom_ch_idx = il_eeprom_band_5; - break; - case 6: /* 2.4GHz ht40 channels */ - *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6); - *eeprom_ch_info = - (struct il_eeprom_channel *)il_eeprom_query_addr(il, - offset); - *eeprom_ch_idx = il_eeprom_band_6; - break; - case 7: /* 5 GHz ht40 channels */ - *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7); - *eeprom_ch_info = - (struct il_eeprom_channel *)il_eeprom_query_addr(il, - offset); - *eeprom_ch_idx = il_eeprom_band_7; - break; - default: - BUG(); - } -} 
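A quick arithmetic check of the channel bookkeeping built from the band arrays above (il_init_channel_map(), just below, sums the first five bands; bands 6 and 7 only annotate existing entries with HT40 information):

	/* channel_count = 14 (band_1) + 13 (band_2) + 12 (band_3)
	 *               + 11 (band_4) +  6 (band_5) = 56 entries */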
- -#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \ - ? # x " " : "") -/** - * il_mod_ht40_chan_info - Copy ht40 channel info into driver's il. - * - * Does not set up a command, or touch hardware. - */ -static int -il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel, - const struct il_eeprom_channel *eeprom_ch, - u8 clear_ht40_extension_channel) -{ - struct il_channel_info *ch_info; - - ch_info = - (struct il_channel_info *)il_get_channel_info(il, band, channel); - - if (!il_is_channel_valid(ch_info)) - return -1; - - D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):" - " Ad-Hoc %ssupported\n", ch_info->channel, - il_is_channel_a_band(ch_info) ? "5.2" : "2.4", - CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE), - CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE), - CHECK_AND_PRINT(DFS), eeprom_ch->flags, - eeprom_ch->max_power_avg, - ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) && - !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not "); - - ch_info->ht40_eeprom = *eeprom_ch; - ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg; - ch_info->ht40_flags = eeprom_ch->flags; - if (eeprom_ch->flags & EEPROM_CHANNEL_VALID) - ch_info->ht40_extension_channel &= - ~clear_ht40_extension_channel; - - return 0; -} - -#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \ - ? # x " " : "") - -/** - * il_init_channel_map - Set up driver's info for all possible channels - */ -int -il_init_channel_map(struct il_priv *il) -{ - int eeprom_ch_count = 0; - const u8 *eeprom_ch_idx = NULL; - const struct il_eeprom_channel *eeprom_ch_info = NULL; - int band, ch; - struct il_channel_info *ch_info; - - if (il->channel_count) { - D_EEPROM("Channel map already initialized.\n"); - return 0; - } - - D_EEPROM("Initializing regulatory info from EEPROM\n"); - - il->channel_count = - ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) + - ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) + - ARRAY_SIZE(il_eeprom_band_5); - - D_EEPROM("Parsing data for %d channels.\n", il->channel_count); - - il->channel_info = - kzalloc(sizeof(struct il_channel_info) * il->channel_count, - GFP_KERNEL); - if (!il->channel_info) { - IL_ERR("Could not allocate channel_info\n"); - il->channel_count = 0; - return -ENOMEM; - } - - ch_info = il->channel_info; - - /* Loop through the 5 EEPROM bands adding them in order to the - * channel map we maintain (that contains additional information than - * what just in the EEPROM) */ - for (band = 1; band <= 5; band++) { - - il_init_band_reference(il, band, &eeprom_ch_count, - &eeprom_ch_info, &eeprom_ch_idx); - - /* Loop through each band adding each of the channels */ - for (ch = 0; ch < eeprom_ch_count; ch++) { - ch_info->channel = eeprom_ch_idx[ch]; - ch_info->band = - (band == - 1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; - - /* permanently store EEPROM's channel regulatory flags - * and max power in channel info database. */ - ch_info->eeprom = eeprom_ch_info[ch]; - - /* Copy the run-time flags so they are there even on - * invalid channels */ - ch_info->flags = eeprom_ch_info[ch].flags; - /* First write that ht40 is not enabled, and then enable - * one by one */ - ch_info->ht40_extension_channel = - IEEE80211_CHAN_NO_HT40; - - if (!(il_is_channel_valid(ch_info))) { - D_EEPROM("Ch. %d Flags %x [%sGHz] - " - "No traffic\n", ch_info->channel, - ch_info->flags, - il_is_channel_a_band(ch_info) ? 
"5.2" : - "2.4"); - ch_info++; - continue; - } - - /* Initialize regulatory-based run-time data */ - ch_info->max_power_avg = ch_info->curr_txpow = - eeprom_ch_info[ch].max_power_avg; - ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; - ch_info->min_power = 0; - - D_EEPROM("Ch. %d [%sGHz] " "%s%s%s%s%s%s(0x%02x %ddBm):" - " Ad-Hoc %ssupported\n", ch_info->channel, - il_is_channel_a_band(ch_info) ? "5.2" : "2.4", - CHECK_AND_PRINT_I(VALID), - CHECK_AND_PRINT_I(IBSS), - CHECK_AND_PRINT_I(ACTIVE), - CHECK_AND_PRINT_I(RADAR), - CHECK_AND_PRINT_I(WIDE), - CHECK_AND_PRINT_I(DFS), - eeprom_ch_info[ch].flags, - eeprom_ch_info[ch].max_power_avg, - ((eeprom_ch_info[ch]. - flags & EEPROM_CHANNEL_IBSS) && - !(eeprom_ch_info[ch]. - flags & EEPROM_CHANNEL_RADAR)) ? "" : - "not "); - - ch_info++; - } - } - - /* Check if we do have HT40 channels */ - if (il->cfg->ops->lib->eeprom_ops.regulatory_bands[5] == - EEPROM_REGULATORY_BAND_NO_HT40 && - il->cfg->ops->lib->eeprom_ops.regulatory_bands[6] == - EEPROM_REGULATORY_BAND_NO_HT40) - return 0; - - /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */ - for (band = 6; band <= 7; band++) { - enum ieee80211_band ieeeband; - - il_init_band_reference(il, band, &eeprom_ch_count, - &eeprom_ch_info, &eeprom_ch_idx); - - /* EEPROM band 6 is 2.4, band 7 is 5 GHz */ - ieeeband = - (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; - - /* Loop through each band adding each of the channels */ - for (ch = 0; ch < eeprom_ch_count; ch++) { - /* Set up driver's info for lower half */ - il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch], - &eeprom_ch_info[ch], - IEEE80211_CHAN_NO_HT40PLUS); - - /* Set up driver's info for upper half */ - il_mod_ht40_chan_info(il, ieeeband, - eeprom_ch_idx[ch] + 4, - &eeprom_ch_info[ch], - IEEE80211_CHAN_NO_HT40MINUS); - } - } - - return 0; -} -EXPORT_SYMBOL(il_init_channel_map); - -/* - * il_free_channel_map - undo allocations in il_init_channel_map - */ -void -il_free_channel_map(struct il_priv *il) -{ - kfree(il->channel_info); - il->channel_count = 0; -} -EXPORT_SYMBOL(il_free_channel_map); - -/** - * il_get_channel_info - Find driver's ilate channel info - * - * Based on band and channel number. - */ -const struct il_channel_info * -il_get_channel_info(const struct il_priv *il, enum ieee80211_band band, - u16 channel) -{ - int i; - - switch (band) { - case IEEE80211_BAND_5GHZ: - for (i = 14; i < il->channel_count; i++) { - if (il->channel_info[i].channel == channel) - return &il->channel_info[i]; - } - break; - case IEEE80211_BAND_2GHZ: - if (channel >= 1 && channel <= 14) - return &il->channel_info[channel - 1]; - break; - default: - BUG(); - } - - return NULL; -} -EXPORT_SYMBOL(il_get_channel_info); - -/* - * Setting power level allows the card to go to sleep when not busy. - * - * We calculate a sleep command based on the required latency, which - * we get from mac80211. In order to handle thermal throttling, we can - * also use pre-defined power levels. - */ - -/* - * This defines the old power levels. 
They are still used by default - * (level 1) and for thermal throttle (levels 3 through 5) - */ - -struct il_power_vec_entry { - struct il_powertable_cmd cmd; - u8 no_dtim; /* number of skip dtim */ -}; - -static void -il_power_sleep_cam_cmd(struct il_priv *il, struct il_powertable_cmd *cmd) -{ - memset(cmd, 0, sizeof(*cmd)); - - if (il->power_data.pci_pm) - cmd->flags |= IL_POWER_PCI_PM_MSK; - - D_POWER("Sleep command for CAM\n"); -} - -static int -il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd) -{ - D_POWER("Sending power/sleep command\n"); - D_POWER("Flags value = 0x%08X\n", cmd->flags); - D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout)); - D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout)); - D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n", - le32_to_cpu(cmd->sleep_interval[0]), - le32_to_cpu(cmd->sleep_interval[1]), - le32_to_cpu(cmd->sleep_interval[2]), - le32_to_cpu(cmd->sleep_interval[3]), - le32_to_cpu(cmd->sleep_interval[4])); - - return il_send_cmd_pdu(il, C_POWER_TBL, - sizeof(struct il_powertable_cmd), cmd); -} - -int -il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force) -{ - int ret; - bool update_chains; - - lockdep_assert_held(&il->mutex); - - /* Don't update the RX chain when chain noise calibration is running */ - update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE || - il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE; - - if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force) - return 0; - - if (!il_is_ready_rf(il)) - return -EIO; - - /* scan complete use sleep_power_next, need to be updated */ - memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd)); - if (test_bit(S_SCANNING, &il->status) && !force) { - D_INFO("Defer power set mode while scanning\n"); - return 0; - } - - if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK) - set_bit(S_POWER_PMI, &il->status); - - ret = il_set_power(il, cmd); - if (!ret) { - if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)) - clear_bit(S_POWER_PMI, &il->status); - - if (il->cfg->ops->lib->update_chain_flags && update_chains) - il->cfg->ops->lib->update_chain_flags(il); - else if (il->cfg->ops->lib->update_chain_flags) - D_POWER("Cannot update the power, chain noise " - "calibration running: %d\n", - il->chain_noise_data.state); - - memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)); - } else - IL_ERR("set power fail, ret = %d", ret); - - return ret; -} - -int -il_power_update_mode(struct il_priv *il, bool force) -{ - struct il_powertable_cmd cmd; - - il_power_sleep_cam_cmd(il, &cmd); - return il_power_set_mode(il, &cmd, force); -} -EXPORT_SYMBOL(il_power_update_mode); - -/* initialize to default */ -void -il_power_initialize(struct il_priv *il) -{ - u16 lctl = il_pcie_link_ctl(il); - - il->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN); - - il->power_data.debug_sleep_level_override = -1; - - memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd)); -} -EXPORT_SYMBOL(il_power_initialize); - -/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after - * sending probe req. This should be set long enough to hear probe responses - * from more than one AP. */ -#define IL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */ -#define IL_ACTIVE_DWELL_TIME_52 (20) - -#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3) -#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2) - -/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel. - * Must be set longer than active dwell time. 
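For example, combining the active-scan constants just defined with the passive-scan constants that follow immediately below, the dwell helpers later in this file (il_get_active_dwell_time() and il_get_passive_dwell_time()) work out as follows; the traffic figures are illustrative.

	/* Active scan, 2.4 GHz, two queued probe requests:
	 *   30 + 3 * (2 + 1) = 39 ms of listening per channel.
	 * Passive scan, 2.4 GHz, associated with a beacon interval of 100:
	 *   clamp = 100 * 98 / 100 - 2 * 5 = 88 ms, below the unassociated
	 *   default of 100 + 20 = 120 ms, so 88 ms is used. */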
- * For the most reliable scan, set > AP beacon interval (typically 100msec). */ -#define IL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */ -#define IL_PASSIVE_DWELL_TIME_52 (10) -#define IL_PASSIVE_DWELL_BASE (100) -#define IL_CHANNEL_TUNE_TIME 5 - -static int -il_send_scan_abort(struct il_priv *il) -{ - int ret; - struct il_rx_pkt *pkt; - struct il_host_cmd cmd = { - .id = C_SCAN_ABORT, - .flags = CMD_WANT_SKB, - }; - - /* Exit instantly with error when device is not ready - * to receive scan abort command or it does not perform - * hardware scan currently */ - if (!test_bit(S_READY, &il->status) || - !test_bit(S_GEO_CONFIGURED, &il->status) || - !test_bit(S_SCAN_HW, &il->status) || - test_bit(S_FW_ERROR, &il->status) || - test_bit(S_EXIT_PENDING, &il->status)) - return -EIO; - - ret = il_send_cmd_sync(il, &cmd); - if (ret) - return ret; - - pkt = (struct il_rx_pkt *)cmd.reply_page; - if (pkt->u.status != CAN_ABORT_STATUS) { - /* The scan abort will return 1 for success or - * 2 for "failure". A failure condition can be - * due to simply not being in an active scan which - * can occur if we send the scan abort before we - * the microcode has notified us that a scan is - * completed. */ - D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status); - ret = -EIO; - } - - il_free_pages(il, cmd.reply_page); - return ret; -} - -static void -il_complete_scan(struct il_priv *il, bool aborted) -{ - /* check if scan was requested from mac80211 */ - if (il->scan_request) { - D_SCAN("Complete scan in mac80211\n"); - ieee80211_scan_completed(il->hw, aborted); - } - - il->scan_vif = NULL; - il->scan_request = NULL; -} - -void -il_force_scan_end(struct il_priv *il) -{ - lockdep_assert_held(&il->mutex); - - if (!test_bit(S_SCANNING, &il->status)) { - D_SCAN("Forcing scan end while not scanning\n"); - return; - } - - D_SCAN("Forcing scan end\n"); - clear_bit(S_SCANNING, &il->status); - clear_bit(S_SCAN_HW, &il->status); - clear_bit(S_SCAN_ABORTING, &il->status); - il_complete_scan(il, true); -} - -static void -il_do_scan_abort(struct il_priv *il) -{ - int ret; - - lockdep_assert_held(&il->mutex); - - if (!test_bit(S_SCANNING, &il->status)) { - D_SCAN("Not performing scan to abort\n"); - return; - } - - if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) { - D_SCAN("Scan abort in progress\n"); - return; - } - - ret = il_send_scan_abort(il); - if (ret) { - D_SCAN("Send scan abort failed %d\n", ret); - il_force_scan_end(il); - } else - D_SCAN("Successfully send scan abort\n"); -} - -/** - * il_scan_cancel - Cancel any currently executing HW scan - */ -int -il_scan_cancel(struct il_priv *il) -{ - D_SCAN("Queuing abort scan\n"); - queue_work(il->workqueue, &il->abort_scan); - return 0; -} -EXPORT_SYMBOL(il_scan_cancel); - -/** - * il_scan_cancel_timeout - Cancel any currently executing HW scan - * @ms: amount of time to wait (in milliseconds) for scan to abort - * - */ -int -il_scan_cancel_timeout(struct il_priv *il, unsigned long ms) -{ - unsigned long timeout = jiffies + msecs_to_jiffies(ms); - - lockdep_assert_held(&il->mutex); - - D_SCAN("Scan cancel timeout\n"); - - il_do_scan_abort(il); - - while (time_before_eq(jiffies, timeout)) { - if (!test_bit(S_SCAN_HW, &il->status)) - break; - msleep(20); - } - - return test_bit(S_SCAN_HW, &il->status); -} -EXPORT_SYMBOL(il_scan_cancel_timeout); - -/* Service response to C_SCAN (0x80) */ -static void -il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb) -{ -#ifdef CONFIG_IWLEGACY_DEBUG - struct il_rx_pkt *pkt = rxb_addr(rxb); - struct il_scanreq_notification *notif = - 
(struct il_scanreq_notification *)pkt->u.raw; - - D_SCAN("Scan request status = 0x%x\n", notif->status); -#endif -} - -/* Service N_SCAN_START (0x82) */ -static void -il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb) -{ - struct il_rx_pkt *pkt = rxb_addr(rxb); - struct il_scanstart_notification *notif = - (struct il_scanstart_notification *)pkt->u.raw; - il->scan_start_tsf = le32_to_cpu(notif->tsf_low); - D_SCAN("Scan start: " "%d [802.11%s] " - "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel, - notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high), - le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer); -} - -/* Service N_SCAN_RESULTS (0x83) */ -static void -il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb) -{ -#ifdef CONFIG_IWLEGACY_DEBUG - struct il_rx_pkt *pkt = rxb_addr(rxb); - struct il_scanresults_notification *notif = - (struct il_scanresults_notification *)pkt->u.raw; - - D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d " - "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a", - le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low), - le32_to_cpu(notif->stats[0]), - le32_to_cpu(notif->tsf_low) - il->scan_start_tsf); -#endif -} - -/* Service N_SCAN_COMPLETE (0x84) */ -static void -il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb) -{ - -#ifdef CONFIG_IWLEGACY_DEBUG - struct il_rx_pkt *pkt = rxb_addr(rxb); - struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw; -#endif - - D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", - scan_notif->scanned_channels, scan_notif->tsf_low, - scan_notif->tsf_high, scan_notif->status); - - /* The HW is no longer scanning */ - clear_bit(S_SCAN_HW, &il->status); - - D_SCAN("Scan on %sGHz took %dms\n", - (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2", - jiffies_to_msecs(jiffies - il->scan_start)); - - queue_work(il->workqueue, &il->scan_completed); -} - -void -il_setup_rx_scan_handlers(struct il_priv *il) -{ - /* scan handlers */ - il->handlers[C_SCAN] = il_hdl_scan; - il->handlers[N_SCAN_START] = il_hdl_scan_start; - il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results; - il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete; -} -EXPORT_SYMBOL(il_setup_rx_scan_handlers); - -inline u16 -il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band, - u8 n_probes) -{ - if (band == IEEE80211_BAND_5GHZ) - return IL_ACTIVE_DWELL_TIME_52 + - IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1); - else - return IL_ACTIVE_DWELL_TIME_24 + - IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1); -} -EXPORT_SYMBOL(il_get_active_dwell_time); - -u16 -il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band, - struct ieee80211_vif *vif) -{ - struct il_rxon_context *ctx = &il->ctx; - u16 value; - - u16 passive = - (band == - IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE + - IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE + - IL_PASSIVE_DWELL_TIME_52; - - if (il_is_any_associated(il)) { - /* - * If we're associated, we clamp the maximum passive - * dwell time to be 98% of the smallest beacon interval - * (minus 2 * channel tune time) - */ - value = ctx->vif ? 
ctx->vif->bss_conf.beacon_int : 0; - if (value > IL_PASSIVE_DWELL_BASE || !value) - value = IL_PASSIVE_DWELL_BASE; - value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2; - passive = min(value, passive); - } - - return passive; -} -EXPORT_SYMBOL(il_get_passive_dwell_time); - -void -il_init_scan_params(struct il_priv *il) -{ - u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1; - if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ]) - il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx; - if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ]) - il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx; -} -EXPORT_SYMBOL(il_init_scan_params); - -static int -il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif) -{ - int ret; - - lockdep_assert_held(&il->mutex); - - if (WARN_ON(!il->cfg->ops->utils->request_scan)) - return -EOPNOTSUPP; - - cancel_delayed_work(&il->scan_check); - - if (!il_is_ready_rf(il)) { - IL_WARN("Request scan called when driver not ready.\n"); - return -EIO; - } - - if (test_bit(S_SCAN_HW, &il->status)) { - D_SCAN("Multiple concurrent scan requests in parallel.\n"); - return -EBUSY; - } - - if (test_bit(S_SCAN_ABORTING, &il->status)) { - D_SCAN("Scan request while abort pending.\n"); - return -EBUSY; - } - - D_SCAN("Starting scan...\n"); - - set_bit(S_SCANNING, &il->status); - il->scan_start = jiffies; - - ret = il->cfg->ops->utils->request_scan(il, vif); - if (ret) { - clear_bit(S_SCANNING, &il->status); - return ret; - } - - queue_delayed_work(il->workqueue, &il->scan_check, - IL_SCAN_CHECK_WATCHDOG); - - return 0; -} - -int -il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct cfg80211_scan_request *req) -{ - struct il_priv *il = hw->priv; - int ret; - - D_MAC80211("enter\n"); - - if (req->n_channels == 0) - return -EINVAL; - - mutex_lock(&il->mutex); - - if (test_bit(S_SCANNING, &il->status)) { - D_SCAN("Scan already in progress.\n"); - ret = -EAGAIN; - goto out_unlock; - } - - /* mac80211 will only ask for one band at a time */ - il->scan_request = req; - il->scan_vif = vif; - il->scan_band = req->channels[0]->band; - - ret = il_scan_initiate(il, vif); - - D_MAC80211("leave\n"); - -out_unlock: - mutex_unlock(&il->mutex); - - return ret; -} -EXPORT_SYMBOL(il_mac_hw_scan); - -static void -il_bg_scan_check(struct work_struct *data) -{ - struct il_priv *il = - container_of(data, struct il_priv, scan_check.work); - - D_SCAN("Scan check work\n"); - - /* Since we are here firmware does not finish scan and - * most likely is in bad shape, so we don't bother to - * send abort command, just force scan complete to mac80211 */ - mutex_lock(&il->mutex); - il_force_scan_end(il); - mutex_unlock(&il->mutex); -} - -/** - * il_fill_probe_req - fill in all required fields and IE for probe request - */ - -u16 -il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame, - const u8 *ta, const u8 *ies, int ie_len, int left) -{ - int len = 0; - u8 *pos = NULL; - - /* Make sure there is enough space for the probe request, - * two mandatory IEs and the data */ - left -= 24; - if (left < 0) - return 0; - - frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); - memcpy(frame->da, il_bcast_addr, ETH_ALEN); - memcpy(frame->sa, ta, ETH_ALEN); - memcpy(frame->bssid, il_bcast_addr, ETH_ALEN); - frame->seq_ctrl = 0; - - len += 24; - - /* ...next IE... 
*/ - pos = &frame->u.probe_req.variable[0]; - - /* fill in our indirect SSID IE */ - left -= 2; - if (left < 0) - return 0; - *pos++ = WLAN_EID_SSID; - *pos++ = 0; - - len += 2; - - if (WARN_ON(left < ie_len)) - return len; - - if (ies && ie_len) { - memcpy(pos, ies, ie_len); - len += ie_len; - } - - return (u16) len; -} -EXPORT_SYMBOL(il_fill_probe_req); - -static void -il_bg_abort_scan(struct work_struct *work) -{ - struct il_priv *il = container_of(work, struct il_priv, abort_scan); - - D_SCAN("Abort scan work\n"); - - /* We keep scan_check work queued in case when firmware will not - * report back scan completed notification */ - mutex_lock(&il->mutex); - il_scan_cancel_timeout(il, 200); - mutex_unlock(&il->mutex); -} - -static void -il_bg_scan_completed(struct work_struct *work) -{ - struct il_priv *il = container_of(work, struct il_priv, scan_completed); - bool aborted; - - D_SCAN("Completed scan.\n"); - - cancel_delayed_work(&il->scan_check); - - mutex_lock(&il->mutex); - - aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status); - if (aborted) - D_SCAN("Aborted scan completed.\n"); - - if (!test_and_clear_bit(S_SCANNING, &il->status)) { - D_SCAN("Scan already completed.\n"); - goto out_settings; - } - - il_complete_scan(il, aborted); - -out_settings: - /* Can we still talk to firmware ? */ - if (!il_is_ready_rf(il)) - goto out; - - /* - * We do not commit power settings while scan is pending, - * do it now if the settings changed. - */ - il_power_set_mode(il, &il->power_data.sleep_cmd_next, false); - il_set_tx_power(il, il->tx_power_next, false); - - il->cfg->ops->utils->post_scan(il); - -out: - mutex_unlock(&il->mutex); -} - -void -il_setup_scan_deferred_work(struct il_priv *il) -{ - INIT_WORK(&il->scan_completed, il_bg_scan_completed); - INIT_WORK(&il->abort_scan, il_bg_abort_scan); - INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check); -} -EXPORT_SYMBOL(il_setup_scan_deferred_work); - -void -il_cancel_scan_deferred_work(struct il_priv *il) -{ - cancel_work_sync(&il->abort_scan); - cancel_work_sync(&il->scan_completed); - - if (cancel_delayed_work_sync(&il->scan_check)) { - mutex_lock(&il->mutex); - il_force_scan_end(il); - mutex_unlock(&il->mutex); - } -} -EXPORT_SYMBOL(il_cancel_scan_deferred_work); - -/* il->sta_lock must be held */ -static void -il_sta_ucode_activate(struct il_priv *il, u8 sta_id) -{ - - if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) - IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n", - sta_id, il->stations[sta_id].sta.sta.addr); - - if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) { - D_ASSOC("STA id %u addr %pM already present" - " in uCode (according to driver)\n", sta_id, - il->stations[sta_id].sta.sta.addr); - } else { - il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE; - D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id, - il->stations[sta_id].sta.sta.addr); - } -} - -static int -il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta, - struct il_rx_pkt *pkt, bool sync) -{ - u8 sta_id = addsta->sta.sta_id; - unsigned long flags; - int ret = -EIO; - - if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { - IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags); - return ret; - } - - D_INFO("Processing response for adding station %u\n", sta_id); - - spin_lock_irqsave(&il->sta_lock, flags); - - switch (pkt->u.add_sta.status) { - case ADD_STA_SUCCESS_MSK: - D_INFO("C_ADD_STA PASSED\n"); - il_sta_ucode_activate(il, sta_id); - ret = 0; - break; - case ADD_STA_NO_ROOM_IN_TBL: - IL_ERR("Adding station %d failed, 
no room in table.\n", sta_id); - break; - case ADD_STA_NO_BLOCK_ACK_RESOURCE: - IL_ERR("Adding station %d failed, no block ack resource.\n", - sta_id); - break; - case ADD_STA_MODIFY_NON_EXIST_STA: - IL_ERR("Attempting to modify non-existing station %d\n", - sta_id); - break; - default: - D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status); - break; - } - - D_INFO("%s station id %u addr %pM\n", - il->stations[sta_id].sta.mode == - STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id, - il->stations[sta_id].sta.sta.addr); - - /* - * XXX: The MAC address in the command buffer is often changed from - * the original sent to the device. That is, the MAC address - * written to the command buffer often is not the same MAC address - * read from the command buffer when the command returns. This - * issue has not yet been resolved and this debugging is left to - * observe the problem. - */ - D_INFO("%s station according to cmd buffer %pM\n", - il->stations[sta_id].sta.mode == - STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr); - spin_unlock_irqrestore(&il->sta_lock, flags); - - return ret; -} - -static void -il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd, - struct il_rx_pkt *pkt) -{ - struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload; - - il_process_add_sta_resp(il, addsta, pkt, false); - -} - -int -il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags) -{ - struct il_rx_pkt *pkt = NULL; - int ret = 0; - u8 data[sizeof(*sta)]; - struct il_host_cmd cmd = { - .id = C_ADD_STA, - .flags = flags, - .data = data, - }; - u8 sta_id __maybe_unused = sta->sta.sta_id; - - D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr, - flags & CMD_ASYNC ? "a" : ""); - - if (flags & CMD_ASYNC) - cmd.callback = il_add_sta_callback; - else { - cmd.flags |= CMD_WANT_SKB; - might_sleep(); - } - - cmd.len = il->cfg->ops->utils->build_addsta_hcmd(sta, data); - ret = il_send_cmd(il, &cmd); - - if (ret || (flags & CMD_ASYNC)) - return ret; - - if (ret == 0) { - pkt = (struct il_rx_pkt *)cmd.reply_page; - ret = il_process_add_sta_resp(il, sta, pkt, true); - } - il_free_pages(il, cmd.reply_page); - - return ret; -} -EXPORT_SYMBOL(il_send_add_sta); - -static void -il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta, - struct il_rxon_context *ctx) -{ - struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap; - __le32 sta_flags; - u8 mimo_ps_mode; - - if (!sta || !sta_ht_inf->ht_supported) - goto done; - - mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2; - D_ASSOC("spatial multiplexing power save mode: %s\n", - (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? "static" : - (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? 
"dynamic" : - "disabled"); - - sta_flags = il->stations[idx].sta.station_flags; - - sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK); - - switch (mimo_ps_mode) { - case WLAN_HT_CAP_SM_PS_STATIC: - sta_flags |= STA_FLG_MIMO_DIS_MSK; - break; - case WLAN_HT_CAP_SM_PS_DYNAMIC: - sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK; - break; - case WLAN_HT_CAP_SM_PS_DISABLED: - break; - default: - IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode); - break; - } - - sta_flags |= - cpu_to_le32((u32) sta_ht_inf-> - ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS); - - sta_flags |= - cpu_to_le32((u32) sta_ht_inf-> - ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); - - if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap)) - sta_flags |= STA_FLG_HT40_EN_MSK; - else - sta_flags &= ~STA_FLG_HT40_EN_MSK; - - il->stations[idx].sta.station_flags = sta_flags; -done: - return; -} - -/** - * il_prep_station - Prepare station information for addition - * - * should be called with sta_lock held - */ -u8 -il_prep_station(struct il_priv *il, struct il_rxon_context *ctx, - const u8 *addr, bool is_ap, struct ieee80211_sta *sta) -{ - struct il_station_entry *station; - int i; - u8 sta_id = IL_INVALID_STATION; - u16 rate; - - if (is_ap) - sta_id = ctx->ap_sta_id; - else if (is_broadcast_ether_addr(addr)) - sta_id = ctx->bcast_sta_id; - else - for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) { - if (!compare_ether_addr - (il->stations[i].sta.sta.addr, addr)) { - sta_id = i; - break; - } - - if (!il->stations[i].used && - sta_id == IL_INVALID_STATION) - sta_id = i; - } - - /* - * These two conditions have the same outcome, but keep them - * separate - */ - if (unlikely(sta_id == IL_INVALID_STATION)) - return sta_id; - - /* - * uCode is not able to deal with multiple requests to add a - * station. Keep track if one is in progress so that we do not send - * another. - */ - if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) { - D_INFO("STA %d already in process of being added.\n", sta_id); - return sta_id; - } - - if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) && - (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) && - !compare_ether_addr(il->stations[sta_id].sta.sta.addr, addr)) { - D_ASSOC("STA %d (%pM) already added, not adding again.\n", - sta_id, addr); - return sta_id; - } - - station = &il->stations[sta_id]; - station->used = IL_STA_DRIVER_ACTIVE; - D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr); - il->num_stations++; - - /* Set up the C_ADD_STA command to send to device */ - memset(&station->sta, 0, sizeof(struct il_addsta_cmd)); - memcpy(station->sta.sta.addr, addr, ETH_ALEN); - station->sta.mode = 0; - station->sta.sta.sta_id = sta_id; - station->sta.station_flags = ctx->station_flags; - station->ctxid = ctx->ctxid; - - if (sta) { - struct il_station_priv_common *sta_priv; - - sta_priv = (void *)sta->drv_priv; - sta_priv->ctx = ctx; - } - - /* - * OK to call unconditionally, since local stations (IBSS BSSID - * STA and broadcast STA) pass in a NULL sta, and mac80211 - * doesn't allow HT IBSS. - */ - il_set_ht_add_station(il, sta_id, sta, ctx); - - /* 3945 only */ - rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP; - /* Turn on both antennas for the station... 
*/ - station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK); - - return sta_id; - -} -EXPORT_SYMBOL_GPL(il_prep_station); - -#define STA_WAIT_TIMEOUT (HZ/2) - -/** - * il_add_station_common - - */ -int -il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx, - const u8 *addr, bool is_ap, struct ieee80211_sta *sta, - u8 *sta_id_r) -{ - unsigned long flags_spin; - int ret = 0; - u8 sta_id; - struct il_addsta_cmd sta_cmd; - - *sta_id_r = 0; - spin_lock_irqsave(&il->sta_lock, flags_spin); - sta_id = il_prep_station(il, ctx, addr, is_ap, sta); - if (sta_id == IL_INVALID_STATION) { - IL_ERR("Unable to prepare station %pM for addition\n", addr); - spin_unlock_irqrestore(&il->sta_lock, flags_spin); - return -EINVAL; - } - - /* - * uCode is not able to deal with multiple requests to add a - * station. Keep track if one is in progress so that we do not send - * another. - */ - if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) { - D_INFO("STA %d already in process of being added.\n", sta_id); - spin_unlock_irqrestore(&il->sta_lock, flags_spin); - return -EEXIST; - } - - if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) && - (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) { - D_ASSOC("STA %d (%pM) already added, not adding again.\n", - sta_id, addr); - spin_unlock_irqrestore(&il->sta_lock, flags_spin); - return -EEXIST; - } - - il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS; - memcpy(&sta_cmd, &il->stations[sta_id].sta, - sizeof(struct il_addsta_cmd)); - spin_unlock_irqrestore(&il->sta_lock, flags_spin); - - /* Add station to device's station table */ - ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC); - if (ret) { - spin_lock_irqsave(&il->sta_lock, flags_spin); - IL_ERR("Adding station %pM failed.\n", - il->stations[sta_id].sta.sta.addr); - il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE; - il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS; - spin_unlock_irqrestore(&il->sta_lock, flags_spin); - } - *sta_id_r = sta_id; - return ret; -} -EXPORT_SYMBOL(il_add_station_common); - -/** - * il_sta_ucode_deactivate - deactivate ucode status for a station - * - * il->sta_lock must be held - */ -static void -il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id) -{ - /* Ucode must be active and driver must be non active */ - if ((il->stations[sta_id]. 
- used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) != - IL_STA_UCODE_ACTIVE) - IL_ERR("removed non active STA %u\n", sta_id); - - il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE; - - memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry)); - D_ASSOC("Removed STA %u\n", sta_id); -} - -static int -il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id, - bool temporary) -{ - struct il_rx_pkt *pkt; - int ret; - - unsigned long flags_spin; - struct il_rem_sta_cmd rm_sta_cmd; - - struct il_host_cmd cmd = { - .id = C_REM_STA, - .len = sizeof(struct il_rem_sta_cmd), - .flags = CMD_SYNC, - .data = &rm_sta_cmd, - }; - - memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); - rm_sta_cmd.num_sta = 1; - memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN); - - cmd.flags |= CMD_WANT_SKB; - - ret = il_send_cmd(il, &cmd); - - if (ret) - return ret; - - pkt = (struct il_rx_pkt *)cmd.reply_page; - if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { - IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags); - ret = -EIO; - } - - if (!ret) { - switch (pkt->u.rem_sta.status) { - case REM_STA_SUCCESS_MSK: - if (!temporary) { - spin_lock_irqsave(&il->sta_lock, flags_spin); - il_sta_ucode_deactivate(il, sta_id); - spin_unlock_irqrestore(&il->sta_lock, - flags_spin); - } - D_ASSOC("C_REM_STA PASSED\n"); - break; - default: - ret = -EIO; - IL_ERR("C_REM_STA failed\n"); - break; - } - } - il_free_pages(il, cmd.reply_page); - - return ret; -} - -/** - * il_remove_station - Remove driver's knowledge of station. - */ -int -il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr) -{ - unsigned long flags; - - if (!il_is_ready(il)) { - D_INFO("Unable to remove station %pM, device not ready.\n", - addr); - /* - * It is typical for stations to be removed when we are - * going down. Return success since device will be down - * soon anyway - */ - return 0; - } - - D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr); - - if (WARN_ON(sta_id == IL_INVALID_STATION)) - return -EINVAL; - - spin_lock_irqsave(&il->sta_lock, flags); - - if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) { - D_INFO("Removing %pM but non DRIVER active\n", addr); - goto out_err; - } - - if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) { - D_INFO("Removing %pM but non UCODE active\n", addr); - goto out_err; - } - - if (il->stations[sta_id].used & IL_STA_LOCAL) { - kfree(il->stations[sta_id].lq); - il->stations[sta_id].lq = NULL; - } - - il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE; - - il->num_stations--; - - BUG_ON(il->num_stations < 0); - - spin_unlock_irqrestore(&il->sta_lock, flags); - - return il_send_remove_station(il, addr, sta_id, false); -out_err: - spin_unlock_irqrestore(&il->sta_lock, flags); - return -EINVAL; -} -EXPORT_SYMBOL_GPL(il_remove_station); - -/** - * il_clear_ucode_stations - clear ucode station table bits - * - * This function clears all the bits in the driver indicating - * which stations are active in the ucode. Call when something - * other than explicit station management would cause this in - * the ucode, e.g. unassociated RXON. 
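The add/remove paths above juggle three driver-side flags per station: IL_STA_DRIVER_ACTIVE (set by il_prep_station()), IL_STA_UCODE_INPROGRESS (held while a C_ADD_STA is in flight), and IL_STA_UCODE_ACTIVE (set by the add-station response handler and cleared again when C_REM_STA succeeds). A minimal sketch of the "already added" test that il_prep_station() and il_add_station_common() each perform, assuming the IL_STA_* bit definitions from the driver header; the helper name is illustrative, not part of the driver:

	/* A station is usable only once both the driver and the uCode
	 * consider it active; anything else is a transient state. */
	static bool illustrative_sta_fully_active(u8 used)
	{
		return (used & (IL_STA_DRIVER_ACTIVE | IL_STA_UCODE_ACTIVE)) ==
		       (IL_STA_DRIVER_ACTIVE | IL_STA_UCODE_ACTIVE);
	}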
- */ -void -il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx) -{ - int i; - unsigned long flags_spin; - bool cleared = false; - - D_INFO("Clearing ucode stations in driver\n"); - - spin_lock_irqsave(&il->sta_lock, flags_spin); - for (i = 0; i < il->hw_params.max_stations; i++) { - if (ctx && ctx->ctxid != il->stations[i].ctxid) - continue; - - if (il->stations[i].used & IL_STA_UCODE_ACTIVE) { - D_INFO("Clearing ucode active for station %d\n", i); - il->stations[i].used &= ~IL_STA_UCODE_ACTIVE; - cleared = true; - } - } - spin_unlock_irqrestore(&il->sta_lock, flags_spin); - - if (!cleared) - D_INFO("No active stations found to be cleared\n"); -} -EXPORT_SYMBOL(il_clear_ucode_stations); - -/** - * il_restore_stations() - Restore driver known stations to device - * - * All stations considered active by driver, but not present in ucode, is - * restored. - * - * Function sleeps. - */ -void -il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx) -{ - struct il_addsta_cmd sta_cmd; - struct il_link_quality_cmd lq; - unsigned long flags_spin; - int i; - bool found = false; - int ret; - bool send_lq; - - if (!il_is_ready(il)) { - D_INFO("Not ready yet, not restoring any stations.\n"); - return; - } - - D_ASSOC("Restoring all known stations ... start.\n"); - spin_lock_irqsave(&il->sta_lock, flags_spin); - for (i = 0; i < il->hw_params.max_stations; i++) { - if (ctx->ctxid != il->stations[i].ctxid) - continue; - if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) && - !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) { - D_ASSOC("Restoring sta %pM\n", - il->stations[i].sta.sta.addr); - il->stations[i].sta.mode = 0; - il->stations[i].used |= IL_STA_UCODE_INPROGRESS; - found = true; - } - } - - for (i = 0; i < il->hw_params.max_stations; i++) { - if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) { - memcpy(&sta_cmd, &il->stations[i].sta, - sizeof(struct il_addsta_cmd)); - send_lq = false; - if (il->stations[i].lq) { - memcpy(&lq, il->stations[i].lq, - sizeof(struct il_link_quality_cmd)); - send_lq = true; - } - spin_unlock_irqrestore(&il->sta_lock, flags_spin); - ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC); - if (ret) { - spin_lock_irqsave(&il->sta_lock, flags_spin); - IL_ERR("Adding station %pM failed.\n", - il->stations[i].sta.sta.addr); - il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE; - il->stations[i].used &= - ~IL_STA_UCODE_INPROGRESS; - spin_unlock_irqrestore(&il->sta_lock, - flags_spin); - } - /* - * Rate scaling has already been initialized, send - * current LQ command - */ - if (send_lq) - il_send_lq_cmd(il, ctx, &lq, CMD_SYNC, true); - spin_lock_irqsave(&il->sta_lock, flags_spin); - il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS; - } - } - - spin_unlock_irqrestore(&il->sta_lock, flags_spin); - if (!found) - D_INFO("Restoring all known stations" - " .... no stations to be restored.\n"); - else - D_INFO("Restoring all known stations" " .... 
complete.\n"); -} -EXPORT_SYMBOL(il_restore_stations); - -int -il_get_free_ucode_key_idx(struct il_priv *il) -{ - int i; - - for (i = 0; i < il->sta_key_max_num; i++) - if (!test_and_set_bit(i, &il->ucode_key_table)) - return i; - - return WEP_INVALID_OFFSET; -} -EXPORT_SYMBOL(il_get_free_ucode_key_idx); - -void -il_dealloc_bcast_stations(struct il_priv *il) -{ - unsigned long flags; - int i; - - spin_lock_irqsave(&il->sta_lock, flags); - for (i = 0; i < il->hw_params.max_stations; i++) { - if (!(il->stations[i].used & IL_STA_BCAST)) - continue; - - il->stations[i].used &= ~IL_STA_UCODE_ACTIVE; - il->num_stations--; - BUG_ON(il->num_stations < 0); - kfree(il->stations[i].lq); - il->stations[i].lq = NULL; - } - spin_unlock_irqrestore(&il->sta_lock, flags); -} -EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations); - -#ifdef CONFIG_IWLEGACY_DEBUG -static void -il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq) -{ - int i; - D_RATE("lq station id 0x%x\n", lq->sta_id); - D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk, - lq->general_params.dual_stream_ant_msk); - - for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) - D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags); -} -#else -static inline void -il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq) -{ -} -#endif - -/** - * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity - * - * It sometimes happens when a HT rate has been in use and we - * loose connectivity with AP then mac80211 will first tell us that the - * current channel is not HT anymore before removing the station. In such a - * scenario the RXON flags will be updated to indicate we are not - * communicating HT anymore, but the LQ command may still contain HT rates. - * Test for this to prevent driver from sending LQ command between the time - * RXON flags are updated and when LQ command is updated. - */ -static bool -il_is_lq_table_valid(struct il_priv *il, struct il_rxon_context *ctx, - struct il_link_quality_cmd *lq) -{ - int i; - - if (ctx->ht.enabled) - return true; - - D_INFO("Channel %u is not an HT channel\n", ctx->active.channel); - for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { - if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) { - D_INFO("idx %d of LQ expects HT channel\n", i); - return false; - } - } - return true; -} - -/** - * il_send_lq_cmd() - Send link quality command - * @init: This command is sent as part of station initialization right - * after station has been added. - * - * The link quality command is sent as the last step of station creation. - * This is the special case in which init is set and we call a callback in - * this case to clear the state indicating that station creation is in - * progress. 
- */ -int -il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx, - struct il_link_quality_cmd *lq, u8 flags, bool init) -{ - int ret = 0; - unsigned long flags_spin; - - struct il_host_cmd cmd = { - .id = C_TX_LINK_QUALITY_CMD, - .len = sizeof(struct il_link_quality_cmd), - .flags = flags, - .data = lq, - }; - - if (WARN_ON(lq->sta_id == IL_INVALID_STATION)) - return -EINVAL; - - spin_lock_irqsave(&il->sta_lock, flags_spin); - if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) { - spin_unlock_irqrestore(&il->sta_lock, flags_spin); - return -EINVAL; - } - spin_unlock_irqrestore(&il->sta_lock, flags_spin); - - il_dump_lq_cmd(il, lq); - BUG_ON(init && (cmd.flags & CMD_ASYNC)); - - if (il_is_lq_table_valid(il, ctx, lq)) - ret = il_send_cmd(il, &cmd); - else - ret = -EINVAL; - - if (cmd.flags & CMD_ASYNC) - return ret; - - if (init) { - D_INFO("init LQ command complete," - " clearing sta addition status for sta %d\n", - lq->sta_id); - spin_lock_irqsave(&il->sta_lock, flags_spin); - il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS; - spin_unlock_irqrestore(&il->sta_lock, flags_spin); - } - return ret; -} -EXPORT_SYMBOL(il_send_lq_cmd); - -int -il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct il_priv *il = hw->priv; - struct il_station_priv_common *sta_common = (void *)sta->drv_priv; - int ret; - - D_INFO("received request to remove station %pM\n", sta->addr); - mutex_lock(&il->mutex); - D_INFO("proceeding to remove station %pM\n", sta->addr); - ret = il_remove_station(il, sta_common->sta_id, sta->addr); - if (ret) - IL_ERR("Error removing station %pM\n", sta->addr); - mutex_unlock(&il->mutex); - return ret; -} -EXPORT_SYMBOL(il_mac_sta_remove); - -/************************** RX-FUNCTIONS ****************************/ -/* - * Rx theory of operation - * - * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), - * each of which point to Receive Buffers to be filled by the NIC. These get - * used not only for Rx frames, but for any command response or notification - * from the NIC. The driver and NIC manage the Rx buffers by means - * of idxes into the circular buffer. - * - * Rx Queue Indexes - * The host/firmware share two idx registers for managing the Rx buffers. - * - * The READ idx maps to the first position that the firmware may be writing - * to -- the driver can read up to (but not including) this position and get - * good data. - * The READ idx is managed by the firmware once the card is enabled. - * - * The WRITE idx maps to the last position the driver has read from -- the - * position preceding WRITE is the last slot the firmware can place a packet. - * - * The queue is empty (no good data) if WRITE = READ - 1, and is full if - * WRITE = READ. - * - * During initialization, the host sets up the READ queue position to the first - * IDX position, and WRITE to the last (READ - 1 wrapped) - * - * When the firmware places a packet in a buffer, it will advance the READ idx - * and fire the RX interrupt. The driver can then query the READ idx and - * process as many packets as possible, moving the WRITE idx forward as it - * resets the Rx queue buffers with new memory. - * - * The management in the driver is as follows: - * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When - * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled - * to replenish the iwl->rxq->rx_free. 
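The read/write convention described above (the ring is empty when WRITE = READ - 1 and full when WRITE = READ) can be stated as two small predicates. A minimal sketch, assuming a power-of-two ring size so the wrap can be done with a mask; the names are illustrative rather than taken from the driver, and il_rx_queue_space() below additionally keeps two slots back so the two states stay easy to tell apart:

	static bool illustrative_ring_empty(u32 read, u32 write, u32 size)
	{
		return write == ((read + size - 1) & (size - 1));	/* WRITE == READ - 1 */
	}

	static bool illustrative_ring_full(u32 read, u32 write)
	{
		return write == read;					/* WRITE == READ */
	}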
- * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the - * iwl->rxq is replenished and the READ IDX is updated (updating the - * 'processed' and 'read' driver idxes as well) - * + A received packet is processed and handed to the kernel network stack, - * detached from the iwl->rxq. The driver 'processed' idx is updated. - * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free - * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ - * IDX is not incremented and iwl->status(RX_STALLED) is set. If there - * were enough free buffers and RX_STALLED is set it is cleared. - * - * - * Driver sequence: - * - * il_rx_queue_alloc() Allocates rx_free - * il_rx_replenish() Replenishes rx_free list from rx_used, and calls - * il_rx_queue_restock - * il_rx_queue_restock() Moves available buffers from rx_free into Rx - * queue, updates firmware pointers, and updates - * the WRITE idx. If insufficient rx_free buffers - * are available, schedules il_rx_replenish - * - * -- enable interrupts -- - * ISR - il_rx() Detach il_rx_bufs from pool up to the - * READ IDX, detaching the SKB from the pool. - * Moves the packet buffer from queue to rx_used. - * Calls il_rx_queue_restock to refill any empty - * slots. - * ... - * - */ - -/** - * il_rx_queue_space - Return number of free slots available in queue. - */ -int -il_rx_queue_space(const struct il_rx_queue *q) -{ - int s = q->read - q->write; - if (s <= 0) - s += RX_QUEUE_SIZE; - /* keep some buffer to not confuse full and empty queue */ - s -= 2; - if (s < 0) - s = 0; - return s; -} -EXPORT_SYMBOL(il_rx_queue_space); - -/** - * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue - */ -void -il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q) -{ - unsigned long flags; - u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg; - u32 reg; - - spin_lock_irqsave(&q->lock, flags); - - if (q->need_update == 0) - goto exit_unlock; - - /* If power-saving is in use, make sure device is awake */ - if (test_bit(S_POWER_PMI, &il->status)) { - reg = _il_rd(il, CSR_UCODE_DRV_GP1); - - if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { - D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n", - reg); - il_set_bit(il, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - goto exit_unlock; - } - - q->write_actual = (q->write & ~0x7); - il_wr(il, rx_wrt_ptr_reg, q->write_actual); - - /* Else device is assumed to be awake */ - } else { - /* Device expects a multiple of 8 */ - q->write_actual = (q->write & ~0x7); - il_wr(il, rx_wrt_ptr_reg, q->write_actual); - } - - q->need_update = 0; - -exit_unlock: - spin_unlock_irqrestore(&q->lock, flags); -} -EXPORT_SYMBOL(il_rx_queue_update_write_ptr); - -int -il_rx_queue_alloc(struct il_priv *il) -{ - struct il_rx_queue *rxq = &il->rxq; - struct device *dev = &il->pci_dev->dev; - int i; - - spin_lock_init(&rxq->lock); - INIT_LIST_HEAD(&rxq->rx_free); - INIT_LIST_HEAD(&rxq->rx_used); - - /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ - rxq->bd = - dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma, - GFP_KERNEL); - if (!rxq->bd) - goto err_bd; - - rxq->rb_stts = - dma_alloc_coherent(dev, sizeof(struct il_rb_status), - &rxq->rb_stts_dma, GFP_KERNEL); - if (!rxq->rb_stts) - goto err_rb; - - /* Fill the rx_used queue with _all_ of the Rx buffers */ - for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) - list_add_tail(&rxq->pool[i].list, &rxq->rx_used); - - /* Set us so that we have processed and used all buffers, but have - * not 
restocked the Rx queue with fresh buffers */ - rxq->read = rxq->write = 0; - rxq->write_actual = 0; - rxq->free_count = 0; - rxq->need_update = 0; - return 0; - -err_rb: - dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, - rxq->bd_dma); -err_bd: - return -ENOMEM; -} -EXPORT_SYMBOL(il_rx_queue_alloc); - -void -il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb) -{ - struct il_rx_pkt *pkt = rxb_addr(rxb); - struct il_spectrum_notification *report = &(pkt->u.spectrum_notif); - - if (!report->state) { - D_11H("Spectrum Measure Notification: Start\n"); - return; - } - - memcpy(&il->measure_report, report, sizeof(*report)); - il->measurement_status |= MEASUREMENT_READY; -} -EXPORT_SYMBOL(il_hdl_spectrum_measurement); - -/* - * returns non-zero if packet should be dropped - */ -int -il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr, - u32 decrypt_res, struct ieee80211_rx_status *stats) -{ - u16 fc = le16_to_cpu(hdr->frame_control); - - /* - * All contexts have the same setting here due to it being - * a module parameter, so OK to check any context. - */ - if (il->ctx.active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK) - return 0; - - if (!(fc & IEEE80211_FCTL_PROTECTED)) - return 0; - - D_RX("decrypt_res:0x%x\n", decrypt_res); - switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { - case RX_RES_STATUS_SEC_TYPE_TKIP: - /* The uCode has got a bad phase 1 Key, pushes the packet. - * Decryption will be done in SW. */ - if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == - RX_RES_STATUS_BAD_KEY_TTAK) - break; - - case RX_RES_STATUS_SEC_TYPE_WEP: - if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == - RX_RES_STATUS_BAD_ICV_MIC) { - /* bad ICV, the packet is destroyed since the - * decryption is inplace, drop it */ - D_RX("Packet destroyed\n"); - return -1; - } - case RX_RES_STATUS_SEC_TYPE_CCMP: - if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == - RX_RES_STATUS_DECRYPT_OK) { - D_RX("hw decrypt successfully!!!\n"); - stats->flag |= RX_FLAG_DECRYPTED; - } - break; - - default: - break; - } - return 0; -} -EXPORT_SYMBOL(il_set_decrypted_flag); - -/** - * il_txq_update_write_ptr - Send new write idx to hardware - */ -void -il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq) -{ - u32 reg = 0; - int txq_id = txq->q.id; - - if (txq->need_update == 0) - return; - - /* if we're trying to save power */ - if (test_bit(S_POWER_PMI, &il->status)) { - /* wake up nic if it's powered down ... - * uCode will wake up, and interrupt us again, so next - * time we'll skip this part. */ - reg = _il_rd(il, CSR_UCODE_DRV_GP1); - - if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { - D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n", - txq_id, reg); - il_set_bit(il, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - return; - } - - il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); - - /* - * else not in power-save mode, - * uCode will never sleep when we're - * trying to tx (during RFKILL, we're not trying to tx). 
- */ - } else - _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); - txq->need_update = 0; -} -EXPORT_SYMBOL(il_txq_update_write_ptr); - -/** - * il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's - */ -void -il_tx_queue_unmap(struct il_priv *il, int txq_id) -{ - struct il_tx_queue *txq = &il->txq[txq_id]; - struct il_queue *q = &txq->q; - - if (q->n_bd == 0) - return; - - while (q->write_ptr != q->read_ptr) { - il->cfg->ops->lib->txq_free_tfd(il, txq); - q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd); - } -} -EXPORT_SYMBOL(il_tx_queue_unmap); - -/** - * il_tx_queue_free - Deallocate DMA queue. - * @txq: Transmit queue to deallocate. - * - * Empty queue by removing and destroying all BD's. - * Free all buffers. - * 0-fill, but do not free "txq" descriptor structure. - */ -void -il_tx_queue_free(struct il_priv *il, int txq_id) -{ - struct il_tx_queue *txq = &il->txq[txq_id]; - struct device *dev = &il->pci_dev->dev; - int i; - - il_tx_queue_unmap(il, txq_id); - - /* De-alloc array of command/tx buffers */ - for (i = 0; i < TFD_TX_CMD_SLOTS; i++) - kfree(txq->cmd[i]); - - /* De-alloc circular buffer of TFDs */ - if (txq->q.n_bd) - dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd, - txq->tfds, txq->q.dma_addr); - - /* De-alloc array of per-TFD driver data */ - kfree(txq->txb); - txq->txb = NULL; - - /* deallocate arrays */ - kfree(txq->cmd); - kfree(txq->meta); - txq->cmd = NULL; - txq->meta = NULL; - - /* 0-fill queue descriptor structure */ - memset(txq, 0, sizeof(*txq)); -} -EXPORT_SYMBOL(il_tx_queue_free); - -/** - * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue - */ -void -il_cmd_queue_unmap(struct il_priv *il) -{ - struct il_tx_queue *txq = &il->txq[il->cmd_queue]; - struct il_queue *q = &txq->q; - int i; - - if (q->n_bd == 0) - return; - - while (q->read_ptr != q->write_ptr) { - i = il_get_cmd_idx(q, q->read_ptr, 0); - - if (txq->meta[i].flags & CMD_MAPPED) { - pci_unmap_single(il->pci_dev, - dma_unmap_addr(&txq->meta[i], mapping), - dma_unmap_len(&txq->meta[i], len), - PCI_DMA_BIDIRECTIONAL); - txq->meta[i].flags = 0; - } - - q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd); - } - - i = q->n_win; - if (txq->meta[i].flags & CMD_MAPPED) { - pci_unmap_single(il->pci_dev, - dma_unmap_addr(&txq->meta[i], mapping), - dma_unmap_len(&txq->meta[i], len), - PCI_DMA_BIDIRECTIONAL); - txq->meta[i].flags = 0; - } -} -EXPORT_SYMBOL(il_cmd_queue_unmap); - -/** - * il_cmd_queue_free - Deallocate DMA queue. - * @txq: Transmit queue to deallocate. - * - * Empty queue by removing and destroying all BD's. - * Free all buffers. - * 0-fill, but do not free "txq" descriptor structure. 
- */ -void -il_cmd_queue_free(struct il_priv *il) -{ - struct il_tx_queue *txq = &il->txq[il->cmd_queue]; - struct device *dev = &il->pci_dev->dev; - int i; - - il_cmd_queue_unmap(il); - - /* De-alloc array of command/tx buffers */ - for (i = 0; i <= TFD_CMD_SLOTS; i++) - kfree(txq->cmd[i]); - - /* De-alloc circular buffer of TFDs */ - if (txq->q.n_bd) - dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd, - txq->tfds, txq->q.dma_addr); - - /* deallocate arrays */ - kfree(txq->cmd); - kfree(txq->meta); - txq->cmd = NULL; - txq->meta = NULL; - - /* 0-fill queue descriptor structure */ - memset(txq, 0, sizeof(*txq)); -} -EXPORT_SYMBOL(il_cmd_queue_free); - -/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** - * DMA services - * - * Theory of operation - * - * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer - * of buffer descriptors, each of which points to one or more data buffers for - * the device to read from or fill. Driver and device exchange status of each - * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty - * entries in each circular buffer, to protect against confusing empty and full - * queue states. - * - * The device reads or writes the data in the queues via the device's several - * DMA/FIFO channels. Each queue is mapped to a single DMA channel. - * - * For Tx queue, there are low mark and high mark limits. If, after queuing - * the packet for Tx, free space become < low mark, Tx queue stopped. When - * reclaiming packets (on 'tx done IRQ), if free space become > high mark, - * Tx queue resumed. - * - * See more detailed info in 4965.h. - ***************************************************/ - -int -il_queue_space(const struct il_queue *q) -{ - int s = q->read_ptr - q->write_ptr; - - if (q->read_ptr > q->write_ptr) - s -= q->n_bd; - - if (s <= 0) - s += q->n_win; - /* keep some reserve to not confuse empty and full situations */ - s -= 2; - if (s < 0) - s = 0; - return s; -} -EXPORT_SYMBOL(il_queue_space); - - -/** - * il_queue_init - Initialize queue's high/low-water and read/write idxes - */ -static int -il_queue_init(struct il_priv *il, struct il_queue *q, int count, int slots_num, - u32 id) -{ - q->n_bd = count; - q->n_win = slots_num; - q->id = id; - - /* count must be power-of-two size, otherwise il_queue_inc_wrap - * and il_queue_dec_wrap are broken. */ - BUG_ON(!is_power_of_2(count)); - - /* slots_num must be power-of-two size, otherwise - * il_get_cmd_idx is broken. */ - BUG_ON(!is_power_of_2(slots_num)); - - q->low_mark = q->n_win / 4; - if (q->low_mark < 4) - q->low_mark = 4; - - q->high_mark = q->n_win / 8; - if (q->high_mark < 2) - q->high_mark = 2; - - q->write_ptr = q->read_ptr = 0; - - return 0; -} - -/** - * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue - */ -static int -il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id) -{ - struct device *dev = &il->pci_dev->dev; - size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; - - /* Driver ilate data, only for Tx (not command) queues, - * not shared with device. 
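il_queue_init() above insists on power-of-two sizes because the wrap helpers it names can then advance an index with a mask instead of a compare-and-reset. A minimal sketch of that idiom; these helpers are illustrative stand-ins, since the driver's own il_queue_inc_wrap()/il_queue_dec_wrap() are defined outside this hunk:

	/* Wrap an index around an n_bd-entry ring; valid only when n_bd is a
	 * power of two, which is exactly what il_queue_init() asserts. */
	static inline int illustrative_inc_wrap(int idx, int n_bd)
	{
		return (idx + 1) & (n_bd - 1);
	}

	static inline int illustrative_dec_wrap(int idx, int n_bd)
	{
		return (idx - 1) & (n_bd - 1);
	}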
*/ - if (id != il->cmd_queue) { - txq->txb = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->txb[0]), - GFP_KERNEL); - if (!txq->txb) { - IL_ERR("kmalloc for auxiliary BD " - "structures failed\n"); - goto error; - } - } else { - txq->txb = NULL; - } - - /* Circular buffer of transmit frame descriptors (TFDs), - * shared with device */ - txq->tfds = - dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL); - if (!txq->tfds) { - IL_ERR("pci_alloc_consistent(%zd) failed\n", tfd_sz); - goto error; - } - txq->q.id = id; - - return 0; - -error: - kfree(txq->txb); - txq->txb = NULL; - - return -ENOMEM; -} - -/** - * il_tx_queue_init - Allocate and initialize one tx/cmd queue - */ -int -il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num, - u32 txq_id) -{ - int i, len; - int ret; - int actual_slots = slots_num; - - /* - * Alloc buffer array for commands (Tx or other types of commands). - * For the command queue (#4/#9), allocate command space + one big - * command for scan, since scan command is very huge; the system will - * not have two scans at the same time, so only one is needed. - * For normal Tx queues (all other queues), no super-size command - * space is needed. - */ - if (txq_id == il->cmd_queue) - actual_slots++; - - txq->meta = - kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL); - txq->cmd = - kzalloc(sizeof(struct il_device_cmd *) * actual_slots, GFP_KERNEL); - - if (!txq->meta || !txq->cmd) - goto out_free_arrays; - - len = sizeof(struct il_device_cmd); - for (i = 0; i < actual_slots; i++) { - /* only happens for cmd queue */ - if (i == slots_num) - len = IL_MAX_CMD_SIZE; - - txq->cmd[i] = kmalloc(len, GFP_KERNEL); - if (!txq->cmd[i]) - goto err; - } - - /* Alloc driver data array and TFD circular buffer */ - ret = il_tx_queue_alloc(il, txq, txq_id); - if (ret) - goto err; - - txq->need_update = 0; - - /* - * For the default queues 0-3, set up the swq_id - * already -- all others need to get one later - * (if they need one at all). - */ - if (txq_id < 4) - il_set_swq_id(txq, txq_id, txq_id); - - /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise - * il_queue_inc_wrap and il_queue_dec_wrap are broken. */ - BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); - - /* Initialize queue's high/low-water marks, and head/tail idxes */ - il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); - - /* Tell device where to find queue */ - il->cfg->ops->lib->txq_init(il, txq); - - return 0; -err: - for (i = 0; i < actual_slots; i++) - kfree(txq->cmd[i]); -out_free_arrays: - kfree(txq->meta); - kfree(txq->cmd); - - return -ENOMEM; -} -EXPORT_SYMBOL(il_tx_queue_init); - -void -il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq, int slots_num, - u32 txq_id) -{ - int actual_slots = slots_num; - - if (txq_id == il->cmd_queue) - actual_slots++; - - memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots); - - txq->need_update = 0; - - /* Initialize queue's high/low-water marks, and head/tail idxes */ - il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); - - /* Tell device where to find queue */ - il->cfg->ops->lib->txq_init(il, txq); -} -EXPORT_SYMBOL(il_tx_queue_reset); - -/*************** HOST COMMAND QUEUE FUNCTIONS *****/ - -/** - * il_enqueue_hcmd - enqueue a uCode command - * @il: device ilate data point - * @cmd: a point to the ucode command structure - * - * The function returns < 0 values to indicate the operation is - * failed. 
On success, it turns the idx (> 0) of command in the - * command queue. - */ -int -il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd) -{ - struct il_tx_queue *txq = &il->txq[il->cmd_queue]; - struct il_queue *q = &txq->q; - struct il_device_cmd *out_cmd; - struct il_cmd_meta *out_meta; - dma_addr_t phys_addr; - unsigned long flags; - int len; - u32 idx; - u16 fix_size; - - cmd->len = il->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len); - fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr)); - - /* If any of the command structures end up being larger than - * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then - * we will need to increase the size of the TFD entries - * Also, check to see if command buffer should not exceed the size - * of device_cmd and max_cmd_size. */ - BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && - !(cmd->flags & CMD_SIZE_HUGE)); - BUG_ON(fix_size > IL_MAX_CMD_SIZE); - - if (il_is_rfkill(il) || il_is_ctkill(il)) { - IL_WARN("Not sending command - %s KILL\n", - il_is_rfkill(il) ? "RF" : "CT"); - return -EIO; - } - - spin_lock_irqsave(&il->hcmd_lock, flags); - - if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { - spin_unlock_irqrestore(&il->hcmd_lock, flags); - - IL_ERR("Restarting adapter due to command queue full\n"); - queue_work(il->workqueue, &il->restart); - return -ENOSPC; - } - - idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); - out_cmd = txq->cmd[idx]; - out_meta = &txq->meta[idx]; - - if (WARN_ON(out_meta->flags & CMD_MAPPED)) { - spin_unlock_irqrestore(&il->hcmd_lock, flags); - return -ENOSPC; - } - - memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ - out_meta->flags = cmd->flags | CMD_MAPPED; - if (cmd->flags & CMD_WANT_SKB) - out_meta->source = cmd; - if (cmd->flags & CMD_ASYNC) - out_meta->callback = cmd->callback; - - out_cmd->hdr.cmd = cmd->id; - memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len); - - /* At this point, the out_cmd now has all of the incoming cmd - * information */ - - out_cmd->hdr.flags = 0; - out_cmd->hdr.sequence = - cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr)); - if (cmd->flags & CMD_SIZE_HUGE) - out_cmd->hdr.sequence |= SEQ_HUGE_FRAME; - len = sizeof(struct il_device_cmd); - if (idx == TFD_CMD_SLOTS) - len = IL_MAX_CMD_SIZE; - -#ifdef CONFIG_IWLEGACY_DEBUG - switch (out_cmd->hdr.cmd) { - case C_TX_LINK_QUALITY_CMD: - case C_SENSITIVITY: - D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, " - "%d bytes at %d[%d]:%d\n", - il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd, - le16_to_cpu(out_cmd->hdr.sequence), fix_size, - q->write_ptr, idx, il->cmd_queue); - break; - default: - D_HC("Sending command %s (#%x), seq: 0x%04X, " - "%d bytes at %d[%d]:%d\n", - il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd, - le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr, - idx, il->cmd_queue); - } -#endif - txq->need_update = 1; - - if (il->cfg->ops->lib->txq_update_byte_cnt_tbl) - /* Set up entry in queue's byte count circular buffer */ - il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq, 0); - - phys_addr = - pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size, - PCI_DMA_BIDIRECTIONAL); - dma_unmap_addr_set(out_meta, mapping, phys_addr); - dma_unmap_len_set(out_meta, len, fix_size); - - il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, - 1, U32_PAD(cmd->len)); - - /* Increment and update queue's write idx */ - q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd); - il_txq_update_write_ptr(il, txq); - - 
spin_unlock_irqrestore(&il->hcmd_lock, flags); - return idx; -} - -/** - * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd - * - * When FW advances 'R' idx, all entries between old and new 'R' idx - * need to be reclaimed. As result, some free space forms. If there is - * enough free space (> low mark), wake the stack that feeds us. - */ -static void -il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx) -{ - struct il_tx_queue *txq = &il->txq[txq_id]; - struct il_queue *q = &txq->q; - int nfreed = 0; - - if (idx >= q->n_bd || il_queue_used(q, idx) == 0) { - IL_ERR("Read idx for DMA queue txq id (%d), idx %d, " - "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd, - q->write_ptr, q->read_ptr); - return; - } - - for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; - q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) { - - if (nfreed++ > 0) { - IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx, - q->write_ptr, q->read_ptr); - queue_work(il->workqueue, &il->restart); - } - - } -} - -/** - * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them - * @rxb: Rx buffer to reclaim - * - * If an Rx buffer has an async callback associated with it the callback - * will be executed. The attached skb (if present) will only be freed - * if the callback returns 1 - */ -void -il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb) -{ - struct il_rx_pkt *pkt = rxb_addr(rxb); - u16 sequence = le16_to_cpu(pkt->hdr.sequence); - int txq_id = SEQ_TO_QUEUE(sequence); - int idx = SEQ_TO_IDX(sequence); - int cmd_idx; - bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); - struct il_device_cmd *cmd; - struct il_cmd_meta *meta; - struct il_tx_queue *txq = &il->txq[il->cmd_queue]; - unsigned long flags; - - /* If a Tx command is being handled and it isn't in the actual - * command queue then there a command routing bug has been introduced - * in the queue management code. */ - if (WARN - (txq_id != il->cmd_queue, - "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", - txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr, - il->txq[il->cmd_queue].q.write_ptr)) { - il_print_hex_error(il, pkt, 32); - return; - } - - cmd_idx = il_get_cmd_idx(&txq->q, idx, huge); - cmd = txq->cmd[cmd_idx]; - meta = &txq->meta[cmd_idx]; - - txq->time_stamp = jiffies; - - pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping), - dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL); - - /* Input error checking is done when commands are added to queue. */ - if (meta->flags & CMD_WANT_SKB) { - meta->source->reply_page = (unsigned long)rxb_addr(rxb); - rxb->page = NULL; - } else if (meta->callback) - meta->callback(il, cmd, pkt); - - spin_lock_irqsave(&il->hcmd_lock, flags); - - il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx); - - if (!(meta->flags & CMD_ASYNC)) { - clear_bit(S_HCMD_ACTIVE, &il->status); - D_INFO("Clearing HCMD_ACTIVE for command %s\n", - il_get_cmd_string(cmd->hdr.cmd)); - wake_up(&il->wait_command_queue); - } - - /* Mark as unmapped */ - meta->flags = 0; - - spin_unlock_irqrestore(&il->hcmd_lock, flags); -} -EXPORT_SYMBOL(il_tx_cmd_complete); - -MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965"); -MODULE_VERSION(IWLWIFI_VERSION); -MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); -MODULE_LICENSE("GPL"); - -/* - * set bt_coex_active to true, uCode will do kill/defer - * every time the priority line is asserted (BT is sending signals on the - * priority line in the PCIx). 
- * set bt_coex_active to false, uCode will ignore the BT activity and - * perform the normal operation - * - * User might experience transmit issue on some platform due to WiFi/BT - * co-exist problem. The possible behaviors are: - * Able to scan and finding all the available AP - * Not able to associate with any AP - * On those platforms, WiFi communication can be restored by set - * "bt_coex_active" module parameter to "false" - * - * default: bt_coex_active = true (BT_COEX_ENABLE) - */ -static bool bt_coex_active = true; -module_param(bt_coex_active, bool, S_IRUGO); -MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist"); - -u32 il_debug_level; -EXPORT_SYMBOL(il_debug_level); - -const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; -EXPORT_SYMBOL(il_bcast_addr); - -/* This function both allocates and initializes hw and il. */ -struct ieee80211_hw * -il_alloc_all(struct il_cfg *cfg) -{ - struct il_priv *il; - /* mac80211 allocates memory for this device instance, including - * space for this driver's ilate structure */ - struct ieee80211_hw *hw; - - hw = ieee80211_alloc_hw(sizeof(struct il_priv), - cfg->ops->ieee80211_ops); - if (hw == NULL) { - pr_err("%s: Can not allocate network device\n", cfg->name); - goto out; - } - - il = hw->priv; - il->hw = hw; - -out: - return hw; -} -EXPORT_SYMBOL(il_alloc_all); - -#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ -#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ -static void -il_init_ht_hw_capab(const struct il_priv *il, - struct ieee80211_sta_ht_cap *ht_info, - enum ieee80211_band band) -{ - u16 max_bit_rate = 0; - u8 rx_chains_num = il->hw_params.rx_chains_num; - u8 tx_chains_num = il->hw_params.tx_chains_num; - - ht_info->cap = 0; - memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); - - ht_info->ht_supported = true; - - ht_info->cap |= IEEE80211_HT_CAP_SGI_20; - max_bit_rate = MAX_BIT_RATE_20_MHZ; - if (il->hw_params.ht40_channel & BIT(band)) { - ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; - ht_info->cap |= IEEE80211_HT_CAP_SGI_40; - ht_info->mcs.rx_mask[4] = 0x01; - max_bit_rate = MAX_BIT_RATE_40_MHZ; - } - - if (il->cfg->mod_params->amsdu_size_8K) - ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; - - ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; - ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; - - ht_info->mcs.rx_mask[0] = 0xFF; - if (rx_chains_num >= 2) - ht_info->mcs.rx_mask[1] = 0xFF; - if (rx_chains_num >= 3) - ht_info->mcs.rx_mask[2] = 0xFF; - - /* Highest supported Rx data rate */ - max_bit_rate *= rx_chains_num; - WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK); - ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); - - /* Tx MCS capabilities */ - ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; - if (tx_chains_num != rx_chains_num) { - ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; - ht_info->mcs.tx_params |= - ((tx_chains_num - - 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); - } -} - -/** - * il_init_geos - Initialize mac80211's geo/channel info based from eeprom - */ -int -il_init_geos(struct il_priv *il) -{ - struct il_channel_info *ch; - struct ieee80211_supported_band *sband; - struct ieee80211_channel *channels; - struct ieee80211_channel *geo_ch; - struct ieee80211_rate *rates; - int i = 0; - s8 max_tx_power = 0; - - if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates || - il->bands[IEEE80211_BAND_5GHZ].n_bitrates) { - D_INFO("Geography modes already initialized.\n"); - set_bit(S_GEO_CONFIGURED, &il->status); - return 0; - } - - channels = - kzalloc(sizeof(struct 
ieee80211_channel) * il->channel_count, - GFP_KERNEL); - if (!channels) - return -ENOMEM; - - rates = - kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY), - GFP_KERNEL); - if (!rates) { - kfree(channels); - return -ENOMEM; - } - - /* 5.2GHz channels start after the 2.4GHz channels */ - sband = &il->bands[IEEE80211_BAND_5GHZ]; - sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)]; - /* just OFDM */ - sband->bitrates = &rates[IL_FIRST_OFDM_RATE]; - sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE; - - if (il->cfg->sku & IL_SKU_N) - il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ); - - sband = &il->bands[IEEE80211_BAND_2GHZ]; - sband->channels = channels; - /* OFDM & CCK */ - sband->bitrates = rates; - sband->n_bitrates = RATE_COUNT_LEGACY; - - if (il->cfg->sku & IL_SKU_N) - il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ); - - il->ieee_channels = channels; - il->ieee_rates = rates; - - for (i = 0; i < il->channel_count; i++) { - ch = &il->channel_info[i]; - - if (!il_is_channel_valid(ch)) - continue; - - sband = &il->bands[ch->band]; - - geo_ch = &sband->channels[sband->n_channels++]; - - geo_ch->center_freq = - ieee80211_channel_to_frequency(ch->channel, ch->band); - geo_ch->max_power = ch->max_power_avg; - geo_ch->max_antenna_gain = 0xff; - geo_ch->hw_value = ch->channel; - - if (il_is_channel_valid(ch)) { - if (!(ch->flags & EEPROM_CHANNEL_IBSS)) - geo_ch->flags |= IEEE80211_CHAN_NO_IBSS; - - if (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) - geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN; - - if (ch->flags & EEPROM_CHANNEL_RADAR) - geo_ch->flags |= IEEE80211_CHAN_RADAR; - - geo_ch->flags |= ch->ht40_extension_channel; - - if (ch->max_power_avg > max_tx_power) - max_tx_power = ch->max_power_avg; - } else { - geo_ch->flags |= IEEE80211_CHAN_DISABLED; - } - - D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel, - geo_ch->center_freq, - il_is_channel_a_band(ch) ? "5.2" : "2.4", - geo_ch-> - flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid", - geo_ch->flags); - } - - il->tx_power_device_lmt = max_tx_power; - il->tx_power_user_lmt = max_tx_power; - il->tx_power_next = max_tx_power; - - if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 && - (il->cfg->sku & IL_SKU_A)) { - IL_INFO("Incorrectly detected BG card as ABG. 
" - "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n", - il->pci_dev->device, il->pci_dev->subsystem_device); - il->cfg->sku &= ~IL_SKU_A; - } - - IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n", - il->bands[IEEE80211_BAND_2GHZ].n_channels, - il->bands[IEEE80211_BAND_5GHZ].n_channels); - - set_bit(S_GEO_CONFIGURED, &il->status); - - return 0; -} -EXPORT_SYMBOL(il_init_geos); - -/* - * il_free_geos - undo allocations in il_init_geos - */ -void -il_free_geos(struct il_priv *il) -{ - kfree(il->ieee_channels); - kfree(il->ieee_rates); - clear_bit(S_GEO_CONFIGURED, &il->status); -} -EXPORT_SYMBOL(il_free_geos); - -static bool -il_is_channel_extension(struct il_priv *il, enum ieee80211_band band, - u16 channel, u8 extension_chan_offset) -{ - const struct il_channel_info *ch_info; - - ch_info = il_get_channel_info(il, band, channel); - if (!il_is_channel_valid(ch_info)) - return false; - - if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE) - return !(ch_info-> - ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS); - else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW) - return !(ch_info-> - ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS); - - return false; -} - -bool -il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx, - struct ieee80211_sta_ht_cap *ht_cap) -{ - if (!ctx->ht.enabled || !ctx->ht.is_40mhz) - return false; - - /* - * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 - * the bit will not set if it is pure 40MHz case - */ - if (ht_cap && !ht_cap->ht_supported) - return false; - -#ifdef CONFIG_IWLEGACY_DEBUGFS - if (il->disable_ht40) - return false; -#endif - - return il_is_channel_extension(il, il->band, - le16_to_cpu(ctx->staging.channel), - ctx->ht.extension_chan_offset); -} -EXPORT_SYMBOL(il_is_ht40_tx_allowed); - -static u16 -il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) -{ - u16 new_val; - u16 beacon_factor; - - /* - * If mac80211 hasn't given us a beacon interval, program - * the default into the device. - */ - if (!beacon_val) - return DEFAULT_BEACON_INTERVAL; - - /* - * If the beacon interval we obtained from the peer - * is too large, we'll have to wake up more often - * (and in IBSS case, we'll beacon too much) - * - * For example, if max_beacon_val is 4096, and the - * requested beacon interval is 7000, we'll have to - * use 3500 to be able to wake up on the beacons. - * - * This could badly influence beacon detection stats. - */ - - beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val; - new_val = beacon_val / beacon_factor; - - if (!new_val) - new_val = max_beacon_val; - - return new_val; -} - -int -il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx) -{ - u64 tsf; - s32 interval_tm, rem; - struct ieee80211_conf *conf = NULL; - u16 beacon_int; - struct ieee80211_vif *vif = ctx->vif; - - conf = &il->hw->conf; - - lockdep_assert_held(&il->mutex); - - memset(&ctx->timing, 0, sizeof(struct il_rxon_time_cmd)); - - ctx->timing.timestamp = cpu_to_le64(il->timestamp); - ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval); - - beacon_int = vif ? 
vif->bss_conf.beacon_int : 0; - - /* - * TODO: For IBSS we need to get atim_win from mac80211, - * for now just always use 0 - */ - ctx->timing.atim_win = 0; - - beacon_int = - il_adjust_beacon_interval(beacon_int, - il->hw_params.max_beacon_itrvl * - TIME_UNIT); - ctx->timing.beacon_interval = cpu_to_le16(beacon_int); - - tsf = il->timestamp; /* tsf is modifed by do_div: copy it */ - interval_tm = beacon_int * TIME_UNIT; - rem = do_div(tsf, interval_tm); - ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem); - - ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1; - - D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n", - le16_to_cpu(ctx->timing.beacon_interval), - le32_to_cpu(ctx->timing.beacon_init_val), - le16_to_cpu(ctx->timing.atim_win)); - - return il_send_cmd_pdu(il, ctx->rxon_timing_cmd, sizeof(ctx->timing), - &ctx->timing); -} -EXPORT_SYMBOL(il_send_rxon_timing); - -void -il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx, - int hw_decrypt) -{ - struct il_rxon_cmd *rxon = &ctx->staging; - - if (hw_decrypt) - rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; - else - rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; - -} -EXPORT_SYMBOL(il_set_rxon_hwcrypto); - -/* validate RXON structure is valid */ -int -il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx) -{ - struct il_rxon_cmd *rxon = &ctx->staging; - bool error = false; - - if (rxon->flags & RXON_FLG_BAND_24G_MSK) { - if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) { - IL_WARN("check 2.4G: wrong narrow\n"); - error = true; - } - if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) { - IL_WARN("check 2.4G: wrong radar\n"); - error = true; - } - } else { - if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) { - IL_WARN("check 5.2G: not short slot!\n"); - error = true; - } - if (rxon->flags & RXON_FLG_CCK_MSK) { - IL_WARN("check 5.2G: CCK!\n"); - error = true; - } - } - if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) { - IL_WARN("mac/bssid mcast!\n"); - error = true; - } - - /* make sure basic rates 6Mbps and 1Mbps are supported */ - if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 && - (rxon->cck_basic_rates & RATE_1M_MASK) == 0) { - IL_WARN("neither 1 nor 6 are basic\n"); - error = true; - } - - if (le16_to_cpu(rxon->assoc_id) > 2007) { - IL_WARN("aid > 2007\n"); - error = true; - } - - if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) == - (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) { - IL_WARN("CCK and short slot\n"); - error = true; - } - - if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) == - (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) { - IL_WARN("CCK and auto detect"); - error = true; - } - - if ((rxon-> - flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) == - RXON_FLG_TGG_PROTECT_MSK) { - IL_WARN("TGg but no auto-detect\n"); - error = true; - } - - if (error) - IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel)); - - if (error) { - IL_ERR("Invalid RXON\n"); - return -EINVAL; - } - return 0; -} -EXPORT_SYMBOL(il_check_rxon_cmd); - -/** - * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed - * @il: staging_rxon is compared to active_rxon - * - * If the RXON structure is changing enough to require a new tune, - * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that - * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. 
- */ -int -il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx) -{ - const struct il_rxon_cmd *staging = &ctx->staging; - const struct il_rxon_cmd *active = &ctx->active; - -#define CHK(cond) \ - if ((cond)) { \ - D_INFO("need full RXON - " #cond "\n"); \ - return 1; \ - } - -#define CHK_NEQ(c1, c2) \ - if ((c1) != (c2)) { \ - D_INFO("need full RXON - " \ - #c1 " != " #c2 " - %d != %d\n", \ - (c1), (c2)); \ - return 1; \ - } - - /* These items are only settable from the full RXON command */ - CHK(!il_is_associated_ctx(ctx)); - CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr)); - CHK(compare_ether_addr(staging->node_addr, active->node_addr)); - CHK(compare_ether_addr - (staging->wlap_bssid_addr, active->wlap_bssid_addr)); - CHK_NEQ(staging->dev_type, active->dev_type); - CHK_NEQ(staging->channel, active->channel); - CHK_NEQ(staging->air_propagation, active->air_propagation); - CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates, - active->ofdm_ht_single_stream_basic_rates); - CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates, - active->ofdm_ht_dual_stream_basic_rates); - CHK_NEQ(staging->assoc_id, active->assoc_id); - - /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can - * be updated with the RXON_ASSOC command -- however only some - * flag transitions are allowed using RXON_ASSOC */ - - /* Check if we are not switching bands */ - CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK, - active->flags & RXON_FLG_BAND_24G_MSK); - - /* Check if we are switching association toggle */ - CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK, - active->filter_flags & RXON_FILTER_ASSOC_MSK); - -#undef CHK -#undef CHK_NEQ - - return 0; -} -EXPORT_SYMBOL(il_full_rxon_required); - -u8 -il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx) -{ - /* - * Assign the lowest rate -- should really get this from - * the beacon skb from mac80211. 
- */ - if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) - return RATE_1M_PLCP; - else - return RATE_6M_PLCP; -} -EXPORT_SYMBOL(il_get_lowest_plcp); - -static void -_il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf, - struct il_rxon_context *ctx) -{ - struct il_rxon_cmd *rxon = &ctx->staging; - - if (!ctx->ht.enabled) { - rxon->flags &= - ~(RXON_FLG_CHANNEL_MODE_MSK | - RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK - | RXON_FLG_HT_PROT_MSK); - return; - } - - rxon->flags |= - cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS); - - /* Set up channel bandwidth: - * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */ - /* clear the HT channel mode before set the mode */ - rxon->flags &= - ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); - if (il_is_ht40_tx_allowed(il, ctx, NULL)) { - /* pure ht40 */ - if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) { - rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; - /* Note: control channel is opposite of extension channel */ - switch (ctx->ht.extension_chan_offset) { - case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: - rxon->flags &= - ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; - break; - case IEEE80211_HT_PARAM_CHA_SEC_BELOW: - rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; - break; - } - } else { - /* Note: control channel is opposite of extension channel */ - switch (ctx->ht.extension_chan_offset) { - case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: - rxon->flags &= - ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); - rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; - break; - case IEEE80211_HT_PARAM_CHA_SEC_BELOW: - rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; - rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; - break; - case IEEE80211_HT_PARAM_CHA_SEC_NONE: - default: - /* channel location only valid if in Mixed mode */ - IL_ERR("invalid extension channel offset\n"); - break; - } - } - } else { - rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY; - } - - if (il->cfg->ops->hcmd->set_rxon_chain) - il->cfg->ops->hcmd->set_rxon_chain(il, ctx); - - D_ASSOC("rxon flags 0x%X operation mode :0x%X " - "extension channel offset 0x%x\n", le32_to_cpu(rxon->flags), - ctx->ht.protection, ctx->ht.extension_chan_offset); -} - -void -il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf) -{ - _il_set_rxon_ht(il, ht_conf, &il->ctx); -} -EXPORT_SYMBOL(il_set_rxon_ht); - -/* Return valid, unused, channel for a passive scan to reset the RF */ -u8 -il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band) -{ - const struct il_channel_info *ch_info; - int i; - u8 channel = 0; - u8 min, max; - - if (band == IEEE80211_BAND_5GHZ) { - min = 14; - max = il->channel_count; - } else { - min = 0; - max = 14; - } - - for (i = min; i < max; i++) { - channel = il->channel_info[i].channel; - if (channel == le16_to_cpu(il->ctx.staging.channel)) - continue; - - ch_info = il_get_channel_info(il, band, channel); - if (il_is_channel_valid(ch_info)) - break; - } - - return channel; -} -EXPORT_SYMBOL(il_get_single_channel_number); - -/** - * il_set_rxon_channel - Set the band and channel values in staging RXON - * @ch: requested channel as a pointer to struct ieee80211_channel - - * NOTE: Does not commit to the hardware; it sets appropriate bit fields - * in the staging RXON flag structure based on the ch->band - */ -int -il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch, - struct il_rxon_context *ctx) -{ - enum ieee80211_band band = ch->band; - u16 channel = ch->hw_value; - - if (le16_to_cpu(ctx->staging.channel) == channel && 
il->band == band) - return 0; - - ctx->staging.channel = cpu_to_le16(channel); - if (band == IEEE80211_BAND_5GHZ) - ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK; - else - ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; - - il->band = band; - - D_INFO("Staging channel set to %d [%d]\n", channel, band); - - return 0; -} -EXPORT_SYMBOL(il_set_rxon_channel); - -void -il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx, - enum ieee80211_band band, struct ieee80211_vif *vif) -{ - if (band == IEEE80211_BAND_5GHZ) { - ctx->staging.flags &= - ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK | - RXON_FLG_CCK_MSK); - ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; - } else { - /* Copied from il_post_associate() */ - if (vif && vif->bss_conf.use_short_slot) - ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; - else - ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; - - ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; - ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK; - ctx->staging.flags &= ~RXON_FLG_CCK_MSK; - } -} -EXPORT_SYMBOL(il_set_flags_for_band); - -/* - * initialize rxon structure with default values from eeprom - */ -void -il_connection_init_rx_config(struct il_priv *il, struct il_rxon_context *ctx) -{ - const struct il_channel_info *ch_info; - - memset(&ctx->staging, 0, sizeof(ctx->staging)); - - if (!ctx->vif) { - ctx->staging.dev_type = ctx->unused_devtype; - } else - switch (ctx->vif->type) { - - case NL80211_IFTYPE_STATION: - ctx->staging.dev_type = ctx->station_devtype; - ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; - break; - - case NL80211_IFTYPE_ADHOC: - ctx->staging.dev_type = ctx->ibss_devtype; - ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK; - ctx->staging.filter_flags = - RXON_FILTER_BCON_AWARE_MSK | - RXON_FILTER_ACCEPT_GRP_MSK; - break; - - default: - IL_ERR("Unsupported interface type %d\n", - ctx->vif->type); - break; - } - -#if 0 - /* TODO: Figure out when short_preamble would be set and cache from - * that */ - if (!hw_to_local(il->hw)->short_preamble) - ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; - else - ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; -#endif - - ch_info = - il_get_channel_info(il, il->band, le16_to_cpu(ctx->active.channel)); - - if (!ch_info) - ch_info = &il->channel_info[0]; - - ctx->staging.channel = cpu_to_le16(ch_info->channel); - il->band = ch_info->band; - - il_set_flags_for_band(il, ctx, il->band, ctx->vif); - - ctx->staging.ofdm_basic_rates = - (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF; - ctx->staging.cck_basic_rates = - (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF; - - /* clear both MIX and PURE40 mode flag */ - ctx->staging.flags &= - ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40); - if (ctx->vif) - memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN); - - ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff; - ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff; -} -EXPORT_SYMBOL(il_connection_init_rx_config); - -void -il_set_rate(struct il_priv *il) -{ - const struct ieee80211_supported_band *hw = NULL; - struct ieee80211_rate *rate; - int i; - - hw = il_get_hw_mode(il, il->band); - if (!hw) { - IL_ERR("Failed to set rate: unable to get hw mode\n"); - return; - } - - il->active_rate = 0; - - for (i = 0; i < hw->n_bitrates; i++) { - rate = &(hw->bitrates[i]); - if (rate->hw_value < RATE_COUNT_LEGACY) - il->active_rate |= (1 << rate->hw_value); - } - - D_RATE("Set active_rate = %0x\n", il->active_rate); - - il->ctx.staging.cck_basic_rates = - (IL_CCK_BASIC_RATES_MASK >> 
IL_FIRST_CCK_RATE) & 0xF; - - il->ctx.staging.ofdm_basic_rates = - (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF; -} -EXPORT_SYMBOL(il_set_rate); - -void -il_chswitch_done(struct il_priv *il, bool is_success) -{ - struct il_rxon_context *ctx = &il->ctx; - - if (test_bit(S_EXIT_PENDING, &il->status)) - return; - - if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status)) - ieee80211_chswitch_done(ctx->vif, is_success); -} -EXPORT_SYMBOL(il_chswitch_done); - -void -il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb) -{ - struct il_rx_pkt *pkt = rxb_addr(rxb); - struct il_csa_notification *csa = &(pkt->u.csa_notif); - - struct il_rxon_context *ctx = &il->ctx; - struct il_rxon_cmd *rxon = (void *)&ctx->active; - - if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status)) - return; - - if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) { - rxon->channel = csa->channel; - ctx->staging.channel = csa->channel; - D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel)); - il_chswitch_done(il, true); - } else { - IL_ERR("CSA notif (fail) : channel %d\n", - le16_to_cpu(csa->channel)); - il_chswitch_done(il, false); - } -} -EXPORT_SYMBOL(il_hdl_csa); - -#ifdef CONFIG_IWLEGACY_DEBUG -void -il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx) -{ - struct il_rxon_cmd *rxon = &ctx->staging; - - D_RADIO("RX CONFIG:\n"); - il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); - D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel)); - D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); - D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags)); - D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type); - D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates); - D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates); - D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr); - D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr); - D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); -} -EXPORT_SYMBOL(il_print_rx_config_cmd); -#endif -/** - * il_irq_handle_error - called for HW or SW error interrupt from card - */ -void -il_irq_handle_error(struct il_priv *il) -{ - /* Set the FW error flag -- cleared on il_down */ - set_bit(S_FW_ERROR, &il->status); - - /* Cancel currently queued command. 
*/ - clear_bit(S_HCMD_ACTIVE, &il->status); - - IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version); - - il->cfg->ops->lib->dump_nic_error_log(il); - if (il->cfg->ops->lib->dump_fh) - il->cfg->ops->lib->dump_fh(il, NULL, false); -#ifdef CONFIG_IWLEGACY_DEBUG - if (il_get_debug_level(il) & IL_DL_FW_ERRORS) - il_print_rx_config_cmd(il, &il->ctx); -#endif - - wake_up(&il->wait_command_queue); - - /* Keep the restart process from trying to send host - * commands by clearing the INIT status bit */ - clear_bit(S_READY, &il->status); - - if (!test_bit(S_EXIT_PENDING, &il->status)) { - IL_DBG(IL_DL_FW_ERRORS, - "Restarting adapter due to uCode error.\n"); - - if (il->cfg->mod_params->restart_fw) - queue_work(il->workqueue, &il->restart); - } -} -EXPORT_SYMBOL(il_irq_handle_error); - -static int -il_apm_stop_master(struct il_priv *il) -{ - int ret = 0; - - /* stop device's busmaster DMA activity */ - il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); - - ret = - _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED, - CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); - if (ret) - IL_WARN("Master Disable Timed Out, 100 usec\n"); - - D_INFO("stop master\n"); - - return ret; -} - -void -il_apm_stop(struct il_priv *il) -{ - D_INFO("Stop card, put in low power state\n"); - - /* Stop device's DMA activity */ - il_apm_stop_master(il); - - /* Reset the entire device */ - il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); - - udelay(10); - - /* - * Clear "initialization complete" bit to move adapter from - * D0A* (powered-up Active) --> D0U* (Uninitialized) state. - */ - il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); -} -EXPORT_SYMBOL(il_apm_stop); - -/* - * Start up NIC's basic functionality after it has been reset - * (e.g. after platform boot, or shutdown via il_apm_stop()) - * NOTE: This does not load uCode nor start the embedded processor - */ -int -il_apm_init(struct il_priv *il) -{ - int ret = 0; - u16 lctl; - - D_INFO("Init card's basic functions\n"); - - /* - * Use "set_bit" below rather than "write", to preserve any hardware - * bits already set by default after reset. - */ - - /* Disable L0S exit timer (platform NMI Work/Around) */ - il_set_bit(il, CSR_GIO_CHICKEN_BITS, - CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); - - /* - * Disable L0s without affecting L1; - * don't wait for ICH L0s (ICH bug W/A) - */ - il_set_bit(il, CSR_GIO_CHICKEN_BITS, - CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); - - /* Set FH wait threshold to maximum (HW error during stress W/A) */ - il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); - - /* - * Enable HAP INTA (interrupt from management bus) to - * wake device's PCI Express link L1a -> L0s - * NOTE: This is no-op for 3945 (non-existent bit) - */ - il_set_bit(il, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); - - /* - * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition. - * Check if BIOS (or OS) enabled L1-ASPM on this device. - * If so (likely), disable L0S, so device moves directly L0->L1; - * costs negligible amount of power savings. - * If not (unlikely), enable L0S, so there is at least some - * power savings, even without L1. - */ - if (il->cfg->base_params->set_l0s) { - lctl = il_pcie_link_ctl(il); - if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == - PCI_CFG_LINK_CTRL_VAL_L1_EN) { - /* L1-ASPM enabled; disable(!) L0S */ - il_set_bit(il, CSR_GIO_REG, - CSR_GIO_REG_VAL_L0S_ENABLED); - D_POWER("L1 Enabled; Disabling L0S\n"); - } else { - /* L1-ASPM disabled; enable(!) 
L0S */ - il_clear_bit(il, CSR_GIO_REG, - CSR_GIO_REG_VAL_L0S_ENABLED); - D_POWER("L1 Disabled; Enabling L0S\n"); - } - } - - /* Configure analog phase-lock-loop before activating to D0A */ - if (il->cfg->base_params->pll_cfg_val) - il_set_bit(il, CSR_ANA_PLL_CFG, - il->cfg->base_params->pll_cfg_val); - - /* - * Set "initialization complete" bit to move adapter from - * D0U* --> D0A* (powered-up active) state. - */ - il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); - - /* - * Wait for clock stabilization; once stabilized, access to - * device-internal resources is supported, e.g. il_wr_prph() - * and accesses to uCode SRAM. - */ - ret = - _il_poll_bit(il, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); - if (ret < 0) { - D_INFO("Failed to init the card\n"); - goto out; - } - - /* - * Enable DMA and BSM (if used) clocks, wait for them to stabilize. - * BSM (Boostrap State Machine) is only in 3945 and 4965. - * - * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits - * do not disable clocks. This preserves any hardware bits already - * set by default in "CLK_CTRL_REG" after reset. - */ - if (il->cfg->base_params->use_bsm) - il_wr_prph(il, APMG_CLK_EN_REG, - APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT); - else - il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT); - udelay(20); - - /* Disable L1-Active */ - il_set_bits_prph(il, APMG_PCIDEV_STT_REG, - APMG_PCIDEV_STT_VAL_L1_ACT_DIS); - -out: - return ret; -} -EXPORT_SYMBOL(il_apm_init); - -int -il_set_tx_power(struct il_priv *il, s8 tx_power, bool force) -{ - int ret; - s8 prev_tx_power; - bool defer; - struct il_rxon_context *ctx = &il->ctx; - - lockdep_assert_held(&il->mutex); - - if (il->tx_power_user_lmt == tx_power && !force) - return 0; - - if (!il->cfg->ops->lib->send_tx_power) - return -EOPNOTSUPP; - - /* 0 dBm mean 1 milliwatt */ - if (tx_power < 0) { - IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power); - return -EINVAL; - } - - if (tx_power > il->tx_power_device_lmt) { - IL_WARN("Requested user TXPOWER %d above upper limit %d.\n", - tx_power, il->tx_power_device_lmt); - return -EINVAL; - } - - if (!il_is_ready_rf(il)) - return -EIO; - - /* scan complete and commit_rxon use tx_power_next value, - * it always need to be updated for newest request */ - il->tx_power_next = tx_power; - - /* do not set tx power when scanning or channel changing */ - defer = test_bit(S_SCANNING, &il->status) || - memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)); - if (defer && !force) { - D_INFO("Deferring tx power set\n"); - return 0; - } - - prev_tx_power = il->tx_power_user_lmt; - il->tx_power_user_lmt = tx_power; - - ret = il->cfg->ops->lib->send_tx_power(il); - - /* if fail to set tx_power, restore the orig. tx power */ - if (ret) { - il->tx_power_user_lmt = prev_tx_power; - il->tx_power_next = prev_tx_power; - } - return ret; -} -EXPORT_SYMBOL(il_set_tx_power); - -void -il_send_bt_config(struct il_priv *il) -{ - struct il_bt_cmd bt_cmd = { - .lead_time = BT_LEAD_TIME_DEF, - .max_kill = BT_MAX_KILL_DEF, - .kill_ack_mask = 0, - .kill_cts_mask = 0, - }; - - if (!bt_coex_active) - bt_cmd.flags = BT_COEX_DISABLE; - else - bt_cmd.flags = BT_COEX_ENABLE; - - D_INFO("BT coex %s\n", - (bt_cmd.flags == BT_COEX_DISABLE) ? 
"disable" : "active"); - - if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd)) - IL_ERR("failed to send BT Coex Config\n"); -} -EXPORT_SYMBOL(il_send_bt_config); - -int -il_send_stats_request(struct il_priv *il, u8 flags, bool clear) -{ - struct il_stats_cmd stats_cmd = { - .configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0, - }; - - if (flags & CMD_ASYNC) - return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd), - &stats_cmd, NULL); - else - return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd), - &stats_cmd); -} -EXPORT_SYMBOL(il_send_stats_request); - -void -il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb) -{ -#ifdef CONFIG_IWLEGACY_DEBUG - struct il_rx_pkt *pkt = rxb_addr(rxb); - struct il_sleep_notification *sleep = &(pkt->u.sleep_notif); - D_RX("sleep mode: %d, src: %d\n", - sleep->pm_sleep_mode, sleep->pm_wakeup_src); -#endif -} -EXPORT_SYMBOL(il_hdl_pm_sleep); - -void -il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb) -{ - struct il_rx_pkt *pkt = rxb_addr(rxb); - u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK; - D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len, - il_get_cmd_string(pkt->hdr.cmd)); - il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len); -} -EXPORT_SYMBOL(il_hdl_pm_debug_stats); - -void -il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb) -{ - struct il_rx_pkt *pkt = rxb_addr(rxb); - - IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) " - "seq 0x%04X ser 0x%08X\n", - le32_to_cpu(pkt->u.err_resp.error_type), - il_get_cmd_string(pkt->u.err_resp.cmd_id), - pkt->u.err_resp.cmd_id, - le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), - le32_to_cpu(pkt->u.err_resp.error_info)); -} -EXPORT_SYMBOL(il_hdl_error); - -void -il_clear_isr_stats(struct il_priv *il) -{ - memset(&il->isr_stats, 0, sizeof(il->isr_stats)); -} - -int -il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, - const struct ieee80211_tx_queue_params *params) -{ - struct il_priv *il = hw->priv; - unsigned long flags; - int q; - - D_MAC80211("enter\n"); - - if (!il_is_ready_rf(il)) { - D_MAC80211("leave - RF not ready\n"); - return -EIO; - } - - if (queue >= AC_NUM) { - D_MAC80211("leave - queue >= AC_NUM %d\n", queue); - return 0; - } - - q = AC_NUM - 1 - queue; - - spin_lock_irqsave(&il->lock, flags); - - il->ctx.qos_data.def_qos_parm.ac[q].cw_min = - cpu_to_le16(params->cw_min); - il->ctx.qos_data.def_qos_parm.ac[q].cw_max = - cpu_to_le16(params->cw_max); - il->ctx.qos_data.def_qos_parm.ac[q].aifsn = params->aifs; - il->ctx.qos_data.def_qos_parm.ac[q].edca_txop = - cpu_to_le16((params->txop * 32)); - - il->ctx.qos_data.def_qos_parm.ac[q].reserved1 = 0; - - spin_unlock_irqrestore(&il->lock, flags); - - D_MAC80211("leave\n"); - return 0; -} -EXPORT_SYMBOL(il_mac_conf_tx); - -int -il_mac_tx_last_beacon(struct ieee80211_hw *hw) -{ - struct il_priv *il = hw->priv; - - return il->ibss_manager == IL_IBSS_MANAGER; -} -EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon); - -static int -il_set_mode(struct il_priv *il, struct il_rxon_context *ctx) -{ - il_connection_init_rx_config(il, ctx); - - if (il->cfg->ops->hcmd->set_rxon_chain) - il->cfg->ops->hcmd->set_rxon_chain(il, ctx); - - return il_commit_rxon(il, ctx); -} - -static int -il_setup_interface(struct il_priv *il, struct il_rxon_context *ctx) -{ - struct ieee80211_vif *vif = ctx->vif; - int err; - - lockdep_assert_held(&il->mutex); - - /* - * This variable will be correct only when there's just - * a single context, but all code using it 
is for hardware - * that supports only one context. - */ - il->iw_mode = vif->type; - - ctx->is_active = true; - - err = il_set_mode(il, ctx); - if (err) { - if (!ctx->always_active) - ctx->is_active = false; - return err; - } - - return 0; -} - -int -il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) -{ - struct il_priv *il = hw->priv; - struct il_vif_priv *vif_priv = (void *)vif->drv_priv; - int err; - u32 modes; - - D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr); - - mutex_lock(&il->mutex); - - if (!il_is_ready_rf(il)) { - IL_WARN("Try to add interface when device not ready\n"); - err = -EINVAL; - goto out; - } - - /* check if busy context is exclusive */ - if (il->ctx.vif && - (il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type))) { - err = -EINVAL; - goto out; - } - - modes = il->ctx.interface_modes | il->ctx.exclusive_interface_modes; - if (!(modes & BIT(vif->type))) { - err = -EOPNOTSUPP; - goto out; - } - - vif_priv->ctx = &il->ctx; - il->ctx.vif = vif; - - err = il_setup_interface(il, &il->ctx); - if (err) { - il->ctx.vif = NULL; - il->iw_mode = NL80211_IFTYPE_STATION; - } - -out: - mutex_unlock(&il->mutex); - - D_MAC80211("leave\n"); - return err; -} -EXPORT_SYMBOL(il_mac_add_interface); - -static void -il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif, - bool mode_change) -{ - struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif); - - lockdep_assert_held(&il->mutex); - - if (il->scan_vif == vif) { - il_scan_cancel_timeout(il, 200); - il_force_scan_end(il); - } - - if (!mode_change) { - il_set_mode(il, ctx); - if (!ctx->always_active) - ctx->is_active = false; - } -} - -void -il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) -{ - struct il_priv *il = hw->priv; - struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif); - - D_MAC80211("enter\n"); - - mutex_lock(&il->mutex); - - WARN_ON(ctx->vif != vif); - ctx->vif = NULL; - - il_teardown_interface(il, vif, false); - - memset(il->bssid, 0, ETH_ALEN); - mutex_unlock(&il->mutex); - - D_MAC80211("leave\n"); - -} -EXPORT_SYMBOL(il_mac_remove_interface); - -int -il_alloc_txq_mem(struct il_priv *il) -{ - if (!il->txq) - il->txq = - kzalloc(sizeof(struct il_tx_queue) * - il->cfg->base_params->num_of_queues, GFP_KERNEL); - if (!il->txq) { - IL_ERR("Not enough memory for txq\n"); - return -ENOMEM; - } - return 0; -} -EXPORT_SYMBOL(il_alloc_txq_mem); - -void -il_txq_mem(struct il_priv *il) -{ - kfree(il->txq); - il->txq = NULL; -} -EXPORT_SYMBOL(il_txq_mem); - -#ifdef CONFIG_IWLEGACY_DEBUGFS - -#define IL_TRAFFIC_DUMP_SIZE (IL_TRAFFIC_ENTRY_SIZE * IL_TRAFFIC_ENTRIES) - -void -il_reset_traffic_log(struct il_priv *il) -{ - il->tx_traffic_idx = 0; - il->rx_traffic_idx = 0; - if (il->tx_traffic) - memset(il->tx_traffic, 0, IL_TRAFFIC_DUMP_SIZE); - if (il->rx_traffic) - memset(il->rx_traffic, 0, IL_TRAFFIC_DUMP_SIZE); -} - -int -il_alloc_traffic_mem(struct il_priv *il) -{ - u32 traffic_size = IL_TRAFFIC_DUMP_SIZE; - - if (il_debug_level & IL_DL_TX) { - if (!il->tx_traffic) { - il->tx_traffic = kzalloc(traffic_size, GFP_KERNEL); - if (!il->tx_traffic) - return -ENOMEM; - } - } - if (il_debug_level & IL_DL_RX) { - if (!il->rx_traffic) { - il->rx_traffic = kzalloc(traffic_size, GFP_KERNEL); - if (!il->rx_traffic) - return -ENOMEM; - } - } - il_reset_traffic_log(il); - return 0; -} -EXPORT_SYMBOL(il_alloc_traffic_mem); - -void -il_free_traffic_mem(struct il_priv *il) -{ - kfree(il->tx_traffic); - il->tx_traffic = NULL; - - kfree(il->rx_traffic); - il->rx_traffic 
= NULL; -} -EXPORT_SYMBOL(il_free_traffic_mem); - -void -il_dbg_log_tx_data_frame(struct il_priv *il, u16 length, - struct ieee80211_hdr *header) -{ - __le16 fc; - u16 len; - - if (likely(!(il_debug_level & IL_DL_TX))) - return; - - if (!il->tx_traffic) - return; - - fc = header->frame_control; - if (ieee80211_is_data(fc)) { - len = - (length > - IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length; - memcpy((il->tx_traffic + - (il->tx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header, - len); - il->tx_traffic_idx = - (il->tx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES; - } -} -EXPORT_SYMBOL(il_dbg_log_tx_data_frame); - -void -il_dbg_log_rx_data_frame(struct il_priv *il, u16 length, - struct ieee80211_hdr *header) -{ - __le16 fc; - u16 len; - - if (likely(!(il_debug_level & IL_DL_RX))) - return; - - if (!il->rx_traffic) - return; - - fc = header->frame_control; - if (ieee80211_is_data(fc)) { - len = - (length > - IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length; - memcpy((il->rx_traffic + - (il->rx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header, - len); - il->rx_traffic_idx = - (il->rx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES; - } -} -EXPORT_SYMBOL(il_dbg_log_rx_data_frame); - -const char * -il_get_mgmt_string(int cmd) -{ - switch (cmd) { - IL_CMD(MANAGEMENT_ASSOC_REQ); - IL_CMD(MANAGEMENT_ASSOC_RESP); - IL_CMD(MANAGEMENT_REASSOC_REQ); - IL_CMD(MANAGEMENT_REASSOC_RESP); - IL_CMD(MANAGEMENT_PROBE_REQ); - IL_CMD(MANAGEMENT_PROBE_RESP); - IL_CMD(MANAGEMENT_BEACON); - IL_CMD(MANAGEMENT_ATIM); - IL_CMD(MANAGEMENT_DISASSOC); - IL_CMD(MANAGEMENT_AUTH); - IL_CMD(MANAGEMENT_DEAUTH); - IL_CMD(MANAGEMENT_ACTION); - default: - return "UNKNOWN"; - - } -} - -const char * -il_get_ctrl_string(int cmd) -{ - switch (cmd) { - IL_CMD(CONTROL_BACK_REQ); - IL_CMD(CONTROL_BACK); - IL_CMD(CONTROL_PSPOLL); - IL_CMD(CONTROL_RTS); - IL_CMD(CONTROL_CTS); - IL_CMD(CONTROL_ACK); - IL_CMD(CONTROL_CFEND); - IL_CMD(CONTROL_CFENDACK); - default: - return "UNKNOWN"; - - } -} - -void -il_clear_traffic_stats(struct il_priv *il) -{ - memset(&il->tx_stats, 0, sizeof(struct traffic_stats)); - memset(&il->rx_stats, 0, sizeof(struct traffic_stats)); -} - -/* - * if CONFIG_IWLEGACY_DEBUGFS defined, - * il_update_stats function will - * record all the MGMT, CTRL and DATA pkt for both TX and Rx pass - * Use debugFs to display the rx/rx_stats - * if CONFIG_IWLEGACY_DEBUGFS not being defined, then no MGMT and CTRL - * information will be recorded, but DATA pkt still will be recorded - * for the reason of il_led.c need to control the led blinking based on - * number of tx and rx data. 
- * - */ -void -il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len) -{ - struct traffic_stats *stats; - - if (is_tx) - stats = &il->tx_stats; - else - stats = &il->rx_stats; - - if (ieee80211_is_mgmt(fc)) { - switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { - case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): - stats->mgmt[MANAGEMENT_ASSOC_REQ]++; - break; - case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): - stats->mgmt[MANAGEMENT_ASSOC_RESP]++; - break; - case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): - stats->mgmt[MANAGEMENT_REASSOC_REQ]++; - break; - case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): - stats->mgmt[MANAGEMENT_REASSOC_RESP]++; - break; - case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): - stats->mgmt[MANAGEMENT_PROBE_REQ]++; - break; - case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): - stats->mgmt[MANAGEMENT_PROBE_RESP]++; - break; - case cpu_to_le16(IEEE80211_STYPE_BEACON): - stats->mgmt[MANAGEMENT_BEACON]++; - break; - case cpu_to_le16(IEEE80211_STYPE_ATIM): - stats->mgmt[MANAGEMENT_ATIM]++; - break; - case cpu_to_le16(IEEE80211_STYPE_DISASSOC): - stats->mgmt[MANAGEMENT_DISASSOC]++; - break; - case cpu_to_le16(IEEE80211_STYPE_AUTH): - stats->mgmt[MANAGEMENT_AUTH]++; - break; - case cpu_to_le16(IEEE80211_STYPE_DEAUTH): - stats->mgmt[MANAGEMENT_DEAUTH]++; - break; - case cpu_to_le16(IEEE80211_STYPE_ACTION): - stats->mgmt[MANAGEMENT_ACTION]++; - break; - } - } else if (ieee80211_is_ctl(fc)) { - switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { - case cpu_to_le16(IEEE80211_STYPE_BACK_REQ): - stats->ctrl[CONTROL_BACK_REQ]++; - break; - case cpu_to_le16(IEEE80211_STYPE_BACK): - stats->ctrl[CONTROL_BACK]++; - break; - case cpu_to_le16(IEEE80211_STYPE_PSPOLL): - stats->ctrl[CONTROL_PSPOLL]++; - break; - case cpu_to_le16(IEEE80211_STYPE_RTS): - stats->ctrl[CONTROL_RTS]++; - break; - case cpu_to_le16(IEEE80211_STYPE_CTS): - stats->ctrl[CONTROL_CTS]++; - break; - case cpu_to_le16(IEEE80211_STYPE_ACK): - stats->ctrl[CONTROL_ACK]++; - break; - case cpu_to_le16(IEEE80211_STYPE_CFEND): - stats->ctrl[CONTROL_CFEND]++; - break; - case cpu_to_le16(IEEE80211_STYPE_CFENDACK): - stats->ctrl[CONTROL_CFENDACK]++; - break; - } - } else { - /* data */ - stats->data_cnt++; - stats->data_bytes += len; - } -} -EXPORT_SYMBOL(il_update_stats); -#endif - -int -il_force_reset(struct il_priv *il, bool external) -{ - struct il_force_reset *force_reset; - - if (test_bit(S_EXIT_PENDING, &il->status)) - return -EINVAL; - - force_reset = &il->force_reset; - force_reset->reset_request_count++; - if (!external) { - if (force_reset->last_force_reset_jiffies && - time_after(force_reset->last_force_reset_jiffies + - force_reset->reset_duration, jiffies)) { - D_INFO("force reset rejected\n"); - force_reset->reset_reject_count++; - return -EAGAIN; - } - } - force_reset->reset_success_count++; - force_reset->last_force_reset_jiffies = jiffies; - - /* - * if the request is from external(ex: debugfs), - * then always perform the request in regardless the module - * parameter setting - * if the request is from internal (uCode error or driver - * detect failure), then fw_restart module parameter - * need to be check before performing firmware reload - */ - - if (!external && !il->cfg->mod_params->restart_fw) { - D_INFO("Cancel firmware reload based on " - "module parameter setting\n"); - return 0; - } - - IL_ERR("On demand firmware reload\n"); - - /* Set the FW error flag -- cleared on il_down */ - set_bit(S_FW_ERROR, &il->status); - wake_up(&il->wait_command_queue); - /* - * Keep the restart process from trying to send 
host - * commands by clearing the INIT status bit - */ - clear_bit(S_READY, &il->status); - queue_work(il->workqueue, &il->restart); - - return 0; -} - -int -il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum nl80211_iftype newtype, bool newp2p) -{ - struct il_priv *il = hw->priv; - struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif); - u32 modes; - int err; - - newtype = ieee80211_iftype_p2p(newtype, newp2p); - - mutex_lock(&il->mutex); - - if (!ctx->vif || !il_is_ready_rf(il)) { - /* - * Huh? But wait ... this can maybe happen when - * we're in the middle of a firmware restart! - */ - err = -EBUSY; - goto out; - } - - modes = ctx->interface_modes | ctx->exclusive_interface_modes; - if (!(modes & BIT(newtype))) { - err = -EOPNOTSUPP; - goto out; - } - - if ((il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type)) || - (il->ctx.exclusive_interface_modes & BIT(newtype))) { - err = -EINVAL; - goto out; - } - - /* success */ - il_teardown_interface(il, vif, true); - vif->type = newtype; - vif->p2p = newp2p; - err = il_setup_interface(il, ctx); - WARN_ON(err); - /* - * We've switched internally, but submitting to the - * device may have failed for some reason. Mask this - * error, because otherwise mac80211 will not switch - * (and set the interface type back) and we'll be - * out of sync with it. - */ - err = 0; - -out: - mutex_unlock(&il->mutex); - return err; -} -EXPORT_SYMBOL(il_mac_change_interface); - -/* - * On every watchdog tick we check (latest) time stamp. If it does not - * change during timeout period and queue is not empty we reset firmware. - */ -static int -il_check_stuck_queue(struct il_priv *il, int cnt) -{ - struct il_tx_queue *txq = &il->txq[cnt]; - struct il_queue *q = &txq->q; - unsigned long timeout; - int ret; - - if (q->read_ptr == q->write_ptr) { - txq->time_stamp = jiffies; - return 0; - } - - timeout = - txq->time_stamp + - msecs_to_jiffies(il->cfg->base_params->wd_timeout); - - if (time_after(jiffies, timeout)) { - IL_ERR("Queue %d stuck for %u ms.\n", q->id, - il->cfg->base_params->wd_timeout); - ret = il_force_reset(il, false); - return (ret == -EAGAIN) ? 0 : 1; - } - - return 0; -} - -/* - * Making watchdog tick be a quarter of timeout assure we will - * discover the queue hung between timeout and 1.25*timeout - */ -#define IL_WD_TICK(timeout) ((timeout) / 4) - -/* - * Watchdog timer callback, we check each tx queue for stuck, if if hung - * we reset the firmware. If everything is fine just rearm the timer. 
- */ -void -il_bg_watchdog(unsigned long data) -{ - struct il_priv *il = (struct il_priv *)data; - int cnt; - unsigned long timeout; - - if (test_bit(S_EXIT_PENDING, &il->status)) - return; - - timeout = il->cfg->base_params->wd_timeout; - if (timeout == 0) - return; - - /* monitor and check for stuck cmd queue */ - if (il_check_stuck_queue(il, il->cmd_queue)) - return; - - /* monitor and check for other stuck queues */ - if (il_is_any_associated(il)) { - for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) { - /* skip as we already checked the command queue */ - if (cnt == il->cmd_queue) - continue; - if (il_check_stuck_queue(il, cnt)) - return; - } - } - - mod_timer(&il->watchdog, - jiffies + msecs_to_jiffies(IL_WD_TICK(timeout))); -} -EXPORT_SYMBOL(il_bg_watchdog); - -void -il_setup_watchdog(struct il_priv *il) -{ - unsigned int timeout = il->cfg->base_params->wd_timeout; - - if (timeout) - mod_timer(&il->watchdog, - jiffies + msecs_to_jiffies(IL_WD_TICK(timeout))); - else - del_timer(&il->watchdog); -} -EXPORT_SYMBOL(il_setup_watchdog); - -/* - * extended beacon time format - * time in usec will be changed into a 32-bit value in extended:internal format - * the extended part is the beacon counts - * the internal part is the time in usec within one beacon interval - */ -u32 -il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval) -{ - u32 quot; - u32 rem; - u32 interval = beacon_interval * TIME_UNIT; - - if (!interval || !usec) - return 0; - - quot = - (usec / - interval) & (il_beacon_time_mask_high(il, - il->hw_params. - beacon_time_tsf_bits) >> il-> - hw_params.beacon_time_tsf_bits); - rem = - (usec % interval) & il_beacon_time_mask_low(il, - il->hw_params. - beacon_time_tsf_bits); - - return (quot << il->hw_params.beacon_time_tsf_bits) + rem; -} -EXPORT_SYMBOL(il_usecs_to_beacons); - -/* base is usually what we get from ucode with each received frame, - * the same as HW timer counter counting down - */ -__le32 -il_add_beacon_time(struct il_priv *il, u32 base, u32 addon, - u32 beacon_interval) -{ - u32 base_low = base & il_beacon_time_mask_low(il, - il->hw_params. - beacon_time_tsf_bits); - u32 addon_low = addon & il_beacon_time_mask_low(il, - il->hw_params. - beacon_time_tsf_bits); - u32 interval = beacon_interval * TIME_UNIT; - u32 res = (base & il_beacon_time_mask_high(il, - il->hw_params. - beacon_time_tsf_bits)) + - (addon & il_beacon_time_mask_high(il, - il->hw_params. - beacon_time_tsf_bits)); - - if (base_low > addon_low) - res += base_low - addon_low; - else if (base_low < addon_low) { - res += interval + base_low - addon_low; - res += (1 << il->hw_params.beacon_time_tsf_bits); - } else - res += (1 << il->hw_params.beacon_time_tsf_bits); - - return cpu_to_le32(res); -} -EXPORT_SYMBOL(il_add_beacon_time); - -#ifdef CONFIG_PM - -int -il_pci_suspend(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct il_priv *il = pci_get_drvdata(pdev); - - /* - * This function is called when system goes into suspend state - * mac80211 will call il_mac_stop() from the mac80211 suspend function - * first but since il_mac_stop() has no knowledge of who the caller is, - * it will not call apm_ops.stop() to stop the DMA operation. - * Calling apm_ops.stop here to make sure we stop the DMA. 
- */ - il_apm_stop(il); - - return 0; -} -EXPORT_SYMBOL(il_pci_suspend); - -int -il_pci_resume(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct il_priv *il = pci_get_drvdata(pdev); - bool hw_rfkill = false; - - /* - * We disable the RETRY_TIMEOUT register (0x41) to keep - * PCI Tx retries from interfering with C3 CPU state. - */ - pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); - - il_enable_interrupts(il); - - if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) - hw_rfkill = true; - - if (hw_rfkill) - set_bit(S_RF_KILL_HW, &il->status); - else - clear_bit(S_RF_KILL_HW, &il->status); - - wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill); - - return 0; -} -EXPORT_SYMBOL(il_pci_resume); - -const struct dev_pm_ops il_pm_ops = { - .suspend = il_pci_suspend, - .resume = il_pci_resume, - .freeze = il_pci_suspend, - .thaw = il_pci_resume, - .poweroff = il_pci_suspend, - .restore = il_pci_resume, -}; -EXPORT_SYMBOL(il_pm_ops); - -#endif /* CONFIG_PM */ - -static void -il_update_qos(struct il_priv *il, struct il_rxon_context *ctx) -{ - if (test_bit(S_EXIT_PENDING, &il->status)) - return; - - if (!ctx->is_active) - return; - - ctx->qos_data.def_qos_parm.qos_flags = 0; - - if (ctx->qos_data.qos_active) - ctx->qos_data.def_qos_parm.qos_flags |= - QOS_PARAM_FLG_UPDATE_EDCA_MSK; - - if (ctx->ht.enabled) - ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; - - D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n", - ctx->qos_data.qos_active, ctx->qos_data.def_qos_parm.qos_flags); - - il_send_cmd_pdu_async(il, ctx->qos_cmd, sizeof(struct il_qosparam_cmd), - &ctx->qos_data.def_qos_parm, NULL); -} - -/** - * il_mac_config - mac80211 config callback - */ -int -il_mac_config(struct ieee80211_hw *hw, u32 changed) -{ - struct il_priv *il = hw->priv; - const struct il_channel_info *ch_info; - struct ieee80211_conf *conf = &hw->conf; - struct ieee80211_channel *channel = conf->channel; - struct il_ht_config *ht_conf = &il->current_ht_config; - struct il_rxon_context *ctx = &il->ctx; - unsigned long flags = 0; - int ret = 0; - u16 ch; - int scan_active = 0; - bool ht_changed = false; - - if (WARN_ON(!il->cfg->ops->legacy)) - return -EOPNOTSUPP; - - mutex_lock(&il->mutex); - - D_MAC80211("enter to channel %d changed 0x%X\n", channel->hw_value, - changed); - - if (unlikely(test_bit(S_SCANNING, &il->status))) { - scan_active = 1; - D_MAC80211("scan active\n"); - } - - if (changed & - (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) { - /* mac80211 uses static for non-HT which is what we want */ - il->current_ht_config.smps = conf->smps_mode; - - /* - * Recalculate chain counts. - * - * If monitor mode is enabled then mac80211 will - * set up the SM PS mode to OFF if an HT channel is - * configured. 
- */ - if (il->cfg->ops->hcmd->set_rxon_chain) - il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx); - } - - /* during scanning mac80211 will delay channel setting until - * scan finish with changed = 0 - */ - if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) { - - if (scan_active) - goto set_ch_out; - - ch = channel->hw_value; - ch_info = il_get_channel_info(il, channel->band, ch); - if (!il_is_channel_valid(ch_info)) { - D_MAC80211("leave - invalid channel\n"); - ret = -EINVAL; - goto set_ch_out; - } - - if (il->iw_mode == NL80211_IFTYPE_ADHOC && - !il_is_channel_ibss(ch_info)) { - D_MAC80211("leave - not IBSS channel\n"); - ret = -EINVAL; - goto set_ch_out; - } - - spin_lock_irqsave(&il->lock, flags); - - /* Configure HT40 channels */ - if (ctx->ht.enabled != conf_is_ht(conf)) { - ctx->ht.enabled = conf_is_ht(conf); - ht_changed = true; - } - if (ctx->ht.enabled) { - if (conf_is_ht40_minus(conf)) { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_BELOW; - ctx->ht.is_40mhz = true; - } else if (conf_is_ht40_plus(conf)) { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_ABOVE; - ctx->ht.is_40mhz = true; - } else { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_NONE; - ctx->ht.is_40mhz = false; - } - } else - ctx->ht.is_40mhz = false; - - /* - * Default to no protection. Protection mode will - * later be set from BSS config in il_ht_conf - */ - ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE; - - /* if we are switching from ht to 2.4 clear flags - * from any ht related info since 2.4 does not - * support ht */ - if ((le16_to_cpu(ctx->staging.channel) != ch)) - ctx->staging.flags = 0; - - il_set_rxon_channel(il, channel, ctx); - il_set_rxon_ht(il, ht_conf); - - il_set_flags_for_band(il, ctx, channel->band, ctx->vif); - - spin_unlock_irqrestore(&il->lock, flags); - - if (il->cfg->ops->legacy->update_bcast_stations) - ret = il->cfg->ops->legacy->update_bcast_stations(il); - -set_ch_out: - /* The list of supported rates and rate mask can be different - * for each band; since the band may have changed, reset - * the rate mask to what mac80211 lists */ - il_set_rate(il); - } - - if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) { - ret = il_power_update_mode(il, false); - if (ret) - D_MAC80211("Error setting sleep level\n"); - } - - if (changed & IEEE80211_CONF_CHANGE_POWER) { - D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt, - conf->power_level); - - il_set_tx_power(il, conf->power_level, false); - } - - if (!il_is_ready(il)) { - D_MAC80211("leave - not ready\n"); - goto out; - } - - if (scan_active) - goto out; - - if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging))) - il_commit_rxon(il, ctx); - else - D_INFO("Not re-sending same RXON configuration.\n"); - if (ht_changed) - il_update_qos(il, ctx); - -out: - D_MAC80211("leave\n"); - mutex_unlock(&il->mutex); - return ret; -} -EXPORT_SYMBOL(il_mac_config); - -void -il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) -{ - struct il_priv *il = hw->priv; - unsigned long flags; - struct il_rxon_context *ctx = &il->ctx; - - if (WARN_ON(!il->cfg->ops->legacy)) - return; - - mutex_lock(&il->mutex); - D_MAC80211("enter\n"); - - spin_lock_irqsave(&il->lock, flags); - memset(&il->current_ht_config, 0, sizeof(struct il_ht_config)); - spin_unlock_irqrestore(&il->lock, flags); - - spin_lock_irqsave(&il->lock, flags); - - /* new association get rid of ibss beacon skb */ - if (il->beacon_skb) - dev_kfree_skb(il->beacon_skb); - - 
il->beacon_skb = NULL; - - il->timestamp = 0; - - spin_unlock_irqrestore(&il->lock, flags); - - il_scan_cancel_timeout(il, 100); - if (!il_is_ready_rf(il)) { - D_MAC80211("leave - not ready\n"); - mutex_unlock(&il->mutex); - return; - } - - /* we are restarting association process - * clear RXON_FILTER_ASSOC_MSK bit - */ - ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - il_commit_rxon(il, ctx); - - il_set_rate(il); - - mutex_unlock(&il->mutex); - - D_MAC80211("leave\n"); -} -EXPORT_SYMBOL(il_mac_reset_tsf); - -static void -il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif) -{ - struct il_ht_config *ht_conf = &il->current_ht_config; - struct ieee80211_sta *sta; - struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; - struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif); - - D_ASSOC("enter:\n"); - - if (!ctx->ht.enabled) - return; - - ctx->ht.protection = - bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION; - ctx->ht.non_gf_sta_present = - !!(bss_conf-> - ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); - - ht_conf->single_chain_sufficient = false; - - switch (vif->type) { - case NL80211_IFTYPE_STATION: - rcu_read_lock(); - sta = ieee80211_find_sta(vif, bss_conf->bssid); - if (sta) { - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - int maxstreams; - - maxstreams = - (ht_cap->mcs. - tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK) - >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; - maxstreams += 1; - - if (ht_cap->mcs.rx_mask[1] == 0 && - ht_cap->mcs.rx_mask[2] == 0) - ht_conf->single_chain_sufficient = true; - if (maxstreams <= 1) - ht_conf->single_chain_sufficient = true; - } else { - /* - * If at all, this can only happen through a race - * when the AP disconnects us while we're still - * setting up the connection, in that case mac80211 - * will soon tell us about that. 
- */ - ht_conf->single_chain_sufficient = true; - } - rcu_read_unlock(); - break; - case NL80211_IFTYPE_ADHOC: - ht_conf->single_chain_sufficient = true; - break; - default: - break; - } - - D_ASSOC("leave\n"); -} - -static inline void -il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif) -{ - struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif); - - /* - * inform the ucode that there is no longer an - * association and that no more packets should be - * sent - */ - ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - ctx->staging.assoc_id = 0; - il_commit_rxon(il, ctx); -} - -static void -il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif) -{ - struct il_priv *il = hw->priv; - unsigned long flags; - __le64 timestamp; - struct sk_buff *skb = ieee80211_beacon_get(hw, vif); - - if (!skb) - return; - - D_MAC80211("enter\n"); - - lockdep_assert_held(&il->mutex); - - if (!il->beacon_ctx) { - IL_ERR("update beacon but no beacon context!\n"); - dev_kfree_skb(skb); - return; - } - - spin_lock_irqsave(&il->lock, flags); - - if (il->beacon_skb) - dev_kfree_skb(il->beacon_skb); - - il->beacon_skb = skb; - - timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; - il->timestamp = le64_to_cpu(timestamp); - - D_MAC80211("leave\n"); - spin_unlock_irqrestore(&il->lock, flags); - - if (!il_is_ready_rf(il)) { - D_MAC80211("leave - RF not ready\n"); - return; - } - - il->cfg->ops->legacy->post_associate(il); -} - -void -il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_bss_conf *bss_conf, u32 changes) -{ - struct il_priv *il = hw->priv; - struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif); - int ret; - - if (WARN_ON(!il->cfg->ops->legacy)) - return; - - D_MAC80211("changes = 0x%X\n", changes); - - mutex_lock(&il->mutex); - - if (!il_is_alive(il)) { - mutex_unlock(&il->mutex); - return; - } - - if (changes & BSS_CHANGED_QOS) { - unsigned long flags; - - spin_lock_irqsave(&il->lock, flags); - ctx->qos_data.qos_active = bss_conf->qos; - il_update_qos(il, ctx); - spin_unlock_irqrestore(&il->lock, flags); - } - - if (changes & BSS_CHANGED_BEACON_ENABLED) { - /* - * the add_interface code must make sure we only ever - * have a single interface that could be beaconing at - * any time. - */ - if (vif->bss_conf.enable_beacon) - il->beacon_ctx = ctx; - else - il->beacon_ctx = NULL; - } - - if (changes & BSS_CHANGED_BSSID) { - D_MAC80211("BSSID %pM\n", bss_conf->bssid); - - /* - * If there is currently a HW scan going on in the - * background then we need to cancel it else the RXON - * below/in post_associate will fail. - */ - if (il_scan_cancel_timeout(il, 100)) { - IL_WARN("Aborted scan still in progress after 100ms\n"); - D_MAC80211("leaving - scan abort failed.\n"); - mutex_unlock(&il->mutex); - return; - } - - /* mac80211 only sets assoc when in STATION mode */ - if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) { - memcpy(ctx->staging.bssid_addr, bss_conf->bssid, - ETH_ALEN); - - /* currently needed in a few places */ - memcpy(il->bssid, bss_conf->bssid, ETH_ALEN); - } else { - ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - } - - } - - /* - * This needs to be after setting the BSSID in case - * mac80211 decides to do both changes at once because - * it will invoke post_associate. 
- */ - if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON)) - il_beacon_update(hw, vif); - - if (changes & BSS_CHANGED_ERP_PREAMBLE) { - D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble); - if (bss_conf->use_short_preamble) - ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; - else - ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; - } - - if (changes & BSS_CHANGED_ERP_CTS_PROT) { - D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot); - if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ) - ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK; - else - ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; - if (bss_conf->use_cts_prot) - ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; - else - ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN; - } - - if (changes & BSS_CHANGED_BASIC_RATES) { - /* XXX use this information - * - * To do that, remove code from il_set_rate() and put something - * like this here: - * - if (A-band) - ctx->staging.ofdm_basic_rates = - bss_conf->basic_rates; - else - ctx->staging.ofdm_basic_rates = - bss_conf->basic_rates >> 4; - ctx->staging.cck_basic_rates = - bss_conf->basic_rates & 0xF; - */ - } - - if (changes & BSS_CHANGED_HT) { - il_ht_conf(il, vif); - - if (il->cfg->ops->hcmd->set_rxon_chain) - il->cfg->ops->hcmd->set_rxon_chain(il, ctx); - } - - if (changes & BSS_CHANGED_ASSOC) { - D_MAC80211("ASSOC %d\n", bss_conf->assoc); - if (bss_conf->assoc) { - il->timestamp = bss_conf->timestamp; - - if (!il_is_rfkill(il)) - il->cfg->ops->legacy->post_associate(il); - } else - il_set_no_assoc(il, vif); - } - - if (changes && il_is_associated_ctx(ctx) && bss_conf->aid) { - D_MAC80211("Changes (%#x) while associated\n", changes); - ret = il_send_rxon_assoc(il, ctx); - if (!ret) { - /* Sync active_rxon with latest change. */ - memcpy((void *)&ctx->active, &ctx->staging, - sizeof(struct il_rxon_cmd)); - } - } - - if (changes & BSS_CHANGED_BEACON_ENABLED) { - if (vif->bss_conf.enable_beacon) { - memcpy(ctx->staging.bssid_addr, bss_conf->bssid, - ETH_ALEN); - memcpy(il->bssid, bss_conf->bssid, ETH_ALEN); - il->cfg->ops->legacy->config_ap(il); - } else - il_set_no_assoc(il, vif); - } - - if (changes & BSS_CHANGED_IBSS) { - ret = - il->cfg->ops->legacy->manage_ibss_station(il, vif, - bss_conf-> - ibss_joined); - if (ret) - IL_ERR("failed to %s IBSS station %pM\n", - bss_conf->ibss_joined ? "add" : "remove", - bss_conf->bssid); - } - - mutex_unlock(&il->mutex); - - D_MAC80211("leave\n"); -} -EXPORT_SYMBOL(il_mac_bss_info_changed); - -irqreturn_t -il_isr(int irq, void *data) -{ - struct il_priv *il = data; - u32 inta, inta_mask; - u32 inta_fh; - unsigned long flags; - if (!il) - return IRQ_NONE; - - spin_lock_irqsave(&il->lock, flags); - - /* Disable (but don't clear!) interrupts here to avoid - * back-to-back ISRs and sporadic interrupts from our NIC. - * If we have something to service, the tasklet will re-enable ints. - * If we *don't* have something, we'll re-enable before leaving here. */ - inta_mask = _il_rd(il, CSR_INT_MASK); /* just for debug */ - _il_wr(il, CSR_INT_MASK, 0x00000000); - - /* Discover which interrupts are active/pending */ - inta = _il_rd(il, CSR_INT); - inta_fh = _il_rd(il, CSR_FH_INT_STATUS); - - /* Ignore interrupt if there's nothing in NIC to service. - * This may be due to IRQ shared with another device, - * or due to sporadic interrupts thrown from our NIC. 
*/ - if (!inta && !inta_fh) { - D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n"); - goto none; - } - - if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) { - /* Hardware disappeared. It might have already raised - * an interrupt */ - IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta); - goto unplugged; - } - - D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask, - inta_fh); - - inta &= ~CSR_INT_BIT_SCD; - - /* il_irq_tasklet() will service interrupts and re-enable them */ - if (likely(inta || inta_fh)) - tasklet_schedule(&il->irq_tasklet); - -unplugged: - spin_unlock_irqrestore(&il->lock, flags); - return IRQ_HANDLED; - -none: - /* re-enable interrupts here since we don't have anything to service. */ - /* only Re-enable if disabled by irq */ - if (test_bit(S_INT_ENABLED, &il->status)) - il_enable_interrupts(il); - spin_unlock_irqrestore(&il->lock, flags); - return IRQ_NONE; -} -EXPORT_SYMBOL(il_isr); - -/* - * il_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this - * function. - */ -void -il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info, - __le16 fc, __le32 *tx_flags) -{ - if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) { - *tx_flags |= TX_CMD_FLG_RTS_MSK; - *tx_flags &= ~TX_CMD_FLG_CTS_MSK; - *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; - - if (!ieee80211_is_mgmt(fc)) - return; - - switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { - case cpu_to_le16(IEEE80211_STYPE_AUTH): - case cpu_to_le16(IEEE80211_STYPE_DEAUTH): - case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): - case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): - *tx_flags &= ~TX_CMD_FLG_RTS_MSK; - *tx_flags |= TX_CMD_FLG_CTS_MSK; - break; - } - } else if (info->control.rates[0]. - flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { - *tx_flags &= ~TX_CMD_FLG_RTS_MSK; - *tx_flags |= TX_CMD_FLG_CTS_MSK; - *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; - } -} -EXPORT_SYMBOL(il_tx_cmd_protection); diff --git a/trunk/drivers/net/wireless/iwlegacy/common.h b/trunk/drivers/net/wireless/iwlegacy/common.h deleted file mode 100644 index abfa388588be..000000000000 --- a/trunk/drivers/net/wireless/iwlegacy/common.h +++ /dev/null @@ -1,3246 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ -#ifndef __il_core_h__ -#define __il_core_h__ - -#include -#include /* for struct pci_device_id */ -#include -#include -#include -#include -#include -#include - -#include "commands.h" -#include "csr.h" -#include "prph.h" - -struct il_host_cmd; -struct il_cmd; -struct il_tx_queue; - -#define IL_ERR(f, a...) dev_err(&il->pci_dev->dev, f, ## a) -#define IL_WARN(f, a...) dev_warn(&il->pci_dev->dev, f, ## a) -#define IL_INFO(f, a...) dev_info(&il->pci_dev->dev, f, ## a) - -#define RX_QUEUE_SIZE 256 -#define RX_QUEUE_MASK 255 -#define RX_QUEUE_SIZE_LOG 8 - -/* - * RX related structures and functions - */ -#define RX_FREE_BUFFERS 64 -#define RX_LOW_WATERMARK 8 - -#define U32_PAD(n) ((4-(n))&0x3) - -/* CT-KILL constants */ -#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */ - -/* Default noise level to report when noise measurement is not available. - * This may be because we're: - * 1) Not associated (4965, no beacon stats being sent to driver) - * 2) Scanning (noise measurement does not apply to associated channel) - * 3) Receiving CCK (3945 delivers noise info only for OFDM frames) - * Use default noise value of -127 ... this is below the range of measurable - * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user. - * Also, -127 works better than 0 when averaging frames with/without - * noise info (e.g. averaging might be done in app); measured dBm values are - * always negative ... using a negative value as the default keeps all - * averages within an s8's (used in some apps) range of negative values. */ -#define IL_NOISE_MEAS_NOT_AVAILABLE (-127) - -/* - * RTS threshold here is total size [2347] minus 4 FCS bytes - * Per spec: - * a value of 0 means RTS on all data/management packets - * a value > max MSDU size means no RTS - * else RTS for data/management frames where MPDU is larger - * than RTS value. - */ -#define DEFAULT_RTS_THRESHOLD 2347U -#define MIN_RTS_THRESHOLD 0U -#define MAX_RTS_THRESHOLD 2347U -#define MAX_MSDU_SIZE 2304U -#define MAX_MPDU_SIZE 2346U -#define DEFAULT_BEACON_INTERVAL 100U -#define DEFAULT_SHORT_RETRY_LIMIT 7U -#define DEFAULT_LONG_RETRY_LIMIT 4U - -struct il_rx_buf { - dma_addr_t page_dma; - struct page *page; - struct list_head list; -}; - -#define rxb_addr(r) page_address(r->page) - -/* defined below */ -struct il_device_cmd; - -struct il_cmd_meta { - /* only for SYNC commands, iff the reply skb is wanted */ - struct il_host_cmd *source; - /* - * only for ASYNC commands - * (which is somewhat stupid -- look at common.c for instance - * which duplicates a bunch of code because the callback isn't - * invoked for SYNC commands, if it were and its result passed - * through it would be simpler...) - */ - void (*callback) (struct il_priv *il, struct il_device_cmd *cmd, - struct il_rx_pkt *pkt); - - /* The CMD_SIZE_HUGE flag bit indicates that the command - * structure is stored at the end of the shared queue memory. 
*/ - u32 flags; - - DEFINE_DMA_UNMAP_ADDR(mapping); - DEFINE_DMA_UNMAP_LEN(len); -}; - -/* - * Generic queue structure - * - * Contains common data for Rx and Tx queues - */ -struct il_queue { - int n_bd; /* number of BDs in this queue */ - int write_ptr; /* 1-st empty entry (idx) host_w */ - int read_ptr; /* last used entry (idx) host_r */ - /* use for monitoring and recovering the stuck queue */ - dma_addr_t dma_addr; /* physical addr for BD's */ - int n_win; /* safe queue win */ - u32 id; - int low_mark; /* low watermark, resume queue if free - * space more than this */ - int high_mark; /* high watermark, stop queue if free - * space less than this */ -}; - -/* One for each TFD */ -struct il_tx_info { - struct sk_buff *skb; - struct il_rxon_context *ctx; -}; - -/** - * struct il_tx_queue - Tx Queue for DMA - * @q: generic Rx/Tx queue descriptor - * @bd: base of circular buffer of TFDs - * @cmd: array of command/TX buffer pointers - * @meta: array of meta data for each command/tx buffer - * @dma_addr_cmd: physical address of cmd/tx buffer array - * @txb: array of per-TFD driver data - * @time_stamp: time (in jiffies) of last read_ptr change - * @need_update: indicates need to update read/write idx - * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled - * - * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame - * descriptors) and required locking structures. - */ -#define TFD_TX_CMD_SLOTS 256 -#define TFD_CMD_SLOTS 32 - -struct il_tx_queue { - struct il_queue q; - void *tfds; - struct il_device_cmd **cmd; - struct il_cmd_meta *meta; - struct il_tx_info *txb; - unsigned long time_stamp; - u8 need_update; - u8 sched_retry; - u8 active; - u8 swq_id; -}; - -/* - * EEPROM access time values: - * - * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG. - * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1). - * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec. - * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG. - */ -#define IL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */ - -#define IL_EEPROM_SEM_TIMEOUT 10 /* microseconds */ -#define IL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ - -/* - * Regulatory channel usage flags in EEPROM struct il4965_eeprom_channel.flags. - * - * IBSS and/or AP operation is allowed *only* on those channels with - * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because - * RADAR detection is not supported by the 4965 driver, but is a - * requirement for establishing a new network for legal operation on channels - * requiring RADAR detection or restricting ACTIVE scanning. - * - * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels. - * It only indicates that 20 MHz channel use is supported; HT40 channel - * usage is indicated by a separate set of regulatory flags for each - * HT40 channel pair. - * - * NOTE: Using a channel inappropriately will result in a uCode error! 
- */ -#define IL_NUM_TX_CALIB_GROUPS 5 -enum { - EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */ - EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */ - /* Bit 2 Reserved */ - EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */ - EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */ - EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */ - /* Bit 6 Reserved (was Narrow Channel) */ - EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */ -}; - -/* SKU Capabilities */ -/* 3945 only */ -#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0) -#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1) - -/* *regulatory* channel data format in eeprom, one for each channel. - * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */ -struct il_eeprom_channel { - u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */ - s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */ -} __packed; - -/* 3945 Specific */ -#define EEPROM_3945_EEPROM_VERSION (0x2f) - -/* 4965 has two radio transmitters (and 3 radio receivers) */ -#define EEPROM_TX_POWER_TX_CHAINS (2) - -/* 4965 has room for up to 8 sets of txpower calibration data */ -#define EEPROM_TX_POWER_BANDS (8) - -/* 4965 factory calibration measures txpower gain settings for - * each of 3 target output levels */ -#define EEPROM_TX_POWER_MEASUREMENTS (3) - -/* 4965 Specific */ -/* 4965 driver does not work with txpower calibration version < 5 */ -#define EEPROM_4965_TX_POWER_VERSION (5) -#define EEPROM_4965_EEPROM_VERSION (0x2f) -#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */ -#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */ -#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */ -#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */ - -/* 2.4 GHz */ -extern const u8 il_eeprom_band_1[14]; - -/* - * factory calibration data for one txpower level, on one channel, - * measured on one of the 2 tx chains (radio transmitter and associated - * antenna). EEPROM contains: - * - * 1) Temperature (degrees Celsius) of device when measurement was made. - * - * 2) Gain table idx used to achieve the target measurement power. - * This refers to the "well-known" gain tables (see 4965.h). - * - * 3) Actual measured output power, in half-dBm ("34" = 17 dBm). - * - * 4) RF power amplifier detector level measurement (not used). - */ -struct il_eeprom_calib_measure { - u8 temperature; /* Device temperature (Celsius) */ - u8 gain_idx; /* Index into gain table */ - u8 actual_pow; /* Measured RF output power, half-dBm */ - s8 pa_det; /* Power amp detector level (not used) */ -} __packed; - -/* - * measurement set for one channel. EEPROM contains: - * - * 1) Channel number measured - * - * 2) Measurements for each of 3 power levels for each of 2 radio transmitters - * (a.k.a. "tx chains") (6 measurements altogether) - */ -struct il_eeprom_calib_ch_info { - u8 ch_num; - struct il_eeprom_calib_measure - measurements[EEPROM_TX_POWER_TX_CHAINS] - [EEPROM_TX_POWER_MEASUREMENTS]; -} __packed; - -/* - * txpower subband info. - * - * For each frequency subband, EEPROM contains the following: - * - * 1) First and last channels within range of the subband. "0" values - * indicate that this sample set is not being used. - * - * 2) Sample measurement sets for 2 channels close to the range endpoints. 
- */ -struct il_eeprom_calib_subband_info { - u8 ch_from; /* channel number of lowest channel in subband */ - u8 ch_to; /* channel number of highest channel in subband */ - struct il_eeprom_calib_ch_info ch1; - struct il_eeprom_calib_ch_info ch2; -} __packed; - -/* - * txpower calibration info. EEPROM contains: - * - * 1) Factory-measured saturation power levels (maximum levels at which - * tx power amplifier can output a signal without too much distortion). - * There is one level for 2.4 GHz band and one for 5 GHz band. These - * values apply to all channels within each of the bands. - * - * 2) Factory-measured power supply voltage level. This is assumed to be - * constant (i.e. same value applies to all channels/bands) while the - * factory measurements are being made. - * - * 3) Up to 8 sets of factory-measured txpower calibration values. - * These are for different frequency ranges, since txpower gain - * characteristics of the analog radio circuitry vary with frequency. - * - * Not all sets need to be filled with data; - * struct il_eeprom_calib_subband_info contains range of channels - * (0 if unused) for each set of data. - */ -struct il_eeprom_calib_info { - u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */ - u8 saturation_power52; /* half-dBm */ - __le16 voltage; /* signed */ - struct il_eeprom_calib_subband_info band_info[EEPROM_TX_POWER_BANDS]; -} __packed; - -/* General */ -#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */ -#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */ -#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */ -#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */ -#define EEPROM_VERSION (2*0x44) /* 2 bytes */ -#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */ -#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */ -#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */ -#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */ -#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */ - -/* The following masks are to be applied on EEPROM_RADIO_CONFIG */ -#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */ -#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */ -#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */ -#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */ -#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */ -#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */ - -#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0 -#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1 - -/* - * Per-channel regulatory data. - * - * Each channel that *might* be supported by iwl has a fixed location - * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory - * txpower (MSB). - * - * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz) - * channels (only for 4965, not supported by 3945) appear later in the EEPROM. 
- * - * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 - */ -#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */ -#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */ -#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */ - -/* - * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196, - * 5.0 GHz channels 7, 8, 11, 12, 16 - * (4915-5080MHz) (none of these is ever supported) - */ -#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */ -#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */ - -/* - * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 - * (5170-5320MHz) - */ -#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */ -#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */ - -/* - * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 - * (5500-5700MHz) - */ -#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */ -#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */ - -/* - * 5.7 GHz channels 145, 149, 153, 157, 161, 165 - * (5725-5825MHz) - */ -#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */ -#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */ - -/* - * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11) - * - * The channel listed is the center of the lower 20 MHz half of the channel. - * The overall center frequency is actually 2 channels (10 MHz) above that, - * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away - * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5, - * and the overall HT40 channel width centers on channel 3. - * - * NOTE: The RXON command uses 20 MHz channel numbers to specify the - * control channel to which to tune. RXON also specifies whether the - * control channel is the upper or lower half of a HT40 channel. - * - * NOTE: 4965 does not support HT40 channels on 2.4 GHz. - */ -#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */ - -/* - * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64), - * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161) - */ -#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */ - -#define EEPROM_REGULATORY_BAND_NO_HT40 (0) - -struct il_eeprom_ops { - const u32 regulatory_bands[7]; - int (*acquire_semaphore) (struct il_priv *il); - void (*release_semaphore) (struct il_priv *il); -}; - -int il_eeprom_init(struct il_priv *il); -void il_eeprom_free(struct il_priv *il); -const u8 *il_eeprom_query_addr(const struct il_priv *il, size_t offset); -u16 il_eeprom_query16(const struct il_priv *il, size_t offset); -int il_init_channel_map(struct il_priv *il); -void il_free_channel_map(struct il_priv *il); -const struct il_channel_info *il_get_channel_info(const struct il_priv *il, - enum ieee80211_band band, - u16 channel); - -#define IL_NUM_SCAN_RATES (2) - -struct il4965_channel_tgd_info { - u8 type; - s8 max_power; -}; - -struct il4965_channel_tgh_info { - s64 last_radar_time; -}; - -#define IL4965_MAX_RATE (33) - -struct il3945_clip_group { - /* maximum power level to prevent clipping for each rate, derived by - * us from this band's saturation power in EEPROM */ - const s8 clip_powers[IL_MAX_RATES]; -}; - -/* current Tx power values to use, one for each rate for each channel. - * requested power is limited by: - * -- regulatory EEPROM limits for this channel - * -- hardware capabilities (clip-powers) - * -- spectrum management - * -- user preference (e.g. 
iwconfig) - * when requested power is set, base power idx must also be set. */ -struct il3945_channel_power_info { - struct il3945_tx_power tpc; /* actual radio and DSP gain settings */ - s8 power_table_idx; /* actual (compenst'd) idx into gain table */ - s8 base_power_idx; /* gain idx for power at factory temp. */ - s8 requested_power; /* power (dBm) requested for this chnl/rate */ -}; - -/* current scan Tx power values to use, one for each scan rate for each - * channel. */ -struct il3945_scan_power_info { - struct il3945_tx_power tpc; /* actual radio and DSP gain settings */ - s8 power_table_idx; /* actual (compenst'd) idx into gain table */ - s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */ -}; - -/* - * One for each channel, holds all channel setup data - * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant - * with one another! - */ -struct il_channel_info { - struct il4965_channel_tgd_info tgd; - struct il4965_channel_tgh_info tgh; - struct il_eeprom_channel eeprom; /* EEPROM regulatory limit */ - struct il_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for - * HT40 channel */ - - u8 channel; /* channel number */ - u8 flags; /* flags copied from EEPROM */ - s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */ - s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */ - s8 min_power; /* always 0 */ - s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */ - - u8 group_idx; /* 0-4, maps channel to group1/2/3/4/5 */ - u8 band_idx; /* 0-4, maps channel to band1/2/3/4/5 */ - enum ieee80211_band band; - - /* HT40 channel info */ - s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */ - u8 ht40_flags; /* flags copied from EEPROM */ - u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */ - - /* Radio/DSP gain settings for each "normal" data Tx rate. - * These include, in addition to RF and DSP gain, a few fields for - * remembering/modifying gain settings (idxes). */ - struct il3945_channel_power_info power_info[IL4965_MAX_RATE]; - - /* Radio/DSP gain settings for each scan rate, for directed scans. */ - struct il3945_scan_power_info scan_pwr_info[IL_NUM_SCAN_RATES]; -}; - -#define IL_TX_FIFO_BK 0 /* shared */ -#define IL_TX_FIFO_BE 1 -#define IL_TX_FIFO_VI 2 /* shared */ -#define IL_TX_FIFO_VO 3 -#define IL_TX_FIFO_UNUSED -1 - -/* Minimum number of queues. MAX_NUM is defined in hw specific files. 
- * Set the minimum to accommodate the 4 standard TX queues, 1 command - * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */ -#define IL_MIN_NUM_QUEUES 10 - -#define IL_DEFAULT_CMD_QUEUE_NUM 4 - -#define IEEE80211_DATA_LEN 2304 -#define IEEE80211_4ADDR_LEN 30 -#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) -#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) - -struct il_frame { - union { - struct ieee80211_hdr frame; - struct il_tx_beacon_cmd beacon; - u8 raw[IEEE80211_FRAME_LEN]; - u8 cmd[360]; - } u; - struct list_head list; -}; - -#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) -#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) -#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) - -enum { - CMD_SYNC = 0, - CMD_SIZE_NORMAL = 0, - CMD_NO_SKB = 0, - CMD_SIZE_HUGE = (1 << 0), - CMD_ASYNC = (1 << 1), - CMD_WANT_SKB = (1 << 2), - CMD_MAPPED = (1 << 3), -}; - -#define DEF_CMD_PAYLOAD_SIZE 320 - -/** - * struct il_device_cmd - * - * For allocation of the command and tx queues, this establishes the overall - * size of the largest command we send to uCode, except for a scan command - * (which is relatively huge; space is allocated separately). - */ -struct il_device_cmd { - struct il_cmd_header hdr; /* uCode API */ - union { - u32 flags; - u8 val8; - u16 val16; - u32 val32; - struct il_tx_cmd tx; - u8 payload[DEF_CMD_PAYLOAD_SIZE]; - } __packed cmd; -} __packed; - -#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct il_device_cmd)) - -struct il_host_cmd { - const void *data; - unsigned long reply_page; - void (*callback) (struct il_priv *il, struct il_device_cmd *cmd, - struct il_rx_pkt *pkt); - u32 flags; - u16 len; - u8 id; -}; - -#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 -#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 -#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 - -/** - * struct il_rx_queue - Rx queue - * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) - * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) - * @read: Shared idx to newest available Rx buffer - * @write: Shared idx to oldest written Rx packet - * @free_count: Number of pre-allocated buffers in rx_free - * @rx_free: list of free SKBs for use - * @rx_used: List of Rx buffers with no SKB - * @need_update: flag to indicate we need to update read/write idx - * @rb_stts: driver's pointer to receive buffer status - * @rb_stts_dma: bus address of receive buffer status - * - * NOTE: rx_free and rx_used are used as a FIFO for il_rx_bufs - */ -struct il_rx_queue { - __le32 *bd; - dma_addr_t bd_dma; - struct il_rx_buf pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; - struct il_rx_buf *queue[RX_QUEUE_SIZE]; - u32 read; - u32 write; - u32 free_count; - u32 write_actual; - struct list_head rx_free; - struct list_head rx_used; - int need_update; - struct il_rb_status *rb_stts; - dma_addr_t rb_stts_dma; - spinlock_t lock; -}; - -#define IL_SUPPORTED_RATES_IE_LEN 8 - -#define MAX_TID_COUNT 9 - -#define IL_INVALID_RATE 0xFF -#define IL_INVALID_VALUE -1 - -/** - * struct il_ht_agg -- aggregation status while waiting for block-ack - * @txq_id: Tx queue used for Tx attempt - * @frame_count: # frames attempted by Tx command - * @wait_for_ba: Expect block-ack before next Tx reply - * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx win - * @bitmap0: Low order bitmap, one bit for each frame pending ACK in Tx win - * @bitmap1: High order, one bit for each frame pending ACK in Tx win - * @rate_n_flags: Rate at which Tx was attempted - * - * If C_TX indicates that aggregation was attempted, 
driver must wait - * for block ack (N_COMPRESSED_BA). This struct stores tx reply info - * until block ack arrives. - */ -struct il_ht_agg { - u16 txq_id; - u16 frame_count; - u16 wait_for_ba; - u16 start_idx; - u64 bitmap; - u32 rate_n_flags; -#define IL_AGG_OFF 0 -#define IL_AGG_ON 1 -#define IL_EMPTYING_HW_QUEUE_ADDBA 2 -#define IL_EMPTYING_HW_QUEUE_DELBA 3 - u8 state; -}; - -struct il_tid_data { - u16 seq_number; /* 4965 only */ - u16 tfds_in_queue; - struct il_ht_agg agg; -}; - -struct il_hw_key { - u32 cipher; - int keylen; - u8 keyidx; - u8 key[32]; -}; - -union il_ht_rate_supp { - u16 rates; - struct { - u8 siso_rate; - u8 mimo_rate; - }; -}; - -#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0) -#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1) -#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2) -#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3) -#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K -#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K -#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K - -/* - * Maximal MPDU density for TX aggregation - * 4 - 2us density - * 5 - 4us density - * 6 - 8us density - * 7 - 16us density - */ -#define CFG_HT_MPDU_DENSITY_2USEC (0x4) -#define CFG_HT_MPDU_DENSITY_4USEC (0x5) -#define CFG_HT_MPDU_DENSITY_8USEC (0x6) -#define CFG_HT_MPDU_DENSITY_16USEC (0x7) -#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC -#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC -#define CFG_HT_MPDU_DENSITY_MIN (0x1) - -struct il_ht_config { - bool single_chain_sufficient; - enum ieee80211_smps_mode smps; /* current smps mode */ -}; - -/* QoS structures */ -struct il_qos_info { - int qos_active; - struct il_qosparam_cmd def_qos_parm; -}; - -/* - * Structure should be accessed with sta_lock held. When station addition - * is in progress (IL_STA_UCODE_INPROGRESS) it is possible to access only - * the commands (il_addsta_cmd and il_link_quality_cmd) without - * sta_lock held. - */ -struct il_station_entry { - struct il_addsta_cmd sta; - struct il_tid_data tid[MAX_TID_COUNT]; - u8 used, ctxid; - struct il_hw_key keyinfo; - struct il_link_quality_cmd *lq; -}; - -struct il_station_priv_common { - struct il_rxon_context *ctx; - u8 sta_id; -}; - -/** - * struct il_vif_priv - driver's ilate per-interface information - * - * When mac80211 allocates a virtual interface, it can allocate - * space for us to put data into. 
- */ -struct il_vif_priv { - struct il_rxon_context *ctx; - u8 ibss_bssid_sta_id; -}; - -/* one for each uCode image (inst/data, boot/init/runtime) */ -struct fw_desc { - void *v_addr; /* access by driver */ - dma_addr_t p_addr; /* access by card's busmaster DMA */ - u32 len; /* bytes */ -}; - -/* uCode file layout */ -struct il_ucode_header { - __le32 ver; /* major/minor/API/serial */ - struct { - __le32 inst_size; /* bytes of runtime code */ - __le32 data_size; /* bytes of runtime data */ - __le32 init_size; /* bytes of init code */ - __le32 init_data_size; /* bytes of init data */ - __le32 boot_size; /* bytes of bootstrap code */ - u8 data[0]; /* in same order as sizes */ - } v1; -}; - -struct il4965_ibss_seq { - u8 mac[ETH_ALEN]; - u16 seq_num; - u16 frag_num; - unsigned long packet_time; - struct list_head list; -}; - -struct il_sensitivity_ranges { - u16 min_nrg_cck; - u16 max_nrg_cck; - - u16 nrg_th_cck; - u16 nrg_th_ofdm; - - u16 auto_corr_min_ofdm; - u16 auto_corr_min_ofdm_mrc; - u16 auto_corr_min_ofdm_x1; - u16 auto_corr_min_ofdm_mrc_x1; - - u16 auto_corr_max_ofdm; - u16 auto_corr_max_ofdm_mrc; - u16 auto_corr_max_ofdm_x1; - u16 auto_corr_max_ofdm_mrc_x1; - - u16 auto_corr_max_cck; - u16 auto_corr_max_cck_mrc; - u16 auto_corr_min_cck; - u16 auto_corr_min_cck_mrc; - - u16 barker_corr_th_min; - u16 barker_corr_th_min_mrc; - u16 nrg_th_cca; -}; - -#define KELVIN_TO_CELSIUS(x) ((x)-273) -#define CELSIUS_TO_KELVIN(x) ((x)+273) - -/** - * struct il_hw_params - * @max_txq_num: Max # Tx queues supported - * @dma_chnl_num: Number of Tx DMA/FIFO channels - * @scd_bc_tbls_size: size of scheduler byte count tables - * @tfd_size: TFD size - * @tx/rx_chains_num: Number of TX/RX chains - * @valid_tx/rx_ant: usable antennas - * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2) - * @max_rxq_log: Log-base-2 of max_rxq_size - * @rx_page_order: Rx buffer page order - * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR - * @max_stations: - * @ht40_channel: is 40MHz width possible in band 2.4 - * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ) - * @sw_crypto: 0 for hw, 1 for sw - * @max_xxx_size: for ucode uses - * @ct_kill_threshold: temperature threshold - * @beacon_time_tsf_bits: number of valid tsf bits for beacon time - * @struct il_sensitivity_ranges: range of sensitivity values - */ -struct il_hw_params { - u8 max_txq_num; - u8 dma_chnl_num; - u16 scd_bc_tbls_size; - u32 tfd_size; - u8 tx_chains_num; - u8 rx_chains_num; - u8 valid_tx_ant; - u8 valid_rx_ant; - u16 max_rxq_size; - u16 max_rxq_log; - u32 rx_page_order; - u32 rx_wrt_ptr_reg; - u8 max_stations; - u8 ht40_channel; - u8 max_beacon_itrvl; /* in 1024 ms */ - u32 max_inst_size; - u32 max_data_size; - u32 max_bsm_size; - u32 ct_kill_threshold; /* value in hw-dependent units */ - u16 beacon_time_tsf_bits; - const struct il_sensitivity_ranges *sens; -}; - -/****************************************************************************** - * - * Functions implemented in core module which are forward declared here - * for use by iwl-[4-5].c - * - * NOTE: The implementation of these functions are not hardware specific - * which is why they are in the core module files. 
- * - * Naming convention -- - * il_ <-- Is part of iwlwifi - * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX) - * il4965_bg_ <-- Called from work queue context - * il4965_mac_ <-- mac80211 callback - * - ****************************************************************************/ -extern void il4965_update_chain_flags(struct il_priv *il); -extern const u8 il_bcast_addr[ETH_ALEN]; -extern int il_queue_space(const struct il_queue *q); -static inline int -il_queue_used(const struct il_queue *q, int i) -{ - return q->write_ptr >= q->read_ptr ? (i >= q->read_ptr && - i < q->write_ptr) : !(i < - q->read_ptr - && i >= - q-> - write_ptr); -} - -static inline u8 -il_get_cmd_idx(struct il_queue *q, u32 idx, int is_huge) -{ - /* - * This is for init calibration result and scan command which - * required buffer > TFD_MAX_PAYLOAD_SIZE, - * the big buffer at end of command array - */ - if (is_huge) - return q->n_win; /* must be power of 2 */ - - /* Otherwise, use normal size buffers */ - return idx & (q->n_win - 1); -} - -struct il_dma_ptr { - dma_addr_t dma; - void *addr; - size_t size; -}; - -#define IL_OPERATION_MODE_AUTO 0 -#define IL_OPERATION_MODE_HT_ONLY 1 -#define IL_OPERATION_MODE_MIXED 2 -#define IL_OPERATION_MODE_20MHZ 3 - -#define IL_TX_CRC_SIZE 4 -#define IL_TX_DELIMITER_SIZE 4 - -#define TX_POWER_IL_ILLEGAL_VOLTAGE -10000 - -/* Sensitivity and chain noise calibration */ -#define INITIALIZATION_VALUE 0xFFFF -#define IL4965_CAL_NUM_BEACONS 20 -#define IL_CAL_NUM_BEACONS 16 -#define MAXIMUM_ALLOWED_PATHLOSS 15 - -#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3 - -#define MAX_FA_OFDM 50 -#define MIN_FA_OFDM 5 -#define MAX_FA_CCK 50 -#define MIN_FA_CCK 5 - -#define AUTO_CORR_STEP_OFDM 1 - -#define AUTO_CORR_STEP_CCK 3 -#define AUTO_CORR_MAX_TH_CCK 160 - -#define NRG_DIFF 2 -#define NRG_STEP_CCK 2 -#define NRG_MARGIN 8 -#define MAX_NUMBER_CCK_NO_FA 100 - -#define AUTO_CORR_CCK_MIN_VAL_DEF (125) - -#define CHAIN_A 0 -#define CHAIN_B 1 -#define CHAIN_C 2 -#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4 -#define ALL_BAND_FILTER 0xFF00 -#define IN_BAND_FILTER 0xFF -#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF - -#define NRG_NUM_PREV_STAT_L 20 -#define NUM_RX_CHAINS 3 - -enum il4965_false_alarm_state { - IL_FA_TOO_MANY = 0, - IL_FA_TOO_FEW = 1, - IL_FA_GOOD_RANGE = 2, -}; - -enum il4965_chain_noise_state { - IL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */ - IL_CHAIN_NOISE_ACCUMULATE, - IL_CHAIN_NOISE_CALIBRATED, - IL_CHAIN_NOISE_DONE, -}; - -enum il4965_calib_enabled_state { - IL_CALIB_DISABLED = 0, /* must be 0 */ - IL_CALIB_ENABLED = 1, -}; - -/* - * enum il_calib - * defines the order in which results of initial calibrations - * should be sent to the runtime uCode - */ -enum il_calib { - IL_CALIB_MAX, -}; - -/* Opaque calibration results */ -struct il_calib_result { - void *buf; - size_t buf_len; -}; - -enum ucode_type { - UCODE_NONE = 0, - UCODE_INIT, - UCODE_RT -}; - -/* Sensitivity calib data */ -struct il_sensitivity_data { - u32 auto_corr_ofdm; - u32 auto_corr_ofdm_mrc; - u32 auto_corr_ofdm_x1; - u32 auto_corr_ofdm_mrc_x1; - u32 auto_corr_cck; - u32 auto_corr_cck_mrc; - - u32 last_bad_plcp_cnt_ofdm; - u32 last_fa_cnt_ofdm; - u32 last_bad_plcp_cnt_cck; - u32 last_fa_cnt_cck; - - u32 nrg_curr_state; - u32 nrg_prev_state; - u32 nrg_value[10]; - u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L]; - u32 nrg_silence_ref; - u32 nrg_energy_idx; - u32 nrg_silence_idx; - u32 nrg_th_cck; - s32 nrg_auto_corr_silence_diff; - u32 num_in_cck_no_fa; - u32 nrg_th_ofdm; - - u16 barker_corr_th_min; - u16 
barker_corr_th_min_mrc; - u16 nrg_th_cca; -}; - -/* Chain noise (differential Rx gain) calib data */ -struct il_chain_noise_data { - u32 active_chains; - u32 chain_noise_a; - u32 chain_noise_b; - u32 chain_noise_c; - u32 chain_signal_a; - u32 chain_signal_b; - u32 chain_signal_c; - u16 beacon_count; - u8 disconn_array[NUM_RX_CHAINS]; - u8 delta_gain_code[NUM_RX_CHAINS]; - u8 radio_write; - u8 state; -}; - -#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */ -#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ - -#define IL_TRAFFIC_ENTRIES (256) -#define IL_TRAFFIC_ENTRY_SIZE (64) - -enum { - MEASUREMENT_READY = (1 << 0), - MEASUREMENT_ACTIVE = (1 << 1), -}; - -/* interrupt stats */ -struct isr_stats { - u32 hw; - u32 sw; - u32 err_code; - u32 sch; - u32 alive; - u32 rfkill; - u32 ctkill; - u32 wakeup; - u32 rx; - u32 handlers[IL_CN_MAX]; - u32 tx; - u32 unhandled; -}; - -/* management stats */ -enum il_mgmt_stats { - MANAGEMENT_ASSOC_REQ = 0, - MANAGEMENT_ASSOC_RESP, - MANAGEMENT_REASSOC_REQ, - MANAGEMENT_REASSOC_RESP, - MANAGEMENT_PROBE_REQ, - MANAGEMENT_PROBE_RESP, - MANAGEMENT_BEACON, - MANAGEMENT_ATIM, - MANAGEMENT_DISASSOC, - MANAGEMENT_AUTH, - MANAGEMENT_DEAUTH, - MANAGEMENT_ACTION, - MANAGEMENT_MAX, -}; -/* control stats */ -enum il_ctrl_stats { - CONTROL_BACK_REQ = 0, - CONTROL_BACK, - CONTROL_PSPOLL, - CONTROL_RTS, - CONTROL_CTS, - CONTROL_ACK, - CONTROL_CFEND, - CONTROL_CFENDACK, - CONTROL_MAX, -}; - -struct traffic_stats { -#ifdef CONFIG_IWLEGACY_DEBUGFS - u32 mgmt[MANAGEMENT_MAX]; - u32 ctrl[CONTROL_MAX]; - u32 data_cnt; - u64 data_bytes; -#endif -}; - -/* - * host interrupt timeout value - * used with setting interrupt coalescing timer - * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit - * - * default interrupt coalescing timer is 64 x 32 = 2048 usecs - * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs - */ -#define IL_HOST_INT_TIMEOUT_MAX (0xFF) -#define IL_HOST_INT_TIMEOUT_DEF (0x40) -#define IL_HOST_INT_TIMEOUT_MIN (0x0) -#define IL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF) -#define IL_HOST_INT_CALIB_TIMEOUT_DEF (0x10) -#define IL_HOST_INT_CALIB_TIMEOUT_MIN (0x0) - -#define IL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5) - -/* TX queue watchdog timeouts in mSecs */ -#define IL_DEF_WD_TIMEOUT (2000) -#define IL_LONG_WD_TIMEOUT (10000) -#define IL_MAX_WD_TIMEOUT (120000) - -struct il_force_reset { - int reset_request_count; - int reset_success_count; - int reset_reject_count; - unsigned long reset_duration; - unsigned long last_force_reset_jiffies; -}; - -/* extend beacon time format bit shifting */ -/* - * for _3945 devices - * bits 31:24 - extended - * bits 23:0 - interval - */ -#define IL3945_EXT_BEACON_TIME_POS 24 -/* - * for _4965 devices - * bits 31:22 - extended - * bits 21:0 - interval - */ -#define IL4965_EXT_BEACON_TIME_POS 22 - -struct il_rxon_context { - struct ieee80211_vif *vif; - - const u8 *ac_to_fifo; - const u8 *ac_to_queue; - u8 mcast_queue; - - /* - * We could use the vif to indicate active, but we - * also need it to be active during disabling when - * we already removed the vif for type setting. - */ - bool always_active, is_active; - - bool ht_need_multiple_chains; - - int ctxid; - - u32 interface_modes, exclusive_interface_modes; - u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype; - - /* - * We declare this const so it can only be - * changed via explicit cast within the - * routines that actually update the physical - * hardware. 
- */ - const struct il_rxon_cmd active; - struct il_rxon_cmd staging; - - struct il_rxon_time_cmd timing; - - struct il_qos_info qos_data; - - u8 bcast_sta_id, ap_sta_id; - - u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd; - u8 qos_cmd; - u8 wep_key_cmd; - - struct il_wep_key wep_keys[WEP_KEYS_MAX]; - u8 key_mapping_keys; - - __le32 station_flags; - - struct { - bool non_gf_sta_present; - u8 protection; - bool enabled, is_40mhz; - u8 extension_chan_offset; - } ht; -}; - -struct il_power_mgr { - struct il_powertable_cmd sleep_cmd; - struct il_powertable_cmd sleep_cmd_next; - int debug_sleep_level_override; - bool pci_pm; -}; - -struct il_priv { - - /* ieee device used by generic ieee processing code */ - struct ieee80211_hw *hw; - struct ieee80211_channel *ieee_channels; - struct ieee80211_rate *ieee_rates; - struct il_cfg *cfg; - - /* temporary frame storage list */ - struct list_head free_frames; - int frames_count; - - enum ieee80211_band band; - int alloc_rxb_page; - - void (*handlers[IL_CN_MAX]) (struct il_priv *il, - struct il_rx_buf *rxb); - - struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; - - /* spectrum measurement report caching */ - struct il_spectrum_notification measure_report; - u8 measurement_status; - - /* ucode beacon time */ - u32 ucode_beacon_time; - int missed_beacon_threshold; - - /* track IBSS manager (last beacon) status */ - u32 ibss_manager; - - /* force reset */ - struct il_force_reset force_reset; - - /* we allocate array of il_channel_info for NIC's valid channels. - * Access via channel # using indirect idx array */ - struct il_channel_info *channel_info; /* channel info array */ - u8 channel_count; /* # of channels */ - - /* thermal calibration */ - s32 temperature; /* degrees Kelvin */ - s32 last_temperature; - - /* init calibration results */ - struct il_calib_result calib_results[IL_CALIB_MAX]; - - /* Scan related variables */ - unsigned long scan_start; - unsigned long scan_start_tsf; - void *scan_cmd; - enum ieee80211_band scan_band; - struct cfg80211_scan_request *scan_request; - struct ieee80211_vif *scan_vif; - u8 scan_tx_ant[IEEE80211_NUM_BANDS]; - u8 mgmt_tx_ant; - - /* spinlock */ - spinlock_t lock; /* protect general shared data */ - spinlock_t hcmd_lock; /* protect hcmd */ - spinlock_t reg_lock; /* protect hw register access */ - struct mutex mutex; - - /* basic pci-network driver stuff */ - struct pci_dev *pci_dev; - - /* pci hardware address support */ - void __iomem *hw_base; - u32 hw_rev; - u32 hw_wa_rev; - u8 rev_id; - - /* command queue number */ - u8 cmd_queue; - - /* max number of station keys */ - u8 sta_key_max_num; - - /* EEPROM MAC addresses */ - struct mac_address addresses[1]; - - /* uCode images, save to reload in case of failure */ - int fw_idx; /* firmware we're trying to load */ - u32 ucode_ver; /* version of ucode, copy of - il_ucode.ver */ - struct fw_desc ucode_code; /* runtime inst */ - struct fw_desc ucode_data; /* runtime data original */ - struct fw_desc ucode_data_backup; /* runtime data save/restore */ - struct fw_desc ucode_init; /* initialization inst */ - struct fw_desc ucode_init_data; /* initialization data */ - struct fw_desc ucode_boot; /* bootstrap inst */ - enum ucode_type ucode_type; - u8 ucode_write_complete; /* the image write is complete */ - char firmware_name[25]; - - struct il_rxon_context ctx; - - __le16 switch_channel; - - /* 1st responses from initialize and runtime uCode images. - * _4965's initialize alive response contains some calibration data. 
*/ - struct il_init_alive_resp card_alive_init; - struct il_alive_resp card_alive; - - u16 active_rate; - - u8 start_calib; - struct il_sensitivity_data sensitivity_data; - struct il_chain_noise_data chain_noise_data; - __le16 sensitivity_tbl[HD_TBL_SIZE]; - - struct il_ht_config current_ht_config; - - /* Rate scaling data */ - u8 retry_rate; - - wait_queue_head_t wait_command_queue; - - int activity_timer_active; - - /* Rx and Tx DMA processing queues */ - struct il_rx_queue rxq; - struct il_tx_queue *txq; - unsigned long txq_ctx_active_msk; - struct il_dma_ptr kw; /* keep warm address */ - struct il_dma_ptr scd_bc_tbls; - - u32 scd_base_addr; /* scheduler sram base address */ - - unsigned long status; - - /* counts mgmt, ctl, and data packets */ - struct traffic_stats tx_stats; - struct traffic_stats rx_stats; - - /* counts interrupts */ - struct isr_stats isr_stats; - - struct il_power_mgr power_data; - - /* context information */ - u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */ - - /* station table variables */ - - /* Note: if lock and sta_lock are needed, lock must be acquired first */ - spinlock_t sta_lock; - int num_stations; - struct il_station_entry stations[IL_STATION_COUNT]; - unsigned long ucode_key_table; - - /* queue refcounts */ -#define IL_MAX_HW_QUEUES 32 - unsigned long queue_stopped[BITS_TO_LONGS(IL_MAX_HW_QUEUES)]; - /* for each AC */ - atomic_t queue_stop_count[4]; - - /* Indication if ieee80211_ops->open has been called */ - u8 is_open; - - u8 mac80211_registered; - - /* eeprom -- this is in the card's little endian byte order */ - u8 *eeprom; - struct il_eeprom_calib_info *calib_info; - - enum nl80211_iftype iw_mode; - - /* Last Rx'd beacon timestamp */ - u64 timestamp; - - union { -#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE) - struct { - void *shared_virt; - dma_addr_t shared_phys; - - struct delayed_work thermal_periodic; - struct delayed_work rfkill_poll; - - struct il3945_notif_stats stats; -#ifdef CONFIG_IWLEGACY_DEBUGFS - struct il3945_notif_stats accum_stats; - struct il3945_notif_stats delta_stats; - struct il3945_notif_stats max_delta; -#endif - - u32 sta_supp_rates; - int last_rx_rssi; /* From Rx packet stats */ - - /* Rx'd packet timing information */ - u32 last_beacon_time; - u64 last_tsf; - - /* - * each calibration channel group in the - * EEPROM has a derived clip setting for - * each rate. 
- */ - const struct il3945_clip_group clip_groups[5]; - - } _3945; -#endif -#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE) - struct { - struct il_rx_phy_res last_phy_res; - bool last_phy_res_valid; - - struct completion firmware_loading_complete; - - /* - * chain noise reset and gain commands are the - * two extra calibration commands follows the standard - * phy calibration commands - */ - u8 phy_calib_chain_noise_reset_cmd; - u8 phy_calib_chain_noise_gain_cmd; - - struct il_notif_stats stats; -#ifdef CONFIG_IWLEGACY_DEBUGFS - struct il_notif_stats accum_stats; - struct il_notif_stats delta_stats; - struct il_notif_stats max_delta; -#endif - - } _4965; -#endif - }; - - struct il_hw_params hw_params; - - u32 inta_mask; - - struct workqueue_struct *workqueue; - - struct work_struct restart; - struct work_struct scan_completed; - struct work_struct rx_replenish; - struct work_struct abort_scan; - - struct il_rxon_context *beacon_ctx; - struct sk_buff *beacon_skb; - - struct work_struct tx_flush; - - struct tasklet_struct irq_tasklet; - - struct delayed_work init_alive_start; - struct delayed_work alive_start; - struct delayed_work scan_check; - - /* TX Power */ - s8 tx_power_user_lmt; - s8 tx_power_device_lmt; - s8 tx_power_next; - -#ifdef CONFIG_IWLEGACY_DEBUG - /* debugging info */ - u32 debug_level; /* per device debugging will override global - il_debug_level if set */ -#endif /* CONFIG_IWLEGACY_DEBUG */ -#ifdef CONFIG_IWLEGACY_DEBUGFS - /* debugfs */ - u16 tx_traffic_idx; - u16 rx_traffic_idx; - u8 *tx_traffic; - u8 *rx_traffic; - struct dentry *debugfs_dir; - u32 dbgfs_sram_offset, dbgfs_sram_len; - bool disable_ht40; -#endif /* CONFIG_IWLEGACY_DEBUGFS */ - - struct work_struct txpower_work; - u32 disable_sens_cal; - u32 disable_chain_noise_cal; - u32 disable_tx_power_cal; - struct work_struct run_time_calib_work; - struct timer_list stats_periodic; - struct timer_list watchdog; - bool hw_ready; - - struct led_classdev led; - unsigned long blink_on, blink_off; - bool led_registered; -}; /*il_priv */ - -static inline void -il_txq_ctx_activate(struct il_priv *il, int txq_id) -{ - set_bit(txq_id, &il->txq_ctx_active_msk); -} - -static inline void -il_txq_ctx_deactivate(struct il_priv *il, int txq_id) -{ - clear_bit(txq_id, &il->txq_ctx_active_msk); -} - -static inline struct ieee80211_hdr * -il_tx_queue_get_hdr(struct il_priv *il, int txq_id, int idx) -{ - if (il->txq[txq_id].txb[idx].skb) - return (struct ieee80211_hdr *)il->txq[txq_id].txb[idx].skb-> - data; - return NULL; -} - -static inline struct il_rxon_context * -il_rxon_ctx_from_vif(struct ieee80211_vif *vif) -{ - struct il_vif_priv *vif_priv = (void *)vif->drv_priv; - - return vif_priv->ctx; -} - -#define for_each_context(il, _ctx) \ - for (_ctx = &il->ctx; _ctx == &il->ctx; _ctx++) - -static inline int -il_is_associated(struct il_priv *il) -{ - return (il->ctx.active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0; -} - -static inline int -il_is_any_associated(struct il_priv *il) -{ - return il_is_associated(il); -} - -static inline int -il_is_associated_ctx(struct il_rxon_context *ctx) -{ - return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0; -} - -static inline int -il_is_channel_valid(const struct il_channel_info *ch_info) -{ - if (ch_info == NULL) - return 0; - return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0; -} - -static inline int -il_is_channel_radar(const struct il_channel_info *ch_info) -{ - return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 
1 : 0; -} - -static inline u8 -il_is_channel_a_band(const struct il_channel_info *ch_info) -{ - return ch_info->band == IEEE80211_BAND_5GHZ; -} - -static inline int -il_is_channel_passive(const struct il_channel_info *ch) -{ - return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0; -} - -static inline int -il_is_channel_ibss(const struct il_channel_info *ch) -{ - return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0; -} - -static inline void -__il_free_pages(struct il_priv *il, struct page *page) -{ - __free_pages(page, il->hw_params.rx_page_order); - il->alloc_rxb_page--; -} - -static inline void -il_free_pages(struct il_priv *il, unsigned long page) -{ - free_pages(page, il->hw_params.rx_page_order); - il->alloc_rxb_page--; -} - -#define IWLWIFI_VERSION "in-tree:" -#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation" -#define DRV_AUTHOR "" - -#define IL_PCI_DEVICE(dev, subdev, cfg) \ - .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ - .subvendor = PCI_ANY_ID, .subdevice = (subdev), \ - .driver_data = (kernel_ulong_t)&(cfg) - -#define TIME_UNIT 1024 - -#define IL_SKU_G 0x1 -#define IL_SKU_A 0x2 -#define IL_SKU_N 0x8 - -#define IL_CMD(x) case x: return #x - -/* Size of one Rx buffer in host DRAM */ -#define IL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */ -#define IL_RX_BUF_SIZE_4K (4 * 1024) -#define IL_RX_BUF_SIZE_8K (8 * 1024) - -struct il_hcmd_ops { - int (*rxon_assoc) (struct il_priv *il, struct il_rxon_context *ctx); - int (*commit_rxon) (struct il_priv *il, struct il_rxon_context *ctx); - void (*set_rxon_chain) (struct il_priv *il, - struct il_rxon_context *ctx); -}; - -struct il_hcmd_utils_ops { - u16(*get_hcmd_size) (u8 cmd_id, u16 len); - u16(*build_addsta_hcmd) (const struct il_addsta_cmd *cmd, u8 *data); - int (*request_scan) (struct il_priv *il, struct ieee80211_vif *vif); - void (*post_scan) (struct il_priv *il); -}; - -struct il_apm_ops { - int (*init) (struct il_priv *il); - void (*config) (struct il_priv *il); -}; - -#ifdef CONFIG_IWLEGACY_DEBUGFS -struct il_debugfs_ops { - ssize_t(*rx_stats_read) (struct file *file, char __user *user_buf, - size_t count, loff_t *ppos); - ssize_t(*tx_stats_read) (struct file *file, char __user *user_buf, - size_t count, loff_t *ppos); - ssize_t(*general_stats_read) (struct file *file, - char __user *user_buf, size_t count, - loff_t *ppos); -}; -#endif - -struct il_temp_ops { - void (*temperature) (struct il_priv *il); -}; - -struct il_lib_ops { - /* set hw dependent parameters */ - int (*set_hw_params) (struct il_priv *il); - /* Handling TX */ - void (*txq_update_byte_cnt_tbl) (struct il_priv *il, - struct il_tx_queue *txq, - u16 byte_cnt); - int (*txq_attach_buf_to_tfd) (struct il_priv *il, - struct il_tx_queue *txq, dma_addr_t addr, - u16 len, u8 reset, u8 pad); - void (*txq_free_tfd) (struct il_priv *il, struct il_tx_queue *txq); - int (*txq_init) (struct il_priv *il, struct il_tx_queue *txq); - /* setup Rx handler */ - void (*handler_setup) (struct il_priv *il); - /* alive notification after init uCode load */ - void (*init_alive_start) (struct il_priv *il); - /* check validity of rtc data address */ - int (*is_valid_rtc_data_addr) (u32 addr); - /* 1st ucode load */ - int (*load_ucode) (struct il_priv *il); - - void (*dump_nic_error_log) (struct il_priv *il); - int (*dump_fh) (struct il_priv *il, char **buf, bool display); - int (*set_channel_switch) (struct il_priv *il, - struct ieee80211_channel_switch *ch_switch); - /* power management */ - struct il_apm_ops apm_ops; - - /* power */ - int (*send_tx_power) (struct il_priv *il); - 
void (*update_chain_flags) (struct il_priv *il); - - /* eeprom operations */ - struct il_eeprom_ops eeprom_ops; - - /* temperature */ - struct il_temp_ops temp_ops; - -#ifdef CONFIG_IWLEGACY_DEBUGFS - struct il_debugfs_ops debugfs_ops; -#endif - -}; - -struct il_led_ops { - int (*cmd) (struct il_priv *il, struct il_led_cmd *led_cmd); -}; - -struct il_legacy_ops { - void (*post_associate) (struct il_priv *il); - void (*config_ap) (struct il_priv *il); - /* station management */ - int (*update_bcast_stations) (struct il_priv *il); - int (*manage_ibss_station) (struct il_priv *il, - struct ieee80211_vif *vif, bool add); -}; - -struct il_ops { - const struct il_lib_ops *lib; - const struct il_hcmd_ops *hcmd; - const struct il_hcmd_utils_ops *utils; - const struct il_led_ops *led; - const struct il_nic_ops *nic; - const struct il_legacy_ops *legacy; - const struct ieee80211_ops *ieee80211_ops; -}; - -struct il_mod_params { - int sw_crypto; /* def: 0 = using hardware encryption */ - int disable_hw_scan; /* def: 0 = use h/w scan */ - int num_of_queues; /* def: HW dependent */ - int disable_11n; /* def: 0 = 11n capabilities enabled */ - int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */ - int antenna; /* def: 0 = both antennas (use diversity) */ - int restart_fw; /* def: 1 = restart firmware */ -}; - -/* - * @led_compensation: compensate on the led on/off time per HW according - * to the deviation to achieve the desired led frequency. - * The detail algorithm is described in common.c - * @chain_noise_num_beacons: number of beacons used to compute chain noise - * @wd_timeout: TX queues watchdog timeout - * @temperature_kelvin: temperature report by uCode in kelvin - * @ucode_tracing: support ucode continuous tracing - * @sensitivity_calib_by_driver: driver has the capability to perform - * sensitivity calibration operation - * @chain_noise_calib_by_driver: driver has the capability to perform - * chain noise calibration operation - */ -struct il_base_params { - int eeprom_size; - int num_of_queues; /* def: HW dependent */ - int num_of_ampdu_queues; /* def: HW dependent */ - /* for il_apm_init() */ - u32 pll_cfg_val; - bool set_l0s; - bool use_bsm; - - u16 led_compensation; - int chain_noise_num_beacons; - unsigned int wd_timeout; - bool temperature_kelvin; - const bool ucode_tracing; - const bool sensitivity_calib_by_driver; - const bool chain_noise_calib_by_driver; -}; - -#define IL_LED_SOLID 11 -#define IL_DEF_LED_INTRVL cpu_to_le32(1000) - -#define IL_LED_ACTIVITY (0<<1) -#define IL_LED_LINK (1<<1) - -/* - * LED mode - * IL_LED_DEFAULT: use device default - * IL_LED_RF_STATE: turn LED on/off based on RF state - * LED ON = RF ON - * LED OFF = RF OFF - * IL_LED_BLINK: adjust led blink rate based on blink table - */ -enum il_led_mode { - IL_LED_DEFAULT, - IL_LED_RF_STATE, - IL_LED_BLINK, -}; - -void il_leds_init(struct il_priv *il); -void il_leds_exit(struct il_priv *il); - -/** - * struct il_cfg - * @fw_name_pre: Firmware filename prefix. The api version and extension - * (.ucode) will be added to filename before loading from disk. The - * filename is constructed as fw_name_pre.ucode. - * @ucode_api_max: Highest version of uCode API supported by driver. - * @ucode_api_min: Lowest version of uCode API supported by driver. - * @scan_antennas: available antenna for scan operation - * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) - * - * We enable the driver to be backward compatible wrt API version. 
The - * driver specifies which APIs it supports (with @ucode_api_max being the - * highest and @ucode_api_min the lowest). Firmware will only be loaded if - * it has a supported API version. The firmware's API version will be - * stored in @il_priv, enabling the driver to make runtime changes based - * on firmware version used. - * - * For example, - * if (IL_UCODE_API(il->ucode_ver) >= 2) { - * Driver interacts with Firmware API version >= 2. - * } else { - * Driver interacts with Firmware API version 1. - * } - * - * The ideal usage of this infrastructure is to treat a new ucode API - * release as a new hardware revision. That is, through utilizing the - * il_hcmd_utils_ops etc. we accommodate different command structures - * and flows between hardware versions as well as their API - * versions. - * - */ -struct il_cfg { - /* params specific to an individual device within a device family */ - const char *name; - const char *fw_name_pre; - const unsigned int ucode_api_max; - const unsigned int ucode_api_min; - u8 valid_tx_ant; - u8 valid_rx_ant; - unsigned int sku; - u16 eeprom_ver; - u16 eeprom_calib_ver; - const struct il_ops *ops; - /* module based parameters which can be set from modprobe cmd */ - const struct il_mod_params *mod_params; - /* params not likely to change within a device family */ - struct il_base_params *base_params; - /* params likely to change within a device family */ - u8 scan_rx_antennas[IEEE80211_NUM_BANDS]; - enum il_led_mode led_mode; -}; - -/*************************** - * L i b * - ***************************/ - -struct ieee80211_hw *il_alloc_all(struct il_cfg *cfg); -int il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u16 queue, const struct ieee80211_tx_queue_params *params); -int il_mac_tx_last_beacon(struct ieee80211_hw *hw); - -void il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx, - int hw_decrypt); -int il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx); -int il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx); -int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch, - struct il_rxon_context *ctx); -void il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx, - enum ieee80211_band band, struct ieee80211_vif *vif); -u8 il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band); -void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf); -bool il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx, - struct ieee80211_sta_ht_cap *ht_cap); -void il_connection_init_rx_config(struct il_priv *il, - struct il_rxon_context *ctx); -void il_set_rate(struct il_priv *il); -int il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr, - u32 decrypt_res, struct ieee80211_rx_status *stats); -void il_irq_handle_error(struct il_priv *il); -int il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif); -void il_mac_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); -int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum nl80211_iftype newtype, bool newp2p); -int il_alloc_txq_mem(struct il_priv *il); -void il_txq_mem(struct il_priv *il); - -#ifdef CONFIG_IWLEGACY_DEBUGFS -int il_alloc_traffic_mem(struct il_priv *il); -void il_free_traffic_mem(struct il_priv *il); -void il_reset_traffic_log(struct il_priv *il); -void il_dbg_log_tx_data_frame(struct il_priv *il, u16 length, - struct ieee80211_hdr *header); -void il_dbg_log_rx_data_frame(struct 
il_priv *il, u16 length, - struct ieee80211_hdr *header); -const char *il_get_mgmt_string(int cmd); -const char *il_get_ctrl_string(int cmd); -void il_clear_traffic_stats(struct il_priv *il); -void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len); -#else -static inline int -il_alloc_traffic_mem(struct il_priv *il) -{ - return 0; -} - -static inline void -il_free_traffic_mem(struct il_priv *il) -{ -} - -static inline void -il_reset_traffic_log(struct il_priv *il) -{ -} - -static inline void -il_dbg_log_tx_data_frame(struct il_priv *il, u16 length, - struct ieee80211_hdr *header) -{ -} - -static inline void -il_dbg_log_rx_data_frame(struct il_priv *il, u16 length, - struct ieee80211_hdr *header) -{ -} - -static inline void -il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len) -{ -} -#endif -/***************************************************** - * RX handlers. - * **************************************************/ -void il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb); -void il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb); -void il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb); - -/***************************************************** -* RX -******************************************************/ -void il_cmd_queue_unmap(struct il_priv *il); -void il_cmd_queue_free(struct il_priv *il); -int il_rx_queue_alloc(struct il_priv *il); -void il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q); -int il_rx_queue_space(const struct il_rx_queue *q); -void il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb); -/* Handlers */ -void il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb); -void il_recover_from_stats(struct il_priv *il, struct il_rx_pkt *pkt); -void il_chswitch_done(struct il_priv *il, bool is_success); -void il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb); - -/* TX helpers */ - -/***************************************************** -* TX -******************************************************/ -void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq); -int il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num, - u32 txq_id); -void il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq, - int slots_num, u32 txq_id); -void il_tx_queue_unmap(struct il_priv *il, int txq_id); -void il_tx_queue_free(struct il_priv *il, int txq_id); -void il_setup_watchdog(struct il_priv *il); -/***************************************************** - * TX power - ****************************************************/ -int il_set_tx_power(struct il_priv *il, s8 tx_power, bool force); - -/******************************************************************************* - * Rate - ******************************************************************************/ - -u8 il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx); - -/******************************************************************************* - * Scanning - ******************************************************************************/ -void il_init_scan_params(struct il_priv *il); -int il_scan_cancel(struct il_priv *il); -int il_scan_cancel_timeout(struct il_priv *il, unsigned long ms); -void il_force_scan_end(struct il_priv *il); -int il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct cfg80211_scan_request *req); -void il_internal_short_hw_scan(struct il_priv *il); -int il_force_reset(struct il_priv *il, bool external); -u16 il_fill_probe_req(struct 
il_priv *il, struct ieee80211_mgmt *frame, - const u8 *ta, const u8 *ie, int ie_len, int left); -void il_setup_rx_scan_handlers(struct il_priv *il); -u16 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band, - u8 n_probes); -u16 il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band, - struct ieee80211_vif *vif); -void il_setup_scan_deferred_work(struct il_priv *il); -void il_cancel_scan_deferred_work(struct il_priv *il); - -/* For faster active scanning, scan will move to the next channel if fewer than - * PLCP_QUIET_THRESH packets are heard on this channel within - * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell - * time if it's a quiet channel (nothing responded to our probe, and there's - * no other traffic). - * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */ -#define IL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */ -#define IL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */ - -#define IL_SCAN_CHECK_WATCHDOG (HZ * 7) - -/***************************************************** - * S e n d i n g H o s t C o m m a n d s * - *****************************************************/ - -const char *il_get_cmd_string(u8 cmd); -int __must_check il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd); -int il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd); -int __must_check il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, - const void *data); -int il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data, - void (*callback) (struct il_priv *il, - struct il_device_cmd *cmd, - struct il_rx_pkt *pkt)); - -int il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd); - -/***************************************************** - * PCI * - *****************************************************/ - -static inline u16 -il_pcie_link_ctl(struct il_priv *il) -{ - int pos; - u16 pci_lnk_ctl; - pos = pci_pcie_cap(il->pci_dev); - pci_read_config_word(il->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl); - return pci_lnk_ctl; -} - -void il_bg_watchdog(unsigned long data); -u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval); -__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon, - u32 beacon_interval); - -#ifdef CONFIG_PM -int il_pci_suspend(struct device *device); -int il_pci_resume(struct device *device); -extern const struct dev_pm_ops il_pm_ops; - -#define IL_LEGACY_PM_OPS (&il_pm_ops) - -#else /* !CONFIG_PM */ - -#define IL_LEGACY_PM_OPS NULL - -#endif /* !CONFIG_PM */ - -/***************************************************** -* Error Handling Debugging -******************************************************/ -void il4965_dump_nic_error_log(struct il_priv *il); -#ifdef CONFIG_IWLEGACY_DEBUG -void il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx); -#else -static inline void -il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx) -{ -} -#endif - -void il_clear_isr_stats(struct il_priv *il); - -/***************************************************** -* GEOS -******************************************************/ -int il_init_geos(struct il_priv *il); -void il_free_geos(struct il_priv *il); - -/*************** DRIVER STATUS FUNCTIONS *****/ - -#define S_HCMD_ACTIVE 0 /* host command in progress */ -/* 1 is unused (used to be S_HCMD_SYNC_ACTIVE) */ -#define S_INT_ENABLED 2 -#define S_RF_KILL_HW 3 -#define S_CT_KILL 4 -#define S_INIT 5 -#define S_ALIVE 6 -#define S_READY 7 -#define S_TEMPERATURE 8 -#define S_GEO_CONFIGURED 9 -#define 
S_EXIT_PENDING 10 -#define S_STATS 12 -#define S_SCANNING 13 -#define S_SCAN_ABORTING 14 -#define S_SCAN_HW 15 -#define S_POWER_PMI 16 -#define S_FW_ERROR 17 -#define S_CHANNEL_SWITCH_PENDING 18 - -static inline int -il_is_ready(struct il_priv *il) -{ - /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are - * set but EXIT_PENDING is not */ - return test_bit(S_READY, &il->status) && - test_bit(S_GEO_CONFIGURED, &il->status) && - !test_bit(S_EXIT_PENDING, &il->status); -} - -static inline int -il_is_alive(struct il_priv *il) -{ - return test_bit(S_ALIVE, &il->status); -} - -static inline int -il_is_init(struct il_priv *il) -{ - return test_bit(S_INIT, &il->status); -} - -static inline int -il_is_rfkill_hw(struct il_priv *il) -{ - return test_bit(S_RF_KILL_HW, &il->status); -} - -static inline int -il_is_rfkill(struct il_priv *il) -{ - return il_is_rfkill_hw(il); -} - -static inline int -il_is_ctkill(struct il_priv *il) -{ - return test_bit(S_CT_KILL, &il->status); -} - -static inline int -il_is_ready_rf(struct il_priv *il) -{ - - if (il_is_rfkill(il)) - return 0; - - return il_is_ready(il); -} - -extern void il_send_bt_config(struct il_priv *il); -extern int il_send_stats_request(struct il_priv *il, u8 flags, bool clear); -void il_apm_stop(struct il_priv *il); -int il_apm_init(struct il_priv *il); - -int il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx); -static inline int -il_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx) -{ - return il->cfg->ops->hcmd->rxon_assoc(il, ctx); -} - -static inline int -il_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx) -{ - return il->cfg->ops->hcmd->commit_rxon(il, ctx); -} - -static inline const struct ieee80211_supported_band * -il_get_hw_mode(struct il_priv *il, enum ieee80211_band band) -{ - return il->hw->wiphy->bands[band]; -} - -/* mac80211 handlers */ -int il_mac_config(struct ieee80211_hw *hw, u32 changed); -void il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif); -void il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_bss_conf *bss_conf, u32 changes); -void il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info, - __le16 fc, __le32 *tx_flags); - -irqreturn_t il_isr(int irq, void *data); - -extern void il_set_bit(struct il_priv *p, u32 r, u32 m); -extern void il_clear_bit(struct il_priv *p, u32 r, u32 m); -extern int _il_grab_nic_access(struct il_priv *il); -extern int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout); -extern int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout); -extern u32 il_rd_prph(struct il_priv *il, u32 reg); -extern void il_wr_prph(struct il_priv *il, u32 addr, u32 val); -extern u32 il_read_targ_mem(struct il_priv *il, u32 addr); -extern void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val); - -static inline void -_il_write8(struct il_priv *il, u32 ofs, u8 val) -{ - iowrite8(val, il->hw_base + ofs); -} -#define il_write8(il, ofs, val) _il_write8(il, ofs, val) - -static inline void -_il_wr(struct il_priv *il, u32 ofs, u32 val) -{ - iowrite32(val, il->hw_base + ofs); -} - -static inline u32 -_il_rd(struct il_priv *il, u32 ofs) -{ - return ioread32(il->hw_base + ofs); -} - -static inline void -_il_clear_bit(struct il_priv *il, u32 reg, u32 mask) -{ - _il_wr(il, reg, _il_rd(il, reg) & ~mask); -} - -static inline void -_il_set_bit(struct il_priv *il, u32 reg, u32 mask) -{ - _il_wr(il, reg, _il_rd(il, reg) | mask); -} - -static inline void 
-_il_release_nic_access(struct il_priv *il) -{ - _il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); -} - -static inline u32 -il_rd(struct il_priv *il, u32 reg) -{ - u32 value; - unsigned long reg_flags; - - spin_lock_irqsave(&il->reg_lock, reg_flags); - _il_grab_nic_access(il); - value = _il_rd(il, reg); - _il_release_nic_access(il); - spin_unlock_irqrestore(&il->reg_lock, reg_flags); - return value; -} - -static inline void -il_wr(struct il_priv *il, u32 reg, u32 value) -{ - unsigned long reg_flags; - - spin_lock_irqsave(&il->reg_lock, reg_flags); - if (!_il_grab_nic_access(il)) { - _il_wr(il, reg, value); - _il_release_nic_access(il); - } - spin_unlock_irqrestore(&il->reg_lock, reg_flags); -} - -static inline u32 -_il_rd_prph(struct il_priv *il, u32 reg) -{ - _il_wr(il, HBUS_TARG_PRPH_RADDR, reg | (3 << 24)); - rmb(); - return _il_rd(il, HBUS_TARG_PRPH_RDAT); -} - -static inline void -_il_wr_prph(struct il_priv *il, u32 addr, u32 val) -{ - _il_wr(il, HBUS_TARG_PRPH_WADDR, ((addr & 0x0000FFFF) | (3 << 24))); - wmb(); - _il_wr(il, HBUS_TARG_PRPH_WDAT, val); -} - -static inline void -il_set_bits_prph(struct il_priv *il, u32 reg, u32 mask) -{ - unsigned long reg_flags; - - spin_lock_irqsave(&il->reg_lock, reg_flags); - _il_grab_nic_access(il); - _il_wr_prph(il, reg, (_il_rd_prph(il, reg) | mask)); - _il_release_nic_access(il); - spin_unlock_irqrestore(&il->reg_lock, reg_flags); -} - -static inline void -il_set_bits_mask_prph(struct il_priv *il, u32 reg, u32 bits, u32 mask) -{ - unsigned long reg_flags; - - spin_lock_irqsave(&il->reg_lock, reg_flags); - _il_grab_nic_access(il); - _il_wr_prph(il, reg, ((_il_rd_prph(il, reg) & mask) | bits)); - _il_release_nic_access(il); - spin_unlock_irqrestore(&il->reg_lock, reg_flags); -} - -static inline void -il_clear_bits_prph(struct il_priv *il, u32 reg, u32 mask) -{ - unsigned long reg_flags; - u32 val; - - spin_lock_irqsave(&il->reg_lock, reg_flags); - _il_grab_nic_access(il); - val = _il_rd_prph(il, reg); - _il_wr_prph(il, reg, (val & ~mask)); - _il_release_nic_access(il); - spin_unlock_irqrestore(&il->reg_lock, reg_flags); -} - -#define HW_KEY_DYNAMIC 0 -#define HW_KEY_DEFAULT 1 - -#define IL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */ -#define IL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */ -#define IL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of - being activated */ -#define IL_STA_LOCAL BIT(3) /* station state not directed by mac80211; - (this is for the IBSS BSSID stations) */ -#define IL_STA_BCAST BIT(4) /* this station is the special bcast station */ - -void il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx); -void il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx); -void il_dealloc_bcast_stations(struct il_priv *il); -int il_get_free_ucode_key_idx(struct il_priv *il); -int il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags); -int il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx, - const u8 *addr, bool is_ap, - struct ieee80211_sta *sta, u8 *sta_id_r); -int il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr); -int il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); - -u8 il_prep_station(struct il_priv *il, struct il_rxon_context *ctx, - const u8 *addr, bool is_ap, struct ieee80211_sta *sta); - -int il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx, - struct il_link_quality_cmd *lq, u8 flags, bool init); - -/** - * 
il_clear_driver_stations - clear knowledge of all stations from driver - * @il: iwl il struct - * - * This is called during il_down() to make sure that in the case - * we're coming there from a hardware restart mac80211 will be - * able to reconfigure stations -- if we're getting there in the - * normal down flow then the stations will already be cleared. - */ -static inline void -il_clear_driver_stations(struct il_priv *il) -{ - unsigned long flags; - struct il_rxon_context *ctx = &il->ctx; - - spin_lock_irqsave(&il->sta_lock, flags); - memset(il->stations, 0, sizeof(il->stations)); - il->num_stations = 0; - - il->ucode_key_table = 0; - - /* - * Remove all key information that is not stored as part - * of station information since mac80211 may not have had - * a chance to remove all the keys. When device is - * reconfigured by mac80211 after an error all keys will - * be reconfigured. - */ - memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys)); - ctx->key_mapping_keys = 0; - - spin_unlock_irqrestore(&il->sta_lock, flags); -} - -static inline int -il_sta_id(struct ieee80211_sta *sta) -{ - if (WARN_ON(!sta)) - return IL_INVALID_STATION; - - return ((struct il_station_priv_common *)sta->drv_priv)->sta_id; -} - -/** - * il_sta_id_or_broadcast - return sta_id or broadcast sta - * @il: iwl il - * @context: the current context - * @sta: mac80211 station - * - * In certain circumstances mac80211 passes a station pointer - * that may be %NULL, for example during TX or key setup. In - * that case, we need to use the broadcast station, so this - * inline wraps that pattern. - */ -static inline int -il_sta_id_or_broadcast(struct il_priv *il, struct il_rxon_context *context, - struct ieee80211_sta *sta) -{ - int sta_id; - - if (!sta) - return context->bcast_sta_id; - - sta_id = il_sta_id(sta); - - /* - * mac80211 should not be passing a partially - * initialised station! - */ - WARN_ON(sta_id == IL_INVALID_STATION); - - return sta_id; -} - -/** - * il_queue_inc_wrap - increment queue idx, wrap back to beginning - * @idx -- current idx - * @n_bd -- total number of entries in queue (must be power of 2) - */ -static inline int -il_queue_inc_wrap(int idx, int n_bd) -{ - return ++idx & (n_bd - 1); -} - -/** - * il_queue_dec_wrap - decrement queue idx, wrap back to end - * @idx -- current idx - * @n_bd -- total number of entries in queue (must be power of 2) - */ -static inline int -il_queue_dec_wrap(int idx, int n_bd) -{ - return --idx & (n_bd - 1); -} - -/* TODO: Move fw_desc functions to iwl-pci.ko */ -static inline void -il_free_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc) -{ - if (desc->v_addr) - dma_free_coherent(&pci_dev->dev, desc->len, desc->v_addr, - desc->p_addr); - desc->v_addr = NULL; - desc->len = 0; -} - -static inline int -il_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc) -{ - if (!desc->len) { - desc->v_addr = NULL; - return -EINVAL; - } - - desc->v_addr = - dma_alloc_coherent(&pci_dev->dev, desc->len, &desc->p_addr, - GFP_KERNEL); - return (desc->v_addr != NULL) ? 
0 : -ENOMEM; -} - -/* - * we have 8 bits used like this: - * - * 7 6 5 4 3 2 1 0 - * | | | | | | | | - * | | | | | | +-+-------- AC queue (0-3) - * | | | | | | - * | +-+-+-+-+------------ HW queue ID - * | - * +---------------------- unused - */ -static inline void -il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq) -{ - BUG_ON(ac > 3); /* only have 2 bits */ - BUG_ON(hwq > 31); /* only use 5 bits */ - - txq->swq_id = (hwq << 2) | ac; -} - -static inline void -il_wake_queue(struct il_priv *il, struct il_tx_queue *txq) -{ - u8 queue = txq->swq_id; - u8 ac = queue & 3; - u8 hwq = (queue >> 2) & 0x1f; - - if (test_and_clear_bit(hwq, il->queue_stopped)) - if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0) - ieee80211_wake_queue(il->hw, ac); -} - -static inline void -il_stop_queue(struct il_priv *il, struct il_tx_queue *txq) -{ - u8 queue = txq->swq_id; - u8 ac = queue & 3; - u8 hwq = (queue >> 2) & 0x1f; - - if (!test_and_set_bit(hwq, il->queue_stopped)) - if (atomic_inc_return(&il->queue_stop_count[ac]) > 0) - ieee80211_stop_queue(il->hw, ac); -} - -#ifdef ieee80211_stop_queue -#undef ieee80211_stop_queue -#endif - -#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue - -#ifdef ieee80211_wake_queue -#undef ieee80211_wake_queue -#endif - -#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue - -static inline void -il_disable_interrupts(struct il_priv *il) -{ - clear_bit(S_INT_ENABLED, &il->status); - - /* disable interrupts from uCode/NIC to host */ - _il_wr(il, CSR_INT_MASK, 0x00000000); - - /* acknowledge/clear/reset any interrupts still pending - * from uCode or flow handler (Rx/Tx DMA) */ - _il_wr(il, CSR_INT, 0xffffffff); - _il_wr(il, CSR_FH_INT_STATUS, 0xffffffff); -} - -static inline void -il_enable_rfkill_int(struct il_priv *il) -{ - _il_wr(il, CSR_INT_MASK, CSR_INT_BIT_RF_KILL); -} - -static inline void -il_enable_interrupts(struct il_priv *il) -{ - set_bit(S_INT_ENABLED, &il->status); - _il_wr(il, CSR_INT_MASK, il->inta_mask); -} - -/** - * il_beacon_time_mask_low - mask of lower 32 bit of beacon time - * @il -- pointer to il_priv data structure - * @tsf_bits -- number of bits need to shift for masking) - */ -static inline u32 -il_beacon_time_mask_low(struct il_priv *il, u16 tsf_bits) -{ - return (1 << tsf_bits) - 1; -} - -/** - * il_beacon_time_mask_high - mask of higher 32 bit of beacon time - * @il -- pointer to il_priv data structure - * @tsf_bits -- number of bits need to shift for masking) - */ -static inline u32 -il_beacon_time_mask_high(struct il_priv *il, u16 tsf_bits) -{ - return ((1 << (32 - tsf_bits)) - 1) << tsf_bits; -} - -/** - * struct il_rb_status - reseve buffer status host memory mapped FH registers - * - * @closed_rb_num [0:11] - Indicates the idx of the RB which was closed - * @closed_fr_num [0:11] - Indicates the idx of the RX Frame which was closed - * @finished_rb_num [0:11] - Indicates the idx of the current RB - * in which the last frame was written to - * @finished_fr_num [0:11] - Indicates the idx of the RX Frame - * which was transferred - */ -struct il_rb_status { - __le16 closed_rb_num; - __le16 closed_fr_num; - __le16 finished_rb_num; - __le16 finished_fr_nam; - __le32 __unused; /* 3945 only */ -} __packed; - -#define TFD_QUEUE_SIZE_MAX (256) -#define TFD_QUEUE_SIZE_BC_DUP (64) -#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP) -#define IL_TX_DMA_MASK DMA_BIT_MASK(36) -#define IL_NUM_OF_TBS 20 - -static inline u8 -il_get_dma_hi_addr(dma_addr_t addr) -{ - return (sizeof(addr) > sizeof(u32) ? 
(addr >> 16) >> 16 : 0) & 0xF; -} - -/** - * struct il_tfd_tb transmit buffer descriptor within transmit frame descriptor - * - * This structure contains dma address and length of transmission address - * - * @lo: low [31:0] portion of the dma address of TX buffer every even is - * unaligned on 16 bit boundary - * @hi_n_len: 0-3 [35:32] portion of dma - * 4-15 length of the tx buffer - */ -struct il_tfd_tb { - __le32 lo; - __le16 hi_n_len; -} __packed; - -/** - * struct il_tfd - * - * Transmit Frame Descriptor (TFD) - * - * @ __reserved1[3] reserved - * @ num_tbs 0-4 number of active tbs - * 5 reserved - * 6-7 padding (not used) - * @ tbs[20] transmit frame buffer descriptors - * @ __pad padding - * - * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM. - * Both driver and device share these circular buffers, each of which must be - * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes - * - * Driver must indicate the physical address of the base of each - * circular buffer via the FH49_MEM_CBBC_QUEUE registers. - * - * Each TFD contains pointer/size information for up to 20 data buffers - * in host DRAM. These buffers collectively contain the (one) frame described - * by the TFD. Each buffer must be a single contiguous block of memory within - * itself, but buffers may be scattered in host DRAM. Each buffer has max size - * of (4K - 4). The concatenates all of a TFD's buffers into a single - * Tx frame, up to 8 KBytes in size. - * - * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx. - */ -struct il_tfd { - u8 __reserved1[3]; - u8 num_tbs; - struct il_tfd_tb tbs[IL_NUM_OF_TBS]; - __le32 __pad; -} __packed; -/* PCI registers */ -#define PCI_CFG_RETRY_TIMEOUT 0x041 - -/* PCI register values */ -#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01 -#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02 - -struct il_rate_info { - u8 plcp; /* uCode API: RATE_6M_PLCP, etc. */ - u8 plcp_siso; /* uCode API: RATE_SISO_6M_PLCP, etc. */ - u8 plcp_mimo2; /* uCode API: RATE_MIMO2_6M_PLCP, etc. */ - u8 ieee; /* MAC header: RATE_6M_IEEE, etc. */ - u8 prev_ieee; /* previous rate in IEEE speeds */ - u8 next_ieee; /* next rate in IEEE speeds */ - u8 prev_rs; /* previous rate used in rs algo */ - u8 next_rs; /* next rate used in rs algo */ - u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ - u8 next_rs_tgg; /* next rate used in TGG rs algo */ -}; - -struct il3945_rate_info { - u8 plcp; /* uCode API: RATE_6M_PLCP, etc. */ - u8 ieee; /* MAC header: RATE_6M_IEEE, etc. 
*/ - u8 prev_ieee; /* previous rate in IEEE speeds */ - u8 next_ieee; /* next rate in IEEE speeds */ - u8 prev_rs; /* previous rate used in rs algo */ - u8 next_rs; /* next rate used in rs algo */ - u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ - u8 next_rs_tgg; /* next rate used in TGG rs algo */ - u8 table_rs_idx; /* idx in rate scale table cmd */ - u8 prev_table_rs; /* prev in rate table cmd */ -}; - -/* - * These serve as idxes into - * struct il_rate_info il_rates[RATE_COUNT]; - */ -enum { - RATE_1M_IDX = 0, - RATE_2M_IDX, - RATE_5M_IDX, - RATE_11M_IDX, - RATE_6M_IDX, - RATE_9M_IDX, - RATE_12M_IDX, - RATE_18M_IDX, - RATE_24M_IDX, - RATE_36M_IDX, - RATE_48M_IDX, - RATE_54M_IDX, - RATE_60M_IDX, - RATE_COUNT, - RATE_COUNT_LEGACY = RATE_COUNT - 1, /* Excluding 60M */ - RATE_COUNT_3945 = RATE_COUNT - 1, - RATE_INVM_IDX = RATE_COUNT, - RATE_INVALID = RATE_COUNT, -}; - -enum { - RATE_6M_IDX_TBL = 0, - RATE_9M_IDX_TBL, - RATE_12M_IDX_TBL, - RATE_18M_IDX_TBL, - RATE_24M_IDX_TBL, - RATE_36M_IDX_TBL, - RATE_48M_IDX_TBL, - RATE_54M_IDX_TBL, - RATE_1M_IDX_TBL, - RATE_2M_IDX_TBL, - RATE_5M_IDX_TBL, - RATE_11M_IDX_TBL, - RATE_INVM_IDX_TBL = RATE_INVM_IDX - 1, -}; - -enum { - IL_FIRST_OFDM_RATE = RATE_6M_IDX, - IL39_LAST_OFDM_RATE = RATE_54M_IDX, - IL_LAST_OFDM_RATE = RATE_60M_IDX, - IL_FIRST_CCK_RATE = RATE_1M_IDX, - IL_LAST_CCK_RATE = RATE_11M_IDX, -}; - -/* #define vs. enum to keep from defaulting to 'large integer' */ -#define RATE_6M_MASK (1 << RATE_6M_IDX) -#define RATE_9M_MASK (1 << RATE_9M_IDX) -#define RATE_12M_MASK (1 << RATE_12M_IDX) -#define RATE_18M_MASK (1 << RATE_18M_IDX) -#define RATE_24M_MASK (1 << RATE_24M_IDX) -#define RATE_36M_MASK (1 << RATE_36M_IDX) -#define RATE_48M_MASK (1 << RATE_48M_IDX) -#define RATE_54M_MASK (1 << RATE_54M_IDX) -#define RATE_60M_MASK (1 << RATE_60M_IDX) -#define RATE_1M_MASK (1 << RATE_1M_IDX) -#define RATE_2M_MASK (1 << RATE_2M_IDX) -#define RATE_5M_MASK (1 << RATE_5M_IDX) -#define RATE_11M_MASK (1 << RATE_11M_IDX) - -/* uCode API values for legacy bit rates, both OFDM and CCK */ -enum { - RATE_6M_PLCP = 13, - RATE_9M_PLCP = 15, - RATE_12M_PLCP = 5, - RATE_18M_PLCP = 7, - RATE_24M_PLCP = 9, - RATE_36M_PLCP = 11, - RATE_48M_PLCP = 1, - RATE_54M_PLCP = 3, - RATE_60M_PLCP = 3, /*FIXME:RS:should be removed */ - RATE_1M_PLCP = 10, - RATE_2M_PLCP = 20, - RATE_5M_PLCP = 55, - RATE_11M_PLCP = 110, - /*FIXME:RS:add RATE_LEGACY_INVM_PLCP = 0, */ -}; - -/* uCode API values for OFDM high-throughput (HT) bit rates */ -enum { - RATE_SISO_6M_PLCP = 0, - RATE_SISO_12M_PLCP = 1, - RATE_SISO_18M_PLCP = 2, - RATE_SISO_24M_PLCP = 3, - RATE_SISO_36M_PLCP = 4, - RATE_SISO_48M_PLCP = 5, - RATE_SISO_54M_PLCP = 6, - RATE_SISO_60M_PLCP = 7, - RATE_MIMO2_6M_PLCP = 0x8, - RATE_MIMO2_12M_PLCP = 0x9, - RATE_MIMO2_18M_PLCP = 0xa, - RATE_MIMO2_24M_PLCP = 0xb, - RATE_MIMO2_36M_PLCP = 0xc, - RATE_MIMO2_48M_PLCP = 0xd, - RATE_MIMO2_54M_PLCP = 0xe, - RATE_MIMO2_60M_PLCP = 0xf, - RATE_SISO_INVM_PLCP, - RATE_MIMO2_INVM_PLCP = RATE_SISO_INVM_PLCP, -}; - -/* MAC header values for bit rates */ -enum { - RATE_6M_IEEE = 12, - RATE_9M_IEEE = 18, - RATE_12M_IEEE = 24, - RATE_18M_IEEE = 36, - RATE_24M_IEEE = 48, - RATE_36M_IEEE = 72, - RATE_48M_IEEE = 96, - RATE_54M_IEEE = 108, - RATE_60M_IEEE = 120, - RATE_1M_IEEE = 2, - RATE_2M_IEEE = 4, - RATE_5M_IEEE = 11, - RATE_11M_IEEE = 22, -}; - -#define IL_CCK_BASIC_RATES_MASK \ - (RATE_1M_MASK | \ - RATE_2M_MASK) - -#define IL_CCK_RATES_MASK \ - (IL_CCK_BASIC_RATES_MASK | \ - RATE_5M_MASK | \ - RATE_11M_MASK) - -#define IL_OFDM_BASIC_RATES_MASK \ 
- (RATE_6M_MASK | \ - RATE_12M_MASK | \ - RATE_24M_MASK) - -#define IL_OFDM_RATES_MASK \ - (IL_OFDM_BASIC_RATES_MASK | \ - RATE_9M_MASK | \ - RATE_18M_MASK | \ - RATE_36M_MASK | \ - RATE_48M_MASK | \ - RATE_54M_MASK) - -#define IL_BASIC_RATES_MASK \ - (IL_OFDM_BASIC_RATES_MASK | \ - IL_CCK_BASIC_RATES_MASK) - -#define RATES_MASK ((1 << RATE_COUNT) - 1) -#define RATES_MASK_3945 ((1 << RATE_COUNT_3945) - 1) - -#define IL_INVALID_VALUE -1 - -#define IL_MIN_RSSI_VAL -100 -#define IL_MAX_RSSI_VAL 0 - -/* These values specify how many Tx frame attempts before - * searching for a new modulation mode */ -#define IL_LEGACY_FAILURE_LIMIT 160 -#define IL_LEGACY_SUCCESS_LIMIT 480 -#define IL_LEGACY_TBL_COUNT 160 - -#define IL_NONE_LEGACY_FAILURE_LIMIT 400 -#define IL_NONE_LEGACY_SUCCESS_LIMIT 4500 -#define IL_NONE_LEGACY_TBL_COUNT 1500 - -/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */ -#define IL_RS_GOOD_RATIO 12800 /* 100% */ -#define RATE_SCALE_SWITCH 10880 /* 85% */ -#define RATE_HIGH_TH 10880 /* 85% */ -#define RATE_INCREASE_TH 6400 /* 50% */ -#define RATE_DECREASE_TH 1920 /* 15% */ - -/* possible actions when in legacy mode */ -#define IL_LEGACY_SWITCH_ANTENNA1 0 -#define IL_LEGACY_SWITCH_ANTENNA2 1 -#define IL_LEGACY_SWITCH_SISO 2 -#define IL_LEGACY_SWITCH_MIMO2_AB 3 -#define IL_LEGACY_SWITCH_MIMO2_AC 4 -#define IL_LEGACY_SWITCH_MIMO2_BC 5 - -/* possible actions when in siso mode */ -#define IL_SISO_SWITCH_ANTENNA1 0 -#define IL_SISO_SWITCH_ANTENNA2 1 -#define IL_SISO_SWITCH_MIMO2_AB 2 -#define IL_SISO_SWITCH_MIMO2_AC 3 -#define IL_SISO_SWITCH_MIMO2_BC 4 -#define IL_SISO_SWITCH_GI 5 - -/* possible actions when in mimo mode */ -#define IL_MIMO2_SWITCH_ANTENNA1 0 -#define IL_MIMO2_SWITCH_ANTENNA2 1 -#define IL_MIMO2_SWITCH_SISO_A 2 -#define IL_MIMO2_SWITCH_SISO_B 3 -#define IL_MIMO2_SWITCH_SISO_C 4 -#define IL_MIMO2_SWITCH_GI 5 - -#define IL_MAX_SEARCH IL_MIMO2_SWITCH_GI - -#define IL_ACTION_LIMIT 3 /* # possible actions */ - -#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ - -/* load per tid defines for A-MPDU activation */ -#define IL_AGG_TPT_THREHOLD 0 -#define IL_AGG_LOAD_THRESHOLD 10 -#define IL_AGG_ALL_TID 0xff -#define TID_QUEUE_CELL_SPACING 50 /*mS */ -#define TID_QUEUE_MAX_SIZE 20 -#define TID_ROUND_VALUE 5 /* mS */ -#define TID_MAX_LOAD_COUNT 8 - -#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING) -#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? 
(y) - (x) : (0-(x)) + (y)) - -extern const struct il_rate_info il_rates[RATE_COUNT]; - -enum il_table_type { - LQ_NONE, - LQ_G, /* legacy types */ - LQ_A, - LQ_SISO, /* high-throughput types */ - LQ_MIMO2, - LQ_MAX, -}; - -#define is_legacy(tbl) ((tbl) == LQ_G || (tbl) == LQ_A) -#define is_siso(tbl) ((tbl) == LQ_SISO) -#define is_mimo2(tbl) ((tbl) == LQ_MIMO2) -#define is_mimo(tbl) (is_mimo2(tbl)) -#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl)) -#define is_a_band(tbl) ((tbl) == LQ_A) -#define is_g_and(tbl) ((tbl) == LQ_G) - -#define ANT_NONE 0x0 -#define ANT_A BIT(0) -#define ANT_B BIT(1) -#define ANT_AB (ANT_A | ANT_B) -#define ANT_C BIT(2) -#define ANT_AC (ANT_A | ANT_C) -#define ANT_BC (ANT_B | ANT_C) -#define ANT_ABC (ANT_AB | ANT_C) - -#define IL_MAX_MCS_DISPLAY_SIZE 12 - -struct il_rate_mcs_info { - char mbps[IL_MAX_MCS_DISPLAY_SIZE]; - char mcs[IL_MAX_MCS_DISPLAY_SIZE]; -}; - -/** - * struct il_rate_scale_data -- tx success history for one rate - */ -struct il_rate_scale_data { - u64 data; /* bitmap of successful frames */ - s32 success_counter; /* number of frames successful */ - s32 success_ratio; /* per-cent * 128 */ - s32 counter; /* number of frames attempted */ - s32 average_tpt; /* success ratio * expected throughput */ - unsigned long stamp; -}; - -/** - * struct il_scale_tbl_info -- tx params and success history for all rates - * - * There are two of these in struct il_lq_sta, - * one for "active", and one for "search". - */ -struct il_scale_tbl_info { - enum il_table_type lq_type; - u8 ant_type; - u8 is_SGI; /* 1 = short guard interval */ - u8 is_ht40; /* 1 = 40 MHz channel width */ - u8 is_dup; /* 1 = duplicated data streams */ - u8 action; /* change modulation; IL_[LEGACY/SISO/MIMO]_SWITCH_* */ - u8 max_search; /* maximun number of tables we can search */ - s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */ - u32 current_rate; /* rate_n_flags, uCode API format */ - struct il_rate_scale_data win[RATE_COUNT]; /* rate histories */ -}; - -struct il_traffic_load { - unsigned long time_stamp; /* age of the oldest stats */ - u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time - * slice */ - u32 total; /* total num of packets during the - * last TID_MAX_TIME_DIFF */ - u8 queue_count; /* number of queues that has - * been used since the last cleanup */ - u8 head; /* start of the circular buffer */ -}; - -/** - * struct il_lq_sta -- driver's rate scaling ilate structure - * - * Pointer to this gets passed back and forth between driver and mac80211. - */ -struct il_lq_sta { - u8 active_tbl; /* idx of active table, range 0-1 */ - u8 enable_counter; /* indicates HT mode */ - u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */ - u8 search_better_tbl; /* 1: currently trying alternate mode */ - s32 last_tpt; - - /* The following determine when to search for a new mode */ - u32 table_count_limit; - u32 max_failure_limit; /* # failed frames before new search */ - u32 max_success_limit; /* # successful frames before new search */ - u32 table_count; - u32 total_failed; /* total failed frames, any/all rates */ - u32 total_success; /* total successful frames, any/all rates */ - u64 flush_timer; /* time staying in mode before new search */ - - u8 action_counter; /* # mode-switch actions tried */ - u8 is_green; - u8 is_dup; - enum ieee80211_band band; - - /* The following are bitmaps of rates; RATE_6M_MASK, etc. 
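For illustration (a sketch, not taken from the driver): such a bitmap is built by OR-ing the per-rate masks defined above, and since each RATE_xM_MASK is (1 << RATE_xM_IDX), testing a mask bit is the same as testing the corresponding rate index:

	u32 rates = IL_CCK_RATES_MASK | IL_OFDM_BASIC_RATES_MASK;
	int has_6m = !!(rates & RATE_6M_MASK);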
*/ - u32 supp_rates; - u16 active_legacy_rate; - u16 active_siso_rate; - u16 active_mimo2_rate; - s8 max_rate_idx; /* Max rate set by user */ - u8 missed_rate_counter; - - struct il_link_quality_cmd lq; - struct il_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ - struct il_traffic_load load[TID_MAX_LOAD_COUNT]; - u8 tx_agg_tid_en; -#ifdef CONFIG_MAC80211_DEBUGFS - struct dentry *rs_sta_dbgfs_scale_table_file; - struct dentry *rs_sta_dbgfs_stats_table_file; - struct dentry *rs_sta_dbgfs_rate_scale_data_file; - struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; - u32 dbg_fixed_rate; -#endif - struct il_priv *drv; - - /* used to be in sta_info */ - int last_txrate_idx; - /* last tx rate_n_flags */ - u32 last_rate_n_flags; - /* packets destined for this STA are aggregated */ - u8 is_agg; -}; - -/* - * il_station_priv: Driver's ilate station information - * - * When mac80211 creates a station it reserves some space (hw->sta_data_size) - * in the structure for use by driver. This structure is places in that - * space. - * - * The common struct MUST be first because it is shared between - * 3945 and 4965! - */ -struct il_station_priv { - struct il_station_priv_common common; - struct il_lq_sta lq_sta; - atomic_t pending_frames; - bool client; - bool asleep; -}; - -static inline u8 -il4965_num_of_ant(u8 m) -{ - return !!(m & ANT_A) + !!(m & ANT_B) + !!(m & ANT_C); -} - -static inline u8 -il4965_first_antenna(u8 mask) -{ - if (mask & ANT_A) - return ANT_A; - if (mask & ANT_B) - return ANT_B; - return ANT_C; -} - -/** - * il3945_rate_scale_init - Initialize the rate scale table based on assoc info - * - * The specific throughput table used is based on the type of network - * the associated with, including A, B, G, and G w/ TGG protection - */ -extern void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id); - -/* Initialize station's rate scaling information after adding station */ -extern void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, - u8 sta_id); -extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, - u8 sta_id); - -/** - * il_rate_control_register - Register the rate control algorithm callbacks - * - * Since the rate control algorithm is hardware specific, there is no need - * or reason to place it as a stand alone module. The driver can call - * il_rate_control_register in order to register the rate control callbacks - * with the mac80211 subsystem. This should be performed prior to calling - * ieee80211_register_hw - * - */ -extern int il4965_rate_control_register(void); -extern int il3945_rate_control_register(void); - -/** - * il_rate_control_unregister - Unregister the rate control callbacks - * - * This should be called after calling ieee80211_unregister_hw, but before - * the driver is unloaded. - */ -extern void il4965_rate_control_unregister(void); -extern void il3945_rate_control_unregister(void); - -extern int il_power_update_mode(struct il_priv *il, bool force); -extern void il_power_initialize(struct il_priv *il); - -extern u32 il_debug_level; - -#ifdef CONFIG_IWLEGACY_DEBUG -/* - * il_get_debug_level: Return active debug level for device - * - * Using sysfs it is possible to set per device debug level. This debug - * level will be used if set, otherwise the global debug level which can be - * set via module parameter is used. 
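For example (an assumed usage sketch): with CONFIG_IWLEGACY_DEBUG=y, writing a bitmask of IL_DL_* bits to the per-device sysfs file overrides the module parameter for that device only, e.g.

	echo 0x801 > /sys/class/net/wlan0/device/debug_level

sets IL_DL_INFO (bit 0) and IL_DL_SCAN (bit 11), so D_INFO() and D_SCAN() messages are printed for this device while other classifications stay quiet.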
- */ -static inline u32 -il_get_debug_level(struct il_priv *il) -{ - if (il->debug_level) - return il->debug_level; - else - return il_debug_level; -} -#else -static inline u32 -il_get_debug_level(struct il_priv *il) -{ - return il_debug_level; -} -#endif - -#define il_print_hex_error(il, p, len) \ -do { \ - print_hex_dump(KERN_ERR, "iwl data: ", \ - DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ -} while (0) - -#ifdef CONFIG_IWLEGACY_DEBUG -#define IL_DBG(level, fmt, args...) \ -do { \ - if (il_get_debug_level(il) & level) \ - dev_printk(KERN_ERR, &il->hw->wiphy->dev, \ - "%c %s " fmt, in_interrupt() ? 'I' : 'U', \ - __func__ , ## args); \ -} while (0) - -#define il_print_hex_dump(il, level, p, len) \ -do { \ - if (il_get_debug_level(il) & level) \ - print_hex_dump(KERN_DEBUG, "iwl data: ", \ - DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ -} while (0) - -#else -#define IL_DBG(level, fmt, args...) -static inline void -il_print_hex_dump(struct il_priv *il, int level, const void *p, u32 len) -{ -} -#endif /* CONFIG_IWLEGACY_DEBUG */ - -#ifdef CONFIG_IWLEGACY_DEBUGFS -int il_dbgfs_register(struct il_priv *il, const char *name); -void il_dbgfs_unregister(struct il_priv *il); -#else -static inline int -il_dbgfs_register(struct il_priv *il, const char *name) -{ - return 0; -} - -static inline void -il_dbgfs_unregister(struct il_priv *il) -{ -} -#endif /* CONFIG_IWLEGACY_DEBUGFS */ - -/* - * To use the debug system: - * - * If you are defining a new debug classification, simply add it to the #define - * list here in the form of - * - * #define IL_DL_xxxx VALUE - * - * where xxxx should be the name of the classification (for example, WEP). - * - * You then need to either add a IL_xxxx_DEBUG() macro definition for your - * classification, or use IL_DBG(IL_DL_xxxx, ...) whenever you want - * to send output to that classification. - * - * The active debug levels can be accessed via files - * - * /sys/module/iwl4965/parameters/debug - * /sys/module/iwl3945/parameters/debug - * /sys/class/net/wlan0/device/debug_level - * - * when CONFIG_IWLEGACY_DEBUG=y. - */ - -/* 0x0000000F - 0x00000001 */ -#define IL_DL_INFO (1 << 0) -#define IL_DL_MAC80211 (1 << 1) -#define IL_DL_HCMD (1 << 2) -#define IL_DL_STATE (1 << 3) -/* 0x000000F0 - 0x00000010 */ -#define IL_DL_MACDUMP (1 << 4) -#define IL_DL_HCMD_DUMP (1 << 5) -#define IL_DL_EEPROM (1 << 6) -#define IL_DL_RADIO (1 << 7) -/* 0x00000F00 - 0x00000100 */ -#define IL_DL_POWER (1 << 8) -#define IL_DL_TEMP (1 << 9) -#define IL_DL_NOTIF (1 << 10) -#define IL_DL_SCAN (1 << 11) -/* 0x0000F000 - 0x00001000 */ -#define IL_DL_ASSOC (1 << 12) -#define IL_DL_DROP (1 << 13) -#define IL_DL_TXPOWER (1 << 14) -#define IL_DL_AP (1 << 15) -/* 0x000F0000 - 0x00010000 */ -#define IL_DL_FW (1 << 16) -#define IL_DL_RF_KILL (1 << 17) -#define IL_DL_FW_ERRORS (1 << 18) -#define IL_DL_LED (1 << 19) -/* 0x00F00000 - 0x00100000 */ -#define IL_DL_RATE (1 << 20) -#define IL_DL_CALIB (1 << 21) -#define IL_DL_WEP (1 << 22) -#define IL_DL_TX (1 << 23) -/* 0x0F000000 - 0x01000000 */ -#define IL_DL_RX (1 << 24) -#define IL_DL_ISR (1 << 25) -#define IL_DL_HT (1 << 26) -/* 0xF0000000 - 0x10000000 */ -#define IL_DL_11H (1 << 28) -#define IL_DL_STATS (1 << 29) -#define IL_DL_TX_REPLY (1 << 30) -#define IL_DL_QOS (1 << 31) - -#define D_INFO(f, a...) IL_DBG(IL_DL_INFO, f, ## a) -#define D_MAC80211(f, a...) IL_DBG(IL_DL_MAC80211, f, ## a) -#define D_MACDUMP(f, a...) IL_DBG(IL_DL_MACDUMP, f, ## a) -#define D_TEMP(f, a...) IL_DBG(IL_DL_TEMP, f, ## a) -#define D_SCAN(f, a...) 
IL_DBG(IL_DL_SCAN, f, ## a) -#define D_RX(f, a...) IL_DBG(IL_DL_RX, f, ## a) -#define D_TX(f, a...) IL_DBG(IL_DL_TX, f, ## a) -#define D_ISR(f, a...) IL_DBG(IL_DL_ISR, f, ## a) -#define D_LED(f, a...) IL_DBG(IL_DL_LED, f, ## a) -#define D_WEP(f, a...) IL_DBG(IL_DL_WEP, f, ## a) -#define D_HC(f, a...) IL_DBG(IL_DL_HCMD, f, ## a) -#define D_HC_DUMP(f, a...) IL_DBG(IL_DL_HCMD_DUMP, f, ## a) -#define D_EEPROM(f, a...) IL_DBG(IL_DL_EEPROM, f, ## a) -#define D_CALIB(f, a...) IL_DBG(IL_DL_CALIB, f, ## a) -#define D_FW(f, a...) IL_DBG(IL_DL_FW, f, ## a) -#define D_RF_KILL(f, a...) IL_DBG(IL_DL_RF_KILL, f, ## a) -#define D_DROP(f, a...) IL_DBG(IL_DL_DROP, f, ## a) -#define D_AP(f, a...) IL_DBG(IL_DL_AP, f, ## a) -#define D_TXPOWER(f, a...) IL_DBG(IL_DL_TXPOWER, f, ## a) -#define D_RATE(f, a...) IL_DBG(IL_DL_RATE, f, ## a) -#define D_NOTIF(f, a...) IL_DBG(IL_DL_NOTIF, f, ## a) -#define D_ASSOC(f, a...) IL_DBG(IL_DL_ASSOC, f, ## a) -#define D_HT(f, a...) IL_DBG(IL_DL_HT, f, ## a) -#define D_STATS(f, a...) IL_DBG(IL_DL_STATS, f, ## a) -#define D_TX_REPLY(f, a...) IL_DBG(IL_DL_TX_REPLY, f, ## a) -#define D_QOS(f, a...) IL_DBG(IL_DL_QOS, f, ## a) -#define D_RADIO(f, a...) IL_DBG(IL_DL_RADIO, f, ## a) -#define D_POWER(f, a...) IL_DBG(IL_DL_POWER, f, ## a) -#define D_11H(f, a...) IL_DBG(IL_DL_11H, f, ## a) - -#endif /* __il_core_h__ */ diff --git a/trunk/drivers/net/wireless/iwlegacy/debug.c b/trunk/drivers/net/wireless/iwlegacy/debug.c deleted file mode 100644 index b1b8926a9c7b..000000000000 --- a/trunk/drivers/net/wireless/iwlegacy/debug.c +++ /dev/null @@ -1,1411 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - *****************************************************************************/ -#include -#include -#include - -#include "common.h" - -/* create and remove of files */ -#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ - if (!debugfs_create_file(#name, mode, parent, il, \ - &il_dbgfs_##name##_ops)) \ - goto err; \ -} while (0) - -#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \ - struct dentry *__tmp; \ - __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \ - parent, ptr); \ - if (IS_ERR(__tmp) || !__tmp) \ - goto err; \ -} while (0) - -#define DEBUGFS_ADD_X32(name, parent, ptr) do { \ - struct dentry *__tmp; \ - __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \ - parent, ptr); \ - if (IS_ERR(__tmp) || !__tmp) \ - goto err; \ -} while (0) - -/* file operation */ -#define DEBUGFS_READ_FUNC(name) \ -static ssize_t il_dbgfs_##name##_read(struct file *file, \ - char __user *user_buf, \ - size_t count, loff_t *ppos); - -#define DEBUGFS_WRITE_FUNC(name) \ -static ssize_t il_dbgfs_##name##_write(struct file *file, \ - const char __user *user_buf, \ - size_t count, loff_t *ppos); - -static int -il_dbgfs_open_file_generic(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - return 0; -} - -#define DEBUGFS_READ_FILE_OPS(name) \ - DEBUGFS_READ_FUNC(name); \ -static const struct file_operations il_dbgfs_##name##_ops = { \ - .read = il_dbgfs_##name##_read, \ - .open = il_dbgfs_open_file_generic, \ - .llseek = generic_file_llseek, \ -}; - -#define DEBUGFS_WRITE_FILE_OPS(name) \ - DEBUGFS_WRITE_FUNC(name); \ -static const struct file_operations il_dbgfs_##name##_ops = { \ - .write = il_dbgfs_##name##_write, \ - .open = il_dbgfs_open_file_generic, \ - .llseek = generic_file_llseek, \ -}; - -#define DEBUGFS_READ_WRITE_FILE_OPS(name) \ - DEBUGFS_READ_FUNC(name); \ - DEBUGFS_WRITE_FUNC(name); \ -static const struct file_operations il_dbgfs_##name##_ops = { \ - .write = il_dbgfs_##name##_write, \ - .read = il_dbgfs_##name##_read, \ - .open = il_dbgfs_open_file_generic, \ - .llseek = generic_file_llseek, \ -}; - -static ssize_t -il_dbgfs_tx_stats_read(struct file *file, char __user *user_buf, size_t count, - loff_t *ppos) -{ - - struct il_priv *il = file->private_data; - char *buf; - int pos = 0; - - int cnt; - ssize_t ret; - const size_t bufsz = - 100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX); - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - pos += scnprintf(buf + pos, bufsz - pos, "Management:\n"); - for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) { - pos += - scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n", - il_get_mgmt_string(cnt), il->tx_stats.mgmt[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "Control\n"); - for (cnt = 0; cnt < CONTROL_MAX; cnt++) { - pos += - scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n", - il_get_ctrl_string(cnt), il->tx_stats.ctrl[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "Data:\n"); - pos += - scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n", - il->tx_stats.data_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n", - il->tx_stats.data_bytes); - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t -il_dbgfs_clear_traffic_stats_write(struct file *file, - const char __user *user_buf, size_t count, - loff_t *ppos) -{ - struct il_priv *il = file->private_data; - u32 clear_flag; - char buf[8]; - int buf_size; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, 
sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%x", &clear_flag) != 1) - return -EFAULT; - il_clear_traffic_stats(il); - - return count; -} - -static ssize_t -il_dbgfs_rx_stats_read(struct file *file, char __user *user_buf, size_t count, - loff_t *ppos) -{ - - struct il_priv *il = file->private_data; - char *buf; - int pos = 0; - int cnt; - ssize_t ret; - const size_t bufsz = - 100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX); - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - pos += scnprintf(buf + pos, bufsz - pos, "Management:\n"); - for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) { - pos += - scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n", - il_get_mgmt_string(cnt), il->rx_stats.mgmt[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "Control:\n"); - for (cnt = 0; cnt < CONTROL_MAX; cnt++) { - pos += - scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n", - il_get_ctrl_string(cnt), il->rx_stats.ctrl[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "Data:\n"); - pos += - scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n", - il->rx_stats.data_cnt); - pos += - scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n", - il->rx_stats.data_bytes); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -#define BYTE1_MASK 0x000000ff; -#define BYTE2_MASK 0x0000ffff; -#define BYTE3_MASK 0x00ffffff; -static ssize_t -il_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count, - loff_t *ppos) -{ - u32 val; - char *buf; - ssize_t ret; - int i; - int pos = 0; - struct il_priv *il = file->private_data; - size_t bufsz; - - /* default is to dump the entire data segment */ - if (!il->dbgfs_sram_offset && !il->dbgfs_sram_len) { - il->dbgfs_sram_offset = 0x800000; - if (il->ucode_type == UCODE_INIT) - il->dbgfs_sram_len = il->ucode_init_data.len; - else - il->dbgfs_sram_len = il->ucode_data.len; - } - bufsz = 30 + il->dbgfs_sram_len * sizeof(char) * 10; - buf = kmalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - pos += - scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", - il->dbgfs_sram_len); - pos += - scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", - il->dbgfs_sram_offset); - for (i = il->dbgfs_sram_len; i > 0; i -= 4) { - val = - il_read_targ_mem(il, - il->dbgfs_sram_offset + - il->dbgfs_sram_len - i); - if (i < 4) { - switch (i) { - case 1: - val &= BYTE1_MASK; - break; - case 2: - val &= BYTE2_MASK; - break; - case 3: - val &= BYTE3_MASK; - break; - } - } - if (!(i % 16)) - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val); - } - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t -il_dbgfs_sram_write(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct il_priv *il = file->private_data; - char buf[64]; - int buf_size; - u32 offset, len; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - - if (sscanf(buf, "%x,%x", &offset, &len) == 2) { - il->dbgfs_sram_offset = offset; - il->dbgfs_sram_len = len; - } else { - il->dbgfs_sram_offset = 0; - il->dbgfs_sram_len = 0; - } - - return count; -} - -static ssize_t -il_dbgfs_stations_read(struct file *file, char __user *user_buf, size_t count, - loff_t *ppos) -{ - struct il_priv *il = file->private_data; - 
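Usage sketch (assumptions: the sram debugfs file has been created by il_dbgfs_register(); the exact debugfs path depends on where the driver mounts it): the write handler above parses "offset,len" in hex via sscanf("%x,%x"), and a later read dumps that window, defaulting to the full data segment at 0x800000 when no range was set. For instance:

	echo 800000,40 > sram
	cat sram

dumps 0x40 bytes starting at SRAM offset 0x800000.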
struct il_station_entry *station; - int max_sta = il->hw_params.max_stations; - char *buf; - int i, j, pos = 0; - ssize_t ret; - /* Add 30 for initial string */ - const size_t bufsz = 30 + sizeof(char) * 500 * (il->num_stations); - - buf = kmalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - pos += - scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n", - il->num_stations); - - for (i = 0; i < max_sta; i++) { - station = &il->stations[i]; - if (!station->used) - continue; - pos += - scnprintf(buf + pos, bufsz - pos, - "station %d - addr: %pM, flags: %#x\n", i, - station->sta.sta.addr, - station->sta.station_flags_msk); - pos += - scnprintf(buf + pos, bufsz - pos, - "TID\tseq_num\ttxq_id\tframes\ttfds\t"); - pos += - scnprintf(buf + pos, bufsz - pos, - "start_idx\tbitmap\t\t\trate_n_flags\n"); - - for (j = 0; j < MAX_TID_COUNT; j++) { - pos += - scnprintf(buf + pos, bufsz - pos, - "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x", - j, station->tid[j].seq_number, - station->tid[j].agg.txq_id, - station->tid[j].agg.frame_count, - station->tid[j].tfds_in_queue, - station->tid[j].agg.start_idx, - station->tid[j].agg.bitmap, - station->tid[j].agg.rate_n_flags); - - if (station->tid[j].agg.wait_for_ba) - pos += - scnprintf(buf + pos, bufsz - pos, - " - waitforba"); - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - } - - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - } - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t -il_dbgfs_nvm_read(struct file *file, char __user *user_buf, size_t count, - loff_t *ppos) -{ - ssize_t ret; - struct il_priv *il = file->private_data; - int pos = 0, ofs = 0, buf_size = 0; - const u8 *ptr; - char *buf; - u16 eeprom_ver; - size_t eeprom_len = il->cfg->base_params->eeprom_size; - buf_size = 4 * eeprom_len + 256; - - if (eeprom_len % 16) { - IL_ERR("NVM size is not multiple of 16.\n"); - return -ENODATA; - } - - ptr = il->eeprom; - if (!ptr) { - IL_ERR("Invalid EEPROM memory\n"); - return -ENOMEM; - } - - /* 4 characters for byte 0xYY */ - buf = kzalloc(buf_size, GFP_KERNEL); - if (!buf) { - IL_ERR("Can not allocate Buffer\n"); - return -ENOMEM; - } - eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION); - pos += - scnprintf(buf + pos, buf_size - pos, "EEPROM " "version: 0x%x\n", - eeprom_ver); - for (ofs = 0; ofs < eeprom_len; ofs += 16) { - pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); - hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos, - buf_size - pos, 0); - pos += strlen(buf + pos); - if (buf_size - pos > 0) - buf[pos++] = '\n'; - } - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t -il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count, - loff_t *ppos) -{ - struct il_priv *il = file->private_data; - struct ieee80211_channel *channels = NULL; - const struct ieee80211_supported_band *supp_band = NULL; - int pos = 0, i, bufsz = PAGE_SIZE; - char *buf; - ssize_t ret; - - if (!test_bit(S_GEO_CONFIGURED, &il->status)) - return -EAGAIN; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IL_ERR("Can not allocate Buffer\n"); - return -ENOMEM; - } - - supp_band = il_get_hw_mode(il, IEEE80211_BAND_2GHZ); - if (supp_band) { - channels = supp_band->channels; - - pos += - scnprintf(buf + pos, bufsz - pos, - "Displaying %d channels in 2.4GHz band 802.11bg):\n", - supp_band->n_channels); - - for (i = 0; i < supp_band->n_channels; i++) - pos += - scnprintf(buf + pos, bufsz - pos, - "%d: 
%ddBm: BSS%s%s, %s.\n", - channels[i].hw_value, - channels[i].max_power, - channels[i]. - flags & IEEE80211_CHAN_RADAR ? - " (IEEE 802.11h required)" : "", - ((channels[i]. - flags & IEEE80211_CHAN_NO_IBSS) || - (channels[i]. - flags & IEEE80211_CHAN_RADAR)) ? "" : - ", IBSS", - channels[i]. - flags & IEEE80211_CHAN_PASSIVE_SCAN ? - "passive only" : "active/passive"); - } - supp_band = il_get_hw_mode(il, IEEE80211_BAND_5GHZ); - if (supp_band) { - channels = supp_band->channels; - - pos += - scnprintf(buf + pos, bufsz - pos, - "Displaying %d channels in 5.2GHz band (802.11a)\n", - supp_band->n_channels); - - for (i = 0; i < supp_band->n_channels; i++) - pos += - scnprintf(buf + pos, bufsz - pos, - "%d: %ddBm: BSS%s%s, %s.\n", - channels[i].hw_value, - channels[i].max_power, - channels[i]. - flags & IEEE80211_CHAN_RADAR ? - " (IEEE 802.11h required)" : "", - ((channels[i]. - flags & IEEE80211_CHAN_NO_IBSS) || - (channels[i]. - flags & IEEE80211_CHAN_RADAR)) ? "" : - ", IBSS", - channels[i]. - flags & IEEE80211_CHAN_PASSIVE_SCAN ? - "passive only" : "active/passive"); - } - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t -il_dbgfs_status_read(struct file *file, char __user *user_buf, size_t count, - loff_t *ppos) -{ - - struct il_priv *il = file->private_data; - char buf[512]; - int pos = 0; - const size_t bufsz = sizeof(buf); - - pos += - scnprintf(buf + pos, bufsz - pos, "S_HCMD_ACTIVE:\t %d\n", - test_bit(S_HCMD_ACTIVE, &il->status)); - pos += - scnprintf(buf + pos, bufsz - pos, "S_INT_ENABLED:\t %d\n", - test_bit(S_INT_ENABLED, &il->status)); - pos += - scnprintf(buf + pos, bufsz - pos, "S_RF_KILL_HW:\t %d\n", - test_bit(S_RF_KILL_HW, &il->status)); - pos += - scnprintf(buf + pos, bufsz - pos, "S_CT_KILL:\t\t %d\n", - test_bit(S_CT_KILL, &il->status)); - pos += - scnprintf(buf + pos, bufsz - pos, "S_INIT:\t\t %d\n", - test_bit(S_INIT, &il->status)); - pos += - scnprintf(buf + pos, bufsz - pos, "S_ALIVE:\t\t %d\n", - test_bit(S_ALIVE, &il->status)); - pos += - scnprintf(buf + pos, bufsz - pos, "S_READY:\t\t %d\n", - test_bit(S_READY, &il->status)); - pos += - scnprintf(buf + pos, bufsz - pos, "S_TEMPERATURE:\t %d\n", - test_bit(S_TEMPERATURE, &il->status)); - pos += - scnprintf(buf + pos, bufsz - pos, "S_GEO_CONFIGURED:\t %d\n", - test_bit(S_GEO_CONFIGURED, &il->status)); - pos += - scnprintf(buf + pos, bufsz - pos, "S_EXIT_PENDING:\t %d\n", - test_bit(S_EXIT_PENDING, &il->status)); - pos += - scnprintf(buf + pos, bufsz - pos, "S_STATS:\t %d\n", - test_bit(S_STATS, &il->status)); - pos += - scnprintf(buf + pos, bufsz - pos, "S_SCANNING:\t %d\n", - test_bit(S_SCANNING, &il->status)); - pos += - scnprintf(buf + pos, bufsz - pos, "S_SCAN_ABORTING:\t %d\n", - test_bit(S_SCAN_ABORTING, &il->status)); - pos += - scnprintf(buf + pos, bufsz - pos, "S_SCAN_HW:\t\t %d\n", - test_bit(S_SCAN_HW, &il->status)); - pos += - scnprintf(buf + pos, bufsz - pos, "S_POWER_PMI:\t %d\n", - test_bit(S_POWER_PMI, &il->status)); - pos += - scnprintf(buf + pos, bufsz - pos, "S_FW_ERROR:\t %d\n", - test_bit(S_FW_ERROR, &il->status)); - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t -il_dbgfs_interrupt_read(struct file *file, char __user *user_buf, size_t count, - loff_t *ppos) -{ - - struct il_priv *il = file->private_data; - int pos = 0; - int cnt = 0; - char *buf; - int bufsz = 24 * 64; /* 24 items * 64 char per item */ - ssize_t ret; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IL_ERR("Can not allocate 
Buffer\n"); - return -ENOMEM; - } - - pos += - scnprintf(buf + pos, bufsz - pos, "Interrupt Statistics Report:\n"); - - pos += - scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", - il->isr_stats.hw); - pos += - scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", - il->isr_stats.sw); - if (il->isr_stats.sw || il->isr_stats.hw) { - pos += - scnprintf(buf + pos, bufsz - pos, - "\tLast Restarting Code: 0x%X\n", - il->isr_stats.err_code); - } -#ifdef CONFIG_IWLEGACY_DEBUG - pos += - scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", - il->isr_stats.sch); - pos += - scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", - il->isr_stats.alive); -#endif - pos += - scnprintf(buf + pos, bufsz - pos, - "HW RF KILL switch toggled:\t %u\n", - il->isr_stats.rfkill); - - pos += - scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", - il->isr_stats.ctkill); - - pos += - scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", - il->isr_stats.wakeup); - - pos += - scnprintf(buf + pos, bufsz - pos, "Rx command responses:\t\t %u\n", - il->isr_stats.rx); - for (cnt = 0; cnt < IL_CN_MAX; cnt++) { - if (il->isr_stats.handlers[cnt] > 0) - pos += - scnprintf(buf + pos, bufsz - pos, - "\tRx handler[%36s]:\t\t %u\n", - il_get_cmd_string(cnt), - il->isr_stats.handlers[cnt]); - } - - pos += - scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", - il->isr_stats.tx); - - pos += - scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", - il->isr_stats.unhandled); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t -il_dbgfs_interrupt_write(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct il_priv *il = file->private_data; - char buf[8]; - int buf_size; - u32 reset_flag; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%x", &reset_flag) != 1) - return -EFAULT; - if (reset_flag == 0) - il_clear_isr_stats(il); - - return count; -} - -static ssize_t -il_dbgfs_qos_read(struct file *file, char __user *user_buf, size_t count, - loff_t *ppos) -{ - struct il_priv *il = file->private_data; - struct il_rxon_context *ctx = &il->ctx; - int pos = 0, i; - char buf[256]; - const size_t bufsz = sizeof(buf); - - pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n", ctx->ctxid); - for (i = 0; i < AC_NUM; i++) { - pos += - scnprintf(buf + pos, bufsz - pos, - "\tcw_min\tcw_max\taifsn\ttxop\n"); - pos += - scnprintf(buf + pos, bufsz - pos, - "AC[%d]\t%u\t%u\t%u\t%u\n", i, - ctx->qos_data.def_qos_parm.ac[i].cw_min, - ctx->qos_data.def_qos_parm.ac[i].cw_max, - ctx->qos_data.def_qos_parm.ac[i].aifsn, - ctx->qos_data.def_qos_parm.ac[i].edca_txop); - } - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t -il_dbgfs_disable_ht40_write(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct il_priv *il = file->private_data; - char buf[8]; - int buf_size; - int ht40; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &ht40) != 1) - return -EFAULT; - if (!il_is_any_associated(il)) - il->disable_ht40 = ht40 ? 
true : false; - else { - IL_ERR("Sta associated with AP - " - "Change to 40MHz channel support is not allowed\n"); - return -EINVAL; - } - - return count; -} - -static ssize_t -il_dbgfs_disable_ht40_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct il_priv *il = file->private_data; - char buf[100]; - int pos = 0; - const size_t bufsz = sizeof(buf); - - pos += - scnprintf(buf + pos, bufsz - pos, "11n 40MHz Mode: %s\n", - il->disable_ht40 ? "Disabled" : "Enabled"); - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -DEBUGFS_READ_WRITE_FILE_OPS(sram); -DEBUGFS_READ_FILE_OPS(nvm); -DEBUGFS_READ_FILE_OPS(stations); -DEBUGFS_READ_FILE_OPS(channels); -DEBUGFS_READ_FILE_OPS(status); -DEBUGFS_READ_WRITE_FILE_OPS(interrupt); -DEBUGFS_READ_FILE_OPS(qos); -DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40); - -static ssize_t -il_dbgfs_traffic_log_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct il_priv *il = file->private_data; - int pos = 0, ofs = 0; - int cnt = 0, entry; - struct il_tx_queue *txq; - struct il_queue *q; - struct il_rx_queue *rxq = &il->rxq; - char *buf; - int bufsz = - ((IL_TRAFFIC_ENTRIES * IL_TRAFFIC_ENTRY_SIZE * 64) * 2) + - (il->cfg->base_params->num_of_queues * 32 * 8) + 400; - const u8 *ptr; - ssize_t ret; - - if (!il->txq) { - IL_ERR("txq not ready\n"); - return -EAGAIN; - } - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IL_ERR("Can not allocate buffer\n"); - return -ENOMEM; - } - pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n"); - for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) { - txq = &il->txq[cnt]; - q = &txq->q; - pos += - scnprintf(buf + pos, bufsz - pos, - "q[%d]: read_ptr: %u, write_ptr: %u\n", cnt, - q->read_ptr, q->write_ptr); - } - if (il->tx_traffic && (il_debug_level & IL_DL_TX)) { - ptr = il->tx_traffic; - pos += - scnprintf(buf + pos, bufsz - pos, "Tx Traffic idx: %u\n", - il->tx_traffic_idx); - for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) { - for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16; - entry++, ofs += 16) { - pos += - scnprintf(buf + pos, bufsz - pos, "0x%.4x ", - ofs); - hex_dump_to_buffer(ptr + ofs, 16, 16, 2, - buf + pos, bufsz - pos, 0); - pos += strlen(buf + pos); - if (bufsz - pos > 0) - buf[pos++] = '\n'; - } - } - } - - pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n"); - pos += - scnprintf(buf + pos, bufsz - pos, "read: %u, write: %u\n", - rxq->read, rxq->write); - - if (il->rx_traffic && (il_debug_level & IL_DL_RX)) { - ptr = il->rx_traffic; - pos += - scnprintf(buf + pos, bufsz - pos, "Rx Traffic idx: %u\n", - il->rx_traffic_idx); - for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) { - for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16; - entry++, ofs += 16) { - pos += - scnprintf(buf + pos, bufsz - pos, "0x%.4x ", - ofs); - hex_dump_to_buffer(ptr + ofs, 16, 16, 2, - buf + pos, bufsz - pos, 0); - pos += strlen(buf + pos); - if (bufsz - pos > 0) - buf[pos++] = '\n'; - } - } - } - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t -il_dbgfs_traffic_log_write(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct il_priv *il = file->private_data; - char buf[8]; - int buf_size; - int traffic_log; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &traffic_log) != 1) - return -EFAULT; - if (traffic_log == 0) 
- il_reset_traffic_log(il); - - return count; -} - -static ssize_t -il_dbgfs_tx_queue_read(struct file *file, char __user *user_buf, size_t count, - loff_t *ppos) -{ - - struct il_priv *il = file->private_data; - struct il_tx_queue *txq; - struct il_queue *q; - char *buf; - int pos = 0; - int cnt; - int ret; - const size_t bufsz = - sizeof(char) * 64 * il->cfg->base_params->num_of_queues; - - if (!il->txq) { - IL_ERR("txq not ready\n"); - return -EAGAIN; - } - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) { - txq = &il->txq[cnt]; - q = &txq->q; - pos += - scnprintf(buf + pos, bufsz - pos, - "hwq %.2d: read=%u write=%u stop=%d" - " swq_id=%#.2x (ac %d/hwq %d)\n", cnt, - q->read_ptr, q->write_ptr, - !!test_bit(cnt, il->queue_stopped), - txq->swq_id, txq->swq_id & 3, - (txq->swq_id >> 2) & 0x1f); - if (cnt >= 4) - continue; - /* for the ACs, display the stop count too */ - pos += - scnprintf(buf + pos, bufsz - pos, - " stop-count: %d\n", - atomic_read(&il->queue_stop_count[cnt])); - } - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t -il_dbgfs_rx_queue_read(struct file *file, char __user *user_buf, size_t count, - loff_t *ppos) -{ - - struct il_priv *il = file->private_data; - struct il_rx_queue *rxq = &il->rxq; - char buf[256]; - int pos = 0; - const size_t bufsz = sizeof(buf); - - pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", rxq->read); - pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", rxq->write); - pos += - scnprintf(buf + pos, bufsz - pos, "free_count: %u\n", - rxq->free_count); - if (rxq->rb_stts) { - pos += - scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n", - le16_to_cpu(rxq->rb_stts-> - closed_rb_num) & 0x0FFF); - } else { - pos += - scnprintf(buf + pos, bufsz - pos, - "closed_rb_num: Not Allocated\n"); - } - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t -il_dbgfs_ucode_rx_stats_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct il_priv *il = file->private_data; - return il->cfg->ops->lib->debugfs_ops.rx_stats_read(file, user_buf, - count, ppos); -} - -static ssize_t -il_dbgfs_ucode_tx_stats_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct il_priv *il = file->private_data; - return il->cfg->ops->lib->debugfs_ops.tx_stats_read(file, user_buf, - count, ppos); -} - -static ssize_t -il_dbgfs_ucode_general_stats_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct il_priv *il = file->private_data; - return il->cfg->ops->lib->debugfs_ops.general_stats_read(file, user_buf, - count, ppos); -} - -static ssize_t -il_dbgfs_sensitivity_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - - struct il_priv *il = file->private_data; - int pos = 0; - int cnt = 0; - char *buf; - int bufsz = sizeof(struct il_sensitivity_data) * 4 + 100; - ssize_t ret; - struct il_sensitivity_data *data; - - data = &il->sensitivity_data; - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IL_ERR("Can not allocate Buffer\n"); - return -ENOMEM; - } - - pos += - scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n", - data->auto_corr_ofdm); - pos += - scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc:\t\t %u\n", - data->auto_corr_ofdm_mrc); - pos += - scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n", - data->auto_corr_ofdm_x1); - pos += - scnprintf(buf 
+ pos, bufsz - pos, "auto_corr_ofdm_mrc_x1:\t\t %u\n", - data->auto_corr_ofdm_mrc_x1); - pos += - scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n", - data->auto_corr_cck); - pos += - scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n", - data->auto_corr_cck_mrc); - pos += - scnprintf(buf + pos, bufsz - pos, - "last_bad_plcp_cnt_ofdm:\t\t %u\n", - data->last_bad_plcp_cnt_ofdm); - pos += - scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n", - data->last_fa_cnt_ofdm); - pos += - scnprintf(buf + pos, bufsz - pos, "last_bad_plcp_cnt_cck:\t\t %u\n", - data->last_bad_plcp_cnt_cck); - pos += - scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n", - data->last_fa_cnt_cck); - pos += - scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n", - data->nrg_curr_state); - pos += - scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n", - data->nrg_prev_state); - pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t"); - for (cnt = 0; cnt < 10; cnt++) { - pos += - scnprintf(buf + pos, bufsz - pos, " %u", - data->nrg_value[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t"); - for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) { - pos += - scnprintf(buf + pos, bufsz - pos, " %u", - data->nrg_silence_rssi[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - pos += - scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n", - data->nrg_silence_ref); - pos += - scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n", - data->nrg_energy_idx); - pos += - scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n", - data->nrg_silence_idx); - pos += - scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n", - data->nrg_th_cck); - pos += - scnprintf(buf + pos, bufsz - pos, - "nrg_auto_corr_silence_diff:\t %u\n", - data->nrg_auto_corr_silence_diff); - pos += - scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n", - data->num_in_cck_no_fa); - pos += - scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n", - data->nrg_th_ofdm); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t -il_dbgfs_chain_noise_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - - struct il_priv *il = file->private_data; - int pos = 0; - int cnt = 0; - char *buf; - int bufsz = sizeof(struct il_chain_noise_data) * 4 + 100; - ssize_t ret; - struct il_chain_noise_data *data; - - data = &il->chain_noise_data; - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IL_ERR("Can not allocate Buffer\n"); - return -ENOMEM; - } - - pos += - scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n", - data->active_chains); - pos += - scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n", - data->chain_noise_a); - pos += - scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n", - data->chain_noise_b); - pos += - scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n", - data->chain_noise_c); - pos += - scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n", - data->chain_signal_a); - pos += - scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n", - data->chain_signal_b); - pos += - scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n", - data->chain_signal_c); - pos += - scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n", - data->beacon_count); - - pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t"); - for (cnt = 
0; cnt < NUM_RX_CHAINS; cnt++) { - pos += - scnprintf(buf + pos, bufsz - pos, " %u", - data->disconn_array[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t"); - for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) { - pos += - scnprintf(buf + pos, bufsz - pos, " %u", - data->delta_gain_code[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - pos += - scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n", - data->radio_write); - pos += - scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n", - data->state); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t -il_dbgfs_power_save_status_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct il_priv *il = file->private_data; - char buf[60]; - int pos = 0; - const size_t bufsz = sizeof(buf); - u32 pwrsave_status; - - pwrsave_status = - _il_rd(il, CSR_GP_CNTRL) & CSR_GP_REG_POWER_SAVE_STATUS_MSK; - - pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: "); - pos += - scnprintf(buf + pos, bufsz - pos, "%s\n", - (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" : - (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" : - (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" : - "error"); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t -il_dbgfs_clear_ucode_stats_write(struct file *file, - const char __user *user_buf, size_t count, - loff_t *ppos) -{ - struct il_priv *il = file->private_data; - char buf[8]; - int buf_size; - int clear; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &clear) != 1) - return -EFAULT; - - /* make request to uCode to retrieve stats information */ - mutex_lock(&il->mutex); - il_send_stats_request(il, CMD_SYNC, true); - mutex_unlock(&il->mutex); - - return count; -} - -static ssize_t -il_dbgfs_rxon_flags_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - - struct il_priv *il = file->private_data; - int len = 0; - char buf[20]; - - len = sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.flags)); - return simple_read_from_buffer(user_buf, count, ppos, buf, len); -} - -static ssize_t -il_dbgfs_rxon_filter_flags_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - - struct il_priv *il = file->private_data; - int len = 0; - char buf[20]; - - len = - sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.filter_flags)); - return simple_read_from_buffer(user_buf, count, ppos, buf, len); -} - -static ssize_t -il_dbgfs_fh_reg_read(struct file *file, char __user *user_buf, size_t count, - loff_t *ppos) -{ - struct il_priv *il = file->private_data; - char *buf; - int pos = 0; - ssize_t ret = -EFAULT; - - if (il->cfg->ops->lib->dump_fh) { - ret = pos = il->cfg->ops->lib->dump_fh(il, &buf, true); - if (buf) { - ret = - simple_read_from_buffer(user_buf, count, ppos, buf, - pos); - kfree(buf); - } - } - - return ret; -} - -static ssize_t -il_dbgfs_missed_beacon_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - - struct il_priv *il = file->private_data; - int pos = 0; - char buf[12]; - const size_t bufsz = sizeof(buf); - - pos += - scnprintf(buf + pos, bufsz - pos, "%d\n", - il->missed_beacon_threshold); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t 
-il_dbgfs_missed_beacon_write(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct il_priv *il = file->private_data; - char buf[8]; - int buf_size; - int missed; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &missed) != 1) - return -EINVAL; - - if (missed < IL_MISSED_BEACON_THRESHOLD_MIN || - missed > IL_MISSED_BEACON_THRESHOLD_MAX) - il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF; - else - il->missed_beacon_threshold = missed; - - return count; -} - -static ssize_t -il_dbgfs_force_reset_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - - struct il_priv *il = file->private_data; - int pos = 0; - char buf[300]; - const size_t bufsz = sizeof(buf); - struct il_force_reset *force_reset; - - force_reset = &il->force_reset; - - pos += - scnprintf(buf + pos, bufsz - pos, "\tnumber of reset request: %d\n", - force_reset->reset_request_count); - pos += - scnprintf(buf + pos, bufsz - pos, - "\tnumber of reset request success: %d\n", - force_reset->reset_success_count); - pos += - scnprintf(buf + pos, bufsz - pos, - "\tnumber of reset request reject: %d\n", - force_reset->reset_reject_count); - pos += - scnprintf(buf + pos, bufsz - pos, "\treset duration: %lu\n", - force_reset->reset_duration); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t -il_dbgfs_force_reset_write(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - - int ret; - struct il_priv *il = file->private_data; - - ret = il_force_reset(il, true); - - return ret ? ret : count; -} - -static ssize_t -il_dbgfs_wd_timeout_write(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - - struct il_priv *il = file->private_data; - char buf[8]; - int buf_size; - int timeout; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &timeout) != 1) - return -EINVAL; - if (timeout < 0 || timeout > IL_MAX_WD_TIMEOUT) - timeout = IL_DEF_WD_TIMEOUT; - - il->cfg->base_params->wd_timeout = timeout; - il_setup_watchdog(il); - return count; -} - -DEBUGFS_READ_FILE_OPS(rx_stats); -DEBUGFS_READ_FILE_OPS(tx_stats); -DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); -DEBUGFS_READ_FILE_OPS(rx_queue); -DEBUGFS_READ_FILE_OPS(tx_queue); -DEBUGFS_READ_FILE_OPS(ucode_rx_stats); -DEBUGFS_READ_FILE_OPS(ucode_tx_stats); -DEBUGFS_READ_FILE_OPS(ucode_general_stats); -DEBUGFS_READ_FILE_OPS(sensitivity); -DEBUGFS_READ_FILE_OPS(chain_noise); -DEBUGFS_READ_FILE_OPS(power_save_status); -DEBUGFS_WRITE_FILE_OPS(clear_ucode_stats); -DEBUGFS_WRITE_FILE_OPS(clear_traffic_stats); -DEBUGFS_READ_FILE_OPS(fh_reg); -DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon); -DEBUGFS_READ_WRITE_FILE_OPS(force_reset); -DEBUGFS_READ_FILE_OPS(rxon_flags); -DEBUGFS_READ_FILE_OPS(rxon_filter_flags); -DEBUGFS_WRITE_FILE_OPS(wd_timeout); - -/* - * Create the debugfs files and directories - * - */ -int -il_dbgfs_register(struct il_priv *il, const char *name) -{ - struct dentry *phyd = il->hw->wiphy->debugfsdir; - struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug; - - dir_drv = debugfs_create_dir(name, phyd); - if (!dir_drv) - return -ENOMEM; - - il->debugfs_dir = dir_drv; - - dir_data = debugfs_create_dir("data", dir_drv); - if (!dir_data) - goto err; - dir_rf = debugfs_create_dir("rf", dir_drv); - if (!dir_rf) - 
goto err; - dir_debug = debugfs_create_dir("debug", dir_drv); - if (!dir_debug) - goto err; - - DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(rx_stats, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(tx_stats, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(clear_ucode_stats, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(clear_traffic_stats, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR); - - if (il->cfg->base_params->sensitivity_calib_by_driver) - DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR); - if (il->cfg->base_params->chain_noise_calib_by_driver) - DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR); - if (il->cfg->base_params->sensitivity_calib_by_driver) - DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, - &il->disable_sens_cal); - if (il->cfg->base_params->chain_noise_calib_by_driver) - DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf, - &il->disable_chain_noise_cal); - DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, &il->disable_tx_power_cal); - return 0; - -err: - IL_ERR("Can't create the debugfs directory\n"); - il_dbgfs_unregister(il); - return -ENOMEM; -} -EXPORT_SYMBOL(il_dbgfs_register); - -/** - * Remove the debugfs files and directories - * - */ -void -il_dbgfs_unregister(struct il_priv *il) -{ - if (!il->debugfs_dir) - return; - - debugfs_remove_recursive(il->debugfs_dir); - il->debugfs_dir = NULL; -} -EXPORT_SYMBOL(il_dbgfs_unregister); diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c b/trunk/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c new file mode 100644 index 000000000000..cfabb38793ab --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c @@ -0,0 +1,523 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + +#include "iwl-3945-debugfs.h" + + +static int iwl3945_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz) +{ + int p = 0; + + p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", + le32_to_cpu(priv->_3945.statistics.flag)); + if (le32_to_cpu(priv->_3945.statistics.flag) & + UCODE_STATISTICS_CLEAR_MSK) + p += scnprintf(buf + p, bufsz - p, + "\tStatistics have been cleared\n"); + p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n", + (le32_to_cpu(priv->_3945.statistics.flag) & + UCODE_STATISTICS_FREQUENCY_MSK) + ? "2.4 GHz" : "5.2 GHz"); + p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n", + (le32_to_cpu(priv->_3945.statistics.flag) & + UCODE_STATISTICS_NARROW_BAND_MSK) + ? "enabled" : "disabled"); + return p; +} + +ssize_t iwl3945_ucode_rx_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 + + sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400; + ssize_t ret; + struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, + *max_ofdm; + struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck; + struct iwl39_statistics_rx_non_phy *general, *accum_general; + struct iwl39_statistics_rx_non_phy *delta_general, *max_general; + + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* + * The statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + ofdm = &priv->_3945.statistics.rx.ofdm; + cck = &priv->_3945.statistics.rx.cck; + general = &priv->_3945.statistics.rx.general; + accum_ofdm = &priv->_3945.accum_statistics.rx.ofdm; + accum_cck = &priv->_3945.accum_statistics.rx.cck; + accum_general = &priv->_3945.accum_statistics.rx.general; + delta_ofdm = &priv->_3945.delta_statistics.rx.ofdm; + delta_cck = &priv->_3945.delta_statistics.rx.cck; + delta_general = &priv->_3945.delta_statistics.rx.general; + max_ofdm = &priv->_3945.max_delta.rx.ofdm; + max_cck = &priv->_3945.max_delta.rx.cck; + max_general = &priv->_3945.max_delta.rx.general; + + pos += iwl3945_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" + "acumulative delta max\n", + "Statistics_Rx - OFDM:"); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "ina_cnt:", le32_to_cpu(ofdm->ina_cnt), + accum_ofdm->ina_cnt, + delta_ofdm->ina_cnt, max_ofdm->ina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "fina_cnt:", + le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt, + delta_ofdm->fina_cnt, max_ofdm->fina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "plcp_err:", + le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err, + 
delta_ofdm->plcp_err, max_ofdm->plcp_err); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "crc32_err:", + le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err, + delta_ofdm->crc32_err, max_ofdm->crc32_err); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "overrun_err:", + le32_to_cpu(ofdm->overrun_err), + accum_ofdm->overrun_err, delta_ofdm->overrun_err, + max_ofdm->overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "early_overrun_err:", + le32_to_cpu(ofdm->early_overrun_err), + accum_ofdm->early_overrun_err, + delta_ofdm->early_overrun_err, + max_ofdm->early_overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "crc32_good:", le32_to_cpu(ofdm->crc32_good), + accum_ofdm->crc32_good, delta_ofdm->crc32_good, + max_ofdm->crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:", + le32_to_cpu(ofdm->false_alarm_cnt), + accum_ofdm->false_alarm_cnt, + delta_ofdm->false_alarm_cnt, + max_ofdm->false_alarm_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "fina_sync_err_cnt:", + le32_to_cpu(ofdm->fina_sync_err_cnt), + accum_ofdm->fina_sync_err_cnt, + delta_ofdm->fina_sync_err_cnt, + max_ofdm->fina_sync_err_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "sfd_timeout:", + le32_to_cpu(ofdm->sfd_timeout), + accum_ofdm->sfd_timeout, + delta_ofdm->sfd_timeout, + max_ofdm->sfd_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "fina_timeout:", + le32_to_cpu(ofdm->fina_timeout), + accum_ofdm->fina_timeout, + delta_ofdm->fina_timeout, + max_ofdm->fina_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "unresponded_rts:", + le32_to_cpu(ofdm->unresponded_rts), + accum_ofdm->unresponded_rts, + delta_ofdm->unresponded_rts, + max_ofdm->unresponded_rts); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "rxe_frame_lmt_ovrun:", + le32_to_cpu(ofdm->rxe_frame_limit_overrun), + accum_ofdm->rxe_frame_limit_overrun, + delta_ofdm->rxe_frame_limit_overrun, + max_ofdm->rxe_frame_limit_overrun); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "sent_ack_cnt:", + le32_to_cpu(ofdm->sent_ack_cnt), + accum_ofdm->sent_ack_cnt, + delta_ofdm->sent_ack_cnt, + max_ofdm->sent_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "sent_cts_cnt:", + le32_to_cpu(ofdm->sent_cts_cnt), + accum_ofdm->sent_cts_cnt, + delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt); + + pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" + "acumulative delta max\n", + "Statistics_Rx - CCK:"); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "ina_cnt:", + le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt, + delta_cck->ina_cnt, max_cck->ina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "fina_cnt:", + le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt, + delta_cck->fina_cnt, max_cck->fina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "plcp_err:", + le32_to_cpu(cck->plcp_err), accum_cck->plcp_err, + delta_cck->plcp_err, max_cck->plcp_err); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "crc32_err:", + le32_to_cpu(cck->crc32_err), accum_cck->crc32_err, + delta_cck->crc32_err, max_cck->crc32_err); + 
pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "overrun_err:", + le32_to_cpu(cck->overrun_err), + accum_cck->overrun_err, + delta_cck->overrun_err, max_cck->overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "early_overrun_err:", + le32_to_cpu(cck->early_overrun_err), + accum_cck->early_overrun_err, + delta_cck->early_overrun_err, + max_cck->early_overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "crc32_good:", + le32_to_cpu(cck->crc32_good), accum_cck->crc32_good, + delta_cck->crc32_good, + max_cck->crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "false_alarm_cnt:", + le32_to_cpu(cck->false_alarm_cnt), + accum_cck->false_alarm_cnt, + delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "fina_sync_err_cnt:", + le32_to_cpu(cck->fina_sync_err_cnt), + accum_cck->fina_sync_err_cnt, + delta_cck->fina_sync_err_cnt, + max_cck->fina_sync_err_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "sfd_timeout:", + le32_to_cpu(cck->sfd_timeout), + accum_cck->sfd_timeout, + delta_cck->sfd_timeout, max_cck->sfd_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "fina_timeout:", + le32_to_cpu(cck->fina_timeout), + accum_cck->fina_timeout, + delta_cck->fina_timeout, max_cck->fina_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "unresponded_rts:", + le32_to_cpu(cck->unresponded_rts), + accum_cck->unresponded_rts, + delta_cck->unresponded_rts, + max_cck->unresponded_rts); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "rxe_frame_lmt_ovrun:", + le32_to_cpu(cck->rxe_frame_limit_overrun), + accum_cck->rxe_frame_limit_overrun, + delta_cck->rxe_frame_limit_overrun, + max_cck->rxe_frame_limit_overrun); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "sent_ack_cnt:", + le32_to_cpu(cck->sent_ack_cnt), + accum_cck->sent_ack_cnt, + delta_cck->sent_ack_cnt, + max_cck->sent_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "sent_cts_cnt:", + le32_to_cpu(cck->sent_cts_cnt), + accum_cck->sent_cts_cnt, + delta_cck->sent_cts_cnt, + max_cck->sent_cts_cnt); + + pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" + "acumulative delta max\n", + "Statistics_Rx - GENERAL:"); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "bogus_cts:", + le32_to_cpu(general->bogus_cts), + accum_general->bogus_cts, + delta_general->bogus_cts, max_general->bogus_cts); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "bogus_ack:", + le32_to_cpu(general->bogus_ack), + accum_general->bogus_ack, + delta_general->bogus_ack, max_general->bogus_ack); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "non_bssid_frames:", + le32_to_cpu(general->non_bssid_frames), + accum_general->non_bssid_frames, + delta_general->non_bssid_frames, + max_general->non_bssid_frames); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "filtered_frames:", + le32_to_cpu(general->filtered_frames), + accum_general->filtered_frames, + delta_general->filtered_frames, + max_general->filtered_frames); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "non_channel_beacons:", + 
le32_to_cpu(general->non_channel_beacons), + accum_general->non_channel_beacons, + delta_general->non_channel_beacons, + max_general->non_channel_beacons); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +ssize_t iwl3945_ucode_tx_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = (sizeof(struct iwl39_statistics_tx) * 48) + 250; + ssize_t ret; + struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx; + + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* + * The statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + tx = &priv->_3945.statistics.tx; + accum_tx = &priv->_3945.accum_statistics.tx; + delta_tx = &priv->_3945.delta_statistics.tx; + max_tx = &priv->_3945.max_delta.tx; + pos += iwl3945_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" + "acumulative delta max\n", + "Statistics_Tx:"); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "preamble:", + le32_to_cpu(tx->preamble_cnt), + accum_tx->preamble_cnt, + delta_tx->preamble_cnt, max_tx->preamble_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "rx_detected_cnt:", + le32_to_cpu(tx->rx_detected_cnt), + accum_tx->rx_detected_cnt, + delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "bt_prio_defer_cnt:", + le32_to_cpu(tx->bt_prio_defer_cnt), + accum_tx->bt_prio_defer_cnt, + delta_tx->bt_prio_defer_cnt, + max_tx->bt_prio_defer_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "bt_prio_kill_cnt:", + le32_to_cpu(tx->bt_prio_kill_cnt), + accum_tx->bt_prio_kill_cnt, + delta_tx->bt_prio_kill_cnt, + max_tx->bt_prio_kill_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "few_bytes_cnt:", + le32_to_cpu(tx->few_bytes_cnt), + accum_tx->few_bytes_cnt, + delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "cts_timeout:", + le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout, + delta_tx->cts_timeout, max_tx->cts_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "ack_timeout:", + le32_to_cpu(tx->ack_timeout), + accum_tx->ack_timeout, + delta_tx->ack_timeout, max_tx->ack_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "expected_ack_cnt:", + le32_to_cpu(tx->expected_ack_cnt), + accum_tx->expected_ack_cnt, + delta_tx->expected_ack_cnt, + max_tx->expected_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "actual_ack_cnt:", + le32_to_cpu(tx->actual_ack_cnt), + accum_tx->actual_ack_cnt, + delta_tx->actual_ack_cnt, + max_tx->actual_ack_cnt); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +ssize_t iwl3945_ucode_general_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = sizeof(struct iwl39_statistics_general) * 10 + 300; + ssize_t ret; + 
struct iwl39_statistics_general *general, *accum_general; + struct iwl39_statistics_general *delta_general, *max_general; + struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; + struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div; + + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* + * The statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + general = &priv->_3945.statistics.general; + dbg = &priv->_3945.statistics.general.dbg; + div = &priv->_3945.statistics.general.div; + accum_general = &priv->_3945.accum_statistics.general; + delta_general = &priv->_3945.delta_statistics.general; + max_general = &priv->_3945.max_delta.general; + accum_dbg = &priv->_3945.accum_statistics.general.dbg; + delta_dbg = &priv->_3945.delta_statistics.general.dbg; + max_dbg = &priv->_3945.max_delta.general.dbg; + accum_div = &priv->_3945.accum_statistics.general.div; + delta_div = &priv->_3945.delta_statistics.general.div; + max_div = &priv->_3945.max_delta.general.div; + pos += iwl3945_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" + "acumulative delta max\n", + "Statistics_General:"); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "burst_check:", + le32_to_cpu(dbg->burst_check), + accum_dbg->burst_check, + delta_dbg->burst_check, max_dbg->burst_check); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "burst_count:", + le32_to_cpu(dbg->burst_count), + accum_dbg->burst_count, + delta_dbg->burst_count, max_dbg->burst_count); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "sleep_time:", + le32_to_cpu(general->sleep_time), + accum_general->sleep_time, + delta_general->sleep_time, max_general->sleep_time); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "slots_out:", + le32_to_cpu(general->slots_out), + accum_general->slots_out, + delta_general->slots_out, max_general->slots_out); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "slots_idle:", + le32_to_cpu(general->slots_idle), + accum_general->slots_idle, + delta_general->slots_idle, max_general->slots_idle); + pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n", + le32_to_cpu(general->ttl_timestamp)); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "tx_on_a:", + le32_to_cpu(div->tx_on_a), accum_div->tx_on_a, + delta_div->tx_on_a, max_div->tx_on_a); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "tx_on_b:", + le32_to_cpu(div->tx_on_b), accum_div->tx_on_b, + delta_div->tx_on_b, max_div->tx_on_b); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "exec_time:", + le32_to_cpu(div->exec_time), accum_div->exec_time, + delta_div->exec_time, max_div->exec_time); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "probe_time:", + le32_to_cpu(div->probe_time), accum_div->probe_time, + delta_div->probe_time, max_div->probe_time); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h b/trunk/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h new file mode 100644 index 
000000000000..8fef4b32b447 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h @@ -0,0 +1,60 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-debug.h" + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS +ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); +ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); +ssize_t iwl3945_ucode_general_stats_read(struct file *file, + char __user *user_buf, size_t count, + loff_t *ppos); +#else +static ssize_t iwl3945_ucode_rx_stats_read(struct file *file, + char __user *user_buf, size_t count, + loff_t *ppos) +{ + return 0; +} +static ssize_t iwl3945_ucode_tx_stats_read(struct file *file, + char __user *user_buf, size_t count, + loff_t *ppos) +{ + return 0; +} +static ssize_t iwl3945_ucode_general_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + return 0; +} +#endif diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-3945-fh.h b/trunk/drivers/net/wireless/iwlegacy/iwl-3945-fh.h new file mode 100644 index 000000000000..836c9919f82e --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-3945-fh.h @@ -0,0 +1,187 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. 
+ * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_3945_fh_h__ +#define __iwl_3945_fh_h__ + +/************************************/ +/* iwl3945 Flow Handler Definitions */ +/************************************/ + +/** + * This I/O area is directly read/writable by driver (e.g. Linux uses writel()) + * Addresses are offsets from device's PCI hardware base address. 
+ */ +#define FH39_MEM_LOWER_BOUND (0x0800) +#define FH39_MEM_UPPER_BOUND (0x1000) + +#define FH39_CBCC_TABLE (FH39_MEM_LOWER_BOUND + 0x140) +#define FH39_TFDB_TABLE (FH39_MEM_LOWER_BOUND + 0x180) +#define FH39_RCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x400) +#define FH39_RSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x4c0) +#define FH39_TCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x500) +#define FH39_TSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x680) + +/* TFDB (Transmit Frame Buffer Descriptor) */ +#define FH39_TFDB(_ch, buf) (FH39_TFDB_TABLE + \ + ((_ch) * 2 + (buf)) * 0x28) +#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TABLE + 0x50 * (_ch)) + +/* CBCC channel is [0,2] */ +#define FH39_CBCC(_ch) (FH39_CBCC_TABLE + (_ch) * 0x8) +#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00) +#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04) + +/* RCSR channel is [0,2] */ +#define FH39_RCSR(_ch) (FH39_RCSR_TABLE + (_ch) * 0x40) +#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00) +#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04) +#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20) +#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24) + +#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0)) + +/* RSSR */ +#define FH39_RSSR_CTRL (FH39_RSSR_TABLE + 0x000) +#define FH39_RSSR_STATUS (FH39_RSSR_TABLE + 0x004) + +/* TCSR */ +#define FH39_TCSR(_ch) (FH39_TCSR_TABLE + (_ch) * 0x20) +#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00) +#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04) +#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08) + +/* TSSR */ +#define FH39_TSSR_CBB_BASE (FH39_TSSR_TABLE + 0x000) +#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TABLE + 0x008) +#define FH39_TSSR_TX_STATUS (FH39_TSSR_TABLE + 0x010) + + +/* DBM */ + +#define FH39_SRVC_CHNL (6) + +#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20) +#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4) + +#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000) + +#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000) + +#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000) + +#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000) + +#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000) + +#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000) + +#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000) +#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001) + +#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000) +#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008) + +#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000) + +#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000) + +#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000) +#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000) + +#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000) + +#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001) + +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000) +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000) + +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400) + +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100) +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080) + +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020) +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005) + +#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24) +#define 
FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16) + +#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \ + (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \ + FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch)) + +#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000) + +struct iwl3945_tfd_tb { + __le32 addr; + __le32 len; +} __packed; + +struct iwl3945_tfd { + __le32 control_flags; + struct iwl3945_tfd_tb tbs[4]; + u8 __pad[28]; +} __packed; + + +#endif /* __iwl_3945_fh_h__ */ diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-3945-hw.h b/trunk/drivers/net/wireless/iwlegacy/iwl-3945-hw.h new file mode 100644 index 000000000000..5c3a68d3af12 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-3945-hw.h @@ -0,0 +1,291 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +/* + * Please use this file (iwl-3945-hw.h) only for hardware-related definitions. + * Please use iwl-commands.h for uCode API definitions. + * Please use iwl-3945.h for driver implementation definitions. + */ + +#ifndef __iwl_3945_hw__ +#define __iwl_3945_hw__ + +#include "iwl-eeprom.h" + +/* RSSI to dBm */ +#define IWL39_RSSI_OFFSET 95 + +/* + * EEPROM related constants, enums, and structures. + */ +#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7) + +/* + * Mapping of a Tx power level, at factory calibration temperature, + * to a radio/DSP gain table index. + * One for each of 5 "sample" power levels in each band. + * v_det is measured at the factory, using the 3945's built-in power amplifier + * (PA) output voltage detector. This same detector is used during Tx of + * long packets in normal operation to provide feedback as to proper output + * level. + * Data copied from EEPROM. + * DO NOT ALTER THIS STRUCTURE!!! + */ +struct iwl3945_eeprom_txpower_sample { + u8 gain_index; /* index into power (gain) setup table ... */ + s8 power; /* ... for this pwr level for this chnl group */ + u16 v_det; /* PA output voltage */ +} __packed; + +/* + * Mappings of Tx power levels -> nominal radio/DSP gain table indexes. + * One for each channel group (a.k.a. "band") (1 for BG, 4 for A). + * Tx power setup code interpolates between the 5 "sample" power levels + * to determine the nominal setup for a requested power level. + * Data copied from EEPROM. + * DO NOT ALTER THIS STRUCTURE!!! + */ +struct iwl3945_eeprom_txpower_group { + struct iwl3945_eeprom_txpower_sample samples[5]; /* 5 power levels */ + s32 a, b, c, d, e; /* coefficients for voltage->power + * formula (signed) */ + s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on + * frequency (signed) */ + s8 saturation_power; /* highest power possible by h/w in this + * band */ + u8 group_channel; /* "representative" channel # in this band */ + s16 temperature; /* h/w temperature at factory calib this band + * (signed) */ +} __packed; + +/* + * Temperature-based Tx-power compensation data, not band-specific. + * These coefficients are use to modify a/b/c/d/e coeffs based on + * difference between current temperature and factory calib temperature. + * Data copied from EEPROM. 
+ */ +struct iwl3945_eeprom_temperature_corr { + u32 Ta; + u32 Tb; + u32 Tc; + u32 Td; + u32 Te; +} __packed; + +/* + * EEPROM map + */ +struct iwl3945_eeprom { + u8 reserved0[16]; + u16 device_id; /* abs.ofs: 16 */ + u8 reserved1[2]; + u16 pmc; /* abs.ofs: 20 */ + u8 reserved2[20]; + u8 mac_address[6]; /* abs.ofs: 42 */ + u8 reserved3[58]; + u16 board_revision; /* abs.ofs: 106 */ + u8 reserved4[11]; + u8 board_pba_number[9]; /* abs.ofs: 119 */ + u8 reserved5[8]; + u16 version; /* abs.ofs: 136 */ + u8 sku_cap; /* abs.ofs: 138 */ + u8 leds_mode; /* abs.ofs: 139 */ + u16 oem_mode; + u16 wowlan_mode; /* abs.ofs: 142 */ + u16 leds_time_interval; /* abs.ofs: 144 */ + u8 leds_off_time; /* abs.ofs: 146 */ + u8 leds_on_time; /* abs.ofs: 147 */ + u8 almgor_m_version; /* abs.ofs: 148 */ + u8 antenna_switch_type; /* abs.ofs: 149 */ + u8 reserved6[42]; + u8 sku_id[4]; /* abs.ofs: 192 */ + +/* + * Per-channel regulatory data. + * + * Each channel that *might* be supported by 3945 has a fixed location + * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory + * txpower (MSB). + * + * Entries immediately below are for 20 MHz channel width. + * + * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 + */ + u16 band_1_count; /* abs.ofs: 196 */ + struct iwl_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */ + +/* + * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196, + * 5.0 GHz channels 7, 8, 11, 12, 16 + * (4915-5080MHz) (none of these is ever supported) + */ + u16 band_2_count; /* abs.ofs: 226 */ + struct iwl_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */ + +/* + * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 + * (5170-5320MHz) + */ + u16 band_3_count; /* abs.ofs: 254 */ + struct iwl_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */ + +/* + * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 + * (5500-5700MHz) + */ + u16 band_4_count; /* abs.ofs: 280 */ + struct iwl_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */ + +/* + * 5.7 GHz channels 145, 149, 153, 157, 161, 165 + * (5725-5825MHz) + */ + u16 band_5_count; /* abs.ofs: 304 */ + struct iwl_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */ + + u8 reserved9[194]; + +/* + * 3945 Txpower calibration data. + */ +#define IWL_NUM_TX_CALIB_GROUPS 5 + struct iwl3945_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS]; +/* abs.ofs: 512 */ + struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */ + u8 reserved16[172]; /* fill out to full 1024 byte block */ +} __packed; + +#define IWL3945_EEPROM_IMG_SIZE 1024 + +/* End of EEPROM */ + +#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */ +#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */ + +/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */ +#define IWL39_NUM_QUEUES 5 +#define IWL39_CMD_QUEUE_NUM 4 + +#define IWL_DEFAULT_TX_RETRY 15 + +/*********************************************/ + +#define RFD_SIZE 4 +#define NUM_TFD_CHUNKS 4 + +#define RX_QUEUE_SIZE 256 +#define RX_QUEUE_MASK 255 +#define RX_QUEUE_SIZE_LOG 8 + +#define U32_PAD(n) ((4-(n))&0x3) + +#define TFD_CTL_COUNT_SET(n) (n << 24) +#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7) +#define TFD_CTL_PAD_SET(n) (n << 28) +#define TFD_CTL_PAD_GET(ctl) (ctl >> 28) + +/* Sizes and addresses for instruction and data memory (SRAM) in + * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. 
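+ * With the bounds below, instruction SRAM spans 0x14000 bytes (80 KiB) and
+ * data SRAM spans 0x8000 bytes (32 KiB); each *_SIZE macro is simply the
+ * upper bound minus the lower bound.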
*/ +#define IWL39_RTC_INST_LOWER_BOUND (0x000000) +#define IWL39_RTC_INST_UPPER_BOUND (0x014000) + +#define IWL39_RTC_DATA_LOWER_BOUND (0x800000) +#define IWL39_RTC_DATA_UPPER_BOUND (0x808000) + +#define IWL39_RTC_INST_SIZE (IWL39_RTC_INST_UPPER_BOUND - \ + IWL39_RTC_INST_LOWER_BOUND) +#define IWL39_RTC_DATA_SIZE (IWL39_RTC_DATA_UPPER_BOUND - \ + IWL39_RTC_DATA_LOWER_BOUND) + +#define IWL39_MAX_INST_SIZE IWL39_RTC_INST_SIZE +#define IWL39_MAX_DATA_SIZE IWL39_RTC_DATA_SIZE + +/* Size of uCode instruction memory in bootstrap state machine */ +#define IWL39_MAX_BSM_SIZE IWL39_RTC_INST_SIZE + +static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr) +{ + return (addr >= IWL39_RTC_DATA_LOWER_BOUND) && + (addr < IWL39_RTC_DATA_UPPER_BOUND); +} + +/* Base physical address of iwl3945_shared is provided to FH_TSSR_CBB_BASE + * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */ +struct iwl3945_shared { + __le32 tx_base_ptr[8]; +} __packed; + +static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags) +{ + return le16_to_cpu(rate_n_flags) & 0xFF; +} + +static inline u16 iwl3945_hw_get_rate_n_flags(__le16 rate_n_flags) +{ + return le16_to_cpu(rate_n_flags); +} + +static inline __le16 iwl3945_hw_set_rate_n_flags(u8 rate, u16 flags) +{ + return cpu_to_le16((u16)rate|flags); +} +#endif diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-3945-led.c b/trunk/drivers/net/wireless/iwlegacy/iwl-3945-led.c new file mode 100644 index 000000000000..7a7f0f38c8ab --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-3945-led.c @@ -0,0 +1,63 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "iwl-commands.h" +#include "iwl-3945.h" +#include "iwl-core.h" +#include "iwl-dev.h" +#include "iwl-3945-led.h" + + +/* Send led command */ +static int iwl3945_send_led_cmd(struct iwl_priv *priv, + struct iwl_led_cmd *led_cmd) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_LEDS_CMD, + .len = sizeof(struct iwl_led_cmd), + .data = led_cmd, + .flags = CMD_ASYNC, + .callback = NULL, + }; + + return iwl_legacy_send_cmd(priv, &cmd); +} + +const struct iwl_led_ops iwl3945_led_ops = { + .cmd = iwl3945_send_led_cmd, +}; diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-3945-led.h b/trunk/drivers/net/wireless/iwlegacy/iwl-3945-led.h new file mode 100644 index 000000000000..96716276eb0d --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-3945-led.h @@ -0,0 +1,32 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_3945_led_h__ +#define __iwl_3945_led_h__ + +extern const struct iwl_led_ops iwl3945_led_ops; + +#endif /* __iwl_3945_led_h__ */ diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/trunk/drivers/net/wireless/iwlegacy/iwl-3945-rs.c new file mode 100644 index 000000000000..8faeaf2dddec --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-3945-rs.c @@ -0,0 +1,996 @@ +/****************************************************************************** + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. 
+ * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "iwl-commands.h" +#include "iwl-3945.h" +#include "iwl-sta.h" + +#define RS_NAME "iwl-3945-rs" + +static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = { + 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202 +}; + +static s32 iwl3945_expected_tpt_g_prot[IWL_RATE_COUNT_3945] = { + 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125 +}; + +static s32 iwl3945_expected_tpt_a[IWL_RATE_COUNT_3945] = { + 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186 +}; + +static s32 iwl3945_expected_tpt_b[IWL_RATE_COUNT_3945] = { + 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0 +}; + +struct iwl3945_tpt_entry { + s8 min_rssi; + u8 index; +}; + +static struct iwl3945_tpt_entry iwl3945_tpt_table_a[] = { + {-60, IWL_RATE_54M_INDEX}, + {-64, IWL_RATE_48M_INDEX}, + {-72, IWL_RATE_36M_INDEX}, + {-80, IWL_RATE_24M_INDEX}, + {-84, IWL_RATE_18M_INDEX}, + {-85, IWL_RATE_12M_INDEX}, + {-87, IWL_RATE_9M_INDEX}, + {-89, IWL_RATE_6M_INDEX} +}; + +static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = { + {-60, IWL_RATE_54M_INDEX}, + {-64, IWL_RATE_48M_INDEX}, + {-68, IWL_RATE_36M_INDEX}, + {-80, IWL_RATE_24M_INDEX}, + {-84, IWL_RATE_18M_INDEX}, + {-85, IWL_RATE_12M_INDEX}, + {-86, IWL_RATE_11M_INDEX}, + {-88, IWL_RATE_5M_INDEX}, + {-90, IWL_RATE_2M_INDEX}, + {-92, IWL_RATE_1M_INDEX} +}; + +#define IWL_RATE_MAX_WINDOW 62 +#define IWL_RATE_FLUSH (3*HZ) +#define IWL_RATE_WIN_FLUSH (HZ/2) +#define IWL39_RATE_HIGH_TH 11520 +#define IWL_SUCCESS_UP_TH 8960 +#define IWL_SUCCESS_DOWN_TH 10880 +#define IWL_RATE_MIN_FAILURE_TH 6 +#define IWL_RATE_MIN_SUCCESS_TH 8 +#define IWL_RATE_DECREASE_TH 1920 +#define IWL_RATE_RETRY_TH 15 + +static u8 iwl3945_get_rate_index_by_rssi(s32 rssi, enum ieee80211_band band) +{ + u32 index = 0; + u32 table_size = 0; + struct iwl3945_tpt_entry *tpt_table = NULL; + + if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL)) + rssi = IWL_MIN_RSSI_VAL; + + switch (band) { + case IEEE80211_BAND_2GHZ: + tpt_table = iwl3945_tpt_table_g; + table_size = ARRAY_SIZE(iwl3945_tpt_table_g); + break; + + case IEEE80211_BAND_5GHZ: + tpt_table = iwl3945_tpt_table_a; + table_size = ARRAY_SIZE(iwl3945_tpt_table_a); + break; + + default: + BUG(); + break; + } + + while ((index < table_size) && (rssi < tpt_table[index].min_rssi)) + index++; + + index = min(index, (table_size - 1)); + + return tpt_table[index].index; +} + +static void iwl3945_clear_window(struct iwl3945_rate_scale_data *window) +{ + window->data = 0; + window->success_counter = 0; + window->success_ratio = -1; + window->counter = 0; + window->average_tpt = IWL_INVALID_VALUE; + window->stamp = 0; +} + +/** + * iwl3945_rate_scale_flush_windows - flush out the rate scale windows + * + * Returns the number of windows that have gathered data but were + * not flushed. If there were any that were not flushed, then + * reschedule the rate flushing routine. 
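+ * A window counts as unflushed when it still holds samples but was updated
+ * within the last IWL_RATE_WIN_FLUSH (HZ/2), i.e. too recently to be
+ * cleared on this pass.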
+ */ +static int iwl3945_rate_scale_flush_windows(struct iwl3945_rs_sta *rs_sta) +{ + int unflushed = 0; + int i; + unsigned long flags; + struct iwl_priv *priv __maybe_unused = rs_sta->priv; + + /* + * For each rate, if we have collected data on that rate + * and it has been more than IWL_RATE_WIN_FLUSH + * since we flushed, clear out the gathered statistics + */ + for (i = 0; i < IWL_RATE_COUNT_3945; i++) { + if (!rs_sta->win[i].counter) + continue; + + spin_lock_irqsave(&rs_sta->lock, flags); + if (time_after(jiffies, rs_sta->win[i].stamp + + IWL_RATE_WIN_FLUSH)) { + IWL_DEBUG_RATE(priv, "flushing %d samples of rate " + "index %d\n", + rs_sta->win[i].counter, i); + iwl3945_clear_window(&rs_sta->win[i]); + } else + unflushed++; + spin_unlock_irqrestore(&rs_sta->lock, flags); + } + + return unflushed; +} + +#define IWL_RATE_FLUSH_MAX 5000 /* msec */ +#define IWL_RATE_FLUSH_MIN 50 /* msec */ +#define IWL_AVERAGE_PACKETS 1500 + +static void iwl3945_bg_rate_scale_flush(unsigned long data) +{ + struct iwl3945_rs_sta *rs_sta = (void *)data; + struct iwl_priv *priv __maybe_unused = rs_sta->priv; + int unflushed = 0; + unsigned long flags; + u32 packet_count, duration, pps; + + IWL_DEBUG_RATE(priv, "enter\n"); + + unflushed = iwl3945_rate_scale_flush_windows(rs_sta); + + spin_lock_irqsave(&rs_sta->lock, flags); + + /* Number of packets Rx'd since last time this timer ran */ + packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1; + + rs_sta->last_tx_packets = rs_sta->tx_packets + 1; + + if (unflushed) { + duration = + jiffies_to_msecs(jiffies - rs_sta->last_partial_flush); + + IWL_DEBUG_RATE(priv, "Tx'd %d packets in %dms\n", + packet_count, duration); + + /* Determine packets per second */ + if (duration) + pps = (packet_count * 1000) / duration; + else + pps = 0; + + if (pps) { + duration = (IWL_AVERAGE_PACKETS * 1000) / pps; + if (duration < IWL_RATE_FLUSH_MIN) + duration = IWL_RATE_FLUSH_MIN; + else if (duration > IWL_RATE_FLUSH_MAX) + duration = IWL_RATE_FLUSH_MAX; + } else + duration = IWL_RATE_FLUSH_MAX; + + rs_sta->flush_time = msecs_to_jiffies(duration); + + IWL_DEBUG_RATE(priv, "new flush period: %d msec ave %d\n", + duration, packet_count); + + mod_timer(&rs_sta->rate_scale_flush, jiffies + + rs_sta->flush_time); + + rs_sta->last_partial_flush = jiffies; + } else { + rs_sta->flush_time = IWL_RATE_FLUSH; + rs_sta->flush_pending = 0; + } + /* If there weren't any unflushed entries, we don't schedule the timer + * to run again */ + + rs_sta->last_flush = jiffies; + + spin_unlock_irqrestore(&rs_sta->lock, flags); + + IWL_DEBUG_RATE(priv, "leave\n"); +} + +/** + * iwl3945_collect_tx_data - Update the success/failure sliding window + * + * We keep a sliding window of the last 64 packets transmitted + * at this rate. window->data contains the bitmask of successful + * packets. + */ +static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta, + struct iwl3945_rate_scale_data *window, + int success, int retries, int index) +{ + unsigned long flags; + s32 fail_count; + struct iwl_priv *priv __maybe_unused = rs_sta->priv; + + if (!retries) { + IWL_DEBUG_RATE(priv, "leave: retries == 0 -- should be at least 1\n"); + return; + } + + spin_lock_irqsave(&rs_sta->lock, flags); + + /* + * Keep track of only the latest 62 tx frame attempts in this rate's + * history window; anything older isn't really relevant any more. 
+ * If we have filled up the sliding window, drop the oldest attempt; + * if the oldest attempt (highest bit in bitmap) shows "success", + * subtract "1" from the success counter (this is the main reason + * we keep these bitmaps!). + * */ + while (retries > 0) { + if (window->counter >= IWL_RATE_MAX_WINDOW) { + + /* remove earliest */ + window->counter = IWL_RATE_MAX_WINDOW - 1; + + if (window->data & (1ULL << (IWL_RATE_MAX_WINDOW - 1))) { + window->data &= ~(1ULL << (IWL_RATE_MAX_WINDOW - 1)); + window->success_counter--; + } + } + + /* Increment frames-attempted counter */ + window->counter++; + + /* Shift bitmap by one frame (throw away oldest history), + * OR in "1", and increment "success" if this + * frame was successful. */ + window->data <<= 1; + if (success > 0) { + window->success_counter++; + window->data |= 0x1; + success--; + } + + retries--; + } + + /* Calculate current success ratio, avoid divide-by-0! */ + if (window->counter > 0) + window->success_ratio = 128 * (100 * window->success_counter) + / window->counter; + else + window->success_ratio = IWL_INVALID_VALUE; + + fail_count = window->counter - window->success_counter; + + /* Calculate average throughput, if we have enough history. */ + if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) || + (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH)) + window->average_tpt = ((window->success_ratio * + rs_sta->expected_tpt[index] + 64) / 128); + else + window->average_tpt = IWL_INVALID_VALUE; + + /* Tag this window as having been updated */ + window->stamp = jiffies; + + spin_unlock_irqrestore(&rs_sta->lock, flags); + +} + +/* + * Called after adding a new station to initialize rate scaling + */ +void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id) +{ + struct ieee80211_hw *hw = priv->hw; + struct ieee80211_conf *conf = &priv->hw->conf; + struct iwl3945_sta_priv *psta; + struct iwl3945_rs_sta *rs_sta; + struct ieee80211_supported_band *sband; + int i; + + IWL_DEBUG_INFO(priv, "enter\n"); + if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id) + goto out; + + psta = (struct iwl3945_sta_priv *) sta->drv_priv; + rs_sta = &psta->rs_sta; + sband = hw->wiphy->bands[conf->channel->band]; + + rs_sta->priv = priv; + + rs_sta->start_rate = IWL_RATE_INVALID; + + /* default to just 802.11b */ + rs_sta->expected_tpt = iwl3945_expected_tpt_b; + + rs_sta->last_partial_flush = jiffies; + rs_sta->last_flush = jiffies; + rs_sta->flush_time = IWL_RATE_FLUSH; + rs_sta->last_tx_packets = 0; + + rs_sta->rate_scale_flush.data = (unsigned long)rs_sta; + rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush; + + for (i = 0; i < IWL_RATE_COUNT_3945; i++) + iwl3945_clear_window(&rs_sta->win[i]); + + /* TODO: what is a good starting rate for STA? About middle? Maybe not + * the lowest or the highest rate.. Could consider using RSSI from + * previous packets? Need to have IEEE 802.1X auth succeed immediately + * after assoc.. 
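+ * For now the loop below seeds last_txrate_idx with the highest rate set in
+ * the station's supported-rate bitmap; iwl3945_rate_scale_init() later picks
+ * start_rate from the last measured RSSI.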
*/ + + for (i = sband->n_bitrates - 1; i >= 0; i--) { + if (sta->supp_rates[sband->band] & (1 << i)) { + rs_sta->last_txrate_idx = i; + break; + } + } + + priv->_3945.sta_supp_rates = sta->supp_rates[sband->band]; + /* For 5 GHz band it start at IWL_FIRST_OFDM_RATE */ + if (sband->band == IEEE80211_BAND_5GHZ) { + rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; + priv->_3945.sta_supp_rates = priv->_3945.sta_supp_rates << + IWL_FIRST_OFDM_RATE; + } + +out: + priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; + + IWL_DEBUG_INFO(priv, "leave\n"); +} + +static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +{ + return hw->priv; +} + +/* rate scale requires free function to be implemented */ +static void iwl3945_rs_free(void *priv) +{ + return; +} + +static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp) +{ + struct iwl3945_rs_sta *rs_sta; + struct iwl3945_sta_priv *psta = (void *) sta->drv_priv; + struct iwl_priv *priv __maybe_unused = iwl_priv; + + IWL_DEBUG_RATE(priv, "enter\n"); + + rs_sta = &psta->rs_sta; + + spin_lock_init(&rs_sta->lock); + init_timer(&rs_sta->rate_scale_flush); + + IWL_DEBUG_RATE(priv, "leave\n"); + + return rs_sta; +} + +static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta, + void *priv_sta) +{ + struct iwl3945_rs_sta *rs_sta = priv_sta; + + /* + * Be careful not to use any members of iwl3945_rs_sta (like trying + * to use iwl_priv to print out debugging) since it may not be fully + * initialized at this point. + */ + del_timer_sync(&rs_sta->rate_scale_flush); +} + + +/** + * iwl3945_rs_tx_status - Update rate control values based on Tx results + * + * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by + * the hardware for each rate. + */ +static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + struct sk_buff *skb) +{ + s8 retries = 0, current_count; + int scale_rate_index, first_index, last_index; + unsigned long flags; + struct iwl_priv *priv = (struct iwl_priv *)priv_rate; + struct iwl3945_rs_sta *rs_sta = priv_sta; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + IWL_DEBUG_RATE(priv, "enter\n"); + + retries = info->status.rates[0].count; + /* Sanity Check for retries */ + if (retries > IWL_RATE_RETRY_TH) + retries = IWL_RATE_RETRY_TH; + + first_index = sband->bitrates[info->status.rates[0].idx].hw_value; + if ((first_index < 0) || (first_index >= IWL_RATE_COUNT_3945)) { + IWL_DEBUG_RATE(priv, "leave: Rate out of bounds: %d\n", first_index); + return; + } + + if (!priv_sta) { + IWL_DEBUG_RATE(priv, "leave: No STA priv data to update!\n"); + return; + } + + /* Treat uninitialized rate scaling data same as non-existing. */ + if (!rs_sta->priv) { + IWL_DEBUG_RATE(priv, "leave: STA priv data uninitialized!\n"); + return; + } + + + rs_sta->tx_packets++; + + scale_rate_index = first_index; + last_index = first_index; + + /* + * Update the window for each rate. We determine which rates + * were Tx'd based on the total number of retries vs. the number + * of retries configured for each rate -- currently set to the + * priv value 'retry_rate' vs. 
rate specific + * + * On exit from this while loop last_index indicates the rate + * at which the frame was finally transmitted (or failed if no + * ACK) + */ + while (retries > 1) { + if ((retries - 1) < priv->retry_rate) { + current_count = (retries - 1); + last_index = scale_rate_index; + } else { + current_count = priv->retry_rate; + last_index = iwl3945_rs_next_rate(priv, + scale_rate_index); + } + + /* Update this rate accounting for as many retries + * as was used for it (per current_count) */ + iwl3945_collect_tx_data(rs_sta, + &rs_sta->win[scale_rate_index], + 0, current_count, scale_rate_index); + IWL_DEBUG_RATE(priv, "Update rate %d for %d retries.\n", + scale_rate_index, current_count); + + retries -= current_count; + + scale_rate_index = last_index; + } + + + /* Update the last index window with success/failure based on ACK */ + IWL_DEBUG_RATE(priv, "Update rate %d with %s.\n", + last_index, + (info->flags & IEEE80211_TX_STAT_ACK) ? + "success" : "failure"); + iwl3945_collect_tx_data(rs_sta, + &rs_sta->win[last_index], + info->flags & IEEE80211_TX_STAT_ACK, 1, last_index); + + /* We updated the rate scale window -- if its been more than + * flush_time since the last run, schedule the flush + * again */ + spin_lock_irqsave(&rs_sta->lock, flags); + + if (!rs_sta->flush_pending && + time_after(jiffies, rs_sta->last_flush + + rs_sta->flush_time)) { + + rs_sta->last_partial_flush = jiffies; + rs_sta->flush_pending = 1; + mod_timer(&rs_sta->rate_scale_flush, + jiffies + rs_sta->flush_time); + } + + spin_unlock_irqrestore(&rs_sta->lock, flags); + + IWL_DEBUG_RATE(priv, "leave\n"); +} + +static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta, + u8 index, u16 rate_mask, enum ieee80211_band band) +{ + u8 high = IWL_RATE_INVALID; + u8 low = IWL_RATE_INVALID; + struct iwl_priv *priv __maybe_unused = rs_sta->priv; + + /* 802.11A walks to the next literal adjacent rate in + * the rate table */ + if (unlikely(band == IEEE80211_BAND_5GHZ)) { + int i; + u32 mask; + + /* Find the previous rate that is in the rate mask */ + i = index - 1; + for (mask = (1 << i); i >= 0; i--, mask >>= 1) { + if (rate_mask & mask) { + low = i; + break; + } + } + + /* Find the next rate that is in the rate mask */ + i = index + 1; + for (mask = (1 << i); i < IWL_RATE_COUNT_3945; + i++, mask <<= 1) { + if (rate_mask & mask) { + high = i; + break; + } + } + + return (high << 8) | low; + } + + low = index; + while (low != IWL_RATE_INVALID) { + if (rs_sta->tgg) + low = iwl3945_rates[low].prev_rs_tgg; + else + low = iwl3945_rates[low].prev_rs; + if (low == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << low)) + break; + IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low); + } + + high = index; + while (high != IWL_RATE_INVALID) { + if (rs_sta->tgg) + high = iwl3945_rates[high].next_rs_tgg; + else + high = iwl3945_rates[high].next_rs; + if (high == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << high)) + break; + IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high); + } + + return (high << 8) | low; +} + +/** + * iwl3945_rs_get_rate - find the rate for the requested packet + * + * Returns the ieee80211_rate structure allocated by the driver. + * + * The rate control algorithm has no internal mapping between hw_mode's + * rate ordering and the rate ordering used by the rate control algorithm. + * + * The rate control algorithm uses a single table of rates that goes across + * the entire A/B/G spectrum vs. being limited to just one particular + * hw_mode. 
+ * + * As such, we can't convert the index obtained below into the hw_mode's + * rate table and must reference the driver allocated rate table + * + */ +static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, + void *priv_sta, struct ieee80211_tx_rate_control *txrc) +{ + struct ieee80211_supported_band *sband = txrc->sband; + struct sk_buff *skb = txrc->skb; + u8 low = IWL_RATE_INVALID; + u8 high = IWL_RATE_INVALID; + u16 high_low; + int index; + struct iwl3945_rs_sta *rs_sta = priv_sta; + struct iwl3945_rate_scale_data *window = NULL; + int current_tpt = IWL_INVALID_VALUE; + int low_tpt = IWL_INVALID_VALUE; + int high_tpt = IWL_INVALID_VALUE; + u32 fail_count; + s8 scale_action = 0; + unsigned long flags; + u16 rate_mask; + s8 max_rate_idx = -1; + struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + IWL_DEBUG_RATE(priv, "enter\n"); + + /* Treat uninitialized rate scaling data same as non-existing. */ + if (rs_sta && !rs_sta->priv) { + IWL_DEBUG_RATE(priv, "Rate scaling information not initialized yet.\n"); + priv_sta = NULL; + } + + if (rate_control_send_low(sta, priv_sta, txrc)) + return; + + rate_mask = sta->supp_rates[sband->band]; + + /* get user max rate if set */ + max_rate_idx = txrc->max_rate_idx; + if ((sband->band == IEEE80211_BAND_5GHZ) && (max_rate_idx != -1)) + max_rate_idx += IWL_FIRST_OFDM_RATE; + if ((max_rate_idx < 0) || (max_rate_idx >= IWL_RATE_COUNT)) + max_rate_idx = -1; + + index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT_3945 - 1); + + if (sband->band == IEEE80211_BAND_5GHZ) + rate_mask = rate_mask << IWL_FIRST_OFDM_RATE; + + spin_lock_irqsave(&rs_sta->lock, flags); + + /* for recent assoc, choose best rate regarding + * to rssi value + */ + if (rs_sta->start_rate != IWL_RATE_INVALID) { + if (rs_sta->start_rate < index && + (rate_mask & (1 << rs_sta->start_rate))) + index = rs_sta->start_rate; + rs_sta->start_rate = IWL_RATE_INVALID; + } + + /* force user max rate if set by user */ + if ((max_rate_idx != -1) && (max_rate_idx < index)) { + if (rate_mask & (1 << max_rate_idx)) + index = max_rate_idx; + } + + window = &(rs_sta->win[index]); + + fail_count = window->counter - window->success_counter; + + if (((fail_count < IWL_RATE_MIN_FAILURE_TH) && + (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) { + spin_unlock_irqrestore(&rs_sta->lock, flags); + + IWL_DEBUG_RATE(priv, "Invalid average_tpt on rate %d: " + "counter: %d, success_counter: %d, " + "expected_tpt is %sNULL\n", + index, + window->counter, + window->success_counter, + rs_sta->expected_tpt ? 
"not " : ""); + + /* Can't calculate this yet; not enough history */ + window->average_tpt = IWL_INVALID_VALUE; + goto out; + + } + + current_tpt = window->average_tpt; + + high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask, + sband->band); + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + /* If user set max rate, dont allow higher than user constrain */ + if ((max_rate_idx != -1) && (max_rate_idx < high)) + high = IWL_RATE_INVALID; + + /* Collect Measured throughputs of adjacent rates */ + if (low != IWL_RATE_INVALID) + low_tpt = rs_sta->win[low].average_tpt; + + if (high != IWL_RATE_INVALID) + high_tpt = rs_sta->win[high].average_tpt; + + spin_unlock_irqrestore(&rs_sta->lock, flags); + + scale_action = 0; + + /* Low success ratio , need to drop the rate */ + if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) { + IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n"); + scale_action = -1; + /* No throughput measured yet for adjacent rates, + * try increase */ + } else if ((low_tpt == IWL_INVALID_VALUE) && + (high_tpt == IWL_INVALID_VALUE)) { + + if (high != IWL_RATE_INVALID && window->success_ratio >= IWL_RATE_INCREASE_TH) + scale_action = 1; + else if (low != IWL_RATE_INVALID) + scale_action = 0; + + /* Both adjacent throughputs are measured, but neither one has + * better throughput; we're using the best rate, don't change + * it! */ + } else if ((low_tpt != IWL_INVALID_VALUE) && + (high_tpt != IWL_INVALID_VALUE) && + (low_tpt < current_tpt) && (high_tpt < current_tpt)) { + + IWL_DEBUG_RATE(priv, "No action -- low [%d] & high [%d] < " + "current_tpt [%d]\n", + low_tpt, high_tpt, current_tpt); + scale_action = 0; + + /* At least one of the rates has better throughput */ + } else { + if (high_tpt != IWL_INVALID_VALUE) { + + /* High rate has better throughput, Increase + * rate */ + if (high_tpt > current_tpt && + window->success_ratio >= IWL_RATE_INCREASE_TH) + scale_action = 1; + else { + IWL_DEBUG_RATE(priv, + "decrease rate because of high tpt\n"); + scale_action = 0; + } + } else if (low_tpt != IWL_INVALID_VALUE) { + if (low_tpt > current_tpt) { + IWL_DEBUG_RATE(priv, + "decrease rate because of low tpt\n"); + scale_action = -1; + } else if (window->success_ratio >= IWL_RATE_INCREASE_TH) { + /* Lower rate has better + * throughput,decrease rate */ + scale_action = 1; + } + } + } + + /* Sanity check; asked for decrease, but success rate or throughput + * has been good at old rate. Don't change it. 
*/ + if ((scale_action == -1) && (low != IWL_RATE_INVALID) && + ((window->success_ratio > IWL_RATE_HIGH_TH) || + (current_tpt > (100 * rs_sta->expected_tpt[low])))) + scale_action = 0; + + switch (scale_action) { + case -1: + + /* Decrese rate */ + if (low != IWL_RATE_INVALID) + index = low; + break; + + case 1: + /* Increase rate */ + if (high != IWL_RATE_INVALID) + index = high; + + break; + + case 0: + default: + /* No change */ + break; + } + + IWL_DEBUG_RATE(priv, "Selected %d (action %d) - low %d high %d\n", + index, scale_action, low, high); + + out: + + if (sband->band == IEEE80211_BAND_5GHZ) { + if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE)) + index = IWL_FIRST_OFDM_RATE; + rs_sta->last_txrate_idx = index; + info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE; + } else { + rs_sta->last_txrate_idx = index; + info->control.rates[0].idx = rs_sta->last_txrate_idx; + } + + IWL_DEBUG_RATE(priv, "leave: %d\n", index); +} + +#ifdef CONFIG_MAC80211_DEBUGFS +static int iwl3945_open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t iwl3945_sta_dbgfs_stats_table_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + char *buff; + int desc = 0; + int j; + ssize_t ret; + struct iwl3945_rs_sta *lq_sta = file->private_data; + + buff = kmalloc(1024, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + desc += sprintf(buff + desc, "tx packets=%d last rate index=%d\n" + "rate=0x%X flush time %d\n", + lq_sta->tx_packets, + lq_sta->last_txrate_idx, + lq_sta->start_rate, jiffies_to_msecs(lq_sta->flush_time)); + for (j = 0; j < IWL_RATE_COUNT_3945; j++) { + desc += sprintf(buff+desc, + "counter=%d success=%d %%=%d\n", + lq_sta->win[j].counter, + lq_sta->win[j].success_counter, + lq_sta->win[j].success_ratio); + } + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_stats_table_ops = { + .read = iwl3945_sta_dbgfs_stats_table_read, + .open = iwl3945_open_file_generic, + .llseek = default_llseek, +}; + +static void iwl3945_add_debugfs(void *priv, void *priv_sta, + struct dentry *dir) +{ + struct iwl3945_rs_sta *lq_sta = priv_sta; + + lq_sta->rs_sta_dbgfs_stats_table_file = + debugfs_create_file("rate_stats_table", 0600, dir, + lq_sta, &rs_sta_dbgfs_stats_table_ops); + +} + +static void iwl3945_remove_debugfs(void *priv, void *priv_sta) +{ + struct iwl3945_rs_sta *lq_sta = priv_sta; + debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); +} +#endif + +/* + * Initialization of rate scaling information is done by driver after + * the station is added. Since mac80211 calls this function before a + * station is added we ignore it. 
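+ * The real work is done in iwl3945_rs_rate_init(), which the driver calls
+ * itself once the station entry actually exists.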
+ */ +static void iwl3945_rs_rate_init_stub(void *priv_r, + struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta) +{ +} + +static struct rate_control_ops rs_ops = { + .module = NULL, + .name = RS_NAME, + .tx_status = iwl3945_rs_tx_status, + .get_rate = iwl3945_rs_get_rate, + .rate_init = iwl3945_rs_rate_init_stub, + .alloc = iwl3945_rs_alloc, + .free = iwl3945_rs_free, + .alloc_sta = iwl3945_rs_alloc_sta, + .free_sta = iwl3945_rs_free_sta, +#ifdef CONFIG_MAC80211_DEBUGFS + .add_sta_debugfs = iwl3945_add_debugfs, + .remove_sta_debugfs = iwl3945_remove_debugfs, +#endif + +}; +void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id) +{ + struct iwl_priv *priv = hw->priv; + s32 rssi = 0; + unsigned long flags; + struct iwl3945_rs_sta *rs_sta; + struct ieee80211_sta *sta; + struct iwl3945_sta_priv *psta; + + IWL_DEBUG_RATE(priv, "enter\n"); + + rcu_read_lock(); + + sta = ieee80211_find_sta(priv->contexts[IWL_RXON_CTX_BSS].vif, + priv->stations[sta_id].sta.sta.addr); + if (!sta) { + IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n"); + rcu_read_unlock(); + return; + } + + psta = (void *) sta->drv_priv; + rs_sta = &psta->rs_sta; + + spin_lock_irqsave(&rs_sta->lock, flags); + + rs_sta->tgg = 0; + switch (priv->band) { + case IEEE80211_BAND_2GHZ: + /* TODO: this always does G, not a regression */ + if (priv->contexts[IWL_RXON_CTX_BSS].active.flags & + RXON_FLG_TGG_PROTECT_MSK) { + rs_sta->tgg = 1; + rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot; + } else + rs_sta->expected_tpt = iwl3945_expected_tpt_g; + break; + + case IEEE80211_BAND_5GHZ: + rs_sta->expected_tpt = iwl3945_expected_tpt_a; + break; + case IEEE80211_NUM_BANDS: + BUG(); + break; + } + + spin_unlock_irqrestore(&rs_sta->lock, flags); + + rssi = priv->_3945.last_rx_rssi; + if (rssi == 0) + rssi = IWL_MIN_RSSI_VAL; + + IWL_DEBUG_RATE(priv, "Network RSSI: %d\n", rssi); + + rs_sta->start_rate = iwl3945_get_rate_index_by_rssi(rssi, priv->band); + + IWL_DEBUG_RATE(priv, "leave: rssi %d assign rate index: " + "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate, + iwl3945_rates[rs_sta->start_rate].plcp); + rcu_read_unlock(); +} + +int iwl3945_rate_control_register(void) +{ + return ieee80211_rate_control_register(&rs_ops); +} + +void iwl3945_rate_control_unregister(void) +{ + ieee80211_rate_control_unregister(&rs_ops); +} diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-3945.c b/trunk/drivers/net/wireless/iwlegacy/iwl-3945.c new file mode 100644 index 000000000000..f7c0a7438476 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-3945.c @@ -0,0 +1,2741 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. 
+ * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "iwl-fh.h" +#include "iwl-3945-fh.h" +#include "iwl-commands.h" +#include "iwl-sta.h" +#include "iwl-3945.h" +#include "iwl-eeprom.h" +#include "iwl-core.h" +#include "iwl-helpers.h" +#include "iwl-led.h" +#include "iwl-3945-led.h" +#include "iwl-3945-debugfs.h" + +#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \ + [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ + IWL_RATE_##r##M_IEEE, \ + IWL_RATE_##ip##M_INDEX, \ + IWL_RATE_##in##M_INDEX, \ + IWL_RATE_##rp##M_INDEX, \ + IWL_RATE_##rn##M_INDEX, \ + IWL_RATE_##pp##M_INDEX, \ + IWL_RATE_##np##M_INDEX, \ + IWL_RATE_##r##M_INDEX_TABLE, \ + IWL_RATE_##ip##M_INDEX_TABLE } + +/* + * Parameter order: + * rate, prev rate, next rate, prev tgg rate, next tgg rate + * + * If there isn't a valid next or previous rate then INV is used which + * maps to IWL_RATE_INVALID + * + */ +const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = { + IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */ + IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */ + IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /*5.5mbps */ + IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */ + IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */ + IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */ + IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */ + IWL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */ + IWL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */ + IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */ + IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */ + IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */ +}; + +static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index) +{ + u8 rate = iwl3945_rates[rate_index].prev_ieee; + + if (rate == IWL_RATE_INVALID) + rate = rate_index; + return rate; +} + +/* 1 = enable the iwl3945_disable_events() function */ +#define IWL_EVT_DISABLE (0) +#define IWL_EVT_DISABLE_SIZE (1532/32) + +/** + * iwl3945_disable_events - Disable selected events in uCode event log + * + * Disable an event by writing "1"s into "disable" + * bitmap in SRAM. Bit position corresponds to Event # (id/type). + * Default values of 0 enable uCode events to be logged. + * Use for only special debugging. This function is just a placeholder as-is, + * you'll need to provide the special bits! ... + * ... and set IWL_EVT_DISABLE to 1. 
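+ * The bitmap written below is IWL_EVT_DISABLE_SIZE = 1532/32 = 47 words,
+ * covering event ids 0..1503; a set bit disables logging of that event.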
*/ +void iwl3945_disable_events(struct iwl_priv *priv) +{ + int i; + u32 base; /* SRAM address of event log header */ + u32 disable_ptr; /* SRAM address of event-disable bitmap array */ + u32 array_size; /* # of u32 entries in array */ + static const u32 evt_disable[IWL_EVT_DISABLE_SIZE] = { + 0x00000000, /* 31 - 0 Event id numbers */ + 0x00000000, /* 63 - 32 */ + 0x00000000, /* 95 - 64 */ + 0x00000000, /* 127 - 96 */ + 0x00000000, /* 159 - 128 */ + 0x00000000, /* 191 - 160 */ + 0x00000000, /* 223 - 192 */ + 0x00000000, /* 255 - 224 */ + 0x00000000, /* 287 - 256 */ + 0x00000000, /* 319 - 288 */ + 0x00000000, /* 351 - 320 */ + 0x00000000, /* 383 - 352 */ + 0x00000000, /* 415 - 384 */ + 0x00000000, /* 447 - 416 */ + 0x00000000, /* 479 - 448 */ + 0x00000000, /* 511 - 480 */ + 0x00000000, /* 543 - 512 */ + 0x00000000, /* 575 - 544 */ + 0x00000000, /* 607 - 576 */ + 0x00000000, /* 639 - 608 */ + 0x00000000, /* 671 - 640 */ + 0x00000000, /* 703 - 672 */ + 0x00000000, /* 735 - 704 */ + 0x00000000, /* 767 - 736 */ + 0x00000000, /* 799 - 768 */ + 0x00000000, /* 831 - 800 */ + 0x00000000, /* 863 - 832 */ + 0x00000000, /* 895 - 864 */ + 0x00000000, /* 927 - 896 */ + 0x00000000, /* 959 - 928 */ + 0x00000000, /* 991 - 960 */ + 0x00000000, /* 1023 - 992 */ + 0x00000000, /* 1055 - 1024 */ + 0x00000000, /* 1087 - 1056 */ + 0x00000000, /* 1119 - 1088 */ + 0x00000000, /* 1151 - 1120 */ + 0x00000000, /* 1183 - 1152 */ + 0x00000000, /* 1215 - 1184 */ + 0x00000000, /* 1247 - 1216 */ + 0x00000000, /* 1279 - 1248 */ + 0x00000000, /* 1311 - 1280 */ + 0x00000000, /* 1343 - 1312 */ + 0x00000000, /* 1375 - 1344 */ + 0x00000000, /* 1407 - 1376 */ + 0x00000000, /* 1439 - 1408 */ + 0x00000000, /* 1471 - 1440 */ + 0x00000000, /* 1503 - 1472 */ + }; + + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + if (!iwl3945_hw_valid_rtc_data_addr(base)) { + IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base); + return; + } + + disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32))); + array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32))); + + if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) { + IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n", + disable_ptr); + for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++) + iwl_legacy_write_targ_mem(priv, + disable_ptr + (i * sizeof(u32)), + evt_disable[i]); + + } else { + IWL_DEBUG_INFO(priv, "Selected uCode log events may be disabled\n"); + IWL_DEBUG_INFO(priv, " by writing \"1\"s into disable bitmap\n"); + IWL_DEBUG_INFO(priv, " in SRAM at 0x%x, size %d u32s\n", + disable_ptr, array_size); + } + +} + +static int iwl3945_hwrate_to_plcp_idx(u8 plcp) +{ + int idx; + + for (idx = 0; idx < IWL_RATE_COUNT_3945; idx++) + if (iwl3945_rates[idx].plcp == plcp) + return idx; + return -1; +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x + +static const char *iwl3945_get_tx_fail_reason(u32 status) +{ + switch (status & TX_STATUS_MSK) { + case TX_3945_STATUS_SUCCESS: + return "SUCCESS"; + TX_STATUS_ENTRY(SHORT_LIMIT); + TX_STATUS_ENTRY(LONG_LIMIT); + TX_STATUS_ENTRY(FIFO_UNDERRUN); + TX_STATUS_ENTRY(MGMNT_ABORT); + TX_STATUS_ENTRY(NEXT_FRAG); + TX_STATUS_ENTRY(LIFE_EXPIRE); + TX_STATUS_ENTRY(DEST_PS); + TX_STATUS_ENTRY(ABORTED); + TX_STATUS_ENTRY(BT_RETRY); + TX_STATUS_ENTRY(STA_INVALID); + TX_STATUS_ENTRY(FRAG_DROPPED); + TX_STATUS_ENTRY(TID_DISABLE); + TX_STATUS_ENTRY(FRAME_FLUSHED); + TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL); + TX_STATUS_ENTRY(TX_LOCKED); + 
TX_STATUS_ENTRY(NO_BEACON_ON_RADAR); + } + + return "UNKNOWN"; +} +#else +static inline const char *iwl3945_get_tx_fail_reason(u32 status) +{ + return ""; +} +#endif + +/* + * get ieee prev rate from rate scale table. + * for A and B mode we need to overright prev + * value + */ +int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate) +{ + int next_rate = iwl3945_get_prev_ieee_rate(rate); + + switch (priv->band) { + case IEEE80211_BAND_5GHZ: + if (rate == IWL_RATE_12M_INDEX) + next_rate = IWL_RATE_9M_INDEX; + else if (rate == IWL_RATE_6M_INDEX) + next_rate = IWL_RATE_6M_INDEX; + break; + case IEEE80211_BAND_2GHZ: + if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) && + iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) { + if (rate == IWL_RATE_11M_INDEX) + next_rate = IWL_RATE_5M_INDEX; + } + break; + + default: + break; + } + + return next_rate; +} + + +/** + * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd + * + * When FW advances 'R' index, all entries between old and new 'R' index + * need to be reclaimed. As result, some free space forms. If there is + * enough free space (> low mark), wake the stack that feeds us. + */ +static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv, + int txq_id, int index) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + struct iwl_tx_info *tx_info; + + BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM); + + for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd); + q->read_ptr != index; + q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) { + + tx_info = &txq->txb[txq->q.read_ptr]; + ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb); + tx_info->skb = NULL; + priv->cfg->ops->lib->txq_free_tfd(priv, txq); + } + + if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) && + (txq_id != IWL39_CMD_QUEUE_NUM) && + priv->mac80211_registered) + iwl_legacy_wake_queue(priv, txq); +} + +/** + * iwl3945_rx_reply_tx - Handle Tx response + */ +static void iwl3945_rx_reply_tx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int index = SEQ_TO_INDEX(sequence); + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct ieee80211_tx_info *info; + struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; + u32 status = le32_to_cpu(tx_resp->status); + int rate_idx; + int fail; + + if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) { + IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " + "is out of range [0-%d] %d %d\n", txq_id, + index, txq->q.n_bd, txq->q.write_ptr, + txq->q.read_ptr); + return; + } + + txq->time_stamp = jiffies; + info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); + ieee80211_tx_info_clear_status(info); + + /* Fill the MRR chain with some info about on-chip retransmissions */ + rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate); + if (info->band == IEEE80211_BAND_5GHZ) + rate_idx -= IWL_FIRST_OFDM_RATE; + + fail = tx_resp->failure_frame; + + info->status.rates[0].idx = rate_idx; + info->status.rates[0].count = fail + 1; /* add final attempt */ + + /* tx_status->rts_retry_count = tx_resp->failure_rts; */ + info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ? 
+ IEEE80211_TX_STAT_ACK : 0; + + IWL_DEBUG_TX(priv, "Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", + txq_id, iwl3945_get_tx_fail_reason(status), status, + tx_resp->rate, tx_resp->failure_frame); + + IWL_DEBUG_TX_REPLY(priv, "Tx queue reclaim %d\n", index); + iwl3945_tx_queue_reclaim(priv, txq_id, index); + + if (status & TX_ABORT_REQUIRED_MSK) + IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n"); +} + + + +/***************************************************************************** + * + * Intel PRO/Wireless 3945ABG/BG Network Connection + * + * RX handler implementations + * + *****************************************************************************/ +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS +static void iwl3945_accumulative_statistics(struct iwl_priv *priv, + __le32 *stats) +{ + int i; + __le32 *prev_stats; + u32 *accum_stats; + u32 *delta, *max_delta; + + prev_stats = (__le32 *)&priv->_3945.statistics; + accum_stats = (u32 *)&priv->_3945.accum_statistics; + delta = (u32 *)&priv->_3945.delta_statistics; + max_delta = (u32 *)&priv->_3945.max_delta; + + for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics); + i += sizeof(__le32), stats++, prev_stats++, delta++, + max_delta++, accum_stats++) { + if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) { + *delta = (le32_to_cpu(*stats) - + le32_to_cpu(*prev_stats)); + *accum_stats += *delta; + if (*delta > *max_delta) + *max_delta = *delta; + } + } + + /* reset accumulative statistics for "no-counter" type statistics */ + priv->_3945.accum_statistics.general.temperature = + priv->_3945.statistics.general.temperature; + priv->_3945.accum_statistics.general.ttl_timestamp = + priv->_3945.statistics.general.ttl_timestamp; +} +#endif + +void iwl3945_hw_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", + (int)sizeof(struct iwl3945_notif_statistics), + le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw); +#endif + + memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics)); +} + +void iwl3945_reply_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + __le32 *flag = (__le32 *)&pkt->u.raw; + + if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) { +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + memset(&priv->_3945.accum_statistics, 0, + sizeof(struct iwl3945_notif_statistics)); + memset(&priv->_3945.delta_statistics, 0, + sizeof(struct iwl3945_notif_statistics)); + memset(&priv->_3945.max_delta, 0, + sizeof(struct iwl3945_notif_statistics)); +#endif + IWL_DEBUG_RX(priv, "Statistics have been cleared\n"); + } + iwl3945_hw_rx_statistics(priv, rxb); +} + + +/****************************************************************************** + * + * Misc. internal state and helper functions + * + ******************************************************************************/ + +/* This is necessary only for a number of statistics, see the caller. */ +static int iwl3945_is_network_packet(struct iwl_priv *priv, + struct ieee80211_hdr *header) +{ + /* Filter incoming packets to determine if they are targeted toward + * this network, discarding packets coming from ourselves */ + switch (priv->iw_mode) { + case NL80211_IFTYPE_ADHOC: /* Header: Dest. 
| Source | BSSID */ + /* packets to our IBSS update information */ + return !compare_ether_addr(header->addr3, priv->bssid); + case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */ + /* packets to our IBSS update information */ + return !compare_ether_addr(header->addr2, priv->bssid); + default: + return 1; + } +} + +static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb, + struct ieee80211_rx_status *stats) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); + struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); + struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); + u16 len = le16_to_cpu(rx_hdr->len); + struct sk_buff *skb; + __le16 fc = hdr->frame_control; + + /* We received data from the HW, so stop the watchdog */ + if (unlikely(len + IWL39_RX_FRAME_SIZE > + PAGE_SIZE << priv->hw_params.rx_page_order)) { + IWL_DEBUG_DROP(priv, "Corruption detected!\n"); + return; + } + + /* We only process data packets if the interface is open */ + if (unlikely(!priv->is_open)) { + IWL_DEBUG_DROP_LIMIT(priv, + "Dropping packet while interface is not open.\n"); + return; + } + + skb = dev_alloc_skb(128); + if (!skb) { + IWL_ERR(priv, "dev_alloc_skb failed\n"); + return; + } + + if (!iwl3945_mod_params.sw_crypto) + iwl_legacy_set_decrypted_flag(priv, + (struct ieee80211_hdr *)rxb_addr(rxb), + le32_to_cpu(rx_end->status), stats); + + skb_add_rx_frag(skb, 0, rxb->page, + (void *)rx_hdr->payload - (void *)pkt, len); + + iwl_legacy_update_stats(priv, false, fc, len); + memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); + + ieee80211_rx(priv->hw, skb); + priv->alloc_rxb_page--; + rxb->page = NULL; +} + +#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6) + +static void iwl3945_rx_reply_rx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct ieee80211_hdr *header; + struct ieee80211_rx_status rx_status; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); + struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); + struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); + u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg); + u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff); + u8 network_packet; + + rx_status.flag = 0; + rx_status.mactime = le64_to_cpu(rx_end->timestamp); + rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + rx_status.freq = + ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel), + rx_status.band); + + rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate); + if (rx_status.band == IEEE80211_BAND_5GHZ) + rx_status.rate_idx -= IWL_FIRST_OFDM_RATE; + + rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) & + RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4; + + /* set the preamble flag if appropriate */ + if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) + rx_status.flag |= RX_FLAG_SHORTPRE; + + if ((unlikely(rx_stats->phy_count > 20))) { + IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n", + rx_stats->phy_count); + return; + } + + if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) + || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { + IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", rx_end->status); + return; + } + + + + /* Convert 3945's rssi indicator to dBm */ + rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET; + + IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n", + rx_status.signal, rx_stats_sig_avg, + rx_stats_noise_diff); + + header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); + + network_packet = iwl3945_is_network_packet(priv, header); + + IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n", + network_packet ? '*' : ' ', + le16_to_cpu(rx_hdr->channel), + rx_status.signal, rx_status.signal, + rx_status.rate_idx); + + iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), + header); + + if (network_packet) { + priv->_3945.last_beacon_time = + le32_to_cpu(rx_end->beacon_timestamp); + priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp); + priv->_3945.last_rx_rssi = rx_status.signal; + } + + iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status); +} + +int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + dma_addr_t addr, u16 len, u8 reset, u8 pad) +{ + int count; + struct iwl_queue *q; + struct iwl3945_tfd *tfd, *tfd_tmp; + + q = &txq->q; + tfd_tmp = (struct iwl3945_tfd *)txq->tfds; + tfd = &tfd_tmp[q->write_ptr]; + + if (reset) + memset(tfd, 0, sizeof(*tfd)); + + count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags)); + + if ((count >= NUM_TFD_CHUNKS) || (count < 0)) { + IWL_ERR(priv, "Error can not send more than %d chunks\n", + NUM_TFD_CHUNKS); + return -EINVAL; + } + + tfd->tbs[count].addr = cpu_to_le32(addr); + tfd->tbs[count].len = cpu_to_le32(len); + + count++; + + tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) | + TFD_CTL_PAD_SET(pad)); + + return 0; +} + +/** + * iwl3945_hw_txq_free_tfd - Free one TFD, those at index [txq->q.read_ptr] + * + * Does NOT advance any indexes + */ +void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)txq->tfds; + int index = txq->q.read_ptr; + struct iwl3945_tfd *tfd = &tfd_tmp[index]; + struct pci_dev *dev = priv->pci_dev; + int i; + int counter; + + /* sanity check */ + counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags)); + if (counter > NUM_TFD_CHUNKS) { + IWL_ERR(priv, "Too many chunks: %i\n", counter); + /* @todo issue fatal error, it is quite serious situation */ + return; + } + + /* Unmap tx_cmd */ + if (counter) + pci_unmap_single(dev, + dma_unmap_addr(&txq->meta[index], mapping), + dma_unmap_len(&txq->meta[index], len), + PCI_DMA_TODEVICE); + + /* unmap chunks if any */ + + for (i = 1; i < counter; i++) + pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr), + le32_to_cpu(tfd->tbs[i].len), 
PCI_DMA_TODEVICE); + + /* free SKB */ + if (txq->txb) { + struct sk_buff *skb; + + skb = txq->txb[txq->q.read_ptr].skb; + + /* can be called from irqs-disabled context */ + if (skb) { + dev_kfree_skb_any(skb); + txq->txb[txq->q.read_ptr].skb = NULL; + } + } +} + +/** + * iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD: + * +*/ +void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct ieee80211_tx_info *info, + struct ieee80211_hdr *hdr, + int sta_id, int tx_id) +{ + u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value; + u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT_3945); + u16 rate_mask; + int rate; + u8 rts_retry_limit; + u8 data_retry_limit; + __le32 tx_flags; + __le16 fc = hdr->frame_control; + struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload; + + rate = iwl3945_rates[rate_index].plcp; + tx_flags = tx_cmd->tx_flags; + + /* We need to figure out how to get the sta->supp_rates while + * in this running context */ + rate_mask = IWL_RATES_MASK_3945; + + /* Set retry limit on DATA packets and Probe Responses*/ + if (ieee80211_is_probe_resp(fc)) + data_retry_limit = 3; + else + data_retry_limit = IWL_DEFAULT_TX_RETRY; + tx_cmd->data_retry_limit = data_retry_limit; + + if (tx_id >= IWL39_CMD_QUEUE_NUM) + rts_retry_limit = 3; + else + rts_retry_limit = 7; + + if (data_retry_limit < rts_retry_limit) + rts_retry_limit = data_retry_limit; + tx_cmd->rts_retry_limit = rts_retry_limit; + + tx_cmd->rate = rate; + tx_cmd->tx_flags = tx_flags; + + /* OFDM */ + tx_cmd->supp_rates[0] = + ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF; + + /* CCK */ + tx_cmd->supp_rates[1] = (rate_mask & 0xF); + + IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X " + "cck/ofdm mask: 0x%x/0x%x\n", sta_id, + tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags), + tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]); +} + +static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate) +{ + unsigned long flags_spin; + struct iwl_station_entry *station; + + if (sta_id == IWL_INVALID_STATION) + return IWL_INVALID_STATION; + + spin_lock_irqsave(&priv->sta_lock, flags_spin); + station = &priv->stations[sta_id]; + + station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK; + station->sta.rate_n_flags = cpu_to_le16(tx_rate); + station->sta.mode = STA_CONTROL_MODIFY_MSK; + iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC); + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + + IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n", + sta_id, tx_rate); + return sta_id; +} + +static void iwl3945_set_pwr_vmain(struct iwl_priv *priv) +{ +/* + * (for documentation purposes) + * to set power to V_AUX, do + + if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) { + iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VAUX, + ~APMG_PS_CTRL_MSK_PWR_SRC); + + iwl_poll_bit(priv, CSR_GPIO_IN, + CSR_GPIO_IN_VAL_VAUX_PWR_SRC, + CSR_GPIO_IN_BIT_AUX_POWER, 5000); + } + */ + + iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, + ~APMG_PS_CTRL_MSK_PWR_SRC); + + iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC, + CSR_GPIO_IN_BIT_AUX_POWER, 5000); /* uS */ +} + +static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma); + iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), + rxq->rb_stts_dma); + iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0); + 
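+	/* Channel 0 Rx config: enable the DMA channel and RBD fetches, enable
+	 * status write-back, cap fragments at 128 bytes, take the RBD ring size
+	 * from RX_QUEUE_SIZE_LOG, and direct interrupts to the host with an RB
+	 * threshold of 1, FH message mode. */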
iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), + FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE | + FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE | + FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN | + FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 | + (RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) | + FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST | + (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) | + FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH); + + /* fake read to flush all prev I/O */ + iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL); + + return 0; +} + +static int iwl3945_tx_reset(struct iwl_priv *priv) +{ + + /* bypass mode */ + iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2); + + /* RA 0 is active */ + iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01); + + /* all 6 fifo are active */ + iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f); + + iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000); + iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002); + iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004); + iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005); + + iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE, + priv->_3945.shared_phys); + + iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG, + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH); + + + return 0; +} + +/** + * iwl3945_txq_ctx_reset - Reset TX queue context + * + * Destroys all DMA structures and initialize them again + */ +static int iwl3945_txq_ctx_reset(struct iwl_priv *priv) +{ + int rc; + int txq_id, slots_num; + + iwl3945_hw_txq_ctx_free(priv); + + /* allocate tx queue structure */ + rc = iwl_legacy_alloc_txq_mem(priv); + if (rc) + return rc; + + /* Tx CMD queue */ + rc = iwl3945_tx_reset(priv); + if (rc) + goto error; + + /* Tx queue(s) */ + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { + slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ? + TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id], + slots_num, txq_id); + if (rc) { + IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); + goto error; + } + } + + return rc; + + error: + iwl3945_hw_txq_ctx_free(priv); + return rc; +} + + +/* + * Start up 3945's basic functionality after it has been reset + * (e.g. 
after platform boot, or shutdown via iwl_legacy_apm_stop()) + * NOTE: This does not load uCode nor start the embedded processor + */ +static int iwl3945_apm_init(struct iwl_priv *priv) +{ + int ret = iwl_legacy_apm_init(priv); + + /* Clear APMG (NIC's internal power management) interrupts */ + iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0); + iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF); + + /* Reset radio chip */ + iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); + udelay(5); + iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); + + return ret; +} + +static void iwl3945_nic_config(struct iwl_priv *priv) +{ + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + unsigned long flags; + u8 rev_id = priv->pci_dev->revision; + + spin_lock_irqsave(&priv->lock, flags); + + /* Determine HW type */ + IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id); + + if (rev_id & PCI_CFG_REV_ID_BIT_RTP) + IWL_DEBUG_INFO(priv, "RTP type\n"); + else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) { + IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n"); + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BIT_3945_MB); + } else { + IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n"); + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BIT_3945_MM); + } + + if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) { + IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n"); + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC); + } else + IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n"); + + if ((eeprom->board_revision & 0xF0) == 0xD0) { + IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n", + eeprom->board_revision); + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); + } else { + IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n", + eeprom->board_revision); + iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); + } + + if (eeprom->almgor_m_version <= 1) { + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A); + IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n", + eeprom->almgor_m_version); + } else { + IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n", + eeprom->almgor_m_version); + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B); + } + spin_unlock_irqrestore(&priv->lock, flags); + + if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE) + IWL_DEBUG_RF_KILL(priv, "SW RF KILL supported in EEPROM.\n"); + + if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE) + IWL_DEBUG_RF_KILL(priv, "HW RF KILL supported in EEPROM.\n"); +} + +int iwl3945_hw_nic_init(struct iwl_priv *priv) +{ + int rc; + unsigned long flags; + struct iwl_rx_queue *rxq = &priv->rxq; + + spin_lock_irqsave(&priv->lock, flags); + priv->cfg->ops->lib->apm_ops.init(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl3945_set_pwr_vmain(priv); + + priv->cfg->ops->lib->apm_ops.config(priv); + + /* Allocate the RX queue, or reset if it is already allocated */ + if (!rxq->bd) { + rc = iwl_legacy_rx_queue_alloc(priv); + if (rc) { + IWL_ERR(priv, "Unable to initialize Rx queue\n"); + return -ENOMEM; + } + } else + iwl3945_rx_queue_reset(priv, rxq); + + iwl3945_rx_replenish(priv); + + iwl3945_rx_init(priv, rxq); + + + /* Look at using this instead: + rxq->need_update = 1; + iwl_legacy_rx_queue_update_write_ptr(priv, rxq); + */ + + 
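[Editor's aside, not part of the patch] The FH39_RCSR_WPTR write just below masks the software write index with "& ~7", which rounds it down to a multiple of eight receive buffer descriptors before handing it to the hardware; that alignment requirement is inferred from the mask itself. A tiny standalone restatement:

    static unsigned int demo_align_rx_write_ptr(unsigned int write_idx)
    {
            /* clear the low three bits: 37 -> 32, 8 -> 8, 5 -> 0 */
            return write_idx & ~7u;
    }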
iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7); + + rc = iwl3945_txq_ctx_reset(priv); + if (rc) + return rc; + + set_bit(STATUS_INIT, &priv->status); + + return 0; +} + +/** + * iwl3945_hw_txq_ctx_free - Free TXQ Context + * + * Destroy all TX DMA queues and structures + */ +void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv) +{ + int txq_id; + + /* Tx queues */ + if (priv->txq) + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; + txq_id++) + if (txq_id == IWL39_CMD_QUEUE_NUM) + iwl_legacy_cmd_queue_free(priv); + else + iwl_legacy_tx_queue_free(priv, txq_id); + + /* free tx queue structure */ + iwl_legacy_txq_mem(priv); +} + +void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv) +{ + int txq_id; + + /* stop SCD */ + iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0); + iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0); + + /* reset TFD queues */ + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { + iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0); + iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS, + FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id), + 1000); + } + + iwl3945_hw_txq_ctx_free(priv); +} + +/** + * iwl3945_hw_reg_adjust_power_by_temp + * return index delta into power gain settings table +*/ +static int iwl3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading) +{ + return (new_reading - old_reading) * (-11) / 100; +} + +/** + * iwl3945_hw_reg_temp_out_of_range - Keep temperature in sane range + */ +static inline int iwl3945_hw_reg_temp_out_of_range(int temperature) +{ + return ((temperature < -260) || (temperature > 25)) ? 1 : 0; +} + +int iwl3945_hw_get_temperature(struct iwl_priv *priv) +{ + return iwl_read32(priv, CSR_UCODE_DRV_GP2); +} + +/** + * iwl3945_hw_reg_txpower_get_temperature + * get the current temperature by reading from NIC +*/ +static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv) +{ + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + int temperature; + + temperature = iwl3945_hw_get_temperature(priv); + + /* driver's okay range is -260 to +25. + * human readable okay range is 0 to +285 */ + IWL_DEBUG_INFO(priv, "Temperature: %d\n", temperature + IWL_TEMP_CONVERT); + + /* handle insane temp reading */ + if (iwl3945_hw_reg_temp_out_of_range(temperature)) { + IWL_ERR(priv, "Error bad temperature value %d\n", temperature); + + /* if really really hot(?), + * substitute the 3rd band/group's temp measured at factory */ + if (priv->last_temperature > 100) + temperature = eeprom->groups[2].temperature; + else /* else use most recent "sane" value from driver */ + temperature = priv->last_temperature; + } + + return temperature; /* raw, not "human readable" */ +} + +/* Adjust Txpower only if temperature variance is greater than threshold. + * + * Both are lower than older versions' 9 degrees */ +#define IWL_TEMPERATURE_LIMIT_TIMER 6 + +/** + * iwl3945_is_temp_calib_needed - determines if new calibration is needed + * + * records new temperature in tx_mgr->temperature. + * replaces tx_mgr->last_temperature *only* if calib needed + * (assumes caller will actually do the calibration!). 
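[Editor's aside, not part of the patch] A worked number for the temperature compensation above: iwl3945_hw_reg_adjust_power_by_temp() converts a change in the raw reading from CSR_UCODE_DRV_GP2 (not degrees) into a signed offset applied to the gain-table index, where each step is 1/2 dB and a lower index means more output power. Standalone restatement with the arithmetic spelled out:

    static int demo_adjust_power_by_temp(int new_reading, int old_reading)
    {
            /* same formula as iwl3945_hw_reg_adjust_power_by_temp() */
            return (new_reading - old_reading) * (-11) / 100;
    }

    /*
     * demo_adjust_power_by_temp(80, 25) == (55 * -11) / 100 == -6:
     * a warmer reading pulls the gain-table index down by six half-dB
     * steps (more output power).  iwl3945_is_temp_calib_needed() only
     * triggers this once the swing reaches IWL_TEMPERATURE_LIMIT_TIMER (6).
     */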
*/ +static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv) +{ + int temp_diff; + + priv->temperature = iwl3945_hw_reg_txpower_get_temperature(priv); + temp_diff = priv->temperature - priv->last_temperature; + + /* get absolute value */ + if (temp_diff < 0) { + IWL_DEBUG_POWER(priv, "Getting cooler, delta %d,\n", temp_diff); + temp_diff = -temp_diff; + } else if (temp_diff == 0) + IWL_DEBUG_POWER(priv, "Same temp,\n"); + else + IWL_DEBUG_POWER(priv, "Getting warmer, delta %d,\n", temp_diff); + + /* if we don't need calibration, *don't* update last_temperature */ + if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) { + IWL_DEBUG_POWER(priv, "Timed thermal calib not needed\n"); + return 0; + } + + IWL_DEBUG_POWER(priv, "Timed thermal calib needed\n"); + + /* assume that caller will actually do calib ... + * update the "last temperature" value */ + priv->last_temperature = priv->temperature; + return 1; +} + +#define IWL_MAX_GAIN_ENTRIES 78 +#define IWL_CCK_FROM_OFDM_POWER_DIFF -5 +#define IWL_CCK_FROM_OFDM_INDEX_DIFF (10) + +/* radio and DSP power table, each step is 1/2 dB. + * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */ +static struct iwl3945_tx_power power_gain_table[2][IWL_MAX_GAIN_ENTRIES] = { + { + {251, 127}, /* 2.4 GHz, highest power */ + {251, 127}, + {251, 127}, + {251, 127}, + {251, 125}, + {251, 110}, + {251, 105}, + {251, 98}, + {187, 125}, + {187, 115}, + {187, 108}, + {187, 99}, + {243, 119}, + {243, 111}, + {243, 105}, + {243, 97}, + {243, 92}, + {211, 106}, + {211, 100}, + {179, 120}, + {179, 113}, + {179, 107}, + {147, 125}, + {147, 119}, + {147, 112}, + {147, 106}, + {147, 101}, + {147, 97}, + {147, 91}, + {115, 107}, + {235, 121}, + {235, 115}, + {235, 109}, + {203, 127}, + {203, 121}, + {203, 115}, + {203, 108}, + {203, 102}, + {203, 96}, + {203, 92}, + {171, 110}, + {171, 104}, + {171, 98}, + {139, 116}, + {227, 125}, + {227, 119}, + {227, 113}, + {227, 107}, + {227, 101}, + {227, 96}, + {195, 113}, + {195, 106}, + {195, 102}, + {195, 95}, + {163, 113}, + {163, 106}, + {163, 102}, + {163, 95}, + {131, 113}, + {131, 106}, + {131, 102}, + {131, 95}, + {99, 113}, + {99, 106}, + {99, 102}, + {99, 95}, + {67, 113}, + {67, 106}, + {67, 102}, + {67, 95}, + {35, 113}, + {35, 106}, + {35, 102}, + {35, 95}, + {3, 113}, + {3, 106}, + {3, 102}, + {3, 95} }, /* 2.4 GHz, lowest power */ + { + {251, 127}, /* 5.x GHz, highest power */ + {251, 120}, + {251, 114}, + {219, 119}, + {219, 101}, + {187, 113}, + {187, 102}, + {155, 114}, + {155, 103}, + {123, 117}, + {123, 107}, + {123, 99}, + {123, 92}, + {91, 108}, + {59, 125}, + {59, 118}, + {59, 109}, + {59, 102}, + {59, 96}, + {59, 90}, + {27, 104}, + {27, 98}, + {27, 92}, + {115, 118}, + {115, 111}, + {115, 104}, + {83, 126}, + {83, 121}, + {83, 113}, + {83, 105}, + {83, 99}, + {51, 118}, + {51, 111}, + {51, 104}, + {51, 98}, + {19, 116}, + {19, 109}, + {19, 102}, + {19, 98}, + {19, 93}, + {171, 113}, + {171, 107}, + {171, 99}, + {139, 120}, + {139, 113}, + {139, 107}, + {139, 99}, + {107, 120}, + {107, 113}, + {107, 107}, + {107, 99}, + {75, 120}, + {75, 113}, + {75, 107}, + {75, 99}, + {43, 120}, + {43, 113}, + {43, 107}, + {43, 99}, + {11, 120}, + {11, 113}, + {11, 107}, + {11, 99}, + {131, 107}, + {131, 99}, + {99, 120}, + {99, 113}, + {99, 107}, + {99, 99}, + {67, 120}, + {67, 113}, + {67, 107}, + {67, 99}, + {35, 120}, + {35, 113}, + {35, 107}, + {35, 99}, + {3, 120} } /* 5.x GHz, lowest power */ +}; + +static inline u8 iwl3945_hw_reg_fix_power_index(int index) +{ + if (index < 0) + return 0; + if 
(index >= IWL_MAX_GAIN_ENTRIES) + return IWL_MAX_GAIN_ENTRIES - 1; + return (u8) index; +} + +/* Kick off thermal recalibration check every 60 seconds */ +#define REG_RECALIB_PERIOD (60) + +/** + * iwl3945_hw_reg_set_scan_power - Set Tx power for scan probe requests + * + * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK) + * or 6 Mbit (OFDM) rates. + */ +static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index, + s32 rate_index, const s8 *clip_pwrs, + struct iwl_channel_info *ch_info, + int band_index) +{ + struct iwl3945_scan_power_info *scan_power_info; + s8 power; + u8 power_index; + + scan_power_info = &ch_info->scan_pwr_info[scan_tbl_index]; + + /* use this channel group's 6Mbit clipping/saturation pwr, + * but cap at regulatory scan power restriction (set during init + * based on eeprom channel data) for this channel. */ + power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]); + + power = min(power, priv->tx_power_user_lmt); + scan_power_info->requested_power = power; + + /* find difference between new scan *power* and current "normal" + * Tx *power* for 6Mb. Use this difference (x2) to adjust the + * current "normal" temperature-compensated Tx power *index* for + * this rate (1Mb or 6Mb) to yield new temp-compensated scan power + * *index*. */ + power_index = ch_info->power_info[rate_index].power_table_index + - (power - ch_info->power_info + [IWL_RATE_6M_INDEX_TABLE].requested_power) * 2; + + /* store reference index that we use when adjusting *all* scan + * powers. So we can accommodate user (all channel) or spectrum + * management (single channel) power changes "between" temperature + * feedback compensation procedures. + * don't force fit this reference index into gain table; it may be a + * negative number. This will help avoid errors when we're at + * the lower bounds (highest gains, for warmest temperatures) + * of the table. */ + + /* don't exceed table bounds for "real" setting */ + power_index = iwl3945_hw_reg_fix_power_index(power_index); + + scan_power_info->power_table_index = power_index; + scan_power_info->tpc.tx_gain = + power_gain_table[band_index][power_index].tx_gain; + scan_power_info->tpc.dsp_atten = + power_gain_table[band_index][power_index].dsp_atten; +} + +/** + * iwl3945_send_tx_power - fill in Tx Power command with gain settings + * + * Configures power settings for all rates for the current channel, + * using values from channel info struct, and send to NIC + */ +static int iwl3945_send_tx_power(struct iwl_priv *priv) +{ + int rate_idx, i; + const struct iwl_channel_info *ch_info = NULL; + struct iwl3945_txpowertable_cmd txpower = { + .channel = priv->contexts[IWL_RXON_CTX_BSS].active.channel, + }; + u16 chan; + + if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status), + "TX Power requested while scanning!\n")) + return -EAGAIN; + + chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel); + + txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 
0 : 1; + ch_info = iwl_legacy_get_channel_info(priv, priv->band, chan); + if (!ch_info) { + IWL_ERR(priv, + "Failed to get channel info for channel %d [%d]\n", + chan, priv->band); + return -EINVAL; + } + + if (!iwl_legacy_is_channel_valid(ch_info)) { + IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on " + "non-Tx channel.\n"); + return 0; + } + + /* fill cmd with power settings for all rates for current channel */ + /* Fill OFDM rate */ + for (rate_idx = IWL_FIRST_OFDM_RATE, i = 0; + rate_idx <= IWL39_LAST_OFDM_RATE; rate_idx++, i++) { + + txpower.power[i].tpc = ch_info->power_info[i].tpc; + txpower.power[i].rate = iwl3945_rates[rate_idx].plcp; + + IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n", + le16_to_cpu(txpower.channel), + txpower.band, + txpower.power[i].tpc.tx_gain, + txpower.power[i].tpc.dsp_atten, + txpower.power[i].rate); + } + /* Fill CCK rates */ + for (rate_idx = IWL_FIRST_CCK_RATE; + rate_idx <= IWL_LAST_CCK_RATE; rate_idx++, i++) { + txpower.power[i].tpc = ch_info->power_info[i].tpc; + txpower.power[i].rate = iwl3945_rates[rate_idx].plcp; + + IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n", + le16_to_cpu(txpower.channel), + txpower.band, + txpower.power[i].tpc.tx_gain, + txpower.power[i].tpc.dsp_atten, + txpower.power[i].rate); + } + + return iwl_legacy_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, + sizeof(struct iwl3945_txpowertable_cmd), + &txpower); + +} + +/** + * iwl3945_hw_reg_set_new_power - Configures power tables at new levels + * @ch_info: Channel to update. Uses power_info.requested_power. + * + * Replace requested_power and base_power_index ch_info fields for + * one channel. + * + * Called if user or spectrum management changes power preferences. + * Takes into account h/w and modulation limitations (clip power). + * + * This does *not* send anything to NIC, just sets up ch_info for one channel. + * + * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to + * properly fill out the scan powers, and actual h/w gain settings, + * and send changes to NIC + */ +static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv, + struct iwl_channel_info *ch_info) +{ + struct iwl3945_channel_power_info *power_info; + int power_changed = 0; + int i; + const s8 *clip_pwrs; + int power; + + /* Get this chnlgrp's rate-to-max/clip-powers table */ + clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers; + + /* Get this channel's rate-to-current-power settings table */ + power_info = ch_info->power_info; + + /* update OFDM Txpower settings */ + for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE; + i++, ++power_info) { + int delta_idx; + + /* limit new power to be no more than h/w capability */ + power = min(ch_info->curr_txpow, clip_pwrs[i]); + if (power == power_info->requested_power) + continue; + + /* find difference between old and new requested powers, + * update base (non-temp-compensated) power index */ + delta_idx = (power - power_info->requested_power) * 2; + power_info->base_power_index -= delta_idx; + + /* save new requested power value */ + power_info->requested_power = power; + + power_changed = 1; + } + + /* update CCK Txpower settings, based on OFDM 12M setting ... + * ... all CCK power settings for a given channel are the *same*. */ + if (power_changed) { + power = + ch_info->power_info[IWL_RATE_12M_INDEX_TABLE]. 
+ requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF; + + /* do all CCK rates' iwl3945_channel_power_info structures */ + for (i = IWL_RATE_1M_INDEX_TABLE; i <= IWL_RATE_11M_INDEX_TABLE; i++) { + power_info->requested_power = power; + power_info->base_power_index = + ch_info->power_info[IWL_RATE_12M_INDEX_TABLE]. + base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF; + ++power_info; + } + } + + return 0; +} + +/** + * iwl3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel + * + * NOTE: Returned power limit may be less (but not more) than requested, + * based strictly on regulatory (eeprom and spectrum mgt) limitations + * (no consideration for h/w clipping limitations). + */ +static int iwl3945_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info) +{ + s8 max_power; + +#if 0 + /* if we're using TGd limits, use lower of TGd or EEPROM */ + if (ch_info->tgd_data.max_power != 0) + max_power = min(ch_info->tgd_data.max_power, + ch_info->eeprom.max_power_avg); + + /* else just use EEPROM limits */ + else +#endif + max_power = ch_info->eeprom.max_power_avg; + + return min(max_power, ch_info->max_power_avg); +} + +/** + * iwl3945_hw_reg_comp_txpower_temp - Compensate for temperature + * + * Compensate txpower settings of *all* channels for temperature. + * This only accounts for the difference between current temperature + * and the factory calibration temperatures, and bases the new settings + * on the channel's base_power_index. + * + * If RxOn is "associated", this sends the new Txpower to NIC! + */ +static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv) +{ + struct iwl_channel_info *ch_info = NULL; + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + int delta_index; + const s8 *clip_pwrs; /* array of h/w max power levels for each rate */ + u8 a_band; + u8 rate_index; + u8 scan_tbl_index; + u8 i; + int ref_temp; + int temperature = priv->temperature; + + if (priv->disable_tx_power_cal || + test_bit(STATUS_SCANNING, &priv->status)) { + /* do not perform tx power calibration */ + return 0; + } + /* set up new Tx power info for each and every channel, 2.4 and 5.x */ + for (i = 0; i < priv->channel_count; i++) { + ch_info = &priv->channel_info[i]; + a_band = iwl_legacy_is_channel_a_band(ch_info); + + /* Get this chnlgrp's factory calibration temperature */ + ref_temp = (s16)eeprom->groups[ch_info->group_index]. + temperature; + + /* get power index adjustment based on current and factory + * temps */ + delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature, + ref_temp); + + /* set tx power value for all rates, OFDM and CCK */ + for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945; + rate_index++) { + int power_idx = + ch_info->power_info[rate_index].base_power_index; + + /* temperature compensate */ + power_idx += delta_index; + + /* stay within table range */ + power_idx = iwl3945_hw_reg_fix_power_index(power_idx); + ch_info->power_info[rate_index]. + power_table_index = (u8) power_idx; + ch_info->power_info[rate_index].tpc = + power_gain_table[a_band][power_idx]; + } + + /* Get this chnlgrp's rate-to-max/clip-powers table */ + clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers; + + /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */ + for (scan_tbl_index = 0; + scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) { + s32 actual_index = (scan_tbl_index == 0) ? 
+ IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE; + iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index, + actual_index, clip_pwrs, + ch_info, a_band); + } + } + + /* send Txpower command for current channel to ucode */ + return priv->cfg->ops->lib->send_tx_power(priv); +} + +int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power) +{ + struct iwl_channel_info *ch_info; + s8 max_power; + u8 a_band; + u8 i; + + if (priv->tx_power_user_lmt == power) { + IWL_DEBUG_POWER(priv, "Requested Tx power same as current " + "limit: %ddBm.\n", power); + return 0; + } + + IWL_DEBUG_POWER(priv, "Setting upper limit clamp to %ddBm.\n", power); + priv->tx_power_user_lmt = power; + + /* set up new Tx powers for each and every channel, 2.4 and 5.x */ + + for (i = 0; i < priv->channel_count; i++) { + ch_info = &priv->channel_info[i]; + a_band = iwl_legacy_is_channel_a_band(ch_info); + + /* find minimum power of all user and regulatory constraints + * (does not consider h/w clipping limitations) */ + max_power = iwl3945_hw_reg_get_ch_txpower_limit(ch_info); + max_power = min(power, max_power); + if (max_power != ch_info->curr_txpow) { + ch_info->curr_txpow = max_power; + + /* this considers the h/w clipping limitations */ + iwl3945_hw_reg_set_new_power(priv, ch_info); + } + } + + /* update txpower settings for all channels, + * send to NIC if associated. */ + iwl3945_is_temp_calib_needed(priv); + iwl3945_hw_reg_comp_txpower_temp(priv); + + return 0; +} + +static int iwl3945_send_rxon_assoc(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + int rc = 0; + struct iwl_rx_packet *pkt; + struct iwl3945_rxon_assoc_cmd rxon_assoc; + struct iwl_host_cmd cmd = { + .id = REPLY_RXON_ASSOC, + .len = sizeof(rxon_assoc), + .flags = CMD_WANT_SKB, + .data = &rxon_assoc, + }; + const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging; + const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active; + + if ((rxon1->flags == rxon2->flags) && + (rxon1->filter_flags == rxon2->filter_flags) && + (rxon1->cck_basic_rates == rxon2->cck_basic_rates) && + (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { + IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n"); + return 0; + } + + rxon_assoc.flags = ctx->staging.flags; + rxon_assoc.filter_flags = ctx->staging.filter_flags; + rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates; + rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates; + rxon_assoc.reserved = 0; + + rc = iwl_legacy_send_cmd_sync(priv, &cmd); + if (rc) + return rc; + + pkt = (struct iwl_rx_packet *)cmd.reply_page; + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n"); + rc = -EIO; + } + + iwl_legacy_free_pages(priv, cmd.reply_page); + + return rc; +} + +/** + * iwl3945_commit_rxon - commit staging_rxon to hardware + * + * The RXON command in staging_rxon is committed to the hardware and + * the active_rxon structure is updated with the new data. This + * function correctly transitions out of the RXON_ASSOC_MSK state if + * a HW tune is required based on the RXON structure changes. 
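[Editor's aside, not part of the patch] Before the commit path below chooses between a full RXON and the lightweight RXON_ASSOC update, iwl3945_send_rxon_assoc() above short-circuits when none of the association-related fields differ between staging and active. A standalone sketch of that comparison, with the field types simplified (the driver uses __le32/u8):

    struct demo_rxon_assoc_fields {
            unsigned int  flags;
            unsigned int  filter_flags;
            unsigned char ofdm_basic_rates;
            unsigned char cck_basic_rates;
    };

    /* returns nonzero when an RXON_ASSOC command is actually worth sending */
    static int demo_rxon_assoc_needed(const struct demo_rxon_assoc_fields *staging,
                                      const struct demo_rxon_assoc_fields *active)
    {
            return staging->flags            != active->flags        ||
                   staging->filter_flags     != active->filter_flags ||
                   staging->ofdm_basic_rates != active->ofdm_basic_rates ||
                   staging->cck_basic_rates  != active->cck_basic_rates;
    }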
+ */ +int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + /* cast away the const for active_rxon in this function */ + struct iwl3945_rxon_cmd *active_rxon = (void *)&ctx->active; + struct iwl3945_rxon_cmd *staging_rxon = (void *)&ctx->staging; + int rc = 0; + bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return -EINVAL; + + if (!iwl_legacy_is_alive(priv)) + return -1; + + /* always get timestamp with Rx frame */ + staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK; + + /* select antenna */ + staging_rxon->flags &= + ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK); + staging_rxon->flags |= iwl3945_get_antenna_flags(priv); + + rc = iwl_legacy_check_rxon_cmd(priv, ctx); + if (rc) { + IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); + return -EINVAL; + } + + /* If we don't need to send a full RXON, we can use + * iwl3945_rxon_assoc_cmd which is used to reconfigure filter + * and other flags for the current radio configuration. */ + if (!iwl_legacy_full_rxon_required(priv, + &priv->contexts[IWL_RXON_CTX_BSS])) { + rc = iwl_legacy_send_rxon_assoc(priv, + &priv->contexts[IWL_RXON_CTX_BSS]); + if (rc) { + IWL_ERR(priv, "Error setting RXON_ASSOC " + "configuration (%d).\n", rc); + return rc; + } + + memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); + /* + * We do not commit tx power settings while channel changing, + * do it now if tx power changed. + */ + iwl_legacy_set_tx_power(priv, priv->tx_power_next, false); + return 0; + } + + /* If we are currently associated and the new config requires + * an RXON_ASSOC and the new config wants the associated mask enabled, + * we must clear the associated from the active configuration + * before we apply the new config */ + if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) { + IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n"); + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + + /* + * reserved4 and 5 could have been filled by the iwlcore code. + * Let's clear them before pushing to the 3945. + */ + active_rxon->reserved4 = 0; + active_rxon->reserved5 = 0; + rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON, + sizeof(struct iwl3945_rxon_cmd), + &priv->contexts[IWL_RXON_CTX_BSS].active); + + /* If the mask clearing failed then we set + * active_rxon back to what it was previously */ + if (rc) { + active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; + IWL_ERR(priv, "Error clearing ASSOC_MSK on current " + "configuration (%d).\n", rc); + return rc; + } + iwl_legacy_clear_ucode_stations(priv, + &priv->contexts[IWL_RXON_CTX_BSS]); + iwl_legacy_restore_stations(priv, + &priv->contexts[IWL_RXON_CTX_BSS]); + } + + IWL_DEBUG_INFO(priv, "Sending RXON\n" + "* with%s RXON_FILTER_ASSOC_MSK\n" + "* channel = %d\n" + "* bssid = %pM\n", + (new_assoc ? "" : "out"), + le16_to_cpu(staging_rxon->channel), + staging_rxon->bssid_addr); + + /* + * reserved4 and 5 could have been filled by the iwlcore code. + * Let's clear them before pushing to the 3945. 
+ */ + staging_rxon->reserved4 = 0; + staging_rxon->reserved5 = 0; + + iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto); + + /* Apply the new configuration */ + rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON, + sizeof(struct iwl3945_rxon_cmd), + staging_rxon); + if (rc) { + IWL_ERR(priv, "Error setting new configuration (%d).\n", rc); + return rc; + } + + memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); + + if (!new_assoc) { + iwl_legacy_clear_ucode_stations(priv, + &priv->contexts[IWL_RXON_CTX_BSS]); + iwl_legacy_restore_stations(priv, + &priv->contexts[IWL_RXON_CTX_BSS]); + } + + /* If we issue a new RXON command which required a tune then we must + * send a new TXPOWER command or we won't be able to Tx any frames */ + rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true); + if (rc) { + IWL_ERR(priv, "Error setting Tx power (%d).\n", rc); + return rc; + } + + /* Init the hardware's rate fallback order based on the band */ + rc = iwl3945_init_hw_rate_table(priv); + if (rc) { + IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc); + return -EIO; + } + + return 0; +} + +/** + * iwl3945_reg_txpower_periodic - called when time to check our temperature. + * + * -- reset periodic timer + * -- see if temp has changed enough to warrant re-calibration ... if so: + * -- correct coeffs for temp (can reset temp timer) + * -- save this temp as "last", + * -- send new set of gain settings to NIC + * NOTE: This should continue working, even when we're not associated, + * so we can keep our internal table of scan powers current. */ +void iwl3945_reg_txpower_periodic(struct iwl_priv *priv) +{ + /* This will kick in the "brute force" + * iwl3945_hw_reg_comp_txpower_temp() below */ + if (!iwl3945_is_temp_calib_needed(priv)) + goto reschedule; + + /* Set up a new set of temp-adjusted TxPowers, send to NIC. + * This is based *only* on current temperature, + * ignoring any previous power measurements */ + iwl3945_hw_reg_comp_txpower_temp(priv); + + reschedule: + queue_delayed_work(priv->workqueue, + &priv->_3945.thermal_periodic, REG_RECALIB_PERIOD * HZ); +} + +static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + _3945.thermal_periodic.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl3945_reg_txpower_periodic(priv); + mutex_unlock(&priv->mutex); +} + +/** + * iwl3945_hw_reg_get_ch_grp_index - find the channel-group index (0-4) + * for the channel. + * + * This function is used when initializing channel-info structs. + * + * NOTE: These channel groups do *NOT* match the bands above! + * These channel groups are based on factory-tested channels; + * on A-band, EEPROM's "group frequency" entries represent the top + * channel in each group 1-4. Group 5 All B/G channels are in group 0. + */ +static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv, + const struct iwl_channel_info *ch_info) +{ + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + struct iwl3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0]; + u8 group; + u16 group_index = 0; /* based on factory calib frequencies */ + u8 grp_channel; + + /* Find the group index for the channel ... don't use index 1(?) 
*/ + if (iwl_legacy_is_channel_a_band(ch_info)) { + for (group = 1; group < 5; group++) { + grp_channel = ch_grp[group].group_channel; + if (ch_info->channel <= grp_channel) { + group_index = group; + break; + } + } + /* group 4 has a few channels *above* its factory cal freq */ + if (group == 5) + group_index = 4; + } else + group_index = 0; /* 2.4 GHz, group 0 */ + + IWL_DEBUG_POWER(priv, "Chnl %d mapped to grp %d\n", ch_info->channel, + group_index); + return group_index; +} + +/** + * iwl3945_hw_reg_get_matched_power_index - Interpolate to get nominal index + * + * Interpolate to get nominal (i.e. at factory calibration temperature) index + * into radio/DSP gain settings table for requested power. + */ +static int iwl3945_hw_reg_get_matched_power_index(struct iwl_priv *priv, + s8 requested_power, + s32 setting_index, s32 *new_index) +{ + const struct iwl3945_eeprom_txpower_group *chnl_grp = NULL; + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + s32 index0, index1; + s32 power = 2 * requested_power; + s32 i; + const struct iwl3945_eeprom_txpower_sample *samples; + s32 gains0, gains1; + s32 res; + s32 denominator; + + chnl_grp = &eeprom->groups[setting_index]; + samples = chnl_grp->samples; + for (i = 0; i < 5; i++) { + if (power == samples[i].power) { + *new_index = samples[i].gain_index; + return 0; + } + } + + if (power > samples[1].power) { + index0 = 0; + index1 = 1; + } else if (power > samples[2].power) { + index0 = 1; + index1 = 2; + } else if (power > samples[3].power) { + index0 = 2; + index1 = 3; + } else { + index0 = 3; + index1 = 4; + } + + denominator = (s32) samples[index1].power - (s32) samples[index0].power; + if (denominator == 0) + return -EINVAL; + gains0 = (s32) samples[index0].gain_index * (1 << 19); + gains1 = (s32) samples[index1].gain_index * (1 << 19); + res = gains0 + (gains1 - gains0) * + ((s32) power - (s32) samples[index0].power) / denominator + + (1 << 18); + *new_index = res >> 19; + return 0; +} + +static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv) +{ + u32 i; + s32 rate_index; + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + const struct iwl3945_eeprom_txpower_group *group; + + IWL_DEBUG_POWER(priv, "Initializing factory calib info from EEPROM\n"); + + for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) { + s8 *clip_pwrs; /* table of power levels for each rate */ + s8 satur_pwr; /* saturation power for each chnl group */ + group = &eeprom->groups[i]; + + /* sanity check on factory saturation power value */ + if (group->saturation_power < 40) { + IWL_WARN(priv, "Error: saturation power is %d, " + "less than minimum expected 40\n", + group->saturation_power); + return; + } + + /* + * Derive requested power levels for each rate, based on + * hardware capabilities (saturation power for band). + * Basic value is 3dB down from saturation, with further + * power reductions for highest 3 data rates. These + * backoffs provide headroom for high rate modulation + * power peaks, without too much distortion (clipping). 
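[Editor's aside, not part of the patch] The backoffs described in the comment above are applied by the switch right below: start from half the factory saturation value (the driver's own comment treats that as the -3 dB point) and back the top three OFDM rates off a few extra dB, more on the 5 GHz groups than on the 2.4 GHz group. Standalone restatement of just that mapping, using int in place of the driver's s8 clip_pwrs:

    enum demo_rate { DEMO_RATE_OTHER, DEMO_RATE_36M, DEMO_RATE_48M, DEMO_RATE_54M };

    static int demo_clip_power(int satur_pwr, enum demo_rate r, int is_bg_group)
    {
            switch (r) {
            case DEMO_RATE_36M:
                    return is_bg_group ? satur_pwr     : satur_pwr - 5;
            case DEMO_RATE_48M:
                    return is_bg_group ? satur_pwr - 7 : satur_pwr - 10;
            case DEMO_RATE_54M:
                    return is_bg_group ? satur_pwr - 9 : satur_pwr - 12;
            default:
                    return satur_pwr;   /* lower rates: no extra backoff */
            }
    }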
+ */ + /* we'll fill in this array with h/w max power levels */ + clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers; + + /* divide factory saturation power by 2 to find -3dB level */ + satur_pwr = (s8) (group->saturation_power >> 1); + + /* fill in channel group's nominal powers for each rate */ + for (rate_index = 0; + rate_index < IWL_RATE_COUNT_3945; rate_index++, clip_pwrs++) { + switch (rate_index) { + case IWL_RATE_36M_INDEX_TABLE: + if (i == 0) /* B/G */ + *clip_pwrs = satur_pwr; + else /* A */ + *clip_pwrs = satur_pwr - 5; + break; + case IWL_RATE_48M_INDEX_TABLE: + if (i == 0) + *clip_pwrs = satur_pwr - 7; + else + *clip_pwrs = satur_pwr - 10; + break; + case IWL_RATE_54M_INDEX_TABLE: + if (i == 0) + *clip_pwrs = satur_pwr - 9; + else + *clip_pwrs = satur_pwr - 12; + break; + default: + *clip_pwrs = satur_pwr; + break; + } + } + } +} + +/** + * iwl3945_txpower_set_from_eeprom - Set channel power info based on EEPROM + * + * Second pass (during init) to set up priv->channel_info + * + * Set up Tx-power settings in our channel info database for each VALID + * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values + * and current temperature. + * + * Since this is based on current temperature (at init time), these values may + * not be valid for very long, but it gives us a starting/default point, + * and allows us to active (i.e. using Tx) scan. + * + * This does *not* write values to NIC, just sets up our internal table. + */ +int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv) +{ + struct iwl_channel_info *ch_info = NULL; + struct iwl3945_channel_power_info *pwr_info; + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + int delta_index; + u8 rate_index; + u8 scan_tbl_index; + const s8 *clip_pwrs; /* array of power levels for each rate */ + u8 gain, dsp_atten; + s8 power; + u8 pwr_index, base_pwr_index, a_band; + u8 i; + int temperature; + + /* save temperature reference, + * so we can determine next time to calibrate */ + temperature = iwl3945_hw_reg_txpower_get_temperature(priv); + priv->last_temperature = temperature; + + iwl3945_hw_reg_init_channel_groups(priv); + + /* initialize Tx power info for each and every channel, 2.4 and 5.x */ + for (i = 0, ch_info = priv->channel_info; i < priv->channel_count; + i++, ch_info++) { + a_band = iwl_legacy_is_channel_a_band(ch_info); + if (!iwl_legacy_is_channel_valid(ch_info)) + continue; + + /* find this channel's channel group (*not* "band") index */ + ch_info->group_index = + iwl3945_hw_reg_get_ch_grp_index(priv, ch_info); + + /* Get this chnlgrp's rate->max/clip-powers table */ + clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers; + + /* calculate power index *adjustment* value according to + * diff between current temperature and factory temperature */ + delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature, + eeprom->groups[ch_info->group_index]. + temperature); + + IWL_DEBUG_POWER(priv, "Delta index for channel %d: %d [%d]\n", + ch_info->channel, delta_index, temperature + + IWL_TEMP_CONVERT); + + /* set tx power value for all OFDM rates */ + for (rate_index = 0; rate_index < IWL_OFDM_RATES; + rate_index++) { + s32 uninitialized_var(power_idx); + int rc; + + /* use channel group's clip-power table, + * but don't exceed channel's max power */ + s8 pwr = min(ch_info->max_power_avg, + clip_pwrs[rate_index]); + + pwr_info = &ch_info->power_info[rate_index]; + + /* get base (i.e. 
at factory-measured temperature) + * power table index for this rate's power */ + rc = iwl3945_hw_reg_get_matched_power_index(priv, pwr, + ch_info->group_index, + &power_idx); + if (rc) { + IWL_ERR(priv, "Invalid power index\n"); + return rc; + } + pwr_info->base_power_index = (u8) power_idx; + + /* temperature compensate */ + power_idx += delta_index; + + /* stay within range of gain table */ + power_idx = iwl3945_hw_reg_fix_power_index(power_idx); + + /* fill 1 OFDM rate's iwl3945_channel_power_info struct */ + pwr_info->requested_power = pwr; + pwr_info->power_table_index = (u8) power_idx; + pwr_info->tpc.tx_gain = + power_gain_table[a_band][power_idx].tx_gain; + pwr_info->tpc.dsp_atten = + power_gain_table[a_band][power_idx].dsp_atten; + } + + /* set tx power for CCK rates, based on OFDM 12 Mbit settings*/ + pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX_TABLE]; + power = pwr_info->requested_power + + IWL_CCK_FROM_OFDM_POWER_DIFF; + pwr_index = pwr_info->power_table_index + + IWL_CCK_FROM_OFDM_INDEX_DIFF; + base_pwr_index = pwr_info->base_power_index + + IWL_CCK_FROM_OFDM_INDEX_DIFF; + + /* stay within table range */ + pwr_index = iwl3945_hw_reg_fix_power_index(pwr_index); + gain = power_gain_table[a_band][pwr_index].tx_gain; + dsp_atten = power_gain_table[a_band][pwr_index].dsp_atten; + + /* fill each CCK rate's iwl3945_channel_power_info structure + * NOTE: All CCK-rate Txpwrs are the same for a given chnl! + * NOTE: CCK rates start at end of OFDM rates! */ + for (rate_index = 0; + rate_index < IWL_CCK_RATES; rate_index++) { + pwr_info = &ch_info->power_info[rate_index+IWL_OFDM_RATES]; + pwr_info->requested_power = power; + pwr_info->power_table_index = pwr_index; + pwr_info->base_power_index = base_pwr_index; + pwr_info->tpc.tx_gain = gain; + pwr_info->tpc.dsp_atten = dsp_atten; + } + + /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */ + for (scan_tbl_index = 0; + scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) { + s32 actual_index = (scan_tbl_index == 0) ? + IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE; + iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index, + actual_index, clip_pwrs, ch_info, a_band); + } + } + + return 0; +} + +int iwl3945_hw_rxq_stop(struct iwl_priv *priv) +{ + int rc; + + iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0); + rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS, + FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); + if (rc < 0) + IWL_ERR(priv, "Can't stop Rx DMA.\n"); + + return 0; +} + +int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + int txq_id = txq->q.id; + + struct iwl3945_shared *shared_data = priv->_3945.shared_virt; + + shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr); + + iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0); + iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0); + + iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), + FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT | + FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF | + FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD | + FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL | + FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE); + + /* fake read to flush all prev. 
writes */ + iwl_read32(priv, FH39_TSSR_CBB_BASE); + + return 0; +} + +/* + * HCMD utils + */ +static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len) +{ + switch (cmd_id) { + case REPLY_RXON: + return sizeof(struct iwl3945_rxon_cmd); + case POWER_TABLE_CMD: + return sizeof(struct iwl3945_powertable_cmd); + default: + return len; + } +} + + +static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd, + u8 *data) +{ + struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data; + addsta->mode = cmd->mode; + memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify)); + memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo)); + addsta->station_flags = cmd->station_flags; + addsta->station_flags_msk = cmd->station_flags_msk; + addsta->tid_disable_tx = cpu_to_le16(0); + addsta->rate_n_flags = cmd->rate_n_flags; + addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid; + addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid; + addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn; + + return (u16)sizeof(struct iwl3945_addsta_cmd); +} + +static int iwl3945_add_bssid_station(struct iwl_priv *priv, + const u8 *addr, u8 *sta_id_r) +{ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + int ret; + u8 sta_id; + unsigned long flags; + + if (sta_id_r) + *sta_id_r = IWL_INVALID_STATION; + + ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id); + if (ret) { + IWL_ERR(priv, "Unable to add station %pM\n", addr); + return ret; + } + + if (sta_id_r) + *sta_id_r = sta_id; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].used |= IWL_STA_LOCAL; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return 0; +} +static int iwl3945_manage_ibss_station(struct iwl_priv *priv, + struct ieee80211_vif *vif, bool add) +{ + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + int ret; + + if (add) { + ret = iwl3945_add_bssid_station(priv, vif->bss_conf.bssid, + &vif_priv->ibss_bssid_sta_id); + if (ret) + return ret; + + iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id, + (priv->band == IEEE80211_BAND_5GHZ) ? 
+ IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP); + iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id); + + return 0; + } + + return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id, + vif->bss_conf.bssid); +} + +/** + * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table + */ +int iwl3945_init_hw_rate_table(struct iwl_priv *priv) +{ + int rc, i, index, prev_index; + struct iwl3945_rate_scaling_cmd rate_cmd = { + .reserved = {0, 0, 0}, + }; + struct iwl3945_rate_scaling_info *table = rate_cmd.table; + + for (i = 0; i < ARRAY_SIZE(iwl3945_rates); i++) { + index = iwl3945_rates[i].table_rs_index; + + table[index].rate_n_flags = + iwl3945_hw_set_rate_n_flags(iwl3945_rates[i].plcp, 0); + table[index].try_cnt = priv->retry_rate; + prev_index = iwl3945_get_prev_ieee_rate(i); + table[index].next_rate_index = + iwl3945_rates[prev_index].table_rs_index; + } + + switch (priv->band) { + case IEEE80211_BAND_5GHZ: + IWL_DEBUG_RATE(priv, "Select A mode rate scale\n"); + /* If one of the following CCK rates is used, + * have it fall back to the 6M OFDM rate */ + for (i = IWL_RATE_1M_INDEX_TABLE; + i <= IWL_RATE_11M_INDEX_TABLE; i++) + table[i].next_rate_index = + iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index; + + /* Don't fall back to CCK rates */ + table[IWL_RATE_12M_INDEX_TABLE].next_rate_index = + IWL_RATE_9M_INDEX_TABLE; + + /* Don't drop out of OFDM rates */ + table[IWL_RATE_6M_INDEX_TABLE].next_rate_index = + iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index; + break; + + case IEEE80211_BAND_2GHZ: + IWL_DEBUG_RATE(priv, "Select B/G mode rate scale\n"); + /* If an OFDM rate is used, have it fall back to the + * 1M CCK rates */ + + if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) && + iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) { + + index = IWL_FIRST_CCK_RATE; + for (i = IWL_RATE_6M_INDEX_TABLE; + i <= IWL_RATE_54M_INDEX_TABLE; i++) + table[i].next_rate_index = + iwl3945_rates[index].table_rs_index; + + index = IWL_RATE_11M_INDEX_TABLE; + /* CCK shouldn't fall back to OFDM... 
*/ + table[index].next_rate_index = IWL_RATE_5M_INDEX_TABLE; + } + break; + + default: + WARN_ON(1); + break; + } + + /* Update the rate scaling for control frame Tx */ + rate_cmd.table_id = 0; + rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), + &rate_cmd); + if (rc) + return rc; + + /* Update the rate scaling for data frame Tx */ + rate_cmd.table_id = 1; + return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), + &rate_cmd); +} + +/* Called when initializing driver */ +int iwl3945_hw_set_hw_params(struct iwl_priv *priv) +{ + memset((void *)&priv->hw_params, 0, + sizeof(struct iwl_hw_params)); + + priv->_3945.shared_virt = + dma_alloc_coherent(&priv->pci_dev->dev, + sizeof(struct iwl3945_shared), + &priv->_3945.shared_phys, GFP_KERNEL); + if (!priv->_3945.shared_virt) { + IWL_ERR(priv, "failed to allocate pci memory\n"); + return -ENOMEM; + } + + /* Assign number of Usable TX queues */ + priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; + + priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd); + priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K); + priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; + priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; + priv->hw_params.max_stations = IWL3945_STATION_COUNT; + priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL3945_BROADCAST_ID; + + priv->sta_key_max_num = STA_KEY_MAX_NUM; + + priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR; + priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL; + priv->hw_params.beacon_time_tsf_bits = IWL3945_EXT_BEACON_TIME_POS; + + return 0; +} + +unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv, + struct iwl3945_frame *frame, u8 rate) +{ + struct iwl3945_tx_beacon_cmd *tx_beacon_cmd; + unsigned int frame_size; + + tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u; + memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); + + tx_beacon_cmd->tx.sta_id = + priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id; + tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + frame_size = iwl3945_fill_beacon_frame(priv, + tx_beacon_cmd->frame, + sizeof(frame->u) - sizeof(*tx_beacon_cmd)); + + BUG_ON(frame_size > MAX_MPDU_SIZE); + tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); + + tx_beacon_cmd->tx.rate = rate; + tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK | + TX_CMD_FLG_TSF_MSK); + + /* supp_rates[0] == OFDM start at IWL_FIRST_OFDM_RATE*/ + tx_beacon_cmd->tx.supp_rates[0] = + (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; + + tx_beacon_cmd->tx.supp_rates[1] = + (IWL_CCK_BASIC_RATES_MASK & 0xF); + + return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size; +} + +void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv) +{ + priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx; + priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx; +} + +void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv) +{ + INIT_DELAYED_WORK(&priv->_3945.thermal_periodic, + iwl3945_bg_reg_txpower_periodic); +} + +void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv) +{ + cancel_delayed_work(&priv->_3945.thermal_periodic); +} + +/* check contents of special bootstrap uCode SRAM */ +static int iwl3945_verify_bsm(struct iwl_priv *priv) + { + __le32 *image = priv->ucode_boot.v_addr; + u32 len = priv->ucode_boot.len; + u32 reg; + u32 val; + + IWL_DEBUG_INFO(priv, "Begin verify bsm\n"); + + /* verify BSM SRAM contents */ + val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG); + for (reg = BSM_SRAM_LOWER_BOUND; + reg < 
BSM_SRAM_LOWER_BOUND + len; + reg += sizeof(u32), image++) { + val = iwl_legacy_read_prph(priv, reg); + if (val != le32_to_cpu(*image)) { + IWL_ERR(priv, "BSM uCode verification failed at " + "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", + BSM_SRAM_LOWER_BOUND, + reg - BSM_SRAM_LOWER_BOUND, len, + val, le32_to_cpu(*image)); + return -EIO; + } + } + + IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n"); + + return 0; +} + + +/****************************************************************************** + * + * EEPROM related functions + * + ******************************************************************************/ + +/* + * Clear the OWNER_MSK, to establish driver (instead of uCode running on + * embedded controller) as EEPROM reader; each read is a series of pulses + * to/from the EEPROM chip, not a single event, so even reads could conflict + * if they weren't arbitrated by some ownership mechanism. Here, the driver + * simply claims ownership, which should be safe when this function is called + * (i.e. before loading uCode!). + */ +static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv) +{ + _iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK); + return 0; +} + + +static void iwl3945_eeprom_release_semaphore(struct iwl_priv *priv) +{ + return; +} + + /** + * iwl3945_load_bsm - Load bootstrap instructions + * + * BSM operation: + * + * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program + * in special SRAM that does not power down during RFKILL. When powering back + * up after power-saving sleeps (or during initial uCode load), the BSM loads + * the bootstrap program into the on-board processor, and starts it. + * + * The bootstrap program loads (via DMA) instructions and data for a new + * program from host DRAM locations indicated by the host driver in the + * BSM_DRAM_* registers. Once the new program is loaded, it starts + * automatically. + * + * When initializing the NIC, the host driver points the BSM to the + * "initialize" uCode image. This uCode sets up some internal data, then + * notifies host via "initialize alive" that it is complete. + * + * The host then replaces the BSM_DRAM_* pointer values to point to the + * normal runtime uCode instructions and a backup uCode data cache buffer + * (filled initially with starting data values for the on-board processor), + * then triggers the "initialize" uCode to load and launch the runtime uCode, + * which begins normal operation. + * + * When doing a power-save shutdown, runtime uCode saves data SRAM into + * the backup data cache in DRAM before SRAM is powered down. + * + * When powering back up, the BSM loads the bootstrap program. This reloads + * the runtime uCode instructions and the backup data cache into SRAM, + * and re-launches the runtime uCode from where it left off. + */ +static int iwl3945_load_bsm(struct iwl_priv *priv) +{ + __le32 *image = priv->ucode_boot.v_addr; + u32 len = priv->ucode_boot.len; + dma_addr_t pinst; + dma_addr_t pdata; + u32 inst_len; + u32 data_len; + int rc; + int i; + u32 done; + u32 reg_offset; + + IWL_DEBUG_INFO(priv, "Begin load bsm\n"); + + /* make sure bootstrap program is no larger than BSM's SRAM size */ + if (len > IWL39_MAX_BSM_SIZE) + return -EINVAL; + + /* Tell bootstrap uCode where to find the "Initialize" uCode + * in host DRAM ... host DRAM physical address bits 31:0 for 3945. 
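[Editor's aside, not part of the patch] Near the end of iwl3945_load_bsm() below, the driver starts the bootstrap copy and then polls BSM_WR_CTRL_REG up to 100 times, delaying 10 us between reads, until the START bit clears. A generic, self-contained sketch of that bounded-poll pattern; the register accessor and delay are passed in rather than naming any kernel API:

    static int demo_poll_bit_clear(unsigned int (*read_reg)(void),
                                   unsigned int bit,
                                   void (*delay_us)(unsigned int),
                                   int max_tries)
    {
            int i;

            for (i = 0; i < max_tries; i++) {
                    if (!(read_reg() & bit))
                            return i;       /* completed after i polls */
                    delay_us(10);
            }
            return -1;                      /* timed out, caller reports error */
    }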
+ * NOTE: iwl3945_initialize_alive_start() will replace these values, + * after the "initialize" uCode has run, to point to + * runtime/protocol instructions and backup data cache. */ + pinst = priv->ucode_init.p_addr; + pdata = priv->ucode_init_data.p_addr; + inst_len = priv->ucode_init.len; + data_len = priv->ucode_init_data.len; + + iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); + + /* Fill BSM memory with bootstrap instructions */ + for (reg_offset = BSM_SRAM_LOWER_BOUND; + reg_offset < BSM_SRAM_LOWER_BOUND + len; + reg_offset += sizeof(u32), image++) + _iwl_legacy_write_prph(priv, reg_offset, + le32_to_cpu(*image)); + + rc = iwl3945_verify_bsm(priv); + if (rc) + return rc; + + /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ + iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0); + iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG, + IWL39_RTC_INST_LOWER_BOUND); + iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); + + /* Load bootstrap code into instruction SRAM now, + * to prepare to load "initialize" uCode */ + iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, + BSM_WR_CTRL_REG_BIT_START); + + /* Wait for load of bootstrap uCode to finish */ + for (i = 0; i < 100; i++) { + done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG); + if (!(done & BSM_WR_CTRL_REG_BIT_START)) + break; + udelay(10); + } + if (i < 100) + IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i); + else { + IWL_ERR(priv, "BSM write did not complete!\n"); + return -EIO; + } + + /* Enable future boot loads whenever power management unit triggers it + * (e.g. 
when powering back up after power-save shutdown) */ + iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, + BSM_WR_CTRL_REG_BIT_START_EN); + + return 0; +} + +static struct iwl_hcmd_ops iwl3945_hcmd = { + .rxon_assoc = iwl3945_send_rxon_assoc, + .commit_rxon = iwl3945_commit_rxon, +}; + +static struct iwl_lib_ops iwl3945_lib = { + .txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd, + .txq_free_tfd = iwl3945_hw_txq_free_tfd, + .txq_init = iwl3945_hw_tx_queue_init, + .load_ucode = iwl3945_load_bsm, + .dump_nic_error_log = iwl3945_dump_nic_error_log, + .apm_ops = { + .init = iwl3945_apm_init, + .config = iwl3945_nic_config, + }, + .eeprom_ops = { + .regulatory_bands = { + EEPROM_REGULATORY_BAND_1_CHANNELS, + EEPROM_REGULATORY_BAND_2_CHANNELS, + EEPROM_REGULATORY_BAND_3_CHANNELS, + EEPROM_REGULATORY_BAND_4_CHANNELS, + EEPROM_REGULATORY_BAND_5_CHANNELS, + EEPROM_REGULATORY_BAND_NO_HT40, + EEPROM_REGULATORY_BAND_NO_HT40, + }, + .acquire_semaphore = iwl3945_eeprom_acquire_semaphore, + .release_semaphore = iwl3945_eeprom_release_semaphore, + }, + .send_tx_power = iwl3945_send_tx_power, + .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr, + + .debugfs_ops = { + .rx_stats_read = iwl3945_ucode_rx_stats_read, + .tx_stats_read = iwl3945_ucode_tx_stats_read, + .general_stats_read = iwl3945_ucode_general_stats_read, + }, +}; + +static const struct iwl_legacy_ops iwl3945_legacy_ops = { + .post_associate = iwl3945_post_associate, + .config_ap = iwl3945_config_ap, + .manage_ibss_station = iwl3945_manage_ibss_station, +}; + +static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = { + .get_hcmd_size = iwl3945_get_hcmd_size, + .build_addsta_hcmd = iwl3945_build_addsta_hcmd, + .request_scan = iwl3945_request_scan, + .post_scan = iwl3945_post_scan, +}; + +static const struct iwl_ops iwl3945_ops = { + .lib = &iwl3945_lib, + .hcmd = &iwl3945_hcmd, + .utils = &iwl3945_hcmd_utils, + .led = &iwl3945_led_ops, + .legacy = &iwl3945_legacy_ops, + .ieee80211_ops = &iwl3945_hw_ops, +}; + +static struct iwl_base_params iwl3945_base_params = { + .eeprom_size = IWL3945_EEPROM_IMG_SIZE, + .num_of_queues = IWL39_NUM_QUEUES, + .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL, + .set_l0s = false, + .use_bsm = true, + .led_compensation = 64, + .wd_timeout = IWL_DEF_WD_TIMEOUT, +}; + +static struct iwl_cfg iwl3945_bg_cfg = { + .name = "3945BG", + .fw_name_pre = IWL3945_FW_PRE, + .ucode_api_max = IWL3945_UCODE_API_MAX, + .ucode_api_min = IWL3945_UCODE_API_MIN, + .sku = IWL_SKU_G, + .eeprom_ver = EEPROM_3945_EEPROM_VERSION, + .ops = &iwl3945_ops, + .mod_params = &iwl3945_mod_params, + .base_params = &iwl3945_base_params, + .led_mode = IWL_LED_BLINK, +}; + +static struct iwl_cfg iwl3945_abg_cfg = { + .name = "3945ABG", + .fw_name_pre = IWL3945_FW_PRE, + .ucode_api_max = IWL3945_UCODE_API_MAX, + .ucode_api_min = IWL3945_UCODE_API_MIN, + .sku = IWL_SKU_A|IWL_SKU_G, + .eeprom_ver = EEPROM_3945_EEPROM_VERSION, + .ops = &iwl3945_ops, + .mod_params = &iwl3945_mod_params, + .base_params = &iwl3945_base_params, + .led_mode = IWL_LED_BLINK, +}; + +DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = { + {IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)}, + {IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)}, + {IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)}, + {IWL_PCI_DEVICE(0x4227, 0x1014, iwl3945_bg_cfg)}, + {IWL_PCI_DEVICE(0x4222, PCI_ANY_ID, iwl3945_abg_cfg)}, + {IWL_PCI_DEVICE(0x4227, PCI_ANY_ID, iwl3945_abg_cfg)}, + {0} +}; + +MODULE_DEVICE_TABLE(pci, iwl3945_hw_card_ids); diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-3945.h 
b/trunk/drivers/net/wireless/iwlegacy/iwl-3945.h
new file mode 100644
index 000000000000..b118b59b71de
--- /dev/null
+++ b/trunk/drivers/net/wireless/iwlegacy/iwl-3945.h
@@ -0,0 +1,308 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless
+ *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+/*
+ * Please use this file (iwl-3945.h) for driver implementation definitions.
+ * Please use iwl-3945-commands.h for uCode API definitions.
+ * Please use iwl-3945-hw.h for hardware-related definitions.
+ */
+
+#ifndef __iwl_3945_h__
+#define __iwl_3945_h__
+
+#include <linux/pci.h> /* for struct pci_device_id */
+#include <linux/kernel.h>
+#include <net/ieee80211_radiotap.h>
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+extern const struct pci_device_id iwl3945_hw_card_ids[];
+
+#include "iwl-csr.h"
+#include "iwl-prph.h"
+#include "iwl-fh.h"
+#include "iwl-3945-hw.h"
+#include "iwl-debug.h"
+#include "iwl-power.h"
+#include "iwl-dev.h"
+#include "iwl-led.h"
+
+/* Highest firmware API version supported */
+#define IWL3945_UCODE_API_MAX 2
+
+/* Lowest firmware API version supported */
+#define IWL3945_UCODE_API_MIN 1
+
+#define IWL3945_FW_PRE "iwlwifi-3945-"
+#define _IWL3945_MODULE_FIRMWARE(api) IWL3945_FW_PRE #api ".ucode"
+#define IWL3945_MODULE_FIRMWARE(api) _IWL3945_MODULE_FIRMWARE(api)
+
+/* Default noise level to report when noise measurement is not available.
+ * This may be because we're:
+ * 1) Not associated (4965, no beacon statistics being sent to driver)
+ * 2) Scanning (noise measurement does not apply to associated channel)
+ * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
+ * Use default noise value of -127 ... this is below the range of measurable
+ * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ * Also, -127 works better than 0 when averaging frames with/without
+ * noise info (e.g. averaging might be done in app); measured dBm values are
+ * always negative ... using a negative value as the default keeps all
+ * averages within an s8's (used in some apps) range of negative values.
*/ +#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127) + +/* Module parameters accessible from iwl-*.c */ +extern struct iwl_mod_params iwl3945_mod_params; + +struct iwl3945_rate_scale_data { + u64 data; + s32 success_counter; + s32 success_ratio; + s32 counter; + s32 average_tpt; + unsigned long stamp; +}; + +struct iwl3945_rs_sta { + spinlock_t lock; + struct iwl_priv *priv; + s32 *expected_tpt; + unsigned long last_partial_flush; + unsigned long last_flush; + u32 flush_time; + u32 last_tx_packets; + u32 tx_packets; + u8 tgg; + u8 flush_pending; + u8 start_rate; + struct timer_list rate_scale_flush; + struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945]; +#ifdef CONFIG_MAC80211_DEBUGFS + struct dentry *rs_sta_dbgfs_stats_table_file; +#endif + + /* used to be in sta_info */ + int last_txrate_idx; +}; + + +/* + * The common struct MUST be first because it is shared between + * 3945 and 4965! + */ +struct iwl3945_sta_priv { + struct iwl_station_priv_common common; + struct iwl3945_rs_sta rs_sta; +}; + +enum iwl3945_antenna { + IWL_ANTENNA_DIVERSITY, + IWL_ANTENNA_MAIN, + IWL_ANTENNA_AUX +}; + +/* + * RTS threshold here is total size [2347] minus 4 FCS bytes + * Per spec: + * a value of 0 means RTS on all data/management packets + * a value > max MSDU size means no RTS + * else RTS for data/management frames where MPDU is larger + * than RTS value. + */ +#define DEFAULT_RTS_THRESHOLD 2347U +#define MIN_RTS_THRESHOLD 0U +#define MAX_RTS_THRESHOLD 2347U +#define MAX_MSDU_SIZE 2304U +#define MAX_MPDU_SIZE 2346U +#define DEFAULT_BEACON_INTERVAL 100U +#define DEFAULT_SHORT_RETRY_LIMIT 7U +#define DEFAULT_LONG_RETRY_LIMIT 4U + +#define IWL_TX_FIFO_AC0 0 +#define IWL_TX_FIFO_AC1 1 +#define IWL_TX_FIFO_AC2 2 +#define IWL_TX_FIFO_AC3 3 +#define IWL_TX_FIFO_HCCA_1 5 +#define IWL_TX_FIFO_HCCA_2 6 +#define IWL_TX_FIFO_NONE 7 + +#define IEEE80211_DATA_LEN 2304 +#define IEEE80211_4ADDR_LEN 30 +#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) +#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) + +struct iwl3945_frame { + union { + struct ieee80211_hdr frame; + struct iwl3945_tx_beacon_cmd beacon; + u8 raw[IEEE80211_FRAME_LEN]; + u8 cmd[360]; + } u; + struct list_head list; +}; + +#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) +#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) +#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) + +#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 +#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 +#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 + +#define IWL_SUPPORTED_RATES_IE_LEN 8 + +#define SCAN_INTERVAL 100 + +#define MAX_TID_COUNT 9 + +#define IWL_INVALID_RATE 0xFF +#define IWL_INVALID_VALUE -1 + +#define STA_PS_STATUS_WAKE 0 +#define STA_PS_STATUS_SLEEP 1 + +struct iwl3945_ibss_seq { + u8 mac[ETH_ALEN]; + u16 seq_num; + u16 frag_num; + unsigned long packet_time; + struct list_head list; +}; + +#define IWL_RX_HDR(x) ((struct iwl3945_rx_frame_hdr *)(\ + x->u.rx_frame.stats.payload + \ + x->u.rx_frame.stats.phy_count)) +#define IWL_RX_END(x) ((struct iwl3945_rx_frame_end *)(\ + IWL_RX_HDR(x)->payload + \ + le16_to_cpu(IWL_RX_HDR(x)->len))) +#define IWL_RX_STATS(x) (&x->u.rx_frame.stats) +#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload) + + +/****************************************************************************** + * + * Functions implemented in iwl3945-base.c which are forward declared here + * for use by iwl-*.c + * + *****************************************************************************/ +extern int iwl3945_calc_db_from_ratio(int sig_ratio); +extern void 
iwl3945_rx_replenish(void *data); +extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); +extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, int left); +extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log, + char **buf, bool display); +extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv); + +/****************************************************************************** + * + * Functions implemented in iwl-[34]*.c which are forward declared here + * for use by iwl3945-base.c + * + * NOTE: The implementation of these functions are hardware specific + * which is why they are in the hardware specific files (vs. iwl-base.c) + * + * Naming convention -- + * iwl3945_ <-- Its part of iwlwifi (should be changed to iwl3945_) + * iwl3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW) + * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX) + * iwl3945_bg_ <-- Called from work queue context + * iwl3945_mac_ <-- mac80211 callback + * + ****************************************************************************/ +extern void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv); +extern void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv); +extern void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv); +extern int iwl3945_hw_rxq_stop(struct iwl_priv *priv); +extern int iwl3945_hw_set_hw_params(struct iwl_priv *priv); +extern int iwl3945_hw_nic_init(struct iwl_priv *priv); +extern int iwl3945_hw_nic_stop_master(struct iwl_priv *priv); +extern void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv); +extern void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv); +extern int iwl3945_hw_nic_reset(struct iwl_priv *priv); +extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + dma_addr_t addr, u16 len, + u8 reset, u8 pad); +extern void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, + struct iwl_tx_queue *txq); +extern int iwl3945_hw_get_temperature(struct iwl_priv *priv); +extern int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, + struct iwl_tx_queue *txq); +extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv, + struct iwl3945_frame *frame, u8 rate); +void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct ieee80211_tx_info *info, + struct ieee80211_hdr *hdr, + int sta_id, int tx_id); +extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv); +extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power); +extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl3945_reply_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +extern void iwl3945_disable_events(struct iwl_priv *priv); +extern int iwl4965_get_temperature(const struct iwl_priv *priv); +extern void iwl3945_post_associate(struct iwl_priv *priv); +extern void iwl3945_config_ap(struct iwl_priv *priv); + +extern int iwl3945_commit_rxon(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); + +/** + * iwl3945_hw_find_station - Find station id for a given BSSID + * @bssid: MAC address of station ID to find + * + * NOTE: This should not be hardware specific but the code has + * not yet been merged into a single common layer for managing the + * station tables. 
+ */ +extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid); + +extern struct ieee80211_ops iwl3945_hw_ops; + +/* + * Forward declare iwl-3945.c functions for iwl3945-base.c + */ +extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv); +extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv); +extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv); +extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv); + +extern const struct iwl_channel_info *iwl3945_get_channel_info( + const struct iwl_priv *priv, enum ieee80211_band band, u16 channel); + +extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate); + +/* scanning */ +int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif); +void iwl3945_post_scan(struct iwl_priv *priv); + +/* rates */ +extern const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945]; + +/* Requires full declaration of iwl_priv before including */ +#include "iwl-io.h" + +#endif diff --git a/trunk/drivers/net/wireless/iwlegacy/4965-calib.c b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-calib.c similarity index 55% rename from trunk/drivers/net/wireless/iwlegacy/4965-calib.c rename to trunk/drivers/net/wireless/iwlegacy/iwl-4965-calib.c index d3248e3ef23b..162d877e6869 100644 --- a/trunk/drivers/net/wireless/iwlegacy/4965-calib.c +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-calib.c @@ -63,14 +63,15 @@ #include #include -#include "common.h" -#include "4965.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-4965-calib.h" /***************************************************************************** * INIT calibrations framework *****************************************************************************/ -struct stats_general_data { +struct statistics_general_data { u32 beacon_silence_rssi_a; u32 beacon_silence_rssi_b; u32 beacon_silence_rssi_c; @@ -79,15 +80,14 @@ struct stats_general_data { u32 beacon_energy_c; }; -void -il4965_calib_free_results(struct il_priv *il) +void iwl4965_calib_free_results(struct iwl_priv *priv) { int i; - for (i = 0; i < IL_CALIB_MAX; i++) { - kfree(il->calib_results[i].buf); - il->calib_results[i].buf = NULL; - il->calib_results[i].buf_len = 0; + for (i = 0; i < IWL_CALIB_MAX; i++) { + kfree(priv->calib_results[i].buf); + priv->calib_results[i].buf = NULL; + priv->calib_results[i].buf_len = 0; } } @@ -103,9 +103,10 @@ il4965_calib_free_results(struct il_priv *il) * enough to receive all of our own network traffic, but not so * high that our DSP gets too busy trying to lock onto non-network * activity/noise. */ -static int -il4965_sens_energy_cck(struct il_priv *il, u32 norm_fa, u32 rx_enable_time, - struct stats_general_data *rx_info) +static int iwl4965_sens_energy_cck(struct iwl_priv *priv, + u32 norm_fa, + u32 rx_enable_time, + struct statistics_general_data *rx_info) { u32 max_nrg_cck = 0; int i = 0; @@ -128,22 +129,22 @@ il4965_sens_energy_cck(struct il_priv *il, u32 norm_fa, u32 rx_enable_time, u32 false_alarms = norm_fa * 200 * 1024; u32 max_false_alarms = MAX_FA_CCK * rx_enable_time; u32 min_false_alarms = MIN_FA_CCK * rx_enable_time; - struct il_sensitivity_data *data = NULL; - const struct il_sensitivity_ranges *ranges = il->hw_params.sens; + struct iwl_sensitivity_data *data = NULL; + const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; - data = &(il->sensitivity_data); + data = &(priv->sensitivity_data); data->nrg_auto_corr_silence_diff = 0; /* Find max silence rssi among all 3 receivers. 
* This is background noise, which may include transmissions from other * networks, measured during silence before our network's beacon */ - silence_rssi_a = - (u8) ((rx_info->beacon_silence_rssi_a & ALL_BAND_FILTER) >> 8); - silence_rssi_b = - (u8) ((rx_info->beacon_silence_rssi_b & ALL_BAND_FILTER) >> 8); - silence_rssi_c = - (u8) ((rx_info->beacon_silence_rssi_c & ALL_BAND_FILTER) >> 8); + silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a & + ALL_BAND_FILTER) >> 8); + silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b & + ALL_BAND_FILTER) >> 8); + silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c & + ALL_BAND_FILTER) >> 8); val = max(silence_rssi_b, silence_rssi_c); max_silence_rssi = max(silence_rssi_a, (u8) val); @@ -159,8 +160,9 @@ il4965_sens_energy_cck(struct il_priv *il, u32 norm_fa, u32 rx_enable_time, val = data->nrg_silence_rssi[i]; silence_ref = max(silence_ref, val); } - D_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n", silence_rssi_a, - silence_rssi_b, silence_rssi_c, silence_ref); + IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n", + silence_rssi_a, silence_rssi_b, silence_rssi_c, + silence_ref); /* Find max rx energy (min value!) among all 3 receivers, * measured during beacon frame. @@ -182,9 +184,9 @@ il4965_sens_energy_cck(struct il_priv *il, u32 norm_fa, u32 rx_enable_time, max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i])); max_nrg_cck += 6; - D_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n", - rx_info->beacon_energy_a, rx_info->beacon_energy_b, - rx_info->beacon_energy_c, max_nrg_cck - 6); + IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n", + rx_info->beacon_energy_a, rx_info->beacon_energy_b, + rx_info->beacon_energy_c, max_nrg_cck - 6); /* Count number of consecutive beacons with fewer-than-desired * false alarms. */ @@ -192,34 +194,35 @@ il4965_sens_energy_cck(struct il_priv *il, u32 norm_fa, u32 rx_enable_time, data->num_in_cck_no_fa++; else data->num_in_cck_no_fa = 0; - D_CALIB("consecutive bcns with few false alarms = %u\n", - data->num_in_cck_no_fa); + IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n", + data->num_in_cck_no_fa); /* If we got too many false alarms this time, reduce sensitivity */ - if (false_alarms > max_false_alarms && - data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) { - D_CALIB("norm FA %u > max FA %u\n", false_alarms, - max_false_alarms); - D_CALIB("... reducing sensitivity\n"); - data->nrg_curr_state = IL_FA_TOO_MANY; + if ((false_alarms > max_false_alarms) && + (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) { + IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n", + false_alarms, max_false_alarms); + IWL_DEBUG_CALIB(priv, "... 
reducing sensitivity\n"); + data->nrg_curr_state = IWL_FA_TOO_MANY; /* Store for "fewer than desired" on later beacon */ data->nrg_silence_ref = silence_ref; /* increase energy threshold (reduce nrg value) * to decrease sensitivity */ data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK; - /* Else if we got fewer than desired, increase sensitivity */ + /* Else if we got fewer than desired, increase sensitivity */ } else if (false_alarms < min_false_alarms) { - data->nrg_curr_state = IL_FA_TOO_FEW; + data->nrg_curr_state = IWL_FA_TOO_FEW; /* Compare silence level with silence level for most recent * healthy number or too many false alarms */ - data->nrg_auto_corr_silence_diff = - (s32) data->nrg_silence_ref - (s32) silence_ref; + data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref - + (s32)silence_ref; - D_CALIB("norm FA %u < min FA %u, silence diff %d\n", - false_alarms, min_false_alarms, - data->nrg_auto_corr_silence_diff); + IWL_DEBUG_CALIB(priv, + "norm FA %u < min FA %u, silence diff %d\n", + false_alarms, min_false_alarms, + data->nrg_auto_corr_silence_diff); /* Increase value to increase sensitivity, but only if: * 1a) previous beacon did *not* have *too many* false alarms @@ -227,22 +230,23 @@ il4965_sens_energy_cck(struct il_priv *il, u32 norm_fa, u32 rx_enable_time, * from a previous beacon with too many, or healthy # FAs * OR 2) We've seen a lot of beacons (100) with too few * false alarms */ - if (data->nrg_prev_state != IL_FA_TOO_MANY && - (data->nrg_auto_corr_silence_diff > NRG_DIFF || - data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) { + if ((data->nrg_prev_state != IWL_FA_TOO_MANY) && + ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || + (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { - D_CALIB("... increasing sensitivity\n"); + IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n"); /* Increase nrg value to increase sensitivity */ val = data->nrg_th_cck + NRG_STEP_CCK; - data->nrg_th_cck = min((u32) ranges->min_nrg_cck, val); + data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val); } else { - D_CALIB("... but not changing sensitivity\n"); + IWL_DEBUG_CALIB(priv, + "... but not changing sensitivity\n"); } - /* Else we got a healthy number of false alarms, keep status quo */ + /* Else we got a healthy number of false alarms, keep status quo */ } else { - D_CALIB(" FA in safe zone\n"); - data->nrg_curr_state = IL_FA_GOOD_RANGE; + IWL_DEBUG_CALIB(priv, " FA in safe zone\n"); + data->nrg_curr_state = IWL_FA_GOOD_RANGE; /* Store for use in "fewer than desired" with later beacon */ data->nrg_silence_ref = silence_ref; @@ -250,8 +254,8 @@ il4965_sens_energy_cck(struct il_priv *il, u32 norm_fa, u32 rx_enable_time, /* If previous beacon had too many false alarms, * give it some extra margin by reducing sensitivity again * (but don't go below measured energy of desired Rx) */ - if (IL_FA_TOO_MANY == data->nrg_prev_state) { - D_CALIB("... increasing margin\n"); + if (IWL_FA_TOO_MANY == data->nrg_prev_state) { + IWL_DEBUG_CALIB(priv, "... increasing margin\n"); if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN)) data->nrg_th_cck -= NRG_MARGIN; else @@ -265,7 +269,7 @@ il4965_sens_energy_cck(struct il_priv *il, u32 norm_fa, u32 rx_enable_time, * Lower value is higher energy, so we use max()! 
*/ data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck); - D_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck); + IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck); data->nrg_prev_state = data->nrg_curr_state; @@ -280,187 +284,190 @@ il4965_sens_energy_cck(struct il_priv *il, u32 norm_fa, u32 rx_enable_time, else { val = data->auto_corr_cck + AUTO_CORR_STEP_CCK; data->auto_corr_cck = - min((u32) ranges->auto_corr_max_cck, val); + min((u32)ranges->auto_corr_max_cck, val); } val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK; data->auto_corr_cck_mrc = - min((u32) ranges->auto_corr_max_cck_mrc, val); - } else if (false_alarms < min_false_alarms && - (data->nrg_auto_corr_silence_diff > NRG_DIFF || - data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) { + min((u32)ranges->auto_corr_max_cck_mrc, val); + } else if ((false_alarms < min_false_alarms) && + ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || + (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { /* Decrease auto_corr values to increase sensitivity */ val = data->auto_corr_cck - AUTO_CORR_STEP_CCK; - data->auto_corr_cck = max((u32) ranges->auto_corr_min_cck, val); + data->auto_corr_cck = + max((u32)ranges->auto_corr_min_cck, val); val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK; data->auto_corr_cck_mrc = - max((u32) ranges->auto_corr_min_cck_mrc, val); + max((u32)ranges->auto_corr_min_cck_mrc, val); } return 0; } -static int -il4965_sens_auto_corr_ofdm(struct il_priv *il, u32 norm_fa, u32 rx_enable_time) + +static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv, + u32 norm_fa, + u32 rx_enable_time) { u32 val; u32 false_alarms = norm_fa * 200 * 1024; u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time; u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time; - struct il_sensitivity_data *data = NULL; - const struct il_sensitivity_ranges *ranges = il->hw_params.sens; + struct iwl_sensitivity_data *data = NULL; + const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; - data = &(il->sensitivity_data); + data = &(priv->sensitivity_data); /* If we got too many false alarms this time, reduce sensitivity */ if (false_alarms > max_false_alarms) { - D_CALIB("norm FA %u > max FA %u)\n", false_alarms, - max_false_alarms); + IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n", + false_alarms, max_false_alarms); val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM; data->auto_corr_ofdm = - min((u32) ranges->auto_corr_max_ofdm, val); + min((u32)ranges->auto_corr_max_ofdm, val); val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM; data->auto_corr_ofdm_mrc = - min((u32) ranges->auto_corr_max_ofdm_mrc, val); + min((u32)ranges->auto_corr_max_ofdm_mrc, val); val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM; data->auto_corr_ofdm_x1 = - min((u32) ranges->auto_corr_max_ofdm_x1, val); + min((u32)ranges->auto_corr_max_ofdm_x1, val); val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM; data->auto_corr_ofdm_mrc_x1 = - min((u32) ranges->auto_corr_max_ofdm_mrc_x1, val); + min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val); } /* Else if we got fewer than desired, increase sensitivity */ else if (false_alarms < min_false_alarms) { - D_CALIB("norm FA %u < min FA %u\n", false_alarms, - min_false_alarms); + IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n", + false_alarms, min_false_alarms); val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM; data->auto_corr_ofdm = - max((u32) ranges->auto_corr_min_ofdm, val); + max((u32)ranges->auto_corr_min_ofdm, val); val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM; data->auto_corr_ofdm_mrc = - max((u32) 
ranges->auto_corr_min_ofdm_mrc, val); + max((u32)ranges->auto_corr_min_ofdm_mrc, val); val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM; data->auto_corr_ofdm_x1 = - max((u32) ranges->auto_corr_min_ofdm_x1, val); + max((u32)ranges->auto_corr_min_ofdm_x1, val); val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM; data->auto_corr_ofdm_mrc_x1 = - max((u32) ranges->auto_corr_min_ofdm_mrc_x1, val); + max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val); } else { - D_CALIB("min FA %u < norm FA %u < max FA %u OK\n", - min_false_alarms, false_alarms, max_false_alarms); + IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n", + min_false_alarms, false_alarms, max_false_alarms); } return 0; } -static void -il4965_prepare_legacy_sensitivity_tbl(struct il_priv *il, - struct il_sensitivity_data *data, - __le16 *tbl) +static void iwl4965_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv, + struct iwl_sensitivity_data *data, + __le16 *tbl) { - tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX] = - cpu_to_le16((u16) data->auto_corr_ofdm); - tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] = - cpu_to_le16((u16) data->auto_corr_ofdm_mrc); - tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX] = - cpu_to_le16((u16) data->auto_corr_ofdm_x1); - tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] = - cpu_to_le16((u16) data->auto_corr_ofdm_mrc_x1); - - tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] = - cpu_to_le16((u16) data->auto_corr_cck); - tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] = - cpu_to_le16((u16) data->auto_corr_cck_mrc); - - tbl[HD_MIN_ENERGY_CCK_DET_IDX] = cpu_to_le16((u16) data->nrg_th_cck); - tbl[HD_MIN_ENERGY_OFDM_DET_IDX] = cpu_to_le16((u16) data->nrg_th_ofdm); - - tbl[HD_BARKER_CORR_TH_ADD_MIN_IDX] = - cpu_to_le16(data->barker_corr_th_min); - tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX] = - cpu_to_le16(data->barker_corr_th_min_mrc); - tbl[HD_OFDM_ENERGY_TH_IN_IDX] = cpu_to_le16(data->nrg_th_cca); - - D_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n", - data->auto_corr_ofdm, data->auto_corr_ofdm_mrc, - data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1, - data->nrg_th_ofdm); - - D_CALIB("cck: ac %u mrc %u thresh %u\n", data->auto_corr_cck, - data->auto_corr_cck_mrc, data->nrg_th_cck); + tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm); + tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm_mrc); + tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm_x1); + tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1); + + tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] = + cpu_to_le16((u16)data->auto_corr_cck); + tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16((u16)data->auto_corr_cck_mrc); + + tbl[HD_MIN_ENERGY_CCK_DET_INDEX] = + cpu_to_le16((u16)data->nrg_th_cck); + tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] = + cpu_to_le16((u16)data->nrg_th_ofdm); + + tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] = + cpu_to_le16(data->barker_corr_th_min); + tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16(data->barker_corr_th_min_mrc); + tbl[HD_OFDM_ENERGY_TH_IN_INDEX] = + cpu_to_le16(data->nrg_th_cca); + + IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n", + data->auto_corr_ofdm, data->auto_corr_ofdm_mrc, + data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1, + data->nrg_th_ofdm); + + IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n", + data->auto_corr_cck, data->auto_corr_cck_mrc, + data->nrg_th_cck); } -/* Prepare a C_SENSITIVITY, send to uCode if values have changed */ -static int 
-il4965_sensitivity_write(struct il_priv *il) +/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ +static int iwl4965_sensitivity_write(struct iwl_priv *priv) { - struct il_sensitivity_cmd cmd; - struct il_sensitivity_data *data = NULL; - struct il_host_cmd cmd_out = { - .id = C_SENSITIVITY, - .len = sizeof(struct il_sensitivity_cmd), + struct iwl_sensitivity_cmd cmd; + struct iwl_sensitivity_data *data = NULL; + struct iwl_host_cmd cmd_out = { + .id = SENSITIVITY_CMD, + .len = sizeof(struct iwl_sensitivity_cmd), .flags = CMD_ASYNC, .data = &cmd, }; - data = &(il->sensitivity_data); + data = &(priv->sensitivity_data); memset(&cmd, 0, sizeof(cmd)); - il4965_prepare_legacy_sensitivity_tbl(il, data, &cmd.table[0]); + iwl4965_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]); /* Update uCode's "work" table, and copy it to DSP */ - cmd.control = C_SENSITIVITY_CONTROL_WORK_TBL; + cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE; /* Don't send command to uCode if nothing has changed */ - if (!memcmp - (&cmd.table[0], &(il->sensitivity_tbl[0]), - sizeof(u16) * HD_TBL_SIZE)) { - D_CALIB("No change in C_SENSITIVITY\n"); + if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]), + sizeof(u16)*HD_TABLE_SIZE)) { + IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n"); return 0; } /* Copy table for comparison next time */ - memcpy(&(il->sensitivity_tbl[0]), &(cmd.table[0]), - sizeof(u16) * HD_TBL_SIZE); + memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]), + sizeof(u16)*HD_TABLE_SIZE); - return il_send_cmd(il, &cmd_out); + return iwl_legacy_send_cmd(priv, &cmd_out); } -void -il4965_init_sensitivity(struct il_priv *il) +void iwl4965_init_sensitivity(struct iwl_priv *priv) { int ret = 0; int i; - struct il_sensitivity_data *data = NULL; - const struct il_sensitivity_ranges *ranges = il->hw_params.sens; + struct iwl_sensitivity_data *data = NULL; + const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; - if (il->disable_sens_cal) + if (priv->disable_sens_cal) return; - D_CALIB("Start il4965_init_sensitivity\n"); + IWL_DEBUG_CALIB(priv, "Start iwl4965_init_sensitivity\n"); /* Clear driver's sensitivity algo data */ - data = &(il->sensitivity_data); + data = &(priv->sensitivity_data); if (ranges == NULL) return; - memset(data, 0, sizeof(struct il_sensitivity_data)); + memset(data, 0, sizeof(struct iwl_sensitivity_data)); data->num_in_cck_no_fa = 0; - data->nrg_curr_state = IL_FA_TOO_MANY; - data->nrg_prev_state = IL_FA_TOO_MANY; + data->nrg_curr_state = IWL_FA_TOO_MANY; + data->nrg_prev_state = IWL_FA_TOO_MANY; data->nrg_silence_ref = 0; data->nrg_silence_idx = 0; data->nrg_energy_idx = 0; @@ -471,9 +478,9 @@ il4965_init_sensitivity(struct il_priv *il) for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) data->nrg_silence_rssi[i] = 0; - data->auto_corr_ofdm = ranges->auto_corr_min_ofdm; + data->auto_corr_ofdm = ranges->auto_corr_min_ofdm; data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc; - data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1; + data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1; data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1; data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF; data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc; @@ -488,12 +495,11 @@ il4965_init_sensitivity(struct il_priv *il) data->last_bad_plcp_cnt_cck = 0; data->last_fa_cnt_cck = 0; - ret |= il4965_sensitivity_write(il); - D_CALIB("<disable_sens_cal) + if (priv->disable_sens_cal) return; - data = &(il->sensitivity_data); + data = &(priv->sensitivity_data); 
- if (!il_is_any_associated(il)) { - D_CALIB("<< - not associated\n"); + if (!iwl_legacy_is_any_associated(priv)) { + IWL_DEBUG_CALIB(priv, "<< - not associated\n"); return; } - spin_lock_irqsave(&il->lock, flags); + spin_lock_irqsave(&priv->lock, flags); - rx_info = &(((struct il_notif_stats *)resp)->rx.general); - ofdm = &(((struct il_notif_stats *)resp)->rx.ofdm); - cck = &(((struct il_notif_stats *)resp)->rx.cck); + rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general); + ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm); + cck = &(((struct iwl_notif_statistics *)resp)->rx.cck); if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { - D_CALIB("<< invalid data.\n"); - spin_unlock_irqrestore(&il->lock, flags); + IWL_DEBUG_CALIB(priv, "<< invalid data.\n"); + spin_unlock_irqrestore(&priv->lock, flags); return; } @@ -538,27 +544,30 @@ il4965_sensitivity_calibration(struct il_priv *il, void *resp) bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err); statis.beacon_silence_rssi_a = - le32_to_cpu(rx_info->beacon_silence_rssi_a); + le32_to_cpu(rx_info->beacon_silence_rssi_a); statis.beacon_silence_rssi_b = - le32_to_cpu(rx_info->beacon_silence_rssi_b); + le32_to_cpu(rx_info->beacon_silence_rssi_b); statis.beacon_silence_rssi_c = - le32_to_cpu(rx_info->beacon_silence_rssi_c); - statis.beacon_energy_a = le32_to_cpu(rx_info->beacon_energy_a); - statis.beacon_energy_b = le32_to_cpu(rx_info->beacon_energy_b); - statis.beacon_energy_c = le32_to_cpu(rx_info->beacon_energy_c); + le32_to_cpu(rx_info->beacon_silence_rssi_c); + statis.beacon_energy_a = + le32_to_cpu(rx_info->beacon_energy_a); + statis.beacon_energy_b = + le32_to_cpu(rx_info->beacon_energy_b); + statis.beacon_energy_c = + le32_to_cpu(rx_info->beacon_energy_c); - spin_unlock_irqrestore(&il->lock, flags); + spin_unlock_irqrestore(&priv->lock, flags); - D_CALIB("rx_enable_time = %u usecs\n", rx_enable_time); + IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time); if (!rx_enable_time) { - D_CALIB("<< RX Enable Time == 0!\n"); + IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n"); return; } - /* These stats increase monotonically, and do not reset + /* These statistics increase monotonically, and do not reset * at each beacon. Calculate difference from last value, or just - * use the new stats value if it has reset or wrapped around. */ + * use the new statistics value if it has reset or wrapped around. */ if (data->last_bad_plcp_cnt_cck > bad_plcp_cck) data->last_bad_plcp_cnt_cck = bad_plcp_cck; else { @@ -591,17 +600,17 @@ il4965_sensitivity_calibration(struct il_priv *il, void *resp) norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm; norm_fa_cck = fa_cck + bad_plcp_cck; - D_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck, - bad_plcp_cck, fa_ofdm, bad_plcp_ofdm); + IWL_DEBUG_CALIB(priv, + "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck, + bad_plcp_cck, fa_ofdm, bad_plcp_ofdm); - il4965_sens_auto_corr_ofdm(il, norm_fa_ofdm, rx_enable_time); - il4965_sens_energy_cck(il, norm_fa_cck, rx_enable_time, &statis); + iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time); + iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis); - il4965_sensitivity_write(il); + iwl4965_sensitivity_write(priv); } -static inline u8 -il4965_find_first_chain(u8 mask) +static inline u8 iwl4965_find_first_chain(u8 mask) { if (mask & ANT_A) return CHAIN_A; @@ -615,8 +624,8 @@ il4965_find_first_chain(u8 mask) * disconnected. 
*/ static void -il4965_find_disconn_antenna(struct il_priv *il, u32 * average_sig, - struct il_chain_noise_data *data) +iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig, + struct iwl_chain_noise_data *data) { u32 active_chains = 0; u32 max_average_sig; @@ -625,15 +634,12 @@ il4965_find_disconn_antenna(struct il_priv *il, u32 * average_sig, u8 first_chain; u16 i = 0; - average_sig[0] = - data->chain_signal_a / - il->cfg->base_params->chain_noise_num_beacons; - average_sig[1] = - data->chain_signal_b / - il->cfg->base_params->chain_noise_num_beacons; - average_sig[2] = - data->chain_signal_c / - il->cfg->base_params->chain_noise_num_beacons; + average_sig[0] = data->chain_signal_a / + priv->cfg->base_params->chain_noise_num_beacons; + average_sig[1] = data->chain_signal_b / + priv->cfg->base_params->chain_noise_num_beacons; + average_sig[2] = data->chain_signal_c / + priv->cfg->base_params->chain_noise_num_beacons; if (average_sig[0] >= average_sig[1]) { max_average_sig = average_sig[0]; @@ -651,10 +657,10 @@ il4965_find_disconn_antenna(struct il_priv *il, u32 * average_sig, active_chains = (1 << max_average_sig_antenna_i); } - D_CALIB("average_sig: a %d b %d c %d\n", average_sig[0], average_sig[1], - average_sig[2]); - D_CALIB("max_average_sig = %d, antenna %d\n", max_average_sig, - max_average_sig_antenna_i); + IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n", + average_sig[0], average_sig[1], average_sig[2]); + IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n", + max_average_sig, max_average_sig_antenna_i); /* Compare signal strengths for all 3 receivers. */ for (i = 0; i < NUM_RX_CHAINS; i++) { @@ -667,9 +673,9 @@ il4965_find_disconn_antenna(struct il_priv *il, u32 * average_sig, data->disconn_array[i] = 1; else active_chains |= (1 << i); - D_CALIB("i = %d rssiDelta = %d " - "disconn_array[i] = %d\n", i, rssi_delta, - data->disconn_array[i]); + IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d " + "disconn_array[i] = %d\n", + i, rssi_delta, data->disconn_array[i]); } } @@ -683,110 +689,119 @@ il4965_find_disconn_antenna(struct il_priv *il, u32 * average_sig, * To be safe, simply mask out any chains that we know * are not on the device. */ - active_chains &= il->hw_params.valid_rx_ant; + active_chains &= priv->hw_params.valid_rx_ant; num_tx_chains = 0; for (i = 0; i < NUM_RX_CHAINS; i++) { /* loops on all the bits of - * il->hw_setting.valid_tx_ant */ + * priv->hw_setting.valid_tx_ant */ u8 ant_msk = (1 << i); - if (!(il->hw_params.valid_tx_ant & ant_msk)) + if (!(priv->hw_params.valid_tx_ant & ant_msk)) continue; num_tx_chains++; if (data->disconn_array[i] == 0) /* there is a Tx antenna connected */ break; - if (num_tx_chains == il->hw_params.tx_chains_num && + if (num_tx_chains == priv->hw_params.tx_chains_num && data->disconn_array[i]) { /* * If all chains are disconnected * connect the first valid tx chain */ first_chain = - il4965_find_first_chain(il->cfg->valid_tx_ant); + iwl4965_find_first_chain(priv->cfg->valid_tx_ant); data->disconn_array[first_chain] = 0; active_chains |= BIT(first_chain); - D_CALIB("All Tx chains are disconnected" - "- declare %d as connected\n", first_chain); + IWL_DEBUG_CALIB(priv, + "All Tx chains are disconnected W/A - declare %d as connected\n", + first_chain); break; } } - if (active_chains != il->hw_params.valid_rx_ant && - active_chains != il->chain_noise_data.active_chains) - D_CALIB("Detected that not all antennas are connected! 
" - "Connected: %#x, valid: %#x.\n", active_chains, - il->hw_params.valid_rx_ant); + if (active_chains != priv->hw_params.valid_rx_ant && + active_chains != priv->chain_noise_data.active_chains) + IWL_DEBUG_CALIB(priv, + "Detected that not all antennas are connected! " + "Connected: %#x, valid: %#x.\n", + active_chains, priv->hw_params.valid_rx_ant); /* Save for use within RXON, TX, SCAN commands, etc. */ data->active_chains = active_chains; - D_CALIB("active_chains (bitwise) = 0x%x\n", active_chains); + IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n", + active_chains); } -static void -il4965_gain_computation(struct il_priv *il, u32 * average_noise, - u16 min_average_noise_antenna_i, u32 min_average_noise, - u8 default_chain) +static void iwl4965_gain_computation(struct iwl_priv *priv, + u32 *average_noise, + u16 min_average_noise_antenna_i, + u32 min_average_noise, + u8 default_chain) { int i, ret; - struct il_chain_noise_data *data = &il->chain_noise_data; + struct iwl_chain_noise_data *data = &priv->chain_noise_data; data->delta_gain_code[min_average_noise_antenna_i] = 0; for (i = default_chain; i < NUM_RX_CHAINS; i++) { s32 delta_g = 0; - if (!data->disconn_array[i] && - data->delta_gain_code[i] == - CHAIN_NOISE_DELTA_GAIN_INIT_VAL) { + if (!(data->disconn_array[i]) && + (data->delta_gain_code[i] == + CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) { delta_g = average_noise[i] - min_average_noise; - data->delta_gain_code[i] = (u8) ((delta_g * 10) / 15); + data->delta_gain_code[i] = (u8)((delta_g * 10) / 15); data->delta_gain_code[i] = - min(data->delta_gain_code[i], + min(data->delta_gain_code[i], (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); data->delta_gain_code[i] = - (data->delta_gain_code[i] | (1 << 2)); + (data->delta_gain_code[i] | (1 << 2)); } else { data->delta_gain_code[i] = 0; } } - D_CALIB("delta_gain_codes: a %d b %d c %d\n", data->delta_gain_code[0], - data->delta_gain_code[1], data->delta_gain_code[2]); + IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n", + data->delta_gain_code[0], + data->delta_gain_code[1], + data->delta_gain_code[2]); /* Differential gain gets sent to uCode only once */ if (!data->radio_write) { - struct il_calib_diff_gain_cmd cmd; + struct iwl_calib_diff_gain_cmd cmd; data->radio_write = 1; memset(&cmd, 0, sizeof(cmd)); - cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD; + cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD; cmd.diff_gain_a = data->delta_gain_code[0]; cmd.diff_gain_b = data->delta_gain_code[1]; cmd.diff_gain_c = data->delta_gain_code[2]; - ret = il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd); + ret = iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, + sizeof(cmd), &cmd); if (ret) - D_CALIB("fail sending cmd " "C_PHY_CALIBRATION\n"); + IWL_DEBUG_CALIB(priv, "fail sending cmd " + "REPLY_PHY_CALIBRATION_CMD\n"); /* TODO we might want recalculate * rx_chain in rxon cmd */ /* Mark so we run this algo only once! */ - data->state = IL_CHAIN_NOISE_CALIBRATED; + data->state = IWL_CHAIN_NOISE_CALIBRATED; } } + + /* - * Accumulate 16 beacons of signal and noise stats for each of + * Accumulate 16 beacons of signal and noise statistics for each of * 3 receivers/antennas/rx-chains, then figure out: * 1) Which antennas are connected. * 2) Differential rx gain settings to balance the 3 receivers. 
*/ -void -il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp) +void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp) { - struct il_chain_noise_data *data = NULL; + struct iwl_chain_noise_data *data = NULL; u32 chain_noise_a; u32 chain_noise_b; @@ -794,8 +809,8 @@ il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp) u32 chain_sig_a; u32 chain_sig_b; u32 chain_sig_c; - u32 average_sig[NUM_RX_CHAINS] = { INITIALIZATION_VALUE }; - u32 average_noise[NUM_RX_CHAINS] = { INITIALIZATION_VALUE }; + u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; + u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE; u16 min_average_noise_antenna_i = INITIALIZATION_VALUE; u16 i = 0; @@ -804,69 +819,70 @@ il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp) u8 rxon_band24; u8 stat_band24; unsigned long flags; - struct stats_rx_non_phy *rx_info; + struct statistics_rx_non_phy *rx_info; - struct il_rxon_context *ctx = &il->ctx; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - if (il->disable_chain_noise_cal) + if (priv->disable_chain_noise_cal) return; - data = &(il->chain_noise_data); + data = &(priv->chain_noise_data); /* * Accumulate just the first "chain_noise_num_beacons" after * the first association, then we're done forever. */ - if (data->state != IL_CHAIN_NOISE_ACCUMULATE) { - if (data->state == IL_CHAIN_NOISE_ALIVE) - D_CALIB("Wait for noise calib reset\n"); + if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) { + if (data->state == IWL_CHAIN_NOISE_ALIVE) + IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n"); return; } - spin_lock_irqsave(&il->lock, flags); + spin_lock_irqsave(&priv->lock, flags); - rx_info = &(((struct il_notif_stats *)stat_resp)->rx.general); + rx_info = &(((struct iwl_notif_statistics *)stat_resp)-> + rx.general); if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { - D_CALIB(" << Interference data unavailable\n"); - spin_unlock_irqrestore(&il->lock, flags); + IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n"); + spin_unlock_irqrestore(&priv->lock, flags); return; } rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK); rxon_chnum = le16_to_cpu(ctx->staging.channel); - stat_band24 = - !!(((struct il_notif_stats *)stat_resp)-> - flag & STATS_REPLY_FLG_BAND_24G_MSK); - stat_chnum = - le32_to_cpu(((struct il_notif_stats *)stat_resp)->flag) >> 16; + stat_band24 = !!(((struct iwl_notif_statistics *) + stat_resp)->flag & + STATISTICS_REPLY_FLG_BAND_24G_MSK); + stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *) + stat_resp)->flag) >> 16; /* Make sure we accumulate data for just the associated channel * (even if scanning). 
*/ - if (rxon_chnum != stat_chnum || rxon_band24 != stat_band24) { - D_CALIB("Stats not from chan=%d, band24=%d\n", rxon_chnum, - rxon_band24); - spin_unlock_irqrestore(&il->lock, flags); + if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) { + IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n", + rxon_chnum, rxon_band24); + spin_unlock_irqrestore(&priv->lock, flags); return; } /* - * Accumulate beacon stats values across + * Accumulate beacon statistics values across * "chain_noise_num_beacons" */ - chain_noise_a = - le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; - chain_noise_b = - le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; - chain_noise_c = - le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; + chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) & + IN_BAND_FILTER; + chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) & + IN_BAND_FILTER; + chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) & + IN_BAND_FILTER; chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER; chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER; chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER; - spin_unlock_irqrestore(&il->lock, flags); + spin_unlock_irqrestore(&priv->lock, flags); data->beacon_count++; @@ -878,33 +894,34 @@ il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp) data->chain_signal_b = (chain_sig_b + data->chain_signal_b); data->chain_signal_c = (chain_sig_c + data->chain_signal_c); - D_CALIB("chan=%d, band24=%d, beacon=%d\n", rxon_chnum, rxon_band24, - data->beacon_count); - D_CALIB("chain_sig: a %d b %d c %d\n", chain_sig_a, chain_sig_b, - chain_sig_c); - D_CALIB("chain_noise: a %d b %d c %d\n", chain_noise_a, chain_noise_b, - chain_noise_c); + IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n", + rxon_chnum, rxon_band24, data->beacon_count); + IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n", + chain_sig_a, chain_sig_b, chain_sig_c); + IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n", + chain_noise_a, chain_noise_b, chain_noise_c); /* If this is the "chain_noise_num_beacons", determine: * 1) Disconnected antennas (using signal strengths) * 2) Differential gain (using silence noise) to balance receivers */ - if (data->beacon_count != il->cfg->base_params->chain_noise_num_beacons) + if (data->beacon_count != + priv->cfg->base_params->chain_noise_num_beacons) return; /* Analyze signal for disconnected antenna */ - il4965_find_disconn_antenna(il, average_sig, data); + iwl4965_find_disconn_antenna(priv, average_sig, data); /* Analyze noise for rx balance */ - average_noise[0] = - data->chain_noise_a / il->cfg->base_params->chain_noise_num_beacons; - average_noise[1] = - data->chain_noise_b / il->cfg->base_params->chain_noise_num_beacons; - average_noise[2] = - data->chain_noise_c / il->cfg->base_params->chain_noise_num_beacons; + average_noise[0] = data->chain_noise_a / + priv->cfg->base_params->chain_noise_num_beacons; + average_noise[1] = data->chain_noise_b / + priv->cfg->base_params->chain_noise_num_beacons; + average_noise[2] = data->chain_noise_c / + priv->cfg->base_params->chain_noise_num_beacons; for (i = 0; i < NUM_RX_CHAINS; i++) { - if (!data->disconn_array[i] && - average_noise[i] <= min_average_noise) { + if (!(data->disconn_array[i]) && + (average_noise[i] <= min_average_noise)) { /* This means that chain i is active and has * lower noise values so far: */ min_average_noise = average_noise[i]; @@ -912,37 +929,39 @@ 
il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp) } } - D_CALIB("average_noise: a %d b %d c %d\n", average_noise[0], - average_noise[1], average_noise[2]); + IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n", + average_noise[0], average_noise[1], + average_noise[2]); - D_CALIB("min_average_noise = %d, antenna %d\n", min_average_noise, - min_average_noise_antenna_i); + IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n", + min_average_noise, min_average_noise_antenna_i); - il4965_gain_computation(il, average_noise, min_average_noise_antenna_i, - min_average_noise, - il4965_find_first_chain(il->cfg->valid_rx_ant)); + iwl4965_gain_computation(priv, average_noise, + min_average_noise_antenna_i, min_average_noise, + iwl4965_find_first_chain(priv->cfg->valid_rx_ant)); /* Some power changes may have been made during the calibration. * Update and commit the RXON */ - if (il->cfg->ops->lib->update_chain_flags) - il->cfg->ops->lib->update_chain_flags(il); + if (priv->cfg->ops->lib->update_chain_flags) + priv->cfg->ops->lib->update_chain_flags(priv); - data->state = IL_CHAIN_NOISE_DONE; - il_power_update_mode(il, false); + data->state = IWL_CHAIN_NOISE_DONE; + iwl_legacy_power_update_mode(priv, false); } -void -il4965_reset_run_time_calib(struct il_priv *il) +void iwl4965_reset_run_time_calib(struct iwl_priv *priv) { int i; - memset(&(il->sensitivity_data), 0, sizeof(struct il_sensitivity_data)); - memset(&(il->chain_noise_data), 0, sizeof(struct il_chain_noise_data)); + memset(&(priv->sensitivity_data), 0, + sizeof(struct iwl_sensitivity_data)); + memset(&(priv->chain_noise_data), 0, + sizeof(struct iwl_chain_noise_data)); for (i = 0; i < NUM_RX_CHAINS; i++) - il->chain_noise_data.delta_gain_code[i] = - CHAIN_NOISE_DELTA_GAIN_INIT_VAL; + priv->chain_noise_data.delta_gain_code[i] = + CHAIN_NOISE_DELTA_GAIN_INIT_VAL; - /* Ask for stats now, the uCode will send notification + /* Ask for statistics now, the uCode will send notification * periodically after association */ - il_send_stats_request(il, CMD_ASYNC, true); + iwl_legacy_send_statistics_request(priv, CMD_ASYNC, true); } diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-wifi.h b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-calib.h similarity index 85% rename from trunk/drivers/net/wireless/iwlwifi/iwl-wifi.h rename to trunk/drivers/net/wireless/iwlegacy/iwl-4965-calib.h index 18501101a530..f46c80e6e005 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-wifi.h +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-calib.h @@ -59,16 +59,17 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*****************************************************************************/ +#ifndef __iwl_4965_calib_h__ +#define __iwl_4965_calib_h__ -#ifndef __iwl_wifi_h__ -#define __iwl_wifi_h__ +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-commands.h" -#include "iwl-shared.h" +void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp); +void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp); +void iwl4965_init_sensitivity(struct iwl_priv *priv); +void iwl4965_reset_run_time_calib(struct iwl_priv *priv); +void iwl4965_calib_free_results(struct iwl_priv *priv); -int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type); -void iwl_send_prio_tbl(struct iwl_trans *trans); -int iwl_init_alive_start(struct iwl_trans *trans); -int iwl_run_init_ucode(struct iwl_trans *trans); -int iwl_load_ucode_wait_alive(struct iwl_trans *trans, - enum iwl_ucode_type ucode_type); -#endif /* __iwl_wifi_h__ */ +#endif /* __iwl_4965_calib_h__ */ diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c new file mode 100644 index 000000000000..1c93665766e4 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c @@ -0,0 +1,774 @@ +/****************************************************************************** +* +* GPL LICENSE SUMMARY +* +* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of version 2 of the GNU General Public License as +* published by the Free Software Foundation. +* +* This program is distributed in the hope that it will be useful, but +* WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +* General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, +* USA +* +* The full GNU General Public License is included in this distribution +* in the file called LICENSE.GPL. +* +* Contact Information: +* Intel Linux Wireless +* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +*****************************************************************************/ +#include "iwl-4965.h" +#include "iwl-4965-debugfs.h" + +static const char *fmt_value = " %-30s %10u\n"; +static const char *fmt_table = " %-30s %10u %10u %10u %10u\n"; +static const char *fmt_header = + "%-32s current cumulative delta max\n"; + +static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz) +{ + int p = 0; + u32 flag; + + flag = le32_to_cpu(priv->_4965.statistics.flag); + + p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag); + if (flag & UCODE_STATISTICS_CLEAR_MSK) + p += scnprintf(buf + p, bufsz - p, + "\tStatistics have been cleared\n"); + p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n", + (flag & UCODE_STATISTICS_FREQUENCY_MSK) + ? "2.4 GHz" : "5.2 GHz"); + p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n", + (flag & UCODE_STATISTICS_NARROW_BAND_MSK) + ? 
"enabled" : "disabled"); + + return p; +} + +ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = sizeof(struct statistics_rx_phy) * 40 + + sizeof(struct statistics_rx_non_phy) * 40 + + sizeof(struct statistics_rx_ht_phy) * 40 + 400; + ssize_t ret; + struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm; + struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck; + struct statistics_rx_non_phy *general, *accum_general; + struct statistics_rx_non_phy *delta_general, *max_general; + struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht; + + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* + * the statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + ofdm = &priv->_4965.statistics.rx.ofdm; + cck = &priv->_4965.statistics.rx.cck; + general = &priv->_4965.statistics.rx.general; + ht = &priv->_4965.statistics.rx.ofdm_ht; + accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm; + accum_cck = &priv->_4965.accum_statistics.rx.cck; + accum_general = &priv->_4965.accum_statistics.rx.general; + accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht; + delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm; + delta_cck = &priv->_4965.delta_statistics.rx.cck; + delta_general = &priv->_4965.delta_statistics.rx.general; + delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht; + max_ofdm = &priv->_4965.max_delta.rx.ofdm; + max_cck = &priv->_4965.max_delta.rx.cck; + max_general = &priv->_4965.max_delta.rx.general; + max_ht = &priv->_4965.max_delta.rx.ofdm_ht; + + pos += iwl4965_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Rx - OFDM:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ina_cnt:", + le32_to_cpu(ofdm->ina_cnt), + accum_ofdm->ina_cnt, + delta_ofdm->ina_cnt, max_ofdm->ina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_cnt:", + le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt, + delta_ofdm->fina_cnt, max_ofdm->fina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "plcp_err:", + le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err, + delta_ofdm->plcp_err, max_ofdm->plcp_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_err:", + le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err, + delta_ofdm->crc32_err, max_ofdm->crc32_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "overrun_err:", + le32_to_cpu(ofdm->overrun_err), + accum_ofdm->overrun_err, delta_ofdm->overrun_err, + max_ofdm->overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "early_overrun_err:", + le32_to_cpu(ofdm->early_overrun_err), + accum_ofdm->early_overrun_err, + delta_ofdm->early_overrun_err, + max_ofdm->early_overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_good:", + le32_to_cpu(ofdm->crc32_good), + accum_ofdm->crc32_good, delta_ofdm->crc32_good, + max_ofdm->crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "false_alarm_cnt:", + le32_to_cpu(ofdm->false_alarm_cnt), + accum_ofdm->false_alarm_cnt, + delta_ofdm->false_alarm_cnt, + max_ofdm->false_alarm_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_sync_err_cnt:", + 
le32_to_cpu(ofdm->fina_sync_err_cnt), + accum_ofdm->fina_sync_err_cnt, + delta_ofdm->fina_sync_err_cnt, + max_ofdm->fina_sync_err_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sfd_timeout:", + le32_to_cpu(ofdm->sfd_timeout), + accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout, + max_ofdm->sfd_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_timeout:", + le32_to_cpu(ofdm->fina_timeout), + accum_ofdm->fina_timeout, delta_ofdm->fina_timeout, + max_ofdm->fina_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "unresponded_rts:", + le32_to_cpu(ofdm->unresponded_rts), + accum_ofdm->unresponded_rts, + delta_ofdm->unresponded_rts, + max_ofdm->unresponded_rts); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "rxe_frame_lmt_ovrun:", + le32_to_cpu(ofdm->rxe_frame_limit_overrun), + accum_ofdm->rxe_frame_limit_overrun, + delta_ofdm->rxe_frame_limit_overrun, + max_ofdm->rxe_frame_limit_overrun); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_ack_cnt:", + le32_to_cpu(ofdm->sent_ack_cnt), + accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt, + max_ofdm->sent_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_cts_cnt:", + le32_to_cpu(ofdm->sent_cts_cnt), + accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt, + max_ofdm->sent_cts_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_ba_rsp_cnt:", + le32_to_cpu(ofdm->sent_ba_rsp_cnt), + accum_ofdm->sent_ba_rsp_cnt, + delta_ofdm->sent_ba_rsp_cnt, + max_ofdm->sent_ba_rsp_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "dsp_self_kill:", + le32_to_cpu(ofdm->dsp_self_kill), + accum_ofdm->dsp_self_kill, + delta_ofdm->dsp_self_kill, + max_ofdm->dsp_self_kill); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "mh_format_err:", + le32_to_cpu(ofdm->mh_format_err), + accum_ofdm->mh_format_err, + delta_ofdm->mh_format_err, + max_ofdm->mh_format_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "re_acq_main_rssi_sum:", + le32_to_cpu(ofdm->re_acq_main_rssi_sum), + accum_ofdm->re_acq_main_rssi_sum, + delta_ofdm->re_acq_main_rssi_sum, + max_ofdm->re_acq_main_rssi_sum); + + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Rx - CCK:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ina_cnt:", + le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt, + delta_cck->ina_cnt, max_cck->ina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_cnt:", + le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt, + delta_cck->fina_cnt, max_cck->fina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "plcp_err:", + le32_to_cpu(cck->plcp_err), accum_cck->plcp_err, + delta_cck->plcp_err, max_cck->plcp_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_err:", + le32_to_cpu(cck->crc32_err), accum_cck->crc32_err, + delta_cck->crc32_err, max_cck->crc32_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "overrun_err:", + le32_to_cpu(cck->overrun_err), + accum_cck->overrun_err, delta_cck->overrun_err, + max_cck->overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "early_overrun_err:", + le32_to_cpu(cck->early_overrun_err), + accum_cck->early_overrun_err, + delta_cck->early_overrun_err, + max_cck->early_overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_good:", + le32_to_cpu(cck->crc32_good), accum_cck->crc32_good, + delta_cck->crc32_good, max_cck->crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "false_alarm_cnt:", + 
le32_to_cpu(cck->false_alarm_cnt), + accum_cck->false_alarm_cnt, + delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_sync_err_cnt:", + le32_to_cpu(cck->fina_sync_err_cnt), + accum_cck->fina_sync_err_cnt, + delta_cck->fina_sync_err_cnt, + max_cck->fina_sync_err_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sfd_timeout:", + le32_to_cpu(cck->sfd_timeout), + accum_cck->sfd_timeout, delta_cck->sfd_timeout, + max_cck->sfd_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_timeout:", + le32_to_cpu(cck->fina_timeout), + accum_cck->fina_timeout, delta_cck->fina_timeout, + max_cck->fina_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "unresponded_rts:", + le32_to_cpu(cck->unresponded_rts), + accum_cck->unresponded_rts, delta_cck->unresponded_rts, + max_cck->unresponded_rts); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "rxe_frame_lmt_ovrun:", + le32_to_cpu(cck->rxe_frame_limit_overrun), + accum_cck->rxe_frame_limit_overrun, + delta_cck->rxe_frame_limit_overrun, + max_cck->rxe_frame_limit_overrun); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_ack_cnt:", + le32_to_cpu(cck->sent_ack_cnt), + accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt, + max_cck->sent_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_cts_cnt:", + le32_to_cpu(cck->sent_cts_cnt), + accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt, + max_cck->sent_cts_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_ba_rsp_cnt:", + le32_to_cpu(cck->sent_ba_rsp_cnt), + accum_cck->sent_ba_rsp_cnt, + delta_cck->sent_ba_rsp_cnt, + max_cck->sent_ba_rsp_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "dsp_self_kill:", + le32_to_cpu(cck->dsp_self_kill), + accum_cck->dsp_self_kill, delta_cck->dsp_self_kill, + max_cck->dsp_self_kill); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "mh_format_err:", + le32_to_cpu(cck->mh_format_err), + accum_cck->mh_format_err, delta_cck->mh_format_err, + max_cck->mh_format_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "re_acq_main_rssi_sum:", + le32_to_cpu(cck->re_acq_main_rssi_sum), + accum_cck->re_acq_main_rssi_sum, + delta_cck->re_acq_main_rssi_sum, + max_cck->re_acq_main_rssi_sum); + + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Rx - GENERAL:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "bogus_cts:", + le32_to_cpu(general->bogus_cts), + accum_general->bogus_cts, delta_general->bogus_cts, + max_general->bogus_cts); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "bogus_ack:", + le32_to_cpu(general->bogus_ack), + accum_general->bogus_ack, delta_general->bogus_ack, + max_general->bogus_ack); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "non_bssid_frames:", + le32_to_cpu(general->non_bssid_frames), + accum_general->non_bssid_frames, + delta_general->non_bssid_frames, + max_general->non_bssid_frames); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "filtered_frames:", + le32_to_cpu(general->filtered_frames), + accum_general->filtered_frames, + delta_general->filtered_frames, + max_general->filtered_frames); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "non_channel_beacons:", + le32_to_cpu(general->non_channel_beacons), + accum_general->non_channel_beacons, + delta_general->non_channel_beacons, + max_general->non_channel_beacons); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "channel_beacons:", + 
le32_to_cpu(general->channel_beacons), + accum_general->channel_beacons, + delta_general->channel_beacons, + max_general->channel_beacons); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "num_missed_bcon:", + le32_to_cpu(general->num_missed_bcon), + accum_general->num_missed_bcon, + delta_general->num_missed_bcon, + max_general->num_missed_bcon); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "adc_rx_saturation_time:", + le32_to_cpu(general->adc_rx_saturation_time), + accum_general->adc_rx_saturation_time, + delta_general->adc_rx_saturation_time, + max_general->adc_rx_saturation_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ina_detect_search_tm:", + le32_to_cpu(general->ina_detection_search_time), + accum_general->ina_detection_search_time, + delta_general->ina_detection_search_time, + max_general->ina_detection_search_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_silence_rssi_a:", + le32_to_cpu(general->beacon_silence_rssi_a), + accum_general->beacon_silence_rssi_a, + delta_general->beacon_silence_rssi_a, + max_general->beacon_silence_rssi_a); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_silence_rssi_b:", + le32_to_cpu(general->beacon_silence_rssi_b), + accum_general->beacon_silence_rssi_b, + delta_general->beacon_silence_rssi_b, + max_general->beacon_silence_rssi_b); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_silence_rssi_c:", + le32_to_cpu(general->beacon_silence_rssi_c), + accum_general->beacon_silence_rssi_c, + delta_general->beacon_silence_rssi_c, + max_general->beacon_silence_rssi_c); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "interference_data_flag:", + le32_to_cpu(general->interference_data_flag), + accum_general->interference_data_flag, + delta_general->interference_data_flag, + max_general->interference_data_flag); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "channel_load:", + le32_to_cpu(general->channel_load), + accum_general->channel_load, + delta_general->channel_load, + max_general->channel_load); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "dsp_false_alarms:", + le32_to_cpu(general->dsp_false_alarms), + accum_general->dsp_false_alarms, + delta_general->dsp_false_alarms, + max_general->dsp_false_alarms); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_rssi_a:", + le32_to_cpu(general->beacon_rssi_a), + accum_general->beacon_rssi_a, + delta_general->beacon_rssi_a, + max_general->beacon_rssi_a); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_rssi_b:", + le32_to_cpu(general->beacon_rssi_b), + accum_general->beacon_rssi_b, + delta_general->beacon_rssi_b, + max_general->beacon_rssi_b); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_rssi_c:", + le32_to_cpu(general->beacon_rssi_c), + accum_general->beacon_rssi_c, + delta_general->beacon_rssi_c, + max_general->beacon_rssi_c); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_energy_a:", + le32_to_cpu(general->beacon_energy_a), + accum_general->beacon_energy_a, + delta_general->beacon_energy_a, + max_general->beacon_energy_a); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_energy_b:", + le32_to_cpu(general->beacon_energy_b), + accum_general->beacon_energy_b, + delta_general->beacon_energy_b, + max_general->beacon_energy_b); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_energy_c:", + le32_to_cpu(general->beacon_energy_c), + accum_general->beacon_energy_c, + delta_general->beacon_energy_c, + 
max_general->beacon_energy_c); + + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Rx - OFDM_HT:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "plcp_err:", + le32_to_cpu(ht->plcp_err), accum_ht->plcp_err, + delta_ht->plcp_err, max_ht->plcp_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "overrun_err:", + le32_to_cpu(ht->overrun_err), accum_ht->overrun_err, + delta_ht->overrun_err, max_ht->overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "early_overrun_err:", + le32_to_cpu(ht->early_overrun_err), + accum_ht->early_overrun_err, + delta_ht->early_overrun_err, + max_ht->early_overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_good:", + le32_to_cpu(ht->crc32_good), accum_ht->crc32_good, + delta_ht->crc32_good, max_ht->crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_err:", + le32_to_cpu(ht->crc32_err), accum_ht->crc32_err, + delta_ht->crc32_err, max_ht->crc32_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "mh_format_err:", + le32_to_cpu(ht->mh_format_err), + accum_ht->mh_format_err, + delta_ht->mh_format_err, max_ht->mh_format_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg_crc32_good:", + le32_to_cpu(ht->agg_crc32_good), + accum_ht->agg_crc32_good, + delta_ht->agg_crc32_good, max_ht->agg_crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg_mpdu_cnt:", + le32_to_cpu(ht->agg_mpdu_cnt), + accum_ht->agg_mpdu_cnt, + delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg_cnt:", + le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt, + delta_ht->agg_cnt, max_ht->agg_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "unsupport_mcs:", + le32_to_cpu(ht->unsupport_mcs), + accum_ht->unsupport_mcs, + delta_ht->unsupport_mcs, max_ht->unsupport_mcs); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +ssize_t iwl4965_ucode_tx_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = (sizeof(struct statistics_tx) * 48) + 250; + ssize_t ret; + struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx; + + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* the statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + tx = &priv->_4965.statistics.tx; + accum_tx = &priv->_4965.accum_statistics.tx; + delta_tx = &priv->_4965.delta_statistics.tx; + max_tx = &priv->_4965.max_delta.tx; + + pos += iwl4965_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Tx:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "preamble:", + le32_to_cpu(tx->preamble_cnt), + accum_tx->preamble_cnt, + delta_tx->preamble_cnt, max_tx->preamble_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "rx_detected_cnt:", + le32_to_cpu(tx->rx_detected_cnt), + accum_tx->rx_detected_cnt, + delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "bt_prio_defer_cnt:", + le32_to_cpu(tx->bt_prio_defer_cnt), + accum_tx->bt_prio_defer_cnt, + delta_tx->bt_prio_defer_cnt, + max_tx->bt_prio_defer_cnt); + pos += scnprintf(buf + pos, 
bufsz - pos, + fmt_table, "bt_prio_kill_cnt:", + le32_to_cpu(tx->bt_prio_kill_cnt), + accum_tx->bt_prio_kill_cnt, + delta_tx->bt_prio_kill_cnt, + max_tx->bt_prio_kill_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "few_bytes_cnt:", + le32_to_cpu(tx->few_bytes_cnt), + accum_tx->few_bytes_cnt, + delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "cts_timeout:", + le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout, + delta_tx->cts_timeout, max_tx->cts_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ack_timeout:", + le32_to_cpu(tx->ack_timeout), + accum_tx->ack_timeout, + delta_tx->ack_timeout, max_tx->ack_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "expected_ack_cnt:", + le32_to_cpu(tx->expected_ack_cnt), + accum_tx->expected_ack_cnt, + delta_tx->expected_ack_cnt, + max_tx->expected_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "actual_ack_cnt:", + le32_to_cpu(tx->actual_ack_cnt), + accum_tx->actual_ack_cnt, + delta_tx->actual_ack_cnt, + max_tx->actual_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "dump_msdu_cnt:", + le32_to_cpu(tx->dump_msdu_cnt), + accum_tx->dump_msdu_cnt, + delta_tx->dump_msdu_cnt, + max_tx->dump_msdu_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "abort_nxt_frame_mismatch:", + le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt), + accum_tx->burst_abort_next_frame_mismatch_cnt, + delta_tx->burst_abort_next_frame_mismatch_cnt, + max_tx->burst_abort_next_frame_mismatch_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "abort_missing_nxt_frame:", + le32_to_cpu(tx->burst_abort_missing_next_frame_cnt), + accum_tx->burst_abort_missing_next_frame_cnt, + delta_tx->burst_abort_missing_next_frame_cnt, + max_tx->burst_abort_missing_next_frame_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "cts_timeout_collision:", + le32_to_cpu(tx->cts_timeout_collision), + accum_tx->cts_timeout_collision, + delta_tx->cts_timeout_collision, + max_tx->cts_timeout_collision); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ack_ba_timeout_collision:", + le32_to_cpu(tx->ack_or_ba_timeout_collision), + accum_tx->ack_or_ba_timeout_collision, + delta_tx->ack_or_ba_timeout_collision, + max_tx->ack_or_ba_timeout_collision); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg ba_timeout:", + le32_to_cpu(tx->agg.ba_timeout), + accum_tx->agg.ba_timeout, + delta_tx->agg.ba_timeout, + max_tx->agg.ba_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg ba_resched_frames:", + le32_to_cpu(tx->agg.ba_reschedule_frames), + accum_tx->agg.ba_reschedule_frames, + delta_tx->agg.ba_reschedule_frames, + max_tx->agg.ba_reschedule_frames); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg scd_query_agg_frame:", + le32_to_cpu(tx->agg.scd_query_agg_frame_cnt), + accum_tx->agg.scd_query_agg_frame_cnt, + delta_tx->agg.scd_query_agg_frame_cnt, + max_tx->agg.scd_query_agg_frame_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg scd_query_no_agg:", + le32_to_cpu(tx->agg.scd_query_no_agg), + accum_tx->agg.scd_query_no_agg, + delta_tx->agg.scd_query_no_agg, + max_tx->agg.scd_query_no_agg); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg scd_query_agg:", + le32_to_cpu(tx->agg.scd_query_agg), + accum_tx->agg.scd_query_agg, + delta_tx->agg.scd_query_agg, + max_tx->agg.scd_query_agg); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg scd_query_mismatch:", + 
le32_to_cpu(tx->agg.scd_query_mismatch), + accum_tx->agg.scd_query_mismatch, + delta_tx->agg.scd_query_mismatch, + max_tx->agg.scd_query_mismatch); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg frame_not_ready:", + le32_to_cpu(tx->agg.frame_not_ready), + accum_tx->agg.frame_not_ready, + delta_tx->agg.frame_not_ready, + max_tx->agg.frame_not_ready); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg underrun:", + le32_to_cpu(tx->agg.underrun), + accum_tx->agg.underrun, + delta_tx->agg.underrun, max_tx->agg.underrun); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg bt_prio_kill:", + le32_to_cpu(tx->agg.bt_prio_kill), + accum_tx->agg.bt_prio_kill, + delta_tx->agg.bt_prio_kill, + max_tx->agg.bt_prio_kill); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg rx_ba_rsp_cnt:", + le32_to_cpu(tx->agg.rx_ba_rsp_cnt), + accum_tx->agg.rx_ba_rsp_cnt, + delta_tx->agg.rx_ba_rsp_cnt, + max_tx->agg.rx_ba_rsp_cnt); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +ssize_t +iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = sizeof(struct statistics_general) * 10 + 300; + ssize_t ret; + struct statistics_general_common *general, *accum_general; + struct statistics_general_common *delta_general, *max_general; + struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; + struct statistics_div *div, *accum_div, *delta_div, *max_div; + + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* the statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + general = &priv->_4965.statistics.general.common; + dbg = &priv->_4965.statistics.general.common.dbg; + div = &priv->_4965.statistics.general.common.div; + accum_general = &priv->_4965.accum_statistics.general.common; + accum_dbg = &priv->_4965.accum_statistics.general.common.dbg; + accum_div = &priv->_4965.accum_statistics.general.common.div; + delta_general = &priv->_4965.delta_statistics.general.common; + max_general = &priv->_4965.max_delta.general.common; + delta_dbg = &priv->_4965.delta_statistics.general.common.dbg; + max_dbg = &priv->_4965.max_delta.general.common.dbg; + delta_div = &priv->_4965.delta_statistics.general.common.div; + max_div = &priv->_4965.max_delta.general.common.div; + + pos += iwl4965_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_General:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_value, "temperature:", + le32_to_cpu(general->temperature)); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_value, "ttl_timestamp:", + le32_to_cpu(general->ttl_timestamp)); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "burst_check:", + le32_to_cpu(dbg->burst_check), + accum_dbg->burst_check, + delta_dbg->burst_check, max_dbg->burst_check); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "burst_count:", + le32_to_cpu(dbg->burst_count), + accum_dbg->burst_count, + delta_dbg->burst_count, max_dbg->burst_count); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "wait_for_silence_timeout_count:", + le32_to_cpu(dbg->wait_for_silence_timeout_cnt), + accum_dbg->wait_for_silence_timeout_cnt, + 
delta_dbg->wait_for_silence_timeout_cnt, + max_dbg->wait_for_silence_timeout_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sleep_time:", + le32_to_cpu(general->sleep_time), + accum_general->sleep_time, + delta_general->sleep_time, max_general->sleep_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "slots_out:", + le32_to_cpu(general->slots_out), + accum_general->slots_out, + delta_general->slots_out, max_general->slots_out); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "slots_idle:", + le32_to_cpu(general->slots_idle), + accum_general->slots_idle, + delta_general->slots_idle, max_general->slots_idle); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "tx_on_a:", + le32_to_cpu(div->tx_on_a), accum_div->tx_on_a, + delta_div->tx_on_a, max_div->tx_on_a); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "tx_on_b:", + le32_to_cpu(div->tx_on_b), accum_div->tx_on_b, + delta_div->tx_on_b, max_div->tx_on_b); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "exec_time:", + le32_to_cpu(div->exec_time), accum_div->exec_time, + delta_div->exec_time, max_div->exec_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "probe_time:", + le32_to_cpu(div->probe_time), accum_div->probe_time, + delta_div->probe_time, max_div->probe_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "rx_enable_counter:", + le32_to_cpu(general->rx_enable_counter), + accum_general->rx_enable_counter, + delta_general->rx_enable_counter, + max_general->rx_enable_counter); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "num_of_sos_states:", + le32_to_cpu(general->num_of_sos_states), + accum_general->num_of_sos_states, + delta_general->num_of_sos_states, + max_general->num_of_sos_states); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h new file mode 100644 index 000000000000..6c8e35361a9e --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h @@ -0,0 +1,59 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-debug.h" + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS +ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); +ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); +ssize_t iwl4965_ucode_general_stats_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos); +#else +static ssize_t +iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + return 0; +} +static ssize_t +iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + return 0; +} +static ssize_t +iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + return 0; +} +#endif diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c new file mode 100644 index 000000000000..cb9baab1ff7d --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c @@ -0,0 +1,154 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
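The three uCode statistics readers above all follow the same debugfs pattern: allocate a scratch buffer, append one scnprintf() line per counter (current value, accumulated value, delta since the last notification, max delta), copy the text out with simple_read_from_buffer(), then free the buffer. The stubs in iwl-4965-debugfs.h let callers keep the registration code unconditional when CONFIG_IWLWIFI_LEGACY_DEBUGFS is disabled. The sketch below shows how such a reader is typically exposed through debugfs; the file name, the parent dentry and every identifier prefixed with example_ are illustrative and not part of the patch.

/*
 * Hypothetical wiring for one of the readers above.  Assumes iwl-dev.h and
 * iwl-4965-debugfs.h are included so struct iwl_priv and the read handler
 * prototype are visible.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>

static int example_stats_open(struct inode *inode, struct file *file)
{
	/* The read handlers expect the iwl_priv pointer in private_data. */
	file->private_data = inode->i_private;
	return 0;
}

static const struct file_operations example_ucode_rx_stats_ops = {
	.read	= iwl4965_ucode_rx_stats_read,	/* handler from this patch */
	.open	= example_stats_open,
	.llseek	= default_llseek,
	.owner	= THIS_MODULE,
};

static void example_register_stats_file(struct iwl_priv *priv,
					struct dentry *parent)
{
	debugfs_create_file("ucode_rx_stats", S_IRUSR, parent, priv,
			    &example_ucode_rx_stats_ops);
}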
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + + +#include +#include +#include +#include + +#include + +#include "iwl-commands.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-debug.h" +#include "iwl-4965.h" +#include "iwl-io.h" + +/****************************************************************************** + * + * EEPROM related functions + * +******************************************************************************/ + +/* + * The device's EEPROM semaphore prevents conflicts between driver and uCode + * when accessing the EEPROM; each access is a series of pulses to/from the + * EEPROM chip, not a single event, so even reads could conflict if they + * weren't arbitrated by the semaphore. + */ +int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv) +{ + u16 count; + int ret; + + for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) { + /* Request semaphore */ + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); + + /* See if we got it */ + ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, + EEPROM_SEM_TIMEOUT); + if (ret >= 0) { + IWL_DEBUG_IO(priv, + "Acquired semaphore after %d tries.\n", + count+1); + return ret; + } + } + + return ret; +} + +void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv) +{ + iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); + +} + +int iwl4965_eeprom_check_version(struct iwl_priv *priv) +{ + u16 eeprom_ver; + u16 calib_ver; + + eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION); + calib_ver = iwl_legacy_eeprom_query16(priv, + EEPROM_4965_CALIB_VERSION_OFFSET); + + if (eeprom_ver < priv->cfg->eeprom_ver || + calib_ver < priv->cfg->eeprom_calib_ver) + goto err; + + IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n", + eeprom_ver, calib_ver); + + return 0; +err: + IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x " + "CALIB=0x%x < 0x%x\n", + eeprom_ver, priv->cfg->eeprom_ver, + calib_ver, priv->cfg->eeprom_calib_ver); + return -EINVAL; + +} + +void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac) +{ + const u8 *addr = iwl_legacy_eeprom_query_addr(priv, + EEPROM_MAC_ADDRESS); + memcpy(mac, addr, ETH_ALEN); +} diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-4965-hw.h b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-hw.h new file mode 100644 index 000000000000..fc6fa2886d9c --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-hw.h @@ -0,0 +1,811 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. 
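iwl4965_eeprom_acquire_semaphore() and iwl4965_eeprom_release_semaphore() above arbitrate EEPROM access between the driver and uCode, since each access is a series of pulses rather than a single event. A hedged sketch of the intended calling pattern follows; example_read_eeprom_words() is a stand-in for the shared word-by-word read loop, which does not live in this file.

/* Illustrative calling pattern for the semaphore helpers above. */
static int example_eeprom_access(struct iwl_priv *priv)
{
	int ret;

	/* Take the hardware semaphore so uCode cannot pulse the EEPROM
	 * while the driver is in the middle of a read sequence. */
	ret = iwl4965_eeprom_acquire_semaphore(priv);
	if (ret < 0) {
		IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
		return ret;
	}

	ret = example_read_eeprom_words(priv);	/* hypothetical helper */

	/* Always release, so uCode may use the EEPROM again. */
	iwl4965_eeprom_release_semaphore(priv);
	return ret;
}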
When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +/* + * Please use this file (iwl-4965-hw.h) only for hardware-related definitions. + * Use iwl-commands.h for uCode API definitions. + * Use iwl-dev.h for driver implementation definitions. + */ + +#ifndef __iwl_4965_hw_h__ +#define __iwl_4965_hw_h__ + +#include "iwl-fh.h" + +/* EEPROM */ +#define IWL4965_EEPROM_IMG_SIZE 1024 + +/* + * uCode queue management definitions ... + * The first queue used for block-ack aggregation is #7 (4965 only). + * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7. + */ +#define IWL49_FIRST_AMPDU_QUEUE 7 + +/* Sizes and addresses for instruction and data memory (SRAM) in + * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. 
*/ +#define IWL49_RTC_INST_LOWER_BOUND (0x000000) +#define IWL49_RTC_INST_UPPER_BOUND (0x018000) + +#define IWL49_RTC_DATA_LOWER_BOUND (0x800000) +#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000) + +#define IWL49_RTC_INST_SIZE (IWL49_RTC_INST_UPPER_BOUND - \ + IWL49_RTC_INST_LOWER_BOUND) +#define IWL49_RTC_DATA_SIZE (IWL49_RTC_DATA_UPPER_BOUND - \ + IWL49_RTC_DATA_LOWER_BOUND) + +#define IWL49_MAX_INST_SIZE IWL49_RTC_INST_SIZE +#define IWL49_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE + +/* Size of uCode instruction memory in bootstrap state machine */ +#define IWL49_MAX_BSM_SIZE BSM_SRAM_SIZE + +static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr) +{ + return (addr >= IWL49_RTC_DATA_LOWER_BOUND) && + (addr < IWL49_RTC_DATA_UPPER_BOUND); +} + +/********************* START TEMPERATURE *************************************/ + +/** + * 4965 temperature calculation. + * + * The driver must calculate the device temperature before calculating + * a txpower setting (amplifier gain is temperature dependent). The + * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration + * values used for the life of the driver, and one of which (R4) is the + * real-time temperature indicator. + * + * uCode provides all 4 values to the driver via the "initialize alive" + * notification (see struct iwl4965_init_alive_resp). After the runtime uCode + * image loads, uCode updates the R4 value via statistics notifications + * (see STATISTICS_NOTIFICATION), which occur after each received beacon + * when associated, or can be requested via REPLY_STATISTICS_CMD. + * + * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver + * must sign-extend to 32 bits before applying formula below. + * + * Formula: + * + * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8 + * + * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is + * an additional correction, which should be centered around 0 degrees + * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for + * centering the 97/100 correction around 0 degrees K. + * + * Add 273 to Kelvin value to find degrees Celsius, for comparing current + * temperature with factory-measured temperatures when calculating txpower + * settings. + */ +#define TEMPERATURE_CALIB_KELVIN_OFFSET 8 +#define TEMPERATURE_CALIB_A_VAL 259 + +/* Limit range of calculated temperature to be between these Kelvin values */ +#define IWL_TX_POWER_TEMPERATURE_MIN (263) +#define IWL_TX_POWER_TEMPERATURE_MAX (410) + +#define IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \ + (((t) < IWL_TX_POWER_TEMPERATURE_MIN) || \ + ((t) > IWL_TX_POWER_TEMPERATURE_MAX)) + +/********************* END TEMPERATURE ***************************************/ + +/********************* START TXPOWER *****************************************/ + +/** + * 4965 txpower calculations rely on information from three sources: + * + * 1) EEPROM + * 2) "initialize" alive notification + * 3) statistics notifications + * + * EEPROM data consists of: + * + * 1) Regulatory information (max txpower and channel usage flags) is provided + * separately for each channel that can possibly supported by 4965. + * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz + * (legacy) channels. + * + * See struct iwl4965_eeprom_channel for format, and struct iwl4965_eeprom + * for locations in EEPROM. + * + * 2) Factory txpower calibration information is provided separately for + * sub-bands of contiguous channels. 2.4GHz has just one sub-band, + * but 5 GHz has several sub-bands. 
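Before the txpower description continues, the temperature formula quoted in the temperature section above can be restated as a small helper. The sketch below only illustrates the arithmetic (sign-extension of the 23-bit R4 sample, the 259 * (R4 - R2) / (R3 - R1) term, the 97/100 correction and the +8 offset, followed by the sanity window); it is not the driver's actual temperature routine, and the inputs are assumed to have already been extracted from the "initialize alive" and statistics notifications.

/* Arithmetic sketch of the temperature calculation described above. */
static s32 example_4965_temperature(s32 R1, s32 R2, s32 R3, s32 raw_R4)
{
	s32 R4, temperature;

	/* R4 arrives as a 23-bit signed value: sign-extend (bit 22 is sign). */
	if (raw_R4 & (1 << 22))
		R4 = raw_R4 | ~((1 << 23) - 1);
	else
		R4 = raw_R4 & ((1 << 23) - 1);

	if (R3 == R1)		/* bogus calibration data; avoid divide by 0 */
		return -1;

	/* degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8 */
	temperature = TEMPERATURE_CALIB_A_VAL * (R4 - R2);
	temperature /= (R3 - R1);
	temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;

	/* Reject readings outside the Kelvin sanity window defined above. */
	if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temperature))
		return -1;

	return temperature;	/* Kelvin; Celsius = Kelvin - 273 */
}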
+ * + * In addition, per-band (2.4 and 5 Ghz) saturation txpowers are provided. + * + * See struct iwl4965_eeprom_calib_info (and the tree of structures + * contained within it) for format, and struct iwl4965_eeprom for + * locations in EEPROM. + * + * "Initialization alive" notification (see struct iwl4965_init_alive_resp) + * consists of: + * + * 1) Temperature calculation parameters. + * + * 2) Power supply voltage measurement. + * + * 3) Tx gain compensation to balance 2 transmitters for MIMO use. + * + * Statistics notifications deliver: + * + * 1) Current values for temperature param R4. + */ + +/** + * To calculate a txpower setting for a given desired target txpower, channel, + * modulation bit rate, and transmitter chain (4965 has 2 transmitters to + * support MIMO and transmit diversity), driver must do the following: + * + * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel. + * Do not exceed regulatory limit; reduce target txpower if necessary. + * + * If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31), + * 2 transmitters will be used simultaneously; driver must reduce the + * regulatory limit by 3 dB (half-power) for each transmitter, so the + * combined total output of the 2 transmitters is within regulatory limits. + * + * + * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by + * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]); + * reduce target txpower if necessary. + * + * Backoff values below are in 1/2 dB units (equivalent to steps in + * txpower gain tables): + * + * OFDM 6 - 36 MBit: 10 steps (5 dB) + * OFDM 48 MBit: 15 steps (7.5 dB) + * OFDM 54 MBit: 17 steps (8.5 dB) + * OFDM 60 MBit: 20 steps (10 dB) + * CCK all rates: 10 steps (5 dB) + * + * Backoff values apply to saturation txpower on a per-transmitter basis; + * when using MIMO (2 transmitters), each transmitter uses the same + * saturation level provided in EEPROM, and the same backoff values; + * no reduction (such as with regulatory txpower limits) is required. + * + * Saturation and Backoff values apply equally to 20 Mhz (legacy) channel + * widths and 40 Mhz (.11n HT40) channel widths; there is no separate + * factory measurement for ht40 channels. + * + * The result of this step is the final target txpower. The rest of + * the steps figure out the proper settings for the device to achieve + * that target txpower. + * + * + * 3) Determine (EEPROM) calibration sub band for the target channel, by + * comparing against first and last channels in each sub band + * (see struct iwl4965_eeprom_calib_subband_info). + * + * + * 4) Linearly interpolate (EEPROM) factory calibration measurement sets, + * referencing the 2 factory-measured (sample) channels within the sub band. + * + * Interpolation is based on difference between target channel's frequency + * and the sample channels' frequencies. Since channel numbers are based + * on frequency (5 MHz between each channel number), this is equivalent + * to interpolating based on channel number differences. + * + * Note that the sample channels may or may not be the channels at the + * edges of the sub band. The target channel may be "outside" of the + * span of the sampled channels. + * + * Driver may choose the pair (for 2 Tx chains) of measurements (see + * struct iwl4965_eeprom_calib_ch_info) for which the actual measured + * txpower comes closest to the desired txpower. 
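Step 4 above reduces to plain linear interpolation keyed on channel number (channels are spaced a constant 5 MHz apart, so channel numbers can stand in for frequencies). A sketch of that interpolation, assuming the two sample channels and one measured quantity per sample (for instance the factory gain index) have already been pulled from the sub-band calibration data, is shown below; names and the truncating integer division are this example's own.

/* Linearly interpolate a calibration quantity y for channel x, given the
 * two sample channels x1 and x2 of the sub-band and their measured values
 * y1 and y2. */
static s32 example_interpolate(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
{
	if (x2 == x1)		/* degenerate sub-band: single sample channel */
		return y1;

	return y1 + (x - x1) * (y2 - y1) / (x2 - x1);
}

/* e.g. a factory gain index for channel 40 from samples measured on
 * channels 36 and 44 (hypothetical values):
 *	gain_index = example_interpolate(40, 36, gi_36, 44, gi_44);
 */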
Usually, though, + * the middle set of measurements is closest to the regulatory limits, + * and is therefore a good choice for all txpower calculations (this + * assumes that high accuracy is needed for maximizing legal txpower, + * while lower txpower configurations do not need as much accuracy). + * + * Driver should interpolate both members of the chosen measurement pair, + * i.e. for both Tx chains (radio transmitters), unless the driver knows + * that only one of the chains will be used (e.g. only one tx antenna + * connected, but this should be unusual). The rate scaling algorithm + * switches antennas to find best performance, so both Tx chains will + * be used (although only one at a time) even for non-MIMO transmissions. + * + * Driver should interpolate factory values for temperature, gain table + * index, and actual power. The power amplifier detector values are + * not used by the driver. + * + * Sanity check: If the target channel happens to be one of the sample + * channels, the results should agree with the sample channel's + * measurements! + * + * + * 5) Find difference between desired txpower and (interpolated) + * factory-measured txpower. Using (interpolated) factory gain table index + * (shown elsewhere) as a starting point, adjust this index lower to + * increase txpower, or higher to decrease txpower, until the target + * txpower is reached. Each step in the gain table is 1/2 dB. + * + * For example, if factory measured txpower is 16 dBm, and target txpower + * is 13 dBm, add 6 steps to the factory gain index to reduce txpower + * by 3 dB. + * + * + * 6) Find difference between current device temperature and (interpolated) + * factory-measured temperature for sub-band. Factory values are in + * degrees Celsius. To calculate current temperature, see comments for + * "4965 temperature calculation". + * + * If current temperature is higher than factory temperature, driver must + * increase gain (lower gain table index), and vice verse. + * + * Temperature affects gain differently for different channels: + * + * 2.4 GHz all channels: 3.5 degrees per half-dB step + * 5 GHz channels 34-43: 4.5 degrees per half-dB step + * 5 GHz channels >= 44: 4.0 degrees per half-dB step + * + * NOTE: Temperature can increase rapidly when transmitting, especially + * with heavy traffic at high txpowers. Driver should update + * temperature calculations often under these conditions to + * maintain strong txpower in the face of rising temperature. + * + * + * 7) Find difference between current power supply voltage indicator + * (from "initialize alive") and factory-measured power supply voltage + * indicator (EEPROM). + * + * If the current voltage is higher (indicator is lower) than factory + * voltage, gain should be reduced (gain table index increased) by: + * + * (eeprom - current) / 7 + * + * If the current voltage is lower (indicator is higher) than factory + * voltage, gain should be increased (gain table index decreased) by: + * + * 2 * (current - eeprom) / 7 + * + * If number of index steps in either direction turns out to be > 2, + * something is wrong ... just use 0. + * + * NOTE: Voltage compensation is independent of band/channel. + * + * NOTE: "Initialize" uCode measures current voltage, which is assumed + * to be constant after this initial measurement. Voltage + * compensation for txpower (number of steps in gain table) + * may be calculated once and used until the next uCode bootload. 
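Step 7's voltage compensation is a small piece of integer arithmetic, and the rule quoted above translates directly into code. The sketch below works on the voltage *indicator* values (a higher indicator means a lower supply voltage), uses the divisor 7 from the text, and returns the number of gain-table index steps to add (positive means a higher index, i.e. lower output power); it is illustrative, and the sign convention is this example's own.

/* Voltage compensation per step 7 above.  'eeprom_ind' is the factory
 * voltage indicator from EEPROM, 'current_ind' the one reported by the
 * "initialize" uCode. */
static s32 example_voltage_compensation(s32 eeprom_ind, s32 current_ind)
{
	s32 steps;

	if (current_ind < eeprom_ind)
		/* current voltage higher than factory: reduce gain */
		steps = (eeprom_ind - current_ind) / 7;
	else
		/* current voltage lower than factory: increase gain */
		steps = -(2 * (current_ind - eeprom_ind)) / 7;

	/* "If number of index steps in either direction turns out to be
	 * > 2, something is wrong ... just use 0." */
	if (steps < -2 || steps > 2)
		steps = 0;

	return steps;
}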
+ * + * + * 8) If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31), + * adjust txpower for each transmitter chain, so txpower is balanced + * between the two chains. There are 5 pairs of tx_atten[group][chain] + * values in "initialize alive", one pair for each of 5 channel ranges: + * + * Group 0: 5 GHz channel 34-43 + * Group 1: 5 GHz channel 44-70 + * Group 2: 5 GHz channel 71-124 + * Group 3: 5 GHz channel 125-200 + * Group 4: 2.4 GHz all channels + * + * Add the tx_atten[group][chain] value to the index for the target chain. + * The values are signed, but are in pairs of 0 and a non-negative number, + * so as to reduce gain (if necessary) of the "hotter" channel. This + * avoids any need to double-check for regulatory compliance after + * this step. + * + * + * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation + * value to the index: + * + * Hardware rev B: 9 steps (4.5 dB) + * Hardware rev C: 5 steps (2.5 dB) + * + * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG, + * bits [3:2], 1 = B, 2 = C. + * + * NOTE: This compensation is in addition to any saturation backoff that + * might have been applied in an earlier step. + * + * + * 10) Select the gain table, based on band (2.4 vs 5 GHz). + * + * Limit the adjusted index to stay within the table! + * + * + * 11) Read gain table entries for DSP and radio gain, place into appropriate + * location(s) in command (struct iwl4965_txpowertable_cmd). + */ + +/** + * When MIMO is used (2 transmitters operating simultaneously), driver should + * limit each transmitter to deliver a max of 3 dB below the regulatory limit + * for the device. That is, use half power for each transmitter, so total + * txpower is within regulatory limits. + * + * The value "6" represents number of steps in gain table to reduce power 3 dB. + * Each step is 1/2 dB. + */ +#define IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6) + +/** + * CCK gain compensation. + * + * When calculating txpowers for CCK, after making sure that the target power + * is within regulatory and saturation limits, driver must additionally + * back off gain by adding these values to the gain table index. + * + * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG, + * bits [3:2], 1 = B, 2 = C. + */ +#define IWL_TX_POWER_CCK_COMPENSATION_B_STEP (9) +#define IWL_TX_POWER_CCK_COMPENSATION_C_STEP (5) + +/* + * 4965 power supply voltage compensation for txpower + */ +#define TX_POWER_IWL_VOLTAGE_CODES_PER_03V (7) + +/** + * Gain tables. + * + * The following tables contain pair of values for setting txpower, i.e. + * gain settings for the output of the device's digital signal processor (DSP), + * and for the analog gain structure of the transmitter. + * + * Each entry in the gain tables represents a step of 1/2 dB. Note that these + * are *relative* steps, not indications of absolute output power. Output + * power varies with temperature, voltage, and channel frequency, and also + * requires consideration of average power (to satisfy regulatory constraints), + * and peak power (to avoid distortion of the output signal). + * + * Each entry contains two values: + * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained + * linear value that multiplies the output of the digital signal processor, + * before being sent to the analog radio. + * 2) Radio gain. This sets the analog gain of the radio Tx path. + * It is a coarser setting, and behaves in a logarithmic (dB) fashion. 
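Steps 5 through 11 all end up as additions to a single gain-table index per rate and per Tx chain. The sketch below strings those adjustments together in the order given; every input is assumed to have been produced by the earlier steps (interpolated factory index, temperature and voltage step counts, tx_atten[group][chain] balance), the regulatory and saturation limiting of steps 1-2, including the per-chain MIMO reduction represented by IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION, is assumed to have already shaped the target txpower, and the clamping bounds come from the tables in this header. It summarizes the procedure and is not the driver's actual txpower table fill routine.

/* Order-of-operations sketch for the gain-index adjustments above.
 * All parameters are illustrative. */
static int example_gain_index(int factory_index,     /* steps 4-5 */
			      int temperature_steps, /* step 6: hotter => negative */
			      int voltage_steps,     /* step 7 */
			      int tx_atten_steps,    /* step 8: tx_atten[group][chain] */
			      int is_cck_rate,       /* step 9 */
			      int is_rev_b)          /* CSR_HW_REV_WA_REG bits [3:2] == 1 */
{
	int index = factory_index;

	index += temperature_steps;
	index += voltage_steps;
	index += tx_atten_steps;

	if (is_cck_rate)
		index += is_rev_b ? IWL_TX_POWER_CCK_COMPENSATION_B_STEP :
				    IWL_TX_POWER_CCK_COMPENSATION_C_STEP;

	/* Step 10: stay within the selected gain table.  The 2.4 GHz table
	 * starts at MIN_TX_GAIN_INDEX (0); the 5 GHz table extends down to
	 * MIN_TX_GAIN_INDEX_52GHZ_EXT (-9).  Both tables above end at 98. */
	if (index < MIN_TX_GAIN_INDEX)
		index = MIN_TX_GAIN_INDEX;
	if (index > 98)
		index = 98;

	return index;
}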
+ * + * EEPROM contains factory calibration data for txpower. This maps actual + * measured txpower levels to gain settings in the "well known" tables + * below ("well-known" means here that both factory calibration *and* the + * driver work with the same table). + * + * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table + * has an extension (into negative indexes), in case the driver needs to + * boost power setting for high device temperatures (higher than would be + * present during factory calibration). A 5 Ghz EEPROM index of "40" + * corresponds to the 49th entry in the table used by the driver. + */ +#define MIN_TX_GAIN_INDEX (0) /* highest gain, lowest idx, 2.4 */ +#define MIN_TX_GAIN_INDEX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */ + +/** + * 2.4 GHz gain table + * + * Index Dsp gain Radio gain + * 0 110 0x3f (highest gain) + * 1 104 0x3f + * 2 98 0x3f + * 3 110 0x3e + * 4 104 0x3e + * 5 98 0x3e + * 6 110 0x3d + * 7 104 0x3d + * 8 98 0x3d + * 9 110 0x3c + * 10 104 0x3c + * 11 98 0x3c + * 12 110 0x3b + * 13 104 0x3b + * 14 98 0x3b + * 15 110 0x3a + * 16 104 0x3a + * 17 98 0x3a + * 18 110 0x39 + * 19 104 0x39 + * 20 98 0x39 + * 21 110 0x38 + * 22 104 0x38 + * 23 98 0x38 + * 24 110 0x37 + * 25 104 0x37 + * 26 98 0x37 + * 27 110 0x36 + * 28 104 0x36 + * 29 98 0x36 + * 30 110 0x35 + * 31 104 0x35 + * 32 98 0x35 + * 33 110 0x34 + * 34 104 0x34 + * 35 98 0x34 + * 36 110 0x33 + * 37 104 0x33 + * 38 98 0x33 + * 39 110 0x32 + * 40 104 0x32 + * 41 98 0x32 + * 42 110 0x31 + * 43 104 0x31 + * 44 98 0x31 + * 45 110 0x30 + * 46 104 0x30 + * 47 98 0x30 + * 48 110 0x6 + * 49 104 0x6 + * 50 98 0x6 + * 51 110 0x5 + * 52 104 0x5 + * 53 98 0x5 + * 54 110 0x4 + * 55 104 0x4 + * 56 98 0x4 + * 57 110 0x3 + * 58 104 0x3 + * 59 98 0x3 + * 60 110 0x2 + * 61 104 0x2 + * 62 98 0x2 + * 63 110 0x1 + * 64 104 0x1 + * 65 98 0x1 + * 66 110 0x0 + * 67 104 0x0 + * 68 98 0x0 + * 69 97 0 + * 70 96 0 + * 71 95 0 + * 72 94 0 + * 73 93 0 + * 74 92 0 + * 75 91 0 + * 76 90 0 + * 77 89 0 + * 78 88 0 + * 79 87 0 + * 80 86 0 + * 81 85 0 + * 82 84 0 + * 83 83 0 + * 84 82 0 + * 85 81 0 + * 86 80 0 + * 87 79 0 + * 88 78 0 + * 89 77 0 + * 90 76 0 + * 91 75 0 + * 92 74 0 + * 93 73 0 + * 94 72 0 + * 95 71 0 + * 96 70 0 + * 97 69 0 + * 98 68 0 + */ + +/** + * 5 GHz gain table + * + * Index Dsp gain Radio gain + * -9 123 0x3F (highest gain) + * -8 117 0x3F + * -7 110 0x3F + * -6 104 0x3F + * -5 98 0x3F + * -4 110 0x3E + * -3 104 0x3E + * -2 98 0x3E + * -1 110 0x3D + * 0 104 0x3D + * 1 98 0x3D + * 2 110 0x3C + * 3 104 0x3C + * 4 98 0x3C + * 5 110 0x3B + * 6 104 0x3B + * 7 98 0x3B + * 8 110 0x3A + * 9 104 0x3A + * 10 98 0x3A + * 11 110 0x39 + * 12 104 0x39 + * 13 98 0x39 + * 14 110 0x38 + * 15 104 0x38 + * 16 98 0x38 + * 17 110 0x37 + * 18 104 0x37 + * 19 98 0x37 + * 20 110 0x36 + * 21 104 0x36 + * 22 98 0x36 + * 23 110 0x35 + * 24 104 0x35 + * 25 98 0x35 + * 26 110 0x34 + * 27 104 0x34 + * 28 98 0x34 + * 29 110 0x33 + * 30 104 0x33 + * 31 98 0x33 + * 32 110 0x32 + * 33 104 0x32 + * 34 98 0x32 + * 35 110 0x31 + * 36 104 0x31 + * 37 98 0x31 + * 38 110 0x30 + * 39 104 0x30 + * 40 98 0x30 + * 41 110 0x25 + * 42 104 0x25 + * 43 98 0x25 + * 44 110 0x24 + * 45 104 0x24 + * 46 98 0x24 + * 47 110 0x23 + * 48 104 0x23 + * 49 98 0x23 + * 50 110 0x22 + * 51 104 0x18 + * 52 98 0x18 + * 53 110 0x17 + * 54 104 0x17 + * 55 98 0x17 + * 56 110 0x16 + * 57 104 0x16 + * 58 98 0x16 + * 59 110 0x15 + * 60 104 0x15 + * 61 98 0x15 + * 62 110 0x14 + * 63 104 0x14 + * 64 98 0x14 + * 65 110 0x13 + * 66 104 0x13 + * 67 98 0x13 + * 68 
110 0x12 + * 69 104 0x08 + * 70 98 0x08 + * 71 110 0x07 + * 72 104 0x07 + * 73 98 0x07 + * 74 110 0x06 + * 75 104 0x06 + * 76 98 0x06 + * 77 110 0x05 + * 78 104 0x05 + * 79 98 0x05 + * 80 110 0x04 + * 81 104 0x04 + * 82 98 0x04 + * 83 110 0x03 + * 84 104 0x03 + * 85 98 0x03 + * 86 110 0x02 + * 87 104 0x02 + * 88 98 0x02 + * 89 110 0x01 + * 90 104 0x01 + * 91 98 0x01 + * 92 110 0x00 + * 93 104 0x00 + * 94 98 0x00 + * 95 93 0x00 + * 96 88 0x00 + * 97 83 0x00 + * 98 78 0x00 + */ + + +/** + * Sanity checks and default values for EEPROM regulatory levels. + * If EEPROM values fall outside MIN/MAX range, use default values. + * + * Regulatory limits refer to the maximum average txpower allowed by + * regulatory agencies in the geographies in which the device is meant + * to be operated. These limits are SKU-specific (i.e. geography-specific), + * and channel-specific; each channel has an individual regulatory limit + * listed in the EEPROM. + * + * Units are in half-dBm (i.e. "34" means 17 dBm). + */ +#define IWL_TX_POWER_DEFAULT_REGULATORY_24 (34) +#define IWL_TX_POWER_DEFAULT_REGULATORY_52 (34) +#define IWL_TX_POWER_REGULATORY_MIN (0) +#define IWL_TX_POWER_REGULATORY_MAX (34) + +/** + * Sanity checks and default values for EEPROM saturation levels. + * If EEPROM values fall outside MIN/MAX range, use default values. + * + * Saturation is the highest level that the output power amplifier can produce + * without significant clipping distortion. This is a "peak" power level. + * Different types of modulation (i.e. various "rates", and OFDM vs. CCK) + * require differing amounts of backoff, relative to their average power output, + * in order to avoid clipping distortion. + * + * Driver must make sure that it is violating neither the saturation limit, + * nor the regulatory limit, when calculating Tx power settings for various + * rates. + * + * Units are in half-dBm (i.e. "38" means 19 dBm). + */ +#define IWL_TX_POWER_DEFAULT_SATURATION_24 (38) +#define IWL_TX_POWER_DEFAULT_SATURATION_52 (38) +#define IWL_TX_POWER_SATURATION_MIN (20) +#define IWL_TX_POWER_SATURATION_MAX (50) + +/** + * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance) + * and thermal Txpower calibration. + * + * When calculating txpower, driver must compensate for current device + * temperature; higher temperature requires higher gain. Driver must calculate + * current temperature (see "4965 temperature calculation"), then compare vs. + * factory calibration temperature in EEPROM; if current temperature is higher + * than factory temperature, driver must *increase* gain by proportions shown + * in table below. If current temperature is lower than factory, driver must + * *decrease* gain. + * + * Different frequency ranges require different compensation, as shown below. + */ +/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */ +#define CALIB_IWL_TX_ATTEN_GR1_FCH 34 +#define CALIB_IWL_TX_ATTEN_GR1_LCH 43 + +/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */ +#define CALIB_IWL_TX_ATTEN_GR2_FCH 44 +#define CALIB_IWL_TX_ATTEN_GR2_LCH 70 + +/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */ +#define CALIB_IWL_TX_ATTEN_GR3_FCH 71 +#define CALIB_IWL_TX_ATTEN_GR3_LCH 124 + +/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */ +#define CALIB_IWL_TX_ATTEN_GR4_FCH 125 +#define CALIB_IWL_TX_ATTEN_GR4_LCH 200 + +/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. 
*/ +#define CALIB_IWL_TX_ATTEN_GR5_FCH 1 +#define CALIB_IWL_TX_ATTEN_GR5_LCH 20 + +enum { + CALIB_CH_GROUP_1 = 0, + CALIB_CH_GROUP_2 = 1, + CALIB_CH_GROUP_3 = 2, + CALIB_CH_GROUP_4 = 3, + CALIB_CH_GROUP_5 = 4, + CALIB_CH_GROUP_MAX +}; + +/********************* END TXPOWER *****************************************/ + + +/** + * Tx/Rx Queues + * + * Most communication between driver and 4965 is via queues of data buffers. + * For example, all commands that the driver issues to device's embedded + * controller (uCode) are via the command queue (one of the Tx queues). All + * uCode command responses/replies/notifications, including Rx frames, are + * conveyed from uCode to driver via the Rx queue. + * + * Most support for these queues, including handshake support, resides in + * structures in host DRAM, shared between the driver and the device. When + * allocating this memory, the driver must make sure that data written by + * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's + * cache memory), so DRAM and cache are consistent, and the device can + * immediately see changes made by the driver. + * + * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via + * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array + * in DRAM containing 256 Transmit Frame Descriptors (TFDs). + */ +#define IWL49_NUM_FIFOS 7 +#define IWL49_CMD_FIFO_NUM 4 +#define IWL49_NUM_QUEUES 16 +#define IWL49_NUM_AMPDU_QUEUES 8 + + +/** + * struct iwl4965_schedq_bc_tbl + * + * Byte Count table + * + * Each Tx queue uses a byte-count table containing 320 entries: + * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that + * duplicate the first 64 entries (to avoid wrap-around within a Tx window; + * max Tx window is 64 TFDs). + * + * When driver sets up a new TFD, it must also enter the total byte count + * of the frame to be transmitted into the corresponding entry in the byte + * count table for the chosen Tx queue. If the TFD index is 0-63, the driver + * must duplicate the byte count entry in corresponding index 256-319. + * + * padding puts each byte count table on a 1024-byte boundary; + * 4965 assumes tables are separated by 1024 bytes. + */ +struct iwl4965_scd_bc_tbl { + __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; + u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)]; +} __packed; + + +#define IWL4965_RTC_INST_LOWER_BOUND (0x000000) + +/* RSSI to dBm */ +#define IWL4965_RSSI_OFFSET 44 + +/* PCI registers */ +#define PCI_CFG_RETRY_TIMEOUT 0x041 + +/* PCI register values */ +#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01 +#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02 + +#define IWL4965_DEFAULT_TX_RETRY 15 + +/* EEPROM */ +#define IWL4965_FIRST_AMPDU_QUEUE 10 + + +#endif /* !__iwl_4965_hw_h__ */ diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-led.c new file mode 100644 index 000000000000..6862fdcaee62 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-led.c @@ -0,0 +1,73 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
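Back in iwl-4965-hw.h, the CALIB_IWL_TX_ATTEN_GR*_FCH/_LCH bounds and the CALIB_CH_GROUP_* enum above describe how channels map onto the five calibration groups used for Tx attenuation balancing and thermal txpower compensation. A sketch of that lookup follows; it is illustrative only, and the in-tree helper may differ in naming and error handling.

/* Map a channel number onto one of the five calibration groups, using the
 * first/last-channel bounds defined above.  Returns -1 for a channel that
 * falls outside every group. */
static int example_tx_atten_group(u16 channel)
{
	if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
		return CALIB_CH_GROUP_5;	/* 2.4 GHz, all channels */
	if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
		return CALIB_CH_GROUP_1;	/* 5.2 GHz ch 34-43 */
	if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
		return CALIB_CH_GROUP_2;	/* 5.3 GHz ch 44-70 */
	if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
		return CALIB_CH_GROUP_3;	/* 5.5 GHz ch 71-124 */
	if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
		return CALIB_CH_GROUP_4;	/* 5.7 GHz ch 125-200 */
	return -1;
}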
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "iwl-commands.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-4965-led.h" + +/* Send led command */ +static int +iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_LEDS_CMD, + .len = sizeof(struct iwl_led_cmd), + .data = led_cmd, + .flags = CMD_ASYNC, + .callback = NULL, + }; + u32 reg; + + reg = iwl_read32(priv, CSR_LED_REG); + if (reg != (reg & CSR_LED_BSM_CTRL_MSK)) + iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK); + + return iwl_legacy_send_cmd(priv, &cmd); +} + +/* Set led register off */ +void iwl4965_led_enable(struct iwl_priv *priv) +{ + iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON); +} + +const struct iwl_led_ops iwl4965_led_ops = { + .cmd = iwl4965_send_led_cmd, +}; diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-4965-led.h b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-led.h new file mode 100644 index 000000000000..5ed3615fc338 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-led.h @@ -0,0 +1,33 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_4965_led_h__ +#define __iwl_4965_led_h__ + +extern const struct iwl_led_ops iwl4965_led_ops; +void iwl4965_led_enable(struct iwl_priv *priv); + +#endif /* __iwl_4965_led_h__ */ diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-lib.c new file mode 100644 index 000000000000..2be6d9e3b019 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-lib.c @@ -0,0 +1,1194 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#include +#include +#include +#include +#include + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +#include "iwl-4965-hw.h" +#include "iwl-4965.h" +#include "iwl-sta.h" + +void iwl4965_check_abort_status(struct iwl_priv *priv, + u8 frame_count, u32 status) +{ + if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) { + IWL_ERR(priv, "Tx flush command to flush out all frames\n"); + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) + queue_work(priv->workqueue, &priv->tx_flush); + } +} + +/* + * EEPROM + */ +struct iwl_mod_params iwl4965_mod_params = { + .amsdu_size_8K = 1, + .restart_fw = 1, + /* the rest are 0 by default */ +}; + +void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + unsigned long flags; + int i; + spin_lock_irqsave(&rxq->lock, flags); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { + /* In the reset function, these buffers may have been allocated + * to an SKB, so we need to unmap and free potential storage */ + if (rxq->pool[i].page != NULL) { + pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + __iwl_legacy_free_pages(priv, rxq->pool[i].page); + rxq->pool[i].page = NULL; + } + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + } + + for (i = 0; i < RX_QUEUE_SIZE; i++) + rxq->queue[i] = NULL; + + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->write_actual = 0; + rxq->free_count = 0; + spin_unlock_irqrestore(&rxq->lock, flags); +} + +int 
iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + u32 rb_size; + const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ + u32 rb_timeout = 0; + + if (priv->cfg->mod_params->amsdu_size_8K) + rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; + else + rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; + + /* Stop Rx DMA */ + iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + + /* Reset driver's Rx queue write index */ + iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); + + /* Tell device where to find RBD circular buffer in DRAM */ + iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG, + (u32)(rxq->bd_dma >> 8)); + + /* Tell device where in DRAM to update its Rx status */ + iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, + rxq->rb_stts_dma >> 4); + + /* Enable Rx DMA + * Direct rx interrupts to hosts + * Rx buffer size 4 or 8k + * RB timeout 0x10 + * 256 RBDs + */ + iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, + FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | + FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | + FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK | + rb_size| + (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| + (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); + + /* Set interrupt coalescing timer to default (2048 usecs) */ + iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); + + return 0; +} + +static void iwl4965_set_pwr_vmain(struct iwl_priv *priv) +{ +/* + * (for documentation purposes) + * to set power to V_AUX, do: + + if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) + iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VAUX, + ~APMG_PS_CTRL_MSK_PWR_SRC); + */ + + iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, + ~APMG_PS_CTRL_MSK_PWR_SRC); +} + +int iwl4965_hw_nic_init(struct iwl_priv *priv) +{ + unsigned long flags; + struct iwl_rx_queue *rxq = &priv->rxq; + int ret; + + /* nic_init */ + spin_lock_irqsave(&priv->lock, flags); + priv->cfg->ops->lib->apm_ops.init(priv); + + /* Set interrupt coalescing calibration timer to default (512 usecs) */ + iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); + + spin_unlock_irqrestore(&priv->lock, flags); + + iwl4965_set_pwr_vmain(priv); + + priv->cfg->ops->lib->apm_ops.config(priv); + + /* Allocate the RX queue, or reset if it is already allocated */ + if (!rxq->bd) { + ret = iwl_legacy_rx_queue_alloc(priv); + if (ret) { + IWL_ERR(priv, "Unable to initialize Rx queue\n"); + return -ENOMEM; + } + } else + iwl4965_rx_queue_reset(priv, rxq); + + iwl4965_rx_replenish(priv); + + iwl4965_rx_init(priv, rxq); + + spin_lock_irqsave(&priv->lock, flags); + + rxq->need_update = 1; + iwl_legacy_rx_queue_update_write_ptr(priv, rxq); + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Allocate or reset and init all Tx and Command queues */ + if (!priv->txq) { + ret = iwl4965_txq_ctx_alloc(priv); + if (ret) + return ret; + } else + iwl4965_txq_ctx_reset(priv); + + set_bit(STATUS_INIT, &priv->status); + + return 0; +} + +/** + * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr + */ +static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv, + dma_addr_t dma_addr) +{ + return cpu_to_le32((u32)(dma_addr >> 8)); +} + +/** + * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool + * + * If there are slots in the RX queue that need to be restocked, + * and we have free pre-allocated buffers, fill the ranks as much + * as we can, 
pulling from rx_free. + * + * This moves the 'write' index forward to catch up with 'processed', and + * also updates the memory address in the firmware to reference the new + * target buffer. + */ +void iwl4965_rx_queue_restock(struct iwl_priv *priv) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct list_head *element; + struct iwl_rx_mem_buffer *rxb; + unsigned long flags; + + spin_lock_irqsave(&rxq->lock, flags); + while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) { + /* The overwritten rxb must be a used one */ + rxb = rxq->queue[rxq->write]; + BUG_ON(rxb && rxb->page); + + /* Get next free Rx buffer, remove from free list */ + element = rxq->rx_free.next; + rxb = list_entry(element, struct iwl_rx_mem_buffer, list); + list_del(element); + + /* Point to Rx buffer via next RBD in circular buffer */ + rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv, + rxb->page_dma); + rxq->queue[rxq->write] = rxb; + rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; + rxq->free_count--; + } + spin_unlock_irqrestore(&rxq->lock, flags); + /* If the pre-allocated buffer pool is dropping low, schedule to + * refill it */ + if (rxq->free_count <= RX_LOW_WATERMARK) + queue_work(priv->workqueue, &priv->rx_replenish); + + + /* If we've added more space for the firmware to place data, tell it. + * Increment device's write pointer in multiples of 8. */ + if (rxq->write_actual != (rxq->write & ~0x7)) { + spin_lock_irqsave(&rxq->lock, flags); + rxq->need_update = 1; + spin_unlock_irqrestore(&rxq->lock, flags); + iwl_legacy_rx_queue_update_write_ptr(priv, rxq); + } +} + +/** + * iwl4965_rx_replenish - Move all used packet from rx_used to rx_free + * + * When moving to rx_free an SKB is allocated for the slot. + * + * Also restock the Rx queue via iwl_rx_queue_restock. + * This is called as a scheduled work item (except for during initialization) + */ +static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct list_head *element; + struct iwl_rx_mem_buffer *rxb; + struct page *page; + unsigned long flags; + gfp_t gfp_mask = priority; + + while (1) { + spin_lock_irqsave(&rxq->lock, flags); + if (list_empty(&rxq->rx_used)) { + spin_unlock_irqrestore(&rxq->lock, flags); + return; + } + spin_unlock_irqrestore(&rxq->lock, flags); + + if (rxq->free_count > RX_LOW_WATERMARK) + gfp_mask |= __GFP_NOWARN; + + if (priv->hw_params.rx_page_order > 0) + gfp_mask |= __GFP_COMP; + + /* Alloc a new receive buffer */ + page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order); + if (!page) { + if (net_ratelimit()) + IWL_DEBUG_INFO(priv, "alloc_pages failed, " + "order: %d\n", + priv->hw_params.rx_page_order); + + if ((rxq->free_count <= RX_LOW_WATERMARK) && + net_ratelimit()) + IWL_CRIT(priv, + "Failed to alloc_pages with %s. " + "Only %u free buffers remaining.\n", + priority == GFP_ATOMIC ? 
+ "GFP_ATOMIC" : "GFP_KERNEL", + rxq->free_count); + /* We don't reschedule replenish work here -- we will + * call the restock method and if it still needs + * more buffers it will schedule replenish */ + return; + } + + spin_lock_irqsave(&rxq->lock, flags); + + if (list_empty(&rxq->rx_used)) { + spin_unlock_irqrestore(&rxq->lock, flags); + __free_pages(page, priv->hw_params.rx_page_order); + return; + } + element = rxq->rx_used.next; + rxb = list_entry(element, struct iwl_rx_mem_buffer, list); + list_del(element); + + spin_unlock_irqrestore(&rxq->lock, flags); + + BUG_ON(rxb->page); + rxb->page = page; + /* Get physical address of the RB */ + rxb->page_dma = pci_map_page(priv->pci_dev, page, 0, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + /* dma address must be no more than 36 bits */ + BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); + /* and also 256 byte aligned! */ + BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); + + spin_lock_irqsave(&rxq->lock, flags); + + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + priv->alloc_rxb_page++; + + spin_unlock_irqrestore(&rxq->lock, flags); + } +} + +void iwl4965_rx_replenish(struct iwl_priv *priv) +{ + unsigned long flags; + + iwl4965_rx_allocate(priv, GFP_KERNEL); + + spin_lock_irqsave(&priv->lock, flags); + iwl4965_rx_queue_restock(priv); + spin_unlock_irqrestore(&priv->lock, flags); +} + +void iwl4965_rx_replenish_now(struct iwl_priv *priv) +{ + iwl4965_rx_allocate(priv, GFP_ATOMIC); + + iwl4965_rx_queue_restock(priv); +} + +/* Assumes that the skb field of the buffers in 'pool' is kept accurate. + * If an SKB has been detached, the POOL needs to have its SKB set to NULL + * This free routine walks the list of POOL entries and if SKB is set to + * non NULL it is unmapped and freed + */ +void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + int i; + for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { + if (rxq->pool[i].page != NULL) { + pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + __iwl_legacy_free_pages(priv, rxq->pool[i].page); + rxq->pool[i].page = NULL; + } + } + + dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, + rxq->bd_dma); + dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status), + rxq->rb_stts, rxq->rb_stts_dma); + rxq->bd = NULL; + rxq->rb_stts = NULL; +} + +int iwl4965_rxq_stop(struct iwl_priv *priv) +{ + + /* stop Rx DMA */ + iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG, + FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); + + return 0; +} + +int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) +{ + int idx = 0; + int band_offset = 0; + + /* HT rate format: mac80211 wants an MCS number, which is just LSB */ + if (rate_n_flags & RATE_MCS_HT_MSK) { + idx = (rate_n_flags & 0xff); + return idx; + /* Legacy rate format, search for match in table */ + } else { + if (band == IEEE80211_BAND_5GHZ) + band_offset = IWL_FIRST_OFDM_RATE; + for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) + if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF)) + return idx - band_offset; + } + + return -1; +} + +static int iwl4965_calc_rssi(struct iwl_priv *priv, + struct iwl_rx_phy_res *rx_resp) +{ + /* data from PHY/DSP regarding signal strength, etc., + * contents are always there, not configurable by host. 
*/ + struct iwl4965_rx_non_cfg_phy *ncphy = + (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf; + u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK) + >> IWL49_AGC_DB_POS; + + u32 valid_antennae = + (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK) + >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET; + u8 max_rssi = 0; + u32 i; + + /* Find max rssi among 3 possible receivers. + * These values are measured by the digital signal processor (DSP). + * They should stay fairly constant even as the signal strength varies, + * if the radio's automatic gain control (AGC) is working right. + * AGC value (see below) will provide the "interesting" info. */ + for (i = 0; i < 3; i++) + if (valid_antennae & (1 << i)) + max_rssi = max(ncphy->rssi_info[i << 1], max_rssi); + + IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n", + ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4], + max_rssi, agc); + + /* dBm = max_rssi dB - agc dB - constant. + * Higher AGC (higher radio gain) means lower signal. */ + return max_rssi - agc - IWL4965_RSSI_OFFSET; +} + + +static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) +{ + u32 decrypt_out = 0; + + if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) == + RX_RES_STATUS_STATION_FOUND) + decrypt_out |= (RX_RES_STATUS_STATION_FOUND | + RX_RES_STATUS_NO_STATION_INFO_MISMATCH); + + decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK); + + /* packet was not encrypted */ + if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == + RX_RES_STATUS_SEC_TYPE_NONE) + return decrypt_out; + + /* packet was encrypted with unknown alg */ + if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == + RX_RES_STATUS_SEC_TYPE_ERR) + return decrypt_out; + + /* decryption was not done in HW */ + if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) != + RX_MPDU_RES_STATUS_DEC_DONE_MSK) + return decrypt_out; + + switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) { + + case RX_RES_STATUS_SEC_TYPE_CCMP: + /* alg is CCM: check MIC only */ + if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK)) + /* Bad MIC */ + decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; + else + decrypt_out |= RX_RES_STATUS_DECRYPT_OK; + + break; + + case RX_RES_STATUS_SEC_TYPE_TKIP: + if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) { + /* Bad TTAK */ + decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK; + break; + } + /* fall through if TTAK OK */ + default: + if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK)) + decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; + else + decrypt_out |= RX_RES_STATUS_DECRYPT_OK; + break; + } + + IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n", + decrypt_in, decrypt_out); + + return decrypt_out; +} + +static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + u16 len, + u32 ampdu_status, + struct iwl_rx_mem_buffer *rxb, + struct ieee80211_rx_status *stats) +{ + struct sk_buff *skb; + __le16 fc = hdr->frame_control; + + /* We only process data packets if the interface is open */ + if (unlikely(!priv->is_open)) { + IWL_DEBUG_DROP_LIMIT(priv, + "Dropping packet while interface is not open.\n"); + return; + } + + /* In case of HW accelerated crypto and bad decryption, drop */ + if (!priv->cfg->mod_params->sw_crypto && + iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats)) + return; + + skb = dev_alloc_skb(128); + if (!skb) { + IWL_ERR(priv, "dev_alloc_skb failed\n"); + return; + } + + skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len); + + iwl_legacy_update_stats(priv, false, fc, len); + 
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); + + ieee80211_rx(priv->hw, skb); + priv->alloc_rxb_page--; + rxb->page = NULL; +} + +/* Called for REPLY_RX (legacy ABG frames), or + * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */ +void iwl4965_rx_reply_rx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct ieee80211_hdr *header; + struct ieee80211_rx_status rx_status; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_rx_phy_res *phy_res; + __le32 rx_pkt_status; + struct iwl_rx_mpdu_res_start *amsdu; + u32 len; + u32 ampdu_status; + u32 rate_n_flags; + + /** + * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently. + * REPLY_RX: physical layer info is in this buffer + * REPLY_RX_MPDU_CMD: physical layer info was sent in separate + * command and cached in priv->last_phy_res + * + * Here we set up local variables depending on which command is + * received. + */ + if (pkt->hdr.cmd == REPLY_RX) { + phy_res = (struct iwl_rx_phy_res *)pkt->u.raw; + header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) + + phy_res->cfg_phy_cnt); + + len = le16_to_cpu(phy_res->byte_count); + rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) + + phy_res->cfg_phy_cnt + len); + ampdu_status = le32_to_cpu(rx_pkt_status); + } else { + if (!priv->_4965.last_phy_res_valid) { + IWL_ERR(priv, "MPDU frame without cached PHY data\n"); + return; + } + phy_res = &priv->_4965.last_phy_res; + amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw; + header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu)); + len = le16_to_cpu(amsdu->byte_count); + rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len); + ampdu_status = iwl4965_translate_rx_status(priv, + le32_to_cpu(rx_pkt_status)); + } + + if ((unlikely(phy_res->cfg_phy_cnt > 20))) { + IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n", + phy_res->cfg_phy_cnt); + return; + } + + if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) || + !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { + IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", + le32_to_cpu(rx_pkt_status)); + return; + } + + /* This will be used in several places later */ + rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); + + /* rx_status carries information about the packet to mac80211 */ + rx_status.mactime = le64_to_cpu(phy_res->timestamp); + rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? + IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + rx_status.freq = + ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel), + rx_status.band); + rx_status.rate_idx = + iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); + rx_status.flag = 0; + + /* TSF isn't reliable. In order to allow smooth user experience, + * this W/A doesn't propagate it to the mac80211 */ + /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/ + + priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); + + /* Find max signal strength (dBm) among 3 antenna/receiver chains */ + rx_status.signal = iwl4965_calc_rssi(priv, phy_res); + + iwl_legacy_dbg_log_rx_data_frame(priv, len, header); + IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n", + rx_status.signal, (unsigned long long)rx_status.mactime); + + /* + * "antenna number" + * + * It seems that the antenna field in the phy flags value + * is actually a bit field. This is undefined by radiotap, + * it wants an actual antenna number but I always get "7" + * for most legacy frames I receive indicating that the + * same frame was received on all three RX chains. 
+ * + * I think this field should be removed in favor of a + * new 802.11n radiotap field "RX chains" that is defined + * as a bitmask. + */ + rx_status.antenna = + (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) + >> RX_RES_PHY_FLAGS_ANTENNA_POS; + + /* set the preamble flag if appropriate */ + if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) + rx_status.flag |= RX_FLAG_SHORTPRE; + + /* Set up the HT phy flags */ + if (rate_n_flags & RATE_MCS_HT_MSK) + rx_status.flag |= RX_FLAG_HT; + if (rate_n_flags & RATE_MCS_HT40_MSK) + rx_status.flag |= RX_FLAG_40MHZ; + if (rate_n_flags & RATE_MCS_SGI_MSK) + rx_status.flag |= RX_FLAG_SHORT_GI; + + iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status, + rxb, &rx_status); +} + +/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). + * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */ +void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + priv->_4965.last_phy_res_valid = true; + memcpy(&priv->_4965.last_phy_res, pkt->u.raw, + sizeof(struct iwl_rx_phy_res)); +} + +static int iwl4965_get_channels_for_scan(struct iwl_priv *priv, + struct ieee80211_vif *vif, + enum ieee80211_band band, + u8 is_active, u8 n_probes, + struct iwl_scan_channel *scan_ch) +{ + struct ieee80211_channel *chan; + const struct ieee80211_supported_band *sband; + const struct iwl_channel_info *ch_info; + u16 passive_dwell = 0; + u16 active_dwell = 0; + int added, i; + u16 channel; + + sband = iwl_get_hw_mode(priv, band); + if (!sband) + return 0; + + active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes); + passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif); + + if (passive_dwell <= active_dwell) + passive_dwell = active_dwell + 1; + + for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) { + chan = priv->scan_request->channels[i]; + + if (chan->band != band) + continue; + + channel = chan->hw_value; + scan_ch->channel = cpu_to_le16(channel); + + ch_info = iwl_legacy_get_channel_info(priv, band, channel); + if (!iwl_legacy_is_channel_valid(ch_info)) { + IWL_DEBUG_SCAN(priv, + "Channel %d is INVALID for this band.\n", + channel); + continue; + } + + if (!is_active || iwl_legacy_is_channel_passive(ch_info) || + (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) + scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; + else + scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE; + + if (n_probes) + scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes); + + scan_ch->active_dwell = cpu_to_le16(active_dwell); + scan_ch->passive_dwell = cpu_to_le16(passive_dwell); + + /* Set txpower levels to defaults */ + scan_ch->dsp_atten = 110; + + /* NOTE: if we were doing 6Mb OFDM for scans we'd use + * power level: + * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3; + */ + if (band == IEEE80211_BAND_5GHZ) + scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; + else + scan_ch->tx_gain = ((1 << 5) | (5 << 3)); + + IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n", + channel, le32_to_cpu(scan_ch->type), + (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ? + "ACTIVE" : "PASSIVE", + (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ? 
+ active_dwell : passive_dwell); + + scan_ch++; + added++; + } + + IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added); + return added; +} + +int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_SCAN_CMD, + .len = sizeof(struct iwl_scan_cmd), + .flags = CMD_SIZE_HUGE, + }; + struct iwl_scan_cmd *scan; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + u32 rate_flags = 0; + u16 cmd_len; + u16 rx_chain = 0; + enum ieee80211_band band; + u8 n_probes = 0; + u8 rx_ant = priv->hw_params.valid_rx_ant; + u8 rate; + bool is_active = false; + int chan_mod; + u8 active_chains; + u8 scan_tx_antennas = priv->hw_params.valid_tx_ant; + int ret; + + lockdep_assert_held(&priv->mutex); + + if (vif) + ctx = iwl_legacy_rxon_ctx_from_vif(vif); + + if (!priv->scan_cmd) { + priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) + + IWL_MAX_SCAN_SIZE, GFP_KERNEL); + if (!priv->scan_cmd) { + IWL_DEBUG_SCAN(priv, + "fail to allocate memory for scan\n"); + return -ENOMEM; + } + } + scan = priv->scan_cmd; + memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE); + + scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; + scan->quiet_time = IWL_ACTIVE_QUIET_TIME; + + if (iwl_legacy_is_any_associated(priv)) { + u16 interval; + u32 extra; + u32 suspend_time = 100; + u32 scan_suspend_time = 100; + + IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); + interval = vif->bss_conf.beacon_int; + + scan->suspend_time = 0; + scan->max_out_time = cpu_to_le32(200 * 1024); + if (!interval) + interval = suspend_time; + + extra = (suspend_time / interval) << 22; + scan_suspend_time = (extra | + ((suspend_time % interval) * 1024)); + scan->suspend_time = cpu_to_le32(scan_suspend_time); + IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n", + scan_suspend_time, interval); + } + + if (priv->scan_request->n_ssids) { + int i, p = 0; + IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); + for (i = 0; i < priv->scan_request->n_ssids; i++) { + /* always does wildcard anyway */ + if (!priv->scan_request->ssids[i].ssid_len) + continue; + scan->direct_scan[p].id = WLAN_EID_SSID; + scan->direct_scan[p].len = + priv->scan_request->ssids[i].ssid_len; + memcpy(scan->direct_scan[p].ssid, + priv->scan_request->ssids[i].ssid, + priv->scan_request->ssids[i].ssid_len); + n_probes++; + p++; + } + is_active = true; + } else + IWL_DEBUG_SCAN(priv, "Start passive scan.\n"); + + scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; + scan->tx_cmd.sta_id = ctx->bcast_sta_id; + scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + switch (priv->scan_band) { + case IEEE80211_BAND_2GHZ: + scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; + chan_mod = le32_to_cpu( + priv->contexts[IWL_RXON_CTX_BSS].active.flags & + RXON_FLG_CHANNEL_MODE_MSK) + >> RXON_FLG_CHANNEL_MODE_POS; + if (chan_mod == CHANNEL_MODE_PURE_40) { + rate = IWL_RATE_6M_PLCP; + } else { + rate = IWL_RATE_1M_PLCP; + rate_flags = RATE_MCS_CCK_MSK; + } + break; + case IEEE80211_BAND_5GHZ: + rate = IWL_RATE_6M_PLCP; + break; + default: + IWL_WARN(priv, "Invalid scan band\n"); + return -EIO; + } + + /* + * If active scanning is requested but a certain channel is + * marked passive, we can do active scanning if we detect + * transmissions. + * + * There is an issue with some firmware versions that triggers + * a sysassert on a "good CRC threshold" of zero (== disabled), + * on a radar channel even though this means that we should NOT + * send probes. 
+ * + * The "good CRC threshold" is the number of frames that we + * need to receive during our dwell time on a channel before + * sending out probes -- setting this to a huge value will + * mean we never reach it, but at the same time work around + * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER + * here instead of IWL_GOOD_CRC_TH_DISABLED. + */ + scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT : + IWL_GOOD_CRC_TH_NEVER; + + band = priv->scan_band; + + if (priv->cfg->scan_rx_antennas[band]) + rx_ant = priv->cfg->scan_rx_antennas[band]; + + priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv, + priv->scan_tx_ant[band], + scan_tx_antennas); + rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]); + scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, rate_flags); + + /* In power save mode use one chain, otherwise use all chains */ + if (test_bit(STATUS_POWER_PMI, &priv->status)) { + /* rx_ant has been set to all valid chains previously */ + active_chains = rx_ant & + ((u8)(priv->chain_noise_data.active_chains)); + if (!active_chains) + active_chains = rx_ant; + + IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n", + priv->chain_noise_data.active_chains); + + rx_ant = iwl4965_first_antenna(active_chains); + } + + /* MIMO is not used here, but value is required */ + rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS; + rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; + rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; + rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; + scan->rx_chain = cpu_to_le16(rx_chain); + + cmd_len = iwl_legacy_fill_probe_req(priv, + (struct ieee80211_mgmt *)scan->data, + vif->addr, + priv->scan_request->ie, + priv->scan_request->ie_len, + IWL_MAX_SCAN_SIZE - sizeof(*scan)); + scan->tx_cmd.len = cpu_to_le16(cmd_len); + + scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK | + RXON_FILTER_BCON_AWARE_MSK); + + scan->channel_count = iwl4965_get_channels_for_scan(priv, vif, band, + is_active, n_probes, + (void *)&scan->data[cmd_len]); + if (scan->channel_count == 0) { + IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); + return -EIO; + } + + cmd.len += le16_to_cpu(scan->tx_cmd.len) + + scan->channel_count * sizeof(struct iwl_scan_channel); + cmd.data = scan; + scan->len = cpu_to_le16(cmd.len); + + set_bit(STATUS_SCAN_HW, &priv->status); + + ret = iwl_legacy_send_cmd_sync(priv, &cmd); + if (ret) + clear_bit(STATUS_SCAN_HW, &priv->status); + + return ret; +} + +int iwl4965_manage_ibss_station(struct iwl_priv *priv, + struct ieee80211_vif *vif, bool add) +{ + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + + if (add) + return iwl4965_add_bssid_station(priv, vif_priv->ctx, + vif->bss_conf.bssid, + &vif_priv->ibss_bssid_sta_id); + return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id, + vif->bss_conf.bssid); +} + +void iwl4965_free_tfds_in_queue(struct iwl_priv *priv, + int sta_id, int tid, int freed) +{ + lockdep_assert_held(&priv->sta_lock); + + if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed) + priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; + else { + IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n", + priv->stations[sta_id].tid[tid].tfds_in_queue, + freed); + priv->stations[sta_id].tid[tid].tfds_in_queue = 0; + } +} + +#define IWL_TX_QUEUE_MSK 0xfffff + +static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv) +{ + return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC || + priv->current_ht_config.single_chain_sufficient; +} 
+ +#define IWL_NUM_RX_CHAINS_MULTIPLE 3 +#define IWL_NUM_RX_CHAINS_SINGLE 2 +#define IWL_NUM_IDLE_CHAINS_DUAL 2 +#define IWL_NUM_IDLE_CHAINS_SINGLE 1 + +/* + * Determine how many receiver/antenna chains to use. + * + * More provides better reception via diversity. Fewer saves power + * at the expense of throughput, but only when not in powersave to + * start with. + * + * MIMO (dual stream) requires at least 2, but works better with 3. + * This does not determine *which* chains to use, just how many. + */ +static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv) +{ + /* # of Rx chains to use when expecting MIMO. */ + if (iwl4965_is_single_rx_stream(priv)) + return IWL_NUM_RX_CHAINS_SINGLE; + else + return IWL_NUM_RX_CHAINS_MULTIPLE; +} + +/* + * When we are in power saving mode, unless device support spatial + * multiplexing power save, use the active count for rx chain count. + */ +static int +iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt) +{ + /* # Rx chains when idling, depending on SMPS mode */ + switch (priv->current_ht_config.smps) { + case IEEE80211_SMPS_STATIC: + case IEEE80211_SMPS_DYNAMIC: + return IWL_NUM_IDLE_CHAINS_SINGLE; + case IEEE80211_SMPS_OFF: + return active_cnt; + default: + WARN(1, "invalid SMPS mode %d", + priv->current_ht_config.smps); + return active_cnt; + } +} + +/* up to 4 chains */ +static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap) +{ + u8 res; + res = (chain_bitmap & BIT(0)) >> 0; + res += (chain_bitmap & BIT(1)) >> 1; + res += (chain_bitmap & BIT(2)) >> 2; + res += (chain_bitmap & BIT(3)) >> 3; + return res; +} + +/** + * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image + * + * Selects how many and which Rx receivers/antennas/chains to use. + * This should not be used for scan command ... it puts data in wrong place. + */ +void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + bool is_single = iwl4965_is_single_rx_stream(priv); + bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status); + u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt; + u32 active_chains; + u16 rx_chain; + + /* Tell uCode which antennas are actually connected. + * Before first association, we assume all antennas are connected. + * Just after first association, iwl4965_chain_noise_calibration() + * checks which antennas actually *are* connected. */ + if (priv->chain_noise_data.active_chains) + active_chains = priv->chain_noise_data.active_chains; + else + active_chains = priv->hw_params.valid_rx_ant; + + rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS; + + /* How many receivers should we use? 
*/ + active_rx_cnt = iwl4965_get_active_rx_chain_count(priv); + idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt); + + + /* correct rx chain count according hw settings + * and chain noise calibration + */ + valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains); + if (valid_rx_cnt < active_rx_cnt) + active_rx_cnt = valid_rx_cnt; + + if (valid_rx_cnt < idle_rx_cnt) + idle_rx_cnt = valid_rx_cnt; + + rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS; + rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS; + + ctx->staging.rx_chain = cpu_to_le16(rx_chain); + + if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam) + ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK; + else + ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK; + + IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n", + ctx->staging.rx_chain, + active_rx_cnt, idle_rx_cnt); + + WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 || + active_rx_cnt < idle_rx_cnt); +} + +u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid) +{ + int i; + u8 ind = ant; + + for (i = 0; i < RATE_ANT_NUM - 1; i++) { + ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0; + if (valid & BIT(ind)) + return ind; + } + return ant; +} + +static const char *iwl4965_get_fh_string(int cmd) +{ + switch (cmd) { + IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG); + IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG); + IWL_CMD(FH_RSCSR_CHNL0_WPTR); + IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG); + IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG); + IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG); + IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV); + IWL_CMD(FH_TSSR_TX_STATUS_REG); + IWL_CMD(FH_TSSR_TX_ERROR_REG); + default: + return "UNKNOWN"; + } +} + +int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display) +{ + int i; +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + int pos = 0; + size_t bufsz = 0; +#endif + static const u32 fh_tbl[] = { + FH_RSCSR_CHNL0_STTS_WPTR_REG, + FH_RSCSR_CHNL0_RBDCB_BASE_REG, + FH_RSCSR_CHNL0_WPTR, + FH_MEM_RCSR_CHNL0_CONFIG_REG, + FH_MEM_RSSR_SHARED_CTRL_REG, + FH_MEM_RSSR_RX_STATUS_REG, + FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV, + FH_TSSR_TX_STATUS_REG, + FH_TSSR_TX_ERROR_REG + }; +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (display) { + bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; + *buf = kmalloc(bufsz, GFP_KERNEL); + if (!*buf) + return -ENOMEM; + pos += scnprintf(*buf + pos, bufsz - pos, + "FH register values:\n"); + for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { + pos += scnprintf(*buf + pos, bufsz - pos, + " %34s: 0X%08x\n", + iwl4965_get_fh_string(fh_tbl[i]), + iwl_legacy_read_direct32(priv, fh_tbl[i])); + } + return pos; + } +#endif + IWL_ERR(priv, "FH register values:\n"); + for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { + IWL_ERR(priv, " %34s: 0X%08x\n", + iwl4965_get_fh_string(fh_tbl[i]), + iwl_legacy_read_direct32(priv, fh_tbl[i])); + } + return 0; +} diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-rs.c new file mode 100644 index 000000000000..57ebe214e68c --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-rs.c @@ -0,0 +1,2871 @@ +/****************************************************************************** + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "iwl-dev.h" +#include "iwl-sta.h" +#include "iwl-core.h" +#include "iwl-4965.h" + +#define IWL4965_RS_NAME "iwl-4965-rs" + +#define NUM_TRY_BEFORE_ANT_TOGGLE 1 +#define IWL_NUMBER_TRY 1 +#define IWL_HT_NUMBER_TRY 3 + +#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */ +#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */ +#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */ + +/* max allowed rate miss before sync LQ cmd */ +#define IWL_MISSED_RATE_MAX 15 +/* max time to accum history 2 seconds */ +#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ) + +static u8 rs_ht_to_legacy[] = { + IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX, + IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX, + IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX, + IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX +}; + +static const u8 ant_toggle_lookup[] = { + /*ANT_NONE -> */ ANT_NONE, + /*ANT_A -> */ ANT_B, + /*ANT_B -> */ ANT_C, + /*ANT_AB -> */ ANT_BC, + /*ANT_C -> */ ANT_A, + /*ANT_AC -> */ ANT_AB, + /*ANT_BC -> */ ANT_AC, + /*ANT_ABC -> */ ANT_ABC, +}; + +#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \ + [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ + IWL_RATE_SISO_##s##M_PLCP, \ + IWL_RATE_MIMO2_##s##M_PLCP,\ + IWL_RATE_##r##M_IEEE, \ + IWL_RATE_##ip##M_INDEX, \ + IWL_RATE_##in##M_INDEX, \ + IWL_RATE_##rp##M_INDEX, \ + IWL_RATE_##rn##M_INDEX, \ + IWL_RATE_##pp##M_INDEX, \ + IWL_RATE_##np##M_INDEX } + +/* + * Parameter order: + * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate + * + * If there isn't a valid next or previous rate then INV is used which + * maps to IWL_RATE_INVALID + * + */ +const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT] = { + IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */ + IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */ + IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */ + IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */ + IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */ + IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */ + IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */ + IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */ + IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */ + IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */ + IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */ + IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */ + IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, 
INV),/* 60mbps */ +}; + +static int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags) +{ + int idx = 0; + + /* HT rate format */ + if (rate_n_flags & RATE_MCS_HT_MSK) { + idx = (rate_n_flags & 0xff); + + if (idx >= IWL_RATE_MIMO2_6M_PLCP) + idx = idx - IWL_RATE_MIMO2_6M_PLCP; + + idx += IWL_FIRST_OFDM_RATE; + /* skip 9M not supported in ht*/ + if (idx >= IWL_RATE_9M_INDEX) + idx += 1; + if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE)) + return idx; + + /* legacy rate format, search for match in table */ + } else { + for (idx = 0; idx < ARRAY_SIZE(iwlegacy_rates); idx++) + if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF)) + return idx; + } + + return -1; +} + +static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv, + struct sk_buff *skb, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta); +static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, u32 rate_n_flags); +static void iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, + bool force_search); + +#ifdef CONFIG_MAC80211_DEBUGFS +static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, + u32 *rate_n_flags, int index); +#else +static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, + u32 *rate_n_flags, int index) +{} +#endif + +/** + * The following tables contain the expected throughput metrics for all rates + * + * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits + * + * where invalid entries are zeros. + * + * CCK rates are only valid in legacy table and will only be used in G + * (2.4 GHz) band. + */ + +static s32 expected_tpt_legacy[IWL_RATE_COUNT] = { + 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0 +}; + +static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */ + {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */ + {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */ + {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */ +}; + +static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */ + {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */ + {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */ + {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */ +}; + +static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */ + {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */ + {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */ + {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/ +}; + +static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */ + {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */ + {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */ + {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */ +}; + +/* mbps, mcs */ +static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = { + { "1", "BPSK DSSS"}, + { "2", "QPSK DSSS"}, + {"5.5", "BPSK CCK"}, + { "11", "QPSK CCK"}, + { "6", "BPSK 1/2"}, + { "9", "BPSK 1/2"}, + { "12", "QPSK 1/2"}, + { "18", "QPSK 3/4"}, + { "24", "16QAM 1/2"}, + { "36", "16QAM 3/4"}, + { "48", "64QAM 2/3"}, + { "54", "64QAM 3/4"}, + { "60", "64QAM 5/6"}, +}; + +#define MCS_INDEX_PER_STREAM (8) + +static inline u8 iwl4965_rs_extract_rate(u32 rate_n_flags) +{ + return 
(u8)(rate_n_flags & 0xFF); +} + +static void +iwl4965_rs_rate_scale_clear_window(struct iwl_rate_scale_data *window) +{ + window->data = 0; + window->success_counter = 0; + window->success_ratio = IWL_INVALID_VALUE; + window->counter = 0; + window->average_tpt = IWL_INVALID_VALUE; + window->stamp = 0; +} + +static inline u8 iwl4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type) +{ + return (ant_type & valid_antenna) == ant_type; +} + +/* + * removes the old data from the statistics. All data that is older than + * TID_MAX_TIME_DIFF, will be deleted. + */ +static void +iwl4965_rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time) +{ + /* The oldest age we want to keep */ + u32 oldest_time = curr_time - TID_MAX_TIME_DIFF; + + while (tl->queue_count && + (tl->time_stamp < oldest_time)) { + tl->total -= tl->packet_count[tl->head]; + tl->packet_count[tl->head] = 0; + tl->time_stamp += TID_QUEUE_CELL_SPACING; + tl->queue_count--; + tl->head++; + if (tl->head >= TID_QUEUE_MAX_SIZE) + tl->head = 0; + } +} + +/* + * increment traffic load value for tid and also remove + * any old values if passed the certain time period + */ +static u8 iwl4965_rs_tl_add_packet(struct iwl_lq_sta *lq_data, + struct ieee80211_hdr *hdr) +{ + u32 curr_time = jiffies_to_msecs(jiffies); + u32 time_diff; + s32 index; + struct iwl_traffic_load *tl = NULL; + u8 tid; + + if (ieee80211_is_data_qos(hdr->frame_control)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & 0xf; + } else + return MAX_TID_COUNT; + + if (unlikely(tid >= TID_MAX_LOAD_COUNT)) + return MAX_TID_COUNT; + + tl = &lq_data->load[tid]; + + curr_time -= curr_time % TID_ROUND_VALUE; + + /* Happens only for the first packet. Initialize the data */ + if (!(tl->queue_count)) { + tl->total = 1; + tl->time_stamp = curr_time; + tl->queue_count = 1; + tl->head = 0; + tl->packet_count[0] = 1; + return MAX_TID_COUNT; + } + + time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); + index = time_diff / TID_QUEUE_CELL_SPACING; + + /* The history is too long: remove data that is older than */ + /* TID_MAX_TIME_DIFF */ + if (index >= TID_QUEUE_MAX_SIZE) + iwl4965_rs_tl_rm_old_stats(tl, curr_time); + + index = (tl->head + index) % TID_QUEUE_MAX_SIZE; + tl->packet_count[index] = tl->packet_count[index] + 1; + tl->total = tl->total + 1; + + if ((index + 1) > tl->queue_count) + tl->queue_count = index + 1; + + return tid; +} + +/* + get the traffic load value for tid +*/ +static u32 iwl4965_rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid) +{ + u32 curr_time = jiffies_to_msecs(jiffies); + u32 time_diff; + s32 index; + struct iwl_traffic_load *tl = NULL; + + if (tid >= TID_MAX_LOAD_COUNT) + return 0; + + tl = &(lq_data->load[tid]); + + curr_time -= curr_time % TID_ROUND_VALUE; + + if (!(tl->queue_count)) + return 0; + + time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); + index = time_diff / TID_QUEUE_CELL_SPACING; + + /* The history is too long: remove data that is older than */ + /* TID_MAX_TIME_DIFF */ + if (index >= TID_QUEUE_MAX_SIZE) + iwl4965_rs_tl_rm_old_stats(tl, curr_time); + + return tl->total; +} + +static int iwl4965_rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, + struct iwl_lq_sta *lq_data, u8 tid, + struct ieee80211_sta *sta) +{ + int ret = -EAGAIN; + u32 load; + + load = iwl4965_rs_tl_get_load(lq_data, tid); + + if (load > IWL_AGG_LOAD_THRESHOLD) { + IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", + sta->addr, tid); + ret = ieee80211_start_tx_ba_session(sta, tid, 5000); + if (ret == -EAGAIN) { + /* + * driver and mac80211 is out 
of sync + * this might be cause by reloading firmware + * stop the tx ba session here + */ + IWL_ERR(priv, "Fail start Tx agg on tid: %d\n", + tid); + ieee80211_stop_tx_ba_session(sta, tid); + } + } else { + IWL_ERR(priv, "Aggregation not enabled for tid %d " + "because load = %u\n", tid, load); + } + return ret; +} + +static void iwl4965_rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid, + struct iwl_lq_sta *lq_data, + struct ieee80211_sta *sta) +{ + if (tid < TID_MAX_LOAD_COUNT) + iwl4965_rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); + else + IWL_ERR(priv, "tid exceeds max load count: %d/%d\n", + tid, TID_MAX_LOAD_COUNT); +} + +static inline int iwl4965_get_iwl4965_num_of_ant_from_rate(u32 rate_n_flags) +{ + return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) + + !!(rate_n_flags & RATE_MCS_ANT_B_MSK) + + !!(rate_n_flags & RATE_MCS_ANT_C_MSK); +} + +/* + * Static function to get the expected throughput from an iwl_scale_tbl_info + * that wraps a NULL pointer check + */ +static s32 +iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) +{ + if (tbl->expected_tpt) + return tbl->expected_tpt[rs_index]; + return 0; +} + +/** + * iwl4965_rs_collect_tx_data - Update the success/failure sliding window + * + * We keep a sliding window of the last 62 packets transmitted + * at this rate. window->data contains the bitmask of successful + * packets. + */ +static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl, + int scale_index, int attempts, int successes) +{ + struct iwl_rate_scale_data *window = NULL; + static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); + s32 fail_count, tpt; + + if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) + return -EINVAL; + + /* Select window for current tx bit rate */ + window = &(tbl->win[scale_index]); + + /* Get expected throughput */ + tpt = iwl4965_get_expected_tpt(tbl, scale_index); + + /* + * Keep track of only the latest 62 tx frame attempts in this rate's + * history window; anything older isn't really relevant any more. + * If we have filled up the sliding window, drop the oldest attempt; + * if the oldest attempt (highest bit in bitmap) shows "success", + * subtract "1" from the success counter (this is the main reason + * we keep these bitmaps!). + */ + while (attempts > 0) { + if (window->counter >= IWL_RATE_MAX_WINDOW) { + + /* remove earliest */ + window->counter = IWL_RATE_MAX_WINDOW - 1; + + if (window->data & mask) { + window->data &= ~mask; + window->success_counter--; + } + } + + /* Increment frames-attempted counter */ + window->counter++; + + /* Shift bitmap by one frame to throw away oldest history */ + window->data <<= 1; + + /* Mark the most recent #successes attempts as successful */ + if (successes > 0) { + window->success_counter++; + window->data |= 0x1; + successes--; + } + + attempts--; + } + + /* Calculate current success ratio, avoid divide-by-0! */ + if (window->counter > 0) + window->success_ratio = 128 * (100 * window->success_counter) + / window->counter; + else + window->success_ratio = IWL_INVALID_VALUE; + + fail_count = window->counter - window->success_counter; + + /* Calculate average throughput, if we have enough history. 
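+	 * success_ratio above is stored scaled by 128, so dividing the
+	 * product by 128 below undoes that scaling and the "+ 64" rounds
+	 * the result to the nearest integer.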
*/ + if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) || + (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH)) + window->average_tpt = (window->success_ratio * tpt + 64) / 128; + else + window->average_tpt = IWL_INVALID_VALUE; + + /* Tag this window as having been updated */ + window->stamp = jiffies; + + return 0; +} + +/* + * Fill uCode API rate_n_flags field, based on "search" or "active" table. + */ +static u32 iwl4965_rate_n_flags_from_tbl(struct iwl_priv *priv, + struct iwl_scale_tbl_info *tbl, + int index, u8 use_green) +{ + u32 rate_n_flags = 0; + + if (is_legacy(tbl->lq_type)) { + rate_n_flags = iwlegacy_rates[index].plcp; + if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE) + rate_n_flags |= RATE_MCS_CCK_MSK; + + } else if (is_Ht(tbl->lq_type)) { + if (index > IWL_LAST_OFDM_RATE) { + IWL_ERR(priv, "Invalid HT rate index %d\n", index); + index = IWL_LAST_OFDM_RATE; + } + rate_n_flags = RATE_MCS_HT_MSK; + + if (is_siso(tbl->lq_type)) + rate_n_flags |= iwlegacy_rates[index].plcp_siso; + else + rate_n_flags |= iwlegacy_rates[index].plcp_mimo2; + } else { + IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type); + } + + rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) & + RATE_MCS_ANT_ABC_MSK); + + if (is_Ht(tbl->lq_type)) { + if (tbl->is_ht40) { + if (tbl->is_dup) + rate_n_flags |= RATE_MCS_DUP_MSK; + else + rate_n_flags |= RATE_MCS_HT40_MSK; + } + if (tbl->is_SGI) + rate_n_flags |= RATE_MCS_SGI_MSK; + + if (use_green) { + rate_n_flags |= RATE_MCS_GF_MSK; + if (is_siso(tbl->lq_type) && tbl->is_SGI) { + rate_n_flags &= ~RATE_MCS_SGI_MSK; + IWL_ERR(priv, "GF was set with SGI:SISO\n"); + } + } + } + return rate_n_flags; +} + +/* + * Interpret uCode API's rate_n_flags format, + * fill "search" or "active" tx mode table. + */ +static int iwl4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags, + enum ieee80211_band band, + struct iwl_scale_tbl_info *tbl, + int *rate_idx) +{ + u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK); + u8 iwl4965_num_of_ant = iwl4965_get_iwl4965_num_of_ant_from_rate(rate_n_flags); + u8 mcs; + + memset(tbl, 0, sizeof(struct iwl_scale_tbl_info)); + *rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags); + + if (*rate_idx == IWL_RATE_INVALID) { + *rate_idx = -1; + return -EINVAL; + } + tbl->is_SGI = 0; /* default legacy setup */ + tbl->is_ht40 = 0; + tbl->is_dup = 0; + tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS); + tbl->lq_type = LQ_NONE; + tbl->max_search = IWL_MAX_SEARCH; + + /* legacy rate format */ + if (!(rate_n_flags & RATE_MCS_HT_MSK)) { + if (iwl4965_num_of_ant == 1) { + if (band == IEEE80211_BAND_5GHZ) + tbl->lq_type = LQ_A; + else + tbl->lq_type = LQ_G; + } + /* HT rate format */ + } else { + if (rate_n_flags & RATE_MCS_SGI_MSK) + tbl->is_SGI = 1; + + if ((rate_n_flags & RATE_MCS_HT40_MSK) || + (rate_n_flags & RATE_MCS_DUP_MSK)) + tbl->is_ht40 = 1; + + if (rate_n_flags & RATE_MCS_DUP_MSK) + tbl->is_dup = 1; + + mcs = iwl4965_rs_extract_rate(rate_n_flags); + + /* SISO */ + if (mcs <= IWL_RATE_SISO_60M_PLCP) { + if (iwl4965_num_of_ant == 1) + tbl->lq_type = LQ_SISO; /*else NONE*/ + /* MIMO2 */ + } else { + if (iwl4965_num_of_ant == 2) + tbl->lq_type = LQ_MIMO2; + } + } + return 0; +} + +/* switch to another antenna/antennas and return 1 */ +/* if no other valid antenna found, return 0 */ +static int iwl4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags, + struct iwl_scale_tbl_info *tbl) +{ + u8 new_ant_type; + + if (!tbl->ant_type || tbl->ant_type > ANT_ABC) + return 0; + + if (!iwl4965_rs_is_valid_ant(valid_ant, tbl->ant_type)) + 
return 0; + + new_ant_type = ant_toggle_lookup[tbl->ant_type]; + + while ((new_ant_type != tbl->ant_type) && + !iwl4965_rs_is_valid_ant(valid_ant, new_ant_type)) + new_ant_type = ant_toggle_lookup[new_ant_type]; + + if (new_ant_type == tbl->ant_type) + return 0; + + tbl->ant_type = new_ant_type; + *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK; + *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS; + return 1; +} + +/** + * Green-field mode is valid if the station supports it and + * there are no non-GF stations present in the BSS. + */ +static bool iwl4965_rs_use_green(struct ieee80211_sta *sta) +{ + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_rxon_context *ctx = sta_priv->common.ctx; + + return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && + !(ctx->ht.non_gf_sta_present); +} + +/** + * iwl4965_rs_get_supported_rates - get the available rates + * + * if management frame or broadcast frame only return + * basic available rates. + * + */ +static u16 iwl4965_rs_get_supported_rates(struct iwl_lq_sta *lq_sta, + struct ieee80211_hdr *hdr, + enum iwl_table_type rate_type) +{ + if (is_legacy(rate_type)) { + return lq_sta->active_legacy_rate; + } else { + if (is_siso(rate_type)) + return lq_sta->active_siso_rate; + else + return lq_sta->active_mimo2_rate; + } +} + +static u16 +iwl4965_rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask, + int rate_type) +{ + u8 high = IWL_RATE_INVALID; + u8 low = IWL_RATE_INVALID; + + /* 802.11A or ht walks to the next literal adjacent rate in + * the rate table */ + if (is_a_band(rate_type) || !is_legacy(rate_type)) { + int i; + u32 mask; + + /* Find the previous rate that is in the rate mask */ + i = index - 1; + for (mask = (1 << i); i >= 0; i--, mask >>= 1) { + if (rate_mask & mask) { + low = i; + break; + } + } + + /* Find the next rate that is in the rate mask */ + i = index + 1; + for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) { + if (rate_mask & mask) { + high = i; + break; + } + } + + return (high << 8) | low; + } + + low = index; + while (low != IWL_RATE_INVALID) { + low = iwlegacy_rates[low].prev_rs; + if (low == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << low)) + break; + IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low); + } + + high = index; + while (high != IWL_RATE_INVALID) { + high = iwlegacy_rates[high].next_rs; + if (high == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << high)) + break; + IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high); + } + + return (high << 8) | low; +} + +static u32 iwl4965_rs_get_lower_rate(struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, + u8 scale_index, u8 ht_possible) +{ + s32 low; + u16 rate_mask; + u16 high_low; + u8 switch_to_legacy = 0; + u8 is_green = lq_sta->is_green; + struct iwl_priv *priv = lq_sta->drv; + + /* check if we need to switch from HT to legacy rates. 
+ * assumption is that mandatory rates (1Mbps or 6Mbps) + * are always supported (spec demand) */ + if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) { + switch_to_legacy = 1; + scale_index = rs_ht_to_legacy[scale_index]; + if (lq_sta->band == IEEE80211_BAND_5GHZ) + tbl->lq_type = LQ_A; + else + tbl->lq_type = LQ_G; + + if (iwl4965_num_of_ant(tbl->ant_type) > 1) + tbl->ant_type = + iwl4965_first_antenna(priv->hw_params.valid_tx_ant); + + tbl->is_ht40 = 0; + tbl->is_SGI = 0; + tbl->max_search = IWL_MAX_SEARCH; + } + + rate_mask = iwl4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type); + + /* Mask with station rate restriction */ + if (is_legacy(tbl->lq_type)) { + /* supp_rates has no CCK bits in A mode */ + if (lq_sta->band == IEEE80211_BAND_5GHZ) + rate_mask = (u16)(rate_mask & + (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE)); + else + rate_mask = (u16)(rate_mask & lq_sta->supp_rates); + } + + /* If we switched from HT to legacy, check current rate */ + if (switch_to_legacy && (rate_mask & (1 << scale_index))) { + low = scale_index; + goto out; + } + + high_low = iwl4965_rs_get_adjacent_rate(lq_sta->drv, + scale_index, rate_mask, + tbl->lq_type); + low = high_low & 0xff; + + if (low == IWL_RATE_INVALID) + low = scale_index; + +out: + return iwl4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green); +} + +/* + * Simple function to compare two rate scale table types + */ +static bool iwl4965_table_type_matches(struct iwl_scale_tbl_info *a, + struct iwl_scale_tbl_info *b) +{ + return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) && + (a->is_SGI == b->is_SGI); +} + +/* + * mac80211 sends us Tx status + */ +static void +iwl4965_rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + struct sk_buff *skb) +{ + int legacy_success; + int retries; + int rs_index, mac_index, i; + struct iwl_lq_sta *lq_sta = priv_sta; + struct iwl_link_quality_cmd *table; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct iwl_priv *priv = (struct iwl_priv *)priv_r; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + enum mac80211_rate_control_flags mac_flags; + u32 tx_rate; + struct iwl_scale_tbl_info tbl_type; + struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_rxon_context *ctx = sta_priv->common.ctx; + + IWL_DEBUG_RATE_LIMIT(priv, + "get frame ack response, update rate scale window\n"); + + /* Treat uninitialized rate scaling data same as non-existing. */ + if (!lq_sta) { + IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n"); + return; + } else if (!lq_sta->drv) { + IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n"); + return; + } + + if (!ieee80211_is_data(hdr->frame_control) || + info->flags & IEEE80211_TX_CTL_NO_ACK) + return; + + /* This packet was aggregated but doesn't carry status info */ + if ((info->flags & IEEE80211_TX_CTL_AMPDU) && + !(info->flags & IEEE80211_TX_STAT_AMPDU)) + return; + + /* + * Ignore this Tx frame response if its initial rate doesn't match + * that of latest Link Quality command. There may be stragglers + * from a previous Link Quality command, but we're no longer interested + * in those; they're either from the "active" mode while we're trying + * to check "search" mode, or a prior "search" mode after we've moved + * to a new "search" mode (which might become the new "active" mode). 
+ */ + table = &lq_sta->lq; + tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); + iwl4965_rs_get_tbl_info_from_mcs(tx_rate, + priv->band, &tbl_type, &rs_index); + if (priv->band == IEEE80211_BAND_5GHZ) + rs_index -= IWL_FIRST_OFDM_RATE; + mac_flags = info->status.rates[0].flags; + mac_index = info->status.rates[0].idx; + /* For HT packets, map MCS to PLCP */ + if (mac_flags & IEEE80211_TX_RC_MCS) { + mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */ + if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE)) + mac_index++; + /* + * mac80211 HT index is always zero-indexed; we need to move + * HT OFDM rates after CCK rates in 2.4 GHz band + */ + if (priv->band == IEEE80211_BAND_2GHZ) + mac_index += IWL_FIRST_OFDM_RATE; + } + /* Here we actually compare this rate to the latest LQ command */ + if ((mac_index < 0) || + (tbl_type.is_SGI != + !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) || + (tbl_type.is_ht40 != + !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) || + (tbl_type.is_dup != + !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) || + (tbl_type.ant_type != info->antenna_sel_tx) || + (!!(tx_rate & RATE_MCS_HT_MSK) != + !!(mac_flags & IEEE80211_TX_RC_MCS)) || + (!!(tx_rate & RATE_MCS_GF_MSK) != + !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) || + (rs_index != mac_index)) { + IWL_DEBUG_RATE(priv, + "initial rate %d does not match %d (0x%x)\n", + mac_index, rs_index, tx_rate); + /* + * Since rates mis-match, the last LQ command may have failed. + * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with + * ... driver. + */ + lq_sta->missed_rate_counter++; + if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) { + lq_sta->missed_rate_counter = 0; + iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, + CMD_ASYNC, false); + } + /* Regardless, ignore this status info for outdated rate */ + return; + } else + /* Rate did match, so reset the missed_rate_counter */ + lq_sta->missed_rate_counter = 0; + + /* Figure out if rate scale algorithm is in active or search table */ + if (iwl4965_table_type_matches(&tbl_type, + &(lq_sta->lq_info[lq_sta->active_tbl]))) { + curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + } else if (iwl4965_table_type_matches(&tbl_type, + &lq_sta->lq_info[1 - lq_sta->active_tbl])) { + curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + } else { + IWL_DEBUG_RATE(priv, + "Neither active nor search matches tx rate\n"); + tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n", + tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI); + tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n", + tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI); + IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n", + tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI); + /* + * no matching table found, let's by-pass the data collection + * and continue to perform rate scale to find the rate table + */ + iwl4965_rs_stay_in_table(lq_sta, true); + goto done; + } + + /* + * Updating the frame history depends on whether packets were + * aggregated. + * + * For aggregation, all packets were transmitted at the same rate, the + * first index into rate scale table. 
+ */ + if (info->flags & IEEE80211_TX_STAT_AMPDU) { + tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); + iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, + &rs_index); + iwl4965_rs_collect_tx_data(curr_tbl, rs_index, + info->status.ampdu_len, + info->status.ampdu_ack_len); + + /* Update success/fail counts if not searching for new mode */ + if (lq_sta->stay_in_tbl) { + lq_sta->total_success += info->status.ampdu_ack_len; + lq_sta->total_failed += (info->status.ampdu_len - + info->status.ampdu_ack_len); + } + } else { + /* + * For legacy, update frame history with for each Tx retry. + */ + retries = info->status.rates[0].count - 1; + /* HW doesn't send more than 15 retries */ + retries = min(retries, 15); + + /* The last transmission may have been successful */ + legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK); + /* Collect data for each rate used during failed TX attempts */ + for (i = 0; i <= retries; ++i) { + tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags); + iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, + &tbl_type, &rs_index); + /* + * Only collect stats if retried rate is in the same RS + * table as active/search. + */ + if (iwl4965_table_type_matches(&tbl_type, curr_tbl)) + tmp_tbl = curr_tbl; + else if (iwl4965_table_type_matches(&tbl_type, + other_tbl)) + tmp_tbl = other_tbl; + else + continue; + iwl4965_rs_collect_tx_data(tmp_tbl, rs_index, 1, + i < retries ? 0 : legacy_success); + } + + /* Update success/fail counts if not searching for new mode */ + if (lq_sta->stay_in_tbl) { + lq_sta->total_success += legacy_success; + lq_sta->total_failed += retries + (1 - legacy_success); + } + } + /* The last TX rate is cached in lq_sta; it's set in if/else above */ + lq_sta->last_rate_n_flags = tx_rate; +done: + /* See if there's a better rate or modulation mode to try. */ + if (sta && sta->supp_rates[sband->band]) + iwl4965_rs_rate_scale_perform(priv, skb, sta, lq_sta); +} + +/* + * Begin a period of staying with a selected modulation mode. + * Set "stay_in_tbl" flag to prevent any mode switches. + * Set frame tx success limits according to legacy vs. high-throughput, + * and reset overall (spanning all rates) tx success history statistics. + * These control how long we stay using same modulation mode before + * searching for a new mode. 
+ */ +static void iwl4965_rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy, + struct iwl_lq_sta *lq_sta) +{ + IWL_DEBUG_RATE(priv, "we are staying in the same table\n"); + lq_sta->stay_in_tbl = 1; /* only place this gets set */ + if (is_legacy) { + lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT; + lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT; + lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT; + } else { + lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT; + lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT; + lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT; + } + lq_sta->table_count = 0; + lq_sta->total_failed = 0; + lq_sta->total_success = 0; + lq_sta->flush_timer = jiffies; + lq_sta->action_counter = 0; +} + +/* + * Find correct throughput table for given mode of modulation + */ +static void iwl4965_rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl) +{ + /* Used to choose among HT tables */ + s32 (*ht_tbl_pointer)[IWL_RATE_COUNT]; + + /* Check for invalid LQ type */ + if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) { + tbl->expected_tpt = expected_tpt_legacy; + return; + } + + /* Legacy rates have only one table */ + if (is_legacy(tbl->lq_type)) { + tbl->expected_tpt = expected_tpt_legacy; + return; + } + + /* Choose among many HT tables depending on number of streams + * (SISO/MIMO2), channel width (20/40), SGI, and aggregation + * status */ + if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup)) + ht_tbl_pointer = expected_tpt_siso20MHz; + else if (is_siso(tbl->lq_type)) + ht_tbl_pointer = expected_tpt_siso40MHz; + else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup)) + ht_tbl_pointer = expected_tpt_mimo2_20MHz; + else /* if (is_mimo2(tbl->lq_type)) <-- must be true */ + ht_tbl_pointer = expected_tpt_mimo2_40MHz; + + if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */ + tbl->expected_tpt = ht_tbl_pointer[0]; + else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */ + tbl->expected_tpt = ht_tbl_pointer[1]; + else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */ + tbl->expected_tpt = ht_tbl_pointer[2]; + else /* AGG+SGI */ + tbl->expected_tpt = ht_tbl_pointer[3]; +} + +/* + * Find starting rate for new "search" high-throughput mode of modulation. + * Goal is to find lowest expected rate (under perfect conditions) that is + * above the current measured throughput of "active" mode, to give new mode + * a fair chance to prove itself without too many challenges. + * + * This gets called when transitioning to more aggressive modulation + * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive + * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need + * to decrease to match "active" throughput. When moving from MIMO to SISO, + * bit rate will typically need to increase, but not if performance was bad. 
+ */ +static s32 iwl4965_rs_get_best_rate(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, /* "search" */ + u16 rate_mask, s8 index) +{ + /* "active" values */ + struct iwl_scale_tbl_info *active_tbl = + &(lq_sta->lq_info[lq_sta->active_tbl]); + s32 active_sr = active_tbl->win[index].success_ratio; + s32 active_tpt = active_tbl->expected_tpt[index]; + + /* expected "search" throughput */ + s32 *tpt_tbl = tbl->expected_tpt; + + s32 new_rate, high, low, start_hi; + u16 high_low; + s8 rate = index; + + new_rate = high = low = start_hi = IWL_RATE_INVALID; + + for (; ;) { + high_low = iwl4965_rs_get_adjacent_rate(priv, rate, rate_mask, + tbl->lq_type); + + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + /* + * Lower the "search" bit rate, to give new "search" mode + * approximately the same throughput as "active" if: + * + * 1) "Active" mode has been working modestly well (but not + * great), and expected "search" throughput (under perfect + * conditions) at candidate rate is above the actual + * measured "active" throughput (but less than expected + * "active" throughput under perfect conditions). + * OR + * 2) "Active" mode has been working perfectly or very well + * and expected "search" throughput (under perfect + * conditions) at candidate rate is above expected + * "active" throughput (under perfect conditions). + */ + if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) && + ((active_sr > IWL_RATE_DECREASE_TH) && + (active_sr <= IWL_RATE_HIGH_TH) && + (tpt_tbl[rate] <= active_tpt))) || + ((active_sr >= IWL_RATE_SCALE_SWITCH) && + (tpt_tbl[rate] > active_tpt))) { + + /* (2nd or later pass) + * If we've already tried to raise the rate, and are + * now trying to lower it, use the higher rate. */ + if (start_hi != IWL_RATE_INVALID) { + new_rate = start_hi; + break; + } + + new_rate = rate; + + /* Loop again with lower rate */ + if (low != IWL_RATE_INVALID) + rate = low; + + /* Lower rate not available, use the original */ + else + break; + + /* Else try to raise the "search" rate to match "active" */ + } else { + /* (2nd or later pass) + * If we've already tried to lower the rate, and are + * now trying to raise it, use the lower rate. 
*/ + if (new_rate != IWL_RATE_INVALID) + break; + + /* Loop again with higher rate */ + else if (high != IWL_RATE_INVALID) { + start_hi = high; + rate = high; + + /* Higher rate not available, use the original */ + } else { + new_rate = rate; + break; + } + } + } + + return new_rate; +} + +/* + * Set up search table for MIMO2 + */ +static int iwl4965_rs_switch_to_mimo2(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, + struct iwl_scale_tbl_info *tbl, int index) +{ + u16 rate_mask; + s32 rate; + s8 is_green = lq_sta->is_green; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_rxon_context *ctx = sta_priv->common.ctx; + + if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) + return -1; + + if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) + == WLAN_HT_CAP_SM_PS_STATIC) + return -1; + + /* Need both Tx chains/antennas to support MIMO */ + if (priv->hw_params.tx_chains_num < 2) + return -1; + + IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n"); + + tbl->lq_type = LQ_MIMO2; + tbl->is_dup = lq_sta->is_dup; + tbl->action = 0; + tbl->max_search = IWL_MAX_SEARCH; + rate_mask = lq_sta->active_mimo2_rate; + + if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap)) + tbl->is_ht40 = 1; + else + tbl->is_ht40 = 0; + + iwl4965_rs_set_expected_tpt_table(lq_sta, tbl); + + rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index); + + IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n", + rate, rate_mask); + if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { + IWL_DEBUG_RATE(priv, + "Can't switch with index %d rate mask %x\n", + rate, rate_mask); + return -1; + } + tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, + tbl, rate, is_green); + + IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n", + tbl->current_rate, is_green); + return 0; +} + +/* + * Set up search table for SISO + */ +static int iwl4965_rs_switch_to_siso(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, + struct iwl_scale_tbl_info *tbl, int index) +{ + u16 rate_mask; + u8 is_green = lq_sta->is_green; + s32 rate; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_rxon_context *ctx = sta_priv->common.ctx; + + if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) + return -1; + + IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n"); + + tbl->is_dup = lq_sta->is_dup; + tbl->lq_type = LQ_SISO; + tbl->action = 0; + tbl->max_search = IWL_MAX_SEARCH; + rate_mask = lq_sta->active_siso_rate; + + if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap)) + tbl->is_ht40 = 1; + else + tbl->is_ht40 = 0; + + if (is_green) + tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/ + + iwl4965_rs_set_expected_tpt_table(lq_sta, tbl); + rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index); + + IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask); + if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { + IWL_DEBUG_RATE(priv, + "can not switch with index %d rate mask %x\n", + rate, rate_mask); + return -1; + } + tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, + tbl, rate, is_green); + IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n", + tbl->current_rate, is_green); + return 0; +} + +/* + * Try to switch to new modulation mode from legacy + */ +static int iwl4965_rs_move_legacy_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + 
struct ieee80211_sta *sta, + int index) +{ + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action; + u8 valid_tx_ant = priv->hw_params.valid_tx_ant; + u8 tx_chains_num = priv->hw_params.tx_chains_num; + int ret = 0; + u8 update_search_tbl_counter = 0; + + tbl->action = IWL_LEGACY_SWITCH_SISO; + + start_action = tbl->action; + for (; ;) { + lq_sta->action_counter++; + switch (tbl->action) { + case IWL_LEGACY_SWITCH_ANTENNA1: + case IWL_LEGACY_SWITCH_ANTENNA2: + IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n"); + + if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 && + tx_chains_num <= 1) || + (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 && + tx_chains_num <= 2)) + break; + + /* Don't change antenna if success has been great */ + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + + /* Set up search table to try other antenna */ + memcpy(search_tbl, tbl, sz); + + if (iwl4965_rs_toggle_antenna(valid_tx_ant, + &search_tbl->current_rate, search_tbl)) { + update_search_tbl_counter = 1; + iwl4965_rs_set_expected_tpt_table(lq_sta, + search_tbl); + goto out; + } + break; + case IWL_LEGACY_SWITCH_SISO: + IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n"); + + /* Set up search table to try SISO */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + ret = iwl4965_rs_switch_to_siso(priv, lq_sta, conf, sta, + search_tbl, index); + if (!ret) { + lq_sta->action_counter = 0; + goto out; + } + + break; + case IWL_LEGACY_SWITCH_MIMO2_AB: + case IWL_LEGACY_SWITCH_MIMO2_AC: + case IWL_LEGACY_SWITCH_MIMO2_BC: + IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n"); + + /* Set up search table to try MIMO */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + + if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB) + search_tbl->ant_type = ANT_AB; + else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC) + search_tbl->ant_type = ANT_AC; + else + search_tbl->ant_type = ANT_BC; + + if (!iwl4965_rs_is_valid_ant(valid_tx_ant, + search_tbl->ant_type)) + break; + + ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta, + conf, sta, + search_tbl, index); + if (!ret) { + lq_sta->action_counter = 0; + goto out; + } + break; + } + tbl->action++; + if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC) + tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + + } + search_tbl->lq_type = LQ_NONE; + return 0; + +out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC) + tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; + return 0; + +} + +/* + * Try to switch to new modulation mode from SISO + */ +static int iwl4965_rs_move_siso_to_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, int index) +{ + u8 is_green = lq_sta->is_green; + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action; + u8 valid_tx_ant = 
priv->hw_params.valid_tx_ant; + u8 tx_chains_num = priv->hw_params.tx_chains_num; + u8 update_search_tbl_counter = 0; + int ret; + + start_action = tbl->action; + + for (;;) { + lq_sta->action_counter++; + switch (tbl->action) { + case IWL_SISO_SWITCH_ANTENNA1: + case IWL_SISO_SWITCH_ANTENNA2: + IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n"); + if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 && + tx_chains_num <= 1) || + (tbl->action == IWL_SISO_SWITCH_ANTENNA2 && + tx_chains_num <= 2)) + break; + + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + + memcpy(search_tbl, tbl, sz); + if (iwl4965_rs_toggle_antenna(valid_tx_ant, + &search_tbl->current_rate, search_tbl)) { + update_search_tbl_counter = 1; + goto out; + } + break; + case IWL_SISO_SWITCH_MIMO2_AB: + case IWL_SISO_SWITCH_MIMO2_AC: + case IWL_SISO_SWITCH_MIMO2_BC: + IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + + if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB) + search_tbl->ant_type = ANT_AB; + else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC) + search_tbl->ant_type = ANT_AC; + else + search_tbl->ant_type = ANT_BC; + + if (!iwl4965_rs_is_valid_ant(valid_tx_ant, + search_tbl->ant_type)) + break; + + ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta, + conf, sta, + search_tbl, index); + if (!ret) + goto out; + break; + case IWL_SISO_SWITCH_GI: + if (!tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_20)) + break; + if (tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_40)) + break; + + IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n"); + + memcpy(search_tbl, tbl, sz); + if (is_green) { + if (!tbl->is_SGI) + break; + else + IWL_ERR(priv, + "SGI was set in GF+SISO\n"); + } + search_tbl->is_SGI = !tbl->is_SGI; + iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl); + if (tbl->is_SGI) { + s32 tpt = lq_sta->last_tpt / 100; + if (tpt >= search_tbl->expected_tpt[index]) + break; + } + search_tbl->current_rate = + iwl4965_rate_n_flags_from_tbl(priv, search_tbl, + index, is_green); + update_search_tbl_counter = 1; + goto out; + } + tbl->action++; + if (tbl->action > IWL_SISO_SWITCH_GI) + tbl->action = IWL_SISO_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + } + search_tbl->lq_type = LQ_NONE; + return 0; + + out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IWL_SISO_SWITCH_GI) + tbl->action = IWL_SISO_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; + + return 0; +} + +/* + * Try to switch to new modulation mode from MIMO2 + */ +static int iwl4965_rs_move_mimo2_to_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, int index) +{ + s8 is_green = lq_sta->is_green; + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action; + u8 valid_tx_ant = priv->hw_params.valid_tx_ant; + u8 tx_chains_num = priv->hw_params.tx_chains_num; + u8 update_search_tbl_counter = 0; + int ret; + + start_action = tbl->action; + for (;;) { + lq_sta->action_counter++; + switch (tbl->action) { + case IWL_MIMO2_SWITCH_ANTENNA1: + case IWL_MIMO2_SWITCH_ANTENNA2: + IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n"); + + 
if (tx_chains_num <= 2) + break; + + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + + memcpy(search_tbl, tbl, sz); + if (iwl4965_rs_toggle_antenna(valid_tx_ant, + &search_tbl->current_rate, search_tbl)) { + update_search_tbl_counter = 1; + goto out; + } + break; + case IWL_MIMO2_SWITCH_SISO_A: + case IWL_MIMO2_SWITCH_SISO_B: + case IWL_MIMO2_SWITCH_SISO_C: + IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n"); + + /* Set up new search table for SISO */ + memcpy(search_tbl, tbl, sz); + + if (tbl->action == IWL_MIMO2_SWITCH_SISO_A) + search_tbl->ant_type = ANT_A; + else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B) + search_tbl->ant_type = ANT_B; + else + search_tbl->ant_type = ANT_C; + + if (!iwl4965_rs_is_valid_ant(valid_tx_ant, + search_tbl->ant_type)) + break; + + ret = iwl4965_rs_switch_to_siso(priv, lq_sta, + conf, sta, + search_tbl, index); + if (!ret) + goto out; + + break; + + case IWL_MIMO2_SWITCH_GI: + if (!tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_20)) + break; + if (tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_40)) + break; + + IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n"); + + /* Set up new search table for MIMO2 */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = !tbl->is_SGI; + iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl); + /* + * If active table already uses the fastest possible + * modulation (dual stream with short guard interval), + * and it's working well, there's no need to look + * for a better type of modulation! + */ + if (tbl->is_SGI) { + s32 tpt = lq_sta->last_tpt / 100; + if (tpt >= search_tbl->expected_tpt[index]) + break; + } + search_tbl->current_rate = + iwl4965_rate_n_flags_from_tbl(priv, search_tbl, + index, is_green); + update_search_tbl_counter = 1; + goto out; + + } + tbl->action++; + if (tbl->action > IWL_MIMO2_SWITCH_GI) + tbl->action = IWL_MIMO2_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + } + search_tbl->lq_type = LQ_NONE; + return 0; + out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IWL_MIMO2_SWITCH_GI) + tbl->action = IWL_MIMO2_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; + + return 0; + +} + +/* + * Check whether we should continue using same modulation mode, or + * begin search for a new mode, based on: + * 1) # tx successes or failures while using this mode + * 2) # times calling this function + * 3) elapsed time in this mode (not used, for now) + */ +static void +iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search) +{ + struct iwl_scale_tbl_info *tbl; + int i; + int active_tbl; + int flush_interval_passed = 0; + struct iwl_priv *priv; + + priv = lq_sta->drv; + active_tbl = lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + + /* If we've been disallowing search, see if we should now allow it */ + if (lq_sta->stay_in_tbl) { + + /* Elapsed time using current modulation mode */ + if (lq_sta->flush_timer) + flush_interval_passed = + time_after(jiffies, + (unsigned long)(lq_sta->flush_timer + + IWL_RATE_SCALE_FLUSH_INTVL)); + + /* + * Check if we should allow search for new modulation mode. + * If many frames have failed or succeeded, or we've used + * this same modulation for a long time, allow search, and + * reset history stats that keep track of whether we should + * allow a new search. Also (below) reset all bitmaps and + * stats in active history. 
+ */ + if (force_search || + (lq_sta->total_failed > lq_sta->max_failure_limit) || + (lq_sta->total_success > lq_sta->max_success_limit) || + ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer) + && (flush_interval_passed))) { + IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:", + lq_sta->total_failed, + lq_sta->total_success, + flush_interval_passed); + + /* Allow search for new mode */ + lq_sta->stay_in_tbl = 0; /* only place reset */ + lq_sta->total_failed = 0; + lq_sta->total_success = 0; + lq_sta->flush_timer = 0; + + /* + * Else if we've used this modulation mode enough repetitions + * (regardless of elapsed time or success/failure), reset + * history bitmaps and rate-specific stats for all rates in + * active table. + */ + } else { + lq_sta->table_count++; + if (lq_sta->table_count >= + lq_sta->table_count_limit) { + lq_sta->table_count = 0; + + IWL_DEBUG_RATE(priv, + "LQ: stay in table clear win\n"); + for (i = 0; i < IWL_RATE_COUNT; i++) + iwl4965_rs_rate_scale_clear_window( + &(tbl->win[i])); + } + } + + /* If transitioning to allow "search", reset all history + * bitmaps and stats in active table (this will become the new + * "search" table). */ + if (!lq_sta->stay_in_tbl) { + for (i = 0; i < IWL_RATE_COUNT; i++) + iwl4965_rs_rate_scale_clear_window( + &(tbl->win[i])); + } + } +} + +/* + * setup rate table in uCode + * return rate_n_flags as used in the table + */ +static u32 iwl4965_rs_update_rate_tbl(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, + int index, u8 is_green) +{ + u32 rate; + + /* Update uCode's rate table. */ + rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, index, is_green); + iwl4965_rs_fill_link_cmd(priv, lq_sta, rate); + iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false); + + return rate; +} + +/* + * Do rate scaling and search for new modulation mode. + */ +static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv, + struct sk_buff *skb, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta) +{ + struct ieee80211_hw *hw = priv->hw; + struct ieee80211_conf *conf = &hw->conf; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int low = IWL_RATE_INVALID; + int high = IWL_RATE_INVALID; + int index; + int i; + struct iwl_rate_scale_data *window = NULL; + int current_tpt = IWL_INVALID_VALUE; + int low_tpt = IWL_INVALID_VALUE; + int high_tpt = IWL_INVALID_VALUE; + u32 fail_count; + s8 scale_action = 0; + u16 rate_mask; + u8 update_lq = 0; + struct iwl_scale_tbl_info *tbl, *tbl1; + u16 rate_scale_index_msk = 0; + u32 rate; + u8 is_green = 0; + u8 active_tbl = 0; + u8 done_search = 0; + u16 high_low; + s32 sr; + u8 tid = MAX_TID_COUNT; + struct iwl_tid_data *tid_data; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_rxon_context *ctx = sta_priv->common.ctx; + + IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n"); + + /* Send management frames and NO_ACK data using lowest rate. */ + /* TODO: this could probably be improved.. 
*/ + if (!ieee80211_is_data(hdr->frame_control) || + info->flags & IEEE80211_TX_CTL_NO_ACK) + return; + + if (!sta || !lq_sta) + return; + + lq_sta->supp_rates = sta->supp_rates[lq_sta->band]; + + tid = iwl4965_rs_tl_add_packet(lq_sta, hdr); + if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) { + tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid]; + if (tid_data->agg.state == IWL_AGG_OFF) + lq_sta->is_agg = 0; + else + lq_sta->is_agg = 1; + } else + lq_sta->is_agg = 0; + + /* + * Select rate-scale / modulation-mode table to work with in + * the rest of this function: "search" if searching for better + * modulation mode, or "active" if doing rate scaling within a mode. + */ + if (!lq_sta->search_better_tbl) + active_tbl = lq_sta->active_tbl; + else + active_tbl = 1 - lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + if (is_legacy(tbl->lq_type)) + lq_sta->is_green = 0; + else + lq_sta->is_green = iwl4965_rs_use_green(sta); + is_green = lq_sta->is_green; + + /* current tx rate */ + index = lq_sta->last_txrate_idx; + + IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index, + tbl->lq_type); + + /* rates available for this association, and for modulation mode */ + rate_mask = iwl4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type); + + IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask); + + /* mask with station rate restriction */ + if (is_legacy(tbl->lq_type)) { + if (lq_sta->band == IEEE80211_BAND_5GHZ) + /* supp_rates has no CCK bits in A mode */ + rate_scale_index_msk = (u16) (rate_mask & + (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE)); + else + rate_scale_index_msk = (u16) (rate_mask & + lq_sta->supp_rates); + + } else + rate_scale_index_msk = rate_mask; + + if (!rate_scale_index_msk) + rate_scale_index_msk = rate_mask; + + if (!((1 << index) & rate_scale_index_msk)) { + IWL_ERR(priv, "Current Rate is not valid\n"); + if (lq_sta->search_better_tbl) { + /* revert to active table if search table is not valid*/ + tbl->lq_type = LQ_NONE; + lq_sta->search_better_tbl = 0; + tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + /* get "active" rate info */ + index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate); + rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta, + tbl, index, is_green); + } + return; + } + + /* Get expected throughput table and history window for current rate */ + if (!tbl->expected_tpt) { + IWL_ERR(priv, "tbl->expected_tpt is NULL\n"); + return; + } + + /* force user max rate if set by user */ + if ((lq_sta->max_rate_idx != -1) && + (lq_sta->max_rate_idx < index)) { + index = lq_sta->max_rate_idx; + update_lq = 1; + window = &(tbl->win[index]); + goto lq_update; + } + + window = &(tbl->win[index]); + + /* + * If there is not enough history to calculate actual average + * throughput, keep analyzing results of more tx frames, without + * changing rate or mode (bypass most of the rest of this function). + * Set up new rate table in uCode only if old rate is not supported + * in current association (use new rate found above). + */ + fail_count = window->counter - window->success_counter; + if ((fail_count < IWL_RATE_MIN_FAILURE_TH) && + (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) { + IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d " + "for index %d\n", + window->success_counter, window->counter, index); + + /* Can't calculate this yet; not enough history */ + window->average_tpt = IWL_INVALID_VALUE; + + /* Should we stay with this modulation mode, + * or search for a new one? 
*/ + iwl4965_rs_stay_in_table(lq_sta, false); + + goto out; + } + /* Else we have enough samples; calculate estimate of + * actual average throughput */ + if (window->average_tpt != ((window->success_ratio * + tbl->expected_tpt[index] + 64) / 128)) { + IWL_ERR(priv, + "expected_tpt should have been calculated by now\n"); + window->average_tpt = ((window->success_ratio * + tbl->expected_tpt[index] + 64) / 128); + } + + /* If we are searching for better modulation mode, check success. */ + if (lq_sta->search_better_tbl) { + /* If good success, continue using the "search" mode; + * no need to send new link quality command, since we're + * continuing to use the setup that we've been trying. */ + if (window->average_tpt > lq_sta->last_tpt) { + + IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE " + "suc=%d cur-tpt=%d old-tpt=%d\n", + window->success_ratio, + window->average_tpt, + lq_sta->last_tpt); + + if (!is_legacy(tbl->lq_type)) + lq_sta->enable_counter = 1; + + /* Swap tables; "search" becomes "active" */ + lq_sta->active_tbl = active_tbl; + current_tpt = window->average_tpt; + + /* Else poor success; go back to mode in "active" table */ + } else { + + IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE " + "suc=%d cur-tpt=%d old-tpt=%d\n", + window->success_ratio, + window->average_tpt, + lq_sta->last_tpt); + + /* Nullify "search" table */ + tbl->lq_type = LQ_NONE; + + /* Revert to "active" table */ + active_tbl = lq_sta->active_tbl; + tbl = &(lq_sta->lq_info[active_tbl]); + + /* Revert to "active" rate and throughput info */ + index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate); + current_tpt = lq_sta->last_tpt; + + /* Need to set up a new rate table in uCode */ + update_lq = 1; + } + + /* Either way, we've made a decision; modulation mode + * search is done, allow rate adjustment next time. */ + lq_sta->search_better_tbl = 0; + done_search = 1; /* Don't switch modes below! */ + goto lq_update; + } + + /* (Else) not in search of better modulation mode, try for better + * starting rate, while staying in this mode. */ + high_low = iwl4965_rs_get_adjacent_rate(priv, index, + rate_scale_index_msk, + tbl->lq_type); + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + /* If user set max rate, dont allow higher than user constrain */ + if ((lq_sta->max_rate_idx != -1) && + (lq_sta->max_rate_idx < high)) + high = IWL_RATE_INVALID; + + sr = window->success_ratio; + + /* Collect measured throughputs for current and adjacent rates */ + current_tpt = window->average_tpt; + if (low != IWL_RATE_INVALID) + low_tpt = tbl->win[low].average_tpt; + if (high != IWL_RATE_INVALID) + high_tpt = tbl->win[high].average_tpt; + + scale_action = 0; + + /* Too many failures, decrease rate */ + if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) { + IWL_DEBUG_RATE(priv, + "decrease rate because of low success_ratio\n"); + scale_action = -1; + + /* No throughput measured yet for adjacent rates; try increase. */ + } else if ((low_tpt == IWL_INVALID_VALUE) && + (high_tpt == IWL_INVALID_VALUE)) { + + if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH) + scale_action = 1; + else if (low != IWL_RATE_INVALID) + scale_action = 0; + } + + /* Both adjacent throughputs are measured, but neither one has better + * throughput; we're using the best rate, don't change it! 
*/ + else if ((low_tpt != IWL_INVALID_VALUE) && + (high_tpt != IWL_INVALID_VALUE) && + (low_tpt < current_tpt) && + (high_tpt < current_tpt)) + scale_action = 0; + + /* At least one adjacent rate's throughput is measured, + * and may have better performance. */ + else { + /* Higher adjacent rate's throughput is measured */ + if (high_tpt != IWL_INVALID_VALUE) { + /* Higher rate has better throughput */ + if (high_tpt > current_tpt && + sr >= IWL_RATE_INCREASE_TH) { + scale_action = 1; + } else { + scale_action = 0; + } + + /* Lower adjacent rate's throughput is measured */ + } else if (low_tpt != IWL_INVALID_VALUE) { + /* Lower rate has better throughput */ + if (low_tpt > current_tpt) { + IWL_DEBUG_RATE(priv, + "decrease rate because of low tpt\n"); + scale_action = -1; + } else if (sr >= IWL_RATE_INCREASE_TH) { + scale_action = 1; + } + } + } + + /* Sanity check; asked for decrease, but success rate or throughput + * has been good at old rate. Don't change it. */ + if ((scale_action == -1) && (low != IWL_RATE_INVALID) && + ((sr > IWL_RATE_HIGH_TH) || + (current_tpt > (100 * tbl->expected_tpt[low])))) + scale_action = 0; + + switch (scale_action) { + case -1: + /* Decrease starting rate, update uCode's rate table */ + if (low != IWL_RATE_INVALID) { + update_lq = 1; + index = low; + } + + break; + case 1: + /* Increase starting rate, update uCode's rate table */ + if (high != IWL_RATE_INVALID) { + update_lq = 1; + index = high; + } + + break; + case 0: + /* No change */ + default: + break; + } + + IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d " + "high %d type %d\n", + index, scale_action, low, high, tbl->lq_type); + +lq_update: + /* Replace uCode's rate table for the destination station. */ + if (update_lq) + rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta, + tbl, index, is_green); + + /* Should we stay with this modulation mode, + * or search for a new one? */ + iwl4965_rs_stay_in_table(lq_sta, false); + + /* + * Search for new modulation mode if we're: + * 1) Not changing rates right now + * 2) Not just finishing up a search + * 3) Allowing a new search + */ + if (!update_lq && !done_search && + !lq_sta->stay_in_tbl && window->counter) { + /* Save current throughput to compare with "search" throughput*/ + lq_sta->last_tpt = current_tpt; + + /* Select a new "search" modulation mode to try. + * If one is found, set up the new "search" table. */ + if (is_legacy(tbl->lq_type)) + iwl4965_rs_move_legacy_other(priv, lq_sta, + conf, sta, index); + else if (is_siso(tbl->lq_type)) + iwl4965_rs_move_siso_to_other(priv, lq_sta, + conf, sta, index); + else /* (is_mimo2(tbl->lq_type)) */ + iwl4965_rs_move_mimo2_to_other(priv, lq_sta, + conf, sta, index); + + /* If new "search" mode was selected, set up in uCode table */ + if (lq_sta->search_better_tbl) { + /* Access the "search" table, clear its history. 
*/ + tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + for (i = 0; i < IWL_RATE_COUNT; i++) + iwl4965_rs_rate_scale_clear_window( + &(tbl->win[i])); + + /* Use new "search" start rate */ + index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate); + + IWL_DEBUG_RATE(priv, + "Switch current mcs: %X index: %d\n", + tbl->current_rate, index); + iwl4965_rs_fill_link_cmd(priv, lq_sta, + tbl->current_rate); + iwl_legacy_send_lq_cmd(priv, ctx, + &lq_sta->lq, CMD_ASYNC, false); + } else + done_search = 1; + } + + if (done_search && !lq_sta->stay_in_tbl) { + /* If the "active" (non-search) mode was legacy, + * and we've tried switching antennas, + * but we haven't been able to try HT modes (not available), + * stay with best antenna legacy modulation for a while + * before next round of mode comparisons. */ + tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); + if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) && + lq_sta->action_counter > tbl1->max_search) { + IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n"); + iwl4965_rs_set_stay_in_table(priv, 1, lq_sta); + } + + /* If we're in an HT mode, and all 3 mode switch actions + * have been tried and compared, stay in this best modulation + * mode for a while before next round of mode comparisons. */ + if (lq_sta->enable_counter && + (lq_sta->action_counter >= tbl1->max_search)) { + if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) && + (lq_sta->tx_agg_tid_en & (1 << tid)) && + (tid != MAX_TID_COUNT)) { + tid_data = + &priv->stations[lq_sta->lq.sta_id].tid[tid]; + if (tid_data->agg.state == IWL_AGG_OFF) { + IWL_DEBUG_RATE(priv, + "try to aggregate tid %d\n", + tid); + iwl4965_rs_tl_turn_on_agg(priv, tid, + lq_sta, sta); + } + } + iwl4965_rs_set_stay_in_table(priv, 0, lq_sta); + } + } + +out: + tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, + index, is_green); + i = index; + lq_sta->last_txrate_idx = i; +} + +/** + * iwl4965_rs_initialize_lq - Initialize a station's hardware rate table + * + * The uCode's station table contains a table of fallback rates + * for automatic fallback during transmission. + * + * NOTE: This sets up a default set of values. These will be replaced later + * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of + * rc80211_simple. + * + * NOTE: Run REPLY_ADD_STA command to set up station table entry, before + * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD, + * which requires station table entry to exist). 
+ */ +static void iwl4965_rs_initialize_lq(struct iwl_priv *priv, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta) +{ + struct iwl_scale_tbl_info *tbl; + int rate_idx; + int i; + u32 rate; + u8 use_green = iwl4965_rs_use_green(sta); + u8 active_tbl = 0; + u8 valid_tx_ant; + struct iwl_station_priv *sta_priv; + struct iwl_rxon_context *ctx; + + if (!sta || !lq_sta) + return; + + sta_priv = (void *)sta->drv_priv; + ctx = sta_priv->common.ctx; + + i = lq_sta->last_txrate_idx; + + valid_tx_ant = priv->hw_params.valid_tx_ant; + + if (!lq_sta->search_better_tbl) + active_tbl = lq_sta->active_tbl; + else + active_tbl = 1 - lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + + if ((i < 0) || (i >= IWL_RATE_COUNT)) + i = 0; + + rate = iwlegacy_rates[i].plcp; + tbl->ant_type = iwl4965_first_antenna(valid_tx_ant); + rate |= tbl->ant_type << RATE_MCS_ANT_POS; + + if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE) + rate |= RATE_MCS_CCK_MSK; + + iwl4965_rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx); + if (!iwl4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type)) + iwl4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl); + + rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green); + tbl->current_rate = rate; + iwl4965_rs_set_expected_tpt_table(lq_sta, tbl); + iwl4965_rs_fill_link_cmd(NULL, lq_sta, rate); + priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq; + iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true); +} + +static void +iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, + struct ieee80211_tx_rate_control *txrc) +{ + + struct sk_buff *skb = txrc->skb; + struct ieee80211_supported_band *sband = txrc->sband; + struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct iwl_lq_sta *lq_sta = priv_sta; + int rate_idx; + + IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n"); + + /* Get max rate if user set max rate */ + if (lq_sta) { + lq_sta->max_rate_idx = txrc->max_rate_idx; + if ((sband->band == IEEE80211_BAND_5GHZ) && + (lq_sta->max_rate_idx != -1)) + lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE; + if ((lq_sta->max_rate_idx < 0) || + (lq_sta->max_rate_idx >= IWL_RATE_COUNT)) + lq_sta->max_rate_idx = -1; + } + + /* Treat uninitialized rate scaling data same as non-existing. */ + if (lq_sta && !lq_sta->drv) { + IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n"); + priv_sta = NULL; + } + + /* Send management frames and NO_ACK data using lowest rate. */ + if (rate_control_send_low(sta, priv_sta, txrc)) + return; + + if (!lq_sta) + return; + + rate_idx = lq_sta->last_txrate_idx; + + if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) { + rate_idx -= IWL_FIRST_OFDM_RATE; + /* 6M and 9M shared same MCS index */ + rate_idx = (rate_idx > 0) ? 
(rate_idx - 1) : 0; + if (iwl4965_rs_extract_rate(lq_sta->last_rate_n_flags) >= + IWL_RATE_MIMO2_6M_PLCP) + rate_idx = rate_idx + MCS_INDEX_PER_STREAM; + info->control.rates[0].flags = IEEE80211_TX_RC_MCS; + if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK) + info->control.rates[0].flags |= + IEEE80211_TX_RC_SHORT_GI; + if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK) + info->control.rates[0].flags |= + IEEE80211_TX_RC_DUP_DATA; + if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK) + info->control.rates[0].flags |= + IEEE80211_TX_RC_40_MHZ_WIDTH; + if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK) + info->control.rates[0].flags |= + IEEE80211_TX_RC_GREEN_FIELD; + } else { + /* Check for invalid rates */ + if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) || + ((sband->band == IEEE80211_BAND_5GHZ) && + (rate_idx < IWL_FIRST_OFDM_RATE))) + rate_idx = rate_lowest_index(sband, sta); + /* On valid 5 GHz rate, adjust index */ + else if (sband->band == IEEE80211_BAND_5GHZ) + rate_idx -= IWL_FIRST_OFDM_RATE; + info->control.rates[0].flags = 0; + } + info->control.rates[0].idx = rate_idx; + +} + +static void *iwl4965_rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta, + gfp_t gfp) +{ + struct iwl_lq_sta *lq_sta; + struct iwl_station_priv *sta_priv = + (struct iwl_station_priv *) sta->drv_priv; + struct iwl_priv *priv; + + priv = (struct iwl_priv *)priv_rate; + IWL_DEBUG_RATE(priv, "create station rate scale window\n"); + + lq_sta = &sta_priv->lq_sta; + + return lq_sta; +} + +/* + * Called after adding a new station to initialize rate scaling + */ +void +iwl4965_rs_rate_init(struct iwl_priv *priv, + struct ieee80211_sta *sta, + u8 sta_id) +{ + int i, j; + struct ieee80211_hw *hw = priv->hw; + struct ieee80211_conf *conf = &priv->hw->conf; + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + struct iwl_station_priv *sta_priv; + struct iwl_lq_sta *lq_sta; + struct ieee80211_supported_band *sband; + + sta_priv = (struct iwl_station_priv *) sta->drv_priv; + lq_sta = &sta_priv->lq_sta; + sband = hw->wiphy->bands[conf->channel->band]; + + + lq_sta->lq.sta_id = sta_id; + + for (j = 0; j < LQ_SIZE; j++) + for (i = 0; i < IWL_RATE_COUNT; i++) + iwl4965_rs_rate_scale_clear_window( + &lq_sta->lq_info[j].win[i]); + + lq_sta->flush_timer = 0; + lq_sta->supp_rates = sta->supp_rates[sband->band]; + for (j = 0; j < LQ_SIZE; j++) + for (i = 0; i < IWL_RATE_COUNT; i++) + iwl4965_rs_rate_scale_clear_window( + &lq_sta->lq_info[j].win[i]); + + IWL_DEBUG_RATE(priv, "LQ:" + "*** rate scale station global init for station %d ***\n", + sta_id); + /* TODO: what is a good starting rate for STA? About middle? Maybe not + * the lowest or the highest rate.. Could consider using RSSI from + * previous packets? Need to have IEEE 802.1X auth succeed immediately + * after assoc.. */ + + lq_sta->is_dup = 0; + lq_sta->max_rate_idx = -1; + lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX; + lq_sta->is_green = iwl4965_rs_use_green(sta); + lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000); + lq_sta->band = priv->band; + /* + * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3), + * supp_rates[] does not; shift to convert format, force 9 MBits off. 
+ */ + lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1; + lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1; + lq_sta->active_siso_rate &= ~((u16)0x2); + lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE; + + /* Same here */ + lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1; + lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1; + lq_sta->active_mimo2_rate &= ~((u16)0x2); + lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE; + + /* These values will be overridden later */ + lq_sta->lq.general_params.single_stream_ant_msk = + iwl4965_first_antenna(priv->hw_params.valid_tx_ant); + lq_sta->lq.general_params.dual_stream_ant_msk = + priv->hw_params.valid_tx_ant & + ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant); + if (!lq_sta->lq.general_params.dual_stream_ant_msk) { + lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB; + } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) { + lq_sta->lq.general_params.dual_stream_ant_msk = + priv->hw_params.valid_tx_ant; + } + + /* as default allow aggregation for all tids */ + lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID; + lq_sta->drv = priv; + + /* Set last_txrate_idx to lowest rate */ + lq_sta->last_txrate_idx = rate_lowest_index(sband, sta); + if (sband->band == IEEE80211_BAND_5GHZ) + lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; + lq_sta->is_agg = 0; + +#ifdef CONFIG_MAC80211_DEBUGFS + lq_sta->dbg_fixed_rate = 0; +#endif + + iwl4965_rs_initialize_lq(priv, conf, sta, lq_sta); +} + +static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, u32 new_rate) +{ + struct iwl_scale_tbl_info tbl_type; + int index = 0; + int rate_idx; + int repeat_rate = 0; + u8 ant_toggle_cnt = 0; + u8 use_ht_possible = 1; + u8 valid_tx_ant = 0; + struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq; + + /* Override starting rate (index 0) if needed for debug purposes */ + iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index); + + /* Interpret new_rate (rate_n_flags) */ + iwl4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, + &tbl_type, &rate_idx); + + /* How many times should we repeat the initial rate? */ + if (is_legacy(tbl_type.lq_type)) { + ant_toggle_cnt = 1; + repeat_rate = IWL_NUMBER_TRY; + } else { + repeat_rate = IWL_HT_NUMBER_TRY; + } + + lq_cmd->general_params.mimo_delimiter = + is_mimo(tbl_type.lq_type) ? 1 : 0; + + /* Fill 1st table entry (index 0) */ + lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate); + + if (iwl4965_num_of_ant(tbl_type.ant_type) == 1) { + lq_cmd->general_params.single_stream_ant_msk = + tbl_type.ant_type; + } else if (iwl4965_num_of_ant(tbl_type.ant_type) == 2) { + lq_cmd->general_params.dual_stream_ant_msk = + tbl_type.ant_type; + } /* otherwise we don't modify the existing value */ + + index++; + repeat_rate--; + if (priv) + valid_tx_ant = priv->hw_params.valid_tx_ant; + + /* Fill rest of rate table */ + while (index < LINK_QUAL_MAX_RETRY_NUM) { + /* Repeat initial/next rate. + * For legacy IWL_NUMBER_TRY == 1, this loop will not execute. + * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. 
*/ + while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) { + if (is_legacy(tbl_type.lq_type)) { + if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) + ant_toggle_cnt++; + else if (priv && + iwl4965_rs_toggle_antenna(valid_tx_ant, + &new_rate, &tbl_type)) + ant_toggle_cnt = 1; + } + + /* Override next rate if needed for debug purposes */ + iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index); + + /* Fill next table entry */ + lq_cmd->rs_table[index].rate_n_flags = + cpu_to_le32(new_rate); + repeat_rate--; + index++; + } + + iwl4965_rs_get_tbl_info_from_mcs(new_rate, + lq_sta->band, &tbl_type, + &rate_idx); + + /* Indicate to uCode which entries might be MIMO. + * If initial rate was MIMO, this will finally end up + * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */ + if (is_mimo(tbl_type.lq_type)) + lq_cmd->general_params.mimo_delimiter = index; + + /* Get next rate */ + new_rate = iwl4965_rs_get_lower_rate(lq_sta, + &tbl_type, rate_idx, + use_ht_possible); + + /* How many times should we repeat the next rate? */ + if (is_legacy(tbl_type.lq_type)) { + if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) + ant_toggle_cnt++; + else if (priv && + iwl4965_rs_toggle_antenna(valid_tx_ant, + &new_rate, &tbl_type)) + ant_toggle_cnt = 1; + + repeat_rate = IWL_NUMBER_TRY; + } else { + repeat_rate = IWL_HT_NUMBER_TRY; + } + + /* Don't allow HT rates after next pass. + * iwl4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */ + use_ht_possible = 0; + + /* Override next rate if needed for debug purposes */ + iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index); + + /* Fill next table entry */ + lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate); + + index++; + repeat_rate--; + } + + lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; + lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; + + lq_cmd->agg_params.agg_time_limit = + cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); +} + +static void +*iwl4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +{ + return hw->priv; +} +/* rate scale requires free function to be implemented */ +static void iwl4965_rs_free(void *priv_rate) +{ + return; +} + +static void iwl4965_rs_free_sta(void *priv_r, struct ieee80211_sta *sta, + void *priv_sta) +{ + struct iwl_priv *priv __maybe_unused = priv_r; + + IWL_DEBUG_RATE(priv, "enter\n"); + IWL_DEBUG_RATE(priv, "leave\n"); +} + + +#ifdef CONFIG_MAC80211_DEBUGFS +static int iwl4965_open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} +static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, + u32 *rate_n_flags, int index) +{ + struct iwl_priv *priv; + u8 valid_tx_ant; + u8 ant_sel_tx; + + priv = lq_sta->drv; + valid_tx_ant = priv->hw_params.valid_tx_ant; + if (lq_sta->dbg_fixed_rate) { + ant_sel_tx = + ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) + >> RATE_MCS_ANT_POS); + if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) { + *rate_n_flags = lq_sta->dbg_fixed_rate; + IWL_DEBUG_RATE(priv, "Fixed rate ON\n"); + } else { + lq_sta->dbg_fixed_rate = 0; + IWL_ERR(priv, + "Invalid antenna selection 0x%X, Valid is 0x%X\n", + ant_sel_tx, valid_tx_ant); + IWL_DEBUG_RATE(priv, "Fixed rate OFF\n"); + } + } else { + IWL_DEBUG_RATE(priv, "Fixed rate OFF\n"); + } +} + +static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct iwl_lq_sta *lq_sta = file->private_data; + struct iwl_priv *priv; + char buf[64]; + 
size_t buf_size; + u32 parsed_rate; + struct iwl_station_priv *sta_priv = + container_of(lq_sta, struct iwl_station_priv, lq_sta); + struct iwl_rxon_context *ctx = sta_priv->common.ctx; + + priv = lq_sta->drv; + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (sscanf(buf, "%x", &parsed_rate) == 1) + lq_sta->dbg_fixed_rate = parsed_rate; + else + lq_sta->dbg_fixed_rate = 0; + + lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */ + lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ + lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ + + IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n", + lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate); + + if (lq_sta->dbg_fixed_rate) { + iwl4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate); + iwl_legacy_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC, + false); + } + + return count; +} + +static ssize_t iwl4965_rs_sta_dbgfs_scale_table_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char *buff; + int desc = 0; + int i = 0; + int index = 0; + ssize_t ret; + + struct iwl_lq_sta *lq_sta = file->private_data; + struct iwl_priv *priv; + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + + priv = lq_sta->drv; + buff = kmalloc(1024, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id); + desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n", + lq_sta->total_failed, lq_sta->total_success, + lq_sta->active_legacy_rate); + desc += sprintf(buff+desc, "fixed rate 0x%X\n", + lq_sta->dbg_fixed_rate); + desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", + (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "", + (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "", + (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : ""); + desc += sprintf(buff+desc, "lq type %s\n", + (is_legacy(tbl->lq_type)) ? "legacy" : "HT"); + if (is_Ht(tbl->lq_type)) { + desc += sprintf(buff+desc, " %s", + (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2"); + desc += sprintf(buff+desc, " %s", + (tbl->is_ht40) ? "40MHz" : "20MHz"); + desc += sprintf(buff+desc, " %s %s %s\n", + (tbl->is_SGI) ? "SGI" : "", + (lq_sta->is_green) ? "GF enabled" : "", + (lq_sta->is_agg) ? 
"AGG on" : ""); + } + desc += sprintf(buff+desc, "last tx rate=0x%X\n", + lq_sta->last_rate_n_flags); + desc += sprintf(buff+desc, "general:" + "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n", + lq_sta->lq.general_params.flags, + lq_sta->lq.general_params.mimo_delimiter, + lq_sta->lq.general_params.single_stream_ant_msk, + lq_sta->lq.general_params.dual_stream_ant_msk); + + desc += sprintf(buff+desc, "agg:" + "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n", + le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit), + lq_sta->lq.agg_params.agg_dis_start_th, + lq_sta->lq.agg_params.agg_frame_cnt_limit); + + desc += sprintf(buff+desc, + "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", + lq_sta->lq.general_params.start_rate_index[0], + lq_sta->lq.general_params.start_rate_index[1], + lq_sta->lq.general_params.start_rate_index[2], + lq_sta->lq.general_params.start_rate_index[3]); + + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { + index = iwl4965_hwrate_to_plcp_idx( + le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags)); + if (is_legacy(tbl->lq_type)) { + desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n", + i, + le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags), + iwl_rate_mcs[index].mbps); + } else { + desc += sprintf(buff+desc, + " rate[%d] 0x%X %smbps (%s)\n", + i, + le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags), + iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs); + } + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_scale_table_ops = { + .write = iwl4965_rs_sta_dbgfs_scale_table_write, + .read = iwl4965_rs_sta_dbgfs_scale_table_read, + .open = iwl4965_open_file_generic, + .llseek = default_llseek, +}; +static ssize_t iwl4965_rs_sta_dbgfs_stats_table_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char *buff; + int desc = 0; + int i, j; + ssize_t ret; + + struct iwl_lq_sta *lq_sta = file->private_data; + + buff = kmalloc(1024, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + for (i = 0; i < LQ_SIZE; i++) { + desc += sprintf(buff+desc, + "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n" + "rate=0x%X\n", + lq_sta->active_tbl == i ? 
"*" : "x", + lq_sta->lq_info[i].lq_type, + lq_sta->lq_info[i].is_SGI, + lq_sta->lq_info[i].is_ht40, + lq_sta->lq_info[i].is_dup, + lq_sta->is_green, + lq_sta->lq_info[i].current_rate); + for (j = 0; j < IWL_RATE_COUNT; j++) { + desc += sprintf(buff+desc, + "counter=%d success=%d %%=%d\n", + lq_sta->lq_info[i].win[j].counter, + lq_sta->lq_info[i].win[j].success_counter, + lq_sta->lq_info[i].win[j].success_ratio); + } + } + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_stats_table_ops = { + .read = iwl4965_rs_sta_dbgfs_stats_table_read, + .open = iwl4965_open_file_generic, + .llseek = default_llseek, +}; + +static ssize_t iwl4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char buff[120]; + int desc = 0; + ssize_t ret; + + struct iwl_lq_sta *lq_sta = file->private_data; + struct iwl_priv *priv; + struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl]; + + priv = lq_sta->drv; + + if (is_Ht(tbl->lq_type)) + desc += sprintf(buff+desc, + "Bit Rate= %d Mb/s\n", + tbl->expected_tpt[lq_sta->last_txrate_idx]); + else + desc += sprintf(buff+desc, + "Bit Rate= %d Mb/s\n", + iwlegacy_rates[lq_sta->last_txrate_idx].ieee >> 1); + + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = { + .read = iwl4965_rs_sta_dbgfs_rate_scale_data_read, + .open = iwl4965_open_file_generic, + .llseek = default_llseek, +}; + +static void iwl4965_rs_add_debugfs(void *priv, void *priv_sta, + struct dentry *dir) +{ + struct iwl_lq_sta *lq_sta = priv_sta; + lq_sta->rs_sta_dbgfs_scale_table_file = + debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, + lq_sta, &rs_sta_dbgfs_scale_table_ops); + lq_sta->rs_sta_dbgfs_stats_table_file = + debugfs_create_file("rate_stats_table", S_IRUSR, dir, + lq_sta, &rs_sta_dbgfs_stats_table_ops); + lq_sta->rs_sta_dbgfs_rate_scale_data_file = + debugfs_create_file("rate_scale_data", S_IRUSR, dir, + lq_sta, &rs_sta_dbgfs_rate_scale_data_ops); + lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file = + debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir, + &lq_sta->tx_agg_tid_en); + +} + +static void iwl4965_rs_remove_debugfs(void *priv, void *priv_sta) +{ + struct iwl_lq_sta *lq_sta = priv_sta; + debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file); + debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); + debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file); + debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file); +} +#endif + +/* + * Initialization of rate scaling information is done by driver after + * the station is added. Since mac80211 calls this function before a + * station is added we ignore it. 
+ */ +static void +iwl4965_rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta) +{ +} +static struct rate_control_ops rs_4965_ops = { + .module = NULL, + .name = IWL4965_RS_NAME, + .tx_status = iwl4965_rs_tx_status, + .get_rate = iwl4965_rs_get_rate, + .rate_init = iwl4965_rs_rate_init_stub, + .alloc = iwl4965_rs_alloc, + .free = iwl4965_rs_free, + .alloc_sta = iwl4965_rs_alloc_sta, + .free_sta = iwl4965_rs_free_sta, +#ifdef CONFIG_MAC80211_DEBUGFS + .add_sta_debugfs = iwl4965_rs_add_debugfs, + .remove_sta_debugfs = iwl4965_rs_remove_debugfs, +#endif +}; + +int iwl4965_rate_control_register(void) +{ + return ieee80211_rate_control_register(&rs_4965_ops); +} + +void iwl4965_rate_control_unregister(void) +{ + ieee80211_rate_control_unregister(&rs_4965_ops); +} diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-4965-rx.c b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-rx.c new file mode 100644 index 000000000000..2b144bbfc3c5 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-rx.c @@ -0,0 +1,215 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-4965-calib.h" +#include "iwl-sta.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +#include "iwl-4965-hw.h" +#include "iwl-4965.h" + +void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) + +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_missed_beacon_notif *missed_beacon; + + missed_beacon = &pkt->u.missed_beacon; + if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) > + priv->missed_beacon_threshold) { + IWL_DEBUG_CALIB(priv, + "missed bcn cnsq %d totl %d rcd %d expctd %d\n", + le32_to_cpu(missed_beacon->consecutive_missed_beacons), + le32_to_cpu(missed_beacon->total_missed_becons), + le32_to_cpu(missed_beacon->num_recvd_beacons), + le32_to_cpu(missed_beacon->num_expected_beacons)); + if (!test_bit(STATUS_SCANNING, &priv->status)) + iwl4965_init_sensitivity(priv); + } +} + +/* Calculate noise level, based on measurements during network silence just + * before arriving beacon. This measurement can be done only if we know + * exactly when to expect beacons, therefore only when we're associated. 
*/ +static void iwl4965_rx_calc_noise(struct iwl_priv *priv) +{ + struct statistics_rx_non_phy *rx_info; + int num_active_rx = 0; + int total_silence = 0; + int bcn_silence_a, bcn_silence_b, bcn_silence_c; + int last_rx_noise; + + rx_info = &(priv->_4965.statistics.rx.general); + bcn_silence_a = + le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; + bcn_silence_b = + le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; + bcn_silence_c = + le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; + + if (bcn_silence_a) { + total_silence += bcn_silence_a; + num_active_rx++; + } + if (bcn_silence_b) { + total_silence += bcn_silence_b; + num_active_rx++; + } + if (bcn_silence_c) { + total_silence += bcn_silence_c; + num_active_rx++; + } + + /* Average among active antennas */ + if (num_active_rx) + last_rx_noise = (total_silence / num_active_rx) - 107; + else + last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; + + IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n", + bcn_silence_a, bcn_silence_b, bcn_silence_c, + last_rx_noise); +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS +/* + * based on the assumption of all statistics counter are in DWORD + * FIXME: This function is for debugging, do not deal with + * the case of counters roll-over. + */ +static void iwl4965_accumulative_statistics(struct iwl_priv *priv, + __le32 *stats) +{ + int i, size; + __le32 *prev_stats; + u32 *accum_stats; + u32 *delta, *max_delta; + struct statistics_general_common *general, *accum_general; + struct statistics_tx *tx, *accum_tx; + + prev_stats = (__le32 *)&priv->_4965.statistics; + accum_stats = (u32 *)&priv->_4965.accum_statistics; + size = sizeof(struct iwl_notif_statistics); + general = &priv->_4965.statistics.general.common; + accum_general = &priv->_4965.accum_statistics.general.common; + tx = &priv->_4965.statistics.tx; + accum_tx = &priv->_4965.accum_statistics.tx; + delta = (u32 *)&priv->_4965.delta_statistics; + max_delta = (u32 *)&priv->_4965.max_delta; + + for (i = sizeof(__le32); i < size; + i += sizeof(__le32), stats++, prev_stats++, delta++, + max_delta++, accum_stats++) { + if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) { + *delta = (le32_to_cpu(*stats) - + le32_to_cpu(*prev_stats)); + *accum_stats += *delta; + if (*delta > *max_delta) + *max_delta = *delta; + } + } + + /* reset accumulative statistics for "no-counter" type statistics */ + accum_general->temperature = general->temperature; + accum_general->ttl_timestamp = general->ttl_timestamp; +} +#endif + +#define REG_RECALIB_PERIOD (60) + +void iwl4965_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + int change; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + IWL_DEBUG_RX(priv, + "Statistics notification received (%d vs %d).\n", + (int)sizeof(struct iwl_notif_statistics), + le32_to_cpu(pkt->len_n_flags) & + FH_RSCSR_FRAME_SIZE_MSK); + + change = ((priv->_4965.statistics.general.common.temperature != + pkt->u.stats.general.common.temperature) || + ((priv->_4965.statistics.flag & + STATISTICS_REPLY_FLG_HT40_MODE_MSK) != + (pkt->u.stats.flag & + STATISTICS_REPLY_FLG_HT40_MODE_MSK))); +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); +#endif + + /* TODO: reading some of statistics is unneeded */ + memcpy(&priv->_4965.statistics, &pkt->u.stats, + sizeof(priv->_4965.statistics)); + + set_bit(STATUS_STATISTICS, &priv->status); + + /* Reschedule the statistics timer to occur in + * REG_RECALIB_PERIOD seconds to ensure we get a + * 
thermal update even if the uCode doesn't give + * us one */ + mod_timer(&priv->statistics_periodic, jiffies + + msecs_to_jiffies(REG_RECALIB_PERIOD * 1000)); + + if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && + (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) { + iwl4965_rx_calc_noise(priv); + queue_work(priv->workqueue, &priv->run_time_calib_work); + } + if (priv->cfg->ops->lib->temp_ops.temperature && change) + priv->cfg->ops->lib->temp_ops.temperature(priv); +} + +void iwl4965_reply_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) { +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + memset(&priv->_4965.accum_statistics, 0, + sizeof(struct iwl_notif_statistics)); + memset(&priv->_4965.delta_statistics, 0, + sizeof(struct iwl_notif_statistics)); + memset(&priv->_4965.max_delta, 0, + sizeof(struct iwl_notif_statistics)); +#endif + IWL_DEBUG_RX(priv, "Statistics have been cleared\n"); + } + iwl4965_rx_statistics(priv, rxb); +} diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-sta.c new file mode 100644 index 000000000000..a262c23553d2 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-sta.c @@ -0,0 +1,721 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-sta.h" +#include "iwl-4965.h" + +static struct iwl_link_quality_cmd * +iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id) +{ + int i, r; + struct iwl_link_quality_cmd *link_cmd; + u32 rate_flags = 0; + __le32 rate_n_flags; + + link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL); + if (!link_cmd) { + IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n"); + return NULL; + } + /* Set up the rate scaling to start at selected rate, fall back + * all the way down to 1M in IEEE order, and then spin on 1M */ + if (priv->band == IEEE80211_BAND_5GHZ) + r = IWL_RATE_6M_INDEX; + else + r = IWL_RATE_1M_INDEX; + + if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE) + rate_flags |= RATE_MCS_CCK_MSK; + + rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) << + RATE_MCS_ANT_POS; + rate_n_flags = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp, + rate_flags); + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) + link_cmd->rs_table[i].rate_n_flags = rate_n_flags; + + link_cmd->general_params.single_stream_ant_msk = + iwl4965_first_antenna(priv->hw_params.valid_tx_ant); + + link_cmd->general_params.dual_stream_ant_msk = + priv->hw_params.valid_tx_ant & + ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant); + if (!link_cmd->general_params.dual_stream_ant_msk) { + link_cmd->general_params.dual_stream_ant_msk = ANT_AB; + } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) { + link_cmd->general_params.dual_stream_ant_msk = + priv->hw_params.valid_tx_ant; + } + + link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; + link_cmd->agg_params.agg_time_limit = + cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); + + link_cmd->sta_id = sta_id; + + return link_cmd; +} + +/* + * iwl4965_add_bssid_station - Add the special IBSS BSSID station + * + * Function sleeps. 
+ */ +int +iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + const u8 *addr, u8 *sta_id_r) +{ + int ret; + u8 sta_id; + struct iwl_link_quality_cmd *link_cmd; + unsigned long flags; + + if (sta_id_r) + *sta_id_r = IWL_INVALID_STATION; + + ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id); + if (ret) { + IWL_ERR(priv, "Unable to add station %pM\n", addr); + return ret; + } + + if (sta_id_r) + *sta_id_r = sta_id; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].used |= IWL_STA_LOCAL; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + /* Set up default rate scaling table in device's station table */ + link_cmd = iwl4965_sta_alloc_lq(priv, sta_id); + if (!link_cmd) { + IWL_ERR(priv, + "Unable to initialize rate scaling for station %pM.\n", + addr); + return -ENOMEM; + } + + ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true); + if (ret) + IWL_ERR(priv, "Link quality command failed (%d)\n", ret); + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].lq = link_cmd; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return 0; +} + +static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + bool send_if_empty) +{ + int i, not_empty = 0; + u8 buff[sizeof(struct iwl_wep_cmd) + + sizeof(struct iwl_wep_key) * WEP_KEYS_MAX]; + struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff; + size_t cmd_size = sizeof(struct iwl_wep_cmd); + struct iwl_host_cmd cmd = { + .id = ctx->wep_key_cmd, + .data = wep_cmd, + .flags = CMD_SYNC, + }; + + might_sleep(); + + memset(wep_cmd, 0, cmd_size + + (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX)); + + for (i = 0; i < WEP_KEYS_MAX ; i++) { + wep_cmd->key[i].key_index = i; + if (ctx->wep_keys[i].key_size) { + wep_cmd->key[i].key_offset = i; + not_empty = 1; + } else { + wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET; + } + + wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size; + memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key, + ctx->wep_keys[i].key_size); + } + + wep_cmd->global_key_type = WEP_KEY_WEP_TYPE; + wep_cmd->num_keys = WEP_KEYS_MAX; + + cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX; + + cmd.len = cmd_size; + + if (not_empty || send_if_empty) + return iwl_legacy_send_cmd(priv, &cmd); + else + return 0; +} + +int iwl4965_restore_default_wep_keys(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + lockdep_assert_held(&priv->mutex); + + return iwl4965_static_wepkey_cmd(priv, ctx, false); +} + +int iwl4965_remove_default_wep_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf) +{ + int ret; + + lockdep_assert_held(&priv->mutex); + + IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", + keyconf->keyidx); + + memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0])); + if (iwl_legacy_is_rfkill(priv)) { + IWL_DEBUG_WEP(priv, + "Not sending REPLY_WEPKEY command due to RFKILL.\n"); + /* but keys in device are clear anyway so return success */ + return 0; + } + ret = iwl4965_static_wepkey_cmd(priv, ctx, 1); + IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n", + keyconf->keyidx, ret); + + return ret; +} + +int iwl4965_set_default_wep_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf) +{ + int ret; + + lockdep_assert_held(&priv->mutex); + + if (keyconf->keylen != WEP_KEY_LEN_128 && + keyconf->keylen != WEP_KEY_LEN_64) { + IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen); + 
return -EINVAL; + } + + keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; + keyconf->hw_key_idx = HW_KEY_DEFAULT; + priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher; + + ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen; + memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key, + keyconf->keylen); + + ret = iwl4965_static_wepkey_cmd(priv, ctx, false); + IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n", + keyconf->keylen, keyconf->keyidx, ret); + + return ret; +} + +static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + __le16 key_flags = 0; + struct iwl_legacy_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); + + keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; + + key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK); + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags &= ~STA_KEY_FLG_INVALID; + + if (keyconf->keylen == WEP_KEY_LEN_128) + key_flags |= STA_KEY_FLG_KEY_SIZE_MSK; + + if (sta_id == ctx->bcast_sta_id) + key_flags |= STA_KEY_MULTICAST_MSK; + + spin_lock_irqsave(&priv->sta_lock, flags); + + priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; + priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; + priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx; + + memcpy(priv->stations[sta_id].keyinfo.key, + keyconf->key, keyconf->keylen); + + memcpy(&priv->stations[sta_id].sta.key.key[3], + keyconf->key, keyconf->keylen); + + if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) + == STA_KEY_FLG_NO_ENC) + priv->stations[sta_id].sta.key.key_offset = + iwl_legacy_get_free_ucode_key_index(priv); + /* else, we are overriding an existing key => no need to allocated room + * in uCode. */ + + WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, + "no space for a new key"); + + priv->stations[sta_id].sta.key.key_flags = key_flags; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + memcpy(&sta_cmd, &priv->stations[sta_id].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); +} + +static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + __le16 key_flags = 0; + struct iwl_legacy_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); + + key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags &= ~STA_KEY_FLG_INVALID; + + if (sta_id == ctx->bcast_sta_id) + key_flags |= STA_KEY_MULTICAST_MSK; + + keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; + priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; + + memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, + keyconf->keylen); + + memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, + keyconf->keylen); + + if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) + == STA_KEY_FLG_NO_ENC) + priv->stations[sta_id].sta.key.key_offset = + iwl_legacy_get_free_ucode_key_index(priv); + /* else, we are overriding an existing key => no need to allocated room + * in uCode. 
*/ + + WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, + "no space for a new key"); + + priv->stations[sta_id].sta.key.key_flags = key_flags; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + memcpy(&sta_cmd, &priv->stations[sta_id].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); +} + +static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + int ret = 0; + __le16 key_flags = 0; + + key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK); + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags &= ~STA_KEY_FLG_INVALID; + + if (sta_id == ctx->bcast_sta_id) + key_flags |= STA_KEY_MULTICAST_MSK; + + keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + + spin_lock_irqsave(&priv->sta_lock, flags); + + priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; + priv->stations[sta_id].keyinfo.keylen = 16; + + if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) + == STA_KEY_FLG_NO_ENC) + priv->stations[sta_id].sta.key.key_offset = + iwl_legacy_get_free_ucode_key_index(priv); + /* else, we are overriding an existing key => no need to allocated room + * in uCode. */ + + WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, + "no space for a new key"); + + priv->stations[sta_id].sta.key.key_flags = key_flags; + + + /* This copy is acutally not needed: we get the key with each TX */ + memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16); + + memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16); + + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return ret; +} + +void iwl4965_update_tkip_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, u32 iv32, u16 *phase1key) +{ + u8 sta_id; + unsigned long flags; + int i; + + if (iwl_legacy_scan_cancel(priv)) { + /* cancel scan failed, just live w/ bad key and rely + briefly on SW decryption */ + return; + } + + sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta); + if (sta_id == IWL_INVALID_STATION) + return; + + spin_lock_irqsave(&priv->sta_lock, flags); + + priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32; + + for (i = 0; i < 5; i++) + priv->stations[sta_id].sta.key.tkip_rx_ttak[i] = + cpu_to_le16(phase1key[i]); + + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); + + spin_unlock_irqrestore(&priv->sta_lock, flags); + +} + +int iwl4965_remove_dynamic_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + u16 key_flags; + u8 keyidx; + struct iwl_legacy_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); + + ctx->key_mapping_keys--; + + spin_lock_irqsave(&priv->sta_lock, flags); + key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags); + keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3; + + IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n", + keyconf->keyidx, sta_id); + + if (keyconf->keyidx != keyidx) { + /* We need to remove a key with index different 
that the one + * in the uCode. This means that the key we need to remove has + * been replaced by another one with different index. + * Don't do anything and return ok + */ + spin_unlock_irqrestore(&priv->sta_lock, flags); + return 0; + } + + if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) { + IWL_WARN(priv, "Removing wrong key %d 0x%x\n", + keyconf->keyidx, key_flags); + spin_unlock_irqrestore(&priv->sta_lock, flags); + return 0; + } + + if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset, + &priv->ucode_key_table)) + IWL_ERR(priv, "index %d not used in uCode key table.\n", + priv->stations[sta_id].sta.key.key_offset); + memset(&priv->stations[sta_id].keyinfo, 0, + sizeof(struct iwl_hw_key)); + memset(&priv->stations[sta_id].sta.key, 0, + sizeof(struct iwl4965_keyinfo)); + priv->stations[sta_id].sta.key.key_flags = + STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID; + priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + if (iwl_legacy_is_rfkill(priv)) { + IWL_DEBUG_WEP(priv, + "Not sending REPLY_ADD_STA command because RFKILL enabled.\n"); + spin_unlock_irqrestore(&priv->sta_lock, flags); + return 0; + } + memcpy(&sta_cmd, &priv->stations[sta_id].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); +} + +int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, u8 sta_id) +{ + int ret; + + lockdep_assert_held(&priv->mutex); + + ctx->key_mapping_keys++; + keyconf->hw_key_idx = HW_KEY_DYNAMIC; + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx, + keyconf, sta_id); + break; + case WLAN_CIPHER_SUITE_TKIP: + ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx, + keyconf, sta_id); + break; + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + ret = iwl4965_set_wep_dynamic_key_info(priv, ctx, + keyconf, sta_id); + break; + default: + IWL_ERR(priv, + "Unknown alg: %s cipher = %x\n", __func__, + keyconf->cipher); + ret = -EINVAL; + } + + IWL_DEBUG_WEP(priv, + "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n", + keyconf->cipher, keyconf->keylen, keyconf->keyidx, + sta_id, ret); + + return ret; +} + +/** + * iwl4965_alloc_bcast_station - add broadcast station into driver's station table. + * + * This adds the broadcast station into the driver's station table + * and marks it driver active, so that it will be restored to the + * device at the next best time. 
+ */ +int iwl4965_alloc_bcast_station(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + struct iwl_link_quality_cmd *link_cmd; + unsigned long flags; + u8 sta_id; + + spin_lock_irqsave(&priv->sta_lock, flags); + sta_id = iwl_legacy_prep_station(priv, ctx, iwlegacy_bcast_addr, + false, NULL); + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Unable to prepare broadcast station\n"); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return -EINVAL; + } + + priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE; + priv->stations[sta_id].used |= IWL_STA_BCAST; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + link_cmd = iwl4965_sta_alloc_lq(priv, sta_id); + if (!link_cmd) { + IWL_ERR(priv, + "Unable to initialize rate scaling for bcast station.\n"); + return -ENOMEM; + } + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].lq = link_cmd; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return 0; +} + +/** + * iwl4965_update_bcast_station - update broadcast station's LQ command + * + * Only used by iwl4965. Placed here to have all bcast station management + * code together. + */ +static int iwl4965_update_bcast_station(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + unsigned long flags; + struct iwl_link_quality_cmd *link_cmd; + u8 sta_id = ctx->bcast_sta_id; + + link_cmd = iwl4965_sta_alloc_lq(priv, sta_id); + if (!link_cmd) { + IWL_ERR(priv, + "Unable to initialize rate scaling for bcast station.\n"); + return -ENOMEM; + } + + spin_lock_irqsave(&priv->sta_lock, flags); + if (priv->stations[sta_id].lq) + kfree(priv->stations[sta_id].lq); + else + IWL_DEBUG_INFO(priv, + "Bcast station rate scaling has not been initialized yet.\n"); + priv->stations[sta_id].lq = link_cmd; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return 0; +} + +int iwl4965_update_bcast_stations(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx; + int ret = 0; + + for_each_context(priv, ctx) { + ret = iwl4965_update_bcast_station(priv, ctx); + if (ret) + break; + } + + return ret; +} + +/** + * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table + */ +int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid) +{ + unsigned long flags; + struct iwl_legacy_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); + + /* Remove "disable" flag, to enable Tx for this TID */ + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX; + priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid)); + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + memcpy(&sta_cmd, &priv->stations[sta_id].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); +} + +int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta, + int tid, u16 ssn) +{ + unsigned long flags; + int sta_id; + struct iwl_legacy_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); + + sta_id = iwl_legacy_sta_id(sta); + if (sta_id == IWL_INVALID_STATION) + return -ENXIO; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.station_flags_msk = 0; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK; + priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid; + priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn); + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + memcpy(&sta_cmd, 
&priv->stations[sta_id].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); +} + +int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, + int tid) +{ + unsigned long flags; + int sta_id; + struct iwl_legacy_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); + + sta_id = iwl_legacy_sta_id(sta); + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); + return -ENXIO; + } + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.station_flags_msk = 0; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK; + priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + memcpy(&sta_cmd, &priv->stations[sta_id].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); +} + +void +iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK; + priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK; + priv->stations[sta_id].sta.sta.modify_mask = + STA_MODIFY_SLEEP_TX_COUNT_MSK; + priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt); + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + iwl_legacy_send_add_sta(priv, + &priv->stations[sta_id].sta, CMD_ASYNC); + spin_unlock_irqrestore(&priv->sta_lock, flags); + +} diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-tx.c new file mode 100644 index 000000000000..7f12e3638bae --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-tx.c @@ -0,0 +1,1378 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-sta.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +#include "iwl-4965-hw.h" +#include "iwl-4965.h" + +/* + * mac80211 queues, ACs, hardware queues, FIFOs. + * + * Cf. 
http://wireless.kernel.org/en/developers/Documentation/mac80211/queues + * + * Mac80211 uses the following numbers, which we get as from it + * by way of skb_get_queue_mapping(skb): + * + * VO 0 + * VI 1 + * BE 2 + * BK 3 + * + * + * Regular (not A-MPDU) frames are put into hardware queues corresponding + * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their + * own queue per aggregation session (RA/TID combination), such queues are + * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In + * order to map frames to the right queue, we also need an AC->hw queue + * mapping. This is implemented here. + * + * Due to the way hw queues are set up (by the hw specific modules like + * iwl-4965.c), the AC->hw queue mapping is the identity + * mapping. + */ + +static const u8 tid_to_ac[] = { + IEEE80211_AC_BE, + IEEE80211_AC_BK, + IEEE80211_AC_BK, + IEEE80211_AC_BE, + IEEE80211_AC_VI, + IEEE80211_AC_VI, + IEEE80211_AC_VO, + IEEE80211_AC_VO +}; + +static inline int iwl4965_get_ac_from_tid(u16 tid) +{ + if (likely(tid < ARRAY_SIZE(tid_to_ac))) + return tid_to_ac[tid]; + + /* no support for TIDs 8-15 yet */ + return -EINVAL; +} + +static inline int +iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid) +{ + if (likely(tid < ARRAY_SIZE(tid_to_ac))) + return ctx->ac_to_fifo[tid_to_ac[tid]]; + + /* no support for TIDs 8-15 yet */ + return -EINVAL; +} + +/* + * handle build REPLY_TX command notification. + */ +static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv, + struct sk_buff *skb, + struct iwl_tx_cmd *tx_cmd, + struct ieee80211_tx_info *info, + struct ieee80211_hdr *hdr, + u8 std_id) +{ + __le16 fc = hdr->frame_control; + __le32 tx_flags = tx_cmd->tx_flags; + + tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { + tx_flags |= TX_CMD_FLG_ACK_MSK; + if (ieee80211_is_mgmt(fc)) + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + if (ieee80211_is_probe_resp(fc) && + !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) + tx_flags |= TX_CMD_FLG_TSF_MSK; + } else { + tx_flags &= (~TX_CMD_FLG_ACK_MSK); + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + } + + if (ieee80211_is_back_req(fc)) + tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; + + tx_cmd->sta_id = std_id; + if (ieee80211_has_morefrags(fc)) + tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; + + if (ieee80211_is_data_qos(fc)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tx_cmd->tid_tspec = qc[0] & 0xf; + tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; + } else { + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + } + + iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags); + + tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); + if (ieee80211_is_mgmt(fc)) { + if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) + tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); + else + tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); + } else { + tx_cmd->timeout.pm_frame_timeout = 0; + } + + tx_cmd->driver_txop = 0; + tx_cmd->tx_flags = tx_flags; + tx_cmd->next_frame_len = 0; +} + +#define RTS_DFAULT_RETRY_LIMIT 60 + +static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv, + struct iwl_tx_cmd *tx_cmd, + struct ieee80211_tx_info *info, + __le16 fc) +{ + u32 rate_flags; + int rate_idx; + u8 rts_retry_limit; + u8 data_retry_limit; + u8 rate_plcp; + + /* Set retry limit on DATA packets and Probe Responses*/ + if (ieee80211_is_probe_resp(fc)) + data_retry_limit = 3; + else + data_retry_limit = IWL4965_DEFAULT_TX_RETRY; + tx_cmd->data_retry_limit = data_retry_limit; + + /* Set retry limit on RTS packets */ + 
rts_retry_limit = RTS_DFAULT_RETRY_LIMIT; + if (data_retry_limit < rts_retry_limit) + rts_retry_limit = data_retry_limit; + tx_cmd->rts_retry_limit = rts_retry_limit; + + /* DATA packets will use the uCode station table for rate/antenna + * selection */ + if (ieee80211_is_data(fc)) { + tx_cmd->initial_rate_index = 0; + tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; + return; + } + + /** + * If the current TX rate stored in mac80211 has the MCS bit set, it's + * not really a TX rate. Thus, we use the lowest supported rate for + * this band. Also use the lowest supported rate if the stored rate + * index is invalid. + */ + rate_idx = info->control.rates[0].idx; + if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS || + (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY)) + rate_idx = rate_lowest_index(&priv->bands[info->band], + info->control.sta); + /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ + if (info->band == IEEE80211_BAND_5GHZ) + rate_idx += IWL_FIRST_OFDM_RATE; + /* Get PLCP rate for tx_cmd->rate_n_flags */ + rate_plcp = iwlegacy_rates[rate_idx].plcp; + /* Zero out flags for this packet */ + rate_flags = 0; + + /* Set CCK flag as needed */ + if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) + rate_flags |= RATE_MCS_CCK_MSK; + + /* Set up antennas */ + priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant, + priv->hw_params.valid_tx_ant); + + rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant); + + /* Set the rate in the TX cmd */ + tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags); +} + +static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv, + struct ieee80211_tx_info *info, + struct iwl_tx_cmd *tx_cmd, + struct sk_buff *skb_frag, + int sta_id) +{ + struct ieee80211_key_conf *keyconf = info->control.hw_key; + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + tx_cmd->sec_ctl = TX_CMD_SEC_CCM; + memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); + if (info->flags & IEEE80211_TX_CTL_AMPDU) + tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK; + IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); + break; + + case WLAN_CIPHER_SUITE_TKIP: + tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; + ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key); + IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n"); + break; + + case WLAN_CIPHER_SUITE_WEP104: + tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; + /* fall through */ + case WLAN_CIPHER_SUITE_WEP40: + tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP | + (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT); + + memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); + + IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " + "with key %d\n", keyconf->keyidx); + break; + + default: + IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher); + break; + } +} + +/* + * start REPLY_TX command process + */ +int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_sta *sta = info->control.sta; + struct iwl_station_priv *sta_priv = NULL; + struct iwl_tx_queue *txq; + struct iwl_queue *q; + struct iwl_device_cmd *out_cmd; + struct iwl_cmd_meta *out_meta; + struct iwl_tx_cmd *tx_cmd; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + int txq_id; + dma_addr_t phys_addr; + dma_addr_t txcmd_phys; + dma_addr_t scratch_phys; + u16 len, firstlen, secondlen; + u16 seq_number = 0; + __le16 fc; + u8 hdr_len; + u8 
sta_id; + u8 wait_write_ptr = 0; + u8 tid = 0; + u8 *qc = NULL; + unsigned long flags; + bool is_agg = false; + + if (info->control.vif) + ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif); + + spin_lock_irqsave(&priv->lock, flags); + if (iwl_legacy_is_rfkill(priv)) { + IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); + goto drop_unlock; + } + + fc = hdr->frame_control; + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (ieee80211_is_auth(fc)) + IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); + else if (ieee80211_is_assoc_req(fc)) + IWL_DEBUG_TX(priv, "Sending ASSOC frame\n"); + else if (ieee80211_is_reassoc_req(fc)) + IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); +#endif + + hdr_len = ieee80211_hdrlen(fc); + + /* For management frames use broadcast id to do not break aggregation */ + if (!ieee80211_is_data(fc)) + sta_id = ctx->bcast_sta_id; + else { + /* Find index into station table for destination station */ + sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta); + + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", + hdr->addr1); + goto drop_unlock; + } + } + + IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); + + if (sta) + sta_priv = (void *)sta->drv_priv; + + if (sta_priv && sta_priv->asleep && + (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) { + /* + * This sends an asynchronous command to the device, + * but we can rely on it being processed before the + * next frame is processed -- and the next frame to + * this station is the one that will consume this + * counter. + * For now set the counter to just 1 since we do not + * support uAPSD yet. + */ + iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1); + } + + /* + * Send this frame after DTIM -- there's a special queue + * reserved for this for contexts that support AP mode. + */ + if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { + txq_id = ctx->mcast_queue; + /* + * The microcode will clear the more data + * bit in the last frame it transmits. 
+ */ + hdr->frame_control |= + cpu_to_le16(IEEE80211_FCTL_MOREDATA); + } else + txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)]; + + /* irqs already disabled/saved above when locking priv->lock */ + spin_lock(&priv->sta_lock); + + if (ieee80211_is_data_qos(fc)) { + qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; + if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) { + spin_unlock(&priv->sta_lock); + goto drop_unlock; + } + seq_number = priv->stations[sta_id].tid[tid].seq_number; + seq_number &= IEEE80211_SCTL_SEQ; + hdr->seq_ctrl = hdr->seq_ctrl & + cpu_to_le16(IEEE80211_SCTL_FRAG); + hdr->seq_ctrl |= cpu_to_le16(seq_number); + seq_number += 0x10; + /* aggregation is on for this */ + if (info->flags & IEEE80211_TX_CTL_AMPDU && + priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) { + txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; + is_agg = true; + } + } + + txq = &priv->txq[txq_id]; + q = &txq->q; + + if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) { + spin_unlock(&priv->sta_lock); + goto drop_unlock; + } + + if (ieee80211_is_data_qos(fc)) { + priv->stations[sta_id].tid[tid].tfds_in_queue++; + if (!ieee80211_has_morefrags(fc)) + priv->stations[sta_id].tid[tid].seq_number = seq_number; + } + + spin_unlock(&priv->sta_lock); + + /* Set up driver data for this TFD */ + memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); + txq->txb[q->write_ptr].skb = skb; + txq->txb[q->write_ptr].ctx = ctx; + + /* Set up first empty entry in queue's array of Tx/cmd buffers */ + out_cmd = txq->cmd[q->write_ptr]; + out_meta = &txq->meta[q->write_ptr]; + tx_cmd = &out_cmd->cmd.tx; + memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); + memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd)); + + /* + * Set up the Tx-command (not MAC!) header. + * Store the chosen Tx queue and TFD index within the sequence field; + * after Tx, uCode's Tx response will return this value so driver can + * locate the frame within the tx queue and do post-tx processing. + */ + out_cmd->hdr.cmd = REPLY_TX; + out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | + INDEX_TO_SEQ(q->write_ptr))); + + /* Copy MAC header from skb into command buffer */ + memcpy(tx_cmd->hdr, hdr, hdr_len); + + + /* Total # bytes to be transmitted */ + len = (u16)skb->len; + tx_cmd->len = cpu_to_le16(len); + + if (info->control.hw_key) + iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id); + + /* TODO need this for burst mode later on */ + iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id); + iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr); + + iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc); + + iwl_legacy_update_stats(priv, true, fc, len); + /* + * Use the first empty entry in this queue's command buffer array + * to contain the Tx command and MAC header concatenated together + * (payload data will be in another buffer). + * Size of this varies, due to varying MAC header length. + * If end is not dword aligned, we'll have 2 extra bytes at the end + * of the MAC header (device reads on dword boundaries). + * We'll tell device about this padding later. + */ + len = sizeof(struct iwl_tx_cmd) + + sizeof(struct iwl_cmd_header) + hdr_len; + firstlen = (len + 3) & ~3; + + /* Tell NIC about any 2-byte padding after MAC header */ + if (firstlen != len) + tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; + + /* Physical address of this Tx command's header (not MAC header!), + * within command buffer array. 
*/ + txcmd_phys = pci_map_single(priv->pci_dev, + &out_cmd->hdr, firstlen, + PCI_DMA_BIDIRECTIONAL); + dma_unmap_addr_set(out_meta, mapping, txcmd_phys); + dma_unmap_len_set(out_meta, len, firstlen); + /* Add buffer containing Tx command and MAC(!) header to TFD's + * first entry */ + priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, + txcmd_phys, firstlen, 1, 0); + + if (!ieee80211_has_morefrags(hdr->frame_control)) { + txq->need_update = 1; + } else { + wait_write_ptr = 1; + txq->need_update = 0; + } + + /* Set up TFD's 2nd entry to point directly to remainder of skb, + * if any (802.11 null frames have no payload). */ + secondlen = skb->len - hdr_len; + if (secondlen > 0) { + phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, + secondlen, PCI_DMA_TODEVICE); + priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, + phys_addr, secondlen, + 0, 0); + } + + scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + + offsetof(struct iwl_tx_cmd, scratch); + + /* take back ownership of DMA buffer to enable update */ + pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys, + firstlen, PCI_DMA_BIDIRECTIONAL); + tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); + tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys); + + IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n", + le16_to_cpu(out_cmd->hdr.sequence)); + IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); + iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd)); + iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); + + /* Set up entry for this TFD in Tx byte-count array */ + if (info->flags & IEEE80211_TX_CTL_AMPDU) + priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, + le16_to_cpu(tx_cmd->len)); + + pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, + firstlen, PCI_DMA_BIDIRECTIONAL); + + trace_iwlwifi_legacy_dev_tx(priv, + &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr], + sizeof(struct iwl_tfd), + &out_cmd->hdr, firstlen, + skb->data + hdr_len, secondlen); + + /* Tell device the write index *just past* this latest filled TFD */ + q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd); + iwl_legacy_txq_update_write_ptr(priv, txq); + spin_unlock_irqrestore(&priv->lock, flags); + + /* + * At this point the frame is "transmitted" successfully + * and we will get a TX status notification eventually, + * regardless of the value of ret. "ret" only indicates + * whether or not we should update the write pointer. + */ + + /* + * Avoid atomic ops if it isn't an associated client. + * Also, if this is a packet for aggregation, don't + * increase the counter because the ucode will stop + * aggregation queues when their respective station + * goes to sleep. 
+ */ + if (sta_priv && sta_priv->client && !is_agg) + atomic_inc(&sta_priv->pending_frames); + + if ((iwl_legacy_queue_space(q) < q->high_mark) && + priv->mac80211_registered) { + if (wait_write_ptr) { + spin_lock_irqsave(&priv->lock, flags); + txq->need_update = 1; + iwl_legacy_txq_update_write_ptr(priv, txq); + spin_unlock_irqrestore(&priv->lock, flags); + } else { + iwl_legacy_stop_queue(priv, txq); + } + } + + return 0; + +drop_unlock: + spin_unlock_irqrestore(&priv->lock, flags); + return -1; +} + +static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv, + struct iwl_dma_ptr *ptr, size_t size) +{ + ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma, + GFP_KERNEL); + if (!ptr->addr) + return -ENOMEM; + ptr->size = size; + return 0; +} + +static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv, + struct iwl_dma_ptr *ptr) +{ + if (unlikely(!ptr->addr)) + return; + + dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma); + memset(ptr, 0, sizeof(*ptr)); +} + +/** + * iwl4965_hw_txq_ctx_free - Free TXQ Context + * + * Destroy all TX DMA queues and structures + */ +void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv) +{ + int txq_id; + + /* Tx queues */ + if (priv->txq) { + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) + if (txq_id == priv->cmd_queue) + iwl_legacy_cmd_queue_free(priv); + else + iwl_legacy_tx_queue_free(priv, txq_id); + } + iwl4965_free_dma_ptr(priv, &priv->kw); + + iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls); + + /* free tx queue structure */ + iwl_legacy_txq_mem(priv); +} + +/** + * iwl4965_txq_ctx_alloc - allocate TX queue context + * Allocate all Tx DMA structures and initialize them + * + * @param priv + * @return error code + */ +int iwl4965_txq_ctx_alloc(struct iwl_priv *priv) +{ + int ret; + int txq_id, slots_num; + unsigned long flags; + + /* Free all tx/cmd queues and keep-warm buffer */ + iwl4965_hw_txq_ctx_free(priv); + + ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls, + priv->hw_params.scd_bc_tbls_size); + if (ret) { + IWL_ERR(priv, "Scheduler BC Table allocation failed\n"); + goto error_bc_tbls; + } + /* Alloc keep-warm buffer */ + ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE); + if (ret) { + IWL_ERR(priv, "Keep Warm allocation failed\n"); + goto error_kw; + } + + /* allocate tx queue structure */ + ret = iwl_legacy_alloc_txq_mem(priv); + if (ret) + goto error; + + spin_lock_irqsave(&priv->lock, flags); + + /* Turn off all Tx DMA fifos */ + iwl4965_txq_set_sched(priv, 0); + + /* Tell NIC where to find the "keep warm" buffer */ + iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Alloc and init all Tx queues, including the command queue (#4/#9) */ + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { + slots_num = (txq_id == priv->cmd_queue) ? 
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + ret = iwl_legacy_tx_queue_init(priv, + &priv->txq[txq_id], slots_num, + txq_id); + if (ret) { + IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); + goto error; + } + } + + return ret; + + error: + iwl4965_hw_txq_ctx_free(priv); + iwl4965_free_dma_ptr(priv, &priv->kw); + error_kw: + iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls); + error_bc_tbls: + return ret; +} + +void iwl4965_txq_ctx_reset(struct iwl_priv *priv) +{ + int txq_id, slots_num; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + /* Turn off all Tx DMA fifos */ + iwl4965_txq_set_sched(priv, 0); + + /* Tell NIC where to find the "keep warm" buffer */ + iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Alloc and init all Tx queues, including the command queue (#4) */ + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { + slots_num = txq_id == priv->cmd_queue ? + TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id], + slots_num, txq_id); + } +} + +/** + * iwl4965_txq_ctx_stop - Stop all Tx DMA channels + */ +void iwl4965_txq_ctx_stop(struct iwl_priv *priv) +{ + int ch, txq_id; + unsigned long flags; + + /* Turn off all Tx DMA fifos */ + spin_lock_irqsave(&priv->lock, flags); + + iwl4965_txq_set_sched(priv, 0); + + /* Stop each Tx DMA channel, and wait for it to be idle */ + for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) { + iwl_legacy_write_direct32(priv, + FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); + if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG, + FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), + 1000)) + IWL_ERR(priv, "Failing on timeout while stopping" + " DMA channel %d [0x%08x]", ch, + iwl_legacy_read_direct32(priv, + FH_TSSR_TX_STATUS_REG)); + } + spin_unlock_irqrestore(&priv->lock, flags); + + if (!priv->txq) + return; + + /* Unmap DMA from host system and free skb's */ + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) + if (txq_id == priv->cmd_queue) + iwl_legacy_cmd_queue_unmap(priv); + else + iwl_legacy_tx_queue_unmap(priv, txq_id); +} + +/* + * Find first available (lowest unused) Tx Queue, mark it "active". + * Called only when finding queue for aggregation. + * Should never return anything < 7, because they should already + * be in use as EDCA AC (0-3), Command (4), reserved (5, 6) + */ +static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv) +{ + int txq_id; + + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) + if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk)) + return txq_id; + return -1; +} + +/** + * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration + */ +static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv, + u16 txq_id) +{ + /* Simply stop the queue, but don't change any configuration; + * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. 
*/ + iwl_legacy_write_prph(priv, + IWL49_SCD_QUEUE_STATUS_BITS(txq_id), + (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)| + (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); +} + +/** + * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue + */ +static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid, + u16 txq_id) +{ + u32 tbl_dw_addr; + u32 tbl_dw; + u16 scd_q2ratid; + + scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK; + + tbl_dw_addr = priv->scd_base_addr + + IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); + + tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr); + + if (txq_id & 0x1) + tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); + else + tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); + + iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw); + + return 0; +} + +/** + * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue + * + * NOTE: txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE, + * i.e. it must be one of the higher queues used for aggregation + */ +static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id, + int tx_fifo, int sta_id, int tid, u16 ssn_idx) +{ + unsigned long flags; + u16 ra_tid; + int ret; + + if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || + (IWL49_FIRST_AMPDU_QUEUE + + priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) { + IWL_WARN(priv, + "queue number out of range: %d, must be %d to %d\n", + txq_id, IWL49_FIRST_AMPDU_QUEUE, + IWL49_FIRST_AMPDU_QUEUE + + priv->cfg->base_params->num_of_ampdu_queues - 1); + return -EINVAL; + } + + ra_tid = BUILD_RAxTID(sta_id, tid); + + /* Modify device's station table to Tx this TID */ + ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid); + if (ret) + return ret; + + spin_lock_irqsave(&priv->lock, flags); + + /* Stop this Tx queue before configuring it */ + iwl4965_tx_queue_stop_scheduler(priv, txq_id); + + /* Map receiver-address / traffic-ID to this queue */ + iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id); + + /* Set this queue as a chain-building queue */ + iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id)); + + /* Place first TFD at index corresponding to start sequence number. 
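+	 * Only the low 8 bits of the SSN are used (& 0xff), matching the
+	 * 256-entry TFD ring.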
+ * Assumes that ssn_idx is valid (!= 0xFFF) */ + priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); + priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); + iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); + + /* Set up Tx window size and frame limit for this queue */ + iwl_legacy_write_targ_mem(priv, + priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id), + (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & + IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); + + iwl_legacy_write_targ_mem(priv, priv->scd_base_addr + + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), + (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) + & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); + + iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id)); + + /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ + iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1); + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + + +int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u16 *ssn) +{ + int sta_id; + int tx_fifo; + int txq_id; + int ret; + unsigned long flags; + struct iwl_tid_data *tid_data; + + tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid); + if (unlikely(tx_fifo < 0)) + return tx_fifo; + + IWL_WARN(priv, "%s on ra = %pM tid = %d\n", + __func__, sta->addr, tid); + + sta_id = iwl_legacy_sta_id(sta); + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Start AGG on invalid station\n"); + return -ENXIO; + } + if (unlikely(tid >= MAX_TID_COUNT)) + return -EINVAL; + + if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { + IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); + return -ENXIO; + } + + txq_id = iwl4965_txq_ctx_activate_free(priv); + if (txq_id == -1) { + IWL_ERR(priv, "No free aggregation queue available\n"); + return -ENXIO; + } + + spin_lock_irqsave(&priv->sta_lock, flags); + tid_data = &priv->stations[sta_id].tid[tid]; + *ssn = SEQ_TO_SN(tid_data->seq_number); + tid_data->agg.txq_id = txq_id; + iwl_legacy_set_swq_id(&priv->txq[txq_id], + iwl4965_get_ac_from_tid(tid), txq_id); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo, + sta_id, tid, *ssn); + if (ret) + return ret; + + spin_lock_irqsave(&priv->sta_lock, flags); + tid_data = &priv->stations[sta_id].tid[tid]; + if (tid_data->tfds_in_queue == 0) { + IWL_DEBUG_HT(priv, "HW queue is empty\n"); + tid_data->agg.state = IWL_AGG_ON; + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + } else { + IWL_DEBUG_HT(priv, + "HW queue is NOT empty: %d packets in HW queue\n", + tid_data->tfds_in_queue); + tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; + } + spin_unlock_irqrestore(&priv->sta_lock, flags); + return ret; +} + +/** + * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE + * priv->lock must be held by the caller + */ +static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, + u16 ssn_idx, u8 tx_fifo) +{ + if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || + (IWL49_FIRST_AMPDU_QUEUE + + priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) { + IWL_WARN(priv, + "queue number out of range: %d, must be %d to %d\n", + txq_id, IWL49_FIRST_AMPDU_QUEUE, + IWL49_FIRST_AMPDU_QUEUE + + priv->cfg->base_params->num_of_ampdu_queues - 1); + return -EINVAL; + } + + iwl4965_tx_queue_stop_scheduler(priv, txq_id); + + iwl_legacy_clear_bits_prph(priv, + IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id)); + + 
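+	/* Empty the ring: point both read and write pointers at the SSN. */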
priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); + priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); + /* supposes that ssn_idx is valid (!= 0xFFF) */ + iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); + + iwl_legacy_clear_bits_prph(priv, + IWL49_SCD_INTERRUPT_MASK, (1 << txq_id)); + iwl_txq_ctx_deactivate(priv, txq_id); + iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0); + + return 0; +} + +int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid) +{ + int tx_fifo_id, txq_id, sta_id, ssn; + struct iwl_tid_data *tid_data; + int write_ptr, read_ptr; + unsigned long flags; + + tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid); + if (unlikely(tx_fifo_id < 0)) + return tx_fifo_id; + + sta_id = iwl_legacy_sta_id(sta); + + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); + return -ENXIO; + } + + spin_lock_irqsave(&priv->sta_lock, flags); + + tid_data = &priv->stations[sta_id].tid[tid]; + ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; + txq_id = tid_data->agg.txq_id; + + switch (priv->stations[sta_id].tid[tid].agg.state) { + case IWL_EMPTYING_HW_QUEUE_ADDBA: + /* + * This can happen if the peer stops aggregation + * again before we've had a chance to drain the + * queue we selected previously, i.e. before the + * session was really started completely. + */ + IWL_DEBUG_HT(priv, "AGG stop before setup done\n"); + goto turn_off; + case IWL_AGG_ON: + break; + default: + IWL_WARN(priv, "Stopping AGG while state not ON or starting\n"); + } + + write_ptr = priv->txq[txq_id].q.write_ptr; + read_ptr = priv->txq[txq_id].q.read_ptr; + + /* The queue is not empty */ + if (write_ptr != read_ptr) { + IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n"); + priv->stations[sta_id].tid[tid].agg.state = + IWL_EMPTYING_HW_QUEUE_DELBA; + spin_unlock_irqrestore(&priv->sta_lock, flags); + return 0; + } + + IWL_DEBUG_HT(priv, "HW queue is empty\n"); + turn_off: + priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; + + /* do not restore/save irqs */ + spin_unlock(&priv->sta_lock); + spin_lock(&priv->lock); + + /* + * the only reason this call can fail is queue number out of range, + * which can happen if uCode is reloaded and all the station + * information are lost. if it is outside the range, there is no need + * to deactivate the uCode queue, just return "success" to allow + * mac80211 to clean up it own data. 
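+	 * For that reason the return value of iwl4965_txq_agg_disable() is
+	 * deliberately ignored here.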
+ */ + iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id); + spin_unlock_irqrestore(&priv->lock, flags); + + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + + return 0; +} + +int iwl4965_txq_check_empty(struct iwl_priv *priv, + int sta_id, u8 tid, int txq_id) +{ + struct iwl_queue *q = &priv->txq[txq_id].q; + u8 *addr = priv->stations[sta_id].sta.sta.addr; + struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid]; + struct iwl_rxon_context *ctx; + + ctx = &priv->contexts[priv->stations[sta_id].ctxid]; + + lockdep_assert_held(&priv->sta_lock); + + switch (priv->stations[sta_id].tid[tid].agg.state) { + case IWL_EMPTYING_HW_QUEUE_DELBA: + /* We are reclaiming the last packet of the */ + /* aggregated HW queue */ + if ((txq_id == tid_data->agg.txq_id) && + (q->read_ptr == q->write_ptr)) { + u16 ssn = SEQ_TO_SN(tid_data->seq_number); + int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid); + IWL_DEBUG_HT(priv, + "HW queue empty: continue DELBA flow\n"); + iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo); + tid_data->agg.state = IWL_AGG_OFF; + ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid); + } + break; + case IWL_EMPTYING_HW_QUEUE_ADDBA: + /* We are reclaiming the last packet of the queue */ + if (tid_data->tfds_in_queue == 0) { + IWL_DEBUG_HT(priv, + "HW queue empty: continue ADDBA flow\n"); + tid_data->agg.state = IWL_AGG_ON; + ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid); + } + break; + } + + return 0; +} + +static void iwl4965_non_agg_tx_status(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + const u8 *addr1) +{ + struct ieee80211_sta *sta; + struct iwl_station_priv *sta_priv; + + rcu_read_lock(); + sta = ieee80211_find_sta(ctx->vif, addr1); + if (sta) { + sta_priv = (void *)sta->drv_priv; + /* avoid atomic ops if this isn't a client */ + if (sta_priv->client && + atomic_dec_return(&sta_priv->pending_frames) == 0) + ieee80211_sta_block_awake(priv->hw, sta, false); + } + rcu_read_unlock(); +} + +static void +iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info, + bool is_agg) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data; + + if (!is_agg) + iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1); + + ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb); +} + +int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + struct iwl_tx_info *tx_info; + int nfreed = 0; + struct ieee80211_hdr *hdr; + + if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) { + IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " + "is out of range [0-%d] %d %d.\n", txq_id, + index, q->n_bd, q->write_ptr, q->read_ptr); + return 0; + } + + for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd); + q->read_ptr != index; + q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) { + + tx_info = &txq->txb[txq->q.read_ptr]; + + if (WARN_ON_ONCE(tx_info->skb == NULL)) + continue; + + hdr = (struct ieee80211_hdr *)tx_info->skb->data; + if (ieee80211_is_data_qos(hdr->frame_control)) + nfreed++; + + iwl4965_tx_status(priv, tx_info, + txq_id >= IWL4965_FIRST_AMPDU_QUEUE); + tx_info->skb = NULL; + + priv->cfg->ops->lib->txq_free_tfd(priv, txq); + } + return nfreed; +} + +/** + * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack + * + * Go through block-ack's bitmap of ACK'd frames, update driver's record of + * ACK vs. not. This gets sent to mac80211, then to rate scaling algo. 
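+ *
+ * Illustrative example (made-up numbers, not from a trace): if the
+ * driver's Tx window starts at agg->start_idx = 10 and the BA's seq_ctl
+ * maps to index 8, then sh = 2 and the 64-bit BA bitmap is shifted right
+ * by 2 before being ANDed with agg->bitmap, so bit 0 of sent_bitmap
+ * lines up with the first frame of the driver's window.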
+ */ +static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv, + struct iwl_ht_agg *agg, + struct iwl_compressed_ba_resp *ba_resp) + +{ + int i, sh, ack; + u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl); + u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); + int successes = 0; + struct ieee80211_tx_info *info; + u64 bitmap, sent_bitmap; + + if (unlikely(!agg->wait_for_ba)) { + if (unlikely(ba_resp->bitmap)) + IWL_ERR(priv, "Received BA when not expected\n"); + return -EINVAL; + } + + /* Mark that the expected block-ack response arrived */ + agg->wait_for_ba = 0; + IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, + ba_resp->seq_ctl); + + /* Calculate shift to align block-ack bits with our Tx window bits */ + sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4); + if (sh < 0) /* tbw something is wrong with indices */ + sh += 0x100; + + if (agg->frame_count > (64 - sh)) { + IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size"); + return -1; + } + + /* don't use 64-bit values for now */ + bitmap = le64_to_cpu(ba_resp->bitmap) >> sh; + + /* check for success or failure according to the + * transmitted bitmap and block-ack bitmap */ + sent_bitmap = bitmap & agg->bitmap; + + /* For each frame attempted in aggregation, + * update driver's record of tx frame's status. */ + i = 0; + while (sent_bitmap) { + ack = sent_bitmap & 1ULL; + successes += ack; + IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n", + ack ? "ACK" : "NACK", i, + (agg->start_idx + i) & 0xff, + agg->start_idx + i); + sent_bitmap >>= 1; + ++i; + } + + IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", + (unsigned long long)bitmap); + + info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb); + memset(&info->status, 0, sizeof(info->status)); + info->flags |= IEEE80211_TX_STAT_ACK; + info->flags |= IEEE80211_TX_STAT_AMPDU; + info->status.ampdu_ack_len = successes; + info->status.ampdu_len = agg->frame_count; + iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info); + + return 0; +} + +/** + * translate ucode response to mac80211 tx status control values + */ +void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, + struct ieee80211_tx_info *info) +{ + struct ieee80211_tx_rate *r = &info->control.rates[0]; + + info->antenna_sel_tx = + ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); + if (rate_n_flags & RATE_MCS_HT_MSK) + r->flags |= IEEE80211_TX_RC_MCS; + if (rate_n_flags & RATE_MCS_GF_MSK) + r->flags |= IEEE80211_TX_RC_GREEN_FIELD; + if (rate_n_flags & RATE_MCS_HT40_MSK) + r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + if (rate_n_flags & RATE_MCS_DUP_MSK) + r->flags |= IEEE80211_TX_RC_DUP_DATA; + if (rate_n_flags & RATE_MCS_SGI_MSK) + r->flags |= IEEE80211_TX_RC_SHORT_GI; + r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band); +} + +/** + * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA + * + * Handles block-acknowledge notification from device, which reports success + * of frames sent via aggregation. 
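+ *
+ * The notification names the Tx queue ("flow"), the scheduler SSN that
+ * becomes the new start of the BA window, and a 64-bit bitmap of frames
+ * the peer acknowledged; everything in front of the new SSN can be
+ * reclaimed.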
+ */ +void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; + struct iwl_tx_queue *txq = NULL; + struct iwl_ht_agg *agg; + int index; + int sta_id; + int tid; + unsigned long flags; + + /* "flow" corresponds to Tx queue */ + u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); + + /* "ssn" is start of block-ack Tx window, corresponds to index + * (in Tx queue's circular buffer) of first TFD/frame in window */ + u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); + + if (scd_flow >= priv->hw_params.max_txq_num) { + IWL_ERR(priv, + "BUG_ON scd_flow is bigger than number of queues\n"); + return; + } + + txq = &priv->txq[scd_flow]; + sta_id = ba_resp->sta_id; + tid = ba_resp->tid; + agg = &priv->stations[sta_id].tid[tid].agg; + if (unlikely(agg->txq_id != scd_flow)) { + /* + * FIXME: this is a uCode bug which need to be addressed, + * log the information and return for now! + * since it is possible happen very often and in order + * not to fill the syslog, don't enable the logging by default + */ + IWL_DEBUG_TX_REPLY(priv, + "BA scd_flow %d does not match txq_id %d\n", + scd_flow, agg->txq_id); + return; + } + + /* Find index just before block-ack window */ + index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); + + spin_lock_irqsave(&priv->sta_lock, flags); + + IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, " + "sta_id = %d\n", + agg->wait_for_ba, + (u8 *) &ba_resp->sta_addr_lo32, + ba_resp->sta_id); + IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx," + "scd_flow = " + "%d, scd_ssn = %d\n", + ba_resp->tid, + ba_resp->seq_ctl, + (unsigned long long)le64_to_cpu(ba_resp->bitmap), + ba_resp->scd_flow, + ba_resp->scd_ssn); + IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n", + agg->start_idx, + (unsigned long long)agg->bitmap); + + /* Update driver's record of ACK vs. not for each frame in window */ + iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp); + + /* Release all TFDs before the SSN, i.e. all TFDs in front of + * block-ack window (we assume that they've been successfully + * transmitted ... if not, it's too late anyway). 
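+	 * The reclaim below also feeds iwl4965_free_tfds_in_queue() and
+	 * iwl4965_txq_check_empty(), which can complete a pending
+	 * ADDBA/DELBA flow once the queue drains.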
*/ + if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { + /* calculate mac80211 ampdu sw queue to wake */ + int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index); + iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed); + + if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) && + priv->mac80211_registered && + (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) + iwl_legacy_wake_queue(priv, txq); + + iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow); + } + + spin_unlock_irqrestore(&priv->sta_lock, flags); +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +const char *iwl4965_get_tx_fail_reason(u32 status) +{ +#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x +#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x + + switch (status & TX_STATUS_MSK) { + case TX_STATUS_SUCCESS: + return "SUCCESS"; + TX_STATUS_POSTPONE(DELAY); + TX_STATUS_POSTPONE(FEW_BYTES); + TX_STATUS_POSTPONE(QUIET_PERIOD); + TX_STATUS_POSTPONE(CALC_TTAK); + TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY); + TX_STATUS_FAIL(SHORT_LIMIT); + TX_STATUS_FAIL(LONG_LIMIT); + TX_STATUS_FAIL(FIFO_UNDERRUN); + TX_STATUS_FAIL(DRAIN_FLOW); + TX_STATUS_FAIL(RFKILL_FLUSH); + TX_STATUS_FAIL(LIFE_EXPIRE); + TX_STATUS_FAIL(DEST_PS); + TX_STATUS_FAIL(HOST_ABORTED); + TX_STATUS_FAIL(BT_RETRY); + TX_STATUS_FAIL(STA_INVALID); + TX_STATUS_FAIL(FRAG_DROPPED); + TX_STATUS_FAIL(TID_DISABLE); + TX_STATUS_FAIL(FIFO_FLUSHED); + TX_STATUS_FAIL(INSUFFICIENT_CF_POLL); + TX_STATUS_FAIL(PASSIVE_NO_RX); + TX_STATUS_FAIL(NO_BEACON_ON_RADAR); + } + + return "UNKNOWN"; + +#undef TX_STATUS_FAIL +#undef TX_STATUS_POSTPONE +} +#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */ diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c new file mode 100644 index 000000000000..001d148feb94 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c @@ -0,0 +1,166 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +#include "iwl-4965-hw.h" +#include "iwl-4965.h" +#include "iwl-4965-calib.h" + +#define IWL_AC_UNSET -1 + +/** + * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host, + * using sample data 100 bytes apart. If these sample points are good, + * it's a pretty good bet that everything between them is good, too. 
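+ *
+ * The exhaustive word-by-word check (iwl4965_verify_inst_full below) is
+ * only used as a diagnostic fallback when none of the images match.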
+ */ +static int +iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len) +{ + u32 val; + int ret = 0; + u32 errcnt = 0; + u32 i; + + IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); + + for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, + i + IWL4965_RTC_INST_LOWER_BOUND); + val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { + ret = -EIO; + errcnt++; + if (errcnt >= 3) + break; + } + } + + return ret; +} + +/** + * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host, + * looking at all data. + */ +static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image, + u32 len) +{ + u32 val; + u32 save_len = len; + int ret = 0; + u32 errcnt; + + IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); + + iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, + IWL4965_RTC_INST_LOWER_BOUND); + + errcnt = 0; + for (; len > 0; len -= sizeof(u32), image++) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { + IWL_ERR(priv, "uCode INST section is invalid at " + "offset 0x%x, is 0x%x, s/b 0x%x\n", + save_len - len, val, le32_to_cpu(*image)); + ret = -EIO; + errcnt++; + if (errcnt >= 20) + break; + } + } + + if (!errcnt) + IWL_DEBUG_INFO(priv, + "ucode image in INSTRUCTION memory is good\n"); + + return ret; +} + +/** + * iwl4965_verify_ucode - determine which instruction image is in SRAM, + * and verify its contents + */ +int iwl4965_verify_ucode(struct iwl_priv *priv) +{ + __le32 *image; + u32 len; + int ret; + + /* Try bootstrap */ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + ret = iwl4965_verify_inst_sparse(priv, image, len); + if (!ret) { + IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n"); + return 0; + } + + /* Try initialize */ + image = (__le32 *)priv->ucode_init.v_addr; + len = priv->ucode_init.len; + ret = iwl4965_verify_inst_sparse(priv, image, len); + if (!ret) { + IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n"); + return 0; + } + + /* Try runtime/protocol */ + image = (__le32 *)priv->ucode_code.v_addr; + len = priv->ucode_code.len; + ret = iwl4965_verify_inst_sparse(priv, image, len); + if (!ret) { + IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n"); + return 0; + } + + IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); + + /* Since nothing seems to match, show first several data entries in + * instruction SRAM, so maybe visual inspection will give a clue. + * Selection of bootstrap image (vs. other images) is arbitrary. */ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + ret = iwl4965_verify_inst_full(priv, image, len); + + return ret; +} diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-4965.c b/trunk/drivers/net/wireless/iwlegacy/iwl-4965.c new file mode 100644 index 000000000000..86f4fce193e4 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-4965.c @@ -0,0 +1,2183 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +#include "iwl-4965-calib.h" +#include "iwl-sta.h" +#include "iwl-4965-led.h" +#include "iwl-4965.h" +#include "iwl-4965-debugfs.h" + +static int iwl4965_send_tx_power(struct iwl_priv *priv); +static int iwl4965_hw_get_temperature(struct iwl_priv *priv); + +/* Highest firmware API version supported */ +#define IWL4965_UCODE_API_MAX 2 + +/* Lowest firmware API version supported */ +#define IWL4965_UCODE_API_MIN 2 + +#define IWL4965_FW_PRE "iwlwifi-4965-" +#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode" +#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api) + +/* check contents of special bootstrap uCode SRAM */ +static int iwl4965_verify_bsm(struct iwl_priv *priv) +{ + __le32 *image = priv->ucode_boot.v_addr; + u32 len = priv->ucode_boot.len; + u32 reg; + u32 val; + + IWL_DEBUG_INFO(priv, "Begin verify bsm\n"); + + /* verify BSM SRAM contents */ + val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG); + for (reg = BSM_SRAM_LOWER_BOUND; + reg < BSM_SRAM_LOWER_BOUND + len; + reg += sizeof(u32), image++) { + val = iwl_legacy_read_prph(priv, reg); + if (val != le32_to_cpu(*image)) { + IWL_ERR(priv, "BSM uCode verification failed at " + "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", + BSM_SRAM_LOWER_BOUND, + reg - BSM_SRAM_LOWER_BOUND, len, + val, le32_to_cpu(*image)); + return -EIO; + } + } + + IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n"); + + return 0; +} + +/** + * iwl4965_load_bsm - Load bootstrap instructions + * + * BSM operation: + * + * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program + * in special SRAM that does not power down during RFKILL. When powering back + * up after power-saving sleeps (or during initial uCode load), the BSM loads + * the bootstrap program into the on-board processor, and starts it. + * + * The bootstrap program loads (via DMA) instructions and data for a new + * program from host DRAM locations indicated by the host driver in the + * BSM_DRAM_* registers. Once the new program is loaded, it starts + * automatically. + * + * When initializing the NIC, the host driver points the BSM to the + * "initialize" uCode image. This uCode sets up some internal data, then + * notifies host via "initialize alive" that it is complete. 
+ * + * The host then replaces the BSM_DRAM_* pointer values to point to the + * normal runtime uCode instructions and a backup uCode data cache buffer + * (filled initially with starting data values for the on-board processor), + * then triggers the "initialize" uCode to load and launch the runtime uCode, + * which begins normal operation. + * + * When doing a power-save shutdown, runtime uCode saves data SRAM into + * the backup data cache in DRAM before SRAM is powered down. + * + * When powering back up, the BSM loads the bootstrap program. This reloads + * the runtime uCode instructions and the backup data cache into SRAM, + * and re-launches the runtime uCode from where it left off. + */ +static int iwl4965_load_bsm(struct iwl_priv *priv) +{ + __le32 *image = priv->ucode_boot.v_addr; + u32 len = priv->ucode_boot.len; + dma_addr_t pinst; + dma_addr_t pdata; + u32 inst_len; + u32 data_len; + int i; + u32 done; + u32 reg_offset; + int ret; + + IWL_DEBUG_INFO(priv, "Begin load bsm\n"); + + priv->ucode_type = UCODE_RT; + + /* make sure bootstrap program is no larger than BSM's SRAM size */ + if (len > IWL49_MAX_BSM_SIZE) + return -EINVAL; + + /* Tell bootstrap uCode where to find the "Initialize" uCode + * in host DRAM ... host DRAM physical address bits 35:4 for 4965. + * NOTE: iwl_init_alive_start() will replace these values, + * after the "initialize" uCode has run, to point to + * runtime/protocol instructions and backup data cache. + */ + pinst = priv->ucode_init.p_addr >> 4; + pdata = priv->ucode_init_data.p_addr >> 4; + inst_len = priv->ucode_init.len; + data_len = priv->ucode_init_data.len; + + iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); + + /* Fill BSM memory with bootstrap instructions */ + for (reg_offset = BSM_SRAM_LOWER_BOUND; + reg_offset < BSM_SRAM_LOWER_BOUND + len; + reg_offset += sizeof(u32), image++) + _iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image)); + + ret = iwl4965_verify_bsm(priv); + if (ret) + return ret; + + /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ + iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0); + iwl_legacy_write_prph(priv, + BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND); + iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); + + /* Load bootstrap code into instruction SRAM now, + * to prepare to load "initialize" uCode */ + iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START); + + /* Wait for load of bootstrap uCode to finish */ + for (i = 0; i < 100; i++) { + done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG); + if (!(done & BSM_WR_CTRL_REG_BIT_START)) + break; + udelay(10); + } + if (i < 100) + IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i); + else { + IWL_ERR(priv, "BSM write did not complete!\n"); + return -EIO; + } + + /* Enable future boot loads whenever power management unit triggers it + * (e.g. when powering back up after power-save shutdown) */ + iwl_legacy_write_prph(priv, + BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN); + + + return 0; +} + +/** + * iwl4965_set_ucode_ptrs - Set uCode address location + * + * Tell initialization uCode where to find runtime uCode. + * + * BSM registers initially contain pointers to initialization uCode. 
+ * We need to replace them to load runtime uCode inst and data, + * and to save runtime data when powering down. + */ +static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv) +{ + dma_addr_t pinst; + dma_addr_t pdata; + int ret = 0; + + /* bits 35:4 for 4965 */ + pinst = priv->ucode_code.p_addr >> 4; + pdata = priv->ucode_data_backup.p_addr >> 4; + + /* Tell bootstrap uCode where to find image to load */ + iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, + priv->ucode_data.len); + + /* Inst byte count must be last to set up, bit 31 signals uCode + * that all new ptr/size info is in place */ + iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, + priv->ucode_code.len | BSM_DRAM_INST_LOAD); + IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n"); + + return ret; +} + +/** + * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received + * + * Called after REPLY_ALIVE notification received from "initialize" uCode. + * + * The 4965 "initialize" ALIVE reply contains calibration data for: + * Voltage, temperature, and MIMO tx gain correction, now stored in priv + * (3945 does not contain this data). + * + * Tell "initialize" uCode to go ahead and load the runtime uCode. +*/ +static void iwl4965_init_alive_start(struct iwl_priv *priv) +{ + /* Bootstrap uCode has loaded initialize uCode ... verify inst image. + * This is a paranoid check, because we would not have gotten the + * "initialize" alive if code weren't properly loaded. */ + if (iwl4965_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n"); + goto restart; + } + + /* Calculate temperature */ + priv->temperature = iwl4965_hw_get_temperature(priv); + + /* Send pointers to protocol/runtime uCode image ... init code will + * load and launch runtime uCode, which will send us another "Alive" + * notification. 
*/ + IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); + if (iwl4965_set_ucode_ptrs(priv)) { + /* Runtime instruction load won't happen; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n"); + goto restart; + } + return; + +restart: + queue_work(priv->workqueue, &priv->restart); +} + +static bool iw4965_is_ht40_channel(__le32 rxon_flags) +{ + int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK) + >> RXON_FLG_CHANNEL_MODE_POS; + return ((chan_mod == CHANNEL_MODE_PURE_40) || + (chan_mod == CHANNEL_MODE_MIXED)); +} + +static void iwl4965_nic_config(struct iwl_priv *priv) +{ + unsigned long flags; + u16 radio_cfg; + + spin_lock_irqsave(&priv->lock, flags); + + radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG); + + /* write radio config values to register */ + if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX) + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | + EEPROM_RF_CFG_STEP_MSK(radio_cfg) | + EEPROM_RF_CFG_DASH_MSK(radio_cfg)); + + /* set CSR_HW_CONFIG_REG for uCode use */ + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | + CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); + + priv->calib_info = (struct iwl_eeprom_calib_info *) + iwl_legacy_eeprom_query_addr(priv, + EEPROM_4965_CALIB_TXPOWER_OFFSET); + + spin_unlock_irqrestore(&priv->lock, flags); +} + +/* Reset differential Rx gains in NIC to prepare for chain noise calibration. + * Called after every association, but this runs only once! + * ... once chain noise is calibrated the first time, it's good forever. */ +static void iwl4965_chain_noise_reset(struct iwl_priv *priv) +{ + struct iwl_chain_noise_data *data = &(priv->chain_noise_data); + + if ((data->state == IWL_CHAIN_NOISE_ALIVE) && + iwl_legacy_is_any_associated(priv)) { + struct iwl_calib_diff_gain_cmd cmd; + + /* clear data for chain noise calibration algorithm */ + data->chain_noise_a = 0; + data->chain_noise_b = 0; + data->chain_noise_c = 0; + data->chain_signal_a = 0; + data->chain_signal_b = 0; + data->chain_signal_c = 0; + data->beacon_count = 0; + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD; + cmd.diff_gain_a = 0; + cmd.diff_gain_b = 0; + cmd.diff_gain_c = 0; + if (iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, + sizeof(cmd), &cmd)) + IWL_ERR(priv, + "Could not send REPLY_PHY_CALIBRATION_CMD\n"); + data->state = IWL_CHAIN_NOISE_ACCUMULATE; + IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n"); + } +} + +static struct iwl_sensitivity_ranges iwl4965_sensitivity = { + .min_nrg_cck = 97, + .max_nrg_cck = 0, /* not used, set to 0 */ + + .auto_corr_min_ofdm = 85, + .auto_corr_min_ofdm_mrc = 170, + .auto_corr_min_ofdm_x1 = 105, + .auto_corr_min_ofdm_mrc_x1 = 220, + + .auto_corr_max_ofdm = 120, + .auto_corr_max_ofdm_mrc = 210, + .auto_corr_max_ofdm_x1 = 140, + .auto_corr_max_ofdm_mrc_x1 = 270, + + .auto_corr_min_cck = 125, + .auto_corr_max_cck = 200, + .auto_corr_min_cck_mrc = 200, + .auto_corr_max_cck_mrc = 400, + + .nrg_th_cck = 100, + .nrg_th_ofdm = 100, + + .barker_corr_th_min = 190, + .barker_corr_th_min_mrc = 390, + .nrg_th_cca = 62, +}; + +static void iwl4965_set_ct_threshold(struct iwl_priv *priv) +{ + /* want Kelvin */ + priv->hw_params.ct_kill_threshold = + CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY); +} + +/** + * iwl4965_hw_set_hw_params + * + * Called when initializing driver + */ +static int iwl4965_hw_set_hw_params(struct iwl_priv *priv) 
+{ + if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && + priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES) + priv->cfg->base_params->num_of_queues = + priv->cfg->mod_params->num_of_queues; + + priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; + priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM; + priv->hw_params.scd_bc_tbls_size = + priv->cfg->base_params->num_of_queues * + sizeof(struct iwl4965_scd_bc_tbl); + priv->hw_params.tfd_size = sizeof(struct iwl_tfd); + priv->hw_params.max_stations = IWL4965_STATION_COUNT; + priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL4965_BROADCAST_ID; + priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE; + priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE; + priv->hw_params.max_bsm_size = BSM_SRAM_SIZE; + priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ); + + priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; + + priv->hw_params.tx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_tx_ant); + priv->hw_params.rx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_rx_ant); + priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; + priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; + + iwl4965_set_ct_threshold(priv); + + priv->hw_params.sens = &iwl4965_sensitivity; + priv->hw_params.beacon_time_tsf_bits = IWL4965_EXT_BEACON_TIME_POS; + + return 0; +} + +static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res) +{ + s32 sign = 1; + + if (num < 0) { + sign = -sign; + num = -num; + } + if (denom < 0) { + sign = -sign; + denom = -denom; + } + *res = 1; + *res = ((num * 2 + denom) / (denom * 2)) * sign; + + return 1; +} + +/** + * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower + * + * Determines power supply voltage compensation for txpower calculations. + * Returns number of 1/2-dB steps to subtract from gain table index, + * to compensate for difference between power supply voltage during + * factory measurements, vs. current power supply voltage. + * + * Voltage indication is higher for lower voltage. + * Lower voltage requires more gain (lower gain table index). 
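+ *
+ * In code terms: the difference between the current and the factory
+ * (EEPROM) reading is divided, with rounding, by
+ * TX_POWER_IWL_VOLTAGE_CODES_PER_03V, doubled when the current reading
+ * is the larger one, and set back to 0 if the result falls outside the
+ * range [-2, 2].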
+ */ +static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage, + s32 current_voltage) +{ + s32 comp = 0; + + if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) || + (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage)) + return 0; + + iwl4965_math_div_round(current_voltage - eeprom_voltage, + TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp); + + if (current_voltage > eeprom_voltage) + comp *= 2; + if ((comp < -2) || (comp > 2)) + comp = 0; + + return comp; +} + +static s32 iwl4965_get_tx_atten_grp(u16 channel) +{ + if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR5_LCH) + return CALIB_CH_GROUP_5; + + if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR1_LCH) + return CALIB_CH_GROUP_1; + + if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR2_LCH) + return CALIB_CH_GROUP_2; + + if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR3_LCH) + return CALIB_CH_GROUP_3; + + if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR4_LCH) + return CALIB_CH_GROUP_4; + + return -EINVAL; +} + +static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel) +{ + s32 b = -1; + + for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) { + if (priv->calib_info->band_info[b].ch_from == 0) + continue; + + if ((channel >= priv->calib_info->band_info[b].ch_from) + && (channel <= priv->calib_info->band_info[b].ch_to)) + break; + } + + return b; +} + +static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2) +{ + s32 val; + + if (x2 == x1) + return y1; + else { + iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val); + return val + y2; + } +} + +/** + * iwl4965_interpolate_chan - Interpolate factory measurements for one channel + * + * Interpolates factory measurements from the two sample channels within a + * sub-band, to apply to channel of interest. Interpolation is proportional to + * differences in channel frequencies, which is proportional to differences + * in channel number. + */ +static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel, + struct iwl_eeprom_calib_ch_info *chan_info) +{ + s32 s = -1; + u32 c; + u32 m; + const struct iwl_eeprom_calib_measure *m1; + const struct iwl_eeprom_calib_measure *m2; + struct iwl_eeprom_calib_measure *omeas; + u32 ch_i1; + u32 ch_i2; + + s = iwl4965_get_sub_band(priv, channel); + if (s >= EEPROM_TX_POWER_BANDS) { + IWL_ERR(priv, "Tx Power can not find channel %d\n", channel); + return -1; + } + + ch_i1 = priv->calib_info->band_info[s].ch1.ch_num; + ch_i2 = priv->calib_info->band_info[s].ch2.ch_num; + chan_info->ch_num = (u8) channel; + + IWL_DEBUG_TXPOWER(priv, "channel %d subband %d factory cal ch %d & %d\n", + channel, s, ch_i1, ch_i2); + + for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) { + for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) { + m1 = &(priv->calib_info->band_info[s].ch1. + measurements[c][m]); + m2 = &(priv->calib_info->band_info[s].ch2. 
+ measurements[c][m]); + omeas = &(chan_info->measurements[c][m]); + + omeas->actual_pow = + (u8) iwl4965_interpolate_value(channel, ch_i1, + m1->actual_pow, + ch_i2, + m2->actual_pow); + omeas->gain_idx = + (u8) iwl4965_interpolate_value(channel, ch_i1, + m1->gain_idx, ch_i2, + m2->gain_idx); + omeas->temperature = + (u8) iwl4965_interpolate_value(channel, ch_i1, + m1->temperature, + ch_i2, + m2->temperature); + omeas->pa_det = + (s8) iwl4965_interpolate_value(channel, ch_i1, + m1->pa_det, ch_i2, + m2->pa_det); + + IWL_DEBUG_TXPOWER(priv, + "chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m, + m1->actual_pow, m2->actual_pow, omeas->actual_pow); + IWL_DEBUG_TXPOWER(priv, + "chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m, + m1->gain_idx, m2->gain_idx, omeas->gain_idx); + IWL_DEBUG_TXPOWER(priv, + "chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m, + m1->pa_det, m2->pa_det, omeas->pa_det); + IWL_DEBUG_TXPOWER(priv, + "chain %d meas %d T1=%d T2=%d T=%d\n", c, m, + m1->temperature, m2->temperature, + omeas->temperature); + } + } + + return 0; +} + +/* bit-rate-dependent table to prevent Tx distortion, in half-dB units, + * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */ +static s32 back_off_table[] = { + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */ + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */ + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */ + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */ + 10 /* CCK */ +}; + +/* Thermal compensation values for txpower for various frequency ranges ... + * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */ +static struct iwl4965_txpower_comp_entry { + s32 degrees_per_05db_a; + s32 degrees_per_05db_a_denom; +} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = { + {9, 2}, /* group 0 5.2, ch 34-43 */ + {4, 1}, /* group 1 5.2, ch 44-70 */ + {4, 1}, /* group 2 5.2, ch 71-124 */ + {4, 1}, /* group 3 5.2, ch 125-200 */ + {3, 1} /* group 4 2.4, ch all */ +}; + +static s32 get_min_power_index(s32 rate_power_index, u32 band) +{ + if (!band) { + if ((rate_power_index & 7) <= 4) + return MIN_TX_GAIN_INDEX_52GHZ_EXT; + } + return MIN_TX_GAIN_INDEX; +} + +struct gain_entry { + u8 dsp; + u8 radio; +}; + +static const struct gain_entry gain_table[2][108] = { + /* 5.2GHz power gain index table */ + { + {123, 0x3F}, /* highest txpower */ + {117, 0x3F}, + {110, 0x3F}, + {104, 0x3F}, + {98, 0x3F}, + {110, 0x3E}, + {104, 0x3E}, + {98, 0x3E}, + {110, 0x3D}, + {104, 0x3D}, + {98, 0x3D}, + {110, 0x3C}, + {104, 0x3C}, + {98, 0x3C}, + {110, 0x3B}, + {104, 0x3B}, + {98, 0x3B}, + {110, 0x3A}, + {104, 0x3A}, + {98, 0x3A}, + {110, 0x39}, + {104, 0x39}, + {98, 0x39}, + {110, 0x38}, + {104, 0x38}, + {98, 0x38}, + {110, 0x37}, + {104, 0x37}, + {98, 0x37}, + {110, 0x36}, + {104, 0x36}, + {98, 0x36}, + {110, 0x35}, + {104, 0x35}, + {98, 0x35}, + {110, 0x34}, + {104, 0x34}, + {98, 0x34}, + {110, 0x33}, + {104, 0x33}, + {98, 0x33}, + {110, 0x32}, + {104, 0x32}, + {98, 0x32}, + {110, 0x31}, + {104, 0x31}, + {98, 0x31}, + {110, 0x30}, + {104, 0x30}, + {98, 0x30}, + {110, 0x25}, + {104, 0x25}, + {98, 0x25}, + {110, 0x24}, + {104, 0x24}, + {98, 0x24}, + {110, 0x23}, + {104, 0x23}, + {98, 0x23}, + {110, 0x22}, + {104, 0x18}, + {98, 0x18}, + {110, 0x17}, + {104, 0x17}, + {98, 0x17}, + {110, 0x16}, + {104, 0x16}, + {98, 0x16}, + {110, 0x15}, + {104, 0x15}, + {98, 0x15}, + {110, 0x14}, + {104, 0x14}, + {98, 0x14}, + {110, 0x13}, + {104, 0x13}, + {98, 0x13}, + {110, 0x12}, + {104, 0x08}, + {98, 0x08}, + {110, 0x07}, + {104, 0x07}, + {98, 0x07}, + 
{110, 0x06}, + {104, 0x06}, + {98, 0x06}, + {110, 0x05}, + {104, 0x05}, + {98, 0x05}, + {110, 0x04}, + {104, 0x04}, + {98, 0x04}, + {110, 0x03}, + {104, 0x03}, + {98, 0x03}, + {110, 0x02}, + {104, 0x02}, + {98, 0x02}, + {110, 0x01}, + {104, 0x01}, + {98, 0x01}, + {110, 0x00}, + {104, 0x00}, + {98, 0x00}, + {93, 0x00}, + {88, 0x00}, + {83, 0x00}, + {78, 0x00}, + }, + /* 2.4GHz power gain index table */ + { + {110, 0x3f}, /* highest txpower */ + {104, 0x3f}, + {98, 0x3f}, + {110, 0x3e}, + {104, 0x3e}, + {98, 0x3e}, + {110, 0x3d}, + {104, 0x3d}, + {98, 0x3d}, + {110, 0x3c}, + {104, 0x3c}, + {98, 0x3c}, + {110, 0x3b}, + {104, 0x3b}, + {98, 0x3b}, + {110, 0x3a}, + {104, 0x3a}, + {98, 0x3a}, + {110, 0x39}, + {104, 0x39}, + {98, 0x39}, + {110, 0x38}, + {104, 0x38}, + {98, 0x38}, + {110, 0x37}, + {104, 0x37}, + {98, 0x37}, + {110, 0x36}, + {104, 0x36}, + {98, 0x36}, + {110, 0x35}, + {104, 0x35}, + {98, 0x35}, + {110, 0x34}, + {104, 0x34}, + {98, 0x34}, + {110, 0x33}, + {104, 0x33}, + {98, 0x33}, + {110, 0x32}, + {104, 0x32}, + {98, 0x32}, + {110, 0x31}, + {104, 0x31}, + {98, 0x31}, + {110, 0x30}, + {104, 0x30}, + {98, 0x30}, + {110, 0x6}, + {104, 0x6}, + {98, 0x6}, + {110, 0x5}, + {104, 0x5}, + {98, 0x5}, + {110, 0x4}, + {104, 0x4}, + {98, 0x4}, + {110, 0x3}, + {104, 0x3}, + {98, 0x3}, + {110, 0x2}, + {104, 0x2}, + {98, 0x2}, + {110, 0x1}, + {104, 0x1}, + {98, 0x1}, + {110, 0x0}, + {104, 0x0}, + {98, 0x0}, + {97, 0}, + {96, 0}, + {95, 0}, + {94, 0}, + {93, 0}, + {92, 0}, + {91, 0}, + {90, 0}, + {89, 0}, + {88, 0}, + {87, 0}, + {86, 0}, + {85, 0}, + {84, 0}, + {83, 0}, + {82, 0}, + {81, 0}, + {80, 0}, + {79, 0}, + {78, 0}, + {77, 0}, + {76, 0}, + {75, 0}, + {74, 0}, + {73, 0}, + {72, 0}, + {71, 0}, + {70, 0}, + {69, 0}, + {68, 0}, + {67, 0}, + {66, 0}, + {65, 0}, + {64, 0}, + {63, 0}, + {62, 0}, + {61, 0}, + {60, 0}, + {59, 0}, + } +}; + +static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel, + u8 is_ht40, u8 ctrl_chan_high, + struct iwl4965_tx_power_db *tx_power_tbl) +{ + u8 saturation_power; + s32 target_power; + s32 user_target_power; + s32 power_limit; + s32 current_temp; + s32 reg_limit; + s32 current_regulatory; + s32 txatten_grp = CALIB_CH_GROUP_MAX; + int i; + int c; + const struct iwl_channel_info *ch_info = NULL; + struct iwl_eeprom_calib_ch_info ch_eeprom_info; + const struct iwl_eeprom_calib_measure *measurement; + s16 voltage; + s32 init_voltage; + s32 voltage_compensation; + s32 degrees_per_05db_num; + s32 degrees_per_05db_denom; + s32 factory_temp; + s32 temperature_comp[2]; + s32 factory_gain_index[2]; + s32 factory_actual_pwr[2]; + s32 power_index; + + /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units + * are used for indexing into txpower table) */ + user_target_power = 2 * priv->tx_power_user_lmt; + + /* Get current (RXON) channel, band, width */ + IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band, + is_ht40); + + ch_info = iwl_legacy_get_channel_info(priv, priv->band, channel); + + if (!iwl_legacy_is_channel_valid(ch_info)) + return -EINVAL; + + /* get txatten group, used to select 1) thermal txpower adjustment + * and 2) mimo txpower balance between Tx chains. 
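+	 * The groups map to four 5.2 GHz sub-bands plus one group covering
+	 * all of 2.4 GHz (see tx_power_cmp_tble above).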
*/ + txatten_grp = iwl4965_get_tx_atten_grp(channel); + if (txatten_grp < 0) { + IWL_ERR(priv, "Can't find txatten group for channel %d.\n", + channel); + return txatten_grp; + } + + IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n", + channel, txatten_grp); + + if (is_ht40) { + if (ctrl_chan_high) + channel -= 2; + else + channel += 2; + } + + /* hardware txpower limits ... + * saturation (clipping distortion) txpowers are in half-dBm */ + if (band) + saturation_power = priv->calib_info->saturation_power24; + else + saturation_power = priv->calib_info->saturation_power52; + + if (saturation_power < IWL_TX_POWER_SATURATION_MIN || + saturation_power > IWL_TX_POWER_SATURATION_MAX) { + if (band) + saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24; + else + saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52; + } + + /* regulatory txpower limits ... reg_limit values are in half-dBm, + * max_power_avg values are in dBm, convert * 2 */ + if (is_ht40) + reg_limit = ch_info->ht40_max_power_avg * 2; + else + reg_limit = ch_info->max_power_avg * 2; + + if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) || + (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) { + if (band) + reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24; + else + reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52; + } + + /* Interpolate txpower calibration values for this channel, + * based on factory calibration tests on spaced channels. */ + iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info); + + /* calculate tx gain adjustment based on power supply voltage */ + voltage = le16_to_cpu(priv->calib_info->voltage); + init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage); + voltage_compensation = + iwl4965_get_voltage_compensation(voltage, init_voltage); + + IWL_DEBUG_TXPOWER(priv, "curr volt %d eeprom volt %d volt comp %d\n", + init_voltage, + voltage, voltage_compensation); + + /* get current temperature (Celsius) */ + current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN); + current_temp = min(priv->temperature, IWL_TX_POWER_TEMPERATURE_MAX); + current_temp = KELVIN_TO_CELSIUS(current_temp); + + /* select thermal txpower adjustment params, based on channel group + * (same frequency group used for mimo txatten adjustment) */ + degrees_per_05db_num = + tx_power_cmp_tble[txatten_grp].degrees_per_05db_a; + degrees_per_05db_denom = + tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom; + + /* get per-chain txpower values from factory measurements */ + for (c = 0; c < 2; c++) { + measurement = &ch_eeprom_info.measurements[c][1]; + + /* txgain adjustment (in half-dB steps) based on difference + * between factory and current temperature */ + factory_temp = measurement->temperature; + iwl4965_math_div_round((current_temp - factory_temp) * + degrees_per_05db_denom, + degrees_per_05db_num, + &temperature_comp[c]); + + factory_gain_index[c] = measurement->gain_idx; + factory_actual_pwr[c] = measurement->actual_pow; + + IWL_DEBUG_TXPOWER(priv, "chain = %d\n", c); + IWL_DEBUG_TXPOWER(priv, "fctry tmp %d, " + "curr tmp %d, comp %d steps\n", + factory_temp, current_temp, + temperature_comp[c]); + + IWL_DEBUG_TXPOWER(priv, "fctry idx %d, fctry pwr %d\n", + factory_gain_index[c], + factory_actual_pwr[c]); + } + + /* for each of 33 bit-rates (including 1 for CCK) */ + for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) { + u8 is_mimo_rate; + union iwl4965_tx_power_dual_stream tx_power; + + /* for mimo, reduce each chain's txpower by half + * (3dB, 6 steps), so total output power is regulatory + * compliant. 
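+		 * (6 steps because the tables are in half-dB units; two chains
+		 * each backed off by 3 dB add back up to the single-chain limit.)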
*/ + if (i & 0x8) { + current_regulatory = reg_limit - + IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION; + is_mimo_rate = 1; + } else { + current_regulatory = reg_limit; + is_mimo_rate = 0; + } + + /* find txpower limit, either hardware or regulatory */ + power_limit = saturation_power - back_off_table[i]; + if (power_limit > current_regulatory) + power_limit = current_regulatory; + + /* reduce user's txpower request if necessary + * for this rate on this channel */ + target_power = user_target_power; + if (target_power > power_limit) + target_power = power_limit; + + IWL_DEBUG_TXPOWER(priv, "rate %d sat %d reg %d usr %d tgt %d\n", + i, saturation_power - back_off_table[i], + current_regulatory, user_target_power, + target_power); + + /* for each of 2 Tx chains (radio transmitters) */ + for (c = 0; c < 2; c++) { + s32 atten_value; + + if (is_mimo_rate) + atten_value = + (s32)le32_to_cpu(priv->card_alive_init. + tx_atten[txatten_grp][c]); + else + atten_value = 0; + + /* calculate index; higher index means lower txpower */ + power_index = (u8) (factory_gain_index[c] - + (target_power - + factory_actual_pwr[c]) - + temperature_comp[c] - + voltage_compensation + + atten_value); + +/* IWL_DEBUG_TXPOWER(priv, "calculated txpower index %d\n", + power_index); */ + + if (power_index < get_min_power_index(i, band)) + power_index = get_min_power_index(i, band); + + /* adjust 5 GHz index to support negative indexes */ + if (!band) + power_index += 9; + + /* CCK, rate 32, reduce txpower for CCK */ + if (i == POWER_TABLE_CCK_ENTRY) + power_index += + IWL_TX_POWER_CCK_COMPENSATION_C_STEP; + + /* stay within the table! */ + if (power_index > 107) { + IWL_WARN(priv, "txpower index %d > 107\n", + power_index); + power_index = 107; + } + if (power_index < 0) { + IWL_WARN(priv, "txpower index %d < 0\n", + power_index); + power_index = 0; + } + + /* fill txpower command for this rate/chain */ + tx_power.s.radio_tx_gain[c] = + gain_table[band][power_index].radio; + tx_power.s.dsp_predis_atten[c] = + gain_table[band][power_index].dsp; + + IWL_DEBUG_TXPOWER(priv, "chain %d mimo %d index %d " + "gain 0x%02x dsp %d\n", + c, atten_value, power_index, + tx_power.s.radio_tx_gain[c], + tx_power.s.dsp_predis_atten[c]); + } /* for each chain */ + + tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw); + + } /* for each rate */ + + return 0; +} + +/** + * iwl4965_send_tx_power - Configure the TXPOWER level user limit + * + * Uses the active RXON for channel, band, and characteristics (ht40, high) + * The power limit is taken from priv->tx_power_user_lmt. 
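+ *
+ * Refuses to run and returns -EAGAIN while a hardware scan is in
+ * progress (see the WARN_ONCE below).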
+ */ +static int iwl4965_send_tx_power(struct iwl_priv *priv) +{ + struct iwl4965_txpowertable_cmd cmd = { 0 }; + int ret; + u8 band = 0; + bool is_ht40 = false; + u8 ctrl_chan_high = 0; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status), + "TX Power requested while scanning!\n")) + return -EAGAIN; + + band = priv->band == IEEE80211_BAND_2GHZ; + + is_ht40 = iw4965_is_ht40_channel(ctx->active.flags); + + if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) + ctrl_chan_high = 1; + + cmd.band = band; + cmd.channel = ctx->active.channel; + + ret = iwl4965_fill_txpower_tbl(priv, band, + le16_to_cpu(ctx->active.channel), + is_ht40, ctrl_chan_high, &cmd.tx_power); + if (ret) + goto out; + + ret = iwl_legacy_send_cmd_pdu(priv, + REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd); + +out: + return ret; +} + +static int iwl4965_send_rxon_assoc(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + int ret = 0; + struct iwl4965_rxon_assoc_cmd rxon_assoc; + const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging; + const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active; + + if ((rxon1->flags == rxon2->flags) && + (rxon1->filter_flags == rxon2->filter_flags) && + (rxon1->cck_basic_rates == rxon2->cck_basic_rates) && + (rxon1->ofdm_ht_single_stream_basic_rates == + rxon2->ofdm_ht_single_stream_basic_rates) && + (rxon1->ofdm_ht_dual_stream_basic_rates == + rxon2->ofdm_ht_dual_stream_basic_rates) && + (rxon1->rx_chain == rxon2->rx_chain) && + (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { + IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n"); + return 0; + } + + rxon_assoc.flags = ctx->staging.flags; + rxon_assoc.filter_flags = ctx->staging.filter_flags; + rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates; + rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates; + rxon_assoc.reserved = 0; + rxon_assoc.ofdm_ht_single_stream_basic_rates = + ctx->staging.ofdm_ht_single_stream_basic_rates; + rxon_assoc.ofdm_ht_dual_stream_basic_rates = + ctx->staging.ofdm_ht_dual_stream_basic_rates; + rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain; + + ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC, + sizeof(rxon_assoc), &rxon_assoc, NULL); + + return ret; +} + +static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + /* cast away the const for active_rxon in this function */ + struct iwl_legacy_rxon_cmd *active_rxon = (void *)&ctx->active; + int ret; + bool new_assoc = + !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK); + + if (!iwl_legacy_is_alive(priv)) + return -EBUSY; + + if (!ctx->is_active) + return 0; + + /* always get timestamp with Rx frame */ + ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; + + ret = iwl_legacy_check_rxon_cmd(priv, ctx); + if (ret) { + IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); + return -EINVAL; + } + + /* + * receive commit_rxon request + * abort any previous channel switch if still in process + */ + if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) && + (priv->switch_channel != ctx->staging.channel)) { + IWL_DEBUG_11H(priv, "abort channel switch on %d\n", + le16_to_cpu(priv->switch_channel)); + iwl_legacy_chswitch_done(priv, false); + } + + /* If we don't need to send a full RXON, we can use + * iwl_rxon_assoc_cmd which is used to reconfigure filter + * and other flags for the current radio configuration. 
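+	 * That lighter path also flushes any pending Tx power change, since
+	 * Tx power is not committed while a channel change is in flight.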
*/ + if (!iwl_legacy_full_rxon_required(priv, ctx)) { + ret = iwl_legacy_send_rxon_assoc(priv, ctx); + if (ret) { + IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret); + return ret; + } + + memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); + iwl_legacy_print_rx_config_cmd(priv, ctx); + /* + * We do not commit tx power settings while channel changing, + * do it now if tx power changed. + */ + iwl_legacy_set_tx_power(priv, priv->tx_power_next, false); + return 0; + } + + /* If we are currently associated and the new config requires + * an RXON_ASSOC and the new config wants the associated mask enabled, + * we must clear the associated from the active configuration + * before we apply the new config */ + if (iwl_legacy_is_associated_ctx(ctx) && new_assoc) { + IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n"); + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + + ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd, + sizeof(struct iwl_legacy_rxon_cmd), + active_rxon); + + /* If the mask clearing failed then we set + * active_rxon back to what it was previously */ + if (ret) { + active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; + IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret); + return ret; + } + iwl_legacy_clear_ucode_stations(priv, ctx); + iwl_legacy_restore_stations(priv, ctx); + ret = iwl4965_restore_default_wep_keys(priv, ctx); + if (ret) { + IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret); + return ret; + } + } + + IWL_DEBUG_INFO(priv, "Sending RXON\n" + "* with%s RXON_FILTER_ASSOC_MSK\n" + "* channel = %d\n" + "* bssid = %pM\n", + (new_assoc ? "" : "out"), + le16_to_cpu(ctx->staging.channel), + ctx->staging.bssid_addr); + + iwl_legacy_set_rxon_hwcrypto(priv, ctx, + !priv->cfg->mod_params->sw_crypto); + + /* Apply the new configuration + * RXON unassoc clears the station table in uCode so restoration of + * stations is needed after it (the RXON command) completes + */ + if (!new_assoc) { + ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd, + sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging); + if (ret) { + IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); + return ret; + } + IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n"); + memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); + iwl_legacy_clear_ucode_stations(priv, ctx); + iwl_legacy_restore_stations(priv, ctx); + ret = iwl4965_restore_default_wep_keys(priv, ctx); + if (ret) { + IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret); + return ret; + } + } + if (new_assoc) { + priv->start_calib = 0; + /* Apply the new configuration + * RXON assoc doesn't clear the station table in uCode, + */ + ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd, + sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging); + if (ret) { + IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); + return ret; + } + memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); + } + iwl_legacy_print_rx_config_cmd(priv, ctx); + + iwl4965_init_sensitivity(priv); + + /* If we issue a new RXON command which required a tune then we must + * send a new TXPOWER command or we won't be able to Tx any frames */ + ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true); + if (ret) { + IWL_ERR(priv, "Error sending TX power (%d)\n", ret); + return ret; + } + + return 0; +} + +static int iwl4965_hw_channel_switch(struct iwl_priv *priv, + struct ieee80211_channel_switch *ch_switch) +{ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + int rc; + u8 band = 0; + bool is_ht40 = false; + u8 ctrl_chan_high 
= 0; + struct iwl4965_channel_switch_cmd cmd; + const struct iwl_channel_info *ch_info; + u32 switch_time_in_usec, ucode_switch_time; + u16 ch; + u32 tsf_low; + u8 switch_count; + u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval); + struct ieee80211_vif *vif = ctx->vif; + band = priv->band == IEEE80211_BAND_2GHZ; + + is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags); + + if (is_ht40 && + (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) + ctrl_chan_high = 1; + + cmd.band = band; + cmd.expect_beacon = 0; + ch = ch_switch->channel->hw_value; + cmd.channel = cpu_to_le16(ch); + cmd.rxon_flags = ctx->staging.flags; + cmd.rxon_filter_flags = ctx->staging.filter_flags; + switch_count = ch_switch->count; + tsf_low = ch_switch->timestamp & 0x0ffffffff; + /* + * calculate the ucode channel switch time + * adding TSF as one of the factor for when to switch + */ + if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) { + if (switch_count > ((priv->ucode_beacon_time - tsf_low) / + beacon_interval)) { + switch_count -= (priv->ucode_beacon_time - + tsf_low) / beacon_interval; + } else + switch_count = 0; + } + if (switch_count <= 1) + cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); + else { + switch_time_in_usec = + vif->bss_conf.beacon_int * switch_count * TIME_UNIT; + ucode_switch_time = iwl_legacy_usecs_to_beacons(priv, + switch_time_in_usec, + beacon_interval); + cmd.switch_time = iwl_legacy_add_beacon_time(priv, + priv->ucode_beacon_time, + ucode_switch_time, + beacon_interval); + } + IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", + cmd.switch_time); + ch_info = iwl_legacy_get_channel_info(priv, priv->band, ch); + if (ch_info) + cmd.expect_beacon = iwl_legacy_is_channel_radar(ch_info); + else { + IWL_ERR(priv, "invalid channel switch from %u to %u\n", + ctx->active.channel, ch); + return -EFAULT; + } + + rc = iwl4965_fill_txpower_tbl(priv, band, ch, is_ht40, + ctrl_chan_high, &cmd.tx_power); + if (rc) { + IWL_DEBUG_11H(priv, "error:%d fill txpower_tbl\n", rc); + return rc; + } + + return iwl_legacy_send_cmd_pdu(priv, + REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd); +} + +/** + * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array + */ +static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + u16 byte_cnt) +{ + struct iwl4965_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr; + int txq_id = txq->q.id; + int write_ptr = txq->q.write_ptr; + int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; + __le16 bc_ent; + + WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); + + bc_ent = cpu_to_le16(len & 0xFFF); + /* Set up byte count within first 256 entries */ + scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; + + /* If within first 64 entries, duplicate at end */ + if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) + scd_bc_tbl[txq_id]. 
+ tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; +} + +/** + * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin) + * @statistics: Provides the temperature reading from the uCode + * + * A return of <0 indicates bogus data in the statistics + */ +static int iwl4965_hw_get_temperature(struct iwl_priv *priv) +{ + s32 temperature; + s32 vt; + s32 R1, R2, R3; + u32 R4; + + if (test_bit(STATUS_TEMPERATURE, &priv->status) && + (priv->_4965.statistics.flag & + STATISTICS_REPLY_FLG_HT40_MODE_MSK)) { + IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n"); + R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]); + R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]); + R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]); + R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]); + } else { + IWL_DEBUG_TEMP(priv, "Running temperature calibration\n"); + R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]); + R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]); + R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]); + R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]); + } + + /* + * Temperature is only 23 bits, so sign extend out to 32. + * + * NOTE If we haven't received a statistics notification yet + * with an updated temperature, use R4 provided to us in the + * "initialize" ALIVE response. + */ + if (!test_bit(STATUS_TEMPERATURE, &priv->status)) + vt = sign_extend32(R4, 23); + else + vt = sign_extend32(le32_to_cpu(priv->_4965.statistics. + general.common.temperature), 23); + + IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt); + + if (R3 == R1) { + IWL_ERR(priv, "Calibration conflict R1 == R3\n"); + return -1; + } + + /* Calculate temperature in degrees Kelvin, adjust by 97%. + * Add offset to center the adjustment around 0 degrees Centigrade. */ + temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2); + temperature /= (R3 - R1); + temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET; + + IWL_DEBUG_TEMP(priv, "Calibrated temperature: %dK, %dC\n", + temperature, KELVIN_TO_CELSIUS(temperature)); + + return temperature; +} + +/* Adjust Txpower only if temperature variance is greater than threshold. */ +#define IWL_TEMPERATURE_THRESHOLD 3 + +/** + * iwl4965_is_temp_calib_needed - determines if new calibration is needed + * + * If the temperature has changed sufficiently, then a recalibration + * is needed. + * + * Assumes caller will replace priv->last_temperature once calibration + * executed.
+ */ +static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv) +{ + int temp_diff; + + if (!test_bit(STATUS_STATISTICS, &priv->status)) { + IWL_DEBUG_TEMP(priv, "Temperature not updated -- no statistics.\n"); + return 0; + } + + temp_diff = priv->temperature - priv->last_temperature; + + /* get absolute value */ + if (temp_diff < 0) { + IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff); + temp_diff = -temp_diff; + } else if (temp_diff == 0) + IWL_DEBUG_POWER(priv, "Temperature unchanged\n"); + else + IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff); + + if (temp_diff < IWL_TEMPERATURE_THRESHOLD) { + IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n"); + return 0; + } + + IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n"); + + return 1; +} + +static void iwl4965_temperature_calib(struct iwl_priv *priv) +{ + s32 temp; + + temp = iwl4965_hw_get_temperature(priv); + if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp)) + return; + + if (priv->temperature != temp) { + if (priv->temperature) + IWL_DEBUG_TEMP(priv, "Temperature changed " + "from %dC to %dC\n", + KELVIN_TO_CELSIUS(priv->temperature), + KELVIN_TO_CELSIUS(temp)); + else + IWL_DEBUG_TEMP(priv, "Temperature " + "initialized to %dC\n", + KELVIN_TO_CELSIUS(temp)); + } + + priv->temperature = temp; + set_bit(STATUS_TEMPERATURE, &priv->status); + + if (!priv->disable_tx_power_cal && + unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && + iwl4965_is_temp_calib_needed(priv)) + queue_work(priv->workqueue, &priv->txpower_work); +} + +static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len) +{ + switch (cmd_id) { + case REPLY_RXON: + return (u16) sizeof(struct iwl4965_rxon_cmd); + default: + return len; + } +} + +static u16 iwl4965_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd, + u8 *data) +{ + struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data; + addsta->mode = cmd->mode; + memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify)); + memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo)); + addsta->station_flags = cmd->station_flags; + addsta->station_flags_msk = cmd->station_flags_msk; + addsta->tid_disable_tx = cmd->tid_disable_tx; + addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid; + addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid; + addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn; + addsta->sleep_tx_count = cmd->sleep_tx_count; + addsta->reserved1 = cpu_to_le16(0); + addsta->reserved2 = cpu_to_le16(0); + + return (u16)sizeof(struct iwl4965_addsta_cmd); +} + +static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp) +{ + return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN; +} + +/** + * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue + */ +static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv, + struct iwl_ht_agg *agg, + struct iwl4965_tx_resp *tx_resp, + int txq_id, u16 start_idx) +{ + u16 status; + struct agg_tx_status *frame_status = tx_resp->u.agg_status; + struct ieee80211_tx_info *info = NULL; + struct ieee80211_hdr *hdr = NULL; + u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); + int i, sh, idx; + u16 seq; + if (agg->wait_for_ba) + IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n"); + + agg->frame_count = tx_resp->frame_count; + agg->start_idx = start_idx; + agg->rate_n_flags = rate_n_flags; + agg->bitmap = 0; + + /* num frames attempted by Tx command */ + if (agg->frame_count == 1) { + /* Only one frame was attempted; no 
block-ack will arrive */ + status = le16_to_cpu(frame_status[0].status); + idx = start_idx; + + IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n", + agg->frame_count, agg->start_idx, idx); + + info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb); + info->status.rates[0].count = tx_resp->failure_frame + 1; + info->flags &= ~IEEE80211_TX_CTL_AMPDU; + info->flags |= iwl4965_tx_status_to_mac80211(status); + iwl4965_hwrate_to_tx_control(priv, rate_n_flags, info); + + IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n", + status & 0xff, tx_resp->failure_frame); + IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags); + + agg->wait_for_ba = 0; + } else { + /* Two or more frames were attempted; expect block-ack */ + u64 bitmap = 0; + int start = agg->start_idx; + + /* Construct bit-map of pending frames within Tx window */ + for (i = 0; i < agg->frame_count; i++) { + u16 sc; + status = le16_to_cpu(frame_status[i].status); + seq = le16_to_cpu(frame_status[i].sequence); + idx = SEQ_TO_INDEX(seq); + txq_id = SEQ_TO_QUEUE(seq); + + if (status & (AGG_TX_STATE_FEW_BYTES_MSK | + AGG_TX_STATE_ABORT_MSK)) + continue; + + IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n", + agg->frame_count, txq_id, idx); + + hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, idx); + if (!hdr) { + IWL_ERR(priv, + "BUG_ON idx doesn't point to valid skb" + " idx=%d, txq_id=%d\n", idx, txq_id); + return -1; + } + + sc = le16_to_cpu(hdr->seq_ctrl); + if (idx != (SEQ_TO_SN(sc) & 0xff)) { + IWL_ERR(priv, + "BUG_ON idx doesn't match seq control" + " idx=%d, seq_idx=%d, seq=%d\n", + idx, SEQ_TO_SN(sc), hdr->seq_ctrl); + return -1; + } + + IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n", + i, idx, SEQ_TO_SN(sc)); + + sh = idx - start; + if (sh > 64) { + sh = (start - idx) + 0xff; + bitmap = bitmap << sh; + sh = 0; + start = idx; + } else if (sh < -64) + sh = 0xff - (start - idx); + else if (sh < 0) { + sh = start - idx; + start = idx; + bitmap = bitmap << sh; + sh = 0; + } + bitmap |= 1ULL << sh; + IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n", + start, (unsigned long long)bitmap); + } + + agg->bitmap = bitmap; + agg->start_idx = start; + IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n", + agg->frame_count, agg->start_idx, + (unsigned long long)agg->bitmap); + + if (bitmap) + agg->wait_for_ba = 1; + } + return 0; +} + +static u8 iwl4965_find_station(struct iwl_priv *priv, const u8 *addr) +{ + int i; + int start = 0; + int ret = IWL_INVALID_STATION; + unsigned long flags; + + if ((priv->iw_mode == NL80211_IFTYPE_ADHOC)) + start = IWL_STA_ID; + + if (is_broadcast_ether_addr(addr)) + return priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id; + + spin_lock_irqsave(&priv->sta_lock, flags); + for (i = start; i < priv->hw_params.max_stations; i++) + if (priv->stations[i].used && + (!compare_ether_addr(priv->stations[i].sta.sta.addr, + addr))) { + ret = i; + goto out; + } + + IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n", + addr, priv->num_stations); + + out: + /* + * It may be possible that more commands interacting with stations + * arrive before we completed processing the adding of + * station + */ + if (ret != IWL_INVALID_STATION && + (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) || + ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) && + (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) { + IWL_ERR(priv, "Requested station info for sta %d before ready.\n", + ret); + ret = IWL_INVALID_STATION; + } + 
spin_unlock_irqrestore(&priv->sta_lock, flags); + return ret; +} + +static int iwl4965_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr) +{ + if (priv->iw_mode == NL80211_IFTYPE_STATION) { + return IWL_AP_ID; + } else { + u8 *da = ieee80211_get_DA(hdr); + return iwl4965_find_station(priv, da); + } +} + +/** + * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response + */ +static void iwl4965_rx_reply_tx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int index = SEQ_TO_INDEX(sequence); + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct ieee80211_hdr *hdr; + struct ieee80211_tx_info *info; + struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; + u32 status = le32_to_cpu(tx_resp->u.status); + int uninitialized_var(tid); + int sta_id; + int freed; + u8 *qc = NULL; + unsigned long flags; + + if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) { + IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " + "is out of range [0-%d] %d %d\n", txq_id, + index, txq->q.n_bd, txq->q.write_ptr, + txq->q.read_ptr); + return; + } + + txq->time_stamp = jiffies; + info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); + memset(&info->status, 0, sizeof(info->status)); + + hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, index); + if (ieee80211_is_data_qos(hdr->frame_control)) { + qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & 0xf; + } + + sta_id = iwl4965_get_ra_sta_id(priv, hdr); + if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) { + IWL_ERR(priv, "Station not known\n"); + return; + } + + spin_lock_irqsave(&priv->sta_lock, flags); + if (txq->sched_retry) { + const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp); + struct iwl_ht_agg *agg = NULL; + WARN_ON(!qc); + + agg = &priv->stations[sta_id].tid[tid].agg; + + iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index); + + /* check if BAR is needed */ + if ((tx_resp->frame_count == 1) && !iwl4965_is_tx_success(status)) + info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; + + if (txq->q.read_ptr != (scd_ssn & 0xff)) { + index = iwl_legacy_queue_dec_wrap(scd_ssn & 0xff, + txq->q.n_bd); + IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " + "%d index %d\n", scd_ssn , index); + freed = iwl4965_tx_queue_reclaim(priv, txq_id, index); + if (qc) + iwl4965_free_tfds_in_queue(priv, sta_id, + tid, freed); + + if (priv->mac80211_registered && + (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) + && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) + iwl_legacy_wake_queue(priv, txq); + } + } else { + info->status.rates[0].count = tx_resp->failure_frame + 1; + info->flags |= iwl4965_tx_status_to_mac80211(status); + iwl4965_hwrate_to_tx_control(priv, + le32_to_cpu(tx_resp->rate_n_flags), + info); + + IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) " + "rate_n_flags 0x%x retries %d\n", + txq_id, + iwl4965_get_tx_fail_reason(status), status, + le32_to_cpu(tx_resp->rate_n_flags), + tx_resp->failure_frame); + + freed = iwl4965_tx_queue_reclaim(priv, txq_id, index); + if (qc && likely(sta_id != IWL_INVALID_STATION)) + iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed); + else if (sta_id == IWL_INVALID_STATION) + IWL_DEBUG_TX_REPLY(priv, "Station not known\n"); + + if (priv->mac80211_registered && + (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark)) + iwl_legacy_wake_queue(priv, txq); + } + if (qc && likely(sta_id != IWL_INVALID_STATION)) + 
iwl4965_txq_check_empty(priv, sta_id, tid, txq_id); + + iwl4965_check_abort_status(priv, tx_resp->frame_count, status); + + spin_unlock_irqrestore(&priv->sta_lock, flags); +} + +static void iwl4965_rx_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw; + u8 rate __maybe_unused = + iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); + + IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d " + "tsf:0x%.8x%.8x rate:%d\n", + le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK, + beacon->beacon_notify_hdr.failure_frame, + le32_to_cpu(beacon->ibss_mgr_status), + le32_to_cpu(beacon->high_tsf), + le32_to_cpu(beacon->low_tsf), rate); + + priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); +} + +/* Set up 4965-specific Rx frame reply handlers */ +static void iwl4965_rx_handler_setup(struct iwl_priv *priv) +{ + /* Legacy Rx frames */ + priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx; + /* Tx response */ + priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx; + priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif; +} + +static struct iwl_hcmd_ops iwl4965_hcmd = { + .rxon_assoc = iwl4965_send_rxon_assoc, + .commit_rxon = iwl4965_commit_rxon, + .set_rxon_chain = iwl4965_set_rxon_chain, +}; + +static void iwl4965_post_scan(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + /* + * Since setting the RXON may have been deferred while + * performing the scan, fire one off if needed + */ + if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) + iwl_legacy_commit_rxon(priv, ctx); +} + +static void iwl4965_post_associate(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct ieee80211_vif *vif = ctx->vif; + struct ieee80211_conf *conf = NULL; + int ret = 0; + + if (!vif || !priv->is_open) + return; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + iwl_legacy_scan_cancel_timeout(priv, 200); + + conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw); + + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_legacy_commit_rxon(priv, ctx); + + ret = iwl_legacy_send_rxon_timing(priv, ctx); + if (ret) + IWL_WARN(priv, "RXON timing - " + "Attempting to continue.\n"); + + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; + + iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config); + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + + ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid); + + IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", + vif->bss_conf.aid, vif->bss_conf.beacon_int); + + if (vif->bss_conf.use_short_preamble) + ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { + if (vif->bss_conf.use_short_slot) + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + } + + iwl_legacy_commit_rxon(priv, ctx); + + IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", + vif->bss_conf.aid, ctx->active.bssid_addr); + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + break; + case NL80211_IFTYPE_ADHOC: + iwl4965_send_beacon_cmd(priv); + break; + default: + IWL_ERR(priv, "%s Should not be called in %d mode\n", + __func__, vif->type); + break; + } + + /* the chain noise calibration will enabled PM upon 
completion + * If chain noise has already been run, then we need to enable + * power management here */ + if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE) + iwl_legacy_power_update_mode(priv, false); + + /* Enable Rx differential gain and sensitivity calibrations */ + iwl4965_chain_noise_reset(priv); + priv->start_calib = 1; +} + +static void iwl4965_config_ap(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct ieee80211_vif *vif = ctx->vif; + int ret = 0; + + lockdep_assert_held(&priv->mutex); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + /* The following should be done only at AP bring up */ + if (!iwl_legacy_is_associated_ctx(ctx)) { + + /* RXON - unassoc (to set timing command) */ + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_legacy_commit_rxon(priv, ctx); + + /* RXON Timing */ + ret = iwl_legacy_send_rxon_timing(priv, ctx); + if (ret) + IWL_WARN(priv, "RXON timing failed - " + "Attempting to continue.\n"); + + /* AP has all antennas */ + priv->chain_noise_data.active_chains = + priv->hw_params.valid_rx_ant; + iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config); + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + + ctx->staging.assoc_id = 0; + + if (vif->bss_conf.use_short_preamble) + ctx->staging.flags |= + RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags &= + ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { + if (vif->bss_conf.use_short_slot) + ctx->staging.flags |= + RXON_FLG_SHORT_SLOT_MSK; + else + ctx->staging.flags &= + ~RXON_FLG_SHORT_SLOT_MSK; + } + /* need to send beacon cmd before committing assoc RXON! */ + iwl4965_send_beacon_cmd(priv); + /* restore RXON assoc */ + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; + iwl_legacy_commit_rxon(priv, ctx); + } + iwl4965_send_beacon_cmd(priv); +} + +static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = { + .get_hcmd_size = iwl4965_get_hcmd_size, + .build_addsta_hcmd = iwl4965_build_addsta_hcmd, + .request_scan = iwl4965_request_scan, + .post_scan = iwl4965_post_scan, +}; + +static struct iwl_lib_ops iwl4965_lib = { + .set_hw_params = iwl4965_hw_set_hw_params, + .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl, + .txq_attach_buf_to_tfd = iwl4965_hw_txq_attach_buf_to_tfd, + .txq_free_tfd = iwl4965_hw_txq_free_tfd, + .txq_init = iwl4965_hw_tx_queue_init, + .rx_handler_setup = iwl4965_rx_handler_setup, + .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr, + .init_alive_start = iwl4965_init_alive_start, + .load_ucode = iwl4965_load_bsm, + .dump_nic_error_log = iwl4965_dump_nic_error_log, + .dump_fh = iwl4965_dump_fh, + .set_channel_switch = iwl4965_hw_channel_switch, + .apm_ops = { + .init = iwl_legacy_apm_init, + .config = iwl4965_nic_config, + }, + .eeprom_ops = { + .regulatory_bands = { + EEPROM_REGULATORY_BAND_1_CHANNELS, + EEPROM_REGULATORY_BAND_2_CHANNELS, + EEPROM_REGULATORY_BAND_3_CHANNELS, + EEPROM_REGULATORY_BAND_4_CHANNELS, + EEPROM_REGULATORY_BAND_5_CHANNELS, + EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS, + EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS + }, + .acquire_semaphore = iwl4965_eeprom_acquire_semaphore, + .release_semaphore = iwl4965_eeprom_release_semaphore, + }, + .send_tx_power = iwl4965_send_tx_power, + .update_chain_flags = iwl4965_update_chain_flags, + .temp_ops = { + .temperature = iwl4965_temperature_calib, + }, + .debugfs_ops = { + .rx_stats_read = iwl4965_ucode_rx_stats_read, + .tx_stats_read = 
iwl4965_ucode_tx_stats_read, + .general_stats_read = iwl4965_ucode_general_stats_read, + }, +}; + +static const struct iwl_legacy_ops iwl4965_legacy_ops = { + .post_associate = iwl4965_post_associate, + .config_ap = iwl4965_config_ap, + .manage_ibss_station = iwl4965_manage_ibss_station, + .update_bcast_stations = iwl4965_update_bcast_stations, +}; + +struct ieee80211_ops iwl4965_hw_ops = { + .tx = iwl4965_mac_tx, + .start = iwl4965_mac_start, + .stop = iwl4965_mac_stop, + .add_interface = iwl_legacy_mac_add_interface, + .remove_interface = iwl_legacy_mac_remove_interface, + .change_interface = iwl_legacy_mac_change_interface, + .config = iwl_legacy_mac_config, + .configure_filter = iwl4965_configure_filter, + .set_key = iwl4965_mac_set_key, + .update_tkip_key = iwl4965_mac_update_tkip_key, + .conf_tx = iwl_legacy_mac_conf_tx, + .reset_tsf = iwl_legacy_mac_reset_tsf, + .bss_info_changed = iwl_legacy_mac_bss_info_changed, + .ampdu_action = iwl4965_mac_ampdu_action, + .hw_scan = iwl_legacy_mac_hw_scan, + .sta_add = iwl4965_mac_sta_add, + .sta_remove = iwl_legacy_mac_sta_remove, + .channel_switch = iwl4965_mac_channel_switch, + .tx_last_beacon = iwl_legacy_mac_tx_last_beacon, +}; + +static const struct iwl_ops iwl4965_ops = { + .lib = &iwl4965_lib, + .hcmd = &iwl4965_hcmd, + .utils = &iwl4965_hcmd_utils, + .led = &iwl4965_led_ops, + .legacy = &iwl4965_legacy_ops, + .ieee80211_ops = &iwl4965_hw_ops, +}; + +static struct iwl_base_params iwl4965_base_params = { + .eeprom_size = IWL4965_EEPROM_IMG_SIZE, + .num_of_queues = IWL49_NUM_QUEUES, + .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES, + .pll_cfg_val = 0, + .set_l0s = true, + .use_bsm = true, + .led_compensation = 61, + .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, + .wd_timeout = IWL_DEF_WD_TIMEOUT, + .temperature_kelvin = true, + .ucode_tracing = true, + .sensitivity_calib_by_driver = true, + .chain_noise_calib_by_driver = true, +}; + +struct iwl_cfg iwl4965_cfg = { + .name = "Intel(R) Wireless WiFi Link 4965AGN", + .fw_name_pre = IWL4965_FW_PRE, + .ucode_api_max = IWL4965_UCODE_API_MAX, + .ucode_api_min = IWL4965_UCODE_API_MIN, + .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, + .valid_tx_ant = ANT_AB, + .valid_rx_ant = ANT_ABC, + .eeprom_ver = EEPROM_4965_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION, + .ops = &iwl4965_ops, + .mod_params = &iwl4965_mod_params, + .base_params = &iwl4965_base_params, + .led_mode = IWL_LED_BLINK, + /* + * Force use of chains B and C for scan RX on 5 GHz band + * because the device has off-channel reception on chain A. + */ + .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC, +}; + +/* Module firmware */ +MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX)); diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-4965.h b/trunk/drivers/net/wireless/iwlegacy/iwl-4965.h new file mode 100644 index 000000000000..01f8163daf16 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-4965.h @@ -0,0 +1,282 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *****************************************************************************/ + +#ifndef __iwl_4965_h__ +#define __iwl_4965_h__ + +#include "iwl-dev.h" + +/* configuration for the _4965 devices */ +extern struct iwl_cfg iwl4965_cfg; + +extern struct iwl_mod_params iwl4965_mod_params; + +extern struct ieee80211_ops iwl4965_hw_ops; + +/* tx queue */ +void iwl4965_free_tfds_in_queue(struct iwl_priv *priv, + int sta_id, int tid, int freed); + +/* RXON */ +void iwl4965_set_rxon_chain(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); + +/* uCode */ +int iwl4965_verify_ucode(struct iwl_priv *priv); + +/* lib */ +void iwl4965_check_abort_status(struct iwl_priv *priv, + u8 frame_count, u32 status); + +void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); +int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq); +int iwl4965_hw_nic_init(struct iwl_priv *priv); +int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display); + +/* rx */ +void iwl4965_rx_queue_restock(struct iwl_priv *priv); +void iwl4965_rx_replenish(struct iwl_priv *priv); +void iwl4965_rx_replenish_now(struct iwl_priv *priv); +void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq); +int iwl4965_rxq_stop(struct iwl_priv *priv); +int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); +void iwl4965_rx_reply_rx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl4965_rx_handle(struct iwl_priv *priv); + +/* tx */ +void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); +int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + dma_addr_t addr, u16 len, u8 reset, u8 pad); +int iwl4965_hw_tx_queue_init(struct iwl_priv *priv, + struct iwl_tx_queue *txq); +void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, + struct ieee80211_tx_info *info); +int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb); +int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u16 *ssn); +int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid); +int iwl4965_txq_check_empty(struct iwl_priv *priv, + int sta_id, u8 tid, int txq_id); +void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index); +void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv); +int iwl4965_txq_ctx_alloc(struct iwl_priv *priv); +void iwl4965_txq_ctx_reset(struct iwl_priv *priv); +void iwl4965_txq_ctx_stop(struct iwl_priv *priv); +void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask); + +/* + * Acquire priv->lock before calling this function ! + */ +void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index); +/** + * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue + * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed + * @scd_retry: (1) Indicates queue will be used in aggregation mode + * + * NOTE: Acquire priv->lock before calling this function ! 
+ */ +void iwl4965_tx_queue_set_status(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + int tx_fifo_id, int scd_retry); + +static inline u32 iwl4965_tx_status_to_mac80211(u32 status) +{ + status &= TX_STATUS_MSK; + + switch (status) { + case TX_STATUS_SUCCESS: + case TX_STATUS_DIRECT_DONE: + return IEEE80211_TX_STAT_ACK; + case TX_STATUS_FAIL_DEST_PS: + return IEEE80211_TX_STAT_TX_FILTERED; + default: + return 0; + } +} + +static inline bool iwl4965_is_tx_success(u32 status) +{ + status &= TX_STATUS_MSK; + return (status == TX_STATUS_SUCCESS) || + (status == TX_STATUS_DIRECT_DONE); +} + +u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid); + +/* rx */ +void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +bool iwl4965_good_plcp_health(struct iwl_priv *priv, + struct iwl_rx_packet *pkt); +void iwl4965_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl4965_reply_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); + +/* scan */ +int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif); + +/* station mgmt */ +int iwl4965_manage_ibss_station(struct iwl_priv *priv, + struct ieee80211_vif *vif, bool add); + +/* hcmd */ +int iwl4965_send_beacon_cmd(struct iwl_priv *priv); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +const char *iwl4965_get_tx_fail_reason(u32 status); +#else +static inline const char * +iwl4965_get_tx_fail_reason(u32 status) { return ""; } +#endif + +/* station management */ +int iwl4965_alloc_bcast_station(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +int iwl4965_add_bssid_station(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + const u8 *addr, u8 *sta_id_r); +int iwl4965_remove_default_wep_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *key); +int iwl4965_set_default_wep_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *key); +int iwl4965_restore_default_wep_keys(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +int iwl4965_set_dynamic_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *key, u8 sta_id); +int iwl4965_remove_dynamic_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *key, u8 sta_id); +void iwl4965_update_tkip_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, u32 iv32, u16 *phase1key); +int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, + int sta_id, int tid); +int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta, + int tid, u16 ssn); +int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, + int tid); +void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, + int sta_id, int cnt); +int iwl4965_update_bcast_stations(struct iwl_priv *priv); + +/* rate */ +static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx) +{ + return BIT(ant_idx) << RATE_MCS_ANT_POS; +} + +static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags) +{ + return le32_to_cpu(rate_n_flags) & 0xFF; +} + +static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags) +{ + return cpu_to_le32(flags|(u32)rate); +} + +/* eeprom */ +void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac); +int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv); +void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv); +int iwl4965_eeprom_check_version(struct iwl_priv *priv); 
+ +/* mac80211 handlers (for 4965) */ +void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); +int iwl4965_mac_start(struct ieee80211_hw *hw); +void iwl4965_mac_stop(struct ieee80211_hw *hw); +void iwl4965_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast); +int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key); +void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key); +int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size); +int iwl4965_mac_sta_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void iwl4965_mac_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_channel_switch *ch_switch); + +#endif /* __iwl_4965_h__ */ diff --git a/trunk/drivers/net/wireless/iwlegacy/commands.h b/trunk/drivers/net/wireless/iwlegacy/iwl-commands.h similarity index 79% rename from trunk/drivers/net/wireless/iwlegacy/commands.h rename to trunk/drivers/net/wireless/iwlegacy/iwl-commands.h index 25dd7d28d022..89904054473f 100644 --- a/trunk/drivers/net/wireless/iwlegacy/commands.h +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-commands.h @@ -60,96 +60,100 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ +/* + * Please use this file (iwl-commands.h) only for uCode API definitions. + * Please use iwl-xxxx-hw.h for hardware-related definitions. + * Please use iwl-dev.h for driver implementation definitions. 
+ */ -#ifndef __il_commands_h__ -#define __il_commands_h__ - -#include +#ifndef __iwl_legacy_commands_h__ +#define __iwl_legacy_commands_h__ -struct il_priv; +struct iwl_priv; /* uCode version contains 4 values: Major/Minor/API/Serial */ -#define IL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24) -#define IL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16) -#define IL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8) -#define IL_UCODE_SERIAL(ver) ((ver) & 0x000000FF) +#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24) +#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16) +#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8) +#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF) + /* Tx rates */ -#define IL_CCK_RATES 4 -#define IL_OFDM_RATES 8 -#define IL_MAX_RATES (IL_CCK_RATES + IL_OFDM_RATES) +#define IWL_CCK_RATES 4 +#define IWL_OFDM_RATES 8 +#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES) enum { - N_ALIVE = 0x1, - N_ERROR = 0x2, + REPLY_ALIVE = 0x1, + REPLY_ERROR = 0x2, /* RXON and QOS commands */ - C_RXON = 0x10, - C_RXON_ASSOC = 0x11, - C_QOS_PARAM = 0x13, - C_RXON_TIMING = 0x14, + REPLY_RXON = 0x10, + REPLY_RXON_ASSOC = 0x11, + REPLY_QOS_PARAM = 0x13, + REPLY_RXON_TIMING = 0x14, /* Multi-Station support */ - C_ADD_STA = 0x18, - C_REM_STA = 0x19, + REPLY_ADD_STA = 0x18, + REPLY_REMOVE_STA = 0x19, /* Security */ - C_WEPKEY = 0x20, + REPLY_WEPKEY = 0x20, /* RX, TX, LEDs */ - N_3945_RX = 0x1b, /* 3945 only */ - C_TX = 0x1c, - C_RATE_SCALE = 0x47, /* 3945 only */ - C_LEDS = 0x48, - C_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 */ + REPLY_3945_RX = 0x1b, /* 3945 only */ + REPLY_TX = 0x1c, + REPLY_RATE_SCALE = 0x47, /* 3945 only */ + REPLY_LEDS_CMD = 0x48, + REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */ /* 802.11h related */ - C_CHANNEL_SWITCH = 0x72, - N_CHANNEL_SWITCH = 0x73, - C_SPECTRUM_MEASUREMENT = 0x74, - N_SPECTRUM_MEASUREMENT = 0x75, + REPLY_CHANNEL_SWITCH = 0x72, + CHANNEL_SWITCH_NOTIFICATION = 0x73, + REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74, + SPECTRUM_MEASURE_NOTIFICATION = 0x75, /* Power Management */ - C_POWER_TBL = 0x77, - N_PM_SLEEP = 0x7A, - N_PM_DEBUG_STATS = 0x7B, + POWER_TABLE_CMD = 0x77, + PM_SLEEP_NOTIFICATION = 0x7A, + PM_DEBUG_STATISTIC_NOTIFIC = 0x7B, /* Scan commands and notifications */ - C_SCAN = 0x80, - C_SCAN_ABORT = 0x81, - N_SCAN_START = 0x82, - N_SCAN_RESULTS = 0x83, - N_SCAN_COMPLETE = 0x84, + REPLY_SCAN_CMD = 0x80, + REPLY_SCAN_ABORT_CMD = 0x81, + SCAN_START_NOTIFICATION = 0x82, + SCAN_RESULTS_NOTIFICATION = 0x83, + SCAN_COMPLETE_NOTIFICATION = 0x84, /* IBSS/AP commands */ - N_BEACON = 0x90, - C_TX_BEACON = 0x91, + BEACON_NOTIFICATION = 0x90, + REPLY_TX_BEACON = 0x91, /* Miscellaneous commands */ - C_TX_PWR_TBL = 0x97, + REPLY_TX_PWR_TABLE_CMD = 0x97, /* Bluetooth device coexistence config command */ - C_BT_CONFIG = 0x9b, + REPLY_BT_CONFIG = 0x9b, /* Statistics */ - C_STATS = 0x9c, - N_STATS = 0x9d, + REPLY_STATISTICS_CMD = 0x9c, + STATISTICS_NOTIFICATION = 0x9d, /* RF-KILL commands and notifications */ - N_CARD_STATE = 0xa1, + CARD_STATE_NOTIFICATION = 0xa1, /* Missed beacons notification */ - N_MISSED_BEACONS = 0xa2, + MISSED_BEACONS_NOTIFICATION = 0xa2, - C_CT_KILL_CONFIG = 0xa4, - C_SENSITIVITY = 0xa8, - C_PHY_CALIBRATION = 0xb0, - N_RX_PHY = 0xc0, - N_RX_MPDU = 0xc1, - N_RX = 0xc3, - N_COMPRESSED_BA = 0xc5, + REPLY_CT_KILL_CONFIG_CMD = 0xa4, + SENSITIVITY_CMD = 0xa8, + REPLY_PHY_CALIBRATION_CMD = 0xb0, + REPLY_RX_PHY_CMD = 0xc0, + REPLY_RX_MPDU_CMD = 0xc1, + REPLY_RX = 0xc3, + REPLY_COMPRESSED_BA = 0xc5, - IL_CN_MAX = 0xff + REPLY_MAX = 
0xff }; /****************************************************************************** @@ -159,25 +163,25 @@ enum { * *****************************************************************************/ -/* il_cmd_header flags value */ -#define IL_CMD_FAILED_MSK 0x40 +/* iwl_cmd_header flags value */ +#define IWL_CMD_FAILED_MSK 0x40 #define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f) #define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8) -#define SEQ_TO_IDX(s) ((s) & 0xff) -#define IDX_TO_SEQ(i) ((i) & 0xff) +#define SEQ_TO_INDEX(s) ((s) & 0xff) +#define INDEX_TO_SEQ(i) ((i) & 0xff) #define SEQ_HUGE_FRAME cpu_to_le16(0x4000) #define SEQ_RX_FRAME cpu_to_le16(0x8000) /** - * struct il_cmd_header + * struct iwl_cmd_header * * This header format appears in the beginning of each command sent from the * driver, and each response/notification received from uCode. */ -struct il_cmd_header { - u8 cmd; /* Command ID: C_RXON, etc. */ - u8 flags; /* 0:5 reserved, 6 abort, 7 internal */ +struct iwl_cmd_header { + u8 cmd; /* Command ID: REPLY_RXON, etc. */ + u8 flags; /* 0:5 reserved, 6 abort, 7 internal */ /* * The driver sets up the sequence number to values of its choosing. * uCode does not use this value, but passes it back to the driver @@ -188,28 +192,29 @@ struct il_cmd_header { * There is one exception: uCode sets bit 15 when it originates * the response/notification, i.e. when the response/notification * is not a direct response to a command sent by the driver. For - * example, uCode issues N_3945_RX when it sends a received frame + * example, uCode issues REPLY_3945_RX when it sends a received frame * to the driver; it is not a direct response to any driver command. * * The Linux driver uses the following format: * - * 0:7 tfd idx - position within TX queue - * 8:12 TX queue id - * 13 reserved - * 14 huge - driver sets this to indicate command is in the - * 'huge' storage at the end of the command buffers - * 15 unsolicited RX or uCode-originated notification - */ + * 0:7 tfd index - position within TX queue + * 8:12 TX queue id + * 13 reserved + * 14 huge - driver sets this to indicate command is in the + * 'huge' storage at the end of the command buffers + * 15 unsolicited RX or uCode-originated notification + */ __le16 sequence; /* command or response/notification data follows immediately */ u8 data[0]; } __packed; + /** - * struct il3945_tx_power + * struct iwl3945_tx_power * - * Used in C_TX_PWR_TBL, C_SCAN, C_CHANNEL_SWITCH + * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH * * Each entry contains two values: * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained @@ -218,21 +223,21 @@ struct il_cmd_header { * 2) Radio gain. This sets the analog gain of the radio Tx path. * It is a coarser setting, and behaves in a logarithmic (dB) fashion. * - * Driver obtains values from struct il3945_tx_power power_gain_table[][]. + * Driver obtains values from struct iwl3945_tx_power power_gain_table[][]. 
*/ -struct il3945_tx_power { +struct iwl3945_tx_power { u8 tx_gain; /* gain for analog radio */ u8 dsp_atten; /* gain for DSP */ } __packed; /** - * struct il3945_power_per_rate + * struct iwl3945_power_per_rate * - * Used in C_TX_PWR_TBL, C_CHANNEL_SWITCH + * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH */ -struct il3945_power_per_rate { +struct iwl3945_power_per_rate { u8 rate; /* plcp */ - struct il3945_tx_power tpc; + struct iwl3945_tx_power tpc; u8 reserved; } __packed; @@ -240,10 +245,10 @@ struct il3945_power_per_rate { * iwl4965 rate_n_flags bit fields * * rate_n_flags format is used in following iwl4965 commands: - * N_RX (response only) - * N_RX_MPDU (response only) - * C_TX (both command and response) - * C_TX_LINK_QUALITY_CMD + * REPLY_RX (response only) + * REPLY_RX_MPDU (response only) + * REPLY_TX (both command and response) + * REPLY_TX_LINK_QUALITY_CMD * * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"): * 2-0: 0) 6 Mbps @@ -321,17 +326,17 @@ struct il3945_power_per_rate { #define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK) #define RATE_ANT_NUM 3 -#define POWER_TBL_NUM_ENTRIES 33 -#define POWER_TBL_NUM_HT_OFDM_ENTRIES 32 -#define POWER_TBL_CCK_ENTRY 32 +#define POWER_TABLE_NUM_ENTRIES 33 +#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32 +#define POWER_TABLE_CCK_ENTRY 32 -#define IL_PWR_NUM_HT_OFDM_ENTRIES 24 -#define IL_PWR_CCK_ENTRIES 2 +#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24 +#define IWL_PWR_CCK_ENTRIES 2 /** - * union il4965_tx_power_dual_stream + * union iwl4965_tx_power_dual_stream * - * Host format used for C_TX_PWR_TBL, C_CHANNEL_SWITCH + * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH * Use __le32 version (struct tx_power_dual_stream) when building command. * * Driver provides radio gain and DSP attenuation settings to device in pairs, @@ -342,9 +347,9 @@ struct il3945_power_per_rate { * For MIMO rates, one value may be different from the other, * in order to balance the Tx output between the two transmitters. * - * See more details in doc for TXPOWER in 4965.h. + * See more details in doc for TXPOWER in iwl-4965-hw.h. */ -union il4965_tx_power_dual_stream { +union iwl4965_tx_power_dual_stream { struct { u8 radio_tx_gain[2]; u8 dsp_predis_atten[2]; @@ -355,21 +360,21 @@ union il4965_tx_power_dual_stream { /** * struct tx_power_dual_stream * - * Table entries in C_TX_PWR_TBL, C_CHANNEL_SWITCH + * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH * - * Same format as il_tx_power_dual_stream, but __le32 + * Same format as iwl_tx_power_dual_stream, but __le32 */ struct tx_power_dual_stream { __le32 dw; } __packed; /** - * struct il4965_tx_power_db + * struct iwl4965_tx_power_db * - * Entire table within C_TX_PWR_TBL, C_CHANNEL_SWITCH + * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH */ -struct il4965_tx_power_db { - struct tx_power_dual_stream power_tbl[POWER_TBL_NUM_ENTRIES]; +struct iwl4965_tx_power_db { + struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES]; } __packed; /****************************************************************************** @@ -382,7 +387,7 @@ struct il4965_tx_power_db { #define INITIALIZE_SUBTYPE (9) /* - * ("Initialize") N_ALIVE = 0x1 (response only, not a command) + * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command) * * uCode issues this "initialize alive" notification once the initialization * uCode image has completed its work, and is ready to load the runtime image. 
@@ -405,7 +410,7 @@ struct il4965_tx_power_db { * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation, * for each of 5 frequency ranges. */ -struct il_init_alive_resp { +struct iwl_init_alive_resp { u8 ucode_minor; u8 ucode_major; __le16 reserved1; @@ -428,8 +433,9 @@ struct il_init_alive_resp { * 2 Tx chains */ } __packed; + /** - * N_ALIVE = 0x1 (response only, not a command) + * REPLY_ALIVE = 0x1 (response only, not a command) * * uCode issues this "alive" notification once the runtime image is ready * to receive commands from the driver. This is the *second* "alive" @@ -448,7 +454,7 @@ struct il_init_alive_resp { * __le32 log_size; log capacity (in number of entries) * __le32 type; (1) timestamp with each entry, (0) no timestamp * __le32 wraps; # times uCode has wrapped to top of circular buffer - * __le32 write_idx; next circular buffer entry that uCode would fill + * __le32 write_index; next circular buffer entry that uCode would fill * * The header is followed by the circular buffer of log entries. Entries * with timestamps have the following format: @@ -505,13 +511,13 @@ struct il_init_alive_resp { * The Linux driver can print both logs to the system log when a uCode error * occurs. */ -struct il_alive_resp { +struct iwl_alive_resp { u8 ucode_minor; u8 ucode_major; __le16 reserved1; u8 sw_rev[8]; u8 ver_type; - u8 ver_subtype; /* not "9" for runtime alive */ + u8 ver_subtype; /* not "9" for runtime alive */ __le16 reserved2; __le32 log_event_table_ptr; /* SRAM address for event log */ __le32 error_event_table_ptr; /* SRAM address for error log */ @@ -520,9 +526,9 @@ struct il_alive_resp { } __packed; /* - * N_ERROR = 0x2 (response only, not a command) + * REPLY_ERROR = 0x2 (response only, not a command) */ -struct il_error_resp { +struct iwl_error_resp { __le32 error_type; u8 cmd_id; u8 reserved1; @@ -548,6 +554,7 @@ enum { RXON_DEV_TYPE_SNIFFER = 6, }; + #define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0) #define RXON_RX_CHAIN_DRIVER_FORCE_POS (0) #define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1) @@ -586,6 +593,7 @@ enum { * (according to ON_AIR deassertion) */ #define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15) + /* HT flags */ #define RXON_FLG_CTRL_CHANNEL_LOC_POS (22) #define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22) @@ -632,7 +640,7 @@ enum { #define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6) /** - * C_RXON = 0x10 (command, has simple generic response) + * REPLY_RXON = 0x10 (command, has simple generic response) * * RXON tunes the radio tuner to a service channel, and sets up a number * of parameters that are used primarily for Rx, but also for Tx operations. @@ -645,11 +653,11 @@ enum { * channel. * * NOTE: All RXONs wipe clean the internal txpower table. Driver must - * issue a new C_TX_PWR_TBL after each C_RXON (0x10), + * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10), * regardless of whether RXON_FILTER_ASSOC_MSK is set. */ -struct il3945_rxon_cmd { +struct iwl3945_rxon_cmd { u8 node_addr[6]; __le16 reserved1; u8 bssid_addr[6]; @@ -668,7 +676,7 @@ struct il3945_rxon_cmd { __le16 reserved5; } __packed; -struct il4965_rxon_cmd { +struct iwl4965_rxon_cmd { u8 node_addr[6]; __le16 reserved1; u8 bssid_addr[6]; @@ -691,7 +699,7 @@ struct il4965_rxon_cmd { /* Create a common rxon cmd which will be typecast into the 3945 or 4965 * specific rxon cmd, depending on where it is called from. 
*/ -struct il_rxon_cmd { +struct iwl_legacy_rxon_cmd { u8 node_addr[6]; __le16 reserved1; u8 bssid_addr[6]; @@ -713,10 +721,11 @@ struct il_rxon_cmd { u8 reserved5; } __packed; + /* - * C_RXON_ASSOC = 0x11 (command, has simple generic response) + * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response) */ -struct il3945_rxon_assoc_cmd { +struct iwl3945_rxon_assoc_cmd { __le32 flags; __le32 filter_flags; u8 ofdm_basic_rates; @@ -724,7 +733,7 @@ struct il3945_rxon_assoc_cmd { __le16 reserved; } __packed; -struct il4965_rxon_assoc_cmd { +struct iwl4965_rxon_assoc_cmd { __le32 flags; __le32 filter_flags; u8 ofdm_basic_rates; @@ -735,17 +744,17 @@ struct il4965_rxon_assoc_cmd { __le16 reserved; } __packed; -#define IL_CONN_MAX_LISTEN_INTERVAL 10 -#define IL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */ -#define IL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */ +#define IWL_CONN_MAX_LISTEN_INTERVAL 10 +#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */ +#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */ /* - * C_RXON_TIMING = 0x14 (command, has simple generic response) + * REPLY_RXON_TIMING = 0x14 (command, has simple generic response) */ -struct il_rxon_time_cmd { +struct iwl_rxon_time_cmd { __le64 timestamp; __le16 beacon_interval; - __le16 atim_win; + __le16 atim_window; __le32 beacon_init_val; __le16 listen_interval; u8 dtim_period; @@ -753,32 +762,32 @@ struct il_rxon_time_cmd { } __packed; /* - * C_CHANNEL_SWITCH = 0x72 (command, has simple generic response) + * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response) */ -struct il3945_channel_switch_cmd { +struct iwl3945_channel_switch_cmd { u8 band; u8 expect_beacon; __le16 channel; __le32 rxon_flags; __le32 rxon_filter_flags; __le32 switch_time; - struct il3945_power_per_rate power[IL_MAX_RATES]; + struct iwl3945_power_per_rate power[IWL_MAX_RATES]; } __packed; -struct il4965_channel_switch_cmd { +struct iwl4965_channel_switch_cmd { u8 band; u8 expect_beacon; __le16 channel; __le32 rxon_flags; __le32 rxon_filter_flags; __le32 switch_time; - struct il4965_tx_power_db tx_power; + struct iwl4965_tx_power_db tx_power; } __packed; /* - * N_CHANNEL_SWITCH = 0x73 (notification only, not a command) + * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command) */ -struct il_csa_notification { +struct iwl_csa_notification { __le16 band; __le16 channel; __le32 status; /* 0 - OK, 1 - fail */ @@ -791,22 +800,22 @@ struct il_csa_notification { *****************************************************************************/ /** - * struct il_ac_qos -- QOS timing params for C_QOS_PARAM - * One for each of 4 EDCA access categories in struct il_qosparam_cmd + * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM + * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd * - * @cw_min: Contention win, start value in numbers of slots. + * @cw_min: Contention window, start value in numbers of slots. * Should be a power-of-2, minus 1. Device's default is 0x0f. - * @cw_max: Contention win, max value in numbers of slots. + * @cw_max: Contention window, max value in numbers of slots. * Should be a power-of-2, minus 1. Device's default is 0x3f. * @aifsn: Number of slots in Arbitration Interframe Space (before * performing random backoff timing prior to Tx). Device default 1. * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0. * - * Device will automatically increase contention win by (2*CW) + 1 for each + * Device will automatically increase contention window by (2*CW) + 1 for each * transmission retry. 
Device uses cw_max as a bit mask, ANDed with new CW * value, to cap the CW value. */ -struct il_ac_qos { +struct iwl_ac_qos { __le16 cw_min; __le16 cw_max; u8 aifsn; @@ -823,14 +832,14 @@ struct il_ac_qos { #define AC_NUM 4 /* - * C_QOS_PARAM = 0x13 (command, has simple generic response) + * REPLY_QOS_PARAM = 0x13 (command, has simple generic response) * * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs * 0: Background, 1: Best Effort, 2: Video, 3: Voice. */ -struct il_qosparam_cmd { +struct iwl_qosparam_cmd { __le32 qos_flags; - struct il_ac_qos ac[AC_NUM]; + struct iwl_ac_qos ac[AC_NUM]; } __packed; /****************************************************************************** @@ -843,15 +852,15 @@ struct il_qosparam_cmd { */ /* Special, dedicated locations within device's station table */ -#define IL_AP_ID 0 -#define IL_STA_ID 2 -#define IL3945_BROADCAST_ID 24 -#define IL3945_STATION_COUNT 25 -#define IL4965_BROADCAST_ID 31 -#define IL4965_STATION_COUNT 32 +#define IWL_AP_ID 0 +#define IWL_STA_ID 2 +#define IWL3945_BROADCAST_ID 24 +#define IWL3945_STATION_COUNT 25 +#define IWL4965_BROADCAST_ID 31 +#define IWL4965_STATION_COUNT 32 -#define IL_STATION_COUNT 32 /* MAX(3945,4965) */ -#define IL_INVALID_STATION 255 +#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/ +#define IWL_INVALID_STATION 255 #define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2) #define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8) @@ -892,11 +901,11 @@ struct il_qosparam_cmd { #define STA_MODIFY_DELBA_TID_MSK 0x10 #define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20 -/* Receiver address (actually, Rx station's idx into station table), +/* Receiver address (actually, Rx station's index into station table), * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ #define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) -struct il4965_keyinfo { +struct iwl4965_keyinfo { __le16 key_flags; u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */ u8 reserved1; @@ -909,12 +918,12 @@ struct il4965_keyinfo { /** * struct sta_id_modify * @addr[ETH_ALEN]: station's MAC address - * @sta_id: idx of station in uCode's station table + * @sta_id: index of station in uCode's station table * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change * - * Driver selects unused table idx when adding new station, - * or the idx to a pre-existing station entry when modifying that station. - * Some idxes have special purposes (IL_AP_ID, idx 0, is for AP). + * Driver selects unused table index when adding new station, + * or the index to a pre-existing station entry when modifying that station. + * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP). * * modify_mask flags select which parameters to modify vs. leave alone. */ @@ -927,15 +936,15 @@ struct sta_id_modify { } __packed; /* - * C_ADD_STA = 0x18 (command) + * REPLY_ADD_STA = 0x18 (command) * * The device contains an internal table of per-station information, * with info on security keys, aggregation parameters, and Tx rates for * initial Tx attempt and any retries (4965 devices uses - * C_TX_LINK_QUALITY_CMD, - * 3945 uses C_RATE_SCALE to set up rate tables). + * REPLY_TX_LINK_QUALITY_CMD, + * 3945 uses REPLY_RATE_SCALE to set up rate tables). * - * C_ADD_STA sets up the table entry for one station, either creating + * REPLY_ADD_STA sets up the table entry for one station, either creating * a new entry, or modifying a pre-existing one. 
* * NOTE: RXON command (without "associated" bit set) wipes the station table @@ -945,20 +954,20 @@ struct sta_id_modify { * their own txpower/rate setup data). * * When getting started on a new channel, driver must set up the - * IL_BROADCAST_ID entry (last entry in the table). For a client + * IWL_BROADCAST_ID entry (last entry in the table). For a client * station in a BSS, once an AP is selected, driver sets up the AP STA - * in the IL_AP_ID entry (1st entry in the table). BROADCAST and AP + * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP * are all that are needed for a BSS client station. If the device is * used as AP, or in an IBSS network, driver must set up station table - * entries for all STAs in network, starting with idx IL_STA_ID. + * entries for all STAs in network, starting with index IWL_STA_ID. */ -struct il3945_addsta_cmd { +struct iwl3945_addsta_cmd { u8 mode; /* 1: modify existing, 0: add new station */ u8 reserved[3]; struct sta_id_modify sta; - struct il4965_keyinfo key; - __le32 station_flags; /* STA_FLG_* */ + struct iwl4965_keyinfo key; + __le32 station_flags; /* STA_FLG_* */ __le32 station_flags_msk; /* STA_FLG_* */ /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) @@ -981,12 +990,12 @@ struct il3945_addsta_cmd { __le16 add_immediate_ba_ssn; } __packed; -struct il4965_addsta_cmd { +struct iwl4965_addsta_cmd { u8 mode; /* 1: modify existing, 0: add new station */ u8 reserved[3]; struct sta_id_modify sta; - struct il4965_keyinfo key; - __le32 station_flags; /* STA_FLG_* */ + struct iwl4965_keyinfo key; + __le32 station_flags; /* STA_FLG_* */ __le32 station_flags_msk; /* STA_FLG_* */ /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) @@ -994,7 +1003,7 @@ struct il4965_addsta_cmd { * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ __le16 tid_disable_tx; - __le16 reserved1; + __le16 reserved1; /* TID for which to add block-ack support. * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ @@ -1019,12 +1028,12 @@ struct il4965_addsta_cmd { } __packed; /* Wrapper struct for 3945 and 4965 addsta_cmd structures */ -struct il_addsta_cmd { +struct iwl_legacy_addsta_cmd { u8 mode; /* 1: modify existing, 0: add new station */ u8 reserved[3]; struct sta_id_modify sta; - struct il4965_keyinfo key; - __le32 station_flags; /* STA_FLG_* */ + struct iwl4965_keyinfo key; + __le32 station_flags; /* STA_FLG_* */ __le32 station_flags_msk; /* STA_FLG_* */ /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) @@ -1032,7 +1041,7 @@ struct il_addsta_cmd { * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ __le16 tid_disable_tx; - __le16 rate_n_flags; /* 3945 only */ + __le16 rate_n_flags; /* 3945 only */ /* TID for which to add block-ack support. * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. 
*/ @@ -1056,50 +1065,51 @@ struct il_addsta_cmd { __le16 reserved2; } __packed; + #define ADD_STA_SUCCESS_MSK 0x1 -#define ADD_STA_NO_ROOM_IN_TBL 0x2 +#define ADD_STA_NO_ROOM_IN_TABLE 0x2 #define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4 #define ADD_STA_MODIFY_NON_EXIST_STA 0x8 /* - * C_ADD_STA = 0x18 (response) + * REPLY_ADD_STA = 0x18 (response) */ -struct il_add_sta_resp { - u8 status; /* ADD_STA_* */ +struct iwl_add_sta_resp { + u8 status; /* ADD_STA_* */ } __packed; #define REM_STA_SUCCESS_MSK 0x1 /* - * C_REM_STA = 0x19 (response) + * REPLY_REM_STA = 0x19 (response) */ -struct il_rem_sta_resp { +struct iwl_rem_sta_resp { u8 status; } __packed; /* - * C_REM_STA = 0x19 (command) + * REPLY_REM_STA = 0x19 (command) */ -struct il_rem_sta_cmd { - u8 num_sta; /* number of removed stations */ +struct iwl_rem_sta_cmd { + u8 num_sta; /* number of removed stations */ u8 reserved[3]; - u8 addr[ETH_ALEN]; /* MAC addr of the first station */ + u8 addr[ETH_ALEN]; /* MAC addr of the first station */ u8 reserved2[2]; } __packed; -#define IL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0)) -#define IL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1)) -#define IL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2)) -#define IL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3)) -#define IL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00) +#define IWL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0)) +#define IWL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1)) +#define IWL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2)) +#define IWL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3)) +#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00) -#define IL_DROP_SINGLE 0 -#define IL_DROP_SELECTED 1 -#define IL_DROP_ALL 2 +#define IWL_DROP_SINGLE 0 +#define IWL_DROP_SELECTED 1 +#define IWL_DROP_ALL 2 /* * REPLY_WEP_KEY = 0x20 */ -struct il_wep_key { - u8 key_idx; +struct iwl_wep_key { + u8 key_index; u8 key_offset; u8 reserved1[2]; u8 key_size; @@ -1107,12 +1117,12 @@ struct il_wep_key { u8 key[16]; } __packed; -struct il_wep_cmd { +struct iwl_wep_cmd { u8 num_keys; u8 global_key_type; u8 flags; u8 reserved; - struct il_wep_key key[0]; + struct iwl_wep_key key[0]; } __packed; #define WEP_KEY_WEP_TYPE 1 @@ -1158,7 +1168,8 @@ struct il_wep_cmd { #define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7) #define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800) -struct il3945_rx_frame_stats { + +struct iwl3945_rx_frame_stats { u8 phy_count; u8 id; u8 rssi; @@ -1168,7 +1179,7 @@ struct il3945_rx_frame_stats { u8 payload[0]; } __packed; -struct il3945_rx_frame_hdr { +struct iwl3945_rx_frame_hdr { __le16 channel; __le16 phy_flags; u8 reserved1; @@ -1177,71 +1188,73 @@ struct il3945_rx_frame_hdr { u8 payload[0]; } __packed; -struct il3945_rx_frame_end { +struct iwl3945_rx_frame_end { __le32 status; __le64 timestamp; __le32 beacon_timestamp; } __packed; /* - * N_3945_RX = 0x1b (response only, not a command) + * REPLY_3945_RX = 0x1b (response only, not a command) * * NOTE: DO NOT dereference from casts to this structure * It is provided only for calculating minimum data set size. 
* The actual offsets of the hdr and end are dynamic based on * stats.phy_count */ -struct il3945_rx_frame { - struct il3945_rx_frame_stats stats; - struct il3945_rx_frame_hdr hdr; - struct il3945_rx_frame_end end; +struct iwl3945_rx_frame { + struct iwl3945_rx_frame_stats stats; + struct iwl3945_rx_frame_hdr hdr; + struct iwl3945_rx_frame_end end; } __packed; -#define IL39_RX_FRAME_SIZE (4 + sizeof(struct il3945_rx_frame)) +#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame)) /* Fixed (non-configurable) rx data from phy */ -#define IL49_RX_RES_PHY_CNT 14 -#define IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4) -#define IL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70) -#define IL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */ -#define IL49_AGC_DB_POS (7) -struct il4965_rx_non_cfg_phy { +#define IWL49_RX_RES_PHY_CNT 14 +#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4) +#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70) +#define IWL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */ +#define IWL49_AGC_DB_POS (7) +struct iwl4965_rx_non_cfg_phy { __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */ __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */ u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */ u8 pad[0]; } __packed; + /* - * N_RX = 0xc3 (response only, not a command) + * REPLY_RX = 0xc3 (response only, not a command) * Used only for legacy (non 11n) frames. */ -struct il_rx_phy_res { - u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */ +struct iwl_rx_phy_res { + u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */ u8 cfg_phy_cnt; /* configurable DSP phy data byte count */ u8 stat_id; /* configurable DSP phy data set ID */ u8 reserved1; __le64 timestamp; /* TSF at on air rise */ - __le32 beacon_time_stamp; /* beacon at on-air rise */ + __le32 beacon_time_stamp; /* beacon at on-air rise */ __le16 phy_flags; /* general phy flags: band, modulation, ... */ __le16 channel; /* channel number */ - u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */ + u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */ __le32 rate_n_flags; /* RATE_MCS_* */ __le16 byte_count; /* frame's byte-count */ __le16 frame_time; /* frame's time on the air */ } __packed; -struct il_rx_mpdu_res_start { +struct iwl_rx_mpdu_res_start { __le16 byte_count; __le16 reserved; } __packed; + /****************************************************************************** * (5) * Tx Commands & Responses: * - * Driver must place each C_TX command into one of the prioritized Tx + * Driver must place each REPLY_TX command into one of the prioritized Tx * queues in host DRAM, shared between driver and device (see comments for * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode * are preparing to transmit, the device pulls the Tx command over the PCI @@ -1251,18 +1264,18 @@ struct il_rx_mpdu_res_start { * uCode handles all timing and protocol related to control frames * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler * handle reception of block-acks; uCode updates the host driver via - * N_COMPRESSED_BA. + * REPLY_COMPRESSED_BA. * * uCode handles retrying Tx when an ACK is expected but not received. * This includes trying lower data rates than the one requested in the Tx - * command, as set up by the C_RATE_SCALE (for 3945) or - * C_TX_LINK_QUALITY_CMD (4965). + * command, as set up by the REPLY_RATE_SCALE (for 3945) or + * REPLY_TX_LINK_QUALITY_CMD (4965). 
* - * Driver sets up transmit power for various rates via C_TX_PWR_TBL. + * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD. * This command must be executed after every RXON command, before Tx can occur. *****************************************************************************/ -/* C_TX Tx flags field */ +/* REPLY_TX Tx flags field */ /* * 1: Use Request-To-Send protocol before this frame. @@ -1283,8 +1296,8 @@ struct il_rx_mpdu_res_start { #define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3) /* For 4965 devices: - * 1: Use rate scale table (see C_TX_LINK_QUALITY_CMD). - * Tx command's initial_rate_idx indicates first rate to try; + * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD). + * Tx command's initial_rate_index indicates first rate to try; * uCode walks through table for additional Tx attempts. * 0: Use Tx rate/MCS from Tx command's rate_n_flags field. * This rate will be used for all Tx attempts; it will not be scaled. */ @@ -1309,7 +1322,7 @@ struct il_rx_mpdu_res_start { /* 1: uCode overrides sequence control field in MAC header. * 0: Driver provides sequence control field in MAC header. * Set this for management frames, non-QOS data frames, non-unicast frames, - * and also in Tx command embedded in C_SCAN for active scans. */ + * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */ #define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13) /* 1: This frame is non-last MPDU; more fragments are coming. @@ -1336,6 +1349,7 @@ struct il_rx_mpdu_res_start { /* HCCA-AP - disable duration overwriting. */ #define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25) + /* * TX command security control */ @@ -1355,10 +1369,10 @@ struct il_rx_mpdu_res_start { #define TKIP_ICV_LEN 4 /* - * C_TX = 0x1c (command) + * REPLY_TX = 0x1c (command) */ -struct il3945_tx_cmd { +struct iwl3945_tx_cmd { /* * MPDU byte count: * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, @@ -1420,9 +1434,9 @@ struct il3945_tx_cmd { } __packed; /* - * C_TX = 0x1c (response) + * REPLY_TX = 0x1c (response) */ -struct il3945_tx_resp { +struct iwl3945_tx_resp { u8 failure_rts; u8 failure_frame; u8 bt_kill_count; @@ -1431,18 +1445,19 @@ struct il3945_tx_resp { __le32 status; /* TX status */ } __packed; + /* * 4965 uCode updates these Tx attempt count values in host DRAM. * Used for managing Tx retries when expecting block-acks. * Driver should set these fields to 0. */ -struct il_dram_scratch { +struct iwl_dram_scratch { u8 try_cnt; /* Tx attempts */ u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */ __le16 reserved; } __packed; -struct il_tx_cmd { +struct iwl_tx_cmd { /* * MPDU byte count: * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, @@ -1466,7 +1481,7 @@ struct il_tx_cmd { /* uCode may modify this field of the Tx command (in host DRAM!). * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */ - struct il_dram_scratch scratch; + struct iwl_dram_scratch scratch; /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */ __le32 rate_n_flags; /* RATE_MCS_* */ @@ -1478,13 +1493,13 @@ struct il_tx_cmd { u8 sec_ctl; /* TX_CMD_SEC_* */ /* - * Index into rate table (see C_TX_LINK_QUALITY_CMD) for initial + * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for * data frames, this field may be used to selectively reduce initial * rate (via non-0 value) for special frames (e.g. management), while * still supporting rate scaling for all frames. 
*/ - u8 initial_rate_idx; + u8 initial_rate_index; u8 reserved; u8 key[16]; __le16 next_frame_flags; @@ -1613,12 +1628,12 @@ enum { }; enum { - TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */ + TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */ TX_STATUS_DELAY_MSK = 0x00000040, TX_STATUS_ABORT_MSK = 0x00000080, TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */ TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */ - TX_RESERVED = 0x00780000, /* bits 19:22 */ + TX_RESERVED = 0x00780000, /* bits 19:22 */ TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */ TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */ }; @@ -1656,7 +1671,7 @@ enum { #define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000 /* - * C_TX = 0x1c (response) + * REPLY_TX = 0x1c (response) * * This response may be in one of two slightly different formats, indicated * by the frame_count field: @@ -1682,7 +1697,7 @@ struct agg_tx_status { __le16 sequence; } __packed; -struct il4965_tx_resp { +struct iwl4965_tx_resp { u8 frame_count; /* 1 no aggregation, >1 aggregation */ u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */ u8 failure_rts; /* # failures due to unsuccessful RTS */ @@ -1715,16 +1730,16 @@ struct il4965_tx_resp { */ union { __le32 status; - struct agg_tx_status agg_status[0]; /* for each agg frame */ + struct agg_tx_status agg_status[0]; /* for each agg frame */ } u; } __packed; /* - * N_COMPRESSED_BA = 0xc5 (response only, not a command) + * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command) * * Reports Block-Acknowledge from recipient station */ -struct il_compressed_ba_resp { +struct iwl_compressed_ba_resp { __le32 sta_addr_lo32; __le16 sta_addr_hi16; __le16 reserved; @@ -1739,29 +1754,30 @@ struct il_compressed_ba_resp { } __packed; /* - * C_TX_PWR_TBL = 0x97 (command, has simple generic response) + * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response) * - * See details under "TXPOWER" in 4965.h. + * See details under "TXPOWER" in iwl-4965-hw.h. 
*/ -struct il3945_txpowertable_cmd { +struct iwl3945_txpowertable_cmd { u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ u8 reserved; __le16 channel; - struct il3945_power_per_rate power[IL_MAX_RATES]; + struct iwl3945_power_per_rate power[IWL_MAX_RATES]; } __packed; -struct il4965_txpowertable_cmd { +struct iwl4965_txpowertable_cmd { u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ u8 reserved; __le16 channel; - struct il4965_tx_power_db tx_power; + struct iwl4965_tx_power_db tx_power; } __packed; + /** - * struct il3945_rate_scaling_cmd - Rate Scaling Command & Response + * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response * - * C_RATE_SCALE = 0x47 (command, has simple generic response) + * REPLY_RATE_SCALE = 0x47 (command, has simple generic response) * * NOTE: The table of rates passed to the uCode via the * RATE_SCALE command sets up the corresponding order of @@ -1770,21 +1786,22 @@ struct il4965_txpowertable_cmd { * * For example, if you set 9MB (PLCP 0x0f) as the first * rate in the rate table, the bit mask for that rate - * when passed through ofdm_basic_rates on the C_RXON + * when passed through ofdm_basic_rates on the REPLY_RXON * command would be bit 0 (1 << 0) */ -struct il3945_rate_scaling_info { +struct iwl3945_rate_scaling_info { __le16 rate_n_flags; u8 try_cnt; - u8 next_rate_idx; + u8 next_rate_index; } __packed; -struct il3945_rate_scaling_cmd { +struct iwl3945_rate_scaling_cmd { u8 table_id; u8 reserved[3]; - struct il3945_rate_scaling_info table[IL_MAX_RATES]; + struct iwl3945_rate_scaling_info table[IWL_MAX_RATES]; } __packed; + /*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */ #define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0) @@ -1799,27 +1816,28 @@ struct il3945_rate_scaling_cmd { #define LINK_QUAL_ANT_B_MSK (1 << 1) #define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK) + /** - * struct il_link_qual_general_params + * struct iwl_link_qual_general_params * - * Used in C_TX_LINK_QUALITY_CMD + * Used in REPLY_TX_LINK_QUALITY_CMD */ -struct il_link_qual_general_params { +struct iwl_link_qual_general_params { u8 flags; - /* No entries at or above this (driver chosen) idx contain MIMO */ + /* No entries at or above this (driver chosen) index contain MIMO */ u8 mimo_delimiter; /* Best single antenna to use for single stream (legacy, SISO). */ u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */ /* Best antennas to use for MIMO (unused for 4965, assumes both). */ - u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */ + u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */ /* * If driver needs to use different initial rates for different * EDCA QOS access categories (as implemented by tx fifos 0-3), - * this table will set that up, by indicating the idxes in the + * this table will set that up, by indicating the indexes in the * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start. * Otherwise, driver should set all entries to 0. * @@ -1827,10 +1845,10 @@ struct il_link_qual_general_params { * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3. 
*/ - u8 start_rate_idx[LINK_QUAL_AC_NUM]; + u8 start_rate_index[LINK_QUAL_AC_NUM]; } __packed; -#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ +#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ #define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000) #define LINK_QUAL_AGG_TIME_LIMIT_MIN (100) @@ -1843,11 +1861,11 @@ struct il_link_qual_general_params { #define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) /** - * struct il_link_qual_agg_params + * struct iwl_link_qual_agg_params * - * Used in C_TX_LINK_QUALITY_CMD + * Used in REPLY_TX_LINK_QUALITY_CMD */ -struct il_link_qual_agg_params { +struct iwl_link_qual_agg_params { /* *Maximum number of uSec in aggregation. @@ -1874,9 +1892,9 @@ struct il_link_qual_agg_params { } __packed; /* - * C_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) + * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) * - * For 4965 devices only; 3945 uses C_RATE_SCALE. + * For 4965 devices only; 3945 uses REPLY_RATE_SCALE. * * Each station in the 4965 device's internal station table has its own table * of 16 @@ -1885,13 +1903,13 @@ struct il_link_qual_agg_params { * one station. * * NOTE: Station must already be in 4965 device's station table. - * Use C_ADD_STA. + * Use REPLY_ADD_STA. * * The rate scaling procedures described below work well. Of course, other * procedures are possible, and may work better for particular environments. * * - * FILLING THE RATE TBL + * FILLING THE RATE TABLE * * Given a particular initial rate and mode, as determined by the rate * scaling algorithm described below, the Linux driver uses the following @@ -1930,13 +1948,13 @@ struct il_link_qual_agg_params { * speculative mode as the new current active mode. * * Each history set contains, separately for each possible rate, data for a - * sliding win of the 62 most recent tx attempts at that rate. The data + * sliding window of the 62 most recent tx attempts at that rate. The data * includes a shifting bitmap of success(1)/failure(0), and sums of successful * and attempted frames, from which the driver can additionally calculate a * success ratio (success / attempted) and number of failures - * (attempted - success), and control the size of the win (attempted). + * (attempted - success), and control the size of the window (attempted). * The driver uses the bit map to remove successes from the success sum, as - * the oldest tx attempts fall out of the win. + * the oldest tx attempts fall out of the window. * * When the 4965 device makes multiple tx attempts for a given frame, each * attempt might be at a different rate, and have different modulation @@ -1948,7 +1966,7 @@ struct il_link_qual_agg_params { * * When using block-ack (aggregation), all frames are transmitted at the same * rate, since there is no per-attempt acknowledgment from the destination - * station. The Tx response struct il_tx_resp indicates the Tx rate in + * station. The Tx response struct iwl_tx_resp indicates the Tx rate in * rate_n_flags field. After receiving a block-ack, the driver can update * history for the entire block all at once. * @@ -1998,8 +2016,8 @@ struct il_link_qual_agg_params { * good performance; higher rate is sure to have poorer success. * * 6) Re-evaluate the rate after each tx frame. 
If working with block- - * acknowledge, history and stats may be calculated for the entire - * block (including prior history that fits within the history wins), + * acknowledge, history and statistics may be calculated for the entire + * block (including prior history that fits within the history windows), * before re-evaluation. * * FINDING BEST STARTING MODULATION MODE: @@ -2061,22 +2079,22 @@ struct il_link_qual_agg_params { * legacy), and then repeat the search process. * */ -struct il_link_quality_cmd { +struct iwl_link_quality_cmd { /* Index of destination/recipient station in uCode's station table */ u8 sta_id; u8 reserved1; __le16 control; /* not used */ - struct il_link_qual_general_params general_params; - struct il_link_qual_agg_params agg_params; + struct iwl_link_qual_general_params general_params; + struct iwl_link_qual_agg_params agg_params; /* - * Rate info; when using rate-scaling, Tx command's initial_rate_idx - * specifies 1st Tx rate attempted, via idx into this table. + * Rate info; when using rate-scaling, Tx command's initial_rate_index + * specifies 1st Tx rate attempted, via index into this table. * 4965 devices works its way through table when retrying Tx. */ struct { - __le32 rate_n_flags; /* RATE_MCS_*, RATE_* */ + __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */ } rs_table[LINK_QUAL_MAX_RETRY_NUM]; __le32 reserved2; } __packed; @@ -2099,13 +2117,13 @@ struct il_link_quality_cmd { #define BT_MAX_KILL_DEF (0x5) /* - * C_BT_CONFIG = 0x9b (command, has simple generic response) + * REPLY_BT_CONFIG = 0x9b (command, has simple generic response) * * 3945 and 4965 devices support hardware handshake with Bluetooth device on * same platform. Bluetooth device alerts wireless device when it will Tx; * wireless device can delay or kill its own Tx to accommodate. 
*/ -struct il_bt_cmd { +struct iwl_bt_cmd { u8 flags; u8 lead_time; u8 max_kill; @@ -2114,6 +2132,7 @@ struct il_bt_cmd { __le32 kill_cts_mask; } __packed; + /****************************************************************************** * (6) * Spectrum Management (802.11h) Commands, Responses, Notifications: @@ -2131,18 +2150,18 @@ struct il_bt_cmd { RXON_FILTER_ASSOC_MSK | \ RXON_FILTER_BCON_AWARE_MSK) -struct il_measure_channel { +struct iwl_measure_channel { __le32 duration; /* measurement duration in extended beacon * format */ u8 channel; /* channel to measure */ - u8 type; /* see enum il_measure_type */ + u8 type; /* see enum iwl_measure_type */ __le16 reserved; } __packed; /* - * C_SPECTRUM_MEASUREMENT = 0x74 (command) + * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command) */ -struct il_spectrum_cmd { +struct iwl_spectrum_cmd { __le16 len; /* number of bytes starting from token */ u8 token; /* token id */ u8 id; /* measurement id -- 0 or 1 */ @@ -2155,13 +2174,13 @@ struct il_spectrum_cmd { __le32 filter_flags; /* rxon filter flags */ __le16 channel_count; /* minimum 1, maximum 10 */ __le16 reserved3; - struct il_measure_channel channels[10]; + struct iwl_measure_channel channels[10]; } __packed; /* - * C_SPECTRUM_MEASUREMENT = 0x74 (response) + * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response) */ -struct il_spectrum_resp { +struct iwl_spectrum_resp { u8 token; u8 id; /* id of the prior command replaced, or 0xff */ __le16 status; /* 0 - command will be handled @@ -2169,57 +2188,57 @@ struct il_spectrum_resp { * measurement) */ } __packed; -enum il_measurement_state { - IL_MEASUREMENT_START = 0, - IL_MEASUREMENT_STOP = 1, +enum iwl_measurement_state { + IWL_MEASUREMENT_START = 0, + IWL_MEASUREMENT_STOP = 1, }; -enum il_measurement_status { - IL_MEASUREMENT_OK = 0, - IL_MEASUREMENT_CONCURRENT = 1, - IL_MEASUREMENT_CSA_CONFLICT = 2, - IL_MEASUREMENT_TGH_CONFLICT = 3, +enum iwl_measurement_status { + IWL_MEASUREMENT_OK = 0, + IWL_MEASUREMENT_CONCURRENT = 1, + IWL_MEASUREMENT_CSA_CONFLICT = 2, + IWL_MEASUREMENT_TGH_CONFLICT = 3, /* 4-5 reserved */ - IL_MEASUREMENT_STOPPED = 6, - IL_MEASUREMENT_TIMEOUT = 7, - IL_MEASUREMENT_PERIODIC_FAILED = 8, + IWL_MEASUREMENT_STOPPED = 6, + IWL_MEASUREMENT_TIMEOUT = 7, + IWL_MEASUREMENT_PERIODIC_FAILED = 8, }; #define NUM_ELEMENTS_IN_HISTOGRAM 8 -struct il_measurement_histogram { +struct iwl_measurement_histogram { __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */ __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */ } __packed; /* clear channel availability counters */ -struct il_measurement_cca_counters { +struct iwl_measurement_cca_counters { __le32 ofdm; __le32 cck; } __packed; -enum il_measure_type { - IL_MEASURE_BASIC = (1 << 0), - IL_MEASURE_CHANNEL_LOAD = (1 << 1), - IL_MEASURE_HISTOGRAM_RPI = (1 << 2), - IL_MEASURE_HISTOGRAM_NOISE = (1 << 3), - IL_MEASURE_FRAME = (1 << 4), +enum iwl_measure_type { + IWL_MEASURE_BASIC = (1 << 0), + IWL_MEASURE_CHANNEL_LOAD = (1 << 1), + IWL_MEASURE_HISTOGRAM_RPI = (1 << 2), + IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3), + IWL_MEASURE_FRAME = (1 << 4), /* bits 5:6 are reserved */ - IL_MEASURE_IDLE = (1 << 7), + IWL_MEASURE_IDLE = (1 << 7), }; /* - * N_SPECTRUM_MEASUREMENT = 0x75 (notification only, not a command) + * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command) */ -struct il_spectrum_notification { +struct iwl_spectrum_notification { u8 id; /* measurement id -- 0 or 1 */ u8 token; - u8 channel_idx; /* idx in measurement channel list */ + u8 channel_index; /* index in 
measurement channel list */ u8 state; /* 0 - start, 1 - stop */ __le32 start_time; /* lower 32-bits of TSF */ u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */ u8 channel; - u8 type; /* see enum il_measurement_type */ + u8 type; /* see enum iwl_measurement_type */ u8 reserved1; /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only only * valid if applicable for measurement type requested. */ @@ -2229,9 +2248,9 @@ struct il_spectrum_notification { u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 - * unidentified */ u8 reserved2[3]; - struct il_measurement_histogram histogram; + struct iwl_measurement_histogram histogram; __le32 stop_time; /* lower 32-bits of TSF */ - __le32 status; /* see il_measurement_status */ + __le32 status; /* see iwl_measurement_status */ } __packed; /****************************************************************************** @@ -2241,10 +2260,10 @@ struct il_spectrum_notification { *****************************************************************************/ /** - * struct il_powertable_cmd - Power Table Command + * struct iwl_powertable_cmd - Power Table Command * @flags: See below: * - * C_POWER_TBL = 0x77 (command, has simple generic response) + * POWER_TABLE_CMD = 0x77 (command, has simple generic response) * * PM allow: * bit 0 - '0' Driver not allow power management @@ -2271,38 +2290,38 @@ struct il_spectrum_notification { * '10' force xtal sleep * '11' Illegal set * - * NOTE: if sleep_interval[SLEEP_INTRVL_TBL_SIZE-1] > DTIM period then + * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then * ucode assume sleep over DTIM is allowed and we don't need to wake up * for every DTIM. */ -#define IL_POWER_VEC_SIZE 5 +#define IWL_POWER_VEC_SIZE 5 -#define IL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0)) -#define IL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3)) +#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0)) +#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3)) -struct il3945_powertable_cmd { +struct iwl3945_powertable_cmd { __le16 flags; u8 reserved[2]; __le32 rx_data_timeout; __le32 tx_data_timeout; - __le32 sleep_interval[IL_POWER_VEC_SIZE]; + __le32 sleep_interval[IWL_POWER_VEC_SIZE]; } __packed; -struct il_powertable_cmd { +struct iwl_powertable_cmd { __le16 flags; - u8 keep_alive_seconds; /* 3945 reserved */ - u8 debug_flags; /* 3945 reserved */ + u8 keep_alive_seconds; /* 3945 reserved */ + u8 debug_flags; /* 3945 reserved */ __le32 rx_data_timeout; __le32 tx_data_timeout; - __le32 sleep_interval[IL_POWER_VEC_SIZE]; + __le32 sleep_interval[IWL_POWER_VEC_SIZE]; __le32 keep_alive_beacons; } __packed; /* - * N_PM_SLEEP = 0x7A (notification only, not a command) + * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command) * all devices identical. */ -struct il_sleep_notification { +struct iwl_sleep_notification { u8 pm_sleep_mode; u8 pm_wakeup_src; __le16 reserved; @@ -2313,23 +2332,23 @@ struct il_sleep_notification { /* Sleep states. all devices identical. 
*/ enum { - IL_PM_NO_SLEEP = 0, - IL_PM_SLP_MAC = 1, - IL_PM_SLP_FULL_MAC_UNASSOCIATE = 2, - IL_PM_SLP_FULL_MAC_CARD_STATE = 3, - IL_PM_SLP_PHY = 4, - IL_PM_SLP_REPENT = 5, - IL_PM_WAKEUP_BY_TIMER = 6, - IL_PM_WAKEUP_BY_DRIVER = 7, - IL_PM_WAKEUP_BY_RFKILL = 8, + IWL_PM_NO_SLEEP = 0, + IWL_PM_SLP_MAC = 1, + IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2, + IWL_PM_SLP_FULL_MAC_CARD_STATE = 3, + IWL_PM_SLP_PHY = 4, + IWL_PM_SLP_REPENT = 5, + IWL_PM_WAKEUP_BY_TIMER = 6, + IWL_PM_WAKEUP_BY_DRIVER = 7, + IWL_PM_WAKEUP_BY_RFKILL = 8, /* 3 reserved */ - IL_PM_NUM_OF_MODES = 12, + IWL_PM_NUM_OF_MODES = 12, }; /* - * N_CARD_STATE = 0xa1 (notification only, not a command) + * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command) */ -struct il_card_state_notif { +struct iwl_card_state_notif { __le32 flags; } __packed; @@ -2338,11 +2357,11 @@ struct il_card_state_notif { #define CT_CARD_DISABLED 0x04 #define RXON_CARD_DISABLED 0x10 -struct il_ct_kill_config { - __le32 reserved; - __le32 critical_temperature_M; - __le32 critical_temperature_R; -} __packed; +struct iwl_ct_kill_config { + __le32 reserved; + __le32 critical_temperature_M; + __le32 critical_temperature_R; +} __packed; /****************************************************************************** * (8) @@ -2354,7 +2373,7 @@ struct il_ct_kill_config { #define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1) /** - * struct il_scan_channel - entry in C_SCAN channel table + * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table * * One for each channel in the scan list. * Each channel can independently select: @@ -2364,7 +2383,7 @@ struct il_ct_kill_config { * quiet_plcp_th, good_CRC_th) * * To avoid uCode errors, make sure the following are true (see comments - * under struct il_scan_cmd about max_out_time and quiet_time): + * under struct iwl_scan_cmd about max_out_time and quiet_time): * 1) If using passive_dwell (i.e. 
passive_dwell != 0): * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0) * 2) quiet_time <= active_dwell @@ -2372,7 +2391,7 @@ struct il_ct_kill_config { * passive_dwell < max_out_time * active_dwell < max_out_time */ -struct il3945_scan_channel { +struct iwl3945_scan_channel { /* * type is defined as: * 0:0 1 = active, 0 = passive @@ -2381,16 +2400,16 @@ struct il3945_scan_channel { * 5:7 reserved */ u8 type; - u8 channel; /* band is selected by il3945_scan_cmd "flags" field */ - struct il3945_tx_power tpc; + u8 channel; /* band is selected by iwl3945_scan_cmd "flags" field */ + struct iwl3945_tx_power tpc; __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ } __packed; /* set number of direct probes u8 type */ -#define IL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1)))) +#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1)))) -struct il_scan_channel { +struct iwl_scan_channel { /* * type is defined as: * 0:0 1 = active, 0 = passive @@ -2399,7 +2418,7 @@ struct il_scan_channel { * 21:31 reserved */ __le32 type; - __le16 channel; /* band is selected by il_scan_cmd "flags" field */ + __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */ u8 tx_gain; /* gain for analog radio */ u8 dsp_atten; /* gain for DSP */ __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ @@ -2407,17 +2426,17 @@ struct il_scan_channel { } __packed; /* set number of direct probes __le32 type */ -#define IL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) +#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) /** - * struct il_ssid_ie - directed scan network information element + * struct iwl_ssid_ie - directed scan network information element * - * Up to 20 of these may appear in C_SCAN (Note: Only 4 are in - * 3945 SCAN api), selected by "type" bit field in struct il_scan_channel; + * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in + * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel; * each channel may select different ssids from among the 20 (4) entries. * SSID IEs get transmitted in reverse order of entry. */ -struct il_ssid_ie { +struct iwl_ssid_ie { u8 id; u8 len; u8 ssid[32]; @@ -2426,14 +2445,14 @@ struct il_ssid_ie { #define PROBE_OPTION_MAX_3945 4 #define PROBE_OPTION_MAX 20 #define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF) -#define IL_GOOD_CRC_TH_DISABLED 0 -#define IL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1) -#define IL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff) -#define IL_MAX_SCAN_SIZE 1024 -#define IL_MAX_CMD_SIZE 4096 +#define IWL_GOOD_CRC_TH_DISABLED 0 +#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1) +#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff) +#define IWL_MAX_SCAN_SIZE 1024 +#define IWL_MAX_CMD_SIZE 4096 /* - * C_SCAN = 0x80 (command) + * REPLY_SCAN_CMD = 0x80 (command) * * The hardware scan command is very powerful; the driver can set it up to * maintain (relatively) normal network traffic while doing a scan in the @@ -2482,10 +2501,10 @@ struct il_ssid_ie { * Driver must use separate scan commands for 2.4 vs. 5 GHz bands. * * To avoid uCode errors, see timing restrictions described under - * struct il_scan_channel. + * struct iwl_scan_channel. */ -struct il3945_scan_cmd { +struct iwl3945_scan_cmd { __le16 len; u8 reserved0; u8 channel_count; /* # channels in channel list */ @@ -2506,10 +2525,10 @@ struct il3945_scan_cmd { /* For active scans (set to all-0s for passive scans). 
* Does not include payload. Must specify Tx rate; no rate scaling. */ - struct il3945_tx_cmd tx_cmd; + struct iwl3945_tx_cmd tx_cmd; /* For directed active scans (set to all-0s otherwise) */ - struct il_ssid_ie direct_scan[PROBE_OPTION_MAX_3945]; + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945]; /* * Probe request frame, followed by channel list. @@ -2519,17 +2538,17 @@ struct il3945_scan_cmd { * Number of channels in list is specified by channel_count. * Each channel in list is of type: * - * struct il3945_scan_channel channels[0]; + * struct iwl3945_scan_channel channels[0]; * * NOTE: Only one band of channels can be scanned per pass. You * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait - * for one scan to complete (i.e. receive N_SCAN_COMPLETE) + * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION) * before requesting another scan. */ u8 data[0]; } __packed; -struct il_scan_cmd { +struct iwl_scan_cmd { __le16 len; u8 reserved0; u8 channel_count; /* # channels in channel list */ @@ -2550,10 +2569,10 @@ struct il_scan_cmd { /* For active scans (set to all-0s for passive scans). * Does not include payload. Must specify Tx rate; no rate scaling. */ - struct il_tx_cmd tx_cmd; + struct iwl_tx_cmd tx_cmd; /* For directed active scans (set to all-0s otherwise) */ - struct il_ssid_ie direct_scan[PROBE_OPTION_MAX]; + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; /* * Probe request frame, followed by channel list. @@ -2563,11 +2582,11 @@ struct il_scan_cmd { * Number of channels in list is specified by channel_count. * Each channel in list is of type: * - * struct il_scan_channel channels[0]; + * struct iwl_scan_channel channels[0]; * * NOTE: Only one band of channels can be scanned per pass. You * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait - * for one scan to complete (i.e. receive N_SCAN_COMPLETE) + * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION) * before requesting another scan. 
*/ u8 data[0]; @@ -2579,16 +2598,16 @@ struct il_scan_cmd { #define ABORT_STATUS 0x2 /* - * C_SCAN = 0x80 (response) + * REPLY_SCAN_CMD = 0x80 (response) */ -struct il_scanreq_notification { +struct iwl_scanreq_notification { __le32 status; /* 1: okay, 2: cannot fulfill request */ } __packed; /* - * N_SCAN_START = 0x82 (notification only, not a command) + * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command) */ -struct il_scanstart_notification { +struct iwl_scanstart_notification { __le32 tsf_low; __le32 tsf_high; __le32 beacon_timer; @@ -2601,30 +2620,30 @@ struct il_scanstart_notification { #define SCAN_OWNER_STATUS 0x1 #define MEASURE_OWNER_STATUS 0x2 -#define IL_PROBE_STATUS_OK 0 -#define IL_PROBE_STATUS_TX_FAILED BIT(0) +#define IWL_PROBE_STATUS_OK 0 +#define IWL_PROBE_STATUS_TX_FAILED BIT(0) /* error statuses combined with TX_FAILED */ -#define IL_PROBE_STATUS_FAIL_TTL BIT(1) -#define IL_PROBE_STATUS_FAIL_BT BIT(2) +#define IWL_PROBE_STATUS_FAIL_TTL BIT(1) +#define IWL_PROBE_STATUS_FAIL_BT BIT(2) -#define NUMBER_OF_STATS 1 /* first __le32 is good CRC */ +#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */ /* - * N_SCAN_RESULTS = 0x83 (notification only, not a command) + * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command) */ -struct il_scanresults_notification { +struct iwl_scanresults_notification { u8 channel; u8 band; u8 probe_status; - u8 num_probe_not_sent; /* not enough time to send */ + u8 num_probe_not_sent; /* not enough time to send */ __le32 tsf_low; __le32 tsf_high; - __le32 stats[NUMBER_OF_STATS]; + __le32 statistics[NUMBER_OF_STATISTICS]; } __packed; /* - * N_SCAN_COMPLETE = 0x84 (notification only, not a command) + * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command) */ -struct il_scancomplete_notification { +struct iwl_scancomplete_notification { u8 scanned_channels; u8 status; u8 last_channel; @@ -2632,49 +2651,50 @@ struct il_scancomplete_notification { __le32 tsf_high; } __packed; + /****************************************************************************** * (9) * IBSS/AP Commands and Notifications: * *****************************************************************************/ -enum il_ibss_manager { - IL_NOT_IBSS_MANAGER = 0, - IL_IBSS_MANAGER = 1, +enum iwl_ibss_manager { + IWL_NOT_IBSS_MANAGER = 0, + IWL_IBSS_MANAGER = 1, }; /* - * N_BEACON = 0x90 (notification only, not a command) + * BEACON_NOTIFICATION = 0x90 (notification only, not a command) */ -struct il3945_beacon_notif { - struct il3945_tx_resp beacon_notify_hdr; +struct iwl3945_beacon_notif { + struct iwl3945_tx_resp beacon_notify_hdr; __le32 low_tsf; __le32 high_tsf; __le32 ibss_mgr_status; } __packed; -struct il4965_beacon_notif { - struct il4965_tx_resp beacon_notify_hdr; +struct iwl4965_beacon_notif { + struct iwl4965_tx_resp beacon_notify_hdr; __le32 low_tsf; __le32 high_tsf; __le32 ibss_mgr_status; } __packed; /* - * C_TX_BEACON= 0x91 (command, has simple generic response) + * REPLY_TX_BEACON = 0x91 (command, has simple generic response) */ -struct il3945_tx_beacon_cmd { - struct il3945_tx_cmd tx; +struct iwl3945_tx_beacon_cmd { + struct iwl3945_tx_cmd tx; __le16 tim_idx; u8 tim_size; u8 reserved1; struct ieee80211_hdr frame[0]; /* beacon frame */ } __packed; -struct il_tx_beacon_cmd { - struct il_tx_cmd tx; +struct iwl_tx_beacon_cmd { + struct iwl_tx_cmd tx; __le16 tim_idx; u8 tim_size; u8 reserved1; @@ -2687,7 +2707,7 @@ struct il_tx_beacon_cmd { * *****************************************************************************/ 
-#define IL_TEMP_CONVERT 260 +#define IWL_TEMP_CONVERT 260 #define SUP_RATE_11A_MAX_NUM_CHANNELS 8 #define SUP_RATE_11B_MAX_NUM_CHANNELS 4 @@ -2707,9 +2727,9 @@ struct rate_histogram { } failed; } __packed; -/* stats command response */ +/* statistics command response */ -struct iwl39_stats_rx_phy { +struct iwl39_statistics_rx_phy { __le32 ina_cnt; __le32 fina_cnt; __le32 plcp_err; @@ -2727,7 +2747,7 @@ struct iwl39_stats_rx_phy { __le32 sent_cts_cnt; } __packed; -struct iwl39_stats_rx_non_phy { +struct iwl39_statistics_rx_non_phy { __le32 bogus_cts; /* CTS received when not expecting CTS */ __le32 bogus_ack; /* ACK received when not expecting ACK */ __le32 non_bssid_frames; /* number of frames with BSSID that @@ -2738,13 +2758,13 @@ struct iwl39_stats_rx_non_phy { * our serving channel */ } __packed; -struct iwl39_stats_rx { - struct iwl39_stats_rx_phy ofdm; - struct iwl39_stats_rx_phy cck; - struct iwl39_stats_rx_non_phy general; +struct iwl39_statistics_rx { + struct iwl39_statistics_rx_phy ofdm; + struct iwl39_statistics_rx_phy cck; + struct iwl39_statistics_rx_non_phy general; } __packed; -struct iwl39_stats_tx { +struct iwl39_statistics_tx { __le32 preamble_cnt; __le32 rx_detected_cnt; __le32 bt_prio_defer_cnt; @@ -2756,31 +2776,31 @@ struct iwl39_stats_tx { __le32 actual_ack_cnt; } __packed; -struct stats_dbg { +struct statistics_dbg { __le32 burst_check; __le32 burst_count; __le32 wait_for_silence_timeout_cnt; __le32 reserved[3]; } __packed; -struct iwl39_stats_div { +struct iwl39_statistics_div { __le32 tx_on_a; __le32 tx_on_b; __le32 exec_time; __le32 probe_time; } __packed; -struct iwl39_stats_general { +struct iwl39_statistics_general { __le32 temperature; - struct stats_dbg dbg; + struct statistics_dbg dbg; __le32 sleep_time; __le32 slots_out; __le32 slots_idle; __le32 ttl_timestamp; - struct iwl39_stats_div div; + struct iwl39_statistics_div div; } __packed; -struct stats_rx_phy { +struct statistics_rx_phy { __le32 ina_cnt; __le32 fina_cnt; __le32 plcp_err; @@ -2803,7 +2823,7 @@ struct stats_rx_phy { __le32 reserved3; } __packed; -struct stats_rx_ht_phy { +struct statistics_rx_ht_phy { __le32 plcp_err; __le32 overrun_err; __le32 early_overrun_err; @@ -2818,7 +2838,7 @@ struct stats_rx_ht_phy { #define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1) -struct stats_rx_non_phy { +struct statistics_rx_non_phy { __le32 bogus_cts; /* CTS received when not expecting CTS */ __le32 bogus_ack; /* ACK received when not expecting ACK */ __le32 non_bssid_frames; /* number of frames with BSSID that @@ -2832,15 +2852,15 @@ struct stats_rx_non_phy { __le32 num_missed_bcon; /* number of missed beacons */ __le32 adc_rx_saturation_time; /* count in 0.8us units the time the * ADC was in saturation */ - __le32 ina_detection_search_time; /* total time (in 0.8us) searched - * for INA */ + __le32 ina_detection_search_time;/* total time (in 0.8us) searched + * for INA */ __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */ __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */ __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */ __le32 interference_data_flag; /* flag for interference data * availability. 1 when data is * available. 
*/ - __le32 channel_load; /* counts RX Enable time in uSec */ + __le32 channel_load; /* counts RX Enable time in uSec */ __le32 dsp_false_alarms; /* DSP false alarm (both OFDM * and CCK) counter */ __le32 beacon_rssi_a; @@ -2851,28 +2871,28 @@ struct stats_rx_non_phy { __le32 beacon_energy_c; } __packed; -struct stats_rx { - struct stats_rx_phy ofdm; - struct stats_rx_phy cck; - struct stats_rx_non_phy general; - struct stats_rx_ht_phy ofdm_ht; +struct statistics_rx { + struct statistics_rx_phy ofdm; + struct statistics_rx_phy cck; + struct statistics_rx_non_phy general; + struct statistics_rx_ht_phy ofdm_ht; } __packed; /** - * struct stats_tx_power - current tx power + * struct statistics_tx_power - current tx power * * @ant_a: current tx power on chain a in 1/2 dB step * @ant_b: current tx power on chain b in 1/2 dB step * @ant_c: current tx power on chain c in 1/2 dB step */ -struct stats_tx_power { +struct statistics_tx_power { u8 ant_a; u8 ant_b; u8 ant_c; u8 reserved; } __packed; -struct stats_tx_non_phy_agg { +struct statistics_tx_non_phy_agg { __le32 ba_timeout; __le32 ba_reschedule_frames; __le32 scd_query_agg_frame_cnt; @@ -2885,7 +2905,7 @@ struct stats_tx_non_phy_agg { __le32 rx_ba_rsp_cnt; } __packed; -struct stats_tx { +struct statistics_tx { __le32 preamble_cnt; __le32 rx_detected_cnt; __le32 bt_prio_defer_cnt; @@ -2900,12 +2920,13 @@ struct stats_tx { __le32 burst_abort_missing_next_frame_cnt; __le32 cts_timeout_collision; __le32 ack_or_ba_timeout_collision; - struct stats_tx_non_phy_agg agg; + struct statistics_tx_non_phy_agg agg; __le32 reserved1; } __packed; -struct stats_div { + +struct statistics_div { __le32 tx_on_a; __le32 tx_on_b; __le32 exec_time; @@ -2914,14 +2935,14 @@ struct stats_div { __le32 reserved2; } __packed; -struct stats_general_common { - __le32 temperature; /* radio temperature */ - struct stats_dbg dbg; +struct statistics_general_common { + __le32 temperature; /* radio temperature */ + struct statistics_dbg dbg; __le32 sleep_time; __le32 slots_out; __le32 slots_idle; __le32 ttl_timestamp; - struct stats_div div; + struct statistics_div div; __le32 rx_enable_counter; /* * num_of_sos_states: @@ -2931,73 +2952,73 @@ struct stats_general_common { __le32 num_of_sos_states; } __packed; -struct stats_general { - struct stats_general_common common; +struct statistics_general { + struct statistics_general_common common; __le32 reserved2; __le32 reserved3; } __packed; -#define UCODE_STATS_CLEAR_MSK (0x1 << 0) -#define UCODE_STATS_FREQUENCY_MSK (0x1 << 1) -#define UCODE_STATS_NARROW_BAND_MSK (0x1 << 2) +#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0) +#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1) +#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2) /* - * C_STATS = 0x9c, + * REPLY_STATISTICS_CMD = 0x9c, * all devices identical. * - * This command triggers an immediate response containing uCode stats. - * The response is in the same format as N_STATS 0x9d, below. + * This command triggers an immediate response containing uCode statistics. + * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below. * * If the CLEAR_STATS configuration flag is set, uCode will clear its - * internal copy of the stats (counters) after issuing the response. - * This flag does not affect N_STATSs after beacons (see below). + * internal copy of the statistics (counters) after issuing the response. + * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below). 
* * If the DISABLE_NOTIF configuration flag is set, uCode will not issue - * N_STATSs after received beacons (see below). This flag - * does not affect the response to the C_STATS 0x9c itself. + * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag + * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself. */ -#define IL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */ -#define IL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2) /* see above */ -struct il_stats_cmd { - __le32 configuration_flags; /* IL_STATS_CONF_* */ +#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */ +#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */ +struct iwl_statistics_cmd { + __le32 configuration_flags; /* IWL_STATS_CONF_* */ } __packed; /* - * N_STATS = 0x9d (notification only, not a command) + * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command) * * By default, uCode issues this notification after receiving a beacon * while associated. To disable this behavior, set DISABLE_NOTIF flag in the - * C_STATS 0x9c, above. + * REPLY_STATISTICS_CMD 0x9c, above. * * Statistics counters continue to increment beacon after beacon, but are - * cleared when changing channels or when driver issues C_STATS + * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD * 0x9c with CLEAR_STATS bit set (see above). * - * uCode also issues this notification during scans. uCode clears stats - * appropriately so that each notification contains stats for only the + * uCode also issues this notification during scans. uCode clears statistics + * appropriately so that each notification contains statistics for only the * one channel that has just been scanned. */ -#define STATS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2) -#define STATS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8) +#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2) +#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8) -struct il3945_notif_stats { +struct iwl3945_notif_statistics { __le32 flag; - struct iwl39_stats_rx rx; - struct iwl39_stats_tx tx; - struct iwl39_stats_general general; + struct iwl39_statistics_rx rx; + struct iwl39_statistics_tx tx; + struct iwl39_statistics_general general; } __packed; -struct il_notif_stats { +struct iwl_notif_statistics { __le32 flag; - struct stats_rx rx; - struct stats_tx tx; - struct stats_general general; + struct statistics_rx rx; + struct statistics_tx tx; + struct statistics_general general; } __packed; /* - * N_MISSED_BEACONS = 0xa2 (notification only, not a command) + * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command) * - * uCode send N_MISSED_BEACONS to driver when detect beacon missed + * uCode send MISSED_BEACONS_NOTIFICATION to driver when detect beacon missed * in regardless of how many missed beacons, which mean when driver receive the * notification, inside the command, it can find all the beacons information * which include number of total missed beacons, number of consecutive missed @@ -3014,17 +3035,18 @@ struct il_notif_stats { * */ -#define IL_MISSED_BEACON_THRESHOLD_MIN (1) -#define IL_MISSED_BEACON_THRESHOLD_DEF (5) -#define IL_MISSED_BEACON_THRESHOLD_MAX IL_MISSED_BEACON_THRESHOLD_DEF +#define IWL_MISSED_BEACON_THRESHOLD_MIN (1) +#define IWL_MISSED_BEACON_THRESHOLD_DEF (5) +#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF -struct il_missed_beacon_notif { +struct iwl_missed_beacon_notif { __le32 consecutive_missed_beacons; __le32 total_missed_becons; __le32 
num_expected_beacons; __le32 num_recvd_beacons; } __packed; + /****************************************************************************** * (11) * Rx Calibration Commands: @@ -3040,7 +3062,7 @@ struct il_missed_beacon_notif { *****************************************************************************/ /** - * C_SENSITIVITY = 0xa8 (command, has simple generic response) + * SENSITIVITY_CMD = 0xa8 (command, has simple generic response) * * This command sets up the Rx signal detector for a sensitivity level that * is high enough to lock onto all signals within the associated network, @@ -3054,12 +3076,12 @@ struct il_missed_beacon_notif { * time listening, not transmitting). Driver must adjust sensitivity so that * the ratio of actual false alarms to actual Rx time falls within this range. * - * While associated, uCode delivers N_STATSs after each + * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each * received beacon. These provide information to the driver to analyze the - * sensitivity. Don't analyze stats that come in from scanning, or any - * other non-associated-network source. Pertinent stats include: + * sensitivity. Don't analyze statistics that come in from scanning, or any + * other non-associated-network source. Pertinent statistics include: * - * From "general" stats (struct stats_rx_non_phy): + * From "general" statistics (struct statistics_rx_non_phy): * * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level) * Measure of energy of desired signal. Used for establishing a level @@ -3072,7 +3094,7 @@ struct il_missed_beacon_notif { * uSecs of actual Rx time during beacon period (varies according to * how much time was spent transmitting). * - * From "cck" and "ofdm" stats (struct stats_rx_phy), separately: + * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately: * * false_alarm_cnt * Signal locks abandoned early (before phy-level header). @@ -3089,15 +3111,15 @@ struct il_missed_beacon_notif { * * Total number of false alarms = false_alarms + plcp_errs * - * For OFDM, adjust the following table entries in struct il_sensitivity_cmd + * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd * (notice that the start points for OFDM are at or close to settings for * maximum sensitivity): * * START / MIN / MAX - * HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX 90 / 85 / 120 - * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX 170 / 170 / 210 - * HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX 105 / 105 / 140 - * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX 220 / 220 / 270 + * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120 + * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210 + * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140 + * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270 * * If actual rate of OFDM false alarms (+ plcp_errors) is too high * (greater than 50 for each 204.8 msecs listening), reduce sensitivity @@ -3130,30 +3152,30 @@ struct il_missed_beacon_notif { * Reset this to 0 at the first beacon period that falls within the * "good" range (5 to 50 false alarms per 204.8 milliseconds rx). 
* - * Then, adjust the following CCK table entries in struct il_sensitivity_cmd + * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd * (notice that the start points for CCK are at maximum sensitivity): * * START / MIN / MAX - * HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX 125 / 125 / 200 - * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX 200 / 200 / 400 - * HD_MIN_ENERGY_CCK_DET_IDX 100 / 0 / 100 + * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200 + * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400 + * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100 * * If actual rate of CCK false alarms (+ plcp_errors) is too high * (greater than 50 for each 204.8 msecs listening), method for reducing * sensitivity is: * - * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX, + * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX, * up to max 400. * - * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is < 160, + * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160, * sensitivity has been reduced a significant amount; bring it up to * a moderate 161. Otherwise, *add* 3, up to max 200. * - * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is > 160, + * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160, * sensitivity has been reduced only a moderate or small amount; - * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_IDX, + * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX, * down to min 0. Otherwise (if gain has been significantly reduced), - * don't change the HD_MIN_ENERGY_CCK_DET_IDX value. + * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value. * * b) Save a snapshot of the "silence reference". * @@ -3169,13 +3191,13 @@ struct il_missed_beacon_notif { * * Method for increasing sensitivity: * - * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX, + * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX, * down to min 125. * - * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX, + * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX, * down to min 200. * - * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_IDX, up to max 100. + * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100. * * If actual rate of CCK false alarms (+ plcp_errors) is within good range * (between 5 and 50 for each 204.8 msecs listening): @@ -3184,56 +3206,57 @@ struct il_missed_beacon_notif { * * 2) If previous beacon had too many CCK false alarms (+ plcp_errors), * give some extra margin to energy threshold by *subtracting* 8 - * from value in HD_MIN_ENERGY_CCK_DET_IDX. + * from value in HD_MIN_ENERGY_CCK_DET_INDEX. * * For all cases (too few, too many, good range), make sure that the CCK * detection threshold (energy) is below the energy level for robust * detection over the past 10 beacon periods, the "Max cck energy". * Lower values mean higher energy; this means making sure that the value - * in HD_MIN_ENERGY_CCK_DET_IDX is at or *above* "Max cck energy". + * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy". 
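
The CCK rules above reduce to small clamped steps on two auto-correlation entries, plus energy-threshold and silence-reference bookkeeping. Below is a minimal standalone sketch of the "too many false alarms" branch and of packing the working table into the command body; it uses host-endian stand-ins for the HD_* indices and struct iwl_sensitivity_cmd defined just below, invented helper names, and deliberately omits the step 3a/3b energy handling, so it is illustrative only, not the driver's code.

#include <stdint.h>
#include <string.h>

#define HD_TABLE_SIZE				11
#define HD_MIN_ENERGY_CCK_DET_INDEX		0
#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX	4
#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX	9
#define SENSITIVITY_CMD_CONTROL_WORK_TABLE	1

struct sens_cmd {		/* host-endian stand-in for iwl_sensitivity_cmd */
	uint16_t control;	/* always the working-table value when tuning */
	uint16_t table[HD_TABLE_SIZE];
};

/* move *val by step, clamped to [lo, hi] */
void step_clamped(uint16_t *val, int step, int lo, int hi)
{
	int v = (int)*val + step;

	if (v < lo)
		v = lo;
	if (v > hi)
		v = hi;
	*val = (uint16_t)v;
}

/* one "too many CCK false alarms" step, following the rules above */
void cck_reduce_sensitivity(uint16_t table[HD_TABLE_SIZE])
{
	/* 1) raise the MRC auto-correlation threshold by 3, capped at 400 */
	step_clamped(&table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX], +3, 200, 400);

	/* 2) if the non-MRC threshold is still low, jump it to a moderate 161;
	 *    otherwise raise it by 3, capped at 200 */
	if (table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] < 160)
		table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] = 161;
	else
		step_clamped(&table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX], +3, 125, 200);

	/* 3) HD_MIN_ENERGY_CCK_DET_INDEX and silence-reference handling from
	 *    steps 3a/3b above is intentionally omitted in this sketch */
}

/* pack the working table into the command body */
void fill_sensitivity_cmd(struct sens_cmd *cmd,
			  const uint16_t table[HD_TABLE_SIZE])
{
	cmd->control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
	memcpy(cmd->table, table, sizeof(cmd->table));
}

The filled command corresponds to SENSITIVITY_CMD (0xa8); as the struct documentation below notes, the control field always selects the working table when the driver is tuning.
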
* */ /* - * Table entries in C_SENSITIVITY (struct il_sensitivity_cmd) - */ -#define HD_TBL_SIZE (11) /* number of entries */ -#define HD_MIN_ENERGY_CCK_DET_IDX (0) /* table idxes */ -#define HD_MIN_ENERGY_OFDM_DET_IDX (1) -#define HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX (2) -#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX (3) -#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX (4) -#define HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX (5) -#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX (6) -#define HD_BARKER_CORR_TH_ADD_MIN_IDX (7) -#define HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX (8) -#define HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX (9) -#define HD_OFDM_ENERGY_TH_IN_IDX (10) - -/* Control field in struct il_sensitivity_cmd */ -#define C_SENSITIVITY_CONTROL_DEFAULT_TBL cpu_to_le16(0) -#define C_SENSITIVITY_CONTROL_WORK_TBL cpu_to_le16(1) + * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd) + */ +#define HD_TABLE_SIZE (11) /* number of entries */ +#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */ +#define HD_MIN_ENERGY_OFDM_DET_INDEX (1) +#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2) +#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3) +#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4) +#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5) +#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6) +#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7) +#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8) +#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9) +#define HD_OFDM_ENERGY_TH_IN_INDEX (10) + +/* Control field in struct iwl_sensitivity_cmd */ +#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0) +#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1) /** - * struct il_sensitivity_cmd + * struct iwl_sensitivity_cmd * @control: (1) updates working table, (0) updates default table - * @table: energy threshold values, use HD_* as idx into table + * @table: energy threshold values, use HD_* as index into table * * Always use "1" in "control" to update uCode's working table and DSP. */ -struct il_sensitivity_cmd { - __le16 control; /* always use "1" */ - __le16 table[HD_TBL_SIZE]; /* use HD_* as idx */ +struct iwl_sensitivity_cmd { + __le16 control; /* always use "1" */ + __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */ } __packed; + /** - * C_PHY_CALIBRATION = 0xb0 (command, has simple generic response) + * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response) * * This command sets the relative gains of 4965 device's 3 radio receiver chains. * * After the first association, driver should accumulate signal and noise - * stats from the N_STATSs that follow the first 20 - * beacons from the associated network (don't collect stats that come + * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20 + * beacons from the associated network (don't collect statistics that come * in from scanning, or any other non-network source). * * DISCONNECTED ANTENNA: @@ -3241,7 +3264,7 @@ struct il_sensitivity_cmd { * Driver should determine which antennas are actually connected, by comparing * average beacon signal levels for the 3 Rx chains. Accumulate (add) the * following values over 20 beacons, one accumulator for each of the chains - * a/b/c, from struct stats_rx_non_phy: + * a/b/c, from struct statistics_rx_non_phy: * * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB) * @@ -3260,7 +3283,7 @@ struct il_sensitivity_cmd { * to antennas, see above) for gain, by comparing the average signal levels * detected during the silence after each beacon (background noise). 
* Accumulate (add) the following values over 20 beacons, one accumulator for - * each of the chains a/b/c, from struct stats_rx_non_phy: + * each of the chains a/b/c, from struct statistics_rx_non_phy: * * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB) * @@ -3271,7 +3294,7 @@ struct il_sensitivity_cmd { * (accum_noise[i] - accum_noise[reference]) / 30 * * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB. - * For use in diff_gain_[abc] fields of struct il_calibration_cmd, the + * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the * driver should limit the difference results to a range of 0-3 (0-4.5 dB), * and set bit 2 to indicate "reduce gain". The value for the reference * (weakest) chain should be "0". @@ -3283,24 +3306,24 @@ struct il_sensitivity_cmd { /* Phy calibration command for series */ /* The default calibrate table size if not specified by firmware */ -#define IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18 +#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18 enum { - IL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7, - IL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19, + IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7, + IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19, }; -#define IL_MAX_PHY_CALIBRATE_TBL_SIZE (253) +#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253) -struct il_calib_hdr { +struct iwl_calib_hdr { u8 op_code; u8 first_group; u8 groups_num; u8 data_valid; } __packed; -/* IL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */ -struct il_calib_diff_gain_cmd { - struct il_calib_hdr hdr; +/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */ +struct iwl_calib_diff_gain_cmd { + struct iwl_calib_hdr hdr; s8 diff_gain_a; /* see above */ s8 diff_gain_b; s8 diff_gain_c; @@ -3315,12 +3338,12 @@ struct il_calib_diff_gain_cmd { /* * LEDs Command & Response - * C_LEDS = 0x48 (command, has simple generic response) + * REPLY_LEDS_CMD = 0x48 (command, has simple generic response) * * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field), * this command turns it on or off, or sets up a periodic blinking cycle. */ -struct il_led_cmd { +struct iwl_led_cmd { __le32 interval; /* "interval" in uSec */ u8 id; /* 1: Activity, 2: Link, 3: Tech */ u8 off; /* # intervals off while blinking; @@ -3330,15 +3353,14 @@ struct il_led_cmd { u8 reserved; } __packed; + /****************************************************************************** * (13) * Union of all expected notifications/responses: * *****************************************************************************/ -#define IL_RX_FRAME_SIZE_MSK 0x00003fff - -struct il_rx_pkt { +struct iwl_rx_packet { /* * The first 4 bytes of the RX frame header contain both the RX frame * size and some flags. 
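
The differential-gain arithmetic described above (accumulate beacon_silence_rssi over 20 beacons, take the quietest chain as reference, divide each chain's excess by 30 to get 1.5 dB steps, clamp to 0-3, set bit 2 to mean "reduce gain") condenses to a few lines. This standalone sketch uses invented names and plain integer types rather than the real iwl_calib_diff_gain_cmd shown above, so treat it as an illustration of the math only.

#include <stdint.h>

#define NUM_RX_CHAINS		3
#define GAIN_STEP_DIVISOR	30	/* 20 accumulated dB samples -> 1.5 dB steps */
#define GAIN_MAX_STEPS		3	/* clamp to 0..3 (0 .. 4.5 dB) */
#define GAIN_REDUCE_BIT		0x04	/* bit 2: value means "reduce gain" */

void compute_diff_gain(const uint32_t accum_noise[NUM_RX_CHAINS],
		       int8_t diff_gain[NUM_RX_CHAINS])
{
	uint32_t ref = accum_noise[0];
	int i;

	/* the quietest chain (lowest accumulated silence RSSI) is the reference */
	for (i = 1; i < NUM_RX_CHAINS; i++)
		if (accum_noise[i] < ref)
			ref = accum_noise[i];

	for (i = 0; i < NUM_RX_CHAINS; i++) {
		uint32_t steps = (accum_noise[i] - ref) / GAIN_STEP_DIVISOR;

		if (steps > GAIN_MAX_STEPS)
			steps = GAIN_MAX_STEPS;

		/* reference chain stays 0; others get "reduce by N steps" */
		diff_gain[i] = steps ? (int8_t)(steps | GAIN_REDUCE_BIT) : 0;
	}
}

The three resulting values would populate the diff_gain_a/b/c fields of the IWL_PHY_CALIBRATE_DIFF_GAIN_CMD payload shown above, with the reference (weakest) chain left at 0 as the text requires.
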
@@ -3350,27 +3372,27 @@ struct il_rx_pkt { * 13-00: RX frame size */ __le32 len_n_flags; - struct il_cmd_header hdr; + struct iwl_cmd_header hdr; union { - struct il3945_rx_frame rx_frame; - struct il3945_tx_resp tx_resp; - struct il3945_beacon_notif beacon_status; - - struct il_alive_resp alive_frame; - struct il_spectrum_notification spectrum_notif; - struct il_csa_notification csa_notif; - struct il_error_resp err_resp; - struct il_card_state_notif card_state_notif; - struct il_add_sta_resp add_sta; - struct il_rem_sta_resp rem_sta; - struct il_sleep_notification sleep_notif; - struct il_spectrum_resp spectrum; - struct il_notif_stats stats; - struct il_compressed_ba_resp compressed_ba; - struct il_missed_beacon_notif missed_beacon; + struct iwl3945_rx_frame rx_frame; + struct iwl3945_tx_resp tx_resp; + struct iwl3945_beacon_notif beacon_status; + + struct iwl_alive_resp alive_frame; + struct iwl_spectrum_notification spectrum_notif; + struct iwl_csa_notification csa_notif; + struct iwl_error_resp err_resp; + struct iwl_card_state_notif card_state_notif; + struct iwl_add_sta_resp add_sta; + struct iwl_rem_sta_resp rem_sta; + struct iwl_sleep_notification sleep_notif; + struct iwl_spectrum_resp spectrum; + struct iwl_notif_statistics stats; + struct iwl_compressed_ba_resp compressed_ba; + struct iwl_missed_beacon_notif missed_beacon; __le32 status; u8 raw[0]; } u; } __packed; -#endif /* __il_commands_h__ */ +#endif /* __iwl_legacy_commands_h__ */ diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-core.c b/trunk/drivers/net/wireless/iwlegacy/iwl-core.c new file mode 100644 index 000000000000..2bd5659310d7 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-core.c @@ -0,0 +1,2661 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-debug.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-power.h" +#include "iwl-sta.h" +#include "iwl-helpers.h" + + +MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965"); +MODULE_VERSION(IWLWIFI_VERSION); +MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_LICENSE("GPL"); + +/* + * set bt_coex_active to true, uCode will do kill/defer + * every time the priority line is asserted (BT is sending signals on the + * priority line in the PCIx). 
+ * set bt_coex_active to false, uCode will ignore the BT activity and + * perform the normal operation + * + * User might experience transmit issue on some platform due to WiFi/BT + * co-exist problem. The possible behaviors are: + * Able to scan and finding all the available AP + * Not able to associate with any AP + * On those platforms, WiFi communication can be restored by set + * "bt_coex_active" module parameter to "false" + * + * default: bt_coex_active = true (BT_COEX_ENABLE) + */ +static bool bt_coex_active = true; +module_param(bt_coex_active, bool, S_IRUGO); +MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist"); + +u32 iwlegacy_debug_level; +EXPORT_SYMBOL(iwlegacy_debug_level); + +const u8 iwlegacy_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; +EXPORT_SYMBOL(iwlegacy_bcast_addr); + + +/* This function both allocates and initializes hw and priv. */ +struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg) +{ + struct iwl_priv *priv; + /* mac80211 allocates memory for this device instance, including + * space for this driver's private structure */ + struct ieee80211_hw *hw; + + hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), + cfg->ops->ieee80211_ops); + if (hw == NULL) { + pr_err("%s: Can not allocate network device\n", + cfg->name); + goto out; + } + + priv = hw->priv; + priv->hw = hw; + +out: + return hw; +} +EXPORT_SYMBOL(iwl_legacy_alloc_all); + +#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ +#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ +static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv, + struct ieee80211_sta_ht_cap *ht_info, + enum ieee80211_band band) +{ + u16 max_bit_rate = 0; + u8 rx_chains_num = priv->hw_params.rx_chains_num; + u8 tx_chains_num = priv->hw_params.tx_chains_num; + + ht_info->cap = 0; + memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); + + ht_info->ht_supported = true; + + ht_info->cap |= IEEE80211_HT_CAP_SGI_20; + max_bit_rate = MAX_BIT_RATE_20_MHZ; + if (priv->hw_params.ht40_channel & BIT(band)) { + ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; + ht_info->cap |= IEEE80211_HT_CAP_SGI_40; + ht_info->mcs.rx_mask[4] = 0x01; + max_bit_rate = MAX_BIT_RATE_40_MHZ; + } + + if (priv->cfg->mod_params->amsdu_size_8K) + ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; + + ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; + ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; + + ht_info->mcs.rx_mask[0] = 0xFF; + if (rx_chains_num >= 2) + ht_info->mcs.rx_mask[1] = 0xFF; + if (rx_chains_num >= 3) + ht_info->mcs.rx_mask[2] = 0xFF; + + /* Highest supported Rx data rate */ + max_bit_rate *= rx_chains_num; + WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK); + ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); + + /* Tx MCS capabilities */ + ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + if (tx_chains_num != rx_chains_num) { + ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; + ht_info->mcs.tx_params |= ((tx_chains_num - 1) << + IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); + } +} + +/** + * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based from eeprom + */ +int iwl_legacy_init_geos(struct iwl_priv *priv) +{ + struct iwl_channel_info *ch; + struct ieee80211_supported_band *sband; + struct ieee80211_channel *channels; + struct ieee80211_channel *geo_ch; + struct ieee80211_rate *rates; + int i = 0; + s8 max_tx_power = 0; + + if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates || + priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) { + IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n"); + 
set_bit(STATUS_GEO_CONFIGURED, &priv->status); + return 0; + } + + channels = kzalloc(sizeof(struct ieee80211_channel) * + priv->channel_count, GFP_KERNEL); + if (!channels) + return -ENOMEM; + + rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY), + GFP_KERNEL); + if (!rates) { + kfree(channels); + return -ENOMEM; + } + + /* 5.2GHz channels start after the 2.4GHz channels */ + sband = &priv->bands[IEEE80211_BAND_5GHZ]; + sband->channels = &channels[ARRAY_SIZE(iwlegacy_eeprom_band_1)]; + /* just OFDM */ + sband->bitrates = &rates[IWL_FIRST_OFDM_RATE]; + sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE; + + if (priv->cfg->sku & IWL_SKU_N) + iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap, + IEEE80211_BAND_5GHZ); + + sband = &priv->bands[IEEE80211_BAND_2GHZ]; + sband->channels = channels; + /* OFDM & CCK */ + sband->bitrates = rates; + sband->n_bitrates = IWL_RATE_COUNT_LEGACY; + + if (priv->cfg->sku & IWL_SKU_N) + iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap, + IEEE80211_BAND_2GHZ); + + priv->ieee_channels = channels; + priv->ieee_rates = rates; + + for (i = 0; i < priv->channel_count; i++) { + ch = &priv->channel_info[i]; + + if (!iwl_legacy_is_channel_valid(ch)) + continue; + + sband = &priv->bands[ch->band]; + + geo_ch = &sband->channels[sband->n_channels++]; + + geo_ch->center_freq = + ieee80211_channel_to_frequency(ch->channel, ch->band); + geo_ch->max_power = ch->max_power_avg; + geo_ch->max_antenna_gain = 0xff; + geo_ch->hw_value = ch->channel; + + if (iwl_legacy_is_channel_valid(ch)) { + if (!(ch->flags & EEPROM_CHANNEL_IBSS)) + geo_ch->flags |= IEEE80211_CHAN_NO_IBSS; + + if (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) + geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN; + + if (ch->flags & EEPROM_CHANNEL_RADAR) + geo_ch->flags |= IEEE80211_CHAN_RADAR; + + geo_ch->flags |= ch->ht40_extension_channel; + + if (ch->max_power_avg > max_tx_power) + max_tx_power = ch->max_power_avg; + } else { + geo_ch->flags |= IEEE80211_CHAN_DISABLED; + } + + IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", + ch->channel, geo_ch->center_freq, + iwl_legacy_is_channel_a_band(ch) ? "5.2" : "2.4", + geo_ch->flags & IEEE80211_CHAN_DISABLED ? + "restricted" : "valid", + geo_ch->flags); + } + + priv->tx_power_device_lmt = max_tx_power; + priv->tx_power_user_lmt = max_tx_power; + priv->tx_power_next = max_tx_power; + + if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) && + priv->cfg->sku & IWL_SKU_A) { + IWL_INFO(priv, "Incorrectly detected BG card as ABG. 
" + "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n", + priv->pci_dev->device, + priv->pci_dev->subsystem_device); + priv->cfg->sku &= ~IWL_SKU_A; + } + + IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n", + priv->bands[IEEE80211_BAND_2GHZ].n_channels, + priv->bands[IEEE80211_BAND_5GHZ].n_channels); + + set_bit(STATUS_GEO_CONFIGURED, &priv->status); + + return 0; +} +EXPORT_SYMBOL(iwl_legacy_init_geos); + +/* + * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos + */ +void iwl_legacy_free_geos(struct iwl_priv *priv) +{ + kfree(priv->ieee_channels); + kfree(priv->ieee_rates); + clear_bit(STATUS_GEO_CONFIGURED, &priv->status); +} +EXPORT_SYMBOL(iwl_legacy_free_geos); + +static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv, + enum ieee80211_band band, + u16 channel, u8 extension_chan_offset) +{ + const struct iwl_channel_info *ch_info; + + ch_info = iwl_legacy_get_channel_info(priv, band, channel); + if (!iwl_legacy_is_channel_valid(ch_info)) + return false; + + if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE) + return !(ch_info->ht40_extension_channel & + IEEE80211_CHAN_NO_HT40PLUS); + else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW) + return !(ch_info->ht40_extension_channel & + IEEE80211_CHAN_NO_HT40MINUS); + + return false; +} + +bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_sta_ht_cap *ht_cap) +{ + if (!ctx->ht.enabled || !ctx->ht.is_40mhz) + return false; + + /* + * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 + * the bit will not set if it is pure 40MHz case + */ + if (ht_cap && !ht_cap->ht_supported) + return false; + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + if (priv->disable_ht40) + return false; +#endif + + return iwl_legacy_is_channel_extension(priv, priv->band, + le16_to_cpu(ctx->staging.channel), + ctx->ht.extension_chan_offset); +} +EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed); + +static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) +{ + u16 new_val; + u16 beacon_factor; + + /* + * If mac80211 hasn't given us a beacon interval, program + * the default into the device. + */ + if (!beacon_val) + return DEFAULT_BEACON_INTERVAL; + + /* + * If the beacon interval we obtained from the peer + * is too large, we'll have to wake up more often + * (and in IBSS case, we'll beacon too much) + * + * For example, if max_beacon_val is 4096, and the + * requested beacon interval is 7000, we'll have to + * use 3500 to be able to wake up on the beacons. + * + * This could badly influence beacon detection stats. + */ + + beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val; + new_val = beacon_val / beacon_factor; + + if (!new_val) + new_val = max_beacon_val; + + return new_val; +} + +int +iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + u64 tsf; + s32 interval_tm, rem; + struct ieee80211_conf *conf = NULL; + u16 beacon_int; + struct ieee80211_vif *vif = ctx->vif; + + conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw); + + lockdep_assert_held(&priv->mutex); + + memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd)); + + ctx->timing.timestamp = cpu_to_le64(priv->timestamp); + ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval); + + beacon_int = vif ? 
vif->bss_conf.beacon_int : 0; + + /* + * TODO: For IBSS we need to get atim_window from mac80211, + * for now just always use 0 + */ + ctx->timing.atim_window = 0; + + beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int, + priv->hw_params.max_beacon_itrvl * TIME_UNIT); + ctx->timing.beacon_interval = cpu_to_le16(beacon_int); + + tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */ + interval_tm = beacon_int * TIME_UNIT; + rem = do_div(tsf, interval_tm); + ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem); + + ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1; + + IWL_DEBUG_ASSOC(priv, + "beacon interval %d beacon timer %d beacon tim %d\n", + le16_to_cpu(ctx->timing.beacon_interval), + le32_to_cpu(ctx->timing.beacon_init_val), + le16_to_cpu(ctx->timing.atim_window)); + + return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd, + sizeof(ctx->timing), &ctx->timing); +} +EXPORT_SYMBOL(iwl_legacy_send_rxon_timing); + +void +iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + int hw_decrypt) +{ + struct iwl_legacy_rxon_cmd *rxon = &ctx->staging; + + if (hw_decrypt) + rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; + else + rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; + +} +EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto); + +/* validate RXON structure is valid */ +int +iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + struct iwl_legacy_rxon_cmd *rxon = &ctx->staging; + bool error = false; + + if (rxon->flags & RXON_FLG_BAND_24G_MSK) { + if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) { + IWL_WARN(priv, "check 2.4G: wrong narrow\n"); + error = true; + } + if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) { + IWL_WARN(priv, "check 2.4G: wrong radar\n"); + error = true; + } + } else { + if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) { + IWL_WARN(priv, "check 5.2G: not short slot!\n"); + error = true; + } + if (rxon->flags & RXON_FLG_CCK_MSK) { + IWL_WARN(priv, "check 5.2G: CCK!\n"); + error = true; + } + } + if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) { + IWL_WARN(priv, "mac/bssid mcast!\n"); + error = true; + } + + /* make sure basic rates 6Mbps and 1Mbps are supported */ + if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 && + (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) { + IWL_WARN(priv, "neither 1 nor 6 are basic\n"); + error = true; + } + + if (le16_to_cpu(rxon->assoc_id) > 2007) { + IWL_WARN(priv, "aid > 2007\n"); + error = true; + } + + if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) + == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) { + IWL_WARN(priv, "CCK and short slot\n"); + error = true; + } + + if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) + == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) { + IWL_WARN(priv, "CCK and auto detect"); + error = true; + } + + if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK | + RXON_FLG_TGG_PROTECT_MSK)) == + RXON_FLG_TGG_PROTECT_MSK) { + IWL_WARN(priv, "TGg but no auto-detect\n"); + error = true; + } + + if (error) + IWL_WARN(priv, "Tuning to channel %d\n", + le16_to_cpu(rxon->channel)); + + if (error) { + IWL_ERR(priv, "Invalid RXON\n"); + return -EINVAL; + } + return 0; +} +EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd); + +/** + * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed + * @priv: staging_rxon is compared to active_rxon + * + * If the RXON structure is changing enough to require a new tune, + * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to 
indicate that + * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. + */ +int iwl_legacy_full_rxon_required(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + const struct iwl_legacy_rxon_cmd *staging = &ctx->staging; + const struct iwl_legacy_rxon_cmd *active = &ctx->active; + +#define CHK(cond) \ + if ((cond)) { \ + IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \ + return 1; \ + } + +#define CHK_NEQ(c1, c2) \ + if ((c1) != (c2)) { \ + IWL_DEBUG_INFO(priv, "need full RXON - " \ + #c1 " != " #c2 " - %d != %d\n", \ + (c1), (c2)); \ + return 1; \ + } + + /* These items are only settable from the full RXON command */ + CHK(!iwl_legacy_is_associated_ctx(ctx)); + CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr)); + CHK(compare_ether_addr(staging->node_addr, active->node_addr)); + CHK(compare_ether_addr(staging->wlap_bssid_addr, + active->wlap_bssid_addr)); + CHK_NEQ(staging->dev_type, active->dev_type); + CHK_NEQ(staging->channel, active->channel); + CHK_NEQ(staging->air_propagation, active->air_propagation); + CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates, + active->ofdm_ht_single_stream_basic_rates); + CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates, + active->ofdm_ht_dual_stream_basic_rates); + CHK_NEQ(staging->assoc_id, active->assoc_id); + + /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can + * be updated with the RXON_ASSOC command -- however only some + * flag transitions are allowed using RXON_ASSOC */ + + /* Check if we are not switching bands */ + CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK, + active->flags & RXON_FLG_BAND_24G_MSK); + + /* Check if we are switching association toggle */ + CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK, + active->filter_flags & RXON_FILTER_ASSOC_MSK); + +#undef CHK +#undef CHK_NEQ + + return 0; +} +EXPORT_SYMBOL(iwl_legacy_full_rxon_required); + +u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + /* + * Assign the lowest rate -- should really get this from + * the beacon skb from mac80211. 
+ */ + if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) + return IWL_RATE_1M_PLCP; + else + return IWL_RATE_6M_PLCP; +} +EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp); + +static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv, + struct iwl_ht_config *ht_conf, + struct iwl_rxon_context *ctx) +{ + struct iwl_legacy_rxon_cmd *rxon = &ctx->staging; + + if (!ctx->ht.enabled) { + rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | + RXON_FLG_HT40_PROT_MSK | + RXON_FLG_HT_PROT_MSK); + return; + } + + rxon->flags |= cpu_to_le32(ctx->ht.protection << + RXON_FLG_HT_OPERATING_MODE_POS); + + /* Set up channel bandwidth: + * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */ + /* clear the HT channel mode before set the mode */ + rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); + if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) { + /* pure ht40 */ + if (ctx->ht.protection == + IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) { + rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; + /* Note: control channel is opposite of extension channel */ + switch (ctx->ht.extension_chan_offset) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + rxon->flags &= + ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + rxon->flags |= + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + break; + } + } else { + /* Note: control channel is opposite of extension channel */ + switch (ctx->ht.extension_chan_offset) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + rxon->flags &= + ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); + rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + rxon->flags |= + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; + break; + case IEEE80211_HT_PARAM_CHA_SEC_NONE: + default: + /* channel location only valid if in Mixed mode */ + IWL_ERR(priv, + "invalid extension channel offset\n"); + break; + } + } + } else { + rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY; + } + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + + IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X " + "extension channel offset 0x%x\n", + le32_to_cpu(rxon->flags), ctx->ht.protection, + ctx->ht.extension_chan_offset); +} + +void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf) +{ + struct iwl_rxon_context *ctx; + + for_each_context(priv, ctx) + _iwl_legacy_set_rxon_ht(priv, ht_conf, ctx); +} +EXPORT_SYMBOL(iwl_legacy_set_rxon_ht); + +/* Return valid, unused, channel for a passive scan to reset the RF */ +u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv, + enum ieee80211_band band) +{ + const struct iwl_channel_info *ch_info; + int i; + u8 channel = 0; + u8 min, max; + struct iwl_rxon_context *ctx; + + if (band == IEEE80211_BAND_5GHZ) { + min = 14; + max = priv->channel_count; + } else { + min = 0; + max = 14; + } + + for (i = min; i < max; i++) { + bool busy = false; + + for_each_context(priv, ctx) { + busy = priv->channel_info[i].channel == + le16_to_cpu(ctx->staging.channel); + if (busy) + break; + } + + if (busy) + continue; + + channel = priv->channel_info[i].channel; + ch_info = iwl_legacy_get_channel_info(priv, band, channel); + if (iwl_legacy_is_channel_valid(ch_info)) + break; + } + + return channel; +} +EXPORT_SYMBOL(iwl_legacy_get_single_channel_number); + +/** + * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON + * @ch: requested channel as a pointer to struct ieee80211_channel + + 
* NOTE: Does not commit to the hardware; it sets appropriate bit fields + * in the staging RXON flag structure based on the ch->band + */ +int +iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, + struct iwl_rxon_context *ctx) +{ + enum ieee80211_band band = ch->band; + u16 channel = ch->hw_value; + + if ((le16_to_cpu(ctx->staging.channel) == channel) && + (priv->band == band)) + return 0; + + ctx->staging.channel = cpu_to_le16(channel); + if (band == IEEE80211_BAND_5GHZ) + ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK; + else + ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; + + priv->band = band; + + IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band); + + return 0; +} +EXPORT_SYMBOL(iwl_legacy_set_rxon_channel); + +void iwl_legacy_set_flags_for_band(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + enum ieee80211_band band, + struct ieee80211_vif *vif) +{ + if (band == IEEE80211_BAND_5GHZ) { + ctx->staging.flags &= + ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK + | RXON_FLG_CCK_MSK); + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; + } else { + /* Copied from iwl_post_associate() */ + if (vif && vif->bss_conf.use_short_slot) + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; + ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK; + ctx->staging.flags &= ~RXON_FLG_CCK_MSK; + } +} +EXPORT_SYMBOL(iwl_legacy_set_flags_for_band); + +/* + * initialize rxon structure with default values from eeprom + */ +void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + const struct iwl_channel_info *ch_info; + + memset(&ctx->staging, 0, sizeof(ctx->staging)); + + if (!ctx->vif) { + ctx->staging.dev_type = ctx->unused_devtype; + } else + switch (ctx->vif->type) { + + case NL80211_IFTYPE_STATION: + ctx->staging.dev_type = ctx->station_devtype; + ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; + break; + + case NL80211_IFTYPE_ADHOC: + ctx->staging.dev_type = ctx->ibss_devtype; + ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK; + ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK | + RXON_FILTER_ACCEPT_GRP_MSK; + break; + + default: + IWL_ERR(priv, "Unsupported interface type %d\n", + ctx->vif->type); + break; + } + +#if 0 + /* TODO: Figure out when short_preamble would be set and cache from + * that */ + if (!hw_to_local(priv->hw)->short_preamble) + ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; +#endif + + ch_info = iwl_legacy_get_channel_info(priv, priv->band, + le16_to_cpu(ctx->active.channel)); + + if (!ch_info) + ch_info = &priv->channel_info[0]; + + ctx->staging.channel = cpu_to_le16(ch_info->channel); + priv->band = ch_info->band; + + iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif); + + ctx->staging.ofdm_basic_rates = + (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; + ctx->staging.cck_basic_rates = + (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; + + /* clear both MIX and PURE40 mode flag */ + ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED | + RXON_FLG_CHANNEL_MODE_PURE_40); + if (ctx->vif) + memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN); + + ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff; + ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff; +} +EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config); + +void iwl_legacy_set_rate(struct iwl_priv *priv) +{ + const struct 
ieee80211_supported_band *hw = NULL; + struct ieee80211_rate *rate; + struct iwl_rxon_context *ctx; + int i; + + hw = iwl_get_hw_mode(priv, priv->band); + if (!hw) { + IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n"); + return; + } + + priv->active_rate = 0; + + for (i = 0; i < hw->n_bitrates; i++) { + rate = &(hw->bitrates[i]); + if (rate->hw_value < IWL_RATE_COUNT_LEGACY) + priv->active_rate |= (1 << rate->hw_value); + } + + IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate); + + for_each_context(priv, ctx) { + ctx->staging.cck_basic_rates = + (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; + + ctx->staging.ofdm_basic_rates = + (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; + } +} +EXPORT_SYMBOL(iwl_legacy_set_rate); + +void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success) +{ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) + ieee80211_chswitch_done(ctx->vif, is_success); +} +EXPORT_SYMBOL(iwl_legacy_chswitch_done); + +void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_csa_notification *csa = &(pkt->u.csa_notif); + + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active; + + if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) + return; + + if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) { + rxon->channel = csa->channel; + ctx->staging.channel = csa->channel; + IWL_DEBUG_11H(priv, "CSA notif: channel %d\n", + le16_to_cpu(csa->channel)); + iwl_legacy_chswitch_done(priv, true); + } else { + IWL_ERR(priv, "CSA notif (fail) : channel %d\n", + le16_to_cpu(csa->channel)); + iwl_legacy_chswitch_done(priv, false); + } +} +EXPORT_SYMBOL(iwl_legacy_rx_csa); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + struct iwl_legacy_rxon_cmd *rxon = &ctx->staging; + + IWL_DEBUG_RADIO(priv, "RX CONFIG:\n"); + iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); + IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", + le16_to_cpu(rxon->channel)); + IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); + IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n", + le32_to_cpu(rxon->filter_flags)); + IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type); + IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n", + rxon->ofdm_basic_rates); + IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", + rxon->cck_basic_rates); + IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr); + IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr); + IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", + le16_to_cpu(rxon->assoc_id)); +} +EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd); +#endif +/** + * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card + */ +void iwl_legacy_irq_handle_error(struct iwl_priv *priv) +{ + /* Set the FW error flag -- cleared on iwl_down */ + set_bit(STATUS_FW_ERROR, &priv->status); + + /* Cancel currently queued command. 
*/ + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + + IWL_ERR(priv, "Loaded firmware version: %s\n", + priv->hw->wiphy->fw_version); + + priv->cfg->ops->lib->dump_nic_error_log(priv); + if (priv->cfg->ops->lib->dump_fh) + priv->cfg->ops->lib->dump_fh(priv, NULL, false); +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) + iwl_legacy_print_rx_config_cmd(priv, + &priv->contexts[IWL_RXON_CTX_BSS]); +#endif + + wake_up(&priv->wait_command_queue); + + /* Keep the restart process from trying to send host + * commands by clearing the INIT status bit */ + clear_bit(STATUS_READY, &priv->status); + + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_DEBUG(priv, IWL_DL_FW_ERRORS, + "Restarting adapter due to uCode error.\n"); + + if (priv->cfg->mod_params->restart_fw) + queue_work(priv->workqueue, &priv->restart); + } +} +EXPORT_SYMBOL(iwl_legacy_irq_handle_error); + +static int iwl_legacy_apm_stop_master(struct iwl_priv *priv) +{ + int ret = 0; + + /* stop device's busmaster DMA activity */ + iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); + + ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED, + CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); + if (ret) + IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n"); + + IWL_DEBUG_INFO(priv, "stop master\n"); + + return ret; +} + +void iwl_legacy_apm_stop(struct iwl_priv *priv) +{ + IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n"); + + /* Stop device's DMA activity */ + iwl_legacy_apm_stop_master(priv); + + /* Reset the entire device */ + iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + + udelay(10); + + /* + * Clear "initialization complete" bit to move adapter from + * D0A* (powered-up Active) --> D0U* (Uninitialized) state. + */ + iwl_legacy_clear_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_INIT_DONE); +} +EXPORT_SYMBOL(iwl_legacy_apm_stop); + + +/* + * Start up NIC's basic functionality after it has been reset + * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop()) + * NOTE: This does not load uCode nor start the embedded processor + */ +int iwl_legacy_apm_init(struct iwl_priv *priv) +{ + int ret = 0; + u16 lctl; + + IWL_DEBUG_INFO(priv, "Init card's basic functions\n"); + + /* + * Use "set_bit" below rather than "write", to preserve any hardware + * bits already set by default after reset. + */ + + /* Disable L0S exit timer (platform NMI Work/Around) */ + iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS, + CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); + + /* + * Disable L0s without affecting L1; + * don't wait for ICH L0s (ICH bug W/A) + */ + iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS, + CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); + + /* Set FH wait threshold to maximum (HW error during stress W/A) */ + iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG, + CSR_DBG_HPET_MEM_REG_VAL); + + /* + * Enable HAP INTA (interrupt from management bus) to + * wake device's PCI Express link L1a -> L0s + * NOTE: This is no-op for 3945 (non-existent bit) + */ + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); + + /* + * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition. + * Check if BIOS (or OS) enabled L1-ASPM on this device. + * If so (likely), disable L0S, so device moves directly L0->L1; + * costs negligible amount of power savings. + * If not (unlikely), enable L0S, so there is at least some + * power savings, even without L1. 
+ */ + if (priv->cfg->base_params->set_l0s) { + lctl = iwl_legacy_pcie_link_ctl(priv); + if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == + PCI_CFG_LINK_CTRL_VAL_L1_EN) { + /* L1-ASPM enabled; disable(!) L0S */ + iwl_legacy_set_bit(priv, CSR_GIO_REG, + CSR_GIO_REG_VAL_L0S_ENABLED); + IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n"); + } else { + /* L1-ASPM disabled; enable(!) L0S */ + iwl_legacy_clear_bit(priv, CSR_GIO_REG, + CSR_GIO_REG_VAL_L0S_ENABLED); + IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n"); + } + } + + /* Configure analog phase-lock-loop before activating to D0A */ + if (priv->cfg->base_params->pll_cfg_val) + iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG, + priv->cfg->base_params->pll_cfg_val); + + /* + * Set "initialization complete" bit to move adapter from + * D0U* --> D0A* (powered-up active) state. + */ + iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + + /* + * Wait for clock stabilization; once stabilized, access to + * device-internal resources is supported, e.g. iwl_legacy_write_prph() + * and accesses to uCode SRAM. + */ + ret = iwl_poll_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); + if (ret < 0) { + IWL_DEBUG_INFO(priv, "Failed to init the card\n"); + goto out; + } + + /* + * Enable DMA and BSM (if used) clocks, wait for them to stabilize. + * BSM (Boostrap State Machine) is only in 3945 and 4965. + * + * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits + * do not disable clocks. This preserves any hardware bits already + * set by default in "CLK_CTRL_REG" after reset. + */ + if (priv->cfg->base_params->use_bsm) + iwl_legacy_write_prph(priv, APMG_CLK_EN_REG, + APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT); + else + iwl_legacy_write_prph(priv, APMG_CLK_EN_REG, + APMG_CLK_VAL_DMA_CLK_RQT); + udelay(20); + + /* Disable L1-Active */ + iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG, + APMG_PCIDEV_STT_VAL_L1_ACT_DIS); + +out: + return ret; +} +EXPORT_SYMBOL(iwl_legacy_apm_init); + + +int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) +{ + int ret; + s8 prev_tx_power; + bool defer; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + lockdep_assert_held(&priv->mutex); + + if (priv->tx_power_user_lmt == tx_power && !force) + return 0; + + if (!priv->cfg->ops->lib->send_tx_power) + return -EOPNOTSUPP; + + /* 0 dBm mean 1 milliwatt */ + if (tx_power < 0) { + IWL_WARN(priv, + "Requested user TXPOWER %d below 1 mW.\n", + tx_power); + return -EINVAL; + } + + if (tx_power > priv->tx_power_device_lmt) { + IWL_WARN(priv, + "Requested user TXPOWER %d above upper limit %d.\n", + tx_power, priv->tx_power_device_lmt); + return -EINVAL; + } + + if (!iwl_legacy_is_ready_rf(priv)) + return -EIO; + + /* scan complete and commit_rxon use tx_power_next value, + * it always need to be updated for newest request */ + priv->tx_power_next = tx_power; + + /* do not set tx power when scanning or channel changing */ + defer = test_bit(STATUS_SCANNING, &priv->status) || + memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)); + if (defer && !force) { + IWL_DEBUG_INFO(priv, "Deferring tx power set\n"); + return 0; + } + + prev_tx_power = priv->tx_power_user_lmt; + priv->tx_power_user_lmt = tx_power; + + ret = priv->cfg->ops->lib->send_tx_power(priv); + + /* if fail to set tx_power, restore the orig. 
tx power */ + if (ret) { + priv->tx_power_user_lmt = prev_tx_power; + priv->tx_power_next = prev_tx_power; + } + return ret; +} +EXPORT_SYMBOL(iwl_legacy_set_tx_power); + +void iwl_legacy_send_bt_config(struct iwl_priv *priv) +{ + struct iwl_bt_cmd bt_cmd = { + .lead_time = BT_LEAD_TIME_DEF, + .max_kill = BT_MAX_KILL_DEF, + .kill_ack_mask = 0, + .kill_cts_mask = 0, + }; + + if (!bt_coex_active) + bt_cmd.flags = BT_COEX_DISABLE; + else + bt_cmd.flags = BT_COEX_ENABLE; + + IWL_DEBUG_INFO(priv, "BT coex %s\n", + (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active"); + + if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG, + sizeof(struct iwl_bt_cmd), &bt_cmd)) + IWL_ERR(priv, "failed to send BT Coex Config\n"); +} +EXPORT_SYMBOL(iwl_legacy_send_bt_config); + +int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear) +{ + struct iwl_statistics_cmd statistics_cmd = { + .configuration_flags = + clear ? IWL_STATS_CONF_CLEAR_STATS : 0, + }; + + if (flags & CMD_ASYNC) + return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD, + sizeof(struct iwl_statistics_cmd), + &statistics_cmd, NULL); + else + return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, + sizeof(struct iwl_statistics_cmd), + &statistics_cmd); +} +EXPORT_SYMBOL(iwl_legacy_send_statistics_request); + +void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif); + IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n", + sleep->pm_sleep_mode, sleep->pm_wakeup_src); +#endif +} +EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif); + +void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled " + "notification for %s:\n", len, + iwl_legacy_get_cmd_string(pkt->hdr.cmd)); + iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len); +} +EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif); + +void iwl_legacy_rx_reply_error(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) " + "seq 0x%04X ser 0x%08X\n", + le32_to_cpu(pkt->u.err_resp.error_type), + iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id), + pkt->u.err_resp.cmd_id, + le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), + le32_to_cpu(pkt->u.err_resp.error_info)); +} +EXPORT_SYMBOL(iwl_legacy_rx_reply_error); + +void iwl_legacy_clear_isr_stats(struct iwl_priv *priv) +{ + memset(&priv->isr_stats, 0, sizeof(priv->isr_stats)); +} + +int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx; + unsigned long flags; + int q; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (!iwl_legacy_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n"); + return -EIO; + } + + if (queue >= AC_NUM) { + IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue); + return 0; + } + + q = AC_NUM - 1 - queue; + + spin_lock_irqsave(&priv->lock, flags); + + for_each_context(priv, ctx) { + ctx->qos_data.def_qos_parm.ac[q].cw_min = + cpu_to_le16(params->cw_min); + ctx->qos_data.def_qos_parm.ac[q].cw_max = + 
cpu_to_le16(params->cw_max); + ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; + ctx->qos_data.def_qos_parm.ac[q].edca_txop = + cpu_to_le16((params->txop * 32)); + + ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0; + } + + spin_unlock_irqrestore(&priv->lock, flags); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + return 0; +} +EXPORT_SYMBOL(iwl_legacy_mac_conf_tx); + +int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + return priv->ibss_manager == IWL_IBSS_MANAGER; +} +EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon); + +static int +iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + iwl_legacy_connection_init_rx_config(priv, ctx); + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + + return iwl_legacy_commit_rxon(priv, ctx); +} + +static int iwl_legacy_setup_interface(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + struct ieee80211_vif *vif = ctx->vif; + int err; + + lockdep_assert_held(&priv->mutex); + + /* + * This variable will be correct only when there's just + * a single context, but all code using it is for hardware + * that supports only one context. + */ + priv->iw_mode = vif->type; + + ctx->is_active = true; + + err = iwl_legacy_set_mode(priv, ctx); + if (err) { + if (!ctx->always_active) + ctx->is_active = false; + return err; + } + + return 0; +} + +int +iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + struct iwl_rxon_context *tmp, *ctx = NULL; + int err; + + IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n", + vif->type, vif->addr); + + mutex_lock(&priv->mutex); + + if (!iwl_legacy_is_ready_rf(priv)) { + IWL_WARN(priv, "Try to add interface when device not ready\n"); + err = -EINVAL; + goto out; + } + + for_each_context(priv, tmp) { + u32 possible_modes = + tmp->interface_modes | tmp->exclusive_interface_modes; + + if (tmp->vif) { + /* check if this busy context is exclusive */ + if (tmp->exclusive_interface_modes & + BIT(tmp->vif->type)) { + err = -EINVAL; + goto out; + } + continue; + } + + if (!(possible_modes & BIT(vif->type))) + continue; + + /* have maybe usable context w/o interface */ + ctx = tmp; + break; + } + + if (!ctx) { + err = -EOPNOTSUPP; + goto out; + } + + vif_priv->ctx = ctx; + ctx->vif = vif; + + err = iwl_legacy_setup_interface(priv, ctx); + if (!err) + goto out; + + ctx->vif = NULL; + priv->iw_mode = NL80211_IFTYPE_STATION; + out: + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + return err; +} +EXPORT_SYMBOL(iwl_legacy_mac_add_interface); + +static void iwl_legacy_teardown_interface(struct iwl_priv *priv, + struct ieee80211_vif *vif, + bool mode_change) +{ + struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); + + lockdep_assert_held(&priv->mutex); + + if (priv->scan_vif == vif) { + iwl_legacy_scan_cancel_timeout(priv, 200); + iwl_legacy_force_scan_end(priv); + } + + if (!mode_change) { + iwl_legacy_set_mode(priv, ctx); + if (!ctx->always_active) + ctx->is_active = false; + } +} + +void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + mutex_lock(&priv->mutex); + + WARN_ON(ctx->vif != vif); + ctx->vif = NULL; + + iwl_legacy_teardown_interface(priv, vif, false); + + 
memset(priv->bssid, 0, ETH_ALEN); + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + +} +EXPORT_SYMBOL(iwl_legacy_mac_remove_interface); + +int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv) +{ + if (!priv->txq) + priv->txq = kzalloc( + sizeof(struct iwl_tx_queue) * + priv->cfg->base_params->num_of_queues, + GFP_KERNEL); + if (!priv->txq) { + IWL_ERR(priv, "Not enough memory for txq\n"); + return -ENOMEM; + } + return 0; +} +EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem); + +void iwl_legacy_txq_mem(struct iwl_priv *priv) +{ + kfree(priv->txq); + priv->txq = NULL; +} +EXPORT_SYMBOL(iwl_legacy_txq_mem); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + +#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES) + +void iwl_legacy_reset_traffic_log(struct iwl_priv *priv) +{ + priv->tx_traffic_idx = 0; + priv->rx_traffic_idx = 0; + if (priv->tx_traffic) + memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE); + if (priv->rx_traffic) + memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE); +} + +int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv) +{ + u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE; + + if (iwlegacy_debug_level & IWL_DL_TX) { + if (!priv->tx_traffic) { + priv->tx_traffic = + kzalloc(traffic_size, GFP_KERNEL); + if (!priv->tx_traffic) + return -ENOMEM; + } + } + if (iwlegacy_debug_level & IWL_DL_RX) { + if (!priv->rx_traffic) { + priv->rx_traffic = + kzalloc(traffic_size, GFP_KERNEL); + if (!priv->rx_traffic) + return -ENOMEM; + } + } + iwl_legacy_reset_traffic_log(priv); + return 0; +} +EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem); + +void iwl_legacy_free_traffic_mem(struct iwl_priv *priv) +{ + kfree(priv->tx_traffic); + priv->tx_traffic = NULL; + + kfree(priv->rx_traffic); + priv->rx_traffic = NULL; +} +EXPORT_SYMBOL(iwl_legacy_free_traffic_mem); + +void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header) +{ + __le16 fc; + u16 len; + + if (likely(!(iwlegacy_debug_level & IWL_DL_TX))) + return; + + if (!priv->tx_traffic) + return; + + fc = header->frame_control; + if (ieee80211_is_data(fc)) { + len = (length > IWL_TRAFFIC_ENTRY_SIZE) + ? IWL_TRAFFIC_ENTRY_SIZE : length; + memcpy((priv->tx_traffic + + (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)), + header, len); + priv->tx_traffic_idx = + (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES; + } +} +EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame); + +void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header) +{ + __le16 fc; + u16 len; + + if (likely(!(iwlegacy_debug_level & IWL_DL_RX))) + return; + + if (!priv->rx_traffic) + return; + + fc = header->frame_control; + if (ieee80211_is_data(fc)) { + len = (length > IWL_TRAFFIC_ENTRY_SIZE) + ? 
IWL_TRAFFIC_ENTRY_SIZE : length; + memcpy((priv->rx_traffic + + (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)), + header, len); + priv->rx_traffic_idx = + (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES; + } +} +EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame); + +const char *iwl_legacy_get_mgmt_string(int cmd) +{ + switch (cmd) { + IWL_CMD(MANAGEMENT_ASSOC_REQ); + IWL_CMD(MANAGEMENT_ASSOC_RESP); + IWL_CMD(MANAGEMENT_REASSOC_REQ); + IWL_CMD(MANAGEMENT_REASSOC_RESP); + IWL_CMD(MANAGEMENT_PROBE_REQ); + IWL_CMD(MANAGEMENT_PROBE_RESP); + IWL_CMD(MANAGEMENT_BEACON); + IWL_CMD(MANAGEMENT_ATIM); + IWL_CMD(MANAGEMENT_DISASSOC); + IWL_CMD(MANAGEMENT_AUTH); + IWL_CMD(MANAGEMENT_DEAUTH); + IWL_CMD(MANAGEMENT_ACTION); + default: + return "UNKNOWN"; + + } +} + +const char *iwl_legacy_get_ctrl_string(int cmd) +{ + switch (cmd) { + IWL_CMD(CONTROL_BACK_REQ); + IWL_CMD(CONTROL_BACK); + IWL_CMD(CONTROL_PSPOLL); + IWL_CMD(CONTROL_RTS); + IWL_CMD(CONTROL_CTS); + IWL_CMD(CONTROL_ACK); + IWL_CMD(CONTROL_CFEND); + IWL_CMD(CONTROL_CFENDACK); + default: + return "UNKNOWN"; + + } +} + +void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv) +{ + memset(&priv->tx_stats, 0, sizeof(struct traffic_stats)); + memset(&priv->rx_stats, 0, sizeof(struct traffic_stats)); +} + +/* + * if CONFIG_IWLWIFI_LEGACY_DEBUGFS defined, + * iwl_legacy_update_stats function will + * record all the MGMT, CTRL and DATA pkt for both TX and Rx pass + * Use debugFs to display the rx/rx_statistics + * if CONFIG_IWLWIFI_LEGACY_DEBUGFS not being defined, then no MGMT and CTRL + * information will be recorded, but DATA pkt still will be recorded + * for the reason of iwl_led.c need to control the led blinking based on + * number of tx and rx data. + * + */ +void +iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len) +{ + struct traffic_stats *stats; + + if (is_tx) + stats = &priv->tx_stats; + else + stats = &priv->rx_stats; + + if (ieee80211_is_mgmt(fc)) { + switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { + case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): + stats->mgmt[MANAGEMENT_ASSOC_REQ]++; + break; + case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): + stats->mgmt[MANAGEMENT_ASSOC_RESP]++; + break; + case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): + stats->mgmt[MANAGEMENT_REASSOC_REQ]++; + break; + case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): + stats->mgmt[MANAGEMENT_REASSOC_RESP]++; + break; + case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): + stats->mgmt[MANAGEMENT_PROBE_REQ]++; + break; + case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): + stats->mgmt[MANAGEMENT_PROBE_RESP]++; + break; + case cpu_to_le16(IEEE80211_STYPE_BEACON): + stats->mgmt[MANAGEMENT_BEACON]++; + break; + case cpu_to_le16(IEEE80211_STYPE_ATIM): + stats->mgmt[MANAGEMENT_ATIM]++; + break; + case cpu_to_le16(IEEE80211_STYPE_DISASSOC): + stats->mgmt[MANAGEMENT_DISASSOC]++; + break; + case cpu_to_le16(IEEE80211_STYPE_AUTH): + stats->mgmt[MANAGEMENT_AUTH]++; + break; + case cpu_to_le16(IEEE80211_STYPE_DEAUTH): + stats->mgmt[MANAGEMENT_DEAUTH]++; + break; + case cpu_to_le16(IEEE80211_STYPE_ACTION): + stats->mgmt[MANAGEMENT_ACTION]++; + break; + } + } else if (ieee80211_is_ctl(fc)) { + switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { + case cpu_to_le16(IEEE80211_STYPE_BACK_REQ): + stats->ctrl[CONTROL_BACK_REQ]++; + break; + case cpu_to_le16(IEEE80211_STYPE_BACK): + stats->ctrl[CONTROL_BACK]++; + break; + case cpu_to_le16(IEEE80211_STYPE_PSPOLL): + stats->ctrl[CONTROL_PSPOLL]++; + break; + case cpu_to_le16(IEEE80211_STYPE_RTS): + stats->ctrl[CONTROL_RTS]++; + 
break; + case cpu_to_le16(IEEE80211_STYPE_CTS): + stats->ctrl[CONTROL_CTS]++; + break; + case cpu_to_le16(IEEE80211_STYPE_ACK): + stats->ctrl[CONTROL_ACK]++; + break; + case cpu_to_le16(IEEE80211_STYPE_CFEND): + stats->ctrl[CONTROL_CFEND]++; + break; + case cpu_to_le16(IEEE80211_STYPE_CFENDACK): + stats->ctrl[CONTROL_CFENDACK]++; + break; + } + } else { + /* data */ + stats->data_cnt++; + stats->data_bytes += len; + } +} +EXPORT_SYMBOL(iwl_legacy_update_stats); +#endif + +int iwl_legacy_force_reset(struct iwl_priv *priv, bool external) +{ + struct iwl_force_reset *force_reset; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return -EINVAL; + + force_reset = &priv->force_reset; + force_reset->reset_request_count++; + if (!external) { + if (force_reset->last_force_reset_jiffies && + time_after(force_reset->last_force_reset_jiffies + + force_reset->reset_duration, jiffies)) { + IWL_DEBUG_INFO(priv, "force reset rejected\n"); + force_reset->reset_reject_count++; + return -EAGAIN; + } + } + force_reset->reset_success_count++; + force_reset->last_force_reset_jiffies = jiffies; + + /* + * if the request is from external(ex: debugfs), + * then always perform the request in regardless the module + * parameter setting + * if the request is from internal (uCode error or driver + * detect failure), then fw_restart module parameter + * need to be check before performing firmware reload + */ + + if (!external && !priv->cfg->mod_params->restart_fw) { + IWL_DEBUG_INFO(priv, "Cancel firmware reload based on " + "module parameter setting\n"); + return 0; + } + + IWL_ERR(priv, "On demand firmware reload\n"); + + /* Set the FW error flag -- cleared on iwl_down */ + set_bit(STATUS_FW_ERROR, &priv->status); + wake_up(&priv->wait_command_queue); + /* + * Keep the restart process from trying to send host + * commands by clearing the INIT status bit + */ + clear_bit(STATUS_READY, &priv->status); + queue_work(priv->workqueue, &priv->restart); + + return 0; +} + +int +iwl_legacy_mac_change_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum nl80211_iftype newtype, bool newp2p) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); + struct iwl_rxon_context *tmp; + u32 interface_modes; + int err; + + newtype = ieee80211_iftype_p2p(newtype, newp2p); + + mutex_lock(&priv->mutex); + + if (!ctx->vif || !iwl_legacy_is_ready_rf(priv)) { + /* + * Huh? But wait ... this can maybe happen when + * we're in the middle of a firmware restart! + */ + err = -EBUSY; + goto out; + } + + interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes; + + if (!(interface_modes & BIT(newtype))) { + err = -EBUSY; + goto out; + } + + if (ctx->exclusive_interface_modes & BIT(newtype)) { + for_each_context(priv, tmp) { + if (ctx == tmp) + continue; + + if (!tmp->vif) + continue; + + /* + * The current mode switch would be exclusive, but + * another context is active ... refuse the switch. + */ + err = -EBUSY; + goto out; + } + } + + /* success */ + iwl_legacy_teardown_interface(priv, vif, true); + vif->type = newtype; + vif->p2p = newp2p; + err = iwl_legacy_setup_interface(priv, ctx); + WARN_ON(err); + /* + * We've switched internally, but submitting to the + * device may have failed for some reason. Mask this + * error, because otherwise mac80211 will not switch + * (and set the interface type back) and we'll be + * out of sync with it. 
+ */ + err = 0; + + out: + mutex_unlock(&priv->mutex); + return err; +} +EXPORT_SYMBOL(iwl_legacy_mac_change_interface); + +/* + * On every watchdog tick we check (latest) time stamp. If it does not + * change during timeout period and queue is not empty we reset firmware. + */ +static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt) +{ + struct iwl_tx_queue *txq = &priv->txq[cnt]; + struct iwl_queue *q = &txq->q; + unsigned long timeout; + int ret; + + if (q->read_ptr == q->write_ptr) { + txq->time_stamp = jiffies; + return 0; + } + + timeout = txq->time_stamp + + msecs_to_jiffies(priv->cfg->base_params->wd_timeout); + + if (time_after(jiffies, timeout)) { + IWL_ERR(priv, "Queue %d stuck for %u ms.\n", + q->id, priv->cfg->base_params->wd_timeout); + ret = iwl_legacy_force_reset(priv, false); + return (ret == -EAGAIN) ? 0 : 1; + } + + return 0; +} + +/* + * Making watchdog tick be a quarter of timeout assure we will + * discover the queue hung between timeout and 1.25*timeout + */ +#define IWL_WD_TICK(timeout) ((timeout) / 4) + +/* + * Watchdog timer callback, we check each tx queue for stuck, if if hung + * we reset the firmware. If everything is fine just rearm the timer. + */ +void iwl_legacy_bg_watchdog(unsigned long data) +{ + struct iwl_priv *priv = (struct iwl_priv *)data; + int cnt; + unsigned long timeout; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + timeout = priv->cfg->base_params->wd_timeout; + if (timeout == 0) + return; + + /* monitor and check for stuck cmd queue */ + if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue)) + return; + + /* monitor and check for other stuck queues */ + if (iwl_legacy_is_any_associated(priv)) { + for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) { + /* skip as we already checked the command queue */ + if (cnt == priv->cmd_queue) + continue; + if (iwl_legacy_check_stuck_queue(priv, cnt)) + return; + } + } + + mod_timer(&priv->watchdog, jiffies + + msecs_to_jiffies(IWL_WD_TICK(timeout))); +} +EXPORT_SYMBOL(iwl_legacy_bg_watchdog); + +void iwl_legacy_setup_watchdog(struct iwl_priv *priv) +{ + unsigned int timeout = priv->cfg->base_params->wd_timeout; + + if (timeout) + mod_timer(&priv->watchdog, + jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout))); + else + del_timer(&priv->watchdog); +} +EXPORT_SYMBOL(iwl_legacy_setup_watchdog); + +/* + * extended beacon time format + * time in usec will be changed into a 32-bit value in extended:internal format + * the extended part is the beacon counts + * the internal part is the time in usec within one beacon interval + */ +u32 +iwl_legacy_usecs_to_beacons(struct iwl_priv *priv, + u32 usec, u32 beacon_interval) +{ + u32 quot; + u32 rem; + u32 interval = beacon_interval * TIME_UNIT; + + if (!interval || !usec) + return 0; + + quot = (usec / interval) & + (iwl_legacy_beacon_time_mask_high(priv, + priv->hw_params.beacon_time_tsf_bits) >> + priv->hw_params.beacon_time_tsf_bits); + rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv, + priv->hw_params.beacon_time_tsf_bits); + + return (quot << priv->hw_params.beacon_time_tsf_bits) + rem; +} +EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons); + +/* base is usually what we get from ucode with each received frame, + * the same as HW timer counter counting down + */ +__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base, + u32 addon, u32 beacon_interval) +{ + u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv, + priv->hw_params.beacon_time_tsf_bits); + u32 addon_low = addon & 
iwl_legacy_beacon_time_mask_low(priv, + priv->hw_params.beacon_time_tsf_bits); + u32 interval = beacon_interval * TIME_UNIT; + u32 res = (base & iwl_legacy_beacon_time_mask_high(priv, + priv->hw_params.beacon_time_tsf_bits)) + + (addon & iwl_legacy_beacon_time_mask_high(priv, + priv->hw_params.beacon_time_tsf_bits)); + + if (base_low > addon_low) + res += base_low - addon_low; + else if (base_low < addon_low) { + res += interval + base_low - addon_low; + res += (1 << priv->hw_params.beacon_time_tsf_bits); + } else + res += (1 << priv->hw_params.beacon_time_tsf_bits); + + return cpu_to_le32(res); +} +EXPORT_SYMBOL(iwl_legacy_add_beacon_time); + +#ifdef CONFIG_PM + +int iwl_legacy_pci_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct iwl_priv *priv = pci_get_drvdata(pdev); + + /* + * This function is called when system goes into suspend state + * mac80211 will call iwl_mac_stop() from the mac80211 suspend function + * first but since iwl_mac_stop() has no knowledge of who the caller is, + * it will not call apm_ops.stop() to stop the DMA operation. + * Calling apm_ops.stop here to make sure we stop the DMA. + */ + iwl_legacy_apm_stop(priv); + + return 0; +} +EXPORT_SYMBOL(iwl_legacy_pci_suspend); + +int iwl_legacy_pci_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct iwl_priv *priv = pci_get_drvdata(pdev); + bool hw_rfkill = false; + + /* + * We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state. + */ + pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); + + iwl_legacy_enable_interrupts(priv); + + if (!(iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) + hw_rfkill = true; + + if (hw_rfkill) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill); + + return 0; +} +EXPORT_SYMBOL(iwl_legacy_pci_resume); + +const struct dev_pm_ops iwl_legacy_pm_ops = { + .suspend = iwl_legacy_pci_suspend, + .resume = iwl_legacy_pci_resume, + .freeze = iwl_legacy_pci_suspend, + .thaw = iwl_legacy_pci_resume, + .poweroff = iwl_legacy_pci_suspend, + .restore = iwl_legacy_pci_resume, +}; +EXPORT_SYMBOL(iwl_legacy_pm_ops); + +#endif /* CONFIG_PM */ + +static void +iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (!ctx->is_active) + return; + + ctx->qos_data.def_qos_parm.qos_flags = 0; + + if (ctx->qos_data.qos_active) + ctx->qos_data.def_qos_parm.qos_flags |= + QOS_PARAM_FLG_UPDATE_EDCA_MSK; + + if (ctx->ht.enabled) + ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; + + IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n", + ctx->qos_data.qos_active, + ctx->qos_data.def_qos_parm.qos_flags); + + iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd, + sizeof(struct iwl_qosparam_cmd), + &ctx->qos_data.def_qos_parm, NULL); +} + +/** + * iwl_legacy_mac_config - mac80211 config callback + */ +int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed) +{ + struct iwl_priv *priv = hw->priv; + const struct iwl_channel_info *ch_info; + struct ieee80211_conf *conf = &hw->conf; + struct ieee80211_channel *channel = conf->channel; + struct iwl_ht_config *ht_conf = &priv->current_ht_config; + struct iwl_rxon_context *ctx; + unsigned long flags = 0; + int ret = 0; + u16 ch; + int scan_active = 0; + bool ht_changed[NUM_IWL_RXON_CTX] = {}; + + if 
(WARN_ON(!priv->cfg->ops->legacy)) + return -EOPNOTSUPP; + + mutex_lock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n", + channel->hw_value, changed); + + if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) { + scan_active = 1; + IWL_DEBUG_MAC80211(priv, "scan active\n"); + } + + if (changed & (IEEE80211_CONF_CHANGE_SMPS | + IEEE80211_CONF_CHANGE_CHANNEL)) { + /* mac80211 uses static for non-HT which is what we want */ + priv->current_ht_config.smps = conf->smps_mode; + + /* + * Recalculate chain counts. + * + * If monitor mode is enabled then mac80211 will + * set up the SM PS mode to OFF if an HT channel is + * configured. + */ + if (priv->cfg->ops->hcmd->set_rxon_chain) + for_each_context(priv, ctx) + priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + } + + /* during scanning mac80211 will delay channel setting until + * scan finish with changed = 0 + */ + if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) { + if (scan_active) + goto set_ch_out; + + ch = channel->hw_value; + ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch); + if (!iwl_legacy_is_channel_valid(ch_info)) { + IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n"); + ret = -EINVAL; + goto set_ch_out; + } + + if (priv->iw_mode == NL80211_IFTYPE_ADHOC && + !iwl_legacy_is_channel_ibss(ch_info)) { + IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n"); + ret = -EINVAL; + goto set_ch_out; + } + + spin_lock_irqsave(&priv->lock, flags); + + for_each_context(priv, ctx) { + /* Configure HT40 channels */ + if (ctx->ht.enabled != conf_is_ht(conf)) { + ctx->ht.enabled = conf_is_ht(conf); + ht_changed[ctx->ctxid] = true; + } + if (ctx->ht.enabled) { + if (conf_is_ht40_minus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_BELOW; + ctx->ht.is_40mhz = true; + } else if (conf_is_ht40_plus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + ctx->ht.is_40mhz = true; + } else { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_NONE; + ctx->ht.is_40mhz = false; + } + } else + ctx->ht.is_40mhz = false; + + /* + * Default to no protection. 
Protection mode will + * later be set from BSS config in iwl_ht_conf + */ + ctx->ht.protection = + IEEE80211_HT_OP_MODE_PROTECTION_NONE; + + /* if we are switching from ht to 2.4 clear flags + * from any ht related info since 2.4 does not + * support ht */ + if ((le16_to_cpu(ctx->staging.channel) != ch)) + ctx->staging.flags = 0; + + iwl_legacy_set_rxon_channel(priv, channel, ctx); + iwl_legacy_set_rxon_ht(priv, ht_conf); + + iwl_legacy_set_flags_for_band(priv, ctx, channel->band, + ctx->vif); + } + + spin_unlock_irqrestore(&priv->lock, flags); + + if (priv->cfg->ops->legacy->update_bcast_stations) + ret = + priv->cfg->ops->legacy->update_bcast_stations(priv); + + set_ch_out: + /* The list of supported rates and rate mask can be different + * for each band; since the band may have changed, reset + * the rate mask to what mac80211 lists */ + iwl_legacy_set_rate(priv); + } + + if (changed & (IEEE80211_CONF_CHANGE_PS | + IEEE80211_CONF_CHANGE_IDLE)) { + ret = iwl_legacy_power_update_mode(priv, false); + if (ret) + IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n"); + } + + if (changed & IEEE80211_CONF_CHANGE_POWER) { + IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n", + priv->tx_power_user_lmt, conf->power_level); + + iwl_legacy_set_tx_power(priv, conf->power_level, false); + } + + if (!iwl_legacy_is_ready(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); + goto out; + } + + if (scan_active) + goto out; + + for_each_context(priv, ctx) { + if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging))) + iwl_legacy_commit_rxon(priv, ctx); + else + IWL_DEBUG_INFO(priv, + "Not re-sending same RXON configuration.\n"); + if (ht_changed[ctx->ctxid]) + iwl_legacy_update_qos(priv, ctx); + } + +out: + IWL_DEBUG_MAC80211(priv, "leave\n"); + mutex_unlock(&priv->mutex); + return ret; +} +EXPORT_SYMBOL(iwl_legacy_mac_config); + +void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_priv *priv = hw->priv; + unsigned long flags; + /* IBSS can only be the IWL_RXON_CTX_BSS context */ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + if (WARN_ON(!priv->cfg->ops->legacy)) + return; + + mutex_lock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "enter\n"); + + spin_lock_irqsave(&priv->lock, flags); + memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config)); + spin_unlock_irqrestore(&priv->lock, flags); + + spin_lock_irqsave(&priv->lock, flags); + + /* new association get rid of ibss beacon skb */ + if (priv->beacon_skb) + dev_kfree_skb(priv->beacon_skb); + + priv->beacon_skb = NULL; + + priv->timestamp = 0; + + spin_unlock_irqrestore(&priv->lock, flags); + + iwl_legacy_scan_cancel_timeout(priv, 100); + if (!iwl_legacy_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); + mutex_unlock(&priv->mutex); + return; + } + + /* we are restarting association process + * clear RXON_FILTER_ASSOC_MSK bit + */ + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_legacy_commit_rxon(priv, ctx); + + iwl_legacy_set_rate(priv); + + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} +EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf); + +static void iwl_legacy_ht_conf(struct iwl_priv *priv, + struct ieee80211_vif *vif) +{ + struct iwl_ht_config *ht_conf = &priv->current_ht_config; + struct ieee80211_sta *sta; + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); + + IWL_DEBUG_ASSOC(priv, "enter:\n"); + + if (!ctx->ht.enabled) + return; + + 
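	/*
	 * Derive the HT protection mode and the non-greenfield-STA-present
	 * flag for this context from the HT operation mode that mac80211
	 * reports in the BSS configuration.
	 */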
ctx->ht.protection = + bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION; + ctx->ht.non_gf_sta_present = + !!(bss_conf->ht_operation_mode & + IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); + + ht_conf->single_chain_sufficient = false; + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + rcu_read_lock(); + sta = ieee80211_find_sta(vif, bss_conf->bssid); + if (sta) { + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + int maxstreams; + + maxstreams = (ht_cap->mcs.tx_params & + IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK) + >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; + maxstreams += 1; + + if ((ht_cap->mcs.rx_mask[1] == 0) && + (ht_cap->mcs.rx_mask[2] == 0)) + ht_conf->single_chain_sufficient = true; + if (maxstreams <= 1) + ht_conf->single_chain_sufficient = true; + } else { + /* + * If at all, this can only happen through a race + * when the AP disconnects us while we're still + * setting up the connection, in that case mac80211 + * will soon tell us about that. + */ + ht_conf->single_chain_sufficient = true; + } + rcu_read_unlock(); + break; + case NL80211_IFTYPE_ADHOC: + ht_conf->single_chain_sufficient = true; + break; + default: + break; + } + + IWL_DEBUG_ASSOC(priv, "leave\n"); +} + +static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv, + struct ieee80211_vif *vif) +{ + struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); + + /* + * inform the ucode that there is no longer an + * association and that no more packets should be + * sent + */ + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + ctx->staging.assoc_id = 0; + iwl_legacy_commit_rxon(priv, ctx); +} + +static void iwl_legacy_beacon_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_priv *priv = hw->priv; + unsigned long flags; + __le64 timestamp; + struct sk_buff *skb = ieee80211_beacon_get(hw, vif); + + if (!skb) + return; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + lockdep_assert_held(&priv->mutex); + + if (!priv->beacon_ctx) { + IWL_ERR(priv, "update beacon but no beacon context!\n"); + dev_kfree_skb(skb); + return; + } + + spin_lock_irqsave(&priv->lock, flags); + + if (priv->beacon_skb) + dev_kfree_skb(priv->beacon_skb); + + priv->beacon_skb = skb; + + timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; + priv->timestamp = le64_to_cpu(timestamp); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + spin_unlock_irqrestore(&priv->lock, flags); + + if (!iwl_legacy_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n"); + return; + } + + priv->cfg->ops->legacy->post_associate(priv); +} + +void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); + int ret; + + if (WARN_ON(!priv->cfg->ops->legacy)) + return; + + IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes); + + mutex_lock(&priv->mutex); + + if (!iwl_legacy_is_alive(priv)) { + mutex_unlock(&priv->mutex); + return; + } + + if (changes & BSS_CHANGED_QOS) { + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + ctx->qos_data.qos_active = bss_conf->qos; + iwl_legacy_update_qos(priv, ctx); + spin_unlock_irqrestore(&priv->lock, flags); + } + + if (changes & BSS_CHANGED_BEACON_ENABLED) { + /* + * the add_interface code must make sure we only ever + * have a single interface that could be beaconing at + * any time. 
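	 * Track that context in priv->beacon_ctx here, so that
	 * iwl_legacy_beacon_update() has a context to post new beacons
	 * against; clear it again when beaconing is disabled.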
+ */ + if (vif->bss_conf.enable_beacon) + priv->beacon_ctx = ctx; + else + priv->beacon_ctx = NULL; + } + + if (changes & BSS_CHANGED_BSSID) { + IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid); + + /* + * If there is currently a HW scan going on in the + * background then we need to cancel it else the RXON + * below/in post_associate will fail. + */ + if (iwl_legacy_scan_cancel_timeout(priv, 100)) { + IWL_WARN(priv, + "Aborted scan still in progress after 100ms\n"); + IWL_DEBUG_MAC80211(priv, + "leaving - scan abort failed.\n"); + mutex_unlock(&priv->mutex); + return; + } + + /* mac80211 only sets assoc when in STATION mode */ + if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) { + memcpy(ctx->staging.bssid_addr, + bss_conf->bssid, ETH_ALEN); + + /* currently needed in a few places */ + memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN); + } else { + ctx->staging.filter_flags &= + ~RXON_FILTER_ASSOC_MSK; + } + + } + + /* + * This needs to be after setting the BSSID in case + * mac80211 decides to do both changes at once because + * it will invoke post_associate. + */ + if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON) + iwl_legacy_beacon_update(hw, vif); + + if (changes & BSS_CHANGED_ERP_PREAMBLE) { + IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n", + bss_conf->use_short_preamble); + if (bss_conf->use_short_preamble) + ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + } + + if (changes & BSS_CHANGED_ERP_CTS_PROT) { + IWL_DEBUG_MAC80211(priv, + "ERP_CTS %d\n", bss_conf->use_cts_prot); + if (bss_conf->use_cts_prot && + (priv->band != IEEE80211_BAND_5GHZ)) + ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK; + else + ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; + if (bss_conf->use_cts_prot) + ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; + else + ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN; + } + + if (changes & BSS_CHANGED_BASIC_RATES) { + /* XXX use this information + * + * To do that, remove code from iwl_legacy_set_rate() and put something + * like this here: + * + if (A-band) + ctx->staging.ofdm_basic_rates = + bss_conf->basic_rates; + else + ctx->staging.ofdm_basic_rates = + bss_conf->basic_rates >> 4; + ctx->staging.cck_basic_rates = + bss_conf->basic_rates & 0xF; + */ + } + + if (changes & BSS_CHANGED_HT) { + iwl_legacy_ht_conf(priv, vif); + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + } + + if (changes & BSS_CHANGED_ASSOC) { + IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc); + if (bss_conf->assoc) { + priv->timestamp = bss_conf->timestamp; + + if (!iwl_legacy_is_rfkill(priv)) + priv->cfg->ops->legacy->post_associate(priv); + } else + iwl_legacy_set_no_assoc(priv, vif); + } + + if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) { + IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n", + changes); + ret = iwl_legacy_send_rxon_assoc(priv, ctx); + if (!ret) { + /* Sync active_rxon with latest change. 
*/ + memcpy((void *)&ctx->active, + &ctx->staging, + sizeof(struct iwl_legacy_rxon_cmd)); + } + } + + if (changes & BSS_CHANGED_BEACON_ENABLED) { + if (vif->bss_conf.enable_beacon) { + memcpy(ctx->staging.bssid_addr, + bss_conf->bssid, ETH_ALEN); + memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN); + priv->cfg->ops->legacy->config_ap(priv); + } else + iwl_legacy_set_no_assoc(priv, vif); + } + + if (changes & BSS_CHANGED_IBSS) { + ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif, + bss_conf->ibss_joined); + if (ret) + IWL_ERR(priv, "failed to %s IBSS station %pM\n", + bss_conf->ibss_joined ? "add" : "remove", + bss_conf->bssid); + } + + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} +EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed); + +irqreturn_t iwl_legacy_isr(int irq, void *data) +{ + struct iwl_priv *priv = data; + u32 inta, inta_mask; + u32 inta_fh; + unsigned long flags; + if (!priv) + return IRQ_NONE; + + spin_lock_irqsave(&priv->lock, flags); + + /* Disable (but don't clear!) interrupts here to avoid + * back-to-back ISRs and sporadic interrupts from our NIC. + * If we have something to service, the tasklet will re-enable ints. + * If we *don't* have something, we'll re-enable before leaving here. */ + inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ + iwl_write32(priv, CSR_INT_MASK, 0x00000000); + + /* Discover which interrupts are active/pending */ + inta = iwl_read32(priv, CSR_INT); + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + + /* Ignore interrupt if there's nothing in NIC to service. + * This may be due to IRQ shared with another device, + * or due to sporadic interrupts thrown from our NIC. */ + if (!inta && !inta_fh) { + IWL_DEBUG_ISR(priv, + "Ignore interrupt, inta == 0, inta_fh == 0\n"); + goto none; + } + + if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { + /* Hardware disappeared. It might have already raised + * an interrupt */ + IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta); + goto unplugged; + } + + IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", + inta, inta_mask, inta_fh); + + inta &= ~CSR_INT_BIT_SCD; + + /* iwl_irq_tasklet() will service interrupts and re-enable them */ + if (likely(inta || inta_fh)) + tasklet_schedule(&priv->irq_tasklet); + +unplugged: + spin_unlock_irqrestore(&priv->lock, flags); + return IRQ_HANDLED; + +none: + /* re-enable interrupts here since we don't have anything to service. */ + /* only Re-enable if disabled by irq */ + if (test_bit(STATUS_INT_ENABLED, &priv->status)) + iwl_legacy_enable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + return IRQ_NONE; +} +EXPORT_SYMBOL(iwl_legacy_isr); + +/* + * iwl_legacy_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this + * function. 
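 * For frames whose rate-control info requests RTS/CTS, this sets
 * TX_CMD_FLG_RTS_MSK together with TX_CMD_FLG_FULL_TXOP_PROT_MSK; the
 * management frames that establish a connection (auth, deauth, assoc
 * and reassoc requests) use CTS-to-self (TX_CMD_FLG_CTS_MSK) instead of
 * RTS. A plain CTS-protect request sets the CTS and full-TXOP flags.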
+ */ +void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv, + struct ieee80211_tx_info *info, + __le16 fc, __le32 *tx_flags) +{ + if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) { + *tx_flags |= TX_CMD_FLG_RTS_MSK; + *tx_flags &= ~TX_CMD_FLG_CTS_MSK; + *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; + + if (!ieee80211_is_mgmt(fc)) + return; + + switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { + case cpu_to_le16(IEEE80211_STYPE_AUTH): + case cpu_to_le16(IEEE80211_STYPE_DEAUTH): + case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): + case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): + *tx_flags &= ~TX_CMD_FLG_RTS_MSK; + *tx_flags |= TX_CMD_FLG_CTS_MSK; + break; + } + } else if (info->control.rates[0].flags & + IEEE80211_TX_RC_USE_CTS_PROTECT) { + *tx_flags &= ~TX_CMD_FLG_RTS_MSK; + *tx_flags |= TX_CMD_FLG_CTS_MSK; + *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; + } +} +EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection); diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-core.h b/trunk/drivers/net/wireless/iwlegacy/iwl-core.h new file mode 100644 index 000000000000..d1271fe07d4b --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-core.h @@ -0,0 +1,636 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __iwl_legacy_core_h__ +#define __iwl_legacy_core_h__ + +/************************ + * forward declarations * + ************************/ +struct iwl_host_cmd; +struct iwl_cmd; + + +#define IWLWIFI_VERSION "in-tree:" +#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation" +#define DRV_AUTHOR "" + +#define IWL_PCI_DEVICE(dev, subdev, cfg) \ + .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = (subdev), \ + .driver_data = (kernel_ulong_t)&(cfg) + +#define TIME_UNIT 1024 + +#define IWL_SKU_G 0x1 +#define IWL_SKU_A 0x2 +#define IWL_SKU_N 0x8 + +#define IWL_CMD(x) case x: return #x + +struct iwl_hcmd_ops { + int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx); + int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx); + void (*set_rxon_chain)(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +}; + +struct iwl_hcmd_utils_ops { + u16 (*get_hcmd_size)(u8 cmd_id, u16 len); + u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd, + u8 *data); + int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif); + void (*post_scan)(struct iwl_priv *priv); +}; + +struct iwl_apm_ops { + int (*init)(struct iwl_priv *priv); + void (*config)(struct iwl_priv *priv); +}; + +struct iwl_debugfs_ops { + ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); + ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); + ssize_t (*general_stats_read)(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); +}; + +struct iwl_temp_ops { + void (*temperature)(struct iwl_priv *priv); +}; + +struct iwl_lib_ops { + /* set hw dependent parameters */ + int (*set_hw_params)(struct iwl_priv *priv); + /* Handling TX */ + void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + u16 byte_cnt); + int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + dma_addr_t addr, + u16 len, u8 reset, u8 pad); + void (*txq_free_tfd)(struct iwl_priv *priv, + struct iwl_tx_queue *txq); + int (*txq_init)(struct iwl_priv *priv, + struct iwl_tx_queue *txq); + /* setup Rx handler */ + void (*rx_handler_setup)(struct iwl_priv *priv); + /* alive notification after init uCode load */ + void (*init_alive_start)(struct iwl_priv *priv); + /* check validity of rtc data address */ + int (*is_valid_rtc_data_addr)(u32 addr); + /* 1st ucode load */ + int (*load_ucode)(struct iwl_priv *priv); + + void (*dump_nic_error_log)(struct iwl_priv *priv); + int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display); + int (*set_channel_switch)(struct iwl_priv *priv, + struct ieee80211_channel_switch *ch_switch); + /* power management */ + struct iwl_apm_ops apm_ops; + + /* power */ + int (*send_tx_power) (struct iwl_priv *priv); + void (*update_chain_flags)(struct iwl_priv *priv); + + /* 
eeprom operations (as defined in iwl-eeprom.h) */ + struct iwl_eeprom_ops eeprom_ops; + + /* temperature */ + struct iwl_temp_ops temp_ops; + + struct iwl_debugfs_ops debugfs_ops; + +}; + +struct iwl_led_ops { + int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd); +}; + +struct iwl_legacy_ops { + void (*post_associate)(struct iwl_priv *priv); + void (*config_ap)(struct iwl_priv *priv); + /* station management */ + int (*update_bcast_stations)(struct iwl_priv *priv); + int (*manage_ibss_station)(struct iwl_priv *priv, + struct ieee80211_vif *vif, bool add); +}; + +struct iwl_ops { + const struct iwl_lib_ops *lib; + const struct iwl_hcmd_ops *hcmd; + const struct iwl_hcmd_utils_ops *utils; + const struct iwl_led_ops *led; + const struct iwl_nic_ops *nic; + const struct iwl_legacy_ops *legacy; + const struct ieee80211_ops *ieee80211_ops; +}; + +struct iwl_mod_params { + int sw_crypto; /* def: 0 = using hardware encryption */ + int disable_hw_scan; /* def: 0 = use h/w scan */ + int num_of_queues; /* def: HW dependent */ + int disable_11n; /* def: 0 = 11n capabilities enabled */ + int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */ + int antenna; /* def: 0 = both antennas (use diversity) */ + int restart_fw; /* def: 1 = restart firmware */ +}; + +/* + * @led_compensation: compensate on the led on/off time per HW according + * to the deviation to achieve the desired led frequency. + * The detail algorithm is described in iwl-led.c + * @chain_noise_num_beacons: number of beacons used to compute chain noise + * @wd_timeout: TX queues watchdog timeout + * @temperature_kelvin: temperature report by uCode in kelvin + * @ucode_tracing: support ucode continuous tracing + * @sensitivity_calib_by_driver: driver has the capability to perform + * sensitivity calibration operation + * @chain_noise_calib_by_driver: driver has the capability to perform + * chain noise calibration operation + */ +struct iwl_base_params { + int eeprom_size; + int num_of_queues; /* def: HW dependent */ + int num_of_ampdu_queues;/* def: HW dependent */ + /* for iwl_legacy_apm_init() */ + u32 pll_cfg_val; + bool set_l0s; + bool use_bsm; + + u16 led_compensation; + int chain_noise_num_beacons; + unsigned int wd_timeout; + bool temperature_kelvin; + const bool ucode_tracing; + const bool sensitivity_calib_by_driver; + const bool chain_noise_calib_by_driver; +}; + +/** + * struct iwl_cfg + * @fw_name_pre: Firmware filename prefix. The api version and extension + * (.ucode) will be added to filename before loading from disk. The + * filename is constructed as fw_name_pre.ucode. + * @ucode_api_max: Highest version of uCode API supported by driver. + * @ucode_api_min: Lowest version of uCode API supported by driver. + * @scan_antennas: available antenna for scan operation + * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) + * + * We enable the driver to be backward compatible wrt API version. The + * driver specifies which APIs it supports (with @ucode_api_max being the + * highest and @ucode_api_min the lowest). Firmware will only be loaded if + * it has a supported API version. The firmware's API version will be + * stored in @iwl_priv, enabling the driver to make runtime changes based + * on firmware version used. + * + * For example, + * if (IWL_UCODE_API(priv->ucode_ver) >= 2) { + * Driver interacts with Firmware API version >= 2. + * } else { + * Driver interacts with Firmware API version 1. + * } + * + * The ideal usage of this infrastructure is to treat a new ucode API + * release as a new hardware revision. 
That is, through utilizing the + * iwl_hcmd_utils_ops etc. we accommodate different command structures + * and flows between hardware versions as well as their API + * versions. + * + */ +struct iwl_cfg { + /* params specific to an individual device within a device family */ + const char *name; + const char *fw_name_pre; + const unsigned int ucode_api_max; + const unsigned int ucode_api_min; + u8 valid_tx_ant; + u8 valid_rx_ant; + unsigned int sku; + u16 eeprom_ver; + u16 eeprom_calib_ver; + const struct iwl_ops *ops; + /* module based parameters which can be set from modprobe cmd */ + const struct iwl_mod_params *mod_params; + /* params not likely to change within a device family */ + struct iwl_base_params *base_params; + /* params likely to change within a device family */ + u8 scan_rx_antennas[IEEE80211_NUM_BANDS]; + enum iwl_led_mode led_mode; +}; + +/*************************** + * L i b * + ***************************/ + +struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg); +int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u16 queue, + const struct ieee80211_tx_queue_params *params); +int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw); +void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + int hw_decrypt); +int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +int iwl_legacy_full_rxon_required(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +int iwl_legacy_set_rxon_channel(struct iwl_priv *priv, + struct ieee80211_channel *ch, + struct iwl_rxon_context *ctx); +void iwl_legacy_set_flags_for_band(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + enum ieee80211_band band, + struct ieee80211_vif *vif); +u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv, + enum ieee80211_band band); +void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, + struct iwl_ht_config *ht_conf); +bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_sta_ht_cap *ht_cap); +void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +void iwl_legacy_set_rate(struct iwl_priv *priv); +int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + u32 decrypt_res, + struct ieee80211_rx_status *stats); +void iwl_legacy_irq_handle_error(struct iwl_priv *priv); +int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum nl80211_iftype newtype, bool newp2p); +int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv); +void iwl_legacy_txq_mem(struct iwl_priv *priv); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS +int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv); +void iwl_legacy_free_traffic_mem(struct iwl_priv *priv); +void iwl_legacy_reset_traffic_log(struct iwl_priv *priv); +void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header); +void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header); +const char *iwl_legacy_get_mgmt_string(int cmd); +const char *iwl_legacy_get_ctrl_string(int cmd); +void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv); +void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, + u16 len); +#else +static inline int 
iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv) +{ + return 0; +} +static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv) +{ +} +static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv) +{ +} +static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header) +{ +} +static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header) +{ +} +static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, + __le16 fc, u16 len) +{ +} +#endif +/***************************************************** + * RX handlers. + * **************************************************/ +void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl_legacy_rx_reply_error(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); + +/***************************************************** +* RX +******************************************************/ +void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv); +void iwl_legacy_cmd_queue_free(struct iwl_priv *priv); +int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv); +void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv, + struct iwl_rx_queue *q); +int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q); +void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +/* Handlers */ +void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl_legacy_recover_from_statistics(struct iwl_priv *priv, + struct iwl_rx_packet *pkt); +void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success); +void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); + +/* TX helpers */ + +/***************************************************** +* TX +******************************************************/ +void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, + struct iwl_tx_queue *txq); +int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, + int slots_num, u32 txq_id); +void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + int slots_num, u32 txq_id); +void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id); +void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id); +void iwl_legacy_setup_watchdog(struct iwl_priv *priv); +/***************************************************** + * TX power + ****************************************************/ +int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force); + +/******************************************************************************* + * Rate + ******************************************************************************/ + +u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); + +/******************************************************************************* + * Scanning + ******************************************************************************/ +void iwl_legacy_init_scan_params(struct iwl_priv *priv); +int iwl_legacy_scan_cancel(struct iwl_priv *priv); +int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); +void iwl_legacy_force_scan_end(struct iwl_priv *priv); +int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_scan_request *req); 
+void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv); +int iwl_legacy_force_reset(struct iwl_priv *priv, bool external); +u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv, + struct ieee80211_mgmt *frame, + const u8 *ta, const u8 *ie, int ie_len, int left); +void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv); +u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv, + enum ieee80211_band band, + u8 n_probes); +u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv, + enum ieee80211_band band, + struct ieee80211_vif *vif); +void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv); +void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv); + +/* For faster active scanning, scan will move to the next channel if fewer than + * PLCP_QUIET_THRESH packets are heard on this channel within + * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell + * time if it's a quiet channel (nothing responded to our probe, and there's + * no other traffic). + * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */ +#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */ +#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */ + +#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7) + +/***************************************************** + * S e n d i n g H o s t C o m m a n d s * + *****************************************************/ + +const char *iwl_legacy_get_cmd_string(u8 cmd); +int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv, + struct iwl_host_cmd *cmd); +int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); +int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, + u16 len, const void *data); +int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len, + const void *data, + void (*callback)(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt)); + +int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); + + +/***************************************************** + * PCI * + *****************************************************/ + +static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv) +{ + int pos; + u16 pci_lnk_ctl; + pos = pci_pcie_cap(priv->pci_dev); + pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl); + return pci_lnk_ctl; +} + +void iwl_legacy_bg_watchdog(unsigned long data); +u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv, + u32 usec, u32 beacon_interval); +__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base, + u32 addon, u32 beacon_interval); + +#ifdef CONFIG_PM +int iwl_legacy_pci_suspend(struct device *device); +int iwl_legacy_pci_resume(struct device *device); +extern const struct dev_pm_ops iwl_legacy_pm_ops; + +#define IWL_LEGACY_PM_OPS (&iwl_legacy_pm_ops) + +#else /* !CONFIG_PM */ + +#define IWL_LEGACY_PM_OPS NULL + +#endif /* !CONFIG_PM */ + +/***************************************************** +* Error Handling Debugging +******************************************************/ +void iwl4965_dump_nic_error_log(struct iwl_priv *priv); +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +#else +static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ +} +#endif + +void iwl_legacy_clear_isr_stats(struct iwl_priv *priv); + +/***************************************************** +* GEOS 
+******************************************************/ +int iwl_legacy_init_geos(struct iwl_priv *priv); +void iwl_legacy_free_geos(struct iwl_priv *priv); + +/*************** DRIVER STATUS FUNCTIONS *****/ + +#define STATUS_HCMD_ACTIVE 0 /* host command in progress */ +/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */ +#define STATUS_INT_ENABLED 2 +#define STATUS_RF_KILL_HW 3 +#define STATUS_CT_KILL 4 +#define STATUS_INIT 5 +#define STATUS_ALIVE 6 +#define STATUS_READY 7 +#define STATUS_TEMPERATURE 8 +#define STATUS_GEO_CONFIGURED 9 +#define STATUS_EXIT_PENDING 10 +#define STATUS_STATISTICS 12 +#define STATUS_SCANNING 13 +#define STATUS_SCAN_ABORTING 14 +#define STATUS_SCAN_HW 15 +#define STATUS_POWER_PMI 16 +#define STATUS_FW_ERROR 17 +#define STATUS_CHANNEL_SWITCH_PENDING 18 + +static inline int iwl_legacy_is_ready(struct iwl_priv *priv) +{ + /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are + * set but EXIT_PENDING is not */ + return test_bit(STATUS_READY, &priv->status) && + test_bit(STATUS_GEO_CONFIGURED, &priv->status) && + !test_bit(STATUS_EXIT_PENDING, &priv->status); +} + +static inline int iwl_legacy_is_alive(struct iwl_priv *priv) +{ + return test_bit(STATUS_ALIVE, &priv->status); +} + +static inline int iwl_legacy_is_init(struct iwl_priv *priv) +{ + return test_bit(STATUS_INIT, &priv->status); +} + +static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv) +{ + return test_bit(STATUS_RF_KILL_HW, &priv->status); +} + +static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv) +{ + return iwl_legacy_is_rfkill_hw(priv); +} + +static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv) +{ + return test_bit(STATUS_CT_KILL, &priv->status); +} + +static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv) +{ + + if (iwl_legacy_is_rfkill(priv)) + return 0; + + return iwl_legacy_is_ready(priv); +} + +extern void iwl_legacy_send_bt_config(struct iwl_priv *priv); +extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv, + u8 flags, bool clear); +void iwl_legacy_apm_stop(struct iwl_priv *priv); +int iwl_legacy_apm_init(struct iwl_priv *priv); + +int iwl_legacy_send_rxon_timing(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx); +} +static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + return priv->cfg->ops->hcmd->commit_rxon(priv, ctx); +} +static inline const struct ieee80211_supported_band *iwl_get_hw_mode( + struct iwl_priv *priv, enum ieee80211_band band) +{ + return priv->hw->wiphy->bands[band]; +} + +/* mac80211 handlers */ +int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed); +void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes); +void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv, + struct ieee80211_tx_info *info, + __le16 fc, __le32 *tx_flags); + +irqreturn_t iwl_legacy_isr(int irq, void *data); + +#endif /* __iwl_legacy_core_h__ */ diff --git a/trunk/drivers/net/wireless/iwlegacy/csr.h b/trunk/drivers/net/wireless/iwlegacy/iwl-csr.h similarity index 84% rename from trunk/drivers/net/wireless/iwlegacy/csr.h rename to trunk/drivers/net/wireless/iwlegacy/iwl-csr.h index 9138e15004fa..668a9616c269 100644 --- 
a/trunk/drivers/net/wireless/iwlegacy/csr.h +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-csr.h @@ -60,8 +60,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ -#ifndef __il_csr_h__ -#define __il_csr_h__ +#ifndef __iwl_legacy_csr_h__ +#define __iwl_legacy_csr_h__ /* * CSR (control and status registers) * @@ -70,9 +70,9 @@ * low power states due to driver-invoked device resets * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes. * - * Use _il_wr() and _il_rd() family to access these registers; + * Use iwl_write32() and iwl_read32() family to access these registers; * these provide simple PCI bus access, without waking up the MAC. - * Do not use il_wr() family for these registers; + * Do not use iwl_legacy_write_direct32() family for these registers; * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ. * The MAC (uCode processor, etc.) does not need to be powered up for accessing * the CSR registers. @@ -82,16 +82,16 @@ */ #define CSR_BASE (0x000) -#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */ -#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */ -#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */ -#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */ -#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack */ -#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */ -#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc */ +#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */ +#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */ +#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */ +#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */ +#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/ +#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */ +#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/ #define CSR_GP_CNTRL (CSR_BASE+0x024) -/* 2nd byte of CSR_INT_COALESCING, not accessible via _il_wr()! */ +/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */ #define CSR_INT_PERIODIC_REG (CSR_BASE+0x005) /* @@ -166,26 +166,26 @@ #define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000) #define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) -#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */ -#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */ -#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */ +#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */ +#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */ +#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */ -#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int */ -#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec */ +#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/ +#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/ /* interrupt flags in INTA, set by uCode or hardware (e.g. dma), * acknowledged (reset) by host writing "1" to flagged bits. 
*/ -#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */ -#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */ -#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */ -#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */ -#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */ -#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */ -#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */ -#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */ -#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */ -#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */ -#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */ +#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */ +#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */ +#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */ +#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */ +#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */ +#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */ +#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */ +#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */ +#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */ +#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */ +#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */ #define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \ CSR_INT_BIT_HW_ERR | \ @@ -197,20 +197,21 @@ CSR_INT_BIT_ALIVE) /* interrupt flags in FH (flow handler) (PCI busmaster DMA) */ -#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */ -#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */ -#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */ -#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */ -#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */ -#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */ -#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */ -#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */ +#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */ +#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */ +#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */ +#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */ +#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */ +#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */ +#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */ +#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */ #define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \ CSR39_FH_INT_BIT_RX_CHNL2 | \ CSR_FH_INT_BIT_RX_CHNL1 | \ CSR_FH_INT_BIT_RX_CHNL0) + #define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \ CSR_FH_INT_BIT_TX_CHNL1 | \ CSR_FH_INT_BIT_TX_CHNL0) @@ -284,6 +285,7 @@ #define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000) #define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) + /* EEPROM REG */ #define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) #define CSR_EEPROM_REG_BIT_CMD (0x00000002) @@ -291,18 +293,19 @@ #define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000) /* EEPROM GP */ -#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */ +#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */ #define 
CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180) #define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002) #define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004) /* GP REG */ -#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */ +#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */ #define CSR_GP_REG_NO_POWER_SAVE (0x00000000) #define CSR_GP_REG_MAC_POWER_SAVE (0x01000000) #define CSR_GP_REG_PHY_POWER_SAVE (0x02000000) #define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000) + /* CSR GIO */ #define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002) @@ -354,7 +357,7 @@ /* HPET MEM debug */ #define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000) -/* DRAM INT TBL */ +/* DRAM INT TABLE */ #define CSR_DRAM_INT_TBL_ENABLE (1 << 31) #define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27) @@ -365,13 +368,13 @@ * to indirectly access device's internal memory or registers that * may be powered-down. * - * Use il_wr()/il_rd() family + * Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family * for these registers; * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ * to make sure the MAC (uCode processor, etc.) is powered up for accessing * internal resources. * - * Do not use _il_wr()/_il_rd() family to access these registers; + * Do not use iwl_write32()/iwl_read32() family to access these registers; * these provide only simple PCI bus access, without waking up the MAC. */ #define HBUS_BASE (0x400) @@ -408,12 +411,12 @@ #define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050) /* - * Per-Tx-queue write pointer (idx, really!) - * Indicates idx to next TFD that driver will fill (1 past latest filled). + * Per-Tx-queue write pointer (index, really!) + * Indicates index to next TFD that driver will fill (1 past latest filled). * Bit usage: - * 0-7: queue write idx + * 0-7: queue write index * 11-8: queue selector */ #define HBUS_TARG_WRPTR (HBUS_BASE+0x060) -#endif /* !__il_csr_h__ */ +#endif /* !__iwl_legacy_csr_h__ */ diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-debug.h b/trunk/drivers/net/wireless/iwlegacy/iwl-debug.h new file mode 100644 index 000000000000..ae13112701bf --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-debug.h @@ -0,0 +1,198 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_legacy_debug_h__ +#define __iwl_legacy_debug_h__ + +struct iwl_priv; +extern u32 iwlegacy_debug_level; + +#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a) +#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a) +#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a) +#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a) + +#define iwl_print_hex_error(priv, p, len) \ +do { \ + print_hex_dump(KERN_ERR, "iwl data: ", \ + DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ +} while (0) + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +#define IWL_DEBUG(__priv, level, fmt, args...) \ +do { \ + if (iwl_legacy_get_debug_level(__priv) & (level)) \ + dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \ + "%c %s " fmt, in_interrupt() ? 'I' : 'U', \ + __func__ , ## args); \ +} while (0) + +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \ +do { \ + if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit()) \ + dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \ + "%c %s " fmt, in_interrupt() ? 'I' : 'U', \ + __func__ , ## args); \ +} while (0) + +#define iwl_print_hex_dump(priv, level, p, len) \ +do { \ + if (iwl_legacy_get_debug_level(priv) & level) \ + print_hex_dump(KERN_DEBUG, "iwl data: ", \ + DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ +} while (0) + +#else +#define IWL_DEBUG(__priv, level, fmt, args...) +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) +static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level, + const void *p, u32 len) +{} +#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */ + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS +int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name); +void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv); +#else +static inline int +iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name) +{ + return 0; +} +static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv) +{ +} +#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */ + +/* + * To use the debug system: + * + * If you are defining a new debug classification, simply add it to the #define + * list here in the form of + * + * #define IWL_DL_xxxx VALUE + * + * where xxxx should be the name of the classification (for example, WEP). + * + * You then need to either add a IWL_xxxx_DEBUG() macro definition for your + * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want + * to send output to that classification. + * + * The active debug levels can be accessed via files + * + * /sys/module/iwl4965/parameters/debug{50} + * /sys/module/iwl3945/parameters/debug + * /sys/class/net/wlan0/device/debug_level + * + * when CONFIG_IWLWIFI_LEGACY_DEBUG=y. 
+ */ + +/* 0x0000000F - 0x00000001 */ +#define IWL_DL_INFO (1 << 0) +#define IWL_DL_MAC80211 (1 << 1) +#define IWL_DL_HCMD (1 << 2) +#define IWL_DL_STATE (1 << 3) +/* 0x000000F0 - 0x00000010 */ +#define IWL_DL_MACDUMP (1 << 4) +#define IWL_DL_HCMD_DUMP (1 << 5) +#define IWL_DL_EEPROM (1 << 6) +#define IWL_DL_RADIO (1 << 7) +/* 0x00000F00 - 0x00000100 */ +#define IWL_DL_POWER (1 << 8) +#define IWL_DL_TEMP (1 << 9) +#define IWL_DL_NOTIF (1 << 10) +#define IWL_DL_SCAN (1 << 11) +/* 0x0000F000 - 0x00001000 */ +#define IWL_DL_ASSOC (1 << 12) +#define IWL_DL_DROP (1 << 13) +#define IWL_DL_TXPOWER (1 << 14) +#define IWL_DL_AP (1 << 15) +/* 0x000F0000 - 0x00010000 */ +#define IWL_DL_FW (1 << 16) +#define IWL_DL_RF_KILL (1 << 17) +#define IWL_DL_FW_ERRORS (1 << 18) +#define IWL_DL_LED (1 << 19) +/* 0x00F00000 - 0x00100000 */ +#define IWL_DL_RATE (1 << 20) +#define IWL_DL_CALIB (1 << 21) +#define IWL_DL_WEP (1 << 22) +#define IWL_DL_TX (1 << 23) +/* 0x0F000000 - 0x01000000 */ +#define IWL_DL_RX (1 << 24) +#define IWL_DL_ISR (1 << 25) +#define IWL_DL_HT (1 << 26) +#define IWL_DL_IO (1 << 27) +/* 0xF0000000 - 0x10000000 */ +#define IWL_DL_11H (1 << 28) +#define IWL_DL_STATS (1 << 29) +#define IWL_DL_TX_REPLY (1 << 30) +#define IWL_DL_QOS (1 << 31) + +#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a) +#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a) +#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a) +#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a) +#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a) +#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a) +#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a) +#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a) +#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a) +#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a) +#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a) +#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a) +#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a) +#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a) +#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a) +#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a) +#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a) +#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a) +#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a) +#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a) +#define IWL_DEBUG_IO(p, f, a...) IWL_DEBUG(p, IWL_DL_IO, f, ## a) +#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a) +#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a) +#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a) +#define IWL_DEBUG_ASSOC(p, f, a...) \ + IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) +#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) +#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a) +#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a) +#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a) +#define IWL_DEBUG_TX_REPLY(p, f, a...) 
IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a) +#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a) +#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a) +#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a) +#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a) +#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a) + +#endif diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/trunk/drivers/net/wireless/iwlegacy/iwl-debugfs.c new file mode 100644 index 000000000000..1407dca70def --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-debugfs.c @@ -0,0 +1,1314 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ +#include +#include +#include + + +#include "iwl-dev.h" +#include "iwl-debug.h" +#include "iwl-core.h" +#include "iwl-io.h" + +/* create and remove of files */ +#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ + if (!debugfs_create_file(#name, mode, parent, priv, \ + &iwl_legacy_dbgfs_##name##_ops)) \ + goto err; \ +} while (0) + +#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \ + struct dentry *__tmp; \ + __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \ + parent, ptr); \ + if (IS_ERR(__tmp) || !__tmp) \ + goto err; \ +} while (0) + +#define DEBUGFS_ADD_X32(name, parent, ptr) do { \ + struct dentry *__tmp; \ + __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \ + parent, ptr); \ + if (IS_ERR(__tmp) || !__tmp) \ + goto err; \ +} while (0) + +/* file operation */ +#define DEBUGFS_READ_FUNC(name) \ +static ssize_t iwl_legacy_dbgfs_##name##_read(struct file *file, \ + char __user *user_buf, \ + size_t count, loff_t *ppos); + +#define DEBUGFS_WRITE_FUNC(name) \ +static ssize_t iwl_legacy_dbgfs_##name##_write(struct file *file, \ + const char __user *user_buf, \ + size_t count, loff_t *ppos); + + +static int +iwl_legacy_dbgfs_open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +#define DEBUGFS_READ_FILE_OPS(name) \ + DEBUGFS_READ_FUNC(name); \ +static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \ + .read = iwl_legacy_dbgfs_##name##_read, \ + .open = iwl_legacy_dbgfs_open_file_generic, \ + .llseek = generic_file_llseek, \ +}; + +#define DEBUGFS_WRITE_FILE_OPS(name) \ + DEBUGFS_WRITE_FUNC(name); \ +static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \ + 
.write = iwl_legacy_dbgfs_##name##_write, \ + .open = iwl_legacy_dbgfs_open_file_generic, \ + .llseek = generic_file_llseek, \ +}; + +#define DEBUGFS_READ_WRITE_FILE_OPS(name) \ + DEBUGFS_READ_FUNC(name); \ + DEBUGFS_WRITE_FUNC(name); \ +static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \ + .write = iwl_legacy_dbgfs_##name##_write, \ + .read = iwl_legacy_dbgfs_##name##_read, \ + .open = iwl_legacy_dbgfs_open_file_generic, \ + .llseek = generic_file_llseek, \ +}; + +static ssize_t iwl_legacy_dbgfs_tx_statistics_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char *buf; + int pos = 0; + + int cnt; + ssize_t ret; + const size_t bufsz = 100 + + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX); + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + pos += scnprintf(buf + pos, bufsz - pos, "Management:\n"); + for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\t%25s\t\t: %u\n", + iwl_legacy_get_mgmt_string(cnt), + priv->tx_stats.mgmt[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "Control\n"); + for (cnt = 0; cnt < CONTROL_MAX; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\t%25s\t\t: %u\n", + iwl_legacy_get_ctrl_string(cnt), + priv->tx_stats.ctrl[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "Data:\n"); + pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n", + priv->tx_stats.data_cnt); + pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n", + priv->tx_stats.data_bytes); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t +iwl_legacy_dbgfs_clear_traffic_statistics_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + u32 clear_flag; + char buf[8]; + int buf_size; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%x", &clear_flag) != 1) + return -EFAULT; + iwl_legacy_clear_traffic_stats(priv); + + return count; +} + +static ssize_t iwl_legacy_dbgfs_rx_statistics_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char *buf; + int pos = 0; + int cnt; + ssize_t ret; + const size_t bufsz = 100 + + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX); + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "Management:\n"); + for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\t%25s\t\t: %u\n", + iwl_legacy_get_mgmt_string(cnt), + priv->rx_stats.mgmt[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "Control:\n"); + for (cnt = 0; cnt < CONTROL_MAX; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\t%25s\t\t: %u\n", + iwl_legacy_get_ctrl_string(cnt), + priv->rx_stats.ctrl[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "Data:\n"); + pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n", + priv->rx_stats.data_cnt); + pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n", + priv->rx_stats.data_bytes); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +#define BYTE1_MASK 0x000000ff; +#define BYTE2_MASK 0x0000ffff; +#define BYTE3_MASK 0x00ffffff; +static ssize_t iwl_legacy_dbgfs_sram_read(struct file *file, + char __user 
*user_buf, + size_t count, loff_t *ppos) +{ + u32 val; + char *buf; + ssize_t ret; + int i; + int pos = 0; + struct iwl_priv *priv = file->private_data; + size_t bufsz; + + /* default is to dump the entire data segment */ + if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { + priv->dbgfs_sram_offset = 0x800000; + if (priv->ucode_type == UCODE_INIT) + priv->dbgfs_sram_len = priv->ucode_init_data.len; + else + priv->dbgfs_sram_len = priv->ucode_data.len; + } + bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10; + buf = kmalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", + priv->dbgfs_sram_len); + pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", + priv->dbgfs_sram_offset); + for (i = priv->dbgfs_sram_len; i > 0; i -= 4) { + val = iwl_legacy_read_targ_mem(priv, priv->dbgfs_sram_offset + \ + priv->dbgfs_sram_len - i); + if (i < 4) { + switch (i) { + case 1: + val &= BYTE1_MASK; + break; + case 2: + val &= BYTE2_MASK; + break; + case 3: + val &= BYTE3_MASK; + break; + } + } + if (!(i % 16)) + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_sram_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[64]; + int buf_size; + u32 offset, len; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (sscanf(buf, "%x,%x", &offset, &len) == 2) { + priv->dbgfs_sram_offset = offset; + priv->dbgfs_sram_len = len; + } else { + priv->dbgfs_sram_offset = 0; + priv->dbgfs_sram_len = 0; + } + + return count; +} + +static ssize_t +iwl_legacy_dbgfs_stations_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + struct iwl_station_entry *station; + int max_sta = priv->hw_params.max_stations; + char *buf; + int i, j, pos = 0; + ssize_t ret; + /* Add 30 for initial string */ + const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations); + + buf = kmalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n", + priv->num_stations); + + for (i = 0; i < max_sta; i++) { + station = &priv->stations[i]; + if (!station->used) + continue; + pos += scnprintf(buf + pos, bufsz - pos, + "station %d - addr: %pM, flags: %#x\n", + i, station->sta.sta.addr, + station->sta.station_flags_msk); + pos += scnprintf(buf + pos, bufsz - pos, + "TID\tseq_num\ttxq_id\tframes\ttfds\t"); + pos += scnprintf(buf + pos, bufsz - pos, + "start_idx\tbitmap\t\t\trate_n_flags\n"); + + for (j = 0; j < MAX_TID_COUNT; j++) { + pos += scnprintf(buf + pos, bufsz - pos, + "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x", + j, station->tid[j].seq_number, + station->tid[j].agg.txq_id, + station->tid[j].agg.frame_count, + station->tid[j].tfds_in_queue, + station->tid[j].agg.start_idx, + station->tid[j].agg.bitmap, + station->tid[j].agg.rate_n_flags); + + if (station->tid[j].agg.wait_for_ba) + pos += scnprintf(buf + pos, bufsz - pos, + " - waitforba"); + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + } + + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + } + + ret = simple_read_from_buffer(user_buf, 
count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file, + char __user *user_buf, + size_t count, + loff_t *ppos) +{ + ssize_t ret; + struct iwl_priv *priv = file->private_data; + int pos = 0, ofs = 0, buf_size = 0; + const u8 *ptr; + char *buf; + u16 eeprom_ver; + size_t eeprom_len = priv->cfg->base_params->eeprom_size; + buf_size = 4 * eeprom_len + 256; + + if (eeprom_len % 16) { + IWL_ERR(priv, "NVM size is not multiple of 16.\n"); + return -ENODATA; + } + + ptr = priv->eeprom; + if (!ptr) { + IWL_ERR(priv, "Invalid EEPROM memory\n"); + return -ENOMEM; + } + + /* 4 characters for byte 0xYY */ + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION); + pos += scnprintf(buf + pos, buf_size - pos, "EEPROM " + "version: 0x%x\n", eeprom_ver); + for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) { + pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); + hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos, + buf_size - pos, 0); + pos += strlen(buf + pos); + if (buf_size - pos > 0) + buf[pos++] = '\n'; + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t +iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + struct ieee80211_channel *channels = NULL; + const struct ieee80211_supported_band *supp_band = NULL; + int pos = 0, i, bufsz = PAGE_SIZE; + char *buf; + ssize_t ret; + + if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ); + if (supp_band) { + channels = supp_band->channels; + + pos += scnprintf(buf + pos, bufsz - pos, + "Displaying %d channels in 2.4GHz band 802.11bg):\n", + supp_band->n_channels); + + for (i = 0; i < supp_band->n_channels; i++) + pos += scnprintf(buf + pos, bufsz - pos, + "%d: %ddBm: BSS%s%s, %s.\n", + channels[i].hw_value, + channels[i].max_power, + channels[i].flags & IEEE80211_CHAN_RADAR ? + " (IEEE 802.11h required)" : "", + ((channels[i].flags & IEEE80211_CHAN_NO_IBSS) + || (channels[i].flags & + IEEE80211_CHAN_RADAR)) ? "" : + ", IBSS", + channels[i].flags & + IEEE80211_CHAN_PASSIVE_SCAN ? + "passive only" : "active/passive"); + } + supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ); + if (supp_band) { + channels = supp_band->channels; + + pos += scnprintf(buf + pos, bufsz - pos, + "Displaying %d channels in 5.2GHz band (802.11a)\n", + supp_band->n_channels); + + for (i = 0; i < supp_band->n_channels; i++) + pos += scnprintf(buf + pos, bufsz - pos, + "%d: %ddBm: BSS%s%s, %s.\n", + channels[i].hw_value, + channels[i].max_power, + channels[i].flags & IEEE80211_CHAN_RADAR ? + " (IEEE 802.11h required)" : "", + ((channels[i].flags & IEEE80211_CHAN_NO_IBSS) + || (channels[i].flags & + IEEE80211_CHAN_RADAR)) ? "" : + ", IBSS", + channels[i].flags & + IEEE80211_CHAN_PASSIVE_SCAN ? 
+ "passive only" : "active/passive"); + } + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_status_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[512]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n", + test_bit(STATUS_HCMD_ACTIVE, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n", + test_bit(STATUS_INT_ENABLED, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n", + test_bit(STATUS_RF_KILL_HW, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n", + test_bit(STATUS_CT_KILL, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n", + test_bit(STATUS_INIT, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n", + test_bit(STATUS_ALIVE, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n", + test_bit(STATUS_READY, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n", + test_bit(STATUS_TEMPERATURE, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n", + test_bit(STATUS_GEO_CONFIGURED, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n", + test_bit(STATUS_EXIT_PENDING, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n", + test_bit(STATUS_STATISTICS, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n", + test_bit(STATUS_SCANNING, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n", + test_bit(STATUS_SCAN_ABORTING, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n", + test_bit(STATUS_SCAN_HW, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n", + test_bit(STATUS_POWER_PMI, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n", + test_bit(STATUS_FW_ERROR, &priv->status)); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_legacy_dbgfs_interrupt_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + int cnt = 0; + char *buf; + int bufsz = 24 * 64; /* 24 items * 64 char per item */ + ssize_t ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + pos += scnprintf(buf + pos, bufsz - pos, + "Interrupt Statistics Report:\n"); + + pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", + priv->isr_stats.hw); + pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", + priv->isr_stats.sw); + if (priv->isr_stats.sw || priv->isr_stats.hw) { + pos += scnprintf(buf + pos, bufsz - pos, + "\tLast Restarting Code: 0x%X\n", + priv->isr_stats.err_code); + } +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", + priv->isr_stats.sch); + pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", + priv->isr_stats.alive); +#endif + pos += scnprintf(buf + pos, bufsz - pos, + "HW RF KILL switch toggled:\t %u\n", + priv->isr_stats.rfkill); + + pos += scnprintf(buf + pos, 
bufsz - pos, "CT KILL:\t\t\t %u\n", + priv->isr_stats.ctkill); + + pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", + priv->isr_stats.wakeup); + + pos += scnprintf(buf + pos, bufsz - pos, + "Rx command responses:\t\t %u\n", + priv->isr_stats.rx); + for (cnt = 0; cnt < REPLY_MAX; cnt++) { + if (priv->isr_stats.rx_handlers[cnt] > 0) + pos += scnprintf(buf + pos, bufsz - pos, + "\tRx handler[%36s]:\t\t %u\n", + iwl_legacy_get_cmd_string(cnt), + priv->isr_stats.rx_handlers[cnt]); + } + + pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", + priv->isr_stats.tx); + + pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", + priv->isr_stats.unhandled); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_interrupt_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + u32 reset_flag; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%x", &reset_flag) != 1) + return -EFAULT; + if (reset_flag == 0) + iwl_legacy_clear_isr_stats(priv); + + return count; +} + +static ssize_t +iwl_legacy_dbgfs_qos_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + struct iwl_rxon_context *ctx; + int pos = 0, i; + char buf[256 * NUM_IWL_RXON_CTX]; + const size_t bufsz = sizeof(buf); + + for_each_context(priv, ctx) { + pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n", + ctx->ctxid); + for (i = 0; i < AC_NUM; i++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\tcw_min\tcw_max\taifsn\ttxop\n"); + pos += scnprintf(buf + pos, bufsz - pos, + "AC[%d]\t%u\t%u\t%u\t%u\n", i, + ctx->qos_data.def_qos_parm.ac[i].cw_min, + ctx->qos_data.def_qos_parm.ac[i].cw_max, + ctx->qos_data.def_qos_parm.ac[i].aifsn, + ctx->qos_data.def_qos_parm.ac[i].edca_txop); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + } + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_legacy_dbgfs_disable_ht40_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int ht40; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &ht40) != 1) + return -EFAULT; + if (!iwl_legacy_is_any_associated(priv)) + priv->disable_ht40 = ht40 ? true : false; + else { + IWL_ERR(priv, "Sta associated with AP - " + "Change to 40MHz channel support is not allowed\n"); + return -EINVAL; + } + + return count; +} + +static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[100]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, + "11n 40MHz Mode: %s\n", + priv->disable_ht40 ? 
"Disabled" : "Enabled"); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +DEBUGFS_READ_WRITE_FILE_OPS(sram); +DEBUGFS_READ_FILE_OPS(nvm); +DEBUGFS_READ_FILE_OPS(stations); +DEBUGFS_READ_FILE_OPS(channels); +DEBUGFS_READ_FILE_OPS(status); +DEBUGFS_READ_WRITE_FILE_OPS(interrupt); +DEBUGFS_READ_FILE_OPS(qos); +DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40); + +static ssize_t iwl_legacy_dbgfs_traffic_log_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0, ofs = 0; + int cnt = 0, entry; + struct iwl_tx_queue *txq; + struct iwl_queue *q; + struct iwl_rx_queue *rxq = &priv->rxq; + char *buf; + int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) + + (priv->cfg->base_params->num_of_queues * 32 * 8) + 400; + const u8 *ptr; + ssize_t ret; + + if (!priv->txq) { + IWL_ERR(priv, "txq not ready\n"); + return -EAGAIN; + } + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate buffer\n"); + return -ENOMEM; + } + pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n"); + for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) { + txq = &priv->txq[cnt]; + q = &txq->q; + pos += scnprintf(buf + pos, bufsz - pos, + "q[%d]: read_ptr: %u, write_ptr: %u\n", + cnt, q->read_ptr, q->write_ptr); + } + if (priv->tx_traffic && (iwlegacy_debug_level & IWL_DL_TX)) { + ptr = priv->tx_traffic; + pos += scnprintf(buf + pos, bufsz - pos, + "Tx Traffic idx: %u\n", priv->tx_traffic_idx); + for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) { + for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16; + entry++, ofs += 16) { + pos += scnprintf(buf + pos, bufsz - pos, + "0x%.4x ", ofs); + hex_dump_to_buffer(ptr + ofs, 16, 16, 2, + buf + pos, bufsz - pos, 0); + pos += strlen(buf + pos); + if (bufsz - pos > 0) + buf[pos++] = '\n'; + } + } + } + + pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n"); + pos += scnprintf(buf + pos, bufsz - pos, + "read: %u, write: %u\n", + rxq->read, rxq->write); + + if (priv->rx_traffic && (iwlegacy_debug_level & IWL_DL_RX)) { + ptr = priv->rx_traffic; + pos += scnprintf(buf + pos, bufsz - pos, + "Rx Traffic idx: %u\n", priv->rx_traffic_idx); + for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) { + for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16; + entry++, ofs += 16) { + pos += scnprintf(buf + pos, bufsz - pos, + "0x%.4x ", ofs); + hex_dump_to_buffer(ptr + ofs, 16, 16, 2, + buf + pos, bufsz - pos, 0); + pos += strlen(buf + pos); + if (bufsz - pos > 0) + buf[pos++] = '\n'; + } + } + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_traffic_log_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int traffic_log; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &traffic_log) != 1) + return -EFAULT; + if (traffic_log == 0) + iwl_legacy_reset_traffic_log(priv); + + return count; +} + +static ssize_t iwl_legacy_dbgfs_tx_queue_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + struct iwl_tx_queue *txq; + struct iwl_queue *q; + char *buf; + int pos = 0; + int cnt; + int ret; + const size_t bufsz = sizeof(char) * 64 * + 
priv->cfg->base_params->num_of_queues; + + if (!priv->txq) { + IWL_ERR(priv, "txq not ready\n"); + return -EAGAIN; + } + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) { + txq = &priv->txq[cnt]; + q = &txq->q; + pos += scnprintf(buf + pos, bufsz - pos, + "hwq %.2d: read=%u write=%u stop=%d" + " swq_id=%#.2x (ac %d/hwq %d)\n", + cnt, q->read_ptr, q->write_ptr, + !!test_bit(cnt, priv->queue_stopped), + txq->swq_id, txq->swq_id & 3, + (txq->swq_id >> 2) & 0x1f); + if (cnt >= 4) + continue; + /* for the ACs, display the stop count too */ + pos += scnprintf(buf + pos, bufsz - pos, + " stop-count: %d\n", + atomic_read(&priv->queue_stop_count[cnt])); + } + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_rx_queue_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + struct iwl_rx_queue *rxq = &priv->rxq; + char buf[256]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", + rxq->read); + pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", + rxq->write); + pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n", + rxq->free_count); + if (rxq->rb_stts) { + pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n", + le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF); + } else { + pos += scnprintf(buf + pos, bufsz - pos, + "closed_rb_num: Not Allocated\n"); + } + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_legacy_dbgfs_ucode_rx_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file, + user_buf, count, ppos); +} + +static ssize_t iwl_legacy_dbgfs_ucode_tx_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file, + user_buf, count, ppos); +} + +static ssize_t iwl_legacy_dbgfs_ucode_general_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file, + user_buf, count, ppos); +} + +static ssize_t iwl_legacy_dbgfs_sensitivity_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + int cnt = 0; + char *buf; + int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100; + ssize_t ret; + struct iwl_sensitivity_data *data; + + data = &priv->sensitivity_data; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n", + data->auto_corr_ofdm); + pos += scnprintf(buf + pos, bufsz - pos, + "auto_corr_ofdm_mrc:\t\t %u\n", + data->auto_corr_ofdm_mrc); + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n", + data->auto_corr_ofdm_x1); + pos += scnprintf(buf + pos, bufsz - pos, + "auto_corr_ofdm_mrc_x1:\t\t %u\n", + data->auto_corr_ofdm_mrc_x1); + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n", + data->auto_corr_cck); + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n", + 
data->auto_corr_cck_mrc); + pos += scnprintf(buf + pos, bufsz - pos, + "last_bad_plcp_cnt_ofdm:\t\t %u\n", + data->last_bad_plcp_cnt_ofdm); + pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n", + data->last_fa_cnt_ofdm); + pos += scnprintf(buf + pos, bufsz - pos, + "last_bad_plcp_cnt_cck:\t\t %u\n", + data->last_bad_plcp_cnt_cck); + pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n", + data->last_fa_cnt_cck); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n", + data->nrg_curr_state); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n", + data->nrg_prev_state); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t"); + for (cnt = 0; cnt < 10; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->nrg_value[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t"); + for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->nrg_silence_rssi[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n", + data->nrg_silence_ref); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n", + data->nrg_energy_idx); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n", + data->nrg_silence_idx); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n", + data->nrg_th_cck); + pos += scnprintf(buf + pos, bufsz - pos, + "nrg_auto_corr_silence_diff:\t %u\n", + data->nrg_auto_corr_silence_diff); + pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n", + data->num_in_cck_no_fa); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n", + data->nrg_th_ofdm); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + + +static ssize_t iwl_legacy_dbgfs_chain_noise_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + int cnt = 0; + char *buf; + int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100; + ssize_t ret; + struct iwl_chain_noise_data *data; + + data = &priv->chain_noise_data; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n", + data->active_chains); + pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n", + data->chain_noise_a); + pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n", + data->chain_noise_b); + pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n", + data->chain_noise_c); + pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n", + data->chain_signal_a); + pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n", + data->chain_signal_b); + pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n", + data->chain_signal_c); + pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n", + data->beacon_count); + + pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t"); + for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->disconn_array[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t"); + for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) { + 
pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->delta_gain_code[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n", + data->radio_write); + pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n", + data->state); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_power_save_status_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[60]; + int pos = 0; + const size_t bufsz = sizeof(buf); + u32 pwrsave_status; + + pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_REG_POWER_SAVE_STATUS_MSK; + + pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: "); + pos += scnprintf(buf + pos, bufsz - pos, "%s\n", + (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" : + (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" : + (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" : + "error"); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int clear; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &clear) != 1) + return -EFAULT; + + /* make request to uCode to retrieve statistics information */ + mutex_lock(&priv->mutex); + iwl_legacy_send_statistics_request(priv, CMD_SYNC, true); + mutex_unlock(&priv->mutex); + + return count; +} + +static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int len = 0; + char buf[20]; + + len = sprintf(buf, "0x%04X\n", + le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags)); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t iwl_legacy_dbgfs_rxon_filter_flags_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int len = 0; + char buf[20]; + + len = sprintf(buf, "0x%04X\n", + le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags)); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t iwl_legacy_dbgfs_fh_reg_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char *buf; + int pos = 0; + ssize_t ret = -EFAULT; + + if (priv->cfg->ops->lib->dump_fh) { + ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true); + if (buf) { + ret = simple_read_from_buffer(user_buf, + count, ppos, buf, pos); + kfree(buf); + } + } + + return ret; +} + +static ssize_t iwl_legacy_dbgfs_missed_beacon_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + char buf[12]; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "%d\n", + priv->missed_beacon_threshold); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv 
*priv = file->private_data; + char buf[8]; + int buf_size; + int missed; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &missed) != 1) + return -EINVAL; + + if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN || + missed > IWL_MISSED_BEACON_THRESHOLD_MAX) + priv->missed_beacon_threshold = + IWL_MISSED_BEACON_THRESHOLD_DEF; + else + priv->missed_beacon_threshold = missed; + + return count; +} + +static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + char buf[300]; + const size_t bufsz = sizeof(buf); + struct iwl_force_reset *force_reset; + + force_reset = &priv->force_reset; + + pos += scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request: %d\n", + force_reset->reset_request_count); + pos += scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request success: %d\n", + force_reset->reset_success_count); + pos += scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request reject: %d\n", + force_reset->reset_reject_count); + pos += scnprintf(buf + pos, bufsz - pos, + "\treset duration: %lu\n", + force_reset->reset_duration); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { + + int ret; + struct iwl_priv *priv = file->private_data; + + ret = iwl_legacy_force_reset(priv, true); + + return ret ? ret : count; +} + +static ssize_t iwl_legacy_dbgfs_wd_timeout_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int timeout; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &timeout) != 1) + return -EINVAL; + if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT) + timeout = IWL_DEF_WD_TIMEOUT; + + priv->cfg->base_params->wd_timeout = timeout; + iwl_legacy_setup_watchdog(priv); + return count; +} + +DEBUGFS_READ_FILE_OPS(rx_statistics); +DEBUGFS_READ_FILE_OPS(tx_statistics); +DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); +DEBUGFS_READ_FILE_OPS(rx_queue); +DEBUGFS_READ_FILE_OPS(tx_queue); +DEBUGFS_READ_FILE_OPS(ucode_rx_stats); +DEBUGFS_READ_FILE_OPS(ucode_tx_stats); +DEBUGFS_READ_FILE_OPS(ucode_general_stats); +DEBUGFS_READ_FILE_OPS(sensitivity); +DEBUGFS_READ_FILE_OPS(chain_noise); +DEBUGFS_READ_FILE_OPS(power_save_status); +DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics); +DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics); +DEBUGFS_READ_FILE_OPS(fh_reg); +DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon); +DEBUGFS_READ_WRITE_FILE_OPS(force_reset); +DEBUGFS_READ_FILE_OPS(rxon_flags); +DEBUGFS_READ_FILE_OPS(rxon_filter_flags); +DEBUGFS_WRITE_FILE_OPS(wd_timeout); + +/* + * Create the debugfs files and directories + * + */ +int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name) +{ + struct dentry *phyd = priv->hw->wiphy->debugfsdir; + struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug; + + dir_drv = debugfs_create_dir(name, phyd); + if (!dir_drv) + return -ENOMEM; + + priv->debugfs_dir = dir_drv; + + dir_data = debugfs_create_dir("data", dir_drv); + if (!dir_data) + goto err; + dir_rf = debugfs_create_dir("rf", dir_drv); + if (!dir_rf) + goto err; + dir_debug = 
debugfs_create_dir("debug", dir_drv); + if (!dir_debug) + goto err; + + DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR); + + if (priv->cfg->base_params->sensitivity_calib_by_driver) + DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR); + if (priv->cfg->base_params->chain_noise_calib_by_driver) + DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR); + if (priv->cfg->base_params->sensitivity_calib_by_driver) + DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, + &priv->disable_sens_cal); + if (priv->cfg->base_params->chain_noise_calib_by_driver) + DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf, + &priv->disable_chain_noise_cal); + DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, + &priv->disable_tx_power_cal); + return 0; + +err: + IWL_ERR(priv, "Can't create the debugfs directory\n"); + iwl_legacy_dbgfs_unregister(priv); + return -ENOMEM; +} +EXPORT_SYMBOL(iwl_legacy_dbgfs_register); + +/** + * Remove the debugfs files and directories + * + */ +void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv) +{ + if (!priv->debugfs_dir) + return; + + debugfs_remove_recursive(priv->debugfs_dir); + priv->debugfs_dir = NULL; +} +EXPORT_SYMBOL(iwl_legacy_dbgfs_unregister); diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-dev.h b/trunk/drivers/net/wireless/iwlegacy/iwl-dev.h new file mode 100644 index 000000000000..9c786edf56fd --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-dev.h @@ -0,0 +1,1364 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +/* + * Please use this file (iwl-dev.h) for driver implementation definitions. + * Please use iwl-commands.h for uCode API definitions. + * Please use iwl-4965-hw.h for hardware-related definitions. + */ + +#ifndef __iwl_legacy_dev_h__ +#define __iwl_legacy_dev_h__ + +#include +#include /* for struct pci_device_id */ +#include +#include +#include +#include + +#include "iwl-eeprom.h" +#include "iwl-csr.h" +#include "iwl-prph.h" +#include "iwl-fh.h" +#include "iwl-debug.h" +#include "iwl-4965-hw.h" +#include "iwl-3945-hw.h" +#include "iwl-led.h" +#include "iwl-power.h" +#include "iwl-legacy-rs.h" + +struct iwl_tx_queue; + +/* CT-KILL constants */ +#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */ + +/* Default noise level to report when noise measurement is not available. + * This may be because we're: + * 1) Not associated (4965, no beacon statistics being sent to driver) + * 2) Scanning (noise measurement does not apply to associated channel) + * 3) Receiving CCK (3945 delivers noise info only for OFDM frames) + * Use default noise value of -127 ... this is below the range of measurable + * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user. + * Also, -127 works better than 0 when averaging frames with/without + * noise info (e.g. averaging might be done in app); measured dBm values are + * always negative ... using a negative value as the default keeps all + * averages within an s8's (used in some apps) range of negative values. */ +#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127) + +/* + * RTS threshold here is total size [2347] minus 4 FCS bytes + * Per spec: + * a value of 0 means RTS on all data/management packets + * a value > max MSDU size means no RTS + * else RTS for data/management frames where MPDU is larger + * than RTS value. + */ +#define DEFAULT_RTS_THRESHOLD 2347U +#define MIN_RTS_THRESHOLD 0U +#define MAX_RTS_THRESHOLD 2347U +#define MAX_MSDU_SIZE 2304U +#define MAX_MPDU_SIZE 2346U +#define DEFAULT_BEACON_INTERVAL 100U +#define DEFAULT_SHORT_RETRY_LIMIT 7U +#define DEFAULT_LONG_RETRY_LIMIT 4U + +struct iwl_rx_mem_buffer { + dma_addr_t page_dma; + struct page *page; + struct list_head list; +}; + +#define rxb_addr(r) page_address(r->page) + +/* defined below */ +struct iwl_device_cmd; + +struct iwl_cmd_meta { + /* only for SYNC commands, iff the reply skb is wanted */ + struct iwl_host_cmd *source; + /* + * only for ASYNC commands + * (which is somewhat stupid -- look at iwl-sta.c for instance + * which duplicates a bunch of code because the callback isn't + * invoked for SYNC commands, if it were and its result passed + * through it would be simpler...) + */ + void (*callback)(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt); + + /* The CMD_SIZE_HUGE flag bit indicates that the command + * structure is stored at the end of the shared queue memory. 
*/ + u32 flags; + + DEFINE_DMA_UNMAP_ADDR(mapping); + DEFINE_DMA_UNMAP_LEN(len); +}; + +/* + * Generic queue structure + * + * Contains common data for Rx and Tx queues + */ +struct iwl_queue { + int n_bd; /* number of BDs in this queue */ + int write_ptr; /* 1-st empty entry (index) host_w*/ + int read_ptr; /* last used entry (index) host_r*/ + /* use for monitoring and recovering the stuck queue */ + dma_addr_t dma_addr; /* physical addr for BD's */ + int n_window; /* safe queue window */ + u32 id; + int low_mark; /* low watermark, resume queue if free + * space more than this */ + int high_mark; /* high watermark, stop queue if free + * space less than this */ +}; + +/* One for each TFD */ +struct iwl_tx_info { + struct sk_buff *skb; + struct iwl_rxon_context *ctx; +}; + +/** + * struct iwl_tx_queue - Tx Queue for DMA + * @q: generic Rx/Tx queue descriptor + * @bd: base of circular buffer of TFDs + * @cmd: array of command/TX buffer pointers + * @meta: array of meta data for each command/tx buffer + * @dma_addr_cmd: physical address of cmd/tx buffer array + * @txb: array of per-TFD driver data + * @time_stamp: time (in jiffies) of last read_ptr change + * @need_update: indicates need to update read/write index + * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled + * + * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame + * descriptors) and required locking structures. + */ +#define TFD_TX_CMD_SLOTS 256 +#define TFD_CMD_SLOTS 32 + +struct iwl_tx_queue { + struct iwl_queue q; + void *tfds; + struct iwl_device_cmd **cmd; + struct iwl_cmd_meta *meta; + struct iwl_tx_info *txb; + unsigned long time_stamp; + u8 need_update; + u8 sched_retry; + u8 active; + u8 swq_id; +}; + +#define IWL_NUM_SCAN_RATES (2) + +struct iwl4965_channel_tgd_info { + u8 type; + s8 max_power; +}; + +struct iwl4965_channel_tgh_info { + s64 last_radar_time; +}; + +#define IWL4965_MAX_RATE (33) + +struct iwl3945_clip_group { + /* maximum power level to prevent clipping for each rate, derived by + * us from this band's saturation power in EEPROM */ + const s8 clip_powers[IWL_MAX_RATES]; +}; + +/* current Tx power values to use, one for each rate for each channel. + * requested power is limited by: + * -- regulatory EEPROM limits for this channel + * -- hardware capabilities (clip-powers) + * -- spectrum management + * -- user preference (e.g. iwconfig) + * when requested power is set, base power index must also be set. */ +struct iwl3945_channel_power_info { + struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */ + s8 power_table_index; /* actual (compenst'd) index into gain table */ + s8 base_power_index; /* gain index for power at factory temp. */ + s8 requested_power; /* power (dBm) requested for this chnl/rate */ +}; + +/* current scan Tx power values to use, one for each scan rate for each + * channel. */ +struct iwl3945_scan_power_info { + struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */ + s8 power_table_index; /* actual (compenst'd) index into gain table */ + s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */ +}; + +/* + * One for each channel, holds all channel setup data + * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant + * with one another! 
+ */ +struct iwl_channel_info { + struct iwl4965_channel_tgd_info tgd; + struct iwl4965_channel_tgh_info tgh; + struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */ + struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for + * HT40 channel */ + + u8 channel; /* channel number */ + u8 flags; /* flags copied from EEPROM */ + s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */ + s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */ + s8 min_power; /* always 0 */ + s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */ + + u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */ + u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */ + enum ieee80211_band band; + + /* HT40 channel info */ + s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */ + u8 ht40_flags; /* flags copied from EEPROM */ + u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */ + + /* Radio/DSP gain settings for each "normal" data Tx rate. + * These include, in addition to RF and DSP gain, a few fields for + * remembering/modifying gain settings (indexes). */ + struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE]; + + /* Radio/DSP gain settings for each scan rate, for directed scans. */ + struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES]; +}; + +#define IWL_TX_FIFO_BK 0 /* shared */ +#define IWL_TX_FIFO_BE 1 +#define IWL_TX_FIFO_VI 2 /* shared */ +#define IWL_TX_FIFO_VO 3 +#define IWL_TX_FIFO_UNUSED -1 + +/* Minimum number of queues. MAX_NUM is defined in hw specific files. + * Set the minimum to accommodate the 4 standard TX queues, 1 command + * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */ +#define IWL_MIN_NUM_QUEUES 10 + +#define IWL_DEFAULT_CMD_QUEUE_NUM 4 + +#define IEEE80211_DATA_LEN 2304 +#define IEEE80211_4ADDR_LEN 30 +#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) +#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) + +struct iwl_frame { + union { + struct ieee80211_hdr frame; + struct iwl_tx_beacon_cmd beacon; + u8 raw[IEEE80211_FRAME_LEN]; + u8 cmd[360]; + } u; + struct list_head list; +}; + +#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) +#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) +#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) + +enum { + CMD_SYNC = 0, + CMD_SIZE_NORMAL = 0, + CMD_NO_SKB = 0, + CMD_SIZE_HUGE = (1 << 0), + CMD_ASYNC = (1 << 1), + CMD_WANT_SKB = (1 << 2), + CMD_MAPPED = (1 << 3), +}; + +#define DEF_CMD_PAYLOAD_SIZE 320 + +/** + * struct iwl_device_cmd + * + * For allocation of the command and tx queues, this establishes the overall + * size of the largest command we send to uCode, except for a scan command + * (which is relatively huge; space is allocated separately). 
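/*
 * Illustrative sketch, not part of the patch: how the SEQ_TO_SN and
 * SN_TO_SEQ macros above split the 802.11 sequence-control field.
 * IEEE80211_SCTL_SEQ is redefined locally with the value used by
 * include/linux/ieee80211.h so the example compiles standalone.
 */
#include <stdio.h>

#define IEEE80211_SCTL_SEQ 0xFFF0

#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)

int main(void)
{
	unsigned int seq_ctrl = 0x01b7;  /* fragment 7 of sequence number 27 */

	/* The low 4 bits carry the fragment number and are masked away. */
	printf("sn  = %u\n", SEQ_TO_SN(seq_ctrl));   /* prints 27 */
	printf("seq = 0x%04x\n", SN_TO_SEQ(27));     /* prints 0x01b0 */
	return 0;
}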
+ */ +struct iwl_device_cmd { + struct iwl_cmd_header hdr; /* uCode API */ + union { + u32 flags; + u8 val8; + u16 val16; + u32 val32; + struct iwl_tx_cmd tx; + u8 payload[DEF_CMD_PAYLOAD_SIZE]; + } __packed cmd; +} __packed; + +#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) + + +struct iwl_host_cmd { + const void *data; + unsigned long reply_page; + void (*callback)(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt); + u32 flags; + u16 len; + u8 id; +}; + +#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 +#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 +#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 + +/** + * struct iwl_rx_queue - Rx queue + * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) + * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) + * @read: Shared index to newest available Rx buffer + * @write: Shared index to oldest written Rx packet + * @free_count: Number of pre-allocated buffers in rx_free + * @rx_free: list of free SKBs for use + * @rx_used: List of Rx buffers with no SKB + * @need_update: flag to indicate we need to update read/write index + * @rb_stts: driver's pointer to receive buffer status + * @rb_stts_dma: bus address of receive buffer status + * + * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers + */ +struct iwl_rx_queue { + __le32 *bd; + dma_addr_t bd_dma; + struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; + struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; + u32 read; + u32 write; + u32 free_count; + u32 write_actual; + struct list_head rx_free; + struct list_head rx_used; + int need_update; + struct iwl_rb_status *rb_stts; + dma_addr_t rb_stts_dma; + spinlock_t lock; +}; + +#define IWL_SUPPORTED_RATES_IE_LEN 8 + +#define MAX_TID_COUNT 9 + +#define IWL_INVALID_RATE 0xFF +#define IWL_INVALID_VALUE -1 + +/** + * struct iwl_ht_agg -- aggregation status while waiting for block-ack + * @txq_id: Tx queue used for Tx attempt + * @frame_count: # frames attempted by Tx command + * @wait_for_ba: Expect block-ack before next Tx reply + * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window + * @bitmap0: Low order bitmap, one bit for each frame pending ACK in Tx window + * @bitmap1: High order, one bit for each frame pending ACK in Tx window + * @rate_n_flags: Rate at which Tx was attempted + * + * If REPLY_TX indicates that aggregation was attempted, driver must wait + * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info + * until block ack arrives. 
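/*
 * Illustrative sketch, not part of the patch: generic bookkeeping implied
 * by the iwl_ht_agg fields above (start_idx plus a per-frame pending
 * bitmap). This is not the driver's actual REPLY_COMPRESSED_BA handling;
 * the struct and helper names are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

struct agg_window {
	uint16_t start_idx;  /* first TFD index in the Tx window */
	uint64_t bitmap;     /* one bit per frame still awaiting ACK */
};

/* Record that the frame at TFD index @idx was sent under aggregation. */
static void agg_mark_pending(struct agg_window *w, uint16_t idx)
{
	w->bitmap |= 1ULL << (idx - w->start_idx);
}

/* Apply a block-ack bitmap: a set bit acknowledges the matching frame. */
static void agg_apply_block_ack(struct agg_window *w, uint64_t ba_bitmap)
{
	w->bitmap &= ~ba_bitmap;
}

/* The window is drained once no pending bits remain. */
static bool agg_window_empty(const struct agg_window *w)
{
	return w->bitmap == 0;
}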
+ */ +struct iwl_ht_agg { + u16 txq_id; + u16 frame_count; + u16 wait_for_ba; + u16 start_idx; + u64 bitmap; + u32 rate_n_flags; +#define IWL_AGG_OFF 0 +#define IWL_AGG_ON 1 +#define IWL_EMPTYING_HW_QUEUE_ADDBA 2 +#define IWL_EMPTYING_HW_QUEUE_DELBA 3 + u8 state; +}; + + +struct iwl_tid_data { + u16 seq_number; /* 4965 only */ + u16 tfds_in_queue; + struct iwl_ht_agg agg; +}; + +struct iwl_hw_key { + u32 cipher; + int keylen; + u8 keyidx; + u8 key[32]; +}; + +union iwl_ht_rate_supp { + u16 rates; + struct { + u8 siso_rate; + u8 mimo_rate; + }; +}; + +#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0) +#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1) +#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2) +#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3) +#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K +#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K +#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K + +/* + * Maximal MPDU density for TX aggregation + * 4 - 2us density + * 5 - 4us density + * 6 - 8us density + * 7 - 16us density + */ +#define CFG_HT_MPDU_DENSITY_2USEC (0x4) +#define CFG_HT_MPDU_DENSITY_4USEC (0x5) +#define CFG_HT_MPDU_DENSITY_8USEC (0x6) +#define CFG_HT_MPDU_DENSITY_16USEC (0x7) +#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC +#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC +#define CFG_HT_MPDU_DENSITY_MIN (0x1) + +struct iwl_ht_config { + bool single_chain_sufficient; + enum ieee80211_smps_mode smps; /* current smps mode */ +}; + +/* QoS structures */ +struct iwl_qos_info { + int qos_active; + struct iwl_qosparam_cmd def_qos_parm; +}; + +/* + * Structure should be accessed with sta_lock held. When station addition + * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only + * the commands (iwl_legacy_addsta_cmd and iwl_link_quality_cmd) without + * sta_lock held. + */ +struct iwl_station_entry { + struct iwl_legacy_addsta_cmd sta; + struct iwl_tid_data tid[MAX_TID_COUNT]; + u8 used, ctxid; + struct iwl_hw_key keyinfo; + struct iwl_link_quality_cmd *lq; +}; + +struct iwl_station_priv_common { + struct iwl_rxon_context *ctx; + u8 sta_id; +}; + +/* + * iwl_station_priv: Driver's private station information + * + * When mac80211 creates a station it reserves some space (hw->sta_data_size) + * in the structure for use by driver. This structure is places in that + * space. + * + * The common struct MUST be first because it is shared between + * 3945 and 4965! + */ +struct iwl_station_priv { + struct iwl_station_priv_common common; + struct iwl_lq_sta lq_sta; + atomic_t pending_frames; + bool client; + bool asleep; +}; + +/** + * struct iwl_vif_priv - driver's private per-interface information + * + * When mac80211 allocates a virtual interface, it can allocate + * space for us to put data into. 
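/*
 * Illustrative sketch, not part of the patch: what the
 * CFG_HT_RX_AMPDU_FACTOR_* and CFG_HT_MPDU_DENSITY_* codes above
 * translate to, using the 802.11n conventions (maximum A-MPDU length is
 * 2^(13 + factor) - 1 bytes; density codes 4..7 correspond to the
 * 2/4/8/16 us spacings listed in the comment). Helper names are
 * hypothetical.
 */
#include <stdio.h>

/* Maximum A-MPDU length in bytes for factor codes 0..3 above. */
static unsigned int ampdu_factor_to_bytes(unsigned int factor)
{
	return (1U << (13 + factor)) - 1;
}

/* Minimum MPDU start spacing in microseconds for density codes 4..7. */
static unsigned int mpdu_density_to_usec(unsigned int density)
{
	return 1U << (density - 3);
}

int main(void)
{
	printf("factor 3  -> %u bytes\n", ampdu_factor_to_bytes(3)); /* 65535 (64K) */
	printf("density 5 -> %u us\n", mpdu_density_to_usec(5));     /* 4 us */
	return 0;
}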
+ */ +struct iwl_vif_priv { + struct iwl_rxon_context *ctx; + u8 ibss_bssid_sta_id; +}; + +/* one for each uCode image (inst/data, boot/init/runtime) */ +struct fw_desc { + void *v_addr; /* access by driver */ + dma_addr_t p_addr; /* access by card's busmaster DMA */ + u32 len; /* bytes */ +}; + +/* uCode file layout */ +struct iwl_ucode_header { + __le32 ver; /* major/minor/API/serial */ + struct { + __le32 inst_size; /* bytes of runtime code */ + __le32 data_size; /* bytes of runtime data */ + __le32 init_size; /* bytes of init code */ + __le32 init_data_size; /* bytes of init data */ + __le32 boot_size; /* bytes of bootstrap code */ + u8 data[0]; /* in same order as sizes */ + } v1; +}; + +struct iwl4965_ibss_seq { + u8 mac[ETH_ALEN]; + u16 seq_num; + u16 frag_num; + unsigned long packet_time; + struct list_head list; +}; + +struct iwl_sensitivity_ranges { + u16 min_nrg_cck; + u16 max_nrg_cck; + + u16 nrg_th_cck; + u16 nrg_th_ofdm; + + u16 auto_corr_min_ofdm; + u16 auto_corr_min_ofdm_mrc; + u16 auto_corr_min_ofdm_x1; + u16 auto_corr_min_ofdm_mrc_x1; + + u16 auto_corr_max_ofdm; + u16 auto_corr_max_ofdm_mrc; + u16 auto_corr_max_ofdm_x1; + u16 auto_corr_max_ofdm_mrc_x1; + + u16 auto_corr_max_cck; + u16 auto_corr_max_cck_mrc; + u16 auto_corr_min_cck; + u16 auto_corr_min_cck_mrc; + + u16 barker_corr_th_min; + u16 barker_corr_th_min_mrc; + u16 nrg_th_cca; +}; + + +#define KELVIN_TO_CELSIUS(x) ((x)-273) +#define CELSIUS_TO_KELVIN(x) ((x)+273) + + +/** + * struct iwl_hw_params + * @max_txq_num: Max # Tx queues supported + * @dma_chnl_num: Number of Tx DMA/FIFO channels + * @scd_bc_tbls_size: size of scheduler byte count tables + * @tfd_size: TFD size + * @tx/rx_chains_num: Number of TX/RX chains + * @valid_tx/rx_ant: usable antennas + * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2) + * @max_rxq_log: Log-base-2 of max_rxq_size + * @rx_page_order: Rx buffer page order + * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR + * @max_stations: + * @ht40_channel: is 40MHz width possible in band 2.4 + * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ) + * @sw_crypto: 0 for hw, 1 for sw + * @max_xxx_size: for ucode uses + * @ct_kill_threshold: temperature threshold + * @beacon_time_tsf_bits: number of valid tsf bits for beacon time + * @struct iwl_sensitivity_ranges: range of sensitivity values + */ +struct iwl_hw_params { + u8 max_txq_num; + u8 dma_chnl_num; + u16 scd_bc_tbls_size; + u32 tfd_size; + u8 tx_chains_num; + u8 rx_chains_num; + u8 valid_tx_ant; + u8 valid_rx_ant; + u16 max_rxq_size; + u16 max_rxq_log; + u32 rx_page_order; + u32 rx_wrt_ptr_reg; + u8 max_stations; + u8 ht40_channel; + u8 max_beacon_itrvl; /* in 1024 ms */ + u32 max_inst_size; + u32 max_data_size; + u32 max_bsm_size; + u32 ct_kill_threshold; /* value in hw-dependent units */ + u16 beacon_time_tsf_bits; + const struct iwl_sensitivity_ranges *sens; +}; + + +/****************************************************************************** + * + * Functions implemented in core module which are forward declared here + * for use by iwl-[4-5].c + * + * NOTE: The implementation of these functions are not hardware specific + * which is why they are in the core module files. 
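/*
 * Illustrative sketch, not part of the patch: the v1 uCode file layout
 * described by struct iwl_ucode_header above implies that the five images
 * follow the header back to back, in the same order as the size fields.
 * The helper below just accumulates those sizes into offsets; the type
 * and function names are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

struct ucode_v1_sizes {
	uint32_t inst_size;       /* bytes of runtime code */
	uint32_t data_size;       /* bytes of runtime data */
	uint32_t init_size;       /* bytes of init code */
	uint32_t init_data_size;  /* bytes of init data */
	uint32_t boot_size;       /* bytes of bootstrap code */
};

struct ucode_v1_offsets {
	size_t inst, data, init, init_data, boot, total;
};

static void compute_ucode_v1_offsets(const struct ucode_v1_sizes *s,
				     size_t payload_off,
				     struct ucode_v1_offsets *o)
{
	o->inst      = payload_off;
	o->data      = o->inst      + s->inst_size;
	o->init      = o->data      + s->data_size;
	o->init_data = o->init      + s->init_size;
	o->boot      = o->init_data + s->init_data_size;
	o->total     = o->boot      + s->boot_size;
}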
+ * + * Naming convention -- + * iwl_ <-- Is part of iwlwifi + * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX) + * iwl4965_bg_ <-- Called from work queue context + * iwl4965_mac_ <-- mac80211 callback + * + ****************************************************************************/ +extern void iwl4965_update_chain_flags(struct iwl_priv *priv); +extern const u8 iwlegacy_bcast_addr[ETH_ALEN]; +extern int iwl_legacy_queue_space(const struct iwl_queue *q); +static inline int iwl_legacy_queue_used(const struct iwl_queue *q, int i) +{ + return q->write_ptr >= q->read_ptr ? + (i >= q->read_ptr && i < q->write_ptr) : + !(i < q->read_ptr && i >= q->write_ptr); +} + + +static inline u8 iwl_legacy_get_cmd_index(struct iwl_queue *q, u32 index, + int is_huge) +{ + /* + * This is for init calibration result and scan command which + * required buffer > TFD_MAX_PAYLOAD_SIZE, + * the big buffer at end of command array + */ + if (is_huge) + return q->n_window; /* must be power of 2 */ + + /* Otherwise, use normal size buffers */ + return index & (q->n_window - 1); +} + + +struct iwl_dma_ptr { + dma_addr_t dma; + void *addr; + size_t size; +}; + +#define IWL_OPERATION_MODE_AUTO 0 +#define IWL_OPERATION_MODE_HT_ONLY 1 +#define IWL_OPERATION_MODE_MIXED 2 +#define IWL_OPERATION_MODE_20MHZ 3 + +#define IWL_TX_CRC_SIZE 4 +#define IWL_TX_DELIMITER_SIZE 4 + +#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000 + +/* Sensitivity and chain noise calibration */ +#define INITIALIZATION_VALUE 0xFFFF +#define IWL4965_CAL_NUM_BEACONS 20 +#define IWL_CAL_NUM_BEACONS 16 +#define MAXIMUM_ALLOWED_PATHLOSS 15 + +#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3 + +#define MAX_FA_OFDM 50 +#define MIN_FA_OFDM 5 +#define MAX_FA_CCK 50 +#define MIN_FA_CCK 5 + +#define AUTO_CORR_STEP_OFDM 1 + +#define AUTO_CORR_STEP_CCK 3 +#define AUTO_CORR_MAX_TH_CCK 160 + +#define NRG_DIFF 2 +#define NRG_STEP_CCK 2 +#define NRG_MARGIN 8 +#define MAX_NUMBER_CCK_NO_FA 100 + +#define AUTO_CORR_CCK_MIN_VAL_DEF (125) + +#define CHAIN_A 0 +#define CHAIN_B 1 +#define CHAIN_C 2 +#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4 +#define ALL_BAND_FILTER 0xFF00 +#define IN_BAND_FILTER 0xFF +#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF + +#define NRG_NUM_PREV_STAT_L 20 +#define NUM_RX_CHAINS 3 + +enum iwl4965_false_alarm_state { + IWL_FA_TOO_MANY = 0, + IWL_FA_TOO_FEW = 1, + IWL_FA_GOOD_RANGE = 2, +}; + +enum iwl4965_chain_noise_state { + IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */ + IWL_CHAIN_NOISE_ACCUMULATE, + IWL_CHAIN_NOISE_CALIBRATED, + IWL_CHAIN_NOISE_DONE, +}; + +enum iwl4965_calib_enabled_state { + IWL_CALIB_DISABLED = 0, /* must be 0 */ + IWL_CALIB_ENABLED = 1, +}; + +/* + * enum iwl_calib + * defines the order in which results of initial calibrations + * should be sent to the runtime uCode + */ +enum iwl_calib { + IWL_CALIB_MAX, +}; + +/* Opaque calibration results */ +struct iwl_calib_result { + void *buf; + size_t buf_len; +}; + +enum ucode_type { + UCODE_NONE = 0, + UCODE_INIT, + UCODE_RT +}; + +/* Sensitivity calib data */ +struct iwl_sensitivity_data { + u32 auto_corr_ofdm; + u32 auto_corr_ofdm_mrc; + u32 auto_corr_ofdm_x1; + u32 auto_corr_ofdm_mrc_x1; + u32 auto_corr_cck; + u32 auto_corr_cck_mrc; + + u32 last_bad_plcp_cnt_ofdm; + u32 last_fa_cnt_ofdm; + u32 last_bad_plcp_cnt_cck; + u32 last_fa_cnt_cck; + + u32 nrg_curr_state; + u32 nrg_prev_state; + u32 nrg_value[10]; + u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L]; + u32 nrg_silence_ref; + u32 nrg_energy_idx; + u32 nrg_silence_idx; + u32 nrg_th_cck; + s32 nrg_auto_corr_silence_diff; + u32 
num_in_cck_no_fa; + u32 nrg_th_ofdm; + + u16 barker_corr_th_min; + u16 barker_corr_th_min_mrc; + u16 nrg_th_cca; +}; + +/* Chain noise (differential Rx gain) calib data */ +struct iwl_chain_noise_data { + u32 active_chains; + u32 chain_noise_a; + u32 chain_noise_b; + u32 chain_noise_c; + u32 chain_signal_a; + u32 chain_signal_b; + u32 chain_signal_c; + u16 beacon_count; + u8 disconn_array[NUM_RX_CHAINS]; + u8 delta_gain_code[NUM_RX_CHAINS]; + u8 radio_write; + u8 state; +}; + +#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */ +#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ + +#define IWL_TRAFFIC_ENTRIES (256) +#define IWL_TRAFFIC_ENTRY_SIZE (64) + +enum { + MEASUREMENT_READY = (1 << 0), + MEASUREMENT_ACTIVE = (1 << 1), +}; + +/* interrupt statistics */ +struct isr_statistics { + u32 hw; + u32 sw; + u32 err_code; + u32 sch; + u32 alive; + u32 rfkill; + u32 ctkill; + u32 wakeup; + u32 rx; + u32 rx_handlers[REPLY_MAX]; + u32 tx; + u32 unhandled; +}; + +/* management statistics */ +enum iwl_mgmt_stats { + MANAGEMENT_ASSOC_REQ = 0, + MANAGEMENT_ASSOC_RESP, + MANAGEMENT_REASSOC_REQ, + MANAGEMENT_REASSOC_RESP, + MANAGEMENT_PROBE_REQ, + MANAGEMENT_PROBE_RESP, + MANAGEMENT_BEACON, + MANAGEMENT_ATIM, + MANAGEMENT_DISASSOC, + MANAGEMENT_AUTH, + MANAGEMENT_DEAUTH, + MANAGEMENT_ACTION, + MANAGEMENT_MAX, +}; +/* control statistics */ +enum iwl_ctrl_stats { + CONTROL_BACK_REQ = 0, + CONTROL_BACK, + CONTROL_PSPOLL, + CONTROL_RTS, + CONTROL_CTS, + CONTROL_ACK, + CONTROL_CFEND, + CONTROL_CFENDACK, + CONTROL_MAX, +}; + +struct traffic_stats { +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + u32 mgmt[MANAGEMENT_MAX]; + u32 ctrl[CONTROL_MAX]; + u32 data_cnt; + u64 data_bytes; +#endif +}; + +/* + * host interrupt timeout value + * used with setting interrupt coalescing timer + * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit + * + * default interrupt coalescing timer is 64 x 32 = 2048 usecs + * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs + */ +#define IWL_HOST_INT_TIMEOUT_MAX (0xFF) +#define IWL_HOST_INT_TIMEOUT_DEF (0x40) +#define IWL_HOST_INT_TIMEOUT_MIN (0x0) +#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF) +#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10) +#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0) + +#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5) + +/* TX queue watchdog timeouts in mSecs */ +#define IWL_DEF_WD_TIMEOUT (2000) +#define IWL_LONG_WD_TIMEOUT (10000) +#define IWL_MAX_WD_TIMEOUT (120000) + +struct iwl_force_reset { + int reset_request_count; + int reset_success_count; + int reset_reject_count; + unsigned long reset_duration; + unsigned long last_force_reset_jiffies; +}; + +/* extend beacon time format bit shifting */ +/* + * for _3945 devices + * bits 31:24 - extended + * bits 23:0 - interval + */ +#define IWL3945_EXT_BEACON_TIME_POS 24 +/* + * for _4965 devices + * bits 31:22 - extended + * bits 21:0 - interval + */ +#define IWL4965_EXT_BEACON_TIME_POS 22 + +enum iwl_rxon_context_id { + IWL_RXON_CTX_BSS, + + NUM_IWL_RXON_CTX +}; + +struct iwl_rxon_context { + struct ieee80211_vif *vif; + + const u8 *ac_to_fifo; + const u8 *ac_to_queue; + u8 mcast_queue; + + /* + * We could use the vif to indicate active, but we + * also need it to be active during disabling when + * we already removed the vif for type setting. 
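/*
 * Illustrative sketch, not part of the patch: packing and unpacking the
 * extended beacon time format described above, parameterized by the bit
 * position of the split (24 for _3945 devices, 22 for _4965 devices).
 * Helper names are hypothetical.
 */
#include <stdint.h>

#define IWL3945_EXT_BEACON_TIME_POS 24
#define IWL4965_EXT_BEACON_TIME_POS 22

static uint32_t beacon_time_extended(uint32_t packed, unsigned int pos)
{
	return packed >> pos;                  /* high bits: extended part */
}

static uint32_t beacon_time_interval(uint32_t packed, unsigned int pos)
{
	return packed & ((1U << pos) - 1);     /* low bits: interval part */
}

static uint32_t beacon_time_pack(uint32_t ext, uint32_t interval,
				 unsigned int pos)
{
	return (ext << pos) | (interval & ((1U << pos) - 1));
}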
+ */ + bool always_active, is_active; + + bool ht_need_multiple_chains; + + enum iwl_rxon_context_id ctxid; + + u32 interface_modes, exclusive_interface_modes; + u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype; + + /* + * We declare this const so it can only be + * changed via explicit cast within the + * routines that actually update the physical + * hardware. + */ + const struct iwl_legacy_rxon_cmd active; + struct iwl_legacy_rxon_cmd staging; + + struct iwl_rxon_time_cmd timing; + + struct iwl_qos_info qos_data; + + u8 bcast_sta_id, ap_sta_id; + + u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd; + u8 qos_cmd; + u8 wep_key_cmd; + + struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; + u8 key_mapping_keys; + + __le32 station_flags; + + struct { + bool non_gf_sta_present; + u8 protection; + bool enabled, is_40mhz; + u8 extension_chan_offset; + } ht; +}; + +struct iwl_priv { + + /* ieee device used by generic ieee processing code */ + struct ieee80211_hw *hw; + struct ieee80211_channel *ieee_channels; + struct ieee80211_rate *ieee_rates; + struct iwl_cfg *cfg; + + /* temporary frame storage list */ + struct list_head free_frames; + int frames_count; + + enum ieee80211_band band; + int alloc_rxb_page; + + void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); + + struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; + + /* spectrum measurement report caching */ + struct iwl_spectrum_notification measure_report; + u8 measurement_status; + + /* ucode beacon time */ + u32 ucode_beacon_time; + int missed_beacon_threshold; + + /* track IBSS manager (last beacon) status */ + u32 ibss_manager; + + /* force reset */ + struct iwl_force_reset force_reset; + + /* we allocate array of iwl_channel_info for NIC's valid channels. 
+ * Access via channel # using indirect index array */ + struct iwl_channel_info *channel_info; /* channel info array */ + u8 channel_count; /* # of channels */ + + /* thermal calibration */ + s32 temperature; /* degrees Kelvin */ + s32 last_temperature; + + /* init calibration results */ + struct iwl_calib_result calib_results[IWL_CALIB_MAX]; + + /* Scan related variables */ + unsigned long scan_start; + unsigned long scan_start_tsf; + void *scan_cmd; + enum ieee80211_band scan_band; + struct cfg80211_scan_request *scan_request; + struct ieee80211_vif *scan_vif; + u8 scan_tx_ant[IEEE80211_NUM_BANDS]; + u8 mgmt_tx_ant; + + /* spinlock */ + spinlock_t lock; /* protect general shared data */ + spinlock_t hcmd_lock; /* protect hcmd */ + spinlock_t reg_lock; /* protect hw register access */ + struct mutex mutex; + + /* basic pci-network driver stuff */ + struct pci_dev *pci_dev; + + /* pci hardware address support */ + void __iomem *hw_base; + u32 hw_rev; + u32 hw_wa_rev; + u8 rev_id; + + /* microcode/device supports multiple contexts */ + u8 valid_contexts; + + /* command queue number */ + u8 cmd_queue; + + /* max number of station keys */ + u8 sta_key_max_num; + + /* EEPROM MAC addresses */ + struct mac_address addresses[1]; + + /* uCode images, save to reload in case of failure */ + int fw_index; /* firmware we're trying to load */ + u32 ucode_ver; /* version of ucode, copy of + iwl_ucode.ver */ + struct fw_desc ucode_code; /* runtime inst */ + struct fw_desc ucode_data; /* runtime data original */ + struct fw_desc ucode_data_backup; /* runtime data save/restore */ + struct fw_desc ucode_init; /* initialization inst */ + struct fw_desc ucode_init_data; /* initialization data */ + struct fw_desc ucode_boot; /* bootstrap inst */ + enum ucode_type ucode_type; + u8 ucode_write_complete; /* the image write is complete */ + char firmware_name[25]; + + struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX]; + + __le16 switch_channel; + + /* 1st responses from initialize and runtime uCode images. + * _4965's initialize alive response contains some calibration data. 
*/ + struct iwl_init_alive_resp card_alive_init; + struct iwl_alive_resp card_alive; + + u16 active_rate; + + u8 start_calib; + struct iwl_sensitivity_data sensitivity_data; + struct iwl_chain_noise_data chain_noise_data; + __le16 sensitivity_tbl[HD_TABLE_SIZE]; + + struct iwl_ht_config current_ht_config; + + /* Rate scaling data */ + u8 retry_rate; + + wait_queue_head_t wait_command_queue; + + int activity_timer_active; + + /* Rx and Tx DMA processing queues */ + struct iwl_rx_queue rxq; + struct iwl_tx_queue *txq; + unsigned long txq_ctx_active_msk; + struct iwl_dma_ptr kw; /* keep warm address */ + struct iwl_dma_ptr scd_bc_tbls; + + u32 scd_base_addr; /* scheduler sram base address */ + + unsigned long status; + + /* counts mgmt, ctl, and data packets */ + struct traffic_stats tx_stats; + struct traffic_stats rx_stats; + + /* counts interrupts */ + struct isr_statistics isr_stats; + + struct iwl_power_mgr power_data; + + /* context information */ + u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */ + + /* station table variables */ + + /* Note: if lock and sta_lock are needed, lock must be acquired first */ + spinlock_t sta_lock; + int num_stations; + struct iwl_station_entry stations[IWL_STATION_COUNT]; + unsigned long ucode_key_table; + + /* queue refcounts */ +#define IWL_MAX_HW_QUEUES 32 + unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; + /* for each AC */ + atomic_t queue_stop_count[4]; + + /* Indication if ieee80211_ops->open has been called */ + u8 is_open; + + u8 mac80211_registered; + + /* eeprom -- this is in the card's little endian byte order */ + u8 *eeprom; + struct iwl_eeprom_calib_info *calib_info; + + enum nl80211_iftype iw_mode; + + /* Last Rx'd beacon timestamp */ + u64 timestamp; + + union { +#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE) + struct { + void *shared_virt; + dma_addr_t shared_phys; + + struct delayed_work thermal_periodic; + struct delayed_work rfkill_poll; + + struct iwl3945_notif_statistics statistics; +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + struct iwl3945_notif_statistics accum_statistics; + struct iwl3945_notif_statistics delta_statistics; + struct iwl3945_notif_statistics max_delta; +#endif + + u32 sta_supp_rates; + int last_rx_rssi; /* From Rx packet statistics */ + + /* Rx'd packet timing information */ + u32 last_beacon_time; + u64 last_tsf; + + /* + * each calibration channel group in the + * EEPROM has a derived clip setting for + * each rate. 
+ */ + const struct iwl3945_clip_group clip_groups[5]; + + } _3945; +#endif +#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE) + struct { + struct iwl_rx_phy_res last_phy_res; + bool last_phy_res_valid; + + struct completion firmware_loading_complete; + + /* + * chain noise reset and gain commands are the + * two extra calibration commands follows the standard + * phy calibration commands + */ + u8 phy_calib_chain_noise_reset_cmd; + u8 phy_calib_chain_noise_gain_cmd; + + struct iwl_notif_statistics statistics; +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + struct iwl_notif_statistics accum_statistics; + struct iwl_notif_statistics delta_statistics; + struct iwl_notif_statistics max_delta; +#endif + + } _4965; +#endif + }; + + struct iwl_hw_params hw_params; + + u32 inta_mask; + + struct workqueue_struct *workqueue; + + struct work_struct restart; + struct work_struct scan_completed; + struct work_struct rx_replenish; + struct work_struct abort_scan; + + struct iwl_rxon_context *beacon_ctx; + struct sk_buff *beacon_skb; + + struct work_struct tx_flush; + + struct tasklet_struct irq_tasklet; + + struct delayed_work init_alive_start; + struct delayed_work alive_start; + struct delayed_work scan_check; + + /* TX Power */ + s8 tx_power_user_lmt; + s8 tx_power_device_lmt; + s8 tx_power_next; + + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + /* debugging info */ + u32 debug_level; /* per device debugging will override global + iwlegacy_debug_level if set */ +#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */ +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + /* debugfs */ + u16 tx_traffic_idx; + u16 rx_traffic_idx; + u8 *tx_traffic; + u8 *rx_traffic; + struct dentry *debugfs_dir; + u32 dbgfs_sram_offset, dbgfs_sram_len; + bool disable_ht40; +#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */ + + struct work_struct txpower_work; + u32 disable_sens_cal; + u32 disable_chain_noise_cal; + u32 disable_tx_power_cal; + struct work_struct run_time_calib_work; + struct timer_list statistics_periodic; + struct timer_list watchdog; + bool hw_ready; + + struct led_classdev led; + unsigned long blink_on, blink_off; + bool led_registered; +}; /*iwl_priv */ + +static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id) +{ + set_bit(txq_id, &priv->txq_ctx_active_msk); +} + +static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id) +{ + clear_bit(txq_id, &priv->txq_ctx_active_msk); +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +/* + * iwl_legacy_get_debug_level: Return active debug level for device + * + * Using sysfs it is possible to set per device debug level. This debug + * level will be used if set, otherwise the global debug level which can be + * set via module parameter is used. + */ +static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv) +{ + if (priv->debug_level) + return priv->debug_level; + else + return iwlegacy_debug_level; +} +#else +static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv) +{ + return iwlegacy_debug_level; +} +#endif + + +static inline struct ieee80211_hdr * +iwl_legacy_tx_queue_get_hdr(struct iwl_priv *priv, + int txq_id, int idx) +{ + if (priv->txq[txq_id].txb[idx].skb) + return (struct ieee80211_hdr *)priv->txq[txq_id]. 
+ txb[idx].skb->data; + return NULL; +} + +static inline struct iwl_rxon_context * +iwl_legacy_rxon_ctx_from_vif(struct ieee80211_vif *vif) +{ + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + + return vif_priv->ctx; +} + +#define for_each_context(priv, ctx) \ + for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \ + ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \ + if (priv->valid_contexts & BIT(ctx->ctxid)) + +static inline int iwl_legacy_is_associated(struct iwl_priv *priv, + enum iwl_rxon_context_id ctxid) +{ + return (priv->contexts[ctxid].active.filter_flags & + RXON_FILTER_ASSOC_MSK) ? 1 : 0; +} + +static inline int iwl_legacy_is_any_associated(struct iwl_priv *priv) +{ + return iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS); +} + +static inline int iwl_legacy_is_associated_ctx(struct iwl_rxon_context *ctx) +{ + return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0; +} + +static inline int iwl_legacy_is_channel_valid(const struct iwl_channel_info *ch_info) +{ + if (ch_info == NULL) + return 0; + return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0; +} + +static inline int iwl_legacy_is_channel_radar(const struct iwl_channel_info *ch_info) +{ + return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0; +} + +static inline u8 iwl_legacy_is_channel_a_band(const struct iwl_channel_info *ch_info) +{ + return ch_info->band == IEEE80211_BAND_5GHZ; +} + +static inline int +iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch) +{ + return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0; +} + +static inline int +iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch) +{ + return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0; +} + +static inline void +__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page) +{ + __free_pages(page, priv->hw_params.rx_page_order); + priv->alloc_rxb_page--; +} + +static inline void iwl_legacy_free_pages(struct iwl_priv *priv, unsigned long page) +{ + free_pages(page, priv->hw_params.rx_page_order); + priv->alloc_rxb_page--; +} +#endif /* __iwl_legacy_dev_h__ */ diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/trunk/drivers/net/wireless/iwlegacy/iwl-devtrace.c new file mode 100644 index 000000000000..acec99197ce0 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-devtrace.c @@ -0,0 +1,42 @@ +/****************************************************************************** + * + * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include + +/* sparse doesn't like tracepoint macros */ +#ifndef __CHECKER__ +#include "iwl-dev.h" + +#define CREATE_TRACE_POINTS +#include "iwl-devtrace.h" + +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error); +#endif diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/trunk/drivers/net/wireless/iwlegacy/iwl-devtrace.h new file mode 100644 index 000000000000..a443725ba6be --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-devtrace.h @@ -0,0 +1,210 @@ +/****************************************************************************** + * + * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ) +#define __IWLWIFI_LEGACY_DEVICE_TRACE + +#include + +#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__) +#undef TRACE_EVENT +#define TRACE_EVENT(name, proto, ...) 
\ +static inline void trace_ ## name(proto) {} +#endif + + +#define PRIV_ENTRY __field(struct iwl_priv *, priv) +#define PRIV_ASSIGN (__entry->priv = priv) + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iwlwifi_legacy_io + +TRACE_EVENT(iwlwifi_legacy_dev_ioread32, + TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val), + TP_ARGS(priv, offs, val), + TP_STRUCT__entry( + PRIV_ENTRY + __field(u32, offs) + __field(u32, val) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->offs = offs; + __entry->val = val; + ), + TP_printk("[%p] read io[%#x] = %#x", __entry->priv, + __entry->offs, __entry->val) +); + +TRACE_EVENT(iwlwifi_legacy_dev_iowrite8, + TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val), + TP_ARGS(priv, offs, val), + TP_STRUCT__entry( + PRIV_ENTRY + __field(u32, offs) + __field(u8, val) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->offs = offs; + __entry->val = val; + ), + TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, + __entry->offs, __entry->val) +); + +TRACE_EVENT(iwlwifi_legacy_dev_iowrite32, + TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val), + TP_ARGS(priv, offs, val), + TP_STRUCT__entry( + PRIV_ENTRY + __field(u32, offs) + __field(u32, val) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->offs = offs; + __entry->val = val; + ), + TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, + __entry->offs, __entry->val) +); + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iwlwifi_legacy_ucode + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iwlwifi + +TRACE_EVENT(iwlwifi_legacy_dev_hcmd, + TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags), + TP_ARGS(priv, hcmd, len, flags), + TP_STRUCT__entry( + PRIV_ENTRY + __dynamic_array(u8, hcmd, len) + __field(u32, flags) + ), + TP_fast_assign( + PRIV_ASSIGN; + memcpy(__get_dynamic_array(hcmd), hcmd, len); + __entry->flags = flags; + ), + TP_printk("[%p] hcmd %#.2x (%ssync)", + __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0], + __entry->flags & CMD_ASYNC ? "a" : "") +); + +TRACE_EVENT(iwlwifi_legacy_dev_rx, + TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len), + TP_ARGS(priv, rxbuf, len), + TP_STRUCT__entry( + PRIV_ENTRY + __dynamic_array(u8, rxbuf, len) + ), + TP_fast_assign( + PRIV_ASSIGN; + memcpy(__get_dynamic_array(rxbuf), rxbuf, len); + ), + TP_printk("[%p] RX cmd %#.2x", + __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4]) +); + +TRACE_EVENT(iwlwifi_legacy_dev_tx, + TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen, + void *buf0, size_t buf0_len, + void *buf1, size_t buf1_len), + TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len), + TP_STRUCT__entry( + PRIV_ENTRY + + __field(size_t, framelen) + __dynamic_array(u8, tfd, tfdlen) + + /* + * Do not insert between or below these items, + * we want to keep the frame together (except + * for the possible padding). 
+ */ + __dynamic_array(u8, buf0, buf0_len) + __dynamic_array(u8, buf1, buf1_len) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->framelen = buf0_len + buf1_len; + memcpy(__get_dynamic_array(tfd), tfd, tfdlen); + memcpy(__get_dynamic_array(buf0), buf0, buf0_len); + memcpy(__get_dynamic_array(buf1), buf1, buf1_len); + ), + TP_printk("[%p] TX %.2x (%zu bytes)", + __entry->priv, + ((u8 *)__get_dynamic_array(buf0))[0], + __entry->framelen) +); + +TRACE_EVENT(iwlwifi_legacy_dev_ucode_error, + TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time, + u32 data1, u32 data2, u32 line, u32 blink1, + u32 blink2, u32 ilink1, u32 ilink2), + TP_ARGS(priv, desc, time, data1, data2, line, + blink1, blink2, ilink1, ilink2), + TP_STRUCT__entry( + PRIV_ENTRY + __field(u32, desc) + __field(u32, time) + __field(u32, data1) + __field(u32, data2) + __field(u32, line) + __field(u32, blink1) + __field(u32, blink2) + __field(u32, ilink1) + __field(u32, ilink2) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->desc = desc; + __entry->time = time; + __entry->data1 = data1; + __entry->data2 = data2; + __entry->line = line; + __entry->blink1 = blink1; + __entry->blink2 = blink2; + __entry->ilink1 = ilink1; + __entry->ilink2 = ilink2; + ), + TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, " + "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X", + __entry->priv, __entry->desc, __entry->time, __entry->data1, + __entry->data2, __entry->line, __entry->blink1, + __entry->blink2, __entry->ilink1, __entry->ilink2) +); + +#endif /* __IWLWIFI_DEVICE_TRACE */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE iwl-devtrace +#include diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/trunk/drivers/net/wireless/iwlegacy/iwl-eeprom.c new file mode 100644 index 000000000000..5bf3f49b74ab --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-eeprom.c @@ -0,0 +1,553 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + + +#include +#include +#include +#include + +#include + +#include "iwl-commands.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-debug.h" +#include "iwl-eeprom.h" +#include "iwl-io.h" + +/************************** EEPROM BANDS **************************** + * + * The iwlegacy_eeprom_band definitions below provide the mapping from the + * EEPROM contents to the specific channel number supported for each + * band. + * + * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3 + * definition below maps to physical channel 42 in the 5.2GHz spectrum. + * The specific geography and calibration information for that channel + * is contained in the eeprom map itself. + * + * During init, we copy the eeprom information and channel map + * information into priv->channel_info_24/52 and priv->channel_map_24/52 + * + * channel_map_24/52 provides the index in the channel_info array for a + * given channel. We have to have two separate maps as there is channel + * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and + * band_2 + * + * A value of 0xff stored in the channel_map indicates that the channel + * is not supported by the hardware at all. + * + * A value of 0xfe in the channel_map indicates that the channel is not + * valid for Tx with the current hardware. This means that + * while the system can tune and receive on a given channel, it may not + * be able to associate or transmit any frames on that + * channel. There is no corresponding channel information for that + * entry. 
+ * + *********************************************************************/ + +/* 2.4 GHz */ +const u8 iwlegacy_eeprom_band_1[14] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 +}; + +/* 5.2 GHz bands */ +static const u8 iwlegacy_eeprom_band_2[] = { /* 4915-5080MHz */ + 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 +}; + +static const u8 iwlegacy_eeprom_band_3[] = { /* 5170-5320MHz */ + 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 +}; + +static const u8 iwlegacy_eeprom_band_4[] = { /* 5500-5700MHz */ + 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 +}; + +static const u8 iwlegacy_eeprom_band_5[] = { /* 5725-5825MHz */ + 145, 149, 153, 157, 161, 165 +}; + +static const u8 iwlegacy_eeprom_band_6[] = { /* 2.4 ht40 channel */ + 1, 2, 3, 4, 5, 6, 7 +}; + +static const u8 iwlegacy_eeprom_band_7[] = { /* 5.2 ht40 channel */ + 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 +}; + +/****************************************************************************** + * + * EEPROM related functions + * +******************************************************************************/ + +static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv) +{ + u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; + int ret = 0; + + IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp); + switch (gp) { + case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K: + case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K: + break; + default: + IWL_ERR(priv, "bad EEPROM signature," + "EEPROM_GP=0x%08x\n", gp); + ret = -ENOENT; + break; + } + return ret; +} + +const u8 +*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset) +{ + BUG_ON(offset >= priv->cfg->base_params->eeprom_size); + return &priv->eeprom[offset]; +} +EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr); + +u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset) +{ + if (!priv->eeprom) + return 0; + return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8); +} +EXPORT_SYMBOL(iwl_legacy_eeprom_query16); + +/** + * iwl_legacy_eeprom_init - read EEPROM contents + * + * Load the EEPROM contents from adapter into priv->eeprom + * + * NOTE: This routine uses the non-debug IO access functions. 
+ */ +int iwl_legacy_eeprom_init(struct iwl_priv *priv) +{ + __le16 *e; + u32 gp = iwl_read32(priv, CSR_EEPROM_GP); + int sz; + int ret; + u16 addr; + + /* allocate eeprom */ + sz = priv->cfg->base_params->eeprom_size; + IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz); + priv->eeprom = kzalloc(sz, GFP_KERNEL); + if (!priv->eeprom) { + ret = -ENOMEM; + goto alloc_err; + } + e = (__le16 *)priv->eeprom; + + priv->cfg->ops->lib->apm_ops.init(priv); + + ret = iwl_legacy_eeprom_verify_signature(priv); + if (ret < 0) { + IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp); + ret = -ENOENT; + goto err; + } + + /* Make sure driver (instead of uCode) is allowed to read EEPROM */ + ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv); + if (ret < 0) { + IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n"); + ret = -ENOENT; + goto err; + } + + /* eeprom is an array of 16bit values */ + for (addr = 0; addr < sz; addr += sizeof(u16)) { + u32 r; + + _iwl_legacy_write32(priv, CSR_EEPROM_REG, + CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); + + ret = iwl_poll_bit(priv, CSR_EEPROM_REG, + CSR_EEPROM_REG_READ_VALID_MSK, + CSR_EEPROM_REG_READ_VALID_MSK, + IWL_EEPROM_ACCESS_TIMEOUT); + if (ret < 0) { + IWL_ERR(priv, "Time out reading EEPROM[%d]\n", + addr); + goto done; + } + r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG); + e[addr / 2] = cpu_to_le16(r >> 16); + } + + IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n", + "EEPROM", + iwl_legacy_eeprom_query16(priv, EEPROM_VERSION)); + + ret = 0; +done: + priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv); + +err: + if (ret) + iwl_legacy_eeprom_free(priv); + /* Reset chip to save power until we load uCode during "up". */ + iwl_legacy_apm_stop(priv); +alloc_err: + return ret; +} +EXPORT_SYMBOL(iwl_legacy_eeprom_init); + +void iwl_legacy_eeprom_free(struct iwl_priv *priv) +{ + kfree(priv->eeprom); + priv->eeprom = NULL; +} +EXPORT_SYMBOL(iwl_legacy_eeprom_free); + +static void iwl_legacy_init_band_reference(const struct iwl_priv *priv, + int eep_band, int *eeprom_ch_count, + const struct iwl_eeprom_channel **eeprom_ch_info, + const u8 **eeprom_ch_index) +{ + u32 offset = priv->cfg->ops->lib-> + eeprom_ops.regulatory_bands[eep_band - 1]; + switch (eep_band) { + case 1: /* 2.4GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_1); + *eeprom_ch_info = (struct iwl_eeprom_channel *) + iwl_legacy_eeprom_query_addr(priv, offset); + *eeprom_ch_index = iwlegacy_eeprom_band_1; + break; + case 2: /* 4.9GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_2); + *eeprom_ch_info = (struct iwl_eeprom_channel *) + iwl_legacy_eeprom_query_addr(priv, offset); + *eeprom_ch_index = iwlegacy_eeprom_band_2; + break; + case 3: /* 5.2GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_3); + *eeprom_ch_info = (struct iwl_eeprom_channel *) + iwl_legacy_eeprom_query_addr(priv, offset); + *eeprom_ch_index = iwlegacy_eeprom_band_3; + break; + case 4: /* 5.5GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_4); + *eeprom_ch_info = (struct iwl_eeprom_channel *) + iwl_legacy_eeprom_query_addr(priv, offset); + *eeprom_ch_index = iwlegacy_eeprom_band_4; + break; + case 5: /* 5.7GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_5); + *eeprom_ch_info = (struct iwl_eeprom_channel *) + iwl_legacy_eeprom_query_addr(priv, offset); + *eeprom_ch_index = iwlegacy_eeprom_band_5; + break; + case 6: /* 2.4GHz ht40 channels */ + *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_6); + *eeprom_ch_info = (struct 
iwl_eeprom_channel *) + iwl_legacy_eeprom_query_addr(priv, offset); + *eeprom_ch_index = iwlegacy_eeprom_band_6; + break; + case 7: /* 5 GHz ht40 channels */ + *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_7); + *eeprom_ch_info = (struct iwl_eeprom_channel *) + iwl_legacy_eeprom_query_addr(priv, offset); + *eeprom_ch_index = iwlegacy_eeprom_band_7; + break; + default: + BUG(); + } +} + +#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \ + ? # x " " : "") +/** + * iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv. + * + * Does not set up a command, or touch hardware. + */ +static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv, + enum ieee80211_band band, u16 channel, + const struct iwl_eeprom_channel *eeprom_ch, + u8 clear_ht40_extension_channel) +{ + struct iwl_channel_info *ch_info; + + ch_info = (struct iwl_channel_info *) + iwl_legacy_get_channel_info(priv, band, channel); + + if (!iwl_legacy_is_channel_valid(ch_info)) + return -1; + + IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):" + " Ad-Hoc %ssupported\n", + ch_info->channel, + iwl_legacy_is_channel_a_band(ch_info) ? + "5.2" : "2.4", + CHECK_AND_PRINT(IBSS), + CHECK_AND_PRINT(ACTIVE), + CHECK_AND_PRINT(RADAR), + CHECK_AND_PRINT(WIDE), + CHECK_AND_PRINT(DFS), + eeprom_ch->flags, + eeprom_ch->max_power_avg, + ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) + && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? + "" : "not "); + + ch_info->ht40_eeprom = *eeprom_ch; + ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg; + ch_info->ht40_flags = eeprom_ch->flags; + if (eeprom_ch->flags & EEPROM_CHANNEL_VALID) + ch_info->ht40_extension_channel &= + ~clear_ht40_extension_channel; + + return 0; +} + +#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \ + ? # x " " : "") + +/** + * iwl_legacy_init_channel_map - Set up driver's info for all possible channels + */ +int iwl_legacy_init_channel_map(struct iwl_priv *priv) +{ + int eeprom_ch_count = 0; + const u8 *eeprom_ch_index = NULL; + const struct iwl_eeprom_channel *eeprom_ch_info = NULL; + int band, ch; + struct iwl_channel_info *ch_info; + + if (priv->channel_count) { + IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n"); + return 0; + } + + IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n"); + + priv->channel_count = + ARRAY_SIZE(iwlegacy_eeprom_band_1) + + ARRAY_SIZE(iwlegacy_eeprom_band_2) + + ARRAY_SIZE(iwlegacy_eeprom_band_3) + + ARRAY_SIZE(iwlegacy_eeprom_band_4) + + ARRAY_SIZE(iwlegacy_eeprom_band_5); + + IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n", + priv->channel_count); + + priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) * + priv->channel_count, GFP_KERNEL); + if (!priv->channel_info) { + IWL_ERR(priv, "Could not allocate channel_info\n"); + priv->channel_count = 0; + return -ENOMEM; + } + + ch_info = priv->channel_info; + + /* Loop through the 5 EEPROM bands adding them in order to the + * channel map we maintain (that contains additional information than + * what just in the EEPROM) */ + for (band = 1; band <= 5; band++) { + + iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count, + &eeprom_ch_info, &eeprom_ch_index); + + /* Loop through each band adding each of the channels */ + for (ch = 0; ch < eeprom_ch_count; ch++) { + ch_info->channel = eeprom_ch_index[ch]; + ch_info->band = (band == 1) ? 
IEEE80211_BAND_2GHZ : + IEEE80211_BAND_5GHZ; + + /* permanently store EEPROM's channel regulatory flags + * and max power in channel info database. */ + ch_info->eeprom = eeprom_ch_info[ch]; + + /* Copy the run-time flags so they are there even on + * invalid channels */ + ch_info->flags = eeprom_ch_info[ch].flags; + /* First write that ht40 is not enabled, and then enable + * one by one */ + ch_info->ht40_extension_channel = + IEEE80211_CHAN_NO_HT40; + + if (!(iwl_legacy_is_channel_valid(ch_info))) { + IWL_DEBUG_EEPROM(priv, + "Ch. %d Flags %x [%sGHz] - " + "No traffic\n", + ch_info->channel, + ch_info->flags, + iwl_legacy_is_channel_a_band(ch_info) ? + "5.2" : "2.4"); + ch_info++; + continue; + } + + /* Initialize regulatory-based run-time data */ + ch_info->max_power_avg = ch_info->curr_txpow = + eeprom_ch_info[ch].max_power_avg; + ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; + ch_info->min_power = 0; + + IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] " + "%s%s%s%s%s%s(0x%02x %ddBm):" + " Ad-Hoc %ssupported\n", + ch_info->channel, + iwl_legacy_is_channel_a_band(ch_info) ? + "5.2" : "2.4", + CHECK_AND_PRINT_I(VALID), + CHECK_AND_PRINT_I(IBSS), + CHECK_AND_PRINT_I(ACTIVE), + CHECK_AND_PRINT_I(RADAR), + CHECK_AND_PRINT_I(WIDE), + CHECK_AND_PRINT_I(DFS), + eeprom_ch_info[ch].flags, + eeprom_ch_info[ch].max_power_avg, + ((eeprom_ch_info[ch]. + flags & EEPROM_CHANNEL_IBSS) + && !(eeprom_ch_info[ch]. + flags & EEPROM_CHANNEL_RADAR)) + ? "" : "not "); + + ch_info++; + } + } + + /* Check if we do have HT40 channels */ + if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] == + EEPROM_REGULATORY_BAND_NO_HT40 && + priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] == + EEPROM_REGULATORY_BAND_NO_HT40) + return 0; + + /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */ + for (band = 6; band <= 7; band++) { + enum ieee80211_band ieeeband; + + iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count, + &eeprom_ch_info, &eeprom_ch_index); + + /* EEPROM band 6 is 2.4, band 7 is 5 GHz */ + ieeeband = + (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + + /* Loop through each band adding each of the channels */ + for (ch = 0; ch < eeprom_ch_count; ch++) { + /* Set up driver's info for lower half */ + iwl_legacy_mod_ht40_chan_info(priv, ieeeband, + eeprom_ch_index[ch], + &eeprom_ch_info[ch], + IEEE80211_CHAN_NO_HT40PLUS); + + /* Set up driver's info for upper half */ + iwl_legacy_mod_ht40_chan_info(priv, ieeeband, + eeprom_ch_index[ch] + 4, + &eeprom_ch_info[ch], + IEEE80211_CHAN_NO_HT40MINUS); + } + } + + return 0; +} +EXPORT_SYMBOL(iwl_legacy_init_channel_map); + +/* + * iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map + */ +void iwl_legacy_free_channel_map(struct iwl_priv *priv) +{ + kfree(priv->channel_info); + priv->channel_count = 0; +} +EXPORT_SYMBOL(iwl_legacy_free_channel_map); + +/** + * iwl_legacy_get_channel_info - Find driver's private channel info + * + * Based on band and channel number. 
+ */ +const struct +iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv, + enum ieee80211_band band, u16 channel) +{ + int i; + + switch (band) { + case IEEE80211_BAND_5GHZ: + for (i = 14; i < priv->channel_count; i++) { + if (priv->channel_info[i].channel == channel) + return &priv->channel_info[i]; + } + break; + case IEEE80211_BAND_2GHZ: + if (channel >= 1 && channel <= 14) + return &priv->channel_info[channel - 1]; + break; + default: + BUG(); + } + + return NULL; +} +EXPORT_SYMBOL(iwl_legacy_get_channel_info); diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-eeprom.h b/trunk/drivers/net/wireless/iwlegacy/iwl-eeprom.h new file mode 100644 index 000000000000..c59c81002022 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-eeprom.h @@ -0,0 +1,344 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __iwl_legacy_eeprom_h__ +#define __iwl_legacy_eeprom_h__ + +#include + +struct iwl_priv; + +/* + * EEPROM access time values: + * + * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG. + * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1). + * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec. + * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG. + */ +#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */ + +#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */ +#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ + + +/* + * Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags. + * + * IBSS and/or AP operation is allowed *only* on those channels with + * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because + * RADAR detection is not supported by the 4965 driver, but is a + * requirement for establishing a new network for legal operation on channels + * requiring RADAR detection or restricting ACTIVE scanning. + * + * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels. + * It only indicates that 20 MHz channel use is supported; HT40 channel + * usage is indicated by a separate set of regulatory flags for each + * HT40 channel pair. + * + * NOTE: Using a channel inappropriately will result in a uCode error! + */ +#define IWL_NUM_TX_CALIB_GROUPS 5 +enum { + EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */ + EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */ + /* Bit 2 Reserved */ + EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */ + EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */ + EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */ + /* Bit 6 Reserved (was Narrow Channel) */ + EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */ +}; + +/* SKU Capabilities */ +/* 3945 only */ +#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0) +#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1) + +/* *regulatory* channel data format in eeprom, one for each channel. + * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. 
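+ *
+ * Illustrative sketch (not part of the original file), assuming the
+ * iwl_legacy_eeprom_query_addr() helper declared later in this header:
+ * the raw entry for 2.4 GHz channel N (1..14) could be fetched as
+ *
+ *	const struct iwl_eeprom_channel *ch = (const void *)
+ *		iwl_legacy_eeprom_query_addr(priv,
+ *			EEPROM_REGULATORY_BAND_1_CHANNELS +
+ *			(N - 1) * sizeof(struct iwl_eeprom_channel));
+ *
+ * after which ch->flags carries the EEPROM_CHANNEL_* bits and
+ * ch->max_power_avg the regulatory txpower limit in dBm.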
*/ +struct iwl_eeprom_channel { + u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */ + s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */ +} __packed; + +/* 3945 Specific */ +#define EEPROM_3945_EEPROM_VERSION (0x2f) + +/* 4965 has two radio transmitters (and 3 radio receivers) */ +#define EEPROM_TX_POWER_TX_CHAINS (2) + +/* 4965 has room for up to 8 sets of txpower calibration data */ +#define EEPROM_TX_POWER_BANDS (8) + +/* 4965 factory calibration measures txpower gain settings for + * each of 3 target output levels */ +#define EEPROM_TX_POWER_MEASUREMENTS (3) + +/* 4965 Specific */ +/* 4965 driver does not work with txpower calibration version < 5 */ +#define EEPROM_4965_TX_POWER_VERSION (5) +#define EEPROM_4965_EEPROM_VERSION (0x2f) +#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */ +#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */ +#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */ +#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */ + +/* 2.4 GHz */ +extern const u8 iwlegacy_eeprom_band_1[14]; + +/* + * factory calibration data for one txpower level, on one channel, + * measured on one of the 2 tx chains (radio transmitter and associated + * antenna). EEPROM contains: + * + * 1) Temperature (degrees Celsius) of device when measurement was made. + * + * 2) Gain table index used to achieve the target measurement power. + * This refers to the "well-known" gain tables (see iwl-4965-hw.h). + * + * 3) Actual measured output power, in half-dBm ("34" = 17 dBm). + * + * 4) RF power amplifier detector level measurement (not used). + */ +struct iwl_eeprom_calib_measure { + u8 temperature; /* Device temperature (Celsius) */ + u8 gain_idx; /* Index into gain table */ + u8 actual_pow; /* Measured RF output power, half-dBm */ + s8 pa_det; /* Power amp detector level (not used) */ +} __packed; + + +/* + * measurement set for one channel. EEPROM contains: + * + * 1) Channel number measured + * + * 2) Measurements for each of 3 power levels for each of 2 radio transmitters + * (a.k.a. "tx chains") (6 measurements altogether) + */ +struct iwl_eeprom_calib_ch_info { + u8 ch_num; + struct iwl_eeprom_calib_measure + measurements[EEPROM_TX_POWER_TX_CHAINS] + [EEPROM_TX_POWER_MEASUREMENTS]; +} __packed; + +/* + * txpower subband info. + * + * For each frequency subband, EEPROM contains the following: + * + * 1) First and last channels within range of the subband. "0" values + * indicate that this sample set is not being used. + * + * 2) Sample measurement sets for 2 channels close to the range endpoints. + */ +struct iwl_eeprom_calib_subband_info { + u8 ch_from; /* channel number of lowest channel in subband */ + u8 ch_to; /* channel number of highest channel in subband */ + struct iwl_eeprom_calib_ch_info ch1; + struct iwl_eeprom_calib_ch_info ch2; +} __packed; + + +/* + * txpower calibration info. EEPROM contains: + * + * 1) Factory-measured saturation power levels (maximum levels at which + * tx power amplifier can output a signal without too much distortion). + * There is one level for 2.4 GHz band and one for 5 GHz band. These + * values apply to all channels within each of the bands. + * + * 2) Factory-measured power supply voltage level. This is assumed to be + * constant (i.e. same value applies to all channels/bands) while the + * factory measurements are being made. + * + * 3) Up to 8 sets of factory-measured txpower calibration values. 
+ * These are for different frequency ranges, since txpower gain + * characteristics of the analog radio circuitry vary with frequency. + * + * Not all sets need to be filled with data; + * struct iwl_eeprom_calib_subband_info contains range of channels + * (0 if unused) for each set of data. + */ +struct iwl_eeprom_calib_info { + u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */ + u8 saturation_power52; /* half-dBm */ + __le16 voltage; /* signed */ + struct iwl_eeprom_calib_subband_info + band_info[EEPROM_TX_POWER_BANDS]; +} __packed; + + +/* General */ +#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */ +#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */ +#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */ +#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */ +#define EEPROM_VERSION (2*0x44) /* 2 bytes */ +#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */ +#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */ +#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */ +#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */ +#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */ + +/* The following masks are to be applied on EEPROM_RADIO_CONFIG */ +#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */ +#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */ +#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */ +#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */ +#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */ +#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */ + +#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0 +#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1 + +/* + * Per-channel regulatory data. + * + * Each channel that *might* be supported by iwl has a fixed location + * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory + * txpower (MSB). + * + * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz) + * channels (only for 4965, not supported by 3945) appear later in the EEPROM. + * + * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 + */ +#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */ +#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */ + +/* + * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196, + * 5.0 GHz channels 7, 8, 11, 12, 16 + * (4915-5080MHz) (none of these is ever supported) + */ +#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */ + +/* + * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 + * (5170-5320MHz) + */ +#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */ + +/* + * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 + * (5500-5700MHz) + */ +#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */ + +/* + * 5.7 GHz channels 145, 149, 153, 157, 161, 165 + * (5725-5825MHz) + */ +#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */ + +/* + * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11) + * + * The channel listed is the center of the lower 20 MHz half of the channel. 
+ * The overall center frequency is actually 2 channels (10 MHz) above that, + * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away + * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5, + * and the overall HT40 channel width centers on channel 3. + * + * NOTE: The RXON command uses 20 MHz channel numbers to specify the + * control channel to which to tune. RXON also specifies whether the + * control channel is the upper or lower half of a HT40 channel. + * + * NOTE: 4965 does not support HT40 channels on 2.4 GHz. + */ +#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */ + +/* + * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64), + * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161) + */ +#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */ + +#define EEPROM_REGULATORY_BAND_NO_HT40 (0) + +struct iwl_eeprom_ops { + const u32 regulatory_bands[7]; + int (*acquire_semaphore) (struct iwl_priv *priv); + void (*release_semaphore) (struct iwl_priv *priv); +}; + + +int iwl_legacy_eeprom_init(struct iwl_priv *priv); +void iwl_legacy_eeprom_free(struct iwl_priv *priv); +const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, + size_t offset); +u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset); +int iwl_legacy_init_channel_map(struct iwl_priv *priv); +void iwl_legacy_free_channel_map(struct iwl_priv *priv); +const struct iwl_channel_info *iwl_legacy_get_channel_info( + const struct iwl_priv *priv, + enum ieee80211_band band, u16 channel); + +#endif /* __iwl_legacy_eeprom_h__ */ diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-fh.h b/trunk/drivers/net/wireless/iwlegacy/iwl-fh.h new file mode 100644 index 000000000000..6e6091816e36 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-fh.h @@ -0,0 +1,513 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_legacy_fh_h__ +#define __iwl_legacy_fh_h__ + +/****************************/ +/* Flow Handler Definitions */ +/****************************/ + +/** + * This I/O area is directly read/writable by driver (e.g. Linux uses writel()) + * Addresses are offsets from device's PCI hardware base address. + */ +#define FH_MEM_LOWER_BOUND (0x1000) +#define FH_MEM_UPPER_BOUND (0x2000) + +/** + * Keep-Warm (KW) buffer base address. + * + * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the + * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency + * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host + * from going into a power-savings mode that would cause higher DRAM latency, + * and possible data over/under-runs, before all Tx/Rx is complete. + * + * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4) + * of the buffer, which must be 4K aligned. Once this is set up, the 4965 + * automatically invokes keep-warm accesses when normal accesses might not + * be sufficient to maintain fast DRAM response. + * + * Bit fields: + * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned + */ +#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C) + + +/** + * TFD Circular Buffers Base (CBBC) addresses + * + * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident + * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs) + * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04 + * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte + * aligned (address bits 0-7 must be 0). + * + * Bit fields in each pointer register: + * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned + */ +#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) +#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10) + +/* Find TFD CB base pointer for given queue (range 0-15). 
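+ * For illustration (not in the original comment): with FH_MEM_LOWER_BOUND
+ * at 0x1000, the base-pointer register for queue 5 sits at
+ * 0x1000 + 0x9D0 + 5 * 0x4 = 0x19E4.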
*/ +#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4) + + +/** + * Rx SRAM Control and Status Registers (RSCSR) + * + * These registers provide handshake between driver and 4965 for the Rx queue + * (this queue handles *all* command responses, notifications, Rx data, etc. + * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx + * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can + * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer + * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1 + * mapping between RBDs and RBs. + * + * Driver must allocate host DRAM memory for the following, and set the + * physical address of each into 4965 registers: + * + * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256 + * entries (although any power of 2, up to 4096, is selectable by driver). + * Each entry (1 dword) points to a receive buffer (RB) of consistent size + * (typically 4K, although 8K or 16K are also selectable by driver). + * Driver sets up RB size and number of RBDs in the CB via Rx config + * register FH_MEM_RCSR_CHNL0_CONFIG_REG. + * + * Bit fields within one RBD: + * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned + * + * Driver sets physical address [35:8] of base of RBD circular buffer + * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0]. + * + * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers + * (RBs) have been filled, via a "write pointer", actually the index of + * the RB's corresponding RBD within the circular buffer. Driver sets + * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0]. + * + * Bit fields in lower dword of Rx status buffer (upper dword not used + * by driver; see struct iwl4965_shared, val0): + * 31-12: Not used by driver + * 11- 0: Index of last filled Rx buffer descriptor + * (4965 writes, driver reads this value) + * + * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must + * enter pointers to these RBs into contiguous RBD circular buffer entries, + * and update the 4965's "write" index register, + * FH_RSCSR_CHNL0_RBDCB_WPTR_REG. + * + * This "write" index corresponds to the *next* RBD that the driver will make + * available, i.e. one RBD past the tail of the ready-to-fill RBDs within + * the circular buffer. This value should initially be 0 (before preparing any + * RBs), should be 8 after preparing the first 8 RBs (for example), and must + * wrap back to 0 at the end of the circular buffer (but don't wrap before + * "read" index has advanced past 1! See below). + * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8. + * + * As the 4965 fills RBs (referenced from contiguous RBDs within the circular + * buffer), it updates the Rx status buffer in host DRAM, 2) described above, + * to tell the driver the index of the latest filled RBD. The driver must + * read this "read" index from DRAM after receiving an Rx interrupt from 4965. + * + * The driver must also internally keep track of a third index, which is the + * next RBD to process. When receiving an Rx interrupt, driver should process + * all filled but unprocessed RBs up to, but not including, the RB + * corresponding to the "read" index. For example, if "read" index becomes "1", + * driver may process the RB pointed to by RBD 0. Depending on volume of + * traffic, there may be many RBs to process. + * + * If read index == write index, 4965 thinks there is no room to put new data. 
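+ * For illustration (not part of the original comment): if the read index
+ * sits at 200 and the driver re-queues buffers all the way around until
+ * its write index also reaches 200, the device sees read == write and
+ * assumes nothing is available, even though all 256 RBDs were just made
+ * ready.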
+ * Due to this, the maximum number of filled RBs is 255, instead of 256. To + * be safe, make sure that there is a gap of at least 2 RBDs between "write" + * and "read" indexes; that is, make sure that there are no more than 254 + * buffers waiting to be filled. + */ +#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0) +#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) +#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND) + +/** + * Physical base address of 8-byte Rx Status buffer. + * Bit fields: + * 31-0: Rx status buffer physical base address [35:4], must 16-byte aligned. + */ +#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0) + +/** + * Physical base address of Rx Buffer Descriptor Circular Buffer. + * Bit fields: + * 27-0: RBD CD physical base address [35:8], must be 256-byte aligned. + */ +#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004) + +/** + * Rx write pointer (index, really!). + * Bit fields: + * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1. + * NOTE: For 256-entry circular buffer, use only bits [7:0]. + */ +#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008) +#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG) + + +/** + * Rx Config/Status Registers (RCSR) + * Rx Config Reg for channel 0 (only channel used) + * + * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for + * normal operation (see bit fields). + * + * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA. + * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for + * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing. + * + * Bit fields: + * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame, + * '10' operate normally + * 29-24: reserved + * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal), + * min "5" for 32 RBDs, max "12" for 4096 RBDs. + * 19-18: reserved + * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K, + * '10' 12K, '11' 16K. 
+ * 15-14: reserved + * 13-12: IRQ destination; '00' none, '01' host driver (normal operation) + * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec) + * typical value 0x10 (about 1/2 msec) + * 3- 0: reserved + */ +#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) +#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0) +#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND) + +#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0) + +#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */ +#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */ +#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */ +#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */ +#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */ +#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/ + +#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20) +#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4) +#define RX_RB_TIMEOUT (0x10) + +#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000) +#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000) +#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000) + +#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000) +#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000) +#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000) +#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000) + +#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004) +#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000) +#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000) + +#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */ + +/** + * Rx Shared Status Registers (RSSR) + * + * After stopping Rx DMA channel (writing 0 to + * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll + * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle. + * + * Bit fields: + * 24: 1 = Channel 0 is idle + * + * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV + * contain default values that should not be altered by the driver. + */ +#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40) +#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00) + +#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND) +#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004) +#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\ + (FH_MEM_RSSR_LOWER_BOUND + 0x008) + +#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000) + +#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28 + +/* TFDB Area - TFDs buffer table */ +#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF) +#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900) +#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958) +#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl)) +#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4) + +/** + * Transmit DMA Channel Control/Status Registers (TCSR) + * + * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels + * supported in hardware (don't confuse these with the 16 Tx queues in DRAM, + * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes. + * + * To use a Tx DMA channel, driver must initialize its + * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with: + * + * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | + * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL + * + * All other bits should be 0. 
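+ *
+ * Illustrative sketch (not from the original comment), assuming the
+ * iwl_legacy_write_direct32() helper from iwl-io.h and the constants
+ * defined below:
+ *
+ *	iwl_legacy_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
+ *			FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ *			FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);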
+ * + * Bit fields: + * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame, + * '10' operate normally + * 29- 4: Reserved, set to "0" + * 3: Enable internal DMA requests (1, normal operation), disable (0) + * 2- 0: Reserved, set to "0" + */ +#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00) +#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60) + +/* Find Control/Status reg for given Tx DMA/FIFO channel */ +#define FH49_TCSR_CHNL_NUM (7) +#define FH50_TCSR_CHNL_NUM (8) + +/* TCSR: tx_config register values */ +#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \ + (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl)) +#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \ + (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4) +#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \ + (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8) + +#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001) + +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008) + +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000) +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000) + +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000) +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000) + +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000) + +#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000) +#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000) +#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003) + +#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20) +#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12) + +/** + * Tx Shared Status Registers (TSSR) + * + * After stopping Tx DMA channel (writing 0 to + * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll + * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle + * (channel's buffers empty | no pending requests). + * + * Bit fields: + * 31-24: 1 = Channel buffers empty (channel 7:0) + * 23-16: 1 = No pending requests (channel 7:0) + */ +#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0) +#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0) + +#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010) + +/** + * Bit fields for TSSR(Tx Shared Status & Control) error status register: + * 31: Indicates an address error when accessed to internal memory + * uCode/driver must write "1" in order to clear this flag + * 30: Indicates that Host did not send the expected number of dwords to FH + * uCode/driver must write "1" in order to clear this flag + * 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA + * command was received from the scheduler while the TRB was already full + * with previous command + * uCode/driver must write "1" in order to clear this flag + * 7-0: Each status bit indicates a channel's TxCredit error. When an error + * bit is set, it indicates that the FH has received a full indication + * from the RTC TxFIFO and the current value of the TxCredit counter was + * not equal to zero. 
This mean that the credit mechanism was not + * synchronized to the TxFIFO status + * uCode/driver must write "1" in order to clear this flag + */ +#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018) + +#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16) + +/* Tx service channels */ +#define FH_SRVC_CHNL (9) +#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8) +#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) +#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \ + (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4) + +#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98) +/* Instruct FH to increment the retry count of a packet when + * it is brought from the memory to TX-FIFO + */ +#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002) + +#define RX_QUEUE_SIZE 256 +#define RX_QUEUE_MASK 255 +#define RX_QUEUE_SIZE_LOG 8 + +/* + * RX related structures and functions + */ +#define RX_FREE_BUFFERS 64 +#define RX_LOW_WATERMARK 8 + +/* Size of one Rx buffer in host DRAM */ +#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */ +#define IWL_RX_BUF_SIZE_4K (4 * 1024) +#define IWL_RX_BUF_SIZE_8K (8 * 1024) + +/** + * struct iwl_rb_status - reseve buffer status + * host memory mapped FH registers + * @closed_rb_num [0:11] - Indicates the index of the RB which was closed + * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed + * @finished_rb_num [0:11] - Indicates the index of the current RB + * in which the last frame was written to + * @finished_fr_num [0:11] - Indicates the index of the RX Frame + * which was transferred + */ +struct iwl_rb_status { + __le16 closed_rb_num; + __le16 closed_fr_num; + __le16 finished_rb_num; + __le16 finished_fr_nam; + __le32 __unused; /* 3945 only */ +} __packed; + + +#define TFD_QUEUE_SIZE_MAX (256) +#define TFD_QUEUE_SIZE_BC_DUP (64) +#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP) +#define IWL_TX_DMA_MASK DMA_BIT_MASK(36) +#define IWL_NUM_OF_TBS 20 + +static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr) +{ + return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF; +} +/** + * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor + * + * This structure contains dma address and length of transmission address + * + * @lo: low [31:0] portion of the dma address of TX buffer + * every even is unaligned on 16 bit boundary + * @hi_n_len 0-3 [35:32] portion of dma + * 4-15 length of the tx buffer + */ +struct iwl_tfd_tb { + __le32 lo; + __le16 hi_n_len; +} __packed; + +/** + * struct iwl_tfd + * + * Transmit Frame Descriptor (TFD) + * + * @ __reserved1[3] reserved + * @ num_tbs 0-4 number of active tbs + * 5 reserved + * 6-7 padding (not used) + * @ tbs[20] transmit frame buffer descriptors + * @ __pad padding + * + * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM. + * Both driver and device share these circular buffers, each of which must be + * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes + * + * Driver must indicate the physical address of the base of each + * circular buffer via the FH_MEM_CBBC_QUEUE registers. + * + * Each TFD contains pointer/size information for up to 20 data buffers + * in host DRAM. These buffers collectively contain the (one) frame described + * by the TFD. Each buffer must be a single contiguous block of memory within + * itself, but buffers may be scattered in host DRAM. Each buffer has max size + * of (4K - 4). 
The concatenates all of a TFD's buffers into a single + * Tx frame, up to 8 KBytes in size. + * + * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx. + */ +struct iwl_tfd { + u8 __reserved1[3]; + u8 num_tbs; + struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS]; + __le32 __pad; +} __packed; + +/* Keep Warm Size */ +#define IWL_KW_SIZE 0x1000 /* 4k */ + +#endif /* !__iwl_legacy_fh_h__ */ diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/trunk/drivers/net/wireless/iwlegacy/iwl-hcmd.c new file mode 100644 index 000000000000..ce1fc9feb61f --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-hcmd.c @@ -0,0 +1,271 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + +#include +#include +#include +#include + +#include "iwl-dev.h" +#include "iwl-debug.h" +#include "iwl-eeprom.h" +#include "iwl-core.h" + + +const char *iwl_legacy_get_cmd_string(u8 cmd) +{ + switch (cmd) { + IWL_CMD(REPLY_ALIVE); + IWL_CMD(REPLY_ERROR); + IWL_CMD(REPLY_RXON); + IWL_CMD(REPLY_RXON_ASSOC); + IWL_CMD(REPLY_QOS_PARAM); + IWL_CMD(REPLY_RXON_TIMING); + IWL_CMD(REPLY_ADD_STA); + IWL_CMD(REPLY_REMOVE_STA); + IWL_CMD(REPLY_WEPKEY); + IWL_CMD(REPLY_3945_RX); + IWL_CMD(REPLY_TX); + IWL_CMD(REPLY_RATE_SCALE); + IWL_CMD(REPLY_LEDS_CMD); + IWL_CMD(REPLY_TX_LINK_QUALITY_CMD); + IWL_CMD(REPLY_CHANNEL_SWITCH); + IWL_CMD(CHANNEL_SWITCH_NOTIFICATION); + IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD); + IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION); + IWL_CMD(POWER_TABLE_CMD); + IWL_CMD(PM_SLEEP_NOTIFICATION); + IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC); + IWL_CMD(REPLY_SCAN_CMD); + IWL_CMD(REPLY_SCAN_ABORT_CMD); + IWL_CMD(SCAN_START_NOTIFICATION); + IWL_CMD(SCAN_RESULTS_NOTIFICATION); + IWL_CMD(SCAN_COMPLETE_NOTIFICATION); + IWL_CMD(BEACON_NOTIFICATION); + IWL_CMD(REPLY_TX_BEACON); + IWL_CMD(REPLY_TX_PWR_TABLE_CMD); + IWL_CMD(REPLY_BT_CONFIG); + IWL_CMD(REPLY_STATISTICS_CMD); + IWL_CMD(STATISTICS_NOTIFICATION); + IWL_CMD(CARD_STATE_NOTIFICATION); + IWL_CMD(MISSED_BEACONS_NOTIFICATION); + IWL_CMD(REPLY_CT_KILL_CONFIG_CMD); + IWL_CMD(SENSITIVITY_CMD); + IWL_CMD(REPLY_PHY_CALIBRATION_CMD); + IWL_CMD(REPLY_RX_PHY_CMD); + IWL_CMD(REPLY_RX_MPDU_CMD); + IWL_CMD(REPLY_RX); + IWL_CMD(REPLY_COMPRESSED_BA); + default: + return "UNKNOWN"; + + } +} +EXPORT_SYMBOL(iwl_legacy_get_cmd_string); + +#define HOST_COMPLETE_TIMEOUT (HZ / 2) + +static void iwl_legacy_generic_cmd_callback(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct 
iwl_rx_packet *pkt) +{ + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERR(priv, "Bad return from %s (0x%08X)\n", + iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); + return; + } + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + switch (cmd->hdr.cmd) { + case REPLY_TX_LINK_QUALITY_CMD: + case SENSITIVITY_CMD: + IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n", + iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); + break; + default: + IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n", + iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); + } +#endif +} + +static int +iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + int ret; + + BUG_ON(!(cmd->flags & CMD_ASYNC)); + + /* An asynchronous command can not expect an SKB to be set. */ + BUG_ON(cmd->flags & CMD_WANT_SKB); + + /* Assign a generic callback if one is not provided */ + if (!cmd->callback) + cmd->callback = iwl_legacy_generic_cmd_callback; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return -EBUSY; + + ret = iwl_legacy_enqueue_hcmd(priv, cmd); + if (ret < 0) { + IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n", + iwl_legacy_get_cmd_string(cmd->id), ret); + return ret; + } + return 0; +} + +int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + int cmd_idx; + int ret; + + lockdep_assert_held(&priv->mutex); + + BUG_ON(cmd->flags & CMD_ASYNC); + + /* A synchronous command can not have a callback set. */ + BUG_ON(cmd->callback); + + IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n", + iwl_legacy_get_cmd_string(cmd->id)); + + set_bit(STATUS_HCMD_ACTIVE, &priv->status); + IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n", + iwl_legacy_get_cmd_string(cmd->id)); + + cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd); + if (cmd_idx < 0) { + ret = cmd_idx; + IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n", + iwl_legacy_get_cmd_string(cmd->id), ret); + goto out; + } + + ret = wait_event_timeout(priv->wait_command_queue, + !test_bit(STATUS_HCMD_ACTIVE, &priv->status), + HOST_COMPLETE_TIMEOUT); + if (!ret) { + if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) { + IWL_ERR(priv, + "Error sending %s: time out after %dms.\n", + iwl_legacy_get_cmd_string(cmd->id), + jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); + + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + IWL_DEBUG_INFO(priv, + "Clearing HCMD_ACTIVE for command %s\n", + iwl_legacy_get_cmd_string(cmd->id)); + ret = -ETIMEDOUT; + goto cancel; + } + } + + if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { + IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n", + iwl_legacy_get_cmd_string(cmd->id)); + ret = -ECANCELED; + goto fail; + } + if (test_bit(STATUS_FW_ERROR, &priv->status)) { + IWL_ERR(priv, "Command %s failed: FW Error\n", + iwl_legacy_get_cmd_string(cmd->id)); + ret = -EIO; + goto fail; + } + if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) { + IWL_ERR(priv, "Error: Response NULL in '%s'\n", + iwl_legacy_get_cmd_string(cmd->id)); + ret = -EIO; + goto cancel; + } + + ret = 0; + goto out; + +cancel: + if (cmd->flags & CMD_WANT_SKB) { + /* + * Cancel the CMD_WANT_SKB flag for the cmd in the + * TX cmd queue. Otherwise in case the cmd comes + * in later, it will possibly set an invalid + * address (cmd->meta.source). 
+ */ + priv->txq[priv->cmd_queue].meta[cmd_idx].flags &= + ~CMD_WANT_SKB; + } +fail: + if (cmd->reply_page) { + iwl_legacy_free_pages(priv, cmd->reply_page); + cmd->reply_page = 0; + } +out: + return ret; +} +EXPORT_SYMBOL(iwl_legacy_send_cmd_sync); + +int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + if (cmd->flags & CMD_ASYNC) + return iwl_legacy_send_cmd_async(priv, cmd); + + return iwl_legacy_send_cmd_sync(priv, cmd); +} +EXPORT_SYMBOL(iwl_legacy_send_cmd); + +int +iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = len, + .data = data, + }; + + return iwl_legacy_send_cmd_sync(priv, &cmd); +} +EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu); + +int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, + u8 id, u16 len, const void *data, + void (*callback)(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt)) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = len, + .data = data, + }; + + cmd.flags |= CMD_ASYNC; + cmd.callback = callback; + + return iwl_legacy_send_cmd_async(priv, &cmd); +} +EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async); diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-helpers.h b/trunk/drivers/net/wireless/iwlegacy/iwl-helpers.h new file mode 100644 index 000000000000..5cf23eaecbbb --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-helpers.h @@ -0,0 +1,196 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_legacy_helpers_h__ +#define __iwl_legacy_helpers_h__ + +#include +#include + +#include "iwl-io.h" + +#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo)))) + + +static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf( + struct ieee80211_hw *hw) +{ + return &hw->conf; +} + +/** + * iwl_legacy_queue_inc_wrap - increment queue index, wrap back to beginning + * @index -- current index + * @n_bd -- total number of entries in queue (must be power of 2) + */ +static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd) +{ + return ++index & (n_bd - 1); +} + +/** + * iwl_legacy_queue_dec_wrap - decrement queue index, wrap back to end + * @index -- current index + * @n_bd -- total number of entries in queue (must be power of 2) + */ +static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd) +{ + return --index & (n_bd - 1); +} + +/* TODO: Move fw_desc functions to iwl-pci.ko */ +static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev, + struct fw_desc *desc) +{ + if (desc->v_addr) + dma_free_coherent(&pci_dev->dev, desc->len, + desc->v_addr, desc->p_addr); + desc->v_addr = NULL; + desc->len = 0; +} + +static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev, + struct fw_desc *desc) +{ + if (!desc->len) { + desc->v_addr = NULL; + return -EINVAL; + } + + desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len, + &desc->p_addr, GFP_KERNEL); + return (desc->v_addr != NULL) ? 0 : -ENOMEM; +} + +/* + * we have 8 bits used like this: + * + * 7 6 5 4 3 2 1 0 + * | | | | | | | | + * | | | | | | +-+-------- AC queue (0-3) + * | | | | | | + * | +-+-+-+-+------------ HW queue ID + * | + * +---------------------- unused + */ +static inline void +iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq) +{ + BUG_ON(ac > 3); /* only have 2 bits */ + BUG_ON(hwq > 31); /* only use 5 bits */ + + txq->swq_id = (hwq << 2) | ac; +} + +static inline void iwl_legacy_wake_queue(struct iwl_priv *priv, + struct iwl_tx_queue *txq) +{ + u8 queue = txq->swq_id; + u8 ac = queue & 3; + u8 hwq = (queue >> 2) & 0x1f; + + if (test_and_clear_bit(hwq, priv->queue_stopped)) + if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0) + ieee80211_wake_queue(priv->hw, ac); +} + +static inline void iwl_legacy_stop_queue(struct iwl_priv *priv, + struct iwl_tx_queue *txq) +{ + u8 queue = txq->swq_id; + u8 ac = queue & 3; + u8 hwq = (queue >> 2) & 0x1f; + + if (!test_and_set_bit(hwq, priv->queue_stopped)) + if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0) + ieee80211_stop_queue(priv->hw, ac); +} + +#ifdef ieee80211_stop_queue +#undef ieee80211_stop_queue +#endif + +#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue + +#ifdef ieee80211_wake_queue +#undef ieee80211_wake_queue +#endif + +#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue + +static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv) +{ + clear_bit(STATUS_INT_ENABLED, &priv->status); + + /* disable interrupts from uCode/NIC to host */ + iwl_write32(priv, CSR_INT_MASK, 0x00000000); + + /* acknowledge/clear/reset any interrupts still pending + * from uCode or flow handler (Rx/Tx DMA) */ + iwl_write32(priv, CSR_INT, 0xffffffff); + iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff); + IWL_DEBUG_ISR(priv, "Disabled interrupts\n"); +} + +static inline void iwl_legacy_enable_rfkill_int(struct iwl_priv *priv) +{ + IWL_DEBUG_ISR(priv, 
"Enabling rfkill interrupt\n"); + iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL); +} + +static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv) +{ + IWL_DEBUG_ISR(priv, "Enabling interrupts\n"); + set_bit(STATUS_INT_ENABLED, &priv->status); + iwl_write32(priv, CSR_INT_MASK, priv->inta_mask); +} + +/** + * iwl_legacy_beacon_time_mask_low - mask of lower 32 bit of beacon time + * @priv -- pointer to iwl_priv data structure + * @tsf_bits -- number of bits need to shift for masking) + */ +static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv, + u16 tsf_bits) +{ + return (1 << tsf_bits) - 1; +} + +/** + * iwl_legacy_beacon_time_mask_high - mask of higher 32 bit of beacon time + * @priv -- pointer to iwl_priv data structure + * @tsf_bits -- number of bits need to shift for masking) + */ +static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv, + u16 tsf_bits) +{ + return ((1 << (32 - tsf_bits)) - 1) << tsf_bits; +} + +#endif /* __iwl_legacy_helpers_h__ */ diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-io.h b/trunk/drivers/net/wireless/iwlegacy/iwl-io.h new file mode 100644 index 000000000000..5cc5d342914f --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-io.h @@ -0,0 +1,545 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_legacy_io_h__ +#define __iwl_legacy_io_h__ + +#include + +#include "iwl-dev.h" +#include "iwl-debug.h" +#include "iwl-devtrace.h" + +/* + * IO, register, and NIC memory access functions + * + * NOTE on naming convention and macro usage for these + * + * A single _ prefix before a an access function means that no state + * check or debug information is printed when that function is called. + * + * A double __ prefix before an access function means that state is checked + * and the current line number and caller function name are printed in addition + * to any other debug output. + * + * The non-prefixed name is the #define that maps the caller into a + * #define that provides the caller's name and __LINE__ to the double + * prefix version. + * + * If you wish to call the function without any debug or state checking, + * you should use the single _ prefix version (as is used by dependent IO + * routines, for example _iwl_legacy_read_direct32 calls the non-check version of + * _iwl_legacy_read32.) 
+ * + * These declarations are *extremely* useful in quickly isolating code deltas + * which result in misconfiguration of the hardware I/O. In combination with + * git-bisect and the IO debug level you can quickly determine the specific + * commit which breaks the IO sequence to the hardware. + * + */ + +static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val) +{ + trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val); + iowrite8(val, priv->hw_base + ofs); +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline void +__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv, + u32 ofs, u8 val) +{ + IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l); + _iwl_legacy_write8(priv, ofs, val); +} +#define iwl_write8(priv, ofs, val) \ + __iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val) +#else +#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val) +#endif + + +static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val) +{ + trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val); + iowrite32(val, priv->hw_base + ofs); +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline void +__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv, + u32 ofs, u32 val) +{ + IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l); + _iwl_legacy_write32(priv, ofs, val); +} +#define iwl_write32(priv, ofs, val) \ + __iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val) +#else +#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val) +#endif + +static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs) +{ + u32 val = ioread32(priv->hw_base + ofs); + trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val); + return val; +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline u32 +__iwl_legacy_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs) +{ + IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l); + return _iwl_legacy_read32(priv, ofs); +} +#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs) +#else +#define iwl_read32(p, o) _iwl_legacy_read32(p, o) +#endif + +#define IWL_POLL_INTERVAL 10 /* microseconds */ +static inline int +_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr, + u32 bits, u32 mask, int timeout) +{ + int t = 0; + + do { + if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask)) + return t; + udelay(IWL_POLL_INTERVAL); + t += IWL_POLL_INTERVAL; + } while (t < timeout); + + return -ETIMEDOUT; +} +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline int __iwl_legacy_poll_bit(const char *f, u32 l, + struct iwl_priv *priv, u32 addr, + u32 bits, u32 mask, int timeout) +{ + int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout); + IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n", + addr, bits, mask, + unlikely(ret == -ETIMEDOUT) ? 
"timeout" : "", f, l); + return ret; +} +#define iwl_poll_bit(priv, addr, bits, mask, timeout) \ + __iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \ + bits, mask, timeout) +#else +#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t) +#endif + +static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask) +{ + _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask); +} +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline void __iwl_legacy_set_bit(const char *f, u32 l, + struct iwl_priv *priv, u32 reg, u32 mask) +{ + u32 val = _iwl_legacy_read32(priv, reg) | mask; + IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, + mask, val); + _iwl_legacy_write32(priv, reg, val); +} +static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&p->reg_lock, reg_flags); + __iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m); + spin_unlock_irqrestore(&p->reg_lock, reg_flags); +} +#else +static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&p->reg_lock, reg_flags); + _iwl_legacy_set_bit(p, r, m); + spin_unlock_irqrestore(&p->reg_lock, reg_flags); +} +#endif + +static inline void +_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask) +{ + _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask); +} +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline void +__iwl_legacy_clear_bit(const char *f, u32 l, + struct iwl_priv *priv, u32 reg, u32 mask) +{ + u32 val = _iwl_legacy_read32(priv, reg) & ~mask; + IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val); + _iwl_legacy_write32(priv, reg, val); +} +static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&p->reg_lock, reg_flags); + __iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m); + spin_unlock_irqrestore(&p->reg_lock, reg_flags); +} +#else +static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&p->reg_lock, reg_flags); + _iwl_legacy_clear_bit(p, r, m); + spin_unlock_irqrestore(&p->reg_lock, reg_flags); +} +#endif + +static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv) +{ + int ret; + u32 val; + + /* this bit wakes up the NIC */ + _iwl_legacy_set_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + /* + * These bits say the device is running, and should keep running for + * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), + * but they do not indicate that embedded SRAM is restored yet; + * 3945 and 4965 have volatile SRAM, and must save/restore contents + * to/from host DRAM when sleeping/waking for power-saving. + * Each direction takes approximately 1/4 millisecond; with this + * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a + * series of register accesses are expected (e.g. reading Event Log), + * to keep device from sleeping. + * + * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that + * SRAM is okay/restored. We don't check that here because this call + * is just for hardware register access; but GP1 MAC_SLEEP check is a + * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log). 
+ * + */ + ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, + (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); + if (ret < 0) { + val = _iwl_legacy_read32(priv, CSR_GP_CNTRL); + IWL_ERR(priv, + "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val); + _iwl_legacy_write32(priv, CSR_RESET, + CSR_RESET_REG_FLAG_FORCE_NMI); + return -EIO; + } + + return 0; +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l, + struct iwl_priv *priv) +{ + IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l); + return _iwl_legacy_grab_nic_access(priv); +} +#define iwl_grab_nic_access(priv) \ + __iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv) +#else +#define iwl_grab_nic_access(priv) \ + _iwl_legacy_grab_nic_access(priv) +#endif + +static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv) +{ + _iwl_legacy_clear_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); +} +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline void __iwl_legacy_release_nic_access(const char *f, u32 l, + struct iwl_priv *priv) +{ + + IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l); + _iwl_legacy_release_nic_access(priv); +} +#define iwl_release_nic_access(priv) \ + __iwl_legacy_release_nic_access(__FILE__, __LINE__, priv) +#else +#define iwl_release_nic_access(priv) \ + _iwl_legacy_release_nic_access(priv) +#endif + +static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg) +{ + return _iwl_legacy_read32(priv, reg); +} +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l, + struct iwl_priv *priv, u32 reg) +{ + u32 value = _iwl_legacy_read_direct32(priv, reg); + IWL_DEBUG_IO(priv, + "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value, + f, l); + return value; +} +static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg) +{ + u32 value; + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return value; +} + +#else +static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg) +{ + u32 value; + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + value = _iwl_legacy_read_direct32(priv, reg); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return value; + +} +#endif + +static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv, + u32 reg, u32 value) +{ + _iwl_legacy_write32(priv, reg, value); +} +static inline void +iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + if (!iwl_grab_nic_access(priv)) { + _iwl_legacy_write_direct32(priv, reg, value); + iwl_release_nic_access(priv); + } + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv, + u32 reg, u32 len, u32 *values) +{ + u32 count = sizeof(u32); + + if ((priv != NULL) && (values != NULL)) { + for (; 0 < len; len -= count, reg += count, values++) + iwl_legacy_write_direct32(priv, reg, *values); + } +} + +static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr, + u32 mask, int timeout) +{ + int t = 0; + + do { + if 
((iwl_legacy_read_direct32(priv, addr) & mask) == mask) + return t; + udelay(IWL_POLL_INTERVAL); + t += IWL_POLL_INTERVAL; + } while (t < timeout); + + return -ETIMEDOUT; +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l, + struct iwl_priv *priv, + u32 addr, u32 mask, int timeout) +{ + int ret = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout); + + if (unlikely(ret == -ETIMEDOUT)) + IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - " + "timedout - %s %d\n", addr, mask, f, l); + else + IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X " + "- %s %d\n", addr, mask, ret, f, l); + return ret; +} +#define iwl_poll_direct_bit(priv, addr, mask, timeout) \ +__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout) +#else +#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit +#endif + +static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg) +{ + _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24)); + rmb(); + return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT); +} +static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg) +{ + unsigned long reg_flags; + u32 val; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + val = _iwl_legacy_read_prph(priv, reg); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return val; +} + +static inline void _iwl_legacy_write_prph(struct iwl_priv *priv, + u32 addr, u32 val) +{ + _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR, + ((addr & 0x0000FFFF) | (3 << 24))); + wmb(); + _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val); +} + +static inline void +iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + if (!iwl_grab_nic_access(priv)) { + _iwl_legacy_write_prph(priv, addr, val); + iwl_release_nic_access(priv); + } + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +#define _iwl_legacy_set_bits_prph(priv, reg, mask) \ +_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask)) + +static inline void +iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + _iwl_legacy_set_bits_prph(priv, reg, mask); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \ +_iwl_legacy_write_prph(priv, reg, \ + ((_iwl_legacy_read_prph(priv, reg) & mask) | bits)) + +static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg, + u32 bits, u32 mask) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +static inline void iwl_legacy_clear_bits_prph(struct iwl_priv + *priv, u32 reg, u32 mask) +{ + unsigned long reg_flags; + u32 val; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + val = _iwl_legacy_read_prph(priv, reg); + _iwl_legacy_write_prph(priv, reg, (val & ~mask)); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr) +{ + unsigned long reg_flags; + u32 
value; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + + _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr); + rmb(); + value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return value; +} + +static inline void +iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + if (!iwl_grab_nic_access(priv)) { + _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr); + wmb(); + _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val); + iwl_release_nic_access(priv); + } + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +static inline void +iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr, + u32 len, u32 *values) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + if (!iwl_grab_nic_access(priv)) { + _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr); + wmb(); + for (; 0 < len; len -= sizeof(u32), values++) + _iwl_legacy_write_direct32(priv, + HBUS_TARG_MEM_WDAT, *values); + + iwl_release_nic_access(priv); + } + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} +#endif diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-led.c b/trunk/drivers/net/wireless/iwlegacy/iwl-led.c new file mode 100644 index 000000000000..dc568a474c5d --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-led.c @@ -0,0 +1,205 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" + +/* default: IWL_LED_BLINK(0) using blinking index table */ +static int led_mode; +module_param(led_mode, int, S_IRUGO); +MODULE_PARM_DESC(led_mode, "0=system default, " + "1=On(RF On)/Off(RF Off), 2=blinking"); + +/* Throughput OFF time(ms) ON time (ms) + * >300 25 25 + * >200 to 300 40 40 + * >100 to 200 55 55 + * >70 to 100 65 65 + * >50 to 70 75 75 + * >20 to 50 85 85 + * >10 to 20 95 95 + * >5 to 10 110 110 + * >1 to 5 130 130 + * >0 to 1 167 167 + * <=0 SOLID ON + */ +static const struct ieee80211_tpt_blink iwl_blink[] = { + { .throughput = 0, .blink_time = 334 }, + { .throughput = 1 * 1024 - 1, .blink_time = 260 }, + { .throughput = 5 * 1024 - 1, .blink_time = 220 }, + { .throughput = 10 * 1024 - 1, .blink_time = 190 }, + { .throughput = 20 * 1024 - 1, .blink_time = 170 }, + { .throughput = 50 * 1024 - 1, .blink_time = 150 }, + { .throughput = 70 * 1024 - 1, .blink_time = 130 }, + { .throughput = 100 * 1024 - 1, .blink_time = 110 }, + { .throughput = 200 * 1024 - 1, .blink_time = 80 }, + { .throughput = 300 * 1024 - 1, .blink_time = 50 }, +}; + +/* + * Adjust led blink rate to compensate on a MAC Clock difference on every HW + * Led blink rate analysis showed an average deviation of 0% on 3945, + * 5% on 4965 HW. + * Need to compensate on the led on/off time per HW according to the deviation + * to achieve the desired led frequency + * The calculation is: (100-averageDeviation)/100 * blinkTime + * For code efficiency the calculation will be: + * compensation = (100 - averageDeviation) * 64 / 100 + * NewBlinkTime = (compensation * BlinkTime) / 64 + */ +static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv, + u8 time, u16 compensation) +{ + if (!compensation) { + IWL_ERR(priv, "undefined blink compensation: " + "use pre-defined blinking time\n"); + return time; + } + + return (u8)((time * compensation) >> 6); +} + +/* Set led pattern command */ +static int iwl_legacy_led_cmd(struct iwl_priv *priv, + unsigned long on, + unsigned long off) +{ + struct iwl_led_cmd led_cmd = { + .id = IWL_LED_LINK, + .interval = IWL_DEF_LED_INTRVL + }; + int ret; + + if (!test_bit(STATUS_READY, &priv->status)) + return -EBUSY; + + if (priv->blink_on == on && priv->blink_off == off) + return 0; + + if (off == 0) { + /* led is SOLID_ON */ + on = IWL_LED_SOLID; + } + + IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n", + priv->cfg->base_params->led_compensation); + led_cmd.on = iwl_legacy_blink_compensation(priv, on, + priv->cfg->base_params->led_compensation); + led_cmd.off = iwl_legacy_blink_compensation(priv, off, + priv->cfg->base_params->led_compensation); + + ret = priv->cfg->ops->led->cmd(priv, &led_cmd); + if (!ret) { + priv->blink_on = on; + priv->blink_off = off; + } + return ret; +} + +static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led); + unsigned long on = 0; + + if (brightness > 0) + on = IWL_LED_SOLID; + + iwl_legacy_led_cmd(priv, on, 0); +} + +static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led); + 
+ return iwl_legacy_led_cmd(priv, *delay_on, *delay_off); +} + +void iwl_legacy_leds_init(struct iwl_priv *priv) +{ + int mode = led_mode; + int ret; + + if (mode == IWL_LED_DEFAULT) + mode = priv->cfg->led_mode; + + priv->led.name = kasprintf(GFP_KERNEL, "%s-led", + wiphy_name(priv->hw->wiphy)); + priv->led.brightness_set = iwl_legacy_led_brightness_set; + priv->led.blink_set = iwl_legacy_led_blink_set; + priv->led.max_brightness = 1; + + switch (mode) { + case IWL_LED_DEFAULT: + WARN_ON(1); + break; + case IWL_LED_BLINK: + priv->led.default_trigger = + ieee80211_create_tpt_led_trigger(priv->hw, + IEEE80211_TPT_LEDTRIG_FL_CONNECTED, + iwl_blink, ARRAY_SIZE(iwl_blink)); + break; + case IWL_LED_RF_STATE: + priv->led.default_trigger = + ieee80211_get_radio_led_name(priv->hw); + break; + } + + ret = led_classdev_register(&priv->pci_dev->dev, &priv->led); + if (ret) { + kfree(priv->led.name); + return; + } + + priv->led_registered = true; +} +EXPORT_SYMBOL(iwl_legacy_leds_init); + +void iwl_legacy_leds_exit(struct iwl_priv *priv) +{ + if (!priv->led_registered) + return; + + led_classdev_unregister(&priv->led); + kfree(priv->led.name); +} +EXPORT_SYMBOL(iwl_legacy_leds_exit); diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-led.h b/trunk/drivers/net/wireless/iwlegacy/iwl-led.h new file mode 100644 index 000000000000..f0791f70f79d --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-led.h @@ -0,0 +1,56 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_legacy_leds_h__ +#define __iwl_legacy_leds_h__ + + +struct iwl_priv; + +#define IWL_LED_SOLID 11 +#define IWL_DEF_LED_INTRVL cpu_to_le32(1000) + +#define IWL_LED_ACTIVITY (0<<1) +#define IWL_LED_LINK (1<<1) + +/* + * LED mode + * IWL_LED_DEFAULT: use device default + * IWL_LED_RF_STATE: turn LED on/off based on RF state + * LED ON = RF ON + * LED OFF = RF OFF + * IWL_LED_BLINK: adjust led blink rate based on blink table + */ +enum iwl_led_mode { + IWL_LED_DEFAULT, + IWL_LED_RF_STATE, + IWL_LED_BLINK, +}; + +void iwl_legacy_leds_init(struct iwl_priv *priv); +void iwl_legacy_leds_exit(struct iwl_priv *priv); + +#endif /* __iwl_legacy_leds_h__ */ diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h b/trunk/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h new file mode 100644 index 000000000000..38647e481eb0 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h @@ -0,0 +1,456 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_legacy_rs_h__ +#define __iwl_legacy_rs_h__ + +struct iwl_rate_info { + u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ + u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ + u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */ + u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */ + u8 prev_ieee; /* previous rate in IEEE speeds */ + u8 next_ieee; /* next rate in IEEE speeds */ + u8 prev_rs; /* previous rate used in rs algo */ + u8 next_rs; /* next rate used in rs algo */ + u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ + u8 next_rs_tgg; /* next rate used in TGG rs algo */ +}; + +struct iwl3945_rate_info { + u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ + u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. 
*/ + u8 prev_ieee; /* previous rate in IEEE speeds */ + u8 next_ieee; /* next rate in IEEE speeds */ + u8 prev_rs; /* previous rate used in rs algo */ + u8 next_rs; /* next rate used in rs algo */ + u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ + u8 next_rs_tgg; /* next rate used in TGG rs algo */ + u8 table_rs_index; /* index in rate scale table cmd */ + u8 prev_table_rs; /* prev in rate table cmd */ +}; + + +/* + * These serve as indexes into + * struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT]; + */ +enum { + IWL_RATE_1M_INDEX = 0, + IWL_RATE_2M_INDEX, + IWL_RATE_5M_INDEX, + IWL_RATE_11M_INDEX, + IWL_RATE_6M_INDEX, + IWL_RATE_9M_INDEX, + IWL_RATE_12M_INDEX, + IWL_RATE_18M_INDEX, + IWL_RATE_24M_INDEX, + IWL_RATE_36M_INDEX, + IWL_RATE_48M_INDEX, + IWL_RATE_54M_INDEX, + IWL_RATE_60M_INDEX, + IWL_RATE_COUNT, + IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */ + IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1, + IWL_RATE_INVM_INDEX = IWL_RATE_COUNT, + IWL_RATE_INVALID = IWL_RATE_COUNT, +}; + +enum { + IWL_RATE_6M_INDEX_TABLE = 0, + IWL_RATE_9M_INDEX_TABLE, + IWL_RATE_12M_INDEX_TABLE, + IWL_RATE_18M_INDEX_TABLE, + IWL_RATE_24M_INDEX_TABLE, + IWL_RATE_36M_INDEX_TABLE, + IWL_RATE_48M_INDEX_TABLE, + IWL_RATE_54M_INDEX_TABLE, + IWL_RATE_1M_INDEX_TABLE, + IWL_RATE_2M_INDEX_TABLE, + IWL_RATE_5M_INDEX_TABLE, + IWL_RATE_11M_INDEX_TABLE, + IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1, +}; + +enum { + IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX, + IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX, + IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX, + IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX, + IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX, +}; + +/* #define vs. enum to keep from defaulting to 'large integer' */ +#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX) +#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX) +#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX) +#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX) +#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX) +#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX) +#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX) +#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX) +#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX) +#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX) +#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX) +#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX) +#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX) + +/* uCode API values for legacy bit rates, both OFDM and CCK */ +enum { + IWL_RATE_6M_PLCP = 13, + IWL_RATE_9M_PLCP = 15, + IWL_RATE_12M_PLCP = 5, + IWL_RATE_18M_PLCP = 7, + IWL_RATE_24M_PLCP = 9, + IWL_RATE_36M_PLCP = 11, + IWL_RATE_48M_PLCP = 1, + IWL_RATE_54M_PLCP = 3, + IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/ + IWL_RATE_1M_PLCP = 10, + IWL_RATE_2M_PLCP = 20, + IWL_RATE_5M_PLCP = 55, + IWL_RATE_11M_PLCP = 110, + /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/ +}; + +/* uCode API values for OFDM high-throughput (HT) bit rates */ +enum { + IWL_RATE_SISO_6M_PLCP = 0, + IWL_RATE_SISO_12M_PLCP = 1, + IWL_RATE_SISO_18M_PLCP = 2, + IWL_RATE_SISO_24M_PLCP = 3, + IWL_RATE_SISO_36M_PLCP = 4, + IWL_RATE_SISO_48M_PLCP = 5, + IWL_RATE_SISO_54M_PLCP = 6, + IWL_RATE_SISO_60M_PLCP = 7, + IWL_RATE_MIMO2_6M_PLCP = 0x8, + IWL_RATE_MIMO2_12M_PLCP = 0x9, + IWL_RATE_MIMO2_18M_PLCP = 0xa, + IWL_RATE_MIMO2_24M_PLCP = 0xb, + IWL_RATE_MIMO2_36M_PLCP = 0xc, + IWL_RATE_MIMO2_48M_PLCP = 0xd, + IWL_RATE_MIMO2_54M_PLCP = 0xe, + IWL_RATE_MIMO2_60M_PLCP = 0xf, + IWL_RATE_SISO_INVM_PLCP, + IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP, +}; + 
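[Editor's illustration — not part of the patch.] The rate-index enum and the IWL_RATE_*_MASK defines above follow one convention: each mask is simply 1 << index, so a set of supported rates can be carried around as a single bitmap (the composite IWL_*_RATES_MASK defines a little further down are built the same way). A minimal standalone sketch of that convention, with index values copied here for illustration only (it does not include the driver headers):

#include <stdio.h>

/* Index values mirror the enum above, for illustration only. */
enum { RATE_1M_INDEX = 0, RATE_2M_INDEX, RATE_5M_INDEX, RATE_11M_INDEX,
       RATE_6M_INDEX, RATE_9M_INDEX, RATE_12M_INDEX };

/* Mirrors the IWL_RATE_xxM_MASK pattern: one bit per rate index. */
#define RATE_MASK(idx)	(1u << (idx))

int main(void)
{
	/* CCK basic rates (1M, 2M), analogous to IWL_CCK_BASIC_RATES_MASK. */
	unsigned int cck_basic = RATE_MASK(RATE_1M_INDEX) |
				 RATE_MASK(RATE_2M_INDEX);

	/* Membership tests work the same way the driver uses its masks. */
	printf("1M in set: %d\n", !!(cck_basic & RATE_MASK(RATE_1M_INDEX)));
	printf("6M in set: %d\n", !!(cck_basic & RATE_MASK(RATE_6M_INDEX)));
	return 0;
}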
+/* MAC header values for bit rates */ +enum { + IWL_RATE_6M_IEEE = 12, + IWL_RATE_9M_IEEE = 18, + IWL_RATE_12M_IEEE = 24, + IWL_RATE_18M_IEEE = 36, + IWL_RATE_24M_IEEE = 48, + IWL_RATE_36M_IEEE = 72, + IWL_RATE_48M_IEEE = 96, + IWL_RATE_54M_IEEE = 108, + IWL_RATE_60M_IEEE = 120, + IWL_RATE_1M_IEEE = 2, + IWL_RATE_2M_IEEE = 4, + IWL_RATE_5M_IEEE = 11, + IWL_RATE_11M_IEEE = 22, +}; + +#define IWL_CCK_BASIC_RATES_MASK \ + (IWL_RATE_1M_MASK | \ + IWL_RATE_2M_MASK) + +#define IWL_CCK_RATES_MASK \ + (IWL_CCK_BASIC_RATES_MASK | \ + IWL_RATE_5M_MASK | \ + IWL_RATE_11M_MASK) + +#define IWL_OFDM_BASIC_RATES_MASK \ + (IWL_RATE_6M_MASK | \ + IWL_RATE_12M_MASK | \ + IWL_RATE_24M_MASK) + +#define IWL_OFDM_RATES_MASK \ + (IWL_OFDM_BASIC_RATES_MASK | \ + IWL_RATE_9M_MASK | \ + IWL_RATE_18M_MASK | \ + IWL_RATE_36M_MASK | \ + IWL_RATE_48M_MASK | \ + IWL_RATE_54M_MASK) + +#define IWL_BASIC_RATES_MASK \ + (IWL_OFDM_BASIC_RATES_MASK | \ + IWL_CCK_BASIC_RATES_MASK) + +#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1) +#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1) + +#define IWL_INVALID_VALUE -1 + +#define IWL_MIN_RSSI_VAL -100 +#define IWL_MAX_RSSI_VAL 0 + +/* These values specify how many Tx frame attempts before + * searching for a new modulation mode */ +#define IWL_LEGACY_FAILURE_LIMIT 160 +#define IWL_LEGACY_SUCCESS_LIMIT 480 +#define IWL_LEGACY_TABLE_COUNT 160 + +#define IWL_NONE_LEGACY_FAILURE_LIMIT 400 +#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500 +#define IWL_NONE_LEGACY_TABLE_COUNT 1500 + +/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */ +#define IWL_RS_GOOD_RATIO 12800 /* 100% */ +#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */ +#define IWL_RATE_HIGH_TH 10880 /* 85% */ +#define IWL_RATE_INCREASE_TH 6400 /* 50% */ +#define IWL_RATE_DECREASE_TH 1920 /* 15% */ + +/* possible actions when in legacy mode */ +#define IWL_LEGACY_SWITCH_ANTENNA1 0 +#define IWL_LEGACY_SWITCH_ANTENNA2 1 +#define IWL_LEGACY_SWITCH_SISO 2 +#define IWL_LEGACY_SWITCH_MIMO2_AB 3 +#define IWL_LEGACY_SWITCH_MIMO2_AC 4 +#define IWL_LEGACY_SWITCH_MIMO2_BC 5 + +/* possible actions when in siso mode */ +#define IWL_SISO_SWITCH_ANTENNA1 0 +#define IWL_SISO_SWITCH_ANTENNA2 1 +#define IWL_SISO_SWITCH_MIMO2_AB 2 +#define IWL_SISO_SWITCH_MIMO2_AC 3 +#define IWL_SISO_SWITCH_MIMO2_BC 4 +#define IWL_SISO_SWITCH_GI 5 + +/* possible actions when in mimo mode */ +#define IWL_MIMO2_SWITCH_ANTENNA1 0 +#define IWL_MIMO2_SWITCH_ANTENNA2 1 +#define IWL_MIMO2_SWITCH_SISO_A 2 +#define IWL_MIMO2_SWITCH_SISO_B 3 +#define IWL_MIMO2_SWITCH_SISO_C 4 +#define IWL_MIMO2_SWITCH_GI 5 + +#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI + +#define IWL_ACTION_LIMIT 3 /* # possible actions */ + +#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ + +/* load per tid defines for A-MPDU activation */ +#define IWL_AGG_TPT_THREHOLD 0 +#define IWL_AGG_LOAD_THRESHOLD 10 +#define IWL_AGG_ALL_TID 0xff +#define TID_QUEUE_CELL_SPACING 50 /*mS */ +#define TID_QUEUE_MAX_SIZE 20 +#define TID_ROUND_VALUE 5 /* mS */ +#define TID_MAX_LOAD_COUNT 8 + +#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING) +#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? 
(y) - (x) : (0-(x)) + (y)) + +extern const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT]; + +enum iwl_table_type { + LQ_NONE, + LQ_G, /* legacy types */ + LQ_A, + LQ_SISO, /* high-throughput types */ + LQ_MIMO2, + LQ_MAX, +}; + +#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A)) +#define is_siso(tbl) ((tbl) == LQ_SISO) +#define is_mimo2(tbl) ((tbl) == LQ_MIMO2) +#define is_mimo(tbl) (is_mimo2(tbl)) +#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl)) +#define is_a_band(tbl) ((tbl) == LQ_A) +#define is_g_and(tbl) ((tbl) == LQ_G) + +#define ANT_NONE 0x0 +#define ANT_A BIT(0) +#define ANT_B BIT(1) +#define ANT_AB (ANT_A | ANT_B) +#define ANT_C BIT(2) +#define ANT_AC (ANT_A | ANT_C) +#define ANT_BC (ANT_B | ANT_C) +#define ANT_ABC (ANT_AB | ANT_C) + +#define IWL_MAX_MCS_DISPLAY_SIZE 12 + +struct iwl_rate_mcs_info { + char mbps[IWL_MAX_MCS_DISPLAY_SIZE]; + char mcs[IWL_MAX_MCS_DISPLAY_SIZE]; +}; + +/** + * struct iwl_rate_scale_data -- tx success history for one rate + */ +struct iwl_rate_scale_data { + u64 data; /* bitmap of successful frames */ + s32 success_counter; /* number of frames successful */ + s32 success_ratio; /* per-cent * 128 */ + s32 counter; /* number of frames attempted */ + s32 average_tpt; /* success ratio * expected throughput */ + unsigned long stamp; +}; + +/** + * struct iwl_scale_tbl_info -- tx params and success history for all rates + * + * There are two of these in struct iwl_lq_sta, + * one for "active", and one for "search". + */ +struct iwl_scale_tbl_info { + enum iwl_table_type lq_type; + u8 ant_type; + u8 is_SGI; /* 1 = short guard interval */ + u8 is_ht40; /* 1 = 40 MHz channel width */ + u8 is_dup; /* 1 = duplicated data streams */ + u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */ + u8 max_search; /* maximun number of tables we can search */ + s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */ + u32 current_rate; /* rate_n_flags, uCode API format */ + struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */ +}; + +struct iwl_traffic_load { + unsigned long time_stamp; /* age of the oldest statistics */ + u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time + * slice */ + u32 total; /* total num of packets during the + * last TID_MAX_TIME_DIFF */ + u8 queue_count; /* number of queues that has + * been used since the last cleanup */ + u8 head; /* start of the circular buffer */ +}; + +/** + * struct iwl_lq_sta -- driver's rate scaling private structure + * + * Pointer to this gets passed back and forth between driver and mac80211. + */ +struct iwl_lq_sta { + u8 active_tbl; /* index of active table, range 0-1 */ + u8 enable_counter; /* indicates HT mode */ + u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */ + u8 search_better_tbl; /* 1: currently trying alternate mode */ + s32 last_tpt; + + /* The following determine when to search for a new mode */ + u32 table_count_limit; + u32 max_failure_limit; /* # failed frames before new search */ + u32 max_success_limit; /* # successful frames before new search */ + u32 table_count; + u32 total_failed; /* total failed frames, any/all rates */ + u32 total_success; /* total successful frames, any/all rates */ + u64 flush_timer; /* time staying in mode before new search */ + + u8 action_counter; /* # mode-switch actions tried */ + u8 is_green; + u8 is_dup; + enum ieee80211_band band; + + /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. 
*/ + u32 supp_rates; + u16 active_legacy_rate; + u16 active_siso_rate; + u16 active_mimo2_rate; + s8 max_rate_idx; /* Max rate set by user */ + u8 missed_rate_counter; + + struct iwl_link_quality_cmd lq; + struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ + struct iwl_traffic_load load[TID_MAX_LOAD_COUNT]; + u8 tx_agg_tid_en; +#ifdef CONFIG_MAC80211_DEBUGFS + struct dentry *rs_sta_dbgfs_scale_table_file; + struct dentry *rs_sta_dbgfs_stats_table_file; + struct dentry *rs_sta_dbgfs_rate_scale_data_file; + struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; + u32 dbg_fixed_rate; +#endif + struct iwl_priv *drv; + + /* used to be in sta_info */ + int last_txrate_idx; + /* last tx rate_n_flags */ + u32 last_rate_n_flags; + /* packets destined for this STA are aggregated */ + u8 is_agg; +}; + +static inline u8 iwl4965_num_of_ant(u8 mask) +{ + return !!((mask) & ANT_A) + + !!((mask) & ANT_B) + + !!((mask) & ANT_C); +} + +static inline u8 iwl4965_first_antenna(u8 mask) +{ + if (mask & ANT_A) + return ANT_A; + if (mask & ANT_B) + return ANT_B; + return ANT_C; +} + + +/** + * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info + * + * The specific throughput table used is based on the type of network + * the associated with, including A, B, G, and G w/ TGG protection + */ +extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id); + +/* Initialize station's rate scaling information after adding station */ +extern void iwl4965_rs_rate_init(struct iwl_priv *priv, + struct ieee80211_sta *sta, u8 sta_id); +extern void iwl3945_rs_rate_init(struct iwl_priv *priv, + struct ieee80211_sta *sta, u8 sta_id); + +/** + * iwl_rate_control_register - Register the rate control algorithm callbacks + * + * Since the rate control algorithm is hardware specific, there is no need + * or reason to place it as a stand alone module. The driver can call + * iwl_rate_control_register in order to register the rate control callbacks + * with the mac80211 subsystem. This should be performed prior to calling + * ieee80211_register_hw + * + */ +extern int iwl4965_rate_control_register(void); +extern int iwl3945_rate_control_register(void); + +/** + * iwl_rate_control_unregister - Unregister the rate control callbacks + * + * This should be called after calling ieee80211_unregister_hw, but before + * the driver is unloaded. + */ +extern void iwl4965_rate_control_unregister(void); +extern void iwl3945_rate_control_unregister(void); + +#endif /* __iwl_legacy_rs__ */ diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-power.c b/trunk/drivers/net/wireless/iwlegacy/iwl-power.c new file mode 100644 index 000000000000..903ef0d6d6cb --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-power.c @@ -0,0 +1,165 @@ +/****************************************************************************** + * + * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + + +#include +#include +#include +#include + +#include + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-commands.h" +#include "iwl-debug.h" +#include "iwl-power.h" + +/* + * Setting power level allows the card to go to sleep when not busy. + * + * We calculate a sleep command based on the required latency, which + * we get from mac80211. In order to handle thermal throttling, we can + * also use pre-defined power levels. + */ + +/* + * This defines the old power levels. They are still used by default + * (level 1) and for thermal throttle (levels 3 through 5) + */ + +struct iwl_power_vec_entry { + struct iwl_powertable_cmd cmd; + u8 no_dtim; /* number of skip dtim */ +}; + +static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv, + struct iwl_powertable_cmd *cmd) +{ + memset(cmd, 0, sizeof(*cmd)); + + if (priv->power_data.pci_pm) + cmd->flags |= IWL_POWER_PCI_PM_MSK; + + IWL_DEBUG_POWER(priv, "Sleep command for CAM\n"); +} + +static int +iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd) +{ + IWL_DEBUG_POWER(priv, "Sending power/sleep command\n"); + IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags); + IWL_DEBUG_POWER(priv, "Tx timeout = %u\n", + le32_to_cpu(cmd->tx_data_timeout)); + IWL_DEBUG_POWER(priv, "Rx timeout = %u\n", + le32_to_cpu(cmd->rx_data_timeout)); + IWL_DEBUG_POWER(priv, + "Sleep interval vector = { %d , %d , %d , %d , %d }\n", + le32_to_cpu(cmd->sleep_interval[0]), + le32_to_cpu(cmd->sleep_interval[1]), + le32_to_cpu(cmd->sleep_interval[2]), + le32_to_cpu(cmd->sleep_interval[3]), + le32_to_cpu(cmd->sleep_interval[4])); + + return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD, + sizeof(struct iwl_powertable_cmd), cmd); +} + +int +iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd, + bool force) +{ + int ret; + bool update_chains; + + lockdep_assert_held(&priv->mutex); + + /* Don't update the RX chain when chain noise calibration is running */ + update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE || + priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE; + + if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force) + return 0; + + if (!iwl_legacy_is_ready_rf(priv)) + return -EIO; + + /* scan complete use sleep_power_next, need to be updated */ + memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd)); + if (test_bit(STATUS_SCANNING, &priv->status) && !force) { + IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n"); + return 0; + } + + if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK) + set_bit(STATUS_POWER_PMI, &priv->status); + + ret = iwl_legacy_set_power(priv, cmd); + if (!ret) { + if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)) + clear_bit(STATUS_POWER_PMI, &priv->status); + + if (priv->cfg->ops->lib->update_chain_flags && update_chains) + priv->cfg->ops->lib->update_chain_flags(priv); + else if (priv->cfg->ops->lib->update_chain_flags) + IWL_DEBUG_POWER(priv, + 
"Cannot update the power, chain noise " + "calibration running: %d\n", + priv->chain_noise_data.state); + + memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)); + } else + IWL_ERR(priv, "set power fail, ret = %d", ret); + + return ret; +} + +int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force) +{ + struct iwl_powertable_cmd cmd; + + iwl_legacy_power_sleep_cam_cmd(priv, &cmd); + return iwl_legacy_power_set_mode(priv, &cmd, force); +} +EXPORT_SYMBOL(iwl_legacy_power_update_mode); + +/* initialize to default */ +void iwl_legacy_power_initialize(struct iwl_priv *priv) +{ + u16 lctl = iwl_legacy_pcie_link_ctl(priv); + + priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN); + + priv->power_data.debug_sleep_level_override = -1; + + memset(&priv->power_data.sleep_cmd, 0, + sizeof(priv->power_data.sleep_cmd)); +} +EXPORT_SYMBOL(iwl_legacy_power_initialize); diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-power.h b/trunk/drivers/net/wireless/iwlegacy/iwl-power.h new file mode 100644 index 000000000000..d30b36acdc4a --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-power.h @@ -0,0 +1,55 @@ +/****************************************************************************** + * + * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ +#ifndef __iwl_legacy_power_setting_h__ +#define __iwl_legacy_power_setting_h__ + +#include "iwl-commands.h" + +enum iwl_power_level { + IWL_POWER_INDEX_1, + IWL_POWER_INDEX_2, + IWL_POWER_INDEX_3, + IWL_POWER_INDEX_4, + IWL_POWER_INDEX_5, + IWL_POWER_NUM +}; + +struct iwl_power_mgr { + struct iwl_powertable_cmd sleep_cmd; + struct iwl_powertable_cmd sleep_cmd_next; + int debug_sleep_level_override; + bool pci_pm; +}; + +int +iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd, + bool force); +int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force); +void iwl_legacy_power_initialize(struct iwl_priv *priv); + +#endif /* __iwl_legacy_power_setting_h__ */ diff --git a/trunk/drivers/net/wireless/iwlegacy/prph.h b/trunk/drivers/net/wireless/iwlegacy/iwl-prph.h similarity index 83% rename from trunk/drivers/net/wireless/iwlegacy/prph.h rename to trunk/drivers/net/wireless/iwlegacy/iwl-prph.h index ffec4b4a248a..30a493003ab0 100644 --- a/trunk/drivers/net/wireless/iwlegacy/prph.h +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-prph.h @@ -60,8 +60,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#ifndef __il_prph_h__ -#define __il_prph_h__ +#ifndef __iwl_legacy_prph_h__ +#define __iwl_legacy_prph_h__ /* * Registers in this file are internal, not PCI bus memory mapped. @@ -91,9 +91,9 @@ #define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000) #define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000) #define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000) -#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */ +#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */ #define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000) -#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */ +#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */ #define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060) #define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) @@ -120,13 +120,13 @@ * * 1) Initialization -- performs hardware calibration and sets up some * internal data, then notifies host via "initialize alive" notification - * (struct il_init_alive_resp) that it has completed all of its work. + * (struct iwl_init_alive_resp) that it has completed all of its work. * After signal from host, it then loads and starts the runtime program. * The initialization program must be used when initially setting up the * NIC after loading the driver. * * 2) Runtime/Protocol -- performs all normal runtime operations. This - * notifies host via "alive" notification (struct il_alive_resp) that it + * notifies host via "alive" notification (struct iwl_alive_resp) that it * is ready to be used. * * When initializing the NIC, the host driver does the following procedure: @@ -189,7 +189,7 @@ * procedure. * * This save/restore method is mostly for autonomous power management during - * normal operation (result of C_POWER_TBL). Platform suspend/resume and + * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and * RFKILL should use complete restarts (with total re-initialization) of uCode, * allowing total shutdown (including BSM memory). 
* @@ -202,19 +202,19 @@ */ /* BSM bit fields */ -#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */ -#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup */ -#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */ +#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */ +#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/ +#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */ /* BSM addresses */ #define BSM_BASE (PRPH_BASE + 0x3400) #define BSM_END (PRPH_BASE + 0x3800) -#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */ -#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */ -#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */ -#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */ -#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */ +#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */ +#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */ +#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */ +#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */ +#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */ /* * Pointers and size regs for bootstrap load and data SRAM save/restore. @@ -231,7 +231,8 @@ * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1) */ #define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800) -#define BSM_SRAM_SIZE (1024) /* bytes */ +#define BSM_SRAM_SIZE (1024) /* bytes */ + /* 3945 Tx scheduler registers */ #define ALM_SCD_BASE (PRPH_BASE + 0x2E00) @@ -254,7 +255,7 @@ * but one DMA channel may take input from several queues. * * Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows - * (cf. default_queue_to_tx_fifo in 4965.c): + * (cf. default_queue_to_tx_fifo in iwl-4965.c): * * 0 -- EDCA BK (background) frames, lowest priority * 1 -- EDCA BE (best effort) frames, normal priority @@ -273,20 +274,20 @@ * The driver sets up each queue to work in one of two modes: * * 1) Scheduler-Ack, in which the scheduler automatically supports a - * block-ack (BA) win of up to 64 TFDs. In this mode, each queue + * block-ack (BA) window of up to 64 TFDs. In this mode, each queue * contains TFDs for a unique combination of Recipient Address (RA) * and Traffic Identifier (TID), that is, traffic of a given * Quality-Of-Service (QOS) priority, destined for a single station. * * In scheduler-ack mode, the scheduler keeps track of the Tx status of - * each frame within the BA win, including whether it's been transmitted, + * each frame within the BA window, including whether it's been transmitted, * and whether it's been acknowledged by the receiving station. The device * automatically processes block-acks received from the receiving STA, * and reschedules un-acked frames to be retransmitted (successful * Tx completion may end up being out-of-order). * * The driver must maintain the queue's Byte Count table in host DRAM - * (struct il4965_sched_queue_byte_cnt_tbl) for this mode. + * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode. * This mode does not support fragmentation. * * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order. @@ -315,34 +316,34 @@ */ /** - * Max Tx win size is the max number of contiguous TFDs that the scheduler + * Max Tx window size is the max number of contiguous TFDs that the scheduler * can keep track of at one time when creating block-ack chains of frames. 
* Note that "64" matches the number of ack bits in a block-ack packet. * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize - * IL49_SCD_CONTEXT_QUEUE_OFFSET(x) values. + * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values. */ #define SCD_WIN_SIZE 64 #define SCD_FRAME_LIMIT 64 /* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */ -#define IL49_SCD_START_OFFSET 0xa02c00 +#define IWL49_SCD_START_OFFSET 0xa02c00 /* * 4965 tells driver SRAM address for internal scheduler structs via this reg. * Value is valid only after "Alive" response from uCode. */ -#define IL49_SCD_SRAM_BASE_ADDR (IL49_SCD_START_OFFSET + 0x0) +#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0) /* * Driver may need to update queue-empty bits after changing queue's - * write and read pointers (idxes) during (re-)initialization (i.e. when + * write and read pointers (indexes) during (re-)initialization (i.e. when * scheduler is not tracking what's happening). * Bit fields: * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty * NOTE: This register is not used by Linux driver. */ -#define IL49_SCD_EMPTY_BITS (IL49_SCD_START_OFFSET + 0x4) +#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4) /* * Physical base address of array of byte count (BC) circular buffers (CBs). @@ -350,11 +351,11 @@ * This register points to BC CB for queue 0, must be on 1024-byte boundary. * Others are spaced by 1024 bytes. * Each BC CB is 2 bytes * (256 + 64) = 740 bytes, followed by 384 bytes pad. - * (Index into a queue's BC CB) = (idx into queue's TFD CB) = (SSN & 0xff). + * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff). * Bit fields: * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned. */ -#define IL49_SCD_DRAM_BASE_ADDR (IL49_SCD_START_OFFSET + 0x10) +#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10) /* * Enables any/all Tx DMA/FIFO channels. @@ -363,23 +364,23 @@ * Bit fields: * 7- 0: Enable (1), disable (0), one bit for each channel 0-7 */ -#define IL49_SCD_TXFACT (IL49_SCD_START_OFFSET + 0x1c) +#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c) /* - * Queue (x) Write Pointers (idxes, really!), one for each Tx queue. + * Queue (x) Write Pointers (indexes, really!), one for each Tx queue. * Initialized and updated by driver as new TFDs are added to queue. - * NOTE: If using Block Ack, idx must correspond to frame's - * Start Sequence Number; idx = (SSN & 0xff) + * NOTE: If using Block Ack, index must correspond to frame's + * Start Sequence Number; index = (SSN & 0xff) * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses? */ -#define IL49_SCD_QUEUE_WRPTR(x) (IL49_SCD_START_OFFSET + 0x24 + (x) * 4) +#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4) /* - * Queue (x) Read Pointers (idxes, really!), one for each Tx queue. - * For FIFO mode, idx indicates next frame to transmit. - * For Scheduler-ACK mode, idx indicates first frame in Tx win. + * Queue (x) Read Pointers (indexes, really!), one for each Tx queue. + * For FIFO mode, index indicates next frame to transmit. + * For Scheduler-ACK mode, index indicates first frame in Tx window. * Initialized by driver, updated by scheduler. */ -#define IL49_SCD_QUEUE_RDPTR(x) (IL49_SCD_START_OFFSET + 0x64 + (x) * 4) +#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4) /* * Select which queues work in chain mode (1) vs. not (0). 
@@ -390,18 +391,18 @@ * NOTE: If driver sets up queue for chain mode, it should be also set up * Scheduler-ACK mode as well, via SCD_QUEUE_STATUS_BITS(x). */ -#define IL49_SCD_QUEUECHAIN_SEL (IL49_SCD_START_OFFSET + 0xd0) +#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0) /* * Select which queues interrupt driver when scheduler increments - * a queue's read pointer (idx). + * a queue's read pointer (index). * Bit fields: * 31-16: Reserved * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled * NOTE: This functionality is apparently a no-op; driver relies on interrupts * from Rx queue to read Tx command responses and update Tx queues. */ -#define IL49_SCD_INTERRUPT_MASK (IL49_SCD_START_OFFSET + 0xe4) +#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4) /* * Queue search status registers. One for each queue. @@ -413,7 +414,7 @@ * Driver should init to "1" for aggregation mode, or "0" otherwise. * 7-6: Driver should init to "0" * 5: Window Size Left; indicates whether scheduler can request - * another TFD, based on win size, etc. Driver should init + * another TFD, based on window size, etc. Driver should init * this bit to "1" for aggregation mode, or "0" for non-agg. * 4-1: Tx FIFO to use (range 0-7). * 0: Queue is active (1), not active (0). @@ -422,18 +423,18 @@ * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled * via SCD_QUEUECHAIN_SEL. */ -#define IL49_SCD_QUEUE_STATUS_BITS(x)\ - (IL49_SCD_START_OFFSET + 0x104 + (x) * 4) +#define IWL49_SCD_QUEUE_STATUS_BITS(x)\ + (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4) /* Bit field positions */ -#define IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0) -#define IL49_SCD_QUEUE_STTS_REG_POS_TXF (1) -#define IL49_SCD_QUEUE_STTS_REG_POS_WSL (5) -#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8) +#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0) +#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1) +#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5) +#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8) /* Write masks */ -#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10) -#define IL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00) +#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10) +#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00) /** * 4965 internal SRAM structures for scheduler, shared with driver ... @@ -459,7 +460,7 @@ * each queue's entry as follows: * * LS Dword bit fields: - * 0-06: Max Tx win size for Scheduler-ACK. Driver should init to 64. + * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64. * * MS Dword bit fields: * 16-22: Frame limit. Driver should init to 10 (0xa). @@ -469,14 +470,14 @@ * Init must be done after driver receives "Alive" response from 4965 uCode, * and when setting up queue for aggregation. 
*/ -#define IL49_SCD_CONTEXT_DATA_OFFSET 0x380 -#define IL49_SCD_CONTEXT_QUEUE_OFFSET(x) \ - (IL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8)) +#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380 +#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \ + (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8)) -#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0) -#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F) -#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) -#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) +#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0) +#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F) +#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) +#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) /* * Tx Status Bitmap @@ -485,7 +486,7 @@ * "Alive" notification from uCode. Area is used only by device itself; * no other support (besides clearing) is required from driver. */ -#define IL49_SCD_TX_STTS_BITMAP_OFFSET 0x400 +#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400 /* * RAxTID to queue translation mapping. @@ -493,7 +494,7 @@ * When queue is in Scheduler-ACK mode, frames placed in a that queue must be * for only one combination of receiver address (RA) and traffic ID (TID), i.e. * one QOS priority level destined for one station (for this wireless link, - * not final destination). The SCD_TRANSLATE_TBL area provides 16 16-bit + * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK * mode, the device ignores the mapping value. * @@ -507,16 +508,16 @@ * must read a dword-aligned value from device SRAM, replace the 16-bit map * value of interest, and write the dword value back into device SRAM. */ -#define IL49_SCD_TRANSLATE_TBL_OFFSET 0x500 +#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500 /* Find translation table dword to read/write for given queue */ -#define IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \ - ((IL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc) +#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \ + ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc) -#define IL_SCD_TXFIFO_POS_TID (0) -#define IL_SCD_TXFIFO_POS_RA (4) -#define IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF) +#define IWL_SCD_TXFIFO_POS_TID (0) +#define IWL_SCD_TXFIFO_POS_RA (4) +#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF) /*********************** END TX SCHEDULER *************************************/ -#endif /* __il_prph_h__ */ +#endif /* __iwl_legacy_prph_h__ */ diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-rx.c b/trunk/drivers/net/wireless/iwlegacy/iwl-rx.c new file mode 100644 index 000000000000..f4d21ec22497 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-rx.c @@ -0,0 +1,282 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-sta.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +/************************** RX-FUNCTIONS ****************************/ +/* + * Rx theory of operation + * + * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), + * each of which point to Receive Buffers to be filled by the NIC. These get + * used not only for Rx frames, but for any command response or notification + * from the NIC. The driver and NIC manage the Rx buffers by means + * of indexes into the circular buffer. + * + * Rx Queue Indexes + * The host/firmware share two index registers for managing the Rx buffers. + * + * The READ index maps to the first position that the firmware may be writing + * to -- the driver can read up to (but not including) this position and get + * good data. + * The READ index is managed by the firmware once the card is enabled. + * + * The WRITE index maps to the last position the driver has read from -- the + * position preceding WRITE is the last slot the firmware can place a packet. + * + * The queue is empty (no good data) if WRITE = READ - 1, and is full if + * WRITE = READ. + * + * During initialization, the host sets up the READ queue position to the first + * INDEX position, and WRITE to the last (READ - 1 wrapped) + * + * When the firmware places a packet in a buffer, it will advance the READ index + * and fire the RX interrupt. The driver can then query the READ index and + * process as many packets as possible, moving the WRITE index forward as it + * resets the Rx queue buffers with new memory. + * + * The management in the driver is as follows: + * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When + * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled + * to replenish the iwl->rxq->rx_free. + * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the + * iwl->rxq is replenished and the READ INDEX is updated (updating the + * 'processed' and 'read' driver indexes as well) + * + A received packet is processed and handed to the kernel network stack, + * detached from the iwl->rxq. The driver 'processed' index is updated. + * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free + * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ + * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there + * were enough free buffers and RX_STALLED is set it is cleared. + * + * + * Driver sequence: + * + * iwl_legacy_rx_queue_alloc() Allocates rx_free + * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls + * iwl_rx_queue_restock + * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx + * queue, updates firmware pointers, and updates + * the WRITE index. 
If insufficient rx_free buffers + * are available, schedules iwl_rx_replenish + * + * -- enable interrupts -- + * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the + * READ INDEX, detaching the SKB from the pool. + * Moves the packet buffer from queue to rx_used. + * Calls iwl_rx_queue_restock to refill any empty + * slots. + * ... + * + */ + +/** + * iwl_legacy_rx_queue_space - Return number of free slots available in queue. + */ +int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q) +{ + int s = q->read - q->write; + if (s <= 0) + s += RX_QUEUE_SIZE; + /* keep some buffer to not confuse full and empty queue */ + s -= 2; + if (s < 0) + s = 0; + return s; +} +EXPORT_SYMBOL(iwl_legacy_rx_queue_space); + +/** + * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue + */ +void +iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv, + struct iwl_rx_queue *q) +{ + unsigned long flags; + u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg; + u32 reg; + + spin_lock_irqsave(&q->lock, flags); + + if (q->need_update == 0) + goto exit_unlock; + + /* If power-saving is in use, make sure device is awake */ + if (test_bit(STATUS_POWER_PMI, &priv->status)) { + reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + IWL_DEBUG_INFO(priv, + "Rx queue requesting wakeup," + " GP1 = 0x%x\n", reg); + iwl_legacy_set_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + goto exit_unlock; + } + + q->write_actual = (q->write & ~0x7); + iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg, + q->write_actual); + + /* Else device is assumed to be awake */ + } else { + /* Device expects a multiple of 8 */ + q->write_actual = (q->write & ~0x7); + iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg, + q->write_actual); + } + + q->need_update = 0; + + exit_unlock: + spin_unlock_irqrestore(&q->lock, flags); +} +EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr); + +int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct device *dev = &priv->pci_dev->dev; + int i; + + spin_lock_init(&rxq->lock); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + + /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ + rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma, + GFP_KERNEL); + if (!rxq->bd) + goto err_bd; + + rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status), + &rxq->rb_stts_dma, GFP_KERNEL); + if (!rxq->rb_stts) + goto err_rb; + + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->write_actual = 0; + rxq->free_count = 0; + rxq->need_update = 0; + return 0; + +err_rb: + dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, + rxq->bd_dma); +err_bd: + return -ENOMEM; +} +EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc); + + +void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif); + + if (!report->state) { + IWL_DEBUG_11H(priv, + "Spectrum Measure Notification: Start\n"); + return; + } + + memcpy(&priv->measure_report, report, sizeof(*report)); + priv->measurement_status |= MEASUREMENT_READY; +} 
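[Editor's illustration — not part of the patch.] The ring-index bookkeeping described in the "Rx theory of operation" comment is easier to see with concrete numbers. The standalone sketch below reuses the same arithmetic as iwl_legacy_rx_queue_space() above on a toy eight-entry ring; QUEUE_SIZE and the sample read/write values are made up for illustration.

#include <stdio.h>

#define QUEUE_SIZE 8	/* toy size; the driver uses RX_QUEUE_SIZE */

/* Same arithmetic as iwl_legacy_rx_queue_space() above. */
static int queue_space(int read, int write)
{
	int s = read - write;

	if (s <= 0)
		s += QUEUE_SIZE;	/* wrap around the circular buffer */
	s -= 2;				/* keep slack so full and empty differ */
	if (s < 0)
		s = 0;
	return s;
}

int main(void)
{
	printf("read=3 write=3 -> %d free\n", queue_space(3, 3));	/* 6 */
	printf("read=3 write=2 -> %d free\n", queue_space(3, 2));	/* 0 */
	printf("read=7 write=1 -> %d free\n", queue_space(7, 1));	/* 4 */
	return 0;
}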
+EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif); + +/* + * returns non-zero if packet should be dropped + */ +int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + u32 decrypt_res, + struct ieee80211_rx_status *stats) +{ + u16 fc = le16_to_cpu(hdr->frame_control); + + /* + * All contexts have the same setting here due to it being + * a module parameter, so OK to check any context. + */ + if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags & + RXON_FILTER_DIS_DECRYPT_MSK) + return 0; + + if (!(fc & IEEE80211_FCTL_PROTECTED)) + return 0; + + IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res); + switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { + case RX_RES_STATUS_SEC_TYPE_TKIP: + /* The uCode has got a bad phase 1 Key, pushes the packet. + * Decryption will be done in SW. */ + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_BAD_KEY_TTAK) + break; + + case RX_RES_STATUS_SEC_TYPE_WEP: + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_BAD_ICV_MIC) { + /* bad ICV, the packet is destroyed since the + * decryption is inplace, drop it */ + IWL_DEBUG_RX(priv, "Packet destroyed\n"); + return -1; + } + case RX_RES_STATUS_SEC_TYPE_CCMP: + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_DECRYPT_OK) { + IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n"); + stats->flag |= RX_FLAG_DECRYPTED; + } + break; + + default: + break; + } + return 0; +} +EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag); diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-scan.c b/trunk/drivers/net/wireless/iwlegacy/iwl-scan.c new file mode 100644 index 000000000000..521b73b527d3 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-scan.c @@ -0,0 +1,550 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ +#include +#include +#include +#include +#include + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-sta.h" +#include "iwl-io.h" +#include "iwl-helpers.h" + +/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after + * sending probe req. This should be set long enough to hear probe responses + * from more than one AP. 
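For a sense of scale, with the constants defined just below and the formula applied later in iwl_legacy_get_active_dwell_time(), an active 2.4 GHz scan carrying two probe requests dwells roughly

	IWL_ACTIVE_DWELL_TIME_24 + IWL_ACTIVE_DWELL_FACTOR_24GHZ * (2 + 1) = 30 + 9 = 39 ms

per channel, which stays well below the default 2.4 GHz passive dwell of IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 = 120 ms.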
*/ +#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */ +#define IWL_ACTIVE_DWELL_TIME_52 (20) + +#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3) +#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2) + +/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel. + * Must be set longer than active dwell time. + * For the most reliable scan, set > AP beacon interval (typically 100msec). */ +#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */ +#define IWL_PASSIVE_DWELL_TIME_52 (10) +#define IWL_PASSIVE_DWELL_BASE (100) +#define IWL_CHANNEL_TUNE_TIME 5 + +static int iwl_legacy_send_scan_abort(struct iwl_priv *priv) +{ + int ret; + struct iwl_rx_packet *pkt; + struct iwl_host_cmd cmd = { + .id = REPLY_SCAN_ABORT_CMD, + .flags = CMD_WANT_SKB, + }; + + /* Exit instantly with error when device is not ready + * to receive scan abort command or it does not perform + * hardware scan currently */ + if (!test_bit(STATUS_READY, &priv->status) || + !test_bit(STATUS_GEO_CONFIGURED, &priv->status) || + !test_bit(STATUS_SCAN_HW, &priv->status) || + test_bit(STATUS_FW_ERROR, &priv->status) || + test_bit(STATUS_EXIT_PENDING, &priv->status)) + return -EIO; + + ret = iwl_legacy_send_cmd_sync(priv, &cmd); + if (ret) + return ret; + + pkt = (struct iwl_rx_packet *)cmd.reply_page; + if (pkt->u.status != CAN_ABORT_STATUS) { + /* The scan abort will return 1 for success or + * 2 for "failure". A failure condition can be + * due to simply not being in an active scan which + * can occur if we send the scan abort before we + * the microcode has notified us that a scan is + * completed. */ + IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status); + ret = -EIO; + } + + iwl_legacy_free_pages(priv, cmd.reply_page); + return ret; +} + +static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted) +{ + /* check if scan was requested from mac80211 */ + if (priv->scan_request) { + IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n"); + ieee80211_scan_completed(priv->hw, aborted); + } + + priv->scan_vif = NULL; + priv->scan_request = NULL; +} + +void iwl_legacy_force_scan_end(struct iwl_priv *priv) +{ + lockdep_assert_held(&priv->mutex); + + if (!test_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n"); + return; + } + + IWL_DEBUG_SCAN(priv, "Forcing scan end\n"); + clear_bit(STATUS_SCANNING, &priv->status); + clear_bit(STATUS_SCAN_HW, &priv->status); + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + iwl_legacy_complete_scan(priv, true); +} + +static void iwl_legacy_do_scan_abort(struct iwl_priv *priv) +{ + int ret; + + lockdep_assert_held(&priv->mutex); + + if (!test_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n"); + return; + } + + if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan abort in progress\n"); + return; + } + + ret = iwl_legacy_send_scan_abort(priv); + if (ret) { + IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret); + iwl_legacy_force_scan_end(priv); + } else + IWL_DEBUG_SCAN(priv, "Successfully send scan abort\n"); +} + +/** + * iwl_scan_cancel - Cancel any currently executing HW scan + */ +int iwl_legacy_scan_cancel(struct iwl_priv *priv) +{ + IWL_DEBUG_SCAN(priv, "Queuing abort scan\n"); + queue_work(priv->workqueue, &priv->abort_scan); + return 0; +} +EXPORT_SYMBOL(iwl_legacy_scan_cancel); + +/** + * iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan + * @ms: amount of time to wait (in milliseconds) for scan to abort 
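As a usage note, a hedged sketch of how the abort path drives this helper (it mirrors iwl_legacy_bg_abort_scan() further below; checking the return value is illustrative, the driver's own caller ignores it):

	mutex_lock(&priv->mutex);
	if (iwl_legacy_scan_cancel_timeout(priv, 200))	/* wait up to 200 ms */
		IWL_DEBUG_SCAN(priv, "HW scan still active after abort wait\n");
	mutex_unlock(&priv->mutex);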
+ * + */ +int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(ms); + + lockdep_assert_held(&priv->mutex); + + IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n"); + + iwl_legacy_do_scan_abort(priv); + + while (time_before_eq(jiffies, timeout)) { + if (!test_bit(STATUS_SCAN_HW, &priv->status)) + break; + msleep(20); + } + + return test_bit(STATUS_SCAN_HW, &priv->status); +} +EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout); + +/* Service response to REPLY_SCAN_CMD (0x80) */ +static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_scanreq_notification *notif = + (struct iwl_scanreq_notification *)pkt->u.raw; + + IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status); +#endif +} + +/* Service SCAN_START_NOTIFICATION (0x82) */ +static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_scanstart_notification *notif = + (struct iwl_scanstart_notification *)pkt->u.raw; + priv->scan_start_tsf = le32_to_cpu(notif->tsf_low); + IWL_DEBUG_SCAN(priv, "Scan start: " + "%d [802.11%s] " + "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", + notif->channel, + notif->band ? "bg" : "a", + le32_to_cpu(notif->tsf_high), + le32_to_cpu(notif->tsf_low), + notif->status, notif->beacon_timer); +} + +/* Service SCAN_RESULTS_NOTIFICATION (0x83) */ +static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_scanresults_notification *notif = + (struct iwl_scanresults_notification *)pkt->u.raw; + + IWL_DEBUG_SCAN(priv, "Scan ch.res: " + "%d [802.11%s] " + "(TSF: 0x%08X:%08X) - %d " + "elapsed=%lu usec\n", + notif->channel, + notif->band ? "bg" : "a", + le32_to_cpu(notif->tsf_high), + le32_to_cpu(notif->tsf_low), + le32_to_cpu(notif->statistics[0]), + le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf); +#endif +} + +/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ +static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw; +#endif + + IWL_DEBUG_SCAN(priv, + "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", + scan_notif->scanned_channels, + scan_notif->tsf_low, + scan_notif->tsf_high, scan_notif->status); + + /* The HW is no longer scanning */ + clear_bit(STATUS_SCAN_HW, &priv->status); + + IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n", + (priv->scan_band == IEEE80211_BAND_2GHZ) ? 
"2.4" : "5.2", + jiffies_to_msecs(jiffies - priv->scan_start)); + + queue_work(priv->workqueue, &priv->scan_completed); +} + +void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv) +{ + /* scan handlers */ + priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan; + priv->rx_handlers[SCAN_START_NOTIFICATION] = + iwl_legacy_rx_scan_start_notif; + priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] = + iwl_legacy_rx_scan_results_notif; + priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] = + iwl_legacy_rx_scan_complete_notif; +} +EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers); + +inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv, + enum ieee80211_band band, + u8 n_probes) +{ + if (band == IEEE80211_BAND_5GHZ) + return IWL_ACTIVE_DWELL_TIME_52 + + IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1); + else + return IWL_ACTIVE_DWELL_TIME_24 + + IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1); +} +EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time); + +u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv, + enum ieee80211_band band, + struct ieee80211_vif *vif) +{ + struct iwl_rxon_context *ctx; + u16 passive = (band == IEEE80211_BAND_2GHZ) ? + IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 : + IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52; + + if (iwl_legacy_is_any_associated(priv)) { + /* + * If we're associated, we clamp the maximum passive + * dwell time to be 98% of the smallest beacon interval + * (minus 2 * channel tune time) + */ + for_each_context(priv, ctx) { + u16 value; + + if (!iwl_legacy_is_associated_ctx(ctx)) + continue; + value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0; + if ((value > IWL_PASSIVE_DWELL_BASE) || !value) + value = IWL_PASSIVE_DWELL_BASE; + value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; + passive = min(value, passive); + } + } + + return passive; +} +EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time); + +void iwl_legacy_init_scan_params(struct iwl_priv *priv) +{ + u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1; + if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ]) + priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx; + if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ]) + priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx; +} +EXPORT_SYMBOL(iwl_legacy_init_scan_params); + +static int iwl_legacy_scan_initiate(struct iwl_priv *priv, + struct ieee80211_vif *vif) +{ + int ret; + + lockdep_assert_held(&priv->mutex); + + if (WARN_ON(!priv->cfg->ops->utils->request_scan)) + return -EOPNOTSUPP; + + cancel_delayed_work(&priv->scan_check); + + if (!iwl_legacy_is_ready_rf(priv)) { + IWL_WARN(priv, "Request scan called when driver not ready.\n"); + return -EIO; + } + + if (test_bit(STATUS_SCAN_HW, &priv->status)) { + IWL_DEBUG_SCAN(priv, + "Multiple concurrent scan requests in parallel.\n"); + return -EBUSY; + } + + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n"); + return -EBUSY; + } + + IWL_DEBUG_SCAN(priv, "Starting scan...\n"); + + set_bit(STATUS_SCANNING, &priv->status); + priv->scan_start = jiffies; + + ret = priv->cfg->ops->utils->request_scan(priv, vif); + if (ret) { + clear_bit(STATUS_SCANNING, &priv->status); + return ret; + } + + queue_delayed_work(priv->workqueue, &priv->scan_check, + IWL_SCAN_CHECK_WATCHDOG); + + return 0; +} + +int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_scan_request *req) +{ + struct iwl_priv *priv = hw->priv; + int ret; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (req->n_channels == 0) + return 
-EINVAL; + + mutex_lock(&priv->mutex); + + if (test_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); + ret = -EAGAIN; + goto out_unlock; + } + + /* mac80211 will only ask for one band at a time */ + priv->scan_request = req; + priv->scan_vif = vif; + priv->scan_band = req->channels[0]->band; + + ret = iwl_legacy_scan_initiate(priv, vif); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + +out_unlock: + mutex_unlock(&priv->mutex); + + return ret; +} +EXPORT_SYMBOL(iwl_legacy_mac_hw_scan); + +static void iwl_legacy_bg_scan_check(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, scan_check.work); + + IWL_DEBUG_SCAN(priv, "Scan check work\n"); + + /* Since we are here firmware does not finish scan and + * most likely is in bad shape, so we don't bother to + * send abort command, just force scan complete to mac80211 */ + mutex_lock(&priv->mutex); + iwl_legacy_force_scan_end(priv); + mutex_unlock(&priv->mutex); +} + +/** + * iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request + */ + +u16 +iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, + const u8 *ta, const u8 *ies, int ie_len, int left) +{ + int len = 0; + u8 *pos = NULL; + + /* Make sure there is enough space for the probe request, + * two mandatory IEs and the data */ + left -= 24; + if (left < 0) + return 0; + + frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); + memcpy(frame->da, iwlegacy_bcast_addr, ETH_ALEN); + memcpy(frame->sa, ta, ETH_ALEN); + memcpy(frame->bssid, iwlegacy_bcast_addr, ETH_ALEN); + frame->seq_ctrl = 0; + + len += 24; + + /* ...next IE... */ + pos = &frame->u.probe_req.variable[0]; + + /* fill in our indirect SSID IE */ + left -= 2; + if (left < 0) + return 0; + *pos++ = WLAN_EID_SSID; + *pos++ = 0; + + len += 2; + + if (WARN_ON(left < ie_len)) + return len; + + if (ies && ie_len) { + memcpy(pos, ies, ie_len); + len += ie_len; + } + + return (u16)len; +} +EXPORT_SYMBOL(iwl_legacy_fill_probe_req); + +static void iwl_legacy_bg_abort_scan(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan); + + IWL_DEBUG_SCAN(priv, "Abort scan work\n"); + + /* We keep scan_check work queued in case when firmware will not + * report back scan completed notification */ + mutex_lock(&priv->mutex); + iwl_legacy_scan_cancel_timeout(priv, 200); + mutex_unlock(&priv->mutex); +} + +static void iwl_legacy_bg_scan_completed(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, scan_completed); + bool aborted; + + IWL_DEBUG_SCAN(priv, "Completed scan.\n"); + + cancel_delayed_work(&priv->scan_check); + + mutex_lock(&priv->mutex); + + aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status); + if (aborted) + IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n"); + + if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan already completed.\n"); + goto out_settings; + } + + iwl_legacy_complete_scan(priv, aborted); + +out_settings: + /* Can we still talk to firmware ? */ + if (!iwl_legacy_is_ready_rf(priv)) + goto out; + + /* + * We do not commit power settings while scan is pending, + * do it now if the settings changed. 
+ */ + iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false); + iwl_legacy_set_tx_power(priv, priv->tx_power_next, false); + + priv->cfg->ops->utils->post_scan(priv); + +out: + mutex_unlock(&priv->mutex); +} + +void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv) +{ + INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed); + INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan); + INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check); +} +EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work); + +void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv) +{ + cancel_work_sync(&priv->abort_scan); + cancel_work_sync(&priv->scan_completed); + + if (cancel_delayed_work_sync(&priv->scan_check)) { + mutex_lock(&priv->mutex); + iwl_legacy_force_scan_end(priv); + mutex_unlock(&priv->mutex); + } +} +EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work); diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-spectrum.h b/trunk/drivers/net/wireless/iwlegacy/iwl-spectrum.h index 85fe48e520f9..9f70a4723103 100644 --- a/trunk/drivers/net/wireless/iwlegacy/iwl-spectrum.h +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-spectrum.h @@ -26,8 +26,8 @@ * *****************************************************************************/ -#ifndef __il_spectrum_h__ -#define __il_spectrum_h__ +#ifndef __iwl_legacy_spectrum_h__ +#define __iwl_legacy_spectrum_h__ enum { /* ieee80211_basic_report.map */ IEEE80211_BASIC_MAP_BSS = (1 << 0), IEEE80211_BASIC_MAP_OFDM = (1 << 1), diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-sta.c b/trunk/drivers/net/wireless/iwlegacy/iwl-sta.c new file mode 100644 index 000000000000..f10df3e2813a --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-sta.c @@ -0,0 +1,817 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-sta.h" + +/* priv->sta_lock must be held */ +static void iwl_legacy_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) +{ + + if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) + IWL_ERR(priv, + "ACTIVATE a non DRIVER active station id %u addr %pM\n", + sta_id, priv->stations[sta_id].sta.sta.addr); + + if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) { + IWL_DEBUG_ASSOC(priv, + "STA id %u addr %pM already present" + " in uCode (according to driver)\n", + sta_id, priv->stations[sta_id].sta.sta.addr); + } else { + priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE; + IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n", + sta_id, priv->stations[sta_id].sta.sta.addr); + } +} + +static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv, + struct iwl_legacy_addsta_cmd *addsta, + struct iwl_rx_packet *pkt, + bool sync) +{ + u8 sta_id = addsta->sta.sta_id; + unsigned long flags; + int ret = -EIO; + + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n", + pkt->hdr.flags); + return ret; + } + + IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n", + sta_id); + + spin_lock_irqsave(&priv->sta_lock, flags); + + switch (pkt->u.add_sta.status) { + case ADD_STA_SUCCESS_MSK: + IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n"); + iwl_legacy_sta_ucode_activate(priv, sta_id); + ret = 0; + break; + case ADD_STA_NO_ROOM_IN_TABLE: + IWL_ERR(priv, "Adding station %d failed, no room in table.\n", + sta_id); + break; + case ADD_STA_NO_BLOCK_ACK_RESOURCE: + IWL_ERR(priv, + "Adding station %d failed, no block ack resource.\n", + sta_id); + break; + case ADD_STA_MODIFY_NON_EXIST_STA: + IWL_ERR(priv, "Attempting to modify non-existing station %d\n", + sta_id); + break; + default: + IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n", + pkt->u.add_sta.status); + break; + } + + IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n", + priv->stations[sta_id].sta.mode == + STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", + sta_id, priv->stations[sta_id].sta.sta.addr); + + /* + * XXX: The MAC address in the command buffer is often changed from + * the original sent to the device. That is, the MAC address + * written to the command buffer often is not the same MAC address + * read from the command buffer when the command returns. This + * issue has not yet been resolved and this debugging is left to + * observe the problem. + */ + IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n", + priv->stations[sta_id].sta.mode == + STA_CONTROL_MODIFY_MSK ? 
"Modified" : "Added", + addsta->sta.addr); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return ret; +} + +static void iwl_legacy_add_sta_callback(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt) +{ + struct iwl_legacy_addsta_cmd *addsta = + (struct iwl_legacy_addsta_cmd *)cmd->cmd.payload; + + iwl_legacy_process_add_sta_resp(priv, addsta, pkt, false); + +} + +int iwl_legacy_send_add_sta(struct iwl_priv *priv, + struct iwl_legacy_addsta_cmd *sta, u8 flags) +{ + struct iwl_rx_packet *pkt = NULL; + int ret = 0; + u8 data[sizeof(*sta)]; + struct iwl_host_cmd cmd = { + .id = REPLY_ADD_STA, + .flags = flags, + .data = data, + }; + u8 sta_id __maybe_unused = sta->sta.sta_id; + + IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n", + sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : ""); + + if (flags & CMD_ASYNC) + cmd.callback = iwl_legacy_add_sta_callback; + else { + cmd.flags |= CMD_WANT_SKB; + might_sleep(); + } + + cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data); + ret = iwl_legacy_send_cmd(priv, &cmd); + + if (ret || (flags & CMD_ASYNC)) + return ret; + + if (ret == 0) { + pkt = (struct iwl_rx_packet *)cmd.reply_page; + ret = iwl_legacy_process_add_sta_resp(priv, sta, pkt, true); + } + iwl_legacy_free_pages(priv, cmd.reply_page); + + return ret; +} +EXPORT_SYMBOL(iwl_legacy_send_add_sta); + +static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index, + struct ieee80211_sta *sta, + struct iwl_rxon_context *ctx) +{ + struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap; + __le32 sta_flags; + u8 mimo_ps_mode; + + if (!sta || !sta_ht_inf->ht_supported) + goto done; + + mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2; + IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n", + (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? + "static" : + (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? 
+ "dynamic" : "disabled"); + + sta_flags = priv->stations[index].sta.station_flags; + + sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK); + + switch (mimo_ps_mode) { + case WLAN_HT_CAP_SM_PS_STATIC: + sta_flags |= STA_FLG_MIMO_DIS_MSK; + break; + case WLAN_HT_CAP_SM_PS_DYNAMIC: + sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK; + break; + case WLAN_HT_CAP_SM_PS_DISABLED: + break; + default: + IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode); + break; + } + + sta_flags |= cpu_to_le32( + (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS); + + sta_flags |= cpu_to_le32( + (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); + + if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap)) + sta_flags |= STA_FLG_HT40_EN_MSK; + else + sta_flags &= ~STA_FLG_HT40_EN_MSK; + + priv->stations[index].sta.station_flags = sta_flags; + done: + return; +} + +/** + * iwl_legacy_prep_station - Prepare station information for addition + * + * should be called with sta_lock held + */ +u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + const u8 *addr, bool is_ap, struct ieee80211_sta *sta) +{ + struct iwl_station_entry *station; + int i; + u8 sta_id = IWL_INVALID_STATION; + u16 rate; + + if (is_ap) + sta_id = ctx->ap_sta_id; + else if (is_broadcast_ether_addr(addr)) + sta_id = ctx->bcast_sta_id; + else + for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) { + if (!compare_ether_addr(priv->stations[i].sta.sta.addr, + addr)) { + sta_id = i; + break; + } + + if (!priv->stations[i].used && + sta_id == IWL_INVALID_STATION) + sta_id = i; + } + + /* + * These two conditions have the same outcome, but keep them + * separate + */ + if (unlikely(sta_id == IWL_INVALID_STATION)) + return sta_id; + + /* + * uCode is not able to deal with multiple requests to add a + * station. Keep track if one is in progress so that we do not send + * another. + */ + if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) { + IWL_DEBUG_INFO(priv, + "STA %d already in process of being added.\n", + sta_id); + return sta_id; + } + + if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) && + (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) && + !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) { + IWL_DEBUG_ASSOC(priv, + "STA %d (%pM) already added, not adding again.\n", + sta_id, addr); + return sta_id; + } + + station = &priv->stations[sta_id]; + station->used = IWL_STA_DRIVER_ACTIVE; + IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n", + sta_id, addr); + priv->num_stations++; + + /* Set up the REPLY_ADD_STA command to send to device */ + memset(&station->sta, 0, sizeof(struct iwl_legacy_addsta_cmd)); + memcpy(station->sta.sta.addr, addr, ETH_ALEN); + station->sta.mode = 0; + station->sta.sta.sta_id = sta_id; + station->sta.station_flags = ctx->station_flags; + station->ctxid = ctx->ctxid; + + if (sta) { + struct iwl_station_priv_common *sta_priv; + + sta_priv = (void *)sta->drv_priv; + sta_priv->ctx = ctx; + } + + /* + * OK to call unconditionally, since local stations (IBSS BSSID + * STA and broadcast STA) pass in a NULL sta, and mac80211 + * doesn't allow HT IBSS. + */ + iwl_legacy_set_ht_add_station(priv, sta_id, sta, ctx); + + /* 3945 only */ + rate = (priv->band == IEEE80211_BAND_5GHZ) ? + IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP; + /* Turn on both antennas for the station... 
*/ + station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK); + + return sta_id; + +} +EXPORT_SYMBOL_GPL(iwl_legacy_prep_station); + +#define STA_WAIT_TIMEOUT (HZ/2) + +/** + * iwl_legacy_add_station_common - + */ +int +iwl_legacy_add_station_common(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + const u8 *addr, bool is_ap, + struct ieee80211_sta *sta, u8 *sta_id_r) +{ + unsigned long flags_spin; + int ret = 0; + u8 sta_id; + struct iwl_legacy_addsta_cmd sta_cmd; + + *sta_id_r = 0; + spin_lock_irqsave(&priv->sta_lock, flags_spin); + sta_id = iwl_legacy_prep_station(priv, ctx, addr, is_ap, sta); + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Unable to prepare station %pM for addition\n", + addr); + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + return -EINVAL; + } + + /* + * uCode is not able to deal with multiple requests to add a + * station. Keep track if one is in progress so that we do not send + * another. + */ + if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) { + IWL_DEBUG_INFO(priv, + "STA %d already in process of being added.\n", + sta_id); + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + return -EEXIST; + } + + if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) && + (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { + IWL_DEBUG_ASSOC(priv, + "STA %d (%pM) already added, not adding again.\n", + sta_id, addr); + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + return -EEXIST; + } + + priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS; + memcpy(&sta_cmd, &priv->stations[sta_id].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + + /* Add station to device's station table */ + ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); + if (ret) { + spin_lock_irqsave(&priv->sta_lock, flags_spin); + IWL_ERR(priv, "Adding station %pM failed.\n", + priv->stations[sta_id].sta.sta.addr); + priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; + priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + } + *sta_id_r = sta_id; + return ret; +} +EXPORT_SYMBOL(iwl_legacy_add_station_common); + +/** + * iwl_legacy_sta_ucode_deactivate - deactivate ucode status for a station + * + * priv->sta_lock must be held + */ +static void iwl_legacy_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id) +{ + /* Ucode must be active and driver must be non active */ + if ((priv->stations[sta_id].used & + (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) != + IWL_STA_UCODE_ACTIVE) + IWL_ERR(priv, "removed non active STA %u\n", sta_id); + + priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE; + + memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry)); + IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id); +} + +static int iwl_legacy_send_remove_station(struct iwl_priv *priv, + const u8 *addr, int sta_id, + bool temporary) +{ + struct iwl_rx_packet *pkt; + int ret; + + unsigned long flags_spin; + struct iwl_rem_sta_cmd rm_sta_cmd; + + struct iwl_host_cmd cmd = { + .id = REPLY_REMOVE_STA, + .len = sizeof(struct iwl_rem_sta_cmd), + .flags = CMD_SYNC, + .data = &rm_sta_cmd, + }; + + memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); + rm_sta_cmd.num_sta = 1; + memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN); + + cmd.flags |= CMD_WANT_SKB; + + ret = iwl_legacy_send_cmd(priv, &cmd); + + if (ret) + return ret; + + pkt = (struct iwl_rx_packet *)cmd.reply_page; + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERR(priv, "Bad return from 
REPLY_REMOVE_STA (0x%08X)\n", + pkt->hdr.flags); + ret = -EIO; + } + + if (!ret) { + switch (pkt->u.rem_sta.status) { + case REM_STA_SUCCESS_MSK: + if (!temporary) { + spin_lock_irqsave(&priv->sta_lock, flags_spin); + iwl_legacy_sta_ucode_deactivate(priv, sta_id); + spin_unlock_irqrestore(&priv->sta_lock, + flags_spin); + } + IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); + break; + default: + ret = -EIO; + IWL_ERR(priv, "REPLY_REMOVE_STA failed\n"); + break; + } + } + iwl_legacy_free_pages(priv, cmd.reply_page); + + return ret; +} + +/** + * iwl_legacy_remove_station - Remove driver's knowledge of station. + */ +int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id, + const u8 *addr) +{ + unsigned long flags; + + if (!iwl_legacy_is_ready(priv)) { + IWL_DEBUG_INFO(priv, + "Unable to remove station %pM, device not ready.\n", + addr); + /* + * It is typical for stations to be removed when we are + * going down. Return success since device will be down + * soon anyway + */ + return 0; + } + + IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n", + sta_id, addr); + + if (WARN_ON(sta_id == IWL_INVALID_STATION)) + return -EINVAL; + + spin_lock_irqsave(&priv->sta_lock, flags); + + if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) { + IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n", + addr); + goto out_err; + } + + if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { + IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n", + addr); + goto out_err; + } + + if (priv->stations[sta_id].used & IWL_STA_LOCAL) { + kfree(priv->stations[sta_id].lq); + priv->stations[sta_id].lq = NULL; + } + + priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; + + priv->num_stations--; + + BUG_ON(priv->num_stations < 0); + + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_legacy_send_remove_station(priv, addr, sta_id, false); +out_err: + spin_unlock_irqrestore(&priv->sta_lock, flags); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(iwl_legacy_remove_station); + +/** + * iwl_legacy_clear_ucode_stations - clear ucode station table bits + * + * This function clears all the bits in the driver indicating + * which stations are active in the ucode. Call when something + * other than explicit station management would cause this in + * the ucode, e.g. unassociated RXON. + */ +void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + int i; + unsigned long flags_spin; + bool cleared = false; + + IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n"); + + spin_lock_irqsave(&priv->sta_lock, flags_spin); + for (i = 0; i < priv->hw_params.max_stations; i++) { + if (ctx && ctx->ctxid != priv->stations[i].ctxid) + continue; + + if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) { + IWL_DEBUG_INFO(priv, + "Clearing ucode active for station %d\n", i); + priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE; + cleared = true; + } + } + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + + if (!cleared) + IWL_DEBUG_INFO(priv, + "No active stations found to be cleared\n"); +} +EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations); + +/** + * iwl_legacy_restore_stations() - Restore driver known stations to device + * + * All stations considered active by driver, but not present in ucode, is + * restored. + * + * Function sleeps. 
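To make the add/remove/restore flow easier to follow, here is an illustrative summary (not driver code) of how the helpers in this file drive the priv->stations[i].used bitmask; the IWL_STA_* flags themselves are declared in iwl-sta.h later in this patch:

	/*
	 * 0                            slot free
	 * IWL_STA_DRIVER_ACTIVE        iwl_legacy_prep_station() claimed the slot
	 *  | IWL_STA_UCODE_INPROGRESS  REPLY_ADD_STA has been queued to the uCode
	 *  | IWL_STA_UCODE_ACTIVE      uCode acknowledged it (ADD_STA_SUCCESS_MSK)
	 *
	 * iwl_legacy_remove_station() clears IWL_STA_DRIVER_ACTIVE and sends
	 * REPLY_REMOVE_STA; on success iwl_legacy_sta_ucode_deactivate() clears
	 * IWL_STA_UCODE_ACTIVE and wipes the entry.  iwl_legacy_restore_stations()
	 * re-adds every entry that is DRIVER_ACTIVE but not UCODE_ACTIVE, e.g.
	 * after the uCode station table was flushed by an unassociated RXON.
	 */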
+ */ +void +iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + struct iwl_legacy_addsta_cmd sta_cmd; + struct iwl_link_quality_cmd lq; + unsigned long flags_spin; + int i; + bool found = false; + int ret; + bool send_lq; + + if (!iwl_legacy_is_ready(priv)) { + IWL_DEBUG_INFO(priv, + "Not ready yet, not restoring any stations.\n"); + return; + } + + IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n"); + spin_lock_irqsave(&priv->sta_lock, flags_spin); + for (i = 0; i < priv->hw_params.max_stations; i++) { + if (ctx->ctxid != priv->stations[i].ctxid) + continue; + if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) && + !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) { + IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n", + priv->stations[i].sta.sta.addr); + priv->stations[i].sta.mode = 0; + priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS; + found = true; + } + } + + for (i = 0; i < priv->hw_params.max_stations; i++) { + if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) { + memcpy(&sta_cmd, &priv->stations[i].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + send_lq = false; + if (priv->stations[i].lq) { + memcpy(&lq, priv->stations[i].lq, + sizeof(struct iwl_link_quality_cmd)); + send_lq = true; + } + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); + if (ret) { + spin_lock_irqsave(&priv->sta_lock, flags_spin); + IWL_ERR(priv, "Adding station %pM failed.\n", + priv->stations[i].sta.sta.addr); + priv->stations[i].used &= + ~IWL_STA_DRIVER_ACTIVE; + priv->stations[i].used &= + ~IWL_STA_UCODE_INPROGRESS; + spin_unlock_irqrestore(&priv->sta_lock, + flags_spin); + } + /* + * Rate scaling has already been initialized, send + * current LQ command + */ + if (send_lq) + iwl_legacy_send_lq_cmd(priv, ctx, &lq, + CMD_SYNC, true); + spin_lock_irqsave(&priv->sta_lock, flags_spin); + priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS; + } + } + + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + if (!found) + IWL_DEBUG_INFO(priv, "Restoring all known stations" + " .... no stations to be restored.\n"); + else + IWL_DEBUG_INFO(priv, "Restoring all known stations" + " .... 
complete.\n"); +} +EXPORT_SYMBOL(iwl_legacy_restore_stations); + +int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv) +{ + int i; + + for (i = 0; i < priv->sta_key_max_num; i++) + if (!test_and_set_bit(i, &priv->ucode_key_table)) + return i; + + return WEP_INVALID_OFFSET; +} +EXPORT_SYMBOL(iwl_legacy_get_free_ucode_key_index); + +void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&priv->sta_lock, flags); + for (i = 0; i < priv->hw_params.max_stations; i++) { + if (!(priv->stations[i].used & IWL_STA_BCAST)) + continue; + + priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE; + priv->num_stations--; + BUG_ON(priv->num_stations < 0); + kfree(priv->stations[i].lq); + priv->stations[i].lq = NULL; + } + spin_unlock_irqrestore(&priv->sta_lock, flags); +} +EXPORT_SYMBOL_GPL(iwl_legacy_dealloc_bcast_stations); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv, + struct iwl_link_quality_cmd *lq) +{ + int i; + IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id); + IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n", + lq->general_params.single_stream_ant_msk, + lq->general_params.dual_stream_ant_msk); + + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) + IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n", + i, lq->rs_table[i].rate_n_flags); +} +#else +static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv, + struct iwl_link_quality_cmd *lq) +{ +} +#endif + +/** + * iwl_legacy_is_lq_table_valid() - Test one aspect of LQ cmd for validity + * + * It sometimes happens when a HT rate has been in use and we + * loose connectivity with AP then mac80211 will first tell us that the + * current channel is not HT anymore before removing the station. In such a + * scenario the RXON flags will be updated to indicate we are not + * communicating HT anymore, but the LQ command may still contain HT rates. + * Test for this to prevent driver from sending LQ command between the time + * RXON flags are updated and when LQ command is updated. + */ +static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct iwl_link_quality_cmd *lq) +{ + int i; + + if (ctx->ht.enabled) + return true; + + IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n", + ctx->active.channel); + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { + if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & + RATE_MCS_HT_MSK) { + IWL_DEBUG_INFO(priv, + "index %d of LQ expects HT channel\n", + i); + return false; + } + } + return true; +} + +/** + * iwl_legacy_send_lq_cmd() - Send link quality command + * @init: This command is sent as part of station initialization right + * after station has been added. + * + * The link quality command is sent as the last step of station creation. + * This is the special case in which init is set and we call a callback in + * this case to clear the state indicating that station creation is in + * progress. 
+ */ +int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + struct iwl_link_quality_cmd *lq, u8 flags, bool init) +{ + int ret = 0; + unsigned long flags_spin; + + struct iwl_host_cmd cmd = { + .id = REPLY_TX_LINK_QUALITY_CMD, + .len = sizeof(struct iwl_link_quality_cmd), + .flags = flags, + .data = lq, + }; + + if (WARN_ON(lq->sta_id == IWL_INVALID_STATION)) + return -EINVAL; + + + spin_lock_irqsave(&priv->sta_lock, flags_spin); + if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) { + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + return -EINVAL; + } + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + + iwl_legacy_dump_lq_cmd(priv, lq); + BUG_ON(init && (cmd.flags & CMD_ASYNC)); + + if (iwl_legacy_is_lq_table_valid(priv, ctx, lq)) + ret = iwl_legacy_send_cmd(priv, &cmd); + else + ret = -EINVAL; + + if (cmd.flags & CMD_ASYNC) + return ret; + + if (init) { + IWL_DEBUG_INFO(priv, "init LQ command complete," + " clearing sta addition status for sta %d\n", + lq->sta_id); + spin_lock_irqsave(&priv->sta_lock, flags_spin); + priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + } + return ret; +} +EXPORT_SYMBOL(iwl_legacy_send_lq_cmd); + +int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv; + int ret; + + IWL_DEBUG_INFO(priv, "received request to remove station %pM\n", + sta->addr); + mutex_lock(&priv->mutex); + IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n", + sta->addr); + ret = iwl_legacy_remove_station(priv, sta_common->sta_id, sta->addr); + if (ret) + IWL_ERR(priv, "Error removing station %pM\n", + sta->addr); + mutex_unlock(&priv->mutex); + return ret; +} +EXPORT_SYMBOL(iwl_legacy_mac_sta_remove); diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-sta.h b/trunk/drivers/net/wireless/iwlegacy/iwl-sta.h new file mode 100644 index 000000000000..67bd75fe01a1 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-sta.h @@ -0,0 +1,148 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#ifndef __iwl_legacy_sta_h__ +#define __iwl_legacy_sta_h__ + +#include "iwl-dev.h" + +#define HW_KEY_DYNAMIC 0 +#define HW_KEY_DEFAULT 1 + +#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */ +#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */ +#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of + being activated */ +#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211; + (this is for the IBSS BSSID stations) */ +#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */ + + +void iwl_legacy_restore_stations(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv); +int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv); +int iwl_legacy_send_add_sta(struct iwl_priv *priv, + struct iwl_legacy_addsta_cmd *sta, u8 flags); +int iwl_legacy_add_station_common(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + const u8 *addr, bool is_ap, + struct ieee80211_sta *sta, u8 *sta_id_r); +int iwl_legacy_remove_station(struct iwl_priv *priv, + const u8 sta_id, + const u8 *addr); +int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + +u8 iwl_legacy_prep_station(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + const u8 *addr, bool is_ap, + struct ieee80211_sta *sta); + +int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct iwl_link_quality_cmd *lq, + u8 flags, bool init); + +/** + * iwl_legacy_clear_driver_stations - clear knowledge of all stations from driver + * @priv: iwl priv struct + * + * This is called during iwl_down() to make sure that in the case + * we're coming there from a hardware restart mac80211 will be + * able to reconfigure stations -- if we're getting there in the + * normal down flow then the stations will already be cleared. + */ +static inline void iwl_legacy_clear_driver_stations(struct iwl_priv *priv) +{ + unsigned long flags; + struct iwl_rxon_context *ctx; + + spin_lock_irqsave(&priv->sta_lock, flags); + memset(priv->stations, 0, sizeof(priv->stations)); + priv->num_stations = 0; + + priv->ucode_key_table = 0; + + for_each_context(priv, ctx) { + /* + * Remove all key information that is not stored as part + * of station information since mac80211 may not have had + * a chance to remove all the keys. When device is + * reconfigured by mac80211 after an error all keys will + * be reconfigured. + */ + memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys)); + ctx->key_mapping_keys = 0; + } + + spin_unlock_irqrestore(&priv->sta_lock, flags); +} + +static inline int iwl_legacy_sta_id(struct ieee80211_sta *sta) +{ + if (WARN_ON(!sta)) + return IWL_INVALID_STATION; + + return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id; +} + +/** + * iwl_legacy_sta_id_or_broadcast - return sta_id or broadcast sta + * @priv: iwl priv + * @context: the current context + * @sta: mac80211 station + * + * In certain circumstances mac80211 passes a station pointer + * that may be %NULL, for example during TX or key setup. In + * that case, we need to use the broadcast station, so this + * inline wraps that pattern. 
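A hedged sketch of the TX-path pattern this inline wraps; the error handling is illustrative and not taken from the driver:

	int sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta);

	if (sta_id == IWL_INVALID_STATION)
		return -ENXIO;	/* illustrative: refuse to index the station table */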
+ */ +static inline int iwl_legacy_sta_id_or_broadcast(struct iwl_priv *priv, + struct iwl_rxon_context *context, + struct ieee80211_sta *sta) +{ + int sta_id; + + if (!sta) + return context->bcast_sta_id; + + sta_id = iwl_legacy_sta_id(sta); + + /* + * mac80211 should not be passing a partially + * initialised station! + */ + WARN_ON(sta_id == IWL_INVALID_STATION); + + return sta_id; +} +#endif /* __iwl_legacy_sta_h__ */ diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-tx.c b/trunk/drivers/net/wireless/iwlegacy/iwl-tx.c new file mode 100644 index 000000000000..c0dfb1a4e968 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-tx.c @@ -0,0 +1,659 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-sta.h" +#include "iwl-io.h" +#include "iwl-helpers.h" + +/** + * iwl_legacy_txq_update_write_ptr - Send new write index to hardware + */ +void +iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + u32 reg = 0; + int txq_id = txq->q.id; + + if (txq->need_update == 0) + return; + + /* if we're trying to save power */ + if (test_bit(STATUS_POWER_PMI, &priv->status)) { + /* wake up nic if it's powered down ... + * uCode will wake up, and interrupt us again, so next + * time we'll skip this part. */ + reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + IWL_DEBUG_INFO(priv, + "Tx queue %d requesting wakeup," + " GP1 = 0x%x\n", txq_id, reg); + iwl_legacy_set_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + return; + } + + iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, + txq->q.write_ptr | (txq_id << 8)); + + /* + * else not in power-save mode, + * uCode will never sleep when we're + * trying to tx (during RFKILL, we're not trying to tx). 
+ */ + } else + iwl_write32(priv, HBUS_TARG_WRPTR, + txq->q.write_ptr | (txq_id << 8)); + txq->need_update = 0; +} +EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr); + +/** + * iwl_legacy_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's + */ +void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + + if (q->n_bd == 0) + return; + + while (q->write_ptr != q->read_ptr) { + priv->cfg->ops->lib->txq_free_tfd(priv, txq); + q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd); + } +} +EXPORT_SYMBOL(iwl_legacy_tx_queue_unmap); + +/** + * iwl_legacy_tx_queue_free - Deallocate DMA queue. + * @txq: Transmit queue to deallocate. + * + * Empty queue by removing and destroying all BD's. + * Free all buffers. + * 0-fill, but do not free "txq" descriptor structure. + */ +void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct device *dev = &priv->pci_dev->dev; + int i; + + iwl_legacy_tx_queue_unmap(priv, txq_id); + + /* De-alloc array of command/tx buffers */ + for (i = 0; i < TFD_TX_CMD_SLOTS; i++) + kfree(txq->cmd[i]); + + /* De-alloc circular buffer of TFDs */ + if (txq->q.n_bd) + dma_free_coherent(dev, priv->hw_params.tfd_size * + txq->q.n_bd, txq->tfds, txq->q.dma_addr); + + /* De-alloc array of per-TFD driver data */ + kfree(txq->txb); + txq->txb = NULL; + + /* deallocate arrays */ + kfree(txq->cmd); + kfree(txq->meta); + txq->cmd = NULL; + txq->meta = NULL; + + /* 0-fill queue descriptor structure */ + memset(txq, 0, sizeof(*txq)); +} +EXPORT_SYMBOL(iwl_legacy_tx_queue_free); + +/** + * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue + */ +void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv) +{ + struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; + struct iwl_queue *q = &txq->q; + int i; + + if (q->n_bd == 0) + return; + + while (q->read_ptr != q->write_ptr) { + i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0); + + if (txq->meta[i].flags & CMD_MAPPED) { + pci_unmap_single(priv->pci_dev, + dma_unmap_addr(&txq->meta[i], mapping), + dma_unmap_len(&txq->meta[i], len), + PCI_DMA_BIDIRECTIONAL); + txq->meta[i].flags = 0; + } + + q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd); + } + + i = q->n_window; + if (txq->meta[i].flags & CMD_MAPPED) { + pci_unmap_single(priv->pci_dev, + dma_unmap_addr(&txq->meta[i], mapping), + dma_unmap_len(&txq->meta[i], len), + PCI_DMA_BIDIRECTIONAL); + txq->meta[i].flags = 0; + } +} +EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap); + +/** + * iwl_legacy_cmd_queue_free - Deallocate DMA queue. + * @txq: Transmit queue to deallocate. + * + * Empty queue by removing and destroying all BD's. + * Free all buffers. + * 0-fill, but do not free "txq" descriptor structure. 
+ */ +void iwl_legacy_cmd_queue_free(struct iwl_priv *priv) +{ + struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; + struct device *dev = &priv->pci_dev->dev; + int i; + + iwl_legacy_cmd_queue_unmap(priv); + + /* De-alloc array of command/tx buffers */ + for (i = 0; i <= TFD_CMD_SLOTS; i++) + kfree(txq->cmd[i]); + + /* De-alloc circular buffer of TFDs */ + if (txq->q.n_bd) + dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd, + txq->tfds, txq->q.dma_addr); + + /* deallocate arrays */ + kfree(txq->cmd); + kfree(txq->meta); + txq->cmd = NULL; + txq->meta = NULL; + + /* 0-fill queue descriptor structure */ + memset(txq, 0, sizeof(*txq)); +} +EXPORT_SYMBOL(iwl_legacy_cmd_queue_free); + +/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** + * DMA services + * + * Theory of operation + * + * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer + * of buffer descriptors, each of which points to one or more data buffers for + * the device to read from or fill. Driver and device exchange status of each + * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty + * entries in each circular buffer, to protect against confusing empty and full + * queue states. + * + * The device reads or writes the data in the queues via the device's several + * DMA/FIFO channels. Each queue is mapped to a single DMA channel. + * + * For Tx queue, there are low mark and high mark limits. If, after queuing + * the packet for Tx, free space become < low mark, Tx queue stopped. When + * reclaiming packets (on 'tx done IRQ), if free space become > high mark, + * Tx queue resumed. + * + * See more detailed info in iwl-4965-hw.h. + ***************************************************/ + +int iwl_legacy_queue_space(const struct iwl_queue *q) +{ + int s = q->read_ptr - q->write_ptr; + + if (q->read_ptr > q->write_ptr) + s -= q->n_bd; + + if (s <= 0) + s += q->n_window; + /* keep some reserve to not confuse empty and full situations */ + s -= 2; + if (s < 0) + s = 0; + return s; +} +EXPORT_SYMBOL(iwl_legacy_queue_space); + + +/** + * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes + */ +static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q, + int count, int slots_num, u32 id) +{ + q->n_bd = count; + q->n_window = slots_num; + q->id = id; + + /* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap + * and iwl_legacy_queue_dec_wrap are broken. */ + BUG_ON(!is_power_of_2(count)); + + /* slots_num must be power-of-two size, otherwise + * iwl_legacy_get_cmd_index is broken. */ + BUG_ON(!is_power_of_2(slots_num)); + + q->low_mark = q->n_window / 4; + if (q->low_mark < 4) + q->low_mark = 4; + + q->high_mark = q->n_window / 8; + if (q->high_mark < 2) + q->high_mark = 2; + + q->write_ptr = q->read_ptr = 0; + + return 0; +} + +/** + * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue + */ +static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv, + struct iwl_tx_queue *txq, u32 id) +{ + struct device *dev = &priv->pci_dev->dev; + size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; + + /* Driver private data, only for Tx (not command) queues, + * not shared with device. 
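Picking up the low/high-mark flow control described in the DMA theory-of-operation comment above, a hedged sketch of the two checkpoints (the stop/wake helpers named here are hypothetical stand-ins for whatever the driver actually calls; they are not defined in this hunk):

	/* after queuing a frame */
	if (iwl_legacy_queue_space(&txq->q) < txq->q.low_mark)
		example_stop_tx_queue(priv, txq);	/* hypothetical helper */

	/* while reclaiming entries on a tx-done interrupt */
	if (iwl_legacy_queue_space(&txq->q) > txq->q.high_mark)
		example_wake_tx_queue(priv, txq);	/* hypothetical helper */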
*/ + if (id != priv->cmd_queue) { + txq->txb = kzalloc(sizeof(txq->txb[0]) * + TFD_QUEUE_SIZE_MAX, GFP_KERNEL); + if (!txq->txb) { + IWL_ERR(priv, "kmalloc for auxiliary BD " + "structures failed\n"); + goto error; + } + } else { + txq->txb = NULL; + } + + /* Circular buffer of transmit frame descriptors (TFDs), + * shared with device */ + txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, + GFP_KERNEL); + if (!txq->tfds) { + IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz); + goto error; + } + txq->q.id = id; + + return 0; + + error: + kfree(txq->txb); + txq->txb = NULL; + + return -ENOMEM; +} + +/** + * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue + */ +int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, + int slots_num, u32 txq_id) +{ + int i, len; + int ret; + int actual_slots = slots_num; + + /* + * Alloc buffer array for commands (Tx or other types of commands). + * For the command queue (#4/#9), allocate command space + one big + * command for scan, since scan command is very huge; the system will + * not have two scans at the same time, so only one is needed. + * For normal Tx queues (all other queues), no super-size command + * space is needed. + */ + if (txq_id == priv->cmd_queue) + actual_slots++; + + txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots, + GFP_KERNEL); + txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots, + GFP_KERNEL); + + if (!txq->meta || !txq->cmd) + goto out_free_arrays; + + len = sizeof(struct iwl_device_cmd); + for (i = 0; i < actual_slots; i++) { + /* only happens for cmd queue */ + if (i == slots_num) + len = IWL_MAX_CMD_SIZE; + + txq->cmd[i] = kmalloc(len, GFP_KERNEL); + if (!txq->cmd[i]) + goto err; + } + + /* Alloc driver data array and TFD circular buffer */ + ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id); + if (ret) + goto err; + + txq->need_update = 0; + + /* + * For the default queues 0-3, set up the swq_id + * already -- all others need to get one later + * (if they need one at all). + */ + if (txq_id < 4) + iwl_legacy_set_swq_id(txq, txq_id, txq_id); + + /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise + * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. 
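The power-of-two requirement exists so that the wrap helpers can reduce to a simple mask; a sketch of their assumed shape (not copied from the driver) follows:

	/* Assumed shape of the wrap helpers; valid only when n_bd is a power of
	 * two, which the BUG_ON()/BUILD_BUG_ON() checks above and below enforce. */
	static inline int queue_inc_wrap_sketch(int index, int n_bd)
	{
		return (index + 1) & (n_bd - 1);
	}

	static inline int queue_dec_wrap_sketch(int index, int n_bd)
	{
		return (index - 1) & (n_bd - 1);
	}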
*/ + BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); + + /* Initialize queue's high/low-water marks, and head/tail indexes */ + iwl_legacy_queue_init(priv, &txq->q, + TFD_QUEUE_SIZE_MAX, slots_num, txq_id); + + /* Tell device where to find queue */ + priv->cfg->ops->lib->txq_init(priv, txq); + + return 0; +err: + for (i = 0; i < actual_slots; i++) + kfree(txq->cmd[i]); +out_free_arrays: + kfree(txq->meta); + kfree(txq->cmd); + + return -ENOMEM; +} +EXPORT_SYMBOL(iwl_legacy_tx_queue_init); + +void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, + int slots_num, u32 txq_id) +{ + int actual_slots = slots_num; + + if (txq_id == priv->cmd_queue) + actual_slots++; + + memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots); + + txq->need_update = 0; + + /* Initialize queue's high/low-water marks, and head/tail indexes */ + iwl_legacy_queue_init(priv, &txq->q, + TFD_QUEUE_SIZE_MAX, slots_num, txq_id); + + /* Tell device where to find queue */ + priv->cfg->ops->lib->txq_init(priv, txq); +} +EXPORT_SYMBOL(iwl_legacy_tx_queue_reset); + +/*************** HOST COMMAND QUEUE FUNCTIONS *****/ + +/** + * iwl_legacy_enqueue_hcmd - enqueue a uCode command + * @priv: device private data point + * @cmd: a point to the ucode command structure + * + * The function returns < 0 values to indicate the operation is + * failed. On success, it turns the index (> 0) of command in the + * command queue. + */ +int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; + struct iwl_queue *q = &txq->q; + struct iwl_device_cmd *out_cmd; + struct iwl_cmd_meta *out_meta; + dma_addr_t phys_addr; + unsigned long flags; + int len; + u32 idx; + u16 fix_size; + + cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len); + fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr)); + + /* If any of the command structures end up being larger than + * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then + * we will need to increase the size of the TFD entries + * Also, check to see if command buffer should not exceed the size + * of device_cmd and max_cmd_size. */ + BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && + !(cmd->flags & CMD_SIZE_HUGE)); + BUG_ON(fix_size > IWL_MAX_CMD_SIZE); + + if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) { + IWL_WARN(priv, "Not sending command - %s KILL\n", + iwl_legacy_is_rfkill(priv) ? "RF" : "CT"); + return -EIO; + } + + spin_lock_irqsave(&priv->hcmd_lock, flags); + + if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 
2 : 1)) { + spin_unlock_irqrestore(&priv->hcmd_lock, flags); + + IWL_ERR(priv, "Restarting adapter due to command queue full\n"); + queue_work(priv->workqueue, &priv->restart); + return -ENOSPC; + } + + idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); + out_cmd = txq->cmd[idx]; + out_meta = &txq->meta[idx]; + + if (WARN_ON(out_meta->flags & CMD_MAPPED)) { + spin_unlock_irqrestore(&priv->hcmd_lock, flags); + return -ENOSPC; + } + + memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ + out_meta->flags = cmd->flags | CMD_MAPPED; + if (cmd->flags & CMD_WANT_SKB) + out_meta->source = cmd; + if (cmd->flags & CMD_ASYNC) + out_meta->callback = cmd->callback; + + out_cmd->hdr.cmd = cmd->id; + memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len); + + /* At this point, the out_cmd now has all of the incoming cmd + * information */ + + out_cmd->hdr.flags = 0; + out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) | + INDEX_TO_SEQ(q->write_ptr)); + if (cmd->flags & CMD_SIZE_HUGE) + out_cmd->hdr.sequence |= SEQ_HUGE_FRAME; + len = sizeof(struct iwl_device_cmd); + if (idx == TFD_CMD_SLOTS) + len = IWL_MAX_CMD_SIZE; + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + switch (out_cmd->hdr.cmd) { + case REPLY_TX_LINK_QUALITY_CMD: + case SENSITIVITY_CMD: + IWL_DEBUG_HC_DUMP(priv, + "Sending command %s (#%x), seq: 0x%04X, " + "%d bytes at %d[%d]:%d\n", + iwl_legacy_get_cmd_string(out_cmd->hdr.cmd), + out_cmd->hdr.cmd, + le16_to_cpu(out_cmd->hdr.sequence), fix_size, + q->write_ptr, idx, priv->cmd_queue); + break; + default: + IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, " + "%d bytes at %d[%d]:%d\n", + iwl_legacy_get_cmd_string(out_cmd->hdr.cmd), + out_cmd->hdr.cmd, + le16_to_cpu(out_cmd->hdr.sequence), fix_size, + q->write_ptr, idx, priv->cmd_queue); + } +#endif + txq->need_update = 1; + + if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl) + /* Set up entry in queue's byte count circular buffer */ + priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0); + + phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr, + fix_size, PCI_DMA_BIDIRECTIONAL); + dma_unmap_addr_set(out_meta, mapping, phys_addr); + dma_unmap_len_set(out_meta, len, fix_size); + + trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr, + fix_size, cmd->flags); + + priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, + phys_addr, fix_size, 1, + U32_PAD(cmd->len)); + + /* Increment and update queue's write index */ + q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd); + iwl_legacy_txq_update_write_ptr(priv, txq); + + spin_unlock_irqrestore(&priv->hcmd_lock, flags); + return idx; +} + +/** + * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd + * + * When FW advances 'R' index, all entries between old and new 'R' index + * need to be reclaimed. As result, some free space forms. If there is + * enough free space (> low mark), wake the stack that feeds us. 
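+ *
+ * Illustrative example (arbitrary numbers, ignoring wrap-around): with
+ * read_ptr = 10 and a response for index 12, the loop below advances
+ * read_ptr 10 -> 11 -> 12 -> 13. Reclaiming more than one entry for a
+ * single response means host commands were skipped; this is reported
+ * and answered with a firmware restart.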
+ */ +static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, + int idx, int cmd_idx) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + int nfreed = 0; + + if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) { + IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " + "is out of range [0-%d] %d %d.\n", txq_id, + idx, q->n_bd, q->write_ptr, q->read_ptr); + return; + } + + for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; + q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) { + + if (nfreed++ > 0) { + IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx, + q->write_ptr, q->read_ptr); + queue_work(priv->workqueue, &priv->restart); + } + + } +} + +/** + * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them + * @rxb: Rx buffer to reclaim + * + * If an Rx buffer has an async callback associated with it the callback + * will be executed. The attached skb (if present) will only be freed + * if the callback returns 1 + */ +void +iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int index = SEQ_TO_INDEX(sequence); + int cmd_index; + bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); + struct iwl_device_cmd *cmd; + struct iwl_cmd_meta *meta; + struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; + unsigned long flags; + + /* If a Tx command is being handled and it isn't in the actual + * command queue then there a command routing bug has been introduced + * in the queue management code. */ + if (WARN(txq_id != priv->cmd_queue, + "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", + txq_id, priv->cmd_queue, sequence, + priv->txq[priv->cmd_queue].q.read_ptr, + priv->txq[priv->cmd_queue].q.write_ptr)) { + iwl_print_hex_error(priv, pkt, 32); + return; + } + + cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge); + cmd = txq->cmd[cmd_index]; + meta = &txq->meta[cmd_index]; + + txq->time_stamp = jiffies; + + pci_unmap_single(priv->pci_dev, + dma_unmap_addr(meta, mapping), + dma_unmap_len(meta, len), + PCI_DMA_BIDIRECTIONAL); + + /* Input error checking is done when commands are added to queue. */ + if (meta->flags & CMD_WANT_SKB) { + meta->source->reply_page = (unsigned long)rxb_addr(rxb); + rxb->page = NULL; + } else if (meta->callback) + meta->callback(priv, cmd, pkt); + + spin_lock_irqsave(&priv->hcmd_lock, flags); + + iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index); + + if (!(meta->flags & CMD_ASYNC)) { + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n", + iwl_legacy_get_cmd_string(cmd->hdr.cmd)); + wake_up(&priv->wait_command_queue); + } + + /* Mark as unmapped */ + meta->flags = 0; + + spin_unlock_irqrestore(&priv->hcmd_lock, flags); +} +EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete); diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl3945-base.c b/trunk/drivers/net/wireless/iwlegacy/iwl3945-base.c new file mode 100644 index 000000000000..b282d869a546 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl3945-base.c @@ -0,0 +1,4016 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. 
+ * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#define DRV_NAME "iwl3945" + +#include "iwl-fh.h" +#include "iwl-3945-fh.h" +#include "iwl-commands.h" +#include "iwl-sta.h" +#include "iwl-3945.h" +#include "iwl-core.h" +#include "iwl-helpers.h" +#include "iwl-dev.h" +#include "iwl-spectrum.h" + +/* + * module name, copyright, version, etc. + */ + +#define DRV_DESCRIPTION \ +"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux" + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +#define VD "d" +#else +#define VD +#endif + +/* + * add "s" to indicate spectrum measurement included. + * we add it here to be consistent with previous releases in which + * this was configurable. 
+ */ +#define DRV_VERSION IWLWIFI_VERSION VD "s" +#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation" +#define DRV_AUTHOR "" + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_LICENSE("GPL"); + + /* module parameters */ +struct iwl_mod_params iwl3945_mod_params = { + .sw_crypto = 1, + .restart_fw = 1, + .disable_hw_scan = 1, + /* the rest are 0 by default */ +}; + +/** + * iwl3945_get_antenna_flags - Get antenna flags for RXON command + * @priv: eeprom and antenna fields are used to determine antenna flags + * + * priv->eeprom39 is used to determine if antenna AUX/MAIN are reversed + * iwl3945_mod_params.antenna specifies the antenna diversity mode: + * + * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself + * IWL_ANTENNA_MAIN - Force MAIN antenna + * IWL_ANTENNA_AUX - Force AUX antenna + */ +__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv) +{ + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + + switch (iwl3945_mod_params.antenna) { + case IWL_ANTENNA_DIVERSITY: + return 0; + + case IWL_ANTENNA_MAIN: + if (eeprom->antenna_switch_type) + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; + + case IWL_ANTENNA_AUX: + if (eeprom->antenna_switch_type) + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; + } + + /* bad antenna selector value */ + IWL_ERR(priv, "Bad antenna selector value (0x%x)\n", + iwl3945_mod_params.antenna); + + return 0; /* "diversity" is default if error */ +} + +static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + __le16 key_flags = 0; + int ret; + + key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + + if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id) + key_flags |= STA_KEY_MULTICAST_MSK; + + keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + keyconf->hw_key_idx = keyconf->keyidx; + key_flags &= ~STA_KEY_FLG_INVALID; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; + priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; + memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, + keyconf->keylen); + + memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, + keyconf->keylen); + + if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) + == STA_KEY_FLG_NO_ENC) + priv->stations[sta_id].sta.key.key_offset = + iwl_legacy_get_free_ucode_key_index(priv); + /* else, we are overriding an existing key => no need to allocated room + * in uCode. 
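+ * (A new key_offset is taken from iwl_legacy_get_free_ucode_key_index()
+ * only while the station entry is still marked STA_KEY_FLG_NO_ENC;
+ * otherwise the previously assigned offset is kept.)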
*/ + + WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, + "no space for a new key"); + + priv->stations[sta_id].sta.key.key_flags = key_flags; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n"); + + ret = iwl_legacy_send_add_sta(priv, + &priv->stations[sta_id].sta, CMD_ASYNC); + + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return ret; +} + +static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + return -EOPNOTSUPP; +} + +static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + return -EOPNOTSUPP; +} + +static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id) +{ + unsigned long flags; + struct iwl_legacy_addsta_cmd sta_cmd; + + spin_lock_irqsave(&priv->sta_lock, flags); + memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key)); + memset(&priv->stations[sta_id].sta.key, 0, + sizeof(struct iwl4965_keyinfo)); + priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n"); + return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); +} + +static int iwl3945_set_dynamic_key(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, u8 sta_id) +{ + int ret = 0; + + keyconf->hw_key_idx = HW_KEY_DYNAMIC; + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id); + break; + case WLAN_CIPHER_SUITE_TKIP: + ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id); + break; + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id); + break; + default: + IWL_ERR(priv, "Unknown alg: %s alg=%x\n", __func__, + keyconf->cipher); + ret = -EINVAL; + } + + IWL_DEBUG_WEP(priv, "Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n", + keyconf->cipher, keyconf->keylen, keyconf->keyidx, + sta_id, ret); + + return ret; +} + +static int iwl3945_remove_static_key(struct iwl_priv *priv) +{ + int ret = -EOPNOTSUPP; + + return ret; +} + +static int iwl3945_set_static_key(struct iwl_priv *priv, + struct ieee80211_key_conf *key) +{ + if (key->cipher == WLAN_CIPHER_SUITE_WEP40 || + key->cipher == WLAN_CIPHER_SUITE_WEP104) + return -EOPNOTSUPP; + + IWL_ERR(priv, "Static key invalid: cipher %x\n", key->cipher); + return -EINVAL; +} + +static void iwl3945_clear_free_frames(struct iwl_priv *priv) +{ + struct list_head *element; + + IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n", + priv->frames_count); + + while (!list_empty(&priv->free_frames)) { + element = priv->free_frames.next; + list_del(element); + kfree(list_entry(element, struct iwl3945_frame, list)); + priv->frames_count--; + } + + if (priv->frames_count) { + IWL_WARN(priv, "%d frames still in use. 
Did we lose one?\n", + priv->frames_count); + priv->frames_count = 0; + } +} + +static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv) +{ + struct iwl3945_frame *frame; + struct list_head *element; + if (list_empty(&priv->free_frames)) { + frame = kzalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) { + IWL_ERR(priv, "Could not allocate frame!\n"); + return NULL; + } + + priv->frames_count++; + return frame; + } + + element = priv->free_frames.next; + list_del(element); + return list_entry(element, struct iwl3945_frame, list); +} + +static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame) +{ + memset(frame, 0, sizeof(*frame)); + list_add(&frame->list, &priv->free_frames); +} + +unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + int left) +{ + + if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb) + return 0; + + if (priv->beacon_skb->len > left) + return 0; + + memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len); + + return priv->beacon_skb->len; +} + +static int iwl3945_send_beacon_cmd(struct iwl_priv *priv) +{ + struct iwl3945_frame *frame; + unsigned int frame_size; + int rc; + u8 rate; + + frame = iwl3945_get_free_frame(priv); + + if (!frame) { + IWL_ERR(priv, "Could not obtain free frame buffer for beacon " + "command.\n"); + return -ENOMEM; + } + + rate = iwl_legacy_get_lowest_plcp(priv, + &priv->contexts[IWL_RXON_CTX_BSS]); + + frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate); + + rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, + &frame->u.cmd[0]); + + iwl3945_free_frame(priv, frame); + + return rc; +} + +static void iwl3945_unset_hw_params(struct iwl_priv *priv) +{ + if (priv->_3945.shared_virt) + dma_free_coherent(&priv->pci_dev->dev, + sizeof(struct iwl3945_shared), + priv->_3945.shared_virt, + priv->_3945.shared_phys); +} + +static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv, + struct ieee80211_tx_info *info, + struct iwl_device_cmd *cmd, + struct sk_buff *skb_frag, + int sta_id) +{ + struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload; + struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo; + + tx_cmd->sec_ctl = 0; + + switch (keyinfo->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + tx_cmd->sec_ctl = TX_CMD_SEC_CCM; + memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen); + IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); + break; + + case WLAN_CIPHER_SUITE_TKIP: + break; + + case WLAN_CIPHER_SUITE_WEP104: + tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; + /* fall through */ + case WLAN_CIPHER_SUITE_WEP40: + tx_cmd->sec_ctl |= TX_CMD_SEC_WEP | + (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; + + memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen); + + IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " + "with key %d\n", info->control.hw_key->hw_key_idx); + break; + + default: + IWL_ERR(priv, "Unknown encode cipher %x\n", keyinfo->cipher); + break; + } +} + +/* + * handle build REPLY_TX command notification. 
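+ *
+ * This fills the generic part of the 3945 Tx command: ACK,
+ * sequence-control and more-fragments flags, TID, and the PM frame
+ * timeout. Rate selection is handled separately by
+ * iwl3945_hw_build_tx_cmd_rate().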
+ */ +static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct ieee80211_tx_info *info, + struct ieee80211_hdr *hdr, u8 std_id) +{ + struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload; + __le32 tx_flags = tx_cmd->tx_flags; + __le16 fc = hdr->frame_control; + + tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { + tx_flags |= TX_CMD_FLG_ACK_MSK; + if (ieee80211_is_mgmt(fc)) + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + if (ieee80211_is_probe_resp(fc) && + !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) + tx_flags |= TX_CMD_FLG_TSF_MSK; + } else { + tx_flags &= (~TX_CMD_FLG_ACK_MSK); + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + } + + tx_cmd->sta_id = std_id; + if (ieee80211_has_morefrags(fc)) + tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; + + if (ieee80211_is_data_qos(fc)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tx_cmd->tid_tspec = qc[0] & 0xf; + tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; + } else { + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + } + + iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags); + + tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); + if (ieee80211_is_mgmt(fc)) { + if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) + tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); + else + tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); + } else { + tx_cmd->timeout.pm_frame_timeout = 0; + } + + tx_cmd->driver_txop = 0; + tx_cmd->tx_flags = tx_flags; + tx_cmd->next_frame_len = 0; +} + +/* + * start REPLY_TX command process + */ +static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct iwl3945_tx_cmd *tx_cmd; + struct iwl_tx_queue *txq = NULL; + struct iwl_queue *q = NULL; + struct iwl_device_cmd *out_cmd; + struct iwl_cmd_meta *out_meta; + dma_addr_t phys_addr; + dma_addr_t txcmd_phys; + int txq_id = skb_get_queue_mapping(skb); + u16 len, idx, hdr_len; + u8 id; + u8 unicast; + u8 sta_id; + u8 tid = 0; + __le16 fc; + u8 wait_write_ptr = 0; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + if (iwl_legacy_is_rfkill(priv)) { + IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); + goto drop_unlock; + } + + if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) { + IWL_ERR(priv, "ERROR: No TX rate available.\n"); + goto drop_unlock; + } + + unicast = !is_multicast_ether_addr(hdr->addr1); + id = 0; + + fc = hdr->frame_control; + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (ieee80211_is_auth(fc)) + IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); + else if (ieee80211_is_assoc_req(fc)) + IWL_DEBUG_TX(priv, "Sending ASSOC frame\n"); + else if (ieee80211_is_reassoc_req(fc)) + IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); +#endif + + spin_unlock_irqrestore(&priv->lock, flags); + + hdr_len = ieee80211_hdrlen(fc); + + /* Find index into station table for destination station */ + sta_id = iwl_legacy_sta_id_or_broadcast( + priv, &priv->contexts[IWL_RXON_CTX_BSS], + info->control.sta); + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", + hdr->addr1); + goto drop; + } + + IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id); + + if (ieee80211_is_data_qos(fc)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; + if (unlikely(tid >= MAX_TID_COUNT)) + goto drop; + } + + /* Descriptor for chosen Tx queue */ + txq = &priv->txq[txq_id]; + q = &txq->q; + + if 
((iwl_legacy_queue_space(q) < q->high_mark)) + goto drop; + + spin_lock_irqsave(&priv->lock, flags); + + idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0); + + /* Set up driver data for this TFD */ + memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); + txq->txb[q->write_ptr].skb = skb; + txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + /* Init first empty entry in queue's array of Tx/cmd buffers */ + out_cmd = txq->cmd[idx]; + out_meta = &txq->meta[idx]; + tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload; + memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); + memset(tx_cmd, 0, sizeof(*tx_cmd)); + + /* + * Set up the Tx-command (not MAC!) header. + * Store the chosen Tx queue and TFD index within the sequence field; + * after Tx, uCode's Tx response will return this value so driver can + * locate the frame within the tx queue and do post-tx processing. + */ + out_cmd->hdr.cmd = REPLY_TX; + out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | + INDEX_TO_SEQ(q->write_ptr))); + + /* Copy MAC header from skb into command buffer */ + memcpy(tx_cmd->hdr, hdr, hdr_len); + + + if (info->control.hw_key) + iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id); + + /* TODO need this for burst mode later on */ + iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id); + + /* set is_hcca to 0; it probably will never be implemented */ + iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0); + + /* Total # bytes to be transmitted */ + len = (u16)skb->len; + tx_cmd->len = cpu_to_le16(len); + + iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr); + iwl_legacy_update_stats(priv, true, fc, len); + tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; + tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; + + if (!ieee80211_has_morefrags(hdr->frame_control)) { + txq->need_update = 1; + } else { + wait_write_ptr = 1; + txq->need_update = 0; + } + + IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n", + le16_to_cpu(out_cmd->hdr.sequence)); + IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); + iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd)); + iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, + ieee80211_hdrlen(fc)); + + /* + * Use the first empty entry in this queue's command buffer array + * to contain the Tx command and MAC header concatenated together + * (payload data will be in another buffer). + * Size of this varies, due to varying MAC header length. + * If end is not dword aligned, we'll have 2 extra bytes at the end + * of the MAC header (device reads on dword boundaries). + * We'll tell device about this padding later. + */ + len = sizeof(struct iwl3945_tx_cmd) + + sizeof(struct iwl_cmd_header) + hdr_len; + len = (len + 3) & ~3; + + /* Physical address of this Tx command's header (not MAC header!), + * within command buffer array. */ + txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr, + len, PCI_DMA_TODEVICE); + /* we do not map meta data ... so we can safely access address to + * provide to unmap command*/ + dma_unmap_addr_set(out_meta, mapping, txcmd_phys); + dma_unmap_len_set(out_meta, len, len); + + /* Add buffer containing Tx command and MAC(!) header to TFD's + * first entry */ + priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, + txcmd_phys, len, 1, 0); + + + /* Set up TFD's 2nd entry to point directly to remainder of skb, + * if any (802.11 null frames have no payload). 
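+ * Layout for a typical data frame: TFD entry 0 (attached above) covers
+ * the Tx command plus the copied MAC header at txcmd_phys; entry 1
+ * (attached below) points at skb->data + hdr_len for the payload, so a
+ * null-data frame ends up with only the first entry.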
*/ + len = skb->len - hdr_len; + if (len) { + phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, + len, PCI_DMA_TODEVICE); + priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, + phys_addr, len, + 0, U32_PAD(len)); + } + + + /* Tell device the write index *just past* this latest filled TFD */ + q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd); + iwl_legacy_txq_update_write_ptr(priv, txq); + spin_unlock_irqrestore(&priv->lock, flags); + + if ((iwl_legacy_queue_space(q) < q->high_mark) + && priv->mac80211_registered) { + if (wait_write_ptr) { + spin_lock_irqsave(&priv->lock, flags); + txq->need_update = 1; + iwl_legacy_txq_update_write_ptr(priv, txq); + spin_unlock_irqrestore(&priv->lock, flags); + } + + iwl_legacy_stop_queue(priv, txq); + } + + return 0; + +drop_unlock: + spin_unlock_irqrestore(&priv->lock, flags); +drop: + return -1; +} + +static int iwl3945_get_measurement(struct iwl_priv *priv, + struct ieee80211_measurement_params *params, + u8 type) +{ + struct iwl_spectrum_cmd spectrum; + struct iwl_rx_packet *pkt; + struct iwl_host_cmd cmd = { + .id = REPLY_SPECTRUM_MEASUREMENT_CMD, + .data = (void *)&spectrum, + .flags = CMD_WANT_SKB, + }; + u32 add_time = le64_to_cpu(params->start_time); + int rc; + int spectrum_resp_status; + int duration = le16_to_cpu(params->duration); + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) + add_time = iwl_legacy_usecs_to_beacons(priv, + le64_to_cpu(params->start_time) - priv->_3945.last_tsf, + le16_to_cpu(ctx->timing.beacon_interval)); + + memset(&spectrum, 0, sizeof(spectrum)); + + spectrum.channel_count = cpu_to_le16(1); + spectrum.flags = + RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK; + spectrum.filter_flags = MEASUREMENT_FILTER_FLAG; + cmd.len = sizeof(spectrum); + spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); + + if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) + spectrum.start_time = + iwl_legacy_add_beacon_time(priv, + priv->_3945.last_beacon_time, add_time, + le16_to_cpu(ctx->timing.beacon_interval)); + else + spectrum.start_time = 0; + + spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT); + spectrum.channels[0].channel = params->channel; + spectrum.channels[0].type = type; + if (ctx->active.flags & RXON_FLG_BAND_24G_MSK) + spectrum.flags |= RXON_FLG_BAND_24G_MSK | + RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK; + + rc = iwl_legacy_send_cmd_sync(priv, &cmd); + if (rc) + return rc; + + pkt = (struct iwl_rx_packet *)cmd.reply_page; + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n"); + rc = -EIO; + } + + spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status); + switch (spectrum_resp_status) { + case 0: /* Command will be handled */ + if (pkt->u.spectrum.id != 0xff) { + IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n", + pkt->u.spectrum.id); + priv->measurement_status &= ~MEASUREMENT_READY; + } + priv->measurement_status |= MEASUREMENT_ACTIVE; + rc = 0; + break; + + case 1: /* Command will not be handled */ + rc = -EAGAIN; + break; + } + + iwl_legacy_free_pages(priv, cmd.reply_page); + + return rc; +} + +static void iwl3945_rx_reply_alive(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_alive_resp *palive; + struct delayed_work *pwork; + + palive = &pkt->u.alive_frame; + + IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision " + "0x%01X 
0x%01X\n", + palive->is_valid, palive->ver_type, + palive->ver_subtype); + + if (palive->ver_subtype == INITIALIZE_SUBTYPE) { + IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); + memcpy(&priv->card_alive_init, &pkt->u.alive_frame, + sizeof(struct iwl_alive_resp)); + pwork = &priv->init_alive_start; + } else { + IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); + memcpy(&priv->card_alive, &pkt->u.alive_frame, + sizeof(struct iwl_alive_resp)); + pwork = &priv->alive_start; + iwl3945_disable_events(priv); + } + + /* We delay the ALIVE response by 5ms to + * give the HW RF Kill time to activate... */ + if (palive->is_valid == UCODE_VALID_OK) + queue_delayed_work(priv->workqueue, pwork, + msecs_to_jiffies(5)); + else + IWL_WARN(priv, "uCode did not respond OK.\n"); +} + +static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); +#endif + + IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); +} + +static void iwl3945_rx_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status); +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + u8 rate = beacon->beacon_notify_hdr.rate; + + IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " + "tsf %d %d rate %d\n", + le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK, + beacon->beacon_notify_hdr.failure_frame, + le32_to_cpu(beacon->ibss_mgr_status), + le32_to_cpu(beacon->high_tsf), + le32_to_cpu(beacon->low_tsf), rate); +#endif + + priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); + +} + +/* Handle notification from uCode that card's power state is changing + * due to software, hardware, or critical temperature RFKILL */ +static void iwl3945_rx_card_state_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); + unsigned long status = priv->status; + + IWL_WARN(priv, "Card state received: HW:%s SW:%s\n", + (flags & HW_CARD_DISABLED) ? "Kill" : "On", + (flags & SW_CARD_DISABLED) ? "Kill" : "On"); + + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + if (flags & HW_CARD_DISABLED) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + + iwl_legacy_scan_cancel(priv); + + if ((test_bit(STATUS_RF_KILL_HW, &status) != + test_bit(STATUS_RF_KILL_HW, &priv->status))) + wiphy_rfkill_set_hw_state(priv->hw->wiphy, + test_bit(STATUS_RF_KILL_HW, &priv->status)); + else + wake_up(&priv->wait_command_queue); +} + +/** + * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks + * + * Setup the RX handlers for each of the reply types sent from the uCode + * to the host. + * + * This function chains into the hardware specific files for them to setup + * any hardware specific handlers as well. 
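+ *
+ * Dispatch is by command id: iwl3945_rx_handle() uses pkt->hdr.cmd to
+ * index priv->rx_handlers[], so a handler registered here runs for
+ * every notification carrying that id.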
+ */ +static void iwl3945_setup_rx_handlers(struct iwl_priv *priv) +{ + priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive; + priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta; + priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error; + priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa; + priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = + iwl_legacy_rx_spectrum_measure_notif; + priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif; + priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = + iwl_legacy_rx_pm_debug_statistics_notif; + priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif; + + /* + * The same handler is used for both the REPLY to a discrete + * statistics request from the host as well as for the periodic + * statistics notifications (after received beacons) from the uCode. + */ + priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics; + priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics; + + iwl_legacy_setup_rx_scan_handlers(priv); + priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif; + + /* Set up hardware specific Rx handlers */ + iwl3945_hw_rx_handler_setup(priv); +} + +/************************** RX-FUNCTIONS ****************************/ +/* + * Rx theory of operation + * + * The host allocates 32 DMA target addresses and passes the host address + * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is + * 0 to 31 + * + * Rx Queue Indexes + * The host/firmware share two index registers for managing the Rx buffers. + * + * The READ index maps to the first position that the firmware may be writing + * to -- the driver can read up to (but not including) this position and get + * good data. + * The READ index is managed by the firmware once the card is enabled. + * + * The WRITE index maps to the last position the driver has read from -- the + * position preceding WRITE is the last slot the firmware can place a packet. + * + * The queue is empty (no good data) if WRITE = READ - 1, and is full if + * WRITE = READ. + * + * During initialization, the host sets up the READ queue position to the first + * INDEX position, and WRITE to the last (READ - 1 wrapped) + * + * When the firmware places a packet in a buffer, it will advance the READ index + * and fire the RX interrupt. The driver can then query the READ index and + * process as many packets as possible, moving the WRITE index forward as it + * resets the Rx queue buffers with new memory. + * + * The management in the driver is as follows: + * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When + * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled + * to replenish the iwl->rxq->rx_free. + * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the + * iwl->rxq is replenished and the READ INDEX is updated (updating the + * 'processed' and 'read' driver indexes as well) + * + A received packet is processed and handed to the kernel network stack, + * detached from the iwl->rxq. The driver 'processed' index is updated. + * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free + * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ + * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there + * were enough free buffers and RX_STALLED is set it is cleared. 
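+ *
+ * Two replenish paths exist: iwl3945_rx_replenish() runs from the
+ * priv->rx_replenish work item and may sleep (GFP_KERNEL), while
+ * iwl3945_rx_replenish_now() is called from the Rx tasklet and must
+ * allocate atomically (GFP_ATOMIC).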
+ * + * + * Driver sequence: + * + * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls + * iwl3945_rx_queue_restock + * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx + * queue, updates firmware pointers, and updates + * the WRITE index. If insufficient rx_free buffers + * are available, schedules iwl3945_rx_replenish + * + * -- enable interrupts -- + * ISR - iwl3945_rx() Detach iwl_rx_mem_buffers from pool up to the + * READ INDEX, detaching the SKB from the pool. + * Moves the packet buffer from queue to rx_used. + * Calls iwl3945_rx_queue_restock to refill any empty + * slots. + * ... + * + */ + +/** + * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr + */ +static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv, + dma_addr_t dma_addr) +{ + return cpu_to_le32((u32)dma_addr); +} + +/** + * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool + * + * If there are slots in the RX queue that need to be restocked, + * and we have free pre-allocated buffers, fill the ranks as much + * as we can, pulling from rx_free. + * + * This moves the 'write' index forward to catch up with 'processed', and + * also updates the memory address in the firmware to reference the new + * target buffer. + */ +static void iwl3945_rx_queue_restock(struct iwl_priv *priv) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct list_head *element; + struct iwl_rx_mem_buffer *rxb; + unsigned long flags; + int write; + + spin_lock_irqsave(&rxq->lock, flags); + write = rxq->write & ~0x7; + while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) { + /* Get next free Rx buffer, remove from free list */ + element = rxq->rx_free.next; + rxb = list_entry(element, struct iwl_rx_mem_buffer, list); + list_del(element); + + /* Point to Rx buffer via next RBD in circular buffer */ + rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma); + rxq->queue[rxq->write] = rxb; + rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; + rxq->free_count--; + } + spin_unlock_irqrestore(&rxq->lock, flags); + /* If the pre-allocated buffer pool is dropping low, schedule to + * refill it */ + if (rxq->free_count <= RX_LOW_WATERMARK) + queue_work(priv->workqueue, &priv->rx_replenish); + + + /* If we've added more space for the firmware to place data, tell it. + * Increment device's write pointer in multiples of 8. */ + if ((rxq->write_actual != (rxq->write & ~0x7)) + || (abs(rxq->write - rxq->read) > 7)) { + spin_lock_irqsave(&rxq->lock, flags); + rxq->need_update = 1; + spin_unlock_irqrestore(&rxq->lock, flags); + iwl_legacy_rx_queue_update_write_ptr(priv, rxq); + } +} + +/** + * iwl3945_rx_replenish - Move all used packet from rx_used to rx_free + * + * When moving to rx_free an SKB is allocated for the slot. + * + * Also restock the Rx queue via iwl3945_rx_queue_restock. 
+ * This is called as a scheduled work item (except for during initialization) + */ +static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct list_head *element; + struct iwl_rx_mem_buffer *rxb; + struct page *page; + unsigned long flags; + gfp_t gfp_mask = priority; + + while (1) { + spin_lock_irqsave(&rxq->lock, flags); + + if (list_empty(&rxq->rx_used)) { + spin_unlock_irqrestore(&rxq->lock, flags); + return; + } + spin_unlock_irqrestore(&rxq->lock, flags); + + if (rxq->free_count > RX_LOW_WATERMARK) + gfp_mask |= __GFP_NOWARN; + + if (priv->hw_params.rx_page_order > 0) + gfp_mask |= __GFP_COMP; + + /* Alloc a new receive buffer */ + page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order); + if (!page) { + if (net_ratelimit()) + IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n"); + if ((rxq->free_count <= RX_LOW_WATERMARK) && + net_ratelimit()) + IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n", + priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL", + rxq->free_count); + /* We don't reschedule replenish work here -- we will + * call the restock method and if it still needs + * more buffers it will schedule replenish */ + break; + } + + spin_lock_irqsave(&rxq->lock, flags); + if (list_empty(&rxq->rx_used)) { + spin_unlock_irqrestore(&rxq->lock, flags); + __free_pages(page, priv->hw_params.rx_page_order); + return; + } + element = rxq->rx_used.next; + rxb = list_entry(element, struct iwl_rx_mem_buffer, list); + list_del(element); + spin_unlock_irqrestore(&rxq->lock, flags); + + rxb->page = page; + /* Get physical address of RB/SKB */ + rxb->page_dma = pci_map_page(priv->pci_dev, page, 0, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + + spin_lock_irqsave(&rxq->lock, flags); + + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + priv->alloc_rxb_page++; + + spin_unlock_irqrestore(&rxq->lock, flags); + } +} + +void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + unsigned long flags; + int i; + spin_lock_irqsave(&rxq->lock, flags); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { + /* In the reset function, these buffers may have been allocated + * to an SKB, so we need to unmap and free potential storage */ + if (rxq->pool[i].page != NULL) { + pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + __iwl_legacy_free_pages(priv, rxq->pool[i].page); + rxq->pool[i].page = NULL; + } + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + } + + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->write_actual = 0; + rxq->free_count = 0; + spin_unlock_irqrestore(&rxq->lock, flags); +} + +void iwl3945_rx_replenish(void *data) +{ + struct iwl_priv *priv = data; + unsigned long flags; + + iwl3945_rx_allocate(priv, GFP_KERNEL); + + spin_lock_irqsave(&priv->lock, flags); + iwl3945_rx_queue_restock(priv); + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void iwl3945_rx_replenish_now(struct iwl_priv *priv) +{ + iwl3945_rx_allocate(priv, GFP_ATOMIC); + + iwl3945_rx_queue_restock(priv); +} + + +/* Assumes that the skb field of the buffers in 'pool' is kept accurate. 
+ * If an SKB has been detached, the POOL needs to have its SKB set to NULL + * This free routine walks the list of POOL entries and if SKB is set to + * non NULL it is unmapped and freed + */ +static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + int i; + for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { + if (rxq->pool[i].page != NULL) { + pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + __iwl_legacy_free_pages(priv, rxq->pool[i].page); + rxq->pool[i].page = NULL; + } + } + + dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, + rxq->bd_dma); + dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status), + rxq->rb_stts, rxq->rb_stts_dma); + rxq->bd = NULL; + rxq->rb_stts = NULL; +} + + +/* Convert linear signal-to-noise ratio into dB */ +static u8 ratio2dB[100] = { +/* 0 1 2 3 4 5 6 7 8 9 */ + 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */ + 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */ + 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */ + 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */ + 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */ + 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */ + 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */ + 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */ + 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */ + 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */ +}; + +/* Calculates a relative dB value from a ratio of linear + * (i.e. not dB) signal levels. + * Conversion assumes that levels are voltages (20*log), not powers (10*log). */ +int iwl3945_calc_db_from_ratio(int sig_ratio) +{ + /* 1000:1 or higher just report as 60 dB */ + if (sig_ratio >= 1000) + return 60; + + /* 100:1 or higher, divide by 10 and use table, + * add 20 dB to make up for divide by 10 */ + if (sig_ratio >= 100) + return 20 + (int)ratio2dB[sig_ratio/10]; + + /* We shouldn't see this */ + if (sig_ratio < 1) + return 0; + + /* Use table for ratios 1:1 - 99:1 */ + return (int)ratio2dB[sig_ratio]; +} + +/** + * iwl3945_rx_handle - Main entry function for receiving responses from uCode + * + * Uses the priv->rx_handlers callback function array to invoke + * the appropriate handlers, including command responses, + * frame-received notifications, and other notifications. + */ +static void iwl3945_rx_handle(struct iwl_priv *priv) +{ + struct iwl_rx_mem_buffer *rxb; + struct iwl_rx_packet *pkt; + struct iwl_rx_queue *rxq = &priv->rxq; + u32 r, i; + int reclaim; + unsigned long flags; + u8 fill_rx = 0; + u32 count = 8; + int total_empty = 0; + + /* uCode's read index (stored in shared DRAM) indicates the last Rx + * buffer that the driver may process (last buffer filled by ucode). 
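+ * For example (arbitrary values): if closed_rb_num reports 9 while
+ * rxq->read is 5, the loop below processes buffers 5..8 and leaves
+ * rxq->read at 9.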
*/ + r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; + i = rxq->read; + + /* calculate total frames need to be restock after handling RX */ + total_empty = r - rxq->write_actual; + if (total_empty < 0) + total_empty += RX_QUEUE_SIZE; + + if (total_empty > (RX_QUEUE_SIZE / 2)) + fill_rx = 1; + /* Rx interrupt, but nothing sent from uCode */ + if (i == r) + IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); + + while (i != r) { + int len; + + rxb = rxq->queue[i]; + + /* If an RXB doesn't have a Rx queue slot associated with it, + * then a bug has been introduced in the queue refilling + * routines -- catch it here */ + BUG_ON(rxb == NULL); + + rxq->queue[i] = NULL; + + pci_unmap_page(priv->pci_dev, rxb->page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + pkt = rxb_addr(rxb); + + len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + len += sizeof(u32); /* account for status word */ + trace_iwlwifi_legacy_dev_rx(priv, pkt, len); + + /* Reclaim a command buffer only if this packet is a response + * to a (driver-originated) command. + * If the packet (e.g. Rx frame) originated from uCode, + * there is no command buffer to reclaim. + * Ucode should set SEQ_RX_FRAME bit if ucode-originated, + * but apparently a few don't get set; catch them here. */ + reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && + (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && + (pkt->hdr.cmd != REPLY_TX); + + /* Based on type of command response or notification, + * handle those that need handling via function in + * rx_handlers table. See iwl3945_setup_rx_handlers() */ + if (priv->rx_handlers[pkt->hdr.cmd]) { + IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i, + iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); + priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; + priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); + } else { + /* No handling needed */ + IWL_DEBUG_RX(priv, + "r %d i %d No handler needed for %s, 0x%02x\n", + r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd), + pkt->hdr.cmd); + } + + /* + * XXX: After here, we should always check rxb->page + * against NULL before touching it or its virtual + * memory (pkt). Because some rx_handler might have + * already taken or freed the pages. + */ + + if (reclaim) { + /* Invoke any callbacks, transfer the buffer to caller, + * and fire off the (possibly) blocking iwl_legacy_send_cmd() + * as we reclaim the driver command queue */ + if (rxb->page) + iwl_legacy_tx_cmd_complete(priv, rxb); + else + IWL_WARN(priv, "Claim null rxb?\n"); + } + + /* Reuse the page if possible. For notification packets and + * SKBs that fail to Rx correctly, add them back into the + * rx_free list for reuse later. */ + spin_lock_irqsave(&rxq->lock, flags); + if (rxb->page != NULL) { + rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page, + 0, PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + } else + list_add_tail(&rxb->list, &rxq->rx_used); + + spin_unlock_irqrestore(&rxq->lock, flags); + + i = (i + 1) & RX_QUEUE_MASK; + /* If there are a lot of unused frames, + * restock the Rx queue so ucode won't assert. 
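+ * (With fill_rx set, the code below restocks after roughly every
+ * eight handled packets instead of waiting for the end of the loop,
+ * so a long burst of traffic cannot exhaust the free-buffer pool.)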
*/ + if (fill_rx) { + count++; + if (count >= 8) { + rxq->read = i; + iwl3945_rx_replenish_now(priv); + count = 0; + } + } + } + + /* Backtrack one entry */ + rxq->read = i; + if (fill_rx) + iwl3945_rx_replenish_now(priv); + else + iwl3945_rx_queue_restock(priv); +} + +/* call this function to flush any scheduled tasklet */ +static inline void iwl3945_synchronize_irq(struct iwl_priv *priv) +{ + /* wait to make sure we flush pending tasklet*/ + synchronize_irq(priv->pci_dev->irq); + tasklet_kill(&priv->irq_tasklet); +} + +static const char *iwl3945_desc_lookup(int i) +{ + switch (i) { + case 1: + return "FAIL"; + case 2: + return "BAD_PARAM"; + case 3: + return "BAD_CHECKSUM"; + case 4: + return "NMI_INTERRUPT"; + case 5: + return "SYSASSERT"; + case 6: + return "FATAL_ERROR"; + } + + return "UNKNOWN"; +} + +#define ERROR_START_OFFSET (1 * sizeof(u32)) +#define ERROR_ELEM_SIZE (7 * sizeof(u32)) + +void iwl3945_dump_nic_error_log(struct iwl_priv *priv) +{ + u32 i; + u32 desc, time, count, base, data1; + u32 blink1, blink2, ilink1, ilink2; + + base = le32_to_cpu(priv->card_alive.error_event_table_ptr); + + if (!iwl3945_hw_valid_rtc_data_addr(base)) { + IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base); + return; + } + + + count = iwl_legacy_read_targ_mem(priv, base); + + if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { + IWL_ERR(priv, "Start IWL Error Log Dump:\n"); + IWL_ERR(priv, "Status: 0x%08lX, count: %d\n", + priv->status, count); + } + + IWL_ERR(priv, "Desc Time asrtPC blink2 " + "ilink1 nmiPC Line\n"); + for (i = ERROR_START_OFFSET; + i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET; + i += ERROR_ELEM_SIZE) { + desc = iwl_legacy_read_targ_mem(priv, base + i); + time = + iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32)); + blink1 = + iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32)); + blink2 = + iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32)); + ilink1 = + iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32)); + ilink2 = + iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32)); + data1 = + iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32)); + + IWL_ERR(priv, + "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", + iwl3945_desc_lookup(desc), desc, time, blink1, blink2, + ilink1, ilink2, data1); + trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0, + 0, blink1, blink2, ilink1, ilink2); + } +} + +static void iwl3945_irq_tasklet(struct iwl_priv *priv) +{ + u32 inta, handled = 0; + u32 inta_fh; + unsigned long flags; +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + u32 inta_mask; +#endif + + spin_lock_irqsave(&priv->lock, flags); + + /* Ack/clear/reset pending uCode interrupts. + * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, + * and will clear only when CSR_FH_INT_STATUS gets cleared. */ + inta = iwl_read32(priv, CSR_INT); + iwl_write32(priv, CSR_INT, inta); + + /* Ack/clear/reset pending flow-handler (DMA) interrupts. + * Any new interrupts that happen after this, either while we're + * in this tasklet, or later, will show up in next ISR/tasklet. 
*/ + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) { + /* just for debug */ + inta_mask = iwl_read32(priv, CSR_INT_MASK); + IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", + inta, inta_mask, inta_fh); + } +#endif + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not + * atomic, make sure that inta covers all the interrupts that + * we've discovered, even if FH interrupt came in just after + * reading CSR_INT. */ + if (inta_fh & CSR39_FH_INT_RX_MASK) + inta |= CSR_INT_BIT_FH_RX; + if (inta_fh & CSR39_FH_INT_TX_MASK) + inta |= CSR_INT_BIT_FH_TX; + + /* Now service all interrupt bits discovered above. */ + if (inta & CSR_INT_BIT_HW_ERR) { + IWL_ERR(priv, "Hardware error detected. Restarting.\n"); + + /* Tell the device to stop sending interrupts */ + iwl_legacy_disable_interrupts(priv); + + priv->isr_stats.hw++; + iwl_legacy_irq_handle_error(priv); + + handled |= CSR_INT_BIT_HW_ERR; + + return; + } + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) { + /* NIC fires this, but we don't use it, redundant with WAKEUP */ + if (inta & CSR_INT_BIT_SCD) { + IWL_DEBUG_ISR(priv, "Scheduler finished to transmit " + "the frame/frames.\n"); + priv->isr_stats.sch++; + } + + /* Alive notification via Rx interrupt will do the real work */ + if (inta & CSR_INT_BIT_ALIVE) { + IWL_DEBUG_ISR(priv, "Alive interrupt\n"); + priv->isr_stats.alive++; + } + } +#endif + /* Safely ignore these bits for debug checks below */ + inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); + + /* Error detected by uCode */ + if (inta & CSR_INT_BIT_SW_ERR) { + IWL_ERR(priv, "Microcode SW error detected. 
" + "Restarting 0x%X.\n", inta); + priv->isr_stats.sw++; + iwl_legacy_irq_handle_error(priv); + handled |= CSR_INT_BIT_SW_ERR; + } + + /* uCode wakes up after power-down sleep */ + if (inta & CSR_INT_BIT_WAKEUP) { + IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); + iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq); + iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]); + iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]); + iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]); + iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]); + iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]); + iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]); + + priv->isr_stats.wakeup++; + handled |= CSR_INT_BIT_WAKEUP; + } + + /* All uCode command responses, including Tx command responses, + * Rx "responses" (frame-received notification), and other + * notifications from uCode come through here*/ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { + iwl3945_rx_handle(priv); + priv->isr_stats.rx++; + handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); + } + + if (inta & CSR_INT_BIT_FH_TX) { + IWL_DEBUG_ISR(priv, "Tx interrupt\n"); + priv->isr_stats.tx++; + + iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6)); + iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT + (FH39_SRVC_CHNL), 0x0); + handled |= CSR_INT_BIT_FH_TX; + } + + if (inta & ~handled) { + IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled); + priv->isr_stats.unhandled++; + } + + if (inta & ~priv->inta_mask) { + IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n", + inta & ~priv->inta_mask); + IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh); + } + + /* Re-enable all interrupts */ + /* only Re-enable if disabled by irq */ + if (test_bit(STATUS_INT_ENABLED, &priv->status)) + iwl_legacy_enable_interrupts(priv); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) { + inta = iwl_read32(priv, CSR_INT); + inta_mask = iwl_read32(priv, CSR_INT_MASK); + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " + "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); + } +#endif +} + +static int iwl3945_get_channels_for_scan(struct iwl_priv *priv, + enum ieee80211_band band, + u8 is_active, u8 n_probes, + struct iwl3945_scan_channel *scan_ch, + struct ieee80211_vif *vif) +{ + struct ieee80211_channel *chan; + const struct ieee80211_supported_band *sband; + const struct iwl_channel_info *ch_info; + u16 passive_dwell = 0; + u16 active_dwell = 0; + int added, i; + + sband = iwl_get_hw_mode(priv, band); + if (!sband) + return 0; + + active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes); + passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif); + + if (passive_dwell <= active_dwell) + passive_dwell = active_dwell + 1; + + for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) { + chan = priv->scan_request->channels[i]; + + if (chan->band != band) + continue; + + scan_ch->channel = chan->hw_value; + + ch_info = iwl_legacy_get_channel_info(priv, band, + scan_ch->channel); + if (!iwl_legacy_is_channel_valid(ch_info)) { + IWL_DEBUG_SCAN(priv, + "Channel %d is INVALID for this band.\n", + scan_ch->channel); + continue; + } + + scan_ch->active_dwell = cpu_to_le16(active_dwell); + scan_ch->passive_dwell = cpu_to_le16(passive_dwell); + /* If passive , set up for auto-switch + * and use long active_dwell time. 
+ */ + if (!is_active || iwl_legacy_is_channel_passive(ch_info) || + (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) { + scan_ch->type = 0; /* passive */ + if (IWL_UCODE_API(priv->ucode_ver) == 1) + scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1); + } else { + scan_ch->type = 1; /* active */ + } + + /* Set direct probe bits. These may be used both for active + * scan channels (probes get sent right away), + * or for passive channels (probes get sent only after + * hearing clear Rx packet). */ + if (IWL_UCODE_API(priv->ucode_ver) >= 2) { + if (n_probes) + scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes); + } else { + /* uCode v1 does not allow setting direct probe bits on + * passive channel. */ + if ((scan_ch->type & 1) && n_probes) + scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes); + } + + /* Set txpower levels to defaults */ + scan_ch->tpc.dsp_atten = 110; + /* scan_pwr_info->tpc.dsp_atten; */ + + /*scan_pwr_info->tpc.tx_gain; */ + if (band == IEEE80211_BAND_5GHZ) + scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; + else { + scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); + /* NOTE: if we were doing 6Mb OFDM for scans we'd use + * power level: + * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3; + */ + } + + IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n", + scan_ch->channel, + (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE", + (scan_ch->type & 1) ? + active_dwell : passive_dwell); + + scan_ch++; + added++; + } + + IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added); + return added; +} + +static void iwl3945_init_hw_rates(struct iwl_priv *priv, + struct ieee80211_rate *rates) +{ + int i; + + for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) { + rates[i].bitrate = iwl3945_rates[i].ieee * 5; + rates[i].hw_value = i; /* Rate scaling will work on indexes */ + rates[i].hw_value_short = i; + rates[i].flags = 0; + if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) { + /* + * If CCK != 1M then set short preamble rate flag. + */ + rates[i].flags |= (iwl3945_rates[i].plcp == 10) ? + 0 : IEEE80211_RATE_SHORT_PREAMBLE; + } + } +} + +/****************************************************************************** + * + * uCode download functions + * + ******************************************************************************/ + +static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv) +{ + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot); +} + +/** + * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host, + * looking at all data.
+ */ +static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len) +{ + u32 val; + u32 save_len = len; + int rc = 0; + u32 errcnt; + + IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); + + iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, + IWL39_RTC_INST_LOWER_BOUND); + + errcnt = 0; + for (; len > 0; len -= sizeof(u32), image++) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { + IWL_ERR(priv, "uCode INST section is invalid at " + "offset 0x%x, is 0x%x, s/b 0x%x\n", + save_len - len, val, le32_to_cpu(*image)); + rc = -EIO; + errcnt++; + if (errcnt >= 20) + break; + } + } + + + if (!errcnt) + IWL_DEBUG_INFO(priv, + "ucode image in INSTRUCTION memory is good\n"); + + return rc; +} + + +/** + * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host, + * using sample data 100 bytes apart. If these sample points are good, + * it's a pretty good bet that everything between them is good, too. + */ +static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len) +{ + u32 val; + int rc = 0; + u32 errcnt = 0; + u32 i; + + IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); + + for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, + i + IWL39_RTC_INST_LOWER_BOUND); + val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { +#if 0 /* Enable this if you want to see details */ + IWL_ERR(priv, "uCode INST section is invalid at " + "offset 0x%x, is 0x%x, s/b 0x%x\n", + i, val, *image); +#endif + rc = -EIO; + errcnt++; + if (errcnt >= 3) + break; + } + } + + return rc; +} + + +/** + * iwl3945_verify_ucode - determine which instruction image is in SRAM, + * and verify its contents + */ +static int iwl3945_verify_ucode(struct iwl_priv *priv) +{ + __le32 *image; + u32 len; + int rc = 0; + + /* Try bootstrap */ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + rc = iwl3945_verify_inst_sparse(priv, image, len); + if (rc == 0) { + IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n"); + return 0; + } + + /* Try initialize */ + image = (__le32 *)priv->ucode_init.v_addr; + len = priv->ucode_init.len; + rc = iwl3945_verify_inst_sparse(priv, image, len); + if (rc == 0) { + IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n"); + return 0; + } + + /* Try runtime/protocol */ + image = (__le32 *)priv->ucode_code.v_addr; + len = priv->ucode_code.len; + rc = iwl3945_verify_inst_sparse(priv, image, len); + if (rc == 0) { + IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n"); + return 0; + } + + IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); + + /* Since nothing seems to match, show first several data entries in + * instruction SRAM, so maybe visual inspection will give a clue. + * Selection of bootstrap image (vs. other images) is arbitrary. 
*/ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + rc = iwl3945_verify_inst_full(priv, image, len); + + return rc; +} + +static void iwl3945_nic_start(struct iwl_priv *priv) +{ + /* Remove all resets to allow NIC to operate */ + iwl_write32(priv, CSR_RESET, 0); +} + +#define IWL3945_UCODE_GET(item) \ +static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\ +{ \ + return le32_to_cpu(ucode->v1.item); \ +} + +static u32 iwl3945_ucode_get_header_size(u32 api_ver) +{ + return 24; +} + +static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode) +{ + return (u8 *) ucode->v1.data; +} + +IWL3945_UCODE_GET(inst_size); +IWL3945_UCODE_GET(data_size); +IWL3945_UCODE_GET(init_size); +IWL3945_UCODE_GET(init_data_size); +IWL3945_UCODE_GET(boot_size); + +/** + * iwl3945_read_ucode - Read uCode images from disk file. + * + * Copy into buffers for card to fetch via bus-mastering + */ +static int iwl3945_read_ucode(struct iwl_priv *priv) +{ + const struct iwl_ucode_header *ucode; + int ret = -EINVAL, index; + const struct firmware *ucode_raw; + /* firmware file name contains uCode/driver compatibility version */ + const char *name_pre = priv->cfg->fw_name_pre; + const unsigned int api_max = priv->cfg->ucode_api_max; + const unsigned int api_min = priv->cfg->ucode_api_min; + char buf[25]; + u8 *src; + size_t len; + u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size; + + /* Ask kernel firmware_class module to get the boot firmware off disk. + * request_firmware() is synchronous, file is in memory on return. */ + for (index = api_max; index >= api_min; index--) { + sprintf(buf, "%s%u%s", name_pre, index, ".ucode"); + ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev); + if (ret < 0) { + IWL_ERR(priv, "%s firmware file req failed: %d\n", + buf, ret); + if (ret == -ENOENT) + continue; + else + goto error; + } else { + if (index < api_max) + IWL_ERR(priv, "Loaded firmware %s, " + "which is deprecated. " + " Please use API v%u instead.\n", + buf, api_max); + IWL_DEBUG_INFO(priv, "Got firmware '%s' file " + "(%zd bytes) from disk\n", + buf, ucode_raw->size); + break; + } + } + + if (ret < 0) + goto error; + + /* Make sure that we got at least our header! */ + if (ucode_raw->size < iwl3945_ucode_get_header_size(1)) { + IWL_ERR(priv, "File size way too small!\n"); + ret = -EINVAL; + goto err_release; + } + + /* Data from ucode file: header followed by uCode images */ + ucode = (struct iwl_ucode_header *)ucode_raw->data; + + priv->ucode_ver = le32_to_cpu(ucode->ver); + api_ver = IWL_UCODE_API(priv->ucode_ver); + inst_size = iwl3945_ucode_get_inst_size(ucode); + data_size = iwl3945_ucode_get_data_size(ucode); + init_size = iwl3945_ucode_get_init_size(ucode); + init_data_size = iwl3945_ucode_get_init_data_size(ucode); + boot_size = iwl3945_ucode_get_boot_size(ucode); + src = iwl3945_ucode_get_data(ucode); + + /* api_ver should match the api version forming part of the + * firmware filename ... but we don't check for that and only rely + * on the API version read from firmware header from here on forward */ + + if (api_ver < api_min || api_ver > api_max) { + IWL_ERR(priv, "Driver unable to support your firmware API. " + "Driver supports v%u, firmware is v%u.\n", + api_max, api_ver); + priv->ucode_ver = 0; + ret = -EINVAL; + goto err_release; + } + if (api_ver != api_max) + IWL_ERR(priv, "Firmware has old API version. Expected %u, " + "got %u. 
New firmware can be obtained " + "from http://www.intellinuxwireless.org.\n", + api_max, api_ver); + + IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n", + IWL_UCODE_MAJOR(priv->ucode_ver), + IWL_UCODE_MINOR(priv->ucode_ver), + IWL_UCODE_API(priv->ucode_ver), + IWL_UCODE_SERIAL(priv->ucode_ver)); + + snprintf(priv->hw->wiphy->fw_version, + sizeof(priv->hw->wiphy->fw_version), + "%u.%u.%u.%u", + IWL_UCODE_MAJOR(priv->ucode_ver), + IWL_UCODE_MINOR(priv->ucode_ver), + IWL_UCODE_API(priv->ucode_ver), + IWL_UCODE_SERIAL(priv->ucode_ver)); + + IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n", + priv->ucode_ver); + IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n", + inst_size); + IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n", + data_size); + IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n", + init_size); + IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n", + init_data_size); + IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n", + boot_size); + + + /* Verify size of file vs. image size info in file's header */ + if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) + + inst_size + data_size + init_size + + init_data_size + boot_size) { + + IWL_DEBUG_INFO(priv, + "uCode file size %zd does not match expected size\n", + ucode_raw->size); + ret = -EINVAL; + goto err_release; + } + + /* Verify that uCode images will fit in card's SRAM */ + if (inst_size > IWL39_MAX_INST_SIZE) { + IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n", + inst_size); + ret = -EINVAL; + goto err_release; + } + + if (data_size > IWL39_MAX_DATA_SIZE) { + IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n", + data_size); + ret = -EINVAL; + goto err_release; + } + if (init_size > IWL39_MAX_INST_SIZE) { + IWL_DEBUG_INFO(priv, + "uCode init instr len %d too large to fit in\n", + init_size); + ret = -EINVAL; + goto err_release; + } + if (init_data_size > IWL39_MAX_DATA_SIZE) { + IWL_DEBUG_INFO(priv, + "uCode init data len %d too large to fit in\n", + init_data_size); + ret = -EINVAL; + goto err_release; + } + if (boot_size > IWL39_MAX_BSM_SIZE) { + IWL_DEBUG_INFO(priv, + "uCode boot instr len %d too large to fit in\n", + boot_size); + ret = -EINVAL; + goto err_release; + } + + /* Allocate ucode buffers for card's bus-master loading ... 
*/ + + /* Runtime instructions and 2 copies of data: + * 1) unmodified from disk + * 2) backup cache for save/restore during power-downs */ + priv->ucode_code.len = inst_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code); + + priv->ucode_data.len = data_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data); + + priv->ucode_data_backup.len = data_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); + + if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || + !priv->ucode_data_backup.v_addr) + goto err_pci_alloc; + + /* Initialization instructions and data */ + if (init_size && init_data_size) { + priv->ucode_init.len = init_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init); + + priv->ucode_init_data.len = init_data_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data); + + if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr) + goto err_pci_alloc; + } + + /* Bootstrap (instructions only, no data) */ + if (boot_size) { + priv->ucode_boot.len = boot_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot); + + if (!priv->ucode_boot.v_addr) + goto err_pci_alloc; + } + + /* Copy images into buffers for card's bus-master reads ... */ + + /* Runtime instructions (first block of data in file) */ + len = inst_size; + IWL_DEBUG_INFO(priv, + "Copying (but not loading) uCode instr len %zd\n", len); + memcpy(priv->ucode_code.v_addr, src, len); + src += len; + + IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", + priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); + + /* Runtime data (2nd block) + * NOTE: Copy into backup buffer will be done in iwl3945_up() */ + len = data_size; + IWL_DEBUG_INFO(priv, + "Copying (but not loading) uCode data len %zd\n", len); + memcpy(priv->ucode_data.v_addr, src, len); + memcpy(priv->ucode_data_backup.v_addr, src, len); + src += len; + + /* Initialization instructions (3rd block) */ + if (init_size) { + len = init_size; + IWL_DEBUG_INFO(priv, + "Copying (but not loading) init instr len %zd\n", len); + memcpy(priv->ucode_init.v_addr, src, len); + src += len; + } + + /* Initialization data (4th block) */ + if (init_data_size) { + len = init_data_size; + IWL_DEBUG_INFO(priv, + "Copying (but not loading) init data len %zd\n", len); + memcpy(priv->ucode_init_data.v_addr, src, len); + src += len; + } + + /* Bootstrap instructions (5th block) */ + len = boot_size; + IWL_DEBUG_INFO(priv, + "Copying (but not loading) boot instr len %zd\n", len); + memcpy(priv->ucode_boot.v_addr, src, len); + + /* We have our copies now, allow OS release its copies */ + release_firmware(ucode_raw); + return 0; + + err_pci_alloc: + IWL_ERR(priv, "failed to allocate pci memory\n"); + ret = -ENOMEM; + iwl3945_dealloc_ucode_pci(priv); + + err_release: + release_firmware(ucode_raw); + + error: + return ret; +} + + +/** + * iwl3945_set_ucode_ptrs - Set uCode address location + * + * Tell initialization uCode where to find runtime uCode. + * + * BSM registers initially contain pointers to initialization uCode. + * We need to replace them to load runtime uCode inst and data, + * and to save runtime data when powering down. 
+ */ +static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv) +{ + dma_addr_t pinst; + dma_addr_t pdata; + + /* bits 31:0 for 3945 */ + pinst = priv->ucode_code.p_addr; + pdata = priv->ucode_data_backup.p_addr; + + /* Tell bootstrap uCode where to find image to load */ + iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, + priv->ucode_data.len); + + /* Inst byte count must be last to set up, bit 31 signals uCode + * that all new ptr/size info is in place */ + iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, + priv->ucode_code.len | BSM_DRAM_INST_LOAD); + + IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n"); + + return 0; +} + +/** + * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received + * + * Called after REPLY_ALIVE notification received from "initialize" uCode. + * + * Tell "initialize" uCode to go ahead and load the runtime uCode. + */ +static void iwl3945_init_alive_start(struct iwl_priv *priv) +{ + /* Check alive response for "valid" sign from uCode */ + if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n"); + goto restart; + } + + /* Bootstrap uCode has loaded initialize uCode ... verify inst image. + * This is a paranoid check, because we would not have gotten the + * "initialize" alive if code weren't properly loaded. */ + if (iwl3945_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n"); + goto restart; + } + + /* Send pointers to protocol/runtime uCode image ... init code will + * load and launch runtime uCode, which will send us another "Alive" + * notification. */ + IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); + if (iwl3945_set_ucode_ptrs(priv)) { + /* Runtime instruction load won't happen; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n"); + goto restart; + } + return; + + restart: + queue_work(priv->workqueue, &priv->restart); +} + +/** + * iwl3945_alive_start - called after REPLY_ALIVE notification received + * from protocol/runtime uCode (initialization uCode's + * Alive gets handled by iwl3945_init_alive_start()). + */ +static void iwl3945_alive_start(struct iwl_priv *priv) +{ + int thermal_spin = 0; + u32 rfkill; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); + + if (priv->card_alive.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Alive failed.\n"); + goto restart; + } + + /* Initialize uCode has loaded Runtime uCode ... verify inst image. + * This is a paranoid check, because we would not have gotten the + * "runtime" alive if code weren't properly loaded. 
*/ + if (iwl3945_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n"); + goto restart; + } + + rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG); + IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill); + + if (rfkill & 0x1) { + clear_bit(STATUS_RF_KILL_HW, &priv->status); + /* if RFKILL is not on, then wait for thermal + * sensor in adapter to kick in */ + while (iwl3945_hw_get_temperature(priv) == 0) { + thermal_spin++; + udelay(10); + } + + if (thermal_spin) + IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n", + thermal_spin * 10); + } else + set_bit(STATUS_RF_KILL_HW, &priv->status); + + /* After the ALIVE response, we can send commands to 3945 uCode */ + set_bit(STATUS_ALIVE, &priv->status); + + /* Enable watchdog to monitor the driver tx queues */ + iwl_legacy_setup_watchdog(priv); + + if (iwl_legacy_is_rfkill(priv)) + return; + + ieee80211_wake_queues(priv->hw); + + priv->active_rate = IWL_RATES_MASK_3945; + + iwl_legacy_power_update_mode(priv, true); + + if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) { + struct iwl3945_rxon_cmd *active_rxon = + (struct iwl3945_rxon_cmd *)(&ctx->active); + + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + } else { + /* Initialize our rx_config data */ + iwl_legacy_connection_init_rx_config(priv, ctx); + } + + /* Configure Bluetooth device coexistence support */ + iwl_legacy_send_bt_config(priv); + + set_bit(STATUS_READY, &priv->status); + + /* Configure the adapter for unassociated operation */ + iwl3945_commit_rxon(priv, ctx); + + iwl3945_reg_txpower_periodic(priv); + + IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); + wake_up(&priv->wait_command_queue); + + return; + + restart: + queue_work(priv->workqueue, &priv->restart); +} + +static void iwl3945_cancel_deferred_work(struct iwl_priv *priv); + +static void __iwl3945_down(struct iwl_priv *priv) +{ + unsigned long flags; + int exit_pending; + + IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); + + iwl_legacy_scan_cancel_timeout(priv, 200); + + exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); + + /* Stop TX queues watchdog. 
We need to have STATUS_EXIT_PENDING bit set + * to prevent rearm timer */ + del_timer_sync(&priv->watchdog); + + /* Station information will now be cleared in device */ + iwl_legacy_clear_ucode_stations(priv, NULL); + iwl_legacy_dealloc_bcast_stations(priv); + iwl_legacy_clear_driver_stations(priv); + + /* Unblock any waiting calls */ + wake_up_all(&priv->wait_command_queue); + + /* Wipe out the EXIT_PENDING status bit if we are not actually + * exiting the module */ + if (!exit_pending) + clear_bit(STATUS_EXIT_PENDING, &priv->status); + + /* stop and reset the on-board processor */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + /* tell the device to stop sending interrupts */ + spin_lock_irqsave(&priv->lock, flags); + iwl_legacy_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + iwl3945_synchronize_irq(priv); + + if (priv->mac80211_registered) + ieee80211_stop_queues(priv->hw); + + /* If we have not previously called iwl3945_init() then + * clear all bits but the RF Kill bits and return */ + if (!iwl_legacy_is_init(priv)) { + priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | + test_bit(STATUS_GEO_CONFIGURED, &priv->status) << + STATUS_GEO_CONFIGURED | + test_bit(STATUS_EXIT_PENDING, &priv->status) << + STATUS_EXIT_PENDING; + goto exit; + } + + /* ...otherwise clear out all the status bits but the RF Kill + * bit and continue taking the NIC down. */ + priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | + test_bit(STATUS_GEO_CONFIGURED, &priv->status) << + STATUS_GEO_CONFIGURED | + test_bit(STATUS_FW_ERROR, &priv->status) << + STATUS_FW_ERROR | + test_bit(STATUS_EXIT_PENDING, &priv->status) << + STATUS_EXIT_PENDING; + + iwl3945_hw_txq_ctx_stop(priv); + iwl3945_hw_rxq_stop(priv); + + /* Power-down device's busmaster DMA clocks */ + iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); + udelay(5); + + /* Stop the device, and put it in low power state */ + iwl_legacy_apm_stop(priv); + + exit: + memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); + + if (priv->beacon_skb) + dev_kfree_skb(priv->beacon_skb); + priv->beacon_skb = NULL; + + /* clear out any free frames */ + iwl3945_clear_free_frames(priv); +} + +static void iwl3945_down(struct iwl_priv *priv) +{ + mutex_lock(&priv->mutex); + __iwl3945_down(priv); + mutex_unlock(&priv->mutex); + + iwl3945_cancel_deferred_work(priv); +} + +#define MAX_HW_RESTARTS 5 + +static int iwl3945_alloc_bcast_station(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + unsigned long flags; + u8 sta_id; + + spin_lock_irqsave(&priv->sta_lock, flags); + sta_id = iwl_legacy_prep_station(priv, ctx, + iwlegacy_bcast_addr, false, NULL); + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Unable to prepare broadcast station\n"); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return -EINVAL; + } + + priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE; + priv->stations[sta_id].used |= IWL_STA_BCAST; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return 0; +} + +static int __iwl3945_up(struct iwl_priv *priv) +{ + int rc, i; + + rc = iwl3945_alloc_bcast_station(priv); + if (rc) + return rc; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); + return -EIO; + } + + if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { + IWL_ERR(priv, "ucode not available for device bring up\n"); + return -EIO; + } + 
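/*
 * A minimal spelled-out sketch of the status-preservation idiom used in
 * __iwl3945_down() above (illustration only; like the driver code, it
 * assumes test_bit() evaluates to 0 or 1):
 *
 *	unsigned long keep = 0;
 *
 *	keep |= test_bit(STATUS_RF_KILL_HW, &priv->status)
 *					<< STATUS_RF_KILL_HW;
 *	keep |= test_bit(STATUS_GEO_CONFIGURED, &priv->status)
 *					<< STATUS_GEO_CONFIGURED;
 *	keep |= test_bit(STATUS_EXIT_PENDING, &priv->status)
 *					<< STATUS_EXIT_PENDING;
 *	priv->status &= keep;
 *
 * Shifting the 0/1 result of test_bit() back to its bit position rebuilds
 * a mask of only those bits that must survive the shutdown; every other
 * status bit (ALIVE, READY, SCAN_HW, ...) is cleared so that the next
 * __iwl3945_up() starts from a known state.
 */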
+ /* If platform's RF_KILL switch is NOT set to KILL */ + if (iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) + clear_bit(STATUS_RF_KILL_HW, &priv->status); + else { + set_bit(STATUS_RF_KILL_HW, &priv->status); + IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n"); + return -ENODEV; + } + + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + + rc = iwl3945_hw_nic_init(priv); + if (rc) { + IWL_ERR(priv, "Unable to int nic\n"); + return rc; + } + + /* make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + /* clear (again), then enable host interrupts */ + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + iwl_legacy_enable_interrupts(priv); + + /* really make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + /* Copy original ucode data image from disk into backup cache. + * This will be used to initialize the on-board processor's + * data SRAM for a clean start when the runtime program first loads. */ + memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, + priv->ucode_data.len); + + /* We return success when we resume from suspend and rf_kill is on. */ + if (test_bit(STATUS_RF_KILL_HW, &priv->status)) + return 0; + + for (i = 0; i < MAX_HW_RESTARTS; i++) { + + /* load bootstrap state machine, + * load bootstrap program into processor's memory, + * prepare to load the "initialize" uCode */ + rc = priv->cfg->ops->lib->load_ucode(priv); + + if (rc) { + IWL_ERR(priv, + "Unable to set up bootstrap uCode: %d\n", rc); + continue; + } + + /* start card; "initialize" will load runtime ucode */ + iwl3945_nic_start(priv); + + IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n"); + + return 0; + } + + set_bit(STATUS_EXIT_PENDING, &priv->status); + __iwl3945_down(priv); + clear_bit(STATUS_EXIT_PENDING, &priv->status); + + /* tried to restart and config the device for as long as our + * patience could withstand */ + IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i); + return -EIO; +} + + +/***************************************************************************** + * + * Workqueue callbacks + * + *****************************************************************************/ + +static void iwl3945_bg_init_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, init_alive_start.work); + + mutex_lock(&priv->mutex); + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + goto out; + + iwl3945_init_alive_start(priv); +out: + mutex_unlock(&priv->mutex); +} + +static void iwl3945_bg_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, alive_start.work); + + mutex_lock(&priv->mutex); + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + goto out; + + iwl3945_alive_start(priv); +out: + mutex_unlock(&priv->mutex); +} + +/* + * 3945 cannot interrupt driver when hardware rf kill switch toggles; + * driver must poll CSR_GP_CNTRL_REG register for change. This register + * *is* readable even when device has been SW_RESET into low power mode + * (e.g. during RF KILL). 
+ */ +static void iwl3945_rfkill_poll(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, _3945.rfkill_poll.work); + bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status); + bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL) + & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); + + if (new_rfkill != old_rfkill) { + if (new_rfkill) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill); + + IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n", + new_rfkill ? "disable radio" : "enable radio"); + } + + /* Keep this running, even if radio now enabled. This will be + * cancelled in mac_start() if system decides to start again */ + queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll, + round_jiffies_relative(2 * HZ)); + +} + +int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_SCAN_CMD, + .len = sizeof(struct iwl3945_scan_cmd), + .flags = CMD_SIZE_HUGE, + }; + struct iwl3945_scan_cmd *scan; + u8 n_probes = 0; + enum ieee80211_band band; + bool is_active = false; + int ret; + u16 len; + + lockdep_assert_held(&priv->mutex); + + if (!priv->scan_cmd) { + priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) + + IWL_MAX_SCAN_SIZE, GFP_KERNEL); + if (!priv->scan_cmd) { + IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n"); + return -ENOMEM; + } + } + scan = priv->scan_cmd; + memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE); + + scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; + scan->quiet_time = IWL_ACTIVE_QUIET_TIME; + + if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) { + u16 interval; + u32 extra; + u32 suspend_time = 100; + u32 scan_suspend_time = 100; + + IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); + + interval = vif->bss_conf.beacon_int; + + scan->suspend_time = 0; + scan->max_out_time = cpu_to_le32(200 * 1024); + if (!interval) + interval = suspend_time; + /* + * suspend time format: + * 0-19: beacon interval in usec (time before exec.) 
+ * 20-23: 0 + * 24-31: number of beacons (suspend between channels) + */ + + extra = (suspend_time / interval) << 24; + scan_suspend_time = 0xFF0FFFFF & + (extra | ((suspend_time % interval) * 1024)); + + scan->suspend_time = cpu_to_le32(scan_suspend_time); + IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n", + scan_suspend_time, interval); + } + + if (priv->scan_request->n_ssids) { + int i, p = 0; + IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); + for (i = 0; i < priv->scan_request->n_ssids; i++) { + /* always does wildcard anyway */ + if (!priv->scan_request->ssids[i].ssid_len) + continue; + scan->direct_scan[p].id = WLAN_EID_SSID; + scan->direct_scan[p].len = + priv->scan_request->ssids[i].ssid_len; + memcpy(scan->direct_scan[p].ssid, + priv->scan_request->ssids[i].ssid, + priv->scan_request->ssids[i].ssid_len); + n_probes++; + p++; + } + is_active = true; + } else + IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n"); + + /* We don't build a direct scan probe request; the uCode will do + * that based on the direct_mask added to each channel entry */ + scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; + scan->tx_cmd.sta_id = priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id; + scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + /* flags + rate selection */ + + switch (priv->scan_band) { + case IEEE80211_BAND_2GHZ: + scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; + scan->tx_cmd.rate = IWL_RATE_1M_PLCP; + band = IEEE80211_BAND_2GHZ; + break; + case IEEE80211_BAND_5GHZ: + scan->tx_cmd.rate = IWL_RATE_6M_PLCP; + band = IEEE80211_BAND_5GHZ; + break; + default: + IWL_WARN(priv, "Invalid scan band\n"); + return -EIO; + } + + /* + * If active scaning is requested but a certain channel + * is marked passive, we can do active scanning if we + * detect transmissions. + */ + scan->good_CRC_th = is_active ? 
IWL_GOOD_CRC_TH_DEFAULT : + IWL_GOOD_CRC_TH_DISABLED; + + len = iwl_legacy_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data, + vif->addr, priv->scan_request->ie, + priv->scan_request->ie_len, + IWL_MAX_SCAN_SIZE - sizeof(*scan)); + scan->tx_cmd.len = cpu_to_le16(len); + + /* select Rx antennas */ + scan->flags |= iwl3945_get_antenna_flags(priv); + + scan->channel_count = iwl3945_get_channels_for_scan(priv, band, is_active, n_probes, + (void *)&scan->data[len], vif); + if (scan->channel_count == 0) { + IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); + return -EIO; + } + + cmd.len += le16_to_cpu(scan->tx_cmd.len) + + scan->channel_count * sizeof(struct iwl3945_scan_channel); + cmd.data = scan; + scan->len = cpu_to_le16(cmd.len); + + set_bit(STATUS_SCAN_HW, &priv->status); + ret = iwl_legacy_send_cmd_sync(priv, &cmd); + if (ret) + clear_bit(STATUS_SCAN_HW, &priv->status); + return ret; +} + +void iwl3945_post_scan(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + /* + * Since setting the RXON may have been deferred while + * performing the scan, fire one off if needed + */ + if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) + iwl3945_commit_rxon(priv, ctx); +} + +static void iwl3945_bg_restart(struct work_struct *data) +{ + struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { + struct iwl_rxon_context *ctx; + mutex_lock(&priv->mutex); + for_each_context(priv, ctx) + ctx->vif = NULL; + priv->is_open = 0; + mutex_unlock(&priv->mutex); + iwl3945_down(priv); + ieee80211_restart_hw(priv->hw); + } else { + iwl3945_down(priv); + + mutex_lock(&priv->mutex); + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + mutex_unlock(&priv->mutex); + return; + } + + __iwl3945_up(priv); + mutex_unlock(&priv->mutex); + } +} + +static void iwl3945_bg_rx_replenish(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, rx_replenish); + + mutex_lock(&priv->mutex); + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + goto out; + + iwl3945_rx_replenish(priv); +out: + mutex_unlock(&priv->mutex); +} + +void iwl3945_post_associate(struct iwl_priv *priv) +{ + int rc = 0; + struct ieee80211_conf *conf = NULL; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + if (!ctx->vif || !priv->is_open) + return; + + IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", + ctx->vif->bss_conf.aid, ctx->active.bssid_addr); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + iwl_legacy_scan_cancel_timeout(priv, 200); + + conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw); + + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl3945_commit_rxon(priv, ctx); + + rc = iwl_legacy_send_rxon_timing(priv, ctx); + if (rc) + IWL_WARN(priv, "REPLY_RXON_TIMING failed - " + "Attempting to continue.\n"); + + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; + + ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid); + + IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", + ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int); + + if (ctx->vif->bss_conf.use_short_preamble) + ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { + if (ctx->vif->bss_conf.use_short_slot) + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + 
ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + } + + iwl3945_commit_rxon(priv, ctx); + + switch (ctx->vif->type) { + case NL80211_IFTYPE_STATION: + iwl3945_rate_scale_init(priv->hw, IWL_AP_ID); + break; + case NL80211_IFTYPE_ADHOC: + iwl3945_send_beacon_cmd(priv); + break; + default: + IWL_ERR(priv, "%s Should not be called in %d mode\n", + __func__, ctx->vif->type); + break; + } +} + +/***************************************************************************** + * + * mac80211 entry point functions + * + *****************************************************************************/ + +#define UCODE_READY_TIMEOUT (2 * HZ) + +static int iwl3945_mac_start(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + int ret; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + /* we should be verifying the device is ready to be opened */ + mutex_lock(&priv->mutex); + + /* fetch ucode file from disk, alloc and copy to bus-master buffers ... + * ucode filename and max sizes are card-specific. */ + + if (!priv->ucode_code.len) { + ret = iwl3945_read_ucode(priv); + if (ret) { + IWL_ERR(priv, "Could not read microcode: %d\n", ret); + mutex_unlock(&priv->mutex); + goto out_release_irq; + } + } + + ret = __iwl3945_up(priv); + + mutex_unlock(&priv->mutex); + + if (ret) + goto out_release_irq; + + IWL_DEBUG_INFO(priv, "Start UP work.\n"); + + /* Wait for START_ALIVE from ucode. Otherwise callbacks from + * mac80211 will not be run successfully. */ + ret = wait_event_timeout(priv->wait_command_queue, + test_bit(STATUS_READY, &priv->status), + UCODE_READY_TIMEOUT); + if (!ret) { + if (!test_bit(STATUS_READY, &priv->status)) { + IWL_ERR(priv, + "Wait for START_ALIVE timeout after %dms.\n", + jiffies_to_msecs(UCODE_READY_TIMEOUT)); + ret = -ETIMEDOUT; + goto out_release_irq; + } + } + + /* ucode is running and will send rfkill notifications, + * no need to poll the killswitch state anymore */ + cancel_delayed_work(&priv->_3945.rfkill_poll); + + priv->is_open = 1; + IWL_DEBUG_MAC80211(priv, "leave\n"); + return 0; + +out_release_irq: + priv->is_open = 0; + IWL_DEBUG_MAC80211(priv, "leave - failed\n"); + return ret; +} + +static void iwl3945_mac_stop(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (!priv->is_open) { + IWL_DEBUG_MAC80211(priv, "leave - skip\n"); + return; + } + + priv->is_open = 0; + + iwl3945_down(priv); + + flush_workqueue(priv->workqueue); + + /* start polling the killswitch state again */ + queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll, + round_jiffies_relative(2 * HZ)); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +static void iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, + ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); + + if (iwl3945_tx_skb(priv, skb)) + dev_kfree_skb_any(skb); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +void iwl3945_config_ap(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct ieee80211_vif *vif = ctx->vif; + int rc = 0; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + /* The following should be done only at AP bring up */ + if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) { + + /* RXON - unassoc (to set timing command) */ + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl3945_commit_rxon(priv, ctx); + + /* RXON Timing 
*/ + rc = iwl_legacy_send_rxon_timing(priv, ctx); + if (rc) + IWL_WARN(priv, "REPLY_RXON_TIMING failed - " + "Attempting to continue.\n"); + + ctx->staging.assoc_id = 0; + + if (vif->bss_conf.use_short_preamble) + ctx->staging.flags |= + RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags &= + ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { + if (vif->bss_conf.use_short_slot) + ctx->staging.flags |= + RXON_FLG_SHORT_SLOT_MSK; + else + ctx->staging.flags &= + ~RXON_FLG_SHORT_SLOT_MSK; + } + /* restore RXON assoc */ + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; + iwl3945_commit_rxon(priv, ctx); + } + iwl3945_send_beacon_cmd(priv); +} + +static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct iwl_priv *priv = hw->priv; + int ret = 0; + u8 sta_id = IWL_INVALID_STATION; + u8 static_key; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (iwl3945_mod_params.sw_crypto) { + IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); + return -EOPNOTSUPP; + } + + /* + * To support IBSS RSN, don't program group keys in IBSS, the + * hardware will then not attempt to decrypt the frames. + */ + if (vif->type == NL80211_IFTYPE_ADHOC && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return -EOPNOTSUPP; + + static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS); + + if (!static_key) { + sta_id = iwl_legacy_sta_id_or_broadcast( + priv, &priv->contexts[IWL_RXON_CTX_BSS], sta); + if (sta_id == IWL_INVALID_STATION) + return -EINVAL; + } + + mutex_lock(&priv->mutex); + iwl_legacy_scan_cancel_timeout(priv, 100); + + switch (cmd) { + case SET_KEY: + if (static_key) + ret = iwl3945_set_static_key(priv, key); + else + ret = iwl3945_set_dynamic_key(priv, key, sta_id); + IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); + break; + case DISABLE_KEY: + if (static_key) + ret = iwl3945_remove_static_key(priv); + else + ret = iwl3945_clear_sta_key_info(priv, sta_id); + IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); + break; + default: + ret = -EINVAL; + } + + mutex_unlock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return ret; +} + +static int iwl3945_mac_sta_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_priv *priv = hw->priv; + struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv; + int ret; + bool is_ap = vif->type == NL80211_IFTYPE_STATION; + u8 sta_id; + + IWL_DEBUG_INFO(priv, "received request to add station %pM\n", + sta->addr); + mutex_lock(&priv->mutex); + IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n", + sta->addr); + sta_priv->common.sta_id = IWL_INVALID_STATION; + + + ret = iwl_legacy_add_station_common(priv, + &priv->contexts[IWL_RXON_CTX_BSS], + sta->addr, is_ap, sta, &sta_id); + if (ret) { + IWL_ERR(priv, "Unable to add station %pM (%d)\n", + sta->addr, ret); + /* Should we return success if return code is EEXIST ? 
*/ + mutex_unlock(&priv->mutex); + return ret; + } + + sta_priv->common.sta_id = sta_id; + + /* Initialize rate scaling */ + IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n", + sta->addr); + iwl3945_rs_rate_init(priv, sta, sta_id); + mutex_unlock(&priv->mutex); + + return 0; +} + +static void iwl3945_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct iwl_priv *priv = hw->priv; + __le32 filter_or = 0, filter_nand = 0; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + +#define CHK(test, flag) do { \ + if (*total_flags & (test)) \ + filter_or |= (flag); \ + else \ + filter_nand |= (flag); \ + } while (0) + + IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", + changed_flags, *total_flags); + + CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); + CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); + CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); + +#undef CHK + + mutex_lock(&priv->mutex); + + ctx->staging.filter_flags &= ~filter_nand; + ctx->staging.filter_flags |= filter_or; + + /* + * Not committing directly because hardware can perform a scan, + * but even if hw is ready, committing here breaks for some reason, + * we'll eventually commit the filter flags change anyway. + */ + + mutex_unlock(&priv->mutex); + + /* + * Receiving all multicast frames is always enabled by the + * default flags setup in iwl_legacy_connection_init_rx_config() + * since we currently do not support programming multicast + * filters into the device. + */ + *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | + FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; +} + + +/***************************************************************************** + * + * sysfs attributes + * + *****************************************************************************/ + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + +/* + * The following adds a new attribute to the sysfs representation + * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/) + * used for controlling the debug level. + * + * See the level definitions in iwl for details. + * + * The debug_level being managed using sysfs below is a per device debug + * level that is used instead of the global debug level if it (the per + * device debug level) is set. 
+ */ +static ssize_t iwl3945_show_debug_level(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv)); +} +static ssize_t iwl3945_store_debug_level(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + unsigned long val; + int ret; + + ret = strict_strtoul(buf, 0, &val); + if (ret) + IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf); + else { + priv->debug_level = val; + if (iwl_legacy_alloc_traffic_mem(priv)) + IWL_ERR(priv, + "Not enough memory to generate traffic log\n"); + } + return strnlen(buf, count); +} + +static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, + iwl3945_show_debug_level, iwl3945_store_debug_level); + +#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */ + +static ssize_t iwl3945_show_temperature(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + + return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv)); +} + +static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL); + +static ssize_t iwl3945_show_tx_power(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "%d\n", priv->tx_power_user_lmt); +} + +static ssize_t iwl3945_store_tx_power(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + char *p = (char *)buf; + u32 val; + + val = simple_strtoul(p, &p, 10); + if (p == buf) + IWL_INFO(priv, ": %s is not in decimal form.\n", buf); + else + iwl3945_hw_reg_set_txpower(priv, val); + + return count; +} + +static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power, iwl3945_store_tx_power); + +static ssize_t iwl3945_show_flags(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + return sprintf(buf, "0x%04X\n", ctx->active.flags); +} + +static ssize_t iwl3945_store_flags(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + u32 flags = simple_strtoul(buf, NULL, 0); + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + mutex_lock(&priv->mutex); + if (le32_to_cpu(ctx->staging.flags) != flags) { + /* Cancel any currently running scans... 
*/ + if (iwl_legacy_scan_cancel_timeout(priv, 100)) + IWL_WARN(priv, "Could not cancel scan.\n"); + else { + IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n", + flags); + ctx->staging.flags = cpu_to_le32(flags); + iwl3945_commit_rxon(priv, ctx); + } + } + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags); + +static ssize_t iwl3945_show_filter_flags(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + return sprintf(buf, "0x%04X\n", + le32_to_cpu(ctx->active.filter_flags)); +} + +static ssize_t iwl3945_store_filter_flags(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + u32 filter_flags = simple_strtoul(buf, NULL, 0); + + mutex_lock(&priv->mutex); + if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) { + /* Cancel any currently running scans... */ + if (iwl_legacy_scan_cancel_timeout(priv, 100)) + IWL_WARN(priv, "Could not cancel scan.\n"); + else { + IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = " + "0x%04X\n", filter_flags); + ctx->staging.filter_flags = + cpu_to_le32(filter_flags); + iwl3945_commit_rxon(priv, ctx); + } + } + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags, + iwl3945_store_filter_flags); + +static ssize_t iwl3945_show_measurement(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + struct iwl_spectrum_notification measure_report; + u32 size = sizeof(measure_report), len = 0, ofs = 0; + u8 *data = (u8 *)&measure_report; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + if (!(priv->measurement_status & MEASUREMENT_READY)) { + spin_unlock_irqrestore(&priv->lock, flags); + return 0; + } + memcpy(&measure_report, &priv->measure_report, size); + priv->measurement_status = 0; + spin_unlock_irqrestore(&priv->lock, flags); + + while (size && (PAGE_SIZE - len)) { + hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, + PAGE_SIZE - len, 1); + len = strlen(buf); + if (PAGE_SIZE - len) + buf[len++] = '\n'; + + ofs += 16; + size -= min(size, 16U); + } + + return len; +} + +static ssize_t iwl3945_store_measurement(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct ieee80211_measurement_params params = { + .channel = le16_to_cpu(ctx->active.channel), + .start_time = cpu_to_le64(priv->_3945.last_tsf), + .duration = cpu_to_le16(1), + }; + u8 type = IWL_MEASURE_BASIC; + u8 buffer[32]; + u8 channel; + + if (count) { + char *p = buffer; + strncpy(buffer, buf, min(sizeof(buffer), count)); + channel = simple_strtoul(p, NULL, 0); + if (channel) + params.channel = channel; + + p = buffer; + while (*p && *p != ' ') + p++; + if (*p) + type = simple_strtoul(p + 1, NULL, 0); + } + + IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on " + "channel %d (for '%s')\n", type, params.channel, buf); + iwl3945_get_measurement(priv, ¶ms, type); + + return count; +} + +static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, + iwl3945_show_measurement, iwl3945_store_measurement); + +static ssize_t 
iwl3945_store_retry_rate(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + priv->retry_rate = simple_strtoul(buf, NULL, 0); + if (priv->retry_rate <= 0) + priv->retry_rate = 1; + + return count; +} + +static ssize_t iwl3945_show_retry_rate(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "%d", priv->retry_rate); +} + +static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate, + iwl3945_store_retry_rate); + + +static ssize_t iwl3945_show_channels(struct device *d, + struct device_attribute *attr, char *buf) +{ + /* all this shit doesn't belong into sysfs anyway */ + return 0; +} + +static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL); + +static ssize_t iwl3945_show_antenna(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + + return sprintf(buf, "%d\n", iwl3945_mod_params.antenna); +} + +static ssize_t iwl3945_store_antenna(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d); + int ant; + + if (count == 0) + return 0; + + if (sscanf(buf, "%1i", &ant) != 1) { + IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n"); + return count; + } + + if ((ant >= 0) && (ant <= 2)) { + IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant); + iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant; + } else + IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant); + + + return count; +} + +static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna, iwl3945_store_antenna); + +static ssize_t iwl3945_show_status(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + return sprintf(buf, "0x%08x\n", (int)priv->status); +} + +static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL); + +static ssize_t iwl3945_dump_error_log(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + char *p = (char *)buf; + + if (p[0] == '1') + iwl3945_dump_nic_error_log(priv); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log); + +/***************************************************************************** + * + * driver setup and tear down + * + *****************************************************************************/ + +static void iwl3945_setup_deferred_work(struct iwl_priv *priv) +{ + priv->workqueue = create_singlethread_workqueue(DRV_NAME); + + init_waitqueue_head(&priv->wait_command_queue); + + INIT_WORK(&priv->restart, iwl3945_bg_restart); + INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish); + INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); + INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); + INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll); + + iwl_legacy_setup_scan_deferred_work(priv); + + iwl3945_hw_setup_deferred_work(priv); + + init_timer(&priv->watchdog); + priv->watchdog.data = (unsigned long)priv; + priv->watchdog.function = iwl_legacy_bg_watchdog; + + tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + iwl3945_irq_tasklet, (unsigned long)priv); +} + +static void 
iwl3945_cancel_deferred_work(struct iwl_priv *priv) +{ + iwl3945_hw_cancel_deferred_work(priv); + + cancel_delayed_work_sync(&priv->init_alive_start); + cancel_delayed_work(&priv->alive_start); + + iwl_legacy_cancel_scan_deferred_work(priv); +} + +static struct attribute *iwl3945_sysfs_entries[] = { + &dev_attr_antenna.attr, + &dev_attr_channels.attr, + &dev_attr_dump_errors.attr, + &dev_attr_flags.attr, + &dev_attr_filter_flags.attr, + &dev_attr_measurement.attr, + &dev_attr_retry_rate.attr, + &dev_attr_status.attr, + &dev_attr_temperature.attr, + &dev_attr_tx_power.attr, +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + &dev_attr_debug_level.attr, +#endif + NULL +}; + +static struct attribute_group iwl3945_attribute_group = { + .name = NULL, /* put in device directory */ + .attrs = iwl3945_sysfs_entries, +}; + +struct ieee80211_ops iwl3945_hw_ops = { + .tx = iwl3945_mac_tx, + .start = iwl3945_mac_start, + .stop = iwl3945_mac_stop, + .add_interface = iwl_legacy_mac_add_interface, + .remove_interface = iwl_legacy_mac_remove_interface, + .change_interface = iwl_legacy_mac_change_interface, + .config = iwl_legacy_mac_config, + .configure_filter = iwl3945_configure_filter, + .set_key = iwl3945_mac_set_key, + .conf_tx = iwl_legacy_mac_conf_tx, + .reset_tsf = iwl_legacy_mac_reset_tsf, + .bss_info_changed = iwl_legacy_mac_bss_info_changed, + .hw_scan = iwl_legacy_mac_hw_scan, + .sta_add = iwl3945_mac_sta_add, + .sta_remove = iwl_legacy_mac_sta_remove, + .tx_last_beacon = iwl_legacy_mac_tx_last_beacon, +}; + +static int iwl3945_init_drv(struct iwl_priv *priv) +{ + int ret; + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + + priv->retry_rate = 1; + priv->beacon_skb = NULL; + + spin_lock_init(&priv->sta_lock); + spin_lock_init(&priv->hcmd_lock); + + INIT_LIST_HEAD(&priv->free_frames); + + mutex_init(&priv->mutex); + + priv->ieee_channels = NULL; + priv->ieee_rates = NULL; + priv->band = IEEE80211_BAND_2GHZ; + + priv->iw_mode = NL80211_IFTYPE_STATION; + priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; + + /* initialize force reset */ + priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD; + + if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { + IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n", + eeprom->version); + ret = -EINVAL; + goto err; + } + ret = iwl_legacy_init_channel_map(priv); + if (ret) { + IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); + goto err; + } + + /* Set up txpower settings in driver for all channels */ + if (iwl3945_txpower_set_from_eeprom(priv)) { + ret = -EIO; + goto err_free_channel_map; + } + + ret = iwl_legacy_init_geos(priv); + if (ret) { + IWL_ERR(priv, "initializing geos failed: %d\n", ret); + goto err_free_channel_map; + } + iwl3945_init_hw_rates(priv, priv->ieee_rates); + + return 0; + +err_free_channel_map: + iwl_legacy_free_channel_map(priv); +err: + return ret; +} + +#define IWL3945_MAX_PROBE_REQUEST 200 + +static int iwl3945_setup_mac(struct iwl_priv *priv) +{ + int ret; + struct ieee80211_hw *hw = priv->hw; + + hw->rate_control_algorithm = "iwl-3945-rs"; + hw->sta_data_size = sizeof(struct iwl3945_sta_priv); + hw->vif_data_size = sizeof(struct iwl_vif_priv); + + /* Tell mac80211 our characteristics */ + hw->flags = IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_SPECTRUM_MGMT; + + hw->wiphy->interface_modes = + priv->contexts[IWL_RXON_CTX_BSS].interface_modes; + + hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | + WIPHY_FLAG_DISABLE_BEACON_HINTS | + WIPHY_FLAG_IBSS_RSN; + + hw->wiphy->max_scan_ssids = 
PROBE_OPTION_MAX_3945; + /* we create the 802.11 header and a zero-length SSID element */ + hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2; + + /* Default value; 4 EDCA QOS priorities */ + hw->queues = 4; + + if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) + priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + &priv->bands[IEEE80211_BAND_2GHZ]; + + if (priv->bands[IEEE80211_BAND_5GHZ].n_channels) + priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + &priv->bands[IEEE80211_BAND_5GHZ]; + + iwl_legacy_leds_init(priv); + + ret = ieee80211_register_hw(priv->hw); + if (ret) { + IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); + return ret; + } + priv->mac80211_registered = 1; + + return 0; +} + +static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int err = 0, i; + struct iwl_priv *priv; + struct ieee80211_hw *hw; + struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); + struct iwl3945_eeprom *eeprom; + unsigned long flags; + + /*********************** + * 1. Allocating HW data + * ********************/ + + /* mac80211 allocates memory for this device instance, including + * space for this driver's private structure */ + hw = iwl_legacy_alloc_all(cfg); + if (hw == NULL) { + pr_err("Can not allocate network device\n"); + err = -ENOMEM; + goto out; + } + priv = hw->priv; + SET_IEEE80211_DEV(hw, &pdev->dev); + + priv->cmd_queue = IWL39_CMD_QUEUE_NUM; + + /* 3945 has only one valid context */ + priv->valid_contexts = BIT(IWL_RXON_CTX_BSS); + + for (i = 0; i < NUM_IWL_RXON_CTX; i++) + priv->contexts[i].ctxid = i; + + priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON; + priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING; + priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC; + priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM; + priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID; + priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY; + priv->contexts[IWL_RXON_CTX_BSS].interface_modes = + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC); + priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS; + priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS; + priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS; + + /* + * Disabling hardware scan means that mac80211 will perform scans + * "the hard way", rather than using device's scan. + */ + if (iwl3945_mod_params.disable_hw_scan) { + IWL_DEBUG_INFO(priv, "Disabling hw_scan\n"); + iwl3945_hw_ops.hw_scan = NULL; + } + + IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); + priv->cfg = cfg; + priv->pci_dev = pdev; + priv->inta_mask = CSR_INI_SET_MASK; + + if (iwl_legacy_alloc_traffic_mem(priv)) + IWL_ERR(priv, "Not enough memory to generate traffic log\n"); + + /*************************** + * 2. Initializing PCI bus + * *************************/ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + + if (pci_enable_device(pdev)) { + err = -ENODEV; + goto out_ieee80211_free_hw; + } + + pci_set_master(pdev); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + IWL_WARN(priv, "No suitable DMA available.\n"); + goto out_pci_disable_device; + } + + pci_set_drvdata(pdev, priv); + err = pci_request_regions(pdev, DRV_NAME); + if (err) + goto out_pci_disable_device; + + /*********************** + * 3. 
Read REV Register + * ********************/ + priv->hw_base = pci_iomap(pdev, 0, 0); + if (!priv->hw_base) { + err = -ENODEV; + goto out_pci_release_regions; + } + + IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n", + (unsigned long long) pci_resource_len(pdev, 0)); + IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base); + + /* We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state */ + pci_write_config_byte(pdev, 0x41, 0x00); + + /* these spin locks will be used in apm_ops.init and EEPROM access + * we should init now + */ + spin_lock_init(&priv->reg_lock); + spin_lock_init(&priv->lock); + + /* + * stop and reset the on-board processor just in case it is in a + * strange state ... like being left stranded by a primary kernel + * and this is now the kdump kernel trying to start up + */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + /*********************** + * 4. Read EEPROM + * ********************/ + + /* Read the EEPROM */ + err = iwl_legacy_eeprom_init(priv); + if (err) { + IWL_ERR(priv, "Unable to init EEPROM\n"); + goto out_iounmap; + } + /* MAC Address location in EEPROM same for 3945/4965 */ + eeprom = (struct iwl3945_eeprom *)priv->eeprom; + IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address); + SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address); + + /*********************** + * 5. Setup HW Constants + * ********************/ + /* Device-specific setup */ + if (iwl3945_hw_set_hw_params(priv)) { + IWL_ERR(priv, "failed to set hw settings\n"); + goto out_eeprom_free; + } + + /*********************** + * 6. Setup priv + * ********************/ + + err = iwl3945_init_drv(priv); + if (err) { + IWL_ERR(priv, "initializing driver failed\n"); + goto out_unset_hw_params; + } + + IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n", + priv->cfg->name); + + /*********************** + * 7. Setup Services + * ********************/ + + spin_lock_irqsave(&priv->lock, flags); + iwl_legacy_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + pci_enable_msi(priv->pci_dev); + + err = request_irq(priv->pci_dev->irq, iwl_legacy_isr, + IRQF_SHARED, DRV_NAME, priv); + if (err) { + IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); + goto out_disable_msi; + } + + err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group); + if (err) { + IWL_ERR(priv, "failed to create sysfs device attributes\n"); + goto out_release_irq; + } + + iwl_legacy_set_rxon_channel(priv, + &priv->bands[IEEE80211_BAND_2GHZ].channels[5], + &priv->contexts[IWL_RXON_CTX_BSS]); + iwl3945_setup_deferred_work(priv); + iwl3945_setup_rx_handlers(priv); + iwl_legacy_power_initialize(priv); + + /********************************* + * 8. Setup and Register mac80211 + * *******************************/ + + iwl_legacy_enable_interrupts(priv); + + err = iwl3945_setup_mac(priv); + if (err) + goto out_remove_sysfs; + + err = iwl_legacy_dbgfs_register(priv, DRV_NAME); + if (err) + IWL_ERR(priv, "failed to create debugfs files. 
Ignoring error: %d\n", err); + + /* Start monitoring the killswitch */ + queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll, + 2 * HZ); + + return 0; + + out_remove_sysfs: + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); + out_release_irq: + free_irq(priv->pci_dev->irq, priv); + out_disable_msi: + pci_disable_msi(priv->pci_dev); + iwl_legacy_free_geos(priv); + iwl_legacy_free_channel_map(priv); + out_unset_hw_params: + iwl3945_unset_hw_params(priv); + out_eeprom_free: + iwl_legacy_eeprom_free(priv); + out_iounmap: + pci_iounmap(pdev, priv->hw_base); + out_pci_release_regions: + pci_release_regions(pdev); + out_pci_disable_device: + pci_set_drvdata(pdev, NULL); + pci_disable_device(pdev); + out_ieee80211_free_hw: + iwl_legacy_free_traffic_mem(priv); + ieee80211_free_hw(priv->hw); + out: + return err; +} + +static void __devexit iwl3945_pci_remove(struct pci_dev *pdev) +{ + struct iwl_priv *priv = pci_get_drvdata(pdev); + unsigned long flags; + + if (!priv) + return; + + IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); + + iwl_legacy_dbgfs_unregister(priv); + + set_bit(STATUS_EXIT_PENDING, &priv->status); + + iwl_legacy_leds_exit(priv); + + if (priv->mac80211_registered) { + ieee80211_unregister_hw(priv->hw); + priv->mac80211_registered = 0; + } else { + iwl3945_down(priv); + } + + /* + * Make sure device is reset to low power before unloading driver. + * This may be redundant with iwl_down(), but there are paths to + * run iwl_down() without calling apm_ops.stop(), and there are + * paths to avoid running iwl_down() at all before leaving driver. + * This (inexpensive) call *makes sure* device is reset. + */ + iwl_legacy_apm_stop(priv); + + /* make sure we flush any pending irq or + * tasklet for the driver + */ + spin_lock_irqsave(&priv->lock, flags); + iwl_legacy_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl3945_synchronize_irq(priv); + + sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); + + cancel_delayed_work_sync(&priv->_3945.rfkill_poll); + + iwl3945_dealloc_ucode_pci(priv); + + if (priv->rxq.bd) + iwl3945_rx_queue_free(priv, &priv->rxq); + iwl3945_hw_txq_ctx_free(priv); + + iwl3945_unset_hw_params(priv); + + /*netif_stop_queue(dev); */ + flush_workqueue(priv->workqueue); + + /* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes + * priv->workqueue... so we can't take down the workqueue + * until now... 
*/ + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + iwl_legacy_free_traffic_mem(priv); + + free_irq(pdev->irq, priv); + pci_disable_msi(pdev); + + pci_iounmap(pdev, priv->hw_base); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + iwl_legacy_free_channel_map(priv); + iwl_legacy_free_geos(priv); + kfree(priv->scan_cmd); + if (priv->beacon_skb) + dev_kfree_skb(priv->beacon_skb); + + ieee80211_free_hw(priv->hw); +} + + +/***************************************************************************** + * + * driver and module entry point + * + *****************************************************************************/ + +static struct pci_driver iwl3945_driver = { + .name = DRV_NAME, + .id_table = iwl3945_hw_card_ids, + .probe = iwl3945_pci_probe, + .remove = __devexit_p(iwl3945_pci_remove), + .driver.pm = IWL_LEGACY_PM_OPS, +}; + +static int __init iwl3945_init(void) +{ + + int ret; + pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); + pr_info(DRV_COPYRIGHT "\n"); + + ret = iwl3945_rate_control_register(); + if (ret) { + pr_err("Unable to register rate control algorithm: %d\n", ret); + return ret; + } + + ret = pci_register_driver(&iwl3945_driver); + if (ret) { + pr_err("Unable to initialize PCI module\n"); + goto error_register; + } + + return ret; + +error_register: + iwl3945_rate_control_unregister(); + return ret; +} + +static void __exit iwl3945_exit(void) +{ + pci_unregister_driver(&iwl3945_driver); + iwl3945_rate_control_unregister(); +} + +MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX)); + +module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO); +MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); +module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO); +MODULE_PARM_DESC(swcrypto, + "using software crypto (default 1 [software])"); +module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, + int, S_IRUGO); +MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)"); +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "debug output mask"); +#endif +module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO); +MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); + +module_exit(iwl3945_exit); +module_init(iwl3945_init); diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl4965-base.c b/trunk/drivers/net/wireless/iwlegacy/iwl4965-base.c new file mode 100644 index 000000000000..d2fba9eae153 --- /dev/null +++ b/trunk/drivers/net/wireless/iwlegacy/iwl4965-base.c @@ -0,0 +1,3281 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#define DRV_NAME "iwl4965" + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +#include "iwl-sta.h" +#include "iwl-4965-calib.h" +#include "iwl-4965.h" +#include "iwl-4965-led.h" + + +/****************************************************************************** + * + * module boiler plate + * + ******************************************************************************/ + +/* + * module name, copyright, version, etc. + */ +#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux" + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +#define VD "d" +#else +#define VD +#endif + +#define DRV_VERSION IWLWIFI_VERSION VD + + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("iwl4965"); + +void iwl4965_update_chain_flags(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx; + + if (priv->cfg->ops->hcmd->set_rxon_chain) { + for_each_context(priv, ctx) { + priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + if (ctx->active.rx_chain != ctx->staging.rx_chain) + iwl_legacy_commit_rxon(priv, ctx); + } + } +} + +static void iwl4965_clear_free_frames(struct iwl_priv *priv) +{ + struct list_head *element; + + IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n", + priv->frames_count); + + while (!list_empty(&priv->free_frames)) { + element = priv->free_frames.next; + list_del(element); + kfree(list_entry(element, struct iwl_frame, list)); + priv->frames_count--; + } + + if (priv->frames_count) { + IWL_WARN(priv, "%d frames still in use. 
Did we lose one?\n", + priv->frames_count); + priv->frames_count = 0; + } +} + +static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv) +{ + struct iwl_frame *frame; + struct list_head *element; + if (list_empty(&priv->free_frames)) { + frame = kzalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) { + IWL_ERR(priv, "Could not allocate frame!\n"); + return NULL; + } + + priv->frames_count++; + return frame; + } + + element = priv->free_frames.next; + list_del(element); + return list_entry(element, struct iwl_frame, list); +} + +static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame) +{ + memset(frame, 0, sizeof(*frame)); + list_add(&frame->list, &priv->free_frames); +} + +static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + int left) +{ + lockdep_assert_held(&priv->mutex); + + if (!priv->beacon_skb) + return 0; + + if (priv->beacon_skb->len > left) + return 0; + + memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len); + + return priv->beacon_skb->len; +} + +/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */ +static void iwl4965_set_beacon_tim(struct iwl_priv *priv, + struct iwl_tx_beacon_cmd *tx_beacon_cmd, + u8 *beacon, u32 frame_size) +{ + u16 tim_idx; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon; + + /* + * The index is relative to frame start but we start looking at the + * variable-length part of the beacon. + */ + tim_idx = mgmt->u.beacon.variable - beacon; + + /* Parse variable-length elements of beacon to find WLAN_EID_TIM */ + while ((tim_idx < (frame_size - 2)) && + (beacon[tim_idx] != WLAN_EID_TIM)) + tim_idx += beacon[tim_idx+1] + 2; + + /* If TIM field was found, set variables */ + if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) { + tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx); + tx_beacon_cmd->tim_size = beacon[tim_idx+1]; + } else + IWL_WARN(priv, "Unable to find TIM Element in beacon\n"); +} + +static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv, + struct iwl_frame *frame) +{ + struct iwl_tx_beacon_cmd *tx_beacon_cmd; + u32 frame_size; + u32 rate_flags; + u32 rate; + /* + * We have to set up the TX command, the TX Beacon command, and the + * beacon contents. 
+ */ + + lockdep_assert_held(&priv->mutex); + + if (!priv->beacon_ctx) { + IWL_ERR(priv, "trying to build beacon w/o beacon context!\n"); + return 0; + } + + /* Initialize memory */ + tx_beacon_cmd = &frame->u.beacon; + memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); + + /* Set up TX beacon contents */ + frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame, + sizeof(frame->u) - sizeof(*tx_beacon_cmd)); + if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE)) + return 0; + if (!frame_size) + return 0; + + /* Set up TX command fields */ + tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); + tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id; + tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK | + TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK; + + /* Set up TX beacon command fields */ + iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame, + frame_size); + + /* Set up packet rate and flags */ + rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx); + priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant, + priv->hw_params.valid_tx_ant); + rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant); + if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE)) + rate_flags |= RATE_MCS_CCK_MSK; + tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, + rate_flags); + + return sizeof(*tx_beacon_cmd) + frame_size; +} + +int iwl4965_send_beacon_cmd(struct iwl_priv *priv) +{ + struct iwl_frame *frame; + unsigned int frame_size; + int rc; + + frame = iwl4965_get_free_frame(priv); + if (!frame) { + IWL_ERR(priv, "Could not obtain free frame buffer for beacon " + "command.\n"); + return -ENOMEM; + } + + frame_size = iwl4965_hw_get_beacon_cmd(priv, frame); + if (!frame_size) { + IWL_ERR(priv, "Error configuring the beacon command\n"); + iwl4965_free_frame(priv, frame); + return -EINVAL; + } + + rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, + &frame->u.cmd[0]); + + iwl4965_free_frame(priv, frame); + + return rc; +} + +static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + + dma_addr_t addr = get_unaligned_le32(&tb->lo); + if (sizeof(dma_addr_t) > sizeof(u32)) + addr |= + ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16; + + return addr; +} + +static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + + return le16_to_cpu(tb->hi_n_len) >> 4; +} + +static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, + dma_addr_t addr, u16 len) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + u16 hi_n_len = len << 4; + + put_unaligned_le32(addr, &tb->lo); + if (sizeof(dma_addr_t) > sizeof(u32)) + hi_n_len |= ((addr >> 16) >> 16) & 0xF; + + tb->hi_n_len = cpu_to_le16(hi_n_len); + + tfd->num_tbs = idx + 1; +} + +static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd) +{ + return tfd->num_tbs & 0x1f; +} + +/** + * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] + * @priv - driver private data + * @txq - tx queue + * + * Does NOT advance any TFD circular buffer read/write indexes + * Does NOT free the TFD itself (which is within circular buffer) + */ +void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds; + struct iwl_tfd *tfd; + struct pci_dev *dev = priv->pci_dev; + int index = txq->q.read_ptr; + int i; + int 
num_tbs; + + tfd = &tfd_tmp[index]; + + /* Sanity check on number of chunks */ + num_tbs = iwl4965_tfd_get_num_tbs(tfd); + + if (num_tbs >= IWL_NUM_OF_TBS) { + IWL_ERR(priv, "Too many chunks: %i\n", num_tbs); + /* @todo issue fatal error, it is quite serious situation */ + return; + } + + /* Unmap tx_cmd */ + if (num_tbs) + pci_unmap_single(dev, + dma_unmap_addr(&txq->meta[index], mapping), + dma_unmap_len(&txq->meta[index], len), + PCI_DMA_BIDIRECTIONAL); + + /* Unmap chunks, if any. */ + for (i = 1; i < num_tbs; i++) + pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i), + iwl4965_tfd_tb_get_len(tfd, i), + PCI_DMA_TODEVICE); + + /* free SKB */ + if (txq->txb) { + struct sk_buff *skb; + + skb = txq->txb[txq->q.read_ptr].skb; + + /* can be called from irqs-disabled context */ + if (skb) { + dev_kfree_skb_any(skb); + txq->txb[txq->q.read_ptr].skb = NULL; + } + } +} + +int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + dma_addr_t addr, u16 len, + u8 reset, u8 pad) +{ + struct iwl_queue *q; + struct iwl_tfd *tfd, *tfd_tmp; + u32 num_tbs; + + q = &txq->q; + tfd_tmp = (struct iwl_tfd *)txq->tfds; + tfd = &tfd_tmp[q->write_ptr]; + + if (reset) + memset(tfd, 0, sizeof(*tfd)); + + num_tbs = iwl4965_tfd_get_num_tbs(tfd); + + /* Each TFD can point to a maximum 20 Tx buffers */ + if (num_tbs >= IWL_NUM_OF_TBS) { + IWL_ERR(priv, "Error can not send more than %d chunks\n", + IWL_NUM_OF_TBS); + return -EINVAL; + } + + BUG_ON(addr & ~DMA_BIT_MASK(36)); + if (unlikely(addr & ~IWL_TX_DMA_MASK)) + IWL_ERR(priv, "Unaligned address = %llx\n", + (unsigned long long)addr); + + iwl4965_tfd_set_tb(tfd, num_tbs, addr, len); + + return 0; +} + +/* + * Tell nic where to find circular buffer of Tx Frame Descriptors for + * given Tx queue, and enable the DMA channel used for that queue. + * + * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA + * channels supported in hardware. + */ +int iwl4965_hw_tx_queue_init(struct iwl_priv *priv, + struct iwl_tx_queue *txq) +{ + int txq_id = txq->q.id; + + /* Circular buffer (TFD queue in DRAM) physical base address */ + iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id), + txq->q.dma_addr >> 8); + + return 0; +} + +/****************************************************************************** + * + * Generic RX handler implementations + * + ******************************************************************************/ +static void iwl4965_rx_reply_alive(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_alive_resp *palive; + struct delayed_work *pwork; + + palive = &pkt->u.alive_frame; + + IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision " + "0x%01X 0x%01X\n", + palive->is_valid, palive->ver_type, + palive->ver_subtype); + + if (palive->ver_subtype == INITIALIZE_SUBTYPE) { + IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); + memcpy(&priv->card_alive_init, + &pkt->u.alive_frame, + sizeof(struct iwl_init_alive_resp)); + pwork = &priv->init_alive_start; + } else { + IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); + memcpy(&priv->card_alive, &pkt->u.alive_frame, + sizeof(struct iwl_alive_resp)); + pwork = &priv->alive_start; + } + + /* We delay the ALIVE response by 5ms to + * give the HW RF Kill time to activate... 
*/ + if (palive->is_valid == UCODE_VALID_OK) + queue_delayed_work(priv->workqueue, pwork, + msecs_to_jiffies(5)); + else + IWL_WARN(priv, "uCode did not respond OK.\n"); +} + +/** + * iwl4965_bg_statistics_periodic - Timer callback to queue statistics + * + * This callback is provided in order to send a statistics request. + * + * This timer function is continually reset to execute within + * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION + * was received. We need to ensure we receive the statistics in order + * to update the temperature used for calibrating the TXPOWER. + */ +static void iwl4965_bg_statistics_periodic(unsigned long data) +{ + struct iwl_priv *priv = (struct iwl_priv *)data; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + /* dont send host command if rf-kill is on */ + if (!iwl_legacy_is_ready_rf(priv)) + return; + + iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false); +} + +static void iwl4965_rx_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl4965_beacon_notif *beacon = + (struct iwl4965_beacon_notif *)pkt->u.raw; +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); + + IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " + "tsf %d %d rate %d\n", + le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK, + beacon->beacon_notify_hdr.failure_frame, + le32_to_cpu(beacon->ibss_mgr_status), + le32_to_cpu(beacon->high_tsf), + le32_to_cpu(beacon->low_tsf), rate); +#endif + + priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); +} + +static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv) +{ + unsigned long flags; + + IWL_DEBUG_POWER(priv, "Stop all queues\n"); + + if (priv->mac80211_registered) + ieee80211_stop_queues(priv->hw); + + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); + iwl_read32(priv, CSR_UCODE_DRV_GP1); + + spin_lock_irqsave(&priv->reg_lock, flags); + if (!iwl_grab_nic_access(priv)) + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, flags); +} + +/* Handle notification from uCode that card's power state is changing + * due to software, hardware, or critical temperature RFKILL */ +static void iwl4965_rx_card_state_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); + unsigned long status = priv->status; + + IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n", + (flags & HW_CARD_DISABLED) ? "Kill" : "On", + (flags & SW_CARD_DISABLED) ? "Kill" : "On", + (flags & CT_CARD_DISABLED) ? 
+ "Reached" : "Not reached"); + + if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | + CT_CARD_DISABLED)) { + + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C, + HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); + + if (!(flags & RXON_CARD_DISABLED)) { + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C, + HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); + } + } + + if (flags & CT_CARD_DISABLED) + iwl4965_perform_ct_kill_task(priv); + + if (flags & HW_CARD_DISABLED) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + if (!(flags & RXON_CARD_DISABLED)) + iwl_legacy_scan_cancel(priv); + + if ((test_bit(STATUS_RF_KILL_HW, &status) != + test_bit(STATUS_RF_KILL_HW, &priv->status))) + wiphy_rfkill_set_hw_state(priv->hw->wiphy, + test_bit(STATUS_RF_KILL_HW, &priv->status)); + else + wake_up(&priv->wait_command_queue); +} + +/** + * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks + * + * Setup the RX handlers for each of the reply types sent from the uCode + * to the host. + * + * This function chains into the hardware specific files for them to setup + * any hardware specific handlers as well. + */ +static void iwl4965_setup_rx_handlers(struct iwl_priv *priv) +{ + priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive; + priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error; + priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa; + priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = + iwl_legacy_rx_spectrum_measure_notif; + priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif; + priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = + iwl_legacy_rx_pm_debug_statistics_notif; + priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif; + + /* + * The same handler is used for both the REPLY to a discrete + * statistics request from the host as well as for the periodic + * statistics notifications (after received beacons) from the uCode. + */ + priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics; + priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics; + + iwl_legacy_setup_rx_scan_handlers(priv); + + /* status change handler */ + priv->rx_handlers[CARD_STATE_NOTIFICATION] = + iwl4965_rx_card_state_notif; + + priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] = + iwl4965_rx_missed_beacon_notif; + /* Rx handlers */ + priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy; + priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx; + /* block ack */ + priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba; + /* Set up hardware specific Rx handlers */ + priv->cfg->ops->lib->rx_handler_setup(priv); +} + +/** + * iwl4965_rx_handle - Main entry function for receiving responses from uCode + * + * Uses the priv->rx_handlers callback function array to invoke + * the appropriate handlers, including command responses, + * frame-received notifications, and other notifications. + */ +void iwl4965_rx_handle(struct iwl_priv *priv) +{ + struct iwl_rx_mem_buffer *rxb; + struct iwl_rx_packet *pkt; + struct iwl_rx_queue *rxq = &priv->rxq; + u32 r, i; + int reclaim; + unsigned long flags; + u8 fill_rx = 0; + u32 count = 8; + int total_empty; + + /* uCode's read index (stored in shared DRAM) indicates the last Rx + * buffer that the driver may process (last buffer filled by ucode). 
*/ + r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; + i = rxq->read; + + /* Rx interrupt, but nothing sent from uCode */ + if (i == r) + IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); + + /* calculate total frames need to be restock after handling RX */ + total_empty = r - rxq->write_actual; + if (total_empty < 0) + total_empty += RX_QUEUE_SIZE; + + if (total_empty > (RX_QUEUE_SIZE / 2)) + fill_rx = 1; + + while (i != r) { + int len; + + rxb = rxq->queue[i]; + + /* If an RXB doesn't have a Rx queue slot associated with it, + * then a bug has been introduced in the queue refilling + * routines -- catch it here */ + BUG_ON(rxb == NULL); + + rxq->queue[i] = NULL; + + pci_unmap_page(priv->pci_dev, rxb->page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + pkt = rxb_addr(rxb); + + len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + len += sizeof(u32); /* account for status word */ + trace_iwlwifi_legacy_dev_rx(priv, pkt, len); + + /* Reclaim a command buffer only if this packet is a response + * to a (driver-originated) command. + * If the packet (e.g. Rx frame) originated from uCode, + * there is no command buffer to reclaim. + * Ucode should set SEQ_RX_FRAME bit if ucode-originated, + * but apparently a few don't get set; catch them here. */ + reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && + (pkt->hdr.cmd != REPLY_RX_PHY_CMD) && + (pkt->hdr.cmd != REPLY_RX) && + (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) && + (pkt->hdr.cmd != REPLY_COMPRESSED_BA) && + (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && + (pkt->hdr.cmd != REPLY_TX); + + /* Based on type of command response or notification, + * handle those that need handling via function in + * rx_handlers table. See iwl4965_setup_rx_handlers() */ + if (priv->rx_handlers[pkt->hdr.cmd]) { + IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, + i, iwl_legacy_get_cmd_string(pkt->hdr.cmd), + pkt->hdr.cmd); + priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; + priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); + } else { + /* No handling needed */ + IWL_DEBUG_RX(priv, + "r %d i %d No handler needed for %s, 0x%02x\n", + r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd), + pkt->hdr.cmd); + } + + /* + * XXX: After here, we should always check rxb->page + * against NULL before touching it or its virtual + * memory (pkt). Because some rx_handler might have + * already taken or freed the pages. + */ + + if (reclaim) { + /* Invoke any callbacks, transfer the buffer to caller, + * and fire off the (possibly) blocking iwl_legacy_send_cmd() + * as we reclaim the driver command queue */ + if (rxb->page) + iwl_legacy_tx_cmd_complete(priv, rxb); + else + IWL_WARN(priv, "Claim null rxb?\n"); + } + + /* Reuse the page if possible. For notification packets and + * SKBs that fail to Rx correctly, add them back into the + * rx_free list for reuse later. */ + spin_lock_irqsave(&rxq->lock, flags); + if (rxb->page != NULL) { + rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page, + 0, PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + } else + list_add_tail(&rxb->list, &rxq->rx_used); + + spin_unlock_irqrestore(&rxq->lock, flags); + + i = (i + 1) & RX_QUEUE_MASK; + /* If there are a lot of unused frames, + * restock the Rx queue so ucode wont assert. 
*/ + if (fill_rx) { + count++; + if (count >= 8) { + rxq->read = i; + iwl4965_rx_replenish_now(priv); + count = 0; + } + } + } + + /* Backtrack one entry */ + rxq->read = i; + if (fill_rx) + iwl4965_rx_replenish_now(priv); + else + iwl4965_rx_queue_restock(priv); +} + +/* call this function to flush any scheduled tasklet */ +static inline void iwl4965_synchronize_irq(struct iwl_priv *priv) +{ + /* wait to make sure we flush pending tasklet*/ + synchronize_irq(priv->pci_dev->irq); + tasklet_kill(&priv->irq_tasklet); +} + +static void iwl4965_irq_tasklet(struct iwl_priv *priv) +{ + u32 inta, handled = 0; + u32 inta_fh; + unsigned long flags; + u32 i; +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + u32 inta_mask; +#endif + + spin_lock_irqsave(&priv->lock, flags); + + /* Ack/clear/reset pending uCode interrupts. + * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, + * and will clear only when CSR_FH_INT_STATUS gets cleared. */ + inta = iwl_read32(priv, CSR_INT); + iwl_write32(priv, CSR_INT, inta); + + /* Ack/clear/reset pending flow-handler (DMA) interrupts. + * Any new interrupts that happen after this, either while we're + * in this tasklet, or later, will show up in next ISR/tasklet. */ + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) { + /* just for debug */ + inta_mask = iwl_read32(priv, CSR_INT_MASK); + IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", + inta, inta_mask, inta_fh); + } +#endif + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not + * atomic, make sure that inta covers all the interrupts that + * we've discovered, even if FH interrupt came in just after + * reading CSR_INT. */ + if (inta_fh & CSR49_FH_INT_RX_MASK) + inta |= CSR_INT_BIT_FH_RX; + if (inta_fh & CSR49_FH_INT_TX_MASK) + inta |= CSR_INT_BIT_FH_TX; + + /* Now service all interrupt bits discovered above. */ + if (inta & CSR_INT_BIT_HW_ERR) { + IWL_ERR(priv, "Hardware error detected. Restarting.\n"); + + /* Tell the device to stop sending interrupts */ + iwl_legacy_disable_interrupts(priv); + + priv->isr_stats.hw++; + iwl_legacy_irq_handle_error(priv); + + handled |= CSR_INT_BIT_HW_ERR; + + return; + } + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) { + /* NIC fires this, but we don't use it, redundant with WAKEUP */ + if (inta & CSR_INT_BIT_SCD) { + IWL_DEBUG_ISR(priv, "Scheduler finished to transmit " + "the frame/frames.\n"); + priv->isr_stats.sch++; + } + + /* Alive notification via Rx interrupt will do the real work */ + if (inta & CSR_INT_BIT_ALIVE) { + IWL_DEBUG_ISR(priv, "Alive interrupt\n"); + priv->isr_stats.alive++; + } + } +#endif + /* Safely ignore these bits for debug checks below */ + inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); + + /* HW RF KILL switch toggled */ + if (inta & CSR_INT_BIT_RF_KILL) { + int hw_rf_kill = 0; + if (!(iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) + hw_rf_kill = 1; + + IWL_WARN(priv, "RF_KILL bit toggled to %s.\n", + hw_rf_kill ? "disable radio" : "enable radio"); + + priv->isr_stats.rfkill++; + + /* driver only loads ucode once setting the interface up. + * the driver allows loading the ucode even if the radio + * is killed. Hence update the killswitch state here. The + * rfkill handler will care about restarting if needed. 
+ */ + if (!test_bit(STATUS_ALIVE, &priv->status)) { + if (hw_rf_kill) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill); + } + + handled |= CSR_INT_BIT_RF_KILL; + } + + /* Chip got too hot and stopped itself */ + if (inta & CSR_INT_BIT_CT_KILL) { + IWL_ERR(priv, "Microcode CT kill error detected.\n"); + priv->isr_stats.ctkill++; + handled |= CSR_INT_BIT_CT_KILL; + } + + /* Error detected by uCode */ + if (inta & CSR_INT_BIT_SW_ERR) { + IWL_ERR(priv, "Microcode SW error detected. " + " Restarting 0x%X.\n", inta); + priv->isr_stats.sw++; + iwl_legacy_irq_handle_error(priv); + handled |= CSR_INT_BIT_SW_ERR; + } + + /* + * uCode wakes up after power-down sleep. + * Tell device about any new tx or host commands enqueued, + * and about any Rx buffers made available while asleep. + */ + if (inta & CSR_INT_BIT_WAKEUP) { + IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); + iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq); + for (i = 0; i < priv->hw_params.max_txq_num; i++) + iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]); + priv->isr_stats.wakeup++; + handled |= CSR_INT_BIT_WAKEUP; + } + + /* All uCode command responses, including Tx command responses, + * Rx "responses" (frame-received notification), and other + * notifications from uCode come through here*/ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { + iwl4965_rx_handle(priv); + priv->isr_stats.rx++; + handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); + } + + /* This "Tx" DMA channel is used only for loading uCode */ + if (inta & CSR_INT_BIT_FH_TX) { + IWL_DEBUG_ISR(priv, "uCode load interrupt\n"); + priv->isr_stats.tx++; + handled |= CSR_INT_BIT_FH_TX; + /* Wake up uCode load routine, now that load is complete */ + priv->ucode_write_complete = 1; + wake_up(&priv->wait_command_queue); + } + + if (inta & ~handled) { + IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled); + priv->isr_stats.unhandled++; + } + + if (inta & ~(priv->inta_mask)) { + IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n", + inta & ~priv->inta_mask); + IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh); + } + + /* Re-enable all interrupts */ + /* only Re-enable if disabled by irq */ + if (test_bit(STATUS_INT_ENABLED, &priv->status)) + iwl_legacy_enable_interrupts(priv); + /* Re-enable RF_KILL if it occurred */ + else if (handled & CSR_INT_BIT_RF_KILL) + iwl_legacy_enable_rfkill_int(priv); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) { + inta = iwl_read32(priv, CSR_INT); + inta_mask = iwl_read32(priv, CSR_INT_MASK); + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + IWL_DEBUG_ISR(priv, + "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " + "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); + } +#endif +} + +/***************************************************************************** + * + * sysfs attributes + * + *****************************************************************************/ + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + +/* + * The following adds a new attribute to the sysfs representation + * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/) + * used for controlling the debug level. + * + * See the level definitions in iwl for details. + * + * The debug_level being managed using sysfs below is a per device debug + * level that is used instead of the global debug level if it (the per + * device debug level) is set. 
+ */ +static ssize_t iwl4965_show_debug_level(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv)); +} +static ssize_t iwl4965_store_debug_level(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + unsigned long val; + int ret; + + ret = strict_strtoul(buf, 0, &val); + if (ret) + IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf); + else { + priv->debug_level = val; + if (iwl_legacy_alloc_traffic_mem(priv)) + IWL_ERR(priv, + "Not enough memory to generate traffic log\n"); + } + return strnlen(buf, count); +} + +static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, + iwl4965_show_debug_level, iwl4965_store_debug_level); + + +#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */ + + +static ssize_t iwl4965_show_temperature(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + + return sprintf(buf, "%d\n", priv->temperature); +} + +static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL); + +static ssize_t iwl4965_show_tx_power(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + if (!iwl_legacy_is_ready_rf(priv)) + return sprintf(buf, "off\n"); + else + return sprintf(buf, "%d\n", priv->tx_power_user_lmt); +} + +static ssize_t iwl4965_store_tx_power(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + unsigned long val; + int ret; + + ret = strict_strtoul(buf, 10, &val); + if (ret) + IWL_INFO(priv, "%s is not in decimal form.\n", buf); + else { + ret = iwl_legacy_set_tx_power(priv, val, false); + if (ret) + IWL_ERR(priv, "failed setting tx power (0x%d).\n", + ret); + else + ret = count; + } + return ret; +} + +static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, + iwl4965_show_tx_power, iwl4965_store_tx_power); + +static struct attribute *iwl_sysfs_entries[] = { + &dev_attr_temperature.attr, + &dev_attr_tx_power.attr, +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + &dev_attr_debug_level.attr, +#endif + NULL +}; + +static struct attribute_group iwl_attribute_group = { + .name = NULL, /* put in device directory */ + .attrs = iwl_sysfs_entries, +}; + +/****************************************************************************** + * + * uCode download functions + * + ******************************************************************************/ + +static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv) +{ + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot); +} + +static void iwl4965_nic_start(struct iwl_priv *priv) +{ + /* Remove all resets to allow NIC to operate */ + iwl_write32(priv, CSR_RESET, 0); +} + +static void iwl4965_ucode_callback(const struct firmware *ucode_raw, + void *context); +static int iwl4965_mac_setup_register(struct iwl_priv *priv, + u32 max_probe_length); + +static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first) +{ + const char *name_pre = priv->cfg->fw_name_pre; + char 
tag[8]; + + if (first) { + priv->fw_index = priv->cfg->ucode_api_max; + sprintf(tag, "%d", priv->fw_index); + } else { + priv->fw_index--; + sprintf(tag, "%d", priv->fw_index); + } + + if (priv->fw_index < priv->cfg->ucode_api_min) { + IWL_ERR(priv, "no suitable firmware found!\n"); + return -ENOENT; + } + + sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode"); + + IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n", + priv->firmware_name); + + return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name, + &priv->pci_dev->dev, GFP_KERNEL, priv, + iwl4965_ucode_callback); +} + +struct iwl4965_firmware_pieces { + const void *inst, *data, *init, *init_data, *boot; + size_t inst_size, data_size, init_size, init_data_size, boot_size; +}; + +static int iwl4965_load_firmware(struct iwl_priv *priv, + const struct firmware *ucode_raw, + struct iwl4965_firmware_pieces *pieces) +{ + struct iwl_ucode_header *ucode = (void *)ucode_raw->data; + u32 api_ver, hdr_size; + const u8 *src; + + priv->ucode_ver = le32_to_cpu(ucode->ver); + api_ver = IWL_UCODE_API(priv->ucode_ver); + + switch (api_ver) { + default: + case 0: + case 1: + case 2: + hdr_size = 24; + if (ucode_raw->size < hdr_size) { + IWL_ERR(priv, "File size too small!\n"); + return -EINVAL; + } + pieces->inst_size = le32_to_cpu(ucode->v1.inst_size); + pieces->data_size = le32_to_cpu(ucode->v1.data_size); + pieces->init_size = le32_to_cpu(ucode->v1.init_size); + pieces->init_data_size = + le32_to_cpu(ucode->v1.init_data_size); + pieces->boot_size = le32_to_cpu(ucode->v1.boot_size); + src = ucode->v1.data; + break; + } + + /* Verify size of file vs. image size info in file's header */ + if (ucode_raw->size != hdr_size + pieces->inst_size + + pieces->data_size + pieces->init_size + + pieces->init_data_size + pieces->boot_size) { + + IWL_ERR(priv, + "uCode file size %d does not match expected size\n", + (int)ucode_raw->size); + return -EINVAL; + } + + pieces->inst = src; + src += pieces->inst_size; + pieces->data = src; + src += pieces->data_size; + pieces->init = src; + src += pieces->init_size; + pieces->init_data = src; + src += pieces->init_data_size; + pieces->boot = src; + src += pieces->boot_size; + + return 0; +} + +/** + * iwl4965_ucode_callback - callback when firmware was loaded + * + * If loaded successfully, copies the firmware into buffers + * for the card to fetch (via DMA). 
+ */ +static void +iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context) +{ + struct iwl_priv *priv = context; + struct iwl_ucode_header *ucode; + int err; + struct iwl4965_firmware_pieces pieces; + const unsigned int api_max = priv->cfg->ucode_api_max; + const unsigned int api_min = priv->cfg->ucode_api_min; + u32 api_ver; + + u32 max_probe_length = 200; + u32 standard_phy_calibration_size = + IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; + + memset(&pieces, 0, sizeof(pieces)); + + if (!ucode_raw) { + if (priv->fw_index <= priv->cfg->ucode_api_max) + IWL_ERR(priv, + "request for firmware file '%s' failed.\n", + priv->firmware_name); + goto try_again; + } + + IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n", + priv->firmware_name, ucode_raw->size); + + /* Make sure that we got at least the API version number */ + if (ucode_raw->size < 4) { + IWL_ERR(priv, "File size way too small!\n"); + goto try_again; + } + + /* Data from ucode file: header followed by uCode images */ + ucode = (struct iwl_ucode_header *)ucode_raw->data; + + err = iwl4965_load_firmware(priv, ucode_raw, &pieces); + + if (err) + goto try_again; + + api_ver = IWL_UCODE_API(priv->ucode_ver); + + /* + * api_ver should match the api version forming part of the + * firmware filename ... but we don't check for that and only rely + * on the API version read from firmware header from here on forward + */ + if (api_ver < api_min || api_ver > api_max) { + IWL_ERR(priv, + "Driver unable to support your firmware API. " + "Driver supports v%u, firmware is v%u.\n", + api_max, api_ver); + goto try_again; + } + + if (api_ver != api_max) + IWL_ERR(priv, + "Firmware has old API version. Expected v%u, " + "got v%u. New firmware can be obtained " + "from http://www.intellinuxwireless.org.\n", + api_max, api_ver); + + IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n", + IWL_UCODE_MAJOR(priv->ucode_ver), + IWL_UCODE_MINOR(priv->ucode_ver), + IWL_UCODE_API(priv->ucode_ver), + IWL_UCODE_SERIAL(priv->ucode_ver)); + + snprintf(priv->hw->wiphy->fw_version, + sizeof(priv->hw->wiphy->fw_version), + "%u.%u.%u.%u", + IWL_UCODE_MAJOR(priv->ucode_ver), + IWL_UCODE_MINOR(priv->ucode_ver), + IWL_UCODE_API(priv->ucode_ver), + IWL_UCODE_SERIAL(priv->ucode_ver)); + + /* + * For any of the failures below (before allocating pci memory) + * we will try to load a version with a smaller API -- maybe the + * user just got a corrupted version of the latest API. 
+ */ + + IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n", + priv->ucode_ver); + IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n", + pieces.inst_size); + IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n", + pieces.data_size); + IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n", + pieces.init_size); + IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n", + pieces.init_data_size); + IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n", + pieces.boot_size); + + /* Verify that uCode images will fit in card's SRAM */ + if (pieces.inst_size > priv->hw_params.max_inst_size) { + IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n", + pieces.inst_size); + goto try_again; + } + + if (pieces.data_size > priv->hw_params.max_data_size) { + IWL_ERR(priv, "uCode data len %Zd too large to fit in\n", + pieces.data_size); + goto try_again; + } + + if (pieces.init_size > priv->hw_params.max_inst_size) { + IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n", + pieces.init_size); + goto try_again; + } + + if (pieces.init_data_size > priv->hw_params.max_data_size) { + IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n", + pieces.init_data_size); + goto try_again; + } + + if (pieces.boot_size > priv->hw_params.max_bsm_size) { + IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n", + pieces.boot_size); + goto try_again; + } + + /* Allocate ucode buffers for card's bus-master loading ... */ + + /* Runtime instructions and 2 copies of data: + * 1) unmodified from disk + * 2) backup cache for save/restore during power-downs */ + priv->ucode_code.len = pieces.inst_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code); + + priv->ucode_data.len = pieces.data_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data); + + priv->ucode_data_backup.len = pieces.data_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); + + if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || + !priv->ucode_data_backup.v_addr) + goto err_pci_alloc; + + /* Initialization instructions and data */ + if (pieces.init_size && pieces.init_data_size) { + priv->ucode_init.len = pieces.init_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init); + + priv->ucode_init_data.len = pieces.init_data_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data); + + if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr) + goto err_pci_alloc; + } + + /* Bootstrap (instructions only, no data) */ + if (pieces.boot_size) { + priv->ucode_boot.len = pieces.boot_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot); + + if (!priv->ucode_boot.v_addr) + goto err_pci_alloc; + } + + /* Now that we can no longer fail, copy information */ + + priv->sta_key_max_num = STA_KEY_MAX_NUM; + + /* Copy images into buffers for card's bus-master reads ... 
*/ + + /* Runtime instructions (first block of data in file) */ + IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n", + pieces.inst_size); + memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size); + + IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", + priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); + + /* + * Runtime data + * NOTE: Copy into backup buffer will be done in iwl_up() + */ + IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n", + pieces.data_size); + memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size); + memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size); + + /* Initialization instructions */ + if (pieces.init_size) { + IWL_DEBUG_INFO(priv, + "Copying (but not loading) init instr len %Zd\n", + pieces.init_size); + memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size); + } + + /* Initialization data */ + if (pieces.init_data_size) { + IWL_DEBUG_INFO(priv, + "Copying (but not loading) init data len %Zd\n", + pieces.init_data_size); + memcpy(priv->ucode_init_data.v_addr, pieces.init_data, + pieces.init_data_size); + } + + /* Bootstrap instructions */ + IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n", + pieces.boot_size); + memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size); + + /* + * figure out the offset of chain noise reset and gain commands + * base on the size of standard phy calibration commands table size + */ + priv->_4965.phy_calib_chain_noise_reset_cmd = + standard_phy_calibration_size; + priv->_4965.phy_calib_chain_noise_gain_cmd = + standard_phy_calibration_size + 1; + + /************************************************** + * This is still part of probe() in a sense... + * + * 9. Setup and register with mac80211 and debugfs + **************************************************/ + err = iwl4965_mac_setup_register(priv, max_probe_length); + if (err) + goto out_unbind; + + err = iwl_legacy_dbgfs_register(priv, DRV_NAME); + if (err) + IWL_ERR(priv, + "failed to create debugfs files. 
Ignoring error: %d\n", err); + + err = sysfs_create_group(&priv->pci_dev->dev.kobj, + &iwl_attribute_group); + if (err) { + IWL_ERR(priv, "failed to create sysfs device attributes\n"); + goto out_unbind; + } + + /* We have our copies now, allow OS release its copies */ + release_firmware(ucode_raw); + complete(&priv->_4965.firmware_loading_complete); + return; + + try_again: + /* try next, if any */ + if (iwl4965_request_firmware(priv, false)) + goto out_unbind; + release_firmware(ucode_raw); + return; + + err_pci_alloc: + IWL_ERR(priv, "failed to allocate pci memory\n"); + iwl4965_dealloc_ucode_pci(priv); + out_unbind: + complete(&priv->_4965.firmware_loading_complete); + device_release_driver(&priv->pci_dev->dev); + release_firmware(ucode_raw); +} + +static const char * const desc_lookup_text[] = { + "OK", + "FAIL", + "BAD_PARAM", + "BAD_CHECKSUM", + "NMI_INTERRUPT_WDG", + "SYSASSERT", + "FATAL_ERROR", + "BAD_COMMAND", + "HW_ERROR_TUNE_LOCK", + "HW_ERROR_TEMPERATURE", + "ILLEGAL_CHAN_FREQ", + "VCC_NOT_STABLE", + "FH_ERROR", + "NMI_INTERRUPT_HOST", + "NMI_INTERRUPT_ACTION_PT", + "NMI_INTERRUPT_UNKNOWN", + "UCODE_VERSION_MISMATCH", + "HW_ERROR_ABS_LOCK", + "HW_ERROR_CAL_LOCK_FAIL", + "NMI_INTERRUPT_INST_ACTION_PT", + "NMI_INTERRUPT_DATA_ACTION_PT", + "NMI_TRM_HW_ER", + "NMI_INTERRUPT_TRM", + "NMI_INTERRUPT_BREAK_POINT", + "DEBUG_0", + "DEBUG_1", + "DEBUG_2", + "DEBUG_3", +}; + +static struct { char *name; u8 num; } advanced_lookup[] = { + { "NMI_INTERRUPT_WDG", 0x34 }, + { "SYSASSERT", 0x35 }, + { "UCODE_VERSION_MISMATCH", 0x37 }, + { "BAD_COMMAND", 0x38 }, + { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, + { "FATAL_ERROR", 0x3D }, + { "NMI_TRM_HW_ERR", 0x46 }, + { "NMI_INTERRUPT_TRM", 0x4C }, + { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, + { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, + { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, + { "NMI_INTERRUPT_HOST", 0x66 }, + { "NMI_INTERRUPT_ACTION_PT", 0x7C }, + { "NMI_INTERRUPT_UNKNOWN", 0x84 }, + { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, + { "ADVANCED_SYSASSERT", 0 }, +}; + +static const char *iwl4965_desc_lookup(u32 num) +{ + int i; + int max = ARRAY_SIZE(desc_lookup_text); + + if (num < max) + return desc_lookup_text[num]; + + max = ARRAY_SIZE(advanced_lookup) - 1; + for (i = 0; i < max; i++) { + if (advanced_lookup[i].num == num) + break; + } + return advanced_lookup[i].name; +} + +#define ERROR_START_OFFSET (1 * sizeof(u32)) +#define ERROR_ELEM_SIZE (7 * sizeof(u32)) + +void iwl4965_dump_nic_error_log(struct iwl_priv *priv) +{ + u32 data2, line; + u32 desc, time, count, base, data1; + u32 blink1, blink2, ilink1, ilink2; + u32 pc, hcmd; + + if (priv->ucode_type == UCODE_INIT) { + base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr); + } else { + base = le32_to_cpu(priv->card_alive.error_event_table_ptr); + } + + if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { + IWL_ERR(priv, + "Not valid error log pointer 0x%08X for %s uCode\n", + base, (priv->ucode_type == UCODE_INIT) ? 
"Init" : "RT"); + return; + } + + count = iwl_legacy_read_targ_mem(priv, base); + + if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { + IWL_ERR(priv, "Start IWL Error Log Dump:\n"); + IWL_ERR(priv, "Status: 0x%08lX, count: %d\n", + priv->status, count); + } + + desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32)); + priv->isr_stats.err_code = desc; + pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32)); + blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32)); + blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32)); + ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32)); + ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32)); + data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32)); + data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32)); + line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32)); + time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32)); + hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32)); + + trace_iwlwifi_legacy_dev_ucode_error(priv, desc, + time, data1, data2, line, + blink1, blink2, ilink1, ilink2); + + IWL_ERR(priv, "Desc Time " + "data1 data2 line\n"); + IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n", + iwl4965_desc_lookup(desc), desc, time, data1, data2, line); + IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n"); + IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", + pc, blink1, blink2, ilink1, ilink2, hcmd); +} + +static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv) +{ + struct iwl_ct_kill_config cmd; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&priv->lock, flags); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); + spin_unlock_irqrestore(&priv->lock, flags); + + cmd.critical_temperature_R = + cpu_to_le32(priv->hw_params.ct_kill_threshold); + + ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD, + sizeof(cmd), &cmd); + if (ret) + IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n"); + else + IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD " + "succeeded, " + "critical temperature is %d\n", + priv->hw_params.ct_kill_threshold); +} + +static const s8 default_queue_to_tx_fifo[] = { + IWL_TX_FIFO_VO, + IWL_TX_FIFO_VI, + IWL_TX_FIFO_BE, + IWL_TX_FIFO_BK, + IWL49_CMD_FIFO_NUM, + IWL_TX_FIFO_UNUSED, + IWL_TX_FIFO_UNUSED, +}; + +static int iwl4965_alive_notify(struct iwl_priv *priv) +{ + u32 a; + unsigned long flags; + int i, chan; + u32 reg_val; + + spin_lock_irqsave(&priv->lock, flags); + + /* Clear 4965's internal Tx Scheduler data base */ + priv->scd_base_addr = iwl_legacy_read_prph(priv, + IWL49_SCD_SRAM_BASE_ADDR); + a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET; + for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4) + iwl_legacy_write_targ_mem(priv, a, 0); + for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4) + iwl_legacy_write_targ_mem(priv, a, 0); + for (; a < priv->scd_base_addr + + IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4) + iwl_legacy_write_targ_mem(priv, a, 0); + + /* Tel 4965 where to find Tx byte count tables */ + iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR, + priv->scd_bc_tbls.dma >> 10); + + /* Enable DMA channel */ + for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++) + iwl_legacy_write_direct32(priv, + FH_TCSR_CHNL_TX_CONFIG_REG(chan), + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); + + /* Update FH chicken bits */ + reg_val = 
iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG); + iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG, + reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); + + /* Disable chain mode for all queues */ + iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0); + + /* Initialize each Tx queue (including the command queue) */ + for (i = 0; i < priv->hw_params.max_txq_num; i++) { + + /* TFD circular buffer read/write indexes */ + iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0); + iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8)); + + /* Max Tx Window size for Scheduler-ACK mode */ + iwl_legacy_write_targ_mem(priv, priv->scd_base_addr + + IWL49_SCD_CONTEXT_QUEUE_OFFSET(i), + (SCD_WIN_SIZE << + IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & + IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); + + /* Frame limit */ + iwl_legacy_write_targ_mem(priv, priv->scd_base_addr + + IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) + + sizeof(u32), + (SCD_FRAME_LIMIT << + IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & + IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); + + } + iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK, + (1 << priv->hw_params.max_txq_num) - 1); + + /* Activate all Tx DMA/FIFO channels */ + iwl4965_txq_set_sched(priv, IWL_MASK(0, 6)); + + iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0); + + /* make sure all queue are not stopped */ + memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped)); + for (i = 0; i < 4; i++) + atomic_set(&priv->queue_stop_count[i], 0); + + /* reset to 0 to enable all the queue first */ + priv->txq_ctx_active_msk = 0; + /* Map each Tx/cmd queue to its corresponding fifo */ + BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7); + + for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { + int ac = default_queue_to_tx_fifo[i]; + + iwl_txq_ctx_activate(priv, i); + + if (ac == IWL_TX_FIFO_UNUSED) + continue; + + iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0); + } + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +/** + * iwl4965_alive_start - called after REPLY_ALIVE notification received + * from protocol/runtime uCode (initialization uCode's + * Alive gets handled by iwl_init_alive_start()). + */ +static void iwl4965_alive_start(struct iwl_priv *priv) +{ + int ret = 0; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); + + if (priv->card_alive.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Alive failed.\n"); + goto restart; + } + + /* Initialize uCode has loaded Runtime uCode ... verify inst image. + * This is a paranoid check, because we would not have gotten the + * "runtime" alive if code weren't properly loaded. 
*/ + if (iwl4965_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n"); + goto restart; + } + + ret = iwl4965_alive_notify(priv); + if (ret) { + IWL_WARN(priv, + "Could not complete ALIVE transition [ntf]: %d\n", ret); + goto restart; + } + + + /* After the ALIVE response, we can send host commands to the uCode */ + set_bit(STATUS_ALIVE, &priv->status); + + /* Enable watchdog to monitor the driver tx queues */ + iwl_legacy_setup_watchdog(priv); + + if (iwl_legacy_is_rfkill(priv)) + return; + + ieee80211_wake_queues(priv->hw); + + priv->active_rate = IWL_RATES_MASK; + + if (iwl_legacy_is_associated_ctx(ctx)) { + struct iwl_legacy_rxon_cmd *active_rxon = + (struct iwl_legacy_rxon_cmd *)&ctx->active; + /* apply any changes in staging */ + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + } else { + struct iwl_rxon_context *tmp; + /* Initialize our rx_config data */ + for_each_context(priv, tmp) + iwl_legacy_connection_init_rx_config(priv, tmp); + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + } + + /* Configure bluetooth coexistence if enabled */ + iwl_legacy_send_bt_config(priv); + + iwl4965_reset_run_time_calib(priv); + + set_bit(STATUS_READY, &priv->status); + + /* Configure the adapter for unassociated operation */ + iwl_legacy_commit_rxon(priv, ctx); + + /* At this point, the NIC is initialized and operational */ + iwl4965_rf_kill_ct_config(priv); + + IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); + wake_up(&priv->wait_command_queue); + + iwl_legacy_power_update_mode(priv, true); + IWL_DEBUG_INFO(priv, "Updated power mode\n"); + + return; + + restart: + queue_work(priv->workqueue, &priv->restart); +} + +static void iwl4965_cancel_deferred_work(struct iwl_priv *priv); + +static void __iwl4965_down(struct iwl_priv *priv) +{ + unsigned long flags; + int exit_pending; + + IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); + + iwl_legacy_scan_cancel_timeout(priv, 200); + + exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); + + /* Stop TX queues watchdog. 
We need to have STATUS_EXIT_PENDING bit set + * to prevent rearm timer */ + del_timer_sync(&priv->watchdog); + + iwl_legacy_clear_ucode_stations(priv, NULL); + iwl_legacy_dealloc_bcast_stations(priv); + iwl_legacy_clear_driver_stations(priv); + + /* Unblock any waiting calls */ + wake_up_all(&priv->wait_command_queue); + + /* Wipe out the EXIT_PENDING status bit if we are not actually + * exiting the module */ + if (!exit_pending) + clear_bit(STATUS_EXIT_PENDING, &priv->status); + + /* stop and reset the on-board processor */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + /* tell the device to stop sending interrupts */ + spin_lock_irqsave(&priv->lock, flags); + iwl_legacy_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + iwl4965_synchronize_irq(priv); + + if (priv->mac80211_registered) + ieee80211_stop_queues(priv->hw); + + /* If we have not previously called iwl_init() then + * clear all bits but the RF Kill bit and return */ + if (!iwl_legacy_is_init(priv)) { + priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | + test_bit(STATUS_GEO_CONFIGURED, &priv->status) << + STATUS_GEO_CONFIGURED | + test_bit(STATUS_EXIT_PENDING, &priv->status) << + STATUS_EXIT_PENDING; + goto exit; + } + + /* ...otherwise clear out all the status bits but the RF Kill + * bit and continue taking the NIC down. */ + priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | + test_bit(STATUS_GEO_CONFIGURED, &priv->status) << + STATUS_GEO_CONFIGURED | + test_bit(STATUS_FW_ERROR, &priv->status) << + STATUS_FW_ERROR | + test_bit(STATUS_EXIT_PENDING, &priv->status) << + STATUS_EXIT_PENDING; + + iwl4965_txq_ctx_stop(priv); + iwl4965_rxq_stop(priv); + + /* Power-down device's busmaster DMA clocks */ + iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); + udelay(5); + + /* Make sure (redundant) we've released our request to stay awake */ + iwl_legacy_clear_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + /* Stop the device, and put it in low power state */ + iwl_legacy_apm_stop(priv); + + exit: + memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); + + dev_kfree_skb(priv->beacon_skb); + priv->beacon_skb = NULL; + + /* clear out any free frames */ + iwl4965_clear_free_frames(priv); +} + +static void iwl4965_down(struct iwl_priv *priv) +{ + mutex_lock(&priv->mutex); + __iwl4965_down(priv); + mutex_unlock(&priv->mutex); + + iwl4965_cancel_deferred_work(priv); +} + +#define HW_READY_TIMEOUT (50) + +static int iwl4965_set_hw_ready(struct iwl_priv *priv) +{ + int ret = 0; + + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); + + /* See if we got it */ + ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, + HW_READY_TIMEOUT); + if (ret != -ETIMEDOUT) + priv->hw_ready = true; + else + priv->hw_ready = false; + + IWL_DEBUG_INFO(priv, "hardware %s\n", + (priv->hw_ready == 1) ? 
"ready" : "not ready"); + return ret; +} + +static int iwl4965_prepare_card_hw(struct iwl_priv *priv) +{ + int ret = 0; + + IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n"); + + ret = iwl4965_set_hw_ready(priv); + if (priv->hw_ready) + return ret; + + /* If HW is not ready, prepare the conditions to check again */ + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_PREPARE); + + ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, + ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, + CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); + + /* HW should be ready by now, check again. */ + if (ret != -ETIMEDOUT) + iwl4965_set_hw_ready(priv); + + return ret; +} + +#define MAX_HW_RESTARTS 5 + +static int __iwl4965_up(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx; + int i; + int ret; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); + return -EIO; + } + + if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { + IWL_ERR(priv, "ucode not available for device bringup\n"); + return -EIO; + } + + for_each_context(priv, ctx) { + ret = iwl4965_alloc_bcast_station(priv, ctx); + if (ret) { + iwl_legacy_dealloc_bcast_stations(priv); + return ret; + } + } + + iwl4965_prepare_card_hw(priv); + + if (!priv->hw_ready) { + IWL_WARN(priv, "Exit HW not ready\n"); + return -EIO; + } + + /* If platform's RF_KILL switch is NOT set to KILL */ + if (iwl_read32(priv, + CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) + clear_bit(STATUS_RF_KILL_HW, &priv->status); + else + set_bit(STATUS_RF_KILL_HW, &priv->status); + + if (iwl_legacy_is_rfkill(priv)) { + wiphy_rfkill_set_hw_state(priv->hw->wiphy, true); + + iwl_legacy_enable_interrupts(priv); + IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n"); + return 0; + } + + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + + /* must be initialised before iwl_hw_nic_init */ + priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; + + ret = iwl4965_hw_nic_init(priv); + if (ret) { + IWL_ERR(priv, "Unable to init nic\n"); + return ret; + } + + /* make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + /* clear (again), then enable host interrupts */ + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + iwl_legacy_enable_interrupts(priv); + + /* really make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + /* Copy original ucode data image from disk into backup cache. + * This will be used to initialize the on-board processor's + * data SRAM for a clean start when the runtime program first loads. 
*/ + memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, + priv->ucode_data.len); + + for (i = 0; i < MAX_HW_RESTARTS; i++) { + + /* load bootstrap state machine, + * load bootstrap program into processor's memory, + * prepare to load the "initialize" uCode */ + ret = priv->cfg->ops->lib->load_ucode(priv); + + if (ret) { + IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n", + ret); + continue; + } + + /* start card; "initialize" will load runtime ucode */ + iwl4965_nic_start(priv); + + IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n"); + + return 0; + } + + set_bit(STATUS_EXIT_PENDING, &priv->status); + __iwl4965_down(priv); + clear_bit(STATUS_EXIT_PENDING, &priv->status); + + /* tried to restart and config the device for as long as our + * patience could withstand */ + IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i); + return -EIO; +} + + +/***************************************************************************** + * + * Workqueue callbacks + * + *****************************************************************************/ + +static void iwl4965_bg_init_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, init_alive_start.work); + + mutex_lock(&priv->mutex); + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + goto out; + + priv->cfg->ops->lib->init_alive_start(priv); +out: + mutex_unlock(&priv->mutex); +} + +static void iwl4965_bg_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, alive_start.work); + + mutex_lock(&priv->mutex); + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + goto out; + + iwl4965_alive_start(priv); +out: + mutex_unlock(&priv->mutex); +} + +static void iwl4965_bg_run_time_calib_work(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + run_time_calib_work); + + mutex_lock(&priv->mutex); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status) || + test_bit(STATUS_SCANNING, &priv->status)) { + mutex_unlock(&priv->mutex); + return; + } + + if (priv->start_calib) { + iwl4965_chain_noise_calibration(priv, + (void *)&priv->_4965.statistics); + iwl4965_sensitivity_calibration(priv, + (void *)&priv->_4965.statistics); + } + + mutex_unlock(&priv->mutex); +} + +static void iwl4965_bg_restart(struct work_struct *data) +{ + struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { + struct iwl_rxon_context *ctx; + + mutex_lock(&priv->mutex); + for_each_context(priv, ctx) + ctx->vif = NULL; + priv->is_open = 0; + + __iwl4965_down(priv); + + mutex_unlock(&priv->mutex); + iwl4965_cancel_deferred_work(priv); + ieee80211_restart_hw(priv->hw); + } else { + iwl4965_down(priv); + + mutex_lock(&priv->mutex); + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + mutex_unlock(&priv->mutex); + return; + } + + __iwl4965_up(priv); + mutex_unlock(&priv->mutex); + } +} + +static void iwl4965_bg_rx_replenish(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, rx_replenish); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl4965_rx_replenish(priv); + mutex_unlock(&priv->mutex); +} + +/***************************************************************************** + * + * mac80211 entry point functions + * + *****************************************************************************/ + 
+#define UCODE_READY_TIMEOUT (4 * HZ) + +/* + * Not a mac80211 entry point function, but it fits in with all the + * other mac80211 functions grouped here. + */ +static int iwl4965_mac_setup_register(struct iwl_priv *priv, + u32 max_probe_length) +{ + int ret; + struct ieee80211_hw *hw = priv->hw; + struct iwl_rxon_context *ctx; + + hw->rate_control_algorithm = "iwl-4965-rs"; + + /* Tell mac80211 our characteristics */ + hw->flags = IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_AMPDU_AGGREGATION | + IEEE80211_HW_NEED_DTIM_PERIOD | + IEEE80211_HW_SPECTRUM_MGMT | + IEEE80211_HW_REPORTS_TX_ACK_STATUS; + + if (priv->cfg->sku & IWL_SKU_N) + hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | + IEEE80211_HW_SUPPORTS_STATIC_SMPS; + + hw->sta_data_size = sizeof(struct iwl_station_priv); + hw->vif_data_size = sizeof(struct iwl_vif_priv); + + for_each_context(priv, ctx) { + hw->wiphy->interface_modes |= ctx->interface_modes; + hw->wiphy->interface_modes |= ctx->exclusive_interface_modes; + } + + hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | + WIPHY_FLAG_DISABLE_BEACON_HINTS; + + /* + * For now, disable PS by default because it affects + * RX performance significantly. + */ + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + + hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; + /* we create the 802.11 header and a zero-length SSID element */ + hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2; + + /* Default value; 4 EDCA QOS priorities */ + hw->queues = 4; + + hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; + + if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) + priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + &priv->bands[IEEE80211_BAND_2GHZ]; + if (priv->bands[IEEE80211_BAND_5GHZ].n_channels) + priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + &priv->bands[IEEE80211_BAND_5GHZ]; + + iwl_legacy_leds_init(priv); + + ret = ieee80211_register_hw(priv->hw); + if (ret) { + IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); + return ret; + } + priv->mac80211_registered = 1; + + return 0; +} + + +int iwl4965_mac_start(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + int ret; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + /* we should be verifying the device is ready to be opened */ + mutex_lock(&priv->mutex); + ret = __iwl4965_up(priv); + mutex_unlock(&priv->mutex); + + if (ret) + return ret; + + if (iwl_legacy_is_rfkill(priv)) + goto out; + + IWL_DEBUG_INFO(priv, "Start UP work done.\n"); + + /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from + * mac80211 will not be run successfully. 
*/ + ret = wait_event_timeout(priv->wait_command_queue, + test_bit(STATUS_READY, &priv->status), + UCODE_READY_TIMEOUT); + if (!ret) { + if (!test_bit(STATUS_READY, &priv->status)) { + IWL_ERR(priv, "START_ALIVE timeout after %dms.\n", + jiffies_to_msecs(UCODE_READY_TIMEOUT)); + return -ETIMEDOUT; + } + } + + iwl4965_led_enable(priv); + +out: + priv->is_open = 1; + IWL_DEBUG_MAC80211(priv, "leave\n"); + return 0; +} + +void iwl4965_mac_stop(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (!priv->is_open) + return; + + priv->is_open = 0; + + iwl4965_down(priv); + + flush_workqueue(priv->workqueue); + + /* User space software may expect getting rfkill changes + * even if interface is down */ + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + iwl_legacy_enable_rfkill_int(priv); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MACDUMP(priv, "enter\n"); + + IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, + ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); + + if (iwl4965_tx_skb(priv, skb)) + dev_kfree_skb_any(skb); + + IWL_DEBUG_MACDUMP(priv, "leave\n"); +} + +void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta, + iv32, phase1key); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + struct iwl_rxon_context *ctx = vif_priv->ctx; + int ret; + u8 sta_id; + bool is_default_wep_key = false; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (priv->cfg->mod_params->sw_crypto) { + IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); + return -EOPNOTSUPP; + } + + sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta); + if (sta_id == IWL_INVALID_STATION) + return -EINVAL; + + mutex_lock(&priv->mutex); + iwl_legacy_scan_cancel_timeout(priv, 100); + + /* + * If we are getting WEP group key and we didn't receive any key mapping + * so far, we are in legacy wep mode (group key only), otherwise we are + * in 1X mode. + * In legacy wep mode, we use another host command to the uCode. 
+ */ + if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || + key->cipher == WLAN_CIPHER_SUITE_WEP104) && + !sta) { + if (cmd == SET_KEY) + is_default_wep_key = !ctx->key_mapping_keys; + else + is_default_wep_key = + (key->hw_key_idx == HW_KEY_DEFAULT); + } + + switch (cmd) { + case SET_KEY: + if (is_default_wep_key) + ret = iwl4965_set_default_wep_key(priv, + vif_priv->ctx, key); + else + ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx, + key, sta_id); + + IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); + break; + case DISABLE_KEY: + if (is_default_wep_key) + ret = iwl4965_remove_default_wep_key(priv, ctx, key); + else + ret = iwl4965_remove_dynamic_key(priv, ctx, + key, sta_id); + + IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); + break; + default: + ret = -EINVAL; + } + + mutex_unlock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return ret; +} + +int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size) +{ + struct iwl_priv *priv = hw->priv; + int ret = -EINVAL; + + IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", + sta->addr, tid); + + if (!(priv->cfg->sku & IWL_SKU_N)) + return -EACCES; + + mutex_lock(&priv->mutex); + + switch (action) { + case IEEE80211_AMPDU_RX_START: + IWL_DEBUG_HT(priv, "start Rx\n"); + ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn); + break; + case IEEE80211_AMPDU_RX_STOP: + IWL_DEBUG_HT(priv, "stop Rx\n"); + ret = iwl4965_sta_rx_agg_stop(priv, sta, tid); + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + ret = 0; + break; + case IEEE80211_AMPDU_TX_START: + IWL_DEBUG_HT(priv, "start Tx\n"); + ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn); + break; + case IEEE80211_AMPDU_TX_STOP: + IWL_DEBUG_HT(priv, "stop Tx\n"); + ret = iwl4965_tx_agg_stop(priv, vif, sta, tid); + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + ret = 0; + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + ret = 0; + break; + } + mutex_unlock(&priv->mutex); + + return ret; +} + +int iwl4965_mac_sta_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + bool is_ap = vif->type == NL80211_IFTYPE_STATION; + int ret; + u8 sta_id; + + IWL_DEBUG_INFO(priv, "received request to add station %pM\n", + sta->addr); + mutex_lock(&priv->mutex); + IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n", + sta->addr); + sta_priv->common.sta_id = IWL_INVALID_STATION; + + atomic_set(&sta_priv->pending_frames, 0); + + ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr, + is_ap, sta, &sta_id); + if (ret) { + IWL_ERR(priv, "Unable to add station %pM (%d)\n", + sta->addr, ret); + /* Should we return success if return code is EEXIST ? 
*/ + mutex_unlock(&priv->mutex); + return ret; + } + + sta_priv->common.sta_id = sta_id; + + /* Initialize rate scaling */ + IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n", + sta->addr); + iwl4965_rs_rate_init(priv, sta, sta_id); + mutex_unlock(&priv->mutex); + + return 0; +} + +void iwl4965_mac_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_channel_switch *ch_switch) +{ + struct iwl_priv *priv = hw->priv; + const struct iwl_channel_info *ch_info; + struct ieee80211_conf *conf = &hw->conf; + struct ieee80211_channel *channel = ch_switch->channel; + struct iwl_ht_config *ht_conf = &priv->current_ht_config; + + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + u16 ch; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + mutex_lock(&priv->mutex); + + if (iwl_legacy_is_rfkill(priv)) + goto out; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status) || + test_bit(STATUS_SCANNING, &priv->status) || + test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) + goto out; + + if (!iwl_legacy_is_associated_ctx(ctx)) + goto out; + + if (!priv->cfg->ops->lib->set_channel_switch) + goto out; + + ch = channel->hw_value; + if (le16_to_cpu(ctx->active.channel) == ch) + goto out; + + ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch); + if (!iwl_legacy_is_channel_valid(ch_info)) { + IWL_DEBUG_MAC80211(priv, "invalid channel\n"); + goto out; + } + + spin_lock_irq(&priv->lock); + + priv->current_ht_config.smps = conf->smps_mode; + + /* Configure HT40 channels */ + ctx->ht.enabled = conf_is_ht(conf); + if (ctx->ht.enabled) { + if (conf_is_ht40_minus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_BELOW; + ctx->ht.is_40mhz = true; + } else if (conf_is_ht40_plus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + ctx->ht.is_40mhz = true; + } else { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_NONE; + ctx->ht.is_40mhz = false; + } + } else + ctx->ht.is_40mhz = false; + + if ((le16_to_cpu(ctx->staging.channel) != ch)) + ctx->staging.flags = 0; + + iwl_legacy_set_rxon_channel(priv, channel, ctx); + iwl_legacy_set_rxon_ht(priv, ht_conf); + iwl_legacy_set_flags_for_band(priv, ctx, channel->band, ctx->vif); + + spin_unlock_irq(&priv->lock); + + iwl_legacy_set_rate(priv); + /* + * at this point, staging_rxon has the + * configuration for channel switch + */ + set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); + priv->switch_channel = cpu_to_le16(ch); + if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) { + clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); + priv->switch_channel = 0; + ieee80211_chswitch_done(ctx->vif, false); + } + +out: + mutex_unlock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +void iwl4965_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct iwl_priv *priv = hw->priv; + __le32 filter_or = 0, filter_nand = 0; + struct iwl_rxon_context *ctx; + +#define CHK(test, flag) do { \ + if (*total_flags & (test)) \ + filter_or |= (flag); \ + else \ + filter_nand |= (flag); \ + } while (0) + + IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", + changed_flags, *total_flags); + + CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); + /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */ + CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK); + CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); + +#undef CHK + + 
mutex_lock(&priv->mutex); + + for_each_context(priv, ctx) { + ctx->staging.filter_flags &= ~filter_nand; + ctx->staging.filter_flags |= filter_or; + + /* + * Not committing directly because hardware can perform a scan, + * but we'll eventually commit the filter flags change anyway. + */ + } + + mutex_unlock(&priv->mutex); + + /* + * Receiving all multicast frames is always enabled by the + * default flags setup in iwl_legacy_connection_init_rx_config() + * since we currently do not support programming multicast + * filters into the device. + */ + *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | + FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; +} + +/***************************************************************************** + * + * driver setup and teardown + * + *****************************************************************************/ + +static void iwl4965_bg_txpower_work(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + txpower_work); + + mutex_lock(&priv->mutex); + + /* If a scan happened to start before we got here + * then just return; the statistics notification will + * kick off another scheduled work to compensate for + * any temperature delta we missed here. */ + if (test_bit(STATUS_EXIT_PENDING, &priv->status) || + test_bit(STATUS_SCANNING, &priv->status)) + goto out; + + /* Regardless of if we are associated, we must reconfigure the + * TX power since frames can be sent on non-radar channels while + * not associated */ + priv->cfg->ops->lib->send_tx_power(priv); + + /* Update last_temperature to keep is_calib_needed from running + * when it isn't needed... */ + priv->last_temperature = priv->temperature; +out: + mutex_unlock(&priv->mutex); +} + +static void iwl4965_setup_deferred_work(struct iwl_priv *priv) +{ + priv->workqueue = create_singlethread_workqueue(DRV_NAME); + + init_waitqueue_head(&priv->wait_command_queue); + + INIT_WORK(&priv->restart, iwl4965_bg_restart); + INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish); + INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work); + INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start); + INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start); + + iwl_legacy_setup_scan_deferred_work(priv); + + INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work); + + init_timer(&priv->statistics_periodic); + priv->statistics_periodic.data = (unsigned long)priv; + priv->statistics_periodic.function = iwl4965_bg_statistics_periodic; + + init_timer(&priv->watchdog); + priv->watchdog.data = (unsigned long)priv; + priv->watchdog.function = iwl_legacy_bg_watchdog; + + tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + iwl4965_irq_tasklet, (unsigned long)priv); +} + +static void iwl4965_cancel_deferred_work(struct iwl_priv *priv) +{ + cancel_work_sync(&priv->txpower_work); + cancel_delayed_work_sync(&priv->init_alive_start); + cancel_delayed_work(&priv->alive_start); + cancel_work_sync(&priv->run_time_calib_work); + + iwl_legacy_cancel_scan_deferred_work(priv); + + del_timer_sync(&priv->statistics_periodic); +} + +static void iwl4965_init_hw_rates(struct iwl_priv *priv, + struct ieee80211_rate *rates) +{ + int i; + + for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) { + rates[i].bitrate = iwlegacy_rates[i].ieee * 5; + rates[i].hw_value = i; /* Rate scaling will work on indexes */ + rates[i].hw_value_short = i; + rates[i].flags = 0; + if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) { + /* + * If CCK != 1M then set short preamble 
rate flag. + */ + rates[i].flags |= + (iwlegacy_rates[i].plcp == IWL_RATE_1M_PLCP) ? + 0 : IEEE80211_RATE_SHORT_PREAMBLE; + } + } +} +/* + * Acquire priv->lock before calling this function ! + */ +void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index) +{ + iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, + (index & 0xff) | (txq_id << 8)); + iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index); +} + +void iwl4965_tx_queue_set_status(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + int tx_fifo_id, int scd_retry) +{ + int txq_id = txq->q.id; + + /* Find out whether to activate Tx queue */ + int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0; + + /* Set up and activate */ + iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id), + (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) | + (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) | + (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) | + (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) | + IWL49_SCD_QUEUE_STTS_REG_MSK); + + txq->sched_retry = scd_retry; + + IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n", + active ? "Activate" : "Deactivate", + scd_retry ? "BA" : "AC", txq_id, tx_fifo_id); +} + + +static int iwl4965_init_drv(struct iwl_priv *priv) +{ + int ret; + + spin_lock_init(&priv->sta_lock); + spin_lock_init(&priv->hcmd_lock); + + INIT_LIST_HEAD(&priv->free_frames); + + mutex_init(&priv->mutex); + + priv->ieee_channels = NULL; + priv->ieee_rates = NULL; + priv->band = IEEE80211_BAND_2GHZ; + + priv->iw_mode = NL80211_IFTYPE_STATION; + priv->current_ht_config.smps = IEEE80211_SMPS_STATIC; + priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; + + /* initialize force reset */ + priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD; + + /* Choose which receivers/antennas to use */ + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv, + &priv->contexts[IWL_RXON_CTX_BSS]); + + iwl_legacy_init_scan_params(priv); + + ret = iwl_legacy_init_channel_map(priv); + if (ret) { + IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); + goto err; + } + + ret = iwl_legacy_init_geos(priv); + if (ret) { + IWL_ERR(priv, "initializing geos failed: %d\n", ret); + goto err_free_channel_map; + } + iwl4965_init_hw_rates(priv, priv->ieee_rates); + + return 0; + +err_free_channel_map: + iwl_legacy_free_channel_map(priv); +err: + return ret; +} + +static void iwl4965_uninit_drv(struct iwl_priv *priv) +{ + iwl4965_calib_free_results(priv); + iwl_legacy_free_geos(priv); + iwl_legacy_free_channel_map(priv); + kfree(priv->scan_cmd); +} + +static void iwl4965_hw_detect(struct iwl_priv *priv) +{ + priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV); + priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG); + priv->rev_id = priv->pci_dev->revision; + IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id); +} + +static int iwl4965_set_hw_params(struct iwl_priv *priv) +{ + priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; + priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; + if (priv->cfg->mod_params->amsdu_size_8K) + priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K); + else + priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K); + + priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL; + + if (priv->cfg->mod_params->disable_11n) + priv->cfg->sku &= ~IWL_SKU_N; + + /* Device-specific setup */ + return priv->cfg->ops->lib->set_hw_params(priv); +} + +static const u8 iwl4965_bss_ac_to_fifo[] = { + IWL_TX_FIFO_VO, + 
IWL_TX_FIFO_VI, + IWL_TX_FIFO_BE, + IWL_TX_FIFO_BK, +}; + +static const u8 iwl4965_bss_ac_to_queue[] = { + 0, 1, 2, 3, +}; + +static int +iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int err = 0, i; + struct iwl_priv *priv; + struct ieee80211_hw *hw; + struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); + unsigned long flags; + u16 pci_cmd; + + /************************ + * 1. Allocating HW data + ************************/ + + hw = iwl_legacy_alloc_all(cfg); + if (!hw) { + err = -ENOMEM; + goto out; + } + priv = hw->priv; + /* At this point both hw and priv are allocated. */ + + /* + * The default context is always valid, + * more may be discovered when firmware + * is loaded. + */ + priv->valid_contexts = BIT(IWL_RXON_CTX_BSS); + + for (i = 0; i < NUM_IWL_RXON_CTX; i++) + priv->contexts[i].ctxid = i; + + priv->contexts[IWL_RXON_CTX_BSS].always_active = true; + priv->contexts[IWL_RXON_CTX_BSS].is_active = true; + priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON; + priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING; + priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC; + priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM; + priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID; + priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY; + priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo; + priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue; + priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes = + BIT(NL80211_IFTYPE_ADHOC); + priv->contexts[IWL_RXON_CTX_BSS].interface_modes = + BIT(NL80211_IFTYPE_STATION); + priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP; + priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS; + priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS; + priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS; + + BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1); + + SET_IEEE80211_DEV(hw, &pdev->dev); + + IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); + priv->cfg = cfg; + priv->pci_dev = pdev; + priv->inta_mask = CSR_INI_SET_MASK; + + if (iwl_legacy_alloc_traffic_mem(priv)) + IWL_ERR(priv, "Not enough memory to generate traffic log\n"); + + /************************** + * 2. Initializing PCI bus + **************************/ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + + if (pci_enable_device(pdev)) { + err = -ENODEV; + goto out_ieee80211_free_hw; + } + + pci_set_master(pdev); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); + if (err) { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, + DMA_BIT_MASK(32)); + /* both attempts failed: */ + if (err) { + IWL_WARN(priv, "No suitable DMA available.\n"); + goto out_pci_disable_device; + } + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) + goto out_pci_disable_device; + + pci_set_drvdata(pdev, priv); + + + /*********************** + * 3. 
Read REV register + ***********************/ + priv->hw_base = pci_iomap(pdev, 0, 0); + if (!priv->hw_base) { + err = -ENODEV; + goto out_pci_release_regions; + } + + IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n", + (unsigned long long) pci_resource_len(pdev, 0)); + IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base); + + /* these spin locks will be used in apm_ops.init and EEPROM access + * we should init now + */ + spin_lock_init(&priv->reg_lock); + spin_lock_init(&priv->lock); + + /* + * stop and reset the on-board processor just in case it is in a + * strange state ... like being left stranded by a primary kernel + * and this is now the kdump kernel trying to start up + */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + iwl4965_hw_detect(priv); + IWL_INFO(priv, "Detected %s, REV=0x%X\n", + priv->cfg->name, priv->hw_rev); + + /* We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state */ + pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); + + iwl4965_prepare_card_hw(priv); + if (!priv->hw_ready) { + IWL_WARN(priv, "Failed, HW not ready\n"); + goto out_iounmap; + } + + /***************** + * 4. Read EEPROM + *****************/ + /* Read the EEPROM */ + err = iwl_legacy_eeprom_init(priv); + if (err) { + IWL_ERR(priv, "Unable to init EEPROM\n"); + goto out_iounmap; + } + err = iwl4965_eeprom_check_version(priv); + if (err) + goto out_free_eeprom; + + if (err) + goto out_free_eeprom; + + /* extract MAC Address */ + iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr); + IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr); + priv->hw->wiphy->addresses = priv->addresses; + priv->hw->wiphy->n_addresses = 1; + + /************************ + * 5. Setup HW constants + ************************/ + if (iwl4965_set_hw_params(priv)) { + IWL_ERR(priv, "failed to set hw parameters\n"); + goto out_free_eeprom; + } + + /******************* + * 6. Setup priv + *******************/ + + err = iwl4965_init_drv(priv); + if (err) + goto out_free_eeprom; + /* At this point both hw and priv are initialized. */ + + /******************** + * 7. Setup services + ********************/ + spin_lock_irqsave(&priv->lock, flags); + iwl_legacy_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + pci_enable_msi(priv->pci_dev); + + err = request_irq(priv->pci_dev->irq, iwl_legacy_isr, + IRQF_SHARED, DRV_NAME, priv); + if (err) { + IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); + goto out_disable_msi; + } + + iwl4965_setup_deferred_work(priv); + iwl4965_setup_rx_handlers(priv); + + /********************************************* + * 8. 
Enable interrupts and read RFKILL state + *********************************************/ + + /* enable rfkill interrupt: hw bug w/a */ + pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd); + if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { + pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd); + } + + iwl_legacy_enable_rfkill_int(priv); + + /* If platform's RF_KILL switch is NOT set to KILL */ + if (iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) + clear_bit(STATUS_RF_KILL_HW, &priv->status); + else + set_bit(STATUS_RF_KILL_HW, &priv->status); + + wiphy_rfkill_set_hw_state(priv->hw->wiphy, + test_bit(STATUS_RF_KILL_HW, &priv->status)); + + iwl_legacy_power_initialize(priv); + + init_completion(&priv->_4965.firmware_loading_complete); + + err = iwl4965_request_firmware(priv, true); + if (err) + goto out_destroy_workqueue; + + return 0; + + out_destroy_workqueue: + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + free_irq(priv->pci_dev->irq, priv); + out_disable_msi: + pci_disable_msi(priv->pci_dev); + iwl4965_uninit_drv(priv); + out_free_eeprom: + iwl_legacy_eeprom_free(priv); + out_iounmap: + pci_iounmap(pdev, priv->hw_base); + out_pci_release_regions: + pci_set_drvdata(pdev, NULL); + pci_release_regions(pdev); + out_pci_disable_device: + pci_disable_device(pdev); + out_ieee80211_free_hw: + iwl_legacy_free_traffic_mem(priv); + ieee80211_free_hw(priv->hw); + out: + return err; +} + +static void __devexit iwl4965_pci_remove(struct pci_dev *pdev) +{ + struct iwl_priv *priv = pci_get_drvdata(pdev); + unsigned long flags; + + if (!priv) + return; + + wait_for_completion(&priv->_4965.firmware_loading_complete); + + IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); + + iwl_legacy_dbgfs_unregister(priv); + sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); + + /* ieee80211_unregister_hw call wil cause iwl_mac_stop to + * to be called and iwl4965_down since we are removing the device + * we need to set STATUS_EXIT_PENDING bit. + */ + set_bit(STATUS_EXIT_PENDING, &priv->status); + + iwl_legacy_leds_exit(priv); + + if (priv->mac80211_registered) { + ieee80211_unregister_hw(priv->hw); + priv->mac80211_registered = 0; + } else { + iwl4965_down(priv); + } + + /* + * Make sure device is reset to low power before unloading driver. + * This may be redundant with iwl4965_down(), but there are paths to + * run iwl4965_down() without calling apm_ops.stop(), and there are + * paths to avoid running iwl4965_down() at all before leaving driver. + * This (inexpensive) call *makes sure* device is reset. + */ + iwl_legacy_apm_stop(priv); + + /* make sure we flush any pending irq or + * tasklet for the driver + */ + spin_lock_irqsave(&priv->lock, flags); + iwl_legacy_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl4965_synchronize_irq(priv); + + iwl4965_dealloc_ucode_pci(priv); + + if (priv->rxq.bd) + iwl4965_rx_queue_free(priv, &priv->rxq); + iwl4965_hw_txq_ctx_free(priv); + + iwl_legacy_eeprom_free(priv); + + + /*netif_stop_queue(dev); */ + flush_workqueue(priv->workqueue); + + /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes + * priv->workqueue... so we can't take down the workqueue + * until now... 
*/ + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + iwl_legacy_free_traffic_mem(priv); + + free_irq(priv->pci_dev->irq, priv); + pci_disable_msi(priv->pci_dev); + pci_iounmap(pdev, priv->hw_base); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + iwl4965_uninit_drv(priv); + + dev_kfree_skb(priv->beacon_skb); + + ieee80211_free_hw(priv->hw); +} + +/* + * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask + * must be called under priv->lock and mac access + */ +void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask) +{ + iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask); +} + +/***************************************************************************** + * + * driver and module entry point + * + *****************************************************************************/ + +/* Hardware specific file defines the PCI IDs table for that hardware module */ +static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = { +#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965) + {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)}, + {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)}, +#endif /* CONFIG_IWL4965 */ + + {0} +}; +MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids); + +static struct pci_driver iwl4965_driver = { + .name = DRV_NAME, + .id_table = iwl4965_hw_card_ids, + .probe = iwl4965_pci_probe, + .remove = __devexit_p(iwl4965_pci_remove), + .driver.pm = IWL_LEGACY_PM_OPS, +}; + +static int __init iwl4965_init(void) +{ + + int ret; + pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); + pr_info(DRV_COPYRIGHT "\n"); + + ret = iwl4965_rate_control_register(); + if (ret) { + pr_err("Unable to register rate control algorithm: %d\n", ret); + return ret; + } + + ret = pci_register_driver(&iwl4965_driver); + if (ret) { + pr_err("Unable to initialize PCI module\n"); + goto error_register; + } + + return ret; + +error_register: + iwl4965_rate_control_unregister(); + return ret; +} + +static void __exit iwl4965_exit(void) +{ + pci_unregister_driver(&iwl4965_driver); + iwl4965_rate_control_unregister(); +} + +module_exit(iwl4965_exit); +module_init(iwl4965_init); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "debug output mask"); +#endif + +module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO); +MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); +module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO); +MODULE_PARM_DESC(queues_num, "number of hw queues."); +module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO); +MODULE_PARM_DESC(11n_disable, "disable 11n functionality"); +module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, + int, S_IRUGO); +MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); +module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO); +MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); diff --git a/trunk/drivers/net/wireless/iwlwifi/Kconfig b/trunk/drivers/net/wireless/iwlwifi/Kconfig index ae08498dfcad..57703d5209d7 100644 --- a/trunk/drivers/net/wireless/iwlwifi/Kconfig +++ b/trunk/drivers/net/wireless/iwlwifi/Kconfig @@ -102,28 +102,12 @@ config IWLWIFI_DEVICE_TRACING occur. 
endmenu -config IWLWIFI_DEVICE_TESTMODE - def_bool y +config IWLWIFI_DEVICE_SVTOOL + bool "iwlwifi device svtool support" depends on IWLWIFI - depends on NL80211_TESTMODE + select NL80211_TESTMODE help - This option enables the testmode support for iwlwifi device through - NL80211_TESTMODE. This provide the capabilities of enable user space - validation applications to interacts with the device through the - generic netlink message via NL80211_TESTMODE channel. - -config IWLWIFI_P2P - bool "iwlwifi experimental P2P support" - depends on IWLWIFI - help - This option enables experimental P2P support for some devices - based on microcode support. Since P2P support is still under - development, this option may even enable it for some devices - now that turn out to not support it in the future due to - microcode restrictions. - - To determine if your microcode supports the experimental P2P - offered by this option, check if the driver advertises AP - support when it is loaded. - - Say Y only if you want to experiment with P2P. + This option enables the svtool support for iwlwifi device through + NL80211_TESTMODE. svtool is a software validation tool that runs in + the user space and interacts with the device in the kernel space + through the generic netlink message via NL80211_TESTMODE channel. diff --git a/trunk/drivers/net/wireless/iwlwifi/Makefile b/trunk/drivers/net/wireless/iwlwifi/Makefile index 9dc84a7354db..c73e5ed8db5e 100644 --- a/trunk/drivers/net/wireless/iwlwifi/Makefile +++ b/trunk/drivers/net/wireless/iwlwifi/Makefile @@ -1,7 +1,7 @@ # WIFI obj-$(CONFIG_IWLWIFI) += iwlwifi.o -iwlwifi-objs := iwl-agn.o iwl-agn-rs.o iwl-mac80211.o -iwlwifi-objs += iwl-ucode.o iwl-agn-tx.o +iwlwifi-objs := iwl-agn.o iwl-agn-rs.o +iwlwifi-objs += iwl-agn-ucode.o iwl-agn-tx.o iwlwifi-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o iwlwifi-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o @@ -18,7 +18,7 @@ iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o -iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o +iwlwifi-$(CONFIG_IWLWIFI_DEVICE_SVTOOL) += iwl-sv-open.o CFLAGS_iwl-devtrace.o := -I$(src) diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-1000.c b/trunk/drivers/net/wireless/iwlwifi/iwl-1000.c index 1ef7bfc2ab25..dd008b0e6417 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-1000.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-1000.c @@ -124,10 +124,10 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv) { if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES && iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES) - cfg(priv)->base_params->num_of_queues = + priv->cfg->base_params->num_of_queues = iwlagn_mod_params.num_of_queues; - hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues; + hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues; priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID; hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE; @@ -135,19 +135,28 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv) hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ); - hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant); - if (cfg(priv)->rx_with_siso_diversity) + hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); + if (priv->cfg->rx_with_siso_diversity) hw_params(priv).rx_chains_num = 1; else hw_params(priv).rx_chains_num = - 
num_of_ant(cfg(priv)->valid_rx_ant); - hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant; - hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant; + num_of_ant(priv->cfg->valid_rx_ant); + hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant; + hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant; iwl1000_set_ct_threshold(priv); /* Set initial sensitivity parameters */ + /* Set initial calibration set */ hw_params(priv).sens = &iwl1000_sensitivity; + hw_params(priv).calib_init_cfg = + BIT(IWL_CALIB_XTAL) | + BIT(IWL_CALIB_LO) | + BIT(IWL_CALIB_TX_IQ) | + BIT(IWL_CALIB_TX_IQ_PERD) | + BIT(IWL_CALIB_BASE_BAND); + if (priv->cfg->need_dc_calib) + hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC); return 0; } diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-2000.c b/trunk/drivers/net/wireless/iwlwifi/iwl-2000.c index 094693328dbb..79431977a968 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-2000.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-2000.c @@ -86,7 +86,7 @@ static void iwl2000_nic_config(struct iwl_priv *priv) { iwl_rf_config(priv); - if (cfg(priv)->iq_invert) + if (priv->cfg->iq_invert) iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG, CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER); } @@ -120,10 +120,10 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv) { if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES && iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES) - cfg(priv)->base_params->num_of_queues = + priv->cfg->base_params->num_of_queues = iwlagn_mod_params.num_of_queues; - hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues; + hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues; priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID; hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE; @@ -131,19 +131,29 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv) hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ); - hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant); - if (cfg(priv)->rx_with_siso_diversity) + hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); + if (priv->cfg->rx_with_siso_diversity) hw_params(priv).rx_chains_num = 1; else hw_params(priv).rx_chains_num = - num_of_ant(cfg(priv)->valid_rx_ant); - hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant; - hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant; + num_of_ant(priv->cfg->valid_rx_ant); + hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant; + hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant; iwl2000_set_ct_threshold(priv); /* Set initial sensitivity parameters */ + /* Set initial calibration set */ hw_params(priv).sens = &iwl2000_sensitivity; + hw_params(priv).calib_init_cfg = + BIT(IWL_CALIB_XTAL) | + BIT(IWL_CALIB_LO) | + BIT(IWL_CALIB_TX_IQ) | + BIT(IWL_CALIB_BASE_BAND); + if (priv->cfg->need_dc_calib) + hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX; + if (priv->cfg->need_temp_offset_calib) + hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET); return 0; } @@ -248,19 +258,25 @@ static struct iwl_bt_params iwl2030_bt_params = { .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ .lib = &iwl2000_lib, \ .base_params = &iwl2000_base_params, \ + .need_dc_calib = true, \ .need_temp_offset_calib = true, \ .temp_offset_v2 = true, \ .led_mode = IWL_LED_RF_STATE, \ .iq_invert = true \ struct iwl_cfg iwl2000_2bgn_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN", + .name = "2000 Series 2x2 BGN", IWL_DEVICE_2000, .ht_params = &iwl2000_ht_params, }; +struct iwl_cfg 
iwl2000_2bg_cfg = { + .name = "2000 Series 2x2 BG", + IWL_DEVICE_2000, +}; + struct iwl_cfg iwl2000_2bgn_d_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 2200D BGN", + .name = "2000D Series 2x2 BGN", IWL_DEVICE_2000, .ht_params = &iwl2000_ht_params, }; @@ -275,6 +291,7 @@ struct iwl_cfg iwl2000_2bgn_d_cfg = { .lib = &iwl2030_lib, \ .base_params = &iwl2030_base_params, \ .bt_params = &iwl2030_bt_params, \ + .need_dc_calib = true, \ .need_temp_offset_calib = true, \ .temp_offset_v2 = true, \ .led_mode = IWL_LED_RF_STATE, \ @@ -282,11 +299,16 @@ struct iwl_cfg iwl2000_2bgn_d_cfg = { .iq_invert = true \ struct iwl_cfg iwl2030_2bgn_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", + .name = "2000 Series 2x2 BGN/BT", IWL_DEVICE_2030, .ht_params = &iwl2000_ht_params, }; +struct iwl_cfg iwl2030_2bg_cfg = { + .name = "2000 Series 2x2 BG/BT", + IWL_DEVICE_2030, +}; + #define IWL_DEVICE_105 \ .fw_name_pre = IWL105_FW_PRE, \ .ucode_api_max = IWL105_UCODE_API_MAX, \ @@ -296,6 +318,7 @@ struct iwl_cfg iwl2030_2bgn_cfg = { .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ .lib = &iwl2000_lib, \ .base_params = &iwl2000_base_params, \ + .need_dc_calib = true, \ .need_temp_offset_calib = true, \ .temp_offset_v2 = true, \ .led_mode = IWL_LED_RF_STATE, \ @@ -303,14 +326,19 @@ struct iwl_cfg iwl2030_2bgn_cfg = { .rx_with_siso_diversity = true, \ .iq_invert = true \ +struct iwl_cfg iwl105_bg_cfg = { + .name = "105 Series 1x1 BG", + IWL_DEVICE_105, +}; + struct iwl_cfg iwl105_bgn_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 105 BGN", + .name = "105 Series 1x1 BGN", IWL_DEVICE_105, .ht_params = &iwl2000_ht_params, }; struct iwl_cfg iwl105_bgn_d_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 105D BGN", + .name = "105D Series 1x1 BGN", IWL_DEVICE_105, .ht_params = &iwl2000_ht_params, }; @@ -325,6 +353,7 @@ struct iwl_cfg iwl105_bgn_d_cfg = { .lib = &iwl2030_lib, \ .base_params = &iwl2030_base_params, \ .bt_params = &iwl2030_bt_params, \ + .need_dc_calib = true, \ .need_temp_offset_calib = true, \ .temp_offset_v2 = true, \ .led_mode = IWL_LED_RF_STATE, \ @@ -332,8 +361,13 @@ struct iwl_cfg iwl105_bgn_d_cfg = { .rx_with_siso_diversity = true, \ .iq_invert = true \ +struct iwl_cfg iwl135_bg_cfg = { + .name = "135 Series 1x1 BG/BT", + IWL_DEVICE_135, +}; + struct iwl_cfg iwl135_bgn_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 135 BGN", + .name = "135 Series 1x1 BGN/BT", IWL_DEVICE_135, .ht_params = &iwl2000_ht_params, }; diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-5000.c b/trunk/drivers/net/wireless/iwlwifi/iwl-5000.c index b3a365fea7bb..f55fb2d1af52 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -134,10 +134,10 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = { #define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5) -static s32 iwl_temp_calib_to_offset(struct iwl_shared *shrd) +static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv) { u16 temperature, voltage; - __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(shrd, + __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_KELVIN_TEMPERATURE); temperature = le16_to_cpu(temp_calib[0]); @@ -151,7 +151,7 @@ static void iwl5150_set_ct_threshold(struct iwl_priv *priv) { const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF; s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) - - iwl_temp_calib_to_offset(priv->shrd); + iwl_temp_calib_to_offset(priv); hw_params(priv).ct_kill_threshold = threshold * volt2temp_coef; } @@ 
-166,10 +166,10 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv) { if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES && iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES) - cfg(priv)->base_params->num_of_queues = + priv->cfg->base_params->num_of_queues = iwlagn_mod_params.num_of_queues; - hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues; + hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues; priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID; hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE; @@ -178,15 +178,22 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv) hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ); - hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant); - hw_params(priv).rx_chains_num = num_of_ant(cfg(priv)->valid_rx_ant); - hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant; - hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant; + hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); + hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant); + hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant; + hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant; iwl5000_set_ct_threshold(priv); /* Set initial sensitivity parameters */ + /* Set initial calibration set */ hw_params(priv).sens = &iwl5000_sensitivity; + hw_params(priv).calib_init_cfg = + BIT(IWL_CALIB_XTAL) | + BIT(IWL_CALIB_LO) | + BIT(IWL_CALIB_TX_IQ) | + BIT(IWL_CALIB_TX_IQ_PERD) | + BIT(IWL_CALIB_BASE_BAND); return 0; } @@ -195,10 +202,10 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv) { if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES && iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES) - cfg(priv)->base_params->num_of_queues = + priv->cfg->base_params->num_of_queues = iwlagn_mod_params.num_of_queues; - hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues; + hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues; priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID; hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE; @@ -207,15 +214,22 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv) hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ); - hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant); - hw_params(priv).rx_chains_num = num_of_ant(cfg(priv)->valid_rx_ant); - hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant; - hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant; + hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); + hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant); + hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant; + hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant; iwl5150_set_ct_threshold(priv); /* Set initial sensitivity parameters */ + /* Set initial calibration set */ hw_params(priv).sens = &iwl5150_sensitivity; + hw_params(priv).calib_init_cfg = + BIT(IWL_CALIB_LO) | + BIT(IWL_CALIB_TX_IQ) | + BIT(IWL_CALIB_BASE_BAND); + if (priv->cfg->need_dc_calib) + hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC); return 0; } @@ -223,7 +237,7 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv) static void iwl5150_temperature(struct iwl_priv *priv) { u32 vt = 0; - s32 offset = iwl_temp_calib_to_offset(priv->shrd); + s32 offset = iwl_temp_calib_to_offset(priv); vt = le32_to_cpu(priv->statistics.common.temperature); vt = vt / 
IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset; @@ -420,7 +434,7 @@ struct iwl_cfg iwl5350_agn_cfg = { .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \ .lib = &iwl5150_lib, \ .base_params = &iwl5000_base_params, \ - .no_xtal_calib = true, \ + .need_dc_calib = true, \ .led_mode = IWL_LED_BLINK, \ .internal_wimax_coex = true diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-6000.c b/trunk/drivers/net/wireless/iwlwifi/iwl-6000.c index 54b753399e6e..c840c78278db 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-6000.c @@ -46,12 +46,11 @@ #include "iwl-cfg.h" /* Highest firmware API version supported */ -#define IWL6000_UCODE_API_MAX 6 +#define IWL6000_UCODE_API_MAX 4 #define IWL6050_UCODE_API_MAX 5 #define IWL6000G2_UCODE_API_MAX 6 /* Oldest version we won't warn about */ -#define IWL6000_UCODE_API_OK 4 #define IWL6000G2_UCODE_API_OK 5 /* Lowest firmware API version supported */ @@ -81,7 +80,7 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv) static void iwl6050_additional_nic_config(struct iwl_priv *priv) { /* Indicate calibration version to uCode. */ - if (iwl_eeprom_calib_version(priv->shrd) >= 6) + if (iwlagn_eeprom_calib_version(priv) >= 6) iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG, CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); } @@ -89,7 +88,7 @@ static void iwl6050_additional_nic_config(struct iwl_priv *priv) static void iwl6150_additional_nic_config(struct iwl_priv *priv) { /* Indicate calibration version to uCode. */ - if (iwl_eeprom_calib_version(priv->shrd) >= 6) + if (iwlagn_eeprom_calib_version(priv) >= 6) iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG, CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG, @@ -102,14 +101,14 @@ static void iwl6000_nic_config(struct iwl_priv *priv) iwl_rf_config(priv); /* no locking required for register write */ - if (cfg(priv)->pa_type == IWL_PA_INTERNAL) { + if (priv->cfg->pa_type == IWL_PA_INTERNAL) { /* 2x2 IPA phy type */ iwl_write32(bus(priv), CSR_GP_DRIVER_REG, CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA); } /* do additional nic configuration if needed */ - if (cfg(priv)->additional_nic_config) - cfg(priv)->additional_nic_config(priv); + if (priv->cfg->additional_nic_config) + priv->cfg->additional_nic_config(priv); } static struct iwl_sensitivity_ranges iwl6000_sensitivity = { @@ -141,10 +140,10 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv) { if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES && iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES) - cfg(priv)->base_params->num_of_queues = + priv->cfg->base_params->num_of_queues = iwlagn_mod_params.num_of_queues; - hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues; + hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues; priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID; hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE; @@ -153,19 +152,29 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv) hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ); - hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant); - if (cfg(priv)->rx_with_siso_diversity) + hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); + if (priv->cfg->rx_with_siso_diversity) hw_params(priv).rx_chains_num = 1; else hw_params(priv).rx_chains_num = - num_of_ant(cfg(priv)->valid_rx_ant); - hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant; - hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant; + 
num_of_ant(priv->cfg->valid_rx_ant); + hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant; + hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant; iwl6000_set_ct_threshold(priv); /* Set initial sensitivity parameters */ + /* Set initial calibration set */ hw_params(priv).sens = &iwl6000_sensitivity; + hw_params(priv).calib_init_cfg = + BIT(IWL_CALIB_XTAL) | + BIT(IWL_CALIB_LO) | + BIT(IWL_CALIB_TX_IQ) | + BIT(IWL_CALIB_BASE_BAND); + if (priv->cfg->need_dc_calib) + hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX; + if (priv->cfg->need_temp_offset_calib) + hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET); return 0; } @@ -355,6 +364,7 @@ static struct iwl_bt_params iwl6000_bt_params = { .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ .lib = &iwl6000_lib, \ .base_params = &iwl6000_g2_base_params, \ + .need_dc_calib = true, \ .need_temp_offset_calib = true, \ .led_mode = IWL_LED_RF_STATE @@ -396,6 +406,7 @@ struct iwl_cfg iwl6005_2agn_d_cfg = { .lib = &iwl6030_lib, \ .base_params = &iwl6000_g2_base_params, \ .bt_params = &iwl6000_bt_params, \ + .need_dc_calib = true, \ .need_temp_offset_calib = true, \ .led_mode = IWL_LED_RF_STATE, \ .adv_pm = true \ @@ -423,11 +434,21 @@ struct iwl_cfg iwl6030_2bg_cfg = { }; struct iwl_cfg iwl6035_2agn_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", + .name = "6035 Series 2x2 AGN/BT", IWL_DEVICE_6030, .ht_params = &iwl6000_ht_params, }; +struct iwl_cfg iwl6035_2abg_cfg = { + .name = "6035 Series 2x2 ABG/BT", + IWL_DEVICE_6030, +}; + +struct iwl_cfg iwl6035_2bg_cfg = { + .name = "6035 Series 2x2 BG/BT", + IWL_DEVICE_6030, +}; + struct iwl_cfg iwl1030_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN", IWL_DEVICE_6030, @@ -458,7 +479,6 @@ struct iwl_cfg iwl130_bg_cfg = { #define IWL_DEVICE_6000i \ .fw_name_pre = IWL6000_FW_PRE, \ .ucode_api_max = IWL6000_UCODE_API_MAX, \ - .ucode_api_ok = IWL6000_UCODE_API_OK, \ .ucode_api_min = IWL6000_UCODE_API_MIN, \ .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \ .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \ @@ -496,6 +516,7 @@ struct iwl_cfg iwl6000i_2bg_cfg = { .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ .base_params = &iwl6050_base_params, \ + .need_dc_calib = true, \ .led_mode = IWL_LED_BLINK, \ .internal_wimax_coex = true @@ -519,6 +540,7 @@ struct iwl_cfg iwl6050_2abg_cfg = { .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \ .base_params = &iwl6050_base_params, \ + .need_dc_calib = true, \ .led_mode = IWL_LED_BLINK, \ .internal_wimax_coex = true @@ -537,17 +559,17 @@ struct iwl_cfg iwl6000_3agn_cfg = { .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN", .fw_name_pre = IWL6000_FW_PRE, .ucode_api_max = IWL6000_UCODE_API_MAX, - .ucode_api_ok = IWL6000_UCODE_API_OK, .ucode_api_min = IWL6000_UCODE_API_MIN, .eeprom_ver = EEPROM_6000_EEPROM_VERSION, .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, .lib = &iwl6000_lib, .base_params = &iwl6000_base_params, .ht_params = &iwl6000_ht_params, + .need_dc_calib = true, .led_mode = IWL_LED_BLINK, }; -MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK)); +MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX)); MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-calib.c 
b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-calib.c index 50ff849c9f67..03bac48558b2 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-calib.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-calib.c @@ -82,64 +82,56 @@ struct statistics_general_data { u32 beacon_energy_c; }; -int iwl_send_calib_results(struct iwl_trans *trans) +int iwl_send_calib_results(struct iwl_priv *priv) { + int ret = 0; + int i = 0; + struct iwl_host_cmd hcmd = { .id = REPLY_PHY_CALIBRATION_CMD, .flags = CMD_SYNC, }; - struct iwl_calib_result *res; - - list_for_each_entry(res, &trans->calib_results, list) { - int ret; - - hcmd.len[0] = res->cmd_len; - hcmd.data[0] = &res->hdr; - hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; - ret = iwl_trans_send_cmd(trans, &hcmd); - if (ret) { - IWL_ERR(trans, "Error %d on calib cmd %d\n", - ret, res->hdr.op_code); - return ret; + + for (i = 0; i < IWL_CALIB_MAX; i++) { + if ((BIT(i) & hw_params(priv).calib_init_cfg) && + priv->calib_results[i].buf) { + hcmd.len[0] = priv->calib_results[i].buf_len; + hcmd.data[0] = priv->calib_results[i].buf; + hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; + ret = iwl_trans_send_cmd(trans(priv), &hcmd); + if (ret) { + IWL_ERR(priv, "Error %d iteration %d\n", + ret, i); + break; + } } } - return 0; + return ret; } -int iwl_calib_set(struct iwl_trans *trans, - const struct iwl_calib_hdr *cmd, int len) +int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len) { - struct iwl_calib_result *res, *tmp; - - res = kmalloc(sizeof(*res) + len - sizeof(struct iwl_calib_hdr), - GFP_ATOMIC); - if (!res) - return -ENOMEM; - memcpy(&res->hdr, cmd, len); - res->cmd_len = len; - - list_for_each_entry(tmp, &trans->calib_results, list) { - if (tmp->hdr.op_code == res->hdr.op_code) { - list_replace(&tmp->list, &res->list); - kfree(tmp); - return 0; - } + if (res->buf_len != len) { + kfree(res->buf); + res->buf = kzalloc(len, GFP_ATOMIC); } + if (unlikely(res->buf == NULL)) + return -ENOMEM; - /* wasn't in list already */ - list_add_tail(&res->list, &trans->calib_results); - + res->buf_len = len; + memcpy(res->buf, buf, len); return 0; } -void iwl_calib_free_results(struct iwl_trans *trans) +void iwl_calib_free_results(struct iwl_priv *priv) { - struct iwl_calib_result *res, *tmp; + int i; - list_for_each_entry_safe(res, tmp, &trans->calib_results, list) { - list_del(&res->list); - kfree(res); + for (i = 0; i < IWL_CALIB_MAX; i++) { + kfree(priv->calib_results[i].buf); + priv->calib_results[i].buf = NULL; + priv->calib_results[i].buf_len = 0; } } @@ -513,7 +505,7 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv) iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]); - if (cfg(priv)->base_params->hd_v2) { + if (priv->cfg->base_params->hd_v2) { cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] = HD_INA_NON_SQUARE_DET_OFDM_DATA_V2; cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] = @@ -847,7 +839,7 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig, * connect the first valid tx chain */ first_chain = - find_first_chain(cfg(priv)->valid_tx_ant); + find_first_chain(priv->cfg->valid_tx_ant); data->disconn_array[first_chain] = 0; active_chains |= BIT(first_chain); IWL_DEBUG_CALIB(priv, @@ -890,7 +882,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv, continue; } - delta_g = (cfg(priv)->base_params->chain_noise_scale * + delta_g = (priv->cfg->base_params->chain_noise_scale * ((s32)average_noise[default_chain] - (s32)average_noise[i])) / 1500; @@ -1047,8 +1039,8 @@ void 
iwl_chain_noise_calibration(struct iwl_priv *priv) return; /* Analyze signal for disconnected antenna */ - if (cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist) { + if (priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist) { /* Disable disconnected antenna algorithm for advanced bt coex, assuming valid antennas are connected */ data->active_chains = hw_params(priv).valid_rx_ant; @@ -1082,7 +1074,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv) iwlagn_gain_computation(priv, average_noise, min_average_noise_antenna_i, min_average_noise, - find_first_chain(cfg(priv)->valid_rx_ant)); + find_first_chain(priv->cfg->valid_rx_ant)); /* Some power changes may have been made during the calibration. * Update and commit the RXON diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-calib.h b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-calib.h index 10275ce92bde..a869fc9205d2 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-calib.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-calib.h @@ -72,4 +72,8 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv); void iwl_init_sensitivity(struct iwl_priv *priv); void iwl_reset_run_time_calib(struct iwl_priv *priv); +int iwl_send_calib_results(struct iwl_priv *priv); +int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len); +void iwl_calib_free_results(struct iwl_priv *priv); + #endif /* __iwl_calib_h__ */ diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-lib.c index 64cf439035c3..1a52ed29f2d6 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-lib.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-lib.c @@ -32,7 +32,6 @@ #include #include -#include "iwl-wifi.h" #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-io.h" @@ -93,11 +92,11 @@ void iwlagn_temperature(struct iwl_priv *priv) iwl_tt_handler(priv); } -u16 iwl_eeprom_calib_version(struct iwl_shared *shrd) +u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv) { struct iwl_eeprom_calib_hdr *hdr; - hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(shrd, + hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv, EEPROM_CALIB_ALL); return hdr->version; @@ -106,7 +105,7 @@ u16 iwl_eeprom_calib_version(struct iwl_shared *shrd) /* * EEPROM */ -static u32 eeprom_indirect_address(const struct iwl_shared *shrd, u32 address) +static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address) { u16 offset = 0; @@ -115,31 +114,31 @@ static u32 eeprom_indirect_address(const struct iwl_shared *shrd, u32 address) switch (address & INDIRECT_TYPE_MSK) { case INDIRECT_HOST: - offset = iwl_eeprom_query16(shrd, EEPROM_LINK_HOST); + offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST); break; case INDIRECT_GENERAL: - offset = iwl_eeprom_query16(shrd, EEPROM_LINK_GENERAL); + offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL); break; case INDIRECT_REGULATORY: - offset = iwl_eeprom_query16(shrd, EEPROM_LINK_REGULATORY); + offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY); break; case INDIRECT_TXP_LIMIT: - offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT); + offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT); break; case INDIRECT_TXP_LIMIT_SIZE: - offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT_SIZE); + offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE); break; case INDIRECT_CALIBRATION: - offset = iwl_eeprom_query16(shrd, EEPROM_LINK_CALIBRATION); + offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION); break; case 
INDIRECT_PROCESS_ADJST: - offset = iwl_eeprom_query16(shrd, EEPROM_LINK_PROCESS_ADJST); + offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST); break; case INDIRECT_OTHERS: - offset = iwl_eeprom_query16(shrd, EEPROM_LINK_OTHERS); + offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS); break; default: - IWL_ERR(shrd->trans, "illegal indirect type: 0x%X\n", + IWL_ERR(priv, "illegal indirect type: 0x%X\n", address & INDIRECT_TYPE_MSK); break; } @@ -148,11 +147,11 @@ static u32 eeprom_indirect_address(const struct iwl_shared *shrd, u32 address) return (address & ADDRESS_MSK) + (offset << 1); } -const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset) +const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset) { - u32 address = eeprom_indirect_address(shrd, offset); - BUG_ON(address >= shrd->cfg->base_params->eeprom_size); - return &shrd->eeprom[address]; + u32 address = eeprom_indirect_address(priv, offset); + BUG_ON(address >= priv->cfg->base_params->eeprom_size); + return &priv->eeprom[address]; } struct iwl_mod_params iwlagn_mod_params = { @@ -233,7 +232,7 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control) IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK | IWL_PAN_SCD_MULTICAST_MSK; - if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE) + if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE) flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK; IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n", @@ -375,15 +374,15 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv) BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) != sizeof(basic.bt3_lookup_table)); - if (cfg(priv)->bt_params) { - if (cfg(priv)->bt_params->bt_session_2) { + if (priv->cfg->bt_params) { + if (priv->cfg->bt_params->bt_session_2) { bt_cmd_2000.prio_boost = cpu_to_le32( - cfg(priv)->bt_params->bt_prio_boost); + priv->cfg->bt_params->bt_prio_boost); bt_cmd_2000.tx_prio_boost = 0; bt_cmd_2000.rx_prio_boost = 0; } else { bt_cmd_6000.prio_boost = - cfg(priv)->bt_params->bt_prio_boost; + priv->cfg->bt_params->bt_prio_boost; bt_cmd_6000.tx_prio_boost = 0; bt_cmd_6000.rx_prio_boost = 0; } @@ -431,7 +430,7 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv) priv->bt_full_concurrent ? 
"full concurrency" : "3-wire"); - if (cfg(priv)->bt_params->bt_session_2) { + if (priv->cfg->bt_params->bt_session_2) { memcpy(&bt_cmd_2000.basic, &basic, sizeof(basic)); ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG, @@ -800,8 +799,8 @@ static bool is_single_rx_stream(struct iwl_priv *priv) */ static int iwl_get_active_rx_chain_count(struct iwl_priv *priv) { - if (cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist && + if (priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist && (priv->bt_full_concurrent || priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) { /* @@ -828,7 +827,6 @@ static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt) case IEEE80211_SMPS_STATIC: case IEEE80211_SMPS_DYNAMIC: return IWL_NUM_IDLE_CHAINS_SINGLE; - case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_OFF: return active_cnt; default: @@ -872,8 +870,8 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx) else active_chains = hw_params(priv).valid_rx_ant; - if (cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist && + if (priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist && (priv->bt_full_concurrent || priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) { /* @@ -935,359 +933,53 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid) return ant; } -#ifdef CONFIG_PM_SLEEP -static void iwlagn_convert_p1k(u16 *p1k, __le16 *out) +/* notification wait support */ +void iwlagn_init_notification_wait(struct iwl_priv *priv, + struct iwl_notification_wait *wait_entry, + u8 cmd, + void (*fn)(struct iwl_priv *priv, + struct iwl_rx_packet *pkt, + void *data), + void *fn_data) { - int i; - - for (i = 0; i < IWLAGN_P1K_SIZE; i++) - out[i] = cpu_to_le16(p1k[i]); + wait_entry->fn = fn; + wait_entry->fn_data = fn_data; + wait_entry->cmd = cmd; + wait_entry->triggered = false; + wait_entry->aborted = false; + + spin_lock_bh(&priv->notif_wait_lock); + list_add(&wait_entry->list, &priv->notif_waits); + spin_unlock_bh(&priv->notif_wait_lock); } -struct wowlan_key_data { - struct iwl_rxon_context *ctx; - struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc; - struct iwlagn_wowlan_tkip_params_cmd *tkip; - const u8 *bssid; - bool error, use_rsc_tsc, use_tkip; -}; - - -static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *key, - void *_data) +int iwlagn_wait_notification(struct iwl_priv *priv, + struct iwl_notification_wait *wait_entry, + unsigned long timeout) { - struct iwl_priv *priv = hw->priv; - struct wowlan_key_data *data = _data; - struct iwl_rxon_context *ctx = data->ctx; - struct aes_sc *aes_sc, *aes_tx_sc = NULL; - struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL; - struct iwlagn_p1k_cache *rx_p1ks; - u8 *rx_mic_key; - struct ieee80211_key_seq seq; - u32 cur_rx_iv32 = 0; - u16 p1k[IWLAGN_P1K_SIZE]; - int ret, i; - - mutex_lock(&priv->shrd->mutex); - - if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || - key->cipher == WLAN_CIPHER_SUITE_WEP104) && - !sta && !ctx->key_mapping_keys) - ret = iwl_set_default_wep_key(priv, ctx, key); - else - ret = iwl_set_dynamic_key(priv, ctx, key, sta); - - if (ret) { - IWL_ERR(priv, "Error setting key during suspend!\n"); - data->error = true; - } - - switch (key->cipher) { - case WLAN_CIPHER_SUITE_TKIP: - if (sta) { - tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc; - tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc; - - rx_p1ks = data->tkip->rx_uni; - - 
ieee80211_get_key_tx_seq(key, &seq); - tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16); - tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32); - - ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k); - iwlagn_convert_p1k(p1k, data->tkip->tx.p1k); - - memcpy(data->tkip->mic_keys.tx, - &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], - IWLAGN_MIC_KEY_SIZE); - - rx_mic_key = data->tkip->mic_keys.rx_unicast; - } else { - tkip_sc = - data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc; - rx_p1ks = data->tkip->rx_multi; - rx_mic_key = data->tkip->mic_keys.rx_mcast; - } - - /* - * For non-QoS this relies on the fact that both the uCode and - * mac80211 use TID 0 (as they need to to avoid replay attacks) - * for checking the IV in the frames. - */ - for (i = 0; i < IWLAGN_NUM_RSC; i++) { - ieee80211_get_key_rx_seq(key, i, &seq); - tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16); - tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32); - /* wrapping isn't allowed, AP must rekey */ - if (seq.tkip.iv32 > cur_rx_iv32) - cur_rx_iv32 = seq.tkip.iv32; - } - - ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k); - iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k); - ieee80211_get_tkip_rx_p1k(key, data->bssid, - cur_rx_iv32 + 1, p1k); - iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k); - - memcpy(rx_mic_key, - &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], - IWLAGN_MIC_KEY_SIZE); - - data->use_tkip = true; - data->use_rsc_tsc = true; - break; - case WLAN_CIPHER_SUITE_CCMP: - if (sta) { - u8 *pn = seq.ccmp.pn; - - aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc; - aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc; - - ieee80211_get_key_tx_seq(key, &seq); - aes_tx_sc->pn = cpu_to_le64( - (u64)pn[5] | - ((u64)pn[4] << 8) | - ((u64)pn[3] << 16) | - ((u64)pn[2] << 24) | - ((u64)pn[1] << 32) | - ((u64)pn[0] << 40)); - } else - aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc; - - /* - * For non-QoS this relies on the fact that both the uCode and - * mac80211 use TID 0 for checking the IV in the frames. 
- */ - for (i = 0; i < IWLAGN_NUM_RSC; i++) { - u8 *pn = seq.ccmp.pn; - - ieee80211_get_key_rx_seq(key, i, &seq); - aes_sc->pn = cpu_to_le64( - (u64)pn[5] | - ((u64)pn[4] << 8) | - ((u64)pn[3] << 16) | - ((u64)pn[2] << 24) | - ((u64)pn[1] << 32) | - ((u64)pn[0] << 40)); - } - data->use_rsc_tsc = true; - break; - } - - mutex_unlock(&priv->shrd->mutex); -} - -int iwlagn_send_patterns(struct iwl_priv *priv, - struct cfg80211_wowlan *wowlan) -{ - struct iwlagn_wowlan_patterns_cmd *pattern_cmd; - struct iwl_host_cmd cmd = { - .id = REPLY_WOWLAN_PATTERNS, - .dataflags[0] = IWL_HCMD_DFL_NOCOPY, - .flags = CMD_SYNC, - }; - int i, err; - - if (!wowlan->n_patterns) - return 0; - - cmd.len[0] = sizeof(*pattern_cmd) + - wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern); - - pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL); - if (!pattern_cmd) - return -ENOMEM; + int ret; - pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns); + ret = wait_event_timeout(priv->notif_waitq, + wait_entry->triggered || wait_entry->aborted, + timeout); - for (i = 0; i < wowlan->n_patterns; i++) { - int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); + spin_lock_bh(&priv->notif_wait_lock); + list_del(&wait_entry->list); + spin_unlock_bh(&priv->notif_wait_lock); - memcpy(&pattern_cmd->patterns[i].mask, - wowlan->patterns[i].mask, mask_len); - memcpy(&pattern_cmd->patterns[i].pattern, - wowlan->patterns[i].pattern, - wowlan->patterns[i].pattern_len); - pattern_cmd->patterns[i].mask_size = mask_len; - pattern_cmd->patterns[i].pattern_size = - wowlan->patterns[i].pattern_len; - } + if (wait_entry->aborted) + return -EIO; - cmd.data[0] = pattern_cmd; - err = iwl_trans_send_cmd(trans(priv), &cmd); - kfree(pattern_cmd); - return err; + /* return value is always >= 0 */ + if (ret <= 0) + return -ETIMEDOUT; + return 0; } -int iwlagn_suspend(struct iwl_priv *priv, - struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) +void iwlagn_remove_notification(struct iwl_priv *priv, + struct iwl_notification_wait *wait_entry) { - struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd; - struct iwl_rxon_cmd rxon; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd; - struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {}; - struct iwlagn_d3_config_cmd d3_cfg_cmd = {}; - struct wowlan_key_data key_data = { - .ctx = ctx, - .bssid = ctx->active.bssid_addr, - .use_rsc_tsc = false, - .tkip = &tkip_cmd, - .use_tkip = false, - }; - int ret, i; - u16 seq; - - key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); - if (!key_data.rsc_tsc) - return -ENOMEM; - - memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd)); - - /* - * We know the last used seqno, and the uCode expects to know that - * one, it will increment before TX. - */ - seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ; - wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq); - - /* - * For QoS counters, we store the one to use next, so subtract 0x10 - * since the uCode will add 0x10 before using the value. 
- */ - for (i = 0; i < IWL_MAX_TID_COUNT; i++) { - seq = priv->tid_data[IWL_AP_ID][i].seq_number; - seq -= 0x10; - wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq); - } - - if (wowlan->disconnect) - wakeup_filter_cmd.enabled |= - cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS | - IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE); - if (wowlan->magic_pkt) - wakeup_filter_cmd.enabled |= - cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET); - if (wowlan->gtk_rekey_failure) - wakeup_filter_cmd.enabled |= - cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL); - if (wowlan->eap_identity_req) - wakeup_filter_cmd.enabled |= - cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ); - if (wowlan->four_way_handshake) - wakeup_filter_cmd.enabled |= - cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE); - if (wowlan->n_patterns) - wakeup_filter_cmd.enabled |= - cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH); - - if (wowlan->rfkill_release) - d3_cfg_cmd.wakeup_flags |= - cpu_to_le32(IWLAGN_D3_WAKEUP_RFKILL); - - iwl_scan_cancel_timeout(priv, 200); - - memcpy(&rxon, &ctx->active, sizeof(rxon)); - - iwl_trans_stop_device(trans(priv)); - - priv->shrd->wowlan = true; - - ret = iwl_load_ucode_wait_alive(trans(priv), IWL_UCODE_WOWLAN); - if (ret) - goto out; - - /* now configure WoWLAN ucode */ - ret = iwl_alive_start(priv); - if (ret) - goto out; - - memcpy(&ctx->staging, &rxon, sizeof(rxon)); - ret = iwlagn_commit_rxon(priv, ctx); - if (ret) - goto out; - - ret = iwl_power_update_mode(priv, true); - if (ret) - goto out; - - if (!iwlagn_mod_params.sw_crypto) { - /* mark all keys clear */ - priv->ucode_key_table = 0; - ctx->key_mapping_keys = 0; - - /* - * This needs to be unlocked due to lock ordering - * constraints. Since we're in the suspend path - * that isn't really a problem though. - */ - mutex_unlock(&priv->shrd->mutex); - ieee80211_iter_keys(priv->hw, ctx->vif, - iwlagn_wowlan_program_keys, - &key_data); - mutex_lock(&priv->shrd->mutex); - if (key_data.error) { - ret = -EIO; - goto out; - } - - if (key_data.use_rsc_tsc) { - struct iwl_host_cmd rsc_tsc_cmd = { - .id = REPLY_WOWLAN_TSC_RSC_PARAMS, - .flags = CMD_SYNC, - .data[0] = key_data.rsc_tsc, - .dataflags[0] = IWL_HCMD_DFL_NOCOPY, - .len[0] = sizeof(key_data.rsc_tsc), - }; - - ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd); - if (ret) - goto out; - } - - if (key_data.use_tkip) { - ret = iwl_trans_send_cmd_pdu(trans(priv), - REPLY_WOWLAN_TKIP_PARAMS, - CMD_SYNC, sizeof(tkip_cmd), - &tkip_cmd); - if (ret) - goto out; - } - - if (priv->have_rekey_data) { - memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd)); - memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN); - kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN); - memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN); - kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN); - kek_kck_cmd.replay_ctr = priv->replay_ctr; - - ret = iwl_trans_send_cmd_pdu(trans(priv), - REPLY_WOWLAN_KEK_KCK_MATERIAL, - CMD_SYNC, sizeof(kek_kck_cmd), - &kek_kck_cmd); - if (ret) - goto out; - } - } - - ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_D3_CONFIG, CMD_SYNC, - sizeof(d3_cfg_cmd), &d3_cfg_cmd); - if (ret) - goto out; - - ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WOWLAN_WAKEUP_FILTER, - CMD_SYNC, sizeof(wakeup_filter_cmd), - &wakeup_filter_cmd); - if (ret) - goto out; - - ret = iwlagn_send_patterns(priv, wowlan); - out: - kfree(key_data.rsc_tsc); - return ret; + spin_lock_bh(&priv->notif_wait_lock); + list_del(&wait_entry->list); + spin_unlock_bh(&priv->notif_wait_lock); } -#endif diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 
b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rs.c index 334b5ae8fdd4..66118cea2af3 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rs.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rs.c @@ -298,7 +298,7 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data, } else return IWL_MAX_TID_COUNT; - if (unlikely(tid >= IWL_MAX_TID_COUNT)) + if (unlikely(tid >= TID_MAX_LOAD_COUNT)) return IWL_MAX_TID_COUNT; tl = &lq_data->load[tid]; @@ -352,7 +352,7 @@ static void rs_program_fix_rate(struct iwl_priv *priv, lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ -#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE +#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL /* testmode has higher priority to overwirte the fixed rate */ if (priv->tm_fixed_rate) lq_sta->dbg_fixed_rate = priv->tm_fixed_rate; @@ -379,7 +379,7 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid) s32 index; struct iwl_traffic_load *tl = NULL; - if (tid >= IWL_MAX_TID_COUNT) + if (tid >= TID_MAX_LOAD_COUNT) return 0; tl = &(lq_data->load[tid]); @@ -444,11 +444,11 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid, struct iwl_lq_sta *lq_data, struct ieee80211_sta *sta) { - if (tid < IWL_MAX_TID_COUNT) + if (tid < TID_MAX_LOAD_COUNT) rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); else - IWL_ERR(priv, "tid exceeds max TID count: %d/%d\n", - tid, IWL_MAX_TID_COUNT); + IWL_ERR(priv, "tid exceeds max load count: %d/%d\n", + tid, TID_MAX_LOAD_COUNT); } static inline int get_num_of_ant_from_rate(u32 rate_n_flags) @@ -1081,12 +1081,12 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, if (sta && sta->supp_rates[sband->band]) rs_rate_scale_perform(priv, skb, sta, lq_sta); -#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_TESTMODE) +#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_SVTOOL) if ((priv->tm_fixed_rate) && (priv->tm_fixed_rate != lq_sta->dbg_fixed_rate)) rs_program_fix_rate(priv, lq_sta); #endif - if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist) + if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist) rs_bt_update_lq(priv, ctx, lq_sta); } @@ -1458,8 +1458,10 @@ static int rs_move_legacy_other(struct iwl_priv *priv, break; case IWL_BT_COEX_TRAFFIC_LOAD_LOW: /* avoid antenna B unless MIMO */ + valid_tx_ant = + first_antenna(hw_params(priv).valid_tx_ant); if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2) - tbl->action = IWL_LEGACY_SWITCH_SISO; + tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; break; case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: @@ -1634,8 +1636,10 @@ static int rs_move_siso_to_other(struct iwl_priv *priv, break; case IWL_BT_COEX_TRAFFIC_LOAD_LOW: /* avoid antenna B unless MIMO */ + valid_tx_ant = + first_antenna(hw_params(priv).valid_tx_ant); if (tbl->action == IWL_SISO_SWITCH_ANTENNA2) - tbl->action = IWL_SISO_SWITCH_MIMO2_AB; + tbl->action = IWL_SISO_SWITCH_ANTENNA1; break; case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: @@ -2273,7 +2277,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv, tid = rs_tl_add_packet(lq_sta, hdr); if ((tid != IWL_MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) { - tid_data = &priv->tid_data[lq_sta->lq.sta_id][tid]; + tid_data = &priv->shrd->tid_data[lq_sta->lq.sta_id][tid]; if (tid_data->agg.state == IWL_AGG_OFF) lq_sta->is_agg = 0; else @@ -2645,7 +2649,8 @@ static void rs_rate_scale_perform(struct iwl_priv *priv, 
(lq_sta->tx_agg_tid_en & (1 << tid)) && (tid != IWL_MAX_TID_COUNT)) { u8 sta_id = lq_sta->lq.sta_id; - tid_data = &priv->tid_data[sta_id][tid]; + tid_data = + &priv->shrd->tid_data[sta_id][tid]; if (tid_data->agg.state == IWL_AGG_OFF) { IWL_DEBUG_RATE(priv, "try to aggregate tid %d\n", @@ -2903,7 +2908,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i if (sband->band == IEEE80211_BAND_5GHZ) lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; lq_sta->is_agg = 0; -#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE +#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL priv->tm_fixed_rate = 0; #endif #ifdef CONFIG_MAC80211_DEBUGFS @@ -3054,11 +3059,11 @@ static void rs_fill_link_cmd(struct iwl_priv *priv, * overwrite if needed, pass aggregation time limit * to uCode in uSec */ - if (priv && cfg(priv)->bt_params && - cfg(priv)->bt_params->agg_time_limit && + if (priv && priv->cfg->bt_params && + priv->cfg->bt_params->agg_time_limit && priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) lq_cmd->agg_params.agg_time_limit = - cpu_to_le16(cfg(priv)->bt_params->agg_time_limit); + cpu_to_le16(priv->cfg->bt_params->agg_time_limit); } static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rs.h index 6675b3c816d9..f4f6deb829ae 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rs.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rs.h @@ -281,6 +281,7 @@ enum { #define TID_QUEUE_CELL_SPACING 50 /*mS */ #define TID_QUEUE_MAX_SIZE 20 #define TID_ROUND_VALUE 5 /* mS */ +#define TID_MAX_LOAD_COUNT 8 #define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING) #define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y)) @@ -401,7 +402,7 @@ struct iwl_lq_sta { struct iwl_link_quality_cmd lq; struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ - struct iwl_traffic_load load[IWL_MAX_TID_COUNT]; + struct iwl_traffic_load load[TID_MAX_LOAD_COUNT]; u8 tx_agg_tid_en; #ifdef CONFIG_MAC80211_DEBUGFS struct dentry *rs_sta_dbgfs_scale_table_file; diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rx.c index b22b2976f899..5af9e6258a16 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rx.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rx.c @@ -117,7 +117,6 @@ const char *get_cmd_string(u8 cmd) IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS); IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL); IWL_CMD(REPLY_WOWLAN_GET_STATUS); - IWL_CMD(REPLY_D3_CONFIG); default: return "UNKNOWN"; @@ -318,7 +317,7 @@ static bool iwlagn_good_plcp_health(struct iwl_priv *priv, unsigned int msecs) { int delta; - int threshold = cfg(priv)->base_params->plcp_delta_threshold; + int threshold = priv->cfg->base_params->plcp_delta_threshold; if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) { IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n"); @@ -583,8 +582,8 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv, iwlagn_rx_calc_noise(priv); queue_work(priv->shrd->workqueue, &priv->run_time_calib_work); } - if (cfg(priv)->lib->temperature && change) - cfg(priv)->lib->temperature(priv); + if (priv->cfg->lib->temperature && change) + priv->cfg->lib->temperature(priv); return 0; } @@ -801,8 +800,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv, ctx->active.bssid_addr)) continue; ctx->last_tx_rejected = false; - iwl_trans_wake_any_queue(trans(priv), ctx->ctxid, - "channel got active"); + 
iwl_trans_wake_any_queue(trans(priv), ctx->ctxid); } } @@ -1034,50 +1032,6 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv, return 0; } -static int iwlagn_rx_noa_notification(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb, - struct iwl_device_cmd *cmd) -{ - struct iwl_wipan_noa_data *new_data, *old_data; - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_wipan_noa_notification *noa_notif = (void *)pkt->u.raw; - - /* no condition -- we're in softirq */ - old_data = rcu_dereference_protected(priv->noa_data, true); - - if (noa_notif->noa_active) { - u32 len = le16_to_cpu(noa_notif->noa_attribute.length); - u32 copylen = len; - - /* EID, len, OUI, subtype */ - len += 1 + 1 + 3 + 1; - /* P2P id, P2P length */ - len += 1 + 2; - copylen += 1 + 2; - - new_data = kmalloc(sizeof(*new_data) + len, GFP_ATOMIC); - if (new_data) { - new_data->length = len; - new_data->data[0] = WLAN_EID_VENDOR_SPECIFIC; - new_data->data[1] = len - 2; /* not counting EID, len */ - new_data->data[2] = (WLAN_OUI_WFA >> 16) & 0xff; - new_data->data[3] = (WLAN_OUI_WFA >> 8) & 0xff; - new_data->data[4] = (WLAN_OUI_WFA >> 0) & 0xff; - new_data->data[5] = WLAN_OUI_TYPE_WFA_P2P; - memcpy(&new_data->data[6], &noa_notif->noa_attribute, - copylen); - } - } else - new_data = NULL; - - rcu_assign_pointer(priv->noa_data, new_data); - - if (old_data) - kfree_rcu(old_data, rcu_head); - - return 0; -} - /** * iwl_setup_rx_handlers - Initialize Rx handler callbacks * @@ -1101,8 +1055,6 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv) handlers[BEACON_NOTIFICATION] = iwlagn_rx_beacon_notif; handlers[REPLY_ADD_STA] = iwl_add_sta_callback; - handlers[REPLY_WIPAN_NOA_NOTIFICATION] = iwlagn_rx_noa_notification; - /* * The same handler is used for both the REPLY to a discrete * statistics request from the host as well as for the periodic @@ -1131,13 +1083,13 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv) priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx; /* set up notification wait support */ - spin_lock_init(&priv->shrd->notif_wait_lock); - INIT_LIST_HEAD(&priv->shrd->notif_waits); - init_waitqueue_head(&priv->shrd->notif_waitq); + spin_lock_init(&priv->notif_wait_lock); + INIT_LIST_HEAD(&priv->notif_waits); + init_waitqueue_head(&priv->notif_waitq); /* Set up BT Rx handlers */ - if (cfg(priv)->lib->bt_rx_handler_setup) - cfg(priv)->lib->bt_rx_handler_setup(priv); + if (priv->cfg->lib->bt_rx_handler_setup) + priv->cfg->lib->bt_rx_handler_setup(priv); } @@ -1152,11 +1104,11 @@ int iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb, * even if the RX handler consumes the RXB we have * access to it in the notification wait entry. 
*/ - if (!list_empty(&priv->shrd->notif_waits)) { + if (!list_empty(&priv->notif_waits)) { struct iwl_notification_wait *w; - spin_lock(&priv->shrd->notif_wait_lock); - list_for_each_entry(w, &priv->shrd->notif_waits, list) { + spin_lock(&priv->notif_wait_lock); + list_for_each_entry(w, &priv->notif_waits, list) { if (w->cmd != pkt->hdr.cmd) continue; IWL_DEBUG_RX(priv, @@ -1165,11 +1117,11 @@ int iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb, pkt->hdr.cmd); w->triggered = true; if (w->fn) - w->fn(trans(priv), pkt, w->fn_data); + w->fn(priv, pkt, w->fn_data); } - spin_unlock(&priv->shrd->notif_wait_lock); + spin_unlock(&priv->notif_wait_lock); - wake_up_all(&priv->shrd->notif_waitq); + wake_up_all(&priv->notif_waitq); } if (priv->pre_rx_handler) diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c index 1c6659416621..5c7c17c7166a 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c @@ -45,8 +45,7 @@ static int iwlagn_disable_bss(struct iwl_priv *priv, send->filter_flags = old_filter; if (ret) - IWL_DEBUG_QUIET_RFKILL(priv, - "Error clearing ASSOC_MSK on BSS (%d)\n", ret); + IWL_ERR(priv, "Error clearing ASSOC_MSK on BSS (%d)\n", ret); return ret; } @@ -60,7 +59,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv, u8 old_dev_type = send->dev_type; int ret; - iwl_init_notification_wait(priv->shrd, &disable_wait, + iwlagn_init_notification_wait(priv, &disable_wait, REPLY_WIPAN_DEACTIVATION_COMPLETE, NULL, NULL); @@ -74,9 +73,9 @@ static int iwlagn_disable_pan(struct iwl_priv *priv, if (ret) { IWL_ERR(priv, "Error disabling PAN (%d)\n", ret); - iwl_remove_notification(priv->shrd, &disable_wait); + iwlagn_remove_notification(priv, &disable_wait); } else { - ret = iwl_wait_notification(priv->shrd, &disable_wait, HZ); + ret = iwlagn_wait_notification(priv, &disable_wait, HZ); if (ret) IWL_ERR(priv, "Timed out waiting for PAN disable\n"); } @@ -117,7 +116,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv, if (ctx->ht.enabled) ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; - IWL_DEBUG_INFO(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n", + IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n", ctx->qos_data.qos_active, ctx->qos_data.def_qos_parm.qos_flags); @@ -125,7 +124,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv, sizeof(struct iwl_qosparam_cmd), &ctx->qos_data.def_qos_parm); if (ret) - IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n"); + IWL_ERR(priv, "Failed to update QoS\n"); } static int iwlagn_update_beacon(struct iwl_priv *priv, @@ -296,9 +295,9 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv, } if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION && - cfg(priv)->ht_params && cfg(priv)->ht_params->smps_mode) + priv->cfg->ht_params && priv->cfg->ht_params->smps_mode) ieee80211_request_smps(ctx->vif, - cfg(priv)->ht_params->smps_mode); + priv->cfg->ht_params->smps_mode); return 0; } @@ -445,8 +444,8 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) * force CTS-to-self frames protection if RTS-CTS is not preferred * one aggregation protection method */ - if (!(cfg(priv)->ht_params && - cfg(priv)->ht_params->use_rts_for_aggregation)) + if (!(priv->cfg->ht_params && + priv->cfg->ht_params->use_rts_for_aggregation)) ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || @@ -560,9 +559,6 @@ int 
iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) mutex_lock(&priv->shrd->mutex); - if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) - goto out; - if (unlikely(test_bit(STATUS_SCANNING, &priv->shrd->status))) { IWL_DEBUG_MAC80211(priv, "leave - scanning\n"); goto out; @@ -854,8 +850,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, if (ctx->last_tx_rejected) { ctx->last_tx_rejected = false; iwl_trans_wake_any_queue(trans(priv), - ctx->ctxid, - "Disassoc: flush queue"); + ctx->ctxid); } ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-sta.c index 7353826095f1..4b2aa1da0953 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-sta.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-sta.c @@ -130,15 +130,25 @@ int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb, return iwl_process_add_sta_resp(priv, addsta, pkt); } +static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) +{ + u16 size = (u16)sizeof(struct iwl_addsta_cmd); + struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data; + memcpy(addsta, cmd, size); + /* resrved in 5000 */ + addsta->rate_n_flags = cpu_to_le16(0); + return size; +} + int iwl_send_add_sta(struct iwl_priv *priv, struct iwl_addsta_cmd *sta, u8 flags) { int ret = 0; + u8 data[sizeof(*sta)]; struct iwl_host_cmd cmd = { .id = REPLY_ADD_STA, .flags = flags, - .data = { sta, }, - .len = { sizeof(*sta), }, + .data = { data, }, }; u8 sta_id __maybe_unused = sta->sta.sta_id; @@ -150,6 +160,7 @@ int iwl_send_add_sta(struct iwl_priv *priv, might_sleep(); } + cmd.len[0] = iwlagn_build_addsta_hcmd(sta, data); ret = iwl_trans_send_cmd(trans(priv), &cmd); if (ret || (flags & CMD_ASYNC)) @@ -452,7 +463,6 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id, const u8 *addr) { unsigned long flags; - u8 tid; if (!iwl_is_ready(priv->shrd)) { IWL_DEBUG_INFO(priv, @@ -491,10 +501,6 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id, priv->stations[sta_id].lq = NULL; } - for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) - memset(&priv->tid_data[sta_id][tid], 0, - sizeof(priv->tid_data[sta_id][tid])); - priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; priv->num_stations--; @@ -641,7 +647,7 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx) int ret; struct iwl_addsta_cmd sta_cmd; struct iwl_link_quality_cmd lq; - bool active, have_lq = false; + bool active; spin_lock_irqsave(&priv->shrd->sta_lock, flags); if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) { @@ -651,10 +657,7 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx) memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd)); sta_cmd.mode = 0; - if (priv->stations[sta_id].lq) { - memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq)); - have_lq = true; - } + memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq)); active = priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE; priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; @@ -676,8 +679,7 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx) if (ret) IWL_ERR(priv, "failed to re-add STA %pM (%d)\n", priv->stations[sta_id].sta.sta.addr, ret); - if (have_lq) - iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true); + iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true); } int iwl_get_free_ucode_key_offset(struct iwl_priv *priv) @@ -823,6 +825,28 @@ int iwl_send_lq_cmd(struct iwl_priv 
*priv, struct iwl_rxon_context *ctx, return ret; } +int iwlagn_mac_sta_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + int ret; + + IWL_DEBUG_MAC80211(priv, "enter: received request to remove " + "station %pM\n", sta->addr); + mutex_lock(&priv->shrd->mutex); + IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n", + sta->addr); + ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr); + if (ret) + IWL_ERR(priv, "Error removing station %pM\n", + sta->addr); + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return ret; +} void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, u8 sta_id, struct iwl_link_quality_cmd *link_cmd) @@ -1435,7 +1459,20 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); } +static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id) +{ + unsigned long flags; + spin_lock_irqsave(&priv->shrd->sta_lock, flags); + priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK; + priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK; + priv->stations[sta_id].sta.sta.modify_mask = 0; + priv->stations[sta_id].sta.sleep_tx_count = 0; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); + spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); + +} void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt) { @@ -1452,3 +1489,36 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt) spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); } + +void iwlagn_mac_sta_notify(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, + struct ieee80211_sta *sta) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + int sta_id; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + switch (cmd) { + case STA_NOTIFY_SLEEP: + WARN_ON(!sta_priv->client); + sta_priv->asleep = true; + if (atomic_read(&sta_priv->pending_frames) > 0) + ieee80211_sta_block_awake(hw, sta, true); + break; + case STA_NOTIFY_AWAKE: + WARN_ON(!sta_priv->client); + if (!sta_priv->asleep) + break; + sta_priv->asleep = false; + sta_id = iwl_sta_id(sta); + if (sta_id != IWL_INVALID_STATION) + iwl_sta_modify_ps_wake(priv, sta_id); + break; + default: + break; + } + IWL_DEBUG_MAC80211(priv, "leave\n"); +} diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tt.c index b0dff7a753a5..c27180a73351 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tt.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tt.c @@ -633,7 +633,7 @@ void iwl_tt_initialize(struct iwl_priv *priv) INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter); INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit); - if (cfg(priv)->base_params->adv_thermal_throttle) { + if (priv->cfg->base_params->adv_thermal_throttle) { IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n"); tt->restriction = kcalloc(IWL_TI_STATE_MAX, sizeof(struct iwl_tt_restriction), diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tx.c index c664c2726553..df1540ca6102 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tx.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tx.c @@ -74,8 +74,8 @@ static void 
iwlagn_tx_cmd_build_basic(struct iwl_priv *priv, else if (ieee80211_is_back_req(fc)) tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; else if (info->band == IEEE80211_BAND_2GHZ && - cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist && + priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist && (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc) || skb->protocol == cpu_to_be16(ETH_P_PAE))) @@ -151,7 +151,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, if (ieee80211_is_data(fc)) { tx_cmd->initial_rate_index = 0; tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; -#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE +#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL if (priv->tm_fixed_rate) { /* * rate overwrite by testmode @@ -164,8 +164,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, } #endif return; - } else if (ieee80211_is_back_req(fc)) - tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; + } /** * If the current TX rate stored in mac80211 has the MCS bit set, it's @@ -191,8 +190,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, rate_flags |= RATE_MCS_CCK_MSK; /* Set up antennas */ - if (cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist && + if (priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist && priv->bt_full_concurrent) { /* operated as 1x1 in full concurrency mode */ priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, @@ -262,8 +261,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) __le16 fc; u8 hdr_len; - u16 len, seq_number = 0; - u8 sta_id, tid = IWL_MAX_TID_COUNT; + u16 len; + u8 sta_id; unsigned long flags; bool is_agg = false; @@ -287,19 +286,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); #endif - if (unlikely(ieee80211_is_probe_resp(fc))) { - struct iwl_wipan_noa_data *noa_data = - rcu_dereference(priv->noa_data); - - if (noa_data && - pskb_expand_head(skb, 0, noa_data->length, - GFP_ATOMIC) == 0) { - memcpy(skb_put(skb, noa_data->length), - noa_data->data, noa_data->length); - hdr = (struct ieee80211_hdr *)skb->data; - } - } - hdr_len = ieee80211_hdrlen(fc); /* For management frames use broadcast id to do not break aggregation */ @@ -368,51 +354,9 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) info->driver_data[0] = ctx; info->driver_data[1] = dev_cmd; - if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { - u8 *qc = NULL; - struct iwl_tid_data *tid_data; - qc = ieee80211_get_qos_ctl(hdr); - tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; - if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) - goto drop_unlock_sta; - tid_data = &priv->tid_data[sta_id][tid]; - - /* aggregation is on for this */ - if (info->flags & IEEE80211_TX_CTL_AMPDU && - tid_data->agg.state != IWL_AGG_ON) { - IWL_ERR(priv, "TX_CTL_AMPDU while not in AGG:" - " Tx flags = 0x%08x, agg.state = %d", - info->flags, tid_data->agg.state); - IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d", - sta_id, tid, SEQ_TO_SN(tid_data->seq_number)); - goto drop_unlock_sta; - } - - /* We can receive packets from the stack in IWL_AGG_{ON,OFF} - * only. Check this here. 
- */ - if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON && - tid_data->agg.state != IWL_AGG_OFF, - "Tx while agg.state = %d", tid_data->agg.state)) - goto drop_unlock_sta; - - seq_number = tid_data->seq_number; - seq_number &= IEEE80211_SCTL_SEQ; - hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); - hdr->seq_ctrl |= cpu_to_le16(seq_number); - seq_number += 0x10; - } - - /* Copy MAC header from skb into command buffer */ - memcpy(tx_cmd->hdr, hdr, hdr_len); - - if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id, tid)) + if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id)) goto drop_unlock_sta; - if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc) && - !ieee80211_has_morefrags(fc)) - priv->tid_data[sta_id][tid].seq_number = seq_number; - spin_unlock(&priv->shrd->sta_lock); spin_unlock_irqrestore(&priv->shrd->lock, flags); @@ -437,81 +381,10 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) return -1; } -int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid) -{ - struct iwl_tid_data *tid_data; - unsigned long flags; - int sta_id; - - sta_id = iwl_sta_id(sta); - - if (sta_id == IWL_INVALID_STATION) { - IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); - return -ENXIO; - } - - spin_lock_irqsave(&priv->shrd->sta_lock, flags); - - tid_data = &priv->tid_data[sta_id][tid]; - - switch (priv->tid_data[sta_id][tid].agg.state) { - case IWL_EMPTYING_HW_QUEUE_ADDBA: - /* - * This can happen if the peer stops aggregation - * again before we've had a chance to drain the - * queue we selected previously, i.e. before the - * session was really started completely. - */ - IWL_DEBUG_HT(priv, "AGG stop before setup done\n"); - goto turn_off; - case IWL_AGG_ON: - break; - default: - IWL_WARN(priv, "Stopping AGG while state not ON " - "or starting for %d on %d (%d)\n", sta_id, tid, - priv->tid_data[sta_id][tid].agg.state); - spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); - return 0; - } - - tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number); - - /* There are still packets for this RA / TID in the HW */ - if (tid_data->agg.ssn != tid_data->next_reclaimed) { - IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, " - "next_recl = %d", - tid_data->agg.ssn, - tid_data->next_reclaimed); - priv->tid_data[sta_id][tid].agg.state = - IWL_EMPTYING_HW_QUEUE_DELBA; - spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); - return 0; - } - - IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d", - tid_data->agg.ssn); -turn_off: - priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF; - - /* do not restore/save irqs */ - spin_unlock(&priv->shrd->sta_lock); - spin_lock(&priv->shrd->lock); - - iwl_trans_tx_agg_disable(trans(priv), sta_id, tid); - - spin_unlock_irqrestore(&priv->shrd->lock, flags); - - ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); - - return 0; -} - int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 *ssn) { - struct iwl_tid_data *tid_data; - unsigned long flags; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; int sta_id; int ret; @@ -526,7 +399,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, if (unlikely(tid >= IWL_MAX_TID_COUNT)) return -EINVAL; - if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) { + if (priv->shrd->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) { IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); return -ENXIO; } @@ -535,136 +408,27 @@ int 
iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, if (ret) return ret; - spin_lock_irqsave(&priv->shrd->sta_lock, flags); - - tid_data = &priv->tid_data[sta_id][tid]; - tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number); - - *ssn = tid_data->agg.ssn; - - ret = iwl_trans_tx_agg_alloc(trans(priv), sta_id, tid); - if (ret) { - spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); - return ret; - } - - if (*ssn == tid_data->next_reclaimed) { - IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d", - tid_data->agg.ssn); - tid_data->agg.state = IWL_AGG_ON; - ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); - } else { - IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, " - "next_reclaimed = %d", - tid_data->agg.ssn, - tid_data->next_reclaimed); - tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; - } - - spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); + ret = iwl_trans_tx_agg_alloc(trans(priv), vif_priv->ctx->ctxid, sta_id, + tid, ssn); return ret; } -int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid, u8 buf_size) +int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid) { - struct iwl_station_priv *sta_priv = (void *) sta->drv_priv; - struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); - unsigned long flags; - u16 ssn; - - buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF); - - spin_lock_irqsave(&priv->shrd->sta_lock, flags); - ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn; - spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); - - iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, sta_priv->sta_id, tid, - buf_size, ssn); - - /* - * If the limit is 0, then it wasn't initialised yet, - * use the default. We can do that since we take the - * minimum below, and we don't want to go above our - * default due to hardware restrictions. - */ - if (sta_priv->max_agg_bufsize == 0) - sta_priv->max_agg_bufsize = - LINK_QUAL_AGG_FRAME_LIMIT_DEF; - - /* - * Even though in theory the peer could have different - * aggregation reorder buffer sizes for different sessions, - * our ucode doesn't allow for that and has a global limit - * for each station. Therefore, use the minimum of all the - * aggregation sessions and our default value. 
- */ - sta_priv->max_agg_bufsize = - min(sta_priv->max_agg_bufsize, buf_size); + int sta_id; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - if (cfg(priv)->ht_params && - cfg(priv)->ht_params->use_rts_for_aggregation) { - /* - * switch to RTS/CTS if it is the prefer protection - * method for HT traffic - */ + sta_id = iwl_sta_id(sta); - sta_priv->lq_sta.lq.general_params.flags |= - LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); + return -ENXIO; } - priv->agg_tids_count++; - IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n", - priv->agg_tids_count); - - sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit = - sta_priv->max_agg_bufsize; - IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n", - sta->addr, tid); - - return iwl_send_lq_cmd(priv, ctx, - &sta_priv->lq_sta.lq, CMD_ASYNC, false); -} - -static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid) -{ - struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid]; - enum iwl_rxon_context_id ctx; - struct ieee80211_vif *vif; - u8 *addr; - - lockdep_assert_held(&priv->shrd->sta_lock); - - addr = priv->stations[sta_id].sta.sta.addr; - ctx = priv->stations[sta_id].ctxid; - vif = priv->contexts[ctx].vif; - - switch (priv->tid_data[sta_id][tid].agg.state) { - case IWL_EMPTYING_HW_QUEUE_DELBA: - /* There are no packets for this RA / TID in the HW any more */ - if (tid_data->agg.ssn == tid_data->next_reclaimed) { - IWL_DEBUG_TX_QUEUES(priv, - "Can continue DELBA flow ssn = next_recl =" - " %d", tid_data->next_reclaimed); - iwl_trans_tx_agg_disable(trans(priv), sta_id, tid); - tid_data->agg.state = IWL_AGG_OFF; - ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid); - } - break; - case IWL_EMPTYING_HW_QUEUE_ADDBA: - /* There are no packets for this RA / TID in the HW any more */ - if (tid_data->agg.ssn == tid_data->next_reclaimed) { - IWL_DEBUG_TX_QUEUES(priv, - "Can continue ADDBA flow ssn = next_recl =" - " %d", tid_data->next_reclaimed); - tid_data->agg.state = IWL_AGG_ON; - ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid); - } - break; - default: - break; - } + return iwl_trans_tx_agg_disable(trans(priv), vif_priv->ctx->ctxid, + sta_id, tid); } static void iwlagn_non_agg_tx_status(struct iwl_priv *priv, @@ -804,7 +568,7 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv, IWLAGN_TX_RES_TID_POS; int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >> IWLAGN_TX_RES_RA_POS; - struct iwl_ht_agg *agg = &priv->tid_data[sta_id][tid].agg; + struct iwl_ht_agg *agg = &priv->shrd->tid_data[sta_id][tid].agg; u32 status = le16_to_cpu(tx_resp->status.status); int i; @@ -820,8 +584,8 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv, * notification again. 
*/ if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 && - cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist) { + priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist) { IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n"); } @@ -994,7 +758,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb, struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; struct ieee80211_hdr *hdr; u32 status = le16_to_cpu(tx_resp->status.status); - u16 ssn = iwlagn_get_scd_ssn(tx_resp); + u32 ssn = iwlagn_get_scd_ssn(tx_resp); int tid; int sta_id; int freed; @@ -1016,34 +780,10 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb, iwl_rx_reply_tx_agg(priv, tx_resp); if (tx_resp->frame_count == 1) { - u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl); - next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10); - - if (is_agg) { - /* If this is an aggregation queue, we can rely on the - * ssn since the wifi sequence number corresponds to - * the index in the TFD ring (%256). - * The seq_ctl is the sequence control of the packet - * to which this Tx response relates. But if there is a - * hole in the bitmap of the BA we received, this Tx - * response may allow to reclaim the hole and all the - * subsequent packets that were already acked. - * In that case, seq_ctl != ssn, and the next packet - * to be reclaimed will be ssn and not seq_ctl. - */ - next_reclaimed = ssn; - } - __skb_queue_head_init(&skbs); - priv->tid_data[sta_id][tid].next_reclaimed = next_reclaimed; - - IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d", - next_reclaimed); - /*we can free until ssn % q.n_bd not inclusive */ - WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id, - ssn, status, &skbs)); - iwlagn_check_ratid_empty(priv, sta_id, tid); + iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id, + ssn, status, &skbs); freed = 0; while (!skb_queue_empty(&skbs)) { skb = __skb_dequeue(&skbs); @@ -1063,8 +803,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb, iwl_is_associated_ctx(ctx) && ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION) { ctx->last_tx_rejected = true; - iwl_trans_stop_queue(trans(priv), txq_id, - "Tx on passive channel"); + iwl_trans_stop_queue(trans(priv), txq_id); IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) " @@ -1138,24 +877,27 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, sta_id = ba_resp->sta_id; tid = ba_resp->tid; - agg = &priv->tid_data[sta_id][tid].agg; + agg = &priv->shrd->tid_data[sta_id][tid].agg; spin_lock_irqsave(&priv->shrd->sta_lock, flags); - if (unlikely(!agg->wait_for_ba)) { - if (unlikely(ba_resp->bitmap)) - IWL_ERR(priv, "Received BA when not expected\n"); + if (unlikely(agg->txq_id != scd_flow)) { + /* + * FIXME: this is a uCode bug which need to be addressed, + * log the information and return for now! + * since it is possible happen very often and in order + * not to fill the syslog, don't enable the logging by default + */ + IWL_DEBUG_TX_REPLY(priv, + "BA scd_flow %d does not match txq_id %d\n", + scd_flow, agg->txq_id); spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); return 0; } - __skb_queue_head_init(&reclaimed_skbs); - - /* Release all TFDs before the SSN, i.e. all TFDs in front of - * block-ack window (we assume that they've been successfully - * transmitted ... if not, it's too late anyway). 
*/ - if (iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow, - ba_resp_scd_ssn, 0, &reclaimed_skbs)) { + if (unlikely(!agg->wait_for_ba)) { + if (unlikely(ba_resp->bitmap)) + IWL_ERR(priv, "Received BA when not expected\n"); spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); return 0; } @@ -1167,9 +909,11 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, ba_resp->sta_id); IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, " "scd_flow = %d, scd_ssn = %d\n", - ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl), + ba_resp->tid, + ba_resp->seq_ctl, (unsigned long long)le64_to_cpu(ba_resp->bitmap), - scd_flow, ba_resp_scd_ssn); + ba_resp->scd_flow, + ba_resp->scd_ssn); /* Mark that the expected block-ack response arrived */ agg->wait_for_ba = false; @@ -1188,9 +932,13 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n", ba_resp->txed, ba_resp->txed_2_done); - priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn; + __skb_queue_head_init(&reclaimed_skbs); - iwlagn_check_ratid_empty(priv, sta_id, tid); + /* Release all TFDs before the SSN, i.e. all TFDs in front of + * block-ack window (we assume that they've been successfully + * transmitted ... if not, it's too late anyway). */ + iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow, ba_resp_scd_ssn, + 0, &reclaimed_skbs); freed = 0; while (!skb_queue_empty(&reclaimed_skbs)) { diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-ucode.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c similarity index 57% rename from trunk/drivers/net/wireless/iwlwifi/iwl-ucode.c rename to trunk/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c index 36a1b5b25858..8ba0dd54e37d 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-ucode.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c @@ -31,9 +31,7 @@ #include #include #include -#include -#include "iwl-wifi.h" #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-io.h" @@ -74,98 +72,51 @@ static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = { {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS} }; -/****************************************************************************** - * - * uCode download functions - * - ******************************************************************************/ - -static void iwl_free_fw_desc(struct iwl_bus *bus, struct fw_desc *desc) -{ - if (desc->v_addr) - dma_free_coherent(bus->dev, desc->len, - desc->v_addr, desc->p_addr); - desc->v_addr = NULL; - desc->len = 0; -} - -static void iwl_free_fw_img(struct iwl_bus *bus, struct fw_img *img) -{ - iwl_free_fw_desc(bus, &img->code); - iwl_free_fw_desc(bus, &img->data); -} - -void iwl_dealloc_ucode(struct iwl_trans *trans) -{ - iwl_free_fw_img(bus(trans), &trans->ucode_rt); - iwl_free_fw_img(bus(trans), &trans->ucode_init); - iwl_free_fw_img(bus(trans), &trans->ucode_wowlan); -} - -int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc, - const void *data, size_t len) -{ - if (!len) { - desc->v_addr = NULL; - return -EINVAL; - } - - desc->v_addr = dma_alloc_coherent(bus->dev, len, - &desc->p_addr, GFP_KERNEL); - if (!desc->v_addr) - return -ENOMEM; - - desc->len = len; - memcpy(desc->v_addr, data, len); - return 0; -} - /* * ucode */ -static int iwl_load_section(struct iwl_trans *trans, const char *name, +static int iwlagn_load_section(struct iwl_priv *priv, const char *name, struct fw_desc *image, u32 dst_addr) { - struct iwl_bus *bus = bus(trans); dma_addr_t phy_addr = image->p_addr; u32 
byte_cnt = image->len; int ret; - trans->ucode_write_complete = 0; + priv->ucode_write_complete = 0; - iwl_write_direct32(bus, + iwl_write_direct32(bus(priv), FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); - iwl_write_direct32(bus, + iwl_write_direct32(bus(priv), FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr); - iwl_write_direct32(bus, + iwl_write_direct32(bus(priv), FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); - iwl_write_direct32(bus, + iwl_write_direct32(bus(priv), FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), (iwl_get_dma_hi_addr(phy_addr) << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); - iwl_write_direct32(bus, + iwl_write_direct32(bus(priv), FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX | FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); - iwl_write_direct32(bus, + iwl_write_direct32(bus(priv), FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); - IWL_DEBUG_FW(bus, "%s uCode section being loaded...\n", name); - ret = wait_event_timeout(trans->shrd->wait_command_queue, - trans->ucode_write_complete, 5 * HZ); + IWL_DEBUG_FW(priv, "%s uCode section being loaded...\n", name); + ret = wait_event_timeout(priv->shrd->wait_command_queue, + priv->ucode_write_complete, 5 * HZ); if (!ret) { - IWL_ERR(trans, "Could not load the %s uCode section\n", + IWL_ERR(priv, "Could not load the %s uCode section\n", name); return -ETIMEDOUT; } @@ -173,65 +124,41 @@ static int iwl_load_section(struct iwl_trans *trans, const char *name, return 0; } -static inline struct fw_img *iwl_get_ucode_image(struct iwl_trans *trans, - enum iwl_ucode_type ucode_type) -{ - switch (ucode_type) { - case IWL_UCODE_INIT: - return &trans->ucode_init; - case IWL_UCODE_WOWLAN: - return &trans->ucode_wowlan; - case IWL_UCODE_REGULAR: - return &trans->ucode_rt; - case IWL_UCODE_NONE: - break; - } - return NULL; -} - -static int iwl_load_given_ucode(struct iwl_trans *trans, - enum iwl_ucode_type ucode_type) +static int iwlagn_load_given_ucode(struct iwl_priv *priv, + struct fw_img *image) { int ret = 0; - struct fw_img *image = iwl_get_ucode_image(trans, ucode_type); - - if (!image) { - IWL_ERR(trans, "Invalid ucode requested (%d)\n", - ucode_type); - return -EINVAL; - } - - ret = iwl_load_section(trans, "INST", &image->code, + ret = iwlagn_load_section(priv, "INST", &image->code, IWLAGN_RTC_INST_LOWER_BOUND); if (ret) return ret; - return iwl_load_section(trans, "DATA", &image->data, + return iwlagn_load_section(priv, "DATA", &image->data, IWLAGN_RTC_DATA_LOWER_BOUND); } /* * Calibration */ -static int iwl_set_Xtal_calib(struct iwl_trans *trans) +static int iwlagn_set_Xtal_calib(struct iwl_priv *priv) { struct iwl_calib_xtal_freq_cmd cmd; __le16 *xtal_calib = - (__le16 *)iwl_eeprom_query_addr(trans->shrd, EEPROM_XTAL); + (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL); iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD); cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]); cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]); - return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd)); + return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL], + (u8 *)&cmd, sizeof(cmd)); } -static int iwl_set_temperature_offset_calib(struct iwl_trans *trans) +static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv) { struct iwl_calib_temperature_offset_cmd cmd; __le16 
*offset_calib = - (__le16 *)iwl_eeprom_query_addr(trans->shrd, - EEPROM_RAW_TEMPERATURE); + (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE); memset(&cmd, 0, sizeof(cmd)); iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); @@ -239,48 +166,49 @@ static int iwl_set_temperature_offset_calib(struct iwl_trans *trans) if (!(cmd.radio_sensor_offset)) cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET; - IWL_DEBUG_CALIB(trans, "Radio sensor offset: %d\n", + IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n", le16_to_cpu(cmd.radio_sensor_offset)); - return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd)); + return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET], + (u8 *)&cmd, sizeof(cmd)); } -static int iwl_set_temperature_offset_calib_v2(struct iwl_trans *trans) +static int iwlagn_set_temperature_offset_calib_v2(struct iwl_priv *priv) { struct iwl_calib_temperature_offset_v2_cmd cmd; - __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(trans->shrd, + __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_KELVIN_TEMPERATURE); __le16 *offset_calib_low = - (__le16 *)iwl_eeprom_query_addr(trans->shrd, - EEPROM_RAW_TEMPERATURE); + (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE); struct iwl_eeprom_calib_hdr *hdr; memset(&cmd, 0, sizeof(cmd)); iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); - hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(trans->shrd, + hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv, EEPROM_CALIB_ALL); memcpy(&cmd.radio_sensor_offset_high, offset_calib_high, sizeof(*offset_calib_high)); memcpy(&cmd.radio_sensor_offset_low, offset_calib_low, sizeof(*offset_calib_low)); if (!(cmd.radio_sensor_offset_low)) { - IWL_DEBUG_CALIB(trans, "no info in EEPROM, use default\n"); + IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n"); cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET; cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET; } memcpy(&cmd.burntVoltageRef, &hdr->voltage, sizeof(hdr->voltage)); - IWL_DEBUG_CALIB(trans, "Radio sensor offset high: %d\n", + IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n", le16_to_cpu(cmd.radio_sensor_offset_high)); - IWL_DEBUG_CALIB(trans, "Radio sensor offset low: %d\n", + IWL_DEBUG_CALIB(priv, "Radio sensor offset low: %d\n", le16_to_cpu(cmd.radio_sensor_offset_low)); - IWL_DEBUG_CALIB(trans, "Voltage Ref: %d\n", + IWL_DEBUG_CALIB(priv, "Voltage Ref: %d\n", le16_to_cpu(cmd.burntVoltageRef)); - return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd)); + return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET], + (u8 *)&cmd, sizeof(cmd)); } -static int iwl_send_calib_cfg(struct iwl_trans *trans) +static int iwlagn_send_calib_cfg(struct iwl_priv *priv) { struct iwl_calib_cfg_cmd calib_cfg_cmd; struct iwl_host_cmd cmd = { @@ -296,7 +224,7 @@ static int iwl_send_calib_cfg(struct iwl_trans *trans) calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK; - return iwl_trans_send_cmd(trans, &cmd); + return iwl_trans_send_cmd(trans(priv), &cmd); } int iwlagn_rx_calib_result(struct iwl_priv *priv, @@ -306,37 +234,60 @@ int iwlagn_rx_calib_result(struct iwl_priv *priv, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw; int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + int index; /* reduce the size of the length field itself */ len -= 4; - if (iwl_calib_set(trans(priv), hdr, len)) - IWL_ERR(priv, "Failed to record 
calibration data %d\n", - hdr->op_code); - + /* Define the order in which the results will be sent to the runtime + * uCode. iwl_send_calib_results sends them in a row according to + * their index. We sort them here + */ + switch (hdr->op_code) { + case IWL_PHY_CALIBRATE_DC_CMD: + index = IWL_CALIB_DC; + break; + case IWL_PHY_CALIBRATE_LO_CMD: + index = IWL_CALIB_LO; + break; + case IWL_PHY_CALIBRATE_TX_IQ_CMD: + index = IWL_CALIB_TX_IQ; + break; + case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD: + index = IWL_CALIB_TX_IQ_PERD; + break; + case IWL_PHY_CALIBRATE_BASE_BAND_CMD: + index = IWL_CALIB_BASE_BAND; + break; + default: + IWL_ERR(priv, "Unknown calibration notification %d\n", + hdr->op_code); + return -1; + } + iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len); return 0; } -int iwl_init_alive_start(struct iwl_trans *trans) +int iwlagn_init_alive_start(struct iwl_priv *priv) { int ret; - if (cfg(trans)->bt_params && - cfg(trans)->bt_params->advanced_bt_coexist) { + if (priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist) { /* * Tell uCode we are ready to perform calibration * need to perform this before any calibration * no need to close the envlope since we are going * to load the runtime uCode later. */ - ret = iwl_send_bt_env(trans, IWL_BT_COEX_ENV_OPEN, + ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN, BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); if (ret) return ret; } - ret = iwl_send_calib_cfg(trans); + ret = iwlagn_send_calib_cfg(priv); if (ret) return ret; @@ -344,21 +295,21 @@ int iwl_init_alive_start(struct iwl_trans *trans) * temperature offset calibration is only needed for runtime ucode, * so prepare the value now. */ - if (cfg(trans)->need_temp_offset_calib) { - if (cfg(trans)->temp_offset_v2) - return iwl_set_temperature_offset_calib_v2(trans); + if (priv->cfg->need_temp_offset_calib) { + if (priv->cfg->temp_offset_v2) + return iwlagn_set_temperature_offset_calib_v2(priv); else - return iwl_set_temperature_offset_calib(trans); + return iwlagn_set_temperature_offset_calib(priv); } return 0; } -static int iwl_send_wimax_coex(struct iwl_trans *trans) +static int iwlagn_send_wimax_coex(struct iwl_priv *priv) { struct iwl_wimax_coex_cmd coex_cmd; - if (cfg(trans)->base_params->support_wimax_coexist) { + if (priv->cfg->base_params->support_wimax_coexist) { /* UnMask wake up src at associated sleep */ coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK; @@ -377,12 +328,12 @@ static int iwl_send_wimax_coex(struct iwl_trans *trans) /* coexistence is disabled */ memset(&coex_cmd, 0, sizeof(coex_cmd)); } - return iwl_trans_send_cmd_pdu(trans, + return iwl_trans_send_cmd_pdu(trans(priv), COEX_PRIORITY_TABLE_CMD, CMD_SYNC, sizeof(coex_cmd), &coex_cmd); } -static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = { +static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = { ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)), ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | @@ -404,64 +355,61 @@ static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = { 0, 0, 0, 0, 0, 0, 0 }; -void iwl_send_prio_tbl(struct iwl_trans *trans) +void iwlagn_send_prio_tbl(struct iwl_priv *priv) { struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd; - memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl, - sizeof(iwl_bt_prio_tbl)); - if (iwl_trans_send_cmd_pdu(trans, + memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl, + sizeof(iwlagn_bt_prio_tbl)); + if (iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_COEX_PRIO_TABLE, 
CMD_SYNC, sizeof(prio_tbl_cmd), &prio_tbl_cmd)) - IWL_ERR(trans, "failed to send BT prio tbl command\n"); + IWL_ERR(priv, "failed to send BT prio tbl command\n"); } -int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type) +int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type) { struct iwl_bt_coex_prot_env_cmd env_cmd; int ret; env_cmd.action = action; env_cmd.type = type; - ret = iwl_trans_send_cmd_pdu(trans, + ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_COEX_PROT_ENV, CMD_SYNC, sizeof(env_cmd), &env_cmd); if (ret) - IWL_ERR(trans, "failed to send BT env command\n"); + IWL_ERR(priv, "failed to send BT env command\n"); return ret; } -static int iwl_alive_notify(struct iwl_trans *trans) +static int iwlagn_alive_notify(struct iwl_priv *priv) { - struct iwl_priv *priv = priv(trans); struct iwl_rxon_context *ctx; int ret; if (!priv->tx_cmd_pool) priv->tx_cmd_pool = - kmem_cache_create("iwl_dev_cmd", + kmem_cache_create("iwlagn_dev_cmd", sizeof(struct iwl_device_cmd), sizeof(void *), 0, NULL); if (!priv->tx_cmd_pool) return -ENOMEM; - iwl_trans_tx_start(trans); + iwl_trans_tx_start(trans(priv)); for_each_context(priv, ctx) ctx->last_tx_rejected = false; - ret = iwl_send_wimax_coex(trans); + ret = iwlagn_send_wimax_coex(priv); if (ret) return ret; - if (!cfg(priv)->no_xtal_calib) { - ret = iwl_set_Xtal_calib(trans); - if (ret) - return ret; - } + ret = iwlagn_set_Xtal_calib(priv); + if (ret) + return ret; - return iwl_send_calib_results(trans); + return iwl_send_calib_results(priv); } @@ -470,7 +418,7 @@ static int iwl_alive_notify(struct iwl_trans *trans) * using sample data 100 bytes apart. If these sample points are good, * it's a pretty good bet that everything between them is good, too. */ -static int iwl_verify_inst_sparse(struct iwl_bus *bus, +static int iwl_verify_inst_sparse(struct iwl_priv *priv, struct fw_desc *fw_desc) { __le32 *image = (__le32 *)fw_desc->v_addr; @@ -478,15 +426,15 @@ static int iwl_verify_inst_sparse(struct iwl_bus *bus, u32 val; u32 i; - IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len); + IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len); for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { /* read data comes through single port, auto-incr addr */ /* NOTE: Use the debugless read so we don't flood kernel log * if IWL_DL_IO is set */ - iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR, + iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR, i + IWLAGN_RTC_INST_LOWER_BOUND); - val = iwl_read32(bus, HBUS_TARG_MEM_RDAT); + val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT); if (val != le32_to_cpu(*image)) return -EIO; } @@ -494,7 +442,7 @@ static int iwl_verify_inst_sparse(struct iwl_bus *bus, return 0; } -static void iwl_print_mismatch_inst(struct iwl_bus *bus, +static void iwl_print_mismatch_inst(struct iwl_priv *priv, struct fw_desc *fw_desc) { __le32 *image = (__le32 *)fw_desc->v_addr; @@ -503,18 +451,18 @@ static void iwl_print_mismatch_inst(struct iwl_bus *bus, u32 offs; int errors = 0; - IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len); + IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len); - iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR, + iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR, IWLAGN_RTC_INST_LOWER_BOUND); for (offs = 0; offs < len && errors < 20; offs += sizeof(u32), image++) { /* read data comes through single port, auto-incr addr */ - val = iwl_read32(bus, HBUS_TARG_MEM_RDAT); + val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT); if (val != le32_to_cpu(*image)) { - IWL_ERR(bus, "uCode INST section at " + 
IWL_ERR(priv, "uCode INST section at " "offset 0x%x, is 0x%x, s/b 0x%x\n", offs, val, le32_to_cpu(*image)); errors++; @@ -526,163 +474,91 @@ static void iwl_print_mismatch_inst(struct iwl_bus *bus, * iwl_verify_ucode - determine which instruction image is in SRAM, * and verify its contents */ -static int iwl_verify_ucode(struct iwl_trans *trans, - enum iwl_ucode_type ucode_type) +static int iwl_verify_ucode(struct iwl_priv *priv, struct fw_img *img) { - struct fw_img *img = iwl_get_ucode_image(trans, ucode_type); - - if (!img) { - IWL_ERR(trans, "Invalid ucode requested (%d)\n", ucode_type); - return -EINVAL; - } - - if (!iwl_verify_inst_sparse(bus(trans), &img->code)) { - IWL_DEBUG_FW(trans, "uCode is good in inst SRAM\n"); + if (!iwl_verify_inst_sparse(priv, &img->code)) { + IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n"); return 0; } - IWL_ERR(trans, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n"); + IWL_ERR(priv, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n"); - iwl_print_mismatch_inst(bus(trans), &img->code); + iwl_print_mismatch_inst(priv, &img->code); return -EIO; } -struct iwl_alive_data { +struct iwlagn_alive_data { bool valid; u8 subtype; }; -static void iwl_alive_fn(struct iwl_trans *trans, +static void iwlagn_alive_fn(struct iwl_priv *priv, struct iwl_rx_packet *pkt, void *data) { - struct iwl_alive_data *alive_data = data; + struct iwlagn_alive_data *alive_data = data; struct iwl_alive_resp *palive; palive = &pkt->u.alive_frame; - IWL_DEBUG_FW(trans, "Alive ucode status 0x%08X revision " + IWL_DEBUG_FW(priv, "Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n", palive->is_valid, palive->ver_type, palive->ver_subtype); - trans->shrd->device_pointers.error_event_table = + priv->device_pointers.error_event_table = le32_to_cpu(palive->error_event_table_ptr); - trans->shrd->device_pointers.log_event_table = + priv->device_pointers.log_event_table = le32_to_cpu(palive->log_event_table_ptr); alive_data->subtype = palive->ver_subtype; alive_data->valid = palive->is_valid == UCODE_VALID_OK; } -/* notification wait support */ -void iwl_init_notification_wait(struct iwl_shared *shrd, - struct iwl_notification_wait *wait_entry, - u8 cmd, - void (*fn)(struct iwl_trans *trans, - struct iwl_rx_packet *pkt, - void *data), - void *fn_data) -{ - wait_entry->fn = fn; - wait_entry->fn_data = fn_data; - wait_entry->cmd = cmd; - wait_entry->triggered = false; - wait_entry->aborted = false; - - spin_lock_bh(&shrd->notif_wait_lock); - list_add(&wait_entry->list, &shrd->notif_waits); - spin_unlock_bh(&shrd->notif_wait_lock); -} - -int iwl_wait_notification(struct iwl_shared *shrd, - struct iwl_notification_wait *wait_entry, - unsigned long timeout) -{ - int ret; - - ret = wait_event_timeout(shrd->notif_waitq, - wait_entry->triggered || wait_entry->aborted, - timeout); - - spin_lock_bh(&shrd->notif_wait_lock); - list_del(&wait_entry->list); - spin_unlock_bh(&shrd->notif_wait_lock); - - if (wait_entry->aborted) - return -EIO; - - /* return value is always >= 0 */ - if (ret <= 0) - return -ETIMEDOUT; - return 0; -} - -void iwl_remove_notification(struct iwl_shared *shrd, - struct iwl_notification_wait *wait_entry) -{ - spin_lock_bh(&shrd->notif_wait_lock); - list_del(&wait_entry->list); - spin_unlock_bh(&shrd->notif_wait_lock); -} - -void iwl_abort_notification_waits(struct iwl_shared *shrd) -{ - unsigned long flags; - struct iwl_notification_wait *wait_entry; - - spin_lock_irqsave(&shrd->notif_wait_lock, flags); - list_for_each_entry(wait_entry, &shrd->notif_waits, list) - wait_entry->aborted = 
true; - spin_unlock_irqrestore(&shrd->notif_wait_lock, flags); - - wake_up_all(&shrd->notif_waitq); -} - #define UCODE_ALIVE_TIMEOUT HZ #define UCODE_CALIB_TIMEOUT (2*HZ) -int iwl_load_ucode_wait_alive(struct iwl_trans *trans, - enum iwl_ucode_type ucode_type) +int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv, + struct fw_img *image, + enum iwlagn_ucode_type ucode_type) { struct iwl_notification_wait alive_wait; - struct iwl_alive_data alive_data; + struct iwlagn_alive_data alive_data; int ret; - enum iwl_ucode_type old_type; + enum iwlagn_ucode_type old_type; - ret = iwl_trans_start_device(trans); + ret = iwl_trans_start_device(trans(priv)); if (ret) return ret; - iwl_init_notification_wait(trans->shrd, &alive_wait, REPLY_ALIVE, - iwl_alive_fn, &alive_data); + iwlagn_init_notification_wait(priv, &alive_wait, REPLY_ALIVE, + iwlagn_alive_fn, &alive_data); - old_type = trans->shrd->ucode_type; - trans->shrd->ucode_type = ucode_type; + old_type = priv->ucode_type; + priv->ucode_type = ucode_type; - ret = iwl_load_given_ucode(trans, ucode_type); + ret = iwlagn_load_given_ucode(priv, image); if (ret) { - trans->shrd->ucode_type = old_type; - iwl_remove_notification(trans->shrd, &alive_wait); + priv->ucode_type = old_type; + iwlagn_remove_notification(priv, &alive_wait); return ret; } - iwl_trans_kick_nic(trans); + iwl_trans_kick_nic(trans(priv)); /* * Some things may run in the background now, but we * just wait for the ALIVE notification here. */ - ret = iwl_wait_notification(trans->shrd, &alive_wait, - UCODE_ALIVE_TIMEOUT); + ret = iwlagn_wait_notification(priv, &alive_wait, UCODE_ALIVE_TIMEOUT); if (ret) { - trans->shrd->ucode_type = old_type; + priv->ucode_type = old_type; return ret; } if (!alive_data.valid) { - IWL_ERR(trans, "Loaded ucode is not valid!\n"); - trans->shrd->ucode_type = old_type; + IWL_ERR(priv, "Loaded ucode is not valid!\n"); + priv->ucode_type = old_type; return -EIO; } @@ -692,9 +568,9 @@ int iwl_load_ucode_wait_alive(struct iwl_trans *trans, * skip it for WoWLAN. */ if (ucode_type != IWL_UCODE_WOWLAN) { - ret = iwl_verify_ucode(trans, ucode_type); + ret = iwl_verify_ucode(priv, image); if (ret) { - trans->shrd->ucode_type = old_type; + priv->ucode_type = old_type; return ret; } @@ -702,41 +578,42 @@ int iwl_load_ucode_wait_alive(struct iwl_trans *trans, msleep(5); } - ret = iwl_alive_notify(trans); + ret = iwlagn_alive_notify(priv); if (ret) { - IWL_WARN(trans, + IWL_WARN(priv, "Could not complete ALIVE transition: %d\n", ret); - trans->shrd->ucode_type = old_type; + priv->ucode_type = old_type; return ret; } return 0; } -int iwl_run_init_ucode(struct iwl_trans *trans) +int iwlagn_run_init_ucode(struct iwl_priv *priv) { struct iwl_notification_wait calib_wait; int ret; - lockdep_assert_held(&trans->shrd->mutex); + lockdep_assert_held(&priv->shrd->mutex); /* No init ucode required? 
Curious, but maybe ok */ - if (!trans->ucode_init.code.len) + if (!priv->ucode_init.code.len) return 0; - if (trans->shrd->ucode_type != IWL_UCODE_NONE) + if (priv->ucode_type != IWL_UCODE_NONE) return 0; - iwl_init_notification_wait(trans->shrd, &calib_wait, + iwlagn_init_notification_wait(priv, &calib_wait, CALIBRATION_COMPLETE_NOTIFICATION, NULL, NULL); /* Will also start the device */ - ret = iwl_load_ucode_wait_alive(trans, IWL_UCODE_INIT); + ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init, + IWL_UCODE_INIT); if (ret) goto error; - ret = iwl_init_alive_start(trans); + ret = iwlagn_init_alive_start(priv); if (ret) goto error; @@ -744,15 +621,14 @@ int iwl_run_init_ucode(struct iwl_trans *trans) * Some things may run in the background now, but we * just wait for the calibration complete notification. */ - ret = iwl_wait_notification(trans->shrd, &calib_wait, - UCODE_CALIB_TIMEOUT); + ret = iwlagn_wait_notification(priv, &calib_wait, UCODE_CALIB_TIMEOUT); goto out; error: - iwl_remove_notification(trans->shrd, &calib_wait); + iwlagn_remove_notification(priv, &calib_wait); out: /* Whatever happened, stop the device */ - iwl_trans_stop_device(trans); + iwl_trans_stop_device(trans(priv)); return ret; } diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c index b5c7c5f0a753..e0e9a3dfbc00 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -43,7 +44,6 @@ #include #include "iwl-eeprom.h" -#include "iwl-wifi.h" #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-io.h" @@ -367,7 +367,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv) u32 num_wraps; /* # times uCode wrapped to top of log */ u32 next_entry; /* index of next entry to be written by uCode */ - base = priv->shrd->device_pointers.error_event_table; + base = priv->device_pointers.error_event_table; if (iwlagn_hw_valid_rtc_data_addr(base)) { capacity = iwl_read_targ_mem(bus(priv), base); num_wraps = iwl_read_targ_mem(bus(priv), @@ -452,6 +452,52 @@ static void iwl_bg_tx_flush(struct work_struct *work) iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL); } +/****************************************************************************** + * + * uCode download functions + * + ******************************************************************************/ + +static void iwl_free_fw_desc(struct iwl_priv *priv, struct fw_desc *desc) +{ + if (desc->v_addr) + dma_free_coherent(bus(priv)->dev, desc->len, + desc->v_addr, desc->p_addr); + desc->v_addr = NULL; + desc->len = 0; +} + +static void iwl_free_fw_img(struct iwl_priv *priv, struct fw_img *img) +{ + iwl_free_fw_desc(priv, &img->code); + iwl_free_fw_desc(priv, &img->data); +} + +static void iwl_dealloc_ucode(struct iwl_priv *priv) +{ + iwl_free_fw_img(priv, &priv->ucode_rt); + iwl_free_fw_img(priv, &priv->ucode_init); + iwl_free_fw_img(priv, &priv->ucode_wowlan); +} + +static int iwl_alloc_fw_desc(struct iwl_priv *priv, struct fw_desc *desc, + const void *data, size_t len) +{ + if (!len) { + desc->v_addr = NULL; + return -EINVAL; + } + + desc->v_addr = dma_alloc_coherent(bus(priv)->dev, len, + &desc->p_addr, GFP_KERNEL); + if (!desc->v_addr) + return -ENOMEM; + + desc->len = len; + memcpy(desc->v_addr, data, len); + return 0; +} + static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags) { int i; @@ -509,14 +555,23 @@ static void iwl_init_context(struct iwl_priv 
*priv, u32 ucode_flags) BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); } + +struct iwlagn_ucode_capabilities { + u32 max_probe_length; + u32 standard_phy_calibration_size; + u32 flags; +}; + static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context); +static int iwlagn_mac_setup_register(struct iwl_priv *priv, + struct iwlagn_ucode_capabilities *capa); #define UCODE_EXPERIMENTAL_INDEX 100 #define UCODE_EXPERIMENTAL_TAG "exp" static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first) { - const char *name_pre = cfg(priv)->fw_name_pre; + const char *name_pre = priv->cfg->fw_name_pre; char tag[8]; if (first) { @@ -525,14 +580,14 @@ static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first) strcpy(tag, UCODE_EXPERIMENTAL_TAG); } else if (priv->fw_index == UCODE_EXPERIMENTAL_INDEX) { #endif - priv->fw_index = cfg(priv)->ucode_api_max; + priv->fw_index = priv->cfg->ucode_api_max; sprintf(tag, "%d", priv->fw_index); } else { priv->fw_index--; sprintf(tag, "%d", priv->fw_index); } - if (priv->fw_index < cfg(priv)->ucode_api_min) { + if (priv->fw_index < priv->cfg->ucode_api_min) { IWL_ERR(priv, "no suitable firmware found!\n"); return -ENOENT; } @@ -837,9 +892,9 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) struct iwl_ucode_header *ucode; int err; struct iwlagn_firmware_pieces pieces; - const unsigned int api_max = cfg(priv)->ucode_api_max; - unsigned int api_ok = cfg(priv)->ucode_api_ok; - const unsigned int api_min = cfg(priv)->ucode_api_min; + const unsigned int api_max = priv->cfg->ucode_api_max; + unsigned int api_ok = priv->cfg->ucode_api_ok; + const unsigned int api_min = priv->cfg->ucode_api_min; u32 api_ver; char buildstr[25]; u32 build; @@ -985,32 +1040,30 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) /* Runtime instructions and 2 copies of data: * 1) unmodified from disk * 2) backup cache for save/restore during power-downs */ - if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_rt.code, + if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.code, pieces.inst, pieces.inst_size)) goto err_pci_alloc; - if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_rt.data, + if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.data, pieces.data, pieces.data_size)) goto err_pci_alloc; /* Initialization instructions and data */ if (pieces.init_size && pieces.init_data_size) { - if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_init.code, + if (iwl_alloc_fw_desc(priv, &priv->ucode_init.code, pieces.init, pieces.init_size)) goto err_pci_alloc; - if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_init.data, + if (iwl_alloc_fw_desc(priv, &priv->ucode_init.data, pieces.init_data, pieces.init_data_size)) goto err_pci_alloc; } /* WoWLAN instructions and data */ if (pieces.wowlan_inst_size && pieces.wowlan_data_size) { - if (iwl_alloc_fw_desc(bus(priv), - &trans(priv)->ucode_wowlan.code, + if (iwl_alloc_fw_desc(priv, &priv->ucode_wowlan.code, pieces.wowlan_inst, pieces.wowlan_inst_size)) goto err_pci_alloc; - if (iwl_alloc_fw_desc(bus(priv), - &trans(priv)->ucode_wowlan.data, + if (iwl_alloc_fw_desc(priv, &priv->ucode_wowlan.data, pieces.wowlan_data, pieces.wowlan_data_size)) goto err_pci_alloc; @@ -1028,23 +1081,20 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) priv->init_evtlog_size = (pieces.init_evtlog_size - 16)/12; else priv->init_evtlog_size = - cfg(priv)->base_params->max_event_log_size; + priv->cfg->base_params->max_event_log_size; 
priv->init_errlog_ptr = pieces.init_errlog_ptr; priv->inst_evtlog_ptr = pieces.inst_evtlog_ptr; if (pieces.inst_evtlog_size) priv->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12; else priv->inst_evtlog_size = - cfg(priv)->base_params->max_event_log_size; + priv->cfg->base_params->max_event_log_size; priv->inst_errlog_ptr = pieces.inst_errlog_ptr; -#ifndef CONFIG_IWLWIFI_P2P - ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN; -#endif priv->new_scan_threshold_behaviour = !!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN); - if (!(cfg(priv)->sku & EEPROM_SKU_CAP_IPAN_ENABLE)) + if (!(priv->cfg->sku & EEPROM_SKU_CAP_IPAN_ENABLE)) ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN; /* @@ -1061,6 +1111,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) priv->sta_key_max_num = STA_KEY_MAX_NUM; priv->shrd->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; } + /* * figure out the offset of chain noise reset and gain commands * base on the size of standard phy calibration commands table size @@ -1105,7 +1156,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) err_pci_alloc: IWL_ERR(priv, "failed to allocate pci memory\n"); - iwl_dealloc_ucode(trans(priv)); + iwl_dealloc_ucode(priv); out_unbind: complete(&priv->firmware_loading_complete); device_release_driver(bus(priv)->dev); @@ -1125,7 +1176,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv) spin_unlock_irqrestore(&priv->shrd->lock, flags); priv->thermal_throttle.ct_kill_toggle = false; - if (cfg(priv)->base_params->support_ct_kill_exit) { + if (priv->cfg->base_params->support_ct_kill_exit) { adv_cmd.critical_temperature_enter = cpu_to_le32(hw_params(priv).ct_kill_threshold); adv_cmd.critical_temperature_exit = @@ -1220,10 +1271,10 @@ int iwl_alive_start(struct iwl_priv *priv) return -ERFKILL; /* download priority table before any calibration request */ - if (cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist) { + if (priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist) { /* Configure Bluetooth device coexistence support */ - if (cfg(priv)->bt_params->bt_sco_disable) + if (priv->cfg->bt_params->bt_sco_disable) priv->bt_enable_pspoll = false; else priv->bt_enable_pspoll = true; @@ -1235,14 +1286,14 @@ int iwl_alive_start(struct iwl_priv *priv) priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS; priv->cur_rssi_ctx = NULL; - iwl_send_prio_tbl(trans(priv)); + iwlagn_send_prio_tbl(priv); /* FIXME: w/a to force change uCode BT state machine */ - ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_OPEN, + ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN, BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); if (ret) return ret; - ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_CLOSE, + ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE, BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); if (ret) return ret; @@ -1253,17 +1304,16 @@ int iwl_alive_start(struct iwl_priv *priv) iwl_send_bt_config(priv); } - /* - * Perform runtime calibrations, including DC calibration. 
- */ - iwlagn_send_calib_cfg_rt(priv, IWL_CALIB_CFG_DC_IDX); + if (hw_params(priv).calib_rt_cfg) + iwlagn_send_calib_cfg_rt(priv, + hw_params(priv).calib_rt_cfg); ieee80211_wake_queues(priv->hw); priv->active_rate = IWL_RATES_MASK; /* Configure Tx antenna selection based on H/W config */ - iwlagn_send_tx_ant_config(priv, cfg(priv)->valid_tx_ant); + iwlagn_send_tx_ant_config(priv, priv->cfg->valid_tx_ant); if (iwl_is_associated_ctx(ctx) && !priv->shrd->wowlan) { struct iwl_rxon_cmd *active_rxon = @@ -1302,7 +1352,7 @@ int iwl_alive_start(struct iwl_priv *priv) static void iwl_cancel_deferred_work(struct iwl_priv *priv); -void __iwl_down(struct iwl_priv *priv) +static void __iwl_down(struct iwl_priv *priv) { int exit_pending; @@ -1332,9 +1382,9 @@ void __iwl_down(struct iwl_priv *priv) priv->bt_status = 0; priv->cur_rssi_ctx = NULL; priv->bt_is_sco = 0; - if (cfg(priv)->bt_params) + if (priv->cfg->bt_params) priv->bt_traffic_load = - cfg(priv)->bt_params->bt_init_traffic_load; + priv->cfg->bt_params->bt_init_traffic_load; else priv->bt_traffic_load = 0; priv->bt_full_concurrent = false; @@ -1365,7 +1415,7 @@ void __iwl_down(struct iwl_priv *priv) priv->beacon_skb = NULL; } -void iwl_down(struct iwl_priv *priv) +static void iwl_down(struct iwl_priv *priv) { mutex_lock(&priv->shrd->mutex); __iwl_down(priv); @@ -1374,6 +1424,57 @@ void iwl_down(struct iwl_priv *priv) iwl_cancel_deferred_work(priv); } +#define MAX_HW_RESTARTS 5 + +static int __iwl_up(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx; + int ret; + + lockdep_assert_held(&priv->shrd->mutex); + + if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) { + IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); + return -EIO; + } + + for_each_context(priv, ctx) { + ret = iwlagn_alloc_bcast_station(priv, ctx); + if (ret) { + iwl_dealloc_bcast_stations(priv); + return ret; + } + } + + ret = iwlagn_run_init_ucode(priv); + if (ret) { + IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret); + goto error; + } + + ret = iwlagn_load_ucode_wait_alive(priv, + &priv->ucode_rt, + IWL_UCODE_REGULAR); + if (ret) { + IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret); + goto error; + } + + ret = iwl_alive_start(priv); + if (ret) + goto error; + return 0; + + error: + set_bit(STATUS_EXIT_PENDING, &priv->shrd->status); + __iwl_down(priv); + clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status); + + IWL_ERR(priv, "Unable to initialize device.\n"); + return ret; +} + + /***************************************************************************** * * Workqueue callbacks @@ -1401,7 +1502,7 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work) mutex_unlock(&priv->shrd->mutex); } -void iwlagn_prepare_restart(struct iwl_priv *priv) +static void iwlagn_prepare_restart(struct iwl_priv *priv) { struct iwl_rxon_context *ctx; bool bt_full_concurrent; @@ -1458,198 +1559,1589 @@ static void iwl_bg_restart(struct work_struct *data) } } +/***************************************************************************** + * + * mac80211 entry point functions + * + *****************************************************************************/ + +static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_AP), + }, +}; + +static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = { + { + .max = 2, + .types = BIT(NL80211_IFTYPE_STATION), + }, +}; +static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = { + { + 
.max = 1, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_GO) | + BIT(NL80211_IFTYPE_AP), + }, +}; +static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = { + { + .max = 2, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_CLIENT), + }, +}; -void iwlagn_disable_roc(struct iwl_priv *priv) +static const struct ieee80211_iface_combination +iwlagn_iface_combinations_dualmode[] = { + { .num_different_channels = 1, + .max_interfaces = 2, + .beacon_int_infra_match = true, + .limits = iwlagn_sta_ap_limits, + .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits), + }, + { .num_different_channels = 1, + .max_interfaces = 2, + .limits = iwlagn_2sta_limits, + .n_limits = ARRAY_SIZE(iwlagn_2sta_limits), + }, +}; + +static const struct ieee80211_iface_combination +iwlagn_iface_combinations_p2p[] = { + { .num_different_channels = 1, + .max_interfaces = 2, + .beacon_int_infra_match = true, + .limits = iwlagn_p2p_sta_go_limits, + .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits), + }, + { .num_different_channels = 1, + .max_interfaces = 2, + .limits = iwlagn_p2p_2sta_limits, + .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits), + }, +}; + +/* + * Not a mac80211 entry point function, but it fits in with all the + * other mac80211 functions grouped here. + */ +static int iwlagn_mac_setup_register(struct iwl_priv *priv, + struct iwlagn_ucode_capabilities *capa) { - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN]; + int ret; + struct ieee80211_hw *hw = priv->hw; + struct iwl_rxon_context *ctx; - lockdep_assert_held(&priv->shrd->mutex); + hw->rate_control_algorithm = "iwl-agn-rs"; - if (!priv->hw_roc_setup) - return; + /* Tell mac80211 our characteristics */ + hw->flags = IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_AMPDU_AGGREGATION | + IEEE80211_HW_NEED_DTIM_PERIOD | + IEEE80211_HW_SPECTRUM_MGMT | + IEEE80211_HW_REPORTS_TX_ACK_STATUS; - ctx->staging.dev_type = RXON_DEV_TYPE_P2P; - ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + /* + * Including the following line will crash some AP's. This + * workaround removes the stimulus which causes the crash until + * the AP software can be fixed. 
+ hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; + */ - priv->hw_roc_channel = NULL; + hw->flags |= IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_SUPPORTS_DYNAMIC_PS; - memset(ctx->staging.node_addr, 0, ETH_ALEN); + if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE) + hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | + IEEE80211_HW_SUPPORTS_STATIC_SMPS; - iwlagn_commit_rxon(priv, ctx); + if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP) + hw->flags |= IEEE80211_HW_MFP_CAPABLE; - ctx->is_active = false; - priv->hw_roc_setup = false; + hw->sta_data_size = sizeof(struct iwl_station_priv); + hw->vif_data_size = sizeof(struct iwl_vif_priv); + + for_each_context(priv, ctx) { + hw->wiphy->interface_modes |= ctx->interface_modes; + hw->wiphy->interface_modes |= ctx->exclusive_interface_modes; + } + + BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); + + if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) { + hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p; + hw->wiphy->n_iface_combinations = + ARRAY_SIZE(iwlagn_iface_combinations_p2p); + } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) { + hw->wiphy->iface_combinations = iwlagn_iface_combinations_dualmode; + hw->wiphy->n_iface_combinations = + ARRAY_SIZE(iwlagn_iface_combinations_dualmode); + } + + hw->wiphy->max_remain_on_channel_duration = 1000; + + hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | + WIPHY_FLAG_DISABLE_BEACON_HINTS | + WIPHY_FLAG_IBSS_RSN; + + if (priv->ucode_wowlan.code.len && device_can_wakeup(bus(priv)->dev)) { + hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_EAP_IDENTITY_REQ | + WIPHY_WOWLAN_RFKILL_RELEASE; + if (!iwlagn_mod_params.sw_crypto) + hw->wiphy->wowlan.flags |= + WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_GTK_REKEY_FAILURE; + + hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS; + hw->wiphy->wowlan.pattern_min_len = + IWLAGN_WOWLAN_MIN_PATTERN_LEN; + hw->wiphy->wowlan.pattern_max_len = + IWLAGN_WOWLAN_MAX_PATTERN_LEN; + } + + if (iwlagn_mod_params.power_save) + hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; + else + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + + hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; + /* we create the 802.11 header and a zero-length SSID element */ + hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2; + + /* Default value; 4 EDCA QOS priorities */ + hw->queues = 4; + + hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; + + if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) + priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + &priv->bands[IEEE80211_BAND_2GHZ]; + if (priv->bands[IEEE80211_BAND_5GHZ].n_channels) + priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + &priv->bands[IEEE80211_BAND_5GHZ]; + + iwl_leds_init(priv); + + ret = ieee80211_register_hw(priv->hw); + if (ret) { + IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); + return ret; + } + priv->mac80211_registered = 1; + + return 0; } -static void iwlagn_disable_roc_work(struct work_struct *work) + +static int iwlagn_mac_start(struct ieee80211_hw *hw) { - struct iwl_priv *priv = container_of(work, struct iwl_priv, - hw_roc_disable_work.work); + struct iwl_priv *priv = hw->priv; + int ret; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + /* we should be verifying the device is ready to be opened */ mutex_lock(&priv->shrd->mutex); - iwlagn_disable_roc(priv); + ret = __iwl_up(priv); mutex_unlock(&priv->shrd->mutex); -} + if (ret) + return ret; -/***************************************************************************** - * 
- * driver setup and teardown - * - *****************************************************************************/ + IWL_DEBUG_INFO(priv, "Start UP work done.\n"); -static void iwl_setup_deferred_work(struct iwl_priv *priv) + /* Now we should be done, and the READY bit should be set. */ + if (WARN_ON(!test_bit(STATUS_READY, &priv->shrd->status))) + ret = -EIO; + + iwlagn_led_enable(priv); + + priv->is_open = 1; + IWL_DEBUG_MAC80211(priv, "leave\n"); + return 0; +} + +static void iwlagn_mac_stop(struct ieee80211_hw *hw) { - priv->shrd->workqueue = create_singlethread_workqueue(DRV_NAME); + struct iwl_priv *priv = hw->priv; - init_waitqueue_head(&priv->shrd->wait_command_queue); + IWL_DEBUG_MAC80211(priv, "enter\n"); - INIT_WORK(&priv->restart, iwl_bg_restart); - INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update); - INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work); - INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush); - INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency); - INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config); - INIT_DELAYED_WORK(&priv->hw_roc_disable_work, - iwlagn_disable_roc_work); + if (!priv->is_open) + return; - iwl_setup_scan_deferred_work(priv); + priv->is_open = 0; - if (cfg(priv)->lib->bt_setup_deferred_work) - cfg(priv)->lib->bt_setup_deferred_work(priv); + iwl_down(priv); - init_timer(&priv->statistics_periodic); - priv->statistics_periodic.data = (unsigned long)priv; - priv->statistics_periodic.function = iwl_bg_statistics_periodic; + flush_workqueue(priv->shrd->workqueue); - init_timer(&priv->ucode_trace); - priv->ucode_trace.data = (unsigned long)priv; - priv->ucode_trace.function = iwl_bg_ucode_trace; + /* User space software may expect getting rfkill changes + * even if interface is down */ + iwl_write32(bus(priv), CSR_INT, 0xFFFFFFFF); + iwl_enable_rfkill_int(priv); - init_timer(&priv->watchdog); - priv->watchdog.data = (unsigned long)priv; - priv->watchdog.function = iwl_bg_watchdog; + IWL_DEBUG_MAC80211(priv, "leave\n"); } -static void iwl_cancel_deferred_work(struct iwl_priv *priv) +#ifdef CONFIG_PM_SLEEP +static int iwlagn_send_patterns(struct iwl_priv *priv, + struct cfg80211_wowlan *wowlan) { - if (cfg(priv)->lib->cancel_deferred_work) - cfg(priv)->lib->cancel_deferred_work(priv); + struct iwlagn_wowlan_patterns_cmd *pattern_cmd; + struct iwl_host_cmd cmd = { + .id = REPLY_WOWLAN_PATTERNS, + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + .flags = CMD_SYNC, + }; + int i, err; - cancel_work_sync(&priv->run_time_calib_work); - cancel_work_sync(&priv->beacon_update); + if (!wowlan->n_patterns) + return 0; - iwl_cancel_scan_deferred_work(priv); + cmd.len[0] = sizeof(*pattern_cmd) + + wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern); - cancel_work_sync(&priv->bt_full_concurrency); - cancel_work_sync(&priv->bt_runtime_config); - cancel_delayed_work_sync(&priv->hw_roc_disable_work); + pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL); + if (!pattern_cmd) + return -ENOMEM; - del_timer_sync(&priv->statistics_periodic); - del_timer_sync(&priv->ucode_trace); -} + pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns); -static void iwl_init_hw_rates(struct iwl_priv *priv, - struct ieee80211_rate *rates) -{ - int i; + for (i = 0; i < wowlan->n_patterns; i++) { + int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); - for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) { - rates[i].bitrate = iwl_rates[i].ieee * 5; - rates[i].hw_value = i; /* Rate scaling will work on indexes */ - rates[i].hw_value_short = i; - rates[i].flags = 
0; - if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) { - /* - * If CCK != 1M then set short preamble rate flag. - */ - rates[i].flags |= - (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ? - 0 : IEEE80211_RATE_SHORT_PREAMBLE; - } + memcpy(&pattern_cmd->patterns[i].mask, + wowlan->patterns[i].mask, mask_len); + memcpy(&pattern_cmd->patterns[i].pattern, + wowlan->patterns[i].pattern, + wowlan->patterns[i].pattern_len); + pattern_cmd->patterns[i].mask_size = mask_len; + pattern_cmd->patterns[i].pattern_size = + wowlan->patterns[i].pattern_len; } + + cmd.data[0] = pattern_cmd; + err = iwl_trans_send_cmd(trans(priv), &cmd); + kfree(pattern_cmd); + return err; } +#endif -static int iwl_init_drv(struct iwl_priv *priv) +static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) { - int ret; + struct iwl_priv *priv = hw->priv; - spin_lock_init(&priv->shrd->sta_lock); + if (iwlagn_mod_params.sw_crypto) + return; - mutex_init(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "enter\n"); + mutex_lock(&priv->shrd->mutex); - INIT_LIST_HEAD(&trans(priv)->calib_results); + if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif) + goto out; - priv->ieee_channels = NULL; - priv->ieee_rates = NULL; - priv->band = IEEE80211_BAND_2GHZ; + memcpy(priv->kek, data->kek, NL80211_KEK_LEN); + memcpy(priv->kck, data->kck, NL80211_KCK_LEN); + priv->replay_ctr = cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr)); + priv->have_rekey_data = true; - priv->iw_mode = NL80211_IFTYPE_STATION; - priv->current_ht_config.smps = IEEE80211_SMPS_STATIC; - priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; - priv->agg_tids_count = 0; + out: + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); +} - /* initialize force reset */ - priv->force_reset[IWL_RF_RESET].reset_duration = - IWL_DELAY_NEXT_FORCE_RF_RESET; - priv->force_reset[IWL_FW_RESET].reset_duration = - IWL_DELAY_NEXT_FORCE_FW_RELOAD; +struct wowlan_key_data { + struct iwl_rxon_context *ctx; + struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc; + struct iwlagn_wowlan_tkip_params_cmd *tkip; + const u8 *bssid; + bool error, use_rsc_tsc, use_tkip; +}; - priv->rx_statistics_jiffies = jiffies; +#ifdef CONFIG_PM_SLEEP +static void iwlagn_convert_p1k(u16 *p1k, __le16 *out) +{ + int i; - /* Choose which receivers/antennas to use */ - iwlagn_set_rxon_chain(priv, &priv->contexts[IWL_RXON_CTX_BSS]); + for (i = 0; i < IWLAGN_P1K_SIZE; i++) + out[i] = cpu_to_le16(p1k[i]); +} - iwl_init_scan_params(priv); +static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct iwl_priv *priv = hw->priv; + struct wowlan_key_data *data = _data; + struct iwl_rxon_context *ctx = data->ctx; + struct aes_sc *aes_sc, *aes_tx_sc = NULL; + struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL; + struct iwlagn_p1k_cache *rx_p1ks; + u8 *rx_mic_key; + struct ieee80211_key_seq seq; + u32 cur_rx_iv32 = 0; + u16 p1k[IWLAGN_P1K_SIZE]; + int ret, i; - /* init bt coex */ - if (cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist) { - priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT; - priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT; - priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK; - priv->bt_on_thresh = BT_ON_THRESHOLD_DEF; - priv->bt_duration = BT_DURATION_LIMIT_DEF; - priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF; - } + mutex_lock(&priv->shrd->mutex); - ret = 
iwl_init_channel_map(priv); - if (ret) { - IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); - goto err; - } + if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || + key->cipher == WLAN_CIPHER_SUITE_WEP104) && + !sta && !ctx->key_mapping_keys) + ret = iwl_set_default_wep_key(priv, ctx, key); + else + ret = iwl_set_dynamic_key(priv, ctx, key, sta); - ret = iwl_init_geos(priv); if (ret) { - IWL_ERR(priv, "initializing geos failed: %d\n", ret); - goto err_free_channel_map; + IWL_ERR(priv, "Error setting key during suspend!\n"); + data->error = true; } - iwl_init_hw_rates(priv, priv->ieee_rates); - return 0; + switch (key->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + if (sta) { + tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc; + tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc; -err_free_channel_map: - iwl_free_channel_map(priv); -err: - return ret; -} + rx_p1ks = data->tkip->rx_uni; -static void iwl_uninit_drv(struct iwl_priv *priv) -{ - iwl_free_geos(priv); - iwl_free_channel_map(priv); - if (priv->tx_cmd_pool) + ieee80211_get_key_tx_seq(key, &seq); + tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16); + tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32); + + ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k); + iwlagn_convert_p1k(p1k, data->tkip->tx.p1k); + + memcpy(data->tkip->mic_keys.tx, + &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], + IWLAGN_MIC_KEY_SIZE); + + rx_mic_key = data->tkip->mic_keys.rx_unicast; + } else { + tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc; + rx_p1ks = data->tkip->rx_multi; + rx_mic_key = data->tkip->mic_keys.rx_mcast; + } + + /* + * For non-QoS this relies on the fact that both the uCode and + * mac80211 use TID 0 (as they need to to avoid replay attacks) + * for checking the IV in the frames. + */ + for (i = 0; i < IWLAGN_NUM_RSC; i++) { + ieee80211_get_key_rx_seq(key, i, &seq); + tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16); + tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32); + /* wrapping isn't allowed, AP must rekey */ + if (seq.tkip.iv32 > cur_rx_iv32) + cur_rx_iv32 = seq.tkip.iv32; + } + + ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k); + iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k); + ieee80211_get_tkip_rx_p1k(key, data->bssid, + cur_rx_iv32 + 1, p1k); + iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k); + + memcpy(rx_mic_key, + &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], + IWLAGN_MIC_KEY_SIZE); + + data->use_tkip = true; + data->use_rsc_tsc = true; + break; + case WLAN_CIPHER_SUITE_CCMP: + if (sta) { + u8 *pn = seq.ccmp.pn; + + aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc; + aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc; + + ieee80211_get_key_tx_seq(key, &seq); + aes_tx_sc->pn = cpu_to_le64( + (u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); + } else + aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc; + + /* + * For non-QoS this relies on the fact that both the uCode and + * mac80211 use TID 0 for checking the IV in the frames. 
+ */ + for (i = 0; i < IWLAGN_NUM_RSC; i++) { + u8 *pn = seq.ccmp.pn; + + ieee80211_get_key_rx_seq(key, i, &seq); + aes_sc->pn = cpu_to_le64( + (u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); + } + data->use_rsc_tsc = true; + break; + } + + mutex_unlock(&priv->shrd->mutex); +} + +static int iwlagn_mac_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan) +{ + struct iwl_priv *priv = hw->priv; + struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd; + struct iwl_rxon_cmd rxon; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd; + struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {}; + struct wowlan_key_data key_data = { + .ctx = ctx, + .bssid = ctx->active.bssid_addr, + .use_rsc_tsc = false, + .tkip = &tkip_cmd, + .use_tkip = false, + }; + int ret, i; + u16 seq; + + if (WARN_ON(!wowlan)) + return -EINVAL; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + mutex_lock(&priv->shrd->mutex); + + /* Don't attempt WoWLAN when not associated, tear down instead. */ + if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION || + !iwl_is_associated_ctx(ctx)) { + ret = 1; + goto out; + } + + key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); + if (!key_data.rsc_tsc) { + ret = -ENOMEM; + goto out; + } + + memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd)); + + /* + * We know the last used seqno, and the uCode expects to know that + * one, it will increment before TX. + */ + seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ; + wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq); + + /* + * For QoS counters, we store the one to use next, so subtract 0x10 + * since the uCode will add 0x10 before using the value. + */ + for (i = 0; i < 8; i++) { + seq = priv->shrd->tid_data[IWL_AP_ID][i].seq_number; + seq -= 0x10; + wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq); + } + + if (wowlan->disconnect) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS | + IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE); + if (wowlan->magic_pkt) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET); + if (wowlan->gtk_rekey_failure) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL); + if (wowlan->eap_identity_req) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ); + if (wowlan->four_way_handshake) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE); + if (wowlan->rfkill_release) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_RFKILL); + if (wowlan->n_patterns) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH); + + iwl_scan_cancel_timeout(priv, 200); + + memcpy(&rxon, &ctx->active, sizeof(rxon)); + + iwl_trans_stop_device(trans(priv)); + + priv->shrd->wowlan = true; + + ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_wowlan, + IWL_UCODE_WOWLAN); + if (ret) + goto error; + + /* now configure WoWLAN ucode */ + ret = iwl_alive_start(priv); + if (ret) + goto error; + + memcpy(&ctx->staging, &rxon, sizeof(rxon)); + ret = iwlagn_commit_rxon(priv, ctx); + if (ret) + goto error; + + ret = iwl_power_update_mode(priv, true); + if (ret) + goto error; + + if (!iwlagn_mod_params.sw_crypto) { + /* mark all keys clear */ + priv->ucode_key_table = 0; + ctx->key_mapping_keys = 0; + + /* + * This needs to be unlocked due to lock ordering + * constraints. 
Since we're in the suspend path + * that isn't really a problem though. + */ + mutex_unlock(&priv->shrd->mutex); + ieee80211_iter_keys(priv->hw, ctx->vif, + iwlagn_wowlan_program_keys, + &key_data); + mutex_lock(&priv->shrd->mutex); + if (key_data.error) { + ret = -EIO; + goto error; + } + + if (key_data.use_rsc_tsc) { + struct iwl_host_cmd rsc_tsc_cmd = { + .id = REPLY_WOWLAN_TSC_RSC_PARAMS, + .flags = CMD_SYNC, + .data[0] = key_data.rsc_tsc, + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + .len[0] = sizeof(*key_data.rsc_tsc), + }; + + ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd); + if (ret) + goto error; + } + + if (key_data.use_tkip) { + ret = iwl_trans_send_cmd_pdu(trans(priv), + REPLY_WOWLAN_TKIP_PARAMS, + CMD_SYNC, sizeof(tkip_cmd), + &tkip_cmd); + if (ret) + goto error; + } + + if (priv->have_rekey_data) { + memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd)); + memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN); + kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN); + memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN); + kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN); + kek_kck_cmd.replay_ctr = priv->replay_ctr; + + ret = iwl_trans_send_cmd_pdu(trans(priv), + REPLY_WOWLAN_KEK_KCK_MATERIAL, + CMD_SYNC, sizeof(kek_kck_cmd), + &kek_kck_cmd); + if (ret) + goto error; + } + } + + ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WOWLAN_WAKEUP_FILTER, + CMD_SYNC, sizeof(wakeup_filter_cmd), + &wakeup_filter_cmd); + if (ret) + goto error; + + ret = iwlagn_send_patterns(priv, wowlan); + if (ret) + goto error; + + device_set_wakeup_enable(bus(priv)->dev, true); + + /* Now let the ucode operate on its own */ + iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); + + goto out; + + error: + priv->shrd->wowlan = false; + iwlagn_prepare_restart(priv); + ieee80211_restart_hw(priv->hw); + out: + mutex_unlock(&priv->shrd->mutex); + kfree(key_data.rsc_tsc); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return ret; +} + +static int iwlagn_mac_resume(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct ieee80211_vif *vif; + unsigned long flags; + u32 base, status = 0xffffffff; + int ret = -EIO; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + mutex_lock(&priv->shrd->mutex); + + iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); + + base = priv->device_pointers.error_event_table; + if (iwlagn_hw_valid_rtc_data_addr(base)) { + spin_lock_irqsave(&bus(priv)->reg_lock, flags); + ret = iwl_grab_nic_access_silent(bus(priv)); + if (ret == 0) { + iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, base); + status = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT); + iwl_release_nic_access(bus(priv)); + } + spin_unlock_irqrestore(&bus(priv)->reg_lock, flags); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (ret == 0) { + if (!priv->wowlan_sram) + priv->wowlan_sram = + kzalloc(priv->ucode_wowlan.data.len, + GFP_KERNEL); + + if (priv->wowlan_sram) + _iwl_read_targ_mem_words( + bus(priv), 0x800000, priv->wowlan_sram, + priv->ucode_wowlan.data.len / 4); + } +#endif + } + + /* we'll clear ctx->vif during iwlagn_prepare_restart() */ + vif = ctx->vif; + + priv->shrd->wowlan = false; + + device_set_wakeup_enable(bus(priv)->dev, false); + + iwlagn_prepare_restart(priv); + + memset((void *)&ctx->active, 0, sizeof(ctx->active)); + iwl_connection_init_rx_config(priv, ctx); + iwlagn_set_rxon_chain(priv, ctx); + + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + 
ieee80211_resume_disconnect(vif); + + return 1; +} +#endif + +static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MACDUMP(priv, "enter\n"); + + IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, + ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); + + if (iwlagn_tx_skb(priv, skb)) + dev_kfree_skb_any(skb); + + IWL_DEBUG_MACDUMP(priv, "leave\n"); +} + +static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key) +{ + struct iwl_priv *priv = hw->priv; + + iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key); +} + +static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + struct iwl_rxon_context *ctx = vif_priv->ctx; + int ret; + bool is_default_wep_key = false; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (iwlagn_mod_params.sw_crypto) { + IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); + return -EOPNOTSUPP; + } + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + /* fall through */ + case WLAN_CIPHER_SUITE_CCMP: + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + break; + default: + break; + } + + /* + * We could program these keys into the hardware as well, but we + * don't expect much multicast traffic in IBSS and having keys + * for more stations is probably more useful. + * + * Mark key TX-only and return 0. + */ + if (vif->type == NL80211_IFTYPE_ADHOC && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { + key->hw_key_idx = WEP_INVALID_OFFSET; + return 0; + } + + /* If they key was TX-only, accept deletion */ + if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET) + return 0; + + mutex_lock(&priv->shrd->mutex); + iwl_scan_cancel_timeout(priv, 100); + + BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT); + + /* + * If we are getting WEP group key and we didn't receive any key mapping + * so far, we are in legacy wep mode (group key only), otherwise we are + * in 1X mode. + * In legacy wep mode, we use another host command to the uCode. 
+ */ + if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || + key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) { + if (cmd == SET_KEY) + is_default_wep_key = !ctx->key_mapping_keys; + else + is_default_wep_key = + key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT; + } + + + switch (cmd) { + case SET_KEY: + if (is_default_wep_key) { + ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key); + break; + } + ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta); + if (ret) { + /* + * can't add key for RX, but we don't need it + * in the device for TX so still return 0 + */ + ret = 0; + key->hw_key_idx = WEP_INVALID_OFFSET; + } + + IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); + break; + case DISABLE_KEY: + if (is_default_wep_key) + ret = iwl_remove_default_wep_key(priv, ctx, key); + else + ret = iwl_remove_dynamic_key(priv, ctx, key, sta); + + IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); + break; + default: + ret = -EINVAL; + } + + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return ret; +} + +static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size) +{ + struct iwl_priv *priv = hw->priv; + int ret = -EINVAL; + struct iwl_station_priv *sta_priv = (void *) sta->drv_priv; + struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); + + IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", + sta->addr, tid); + + if (!(priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)) + return -EACCES; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + mutex_lock(&priv->shrd->mutex); + + switch (action) { + case IEEE80211_AMPDU_RX_START: + IWL_DEBUG_HT(priv, "start Rx\n"); + ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn); + break; + case IEEE80211_AMPDU_RX_STOP: + IWL_DEBUG_HT(priv, "stop Rx\n"); + ret = iwl_sta_rx_agg_stop(priv, sta, tid); + if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) + ret = 0; + break; + case IEEE80211_AMPDU_TX_START: + IWL_DEBUG_HT(priv, "start Tx\n"); + ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); + break; + case IEEE80211_AMPDU_TX_STOP: + IWL_DEBUG_HT(priv, "stop Tx\n"); + ret = iwlagn_tx_agg_stop(priv, vif, sta, tid); + if ((ret == 0) && (priv->agg_tids_count > 0)) { + priv->agg_tids_count--; + IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n", + priv->agg_tids_count); + } + if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) + ret = 0; + if (!priv->agg_tids_count && priv->cfg->ht_params && + priv->cfg->ht_params->use_rts_for_aggregation) { + /* + * switch off RTS/CTS if it was previously enabled + */ + sta_priv->lq_sta.lq.general_params.flags &= + ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; + iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif), + &sta_priv->lq_sta.lq, CMD_ASYNC, false); + } + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF); + + iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, iwl_sta_id(sta), + tid, buf_size); + + /* + * If the limit is 0, then it wasn't initialised yet, + * use the default. We can do that since we take the + * minimum below, and we don't want to go above our + * default due to hardware restrictions. + */ + if (sta_priv->max_agg_bufsize == 0) + sta_priv->max_agg_bufsize = + LINK_QUAL_AGG_FRAME_LIMIT_DEF; + + /* + * Even though in theory the peer could have different + * aggregation reorder buffer sizes for different sessions, + * our ucode doesn't allow for that and has a global limit + * for each station. 
Therefore, use the minimum of all the + * aggregation sessions and our default value. + */ + sta_priv->max_agg_bufsize = + min(sta_priv->max_agg_bufsize, buf_size); + + if (priv->cfg->ht_params && + priv->cfg->ht_params->use_rts_for_aggregation) { + /* + * switch to RTS/CTS if it is the prefer protection + * method for HT traffic + */ + + sta_priv->lq_sta.lq.general_params.flags |= + LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; + } + priv->agg_tids_count++; + IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n", + priv->agg_tids_count); + + sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit = + sta_priv->max_agg_bufsize; + + iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif), + &sta_priv->lq_sta.lq, CMD_ASYNC, false); + + IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n", + sta->addr, tid); + ret = 0; + break; + } + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + return ret; +} + +static int iwlagn_mac_sta_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + bool is_ap = vif->type == NL80211_IFTYPE_STATION; + int ret = 0; + u8 sta_id; + + IWL_DEBUG_MAC80211(priv, "received request to add station %pM\n", + sta->addr); + mutex_lock(&priv->shrd->mutex); + IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n", + sta->addr); + sta_priv->sta_id = IWL_INVALID_STATION; + + atomic_set(&sta_priv->pending_frames, 0); + if (vif->type == NL80211_IFTYPE_AP) + sta_priv->client = true; + + ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr, + is_ap, sta, &sta_id); + if (ret) { + IWL_ERR(priv, "Unable to add station %pM (%d)\n", + sta->addr, ret); + /* Should we return success if return code is EEXIST ? */ + goto out; + } + + sta_priv->sta_id = sta_id; + + /* Initialize rate scaling */ + IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n", + sta->addr); + iwl_rs_rate_init(priv, sta, sta_id); + out: + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return ret; +} + +static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_channel_switch *ch_switch) +{ + struct iwl_priv *priv = hw->priv; + const struct iwl_channel_info *ch_info; + struct ieee80211_conf *conf = &hw->conf; + struct ieee80211_channel *channel = ch_switch->channel; + struct iwl_ht_config *ht_conf = &priv->current_ht_config; + /* + * MULTI-FIXME + * When we add support for multiple interfaces, we need to + * revisit this. The channel switch command in the device + * only affects the BSS context, but what does that really + * mean? And what if we get a CSA on the second interface? + * This needs a lot of work. 
+ */ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + u16 ch; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + mutex_lock(&priv->shrd->mutex); + + if (iwl_is_rfkill(priv->shrd)) + goto out; + + if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) || + test_bit(STATUS_SCANNING, &priv->shrd->status) || + test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status)) + goto out; + + if (!iwl_is_associated_ctx(ctx)) + goto out; + + if (!priv->cfg->lib->set_channel_switch) + goto out; + + ch = channel->hw_value; + if (le16_to_cpu(ctx->active.channel) == ch) + goto out; + + ch_info = iwl_get_channel_info(priv, channel->band, ch); + if (!is_channel_valid(ch_info)) { + IWL_DEBUG_MAC80211(priv, "invalid channel\n"); + goto out; + } + + spin_lock_irq(&priv->shrd->lock); + + priv->current_ht_config.smps = conf->smps_mode; + + /* Configure HT40 channels */ + ctx->ht.enabled = conf_is_ht(conf); + if (ctx->ht.enabled) + iwlagn_config_ht40(conf, ctx); + else + ctx->ht.is_40mhz = false; + + if ((le16_to_cpu(ctx->staging.channel) != ch)) + ctx->staging.flags = 0; + + iwl_set_rxon_channel(priv, channel, ctx); + iwl_set_rxon_ht(priv, ht_conf); + iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif); + + spin_unlock_irq(&priv->shrd->lock); + + iwl_set_rate(priv); + /* + * at this point, staging_rxon has the + * configuration for channel switch + */ + set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status); + priv->switch_channel = cpu_to_le16(ch); + if (priv->cfg->lib->set_channel_switch(priv, ch_switch)) { + clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status); + priv->switch_channel = 0; + ieee80211_chswitch_done(ctx->vif, false); + } + +out: + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +static void iwlagn_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct iwl_priv *priv = hw->priv; + __le32 filter_or = 0, filter_nand = 0; + struct iwl_rxon_context *ctx; + +#define CHK(test, flag) do { \ + if (*total_flags & (test)) \ + filter_or |= (flag); \ + else \ + filter_nand |= (flag); \ + } while (0) + + IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", + changed_flags, *total_flags); + + CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); + /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */ + CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK); + CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); + +#undef CHK + + mutex_lock(&priv->shrd->mutex); + + for_each_context(priv, ctx) { + ctx->staging.filter_flags &= ~filter_nand; + ctx->staging.filter_flags |= filter_or; + + /* + * Not committing directly because hardware can perform a scan, + * but we'll eventually commit the filter flags change anyway. + */ + } + + mutex_unlock(&priv->shrd->mutex); + + /* + * Receiving all multicast frames is always enabled by the + * default flags setup in iwl_connection_init_rx_config() + * since we currently do not support programming multicast + * filters into the device. 
+ */ + *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | + FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; +} + +static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop) +{ + struct iwl_priv *priv = hw->priv; + + mutex_lock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) { + IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n"); + goto done; + } + if (iwl_is_rfkill(priv->shrd)) { + IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n"); + goto done; + } + + /* + * mac80211 will not push any more frames for transmit + * until the flush is completed + */ + if (drop) { + IWL_DEBUG_MAC80211(priv, "send flush command\n"); + if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) { + IWL_ERR(priv, "flush request fail\n"); + goto done; + } + } + IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n"); + iwl_trans_wait_tx_queue_empty(trans(priv)); +done: + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +void iwlagn_disable_roc(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN]; + + lockdep_assert_held(&priv->shrd->mutex); + + if (!priv->hw_roc_setup) + return; + + ctx->staging.dev_type = RXON_DEV_TYPE_P2P; + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + + priv->hw_roc_channel = NULL; + + memset(ctx->staging.node_addr, 0, ETH_ALEN); + + iwlagn_commit_rxon(priv, ctx); + + ctx->is_active = false; + priv->hw_roc_setup = false; +} + +static void iwlagn_disable_roc_work(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + hw_roc_disable_work.work); + + mutex_lock(&priv->shrd->mutex); + iwlagn_disable_roc(priv); + mutex_unlock(&priv->shrd->mutex); +} + +static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw, + struct ieee80211_channel *channel, + enum nl80211_channel_type channel_type, + int duration) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN]; + int err = 0; + + if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN))) + return -EOPNOTSUPP; + + if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT))) + return -EOPNOTSUPP; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + mutex_lock(&priv->shrd->mutex); + + if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) { + err = -EBUSY; + goto out; + } + + priv->hw_roc_channel = channel; + priv->hw_roc_chantype = channel_type; + priv->hw_roc_duration = duration; + priv->hw_roc_start_notified = false; + cancel_delayed_work(&priv->hw_roc_disable_work); + + if (!ctx->is_active) { + ctx->is_active = true; + ctx->staging.dev_type = RXON_DEV_TYPE_P2P; + memcpy(ctx->staging.node_addr, + priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr, + ETH_ALEN); + memcpy(ctx->staging.bssid_addr, + priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr, + ETH_ALEN); + err = iwlagn_commit_rxon(priv, ctx); + if (err) + goto out; + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK | + RXON_FILTER_PROMISC_MSK | + RXON_FILTER_CTL2HOST_MSK; + + err = iwlagn_commit_rxon(priv, ctx); + if (err) { + iwlagn_disable_roc(priv); + goto out; + } + priv->hw_roc_setup = true; + } + + err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band); + if (err) + iwlagn_disable_roc(priv); + + out: + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return err; +} + +static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + if 
(!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN))) + return -EOPNOTSUPP; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + mutex_lock(&priv->shrd->mutex); + iwl_scan_cancel_timeout(priv, priv->hw_roc_duration); + iwlagn_disable_roc(priv); + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return 0; +} + +static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *bssid, + enum ieee80211_tx_sync_type type) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + struct iwl_rxon_context *ctx = vif_priv->ctx; + int ret; + u8 sta_id; + + if (ctx->ctxid != IWL_RXON_CTX_PAN) + return 0; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + mutex_lock(&priv->shrd->mutex); + + if (iwl_is_associated_ctx(ctx)) { + ret = 0; + goto out; + } + + if (ctx->preauth_bssid || test_bit(STATUS_SCAN_HW, &priv->shrd->status)) { + ret = -EBUSY; + goto out; + } + + ret = iwl_add_station_common(priv, ctx, bssid, true, NULL, &sta_id); + if (ret) + goto out; + + if (WARN_ON(sta_id != ctx->ap_sta_id)) { + ret = -EIO; + goto out_remove_sta; + } + + memcpy(ctx->bssid, bssid, ETH_ALEN); + ctx->preauth_bssid = true; + + ret = iwlagn_commit_rxon(priv, ctx); + + if (ret == 0) + goto out; + + out_remove_sta: + iwl_remove_station(priv, sta_id, bssid); + out: + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return ret; +} + +static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *bssid, + enum ieee80211_tx_sync_type type) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + struct iwl_rxon_context *ctx = vif_priv->ctx; + + if (ctx->ctxid != IWL_RXON_CTX_PAN) + return; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + mutex_lock(&priv->shrd->mutex); + + if (iwl_is_associated_ctx(ctx)) + goto out; + + iwl_remove_station(priv, ctx->ap_sta_id, bssid); + ctx->preauth_bssid = false; + /* no need to commit */ + out: + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +/***************************************************************************** + * + * driver setup and teardown + * + *****************************************************************************/ + +static void iwl_setup_deferred_work(struct iwl_priv *priv) +{ + priv->shrd->workqueue = create_singlethread_workqueue(DRV_NAME); + + init_waitqueue_head(&priv->shrd->wait_command_queue); + + INIT_WORK(&priv->restart, iwl_bg_restart); + INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update); + INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work); + INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush); + INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency); + INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config); + INIT_DELAYED_WORK(&priv->hw_roc_disable_work, + iwlagn_disable_roc_work); + + iwl_setup_scan_deferred_work(priv); + + if (priv->cfg->lib->bt_setup_deferred_work) + priv->cfg->lib->bt_setup_deferred_work(priv); + + init_timer(&priv->statistics_periodic); + priv->statistics_periodic.data = (unsigned long)priv; + priv->statistics_periodic.function = iwl_bg_statistics_periodic; + + init_timer(&priv->ucode_trace); + priv->ucode_trace.data = (unsigned long)priv; + priv->ucode_trace.function = iwl_bg_ucode_trace; + + init_timer(&priv->watchdog); + priv->watchdog.data = (unsigned long)priv; + priv->watchdog.function = iwl_bg_watchdog; +} + +static void iwl_cancel_deferred_work(struct iwl_priv *priv) +{ + if 
(priv->cfg->lib->cancel_deferred_work) + priv->cfg->lib->cancel_deferred_work(priv); + + cancel_work_sync(&priv->run_time_calib_work); + cancel_work_sync(&priv->beacon_update); + + iwl_cancel_scan_deferred_work(priv); + + cancel_work_sync(&priv->bt_full_concurrency); + cancel_work_sync(&priv->bt_runtime_config); + cancel_delayed_work_sync(&priv->hw_roc_disable_work); + + del_timer_sync(&priv->statistics_periodic); + del_timer_sync(&priv->ucode_trace); +} + +static void iwl_init_hw_rates(struct iwl_priv *priv, + struct ieee80211_rate *rates) +{ + int i; + + for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) { + rates[i].bitrate = iwl_rates[i].ieee * 5; + rates[i].hw_value = i; /* Rate scaling will work on indexes */ + rates[i].hw_value_short = i; + rates[i].flags = 0; + if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) { + /* + * If CCK != 1M then set short preamble rate flag. + */ + rates[i].flags |= + (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ? + 0 : IEEE80211_RATE_SHORT_PREAMBLE; + } + } +} + +static int iwl_init_drv(struct iwl_priv *priv) +{ + int ret; + + spin_lock_init(&priv->shrd->sta_lock); + + mutex_init(&priv->shrd->mutex); + + priv->ieee_channels = NULL; + priv->ieee_rates = NULL; + priv->band = IEEE80211_BAND_2GHZ; + + priv->iw_mode = NL80211_IFTYPE_STATION; + priv->current_ht_config.smps = IEEE80211_SMPS_STATIC; + priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; + priv->agg_tids_count = 0; + + /* initialize force reset */ + priv->force_reset[IWL_RF_RESET].reset_duration = + IWL_DELAY_NEXT_FORCE_RF_RESET; + priv->force_reset[IWL_FW_RESET].reset_duration = + IWL_DELAY_NEXT_FORCE_FW_RELOAD; + + priv->rx_statistics_jiffies = jiffies; + + /* Choose which receivers/antennas to use */ + iwlagn_set_rxon_chain(priv, &priv->contexts[IWL_RXON_CTX_BSS]); + + iwl_init_scan_params(priv); + + /* init bt coex */ + if (priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist) { + priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT; + priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT; + priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK; + priv->bt_on_thresh = BT_ON_THRESHOLD_DEF; + priv->bt_duration = BT_DURATION_LIMIT_DEF; + priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF; + } + + ret = iwl_init_channel_map(priv); + if (ret) { + IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); + goto err; + } + + ret = iwl_init_geos(priv); + if (ret) { + IWL_ERR(priv, "initializing geos failed: %d\n", ret); + goto err_free_channel_map; + } + iwl_init_hw_rates(priv, priv->ieee_rates); + + return 0; + +err_free_channel_map: + iwl_free_channel_map(priv); +err: + return ret; +} + +static void iwl_uninit_drv(struct iwl_priv *priv) +{ + iwl_calib_free_results(priv); + iwl_free_geos(priv); + iwl_free_channel_map(priv); + if (priv->tx_cmd_pool) kmem_cache_destroy(priv->tx_cmd_pool); kfree(priv->scan_cmd); kfree(priv->beacon_cmd); - kfree(rcu_dereference_raw(priv->noa_data)); #ifdef CONFIG_IWLWIFI_DEBUGFS kfree(priv->wowlan_sram); #endif } +static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw, + enum ieee80211_rssi_event rssi_event) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + mutex_lock(&priv->shrd->mutex); + + if (priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist) { + if (rssi_event == RSSI_EVENT_LOW) + priv->bt_enable_pspoll = true; + else if (rssi_event == RSSI_EVENT_HIGH) + priv->bt_enable_pspoll = false; + + iwlagn_send_advance_bt_config(priv); + } else { + IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled," + 
"ignoring RSSI callback\n"); + } + + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +static int iwlagn_mac_set_tim(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, bool set) +{ + struct iwl_priv *priv = hw->priv; + + queue_work(priv->shrd->workqueue, &priv->beacon_update); + return 0; +} + +struct ieee80211_ops iwlagn_hw_ops = { + .tx = iwlagn_mac_tx, + .start = iwlagn_mac_start, + .stop = iwlagn_mac_stop, +#ifdef CONFIG_PM_SLEEP + .suspend = iwlagn_mac_suspend, + .resume = iwlagn_mac_resume, +#endif + .add_interface = iwlagn_mac_add_interface, + .remove_interface = iwlagn_mac_remove_interface, + .change_interface = iwlagn_mac_change_interface, + .config = iwlagn_mac_config, + .configure_filter = iwlagn_configure_filter, + .set_key = iwlagn_mac_set_key, + .update_tkip_key = iwlagn_mac_update_tkip_key, + .set_rekey_data = iwlagn_mac_set_rekey_data, + .conf_tx = iwlagn_mac_conf_tx, + .bss_info_changed = iwlagn_bss_info_changed, + .ampdu_action = iwlagn_mac_ampdu_action, + .hw_scan = iwlagn_mac_hw_scan, + .sta_notify = iwlagn_mac_sta_notify, + .sta_add = iwlagn_mac_sta_add, + .sta_remove = iwlagn_mac_sta_remove, + .channel_switch = iwlagn_mac_channel_switch, + .flush = iwlagn_mac_flush, + .tx_last_beacon = iwlagn_mac_tx_last_beacon, + .remain_on_channel = iwlagn_mac_remain_on_channel, + .cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel, + .rssi_callback = iwlagn_mac_rssi_callback, + CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd) + CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump) + .tx_sync = iwlagn_mac_tx_sync, + .finish_tx_sync = iwlagn_mac_finish_tx_sync, + .set_tim = iwlagn_mac_set_tim, +}; static u32 iwl_hw_detect(struct iwl_priv *priv) { @@ -1669,55 +3161,40 @@ static int iwl_set_hw_params(struct iwl_priv *priv) hw_params(priv).rx_page_order = get_order(IWL_RX_BUF_SIZE_4K); - if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_ALL) - cfg(priv)->sku &= ~EEPROM_SKU_CAP_11N_ENABLE; + if (iwlagn_mod_params.disable_11n) + priv->cfg->sku &= ~EEPROM_SKU_CAP_11N_ENABLE; hw_params(priv).num_ampdu_queues = - cfg(priv)->base_params->num_of_ampdu_queues; + priv->cfg->base_params->num_of_ampdu_queues; hw_params(priv).shadow_reg_enable = - cfg(priv)->base_params->shadow_reg_enable; - hw_params(priv).sku = cfg(priv)->sku; - hw_params(priv).wd_timeout = cfg(priv)->base_params->wd_timeout; + priv->cfg->base_params->shadow_reg_enable; + hw_params(priv).sku = priv->cfg->sku; + hw_params(priv).wd_timeout = priv->cfg->base_params->wd_timeout; /* Device-specific setup */ - return cfg(priv)->lib->set_hw_params(priv); + return priv->cfg->lib->set_hw_params(priv); } +/* This function both allocates and initializes hw and priv. 
*/ +static struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg) +{ + struct iwl_priv *priv; + /* mac80211 allocates memory for this device instance, including + * space for this driver's private structure */ + struct ieee80211_hw *hw; + hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwlagn_hw_ops); + if (hw == NULL) { + pr_err("%s: Can not allocate network device\n", + cfg->name); + goto out; + } -static void iwl_debug_config(struct iwl_priv *priv) -{ - dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUG " -#ifdef CONFIG_IWLWIFI_DEBUG - "enabled\n"); -#else - "disabled\n"); -#endif - dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUGFS " -#ifdef CONFIG_IWLWIFI_DEBUGFS - "enabled\n"); -#else - "disabled\n"); -#endif - dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TRACING " -#ifdef CONFIG_IWLWIFI_DEVICE_TRACING - "enabled\n"); -#else - "disabled\n"); -#endif + priv = hw->priv; + priv->hw = hw; - dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TESTMODE " -#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE - "enabled\n"); -#else - "disabled\n"); -#endif - dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_P2P " -#ifdef CONFIG_IWLWIFI_P2P - "enabled\n"); -#else - "disabled\n"); -#endif +out: + return hw; } int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops, @@ -1732,9 +3209,8 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops, /************************ * 1. Allocating HW data ************************/ - hw = iwl_alloc_all(); + hw = iwl_alloc_all(cfg); if (!hw) { - pr_err("%s: Cannot allocate network device\n", cfg->name); err = -ENOMEM; goto out; } @@ -1755,11 +3231,8 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops, SET_IEEE80211_DEV(hw, bus(priv)->dev); - /* what debugging capabilities we have */ - iwl_debug_config(priv); - IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); - cfg(priv) = cfg; + priv->cfg = cfg; /* is antenna coupling more than 35dB ? 
*/ priv->bt_ant_couple_ok = @@ -1793,7 +3266,7 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops, ***********************/ hw_rev = iwl_hw_detect(priv); IWL_INFO(priv, "Detected %s, REV=0x%X\n", - cfg(priv)->name, hw_rev); + priv->cfg->name, hw_rev); err = iwl_trans_request_irq(trans(priv)); if (err) @@ -1823,11 +3296,11 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops, goto out_free_eeprom; /* extract MAC Address */ - iwl_eeprom_get_mac(priv->shrd, priv->addresses[0].addr); + iwl_eeprom_get_mac(priv, priv->addresses[0].addr); IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr); priv->hw->wiphy->addresses = priv->addresses; priv->hw->wiphy->n_addresses = 1; - num_mac = iwl_eeprom_query16(priv->shrd, EEPROM_NUM_MAC_ADDRESS); + num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS); if (num_mac > 1) { memcpy(priv->addresses[1].addr, priv->addresses[0].addr, ETH_ALEN); @@ -1892,7 +3365,7 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops, priv->shrd->workqueue = NULL; iwl_uninit_drv(priv); out_free_eeprom: - iwl_eeprom_free(priv->shrd); + iwl_eeprom_free(priv); out_free_trans: iwl_trans_free(trans(priv)); out_free_traffic_mem: @@ -1917,16 +3390,21 @@ void __devexit iwl_remove(struct iwl_priv * priv) set_bit(STATUS_EXIT_PENDING, &priv->shrd->status); iwl_testmode_cleanup(priv); - iwlagn_mac_unregister(priv); + iwl_leds_exit(priv); + + if (priv->mac80211_registered) { + ieee80211_unregister_hw(priv->hw); + priv->mac80211_registered = 0; + } iwl_tt_exit(priv); /*This will stop the queues, move the device to low power state */ iwl_trans_stop_device(trans(priv)); - iwl_dealloc_ucode(trans(priv)); + iwl_dealloc_ucode(priv); - iwl_eeprom_free(priv->shrd); + iwl_eeprom_free(priv); /*netif_stop_queue(dev); */ flush_workqueue(priv->shrd->workqueue); @@ -1996,9 +3474,8 @@ module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO); MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); module_param_named(queues_num, iwlagn_mod_params.num_of_queues, int, S_IRUGO); MODULE_PARM_DESC(queues_num, "number of hw queues."); -module_param_named(11n_disable, iwlagn_mod_params.disable_11n, uint, S_IRUGO); -MODULE_PARM_DESC(11n_disable, - "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX"); +module_param_named(11n_disable, iwlagn_mod_params.disable_11n, int, S_IRUGO); +MODULE_PARM_DESC(11n_disable, "disable 11n functionality"); module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K, int, S_IRUGO); MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn.h b/trunk/drivers/net/wireless/iwlwifi/iwl-agn.h index f84fb3c53563..3856abaea507 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn.h @@ -65,12 +65,6 @@ #include "iwl-dev.h" -struct iwlagn_ucode_capabilities { - u32 max_probe_length; - u32 standard_phy_calibration_size; - u32 flags; -}; - extern struct ieee80211_ops iwlagn_hw_ops; int iwl_reset_ict(struct iwl_trans *trans); @@ -83,16 +77,6 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd) hdr->data_valid = 1; } -void __iwl_down(struct iwl_priv *priv); -void iwl_down(struct iwl_priv *priv); -void iwlagn_prepare_restart(struct iwl_priv *priv); - -/* MAC80211 */ -struct ieee80211_hw *iwl_alloc_all(void); -int iwlagn_mac_setup_register(struct iwl_priv *priv, - struct iwlagn_ucode_capabilities *capa); -void 
iwlagn_mac_unregister(struct iwl_priv *priv); - /* RXON */ int iwlagn_set_pan_params(struct iwl_priv *priv); int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx); @@ -109,20 +93,20 @@ void iwlagn_config_ht40(struct ieee80211_conf *conf, int iwlagn_rx_calib_result(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb, struct iwl_device_cmd *cmd); +int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type); +void iwlagn_send_prio_tbl(struct iwl_priv *priv); +int iwlagn_run_init_ucode(struct iwl_priv *priv); +int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv, + struct fw_img *image, + enum iwlagn_ucode_type ucode_type); /* lib */ int iwlagn_send_tx_power(struct iwl_priv *priv); void iwlagn_temperature(struct iwl_priv *priv); -u16 iwl_eeprom_calib_version(struct iwl_shared *shrd); +u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv); int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control); void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control); int iwlagn_send_beacon_cmd(struct iwl_priv *priv); -#ifdef CONFIG_PM_SLEEP -int iwlagn_send_patterns(struct iwl_priv *priv, - struct cfg80211_wowlan *wowlan); -int iwlagn_suspend(struct iwl_priv *priv, - struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); -#endif /* rx */ int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); @@ -133,8 +117,6 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv); int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb); int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 *ssn); -int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid, u8 buf_size); int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid); int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, @@ -216,6 +198,9 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct ieee80211_sta *sta, u8 *sta_id_r); int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id, const u8 *addr); +int iwlagn_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, const u8 *addr, bool is_ap, struct ieee80211_sta *sta); @@ -333,6 +318,10 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt); int iwl_update_bcast_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx); int iwl_update_bcast_stations(struct iwl_priv *priv); +void iwlagn_mac_sta_notify(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, + struct ieee80211_sta *sta); /* rate */ static inline u32 iwl_ant_idx_to_flags(u8 ant_idx) @@ -352,11 +341,28 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags) /* eeprom */ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv); -void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac); - +void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac); + +/* notification wait support */ +void __acquires(wait_entry) +iwlagn_init_notification_wait(struct iwl_priv *priv, + struct iwl_notification_wait *wait_entry, + u8 cmd, + void (*fn)(struct iwl_priv *priv, + struct iwl_rx_packet *pkt, + void *data), + void *fn_data); +int __must_check __releases(wait_entry) +iwlagn_wait_notification(struct iwl_priv *priv, + struct iwl_notification_wait *wait_entry, + unsigned long timeout); +void 
__releases(wait_entry) +iwlagn_remove_notification(struct iwl_priv *priv, + struct iwl_notification_wait *wait_entry); +extern int iwlagn_init_alive_start(struct iwl_priv *priv); extern int iwl_alive_start(struct iwl_priv *priv); /* svtool */ -#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE +#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len); extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-bus.h b/trunk/drivers/net/wireless/iwlwifi/iwl-bus.h index 940d5038b39c..08b97594e305 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-bus.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-bus.h @@ -122,8 +122,7 @@ struct iwl_bus; * struct iwl_bus_ops - bus specific operations * @get_pm_support: must returns true if the bus can go to sleep * @apm_config: will be called during the config of the APM - * @get_hw_id_string: prints the hw_id in the provided buffer - * @get_hw_id: get hw_id in u32 + * @get_hw_id: prints the hw_id in the provided buffer * @write8: write a byte to register at offset ofs * @write32: write a dword to register at offset ofs * @wread32: read a dword at register at offset ofs @@ -131,8 +130,7 @@ struct iwl_bus; struct iwl_bus_ops { bool (*get_pm_support)(struct iwl_bus *bus); void (*apm_config)(struct iwl_bus *bus); - void (*get_hw_id_string)(struct iwl_bus *bus, char buf[], int buf_len); - u32 (*get_hw_id)(struct iwl_bus *bus); + void (*get_hw_id)(struct iwl_bus *bus, char buf[], int buf_len); void (*write8)(struct iwl_bus *bus, u32 ofs, u8 val); void (*write32)(struct iwl_bus *bus, u32 ofs, u32 val); u32 (*read32)(struct iwl_bus *bus, u32 ofs); @@ -174,15 +172,9 @@ static inline void bus_apm_config(struct iwl_bus *bus) bus->ops->apm_config(bus); } -static inline void bus_get_hw_id_string(struct iwl_bus *bus, char buf[], - int buf_len) +static inline void bus_get_hw_id(struct iwl_bus *bus, char buf[], int buf_len) { - bus->ops->get_hw_id_string(bus, buf, buf_len); -} - -static inline u32 bus_get_hw_id(struct iwl_bus *bus) -{ - return bus->ops->get_hw_id(bus); + bus->ops->get_hw_id(bus, buf, buf_len); } static inline void bus_write8(struct iwl_bus *bus, u32 ofs, u8 val) diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-cfg.h b/trunk/drivers/net/wireless/iwlwifi/iwl-cfg.h index e1d78257e4a9..2a2dc4597ba1 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-cfg.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-cfg.h @@ -101,11 +101,17 @@ extern struct iwl_cfg iwl100_bg_cfg; extern struct iwl_cfg iwl130_bgn_cfg; extern struct iwl_cfg iwl130_bg_cfg; extern struct iwl_cfg iwl2000_2bgn_cfg; +extern struct iwl_cfg iwl2000_2bg_cfg; extern struct iwl_cfg iwl2000_2bgn_d_cfg; extern struct iwl_cfg iwl2030_2bgn_cfg; +extern struct iwl_cfg iwl2030_2bg_cfg; extern struct iwl_cfg iwl6035_2agn_cfg; +extern struct iwl_cfg iwl6035_2abg_cfg; +extern struct iwl_cfg iwl6035_2bg_cfg; +extern struct iwl_cfg iwl105_bg_cfg; extern struct iwl_cfg iwl105_bgn_cfg; extern struct iwl_cfg iwl105_bgn_d_cfg; +extern struct iwl_cfg iwl135_bg_cfg; extern struct iwl_cfg iwl135_bgn_cfg; #endif /* __iwl_pci_h__ */ diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-commands.h b/trunk/drivers/net/wireless/iwlwifi/iwl-commands.h index 265de39d394c..69d5f85d11e2 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-commands.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-commands.h @@ -109,10 +109,10 @@ enum { /* RX, TX, LEDs */ REPLY_TX = 0x1c, REPLY_LEDS_CMD = 0x48, - REPLY_TX_LINK_QUALITY_CMD = 0x4e, + 
REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */ /* WiMAX coexistence */ - COEX_PRIORITY_TABLE_CMD = 0x5a, + COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */ COEX_MEDIUM_NOTIFICATION = 0x5b, COEX_EVENT_CMD = 0x5c, @@ -198,7 +198,6 @@ enum { REPLY_WOWLAN_TKIP_PARAMS = 0xe3, REPLY_WOWLAN_KEK_KCK_MATERIAL = 0xe4, REPLY_WOWLAN_GET_STATUS = 0xe5, - REPLY_D3_CONFIG = 0xd3, REPLY_MAX = 0xff }; @@ -466,27 +465,23 @@ struct iwl_error_event_table { u32 frame_ptr; /* frame pointer */ u32 stack_ptr; /* stack pointer */ u32 hcmd; /* last host command header */ - u32 isr0; /* isr status register LMPM_NIC_ISR0: - * rxtx_flag */ - u32 isr1; /* isr status register LMPM_NIC_ISR1: - * host_flag */ - u32 isr2; /* isr status register LMPM_NIC_ISR2: - * enc_flag */ - u32 isr3; /* isr status register LMPM_NIC_ISR3: - * time_flag */ - u32 isr4; /* isr status register LMPM_NIC_ISR4: - * wico interrupt */ +#if 0 + /* no need to read the remainder, we don't use the values */ + u32 isr0; /* isr status register LMPM_NIC_ISR0: rxtx_flag */ + u32 isr1; /* isr status register LMPM_NIC_ISR1: host_flag */ + u32 isr2; /* isr status register LMPM_NIC_ISR2: enc_flag */ + u32 isr3; /* isr status register LMPM_NIC_ISR3: time_flag */ + u32 isr4; /* isr status register LMPM_NIC_ISR4: wico interrupt */ u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */ u32 wait_event; /* wait event() caller address */ u32 l2p_control; /* L2pControlField */ u32 l2p_duration; /* L2pDurationField */ u32 l2p_mhvalid; /* L2pMhValidBits */ u32 l2p_addr_match; /* L2pAddrMatchStat */ - u32 lmpm_pmg_sel; /* indicate which clocks are turned on - * (LMPM_PMG_SEL) */ - u32 u_timestamp; /* indicate when the date and time of the - * compilation */ + u32 lmpm_pmg_sel; /* indicate which clocks are turned on (LMPM_PMG_SEL) */ + u32 u_timestamp; /* indicate when the date and time of the compilation */ u32 flow_handler; /* FH read/write pointers, RX credit */ +#endif } __packed; struct iwl_alive_resp { @@ -814,7 +809,7 @@ struct iwl_qosparam_cmd { #define IWLAGN_STATION_COUNT 16 #define IWL_INVALID_STATION 255 -#define IWL_MAX_TID_COUNT 8 +#define IWL_MAX_TID_COUNT 9 #define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2) #define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8) @@ -935,7 +930,8 @@ struct iwl_addsta_cmd { * corresponding to bit (e.g. bit 5 controls TID 5). * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ __le16 tid_disable_tx; - __le16 legacy_reserved; + + __le16 rate_n_flags; /* 3945 only */ /* TID for which to add block-ack support. * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ @@ -1165,7 +1161,8 @@ struct iwl_rx_mpdu_res_start { * * uCode handles retrying Tx when an ACK is expected but not received. * This includes trying lower data rates than the one requested in the Tx - * command, as set up by the REPLY_TX_LINK_QUALITY_CMD (agn). + * command, as set up by the REPLY_RATE_SCALE (for 3945) or + * REPLY_TX_LINK_QUALITY_CMD (agn). * * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD. * This command must be executed after every RXON command, before Tx can occur. @@ -1177,9 +1174,25 @@ struct iwl_rx_mpdu_res_start { * 1: Use RTS/CTS protocol or CTS-to-self if spec allows it * before this frame. if CTS-to-self required check * RXON_FLG_SELF_CTS_EN status. + * unused in 3945/4965, used in 5000 series and after */ #define TX_CMD_FLG_PROT_REQUIRE_MSK cpu_to_le32(1 << 0) +/* + * 1: Use Request-To-Send protocol before this frame. + * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK. 
+ * used in 3945/4965, unused in 5000 series and after + */ +#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1) + +/* + * 1: Transmit Clear-To-Send to self before this frame. + * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames. + * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK. + * used in 3945/4965, unused in 5000 series and after + */ +#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2) + /* 1: Expect ACK from receiving station * 0: Don't expect ACK (MAC header's duration field s/b 0) * Set this for unicast frames, but not broadcast/multicast. */ @@ -1197,8 +1210,18 @@ struct iwl_rx_mpdu_res_start { * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */ #define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6) -/* Tx antenna selection field; reserved (0) for agn devices. */ +/* + * 1: Frame requires full Tx-Op protection. + * Set this if either RTS or CTS Tx Flag gets set. + * used in 3945/4965, unused in 5000 series and after + */ +#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7) + +/* Tx antenna selection field; used only for 3945, reserved (0) for agn devices. + * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */ #define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00) +#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8) +#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9) /* 1: Ignore Bluetooth priority for this frame. * 0: Delay Tx until Bluetooth device is done (normal usage). */ @@ -1544,6 +1567,7 @@ struct iwl_compressed_ba_resp { __le64 bitmap; __le16 scd_flow; __le16 scd_ssn; + /* following only for 5000 series and up */ u8 txed; /* number of frames sent */ u8 txed_2_done; /* number of frames acked */ } __packed; @@ -1645,7 +1669,7 @@ struct iwl_link_qual_agg_params { /* * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) * - * For agn devices + * For agn devices only; 3945 uses REPLY_RATE_SCALE. * * Each station in the agn device's internal station table has its own table * of 16 @@ -1894,7 +1918,7 @@ struct iwl_link_quality_cmd { /* * REPLY_BT_CONFIG = 0x9b (command, has simple generic response) * - * agn devices support hardware handshake with Bluetooth device on + * 3945 and agn devices support hardware handshake with Bluetooth device on * same platform. Bluetooth device alerts wireless device when it will Tx; * wireless device can delay or kill its own Tx to accommodate. */ @@ -2178,8 +2202,8 @@ struct iwl_spectrum_notification { struct iwl_powertable_cmd { __le16 flags; - u8 keep_alive_seconds; - u8 debug_flags; + u8 keep_alive_seconds; /* 3945 reserved */ + u8 debug_flags; /* 3945 reserved */ __le32 rx_data_timeout; __le32 tx_data_timeout; __le32 sleep_interval[IWL_POWER_VEC_SIZE]; @@ -2300,9 +2324,9 @@ struct iwl_scan_channel { /** * struct iwl_ssid_ie - directed scan network information element * - * Up to 20 of these may appear in REPLY_SCAN_CMD, - * selected by "type" bit field in struct iwl_scan_channel; - * each channel may select different ssids from among the 20 entries. + * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in + * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel; + * each channel may select different ssids from among the 20 (4) entries. * SSID IEs get transmitted in reverse order of entry. 
*/ struct iwl_ssid_ie { @@ -2311,6 +2335,7 @@ struct iwl_ssid_ie { u8 ssid[32]; } __packed; +#define PROBE_OPTION_MAX_3945 4 #define PROBE_OPTION_MAX 20 #define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF) #define IWL_GOOD_CRC_TH_DISABLED 0 @@ -2391,6 +2416,8 @@ struct iwl_scan_cmd { * channel */ __le32 suspend_time; /* pause scan this long (in "extended beacon * format") when returning to service chnl: + * 3945; 31:24 # beacons, 19:0 additional usec, + * 4965; 31:22 # beacons, 21:0 additional usec. */ __le32 flags; /* RXON_FLG_* */ __le32 filter_flags; /* RXON_FILTER_* */ @@ -2706,7 +2733,7 @@ struct statistics_div { struct statistics_general_common { __le32 temperature; /* radio temperature */ - __le32 temperature_m; /* radio voltage */ + __le32 temperature_m; /* for 5000 and up, this is radio voltage */ struct statistics_dbg dbg; __le32 sleep_time; __le32 slots_out; @@ -3773,19 +3800,6 @@ struct iwl_bt_coex_prot_env_cmd { u8 reserved[2]; } __attribute__((packed)); -/* - * REPLY_D3_CONFIG - */ -enum iwlagn_d3_wakeup_filters { - IWLAGN_D3_WAKEUP_RFKILL = BIT(0), - IWLAGN_D3_WAKEUP_SYSASSERT = BIT(1), -}; - -struct iwlagn_d3_config_cmd { - __le32 min_sleep_time; - __le32 wakeup_flags; -} __packed; - /* * REPLY_WOWLAN_PATTERNS */ @@ -3816,16 +3830,19 @@ enum iwlagn_wowlan_wakeup_filters { IWLAGN_WOWLAN_WAKEUP_BEACON_MISS = BIT(2), IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3), IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4), - IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(5), - IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(6), - IWLAGN_WOWLAN_WAKEUP_ALWAYS = BIT(7), - IWLAGN_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(8), + IWLAGN_WOWLAN_WAKEUP_RFKILL = BIT(5), + IWLAGN_WOWLAN_WAKEUP_UCODE_ERROR = BIT(6), + IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(7), + IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(8), + IWLAGN_WOWLAN_WAKEUP_ALWAYS = BIT(9), + IWLAGN_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(10), }; struct iwlagn_wowlan_wakeup_filter_cmd { __le32 enabled; __le16 non_qos_seq; - __le16 reserved; + u8 min_sleep_seconds; + u8 reserved; __le16 qos_seq[8]; }; diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-core.c b/trunk/drivers/net/wireless/iwlwifi/iwl-core.c index 7bcfa781e0b9..fcf54160e4ed 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-core.c @@ -60,8 +60,8 @@ static void iwl_init_ht_hw_capab(const struct iwl_priv *priv, ht_info->ht_supported = true; - if (cfg(priv)->ht_params && - cfg(priv)->ht_params->ht_greenfield_support) + if (priv->cfg->ht_params && + priv->cfg->ht_params->ht_greenfield_support) ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD; ht_info->cap |= IEEE80211_HT_CAP_SGI_20; max_bit_rate = MAX_BIT_RATE_20_MHZ; @@ -76,7 +76,11 @@ static void iwl_init_ht_hw_capab(const struct iwl_priv *priv, ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; + if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_factor) + ht_info->ampdu_factor = priv->cfg->bt_params->ampdu_factor; ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; + if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_density) + ht_info->ampdu_density = priv->cfg->bt_params->ampdu_density; ht_info->mcs.rx_mask[0] = 0xFF; if (rx_chains_num >= 2) @@ -137,7 +141,7 @@ int iwl_init_geos(struct iwl_priv *priv) sband->bitrates = &rates[IWL_FIRST_OFDM_RATE]; sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE; - if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE) + if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE) iwl_init_ht_hw_capab(priv, 
&sband->ht_cap, IEEE80211_BAND_5GHZ); @@ -147,7 +151,7 @@ int iwl_init_geos(struct iwl_priv *priv) sband->bitrates = rates; sband->n_bitrates = IWL_RATE_COUNT_LEGACY; - if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE) + if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE) iwl_init_ht_hw_capab(priv, &sband->ht_cap, IEEE80211_BAND_2GHZ); @@ -202,12 +206,12 @@ int iwl_init_geos(struct iwl_priv *priv) priv->tx_power_next = max_tx_power; if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) && - cfg(priv)->sku & EEPROM_SKU_CAP_BAND_52GHZ) { + priv->cfg->sku & EEPROM_SKU_CAP_BAND_52GHZ) { char buf[32]; - bus_get_hw_id_string(bus(priv), buf, sizeof(buf)); + bus_get_hw_id(bus(priv), buf, sizeof(buf)); IWL_INFO(priv, "Incorrectly detected BG card as ABG. " "Please send your %s to maintainer.\n", buf); - cfg(priv)->sku &= ~EEPROM_SKU_CAP_BAND_52GHZ; + priv->cfg->sku &= ~EEPROM_SKU_CAP_BAND_52GHZ; } IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n", @@ -832,6 +836,19 @@ void iwl_print_rx_config_cmd(struct iwl_priv *priv, } #endif +static void iwlagn_abort_notification_waits(struct iwl_priv *priv) +{ + unsigned long flags; + struct iwl_notification_wait *wait_entry; + + spin_lock_irqsave(&priv->notif_wait_lock, flags); + list_for_each_entry(wait_entry, &priv->notif_waits, list) + wait_entry->aborted = true; + spin_unlock_irqrestore(&priv->notif_wait_lock, flags); + + wake_up_all(&priv->notif_waitq); +} + void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand) { unsigned int reload_msec; @@ -843,7 +860,7 @@ void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand) /* Cancel currently queued command. */ clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status); - iwl_abort_notification_waits(priv->shrd); + iwlagn_abort_notification_waits(priv); /* Keep the restart process from trying to send host * commands by clearing the ready bit */ @@ -962,9 +979,9 @@ int iwl_apm_init(struct iwl_priv *priv) bus_apm_config(bus(priv)); /* Configure analog phase-lock-loop before activating to D0A */ - if (cfg(priv)->base_params->pll_cfg_val) + if (priv->cfg->base_params->pll_cfg_val) iwl_set_bit(bus(priv), CSR_ANA_PLL_CFG, - cfg(priv)->base_params->pll_cfg_val); + priv->cfg->base_params->pll_cfg_val); /* * Set "initialization complete" bit to move adapter from @@ -1103,8 +1120,229 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear) &statistics_cmd); } +int iwlagn_mac_conf_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx; + unsigned long flags; + int q; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + if (!iwl_is_ready_rf(priv->shrd)) { + IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n"); + return -EIO; + } + if (queue >= AC_NUM) { + IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue); + return 0; + } + + q = AC_NUM - 1 - queue; + + spin_lock_irqsave(&priv->shrd->lock, flags); + + /* + * MULTI-FIXME + * This may need to be done per interface in nl80211/cfg80211/mac80211. 
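	 *
	 * Editorial note, not part of this patch: in the loop below the
	 * EDCA TXOP is written as (params->txop * 32) because mac80211
	 * reports the TXOP limit in units of 32 usec, while the uCode's
	 * edca_txop field expects plain microseconds.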
+ */ + for_each_context(priv, ctx) { + ctx->qos_data.def_qos_parm.ac[q].cw_min = + cpu_to_le16(params->cw_min); + ctx->qos_data.def_qos_parm.ac[q].cw_max = + cpu_to_le16(params->cw_max); + ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; + ctx->qos_data.def_qos_parm.ac[q].edca_txop = + cpu_to_le16((params->txop * 32)); + + ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0; + } + + spin_unlock_irqrestore(&priv->shrd->lock, flags); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + return 0; +} + +int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + return priv->ibss_manager == IWL_IBSS_MANAGER; +} + +static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + iwl_connection_init_rx_config(priv, ctx); + + iwlagn_set_rxon_chain(priv, ctx); + + return iwlagn_commit_rxon(priv, ctx); +} + +static int iwl_setup_interface(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + struct ieee80211_vif *vif = ctx->vif; + int err; + + lockdep_assert_held(&priv->shrd->mutex); + + /* + * This variable will be correct only when there's just + * a single context, but all code using it is for hardware + * that supports only one context. + */ + priv->iw_mode = vif->type; + + ctx->is_active = true; + + err = iwl_set_mode(priv, ctx); + if (err) { + if (!ctx->always_active) + ctx->is_active = false; + return err; + } + + if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist && + vif->type == NL80211_IFTYPE_ADHOC) { + /* + * pretend to have high BT traffic as long as we + * are operating in IBSS mode, as this will cause + * the rate scaling etc. to behave as intended. + */ + priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH; + } + + return 0; +} + +int iwlagn_mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + struct iwl_rxon_context *tmp, *ctx = NULL; + int err; + enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif); + + IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n", + viftype, vif->addr); + + cancel_delayed_work_sync(&priv->hw_roc_disable_work); + + mutex_lock(&priv->shrd->mutex); + + iwlagn_disable_roc(priv); + + if (!iwl_is_ready_rf(priv->shrd)) { + IWL_WARN(priv, "Try to add interface when device not ready\n"); + err = -EINVAL; + goto out; + } + + for_each_context(priv, tmp) { + u32 possible_modes = + tmp->interface_modes | tmp->exclusive_interface_modes; + + if (tmp->vif) { + /* check if this busy context is exclusive */ + if (tmp->exclusive_interface_modes & + BIT(tmp->vif->type)) { + err = -EINVAL; + goto out; + } + continue; + } + + if (!(possible_modes & BIT(viftype))) + continue; + + /* have maybe usable context w/o interface */ + ctx = tmp; + break; + } + + if (!ctx) { + err = -EOPNOTSUPP; + goto out; + } + + vif_priv->ctx = ctx; + ctx->vif = vif; + + err = iwl_setup_interface(priv, ctx); + if (!err) + goto out; + + ctx->vif = NULL; + priv->iw_mode = NL80211_IFTYPE_STATION; + out: + mutex_unlock(&priv->shrd->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + return err; +} + +static void iwl_teardown_interface(struct iwl_priv *priv, + struct ieee80211_vif *vif, + bool mode_change) +{ + struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); + + lockdep_assert_held(&priv->shrd->mutex); + + if (priv->scan_vif == vif) { + iwl_scan_cancel_timeout(priv, 200); + iwl_force_scan_end(priv); + } + + if (!mode_change) { + iwl_set_mode(priv, ctx); + if (!ctx->always_active) + ctx->is_active = 
false; + } + + /* + * When removing the IBSS interface, overwrite the + * BT traffic load with the stored one from the last + * notification, if any. If this is a device that + * doesn't implement this, this has no effect since + * both values are the same and zero. + */ + if (vif->type == NL80211_IFTYPE_ADHOC) + priv->bt_traffic_load = priv->last_bt_traffic_load; +} + +void iwlagn_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + mutex_lock(&priv->shrd->mutex); + + if (WARN_ON(ctx->vif != vif)) { + struct iwl_rxon_context *tmp; + IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif); + for_each_context(priv, tmp) + IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n", + tmp->ctxid, tmp, tmp->vif); + } + ctx->vif = NULL; + + iwl_teardown_interface(priv, vif, false); + + mutex_unlock(&priv->shrd->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + +} #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -1411,13 +1649,97 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external) return 0; } +int iwlagn_mac_change_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum nl80211_iftype newtype, bool newp2p) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); + struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct iwl_rxon_context *tmp; + enum nl80211_iftype newviftype = newtype; + u32 interface_modes; + int err; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + newtype = ieee80211_iftype_p2p(newtype, newp2p); + + mutex_lock(&priv->shrd->mutex); + + if (!ctx->vif || !iwl_is_ready_rf(priv->shrd)) { + /* + * Huh? But wait ... this can maybe happen when + * we're in the middle of a firmware restart! + */ + err = -EBUSY; + goto out; + } + + interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes; + + if (!(interface_modes & BIT(newtype))) { + err = -EBUSY; + goto out; + } + + /* + * Refuse a change that should be done by moving from the PAN + * context to the BSS context instead, if the BSS context is + * available and can support the new interface type. + */ + if (ctx->ctxid == IWL_RXON_CTX_PAN && !bss_ctx->vif && + (bss_ctx->interface_modes & BIT(newtype) || + bss_ctx->exclusive_interface_modes & BIT(newtype))) { + BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); + err = -EBUSY; + goto out; + } + + if (ctx->exclusive_interface_modes & BIT(newtype)) { + for_each_context(priv, tmp) { + if (ctx == tmp) + continue; + + if (!tmp->vif) + continue; + + /* + * The current mode switch would be exclusive, but + * another context is active ... refuse the switch. + */ + err = -EBUSY; + goto out; + } + } + + /* success */ + iwl_teardown_interface(priv, vif, true); + vif->type = newviftype; + vif->p2p = newp2p; + err = iwl_setup_interface(priv, ctx); + WARN_ON(err); + /* + * We've switched internally, but submitting to the + * device may have failed for some reason. Mask this + * error, because otherwise mac80211 will not switch + * (and set the interface type back) and we'll be + * out of sync with it. 
+ */ + err = 0; + + out: + mutex_unlock(&priv->shrd->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return err; +} int iwl_cmd_echo_test(struct iwl_priv *priv) { int ret; struct iwl_host_cmd cmd = { .id = REPLY_ECHO, - .len = { 0 }, .flags = CMD_SYNC, }; @@ -1461,7 +1783,7 @@ void iwl_bg_watchdog(unsigned long data) if (iwl_is_rfkill(priv->shrd)) return; - timeout = cfg(priv)->base_params->wd_timeout; + timeout = priv->cfg->base_params->wd_timeout; if (timeout == 0) return; @@ -1486,11 +1808,11 @@ void iwl_bg_watchdog(unsigned long data) void iwl_setup_watchdog(struct iwl_priv *priv) { - unsigned int timeout = cfg(priv)->base_params->wd_timeout; + unsigned int timeout = priv->cfg->base_params->wd_timeout; if (!iwlagn_mod_params.wd_disable) { /* use system default */ - if (timeout && !cfg(priv)->base_params->wd_disable) + if (timeout && !priv->cfg->base_params->wd_disable) mod_timer(&priv->watchdog, jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout))); @@ -1580,6 +1902,34 @@ __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base, return cpu_to_le32(res); } +void iwl_start_tx_ba_trans_ready(struct iwl_priv *priv, + enum iwl_rxon_context_id ctx, + u8 sta_id, u8 tid) +{ + struct ieee80211_vif *vif; + u8 *addr = priv->stations[sta_id].sta.sta.addr; + + if (ctx == NUM_IWL_RXON_CTX) + ctx = priv->stations[sta_id].ctxid; + vif = priv->contexts[ctx].vif; + + ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid); +} + +void iwl_stop_tx_ba_trans_ready(struct iwl_priv *priv, + enum iwl_rxon_context_id ctx, + u8 sta_id, u8 tid) +{ + struct ieee80211_vif *vif; + u8 *addr = priv->stations[sta_id].sta.sta.addr; + + if (ctx == NUM_IWL_RXON_CTX) + ctx = priv->stations[sta_id].ctxid; + vif = priv->contexts[ctx].vif; + + ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid); +} + void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state) { wiphy_rfkill_set_hw_state(priv->hw->wiphy, state); @@ -1587,7 +1937,8 @@ void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state) void iwl_nic_config(struct iwl_priv *priv) { - cfg(priv)->lib->nic_config(priv); + priv->cfg->lib->nic_config(priv); + } void iwl_free_skb(struct iwl_priv *priv, struct sk_buff *skb) diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-core.h b/trunk/drivers/net/wireless/iwlwifi/iwl-core.h index 7bf76ab94dd2..f2fc288f3dd3 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-core.h @@ -142,6 +142,8 @@ struct iwl_base_params { * @bt_init_traffic_load: specify initial bt traffic load * @bt_prio_boost: default bt priority boost value * @agg_time_limit: maximum number of uSec in aggregation + * @ampdu_factor: Maximum A-MPDU length factor + * @ampdu_density: Minimum A-MPDU spacing * @bt_sco_disable: uCode should not response to BT in SCO/ESCO mode */ struct iwl_bt_params { @@ -149,6 +151,8 @@ struct iwl_bt_params { u8 bt_init_traffic_load; u8 bt_prio_boost; u16 agg_time_limit; + u8 ampdu_factor; + u8 ampdu_density; bool bt_sco_disable; bool bt_session_2; }; @@ -161,10 +165,84 @@ struct iwl_ht_params { enum ieee80211_smps_mode smps_mode; }; +/** + * struct iwl_cfg + * @name: Offical name of the device + * @fw_name_pre: Firmware filename prefix. The api version and extension + * (.ucode) will be added to filename before loading from disk. The + * filename is constructed as fw_name_pre.ucode. + * @ucode_api_max: Highest version of uCode API supported by driver. 
+ * @ucode_api_ok: oldest version of the uCode API that is OK to load + * without a warning, for use in transitions + * @ucode_api_min: Lowest version of uCode API supported by driver. + * @valid_tx_ant: valid transmit antenna + * @valid_rx_ant: valid receive antenna + * @sku: sku information from EEPROM + * @eeprom_ver: EEPROM version + * @eeprom_calib_ver: EEPROM calibration version + * @lib: pointer to the lib ops + * @additional_nic_config: additional nic configuration + * @base_params: pointer to basic parameters + * @ht_params: point to ht patameters + * @bt_params: pointer to bt parameters + * @pa_type: used by 6000 series only to identify the type of Power Amplifier + * @need_dc_calib: need to perform init dc calibration + * @need_temp_offset_calib: need to perform temperature offset calibration + * @scan_antennas: available antenna for scan operation + * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) + * @adv_pm: advance power management + * @rx_with_siso_diversity: 1x1 device with rx antenna diversity + * @internal_wimax_coex: internal wifi/wimax combo device + * @iq_invert: I/Q inversion + * @temp_offset_v2: support v2 of temperature offset calibration + * + * We enable the driver to be backward compatible wrt API version. The + * driver specifies which APIs it supports (with @ucode_api_max being the + * highest and @ucode_api_min the lowest). Firmware will only be loaded if + * it has a supported API version. + * + * The ideal usage of this infrastructure is to treat a new ucode API + * release as a new hardware revision. + */ +struct iwl_cfg { + /* params specific to an individual device within a device family */ + const char *name; + const char *fw_name_pre; + const unsigned int ucode_api_max; + const unsigned int ucode_api_ok; + const unsigned int ucode_api_min; + u8 valid_tx_ant; + u8 valid_rx_ant; + u16 sku; + u16 eeprom_ver; + u16 eeprom_calib_ver; + const struct iwl_lib_ops *lib; + void (*additional_nic_config)(struct iwl_priv *priv); + /* params not likely to change within a device family */ + struct iwl_base_params *base_params; + /* params likely to change within a device family */ + struct iwl_ht_params *ht_params; + struct iwl_bt_params *bt_params; + enum iwl_pa_type pa_type; /* if used set to IWL_PA_SYSTEM */ + const bool need_dc_calib; /* if used set to true */ + const bool need_temp_offset_calib; /* if used set to true */ + u8 scan_rx_antennas[IEEE80211_NUM_BANDS]; + enum iwl_led_mode led_mode; + const bool adv_pm; + const bool rx_with_siso_diversity; + const bool internal_wimax_coex; + const bool iq_invert; + const bool temp_offset_v2; +}; + /*************************** * L i b * ***************************/ +int iwlagn_mac_conf_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u16 queue, + const struct ieee80211_tx_queue_params *params); +int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw); void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx, int hw_decrypt); int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx); @@ -184,6 +262,13 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv, void iwl_connection_init_rx_config(struct iwl_priv *priv, struct iwl_rxon_context *ctx); void iwl_set_rate(struct iwl_priv *priv); +int iwlagn_mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +void iwlagn_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +int iwlagn_mac_change_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum nl80211_iftype newtype, bool 
newp2p); int iwl_cmd_echo_test(struct iwl_priv *priv); #ifdef CONFIG_IWLWIFI_DEBUGFS int iwl_alloc_traffic_mem(struct iwl_priv *priv); @@ -240,6 +325,9 @@ void iwl_init_scan_params(struct iwl_priv *priv); int iwl_scan_cancel(struct iwl_priv *priv); void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); void iwl_force_scan_end(struct iwl_priv *priv); +int iwlagn_mac_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_scan_request *req); void iwl_internal_short_hw_scan(struct iwl_priv *priv); int iwl_force_reset(struct iwl_priv *priv, int mode, bool external); u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, @@ -293,8 +381,8 @@ static inline const struct ieee80211_supported_band *iwl_get_hw_mode( static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv) { - return cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist; + return priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist; } static inline void iwl_enable_rfkill_int(struct iwl_priv *priv) diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-csr.h b/trunk/drivers/net/wireless/iwlwifi/iwl-csr.h index fbc3095c7b44..b9f3267e720c 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-csr.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-csr.h @@ -284,8 +284,8 @@ #define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05 #define CSR_HW_REV_TYPE_2x30 (0x00000C0) #define CSR_HW_REV_TYPE_2x00 (0x0000100) -#define CSR_HW_REV_TYPE_105 (0x0000110) -#define CSR_HW_REV_TYPE_135 (0x0000120) +#define CSR_HW_REV_TYPE_200 (0x0000110) +#define CSR_HW_REV_TYPE_230 (0x0000120) #define CSR_HW_REV_TYPE_NONE (0x00001F0) /* EEPROM REG */ diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-debug.h b/trunk/drivers/net/wireless/iwlwifi/iwl-debug.h index f8fc2393dd4c..69a77e24d229 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-debug.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-debug.h @@ -47,21 +47,20 @@ do { \ } while (0) #ifdef CONFIG_IWLWIFI_DEBUG -#define IWL_DEBUG(m, level, fmt, ...) \ +#define IWL_DEBUG(m, level, fmt, args...) \ do { \ if (iwl_get_debug_level((m)->shrd) & (level)) \ - dev_err(bus(m)->dev, "%c %s " fmt, \ - in_interrupt() ? 'I' : 'U', __func__, \ - ##__VA_ARGS__); \ + dev_printk(KERN_ERR, bus(m)->dev, \ + "%c %s " fmt, in_interrupt() ? 'I' : 'U', \ + __func__ , ## args); \ } while (0) -#define IWL_DEBUG_LIMIT(m, level, fmt, ...) \ +#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \ do { \ - if (iwl_get_debug_level((m)->shrd) & (level) && \ - net_ratelimit()) \ - dev_err(bus(m)->dev, "%c %s " fmt, \ - in_interrupt() ? 'I' : 'U', __func__, \ - ##__VA_ARGS__); \ + if (iwl_get_debug_level((m)->shrd) & (level) && net_ratelimit())\ + dev_printk(KERN_ERR, bus(m)->dev, \ + "%c %s " fmt, in_interrupt() ? 'I' : 'U', \ + __func__ , ## args); \ } while (0) #define iwl_print_hex_dump(m, level, p, len) \ @@ -71,29 +70,10 @@ do { \ DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ } while (0) -#define IWL_DEBUG_QUIET_RFKILL(p, fmt, ...) \ -do { \ - if (!iwl_is_rfkill(p->shrd)) \ - dev_err(bus(p)->dev, "%s%c %s " fmt, \ - "", \ - in_interrupt() ? 'I' : 'U', __func__, \ - ##__VA_ARGS__); \ - else if (iwl_get_debug_level(p->shrd) & IWL_DL_RADIO) \ - dev_err(bus(p)->dev, "%s%c %s " fmt, \ - "(RFKILL) ", \ - in_interrupt() ? 'I' : 'U', __func__, \ - ##__VA_ARGS__); \ -} while (0) - #else #define IWL_DEBUG(m, level, fmt, args...) #define IWL_DEBUG_LIMIT(m, level, fmt, args...) #define iwl_print_hex_dump(m, level, p, len) -#define IWL_DEBUG_QUIET_RFKILL(p, fmt, args...) 
\ -do { \ - if (!iwl_is_rfkill(p->shrd)) \ - IWL_ERR(p, fmt, ##args); \ -} while (0) #endif /* CONFIG_IWLWIFI_DEBUG */ #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -134,43 +114,48 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv) */ /* 0x0000000F - 0x00000001 */ -#define IWL_DL_INFO 0x00000001 -#define IWL_DL_MAC80211 0x00000002 -#define IWL_DL_HCMD 0x00000004 -#define IWL_DL_STATE 0x00000008 +#define IWL_DL_INFO (1 << 0) +#define IWL_DL_MAC80211 (1 << 1) +#define IWL_DL_HCMD (1 << 2) +#define IWL_DL_STATE (1 << 3) /* 0x000000F0 - 0x00000010 */ -#define IWL_DL_EEPROM 0x00000040 -#define IWL_DL_RADIO 0x00000080 +#define IWL_DL_MACDUMP (1 << 4) +#define IWL_DL_HCMD_DUMP (1 << 5) +#define IWL_DL_EEPROM (1 << 6) +#define IWL_DL_RADIO (1 << 7) /* 0x00000F00 - 0x00000100 */ -#define IWL_DL_POWER 0x00000100 -#define IWL_DL_TEMP 0x00000200 -#define IWL_DL_SCAN 0x00000800 +#define IWL_DL_POWER (1 << 8) +#define IWL_DL_TEMP (1 << 9) +/* reserved (1 << 10) */ +#define IWL_DL_SCAN (1 << 11) /* 0x0000F000 - 0x00001000 */ -#define IWL_DL_ASSOC 0x00001000 -#define IWL_DL_DROP 0x00002000 -#define IWL_DL_COEX 0x00008000 +#define IWL_DL_ASSOC (1 << 12) +#define IWL_DL_DROP (1 << 13) +/* reserved (1 << 14) */ +#define IWL_DL_COEX (1 << 15) /* 0x000F0000 - 0x00010000 */ -#define IWL_DL_FW 0x00010000 -#define IWL_DL_RF_KILL 0x00020000 -#define IWL_DL_FW_ERRORS 0x00040000 -#define IWL_DL_LED 0x00080000 +#define IWL_DL_FW (1 << 16) +#define IWL_DL_RF_KILL (1 << 17) +#define IWL_DL_FW_ERRORS (1 << 18) +#define IWL_DL_LED (1 << 19) /* 0x00F00000 - 0x00100000 */ -#define IWL_DL_RATE 0x00100000 -#define IWL_DL_CALIB 0x00200000 -#define IWL_DL_WEP 0x00400000 -#define IWL_DL_TX 0x00800000 +#define IWL_DL_RATE (1 << 20) +#define IWL_DL_CALIB (1 << 21) +#define IWL_DL_WEP (1 << 22) +#define IWL_DL_TX (1 << 23) /* 0x0F000000 - 0x01000000 */ -#define IWL_DL_RX 0x01000000 -#define IWL_DL_ISR 0x02000000 -#define IWL_DL_HT 0x04000000 +#define IWL_DL_RX (1 << 24) +#define IWL_DL_ISR (1 << 25) +#define IWL_DL_HT (1 << 26) /* 0xF0000000 - 0x10000000 */ -#define IWL_DL_11H 0x10000000 -#define IWL_DL_STATS 0x20000000 -#define IWL_DL_TX_REPLY 0x40000000 -#define IWL_DL_TX_QUEUES 0x80000000 +#define IWL_DL_11H (1 << 28) +#define IWL_DL_STATS (1 << 29) +#define IWL_DL_TX_REPLY (1 << 30) +#define IWL_DL_QOS (1 << 31) #define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a) #define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a) +#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a) #define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a) #define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a) #define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a) @@ -179,6 +164,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv) #define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a) #define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a) #define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a) +#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a) #define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a) #define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a) #define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a) @@ -200,7 +186,9 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv) #define IWL_DEBUG_STATS_LIMIT(p, f, a...) \ IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a) #define IWL_DEBUG_TX_REPLY(p, f, a...) 
IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a) -#define IWL_DEBUG_TX_QUEUES(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_QUEUES, f, ## a) +#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a) +#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a) #define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a) #define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a) #define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a) diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/trunk/drivers/net/wireless/iwlwifi/iwl-debugfs.c index 04a3343f4610..a1670e3f8bfa 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-debugfs.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-debugfs.c @@ -234,12 +234,11 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, /* default is to dump the entire data segment */ if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { - struct iwl_trans *trans = trans(priv); priv->dbgfs_sram_offset = 0x800000; - if (trans->shrd->ucode_type == IWL_UCODE_INIT) - priv->dbgfs_sram_len = trans->ucode_init.data.len; + if (priv->ucode_type == IWL_UCODE_INIT) + priv->dbgfs_sram_len = priv->ucode_init.data.len; else - priv->dbgfs_sram_len = trans->ucode_rt.data.len; + priv->dbgfs_sram_len = priv->ucode_rt.data.len; } len = priv->dbgfs_sram_len; @@ -342,7 +341,7 @@ static ssize_t iwl_dbgfs_wowlan_sram_read(struct file *file, return simple_read_from_buffer(user_buf, count, ppos, priv->wowlan_sram, - trans(priv)->ucode_wowlan.data.len); + priv->ucode_wowlan.data.len); } static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -372,13 +371,15 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, i, station->sta.sta.addr, station->sta.station_flags_msk); pos += scnprintf(buf + pos, bufsz - pos, - "TID\tseq_num\trate_n_flags\n"); + "TID\tseq_num\ttxq_id\ttfds\trate_n_flags\n"); for (j = 0; j < IWL_MAX_TID_COUNT; j++) { - tid_data = &priv->tid_data[i][j]; + tid_data = &priv->shrd->tid_data[i][j]; pos += scnprintf(buf + pos, bufsz - pos, - "%d:\t%#x\t%#x", + "%d:\t%#x\t%#x\t%u\t%#x", j, tid_data->seq_number, + tid_data->agg.txq_id, + tid_data->tfds_in_queue, tid_data->agg.rate_n_flags); if (tid_data->agg.wait_for_ba) @@ -406,7 +407,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file, const u8 *ptr; char *buf; u16 eeprom_ver; - size_t eeprom_len = cfg(priv)->base_params->eeprom_size; + size_t eeprom_len = priv->cfg->base_params->eeprom_size; buf_size = 4 * eeprom_len + 256; if (eeprom_len % 16) { @@ -414,7 +415,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file, return -ENODATA; } - ptr = priv->shrd->eeprom; + ptr = priv->eeprom; if (!ptr) { IWL_ERR(priv, "Invalid EEPROM/OTP memory\n"); return -ENOMEM; @@ -426,10 +427,10 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file, IWL_ERR(priv, "Can not allocate Buffer\n"); return -ENOMEM; } - eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION); + eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION); pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, " "version: 0x%x\n", - (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP) + (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) ? 
"OTP" : "EEPROM", eeprom_ver); for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) { pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); @@ -1540,15 +1541,15 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file, if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) { pos += scnprintf(buf + pos, bufsz - pos, "tx power: (1/2 dB step)\n"); - if ((cfg(priv)->valid_tx_ant & ANT_A) && tx->tx_power.ant_a) + if ((priv->cfg->valid_tx_ant & ANT_A) && tx->tx_power.ant_a) pos += scnprintf(buf + pos, bufsz - pos, fmt_hex, "antenna A:", tx->tx_power.ant_a); - if ((cfg(priv)->valid_tx_ant & ANT_B) && tx->tx_power.ant_b) + if ((priv->cfg->valid_tx_ant & ANT_B) && tx->tx_power.ant_b) pos += scnprintf(buf + pos, bufsz - pos, fmt_hex, "antenna B:", tx->tx_power.ant_b); - if ((cfg(priv)->valid_tx_ant & ANT_C) && tx->tx_power.ant_c) + if ((priv->cfg->valid_tx_ant & ANT_C) && tx->tx_power.ant_c) pos += scnprintf(buf + pos, bufsz - pos, fmt_hex, "antenna C:", tx->tx_power.ant_c); @@ -2219,7 +2220,7 @@ static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file, const size_t bufsz = sizeof(buf); pos += scnprintf(buf + pos, bufsz - pos, "%u\n", - cfg(priv)->base_params->plcp_delta_threshold); + priv->cfg->base_params->plcp_delta_threshold); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } @@ -2241,10 +2242,10 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file, return -EINVAL; if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) || (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX)) - cfg(priv)->base_params->plcp_delta_threshold = + priv->cfg->base_params->plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE; else - cfg(priv)->base_params->plcp_delta_threshold = plcp; + priv->cfg->base_params->plcp_delta_threshold = plcp; return count; } @@ -2346,7 +2347,7 @@ static ssize_t iwl_dbgfs_wd_timeout_write(struct file *file, if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT) timeout = IWL_DEF_WD_TIMEOUT; - cfg(priv)->base_params->wd_timeout = timeout; + priv->cfg->base_params->wd_timeout = timeout; iwl_setup_watchdog(priv); return count; } @@ -2406,10 +2407,10 @@ static ssize_t iwl_dbgfs_protection_mode_read(struct file *file, char buf[40]; const size_t bufsz = sizeof(buf); - if (cfg(priv)->ht_params) + if (priv->cfg->ht_params) pos += scnprintf(buf + pos, bufsz - pos, "use %s for aggregation\n", - (cfg(priv)->ht_params->use_rts_for_aggregation) ? + (priv->cfg->ht_params->use_rts_for_aggregation) ? "rts/cts" : "cts-to-self"); else pos += scnprintf(buf + pos, bufsz - pos, "N/A"); @@ -2426,7 +2427,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file, int buf_size; int rts; - if (!cfg(priv)->ht_params) + if (!priv->cfg->ht_params) return -EINVAL; memset(buf, 0, sizeof(buf)); @@ -2436,9 +2437,9 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file, if (sscanf(buf, "%d", &rts) != 1) return -EINVAL; if (rts) - cfg(priv)->ht_params->use_rts_for_aggregation = true; + priv->cfg->ht_params->use_rts_for_aggregation = true; else - cfg(priv)->ht_params->use_rts_for_aggregation = false; + priv->cfg->ht_params->use_rts_for_aggregation = false; return count; } diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-dev.h b/trunk/drivers/net/wireless/iwlwifi/iwl-dev.h index e54a4d11e584..6c00a447963d 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-dev.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-dev.h @@ -60,10 +60,11 @@ struct iwl_tx_queue; /* Default noise level to report when noise measurement is not available. 
* This may be because we're: - * 1) Not associated no beacon statistics being sent to driver) + * 1) Not associated (4965, no beacon statistics being sent to driver) * 2) Scanning (noise measurement does not apply to associated channel) + * 3) Receiving CCK (3945 delivers noise info only for OFDM frames) * Use default noise value of -127 ... this is below the range of measurable - * Rx dBm for all agn devices, so it can indicate "unmeasurable" to user. + * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user. * Also, -127 works better than 0 when averaging frames with/without * noise info (e.g. averaging might be done in app); measured dBm values are * always negative ... using a negative value as the default keeps all @@ -189,69 +190,6 @@ struct iwl_qos_info { struct iwl_qosparam_cmd def_qos_parm; }; -/** - * enum iwl_agg_state - * - * The state machine of the BA agreement establishment / tear down. - * These states relate to a specific RA / TID. - * - * @IWL_AGG_OFF: aggregation is not used - * @IWL_AGG_ON: aggregation session is up - * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the - * HW queue to be empty from packets for this RA /TID. - * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the - * HW queue to be empty from packets for this RA /TID. - */ -enum iwl_agg_state { - IWL_AGG_OFF = 0, - IWL_AGG_ON, - IWL_EMPTYING_HW_QUEUE_ADDBA, - IWL_EMPTYING_HW_QUEUE_DELBA, -}; - -/** - * struct iwl_ht_agg - aggregation state machine - - * This structs holds the states for the BA agreement establishment and tear - * down. It also holds the state during the BA session itself. This struct is - * duplicated for each RA / TID. - - * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the - * Tx response (REPLY_TX), and the block ack notification - * (REPLY_COMPRESSED_BA). - * @state: state of the BA agreement establishment / tear down. - * @txq_id: Tx queue used by the BA session - used by the transport layer. - * Needed by the upper layer for debugfs only. - * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or - * the first packet to be sent in legacy HW queue in Tx AGG stop flow. - * Basically when next_reclaimed reaches ssn, we can tell mac80211 that - * we are ready to finish the Tx AGG stop / start flow. - * @wait_for_ba: Expect block-ack before next Tx reply - */ -struct iwl_ht_agg { - u32 rate_n_flags; - enum iwl_agg_state state; - u16 txq_id; - u16 ssn; - bool wait_for_ba; -}; - -/** - * struct iwl_tid_data - one for each RA / TID - - * This structs holds the states for each RA / TID. - - * @seq_number: the next WiFi sequence number to use - * @next_reclaimed: the WiFi sequence number of the next packet to be acked. - * This is basically (last acked packet++). - * @agg: aggregation state machine - */ -struct iwl_tid_data { - u16 seq_number; - u16 next_reclaimed; - struct iwl_ht_agg agg; -}; - /* * Structure should be accessed with sta_lock held. 
When station addition * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only @@ -292,6 +230,17 @@ struct iwl_vif_priv { u8 ibss_bssid_sta_id; }; +/* one for each uCode image (inst/data, boot/init/runtime) */ +struct fw_desc { + void *v_addr; /* access by driver */ + dma_addr_t p_addr; /* access by card's busmaster DMA */ + u32 len; /* bytes */ +}; + +struct fw_img { + struct fw_desc code, data; +}; + /* v1/v2 uCode file layout */ struct iwl_ucode_header { __le32 ver; /* major/minor/API/serial */ @@ -503,6 +452,29 @@ enum iwlagn_chain_noise_state { IWL_CHAIN_NOISE_DONE, }; + +/* + * enum iwl_calib + * defines the order in which results of initial calibrations + * should be sent to the runtime uCode + */ +enum iwl_calib { + IWL_CALIB_XTAL, + IWL_CALIB_DC, + IWL_CALIB_LO, + IWL_CALIB_TX_IQ, + IWL_CALIB_TX_IQ_PERD, + IWL_CALIB_BASE_BAND, + IWL_CALIB_TEMP_OFFSET, + IWL_CALIB_MAX +}; + +/* Opaque calibration results */ +struct iwl_calib_result { + void *buf; + size_t buf_len; +}; + /* Sensitivity calib data */ struct iwl_sensitivity_data { u32 auto_corr_ofdm; @@ -575,6 +547,16 @@ enum iwl_access_mode { IWL_OTP_ACCESS_RELATIVE, }; +/** + * enum iwl_pa_type - Power Amplifier type + * @IWL_PA_SYSTEM: based on uCode configuration + * @IWL_PA_INTERNAL: use Internal only + */ +enum iwl_pa_type { + IWL_PA_SYSTEM = 0, + IWL_PA_INTERNAL = 1, +}; + /* reply_tx_statistics (for _agn devices) */ struct reply_tx_error_statistics { u32 pp_delay; @@ -732,6 +714,35 @@ struct iwl_force_reset { */ #define IWLAGN_EXT_BEACON_TIME_POS 22 +/** + * struct iwl_notification_wait - notification wait entry + * @list: list head for global list + * @fn: function called with the notification + * @cmd: command ID + * + * This structure is not used directly, to wait for a + * notification declare it on the stack, and call + * iwlagn_init_notification_wait() with appropriate + * parameters. Then do whatever will cause the ucode + * to notify the driver, and to wait for that then + * call iwlagn_wait_notification(). + * + * Each notification is one-shot. If at some point we + * need to support multi-shot notifications (which + * can't be allocated on the stack) we need to modify + * the code for them. 
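 *
 * A minimal usage sketch, added editorially and not part of this patch
 * (the helper names and signatures are assumed from how the fields
 * above are used):
 *
 *	struct iwl_notification_wait wait;
 *
 *	iwlagn_init_notification_wait(priv, &wait, REPLY_FOO, fn, fn_data);
 *	... send the command that makes the uCode emit REPLY_FOO ...
 *	ret = iwlagn_wait_notification(priv, &wait, timeout);
 *
 * REPLY_FOO, fn and fn_data are placeholders; fn is assumed to be
 * optional when only the wake-up itself is of interest.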
+ */ +struct iwl_notification_wait { + struct list_head list; + + void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt, + void *data); + void *fn_data; + + u8 cmd; + bool triggered, aborted; +}; + struct iwl_rxon_context { struct ieee80211_vif *vif; @@ -794,7 +805,14 @@ enum iwl_scan_type { IWL_SCAN_ROC, }; -#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE +enum iwlagn_ucode_type { + IWL_UCODE_NONE, + IWL_UCODE_REGULAR, + IWL_UCODE_INIT, + IWL_UCODE_WOWLAN, +}; + +#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL struct iwl_testmode_trace { u32 buff_size; u32 total_size; @@ -804,20 +822,8 @@ struct iwl_testmode_trace { dma_addr_t dma_addr; bool trace_enabled; }; -struct iwl_testmode_sram { - u32 buff_size; - u32 num_chunks; - u8 *buff_addr; - bool sram_readed; -}; #endif -struct iwl_wipan_noa_data { - struct rcu_head rcu_head; - u32 length; - u8 data[]; -}; - struct iwl_priv { /*data shared among all the driver's layers */ @@ -829,6 +835,7 @@ struct iwl_priv { struct ieee80211_channel *ieee_channels; struct ieee80211_rate *ieee_rates; struct kmem_cache *tx_cmd_pool; + struct iwl_cfg *cfg; enum ieee80211_band band; @@ -873,7 +880,8 @@ struct iwl_priv { s32 temperature; /* Celsius */ s32 last_temperature; - struct iwl_wipan_noa_data __rcu *noa_data; + /* init calibration results */ + struct iwl_calib_result calib_results[IWL_CALIB_MAX]; /* Scan related variables */ unsigned long scan_start; @@ -899,12 +907,23 @@ struct iwl_priv { u32 ucode_ver; /* version of ucode, copy of iwl_ucode.ver */ + struct fw_img ucode_rt; + struct fw_img ucode_init; + struct fw_img ucode_wowlan; + + enum iwlagn_ucode_type ucode_type; + u8 ucode_write_complete; /* the image write is complete */ char firmware_name[25]; struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX]; __le16 switch_channel; + struct { + u32 error_event_table; + u32 log_event_table; + } device_pointers; + u16 active_rate; u8 start_calib; @@ -932,13 +951,17 @@ struct iwl_priv { int num_stations; struct iwl_station_entry stations[IWLAGN_STATION_COUNT]; unsigned long ucode_key_table; - struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT]; u8 mac80211_registered; /* Indication if ieee80211_ops->open has been called */ u8 is_open; + /* eeprom -- this is in the card's little endian byte order */ + u8 *eeprom; + int nvm_device_type; + struct iwl_eeprom_calib_info *calib_info; + enum nl80211_iftype iw_mode; /* Last Rx'd beacon timestamp */ @@ -994,6 +1017,10 @@ struct iwl_priv { /* counts reply_tx error */ struct reply_tx_error_statistics reply_tx_stats; struct reply_agg_tx_error_statistics reply_agg_tx_stats; + /* notification wait support */ + struct list_head notif_waits; + spinlock_t notif_wait_lock; + wait_queue_head_t notif_waitq; /* remain-on-channel offload support */ struct ieee80211_channel *hw_roc_channel; @@ -1071,9 +1098,8 @@ struct iwl_priv { struct led_classdev led; unsigned long blink_on, blink_off; bool led_registered; -#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE +#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL struct iwl_testmode_trace testmode_trace; - struct iwl_testmode_sram testmode_sram; u32 tm_fixed_rate; #endif diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/trunk/drivers/net/wireless/iwlwifi/iwl-devtrace.c index 2a2c8de64a04..a635a7e75447 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-devtrace.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-devtrace.c @@ -28,7 +28,7 @@ /* sparse doesn't like tracepoint macros */ #ifndef __CHECKER__ -#include "iwl-trans.h" +#include "iwl-dev.h" #define CREATE_TRACE_POINTS #include "iwl-devtrace.h" 
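/*
 * Editorial sketch, not part of this patch: the calib_results[] array and
 * enum iwl_calib added to struct iwl_priv above are meant to be replayed
 * to the runtime uCode in enumeration order once the init uCode has run.
 * send_one_calib_result() is a hypothetical stand-in for the driver's
 * host-command path.
 */
static int example_send_calib_results(struct iwl_priv *priv)
{
	int i, ret;

	for (i = 0; i < IWL_CALIB_MAX; i++) {
		struct iwl_calib_result *res = &priv->calib_results[i];

		if (!res->buf || !res->buf_len)
			continue;	/* this calibration never produced data */

		ret = send_one_calib_result(priv, res->buf, res->buf_len);
		if (ret)
			return ret;
	}

	return 0;
}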
diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/trunk/drivers/net/wireless/iwlwifi/iwl-devtrace.h index 9b212a8f30bb..8a51c5ccda1e 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-devtrace.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-devtrace.h @@ -29,6 +29,7 @@ #include +struct iwl_priv; #if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__) #undef TRACE_EVENT @@ -36,14 +37,14 @@ static inline void trace_ ## name(proto) {} #endif -#define PRIV_ENTRY __field(void *, priv) +#define PRIV_ENTRY __field(struct iwl_priv *, priv) #define PRIV_ASSIGN __entry->priv = priv #undef TRACE_SYSTEM #define TRACE_SYSTEM iwlwifi_io TRACE_EVENT(iwlwifi_dev_ioread32, - TP_PROTO(void *priv, u32 offs, u32 val), + TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val), TP_ARGS(priv, offs, val), TP_STRUCT__entry( PRIV_ENTRY @@ -59,7 +60,7 @@ TRACE_EVENT(iwlwifi_dev_ioread32, ); TRACE_EVENT(iwlwifi_dev_iowrite8, - TP_PROTO(void *priv, u32 offs, u8 val), + TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val), TP_ARGS(priv, offs, val), TP_STRUCT__entry( PRIV_ENTRY @@ -75,7 +76,7 @@ TRACE_EVENT(iwlwifi_dev_iowrite8, ); TRACE_EVENT(iwlwifi_dev_iowrite32, - TP_PROTO(void *priv, u32 offs, u32 val), + TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val), TP_ARGS(priv, offs, val), TP_STRUCT__entry( PRIV_ENTRY @@ -90,40 +91,11 @@ TRACE_EVENT(iwlwifi_dev_iowrite32, TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, __entry->offs, __entry->val) ); -TRACE_EVENT(iwlwifi_dev_irq, - TP_PROTO(void *priv), - TP_ARGS(priv), - TP_STRUCT__entry( - PRIV_ENTRY - ), - TP_fast_assign( - PRIV_ASSIGN; - ), - /* TP_printk("") doesn't compile */ - TP_printk("%d", 0) -); - -TRACE_EVENT(iwlwifi_dev_ict_read, - TP_PROTO(void *priv, u32 index, u32 value), - TP_ARGS(priv, index, value), - TP_STRUCT__entry( - PRIV_ENTRY - __field(u32, index) - __field(u32, value) - ), - TP_fast_assign( - PRIV_ASSIGN; - __entry->index = index; - __entry->value = value; - ), - TP_printk("read ict[%d] = %#.8x", __entry->index, __entry->value) -); - #undef TRACE_SYSTEM #define TRACE_SYSTEM iwlwifi_ucode TRACE_EVENT(iwlwifi_dev_ucode_cont_event, - TP_PROTO(void *priv, u32 time, u32 data, u32 ev), + TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev), TP_ARGS(priv, time, data, ev), TP_STRUCT__entry( PRIV_ENTRY @@ -143,7 +115,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_cont_event, ); TRACE_EVENT(iwlwifi_dev_ucode_wrap_event, - TP_PROTO(void *priv, u32 wraps, u32 n_entry, u32 p_entry), + TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry), TP_ARGS(priv, wraps, n_entry, p_entry), TP_STRUCT__entry( PRIV_ENTRY @@ -167,7 +139,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_wrap_event, #define TRACE_SYSTEM iwlwifi TRACE_EVENT(iwlwifi_dev_hcmd, - TP_PROTO(void *priv, u32 flags, + TP_PROTO(struct iwl_priv *priv, u32 flags, const void *hcmd0, size_t len0, const void *hcmd1, size_t len1, const void *hcmd2, size_t len2), @@ -192,7 +164,7 @@ TRACE_EVENT(iwlwifi_dev_hcmd, ); TRACE_EVENT(iwlwifi_dev_rx, - TP_PROTO(void *priv, void *rxbuf, size_t len), + TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len), TP_ARGS(priv, rxbuf, len), TP_STRUCT__entry( PRIV_ENTRY @@ -207,7 +179,7 @@ TRACE_EVENT(iwlwifi_dev_rx, ); TRACE_EVENT(iwlwifi_dev_tx, - TP_PROTO(void *priv, void *tfd, size_t tfdlen, + TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen, void *buf0, size_t buf0_len, void *buf1, size_t buf1_len), TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len), @@ -239,7 +211,7 @@ TRACE_EVENT(iwlwifi_dev_tx, ); TRACE_EVENT(iwlwifi_dev_ucode_error, 
- TP_PROTO(void *priv, u32 desc, u32 tsf_low, + TP_PROTO(struct iwl_priv *priv, u32 desc, u32 tsf_low, u32 data1, u32 data2, u32 line, u32 blink1, u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time, u32 gp1, u32 gp2, u32 gp3, u32 ucode_ver, u32 hw_ver, @@ -299,7 +271,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_error, ); TRACE_EVENT(iwlwifi_dev_ucode_event, - TP_PROTO(void *priv, u32 time, u32 data, u32 ev), + TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev), TP_ARGS(priv, time, data, ev), TP_STRUCT__entry( PRIV_ENTRY diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/trunk/drivers/net/wireless/iwlwifi/iwl-eeprom.c index c1eda9724f42..a4e43bd4a547 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-eeprom.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-eeprom.c @@ -149,23 +149,23 @@ static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */ * EEPROM chip, not a single event, so even reads could conflict if they * weren't arbitrated by the semaphore. */ -static int iwl_eeprom_acquire_semaphore(struct iwl_bus *bus) +static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv) { u16 count; int ret; for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) { /* Request semaphore */ - iwl_set_bit(bus, CSR_HW_IF_CONFIG_REG, + iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); /* See if we got it */ - ret = iwl_poll_bit(bus, CSR_HW_IF_CONFIG_REG, + ret = iwl_poll_bit(bus(priv), CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, EEPROM_SEM_TIMEOUT); if (ret >= 0) { - IWL_DEBUG_EEPROM(bus, + IWL_DEBUG_EEPROM(priv, "Acquired semaphore after %d tries.\n", count+1); return ret; @@ -175,39 +175,39 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_bus *bus) return ret; } -static void iwl_eeprom_release_semaphore(struct iwl_bus *bus) +static void iwl_eeprom_release_semaphore(struct iwl_priv *priv) { - iwl_clear_bit(bus, CSR_HW_IF_CONFIG_REG, + iwl_clear_bit(bus(priv), CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); } -static int iwl_eeprom_verify_signature(struct iwl_trans *trans) +static int iwl_eeprom_verify_signature(struct iwl_priv *priv) { - u32 gp = iwl_read32(bus(trans), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; + u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; int ret = 0; - IWL_DEBUG_EEPROM(trans, "EEPROM signature=0x%08x\n", gp); + IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp); switch (gp) { case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP: - if (trans->nvm_device_type != NVM_DEVICE_TYPE_OTP) { - IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n", + if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) { + IWL_ERR(priv, "EEPROM with bad signature: 0x%08x\n", gp); ret = -ENOENT; } break; case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K: case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K: - if (trans->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) { - IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp); + if (priv->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) { + IWL_ERR(priv, "OTP with bad signature: 0x%08x\n", gp); ret = -ENOENT; } break; case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP: default: - IWL_ERR(trans, "bad EEPROM/OTP signature, type=%s, " + IWL_ERR(priv, "bad EEPROM/OTP signature, type=%s, " "EEPROM_GP=0x%08x\n", - (trans->nvm_device_type == NVM_DEVICE_TYPE_OTP) + (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) ? 
"OTP" : "EEPROM", gp); ret = -ENOENT; break; @@ -215,11 +215,11 @@ static int iwl_eeprom_verify_signature(struct iwl_trans *trans) return ret; } -u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset) +u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset) { - if (!shrd->eeprom) + if (!priv->eeprom) return 0; - return (u16)shrd->eeprom[offset] | ((u16)shrd->eeprom[offset + 1] << 8); + return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8); } int iwl_eeprom_check_version(struct iwl_priv *priv) @@ -227,11 +227,11 @@ int iwl_eeprom_check_version(struct iwl_priv *priv) u16 eeprom_ver; u16 calib_ver; - eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION); - calib_ver = iwl_eeprom_calib_version(priv->shrd); + eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION); + calib_ver = iwlagn_eeprom_calib_version(priv); - if (eeprom_ver < cfg(priv)->eeprom_ver || - calib_ver < cfg(priv)->eeprom_calib_ver) + if (eeprom_ver < priv->cfg->eeprom_ver || + calib_ver < priv->cfg->eeprom_calib_ver) goto err; IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n", @@ -241,46 +241,45 @@ int iwl_eeprom_check_version(struct iwl_priv *priv) err: IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x " "CALIB=0x%x < 0x%x\n", - eeprom_ver, cfg(priv)->eeprom_ver, - calib_ver, cfg(priv)->eeprom_calib_ver); + eeprom_ver, priv->cfg->eeprom_ver, + calib_ver, priv->cfg->eeprom_calib_ver); return -EINVAL; } int iwl_eeprom_check_sku(struct iwl_priv *priv) { - struct iwl_shared *shrd = priv->shrd; u16 radio_cfg; - if (!cfg(priv)->sku) { + if (!priv->cfg->sku) { /* not using sku overwrite */ - cfg(priv)->sku = iwl_eeprom_query16(shrd, EEPROM_SKU_CAP); - if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE && - !cfg(priv)->ht_params) { + priv->cfg->sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP); + if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE && + !priv->cfg->ht_params) { IWL_ERR(priv, "Invalid 11n configuration\n"); return -EINVAL; } } - if (!cfg(priv)->sku) { + if (!priv->cfg->sku) { IWL_ERR(priv, "Invalid device sku\n"); return -EINVAL; } - IWL_INFO(priv, "Device SKU: 0x%X\n", cfg(priv)->sku); + IWL_INFO(priv, "Device SKU: 0X%x\n", priv->cfg->sku); - if (!cfg(priv)->valid_tx_ant && !cfg(priv)->valid_rx_ant) { + if (!priv->cfg->valid_tx_ant && !priv->cfg->valid_rx_ant) { /* not using .cfg overwrite */ - radio_cfg = iwl_eeprom_query16(shrd, EEPROM_RADIO_CONFIG); - cfg(priv)->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg); - cfg(priv)->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg); - if (!cfg(priv)->valid_tx_ant || !cfg(priv)->valid_rx_ant) { - IWL_ERR(priv, "Invalid chain (0x%X, 0x%X)\n", - cfg(priv)->valid_tx_ant, - cfg(priv)->valid_rx_ant); + radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); + priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg); + priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg); + if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) { + IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n", + priv->cfg->valid_tx_ant, + priv->cfg->valid_rx_ant); return -EINVAL; } - IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n", - cfg(priv)->valid_tx_ant, cfg(priv)->valid_rx_ant); + IWL_INFO(priv, "Valid Tx ant: 0X%x, Valid Rx ant: 0X%x\n", + priv->cfg->valid_tx_ant, priv->cfg->valid_rx_ant); } /* * for some special cases, @@ -290,9 +289,9 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv) return 0; } -void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac) +void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac) { - 
const u8 *addr = iwl_eeprom_query_addr(shrd, + const u8 *addr = iwl_eeprom_query_addr(priv, EEPROM_MAC_ADDRESS); memcpy(mac, addr, ETH_ALEN); } @@ -303,19 +302,19 @@ void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac) * ******************************************************************************/ -static void iwl_set_otp_access(struct iwl_bus *bus, enum iwl_access_mode mode) +static void iwl_set_otp_access(struct iwl_priv *priv, enum iwl_access_mode mode) { - iwl_read32(bus, CSR_OTP_GP_REG); + iwl_read32(bus(priv), CSR_OTP_GP_REG); if (mode == IWL_OTP_ACCESS_ABSOLUTE) - iwl_clear_bit(bus, CSR_OTP_GP_REG, + iwl_clear_bit(bus(priv), CSR_OTP_GP_REG, CSR_OTP_GP_REG_OTP_ACCESS_MODE); else - iwl_set_bit(bus, CSR_OTP_GP_REG, + iwl_set_bit(bus(priv), CSR_OTP_GP_REG, CSR_OTP_GP_REG_OTP_ACCESS_MODE); } -static int iwl_get_nvm_type(struct iwl_bus *bus, u32 hw_rev) +static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev) { u32 otpgp; int nvm_type; @@ -323,7 +322,7 @@ static int iwl_get_nvm_type(struct iwl_bus *bus, u32 hw_rev) /* OTP only valid for CP/PP and after */ switch (hw_rev & CSR_HW_REV_TYPE_MSK) { case CSR_HW_REV_TYPE_NONE: - IWL_ERR(bus, "Unknown hardware type\n"); + IWL_ERR(priv, "Unknown hardware type\n"); return -ENOENT; case CSR_HW_REV_TYPE_5300: case CSR_HW_REV_TYPE_5350: @@ -332,7 +331,7 @@ static int iwl_get_nvm_type(struct iwl_bus *bus, u32 hw_rev) nvm_type = NVM_DEVICE_TYPE_EEPROM; break; default: - otpgp = iwl_read32(bus, CSR_OTP_GP_REG); + otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG); if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT) nvm_type = NVM_DEVICE_TYPE_OTP; else @@ -342,73 +341,73 @@ static int iwl_get_nvm_type(struct iwl_bus *bus, u32 hw_rev) return nvm_type; } -static int iwl_init_otp_access(struct iwl_bus *bus) +static int iwl_init_otp_access(struct iwl_priv *priv) { int ret; /* Enable 40MHz radio clock */ - iwl_write32(bus, CSR_GP_CNTRL, - iwl_read32(bus, CSR_GP_CNTRL) | + iwl_write32(bus(priv), CSR_GP_CNTRL, + iwl_read32(bus(priv), CSR_GP_CNTRL) | CSR_GP_CNTRL_REG_FLAG_INIT_DONE); /* wait for clock to be ready */ - ret = iwl_poll_bit(bus, CSR_GP_CNTRL, + ret = iwl_poll_bit(bus(priv), CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); if (ret < 0) - IWL_ERR(bus, "Time out access OTP\n"); + IWL_ERR(priv, "Time out access OTP\n"); else { - iwl_set_bits_prph(bus, APMG_PS_CTRL_REG, + iwl_set_bits_prph(bus(priv), APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ); udelay(5); - iwl_clear_bits_prph(bus, APMG_PS_CTRL_REG, + iwl_clear_bits_prph(bus(priv), APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ); /* * CSR auto clock gate disable bit - * this is only applicable for HW with OTP shadow RAM */ - if (cfg(bus)->base_params->shadow_ram_support) - iwl_set_bit(bus, CSR_DBG_LINK_PWR_MGMT_REG, + if (priv->cfg->base_params->shadow_ram_support) + iwl_set_bit(bus(priv), CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); } return ret; } -static int iwl_read_otp_word(struct iwl_bus *bus, u16 addr, __le16 *eeprom_data) +static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data) { int ret = 0; u32 r; u32 otpgp; - iwl_write32(bus, CSR_EEPROM_REG, + iwl_write32(bus(priv), CSR_EEPROM_REG, CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); - ret = iwl_poll_bit(bus, CSR_EEPROM_REG, + ret = iwl_poll_bit(bus(priv), CSR_EEPROM_REG, CSR_EEPROM_REG_READ_VALID_MSK, CSR_EEPROM_REG_READ_VALID_MSK, IWL_EEPROM_ACCESS_TIMEOUT); if (ret < 0) { - IWL_ERR(bus, "Time out reading OTP[%d]\n", addr); + IWL_ERR(priv, "Time out 
reading OTP[%d]\n", addr); return ret; } - r = iwl_read32(bus, CSR_EEPROM_REG); + r = iwl_read32(bus(priv), CSR_EEPROM_REG); /* check for ECC errors: */ - otpgp = iwl_read32(bus, CSR_OTP_GP_REG); + otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG); if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) { /* stop in this case */ /* set the uncorrectable OTP ECC bit for acknowledgement */ - iwl_set_bit(bus, CSR_OTP_GP_REG, + iwl_set_bit(bus(priv), CSR_OTP_GP_REG, CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); - IWL_ERR(bus, "Uncorrectable OTP ECC error, abort OTP read\n"); + IWL_ERR(priv, "Uncorrectable OTP ECC error, abort OTP read\n"); return -EINVAL; } if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) { /* continue in this case */ /* set the correctable OTP ECC bit for acknowledgement */ - iwl_set_bit(bus, CSR_OTP_GP_REG, + iwl_set_bit(bus(priv), CSR_OTP_GP_REG, CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK); - IWL_ERR(bus, "Correctable OTP ECC error, continue read\n"); + IWL_ERR(priv, "Correctable OTP ECC error, continue read\n"); } *eeprom_data = cpu_to_le16(r >> 16); return 0; @@ -417,20 +416,20 @@ static int iwl_read_otp_word(struct iwl_bus *bus, u16 addr, __le16 *eeprom_data) /* * iwl_is_otp_empty: check for empty OTP */ -static bool iwl_is_otp_empty(struct iwl_bus *bus) +static bool iwl_is_otp_empty(struct iwl_priv *priv) { u16 next_link_addr = 0; __le16 link_value; bool is_empty = false; /* locate the beginning of OTP link list */ - if (!iwl_read_otp_word(bus, next_link_addr, &link_value)) { + if (!iwl_read_otp_word(priv, next_link_addr, &link_value)) { if (!link_value) { - IWL_ERR(bus, "OTP is empty\n"); + IWL_ERR(priv, "OTP is empty\n"); is_empty = true; } } else { - IWL_ERR(bus, "Unable to read first block of OTP list.\n"); + IWL_ERR(priv, "Unable to read first block of OTP list.\n"); is_empty = true; } @@ -447,7 +446,7 @@ static bool iwl_is_otp_empty(struct iwl_bus *bus) * we should read and used to configure the device. * only perform this operation if shadow RAM is disabled */ -static int iwl_find_otp_image(struct iwl_bus *bus, +static int iwl_find_otp_image(struct iwl_priv *priv, u16 *validblockaddr) { u16 next_link_addr = 0, valid_addr; @@ -455,10 +454,10 @@ static int iwl_find_otp_image(struct iwl_bus *bus, int usedblocks = 0; /* set addressing mode to absolute to traverse the link list */ - iwl_set_otp_access(bus, IWL_OTP_ACCESS_ABSOLUTE); + iwl_set_otp_access(priv, IWL_OTP_ACCESS_ABSOLUTE); /* checking for empty OTP or error */ - if (iwl_is_otp_empty(bus)) + if (iwl_is_otp_empty(priv)) return -EINVAL; /* @@ -472,9 +471,9 @@ static int iwl_find_otp_image(struct iwl_bus *bus, */ valid_addr = next_link_addr; next_link_addr = le16_to_cpu(link_value) * sizeof(u16); - IWL_DEBUG_EEPROM(bus, "OTP blocks %d addr 0x%x\n", + IWL_DEBUG_EEPROM(priv, "OTP blocks %d addr 0x%x\n", usedblocks, next_link_addr); - if (iwl_read_otp_word(bus, next_link_addr, &link_value)) + if (iwl_read_otp_word(priv, next_link_addr, &link_value)) return -EINVAL; if (!link_value) { /* @@ -489,10 +488,10 @@ static int iwl_find_otp_image(struct iwl_bus *bus, } /* more in the link list, continue */ usedblocks++; - } while (usedblocks <= cfg(bus)->base_params->max_ll_items); + } while (usedblocks <= priv->cfg->base_params->max_ll_items); /* OTP has no valid blocks */ - IWL_DEBUG_EEPROM(bus, "OTP has no valid blocks\n"); + IWL_DEBUG_EEPROM(priv, "OTP has no valid blocks\n"); return -EINVAL; } @@ -505,28 +504,28 @@ static int iwl_find_otp_image(struct iwl_bus *bus, * iwl_get_max_txpower_avg - get the highest tx power from all chains. 
* find the highest tx power from all chains for the channel */ -static s8 iwl_get_max_txpower_avg(struct iwl_cfg *cfg, +static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv, struct iwl_eeprom_enhanced_txpwr *enhanced_txpower, int element, s8 *max_txpower_in_half_dbm) { s8 max_txpower_avg = 0; /* (dBm) */ /* Take the highest tx power from any valid chains */ - if ((cfg->valid_tx_ant & ANT_A) && + if ((priv->cfg->valid_tx_ant & ANT_A) && (enhanced_txpower[element].chain_a_max > max_txpower_avg)) max_txpower_avg = enhanced_txpower[element].chain_a_max; - if ((cfg->valid_tx_ant & ANT_B) && + if ((priv->cfg->valid_tx_ant & ANT_B) && (enhanced_txpower[element].chain_b_max > max_txpower_avg)) max_txpower_avg = enhanced_txpower[element].chain_b_max; - if ((cfg->valid_tx_ant & ANT_C) && + if ((priv->cfg->valid_tx_ant & ANT_C) && (enhanced_txpower[element].chain_c_max > max_txpower_avg)) max_txpower_avg = enhanced_txpower[element].chain_c_max; - if (((cfg->valid_tx_ant == ANT_AB) | - (cfg->valid_tx_ant == ANT_BC) | - (cfg->valid_tx_ant == ANT_AC)) && + if (((priv->cfg->valid_tx_ant == ANT_AB) | + (priv->cfg->valid_tx_ant == ANT_BC) | + (priv->cfg->valid_tx_ant == ANT_AC)) && (enhanced_txpower[element].mimo2_max > max_txpower_avg)) max_txpower_avg = enhanced_txpower[element].mimo2_max; - if ((cfg->valid_tx_ant == ANT_ABC) && + if ((priv->cfg->valid_tx_ant == ANT_ABC) && (enhanced_txpower[element].mimo3_max > max_txpower_avg)) max_txpower_avg = enhanced_txpower[element].mimo3_max; @@ -583,7 +582,6 @@ iwl_eeprom_enh_txp_read_element(struct iwl_priv *priv, void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv) { - struct iwl_shared *shrd = priv->shrd; struct iwl_eeprom_enhanced_txpwr *txp_array, *txp; int idx, entries; __le16 *txp_len; @@ -592,10 +590,10 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv) BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8); /* the length is in 16-bit words, but we want entries */ - txp_len = (__le16 *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_SZ_OFFS); + txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS); entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN; - txp_array = (void *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_OFFS); + txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS); for (idx = 0; idx < entries; idx++) { txp = &txp_array[idx]; @@ -629,7 +627,7 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv) ((txp->delta_20_in_40 & 0xf0) >> 4), (txp->delta_20_in_40 & 0x0f)); - max_txp_avg = iwl_get_max_txpower_avg(cfg(priv), txp_array, idx, + max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx, &max_txp_avg_halfdbm); /* @@ -648,13 +646,12 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv) /** * iwl_eeprom_init - read EEPROM contents * - * Load the EEPROM contents from adapter into shrd->eeprom + * Load the EEPROM contents from adapter into priv->eeprom * * NOTE: This routine uses the non-debug IO access functions. 
*/ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev) { - struct iwl_shared *shrd = priv->shrd; __le16 *e; u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP); int sz; @@ -663,22 +660,22 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev) u16 validblockaddr = 0; u16 cache_addr = 0; - trans(priv)->nvm_device_type = iwl_get_nvm_type(bus(priv), hw_rev); - if (trans(priv)->nvm_device_type == -ENOENT) + priv->nvm_device_type = iwl_get_nvm_type(priv, hw_rev); + if (priv->nvm_device_type == -ENOENT) return -ENOENT; /* allocate eeprom */ - sz = cfg(priv)->base_params->eeprom_size; + sz = priv->cfg->base_params->eeprom_size; IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz); - shrd->eeprom = kzalloc(sz, GFP_KERNEL); - if (!shrd->eeprom) { + priv->eeprom = kzalloc(sz, GFP_KERNEL); + if (!priv->eeprom) { ret = -ENOMEM; goto alloc_err; } - e = (__le16 *)shrd->eeprom; + e = (__le16 *)priv->eeprom; iwl_apm_init(priv); - ret = iwl_eeprom_verify_signature(trans(priv)); + ret = iwl_eeprom_verify_signature(priv); if (ret < 0) { IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp); ret = -ENOENT; @@ -686,16 +683,16 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev) } /* Make sure driver (instead of uCode) is allowed to read EEPROM */ - ret = iwl_eeprom_acquire_semaphore(bus(priv)); + ret = iwl_eeprom_acquire_semaphore(priv); if (ret < 0) { IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n"); ret = -ENOENT; goto err; } - if (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP) { + if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) { - ret = iwl_init_otp_access(bus(priv)); + ret = iwl_init_otp_access(priv); if (ret) { IWL_ERR(priv, "Failed to initialize OTP access.\n"); ret = -ENOENT; @@ -709,8 +706,8 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev) CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK | CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); /* traversing the linked list if no shadow ram supported */ - if (!cfg(priv)->base_params->shadow_ram_support) { - if (iwl_find_otp_image(bus(priv), &validblockaddr)) { + if (!priv->cfg->base_params->shadow_ram_support) { + if (iwl_find_otp_image(priv, &validblockaddr)) { ret = -ENOENT; goto done; } @@ -719,7 +716,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev) addr += sizeof(u16)) { __le16 eeprom_data; - ret = iwl_read_otp_word(bus(priv), addr, &eeprom_data); + ret = iwl_read_otp_word(priv, addr, &eeprom_data); if (ret) goto done; e[cache_addr / 2] = eeprom_data; @@ -747,27 +744,27 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev) } IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n", - (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP) + (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) ? "OTP" : "EEPROM", - iwl_eeprom_query16(shrd, EEPROM_VERSION)); + iwl_eeprom_query16(priv, EEPROM_VERSION)); ret = 0; done: - iwl_eeprom_release_semaphore(bus(priv)); + iwl_eeprom_release_semaphore(priv); err: if (ret) - iwl_eeprom_free(priv->shrd); + iwl_eeprom_free(priv); /* Reset chip to save power until we load uCode during "up". 
*/ iwl_apm_stop(priv); alloc_err: return ret; } -void iwl_eeprom_free(struct iwl_shared *shrd) +void iwl_eeprom_free(struct iwl_priv *priv) { - kfree(shrd->eeprom); - shrd->eeprom = NULL; + kfree(priv->eeprom); + priv->eeprom = NULL; } static void iwl_init_band_reference(const struct iwl_priv *priv, @@ -775,50 +772,49 @@ static void iwl_init_band_reference(const struct iwl_priv *priv, const struct iwl_eeprom_channel **eeprom_ch_info, const u8 **eeprom_ch_index) { - struct iwl_shared *shrd = priv->shrd; - u32 offset = cfg(priv)->lib-> + u32 offset = priv->cfg->lib-> eeprom_ops.regulatory_bands[eep_band - 1]; switch (eep_band) { case 1: /* 2.4GHz band */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1); *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_eeprom_query_addr(shrd, offset); + iwl_eeprom_query_addr(priv, offset); *eeprom_ch_index = iwl_eeprom_band_1; break; case 2: /* 4.9GHz band */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2); *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_eeprom_query_addr(shrd, offset); + iwl_eeprom_query_addr(priv, offset); *eeprom_ch_index = iwl_eeprom_band_2; break; case 3: /* 5.2GHz band */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3); *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_eeprom_query_addr(shrd, offset); + iwl_eeprom_query_addr(priv, offset); *eeprom_ch_index = iwl_eeprom_band_3; break; case 4: /* 5.5GHz band */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4); *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_eeprom_query_addr(shrd, offset); + iwl_eeprom_query_addr(priv, offset); *eeprom_ch_index = iwl_eeprom_band_4; break; case 5: /* 5.7GHz band */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5); *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_eeprom_query_addr(shrd, offset); + iwl_eeprom_query_addr(priv, offset); *eeprom_ch_index = iwl_eeprom_band_5; break; case 6: /* 2.4GHz ht40 channels */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6); *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_eeprom_query_addr(shrd, offset); + iwl_eeprom_query_addr(priv, offset); *eeprom_ch_index = iwl_eeprom_band_6; break; case 7: /* 5 GHz ht40 channels */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7); *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_eeprom_query_addr(shrd, offset); + iwl_eeprom_query_addr(priv, offset); *eeprom_ch_index = iwl_eeprom_band_7; break; default: @@ -983,9 +979,9 @@ int iwl_init_channel_map(struct iwl_priv *priv) } /* Check if we do have HT40 channels */ - if (cfg(priv)->lib->eeprom_ops.regulatory_bands[5] == + if (priv->cfg->lib->eeprom_ops.regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 && - cfg(priv)->lib->eeprom_ops.regulatory_bands[6] == + priv->cfg->lib->eeprom_ops.regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40) return 0; @@ -1021,8 +1017,8 @@ int iwl_init_channel_map(struct iwl_priv *priv) * driver need to process addition information * to determine the max channel tx power limits */ - if (cfg(priv)->lib->eeprom_ops.update_enhanced_txpower) - cfg(priv)->lib->eeprom_ops.update_enhanced_txpower(priv); + if (priv->cfg->lib->eeprom_ops.update_enhanced_txpower) + priv->cfg->lib->eeprom_ops.update_enhanced_txpower(priv); return 0; } @@ -1068,7 +1064,7 @@ void iwl_rf_config(struct iwl_priv *priv) { u16 radio_cfg; - radio_cfg = iwl_eeprom_query16(priv->shrd, EEPROM_RADIO_CONFIG); + radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); /* write radio config values to register */ if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) { diff --git 
a/trunk/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/trunk/drivers/net/wireless/iwlwifi/iwl-eeprom.h index 9fa937ec35e3..c94747e7299e 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-eeprom.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-eeprom.h @@ -66,7 +66,6 @@ #include struct iwl_priv; -struct iwl_shared; /* * EEPROM access time values: @@ -306,11 +305,11 @@ struct iwl_eeprom_ops { int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev); -void iwl_eeprom_free(struct iwl_shared *shrd); +void iwl_eeprom_free(struct iwl_priv *priv); int iwl_eeprom_check_version(struct iwl_priv *priv); int iwl_eeprom_check_sku(struct iwl_priv *priv); -const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset); -u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset); +const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset); +u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset); int iwl_init_channel_map(struct iwl_priv *priv); void iwl_free_channel_map(struct iwl_priv *priv); const struct iwl_channel_info *iwl_get_channel_info( diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-io.c b/trunk/drivers/net/wireless/iwlwifi/iwl-io.c index d57ea6484bbe..3ffa8e62b856 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-io.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-io.c @@ -143,7 +143,7 @@ u32 iwl_read_direct32(struct iwl_bus *bus, u32 reg) spin_lock_irqsave(&bus->reg_lock, flags); iwl_grab_nic_access(bus); - value = iwl_read32(bus, reg); + value = iwl_read32(bus(bus), reg); iwl_release_nic_access(bus); spin_unlock_irqrestore(&bus->reg_lock, flags); @@ -283,29 +283,16 @@ u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr) return value; } -int _iwl_write_targ_mem_words(struct iwl_bus *bus, u32 addr, - void *buf, int words) +void iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val) { unsigned long flags; - int offs, result = 0; - u32 *vals = buf; spin_lock_irqsave(&bus->reg_lock, flags); if (!iwl_grab_nic_access(bus)) { iwl_write32(bus, HBUS_TARG_MEM_WADDR, addr); wmb(); - - for (offs = 0; offs < words; offs++) - iwl_write32(bus, HBUS_TARG_MEM_WDAT, vals[offs]); + iwl_write32(bus, HBUS_TARG_MEM_WDAT, val); iwl_release_nic_access(bus); - } else - result = -EBUSY; + } spin_unlock_irqrestore(&bus->reg_lock, flags); - - return result; -} - -int iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val) -{ - return _iwl_write_targ_mem_words(bus, addr, &val, 1); } diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-io.h b/trunk/drivers/net/wireless/iwlwifi/iwl-io.h index aae2eeb331a8..ced2cbeb6eae 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-io.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-io.h @@ -85,9 +85,6 @@ void _iwl_read_targ_mem_words(struct iwl_bus *bus, u32 addr, (bufsize) / sizeof(u32));\ } while (0) -int _iwl_write_targ_mem_words(struct iwl_bus *bus, u32 addr, - void *buf, int words); - u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr); -int iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val); +void iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val); #endif diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-led.c b/trunk/drivers/net/wireless/iwlwifi/iwl-led.c index 14dcbfcdc0fd..eb541735296c 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-led.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-led.c @@ -137,11 +137,11 @@ static int iwl_led_cmd(struct iwl_priv *priv, } IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n", - cfg(priv)->base_params->led_compensation); + priv->cfg->base_params->led_compensation); 
led_cmd.on = iwl_blink_compensation(priv, on, - cfg(priv)->base_params->led_compensation); + priv->cfg->base_params->led_compensation); led_cmd.off = iwl_blink_compensation(priv, off, - cfg(priv)->base_params->led_compensation); + priv->cfg->base_params->led_compensation); ret = iwl_send_led_cmd(priv, &led_cmd); if (!ret) { @@ -178,7 +178,7 @@ void iwl_leds_init(struct iwl_priv *priv) int ret; if (mode == IWL_LED_DEFAULT) - mode = cfg(priv)->led_mode; + mode = priv->cfg->led_mode; priv->led.name = kasprintf(GFP_KERNEL, "%s-led", wiphy_name(priv->hw->wiphy)); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-led.h b/trunk/drivers/net/wireless/iwlwifi/iwl-led.h index 2550b3c7dcbf..1c93dfef6933 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-led.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-led.h @@ -36,6 +36,20 @@ struct iwl_priv; #define IWL_LED_ACTIVITY (0<<1) #define IWL_LED_LINK (1<<1) +/* + * LED mode + * IWL_LED_DEFAULT: use device default + * IWL_LED_RF_STATE: turn LED on/off based on RF state + * LED ON = RF ON + * LED OFF = RF OFF + * IWL_LED_BLINK: adjust led blink rate based on blink table + */ +enum iwl_led_mode { + IWL_LED_DEFAULT, + IWL_LED_RF_STATE, + IWL_LED_BLINK, +}; + void iwlagn_led_enable(struct iwl_priv *priv); void iwl_leds_init(struct iwl_priv *priv); void iwl_leds_exit(struct iwl_priv *priv); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/trunk/drivers/net/wireless/iwlwifi/iwl-mac80211.c deleted file mode 100644 index f980e574e1f9..000000000000 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-mac80211.c +++ /dev/null @@ -1,1601 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include - -#include "iwl-eeprom.h" -#include "iwl-wifi.h" -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-io.h" -#include "iwl-agn-calib.h" -#include "iwl-agn.h" -#include "iwl-shared.h" -#include "iwl-bus.h" -#include "iwl-trans.h" - -/***************************************************************************** - * - * mac80211 entry point functions - * - *****************************************************************************/ - -static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = { - { - .max = 1, - .types = BIT(NL80211_IFTYPE_STATION), - }, - { - .max = 1, - .types = BIT(NL80211_IFTYPE_AP), - }, -}; - -static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = { - { - .max = 2, - .types = BIT(NL80211_IFTYPE_STATION), - }, -}; - -static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = { - { - .max = 1, - .types = BIT(NL80211_IFTYPE_STATION), - }, - { - .max = 1, - .types = BIT(NL80211_IFTYPE_P2P_GO) | - BIT(NL80211_IFTYPE_AP), - }, -}; - -static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = { - { - .max = 2, - .types = BIT(NL80211_IFTYPE_STATION), - }, - { - .max = 1, - .types = BIT(NL80211_IFTYPE_P2P_CLIENT), - }, -}; - -static const struct ieee80211_iface_combination -iwlagn_iface_combinations_dualmode[] = { - { .num_different_channels = 1, - .max_interfaces = 2, - .beacon_int_infra_match = true, - .limits = iwlagn_sta_ap_limits, - .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits), - }, - { .num_different_channels = 1, - .max_interfaces = 2, - .limits = iwlagn_2sta_limits, - .n_limits = ARRAY_SIZE(iwlagn_2sta_limits), - }, -}; - -static const struct ieee80211_iface_combination -iwlagn_iface_combinations_p2p[] = { - { .num_different_channels = 1, - .max_interfaces = 2, - .beacon_int_infra_match = true, - .limits = iwlagn_p2p_sta_go_limits, - .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits), - }, - { .num_different_channels = 1, - .max_interfaces = 2, - .limits = iwlagn_p2p_2sta_limits, - .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits), - }, -}; - -/* - * Not a mac80211 entry point function, but it fits in with all the - * other mac80211 functions grouped here. - */ -int iwlagn_mac_setup_register(struct iwl_priv *priv, - struct iwlagn_ucode_capabilities *capa) -{ - int ret; - struct ieee80211_hw *hw = priv->hw; - struct iwl_rxon_context *ctx; - - hw->rate_control_algorithm = "iwl-agn-rs"; - - /* Tell mac80211 our characteristics */ - hw->flags = IEEE80211_HW_SIGNAL_DBM | - IEEE80211_HW_AMPDU_AGGREGATION | - IEEE80211_HW_NEED_DTIM_PERIOD | - IEEE80211_HW_SPECTRUM_MGMT | - IEEE80211_HW_REPORTS_TX_ACK_STATUS; - - /* - * Including the following line will crash some AP's. This - * workaround removes the stimulus which causes the crash until - * the AP software can be fixed. 
- hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; - */ - - hw->flags |= IEEE80211_HW_SUPPORTS_PS | - IEEE80211_HW_SUPPORTS_DYNAMIC_PS; - - if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE) - hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | - IEEE80211_HW_SUPPORTS_STATIC_SMPS; - - if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP) - hw->flags |= IEEE80211_HW_MFP_CAPABLE; - - hw->sta_data_size = sizeof(struct iwl_station_priv); - hw->vif_data_size = sizeof(struct iwl_vif_priv); - - for_each_context(priv, ctx) { - hw->wiphy->interface_modes |= ctx->interface_modes; - hw->wiphy->interface_modes |= ctx->exclusive_interface_modes; - } - - BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); - - if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) { - hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p; - hw->wiphy->n_iface_combinations = - ARRAY_SIZE(iwlagn_iface_combinations_p2p); - } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) { - hw->wiphy->iface_combinations = - iwlagn_iface_combinations_dualmode; - hw->wiphy->n_iface_combinations = - ARRAY_SIZE(iwlagn_iface_combinations_dualmode); - } - - hw->wiphy->max_remain_on_channel_duration = 1000; - - hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | - WIPHY_FLAG_DISABLE_BEACON_HINTS | - WIPHY_FLAG_IBSS_RSN; - - if (trans(priv)->ucode_wowlan.code.len && - device_can_wakeup(bus(priv)->dev)) { - hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | - WIPHY_WOWLAN_DISCONNECT | - WIPHY_WOWLAN_EAP_IDENTITY_REQ | - WIPHY_WOWLAN_RFKILL_RELEASE; - if (!iwlagn_mod_params.sw_crypto) - hw->wiphy->wowlan.flags |= - WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | - WIPHY_WOWLAN_GTK_REKEY_FAILURE; - - hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS; - hw->wiphy->wowlan.pattern_min_len = - IWLAGN_WOWLAN_MIN_PATTERN_LEN; - hw->wiphy->wowlan.pattern_max_len = - IWLAGN_WOWLAN_MAX_PATTERN_LEN; - } - - if (iwlagn_mod_params.power_save) - hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; - else - hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; - - hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; - /* we create the 802.11 header and a zero-length SSID element */ - hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2; - - /* Default value; 4 EDCA QOS priorities */ - hw->queues = 4; - - hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; - - if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) - priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = - &priv->bands[IEEE80211_BAND_2GHZ]; - if (priv->bands[IEEE80211_BAND_5GHZ].n_channels) - priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = - &priv->bands[IEEE80211_BAND_5GHZ]; - - hw->wiphy->hw_version = bus_get_hw_id(bus(priv)); - - iwl_leds_init(priv); - - ret = ieee80211_register_hw(priv->hw); - if (ret) { - IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); - return ret; - } - priv->mac80211_registered = 1; - - return 0; -} - -void iwlagn_mac_unregister(struct iwl_priv *priv) -{ - if (!priv->mac80211_registered) - return; - iwl_leds_exit(priv); - ieee80211_unregister_hw(priv->hw); - priv->mac80211_registered = 0; -} - -static int __iwl_up(struct iwl_priv *priv) -{ - struct iwl_rxon_context *ctx; - int ret; - - lockdep_assert_held(&priv->shrd->mutex); - - if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) { - IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); - return -EIO; - } - - for_each_context(priv, ctx) { - ret = iwlagn_alloc_bcast_station(priv, ctx); - if (ret) { - iwl_dealloc_bcast_stations(priv); - return ret; - } - } - - ret = iwl_run_init_ucode(trans(priv)); - if (ret) { - 
IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret); - goto error; - } - - ret = iwl_load_ucode_wait_alive(trans(priv), IWL_UCODE_REGULAR); - if (ret) { - IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret); - goto error; - } - - ret = iwl_alive_start(priv); - if (ret) - goto error; - return 0; - - error: - set_bit(STATUS_EXIT_PENDING, &priv->shrd->status); - __iwl_down(priv); - clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status); - - IWL_ERR(priv, "Unable to initialize device.\n"); - return ret; -} - -static int iwlagn_mac_start(struct ieee80211_hw *hw) -{ - struct iwl_priv *priv = hw->priv; - int ret; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - /* we should be verifying the device is ready to be opened */ - mutex_lock(&priv->shrd->mutex); - ret = __iwl_up(priv); - mutex_unlock(&priv->shrd->mutex); - if (ret) - return ret; - - IWL_DEBUG_INFO(priv, "Start UP work done.\n"); - - /* Now we should be done, and the READY bit should be set. */ - if (WARN_ON(!test_bit(STATUS_READY, &priv->shrd->status))) - ret = -EIO; - - iwlagn_led_enable(priv); - - priv->is_open = 1; - IWL_DEBUG_MAC80211(priv, "leave\n"); - return 0; -} - -static void iwlagn_mac_stop(struct ieee80211_hw *hw) -{ - struct iwl_priv *priv = hw->priv; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - if (!priv->is_open) - return; - - priv->is_open = 0; - - iwl_down(priv); - - flush_workqueue(priv->shrd->workqueue); - - /* User space software may expect getting rfkill changes - * even if interface is down */ - iwl_write32(bus(priv), CSR_INT, 0xFFFFFFFF); - iwl_enable_rfkill_int(priv); - - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - -static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_gtk_rekey_data *data) -{ - struct iwl_priv *priv = hw->priv; - - if (iwlagn_mod_params.sw_crypto) - return; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->shrd->mutex); - - if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif) - goto out; - - memcpy(priv->kek, data->kek, NL80211_KEK_LEN); - memcpy(priv->kck, data->kck, NL80211_KCK_LEN); - priv->replay_ctr = - cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr)); - priv->have_rekey_data = true; - - out: - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - -#ifdef CONFIG_PM_SLEEP - -static int iwlagn_mac_suspend(struct ieee80211_hw *hw, - struct cfg80211_wowlan *wowlan) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - int ret; - - if (WARN_ON(!wowlan)) - return -EINVAL; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->shrd->mutex); - - /* Don't attempt WoWLAN when not associated, tear down instead. 
*/ - if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION || - !iwl_is_associated_ctx(ctx)) { - ret = 1; - goto out; - } - - ret = iwlagn_suspend(priv, hw, wowlan); - if (ret) - goto error; - - device_set_wakeup_enable(bus(priv)->dev, true); - - /* Now let the ucode operate on its own */ - iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET, - CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); - - goto out; - - error: - priv->shrd->wowlan = false; - iwlagn_prepare_restart(priv); - ieee80211_restart_hw(priv->hw); - out: - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return ret; -} - -static int iwlagn_mac_resume(struct ieee80211_hw *hw) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct ieee80211_vif *vif; - unsigned long flags; - u32 base, status = 0xffffffff; - int ret = -EIO; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->shrd->mutex); - - iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR, - CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); - - base = priv->shrd->device_pointers.error_event_table; - if (iwlagn_hw_valid_rtc_data_addr(base)) { - spin_lock_irqsave(&bus(priv)->reg_lock, flags); - ret = iwl_grab_nic_access_silent(bus(priv)); - if (ret == 0) { - iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, base); - status = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT); - iwl_release_nic_access(bus(priv)); - } - spin_unlock_irqrestore(&bus(priv)->reg_lock, flags); - -#ifdef CONFIG_IWLWIFI_DEBUGFS - if (ret == 0) { - struct iwl_trans *trans = trans(priv); - if (!priv->wowlan_sram) - priv->wowlan_sram = - kzalloc(trans->ucode_wowlan.data.len, - GFP_KERNEL); - - if (priv->wowlan_sram) - _iwl_read_targ_mem_words( - bus(priv), 0x800000, priv->wowlan_sram, - trans->ucode_wowlan.data.len / 4); - } -#endif - } - - /* we'll clear ctx->vif during iwlagn_prepare_restart() */ - vif = ctx->vif; - - priv->shrd->wowlan = false; - - device_set_wakeup_enable(bus(priv)->dev, false); - - iwlagn_prepare_restart(priv); - - memset((void *)&ctx->active, 0, sizeof(ctx->active)); - iwl_connection_init_rx_config(priv, ctx); - iwlagn_set_rxon_chain(priv, ctx); - - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - ieee80211_resume_disconnect(vif); - - return 1; -} - -#endif - -static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) -{ - struct iwl_priv *priv = hw->priv; - - IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, - ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); - - if (iwlagn_tx_skb(priv, skb)) - dev_kfree_skb_any(skb); -} - -static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_key_conf *keyconf, - struct ieee80211_sta *sta, - u32 iv32, u16 *phase1key) -{ - struct iwl_priv *priv = hw->priv; - - iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key); -} - -static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - struct iwl_rxon_context *ctx = vif_priv->ctx; - int ret; - bool is_default_wep_key = false; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - if (iwlagn_mod_params.sw_crypto) { - IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); - return -EOPNOTSUPP; - } - - switch (key->cipher) { - case WLAN_CIPHER_SUITE_TKIP: - key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; - /* fall through */ 
- case WLAN_CIPHER_SUITE_CCMP: - key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; - break; - default: - break; - } - - /* - * We could program these keys into the hardware as well, but we - * don't expect much multicast traffic in IBSS and having keys - * for more stations is probably more useful. - * - * Mark key TX-only and return 0. - */ - if (vif->type == NL80211_IFTYPE_ADHOC && - !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { - key->hw_key_idx = WEP_INVALID_OFFSET; - return 0; - } - - /* If they key was TX-only, accept deletion */ - if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET) - return 0; - - mutex_lock(&priv->shrd->mutex); - iwl_scan_cancel_timeout(priv, 100); - - BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT); - - /* - * If we are getting WEP group key and we didn't receive any key mapping - * so far, we are in legacy wep mode (group key only), otherwise we are - * in 1X mode. - * In legacy wep mode, we use another host command to the uCode. - */ - if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || - key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) { - if (cmd == SET_KEY) - is_default_wep_key = !ctx->key_mapping_keys; - else - is_default_wep_key = - key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT; - } - - - switch (cmd) { - case SET_KEY: - if (is_default_wep_key) { - ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key); - break; - } - ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta); - if (ret) { - /* - * can't add key for RX, but we don't need it - * in the device for TX so still return 0 - */ - ret = 0; - key->hw_key_idx = WEP_INVALID_OFFSET; - } - - IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); - break; - case DISABLE_KEY: - if (is_default_wep_key) - ret = iwl_remove_default_wep_key(priv, ctx, key); - else - ret = iwl_remove_dynamic_key(priv, ctx, key, sta); - - IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); - break; - default: - ret = -EINVAL; - } - - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return ret; -} - -static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size) -{ - struct iwl_priv *priv = hw->priv; - int ret = -EINVAL; - struct iwl_station_priv *sta_priv = (void *) sta->drv_priv; - - IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", - sta->addr, tid); - - if (!(cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)) - return -EACCES; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->shrd->mutex); - - switch (action) { - case IEEE80211_AMPDU_RX_START: - if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) - break; - IWL_DEBUG_HT(priv, "start Rx\n"); - ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn); - break; - case IEEE80211_AMPDU_RX_STOP: - IWL_DEBUG_HT(priv, "stop Rx\n"); - ret = iwl_sta_rx_agg_stop(priv, sta, tid); - if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) - ret = 0; - break; - case IEEE80211_AMPDU_TX_START: - if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) - break; - IWL_DEBUG_HT(priv, "start Tx\n"); - ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); - break; - case IEEE80211_AMPDU_TX_STOP: - IWL_DEBUG_HT(priv, "stop Tx\n"); - ret = iwlagn_tx_agg_stop(priv, vif, sta, tid); - if ((ret == 0) && (priv->agg_tids_count > 0)) { - priv->agg_tids_count--; - IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n", - priv->agg_tids_count); - } - if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) - ret = 0; - if 
(!priv->agg_tids_count && cfg(priv)->ht_params && - cfg(priv)->ht_params->use_rts_for_aggregation) { - /* - * switch off RTS/CTS if it was previously enabled - */ - sta_priv->lq_sta.lq.general_params.flags &= - ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; - iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif), - &sta_priv->lq_sta.lq, CMD_ASYNC, false); - } - break; - case IEEE80211_AMPDU_TX_OPERATIONAL: - ret = iwlagn_tx_agg_oper(priv, vif, sta, tid, buf_size); - break; - } - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - return ret; -} - -static int iwlagn_mac_sta_add(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - bool is_ap = vif->type == NL80211_IFTYPE_STATION; - int ret = 0; - u8 sta_id; - - IWL_DEBUG_MAC80211(priv, "received request to add station %pM\n", - sta->addr); - mutex_lock(&priv->shrd->mutex); - IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n", - sta->addr); - sta_priv->sta_id = IWL_INVALID_STATION; - - atomic_set(&sta_priv->pending_frames, 0); - if (vif->type == NL80211_IFTYPE_AP) - sta_priv->client = true; - - ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr, - is_ap, sta, &sta_id); - if (ret) { - IWL_ERR(priv, "Unable to add station %pM (%d)\n", - sta->addr, ret); - /* Should we return success if return code is EEXIST ? */ - goto out; - } - - sta_priv->sta_id = sta_id; - - /* Initialize rate scaling */ - IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n", - sta->addr); - iwl_rs_rate_init(priv, sta, sta_id); - out: - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return ret; -} - -static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_channel_switch *ch_switch) -{ - struct iwl_priv *priv = hw->priv; - const struct iwl_channel_info *ch_info; - struct ieee80211_conf *conf = &hw->conf; - struct ieee80211_channel *channel = ch_switch->channel; - struct iwl_ht_config *ht_conf = &priv->current_ht_config; - /* - * MULTI-FIXME - * When we add support for multiple interfaces, we need to - * revisit this. The channel switch command in the device - * only affects the BSS context, but what does that really - * mean? And what if we get a CSA on the second interface? - * This needs a lot of work. 
- */ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - u16 ch; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - mutex_lock(&priv->shrd->mutex); - - if (iwl_is_rfkill(priv->shrd)) - goto out; - - if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) || - test_bit(STATUS_SCANNING, &priv->shrd->status) || - test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status)) - goto out; - - if (!iwl_is_associated_ctx(ctx)) - goto out; - - if (!cfg(priv)->lib->set_channel_switch) - goto out; - - ch = channel->hw_value; - if (le16_to_cpu(ctx->active.channel) == ch) - goto out; - - ch_info = iwl_get_channel_info(priv, channel->band, ch); - if (!is_channel_valid(ch_info)) { - IWL_DEBUG_MAC80211(priv, "invalid channel\n"); - goto out; - } - - spin_lock_irq(&priv->shrd->lock); - - priv->current_ht_config.smps = conf->smps_mode; - - /* Configure HT40 channels */ - ctx->ht.enabled = conf_is_ht(conf); - if (ctx->ht.enabled) - iwlagn_config_ht40(conf, ctx); - else - ctx->ht.is_40mhz = false; - - if ((le16_to_cpu(ctx->staging.channel) != ch)) - ctx->staging.flags = 0; - - iwl_set_rxon_channel(priv, channel, ctx); - iwl_set_rxon_ht(priv, ht_conf); - iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif); - - spin_unlock_irq(&priv->shrd->lock); - - iwl_set_rate(priv); - /* - * at this point, staging_rxon has the - * configuration for channel switch - */ - set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status); - priv->switch_channel = cpu_to_le16(ch); - if (cfg(priv)->lib->set_channel_switch(priv, ch_switch)) { - clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status); - priv->switch_channel = 0; - ieee80211_chswitch_done(ctx->vif, false); - } - -out: - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - -static void iwlagn_configure_filter(struct ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *total_flags, - u64 multicast) -{ - struct iwl_priv *priv = hw->priv; - __le32 filter_or = 0, filter_nand = 0; - struct iwl_rxon_context *ctx; - -#define CHK(test, flag) do { \ - if (*total_flags & (test)) \ - filter_or |= (flag); \ - else \ - filter_nand |= (flag); \ - } while (0) - - IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", - changed_flags, *total_flags); - - CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); - /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */ - CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK); - CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); - -#undef CHK - - mutex_lock(&priv->shrd->mutex); - - for_each_context(priv, ctx) { - ctx->staging.filter_flags &= ~filter_nand; - ctx->staging.filter_flags |= filter_or; - - /* - * Not committing directly because hardware can perform a scan, - * but we'll eventually commit the filter flags change anyway. - */ - } - - mutex_unlock(&priv->shrd->mutex); - - /* - * Receiving all multicast frames is always enabled by the - * default flags setup in iwl_connection_init_rx_config() - * since we currently do not support programming multicast - * filters into the device. 
- */ - *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | - FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; -} - -static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop) -{ - struct iwl_priv *priv = hw->priv; - - mutex_lock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "enter\n"); - - if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) { - IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n"); - goto done; - } - if (iwl_is_rfkill(priv->shrd)) { - IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n"); - goto done; - } - - /* - * mac80211 will not push any more frames for transmit - * until the flush is completed - */ - if (drop) { - IWL_DEBUG_MAC80211(priv, "send flush command\n"); - if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) { - IWL_ERR(priv, "flush request fail\n"); - goto done; - } - } - IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n"); - iwl_trans_wait_tx_queue_empty(trans(priv)); -done: - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - -static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw, - struct ieee80211_channel *channel, - enum nl80211_channel_type channel_type, - int duration) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN]; - int err = 0; - - if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN))) - return -EOPNOTSUPP; - - if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT))) - return -EOPNOTSUPP; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->shrd->mutex); - - if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) { - err = -EBUSY; - goto out; - } - - priv->hw_roc_channel = channel; - priv->hw_roc_chantype = channel_type; - /* convert from ms to TU */ - priv->hw_roc_duration = DIV_ROUND_UP(1000 * duration, 1024); - priv->hw_roc_start_notified = false; - cancel_delayed_work(&priv->hw_roc_disable_work); - - if (!ctx->is_active) { - static const struct iwl_qos_info default_qos_data = { - .def_qos_parm = { - .ac[0] = { - .cw_min = cpu_to_le16(3), - .cw_max = cpu_to_le16(7), - .aifsn = 2, - .edca_txop = cpu_to_le16(1504), - }, - .ac[1] = { - .cw_min = cpu_to_le16(7), - .cw_max = cpu_to_le16(15), - .aifsn = 2, - .edca_txop = cpu_to_le16(3008), - }, - .ac[2] = { - .cw_min = cpu_to_le16(15), - .cw_max = cpu_to_le16(1023), - .aifsn = 3, - }, - .ac[3] = { - .cw_min = cpu_to_le16(15), - .cw_max = cpu_to_le16(1023), - .aifsn = 7, - }, - }, - }; - - ctx->is_active = true; - ctx->qos_data = default_qos_data; - ctx->staging.dev_type = RXON_DEV_TYPE_P2P; - memcpy(ctx->staging.node_addr, - priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr, - ETH_ALEN); - memcpy(ctx->staging.bssid_addr, - priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr, - ETH_ALEN); - err = iwlagn_commit_rxon(priv, ctx); - if (err) - goto out; - ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK | - RXON_FILTER_PROMISC_MSK | - RXON_FILTER_CTL2HOST_MSK; - - err = iwlagn_commit_rxon(priv, ctx); - if (err) { - iwlagn_disable_roc(priv); - goto out; - } - priv->hw_roc_setup = true; - } - - err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band); - if (err) - iwlagn_disable_roc(priv); - - out: - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return err; -} - -static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw) -{ - struct iwl_priv *priv = hw->priv; - - if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN))) - return -EOPNOTSUPP; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->shrd->mutex); - 
iwl_scan_cancel_timeout(priv, priv->hw_roc_duration); - iwlagn_disable_roc(priv); - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return 0; -} - -static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - const u8 *bssid, - enum ieee80211_tx_sync_type type) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - struct iwl_rxon_context *ctx = vif_priv->ctx; - int ret; - u8 sta_id; - - if (ctx->ctxid != IWL_RXON_CTX_PAN) - return 0; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->shrd->mutex); - - if (iwl_is_associated_ctx(ctx)) { - ret = 0; - goto out; - } - - if (ctx->preauth_bssid || test_bit(STATUS_SCAN_HW, - &priv->shrd->status)) { - ret = -EBUSY; - goto out; - } - - ret = iwl_add_station_common(priv, ctx, bssid, true, NULL, &sta_id); - if (ret) - goto out; - - if (WARN_ON(sta_id != ctx->ap_sta_id)) { - ret = -EIO; - goto out_remove_sta; - } - - memcpy(ctx->bssid, bssid, ETH_ALEN); - ctx->preauth_bssid = true; - - ret = iwlagn_commit_rxon(priv, ctx); - - if (ret == 0) - goto out; - - out_remove_sta: - iwl_remove_station(priv, sta_id, bssid); - out: - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return ret; -} - -static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - const u8 *bssid, - enum ieee80211_tx_sync_type type) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - struct iwl_rxon_context *ctx = vif_priv->ctx; - - if (ctx->ctxid != IWL_RXON_CTX_PAN) - return; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->shrd->mutex); - - if (iwl_is_associated_ctx(ctx)) - goto out; - - iwl_remove_station(priv, ctx->ap_sta_id, bssid); - ctx->preauth_bssid = false; - /* no need to commit */ - out: - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - -static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw, - enum ieee80211_rssi_event rssi_event) -{ - struct iwl_priv *priv = hw->priv; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->shrd->mutex); - - if (cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist) { - if (rssi_event == RSSI_EVENT_LOW) - priv->bt_enable_pspoll = true; - else if (rssi_event == RSSI_EVENT_HIGH) - priv->bt_enable_pspoll = false; - - iwlagn_send_advance_bt_config(priv); - } else { - IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled," - "ignoring RSSI callback\n"); - } - - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - -static int iwlagn_mac_set_tim(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, bool set) -{ - struct iwl_priv *priv = hw->priv; - - queue_work(priv->shrd->workqueue, &priv->beacon_update); - - return 0; -} - -static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, u16 queue, - const struct ieee80211_tx_queue_params *params) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - struct iwl_rxon_context *ctx = vif_priv->ctx; - unsigned long flags; - int q; - - if (WARN_ON(!ctx)) - return -EINVAL; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - if (!iwl_is_ready_rf(priv->shrd)) { - IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n"); - return -EIO; - } - - if (queue >= AC_NUM) { - IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue); - return 0; - } - - q = AC_NUM - 1 - queue; - - spin_lock_irqsave(&priv->shrd->lock, flags); - - 
ctx->qos_data.def_qos_parm.ac[q].cw_min = - cpu_to_le16(params->cw_min); - ctx->qos_data.def_qos_parm.ac[q].cw_max = - cpu_to_le16(params->cw_max); - ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; - ctx->qos_data.def_qos_parm.ac[q].edca_txop = - cpu_to_le16((params->txop * 32)); - - ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0; - - spin_unlock_irqrestore(&priv->shrd->lock, flags); - - IWL_DEBUG_MAC80211(priv, "leave\n"); - return 0; -} - -static int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw) -{ - struct iwl_priv *priv = hw->priv; - - return priv->ibss_manager == IWL_IBSS_MANAGER; -} - -static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx) -{ - iwl_connection_init_rx_config(priv, ctx); - - iwlagn_set_rxon_chain(priv, ctx); - - return iwlagn_commit_rxon(priv, ctx); -} - -static int iwl_setup_interface(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - struct ieee80211_vif *vif = ctx->vif; - int err; - - lockdep_assert_held(&priv->shrd->mutex); - - /* - * This variable will be correct only when there's just - * a single context, but all code using it is for hardware - * that supports only one context. - */ - priv->iw_mode = vif->type; - - ctx->is_active = true; - - err = iwl_set_mode(priv, ctx); - if (err) { - if (!ctx->always_active) - ctx->is_active = false; - return err; - } - - if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist && - vif->type == NL80211_IFTYPE_ADHOC) { - /* - * pretend to have high BT traffic as long as we - * are operating in IBSS mode, as this will cause - * the rate scaling etc. to behave as intended. - */ - priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH; - } - - return 0; -} - -static int iwlagn_mac_add_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - struct iwl_rxon_context *tmp, *ctx = NULL; - int err; - enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif); - - IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n", - viftype, vif->addr); - - cancel_delayed_work_sync(&priv->hw_roc_disable_work); - - mutex_lock(&priv->shrd->mutex); - - iwlagn_disable_roc(priv); - - if (!iwl_is_ready_rf(priv->shrd)) { - IWL_WARN(priv, "Try to add interface when device not ready\n"); - err = -EINVAL; - goto out; - } - - for_each_context(priv, tmp) { - u32 possible_modes = - tmp->interface_modes | tmp->exclusive_interface_modes; - - if (tmp->vif) { - /* check if this busy context is exclusive */ - if (tmp->exclusive_interface_modes & - BIT(tmp->vif->type)) { - err = -EINVAL; - goto out; - } - continue; - } - - if (!(possible_modes & BIT(viftype))) - continue; - - /* have maybe usable context w/o interface */ - ctx = tmp; - break; - } - - if (!ctx) { - err = -EOPNOTSUPP; - goto out; - } - - vif_priv->ctx = ctx; - ctx->vif = vif; - - err = iwl_setup_interface(priv, ctx); - if (!err) - goto out; - - ctx->vif = NULL; - priv->iw_mode = NL80211_IFTYPE_STATION; - out: - mutex_unlock(&priv->shrd->mutex); - - IWL_DEBUG_MAC80211(priv, "leave\n"); - return err; -} - -static void iwl_teardown_interface(struct iwl_priv *priv, - struct ieee80211_vif *vif, - bool mode_change) -{ - struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); - - lockdep_assert_held(&priv->shrd->mutex); - - if (priv->scan_vif == vif) { - iwl_scan_cancel_timeout(priv, 200); - iwl_force_scan_end(priv); - } - - if (!mode_change) { - iwl_set_mode(priv, ctx); - if (!ctx->always_active) - ctx->is_active = false; - } - - /* - * When 
removing the IBSS interface, overwrite the - * BT traffic load with the stored one from the last - * notification, if any. If this is a device that - * doesn't implement this, this has no effect since - * both values are the same and zero. - */ - if (vif->type == NL80211_IFTYPE_ADHOC) - priv->bt_traffic_load = priv->last_bt_traffic_load; -} - -static void iwlagn_mac_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - mutex_lock(&priv->shrd->mutex); - - if (WARN_ON(ctx->vif != vif)) { - struct iwl_rxon_context *tmp; - IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif); - for_each_context(priv, tmp) - IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n", - tmp->ctxid, tmp, tmp->vif); - } - ctx->vif = NULL; - - iwl_teardown_interface(priv, vif, false); - - mutex_unlock(&priv->shrd->mutex); - - IWL_DEBUG_MAC80211(priv, "leave\n"); - -} - -static int iwlagn_mac_change_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum nl80211_iftype newtype, bool newp2p) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); - struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct iwl_rxon_context *tmp; - enum nl80211_iftype newviftype = newtype; - u32 interface_modes; - int err; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - newtype = ieee80211_iftype_p2p(newtype, newp2p); - - mutex_lock(&priv->shrd->mutex); - - if (!ctx->vif || !iwl_is_ready_rf(priv->shrd)) { - /* - * Huh? But wait ... this can maybe happen when - * we're in the middle of a firmware restart! - */ - err = -EBUSY; - goto out; - } - - interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes; - - if (!(interface_modes & BIT(newtype))) { - err = -EBUSY; - goto out; - } - - /* - * Refuse a change that should be done by moving from the PAN - * context to the BSS context instead, if the BSS context is - * available and can support the new interface type. - */ - if (ctx->ctxid == IWL_RXON_CTX_PAN && !bss_ctx->vif && - (bss_ctx->interface_modes & BIT(newtype) || - bss_ctx->exclusive_interface_modes & BIT(newtype))) { - BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); - err = -EBUSY; - goto out; - } - - if (ctx->exclusive_interface_modes & BIT(newtype)) { - for_each_context(priv, tmp) { - if (ctx == tmp) - continue; - - if (!tmp->vif) - continue; - - /* - * The current mode switch would be exclusive, but - * another context is active ... refuse the switch. - */ - err = -EBUSY; - goto out; - } - } - - /* success */ - iwl_teardown_interface(priv, vif, true); - vif->type = newviftype; - vif->p2p = newp2p; - err = iwl_setup_interface(priv, ctx); - WARN_ON(err); - /* - * We've switched internally, but submitting to the - * device may have failed for some reason. Mask this - * error, because otherwise mac80211 will not switch - * (and set the interface type back) and we'll be - * out of sync with it. - */ - err = 0; - - out: - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return err; -} - -static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_scan_request *req) -{ - struct iwl_priv *priv = hw->priv; - int ret; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - if (req->n_channels == 0) - return -EINVAL; - - mutex_lock(&priv->shrd->mutex); - - /* - * If an internal scan is in progress, just set - * up the scan_request as per above. 
- */ - if (priv->scan_type != IWL_SCAN_NORMAL) { - IWL_DEBUG_SCAN(priv, - "SCAN request during internal scan - defer\n"); - priv->scan_request = req; - priv->scan_vif = vif; - ret = 0; - } else { - priv->scan_request = req; - priv->scan_vif = vif; - /* - * mac80211 will only ask for one band at a time - * so using channels[0] here is ok - */ - ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL, - req->channels[0]->band); - if (ret) { - priv->scan_request = NULL; - priv->scan_vif = NULL; - } - } - - IWL_DEBUG_MAC80211(priv, "leave\n"); - - mutex_unlock(&priv->shrd->mutex); - - return ret; -} - -static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - int ret; - - IWL_DEBUG_MAC80211(priv, "enter: received request to remove " - "station %pM\n", sta->addr); - mutex_lock(&priv->shrd->mutex); - IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n", - sta->addr); - ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr); - if (ret) - IWL_DEBUG_QUIET_RFKILL(priv, "Error removing station %pM\n", - sta->addr); - mutex_unlock(&priv->shrd->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return ret; -} - -static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id) -{ - unsigned long flags; - - spin_lock_irqsave(&priv->shrd->sta_lock, flags); - priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK; - priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK; - priv->stations[sta_id].sta.sta.modify_mask = 0; - priv->stations[sta_id].sta.sleep_tx_count = 0; - priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); - spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); - -} - -static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum sta_notify_cmd cmd, - struct ieee80211_sta *sta) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - int sta_id; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - switch (cmd) { - case STA_NOTIFY_SLEEP: - WARN_ON(!sta_priv->client); - sta_priv->asleep = true; - if (atomic_read(&sta_priv->pending_frames) > 0) - ieee80211_sta_block_awake(hw, sta, true); - break; - case STA_NOTIFY_AWAKE: - WARN_ON(!sta_priv->client); - if (!sta_priv->asleep) - break; - sta_priv->asleep = false; - sta_id = iwl_sta_id(sta); - if (sta_id != IWL_INVALID_STATION) - iwl_sta_modify_ps_wake(priv, sta_id); - break; - default: - break; - } - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - -struct ieee80211_ops iwlagn_hw_ops = { - .tx = iwlagn_mac_tx, - .start = iwlagn_mac_start, - .stop = iwlagn_mac_stop, -#ifdef CONFIG_PM_SLEEP - .suspend = iwlagn_mac_suspend, - .resume = iwlagn_mac_resume, -#endif - .add_interface = iwlagn_mac_add_interface, - .remove_interface = iwlagn_mac_remove_interface, - .change_interface = iwlagn_mac_change_interface, - .config = iwlagn_mac_config, - .configure_filter = iwlagn_configure_filter, - .set_key = iwlagn_mac_set_key, - .update_tkip_key = iwlagn_mac_update_tkip_key, - .set_rekey_data = iwlagn_mac_set_rekey_data, - .conf_tx = iwlagn_mac_conf_tx, - .bss_info_changed = iwlagn_bss_info_changed, - .ampdu_action = iwlagn_mac_ampdu_action, - .hw_scan = iwlagn_mac_hw_scan, - .sta_notify = iwlagn_mac_sta_notify, - .sta_add = iwlagn_mac_sta_add, - .sta_remove = iwlagn_mac_sta_remove, - .channel_switch = iwlagn_mac_channel_switch, - .flush 
= iwlagn_mac_flush, - .tx_last_beacon = iwlagn_mac_tx_last_beacon, - .remain_on_channel = iwlagn_mac_remain_on_channel, - .cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel, - .rssi_callback = iwlagn_mac_rssi_callback, - CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd) - CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump) - .tx_sync = iwlagn_mac_tx_sync, - .finish_tx_sync = iwlagn_mac_finish_tx_sync, - .set_tim = iwlagn_mac_set_tim, -}; - -/* This function both allocates and initializes hw and priv. */ -struct ieee80211_hw *iwl_alloc_all(void) -{ - struct iwl_priv *priv; - /* mac80211 allocates memory for this device instance, including - * space for this driver's private structure */ - struct ieee80211_hw *hw; - - hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwlagn_hw_ops); - if (!hw) - goto out; - - priv = hw->priv; - priv->hw = hw; - -out: - return hw; -} diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-pci.c b/trunk/drivers/net/wireless/iwlwifi/iwl-pci.c index fb30ea7ca96b..1800029911ad 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-pci.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-pci.c @@ -135,7 +135,7 @@ static void iwl_pci_apm_config(struct iwl_bus *bus) } } -static void iwl_pci_get_hw_id_string(struct iwl_bus *bus, char buf[], +static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[], int buf_len) { struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus); @@ -144,13 +144,6 @@ static void iwl_pci_get_hw_id_string(struct iwl_bus *bus, char buf[], pci_dev->subsystem_device); } -static u32 iwl_pci_get_hw_id(struct iwl_bus *bus) -{ - struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus); - - return (pci_dev->device << 16) + pci_dev->subsystem_device; -} - static void iwl_pci_write8(struct iwl_bus *bus, u32 ofs, u8 val) { iowrite8(val, IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs); @@ -170,7 +163,6 @@ static u32 iwl_pci_read32(struct iwl_bus *bus, u32 ofs) static const struct iwl_bus_ops bus_ops_pci = { .get_pm_support = iwl_pci_is_pm_supported, .apm_config = iwl_pci_apm_config, - .get_hw_id_string = iwl_pci_get_hw_id_string, .get_hw_id = iwl_pci_get_hw_id, .write8 = iwl_pci_write8, .write32 = iwl_pci_write32, @@ -264,8 +256,6 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)}, {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)}, {IWL_PCI_DEVICE(0x0082, 0x1341, iwl6005_2agn_d_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_cfg)},/* low 5GHz active */ - {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_cfg)},/* high 5GHz active */ /* 6x30 Series */ {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)}, @@ -335,28 +325,46 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)}, + {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)}, + {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)}, {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)}, /* 2x30 Series */ {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)}, + {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)}, + {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)}, /* 6x35 Series */ {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)}, {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)}, {IWL_PCI_DEVICE(0x088E, 0x4460, 
iwl6035_2agn_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)}, + {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)}, + {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)}, /* 105 Series */ {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)}, {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)}, {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0894, 0x0026, iwl105_bg_cfg)}, + {IWL_PCI_DEVICE(0x0895, 0x0226, iwl105_bg_cfg)}, + {IWL_PCI_DEVICE(0x0894, 0x0426, iwl105_bg_cfg)}, {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)}, /* 135 Series */ {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)}, {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)}, {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0892, 0x0066, iwl135_bg_cfg)}, + {IWL_PCI_DEVICE(0x0893, 0x0266, iwl135_bg_cfg)}, + {IWL_PCI_DEVICE(0x0892, 0x0466, iwl135_bg_cfg)}, {0} }; diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-power.c b/trunk/drivers/net/wireless/iwlwifi/iwl-power.c index 2b188a6025b3..4eaab204322d 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-power.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-power.c @@ -167,7 +167,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv, u8 skip; u32 slp_itrvl; - if (cfg(priv)->adv_pm) { + if (priv->cfg->adv_pm) { table = apm_range_2; if (period <= IWL_DTIM_RANGE_1_MAX) table = apm_range_1; @@ -221,7 +221,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv, cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; if (iwl_advanced_bt_coexist(priv)) { - if (!cfg(priv)->bt_params->bt_sco_disable) + if (!priv->cfg->bt_params->bt_sco_disable) cmd->flags |= IWL_POWER_BT_SCO_ENA; else cmd->flags &= ~IWL_POWER_BT_SCO_ENA; @@ -307,7 +307,7 @@ static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv, cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; if (iwl_advanced_bt_coexist(priv)) { - if (!cfg(priv)->bt_params->bt_sco_disable) + if (!priv->cfg->bt_params->bt_sco_disable) cmd->flags |= IWL_POWER_BT_SCO_ENA; else cmd->flags &= ~IWL_POWER_BT_SCO_ENA; @@ -350,7 +350,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv, if (priv->shrd->wowlan) iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper); - else if (!cfg(priv)->base_params->no_idle_support && + else if (!priv->cfg->base_params->no_idle_support && priv->hw->conf.flags & IEEE80211_CONF_IDLE) iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20); else if (iwl_tt_is_low_power_state(priv)) { diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-scan.c b/trunk/drivers/net/wireless/iwlwifi/iwl-scan.c index 084aa2c4ccfb..e5d727f537d0 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-scan.c @@ -416,8 +416,6 @@ static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time) if (!iwl_is_associated_ctx(ctx)) continue; - if (ctx->staging.dev_type == RXON_DEV_TYPE_P2P) - continue; value = ctx->beacon_int; if (!value) value = IWL_PASSIVE_DWELL_BASE; @@ -680,8 +678,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) priv->contexts[IWL_RXON_CTX_BSS].active.flags & RXON_FLG_CHANNEL_MODE_MSK) >> RXON_FLG_CHANNEL_MODE_POS; - if ((priv->scan_request && priv->scan_request->no_cck) || - chan_mod == CHANNEL_MODE_PURE_40) { + if (chan_mod == CHANNEL_MODE_PURE_40) { rate = IWL_RATE_6M_PLCP; } else { rate = IWL_RATE_1M_PLCP; @@ -691,8 +688,8 @@ static int 
iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) * Internal scans are passive, so we can indiscriminately set * the BT ignore flag on 2.4 GHz since it applies to TX only. */ - if (cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist) + if (priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist) scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT; break; case IEEE80211_BAND_5GHZ: @@ -733,12 +730,12 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) band = priv->scan_band; - if (cfg(priv)->scan_rx_antennas[band]) - rx_ant = cfg(priv)->scan_rx_antennas[band]; + if (priv->cfg->scan_rx_antennas[band]) + rx_ant = priv->cfg->scan_rx_antennas[band]; if (band == IEEE80211_BAND_2GHZ && - cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist) { + priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist) { /* transmit 2.4 GHz probes only on first antenna */ scan_tx_antennas = first_antenna(scan_tx_antennas); } @@ -762,8 +759,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) rx_ant = first_antenna(active_chains); } - if (cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist && + if (priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist && priv->bt_full_concurrent) { /* operated as 1x1 in full concurrency mode */ rx_ant = first_antenna(rx_ant); @@ -941,6 +938,51 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv, return 0; } +int iwlagn_mac_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_scan_request *req) +{ + struct iwl_priv *priv = hw->priv; + int ret; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (req->n_channels == 0) + return -EINVAL; + + mutex_lock(&priv->shrd->mutex); + + /* + * If an internal scan is in progress, just set + * up the scan_request as per above. + */ + if (priv->scan_type != IWL_SCAN_NORMAL) { + IWL_DEBUG_SCAN(priv, + "SCAN request during internal scan - defer\n"); + priv->scan_request = req; + priv->scan_vif = vif; + ret = 0; + } else { + priv->scan_request = req; + priv->scan_vif = vif; + /* + * mac80211 will only ask for one band at a time + * so using channels[0] here is ok + */ + ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL, + req->channels[0]->band); + if (ret) { + priv->scan_request = NULL; + priv->scan_vif = NULL; + } + } + + IWL_DEBUG_MAC80211(priv, "leave\n"); + + mutex_unlock(&priv->shrd->mutex); + + return ret; +} /* * internal short scan, this function should only been called while associated. 
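Note on the iwlagn_mac_hw_scan hunk above: when an internal scan is already in flight, the handler only records req and vif and returns 0, so the deferred request presumably has to be started by whatever path runs once the internal scan completes. The sketch below is illustrative only and is not part of the patch; the helper name iwl_pick_up_deferred_scan() is hypothetical, while priv->scan_request, priv->scan_vif, iwl_scan_initiate() and IWL_SCAN_NORMAL are taken from the hunk above.

/*
 * Illustrative sketch (not in the patch): pick up a scan request that
 * iwlagn_mac_hw_scan() deferred because an internal scan was running.
 * iwl_pick_up_deferred_scan() is a hypothetical name; it would be
 * called with priv->shrd->mutex held, from the internal-scan
 * completion path.
 */
static void iwl_pick_up_deferred_scan(struct iwl_priv *priv)
{
	struct cfg80211_scan_request *req = priv->scan_request;

	lockdep_assert_held(&priv->shrd->mutex);

	if (!req)
		return;

	/* mac80211 only asks for one band at a time, so channels[0] is ok */
	if (iwl_scan_initiate(priv, priv->scan_vif, IWL_SCAN_NORMAL,
			      req->channels[0]->band)) {
		/* starting the deferred scan failed: report it back */
		priv->scan_request = NULL;
		priv->scan_vif = NULL;
		ieee80211_scan_completed(priv->hw, true);
	}
}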
diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-shared.h b/trunk/drivers/net/wireless/iwlwifi/iwl-shared.h index dc55cc4a8108..14eaf37ce3b1 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-shared.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-shared.h @@ -94,9 +94,9 @@ * This implementation is iwl-pci.c */ +struct iwl_cfg; struct iwl_bus; struct iwl_priv; -struct iwl_trans; struct iwl_sensitivity_ranges; struct iwl_trans_ops; @@ -107,10 +107,6 @@ struct iwl_trans_ops; extern struct iwl_mod_params iwlagn_mod_params; -#define IWL_DISABLE_HT_ALL BIT(0) -#define IWL_DISABLE_HT_TXAGG BIT(1) -#define IWL_DISABLE_HT_RXAGG BIT(2) - /** * struct iwl_mod_params * @@ -118,8 +114,7 @@ extern struct iwl_mod_params iwlagn_mod_params; * * @sw_crypto: using hardware encryption, default = 0 * @num_of_queues: number of tx queue, HW dependent - * @disable_11n: disable 11n capabilities, default = 0, - * use IWL_DISABLE_HT_* constants + * @disable_11n: 11n capabilities enabled, default = 0 * @amsdu_size_8K: enable 8K amsdu size, default = 1 * @antenna: both antennas (use diversity), default = 0 * @restart_fw: restart firmware, default = 1 @@ -140,7 +135,7 @@ extern struct iwl_mod_params iwlagn_mod_params; struct iwl_mod_params { int sw_crypto; int num_of_queues; - unsigned int disable_11n; + int disable_11n; int amsdu_size_8K; int antenna; int restart_fw; @@ -179,6 +174,8 @@ struct iwl_mod_params { * @ct_kill_exit_threshold: when to reeable the device - in hw dependent unit * relevant for 1000, 6000 and up * @wd_timeout: TX queues watchdog timeout + * @calib_init_cfg: setup initial calibrations for the hw + * @calib_rt_cfg: setup runtime calibrations for the hw * @struct iwl_sensitivity_ranges: range of sensitivity values */ struct iwl_hw_params { @@ -198,148 +195,67 @@ struct iwl_hw_params { u32 ct_kill_exit_threshold; unsigned int wd_timeout; + u32 calib_init_cfg; + u32 calib_rt_cfg; const struct iwl_sensitivity_ranges *sens; }; /** - * enum iwl_ucode_type + * enum iwl_agg_state * - * The type of ucode currently loaded on the hardware. + * The state machine of the BA agreement establishment / tear down. + * These states relate to a specific RA / TID. * - * @IWL_UCODE_NONE: No ucode loaded - * @IWL_UCODE_REGULAR: Normal runtime ucode - * @IWL_UCODE_INIT: Initial ucode - * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode + * @IWL_AGG_OFF: aggregation is not used + * @IWL_AGG_ON: aggregation session is up + * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the + * HW queue to be empty from packets for this RA /TID. + * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the + * HW queue to be empty from packets for this RA /TID. */ -enum iwl_ucode_type { - IWL_UCODE_NONE, - IWL_UCODE_REGULAR, - IWL_UCODE_INIT, - IWL_UCODE_WOWLAN, +enum iwl_agg_state { + IWL_AGG_OFF = 0, + IWL_AGG_ON, + IWL_EMPTYING_HW_QUEUE_ADDBA, + IWL_EMPTYING_HW_QUEUE_DELBA, }; /** - * struct iwl_notification_wait - notification wait entry - * @list: list head for global list - * @fn: function called with the notification - * @cmd: command ID - * - * This structure is not used directly, to wait for a - * notification declare it on the stack, and call - * iwlagn_init_notification_wait() with appropriate - * parameters. Then do whatever will cause the ucode - * to notify the driver, and to wait for that then - * call iwlagn_wait_notification(). - * - * Each notification is one-shot. 
If at some point we - * need to support multi-shot notifications (which - * can't be allocated on the stack) we need to modify - * the code for them. + * struct iwl_ht_agg - aggregation state machine + + * This structs holds the states for the BA agreement establishment and tear + * down. It also holds the state during the BA session itself. This struct is + * duplicated for each RA / TID. + + * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the + * Tx response (REPLY_TX), and the block ack notification + * (REPLY_COMPRESSED_BA). + * @state: state of the BA agreement establishment / tear down. + * @txq_id: Tx queue used by the BA session - used by the transport layer. + * Needed by the upper layer for debugfs only. + * @wait_for_ba: Expect block-ack before next Tx reply */ -struct iwl_notification_wait { - struct list_head list; - - void (*fn)(struct iwl_trans *trans, struct iwl_rx_packet *pkt, - void *data); - void *fn_data; - - u8 cmd; - bool triggered, aborted; +struct iwl_ht_agg { + u32 rate_n_flags; + enum iwl_agg_state state; + u16 txq_id; + bool wait_for_ba; }; /** - * enum iwl_pa_type - Power Amplifier type - * @IWL_PA_SYSTEM: based on uCode configuration - * @IWL_PA_INTERNAL: use Internal only - */ -enum iwl_pa_type { - IWL_PA_SYSTEM = 0, - IWL_PA_INTERNAL = 1, -}; + * struct iwl_tid_data - one for each RA / TID -/* - * LED mode - * IWL_LED_DEFAULT: use device default - * IWL_LED_RF_STATE: turn LED on/off based on RF state - * LED ON = RF ON - * LED OFF = RF OFF - * IWL_LED_BLINK: adjust led blink rate based on blink table - */ -enum iwl_led_mode { - IWL_LED_DEFAULT, - IWL_LED_RF_STATE, - IWL_LED_BLINK, -}; + * This structs holds the states for each RA / TID. -/** - * struct iwl_cfg - * @name: Offical name of the device - * @fw_name_pre: Firmware filename prefix. The api version and extension - * (.ucode) will be added to filename before loading from disk. The - * filename is constructed as fw_name_pre.ucode. - * @ucode_api_max: Highest version of uCode API supported by driver. - * @ucode_api_ok: oldest version of the uCode API that is OK to load - * without a warning, for use in transitions - * @ucode_api_min: Lowest version of uCode API supported by driver. - * @valid_tx_ant: valid transmit antenna - * @valid_rx_ant: valid receive antenna - * @sku: sku information from EEPROM - * @eeprom_ver: EEPROM version - * @eeprom_calib_ver: EEPROM calibration version - * @lib: pointer to the lib ops - * @additional_nic_config: additional nic configuration - * @base_params: pointer to basic parameters - * @ht_params: point to ht patameters - * @bt_params: pointer to bt parameters - * @pa_type: used by 6000 series only to identify the type of Power Amplifier - * @need_temp_offset_calib: need to perform temperature offset calibration - * @no_xtal_calib: some devices do not need crystal calibration data, - * don't send it to those - * @scan_rx_antennas: available antenna for scan operation - * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) - * @adv_pm: advance power management - * @rx_with_siso_diversity: 1x1 device with rx antenna diversity - * @internal_wimax_coex: internal wifi/wimax combo device - * @iq_invert: I/Q inversion - * @temp_offset_v2: support v2 of temperature offset calibration - * - * We enable the driver to be backward compatible wrt API version. The - * driver specifies which APIs it supports (with @ucode_api_max being the - * highest and @ucode_api_min the lowest). Firmware will only be loaded if - * it has a supported API version. 
- * - * The ideal usage of this infrastructure is to treat a new ucode API - * release as a new hardware revision. + * @seq_number: the next WiFi sequence number to use + * @tfds_in_queue: number of packets sent to the HW queues. + * Exported for debugfs only + * @agg: aggregation state machine */ -struct iwl_cfg { - /* params specific to an individual device within a device family */ - const char *name; - const char *fw_name_pre; - const unsigned int ucode_api_max; - const unsigned int ucode_api_ok; - const unsigned int ucode_api_min; - u8 valid_tx_ant; - u8 valid_rx_ant; - u16 sku; - u16 eeprom_ver; - u16 eeprom_calib_ver; - const struct iwl_lib_ops *lib; - void (*additional_nic_config)(struct iwl_priv *priv); - /* params not likely to change within a device family */ - struct iwl_base_params *base_params; - /* params likely to change within a device family */ - struct iwl_ht_params *ht_params; - struct iwl_bt_params *bt_params; - enum iwl_pa_type pa_type; /* if used set to IWL_PA_SYSTEM */ - const bool need_temp_offset_calib; /* if used set to true */ - const bool no_xtal_calib; - u8 scan_rx_antennas[IEEE80211_NUM_BANDS]; - enum iwl_led_mode led_mode; - const bool adv_pm; - const bool rx_with_siso_diversity; - const bool internal_wimax_coex; - const bool iq_invert; - const bool temp_offset_v2; +struct iwl_tid_data { + u16 seq_number; + u16 tfds_in_queue; + struct iwl_ht_agg agg; }; /** @@ -350,25 +266,15 @@ struct iwl_cfg { * @ucode_owner: IWL_OWNERSHIP_* * @cmd_queue: command queue number * @status: STATUS_* - * @wowlan: are we running wowlan uCode * @valid_contexts: microcode/device supports multiple contexts * @bus: pointer to the bus layer data - * @cfg: see struct iwl_cfg * @priv: pointer to the upper layer data - * @trans: pointer to the transport layer data * @hw_params: see struct iwl_hw_params * @workqueue: the workqueue used by all the layers of the driver * @lock: protect general shared data * @sta_lock: protects the station table. * If lock and sta_lock are needed, lock must be acquired first. 
* @mutex: - * @wait_command_queue: the wait_queue for SYNC host command nad uCode load - * @eeprom: pointer to the eeprom/OTP image - * @ucode_type: indicator of loaded ucode image - * @notif_waits: things waiting for notification - * @notif_wait_lock: lock protecting notification - * @notif_waitq: head of notification wait queue - * @device_pointers: pointers to ucode event tables */ struct iwl_shared { #ifdef CONFIG_IWLWIFI_DEBUG @@ -384,7 +290,6 @@ struct iwl_shared { u8 valid_contexts; struct iwl_bus *bus; - struct iwl_cfg *cfg; struct iwl_priv *priv; struct iwl_trans *trans; struct iwl_hw_params hw_params; @@ -394,29 +299,13 @@ struct iwl_shared { spinlock_t sta_lock; struct mutex mutex; - wait_queue_head_t wait_command_queue; - - /* eeprom -- this is in the card's little endian byte order */ - u8 *eeprom; - - /* ucode related variables */ - enum iwl_ucode_type ucode_type; - - /* notification wait support */ - struct list_head notif_waits; - spinlock_t notif_wait_lock; - wait_queue_head_t notif_waitq; - - struct { - u32 error_event_table; - u32 log_event_table; - } device_pointers; + struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT]; + wait_queue_head_t wait_command_queue; }; /*Whatever _m is (iwl_trans, iwl_priv, iwl_bus, these macros will work */ #define priv(_m) ((_m)->shrd->priv) -#define cfg(_m) ((_m)->shrd->cfg) #define bus(_m) ((_m)->shrd->bus) #define trans(_m) ((_m)->shrd->trans) #define hw_params(_m) ((_m)->shrd->hw_params) @@ -538,6 +427,12 @@ int __must_check iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_device_cmd *cmd); int iwlagn_hw_valid_rtc_data_addr(u32 addr); +void iwl_start_tx_ba_trans_ready(struct iwl_priv *priv, + enum iwl_rxon_context_id ctx, + u8 sta_id, u8 tid); +void iwl_stop_tx_ba_trans_ready(struct iwl_priv *priv, + enum iwl_rxon_context_id ctx, + u8 sta_id, u8 tid); void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state); void iwl_nic_config(struct iwl_priv *priv); void iwl_free_skb(struct iwl_priv *priv, struct sk_buff *skb); @@ -550,24 +445,6 @@ bool iwl_check_for_ct_kill(struct iwl_priv *priv); void iwl_stop_sw_queue(struct iwl_priv *priv, u8 ac); void iwl_wake_sw_queue(struct iwl_priv *priv, u8 ac); -/* notification wait support */ -void iwl_abort_notification_waits(struct iwl_shared *shrd); -void __acquires(wait_entry) -iwl_init_notification_wait(struct iwl_shared *shrd, - struct iwl_notification_wait *wait_entry, - u8 cmd, - void (*fn)(struct iwl_trans *trans, - struct iwl_rx_packet *pkt, - void *data), - void *fn_data); -int __must_check __releases(wait_entry) -iwl_wait_notification(struct iwl_shared *shrd, - struct iwl_notification_wait *wait_entry, - unsigned long timeout); -void __releases(wait_entry) -iwl_remove_notification(struct iwl_shared *shrd, - struct iwl_notification_wait *wait_entry); - #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_reset_traffic_log(struct iwl_priv *priv); #endif /* CONFIG_IWLWIFI_DEBUGFS */ diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-testmode.c b/trunk/drivers/net/wireless/iwlwifi/iwl-sv-open.c similarity index 74% rename from trunk/drivers/net/wireless/iwlwifi/iwl-testmode.c rename to trunk/drivers/net/wireless/iwlwifi/iwl-sv-open.c index 4a5cddd2d56b..5e50d88f302b 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-testmode.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-sv-open.c @@ -70,7 +70,6 @@ #include #include -#include "iwl-wifi.h" #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-debug.h" @@ -78,7 +77,6 @@ #include "iwl-agn.h" #include "iwl-testmode.h" #include 
"iwl-trans.h" -#include "iwl-bus.h" /* The TLVs used in the gnl message policy between the kernel module and * user space application. iwl_testmode_gnl_msg_policy is to be carried @@ -108,13 +106,6 @@ struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = { [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, }, [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, }, - - [IWL_TM_ATTR_SRAM_ADDR] = { .type = NLA_U32, }, - [IWL_TM_ATTR_SRAM_SIZE] = { .type = NLA_U32, }, - [IWL_TM_ATTR_SRAM_DUMP] = { .type = NLA_UNSPEC, }, - - [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, }, - [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, }, }; /* @@ -186,18 +177,6 @@ void iwl_testmode_init(struct iwl_priv *priv) { priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt; priv->testmode_trace.trace_enabled = false; - priv->testmode_sram.sram_readed = false; -} - -static void iwl_sram_cleanup(struct iwl_priv *priv) -{ - if (priv->testmode_sram.sram_readed) { - kfree(priv->testmode_sram.buff_addr); - priv->testmode_sram.buff_addr = NULL; - priv->testmode_sram.buff_size = 0; - priv->testmode_sram.num_chunks = 0; - priv->testmode_sram.sram_readed = false; - } } static void iwl_trace_cleanup(struct iwl_priv *priv) @@ -222,7 +201,6 @@ static void iwl_trace_cleanup(struct iwl_priv *priv) void iwl_testmode_cleanup(struct iwl_priv *priv) { iwl_trace_cleanup(priv); - iwl_sram_cleanup(priv); } /* @@ -298,7 +276,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb) IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs); switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { - case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32: + case IWL_TM_CMD_APP2DEV_REG_READ32: val32 = iwl_read32(bus(priv), ofs); IWL_INFO(priv, "32bit value to read 0x%x\n", val32); @@ -313,7 +291,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb) IWL_DEBUG_INFO(priv, "Error sending msg : %d\n", status); break; - case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32: + case IWL_TM_CMD_APP2DEV_REG_WRITE32: if (!tb[IWL_TM_ATTR_REG_VALUE32]) { IWL_DEBUG_INFO(priv, "Error finding value to write\n"); @@ -324,7 +302,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb) iwl_write32(bus(priv), ofs, val32); } break; - case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8: + case IWL_TM_CMD_APP2DEV_REG_WRITE8: if (!tb[IWL_TM_ATTR_REG_VALUE8]) { IWL_DEBUG_INFO(priv, "Error finding value to write\n"); return -ENOMSG; @@ -334,32 +312,6 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb) iwl_write8(bus(priv), ofs, val8); } break; - case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32: - val32 = iwl_read_prph(bus(priv), ofs); - IWL_INFO(priv, "32bit value to read 0x%x\n", val32); - - skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20); - if (!skb) { - IWL_DEBUG_INFO(priv, "Error allocating memory\n"); - return -ENOMEM; - } - NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32); - status = cfg80211_testmode_reply(skb); - if (status < 0) - IWL_DEBUG_INFO(priv, - "Error sending msg : %d\n", status); - break; - case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32: - if (!tb[IWL_TM_ATTR_REG_VALUE32]) { - IWL_DEBUG_INFO(priv, - "Error finding value to write\n"); - return -ENOMSG; - } else { - val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]); - IWL_INFO(priv, "32bit value to write 0x%x\n", val32); - iwl_write_prph(bus(priv), ofs, val32); - } - break; default: IWL_DEBUG_INFO(priv, "Unknown testmode register command ID\n"); return -ENOSYS; @@ -378,24 +330,24 @@ static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv) struct 
iwl_notification_wait calib_wait; int ret; - iwl_init_notification_wait(priv->shrd, &calib_wait, + iwlagn_init_notification_wait(priv, &calib_wait, CALIBRATION_COMPLETE_NOTIFICATION, NULL, NULL); - ret = iwl_init_alive_start(trans(priv)); + ret = iwlagn_init_alive_start(priv); if (ret) { IWL_DEBUG_INFO(priv, "Error configuring init calibration: %d\n", ret); goto cfg_init_calib_error; } - ret = iwl_wait_notification(priv->shrd, &calib_wait, 2 * HZ); + ret = iwlagn_wait_notification(priv, &calib_wait, 2 * HZ); if (ret) IWL_DEBUG_INFO(priv, "Error detecting" " CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret); return ret; cfg_init_calib_error: - iwl_remove_notification(priv->shrd, &calib_wait); + iwlagn_remove_notification(priv, &calib_wait); return ret; } @@ -418,16 +370,14 @@ static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv) static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) { struct iwl_priv *priv = hw->priv; - struct iwl_trans *trans = trans(priv); struct sk_buff *skb; unsigned char *rsp_data_ptr = NULL; int status = 0, rsp_data_len = 0; - u32 devid; switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { case IWL_TM_CMD_APP2DEV_GET_DEVICENAME: - rsp_data_ptr = (unsigned char *)cfg(priv)->name; - rsp_data_len = strlen(cfg(priv)->name); + rsp_data_ptr = (unsigned char *)priv->cfg->name; + rsp_data_len = strlen(priv->cfg->name); skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, rsp_data_len + 20); if (!skb) { @@ -446,7 +396,8 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) break; case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW: - status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_INIT); + status = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init, + IWL_UCODE_INIT); if (status) IWL_DEBUG_INFO(priv, "Error loading init ucode: %d\n", status); @@ -454,11 +405,13 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB: iwl_testmode_cfg_init_calib(priv); - iwl_trans_stop_device(trans); + iwl_trans_stop_device(trans(priv)); break; case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW: - status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_REGULAR); + status = iwlagn_load_ucode_wait_alive(priv, + &priv->ucode_rt, + IWL_UCODE_REGULAR); if (status) { IWL_DEBUG_INFO(priv, "Error loading runtime ucode: %d\n", status); @@ -470,25 +423,10 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) "Error starting the device: %d\n", status); break; - case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: - iwl_scan_cancel_timeout(priv, 200); - iwl_trans_stop_device(trans); - status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_WOWLAN); - if (status) { - IWL_DEBUG_INFO(priv, - "Error loading WOWLAN ucode: %d\n", status); - break; - } - status = iwl_alive_start(priv); - if (status) - IWL_DEBUG_INFO(priv, - "Error starting the device: %d\n", status); - break; - case IWL_TM_CMD_APP2DEV_GET_EEPROM: - if (priv->shrd->eeprom) { + if (priv->eeprom) { skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, - cfg(priv)->base_params->eeprom_size + 20); + priv->cfg->base_params->eeprom_size + 20); if (!skb) { IWL_DEBUG_INFO(priv, "Error allocating memory\n"); @@ -497,8 +435,8 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_EEPROM_RSP); NLA_PUT(skb, IWL_TM_ATTR_EEPROM, - cfg(priv)->base_params->eeprom_size, - priv->shrd->eeprom); + priv->cfg->base_params->eeprom_size, + priv->eeprom); status = cfg80211_testmode_reply(skb); if (status < 
0) IWL_DEBUG_INFO(priv, @@ -517,37 +455,6 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]); break; - case IWL_TM_CMD_APP2DEV_GET_FW_VERSION: - IWL_INFO(priv, "uCode version raw: 0x%x\n", priv->ucode_ver); - - skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20); - if (!skb) { - IWL_DEBUG_INFO(priv, "Error allocating memory\n"); - return -ENOMEM; - } - NLA_PUT_U32(skb, IWL_TM_ATTR_FW_VERSION, priv->ucode_ver); - status = cfg80211_testmode_reply(skb); - if (status < 0) - IWL_DEBUG_INFO(priv, - "Error sending msg : %d\n", status); - break; - - case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: - devid = bus_get_hw_id(bus(priv)); - IWL_INFO(priv, "hw version: 0x%x\n", devid); - - skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20); - if (!skb) { - IWL_DEBUG_INFO(priv, "Error allocating memory\n"); - return -ENOMEM; - } - NLA_PUT_U32(skb, IWL_TM_ATTR_DEVICE_ID, devid); - status = cfg80211_testmode_reply(skb); - if (status < 0) - IWL_DEBUG_INFO(priv, - "Error sending msg : %d\n", status); - break; - default: IWL_DEBUG_INFO(priv, "Unknown testmode driver command ID\n"); return -ENOSYS; @@ -628,7 +535,7 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb) } priv->testmode_trace.num_chunks = DIV_ROUND_UP(priv->testmode_trace.buff_size, - DUMP_CHUNK_SIZE); + TRACE_CHUNK_SIZE); break; case IWL_TM_CMD_APP2DEV_END_TRACE: @@ -660,15 +567,15 @@ static int iwl_testmode_trace_dump(struct ieee80211_hw *hw, struct nlattr **tb, idx = cb->args[4]; if (idx >= priv->testmode_trace.num_chunks) return -ENOENT; - length = DUMP_CHUNK_SIZE; + length = TRACE_CHUNK_SIZE; if (((idx + 1) == priv->testmode_trace.num_chunks) && - (priv->testmode_trace.buff_size % DUMP_CHUNK_SIZE)) + (priv->testmode_trace.buff_size % TRACE_CHUNK_SIZE)) length = priv->testmode_trace.buff_size % - DUMP_CHUNK_SIZE; + TRACE_CHUNK_SIZE; NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length, priv->testmode_trace.trace_addr + - (DUMP_CHUNK_SIZE * idx)); + (TRACE_CHUNK_SIZE * idx)); idx++; cb->args[4] = idx; return 0; @@ -714,110 +621,6 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb) return 0; } -/* - * This function handles the user application commands for SRAM data dump - * - * It retrieves the mandatory fields IWL_TM_ATTR_SRAM_ADDR and - * IWL_TM_ATTR_SRAM_SIZE to decide the memory area for SRAM data reading - * - * Several error will be retured, -EBUSY if the SRAM data retrieved by - * previous command has not been delivered to userspace, or -ENOMSG if - * the mandatory fields (IWL_TM_ATTR_SRAM_ADDR,IWL_TM_ATTR_SRAM_SIZE) - * are missing, or -ENOMEM if the buffer allocation fails. - * - * Otherwise 0 is replied indicating the success of the SRAM reading. 
- * - * @hw: ieee80211_hw object that represents the device - * @tb: gnl message fields from the user space - */ -static int iwl_testmode_sram(struct ieee80211_hw *hw, struct nlattr **tb) -{ - struct iwl_priv *priv = hw->priv; - u32 base, ofs, size, maxsize; - - if (priv->testmode_sram.sram_readed) - return -EBUSY; - - if (!tb[IWL_TM_ATTR_SRAM_ADDR]) { - IWL_DEBUG_INFO(priv, "Error finding SRAM offset address\n"); - return -ENOMSG; - } - ofs = nla_get_u32(tb[IWL_TM_ATTR_SRAM_ADDR]); - if (!tb[IWL_TM_ATTR_SRAM_SIZE]) { - IWL_DEBUG_INFO(priv, "Error finding size for SRAM reading\n"); - return -ENOMSG; - } - size = nla_get_u32(tb[IWL_TM_ATTR_SRAM_SIZE]); - switch (priv->shrd->ucode_type) { - case IWL_UCODE_REGULAR: - maxsize = trans(priv)->ucode_rt.data.len; - break; - case IWL_UCODE_INIT: - maxsize = trans(priv)->ucode_init.data.len; - break; - case IWL_UCODE_WOWLAN: - maxsize = trans(priv)->ucode_wowlan.data.len; - break; - case IWL_UCODE_NONE: - IWL_DEBUG_INFO(priv, "Error, uCode does not been loaded\n"); - return -ENOSYS; - default: - IWL_DEBUG_INFO(priv, "Error, unsupported uCode type\n"); - return -ENOSYS; - } - if ((ofs + size) > maxsize) { - IWL_DEBUG_INFO(priv, "Invalid offset/size: out of range\n"); - return -EINVAL; - } - priv->testmode_sram.buff_size = (size / 4) * 4; - priv->testmode_sram.buff_addr = - kmalloc(priv->testmode_sram.buff_size, GFP_KERNEL); - if (priv->testmode_sram.buff_addr == NULL) { - IWL_DEBUG_INFO(priv, "Error allocating memory\n"); - return -ENOMEM; - } - base = 0x800000; - _iwl_read_targ_mem_words(bus(priv), base + ofs, - priv->testmode_sram.buff_addr, - priv->testmode_sram.buff_size / 4); - priv->testmode_sram.num_chunks = - DIV_ROUND_UP(priv->testmode_sram.buff_size, DUMP_CHUNK_SIZE); - priv->testmode_sram.sram_readed = true; - return 0; -} - -static int iwl_testmode_sram_dump(struct ieee80211_hw *hw, struct nlattr **tb, - struct sk_buff *skb, - struct netlink_callback *cb) -{ - struct iwl_priv *priv = hw->priv; - int idx, length; - - if (priv->testmode_sram.sram_readed) { - idx = cb->args[4]; - if (idx >= priv->testmode_sram.num_chunks) { - iwl_sram_cleanup(priv); - return -ENOENT; - } - length = DUMP_CHUNK_SIZE; - if (((idx + 1) == priv->testmode_sram.num_chunks) && - (priv->testmode_sram.buff_size % DUMP_CHUNK_SIZE)) - length = priv->testmode_sram.buff_size % - DUMP_CHUNK_SIZE; - - NLA_PUT(skb, IWL_TM_ATTR_SRAM_DUMP, length, - priv->testmode_sram.buff_addr + - (DUMP_CHUNK_SIZE * idx)); - idx++; - cb->args[4] = idx; - return 0; - } else - return -EFAULT; - - nla_put_failure: - return -ENOBUFS; -} - /* The testmode gnl message handler that takes the gnl message from the * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then @@ -865,11 +668,9 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n"); result = iwl_testmode_ucode(hw, tb); break; - case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32: - case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32: - case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8: - case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32: - case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32: + case IWL_TM_CMD_APP2DEV_REG_READ32: + case IWL_TM_CMD_APP2DEV_REG_WRITE32: + case IWL_TM_CMD_APP2DEV_REG_WRITE8: IWL_DEBUG_INFO(priv, "testmode cmd to register\n"); result = iwl_testmode_reg(hw, tb); break; @@ -879,9 +680,6 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW: case IWL_TM_CMD_APP2DEV_GET_EEPROM: case 
IWL_TM_CMD_APP2DEV_FIXRATE_REQ: - case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: - case IWL_TM_CMD_APP2DEV_GET_FW_VERSION: - case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: IWL_DEBUG_INFO(priv, "testmode cmd to driver\n"); result = iwl_testmode_driver(hw, tb); break; @@ -898,11 +696,6 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) result = iwl_testmode_ownership(hw, tb); break; - case IWL_TM_CMD_APP2DEV_READ_SRAM: - IWL_DEBUG_INFO(priv, "testmode sram read cmd to driver\n"); - result = iwl_testmode_sram(hw, tb); - break; - default: IWL_DEBUG_INFO(priv, "Unknown testmode command\n"); result = -ENOSYS; @@ -951,10 +744,6 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb, IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n"); result = iwl_testmode_trace_dump(hw, tb, skb, cb); break; - case IWL_TM_CMD_APP2DEV_DUMP_SRAM: - IWL_DEBUG_INFO(priv, "testmode sram dump cmd to driver\n"); - result = iwl_testmode_sram_dump(hw, tb, skb, cb); - break; default: result = -EINVAL; break; diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-testmode.h b/trunk/drivers/net/wireless/iwlwifi/iwl-testmode.h index 26138f110340..b980bda4b0f8 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-testmode.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-testmode.h @@ -76,9 +76,9 @@ * the actual uCode host command ID is carried with * IWL_TM_ATTR_UCODE_CMD_ID * - * @IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32: - * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32: - * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8: + * @IWL_TM_CMD_APP2DEV_REG_READ32: + * @IWL_TM_CMD_APP2DEV_REG_WRITE32: + * @IWL_TM_CMD_APP2DEV_REG_WRITE8: * commands from user applicaiton to access register * * @IWL_TM_CMD_APP2DEV_GET_DEVICENAME: retrieve device name @@ -103,30 +103,16 @@ * @IWL_TM_CMD_DEV2APP_EEPROM_RSP: * commands from kernel space to carry the eeprom response * to user application - * * @IWL_TM_CMD_APP2DEV_OWNERSHIP: * commands from user application to own change the ownership of the uCode * if application has the ownership, the only host command from * testmode will deliver to uCode. 
Default owner is driver - * - * @IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32: - * @IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32: - * commands from user applicaiton to indirectly access peripheral register - * - * @IWL_TM_CMD_APP2DEV_READ_SRAM: - * @IWL_TM_CMD_APP2DEV_DUMP_SRAM: - * commands from user applicaiton to read data in sram - * - * @IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: load Weak On Wireless LAN uCode image - * @IWL_TM_CMD_APP2DEV_GET_FW_VERSION: retrieve uCode version - * @IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: retrieve ID information in device - * */ enum iwl_tm_cmd_t { IWL_TM_CMD_APP2DEV_UCODE = 1, - IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 = 2, - IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 = 3, - IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8 = 4, + IWL_TM_CMD_APP2DEV_REG_READ32 = 2, + IWL_TM_CMD_APP2DEV_REG_WRITE32 = 3, + IWL_TM_CMD_APP2DEV_REG_WRITE8 = 4, IWL_TM_CMD_APP2DEV_GET_DEVICENAME = 5, IWL_TM_CMD_APP2DEV_LOAD_INIT_FW = 6, IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB = 7, @@ -140,14 +126,7 @@ enum iwl_tm_cmd_t { IWL_TM_CMD_DEV2APP_UCODE_RX_PKT = 15, IWL_TM_CMD_DEV2APP_EEPROM_RSP = 16, IWL_TM_CMD_APP2DEV_OWNERSHIP = 17, - IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32 = 18, - IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32 = 19, - IWL_TM_CMD_APP2DEV_READ_SRAM = 20, - IWL_TM_CMD_APP2DEV_DUMP_SRAM = 21, - IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW = 22, - IWL_TM_CMD_APP2DEV_GET_FW_VERSION = 23, - IWL_TM_CMD_APP2DEV_GET_DEVICE_ID = 24, - IWL_TM_CMD_MAX = 25, + IWL_TM_CMD_MAX = 18, }; /* @@ -217,26 +196,6 @@ enum iwl_tm_cmd_t { * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_OWNERSHIP, * The mandatory fields are: * IWL_TM_ATTR_UCODE_OWNER for the new owner - * - * @IWL_TM_ATTR_SRAM_ADDR: - * @IWL_TM_ATTR_SRAM_SIZE: - * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_READ_SRAM, - * The mandatory fields are: - * IWL_TM_ATTR_SRAM_ADDR for the address in sram - * IWL_TM_ATTR_SRAM_SIZE for the buffer size of data reading - * - * @IWL_TM_ATTR_SRAM_DUMP: - * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_DUMP_SRAM, - * IWL_TM_ATTR_SRAM_DUMP for the data in sram - * - * @IWL_TM_ATTR_FW_VERSION: - * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_FW_VERSION, - * IWL_TM_ATTR_FW_VERSION for the uCode version - * - * @IWL_TM_ATTR_DEVICE_ID: - * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_DEVICE_ID, - * IWL_TM_ATTR_DEVICE_ID for the device ID information - * */ enum iwl_tm_attr_t { IWL_TM_ATTR_NOT_APPLICABLE = 0, @@ -254,12 +213,7 @@ enum iwl_tm_attr_t { IWL_TM_ATTR_TRACE_DUMP = 12, IWL_TM_ATTR_FIXRATE = 13, IWL_TM_ATTR_UCODE_OWNER = 14, - IWL_TM_ATTR_SRAM_ADDR = 15, - IWL_TM_ATTR_SRAM_SIZE = 16, - IWL_TM_ATTR_SRAM_DUMP = 17, - IWL_TM_ATTR_FW_VERSION = 18, - IWL_TM_ATTR_DEVICE_ID = 19, - IWL_TM_ATTR_MAX = 20, + IWL_TM_ATTR_MAX = 15, }; /* uCode trace buffer */ @@ -267,8 +221,6 @@ enum iwl_tm_attr_t { #define TRACE_BUFF_SIZE_MIN 0x20000 #define TRACE_BUFF_SIZE_DEF TRACE_BUFF_SIZE_MIN #define TRACE_BUFF_PADD 0x2000 - -/* Maximum data size of each dump it packet */ -#define DUMP_CHUNK_SIZE (PAGE_SIZE - 1024) +#define TRACE_CHUNK_SIZE (PAGE_SIZE - 1024) #endif diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h index f6debf91d7b5..2b6756e8b8f9 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h @@ -219,7 +219,9 @@ struct iwl_trans_pcie { /* INT ICT Table */ __le32 *ict_tbl; + void *ict_tbl_vir; dma_addr_t ict_tbl_dma; + dma_addr_t aligned_ict_tbl_dma; int ict_index; u32 inta; bool use_ict; @@ -234,7 
+236,6 @@ struct iwl_trans_pcie { const u8 *ac_to_fifo[NUM_IWL_RXON_CTX]; const u8 *ac_to_queue[NUM_IWL_RXON_CTX]; u8 mcast_queue[NUM_IWL_RXON_CTX]; - u8 agg_txq[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT]; struct iwl_tx_queue *txq; unsigned long txq_ctx_active_msk; @@ -279,16 +280,20 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, struct iwl_tx_queue *txq, u16 byte_cnt); +void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id); int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, - int sta_id, int tid); + enum iwl_rxon_context_id ctx, int sta_id, + int tid); void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index); void iwl_trans_tx_queue_set_status(struct iwl_trans *trans, struct iwl_tx_queue *txq, int tx_fifo_id, int scd_retry); -int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, int sta_id, int tid); +int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, + enum iwl_rxon_context_id ctx, int sta_id, + int tid, u16 *ssn); void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, enum iwl_rxon_context_id ctx, - int sta_id, int tid, int frame_limit, u16 ssn); + int sta_id, int tid, int frame_limit); void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, int index, enum dma_data_direction dma_dir); int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, @@ -349,13 +354,8 @@ static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq) txq->swq_id = (hwq << 2) | ac; } -static inline u8 iwl_get_queue_ac(struct iwl_tx_queue *txq) -{ - return txq->swq_id & 0x3; -} - static inline void iwl_wake_queue(struct iwl_trans *trans, - struct iwl_tx_queue *txq, const char *msg) + struct iwl_tx_queue *txq) { u8 queue = txq->swq_id; u8 ac = queue & 3; @@ -363,22 +363,13 @@ static inline void iwl_wake_queue(struct iwl_trans *trans, struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - if (test_and_clear_bit(hwq, trans_pcie->queue_stopped)) { - if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) { + if (test_and_clear_bit(hwq, trans_pcie->queue_stopped)) + if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) iwl_wake_sw_queue(priv(trans), ac); - IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d ac %d. %s", - hwq, ac, msg); - } else { - IWL_DEBUG_TX_QUEUES(trans, "Don't wake hwq %d ac %d" - " stop count %d. %s", - hwq, ac, atomic_read(&trans_pcie-> - queue_stop_count[ac]), msg); - } - } } static inline void iwl_stop_queue(struct iwl_trans *trans, - struct iwl_tx_queue *txq, const char *msg) + struct iwl_tx_queue *txq) { u8 queue = txq->swq_id; u8 ac = queue & 3; @@ -386,23 +377,9 @@ static inline void iwl_stop_queue(struct iwl_trans *trans, struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) { - if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) { + if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) + if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) iwl_stop_sw_queue(priv(trans), ac); - IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d ac %d" - " stop count %d. %s", - hwq, ac, atomic_read(&trans_pcie-> - queue_stop_count[ac]), msg); - } else { - IWL_DEBUG_TX_QUEUES(trans, "Don't stop hwq %d ac %d" - " stop count %d. 
%s", - hwq, ac, atomic_read(&trans_pcie-> - queue_stop_count[ac]), msg); - } - } else { - IWL_DEBUG_TX_QUEUES(trans, "stop hwq %d, but it is stopped/ %s", - hwq, msg); - } } #ifdef ieee80211_stop_queue diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c index 752493f00406..374c68cc1d70 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c @@ -594,8 +594,8 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - base = trans->shrd->device_pointers.error_event_table; - if (trans->shrd->ucode_type == IWL_UCODE_INIT) { + base = priv->device_pointers.error_event_table; + if (priv->ucode_type == IWL_UCODE_INIT) { if (!base) base = priv->init_errlog_ptr; } else { @@ -607,7 +607,7 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans) IWL_ERR(trans, "Not valid error log pointer 0x%08X for %s uCode\n", base, - (trans->shrd->ucode_type == IWL_UCODE_INIT) + (priv->ucode_type == IWL_UCODE_INIT) ? "Init" : "RT"); return; } @@ -648,21 +648,6 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans) IWL_ERR(trans, "0x%08X | hw version\n", table.hw_ver); IWL_ERR(trans, "0x%08X | board version\n", table.brd_ver); IWL_ERR(trans, "0x%08X | hcmd\n", table.hcmd); - - IWL_ERR(trans, "0x%08X | isr0\n", table.isr0); - IWL_ERR(trans, "0x%08X | isr1\n", table.isr1); - IWL_ERR(trans, "0x%08X | isr2\n", table.isr2); - IWL_ERR(trans, "0x%08X | isr3\n", table.isr3); - IWL_ERR(trans, "0x%08X | isr4\n", table.isr4); - IWL_ERR(trans, "0x%08X | isr_pref\n", table.isr_pref); - IWL_ERR(trans, "0x%08X | wait_event\n", table.wait_event); - IWL_ERR(trans, "0x%08X | l2p_control\n", table.l2p_control); - IWL_ERR(trans, "0x%08X | l2p_duration\n", table.l2p_duration); - IWL_ERR(trans, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid); - IWL_ERR(trans, "0x%08X | l2p_addr_match\n", table.l2p_addr_match); - IWL_ERR(trans, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); - IWL_ERR(trans, "0x%08X | timestamp\n", table.u_timestamp); - IWL_ERR(trans, "0x%08X | flow_handler\n", table.flow_handler); } /** @@ -672,7 +657,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans) { struct iwl_priv *priv = priv(trans); /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ - if (cfg(priv)->internal_wimax_coex && + if (priv->cfg->internal_wimax_coex && (!(iwl_read_prph(bus(trans), APMG_CLK_CTRL_REG) & APMS_CLK_VAL_MRB_FUNC_MODE) || (iwl_read_prph(bus(trans), APMG_PS_CTRL_REG) & @@ -724,8 +709,8 @@ static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx, if (num_events == 0) return pos; - base = trans->shrd->device_pointers.log_event_table; - if (trans->shrd->ucode_type == IWL_UCODE_INIT) { + base = priv->device_pointers.log_event_table; + if (priv->ucode_type == IWL_UCODE_INIT) { if (!base) base = priv->init_evtlog_ptr; } else { @@ -838,8 +823,8 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log, size_t bufsz = 0; struct iwl_priv *priv = priv(trans); - base = trans->shrd->device_pointers.log_event_table; - if (trans->shrd->ucode_type == IWL_UCODE_INIT) { + base = priv->device_pointers.log_event_table; + if (priv->ucode_type == IWL_UCODE_INIT) { logsize = priv->init_evtlog_size; if (!base) base = priv->init_evtlog_ptr; @@ -853,7 +838,7 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log, IWL_ERR(trans, "Invalid event log pointer 0x%08X for %s uCode\n", base, - 
(trans->shrd->ucode_type == IWL_UCODE_INIT) + (priv->ucode_type == IWL_UCODE_INIT) ? "Init" : "RT"); return -EINVAL; } @@ -1123,7 +1108,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans) isr_stats->tx++; handled |= CSR_INT_BIT_FH_TX; /* Wake up uCode load routine, now that load is complete */ - trans->ucode_write_complete = 1; + priv(trans)->ucode_write_complete = 1; wake_up(&trans->shrd->wait_command_queue); } @@ -1151,11 +1136,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans) * ICT functions * ******************************************************************************/ - -/* a device (PCI-E) page is 4096 bytes long */ -#define ICT_SHIFT 12 -#define ICT_SIZE (1 << ICT_SHIFT) -#define ICT_COUNT (ICT_SIZE / sizeof(u32)) +#define ICT_COUNT (PAGE_SIZE/sizeof(u32)) /* Free dram table */ void iwl_free_isr_ict(struct iwl_trans *trans) @@ -1163,19 +1144,21 @@ void iwl_free_isr_ict(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - if (trans_pcie->ict_tbl) { - dma_free_coherent(bus(trans)->dev, ICT_SIZE, - trans_pcie->ict_tbl, + if (trans_pcie->ict_tbl_vir) { + dma_free_coherent(bus(trans)->dev, + (sizeof(u32) * ICT_COUNT) + PAGE_SIZE, + trans_pcie->ict_tbl_vir, trans_pcie->ict_tbl_dma); - trans_pcie->ict_tbl = NULL; - trans_pcie->ict_tbl_dma = 0; + trans_pcie->ict_tbl_vir = NULL; + memset(&trans_pcie->ict_tbl_dma, 0, + sizeof(trans_pcie->ict_tbl_dma)); + memset(&trans_pcie->aligned_ict_tbl_dma, 0, + sizeof(trans_pcie->aligned_ict_tbl_dma)); } } -/* - * allocate dram shared table, it is an aligned memory - * block of ICT_SIZE. +/* allocate dram shared table it is a PAGE_SIZE aligned * also reset all data related to ICT table interrupt. */ int iwl_alloc_isr_ict(struct iwl_trans *trans) @@ -1183,26 +1166,36 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - trans_pcie->ict_tbl = - dma_alloc_coherent(bus(trans)->dev, ICT_SIZE, - &trans_pcie->ict_tbl_dma, - GFP_KERNEL); - if (!trans_pcie->ict_tbl) + /* allocate shrared data table */ + trans_pcie->ict_tbl_vir = + dma_alloc_coherent(bus(trans)->dev, + (sizeof(u32) * ICT_COUNT) + PAGE_SIZE, + &trans_pcie->ict_tbl_dma, GFP_KERNEL); + if (!trans_pcie->ict_tbl_vir) return -ENOMEM; - /* just an API sanity check ... 
it is guaranteed to be aligned */ - if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { - iwl_free_isr_ict(trans); - return -EINVAL; - } + /* align table to PAGE_SIZE boundary */ + trans_pcie->aligned_ict_tbl_dma = + ALIGN(trans_pcie->ict_tbl_dma, PAGE_SIZE); + + IWL_DEBUG_ISR(trans, "ict dma addr %Lx dma aligned %Lx diff %d\n", + (unsigned long long)trans_pcie->ict_tbl_dma, + (unsigned long long)trans_pcie->aligned_ict_tbl_dma, + (int)(trans_pcie->aligned_ict_tbl_dma - + trans_pcie->ict_tbl_dma)); - IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n", - (unsigned long long)trans_pcie->ict_tbl_dma); + trans_pcie->ict_tbl = trans_pcie->ict_tbl_vir + + (trans_pcie->aligned_ict_tbl_dma - + trans_pcie->ict_tbl_dma); - IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl); + IWL_DEBUG_ISR(trans, "ict vir addr %p vir aligned %p diff %d\n", + trans_pcie->ict_tbl, trans_pcie->ict_tbl_vir, + (int)(trans_pcie->aligned_ict_tbl_dma - + trans_pcie->ict_tbl_dma)); /* reset table and index to all 0 */ - memset(trans_pcie->ict_tbl, 0, ICT_SIZE); + memset(trans_pcie->ict_tbl_vir, 0, + (sizeof(u32) * ICT_COUNT) + PAGE_SIZE); trans_pcie->ict_index = 0; /* add periodic RX interrupt */ @@ -1220,20 +1213,23 @@ int iwl_reset_ict(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - if (!trans_pcie->ict_tbl) + if (!trans_pcie->ict_tbl_vir) return 0; spin_lock_irqsave(&trans->shrd->lock, flags); iwl_disable_interrupts(trans); - memset(trans_pcie->ict_tbl, 0, ICT_SIZE); + memset(&trans_pcie->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT); - val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; + val = trans_pcie->aligned_ict_tbl_dma >> PAGE_SHIFT; val |= CSR_DRAM_INT_TBL_ENABLE; val |= CSR_DRAM_INIT_TBL_WRAP_CHECK; - IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val); + IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%X " + "aligned dma address %Lx\n", + val, + (unsigned long long)trans_pcie->aligned_ict_tbl_dma); iwl_write32(bus(trans), CSR_DRAM_INT_TBL_REG, val); trans_pcie->use_ict = true; @@ -1270,8 +1266,6 @@ static irqreturn_t iwl_isr(int irq, void *data) if (!trans) return IRQ_NONE; - trace_iwlwifi_dev_irq(priv(trans)); - trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); spin_lock_irqsave(&trans->shrd->lock, flags); @@ -1346,7 +1340,6 @@ irqreturn_t iwl_isr_ict(int irq, void *data) struct iwl_trans_pcie *trans_pcie; u32 inta, inta_mask; u32 val = 0; - u32 read; unsigned long flags; if (!trans) @@ -1360,8 +1353,6 @@ irqreturn_t iwl_isr_ict(int irq, void *data) if (!trans_pcie->use_ict) return iwl_isr(irq, data); - trace_iwlwifi_dev_irq(priv(trans)); - spin_lock_irqsave(&trans->shrd->lock, flags); /* Disable (but don't clear!) interrupts here to avoid @@ -1376,29 +1367,24 @@ irqreturn_t iwl_isr_ict(int irq, void *data) /* Ignore interrupt if there's nothing in NIC to service. * This may be due to IRQ shared with another device, * or due to sporadic interrupts thrown from our NIC. */ - read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); - trace_iwlwifi_dev_ict_read(priv(trans), trans_pcie->ict_index, read); - if (!read) { + if (!trans_pcie->ict_tbl[trans_pcie->ict_index]) { IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); goto none; } - /* - * Collect all entries up to the first 0, starting from ict_index; - * note we already read at ict_index. 
- */ - do { - val |= read; + /* read all entries that not 0 start with ict_index */ + while (trans_pcie->ict_tbl[trans_pcie->ict_index]) { + + val |= le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n", - trans_pcie->ict_index, read); + trans_pcie->ict_index, + le32_to_cpu( + trans_pcie->ict_tbl[trans_pcie->ict_index])); trans_pcie->ict_tbl[trans_pcie->ict_index] = 0; trans_pcie->ict_index = iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT); - read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); - trace_iwlwifi_dev_ict_read(priv(trans), trans_pcie->ict_index, - read); - } while (read); + } /* We should not get this value, just ignore it. */ if (val == 0xffffffff) @@ -1425,7 +1411,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data) if (likely(inta)) tasklet_schedule(&trans_pcie->irq_tasklet); else if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) && - !trans_pcie->inta) { + !trans_pcie->inta) { /* Allow interrupt if was disabled by this handler and * no tasklet was schedules, We should not enable interrupt, * tasklet will enable it. @@ -1441,7 +1427,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data) * only Re-enable if disabled by irq. */ if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) && - !trans_pcie->inta) + !trans_pcie->inta) iwl_enable_interrupts(trans); spin_unlock_irqrestore(&trans->shrd->lock, flags); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c index bd29568177e6..4a0c95302a7e 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c @@ -408,7 +408,6 @@ static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id) void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index) { - IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d", txq_id, index & 0xff); iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR, (index & 0xff) | (txq_id << 8)); iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index); @@ -431,7 +430,7 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans, txq->sched_retry = scd_retry; - IWL_DEBUG_TX_QUEUES(trans, "%s %s Queue %d on FIFO %d\n", + IWL_DEBUG_INFO(trans, "%s %s Queue %d on FIFO %d\n", active ? "Activate" : "Deactivate", scd_retry ? 
"BA" : "AC/CMD", txq_id, tx_fifo_id); } @@ -447,21 +446,14 @@ static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie, return -EINVAL; } -static inline bool is_agg_txqid_valid(struct iwl_trans *trans, int txq_id) -{ - if (txq_id < IWLAGN_FIRST_AMPDU_QUEUE) - return false; - return txq_id < (IWLAGN_FIRST_AMPDU_QUEUE + - hw_params(trans).num_ampdu_queues); -} - void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, enum iwl_rxon_context_id ctx, int sta_id, - int tid, int frame_limit, u16 ssn) + int tid, int frame_limit) { - int tx_fifo, txq_id; + int tx_fifo, txq_id, ssn_idx; u16 ra_tid; unsigned long flags; + struct iwl_tid_data *tid_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -477,15 +469,11 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, return; } - txq_id = trans_pcie->agg_txq[sta_id][tid]; - if (WARN_ON_ONCE(is_agg_txqid_valid(trans, txq_id) == false)) { - IWL_ERR(trans, - "queue number out of range: %d, must be %d to %d\n", - txq_id, IWLAGN_FIRST_AMPDU_QUEUE, - IWLAGN_FIRST_AMPDU_QUEUE + - hw_params(trans).num_ampdu_queues - 1); - return; - } + spin_lock_irqsave(&trans->shrd->sta_lock, flags); + tid_data = &trans->shrd->tid_data[sta_id][tid]; + ssn_idx = SEQ_TO_SN(tid_data->seq_number); + txq_id = tid_data->agg.txq_id; + spin_unlock_irqrestore(&trans->shrd->sta_lock, flags); ra_tid = BUILD_RAxTID(sta_id, tid); @@ -505,9 +493,9 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, /* Place first TFD at index corresponding to start sequence number. * Assumes that ssn_idx is valid (!= 0xFFF) */ - trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff); - trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff); - iwl_trans_set_wr_ptrs(trans, txq_id, ssn); + trans_pcie->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); + trans_pcie->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); + iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx); /* Set up Tx window size and frame limit for this queue */ iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr + @@ -551,9 +539,12 @@ static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans) } int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, - int sta_id, int tid) + enum iwl_rxon_context_id ctx, int sta_id, + int tid, u16 *ssn) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_tid_data *tid_data; + unsigned long flags; int txq_id; txq_id = iwlagn_txq_ctx_activate_free(trans); @@ -562,31 +553,34 @@ int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, return -ENXIO; } - trans_pcie->agg_txq[sta_id][tid] = txq_id; + spin_lock_irqsave(&trans->shrd->sta_lock, flags); + tid_data = &trans->shrd->tid_data[sta_id][tid]; + *ssn = SEQ_TO_SN(tid_data->seq_number); + tid_data->agg.txq_id = txq_id; iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id); + tid_data = &trans->shrd->tid_data[sta_id][tid]; + if (tid_data->tfds_in_queue == 0) { + IWL_DEBUG_HT(trans, "HW queue is empty\n"); + tid_data->agg.state = IWL_AGG_ON; + iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid); + } else { + IWL_DEBUG_HT(trans, "HW queue is NOT empty: %d packets in HW" + "queue\n", tid_data->tfds_in_queue); + tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; + } + spin_unlock_irqrestore(&trans->shrd->sta_lock, flags); + return 0; } -int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid) +void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u8 txq_id = 
trans_pcie->agg_txq[sta_id][tid]; - - if (WARN_ON_ONCE(is_agg_txqid_valid(trans, txq_id) == false)) { - IWL_ERR(trans, - "queue number out of range: %d, must be %d to %d\n", - txq_id, IWLAGN_FIRST_AMPDU_QUEUE, - IWLAGN_FIRST_AMPDU_QUEUE + - hw_params(trans).num_ampdu_queues - 1); - return -EINVAL; - } - iwlagn_tx_queue_stop_scheduler(trans, txq_id); iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id)); - trans_pcie->agg_txq[sta_id][tid] = 0; trans_pcie->txq[txq_id].q.read_ptr = 0; trans_pcie->txq[txq_id].q.write_ptr = 0; /* supposes that ssn_idx is valid (!= 0xFFF) */ @@ -595,6 +589,81 @@ int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid) iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id)); iwl_txq_ctx_deactivate(trans_pcie, txq_id); iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0); +} + +int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, + enum iwl_rxon_context_id ctx, int sta_id, + int tid) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + unsigned long flags; + int read_ptr, write_ptr; + struct iwl_tid_data *tid_data; + int txq_id; + + spin_lock_irqsave(&trans->shrd->sta_lock, flags); + + tid_data = &trans->shrd->tid_data[sta_id][tid]; + txq_id = tid_data->agg.txq_id; + + if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) || + (IWLAGN_FIRST_AMPDU_QUEUE + + hw_params(trans).num_ampdu_queues <= txq_id)) { + IWL_ERR(trans, + "queue number out of range: %d, must be %d to %d\n", + txq_id, IWLAGN_FIRST_AMPDU_QUEUE, + IWLAGN_FIRST_AMPDU_QUEUE + + hw_params(trans).num_ampdu_queues - 1); + spin_unlock_irqrestore(&trans->shrd->sta_lock, flags); + return -EINVAL; + } + + switch (trans->shrd->tid_data[sta_id][tid].agg.state) { + case IWL_EMPTYING_HW_QUEUE_ADDBA: + /* + * This can happen if the peer stops aggregation + * again before we've had a chance to drain the + * queue we selected previously, i.e. before the + * session was really started completely. 
+ */ + IWL_DEBUG_HT(trans, "AGG stop before setup done\n"); + goto turn_off; + case IWL_AGG_ON: + break; + default: + IWL_WARN(trans, "Stopping AGG while state not ON " + "or starting for %d on %d (%d)\n", sta_id, tid, + trans->shrd->tid_data[sta_id][tid].agg.state); + spin_unlock_irqrestore(&trans->shrd->sta_lock, flags); + return 0; + } + + write_ptr = trans_pcie->txq[txq_id].q.write_ptr; + read_ptr = trans_pcie->txq[txq_id].q.read_ptr; + + /* The queue is not empty */ + if (write_ptr != read_ptr) { + IWL_DEBUG_HT(trans, "Stopping a non empty AGG HW QUEUE\n"); + trans->shrd->tid_data[sta_id][tid].agg.state = + IWL_EMPTYING_HW_QUEUE_DELBA; + spin_unlock_irqrestore(&trans->shrd->sta_lock, flags); + return 0; + } + + IWL_DEBUG_HT(trans, "HW queue is empty\n"); +turn_off: + trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF; + + /* do not restore/save irqs */ + spin_unlock(&trans->shrd->sta_lock); + spin_lock(&trans->shrd->lock); + + iwl_trans_pcie_txq_agg_disable(trans, txq_id); + + spin_unlock_irqrestore(&trans->shrd->lock, flags); + + iwl_stop_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid); + return 0; } @@ -913,8 +982,7 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd) ret = iwl_enqueue_hcmd(trans, cmd); if (ret < 0) { - IWL_DEBUG_QUIET_RFKILL(trans, - "Error sending %s: enqueue_hcmd failed: %d\n", + IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", get_cmd_string(cmd->id), ret); return ret; } @@ -932,20 +1000,6 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", get_cmd_string(cmd->id)); - if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status)) - return -EBUSY; - - - if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) { - IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n", - get_cmd_string(cmd->id)); - return -ECANCELED; - } - if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) { - IWL_ERR(trans, "Command %s failed: FW Error\n", - get_cmd_string(cmd->id)); - return -EIO; - } set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status); IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", get_cmd_string(cmd->id)); @@ -954,8 +1008,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) if (cmd_idx < 0) { ret = cmd_idx; clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status); - IWL_DEBUG_QUIET_RFKILL(trans, - "Error sending %s: enqueue_hcmd failed: %d\n", + IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", get_cmd_string(cmd->id), ret); return ret; } @@ -969,12 +1022,12 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) &trans_pcie->txq[trans->shrd->cmd_queue]; struct iwl_queue *q = &txq->q; - IWL_DEBUG_QUIET_RFKILL(trans, + IWL_ERR(trans, "Error sending %s: time out after %dms.\n", get_cmd_string(cmd->id), jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); - IWL_DEBUG_QUIET_RFKILL(trans, + IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", q->read_ptr, q->write_ptr); @@ -986,6 +1039,18 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) } } + if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) { + IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n", + get_cmd_string(cmd->id)); + ret = -ECANCELED; + goto fail; + } + if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) { + IWL_ERR(trans, "Command %s failed: FW Error\n", + get_cmd_string(cmd->id)); + ret = -EIO; + goto fail; + } if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) { 
IWL_ERR(trans, "Error: Response NULL in '%s'\n", get_cmd_string(cmd->id)); @@ -1006,7 +1071,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB; } - +fail: if (cmd->reply_page) { iwl_free_pages(trans->shrd, cmd->reply_page); cmd->reply_page = 0; @@ -1050,6 +1115,9 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, return 0; } + IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id, + q->read_ptr, index); + if (WARN_ON(!skb_queue_empty(skbs))) return 0; diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c index 67d6e324e26f..5f17ab8e76ba 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c @@ -88,16 +88,18 @@ static int iwl_trans_rx_alloc(struct iwl_trans *trans) return -EINVAL; /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */ - rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, - &rxq->bd_dma, GFP_KERNEL); + rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, + &rxq->bd_dma, GFP_KERNEL); if (!rxq->bd) goto err_bd; + memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE); /*Allocate the driver's pointer to receive buffer status */ - rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), - &rxq->rb_stts_dma, GFP_KERNEL); + rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts), + &rxq->rb_stts_dma, GFP_KERNEL); if (!rxq->rb_stts) goto err_rb_stts; + memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); return 0; @@ -1042,7 +1044,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx, - u8 sta_id, u8 tid) + u8 sta_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; @@ -1056,12 +1058,13 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, dma_addr_t txcmd_phys; dma_addr_t scratch_phys; u16 len, firstlen, secondlen; + u16 seq_number = 0; u8 wait_write_ptr = 0; u8 txq_id; + u8 tid = 0; bool is_agg = false; __le16 fc = hdr->frame_control; u8 hdr_len = ieee80211_hdrlen(fc); - u16 __maybe_unused wifi_seq; /* * Send this frame after DTIM -- there's a special queue @@ -1082,28 +1085,36 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, txq_id = trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)]; - /* aggregation is on for this */ - if (info->flags & IEEE80211_TX_CTL_AMPDU) { - WARN_ON(tid >= IWL_MAX_TID_COUNT); - txq_id = trans_pcie->agg_txq[sta_id][tid]; - is_agg = true; + if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { + u8 *qc = NULL; + struct iwl_tid_data *tid_data; + qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; + tid_data = &trans->shrd->tid_data[sta_id][tid]; + + if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) + return -1; + + seq_number = tid_data->seq_number; + seq_number &= IEEE80211_SCTL_SEQ; + hdr->seq_ctrl = hdr->seq_ctrl & + cpu_to_le16(IEEE80211_SCTL_FRAG); + hdr->seq_ctrl |= cpu_to_le16(seq_number); + seq_number += 0x10; + /* aggregation is on for this */ + if (info->flags & IEEE80211_TX_CTL_AMPDU) { + WARN_ON_ONCE(tid_data->agg.state != IWL_AGG_ON); + txq_id = tid_data->agg.txq_id; + is_agg = true; + } } + /* Copy MAC header 
from skb into command buffer */ + memcpy(tx_cmd->hdr, hdr, hdr_len); + txq = &trans_pcie->txq[txq_id]; q = &txq->q; - /* In AGG mode, the index in the ring must correspond to the WiFi - * sequence number. This is a HW requirements to help the SCD to parse - * the BA. - * Check here that the packets are in the right place on the ring. - */ -#ifdef CONFIG_IWLWIFI_DEBUG - wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); - WARN_ONCE(is_agg && ((wifi_seq & 0xff) != q->write_ptr), - "Q: %d WiFi Seq %d tfdNum %d", - txq_id, wifi_seq, q->write_ptr); -#endif - /* Set up driver data for this TFD */ txq->skbs[q->write_ptr] = skb; txq->cmd[q->write_ptr] = dev_cmd; @@ -1201,6 +1212,13 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); iwl_txq_update_write_ptr(trans, txq); + if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { + trans->shrd->tid_data[sta_id][tid].tfds_in_queue++; + if (!ieee80211_has_morefrags(fc)) + trans->shrd->tid_data[sta_id][tid].seq_number = + seq_number; + } + /* * At this point the frame is "transmitted" successfully * and we will get a TX status notification eventually, @@ -1212,7 +1230,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, txq->need_update = 1; iwl_txq_update_write_ptr(trans, txq); } else { - iwl_stop_queue(trans, txq, "Queue is full"); + iwl_stop_queue(trans, txq); } } return 0; @@ -1249,48 +1267,101 @@ static int iwl_trans_pcie_request_irq(struct iwl_trans *trans) return 0; } -static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid, +static int iwlagn_txq_check_empty(struct iwl_trans *trans, + int sta_id, u8 tid, int txq_id) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_queue *q = &trans_pcie->txq[txq_id].q; + struct iwl_tid_data *tid_data = &trans->shrd->tid_data[sta_id][tid]; + + lockdep_assert_held(&trans->shrd->sta_lock); + + switch (trans->shrd->tid_data[sta_id][tid].agg.state) { + case IWL_EMPTYING_HW_QUEUE_DELBA: + /* We are reclaiming the last packet of the */ + /* aggregated HW queue */ + if ((txq_id == tid_data->agg.txq_id) && + (q->read_ptr == q->write_ptr)) { + IWL_DEBUG_HT(trans, + "HW queue empty: continue DELBA flow\n"); + iwl_trans_pcie_txq_agg_disable(trans, txq_id); + tid_data->agg.state = IWL_AGG_OFF; + iwl_stop_tx_ba_trans_ready(priv(trans), + NUM_IWL_RXON_CTX, + sta_id, tid); + iwl_wake_queue(trans, &trans_pcie->txq[txq_id]); + } + break; + case IWL_EMPTYING_HW_QUEUE_ADDBA: + /* We are reclaiming the last packet of the queue */ + if (tid_data->tfds_in_queue == 0) { + IWL_DEBUG_HT(trans, + "HW queue empty: continue ADDBA flow\n"); + tid_data->agg.state = IWL_AGG_ON; + iwl_start_tx_ba_trans_ready(priv(trans), + NUM_IWL_RXON_CTX, + sta_id, tid); + } + break; + default: + break; + } + + return 0; +} + +static void iwl_free_tfds_in_queue(struct iwl_trans *trans, + int sta_id, int tid, int freed) +{ + lockdep_assert_held(&trans->shrd->sta_lock); + + if (trans->shrd->tid_data[sta_id][tid].tfds_in_queue >= freed) + trans->shrd->tid_data[sta_id][tid].tfds_in_queue -= freed; + else { + IWL_DEBUG_TX(trans, "free more than tfds_in_queue (%u:%d)\n", + trans->shrd->tid_data[sta_id][tid].tfds_in_queue, + freed); + trans->shrd->tid_data[sta_id][tid].tfds_in_queue = 0; + } +} + +static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid, int txq_id, int ssn, u32 status, struct sk_buff_head *skbs) { struct iwl_trans_pcie *trans_pcie = 
IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; + enum iwl_agg_state agg_state; /* n_bd is usually 256 => n_bd - 1 = 0xff */ int tfd_num = ssn & (txq->q.n_bd - 1); int freed = 0; + bool cond; txq->time_stamp = jiffies; - if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE && - txq_id != trans_pcie->agg_txq[sta_id][tid])) { - /* - * FIXME: this is a uCode bug which need to be addressed, - * log the information and return for now. - * Since it is can possibly happen very often and in order - * not to fill the syslog, don't use IWL_ERR or IWL_WARN - */ - IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, " - "agg_txq[sta_id[tid] %d", txq_id, - trans_pcie->agg_txq[sta_id][tid]); - return 1; + if (txq->sched_retry) { + agg_state = + trans->shrd->tid_data[txq->sta_id][txq->tid].agg.state; + cond = (agg_state != IWL_EMPTYING_HW_QUEUE_DELBA); + } else { + cond = (status != TX_STATUS_FAIL_PASSIVE_NO_RX); } if (txq->q.read_ptr != tfd_num) { - IWL_DEBUG_TX_REPLY(trans, "[Q %d | AC %d] %d -> %d (%d)\n", - txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr, - tfd_num, ssn); + IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim " + "scd_ssn=%d idx=%d txq=%d swq=%d\n", + ssn , tfd_num, txq_id, txq->swq_id); freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs); - if (iwl_queue_space(&txq->q) > txq->q.low_mark && - (!txq->sched_retry || - status != TX_STATUS_FAIL_PASSIVE_NO_RX)) - iwl_wake_queue(trans, txq, "Packets reclaimed"); + if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond) + iwl_wake_queue(trans, txq); } - return 0; + + iwl_free_tfds_in_queue(trans, sta_id, tid, freed); + iwlagn_txq_check_empty(trans, sta_id, tid, txq_id); } static void iwl_trans_pcie_free(struct iwl_trans *trans) { - iwl_calib_free_results(trans); iwl_trans_pcie_tx_free(trans); iwl_trans_pcie_rx_free(trans); free_irq(bus(trans)->irq, trans); @@ -1346,8 +1417,7 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans) #endif /* CONFIG_PM_SLEEP */ static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans, - enum iwl_rxon_context_id ctx, - const char *msg) + enum iwl_rxon_context_id ctx) { u8 ac, txq_id; struct iwl_trans_pcie *trans_pcie = @@ -1355,11 +1425,11 @@ static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans, for (ac = 0; ac < AC_NUM; ac++) { txq_id = trans_pcie->ac_to_queue[ctx][ac]; - IWL_DEBUG_TX_QUEUES(trans, "Queue Status: Q[%d] %s\n", + IWL_DEBUG_INFO(trans, "Queue Status: Q[%d] %s\n", ac, (atomic_read(&trans_pcie->queue_stop_count[ac]) > 0) ? 
"stopped" : "awake"); - iwl_wake_queue(trans, &trans_pcie->txq[txq_id], msg); + iwl_wake_queue(trans, &trans_pcie->txq[txq_id]); } } @@ -1382,12 +1452,11 @@ static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd) return iwl_trans; } -static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id, - const char *msg) +static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - iwl_stop_queue(trans, &trans_pcie->txq[txq_id], msg); + iwl_stop_queue(trans, &trans_pcie->txq[txq_id]); } #define IWL_FLUSH_WAIT_MS 2000 @@ -1442,12 +1511,8 @@ static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt) if (time_after(jiffies, timeout)) { IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id, hw_params(trans).wd_timeout); - IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", + IWL_ERR(trans, "Current read_ptr %d write_ptr %d\n", q->read_ptr, q->write_ptr); - IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n", - iwl_read_prph(bus(trans), SCD_QUEUE_RDPTR(cnt)) - & (TFD_QUEUE_SIZE_MAX - 1), - iwl_read_prph(bus(trans), SCD_QUEUE_WRPTR(cnt))); return 1; } diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-trans.h b/trunk/drivers/net/wireless/iwlwifi/iwl-trans.h index e6bf3f554772..c5923125c3f9 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -171,31 +171,32 @@ struct iwl_trans_ops { void (*tx_start)(struct iwl_trans *trans); void (*wake_any_queue)(struct iwl_trans *trans, - enum iwl_rxon_context_id ctx, - const char *msg); + enum iwl_rxon_context_id ctx); int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); int (*tx)(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx, - u8 sta_id, u8 tid); - int (*reclaim)(struct iwl_trans *trans, int sta_id, int tid, + u8 sta_id); + void (*reclaim)(struct iwl_trans *trans, int sta_id, int tid, int txq_id, int ssn, u32 status, struct sk_buff_head *skbs); int (*tx_agg_disable)(struct iwl_trans *trans, - int sta_id, int tid); + enum iwl_rxon_context_id ctx, int sta_id, + int tid); int (*tx_agg_alloc)(struct iwl_trans *trans, - int sta_id, int tid); + enum iwl_rxon_context_id ctx, int sta_id, int tid, + u16 *ssn); void (*tx_agg_setup)(struct iwl_trans *trans, enum iwl_rxon_context_id ctx, int sta_id, int tid, - int frame_limit, u16 ssn); + int frame_limit); void (*kick_nic)(struct iwl_trans *trans); void (*free)(struct iwl_trans *trans); - void (*stop_queue)(struct iwl_trans *trans, int q, const char *msg); + void (*stop_queue)(struct iwl_trans *trans, int q); int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir); int (*check_stuck_queue)(struct iwl_trans *trans, int q); @@ -206,54 +207,17 @@ struct iwl_trans_ops { #endif }; -/* one for each uCode image (inst/data, boot/init/runtime) */ -struct fw_desc { - dma_addr_t p_addr; /* hardware address */ - void *v_addr; /* software address */ - u32 len; /* size in bytes */ -}; - -struct fw_img { - struct fw_desc code; /* firmware code image */ - struct fw_desc data; /* firmware data image */ -}; - -/* Opaque calibration results */ -struct iwl_calib_result { - struct list_head list; - size_t cmd_len; - struct iwl_calib_hdr hdr; - /* data follows */ -}; - /** * struct iwl_trans - transport common data * @ops - pointer to iwl_trans_ops * @shrd - pointer to iwl_shared which holds shared data from the upper layer * @hcmd_lock: protects HCMD - * 
@ucode_write_complete: indicates that the ucode has been copied. - * @ucode_rt: run time ucode image - * @ucode_init: init ucode image - * @ucode_wowlan: wake on wireless ucode image (optional) - * @nvm_device_type: indicates OTP or eeprom - * @calib_results: list head for init calibration results */ struct iwl_trans { const struct iwl_trans_ops *ops; struct iwl_shared *shrd; spinlock_t hcmd_lock; - u8 ucode_write_complete; /* the image write is complete */ - struct fw_img ucode_rt; - struct fw_img ucode_init; - struct fw_img ucode_wowlan; - - /* eeprom related variables */ - int nvm_device_type; - - /* init calibration results */ - struct list_head calib_results; - /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ char trans_specific[0] __attribute__((__aligned__(sizeof(void *)))); @@ -285,10 +249,9 @@ static inline void iwl_trans_tx_start(struct iwl_trans *trans) } static inline void iwl_trans_wake_any_queue(struct iwl_trans *trans, - enum iwl_rxon_context_id ctx, - const char *msg) + enum iwl_rxon_context_id ctx) { - trans->ops->wake_any_queue(trans, ctx, msg); + trans->ops->wake_any_queue(trans, ctx); } @@ -303,38 +266,39 @@ int iwl_trans_send_cmd_pdu(struct iwl_trans *trans, u8 id, static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx, - u8 sta_id, u8 tid) + u8 sta_id) { - return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id, tid); + return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id); } -static inline int iwl_trans_reclaim(struct iwl_trans *trans, int sta_id, +static inline void iwl_trans_reclaim(struct iwl_trans *trans, int sta_id, int tid, int txq_id, int ssn, u32 status, struct sk_buff_head *skbs) { - return trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, - status, skbs); + trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, status, skbs); } static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans, + enum iwl_rxon_context_id ctx, int sta_id, int tid) { - return trans->ops->tx_agg_disable(trans, sta_id, tid); + return trans->ops->tx_agg_disable(trans, ctx, sta_id, tid); } static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans, - int sta_id, int tid) + enum iwl_rxon_context_id ctx, + int sta_id, int tid, u16 *ssn) { - return trans->ops->tx_agg_alloc(trans, sta_id, tid); + return trans->ops->tx_agg_alloc(trans, ctx, sta_id, tid, ssn); } static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans, enum iwl_rxon_context_id ctx, int sta_id, int tid, - int frame_limit, u16 ssn) + int frame_limit) { - trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit, ssn); + trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit); } static inline void iwl_trans_kick_nic(struct iwl_trans *trans) @@ -347,10 +311,9 @@ static inline void iwl_trans_free(struct iwl_trans *trans) trans->ops->free(trans); } -static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q, - const char *msg) +static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q) { - trans->ops->stop_queue(trans, q, msg); + trans->ops->stop_queue(trans, q); } static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans) @@ -385,13 +348,4 @@ static inline int iwl_trans_resume(struct iwl_trans *trans) ******************************************************/ extern const struct iwl_trans_ops trans_ops_pcie; -int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc, - const void *data, size_t len); -void 
iwl_dealloc_ucode(struct iwl_trans *trans); - -int iwl_send_calib_results(struct iwl_trans *trans); -int iwl_calib_set(struct iwl_trans *trans, - const struct iwl_calib_hdr *cmd, int len); -void iwl_calib_free_results(struct iwl_trans *trans); - #endif /* __iwl_trans_h__ */ diff --git a/trunk/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/trunk/drivers/net/wireless/iwmc3200wifi/cfg80211.c index 48e8218fd23b..c42be81e979e 100644 --- a/trunk/drivers/net/wireless/iwmc3200wifi/cfg80211.c +++ b/trunk/drivers/net/wireless/iwmc3200wifi/cfg80211.c @@ -165,15 +165,11 @@ static int iwm_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, struct key_params *params) { struct iwm_priv *iwm = ndev_to_iwm(ndev); - struct iwm_key *key; + struct iwm_key *key = &iwm->keys[key_index]; int ret; IWM_DBG_WEXT(iwm, DBG, "Adding key for %pM\n", mac_addr); - if (key_index >= IWM_NUM_KEYS) - return -ENOENT; - - key = &iwm->keys[key_index]; memset(key, 0, sizeof(struct iwm_key)); ret = iwm_key_init(key, key_index, mac_addr, params); if (ret < 0) { @@ -218,12 +214,8 @@ static int iwm_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_index, bool pairwise, const u8 *mac_addr) { struct iwm_priv *iwm = ndev_to_iwm(ndev); - struct iwm_key *key; + struct iwm_key *key = &iwm->keys[key_index]; - if (key_index >= IWM_NUM_KEYS) - return -ENOENT; - - key = &iwm->keys[key_index]; if (!iwm->keys[key_index].key_len) { IWM_DBG_WEXT(iwm, DBG, "Key %d not used\n", key_index); return 0; @@ -244,9 +236,6 @@ static int iwm_cfg80211_set_default_key(struct wiphy *wiphy, IWM_DBG_WEXT(iwm, DBG, "Default key index is: %d\n", key_index); - if (key_index >= IWM_NUM_KEYS) - return -ENOENT; - if (!iwm->keys[key_index].key_len) { IWM_ERR(iwm, "Key %d not used\n", key_index); return -EINVAL; diff --git a/trunk/drivers/net/wireless/iwmc3200wifi/main.c b/trunk/drivers/net/wireless/iwmc3200wifi/main.c index 1f868b166d10..98a179f98ea1 100644 --- a/trunk/drivers/net/wireless/iwmc3200wifi/main.c +++ b/trunk/drivers/net/wireless/iwmc3200wifi/main.c @@ -91,11 +91,11 @@ static struct iwm_conf def_iwm_conf = { .mac_addr = {0x00, 0x02, 0xb3, 0x01, 0x02, 0x03}, }; -static bool modparam_reset; +static int modparam_reset; module_param_named(reset, modparam_reset, bool, 0644); MODULE_PARM_DESC(reset, "reset on firmware errors (default 0 [not reset])"); -static bool modparam_wimax_enable = true; +static int modparam_wimax_enable = 1; module_param_named(wimax_enable, modparam_wimax_enable, bool, 0644); MODULE_PARM_DESC(wimax_enable, "Enable wimax core (default 1 [wimax enabled])"); @@ -130,7 +130,7 @@ static void iwm_disconnect_work(struct work_struct *work) iwm_invalidate_mlme_profile(iwm); clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status); - iwm->umac_profile_active = false; + iwm->umac_profile_active = 0; memset(iwm->bssid, 0, ETH_ALEN); iwm->channel = 0; diff --git a/trunk/drivers/net/wireless/iwmc3200wifi/rx.c b/trunk/drivers/net/wireless/iwmc3200wifi/rx.c index 7d708f4395f3..a414768f40f1 100644 --- a/trunk/drivers/net/wireless/iwmc3200wifi/rx.c +++ b/trunk/drivers/net/wireless/iwmc3200wifi/rx.c @@ -660,7 +660,7 @@ static int iwm_mlme_profile_invalidate(struct iwm_priv *iwm, u8 *buf, clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status); clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status); - iwm->umac_profile_active = false; + iwm->umac_profile_active = 0; memset(iwm->bssid, 0, ETH_ALEN); iwm->channel = 0; @@ -735,7 +735,7 @@ static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf, umac_sta->mac_addr, umac_sta->flags & 
UMAC_STA_FLAG_QOS); - sta->valid = true; + sta->valid = 1; sta->qos = umac_sta->flags & UMAC_STA_FLAG_QOS; sta->color = GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR); memcpy(sta->addr, umac_sta->mac_addr, ETH_ALEN); @@ -750,12 +750,12 @@ static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf, sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)]; if (!memcmp(sta->addr, umac_sta->mac_addr, ETH_ALEN)) - sta->valid = false; + sta->valid = 0; break; case UMAC_OPCODE_CLEAR_ALL: for (i = 0; i < IWM_STA_TABLE_NUM; i++) - iwm->sta_table[i].valid = false; + iwm->sta_table[i].valid = 0; break; default: @@ -1203,7 +1203,7 @@ static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf, switch (hdr->oid) { case UMAC_WIFI_IF_CMD_SET_PROFILE: - iwm->umac_profile_active = true; + iwm->umac_profile_active = 1; break; default: break; @@ -1363,7 +1363,7 @@ static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf, */ list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending) if (cmd->seq_num == seq_num) { - cmd->resp_received = true; + cmd->resp_received = 1; cmd->buf.len = buf_size; memcpy(cmd->buf.hdr, buf, buf_size); wake_up_interruptible(&iwm->nonwifi_queue); diff --git a/trunk/drivers/net/wireless/libertas/cfg.c b/trunk/drivers/net/wireless/libertas/cfg.c index a7cd311cb1b7..a7f1ab28940d 100644 --- a/trunk/drivers/net/wireless/libertas/cfg.c +++ b/trunk/drivers/net/wireless/libertas/cfg.c @@ -485,7 +485,6 @@ static int lbs_cfg_set_channel(struct wiphy *wiphy, static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy, struct cmd_header *resp) { - struct cfg80211_bss *bss; struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp; int bsssize; const u8 *pos; @@ -633,14 +632,12 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy, LBS_SCAN_RSSI_TO_MBM(rssi)/100); if (channel && - !(channel->flags & IEEE80211_CHAN_DISABLED)) { - bss = cfg80211_inform_bss(wiphy, channel, + !(channel->flags & IEEE80211_CHAN_DISABLED)) + cfg80211_inform_bss(wiphy, channel, bssid, get_unaligned_le64(tsfdesc), capa, intvl, ie, ielen, LBS_SCAN_RSSI_TO_MBM(rssi), GFP_KERNEL); - cfg80211_put_bss(bss); - } } else lbs_deb_scan("scan response: missing BSS channel IE\n"); @@ -731,11 +728,9 @@ static void lbs_scan_worker(struct work_struct *work) le16_to_cpu(scan_cmd->hdr.size), lbs_ret_scan, 0); - if (priv->scan_channel >= priv->scan_req->n_channels) { + if (priv->scan_channel >= priv->scan_req->n_channels) /* Mark scan done */ - cancel_delayed_work(&priv->scan_work); lbs_scan_done(priv); - } /* Restart network */ if (carrier) @@ -764,12 +759,12 @@ static void _internal_start_scan(struct lbs_private *priv, bool internal, request->n_ssids, request->n_channels, request->ie_len); priv->scan_channel = 0; - priv->scan_req = request; - priv->internal_scan = internal; - queue_delayed_work(priv->work_thread, &priv->scan_work, msecs_to_jiffies(50)); + priv->scan_req = request; + priv->internal_scan = internal; + lbs_deb_leave(LBS_DEB_CFG80211); } @@ -1725,7 +1720,6 @@ static void lbs_join_post(struct lbs_private *priv, 2 + 2 + /* atim */ 2 + 8]; /* extended rates */ u8 *fake = fake_ie; - struct cfg80211_bss *bss; lbs_deb_enter(LBS_DEB_CFG80211); @@ -1769,15 +1763,14 @@ static void lbs_join_post(struct lbs_private *priv, *fake++ = 0x6c; lbs_deb_hex(LBS_DEB_CFG80211, "IE", fake_ie, fake - fake_ie); - bss = cfg80211_inform_bss(priv->wdev->wiphy, - params->channel, - bssid, - 0, - capability, - params->beacon_interval, - fake_ie, fake - fake_ie, - 0, GFP_KERNEL); - cfg80211_put_bss(bss); + 
cfg80211_inform_bss(priv->wdev->wiphy, + params->channel, + bssid, + 0, + capability, + params->beacon_interval, + fake_ie, fake - fake_ie, + 0, GFP_KERNEL); memcpy(priv->wdev->ssid, params->ssid, params->ssid_len); priv->wdev->ssid_len = params->ssid_len; diff --git a/trunk/drivers/net/wireless/libertas/ethtool.c b/trunk/drivers/net/wireless/libertas/ethtool.c index f955b2d66ed6..885ddc1c4fed 100644 --- a/trunk/drivers/net/wireless/libertas/ethtool.c +++ b/trunk/drivers/net/wireless/libertas/ethtool.c @@ -13,14 +13,13 @@ static void lbs_ethtool_get_drvinfo(struct net_device *dev, { struct lbs_private *priv = dev->ml_priv; - snprintf(info->fw_version, sizeof(info->fw_version), - "%u.%u.%u.p%u", + snprintf(info->fw_version, 32, "%u.%u.%u.p%u", priv->fwrelease >> 24 & 0xff, priv->fwrelease >> 16 & 0xff, priv->fwrelease >> 8 & 0xff, priv->fwrelease & 0xff); - strlcpy(info->driver, "libertas", sizeof(info->driver)); - strlcpy(info->version, lbs_driver_version, sizeof(info->version)); + strcpy(info->driver, "libertas"); + strcpy(info->version, lbs_driver_version); } /* diff --git a/trunk/drivers/net/wireless/libertas/if_cs.c b/trunk/drivers/net/wireless/libertas/if_cs.c index 3f7bf4d912b6..e26935179861 100644 --- a/trunk/drivers/net/wireless/libertas/if_cs.c +++ b/trunk/drivers/net/wireless/libertas/if_cs.c @@ -859,7 +859,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev) * Most of the libertas cards can do unaligned register access, but some * weird ones cannot. That's especially true for the CF8305 card. */ - card->align_regs = false; + card->align_regs = 0; card->model = get_model(p_dev->manf_id, p_dev->card_id); if (card->model == MODEL_UNKNOWN) { @@ -871,7 +871,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev) /* Check if we have a current silicon */ prod_id = if_cs_read8(card, IF_CS_PRODUCT_ID); if (card->model == MODEL_8305) { - card->align_regs = true; + card->align_regs = 1; if (prod_id < IF_CS_CF8305_B1_REV) { pr_err("8305 rev B0 and older are not supported\n"); ret = -ENODEV; diff --git a/trunk/drivers/net/wireless/libertas/if_spi.c b/trunk/drivers/net/wireless/libertas/if_spi.c index 50b1ee7721e9..728baa445259 100644 --- a/trunk/drivers/net/wireless/libertas/if_spi.c +++ b/trunk/drivers/net/wireless/libertas/if_spi.c @@ -1291,6 +1291,7 @@ static struct spi_driver libertas_spi_driver = { .remove = __devexit_p(libertas_spi_remove), .driver = { .name = "libertas_spi", + .bus = &spi_bus_type, .owner = THIS_MODULE, .pm = &if_spi_pm_ops, }, diff --git a/trunk/drivers/net/wireless/libertas_tf/main.c b/trunk/drivers/net/wireless/libertas_tf/main.c index a03457292c88..ceb51b6e6702 100644 --- a/trunk/drivers/net/wireless/libertas_tf/main.c +++ b/trunk/drivers/net/wireless/libertas_tf/main.c @@ -719,11 +719,11 @@ void lbtf_bcn_sent(struct lbtf_private *priv) return; if (skb_queue_empty(&priv->bc_ps_buf)) { - bool tx_buff_bc = false; + bool tx_buff_bc = 0; while ((skb = ieee80211_get_buffered_bc(priv->hw, priv->vif))) { skb_queue_tail(&priv->bc_ps_buf, skb); - tx_buff_bc = true; + tx_buff_bc = 1; } if (tx_buff_bc) { ieee80211_stop_queues(priv->hw); diff --git a/trunk/drivers/net/wireless/mac80211_hwsim.c b/trunk/drivers/net/wireless/mac80211_hwsim.c index 4b9e730d2c8a..523ad55a2885 100644 --- a/trunk/drivers/net/wireless/mac80211_hwsim.c +++ b/trunk/drivers/net/wireless/mac80211_hwsim.c @@ -37,8 +37,7 @@ MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211"); MODULE_LICENSE("GPL"); -static u32 wmediumd_pid; - +int wmediumd_pid; 
static int radios = 2; module_param(radios, int, 0444); MODULE_PARM_DESC(radios, "Number of simulated radios"); @@ -666,7 +665,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { bool ack; struct ieee80211_tx_info *txi; - u32 _pid; + int _pid; mac80211_hwsim_monitor_rx(hw, skb); @@ -677,7 +676,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb) } /* wmediumd mode check */ - _pid = ACCESS_ONCE(wmediumd_pid); + _pid = wmediumd_pid; if (_pid) return mac80211_hwsim_tx_frame_nl(hw, skb, _pid); @@ -708,7 +707,7 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw) { struct mac80211_hwsim_data *data = hw->priv; wiphy_debug(hw->wiphy, "%s\n", __func__); - data->started = true; + data->started = 1; return 0; } @@ -716,7 +715,7 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw) static void mac80211_hwsim_stop(struct ieee80211_hw *hw) { struct mac80211_hwsim_data *data = hw->priv; - data->started = false; + data->started = 0; del_timer(&data->beacon_timer); wiphy_debug(hw->wiphy, "%s\n", __func__); } @@ -765,7 +764,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac, struct ieee80211_hw *hw = arg; struct sk_buff *skb; struct ieee80211_tx_info *info; - u32 _pid; + int _pid; hwsim_check_magic(vif); @@ -782,7 +781,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac, mac80211_hwsim_monitor_rx(hw, skb); /* wmediumd mode check */ - _pid = ACCESS_ONCE(wmediumd_pid); + _pid = wmediumd_pid; if (_pid) return mac80211_hwsim_tx_frame_nl(hw, skb, _pid); @@ -1255,7 +1254,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif) struct hwsim_vif_priv *vp = (void *)vif->drv_priv; struct sk_buff *skb; struct ieee80211_pspoll *pspoll; - u32 _pid; + int _pid; if (!vp->assoc) return; @@ -1276,7 +1275,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif) memcpy(pspoll->ta, mac, ETH_ALEN); /* wmediumd mode check */ - _pid = ACCESS_ONCE(wmediumd_pid); + _pid = wmediumd_pid; if (_pid) return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid); @@ -1293,7 +1292,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac, struct hwsim_vif_priv *vp = (void *)vif->drv_priv; struct sk_buff *skb; struct ieee80211_hdr *hdr; - u32 _pid; + int _pid; if (!vp->assoc) return; @@ -1315,7 +1314,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac, memcpy(hdr->addr3, vp->bssid, ETH_ALEN); /* wmediumd mode check */ - _pid = ACCESS_ONCE(wmediumd_pid); + _pid = wmediumd_pid; if (_pid) return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid); @@ -1635,6 +1634,8 @@ static int hwsim_init_netlink(void) int rc; printk(KERN_INFO "mac80211_hwsim: initializing netlink\n"); + wmediumd_pid = 0; + rc = genl_register_family_with_ops(&hwsim_genl_family, hwsim_ops, ARRAY_SIZE(hwsim_ops)); if (rc) @@ -1747,8 +1748,6 @@ static int __init init_mac80211_hwsim(void) IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | IEEE80211_HW_AMPDU_AGGREGATION; - hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; - /* ask mac80211 to reserve space for magic */ hw->vif_data_size = sizeof(struct hwsim_vif_priv); hw->sta_data_size = sizeof(struct hwsim_sta_priv); diff --git a/trunk/drivers/net/wireless/mwifiex/11n_rxreorder.c b/trunk/drivers/net/wireless/mwifiex/11n_rxreorder.c index 681d3f2a4c28..7aa9aa0ac958 100644 --- a/trunk/drivers/net/wireless/mwifiex/11n_rxreorder.c +++ b/trunk/drivers/net/wireless/mwifiex/11n_rxreorder.c @@ -33,7 +33,7 @@ * Since the buffer is linear, the function uses rotation to 
simulate * circular buffer. */ -static void +static int mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr, int start_win) @@ -71,6 +71,8 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, rx_reor_tbl_ptr->start_win = start_win; spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); + + return 0; } /* @@ -81,7 +83,7 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, * Since the buffer is linear, the function uses rotation to simulate * circular buffer. */ -static void +static int mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr) { @@ -117,6 +119,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, rx_reor_tbl_ptr->start_win = (rx_reor_tbl_ptr->start_win + i) &(MAX_TID_VALUE - 1); spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); + return 0; } /* @@ -402,7 +405,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv, u8 *ta, u8 pkt_type, void *payload) { struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr; - int start_win, end_win, win_size; + int start_win, end_win, win_size, ret; u16 pkt_index; rx_reor_tbl_ptr = @@ -449,8 +452,11 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv, start_win = (end_win - win_size) + 1; else start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1; - mwifiex_11n_dispatch_pkt_until_start_win(priv, + ret = mwifiex_11n_dispatch_pkt_until_start_win(priv, rx_reor_tbl_ptr, start_win); + + if (ret) + return ret; } if (pkt_type != PKT_TYPE_BAR) { @@ -469,9 +475,9 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv, * Dispatch all packets sequentially from start_win until a * hole is found and adjust the start_win appropriately */ - mwifiex_11n_scan_and_dispatch(priv, rx_reor_tbl_ptr); + ret = mwifiex_11n_scan_and_dispatch(priv, rx_reor_tbl_ptr); - return 0; + return ret; } /* diff --git a/trunk/drivers/net/wireless/mwifiex/Kconfig b/trunk/drivers/net/wireless/mwifiex/Kconfig index 2a078cea830a..8f2797aa0c60 100644 --- a/trunk/drivers/net/wireless/mwifiex/Kconfig +++ b/trunk/drivers/net/wireless/mwifiex/Kconfig @@ -10,12 +10,12 @@ config MWIFIEX mwifiex. config MWIFIEX_SDIO - tristate "Marvell WiFi-Ex Driver for SD8787/SD8797" + tristate "Marvell WiFi-Ex Driver for SD8787" depends on MWIFIEX && MMC select FW_LOADER ---help--- This adds support for wireless adapters based on Marvell - 8787/8797 chipsets with SDIO interface. + 8787 chipset with SDIO interface. If you choose to build it as a module, it will be called mwifiex_sdio. diff --git a/trunk/drivers/net/wireless/mwifiex/cfg80211.c b/trunk/drivers/net/wireless/mwifiex/cfg80211.c index c3b6c4652cd6..462c71067bfb 100644 --- a/trunk/drivers/net/wireless/mwifiex/cfg80211.c +++ b/trunk/drivers/net/wireless/mwifiex/cfg80211.c @@ -24,26 +24,50 @@ * This function maps the nl802.11 channel type into driver channel type. 
* * The mapping is as follows - - * NL80211_CHAN_NO_HT -> IEEE80211_HT_PARAM_CHA_SEC_NONE - * NL80211_CHAN_HT20 -> IEEE80211_HT_PARAM_CHA_SEC_NONE - * NL80211_CHAN_HT40PLUS -> IEEE80211_HT_PARAM_CHA_SEC_ABOVE - * NL80211_CHAN_HT40MINUS -> IEEE80211_HT_PARAM_CHA_SEC_BELOW - * Others -> IEEE80211_HT_PARAM_CHA_SEC_NONE + * NL80211_CHAN_NO_HT -> NO_SEC_CHANNEL + * NL80211_CHAN_HT20 -> NO_SEC_CHANNEL + * NL80211_CHAN_HT40PLUS -> SEC_CHANNEL_ABOVE + * NL80211_CHAN_HT40MINUS -> SEC_CHANNEL_BELOW + * Others -> NO_SEC_CHANNEL */ -static u8 -mwifiex_cfg80211_channel_type_to_sec_chan_offset(enum nl80211_channel_type - channel_type) +static int +mwifiex_cfg80211_channel_type_to_mwifiex_channels(enum nl80211_channel_type + channel_type) { switch (channel_type) { case NL80211_CHAN_NO_HT: case NL80211_CHAN_HT20: - return IEEE80211_HT_PARAM_CHA_SEC_NONE; + return NO_SEC_CHANNEL; case NL80211_CHAN_HT40PLUS: - return IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + return SEC_CHANNEL_ABOVE; case NL80211_CHAN_HT40MINUS: - return IEEE80211_HT_PARAM_CHA_SEC_BELOW; + return SEC_CHANNEL_BELOW; + default: + return NO_SEC_CHANNEL; + } +} + +/* + * This function maps the driver channel type into nl802.11 channel type. + * + * The mapping is as follows - + * NO_SEC_CHANNEL -> NL80211_CHAN_HT20 + * SEC_CHANNEL_ABOVE -> NL80211_CHAN_HT40PLUS + * SEC_CHANNEL_BELOW -> NL80211_CHAN_HT40MINUS + * Others -> NL80211_CHAN_HT20 + */ +static enum nl80211_channel_type +mwifiex_channels_to_cfg80211_channel_type(int channel_type) +{ + switch (channel_type) { + case NO_SEC_CHANNEL: + return NL80211_CHAN_HT20; + case SEC_CHANNEL_ABOVE: + return NL80211_CHAN_HT40PLUS; + case SEC_CHANNEL_BELOW: + return NL80211_CHAN_HT40MINUS; default: - return IEEE80211_HT_PARAM_CHA_SEC_NONE; + return NL80211_CHAN_HT20; } } @@ -96,11 +120,10 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev, static int mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy, enum nl80211_tx_power_setting type, - int mbm) + int dbm) { struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy); struct mwifiex_power_cfg power_cfg; - int dbm = MBM_TO_DBM(mbm); if (type == NL80211_TX_POWER_FIXED) { power_cfg.is_power_auto = 0; @@ -307,51 +330,37 @@ mwifiex_set_rf_channel(struct mwifiex_private *priv, enum nl80211_channel_type channel_type) { struct mwifiex_chan_freq_power cfp; + struct mwifiex_ds_band_cfg band_cfg; u32 config_bands = 0; struct wiphy *wiphy = priv->wdev->wiphy; - struct mwifiex_adapter *adapter = priv->adapter; if (chan) { + memset(&band_cfg, 0, sizeof(band_cfg)); /* Set appropriate bands */ - if (chan->band == IEEE80211_BAND_2GHZ) { - if (channel_type == NL80211_CHAN_NO_HT) - if (priv->adapter->config_bands == BAND_B || - priv->adapter->config_bands == BAND_G) - config_bands = - priv->adapter->config_bands; - else - config_bands = BAND_B | BAND_G; - else - config_bands = BAND_B | BAND_G | BAND_GN; - } else { - if (channel_type == NL80211_CHAN_NO_HT) - config_bands = BAND_A; - else - config_bands = BAND_AN | BAND_A; + if (chan->band == IEEE80211_BAND_2GHZ) + config_bands = BAND_B | BAND_G | BAND_GN; + else + config_bands = BAND_AN | BAND_A; + if (priv->bss_mode == NL80211_IFTYPE_STATION + || priv->bss_mode == NL80211_IFTYPE_UNSPECIFIED) { + band_cfg.config_bands = config_bands; + } else if (priv->bss_mode == NL80211_IFTYPE_ADHOC) { + band_cfg.config_bands = config_bands; + band_cfg.adhoc_start_band = config_bands; } - if (!((config_bands | adapter->fw_bands) & - ~adapter->fw_bands)) { - adapter->config_bands = config_bands; - if (priv->bss_mode 
== NL80211_IFTYPE_ADHOC) { - adapter->adhoc_start_band = config_bands; - if ((config_bands & BAND_GN) || - (config_bands & BAND_AN)) - adapter->adhoc_11n_enabled = true; - else - adapter->adhoc_11n_enabled = false; - } - } - adapter->sec_chan_offset = - mwifiex_cfg80211_channel_type_to_sec_chan_offset + band_cfg.sec_chan_offset = + mwifiex_cfg80211_channel_type_to_mwifiex_channels (channel_type); - adapter->channel_type = channel_type; + + if (mwifiex_set_radio_band_cfg(priv, &band_cfg)) + return -EFAULT; mwifiex_send_domain_info_cmd_fw(wiphy); } wiphy_dbg(wiphy, "info: setting band %d, channel offset %d and " - "mode %d\n", config_bands, adapter->sec_chan_offset, + "mode %d\n", config_bands, band_cfg.sec_chan_offset, priv->bss_mode); if (!chan) return 0; @@ -687,9 +696,9 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy, const u8 *peer, const struct cfg80211_bitrate_mask *mask) { + struct mwifiex_ds_band_cfg band_cfg; struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); int index = 0, mode = 0, i; - struct mwifiex_adapter *adapter = priv->adapter; /* Currently only 2.4GHz is supported */ for (i = 0; i < mwifiex_band_2ghz.n_bitrates; i++) { @@ -711,15 +720,16 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy, mode |= BAND_B; } - if (!((mode | adapter->fw_bands) & ~adapter->fw_bands)) { - adapter->config_bands = mode; - if (priv->bss_mode == NL80211_IFTYPE_ADHOC) { - adapter->adhoc_start_band = mode; - adapter->adhoc_11n_enabled = false; - } - } - adapter->sec_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; - adapter->channel_type = NL80211_CHAN_NO_HT; + memset(&band_cfg, 0, sizeof(band_cfg)); + band_cfg.config_bands = mode; + + if (priv->bss_mode == NL80211_IFTYPE_ADHOC) + band_cfg.adhoc_start_band = mode; + + band_cfg.sec_chan_offset = NO_SEC_CHANNEL; + + if (mwifiex_set_radio_band_cfg(priv, &band_cfg)) + return -EFAULT; wiphy_debug(wiphy, "info: device configured in 802.11%s%s mode\n", (mode & BAND_B) ? 
"b" : "", @@ -740,13 +750,17 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + if (priv->disconnect) + return -EBUSY; + + priv->disconnect = 1; if (mwifiex_deauthenticate(priv, NULL)) return -EFAULT; wiphy_dbg(wiphy, "info: successfully disconnected from %pM:" " reason code %d\n", priv->cfg_bssid, reason_code); - memset(priv->cfg_bssid, 0, ETH_ALEN); + queue_work(priv->workqueue, &priv->cfg_workqueue); return 0; } @@ -766,7 +780,6 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv) { struct ieee80211_channel *chan; struct mwifiex_bss_info bss_info; - struct cfg80211_bss *bss; int ie_len; u8 ie_buf[IEEE80211_MAX_SSID_LEN + sizeof(struct ieee_types_header)]; enum ieee80211_band band; @@ -787,10 +800,9 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv) ieee80211_channel_to_frequency(bss_info.bss_chan, band)); - bss = cfg80211_inform_bss(priv->wdev->wiphy, chan, + cfg80211_inform_bss(priv->wdev->wiphy, chan, bss_info.bssid, 0, WLAN_CAPABILITY_IBSS, 0, ie_buf, ie_len, 0, GFP_KERNEL); - cfg80211_put_bss(bss); memcpy(priv->cfg_bssid, bss_info.bssid, ETH_ALEN); return 0; @@ -839,7 +851,8 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid, if (channel) ret = mwifiex_set_rf_channel(priv, channel, - priv->adapter->channel_type); + mwifiex_channels_to_cfg80211_channel_type + (priv->adapter->chan_offset)); ret = mwifiex_set_encode(priv, NULL, 0, 0, 1); /* Disable keys */ @@ -965,32 +978,27 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); int ret = 0; + if (priv->assoc_request) + return -EBUSY; + if (priv->bss_mode == NL80211_IFTYPE_ADHOC) { wiphy_err(wiphy, "received infra assoc request " "when station is in ibss mode\n"); goto done; } + priv->assoc_request = -EINPROGRESS; + wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n", (char *) sme->ssid, sme->bssid); ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid, priv->bss_mode, sme->channel, sme, 0); -done: - if (!ret) { - cfg80211_connect_result(priv->netdev, priv->cfg_bssid, NULL, 0, - NULL, 0, WLAN_STATUS_SUCCESS, - GFP_KERNEL); - dev_dbg(priv->adapter->dev, - "info: associated to bssid %pM successfully\n", - priv->cfg_bssid); - } else { - dev_dbg(priv->adapter->dev, - "info: association to bssid %pM failed\n", - priv->cfg_bssid); - memset(priv->cfg_bssid, 0, ETH_ALEN); - } + priv->assoc_request = 1; +done: + priv->assoc_result = ret; + queue_work(priv->workqueue, &priv->cfg_workqueue); return ret; } @@ -1007,29 +1015,28 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy); int ret = 0; + if (priv->ibss_join_request) + return -EBUSY; + if (priv->bss_mode != NL80211_IFTYPE_ADHOC) { wiphy_err(wiphy, "request to join ibss received " "when station is not in ibss mode\n"); goto done; } + priv->ibss_join_request = -EINPROGRESS; + wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n", (char *) params->ssid, params->bssid); ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid, params->bssid, priv->bss_mode, params->channel, NULL, params->privacy); -done: - if (!ret) { - cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL); - dev_dbg(priv->adapter->dev, - "info: joined/created adhoc network with bssid" - " %pM successfully\n", priv->cfg_bssid); - } else { 
- dev_dbg(priv->adapter->dev, - "info: failed creating/joining adhoc network\n"); - } + priv->ibss_join_request = 1; +done: + priv->ibss_join_result = ret; + queue_work(priv->workqueue, &priv->cfg_workqueue); return ret; } @@ -1044,12 +1051,17 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) { struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy); + if (priv->disconnect) + return -EBUSY; + + priv->disconnect = 1; + wiphy_dbg(wiphy, "info: disconnecting from essid %pM\n", priv->cfg_bssid); if (mwifiex_deauthenticate(priv, NULL)) return -EFAULT; - memset(priv->cfg_bssid, 0, ETH_ALEN); + queue_work(priv->workqueue, &priv->cfg_workqueue); return 0; } @@ -1066,42 +1078,15 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_scan_request *request) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); - int i; - struct ieee80211_channel *chan; wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name); - priv->scan_request = request; - - priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), - GFP_KERNEL); - if (!priv->user_scan_cfg) { - dev_err(priv->adapter->dev, "failed to alloc scan_req\n"); - return -ENOMEM; - } - for (i = 0; i < request->n_ssids; i++) { - memcpy(priv->user_scan_cfg->ssid_list[i].ssid, - request->ssids[i].ssid, request->ssids[i].ssid_len); - priv->user_scan_cfg->ssid_list[i].max_len = - request->ssids[i].ssid_len; - } - for (i = 0; i < request->n_channels; i++) { - chan = request->channels[i]; - priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value; - priv->user_scan_cfg->chan_list[i].radio_type = chan->band; - - if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) - priv->user_scan_cfg->chan_list[i].scan_type = - MWIFIEX_SCAN_TYPE_PASSIVE; - else - priv->user_scan_cfg->chan_list[i].scan_type = - MWIFIEX_SCAN_TYPE_ACTIVE; + if (priv->scan_request && priv->scan_request != request) + return -EBUSY; - priv->user_scan_cfg->chan_list[i].scan_time = 0; - } - if (mwifiex_set_user_scan_ioctl(priv, priv->user_scan_cfg)) - return -EFAULT; + priv->scan_request = request; + queue_work(priv->workqueue, &priv->cfg_workqueue); return 0; } @@ -1307,6 +1292,10 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev) priv->media_connected = false; + cancel_work_sync(&priv->cfg_workqueue); + flush_workqueue(priv->workqueue); + destroy_workqueue(priv->workqueue); + priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; return 0; @@ -1384,6 +1373,9 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv) memcpy(wdev->wiphy->perm_addr, priv->curr_addr, ETH_ALEN); wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; + /* We are using custom domains */ + wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY; + /* Reserve space for bss band information */ wdev->wiphy->bss_priv_size = sizeof(u8); @@ -1412,3 +1404,100 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv) return ret; } + +/* + * This function handles the result of different pending network operations. 
+ * + * The following operations are handled and CFG802.11 subsystem is + * notified accordingly - + * - Scan request completion + * - Association request completion + * - IBSS join request completion + * - Disconnect request completion + */ +void +mwifiex_cfg80211_results(struct work_struct *work) +{ + struct mwifiex_private *priv = + container_of(work, struct mwifiex_private, cfg_workqueue); + struct mwifiex_user_scan_cfg *scan_req; + int ret = 0, i; + struct ieee80211_channel *chan; + + if (priv->scan_request) { + scan_req = kzalloc(sizeof(struct mwifiex_user_scan_cfg), + GFP_KERNEL); + if (!scan_req) { + dev_err(priv->adapter->dev, "failed to alloc " + "scan_req\n"); + return; + } + for (i = 0; i < priv->scan_request->n_ssids; i++) { + memcpy(scan_req->ssid_list[i].ssid, + priv->scan_request->ssids[i].ssid, + priv->scan_request->ssids[i].ssid_len); + scan_req->ssid_list[i].max_len = + priv->scan_request->ssids[i].ssid_len; + } + for (i = 0; i < priv->scan_request->n_channels; i++) { + chan = priv->scan_request->channels[i]; + scan_req->chan_list[i].chan_number = chan->hw_value; + scan_req->chan_list[i].radio_type = chan->band; + if (chan->flags & IEEE80211_CHAN_DISABLED) + scan_req->chan_list[i].scan_type = + MWIFIEX_SCAN_TYPE_PASSIVE; + else + scan_req->chan_list[i].scan_type = + MWIFIEX_SCAN_TYPE_ACTIVE; + scan_req->chan_list[i].scan_time = 0; + } + if (mwifiex_set_user_scan_ioctl(priv, scan_req)) + ret = -EFAULT; + priv->scan_result_status = ret; + dev_dbg(priv->adapter->dev, "info: %s: sending scan results\n", + __func__); + cfg80211_scan_done(priv->scan_request, + (priv->scan_result_status < 0)); + priv->scan_request = NULL; + kfree(scan_req); + } + + if (priv->assoc_request == 1) { + if (!priv->assoc_result) { + cfg80211_connect_result(priv->netdev, priv->cfg_bssid, + NULL, 0, NULL, 0, + WLAN_STATUS_SUCCESS, + GFP_KERNEL); + dev_dbg(priv->adapter->dev, + "info: associated to bssid %pM successfully\n", + priv->cfg_bssid); + } else { + dev_dbg(priv->adapter->dev, + "info: association to bssid %pM failed\n", + priv->cfg_bssid); + memset(priv->cfg_bssid, 0, ETH_ALEN); + } + priv->assoc_request = 0; + priv->assoc_result = 0; + } + + if (priv->ibss_join_request == 1) { + if (!priv->ibss_join_result) { + cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, + GFP_KERNEL); + dev_dbg(priv->adapter->dev, + "info: joined/created adhoc network with bssid" + " %pM successfully\n", priv->cfg_bssid); + } else { + dev_dbg(priv->adapter->dev, + "info: failed creating/joining adhoc network\n"); + } + priv->ibss_join_request = 0; + priv->ibss_join_result = 0; + } + + if (priv->disconnect) { + memset(priv->cfg_bssid, 0, ETH_ALEN); + priv->disconnect = 0; + } +} diff --git a/trunk/drivers/net/wireless/mwifiex/cfg80211.h b/trunk/drivers/net/wireless/mwifiex/cfg80211.h index 76c76c60438b..8d010f2500c5 100644 --- a/trunk/drivers/net/wireless/mwifiex/cfg80211.h +++ b/trunk/drivers/net/wireless/mwifiex/cfg80211.h @@ -26,4 +26,5 @@ int mwifiex_register_cfg80211(struct mwifiex_private *); +void mwifiex_cfg80211_results(struct work_struct *work); #endif diff --git a/trunk/drivers/net/wireless/mwifiex/cfp.c b/trunk/drivers/net/wireless/mwifiex/cfp.c index 1782a77f15dc..f2e6de03805c 100644 --- a/trunk/drivers/net/wireless/mwifiex/cfp.c +++ b/trunk/drivers/net/wireless/mwifiex/cfp.c @@ -75,32 +75,18 @@ static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 }; * This function maps an index in supported rates table into * the corresponding data rate. 
*/ -u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index, - u8 ht_info) +u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info) { - /* - * For every mcs_rate line, the first 8 bytes are for stream 1x1, - * and all 16 bytes are for stream 2x2. - */ - u16 mcs_rate[4][16] = { - /* LGI 40M */ - { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e, - 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c }, - - /* SGI 40M */ - { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c, - 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 }, - - /* LGI 20M */ - { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82, - 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 }, - - /* SGI 20M */ - { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90, - 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 } - }; - u32 mcs_num_supp = - (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) ? 16 : 8; + u16 mcs_rate[4][8] = { + {0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e} + , /* LG 40M */ + {0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c} + , /* SG 40M */ + {0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82} + , /* LG 20M */ + {0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90} + }; /* SG 20M */ + u32 rate; if (ht_info & BIT(0)) { @@ -109,7 +95,7 @@ u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index, rate = 0x0D; /* MCS 32 SGI rate */ else rate = 0x0C; /* MCS 32 LGI rate */ - } else if (index < mcs_num_supp) { + } else if (index < 8) { if (ht_info & BIT(1)) { if (ht_info & BIT(2)) /* SGI, 40M */ diff --git a/trunk/drivers/net/wireless/mwifiex/fw.h b/trunk/drivers/net/wireless/mwifiex/fw.h index 51c5417c569c..0cc5d73cb0c1 100644 --- a/trunk/drivers/net/wireless/mwifiex/fw.h +++ b/trunk/drivers/net/wireless/mwifiex/fw.h @@ -165,7 +165,6 @@ enum MWIFIEX_802_11_WEP_STATUS { #define GET_RXMCSSUPP(DevMCSSupported) (DevMCSSupported & 0x0f) #define SETHT_MCS32(x) (x[4] |= 1) -#define HT_STREAM_2X2 0x22 #define SET_SECONDARYCHAN(RadioType, SECCHAN) (RadioType |= (SECCHAN << 4)) @@ -376,6 +375,8 @@ enum mwifiex_chan_scan_mode_bitmasks { MWIFIEX_DISABLE_CHAN_FILT = BIT(1), }; +#define SECOND_CHANNEL_BELOW 0x30 +#define SECOND_CHANNEL_ABOVE 0x10 struct mwifiex_chan_scan_param_set { u8 radio_type; u8 chan_number; @@ -672,7 +673,7 @@ struct host_cmd_ds_802_11_ad_hoc_start { union ieee_types_phy_param_set phy_param_set; u16 reserved1; __le16 cap_info_bitmap; - u8 data_rate[HOSTCMD_SUPPORTED_RATES]; + u8 DataRate[HOSTCMD_SUPPORTED_RATES]; } __packed; struct host_cmd_ds_802_11_ad_hoc_result { diff --git a/trunk/drivers/net/wireless/mwifiex/init.c b/trunk/drivers/net/wireless/mwifiex/init.c index e05b417a3fae..d792b3fb7c16 100644 --- a/trunk/drivers/net/wireless/mwifiex/init.c +++ b/trunk/drivers/net/wireless/mwifiex/init.c @@ -187,6 +187,8 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) struct mwifiex_opt_sleep_confirm *sleep_cfm_buf = NULL; skb_put(adapter->sleep_cfm, sizeof(struct mwifiex_opt_sleep_confirm)); + sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *) + (adapter->sleep_cfm->data); adapter->cmd_sent = false; @@ -246,14 +248,12 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) memset(adapter->event_body, 0, sizeof(adapter->event_body)); adapter->hw_dot_11n_dev_cap = 0; adapter->hw_dev_mcs_support = 0; - adapter->sec_chan_offset = 0; + adapter->chan_offset = 0; adapter->adhoc_11n_enabled = false; mwifiex_wmm_init(adapter); if (adapter->sleep_cfm) { - sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *) - adapter->sleep_cfm->data; memset(sleep_cfm_buf, 0, 
adapter->sleep_cfm->len); sleep_cfm_buf->command = cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH); @@ -282,45 +282,6 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) adapter->arp_filter_size = 0; } -/* - * This function sets trans_start per tx_queue - */ -void mwifiex_set_trans_start(struct net_device *dev) -{ - int i; - - for (i = 0; i < dev->num_tx_queues; i++) - netdev_get_tx_queue(dev, i)->trans_start = jiffies; - - dev->trans_start = jiffies; -} - -/* - * This function wakes up all queues in net_device - */ -void mwifiex_wake_up_net_dev_queue(struct net_device *netdev, - struct mwifiex_adapter *adapter) -{ - unsigned long dev_queue_flags; - - spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags); - netif_tx_wake_all_queues(netdev); - spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags); -} - -/* - * This function stops all queues in net_device - */ -void mwifiex_stop_net_dev_queue(struct net_device *netdev, - struct mwifiex_adapter *adapter) -{ - unsigned long dev_queue_flags; - - spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags); - netif_tx_stop_all_queues(netdev); - spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags); -} - /* * This function releases the lock variables and frees the locks and * associated locks. @@ -398,7 +359,6 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter) spin_lock_init(&adapter->int_lock); spin_lock_init(&adapter->main_proc_lock); spin_lock_init(&adapter->mwifiex_cmd_lock); - spin_lock_init(&adapter->queue_lock); for (i = 0; i < adapter->priv_num; i++) { if (adapter->priv[i]) { priv = adapter->priv[i]; diff --git a/trunk/drivers/net/wireless/mwifiex/ioctl.h b/trunk/drivers/net/wireless/mwifiex/ioctl.h index d5d81f1fe41c..e0b68e7c8ca2 100644 --- a/trunk/drivers/net/wireless/mwifiex/ioctl.h +++ b/trunk/drivers/net/wireless/mwifiex/ioctl.h @@ -62,6 +62,17 @@ enum { BAND_AN = 16, }; +#define NO_SEC_CHANNEL 0 +#define SEC_CHANNEL_ABOVE 1 +#define SEC_CHANNEL_BELOW 3 + +struct mwifiex_ds_band_cfg { + u32 config_bands; + u32 adhoc_start_band; + u32 adhoc_channel; + u32 sec_chan_offset; +}; + enum { ADHOC_IDLE, ADHOC_STARTED, diff --git a/trunk/drivers/net/wireless/mwifiex/join.c b/trunk/drivers/net/wireless/mwifiex/join.c index 0b0eb5efba9d..62b4c2938608 100644 --- a/trunk/drivers/net/wireless/mwifiex/join.c +++ b/trunk/drivers/net/wireless/mwifiex/join.c @@ -724,8 +724,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, u32 cmd_append_size = 0; u32 i; u16 tmp_cap; + uint16_t ht_cap_info; struct mwifiex_ie_types_chan_list_param_set *chan_tlv; - u8 radio_type; struct mwifiex_ie_types_htcap *ht_cap; struct mwifiex_ie_types_htinfo *ht_info; @@ -837,8 +837,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL; } - memset(adhoc_start->data_rate, 0, sizeof(adhoc_start->data_rate)); - mwifiex_get_active_data_rates(priv, adhoc_start->data_rate); + memset(adhoc_start->DataRate, 0, sizeof(adhoc_start->DataRate)); + mwifiex_get_active_data_rates(priv, adhoc_start->DataRate); if ((adapter->adhoc_start_band & BAND_G) && (priv->curr_pkt_filter & HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON)) { if (mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL, @@ -850,19 +850,20 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, } } /* Find the last non zero */ - for (i = 0; i < sizeof(adhoc_start->data_rate); i++) - if (!adhoc_start->data_rate[i]) - break; + for (i = 0; i < sizeof(adhoc_start->DataRate) && + adhoc_start->DataRate[i]; + 
i++) + ; priv->curr_bss_params.num_of_rates = i; /* Copy the ad-hoc creating rates into Current BSS rate structure */ memcpy(&priv->curr_bss_params.data_rates, - &adhoc_start->data_rate, priv->curr_bss_params.num_of_rates); + &adhoc_start->DataRate, priv->curr_bss_params.num_of_rates); dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%02x %02x %02x %02x\n", - adhoc_start->data_rate[0], adhoc_start->data_rate[1], - adhoc_start->data_rate[2], adhoc_start->data_rate[3]); + adhoc_start->DataRate[0], adhoc_start->DataRate[1], + adhoc_start->DataRate[2], adhoc_start->DataRate[3]); dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n"); @@ -885,14 +886,12 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, = mwifiex_band_to_radio_type(priv->curr_bss_params.band); if (adapter->adhoc_start_band & BAND_GN || adapter->adhoc_start_band & BAND_AN) { - if (adapter->sec_chan_offset == - IEEE80211_HT_PARAM_CHA_SEC_ABOVE) + if (adapter->chan_offset == SEC_CHANNEL_ABOVE) chan_tlv->chan_scan_param[0].radio_type |= - (IEEE80211_HT_PARAM_CHA_SEC_ABOVE << 4); - else if (adapter->sec_chan_offset == - IEEE80211_HT_PARAM_CHA_SEC_ABOVE) + SECOND_CHANNEL_ABOVE; + else if (adapter->chan_offset == SEC_CHANNEL_BELOW) chan_tlv->chan_scan_param[0].radio_type |= - (IEEE80211_HT_PARAM_CHA_SEC_BELOW << 4); + SECOND_CHANNEL_BELOW; } dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Band = %d\n", chan_tlv->chan_scan_param[0].radio_type); @@ -915,40 +914,55 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, } if (adapter->adhoc_11n_enabled) { - /* Fill HT CAPABILITY */ - ht_cap = (struct mwifiex_ie_types_htcap *) pos; - memset(ht_cap, 0, sizeof(struct mwifiex_ie_types_htcap)); - ht_cap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY); - ht_cap->header.len = - cpu_to_le16(sizeof(struct ieee80211_ht_cap)); - radio_type = mwifiex_band_to_radio_type( - priv->adapter->config_bands); - mwifiex_fill_cap_info(priv, radio_type, ht_cap); - - pos += sizeof(struct mwifiex_ie_types_htcap); - cmd_append_size += - sizeof(struct mwifiex_ie_types_htcap); - - /* Fill HT INFORMATION */ - ht_info = (struct mwifiex_ie_types_htinfo *) pos; - memset(ht_info, 0, sizeof(struct mwifiex_ie_types_htinfo)); - ht_info->header.type = cpu_to_le16(WLAN_EID_HT_INFORMATION); - ht_info->header.len = - cpu_to_le16(sizeof(struct ieee80211_ht_info)); + { + ht_cap = (struct mwifiex_ie_types_htcap *) pos; + memset(ht_cap, 0, + sizeof(struct mwifiex_ie_types_htcap)); + ht_cap->header.type = + cpu_to_le16(WLAN_EID_HT_CAPABILITY); + ht_cap->header.len = + cpu_to_le16(sizeof(struct ieee80211_ht_cap)); + ht_cap_info = le16_to_cpu(ht_cap->ht_cap.cap_info); + + ht_cap_info |= IEEE80211_HT_CAP_SGI_20; + if (adapter->chan_offset) { + ht_cap_info |= IEEE80211_HT_CAP_SGI_40; + ht_cap_info |= IEEE80211_HT_CAP_DSSSCCK40; + ht_cap_info |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; + SETHT_MCS32(ht_cap->ht_cap.mcs.rx_mask); + } - ht_info->ht_info.control_chan = - (u8) priv->curr_bss_params.bss_descriptor.channel; - if (adapter->sec_chan_offset) { - ht_info->ht_info.ht_param = adapter->sec_chan_offset; - ht_info->ht_info.ht_param |= + ht_cap->ht_cap.ampdu_params_info + = IEEE80211_HT_MAX_AMPDU_64K; + ht_cap->ht_cap.mcs.rx_mask[0] = 0xff; + pos += sizeof(struct mwifiex_ie_types_htcap); + cmd_append_size += + sizeof(struct mwifiex_ie_types_htcap); + } + { + ht_info = (struct mwifiex_ie_types_htinfo *) pos; + memset(ht_info, 0, + sizeof(struct mwifiex_ie_types_htinfo)); + ht_info->header.type = + cpu_to_le16(WLAN_EID_HT_INFORMATION); + 
ht_info->header.len = + cpu_to_le16(sizeof(struct ieee80211_ht_info)); + ht_info->ht_info.control_chan = + (u8) priv->curr_bss_params.bss_descriptor. + channel; + if (adapter->chan_offset) { + ht_info->ht_info.ht_param = + adapter->chan_offset; + ht_info->ht_info.ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; + } + ht_info->ht_info.operation_mode = + cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); + ht_info->ht_info.basic_set[0] = 0xff; + pos += sizeof(struct mwifiex_ie_types_htinfo); + cmd_append_size += + sizeof(struct mwifiex_ie_types_htinfo); } - ht_info->ht_info.operation_mode = - cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); - ht_info->ht_info.basic_set[0] = 0xff; - pos += sizeof(struct mwifiex_ie_types_htinfo); - cmd_append_size += - sizeof(struct mwifiex_ie_types_htinfo); } cmd->size = cpu_to_le16((u16) diff --git a/trunk/drivers/net/wireless/mwifiex/main.c b/trunk/drivers/net/wireless/mwifiex/main.c index 84be196188cc..67e6db7d672d 100644 --- a/trunk/drivers/net/wireless/mwifiex/main.c +++ b/trunk/drivers/net/wireless/mwifiex/main.c @@ -401,7 +401,7 @@ mwifiex_fill_buffer(struct sk_buff *skb) static int mwifiex_open(struct net_device *dev) { - netif_tx_start_all_queues(dev); + netif_start_queue(dev); return 0; } @@ -465,8 +465,8 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) atomic_inc(&priv->adapter->tx_pending); if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) { - mwifiex_set_trans_start(dev); - mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter); + netif_stop_queue(priv->netdev); + dev->trans_start = jiffies; } queue_work(priv->adapter->workqueue, &priv->adapter->main_work); @@ -533,7 +533,7 @@ mwifiex_tx_timeout(struct net_device *dev) dev_err(priv->adapter->dev, "%lu : Tx timeout, bss_index=%d\n", jiffies, priv->bss_index); - mwifiex_set_trans_start(dev); + dev->trans_start = jiffies; priv->num_tx_timeout++; } @@ -586,6 +586,8 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv, priv->media_connected = false; memset(&priv->nick_name, 0, sizeof(priv->nick_name)); priv->num_tx_timeout = 0; + priv->workqueue = create_singlethread_workqueue("cfg80211_wq"); + INIT_WORK(&priv->cfg_workqueue, mwifiex_cfg80211_results); memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN); } @@ -791,8 +793,7 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem) priv = adapter->priv[i]; if (priv && priv->netdev) { if (!netif_queue_stopped(priv->netdev)) - mwifiex_stop_net_dev_queue(priv->netdev, - adapter); + netif_stop_queue(priv->netdev); if (netif_carrier_ok(priv->netdev)) netif_carrier_off(priv->netdev); } diff --git a/trunk/drivers/net/wireless/mwifiex/main.h b/trunk/drivers/net/wireless/mwifiex/main.h index 3186aa437f42..30f138b6fa4c 100644 --- a/trunk/drivers/net/wireless/mwifiex/main.h +++ b/trunk/drivers/net/wireless/mwifiex/main.h @@ -453,8 +453,15 @@ struct mwifiex_private { u8 scan_pending_on_block; u8 report_scan_result; struct cfg80211_scan_request *scan_request; - struct mwifiex_user_scan_cfg *user_scan_cfg; + int scan_result_status; + int assoc_request; + u16 assoc_result; + int ibss_join_request; + u16 ibss_join_result; + bool disconnect; u8 cfg_bssid[6]; + struct workqueue_struct *workqueue; + struct work_struct cfg_workqueue; u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; struct wps wps; u8 scan_block; @@ -640,8 +647,7 @@ struct mwifiex_adapter { u32 hw_dot_11n_dev_cap; u8 hw_dev_mcs_support; u8 adhoc_11n_enabled; - u8 sec_chan_offset; - enum nl80211_channel_type channel_type; + u8 chan_offset; 
struct mwifiex_dbg dbg; u8 arp_filter[ARP_FILTER_MAX_BUF_SIZE]; u32 arp_filter_size; @@ -649,19 +655,10 @@ struct mwifiex_adapter { struct mwifiex_wait_queue cmd_wait_q; u8 scan_wait_q_woken; struct cmd_ctrl_node *cmd_queued; - spinlock_t queue_lock; /* lock for tx queues */ }; int mwifiex_init_lock_list(struct mwifiex_adapter *adapter); -void mwifiex_set_trans_start(struct net_device *dev); - -void mwifiex_stop_net_dev_queue(struct net_device *netdev, - struct mwifiex_adapter *adapter); - -void mwifiex_wake_up_net_dev_queue(struct net_device *netdev, - struct mwifiex_adapter *adapter); - int mwifiex_init_fw(struct mwifiex_adapter *adapter); int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter); @@ -778,8 +775,7 @@ struct mwifiex_chan_freq_power * struct mwifiex_chan_freq_power *mwifiex_get_cfp_by_band_and_freq_from_cfg80211( struct mwifiex_private *priv, u8 band, u32 freq); -u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index, - u8 ht_info); +u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info); u32 mwifiex_find_freq_from_band_chan(u8, u8); int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask, u8 **buffer); @@ -955,6 +951,8 @@ int mwifiex_main_process(struct mwifiex_adapter *); int mwifiex_bss_set_channel(struct mwifiex_private *, struct mwifiex_chan_freq_power *cfp); +int mwifiex_set_radio_band_cfg(struct mwifiex_private *, + struct mwifiex_ds_band_cfg *); int mwifiex_get_bss_info(struct mwifiex_private *, struct mwifiex_bss_info *); int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, diff --git a/trunk/drivers/net/wireless/mwifiex/pcie.c b/trunk/drivers/net/wireless/mwifiex/pcie.c index 405350940a45..d34acf082d3a 100644 --- a/trunk/drivers/net/wireless/mwifiex/pcie.c +++ b/trunk/drivers/net/wireless/mwifiex/pcie.c @@ -386,7 +386,8 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter) card->txbd_ring_vbase = kzalloc(card->txbd_ring_size, GFP_KERNEL); if (!card->txbd_ring_vbase) { dev_err(adapter->dev, "Unable to allocate buffer for txbd ring.\n"); - return -ENOMEM; + kfree(card->txbd_ring_vbase); + return -1; } card->txbd_ring_pbase = virt_to_phys(card->txbd_ring_vbase); @@ -476,7 +477,7 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter) if (!card->rxbd_ring_vbase) { dev_err(adapter->dev, "Unable to allocate buffer for " "rxbd_ring.\n"); - return -ENOMEM; + return -1; } card->rxbd_ring_pbase = virt_to_phys(card->rxbd_ring_vbase); @@ -569,7 +570,7 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter) if (!card->evtbd_ring_vbase) { dev_err(adapter->dev, "Unable to allocate buffer. 
" "Terminating download\n"); - return -ENOMEM; + return -1; } card->evtbd_ring_pbase = virt_to_phys(card->evtbd_ring_vbase); @@ -1228,16 +1229,15 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter, if (!skb) return 0; - if (rdptr >= MWIFIEX_MAX_EVT_BD) { + if (rdptr >= MWIFIEX_MAX_EVT_BD) dev_err(adapter->dev, "event_complete: Invalid rdptr 0x%x\n", rdptr); - return -EINVAL; - } /* Read the event ring write pointer set by firmware */ if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) { dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_WRPTR\n"); - return -1; + ret = -1; + goto done; } if (!card->evt_buf_list[rdptr]) { @@ -1266,9 +1266,15 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter, /* Write the event ring read pointer in to REG_EVTBD_RDPTR */ if (mwifiex_write_reg(adapter, REG_EVTBD_RDPTR, card->evtbd_rdptr)) { dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_RDPTR\n"); - return -1; + ret = -1; + goto done; } +done: + /* Free the buffer for failure case */ + if (ret && skb) + dev_kfree_skb_any(skb); + dev_dbg(adapter->dev, "info: Check Events Again\n"); ret = mwifiex_pcie_process_event_ready(adapter); @@ -1666,8 +1672,9 @@ static int mwifiex_pcie_host_to_card(struct mwifiex_adapter *adapter, u8 type, struct sk_buff *skb, struct mwifiex_tx_param *tx_param) { - if (!skb) { - dev_err(adapter->dev, "Passed NULL skb to %s\n", __func__); + if (!adapter || !skb) { + dev_err(adapter->dev, "Invalid parameter in %s <%p, %p>\n", + __func__, adapter, skb); return -1; } diff --git a/trunk/drivers/net/wireless/mwifiex/scan.c b/trunk/drivers/net/wireless/mwifiex/scan.c index 6396d3318ead..8d3ab378662b 100644 --- a/trunk/drivers/net/wireless/mwifiex/scan.c +++ b/trunk/drivers/net/wireless/mwifiex/scan.c @@ -500,6 +500,7 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv, struct ieee80211_channel *ch; struct mwifiex_adapter *adapter = priv->adapter; int chan_idx = 0, i; + u8 scan_type; for (band = 0; (band < IEEE80211_NUM_BANDS) ; band++) { @@ -513,20 +514,19 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv, if (ch->flags & IEEE80211_CHAN_DISABLED) continue; scan_chan_list[chan_idx].radio_type = band; - + scan_type = ch->flags & IEEE80211_CHAN_PASSIVE_SCAN; if (user_scan_in && user_scan_in->chan_list[0].scan_time) scan_chan_list[chan_idx].max_scan_time = cpu_to_le16((u16) user_scan_in-> chan_list[0].scan_time); - else if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) + else if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE) scan_chan_list[chan_idx].max_scan_time = cpu_to_le16(adapter->passive_scan_time); else scan_chan_list[chan_idx].max_scan_time = cpu_to_le16(adapter->active_scan_time); - - if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) + if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE) scan_chan_list[chan_idx].chan_scan_mode_bitmap |= MWIFIEX_PASSIVE_SCAN; else @@ -1391,8 +1391,11 @@ int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv, { int status; + priv->adapter->scan_wait_q_woken = false; + status = mwifiex_scan_networks(priv, scan_req); - queue_work(priv->adapter->workqueue, &priv->adapter->main_work); + if (!status) + status = mwifiex_wait_queue_complete(priv->adapter); return status; } @@ -1534,6 +1537,11 @@ mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid, return 0; } +static void mwifiex_free_bss_priv(struct cfg80211_bss *bss) +{ + kfree(bss->priv); +} + /* * This function handles the command response of scan. 
* @@ -1759,7 +1767,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, cap_info_bitmap, beacon_period, ie_buf, ie_len, rssi, GFP_KERNEL); *(u8 *)bss->priv = band; - cfg80211_put_bss(bss); + bss->free_priv = mwifiex_free_bss_priv; if (priv->media_connected && !memcmp(bssid, priv->curr_bss_params.bss_descriptor @@ -1793,14 +1801,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, up(&priv->async_sem); } - if (priv->user_scan_cfg) { - dev_dbg(priv->adapter->dev, "info: %s: sending scan " - "results\n", __func__); - cfg80211_scan_done(priv->scan_request, 0); - priv->scan_request = NULL; - kfree(priv->user_scan_cfg); - priv->user_scan_cfg = NULL; - } } else { /* Get scan command from scan_pending_q and put to cmd_pending_q */ diff --git a/trunk/drivers/net/wireless/mwifiex/sdio.c b/trunk/drivers/net/wireless/mwifiex/sdio.c index d39d8457f252..283171bbcedf 100644 --- a/trunk/drivers/net/wireless/mwifiex/sdio.c +++ b/trunk/drivers/net/wireless/mwifiex/sdio.c @@ -256,13 +256,10 @@ static int mwifiex_sdio_resume(struct device *dev) /* Device ID for SD8787 */ #define SDIO_DEVICE_ID_MARVELL_8787 (0x9119) -/* Device ID for SD8797 */ -#define SDIO_DEVICE_ID_MARVELL_8797 (0x9129) /* WLAN IDs */ static const struct sdio_device_id mwifiex_ids[] = { {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787)}, - {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797)}, {}, }; @@ -1087,7 +1084,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, (adapter->ioport | 0x1000 | (card->mpa_rx.ports << 4)) + card->mpa_rx.start_port, 1)) - goto error; + return -1; curr_ptr = card->mpa_rx.buf; @@ -1130,29 +1127,12 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, if (mwifiex_sdio_card_to_host(adapter, &pkt_type, skb->data, skb->len, adapter->ioport + port)) - goto error; + return -1; mwifiex_decode_rx_packet(adapter, skb, pkt_type); } return 0; - -error: - if (MP_RX_AGGR_IN_PROGRESS(card)) { - /* Multiport-aggregation transfer failed - cleanup */ - for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) { - /* copy pkt to deaggr buf */ - skb_deaggr = card->mpa_rx.skb_arr[pind]; - dev_kfree_skb_any(skb_deaggr); - } - MP_RX_AGGR_BUF_RESET(card); - } - - if (f_do_rx_cur) - /* Single transfer pending. 
Free curr buff also */ - dev_kfree_skb_any(skb); - - return -1; } /* @@ -1288,6 +1268,7 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) dev_dbg(adapter->dev, "info: CFG reg val =%x\n", cr); + dev_kfree_skb_any(skb); return -1; } } @@ -1592,16 +1573,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) sdio_set_drvdata(func, card); adapter->dev = &func->dev; - - switch (func->device) { - case SDIO_DEVICE_ID_MARVELL_8797: - strcpy(adapter->fw_name, SD8797_DEFAULT_FW_NAME); - break; - case SDIO_DEVICE_ID_MARVELL_8787: - default: - strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME); - break; - } + strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME); return 0; @@ -1658,14 +1630,14 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter) card->mpa_tx.pkt_cnt = 0; card->mpa_tx.start_port = 0; - card->mpa_tx.enabled = 1; + card->mpa_tx.enabled = 0; card->mpa_tx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT; card->mpa_rx.buf_len = 0; card->mpa_rx.pkt_cnt = 0; card->mpa_rx.start_port = 0; - card->mpa_rx.enabled = 1; + card->mpa_rx.enabled = 0; card->mpa_rx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT; /* Allocate buffers for SDIO MP-A */ @@ -1802,5 +1774,4 @@ MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION); MODULE_VERSION(SDIO_VERSION); MODULE_LICENSE("GPL v2"); -MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME); -MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME); +MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin"); diff --git a/trunk/drivers/net/wireless/mwifiex/sdio.h b/trunk/drivers/net/wireless/mwifiex/sdio.h index a3fb322205b0..3f711801e58a 100644 --- a/trunk/drivers/net/wireless/mwifiex/sdio.h +++ b/trunk/drivers/net/wireless/mwifiex/sdio.h @@ -29,7 +29,6 @@ #include "main.h" #define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin" -#define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin" #define BLOCK_MODE 1 #define BYTE_MODE 0 diff --git a/trunk/drivers/net/wireless/mwifiex/sta_cmd.c b/trunk/drivers/net/wireless/mwifiex/sta_cmd.c index 6e443ffa0465..ea6518d1c9e3 100644 --- a/trunk/drivers/net/wireless/mwifiex/sta_cmd.c +++ b/trunk/drivers/net/wireless/mwifiex/sta_cmd.c @@ -748,7 +748,7 @@ static int mwifiex_cmd_802_11_rf_channel(struct mwifiex_private *priv, cpu_to_le16(HostCmd_SCAN_RADIO_TYPE_A); rf_type = le16_to_cpu(rf_chan->rf_type); - SET_SECONDARYCHAN(rf_type, priv->adapter->sec_chan_offset); + SET_SECONDARYCHAN(rf_type, priv->adapter->chan_offset); rf_chan->current_channel = cpu_to_le16(*channel); } rf_chan->action = cpu_to_le16(cmd_action); diff --git a/trunk/drivers/net/wireless/mwifiex/sta_cmdresp.c b/trunk/drivers/net/wireless/mwifiex/sta_cmdresp.c index e812db8b695c..7a16b0c417af 100644 --- a/trunk/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/trunk/drivers/net/wireless/mwifiex/sta_cmdresp.c @@ -508,7 +508,7 @@ static int mwifiex_ret_802_11_tx_rate_query(struct mwifiex_private *priv, priv->tx_htinfo = resp->params.tx_rate.ht_info; if (!priv->is_data_rate_auto) priv->data_rate = - mwifiex_index_to_data_rate(priv, priv->tx_rate, + mwifiex_index_to_data_rate(priv->tx_rate, priv->tx_htinfo); return 0; diff --git a/trunk/drivers/net/wireless/mwifiex/sta_event.c b/trunk/drivers/net/wireless/mwifiex/sta_event.c index d7aa21da84d0..f204810e8338 100644 --- a/trunk/drivers/net/wireless/mwifiex/sta_event.c +++ b/trunk/drivers/net/wireless/mwifiex/sta_event.c @@ -115,17 +115,18 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv) if (adapter->num_cmd_timeout && adapter->curr_cmd) return; 
priv->media_connected = false; - dev_dbg(adapter->dev, "info: successfully disconnected from" - " %pM: reason code %d\n", priv->cfg_bssid, - WLAN_REASON_DEAUTH_LEAVING); - if (priv->bss_mode == NL80211_IFTYPE_STATION) { - cfg80211_disconnected(priv->netdev, WLAN_REASON_DEAUTH_LEAVING, - NULL, 0, GFP_KERNEL); + if (!priv->disconnect) { + priv->disconnect = 1; + dev_dbg(adapter->dev, "info: successfully disconnected from" + " %pM: reason code %d\n", priv->cfg_bssid, + WLAN_REASON_DEAUTH_LEAVING); + cfg80211_disconnected(priv->netdev, + WLAN_REASON_DEAUTH_LEAVING, NULL, 0, + GFP_KERNEL); + queue_work(priv->workqueue, &priv->cfg_workqueue); } - memset(priv->cfg_bssid, 0, ETH_ALEN); - if (!netif_queue_stopped(priv->netdev)) - mwifiex_stop_net_dev_queue(priv->netdev, adapter); + netif_stop_queue(priv->netdev); if (netif_carrier_ok(priv->netdev)) netif_carrier_off(priv->netdev); /* Reset wireless stats signal info */ @@ -200,7 +201,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) if (!netif_carrier_ok(priv->netdev)) netif_carrier_on(priv->netdev); if (netif_queue_stopped(priv->netdev)) - mwifiex_wake_up_net_dev_queue(priv->netdev, adapter); + netif_wake_queue(priv->netdev); break; case EVENT_DEAUTHENTICATED: @@ -291,7 +292,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) priv->adhoc_is_link_sensed = false; mwifiex_clean_txrx(priv); if (!netif_queue_stopped(priv->netdev)) - mwifiex_stop_net_dev_queue(priv->netdev, adapter); + netif_stop_queue(priv->netdev); if (netif_carrier_ok(priv->netdev)) netif_carrier_off(priv->netdev); break; diff --git a/trunk/drivers/net/wireless/mwifiex/sta_ioctl.c b/trunk/drivers/net/wireless/mwifiex/sta_ioctl.c index 470ca75ec250..ea4a29b7e331 100644 --- a/trunk/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/trunk/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -55,14 +55,9 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter) { bool cancel_flag = false; int status = adapter->cmd_wait_q.status; - struct cmd_ctrl_node *cmd_queued; + struct cmd_ctrl_node *cmd_queued = adapter->cmd_queued; - if (!adapter->cmd_queued) - return 0; - - cmd_queued = adapter->cmd_queued; adapter->cmd_queued = NULL; - dev_dbg(adapter->dev, "cmd pending\n"); atomic_inc(&adapter->cmd_pending); @@ -239,7 +234,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, "associating...\n"); if (!netif_queue_stopped(priv->netdev)) - mwifiex_stop_net_dev_queue(priv->netdev, adapter); + netif_stop_queue(priv->netdev); /* Clear any past association response stored for * application retrieval */ @@ -270,7 +265,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, ret = mwifiex_check_network_compatibility(priv, bss_desc); if (!netif_queue_stopped(priv->netdev)) - mwifiex_stop_net_dev_queue(priv->netdev, adapter); + netif_stop_queue(priv->netdev); if (!ret) { dev_dbg(adapter->dev, "info: network found in scan" @@ -476,6 +471,67 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv, return 0; } +/* + * The function sets band configurations. + * + * it performs extra checks to make sure the Ad-Hoc + * band and channel are compatible. Otherwise it returns an error. 
+ * + */ +int mwifiex_set_radio_band_cfg(struct mwifiex_private *priv, + struct mwifiex_ds_band_cfg *radio_cfg) +{ + struct mwifiex_adapter *adapter = priv->adapter; + u8 infra_band, adhoc_band; + u32 adhoc_channel; + + infra_band = (u8) radio_cfg->config_bands; + adhoc_band = (u8) radio_cfg->adhoc_start_band; + adhoc_channel = radio_cfg->adhoc_channel; + + /* SET Infra band */ + if ((infra_band | adapter->fw_bands) & ~adapter->fw_bands) + return -1; + + adapter->config_bands = infra_band; + + /* SET Ad-hoc Band */ + if ((adhoc_band | adapter->fw_bands) & ~adapter->fw_bands) + return -1; + + if (adhoc_band) + adapter->adhoc_start_band = adhoc_band; + adapter->chan_offset = (u8) radio_cfg->sec_chan_offset; + /* + * If no adhoc_channel is supplied verify if the existing adhoc + * channel compiles with new adhoc_band + */ + if (!adhoc_channel) { + if (!mwifiex_get_cfp_by_band_and_channel_from_cfg80211 + (priv, adapter->adhoc_start_band, + priv->adhoc_channel)) { + /* Pass back the default channel */ + radio_cfg->adhoc_channel = DEFAULT_AD_HOC_CHANNEL; + if ((adapter->adhoc_start_band & BAND_A) + || (adapter->adhoc_start_band & BAND_AN)) + radio_cfg->adhoc_channel = + DEFAULT_AD_HOC_CHANNEL_A; + } + } else { /* Retrurn error if adhoc_band and + adhoc_channel combination is invalid */ + if (!mwifiex_get_cfp_by_band_and_channel_from_cfg80211 + (priv, adapter->adhoc_start_band, (u16) adhoc_channel)) + return -1; + priv->adhoc_channel = (u8) adhoc_channel; + } + if ((adhoc_band & BAND_GN) || (adhoc_band & BAND_AN)) + adapter->adhoc_11n_enabled = true; + else + adapter->adhoc_11n_enabled = false; + + return 0; +} + /* * The function disables auto deep sleep mode. */ @@ -776,8 +832,8 @@ int mwifiex_drv_get_data_rate(struct mwifiex_private *priv, if (!ret) { if (rate->is_rate_auto) - rate->rate = mwifiex_index_to_data_rate(priv, - priv->tx_rate, priv->tx_htinfo); + rate->rate = mwifiex_index_to_data_rate(priv->tx_rate, + priv->tx_htinfo); else rate->rate = priv->data_rate; } else { diff --git a/trunk/drivers/net/wireless/mwifiex/sta_rx.c b/trunk/drivers/net/wireless/mwifiex/sta_rx.c index 5e1ef7e5da4f..27430512f7cd 100644 --- a/trunk/drivers/net/wireless/mwifiex/sta_rx.c +++ b/trunk/drivers/net/wireless/mwifiex/sta_rx.c @@ -126,9 +126,6 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter, u16 rx_pkt_type; struct mwifiex_private *priv = adapter->priv[rx_info->bss_index]; - if (!priv) - return -1; - local_rx_pd = (struct rxpd *) (skb->data); rx_pkt_type = local_rx_pd->rx_pkt_type; @@ -192,11 +189,12 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter, (u8) local_rx_pd->rx_pkt_type, skb); - if (ret || (rx_pkt_type == PKT_TYPE_BAR)) - dev_kfree_skb_any(skb); + if (ret || (rx_pkt_type == PKT_TYPE_BAR)) { + if (priv && (ret == -1)) + priv->stats.rx_dropped++; - if (ret) - priv->stats.rx_dropped++; + dev_kfree_skb_any(skb); + } return ret; } diff --git a/trunk/drivers/net/wireless/mwifiex/txrx.c b/trunk/drivers/net/wireless/mwifiex/txrx.c index d9274a1b77ac..a206f412875f 100644 --- a/trunk/drivers/net/wireless/mwifiex/txrx.c +++ b/trunk/drivers/net/wireless/mwifiex/txrx.c @@ -134,7 +134,7 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter, if (!priv) goto done; - mwifiex_set_trans_start(priv->netdev); + priv->netdev->trans_start = jiffies; if (!status) { priv->stats.tx_packets++; priv->stats.tx_bytes += skb->len; @@ -152,8 +152,7 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter, if ((GET_BSS_ROLE(tpriv) == MWIFIEX_BSS_ROLE_STA) && 
(tpriv->media_connected)) { if (netif_queue_stopped(tpriv->netdev)) - mwifiex_wake_up_net_dev_queue(tpriv->netdev, - adapter); + netif_wake_queue(tpriv->netdev); } } done: diff --git a/trunk/drivers/net/wireless/mwl8k.c b/trunk/drivers/net/wireless/mwl8k.c index 7becea3dec65..995695c28d5c 100644 --- a/trunk/drivers/net/wireless/mwl8k.c +++ b/trunk/drivers/net/wireless/mwl8k.c @@ -28,10 +28,10 @@ #define MWL8K_DESC "Marvell TOPDOG(R) 802.11 Wireless Network Driver" #define MWL8K_NAME KBUILD_MODNAME -#define MWL8K_VERSION "0.13" +#define MWL8K_VERSION "0.12" /* Module parameters */ -static bool ap_mode_default; +static unsigned ap_mode_default; module_param(ap_mode_default, bool, 0); MODULE_PARM_DESC(ap_mode_default, "Set to 1 to make ap mode the default instead of sta mode"); @@ -198,7 +198,6 @@ struct mwl8k_priv { /* firmware access */ struct mutex fw_mutex; struct task_struct *fw_mutex_owner; - struct task_struct *hw_restart_owner; int fw_mutex_depth; struct completion *hostcmd_wait; @@ -263,10 +262,6 @@ struct mwl8k_priv { */ struct ieee80211_tx_queue_params wmm_params[MWL8K_TX_WMM_QUEUES]; - /* To perform the task of reloading the firmware */ - struct work_struct fw_reload; - bool hw_restart_in_progress; - /* async firmware loading state */ unsigned fw_state; char *fw_pref; @@ -743,10 +738,10 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw) ready_code = ioread32(priv->regs + MWL8K_HIU_INT_CODE); if (ready_code == MWL8K_FWAP_READY) { - priv->ap_fw = true; + priv->ap_fw = 1; break; } else if (ready_code == MWL8K_FWSTA_READY) { - priv->ap_fw = false; + priv->ap_fw = 0; break; } @@ -1503,18 +1498,6 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw) might_sleep(); - /* Since fw restart is in progress, allow only the firmware - * commands from the restart code and block the other - * commands since they are going to fail in any case since - * the firmware has crashed - */ - if (priv->hw_restart_in_progress) { - if (priv->hw_restart_owner == current) - return 0; - else - return -EBUSY; - } - /* * The TX queues are stopped at this point, so this test * doesn't need to take ->tx_lock. @@ -1558,8 +1541,6 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw) wiphy_err(hw->wiphy, "tx rings stuck for %d ms\n", MWL8K_TX_WAIT_TIMEOUT_MS); mwl8k_dump_tx_rings(hw); - priv->hw_restart_in_progress = true; - ieee80211_queue_work(hw, &priv->fw_reload); rc = -ETIMEDOUT; } @@ -2077,9 +2058,7 @@ static int mwl8k_fw_lock(struct ieee80211_hw *hw) rc = mwl8k_tx_wait_empty(hw); if (rc) { - if (!priv->hw_restart_in_progress) - ieee80211_wake_queues(hw); - + ieee80211_wake_queues(hw); mutex_unlock(&priv->fw_mutex); return rc; @@ -2098,9 +2077,7 @@ static void mwl8k_fw_unlock(struct ieee80211_hw *hw) struct mwl8k_priv *priv = hw->priv; if (!--priv->fw_mutex_depth) { - if (!priv->hw_restart_in_progress) - ieee80211_wake_queues(hw); - + ieee80211_wake_queues(hw); priv->fw_mutex_owner = NULL; mutex_unlock(&priv->fw_mutex); } @@ -4421,8 +4398,7 @@ static void mwl8k_stop(struct ieee80211_hw *hw) struct mwl8k_priv *priv = hw->priv; int i; - if (!priv->hw_restart_in_progress) - mwl8k_cmd_radio_disable(hw); + mwl8k_cmd_radio_disable(hw); ieee80211_stop_queues(hw); @@ -4523,16 +4499,6 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw, return 0; } -static void mwl8k_remove_vif(struct mwl8k_priv *priv, struct mwl8k_vif *vif) -{ - /* Has ieee80211_restart_hw re-added the removed interfaces? 
*/ - if (!priv->macids_used) - return; - - priv->macids_used &= ~(1 << vif->macid); - list_del(&vif->list); -} - static void mwl8k_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -4544,54 +4510,8 @@ static void mwl8k_remove_interface(struct ieee80211_hw *hw, mwl8k_cmd_set_mac_addr(hw, vif, "\x00\x00\x00\x00\x00\x00"); - mwl8k_remove_vif(priv, mwl8k_vif); -} - -static void mwl8k_hw_restart_work(struct work_struct *work) -{ - struct mwl8k_priv *priv = - container_of(work, struct mwl8k_priv, fw_reload); - struct ieee80211_hw *hw = priv->hw; - struct mwl8k_device_info *di; - int rc; - - /* If some command is waiting for a response, clear it */ - if (priv->hostcmd_wait != NULL) { - complete(priv->hostcmd_wait); - priv->hostcmd_wait = NULL; - } - - priv->hw_restart_owner = current; - di = priv->device_info; - mwl8k_fw_lock(hw); - - if (priv->ap_fw) - rc = mwl8k_reload_firmware(hw, di->fw_image_ap); - else - rc = mwl8k_reload_firmware(hw, di->fw_image_sta); - - if (rc) - goto fail; - - priv->hw_restart_owner = NULL; - priv->hw_restart_in_progress = false; - - /* - * This unlock will wake up the queues and - * also opens the command path for other - * commands - */ - mwl8k_fw_unlock(hw); - - ieee80211_restart_hw(hw); - - wiphy_err(hw->wiphy, "Firmware restarted successfully\n"); - - return; -fail: - mwl8k_fw_unlock(hw); - - wiphy_err(hw->wiphy, "Firmware restart failed\n"); + priv->macids_used &= ~(1 << mwl8k_vif->macid); + list_del(&mwl8k_vif->list); } static int mwl8k_config(struct ieee80211_hw *hw, u32 changed) @@ -5104,11 +5024,7 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, for (i = 0; i < MAX_AMPDU_ATTEMPTS; i++) { rc = mwl8k_check_ba(hw, stream); - /* If HW restart is in progress mwl8k_post_cmd will - * return -EBUSY. 
Avoid retrying mwl8k_check_ba in - * such cases - */ - if (!rc || rc == -EBUSY) + if (!rc) break; /* * HW queues take time to be flushed, give them @@ -5128,14 +5044,14 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid); break; case IEEE80211_AMPDU_TX_STOP: - if (stream) { - if (stream->state == AMPDU_STREAM_ACTIVE) { - spin_unlock(&priv->stream_lock); - mwl8k_destroy_ba(hw, stream); - spin_lock(&priv->stream_lock); - } - mwl8k_remove_stream(hw, stream); + if (stream == NULL) + break; + if (stream->state == AMPDU_STREAM_ACTIVE) { + spin_unlock(&priv->stream_lock); + mwl8k_destroy_ba(hw, stream); + spin_lock(&priv->stream_lock); } + mwl8k_remove_stream(hw, stream); ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid); break; case IEEE80211_AMPDU_TX_OPERATIONAL: @@ -5347,15 +5263,12 @@ static void mwl8k_fw_state_machine(const struct firmware *fw, void *context) mwl8k_release_firmware(priv); } -#define MAX_RESTART_ATTEMPTS 1 static int mwl8k_init_firmware(struct ieee80211_hw *hw, char *fw_image, bool nowait) { struct mwl8k_priv *priv = hw->priv; int rc; - int count = MAX_RESTART_ATTEMPTS; -retry: /* Reset firmware and hardware */ mwl8k_hw_reset(priv); @@ -5377,16 +5290,6 @@ static int mwl8k_init_firmware(struct ieee80211_hw *hw, char *fw_image, /* Reclaim memory once firmware is successfully loaded */ mwl8k_release_firmware(priv); - if (rc && count) { - /* FW did not start successfully; - * lets try one more time - */ - count--; - wiphy_err(hw->wiphy, "Trying to reload the firmware again\n"); - msleep(20); - goto retry; - } - return rc; } @@ -5462,14 +5365,7 @@ static int mwl8k_probe_hw(struct ieee80211_hw *hw) goto err_free_queues; } - /* - * When hw restart is requested, - * mac80211 will take care of clearing - * the ampdu streams, so do not clear - * the ampdu state here - */ - if (!priv->hw_restart_in_progress) - memset(priv->ampdu, 0, sizeof(priv->ampdu)); + memset(priv->ampdu, 0, sizeof(priv->ampdu)); /* * Temporarily enable interrupts. Initial firmware host @@ -5543,20 +5439,10 @@ static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image) { int i, rc = 0; struct mwl8k_priv *priv = hw->priv; - struct mwl8k_vif *vif, *tmp_vif; mwl8k_stop(hw); mwl8k_rxq_deinit(hw, 0); - /* - * All the existing interfaces are re-added by the ieee80211_reconfig; - * which means driver should remove existing interfaces before calling - * ieee80211_restart_hw - */ - if (priv->hw_restart_in_progress) - list_for_each_entry_safe(vif, tmp_vif, &priv->vif_list, list) - mwl8k_remove_vif(priv, vif); - for (i = 0; i < mwl8k_tx_queues(priv); i++) mwl8k_txq_deinit(hw, i); @@ -5568,9 +5454,6 @@ static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image) if (rc) goto fail; - if (priv->hw_restart_in_progress) - return rc; - rc = mwl8k_start(hw); if (rc) goto fail; @@ -5634,15 +5517,13 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv) INIT_LIST_HEAD(&priv->vif_list); /* Set default radio state and preamble */ - priv->radio_on = false; - priv->radio_short_preamble = false; + priv->radio_on = 0; + priv->radio_short_preamble = 0; /* Finalize join worker */ INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker); /* Handle watchdog ba events */ INIT_WORK(&priv->watchdog_ba_handle, mwl8k_watchdog_ba_events); - /* To reload the firmware if it crashes */ - INIT_WORK(&priv->fw_reload, mwl8k_hw_restart_work); /* TX reclaim and RX tasklets. 
*/ tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw); @@ -5786,9 +5667,6 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, rc = mwl8k_init_firmware(hw, priv->fw_pref, true); if (rc) goto err_stop_firmware; - - priv->hw_restart_in_progress = false; - return rc; err_stop_firmware: diff --git a/trunk/drivers/net/wireless/orinoco/main.c b/trunk/drivers/net/wireless/orinoco/main.c index 9fb77d0319f5..b52acc4b4086 100644 --- a/trunk/drivers/net/wireless/orinoco/main.c +++ b/trunk/drivers/net/wireless/orinoco/main.c @@ -121,7 +121,7 @@ module_param(orinoco_debug, int, 0644); MODULE_PARM_DESC(orinoco_debug, "Debug level"); #endif -static bool suppress_linkstatus; /* = 0 */ +static int suppress_linkstatus; /* = 0 */ module_param(suppress_linkstatus, bool, 0644); MODULE_PARM_DESC(suppress_linkstatus, "Don't log link status changes"); diff --git a/trunk/drivers/net/wireless/orinoco/scan.c b/trunk/drivers/net/wireless/orinoco/scan.c index 96e39edfec77..e99ca1c1e0d8 100644 --- a/trunk/drivers/net/wireless/orinoco/scan.c +++ b/trunk/drivers/net/wireless/orinoco/scan.c @@ -76,7 +76,6 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv, { struct wiphy *wiphy = priv_to_wiphy(priv); struct ieee80211_channel *channel; - struct cfg80211_bss *cbss; u8 *ie; u8 ie_buf[46]; u64 timestamp; @@ -122,10 +121,9 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv, beacon_interval = le16_to_cpu(bss->a.beacon_interv); signal = SIGNAL_TO_MBM(le16_to_cpu(bss->a.level)); - cbss = cfg80211_inform_bss(wiphy, channel, bss->a.bssid, timestamp, - capability, beacon_interval, ie_buf, ie_len, - signal, GFP_KERNEL); - cfg80211_put_bss(cbss); + cfg80211_inform_bss(wiphy, channel, bss->a.bssid, timestamp, + capability, beacon_interval, ie_buf, ie_len, + signal, GFP_KERNEL); } void orinoco_add_extscan_result(struct orinoco_private *priv, @@ -134,7 +132,6 @@ void orinoco_add_extscan_result(struct orinoco_private *priv, { struct wiphy *wiphy = priv_to_wiphy(priv); struct ieee80211_channel *channel; - struct cfg80211_bss *cbss; const u8 *ie; u64 timestamp; s32 signal; @@ -155,10 +152,9 @@ void orinoco_add_extscan_result(struct orinoco_private *priv, ie = bss->data; signal = SIGNAL_TO_MBM(bss->level); - cbss = cfg80211_inform_bss(wiphy, channel, bss->bssid, timestamp, - capability, beacon_interval, ie, ie_len, - signal, GFP_KERNEL); - cfg80211_put_bss(cbss); + cfg80211_inform_bss(wiphy, channel, bss->bssid, timestamp, + capability, beacon_interval, ie, ie_len, + signal, GFP_KERNEL); } void orinoco_add_hostscan_results(struct orinoco_private *priv, diff --git a/trunk/drivers/net/wireless/p54/main.c b/trunk/drivers/net/wireless/p54/main.c index af2ca1a9c7d3..db4d9a02f264 100644 --- a/trunk/drivers/net/wireless/p54/main.c +++ b/trunk/drivers/net/wireless/p54/main.c @@ -27,7 +27,7 @@ #include "p54.h" #include "lmac.h" -static bool modparam_nohwcrypt; +static int modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); MODULE_AUTHOR("Michael Wu "); diff --git a/trunk/drivers/net/wireless/p54/p54spi.c b/trunk/drivers/net/wireless/p54/p54spi.c index 7faed62c6378..78d0d6988553 100644 --- a/trunk/drivers/net/wireless/p54/p54spi.c +++ b/trunk/drivers/net/wireless/p54/p54spi.c @@ -581,7 +581,11 @@ static void p54spi_op_stop(struct ieee80211_hw *dev) struct p54s_priv *priv = dev->priv; unsigned long flags; - mutex_lock(&priv->mutex); + if (mutex_lock_interruptible(&priv->mutex)) { + /* FIXME: how 
to handle this error? */ + return; + } + WARN_ON(priv->fw_state != FW_STATE_READY); p54spi_power_off(priv); @@ -700,6 +704,7 @@ static int __devexit p54spi_remove(struct spi_device *spi) static struct spi_driver p54spi_driver = { .driver = { .name = "p54spi", + .bus = &spi_bus_type, .owner = THIS_MODULE, }, diff --git a/trunk/drivers/net/wireless/p54/txrx.c b/trunk/drivers/net/wireless/p54/txrx.c index 42b97bc38477..6ed9c323e3cb 100644 --- a/trunk/drivers/net/wireless/p54/txrx.c +++ b/trunk/drivers/net/wireless/p54/txrx.c @@ -242,7 +242,7 @@ void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb) skb_unlink(skb, &priv->tx_queue); p54_tx_qos_accounting_free(priv, skb); - ieee80211_free_txskb(dev, skb); + dev_kfree_skb_any(skb); } EXPORT_SYMBOL_GPL(p54_free_skb); @@ -788,7 +788,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb) &hdr_flags, &aid, &burst_allowed); if (p54_tx_qos_accounting_alloc(priv, skb, queue)) { - ieee80211_free_txskb(dev, skb); + dev_kfree_skb_any(skb); return; } diff --git a/trunk/drivers/net/wireless/prism54/isl_ioctl.c b/trunk/drivers/net/wireless/prism54/isl_ioctl.c index 4e44b1af119a..bc2ba80c47bb 100644 --- a/trunk/drivers/net/wireless/prism54/isl_ioctl.c +++ b/trunk/drivers/net/wireless/prism54/isl_ioctl.c @@ -2493,7 +2493,323 @@ prism54_set_mac_address(struct net_device *ndev, void *addr) return ret; } +/* Note: currently, use hostapd ioctl from the Host AP driver for WPA + * support. This is to be replaced with Linux wireless extensions once they + * get WPA support. */ + +/* Note II: please leave all this together as it will be easier to remove later, + * once wireless extensions add WPA support -mcgrof */ + +/* PRISM54_HOSTAPD ioctl() cmd: */ +enum { + PRISM2_SET_ENCRYPTION = 6, + PRISM2_HOSTAPD_SET_GENERIC_ELEMENT = 12, + PRISM2_HOSTAPD_MLME = 13, + PRISM2_HOSTAPD_SCAN_REQ = 14, +}; + #define PRISM54_SET_WPA SIOCIWFIRSTPRIV+12 +#define PRISM54_HOSTAPD SIOCIWFIRSTPRIV+25 +#define PRISM54_DROP_UNENCRYPTED SIOCIWFIRSTPRIV+26 + +#define PRISM2_HOSTAPD_MAX_BUF_SIZE 1024 +#define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \ + offsetof(struct prism2_hostapd_param, u.generic_elem.data) + +/* Maximum length for algorithm names (-1 for nul termination) + * used in ioctl() */ +#define HOSTAP_CRYPT_ALG_NAME_LEN 16 + +struct prism2_hostapd_param { + u32 cmd; + u8 sta_addr[ETH_ALEN]; + union { + struct { + u8 alg[HOSTAP_CRYPT_ALG_NAME_LEN]; + u32 flags; + u32 err; + u8 idx; + u8 seq[8]; /* sequence counter (set: RX, get: TX) */ + u16 key_len; + u8 key[0]; + } crypt; + struct { + u8 len; + u8 data[0]; + } generic_elem; + struct { +#define MLME_STA_DEAUTH 0 +#define MLME_STA_DISASSOC 1 + u16 cmd; + u16 reason_code; + } mlme; + struct { + u8 ssid_len; + u8 ssid[32]; + } scan_req; + } u; +}; + + +static int +prism2_ioctl_set_encryption(struct net_device *dev, + struct prism2_hostapd_param *param, + int param_len) +{ + islpci_private *priv = netdev_priv(dev); + int rvalue = 0, force = 0; + int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0; + union oid_res_t r; + + /* with the new API, it's impossible to get a NULL pointer. + * New version of iwconfig set the IW_ENCODE_NOKEY flag + * when no key is given, but older versions don't. 
*/ + + if (param->u.crypt.key_len > 0) { + /* we have a key to set */ + int index = param->u.crypt.idx; + int current_index; + struct obj_key key = { DOT11_PRIV_TKIP, 0, "" }; + + /* get the current key index */ + rvalue = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r); + current_index = r.u; + /* Verify that the key is not marked as invalid */ + if (!(param->u.crypt.flags & IW_ENCODE_NOKEY)) { + key.length = param->u.crypt.key_len > sizeof (param->u.crypt.key) ? + sizeof (param->u.crypt.key) : param->u.crypt.key_len; + memcpy(key.key, param->u.crypt.key, key.length); + if (key.length == 32) + /* we want WPA-PSK */ + key.type = DOT11_PRIV_TKIP; + if ((index < 0) || (index > 3)) + /* no index provided use the current one */ + index = current_index; + + /* now send the key to the card */ + rvalue |= + mgt_set_request(priv, DOT11_OID_DEFKEYX, index, + &key); + } + /* + * If a valid key is set, encryption should be enabled + * (user may turn it off later). + * This is also how "iwconfig ethX key on" works + */ + if ((index == current_index) && (key.length > 0)) + force = 1; + } else { + int index = (param->u.crypt.flags & IW_ENCODE_INDEX) - 1; + if ((index >= 0) && (index <= 3)) { + /* we want to set the key index */ + rvalue |= + mgt_set_request(priv, DOT11_OID_DEFKEYID, 0, + &index); + } else { + if (!(param->u.crypt.flags & IW_ENCODE_MODE)) { + /* we cannot do anything. Complain. */ + return -EINVAL; + } + } + } + /* now read the flags */ + if (param->u.crypt.flags & IW_ENCODE_DISABLED) { + /* Encoding disabled, + * authen = DOT11_AUTH_OS; + * invoke = 0; + * exunencrypt = 0; */ + } + if (param->u.crypt.flags & IW_ENCODE_OPEN) + /* Encode but accept non-encoded packets. No auth */ + invoke = 1; + if ((param->u.crypt.flags & IW_ENCODE_RESTRICTED) || force) { + /* Refuse non-encoded packets. 
Auth */ + authen = DOT11_AUTH_BOTH; + invoke = 1; + exunencrypt = 1; + } + /* do the change if requested */ + if ((param->u.crypt.flags & IW_ENCODE_MODE) || force) { + rvalue |= + mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen); + rvalue |= + mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &invoke); + rvalue |= + mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0, + &exunencrypt); + } + return rvalue; +} + +static int +prism2_ioctl_set_generic_element(struct net_device *ndev, + struct prism2_hostapd_param *param, + int param_len) +{ + islpci_private *priv = netdev_priv(ndev); + int max_len, len, alen, ret=0; + struct obj_attachment *attach; + + len = param->u.generic_elem.len; + max_len = param_len - PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN; + if (max_len < 0 || max_len < len) + return -EINVAL; + + alen = sizeof(*attach) + len; + attach = kzalloc(alen, GFP_KERNEL); + if (attach == NULL) + return -ENOMEM; + +#define WLAN_FC_TYPE_MGMT 0 +#define WLAN_FC_STYPE_ASSOC_REQ 0 +#define WLAN_FC_STYPE_REASSOC_REQ 2 + + /* Note: endianness is covered by mgt_set_varlen */ + + attach->type = (WLAN_FC_TYPE_MGMT << 2) | + (WLAN_FC_STYPE_ASSOC_REQ << 4); + attach->id = -1; + attach->size = len; + memcpy(attach->data, param->u.generic_elem.data, len); + + ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len); + + if (ret == 0) { + attach->type = (WLAN_FC_TYPE_MGMT << 2) | + (WLAN_FC_STYPE_REASSOC_REQ << 4); + + ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len); + + if (ret == 0) + printk(KERN_DEBUG "%s: WPA IE Attachment was set\n", + ndev->name); + } + + kfree(attach); + return ret; + +} + +static int +prism2_ioctl_mlme(struct net_device *dev, struct prism2_hostapd_param *param) +{ + return -EOPNOTSUPP; +} + +static int +prism2_ioctl_scan_req(struct net_device *ndev, + struct prism2_hostapd_param *param) +{ + islpci_private *priv = netdev_priv(ndev); + struct iw_request_info info; + int i, rvalue; + struct obj_bsslist *bsslist; + u32 noise = 0; + char *extra = ""; + char *current_ev = "foo"; + union oid_res_t r; + + if (islpci_get_state(priv) < PRV_STATE_INIT) { + /* device is not ready, fail gently */ + return 0; + } + + /* first get the noise value. We will use it to report the link quality */ + rvalue = mgt_get_request(priv, DOT11_OID_NOISEFLOOR, 0, NULL, &r); + noise = r.u; + + /* Ask the device for a list of known bss. We can report at most + * IW_MAX_AP=64 to the range struct. But the device won't repport anything + * if you change the value of IWMAX_BSS=24. 
+ */ + rvalue |= mgt_get_request(priv, DOT11_OID_BSSLIST, 0, NULL, &r); + bsslist = r.ptr; + + info.cmd = PRISM54_HOSTAPD; + info.flags = 0; + + /* ok now, scan the list and translate its info */ + for (i = 0; i < min(IW_MAX_AP, (int) bsslist->nr); i++) + current_ev = prism54_translate_bss(ndev, &info, current_ev, + extra + IW_SCAN_MAX_DATA, + &(bsslist->bsslist[i]), + noise); + kfree(bsslist); + + return rvalue; +} + +static int +prism54_hostapd(struct net_device *ndev, struct iw_point *p) +{ + struct prism2_hostapd_param *param; + int ret = 0; + u32 uwrq; + + printk(KERN_DEBUG "prism54_hostapd - len=%d\n", p->length); + if (p->length < sizeof(struct prism2_hostapd_param) || + p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer) + return -EINVAL; + + param = memdup_user(p->pointer, p->length); + if (IS_ERR(param)) + return PTR_ERR(param); + + switch (param->cmd) { + case PRISM2_SET_ENCRYPTION: + printk(KERN_DEBUG "%s: Caught WPA supplicant set encryption request\n", + ndev->name); + ret = prism2_ioctl_set_encryption(ndev, param, p->length); + break; + case PRISM2_HOSTAPD_SET_GENERIC_ELEMENT: + printk(KERN_DEBUG "%s: Caught WPA supplicant set WPA IE request\n", + ndev->name); + ret = prism2_ioctl_set_generic_element(ndev, param, + p->length); + break; + case PRISM2_HOSTAPD_MLME: + printk(KERN_DEBUG "%s: Caught WPA supplicant MLME request\n", + ndev->name); + ret = prism2_ioctl_mlme(ndev, param); + break; + case PRISM2_HOSTAPD_SCAN_REQ: + printk(KERN_DEBUG "%s: Caught WPA supplicant scan request\n", + ndev->name); + ret = prism2_ioctl_scan_req(ndev, param); + break; + case PRISM54_SET_WPA: + printk(KERN_DEBUG "%s: Caught WPA supplicant wpa init request\n", + ndev->name); + uwrq = 1; + ret = prism54_set_wpa(ndev, NULL, &uwrq, NULL); + break; + case PRISM54_DROP_UNENCRYPTED: + printk(KERN_DEBUG "%s: Caught WPA drop unencrypted request\n", + ndev->name); +#if 0 + uwrq = 0x01; + mgt_set(priv, DOT11_OID_EXUNENCRYPTED, &uwrq); + down_write(&priv->mib_sem); + mgt_commit(priv); + up_write(&priv->mib_sem); +#endif + /* Not necessary, as set_wpa does it, should we just do it here though? 
*/ + ret = 0; + break; + default: + printk(KERN_DEBUG "%s: Caught a WPA supplicant request that is not supported\n", + ndev->name); + ret = -EOPNOTSUPP; + break; + } + + if (ret == 0 && copy_to_user(p->pointer, param, p->length)) + ret = -EFAULT; + + kfree(param); + + return ret; +} static int prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info, @@ -2907,3 +3223,20 @@ const struct iw_handler_def prism54_handler_def = { .private_args = (struct iw_priv_args *) prism54_private_args, .get_wireless_stats = prism54_get_wireless_stats, }; + +/* For wpa_supplicant */ + +int +prism54_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) +{ + struct iwreq *wrq = (struct iwreq *) rq; + int ret = -1; + switch (cmd) { + case PRISM54_HOSTAPD: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + ret = prism54_hostapd(ndev, &wrq->u.data); + return ret; + } + return -EOPNOTSUPP; +} diff --git a/trunk/drivers/net/wireless/prism54/isl_ioctl.h b/trunk/drivers/net/wireless/prism54/isl_ioctl.h index a34bceb6e3cd..bcfbfb9281d2 100644 --- a/trunk/drivers/net/wireless/prism54/isl_ioctl.h +++ b/trunk/drivers/net/wireless/prism54/isl_ioctl.h @@ -43,6 +43,8 @@ void prism54_wpa_bss_ie_clean(islpci_private *priv); int prism54_set_mac_address(struct net_device *, void *); +int prism54_ioctl(struct net_device *, struct ifreq *, int); + extern const struct iw_handler_def prism54_handler_def; #endif /* _ISL_IOCTL_H */ diff --git a/trunk/drivers/net/wireless/prism54/islpci_dev.c b/trunk/drivers/net/wireless/prism54/islpci_dev.c index 5970ff6f40cc..5d0f61508a2e 100644 --- a/trunk/drivers/net/wireless/prism54/islpci_dev.c +++ b/trunk/drivers/net/wireless/prism54/islpci_dev.c @@ -793,8 +793,8 @@ islpci_set_multicast_list(struct net_device *dev) static void islpci_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); } static const struct ethtool_ops islpci_ethtool_ops = { @@ -804,6 +804,7 @@ static const struct ethtool_ops islpci_ethtool_ops = { static const struct net_device_ops islpci_netdev_ops = { .ndo_open = islpci_open, .ndo_stop = islpci_close, + .ndo_do_ioctl = prism54_ioctl, .ndo_start_xmit = islpci_eth_transmit, .ndo_tx_timeout = islpci_eth_tx_timeout, .ndo_set_mac_address = prism54_set_mac_address, diff --git a/trunk/drivers/net/wireless/ray_cs.c b/trunk/drivers/net/wireless/ray_cs.c index 04fec1fa6e0b..0021e4948512 100644 --- a/trunk/drivers/net/wireless/ray_cs.c +++ b/trunk/drivers/net/wireless/ray_cs.c @@ -2426,7 +2426,7 @@ static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs, unsigned int pkt_addr, int rx_len) { UCHAR buff[256]; - struct ray_rx_msg *msg = (struct ray_rx_msg *) buff; + struct rx_msg *msg = (struct rx_msg *)buff; del_timer(&local->timer); @@ -2513,7 +2513,7 @@ static void rx_deauthenticate(ray_dev_t *local, struct rcs __iomem *prcs, unsigned int pkt_addr, int rx_len) { /* UCHAR buff[256]; - struct ray_rx_msg *msg = (struct ray_rx_msg *) buff; + struct rx_msg *msg = (struct rx_msg *)buff; */ pr_debug("Deauthentication frame received\n"); local->authentication_state = UNAUTHENTICATED; diff --git a/trunk/drivers/net/wireless/rayctl.h b/trunk/drivers/net/wireless/rayctl.h index 3c3b98b152c3..d7646f299bd3 100644 --- a/trunk/drivers/net/wireless/rayctl.h +++ b/trunk/drivers/net/wireless/rayctl.h @@ -566,9 +566,9 @@ struct phy_header { UCHAR hdr_3; UCHAR 
hdr_4; }; -struct ray_rx_msg { +struct rx_msg { struct mac_header mac; - UCHAR var[0]; + UCHAR var[1]; }; struct tx_msg { diff --git a/trunk/drivers/net/wireless/rndis_wlan.c b/trunk/drivers/net/wireless/rndis_wlan.c index 3802c31fefcd..0c13840a7de5 100644 --- a/trunk/drivers/net/wireless/rndis_wlan.c +++ b/trunk/drivers/net/wireless/rndis_wlan.c @@ -244,10 +244,6 @@ enum ndis_80211_power_mode { NDIS_80211_POWER_MODE_FAST_PSP, }; -enum ndis_80211_pmkid_cand_list_flag_bits { - NDIS_80211_PMKID_CAND_PREAUTH = cpu_to_le32(1 << 0) -}; - struct ndis_80211_auth_request { __le32 length; u8 bssid[6]; @@ -391,17 +387,19 @@ struct ndis_80211_capability { struct ndis_80211_bssid_info { u8 bssid[6]; u8 pmkid[16]; -} __packed; +}; struct ndis_80211_pmkid { __le32 length; __le32 bssid_info_count; struct ndis_80211_bssid_info bssid_info[0]; -} __packed; +}; /* * private data */ +#define NET_TYPE_11FB 0 + #define CAP_MODE_80211A 1 #define CAP_MODE_80211B 2 #define CAP_MODE_80211G 4 @@ -416,7 +414,6 @@ struct ndis_80211_pmkid { #define RNDIS_WLAN_ALG_TKIP (1<<1) #define RNDIS_WLAN_ALG_CCMP (1<<2) -#define RNDIS_WLAN_NUM_KEYS 4 #define RNDIS_WLAN_KEY_MGMT_NONE 0 #define RNDIS_WLAN_KEY_MGMT_802_1X (1<<0) #define RNDIS_WLAN_KEY_MGMT_PSK (1<<1) @@ -519,7 +516,7 @@ struct rndis_wlan_private { /* encryption stuff */ int encr_tx_key_index; - struct rndis_wlan_encr_key encr_keys[RNDIS_WLAN_NUM_KEYS]; + struct rndis_wlan_encr_key encr_keys[4]; int wpa_version; u8 command_buffer[COMMAND_BUFFER_SIZE]; @@ -1349,32 +1346,6 @@ static int set_channel(struct usbnet *usbdev, int channel) return ret; } -static struct ieee80211_channel *get_current_channel(struct usbnet *usbdev, - u16 *beacon_interval) -{ - struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); - struct ieee80211_channel *channel; - struct ndis_80211_conf config; - int len, ret; - - /* Get channel and beacon interval */ - len = sizeof(config); - ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len); - netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n", - __func__, ret); - if (ret < 0) - return NULL; - - channel = ieee80211_get_channel(priv->wdev.wiphy, - KHZ_TO_MHZ(le32_to_cpu(config.ds_config))); - if (!channel) - return NULL; - - if (beacon_interval) - *beacon_interval = le16_to_cpu(config.beacon_period); - return channel; -} - /* index must be 0 - N, as per NDIS */ static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len, int index) @@ -1564,9 +1535,6 @@ static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid) bool is_wpa; int ret; - if (index >= RNDIS_WLAN_NUM_KEYS) - return -ENOENT; - if (priv->encr_keys[index].len == 0) return 0; @@ -2004,12 +1972,11 @@ static int rndis_scan(struct wiphy *wiphy, struct net_device *dev, return ret; } -static bool rndis_bss_info_update(struct usbnet *usbdev, - struct ndis_80211_bssid_ex *bssid) +static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev, + struct ndis_80211_bssid_ex *bssid) { struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); struct ieee80211_channel *channel; - struct cfg80211_bss *bss; s32 signal; u64 timestamp; u16 capability; @@ -2048,12 +2015,9 @@ static bool rndis_bss_info_update(struct usbnet *usbdev, capability = le16_to_cpu(fixed->capabilities); beacon_interval = le16_to_cpu(fixed->beacon_interval); - bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac, + return cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac, timestamp, capability, beacon_interval, ie, ie_len, signal, 
GFP_KERNEL); - cfg80211_put_bss(bss); - - return (bss != NULL); } static struct ndis_80211_bssid_ex *next_bssid_list_item( @@ -2487,9 +2451,6 @@ static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev, netdev_dbg(usbdev->net, "%s(%i)\n", __func__, key_index); - if (key_index >= RNDIS_WLAN_NUM_KEYS) - return -ENOENT; - priv->encr_tx_key_index = key_index; if (is_wpa_key(priv, key_index)) @@ -2678,12 +2639,12 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid, { struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); struct ieee80211_channel *channel; + struct ndis_80211_conf config; struct ndis_80211_ssid ssid; - struct cfg80211_bss *bss; s32 signal; u64 timestamp; u16 capability; - u16 beacon_interval = 0; + u16 beacon_interval; __le32 rssi; u8 ie_buf[34]; int len, ret, ie_len; @@ -2708,10 +2669,22 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid, } /* Get channel and beacon interval */ - channel = get_current_channel(usbdev, &beacon_interval); - if (!channel) { - netdev_warn(usbdev->net, "%s(): could not get channel.\n", - __func__); + len = sizeof(config); + ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len); + netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n", + __func__, ret); + if (ret >= 0) { + beacon_interval = le16_to_cpu(config.beacon_period); + channel = ieee80211_get_channel(priv->wdev.wiphy, + KHZ_TO_MHZ(le32_to_cpu(config.ds_config))); + if (!channel) { + netdev_warn(usbdev->net, "%s(): could not get channel." + "\n", __func__); + return; + } + } else { + netdev_warn(usbdev->net, "%s(): could not get configuration.\n", + __func__); return; } @@ -2741,10 +2714,9 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid, bssid, (u32)timestamp, capability, beacon_interval, ie_len, ssid.essid, signal); - bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid, + cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid, timestamp, capability, beacon_interval, ie_buf, ie_len, signal, GFP_KERNEL); - cfg80211_put_bss(bss); } /* @@ -2856,9 +2828,8 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev) req_ie_len, resp_ie, resp_ie_len, 0, GFP_KERNEL); else - cfg80211_roamed(usbdev->net, - get_current_channel(usbdev, NULL), - bssid, req_ie, req_ie_len, + cfg80211_roamed(usbdev->net, NULL, bssid, + req_ie, req_ie_len, resp_ie, resp_ie_len, GFP_KERNEL); } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC) cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL); @@ -3024,13 +2995,25 @@ static void rndis_wlan_pmkid_cand_list_indication(struct usbnet *usbdev, for (i = 0; i < le32_to_cpu(cand_list->num_candidates); i++) { struct ndis_80211_pmkid_candidate *cand = &cand_list->candidate_list[i]; - bool preauth = !!(cand->flags & NDIS_80211_PMKID_CAND_PREAUTH); - netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, preauth: %d, bssid: %pM\n", - i, le32_to_cpu(cand->flags), preauth, cand->bssid); + netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, bssid: %pM\n", + i, le32_to_cpu(cand->flags), cand->bssid); + +#if 0 + struct iw_pmkid_cand pcand; + union iwreq_data wrqu; - cfg80211_pmksa_candidate_notify(usbdev->net, i, cand->bssid, - preauth, GFP_ATOMIC); + memset(&pcand, 0, sizeof(pcand)); + if (le32_to_cpu(cand->flags) & 0x01) + pcand.flags |= IW_PMKID_CAND_PREAUTH; + pcand.index = i; + memcpy(pcand.bssid.sa_data, cand->bssid, ETH_ALEN); + + memset(&wrqu, 0, sizeof(wrqu)); + wrqu.data.length = sizeof(pcand); + wireless_send_event(usbdev->net, 
IWEVPMKIDCAND, &wrqu, + (u8 *)&pcand); +#endif } } diff --git a/trunk/drivers/net/wireless/rt2x00/rt2500usb.c b/trunk/drivers/net/wireless/rt2x00/rt2500usb.c index de7d41f21a69..53c5f878f61d 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2500usb.c +++ b/trunk/drivers/net/wireless/rt2x00/rt2500usb.c @@ -39,7 +39,7 @@ /* * Allow hardware encryption to be disabled. */ -static bool modparam_nohwcrypt; +static int modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); diff --git a/trunk/drivers/net/wireless/rt2x00/rt2800.h b/trunk/drivers/net/wireless/rt2x00/rt2800.h index 2571a2fa3d09..4778620347c4 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2800.h +++ b/trunk/drivers/net/wireless/rt2x00/rt2800.h @@ -50,7 +50,7 @@ * RF2853 2.4G/5G 3T3R * RF3320 2.4G 1T1R(RT3350/RT3370/RT3390) * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392) - * RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662) + * RF3853 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662) * RF5370 2.4G 1T1R * RF5390 2.4G 1T1R */ @@ -66,7 +66,7 @@ #define RF2853 0x000a #define RF3320 0x000b #define RF3322 0x000c -#define RF3053 0x000d +#define RF3853 0x000d #define RF5370 0x5370 #define RF5390 0x5390 diff --git a/trunk/drivers/net/wireless/rt2x00/rt2800lib.c b/trunk/drivers/net/wireless/rt2x00/rt2800lib.c index 22a1a8fc6e02..1ba079dffb11 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/trunk/drivers/net/wireless/rt2x00/rt2800lib.c @@ -1203,10 +1203,8 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev, !(filter_flags & FIF_CONTROL)); rt2x00_set_field32(®, RX_FILTER_CFG_DROP_PSPOLL, !(filter_flags & FIF_PSPOLL)); - rt2x00_set_field32(®, RX_FILTER_CFG_DROP_BA, - !(filter_flags & FIF_CONTROL)); - rt2x00_set_field32(®, RX_FILTER_CFG_DROP_BAR, - !(filter_flags & FIF_CONTROL)); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_BA, 1); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_BAR, 0); rt2x00_set_field32(®, RX_FILTER_CFG_DROP_CNTL, !(filter_flags & FIF_CONTROL)); rt2800_register_write(rt2x00dev, RX_FILTER_CFG, reg); @@ -1944,24 +1942,19 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, info->default_power2 = TXPOWER_A_TO_DEV(info->default_power2); } - switch (rt2x00dev->chip.rf) { - case RF2020: - case RF3020: - case RF3021: - case RF3022: - case RF3320: + if (rt2x00_rf(rt2x00dev, RF2020) || + rt2x00_rf(rt2x00dev, RF3020) || + rt2x00_rf(rt2x00dev, RF3021) || + rt2x00_rf(rt2x00dev, RF3022) || + rt2x00_rf(rt2x00dev, RF3320)) rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info); - break; - case RF3052: + else if (rt2x00_rf(rt2x00dev, RF3052)) rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info); - break; - case RF5370: - case RF5390: + else if (rt2x00_rf(rt2x00dev, RF5370) || + rt2x00_rf(rt2x00dev, RF5390)) rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info); - break; - default: + else rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info); - } /* * Change BBP settings @@ -3937,18 +3930,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET), value, rt2x00_get_field32(reg, MAC_CSR0_REVISION)); - switch (rt2x00dev->chip.rt) { - case RT2860: - case RT2872: - case RT2883: - case RT3070: - case RT3071: - case RT3090: - case RT3390: - case RT3572: - case RT5390: - break; - default: + if (!rt2x00_rt(rt2x00dev, RT2860) && + !rt2x00_rt(rt2x00dev, RT2872) && + !rt2x00_rt(rt2x00dev, RT2883) && + !rt2x00_rt(rt2x00dev, RT3070) && + 
!rt2x00_rt(rt2x00dev, RT3071) && + !rt2x00_rt(rt2x00dev, RT3090) && + !rt2x00_rt(rt2x00dev, RT3390) && + !rt2x00_rt(rt2x00dev, RT3572) && + !rt2x00_rt(rt2x00dev, RT5390)) { ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); return -ENODEV; } @@ -4562,9 +4552,6 @@ int rt2800_get_survey(struct ieee80211_hw *hw, int idx, survey->channel_time_ext_busy = busy_ext / 1000; } - if (!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) - survey->filled |= SURVEY_INFO_IN_USE; - return 0; } diff --git a/trunk/drivers/net/wireless/rt2x00/rt2800pci.c b/trunk/drivers/net/wireless/rt2x00/rt2800pci.c index 4941a1a23219..da48c8ac27bd 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/trunk/drivers/net/wireless/rt2x00/rt2800pci.c @@ -50,7 +50,7 @@ /* * Allow hardware encryption to be disabled. */ -static bool modparam_nohwcrypt = false; +static int modparam_nohwcrypt = 0; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); diff --git a/trunk/drivers/net/wireless/rt2x00/rt2800usb.c b/trunk/drivers/net/wireless/rt2x00/rt2800usb.c index f8eb49f5ac29..377876315b8d 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/trunk/drivers/net/wireless/rt2x00/rt2800usb.c @@ -45,7 +45,7 @@ /* * Allow hardware encryption to be disabled. */ -static bool modparam_nohwcrypt; +static int modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); @@ -400,10 +400,10 @@ static void rt2800usb_write_tx_desc(struct queue_entry *entry, /* * The size of TXINFO_W0_USB_DMA_TX_PKT_LEN is * TXWI + 802.11 header + L2 pad + payload + pad, - * so need to decrease size of TXINFO. + * so need to decrease size of TXINFO and USB end pad. */ rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN, - roundup(entry->skb->len, 4) - TXINFO_DESC_SIZE); + entry->skb->len - TXINFO_DESC_SIZE - 4); rt2x00_set_field32(&word, TXINFO_W0_WIV, !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags)); rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2); @@ -421,20 +421,37 @@ static void rt2800usb_write_tx_desc(struct queue_entry *entry, skbdesc->desc_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE; } -/* - * TX data initialization - */ -static int rt2800usb_get_tx_data_len(struct queue_entry *entry) +static void rt2800usb_write_tx_data(struct queue_entry *entry, + struct txentry_desc *txdesc) { + unsigned int len; + int err; + + rt2800_write_tx_data(entry, txdesc); + /* - * pad(1~3 bytes) is needed after each 802.11 payload. - * USB end pad(4 bytes) is needed at each USB bulk out packet end. + * pad(1~3 bytes) is added after each 802.11 payload. + * USB end pad(4 bytes) is added at each USB bulk out packet end. 
* TX frame format is : * | TXINFO | TXWI | 802.11 header | L2 pad | payload | pad | USB end pad | * |<------------- tx_pkt_len ------------->| */ + len = roundup(entry->skb->len, 4) + 4; + err = skb_padto(entry->skb, len); + if (unlikely(err)) { + WARNING(entry->queue->rt2x00dev, "TX SKB padding error, out of memory\n"); + return; + } - return roundup(entry->skb->len, 4) + 4; + entry->skb->len = len; +} + +/* + * TX data initialization + */ +static int rt2800usb_get_tx_data_len(struct queue_entry *entry) +{ + return entry->skb->len; } /* @@ -790,7 +807,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = { .flush_queue = rt2x00usb_flush_queue, .tx_dma_done = rt2800usb_tx_dma_done, .write_tx_desc = rt2800usb_write_tx_desc, - .write_tx_data = rt2800_write_tx_data, + .write_tx_data = rt2800usb_write_tx_data, .write_beacon = rt2800_write_beacon, .clear_beacon = rt2800_clear_beacon, .get_tx_data_len = rt2800usb_get_tx_data_len, @@ -897,14 +914,12 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x050d, 0x8053) }, { USB_DEVICE(0x050d, 0x805c) }, { USB_DEVICE(0x050d, 0x815c) }, - { USB_DEVICE(0x050d, 0x825a) }, { USB_DEVICE(0x050d, 0x825b) }, { USB_DEVICE(0x050d, 0x935a) }, { USB_DEVICE(0x050d, 0x935b) }, /* Buffalo */ { USB_DEVICE(0x0411, 0x00e8) }, { USB_DEVICE(0x0411, 0x0158) }, - { USB_DEVICE(0x0411, 0x015d) }, { USB_DEVICE(0x0411, 0x016f) }, { USB_DEVICE(0x0411, 0x01a2) }, /* Corega */ @@ -919,8 +934,6 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x07d1, 0x3c0e) }, { USB_DEVICE(0x07d1, 0x3c0f) }, { USB_DEVICE(0x07d1, 0x3c11) }, - { USB_DEVICE(0x07d1, 0x3c13) }, - { USB_DEVICE(0x07d1, 0x3c15) }, { USB_DEVICE(0x07d1, 0x3c16) }, /* Draytek */ { USB_DEVICE(0x07fa, 0x7712) }, @@ -930,7 +943,6 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x7392, 0x7711) }, { USB_DEVICE(0x7392, 0x7717) }, { USB_DEVICE(0x7392, 0x7718) }, - { USB_DEVICE(0x7392, 0x7722) }, /* Encore */ { USB_DEVICE(0x203d, 0x1480) }, { USB_DEVICE(0x203d, 0x14a9) }, @@ -964,8 +976,6 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x13b1, 0x0031) }, { USB_DEVICE(0x1737, 0x0070) }, { USB_DEVICE(0x1737, 0x0071) }, - { USB_DEVICE(0x1737, 0x0077) }, - { USB_DEVICE(0x1737, 0x0078) }, /* Logitec */ { USB_DEVICE(0x0789, 0x0162) }, { USB_DEVICE(0x0789, 0x0163) }, @@ -989,13 +999,9 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x0db0, 0x871b) }, { USB_DEVICE(0x0db0, 0x871c) }, { USB_DEVICE(0x0db0, 0x899a) }, - /* Ovislink */ - { USB_DEVICE(0x1b75, 0x3071) }, - { USB_DEVICE(0x1b75, 0x3072) }, /* Para */ { USB_DEVICE(0x20b8, 0x8888) }, /* Pegatron */ - { USB_DEVICE(0x1d4d, 0x0002) }, { USB_DEVICE(0x1d4d, 0x000c) }, { USB_DEVICE(0x1d4d, 0x000e) }, { USB_DEVICE(0x1d4d, 0x0011) }, @@ -1048,9 +1054,7 @@ static struct usb_device_id rt2800usb_device_table[] = { /* Sparklan */ { USB_DEVICE(0x15a9, 0x0006) }, /* Sweex */ - { USB_DEVICE(0x177f, 0x0153) }, { USB_DEVICE(0x177f, 0x0302) }, - { USB_DEVICE(0x177f, 0x0313) }, /* U-Media */ { USB_DEVICE(0x157e, 0x300e) }, { USB_DEVICE(0x157e, 0x3013) }, @@ -1134,24 +1138,27 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x13d3, 0x3322) }, /* Belkin */ { USB_DEVICE(0x050d, 0x1003) }, + { USB_DEVICE(0x050d, 0x825a) }, /* Buffalo */ { USB_DEVICE(0x0411, 0x012e) }, { USB_DEVICE(0x0411, 0x0148) }, { USB_DEVICE(0x0411, 0x0150) }, + { USB_DEVICE(0x0411, 0x015d) }, /* Corega */ { USB_DEVICE(0x07aa, 0x0041) }, { USB_DEVICE(0x07aa, 0x0042) }, { 
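
An illustrative aside, not part of the patch: the padding rule spelled out in the rt2800usb comment above reduces to "round the frame up to a multiple of four, then add the four-byte USB bulk-out end pad". A tiny standalone illustration with a made-up frame length:

#include <stdio.h>

/* Same rounding as roundup(entry->skb->len, 4) + 4 in the driver. */
#define ROUND_UP_4(x)	(((x) + 3U) & ~3U)

int main(void)
{
	unsigned int frame_len = 61;				/* made-up 802.11 frame length */
	unsigned int on_the_bus = ROUND_UP_4(frame_len) + 4;	/* 61 -> 64, plus end pad -> 68 */

	printf("frame %u bytes -> %u bytes on the USB bulk pipe\n",
	       frame_len, on_the_bus);
	return 0;
}
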
USB_DEVICE(0x18c5, 0x0008) }, /* D-Link */ { USB_DEVICE(0x07d1, 0x3c0b) }, + { USB_DEVICE(0x07d1, 0x3c13) }, + { USB_DEVICE(0x07d1, 0x3c15) }, { USB_DEVICE(0x07d1, 0x3c17) }, { USB_DEVICE(0x2001, 0x3c17) }, /* Edimax */ { USB_DEVICE(0x7392, 0x4085) }, + { USB_DEVICE(0x7392, 0x7722) }, /* Encore */ { USB_DEVICE(0x203d, 0x14a1) }, - /* Fujitsu Stylistic 550 */ - { USB_DEVICE(0x1690, 0x0761) }, /* Gemtek */ { USB_DEVICE(0x15a9, 0x0010) }, /* Gigabyte */ @@ -1163,13 +1170,20 @@ static struct usb_device_id rt2800usb_device_table[] = { /* LevelOne */ { USB_DEVICE(0x1740, 0x0605) }, { USB_DEVICE(0x1740, 0x0615) }, + /* Linksys */ + { USB_DEVICE(0x1737, 0x0077) }, + { USB_DEVICE(0x1737, 0x0078) }, /* Logitec */ { USB_DEVICE(0x0789, 0x0168) }, { USB_DEVICE(0x0789, 0x0169) }, /* Motorola */ { USB_DEVICE(0x100d, 0x9032) }, + /* Ovislink */ + { USB_DEVICE(0x1b75, 0x3071) }, + { USB_DEVICE(0x1b75, 0x3072) }, /* Pegatron */ { USB_DEVICE(0x05a6, 0x0101) }, + { USB_DEVICE(0x1d4d, 0x0002) }, { USB_DEVICE(0x1d4d, 0x0010) }, /* Planex */ { USB_DEVICE(0x2019, 0x5201) }, @@ -1188,6 +1202,9 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x083a, 0xc522) }, { USB_DEVICE(0x083a, 0xd522) }, { USB_DEVICE(0x083a, 0xf511) }, + /* Sweex */ + { USB_DEVICE(0x177f, 0x0153) }, + { USB_DEVICE(0x177f, 0x0313) }, /* Zyxel */ { USB_DEVICE(0x0586, 0x341a) }, #endif diff --git a/trunk/drivers/net/wireless/rt2x00/rt2x00.h b/trunk/drivers/net/wireless/rt2x00/rt2x00.h index b03b22c47b18..99ff12d0c29d 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2x00.h +++ b/trunk/drivers/net/wireless/rt2x00/rt2x00.h @@ -189,9 +189,9 @@ struct rt2x00_chip { #define RT3090 0x3090 /* 2.4GHz PCIe */ #define RT3390 0x3390 #define RT3572 0x3572 -#define RT3593 0x3593 +#define RT3593 0x3593 /* PCIe */ #define RT3883 0x3883 /* WSOC */ -#define RT5390 0x5390 /* 2.4GHz */ +#define RT5390 0x5390 /* 2.4GHz */ u16 rf; u16 rev; diff --git a/trunk/drivers/net/wireless/rt2x00/rt2x00dev.c b/trunk/drivers/net/wireless/rt2x00/rt2x00dev.c index c3e1aa7c1a80..edd317fa7c0a 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/trunk/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -831,11 +831,11 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev, if (spec->supported_rates & SUPPORT_RATE_OFDM) num_rates += 8; - channels = kcalloc(spec->num_channels, sizeof(*channels), GFP_KERNEL); + channels = kzalloc(sizeof(*channels) * spec->num_channels, GFP_KERNEL); if (!channels) return -ENOMEM; - rates = kcalloc(num_rates, sizeof(*rates), GFP_KERNEL); + rates = kzalloc(sizeof(*rates) * num_rates, GFP_KERNEL); if (!rates) goto exit_free_channels; diff --git a/trunk/drivers/net/wireless/rt2x00/rt2x00mac.c b/trunk/drivers/net/wireless/rt2x00/rt2x00mac.c index ede3c58e6783..bf0acff07807 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/trunk/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -160,7 +160,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) exit_fail: rt2x00queue_pause_queue(queue); exit_free_skb: - ieee80211_free_txskb(hw, skb); + dev_kfree_skb_any(skb); } EXPORT_SYMBOL_GPL(rt2x00mac_tx); diff --git a/trunk/drivers/net/wireless/rt2x00/rt2x00usb.c b/trunk/drivers/net/wireless/rt2x00/rt2x00usb.c index 2eea3866504d..1e31050dafc9 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2x00usb.c +++ b/trunk/drivers/net/wireless/rt2x00/rt2x00usb.c @@ -298,22 +298,12 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void* data) return false; /* - * USB devices require certain padding at the end 
of each frame - * and urb. Those paddings are not included in skbs. Pass entry - * to the driver to determine what the overall length should be. + * USB devices cannot blindly pass the skb->len as the + * length of the data to usb_fill_bulk_urb. Pass the skb + * to the driver to determine what the length should be. */ length = rt2x00dev->ops->lib->get_tx_data_len(entry); - status = skb_padto(entry->skb, length); - if (unlikely(status)) { - /* TODO: report something more appropriate than IO_FAILED. */ - WARNING(rt2x00dev, "TX SKB padding error, out of memory\n"); - set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); - rt2x00lib_dmadone(entry); - - return false; - } - usb_fill_bulk_urb(entry_priv->urb, usb_dev, usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint), entry->skb->data, length, diff --git a/trunk/drivers/net/wireless/rt2x00/rt61pci.c b/trunk/drivers/net/wireless/rt2x00/rt61pci.c index e0c6d117429d..bf55b4a311e3 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt61pci.c +++ b/trunk/drivers/net/wireless/rt2x00/rt61pci.c @@ -41,7 +41,7 @@ /* * Allow hardware encryption to be disabled. */ -static bool modparam_nohwcrypt = false; +static int modparam_nohwcrypt = 0; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); diff --git a/trunk/drivers/net/wireless/rt2x00/rt73usb.c b/trunk/drivers/net/wireless/rt2x00/rt73usb.c index 1c69c737086d..cfb19dbb0a67 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt73usb.c +++ b/trunk/drivers/net/wireless/rt2x00/rt73usb.c @@ -40,7 +40,7 @@ /* * Allow hardware encryption to be disabled. */ -static bool modparam_nohwcrypt; +static int modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); diff --git a/trunk/drivers/net/wireless/rtlwifi/base.c b/trunk/drivers/net/wireless/rtlwifi/base.c index 8d6eb0f56c03..b4ce93436d2e 100644 --- a/trunk/drivers/net/wireless/rtlwifi/base.c +++ b/trunk/drivers/net/wireless/rtlwifi/base.c @@ -345,9 +345,9 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) if (is_valid_ether_addr(rtlefuse->dev_addr)) { SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr); } else { - u8 rtlmac1[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 }; - get_random_bytes((rtlmac1 + (ETH_ALEN - 1)), 1); - SET_IEEE80211_PERM_ADDR(hw, rtlmac1); + u8 rtlmac[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 }; + get_random_bytes((rtlmac + (ETH_ALEN - 1)), 1); + SET_IEEE80211_PERM_ADDR(hw, rtlmac); } } @@ -396,7 +396,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw) u8 valid = 0; /*set init state to on */ - rtlpriv->rfkill.rfkill_state = true; + rtlpriv->rfkill.rfkill_state = 1; wiphy_rfkill_set_hw_state(hw->wiphy, 0); radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid); @@ -448,12 +448,12 @@ int rtl_init_core(struct ieee80211_hw *hw) /* <4> locks */ mutex_init(&rtlpriv->locks.conf_mutex); - mutex_init(&rtlpriv->locks.ps_mutex); spin_lock_init(&rtlpriv->locks.ips_lock); spin_lock_init(&rtlpriv->locks.irq_th_lock); spin_lock_init(&rtlpriv->locks.h2c_lock); spin_lock_init(&rtlpriv->locks.rf_ps_lock); spin_lock_init(&rtlpriv->locks.rf_lock); + spin_lock_init(&rtlpriv->locks.lps_lock); spin_lock_init(&rtlpriv->locks.waitq_lock); spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock); diff --git a/trunk/drivers/net/wireless/rtlwifi/base.h b/trunk/drivers/net/wireless/rtlwifi/base.h index f66b5757f6b9..4ae905983d0d 100644 --- a/trunk/drivers/net/wireless/rtlwifi/base.h +++ 
b/trunk/drivers/net/wireless/rtlwifi/base.h @@ -76,7 +76,7 @@ enum ap_peer { SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val) #define SET_80211_PS_POLL_AID(_hdr, _val) \ - (*(u16 *)((u8 *)(_hdr) + 2) = _val) + (*(u16 *)((u8 *)(_hdr) + 2) = le16_to_cpu(_val)) #define SET_80211_PS_POLL_BSSID(_hdr, _val) \ memcpy(((u8 *)(_hdr)) + 4, (u8 *)(_val), ETH_ALEN) #define SET_80211_PS_POLL_TA(_hdr, _val) \ diff --git a/trunk/drivers/net/wireless/rtlwifi/pci.c b/trunk/drivers/net/wireless/rtlwifi/pci.c index 39e0907a3c4e..eb61061821e4 100644 --- a/trunk/drivers/net/wireless/rtlwifi/pci.c +++ b/trunk/drivers/net/wireless/rtlwifi/pci.c @@ -78,7 +78,7 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw) u8 init_aspm; ppsc->reg_rfps_level = 0; - ppsc->support_aspm = false; + ppsc->support_aspm = 0; /*Update PCI ASPM setting */ ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm; @@ -570,9 +570,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio) if (ieee80211_is_nullfunc(fc)) { if (ieee80211_has_pm(fc)) { rtlpriv->mac80211.offchan_delay = true; - rtlpriv->psc.state_inap = true; + rtlpriv->psc.state_inap = 1; } else { - rtlpriv->psc.state_inap = false; + rtlpriv->psc.state_inap = 0; } } @@ -610,7 +610,7 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio) if (((rtlpriv->link_info.num_rx_inperiod + rtlpriv->link_info.num_tx_inperiod) > 8) || (rtlpriv->link_info.num_rx_inperiod > 2)) { - schedule_work(&rtlpriv->works.lps_leave_work); + tasklet_schedule(&rtlpriv->works.ips_leave_tasklet); } } @@ -736,7 +736,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) if (((rtlpriv->link_info.num_rx_inperiod + rtlpriv->link_info.num_tx_inperiod) > 8) || (rtlpriv->link_info.num_rx_inperiod > 2)) { - schedule_work(&rtlpriv->works.lps_leave_work); + tasklet_schedule(&rtlpriv->works.ips_leave_tasklet); } dev_kfree_skb_any(skb); @@ -780,7 +780,6 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) unsigned long flags; u32 inta = 0; u32 intb = 0; - irqreturn_t ret = IRQ_HANDLED; spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); @@ -788,10 +787,8 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb); /*Shared IRQ or HW disappared */ - if (!inta || inta == 0xffff) { - ret = IRQ_NONE; + if (!inta || inta == 0xffff) goto done; - } /*<1> beacon related */ if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) { @@ -893,9 +890,12 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) if (rtlpriv->rtlhal.earlymode_enable) tasklet_schedule(&rtlpriv->works.irq_tasklet); + spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); + return IRQ_HANDLED; + done: spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); - return ret; + return IRQ_HANDLED; } static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw) @@ -903,6 +903,11 @@ static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw) _rtl_pci_tx_chk_waitq(hw); } +static void _rtl_pci_ips_leave_tasklet(struct ieee80211_hw *hw) +{ + rtl_lps_leave(hw); +} + static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -940,15 +945,6 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) return; } -static void rtl_lps_leave_work_callback(struct work_struct *work) -{ - struct rtl_works *rtlworks = - container_of(work, struct rtl_works, lps_leave_work); - struct ieee80211_hw *hw = rtlworks->hw; - - rtl_lps_leave(hw); -} - static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw) { struct 
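
For context on the lps_leave_work / ips_leave_tasklet swap in the rtlwifi pci.c hunks above: deferring the power-save exit to a work item is the usual way to get out of atomic (interrupt or tasklet) context before doing anything that may sleep. A minimal sketch of that pattern with illustrative names, not the driver's actual code:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_priv {
	struct work_struct leave_ps_work;
	/* ... driver state ... */
};

/* Runs later in process context, so it may sleep or take mutexes. */
static void leave_ps_work_fn(struct work_struct *work)
{
	struct my_priv *priv = container_of(work, struct my_priv, leave_ps_work);

	(void)priv;		/* leave power save here */
}

static void my_priv_init(struct my_priv *priv)
{
	INIT_WORK(&priv->leave_ps_work, leave_ps_work_fn);
}

/* Called from the interrupt/tasklet path, where sleeping is not allowed. */
static void my_rx_interrupt(struct my_priv *priv)
{
	schedule_work(&priv->leave_ps_work);
}
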
rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); @@ -1010,7 +1006,9 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw, tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet, (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet, (unsigned long)hw); - INIT_WORK(&rtlpriv->works.lps_leave_work, rtl_lps_leave_work_callback); + tasklet_init(&rtlpriv->works.ips_leave_tasklet, + (void (*)(unsigned long))_rtl_pci_ips_leave_tasklet, + (unsigned long)hw); } static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw, @@ -1480,7 +1478,7 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw) synchronize_irq(rtlpci->pdev->irq); tasklet_kill(&rtlpriv->works.irq_tasklet); - cancel_work_sync(&rtlpriv->works.lps_leave_work); + tasklet_kill(&rtlpriv->works.ips_leave_tasklet); flush_workqueue(rtlpriv->works.rtl_wq); destroy_workqueue(rtlpriv->works.rtl_wq); @@ -1555,7 +1553,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw) set_hal_stop(rtlhal); rtlpriv->cfg->ops->disable_interrupt(hw); - cancel_work_sync(&rtlpriv->works.lps_leave_work); + tasklet_kill(&rtlpriv->works.ips_leave_tasklet); spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags); while (ppsc->rfchange_inprogress) { diff --git a/trunk/drivers/net/wireless/rtlwifi/ps.c b/trunk/drivers/net/wireless/rtlwifi/ps.c index 130fdd99d573..55c8e50f45fd 100644 --- a/trunk/drivers/net/wireless/rtlwifi/ps.c +++ b/trunk/drivers/net/wireless/rtlwifi/ps.c @@ -237,12 +237,11 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw) struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); enum rf_pwrstate rtstate; - unsigned long flags; if (mac->opmode != NL80211_IFTYPE_STATION) return; - spin_lock_irqsave(&rtlpriv->locks.ips_lock, flags); + spin_lock(&rtlpriv->locks.ips_lock); if (ppsc->inactiveps) { rtstate = ppsc->rfpwr_state; @@ -258,7 +257,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw) } } - spin_unlock_irqrestore(&rtlpriv->locks.ips_lock, flags); + spin_unlock(&rtlpriv->locks.ips_lock); } /*for FW LPS*/ @@ -396,7 +395,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw) if (mac->link_state != MAC80211_LINKED) return; - mutex_lock(&rtlpriv->locks.ps_mutex); + spin_lock_irq(&rtlpriv->locks.lps_lock); /* Idle for a while if we connect to AP a while ago. 
*/ if (mac->cnt_after_linked >= 2) { @@ -408,7 +407,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw) } } - mutex_unlock(&rtlpriv->locks.ps_mutex); + spin_unlock_irq(&rtlpriv->locks.lps_lock); } /*Leave the leisure power save mode.*/ @@ -417,8 +416,9 @@ void rtl_lps_leave(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + unsigned long flags; - mutex_lock(&rtlpriv->locks.ps_mutex); + spin_lock_irqsave(&rtlpriv->locks.lps_lock, flags); if (ppsc->fwctrl_lps) { if (ppsc->dot11_psmode != EACTIVE) { @@ -439,7 +439,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw) rtl_lps_set_psmode(hw, EACTIVE); } } - mutex_unlock(&rtlpriv->locks.ps_mutex); + spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flags); } /* For sw LPS*/ @@ -540,9 +540,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw) RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); } - mutex_lock(&rtlpriv->locks.ps_mutex); + spin_lock_irq(&rtlpriv->locks.lps_lock); rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS); - mutex_unlock(&rtlpriv->locks.ps_mutex); + spin_unlock_irq(&rtlpriv->locks.lps_lock); } void rtl_swlps_rfon_wq_callback(void *data) @@ -575,9 +575,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw) if (rtlpriv->link_info.busytraffic) return; - mutex_lock(&rtlpriv->locks.ps_mutex); + spin_lock_irq(&rtlpriv->locks.lps_lock); rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS); - mutex_unlock(&rtlpriv->locks.ps_mutex); + spin_unlock_irq(&rtlpriv->locks.lps_lock); if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM && !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) { diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/trunk/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c index 931d97979b04..950c65a15b8a 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c @@ -73,34 +73,6 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable) } } -static void rtl_block_fw_writeN(struct ieee80211_hw *hw, const u8 *buffer, - u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 blockSize = REALTEK_USB_VENQT_MAX_BUF_SIZE - 20; - u8 *bufferPtr = (u8 *) buffer; - u32 i, offset, blockCount, remainSize; - - blockCount = size / blockSize; - remainSize = size % blockSize; - - for (i = 0; i < blockCount; i++) { - offset = i * blockSize; - rtlpriv->io.writeN_sync(rtlpriv, - (FW_8192C_START_ADDRESS + offset), - (void *)(bufferPtr + offset), - blockSize); - } - - if (remainSize) { - offset = blockCount * blockSize; - rtlpriv->io.writeN_sync(rtlpriv, - (FW_8192C_START_ADDRESS + offset), - (void *)(bufferPtr + offset), - remainSize); - } -} - static void _rtl92c_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size) { @@ -109,30 +81,23 @@ static void _rtl92c_fw_block_write(struct ieee80211_hw *hw, u8 *bufferPtr = (u8 *) buffer; u32 *pu4BytePtr = (u32 *) buffer; u32 i, offset, blockCount, remainSize; - u32 data; - if (rtlpriv->io.writeN_sync) { - rtl_block_fw_writeN(hw, buffer, size); - return; - } blockCount = size / blockSize; remainSize = size % blockSize; - if (remainSize) { - /* the last word is < 4 bytes - pad it with zeros */ - for (i = 0; i < 4 - remainSize; i++) - *(bufferPtr + size + i) = 0; - blockCount++; - } for (i = 0; i < blockCount; i++) { offset = i * blockSize; - /* for big-endian platforms, the firmware data need to be byte - * swapped as it was read as a byte string and will be written - * as 32-bit dwords and byte 
swapped when written - */ - data = le32_to_cpu(*(__le32 *)(pu4BytePtr + i)); rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset), - data); + *(pu4BytePtr + i)); + } + + if (remainSize) { + offset = blockCount * blockSize; + bufferPtr += offset; + for (i = 0; i < remainSize; i++) { + rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS + + offset + i), *(bufferPtr + i)); + } } } @@ -262,10 +227,10 @@ int rtl92c_download_fw(struct ieee80211_hw *hw) u32 fwsize; enum version_8192c version = rtlhal->version; + pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name); if (!rtlhal->pfirmware) return 1; - pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name); pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware; pfwdata = (u8 *) rtlhal->pfirmware; fwsize = rtlhal->fwsize; @@ -273,9 +238,8 @@ int rtl92c_download_fw(struct ieee80211_hw *hw) if (IS_FW_HEADER_EXIST(pfwheader)) { RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, ("Firmware Version(%d), Signature(%#x),Size(%d)\n", - le16_to_cpu(pfwheader->version), - le16_to_cpu(pfwheader->signature), - (uint)sizeof(struct rtl92c_firmware_header))); + pfwheader->version, pfwheader->signature, + (uint)sizeof(struct rtl92c_firmware_header))); pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header); fwsize = fwsize - sizeof(struct rtl92c_firmware_header); diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/trunk/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h index cec5a3a1cc53..3d5823c12621 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h @@ -32,32 +32,32 @@ #define FW_8192C_SIZE 0x3000 #define FW_8192C_START_ADDRESS 0x1000 -#define FW_8192C_END_ADDRESS 0x1FFF +#define FW_8192C_END_ADDRESS 0x3FFF #define FW_8192C_PAGE_SIZE 4096 #define FW_8192C_POLLING_DELAY 5 #define FW_8192C_POLLING_TIMEOUT_COUNT 100 #define IS_FW_HEADER_EXIST(_pfwhdr) \ - ((le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x92C0 ||\ - (le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x88C0) + ((_pfwhdr->signature&0xFFF0) == 0x92C0 ||\ + (_pfwhdr->signature&0xFFF0) == 0x88C0) struct rtl92c_firmware_header { - __le16 signature; + u16 signature; u8 category; u8 function; - __le16 version; + u16 version; u8 subversion; u8 rsvd1; u8 month; u8 date; u8 hour; u8 minute; - __le16 ramcodeSize; - __le16 rsvd2; - __le32 svnindex; - __le32 rsvd3; - __le32 rsvd4; - __le32 rsvd5; + u16 ramcodeSize; + u16 rsvd2; + u32 svnindex; + u32 rsvd3; + u32 rsvd4; + u32 rsvd5; }; enum rtl8192c_h2c_cmd { @@ -94,6 +94,5 @@ void rtl92c_firmware_selfreset(struct ieee80211_hw *hw); void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); -void usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, void *data, u16 len); #endif diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/trunk/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c index 89ef6982ce50..f2aa33dc4d78 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c @@ -98,9 +98,9 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) rtl8192ce_bt_reg_init(hw); - rtlpriv->dm.dm_initialgain_enable = true; + rtlpriv->dm.dm_initialgain_enable = 1; rtlpriv->dm.dm_flag = 0; - rtlpriv->dm.disable_framebursting = false; + rtlpriv->dm.disable_framebursting = 0; rtlpriv->dm.thermalvalue = 0; rtlpci->transmit_config = CFENDFORM | BIT(12) | 
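
The __le16 annotations and le16_to_cpu() conversions touched in the firmware-header hunks above follow the usual rule for data that is little-endian in the firmware image or EEPROM: keep it in __leXX fields and convert once at the point of use. A small sketch (the struct and helper are illustrative stand-ins), reusing the signature check from IS_FW_HEADER_EXIST():

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative layout; the real one is struct rtl92c_firmware_header. */
struct fw_header {
	__le16 signature;	/* stored little-endian in the firmware image */
	__le16 version;
};

static bool fw_header_ok(const struct fw_header *hdr)
{
	u16 sig = le16_to_cpu(hdr->signature);	/* convert once, at the point of use */

	/* same test as IS_FW_HEADER_EXIST() */
	return (sig & 0xFFF0) == 0x92C0 || (sig & 0xFFF0) == 0x88C0;
}
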
BIT(13); diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c index 124cf633861c..814c05df51e8 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c @@ -498,7 +498,7 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw) } RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"), hwinfo, HWSET_MAX_SIZE); - eeprom_id = le16_to_cpu(*((__le16 *)&hwinfo[0])); + eeprom_id = *((u16 *)&hwinfo[0]); if (eeprom_id != RTL8190_EEPROM_ID) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("EEPROM ID(%#x) is invalid!!\n", eeprom_id)); @@ -516,14 +516,13 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw) pr_info("MAC address: %pM\n", rtlefuse->dev_addr); _rtl92cu_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag, hwinfo); - rtlefuse->eeprom_vid = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VID]); - rtlefuse->eeprom_did = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_DID]); + rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID]; + rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID]; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, (" VID = 0x%02x PID = 0x%02x\n", rtlefuse->eeprom_vid, rtlefuse->eeprom_did)); rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; - rtlefuse->eeprom_version = - le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VERSION]); + rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION]; rtlefuse->txpwr_fromeprom = true; rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID]; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, @@ -2436,7 +2435,7 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid) "%x\n", ppsc->hwradiooff, e_rfpowerstate_toset)); } if (actuallyset) { - ppsc->hwradiooff = true; + ppsc->hwradiooff = 1; if (e_rfpowerstate_toset == ERFON) { if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) && RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM)) diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c index 9e0c8fcdf90f..060a06f4a885 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c @@ -84,7 +84,6 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw) } } rtlhal->version = (enum version_8192c)chip_version; - pr_info("rtl8192cu: Chip version 0x%x\n", chip_version); switch (rtlhal->version) { case VERSION_NORMAL_TSMC_CHIP_92C_1T2R: RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 3527c7957b45..c244f2f1b83f 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -57,9 +57,9 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw) const struct firmware *firmware; int err; - rtlpriv->dm.dm_initialgain_enable = true; + rtlpriv->dm.dm_initialgain_enable = 1; rtlpriv->dm.dm_flag = 0; - rtlpriv->dm.disable_framebursting = false; + rtlpriv->dm.disable_framebursting = 0; rtlpriv->dm.thermalvalue = 0; rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; rtlpriv->rtlhal.pfirmware = vmalloc(0x4000); @@ -275,8 +275,6 @@ static struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8191, rtl92cu_hal_cfg)}, /****** 8188CU ********/ - /* RTL8188CTV */ - {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x018a, rtl92cu_hal_cfg)}, /* 8188CE-VAU USB minCard */ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8170, 
rtl92cu_hal_cfg)}, /* 8188cu 1*1 dongle */ @@ -293,14 +291,14 @@ static struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)}, /* 8188RU in Alfa AWUS036NHR */ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)}, - /* RTL8188CUS-VL */ - {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)}, /* 8188 Combo for BC4 */ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)}, /****** 8192CU ********/ + /* 8191cu 1*2 */ + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)}, /* 8192cu 2*2 */ - {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8178, rtl92cu_hal_cfg)}, + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)}, /* 8192CE-VAU USB minCard */ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817c, rtl92cu_hal_cfg)}, @@ -311,17 +309,13 @@ static struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/ - {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ - {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ + {RTL_USB_DEVICE(0x0Df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/ /* HP - Lite-On ,8188CUS Slim Combo */ {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)}, {RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */ {RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/ - {RTL_USB_DEVICE(0x2019, 0x4902, rtl92cu_hal_cfg)}, /*Planex - Etop*/ {RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/ - /*SW-WF02-AD15 -Abocom*/ - {RTL_USB_DEVICE(0x2019, 0xab2e, rtl92cu_hal_cfg)}, {RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/ {RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/ {RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/ @@ -332,36 +326,14 @@ static struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(0x4855, 0x0091, rtl92cu_hal_cfg)}, /* NetweeN-Feixun */ {RTL_USB_DEVICE(0x9846, 0x9041, rtl92cu_hal_cfg)}, /* Netgear Cameo */ - /****** 8188 RU ********/ - /* Netcore */ - {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x317f, rtl92cu_hal_cfg)}, - - /****** 8188CUS Slim Solo********/ - {RTL_USB_DEVICE(0x04f2, 0xaff7, rtl92cu_hal_cfg)}, /*Xavi*/ - {RTL_USB_DEVICE(0x04f2, 0xaff9, rtl92cu_hal_cfg)}, /*Xavi*/ - {RTL_USB_DEVICE(0x04f2, 0xaffa, rtl92cu_hal_cfg)}, /*Xavi*/ - - /****** 8188CUS Slim Combo ********/ - {RTL_USB_DEVICE(0x04f2, 0xaff8, rtl92cu_hal_cfg)}, /*Xavi*/ - {RTL_USB_DEVICE(0x04f2, 0xaffb, rtl92cu_hal_cfg)}, /*Xavi*/ - {RTL_USB_DEVICE(0x04f2, 0xaffc, rtl92cu_hal_cfg)}, /*Xavi*/ - {RTL_USB_DEVICE(0x2019, 0x1201, rtl92cu_hal_cfg)}, /*Planex-Vencer*/ - /****** 8192CU ********/ - {RTL_USB_DEVICE(0x050d, 0x2102, rtl92cu_hal_cfg)}, /*Belcom-Sercomm*/ - {RTL_USB_DEVICE(0x050d, 0x2103, rtl92cu_hal_cfg)}, /*Belcom-Edimax*/ {RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/ {RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/ {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/ - {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/ - {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/ - {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/ - {RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, 
/*Hawking-Edimax*/ {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/ {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/ - {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/ {RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/ {} }; diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c index b3cc7b949992..bc33b147f44f 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c @@ -491,7 +491,7 @@ static void _rtl_tx_desc_checksum(u8 *txdesc) SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0); for (index = 0; index < 16; index++) checksum = checksum ^ (*(ptr + index)); - SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, cpu_to_le16(checksum)); + SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum); } void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/trunk/drivers/net/wireless/rtlwifi/rtl8192de/sw.c index 7911c9c87085..149493f4c25c 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192de/sw.c +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192de/sw.c @@ -99,9 +99,9 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->dm.dm_initialgain_enable = true; rtlpriv->dm.dm_flag = 0; - rtlpriv->dm.disable_framebursting = false; + rtlpriv->dm.disable_framebursting = 0; rtlpriv->dm.thermalvalue = 0; - rtlpriv->dm.useramask = true; + rtlpriv->dm.useramask = 1; /* dual mac */ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G) diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/trunk/drivers/net/wireless/rtlwifi/rtl8192se/sw.c index 78723cf59491..92f49d522c56 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192se/sw.c +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192se/sw.c @@ -98,9 +98,9 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw) int err = 0; u16 earlyrxthreshold = 7; - rtlpriv->dm.dm_initialgain_enable = true; + rtlpriv->dm.dm_initialgain_enable = 1; rtlpriv->dm.dm_flag = 0; - rtlpriv->dm.disable_framebursting = false; + rtlpriv->dm.disable_framebursting = 0; rtlpriv->dm.thermalvalue = 0; rtlpriv->dm.useramask = true; diff --git a/trunk/drivers/net/wireless/rtlwifi/usb.c b/trunk/drivers/net/wireless/rtlwifi/usb.c index e956fa71d040..54cb8a60514d 100644 --- a/trunk/drivers/net/wireless/rtlwifi/usb.c +++ b/trunk/drivers/net/wireless/rtlwifi/usb.c @@ -34,14 +34,13 @@ #include "usb.h" #include "base.h" #include "ps.h" -#include "rtl8192c/fw_common.h" #define REALTEK_USB_VENQT_READ 0xC0 #define REALTEK_USB_VENQT_WRITE 0x40 #define REALTEK_USB_VENQT_CMD_REQ 0x05 #define REALTEK_USB_VENQT_CMD_IDX 0x00 -#define MAX_USBCTRL_VENDORREQ_TIMES 10 +#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254 static void usbctrl_async_callback(struct urb *urb) { @@ -83,7 +82,6 @@ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request, dr->wValue = cpu_to_le16(value); dr->wIndex = cpu_to_le16(index); dr->wLength = cpu_to_le16(len); - /* data are already in little-endian order */ memcpy(buf, pdata, len); usb_fill_control_urb(urb, udev, pipe, (unsigned char *)dr, buf, len, @@ -102,28 +100,16 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request, unsigned int pipe; int status; u8 reqtype; - int vendorreq_times = 0; - static int count; pipe = usb_rcvctrlpipe(udev, 0); /* read_in */ reqtype = 
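
An aside on the trx.c hunk above, not part of the patch: the TX descriptor checksum is simply an XOR over the first sixteen 16-bit words of the descriptor, computed after the checksum field itself has been cleared. A standalone sketch of the same computation on made-up data:

#include <stdint.h>
#include <stdio.h>

/* XOR of the first sixteen 16-bit words, as _rtl_tx_desc_checksum() does. */
static uint16_t txdesc_checksum(const uint16_t *desc16)
{
	uint16_t csum = 0;
	int i;

	for (i = 0; i < 16; i++)
		csum ^= desc16[i];
	return csum;
}

int main(void)
{
	uint16_t desc[16] = { 0x1234, 0x00ff };		/* made-up descriptor words */

	printf("descriptor checksum: 0x%04x\n", txdesc_checksum(desc));
	return 0;
}
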
REALTEK_USB_VENQT_READ; - do { - status = usb_control_msg(udev, pipe, request, reqtype, value, - index, pdata, len, 0); /*max. timeout*/ - if (status < 0) { - /* firmware download is checksumed, don't retry */ - if ((value >= FW_8192C_START_ADDRESS && - value <= FW_8192C_END_ADDRESS)) - break; - } else { - break; - } - } while (++vendorreq_times < MAX_USBCTRL_VENDORREQ_TIMES); + status = usb_control_msg(udev, pipe, request, reqtype, value, index, + pdata, len, 0); /* max. timeout */ - if (status < 0 && count++ < 4) + if (status < 0) pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n", - value, status, le32_to_cpu(*(u32 *)pdata)); + value, status, *(u32 *)pdata); return status; } @@ -143,7 +129,7 @@ static u32 _usb_read_sync(struct usb_device *udev, u32 addr, u16 len) wvalue = (u16)addr; _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len); - ret = le32_to_cpu(*data); + ret = *data; kfree(data); return ret; } @@ -175,12 +161,12 @@ static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val, u8 request; u16 wvalue; u16 index; - __le32 data; + u32 data; request = REALTEK_USB_VENQT_CMD_REQ; index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */ wvalue = (u16)(addr&0x0000ffff); - data = cpu_to_le32(val); + data = val; _usbctrl_vendorreq_async_write(udev, request, wvalue, index, &data, len); } @@ -206,30 +192,6 @@ static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val) _usb_write_async(to_usb_device(dev), addr, val, 4); } -static void _usb_writeN_sync(struct rtl_priv *rtlpriv, u32 addr, void *data, - u16 len) -{ - struct device *dev = rtlpriv->io.dev; - struct usb_device *udev = to_usb_device(dev); - u8 request = REALTEK_USB_VENQT_CMD_REQ; - u8 reqtype = REALTEK_USB_VENQT_WRITE; - u16 wvalue; - u16 index = REALTEK_USB_VENQT_CMD_IDX; - int pipe = usb_sndctrlpipe(udev, 0); /* write_out */ - u8 *buffer; - dma_addr_t dma_addr; - - wvalue = (u16)(addr&0x0000ffff); - buffer = usb_alloc_coherent(udev, (size_t)len, GFP_ATOMIC, &dma_addr); - if (!buffer) - return; - memcpy(buffer, data, len); - usb_control_msg(udev, pipe, request, reqtype, wvalue, - index, buffer, len, 50); - - usb_free_coherent(udev, (size_t)len, buffer, dma_addr); -} - static void _rtl_usb_io_handler_init(struct device *dev, struct ieee80211_hw *hw) { @@ -243,7 +205,6 @@ static void _rtl_usb_io_handler_init(struct device *dev, rtlpriv->io.read8_sync = _usb_read8_sync; rtlpriv->io.read16_sync = _usb_read16_sync; rtlpriv->io.read32_sync = _usb_read32_sync; - rtlpriv->io.writeN_sync = _usb_writeN_sync; } static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw) diff --git a/trunk/drivers/net/wireless/rtlwifi/wifi.h b/trunk/drivers/net/wireless/rtlwifi/wifi.h index cdaf1429fa0b..713c7ddba8eb 100644 --- a/trunk/drivers/net/wireless/rtlwifi/wifi.h +++ b/trunk/drivers/net/wireless/rtlwifi/wifi.h @@ -63,7 +63,6 @@ #define AC_MAX 4 #define QOS_QUEUE_NUM 4 #define RTL_MAC80211_NUM_QUEUE 5 -#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254 #define QBSS_LOAD_SIZE 5 #define MAX_WMMELE_LENGTH 64 @@ -944,10 +943,8 @@ struct rtl_io { unsigned long pci_base_addr; /*device I/O address */ void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val); - void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val); - void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val); - void (*writeN_sync) (struct rtl_priv *rtlpriv, u32 addr, void *buf, - u16 len); + void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, __le16 val); + void (*write32_async) (struct rtl_priv *rtlpriv, 
u32 addr, __le32 val); u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr); u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr); @@ -1488,7 +1485,7 @@ struct rtl_intf_ops { struct rtl_mod_params { /* default: 0 = using hardware encryption */ - bool sw_crypto; + int sw_crypto; /* default: 0 = DBG_EMERG (0)*/ int debug; @@ -1544,7 +1541,6 @@ struct rtl_hal_cfg { struct rtl_locks { /* mutex */ struct mutex conf_mutex; - struct mutex ps_mutex; /*spin lock */ spinlock_t ips_lock; @@ -1552,6 +1548,7 @@ struct rtl_locks { spinlock_t h2c_lock; spinlock_t rf_ps_lock; spinlock_t rf_lock; + spinlock_t lps_lock; spinlock_t waitq_lock; /*Dual mac*/ @@ -1576,8 +1573,7 @@ struct rtl_works { /* For SW LPS */ struct delayed_work ps_work; struct delayed_work ps_rfon_wq; - - struct work_struct lps_leave_work; + struct tasklet_struct ips_leave_tasklet; }; struct rtl_debug { diff --git a/trunk/drivers/net/wireless/wl1251/spi.c b/trunk/drivers/net/wireless/wl1251/spi.c index 6248c354fc5c..eaa5f9556200 100644 --- a/trunk/drivers/net/wireless/wl1251/spi.c +++ b/trunk/drivers/net/wireless/wl1251/spi.c @@ -319,6 +319,7 @@ static int __devexit wl1251_spi_remove(struct spi_device *spi) static struct spi_driver wl1251_spi_driver = { .driver = { .name = DRIVER_NAME, + .bus = &spi_bus_type, .owner = THIS_MODULE, }, diff --git a/trunk/drivers/net/wireless/wl12xx/Kconfig b/trunk/drivers/net/wireless/wl12xx/Kconfig index af08c8609c63..3fe388b87c2e 100644 --- a/trunk/drivers/net/wireless/wl12xx/Kconfig +++ b/trunk/drivers/net/wireless/wl12xx/Kconfig @@ -42,6 +42,16 @@ config WL12XX_SDIO If you choose to build a module, it'll be called wl12xx_sdio. Say N if unsure. +config WL12XX_SDIO_TEST + tristate "TI wl12xx SDIO testing support" + depends on WL12XX && MMC && WL12XX_SDIO + default n + ---help--- + This module adds support for the SDIO bus testing with the + TI wl12xx chipsets. You probably don't want this unless you are + testing a new hardware platform. Select this if you want to test the + SDIO bus which is connected to the wl12xx chip. 
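
Regarding the .bus = &spi_bus_type line added to wl1251_spi_driver above: a minimal SPI driver skeleton of the same shape is sketched below with illustrative names. Note that spi_register_driver() also sets the bus type on the driver's behalf, so whether the explicit assignment is required depends on the kernel version; treat this as a sketch, not a reference implementation.

#include <linux/module.h>
#include <linux/spi/spi.h>

static int my_wl_spi_probe(struct spi_device *spi)
{
	/* map resources, register with the wireless stack, ... */
	return 0;
}

static int my_wl_spi_remove(struct spi_device *spi)
{
	return 0;
}

static struct spi_driver my_wl_spi_driver = {
	.driver = {
		.name	= "my_wl_spi",
		.bus	= &spi_bus_type,	/* as added in the hunk above */
		.owner	= THIS_MODULE,
	},
	.probe	= my_wl_spi_probe,
	.remove	= my_wl_spi_remove,
};

static int __init my_wl_spi_init(void)
{
	return spi_register_driver(&my_wl_spi_driver);
}

static void __exit my_wl_spi_exit(void)
{
	spi_unregister_driver(&my_wl_spi_driver);
}

module_init(my_wl_spi_init);
module_exit(my_wl_spi_exit);
MODULE_LICENSE("GPL");
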
+ config WL12XX_PLATFORM_DATA bool depends on WL12XX_SDIO != n || WL1251_SDIO != n diff --git a/trunk/drivers/net/wireless/wl12xx/Makefile b/trunk/drivers/net/wireless/wl12xx/Makefile index fe67262ba19f..621b3483ca2c 100644 --- a/trunk/drivers/net/wireless/wl12xx/Makefile +++ b/trunk/drivers/net/wireless/wl12xx/Makefile @@ -3,11 +3,14 @@ wl12xx-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \ wl12xx_spi-objs = spi.o wl12xx_sdio-objs = sdio.o +wl12xx_sdio_test-objs = sdio_test.o wl12xx-$(CONFIG_NL80211_TESTMODE) += testmode.o obj-$(CONFIG_WL12XX) += wl12xx.o obj-$(CONFIG_WL12XX_SPI) += wl12xx_spi.o obj-$(CONFIG_WL12XX_SDIO) += wl12xx_sdio.o +obj-$(CONFIG_WL12XX_SDIO_TEST) += wl12xx_sdio_test.o + # small builtin driver bit obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o diff --git a/trunk/drivers/net/wireless/wl12xx/acx.c b/trunk/drivers/net/wireless/wl12xx/acx.c index 7537c401a448..ca044a743191 100644 --- a/trunk/drivers/net/wireless/wl12xx/acx.c +++ b/trunk/drivers/net/wireless/wl12xx/acx.c @@ -29,12 +29,11 @@ #include #include "wl12xx.h" -#include "debug.h" #include "wl12xx_80211.h" #include "reg.h" #include "ps.h" -int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif) +int wl1271_acx_wake_up_conditions(struct wl1271 *wl) { struct acx_wake_up_condition *wake_up; int ret; @@ -47,7 +46,7 @@ int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif) goto out; } - wake_up->role_id = wlvif->role_id; + wake_up->role_id = wl->role_id; wake_up->wake_up_event = wl->conf.conn.wake_up_event; wake_up->listen_interval = wl->conf.conn.listen_interval; @@ -85,8 +84,7 @@ int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth) return ret; } -int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif, - int power) +int wl1271_acx_tx_power(struct wl1271 *wl, int power) { struct acx_current_tx_power *acx; int ret; @@ -102,7 +100,7 @@ int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif, goto out; } - acx->role_id = wlvif->role_id; + acx->role_id = wl->role_id; acx->current_tx_power = power * 10; ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx)); @@ -116,7 +114,7 @@ int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif, return ret; } -int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif) +int wl1271_acx_feature_cfg(struct wl1271 *wl) { struct acx_feature_config *feature; int ret; @@ -130,7 +128,7 @@ int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif) } /* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */ - feature->role_id = wlvif->role_id; + feature->role_id = wl->role_id; feature->data_flow_options = 0; feature->options = 0; @@ -186,8 +184,33 @@ int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl) return ret; } -int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif, - enum acx_slot_type slot_time) +int wl1271_acx_pd_threshold(struct wl1271 *wl) +{ + struct acx_packet_detection *pd; + int ret; + + wl1271_debug(DEBUG_ACX, "acx data pd threshold"); + + pd = kzalloc(sizeof(*pd), GFP_KERNEL); + if (!pd) { + ret = -ENOMEM; + goto out; + } + + pd->threshold = cpu_to_le32(wl->conf.rx.packet_detection_threshold); + + ret = wl1271_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd)); + if (ret < 0) { + wl1271_warning("failed to set pd threshold: %d", ret); + goto out; + } + +out: + kfree(pd); + return ret; +} + +int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time) { struct acx_slot *slot; int ret; @@ -200,7 +223,7 @@ 
int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif, goto out; } - slot->role_id = wlvif->role_id; + slot->role_id = wl->role_id; slot->wone_index = STATION_WONE_INDEX; slot->slot_time = slot_time; @@ -215,8 +238,8 @@ int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif, return ret; } -int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif, - bool enable, void *mc_list, u32 mc_list_len) +int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable, + void *mc_list, u32 mc_list_len) { struct acx_dot11_grp_addr_tbl *acx; int ret; @@ -230,7 +253,7 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif, } /* MAC filtering */ - acx->role_id = wlvif->role_id; + acx->role_id = wl->role_id; acx->enabled = enable; acx->num_groups = mc_list_len; memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN); @@ -247,8 +270,7 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif, return ret; } -int wl1271_acx_service_period_timeout(struct wl1271 *wl, - struct wl12xx_vif *wlvif) +int wl1271_acx_service_period_timeout(struct wl1271 *wl) { struct acx_rx_timeout *rx_timeout; int ret; @@ -261,7 +283,7 @@ int wl1271_acx_service_period_timeout(struct wl1271 *wl, wl1271_debug(DEBUG_ACX, "acx service period timeout"); - rx_timeout->role_id = wlvif->role_id; + rx_timeout->role_id = wl->role_id; rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout); rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout); @@ -278,8 +300,7 @@ int wl1271_acx_service_period_timeout(struct wl1271 *wl, return ret; } -int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u32 rts_threshold) +int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold) { struct acx_rts_threshold *rts; int ret; @@ -299,7 +320,7 @@ int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif, goto out; } - rts->role_id = wlvif->role_id; + rts->role_id = wl->role_id; rts->threshold = cpu_to_le16((u16)rts_threshold); ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts)); @@ -342,8 +363,7 @@ int wl1271_acx_dco_itrim_params(struct wl1271 *wl) return ret; } -int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif, - bool enable_filter) +int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter) { struct acx_beacon_filter_option *beacon_filter = NULL; int ret = 0; @@ -360,7 +380,7 @@ int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif, goto out; } - beacon_filter->role_id = wlvif->role_id; + beacon_filter->role_id = wl->role_id; beacon_filter->enable = enable_filter; /* @@ -381,8 +401,7 @@ int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif, return ret; } -int wl1271_acx_beacon_filter_table(struct wl1271 *wl, - struct wl12xx_vif *wlvif) +int wl1271_acx_beacon_filter_table(struct wl1271 *wl) { struct acx_beacon_filter_ie_table *ie_table; int i, idx = 0; @@ -398,7 +417,7 @@ int wl1271_acx_beacon_filter_table(struct wl1271 *wl, } /* configure default beacon pass-through rules */ - ie_table->role_id = wlvif->role_id; + ie_table->role_id = wl->role_id; ie_table->num_ie = 0; for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) { struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]); @@ -439,8 +458,7 @@ int wl1271_acx_beacon_filter_table(struct wl1271 *wl, #define ACX_CONN_MONIT_DISABLE_VALUE 0xffffffff -int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif, - bool enable) +int 
wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable) { struct acx_conn_monit_params *acx; u32 threshold = ACX_CONN_MONIT_DISABLE_VALUE; @@ -461,7 +479,7 @@ int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif, timeout = wl->conf.conn.bss_lose_timeout; } - acx->role_id = wlvif->role_id; + acx->role_id = wl->role_id; acx->synch_fail_thold = cpu_to_le32(threshold); acx->bss_lose_timeout = cpu_to_le32(timeout); @@ -564,7 +582,7 @@ int wl1271_acx_cca_threshold(struct wl1271 *wl) return ret; } -int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif) +int wl1271_acx_bcn_dtim_options(struct wl1271 *wl) { struct acx_beacon_broadcast *bb; int ret; @@ -577,7 +595,7 @@ int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif) goto out; } - bb->role_id = wlvif->role_id; + bb->role_id = wl->role_id; bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout); bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout); bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps; @@ -594,7 +612,7 @@ int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif) return ret; } -int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid) +int wl1271_acx_aid(struct wl1271 *wl, u16 aid) { struct acx_aid *acx_aid; int ret; @@ -607,7 +625,7 @@ int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid) goto out; } - acx_aid->role_id = wlvif->role_id; + acx_aid->role_id = wl->role_id; acx_aid->aid = cpu_to_le16(aid); ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid)); @@ -650,8 +668,7 @@ int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask) return ret; } -int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif, - enum acx_preamble_type preamble) +int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble) { struct acx_preamble *acx; int ret; @@ -664,7 +681,7 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif, goto out; } - acx->role_id = wlvif->role_id; + acx->role_id = wl->role_id; acx->preamble = preamble; ret = wl1271_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx)); @@ -678,7 +695,7 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif, return ret; } -int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif, +int wl1271_acx_cts_protect(struct wl1271 *wl, enum acx_ctsprotect_type ctsprotect) { struct acx_ctsprotect *acx; @@ -692,7 +709,7 @@ int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif, goto out; } - acx->role_id = wlvif->role_id; + acx->role_id = wl->role_id; acx->ctsprotect = ctsprotect; ret = wl1271_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx)); @@ -722,7 +739,7 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats) return 0; } -int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif) +int wl1271_acx_sta_rate_policies(struct wl1271 *wl) { struct acx_rate_policy *acx; struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf; @@ -738,11 +755,11 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif) } wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x", - wlvif->basic_rate, wlvif->rate_set); + wl->basic_rate, wl->rate_set); /* configure one basic rate class */ - acx->rate_policy_idx = cpu_to_le32(wlvif->sta.basic_rate_idx); - acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->basic_rate); + acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE); 
+ acx->rate_policy.enabled_rates = cpu_to_le32(wl->basic_rate); acx->rate_policy.short_retry_limit = c->short_retry_limit; acx->rate_policy.long_retry_limit = c->long_retry_limit; acx->rate_policy.aflags = c->aflags; @@ -754,8 +771,8 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif) } /* configure one AP supported rate class */ - acx->rate_policy_idx = cpu_to_le32(wlvif->sta.ap_rate_idx); - acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->rate_set); + acx->rate_policy_idx = cpu_to_le32(ACX_TX_AP_FULL_RATE); + acx->rate_policy.enabled_rates = cpu_to_le32(wl->rate_set); acx->rate_policy.short_retry_limit = c->short_retry_limit; acx->rate_policy.long_retry_limit = c->long_retry_limit; acx->rate_policy.aflags = c->aflags; @@ -771,7 +788,7 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif) * (p2p packets should always go out with OFDM rates, even * if we are currently connected to 11b AP) */ - acx->rate_policy_idx = cpu_to_le32(wlvif->sta.p2p_rate_idx); + acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE_P2P); acx->rate_policy.enabled_rates = cpu_to_le32(CONF_TX_RATE_MASK_BASIC_P2P); acx->rate_policy.short_retry_limit = c->short_retry_limit; @@ -822,8 +839,8 @@ int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c, return ret; } -int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop) +int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max, + u8 aifsn, u16 txop) { struct acx_ac_cfg *acx; int ret = 0; @@ -838,7 +855,7 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif, goto out; } - acx->role_id = wlvif->role_id; + acx->role_id = wl->role_id; acx->ac = ac; acx->cw_min = cw_min; acx->cw_max = cpu_to_le16(cw_max); @@ -856,8 +873,7 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif, return ret; } -int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u8 queue_id, u8 channel_type, +int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type, u8 tsid, u8 ps_scheme, u8 ack_policy, u32 apsd_conf0, u32 apsd_conf1) { @@ -873,7 +889,7 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif, goto out; } - acx->role_id = wlvif->role_id; + acx->role_id = wl->role_id; acx->queue_id = queue_id; acx->channel_type = channel_type; acx->tsid = tsid; @@ -1082,8 +1098,7 @@ int wl1271_acx_init_rx_interrupt(struct wl1271 *wl) return ret; } -int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif, - bool enable) +int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable) { struct wl1271_acx_bet_enable *acx = NULL; int ret = 0; @@ -1099,7 +1114,7 @@ int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif, goto out; } - acx->role_id = wlvif->role_id; + acx->role_id = wl->role_id; acx->enable = enable ? 
CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE; acx->max_consecutive = wl->conf.conn.bet_max_consecutive; @@ -1114,8 +1129,7 @@ int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif, return ret; } -int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u8 enable, __be32 address) +int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address) { struct wl1271_acx_arp_filter *acx; int ret; @@ -1128,7 +1142,7 @@ int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif, goto out; } - acx->role_id = wlvif->role_id; + acx->role_id = wl->role_id; acx->version = ACX_IPV4_VERSION; acx->enable = enable; @@ -1175,8 +1189,7 @@ int wl1271_acx_pm_config(struct wl1271 *wl) return ret; } -int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, - bool enable) +int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable) { struct wl1271_acx_keep_alive_mode *acx = NULL; int ret = 0; @@ -1189,7 +1202,7 @@ int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, goto out; } - acx->role_id = wlvif->role_id; + acx->role_id = wl->role_id; acx->enabled = enable; ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx)); @@ -1203,8 +1216,7 @@ int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, return ret; } -int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u8 index, u8 tpl_valid) +int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid) { struct wl1271_acx_keep_alive_config *acx = NULL; int ret = 0; @@ -1217,7 +1229,7 @@ int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif, goto out; } - acx->role_id = wlvif->role_id; + acx->role_id = wl->role_id; acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval); acx->index = index; acx->tpl_validation = tpl_valid; @@ -1235,8 +1247,8 @@ int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif, return ret; } -int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif, - bool enable, s16 thold, u8 hyst) +int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable, + s16 thold, u8 hyst) { struct wl1271_acx_rssi_snr_trigger *acx = NULL; int ret = 0; @@ -1249,9 +1261,9 @@ int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif, goto out; } - wlvif->last_rssi_event = -1; + wl->last_rssi_event = -1; - acx->role_id = wlvif->role_id; + acx->role_id = wl->role_id; acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing); acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON; acx->type = WL1271_ACX_TRIG_TYPE_EDGE; @@ -1276,8 +1288,7 @@ int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif, return ret; } -int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl, - struct wl12xx_vif *wlvif) +int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl) { struct wl1271_acx_rssi_snr_avg_weights *acx = NULL; struct conf_roam_trigger_settings *c = &wl->conf.roam_trigger; @@ -1291,7 +1302,7 @@ int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl, goto out; } - acx->role_id = wlvif->role_id; + acx->role_id = wl->role_id; acx->rssi_beacon = c->avg_weight_rssi_beacon; acx->rssi_data = c->avg_weight_rssi_data; acx->snr_beacon = c->avg_weight_snr_beacon; @@ -1356,7 +1367,6 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl, } int wl1271_acx_set_ht_information(struct wl1271 *wl, - struct wl12xx_vif *wlvif, u16 ht_operation_mode) { struct wl1271_acx_ht_information *acx; @@ -1370,7 +1380,7 @@ int 
wl1271_acx_set_ht_information(struct wl1271 *wl, goto out; } - acx->role_id = wlvif->role_id; + acx->role_id = wl->role_id; acx->ht_protection = (u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION); acx->rifs_mode = 0; @@ -1392,8 +1402,7 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl, } /* Configure BA session initiator/receiver parameters setting in the FW. */ -int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl, - struct wl12xx_vif *wlvif) +int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl) { struct wl1271_acx_ba_initiator_policy *acx; int ret; @@ -1407,7 +1416,7 @@ int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl, } /* set for the current role */ - acx->role_id = wlvif->role_id; + acx->role_id = wl->role_id; acx->tid_bitmap = wl->conf.ht.tx_ba_tid_bitmap; acx->win_size = wl->conf.ht.tx_ba_win_size; acx->inactivity_timeout = wl->conf.ht.inactivity_timeout; @@ -1485,8 +1494,7 @@ int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime) return ret; } -int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif, - bool enable) +int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable) { struct wl1271_acx_ps_rx_streaming *rx_streaming; u32 conf_queues, enable_queues; @@ -1515,7 +1523,7 @@ int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif, if (!(conf_queues & BIT(i))) continue; - rx_streaming->role_id = wlvif->role_id; + rx_streaming->role_id = wl->role_id; rx_streaming->tid = i; rx_streaming->enable = enable_queues & BIT(i); rx_streaming->period = wl->conf.rx_streaming.interval; @@ -1534,7 +1542,7 @@ int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif, return ret; } -int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif) +int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl) { struct wl1271_acx_ap_max_tx_retry *acx = NULL; int ret; @@ -1545,7 +1553,7 @@ int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif) if (!acx) return -ENOMEM; - acx->role_id = wlvif->role_id; + acx->role_id = wl->role_id; acx->max_tx_retry = cpu_to_le16(wl->conf.tx.max_tx_retries); ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx)); @@ -1559,7 +1567,7 @@ int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif) return ret; } -int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif) +int wl1271_acx_config_ps(struct wl1271 *wl) { struct wl1271_acx_config_ps *config_ps; int ret; @@ -1574,7 +1582,7 @@ int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif) config_ps->exit_retries = wl->conf.conn.psm_exit_retries; config_ps->enter_retries = wl->conf.conn.psm_entry_retries; - config_ps->null_data_rate = cpu_to_le32(wlvif->basic_rate); + config_ps->null_data_rate = cpu_to_le32(wl->basic_rate); ret = wl1271_cmd_configure(wl, ACX_CONFIG_PS, config_ps, sizeof(*config_ps)); diff --git a/trunk/drivers/net/wireless/wl12xx/acx.h b/trunk/drivers/net/wireless/wl12xx/acx.h index 69892b40c2df..e3f93b4b3429 100644 --- a/trunk/drivers/net/wireless/wl12xx/acx.h +++ b/trunk/drivers/net/wireless/wl12xx/acx.h @@ -171,6 +171,13 @@ struct acx_rx_msdu_lifetime { __le32 lifetime; } __packed; +struct acx_packet_detection { + struct acx_header header; + + __le32 threshold; +} __packed; + + enum acx_slot_type { SLOT_TIME_LONG = 0, SLOT_TIME_SHORT = 1, @@ -647,6 +654,11 @@ struct acx_rate_class { u8 reserved; }; +#define ACX_TX_BASIC_RATE 0 +#define ACX_TX_AP_FULL_RATE 1 +#define ACX_TX_BASIC_RATE_P2P 2 +#define ACX_TX_AP_MODE_MGMT_RATE 4 +#define 
ACX_TX_AP_MODE_BCST_RATE 5 struct acx_rate_policy { struct acx_header header; @@ -1222,48 +1234,39 @@ enum { }; -int wl1271_acx_wake_up_conditions(struct wl1271 *wl, - struct wl12xx_vif *wlvif); +int wl1271_acx_wake_up_conditions(struct wl1271 *wl); int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth); -int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif, - int power); -int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl1271_acx_tx_power(struct wl1271 *wl, int power); +int wl1271_acx_feature_cfg(struct wl1271 *wl); int wl1271_acx_mem_map(struct wl1271 *wl, struct acx_header *mem_map, size_t len); int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl); -int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif, - enum acx_slot_type slot_time); -int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif, - bool enable, void *mc_list, u32 mc_list_len); -int wl1271_acx_service_period_timeout(struct wl1271 *wl, - struct wl12xx_vif *wlvif); -int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u32 rts_threshold); +int wl1271_acx_pd_threshold(struct wl1271 *wl); +int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time); +int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable, + void *mc_list, u32 mc_list_len); +int wl1271_acx_service_period_timeout(struct wl1271 *wl); +int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold); int wl1271_acx_dco_itrim_params(struct wl1271 *wl); -int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif, - bool enable_filter); -int wl1271_acx_beacon_filter_table(struct wl1271 *wl, - struct wl12xx_vif *wlvif); -int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif, - bool enable); +int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter); +int wl1271_acx_beacon_filter_table(struct wl1271 *wl); +int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable); int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable); int wl12xx_acx_sg_cfg(struct wl1271 *wl); int wl1271_acx_cca_threshold(struct wl1271 *wl); -int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif); -int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid); +int wl1271_acx_bcn_dtim_options(struct wl1271 *wl); +int wl1271_acx_aid(struct wl1271 *wl, u16 aid); int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask); -int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif, - enum acx_preamble_type preamble); -int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif, +int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble); +int wl1271_acx_cts_protect(struct wl1271 *wl, enum acx_ctsprotect_type ctsprotect); int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats); -int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl1271_acx_sta_rate_policies(struct wl1271 *wl); int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c, u8 idx); -int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop); -int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u8 queue_id, u8 channel_type, +int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max, + u8 aifsn, u16 txop); +int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type, u8 tsid, u8 ps_scheme, u8 ack_policy, u32 
apsd_conf0, u32 apsd_conf1); int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold); @@ -1273,34 +1276,26 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl); int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap); int wl1271_acx_init_rx_interrupt(struct wl1271 *wl); int wl1271_acx_smart_reflex(struct wl1271 *wl); -int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif, - bool enable); -int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u8 enable, __be32 address); +int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable); +int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address); int wl1271_acx_pm_config(struct wl1271 *wl); -int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *vif, - bool enable); -int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u8 index, u8 tpl_valid); -int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif, - bool enable, s16 thold, u8 hyst); -int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl, - struct wl12xx_vif *wlvif); +int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable); +int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid); +int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable, + s16 thold, u8 hyst); +int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl); int wl1271_acx_set_ht_capabilities(struct wl1271 *wl, struct ieee80211_sta_ht_cap *ht_cap, bool allow_ht_operation, u8 hlid); int wl1271_acx_set_ht_information(struct wl1271 *wl, - struct wl12xx_vif *wlvif, u16 ht_operation_mode); -int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl, - struct wl12xx_vif *wlvif); +int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl); int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn, bool enable, u8 peer_hlid); int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime); -int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif, - bool enable); -int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif); -int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable); +int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl); +int wl1271_acx_config_ps(struct wl1271 *wl); int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr); int wl1271_acx_fm_coex(struct wl1271 *wl); int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl); diff --git a/trunk/drivers/net/wireless/wl12xx/boot.c b/trunk/drivers/net/wireless/wl12xx/boot.c index 8f9cf5a816ea..681337914976 100644 --- a/trunk/drivers/net/wireless/wl12xx/boot.c +++ b/trunk/drivers/net/wireless/wl12xx/boot.c @@ -25,7 +25,6 @@ #include #include -#include "debug.h" #include "acx.h" #include "reg.h" #include "boot.h" @@ -348,9 +347,6 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl) nvs_ptr += 3; for (i = 0; i < burst_len; i++) { - if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len) - goto out_badnvs; - val = (nvs_ptr[0] | (nvs_ptr[1] << 8) | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24)); @@ -362,9 +358,6 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl) nvs_ptr += 4; dest_addr += 4; } - - if (nvs_ptr >= (u8 *) wl->nvs + nvs_len) - goto out_badnvs; } /* @@ -376,10 +369,6 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl) */ nvs_ptr = (u8 *)wl->nvs + ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4); - - if (nvs_ptr >= (u8 *) wl->nvs + nvs_len) - goto out_badnvs; - nvs_len -= nvs_ptr - (u8 *)wl->nvs; 
/* Now we must set the partition correctly */ @@ -395,10 +384,6 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl) kfree(nvs_aligned); return 0; - -out_badnvs: - wl1271_error("nvs data is malformed"); - return -EILSEQ; } static void wl1271_boot_enable_interrupts(struct wl1271 *wl) diff --git a/trunk/drivers/net/wireless/wl12xx/cmd.c b/trunk/drivers/net/wireless/wl12xx/cmd.c index 25990bd38be6..a52299e548fa 100644 --- a/trunk/drivers/net/wireless/wl12xx/cmd.c +++ b/trunk/drivers/net/wireless/wl12xx/cmd.c @@ -29,7 +29,6 @@ #include #include "wl12xx.h" -#include "debug.h" #include "reg.h" #include "io.h" #include "acx.h" @@ -121,11 +120,6 @@ int wl1271_cmd_general_parms(struct wl1271 *wl) if (!wl->nvs) return -ENODEV; - if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { - wl1271_warning("FEM index from INI out of bounds"); - return -EINVAL; - } - gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL); if (!gen_parms) return -ENOMEM; @@ -149,12 +143,6 @@ int wl1271_cmd_general_parms(struct wl1271 *wl) gp->tx_bip_fem_manufacturer = gen_parms->general_params.tx_bip_fem_manufacturer; - if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { - wl1271_warning("FEM index from FW out of bounds"); - ret = -EINVAL; - goto out; - } - wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n", answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer); @@ -174,11 +162,6 @@ int wl128x_cmd_general_parms(struct wl1271 *wl) if (!wl->nvs) return -ENODEV; - if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { - wl1271_warning("FEM index from ini out of bounds"); - return -EINVAL; - } - gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL); if (!gen_parms) return -ENOMEM; @@ -203,12 +186,6 @@ int wl128x_cmd_general_parms(struct wl1271 *wl) gp->tx_bip_fem_manufacturer = gen_parms->general_params.tx_bip_fem_manufacturer; - if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { - wl1271_warning("FEM index from FW out of bounds"); - ret = -EINVAL; - goto out; - } - wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n", answer ? 
"auto" : "manual", gp->tx_bip_fem_manufacturer); @@ -381,8 +358,7 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask) return 0; } -int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type, - u8 *role_id) +int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id) { struct wl12xx_cmd_role_enable *cmd; int ret; @@ -405,7 +381,7 @@ int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type, goto out_free; } - memcpy(cmd->mac_address, addr, ETH_ALEN); + memcpy(cmd->mac_address, wl->mac_addr, ETH_ALEN); cmd->role_type = role_type; ret = wl1271_cmd_send(wl, CMD_ROLE_ENABLE, cmd, sizeof(*cmd), 0); @@ -457,41 +433,37 @@ int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id) return ret; } -int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid) +static int wl12xx_allocate_link(struct wl1271 *wl, u8 *hlid) { u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS); if (link >= WL12XX_MAX_LINKS) return -EBUSY; __set_bit(link, wl->links_map); - __set_bit(link, wlvif->links_map); *hlid = link; return 0; } -void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid) +static void wl12xx_free_link(struct wl1271 *wl, u8 *hlid) { if (*hlid == WL12XX_INVALID_LINK_ID) return; __clear_bit(*hlid, wl->links_map); - __clear_bit(*hlid, wlvif->links_map); *hlid = WL12XX_INVALID_LINK_ID; } -static int wl12xx_get_new_session_id(struct wl1271 *wl, - struct wl12xx_vif *wlvif) +static int wl12xx_get_new_session_id(struct wl1271 *wl) { - if (wlvif->session_counter >= SESSION_COUNTER_MAX) - wlvif->session_counter = 0; + if (wl->session_counter >= SESSION_COUNTER_MAX) + wl->session_counter = 0; - wlvif->session_counter++; + wl->session_counter++; - return wlvif->session_counter; + return wl->session_counter; } -static int wl12xx_cmd_role_start_dev(struct wl1271 *wl, - struct wl12xx_vif *wlvif) +int wl12xx_cmd_role_start_dev(struct wl1271 *wl) { struct wl12xx_cmd_role_start *cmd; int ret; @@ -502,20 +474,20 @@ static int wl12xx_cmd_role_start_dev(struct wl1271 *wl, goto out; } - wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wlvif->dev_role_id); + wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wl->dev_role_id); - cmd->role_id = wlvif->dev_role_id; - if (wlvif->band == IEEE80211_BAND_5GHZ) + cmd->role_id = wl->dev_role_id; + if (wl->band == IEEE80211_BAND_5GHZ) cmd->band = WL12XX_BAND_5GHZ; - cmd->channel = wlvif->channel; + cmd->channel = wl->channel; - if (wlvif->dev_hlid == WL12XX_INVALID_LINK_ID) { - ret = wl12xx_allocate_link(wl, wlvif, &wlvif->dev_hlid); + if (wl->dev_hlid == WL12XX_INVALID_LINK_ID) { + ret = wl12xx_allocate_link(wl, &wl->dev_hlid); if (ret) goto out_free; } - cmd->device.hlid = wlvif->dev_hlid; - cmd->device.session = wlvif->session_counter; + cmd->device.hlid = wl->dev_hlid; + cmd->device.session = wl->session_counter; wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d", cmd->role_id, cmd->device.hlid, cmd->device.session); @@ -530,7 +502,9 @@ static int wl12xx_cmd_role_start_dev(struct wl1271 *wl, err_hlid: /* clear links on error */ - wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid); + __clear_bit(wl->dev_hlid, wl->links_map); + wl->dev_hlid = WL12XX_INVALID_LINK_ID; + out_free: kfree(cmd); @@ -539,13 +513,12 @@ static int wl12xx_cmd_role_start_dev(struct wl1271 *wl, return ret; } -static int wl12xx_cmd_role_stop_dev(struct wl1271 *wl, - struct wl12xx_vif *wlvif) +int wl12xx_cmd_role_stop_dev(struct wl1271 *wl) { struct wl12xx_cmd_role_stop *cmd; int ret; - if 
(WARN_ON(wlvif->dev_hlid == WL12XX_INVALID_LINK_ID)) + if (WARN_ON(wl->dev_hlid == WL12XX_INVALID_LINK_ID)) return -EINVAL; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); @@ -556,7 +529,7 @@ static int wl12xx_cmd_role_stop_dev(struct wl1271 *wl, wl1271_debug(DEBUG_CMD, "cmd role stop dev"); - cmd->role_id = wlvif->dev_role_id; + cmd->role_id = wl->dev_role_id; cmd->disc_type = DISCONNECT_IMMEDIATE; cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED); @@ -572,7 +545,7 @@ static int wl12xx_cmd_role_stop_dev(struct wl1271 *wl, goto out_free; } - wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid); + wl12xx_free_link(wl, &wl->dev_hlid); out_free: kfree(cmd); @@ -581,9 +554,8 @@ static int wl12xx_cmd_role_stop_dev(struct wl1271 *wl, return ret; } -int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif) +int wl12xx_cmd_role_start_sta(struct wl1271 *wl) { - struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); struct wl12xx_cmd_role_start *cmd; int ret; @@ -593,33 +565,33 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif) goto out; } - wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wlvif->role_id); + wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wl->role_id); - cmd->role_id = wlvif->role_id; - if (wlvif->band == IEEE80211_BAND_5GHZ) + cmd->role_id = wl->role_id; + if (wl->band == IEEE80211_BAND_5GHZ) cmd->band = WL12XX_BAND_5GHZ; - cmd->channel = wlvif->channel; - cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set); - cmd->sta.beacon_interval = cpu_to_le16(wlvif->beacon_int); + cmd->channel = wl->channel; + cmd->sta.basic_rate_set = cpu_to_le32(wl->basic_rate_set); + cmd->sta.beacon_interval = cpu_to_le16(wl->beacon_int); cmd->sta.ssid_type = WL12XX_SSID_TYPE_ANY; - cmd->sta.ssid_len = wlvif->ssid_len; - memcpy(cmd->sta.ssid, wlvif->ssid, wlvif->ssid_len); - memcpy(cmd->sta.bssid, vif->bss_conf.bssid, ETH_ALEN); - cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set); + cmd->sta.ssid_len = wl->ssid_len; + memcpy(cmd->sta.ssid, wl->ssid, wl->ssid_len); + memcpy(cmd->sta.bssid, wl->bssid, ETH_ALEN); + cmd->sta.local_rates = cpu_to_le32(wl->rate_set); - if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) { - ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid); + if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) { + ret = wl12xx_allocate_link(wl, &wl->sta_hlid); if (ret) goto out_free; } - cmd->sta.hlid = wlvif->sta.hlid; - cmd->sta.session = wl12xx_get_new_session_id(wl, wlvif); - cmd->sta.remote_rates = cpu_to_le32(wlvif->rate_set); + cmd->sta.hlid = wl->sta_hlid; + cmd->sta.session = wl12xx_get_new_session_id(wl); + cmd->sta.remote_rates = cpu_to_le32(wl->rate_set); wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d " "basic_rate_set: 0x%x, remote_rates: 0x%x", - wlvif->role_id, cmd->sta.hlid, cmd->sta.session, - wlvif->basic_rate_set, wlvif->rate_set); + wl->role_id, cmd->sta.hlid, cmd->sta.session, + wl->basic_rate_set, wl->rate_set); ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0); if (ret < 0) { @@ -631,7 +603,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif) err_hlid: /* clear links on error. 
*/ - wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid); + wl12xx_free_link(wl, &wl->sta_hlid); out_free: kfree(cmd); @@ -641,12 +613,12 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif) } /* use this function to stop ibss as well */ -int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif) +int wl12xx_cmd_role_stop_sta(struct wl1271 *wl) { struct wl12xx_cmd_role_stop *cmd; int ret; - if (WARN_ON(wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)) + if (WARN_ON(wl->sta_hlid == WL12XX_INVALID_LINK_ID)) return -EINVAL; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); @@ -655,9 +627,9 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif) goto out; } - wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wlvif->role_id); + wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wl->role_id); - cmd->role_id = wlvif->role_id; + cmd->role_id = wl->role_id; cmd->disc_type = DISCONNECT_IMMEDIATE; cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED); @@ -667,7 +639,7 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif) goto out_free; } - wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid); + wl12xx_free_link(wl, &wl->sta_hlid); out_free: kfree(cmd); @@ -676,17 +648,16 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif) return ret; } -int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif) +int wl12xx_cmd_role_start_ap(struct wl1271 *wl) { struct wl12xx_cmd_role_start *cmd; - struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); - struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf; int ret; - wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wlvif->role_id); + wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wl->role_id); /* trying to use hidden SSID with an old hostapd version */ - if (wlvif->ssid_len == 0 && !bss_conf->hidden_ssid) { + if (wl->ssid_len == 0 && !bss_conf->hidden_ssid) { wl1271_error("got a null SSID from beacon/bss"); ret = -EINVAL; goto out; @@ -698,30 +669,30 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif) goto out; } - ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.global_hlid); + ret = wl12xx_allocate_link(wl, &wl->ap_global_hlid); if (ret < 0) goto out_free; - ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.bcast_hlid); + ret = wl12xx_allocate_link(wl, &wl->ap_bcast_hlid); if (ret < 0) goto out_free_global; - cmd->role_id = wlvif->role_id; + cmd->role_id = wl->role_id; cmd->ap.aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period); cmd->ap.bss_index = WL1271_AP_BSS_INDEX; - cmd->ap.global_hlid = wlvif->ap.global_hlid; - cmd->ap.broadcast_hlid = wlvif->ap.bcast_hlid; - cmd->ap.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set); - cmd->ap.beacon_interval = cpu_to_le16(wlvif->beacon_int); + cmd->ap.global_hlid = wl->ap_global_hlid; + cmd->ap.broadcast_hlid = wl->ap_bcast_hlid; + cmd->ap.basic_rate_set = cpu_to_le32(wl->basic_rate_set); + cmd->ap.beacon_interval = cpu_to_le16(wl->beacon_int); cmd->ap.dtim_interval = bss_conf->dtim_period; cmd->ap.beacon_expiry = WL1271_AP_DEF_BEACON_EXP; - cmd->channel = wlvif->channel; + cmd->channel = wl->channel; if (!bss_conf->hidden_ssid) { /* take the SSID from the beacon for backward compatibility */ cmd->ap.ssid_type = WL12XX_SSID_TYPE_PUBLIC; - cmd->ap.ssid_len = wlvif->ssid_len; - memcpy(cmd->ap.ssid, wlvif->ssid, wlvif->ssid_len); + cmd->ap.ssid_len = wl->ssid_len; + memcpy(cmd->ap.ssid, wl->ssid, wl->ssid_len); } else { cmd->ap.ssid_type = 
WL12XX_SSID_TYPE_HIDDEN; cmd->ap.ssid_len = bss_conf->ssid_len; @@ -730,7 +701,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif) cmd->ap.local_rates = cpu_to_le32(0xffffffff); - switch (wlvif->band) { + switch (wl->band) { case IEEE80211_BAND_2GHZ: cmd->band = RADIO_BAND_2_4GHZ; break; @@ -738,7 +709,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif) cmd->band = RADIO_BAND_5GHZ; break; default: - wl1271_warning("ap start - unknown band: %d", (int)wlvif->band); + wl1271_warning("ap start - unknown band: %d", (int)wl->band); cmd->band = RADIO_BAND_2_4GHZ; break; } @@ -752,10 +723,10 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif) goto out_free; out_free_bcast: - wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid); + wl12xx_free_link(wl, &wl->ap_bcast_hlid); out_free_global: - wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid); + wl12xx_free_link(wl, &wl->ap_global_hlid); out_free: kfree(cmd); @@ -764,7 +735,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif) return ret; } -int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif) +int wl12xx_cmd_role_stop_ap(struct wl1271 *wl) { struct wl12xx_cmd_role_stop *cmd; int ret; @@ -775,9 +746,9 @@ int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif) goto out; } - wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wlvif->role_id); + wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wl->role_id); - cmd->role_id = wlvif->role_id; + cmd->role_id = wl->role_id; ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0); if (ret < 0) { @@ -785,8 +756,8 @@ int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif) goto out_free; } - wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid); - wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid); + wl12xx_free_link(wl, &wl->ap_bcast_hlid); + wl12xx_free_link(wl, &wl->ap_global_hlid); out_free: kfree(cmd); @@ -795,11 +766,10 @@ int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif) return ret; } -int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif) +int wl12xx_cmd_role_start_ibss(struct wl1271 *wl) { - struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); struct wl12xx_cmd_role_start *cmd; - struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf; int ret; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); @@ -808,36 +778,35 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif) goto out; } - wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wlvif->role_id); + wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wl->role_id); - cmd->role_id = wlvif->role_id; - if (wlvif->band == IEEE80211_BAND_5GHZ) + cmd->role_id = wl->role_id; + if (wl->band == IEEE80211_BAND_5GHZ) cmd->band = WL12XX_BAND_5GHZ; - cmd->channel = wlvif->channel; - cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set); - cmd->ibss.beacon_interval = cpu_to_le16(wlvif->beacon_int); + cmd->channel = wl->channel; + cmd->ibss.basic_rate_set = cpu_to_le32(wl->basic_rate_set); + cmd->ibss.beacon_interval = cpu_to_le16(wl->beacon_int); cmd->ibss.dtim_interval = bss_conf->dtim_period; cmd->ibss.ssid_type = WL12XX_SSID_TYPE_ANY; - cmd->ibss.ssid_len = wlvif->ssid_len; - memcpy(cmd->ibss.ssid, wlvif->ssid, wlvif->ssid_len); - memcpy(cmd->ibss.bssid, vif->bss_conf.bssid, ETH_ALEN); - cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set); + cmd->ibss.ssid_len = wl->ssid_len; + 
memcpy(cmd->ibss.ssid, wl->ssid, wl->ssid_len); + memcpy(cmd->ibss.bssid, wl->bssid, ETH_ALEN); + cmd->sta.local_rates = cpu_to_le32(wl->rate_set); - if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) { - ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid); + if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) { + ret = wl12xx_allocate_link(wl, &wl->sta_hlid); if (ret) goto out_free; } - cmd->ibss.hlid = wlvif->sta.hlid; - cmd->ibss.remote_rates = cpu_to_le32(wlvif->rate_set); + cmd->ibss.hlid = wl->sta_hlid; + cmd->ibss.remote_rates = cpu_to_le32(wl->rate_set); wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d " "basic_rate_set: 0x%x, remote_rates: 0x%x", - wlvif->role_id, cmd->sta.hlid, cmd->sta.session, - wlvif->basic_rate_set, wlvif->rate_set); + wl->role_id, cmd->sta.hlid, cmd->sta.session, + wl->basic_rate_set, wl->rate_set); - wl1271_debug(DEBUG_CMD, "vif->bss_conf.bssid = %pM", - vif->bss_conf.bssid); + wl1271_debug(DEBUG_CMD, "wl->bssid = %pM", wl->bssid); ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0); if (ret < 0) { @@ -849,7 +818,7 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif) err_hlid: /* clear links on error. */ - wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid); + wl12xx_free_link(wl, &wl->sta_hlid); out_free: kfree(cmd); @@ -993,8 +962,7 @@ int wl1271_cmd_data_path(struct wl1271 *wl, bool enable) return ret; } -int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u8 ps_mode) +int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode) { struct wl1271_cmd_ps_params *ps_params = NULL; int ret = 0; @@ -1007,7 +975,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, goto out; } - ps_params->role_id = wlvif->role_id; + ps_params->role_id = wl->role_id; ps_params->ps_mode = ps_mode; ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params, @@ -1062,7 +1030,7 @@ int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, return ret; } -int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif) +int wl1271_cmd_build_null_data(struct wl1271 *wl) { struct sk_buff *skb = NULL; int size; @@ -1070,12 +1038,11 @@ int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif) int ret = -ENOMEM; - if (wlvif->bss_type == BSS_TYPE_IBSS) { + if (wl->bss_type == BSS_TYPE_IBSS) { size = sizeof(struct wl12xx_null_data_template); ptr = NULL; } else { - skb = ieee80211_nullfunc_get(wl->hw, - wl12xx_wlvif_to_vif(wlvif)); + skb = ieee80211_nullfunc_get(wl->hw, wl->vif); if (!skb) goto out; size = skb->len; @@ -1083,7 +1050,7 @@ int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif) } ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, ptr, size, 0, - wlvif->basic_rate); + wl->basic_rate); out: dev_kfree_skb(skb); @@ -1094,21 +1061,19 @@ int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif) } -int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl, - struct wl12xx_vif *wlvif) +int wl1271_cmd_build_klv_null_data(struct wl1271 *wl) { - struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); struct sk_buff *skb = NULL; int ret = -ENOMEM; - skb = ieee80211_nullfunc_get(wl->hw, vif); + skb = ieee80211_nullfunc_get(wl->hw, wl->vif); if (!skb) goto out; ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, skb->data, skb->len, CMD_TEMPL_KLV_IDX_NULL_DATA, - wlvif->basic_rate); + wl->basic_rate); out: dev_kfree_skb(skb); @@ -1119,35 +1084,32 @@ int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl, } -int wl1271_cmd_build_ps_poll(struct wl1271 *wl, 
struct wl12xx_vif *wlvif, - u16 aid) +int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid) { - struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); struct sk_buff *skb; int ret = 0; - skb = ieee80211_pspoll_get(wl->hw, vif); + skb = ieee80211_pspoll_get(wl->hw, wl->vif); if (!skb) goto out; ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, skb->data, - skb->len, 0, wlvif->basic_rate_set); + skb->len, 0, wl->basic_rate_set); out: dev_kfree_skb(skb); return ret; } -int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif, +int wl1271_cmd_build_probe_req(struct wl1271 *wl, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, u8 band) { - struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); struct sk_buff *skb; int ret; u32 rate; - skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len, + skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len, ie, ie_len); if (!skb) { ret = -ENOMEM; @@ -1156,7 +1118,7 @@ int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif, wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len); - rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]); + rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]); if (band == IEEE80211_BAND_2GHZ) ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, skb->data, skb->len, 0, rate); @@ -1170,22 +1132,20 @@ int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif, } struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl, - struct wl12xx_vif *wlvif, struct sk_buff *skb) { - struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); int ret; u32 rate; if (!skb) - skb = ieee80211_ap_probereq_get(wl->hw, vif); + skb = ieee80211_ap_probereq_get(wl->hw, wl->vif); if (!skb) goto out; wl1271_dump(DEBUG_SCAN, "AP PROBE REQ: ", skb->data, skb->len); - rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]); - if (wlvif->band == IEEE80211_BAND_2GHZ) + rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[wl->band]); + if (wl->band == IEEE80211_BAND_2GHZ) ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, skb->data, skb->len, 0, rate); else @@ -1199,11 +1159,9 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl, return skb; } -int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif, - __be32 ip_addr) +int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr) { int ret; - struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); struct wl12xx_arp_rsp_template tmpl; struct ieee80211_hdr_3addr *hdr; struct arphdr *arp_hdr; @@ -1215,8 +1173,8 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif, hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA | IEEE80211_FCTL_TODS); - memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN); - memcpy(hdr->addr2, vif->addr, ETH_ALEN); + memcpy(hdr->addr1, wl->vif->bss_conf.bssid, ETH_ALEN); + memcpy(hdr->addr2, wl->vif->addr, ETH_ALEN); memset(hdr->addr3, 0xff, ETH_ALEN); /* llc layer */ @@ -1232,26 +1190,25 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif, arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY); /* arp payload */ - memcpy(tmpl.sender_hw, vif->addr, ETH_ALEN); + memcpy(tmpl.sender_hw, wl->vif->addr, ETH_ALEN); tmpl.sender_ip = ip_addr; ret = wl1271_cmd_template_set(wl, CMD_TEMPL_ARP_RSP, &tmpl, sizeof(tmpl), 0, - wlvif->basic_rate); + wl->basic_rate); return ret; } -int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif) +int 
wl1271_build_qos_null_data(struct wl1271 *wl) { - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); struct ieee80211_qos_hdr template; memset(&template, 0, sizeof(template)); - memcpy(template.addr1, vif->bss_conf.bssid, ETH_ALEN); - memcpy(template.addr2, vif->addr, ETH_ALEN); - memcpy(template.addr3, vif->bss_conf.bssid, ETH_ALEN); + memcpy(template.addr1, wl->bssid, ETH_ALEN); + memcpy(template.addr2, wl->mac_addr, ETH_ALEN); + memcpy(template.addr3, wl->bssid, ETH_ALEN); template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC | @@ -1262,7 +1219,7 @@ int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif) return wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, &template, sizeof(template), 0, - wlvif->basic_rate); + wl->basic_rate); } int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid) @@ -1296,8 +1253,7 @@ int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid) return ret; } -int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u16 action, u8 id, u8 key_type, +int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, u8 key_size, const u8 *key, const u8 *addr, u32 tx_seq_32, u16 tx_seq_16) { @@ -1305,7 +1261,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, int ret = 0; /* hlid might have already been deleted */ - if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) + if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) return 0; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); @@ -1314,7 +1270,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, goto out; } - cmd->hlid = wlvif->sta.hlid; + cmd->hlid = wl->sta_hlid; if (key_type == KEY_WEP) cmd->lid_key_type = WEP_DEFAULT_LID_TYPE; @@ -1365,10 +1321,9 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, * TODO: merge with sta/ibss into 1 set_key function. 
* note there are slight diffs */ -int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u16 action, u8 id, u8 key_type, - u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32, - u16 tx_seq_16) +int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, + u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32, + u16 tx_seq_16) { struct wl1271_cmd_set_keys *cmd; int ret = 0; @@ -1378,7 +1333,7 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, if (!cmd) return -ENOMEM; - if (hlid == wlvif->ap.bcast_hlid) { + if (hlid == wl->ap_bcast_hlid) { if (key_type == KEY_WEP) lid_type = WEP_DEFAULT_LID_TYPE; else @@ -1456,8 +1411,7 @@ int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid) return ret; } -int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif, - struct ieee80211_sta *sta, u8 hlid) +int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid) { struct wl12xx_cmd_add_peer *cmd; int i, ret; @@ -1484,13 +1438,13 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif, else cmd->psd_type[i] = WL1271_PSD_LEGACY; - sta_rates = sta->supp_rates[wlvif->band]; + sta_rates = sta->supp_rates[wl->band]; if (sta->ht_cap.ht_supported) sta_rates |= sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET; cmd->supported_rates = cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates, - wlvif->band)); + wl->band)); wl1271_debug(DEBUG_CMD, "new peer rates=0x%x queues=0x%x", cmd->supported_rates, sta->uapsd_queues); @@ -1630,13 +1584,12 @@ int wl12xx_cmd_stop_fwlog(struct wl1271 *wl) return ret; } -static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u8 role_id) +static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id) { struct wl12xx_cmd_roc *cmd; int ret = 0; - wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wlvif->channel, role_id); + wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wl->channel, role_id); if (WARN_ON(role_id == WL12XX_INVALID_ROLE_ID)) return -EINVAL; @@ -1648,8 +1601,8 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, } cmd->role_id = role_id; - cmd->channel = wlvif->channel; - switch (wlvif->band) { + cmd->channel = wl->channel; + switch (wl->band) { case IEEE80211_BAND_2GHZ: cmd->band = RADIO_BAND_2_4GHZ; break; @@ -1657,7 +1610,7 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, cmd->band = RADIO_BAND_5GHZ; break; default: - wl1271_error("roc - unknown band: %d", (int)wlvif->band); + wl1271_error("roc - unknown band: %d", (int)wl->band); ret = -EINVAL; goto out_free; } @@ -1704,14 +1657,14 @@ static int wl12xx_cmd_croc(struct wl1271 *wl, u8 role_id) return ret; } -int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id) +int wl12xx_roc(struct wl1271 *wl, u8 role_id) { int ret = 0; if (WARN_ON(test_bit(role_id, wl->roc_map))) return 0; - ret = wl12xx_cmd_roc(wl, wlvif, role_id); + ret = wl12xx_cmd_roc(wl, role_id); if (ret < 0) goto out; @@ -1800,53 +1753,3 @@ int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl) out: return ret; } - -/* start dev role and roc on its channel */ -int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif) -{ - int ret; - - if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS || - wlvif->bss_type == BSS_TYPE_IBSS))) - return -EINVAL; - - ret = wl12xx_cmd_role_start_dev(wl, wlvif); - if (ret < 0) - goto out; - - ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id); - if (ret < 0) - goto out_stop; - - return 0; - -out_stop: - wl12xx_cmd_role_stop_dev(wl, wlvif); -out: - return ret; -} - -/* croc 
dev hlid, and stop the role */ -int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif) -{ - int ret; - - if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS || - wlvif->bss_type == BSS_TYPE_IBSS))) - return -EINVAL; - - /* flush all pending packets */ - wl1271_tx_work_locked(wl); - - if (test_bit(wlvif->dev_role_id, wl->roc_map)) { - ret = wl12xx_croc(wl, wlvif->dev_role_id); - if (ret < 0) - goto out; - } - - ret = wl12xx_cmd_role_stop_dev(wl, wlvif); - if (ret < 0) - goto out; -out: - return ret; -} diff --git a/trunk/drivers/net/wireless/wl12xx/cmd.h b/trunk/drivers/net/wireless/wl12xx/cmd.h index 3f7d0b93c24d..b7bd42769aa7 100644 --- a/trunk/drivers/net/wireless/wl12xx/cmd.h +++ b/trunk/drivers/net/wireless/wl12xx/cmd.h @@ -36,54 +36,45 @@ int wl128x_cmd_general_parms(struct wl1271 *wl); int wl1271_cmd_radio_parms(struct wl1271 *wl); int wl128x_cmd_radio_parms(struct wl1271 *wl); int wl1271_cmd_ext_radio_parms(struct wl1271 *wl); -int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type, - u8 *role_id); +int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id); int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id); -int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif); -int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif); -int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif); -int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif); -int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif); -int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif); -int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl12xx_cmd_role_start_dev(struct wl1271 *wl); +int wl12xx_cmd_role_stop_dev(struct wl1271 *wl); +int wl12xx_cmd_role_start_sta(struct wl1271 *wl); +int wl12xx_cmd_role_stop_sta(struct wl1271 *wl); +int wl12xx_cmd_role_start_ap(struct wl1271 *wl); +int wl12xx_cmd_role_stop_ap(struct wl1271 *wl); +int wl12xx_cmd_role_start_ibss(struct wl1271 *wl); int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer); int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len); int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len); int wl1271_cmd_data_path(struct wl1271 *wl, bool enable); -int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u8 ps_mode); +int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode); int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, size_t len); int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, void *buf, size_t buf_len, int index, u32 rates); -int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif); -int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u16 aid); -int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif, +int wl1271_cmd_build_null_data(struct wl1271 *wl); +int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid); +int wl1271_cmd_build_probe_req(struct wl1271 *wl, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, u8 band); struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl, - struct wl12xx_vif *wlvif, struct sk_buff *skb); -int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif, - __be32 ip_addr); -int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif); -int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl, - struct wl12xx_vif *wlvif); +int 
wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr); +int wl1271_build_qos_null_data(struct wl1271 *wl); +int wl1271_cmd_build_klv_null_data(struct wl1271 *wl); int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid); -int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u16 action, u8 id, u8 key_type, +int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, u8 key_size, const u8 *key, const u8 *addr, u32 tx_seq_32, u16 tx_seq_16); -int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u16 action, u8 id, u8 key_type, +int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32, u16 tx_seq_16); int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid); -int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id); +int wl12xx_roc(struct wl1271 *wl, u8 role_id); int wl12xx_croc(struct wl1271 *wl, u8 role_id); -int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif, - struct ieee80211_sta *sta, u8 hlid); +int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid); int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid); int wl12xx_cmd_config_fwlog(struct wl1271 *wl); int wl12xx_cmd_start_fwlog(struct wl1271 *wl); @@ -91,9 +82,6 @@ int wl12xx_cmd_stop_fwlog(struct wl1271 *wl); int wl12xx_cmd_channel_switch(struct wl1271 *wl, struct ieee80211_channel_switch *ch_switch); int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl); -int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u8 *hlid); -void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid); enum wl1271_commands { CMD_INTERROGATE = 1, /*use this to read information elements*/ diff --git a/trunk/drivers/net/wireless/wl12xx/conf.h b/trunk/drivers/net/wireless/wl12xx/conf.h index 1bcfb017058d..04bb8fbf93f9 100644 --- a/trunk/drivers/net/wireless/wl12xx/conf.h +++ b/trunk/drivers/net/wireless/wl12xx/conf.h @@ -440,10 +440,6 @@ struct conf_rx_settings { CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \ CONF_HW_BIT_RATE_54MBPS) -#define CONF_TX_CCK_RATES (CONF_HW_BIT_RATE_1MBPS | \ - CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \ - CONF_HW_BIT_RATE_11MBPS) - #define CONF_TX_OFDM_RATES (CONF_HW_BIT_RATE_6MBPS | \ CONF_HW_BIT_RATE_12MBPS | CONF_HW_BIT_RATE_24MBPS | \ CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \ diff --git a/trunk/drivers/net/wireless/wl12xx/debug.h b/trunk/drivers/net/wireless/wl12xx/debug.h deleted file mode 100644 index b85fd8c41e8f..000000000000 --- a/trunk/drivers/net/wireless/wl12xx/debug.h +++ /dev/null @@ -1,101 +0,0 @@ -/* - * This file is part of wl12xx - * - * Copyright (C) 2011 Texas Instruments. All rights reserved. - * Copyright (C) 2008-2009 Nokia Corporation - * - * Contact: Luciano Coelho - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - */ - -#ifndef __DEBUG_H__ -#define __DEBUG_H__ - -#include -#include - -#define DRIVER_NAME "wl12xx" -#define DRIVER_PREFIX DRIVER_NAME ": " - -enum { - DEBUG_NONE = 0, - DEBUG_IRQ = BIT(0), - DEBUG_SPI = BIT(1), - DEBUG_BOOT = BIT(2), - DEBUG_MAILBOX = BIT(3), - DEBUG_TESTMODE = BIT(4), - DEBUG_EVENT = BIT(5), - DEBUG_TX = BIT(6), - DEBUG_RX = BIT(7), - DEBUG_SCAN = BIT(8), - DEBUG_CRYPT = BIT(9), - DEBUG_PSM = BIT(10), - DEBUG_MAC80211 = BIT(11), - DEBUG_CMD = BIT(12), - DEBUG_ACX = BIT(13), - DEBUG_SDIO = BIT(14), - DEBUG_FILTERS = BIT(15), - DEBUG_ADHOC = BIT(16), - DEBUG_AP = BIT(17), - DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP), - DEBUG_ALL = ~0, -}; - -extern u32 wl12xx_debug_level; - -#define DEBUG_DUMP_LIMIT 1024 - -#define wl1271_error(fmt, arg...) \ - pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg) - -#define wl1271_warning(fmt, arg...) \ - pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg) - -#define wl1271_notice(fmt, arg...) \ - pr_info(DRIVER_PREFIX fmt "\n", ##arg) - -#define wl1271_info(fmt, arg...) \ - pr_info(DRIVER_PREFIX fmt "\n", ##arg) - -#define wl1271_debug(level, fmt, arg...) \ - do { \ - if (level & wl12xx_debug_level) \ - pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \ - } while (0) - -/* TODO: use pr_debug_hex_dump when it becomes available */ -#define wl1271_dump(level, prefix, buf, len) \ - do { \ - if (level & wl12xx_debug_level) \ - print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \ - DUMP_PREFIX_OFFSET, 16, 1, \ - buf, \ - min_t(size_t, len, DEBUG_DUMP_LIMIT), \ - 0); \ - } while (0) - -#define wl1271_dump_ascii(level, prefix, buf, len) \ - do { \ - if (level & wl12xx_debug_level) \ - print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \ - DUMP_PREFIX_OFFSET, 16, 1, \ - buf, \ - min_t(size_t, len, DEBUG_DUMP_LIMIT), \ - true); \ - } while (0) - -#endif /* __DEBUG_H__ */ diff --git a/trunk/drivers/net/wireless/wl12xx/debugfs.c b/trunk/drivers/net/wireless/wl12xx/debugfs.c index 15eb3a9c30ca..3999fd528302 100644 --- a/trunk/drivers/net/wireless/wl12xx/debugfs.c +++ b/trunk/drivers/net/wireless/wl12xx/debugfs.c @@ -27,7 +27,6 @@ #include #include "wl12xx.h" -#include "debug.h" #include "acx.h" #include "ps.h" #include "io.h" @@ -317,19 +316,12 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf, { struct wl1271 *wl = file->private_data; int res = 0; - ssize_t ret; - char *buf; - -#define DRIVER_STATE_BUF_LEN 1024 - - buf = kmalloc(DRIVER_STATE_BUF_LEN, GFP_KERNEL); - if (!buf) - return -ENOMEM; + char buf[1024]; mutex_lock(&wl->mutex); #define DRIVER_STATE_PRINT(x, fmt) \ - (res += scnprintf(buf + res, DRIVER_STATE_BUF_LEN - res,\ + (res += scnprintf(buf + res, sizeof(buf) - res,\ #x " = " fmt "\n", wl->x)) #define DRIVER_STATE_PRINT_LONG(x) DRIVER_STATE_PRINT(x, "%ld") @@ -354,14 +346,29 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf, DRIVER_STATE_PRINT_INT(tx_results_count); DRIVER_STATE_PRINT_LHEX(flags); DRIVER_STATE_PRINT_INT(tx_blocks_freed); + DRIVER_STATE_PRINT_INT(tx_security_last_seq_lsb); DRIVER_STATE_PRINT_INT(rx_counter); + DRIVER_STATE_PRINT_INT(session_counter); DRIVER_STATE_PRINT_INT(state); + DRIVER_STATE_PRINT_INT(bss_type); DRIVER_STATE_PRINT_INT(channel); + DRIVER_STATE_PRINT_HEX(rate_set); + DRIVER_STATE_PRINT_HEX(basic_rate_set); + DRIVER_STATE_PRINT_HEX(basic_rate); 
DRIVER_STATE_PRINT_INT(band); + DRIVER_STATE_PRINT_INT(beacon_int); + DRIVER_STATE_PRINT_INT(psm_entry_retry); + DRIVER_STATE_PRINT_INT(ps_poll_failures); DRIVER_STATE_PRINT_INT(power_level); + DRIVER_STATE_PRINT_INT(rssi_thold); + DRIVER_STATE_PRINT_INT(last_rssi_event); DRIVER_STATE_PRINT_INT(sg_enabled); DRIVER_STATE_PRINT_INT(enable_11a); DRIVER_STATE_PRINT_INT(noise); + DRIVER_STATE_PRINT_LHEX(ap_hlid_map[0]); + DRIVER_STATE_PRINT_INT(last_tx_hlid); + DRIVER_STATE_PRINT_INT(ba_support); + DRIVER_STATE_PRINT_HEX(ba_rx_bitmap); DRIVER_STATE_PRINT_HEX(ap_fw_ps_map); DRIVER_STATE_PRINT_LHEX(ap_ps_map); DRIVER_STATE_PRINT_HEX(quirks); @@ -380,13 +387,10 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf, #undef DRIVER_STATE_PRINT_LHEX #undef DRIVER_STATE_PRINT_STR #undef DRIVER_STATE_PRINT -#undef DRIVER_STATE_BUF_LEN mutex_unlock(&wl->mutex); - ret = simple_read_from_buffer(user_buf, count, ppos, buf, res); - kfree(buf); - return ret; + return simple_read_from_buffer(user_buf, count, ppos, buf, res); } static const struct file_operations driver_state_ops = { @@ -395,115 +399,6 @@ static const struct file_operations driver_state_ops = { .llseek = default_llseek, }; -static ssize_t vifs_state_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct wl1271 *wl = file->private_data; - struct wl12xx_vif *wlvif; - int ret, res = 0; - const int buf_size = 4096; - char *buf; - char tmp_buf[64]; - - buf = kzalloc(buf_size, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - mutex_lock(&wl->mutex); - -#define VIF_STATE_PRINT(x, fmt) \ - (res += scnprintf(buf + res, buf_size - res, \ - #x " = " fmt "\n", wlvif->x)) - -#define VIF_STATE_PRINT_LONG(x) VIF_STATE_PRINT(x, "%ld") -#define VIF_STATE_PRINT_INT(x) VIF_STATE_PRINT(x, "%d") -#define VIF_STATE_PRINT_STR(x) VIF_STATE_PRINT(x, "%s") -#define VIF_STATE_PRINT_LHEX(x) VIF_STATE_PRINT(x, "0x%lx") -#define VIF_STATE_PRINT_LLHEX(x) VIF_STATE_PRINT(x, "0x%llx") -#define VIF_STATE_PRINT_HEX(x) VIF_STATE_PRINT(x, "0x%x") - -#define VIF_STATE_PRINT_NSTR(x, len) \ - do { \ - memset(tmp_buf, 0, sizeof(tmp_buf)); \ - memcpy(tmp_buf, wlvif->x, \ - min_t(u8, len, sizeof(tmp_buf) - 1)); \ - res += scnprintf(buf + res, buf_size - res, \ - #x " = %s\n", tmp_buf); \ - } while (0) - - wl12xx_for_each_wlvif(wl, wlvif) { - VIF_STATE_PRINT_INT(role_id); - VIF_STATE_PRINT_INT(bss_type); - VIF_STATE_PRINT_LHEX(flags); - VIF_STATE_PRINT_INT(p2p); - VIF_STATE_PRINT_INT(dev_role_id); - VIF_STATE_PRINT_INT(dev_hlid); - - if (wlvif->bss_type == BSS_TYPE_STA_BSS || - wlvif->bss_type == BSS_TYPE_IBSS) { - VIF_STATE_PRINT_INT(sta.hlid); - VIF_STATE_PRINT_INT(sta.ba_rx_bitmap); - VIF_STATE_PRINT_INT(sta.basic_rate_idx); - VIF_STATE_PRINT_INT(sta.ap_rate_idx); - VIF_STATE_PRINT_INT(sta.p2p_rate_idx); - } else { - VIF_STATE_PRINT_INT(ap.global_hlid); - VIF_STATE_PRINT_INT(ap.bcast_hlid); - VIF_STATE_PRINT_LHEX(ap.sta_hlid_map[0]); - VIF_STATE_PRINT_INT(ap.mgmt_rate_idx); - VIF_STATE_PRINT_INT(ap.bcast_rate_idx); - VIF_STATE_PRINT_INT(ap.ucast_rate_idx[0]); - VIF_STATE_PRINT_INT(ap.ucast_rate_idx[1]); - VIF_STATE_PRINT_INT(ap.ucast_rate_idx[2]); - VIF_STATE_PRINT_INT(ap.ucast_rate_idx[3]); - } - VIF_STATE_PRINT_INT(last_tx_hlid); - VIF_STATE_PRINT_LHEX(links_map[0]); - VIF_STATE_PRINT_NSTR(ssid, wlvif->ssid_len); - VIF_STATE_PRINT_INT(band); - VIF_STATE_PRINT_INT(channel); - VIF_STATE_PRINT_HEX(bitrate_masks[0]); - VIF_STATE_PRINT_HEX(bitrate_masks[1]); - VIF_STATE_PRINT_HEX(basic_rate_set); - VIF_STATE_PRINT_HEX(basic_rate); - 
VIF_STATE_PRINT_HEX(rate_set); - VIF_STATE_PRINT_INT(beacon_int); - VIF_STATE_PRINT_INT(default_key); - VIF_STATE_PRINT_INT(aid); - VIF_STATE_PRINT_INT(session_counter); - VIF_STATE_PRINT_INT(ps_poll_failures); - VIF_STATE_PRINT_INT(psm_entry_retry); - VIF_STATE_PRINT_INT(power_level); - VIF_STATE_PRINT_INT(rssi_thold); - VIF_STATE_PRINT_INT(last_rssi_event); - VIF_STATE_PRINT_INT(ba_support); - VIF_STATE_PRINT_INT(ba_allowed); - VIF_STATE_PRINT_LLHEX(tx_security_seq); - VIF_STATE_PRINT_INT(tx_security_last_seq_lsb); - } - -#undef VIF_STATE_PRINT_INT -#undef VIF_STATE_PRINT_LONG -#undef VIF_STATE_PRINT_HEX -#undef VIF_STATE_PRINT_LHEX -#undef VIF_STATE_PRINT_LLHEX -#undef VIF_STATE_PRINT_STR -#undef VIF_STATE_PRINT_NSTR -#undef VIF_STATE_PRINT - - mutex_unlock(&wl->mutex); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, res); - kfree(buf); - return ret; -} - -static const struct file_operations vifs_state_ops = { - .read = vifs_state_read, - .open = wl1271_open_file_generic, - .llseek = default_llseek, -}; - static ssize_t dtim_interval_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -625,7 +520,6 @@ static ssize_t rx_streaming_interval_write(struct file *file, size_t count, loff_t *ppos) { struct wl1271 *wl = file->private_data; - struct wl12xx_vif *wlvif; unsigned long value; int ret; @@ -649,9 +543,7 @@ static ssize_t rx_streaming_interval_write(struct file *file, if (ret < 0) goto out; - wl12xx_for_each_wlvif_sta(wl, wlvif) { - wl1271_recalc_rx_streaming(wl, wlvif); - } + wl1271_recalc_rx_streaming(wl); wl1271_ps_elp_sleep(wl); out: @@ -680,7 +572,6 @@ static ssize_t rx_streaming_always_write(struct file *file, size_t count, loff_t *ppos) { struct wl1271 *wl = file->private_data; - struct wl12xx_vif *wlvif; unsigned long value; int ret; @@ -704,9 +595,7 @@ static ssize_t rx_streaming_always_write(struct file *file, if (ret < 0) goto out; - wl12xx_for_each_wlvif_sta(wl, wlvif) { - wl1271_recalc_rx_streaming(wl, wlvif); - } + wl1271_recalc_rx_streaming(wl); wl1271_ps_elp_sleep(wl); out: @@ -735,7 +624,6 @@ static ssize_t beacon_filtering_write(struct file *file, size_t count, loff_t *ppos) { struct wl1271 *wl = file->private_data; - struct wl12xx_vif *wlvif; char buf[10]; size_t len; unsigned long value; @@ -758,9 +646,7 @@ static ssize_t beacon_filtering_write(struct file *file, if (ret < 0) goto out; - wl12xx_for_each_wlvif(wl, wlvif) { - ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value); - } + ret = wl1271_acx_beacon_filter_opt(wl, !!value); wl1271_ps_elp_sleep(wl); out: @@ -884,7 +770,6 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl, DEBUGFS_ADD(gpio_power, rootdir); DEBUGFS_ADD(start_recovery, rootdir); DEBUGFS_ADD(driver_state, rootdir); - DEBUGFS_ADD(vifs_state, rootdir); DEBUGFS_ADD(dtim_interval, rootdir); DEBUGFS_ADD(beacon_interval, rootdir); DEBUGFS_ADD(beacon_filtering, rootdir); diff --git a/trunk/drivers/net/wireless/wl12xx/event.c b/trunk/drivers/net/wireless/wl12xx/event.c index d3280df68f5d..674ad2a9e409 100644 --- a/trunk/drivers/net/wireless/wl12xx/event.c +++ b/trunk/drivers/net/wireless/wl12xx/event.c @@ -22,7 +22,6 @@ */ #include "wl12xx.h" -#include "debug.h" #include "reg.h" #include "io.h" #include "event.h" @@ -32,16 +31,12 @@ void wl1271_pspoll_work(struct work_struct *work) { - struct ieee80211_vif *vif; - struct wl12xx_vif *wlvif; struct delayed_work *dwork; struct wl1271 *wl; int ret; dwork = container_of(work, struct delayed_work, work); - wlvif = container_of(dwork, struct wl12xx_vif, 
pspoll_work); - vif = container_of((void *)wlvif, struct ieee80211_vif, drv_priv); - wl = wlvif->wl; + wl = container_of(dwork, struct wl1271, pspoll_work); wl1271_debug(DEBUG_EVENT, "pspoll work"); @@ -50,10 +45,10 @@ void wl1271_pspoll_work(struct work_struct *work) if (unlikely(wl->state == WL1271_STATE_OFF)) goto out; - if (!test_and_clear_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags)) + if (!test_and_clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags)) goto out; - if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) + if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) goto out; /* @@ -65,33 +60,31 @@ void wl1271_pspoll_work(struct work_struct *work) if (ret < 0) goto out; - wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE, - wlvif->basic_rate, true); + wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, wl->basic_rate, true); wl1271_ps_elp_sleep(wl); out: mutex_unlock(&wl->mutex); }; -static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl, - struct wl12xx_vif *wlvif) +static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl) { int delay = wl->conf.conn.ps_poll_recovery_period; int ret; - wlvif->ps_poll_failures++; - if (wlvif->ps_poll_failures == 1) + wl->ps_poll_failures++; + if (wl->ps_poll_failures == 1) wl1271_info("AP with dysfunctional ps-poll, " "trying to work around it."); /* force active mode receive data from the AP */ - if (test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) { - ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE, - wlvif->basic_rate, true); + if (test_bit(WL1271_FLAG_PSM, &wl->flags)) { + ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE, + wl->basic_rate, true); if (ret < 0) return; - set_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags); - ieee80211_queue_delayed_work(wl->hw, &wlvif->pspoll_work, + set_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags); + ieee80211_queue_delayed_work(wl->hw, &wl->pspoll_work, msecs_to_jiffies(delay)); } @@ -104,7 +97,6 @@ static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl, } static int wl1271_event_ps_report(struct wl1271 *wl, - struct wl12xx_vif *wlvif, struct event_mailbox *mbox, bool *beacon_loss) { @@ -117,37 +109,41 @@ static int wl1271_event_ps_report(struct wl1271 *wl, case EVENT_ENTER_POWER_SAVE_FAIL: wl1271_debug(DEBUG_PSM, "PSM entry failed"); - if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) { + if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) { /* remain in active mode */ - wlvif->psm_entry_retry = 0; + wl->psm_entry_retry = 0; break; } - if (wlvif->psm_entry_retry < total_retries) { - wlvif->psm_entry_retry++; - ret = wl1271_ps_set_mode(wl, wlvif, - STATION_POWER_SAVE_MODE, - wlvif->basic_rate, true); + if (wl->psm_entry_retry < total_retries) { + wl->psm_entry_retry++; + ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, + wl->basic_rate, true); } else { wl1271_info("No ack to nullfunc from AP."); - wlvif->psm_entry_retry = 0; + wl->psm_entry_retry = 0; *beacon_loss = true; } break; case EVENT_ENTER_POWER_SAVE_SUCCESS: - wlvif->psm_entry_retry = 0; + wl->psm_entry_retry = 0; + + /* enable beacon filtering */ + ret = wl1271_acx_beacon_filter_opt(wl, true); + if (ret < 0) + break; /* * BET has only a minor effect in 5GHz and masks * channel switch IEs, so we only enable BET on 2.4GHz */ - if (wlvif->band == IEEE80211_BAND_2GHZ) + if (wl->band == IEEE80211_BAND_2GHZ) /* enable beacon early termination */ - ret = wl1271_acx_bet_enable(wl, wlvif, true); + ret = wl1271_acx_bet_enable(wl, true); - if (wlvif->ps_compl) { - complete(wlvif->ps_compl); - wlvif->ps_compl = NULL; + if (wl->ps_compl) { + 
complete(wl->ps_compl); + wl->ps_compl = NULL; } break; default: @@ -158,44 +154,39 @@ static int wl1271_event_ps_report(struct wl1271 *wl, } static void wl1271_event_rssi_trigger(struct wl1271 *wl, - struct wl12xx_vif *wlvif, struct event_mailbox *mbox) { - struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); enum nl80211_cqm_rssi_threshold_event event; s8 metric = mbox->rssi_snr_trigger_metric[0]; wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric); - if (metric <= wlvif->rssi_thold) + if (metric <= wl->rssi_thold) event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW; else event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH; - if (event != wlvif->last_rssi_event) - ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL); - wlvif->last_rssi_event = event; + if (event != wl->last_rssi_event) + ieee80211_cqm_rssi_notify(wl->vif, event, GFP_KERNEL); + wl->last_rssi_event = event; } -static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif) +static void wl1271_stop_ba_event(struct wl1271 *wl) { - struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); - - if (wlvif->bss_type != BSS_TYPE_AP_BSS) { - if (!wlvif->sta.ba_rx_bitmap) + if (wl->bss_type != BSS_TYPE_AP_BSS) { + if (!wl->ba_rx_bitmap) return; - ieee80211_stop_rx_ba_session(vif, wlvif->sta.ba_rx_bitmap, - vif->bss_conf.bssid); + ieee80211_stop_rx_ba_session(wl->vif, wl->ba_rx_bitmap, + wl->bssid); } else { - u8 hlid; + int i; struct wl1271_link *lnk; - for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, - WL12XX_MAX_LINKS) { - lnk = &wl->links[hlid]; - if (!lnk->ba_bitmap) + for (i = WL1271_AP_STA_HLID_START; i < AP_MAX_LINKS; i++) { + lnk = &wl->links[i]; + if (!wl1271_is_active_sta(wl, i) || !lnk->ba_bitmap) continue; - ieee80211_stop_rx_ba_session(vif, + ieee80211_stop_rx_ba_session(wl->vif, lnk->ba_bitmap, lnk->addr); } @@ -205,23 +196,14 @@ static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif) static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl, u8 enable) { - struct ieee80211_vif *vif; - struct wl12xx_vif *wlvif; - if (enable) { /* disable dynamic PS when requested by the firmware */ - wl12xx_for_each_wlvif_sta(wl, wlvif) { - vif = wl12xx_wlvif_to_vif(wlvif); - ieee80211_disable_dyn_ps(vif); - } + ieee80211_disable_dyn_ps(wl->vif); set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags); } else { + ieee80211_enable_dyn_ps(wl->vif); clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags); - wl12xx_for_each_wlvif_sta(wl, wlvif) { - vif = wl12xx_wlvif_to_vif(wlvif); - ieee80211_enable_dyn_ps(vif); - wl1271_recalc_rx_streaming(wl, wlvif); - } + wl1271_recalc_rx_streaming(wl); } } @@ -235,11 +217,10 @@ static void wl1271_event_mbox_dump(struct event_mailbox *mbox) static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) { - struct ieee80211_vif *vif; - struct wl12xx_vif *wlvif; int ret; u32 vector; bool beacon_loss = false; + bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); bool disconnect_sta = false; unsigned long sta_bitmap = 0; @@ -253,7 +234,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) wl1271_debug(DEBUG_EVENT, "status: 0x%x", mbox->scheduled_scan_status); - wl1271_scan_stm(wl, wl->scan_vif); + wl1271_scan_stm(wl); } if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) { @@ -267,12 +248,13 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT " "(status 0x%0x)", mbox->scheduled_scan_status); if (wl->sched_scanning) { + wl1271_scan_sched_scan_stop(wl); 
ieee80211_sched_scan_stopped(wl->hw); - wl->sched_scanning = false; } } - if (vector & SOFT_GEMINI_SENSE_EVENT_ID) + if (vector & SOFT_GEMINI_SENSE_EVENT_ID && + wl->bss_type == BSS_TYPE_STA_BSS) wl12xx_event_soft_gemini_sense(wl, mbox->soft_gemini_sense_info); @@ -285,54 +267,40 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) * BSS_LOSE_EVENT, beacon loss has to be reported to the stack. * */ - if (vector & BSS_LOSE_EVENT_ID) { - /* TODO: check for multi-role */ + if ((vector & BSS_LOSE_EVENT_ID) && !is_ap) { wl1271_info("Beacon loss detected."); /* indicate to the stack, that beacons have been lost */ beacon_loss = true; } - if (vector & PS_REPORT_EVENT_ID) { + if ((vector & PS_REPORT_EVENT_ID) && !is_ap) { wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT"); - wl12xx_for_each_wlvif_sta(wl, wlvif) { - ret = wl1271_event_ps_report(wl, wlvif, - mbox, &beacon_loss); - if (ret < 0) - return ret; - } + ret = wl1271_event_ps_report(wl, mbox, &beacon_loss); + if (ret < 0) + return ret; } - if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) - wl12xx_for_each_wlvif_sta(wl, wlvif) { - wl1271_event_pspoll_delivery_fail(wl, wlvif); - } + if ((vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) && !is_ap) + wl1271_event_pspoll_delivery_fail(wl); if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) { - /* TODO: check actual multi-role support */ wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT"); - wl12xx_for_each_wlvif_sta(wl, wlvif) { - wl1271_event_rssi_trigger(wl, wlvif, mbox); - } + if (wl->vif) + wl1271_event_rssi_trigger(wl, mbox); } - if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID) { - u8 role_id = mbox->role_id; + if ((vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID)) { wl1271_debug(DEBUG_EVENT, "BA_SESSION_RX_CONSTRAINT_EVENT_ID. " - "ba_allowed = 0x%x, role_id=%d", - mbox->rx_ba_allowed, role_id); + "ba_allowed = 0x%x", mbox->rx_ba_allowed); - wl12xx_for_each_wlvif(wl, wlvif) { - if (role_id != 0xff && role_id != wlvif->role_id) - continue; + wl->ba_allowed = !!mbox->rx_ba_allowed; - wlvif->ba_allowed = !!mbox->rx_ba_allowed; - if (!wlvif->ba_allowed) - wl1271_stop_ba_event(wl, wlvif); - } + if (wl->vif && !wl->ba_allowed) + wl1271_stop_ba_event(wl); } - if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) { + if ((vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) && !is_ap) { wl1271_debug(DEBUG_EVENT, "CHANNEL_SWITCH_COMPLETE_EVENT_ID. " "status = 0x%x", mbox->channel_switch_status); @@ -341,65 +309,50 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) * 1) channel switch complete with status=0 * 2) channel switch failed status=1 */ - - /* TODO: configure only the relevant vif */ - wl12xx_for_each_wlvif_sta(wl, wlvif) { - struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); - bool success; - - if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, - &wl->flags)) - continue; - - success = mbox->channel_switch_status ? false : true; - ieee80211_chswitch_done(vif, success); - } + if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags) && + (wl->vif)) + ieee80211_chswitch_done(wl->vif, + mbox->channel_switch_status ? false : true); } if ((vector & DUMMY_PACKET_EVENT_ID)) { wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID"); - wl1271_tx_dummy_packet(wl); + if (wl->vif) + wl1271_tx_dummy_packet(wl); } /* * "TX retries exceeded" has a different meaning according to mode. * In AP mode the offending station is disconnected. 
*/ - if (vector & MAX_TX_RETRY_EVENT_ID) { + if ((vector & MAX_TX_RETRY_EVENT_ID) && is_ap) { wl1271_debug(DEBUG_EVENT, "MAX_TX_RETRY_EVENT_ID"); sta_bitmap |= le16_to_cpu(mbox->sta_tx_retry_exceeded); disconnect_sta = true; } - if (vector & INACTIVE_STA_EVENT_ID) { + if ((vector & INACTIVE_STA_EVENT_ID) && is_ap) { wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID"); sta_bitmap |= le16_to_cpu(mbox->sta_aging_status); disconnect_sta = true; } - if (disconnect_sta) { + if (is_ap && disconnect_sta) { u32 num_packets = wl->conf.tx.max_tx_retries; struct ieee80211_sta *sta; const u8 *addr; int h; - for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) { - bool found = false; - /* find the ap vif connected to this sta */ - wl12xx_for_each_wlvif_ap(wl, wlvif) { - if (!test_bit(h, wlvif->ap.sta_hlid_map)) - continue; - found = true; - break; - } - if (!found) + for (h = find_first_bit(&sta_bitmap, AP_MAX_LINKS); + h < AP_MAX_LINKS; + h = find_next_bit(&sta_bitmap, AP_MAX_LINKS, h+1)) { + if (!wl1271_is_active_sta(wl, h)) continue; - vif = wl12xx_wlvif_to_vif(wlvif); addr = wl->links[h].addr; rcu_read_lock(); - sta = ieee80211_find_sta(vif, addr); + sta = ieee80211_find_sta(wl->vif, addr); if (sta) { wl1271_debug(DEBUG_EVENT, "remove sta %d", h); ieee80211_report_low_ack(sta, num_packets); @@ -408,11 +361,8 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) } } - if (beacon_loss) - wl12xx_for_each_wlvif_sta(wl, wlvif) { - vif = wl12xx_wlvif_to_vif(wlvif); - ieee80211_connection_loss(vif); - } + if (wl->vif && beacon_loss) + ieee80211_connection_loss(wl->vif); return 0; } diff --git a/trunk/drivers/net/wireless/wl12xx/event.h b/trunk/drivers/net/wireless/wl12xx/event.h index 1d878ba47bf4..49c1a0ede5b1 100644 --- a/trunk/drivers/net/wireless/wl12xx/event.h +++ b/trunk/drivers/net/wireless/wl12xx/event.h @@ -132,4 +132,7 @@ void wl1271_event_mbox_config(struct wl1271 *wl); int wl1271_event_handle(struct wl1271 *wl, u8 mbox); void wl1271_pspoll_work(struct work_struct *work); +/* Functions from main.c */ +bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid); + #endif diff --git a/trunk/drivers/net/wireless/wl12xx/init.c b/trunk/drivers/net/wireless/wl12xx/init.c index ca7ee59e4505..04db64c94e9a 100644 --- a/trunk/drivers/net/wireless/wl12xx/init.c +++ b/trunk/drivers/net/wireless/wl12xx/init.c @@ -25,7 +25,6 @@ #include #include -#include "debug.h" #include "init.h" #include "wl12xx_80211.h" #include "acx.h" @@ -34,7 +33,7 @@ #include "tx.h" #include "io.h" -int wl1271_init_templates_config(struct wl1271 *wl) +int wl1271_sta_init_templates_config(struct wl1271 *wl) { int ret, i; @@ -65,7 +64,7 @@ int wl1271_init_templates_config(struct wl1271 *wl) ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL, sizeof - (struct ieee80211_qos_hdr), + (struct wl12xx_qos_null_data_template), 0, WL1271_RATE_AUTOMATIC); if (ret < 0) return ret; @@ -89,33 +88,10 @@ int wl1271_init_templates_config(struct wl1271 *wl) if (ret < 0) return ret; - /* - * Put very large empty placeholders for all templates. These - * reserve memory for later. 
- */ - ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL, - WL1271_CMD_TEMPL_MAX_SIZE, - 0, WL1271_RATE_AUTOMATIC); - if (ret < 0) - return ret; - - ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL, - WL1271_CMD_TEMPL_MAX_SIZE, - 0, WL1271_RATE_AUTOMATIC); - if (ret < 0) - return ret; - - ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL, - sizeof - (struct wl12xx_disconn_template), - 0, WL1271_RATE_AUTOMATIC); - if (ret < 0) - return ret; - for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) { ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL, - sizeof(struct ieee80211_qos_hdr), - i, WL1271_RATE_AUTOMATIC); + WL1271_CMD_TEMPL_DFLT_SIZE, i, + WL1271_RATE_AUTOMATIC); if (ret < 0) return ret; } @@ -123,8 +99,7 @@ int wl1271_init_templates_config(struct wl1271 *wl) return 0; } -static int wl1271_ap_init_deauth_template(struct wl1271 *wl, - struct wl12xx_vif *wlvif) +static int wl1271_ap_init_deauth_template(struct wl1271 *wl) { struct wl12xx_disconn_template *tmpl; int ret; @@ -139,7 +114,7 @@ static int wl1271_ap_init_deauth_template(struct wl1271 *wl, tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH); - rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); + rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set); ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, tmpl, sizeof(*tmpl), 0, rate); @@ -148,10 +123,8 @@ static int wl1271_ap_init_deauth_template(struct wl1271 *wl, return ret; } -static int wl1271_ap_init_null_template(struct wl1271 *wl, - struct ieee80211_vif *vif) +static int wl1271_ap_init_null_template(struct wl1271 *wl) { - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); struct ieee80211_hdr_3addr *nullfunc; int ret; u32 rate; @@ -168,10 +141,10 @@ static int wl1271_ap_init_null_template(struct wl1271 *wl, /* nullfunc->addr1 is filled by FW */ - memcpy(nullfunc->addr2, vif->addr, ETH_ALEN); - memcpy(nullfunc->addr3, vif->addr, ETH_ALEN); + memcpy(nullfunc->addr2, wl->mac_addr, ETH_ALEN); + memcpy(nullfunc->addr3, wl->mac_addr, ETH_ALEN); - rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); + rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set); ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc, sizeof(*nullfunc), 0, rate); @@ -180,10 +153,8 @@ static int wl1271_ap_init_null_template(struct wl1271 *wl, return ret; } -static int wl1271_ap_init_qos_null_template(struct wl1271 *wl, - struct ieee80211_vif *vif) +static int wl1271_ap_init_qos_null_template(struct wl1271 *wl) { - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); struct ieee80211_qos_hdr *qosnull; int ret; u32 rate; @@ -200,10 +171,10 @@ static int wl1271_ap_init_qos_null_template(struct wl1271 *wl, /* qosnull->addr1 is filled by FW */ - memcpy(qosnull->addr2, vif->addr, ETH_ALEN); - memcpy(qosnull->addr3, vif->addr, ETH_ALEN); + memcpy(qosnull->addr2, wl->mac_addr, ETH_ALEN); + memcpy(qosnull->addr3, wl->mac_addr, ETH_ALEN); - rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); + rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set); ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull, sizeof(*qosnull), 0, rate); @@ -212,6 +183,49 @@ static int wl1271_ap_init_qos_null_template(struct wl1271 *wl, return ret; } +static int wl1271_ap_init_templates_config(struct wl1271 *wl) +{ + int ret; + + /* + * Put very large empty placeholders for all templates. These + * reserve memory for later. 
+ */ + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL, + WL1271_CMD_TEMPL_MAX_SIZE, + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL, + WL1271_CMD_TEMPL_MAX_SIZE, + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL, + sizeof + (struct wl12xx_disconn_template), + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL, + sizeof(struct wl12xx_null_data_template), + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL, + sizeof + (struct wl12xx_qos_null_data_template), + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + return 0; +} + static int wl12xx_init_rx_config(struct wl1271 *wl) { int ret; @@ -223,37 +237,39 @@ static int wl12xx_init_rx_config(struct wl1271 *wl) return 0; } -static int wl12xx_init_phy_vif_config(struct wl1271 *wl, - struct wl12xx_vif *wlvif) +int wl1271_init_phy_config(struct wl1271 *wl) { int ret; - ret = wl1271_acx_slot(wl, wlvif, DEFAULT_SLOT_TIME); + ret = wl1271_acx_pd_threshold(wl); + if (ret < 0) + return ret; + + ret = wl1271_acx_slot(wl, DEFAULT_SLOT_TIME); if (ret < 0) return ret; - ret = wl1271_acx_service_period_timeout(wl, wlvif); + ret = wl1271_acx_service_period_timeout(wl); if (ret < 0) return ret; - ret = wl1271_acx_rts_threshold(wl, wlvif, wl->hw->wiphy->rts_threshold); + ret = wl1271_acx_rts_threshold(wl, wl->hw->wiphy->rts_threshold); if (ret < 0) return ret; return 0; } -static int wl1271_init_sta_beacon_filter(struct wl1271 *wl, - struct wl12xx_vif *wlvif) +static int wl1271_init_beacon_filter(struct wl1271 *wl) { int ret; - ret = wl1271_acx_beacon_filter_table(wl, wlvif); + /* disable beacon filtering at this stage */ + ret = wl1271_acx_beacon_filter_opt(wl, false); if (ret < 0) return ret; - /* enable beacon filtering */ - ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true); + ret = wl1271_acx_beacon_filter_table(wl); if (ret < 0) return ret; @@ -286,12 +302,11 @@ int wl1271_init_energy_detection(struct wl1271 *wl) return 0; } -static int wl1271_init_beacon_broadcast(struct wl1271 *wl, - struct wl12xx_vif *wlvif) +static int wl1271_init_beacon_broadcast(struct wl1271 *wl) { int ret; - ret = wl1271_acx_bcn_dtim_options(wl, wlvif); + ret = wl1271_acx_bcn_dtim_options(wl); if (ret < 0) return ret; @@ -312,13 +327,36 @@ static int wl12xx_init_fwlog(struct wl1271 *wl) return 0; } -/* generic sta initialization (non vif-specific) */ -static int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif) +static int wl1271_sta_hw_init(struct wl1271 *wl) { int ret; + if (wl->chip.id != CHIP_ID_1283_PG20) { + ret = wl1271_cmd_ext_radio_parms(wl); + if (ret < 0) + return ret; + } + /* PS config */ - ret = wl12xx_acx_config_ps(wl, wlvif); + ret = wl1271_acx_config_ps(wl); + if (ret < 0) + return ret; + + ret = wl1271_sta_init_templates_config(wl); + if (ret < 0) + return ret; + + ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0); + if (ret < 0) + return ret; + + /* Initialize connection monitoring thresholds */ + ret = wl1271_acx_conn_monit_params(wl, false); + if (ret < 0) + return ret; + + /* Beacon filtering */ + ret = wl1271_init_beacon_filter(wl); if (ret < 0) return ret; @@ -327,61 +365,103 @@ static int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif) if (ret < 0) return ret; - ret = wl1271_acx_sta_rate_policies(wl, wlvif); 
+ /* Beacons and broadcast settings */ + ret = wl1271_init_beacon_broadcast(wl); + if (ret < 0) + return ret; + + /* Configure for ELP power saving */ + ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP); + if (ret < 0) + return ret; + + /* Configure rssi/snr averaging weights */ + ret = wl1271_acx_rssi_snr_avg_weights(wl); + if (ret < 0) + return ret; + + ret = wl1271_acx_sta_rate_policies(wl); + if (ret < 0) + return ret; + + ret = wl12xx_acx_mem_cfg(wl); + if (ret < 0) + return ret; + + /* Configure the FW logger */ + ret = wl12xx_init_fwlog(wl); if (ret < 0) return ret; return 0; } -static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl, - struct ieee80211_vif *vif) +static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl) { - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int ret, i; /* disable all keep-alive templates */ for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) { - ret = wl1271_acx_keep_alive_config(wl, wlvif, i, + ret = wl1271_acx_keep_alive_config(wl, i, ACX_KEEP_ALIVE_TPL_INVALID); if (ret < 0) return ret; } /* disable the keep-alive feature */ - ret = wl1271_acx_keep_alive_mode(wl, wlvif, false); + ret = wl1271_acx_keep_alive_mode(wl, false); if (ret < 0) return ret; return 0; } -/* generic ap initialization (non vif-specific) */ -static int wl1271_ap_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif) +static int wl1271_ap_hw_init(struct wl1271 *wl) { int ret; - ret = wl1271_init_ap_rates(wl, wlvif); + ret = wl1271_ap_init_templates_config(wl); + if (ret < 0) + return ret; + + /* Configure for power always on */ + ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM); + if (ret < 0) + return ret; + + ret = wl1271_init_ap_rates(wl); + if (ret < 0) + return ret; + + ret = wl1271_acx_ap_max_tx_retry(wl); + if (ret < 0) + return ret; + + ret = wl12xx_acx_mem_cfg(wl); + if (ret < 0) + return ret; + + /* initialize Tx power */ + ret = wl1271_acx_tx_power(wl, wl->power_level); if (ret < 0) return ret; return 0; } -int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif) +int wl1271_ap_init_templates(struct wl1271 *wl) { - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int ret; - ret = wl1271_ap_init_deauth_template(wl, wlvif); + ret = wl1271_ap_init_deauth_template(wl); if (ret < 0) return ret; - ret = wl1271_ap_init_null_template(wl, vif); + ret = wl1271_ap_init_null_template(wl); if (ret < 0) return ret; - ret = wl1271_ap_init_qos_null_template(wl, vif); + ret = wl1271_ap_init_qos_null_template(wl); if (ret < 0) return ret; @@ -389,45 +469,43 @@ int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif) * when operating as AP we want to receive external beacons for * configuring ERP protection. 
*/ - ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false); + ret = wl1271_acx_beacon_filter_opt(wl, false); if (ret < 0) return ret; return 0; } -static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl, - struct ieee80211_vif *vif) +static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl) { - return wl1271_ap_init_templates(wl, vif); + return wl1271_ap_init_templates(wl); } -int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif) +int wl1271_init_ap_rates(struct wl1271 *wl) { int i, ret; struct conf_tx_rate_class rc; u32 supported_rates; - wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x", - wlvif->basic_rate_set); + wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x", wl->basic_rate_set); - if (wlvif->basic_rate_set == 0) + if (wl->basic_rate_set == 0) return -EINVAL; - rc.enabled_rates = wlvif->basic_rate_set; + rc.enabled_rates = wl->basic_rate_set; rc.long_retry_limit = 10; rc.short_retry_limit = 10; rc.aflags = 0; - ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.mgmt_rate_idx); + ret = wl1271_acx_ap_rate_policy(wl, &rc, ACX_TX_AP_MODE_MGMT_RATE); if (ret < 0) return ret; /* use the min basic rate for AP broadcast/multicast */ - rc.enabled_rates = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); + rc.enabled_rates = wl1271_tx_min_rate_get(wl, wl->basic_rate_set); rc.short_retry_limit = 10; rc.long_retry_limit = 10; rc.aflags = 0; - ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.bcast_rate_idx); + ret = wl1271_acx_ap_rate_policy(wl, &rc, ACX_TX_AP_MODE_BCST_RATE); if (ret < 0) return ret; @@ -435,7 +513,7 @@ int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif) * If the basic rates contain OFDM rates, use OFDM only * rates for unicast TX as well. Else use all supported rates. */ - if ((wlvif->basic_rate_set & CONF_TX_OFDM_RATES)) + if ((wl->basic_rate_set & CONF_TX_OFDM_RATES)) supported_rates = CONF_TX_OFDM_RATES; else supported_rates = CONF_TX_AP_ENABLED_RATES; @@ -449,8 +527,7 @@ int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif) rc.short_retry_limit = 10; rc.long_retry_limit = 10; rc.aflags = 0; - ret = wl1271_acx_ap_rate_policy(wl, &rc, - wlvif->ap.ucast_rate_idx[i]); + ret = wl1271_acx_ap_rate_policy(wl, &rc, i); if (ret < 0) return ret; } @@ -458,23 +535,24 @@ int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif) return 0; } -static int wl1271_set_ba_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif) +static int wl1271_set_ba_policies(struct wl1271 *wl) { /* Reset the BA RX indicators */ - wlvif->ba_allowed = true; + wl->ba_rx_bitmap = 0; + wl->ba_allowed = true; wl->ba_rx_session_count = 0; /* BA is supported in STA/AP modes */ - if (wlvif->bss_type != BSS_TYPE_AP_BSS && - wlvif->bss_type != BSS_TYPE_STA_BSS) { - wlvif->ba_support = false; + if (wl->bss_type != BSS_TYPE_AP_BSS && + wl->bss_type != BSS_TYPE_STA_BSS) { + wl->ba_support = false; return 0; } - wlvif->ba_support = true; + wl->ba_support = true; /* 802.11n initiator BA session setting */ - return wl12xx_acx_set_ba_initiator_policy(wl, wlvif); + return wl12xx_acx_set_ba_initiator_policy(wl); } int wl1271_chip_specific_init(struct wl1271 *wl) @@ -484,7 +562,7 @@ int wl1271_chip_specific_init(struct wl1271 *wl) if (wl->chip.id == CHIP_ID_1283_PG20) { u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE; - if (!(wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT)) + if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT) /* Enable SDIO padding */ host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK; @@ -497,186 +575,39 @@ int wl1271_chip_specific_init(struct 
wl1271 *wl) return ret; } -/* vif-specifc initialization */ -static int wl12xx_init_sta_role(struct wl1271 *wl, struct wl12xx_vif *wlvif) -{ - int ret; - - ret = wl1271_acx_group_address_tbl(wl, wlvif, true, NULL, 0); - if (ret < 0) - return ret; - - /* Initialize connection monitoring thresholds */ - ret = wl1271_acx_conn_monit_params(wl, wlvif, false); - if (ret < 0) - return ret; - - /* Beacon filtering */ - ret = wl1271_init_sta_beacon_filter(wl, wlvif); - if (ret < 0) - return ret; - - /* Beacons and broadcast settings */ - ret = wl1271_init_beacon_broadcast(wl, wlvif); - if (ret < 0) - return ret; - - /* Configure rssi/snr averaging weights */ - ret = wl1271_acx_rssi_snr_avg_weights(wl, wlvif); - if (ret < 0) - return ret; - - return 0; -} - -/* vif-specific intialization */ -static int wl12xx_init_ap_role(struct wl1271 *wl, struct wl12xx_vif *wlvif) -{ - int ret; - - ret = wl1271_acx_ap_max_tx_retry(wl, wlvif); - if (ret < 0) - return ret; - /* initialize Tx power */ - ret = wl1271_acx_tx_power(wl, wlvif, wlvif->power_level); - if (ret < 0) - return ret; - - return 0; -} - -int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif) +int wl1271_hw_init(struct wl1271 *wl) { - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); struct conf_tx_ac_category *conf_ac; struct conf_tx_tid *conf_tid; - bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); int ret, i; + bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); - /* - * consider all existing roles before configuring psm. - * TODO: reconfigure on interface removal. - */ - if (!wl->ap_count) { - if (is_ap) { - /* Configure for power always on */ - ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM); - if (ret < 0) - return ret; - } else if (!wl->sta_count) { - /* Configure for ELP power saving */ - ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP); - if (ret < 0) - return ret; - } - } - - /* Mode specific init */ - if (is_ap) { - ret = wl1271_ap_hw_init(wl, wlvif); - if (ret < 0) - return ret; - - ret = wl12xx_init_ap_role(wl, wlvif); - if (ret < 0) - return ret; - } else { - ret = wl1271_sta_hw_init(wl, wlvif); - if (ret < 0) - return ret; - - ret = wl12xx_init_sta_role(wl, wlvif); - if (ret < 0) - return ret; - } - - wl12xx_init_phy_vif_config(wl, wlvif); - - /* Default TID/AC configuration */ - BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count); - for (i = 0; i < wl->conf.tx.tid_conf_count; i++) { - conf_ac = &wl->conf.tx.ac_conf[i]; - ret = wl1271_acx_ac_cfg(wl, wlvif, conf_ac->ac, - conf_ac->cw_min, conf_ac->cw_max, - conf_ac->aifsn, conf_ac->tx_op_limit); - if (ret < 0) - return ret; - - conf_tid = &wl->conf.tx.tid_conf[i]; - ret = wl1271_acx_tid_cfg(wl, wlvif, - conf_tid->queue_id, - conf_tid->channel_type, - conf_tid->tsid, - conf_tid->ps_scheme, - conf_tid->ack_policy, - conf_tid->apsd_conf[0], - conf_tid->apsd_conf[1]); - if (ret < 0) - return ret; - } - - /* Configure HW encryption */ - ret = wl1271_acx_feature_cfg(wl, wlvif); - if (ret < 0) - return ret; - - /* Mode specific init - post mem init */ - if (is_ap) - ret = wl1271_ap_hw_init_post_mem(wl, vif); + if (wl->chip.id == CHIP_ID_1283_PG20) + ret = wl128x_cmd_general_parms(wl); else - ret = wl1271_sta_hw_init_post_mem(wl, vif); - - if (ret < 0) - return ret; - - /* Configure initiator BA sessions policies */ - ret = wl1271_set_ba_policies(wl, wlvif); + ret = wl1271_cmd_general_parms(wl); if (ret < 0) return ret; - return 0; -} - -int wl1271_hw_init(struct wl1271 *wl) -{ - int ret; - - if (wl->chip.id == CHIP_ID_1283_PG20) { - ret = 
wl128x_cmd_general_parms(wl); - if (ret < 0) - return ret; + if (wl->chip.id == CHIP_ID_1283_PG20) ret = wl128x_cmd_radio_parms(wl); - if (ret < 0) - return ret; - } else { - ret = wl1271_cmd_general_parms(wl); - if (ret < 0) - return ret; + else ret = wl1271_cmd_radio_parms(wl); - if (ret < 0) - return ret; - ret = wl1271_cmd_ext_radio_parms(wl); - if (ret < 0) - return ret; - } - - /* Chip-specific init */ - ret = wl1271_chip_specific_init(wl); if (ret < 0) return ret; - /* Init templates */ - ret = wl1271_init_templates_config(wl); + /* Chip-specific init */ + ret = wl1271_chip_specific_init(wl); if (ret < 0) return ret; - ret = wl12xx_acx_mem_cfg(wl); - if (ret < 0) - return ret; + /* Mode specific init */ + if (is_ap) + ret = wl1271_ap_hw_init(wl); + else + ret = wl1271_sta_hw_init(wl); - /* Configure the FW logger */ - ret = wl12xx_init_fwlog(wl); if (ret < 0) return ret; @@ -695,6 +626,11 @@ int wl1271_hw_init(struct wl1271 *wl) if (ret < 0) goto out_free_memmap; + /* PHY layer config */ + ret = wl1271_init_phy_config(wl); + if (ret < 0) + goto out_free_memmap; + ret = wl1271_acx_dco_itrim_params(wl); if (ret < 0) goto out_free_memmap; @@ -719,20 +655,61 @@ int wl1271_hw_init(struct wl1271 *wl) if (ret < 0) goto out_free_memmap; + /* Default TID/AC configuration */ + BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count); + for (i = 0; i < wl->conf.tx.tid_conf_count; i++) { + conf_ac = &wl->conf.tx.ac_conf[i]; + ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min, + conf_ac->cw_max, conf_ac->aifsn, + conf_ac->tx_op_limit); + if (ret < 0) + goto out_free_memmap; + + conf_tid = &wl->conf.tx.tid_conf[i]; + ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id, + conf_tid->channel_type, + conf_tid->tsid, + conf_tid->ps_scheme, + conf_tid->ack_policy, + conf_tid->apsd_conf[0], + conf_tid->apsd_conf[1]); + if (ret < 0) + goto out_free_memmap; + } + /* Enable data path */ ret = wl1271_cmd_data_path(wl, 1); if (ret < 0) goto out_free_memmap; + /* Configure HW encryption */ + ret = wl1271_acx_feature_cfg(wl); + if (ret < 0) + goto out_free_memmap; + /* configure PM */ ret = wl1271_acx_pm_config(wl); if (ret < 0) goto out_free_memmap; + /* Mode specific init - post mem init */ + if (is_ap) + ret = wl1271_ap_hw_init_post_mem(wl); + else + ret = wl1271_sta_hw_init_post_mem(wl); + + if (ret < 0) + goto out_free_memmap; + ret = wl12xx_acx_set_rate_mgmt_params(wl); if (ret < 0) goto out_free_memmap; + /* Configure initiator BA sessions policies */ + ret = wl1271_set_ba_policies(wl); + if (ret < 0) + goto out_free_memmap; + /* configure hangover */ ret = wl12xx_acx_config_hangover(wl); if (ret < 0) diff --git a/trunk/drivers/net/wireless/wl12xx/init.h b/trunk/drivers/net/wireless/wl12xx/init.h index 2da0f404ef6e..3a3c230fd292 100644 --- a/trunk/drivers/net/wireless/wl12xx/init.h +++ b/trunk/drivers/net/wireless/wl12xx/init.h @@ -27,13 +27,13 @@ #include "wl12xx.h" int wl1271_hw_init_power_auth(struct wl1271 *wl); -int wl1271_init_templates_config(struct wl1271 *wl); +int wl1271_sta_init_templates_config(struct wl1271 *wl); +int wl1271_init_phy_config(struct wl1271 *wl); int wl1271_init_pta(struct wl1271 *wl); int wl1271_init_energy_detection(struct wl1271 *wl); int wl1271_chip_specific_init(struct wl1271 *wl); int wl1271_hw_init(struct wl1271 *wl); -int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif); -int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif); -int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif); +int 
wl1271_init_ap_rates(struct wl1271 *wl); +int wl1271_ap_init_templates(struct wl1271 *wl); #endif diff --git a/trunk/drivers/net/wireless/wl12xx/io.c b/trunk/drivers/net/wireless/wl12xx/io.c index 079ad380e8ff..c2da66f45046 100644 --- a/trunk/drivers/net/wireless/wl12xx/io.c +++ b/trunk/drivers/net/wireless/wl12xx/io.c @@ -24,10 +24,8 @@ #include #include #include -#include #include "wl12xx.h" -#include "debug.h" #include "wl12xx_80211.h" #include "io.h" #include "tx.h" @@ -48,7 +46,7 @@ bool wl1271_set_block_size(struct wl1271 *wl) { if (wl->if_ops->set_block_size) { - wl->if_ops->set_block_size(wl->dev, WL12XX_BUS_BLOCK_SIZE); + wl->if_ops->set_block_size(wl, WL12XX_BUS_BLOCK_SIZE); return true; } @@ -57,12 +55,12 @@ bool wl1271_set_block_size(struct wl1271 *wl) void wl1271_disable_interrupts(struct wl1271 *wl) { - disable_irq(wl->irq); + wl->if_ops->disable_irq(wl); } void wl1271_enable_interrupts(struct wl1271 *wl) { - enable_irq(wl->irq); + wl->if_ops->enable_irq(wl); } /* Set the SPI partitions to access the chip addresses @@ -130,13 +128,13 @@ EXPORT_SYMBOL_GPL(wl1271_set_partition); void wl1271_io_reset(struct wl1271 *wl) { if (wl->if_ops->reset) - wl->if_ops->reset(wl->dev); + wl->if_ops->reset(wl); } void wl1271_io_init(struct wl1271 *wl) { if (wl->if_ops->init) - wl->if_ops->init(wl->dev); + wl->if_ops->init(wl); } void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val) diff --git a/trunk/drivers/net/wireless/wl12xx/io.h b/trunk/drivers/net/wireless/wl12xx/io.h index d398cbcea986..e839341dfafe 100644 --- a/trunk/drivers/net/wireless/wl12xx/io.h +++ b/trunk/drivers/net/wireless/wl12xx/io.h @@ -51,17 +51,23 @@ void wl1271_enable_interrupts(struct wl1271 *wl); void wl1271_io_reset(struct wl1271 *wl); void wl1271_io_init(struct wl1271 *wl); +static inline struct device *wl1271_wl_to_dev(struct wl1271 *wl) +{ + return wl->if_ops->dev(wl); +} + + /* Raw target IO, address is not translated */ static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf, size_t len, bool fixed) { - wl->if_ops->write(wl->dev, addr, buf, len, fixed); + wl->if_ops->write(wl, addr, buf, len, fixed); } static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf, size_t len, bool fixed) { - wl->if_ops->read(wl->dev, addr, buf, len, fixed); + wl->if_ops->read(wl, addr, buf, len, fixed); } static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr) @@ -149,13 +155,13 @@ static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val) static inline void wl1271_power_off(struct wl1271 *wl) { - wl->if_ops->power(wl->dev, false); + wl->if_ops->power(wl, false); clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); } static inline int wl1271_power_on(struct wl1271 *wl) { - int ret = wl->if_ops->power(wl->dev, true); + int ret = wl->if_ops->power(wl, true); if (ret == 0) set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); @@ -170,10 +176,15 @@ u16 wl1271_top_reg_read(struct wl1271 *wl, int addr); int wl1271_set_partition(struct wl1271 *wl, struct wl1271_partition_set *p); -bool wl1271_set_block_size(struct wl1271 *wl); - /* Functions from wl1271_main.c */ +int wl1271_register_hw(struct wl1271 *wl); +void wl1271_unregister_hw(struct wl1271 *wl); +int wl1271_init_ieee80211(struct wl1271 *wl); +struct ieee80211_hw *wl1271_alloc_hw(void); +int wl1271_free_hw(struct wl1271 *wl); +irqreturn_t wl1271_irq(int irq, void *data); +bool wl1271_set_block_size(struct wl1271 *wl); int wl1271_tx_dummy_packet(struct wl1271 *wl); #endif diff --git a/trunk/drivers/net/wireless/wl12xx/main.c 
b/trunk/drivers/net/wireless/wl12xx/main.c index d5f55a149de5..884f82b63219 100644 --- a/trunk/drivers/net/wireless/wl12xx/main.c +++ b/trunk/drivers/net/wireless/wl12xx/main.c @@ -32,10 +32,8 @@ #include #include #include -#include #include "wl12xx.h" -#include "debug.h" #include "wl12xx_80211.h" #include "reg.h" #include "io.h" @@ -379,30 +377,42 @@ static char *fwlog_param; static bool bug_on_recovery; static void __wl1271_op_remove_interface(struct wl1271 *wl, - struct ieee80211_vif *vif, bool reset_tx_queues); -static void wl1271_op_stop(struct ieee80211_hw *hw); -static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif); +static void wl1271_free_ap_keys(struct wl1271 *wl); + + +static void wl1271_device_release(struct device *dev) +{ + +} + +static struct platform_device wl1271_device = { + .name = "wl1271", + .id = -1, + + /* device model insists to have a release function */ + .dev = { + .release = wl1271_device_release, + }, +}; static DEFINE_MUTEX(wl_list_mutex); static LIST_HEAD(wl_list); -static int wl1271_check_operstate(struct wl1271 *wl, struct wl12xx_vif *wlvif, - unsigned char operstate) +static int wl1271_check_operstate(struct wl1271 *wl, unsigned char operstate) { int ret; - if (operstate != IF_OPER_UP) return 0; - if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags)) + if (test_and_set_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags)) return 0; - ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid); + ret = wl12xx_cmd_set_peer_state(wl, wl->sta_hlid); if (ret < 0) return ret; - wl12xx_croc(wl, wlvif->role_id); + wl12xx_croc(wl, wl->role_id); wl1271_info("Association completed."); return 0; @@ -416,7 +426,6 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what, struct ieee80211_hw *hw; struct wl1271 *wl; struct wl1271 *wl_temp; - struct wl12xx_vif *wlvif; int ret = 0; /* Check that this notification is for us. */ @@ -450,28 +459,17 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what, if (wl->state == WL1271_STATE_OFF) goto out; - if (dev->operstate != IF_OPER_UP) + if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) goto out; - /* - * The correct behavior should be just getting the appropriate wlvif - * from the given dev, but currently we don't have a mac80211 - * interface for it. 
- */ - wl12xx_for_each_wlvif_sta(wl, wlvif) { - struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); - if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) - continue; + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) - goto out; + wl1271_check_operstate(wl, dev->operstate); - wl1271_check_operstate(wl, wlvif, - ieee80211_get_operstate(vif)); + wl1271_ps_elp_sleep(wl); - wl1271_ps_elp_sleep(wl); - } out: mutex_unlock(&wl->mutex); @@ -500,20 +498,19 @@ static int wl1271_reg_notify(struct wiphy *wiphy, return 0; } -static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif, - bool enable) +static int wl1271_set_rx_streaming(struct wl1271 *wl, bool enable) { int ret = 0; /* we should hold wl->mutex */ - ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable); + ret = wl1271_acx_ps_rx_streaming(wl, enable); if (ret < 0) goto out; if (enable) - set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags); + set_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags); else - clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags); + clear_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags); out: return ret; } @@ -522,25 +519,25 @@ static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif, * this function is being called when the rx_streaming interval * has beed changed or rx_streaming should be disabled */ -int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif) +int wl1271_recalc_rx_streaming(struct wl1271 *wl) { int ret = 0; int period = wl->conf.rx_streaming.interval; /* don't reconfigure if rx_streaming is disabled */ - if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags)) + if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags)) goto out; /* reconfigure/disable according to new streaming_period */ if (period && - test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) && + test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) && (wl->conf.rx_streaming.always || test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) - ret = wl1271_set_rx_streaming(wl, wlvif, true); + ret = wl1271_set_rx_streaming(wl, true); else { - ret = wl1271_set_rx_streaming(wl, wlvif, false); + ret = wl1271_set_rx_streaming(wl, false); /* don't cancel_work_sync since we might deadlock */ - del_timer_sync(&wlvif->rx_streaming_timer); + del_timer_sync(&wl->rx_streaming_timer); } out: return ret; @@ -549,14 +546,13 @@ int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif) static void wl1271_rx_streaming_enable_work(struct work_struct *work) { int ret; - struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif, - rx_streaming_enable_work); - struct wl1271 *wl = wlvif->wl; + struct wl1271 *wl = + container_of(work, struct wl1271, rx_streaming_enable_work); mutex_lock(&wl->mutex); - if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) || - !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) || + if (test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags) || + !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) || (!wl->conf.rx_streaming.always && !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) goto out; @@ -568,12 +564,12 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work) if (ret < 0) goto out; - ret = wl1271_set_rx_streaming(wl, wlvif, true); + ret = wl1271_set_rx_streaming(wl, true); if (ret < 0) goto out_sleep; /* stop it after some time of inactivity */ - mod_timer(&wlvif->rx_streaming_timer, + mod_timer(&wl->rx_streaming_timer, jiffies + 
msecs_to_jiffies(wl->conf.rx_streaming.duration)); out_sleep: @@ -585,20 +581,19 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work) static void wl1271_rx_streaming_disable_work(struct work_struct *work) { int ret; - struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif, - rx_streaming_disable_work); - struct wl1271 *wl = wlvif->wl; + struct wl1271 *wl = + container_of(work, struct wl1271, rx_streaming_disable_work); mutex_lock(&wl->mutex); - if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags)) + if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags)) goto out; ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; - ret = wl1271_set_rx_streaming(wl, wlvif, false); + ret = wl1271_set_rx_streaming(wl, false); if (ret) goto out_sleep; @@ -610,9 +605,8 @@ static void wl1271_rx_streaming_disable_work(struct work_struct *work) static void wl1271_rx_streaming_timer(unsigned long data) { - struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data; - struct wl1271 *wl = wlvif->wl; - ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work); + struct wl1271 *wl = (struct wl1271 *)data; + ieee80211_queue_work(wl->hw, &wl->rx_streaming_disable_work); } static void wl1271_conf_init(struct wl1271 *wl) @@ -651,7 +645,9 @@ static void wl1271_conf_init(struct wl1271 *wl) static int wl1271_plt_init(struct wl1271 *wl) { - int ret; + struct conf_tx_ac_category *conf_ac; + struct conf_tx_tid *conf_tid; + int ret, i; if (wl->chip.id == CHIP_ID_1283_PG20) ret = wl128x_cmd_general_parms(wl); @@ -680,14 +676,74 @@ static int wl1271_plt_init(struct wl1271 *wl) if (ret < 0) return ret; + ret = wl1271_sta_init_templates_config(wl); + if (ret < 0) + return ret; + ret = wl1271_acx_init_mem_config(wl); if (ret < 0) return ret; + /* PHY layer config */ + ret = wl1271_init_phy_config(wl); + if (ret < 0) + goto out_free_memmap; + + ret = wl1271_acx_dco_itrim_params(wl); + if (ret < 0) + goto out_free_memmap; + + /* Initialize connection monitoring thresholds */ + ret = wl1271_acx_conn_monit_params(wl, false); + if (ret < 0) + goto out_free_memmap; + + /* Bluetooth WLAN coexistence */ + ret = wl1271_init_pta(wl); + if (ret < 0) + goto out_free_memmap; + + /* FM WLAN coexistence */ + ret = wl1271_acx_fm_coex(wl); + if (ret < 0) + goto out_free_memmap; + + /* Energy detection */ + ret = wl1271_init_energy_detection(wl); + if (ret < 0) + goto out_free_memmap; + ret = wl12xx_acx_mem_cfg(wl); if (ret < 0) goto out_free_memmap; + /* Default fragmentation threshold */ + ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold); + if (ret < 0) + goto out_free_memmap; + + /* Default TID/AC configuration */ + BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count); + for (i = 0; i < wl->conf.tx.tid_conf_count; i++) { + conf_ac = &wl->conf.tx.ac_conf[i]; + ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min, + conf_ac->cw_max, conf_ac->aifsn, + conf_ac->tx_op_limit); + if (ret < 0) + goto out_free_memmap; + + conf_tid = &wl->conf.tx.tid_conf[i]; + ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id, + conf_tid->channel_type, + conf_tid->tsid, + conf_tid->ps_scheme, + conf_tid->ack_policy, + conf_tid->apsd_conf[0], + conf_tid->apsd_conf[1]); + if (ret < 0) + goto out_free_memmap; + } + /* Enable data path */ ret = wl1271_cmd_data_path(wl, 1); if (ret < 0) @@ -712,12 +768,14 @@ static int wl1271_plt_init(struct wl1271 *wl) return ret; } -static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, - struct wl12xx_vif *wlvif, - u8 hlid, u8 tx_pkts) +static void 
wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts) { bool fw_ps, single_sta; + /* only regulate station links */ + if (hlid < WL1271_AP_STA_HLID_START) + return; + fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); single_sta = (wl->active_sta_count == 1); @@ -726,7 +784,7 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, * packets in FW or if the STA is awake. */ if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS) - wl12xx_ps_link_end(wl, wlvif, hlid); + wl1271_ps_link_end(wl, hlid); /* * Start high-level PS if the STA is asleep with enough blocks in FW. @@ -734,14 +792,24 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, * case FW-memory congestion is not a problem. */ else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS) - wl12xx_ps_link_start(wl, wlvif, hlid, true); + wl1271_ps_link_start(wl, hlid, true); +} + +bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid) +{ + int id; + + /* global/broadcast "stations" are always active */ + if (hlid < WL1271_AP_STA_HLID_START) + return true; + + id = hlid - WL1271_AP_STA_HLID_START; + return test_bit(id, wl->ap_hlid_map); } static void wl12xx_irq_update_links_status(struct wl1271 *wl, - struct wl12xx_vif *wlvif, struct wl12xx_fw_status *status) { - struct wl1271_link *lnk; u32 cur_fw_ps_map; u8 hlid, cnt; @@ -757,22 +825,25 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl, wl->ap_fw_ps_map = cur_fw_ps_map; } - for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) { - lnk = &wl->links[hlid]; - cnt = status->tx_lnk_free_pkts[hlid] - lnk->prev_freed_pkts; + for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) { + if (!wl1271_is_active_sta(wl, hlid)) + continue; + + cnt = status->tx_lnk_free_pkts[hlid] - + wl->links[hlid].prev_freed_pkts; - lnk->prev_freed_pkts = status->tx_lnk_free_pkts[hlid]; - lnk->allocated_pkts -= cnt; + wl->links[hlid].prev_freed_pkts = + status->tx_lnk_free_pkts[hlid]; + wl->links[hlid].allocated_pkts -= cnt; - wl12xx_irq_ps_regulate_link(wl, wlvif, hlid, - lnk->allocated_pkts); + wl12xx_irq_ps_regulate_link(wl, hlid, + wl->links[hlid].allocated_pkts); } } static void wl12xx_fw_status(struct wl1271 *wl, struct wl12xx_fw_status *status) { - struct wl12xx_vif *wlvif; struct timespec ts; u32 old_tx_blk_count = wl->tx_blocks_available; int avail, freed_blocks; @@ -827,9 +898,8 @@ static void wl12xx_fw_status(struct wl1271 *wl, clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); /* for AP update num of allocated TX blocks per link and ps status */ - wl12xx_for_each_wlvif_ap(wl, wlvif) { - wl12xx_irq_update_links_status(wl, wlvif, status); - } + if (wl->bss_type == BSS_TYPE_AP_BSS) + wl12xx_irq_update_links_status(wl, status); /* update the host-chipset time offset */ getnstimeofday(&ts); @@ -862,7 +932,7 @@ static void wl1271_netstack_work(struct work_struct *work) #define WL1271_IRQ_MAX_LOOPS 256 -static irqreturn_t wl1271_irq(int irq, void *cookie) +irqreturn_t wl1271_irq(int irq, void *cookie) { int ret; u32 intr; @@ -984,6 +1054,7 @@ static irqreturn_t wl1271_irq(int irq, void *cookie) return IRQ_HANDLED; } +EXPORT_SYMBOL_GPL(wl1271_irq); static int wl1271_fetch_firmware(struct wl1271 *wl) { @@ -998,10 +1069,10 @@ static int wl1271_fetch_firmware(struct wl1271 *wl) wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name); - ret = request_firmware(&fw, fw_name, wl->dev); + ret = request_firmware(&fw, fw_name, wl1271_wl_to_dev(wl)); if (ret < 0) { - wl1271_error("could not get firmware %s: %d", fw_name, ret); + 
wl1271_error("could not get firmware: %d", ret); return ret; } @@ -1036,11 +1107,10 @@ static int wl1271_fetch_nvs(struct wl1271 *wl) const struct firmware *fw; int ret; - ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev); + ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl)); if (ret < 0) { - wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME, - ret); + wl1271_error("could not get nvs file: %d", ret); return ret; } @@ -1147,13 +1217,11 @@ static void wl1271_recovery_work(struct work_struct *work) { struct wl1271 *wl = container_of(work, struct wl1271, recovery_work); - struct wl12xx_vif *wlvif; - struct ieee80211_vif *vif; mutex_lock(&wl->mutex); if (wl->state != WL1271_STATE_ON) - goto out_unlock; + goto out; /* Avoid a recursive recovery */ set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags); @@ -1170,12 +1238,9 @@ static void wl1271_recovery_work(struct work_struct *work) * in the firmware during recovery. This doens't hurt if the network is * not encrypted. */ - wl12xx_for_each_wlvif(wl, wlvif) { - if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) || - test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) - wlvif->tx_security_seq += - WL1271_TX_SQN_POST_RECOVERY_PADDING; - } + if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) || + test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) + wl->tx_security_seq += WL1271_TX_SQN_POST_RECOVERY_PADDING; /* Prevent spurious TX during FW restart */ ieee80211_stop_queues(wl->hw); @@ -1186,14 +1251,7 @@ static void wl1271_recovery_work(struct work_struct *work) } /* reboot the chipset */ - while (!list_empty(&wl->wlvif_list)) { - wlvif = list_first_entry(&wl->wlvif_list, - struct wl12xx_vif, list); - vif = wl12xx_wlvif_to_vif(wlvif); - __wl1271_op_remove_interface(wl, vif, false); - } - mutex_unlock(&wl->mutex); - wl1271_op_stop(wl->hw); + __wl1271_op_remove_interface(wl, false); clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags); @@ -1204,8 +1262,8 @@ static void wl1271_recovery_work(struct work_struct *work) * to restart the HW. */ ieee80211_wake_queues(wl->hw); - return; -out_unlock: + +out: mutex_unlock(&wl->mutex); } @@ -1260,16 +1318,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl) /* 0. read chip id from CHIP_ID */ wl->chip.id = wl1271_read32(wl, CHIP_ID_B); - /* - * For wl127x based devices we could use the default block - * size (512 bytes), but due to a bug in the sdio driver, we - * need to set it explicitly after the chip is powered on. To - * simplify the code and since the performance impact is - * negligible, we use the same block size for all different - * chip types. - */ - if (!wl1271_set_block_size(wl)) - wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT; + /* 1. 
check if chip id is valid */ switch (wl->chip.id) { case CHIP_ID_1271_PG10: @@ -1279,9 +1328,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl) ret = wl1271_setup(wl); if (ret < 0) goto out; - wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT; break; - case CHIP_ID_1271_PG20: wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)", wl->chip.id); @@ -1289,9 +1336,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl) ret = wl1271_setup(wl); if (ret < 0) goto out; - wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT; break; - case CHIP_ID_1283_PG20: wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)", wl->chip.id); @@ -1299,6 +1344,9 @@ static int wl1271_chip_wakeup(struct wl1271 *wl) ret = wl1271_setup(wl); if (ret < 0) goto out; + + if (wl1271_set_block_size(wl)) + wl->quirks |= WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT; break; case CHIP_ID_1283_PG10: default: @@ -1341,6 +1389,8 @@ int wl1271_plt_start(struct wl1271 *wl) goto out; } + wl->bss_type = BSS_TYPE_STA_BSS; + while (retries) { retries--; ret = wl1271_chip_wakeup(wl); @@ -1432,33 +1482,32 @@ int wl1271_plt_stop(struct wl1271 *wl) static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct wl1271 *wl = hw->priv; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_vif *vif = info->control.vif; - struct wl12xx_vif *wlvif = NULL; unsigned long flags; int q, mapping; - u8 hlid; - - if (vif) - wlvif = wl12xx_vif_to_data(vif); + u8 hlid = 0; mapping = skb_get_queue_mapping(skb); q = wl1271_tx_get_queue(mapping); - hlid = wl12xx_tx_get_hlid(wl, wlvif, skb); + if (wl->bss_type == BSS_TYPE_AP_BSS) + hlid = wl12xx_tx_get_hlid_ap(wl, skb); spin_lock_irqsave(&wl->wl_lock, flags); /* queue the packet */ - if (hlid == WL12XX_INVALID_LINK_ID || - (wlvif && !test_bit(hlid, wlvif->links_map))) { - wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q); - ieee80211_free_txskb(hw, skb); - goto out; - } + if (wl->bss_type == BSS_TYPE_AP_BSS) { + if (!wl1271_is_active_sta(wl, hlid)) { + wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", + hlid, q); + dev_kfree_skb(skb); + goto out; + } - wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q); - skb_queue_tail(&wl->links[hlid].tx_queue[q], skb); + wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q); + skb_queue_tail(&wl->links[hlid].tx_queue[q], skb); + } else { + skb_queue_tail(&wl->tx_queue[q], skb); + } wl->tx_queue_count[q]++; @@ -1560,14 +1609,13 @@ static struct notifier_block wl1271_dev_notifier = { }; #ifdef CONFIG_PM -static int wl1271_configure_suspend_sta(struct wl1271 *wl, - struct wl12xx_vif *wlvif) +static int wl1271_configure_suspend_sta(struct wl1271 *wl) { int ret = 0; mutex_lock(&wl->mutex); - if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) + if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) goto out_unlock; ret = wl1271_ps_elp_wakeup(wl); @@ -1575,12 +1623,12 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl, goto out_unlock; /* enter psm if needed*/ - if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) { + if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) { DECLARE_COMPLETION_ONSTACK(compl); - wlvif->ps_compl = &compl; - ret = wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE, - wlvif->basic_rate, true); + wl->ps_compl = &compl; + ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, + wl->basic_rate, true); if (ret < 0) goto out_sleep; @@ -1590,43 +1638,42 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl, ret = wait_for_completion_timeout( &compl, msecs_to_jiffies(WL1271_PS_COMPLETE_TIMEOUT)); - - 
mutex_lock(&wl->mutex); if (ret <= 0) { wl1271_warning("couldn't enter ps mode!"); ret = -EBUSY; - goto out_cleanup; + goto out; } + /* take mutex again, and wakeup */ + mutex_lock(&wl->mutex); + ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) - goto out_cleanup; + goto out_unlock; } out_sleep: wl1271_ps_elp_sleep(wl); -out_cleanup: - wlvif->ps_compl = NULL; out_unlock: mutex_unlock(&wl->mutex); +out: return ret; } -static int wl1271_configure_suspend_ap(struct wl1271 *wl, - struct wl12xx_vif *wlvif) +static int wl1271_configure_suspend_ap(struct wl1271 *wl) { int ret = 0; mutex_lock(&wl->mutex); - if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) + if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) goto out_unlock; ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out_unlock; - ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true); + ret = wl1271_acx_beacon_filter_opt(wl, true); wl1271_ps_elp_sleep(wl); out_unlock: @@ -1635,22 +1682,20 @@ static int wl1271_configure_suspend_ap(struct wl1271 *wl, } -static int wl1271_configure_suspend(struct wl1271 *wl, - struct wl12xx_vif *wlvif) +static int wl1271_configure_suspend(struct wl1271 *wl) { - if (wlvif->bss_type == BSS_TYPE_STA_BSS) - return wl1271_configure_suspend_sta(wl, wlvif); - if (wlvif->bss_type == BSS_TYPE_AP_BSS) - return wl1271_configure_suspend_ap(wl, wlvif); + if (wl->bss_type == BSS_TYPE_STA_BSS) + return wl1271_configure_suspend_sta(wl); + if (wl->bss_type == BSS_TYPE_AP_BSS) + return wl1271_configure_suspend_ap(wl); return 0; } -static void wl1271_configure_resume(struct wl1271 *wl, - struct wl12xx_vif *wlvif) +static void wl1271_configure_resume(struct wl1271 *wl) { int ret; - bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS; - bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS; + bool is_sta = wl->bss_type == BSS_TYPE_STA_BSS; + bool is_ap = wl->bss_type == BSS_TYPE_AP_BSS; if (!is_sta && !is_ap) return; @@ -1662,11 +1707,11 @@ static void wl1271_configure_resume(struct wl1271 *wl, if (is_sta) { /* exit psm if it wasn't configured */ - if (!test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) - wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE, - wlvif->basic_rate, true); + if (!test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) + wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE, + wl->basic_rate, true); } else if (is_ap) { - wl1271_acx_beacon_filter_opt(wl, wlvif, false); + wl1271_acx_beacon_filter_opt(wl, false); } wl1271_ps_elp_sleep(wl); @@ -1678,19 +1723,16 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wow) { struct wl1271 *wl = hw->priv; - struct wl12xx_vif *wlvif; int ret; wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow); WARN_ON(!wow || !wow->any); wl->wow_enabled = true; - wl12xx_for_each_wlvif(wl, wlvif) { - ret = wl1271_configure_suspend(wl, wlvif); - if (ret < 0) { - wl1271_warning("couldn't prepare device to suspend"); - return ret; - } + ret = wl1271_configure_suspend(wl); + if (ret < 0) { + wl1271_warning("couldn't prepare device to suspend"); + return ret; } /* flush any remaining work */ wl1271_debug(DEBUG_MAC80211, "flushing remaining works"); @@ -1709,9 +1751,7 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw, wl1271_enable_interrupts(wl); flush_work(&wl->tx_work); - wl12xx_for_each_wlvif(wl, wlvif) { - flush_delayed_work(&wlvif->pspoll_work); - } + flush_delayed_work(&wl->pspoll_work); flush_delayed_work(&wl->elp_work); return 0; @@ -1720,7 +1760,6 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw, static int wl1271_op_resume(struct ieee80211_hw *hw) { 
struct wl1271 *wl = hw->priv; - struct wl12xx_vif *wlvif; unsigned long flags; bool run_irq_work = false; @@ -1744,9 +1783,7 @@ static int wl1271_op_resume(struct ieee80211_hw *hw) wl1271_irq(0, wl); wl1271_enable_interrupts(wl); } - wl12xx_for_each_wlvif(wl, wlvif) { - wl1271_configure_resume(wl, wlvif); - } + wl1271_configure_resume(wl); wl->wow_enabled = false; return 0; @@ -1773,119 +1810,20 @@ static int wl1271_op_start(struct ieee80211_hw *hw) static void wl1271_op_stop(struct ieee80211_hw *hw) { - struct wl1271 *wl = hw->priv; - int i; - wl1271_debug(DEBUG_MAC80211, "mac80211 stop"); - - mutex_lock(&wl->mutex); - if (wl->state == WL1271_STATE_OFF) { - mutex_unlock(&wl->mutex); - return; - } - /* - * this must be before the cancel_work calls below, so that the work - * functions don't perform further work. - */ - wl->state = WL1271_STATE_OFF; - mutex_unlock(&wl->mutex); - - mutex_lock(&wl_list_mutex); - list_del(&wl->list); - mutex_unlock(&wl_list_mutex); - - wl1271_disable_interrupts(wl); - wl1271_flush_deferred_work(wl); - cancel_delayed_work_sync(&wl->scan_complete_work); - cancel_work_sync(&wl->netstack_work); - cancel_work_sync(&wl->tx_work); - cancel_delayed_work_sync(&wl->elp_work); - - /* let's notify MAC80211 about the remaining pending TX frames */ - wl12xx_tx_reset(wl, true); - mutex_lock(&wl->mutex); - - wl1271_power_off(wl); - - wl->band = IEEE80211_BAND_2GHZ; - - wl->rx_counter = 0; - wl->power_level = WL1271_DEFAULT_POWER_LEVEL; - wl->tx_blocks_available = 0; - wl->tx_allocated_blocks = 0; - wl->tx_results_count = 0; - wl->tx_packets_count = 0; - wl->time_offset = 0; - wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT; - wl->ap_fw_ps_map = 0; - wl->ap_ps_map = 0; - wl->sched_scanning = false; - memset(wl->roles_map, 0, sizeof(wl->roles_map)); - memset(wl->links_map, 0, sizeof(wl->links_map)); - memset(wl->roc_map, 0, sizeof(wl->roc_map)); - wl->active_sta_count = 0; - - /* The system link is always allocated */ - __set_bit(WL12XX_SYSTEM_HLID, wl->links_map); - - /* - * this is performed after the cancel_work calls and the associated - * mutex_lock, so that wl1271_op_add_interface does not accidentally - * get executed before all these vars have been reset. 
- */ - wl->flags = 0; - - wl->tx_blocks_freed = 0; - - for (i = 0; i < NUM_TX_QUEUES; i++) { - wl->tx_pkts_freed[i] = 0; - wl->tx_allocated_pkts[i] = 0; - } - - wl1271_debugfs_reset(wl); - - kfree(wl->fw_status); - wl->fw_status = NULL; - kfree(wl->tx_res_if); - wl->tx_res_if = NULL; - kfree(wl->target_mem_map); - wl->target_mem_map = NULL; - - mutex_unlock(&wl->mutex); -} - -static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx) -{ - u8 policy = find_first_zero_bit(wl->rate_policies_map, - WL12XX_MAX_RATE_POLICIES); - if (policy >= WL12XX_MAX_RATE_POLICIES) - return -EBUSY; - - __set_bit(policy, wl->rate_policies_map); - *idx = policy; - return 0; -} - -static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx) -{ - if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES)) - return; - - __clear_bit(*idx, wl->rate_policies_map); - *idx = WL12XX_MAX_RATE_POLICIES; } -static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif) +static u8 wl12xx_get_role_type(struct wl1271 *wl) { - switch (wlvif->bss_type) { + switch (wl->bss_type) { case BSS_TYPE_AP_BSS: - if (wlvif->p2p) + if (wl->p2p) return WL1271_ROLE_P2P_GO; else return WL1271_ROLE_AP; case BSS_TYPE_STA_BSS: - if (wlvif->p2p) + if (wl->p2p) return WL1271_ROLE_P2P_CL; else return WL1271_ROLE_STA; @@ -1894,95 +1832,78 @@ static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif) return WL1271_ROLE_IBSS; default: - wl1271_error("invalid bss_type: %d", wlvif->bss_type); + wl1271_error("invalid bss_type: %d", wl->bss_type); } return WL12XX_INVALID_ROLE_TYPE; } -static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif) +static int wl1271_op_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); - int i; + struct wl1271 *wl = hw->priv; + struct wiphy *wiphy = hw->wiphy; + int retries = WL1271_BOOT_RETRIES; + int ret = 0; + u8 role_type; + bool booted = false; - /* clear everything but the persistent data */ - memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent)); + wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM", + ieee80211_vif_type_p2p(vif), vif->addr); + + mutex_lock(&wl->mutex); + if (wl->vif) { + wl1271_debug(DEBUG_MAC80211, + "multiple vifs are not supported yet"); + ret = -EBUSY; + goto out; + } + + /* + * in some very corner case HW recovery scenarios its possible to + * get here before __wl1271_op_remove_interface is complete, so + * opt out if that is the case. 
+ */ + if (test_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags)) { + ret = -EBUSY; + goto out; + } switch (ieee80211_vif_type_p2p(vif)) { case NL80211_IFTYPE_P2P_CLIENT: - wlvif->p2p = 1; + wl->p2p = 1; /* fall-through */ case NL80211_IFTYPE_STATION: - wlvif->bss_type = BSS_TYPE_STA_BSS; + wl->bss_type = BSS_TYPE_STA_BSS; + wl->set_bss_type = BSS_TYPE_STA_BSS; break; case NL80211_IFTYPE_ADHOC: - wlvif->bss_type = BSS_TYPE_IBSS; + wl->bss_type = BSS_TYPE_IBSS; + wl->set_bss_type = BSS_TYPE_STA_BSS; break; case NL80211_IFTYPE_P2P_GO: - wlvif->p2p = 1; + wl->p2p = 1; /* fall-through */ case NL80211_IFTYPE_AP: - wlvif->bss_type = BSS_TYPE_AP_BSS; + wl->bss_type = BSS_TYPE_AP_BSS; break; default: - wlvif->bss_type = MAX_BSS_TYPE; - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto out; } - wlvif->role_id = WL12XX_INVALID_ROLE_ID; - wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID; - wlvif->dev_hlid = WL12XX_INVALID_LINK_ID; - - if (wlvif->bss_type == BSS_TYPE_STA_BSS || - wlvif->bss_type == BSS_TYPE_IBSS) { - /* init sta/ibss data */ - wlvif->sta.hlid = WL12XX_INVALID_LINK_ID; - wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx); - wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx); - wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx); - } else { - /* init ap data */ - wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID; - wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID; - wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx); - wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx); - for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++) - wl12xx_allocate_rate_policy(wl, - &wlvif->ap.ucast_rate_idx[i]); + role_type = wl12xx_get_role_type(wl); + if (role_type == WL12XX_INVALID_ROLE_TYPE) { + ret = -EINVAL; + goto out; } + memcpy(wl->mac_addr, vif->addr, ETH_ALEN); - wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate; - wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5; - wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC; - wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC; - wlvif->rate_set = CONF_TX_RATE_MASK_BASIC; - wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT; - - /* - * mac80211 configures some values globally, while we treat them - * per-interface. 
thus, on init, we have to copy them from wl - */ - wlvif->band = wl->band; - wlvif->channel = wl->channel; - wlvif->power_level = wl->power_level; - - INIT_WORK(&wlvif->rx_streaming_enable_work, - wl1271_rx_streaming_enable_work); - INIT_WORK(&wlvif->rx_streaming_disable_work, - wl1271_rx_streaming_disable_work); - INIT_DELAYED_WORK(&wlvif->pspoll_work, wl1271_pspoll_work); - INIT_LIST_HEAD(&wlvif->list); - - setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, - (unsigned long) wlvif); - return 0; -} - -static bool wl12xx_init_fw(struct wl1271 *wl) -{ - int retries = WL1271_BOOT_RETRIES; - bool booted = false; - struct wiphy *wiphy = wl->hw->wiphy; - int ret; + if (wl->state != WL1271_STATE_OFF) { + wl1271_error("cannot start because not in off state: %d", + wl->state); + ret = -EBUSY; + goto out; + } while (retries) { retries--; @@ -1994,6 +1915,25 @@ static bool wl12xx_init_fw(struct wl1271 *wl) if (ret < 0) goto power_off; + if (wl->bss_type == BSS_TYPE_STA_BSS || + wl->bss_type == BSS_TYPE_IBSS) { + /* + * The device role is a special role used for + * rx and tx frames prior to association (as + * the STA role can get packets only from + * its associated bssid) + */ + ret = wl12xx_cmd_role_enable(wl, + WL1271_ROLE_DEVICE, + &wl->dev_role_id); + if (ret < 0) + goto irq_disable; + } + + ret = wl12xx_cmd_role_enable(wl, role_type, &wl->role_id); + if (ret < 0) + goto irq_disable; + ret = wl1271_hw_init(wl); if (ret < 0) goto irq_disable; @@ -2024,6 +1964,9 @@ static bool wl12xx_init_fw(struct wl1271 *wl) goto out; } + wl->vif = vif; + wl->state = WL1271_STATE_ON; + set_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags); wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str); /* update hw/fw version info in wiphy struct */ @@ -2041,115 +1984,7 @@ static bool wl12xx_init_fw(struct wl1271 *wl) wl1271_debug(DEBUG_MAC80211, "11a is %ssupported", wl->enable_11a ? "" : "not "); - wl->state = WL1271_STATE_ON; out: - return booted; -} - -static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif) -{ - return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID; -} - -static int wl1271_op_add_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct wl1271 *wl = hw->priv; - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); - int ret = 0; - u8 role_type; - bool booted = false; - - wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM", - ieee80211_vif_type_p2p(vif), vif->addr); - - mutex_lock(&wl->mutex); - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) - goto out_unlock; - - if (wl->vif) { - wl1271_debug(DEBUG_MAC80211, - "multiple vifs are not supported yet"); - ret = -EBUSY; - goto out; - } - - /* - * in some very corner case HW recovery scenarios its possible to - * get here before __wl1271_op_remove_interface is complete, so - * opt out if that is the case. - */ - if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) || - test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) { - ret = -EBUSY; - goto out; - } - - ret = wl12xx_init_vif_data(wl, vif); - if (ret < 0) - goto out; - - wlvif->wl = wl; - role_type = wl12xx_get_role_type(wl, wlvif); - if (role_type == WL12XX_INVALID_ROLE_TYPE) { - ret = -EINVAL; - goto out; - } - - /* - * TODO: after the nvs issue will be solved, move this block - * to start(), and make sure here the driver is ON. 
- */ - if (wl->state == WL1271_STATE_OFF) { - /* - * we still need this in order to configure the fw - * while uploading the nvs - */ - memcpy(wl->mac_addr, vif->addr, ETH_ALEN); - - booted = wl12xx_init_fw(wl); - if (!booted) { - ret = -EINVAL; - goto out; - } - } - - if (wlvif->bss_type == BSS_TYPE_STA_BSS || - wlvif->bss_type == BSS_TYPE_IBSS) { - /* - * The device role is a special role used for - * rx and tx frames prior to association (as - * the STA role can get packets only from - * its associated bssid) - */ - ret = wl12xx_cmd_role_enable(wl, vif->addr, - WL1271_ROLE_DEVICE, - &wlvif->dev_role_id); - if (ret < 0) - goto out; - } - - ret = wl12xx_cmd_role_enable(wl, vif->addr, - role_type, &wlvif->role_id); - if (ret < 0) - goto out; - - ret = wl1271_init_vif_specific(wl, vif); - if (ret < 0) - goto out; - - wl->vif = vif; - list_add(&wlvif->list, &wl->wlvif_list); - set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags); - - if (wlvif->bss_type == BSS_TYPE_AP_BSS) - wl->ap_count++; - else - wl->sta_count++; -out: - wl1271_ps_elp_sleep(wl); -out_unlock: mutex_unlock(&wl->mutex); mutex_lock(&wl_list_mutex); @@ -2161,34 +1996,29 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, } static void __wl1271_op_remove_interface(struct wl1271 *wl, - struct ieee80211_vif *vif, bool reset_tx_queues) { - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); - int i, ret; + int ret, i; wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface"); - if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) - return; - - wl->vif = NULL; - /* because of hardware recovery, we may get here twice */ if (wl->state != WL1271_STATE_ON) return; wl1271_info("down"); + mutex_lock(&wl_list_mutex); + list_del(&wl->list); + mutex_unlock(&wl_list_mutex); + /* enable dyn ps just in case (if left on due to fw crash etc) */ - if (wlvif->bss_type == BSS_TYPE_STA_BSS) - ieee80211_enable_dyn_ps(vif); + if (wl->bss_type == BSS_TYPE_STA_BSS) + ieee80211_enable_dyn_ps(wl->vif); - if (wl->scan.state != WL1271_SCAN_STATE_IDLE && - wl->scan_vif == vif) { + if (wl->scan.state != WL1271_SCAN_STATE_IDLE) { wl->scan.state = WL1271_SCAN_STATE_IDLE; memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); - wl->scan_vif = NULL; wl->scan.req = NULL; ieee80211_scan_completed(wl->hw, true); } @@ -2199,17 +2029,13 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl, if (ret < 0) goto deinit; - if (wlvif->bss_type == BSS_TYPE_STA_BSS || - wlvif->bss_type == BSS_TYPE_IBSS) { - if (wl12xx_dev_role_started(wlvif)) - wl12xx_stop_dev(wl, wlvif); - - ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id); + if (wl->bss_type == BSS_TYPE_STA_BSS) { + ret = wl12xx_cmd_role_disable(wl, &wl->dev_role_id); if (ret < 0) goto deinit; } - ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id); + ret = wl12xx_cmd_role_disable(wl, &wl->role_id); if (ret < 0) goto deinit; @@ -2217,93 +2043,120 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl, } deinit: /* clear all hlids (except system_hlid) */ - wlvif->dev_hlid = WL12XX_INVALID_LINK_ID; - - if (wlvif->bss_type == BSS_TYPE_STA_BSS || - wlvif->bss_type == BSS_TYPE_IBSS) { - wlvif->sta.hlid = WL12XX_INVALID_LINK_ID; - wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx); - wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx); - wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx); - } else { - wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID; - wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID; - wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx); - 
wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx); - for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++) - wl12xx_free_rate_policy(wl, - &wlvif->ap.ucast_rate_idx[i]); - } + wl->sta_hlid = WL12XX_INVALID_LINK_ID; + wl->dev_hlid = WL12XX_INVALID_LINK_ID; + wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID; + wl->ap_global_hlid = WL12XX_INVALID_LINK_ID; - wl12xx_tx_reset_wlvif(wl, wlvif); - wl1271_free_ap_keys(wl, wlvif); - if (wl->last_wlvif == wlvif) - wl->last_wlvif = NULL; - list_del(&wlvif->list); - memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map)); - wlvif->role_id = WL12XX_INVALID_ROLE_ID; - wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID; - - if (wlvif->bss_type == BSS_TYPE_AP_BSS) - wl->ap_count--; - else - wl->sta_count--; + /* + * this must be before the cancel_work calls below, so that the work + * functions don't perform further work. + */ + wl->state = WL1271_STATE_OFF; mutex_unlock(&wl->mutex); - del_timer_sync(&wlvif->rx_streaming_timer); - cancel_work_sync(&wlvif->rx_streaming_enable_work); - cancel_work_sync(&wlvif->rx_streaming_disable_work); - cancel_delayed_work_sync(&wlvif->pspoll_work); + + wl1271_disable_interrupts(wl); + wl1271_flush_deferred_work(wl); + cancel_delayed_work_sync(&wl->scan_complete_work); + cancel_work_sync(&wl->netstack_work); + cancel_work_sync(&wl->tx_work); + del_timer_sync(&wl->rx_streaming_timer); + cancel_work_sync(&wl->rx_streaming_enable_work); + cancel_work_sync(&wl->rx_streaming_disable_work); + cancel_delayed_work_sync(&wl->pspoll_work); + cancel_delayed_work_sync(&wl->elp_work); mutex_lock(&wl->mutex); + + /* let's notify MAC80211 about the remaining pending TX frames */ + wl1271_tx_reset(wl, reset_tx_queues); + wl1271_power_off(wl); + + memset(wl->bssid, 0, ETH_ALEN); + memset(wl->ssid, 0, IEEE80211_MAX_SSID_LEN + 1); + wl->ssid_len = 0; + wl->bss_type = MAX_BSS_TYPE; + wl->set_bss_type = MAX_BSS_TYPE; + wl->p2p = 0; + wl->band = IEEE80211_BAND_2GHZ; + + wl->rx_counter = 0; + wl->psm_entry_retry = 0; + wl->power_level = WL1271_DEFAULT_POWER_LEVEL; + wl->tx_blocks_available = 0; + wl->tx_allocated_blocks = 0; + wl->tx_results_count = 0; + wl->tx_packets_count = 0; + wl->time_offset = 0; + wl->session_counter = 0; + wl->rate_set = CONF_TX_RATE_MASK_BASIC; + wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate; + wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5; + wl->vif = NULL; + wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT; + wl1271_free_ap_keys(wl); + memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map)); + wl->ap_fw_ps_map = 0; + wl->ap_ps_map = 0; + wl->sched_scanning = false; + wl->role_id = WL12XX_INVALID_ROLE_ID; + wl->dev_role_id = WL12XX_INVALID_ROLE_ID; + memset(wl->roles_map, 0, sizeof(wl->roles_map)); + memset(wl->links_map, 0, sizeof(wl->links_map)); + memset(wl->roc_map, 0, sizeof(wl->roc_map)); + wl->active_sta_count = 0; + + /* The system link is always allocated */ + __set_bit(WL12XX_SYSTEM_HLID, wl->links_map); + + /* + * this is performed after the cancel_work calls and the associated + * mutex_lock, so that wl1271_op_add_interface does not accidentally + * get executed before all these vars have been reset. 
+ */ + wl->flags = 0; + + wl->tx_blocks_freed = 0; + + for (i = 0; i < NUM_TX_QUEUES; i++) { + wl->tx_pkts_freed[i] = 0; + wl->tx_allocated_pkts[i] = 0; + } + + wl1271_debugfs_reset(wl); + + kfree(wl->fw_status); + wl->fw_status = NULL; + kfree(wl->tx_res_if); + wl->tx_res_if = NULL; + kfree(wl->target_mem_map); + wl->target_mem_map = NULL; } static void wl1271_op_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct wl1271 *wl = hw->priv; - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); - struct wl12xx_vif *iter; mutex_lock(&wl->mutex); - - if (wl->state == WL1271_STATE_OFF || - !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) - goto out; - /* * wl->vif can be null here if someone shuts down the interface * just when hardware recovery has been started. */ - wl12xx_for_each_wlvif(wl, iter) { - if (iter != wlvif) - continue; - - __wl1271_op_remove_interface(wl, vif, true); - break; + if (wl->vif) { + WARN_ON(wl->vif != vif); + __wl1271_op_remove_interface(wl, true); } - WARN_ON(iter != wlvif); -out: + mutex_unlock(&wl->mutex); cancel_work_sync(&wl->recovery_work); } -static int wl12xx_op_change_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum nl80211_iftype new_type, bool p2p) -{ - wl1271_op_remove_interface(hw, vif); - - vif->type = ieee80211_iftype_p2p(new_type, p2p); - vif->p2p = p2p; - return wl1271_op_add_interface(hw, vif); -} - -static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif, - bool set_assoc) +static int wl1271_join(struct wl1271 *wl, bool set_assoc) { int ret; - bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS); + bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS); /* * One of the side effects of the JOIN command is that is clears @@ -2314,20 +2167,20 @@ static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif, * Keep the below message for now, unless it starts bothering * users who really like to roam a lot :) */ - if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) + if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) wl1271_info("JOIN while associated."); if (set_assoc) - set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags); + set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags); if (is_ibss) - ret = wl12xx_cmd_role_start_ibss(wl, wlvif); + ret = wl12xx_cmd_role_start_ibss(wl); else - ret = wl12xx_cmd_role_start_sta(wl, wlvif); + ret = wl12xx_cmd_role_start_sta(wl); if (ret < 0) goto out; - if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) + if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) goto out; /* @@ -2336,20 +2189,19 @@ static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif, * the join. The acx_aid starts the keep-alive process, and the order * of the commands below is relevant. 
*/ - ret = wl1271_acx_keep_alive_mode(wl, wlvif, true); + ret = wl1271_acx_keep_alive_mode(wl, true); if (ret < 0) goto out; - ret = wl1271_acx_aid(wl, wlvif, wlvif->aid); + ret = wl1271_acx_aid(wl, wl->aid); if (ret < 0) goto out; - ret = wl12xx_cmd_build_klv_null_data(wl, wlvif); + ret = wl1271_cmd_build_klv_null_data(wl); if (ret < 0) goto out; - ret = wl1271_acx_keep_alive_config(wl, wlvif, - CMD_TEMPL_KLV_IDX_NULL_DATA, + ret = wl1271_acx_keep_alive_config(wl, CMD_TEMPL_KLV_IDX_NULL_DATA, ACX_KEEP_ALIVE_TPL_VALID); if (ret < 0) goto out; @@ -2358,63 +2210,72 @@ static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif, return ret; } -static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif) +static int wl1271_unjoin(struct wl1271 *wl) { int ret; - if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) { - struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); - + if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags)) { wl12xx_cmd_stop_channel_switch(wl); - ieee80211_chswitch_done(vif, false); + ieee80211_chswitch_done(wl->vif, false); } /* to stop listening to a channel, we disconnect */ - ret = wl12xx_cmd_role_stop_sta(wl, wlvif); + ret = wl12xx_cmd_role_stop_sta(wl); if (ret < 0) goto out; + memset(wl->bssid, 0, ETH_ALEN); + /* reset TX security counters on a clean disconnect */ - wlvif->tx_security_last_seq_lsb = 0; - wlvif->tx_security_seq = 0; + wl->tx_security_last_seq_lsb = 0; + wl->tx_security_seq = 0; out: return ret; } -static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif) +static void wl1271_set_band_rate(struct wl1271 *wl) { - wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band]; - wlvif->rate_set = wlvif->basic_rate_set; + wl->basic_rate_set = wl->bitrate_masks[wl->band]; + wl->rate_set = wl->basic_rate_set; } -static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif, - bool idle) +static bool wl12xx_is_roc(struct wl1271 *wl) { - int ret; - bool cur_idle = !test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); + u8 role_id; - if (idle == cur_idle) - return 0; + role_id = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES); + if (role_id >= WL12XX_MAX_ROLES) + return false; + + return true; +} + +static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle) +{ + int ret; if (idle) { /* no need to croc if we weren't busy (e.g. 
during boot) */ - if (wl12xx_dev_role_started(wlvif)) { - ret = wl12xx_stop_dev(wl, wlvif); + if (wl12xx_is_roc(wl)) { + ret = wl12xx_croc(wl, wl->dev_role_id); + if (ret < 0) + goto out; + + ret = wl12xx_cmd_role_stop_dev(wl); if (ret < 0) goto out; } - wlvif->rate_set = - wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); - ret = wl1271_acx_sta_rate_policies(wl, wlvif); + wl->rate_set = wl1271_tx_min_rate_get(wl, wl->basic_rate_set); + ret = wl1271_acx_sta_rate_policies(wl); if (ret < 0) goto out; ret = wl1271_acx_keep_alive_config( - wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA, + wl, CMD_TEMPL_KLV_IDX_NULL_DATA, ACX_KEEP_ALIVE_TPL_INVALID); if (ret < 0) goto out; - clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); + set_bit(WL1271_FLAG_IDLE, &wl->flags); } else { /* The current firmware only supports sched_scan in idle */ if (wl->sched_scanning) { @@ -2422,32 +2283,75 @@ static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif, ieee80211_sched_scan_stopped(wl->hw); } - ret = wl12xx_start_dev(wl, wlvif); + ret = wl12xx_cmd_role_start_dev(wl); + if (ret < 0) + goto out; + + ret = wl12xx_roc(wl, wl->dev_role_id); if (ret < 0) goto out; - set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); + clear_bit(WL1271_FLAG_IDLE, &wl->flags); } out: return ret; } -static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif, - struct ieee80211_conf *conf, u32 changed) +static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) { - bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); - int channel, ret; + struct wl1271 *wl = hw->priv; + struct ieee80211_conf *conf = &hw->conf; + int channel, ret = 0; + bool is_ap; channel = ieee80211_frequency_to_channel(conf->channel->center_freq); + wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s" + " changed 0x%x", + channel, + conf->flags & IEEE80211_CONF_PS ? "on" : "off", + conf->power_level, + conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use", + changed); + + /* + * mac80211 will go to idle nearly immediately after transmitting some + * frames, such as the deauth. To make sure those frames reach the air, + * wait here until the TX queue is fully flushed. + */ + if ((changed & IEEE80211_CONF_CHANGE_IDLE) && + (conf->flags & IEEE80211_CONF_IDLE)) + wl1271_tx_flush(wl); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state == WL1271_STATE_OFF)) { + /* we support configuring the channel and band while off */ + if ((changed & IEEE80211_CONF_CHANGE_CHANNEL)) { + wl->band = conf->channel->band; + wl->channel = channel; + } + + if ((changed & IEEE80211_CONF_CHANGE_POWER)) + wl->power_level = conf->power_level; + + goto out; + } + + is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + /* if the channel changes while joined, join again */ if (changed & IEEE80211_CONF_CHANGE_CHANNEL && - ((wlvif->band != conf->channel->band) || - (wlvif->channel != channel))) { + ((wl->band != conf->channel->band) || + (wl->channel != channel))) { /* send all pending packets */ wl1271_tx_work_locked(wl); - wlvif->band = conf->channel->band; - wlvif->channel = channel; + wl->band = conf->channel->band; + wl->channel = channel; if (!is_ap) { /* @@ -2456,27 +2360,24 @@ static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif, * possible rate for the band as a fixed rate for * association frames and other control messages. 
*/ - if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) - wl1271_set_band_rate(wl, wlvif); + if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) + wl1271_set_band_rate(wl); - wlvif->basic_rate = - wl1271_tx_min_rate_get(wl, - wlvif->basic_rate_set); - ret = wl1271_acx_sta_rate_policies(wl, wlvif); + wl->basic_rate = + wl1271_tx_min_rate_get(wl, wl->basic_rate_set); + ret = wl1271_acx_sta_rate_policies(wl); if (ret < 0) wl1271_warning("rate policy for channel " "failed %d", ret); - if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, - &wlvif->flags)) { - if (wl12xx_dev_role_started(wlvif)) { + if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) { + if (wl12xx_is_roc(wl)) { /* roaming */ - ret = wl12xx_croc(wl, - wlvif->dev_role_id); + ret = wl12xx_croc(wl, wl->dev_role_id); if (ret < 0) - return ret; + goto out_sleep; } - ret = wl1271_join(wl, wlvif, false); + ret = wl1271_join(wl, false); if (ret < 0) wl1271_warning("cmd join on channel " "failed %d", ret); @@ -2486,114 +2387,66 @@ static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif, * not idle. otherwise, CROC will be called * anyway. */ - if (wl12xx_dev_role_started(wlvif) && + if (wl12xx_is_roc(wl) && !(conf->flags & IEEE80211_CONF_IDLE)) { - ret = wl12xx_stop_dev(wl, wlvif); + ret = wl12xx_croc(wl, wl->dev_role_id); if (ret < 0) - return ret; + goto out_sleep; - ret = wl12xx_start_dev(wl, wlvif); + ret = wl12xx_roc(wl, wl->dev_role_id); if (ret < 0) - return ret; + wl1271_warning("roc failed %d", + ret); } } } } + if (changed & IEEE80211_CONF_CHANGE_IDLE && !is_ap) { + ret = wl1271_sta_handle_idle(wl, + conf->flags & IEEE80211_CONF_IDLE); + if (ret < 0) + wl1271_warning("idle mode change failed %d", ret); + } + /* * if mac80211 changes the PSM mode, make sure the mode is not * incorrectly changed after the pspoll failure active window. */ if (changed & IEEE80211_CONF_CHANGE_PS) - clear_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags); + clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags); if (conf->flags & IEEE80211_CONF_PS && - !test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) { - set_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags); + !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) { + set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags); /* * We enter PSM only if we're already associated. * If we're not, we'll enter it when joining an SSID, * through the bss_info_changed() hook. 
*/ - if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) { + if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) { wl1271_debug(DEBUG_PSM, "psm enabled"); - ret = wl1271_ps_set_mode(wl, wlvif, - STATION_POWER_SAVE_MODE, - wlvif->basic_rate, true); + ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, + wl->basic_rate, true); } } else if (!(conf->flags & IEEE80211_CONF_PS) && - test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) { + test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) { wl1271_debug(DEBUG_PSM, "psm disabled"); - clear_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags); + clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags); - if (test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) - ret = wl1271_ps_set_mode(wl, wlvif, - STATION_ACTIVE_MODE, - wlvif->basic_rate, true); + if (test_bit(WL1271_FLAG_PSM, &wl->flags)) + ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE, + wl->basic_rate, true); } - if (conf->power_level != wlvif->power_level) { - ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level); + if (conf->power_level != wl->power_level) { + ret = wl1271_acx_tx_power(wl, conf->power_level); if (ret < 0) - return ret; - - wlvif->power_level = conf->power_level; - } - - return 0; -} - -static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) -{ - struct wl1271 *wl = hw->priv; - struct wl12xx_vif *wlvif; - struct ieee80211_conf *conf = &hw->conf; - int channel, ret = 0; - - channel = ieee80211_frequency_to_channel(conf->channel->center_freq); - - wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s" - " changed 0x%x", - channel, - conf->flags & IEEE80211_CONF_PS ? "on" : "off", - conf->power_level, - conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use", - changed); - - /* - * mac80211 will go to idle nearly immediately after transmitting some - * frames, such as the deauth. To make sure those frames reach the air, - * wait here until the TX queue is fully flushed. 
- */ - if ((changed & IEEE80211_CONF_CHANGE_IDLE) && - (conf->flags & IEEE80211_CONF_IDLE)) - wl1271_tx_flush(wl); - - mutex_lock(&wl->mutex); - - /* we support configuring the channel and band even while off */ - if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { - wl->band = conf->channel->band; - wl->channel = channel; - } + goto out_sleep; - if (changed & IEEE80211_CONF_CHANGE_POWER) wl->power_level = conf->power_level; - - if (unlikely(wl->state == WL1271_STATE_OFF)) - goto out; - - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) - goto out; - - /* configure each interface */ - wl12xx_for_each_wlvif(wl, wlvif) { - ret = wl12xx_config_vif(wl, wlvif, conf, changed); - if (ret < 0) - goto out_sleep; } out_sleep: @@ -2656,8 +2509,6 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw, { struct wl1271_filter_params *fp = (void *)(unsigned long)multicast; struct wl1271 *wl = hw->priv; - struct wl12xx_vif *wlvif; - int ret; wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x" @@ -2675,20 +2526,15 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw, if (ret < 0) goto out; - wl12xx_for_each_wlvif(wl, wlvif) { - if (wlvif->bss_type != BSS_TYPE_AP_BSS) { - if (*total & FIF_ALLMULTI) - ret = wl1271_acx_group_address_tbl(wl, wlvif, - false, - NULL, 0); - else if (fp) - ret = wl1271_acx_group_address_tbl(wl, wlvif, - fp->enabled, - fp->mc_list, - fp->mc_list_length); - if (ret < 0) - goto out_sleep; - } + if (wl->bss_type != BSS_TYPE_AP_BSS) { + if (*total & FIF_ALLMULTI) + ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0); + else if (fp) + ret = wl1271_acx_group_address_tbl(wl, fp->enabled, + fp->mc_list, + fp->mc_list_length); + if (ret < 0) + goto out_sleep; } /* @@ -2705,10 +2551,9 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw, kfree(fp); } -static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u8 id, u8 key_type, u8 key_size, - const u8 *key, u8 hlid, u32 tx_seq_32, - u16 tx_seq_16) +static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type, + u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32, + u16 tx_seq_16) { struct wl1271_ap_key *ap_key; int i; @@ -2723,10 +2568,10 @@ static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, * an existing key. 
*/ for (i = 0; i < MAX_NUM_KEYS; i++) { - if (wlvif->ap.recorded_keys[i] == NULL) + if (wl->recorded_ap_keys[i] == NULL) break; - if (wlvif->ap.recorded_keys[i]->id == id) { + if (wl->recorded_ap_keys[i]->id == id) { wl1271_warning("trying to record key replacement"); return -EINVAL; } @@ -2747,21 +2592,21 @@ static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, ap_key->tx_seq_32 = tx_seq_32; ap_key->tx_seq_16 = tx_seq_16; - wlvif->ap.recorded_keys[i] = ap_key; + wl->recorded_ap_keys[i] = ap_key; return 0; } -static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif) +static void wl1271_free_ap_keys(struct wl1271 *wl) { int i; for (i = 0; i < MAX_NUM_KEYS; i++) { - kfree(wlvif->ap.recorded_keys[i]); - wlvif->ap.recorded_keys[i] = NULL; + kfree(wl->recorded_ap_keys[i]); + wl->recorded_ap_keys[i] = NULL; } } -static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif) +static int wl1271_ap_init_hwenc(struct wl1271 *wl) { int i, ret = 0; struct wl1271_ap_key *key; @@ -2769,15 +2614,15 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif) for (i = 0; i < MAX_NUM_KEYS; i++) { u8 hlid; - if (wlvif->ap.recorded_keys[i] == NULL) + if (wl->recorded_ap_keys[i] == NULL) break; - key = wlvif->ap.recorded_keys[i]; + key = wl->recorded_ap_keys[i]; hlid = key->hlid; if (hlid == WL12XX_INVALID_LINK_ID) - hlid = wlvif->ap.bcast_hlid; + hlid = wl->ap_bcast_hlid; - ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE, + ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE, key->id, key->key_type, key->key_size, key->key, hlid, key->tx_seq_32, @@ -2790,24 +2635,23 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif) } if (wep_key_added) { - ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key, - wlvif->ap.bcast_hlid); + ret = wl12xx_cmd_set_default_wep_key(wl, wl->default_key, + wl->ap_bcast_hlid); if (ret < 0) goto out; } out: - wl1271_free_ap_keys(wl, wlvif); + wl1271_free_ap_keys(wl); return ret; } -static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u16 action, u8 id, u8 key_type, +static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, u8 key_size, const u8 *key, u32 tx_seq_32, u16 tx_seq_16, struct ieee80211_sta *sta) { int ret; - bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); + bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); if (is_ap) { struct wl1271_station *wl_sta; @@ -2817,10 +2661,10 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, wl_sta = (struct wl1271_station *)sta->drv_priv; hlid = wl_sta->hlid; } else { - hlid = wlvif->ap.bcast_hlid; + hlid = wl->ap_bcast_hlid; } - if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) { + if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) { /* * We do not support removing keys after AP shutdown. * Pretend we do to make mac80211 happy. 
@@ -2828,12 +2672,12 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, if (action != KEY_ADD_OR_REPLACE) return 0; - ret = wl1271_record_ap_key(wl, wlvif, id, + ret = wl1271_record_ap_key(wl, id, key_type, key_size, key, hlid, tx_seq_32, tx_seq_16); } else { - ret = wl1271_cmd_set_ap_key(wl, wlvif, action, + ret = wl1271_cmd_set_ap_key(wl, action, id, key_type, key_size, key, hlid, tx_seq_32, tx_seq_16); @@ -2874,10 +2718,10 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, /* don't remove key if hlid was already deleted */ if (action == KEY_REMOVE && - wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) + wl->sta_hlid == WL12XX_INVALID_LINK_ID) return 0; - ret = wl1271_cmd_set_sta_key(wl, wlvif, action, + ret = wl1271_cmd_set_sta_key(wl, action, id, key_type, key_size, key, addr, tx_seq_32, tx_seq_16); @@ -2887,8 +2731,8 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, /* the default WEP key needs to be configured at least once */ if (key_type == KEY_WEP) { ret = wl12xx_cmd_set_default_wep_key(wl, - wlvif->default_key, - wlvif->sta.hlid); + wl->default_key, + wl->sta_hlid); if (ret < 0) return ret; } @@ -2903,7 +2747,6 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_key_conf *key_conf) { struct wl1271 *wl = hw->priv; - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int ret; u32 tx_seq_32 = 0; u16 tx_seq_16 = 0; @@ -2939,20 +2782,20 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, key_type = KEY_TKIP; key_conf->hw_key_idx = key_conf->keyidx; - tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq); - tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq); + tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq); + tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq); break; case WLAN_CIPHER_SUITE_CCMP: key_type = KEY_AES; - key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; - tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq); - tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq); + key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq); + tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq); break; case WL1271_CIPHER_SUITE_GEM: key_type = KEY_GEM; - tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq); - tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq); + tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq); + tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq); break; default: wl1271_error("Unknown key algo 0x%x", key_conf->cipher); @@ -2963,7 +2806,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, switch (cmd) { case SET_KEY: - ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE, + ret = wl1271_set_key(wl, KEY_ADD_OR_REPLACE, key_conf->keyidx, key_type, key_conf->keylen, key_conf->key, tx_seq_32, tx_seq_16, sta); @@ -2974,7 +2817,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, break; case DISABLE_KEY: - ret = wl1271_set_key(wl, wlvif, KEY_REMOVE, + ret = wl1271_set_key(wl, KEY_REMOVE, key_conf->keyidx, key_type, key_conf->keylen, key_conf->key, 0, 0, sta); @@ -3004,8 +2847,6 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req) { struct wl1271 *wl = hw->priv; - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); - int ret; u8 *ssid = NULL; size_t len = 0; @@ -3033,18 +2874,18 @@ static int wl1271_op_hw_scan(struct 
ieee80211_hw *hw, if (ret < 0) goto out; - if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) && - test_bit(wlvif->role_id, wl->roc_map)) { - /* don't allow scanning right now */ - ret = -EBUSY; - goto out_sleep; - } - /* cancel ROC before scanning */ - if (wl12xx_dev_role_started(wlvif)) - wl12xx_stop_dev(wl, wlvif); + if (wl12xx_is_roc(wl)) { + if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) { + /* don't allow scanning right now */ + ret = -EBUSY; + goto out_sleep; + } + wl12xx_croc(wl, wl->dev_role_id); + wl12xx_cmd_role_stop_dev(wl); + } - ret = wl1271_scan(hw->priv, vif, ssid, len, req); + ret = wl1271_scan(hw->priv, ssid, len, req); out_sleep: wl1271_ps_elp_sleep(wl); out: @@ -3080,7 +2921,6 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw, } wl->scan.state = WL1271_SCAN_STATE_IDLE; memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); - wl->scan_vif = NULL; wl->scan.req = NULL; ieee80211_scan_completed(wl->hw, true); @@ -3098,7 +2938,6 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw, struct ieee80211_sched_scan_ies *ies) { struct wl1271 *wl = hw->priv; - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int ret; wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start"); @@ -3109,11 +2948,11 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw, if (ret < 0) goto out; - ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies); + ret = wl1271_scan_sched_scan_config(wl, req, ies); if (ret < 0) goto out_sleep; - ret = wl1271_scan_sched_scan_start(wl, wlvif); + ret = wl1271_scan_sched_scan_start(wl); if (ret < 0) goto out_sleep; @@ -3178,7 +3017,6 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) { struct wl1271 *wl = hw->priv; - struct wl12xx_vif *wlvif; int ret = 0; mutex_lock(&wl->mutex); @@ -3192,11 +3030,10 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) if (ret < 0) goto out; - wl12xx_for_each_wlvif(wl, wlvif) { - ret = wl1271_acx_rts_threshold(wl, wlvif, value); - if (ret < 0) - wl1271_warning("set rts threshold failed: %d", ret); - } + ret = wl1271_acx_rts_threshold(wl, value); + if (ret < 0) + wl1271_warning("wl1271_op_set_rts_threshold failed: %d", ret); + wl1271_ps_elp_sleep(wl); out: @@ -3205,10 +3042,9 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) return ret; } -static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb, +static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb, int offset) { - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); u8 ssid_len; const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset, skb->len - offset); @@ -3224,8 +3060,8 @@ static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb, return -EINVAL; } - wlvif->ssid_len = ssid_len; - memcpy(wlvif->ssid, ptr+2, ssid_len); + wl->ssid_len = ssid_len; + memcpy(wl->ssid, ptr+2, ssid_len); return 0; } @@ -3260,40 +3096,18 @@ static void wl12xx_remove_vendor_ie(struct sk_buff *skb, skb_trim(skb, skb->len - len); } -static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates, - struct ieee80211_vif *vif) -{ - struct sk_buff *skb; - int ret; - - skb = ieee80211_proberesp_get(wl->hw, vif); - if (!skb) - return -EOPNOTSUPP; - - ret = wl1271_cmd_template_set(wl, - CMD_TEMPL_AP_PROBE_RESPONSE, - skb->data, - skb->len, 0, - rates); - - dev_kfree_skb(skb); - return ret; -} - -static int 
wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl, - struct ieee80211_vif *vif, - u8 *probe_rsp_data, - size_t probe_rsp_len, - u32 rates) +static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, + u8 *probe_rsp_data, + size_t probe_rsp_len, + u32 rates) { - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); - struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf; u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE]; int ssid_ie_offset, ie_offset, templ_len; const u8 *ptr; /* no need to change probe response if the SSID is set correctly */ - if (wlvif->ssid_len > 0) + if (wl->ssid_len > 0) return wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, probe_rsp_data, @@ -3339,18 +3153,16 @@ static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl, } static int wl1271_bss_erp_info_changed(struct wl1271 *wl, - struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u32 changed) { - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int ret = 0; if (changed & BSS_CHANGED_ERP_SLOT) { if (bss_conf->use_short_slot) - ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT); + ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT); else - ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG); + ret = wl1271_acx_slot(wl, SLOT_TIME_LONG); if (ret < 0) { wl1271_warning("Set slot time failed %d", ret); goto out; @@ -3359,18 +3171,16 @@ static int wl1271_bss_erp_info_changed(struct wl1271 *wl, if (changed & BSS_CHANGED_ERP_PREAMBLE) { if (bss_conf->use_short_preamble) - wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT); + wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT); else - wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG); + wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG); } if (changed & BSS_CHANGED_ERP_CTS_PROT) { if (bss_conf->use_cts_prot) - ret = wl1271_acx_cts_protect(wl, wlvif, - CTSPROTECT_ENABLE); + ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE); else - ret = wl1271_acx_cts_protect(wl, wlvif, - CTSPROTECT_DISABLE); + ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE); if (ret < 0) { wl1271_warning("Set ctsprotect failed %d", ret); goto out; @@ -3386,23 +3196,14 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl, struct ieee80211_bss_conf *bss_conf, u32 changed) { - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); - bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); + bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); int ret = 0; if ((changed & BSS_CHANGED_BEACON_INT)) { wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d", bss_conf->beacon_int); - wlvif->beacon_int = bss_conf->beacon_int; - } - - if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) { - u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); - if (!wl1271_ap_set_probe_resp_tmpl(wl, rate, vif)) { - wl1271_debug(DEBUG_AP, "probe response updated"); - set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags); - } + wl->beacon_int = bss_conf->beacon_int; } if ((changed & BSS_CHANGED_BEACON)) { @@ -3413,19 +3214,17 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl, struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif); u16 tmpl_id; - if (!beacon) { - ret = -EINVAL; + if (!beacon) goto out; - } wl1271_debug(DEBUG_MASTER, "beacon updated"); - ret = wl1271_ssid_set(vif, beacon, ieoffset); + ret = wl1271_ssid_set(wl, beacon, ieoffset); if (ret < 0) { dev_kfree_skb(beacon); goto out; } - min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); + min_rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set); tmpl_id = is_ap ? 
CMD_TEMPL_AP_BEACON : CMD_TEMPL_BEACON; ret = wl1271_cmd_template_set(wl, tmpl_id, @@ -3437,13 +3236,6 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl, goto out; } - /* - * In case we already have a probe-resp beacon set explicitly - * by usermode, don't use the beacon data. - */ - if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags)) - goto end_bcn; - /* remove TIM ie from probe response */ wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset); @@ -3462,7 +3254,7 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl, hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP); if (is_ap) - ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif, + ret = wl1271_ap_set_probe_resp_tmpl(wl, beacon->data, beacon->len, min_rate); @@ -3472,15 +3264,12 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl, beacon->data, beacon->len, 0, min_rate); -end_bcn: dev_kfree_skb(beacon); if (ret < 0) goto out; } out: - if (ret != 0) - wl1271_error("beacon info change failed: %d", ret); return ret; } @@ -3490,24 +3279,23 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl, struct ieee80211_bss_conf *bss_conf, u32 changed) { - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int ret = 0; if ((changed & BSS_CHANGED_BASIC_RATES)) { u32 rates = bss_conf->basic_rates; - wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates, - wlvif->band); - wlvif->basic_rate = wl1271_tx_min_rate_get(wl, - wlvif->basic_rate_set); + wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates, + wl->band); + wl->basic_rate = wl1271_tx_min_rate_get(wl, + wl->basic_rate_set); - ret = wl1271_init_ap_rates(wl, wlvif); + ret = wl1271_init_ap_rates(wl); if (ret < 0) { wl1271_error("AP rate policy change failed %d", ret); goto out; } - ret = wl1271_ap_init_templates(wl, vif); + ret = wl1271_ap_init_templates(wl); if (ret < 0) goto out; } @@ -3518,40 +3306,38 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl, if ((changed & BSS_CHANGED_BEACON_ENABLED)) { if (bss_conf->enable_beacon) { - if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) { - ret = wl12xx_cmd_role_start_ap(wl, wlvif); + if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) { + ret = wl12xx_cmd_role_start_ap(wl); if (ret < 0) goto out; - ret = wl1271_ap_init_hwenc(wl, wlvif); + ret = wl1271_ap_init_hwenc(wl); if (ret < 0) goto out; - set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags); + set_bit(WL1271_FLAG_AP_STARTED, &wl->flags); wl1271_debug(DEBUG_AP, "started AP"); } } else { - if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) { - ret = wl12xx_cmd_role_stop_ap(wl, wlvif); + if (test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) { + ret = wl12xx_cmd_role_stop_ap(wl); if (ret < 0) goto out; - clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags); - clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, - &wlvif->flags); + clear_bit(WL1271_FLAG_AP_STARTED, &wl->flags); wl1271_debug(DEBUG_AP, "stopped AP"); } } } - ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed); + ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed); if (ret < 0) goto out; /* Handle HT information change */ if ((changed & BSS_CHANGED_HT) && (bss_conf->channel_type != NL80211_CHAN_NO_HT)) { - ret = wl1271_acx_set_ht_information(wl, wlvif, + ret = wl1271_acx_set_ht_information(wl, bss_conf->ht_operation_mode); if (ret < 0) { wl1271_warning("Set ht information failed %d", ret); @@ -3569,9 +3355,8 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, struct ieee80211_bss_conf *bss_conf, u32 changed) { - struct wl12xx_vif *wlvif = 
wl12xx_vif_to_data(vif); bool do_join = false, set_assoc = false; - bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS); + bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS); bool ibss_joined = false; u32 sta_rate_set = 0; int ret; @@ -3588,13 +3373,14 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, if (changed & BSS_CHANGED_IBSS) { if (bss_conf->ibss_joined) { - set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags); + set_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags); ibss_joined = true; } else { - if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, - &wlvif->flags)) { - wl1271_unjoin(wl, wlvif); - wl12xx_start_dev(wl, wlvif); + if (test_and_clear_bit(WL1271_FLAG_IBSS_JOINED, + &wl->flags)) { + wl1271_unjoin(wl); + wl12xx_cmd_role_start_dev(wl); + wl12xx_roc(wl, wl->dev_role_id); } } } @@ -3610,40 +3396,46 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s", bss_conf->enable_beacon ? "enabled" : "disabled"); + if (bss_conf->enable_beacon) + wl->set_bss_type = BSS_TYPE_IBSS; + else + wl->set_bss_type = BSS_TYPE_STA_BSS; do_join = true; } - if (changed & BSS_CHANGED_IDLE) { - ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle); - if (ret < 0) - wl1271_warning("idle mode change failed %d", ret); - } - if ((changed & BSS_CHANGED_CQM)) { bool enable = false; if (bss_conf->cqm_rssi_thold) enable = true; - ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable, + ret = wl1271_acx_rssi_snr_trigger(wl, enable, bss_conf->cqm_rssi_thold, bss_conf->cqm_rssi_hyst); if (ret < 0) goto out; - wlvif->rssi_thold = bss_conf->cqm_rssi_thold; + wl->rssi_thold = bss_conf->cqm_rssi_thold; } - if (changed & BSS_CHANGED_BSSID) - if (!is_zero_ether_addr(bss_conf->bssid)) { - ret = wl12xx_cmd_build_null_data(wl, wlvif); + if ((changed & BSS_CHANGED_BSSID) && + /* + * Now we know the correct bssid, so we send a new join command + * and enable the BSSID filter + */ + memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) { + memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN); + + if (!is_zero_ether_addr(wl->bssid)) { + ret = wl1271_cmd_build_null_data(wl); if (ret < 0) goto out; - ret = wl1271_build_qos_null_data(wl, vif); + ret = wl1271_build_qos_null_data(wl); if (ret < 0) goto out; /* Need to update the BSSID (for filtering etc) */ do_join = true; } + } if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) { rcu_read_lock(); @@ -3667,28 +3459,26 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, if (bss_conf->assoc) { u32 rates; int ieoffset; - wlvif->aid = bss_conf->aid; + wl->aid = bss_conf->aid; set_assoc = true; - wlvif->ps_poll_failures = 0; + wl->ps_poll_failures = 0; /* * use basic rates from AP, and determine lowest rate * to use with control frames. */ rates = bss_conf->basic_rates; - wlvif->basic_rate_set = + wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates, - wlvif->band); - wlvif->basic_rate = - wl1271_tx_min_rate_get(wl, - wlvif->basic_rate_set); + wl->band); + wl->basic_rate = + wl1271_tx_min_rate_get(wl, wl->basic_rate_set); if (sta_rate_set) - wlvif->rate_set = - wl1271_tx_enabled_rates_get(wl, + wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rate_set, - wlvif->band); - ret = wl1271_acx_sta_rate_policies(wl, wlvif); + wl->band); + ret = wl1271_acx_sta_rate_policies(wl); if (ret < 0) goto out; @@ -3698,56 +3488,53 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, * updates it by itself when the first beacon is * received after a join. 
*/ - ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid); + ret = wl1271_cmd_build_ps_poll(wl, wl->aid); if (ret < 0) goto out; /* * Get a template for hardware connection maintenance */ - dev_kfree_skb(wlvif->probereq); - wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl, - wlvif, - NULL); + dev_kfree_skb(wl->probereq); + wl->probereq = wl1271_cmd_build_ap_probe_req(wl, NULL); ieoffset = offsetof(struct ieee80211_mgmt, u.probe_req.variable); - wl1271_ssid_set(vif, wlvif->probereq, ieoffset); + wl1271_ssid_set(wl, wl->probereq, ieoffset); /* enable the connection monitoring feature */ - ret = wl1271_acx_conn_monit_params(wl, wlvif, true); + ret = wl1271_acx_conn_monit_params(wl, true); if (ret < 0) goto out; } else { /* use defaults when not associated */ bool was_assoc = - !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, - &wlvif->flags); + !!test_and_clear_bit(WL1271_FLAG_STA_ASSOCIATED, + &wl->flags); bool was_ifup = - !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT, - &wlvif->flags); - wlvif->aid = 0; + !!test_and_clear_bit(WL1271_FLAG_STA_STATE_SENT, + &wl->flags); + wl->aid = 0; /* free probe-request template */ - dev_kfree_skb(wlvif->probereq); - wlvif->probereq = NULL; + dev_kfree_skb(wl->probereq); + wl->probereq = NULL; /* re-enable dynamic ps - just in case */ - ieee80211_enable_dyn_ps(vif); + ieee80211_enable_dyn_ps(wl->vif); /* revert back to minimum rates for the current band */ - wl1271_set_band_rate(wl, wlvif); - wlvif->basic_rate = - wl1271_tx_min_rate_get(wl, - wlvif->basic_rate_set); - ret = wl1271_acx_sta_rate_policies(wl, wlvif); + wl1271_set_band_rate(wl); + wl->basic_rate = + wl1271_tx_min_rate_get(wl, wl->basic_rate_set); + ret = wl1271_acx_sta_rate_policies(wl); if (ret < 0) goto out; /* disable connection monitor features */ - ret = wl1271_acx_conn_monit_params(wl, wlvif, false); + ret = wl1271_acx_conn_monit_params(wl, false); /* Disable the keep-alive feature */ - ret = wl1271_acx_keep_alive_mode(wl, wlvif, false); + ret = wl1271_acx_keep_alive_mode(wl, false); if (ret < 0) goto out; @@ -3759,7 +3546,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, * no IF_OPER_UP notification. */ if (!was_ifup) { - ret = wl12xx_croc(wl, wlvif->role_id); + ret = wl12xx_croc(wl, wl->role_id); if (ret < 0) goto out; } @@ -3768,16 +3555,17 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, * roaming on the same channel. until we will * have a better flow...) 
*/ - if (test_bit(wlvif->dev_role_id, wl->roc_map)) { - ret = wl12xx_croc(wl, - wlvif->dev_role_id); + if (test_bit(wl->dev_role_id, wl->roc_map)) { + ret = wl12xx_croc(wl, wl->dev_role_id); if (ret < 0) goto out; } - wl1271_unjoin(wl, wlvif); - if (!(conf_flags & IEEE80211_CONF_IDLE)) - wl12xx_start_dev(wl, wlvif); + wl1271_unjoin(wl); + if (!(conf_flags & IEEE80211_CONF_IDLE)) { + wl12xx_cmd_role_start_dev(wl); + wl12xx_roc(wl, wl->dev_role_id); + } } } } @@ -3788,28 +3576,27 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, if (bss_conf->ibss_joined) { u32 rates = bss_conf->basic_rates; - wlvif->basic_rate_set = + wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates, - wlvif->band); - wlvif->basic_rate = - wl1271_tx_min_rate_get(wl, - wlvif->basic_rate_set); + wl->band); + wl->basic_rate = + wl1271_tx_min_rate_get(wl, wl->basic_rate_set); /* by default, use 11b + OFDM rates */ - wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES; - ret = wl1271_acx_sta_rate_policies(wl, wlvif); + wl->rate_set = CONF_TX_IBSS_DEFAULT_RATES; + ret = wl1271_acx_sta_rate_policies(wl); if (ret < 0) goto out; } } - ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed); + ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed); if (ret < 0) goto out; if (changed & BSS_CHANGED_ARP_FILTER) { __be32 addr = bss_conf->arp_addr_list[0]; - WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS); + WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS); if (bss_conf->arp_addr_cnt == 1 && bss_conf->arp_filter_enabled) { @@ -3819,24 +3606,24 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, * isn't being set (when sending), so we have to * reconfigure the template upon every ip change. */ - ret = wl1271_cmd_build_arp_rsp(wl, wlvif, addr); + ret = wl1271_cmd_build_arp_rsp(wl, addr); if (ret < 0) { wl1271_warning("build arp rsp failed: %d", ret); goto out; } - ret = wl1271_acx_arp_ip_filter(wl, wlvif, + ret = wl1271_acx_arp_ip_filter(wl, ACX_ARP_FILTER_ARP_FILTERING, addr); } else - ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr); + ret = wl1271_acx_arp_ip_filter(wl, 0, addr); if (ret < 0) goto out; } if (do_join) { - ret = wl1271_join(wl, wlvif, set_assoc); + ret = wl1271_join(wl, set_assoc); if (ret < 0) { wl1271_warning("cmd join failed %d", ret); goto out; @@ -3844,31 +3631,35 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, /* ROC until connected (after EAPOL exchange) */ if (!is_ibss) { - ret = wl12xx_roc(wl, wlvif, wlvif->role_id); + ret = wl12xx_roc(wl, wl->role_id); if (ret < 0) goto out; - wl1271_check_operstate(wl, wlvif, + wl1271_check_operstate(wl, ieee80211_get_operstate(vif)); } /* * stop device role if started (we might already be in - * STA/IBSS role). + * STA role). TODO: make it better. 
*/ - if (wl12xx_dev_role_started(wlvif)) { - ret = wl12xx_stop_dev(wl, wlvif); + if (wl->dev_role_id != WL12XX_INVALID_ROLE_ID) { + ret = wl12xx_croc(wl, wl->dev_role_id); + if (ret < 0) + goto out; + + ret = wl12xx_cmd_role_stop_dev(wl); if (ret < 0) goto out; } /* If we want to go in PSM but we're not there yet */ - if (test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags) && - !test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) { + if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) && + !test_bit(WL1271_FLAG_PSM, &wl->flags)) { enum wl1271_cmd_ps_mode mode; mode = STATION_POWER_SAVE_MODE; - ret = wl1271_ps_set_mode(wl, wlvif, mode, - wlvif->basic_rate, + ret = wl1271_ps_set_mode(wl, mode, + wl->basic_rate, true); if (ret < 0) goto out; @@ -3882,7 +3673,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap, true, - wlvif->sta.hlid); + wl->sta_hlid); if (ret < 0) { wl1271_warning("Set ht cap true failed %d", ret); @@ -3894,7 +3685,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap, false, - wlvif->sta.hlid); + wl->sta_hlid); if (ret < 0) { wl1271_warning("Set ht cap false failed %d", ret); @@ -3906,7 +3697,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, /* Handle HT information change. Done after join. */ if ((changed & BSS_CHANGED_HT) && (bss_conf->channel_type != NL80211_CHAN_NO_HT)) { - ret = wl1271_acx_set_ht_information(wl, wlvif, + ret = wl1271_acx_set_ht_information(wl, bss_conf->ht_operation_mode); if (ret < 0) { wl1271_warning("Set ht information failed %d", ret); @@ -3924,8 +3715,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, u32 changed) { struct wl1271 *wl = hw->priv; - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); - bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); + bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); int ret; wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x", @@ -3936,9 +3726,6 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, if (unlikely(wl->state == WL1271_STATE_OFF)) goto out; - if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))) - goto out; - ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; @@ -3959,7 +3746,6 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, const struct ieee80211_tx_queue_params *params) { struct wl1271 *wl = hw->priv; - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); u8 ps_scheme; int ret = 0; @@ -3972,8 +3758,31 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, else ps_scheme = CONF_PS_SCHEME_LEGACY; - if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) + if (wl->state == WL1271_STATE_OFF) { + /* + * If the state is off, the parameters will be recorded and + * configured on init. This happens in AP-mode. 
+ */ + struct conf_tx_ac_category *conf_ac = + &wl->conf.tx.ac_conf[wl1271_tx_get_queue(queue)]; + struct conf_tx_tid *conf_tid = + &wl->conf.tx.tid_conf[wl1271_tx_get_queue(queue)]; + + conf_ac->ac = wl1271_tx_get_queue(queue); + conf_ac->cw_min = (u8)params->cw_min; + conf_ac->cw_max = params->cw_max; + conf_ac->aifsn = params->aifs; + conf_ac->tx_op_limit = params->txop << 5; + + conf_tid->queue_id = wl1271_tx_get_queue(queue); + conf_tid->channel_type = CONF_CHANNEL_TYPE_EDCF; + conf_tid->tsid = wl1271_tx_get_queue(queue); + conf_tid->ps_scheme = ps_scheme; + conf_tid->ack_policy = CONF_ACK_POLICY_LEGACY; + conf_tid->apsd_conf[0] = 0; + conf_tid->apsd_conf[1] = 0; goto out; + } ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) @@ -3983,13 +3792,13 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, * the txop is confed in units of 32us by the mac80211, * we need us */ - ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue), + ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue), params->cw_min, params->cw_max, params->aifs, params->txop << 5); if (ret < 0) goto out_sleep; - ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue), + ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue), CONF_CHANNEL_TYPE_EDCF, wl1271_tx_get_queue(queue), ps_scheme, CONF_ACK_POLICY_LEGACY, @@ -4052,43 +3861,43 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx, } static int wl1271_allocate_sta(struct wl1271 *wl, - struct wl12xx_vif *wlvif, - struct ieee80211_sta *sta) + struct ieee80211_sta *sta, + u8 *hlid) { struct wl1271_station *wl_sta; - int ret; + int id; - - if (wl->active_sta_count >= AP_MAX_STATIONS) { + id = find_first_zero_bit(wl->ap_hlid_map, AP_MAX_STATIONS); + if (id >= AP_MAX_STATIONS) { wl1271_warning("could not allocate HLID - too much stations"); return -EBUSY; } wl_sta = (struct wl1271_station *)sta->drv_priv; - ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid); - if (ret < 0) { - wl1271_warning("could not allocate HLID - too many links"); - return -EBUSY; - } - - set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map); + set_bit(id, wl->ap_hlid_map); + wl_sta->hlid = WL1271_AP_STA_HLID_START + id; + *hlid = wl_sta->hlid; memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN); wl->active_sta_count++; return 0; } -void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid) +void wl1271_free_sta(struct wl1271 *wl, u8 hlid) { - if (!test_bit(hlid, wlvif->ap.sta_hlid_map)) + int id = hlid - WL1271_AP_STA_HLID_START; + + if (hlid < WL1271_AP_STA_HLID_START) + return; + + if (!test_bit(id, wl->ap_hlid_map)) return; - clear_bit(hlid, wlvif->ap.sta_hlid_map); + clear_bit(id, wl->ap_hlid_map); memset(wl->links[hlid].addr, 0, ETH_ALEN); wl->links[hlid].ba_bitmap = 0; wl1271_tx_reset_link_queues(wl, hlid); __clear_bit(hlid, &wl->ap_ps_map); __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); - wl12xx_free_link(wl, wlvif, &hlid); wl->active_sta_count--; } @@ -4097,8 +3906,6 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw, struct ieee80211_sta *sta) { struct wl1271 *wl = hw->priv; - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); - struct wl1271_station *wl_sta; int ret = 0; u8 hlid; @@ -4107,23 +3914,20 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw, if (unlikely(wl->state == WL1271_STATE_OFF)) goto out; - if (wlvif->bss_type != BSS_TYPE_AP_BSS) + if (wl->bss_type != BSS_TYPE_AP_BSS) goto out; wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid); - ret = wl1271_allocate_sta(wl, wlvif, sta); + ret = 
wl1271_allocate_sta(wl, sta, &hlid); if (ret < 0) goto out; - wl_sta = (struct wl1271_station *)sta->drv_priv; - hlid = wl_sta->hlid; - ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out_free_sta; - ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid); + ret = wl12xx_cmd_add_peer(wl, sta, hlid); if (ret < 0) goto out_sleep; @@ -4140,7 +3944,7 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw, out_free_sta: if (ret < 0) - wl1271_free_sta(wl, wlvif, hlid); + wl1271_free_sta(wl, hlid); out: mutex_unlock(&wl->mutex); @@ -4152,7 +3956,6 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw, struct ieee80211_sta *sta) { struct wl1271 *wl = hw->priv; - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); struct wl1271_station *wl_sta; int ret = 0, id; @@ -4161,14 +3964,14 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw, if (unlikely(wl->state == WL1271_STATE_OFF)) goto out; - if (wlvif->bss_type != BSS_TYPE_AP_BSS) + if (wl->bss_type != BSS_TYPE_AP_BSS) goto out; wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid); wl_sta = (struct wl1271_station *)sta->drv_priv; - id = wl_sta->hlid; - if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map))) + id = wl_sta->hlid - WL1271_AP_STA_HLID_START; + if (WARN_ON(!test_bit(id, wl->ap_hlid_map))) goto out; ret = wl1271_ps_elp_wakeup(wl); @@ -4179,7 +3982,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw, if (ret < 0) goto out_sleep; - wl1271_free_sta(wl, wlvif, wl_sta->hlid); + wl1271_free_sta(wl, wl_sta->hlid); out_sleep: wl1271_ps_elp_sleep(wl); @@ -4196,7 +3999,6 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, u8 buf_size) { struct wl1271 *wl = hw->priv; - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int ret; u8 hlid, *ba_bitmap; @@ -4214,10 +4016,10 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, goto out; } - if (wlvif->bss_type == BSS_TYPE_STA_BSS) { - hlid = wlvif->sta.hlid; - ba_bitmap = &wlvif->sta.ba_rx_bitmap; - } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) { + if (wl->bss_type == BSS_TYPE_STA_BSS) { + hlid = wl->sta_hlid; + ba_bitmap = &wl->ba_rx_bitmap; + } else if (wl->bss_type == BSS_TYPE_AP_BSS) { struct wl1271_station *wl_sta; wl_sta = (struct wl1271_station *)sta->drv_priv; @@ -4237,7 +4039,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, switch (action) { case IEEE80211_AMPDU_RX_START: - if (!wlvif->ba_support || !wlvif->ba_allowed) { + if (!wl->ba_support || !wl->ba_allowed) { ret = -ENOTSUPP; break; } @@ -4306,9 +4108,8 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct cfg80211_bitrate_mask *mask) { - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); struct wl1271 *wl = hw->priv; - int i, ret = 0; + int i; wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x", mask->control[NL80211_BAND_2GHZ].legacy, @@ -4317,39 +4118,19 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw, mutex_lock(&wl->mutex); for (i = 0; i < IEEE80211_NUM_BANDS; i++) - wlvif->bitrate_masks[i] = + wl->bitrate_masks[i] = wl1271_tx_enabled_rates_get(wl, mask->control[i].legacy, i); - - if (unlikely(wl->state == WL1271_STATE_OFF)) - goto out; - - if (wlvif->bss_type == BSS_TYPE_STA_BSS && - !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) { - - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) - goto out; - - wl1271_set_band_rate(wl, wlvif); - wlvif->basic_rate = - wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); - ret = wl1271_acx_sta_rate_policies(wl, wlvif); - - wl1271_ps_elp_sleep(wl); - } -out: 
mutex_unlock(&wl->mutex); - return ret; + return 0; } static void wl12xx_op_channel_switch(struct ieee80211_hw *hw, struct ieee80211_channel_switch *ch_switch) { struct wl1271 *wl = hw->priv; - struct wl12xx_vif *wlvif; int ret; wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch"); @@ -4357,24 +4138,19 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw, mutex_lock(&wl->mutex); if (unlikely(wl->state == WL1271_STATE_OFF)) { - wl12xx_for_each_wlvif_sta(wl, wlvif) { - struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); - ieee80211_chswitch_done(vif, false); - } - goto out; + mutex_unlock(&wl->mutex); + ieee80211_chswitch_done(wl->vif, false); + return; } ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; - /* TODO: change mac80211 to pass vif as param */ - wl12xx_for_each_wlvif_sta(wl, wlvif) { - ret = wl12xx_cmd_channel_switch(wl, ch_switch); + ret = wl12xx_cmd_channel_switch(wl, ch_switch); - if (!ret) - set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags); - } + if (!ret) + set_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags); wl1271_ps_elp_sleep(wl); @@ -4394,6 +4170,10 @@ static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw) /* packets are considered pending if in the TX queue or the FW */ ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0); + + /* the above is appropriate for STA mode for PS purposes */ + WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS); + out: mutex_unlock(&wl->mutex); @@ -4630,7 +4410,6 @@ static const struct ieee80211_ops wl1271_ops = { .stop = wl1271_op_stop, .add_interface = wl1271_op_add_interface, .remove_interface = wl1271_op_remove_interface, - .change_interface = wl12xx_op_change_interface, #ifdef CONFIG_PM .suspend = wl1271_op_suspend, .resume = wl1271_op_resume, @@ -4825,7 +4604,7 @@ static struct bin_attribute fwlog_attr = { .read = wl1271_sysfs_read_fwlog, }; -static int wl1271_register_hw(struct wl1271 *wl) +int wl1271_register_hw(struct wl1271 *wl) { int ret; @@ -4866,8 +4645,9 @@ static int wl1271_register_hw(struct wl1271 *wl) return 0; } +EXPORT_SYMBOL_GPL(wl1271_register_hw); -static void wl1271_unregister_hw(struct wl1271 *wl) +void wl1271_unregister_hw(struct wl1271 *wl) { if (wl->state == WL1271_STATE_PLT) __wl1271_plt_stop(wl); @@ -4877,8 +4657,9 @@ static void wl1271_unregister_hw(struct wl1271 *wl) wl->mac80211_registered = false; } +EXPORT_SYMBOL_GPL(wl1271_unregister_hw); -static int wl1271_init_ieee80211(struct wl1271 *wl) +int wl1271_init_ieee80211(struct wl1271 *wl) { static const u32 cipher_suites[] = { WLAN_CIPHER_SUITE_WEP40, @@ -4955,33 +4736,27 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) wl->hw->wiphy->reg_notifier = wl1271_reg_notify; - /* the FW answers probe-requests in AP-mode */ - wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; - wl->hw->wiphy->probe_resp_offload = - NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | - NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | - NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; - - SET_IEEE80211_DEV(wl->hw, wl->dev); + SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl)); wl->hw->sta_data_size = sizeof(struct wl1271_station); - wl->hw->vif_data_size = sizeof(struct wl12xx_vif); wl->hw->max_rx_aggregation_subframes = 8; return 0; } +EXPORT_SYMBOL_GPL(wl1271_init_ieee80211); #define WL1271_DEFAULT_CHANNEL 0 -static struct ieee80211_hw *wl1271_alloc_hw(void) +struct ieee80211_hw *wl1271_alloc_hw(void) { struct ieee80211_hw *hw; + struct platform_device *plat_dev = NULL; struct wl1271 *wl; int i, j, ret; unsigned int order; - BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS); 
+ BUILD_BUG_ON(AP_MAX_LINKS > WL12XX_MAX_LINKS); hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops); if (!hw) { @@ -4990,26 +4765,41 @@ static struct ieee80211_hw *wl1271_alloc_hw(void) goto err_hw_alloc; } + plat_dev = kmemdup(&wl1271_device, sizeof(wl1271_device), GFP_KERNEL); + if (!plat_dev) { + wl1271_error("could not allocate platform_device"); + ret = -ENOMEM; + goto err_plat_alloc; + } + wl = hw->priv; memset(wl, 0, sizeof(*wl)); INIT_LIST_HEAD(&wl->list); - INIT_LIST_HEAD(&wl->wlvif_list); wl->hw = hw; + wl->plat_dev = plat_dev; for (i = 0; i < NUM_TX_QUEUES; i++) - for (j = 0; j < WL12XX_MAX_LINKS; j++) + skb_queue_head_init(&wl->tx_queue[i]); + + for (i = 0; i < NUM_TX_QUEUES; i++) + for (j = 0; j < AP_MAX_LINKS; j++) skb_queue_head_init(&wl->links[j].tx_queue[i]); skb_queue_head_init(&wl->deferred_rx_queue); skb_queue_head_init(&wl->deferred_tx_queue); INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); + INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work); INIT_WORK(&wl->netstack_work, wl1271_netstack_work); INIT_WORK(&wl->tx_work, wl1271_tx_work); INIT_WORK(&wl->recovery_work, wl1271_recovery_work); INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work); + INIT_WORK(&wl->rx_streaming_enable_work, + wl1271_rx_streaming_enable_work); + INIT_WORK(&wl->rx_streaming_disable_work, + wl1271_rx_streaming_disable_work); wl->freezable_wq = create_freezable_workqueue("wl12xx_wq"); if (!wl->freezable_wq) { @@ -5018,21 +4808,41 @@ static struct ieee80211_hw *wl1271_alloc_hw(void) } wl->channel = WL1271_DEFAULT_CHANNEL; + wl->beacon_int = WL1271_DEFAULT_BEACON_INT; + wl->default_key = 0; wl->rx_counter = 0; + wl->psm_entry_retry = 0; wl->power_level = WL1271_DEFAULT_POWER_LEVEL; + wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC; + wl->basic_rate = CONF_TX_RATE_MASK_BASIC; + wl->rate_set = CONF_TX_RATE_MASK_BASIC; wl->band = IEEE80211_BAND_2GHZ; wl->vif = NULL; wl->flags = 0; wl->sg_enabled = true; wl->hw_pg_ver = -1; + wl->bss_type = MAX_BSS_TYPE; + wl->set_bss_type = MAX_BSS_TYPE; + wl->last_tx_hlid = 0; wl->ap_ps_map = 0; wl->ap_fw_ps_map = 0; wl->quirks = 0; wl->platform_quirks = 0; wl->sched_scanning = false; + wl->tx_security_seq = 0; + wl->tx_security_last_seq_lsb = 0; wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT; + wl->role_id = WL12XX_INVALID_ROLE_ID; wl->system_hlid = WL12XX_SYSTEM_HLID; + wl->sta_hlid = WL12XX_INVALID_LINK_ID; + wl->dev_role_id = WL12XX_INVALID_ROLE_ID; + wl->dev_hlid = WL12XX_INVALID_LINK_ID; + wl->session_counter = 0; + wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID; + wl->ap_global_hlid = WL12XX_INVALID_LINK_ID; wl->active_sta_count = 0; + setup_timer(&wl->rx_streaming_timer, wl1271_rx_streaming_timer, + (unsigned long) wl); wl->fwlog_size = 0; init_waitqueue_head(&wl->fwlog_waitq); @@ -5050,6 +4860,8 @@ static struct ieee80211_hw *wl1271_alloc_hw(void) /* Apply default driver configuration. 
*/ wl1271_conf_init(wl); + wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate; + wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5; order = get_order(WL1271_AGGR_BUFFER_SIZE); wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order); @@ -5071,8 +4883,49 @@ static struct ieee80211_hw *wl1271_alloc_hw(void) goto err_dummy_packet; } + /* Register platform device */ + ret = platform_device_register(wl->plat_dev); + if (ret) { + wl1271_error("couldn't register platform device"); + goto err_fwlog; + } + dev_set_drvdata(&wl->plat_dev->dev, wl); + + /* Create sysfs file to control bt coex state */ + ret = device_create_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state); + if (ret < 0) { + wl1271_error("failed to create sysfs file bt_coex_state"); + goto err_platform; + } + + /* Create sysfs file to get HW PG version */ + ret = device_create_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver); + if (ret < 0) { + wl1271_error("failed to create sysfs file hw_pg_ver"); + goto err_bt_coex_state; + } + + /* Create sysfs file for the FW log */ + ret = device_create_bin_file(&wl->plat_dev->dev, &fwlog_attr); + if (ret < 0) { + wl1271_error("failed to create sysfs file fwlog"); + goto err_hw_pg_ver; + } + return hw; +err_hw_pg_ver: + device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver); + +err_bt_coex_state: + device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state); + +err_platform: + platform_device_unregister(wl->plat_dev); + +err_fwlog: + free_page((unsigned long)wl->fwlog); + err_dummy_packet: dev_kfree_skb(wl->dummy_packet); @@ -5084,14 +4937,18 @@ static struct ieee80211_hw *wl1271_alloc_hw(void) err_hw: wl1271_debugfs_exit(wl); + kfree(plat_dev); + +err_plat_alloc: ieee80211_free_hw(hw); err_hw_alloc: return ERR_PTR(ret); } +EXPORT_SYMBOL_GPL(wl1271_alloc_hw); -static int wl1271_free_hw(struct wl1271 *wl) +int wl1271_free_hw(struct wl1271 *wl) { /* Unblock any fwlog readers */ mutex_lock(&wl->mutex); @@ -5099,15 +4956,17 @@ static int wl1271_free_hw(struct wl1271 *wl) wake_up_interruptible_all(&wl->fwlog_waitq); mutex_unlock(&wl->mutex); - device_remove_bin_file(wl->dev, &fwlog_attr); + device_remove_bin_file(&wl->plat_dev->dev, &fwlog_attr); - device_remove_file(wl->dev, &dev_attr_hw_pg_ver); + device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver); - device_remove_file(wl->dev, &dev_attr_bt_coex_state); + device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state); + platform_device_unregister(wl->plat_dev); free_page((unsigned long)wl->fwlog); dev_kfree_skb(wl->dummy_packet); free_pages((unsigned long)wl->aggr_buf, get_order(WL1271_AGGR_BUFFER_SIZE)); + kfree(wl->plat_dev); wl1271_debugfs_exit(wl); @@ -5124,174 +4983,7 @@ static int wl1271_free_hw(struct wl1271 *wl) return 0; } - -static irqreturn_t wl12xx_hardirq(int irq, void *cookie) -{ - struct wl1271 *wl = cookie; - unsigned long flags; - - wl1271_debug(DEBUG_IRQ, "IRQ"); - - /* complete the ELP completion */ - spin_lock_irqsave(&wl->wl_lock, flags); - set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); - if (wl->elp_compl) { - complete(wl->elp_compl); - wl->elp_compl = NULL; - } - - if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) { - /* don't enqueue a work right now. 
mark it as pending */ - set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags); - wl1271_debug(DEBUG_IRQ, "should not enqueue work"); - disable_irq_nosync(wl->irq); - pm_wakeup_event(wl->dev, 0); - spin_unlock_irqrestore(&wl->wl_lock, flags); - return IRQ_HANDLED; - } - spin_unlock_irqrestore(&wl->wl_lock, flags); - - return IRQ_WAKE_THREAD; -} - -static int __devinit wl12xx_probe(struct platform_device *pdev) -{ - struct wl12xx_platform_data *pdata = pdev->dev.platform_data; - struct ieee80211_hw *hw; - struct wl1271 *wl; - unsigned long irqflags; - int ret = -ENODEV; - - hw = wl1271_alloc_hw(); - if (IS_ERR(hw)) { - wl1271_error("can't allocate hw"); - ret = PTR_ERR(hw); - goto out; - } - - wl = hw->priv; - wl->irq = platform_get_irq(pdev, 0); - wl->ref_clock = pdata->board_ref_clock; - wl->tcxo_clock = pdata->board_tcxo_clock; - wl->platform_quirks = pdata->platform_quirks; - wl->set_power = pdata->set_power; - wl->dev = &pdev->dev; - wl->if_ops = pdata->ops; - - platform_set_drvdata(pdev, wl); - - if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) - irqflags = IRQF_TRIGGER_RISING; - else - irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT; - - ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq, - irqflags, - pdev->name, wl); - if (ret < 0) { - wl1271_error("request_irq() failed: %d", ret); - goto out_free_hw; - } - - ret = enable_irq_wake(wl->irq); - if (!ret) { - wl->irq_wake_enabled = true; - device_init_wakeup(wl->dev, 1); - if (pdata->pwr_in_suspend) - hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY; - - } - disable_irq(wl->irq); - - ret = wl1271_init_ieee80211(wl); - if (ret) - goto out_irq; - - ret = wl1271_register_hw(wl); - if (ret) - goto out_irq; - - /* Create sysfs file to control bt coex state */ - ret = device_create_file(wl->dev, &dev_attr_bt_coex_state); - if (ret < 0) { - wl1271_error("failed to create sysfs file bt_coex_state"); - goto out_irq; - } - - /* Create sysfs file to get HW PG version */ - ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver); - if (ret < 0) { - wl1271_error("failed to create sysfs file hw_pg_ver"); - goto out_bt_coex_state; - } - - /* Create sysfs file for the FW log */ - ret = device_create_bin_file(wl->dev, &fwlog_attr); - if (ret < 0) { - wl1271_error("failed to create sysfs file fwlog"); - goto out_hw_pg_ver; - } - - return 0; - -out_hw_pg_ver: - device_remove_file(wl->dev, &dev_attr_hw_pg_ver); - -out_bt_coex_state: - device_remove_file(wl->dev, &dev_attr_bt_coex_state); - -out_irq: - free_irq(wl->irq, wl); - -out_free_hw: - wl1271_free_hw(wl); - -out: - return ret; -} - -static int __devexit wl12xx_remove(struct platform_device *pdev) -{ - struct wl1271 *wl = platform_get_drvdata(pdev); - - if (wl->irq_wake_enabled) { - device_init_wakeup(wl->dev, 0); - disable_irq_wake(wl->irq); - } - wl1271_unregister_hw(wl); - free_irq(wl->irq, wl); - wl1271_free_hw(wl); - - return 0; -} - -static const struct platform_device_id wl12xx_id_table[] __devinitconst = { - { "wl12xx", 0 }, - { } /* Terminating Entry */ -}; -MODULE_DEVICE_TABLE(platform, wl12xx_id_table); - -static struct platform_driver wl12xx_driver = { - .probe = wl12xx_probe, - .remove = __devexit_p(wl12xx_remove), - .id_table = wl12xx_id_table, - .driver = { - .name = "wl12xx_driver", - .owner = THIS_MODULE, - } -}; - -static int __init wl12xx_init(void) -{ - return platform_driver_register(&wl12xx_driver); -} -module_init(wl12xx_init); - -static void __exit wl12xx_exit(void) -{ - platform_driver_unregister(&wl12xx_driver); -} -module_exit(wl12xx_exit); 
+EXPORT_SYMBOL_GPL(wl1271_free_hw); u32 wl12xx_debug_level = DEBUG_NONE; EXPORT_SYMBOL_GPL(wl12xx_debug_level); diff --git a/trunk/drivers/net/wireless/wl12xx/ps.c b/trunk/drivers/net/wireless/wl12xx/ps.c index a2bdacdd7e1d..c15ebf2efd40 100644 --- a/trunk/drivers/net/wireless/wl12xx/ps.c +++ b/trunk/drivers/net/wireless/wl12xx/ps.c @@ -25,7 +25,6 @@ #include "ps.h" #include "io.h" #include "tx.h" -#include "debug.h" #define WL1271_WAKEUP_TIMEOUT 500 @@ -33,7 +32,6 @@ void wl1271_elp_work(struct work_struct *work) { struct delayed_work *dwork; struct wl1271 *wl; - struct wl12xx_vif *wlvif; dwork = container_of(work, struct delayed_work, work); wl = container_of(dwork, struct wl1271, elp_work); @@ -49,18 +47,11 @@ void wl1271_elp_work(struct work_struct *work) if (unlikely(!test_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))) goto out; - if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) + if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) || + (!test_bit(WL1271_FLAG_PSM, &wl->flags) && + !test_bit(WL1271_FLAG_IDLE, &wl->flags))) goto out; - wl12xx_for_each_wlvif(wl, wlvif) { - if (wlvif->bss_type == BSS_TYPE_AP_BSS) - goto out; - - if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) && - test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) - goto out; - } - wl1271_debug(DEBUG_PSM, "chip to elp"); wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP); set_bit(WL1271_FLAG_IN_ELP, &wl->flags); @@ -74,20 +65,13 @@ void wl1271_elp_work(struct work_struct *work) /* Routines to toggle sleep mode while in ELP */ void wl1271_ps_elp_sleep(struct wl1271 *wl) { - struct wl12xx_vif *wlvif; - /* we shouldn't get consecutive sleep requests */ if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))) return; - wl12xx_for_each_wlvif(wl, wlvif) { - if (wlvif->bss_type == BSS_TYPE_AP_BSS) - return; - - if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) && - test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) - return; - } + if (!test_bit(WL1271_FLAG_PSM, &wl->flags) && + !test_bit(WL1271_FLAG_IDLE, &wl->flags)) + return; ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, msecs_to_jiffies(ELP_ENTRY_DELAY)); @@ -159,8 +143,8 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl) return 0; } -int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, - enum wl1271_cmd_ps_mode mode, u32 rates, bool send) +int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode, + u32 rates, bool send) { int ret; @@ -168,34 +152,39 @@ int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, case STATION_POWER_SAVE_MODE: wl1271_debug(DEBUG_PSM, "entering psm"); - ret = wl1271_acx_wake_up_conditions(wl, wlvif); + ret = wl1271_acx_wake_up_conditions(wl); if (ret < 0) { wl1271_error("couldn't set wake up conditions"); return ret; } - ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_POWER_SAVE_MODE); + ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE); if (ret < 0) return ret; - set_bit(WLVIF_FLAG_PSM, &wlvif->flags); + set_bit(WL1271_FLAG_PSM, &wl->flags); break; case STATION_ACTIVE_MODE: default: wl1271_debug(DEBUG_PSM, "leaving psm"); /* disable beacon early termination */ - if (wlvif->band == IEEE80211_BAND_2GHZ) { - ret = wl1271_acx_bet_enable(wl, wlvif, false); + if (wl->band == IEEE80211_BAND_2GHZ) { + ret = wl1271_acx_bet_enable(wl, false); if (ret < 0) return ret; } - ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_ACTIVE_MODE); + /* disable beacon filtering */ + ret = wl1271_acx_beacon_filter_opt(wl, false); + if (ret < 0) + return ret; + + ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE); if (ret < 0) return ret; - 
clear_bit(WLVIF_FLAG_PSM, &wlvif->flags); + clear_bit(WL1271_FLAG_PSM, &wl->flags); break; } @@ -234,11 +223,9 @@ static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid) wl1271_handle_tx_low_watermark(wl); } -void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u8 hlid, bool clean_queues) +void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues) { struct ieee80211_sta *sta; - struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); if (test_bit(hlid, &wl->ap_ps_map)) return; @@ -248,7 +235,7 @@ void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif, clean_queues); rcu_read_lock(); - sta = ieee80211_find_sta(vif, wl->links[hlid].addr); + sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr); if (!sta) { wl1271_error("could not find sta %pM for starting ps", wl->links[hlid].addr); @@ -266,10 +253,9 @@ void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif, __set_bit(hlid, &wl->ap_ps_map); } -void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid) +void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid) { struct ieee80211_sta *sta; - struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); if (!test_bit(hlid, &wl->ap_ps_map)) return; @@ -279,7 +265,7 @@ void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid) __clear_bit(hlid, &wl->ap_ps_map); rcu_read_lock(); - sta = ieee80211_find_sta(vif, wl->links[hlid].addr); + sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr); if (!sta) { wl1271_error("could not find sta %pM for ending ps", wl->links[hlid].addr); diff --git a/trunk/drivers/net/wireless/wl12xx/ps.h b/trunk/drivers/net/wireless/wl12xx/ps.h index a12052f02026..25eb9bc9b628 100644 --- a/trunk/drivers/net/wireless/wl12xx/ps.h +++ b/trunk/drivers/net/wireless/wl12xx/ps.h @@ -27,14 +27,13 @@ #include "wl12xx.h" #include "acx.h" -int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, - enum wl1271_cmd_ps_mode mode, u32 rates, bool send); +int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode, + u32 rates, bool send); void wl1271_ps_elp_sleep(struct wl1271 *wl); int wl1271_ps_elp_wakeup(struct wl1271 *wl); void wl1271_elp_work(struct work_struct *work); -void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u8 hlid, bool clean_queues); -void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid); +void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues); +void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid); #define WL1271_PS_COMPLETE_TIMEOUT 500 diff --git a/trunk/drivers/net/wireless/wl12xx/reg.h b/trunk/drivers/net/wireless/wl12xx/reg.h index df34d5977b98..3f570f397586 100644 --- a/trunk/drivers/net/wireless/wl12xx/reg.h +++ b/trunk/drivers/net/wireless/wl12xx/reg.h @@ -408,7 +408,7 @@ /* Firmware image load chunk size */ -#define CHUNK_SIZE 16384 +#define CHUNK_SIZE 512 /* Firmware image header size */ #define FW_HDR_SIZE 8 diff --git a/trunk/drivers/net/wireless/wl12xx/rx.c b/trunk/drivers/net/wireless/wl12xx/rx.c index 4fbd2a722ffa..dee4cfe9ccc1 100644 --- a/trunk/drivers/net/wireless/wl12xx/rx.c +++ b/trunk/drivers/net/wireless/wl12xx/rx.c @@ -25,11 +25,9 @@ #include #include "wl12xx.h" -#include "debug.h" #include "acx.h" #include "reg.h" #include "rx.h" -#include "tx.h" #include "io.h" static u8 wl12xx_rx_get_mem_block(struct wl12xx_fw_status *status, @@ -98,7 +96,7 @@ static void wl1271_rx_status(struct wl1271 *wl, } static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 
length, - bool unaligned, u8 *hlid) + bool unaligned) { struct wl1271_rx_descriptor *desc; struct sk_buff *skb; @@ -161,7 +159,6 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, * payload aligned to 4 bytes. */ memcpy(buf, data + sizeof(*desc), length - sizeof(*desc)); - *hlid = desc->hlid; hdr = (struct ieee80211_hdr *)skb->data; if (ieee80211_is_beacon(hdr->frame_control)) @@ -172,10 +169,10 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon); seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; - wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d hlid %d", skb, + wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d", skb, skb->len - desc->pad_len, beacon ? "beacon" : "", - seq_num, *hlid); + seq_num); skb_trim(skb, skb->len - desc->pad_len); @@ -188,7 +185,6 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status) { struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map; - unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0}; u32 buf_size; u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK; u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK; @@ -196,7 +192,8 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status) u32 mem_block; u32 pkt_length; u32 pkt_offset; - u8 hlid; + bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); + bool had_data = false; bool unaligned = false; while (drv_rx_counter != fw_rx_counter) { @@ -256,15 +253,8 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status) */ if (wl1271_rx_handle_data(wl, wl->aggr_buf + pkt_offset, - pkt_length, unaligned, - &hlid) == 1) { - if (hlid < WL12XX_MAX_LINKS) - __set_bit(hlid, active_hlids); - else - WARN(1, - "hlid exceeded WL12XX_MAX_LINKS " - "(%d)\n", hlid); - } + pkt_length, unaligned) == 1) + had_data = true; wl->rx_counter++; drv_rx_counter++; @@ -280,5 +270,17 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status) if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION) wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter); - wl12xx_rearm_rx_streaming(wl, active_hlids); + if (!is_ap && wl->conf.rx_streaming.interval && had_data && + (wl->conf.rx_streaming.always || + test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) { + u32 timeout = wl->conf.rx_streaming.duration; + + /* restart rx streaming */ + if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags)) + ieee80211_queue_work(wl->hw, + &wl->rx_streaming_enable_work); + + mod_timer(&wl->rx_streaming_timer, + jiffies + msecs_to_jiffies(timeout)); + } } diff --git a/trunk/drivers/net/wireless/wl12xx/scan.c b/trunk/drivers/net/wireless/wl12xx/scan.c index e24111ececc5..fc29c671cf3b 100644 --- a/trunk/drivers/net/wireless/wl12xx/scan.c +++ b/trunk/drivers/net/wireless/wl12xx/scan.c @@ -24,7 +24,6 @@ #include #include "wl12xx.h" -#include "debug.h" #include "cmd.h" #include "scan.h" #include "acx.h" @@ -35,8 +34,6 @@ void wl1271_scan_complete_work(struct work_struct *work) { struct delayed_work *dwork; struct wl1271 *wl; - struct ieee80211_vif *vif; - struct wl12xx_vif *wlvif; int ret; bool is_sta, is_ibss; @@ -53,31 +50,28 @@ void wl1271_scan_complete_work(struct work_struct *work) if (wl->scan.state == WL1271_SCAN_STATE_IDLE) goto out; - vif = wl->scan_vif; - wlvif = wl12xx_vif_to_data(vif); - wl->scan.state = WL1271_SCAN_STATE_IDLE; memset(wl->scan.scanned_ch, 0, 
sizeof(wl->scan.scanned_ch)); wl->scan.req = NULL; - wl->scan_vif = NULL; ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; - if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) { + if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) { /* restore hardware connection monitoring template */ - wl1271_cmd_build_ap_probe_req(wl, wlvif, wlvif->probereq); + wl1271_cmd_build_ap_probe_req(wl, wl->probereq); } /* return to ROC if needed */ - is_sta = (wlvif->bss_type == BSS_TYPE_STA_BSS); - is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS); - if (((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) || - (is_ibss && !test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))) && - !test_bit(wlvif->dev_role_id, wl->roc_map)) { + is_sta = (wl->bss_type == BSS_TYPE_STA_BSS); + is_ibss = (wl->bss_type == BSS_TYPE_IBSS); + if (((is_sta && !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) || + (is_ibss && !test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags))) && + !test_bit(wl->dev_role_id, wl->roc_map)) { /* restore remain on channel */ - wl12xx_start_dev(wl, wlvif); + wl12xx_cmd_role_start_dev(wl); + wl12xx_roc(wl, wl->dev_role_id); } wl1271_ps_elp_sleep(wl); @@ -161,11 +155,9 @@ static int wl1271_get_scan_channels(struct wl1271 *wl, #define WL1271_NOTHING_TO_SCAN 1 -static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif, - enum ieee80211_band band, - bool passive, u32 basic_rate) +static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band, + bool passive, u32 basic_rate) { - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); struct wl1271_cmd_scan *cmd; struct wl1271_cmd_trigger_scan_to *trigger; int ret; @@ -185,11 +177,11 @@ static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif, if (passive) scan_options |= WL1271_SCAN_OPT_PASSIVE; - if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID)) { + if (WARN_ON(wl->role_id == WL12XX_INVALID_ROLE_ID)) { ret = -EINVAL; goto out; } - cmd->params.role_id = wlvif->role_id; + cmd->params.role_id = wl->role_id; cmd->params.scan_options = cpu_to_le16(scan_options); cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req, @@ -202,6 +194,7 @@ static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif, cmd->params.tx_rate = cpu_to_le32(basic_rate); cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs; + cmd->params.tx_rate = cpu_to_le32(basic_rate); cmd->params.tid_trigger = 0; cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG; @@ -215,11 +208,11 @@ static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif, memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len); } - memcpy(cmd->addr, vif->addr, ETH_ALEN); + memcpy(cmd->addr, wl->mac_addr, ETH_ALEN); - ret = wl1271_cmd_build_probe_req(wl, wlvif, wl->scan.ssid, - wl->scan.ssid_len, wl->scan.req->ie, - wl->scan.req->ie_len, band); + ret = wl1271_cmd_build_probe_req(wl, wl->scan.ssid, wl->scan.ssid_len, + wl->scan.req->ie, wl->scan.req->ie_len, + band); if (ret < 0) { wl1271_error("PROBE request template failed"); goto out; @@ -248,12 +241,11 @@ static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif, return ret; } -void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif) +void wl1271_scan_stm(struct wl1271 *wl) { - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int ret = 0; enum ieee80211_band band; - u32 rate, mask; + u32 rate; switch (wl->scan.state) { case WL1271_SCAN_STATE_IDLE: @@ -261,59 +253,47 @@ void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif) case 
WL1271_SCAN_STATE_2GHZ_ACTIVE: band = IEEE80211_BAND_2GHZ; - mask = wlvif->bitrate_masks[band]; - if (wl->scan.req->no_cck) { - mask &= ~CONF_TX_CCK_RATES; - if (!mask) - mask = CONF_TX_RATE_MASK_BASIC_P2P; - } - rate = wl1271_tx_min_rate_get(wl, mask); - ret = wl1271_scan_send(wl, vif, band, false, rate); + rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]); + ret = wl1271_scan_send(wl, band, false, rate); if (ret == WL1271_NOTHING_TO_SCAN) { wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE; - wl1271_scan_stm(wl, vif); + wl1271_scan_stm(wl); } break; case WL1271_SCAN_STATE_2GHZ_PASSIVE: band = IEEE80211_BAND_2GHZ; - mask = wlvif->bitrate_masks[band]; - if (wl->scan.req->no_cck) { - mask &= ~CONF_TX_CCK_RATES; - if (!mask) - mask = CONF_TX_RATE_MASK_BASIC_P2P; - } - rate = wl1271_tx_min_rate_get(wl, mask); - ret = wl1271_scan_send(wl, vif, band, true, rate); + rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]); + ret = wl1271_scan_send(wl, band, true, rate); if (ret == WL1271_NOTHING_TO_SCAN) { if (wl->enable_11a) wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE; else wl->scan.state = WL1271_SCAN_STATE_DONE; - wl1271_scan_stm(wl, vif); + wl1271_scan_stm(wl); } break; case WL1271_SCAN_STATE_5GHZ_ACTIVE: band = IEEE80211_BAND_5GHZ; - rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]); - ret = wl1271_scan_send(wl, vif, band, false, rate); + rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]); + ret = wl1271_scan_send(wl, band, false, rate); if (ret == WL1271_NOTHING_TO_SCAN) { wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE; - wl1271_scan_stm(wl, vif); + wl1271_scan_stm(wl); } break; case WL1271_SCAN_STATE_5GHZ_PASSIVE: band = IEEE80211_BAND_5GHZ; - rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]); - ret = wl1271_scan_send(wl, vif, band, true, rate); + rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]); + ret = wl1271_scan_send(wl, band, true, rate); if (ret == WL1271_NOTHING_TO_SCAN) { wl->scan.state = WL1271_SCAN_STATE_DONE; - wl1271_scan_stm(wl, vif); + wl1271_scan_stm(wl); } break; @@ -337,8 +317,7 @@ void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif) } } -int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif, - const u8 *ssid, size_t ssid_len, +int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len, struct cfg80211_scan_request *req) { /* @@ -359,7 +338,6 @@ int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif, wl->scan.ssid_len = 0; } - wl->scan_vif = vif; wl->scan.req = req; memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); @@ -368,7 +346,7 @@ int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif, ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work, msecs_to_jiffies(WL1271_SCAN_TIMEOUT)); - wl1271_scan_stm(wl, vif); + wl1271_scan_stm(wl); return 0; } @@ -437,19 +415,18 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl, if (flags & IEEE80211_CHAN_RADAR) { channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS; - channels[j].passive_duration = cpu_to_le16(c->dwell_time_dfs); - } else { + } + else if (flags & IEEE80211_CHAN_PASSIVE_SCAN) { channels[j].passive_duration = cpu_to_le16(c->dwell_time_passive); + } else { + channels[j].min_duration = + cpu_to_le16(c->min_dwell_time_active); + channels[j].max_duration = + cpu_to_le16(c->max_dwell_time_active); } - - channels[j].min_duration = - cpu_to_le16(c->min_dwell_time_active); - channels[j].max_duration = - cpu_to_le16(c->max_dwell_time_active); - channels[j].tx_power_att = req->channels[i]->max_power; channels[j].channel = 
req->channels[i]->hw_value; @@ -573,9 +550,6 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl, * so they're used in probe requests. */ for (i = 0; i < req->n_ssids; i++) { - if (!req->ssids[i].ssid_len) - continue; - for (j = 0; j < cmd->n_ssids; j++) if (!memcmp(req->ssids[i].ssid, cmd->ssids[j].ssid, @@ -611,7 +585,6 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl, } int wl1271_scan_sched_scan_config(struct wl1271 *wl, - struct wl12xx_vif *wlvif, struct cfg80211_sched_scan_request *req, struct ieee80211_sched_scan_ies *ies) { @@ -658,7 +631,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl, } if (!force_passive && cfg->active[0]) { - ret = wl1271_cmd_build_probe_req(wl, wlvif, req->ssids[0].ssid, + ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid, req->ssids[0].ssid_len, ies->ie[IEEE80211_BAND_2GHZ], ies->len[IEEE80211_BAND_2GHZ], @@ -670,7 +643,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl, } if (!force_passive && cfg->active[1]) { - ret = wl1271_cmd_build_probe_req(wl, wlvif, req->ssids[0].ssid, + ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid, req->ssids[0].ssid_len, ies->ie[IEEE80211_BAND_5GHZ], ies->len[IEEE80211_BAND_5GHZ], @@ -694,17 +667,17 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl, return ret; } -int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif) +int wl1271_scan_sched_scan_start(struct wl1271 *wl) { struct wl1271_cmd_sched_scan_start *start; int ret = 0; wl1271_debug(DEBUG_CMD, "cmd periodic scan start"); - if (wlvif->bss_type != BSS_TYPE_STA_BSS) + if (wl->bss_type != BSS_TYPE_STA_BSS) return -EOPNOTSUPP; - if (test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) + if (!test_bit(WL1271_FLAG_IDLE, &wl->flags)) return -EBUSY; start = kzalloc(sizeof(*start), GFP_KERNEL); @@ -754,6 +727,7 @@ void wl1271_scan_sched_scan_stop(struct wl1271 *wl) wl1271_error("failed to send sched scan stop command"); goto out_free; } + wl->sched_scanning = false; out_free: kfree(stop); diff --git a/trunk/drivers/net/wireless/wl12xx/scan.h b/trunk/drivers/net/wireless/wl12xx/scan.h index a7ed43dc08c9..92115156522f 100644 --- a/trunk/drivers/net/wireless/wl12xx/scan.h +++ b/trunk/drivers/net/wireless/wl12xx/scan.h @@ -26,20 +26,18 @@ #include "wl12xx.h" -int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif, - const u8 *ssid, size_t ssid_len, +int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len, struct cfg80211_scan_request *req); int wl1271_scan_stop(struct wl1271 *wl); int wl1271_scan_build_probe_req(struct wl1271 *wl, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, u8 band); -void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif); +void wl1271_scan_stm(struct wl1271 *wl); void wl1271_scan_complete_work(struct work_struct *work); int wl1271_scan_sched_scan_config(struct wl1271 *wl, - struct wl12xx_vif *wlvif, struct cfg80211_sched_scan_request *req, struct ieee80211_sched_scan_ies *ies); -int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl1271_scan_sched_scan_start(struct wl1271 *wl); void wl1271_scan_sched_scan_stop(struct wl1271 *wl); void wl1271_scan_sched_scan_results(struct wl1271 *wl); diff --git a/trunk/drivers/net/wireless/wl12xx/sdio.c b/trunk/drivers/net/wireless/wl12xx/sdio.c index 468a50553fac..516a8980723c 100644 --- a/trunk/drivers/net/wireless/wl12xx/sdio.c +++ b/trunk/drivers/net/wireless/wl12xx/sdio.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include @@ -45,67 +44,107 @@ #define 
SDIO_DEVICE_ID_TI_WL1271 0x4076 #endif -struct wl12xx_sdio_glue { - struct device *dev; - struct platform_device *core; -}; - static const struct sdio_device_id wl1271_devices[] __devinitconst = { { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) }, {} }; MODULE_DEVICE_TABLE(sdio, wl1271_devices); -static void wl1271_sdio_set_block_size(struct device *child, - unsigned int blksz) +static void wl1271_sdio_set_block_size(struct wl1271 *wl, unsigned int blksz) { - struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent); - struct sdio_func *func = dev_to_sdio_func(glue->dev); + sdio_claim_host(wl->if_priv); + sdio_set_block_size(wl->if_priv, blksz); + sdio_release_host(wl->if_priv); +} - sdio_claim_host(func); - sdio_set_block_size(func, blksz); - sdio_release_host(func); +static inline struct sdio_func *wl_to_func(struct wl1271 *wl) +{ + return wl->if_priv; +} + +static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl) +{ + return &(wl_to_func(wl)->dev); +} + +static irqreturn_t wl1271_hardirq(int irq, void *cookie) +{ + struct wl1271 *wl = cookie; + unsigned long flags; + + wl1271_debug(DEBUG_IRQ, "IRQ"); + + /* complete the ELP completion */ + spin_lock_irqsave(&wl->wl_lock, flags); + set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); + if (wl->elp_compl) { + complete(wl->elp_compl); + wl->elp_compl = NULL; + } + + if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) { + /* don't enqueue a work right now. mark it as pending */ + set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags); + wl1271_debug(DEBUG_IRQ, "should not enqueue work"); + disable_irq_nosync(wl->irq); + pm_wakeup_event(wl1271_sdio_wl_to_dev(wl), 0); + spin_unlock_irqrestore(&wl->wl_lock, flags); + return IRQ_HANDLED; + } + spin_unlock_irqrestore(&wl->wl_lock, flags); + + return IRQ_WAKE_THREAD; } -static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf, +static void wl1271_sdio_disable_interrupts(struct wl1271 *wl) +{ + disable_irq(wl->irq); +} + +static void wl1271_sdio_enable_interrupts(struct wl1271 *wl) +{ + enable_irq(wl->irq); +} + +static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf, size_t len, bool fixed) { int ret; - struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent); - struct sdio_func *func = dev_to_sdio_func(glue->dev); + struct sdio_func *func = wl_to_func(wl); if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret); - dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n", - addr, ((u8 *)buf)[0]); + wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x", + addr, ((u8 *)buf)[0]); } else { if (fixed) ret = sdio_readsb(func, buf, addr, len); else ret = sdio_memcpy_fromio(func, buf, addr, len); - dev_dbg(child->parent, "sdio read 53 addr 0x%x, %zu bytes\n", - addr, len); + wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes", + addr, len); + wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len); } if (ret) - dev_err(child->parent, "sdio read failed (%d)\n", ret); + wl1271_error("sdio read failed (%d)", ret); } -static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf, +static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf, size_t len, bool fixed) { int ret; - struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent); - struct sdio_func *func = dev_to_sdio_func(glue->dev); + struct sdio_func *func = wl_to_func(wl); if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret); - dev_dbg(child->parent, "sdio write 
52 addr 0x%x, byte 0x%02x\n", - addr, ((u8 *)buf)[0]); + wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x", + addr, ((u8 *)buf)[0]); } else { - dev_dbg(child->parent, "sdio write 53 addr 0x%x, %zu bytes\n", - addr, len); + wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes", + addr, len); + wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len); if (fixed) ret = sdio_writesb(func, addr, buf, len); @@ -114,13 +153,13 @@ static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf, } if (ret) - dev_err(child->parent, "sdio write failed (%d)\n", ret); + wl1271_error("sdio write failed (%d)", ret); } -static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue) +static int wl1271_sdio_power_on(struct wl1271 *wl) { + struct sdio_func *func = wl_to_func(wl); int ret; - struct sdio_func *func = dev_to_sdio_func(glue->dev); /* If enabled, tell runtime PM not to power off the card */ if (pm_runtime_enabled(&func->dev)) { @@ -141,10 +180,10 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue) return ret; } -static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue) +static int wl1271_sdio_power_off(struct wl1271 *wl) { + struct sdio_func *func = wl_to_func(wl); int ret; - struct sdio_func *func = dev_to_sdio_func(glue->dev); sdio_disable_func(func); sdio_release_host(func); @@ -161,43 +200,46 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue) return ret; } -static int wl12xx_sdio_set_power(struct device *child, bool enable) +static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable) { - struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent); - if (enable) - return wl12xx_sdio_power_on(glue); + return wl1271_sdio_power_on(wl); else - return wl12xx_sdio_power_off(glue); + return wl1271_sdio_power_off(wl); } static struct wl1271_if_operations sdio_ops = { - .read = wl12xx_sdio_raw_read, - .write = wl12xx_sdio_raw_write, - .power = wl12xx_sdio_set_power, + .read = wl1271_sdio_raw_read, + .write = wl1271_sdio_raw_write, + .power = wl1271_sdio_set_power, + .dev = wl1271_sdio_wl_to_dev, + .enable_irq = wl1271_sdio_enable_interrupts, + .disable_irq = wl1271_sdio_disable_interrupts, .set_block_size = wl1271_sdio_set_block_size, }; static int __devinit wl1271_probe(struct sdio_func *func, const struct sdio_device_id *id) { - struct wl12xx_platform_data *wlan_data; - struct wl12xx_sdio_glue *glue; - struct resource res[1]; + struct ieee80211_hw *hw; + const struct wl12xx_platform_data *wlan_data; + struct wl1271 *wl; + unsigned long irqflags; mmc_pm_flag_t mmcflags; - int ret = -ENOMEM; + int ret; /* We are only able to handle the wlan function */ if (func->num != 0x02) return -ENODEV; - glue = kzalloc(sizeof(*glue), GFP_KERNEL); - if (!glue) { - dev_err(&func->dev, "can't allocate glue\n"); - goto out; - } + hw = wl1271_alloc_hw(); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + wl = hw->priv; - glue->dev = &func->dev; + wl->if_priv = func; + wl->if_ops = &sdio_ops; /* Grab access to FN0 for ELP reg. 
*/ func->card->quirks |= MMC_QUIRK_LENIENT_FN0; @@ -208,79 +250,80 @@ static int __devinit wl1271_probe(struct sdio_func *func, wlan_data = wl12xx_get_platform_data(); if (IS_ERR(wlan_data)) { ret = PTR_ERR(wlan_data); - dev_err(glue->dev, "missing wlan platform data: %d\n", ret); - goto out_free_glue; + wl1271_error("missing wlan platform data: %d", ret); + goto out_free; } - /* if sdio can keep power while host is suspended, enable wow */ - mmcflags = sdio_get_host_pm_caps(func); - dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags); + wl->irq = wlan_data->irq; + wl->ref_clock = wlan_data->board_ref_clock; + wl->tcxo_clock = wlan_data->board_tcxo_clock; + wl->platform_quirks = wlan_data->platform_quirks; - if (mmcflags & MMC_PM_KEEP_POWER) - wlan_data->pwr_in_suspend = true; - - wlan_data->ops = &sdio_ops; + if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) + irqflags = IRQF_TRIGGER_RISING; + else + irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT; + + ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq, + irqflags, + DRIVER_NAME, wl); + if (ret < 0) { + wl1271_error("request_irq() failed: %d", ret); + goto out_free; + } - sdio_set_drvdata(func, glue); + ret = enable_irq_wake(wl->irq); + if (!ret) { + wl->irq_wake_enabled = true; + device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 1); - /* Tell PM core that we don't need the card to be powered now */ - pm_runtime_put_noidle(&func->dev); + /* if sdio can keep power while host is suspended, enable wow */ + mmcflags = sdio_get_host_pm_caps(func); + wl1271_debug(DEBUG_SDIO, "sdio PM caps = 0x%x", mmcflags); - glue->core = platform_device_alloc("wl12xx", -1); - if (!glue->core) { - dev_err(glue->dev, "can't allocate platform_device"); - ret = -ENOMEM; - goto out_free_glue; + if (mmcflags & MMC_PM_KEEP_POWER) + hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY; } + disable_irq(wl->irq); - glue->core->dev.parent = &func->dev; + ret = wl1271_init_ieee80211(wl); + if (ret) + goto out_irq; - memset(res, 0x00, sizeof(res)); + ret = wl1271_register_hw(wl); + if (ret) + goto out_irq; - res[0].start = wlan_data->irq; - res[0].flags = IORESOURCE_IRQ; - res[0].name = "irq"; + sdio_set_drvdata(func, wl); - ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res)); - if (ret) { - dev_err(glue->dev, "can't add resources\n"); - goto out_dev_put; - } - - ret = platform_device_add_data(glue->core, wlan_data, - sizeof(*wlan_data)); - if (ret) { - dev_err(glue->dev, "can't add platform data\n"); - goto out_dev_put; - } + /* Tell PM core that we don't need the card to be powered now */ + pm_runtime_put_noidle(&func->dev); - ret = platform_device_add(glue->core); - if (ret) { - dev_err(glue->dev, "can't add platform device\n"); - goto out_dev_put; - } return 0; -out_dev_put: - platform_device_put(glue->core); + out_irq: + free_irq(wl->irq, wl); -out_free_glue: - kfree(glue); + out_free: + wl1271_free_hw(wl); -out: return ret; } static void __devexit wl1271_remove(struct sdio_func *func) { - struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func); + struct wl1271 *wl = sdio_get_drvdata(func); /* Undo decrement done above in wl1271_probe */ pm_runtime_get_noresume(&func->dev); - platform_device_del(glue->core); - platform_device_put(glue->core); - kfree(glue); + wl1271_unregister_hw(wl); + if (wl->irq_wake_enabled) { + device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 0); + disable_irq_wake(wl->irq); + } + free_irq(wl->irq, wl); + wl1271_free_hw(wl); } #ifdef CONFIG_PM @@ -289,21 +332,20 @@ static int wl1271_suspend(struct device *dev) /* Tell MMC/SDIO 
core it's OK to power down the card * (if it isn't already), but not to remove it completely */ struct sdio_func *func = dev_to_sdio_func(dev); - struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func); - struct wl1271 *wl = platform_get_drvdata(glue->core); + struct wl1271 *wl = sdio_get_drvdata(func); mmc_pm_flag_t sdio_flags; int ret = 0; - dev_dbg(dev, "wl1271 suspend. wow_enabled: %d\n", - wl->wow_enabled); + wl1271_debug(DEBUG_MAC80211, "wl1271 suspend. wow_enabled: %d", + wl->wow_enabled); /* check whether sdio should keep power */ if (wl->wow_enabled) { sdio_flags = sdio_get_host_pm_caps(func); if (!(sdio_flags & MMC_PM_KEEP_POWER)) { - dev_err(dev, "can't keep power while host " - "is suspended\n"); + wl1271_error("can't keep power while host " + "is suspended"); ret = -EINVAL; goto out; } @@ -311,7 +353,7 @@ static int wl1271_suspend(struct device *dev) /* keep power while host suspended */ ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); if (ret) { - dev_err(dev, "error while trying to keep power\n"); + wl1271_error("error while trying to keep power"); goto out; } @@ -325,10 +367,9 @@ static int wl1271_suspend(struct device *dev) static int wl1271_resume(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); - struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func); - struct wl1271 *wl = platform_get_drvdata(glue->core); + struct wl1271 *wl = sdio_get_drvdata(func); - dev_dbg(dev, "wl1271 resume\n"); + wl1271_debug(DEBUG_MAC80211, "wl1271 resume"); if (wl->wow_enabled) { /* claim back host */ sdio_claim_host(func); diff --git a/trunk/drivers/net/wireless/wl12xx/sdio_test.c b/trunk/drivers/net/wireless/wl12xx/sdio_test.c new file mode 100644 index 000000000000..f25d5d9212e7 --- /dev/null +++ b/trunk/drivers/net/wireless/wl12xx/sdio_test.c @@ -0,0 +1,543 @@ +/* + * SDIO testing driver for wl12xx + * + * Copyright (C) 2010 Nokia Corporation + * + * Contact: Roger Quadros + * + * wl12xx read/write routines taken from the main module + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "wl12xx.h" +#include "io.h" +#include "boot.h" + +#ifndef SDIO_VENDOR_ID_TI +#define SDIO_VENDOR_ID_TI 0x0097 +#endif + +#ifndef SDIO_DEVICE_ID_TI_WL1271 +#define SDIO_DEVICE_ID_TI_WL1271 0x4076 +#endif + +static bool rx, tx; + +module_param(rx, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(rx, "Perform rx test. Default (0). " + "This test continuously reads data from the SDIO device.\n"); + +module_param(tx, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(tx, "Perform tx test. Default (0). 
" + "This test continuously writes data to the SDIO device.\n"); + +struct wl1271_test { + struct wl1271 wl; + struct task_struct *test_task; +}; + +static const struct sdio_device_id wl1271_devices[] = { + { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) }, + {} +}; + +static inline struct sdio_func *wl_to_func(struct wl1271 *wl) +{ + return wl->if_priv; +} + +static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl) +{ + return &(wl_to_func(wl)->dev); +} + +static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf, + size_t len, bool fixed) +{ + int ret = 0; + struct sdio_func *func = wl_to_func(wl); + + if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { + ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret); + wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x", + addr, ((u8 *)buf)[0]); + } else { + if (fixed) + ret = sdio_readsb(func, buf, addr, len); + else + ret = sdio_memcpy_fromio(func, buf, addr, len); + + wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes", + addr, len); + wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len); + } + + if (ret) + wl1271_error("sdio read failed (%d)", ret); +} + +static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf, + size_t len, bool fixed) +{ + int ret = 0; + struct sdio_func *func = wl_to_func(wl); + + if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { + sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret); + wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x", + addr, ((u8 *)buf)[0]); + } else { + wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes", + addr, len); + wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len); + + if (fixed) + ret = sdio_writesb(func, addr, buf, len); + else + ret = sdio_memcpy_toio(func, addr, buf, len); + } + if (ret) + wl1271_error("sdio write failed (%d)", ret); + +} + +static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable) +{ + struct sdio_func *func = wl_to_func(wl); + int ret; + + /* Let the SDIO stack handle wlan_enable control, so we + * keep host claimed while wlan is in use to keep wl1271 + * alive. 
+ */ + if (enable) { + /* Power up the card */ + ret = pm_runtime_get_sync(&func->dev); + if (ret < 0) + goto out; + + /* Runtime PM might be disabled, power up the card manually */ + ret = mmc_power_restore_host(func->card->host); + if (ret < 0) + goto out; + + sdio_claim_host(func); + sdio_enable_func(func); + } else { + sdio_disable_func(func); + sdio_release_host(func); + + /* Runtime PM might be disabled, power off the card manually */ + ret = mmc_power_save_host(func->card->host); + if (ret < 0) + goto out; + + /* Power down the card */ + ret = pm_runtime_put_sync(&func->dev); + } + +out: + return ret; +} + +static void wl1271_sdio_disable_interrupts(struct wl1271 *wl) +{ +} + +static void wl1271_sdio_enable_interrupts(struct wl1271 *wl) +{ +} + + +static struct wl1271_if_operations sdio_ops = { + .read = wl1271_sdio_raw_read, + .write = wl1271_sdio_raw_write, + .power = wl1271_sdio_set_power, + .dev = wl1271_sdio_wl_to_dev, + .enable_irq = wl1271_sdio_enable_interrupts, + .disable_irq = wl1271_sdio_disable_interrupts, +}; + +static void wl1271_fw_wakeup(struct wl1271 *wl) +{ + u32 elp_reg; + + elp_reg = ELPCTRL_WAKE_UP; + wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg); +} + +static int wl1271_fetch_firmware(struct wl1271 *wl) +{ + const struct firmware *fw; + int ret; + + if (wl->chip.id == CHIP_ID_1283_PG20) + ret = request_firmware(&fw, WL128X_FW_NAME, + wl1271_wl_to_dev(wl)); + else + ret = request_firmware(&fw, WL127X_FW_NAME, + wl1271_wl_to_dev(wl)); + + if (ret < 0) { + wl1271_error("could not get firmware: %d", ret); + return ret; + } + + if (fw->size % 4) { + wl1271_error("firmware size is not multiple of 32 bits: %zu", + fw->size); + ret = -EILSEQ; + goto out; + } + + wl->fw_len = fw->size; + wl->fw = vmalloc(wl->fw_len); + + if (!wl->fw) { + wl1271_error("could not allocate memory for the firmware"); + ret = -ENOMEM; + goto out; + } + + memcpy(wl->fw, fw->data, wl->fw_len); + + ret = 0; + +out: + release_firmware(fw); + + return ret; +} + +static int wl1271_fetch_nvs(struct wl1271 *wl) +{ + const struct firmware *fw; + int ret; + + ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl)); + + if (ret < 0) { + wl1271_error("could not get nvs file: %d", ret); + return ret; + } + + wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL); + + if (!wl->nvs) { + wl1271_error("could not allocate memory for the nvs file"); + ret = -ENOMEM; + goto out; + } + + wl->nvs_len = fw->size; + +out: + release_firmware(fw); + + return ret; +} + +static int wl1271_chip_wakeup(struct wl1271 *wl) +{ + struct wl1271_partition_set partition; + int ret; + + msleep(WL1271_PRE_POWER_ON_SLEEP); + ret = wl1271_power_on(wl); + if (ret) + return ret; + + msleep(WL1271_POWER_ON_SLEEP); + + /* We don't need a real memory partition here, because we only want + * to use the registers at this point. */ + memset(&partition, 0, sizeof(partition)); + partition.reg.start = REGISTERS_BASE; + partition.reg.size = REGISTERS_DOWN_SIZE; + wl1271_set_partition(wl, &partition); + + /* ELP module wake up */ + wl1271_fw_wakeup(wl); + + /* whal_FwCtrl_BootSm() */ + + /* 0. read chip id from CHIP_ID */ + wl->chip.id = wl1271_read32(wl, CHIP_ID_B); + + /* 1. 
check if chip id is valid */ + + switch (wl->chip.id) { + case CHIP_ID_1271_PG10: + wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete", + wl->chip.id); + break; + case CHIP_ID_1271_PG20: + wl1271_notice("chip id 0x%x (1271 PG20)", + wl->chip.id); + break; + case CHIP_ID_1283_PG20: + wl1271_notice("chip id 0x%x (1283 PG20)", + wl->chip.id); + break; + case CHIP_ID_1283_PG10: + default: + wl1271_warning("unsupported chip id: 0x%x", wl->chip.id); + return -ENODEV; + } + + return ret; +} + +static struct wl1271_partition_set part_down = { + .mem = { + .start = 0x00000000, + .size = 0x000177c0 + }, + .reg = { + .start = REGISTERS_BASE, + .size = 0x00008800 + }, + .mem2 = { + .start = 0x00000000, + .size = 0x00000000 + }, + .mem3 = { + .start = 0x00000000, + .size = 0x00000000 + }, +}; + +static int tester(void *data) +{ + struct wl1271 *wl = data; + struct sdio_func *func = wl_to_func(wl); + struct device *pdev = &func->dev; + int ret = 0; + bool rx_started = 0; + bool tx_started = 0; + uint8_t *tx_buf, *rx_buf; + int test_size = PAGE_SIZE; + u32 addr = 0; + struct wl1271_partition_set partition; + + /* We assume chip is powered up and firmware fetched */ + + memcpy(&partition, &part_down, sizeof(partition)); + partition.mem.start = addr; + wl1271_set_partition(wl, &partition); + + tx_buf = kmalloc(test_size, GFP_KERNEL); + rx_buf = kmalloc(test_size, GFP_KERNEL); + if (!tx_buf || !rx_buf) { + dev_err(pdev, + "Could not allocate memory. Test will not run.\n"); + ret = -ENOMEM; + goto free; + } + + memset(tx_buf, 0x5a, test_size); + + /* write something in data area so we can read it back */ + wl1271_write(wl, addr, tx_buf, test_size, false); + + while (!kthread_should_stop()) { + if (rx && !rx_started) { + dev_info(pdev, "starting rx test\n"); + rx_started = 1; + } else if (!rx && rx_started) { + dev_info(pdev, "stopping rx test\n"); + rx_started = 0; + } + + if (tx && !tx_started) { + dev_info(pdev, "starting tx test\n"); + tx_started = 1; + } else if (!tx && tx_started) { + dev_info(pdev, "stopping tx test\n"); + tx_started = 0; + } + + if (rx_started) + wl1271_read(wl, addr, rx_buf, test_size, false); + + if (tx_started) + wl1271_write(wl, addr, tx_buf, test_size, false); + + if (!rx_started && !tx_started) + msleep(100); + } + +free: + kfree(tx_buf); + kfree(rx_buf); + return ret; +} + +static int __devinit wl1271_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + const struct wl12xx_platform_data *wlan_data; + struct wl1271 *wl; + struct wl1271_test *wl_test; + int ret = 0; + + /* wl1271 has 2 sdio functions we handle just the wlan part */ + if (func->num != 0x02) + return -ENODEV; + + wl_test = kzalloc(sizeof(struct wl1271_test), GFP_KERNEL); + if (!wl_test) { + dev_err(&func->dev, "Could not allocate memory\n"); + return -ENOMEM; + } + + wl = &wl_test->wl; + + wl->if_priv = func; + wl->if_ops = &sdio_ops; + + /* Grab access to FN0 for ELP reg. 
*/ + func->card->quirks |= MMC_QUIRK_LENIENT_FN0; + + /* Use block mode for transferring over one block size of data */ + func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; + + wlan_data = wl12xx_get_platform_data(); + if (IS_ERR(wlan_data)) { + ret = PTR_ERR(wlan_data); + dev_err(&func->dev, "missing wlan platform data: %d\n", ret); + goto out_free; + } + + wl->irq = wlan_data->irq; + wl->ref_clock = wlan_data->board_ref_clock; + wl->tcxo_clock = wlan_data->board_tcxo_clock; + + sdio_set_drvdata(func, wl_test); + + /* power up the device */ + ret = wl1271_chip_wakeup(wl); + if (ret) { + dev_err(&func->dev, "could not wake up chip\n"); + goto out_free; + } + + if (wl->fw == NULL) { + ret = wl1271_fetch_firmware(wl); + if (ret < 0) { + dev_err(&func->dev, "firmware fetch error\n"); + goto out_off; + } + } + + /* fetch NVS */ + if (wl->nvs == NULL) { + ret = wl1271_fetch_nvs(wl); + if (ret < 0) { + dev_err(&func->dev, "NVS fetch error\n"); + goto out_off; + } + } + + ret = wl1271_load_firmware(wl); + if (ret < 0) { + dev_err(&func->dev, "firmware load error: %d\n", ret); + goto out_free; + } + + dev_info(&func->dev, "initialized\n"); + + /* I/O testing will be done in the tester thread */ + + wl_test->test_task = kthread_run(tester, wl, "sdio_tester"); + if (IS_ERR(wl_test->test_task)) { + dev_err(&func->dev, "unable to create kernel thread\n"); + ret = PTR_ERR(wl_test->test_task); + goto out_free; + } + + return 0; + +out_off: + /* power off the chip */ + wl1271_power_off(wl); + +out_free: + kfree(wl_test); + return ret; +} + +static void __devexit wl1271_remove(struct sdio_func *func) +{ + struct wl1271_test *wl_test = sdio_get_drvdata(func); + + /* stop the I/O test thread */ + kthread_stop(wl_test->test_task); + + /* power off the chip */ + wl1271_power_off(&wl_test->wl); + + vfree(wl_test->wl.fw); + wl_test->wl.fw = NULL; + kfree(wl_test->wl.nvs); + wl_test->wl.nvs = NULL; + + kfree(wl_test); +} + +static struct sdio_driver wl1271_sdio_driver = { + .name = "wl12xx_sdio_test", + .id_table = wl1271_devices, + .probe = wl1271_probe, + .remove = __devexit_p(wl1271_remove), +}; + +static int __init wl1271_init(void) +{ + int ret; + + ret = sdio_register_driver(&wl1271_sdio_driver); + if (ret < 0) + pr_err("failed to register sdio driver: %d\n", ret); + + return ret; +} +module_init(wl1271_init); + +static void __exit wl1271_exit(void) +{ + sdio_unregister_driver(&wl1271_sdio_driver); +} +module_exit(wl1271_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Roger Quadros "); + diff --git a/trunk/drivers/net/wireless/wl12xx/spi.c b/trunk/drivers/net/wireless/wl12xx/spi.c index 92caa7ce6053..0f9718677860 100644 --- a/trunk/drivers/net/wireless/wl12xx/spi.c +++ b/trunk/drivers/net/wireless/wl12xx/spi.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include "wl12xx.h" @@ -70,22 +69,35 @@ #define WSPI_MAX_NUM_OF_CHUNKS (WL1271_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) -struct wl12xx_spi_glue { - struct device *dev; - struct platform_device *core; -}; +static inline struct spi_device *wl_to_spi(struct wl1271 *wl) +{ + return wl->if_priv; +} + +static struct device *wl1271_spi_wl_to_dev(struct wl1271 *wl) +{ + return &(wl_to_spi(wl)->dev); +} + +static void wl1271_spi_disable_interrupts(struct wl1271 *wl) +{ + disable_irq(wl->irq); +} -static void wl12xx_spi_reset(struct device *child) +static void wl1271_spi_enable_interrupts(struct wl1271 *wl) +{ + enable_irq(wl->irq); +} + +static void wl1271_spi_reset(struct wl1271 *wl) { - struct wl12xx_spi_glue *glue = 
dev_get_drvdata(child->parent); u8 *cmd; struct spi_transfer t; struct spi_message m; cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL); if (!cmd) { - dev_err(child->parent, - "could not allocate cmd for spi reset\n"); + wl1271_error("could not allocate cmd for spi reset"); return; } @@ -98,22 +110,21 @@ static void wl12xx_spi_reset(struct device *child) t.len = WSPI_INIT_CMD_LEN; spi_message_add_tail(&t, &m); - spi_sync(to_spi_device(glue->dev), &m); + spi_sync(wl_to_spi(wl), &m); + wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN); kfree(cmd); } -static void wl12xx_spi_init(struct device *child) +static void wl1271_spi_init(struct wl1271 *wl) { - struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd; struct spi_transfer t; struct spi_message m; cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL); if (!cmd) { - dev_err(child->parent, - "could not allocate cmd for spi init\n"); + wl1271_error("could not allocate cmd for spi init"); return; } @@ -154,16 +165,15 @@ static void wl12xx_spi_init(struct device *child) t.len = WSPI_INIT_CMD_LEN; spi_message_add_tail(&t, &m); - spi_sync(to_spi_device(glue->dev), &m); + spi_sync(wl_to_spi(wl), &m); + wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN); kfree(cmd); } #define WL1271_BUSY_WORD_TIMEOUT 1000 -static int wl12xx_spi_read_busy(struct device *child) +static int wl1271_spi_read_busy(struct wl1271 *wl) { - struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); - struct wl1271 *wl = dev_get_drvdata(child); struct spi_transfer t[1]; struct spi_message m; u32 *busy_buf; @@ -184,22 +194,20 @@ static int wl12xx_spi_read_busy(struct device *child) t[0].len = sizeof(u32); t[0].cs_change = true; spi_message_add_tail(&t[0], &m); - spi_sync(to_spi_device(glue->dev), &m); + spi_sync(wl_to_spi(wl), &m); if (*busy_buf & 0x1) return 0; } /* The SPI bus is unresponsive, the read failed. 
*/ - dev_err(child->parent, "SPI read busy-word timeout!\n"); + wl1271_error("SPI read busy-word timeout!\n"); return -ETIMEDOUT; } -static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf, +static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf, size_t len, bool fixed) { - struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); - struct wl1271 *wl = dev_get_drvdata(child); struct spi_transfer t[2]; struct spi_message m; u32 *busy_buf; @@ -235,10 +243,10 @@ static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf, t[1].cs_change = true; spi_message_add_tail(&t[1], &m); - spi_sync(to_spi_device(glue->dev), &m); + spi_sync(wl_to_spi(wl), &m); if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) && - wl12xx_spi_read_busy(child)) { + wl1271_spi_read_busy(wl)) { memset(buf, 0, chunk_len); return; } @@ -251,7 +259,10 @@ static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf, t[0].cs_change = true; spi_message_add_tail(&t[0], &m); - spi_sync(to_spi_device(glue->dev), &m); + spi_sync(wl_to_spi(wl), &m); + + wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd)); + wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, chunk_len); if (!fixed) addr += chunk_len; @@ -260,10 +271,9 @@ static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf, } } -static void wl12xx_spi_raw_write(struct device *child, int addr, void *buf, - size_t len, bool fixed) +static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf, + size_t len, bool fixed) { - struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS]; struct spi_message m; u32 commands[WSPI_MAX_NUM_OF_CHUNKS]; @@ -298,6 +308,9 @@ static void wl12xx_spi_raw_write(struct device *child, int addr, void *buf, t[i].len = chunk_len; spi_message_add_tail(&t[i++], &m); + wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd)); + wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, chunk_len); + if (!fixed) addr += chunk_len; buf += chunk_len; @@ -305,41 +318,72 @@ static void wl12xx_spi_raw_write(struct device *child, int addr, void *buf, cmd++; } - spi_sync(to_spi_device(glue->dev), &m); + spi_sync(wl_to_spi(wl), &m); +} + +static irqreturn_t wl1271_hardirq(int irq, void *cookie) +{ + struct wl1271 *wl = cookie; + unsigned long flags; + + wl1271_debug(DEBUG_IRQ, "IRQ"); + + /* complete the ELP completion */ + spin_lock_irqsave(&wl->wl_lock, flags); + set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); + if (wl->elp_compl) { + complete(wl->elp_compl); + wl->elp_compl = NULL; + } + spin_unlock_irqrestore(&wl->wl_lock, flags); + + return IRQ_WAKE_THREAD; +} + +static int wl1271_spi_set_power(struct wl1271 *wl, bool enable) +{ + if (wl->set_power) + wl->set_power(enable); + + return 0; } static struct wl1271_if_operations spi_ops = { - .read = wl12xx_spi_raw_read, - .write = wl12xx_spi_raw_write, - .reset = wl12xx_spi_reset, - .init = wl12xx_spi_init, + .read = wl1271_spi_raw_read, + .write = wl1271_spi_raw_write, + .reset = wl1271_spi_reset, + .init = wl1271_spi_init, + .power = wl1271_spi_set_power, + .dev = wl1271_spi_wl_to_dev, + .enable_irq = wl1271_spi_enable_interrupts, + .disable_irq = wl1271_spi_disable_interrupts, .set_block_size = NULL, }; static int __devinit wl1271_probe(struct spi_device *spi) { - struct wl12xx_spi_glue *glue; struct wl12xx_platform_data *pdata; - struct resource res[1]; - int ret = -ENOMEM; + struct ieee80211_hw *hw; + struct wl1271 *wl; + unsigned long irqflags; + int ret; pdata = 
spi->dev.platform_data; if (!pdata) { - dev_err(&spi->dev, "no platform data\n"); + wl1271_error("no platform data"); return -ENODEV; } - pdata->ops = &spi_ops; + hw = wl1271_alloc_hw(); + if (IS_ERR(hw)) + return PTR_ERR(hw); - glue = kzalloc(sizeof(*glue), GFP_KERNEL); - if (!glue) { - dev_err(&spi->dev, "can't allocate glue\n"); - goto out; - } + wl = hw->priv; - glue->dev = &spi->dev; + dev_set_drvdata(&spi->dev, wl); + wl->if_priv = spi; - spi_set_drvdata(spi, glue); + wl->if_ops = &spi_ops; /* This is the only SPI value that we need to set here, the rest * comes from the board-peripherals file */ @@ -347,61 +391,69 @@ static int __devinit wl1271_probe(struct spi_device *spi) ret = spi_setup(spi); if (ret < 0) { - dev_err(glue->dev, "spi_setup failed\n"); - goto out_free_glue; + wl1271_error("spi_setup failed"); + goto out_free; } - glue->core = platform_device_alloc("wl12xx", -1); - if (!glue->core) { - dev_err(glue->dev, "can't allocate platform_device\n"); - ret = -ENOMEM; - goto out_free_glue; + wl->set_power = pdata->set_power; + if (!wl->set_power) { + wl1271_error("set power function missing in platform data"); + ret = -ENODEV; + goto out_free; } - glue->core->dev.parent = &spi->dev; - - memset(res, 0x00, sizeof(res)); + wl->ref_clock = pdata->board_ref_clock; + wl->tcxo_clock = pdata->board_tcxo_clock; + wl->platform_quirks = pdata->platform_quirks; - res[0].start = spi->irq; - res[0].flags = IORESOURCE_IRQ; - res[0].name = "irq"; + if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) + irqflags = IRQF_TRIGGER_RISING; + else + irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT; - ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res)); - if (ret) { - dev_err(glue->dev, "can't add resources\n"); - goto out_dev_put; + wl->irq = spi->irq; + if (wl->irq < 0) { + wl1271_error("irq missing in platform data"); + ret = -ENODEV; + goto out_free; } - ret = platform_device_add_data(glue->core, pdata, sizeof(*pdata)); - if (ret) { - dev_err(glue->dev, "can't add platform data\n"); - goto out_dev_put; + ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq, + irqflags, + DRIVER_NAME, wl); + if (ret < 0) { + wl1271_error("request_irq() failed: %d", ret); + goto out_free; } - ret = platform_device_add(glue->core); - if (ret) { - dev_err(glue->dev, "can't register platform device\n"); - goto out_dev_put; - } + disable_irq(wl->irq); + + ret = wl1271_init_ieee80211(wl); + if (ret) + goto out_irq; + + ret = wl1271_register_hw(wl); + if (ret) + goto out_irq; return 0; -out_dev_put: - platform_device_put(glue->core); + out_irq: + free_irq(wl->irq, wl); + + out_free: + wl1271_free_hw(wl); -out_free_glue: - kfree(glue); -out: return ret; } static int __devexit wl1271_remove(struct spi_device *spi) { - struct wl12xx_spi_glue *glue = spi_get_drvdata(spi); + struct wl1271 *wl = dev_get_drvdata(&spi->dev); - platform_device_del(glue->core); - platform_device_put(glue->core); - kfree(glue); + wl1271_unregister_hw(wl); + free_irq(wl->irq, wl); + wl1271_free_hw(wl); return 0; } @@ -410,6 +462,7 @@ static int __devexit wl1271_remove(struct spi_device *spi) static struct spi_driver wl1271_spi_driver = { .driver = { .name = "wl1271_spi", + .bus = &spi_bus_type, .owner = THIS_MODULE, }, diff --git a/trunk/drivers/net/wireless/wl12xx/testmode.c b/trunk/drivers/net/wireless/wl12xx/testmode.c index 25093c0cb0ed..4ae8effaee22 100644 --- a/trunk/drivers/net/wireless/wl12xx/testmode.c +++ b/trunk/drivers/net/wireless/wl12xx/testmode.c @@ -26,10 +26,8 @@ #include #include "wl12xx.h" -#include 
"debug.h" #include "acx.h" #include "reg.h" -#include "ps.h" #define WL1271_TM_MAX_DATA_LENGTH 1024 @@ -38,7 +36,6 @@ enum wl1271_tm_commands { WL1271_TM_CMD_TEST, WL1271_TM_CMD_INTERROGATE, WL1271_TM_CMD_CONFIGURE, - WL1271_TM_CMD_NVS_PUSH, /* Not in use. Keep to not break ABI */ WL1271_TM_CMD_SET_PLT_MODE, WL1271_TM_CMD_RECOVER, @@ -90,47 +87,31 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[]) return -EMSGSIZE; mutex_lock(&wl->mutex); - - if (wl->state == WL1271_STATE_OFF) { - ret = -EINVAL; - goto out; - } - - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) - goto out; - ret = wl1271_cmd_test(wl, buf, buf_len, answer); + mutex_unlock(&wl->mutex); + if (ret < 0) { wl1271_warning("testmode cmd test failed: %d", ret); - goto out_sleep; + return ret; } if (answer) { len = nla_total_size(buf_len); skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len); - if (!skb) { - ret = -ENOMEM; - goto out_sleep; - } + if (!skb) + return -ENOMEM; NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf); ret = cfg80211_testmode_reply(skb); if (ret < 0) - goto out_sleep; + return ret; } -out_sleep: - wl1271_ps_elp_sleep(wl); -out: - mutex_unlock(&wl->mutex); - - return ret; + return 0; nla_put_failure: kfree_skb(skb); - ret = -EMSGSIZE; - goto out_sleep; + return -EMSGSIZE; } static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[]) @@ -147,53 +128,33 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[]) ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]); - mutex_lock(&wl->mutex); - - if (wl->state == WL1271_STATE_OFF) { - ret = -EINVAL; - goto out; - } - - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) - goto out; - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (!cmd) { - ret = -ENOMEM; - goto out_sleep; - } + if (!cmd) + return -ENOMEM; + mutex_lock(&wl->mutex); ret = wl1271_cmd_interrogate(wl, ie_id, cmd, sizeof(*cmd)); + mutex_unlock(&wl->mutex); + if (ret < 0) { wl1271_warning("testmode cmd interrogate failed: %d", ret); - goto out_free; + kfree(cmd); + return ret; } skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd)); if (!skb) { - ret = -ENOMEM; - goto out_free; + kfree(cmd); + return -ENOMEM; } NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd); - ret = cfg80211_testmode_reply(skb); - if (ret < 0) - goto out_free; - -out_free: - kfree(cmd); -out_sleep: - wl1271_ps_elp_sleep(wl); -out: - mutex_unlock(&wl->mutex); - return ret; + return 0; nla_put_failure: kfree_skb(skb); - ret = -EMSGSIZE; - goto out_free; + return -EMSGSIZE; } static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[]) diff --git a/trunk/drivers/net/wireless/wl12xx/tx.c b/trunk/drivers/net/wireless/wl12xx/tx.c index 4508ccd78328..bad9e29d49b0 100644 --- a/trunk/drivers/net/wireless/wl12xx/tx.c +++ b/trunk/drivers/net/wireless/wl12xx/tx.c @@ -26,24 +26,22 @@ #include #include "wl12xx.h" -#include "debug.h" #include "io.h" #include "reg.h" #include "ps.h" #include "tx.h" #include "event.h" -static int wl1271_set_default_wep_key(struct wl1271 *wl, - struct wl12xx_vif *wlvif, u8 id) +static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id) { int ret; - bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); + bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); if (is_ap) ret = wl12xx_cmd_set_default_wep_key(wl, id, - wlvif->ap.bcast_hlid); + wl->ap_bcast_hlid); else - ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid); + ret = wl12xx_cmd_set_default_wep_key(wl, id, wl->sta_hlid); if (ret < 0) return ret; @@ -78,8 +76,7 @@ static void 
wl1271_free_tx_id(struct wl1271 *wl, int id) } static int wl1271_tx_update_filters(struct wl1271 *wl, - struct wl12xx_vif *wlvif, - struct sk_buff *skb) + struct sk_buff *skb) { struct ieee80211_hdr *hdr; int ret; @@ -95,11 +92,15 @@ static int wl1271_tx_update_filters(struct wl1271 *wl, if (!ieee80211_is_auth(hdr->frame_control)) return 0; - if (wlvif->dev_hlid != WL12XX_INVALID_LINK_ID) + if (wl->dev_hlid != WL12XX_INVALID_LINK_ID) goto out; wl1271_debug(DEBUG_CMD, "starting device role for roaming"); - ret = wl12xx_start_dev(wl, wlvif); + ret = wl12xx_cmd_role_start_dev(wl); + if (ret < 0) + goto out; + + ret = wl12xx_roc(wl, wl->dev_role_id); if (ret < 0) goto out; out: @@ -122,16 +123,18 @@ static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl, wl1271_acx_set_inconnection_sta(wl, hdr->addr1); } -static void wl1271_tx_regulate_link(struct wl1271 *wl, - struct wl12xx_vif *wlvif, - u8 hlid) +static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid) { bool fw_ps, single_sta; u8 tx_pkts; - if (WARN_ON(!test_bit(hlid, wlvif->links_map))) + /* only regulate station links */ + if (hlid < WL1271_AP_STA_HLID_START) return; + if (WARN_ON(!wl1271_is_active_sta(wl, hlid))) + return; + fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); tx_pkts = wl->links[hlid].allocated_pkts; single_sta = (wl->active_sta_count == 1); @@ -143,7 +146,7 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl, * case FW-memory congestion is not a problem. */ if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS) - wl12xx_ps_link_start(wl, wlvif, hlid, true); + wl1271_ps_link_start(wl, hlid, true); } bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb) @@ -151,8 +154,7 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb) return wl->dummy_packet == skb; } -u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, - struct sk_buff *skb) +u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb) { struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb); @@ -165,51 +167,49 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, } else { struct ieee80211_hdr *hdr; - if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) + if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) return wl->system_hlid; hdr = (struct ieee80211_hdr *)skb->data; if (ieee80211_is_mgmt(hdr->frame_control)) - return wlvif->ap.global_hlid; + return wl->ap_global_hlid; else - return wlvif->ap.bcast_hlid; + return wl->ap_bcast_hlid; } } -u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif, - struct sk_buff *skb) +static u8 wl1271_tx_get_hlid(struct wl1271 *wl, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - if (!wlvif || wl12xx_is_dummy_packet(wl, skb)) + if (wl12xx_is_dummy_packet(wl, skb)) return wl->system_hlid; - if (wlvif->bss_type == BSS_TYPE_AP_BSS) - return wl12xx_tx_get_hlid_ap(wl, wlvif, skb); + if (wl->bss_type == BSS_TYPE_AP_BSS) + return wl12xx_tx_get_hlid_ap(wl, skb); - wl1271_tx_update_filters(wl, wlvif, skb); + wl1271_tx_update_filters(wl, skb); - if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) || - test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) && + if ((test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) || + test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags)) && !ieee80211_is_auth(hdr->frame_control) && !ieee80211_is_assoc_req(hdr->frame_control)) - return wlvif->sta.hlid; + return wl->sta_hlid; else - return wlvif->dev_hlid; + return wl->dev_hlid; } static unsigned int 
wl12xx_calc_packet_alignment(struct wl1271 *wl, unsigned int packet_length) { - if (wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT) - return ALIGN(packet_length, WL1271_TX_ALIGN_TO); - else + if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT) return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE); + else + return ALIGN(packet_length, WL1271_TX_ALIGN_TO); } -static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif, - struct sk_buff *skb, u32 extra, u32 buf_offset, - u8 hlid) +static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra, + u32 buf_offset, u8 hlid) { struct wl1271_tx_hw_descr *desc; u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra; @@ -217,7 +217,6 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif, u32 total_blocks; int id, ret = -EBUSY, ac; u32 spare_blocks = wl->tx_spare_blocks; - bool is_dummy = false; if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE) return -EAGAIN; @@ -232,10 +231,8 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif, len = wl12xx_calc_packet_alignment(wl, total_len); /* in case of a dummy packet, use default amount of spare mem blocks */ - if (unlikely(wl12xx_is_dummy_packet(wl, skb))) { - is_dummy = true; + if (unlikely(wl12xx_is_dummy_packet(wl, skb))) spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT; - } total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE + spare_blocks; @@ -260,9 +257,8 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif, ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); wl->tx_allocated_pkts[ac]++; - if (!is_dummy && wlvif && - wlvif->bss_type == BSS_TYPE_AP_BSS && - test_bit(hlid, wlvif->ap.sta_hlid_map)) + if (wl->bss_type == BSS_TYPE_AP_BSS && + hlid >= WL1271_AP_STA_HLID_START) wl->links[hlid].allocated_pkts++; ret = 0; @@ -277,16 +273,15 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif, return ret; } -static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif, - struct sk_buff *skb, u32 extra, - struct ieee80211_tx_info *control, u8 hlid) +static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb, + u32 extra, struct ieee80211_tx_info *control, + u8 hlid) { struct timespec ts; struct wl1271_tx_hw_descr *desc; int aligned_len, ac, rate_idx; s64 hosttime; - u16 tx_attr = 0; - bool is_dummy; + u16 tx_attr; desc = (struct wl1271_tx_hw_descr *) skb->data; @@ -303,8 +298,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif, hosttime = (timespec_to_ns(&ts) >> 10); desc->start_time = cpu_to_le32(hosttime - wl->time_offset); - is_dummy = wl12xx_is_dummy_packet(wl, skb); - if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS) + if (wl->bss_type != BSS_TYPE_AP_BSS) desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU); else desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU); @@ -313,42 +307,39 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif, ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); desc->tid = skb->priority; - if (is_dummy) { + if (wl12xx_is_dummy_packet(wl, skb)) { /* * FW expects the dummy packet to have an invalid session id - * any session id that is different than the one set in the join */ - tx_attr = (SESSION_COUNTER_INVALID << + tx_attr = ((~wl->session_counter) << TX_HW_ATTR_OFST_SESSION_COUNTER) & TX_HW_ATTR_SESSION_COUNTER; tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ; - } else if (wlvif) { + } else { /* configure the tx attributes */ - tx_attr = 
wlvif->session_counter << - TX_HW_ATTR_OFST_SESSION_COUNTER; + tx_attr = + wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER; } desc->hlid = hlid; - if (is_dummy || !wlvif) - rate_idx = 0; - else if (wlvif->bss_type != BSS_TYPE_AP_BSS) { + + if (wl->bss_type != BSS_TYPE_AP_BSS) { /* if the packets are destined for AP (have a STA entry) send them with AP rate policies, otherwise use default basic rates */ - if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE) - rate_idx = wlvif->sta.p2p_rate_idx; - else if (control->control.sta) - rate_idx = wlvif->sta.ap_rate_idx; + if (control->control.sta) + rate_idx = ACX_TX_AP_FULL_RATE; else - rate_idx = wlvif->sta.basic_rate_idx; + rate_idx = ACX_TX_BASIC_RATE; } else { - if (hlid == wlvif->ap.global_hlid) - rate_idx = wlvif->ap.mgmt_rate_idx; - else if (hlid == wlvif->ap.bcast_hlid) - rate_idx = wlvif->ap.bcast_rate_idx; + if (hlid == wl->ap_global_hlid) + rate_idx = ACX_TX_AP_MODE_MGMT_RATE; + else if (hlid == wl->ap_bcast_hlid) + rate_idx = ACX_TX_AP_MODE_BCST_RATE; else - rate_idx = wlvif->ap.ucast_rate_idx[ac]; + rate_idx = ac; } tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY; @@ -388,24 +379,20 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif, } /* caller must hold wl->mutex */ -static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif, - struct sk_buff *skb, u32 buf_offset) +static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb, + u32 buf_offset) { struct ieee80211_tx_info *info; u32 extra = 0; int ret = 0; u32 total_len; u8 hlid; - bool is_dummy; if (!skb) return -EINVAL; info = IEEE80211_SKB_CB(skb); - /* TODO: handle dummy packets on multi-vifs */ - is_dummy = wl12xx_is_dummy_packet(wl, skb); - if (info->control.hw_key && info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) extra = WL1271_TKIP_IV_SPACE; @@ -418,28 +405,29 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif, is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) || (cipher == WLAN_CIPHER_SUITE_WEP104); - if (unlikely(is_wep && wlvif->default_key != idx)) { - ret = wl1271_set_default_wep_key(wl, wlvif, idx); + if (unlikely(is_wep && wl->default_key != idx)) { + ret = wl1271_set_default_wep_key(wl, idx); if (ret < 0) return ret; - wlvif->default_key = idx; + wl->default_key = idx; } } - hlid = wl12xx_tx_get_hlid(wl, wlvif, skb); + + hlid = wl1271_tx_get_hlid(wl, skb); if (hlid == WL12XX_INVALID_LINK_ID) { wl1271_error("invalid hlid. 
dropping skb 0x%p", skb); return -EINVAL; } - ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid); + ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid); if (ret < 0) return ret; - wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid); + wl1271_tx_fill_hdr(wl, skb, extra, info, hlid); - if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) { + if (wl->bss_type == BSS_TYPE_AP_BSS) { wl1271_tx_ap_update_inconnection_sta(wl, skb); - wl1271_tx_regulate_link(wl, wlvif, hlid); + wl1271_tx_regulate_link(wl, hlid); } /* @@ -456,7 +444,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif, memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len); /* Revert side effects in the dummy packet skb, so it can be reused */ - if (is_dummy) + if (wl12xx_is_dummy_packet(wl, skb)) skb_pull(skb, sizeof(struct wl1271_tx_hw_descr)); return total_len; @@ -534,18 +522,19 @@ static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl, return &queues[q]; } -static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl, - struct wl1271_link *lnk) +static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl) { - struct sk_buff *skb; + struct sk_buff *skb = NULL; unsigned long flags; struct sk_buff_head *queue; - queue = wl1271_select_queue(wl, lnk->tx_queue); + queue = wl1271_select_queue(wl, wl->tx_queue); if (!queue) - return NULL; + goto out; skb = skb_dequeue(queue); + +out: if (skb) { int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); spin_lock_irqsave(&wl->wl_lock, flags); @@ -556,33 +545,43 @@ static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl, return skb; } -static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl, - struct wl12xx_vif *wlvif) +static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl) { struct sk_buff *skb = NULL; + unsigned long flags; int i, h, start_hlid; + struct sk_buff_head *queue; /* start from the link after the last one */ - start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS; + start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS; /* dequeue according to AC, round robin on each link */ - for (i = 0; i < WL12XX_MAX_LINKS; i++) { - h = (start_hlid + i) % WL12XX_MAX_LINKS; + for (i = 0; i < AP_MAX_LINKS; i++) { + h = (start_hlid + i) % AP_MAX_LINKS; /* only consider connected stations */ - if (!test_bit(h, wlvif->links_map)) + if (h >= WL1271_AP_STA_HLID_START && + !test_bit(h - WL1271_AP_STA_HLID_START, wl->ap_hlid_map)) continue; - skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]); - if (!skb) + queue = wl1271_select_queue(wl, wl->links[h].tx_queue); + if (!queue) continue; - wlvif->last_tx_hlid = h; - break; + skb = skb_dequeue(queue); + if (skb) + break; } - if (!skb) - wlvif->last_tx_hlid = 0; + if (skb) { + int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); + wl->last_tx_hlid = h; + spin_lock_irqsave(&wl->wl_lock, flags); + wl->tx_queue_count[q]--; + spin_unlock_irqrestore(&wl->wl_lock, flags); + } else { + wl->last_tx_hlid = 0; + } return skb; } @@ -590,32 +589,12 @@ static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl, static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl) { unsigned long flags; - struct wl12xx_vif *wlvif = wl->last_wlvif; struct sk_buff *skb = NULL; - if (wlvif) { - wl12xx_for_each_wlvif_continue(wl, wlvif) { - skb = wl12xx_vif_skb_dequeue(wl, wlvif); - if (skb) { - wl->last_wlvif = wlvif; - break; - } - } - } - - /* do another pass */ - if (!skb) { - wl12xx_for_each_wlvif(wl, wlvif) { - skb = wl12xx_vif_skb_dequeue(wl, 
wlvif); - if (skb) { - wl->last_wlvif = wlvif; - break; - } - } - } - - if (!skb) - skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]); + if (wl->bss_type == BSS_TYPE_AP_BSS) + skb = wl1271_ap_skb_dequeue(wl); + else + skb = wl1271_sta_skb_dequeue(wl); if (!skb && test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) { @@ -631,21 +610,21 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl) return skb; } -static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif, - struct sk_buff *skb) +static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb) { unsigned long flags; int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); if (wl12xx_is_dummy_packet(wl, skb)) { set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags); - } else { - u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb); + } else if (wl->bss_type == BSS_TYPE_AP_BSS) { + u8 hlid = wl1271_tx_get_hlid(wl, skb); skb_queue_head(&wl->links[hlid].tx_queue[q], skb); /* make sure we dequeue the same packet next time */ - wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) % - WL12XX_MAX_LINKS; + wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS; + } else { + skb_queue_head(&wl->tx_queue[q], skb); } spin_lock_irqsave(&wl->wl_lock, flags); @@ -660,71 +639,29 @@ static bool wl1271_tx_is_data_present(struct sk_buff *skb) return ieee80211_is_data_present(hdr->frame_control); } -void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids) -{ - struct wl12xx_vif *wlvif; - u32 timeout; - u8 hlid; - - if (!wl->conf.rx_streaming.interval) - return; - - if (!wl->conf.rx_streaming.always && - !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)) - return; - - timeout = wl->conf.rx_streaming.duration; - wl12xx_for_each_wlvif_sta(wl, wlvif) { - bool found = false; - for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) { - if (test_bit(hlid, wlvif->links_map)) { - found = true; - break; - } - } - - if (!found) - continue; - - /* enable rx streaming */ - if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags)) - ieee80211_queue_work(wl->hw, - &wlvif->rx_streaming_enable_work); - - mod_timer(&wlvif->rx_streaming_timer, - jiffies + msecs_to_jiffies(timeout)); - } -} - void wl1271_tx_work_locked(struct wl1271 *wl) { - struct wl12xx_vif *wlvif; struct sk_buff *skb; - struct wl1271_tx_hw_descr *desc; u32 buf_offset = 0; bool sent_packets = false; - unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0}; + bool had_data = false; + bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); int ret; if (unlikely(wl->state == WL1271_STATE_OFF)) return; while ((skb = wl1271_skb_dequeue(wl))) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - bool has_data = false; + if (wl1271_tx_is_data_present(skb)) + had_data = true; - wlvif = NULL; - if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif) - wlvif = wl12xx_vif_to_data(info->control.vif); - - has_data = wlvif && wl1271_tx_is_data_present(skb); - ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset); + ret = wl1271_prepare_tx_frame(wl, skb, buf_offset); if (ret == -EAGAIN) { /* * Aggregation buffer is full. * Flush buffer and try again. */ - wl1271_skb_queue_head(wl, wlvif, skb); + wl1271_skb_queue_head(wl, skb); wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf, buf_offset, true); sent_packets = true; @@ -735,27 +672,16 @@ void wl1271_tx_work_locked(struct wl1271 *wl) * Firmware buffer is full. * Queue back last skb, and stop aggregating. 
*/ - wl1271_skb_queue_head(wl, wlvif, skb); + wl1271_skb_queue_head(wl, skb); /* No work left, avoid scheduling redundant tx work */ set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); goto out_ack; } else if (ret < 0) { - if (wl12xx_is_dummy_packet(wl, skb)) - /* - * fw still expects dummy packet, - * so re-enqueue it - */ - wl1271_skb_queue_head(wl, wlvif, skb); - else - ieee80211_free_txskb(wl->hw, skb); + dev_kfree_skb(skb); goto out_ack; } buf_offset += ret; wl->tx_packets_count++; - if (has_data) { - desc = (struct wl1271_tx_hw_descr *) skb->data; - __set_bit(desc->hlid, active_hlids); - } } out_ack: @@ -775,7 +701,19 @@ void wl1271_tx_work_locked(struct wl1271 *wl) wl1271_handle_tx_low_watermark(wl); } - wl12xx_rearm_rx_streaming(wl, active_hlids); + if (!is_ap && wl->conf.rx_streaming.interval && had_data && + (wl->conf.rx_streaming.always || + test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) { + u32 timeout = wl->conf.rx_streaming.duration; + + /* enable rx streaming */ + if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags)) + ieee80211_queue_work(wl->hw, + &wl->rx_streaming_enable_work); + + mod_timer(&wl->rx_streaming_timer, + jiffies + msecs_to_jiffies(timeout)); + } } void wl1271_tx_work(struct work_struct *work) @@ -799,8 +737,6 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl, struct wl1271_tx_hw_res_descr *result) { struct ieee80211_tx_info *info; - struct ieee80211_vif *vif; - struct wl12xx_vif *wlvif; struct sk_buff *skb; int id = result->id; int rate = -1; @@ -820,16 +756,11 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl, return; } - /* info->control is valid as long as we don't update info->status */ - vif = info->control.vif; - wlvif = wl12xx_vif_to_data(vif); - /* update the TX status info */ if (result->status == TX_SUCCESS) { if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) info->flags |= IEEE80211_TX_STAT_ACK; - rate = wl1271_rate_to_idx(result->rate_class_index, - wlvif->band); + rate = wl1271_rate_to_idx(result->rate_class_index, wl->band); retries = result->ack_failures; } else if (result->status == TX_RETRY_EXCEEDED) { wl->stats.excessive_retries++; @@ -852,14 +783,14 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl, info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP || info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) { u8 fw_lsb = result->tx_security_sequence_number_lsb; - u8 cur_lsb = wlvif->tx_security_last_seq_lsb; + u8 cur_lsb = wl->tx_security_last_seq_lsb; /* * update security sequence number, taking care of potential * wrap-around */ - wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff; - wlvif->tx_security_last_seq_lsb = fw_lsb; + wl->tx_security_seq += (fw_lsb - cur_lsb + 256) % 256; + wl->tx_security_last_seq_lsb = fw_lsb; } /* remove private header from packet */ @@ -955,30 +886,39 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid) } /* caller must hold wl->mutex and TX must be stopped */ -void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif) +void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues) { int i; + struct sk_buff *skb; + struct ieee80211_tx_info *info; /* TX failure */ - for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) { - if (wlvif->bss_type == BSS_TYPE_AP_BSS) - wl1271_free_sta(wl, wlvif, i); - else - wlvif->sta.ba_rx_bitmap = 0; + if (wl->bss_type == BSS_TYPE_AP_BSS) { + for (i = 0; i < AP_MAX_LINKS; i++) { + wl1271_free_sta(wl, i); + wl1271_tx_reset_link_queues(wl, i); + wl->links[i].allocated_pkts = 0; + wl->links[i].prev_freed_pkts = 0; + } - 
wl1271_tx_reset_link_queues(wl, i); - wl->links[i].allocated_pkts = 0; - wl->links[i].prev_freed_pkts = 0; - } - wlvif->last_tx_hlid = 0; + wl->last_tx_hlid = 0; + } else { + for (i = 0; i < NUM_TX_QUEUES; i++) { + while ((skb = skb_dequeue(&wl->tx_queue[i]))) { + wl1271_debug(DEBUG_TX, "freeing skb 0x%p", + skb); + + if (!wl12xx_is_dummy_packet(wl, skb)) { + info = IEEE80211_SKB_CB(skb); + info->status.rates[0].idx = -1; + info->status.rates[0].count = 0; + ieee80211_tx_status_ni(wl->hw, skb); + } + } + } -} -/* caller must hold wl->mutex and TX must be stopped */ -void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues) -{ - int i; - struct sk_buff *skb; - struct ieee80211_tx_info *info; + wl->ba_rx_bitmap = 0; + } for (i = 0; i < NUM_TX_QUEUES; i++) wl->tx_queue_count[i] = 0; diff --git a/trunk/drivers/net/wireless/wl12xx/tx.h b/trunk/drivers/net/wireless/wl12xx/tx.h index 2dbb24e6d541..dc4f09adf088 100644 --- a/trunk/drivers/net/wireless/wl12xx/tx.h +++ b/trunk/drivers/net/wireless/wl12xx/tx.h @@ -206,23 +206,18 @@ static inline int wl1271_tx_total_queue_count(struct wl1271 *wl) void wl1271_tx_work(struct work_struct *work); void wl1271_tx_work_locked(struct wl1271 *wl); void wl1271_tx_complete(struct wl1271 *wl); -void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif); -void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues); +void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues); void wl1271_tx_flush(struct wl1271 *wl); u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set, enum ieee80211_band rate_band); u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set); -u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, - struct sk_buff *skb); -u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif, - struct sk_buff *skb); +u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb); void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid); void wl1271_handle_tx_low_watermark(struct wl1271 *wl); bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb); -void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids); /* from main.c */ -void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid); +void wl1271_free_sta(struct wl1271 *wl, u8 hlid); #endif diff --git a/trunk/drivers/net/wireless/wl12xx/wl12xx.h b/trunk/drivers/net/wireless/wl12xx/wl12xx.h index b2b09cd02022..1ec90fc7505e 100644 --- a/trunk/drivers/net/wireless/wl12xx/wl12xx.h +++ b/trunk/drivers/net/wireless/wl12xx/wl12xx.h @@ -35,6 +35,83 @@ #include "conf.h" #include "ini.h" +#define DRIVER_NAME "wl1271" +#define DRIVER_PREFIX DRIVER_NAME ": " + +/* + * FW versions support BA 11n + * versions marks x.x.x.50-60.x + */ +#define WL12XX_BA_SUPPORT_FW_COST_VER2_START 50 +#define WL12XX_BA_SUPPORT_FW_COST_VER2_END 60 + +enum { + DEBUG_NONE = 0, + DEBUG_IRQ = BIT(0), + DEBUG_SPI = BIT(1), + DEBUG_BOOT = BIT(2), + DEBUG_MAILBOX = BIT(3), + DEBUG_TESTMODE = BIT(4), + DEBUG_EVENT = BIT(5), + DEBUG_TX = BIT(6), + DEBUG_RX = BIT(7), + DEBUG_SCAN = BIT(8), + DEBUG_CRYPT = BIT(9), + DEBUG_PSM = BIT(10), + DEBUG_MAC80211 = BIT(11), + DEBUG_CMD = BIT(12), + DEBUG_ACX = BIT(13), + DEBUG_SDIO = BIT(14), + DEBUG_FILTERS = BIT(15), + DEBUG_ADHOC = BIT(16), + DEBUG_AP = BIT(17), + DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP), + DEBUG_ALL = ~0, +}; + +extern u32 wl12xx_debug_level; + +#define DEBUG_DUMP_LIMIT 1024 + +#define wl1271_error(fmt, arg...) 
\ + pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg) + +#define wl1271_warning(fmt, arg...) \ + pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg) + +#define wl1271_notice(fmt, arg...) \ + pr_info(DRIVER_PREFIX fmt "\n", ##arg) + +#define wl1271_info(fmt, arg...) \ + pr_info(DRIVER_PREFIX fmt "\n", ##arg) + +#define wl1271_debug(level, fmt, arg...) \ + do { \ + if (level & wl12xx_debug_level) \ + pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \ + } while (0) + +/* TODO: use pr_debug_hex_dump when it will be available */ +#define wl1271_dump(level, prefix, buf, len) \ + do { \ + if (level & wl12xx_debug_level) \ + print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \ + DUMP_PREFIX_OFFSET, 16, 1, \ + buf, \ + min_t(size_t, len, DEBUG_DUMP_LIMIT), \ + 0); \ + } while (0) + +#define wl1271_dump_ascii(level, prefix, buf, len) \ + do { \ + if (level & wl12xx_debug_level) \ + print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \ + DUMP_PREFIX_OFFSET, 16, 1, \ + buf, \ + min_t(size_t, len, DEBUG_DUMP_LIMIT), \ + true); \ + } while (0) + #define WL127X_FW_NAME "ti-connectivity/wl127x-fw-3.bin" #define WL128X_FW_NAME "ti-connectivity/wl128x-fw-3.bin" @@ -65,11 +142,15 @@ #define WL12XX_INVALID_ROLE_ID 0xff #define WL12XX_INVALID_LINK_ID 0xff -#define WL12XX_MAX_RATE_POLICIES 16 - /* Defined by FW as 0. Will not be freed or allocated. */ #define WL12XX_SYSTEM_HLID 0 +/* + * TODO: we currently don't support multirole. remove + * this constant from the code when we do. + */ +#define WL1271_AP_STA_HLID_START 3 + /* * When in AP-mode, we allow (at least) this number of packets * to be transmitted to FW for a STA in PS-mode. Only when packets are @@ -155,6 +236,13 @@ struct wl1271_stats { #define AP_MAX_STATIONS 8 +/* Broadcast and Global links + system link + links to stations */ +/* + * TODO: when WL1271_AP_STA_HLID_START is no longer constant, change all + * the places that use this. 
+ */ +#define AP_MAX_LINKS (AP_MAX_STATIONS + WL1271_AP_STA_HLID_START) + /* FW status registers */ struct wl12xx_fw_status { __le32 intr; @@ -211,14 +299,17 @@ struct wl1271_scan { }; struct wl1271_if_operations { - void (*read)(struct device *child, int addr, void *buf, size_t len, + void (*read)(struct wl1271 *wl, int addr, void *buf, size_t len, bool fixed); - void (*write)(struct device *child, int addr, void *buf, size_t len, + void (*write)(struct wl1271 *wl, int addr, void *buf, size_t len, bool fixed); - void (*reset)(struct device *child); - void (*init)(struct device *child); - int (*power)(struct device *child, bool enable); - void (*set_block_size) (struct device *child, unsigned int blksz); + void (*reset)(struct wl1271 *wl); + void (*init)(struct wl1271 *wl); + int (*power)(struct wl1271 *wl, bool enable); + struct device* (*dev)(struct wl1271 *wl); + void (*enable_irq)(struct wl1271 *wl); + void (*disable_irq)(struct wl1271 *wl); + void (*set_block_size) (struct wl1271 *wl, unsigned int blksz); }; #define MAX_NUM_KEYS 14 @@ -235,33 +326,29 @@ struct wl1271_ap_key { }; enum wl12xx_flags { + WL1271_FLAG_STA_ASSOCIATED, + WL1271_FLAG_IBSS_JOINED, WL1271_FLAG_GPIO_POWER, WL1271_FLAG_TX_QUEUE_STOPPED, WL1271_FLAG_TX_PENDING, WL1271_FLAG_IN_ELP, WL1271_FLAG_ELP_REQUESTED, + WL1271_FLAG_PSM, + WL1271_FLAG_PSM_REQUESTED, WL1271_FLAG_IRQ_RUNNING, + WL1271_FLAG_IDLE, + WL1271_FLAG_PSPOLL_FAILURE, + WL1271_FLAG_STA_STATE_SENT, WL1271_FLAG_FW_TX_BUSY, + WL1271_FLAG_AP_STARTED, + WL1271_FLAG_IF_INITIALIZED, WL1271_FLAG_DUMMY_PACKET_PENDING, WL1271_FLAG_SUSPENDED, WL1271_FLAG_PENDING_WORK, WL1271_FLAG_SOFT_GEMINI, + WL1271_FLAG_RX_STREAMING_STARTED, WL1271_FLAG_RECOVERY_IN_PROGRESS, -}; - -enum wl12xx_vif_flags { - WLVIF_FLAG_INITIALIZED, - WLVIF_FLAG_STA_ASSOCIATED, - WLVIF_FLAG_IBSS_JOINED, - WLVIF_FLAG_AP_STARTED, - WLVIF_FLAG_PSM, - WLVIF_FLAG_PSM_REQUESTED, - WLVIF_FLAG_STA_STATE_SENT, - WLVIF_FLAG_RX_STREAMING_STARTED, - WLVIF_FLAG_PSPOLL_FAILURE, - WLVIF_FLAG_CS_PROGRESS, - WLVIF_FLAG_AP_PROBE_RESP_SET, - WLVIF_FLAG_IN_USE, + WL1271_FLAG_CS_PROGRESS, }; struct wl1271_link { @@ -279,11 +366,10 @@ struct wl1271_link { }; struct wl1271 { + struct platform_device *plat_dev; struct ieee80211_hw *hw; bool mac80211_registered; - struct device *dev; - void *if_priv; struct wl1271_if_operations *if_ops; @@ -313,20 +399,25 @@ struct wl1271 { s8 hw_pg_ver; + u8 bssid[ETH_ALEN]; u8 mac_addr[ETH_ALEN]; + u8 bss_type; + u8 set_bss_type; + u8 p2p; /* we are using p2p role */ + u8 ssid[IEEE80211_MAX_SSID_LEN + 1]; + u8 ssid_len; int channel; + u8 role_id; + u8 dev_role_id; u8 system_hlid; + u8 sta_hlid; + u8 dev_hlid; + u8 ap_global_hlid; + u8 ap_bcast_hlid; unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)]; unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)]; unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)]; - unsigned long rate_policies_map[ - BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)]; - - struct list_head wlvif_list; - - u8 sta_count; - u8 ap_count; struct wl1271_acx_mem_map *target_mem_map; @@ -349,7 +440,11 @@ struct wl1271 { /* Time-offset between host and chipset clocks */ s64 time_offset; + /* Session counter for the chipset */ + int session_counter; + /* Frames scheduled for transmission, not handled yet */ + struct sk_buff_head tx_queue[NUM_TX_QUEUES]; int tx_queue_count[NUM_TX_QUEUES]; long stopped_queues_map; @@ -367,6 +462,17 @@ struct wl1271 { struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS]; int tx_frames_cnt; + /* + * Security sequence number + * bits 0-15: lower 16 
bits part of sequence number + * bits 16-47: higher 32 bits part of sequence number + * bits 48-63: not in use + */ + u64 tx_security_seq; + + /* 8 bits of the last sequence number in use */ + u8 tx_security_last_seq_lsb; + /* FW Rx counter */ u32 rx_counter; @@ -401,21 +507,59 @@ struct wl1271 { u32 mbox_ptr[2]; /* Are we currently scanning */ - struct ieee80211_vif *scan_vif; struct wl1271_scan scan; struct delayed_work scan_complete_work; bool sched_scanning; + /* probe-req template for the current AP */ + struct sk_buff *probereq; + + /* Our association ID */ + u16 aid; + + /* + * currently configured rate set: + * bits 0-15 - 802.11abg rates + * bits 16-23 - 802.11n MCS index mask + * support only 1 stream, thus only 8 bits for the MCS rates (0-7). + */ + u32 basic_rate_set; + u32 basic_rate; + u32 rate_set; + u32 bitrate_masks[IEEE80211_NUM_BANDS]; + /* The current band */ enum ieee80211_band band; + /* Beaconing interval (needed for ad-hoc) */ + u32 beacon_int; + + /* Default key (for WEP) */ + u32 default_key; + + /* Rx Streaming */ + struct work_struct rx_streaming_enable_work; + struct work_struct rx_streaming_disable_work; + struct timer_list rx_streaming_timer; + struct completion *elp_compl; + struct completion *ps_compl; struct delayed_work elp_work; + struct delayed_work pspoll_work; + + /* counter for ps-poll delivery failures */ + int ps_poll_failures; + + /* retry counter for PSM entries */ + u8 psm_entry_retry; /* in dBm */ int power_level; + int rssi_thold; + int last_rssi_event; + struct wl1271_stats stats; __le32 buffer_32; @@ -439,9 +583,20 @@ struct wl1271 { /* Most recently reported noise in dBm */ s8 noise; + /* map for HLIDs of associated stations - when operating in AP mode */ + unsigned long ap_hlid_map[BITS_TO_LONGS(AP_MAX_STATIONS)]; + + /* recoreded keys for AP-mode - set here before AP startup */ + struct wl1271_ap_key *recorded_ap_keys[MAX_NUM_KEYS]; + /* bands supported by this instance of wl12xx */ struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; + /* RX BA constraint value */ + bool ba_support; + u8 ba_rx_bitmap; + bool ba_allowed; + int tcxo_clock; /* @@ -455,7 +610,10 @@ struct wl1271 { * AP-mode - links indexed by HLID. The global and broadcast links * are always active. 
*/ - struct wl1271_link links[WL12XX_MAX_LINKS]; + struct wl1271_link links[AP_MAX_LINKS]; + + /* the hlid of the link where the last transmitted skb came from */ + int last_tx_hlid; /* AP-mode - a bitmap of links currently in PS mode according to FW */ u32 ap_fw_ps_map; @@ -474,173 +632,21 @@ struct wl1271 { /* AP-mode - number of currently connected stations */ int active_sta_count; - - /* last wlvif we transmitted from */ - struct wl12xx_vif *last_wlvif; }; struct wl1271_station { u8 hlid; }; -struct wl12xx_vif { - struct wl1271 *wl; - struct list_head list; - unsigned long flags; - u8 bss_type; - u8 p2p; /* we are using p2p role */ - u8 role_id; - - /* sta/ibss specific */ - u8 dev_role_id; - u8 dev_hlid; - - union { - struct { - u8 hlid; - u8 ba_rx_bitmap; - - u8 basic_rate_idx; - u8 ap_rate_idx; - u8 p2p_rate_idx; - } sta; - struct { - u8 global_hlid; - u8 bcast_hlid; - - /* HLIDs bitmap of associated stations */ - unsigned long sta_hlid_map[BITS_TO_LONGS( - WL12XX_MAX_LINKS)]; - - /* recoreded keys - set here before AP startup */ - struct wl1271_ap_key *recorded_keys[MAX_NUM_KEYS]; - - u8 mgmt_rate_idx; - u8 bcast_rate_idx; - u8 ucast_rate_idx[CONF_TX_MAX_AC_COUNT]; - } ap; - }; - - /* the hlid of the last transmitted skb */ - int last_tx_hlid; - - unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)]; - - u8 ssid[IEEE80211_MAX_SSID_LEN + 1]; - u8 ssid_len; - - /* The current band */ - enum ieee80211_band band; - int channel; - - u32 bitrate_masks[IEEE80211_NUM_BANDS]; - u32 basic_rate_set; - - /* - * currently configured rate set: - * bits 0-15 - 802.11abg rates - * bits 16-23 - 802.11n MCS index mask - * support only 1 stream, thus only 8 bits for the MCS rates (0-7). - */ - u32 basic_rate; - u32 rate_set; - - /* probe-req template for the current AP */ - struct sk_buff *probereq; - - /* Beaconing interval (needed for ad-hoc) */ - u32 beacon_int; - - /* Default key (for WEP) */ - u32 default_key; - - /* Our association ID */ - u16 aid; - - /* Session counter for the chipset */ - int session_counter; - - struct completion *ps_compl; - struct delayed_work pspoll_work; - - /* counter for ps-poll delivery failures */ - int ps_poll_failures; - - /* retry counter for PSM entries */ - u8 psm_entry_retry; - - /* in dBm */ - int power_level; - - int rssi_thold; - int last_rssi_event; - - /* RX BA constraint value */ - bool ba_support; - bool ba_allowed; - - /* Rx Streaming */ - struct work_struct rx_streaming_enable_work; - struct work_struct rx_streaming_disable_work; - struct timer_list rx_streaming_timer; - - /* - * This struct must be last! - * data that has to be saved acrossed reconfigs (e.g. recovery) - * should be declared in this struct. 
- */ - struct { - u8 persistent[0]; - /* - * Security sequence number - * bits 0-15: lower 16 bits part of sequence number - * bits 16-47: higher 32 bits part of sequence number - * bits 48-63: not in use - */ - u64 tx_security_seq; - - /* 8 bits of the last sequence number in use */ - u8 tx_security_last_seq_lsb; - }; -}; - -static inline struct wl12xx_vif *wl12xx_vif_to_data(struct ieee80211_vif *vif) -{ - return (struct wl12xx_vif *)vif->drv_priv; -} - -static inline -struct ieee80211_vif *wl12xx_wlvif_to_vif(struct wl12xx_vif *wlvif) -{ - return container_of((void *)wlvif, struct ieee80211_vif, drv_priv); -} - -#define wl12xx_for_each_wlvif(wl, wlvif) \ - list_for_each_entry(wlvif, &wl->wlvif_list, list) - -#define wl12xx_for_each_wlvif_continue(wl, wlvif) \ - list_for_each_entry_continue(wlvif, &wl->wlvif_list, list) - -#define wl12xx_for_each_wlvif_bss_type(wl, wlvif, _bss_type) \ - wl12xx_for_each_wlvif(wl, wlvif) \ - if (wlvif->bss_type == _bss_type) - -#define wl12xx_for_each_wlvif_sta(wl, wlvif) \ - wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_STA_BSS) - -#define wl12xx_for_each_wlvif_ap(wl, wlvif) \ - wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_AP_BSS) - int wl1271_plt_start(struct wl1271 *wl); int wl1271_plt_stop(struct wl1271 *wl); -int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl1271_recalc_rx_streaming(struct wl1271 *wl); void wl12xx_queue_recovery_work(struct wl1271 *wl); size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen); #define JOIN_TIMEOUT 5000 /* 5000 milliseconds to join */ -#define SESSION_COUNTER_MAX 6 /* maximum value for the session counter */ -#define SESSION_COUNTER_INVALID 7 /* used with dummy_packet */ +#define SESSION_COUNTER_MAX 7 /* maximum value for the session counter */ #define WL1271_DEFAULT_POWER_LEVEL 0 @@ -663,8 +669,8 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen); /* Each RX/TX transaction requires an end-of-transaction transfer */ #define WL12XX_QUIRK_END_OF_TRANSACTION BIT(0) -/* wl127x and SPI don't support SDIO block size alignment */ -#define WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT BIT(2) +/* WL128X requires aggregated packets to be aligned to the SDIO block size */ +#define WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT BIT(2) /* Older firmwares did not implement the FW logger over bus feature */ #define WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED BIT(4) diff --git a/trunk/drivers/net/wireless/wl12xx/wl12xx_80211.h b/trunk/drivers/net/wireless/wl12xx/wl12xx_80211.h index 8f0ffaf62309..f7971d3b0898 100644 --- a/trunk/drivers/net/wireless/wl12xx/wl12xx_80211.h +++ b/trunk/drivers/net/wireless/wl12xx/wl12xx_80211.h @@ -116,6 +116,11 @@ struct wl12xx_ps_poll_template { u8 ta[ETH_ALEN]; } __packed; +struct wl12xx_qos_null_data_template { + struct ieee80211_header header; + __le16 qos_ctl; +} __packed; + struct wl12xx_arp_rsp_template { struct ieee80211_hdr_3addr hdr; diff --git a/trunk/drivers/net/wireless/wl12xx/wl12xx_platform_data.c b/trunk/drivers/net/wireless/wl12xx/wl12xx_platform_data.c index 998e95895f9d..973b11060a8f 100644 --- a/trunk/drivers/net/wireless/wl12xx/wl12xx_platform_data.c +++ b/trunk/drivers/net/wireless/wl12xx/wl12xx_platform_data.c @@ -1,29 +1,8 @@ -/* - * This file is part of wl12xx - * - * Copyright (C) 2010-2011 Texas Instruments, Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - */ - #include #include #include -static struct wl12xx_platform_data *platform_data; +static const struct wl12xx_platform_data *platform_data; int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data) { @@ -39,7 +18,7 @@ int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data) return 0; } -struct wl12xx_platform_data *wl12xx_get_platform_data(void) +const struct wl12xx_platform_data *wl12xx_get_platform_data(void) { if (!platform_data) return ERR_PTR(-ENODEV); diff --git a/trunk/drivers/net/xen-netback/interface.c b/trunk/drivers/net/xen-netback/interface.c index b7d41f8c338a..182562952c79 100644 --- a/trunk/drivers/net/xen-netback/interface.c +++ b/trunk/drivers/net/xen-netback/interface.c @@ -165,8 +165,7 @@ static int xenvif_change_mtu(struct net_device *dev, int mtu) return 0; } -static netdev_features_t xenvif_fix_features(struct net_device *dev, - netdev_features_t features) +static u32 xenvif_fix_features(struct net_device *dev, u32 features) { struct xenvif *vif = netdev_priv(dev); @@ -223,7 +222,7 @@ static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data) } } -static const struct ethtool_ops xenvif_ethtool_ops = { +static struct ethtool_ops xenvif_ethtool_ops = { .get_link = ethtool_op_get_link, .get_sset_count = xenvif_get_sset_count, @@ -231,7 +230,7 @@ static const struct ethtool_ops xenvif_ethtool_ops = { .get_strings = xenvif_get_strings, }; -static const struct net_device_ops xenvif_netdev_ops = { +static struct net_device_ops xenvif_netdev_ops = { .ndo_start_xmit = xenvif_start_xmit, .ndo_get_stats = xenvif_get_stats, .ndo_open = xenvif_open, diff --git a/trunk/drivers/net/xen-netback/netback.c b/trunk/drivers/net/xen-netback/netback.c index 639cf8ab62ba..15e332d08c8d 100644 --- a/trunk/drivers/net/xen-netback/netback.c +++ b/trunk/drivers/net/xen-netback/netback.c @@ -395,7 +395,7 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, struct gnttab_copy *copy_gop; struct netbk_rx_meta *meta; /* - * These variables are used iff get_page_ext returns true, + * These variables a used iff get_page_ext returns true, * in which case they are guaranteed to be initialized. 
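Since wl12xx_get_platform_data() above returns an ERR_PTR-encoded error when no board data was registered, callers are expected to test the result with IS_ERR() before using it. A hypothetical caller-side sketch is shown below; only the accessor and the ERR_PTR convention come from the code above, the probe function and the commented field use are assumptions.

#include <linux/err.h>
#include <linux/wl12xx.h>

/* Hypothetical caller, for illustration only */
static int example_probe(void)
{
	const struct wl12xx_platform_data *pdata = wl12xx_get_platform_data();

	if (IS_ERR(pdata))
		return PTR_ERR(pdata);	/* -ENODEV when nothing was registered */

	/* ... use the board data here (IRQ number, reference clock, ...) ... */
	return 0;
}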
*/ unsigned int uninitialized_var(group), uninitialized_var(idx); @@ -940,6 +940,8 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, if (!page) return NULL; + netbk->mmap_pages[pending_idx] = page; + gop->source.u.ref = txp->gref; gop->source.domid = vif->domid; gop->source.offset = txp->offset; @@ -1334,6 +1336,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) continue; } + netbk->mmap_pages[pending_idx] = page; + gop->source.u.ref = txreq.gref; gop->source.domid = vif->domid; gop->source.offset = txreq.offset; diff --git a/trunk/drivers/net/xen-netfront.c b/trunk/drivers/net/xen-netfront.c index 0a59c57864f5..226faab23603 100644 --- a/trunk/drivers/net/xen-netfront.c +++ b/trunk/drivers/net/xen-netfront.c @@ -201,7 +201,7 @@ static void xennet_sysfs_delif(struct net_device *netdev); #define xennet_sysfs_delif(dev) do { } while (0) #endif -static bool xennet_can_sg(struct net_device *dev) +static int xennet_can_sg(struct net_device *dev) { return dev->features & NETIF_F_SG; } @@ -1190,8 +1190,7 @@ static void xennet_uninit(struct net_device *dev) gnttab_free_grant_references(np->gref_rx_head); } -static netdev_features_t xennet_fix_features(struct net_device *dev, - netdev_features_t features) +static u32 xennet_fix_features(struct net_device *dev, u32 features) { struct netfront_info *np = netdev_priv(dev); int val; @@ -1217,8 +1216,7 @@ static netdev_features_t xennet_fix_features(struct net_device *dev, return features; } -static int xennet_set_features(struct net_device *dev, - netdev_features_t features) +static int xennet_set_features(struct net_device *dev, u32 features) { if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) { netdev_info(dev, "Reducing MTU because no SG offload"); @@ -1709,6 +1707,7 @@ static void netback_changed(struct xenbus_device *dev, case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: + case XenbusStateConnected: case XenbusStateUnknown: case XenbusStateClosed: break; @@ -1719,9 +1718,6 @@ static void netback_changed(struct xenbus_device *dev, if (xennet_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); - break; - - case XenbusStateConnected: netif_notify_peers(netdev); break; diff --git a/trunk/drivers/nfc/pn533.c b/trunk/drivers/nfc/pn533.c index b8b6c2abbd4a..7bcb1febef0d 100644 --- a/trunk/drivers/nfc/pn533.c +++ b/trunk/drivers/nfc/pn533.c @@ -72,7 +72,6 @@ MODULE_DEVICE_TABLE(usb, pn533_table); #define PN533_CMD_IN_LIST_PASSIVE_TARGET 0x4A #define PN533_CMD_IN_ATR 0x50 #define PN533_CMD_IN_RELEASE 0x52 -#define PN533_CMD_IN_JUMP_FOR_DEP 0x56 #define PN533_CMD_RESPONSE(cmd) (cmd + 1) @@ -232,26 +231,6 @@ struct pn533_cmd_activate_response { u8 gt[]; } __packed; -/* PN533_CMD_IN_JUMP_FOR_DEP */ -struct pn533_cmd_jump_dep { - u8 active; - u8 baud; - u8 next; - u8 gt[]; -} __packed; - -struct pn533_cmd_jump_dep_response { - u8 status; - u8 tg; - u8 nfcid3t[10]; - u8 didt; - u8 bst; - u8 brt; - u8 to; - u8 ppt; - /* optional */ - u8 gt[]; -} __packed; struct pn533 { struct usb_device *udev; @@ -1142,7 +1121,6 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev) { struct pn533_cmd_activate_param param; struct pn533_cmd_activate_response *resp; - u16 gt_len; int rc; nfc_dev_dbg(&dev->interface->dev, "%s", __func__); @@ -1168,11 +1146,7 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev) if (rc != PN533_CMD_RET_SUCCESS) return -EIO; - /* ATR_RES general bytes are located at offset 16 */ - gt_len = 
PN533_FRAME_CMD_PARAMS_LEN(dev->in_frame) - 16; - rc = nfc_set_remote_general_bytes(dev->nfc_dev, resp->gt, gt_len); - - return rc; + return 0; } static int pn533_activate_target(struct nfc_dev *nfc_dev, u32 target_idx, @@ -1265,142 +1239,6 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx) return; } - -static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, - u8 *params, int params_len) -{ - struct pn533_cmd_jump_dep *cmd; - struct pn533_cmd_jump_dep_response *resp; - struct nfc_target nfc_target; - u8 target_gt_len; - int rc; - - if (params_len == -ENOENT) { - nfc_dev_dbg(&dev->interface->dev, ""); - return 0; - } - - if (params_len < 0) { - nfc_dev_err(&dev->interface->dev, - "Error %d when bringing DEP link up", - params_len); - return 0; - } - - if (dev->tgt_available_prots && - !(dev->tgt_available_prots & (1 << NFC_PROTO_NFC_DEP))) { - nfc_dev_err(&dev->interface->dev, - "The target does not support DEP"); - return -EINVAL; - } - - resp = (struct pn533_cmd_jump_dep_response *) params; - cmd = (struct pn533_cmd_jump_dep *) arg; - rc = resp->status & PN533_CMD_RET_MASK; - if (rc != PN533_CMD_RET_SUCCESS) { - nfc_dev_err(&dev->interface->dev, - "Bringing DEP link up failed %d", rc); - return 0; - } - - if (!dev->tgt_available_prots) { - nfc_dev_dbg(&dev->interface->dev, "Creating new target"); - - nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK; - rc = nfc_targets_found(dev->nfc_dev, &nfc_target, 1); - if (rc) - return 0; - - dev->tgt_available_prots = 0; - } - - dev->tgt_active_prot = NFC_PROTO_NFC_DEP; - - /* ATR_RES general bytes are located at offset 17 */ - target_gt_len = PN533_FRAME_CMD_PARAMS_LEN(dev->in_frame) - 17; - rc = nfc_set_remote_general_bytes(dev->nfc_dev, - resp->gt, target_gt_len); - if (rc == 0) - rc = nfc_dep_link_is_up(dev->nfc_dev, - dev->nfc_dev->targets[0].idx, - !cmd->active, NFC_RF_INITIATOR); - - return 0; -} - -static int pn533_dep_link_up(struct nfc_dev *nfc_dev, int target_idx, - u8 comm_mode, u8 rf_mode) -{ - struct pn533 *dev = nfc_get_drvdata(nfc_dev); - struct pn533_cmd_jump_dep *cmd; - u8 cmd_len, local_gt_len, *local_gt; - int rc; - - nfc_dev_dbg(&dev->interface->dev, "%s", __func__); - - if (rf_mode == NFC_RF_TARGET) { - nfc_dev_err(&dev->interface->dev, "Target mode not supported"); - return -EOPNOTSUPP; - } - - - if (dev->poll_mod_count) { - nfc_dev_err(&dev->interface->dev, - "Cannot bring the DEP link up while polling"); - return -EBUSY; - } - - if (dev->tgt_active_prot) { - nfc_dev_err(&dev->interface->dev, - "There is already an active target"); - return -EBUSY; - } - - local_gt = nfc_get_local_general_bytes(dev->nfc_dev, &local_gt_len); - if (local_gt_len > NFC_MAX_GT_LEN) - return -EINVAL; - - cmd_len = sizeof(struct pn533_cmd_jump_dep) + local_gt_len; - cmd = kzalloc(cmd_len, GFP_KERNEL); - if (cmd == NULL) - return -ENOMEM; - - pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_JUMP_FOR_DEP); - - cmd->active = !comm_mode; - cmd->baud = 0; - if (local_gt != NULL) { - cmd->next = 4; /* We have some Gi */ - memcpy(cmd->gt, local_gt, local_gt_len); - } else { - cmd->next = 0; - } - - memcpy(PN533_FRAME_CMD_PARAMS_PTR(dev->out_frame), cmd, cmd_len); - dev->out_frame->datalen += cmd_len; - - pn533_tx_frame_finish(dev->out_frame); - - rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame, - dev->in_maxlen, pn533_in_dep_link_up_complete, - cmd, GFP_KERNEL); - if (rc) - goto out; - - -out: - kfree(cmd); - - return rc; -} - -static int pn533_dep_link_down(struct nfc_dev *nfc_dev) -{ - 
pn533_deactivate_target(nfc_dev, 0); - - return 0; -} - #define PN533_CMD_DATAEXCH_HEAD_LEN (sizeof(struct pn533_frame) + 3) #define PN533_CMD_DATAEXCH_DATA_MAXLEN 262 @@ -1501,7 +1339,7 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg, return 0; } -static int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx, +int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx, struct sk_buff *skb, data_exchange_cb_t cb, void *cb_context) @@ -1530,7 +1368,7 @@ static int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx, PN533_CMD_DATAEXCH_DATA_MAXLEN + PN533_FRAME_TAIL_SIZE; - skb_resp = nfc_alloc_recv_skb(skb_resp_len, GFP_KERNEL); + skb_resp = nfc_alloc_skb(skb_resp_len, GFP_KERNEL); if (!skb_resp) { rc = -ENOMEM; goto error; @@ -1596,8 +1434,6 @@ static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata, struct nfc_ops pn533_nfc_ops = { .dev_up = NULL, .dev_down = NULL, - .dep_link_up = pn533_dep_link_up, - .dep_link_down = pn533_dep_link_down, .start_poll = pn533_start_poll, .stop_poll = pn533_stop_poll, .activate_target = pn533_activate_target, diff --git a/trunk/drivers/of/platform.c b/trunk/drivers/of/platform.c index 63b3ec48c203..cbd5d701c7e0 100644 --- a/trunk/drivers/of/platform.c +++ b/trunk/drivers/of/platform.c @@ -314,7 +314,7 @@ static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *l if (!lookup) return NULL; - for(; lookup->compatible != NULL; lookup++) { + for(; lookup->name != NULL; lookup++) { if (!of_device_is_compatible(np, lookup->compatible)) continue; if (of_address_to_resource(np, 0, &res)) diff --git a/trunk/drivers/oprofile/nmi_timer_int.c b/trunk/drivers/oprofile/nmi_timer_int.c deleted file mode 100644 index 76f1c9357f39..000000000000 --- a/trunk/drivers/oprofile/nmi_timer_int.c +++ /dev/null @@ -1,173 +0,0 @@ -/** - * @file nmi_timer_int.c - * - * @remark Copyright 2011 Advanced Micro Devices, Inc. 
- * - * @author Robert Richter - */ - -#include -#include -#include -#include -#include - -#ifdef CONFIG_OPROFILE_NMI_TIMER - -static DEFINE_PER_CPU(struct perf_event *, nmi_timer_events); -static int ctr_running; - -static struct perf_event_attr nmi_timer_attr = { - .type = PERF_TYPE_HARDWARE, - .config = PERF_COUNT_HW_CPU_CYCLES, - .size = sizeof(struct perf_event_attr), - .pinned = 1, - .disabled = 1, -}; - -static void nmi_timer_callback(struct perf_event *event, - struct perf_sample_data *data, - struct pt_regs *regs) -{ - event->hw.interrupts = 0; /* don't throttle interrupts */ - oprofile_add_sample(regs, 0); -} - -static int nmi_timer_start_cpu(int cpu) -{ - struct perf_event *event = per_cpu(nmi_timer_events, cpu); - - if (!event) { - event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL, - nmi_timer_callback, NULL); - if (IS_ERR(event)) - return PTR_ERR(event); - per_cpu(nmi_timer_events, cpu) = event; - } - - if (event && ctr_running) - perf_event_enable(event); - - return 0; -} - -static void nmi_timer_stop_cpu(int cpu) -{ - struct perf_event *event = per_cpu(nmi_timer_events, cpu); - - if (event && ctr_running) - perf_event_disable(event); -} - -static int nmi_timer_cpu_notifier(struct notifier_block *b, unsigned long action, - void *data) -{ - int cpu = (unsigned long)data; - switch (action) { - case CPU_DOWN_FAILED: - case CPU_ONLINE: - nmi_timer_start_cpu(cpu); - break; - case CPU_DOWN_PREPARE: - nmi_timer_stop_cpu(cpu); - break; - } - return NOTIFY_DONE; -} - -static struct notifier_block nmi_timer_cpu_nb = { - .notifier_call = nmi_timer_cpu_notifier -}; - -static int nmi_timer_start(void) -{ - int cpu; - - get_online_cpus(); - ctr_running = 1; - for_each_online_cpu(cpu) - nmi_timer_start_cpu(cpu); - put_online_cpus(); - - return 0; -} - -static void nmi_timer_stop(void) -{ - int cpu; - - get_online_cpus(); - for_each_online_cpu(cpu) - nmi_timer_stop_cpu(cpu); - ctr_running = 0; - put_online_cpus(); -} - -static void nmi_timer_shutdown(void) -{ - struct perf_event *event; - int cpu; - - get_online_cpus(); - unregister_cpu_notifier(&nmi_timer_cpu_nb); - for_each_possible_cpu(cpu) { - event = per_cpu(nmi_timer_events, cpu); - if (!event) - continue; - perf_event_disable(event); - per_cpu(nmi_timer_events, cpu) = NULL; - perf_event_release_kernel(event); - } - - put_online_cpus(); -} - -static int nmi_timer_setup(void) -{ - int cpu, err; - u64 period; - - /* clock cycles per tick: */ - period = (u64)cpu_khz * 1000; - do_div(period, HZ); - nmi_timer_attr.sample_period = period; - - get_online_cpus(); - err = register_cpu_notifier(&nmi_timer_cpu_nb); - if (err) - goto out; - /* can't attach events to offline cpus: */ - for_each_online_cpu(cpu) { - err = nmi_timer_start_cpu(cpu); - if (err) - break; - } - if (err) - nmi_timer_shutdown(); -out: - put_online_cpus(); - return err; -} - -int __init op_nmi_timer_init(struct oprofile_operations *ops) -{ - int err = 0; - - err = nmi_timer_setup(); - if (err) - return err; - nmi_timer_shutdown(); /* only check, don't alloc */ - - ops->create_files = NULL; - ops->setup = nmi_timer_setup; - ops->shutdown = nmi_timer_shutdown; - ops->start = nmi_timer_start; - ops->stop = nmi_timer_stop; - ops->cpu_type = "timer"; - - printk(KERN_INFO "oprofile: using NMI timer interrupt.\n"); - - return 0; -} - -#endif diff --git a/trunk/drivers/oprofile/oprof.c b/trunk/drivers/oprofile/oprof.c index ed2c3ec07024..f8c752e408a6 100644 --- a/trunk/drivers/oprofile/oprof.c +++ b/trunk/drivers/oprofile/oprof.c @@ -246,31 +246,37 @@ static int 
__init oprofile_init(void) int err; /* always init architecture to setup backtrace support */ - timer_mode = 0; err = oprofile_arch_init(&oprofile_ops); - if (!err) { - if (!timer && !oprofilefs_register()) - return 0; - oprofile_arch_exit(); - } - /* setup timer mode: */ - timer_mode = 1; - /* no nmi timer mode if oprofile.timer is set */ - if (timer || op_nmi_timer_init(&oprofile_ops)) { + timer_mode = err || timer; /* fall back to timer mode on errors */ + if (timer_mode) { + if (!err) + oprofile_arch_exit(); err = oprofile_timer_init(&oprofile_ops); if (err) return err; } - return oprofilefs_register(); + err = oprofilefs_register(); + if (!err) + return 0; + + /* failed */ + if (timer_mode) + oprofile_timer_exit(); + else + oprofile_arch_exit(); + + return err; } static void __exit oprofile_exit(void) { oprofilefs_unregister(); - if (!timer_mode) + if (timer_mode) + oprofile_timer_exit(); + else oprofile_arch_exit(); } diff --git a/trunk/drivers/oprofile/oprof.h b/trunk/drivers/oprofile/oprof.h index d32ef816337c..177b73de5e5f 100644 --- a/trunk/drivers/oprofile/oprof.h +++ b/trunk/drivers/oprofile/oprof.h @@ -35,15 +35,7 @@ struct dentry; void oprofile_create_files(struct super_block *sb, struct dentry *root); int oprofile_timer_init(struct oprofile_operations *ops); -#ifdef CONFIG_OPROFILE_NMI_TIMER -int op_nmi_timer_init(struct oprofile_operations *ops); -#else -static inline int op_nmi_timer_init(struct oprofile_operations *ops) -{ - return -ENODEV; -} -#endif - +void oprofile_timer_exit(void); int oprofile_set_ulong(unsigned long *addr, unsigned long val); int oprofile_set_timeout(unsigned long time); diff --git a/trunk/drivers/oprofile/timer_int.c b/trunk/drivers/oprofile/timer_int.c index 93404f72dfa8..878fba126582 100644 --- a/trunk/drivers/oprofile/timer_int.c +++ b/trunk/drivers/oprofile/timer_int.c @@ -97,24 +97,24 @@ static struct notifier_block __refdata oprofile_cpu_notifier = { .notifier_call = oprofile_cpu_notify, }; -static int oprofile_hrtimer_setup(void) +int oprofile_timer_init(struct oprofile_operations *ops) { - return register_hotcpu_notifier(&oprofile_cpu_notifier); + int rc; + + rc = register_hotcpu_notifier(&oprofile_cpu_notifier); + if (rc) + return rc; + ops->create_files = NULL; + ops->setup = NULL; + ops->shutdown = NULL; + ops->start = oprofile_hrtimer_start; + ops->stop = oprofile_hrtimer_stop; + ops->cpu_type = "timer"; + printk(KERN_INFO "oprofile: using timer interrupt.\n"); + return 0; } -static void oprofile_hrtimer_shutdown(void) +void oprofile_timer_exit(void) { unregister_hotcpu_notifier(&oprofile_cpu_notifier); } - -int oprofile_timer_init(struct oprofile_operations *ops) -{ - ops->create_files = NULL; - ops->setup = oprofile_hrtimer_setup; - ops->shutdown = oprofile_hrtimer_shutdown; - ops->start = oprofile_hrtimer_start; - ops->stop = oprofile_hrtimer_stop; - ops->cpu_type = "timer"; - printk(KERN_INFO "oprofile: using timer interrupt.\n"); - return 0; -} diff --git a/trunk/drivers/pci/Kconfig b/trunk/drivers/pci/Kconfig index 37856f7c7781..f02b5235056d 100644 --- a/trunk/drivers/pci/Kconfig +++ b/trunk/drivers/pci/Kconfig @@ -98,11 +98,11 @@ config PCI_PASID If unsure, say N. 
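Returning briefly to the oprofile_init() rework shown above: the new flow first tries the architecture backend, falls back to the hrtimer backend when that fails (or when the timer parameter forces it), and unwinds whichever backend was chosen if oprofilefs registration fails. The following condensed, standalone sketch mirrors that ordering; the helper names are stand-ins for the real oprofile functions and are placeholders only.

/* Stubs standing in for oprofile_arch_init/exit, oprofile_timer_init/exit
 * and oprofilefs_register -- placeholders for illustration only. */
static int  arch_init(void)   { return -1; }	/* pretend the arch backend fails */
static void arch_exit(void)   { }
static int  timer_init(void)  { return 0; }
static void timer_exit(void)  { }
static int  fs_register(void) { return 0; }

static int force_timer;	/* mirrors the "timer" module parameter */
static int timer_mode;

static int example_init(void)
{
	int err = arch_init();

	timer_mode = err || force_timer;	/* fall back to timer mode on errors */
	if (timer_mode) {
		if (!err)
			arch_exit();		/* arch backend set up but not used */
		err = timer_init();
		if (err)
			return err;
	}

	err = fs_register();
	if (!err)
		return 0;

	/* registration failed: tear down whichever backend was chosen */
	if (timer_mode)
		timer_exit();
	else
		arch_exit();
	return err;
}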
config PCI_IOAPIC - tristate "PCI IO-APIC hotplug support" if X86 + bool depends on PCI depends on ACPI depends on HOTPLUG - default !X86 + default y config PCI_LABEL def_bool y if (DMI || ACPI) diff --git a/trunk/drivers/pci/ioapic.c b/trunk/drivers/pci/ioapic.c index 205af8dc83c2..5775638ac017 100644 --- a/trunk/drivers/pci/ioapic.c +++ b/trunk/drivers/pci/ioapic.c @@ -17,7 +17,7 @@ */ #include -#include +#include #include #include #include @@ -27,7 +27,7 @@ struct ioapic { u32 gsi_base; }; -static int __devinit ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent) +static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent) { acpi_handle handle; acpi_status status; @@ -88,7 +88,7 @@ static int __devinit ioapic_probe(struct pci_dev *dev, const struct pci_device_i return -ENODEV; } -static void __devexit ioapic_remove(struct pci_dev *dev) +static void ioapic_remove(struct pci_dev *dev) { struct ioapic *ioapic = pci_get_drvdata(dev); @@ -99,12 +99,13 @@ static void __devexit ioapic_remove(struct pci_dev *dev) } -static DEFINE_PCI_DEVICE_TABLE(ioapic_devices) = { - { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOAPIC, ~0) }, - { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOXAPIC, ~0) }, +static struct pci_device_id ioapic_devices[] = { + { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_SYSTEM_PIC_IOAPIC << 8, 0xffff00, }, + { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_SYSTEM_PIC_IOXAPIC << 8, 0xffff00, }, { } }; -MODULE_DEVICE_TABLE(pci, ioapic_devices); static struct pci_driver ioapic_driver = { .name = "ioapic", diff --git a/trunk/drivers/rtc/interface.c b/trunk/drivers/rtc/interface.c index 8e286259a007..3bcc7cfcaba7 100644 --- a/trunk/drivers/rtc/interface.c +++ b/trunk/drivers/rtc/interface.c @@ -73,6 +73,8 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm) err = -EINVAL; mutex_unlock(&rtc->ops_lock); + /* A timer might have just expired */ + schedule_work(&rtc->irqwork); return err; } EXPORT_SYMBOL_GPL(rtc_set_time); @@ -112,6 +114,8 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs) err = -EINVAL; mutex_unlock(&rtc->ops_lock); + /* A timer might have just expired */ + schedule_work(&rtc->irqwork); return err; } @@ -319,6 +323,20 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) } EXPORT_SYMBOL_GPL(rtc_read_alarm); +static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) +{ + int err; + + if (!rtc->ops) + err = -ENODEV; + else if (!rtc->ops->set_alarm) + err = -EINVAL; + else + err = rtc->ops->set_alarm(rtc->dev.parent, alarm); + + return err; +} + static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) { struct rtc_time tm; @@ -342,14 +360,7 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) * over right here, before we set the alarm. 
*/ - if (!rtc->ops) - err = -ENODEV; - else if (!rtc->ops->set_alarm) - err = -EINVAL; - else - err = rtc->ops->set_alarm(rtc->dev.parent, alarm); - - return err; + return ___rtc_set_alarm(rtc, alarm); } int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) @@ -396,6 +407,8 @@ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node); } mutex_unlock(&rtc->ops_lock); + /* maybe that was in the past.*/ + schedule_work(&rtc->irqwork); return err; } EXPORT_SYMBOL_GPL(rtc_initialize_alarm); @@ -763,6 +776,20 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) return 0; } +static void rtc_alarm_disable(struct rtc_device *rtc) +{ + struct rtc_wkalrm alarm; + struct rtc_time tm; + + __rtc_read_time(rtc, &tm); + + alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm), + ktime_set(300, 0))); + alarm.enabled = 0; + + ___rtc_set_alarm(rtc, &alarm); +} + /** * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue * @rtc rtc device @@ -784,8 +811,10 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer) struct rtc_wkalrm alarm; int err; next = timerqueue_getnext(&rtc->timerqueue); - if (!next) + if (!next) { + rtc_alarm_disable(rtc); return; + } alarm.time = rtc_ktime_to_tm(next->expires); alarm.enabled = 1; err = __rtc_set_alarm(rtc, &alarm); @@ -847,7 +876,8 @@ void rtc_timer_do_work(struct work_struct *work) err = __rtc_set_alarm(rtc, &alarm); if (err == -ETIME) goto again; - } + } else + rtc_alarm_disable(rtc); mutex_unlock(&rtc->ops_lock); } diff --git a/trunk/drivers/s390/kvm/kvm_virtio.c b/trunk/drivers/s390/kvm/kvm_virtio.c index 8af868bab20b..94f49ffa70ba 100644 --- a/trunk/drivers/s390/kvm/kvm_virtio.c +++ b/trunk/drivers/s390/kvm/kvm_virtio.c @@ -263,11 +263,6 @@ static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs, return PTR_ERR(vqs[i]); } -static const char *kvm_bus_name(struct virtio_device *vdev) -{ - return ""; -} - /* * The config ops structure as defined by virtio config */ @@ -281,7 +276,6 @@ static struct virtio_config_ops kvm_vq_configspace_ops = { .reset = kvm_reset, .find_vqs = kvm_find_vqs, .del_vqs = kvm_del_vqs, - .bus_name = kvm_bus_name, }; /* diff --git a/trunk/drivers/s390/net/netiucv.c b/trunk/drivers/s390/net/netiucv.c index 8160591913f9..b6a6356d09b3 100644 --- a/trunk/drivers/s390/net/netiucv.c +++ b/trunk/drivers/s390/net/netiucv.c @@ -63,7 +63,6 @@ #include #include -#include #include #include "fsm.h" @@ -76,7 +75,7 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver"); * Debug Facility stuff */ #define IUCV_DBF_SETUP_NAME "iucv_setup" -#define IUCV_DBF_SETUP_LEN 64 +#define IUCV_DBF_SETUP_LEN 32 #define IUCV_DBF_SETUP_PAGES 2 #define IUCV_DBF_SETUP_NR_AREAS 1 #define IUCV_DBF_SETUP_LEVEL 3 @@ -227,7 +226,6 @@ struct iucv_connection { struct net_device *netdev; struct connection_profile prof; char userid[9]; - char userdata[17]; }; /** @@ -265,7 +263,7 @@ struct ll_header { }; #define NETIUCV_HDRLEN (sizeof(struct ll_header)) -#define NETIUCV_BUFSIZE_MAX 65537 +#define NETIUCV_BUFSIZE_MAX 32768 #define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX #define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN) #define NETIUCV_MTU_DEFAULT 9216 @@ -290,12 +288,7 @@ static inline int netiucv_test_and_set_busy(struct net_device *dev) return test_and_set_bit(0, &priv->tbusy); } -static u8 iucvMagic_ascii[16] = { - 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x30, 0x20, 0x20, 0x20, 0x20, 
0x20, 0x20, 0x20 -}; - -static u8 iucvMagic_ebcdic[16] = { +static u8 iucvMagic[16] = { 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 }; @@ -308,38 +301,18 @@ static u8 iucvMagic_ebcdic[16] = { * * @returns The printable string (static data!!) */ -static char *netiucv_printname(char *name, int len) +static char *netiucv_printname(char *name) { - static char tmp[17]; + static char tmp[9]; char *p = tmp; - memcpy(tmp, name, len); - tmp[len] = '\0'; - while (*p && ((p - tmp) < len) && (!isspace(*p))) + memcpy(tmp, name, 8); + tmp[8] = '\0'; + while (*p && (!isspace(*p))) p++; *p = '\0'; return tmp; } -static char *netiucv_printuser(struct iucv_connection *conn) -{ - static char tmp_uid[9]; - static char tmp_udat[17]; - static char buf[100]; - - if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) { - tmp_uid[8] = '\0'; - tmp_udat[16] = '\0'; - memcpy(tmp_uid, conn->userid, 8); - memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8); - memcpy(tmp_udat, conn->userdata, 16); - EBCASC(tmp_udat, 16); - memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16); - sprintf(buf, "%s.%s", tmp_uid, tmp_udat); - return buf; - } else - return netiucv_printname(conn->userid, 8); -} - /** * States of the interface statemachine. */ @@ -590,18 +563,15 @@ static int netiucv_callback_connreq(struct iucv_path *path, { struct iucv_connection *conn = path->private; struct iucv_event ev; - static char tmp_user[9]; - static char tmp_udat[17]; int rc; + if (memcmp(iucvMagic, ipuser, 16)) + /* ipuser must match iucvMagic. */ + return -EINVAL; rc = -EINVAL; - memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8); - memcpy(tmp_udat, ipuser, 16); - EBCASC(tmp_udat, 16); read_lock_bh(&iucv_connection_rwlock); list_for_each_entry(conn, &iucv_connection_list, list) { - if (strncmp(ipvmid, conn->userid, 8) || - strncmp(ipuser, conn->userdata, 16)) + if (strncmp(ipvmid, conn->userid, 8)) continue; /* Found a matching connection for this path. 
*/ conn->path = path; @@ -610,8 +580,6 @@ static int netiucv_callback_connreq(struct iucv_path *path, fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev); rc = 0; } - IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n", - tmp_user, netiucv_printname(tmp_udat, 16)); read_unlock_bh(&iucv_connection_rwlock); return rc; } @@ -848,7 +816,7 @@ static void conn_action_connaccept(fsm_instance *fi, int event, void *arg) conn->path = path; path->msglim = NETIUCV_QUEUELEN_DEFAULT; path->flags = 0; - rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn); + rc = iucv_path_accept(path, &netiucv_handler, NULL, conn); if (rc) { IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc); return; @@ -886,7 +854,7 @@ static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg) IUCV_DBF_TEXT(trace, 3, __func__); fsm_deltimer(&conn->timer); - iucv_path_sever(conn->path, conn->userdata); + iucv_path_sever(conn->path, NULL); fsm_newstate(fi, CONN_STATE_STARTWAIT); } @@ -899,9 +867,9 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg) IUCV_DBF_TEXT(trace, 3, __func__); fsm_deltimer(&conn->timer); - iucv_path_sever(conn->path, conn->userdata); - dev_info(privptr->dev, "The peer z/VM guest %s has closed the " - "connection\n", netiucv_printuser(conn)); + iucv_path_sever(conn->path, NULL); + dev_info(privptr->dev, "The peer interface of the IUCV device" + " has closed the connection\n"); IUCV_DBF_TEXT(data, 2, "conn_action_connsever: Remote dropped connection\n"); fsm_newstate(fi, CONN_STATE_STARTWAIT); @@ -918,6 +886,8 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg) IUCV_DBF_TEXT(trace, 3, __func__); fsm_newstate(fi, CONN_STATE_STARTWAIT); + IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n", + netdev->name, conn->userid); /* * We must set the state before calling iucv_connect because the @@ -927,11 +897,8 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg) fsm_newstate(fi, CONN_STATE_SETUPWAIT); conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL); - IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n", - netdev->name, netiucv_printuser(conn)); - rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid, - NULL, conn->userdata, conn); + NULL, iucvMagic, conn); switch (rc) { case 0: netdev->tx_queue_len = conn->path->msglim; @@ -941,13 +908,13 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg) case 11: dev_warn(privptr->dev, "The IUCV device failed to connect to z/VM guest %s\n", - netiucv_printname(conn->userid, 8)); + netiucv_printname(conn->userid)); fsm_newstate(fi, CONN_STATE_STARTWAIT); break; case 12: dev_warn(privptr->dev, "The IUCV device failed to connect to the peer on z/VM" - " guest %s\n", netiucv_printname(conn->userid, 8)); + " guest %s\n", netiucv_printname(conn->userid)); fsm_newstate(fi, CONN_STATE_STARTWAIT); break; case 13: @@ -960,7 +927,7 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg) dev_err(privptr->dev, "z/VM guest %s has too many IUCV connections" " to connect with the IUCV device\n", - netiucv_printname(conn->userid, 8)); + netiucv_printname(conn->userid)); fsm_newstate(fi, CONN_STATE_CONNERR); break; case 15: @@ -1005,7 +972,7 @@ static void conn_action_stop(fsm_instance *fi, int event, void *arg) netiucv_purge_skb_queue(&conn->collect_queue); if (conn->path) { IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n"); - iucv_path_sever(conn->path, conn->userdata); + iucv_path_sever(conn->path, iucvMagic); 
kfree(conn->path); conn->path = NULL; } @@ -1123,8 +1090,7 @@ dev_action_connup(fsm_instance *fi, int event, void *arg) fsm_newstate(fi, DEV_STATE_RUNNING); dev_info(privptr->dev, "The IUCV device has been connected" - " successfully to %s\n", - netiucv_printuser(privptr->conn)); + " successfully to %s\n", privptr->conn->userid); IUCV_DBF_TEXT(setup, 3, "connection is up and running\n"); break; @@ -1486,72 +1452,45 @@ static ssize_t user_show(struct device *dev, struct device_attribute *attr, struct netiucv_priv *priv = dev_get_drvdata(dev); IUCV_DBF_TEXT(trace, 5, __func__); - return sprintf(buf, "%s\n", netiucv_printuser(priv->conn)); + return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid)); } -static int netiucv_check_user(const char *buf, size_t count, char *username, - char *userdata) +static ssize_t user_write(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { - const char *p; - int i; + struct netiucv_priv *priv = dev_get_drvdata(dev); + struct net_device *ndev = priv->conn->netdev; + char *p; + char *tmp; + char username[9]; + int i; + struct iucv_connection *cp; - p = strchr(buf, '.'); - if ((p && ((count > 26) || - ((p - buf) > 8) || - (buf + count - p > 18))) || - (!p && (count > 9))) { - IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); + IUCV_DBF_TEXT(trace, 3, __func__); + if (count > 9) { + IUCV_DBF_TEXT_(setup, 2, + "%d is length of username\n", (int) count); return -EINVAL; } - for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) { - if (isalnum(*p) || *p == '$') { - username[i] = toupper(*p); + tmp = strsep((char **) &buf, "\n"); + for (i = 0, p = tmp; i < 8 && *p; i++, p++) { + if (isalnum(*p) || (*p == '$')) { + username[i]= toupper(*p); continue; } - if (*p == '\n') + if (*p == '\n') { /* trailing lf, grr */ break; + } IUCV_DBF_TEXT_(setup, 2, - "conn_write: invalid character %02x\n", *p); + "username: invalid character %c\n", *p); return -EINVAL; } while (i < 8) username[i++] = ' '; username[8] = '\0'; - if (*p == '.') { - p++; - for (i = 0; i < 16 && *p; i++, p++) { - if (*p == '\n') - break; - userdata[i] = toupper(*p); - } - while (i > 0 && i < 16) - userdata[i++] = ' '; - } else - memcpy(userdata, iucvMagic_ascii, 16); - userdata[16] = '\0'; - ASCEBC(userdata, 16); - - return 0; -} - -static ssize_t user_write(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - struct net_device *ndev = priv->conn->netdev; - char username[9]; - char userdata[17]; - int rc; - struct iucv_connection *cp; - - IUCV_DBF_TEXT(trace, 3, __func__); - rc = netiucv_check_user(buf, count, username, userdata); - if (rc) - return rc; - if (memcmp(username, priv->conn->userid, 9) && (ndev->flags & (IFF_UP | IFF_RUNNING))) { /* username changed while the interface is active. 
*/ @@ -1560,17 +1499,15 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr, } read_lock_bh(&iucv_connection_rwlock); list_for_each_entry(cp, &iucv_connection_list, list) { - if (!strncmp(username, cp->userid, 9) && - !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) { + if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) { read_unlock_bh(&iucv_connection_rwlock); - IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s " - "already exists\n", netiucv_printuser(cp)); + IUCV_DBF_TEXT_(setup, 2, "user_write: Connection " + "to %s already exists\n", username); return -EEXIST; } } read_unlock_bh(&iucv_connection_rwlock); memcpy(priv->conn->userid, username, 9); - memcpy(priv->conn->userdata, userdata, 17); return count; } @@ -1600,8 +1537,7 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr, bs1 = simple_strtoul(buf, &e, 0); if (e && (!isspace(*e))) { - IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n", - *e); + IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e); return -EINVAL; } if (bs1 > NETIUCV_BUFSIZE_MAX) { @@ -1928,8 +1864,7 @@ static void netiucv_unregister_device(struct device *dev) * Add it to the list of netiucv connections; */ static struct iucv_connection *netiucv_new_connection(struct net_device *dev, - char *username, - char *userdata) + char *username) { struct iucv_connection *conn; @@ -1958,8 +1893,6 @@ static struct iucv_connection *netiucv_new_connection(struct net_device *dev, fsm_settimer(conn->fsm, &conn->timer); fsm_newstate(conn->fsm, CONN_STATE_INVALID); - if (userdata) - memcpy(conn->userdata, userdata, 17); if (username) { memcpy(conn->userid, username, 9); fsm_newstate(conn->fsm, CONN_STATE_STOPPED); @@ -1986,7 +1919,6 @@ static struct iucv_connection *netiucv_new_connection(struct net_device *dev, */ static void netiucv_remove_connection(struct iucv_connection *conn) { - IUCV_DBF_TEXT(trace, 3, __func__); write_lock_bh(&iucv_connection_rwlock); list_del_init(&conn->list); @@ -1994,7 +1926,7 @@ static void netiucv_remove_connection(struct iucv_connection *conn) fsm_deltimer(&conn->timer); netiucv_purge_skb_queue(&conn->collect_queue); if (conn->path) { - iucv_path_sever(conn->path, conn->userdata); + iucv_path_sever(conn->path, iucvMagic); kfree(conn->path); conn->path = NULL; } @@ -2053,7 +1985,7 @@ static void netiucv_setup_netdevice(struct net_device *dev) /** * Allocate and initialize everything of a net device. 
*/ -static struct net_device *netiucv_init_netdevice(char *username, char *userdata) +static struct net_device *netiucv_init_netdevice(char *username) { struct netiucv_priv *privptr; struct net_device *dev; @@ -2072,7 +2004,7 @@ static struct net_device *netiucv_init_netdevice(char *username, char *userdata) if (!privptr->fsm) goto out_netdev; - privptr->conn = netiucv_new_connection(dev, username, userdata); + privptr->conn = netiucv_new_connection(dev, username); if (!privptr->conn) { IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n"); goto out_fsm; @@ -2090,31 +2022,47 @@ static struct net_device *netiucv_init_netdevice(char *username, char *userdata) static ssize_t conn_write(struct device_driver *drv, const char *buf, size_t count) { + const char *p; char username[9]; - char userdata[17]; - int rc; + int i, rc; struct net_device *dev; struct netiucv_priv *priv; struct iucv_connection *cp; IUCV_DBF_TEXT(trace, 3, __func__); - rc = netiucv_check_user(buf, count, username, userdata); - if (rc) - return rc; + if (count>9) { + IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); + return -EINVAL; + } + + for (i = 0, p = buf; i < 8 && *p; i++, p++) { + if (isalnum(*p) || *p == '$') { + username[i] = toupper(*p); + continue; + } + if (*p == '\n') + /* trailing lf, grr */ + break; + IUCV_DBF_TEXT_(setup, 2, + "conn_write: invalid character %c\n", *p); + return -EINVAL; + } + while (i < 8) + username[i++] = ' '; + username[8] = '\0'; read_lock_bh(&iucv_connection_rwlock); list_for_each_entry(cp, &iucv_connection_list, list) { - if (!strncmp(username, cp->userid, 9) && - !strncmp(userdata, cp->userdata, 17)) { + if (!strncmp(username, cp->userid, 9)) { read_unlock_bh(&iucv_connection_rwlock); - IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s " - "already exists\n", netiucv_printuser(cp)); + IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection " + "to %s already exists\n", username); return -EEXIST; } } read_unlock_bh(&iucv_connection_rwlock); - dev = netiucv_init_netdevice(username, userdata); + dev = netiucv_init_netdevice(username); if (!dev) { IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); return -ENODEV; @@ -2135,9 +2083,8 @@ static ssize_t conn_write(struct device_driver *drv, if (rc) goto out_unreg; - dev_info(priv->dev, "The IUCV interface to %s has been established " - "successfully\n", - netiucv_printuser(priv->conn)); + dev_info(priv->dev, "The IUCV interface to %s has been" + " established successfully\n", netiucv_printname(username)); return count; diff --git a/trunk/drivers/s390/net/qeth_core_main.c b/trunk/drivers/s390/net/qeth_core_main.c index 4fae1dc19951..fff57de78943 100644 --- a/trunk/drivers/s390/net/qeth_core_main.c +++ b/trunk/drivers/s390/net/qeth_core_main.c @@ -66,7 +66,7 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, enum qeth_qdio_buffer_states newbufstate); -static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); + static inline const char *qeth_get_cardname(struct qeth_card *card) { @@ -363,9 +363,6 @@ static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx, int forced_cleanup) { - if (q->card->options.cq != QETH_CQ_ENABLED) - return; - if (q->bufs[bidx]->next_pending != NULL) { struct qeth_qdio_out_buffer *head = q->bufs[bidx]; struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending; @@ -393,13 
+390,6 @@ static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, } } - if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) == - QETH_QDIO_BUF_HANDLED_DELAYED)) { - /* for recovery situations */ - q->bufs[bidx]->aob = q->bufstates[bidx].aob; - qeth_init_qdio_out_buf(q, bidx); - QETH_CARD_TEXT(q->card, 2, "clprecov"); - } } @@ -422,6 +412,7 @@ static inline void qeth_qdio_handle_aob(struct qeth_card *card, notification = TX_NOTIFY_OK; } else { BUG_ON(atomic_read(&buffer->state) != QETH_QDIO_BUF_PENDING); + atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ); notification = TX_NOTIFY_DELAYED_OK; } @@ -434,8 +425,7 @@ static inline void qeth_qdio_handle_aob(struct qeth_card *card, buffer->aob = NULL; qeth_clear_output_buffer(buffer->q, buffer, - QETH_QDIO_BUF_HANDLED_DELAYED); - + QETH_QDIO_BUF_HANDLED_DELAYED); /* from here on: do not touch buffer anymore */ qdio_release_aob(aob); } @@ -1123,25 +1113,11 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q, static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf) { struct sk_buff *skb; - struct iucv_sock *iucv; - int notify_general_error = 0; - - if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING) - notify_general_error = 1; - - /* release may never happen from within CQ tasklet scope */ - BUG_ON(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ); skb = skb_dequeue(&buf->skb_list); while (skb) { QETH_CARD_TEXT(buf->q->card, 5, "skbr"); QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb); - if (notify_general_error && skb->protocol == ETH_P_AF_IUCV) { - if (skb->sk) { - iucv = iucv_sk(skb->sk); - iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR); - } - } atomic_dec(&skb->users); dev_kfree_skb_any(skb); skb = skb_dequeue(&buf->skb_list); @@ -1184,7 +1160,7 @@ static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free) for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { if (!q->bufs[j]) continue; - qeth_cleanup_handled_pending(q, j, 1); + qeth_cleanup_handled_pending(q, j, free); qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY); if (free) { kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]); @@ -1231,7 +1207,7 @@ static void qeth_free_qdio_buffers(struct qeth_card *card) qeth_free_cq(card); cancel_delayed_work_sync(&card->buffer_reclaim_work); for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) - dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb); + kfree_skb(card->qdio.in_q->bufs[j].rx_skb); kfree(card->qdio.in_q); card->qdio.in_q = NULL; /* inbound buffer pool */ @@ -1353,7 +1329,6 @@ static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) static void qeth_start_kernel_thread(struct work_struct *work) { - struct task_struct *ts; struct qeth_card *card = container_of(work, struct qeth_card, kernel_thread_starter); QETH_CARD_TEXT(card , 2, "strthrd"); @@ -1361,15 +1336,9 @@ static void qeth_start_kernel_thread(struct work_struct *work) if (card->read.state != CH_STATE_UP && card->write.state != CH_STATE_UP) return; - if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) { - ts = kthread_run(card->discipline.recover, (void *)card, + if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) + kthread_run(card->discipline.recover, (void *) card, "qeth_recover"); - if (IS_ERR(ts)) { - qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); - qeth_clear_thread_running_bit(card, - QETH_RECOVER_THREAD); - } - } } static int qeth_setup_card(struct qeth_card *card) diff --git a/trunk/drivers/s390/net/qeth_l2_main.c b/trunk/drivers/s390/net/qeth_l2_main.c index c12967133114..a21ae3d549db 
100644 --- a/trunk/drivers/s390/net/qeth_l2_main.c +++ b/trunk/drivers/s390/net/qeth_l2_main.c @@ -301,21 +301,21 @@ static void qeth_l2_process_vlans(struct qeth_card *card) spin_unlock_bh(&card->vlanlock); } -static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) { struct qeth_card *card = dev->ml_priv; struct qeth_vlan_vid *id; QETH_CARD_TEXT_(card, 4, "aid:%d", vid); if (!vid) - return 0; + return; if (card->info.type == QETH_CARD_TYPE_OSM) { QETH_CARD_TEXT(card, 3, "aidOSM"); - return 0; + return; } if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { QETH_CARD_TEXT(card, 3, "aidREC"); - return 0; + return; } id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC); if (id) { @@ -324,13 +324,10 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) spin_lock_bh(&card->vlanlock); list_add_tail(&id->list, &card->vid_list); spin_unlock_bh(&card->vlanlock); - } else { - return -ENOMEM; } - return 0; } -static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) { struct qeth_vlan_vid *id, *tmpid = NULL; struct qeth_card *card = dev->ml_priv; @@ -338,11 +335,11 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) QETH_CARD_TEXT_(card, 4, "kid:%d", vid); if (card->info.type == QETH_CARD_TYPE_OSM) { QETH_CARD_TEXT(card, 3, "kidOSM"); - return 0; + return; } if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { QETH_CARD_TEXT(card, 3, "kidREC"); - return 0; + return; } spin_lock_bh(&card->vlanlock); list_for_each_entry(id, &card->vid_list, list) { @@ -358,7 +355,6 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) kfree(tmpid); } qeth_l2_set_multicast_list(card->dev); - return 0; } static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) @@ -1173,7 +1169,6 @@ static void __exit qeth_l2_exit(void) static void qeth_l2_shutdown(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); - qeth_set_allowed_threads(card, 0, 1); if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); qeth_qdio_clear_card(card, 0); diff --git a/trunk/drivers/s390/net/qeth_l3_main.c b/trunk/drivers/s390/net/qeth_l3_main.c index 9648e4e68337..4d5307ddbe55 100644 --- a/trunk/drivers/s390/net/qeth_l3_main.c +++ b/trunk/drivers/s390/net/qeth_l3_main.c @@ -1869,15 +1869,15 @@ static void qeth_l3_free_vlan_addresses(struct qeth_card *card, qeth_l3_free_vlan_addresses6(card, vid); } -static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) { struct qeth_card *card = dev->ml_priv; set_bit(vid, card->active_vlans); - return 0; + return; } -static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) { struct qeth_card *card = dev->ml_priv; unsigned long flags; @@ -1885,7 +1885,7 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) QETH_CARD_TEXT_(card, 4, "kid:%d", vid); if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { QETH_CARD_TEXT(card, 3, "kidREC"); - return 0; + return; } spin_lock_irqsave(&card->vlanlock, flags); /* unregister IP addresses of vlan device */ @@ -1893,7 +1893,6 @@ static int 
qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) clear_bit(vid, card->active_vlans); spin_unlock_irqrestore(&card->vlanlock, flags); qeth_l3_set_multicast_list(card->dev); - return 0; } static inline int qeth_l3_rebuild_skb(struct qeth_card *card, @@ -2760,7 +2759,7 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb) rcu_read_lock(); dst = skb_dst(skb); if (dst) - n = dst_get_neighbour_noref(dst); + n = dst_get_neighbour(dst); if (n) { cast_type = n->type; rcu_read_unlock(); @@ -2856,7 +2855,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, rcu_read_lock(); dst = skb_dst(skb); if (dst) - n = dst_get_neighbour_noref(dst); + n = dst_get_neighbour(dst); if (ipv == 4) { /* IPv4 */ hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type); @@ -3210,8 +3209,7 @@ static int qeth_l3_stop(struct net_device *dev) return 0; } -static netdev_features_t qeth_l3_fix_features(struct net_device *dev, - netdev_features_t features) +static u32 qeth_l3_fix_features(struct net_device *dev, u32 features) { struct qeth_card *card = dev->ml_priv; @@ -3225,8 +3223,7 @@ static netdev_features_t qeth_l3_fix_features(struct net_device *dev, return features; } -static int qeth_l3_set_features(struct net_device *dev, - netdev_features_t features) +static int qeth_l3_set_features(struct net_device *dev, u32 features) { struct qeth_card *card = dev->ml_priv; u32 changed = dev->features ^ features; @@ -3492,13 +3489,14 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) else netif_carrier_off(card->dev); if (recover_flag == CARD_STATE_RECOVER) { - rtnl_lock(); if (recovery_mode) __qeth_l3_open(card->dev); - else + else { + rtnl_lock(); dev_open(card->dev); + rtnl_unlock(); + } qeth_l3_set_multicast_list(card->dev); - rtnl_unlock(); } /* let user_space know that device is online */ kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); @@ -3544,11 +3542,6 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, card->info.hwtrap = 1; } qeth_l3_stop_card(card, recovery_mode); - if ((card->options.cq == QETH_CQ_ENABLED) && card->dev) { - rtnl_lock(); - call_netdevice_notifiers(NETDEV_REBOOT, card->dev); - rtnl_unlock(); - } rc = ccw_device_set_offline(CARD_DDEV(card)); rc2 = ccw_device_set_offline(CARD_WDEV(card)); rc3 = ccw_device_set_offline(CARD_RDEV(card)); @@ -3603,7 +3596,6 @@ static int qeth_l3_recover(void *ptr) static void qeth_l3_shutdown(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); - qeth_set_allowed_threads(card, 0, 1); if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); qeth_qdio_clear_card(card, 0); diff --git a/trunk/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/trunk/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index 36739da8bc15..000294a9df80 100644 --- a/trunk/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/trunk/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -966,7 +966,7 @@ static int init_act_open(struct cxgbi_sock *csk) csk->saddr.sin_addr.s_addr = chba->ipv4addr; csk->rss_qid = 0; - csk->l2t = t3_l2t_get(t3dev, dst, ndev); + csk->l2t = t3_l2t_get(t3dev, dst_get_neighbour(dst), ndev); if (!csk->l2t) { pr_err("NO l2t available.\n"); return -EINVAL; diff --git a/trunk/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/trunk/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 5a4a3bfc60cf..ac7a9b1e3e23 100644 --- a/trunk/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/trunk/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -1127,7 +1127,6 @@ static int init_act_open(struct 
cxgbi_sock *csk) struct net_device *ndev = cdev->ports[csk->port_id]; struct port_info *pi = netdev_priv(ndev); struct sk_buff *skb = NULL; - struct neighbour *n; unsigned int step; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, @@ -1142,12 +1141,7 @@ static int init_act_open(struct cxgbi_sock *csk) cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); cxgbi_sock_get(csk); - n = dst_get_neighbour_noref(csk->dst); - if (!n) { - pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name); - goto rel_resource; - } - csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0); + csk->l2t = cxgb4_l2t_get(lldi->l2t, dst_get_neighbour(csk->dst), ndev, 0); if (!csk->l2t) { pr_err("%s, cannot alloc l2t.\n", ndev->name); goto rel_resource; diff --git a/trunk/drivers/scsi/cxgbi/libcxgbi.c b/trunk/drivers/scsi/cxgbi/libcxgbi.c index 1d25a87aa47b..c10f74a566f2 100644 --- a/trunk/drivers/scsi/cxgbi/libcxgbi.c +++ b/trunk/drivers/scsi/cxgbi/libcxgbi.c @@ -472,7 +472,6 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) struct net_device *ndev; struct cxgbi_device *cdev; struct rtable *rt = NULL; - struct neighbour *n; struct flowi4 fl4; struct cxgbi_sock *csk = NULL; unsigned int mtu = 0; @@ -494,12 +493,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) goto err_out; } dst = &rt->dst; - n = dst_get_neighbour_noref(dst); - if (!n) { - err = -ENODEV; - goto rel_rt; - } - ndev = n->dev; + ndev = dst_get_neighbour(dst)->dev; if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { pr_info("multi-cast route %pI4, port %u, dev %s.\n", @@ -513,7 +507,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr); mtu = ndev->mtu; pr_info("rt dev %s, loopback -> %s, mtu %u.\n", - n->dev->name, ndev->name, mtu); + dst_get_neighbour(dst)->dev->name, ndev->name, mtu); } cdev = cxgbi_device_find_by_netdev(ndev, &port); diff --git a/trunk/drivers/ssb/pci.c b/trunk/drivers/ssb/pci.c index 973223f5de8e..34c3bab90b9a 100644 --- a/trunk/drivers/ssb/pci.c +++ b/trunk/drivers/ssb/pci.c @@ -607,29 +607,6 @@ static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in) memcpy(&out->antenna_gain.ghz5, &out->antenna_gain.ghz24, sizeof(out->antenna_gain.ghz5)); - /* Extract FEM info */ - SPEX(fem.ghz2.tssipos, SSB_SPROM8_FEM2G, - SSB_SROM8_FEM_TSSIPOS, SSB_SROM8_FEM_TSSIPOS_SHIFT); - SPEX(fem.ghz2.extpa_gain, SSB_SPROM8_FEM2G, - SSB_SROM8_FEM_EXTPA_GAIN, SSB_SROM8_FEM_EXTPA_GAIN_SHIFT); - SPEX(fem.ghz2.pdet_range, SSB_SPROM8_FEM2G, - SSB_SROM8_FEM_PDET_RANGE, SSB_SROM8_FEM_PDET_RANGE_SHIFT); - SPEX(fem.ghz2.tr_iso, SSB_SPROM8_FEM2G, - SSB_SROM8_FEM_TR_ISO, SSB_SROM8_FEM_TR_ISO_SHIFT); - SPEX(fem.ghz2.antswlut, SSB_SPROM8_FEM2G, - SSB_SROM8_FEM_ANTSWLUT, SSB_SROM8_FEM_ANTSWLUT_SHIFT); - - SPEX(fem.ghz5.tssipos, SSB_SPROM8_FEM5G, - SSB_SROM8_FEM_TSSIPOS, SSB_SROM8_FEM_TSSIPOS_SHIFT); - SPEX(fem.ghz5.extpa_gain, SSB_SPROM8_FEM5G, - SSB_SROM8_FEM_EXTPA_GAIN, SSB_SROM8_FEM_EXTPA_GAIN_SHIFT); - SPEX(fem.ghz5.pdet_range, SSB_SPROM8_FEM5G, - SSB_SROM8_FEM_PDET_RANGE, SSB_SROM8_FEM_PDET_RANGE_SHIFT); - SPEX(fem.ghz5.tr_iso, SSB_SPROM8_FEM5G, - SSB_SROM8_FEM_TR_ISO, SSB_SROM8_FEM_TR_ISO_SHIFT); - SPEX(fem.ghz5.antswlut, SSB_SPROM8_FEM5G, - SSB_SROM8_FEM_ANTSWLUT, SSB_SROM8_FEM_ANTSWLUT_SHIFT); - sprom_extract_r458(out, in); /* TODO - get remaining rev 8 stuff needed */ diff --git a/trunk/drivers/tty/serial/pmac_zilog.c b/trunk/drivers/tty/serial/pmac_zilog.c index e9c2dfe471a2..5acd24a27d08 100644 --- 
a/trunk/drivers/tty/serial/pmac_zilog.c +++ b/trunk/drivers/tty/serial/pmac_zilog.c @@ -99,9 +99,6 @@ MODULE_LICENSE("GPL"); #define PMACZILOG_NAME "ttyPZ" #endif -#define pmz_debug(fmt, arg...) pr_debug("ttyPZ%d: " fmt, uap->port.line, ## arg) -#define pmz_error(fmt, arg...) pr_err("ttyPZ%d: " fmt, uap->port.line, ## arg) -#define pmz_info(fmt, arg...) pr_info("ttyPZ%d: " fmt, uap->port.line, ## arg) /* * For the sake of early serial console, we can do a pre-probe @@ -109,6 +106,7 @@ MODULE_LICENSE("GPL"); */ static struct uart_pmac_port pmz_ports[MAX_ZS_PORTS]; static int pmz_ports_count; +static DEFINE_MUTEX(pmz_irq_mutex); static struct uart_driver pmz_uart_reg = { .owner = THIS_MODULE, @@ -128,6 +126,9 @@ static void pmz_load_zsregs(struct uart_pmac_port *uap, u8 *regs) { int i; + if (ZS_IS_ASLEEP(uap)) + return; + /* Let pending transmits finish. */ for (i = 0; i < 1000; i++) { unsigned char stat = read_zsreg(uap, R1); @@ -215,24 +216,32 @@ static void pmz_maybe_update_regs(struct uart_pmac_port *uap) } } -static void pmz_interrupt_control(struct uart_pmac_port *uap, int enable) -{ - if (enable) { - uap->curregs[1] |= INT_ALL_Rx | TxINT_ENAB; - if (!ZS_IS_EXTCLK(uap)) - uap->curregs[1] |= EXT_INT_ENAB; - } else { - uap->curregs[1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK); - } - write_zsreg(uap, R1, uap->curregs[1]); -} - static struct tty_struct *pmz_receive_chars(struct uart_pmac_port *uap) { struct tty_struct *tty = NULL; unsigned char ch, r1, drop, error, flag; int loops = 0; + /* The interrupt can be enabled when the port isn't open, typically + * that happens when using one port is open and the other closed (stale + * interrupt) or when one port is used as a console. + */ + if (!ZS_IS_OPEN(uap)) { + pmz_debug("pmz: draining input\n"); + /* Port is closed, drain input data */ + for (;;) { + if ((++loops) > 1000) + goto flood; + (void)read_zsreg(uap, R1); + write_zsreg(uap, R0, ERR_RES); + (void)read_zsdata(uap); + ch = read_zsreg(uap, R0); + if (!(ch & Rx_CH_AV)) + break; + } + return NULL; + } + /* Sanity check, make sure the old bug is no longer happening */ if (uap->port.state == NULL || uap->port.state->port.tty == NULL) { WARN_ON(1); @@ -330,7 +339,9 @@ static struct tty_struct *pmz_receive_chars(struct uart_pmac_port *uap) return tty; flood: - pmz_interrupt_control(uap, 0); + uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK); + write_zsreg(uap, R1, uap->curregs[R1]); + zssync(uap); pmz_error("pmz: rx irq flood !\n"); return tty; } @@ -372,6 +383,8 @@ static void pmz_transmit_chars(struct uart_pmac_port *uap) { struct circ_buf *xmit; + if (ZS_IS_ASLEEP(uap)) + return; if (ZS_IS_CONS(uap)) { unsigned char status = read_zsreg(uap, R0); @@ -468,10 +481,6 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id) /* Channel A */ tty = NULL; if (r3 & (CHAEXT | CHATxIP | CHARxIP)) { - if (!ZS_IS_OPEN(uap_a)) { - pmz_debug("ChanA interrupt while open !\n"); - goto skip_a; - } write_zsreg(uap_a, R0, RES_H_IUS); zssync(uap_a); if (r3 & CHAEXT) @@ -482,21 +491,16 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id) pmz_transmit_chars(uap_a); rc = IRQ_HANDLED; } - skip_a: spin_unlock(&uap_a->port.lock); if (tty != NULL) tty_flip_buffer_push(tty); - if (!uap_b) + if (uap_b->node == NULL) goto out; spin_lock(&uap_b->port.lock); tty = NULL; if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) { - if (!ZS_IS_OPEN(uap_a)) { - pmz_debug("ChanB interrupt while open !\n"); - goto skip_b; - } write_zsreg(uap_b, R0, RES_H_IUS); zssync(uap_b); if (r3 & CHBEXT) @@ -507,12 +511,14 @@ static 
irqreturn_t pmz_interrupt(int irq, void *dev_id) pmz_transmit_chars(uap_b); rc = IRQ_HANDLED; } - skip_b: spin_unlock(&uap_b->port.lock); if (tty != NULL) tty_flip_buffer_push(tty); out: +#ifdef DEBUG_HARD + pmz_debug("irq done.\n"); +#endif return rc; } @@ -537,8 +543,12 @@ static inline u8 pmz_peek_status(struct uart_pmac_port *uap) */ static unsigned int pmz_tx_empty(struct uart_port *port) { + struct uart_pmac_port *uap = to_pmz(port); unsigned char status; + if (ZS_IS_ASLEEP(uap) || uap->node == NULL) + return TIOCSER_TEMT; + status = pmz_peek_status(to_pmz(port)); if (status & Tx_BUF_EMP) return TIOCSER_TEMT; @@ -560,7 +570,8 @@ static void pmz_set_mctrl(struct uart_port *port, unsigned int mctrl) if (ZS_IS_IRDA(uap)) return; /* We get called during boot with a port not up yet */ - if (!(ZS_IS_OPEN(uap) || ZS_IS_CONS(uap))) + if (ZS_IS_ASLEEP(uap) || + !(ZS_IS_OPEN(uap) || ZS_IS_CONS(uap))) return; set_bits = clear_bits = 0; @@ -579,7 +590,8 @@ static void pmz_set_mctrl(struct uart_port *port, unsigned int mctrl) /* NOTE: Not subject to 'transmitter active' rule. */ uap->curregs[R5] |= set_bits; uap->curregs[R5] &= ~clear_bits; - + if (ZS_IS_ASLEEP(uap)) + return; write_zsreg(uap, R5, uap->curregs[R5]); pmz_debug("pmz_set_mctrl: set bits: %x, clear bits: %x -> %x\n", set_bits, clear_bits, uap->curregs[R5]); @@ -597,6 +609,9 @@ static unsigned int pmz_get_mctrl(struct uart_port *port) unsigned char status; unsigned int ret; + if (ZS_IS_ASLEEP(uap) || uap->node == NULL) + return 0; + status = read_zsreg(uap, R0); ret = 0; @@ -634,6 +649,9 @@ static void pmz_start_tx(struct uart_port *port) uap->flags |= PMACZILOG_FLAG_TX_ACTIVE; uap->flags &= ~PMACZILOG_FLAG_TX_STOPPED; + if (ZS_IS_ASLEEP(uap) || uap->node == NULL) + return; + status = read_zsreg(uap, R0); /* TX busy? Just wait for the TX done interrupt. */ @@ -672,6 +690,9 @@ static void pmz_stop_rx(struct uart_port *port) { struct uart_pmac_port *uap = to_pmz(port); + if (ZS_IS_ASLEEP(uap) || uap->node == NULL) + return; + pmz_debug("pmz: stop_rx()()\n"); /* Disable all RX interrupts. */ @@ -690,12 +711,14 @@ static void pmz_enable_ms(struct uart_port *port) struct uart_pmac_port *uap = to_pmz(port); unsigned char new_reg; - if (ZS_IS_IRDA(uap)) + if (ZS_IS_IRDA(uap) || uap->node == NULL) return; new_reg = uap->curregs[R15] | (DCDIE | SYNCIE | CTSIE); if (new_reg != uap->curregs[R15]) { uap->curregs[R15] = new_reg; + if (ZS_IS_ASLEEP(uap)) + return; /* NOTE: Not subject to 'transmitter active' rule. */ write_zsreg(uap, R15, uap->curregs[R15]); } @@ -711,6 +734,8 @@ static void pmz_break_ctl(struct uart_port *port, int break_state) unsigned char set_bits, clear_bits, new_reg; unsigned long flags; + if (uap->node == NULL) + return; set_bits = clear_bits = 0; if (break_state) @@ -723,6 +748,12 @@ static void pmz_break_ctl(struct uart_port *port, int break_state) new_reg = (uap->curregs[R5] | set_bits) & ~clear_bits; if (new_reg != uap->curregs[R5]) { uap->curregs[R5] = new_reg; + + /* NOTE: Not subject to 'transmitter active' rule. 
*/ + if (ZS_IS_ASLEEP(uap)) { + spin_unlock_irqrestore(&port->lock, flags); + return; + } write_zsreg(uap, R5, uap->curregs[R5]); } @@ -896,21 +927,14 @@ static int __pmz_startup(struct uart_pmac_port *uap) static void pmz_irda_reset(struct uart_pmac_port *uap) { - unsigned long flags; - - spin_lock_irqsave(&uap->port.lock, flags); uap->curregs[R5] |= DTR; write_zsreg(uap, R5, uap->curregs[R5]); zssync(uap); - spin_unlock_irqrestore(&uap->port.lock, flags); - msleep(110); - - spin_lock_irqsave(&uap->port.lock, flags); + mdelay(110); uap->curregs[R5] &= ~DTR; write_zsreg(uap, R5, uap->curregs[R5]); zssync(uap); - spin_unlock_irqrestore(&uap->port.lock, flags); - msleep(10); + mdelay(10); } /* @@ -925,6 +949,13 @@ static int pmz_startup(struct uart_port *port) pmz_debug("pmz: startup()\n"); + if (ZS_IS_ASLEEP(uap)) + return -EAGAIN; + if (uap->node == NULL) + return -ENODEV; + + mutex_lock(&pmz_irq_mutex); + uap->flags |= PMACZILOG_FLAG_IS_OPEN; /* A console is never powered down. Else, power up and @@ -935,14 +966,18 @@ static int pmz_startup(struct uart_port *port) pwr_delay = __pmz_startup(uap); spin_unlock_irqrestore(&port->lock, flags); } - sprintf(uap->irq_name, PMACZILOG_NAME"%d", uap->port.line); + + pmz_get_port_A(uap)->flags |= PMACZILOG_FLAG_IS_IRQ_ON; if (request_irq(uap->port.irq, pmz_interrupt, IRQF_SHARED, - uap->irq_name, uap)) { + "SCC", uap)) { pmz_error("Unable to register zs interrupt handler.\n"); pmz_set_scc_power(uap, 0); + mutex_unlock(&pmz_irq_mutex); return -ENXIO; } + mutex_unlock(&pmz_irq_mutex); + /* Right now, we deal with delay by blocking here, I'll be * smarter later on */ @@ -955,9 +990,12 @@ static int pmz_startup(struct uart_port *port) if (ZS_IS_IRDA(uap)) pmz_irda_reset(uap); - /* Enable interrupt requests for the channel */ + /* Enable interrupts emission from the chip */ spin_lock_irqsave(&port->lock, flags); - pmz_interrupt_control(uap, 1); + uap->curregs[R1] |= INT_ALL_Rx | TxINT_ENAB; + if (!ZS_IS_EXTCLK(uap)) + uap->curregs[R1] |= EXT_INT_ENAB; + write_zsreg(uap, R1, uap->curregs[R1]); spin_unlock_irqrestore(&port->lock, flags); pmz_debug("pmz: startup() done.\n"); @@ -972,22 +1010,10 @@ static void pmz_shutdown(struct uart_port *port) pmz_debug("pmz: shutdown()\n"); - spin_lock_irqsave(&port->lock, flags); - - /* Disable interrupt requests for the channel */ - pmz_interrupt_control(uap, 0); - - if (!ZS_IS_CONS(uap)) { - /* Disable receiver and transmitter */ - uap->curregs[R3] &= ~RxENABLE; - uap->curregs[R5] &= ~TxENABLE; - - /* Disable break assertion */ - uap->curregs[R5] &= ~SND_BRK; - pmz_maybe_update_regs(uap); - } + if (uap->node == NULL) + return; - spin_unlock_irqrestore(&port->lock, flags); + mutex_lock(&pmz_irq_mutex); /* Release interrupt handler */ free_irq(uap->port.irq, uap); @@ -996,11 +1022,37 @@ static void pmz_shutdown(struct uart_port *port) uap->flags &= ~PMACZILOG_FLAG_IS_OPEN; - if (!ZS_IS_CONS(uap)) - pmz_set_scc_power(uap, 0); /* Shut the chip down */ + if (!ZS_IS_OPEN(uap->mate)) + pmz_get_port_A(uap)->flags &= ~PMACZILOG_FLAG_IS_IRQ_ON; + + /* Disable interrupts */ + if (!ZS_IS_ASLEEP(uap)) { + uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK); + write_zsreg(uap, R1, uap->curregs[R1]); + zssync(uap); + } + + if (ZS_IS_CONS(uap) || ZS_IS_ASLEEP(uap)) { + spin_unlock_irqrestore(&port->lock, flags); + mutex_unlock(&pmz_irq_mutex); + return; + } + + /* Disable receiver and transmitter. */ + uap->curregs[R3] &= ~RxENABLE; + uap->curregs[R5] &= ~TxENABLE; + + /* Disable all interrupts and BRK assertion. 
*/ + uap->curregs[R5] &= ~SND_BRK; + pmz_maybe_update_regs(uap); + + /* Shut the chip down */ + pmz_set_scc_power(uap, 0); spin_unlock_irqrestore(&port->lock, flags); + mutex_unlock(&pmz_irq_mutex); + pmz_debug("pmz: shutdown() done.\n"); } @@ -1248,6 +1300,9 @@ static void __pmz_set_termios(struct uart_port *port, struct ktermios *termios, pmz_debug("pmz: set_termios()\n"); + if (ZS_IS_ASLEEP(uap)) + return; + memcpy(&uap->termios_cache, termios, sizeof(struct ktermios)); /* XXX Check which revs of machines actually allow 1 and 4Mb speeds @@ -1297,15 +1352,19 @@ static void pmz_set_termios(struct uart_port *port, struct ktermios *termios, spin_lock_irqsave(&port->lock, flags); /* Disable IRQs on the port */ - pmz_interrupt_control(uap, 0); + uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK); + write_zsreg(uap, R1, uap->curregs[R1]); /* Setup new port configuration */ __pmz_set_termios(port, termios, old); /* Re-enable IRQs on the port */ - if (ZS_IS_OPEN(uap)) - pmz_interrupt_control(uap, 1); - + if (ZS_IS_OPEN(uap)) { + uap->curregs[R1] |= INT_ALL_Rx | TxINT_ENAB; + if (!ZS_IS_EXTCLK(uap)) + uap->curregs[R1] |= EXT_INT_ENAB; + write_zsreg(uap, R1, uap->curregs[R1]); + } spin_unlock_irqrestore(&port->lock, flags); } @@ -1545,34 +1604,25 @@ static void pmz_dispose_port(struct uart_pmac_port *uap) */ static int pmz_attach(struct macio_dev *mdev, const struct of_device_id *match) { - struct uart_pmac_port *uap; int i; /* Iterate the pmz_ports array to find a matching entry */ for (i = 0; i < MAX_ZS_PORTS; i++) - if (pmz_ports[i].node == mdev->ofdev.dev.of_node) - break; - if (i >= MAX_ZS_PORTS) - return -ENODEV; - - - uap = &pmz_ports[i]; - uap->dev = mdev; - uap->port.dev = &mdev->ofdev.dev; - dev_set_drvdata(&mdev->ofdev.dev, uap); - - /* We still activate the port even when failing to request resources - * to work around bugs in ancient Apple device-trees - */ - if (macio_request_resources(uap->dev, "pmac_zilog")) - printk(KERN_WARNING "%s: Failed to request resource" - ", port still active\n", - uap->node->name); - else - uap->flags |= PMACZILOG_FLAG_RSRC_REQUESTED; - - return uart_add_one_port(&pmz_uart_reg, &uap->port); + if (pmz_ports[i].node == mdev->ofdev.dev.of_node) { + struct uart_pmac_port *uap = &pmz_ports[i]; + + uap->dev = mdev; + dev_set_drvdata(&mdev->ofdev.dev, uap); + if (macio_request_resources(uap->dev, "pmac_zilog")) + printk(KERN_WARNING "%s: Failed to request resource" + ", port still active\n", + uap->node->name); + else + uap->flags |= PMACZILOG_FLAG_RSRC_REQUESTED; + return 0; + } + return -ENODEV; } /* @@ -1586,15 +1636,12 @@ static int pmz_detach(struct macio_dev *mdev) if (!uap) return -ENODEV; - uart_remove_one_port(&pmz_uart_reg, &uap->port); - if (uap->flags & PMACZILOG_FLAG_RSRC_REQUESTED) { macio_release_resources(uap->dev); uap->flags &= ~PMACZILOG_FLAG_RSRC_REQUESTED; } dev_set_drvdata(&mdev->ofdev.dev, NULL); uap->dev = NULL; - uap->port.dev = NULL; return 0; } @@ -1603,13 +1650,59 @@ static int pmz_detach(struct macio_dev *mdev) static int pmz_suspend(struct macio_dev *mdev, pm_message_t pm_state) { struct uart_pmac_port *uap = dev_get_drvdata(&mdev->ofdev.dev); + struct uart_state *state; + unsigned long flags; if (uap == NULL) { printk("HRM... 
pmz_suspend with NULL uap\n"); return 0; } - uart_suspend_port(&pmz_uart_reg, &uap->port); + if (pm_state.event == mdev->ofdev.dev.power.power_state.event) + return 0; + + pmz_debug("suspend, switching to state %d\n", pm_state.event); + + state = pmz_uart_reg.state + uap->port.line; + + mutex_lock(&pmz_irq_mutex); + mutex_lock(&state->port.mutex); + + spin_lock_irqsave(&uap->port.lock, flags); + + if (ZS_IS_OPEN(uap) || ZS_IS_CONS(uap)) { + /* Disable receiver and transmitter. */ + uap->curregs[R3] &= ~RxENABLE; + uap->curregs[R5] &= ~TxENABLE; + + /* Disable all interrupts and BRK assertion. */ + uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK); + uap->curregs[R5] &= ~SND_BRK; + pmz_load_zsregs(uap, uap->curregs); + uap->flags |= PMACZILOG_FLAG_IS_ASLEEP; + mb(); + } + + spin_unlock_irqrestore(&uap->port.lock, flags); + + if (ZS_IS_OPEN(uap) || ZS_IS_OPEN(uap->mate)) + if (ZS_IS_ASLEEP(uap->mate) && ZS_IS_IRQ_ON(pmz_get_port_A(uap))) { + pmz_get_port_A(uap)->flags &= ~PMACZILOG_FLAG_IS_IRQ_ON; + disable_irq(uap->port.irq); + } + + if (ZS_IS_CONS(uap)) + uap->port.cons->flags &= ~CON_ENABLED; + + /* Shut the chip down */ + pmz_set_scc_power(uap, 0); + + mutex_unlock(&state->port.mutex); + mutex_unlock(&pmz_irq_mutex); + + pmz_debug("suspend, switching complete\n"); + + mdev->ofdev.dev.power.power_state = pm_state; return 0; } @@ -1618,20 +1711,76 @@ static int pmz_suspend(struct macio_dev *mdev, pm_message_t pm_state) static int pmz_resume(struct macio_dev *mdev) { struct uart_pmac_port *uap = dev_get_drvdata(&mdev->ofdev.dev); + struct uart_state *state; + unsigned long flags; + int pwr_delay = 0; if (uap == NULL) return 0; - uart_resume_port(&pmz_uart_reg, &uap->port); + if (mdev->ofdev.dev.power.power_state.event == PM_EVENT_ON) + return 0; + + pmz_debug("resume, switching to state 0\n"); + + state = pmz_uart_reg.state + uap->port.line; + + mutex_lock(&pmz_irq_mutex); + mutex_lock(&state->port.mutex); + + spin_lock_irqsave(&uap->port.lock, flags); + if (!ZS_IS_OPEN(uap) && !ZS_IS_CONS(uap)) { + spin_unlock_irqrestore(&uap->port.lock, flags); + goto bail; + } + pwr_delay = __pmz_startup(uap); + + /* Take care of config that may have changed while asleep */ + __pmz_set_termios(&uap->port, &uap->termios_cache, NULL); + + if (ZS_IS_OPEN(uap)) { + /* Enable interrupts */ + uap->curregs[R1] |= INT_ALL_Rx | TxINT_ENAB; + if (!ZS_IS_EXTCLK(uap)) + uap->curregs[R1] |= EXT_INT_ENAB; + write_zsreg(uap, R1, uap->curregs[R1]); + } + + spin_unlock_irqrestore(&uap->port.lock, flags); + + if (ZS_IS_CONS(uap)) + uap->port.cons->flags |= CON_ENABLED; + + /* Re-enable IRQ on the controller */ + if (ZS_IS_OPEN(uap) && !ZS_IS_IRQ_ON(pmz_get_port_A(uap))) { + pmz_get_port_A(uap)->flags |= PMACZILOG_FLAG_IS_IRQ_ON; + enable_irq(uap->port.irq); + } + + bail: + mutex_unlock(&state->port.mutex); + mutex_unlock(&pmz_irq_mutex); + + /* Right now, we deal with delay by blocking here, I'll be + * smarter later on + */ + if (pwr_delay != 0) { + pmz_debug("pmz: delaying %d ms\n", pwr_delay); + msleep(pwr_delay); + } + + pmz_debug("resume, switching complete\n"); + + mdev->ofdev.dev.power.power_state.event = PM_EVENT_ON; return 0; } /* * Probe all ports in the system and build the ports array, we register - * with the serial layer later, so we get a proper struct device which - * allows the tty to attach properly. This is later than it used to be - * but the tty layer really wants it that way. 
+ * with the serial layer at this point, the macio-type probing is only + * used later to "attach" to the sysfs tree so we get power management + * events */ static int __init pmz_probe(void) { @@ -1667,10 +1816,8 @@ static int __init pmz_probe(void) /* * Fill basic fields in the port structures */ - if (node_b != NULL) { - pmz_ports[count].mate = &pmz_ports[count+1]; - pmz_ports[count+1].mate = &pmz_ports[count]; - } + pmz_ports[count].mate = &pmz_ports[count+1]; + pmz_ports[count+1].mate = &pmz_ports[count]; pmz_ports[count].flags = PMACZILOG_FLAG_IS_CHANNEL_A; pmz_ports[count].node = node_a; pmz_ports[count+1].node = node_b; @@ -1708,8 +1855,8 @@ static int __init pmz_init_port(struct uart_pmac_port *uap) struct resource *r_ports; int irq; - r_ports = platform_get_resource(uap->pdev, IORESOURCE_MEM, 0); - irq = platform_get_irq(uap->pdev, 0); + r_ports = platform_get_resource(uap->node, IORESOURCE_MEM, 0); + irq = platform_get_irq(uap->node, 0); if (!r_ports || !irq) return -ENODEV; @@ -1738,19 +1885,19 @@ static int __init pmz_probe(void) pmz_ports_count = 0; + pmz_ports[0].mate = &pmz_ports[1]; pmz_ports[0].port.line = 0; pmz_ports[0].flags = PMACZILOG_FLAG_IS_CHANNEL_A; - pmz_ports[0].pdev = &scc_a_pdev; + pmz_ports[0].node = &scc_a_pdev; err = pmz_init_port(&pmz_ports[0]); if (err) return err; pmz_ports_count++; - pmz_ports[0].mate = &pmz_ports[1]; pmz_ports[1].mate = &pmz_ports[0]; pmz_ports[1].port.line = 1; pmz_ports[1].flags = 0; - pmz_ports[1].pdev = &scc_b_pdev; + pmz_ports[1].node = &scc_b_pdev; err = pmz_init_port(&pmz_ports[1]); if (err) return err; @@ -1766,35 +1913,16 @@ static void pmz_dispose_port(struct uart_pmac_port *uap) static int __init pmz_attach(struct platform_device *pdev) { - struct uart_pmac_port *uap; int i; - /* Iterate the pmz_ports array to find a matching entry */ for (i = 0; i < pmz_ports_count; i++) - if (pmz_ports[i].pdev == pdev) - break; - if (i >= pmz_ports_count) - return -ENODEV; - - uap = &pmz_ports[i]; - uap->port.dev = &pdev->dev; - platform_set_drvdata(pdev, uap); - - return uart_add_one_port(&pmz_uart_reg, &uap->port); + if (pmz_ports[i].node == pdev) + return 0; + return -ENODEV; } static int __exit pmz_detach(struct platform_device *pdev) { - struct uart_pmac_port *uap = platform_get_drvdata(pdev); - - if (!uap) - return -ENODEV; - - uart_remove_one_port(&pmz_uart_reg, &uap->port); - - platform_set_drvdata(pdev, NULL); - uap->port.dev = NULL; - return 0; } @@ -1826,13 +1954,38 @@ static struct console pmz_console = { */ static int __init pmz_register(void) { + int i, rc; + pmz_uart_reg.nr = pmz_ports_count; pmz_uart_reg.cons = PMACZILOG_CONSOLE; /* * Register this driver with the serial core */ - return uart_register_driver(&pmz_uart_reg); + rc = uart_register_driver(&pmz_uart_reg); + if (rc) + return rc; + + /* + * Register each port with the serial core + */ + for (i = 0; i < pmz_ports_count; i++) { + struct uart_pmac_port *uport = &pmz_ports[i]; + /* NULL node may happen on wallstreet */ + if (uport->node != NULL) + rc = uart_add_one_port(&pmz_uart_reg, &uport->port); + if (rc) + goto err_out; + } + + return 0; +err_out: + while (i-- > 0) { + struct uart_pmac_port *uport = &pmz_ports[i]; + uart_remove_one_port(&pmz_uart_reg, &uport->port); + } + uart_unregister_driver(&pmz_uart_reg); + return rc; } #ifdef CONFIG_PPC_PMAC @@ -1931,13 +2084,10 @@ static void __exit exit_pmz(void) for (i = 0; i < pmz_ports_count; i++) { struct uart_pmac_port *uport = &pmz_ports[i]; -#ifdef CONFIG_PPC_PMAC - if (uport->node != NULL) - 
pmz_dispose_port(uport); -#else - if (uport->pdev != NULL) + if (uport->node != NULL) { + uart_remove_one_port(&pmz_uart_reg, &uport->port); pmz_dispose_port(uport); -#endif + } } /* Unregister UART driver */ uart_unregister_driver(&pmz_uart_reg); @@ -1964,6 +2114,8 @@ static void pmz_console_write(struct console *con, const char *s, unsigned int c struct uart_pmac_port *uap = &pmz_ports[con->index]; unsigned long flags; + if (ZS_IS_ASLEEP(uap)) + return; spin_lock_irqsave(&uap->port.lock, flags); /* Turn of interrupts and enable the transmitter. */ @@ -2008,13 +2160,8 @@ static int __init pmz_console_setup(struct console *co, char *options) if (co->index >= pmz_ports_count) co->index = 0; uap = &pmz_ports[co->index]; -#ifdef CONFIG_PPC_PMAC if (uap->node == NULL) return -ENODEV; -#else - if (uap->pdev == NULL) - return -ENODEV; -#endif port = &uap->port; /* diff --git a/trunk/drivers/tty/serial/pmac_zilog.h b/trunk/drivers/tty/serial/pmac_zilog.h index 3483242ee3e0..cbc34fbb1b20 100644 --- a/trunk/drivers/tty/serial/pmac_zilog.h +++ b/trunk/drivers/tty/serial/pmac_zilog.h @@ -1,6 +1,16 @@ #ifndef __PMAC_ZILOG_H__ #define __PMAC_ZILOG_H__ +#ifdef CONFIG_PPC_PMAC +#define pmz_debug(fmt, arg...) dev_dbg(&uap->dev->ofdev.dev, fmt, ## arg) +#define pmz_error(fmt, arg...) dev_err(&uap->dev->ofdev.dev, fmt, ## arg) +#define pmz_info(fmt, arg...) dev_info(&uap->dev->ofdev.dev, fmt, ## arg) +#else +#define pmz_debug(fmt, arg...) dev_dbg(&uap->node->dev, fmt, ## arg) +#define pmz_error(fmt, arg...) dev_err(&uap->node->dev, fmt, ## arg) +#define pmz_info(fmt, arg...) dev_info(&uap->node->dev, fmt, ## arg) +#endif + /* * At most 2 ESCCs with 2 ports each */ @@ -25,7 +35,7 @@ struct uart_pmac_port { */ struct device_node *node; #else - struct platform_device *pdev; + struct platform_device *node; #endif /* Port type as obtained from device tree (IRDA, modem, ...) */ @@ -40,11 +50,14 @@ struct uart_pmac_port { #define PMACZILOG_FLAG_REGS_HELD 0x00000010 #define PMACZILOG_FLAG_TX_STOPPED 0x00000020 #define PMACZILOG_FLAG_TX_ACTIVE 0x00000040 +#define PMACZILOG_FLAG_ENABLED 0x00000080 #define PMACZILOG_FLAG_IS_IRDA 0x00000100 #define PMACZILOG_FLAG_IS_INTMODEM 0x00000200 #define PMACZILOG_FLAG_HAS_DMA 0x00000400 #define PMACZILOG_FLAG_RSRC_REQUESTED 0x00000800 +#define PMACZILOG_FLAG_IS_ASLEEP 0x00001000 #define PMACZILOG_FLAG_IS_OPEN 0x00002000 +#define PMACZILOG_FLAG_IS_IRQ_ON 0x00004000 #define PMACZILOG_FLAG_IS_EXTCLK 0x00008000 #define PMACZILOG_FLAG_BREAK 0x00010000 @@ -61,8 +74,6 @@ struct uart_pmac_port { volatile struct dbdma_regs __iomem *rx_dma_regs; #endif - unsigned char irq_name[8]; - struct ktermios termios_cache; }; @@ -377,7 +388,9 @@ static inline void zssync(struct uart_pmac_port *port) #define ZS_IS_IRDA(UP) ((UP)->flags & PMACZILOG_FLAG_IS_IRDA) #define ZS_IS_INTMODEM(UP) ((UP)->flags & PMACZILOG_FLAG_IS_INTMODEM) #define ZS_HAS_DMA(UP) ((UP)->flags & PMACZILOG_FLAG_HAS_DMA) +#define ZS_IS_ASLEEP(UP) ((UP)->flags & PMACZILOG_FLAG_IS_ASLEEP) #define ZS_IS_OPEN(UP) ((UP)->flags & PMACZILOG_FLAG_IS_OPEN) +#define ZS_IS_IRQ_ON(UP) ((UP)->flags & PMACZILOG_FLAG_IS_IRQ_ON) #define ZS_IS_EXTCLK(UP) ((UP)->flags & PMACZILOG_FLAG_IS_EXTCLK) #endif /* __PMAC_ZILOG_H__ */ diff --git a/trunk/drivers/tty/serial/ucc_uart.c b/trunk/drivers/tty/serial/ucc_uart.c index 2ebe606a2db1..cea8918b8233 100644 --- a/trunk/drivers/tty/serial/ucc_uart.c +++ b/trunk/drivers/tty/serial/ucc_uart.c @@ -963,9 +963,6 @@ static void qe_uart_set_termios(struct uart_port *port, /* Do we really need a spinlock here? 
*/ spin_lock_irqsave(&port->lock, flags); - /* Update the per-port timeout. */ - uart_update_timeout(port, termios->c_cflag, baud); - out_be16(&uccp->upsmr, upsmr); if (soft_uart) { out_be16(&uccup->supsmr, supsmr); diff --git a/trunk/drivers/usb/gadget/f_phonet.c b/trunk/drivers/usb/gadget/f_phonet.c index 7cdcb63b21ff..16a509ae517b 100644 --- a/trunk/drivers/usb/gadget/f_phonet.c +++ b/trunk/drivers/usb/gadget/f_phonet.c @@ -298,10 +298,11 @@ static void pn_net_setup(struct net_device *dev) static int pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags) { + struct net_device *dev = fp->dev; struct page *page; int err; - page = alloc_page(gfp_flags); + page = __netdev_alloc_page(dev, gfp_flags); if (!page) return -ENOMEM; @@ -311,7 +312,7 @@ pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags) err = usb_ep_queue(fp->out_ep, req, gfp_flags); if (unlikely(err)) - put_page(page); + netdev_free_page(dev, page); return err; } @@ -373,9 +374,9 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req) } if (page) - put_page(page); + netdev_free_page(dev, page); if (req) - pn_rx_submit(fp, req, GFP_ATOMIC | __GFP_COLD); + pn_rx_submit(fp, req, GFP_ATOMIC); } /*-------------------------------------------------------------------------*/ @@ -435,7 +436,7 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt) netif_carrier_on(dev); for (i = 0; i < phonet_rxq_size; i++) - pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC | __GFP_COLD); + pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC); } spin_unlock(&port->lock); return 0; diff --git a/trunk/drivers/video/offb.c b/trunk/drivers/video/offb.c index 0c4f34311eda..cb163a5397be 100644 --- a/trunk/drivers/video/offb.c +++ b/trunk/drivers/video/offb.c @@ -41,14 +41,13 @@ /* Supported palette hacks */ enum { cmap_unknown, - cmap_simple, /* ATI Mach64 */ + cmap_m64, /* ATI Mach64 */ cmap_r128, /* ATI Rage128 */ cmap_M3A, /* ATI Rage Mobility M3 Head A */ cmap_M3B, /* ATI Rage Mobility M3 Head B */ cmap_radeon, /* ATI Radeon */ cmap_gxt2000, /* IBM GXT2000 */ cmap_avivo, /* ATI R5xx */ - cmap_qemu, /* qemu vga */ }; struct offb_par { @@ -101,32 +100,36 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { struct offb_par *par = (struct offb_par *) info->par; - - if (info->fix.visual == FB_VISUAL_TRUECOLOR) { - u32 *pal = info->pseudo_palette; - u32 cr = red >> (16 - info->var.red.length); - u32 cg = green >> (16 - info->var.green.length); - u32 cb = blue >> (16 - info->var.blue.length); - u32 value; - - if (regno >= 16) - return -EINVAL; - - value = (cr << info->var.red.offset) | - (cg << info->var.green.offset) | - (cb << info->var.blue.offset); - if (info->var.transp.length > 0) { - u32 mask = (1 << info->var.transp.length) - 1; - mask <<= info->var.transp.offset; - value |= mask; + int i, depth; + u32 *pal = info->pseudo_palette; + + depth = info->var.bits_per_pixel; + if (depth == 16) + depth = (info->var.green.length == 5) ? 
15 : 16; + + if (regno > 255 || + (depth == 16 && regno > 63) || + (depth == 15 && regno > 31)) + return 1; + + if (regno < 16) { + switch (depth) { + case 15: + pal[regno] = (regno << 10) | (regno << 5) | regno; + break; + case 16: + pal[regno] = (regno << 11) | (regno << 5) | regno; + break; + case 24: + pal[regno] = (regno << 16) | (regno << 8) | regno; + break; + case 32: + i = (regno << 8) | regno; + pal[regno] = (i << 16) | i; + break; } - pal[regno] = value; - return 0; } - if (regno > 255) - return -EINVAL; - red >>= 8; green >>= 8; blue >>= 8; @@ -135,7 +138,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, return 0; switch (par->cmap_type) { - case cmap_simple: + case cmap_m64: writeb(regno, par->cmap_adr); writeb(red, par->cmap_data); writeb(green, par->cmap_data); @@ -205,7 +208,7 @@ static int offb_blank(int blank, struct fb_info *info) if (blank) for (i = 0; i < 256; i++) { switch (par->cmap_type) { - case cmap_simple: + case cmap_m64: writeb(i, par->cmap_adr); for (j = 0; j < 3; j++) writeb(0, par->cmap_data); @@ -347,7 +350,7 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp par->cmap_adr = ioremap(base + 0x7ff000, 0x1000) + 0xcc0; par->cmap_data = par->cmap_adr + 1; - par->cmap_type = cmap_simple; + par->cmap_type = cmap_m64; } else if (dp && (of_device_is_compatible(dp, "pci1014,b7") || of_device_is_compatible(dp, "pci1014,21c"))) { par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000); @@ -368,16 +371,6 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp par->cmap_type = cmap_avivo; } of_node_put(pciparent); - } else if (dp && of_device_is_compatible(dp, "qemu,std-vga")) { - const u32 io_of_addr[3] = { 0x01000000, 0x0, 0x0 }; - u64 io_addr = of_translate_address(dp, io_of_addr); - if (io_addr != OF_BAD_ADDR) { - par->cmap_adr = ioremap(io_addr + 0x3c8, 2); - if (par->cmap_adr) { - par->cmap_type = cmap_simple; - par->cmap_data = par->cmap_adr + 1; - } - } } info->fix.visual = (par->cmap_type != cmap_unknown) ? 
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_STATIC_PSEUDOCOLOR; @@ -388,7 +381,7 @@ static void __init offb_init_fb(const char *name, const char *full_name, int pitch, unsigned long address, int foreign_endian, struct device_node *dp) { - unsigned long res_size = pitch * height; + unsigned long res_size = pitch * height * (depth + 7) / 8; struct offb_par *par = &default_par; unsigned long res_start = address; struct fb_fix_screeninfo *fix; diff --git a/trunk/drivers/virtio/virtio_mmio.c b/trunk/drivers/virtio/virtio_mmio.c index 0269717436af..7317dc2ec426 100644 --- a/trunk/drivers/virtio/virtio_mmio.c +++ b/trunk/drivers/virtio/virtio_mmio.c @@ -361,12 +361,7 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, return 0; } -static const char *vm_bus_name(struct virtio_device *vdev) -{ - struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); - return vm_dev->pdev->name; -} static struct virtio_config_ops virtio_mmio_config_ops = { .get = vm_get, @@ -378,7 +373,6 @@ static struct virtio_config_ops virtio_mmio_config_ops = { .del_vqs = vm_del_vqs, .get_features = vm_get_features, .finalize_features = vm_finalize_features, - .bus_name = vm_bus_name, }; diff --git a/trunk/drivers/virtio/virtio_pci.c b/trunk/drivers/virtio/virtio_pci.c index baabb7937ec2..03d1984bd363 100644 --- a/trunk/drivers/virtio/virtio_pci.c +++ b/trunk/drivers/virtio/virtio_pci.c @@ -598,13 +598,6 @@ static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, false, false); } -static const char *vp_bus_name(struct virtio_device *vdev) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - - return pci_name(vp_dev->pci_dev); -} - static struct virtio_config_ops virtio_pci_config_ops = { .get = vp_get, .set = vp_set, @@ -615,7 +608,6 @@ static struct virtio_config_ops virtio_pci_config_ops = { .del_vqs = vp_del_vqs, .get_features = vp_get_features, .finalize_features = vp_finalize_features, - .bus_name = vp_bus_name, }; static void virtio_pci_release_dev(struct device *_d) diff --git a/trunk/drivers/watchdog/coh901327_wdt.c b/trunk/drivers/watchdog/coh901327_wdt.c index 5b89f7d6cd0f..03f449a430d2 100644 --- a/trunk/drivers/watchdog/coh901327_wdt.c +++ b/trunk/drivers/watchdog/coh901327_wdt.c @@ -76,6 +76,8 @@ static int irq; static void __iomem *virtbase; static unsigned long coh901327_users; static unsigned long boot_status; +static u16 wdogenablestore; +static u16 irqmaskstore; static struct device *parent; /* @@ -459,10 +461,6 @@ static int __init coh901327_probe(struct platform_device *pdev) } #ifdef CONFIG_PM - -static u16 wdogenablestore; -static u16 irqmaskstore; - static int coh901327_suspend(struct platform_device *pdev, pm_message_t state) { irqmaskstore = readw(virtbase + U300_WDOG_IMR) & 0x0001U; diff --git a/trunk/drivers/watchdog/hpwdt.c b/trunk/drivers/watchdog/hpwdt.c index 8464ea1c36a1..3774c9b8dac9 100644 --- a/trunk/drivers/watchdog/hpwdt.c +++ b/trunk/drivers/watchdog/hpwdt.c @@ -231,7 +231,6 @@ static int __devinit cru_detect(unsigned long map_entry, cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE; - set_memory_x((unsigned long)bios32_entrypoint, (2 * PAGE_SIZE)); asminline_call(&cmn_regs, bios32_entrypoint); if (cmn_regs.u1.ral != 0) { @@ -249,10 +248,8 @@ static int __devinit cru_detect(unsigned long map_entry, if ((physical_bios_base + physical_bios_offset)) { cru_rom_addr = ioremap(cru_physical_address, cru_length); - if (cru_rom_addr) { - set_memory_x((unsigned long)cru_rom_addr, cru_length); + if (cru_rom_addr) retval = 0; - } } printk(KERN_DEBUG "hpwdt: CRU Base Address: 
0x%lx\n", diff --git a/trunk/drivers/watchdog/iTCO_wdt.c b/trunk/drivers/watchdog/iTCO_wdt.c index 99796c5d913d..ba6ad662635a 100644 --- a/trunk/drivers/watchdog/iTCO_wdt.c +++ b/trunk/drivers/watchdog/iTCO_wdt.c @@ -384,10 +384,10 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); -static int turn_SMI_watchdog_clear_off = 1; +static int turn_SMI_watchdog_clear_off = 0; module_param(turn_SMI_watchdog_clear_off, int, 0); MODULE_PARM_DESC(turn_SMI_watchdog_clear_off, - "Turn off SMI clearing watchdog (depends on TCO-version)(default=1)"); + "Turn off SMI clearing watchdog (default=0)"); /* * Some TCO specific functions @@ -813,7 +813,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev, ret = -EIO; goto out_unmap; } - if (turn_SMI_watchdog_clear_off >= iTCO_wdt_private.iTCO_version) { + if (turn_SMI_watchdog_clear_off) { /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */ val32 = inl(SMI_EN); val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */ diff --git a/trunk/drivers/watchdog/sp805_wdt.c b/trunk/drivers/watchdog/sp805_wdt.c index bfaf9bb1ee0d..cc2cfbe33b30 100644 --- a/trunk/drivers/watchdog/sp805_wdt.c +++ b/trunk/drivers/watchdog/sp805_wdt.c @@ -351,7 +351,7 @@ static int __devexit sp805_wdt_remove(struct amba_device *adev) return 0; } -static struct amba_id sp805_wdt_ids[] = { +static struct amba_id sp805_wdt_ids[] __initdata = { { .id = 0x00141805, .mask = 0x00ffffff, diff --git a/trunk/fs/ceph/dir.c b/trunk/fs/ceph/dir.c index 98954003a8d3..3eeb97661262 100644 --- a/trunk/fs/ceph/dir.c +++ b/trunk/fs/ceph/dir.c @@ -1094,19 +1094,42 @@ static int ceph_snapdir_d_revalidate(struct dentry *dentry, /* * Set/clear/test dir complete flag on the dir's dentry. 
*/ +static struct dentry * __d_find_any_alias(struct inode *inode) +{ + struct dentry *alias; + + if (list_empty(&inode->i_dentry)) + return NULL; + alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias); + return alias; +} + void ceph_dir_set_complete(struct inode *inode) { - /* not yet implemented */ + struct dentry *dentry = __d_find_any_alias(inode); + + if (dentry && ceph_dentry(dentry)) { + dout(" marking %p (%p) complete\n", inode, dentry); + set_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags); + } } void ceph_dir_clear_complete(struct inode *inode) { - /* not yet implemented */ + struct dentry *dentry = __d_find_any_alias(inode); + + if (dentry && ceph_dentry(dentry)) { + dout(" marking %p (%p) NOT complete\n", inode, dentry); + clear_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags); + } } bool ceph_dir_test_complete(struct inode *inode) { - /* not yet implemented */ + struct dentry *dentry = __d_find_any_alias(inode); + + if (dentry && ceph_dentry(dentry)) + return test_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags); return false; } diff --git a/trunk/fs/cifs/connect.c b/trunk/fs/cifs/connect.c index f3670cf72587..8cd4b52d4217 100644 --- a/trunk/fs/cifs/connect.c +++ b/trunk/fs/cifs/connect.c @@ -282,7 +282,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) byte_count = be32_to_cpu(pTargetSMB->smb_buf_length); byte_count += total_in_buf2; /* don't allow buffer to overflow */ - if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) + if (byte_count > CIFSMaxBufSize) return -ENOBUFS; pTargetSMB->smb_buf_length = cpu_to_be32(byte_count); @@ -2122,7 +2122,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) warned_on_ntlm = true; cERROR(1, "default security mechanism requested. The default " "security mechanism will be upgraded from ntlm to " - "ntlmv2 in kernel release 3.3"); + "ntlmv2 in kernel release 3.2"); } ses->overrideSecFlg = volume_info->secFlg; diff --git a/trunk/fs/compat_ioctl.c b/trunk/fs/compat_ioctl.c index a10e428b32b4..51352de88ef1 100644 --- a/trunk/fs/compat_ioctl.c +++ b/trunk/fs/compat_ioctl.c @@ -1506,6 +1506,35 @@ static long do_ioctl_trans(int fd, unsigned int cmd, return -ENOIOCTLCMD; } +static void compat_ioctl_error(struct file *filp, unsigned int fd, + unsigned int cmd, unsigned long arg) +{ + char buf[10]; + char *fn = "?"; + char *path; + + /* find the name of the device. 
*/ + path = (char *)__get_free_page(GFP_KERNEL); + if (path) { + fn = d_path(&filp->f_path, path, PAGE_SIZE); + if (IS_ERR(fn)) + fn = "?"; + } + + sprintf(buf,"'%c'", (cmd>>_IOC_TYPESHIFT) & _IOC_TYPEMASK); + if (!isprint(buf[1])) + sprintf(buf, "%02x", buf[1]); + compat_printk("ioctl32(%s:%d): Unknown cmd fd(%d) " + "cmd(%08x){t:%s;sz:%u} arg(%08x) on %s\n", + current->comm, current->pid, + (int)fd, (unsigned int)cmd, buf, + (cmd >> _IOC_SIZESHIFT) & _IOC_SIZEMASK, + (unsigned int)arg, fn); + + if (path) + free_page((unsigned long)path); +} + static int compat_ioctl_check_table(unsigned int xcmd) { int i; @@ -1592,8 +1621,13 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd, goto found_handler; error = do_ioctl_trans(fd, cmd, arg, filp); - if (error == -ENOIOCTLCMD) - error = -ENOTTY; + if (error == -ENOIOCTLCMD) { + static int count; + + if (++count <= 50) + compat_ioctl_error(filp, fd, cmd, arg); + error = -EINVAL; + } goto out_fput; diff --git a/trunk/fs/dlm/lowcomms.c b/trunk/fs/dlm/lowcomms.c index 0b3109ee4257..990626e7da80 100644 --- a/trunk/fs/dlm/lowcomms.c +++ b/trunk/fs/dlm/lowcomms.c @@ -281,7 +281,7 @@ static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr) } else { struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &addr; struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr; - ret6->sin6_addr = in6->sin6_addr; + ipv6_addr_copy(&ret6->sin6_addr, &in6->sin6_addr); } return 0; diff --git a/trunk/fs/ioctl.c b/trunk/fs/ioctl.c index 066836e81848..1d9b9fcb2db4 100644 --- a/trunk/fs/ioctl.c +++ b/trunk/fs/ioctl.c @@ -42,7 +42,7 @@ static long vfs_ioctl(struct file *filp, unsigned int cmd, error = filp->f_op->unlocked_ioctl(filp, cmd, arg); if (error == -ENOIOCTLCMD) - error = -ENOTTY; + error = -EINVAL; out: return error; } diff --git a/trunk/fs/locks.c b/trunk/fs/locks.c index 637694bf3a03..3b0d05dcd7c1 100644 --- a/trunk/fs/locks.c +++ b/trunk/fs/locks.c @@ -1205,8 +1205,6 @@ int __break_lease(struct inode *inode, unsigned int mode) int want_write = (mode & O_ACCMODE) != O_RDONLY; new_fl = lease_alloc(NULL, want_write ? 
F_WRLCK : F_RDLCK); - if (IS_ERR(new_fl)) - return PTR_ERR(new_fl); lock_flocks(); @@ -1223,6 +1221,12 @@ int __break_lease(struct inode *inode, unsigned int mode) if (fl->fl_owner == current->files) i_have_this_lease = 1; + if (IS_ERR(new_fl) && !i_have_this_lease + && ((mode & O_NONBLOCK) == 0)) { + error = PTR_ERR(new_fl); + goto out; + } + break_time = 0; if (lease_break_time > 0) { break_time = jiffies + lease_break_time * HZ; @@ -1280,7 +1284,8 @@ int __break_lease(struct inode *inode, unsigned int mode) out: unlock_flocks(); - locks_free_lock(new_fl); + if (!IS_ERR(new_fl)) + locks_free_lock(new_fl); return error; } diff --git a/trunk/fs/minix/inode.c b/trunk/fs/minix/inode.c index 4d46a6a59070..1d9e33966db0 100644 --- a/trunk/fs/minix/inode.c +++ b/trunk/fs/minix/inode.c @@ -263,6 +263,23 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) goto out_no_root; } + ret = -ENOMEM; + s->s_root = d_alloc_root(root_inode); + if (!s->s_root) + goto out_iput; + + if (!(s->s_flags & MS_RDONLY)) { + if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */ + ms->s_state &= ~MINIX_VALID_FS; + mark_buffer_dirty(bh); + } + if (!(sbi->s_mount_state & MINIX_VALID_FS)) + printk("MINIX-fs: mounting unchecked file system, " + "running fsck is recommended\n"); + else if (sbi->s_mount_state & MINIX_ERROR_FS) + printk("MINIX-fs: mounting file system with errors, " + "running fsck is recommended\n"); + /* Apparently minix can create filesystems that allocate more blocks for * the bitmaps than needed. We simply ignore that, but verify it didn't * create one with not enough blocks and bail out if so. @@ -283,23 +300,6 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) goto out_iput; } - ret = -ENOMEM; - s->s_root = d_alloc_root(root_inode); - if (!s->s_root) - goto out_iput; - - if (!(s->s_flags & MS_RDONLY)) { - if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */ - ms->s_state &= ~MINIX_VALID_FS; - mark_buffer_dirty(bh); - } - if (!(sbi->s_mount_state & MINIX_VALID_FS)) - printk("MINIX-fs: mounting unchecked file system, " - "running fsck is recommended\n"); - else if (sbi->s_mount_state & MINIX_ERROR_FS) - printk("MINIX-fs: mounting file system with errors, " - "running fsck is recommended\n"); - return 0; out_iput: diff --git a/trunk/fs/proc/array.c b/trunk/fs/proc/array.c index 8c344f037bd0..3a1dafd228d1 100644 --- a/trunk/fs/proc/array.c +++ b/trunk/fs/proc/array.c @@ -394,8 +394,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, sigemptyset(&sigign); sigemptyset(&sigcatch); - cutime = cstime = utime = stime = 0; - cgtime = gtime = 0; + cutime = cstime = utime = stime = cputime_zero; + cgtime = gtime = cputime_zero; if (lock_task_sighand(task, &flags)) { struct signal_struct *sig = task->signal; @@ -423,14 +423,14 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, do { min_flt += t->min_flt; maj_flt += t->maj_flt; - gtime += t->gtime; + gtime = cputime_add(gtime, t->gtime); t = next_thread(t); } while (t != task); min_flt += sig->min_flt; maj_flt += sig->maj_flt; thread_group_times(task, &utime, &stime); - gtime += sig->gtime; + gtime = cputime_add(gtime, sig->gtime); } sid = task_session_nr_ns(task, ns); diff --git a/trunk/fs/proc/stat.c b/trunk/fs/proc/stat.c index d76ca6ae2b1b..2a30d67dd6b8 100644 --- a/trunk/fs/proc/stat.c +++ b/trunk/fs/proc/stat.c @@ -22,29 +22,31 @@ #define arch_idle_time(cpu) 0 #endif -static u64 get_idle_time(int cpu) +static cputime64_t 
get_idle_time(int cpu) { - u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL); + u64 idle_time = get_cpu_idle_time_us(cpu, NULL); + cputime64_t idle; if (idle_time == -1ULL) { /* !NO_HZ so we can rely on cpustat.idle */ - idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; - idle += arch_idle_time(cpu); + idle = kstat_cpu(cpu).cpustat.idle; + idle = cputime64_add(idle, arch_idle_time(cpu)); } else - idle = usecs_to_cputime64(idle_time); + idle = nsecs_to_jiffies64(1000 * idle_time); return idle; } -static u64 get_iowait_time(int cpu) +static cputime64_t get_iowait_time(int cpu) { - u64 iowait, iowait_time = get_cpu_iowait_time_us(cpu, NULL); + u64 iowait_time = get_cpu_iowait_time_us(cpu, NULL); + cputime64_t iowait; if (iowait_time == -1ULL) /* !NO_HZ so we can rely on cpustat.iowait */ - iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; + iowait = kstat_cpu(cpu).cpustat.iowait; else - iowait = usecs_to_cputime64(iowait_time); + iowait = nsecs_to_jiffies64(1000 * iowait_time); return iowait; } @@ -53,30 +55,33 @@ static int show_stat(struct seq_file *p, void *v) { int i, j; unsigned long jif; - u64 user, nice, system, idle, iowait, irq, softirq, steal; - u64 guest, guest_nice; + cputime64_t user, nice, system, idle, iowait, irq, softirq, steal; + cputime64_t guest, guest_nice; u64 sum = 0; u64 sum_softirq = 0; unsigned int per_softirq_sums[NR_SOFTIRQS] = {0}; struct timespec boottime; user = nice = system = idle = iowait = - irq = softirq = steal = 0; - guest = guest_nice = 0; + irq = softirq = steal = cputime64_zero; + guest = guest_nice = cputime64_zero; getboottime(&boottime); jif = boottime.tv_sec; for_each_possible_cpu(i) { - user += kcpustat_cpu(i).cpustat[CPUTIME_USER]; - nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE]; - system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]; - idle += get_idle_time(i); - iowait += get_iowait_time(i); - irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ]; - softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]; - steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; - guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; - guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; + user = cputime64_add(user, kstat_cpu(i).cpustat.user); + nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice); + system = cputime64_add(system, kstat_cpu(i).cpustat.system); + idle = cputime64_add(idle, get_idle_time(i)); + iowait = cputime64_add(iowait, get_iowait_time(i)); + irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq); + softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq); + steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal); + guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest); + guest_nice = cputime64_add(guest_nice, + kstat_cpu(i).cpustat.guest_nice); + sum += kstat_cpu_irqs_sum(i); + sum += arch_irq_stat_cpu(i); for (j = 0; j < NR_SOFTIRQS; j++) { unsigned int softirq_stat = kstat_softirqs_cpu(j, i); @@ -101,16 +106,16 @@ static int show_stat(struct seq_file *p, void *v) (unsigned long long)cputime64_to_clock_t(guest_nice)); for_each_online_cpu(i) { /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ - user = kcpustat_cpu(i).cpustat[CPUTIME_USER]; - nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE]; - system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]; + user = kstat_cpu(i).cpustat.user; + nice = kstat_cpu(i).cpustat.nice; + system = kstat_cpu(i).cpustat.system; idle = get_idle_time(i); iowait = get_iowait_time(i); - irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ]; - softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]; - steal = 
kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; - guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; - guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; + irq = kstat_cpu(i).cpustat.irq; + softirq = kstat_cpu(i).cpustat.softirq; + steal = kstat_cpu(i).cpustat.steal; + guest = kstat_cpu(i).cpustat.guest; + guest_nice = kstat_cpu(i).cpustat.guest_nice; seq_printf(p, "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu " "%llu\n", diff --git a/trunk/fs/proc/uptime.c b/trunk/fs/proc/uptime.c index 9610ac772d7e..766b1d456050 100644 --- a/trunk/fs/proc/uptime.c +++ b/trunk/fs/proc/uptime.c @@ -11,20 +11,15 @@ static int uptime_proc_show(struct seq_file *m, void *v) { struct timespec uptime; struct timespec idle; - u64 idletime; - u64 nsec; - u32 rem; int i; + cputime_t idletime = cputime_zero; - idletime = 0; for_each_possible_cpu(i) - idletime += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE]; + idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle); do_posix_clock_monotonic_gettime(&uptime); monotonic_to_bootbased(&uptime); - nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC; - idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem); - idle.tv_nsec = rem; + cputime_to_timespec(idletime, &idle); seq_printf(m, "%lu.%02lu %lu.%02lu\n", (unsigned long) uptime.tv_sec, (uptime.tv_nsec / (NSEC_PER_SEC / 100)), diff --git a/trunk/fs/xfs/xfs_super.c b/trunk/fs/xfs/xfs_super.c index 8a899496fd5f..3eca58f51ae9 100644 --- a/trunk/fs/xfs/xfs_super.c +++ b/trunk/fs/xfs/xfs_super.c @@ -868,6 +868,27 @@ xfs_fs_dirty_inode( XFS_I(inode)->i_update_core = 1; } +STATIC int +xfs_log_inode( + struct xfs_inode *ip) +{ + struct xfs_mount *mp = ip->i_mount; + struct xfs_trans *tp; + int error; + + tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); + error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); + if (error) { + xfs_trans_cancel(tp, 0); + return error; + } + + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + return xfs_trans_commit(tp, 0); +} + STATIC int xfs_fs_write_inode( struct inode *inode, @@ -881,8 +902,10 @@ xfs_fs_write_inode( if (XFS_FORCED_SHUTDOWN(mp)) return -XFS_ERROR(EIO); + if (!ip->i_update_core) + return 0; - if (wbc->sync_mode == WB_SYNC_ALL || wbc->for_kupdate) { + if (wbc->sync_mode == WB_SYNC_ALL) { /* * Make sure the inode has made it it into the log. Instead * of forcing it all the way to stable storage using a @@ -890,14 +913,11 @@ xfs_fs_write_inode( * ->sync_fs call do that for thus, which reduces the number * of synchronous log forces dramatically. */ - error = xfs_log_dirty_inode(ip, NULL, 0); + error = xfs_log_inode(ip); if (error) goto out; return 0; } else { - if (!ip->i_update_core) - return 0; - /* * We make this non-blocking if the inode is contended, return * EAGAIN to indicate to the caller that they did not succeed. 
diff --git a/trunk/fs/xfs/xfs_sync.c b/trunk/fs/xfs/xfs_sync.c index f0994aedcd15..be5c51d8f757 100644 --- a/trunk/fs/xfs/xfs_sync.c +++ b/trunk/fs/xfs/xfs_sync.c @@ -336,32 +336,6 @@ xfs_sync_fsdata( return error; } -int -xfs_log_dirty_inode( - struct xfs_inode *ip, - struct xfs_perag *pag, - int flags) -{ - struct xfs_mount *mp = ip->i_mount; - struct xfs_trans *tp; - int error; - - if (!ip->i_update_core) - return 0; - - tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); - error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); - if (error) { - xfs_trans_cancel(tp, 0); - return error; - } - - xfs_ilock(ip, XFS_ILOCK_EXCL); - xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); - return xfs_trans_commit(tp, 0); -} - /* * When remounting a filesystem read-only or freezing the filesystem, we have * two phases to execute. This first phase is syncing the data before we @@ -385,16 +359,6 @@ xfs_quiesce_data( { int error, error2 = 0; - /* - * Log all pending size and timestamp updates. The vfs writeback - * code is supposed to do this, but due to its overagressive - * livelock detection it will skip inodes where appending writes - * were written out in the first non-blocking sync phase if their - * completion took long enough that it happened after taking the - * timestamp for the cut-off in the blocking phase. - */ - xfs_inode_ag_iterator(mp, xfs_log_dirty_inode, 0); - xfs_qm_sync(mp, SYNC_TRYLOCK); xfs_qm_sync(mp, SYNC_WAIT); diff --git a/trunk/fs/xfs/xfs_sync.h b/trunk/fs/xfs/xfs_sync.h index fa965479d788..941202e7ac6e 100644 --- a/trunk/fs/xfs/xfs_sync.h +++ b/trunk/fs/xfs/xfs_sync.h @@ -34,8 +34,6 @@ void xfs_quiesce_attr(struct xfs_mount *mp); void xfs_flush_inodes(struct xfs_inode *ip); -int xfs_log_dirty_inode(struct xfs_inode *ip, struct xfs_perag *pag, int flags); - int xfs_reclaim_inodes(struct xfs_mount *mp, int mode); int xfs_reclaim_inodes_count(struct xfs_mount *mp); void xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan); diff --git a/trunk/include/asm-generic/cputime.h b/trunk/include/asm-generic/cputime.h index 9a62937c56ca..62ce6823c0f2 100644 --- a/trunk/include/asm-generic/cputime.h +++ b/trunk/include/asm-generic/cputime.h @@ -4,66 +4,70 @@ #include #include -typedef unsigned long __nocast cputime_t; +typedef unsigned long cputime_t; +#define cputime_zero (0UL) #define cputime_one_jiffy jiffies_to_cputime(1) -#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct) +#define cputime_max ((~0UL >> 1) - 1) +#define cputime_add(__a, __b) ((__a) + (__b)) +#define cputime_sub(__a, __b) ((__a) - (__b)) +#define cputime_div(__a, __n) ((__a) / (__n)) +#define cputime_halve(__a) ((__a) >> 1) +#define cputime_eq(__a, __b) ((__a) == (__b)) +#define cputime_gt(__a, __b) ((__a) > (__b)) +#define cputime_ge(__a, __b) ((__a) >= (__b)) +#define cputime_lt(__a, __b) ((__a) < (__b)) +#define cputime_le(__a, __b) ((__a) <= (__b)) +#define cputime_to_jiffies(__ct) (__ct) #define cputime_to_scaled(__ct) (__ct) -#define jiffies_to_cputime(__hz) (__force cputime_t)(__hz) +#define jiffies_to_cputime(__hz) (__hz) -typedef u64 __nocast cputime64_t; +typedef u64 cputime64_t; -#define cputime64_to_jiffies64(__ct) (__force u64)(__ct) -#define jiffies64_to_cputime64(__jif) (__force cputime64_t)(__jif) +#define cputime64_zero (0ULL) +#define cputime64_add(__a, __b) ((__a) + (__b)) +#define cputime64_sub(__a, __b) ((__a) - (__b)) +#define cputime64_to_jiffies64(__ct) (__ct) +#define jiffies64_to_cputime64(__jif) (__jif) +#define 
cputime_to_cputime64(__ct) ((u64) __ct) +#define cputime64_gt(__a, __b) ((__a) > (__b)) -#define nsecs_to_cputime64(__ct) \ - jiffies64_to_cputime64(nsecs_to_jiffies64(__ct)) +#define nsecs_to_cputime64(__ct) nsecs_to_jiffies64(__ct) /* * Convert cputime to microseconds and back. */ -#define cputime_to_usecs(__ct) \ - jiffies_to_usecs(cputime_to_jiffies(__ct)) -#define usecs_to_cputime(__usec) \ - jiffies_to_cputime(usecs_to_jiffies(__usec)) -#define usecs_to_cputime64(__usec) \ - jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000)) +#define cputime_to_usecs(__ct) jiffies_to_usecs(__ct) +#define usecs_to_cputime(__msecs) usecs_to_jiffies(__msecs) /* * Convert cputime to seconds and back. */ -#define cputime_to_secs(jif) (cputime_to_jiffies(jif) / HZ) -#define secs_to_cputime(sec) jiffies_to_cputime((sec) * HZ) +#define cputime_to_secs(jif) ((jif) / HZ) +#define secs_to_cputime(sec) ((sec) * HZ) /* * Convert cputime to timespec and back. */ -#define timespec_to_cputime(__val) \ - jiffies_to_cputime(timespec_to_jiffies(__val)) -#define cputime_to_timespec(__ct,__val) \ - jiffies_to_timespec(cputime_to_jiffies(__ct),__val) +#define timespec_to_cputime(__val) timespec_to_jiffies(__val) +#define cputime_to_timespec(__ct,__val) jiffies_to_timespec(__ct,__val) /* * Convert cputime to timeval and back. */ -#define timeval_to_cputime(__val) \ - jiffies_to_cputime(timeval_to_jiffies(__val)) -#define cputime_to_timeval(__ct,__val) \ - jiffies_to_timeval(cputime_to_jiffies(__ct),__val) +#define timeval_to_cputime(__val) timeval_to_jiffies(__val) +#define cputime_to_timeval(__ct,__val) jiffies_to_timeval(__ct,__val) /* * Convert cputime to clock and back. */ -#define cputime_to_clock_t(__ct) \ - jiffies_to_clock_t(cputime_to_jiffies(__ct)) -#define clock_t_to_cputime(__x) \ - jiffies_to_cputime(clock_t_to_jiffies(__x)) +#define cputime_to_clock_t(__ct) jiffies_to_clock_t(__ct) +#define clock_t_to_cputime(__x) clock_t_to_jiffies(__x) /* * Convert cputime64 to clock. */ -#define cputime64_to_clock_t(__ct) \ - jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct)) +#define cputime64_to_clock_t(__ct) jiffies_64_to_clock_t(__ct) #endif diff --git a/trunk/include/asm-generic/socket.h b/trunk/include/asm-generic/socket.h index 49c1704173e7..9a6115e7cf63 100644 --- a/trunk/include/asm-generic/socket.h +++ b/trunk/include/asm-generic/socket.h @@ -64,7 +64,4 @@ #define SO_DOMAIN 39 #define SO_RXQ_OVFL 40 - -#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS #endif /* __ASM_GENERIC_SOCKET_H */ diff --git a/trunk/include/linux/Kbuild b/trunk/include/linux/Kbuild index c94e71781b79..619b5657af77 100644 --- a/trunk/include/linux/Kbuild +++ b/trunk/include/linux/Kbuild @@ -185,7 +185,6 @@ header-y += if_pppol2tp.h header-y += if_pppox.h header-y += if_slip.h header-y += if_strip.h -header-y += if_team.h header-y += if_tr.h header-y += if_tun.h header-y += if_tunnel.h @@ -195,9 +194,7 @@ header-y += igmp.h header-y += in.h header-y += in6.h header-y += in_route.h -header-y += sock_diag.h header-y += inet_diag.h -header-y += unix_diag.h header-y += inotify.h header-y += input.h header-y += ioctl.h diff --git a/trunk/include/linux/atmdev.h b/trunk/include/linux/atmdev.h index f4ff882cb2da..49a83ca900ba 100644 --- a/trunk/include/linux/atmdev.h +++ b/trunk/include/linux/atmdev.h @@ -445,6 +445,16 @@ void vcc_insert_socket(struct sock *sk); void atm_dev_release_vccs(struct atm_dev *dev); +/* + * This is approximately the algorithm used by alloc_skb. 
+ * + */ + +static inline int atm_guess_pdu2truesize(int size) +{ + return SKB_DATA_ALIGN(size) + sizeof(struct skb_shared_info); +} + static inline void atm_force_charge(struct atm_vcc *vcc,int truesize) { diff --git a/trunk/include/linux/bcma/bcma.h b/trunk/include/linux/bcma/bcma.h index f4b8346b1a33..4d4b59de9467 100644 --- a/trunk/include/linux/bcma/bcma.h +++ b/trunk/include/linux/bcma/bcma.h @@ -205,82 +205,61 @@ struct bcma_bus { struct ssb_sprom sprom; }; -static inline u32 bcma_read8(struct bcma_device *core, u16 offset) +extern inline u32 bcma_read8(struct bcma_device *core, u16 offset) { return core->bus->ops->read8(core, offset); } -static inline u32 bcma_read16(struct bcma_device *core, u16 offset) +extern inline u32 bcma_read16(struct bcma_device *core, u16 offset) { return core->bus->ops->read16(core, offset); } -static inline u32 bcma_read32(struct bcma_device *core, u16 offset) +extern inline u32 bcma_read32(struct bcma_device *core, u16 offset) { return core->bus->ops->read32(core, offset); } -static inline +extern inline void bcma_write8(struct bcma_device *core, u16 offset, u32 value) { core->bus->ops->write8(core, offset, value); } -static inline +extern inline void bcma_write16(struct bcma_device *core, u16 offset, u32 value) { core->bus->ops->write16(core, offset, value); } -static inline +extern inline void bcma_write32(struct bcma_device *core, u16 offset, u32 value) { core->bus->ops->write32(core, offset, value); } #ifdef CONFIG_BCMA_BLOCKIO -static inline void bcma_block_read(struct bcma_device *core, void *buffer, +extern inline void bcma_block_read(struct bcma_device *core, void *buffer, size_t count, u16 offset, u8 reg_width) { core->bus->ops->block_read(core, buffer, count, offset, reg_width); } -static inline void bcma_block_write(struct bcma_device *core, - const void *buffer, size_t count, - u16 offset, u8 reg_width) +extern inline void bcma_block_write(struct bcma_device *core, const void *buffer, + size_t count, u16 offset, u8 reg_width) { core->bus->ops->block_write(core, buffer, count, offset, reg_width); } #endif -static inline u32 bcma_aread32(struct bcma_device *core, u16 offset) +extern inline u32 bcma_aread32(struct bcma_device *core, u16 offset) { return core->bus->ops->aread32(core, offset); } -static inline +extern inline void bcma_awrite32(struct bcma_device *core, u16 offset, u32 value) { core->bus->ops->awrite32(core, offset, value); } -static inline void bcma_mask32(struct bcma_device *cc, u16 offset, u32 mask) -{ - bcma_write32(cc, offset, bcma_read32(cc, offset) & mask); -} -static inline void bcma_set32(struct bcma_device *cc, u16 offset, u32 set) -{ - bcma_write32(cc, offset, bcma_read32(cc, offset) | set); -} -static inline void bcma_maskset32(struct bcma_device *cc, - u16 offset, u32 mask, u32 set) -{ - bcma_write32(cc, offset, (bcma_read32(cc, offset) & mask) | set); -} -static inline void bcma_mask16(struct bcma_device *cc, u16 offset, u16 mask) -{ - bcma_write16(cc, offset, bcma_read16(cc, offset) & mask); -} -static inline void bcma_set16(struct bcma_device *cc, u16 offset, u16 set) -{ - bcma_write16(cc, offset, bcma_read16(cc, offset) | set); -} -static inline void bcma_maskset16(struct bcma_device *cc, - u16 offset, u16 mask, u16 set) -{ - bcma_write16(cc, offset, (bcma_read16(cc, offset) & mask) | set); -} +#define bcma_mask32(cc, offset, mask) \ + bcma_write32(cc, offset, bcma_read32(cc, offset) & (mask)) +#define bcma_set32(cc, offset, set) \ + bcma_write32(cc, offset, bcma_read32(cc, offset) | (set)) +#define 
bcma_maskset32(cc, offset, mask, set) \ + bcma_write32(cc, offset, (bcma_read32(cc, offset) & (mask)) | (set)) extern bool bcma_core_is_enabled(struct bcma_device *core); extern void bcma_core_disable(struct bcma_device *core, u32 flags); diff --git a/trunk/include/linux/bcma/bcma_driver_chipcommon.h b/trunk/include/linux/bcma/bcma_driver_chipcommon.h index a33086a7530b..1526d965ed06 100644 --- a/trunk/include/linux/bcma/bcma_driver_chipcommon.h +++ b/trunk/include/linux/bcma/bcma_driver_chipcommon.h @@ -203,7 +203,6 @@ #define BCMA_CC_PMU_CTL 0x0600 /* PMU control */ #define BCMA_CC_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */ #define BCMA_CC_PMU_CTL_ILP_DIV_SHIFT 16 -#define BCMA_CC_PMU_CTL_PLL_UPD 0x00000400 #define BCMA_CC_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */ #define BCMA_CC_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */ #define BCMA_CC_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */ diff --git a/trunk/include/linux/bitops.h b/trunk/include/linux/bitops.h index 3c1063acb2ab..a3ef66a2a083 100644 --- a/trunk/include/linux/bitops.h +++ b/trunk/include/linux/bitops.h @@ -22,14 +22,8 @@ extern unsigned long __sw_hweight64(__u64 w); #include #define for_each_set_bit(bit, addr, size) \ - for ((bit) = find_first_bit((addr), (size)); \ - (bit) < (size); \ - (bit) = find_next_bit((addr), (size), (bit) + 1)) - -/* same as for_each_set_bit() but use bit as value to start with */ -#define for_each_set_bit_cont(bit, addr, size) \ - for ((bit) = find_next_bit((addr), (size), (bit)); \ - (bit) < (size); \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ (bit) = find_next_bit((addr), (size), (bit) + 1)) static __inline__ int get_bitmask_order(unsigned int count) diff --git a/trunk/include/linux/bootmem.h b/trunk/include/linux/bootmem.h index 66d3e954eb6c..ab344a521105 100644 --- a/trunk/include/linux/bootmem.h +++ b/trunk/include/linux/bootmem.h @@ -44,7 +44,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat, unsigned long endpfn); extern unsigned long init_bootmem(unsigned long addr, unsigned long memend); -extern unsigned long free_low_memory_core_early(int nodeid); +unsigned long free_all_memory_core_early(int nodeid); extern unsigned long free_all_bootmem_node(pg_data_t *pgdat); extern unsigned long free_all_bootmem(void); diff --git a/trunk/include/linux/can/platform/cc770.h b/trunk/include/linux/can/platform/cc770.h deleted file mode 100644 index 7702641f87ee..000000000000 --- a/trunk/include/linux/can/platform/cc770.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef _CAN_PLATFORM_CC770_H_ -#define _CAN_PLATFORM_CC770_H_ - -/* CPU Interface Register (0x02) */ -#define CPUIF_CEN 0x01 /* Clock Out Enable */ -#define CPUIF_MUX 0x04 /* Multiplex */ -#define CPUIF_SLP 0x08 /* Sleep */ -#define CPUIF_PWD 0x10 /* Power Down Mode */ -#define CPUIF_DMC 0x20 /* Divide Memory Clock */ -#define CPUIF_DSC 0x40 /* Divide System Clock */ -#define CPUIF_RST 0x80 /* Hardware Reset Status */ - -/* Clock Out Register (0x1f) */ -#define CLKOUT_CD_MASK 0x0f /* Clock Divider mask */ -#define CLKOUT_SL_MASK 0x30 /* Slew Rate mask */ -#define CLKOUT_SL_SHIFT 4 - -/* Bus Configuration Register (0x2f) */ -#define BUSCFG_DR0 0x01 /* Disconnect RX0 Input / Select RX input */ -#define BUSCFG_DR1 0x02 /* Disconnect RX1 Input / Silent mode */ -#define BUSCFG_DT1 0x08 /* Disconnect TX1 Output */ -#define BUSCFG_POL 0x20 /* Polarity dominant or recessive */ -#define BUSCFG_CBY 0x40 /* Input Comparator Bypass */ - -struct cc770_platform_data { - u32 osc_freq; /* CAN bus oscillator frequency in 
Hz */ - - u8 cir; /* CPU Interface Register */ - u8 cor; /* Clock Out Register */ - u8 bcr; /* Bus Configuration Register */ -}; - -#endif /* !_CAN_PLATFORM_CC770_H_ */ diff --git a/trunk/include/linux/cgroup_subsys.h b/trunk/include/linux/cgroup_subsys.h index 0bd390ce98b2..ac663c18776c 100644 --- a/trunk/include/linux/cgroup_subsys.h +++ b/trunk/include/linux/cgroup_subsys.h @@ -59,16 +59,8 @@ SUBSYS(net_cls) SUBSYS(blkio) #endif -/* */ - #ifdef CONFIG_CGROUP_PERF SUBSYS(perf) #endif /* */ - -#ifdef CONFIG_NETPRIO_CGROUP -SUBSYS(net_prio) -#endif - -/* */ diff --git a/trunk/include/linux/cpu.h b/trunk/include/linux/cpu.h index 305c263021e7..6cb60fd2ea84 100644 --- a/trunk/include/linux/cpu.h +++ b/trunk/include/linux/cpu.h @@ -27,7 +27,6 @@ struct cpu { extern int register_cpu(struct cpu *cpu, int num); extern struct sys_device *get_cpu_sysdev(unsigned cpu); -extern bool cpu_is_hotpluggable(unsigned cpu); extern int cpu_add_sysdev_attr(struct sysdev_attribute *attr); extern void cpu_remove_sysdev_attr(struct sysdev_attribute *attr); diff --git a/trunk/include/linux/cpuidle.h b/trunk/include/linux/cpuidle.h index 23f81de51829..7408af843b8a 100644 --- a/trunk/include/linux/cpuidle.h +++ b/trunk/include/linux/cpuidle.h @@ -130,6 +130,7 @@ struct cpuidle_driver { #ifdef CONFIG_CPU_IDLE extern void disable_cpuidle(void); extern int cpuidle_idle_call(void); + extern int cpuidle_register_driver(struct cpuidle_driver *drv); struct cpuidle_driver *cpuidle_get_driver(void); extern void cpuidle_unregister_driver(struct cpuidle_driver *drv); @@ -144,6 +145,7 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev); #else static inline void disable_cpuidle(void) { } static inline int cpuidle_idle_call(void) { return -ENODEV; } + static inline int cpuidle_register_driver(struct cpuidle_driver *drv) {return -ENODEV; } static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; } diff --git a/trunk/include/linux/debugobjects.h b/trunk/include/linux/debugobjects.h index 0e5f5785d9f2..65970b811e22 100644 --- a/trunk/include/linux/debugobjects.h +++ b/trunk/include/linux/debugobjects.h @@ -46,8 +46,6 @@ struct debug_obj { * fails * @fixup_free: fixup function, which is called when the free check * fails - * @fixup_assert_init: fixup function, which is called when the assert_init - * check fails */ struct debug_obj_descr { const char *name; @@ -56,7 +54,6 @@ struct debug_obj_descr { int (*fixup_activate) (void *addr, enum debug_obj_state state); int (*fixup_destroy) (void *addr, enum debug_obj_state state); int (*fixup_free) (void *addr, enum debug_obj_state state); - int (*fixup_assert_init)(void *addr, enum debug_obj_state state); }; #ifdef CONFIG_DEBUG_OBJECTS @@ -67,7 +64,6 @@ extern void debug_object_activate (void *addr, struct debug_obj_descr *descr); extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr); extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr); extern void debug_object_free (void *addr, struct debug_obj_descr *descr); -extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr); /* * Active state: @@ -93,8 +89,6 @@ static inline void debug_object_destroy (void *addr, struct debug_obj_descr *descr) { } static inline void debug_object_free (void *addr, struct debug_obj_descr *descr) { } -static inline void -debug_object_assert_init(void *addr, struct debug_obj_descr *descr) { } static inline void debug_objects_early_init(void) { } static inline void debug_objects_mem_init(void) { } diff --git 
a/trunk/include/linux/dynamic_queue_limits.h b/trunk/include/linux/dynamic_queue_limits.h deleted file mode 100644 index 5621547d631b..000000000000 --- a/trunk/include/linux/dynamic_queue_limits.h +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Dynamic queue limits (dql) - Definitions - * - * Copyright (c) 2011, Tom Herbert - * - * This header file contains the definitions for dynamic queue limits (dql). - * dql would be used in conjunction with a producer/consumer type queue - * (possibly a HW queue). Such a queue would have these general properties: - * - * 1) Objects are queued up to some limit specified as number of objects. - * 2) Periodically a completion process executes which retires consumed - * objects. - * 3) Starvation occurs when limit has been reached, all queued data has - * actually been consumed, but completion processing has not yet run - * so queuing new data is blocked. - * 4) Minimizing the amount of queued data is desirable. - * - * The goal of dql is to calculate the limit as the minimum number of objects - * needed to prevent starvation. - * - * The primary functions of dql are: - * dql_queued - called when objects are enqueued to record number of objects - * dql_avail - returns how many objects are available to be queued based - * on the object limit and how many objects are already enqueued - * dql_completed - called at completion time to indicate how many objects - * were retired from the queue - * - * The dql implementation does not implement any locking for the dql data - * structures, the higher layer should provide this. dql_queued should - * be serialized to prevent concurrent execution of the function; this - * is also true for dql_completed. However, dql_queued and dlq_completed can - * be executed concurrently (i.e. they can be protected by different locks). - */ - -#ifndef _LINUX_DQL_H -#define _LINUX_DQL_H - -#ifdef __KERNEL__ - -struct dql { - /* Fields accessed in enqueue path (dql_queued) */ - unsigned int num_queued; /* Total ever queued */ - unsigned int adj_limit; /* limit + num_completed */ - unsigned int last_obj_cnt; /* Count at last queuing */ - - /* Fields accessed only by completion path (dql_completed) */ - - unsigned int limit ____cacheline_aligned_in_smp; /* Current limit */ - unsigned int num_completed; /* Total ever completed */ - - unsigned int prev_ovlimit; /* Previous over limit */ - unsigned int prev_num_queued; /* Previous queue total */ - unsigned int prev_last_obj_cnt; /* Previous queuing cnt */ - - unsigned int lowest_slack; /* Lowest slack found */ - unsigned long slack_start_time; /* Time slacks seen */ - - /* Configuration */ - unsigned int max_limit; /* Max limit */ - unsigned int min_limit; /* Minimum limit */ - unsigned int slack_hold_time; /* Time to measure slack */ -}; - -/* Set some static maximums */ -#define DQL_MAX_OBJECT (UINT_MAX / 16) -#define DQL_MAX_LIMIT ((UINT_MAX / 2) - DQL_MAX_OBJECT) - -/* - * Record number of objects queued. Assumes that caller has already checked - * availability in the queue with dql_avail. - */ -static inline void dql_queued(struct dql *dql, unsigned int count) -{ - BUG_ON(count > DQL_MAX_OBJECT); - - dql->num_queued += count; - dql->last_obj_cnt = count; -} - -/* Returns how many objects can be queued, < 0 indicates over limit. */ -static inline int dql_avail(const struct dql *dql) -{ - return dql->adj_limit - dql->num_queued; -} - -/* Record number of completed objects and recalculate the limit. 
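The header comment above spells out the intended dql call pattern (check dql_avail(), then dql_queued() on the producer side, dql_completed() on the completion side). A simplified, self-contained sketch of that pattern looks roughly like this; the dql_completed() below is an assumed trivial stand-in, not the real slack-based recalculation in lib/dql.c, whose prototype follows:

/* Simplified userspace model of the dql call pattern. */
#include <stdio.h>

struct dql {
        unsigned int num_queued;        /* total objects ever queued   */
        unsigned int adj_limit;         /* limit + num_completed       */
        unsigned int limit;             /* current limit               */
        unsigned int num_completed;     /* total objects ever retired  */
};

static void dql_queued(struct dql *dql, unsigned int count)
{
        dql->num_queued += count;       /* producer side, after dql_avail() check */
}

static int dql_avail(const struct dql *dql)
{
        return (int)(dql->adj_limit - dql->num_queued);  /* < 0 means over limit */
}

static void dql_completed(struct dql *dql, unsigned int count)
{
        /* assumed, simplified recalculation; the real one measures slack */
        dql->num_completed += count;
        dql->limit += 16;
        dql->adj_limit = dql->limit + dql->num_completed;
}

int main(void)
{
        struct dql dql = { .limit = 64, .adj_limit = 64 };

        if (dql_avail(&dql) >= 32)
                dql_queued(&dql, 32);           /* transmit path */
        dql_completed(&dql, 32);                /* completion path */
        printf("available after completion: %d\n", dql_avail(&dql));
        return 0;
}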
*/ -void dql_completed(struct dql *dql, unsigned int count); - -/* Reset dql state */ -void dql_reset(struct dql *dql); - -/* Initialize dql state */ -int dql_init(struct dql *dql, unsigned hold_time); - -#endif /* _KERNEL_ */ - -#endif /* _LINUX_DQL_H */ diff --git a/trunk/include/linux/eeprom_93cx6.h b/trunk/include/linux/eeprom_93cx6.h index e50f98b0297a..c4627cbdb8e0 100644 --- a/trunk/include/linux/eeprom_93cx6.h +++ b/trunk/include/linux/eeprom_93cx6.h @@ -33,7 +33,6 @@ #define PCI_EEPROM_WIDTH_93C86 8 #define PCI_EEPROM_WIDTH_OPCODE 3 #define PCI_EEPROM_WRITE_OPCODE 0x05 -#define PCI_EEPROM_ERASE_OPCODE 0x07 #define PCI_EEPROM_READ_OPCODE 0x06 #define PCI_EEPROM_EWDS_OPCODE 0x10 #define PCI_EEPROM_EWEN_OPCODE 0x13 @@ -47,7 +46,6 @@ * @register_write(struct eeprom_93cx6 *eeprom): handler to * write to the eeprom register by using all reg_* fields. * @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines - * @drive_data: Set if we're driving the data line. * @reg_data_in: register field to indicate data input * @reg_data_out: register field to indicate data output * @reg_data_clock: register field to set the data clock @@ -64,7 +62,6 @@ struct eeprom_93cx6 { int width; - char drive_data; char reg_data_in; char reg_data_out; char reg_data_clock; @@ -75,8 +72,3 @@ extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom, const u8 word, u16 *data); extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word, __le16 *data, const u16 words); - -extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable); - -extern void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom, - u8 addr, u16 data); diff --git a/trunk/include/linux/errqueue.h b/trunk/include/linux/errqueue.h index fd0628be45ce..034072cea853 100644 --- a/trunk/include/linux/errqueue.h +++ b/trunk/include/linux/errqueue.h @@ -17,15 +17,14 @@ struct sock_extended_err { #define SO_EE_ORIGIN_LOCAL 1 #define SO_EE_ORIGIN_ICMP 2 #define SO_EE_ORIGIN_ICMP6 3 -#define SO_EE_ORIGIN_TXSTATUS 4 -#define SO_EE_ORIGIN_TIMESTAMPING SO_EE_ORIGIN_TXSTATUS +#define SO_EE_ORIGIN_TIMESTAMPING 4 #define SO_EE_OFFENDER(ee) ((struct sockaddr*)((ee)+1)) #ifdef __KERNEL__ #include -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) #include #endif @@ -34,7 +33,7 @@ struct sock_extended_err { struct sock_exterr_skb { union { struct inet_skb_parm h4; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) struct inet6_skb_parm h6; #endif } header; diff --git a/trunk/include/linux/ethtool.h b/trunk/include/linux/ethtool.h index da5b2de99ae4..de33de1e2052 100644 --- a/trunk/include/linux/ethtool.h +++ b/trunk/include/linux/ethtool.h @@ -489,10 +489,7 @@ struct ethtool_rx_flow_spec { * on return. * * For %ETHTOOL_GRXCLSRLCNT, @rule_cnt is set to the number of defined - * rules on return. If @data is non-zero on return then it is the - * size of the rule table, plus the flag %RX_CLS_LOC_SPECIAL if the - * driver supports any special location values. If that flag is not - * set in @data then special location values should not be used. + * rules on return. * * For %ETHTOOL_GRXCLSRULE, @fs.@location specifies the location of an * existing rule on entry and @fs contains the rule on return. @@ -504,23 +501,10 @@ struct ethtool_rx_flow_spec { * must use the second parameter to get_rxnfc() instead of @rule_locs. * * For %ETHTOOL_SRXCLSRLINS, @fs specifies the rule to add or update. 
- * @fs.@location either specifies the location to use or is a special - * location value with %RX_CLS_LOC_SPECIAL flag set. On return, - * @fs.@location is the actual rule location. + * @fs.@location specifies the location to use and must not be ignored. * * For %ETHTOOL_SRXCLSRLDEL, @fs.@location specifies the location of an * existing rule on entry. - * - * A driver supporting the special location values for - * %ETHTOOL_SRXCLSRLINS may add the rule at any suitable unused - * location, and may remove a rule at a later location (lower - * priority) that matches exactly the same set of flows. The special - * values are: %RX_CLS_LOC_ANY, selecting any location; - * %RX_CLS_LOC_FIRST, selecting the first suitable location (maximum - * priority); and %RX_CLS_LOC_LAST, selecting the last suitable - * location (minimum priority). Additional special values may be - * defined in future and drivers must return -%EINVAL for any - * unrecognised value. */ struct ethtool_rxnfc { __u32 cmd; @@ -559,15 +543,9 @@ struct compat_ethtool_rxnfc { /** * struct ethtool_rxfh_indir - command to get or set RX flow hash indirection * @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR - * @size: On entry, the array size of the user buffer, which may be zero. - * On return from %ETHTOOL_GRXFHINDIR, the array size of the hardware - * indirection table. + * @size: On entry, the array size of the user buffer. On return from + * %ETHTOOL_GRXFHINDIR, the array size of the hardware indirection table. * @ring_index: RX ring/queue index for each hash value - * - * For %ETHTOOL_GRXFHINDIR, a @size of zero means that only the size - * should be returned. For %ETHTOOL_SRXFHINDIR, a @size of zero means - * the table should be reset to default values. This last feature - * is not supported by the original implementations. */ struct ethtool_rxfh_indir { __u32 cmd; @@ -746,6 +724,9 @@ enum ethtool_sfeatures_retval_bits { #include +/* needed by dev_disable_lro() */ +extern int __ethtool_set_flags(struct net_device *dev, u32 flags); + extern int __ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); @@ -769,18 +750,19 @@ struct net_device; /* Some generic methods drivers may use in their ethtool_ops */ u32 ethtool_op_get_link(struct net_device *dev); - -/** - * ethtool_rxfh_indir_default - get default value for RX flow hash indirection - * @index: Index in RX flow hash indirection table - * @n_rx_rings: Number of RX rings to use - * - * This function provides the default policy for RX flow hash indirection. 
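A driver relying on the default indirection policy described here (the helper body follows just below) simply spreads hash-table entries across rings modulo the ring count. A minimal stand-alone sketch, with an assumed table size of 8 and 3 RX rings:

/* Sketch of the default RX flow hash indirection policy: entry i -> ring (i % n). */
#include <stdio.h>

static unsigned int rxfh_indir_default(unsigned int index, unsigned int n_rx_rings)
{
        return index % n_rx_rings;      /* round-robin spread over the rings */
}

int main(void)
{
        unsigned int table[8];
        unsigned int i;

        for (i = 0; i < 8; i++)
                table[i] = rxfh_indir_default(i, 3);    /* 3 RX rings assumed */
        for (i = 0; i < 8; i++)
                printf("entry %u -> ring %u\n", i, table[i]);
        return 0;
}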
- */ -static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) -{ - return index % n_rx_rings; -} +u32 ethtool_op_get_tx_csum(struct net_device *dev); +int ethtool_op_set_tx_csum(struct net_device *dev, u32 data); +int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data); +int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data); +u32 ethtool_op_get_sg(struct net_device *dev); +int ethtool_op_set_sg(struct net_device *dev, u32 data); +u32 ethtool_op_get_tso(struct net_device *dev); +int ethtool_op_set_tso(struct net_device *dev, u32 data); +u32 ethtool_op_get_ufo(struct net_device *dev); +int ethtool_op_set_ufo(struct net_device *dev, u32 data); +u32 ethtool_op_get_flags(struct net_device *dev); +int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported); +bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported); /** * struct ethtool_ops - optional netdev operations @@ -825,6 +807,22 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) * @get_pauseparam: Report pause parameters * @set_pauseparam: Set pause parameters. Returns a negative error code * or zero. + * @get_rx_csum: Deprecated in favour of the netdev feature %NETIF_F_RXCSUM. + * Report whether receive checksums are turned on or off. + * @set_rx_csum: Deprecated in favour of generic netdev features. Turn + * receive checksum on or off. Returns a negative error code or zero. + * @get_tx_csum: Deprecated as redundant. Report whether transmit checksums + * are turned on or off. + * @set_tx_csum: Deprecated in favour of generic netdev features. Turn + * transmit checksums on or off. Returns a negative error code or zero. + * @get_sg: Deprecated as redundant. Report whether scatter-gather is + * enabled. + * @set_sg: Deprecated in favour of generic netdev features. Turn + * scatter-gather on or off. Returns a negative error code or zero. + * @get_tso: Deprecated as redundant. Report whether TCP segmentation + * offload is enabled. + * @set_tso: Deprecated in favour of generic netdev features. Turn TCP + * segmentation offload on or off. Returns a negative error code or zero. * @self_test: Run specified self-tests * @get_strings: Return a set of strings that describe the requested objects * @set_phys_id: Identify the physical devices, e.g. by flashing an LED @@ -846,6 +844,15 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) * negative error code or zero. * @complete: Function to be called after any other operation except * @begin. Will be called even if the other operation failed. + * @get_ufo: Deprecated as redundant. Report whether UDP fragmentation + * offload is enabled. + * @set_ufo: Deprecated in favour of generic netdev features. Turn UDP + * fragmentation offload on or off. Returns a negative error code or zero. + * @get_flags: Deprecated as redundant. Report features included in + * &enum ethtool_flags that are enabled. + * @set_flags: Deprecated in favour of generic netdev features. Turn + * features included in &enum ethtool_flags on or off. Returns a + * negative error code or zero. * @get_priv_flags: Report driver-specific feature flags. * @set_priv_flags: Set driver-specific feature flags. Returns a negative * error code or zero. @@ -859,13 +866,11 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) * @reset: Reset (part of) the device, as specified by a bitmask of * flags from &enum ethtool_reset_flags. Returns a negative * error code or zero. 
- * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table. - * Returns zero if not supported for this specific device. + * @set_rx_ntuple: Set an RX n-tuple rule. Returns a negative error code + * or zero. * @get_rxfh_indir: Get the contents of the RX flow hash indirection table. - * Will not be called if @get_rxfh_indir_size returns zero. * Returns a negative error code or zero. * @set_rxfh_indir: Set the contents of the RX flow hash indirection table. - * Will not be called if @get_rxfh_indir_size returns zero. * Returns a negative error code or zero. * @get_channels: Get number of channels. * @set_channels: Set number of channels. Returns a negative error code or @@ -912,6 +917,14 @@ struct ethtool_ops { struct ethtool_pauseparam*); int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam*); + u32 (*get_rx_csum)(struct net_device *); + int (*set_rx_csum)(struct net_device *, u32); + u32 (*get_tx_csum)(struct net_device *); + int (*set_tx_csum)(struct net_device *, u32); + u32 (*get_sg)(struct net_device *); + int (*set_sg)(struct net_device *, u32); + u32 (*get_tso)(struct net_device *); + int (*set_tso)(struct net_device *, u32); void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); void (*get_strings)(struct net_device *, u32 stringset, u8 *); int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state); @@ -919,6 +932,10 @@ struct ethtool_ops { struct ethtool_stats *, u64 *); int (*begin)(struct net_device *); void (*complete)(struct net_device *); + u32 (*get_ufo)(struct net_device *); + int (*set_ufo)(struct net_device *, u32); + u32 (*get_flags)(struct net_device *); + int (*set_flags)(struct net_device *, u32); u32 (*get_priv_flags)(struct net_device *); int (*set_priv_flags)(struct net_device *, u32); int (*get_sset_count)(struct net_device *, int); @@ -927,9 +944,12 @@ struct ethtool_ops { int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); int (*flash_device)(struct net_device *, struct ethtool_flash *); int (*reset)(struct net_device *, u32 *); - u32 (*get_rxfh_indir_size)(struct net_device *); - int (*get_rxfh_indir)(struct net_device *, u32 *); - int (*set_rxfh_indir)(struct net_device *, const u32 *); + int (*set_rx_ntuple)(struct net_device *, + struct ethtool_rx_ntuple *); + int (*get_rxfh_indir)(struct net_device *, + struct ethtool_rxfh_indir *); + int (*set_rxfh_indir)(struct net_device *, + const struct ethtool_rxfh_indir *); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); @@ -1153,12 +1173,6 @@ struct ethtool_ops { #define RX_CLS_FLOW_DISC 0xffffffffffffffffULL -/* Special RX classification rule insert location values */ -#define RX_CLS_LOC_SPECIAL 0x80000000 /* flag */ -#define RX_CLS_LOC_ANY 0xffffffff -#define RX_CLS_LOC_FIRST 0xfffffffe -#define RX_CLS_LOC_LAST 0xfffffffd - /* Reset flags */ /* The reset() operation must clear the flags for the components which * were actually reset. On successful return, the flags indicate the diff --git a/trunk/include/linux/genetlink.h b/trunk/include/linux/genetlink.h index 73c28dea10ae..61549b26ad6f 100644 --- a/trunk/include/linux/genetlink.h +++ b/trunk/include/linux/genetlink.h @@ -85,30 +85,6 @@ enum { /* All generic netlink requests are serialized by a global lock. 
*/ extern void genl_lock(void); extern void genl_unlock(void); -#ifdef CONFIG_PROVE_LOCKING -extern int lockdep_genl_is_held(void); -#endif - -/** - * rcu_dereference_genl - rcu_dereference with debug checking - * @p: The pointer to read, prior to dereferencing - * - * Do an rcu_dereference(p), but check caller either holds rcu_read_lock() - * or genl mutex. Note : Please prefer genl_dereference() or rcu_dereference() - */ -#define rcu_dereference_genl(p) \ - rcu_dereference_check(p, lockdep_genl_is_held()) - -/** - * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex - * @p: The pointer to read, prior to dereferencing - * - * Return the value of the specified RCU-protected pointer, but omit - * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because - * caller holds genl mutex. - */ -#define genl_dereference(p) \ - rcu_dereference_protected(p, lockdep_genl_is_held()) #endif /* __KERNEL__ */ diff --git a/trunk/include/linux/hardirq.h b/trunk/include/linux/hardirq.h index bb7f30971858..f743883f769e 100644 --- a/trunk/include/linux/hardirq.h +++ b/trunk/include/linux/hardirq.h @@ -139,7 +139,20 @@ static inline void account_system_vtime(struct task_struct *tsk) extern void account_system_vtime(struct task_struct *tsk); #endif +#if defined(CONFIG_NO_HZ) #if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU) +extern void rcu_enter_nohz(void); +extern void rcu_exit_nohz(void); + +static inline void rcu_irq_enter(void) +{ + rcu_exit_nohz(); +} + +static inline void rcu_irq_exit(void) +{ + rcu_enter_nohz(); +} static inline void rcu_nmi_enter(void) { @@ -150,9 +163,17 @@ static inline void rcu_nmi_exit(void) } #else +extern void rcu_irq_enter(void); +extern void rcu_irq_exit(void); extern void rcu_nmi_enter(void); extern void rcu_nmi_exit(void); #endif +#else +# define rcu_irq_enter() do { } while (0) +# define rcu_irq_exit() do { } while (0) +# define rcu_nmi_enter() do { } while (0) +# define rcu_nmi_exit() do { } while (0) +#endif /* #if defined(CONFIG_NO_HZ) */ /* * It is safe to do non-atomic ops on ->hardirq_context, diff --git a/trunk/include/linux/ieee80211.h b/trunk/include/linux/ieee80211.h index 210e2c325534..48363c3c40f8 100644 --- a/trunk/include/linux/ieee80211.h +++ b/trunk/include/linux/ieee80211.h @@ -128,7 +128,6 @@ #define IEEE80211_QOS_CTL_ACK_POLICY_NOACK 0x0020 #define IEEE80211_QOS_CTL_ACK_POLICY_NO_EXPL 0x0040 #define IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK 0x0060 -#define IEEE80211_QOS_CTL_ACK_POLICY_MASK 0x0060 /* A-MSDU 802.11n */ #define IEEE80211_QOS_CTL_A_MSDU_PRESENT 0x0080 /* Mesh Control 802.11s */ @@ -544,15 +543,6 @@ static inline int ieee80211_is_qos_nullfunc(__le16 fc) cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); } -/** - * ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set - * @seq_ctrl: frame sequence control bytes in little-endian byteorder - */ -static inline int ieee80211_is_first_frag(__le16 seq_ctrl) -{ - return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0; -} - struct ieee80211s_hdr { u8 flags; u8 ttl; @@ -780,9 +770,6 @@ struct ieee80211_mgmt { } u; } __attribute__ ((packed)); -/* Supported Rates value encodings in 802.11n-2009 7.3.2.2 */ -#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127 - /* mgmt header + 1 byte category code */ #define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) @@ -1565,8 +1552,6 @@ enum ieee80211_sa_query_action { #define WLAN_CIPHER_SUITE_WEP104 0x000FAC05 #define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06 -#define 
WLAN_CIPHER_SUITE_SMS4 0x00147201 - /* AKM suite selectors */ #define WLAN_AKM_SUITE_8021X 0x000FAC01 #define WLAN_AKM_SUITE_PSK 0x000FAC02 @@ -1703,23 +1688,6 @@ static inline bool ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr) return false; } -/** - * ieee80211_is_public_action - check if frame is a public action frame - * @hdr: the frame - * @len: length of the frame - */ -static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr, - size_t len) -{ - struct ieee80211_mgmt *mgmt = (void *)hdr; - - if (len < IEEE80211_MIN_ACTION_SIZE) - return false; - if (!ieee80211_is_action(hdr->frame_control)) - return false; - return mgmt->u.action.category == WLAN_CATEGORY_PUBLIC; -} - /** * ieee80211_fhss_chan_to_freq - get channel frequency * @channel: the FHSS channel diff --git a/trunk/include/linux/if.h b/trunk/include/linux/if.h index 06b6ef60c821..db20bd4fd16b 100644 --- a/trunk/include/linux/if.h +++ b/trunk/include/linux/if.h @@ -79,7 +79,6 @@ #define IFF_TX_SKB_SHARING 0x10000 /* The interface supports sharing * skbs on transmit */ #define IFF_UNICAST_FLT 0x20000 /* Supports unicast filtering */ -#define IFF_TEAM_PORT 0x40000 /* device used as team port */ #define IF_GET_IFACE 0x0001 /* for querying only */ #define IF_GET_PROTO 0x0002 diff --git a/trunk/include/linux/if_ether.h b/trunk/include/linux/if_ether.h index 56d907a2c804..e473003e4bda 100644 --- a/trunk/include/linux/if_ether.h +++ b/trunk/include/linux/if_ether.h @@ -79,7 +79,6 @@ #define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ #define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ #define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */ -#define ETH_P_802_EX1 0x88B5 /* 802.1 Local Experimental 1. */ #define ETH_P_TIPC 0x88CA /* TIPC */ #define ETH_P_8021AH 0x88E7 /* 802.1ah Backbone Service Tag */ #define ETH_P_1588 0x88F7 /* IEEE 1588 Timesync */ diff --git a/trunk/include/linux/if_team.h b/trunk/include/linux/if_team.h deleted file mode 100644 index 828181fbad5d..000000000000 --- a/trunk/include/linux/if_team.h +++ /dev/null @@ -1,242 +0,0 @@ -/* - * include/linux/if_team.h - Network team device driver header - * Copyright (c) 2011 Jiri Pirko - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#ifndef _LINUX_IF_TEAM_H_ -#define _LINUX_IF_TEAM_H_ - -#ifdef __KERNEL__ - -struct team_pcpu_stats { - u64 rx_packets; - u64 rx_bytes; - u64 rx_multicast; - u64 tx_packets; - u64 tx_bytes; - struct u64_stats_sync syncp; - u32 rx_dropped; - u32 tx_dropped; -}; - -struct team; - -struct team_port { - struct net_device *dev; - struct hlist_node hlist; /* node in hash list */ - struct list_head list; /* node in ordinary list */ - struct team *team; - int index; - - /* - * A place for storing original values of the device before it - * become a port. 
- */ - struct { - unsigned char dev_addr[MAX_ADDR_LEN]; - unsigned int mtu; - } orig; - - bool linkup; - u32 speed; - u8 duplex; - - struct rcu_head rcu; -}; - -struct team_mode_ops { - int (*init)(struct team *team); - void (*exit)(struct team *team); - rx_handler_result_t (*receive)(struct team *team, - struct team_port *port, - struct sk_buff *skb); - bool (*transmit)(struct team *team, struct sk_buff *skb); - int (*port_enter)(struct team *team, struct team_port *port); - void (*port_leave)(struct team *team, struct team_port *port); - void (*port_change_mac)(struct team *team, struct team_port *port); -}; - -enum team_option_type { - TEAM_OPTION_TYPE_U32, - TEAM_OPTION_TYPE_STRING, -}; - -struct team_option { - struct list_head list; - const char *name; - enum team_option_type type; - int (*getter)(struct team *team, void *arg); - int (*setter)(struct team *team, void *arg); -}; - -struct team_mode { - struct list_head list; - const char *kind; - struct module *owner; - size_t priv_size; - const struct team_mode_ops *ops; -}; - -#define TEAM_PORT_HASHBITS 4 -#define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS) - -#define TEAM_MODE_PRIV_LONGS 4 -#define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS) - -struct team { - struct net_device *dev; /* associated netdevice */ - struct team_pcpu_stats __percpu *pcpu_stats; - - struct mutex lock; /* used for overall locking, e.g. port lists write */ - - /* - * port lists with port count - */ - int port_count; - struct hlist_head port_hlist[TEAM_PORT_HASHENTRIES]; - struct list_head port_list; - - struct list_head option_list; - - const struct team_mode *mode; - struct team_mode_ops ops; - long mode_priv[TEAM_MODE_PRIV_LONGS]; -}; - -static inline struct hlist_head *team_port_index_hash(struct team *team, - int port_index) -{ - return &team->port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)]; -} - -static inline struct team_port *team_get_port_by_index(struct team *team, - int port_index) -{ - struct hlist_node *p; - struct team_port *port; - struct hlist_head *head = team_port_index_hash(team, port_index); - - hlist_for_each_entry(port, p, head, hlist) - if (port->index == port_index) - return port; - return NULL; -} -static inline struct team_port *team_get_port_by_index_rcu(struct team *team, - int port_index) -{ - struct hlist_node *p; - struct team_port *port; - struct hlist_head *head = team_port_index_hash(team, port_index); - - hlist_for_each_entry_rcu(port, p, head, hlist) - if (port->index == port_index) - return port; - return NULL; -} - -extern int team_port_set_team_mac(struct team_port *port); -extern int team_options_register(struct team *team, - const struct team_option *option, - size_t option_count); -extern void team_options_unregister(struct team *team, - const struct team_option *option, - size_t option_count); -extern int team_mode_register(struct team_mode *mode); -extern int team_mode_unregister(struct team_mode *mode); - -#endif /* __KERNEL__ */ - -#define TEAM_STRING_MAX_LEN 32 - -/********************************** - * NETLINK_GENERIC netlink family. 
- **********************************/ - -enum { - TEAM_CMD_NOOP, - TEAM_CMD_OPTIONS_SET, - TEAM_CMD_OPTIONS_GET, - TEAM_CMD_PORT_LIST_GET, - - __TEAM_CMD_MAX, - TEAM_CMD_MAX = (__TEAM_CMD_MAX - 1), -}; - -enum { - TEAM_ATTR_UNSPEC, - TEAM_ATTR_TEAM_IFINDEX, /* u32 */ - TEAM_ATTR_LIST_OPTION, /* nest */ - TEAM_ATTR_LIST_PORT, /* nest */ - - __TEAM_ATTR_MAX, - TEAM_ATTR_MAX = __TEAM_ATTR_MAX - 1, -}; - -/* Nested layout of get/set msg: - * - * [TEAM_ATTR_LIST_OPTION] - * [TEAM_ATTR_ITEM_OPTION] - * [TEAM_ATTR_OPTION_*], ... - * [TEAM_ATTR_ITEM_OPTION] - * [TEAM_ATTR_OPTION_*], ... - * ... - * [TEAM_ATTR_LIST_PORT] - * [TEAM_ATTR_ITEM_PORT] - * [TEAM_ATTR_PORT_*], ... - * [TEAM_ATTR_ITEM_PORT] - * [TEAM_ATTR_PORT_*], ... - * ... - */ - -enum { - TEAM_ATTR_ITEM_OPTION_UNSPEC, - TEAM_ATTR_ITEM_OPTION, /* nest */ - - __TEAM_ATTR_ITEM_OPTION_MAX, - TEAM_ATTR_ITEM_OPTION_MAX = __TEAM_ATTR_ITEM_OPTION_MAX - 1, -}; - -enum { - TEAM_ATTR_OPTION_UNSPEC, - TEAM_ATTR_OPTION_NAME, /* string */ - TEAM_ATTR_OPTION_CHANGED, /* flag */ - TEAM_ATTR_OPTION_TYPE, /* u8 */ - TEAM_ATTR_OPTION_DATA, /* dynamic */ - - __TEAM_ATTR_OPTION_MAX, - TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1, -}; - -enum { - TEAM_ATTR_ITEM_PORT_UNSPEC, - TEAM_ATTR_ITEM_PORT, /* nest */ - - __TEAM_ATTR_ITEM_PORT_MAX, - TEAM_ATTR_ITEM_PORT_MAX = __TEAM_ATTR_ITEM_PORT_MAX - 1, -}; - -enum { - TEAM_ATTR_PORT_UNSPEC, - TEAM_ATTR_PORT_IFINDEX, /* u32 */ - TEAM_ATTR_PORT_CHANGED, /* flag */ - TEAM_ATTR_PORT_LINKUP, /* flag */ - TEAM_ATTR_PORT_SPEED, /* u32 */ - TEAM_ATTR_PORT_DUPLEX, /* u8 */ - - __TEAM_ATTR_PORT_MAX, - TEAM_ATTR_PORT_MAX = __TEAM_ATTR_PORT_MAX - 1, -}; - -/* - * NETLINK_GENERIC related info - */ -#define TEAM_GENL_NAME "team" -#define TEAM_GENL_VERSION 0x1 -#define TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME "change_event" - -#endif /* _LINUX_IF_TEAM_H_ */ diff --git a/trunk/include/linux/if_vlan.h b/trunk/include/linux/if_vlan.h index 13aff1e2183b..12d5543b14f2 100644 --- a/trunk/include/linux/if_vlan.h +++ b/trunk/include/linux/if_vlan.h @@ -74,7 +74,22 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) /* found in socket.c */ extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); -struct vlan_info; +/* if this changes, algorithm will have to be reworked because this + * depends on completely exhausting the VLAN identifier space. Thus + * it gives constant time look-up, but in many cases it wastes memory. + */ +#define VLAN_GROUP_ARRAY_SPLIT_PARTS 8 +#define VLAN_GROUP_ARRAY_PART_LEN (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS) + +struct vlan_group { + struct net_device *real_dev; /* The ethernet(like) device + * the vlan is attached to. 
+ */ + unsigned int nr_vlans; + struct hlist_node hlist; /* linked list */ + struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS]; + struct rcu_head rcu; +}; static inline int is_vlan_dev(struct net_device *dev) { @@ -94,13 +109,6 @@ extern u16 vlan_dev_vlan_id(const struct net_device *dev); extern bool vlan_do_receive(struct sk_buff **skb, bool last_handler); extern struct sk_buff *vlan_untag(struct sk_buff *skb); -extern int vlan_vid_add(struct net_device *dev, unsigned short vid); -extern void vlan_vid_del(struct net_device *dev, unsigned short vid); - -extern int vlan_vids_add_by_dev(struct net_device *dev, - const struct net_device *by_dev); -extern void vlan_vids_del_by_dev(struct net_device *dev, - const struct net_device *by_dev); #else static inline struct net_device * __vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id) @@ -131,26 +139,6 @@ static inline struct sk_buff *vlan_untag(struct sk_buff *skb) { return skb; } - -static inline int vlan_vid_add(struct net_device *dev, unsigned short vid) -{ - return 0; -} - -static inline void vlan_vid_del(struct net_device *dev, unsigned short vid) -{ -} - -static inline int vlan_vids_add_by_dev(struct net_device *dev, - const struct net_device *by_dev) -{ - return 0; -} - -static inline void vlan_vids_del_by_dev(struct net_device *dev, - const struct net_device *by_dev) -{ -} #endif /** @@ -322,40 +310,6 @@ static inline __be16 vlan_get_protocol(const struct sk_buff *skb) return protocol; } - -static inline void vlan_set_encap_proto(struct sk_buff *skb, - struct vlan_hdr *vhdr) -{ - __be16 proto; - unsigned char *rawp; - - /* - * Was a VLAN packet, grab the encapsulated protocol, which the layer - * three protocols care about. - */ - - proto = vhdr->h_vlan_encapsulated_proto; - if (ntohs(proto) >= 1536) { - skb->protocol = proto; - return; - } - - rawp = skb->data; - if (*(unsigned short *) rawp == 0xFFFF) - /* - * This is a magic hack to spot IPX packets. Older Novell - * breaks the protocol design and runs IPX over 802.3 without - * an 802.2 LLC layer. We look for FFFF which isn't a used - * 802.2 SSAP/DSAP. This won't work for fault tolerant netware - * but does for the rest. - */ - skb->protocol = htons(ETH_P_802_3); - else - /* - * Real 802.2 LLC - */ - skb->protocol = htons(ETH_P_802_2); -} #endif /* __KERNEL__ */ /* VLAN IOCTLs are found in sockios.h */ @@ -398,7 +352,7 @@ struct vlan_ioctl_args { unsigned int skb_priority; unsigned int name_type; unsigned int bind_type; - unsigned int flag; /* Matches vlan_dev_priv flags */ + unsigned int flag; /* Matches vlan_dev_info flags */ } u; short vlan_qos; diff --git a/trunk/include/linux/inet_diag.h b/trunk/include/linux/inet_diag.h index 34e8d52c1925..abf5028db981 100644 --- a/trunk/include/linux/inet_diag.h +++ b/trunk/include/linux/inet_diag.h @@ -22,7 +22,7 @@ struct inet_diag_sockid { /* Request structure */ -struct inet_diag_req_compat { +struct inet_diag_req { __u8 idiag_family; /* Family of addresses. 
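The vlan_set_encap_proto() helper removed in the if_vlan.h hunk above relies on the EtherType-versus-length rule: values of 1536 (0x0600) and up are EtherTypes, smaller values are 802.3 length fields, and a payload starting with 0xFFFF is taken to be raw IPX. A stand-alone sketch of that classification (the constant names here are illustrative, not the kernel's):

/* Toy classification mirroring the removed vlan_set_encap_proto() logic. */
#include <stdio.h>
#include <stdint.h>

enum encap { ENCAP_ETHERTYPE, ENCAP_IPX_RAW, ENCAP_LLC };

static enum encap classify(uint16_t proto_host_order, uint16_t first_payload_word)
{
        if (proto_host_order >= 1536)
                return ENCAP_ETHERTYPE;         /* keep the embedded protocol      */
        if (first_payload_word == 0xFFFF)
                return ENCAP_IPX_RAW;           /* ETH_P_802_3 in the kernel code  */
        return ENCAP_LLC;                       /* ETH_P_802_2 in the kernel code  */
}

int main(void)
{
        printf("%d %d %d\n",
               classify(0x0800, 0),             /* IPv4 EtherType */
               classify(0x0100, 0xFFFF),        /* raw IPX        */
               classify(0x0100, 0xAAAA));       /* real 802.2 LLC */
        return 0;
}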
*/ __u8 idiag_src_len; __u8 idiag_dst_len; @@ -34,15 +34,6 @@ struct inet_diag_req_compat { __u32 idiag_dbs; /* Tables to dump (NI) */ }; -struct inet_diag_req { - __u8 sdiag_family; - __u8 sdiag_protocol; - __u8 idiag_ext; - __u8 pad; - __u32 idiag_states; - struct inet_diag_sockid id; -}; - enum { INET_DIAG_REQ_NONE, INET_DIAG_REQ_BYTECODE, @@ -108,10 +99,9 @@ enum { INET_DIAG_CONG, INET_DIAG_TOS, INET_DIAG_TCLASS, - INET_DIAG_SKMEMINFO, }; -#define INET_DIAG_MAX INET_DIAG_SKMEMINFO +#define INET_DIAG_MAX INET_DIAG_TCLASS /* INET_DIAG_MEM */ @@ -135,41 +125,16 @@ struct tcpvegas_info { #ifdef __KERNEL__ struct sock; struct inet_hashinfo; -struct nlattr; -struct nlmsghdr; -struct sk_buff; -struct netlink_callback; struct inet_diag_handler { - void (*dump)(struct sk_buff *skb, - struct netlink_callback *cb, - struct inet_diag_req *r, - struct nlattr *bc); - - int (*dump_one)(struct sk_buff *in_skb, - const struct nlmsghdr *nlh, - struct inet_diag_req *req); - + struct inet_hashinfo *idiag_hashinfo; void (*idiag_get_info)(struct sock *sk, struct inet_diag_msg *r, void *info); + __u16 idiag_info_size; __u16 idiag_type; }; -struct inet_connection_sock; -int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, - struct sk_buff *skb, struct inet_diag_req *req, - u32 pid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh); -void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb, - struct netlink_callback *cb, struct inet_diag_req *r, - struct nlattr *bc); -int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, - struct sk_buff *in_skb, const struct nlmsghdr *nlh, - struct inet_diag_req *req); - -int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk); - extern int inet_diag_register(const struct inet_diag_handler *handler); extern void inet_diag_unregister(const struct inet_diag_handler *handler); #endif /* __KERNEL__ */ diff --git a/trunk/include/linux/ipv6.h b/trunk/include/linux/ipv6.h index 6318268dcaf5..0c997767429a 100644 --- a/trunk/include/linux/ipv6.h +++ b/trunk/include/linux/ipv6.h @@ -404,7 +404,7 @@ struct tcp6_sock { extern int inet6_sk_rebuild_header(struct sock *sk); -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk) { return inet_sk(__sk)->pinet6; @@ -515,7 +515,7 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk) #define inet6_rcv_saddr(__sk) NULL #define tcp_twsk_ipv6only(__sk) 0 #define inet_v6_ipv6only(__sk) 0 -#endif /* IS_ENABLED(CONFIG_IPV6) */ +#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ #define INET6_MATCH(__sk, __net, __hash, __saddr, __daddr, __ports, __dif)\ (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net) && \ diff --git a/trunk/include/linux/jump_label.h b/trunk/include/linux/jump_label.h index 5ce8b140428f..388b0d425b50 100644 --- a/trunk/include/linux/jump_label.h +++ b/trunk/include/linux/jump_label.h @@ -3,7 +3,6 @@ #include #include -#include #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) @@ -15,12 +14,6 @@ struct jump_label_key { #endif }; -struct jump_label_key_deferred { - struct jump_label_key key; - unsigned long timeout; - struct delayed_work work; -}; - # include # define HAVE_JUMP_LABEL #endif /* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */ @@ -58,11 +51,8 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry, extern int jump_label_text_reserved(void *start, void *end); extern void jump_label_inc(struct jump_label_key *key); 
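The jump_label API in this hunk (the non-asm-goto fallback bodies continue just below) is typically used to keep a rarely taken branch cheap until someone enables it. A simplified userspace model of that pattern, with a plain counter standing in for the atomic_t:

/* Userspace model of the jump_label fallback: branch off until inc'd. */
#include <stdio.h>
#include <stdbool.h>

struct jump_label_key {
        int enabled;                    /* atomic_t in the kernel */
};

static bool static_branch(struct jump_label_key *key)
{
        return key->enabled > 0;        /* unlikely() fast path in the kernel */
}

static void jump_label_inc(struct jump_label_key *key) { key->enabled++; }
static void jump_label_dec(struct jump_label_key *key) { key->enabled--; }

static struct jump_label_key tracing_on;        /* assumed example key */

static void hot_path(void)
{
        if (static_branch(&tracing_on))
                printf("slow path: tracing\n");
        else
                printf("fast path\n");
}

int main(void)
{
        hot_path();                     /* fast path               */
        jump_label_inc(&tracing_on);    /* e.g. tracepoint enabled */
        hot_path();                     /* slow path               */
        jump_label_dec(&tracing_on);
        return 0;
}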
extern void jump_label_dec(struct jump_label_key *key); -extern void jump_label_dec_deferred(struct jump_label_key_deferred *key); extern bool jump_label_enabled(struct jump_label_key *key); extern void jump_label_apply_nops(struct module *mod); -extern void jump_label_rate_limit(struct jump_label_key_deferred *key, - unsigned long rl); #else /* !HAVE_JUMP_LABEL */ @@ -78,10 +68,6 @@ static __always_inline void jump_label_init(void) { } -struct jump_label_key_deferred { - struct jump_label_key key; -}; - static __always_inline bool static_branch(struct jump_label_key *key) { if (unlikely(atomic_read(&key->enabled))) @@ -99,11 +85,6 @@ static inline void jump_label_dec(struct jump_label_key *key) atomic_dec(&key->enabled); } -static inline void jump_label_dec_deferred(struct jump_label_key_deferred *key) -{ - jump_label_dec(&key->key); -} - static inline int jump_label_text_reserved(void *start, void *end) { return 0; @@ -121,14 +102,6 @@ static inline int jump_label_apply_nops(struct module *mod) { return 0; } - -static inline void jump_label_rate_limit(struct jump_label_key_deferred *key, - unsigned long rl) -{ -} #endif /* HAVE_JUMP_LABEL */ -#define jump_label_key_enabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(1), }) -#define jump_label_key_disabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(0), }) - #endif /* _LINUX_JUMP_LABEL_H */ diff --git a/trunk/include/linux/kernel_stat.h b/trunk/include/linux/kernel_stat.h index 2fbd9053c2df..0cce2db580c3 100644 --- a/trunk/include/linux/kernel_stat.h +++ b/trunk/include/linux/kernel_stat.h @@ -6,7 +6,6 @@ #include #include #include -#include #include #include @@ -16,25 +15,21 @@ * used by rstatd/perfmeter */ -enum cpu_usage_stat { - CPUTIME_USER, - CPUTIME_NICE, - CPUTIME_SYSTEM, - CPUTIME_SOFTIRQ, - CPUTIME_IRQ, - CPUTIME_IDLE, - CPUTIME_IOWAIT, - CPUTIME_STEAL, - CPUTIME_GUEST, - CPUTIME_GUEST_NICE, - NR_STATS, -}; - -struct kernel_cpustat { - u64 cpustat[NR_STATS]; +struct cpu_usage_stat { + cputime64_t user; + cputime64_t nice; + cputime64_t system; + cputime64_t softirq; + cputime64_t irq; + cputime64_t idle; + cputime64_t iowait; + cputime64_t steal; + cputime64_t guest; + cputime64_t guest_nice; }; struct kernel_stat { + struct cpu_usage_stat cpustat; #ifndef CONFIG_GENERIC_HARDIRQS unsigned int irqs[NR_IRQS]; #endif @@ -43,13 +38,10 @@ struct kernel_stat { }; DECLARE_PER_CPU(struct kernel_stat, kstat); -DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat); +#define kstat_cpu(cpu) per_cpu(kstat, cpu) /* Must have preemption disabled for this to be meaningful. 
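The kernel_cpustat layout on the removed side of the kernel_stat.h hunk above replaces named cputime fields with an enum-indexed array. A toy model of the array-indexed accounting that layout allows (single CPU, plain C, assumed categories only):

/* Toy single-CPU model of enum-indexed cpustat accounting. */
#include <stdio.h>
#include <stdint.h>

enum cpu_usage_stat { CPUTIME_USER, CPUTIME_SYSTEM, CPUTIME_IDLE, NR_STATS };

struct kernel_cpustat { uint64_t cpustat[NR_STATS]; };

static struct kernel_cpustat kcpustat;          /* per-CPU in the kernel */

static void account(enum cpu_usage_stat idx, uint64_t delta)
{
        kcpustat.cpustat[idx] += delta;         /* one add works for any category */
}

int main(void)
{
        account(CPUTIME_USER, 10);
        account(CPUTIME_IDLE, 90);
        printf("user=%llu idle=%llu\n",
               (unsigned long long)kcpustat.cpustat[CPUTIME_USER],
               (unsigned long long)kcpustat.cpustat[CPUTIME_IDLE]);
        return 0;
}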
*/ -#define kstat_this_cpu (&__get_cpu_var(kstat)) -#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat)) -#define kstat_cpu(cpu) per_cpu(kstat, cpu) -#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu) +#define kstat_this_cpu __get_cpu_var(kstat) extern unsigned long long nr_context_switches(void); diff --git a/trunk/include/linux/kvm.h b/trunk/include/linux/kvm.h index 68e67e50d028..c3892fc1d538 100644 --- a/trunk/include/linux/kvm.h +++ b/trunk/include/linux/kvm.h @@ -557,7 +557,6 @@ struct kvm_ppc_pvinfo { #define KVM_CAP_MAX_VCPUS 66 /* returns max vcpus per vm */ #define KVM_CAP_PPC_PAPR 68 #define KVM_CAP_S390_GMAP 71 -#define KVM_CAP_TSC_DEADLINE_TIMER 72 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/trunk/include/linux/latencytop.h b/trunk/include/linux/latencytop.h index e23121f9d82a..b0e99898527c 100644 --- a/trunk/include/linux/latencytop.h +++ b/trunk/include/linux/latencytop.h @@ -10,8 +10,6 @@ #define _INCLUDE_GUARD_LATENCYTOP_H_ #include -struct task_struct; - #ifdef CONFIG_LATENCYTOP #define LT_SAVECOUNT 32 @@ -25,6 +23,7 @@ struct latency_record { }; +struct task_struct; extern int latencytop_enabled; void __account_scheduler_latency(struct task_struct *task, int usecs, int inter); diff --git a/trunk/include/linux/lockd/lockd.h b/trunk/include/linux/lockd/lockd.h index 90b0656a869e..ff9abff55aa0 100644 --- a/trunk/include/linux/lockd/lockd.h +++ b/trunk/include/linux/lockd/lockd.h @@ -301,7 +301,7 @@ static inline int __nlm_privileged_request4(const struct sockaddr *sap) return ipv4_is_loopback(sin->sin_addr.s_addr); } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static inline int __nlm_privileged_request6(const struct sockaddr *sap) { const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; @@ -314,12 +314,12 @@ static inline int __nlm_privileged_request6(const struct sockaddr *sap) return ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LOOPBACK; } -#else /* IS_ENABLED(CONFIG_IPV6) */ +#else /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ static inline int __nlm_privileged_request6(const struct sockaddr *sap) { return 0; } -#endif /* IS_ENABLED(CONFIG_IPV6) */ +#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ /* * Ensure incoming requests are from local privileged callers. diff --git a/trunk/include/linux/lockdep.h b/trunk/include/linux/lockdep.h index d36619ead3ba..b6a56e37284c 100644 --- a/trunk/include/linux/lockdep.h +++ b/trunk/include/linux/lockdep.h @@ -343,8 +343,6 @@ extern void lockdep_trace_alloc(gfp_t mask); #define lockdep_assert_held(l) WARN_ON(debug_locks && !lockdep_is_held(l)) -#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) - #else /* !LOCKDEP */ static inline void lockdep_off(void) @@ -394,8 +392,6 @@ struct lock_class_key { }; #define lockdep_assert_held(l) do { } while (0) -#define lockdep_recursing(tsk) (0) - #endif /* !LOCKDEP */ #ifdef CONFIG_LOCK_STAT diff --git a/trunk/include/linux/mdio-bitbang.h b/trunk/include/linux/mdio-bitbang.h index 76f52bbbb2f4..0fe00cd4c93c 100644 --- a/trunk/include/linux/mdio-bitbang.h +++ b/trunk/include/linux/mdio-bitbang.h @@ -32,8 +32,6 @@ struct mdiobb_ops { struct mdiobb_ctrl { const struct mdiobb_ops *ops; - /* reset callback */ - int (*reset)(struct mii_bus *bus); }; /* The returned bus is not yet registered with the phy layer. 
*/ diff --git a/trunk/include/linux/mdio-gpio.h b/trunk/include/linux/mdio-gpio.h index 7c9fe3c2be73..e9d3fdfe41d7 100644 --- a/trunk/include/linux/mdio-gpio.h +++ b/trunk/include/linux/mdio-gpio.h @@ -20,8 +20,6 @@ struct mdio_gpio_platform_data { unsigned int phy_mask; int irqs[PHY_MAX_ADDR]; - /* reset callback */ - int (*reset)(struct mii_bus *bus); }; #endif /* __LINUX_MDIO_GPIO_H */ diff --git a/trunk/include/linux/memblock.h b/trunk/include/linux/memblock.h index a6bb10235148..e6b843e16e81 100644 --- a/trunk/include/linux/memblock.h +++ b/trunk/include/linux/memblock.h @@ -2,6 +2,8 @@ #define _LINUX_MEMBLOCK_H #ifdef __KERNEL__ +#define MEMBLOCK_ERROR 0 + #ifdef CONFIG_HAVE_MEMBLOCK /* * Logical memory blocks. @@ -17,161 +19,81 @@ #include #include +#include + #define INIT_MEMBLOCK_REGIONS 128 struct memblock_region { phys_addr_t base; phys_addr_t size; -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP - int nid; -#endif }; struct memblock_type { unsigned long cnt; /* number of regions */ unsigned long max; /* size of the allocated array */ - phys_addr_t total_size; /* size of all regions */ struct memblock_region *regions; }; struct memblock { phys_addr_t current_limit; + phys_addr_t memory_size; /* Updated by memblock_analyze() */ struct memblock_type memory; struct memblock_type reserved; }; extern struct memblock memblock; extern int memblock_debug; +extern int memblock_can_resize; #define memblock_dbg(fmt, ...) \ if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) -phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end, - phys_addr_t size, phys_addr_t align, int nid); -phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, - phys_addr_t size, phys_addr_t align); +u64 memblock_find_in_range(u64 start, u64 end, u64 size, u64 align); int memblock_free_reserved_regions(void); int memblock_reserve_reserved_regions(void); -void memblock_allow_resize(void); -int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid); -int memblock_add(phys_addr_t base, phys_addr_t size); -int memblock_remove(phys_addr_t base, phys_addr_t size); -int memblock_free(phys_addr_t base, phys_addr_t size); -int memblock_reserve(phys_addr_t base, phys_addr_t size); - -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP -void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, - unsigned long *out_end_pfn, int *out_nid); - -/** - * for_each_mem_pfn_range - early memory pfn range iterator - * @i: an integer used as loop variable - * @nid: node selector, %MAX_NUMNODES for all nodes - * @p_start: ptr to ulong for start pfn of the range, can be %NULL - * @p_end: ptr to ulong for end pfn of the range, can be %NULL - * @p_nid: ptr to int for nid of the range, can be %NULL - * - * Walks over configured memory ranges. Available after early_node_map is - * populated. 
- */ -#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \ - for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \ - i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid)) -#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ - -void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start, - phys_addr_t *out_end, int *out_nid); - -/** - * for_each_free_mem_range - iterate through free memblock areas - * @i: u64 used as loop variable - * @nid: node selector, %MAX_NUMNODES for all nodes - * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL - * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL - * @p_nid: ptr to int for nid of the range, can be %NULL - * - * Walks over free (memory && !reserved) areas of memblock. Available as - * soon as memblock is initialized. - */ -#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid) \ - for (i = 0, \ - __next_free_mem_range(&i, nid, p_start, p_end, p_nid); \ - i != (u64)ULLONG_MAX; \ - __next_free_mem_range(&i, nid, p_start, p_end, p_nid)) - -void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start, - phys_addr_t *out_end, int *out_nid); +extern void memblock_init(void); +extern void memblock_analyze(void); +extern long memblock_add(phys_addr_t base, phys_addr_t size); +extern long memblock_remove(phys_addr_t base, phys_addr_t size); +extern long memblock_free(phys_addr_t base, phys_addr_t size); +extern long memblock_reserve(phys_addr_t base, phys_addr_t size); -/** - * for_each_free_mem_range_reverse - rev-iterate through free memblock areas - * @i: u64 used as loop variable - * @nid: node selector, %MAX_NUMNODES for all nodes - * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL - * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL - * @p_nid: ptr to int for nid of the range, can be %NULL - * - * Walks over free (memory && !reserved) areas of memblock in reverse - * order. Available as soon as memblock is initialized. 
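The for_each_free_mem_range() kerneldoc above describes an index-driven iterator wrapped in a for loop, terminated by a ULLONG_MAX sentinel. A toy stand-alone model of that style, iterating an assumed static range table instead of memblock:

/* Toy model of the index-driven for_each_free_mem_range() iteration style. */
#include <stdio.h>
#include <stdint.h>

struct range { uint64_t start, end; };

static const struct range free_ranges[] = {
        { 0x1000, 0x8000 }, { 0x10000, 0x20000 },
};

static void next_free_range(uint64_t *idx, uint64_t *start, uint64_t *end)
{
        if (*idx < sizeof(free_ranges) / sizeof(free_ranges[0])) {
                *start = free_ranges[*idx].start;
                *end = free_ranges[*idx].end;
                (*idx)++;
        } else {
                *idx = UINT64_MAX;      /* terminator, like ULLONG_MAX above */
        }
}

#define for_each_free_range(i, p_start, p_end)                          \
        for (i = 0, next_free_range(&i, p_start, p_end);                 \
             i != UINT64_MAX;                                            \
             next_free_range(&i, p_start, p_end))

int main(void)
{
        uint64_t i, start, end;

        for_each_free_range(i, &start, &end)
                printf("free: %#llx-%#llx\n",
                       (unsigned long long)start, (unsigned long long)end);
        return 0;
}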
+/* The numa aware allocator is only available if + * CONFIG_ARCH_POPULATES_NODE_MAP is set */ -#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid) \ - for (i = (u64)ULLONG_MAX, \ - __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid); \ - i != (u64)ULLONG_MAX; \ - __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid)) +extern phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, + int nid); +extern phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, + int nid); -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP -int memblock_set_node(phys_addr_t base, phys_addr_t size, int nid); - -static inline void memblock_set_region_node(struct memblock_region *r, int nid) -{ - r->nid = nid; -} - -static inline int memblock_get_region_node(const struct memblock_region *r) -{ - return r->nid; -} -#else -static inline void memblock_set_region_node(struct memblock_region *r, int nid) -{ -} - -static inline int memblock_get_region_node(const struct memblock_region *r) -{ - return 0; -} -#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ - -phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid); -phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid); - -phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align); +extern phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align); /* Flags for memblock_alloc_base() amd __memblock_alloc_base() */ #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) #define MEMBLOCK_ALLOC_ACCESSIBLE 0 -phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align, - phys_addr_t max_addr); -phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align, - phys_addr_t max_addr); -phys_addr_t memblock_phys_mem_size(void); -phys_addr_t memblock_start_of_DRAM(void); -phys_addr_t memblock_end_of_DRAM(void); -void memblock_enforce_memory_limit(phys_addr_t memory_limit); -int memblock_is_memory(phys_addr_t addr); -int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); -int memblock_is_reserved(phys_addr_t addr); -int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); - -extern void __memblock_dump_all(void); - -static inline void memblock_dump_all(void) -{ - if (memblock_debug) - __memblock_dump_all(); -} +extern phys_addr_t memblock_alloc_base(phys_addr_t size, + phys_addr_t align, + phys_addr_t max_addr); +extern phys_addr_t __memblock_alloc_base(phys_addr_t size, + phys_addr_t align, + phys_addr_t max_addr); +extern phys_addr_t memblock_phys_mem_size(void); +extern phys_addr_t memblock_start_of_DRAM(void); +extern phys_addr_t memblock_end_of_DRAM(void); +extern void memblock_enforce_memory_limit(phys_addr_t memory_limit); +extern int memblock_is_memory(phys_addr_t addr); +extern int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); +extern int memblock_is_reserved(phys_addr_t addr); +extern int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); + +extern void memblock_dump_all(void); + +/* Provided by the architecture */ +extern phys_addr_t memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid); +extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1, + phys_addr_t addr2, phys_addr_t size2); /** * memblock_set_current_limit - Set the current allocation limit to allow @@ -179,7 +101,7 @@ static inline void memblock_dump_all(void) * accessible during boot * @limit: New limit value (physical address) */ -void memblock_set_current_limit(phys_addr_t limit); +extern void 
memblock_set_current_limit(phys_addr_t limit); /* @@ -232,9 +154,9 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo region++) -#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK -#define __init_memblock __meminit -#define __initdata_memblock __meminitdata +#ifdef ARCH_DISCARD_MEMBLOCK +#define __init_memblock __init +#define __initdata_memblock __initdata #else #define __init_memblock #define __initdata_memblock @@ -243,7 +165,7 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo #else static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align) { - return 0; + return MEMBLOCK_ERROR; } #endif /* CONFIG_HAVE_MEMBLOCK */ diff --git a/trunk/include/linux/memcontrol.h b/trunk/include/linux/memcontrol.h index 9b296ea41bb8..b87068a1a09e 100644 --- a/trunk/include/linux/memcontrol.h +++ b/trunk/include/linux/memcontrol.h @@ -85,9 +85,6 @@ extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm); -extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); -extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont); - static inline int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup) { @@ -384,25 +381,5 @@ mem_cgroup_print_bad_page(struct page *page) } #endif -enum { - UNDER_LIMIT, - SOFT_LIMIT, - OVER_LIMIT, -}; - -#ifdef CONFIG_INET -struct sock; -#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM -void sock_update_memcg(struct sock *sk); -void sock_release_memcg(struct sock *sk); -#else -static inline void sock_update_memcg(struct sock *sk) -{ -} -static inline void sock_release_memcg(struct sock *sk) -{ -} -#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */ -#endif /* CONFIG_INET */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/trunk/include/linux/mii.h b/trunk/include/linux/mii.h index 2783eca629a0..27748230aa69 100644 --- a/trunk/include/linux/mii.h +++ b/trunk/include/linux/mii.h @@ -9,7 +9,6 @@ #define __LINUX_MII_H__ #include -#include /* Generic MII registers. */ #define MII_BMCR 0x00 /* Basic mode control register */ @@ -240,205 +239,6 @@ static inline unsigned int mii_duplex (unsigned int duplex_lock, return 0; } -/** - * ethtool_adv_to_mii_adv_t - * @ethadv: the ethtool advertisement settings - * - * A small helper function that translates ethtool advertisement - * settings to phy autonegotiation advertisements for the - * MII_ADVERTISE register. - */ -static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv) -{ - u32 result = 0; - - if (ethadv & ADVERTISED_10baseT_Half) - result |= ADVERTISE_10HALF; - if (ethadv & ADVERTISED_10baseT_Full) - result |= ADVERTISE_10FULL; - if (ethadv & ADVERTISED_100baseT_Half) - result |= ADVERTISE_100HALF; - if (ethadv & ADVERTISED_100baseT_Full) - result |= ADVERTISE_100FULL; - if (ethadv & ADVERTISED_Pause) - result |= ADVERTISE_PAUSE_CAP; - if (ethadv & ADVERTISED_Asym_Pause) - result |= ADVERTISE_PAUSE_ASYM; - - return result; -} - -/** - * mii_adv_to_ethtool_adv_t - * @adv: value of the MII_ADVERTISE register - * - * A small helper function that translates MII_ADVERTISE bits - * to ethtool advertisement settings. 
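The mii.h helpers removed above translate between ethtool ADVERTISED_* bits and MII_ADVERTISE register bits. A reduced stand-alone sketch of the ethtool-to-MII direction, using illustrative stand-in bit values rather than the real header constants:

/* Reduced sketch of ethtool -> MII advertisement translation. */
#include <stdio.h>
#include <stdint.h>

#define ADV_ETH_10_HALF   (1u << 0)     /* stands in for ADVERTISED_10baseT_Half */
#define ADV_ETH_10_FULL   (1u << 1)     /* stands in for ADVERTISED_10baseT_Full */
#define ADV_MII_10_HALF   0x0020u       /* stands in for ADVERTISE_10HALF        */
#define ADV_MII_10_FULL   0x0040u       /* stands in for ADVERTISE_10FULL        */

static uint32_t ethadv_to_mii_adv(uint32_t ethadv)
{
        uint32_t result = 0;

        if (ethadv & ADV_ETH_10_HALF)
                result |= ADV_MII_10_HALF;
        if (ethadv & ADV_ETH_10_FULL)
                result |= ADV_MII_10_FULL;
        /* the real helper also covers 100baseT and the pause bits */
        return result;
}

int main(void)
{
        printf("MII_ADVERTISE bits: 0x%04x\n",
               ethadv_to_mii_adv(ADV_ETH_10_HALF | ADV_ETH_10_FULL));
        return 0;
}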
- */ -static inline u32 mii_adv_to_ethtool_adv_t(u32 adv) -{ - u32 result = 0; - - if (adv & ADVERTISE_10HALF) - result |= ADVERTISED_10baseT_Half; - if (adv & ADVERTISE_10FULL) - result |= ADVERTISED_10baseT_Full; - if (adv & ADVERTISE_100HALF) - result |= ADVERTISED_100baseT_Half; - if (adv & ADVERTISE_100FULL) - result |= ADVERTISED_100baseT_Full; - if (adv & ADVERTISE_PAUSE_CAP) - result |= ADVERTISED_Pause; - if (adv & ADVERTISE_PAUSE_ASYM) - result |= ADVERTISED_Asym_Pause; - - return result; -} - -/** - * ethtool_adv_to_mii_ctrl1000_t - * @ethadv: the ethtool advertisement settings - * - * A small helper function that translates ethtool advertisement - * settings to phy autonegotiation advertisements for the - * MII_CTRL1000 register when in 1000T mode. - */ -static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv) -{ - u32 result = 0; - - if (ethadv & ADVERTISED_1000baseT_Half) - result |= ADVERTISE_1000HALF; - if (ethadv & ADVERTISED_1000baseT_Full) - result |= ADVERTISE_1000FULL; - - return result; -} - -/** - * mii_ctrl1000_to_ethtool_adv_t - * @adv: value of the MII_CTRL1000 register - * - * A small helper function that translates MII_CTRL1000 - * bits, when in 1000Base-T mode, to ethtool - * advertisement settings. - */ -static inline u32 mii_ctrl1000_to_ethtool_adv_t(u32 adv) -{ - u32 result = 0; - - if (adv & ADVERTISE_1000HALF) - result |= ADVERTISED_1000baseT_Half; - if (adv & ADVERTISE_1000FULL) - result |= ADVERTISED_1000baseT_Full; - - return result; -} - -/** - * mii_lpa_to_ethtool_lpa_t - * @adv: value of the MII_LPA register - * - * A small helper function that translates MII_LPA - * bits, when in 1000Base-T mode, to ethtool - * LP advertisement settings. - */ -static inline u32 mii_lpa_to_ethtool_lpa_t(u32 lpa) -{ - u32 result = 0; - - if (lpa & LPA_LPACK) - result |= ADVERTISED_Autoneg; - - return result | mii_adv_to_ethtool_adv_t(lpa); -} - -/** - * mii_stat1000_to_ethtool_lpa_t - * @adv: value of the MII_STAT1000 register - * - * A small helper function that translates MII_STAT1000 - * bits, when in 1000Base-T mode, to ethtool - * advertisement settings. - */ -static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa) -{ - u32 result = 0; - - if (lpa & LPA_1000HALF) - result |= ADVERTISED_1000baseT_Half; - if (lpa & LPA_1000FULL) - result |= ADVERTISED_1000baseT_Full; - - return result; -} - -/** - * ethtool_adv_to_mii_adv_x - * @ethadv: the ethtool advertisement settings - * - * A small helper function that translates ethtool advertisement - * settings to phy autonegotiation advertisements for the - * MII_CTRL1000 register when in 1000Base-X mode. - */ -static inline u32 ethtool_adv_to_mii_adv_x(u32 ethadv) -{ - u32 result = 0; - - if (ethadv & ADVERTISED_1000baseT_Half) - result |= ADVERTISE_1000XHALF; - if (ethadv & ADVERTISED_1000baseT_Full) - result |= ADVERTISE_1000XFULL; - if (ethadv & ADVERTISED_Pause) - result |= ADVERTISE_1000XPAUSE; - if (ethadv & ADVERTISED_Asym_Pause) - result |= ADVERTISE_1000XPSE_ASYM; - - return result; -} - -/** - * mii_adv_to_ethtool_adv_x - * @adv: value of the MII_CTRL1000 register - * - * A small helper function that translates MII_CTRL1000 - * bits, when in 1000Base-X mode, to ethtool - * advertisement settings. 
- */ -static inline u32 mii_adv_to_ethtool_adv_x(u32 adv) -{ - u32 result = 0; - - if (adv & ADVERTISE_1000XHALF) - result |= ADVERTISED_1000baseT_Half; - if (adv & ADVERTISE_1000XFULL) - result |= ADVERTISED_1000baseT_Full; - if (adv & ADVERTISE_1000XPAUSE) - result |= ADVERTISED_Pause; - if (adv & ADVERTISE_1000XPSE_ASYM) - result |= ADVERTISED_Asym_Pause; - - return result; -} - -/** - * mii_lpa_to_ethtool_lpa_x - * @adv: value of the MII_LPA register - * - * A small helper function that translates MII_LPA - * bits, when in 1000Base-X mode, to ethtool - * LP advertisement settings. - */ -static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa) -{ - u32 result = 0; - - if (lpa & LPA_LPACK) - result |= ADVERTISED_Autoneg; - - return result | mii_adv_to_ethtool_adv_x(lpa); -} - /** * mii_advertise_flowctrl - get flow control advertisement flags * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both) diff --git a/trunk/include/linux/mlx4/cmd.h b/trunk/include/linux/mlx4/cmd.h index 9958ff2cad3c..b56e4587208d 100644 --- a/trunk/include/linux/mlx4/cmd.h +++ b/trunk/include/linux/mlx4/cmd.h @@ -59,15 +59,12 @@ enum { MLX4_CMD_HW_HEALTH_CHECK = 0x50, MLX4_CMD_SET_PORT = 0xc, MLX4_CMD_SET_NODE = 0x5a, - MLX4_CMD_QUERY_FUNC = 0x56, MLX4_CMD_ACCESS_DDR = 0x2e, MLX4_CMD_MAP_ICM = 0xffa, MLX4_CMD_UNMAP_ICM = 0xff9, MLX4_CMD_MAP_ICM_AUX = 0xffc, MLX4_CMD_UNMAP_ICM_AUX = 0xffb, MLX4_CMD_SET_ICM_SIZE = 0xffd, - /*master notify fw on finish for slave's flr*/ - MLX4_CMD_INFORM_FLR_DONE = 0x5b, /* TPT commands */ MLX4_CMD_SW2HW_MPT = 0xd, @@ -122,26 +119,6 @@ enum { /* miscellaneous commands */ MLX4_CMD_DIAG_RPRT = 0x30, MLX4_CMD_NOP = 0x31, - MLX4_CMD_ACCESS_MEM = 0x2e, - MLX4_CMD_SET_VEP = 0x52, - - /* Ethernet specific commands */ - MLX4_CMD_SET_VLAN_FLTR = 0x47, - MLX4_CMD_SET_MCAST_FLTR = 0x48, - MLX4_CMD_DUMP_ETH_STATS = 0x49, - - /* Communication channel commands */ - MLX4_CMD_ARM_COMM_CHANNEL = 0x57, - MLX4_CMD_GEN_EQE = 0x58, - - /* virtual commands */ - MLX4_CMD_ALLOC_RES = 0xf00, - MLX4_CMD_FREE_RES = 0xf01, - MLX4_CMD_MCAST_ATTACH = 0xf05, - MLX4_CMD_UCAST_ATTACH = 0xf06, - MLX4_CMD_PROMISC = 0xf08, - MLX4_CMD_QUERY_FUNC_CAP = 0xf0a, - MLX4_CMD_QP_ATTACH = 0xf0b, /* debug commands */ MLX4_CMD_QUERY_DEBUG_MSG = 0x2a, @@ -149,7 +126,6 @@ enum { /* statistics commands */ MLX4_CMD_QUERY_IF_STAT = 0X54, - MLX4_CMD_SET_IF_STAT = 0X55, }; enum { @@ -159,8 +135,7 @@ enum { }; enum { - MLX4_MAILBOX_SIZE = 4096, - MLX4_ACCESS_MEM_ALIGN = 256, + MLX4_MAILBOX_SIZE = 4096 }; enum { @@ -173,11 +148,6 @@ enum { MLX4_SET_PORT_GID_TABLE = 0x5, }; -enum { - MLX4_CMD_WRAPPED, - MLX4_CMD_NATIVE -}; - struct mlx4_dev; struct mlx4_cmd_mailbox { @@ -187,24 +157,23 @@ struct mlx4_cmd_mailbox { int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, int out_is_imm, u32 in_modifier, u8 op_modifier, - u16 op, unsigned long timeout, int native); + u16 op, unsigned long timeout); /* Invoke a command with no output parameter */ static inline int mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u32 in_modifier, - u8 op_modifier, u16 op, unsigned long timeout, - int native) + u8 op_modifier, u16 op, unsigned long timeout) { return __mlx4_cmd(dev, in_param, NULL, 0, in_modifier, - op_modifier, op, timeout, native); + op_modifier, op, timeout); } /* Invoke a command with an output mailbox */ static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param, u32 in_modifier, u8 op_modifier, u16 op, - unsigned long timeout, int native) + unsigned long timeout) { return __mlx4_cmd(dev, in_param, 
&out_param, 0, in_modifier, - op_modifier, op, timeout, native); + op_modifier, op, timeout); } /* @@ -214,17 +183,13 @@ static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param */ static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_param, u32 in_modifier, u8 op_modifier, u16 op, - unsigned long timeout, int native) + unsigned long timeout) { return __mlx4_cmd(dev, in_param, out_param, 1, in_modifier, - op_modifier, op, timeout, native); + op_modifier, op, timeout); } struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev); void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox); -u32 mlx4_comm_get_version(void); - -#define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8) - #endif /* MLX4_CMD_H */ diff --git a/trunk/include/linux/mlx4/device.h b/trunk/include/linux/mlx4/device.h index 5c4fe8e5bfe5..84b0b1848f17 100644 --- a/trunk/include/linux/mlx4/device.h +++ b/trunk/include/linux/mlx4/device.h @@ -47,9 +47,6 @@ enum { MLX4_FLAG_MSI_X = 1 << 0, MLX4_FLAG_OLD_PORT_CMDS = 1 << 1, - MLX4_FLAG_MASTER = 1 << 2, - MLX4_FLAG_SLAVE = 1 << 3, - MLX4_FLAG_SRIOV = 1 << 4, }; enum { @@ -60,15 +57,6 @@ enum { MLX4_BOARD_ID_LEN = 64 }; -enum { - MLX4_MAX_NUM_PF = 16, - MLX4_MAX_NUM_VF = 64, - MLX4_MFUNC_MAX = 80, - MLX4_MFUNC_EQ_NUM = 4, - MLX4_MFUNC_MAX_EQES = 8, - MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1) -}; - enum { MLX4_DEV_CAP_FLAG_RC = 1LL << 0, MLX4_DEV_CAP_FLAG_UC = 1LL << 1, @@ -89,13 +77,11 @@ enum { MLX4_DEV_CAP_FLAG_IBOE = 1LL << 30, MLX4_DEV_CAP_FLAG_UC_LOOPBACK = 1LL << 32, MLX4_DEV_CAP_FLAG_FCS_KEEP = 1LL << 34, - MLX4_DEV_CAP_FLAG_WOL_PORT1 = 1LL << 37, - MLX4_DEV_CAP_FLAG_WOL_PORT2 = 1LL << 38, + MLX4_DEV_CAP_FLAG_WOL = 1LL << 38, MLX4_DEV_CAP_FLAG_UDP_RSS = 1LL << 40, MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41, MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42, - MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48, - MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55 + MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48 }; #define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) @@ -130,11 +116,7 @@ enum mlx4_event { MLX4_EVENT_TYPE_PORT_CHANGE = 0x09, MLX4_EVENT_TYPE_EQ_OVERFLOW = 0x0f, MLX4_EVENT_TYPE_ECC_DETECT = 0x0e, - MLX4_EVENT_TYPE_CMD = 0x0a, - MLX4_EVENT_TYPE_VEP_UPDATE = 0x19, - MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18, - MLX4_EVENT_TYPE_FLR_EVENT = 0x1c, - MLX4_EVENT_TYPE_NONE = 0xff, + MLX4_EVENT_TYPE_CMD = 0x0a }; enum { @@ -201,7 +183,6 @@ enum mlx4_qp_region { }; enum mlx4_port_type { - MLX4_PORT_TYPE_NONE = 0, MLX4_PORT_TYPE_IB = 1, MLX4_PORT_TYPE_ETH = 2, MLX4_PORT_TYPE_AUTO = 3 @@ -234,7 +215,6 @@ static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) struct mlx4_caps { u64 fw_ver; - u32 function; int num_ports; int vl_cap[MLX4_MAX_PORTS + 1]; int ib_mtu_cap[MLX4_MAX_PORTS + 1]; @@ -249,7 +229,6 @@ struct mlx4_caps { u64 trans_code[MLX4_MAX_PORTS + 1]; int local_ca_ack_delay; int num_uars; - u32 uar_page_size; int bf_reg_size; int bf_regs_per_page; int max_sq_sg; @@ -273,7 +252,8 @@ struct mlx4_caps { int num_comp_vectors; int comp_pool; int num_mpts; - int num_mtts; + int num_mtt_segs; + int mtts_per_seg; int fmr_reserved_mtts; int reserved_mtts; int reserved_mrws; @@ -303,9 +283,7 @@ struct mlx4_caps { int log_num_prios; enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; u8 supported_type[MLX4_MAX_PORTS + 1]; - u8 suggested_type[MLX4_MAX_PORTS + 1]; - u8 default_sense[MLX4_MAX_PORTS + 1]; - u32 port_mask[MLX4_MAX_PORTS + 1]; + u32 port_mask; enum mlx4_port_type possible_type[MLX4_MAX_PORTS 
+ 1]; u32 max_counters; u8 ext_port_cap[MLX4_MAX_PORTS + 1]; @@ -325,7 +303,7 @@ struct mlx4_buf { }; struct mlx4_mtt { - u32 offset; + u32 first_seg; int order; int page_shift; }; @@ -487,12 +465,10 @@ struct mlx4_counter { struct mlx4_dev { struct pci_dev *pdev; unsigned long flags; - unsigned long num_slaves; struct mlx4_caps caps; struct radix_tree_root qp_table_tree; u8 rev_id; char board_id[MLX4_BOARD_ID_LEN]; - int num_vfs; }; struct mlx4_init_port_param { @@ -511,32 +487,14 @@ struct mlx4_init_port_param { #define mlx4_foreach_port(port, dev, type) \ for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ - if ((type) == (dev)->caps.port_mask[(port)]) - -#define mlx4_foreach_ib_transport_port(port, dev) \ - for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ - if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \ - ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) + if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \ + ~(dev)->caps.port_mask) & 1 << ((port) - 1)) -static inline int mlx4_is_master(struct mlx4_dev *dev) -{ - return dev->flags & MLX4_FLAG_MASTER; -} - -static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) -{ - return (qpn < dev->caps.sqp_start + 8); -} - -static inline int mlx4_is_mfunc(struct mlx4_dev *dev) -{ - return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER); -} +#define mlx4_foreach_ib_transport_port(port, dev) \ + for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ + if (((dev)->caps.port_mask & 1 << ((port) - 1)) || \ + ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) -static inline int mlx4_is_slave(struct mlx4_dev *dev) -{ - return dev->flags & MLX4_FLAG_SLAVE; -} int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, struct mlx4_buf *buf); @@ -602,10 +560,6 @@ int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_waterm int mlx4_INIT_PORT(struct mlx4_dev *dev, int port); int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port); -int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], - int block_mcast_loopback, enum mlx4_protocol prot); -int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], - enum mlx4_protocol prot); int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], int block_mcast_loopback, enum mlx4_protocol protocol); int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], @@ -616,11 +570,9 @@ int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port); int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port); int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode); -int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac); -void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac); -int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac); -int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn); -void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn); +int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap); +void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn); +int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap); int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); diff --git a/trunk/include/linux/mlx4/qp.h b/trunk/include/linux/mlx4/qp.h index bee8fa231276..48cc4cb97858 100644 --- 
a/trunk/include/linux/mlx4/qp.h +++ b/trunk/include/linux/mlx4/qp.h @@ -97,33 +97,6 @@ enum { MLX4_QP_BIT_RIC = 1 << 4, }; -enum { - MLX4_RSS_HASH_XOR = 0, - MLX4_RSS_HASH_TOP = 1, - - MLX4_RSS_UDP_IPV6 = 1 << 0, - MLX4_RSS_UDP_IPV4 = 1 << 1, - MLX4_RSS_TCP_IPV6 = 1 << 2, - MLX4_RSS_IPV6 = 1 << 3, - MLX4_RSS_TCP_IPV4 = 1 << 4, - MLX4_RSS_IPV4 = 1 << 5, - - /* offset of mlx4_rss_context within mlx4_qp_context.pri_path */ - MLX4_RSS_OFFSET_IN_QPC_PRI_PATH = 0x24, - /* offset of being RSS indirection QP within mlx4_qp_context.flags */ - MLX4_RSS_QPC_FLAG_OFFSET = 13, -}; - -struct mlx4_rss_context { - __be32 base_qpn; - __be32 default_qpn; - u16 reserved; - u8 hash_fn; - u8 flags; - __be32 rss_key[10]; - __be32 base_qpn_udp; -}; - struct mlx4_qp_path { u8 fl; u8 reserved1[2]; @@ -210,7 +183,6 @@ struct mlx4_wqe_ctrl_seg { * [4] IP checksum * [3:2] C (generate completion queue entry) * [1] SE (solicited event) - * [0] FL (force loopback) */ __be32 srcrb_flags; /* diff --git a/trunk/include/linux/mm.h b/trunk/include/linux/mm.h index 5d9b4c9813bd..4baadd18f4ad 100644 --- a/trunk/include/linux/mm.h +++ b/trunk/include/linux/mm.h @@ -1253,34 +1253,41 @@ static inline void pgtable_page_dtor(struct page *page) extern void free_area_init(unsigned long * zones_size); extern void free_area_init_node(int nid, unsigned long * zones_size, unsigned long zone_start_pfn, unsigned long *zholes_size); -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP +#ifdef CONFIG_ARCH_POPULATES_NODE_MAP /* - * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its + * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its * zones, allocate the backing mem_map and account for memory holes in a more * architecture independent manner. This is a substitute for creating the * zone_sizes[] and zholes_size[] arrays and passing them to * free_area_init_node() * * An architecture is expected to register range of page frames backed by - * physical memory with memblock_add[_node]() before calling + * physical memory with add_active_range() before calling * free_area_init_nodes() passing in the PFN each zone ends at. At a basic * usage, an architecture is expected to do something like * * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn, * max_highmem_pfn}; * for_each_valid_physical_page_range() - * memblock_add_node(base, size, nid) + * add_active_range(node_id, start_pfn, end_pfn) * free_area_init_nodes(max_zone_pfns); * - * free_bootmem_with_active_regions() calls free_bootmem_node() for each - * registered physical page range. Similarly - * sparse_memory_present_with_active_regions() calls memory_present() for - * each range when SPARSEMEM is enabled. + * If the architecture guarantees that there are no holes in the ranges + * registered with add_active_range(), free_bootmem_active_regions() + * will call free_bootmem_node() for each registered physical page range. + * Similarly sparse_memory_present_with_active_regions() calls + * memory_present() for each range when SPARSEMEM is enabled. * * See mm/page_alloc.c for more information on each function exposed by - * CONFIG_HAVE_MEMBLOCK_NODE_MAP. 
+ * CONFIG_ARCH_POPULATES_NODE_MAP */ extern void free_area_init_nodes(unsigned long *max_zone_pfn); +extern void add_active_range(unsigned int nid, unsigned long start_pfn, + unsigned long end_pfn); +extern void remove_active_range(unsigned int nid, unsigned long start_pfn, + unsigned long end_pfn); +extern void remove_all_active_ranges(void); +void sort_node_map(void); unsigned long node_map_pfn_alignment(void); unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, unsigned long end_pfn); @@ -1293,11 +1300,14 @@ extern void free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn); int add_from_early_node_map(struct range *range, int az, int nr_range, int nid); +u64 __init find_memory_core_early(int nid, u64 size, u64 align, + u64 goal, u64 limit); +typedef int (*work_fn_t)(unsigned long, unsigned long, void *); +extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data); extern void sparse_memory_present_with_active_regions(int nid); +#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ -#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ - -#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \ +#if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \ !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) static inline int __early_pfn_to_nid(unsigned long pfn) { diff --git a/trunk/include/linux/mmzone.h b/trunk/include/linux/mmzone.h index 3ac040f19369..188cb2ffe8db 100644 --- a/trunk/include/linux/mmzone.h +++ b/trunk/include/linux/mmzone.h @@ -598,13 +598,13 @@ struct zonelist { #endif }; -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP +#ifdef CONFIG_ARCH_POPULATES_NODE_MAP struct node_active_region { unsigned long start_pfn; unsigned long end_pfn; int nid; }; -#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ +#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ #ifndef CONFIG_DISCONTIGMEM /* The array of struct pages - for discontigmem use pgdat->lmem_map */ @@ -720,7 +720,7 @@ extern int movable_zone; static inline int zone_movable_is_highmem(void) { -#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE) +#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP) return movable_zone == ZONE_HIGHMEM; #else return 0; @@ -938,7 +938,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, #endif #if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \ - !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) + !defined(CONFIG_ARCH_POPULATES_NODE_MAP) static inline unsigned long early_pfn_to_nid(unsigned long pfn) { return 0; diff --git a/trunk/include/linux/neighbour.h b/trunk/include/linux/neighbour.h index b188f68a08c9..a7003b7a695d 100644 --- a/trunk/include/linux/neighbour.h +++ b/trunk/include/linux/neighbour.h @@ -116,7 +116,6 @@ enum { NDTPA_PROXY_DELAY, /* u64, msecs */ NDTPA_PROXY_QLEN, /* u32 */ NDTPA_LOCKTIME, /* u64, msecs */ - NDTPA_QUEUE_LENBYTES, /* u32 */ __NDTPA_MAX }; #define NDTPA_MAX (__NDTPA_MAX - 1) diff --git a/trunk/include/linux/netdev_features.h b/trunk/include/linux/netdev_features.h deleted file mode 100644 index 77f5202977ce..000000000000 --- a/trunk/include/linux/netdev_features.h +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Network device features. - * - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ -#ifndef _LINUX_NETDEV_FEATURES_H -#define _LINUX_NETDEV_FEATURES_H - -#include - -typedef u64 netdev_features_t; - -enum { - NETIF_F_SG_BIT, /* Scatter/gather IO. */ - NETIF_F_IP_CSUM_BIT, /* Can checksum TCP/UDP over IPv4. */ - __UNUSED_NETIF_F_1, - NETIF_F_HW_CSUM_BIT, /* Can checksum all the packets. */ - NETIF_F_IPV6_CSUM_BIT, /* Can checksum TCP/UDP over IPV6 */ - NETIF_F_HIGHDMA_BIT, /* Can DMA to high memory. */ - NETIF_F_FRAGLIST_BIT, /* Scatter/gather IO. */ - NETIF_F_HW_VLAN_TX_BIT, /* Transmit VLAN hw acceleration */ - NETIF_F_HW_VLAN_RX_BIT, /* Receive VLAN hw acceleration */ - NETIF_F_HW_VLAN_FILTER_BIT, /* Receive filtering on VLAN */ - NETIF_F_VLAN_CHALLENGED_BIT, /* Device cannot handle VLAN packets */ - NETIF_F_GSO_BIT, /* Enable software GSO. */ - NETIF_F_LLTX_BIT, /* LockLess TX - deprecated. Please */ - /* do not use LLTX in new drivers */ - NETIF_F_NETNS_LOCAL_BIT, /* Does not change network namespaces */ - NETIF_F_GRO_BIT, /* Generic receive offload */ - NETIF_F_LRO_BIT, /* large receive offload */ - - /**/NETIF_F_GSO_SHIFT, /* keep the order of SKB_GSO_* bits */ - NETIF_F_TSO_BIT /* ... TCPv4 segmentation */ - = NETIF_F_GSO_SHIFT, - NETIF_F_UFO_BIT, /* ... UDPv4 fragmentation */ - NETIF_F_GSO_ROBUST_BIT, /* ... ->SKB_GSO_DODGY */ - NETIF_F_TSO_ECN_BIT, /* ... TCP ECN support */ - NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */ - NETIF_F_FSO_BIT, /* ... FCoE segmentation */ - NETIF_F_GSO_RESERVED1, /* ... free (fill GSO_MASK to 8 bits) */ - /**/NETIF_F_GSO_LAST, /* [can't be last bit, see GSO_MASK] */ - NETIF_F_GSO_RESERVED2 /* ... free (fill GSO_MASK to 8 bits) */ - = NETIF_F_GSO_LAST, - - NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ - NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */ - NETIF_F_FCOE_MTU_BIT, /* Supports max FCoE MTU, 2158 bytes*/ - NETIF_F_NTUPLE_BIT, /* N-tuple filters supported */ - NETIF_F_RXHASH_BIT, /* Receive hashing offload */ - NETIF_F_RXCSUM_BIT, /* Receive checksumming offload */ - NETIF_F_NOCACHE_COPY_BIT, /* Use no-cache copyfromuser */ - NETIF_F_LOOPBACK_BIT, /* Enable loopback */ - - /* - * Add your fresh new feature above and remember to update - * netdev_features_strings[] in net/core/ethtool.c and maybe - * some feature mask #defines below. Please also describe it - * in Documentation/networking/netdev-features.txt. 
- */ - - /**/NETDEV_FEATURE_COUNT -}; - -/* copy'n'paste compression ;) */ -#define __NETIF_F_BIT(bit) ((netdev_features_t)1 << (bit)) -#define __NETIF_F(name) __NETIF_F_BIT(NETIF_F_##name##_BIT) - -#define NETIF_F_FCOE_CRC __NETIF_F(FCOE_CRC) -#define NETIF_F_FCOE_MTU __NETIF_F(FCOE_MTU) -#define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST) -#define NETIF_F_FSO __NETIF_F(FSO) -#define NETIF_F_GRO __NETIF_F(GRO) -#define NETIF_F_GSO __NETIF_F(GSO) -#define NETIF_F_GSO_ROBUST __NETIF_F(GSO_ROBUST) -#define NETIF_F_HIGHDMA __NETIF_F(HIGHDMA) -#define NETIF_F_HW_CSUM __NETIF_F(HW_CSUM) -#define NETIF_F_HW_VLAN_FILTER __NETIF_F(HW_VLAN_FILTER) -#define NETIF_F_HW_VLAN_RX __NETIF_F(HW_VLAN_RX) -#define NETIF_F_HW_VLAN_TX __NETIF_F(HW_VLAN_TX) -#define NETIF_F_IP_CSUM __NETIF_F(IP_CSUM) -#define NETIF_F_IPV6_CSUM __NETIF_F(IPV6_CSUM) -#define NETIF_F_LLTX __NETIF_F(LLTX) -#define NETIF_F_LOOPBACK __NETIF_F(LOOPBACK) -#define NETIF_F_LRO __NETIF_F(LRO) -#define NETIF_F_NETNS_LOCAL __NETIF_F(NETNS_LOCAL) -#define NETIF_F_NOCACHE_COPY __NETIF_F(NOCACHE_COPY) -#define NETIF_F_NTUPLE __NETIF_F(NTUPLE) -#define NETIF_F_RXCSUM __NETIF_F(RXCSUM) -#define NETIF_F_RXHASH __NETIF_F(RXHASH) -#define NETIF_F_SCTP_CSUM __NETIF_F(SCTP_CSUM) -#define NETIF_F_SG __NETIF_F(SG) -#define NETIF_F_TSO6 __NETIF_F(TSO6) -#define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN) -#define NETIF_F_TSO __NETIF_F(TSO) -#define NETIF_F_UFO __NETIF_F(UFO) -#define NETIF_F_VLAN_CHALLENGED __NETIF_F(VLAN_CHALLENGED) - -/* Features valid for ethtool to change */ -/* = all defined minus driver/device-class-related */ -#define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \ - NETIF_F_LLTX | NETIF_F_NETNS_LOCAL) - -/* remember that ((t)1 << t_BITS) is undefined in C99 */ -#define NETIF_F_ETHTOOL_BITS ((__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) | \ - (__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) - 1)) & \ - ~NETIF_F_NEVER_CHANGE) - -/* Segmentation offload feature mask */ -#define NETIF_F_GSO_MASK (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \ - __NETIF_F_BIT(NETIF_F_GSO_SHIFT)) - -/* List of features with software fallbacks. */ -#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \ - NETIF_F_TSO6 | NETIF_F_UFO) - -#define NETIF_F_GEN_CSUM NETIF_F_HW_CSUM -#define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM) -#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM) -#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM) - -#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) - -#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ - NETIF_F_FSO) - -/* - * If one device supports one of these features, then enable them - * for all in netdev_increment_features. - */ -#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \ - NETIF_F_SG | NETIF_F_HIGHDMA | \ - NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED) -/* - * If one device doesn't support one of these features, then disable it - * for all in netdev_increment_features. 
- */ -#define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO) - -/* changeable features with no special hardware requirements */ -#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) - -#endif /* _LINUX_NETDEV_FEATURES_H */ diff --git a/trunk/include/linux/netdevice.h b/trunk/include/linux/netdevice.h index a1d109590da4..a82ad4dd306a 100644 --- a/trunk/include/linux/netdevice.h +++ b/trunk/include/linux/netdevice.h @@ -43,7 +43,6 @@ #include #include #include -#include #include #include @@ -51,10 +50,8 @@ #ifdef CONFIG_DCB #include #endif -#include - -#include +struct vlan_group; struct netpoll_info; struct phy_device; /* 802.11 specific */ @@ -144,20 +141,22 @@ static inline bool dev_xmit_complete(int rc) * used. */ -#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) +#if defined(CONFIG_WLAN) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) # if defined(CONFIG_MAC80211_MESH) # define LL_MAX_HEADER 128 # else # define LL_MAX_HEADER 96 # endif -#elif IS_ENABLED(CONFIG_TR) +#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE) # define LL_MAX_HEADER 48 #else # define LL_MAX_HEADER 32 #endif -#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \ - !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL) +#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \ + !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \ + !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \ + !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE) #define MAX_HEADER LL_MAX_HEADER #else #define MAX_HEADER (LL_MAX_HEADER + 48) @@ -213,11 +212,6 @@ enum { #include #include -#ifdef CONFIG_RPS -#include -extern struct jump_label_key rps_needed; -#endif - struct neighbour; struct neigh_parms; struct sk_buff; @@ -278,11 +272,16 @@ struct hh_cache { * * We could use other alignment values, but we must maintain the * relationship HH alignment <= LL alignment. + * + * LL_ALLOCATED_SPACE also takes into account the tailroom the device + * may need. */ #define LL_RESERVED_SPACE(dev) \ ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) +#define LL_ALLOCATED_SPACE(dev) \ + ((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) struct header_ops { int (*create) (struct sk_buff *skb, struct net_device *dev, @@ -517,23 +516,11 @@ static inline void napi_synchronize(const struct napi_struct *n) #endif enum netdev_queue_state_t { - __QUEUE_STATE_DRV_XOFF, - __QUEUE_STATE_STACK_XOFF, + __QUEUE_STATE_XOFF, __QUEUE_STATE_FROZEN, -#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \ - (1 << __QUEUE_STATE_STACK_XOFF)) -#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \ - (1 << __QUEUE_STATE_FROZEN)) +#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF) | \ + (1 << __QUEUE_STATE_FROZEN)) }; -/* - * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The - * netif_tx_* functions below are used to manipulate this flag. The - * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit - * queue independently. The netif_xmit_*stopped functions below are called - * to check if the queue has been stopped by the driver or stack (either - * of the XOFF bits are set in the state). 
Drivers should not need to call - * netif_xmit*stopped functions, they should only be using netif_tx_*. - */ struct netdev_queue { /* @@ -541,8 +528,9 @@ struct netdev_queue { */ struct net_device *dev; struct Qdisc *qdisc; + unsigned long state; struct Qdisc *qdisc_sleeping; -#ifdef CONFIG_SYSFS +#if defined(CONFIG_RPS) || defined(CONFIG_XPS) struct kobject kobj; #endif #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) @@ -557,18 +545,6 @@ struct netdev_queue { * please use this field instead of dev->trans_start */ unsigned long trans_start; - - /* - * Number of TX timeouts for this queue - * (/sys/class/net/DEV/Q/trans_timeout) - */ - unsigned long trans_timeout; - - unsigned long state; - -#ifdef CONFIG_BQL - struct dql dql; -#endif } ____cacheline_aligned_in_smp; static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) @@ -597,7 +573,7 @@ struct rps_map { struct rcu_head rcu; u16 cpus[0]; }; -#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16))) +#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16))) /* * The rps_dev_flow structure contains the mapping of a flow to a CPU, the @@ -621,7 +597,7 @@ struct rps_dev_flow_table { struct rps_dev_flow flows[0]; }; #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ - ((_num) * sizeof(struct rps_dev_flow))) + (_num * sizeof(struct rps_dev_flow))) /* * The rps_sock_flow_table contains mappings of flows to the last CPU @@ -632,7 +608,7 @@ struct rps_sock_flow_table { u16 ents[0]; }; #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \ - ((_num) * sizeof(u16))) + (_num * sizeof(u16))) #define RPS_NO_CPU 0xffff @@ -684,7 +660,7 @@ struct xps_map { struct rcu_head rcu; u16 queues[0]; }; -#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) +#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16))) #define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \ / sizeof(u16)) @@ -707,23 +683,6 @@ struct netdev_tc_txq { u16 offset; }; -#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) -/* - * This structure is to hold information about the device - * configured to run FCoE protocol stack. - */ -struct netdev_fcoe_hbainfo { - char manufacturer[64]; - char serial_number[64]; - char hardware_version[64]; - char driver_version[64]; - char optionrom_version[64]; - char firmware_version[64]; - char model[256]; - char model_description[256]; -}; -#endif - /* * This structure defines the management hooks for network devices. * The following hooks can be defined; unless noted otherwise, they are @@ -808,11 +767,11 @@ struct netdev_fcoe_hbainfo { * 3. Update dev->stats asynchronously and atomically, and define * neither operation. * - * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid); + * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid); * If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER) * this function is called when a VLAN id is registered. * - * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); + * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); * If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER) * this function is called when a VLAN id is unregistered. * @@ -864,13 +823,6 @@ struct netdev_fcoe_hbainfo { * perform necessary setup and returns 1 to indicate the device is set up * successfully to perform DDP on this I/O, otherwise this returns 0. 
* - * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, - * struct netdev_fcoe_hbainfo *hbainfo); - * Called when the FCoE Protocol stack wants information on the underlying - * device. This information is utilized by the FCoE protocol stack to - * register attributes with Fiber Channel management service as per the - * FC-GS Fabric Device Management Information(FDMI) specification. - * * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type); * Called when the underlying device wants to override default World Wide * Name (WWN) generation mechanism in FCoE protocol stack to pass its own @@ -893,13 +845,12 @@ struct netdev_fcoe_hbainfo { * Called to release previously enslaved netdev. * * Feature/offload setting functions. - * netdev_features_t (*ndo_fix_features)(struct net_device *dev, - * netdev_features_t features); + * u32 (*ndo_fix_features)(struct net_device *dev, u32 features); * Adjusts the requested feature flags according to device-specific * constraints, and returns the resulting flags. Must not modify * the device state. * - * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); + * int (*ndo_set_features)(struct net_device *dev, u32 features); * Called to update device configuration to new features. Passed * feature set might be less than what was returned by ndo_fix_features()). * Must return >0 or -errno if it changed dev->features itself. @@ -934,9 +885,9 @@ struct net_device_ops { struct rtnl_link_stats64 *storage); struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); - int (*ndo_vlan_rx_add_vid)(struct net_device *dev, + void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid); - int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, + void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); #ifdef CONFIG_NET_POLL_CONTROLLER void (*ndo_poll_controller)(struct net_device *dev); @@ -961,7 +912,7 @@ struct net_device_ops { int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); int (*ndo_setup_tc)(struct net_device *dev, u8 tc); -#if IS_ENABLED(CONFIG_FCOE) +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) int (*ndo_fcoe_enable)(struct net_device *dev); int (*ndo_fcoe_disable)(struct net_device *dev); int (*ndo_fcoe_ddp_setup)(struct net_device *dev, @@ -974,11 +925,9 @@ struct net_device_ops { u16 xid, struct scatterlist *sgl, unsigned int sgc); - int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, - struct netdev_fcoe_hbainfo *hbainfo); #endif -#if IS_ENABLED(CONFIG_LIBFCOE) +#if defined(CONFIG_LIBFCOE) || defined(CONFIG_LIBFCOE_MODULE) #define NETDEV_FCOE_WWNN 0 #define NETDEV_FCOE_WWPN 1 int (*ndo_fcoe_get_wwn)(struct net_device *dev, @@ -995,12 +944,10 @@ struct net_device_ops { struct net_device *slave_dev); int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); - netdev_features_t (*ndo_fix_features)(struct net_device *dev, - netdev_features_t features); + u32 (*ndo_fix_features)(struct net_device *dev, + u32 features); int (*ndo_set_features)(struct net_device *dev, - netdev_features_t features); - int (*ndo_neigh_construct)(struct neighbour *n); - void (*ndo_neigh_destroy)(struct neighbour *n); + u32 features); }; /* @@ -1050,13 +997,91 @@ struct net_device { struct list_head unreg_list; /* currently active device features */ - netdev_features_t features; + u32 features; /* user-changeable features */ - netdev_features_t hw_features; + u32 hw_features; /* user-requested features */ - netdev_features_t wanted_features; + u32 
wanted_features; /* mask of features inheritable by VLAN devices */ - netdev_features_t vlan_features; + u32 vlan_features; + + /* Net device feature bits; if you change something, + * also update netdev_features_strings[] in ethtool.c */ + +#define NETIF_F_SG 1 /* Scatter/gather IO. */ +#define NETIF_F_IP_CSUM 2 /* Can checksum TCP/UDP over IPv4. */ +#define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */ +#define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */ +#define NETIF_F_IPV6_CSUM 16 /* Can checksum TCP/UDP over IPV6 */ +#define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */ +#define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */ +#define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */ +#define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */ +#define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */ +#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */ +#define NETIF_F_GSO 2048 /* Enable software GSO. */ +#define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. Please */ + /* do not use LLTX in new drivers */ +#define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */ +#define NETIF_F_GRO 16384 /* Generic receive offload */ +#define NETIF_F_LRO 32768 /* large receive offload */ + +/* the GSO_MASK reserves bits 16 through 23 */ +#define NETIF_F_FCOE_CRC (1 << 24) /* FCoE CRC32 */ +#define NETIF_F_SCTP_CSUM (1 << 25) /* SCTP checksum offload */ +#define NETIF_F_FCOE_MTU (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/ +#define NETIF_F_NTUPLE (1 << 27) /* N-tuple filters supported */ +#define NETIF_F_RXHASH (1 << 28) /* Receive hashing offload */ +#define NETIF_F_RXCSUM (1 << 29) /* Receive checksumming offload */ +#define NETIF_F_NOCACHE_COPY (1 << 30) /* Use no-cache copyfromuser */ +#define NETIF_F_LOOPBACK (1 << 31) /* Enable loopback */ + + /* Segmentation offload features */ +#define NETIF_F_GSO_SHIFT 16 +#define NETIF_F_GSO_MASK 0x00ff0000 +#define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT) +#define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT) +#define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT) +#define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT) +#define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT) +#define NETIF_F_FSO (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT) + + /* Features valid for ethtool to change */ + /* = all defined minus driver/device-class-related */ +#define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \ + NETIF_F_LLTX | NETIF_F_NETNS_LOCAL) +#define NETIF_F_ETHTOOL_BITS (0xff3fffff & ~NETIF_F_NEVER_CHANGE) + + /* List of features with software fallbacks. */ +#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \ + NETIF_F_TSO6 | NETIF_F_UFO) + + +#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) +#define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM) +#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM) +#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM) + +#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) + +#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ + NETIF_F_FSO) + + /* + * If one device supports one of these features, then enable them + * for all in netdev_increment_features. 
+ */ +#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \ + NETIF_F_SG | NETIF_F_HIGHDMA | \ + NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED) + /* + * If one device doesn't support one of these features, then disable it + * for all in netdev_increment_features. + */ +#define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO) + + /* changeable features with no special hardware requirements */ +#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) /* Interface index. Unique device identifier */ int ifindex; @@ -1107,7 +1132,6 @@ struct net_device { unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ unsigned char addr_assign_type; /* hw address assignment type */ unsigned char addr_len; /* hardware address length */ - unsigned char neigh_priv_len; unsigned short dev_id; /* for shared network cards */ spinlock_t addr_list_lock; @@ -1120,11 +1144,11 @@ struct net_device { /* Protocol specific pointers */ -#if IS_ENABLED(CONFIG_VLAN_8021Q) - struct vlan_info __rcu *vlan_info; /* VLAN info */ +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) + struct vlan_group __rcu *vlgrp; /* VLAN group */ #endif -#if IS_ENABLED(CONFIG_NET_DSA) - struct dsa_switch_tree *dsa_ptr; /* dsa specific data */ +#ifdef CONFIG_NET_DSA + void *dsa_ptr; /* dsa specific data */ #endif void *atalk_ptr; /* AppleTalk link */ struct in_device __rcu *ip_ptr; /* IPv4 specific data */ @@ -1160,11 +1184,9 @@ struct net_device { unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ -#ifdef CONFIG_SYSFS +#if defined(CONFIG_RPS) || defined(CONFIG_XPS) struct kset *queues_kset; -#endif -#ifdef CONFIG_RPS struct netdev_rx_queue *_rx; /* Number of RX queues allocated at register_netdev() time */ @@ -1286,12 +1308,9 @@ struct net_device { struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; u8 prio_tc_map[TC_BITMASK + 1]; -#if IS_ENABLED(CONFIG_FCOE) +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) /* max exchange id for FCoE LRO by ddp */ unsigned int fcoe_ddp_xid; -#endif -#if IS_ENABLED(CONFIG_NETPRIO_CGROUP) - struct netprio_map __rcu *priomap; #endif /* phy device may attach itself for hardware timestamping */ struct phy_device *phydev; @@ -1496,7 +1515,7 @@ struct packet_type { struct packet_type *, struct net_device *); struct sk_buff *(*gso_segment)(struct sk_buff *skb, - netdev_features_t features); + u32 features); int (*gso_send_check)(struct sk_buff *skb); struct sk_buff **(*gro_receive)(struct sk_buff **head, struct sk_buff *skb); @@ -1764,7 +1783,7 @@ extern void __netif_schedule(struct Qdisc *q); static inline void netif_schedule_queue(struct netdev_queue *txq) { - if (!(txq->state & QUEUE_STATE_ANY_XOFF)) + if (!test_bit(__QUEUE_STATE_XOFF, &txq->state)) __netif_schedule(txq->qdisc); } @@ -1778,7 +1797,7 @@ static inline void netif_tx_schedule_all(struct net_device *dev) static inline void netif_tx_start_queue(struct netdev_queue *dev_queue) { - clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); + clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state); } /** @@ -1810,7 +1829,7 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) return; } #endif - if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) + if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state)) __netif_schedule(dev_queue->qdisc); } @@ -1842,7 +1861,7 @@ static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) pr_info("netif_stop_queue() cannot be called before register_netdev()\n"); return; } - set_bit(__QUEUE_STATE_DRV_XOFF, 
&dev_queue->state); + set_bit(__QUEUE_STATE_XOFF, &dev_queue->state); } /** @@ -1869,7 +1888,7 @@ static inline void netif_tx_stop_all_queues(struct net_device *dev) static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue) { - return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); + return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state); } /** @@ -1883,68 +1902,9 @@ static inline int netif_queue_stopped(const struct net_device *dev) return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); } -static inline int netif_xmit_stopped(const struct netdev_queue *dev_queue) -{ - return dev_queue->state & QUEUE_STATE_ANY_XOFF; -} - -static inline int netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue) -{ - return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; -} - -static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, - unsigned int bytes) -{ -#ifdef CONFIG_BQL - dql_queued(&dev_queue->dql, bytes); - if (unlikely(dql_avail(&dev_queue->dql) < 0)) { - set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); - if (unlikely(dql_avail(&dev_queue->dql) >= 0)) - clear_bit(__QUEUE_STATE_STACK_XOFF, - &dev_queue->state); - } -#endif -} - -static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) +static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue) { - netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); -} - -static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, - unsigned pkts, unsigned bytes) -{ -#ifdef CONFIG_BQL - if (likely(bytes)) { - dql_completed(&dev_queue->dql, bytes); - if (unlikely(test_bit(__QUEUE_STATE_STACK_XOFF, - &dev_queue->state) && - dql_avail(&dev_queue->dql) >= 0)) { - if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, - &dev_queue->state)) - netif_schedule_queue(dev_queue); - } - } -#endif -} - -static inline void netdev_completed_queue(struct net_device *dev, - unsigned pkts, unsigned bytes) -{ - netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes); -} - -static inline void netdev_tx_reset_queue(struct netdev_queue *q) -{ -#ifdef CONFIG_BQL - dql_reset(&q->dql); -#endif -} - -static inline void netdev_reset_queue(struct net_device *dev_queue) -{ - netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0)); + return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN; } /** @@ -2031,7 +1991,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) if (netpoll_trap()) return; #endif - if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) + if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state)) __netif_schedule(txq->qdisc); } @@ -2560,8 +2520,7 @@ extern int netdev_set_master(struct net_device *dev, struct net_device *master) extern int netdev_set_bond_master(struct net_device *dev, struct net_device *master); extern int skb_checksum_help(struct sk_buff *skb); -extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, - netdev_features_t features); +extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features); #ifdef CONFIG_BUG extern void netdev_rx_csum_fault(struct net_device *dev); #else @@ -2590,13 +2549,11 @@ extern const char *netdev_drivername(const struct net_device *dev); extern void linkwatch_run_queue(void); -static inline netdev_features_t netdev_get_wanted_features( - struct net_device *dev) +static inline u32 netdev_get_wanted_features(struct net_device *dev) { return (dev->features & ~dev->hw_features) | dev->wanted_features; } -netdev_features_t 
netdev_increment_features(netdev_features_t all, - netdev_features_t one, netdev_features_t mask); +u32 netdev_increment_features(u32 all, u32 one, u32 mask); int __netdev_update_features(struct net_device *dev); void netdev_update_features(struct net_device *dev); void netdev_change_features(struct net_device *dev); @@ -2604,31 +2561,21 @@ void netdev_change_features(struct net_device *dev); void netif_stacked_transfer_operstate(const struct net_device *rootdev, struct net_device *dev); -netdev_features_t netif_skb_features(struct sk_buff *skb); +u32 netif_skb_features(struct sk_buff *skb); -static inline int net_gso_ok(netdev_features_t features, int gso_type) +static inline int net_gso_ok(u32 features, int gso_type) { - netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT; - - /* check flags correspondence */ - BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); - BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT)); - BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); - BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); - BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); - BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); - + int feature = gso_type << NETIF_F_GSO_SHIFT; return (features & feature) == feature; } -static inline int skb_gso_ok(struct sk_buff *skb, netdev_features_t features) +static inline int skb_gso_ok(struct sk_buff *skb, u32 features) { return net_gso_ok(features, skb_shinfo(skb)->gso_type) && (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); } -static inline int netif_needs_gso(struct sk_buff *skb, - netdev_features_t features) +static inline int netif_needs_gso(struct sk_buff *skb, int features) { return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); @@ -2647,6 +2594,22 @@ static inline int netif_is_bond_slave(struct net_device *dev) extern struct pernet_operations __net_initdata loopback_net_ops; +static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev) +{ + if (dev->features & NETIF_F_RXCSUM) + return 1; + if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum) + return 0; + return dev->ethtool_ops->get_rx_csum(dev); +} + +static inline u32 dev_ethtool_get_flags(struct net_device *dev) +{ + if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags) + return 0; + return dev->ethtool_ops->get_flags(dev); +} + /* Logging, debugging and troubleshooting/diagnostic helpers. 
*/ /* netdev_printk helpers, similar to dev_printk */ diff --git a/trunk/include/linux/netfilter.h b/trunk/include/linux/netfilter.h index b809265607d0..857f5026ced6 100644 --- a/trunk/include/linux/netfilter.h +++ b/trunk/include/linux/netfilter.h @@ -162,24 +162,6 @@ extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[]; extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; -#if defined(CONFIG_JUMP_LABEL) -#include -extern struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; -static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) -{ - if (__builtin_constant_p(pf) && - __builtin_constant_p(hook)) - return static_branch(&nf_hooks_needed[pf][hook]); - - return !list_empty(&nf_hooks[pf][hook]); -} -#else -static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) -{ - return !list_empty(&nf_hooks[pf][hook]); -} -#endif - int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct sk_buff *), int thresh); @@ -197,9 +179,11 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, struct net_device *outdev, int (*okfn)(struct sk_buff *), int thresh) { - if (nf_hooks_active(pf, hook)) - return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh); - return 1; +#ifndef CONFIG_NETFILTER_DEBUG + if (list_empty(&nf_hooks[pf][hook])) + return 1; +#endif + return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh); } static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb, diff --git a/trunk/include/linux/netfilter/Kbuild b/trunk/include/linux/netfilter/Kbuild index e144f54185c0..a1b410c76fc3 100644 --- a/trunk/include/linux/netfilter/Kbuild +++ b/trunk/include/linux/netfilter/Kbuild @@ -5,9 +5,7 @@ header-y += nf_conntrack_ftp.h header-y += nf_conntrack_sctp.h header-y += nf_conntrack_tcp.h header-y += nf_conntrack_tuple_common.h -header-y += nf_nat.h header-y += nfnetlink.h -header-y += nfnetlink_acct.h header-y += nfnetlink_compat.h header-y += nfnetlink_conntrack.h header-y += nfnetlink_log.h @@ -23,7 +21,6 @@ header-y += xt_DSCP.h header-y += xt_IDLETIMER.h header-y += xt_LED.h header-y += xt_MARK.h -header-y += xt_nfacct.h header-y += xt_NFLOG.h header-y += xt_NFQUEUE.h header-y += xt_RATEEST.h @@ -43,7 +40,6 @@ header-y += xt_cpu.h header-y += xt_dccp.h header-y += xt_devgroup.h header-y += xt_dscp.h -header-y += xt_ecn.h header-y += xt_esp.h header-y += xt_hashlimit.h header-y += xt_helper.h diff --git a/trunk/include/linux/netfilter/nf_conntrack_common.h b/trunk/include/linux/netfilter/nf_conntrack_common.h index 9e3a2838291b..0d3dd66322ec 100644 --- a/trunk/include/linux/netfilter/nf_conntrack_common.h +++ b/trunk/include/linux/netfilter/nf_conntrack_common.h @@ -83,10 +83,6 @@ enum ip_conntrack_status { /* Conntrack is a fake untracked entry */ IPS_UNTRACKED_BIT = 12, IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT), - - /* Conntrack has a userspace helper. 
*/ - IPS_USERSPACE_HELPER_BIT = 13, - IPS_USERSPACE_HELPER = (1 << IPS_USERSPACE_HELPER_BIT), }; /* Connection tracking event types */ diff --git a/trunk/include/linux/netfilter/nf_conntrack_tuple_common.h b/trunk/include/linux/netfilter/nf_conntrack_tuple_common.h index 2f6bbc5b8125..2ea22b018a87 100644 --- a/trunk/include/linux/netfilter/nf_conntrack_tuple_common.h +++ b/trunk/include/linux/netfilter/nf_conntrack_tuple_common.h @@ -7,33 +7,6 @@ enum ip_conntrack_dir { IP_CT_DIR_MAX }; -/* The protocol-specific manipulable parts of the tuple: always in - * network order - */ -union nf_conntrack_man_proto { - /* Add other protocols here. */ - __be16 all; - - struct { - __be16 port; - } tcp; - struct { - __be16 port; - } udp; - struct { - __be16 id; - } icmp; - struct { - __be16 port; - } dccp; - struct { - __be16 port; - } sctp; - struct { - __be16 key; /* GRE key is 32bit, PPtP only uses 16bit */ - } gre; -}; - #define CTINFO2DIR(ctinfo) ((ctinfo) >= IP_CT_IS_REPLY ? IP_CT_DIR_REPLY : IP_CT_DIR_ORIGINAL) #endif /* _NF_CONNTRACK_TUPLE_COMMON_H */ diff --git a/trunk/include/linux/netfilter/nf_nat.h b/trunk/include/linux/netfilter/nf_nat.h deleted file mode 100644 index 8df2d13730b2..000000000000 --- a/trunk/include/linux/netfilter/nf_nat.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef _NETFILTER_NF_NAT_H -#define _NETFILTER_NF_NAT_H - -#include -#include - -#define NF_NAT_RANGE_MAP_IPS 1 -#define NF_NAT_RANGE_PROTO_SPECIFIED 2 -#define NF_NAT_RANGE_PROTO_RANDOM 4 -#define NF_NAT_RANGE_PERSISTENT 8 - -struct nf_nat_ipv4_range { - unsigned int flags; - __be32 min_ip; - __be32 max_ip; - union nf_conntrack_man_proto min; - union nf_conntrack_man_proto max; -}; - -struct nf_nat_ipv4_multi_range_compat { - unsigned int rangesize; - struct nf_nat_ipv4_range range[1]; -}; - -#endif /* _NETFILTER_NF_NAT_H */ diff --git a/trunk/include/linux/netfilter/nfnetlink.h b/trunk/include/linux/netfilter/nfnetlink.h index b64454c2f79f..74d33861473c 100644 --- a/trunk/include/linux/netfilter/nfnetlink.h +++ b/trunk/include/linux/netfilter/nfnetlink.h @@ -48,8 +48,7 @@ struct nfgenmsg { #define NFNL_SUBSYS_ULOG 4 #define NFNL_SUBSYS_OSF 5 #define NFNL_SUBSYS_IPSET 6 -#define NFNL_SUBSYS_ACCT 7 -#define NFNL_SUBSYS_COUNT 8 +#define NFNL_SUBSYS_COUNT 7 #ifdef __KERNEL__ diff --git a/trunk/include/linux/netfilter/nfnetlink_acct.h b/trunk/include/linux/netfilter/nfnetlink_acct.h deleted file mode 100644 index 7c4279b4ae7a..000000000000 --- a/trunk/include/linux/netfilter/nfnetlink_acct.h +++ /dev/null @@ -1,36 +0,0 @@ -#ifndef _NFNL_ACCT_H_ -#define _NFNL_ACCT_H_ - -#ifndef NFACCT_NAME_MAX -#define NFACCT_NAME_MAX 32 -#endif - -enum nfnl_acct_msg_types { - NFNL_MSG_ACCT_NEW, - NFNL_MSG_ACCT_GET, - NFNL_MSG_ACCT_GET_CTRZERO, - NFNL_MSG_ACCT_DEL, - NFNL_MSG_ACCT_MAX -}; - -enum nfnl_acct_type { - NFACCT_UNSPEC, - NFACCT_NAME, - NFACCT_PKTS, - NFACCT_BYTES, - NFACCT_USE, - __NFACCT_MAX -}; -#define NFACCT_MAX (__NFACCT_MAX - 1) - -#ifdef __KERNEL__ - -struct nf_acct; - -extern struct nf_acct *nfnl_acct_find_get(const char *filter_name); -extern void nfnl_acct_put(struct nf_acct *acct); -extern void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct); - -#endif /* __KERNEL__ */ - -#endif /* _NFNL_ACCT_H */ diff --git a/trunk/include/linux/netfilter/xt_CT.h b/trunk/include/linux/netfilter/xt_CT.h index 6390f0992f36..b56e76811c04 100644 --- a/trunk/include/linux/netfilter/xt_CT.h +++ b/trunk/include/linux/netfilter/xt_CT.h @@ -3,8 +3,7 @@ #include -#define XT_CT_NOTRACK 0x1 -#define XT_CT_USERSPACE_HELPER 0x2 
+#define XT_CT_NOTRACK 0x1 struct xt_ct_target_info { __u16 flags; diff --git a/trunk/include/linux/netfilter/xt_ecn.h b/trunk/include/linux/netfilter/xt_ecn.h deleted file mode 100644 index 7158fca364f2..000000000000 --- a/trunk/include/linux/netfilter/xt_ecn.h +++ /dev/null @@ -1,35 +0,0 @@ -/* iptables module for matching the ECN header in IPv4 and TCP header - * - * (C) 2002 Harald Welte - * - * This software is distributed under GNU GPL v2, 1991 - * - * ipt_ecn.h,v 1.4 2002/08/05 19:39:00 laforge Exp -*/ -#ifndef _XT_ECN_H -#define _XT_ECN_H - -#include -#include - -#define XT_ECN_IP_MASK (~XT_DSCP_MASK) - -#define XT_ECN_OP_MATCH_IP 0x01 -#define XT_ECN_OP_MATCH_ECE 0x10 -#define XT_ECN_OP_MATCH_CWR 0x20 - -#define XT_ECN_OP_MATCH_MASK 0xce - -/* match info */ -struct xt_ecn_info { - __u8 operation; - __u8 invert; - __u8 ip_ect; - union { - struct { - __u8 ect; - } tcp; - } proto; -}; - -#endif /* _XT_ECN_H */ diff --git a/trunk/include/linux/netfilter/xt_nfacct.h b/trunk/include/linux/netfilter/xt_nfacct.h deleted file mode 100644 index 3e19c8a86576..000000000000 --- a/trunk/include/linux/netfilter/xt_nfacct.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef _XT_NFACCT_MATCH_H -#define _XT_NFACCT_MATCH_H - -#include - -struct nf_acct; - -struct xt_nfacct_match_info { - char name[NFACCT_NAME_MAX]; - struct nf_acct *nfacct; -}; - -#endif /* _XT_NFACCT_MATCH_H */ diff --git a/trunk/include/linux/netfilter/xt_rpfilter.h b/trunk/include/linux/netfilter/xt_rpfilter.h deleted file mode 100644 index 8358d4f71952..000000000000 --- a/trunk/include/linux/netfilter/xt_rpfilter.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef _XT_RPATH_H -#define _XT_RPATH_H - -#include - -enum { - XT_RPFILTER_LOOSE = 1 << 0, - XT_RPFILTER_VALID_MARK = 1 << 1, - XT_RPFILTER_ACCEPT_LOCAL = 1 << 2, - XT_RPFILTER_INVERT = 1 << 3, -#ifdef __KERNEL__ - XT_RPFILTER_OPTION_MASK = XT_RPFILTER_LOOSE | - XT_RPFILTER_VALID_MARK | - XT_RPFILTER_ACCEPT_LOCAL | - XT_RPFILTER_INVERT, -#endif -}; - -struct xt_rpfilter_info { - __u8 flags; -}; - -#endif diff --git a/trunk/include/linux/netfilter_ipv4/Kbuild b/trunk/include/linux/netfilter_ipv4/Kbuild index f9930c87fff3..c3b45480ecf7 100644 --- a/trunk/include/linux/netfilter_ipv4/Kbuild +++ b/trunk/include/linux/netfilter_ipv4/Kbuild @@ -12,3 +12,4 @@ header-y += ipt_ah.h header-y += ipt_ecn.h header-y += ipt_realm.h header-y += ipt_ttl.h +header-y += nf_nat.h diff --git a/trunk/include/linux/netfilter_ipv4/ipt_ecn.h b/trunk/include/linux/netfilter_ipv4/ipt_ecn.h index 0e0c063dbf60..eabf95fb7d3e 100644 --- a/trunk/include/linux/netfilter_ipv4/ipt_ecn.h +++ b/trunk/include/linux/netfilter_ipv4/ipt_ecn.h @@ -1,15 +1,35 @@ +/* iptables module for matching the ECN header in IPv4 and TCP header + * + * (C) 2002 Harald Welte + * + * This software is distributed under GNU GPL v2, 1991 + * + * ipt_ecn.h,v 1.4 2002/08/05 19:39:00 laforge Exp +*/ #ifndef _IPT_ECN_H #define _IPT_ECN_H -#include -#define ipt_ecn_info xt_ecn_info +#include +#include -enum { - IPT_ECN_IP_MASK = XT_ECN_IP_MASK, - IPT_ECN_OP_MATCH_IP = XT_ECN_OP_MATCH_IP, - IPT_ECN_OP_MATCH_ECE = XT_ECN_OP_MATCH_ECE, - IPT_ECN_OP_MATCH_CWR = XT_ECN_OP_MATCH_CWR, - IPT_ECN_OP_MATCH_MASK = XT_ECN_OP_MATCH_MASK, +#define IPT_ECN_IP_MASK (~XT_DSCP_MASK) + +#define IPT_ECN_OP_MATCH_IP 0x01 +#define IPT_ECN_OP_MATCH_ECE 0x10 +#define IPT_ECN_OP_MATCH_CWR 0x20 + +#define IPT_ECN_OP_MATCH_MASK 0xce + +/* match info */ +struct ipt_ecn_info { + __u8 operation; + __u8 invert; + __u8 ip_ect; + union { + struct { + __u8 ect; + } tcp; + } proto; }; -#endif 
/* IPT_ECN_H */ +#endif /* _IPT_ECN_H */ diff --git a/trunk/include/linux/netfilter_ipv4/nf_nat.h b/trunk/include/linux/netfilter_ipv4/nf_nat.h new file mode 100644 index 000000000000..7a861d09fc86 --- /dev/null +++ b/trunk/include/linux/netfilter_ipv4/nf_nat.h @@ -0,0 +1,58 @@ +#ifndef _LINUX_NF_NAT_H +#define _LINUX_NF_NAT_H + +#include + +#define IP_NAT_RANGE_MAP_IPS 1 +#define IP_NAT_RANGE_PROTO_SPECIFIED 2 +#define IP_NAT_RANGE_PROTO_RANDOM 4 +#define IP_NAT_RANGE_PERSISTENT 8 + +/* The protocol-specific manipulable parts of the tuple. */ +union nf_conntrack_man_proto { + /* Add other protocols here. */ + __be16 all; + + struct { + __be16 port; + } tcp; + struct { + __be16 port; + } udp; + struct { + __be16 id; + } icmp; + struct { + __be16 port; + } dccp; + struct { + __be16 port; + } sctp; + struct { + __be16 key; /* GRE key is 32bit, PPtP only uses 16bit */ + } gre; +}; + +/* Single range specification. */ +struct nf_nat_range { + /* Set to OR of flags above. */ + unsigned int flags; + + /* Inclusive: network order. */ + __be32 min_ip, max_ip; + + /* Inclusive: network order */ + union nf_conntrack_man_proto min, max; +}; + +/* For backwards compat: don't use in modern code. */ +struct nf_nat_multi_range_compat { + unsigned int rangesize; /* Must be 1. */ + + /* hangs off end. */ + struct nf_nat_range range[1]; +}; + +#define nf_nat_multi_range nf_nat_multi_range_compat + +#endif diff --git a/trunk/include/linux/netlink.h b/trunk/include/linux/netlink.h index 52e48959cfa1..8374d2967362 100644 --- a/trunk/include/linux/netlink.h +++ b/trunk/include/linux/netlink.h @@ -8,7 +8,7 @@ #define NETLINK_UNUSED 1 /* Unused number */ #define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */ #define NETLINK_FIREWALL 3 /* Firewalling hook */ -#define NETLINK_SOCK_DIAG 4 /* socket monitoring */ +#define NETLINK_INET_DIAG 4 /* INET socket monitoring */ #define NETLINK_NFLOG 5 /* netfilter/iptables ULOG */ #define NETLINK_XFRM 6 /* ipsec */ #define NETLINK_SELINUX 7 /* SELinux event notifications */ @@ -27,8 +27,6 @@ #define NETLINK_RDMA 20 #define NETLINK_CRYPTO 21 /* Crypto layer */ -#define NETLINK_INET_DIAG NETLINK_SOCK_DIAG - #define MAX_LINKS 32 struct sockaddr_nl { diff --git a/trunk/include/linux/nfc.h b/trunk/include/linux/nfc.h index 01d4e5d60325..36cb955b05cc 100644 --- a/trunk/include/linux/nfc.h +++ b/trunk/include/linux/nfc.h @@ -62,8 +62,6 @@ enum nfc_commands { NFC_CMD_GET_DEVICE, NFC_CMD_DEV_UP, NFC_CMD_DEV_DOWN, - NFC_CMD_DEP_LINK_UP, - NFC_CMD_DEP_LINK_DOWN, NFC_CMD_START_POLL, NFC_CMD_STOP_POLL, NFC_CMD_GET_TARGET, @@ -88,9 +86,6 @@ enum nfc_commands { * @NFC_ATTR_TARGET_SENS_RES: NFC-A targets extra information such as NFCID * @NFC_ATTR_TARGET_SEL_RES: NFC-A targets extra information (useful if the * target is not NFC-Forum compliant) - * @NFC_ATTR_TARGET_NFCID1: NFC-A targets identifier, max 10 bytes - * @NFC_ATTR_COMM_MODE: Passive or active mode - * @NFC_ATTR_RF_MODE: Initiator or target */ enum nfc_attrs { NFC_ATTR_UNSPEC, @@ -100,9 +95,6 @@ enum nfc_attrs { NFC_ATTR_TARGET_INDEX, NFC_ATTR_TARGET_SENS_RES, NFC_ATTR_TARGET_SEL_RES, - NFC_ATTR_TARGET_NFCID1, - NFC_ATTR_COMM_MODE, - NFC_ATTR_RF_MODE, /* private: internal use only */ __NFC_ATTR_AFTER_LAST }; @@ -119,14 +111,6 @@ enum nfc_attrs { #define NFC_PROTO_MAX 6 -/* NFC communication modes */ -#define NFC_COMM_ACTIVE 0 -#define NFC_COMM_PASSIVE 1 - -/* NFC RF modes */ -#define NFC_RF_INITIATOR 0 -#define NFC_RF_TARGET 1 - /* NFC protocols masks used in bitsets */ #define NFC_PROTO_JEWEL_MASK (1 << 
NFC_PROTO_JEWEL) #define NFC_PROTO_MIFARE_MASK (1 << NFC_PROTO_MIFARE) @@ -141,22 +125,9 @@ struct sockaddr_nfc { __u32 nfc_protocol; }; -#define NFC_LLCP_MAX_SERVICE_NAME 63 -struct sockaddr_nfc_llcp { - sa_family_t sa_family; - __u32 dev_idx; - __u32 target_idx; - __u32 nfc_protocol; - __u8 dsap; /* Destination SAP, if known */ - __u8 ssap; /* Source SAP to be bound to */ - char service_name[NFC_LLCP_MAX_SERVICE_NAME]; /* Service name URI */; - size_t service_name_len; -}; - /* NFC socket protocols */ #define NFC_SOCKPROTO_RAW 0 -#define NFC_SOCKPROTO_LLCP 1 -#define NFC_SOCKPROTO_MAX 2 +#define NFC_SOCKPROTO_MAX 1 #define NFC_HEADER_SIZE 1 diff --git a/trunk/include/linux/nl80211.h b/trunk/include/linux/nl80211.h index 0f5ff3739820..8049bf77d799 100644 --- a/trunk/include/linux/nl80211.h +++ b/trunk/include/linux/nl80211.h @@ -509,38 +509,6 @@ * @NL80211_CMD_TDLS_OPER: Perform a high-level TDLS command (e.g. link setup). * @NL80211_CMD_TDLS_MGMT: Send a TDLS management frame. * - * @NL80211_CMD_UNEXPECTED_FRAME: Used by an application controlling an AP - * (or GO) interface (i.e. hostapd) to ask for unexpected frames to - * implement sending deauth to stations that send unexpected class 3 - * frames. Also used as the event sent by the kernel when such a frame - * is received. - * For the event, the %NL80211_ATTR_MAC attribute carries the TA and - * other attributes like the interface index are present. - * If used as the command it must have an interface index and you can - * only unsubscribe from the event by closing the socket. Subscription - * is also for %NL80211_CMD_UNEXPECTED_4ADDR_FRAME events. - * - * @NL80211_CMD_UNEXPECTED_4ADDR_FRAME: Sent as an event indicating that the - * associated station identified by %NL80211_ATTR_MAC sent a 4addr frame - * and wasn't already in a 4-addr VLAN. The event will be sent similarly - * to the %NL80211_CMD_UNEXPECTED_FRAME event, to the same listener. - * - * @NL80211_CMD_PROBE_CLIENT: Probe an associated station on an AP interface - * by sending a null data frame to it and reporting when the frame is - * acknowleged. This is used to allow timing out inactive clients. Uses - * %NL80211_ATTR_IFINDEX and %NL80211_ATTR_MAC. The command returns a - * direct reply with an %NL80211_ATTR_COOKIE that is later used to match - * up the event with the request. The event includes the same data and - * has %NL80211_ATTR_ACK set if the frame was ACKed. - * - * @NL80211_CMD_REGISTER_BEACONS: Register this socket to receive beacons from - * other BSSes when any interfaces are in AP mode. This helps implement - * OLBC handling in hostapd. Beacons are reported in %NL80211_CMD_FRAME - * messages. Note that per PHY only one application may register. - * - * @NL80211_CMD_SET_NOACK_MAP: sets a bitmap for the individual TIDs whether - * No Acknowledgement Policy should be applied. 
- * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -670,16 +638,6 @@ enum nl80211_commands { NL80211_CMD_TDLS_OPER, NL80211_CMD_TDLS_MGMT, - NL80211_CMD_UNEXPECTED_FRAME, - - NL80211_CMD_PROBE_CLIENT, - - NL80211_CMD_REGISTER_BEACONS, - - NL80211_CMD_UNEXPECTED_4ADDR_FRAME, - - NL80211_CMD_SET_NOACK_MAP, - /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -700,8 +658,6 @@ enum nl80211_commands { #define NL80211_CMD_DISASSOCIATE NL80211_CMD_DISASSOCIATE #define NL80211_CMD_REG_BEACON_HINT NL80211_CMD_REG_BEACON_HINT -#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS - /* source-level API compatibility */ #define NL80211_CMD_GET_MESH_PARAMS NL80211_CMD_GET_MESH_CONFIG #define NL80211_CMD_SET_MESH_PARAMS NL80211_CMD_SET_MESH_CONFIG @@ -1153,46 +1109,6 @@ enum nl80211_commands { * %NL80211_CMD_TDLS_MGMT. Otherwise %NL80211_CMD_TDLS_OPER should be * used for asking the driver to perform a TDLS operation. * - * @NL80211_ATTR_DEVICE_AP_SME: This u32 attribute may be listed for devices - * that have AP support to indicate that they have the AP SME integrated - * with support for the features listed in this attribute, see - * &enum nl80211_ap_sme_features. - * - * @NL80211_ATTR_DONT_WAIT_FOR_ACK: Used with %NL80211_CMD_FRAME, this tells - * the driver to not wait for an acknowledgement. Note that due to this, - * it will also not give a status callback nor return a cookie. This is - * mostly useful for probe responses to save airtime. - * - * @NL80211_ATTR_FEATURE_FLAGS: This u32 attribute contains flags from - * &enum nl80211_feature_flags and is advertised in wiphy information. - * @NL80211_ATTR_PROBE_RESP_OFFLOAD: Indicates that the HW responds to probe - * - * requests while operating in AP-mode. - * This attribute holds a bitmap of the supported protocols for - * offloading (see &enum nl80211_probe_resp_offload_support_attr). - * - * @NL80211_ATTR_PROBE_RESP: Probe Response template data. Contains the entire - * probe-response frame. The DA field in the 802.11 header is zero-ed out, - * to be filled by the FW. - * @NL80211_ATTR_DISABLE_HT: Force HT capable interfaces to disable - * this feature. Currently, only supported in mac80211 drivers. - * @NL80211_ATTR_HT_CAPABILITY_MASK: Specify which bits of the - * ATTR_HT_CAPABILITY to which attention should be paid. - * Currently, only mac80211 NICs support this feature. - * The values that may be configured are: - * MCS rates, MAX-AMSDU, HT-20-40 and HT_CAP_SGI_40 - * AMPDU density and AMPDU factor. - * All values are treated as suggestions and may be ignored - * by the driver as required. The actual values may be seen in - * the station debugfs ht_caps file. - * - * @NL80211_ATTR_DFS_REGION: region for regulatory rules which this country - * abides to when initiating radiation on DFS channels. A country maps - * to one DFS region. - * - * @NL80211_ATTR_NOACK_MAP: This u16 bitmap contains the No Ack Policy of - * up to 16 TIDs. 
- * * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ @@ -1421,23 +1337,6 @@ enum nl80211_attrs { NL80211_ATTR_TDLS_SUPPORT, NL80211_ATTR_TDLS_EXTERNAL_SETUP, - NL80211_ATTR_DEVICE_AP_SME, - - NL80211_ATTR_DONT_WAIT_FOR_ACK, - - NL80211_ATTR_FEATURE_FLAGS, - - NL80211_ATTR_PROBE_RESP_OFFLOAD, - - NL80211_ATTR_PROBE_RESP, - - NL80211_ATTR_DFS_REGION, - - NL80211_ATTR_DISABLE_HT, - NL80211_ATTR_HT_CAPABILITY_MASK, - - NL80211_ATTR_NOACK_MAP, - /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -1472,7 +1371,6 @@ enum nl80211_attrs { #define NL80211_ATTR_AKM_SUITES NL80211_ATTR_AKM_SUITES #define NL80211_ATTR_KEY NL80211_ATTR_KEY #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS -#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS #define NL80211_MAX_SUPP_RATES 32 #define NL80211_MAX_SUPP_REG_RULES 32 @@ -1536,11 +1434,7 @@ enum nl80211_iftype { * @NL80211_STA_FLAG_WME: station is WME/QoS capable * @NL80211_STA_FLAG_MFP: station uses management frame protection * @NL80211_STA_FLAG_AUTHENTICATED: station is authenticated - * @NL80211_STA_FLAG_TDLS_PEER: station is a TDLS peer -- this flag should - * only be used in managed mode (even in the flags mask). Note that the - * flag can't be changed, it is only valid while adding a station, and - * attempts to change it will silently be ignored (rather than rejected - * as errors.) + * @NL80211_STA_FLAG_TDLS_PEER: station is a TDLS peer * @NL80211_STA_FLAG_MAX: highest station flag number currently defined * @__NL80211_STA_FLAG_AFTER_LAST: internal use */ @@ -1655,7 +1549,6 @@ enum nl80211_sta_bss_param { * containing info as possible, see &enum nl80211_sta_bss_param * @NL80211_STA_INFO_CONNECTED_TIME: time since the station is last connected * @NL80211_STA_INFO_STA_FLAGS: Contains a struct nl80211_sta_flag_update. - * @NL80211_STA_INFO_BEACON_LOSS: count of times beacon loss was detected (u32) * @__NL80211_STA_INFO_AFTER_LAST: internal * @NL80211_STA_INFO_MAX: highest possible station info attribute */ @@ -1678,7 +1571,6 @@ enum nl80211_sta_info { NL80211_STA_INFO_BSS_PARAM, NL80211_STA_INFO_CONNECTED_TIME, NL80211_STA_INFO_STA_FLAGS, - NL80211_STA_INFO_BEACON_LOSS, /* keep last */ __NL80211_STA_INFO_AFTER_LAST, @@ -1952,21 +1844,6 @@ enum nl80211_reg_rule_flags { NL80211_RRF_NO_IBSS = 1<<8, }; -/** - * enum nl80211_dfs_regions - regulatory DFS regions - * - * @NL80211_DFS_UNSET: Country has no DFS master region specified - * @NL80211_DFS_FCC_: Country follows DFS master rules from FCC - * @NL80211_DFS_FCC_: Country follows DFS master rules from ETSI - * @NL80211_DFS_JP_: Country follows DFS master rules from JP/MKK/Telec - */ -enum nl80211_dfs_regions { - NL80211_DFS_UNSET = 0, - NL80211_DFS_FCC = 1, - NL80211_DFS_ETSI = 2, - NL80211_DFS_JP = 3, -}; - /** * enum nl80211_survey_info - survey information * @@ -2100,10 +1977,6 @@ enum nl80211_mntr_flags { * access to a broader network beyond the MBSS. This is done via Root * Announcement frames. * - * @NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL: The minimum interval of time (in - * TUs) during which a mesh STA can send only one Action frame containing a - * PERR element. 
- * * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute * * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use @@ -2127,7 +2000,6 @@ enum nl80211_meshconf_params { NL80211_MESHCONF_ELEMENT_TTL, NL80211_MESHCONF_HWMP_RANN_INTERVAL, NL80211_MESHCONF_GATE_ANNOUNCEMENTS, - NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, /* keep last */ __NL80211_MESHCONF_ATTR_AFTER_LAST, @@ -2778,45 +2650,4 @@ enum nl80211_tdls_operation { NL80211_TDLS_DISABLE_LINK, }; -/* - * enum nl80211_ap_sme_features - device-integrated AP features - * Reserved for future use, no bits are defined in - * NL80211_ATTR_DEVICE_AP_SME yet. -enum nl80211_ap_sme_features { -}; - */ - -/** - * enum nl80211_feature_flags - device/driver features - * @NL80211_FEATURE_SK_TX_STATUS: This driver supports reflecting back - * TX status to the socket error queue when requested with the - * socket option. - * @NL80211_FEATURE_HT_IBSS: This driver supports IBSS with HT datarates. - */ -enum nl80211_feature_flags { - NL80211_FEATURE_SK_TX_STATUS = 1 << 0, - NL80211_FEATURE_HT_IBSS = 1 << 1, -}; - -/** - * enum nl80211_probe_resp_offload_support_attr - optional supported - * protocols for probe-response offloading by the driver/FW. - * To be used with the %NL80211_ATTR_PROBE_RESP_OFFLOAD attribute. - * Each enum value represents a bit in the bitmap of supported - * protocols. Typically a subset of probe-requests belonging to a - * supported protocol will be excluded from offload and uploaded - * to the host. - * - * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS: Support for WPS ver. 1 - * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2: Support for WPS ver. 2 - * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P: Support for P2P - * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U: Support for 802.11u - */ -enum nl80211_probe_resp_offload_support_attr { - NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS = 1<<0, - NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 = 1<<1, - NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P = 1<<2, - NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U = 1<<3, -}; - #endif /* __LINUX_NL80211_H */ diff --git a/trunk/include/linux/openvswitch.h b/trunk/include/linux/openvswitch.h deleted file mode 100644 index eb1efa54fe84..000000000000 --- a/trunk/include/linux/openvswitch.h +++ /dev/null @@ -1,452 +0,0 @@ -/* - * Copyright (c) 2007-2011 Nicira Networks. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA - */ - -#ifndef _LINUX_OPENVSWITCH_H -#define _LINUX_OPENVSWITCH_H 1 - -#include - -/** - * struct ovs_header - header for OVS Generic Netlink messages. - * @dp_ifindex: ifindex of local port for datapath (0 to make a request not - * specific to a datapath). - * - * Attributes following the header are specific to a particular OVS Generic - * Netlink family, but all of the OVS families use this header. - */ - -struct ovs_header { - int dp_ifindex; -}; - -/* Datapaths. 
*/ - -#define OVS_DATAPATH_FAMILY "ovs_datapath" -#define OVS_DATAPATH_MCGROUP "ovs_datapath" -#define OVS_DATAPATH_VERSION 0x1 - -enum ovs_datapath_cmd { - OVS_DP_CMD_UNSPEC, - OVS_DP_CMD_NEW, - OVS_DP_CMD_DEL, - OVS_DP_CMD_GET, - OVS_DP_CMD_SET -}; - -/** - * enum ovs_datapath_attr - attributes for %OVS_DP_* commands. - * @OVS_DP_ATTR_NAME: Name of the network device that serves as the "local - * port". This is the name of the network device whose dp_ifindex is given in - * the &struct ovs_header. Always present in notifications. Required in - * %OVS_DP_NEW requests. May be used as an alternative to specifying - * dp_ifindex in other requests (with a dp_ifindex of 0). - * @OVS_DP_ATTR_UPCALL_PID: The Netlink socket in userspace that is initially - * set on the datapath port (for OVS_ACTION_ATTR_MISS). Only valid on - * %OVS_DP_CMD_NEW requests. A value of zero indicates that upcalls should - * not be sent. - * @OVS_DP_ATTR_STATS: Statistics about packets that have passed through the - * datapath. Always present in notifications. - * - * These attributes follow the &struct ovs_header within the Generic Netlink - * payload for %OVS_DP_* commands. - */ -enum ovs_datapath_attr { - OVS_DP_ATTR_UNSPEC, - OVS_DP_ATTR_NAME, /* name of dp_ifindex netdev */ - OVS_DP_ATTR_UPCALL_PID, /* Netlink PID to receive upcalls */ - OVS_DP_ATTR_STATS, /* struct ovs_dp_stats */ - __OVS_DP_ATTR_MAX -}; - -#define OVS_DP_ATTR_MAX (__OVS_DP_ATTR_MAX - 1) - -struct ovs_dp_stats { - __u64 n_hit; /* Number of flow table matches. */ - __u64 n_missed; /* Number of flow table misses. */ - __u64 n_lost; /* Number of misses not sent to userspace. */ - __u64 n_flows; /* Number of flows present */ -}; - -struct ovs_vport_stats { - __u64 rx_packets; /* total packets received */ - __u64 tx_packets; /* total packets transmitted */ - __u64 rx_bytes; /* total bytes received */ - __u64 tx_bytes; /* total bytes transmitted */ - __u64 rx_errors; /* bad packets received */ - __u64 tx_errors; /* packet transmit problems */ - __u64 rx_dropped; /* no space in linux buffers */ - __u64 tx_dropped; /* no space available in linux */ -}; - -/* Fixed logical ports. */ -#define OVSP_LOCAL ((__u16)0) - -/* Packet transfer. */ - -#define OVS_PACKET_FAMILY "ovs_packet" -#define OVS_PACKET_VERSION 0x1 - -enum ovs_packet_cmd { - OVS_PACKET_CMD_UNSPEC, - - /* Kernel-to-user notifications. */ - OVS_PACKET_CMD_MISS, /* Flow table miss. */ - OVS_PACKET_CMD_ACTION, /* OVS_ACTION_ATTR_USERSPACE action. */ - - /* Userspace commands. */ - OVS_PACKET_CMD_EXECUTE /* Apply actions to a packet. */ -}; - -/** - * enum ovs_packet_attr - attributes for %OVS_PACKET_* commands. - * @OVS_PACKET_ATTR_PACKET: Present for all notifications. Contains the entire - * packet as received, from the start of the Ethernet header onward. For - * %OVS_PACKET_CMD_ACTION, %OVS_PACKET_ATTR_PACKET reflects changes made by - * actions preceding %OVS_ACTION_ATTR_USERSPACE, but %OVS_PACKET_ATTR_KEY is - * the flow key extracted from the packet as originally received. - * @OVS_PACKET_ATTR_KEY: Present for all notifications. Contains the flow key - * extracted from the packet as nested %OVS_KEY_ATTR_* attributes. This allows - * userspace to adapt its flow setup strategy by comparing its notion of the - * flow key against the kernel's. - * @OVS_PACKET_ATTR_ACTIONS: Contains actions for the packet. Used - * for %OVS_PACKET_CMD_EXECUTE. It has nested %OVS_ACTION_ATTR_* attributes. 
- * @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION - * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an - * %OVS_USERSPACE_ATTR_USERDATA attribute. - * - * These attributes follow the &struct ovs_header within the Generic Netlink - * payload for %OVS_PACKET_* commands. - */ -enum ovs_packet_attr { - OVS_PACKET_ATTR_UNSPEC, - OVS_PACKET_ATTR_PACKET, /* Packet data. */ - OVS_PACKET_ATTR_KEY, /* Nested OVS_KEY_ATTR_* attributes. */ - OVS_PACKET_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */ - OVS_PACKET_ATTR_USERDATA, /* u64 OVS_ACTION_ATTR_USERSPACE arg. */ - __OVS_PACKET_ATTR_MAX -}; - -#define OVS_PACKET_ATTR_MAX (__OVS_PACKET_ATTR_MAX - 1) - -/* Virtual ports. */ - -#define OVS_VPORT_FAMILY "ovs_vport" -#define OVS_VPORT_MCGROUP "ovs_vport" -#define OVS_VPORT_VERSION 0x1 - -enum ovs_vport_cmd { - OVS_VPORT_CMD_UNSPEC, - OVS_VPORT_CMD_NEW, - OVS_VPORT_CMD_DEL, - OVS_VPORT_CMD_GET, - OVS_VPORT_CMD_SET -}; - -enum ovs_vport_type { - OVS_VPORT_TYPE_UNSPEC, - OVS_VPORT_TYPE_NETDEV, /* network device */ - OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */ - __OVS_VPORT_TYPE_MAX -}; - -#define OVS_VPORT_TYPE_MAX (__OVS_VPORT_TYPE_MAX - 1) - -/** - * enum ovs_vport_attr - attributes for %OVS_VPORT_* commands. - * @OVS_VPORT_ATTR_PORT_NO: 32-bit port number within datapath. - * @OVS_VPORT_ATTR_TYPE: 32-bit %OVS_VPORT_TYPE_* constant describing the type - * of vport. - * @OVS_VPORT_ATTR_NAME: Name of vport. For a vport based on a network device - * this is the name of the network device. Maximum length %IFNAMSIZ-1 bytes - * plus a null terminator. - * @OVS_VPORT_ATTR_OPTIONS: Vport-specific configuration information. - * @OVS_VPORT_ATTR_UPCALL_PID: The Netlink socket in userspace that - * OVS_PACKET_CMD_MISS upcalls will be directed to for packets received on - * this port. A value of zero indicates that upcalls should not be sent. - * @OVS_VPORT_ATTR_STATS: A &struct ovs_vport_stats giving statistics for - * packets sent or received through the vport. - * - * These attributes follow the &struct ovs_header within the Generic Netlink - * payload for %OVS_VPORT_* commands. - * - * For %OVS_VPORT_CMD_NEW requests, the %OVS_VPORT_ATTR_TYPE and - * %OVS_VPORT_ATTR_NAME attributes are required. %OVS_VPORT_ATTR_PORT_NO is - * optional; if not specified a free port number is automatically selected. - * Whether %OVS_VPORT_ATTR_OPTIONS is required or optional depends on the type - * of vport. - * and other attributes are ignored. - * - * For other requests, if %OVS_VPORT_ATTR_NAME is specified then it is used to - * look up the vport to operate on; otherwise dp_idx from the &struct - * ovs_header plus %OVS_VPORT_ATTR_PORT_NO determine the vport. - */ -enum ovs_vport_attr { - OVS_VPORT_ATTR_UNSPEC, - OVS_VPORT_ATTR_PORT_NO, /* u32 port number within datapath */ - OVS_VPORT_ATTR_TYPE, /* u32 OVS_VPORT_TYPE_* constant. */ - OVS_VPORT_ATTR_NAME, /* string name, up to IFNAMSIZ bytes long */ - OVS_VPORT_ATTR_OPTIONS, /* nested attributes, varies by vport type */ - OVS_VPORT_ATTR_UPCALL_PID, /* u32 Netlink PID to receive upcalls */ - OVS_VPORT_ATTR_STATS, /* struct ovs_vport_stats */ - __OVS_VPORT_ATTR_MAX -}; - -#define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1) - -/* Flows. 
*/ - -#define OVS_FLOW_FAMILY "ovs_flow" -#define OVS_FLOW_MCGROUP "ovs_flow" -#define OVS_FLOW_VERSION 0x1 - -enum ovs_flow_cmd { - OVS_FLOW_CMD_UNSPEC, - OVS_FLOW_CMD_NEW, - OVS_FLOW_CMD_DEL, - OVS_FLOW_CMD_GET, - OVS_FLOW_CMD_SET -}; - -struct ovs_flow_stats { - __u64 n_packets; /* Number of matched packets. */ - __u64 n_bytes; /* Number of matched bytes. */ -}; - -enum ovs_key_attr { - OVS_KEY_ATTR_UNSPEC, - OVS_KEY_ATTR_ENCAP, /* Nested set of encapsulated attributes. */ - OVS_KEY_ATTR_PRIORITY, /* u32 skb->priority */ - OVS_KEY_ATTR_IN_PORT, /* u32 OVS dp port number */ - OVS_KEY_ATTR_ETHERNET, /* struct ovs_key_ethernet */ - OVS_KEY_ATTR_VLAN, /* be16 VLAN TCI */ - OVS_KEY_ATTR_ETHERTYPE, /* be16 Ethernet type */ - OVS_KEY_ATTR_IPV4, /* struct ovs_key_ipv4 */ - OVS_KEY_ATTR_IPV6, /* struct ovs_key_ipv6 */ - OVS_KEY_ATTR_TCP, /* struct ovs_key_tcp */ - OVS_KEY_ATTR_UDP, /* struct ovs_key_udp */ - OVS_KEY_ATTR_ICMP, /* struct ovs_key_icmp */ - OVS_KEY_ATTR_ICMPV6, /* struct ovs_key_icmpv6 */ - OVS_KEY_ATTR_ARP, /* struct ovs_key_arp */ - OVS_KEY_ATTR_ND, /* struct ovs_key_nd */ - __OVS_KEY_ATTR_MAX -}; - -#define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1) - -/** - * enum ovs_frag_type - IPv4 and IPv6 fragment type - * @OVS_FRAG_TYPE_NONE: Packet is not a fragment. - * @OVS_FRAG_TYPE_FIRST: Packet is a fragment with offset 0. - * @OVS_FRAG_TYPE_LATER: Packet is a fragment with nonzero offset. - * - * Used as the @ipv4_frag in &struct ovs_key_ipv4 and as @ipv6_frag &struct - * ovs_key_ipv6. - */ -enum ovs_frag_type { - OVS_FRAG_TYPE_NONE, - OVS_FRAG_TYPE_FIRST, - OVS_FRAG_TYPE_LATER, - __OVS_FRAG_TYPE_MAX -}; - -#define OVS_FRAG_TYPE_MAX (__OVS_FRAG_TYPE_MAX - 1) - -struct ovs_key_ethernet { - __u8 eth_src[6]; - __u8 eth_dst[6]; -}; - -struct ovs_key_ipv4 { - __be32 ipv4_src; - __be32 ipv4_dst; - __u8 ipv4_proto; - __u8 ipv4_tos; - __u8 ipv4_ttl; - __u8 ipv4_frag; /* One of OVS_FRAG_TYPE_*. */ -}; - -struct ovs_key_ipv6 { - __be32 ipv6_src[4]; - __be32 ipv6_dst[4]; - __be32 ipv6_label; /* 20-bits in least-significant bits. */ - __u8 ipv6_proto; - __u8 ipv6_tclass; - __u8 ipv6_hlimit; - __u8 ipv6_frag; /* One of OVS_FRAG_TYPE_*. */ -}; - -struct ovs_key_tcp { - __be16 tcp_src; - __be16 tcp_dst; -}; - -struct ovs_key_udp { - __be16 udp_src; - __be16 udp_dst; -}; - -struct ovs_key_icmp { - __u8 icmp_type; - __u8 icmp_code; -}; - -struct ovs_key_icmpv6 { - __u8 icmpv6_type; - __u8 icmpv6_code; -}; - -struct ovs_key_arp { - __be32 arp_sip; - __be32 arp_tip; - __be16 arp_op; - __u8 arp_sha[6]; - __u8 arp_tha[6]; -}; - -struct ovs_key_nd { - __u32 nd_target[4]; - __u8 nd_sll[6]; - __u8 nd_tll[6]; -}; - -/** - * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands. - * @OVS_FLOW_ATTR_KEY: Nested %OVS_KEY_ATTR_* attributes specifying the flow - * key. Always present in notifications. Required for all requests (except - * dumps). - * @OVS_FLOW_ATTR_ACTIONS: Nested %OVS_ACTION_ATTR_* attributes specifying - * the actions to take for packets that match the key. Always present in - * notifications. Required for %OVS_FLOW_CMD_NEW requests, optional for - * %OVS_FLOW_CMD_SET requests. - * @OVS_FLOW_ATTR_STATS: &struct ovs_flow_stats giving statistics for this - * flow. Present in notifications if the stats would be nonzero. Ignored in - * requests. - * @OVS_FLOW_ATTR_TCP_FLAGS: An 8-bit value giving the OR'd value of all of the - * TCP flags seen on packets in this flow. Only present in notifications for - * TCP flows, and only if it would be nonzero. Ignored in requests. 
- * @OVS_FLOW_ATTR_USED: A 64-bit integer giving the time, in milliseconds on - * the system monotonic clock, at which a packet was last processed for this - * flow. Only present in notifications if a packet has been processed for this - * flow. Ignored in requests. - * @OVS_FLOW_ATTR_CLEAR: If present in a %OVS_FLOW_CMD_SET request, clears the - * last-used time, accumulated TCP flags, and statistics for this flow. - * Otherwise ignored in requests. Never present in notifications. - * - * These attributes follow the &struct ovs_header within the Generic Netlink - * payload for %OVS_FLOW_* commands. - */ -enum ovs_flow_attr { - OVS_FLOW_ATTR_UNSPEC, - OVS_FLOW_ATTR_KEY, /* Sequence of OVS_KEY_ATTR_* attributes. */ - OVS_FLOW_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */ - OVS_FLOW_ATTR_STATS, /* struct ovs_flow_stats. */ - OVS_FLOW_ATTR_TCP_FLAGS, /* 8-bit OR'd TCP flags. */ - OVS_FLOW_ATTR_USED, /* u64 msecs last used in monotonic time. */ - OVS_FLOW_ATTR_CLEAR, /* Flag to clear stats, tcp_flags, used. */ - __OVS_FLOW_ATTR_MAX -}; - -#define OVS_FLOW_ATTR_MAX (__OVS_FLOW_ATTR_MAX - 1) - -/** - * enum ovs_sample_attr - Attributes for %OVS_ACTION_ATTR_SAMPLE action. - * @OVS_SAMPLE_ATTR_PROBABILITY: 32-bit fraction of packets to sample with - * @OVS_ACTION_ATTR_SAMPLE. A value of 0 samples no packets, a value of - * %UINT32_MAX samples all packets and intermediate values sample intermediate - * fractions of packets. - * @OVS_SAMPLE_ATTR_ACTIONS: Set of actions to execute in sampling event. - * Actions are passed as nested attributes. - * - * Executes the specified actions with the given probability on a per-packet - * basis. - */ -enum ovs_sample_attr { - OVS_SAMPLE_ATTR_UNSPEC, - OVS_SAMPLE_ATTR_PROBABILITY, /* u32 number */ - OVS_SAMPLE_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */ - __OVS_SAMPLE_ATTR_MAX, -}; - -#define OVS_SAMPLE_ATTR_MAX (__OVS_SAMPLE_ATTR_MAX - 1) - -/** - * enum ovs_userspace_attr - Attributes for %OVS_ACTION_ATTR_USERSPACE action. - * @OVS_USERSPACE_ATTR_PID: u32 Netlink PID to which the %OVS_PACKET_CMD_ACTION - * message should be sent. Required. - * @OVS_USERSPACE_ATTR_USERDATA: If present, its u64 argument is copied to the - * %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA, - */ -enum ovs_userspace_attr { - OVS_USERSPACE_ATTR_UNSPEC, - OVS_USERSPACE_ATTR_PID, /* u32 Netlink PID to receive upcalls. */ - OVS_USERSPACE_ATTR_USERDATA, /* u64 optional user-specified cookie. */ - __OVS_USERSPACE_ATTR_MAX -}; - -#define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1) - -/** - * struct ovs_action_push_vlan - %OVS_ACTION_ATTR_PUSH_VLAN action argument. - * @vlan_tpid: Tag protocol identifier (TPID) to push. - * @vlan_tci: Tag control identifier (TCI) to push. The CFI bit must be set - * (but it will not be set in the 802.1Q header that is pushed). - * - * The @vlan_tpid value is typically %ETH_P_8021Q. The only acceptable TPID - * values are those that the kernel module also parses as 802.1Q headers, to - * prevent %OVS_ACTION_ATTR_PUSH_VLAN followed by %OVS_ACTION_ATTR_POP_VLAN - * from having surprising results. - */ -struct ovs_action_push_vlan { - __be16 vlan_tpid; /* 802.1Q TPID. */ - __be16 vlan_tci; /* 802.1Q TCI (VLAN ID and priority). */ -}; - -/** - * enum ovs_action_attr - Action types. - * - * @OVS_ACTION_ATTR_OUTPUT: Output packet to port. - * @OVS_ACTION_ATTR_USERSPACE: Send packet to userspace according to nested - * %OVS_USERSPACE_ATTR_* attributes. 
- * @OVS_ACTION_ATTR_SET: Replaces the contents of an existing header. The - * single nested %OVS_KEY_ATTR_* attribute specifies a header to modify and its - * value. - * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q header onto the - * packet. - * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet. - * @OVS_ACTION_ATTR_SAMPLE: Probabilitically executes actions, as specified in - * the nested %OVS_SAMPLE_ATTR_* attributes. - * - * Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all - * fields within a header are modifiable, e.g. the IPv4 protocol and fragment - * type may not be changed. - */ - -enum ovs_action_attr { - OVS_ACTION_ATTR_UNSPEC, - OVS_ACTION_ATTR_OUTPUT, /* u32 port number. */ - OVS_ACTION_ATTR_USERSPACE, /* Nested OVS_USERSPACE_ATTR_*. */ - OVS_ACTION_ATTR_SET, /* One nested OVS_KEY_ATTR_*. */ - OVS_ACTION_ATTR_PUSH_VLAN, /* struct ovs_action_push_vlan. */ - OVS_ACTION_ATTR_POP_VLAN, /* No argument. */ - OVS_ACTION_ATTR_SAMPLE, /* Nested OVS_SAMPLE_ATTR_*. */ - __OVS_ACTION_ATTR_MAX -}; - -#define OVS_ACTION_ATTR_MAX (__OVS_ACTION_ATTR_MAX - 1) - -#endif /* _LINUX_OPENVSWITCH_H */ diff --git a/trunk/include/linux/perf_event.h b/trunk/include/linux/perf_event.h index 08855613ceb3..b1f89122bf6a 100644 --- a/trunk/include/linux/perf_event.h +++ b/trunk/include/linux/perf_event.h @@ -54,7 +54,6 @@ enum perf_hw_id { PERF_COUNT_HW_BUS_CYCLES = 6, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, - PERF_COUNT_HW_REF_CPU_CYCLES = 9, PERF_COUNT_HW_MAX, /* non-ABI */ }; @@ -891,7 +890,6 @@ struct perf_event_context { int nr_active; int is_active; int nr_stat; - int nr_freq; int rotate_disable; atomic_t refcount; struct task_struct *task; @@ -1065,12 +1063,12 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) } } -extern struct jump_label_key_deferred perf_sched_events; +extern struct jump_label_key perf_sched_events; static inline void perf_event_task_sched_in(struct task_struct *prev, struct task_struct *task) { - if (static_branch(&perf_sched_events.key)) + if (static_branch(&perf_sched_events)) __perf_event_task_sched_in(prev, task); } @@ -1079,7 +1077,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev, { perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); - if (static_branch(&perf_sched_events.key)) + if (static_branch(&perf_sched_events)) __perf_event_task_sched_out(prev, next); } diff --git a/trunk/include/linux/phonet.h b/trunk/include/linux/phonet.h index f48bfc80cb4b..f53a4167c5f4 100644 --- a/trunk/include/linux/phonet.h +++ b/trunk/include/linux/phonet.h @@ -38,7 +38,6 @@ #define PNPIPE_ENCAP 1 #define PNPIPE_IFINDEX 2 #define PNPIPE_HANDLE 3 -#define PNPIPE_INITSTATE 4 #define PNADDR_ANY 0 #define PNADDR_BROADCAST 0xFC @@ -50,7 +49,6 @@ /* ioctls */ #define SIOCPNGETOBJECT (SIOCPROTOPRIVATE + 0) -#define SIOCPNENABLEPIPE (SIOCPROTOPRIVATE + 13) #define SIOCPNADDRESOURCE (SIOCPROTOPRIVATE + 14) #define SIOCPNDELRESOURCE (SIOCPROTOPRIVATE + 15) diff --git a/trunk/include/linux/pkt_sched.h b/trunk/include/linux/pkt_sched.h index 8f1b928f777c..7281d5acf2f9 100644 --- a/trunk/include/linux/pkt_sched.h +++ b/trunk/include/linux/pkt_sched.h @@ -162,24 +162,25 @@ struct tc_sfq_qopt { unsigned flows; /* Maximal number of flows */ }; -struct tc_sfq_qopt_v1 { - struct tc_sfq_qopt v0; - unsigned int depth; /* max number of packets per flow */ - unsigned int headdrop; -}; - - struct tc_sfq_xstats { __s32 allot; }; +/* + * NOTE: limit, divisor 
and flows are hardwired to code at the moment. + * + * limit=flows=128, divisor=1024; + * + * The only reason for this is efficiency, it is possible + * to change these parameters in compile time. + */ + /* RED section */ enum { TCA_RED_UNSPEC, TCA_RED_PARMS, TCA_RED_STAB, - TCA_RED_MAX_P, __TCA_RED_MAX, }; @@ -193,9 +194,8 @@ struct tc_red_qopt { unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ unsigned char Scell_log; /* cell size for idle damping */ unsigned char flags; -#define TC_RED_ECN 1 -#define TC_RED_HARDDROP 2 -#define TC_RED_ADAPTATIVE 4 +#define TC_RED_ECN 1 +#define TC_RED_HARDDROP 2 }; struct tc_red_xstats { @@ -214,7 +214,6 @@ enum { TCA_GRED_PARMS, TCA_GRED_STAB, TCA_GRED_DPS, - TCA_GRED_MAX_P, __TCA_GRED_MAX, }; @@ -254,7 +253,6 @@ enum { TCA_CHOKE_UNSPEC, TCA_CHOKE_PARMS, TCA_CHOKE_STAB, - TCA_CHOKE_MAX_P, __TCA_CHOKE_MAX, }; @@ -467,7 +465,6 @@ enum { TCA_NETEM_REORDER, TCA_NETEM_CORRUPT, TCA_NETEM_LOSS, - TCA_NETEM_RATE, __TCA_NETEM_MAX, }; @@ -498,13 +495,6 @@ struct tc_netem_corrupt { __u32 correlation; }; -struct tc_netem_rate { - __u32 rate; /* byte/s */ - __s32 packet_overhead; - __u32 cell_size; - __s32 cell_overhead; -}; - enum { NETEM_LOSS_UNSPEC, NETEM_LOSS_GI, /* General Intuitive - 4 state model */ diff --git a/trunk/include/linux/poison.h b/trunk/include/linux/poison.h index 2110a81c5e2a..79159de0e341 100644 --- a/trunk/include/linux/poison.h +++ b/trunk/include/linux/poison.h @@ -40,6 +40,12 @@ #define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */ #define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */ +#ifdef CONFIG_PHYS_ADDR_T_64BIT +#define MEMBLOCK_INACTIVE 0x3a84fb0144c9e71bULL +#else +#define MEMBLOCK_INACTIVE 0x44c9e71bUL +#endif + #define SLUB_RED_INACTIVE 0xbb #define SLUB_RED_ACTIVE 0xcc diff --git a/trunk/include/linux/rcupdate.h b/trunk/include/linux/rcupdate.h index 81c04f4348ec..2cf4226ade7e 100644 --- a/trunk/include/linux/rcupdate.h +++ b/trunk/include/linux/rcupdate.h @@ -51,8 +51,6 @@ extern int rcutorture_runnable; /* for sysctl */ #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) extern void rcutorture_record_test_transition(void); extern void rcutorture_record_progress(unsigned long vernum); -extern void do_trace_rcu_torture_read(char *rcutorturename, - struct rcu_head *rhp); #else static inline void rcutorture_record_test_transition(void) { @@ -60,12 +58,6 @@ static inline void rcutorture_record_test_transition(void) static inline void rcutorture_record_progress(unsigned long vernum) { } -#ifdef CONFIG_RCU_TRACE -extern void do_trace_rcu_torture_read(char *rcutorturename, - struct rcu_head *rhp); -#else -#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0) -#endif #endif #define UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (a) - (b)) @@ -185,10 +177,23 @@ extern void rcu_sched_qs(int cpu); extern void rcu_bh_qs(int cpu); extern void rcu_check_callbacks(int cpu, int user); struct notifier_block; -extern void rcu_idle_enter(void); -extern void rcu_idle_exit(void); -extern void rcu_irq_enter(void); -extern void rcu_irq_exit(void); + +#ifdef CONFIG_NO_HZ + +extern void rcu_enter_nohz(void); +extern void rcu_exit_nohz(void); + +#else /* #ifdef CONFIG_NO_HZ */ + +static inline void rcu_enter_nohz(void) +{ +} + +static inline void rcu_exit_nohz(void) +{ +} + +#endif /* #else #ifdef CONFIG_NO_HZ */ /* * Infrastructure to implement the synchronize_() primitives in @@ -228,30 +233,22 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head) #ifdef CONFIG_DEBUG_LOCK_ALLOC 
-#ifdef CONFIG_PROVE_RCU -extern int rcu_is_cpu_idle(void); -#else /* !CONFIG_PROVE_RCU */ -static inline int rcu_is_cpu_idle(void) -{ - return 0; -} -#endif /* else !CONFIG_PROVE_RCU */ - -static inline void rcu_lock_acquire(struct lockdep_map *map) -{ - WARN_ON_ONCE(rcu_is_cpu_idle()); - lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_); -} - -static inline void rcu_lock_release(struct lockdep_map *map) -{ - WARN_ON_ONCE(rcu_is_cpu_idle()); - lock_release(map, 1, _THIS_IP_); -} - extern struct lockdep_map rcu_lock_map; +# define rcu_read_acquire() \ + lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) +# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) + extern struct lockdep_map rcu_bh_lock_map; +# define rcu_read_acquire_bh() \ + lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) +# define rcu_read_release_bh() lock_release(&rcu_bh_lock_map, 1, _THIS_IP_) + extern struct lockdep_map rcu_sched_lock_map; +# define rcu_read_acquire_sched() \ + lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) +# define rcu_read_release_sched() \ + lock_release(&rcu_sched_lock_map, 1, _THIS_IP_) + extern int debug_lockdep_rcu_enabled(void); /** @@ -265,18 +262,11 @@ extern int debug_lockdep_rcu_enabled(void); * * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot * and while lockdep is disabled. - * - * Note that rcu_read_lock() and the matching rcu_read_unlock() must - * occur in the same context, for example, it is illegal to invoke - * rcu_read_unlock() in process context if the matching rcu_read_lock() - * was invoked from within an irq handler. */ static inline int rcu_read_lock_held(void) { if (!debug_lockdep_rcu_enabled()) return 1; - if (rcu_is_cpu_idle()) - return 0; return lock_is_held(&rcu_lock_map); } @@ -300,19 +290,6 @@ extern int rcu_read_lock_bh_held(void); * * Check debug_lockdep_rcu_enabled() to prevent false positives during boot * and while lockdep is disabled. - * - * Note that if the CPU is in the idle loop from an RCU point of - * view (ie: that we are in the section between rcu_idle_enter() and - * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU - * did an rcu_read_lock(). The reason for this is that RCU ignores CPUs - * that are in such a section, considering these as in extended quiescent - * state, so such a CPU is effectively never in an RCU read-side critical - * section regardless of what RCU primitives it invokes. This state of - * affairs is required --- we need to keep an RCU-free window in idle - * where the CPU may possibly enter into low power mode. This way we can - * notice an extended quiescent state to other CPUs that started a grace - * period. Otherwise we would delay any grace period as long as we run in - * the idle task. 
*/ #ifdef CONFIG_PREEMPT_COUNT static inline int rcu_read_lock_sched_held(void) @@ -321,8 +298,6 @@ static inline int rcu_read_lock_sched_held(void) if (!debug_lockdep_rcu_enabled()) return 1; - if (rcu_is_cpu_idle()) - return 0; if (debug_locks) lockdep_opinion = lock_is_held(&rcu_sched_lock_map); return lockdep_opinion || preempt_count() != 0 || irqs_disabled(); @@ -336,8 +311,12 @@ static inline int rcu_read_lock_sched_held(void) #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -# define rcu_lock_acquire(a) do { } while (0) -# define rcu_lock_release(a) do { } while (0) +# define rcu_read_acquire() do { } while (0) +# define rcu_read_release() do { } while (0) +# define rcu_read_acquire_bh() do { } while (0) +# define rcu_read_release_bh() do { } while (0) +# define rcu_read_acquire_sched() do { } while (0) +# define rcu_read_release_sched() do { } while (0) static inline int rcu_read_lock_held(void) { @@ -658,7 +637,7 @@ static inline void rcu_read_lock(void) { __rcu_read_lock(); __acquire(RCU); - rcu_lock_acquire(&rcu_lock_map); + rcu_read_acquire(); } /* @@ -678,7 +657,7 @@ static inline void rcu_read_lock(void) */ static inline void rcu_read_unlock(void) { - rcu_lock_release(&rcu_lock_map); + rcu_read_release(); __release(RCU); __rcu_read_unlock(); } @@ -694,17 +673,12 @@ static inline void rcu_read_unlock(void) * critical sections in interrupt context can use just rcu_read_lock(), * though this should at least be commented to avoid confusing people * reading the code. - * - * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh() - * must occur in the same context, for example, it is illegal to invoke - * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh() - * was invoked from some other task. */ static inline void rcu_read_lock_bh(void) { local_bh_disable(); __acquire(RCU_BH); - rcu_lock_acquire(&rcu_bh_lock_map); + rcu_read_acquire_bh(); } /* @@ -714,7 +688,7 @@ static inline void rcu_read_lock_bh(void) */ static inline void rcu_read_unlock_bh(void) { - rcu_lock_release(&rcu_bh_lock_map); + rcu_read_release_bh(); __release(RCU_BH); local_bh_enable(); } @@ -726,17 +700,12 @@ static inline void rcu_read_unlock_bh(void) * are being done using call_rcu_sched() or synchronize_rcu_sched(). * Read-side critical sections can also be introduced by anything that * disables preemption, including local_irq_disable() and friends. - * - * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched() - * must occur in the same context, for example, it is illegal to invoke - * rcu_read_unlock_sched() from process context if the matching - * rcu_read_lock_sched() was invoked from an NMI handler. */ static inline void rcu_read_lock_sched(void) { preempt_disable(); __acquire(RCU_SCHED); - rcu_lock_acquire(&rcu_sched_lock_map); + rcu_read_acquire_sched(); } /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. 
*/ @@ -753,7 +722,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void) */ static inline void rcu_read_unlock_sched(void) { - rcu_lock_release(&rcu_sched_lock_map); + rcu_read_release_sched(); __release(RCU_SCHED); preempt_enable(); } diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h index cf0eb342bcba..1c4f3e9b9bc5 100644 --- a/trunk/include/linux/sched.h +++ b/trunk/include/linux/sched.h @@ -273,11 +273,9 @@ extern int runqueue_is_locked(int cpu); #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) extern void select_nohz_load_balancer(int stop_tick); -extern void set_cpu_sd_state_idle(void); extern int get_nohz_timer_target(void); #else static inline void select_nohz_load_balancer(int stop_tick) { } -static inline void set_cpu_sd_state_idle(void) { } #endif /* @@ -485,8 +483,8 @@ struct task_cputime { #define INIT_CPUTIME \ (struct task_cputime) { \ - .utime = 0, \ - .stime = 0, \ + .utime = cputime_zero, \ + .stime = cputime_zero, \ .sum_exec_runtime = 0, \ } @@ -903,10 +901,6 @@ struct sched_group_power { * single CPU. */ unsigned int power, power_orig; - /* - * Number of busy cpus in this group. - */ - atomic_t nr_busy_cpus; }; struct sched_group { @@ -931,15 +925,6 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg) return to_cpumask(sg->cpumask); } -/** - * group_first_cpu - Returns the first cpu in the cpumask of a sched_group. - * @group: The group whose first cpu is to be returned. - */ -static inline unsigned int group_first_cpu(struct sched_group *group) -{ - return cpumask_first(sched_group_cpus(group)); -} - struct sched_domain_attr { int relax_domain_level; }; @@ -1330,8 +1315,8 @@ struct task_struct { * older sibling, respectively. (p->father can be replaced with * p->real_parent->pid) */ - struct task_struct __rcu *real_parent; /* real parent process */ - struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */ + struct task_struct *real_parent; /* real parent process */ + struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */ /* * children/sibling forms the list of my natural children */ @@ -2085,14 +2070,6 @@ extern int sched_setscheduler(struct task_struct *, int, extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); extern struct task_struct *idle_task(int cpu); -/** - * is_idle_task - is the specified task an idle task? - * @tsk: the task in question. - */ -static inline bool is_idle_task(struct task_struct *p) -{ - return p->pid == 0; -} extern struct task_struct *curr_task(int cpu); extern void set_curr_task(int cpu, struct task_struct *p); diff --git a/trunk/include/linux/security.h b/trunk/include/linux/security.h index e8c619d39291..19d8e04e1688 100644 --- a/trunk/include/linux/security.h +++ b/trunk/include/linux/security.h @@ -2056,7 +2056,7 @@ static inline int security_old_inode_init_security(struct inode *inode, char **name, void **value, size_t *len) { - return -EOPNOTSUPP; + return 0; } static inline int security_inode_create(struct inode *dir, diff --git a/trunk/include/linux/skbuff.h b/trunk/include/linux/skbuff.h index 50db9b04a552..fe864885c1ed 100644 --- a/trunk/include/linux/skbuff.h +++ b/trunk/include/linux/skbuff.h @@ -30,7 +30,6 @@ #include #include #include -#include /* Don't change this without changing skb_csum_unnecessary! */ #define CHECKSUM_NONE 0 @@ -88,6 +87,7 @@ * at device setup time. * NETIF_F_HW_CSUM - it is clever device, it is able to checksum * everything. 
+ * NETIF_F_NO_CSUM - loopback or reliable single hop media. * NETIF_F_IP_CSUM - device is dumb. It is able to csum only * TCP/UDP over IPv4. Sigh. Vendors like this * way by an unknown reason. Though, see comment above @@ -128,17 +128,13 @@ struct sk_buff_head { struct sk_buff; -/* To allow 64K frame to be packed as single skb without frag_list we - * require 64K/PAGE_SIZE pages plus 1 additional page to allow for - * buffers which do not start on a page boundary. - * - * Since GRO uses frags we allocate at least 16 regardless of page - * size. +/* To allow 64K frame to be packed as single skb without frag_list. Since + * GRO uses frags we allocate at least 16 regardless of page size. */ -#if (65536/PAGE_SIZE + 1) < 16 +#if (65536/PAGE_SIZE + 2) < 16 #define MAX_SKB_FRAGS 16UL #else -#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1) +#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2) #endif typedef struct skb_frag_struct skb_frag_t; @@ -222,9 +218,6 @@ enum { /* device driver supports TX zero-copy buffers */ SKBTX_DEV_ZEROCOPY = 1 << 4, - - /* generate wifi status information (where possible) */ - SKBTX_WIFI_STATUS = 1 << 5, }; /* @@ -242,15 +235,15 @@ struct ubuf_info { * the end of the header data, ie. at skb->end. */ struct skb_shared_info { - unsigned char nr_frags; - __u8 tx_flags; + unsigned short nr_frags; unsigned short gso_size; /* Warning: this field is not always filled in (UFO)! */ unsigned short gso_segs; unsigned short gso_type; + __be32 ip6_frag_id; + __u8 tx_flags; struct sk_buff *frag_list; struct skb_shared_hwtstamps hwtstamps; - __be32 ip6_frag_id; /* * Warning : all fields before dataref are cleared in __alloc_skb() @@ -359,8 +352,6 @@ typedef unsigned char *sk_buff_data_t; * @ooo_okay: allow the mapping of a socket to a queue to be changed * @l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport * ports. - * @wifi_acked_valid: wifi_acked was set - * @wifi_acked: whether frame was acked on wifi or not * @dma_cookie: a cookie to one of several possible DMA operations * done by skb DMA functions * @secmark: security marking @@ -454,11 +445,10 @@ struct sk_buff { #endif __u8 ooo_okay:1; __u8 l4_rxhash:1; - __u8 wifi_acked_valid:1; - __u8 wifi_acked:1; - /* 10/12 bit hole (depending on ndisc_nodetype presence) */ kmemcheck_bitfield_end(flags2); + /* 0/13 bit hole */ + #ifdef CONFIG_NET_DMA dma_cookie_t dma_cookie; #endif @@ -550,7 +540,6 @@ extern void consume_skb(struct sk_buff *skb); extern void __kfree_skb(struct sk_buff *skb); extern struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int fclone, int node); -extern struct sk_buff *build_skb(void *data); static inline struct sk_buff *alloc_skb(unsigned int size, gfp_t priority) { @@ -572,9 +561,8 @@ extern struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); extern struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority); -extern struct sk_buff *__pskb_copy(struct sk_buff *skb, - int headroom, gfp_t gfp_mask); - +extern struct sk_buff *pskb_copy(struct sk_buff *skb, + gfp_t gfp_mask); extern int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask); @@ -1673,6 +1661,38 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); } +/** + * __netdev_alloc_page - allocate a page for ps-rx on a specific device + * @dev: network device to receive on + * @gfp_mask: alloc_pages_node mask + * + * Allocate a new page. dev currently unused. 
+ * + * %NULL is returned if there is no free memory. + */ +static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask) +{ + return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0); +} + +/** + * netdev_alloc_page - allocate a page for ps-rx on a specific device + * @dev: network device to receive on + * + * Allocate a new page. dev currently unused. + * + * %NULL is returned if there is no free memory. + */ +static inline struct page *netdev_alloc_page(struct net_device *dev) +{ + return __netdev_alloc_page(dev, GFP_ATOMIC); +} + +static inline void netdev_free_page(struct net_device *dev, struct page *page) +{ + __free_page(page); +} + /** * skb_frag_page - retrieve the page refered to by a paged fragment * @frag: the paged fragment @@ -1804,12 +1824,6 @@ static inline dma_addr_t skb_frag_dma_map(struct device *dev, frag->page_offset + offset, size, dir); } -static inline struct sk_buff *pskb_copy(struct sk_buff *skb, - gfp_t gfp_mask) -{ - return __pskb_copy(skb, skb_headroom(skb), gfp_mask); -} - /** * skb_clone_writable - is the header of a clone writable * @skb: buffer to check @@ -2091,8 +2105,7 @@ extern void skb_split(struct sk_buff *skb, extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); -extern struct sk_buff *skb_segment(struct sk_buff *skb, - netdev_features_t features); +extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features); static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer) @@ -2250,15 +2263,6 @@ static inline void skb_tx_timestamp(struct sk_buff *skb) sw_tx_timestamp(skb); } -/** - * skb_complete_wifi_ack - deliver skb with wifi status - * - * @skb: the original outgoing packet - * @acked: ack status - * - */ -void skb_complete_wifi_ack(struct sk_buff *skb, bool acked); - extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); extern __sum16 __skb_checksum_complete(struct sk_buff *skb); diff --git a/trunk/include/linux/smscphy.h b/trunk/include/linux/smscphy.h deleted file mode 100644 index ce718cbce435..000000000000 --- a/trunk/include/linux/smscphy.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef __LINUX_SMSCPHY_H__ -#define __LINUX_SMSCPHY_H__ - -#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */ -#define MII_LAN83C185_IM 30 /* Interrupt Mask */ -#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */ - -#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */ -#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */ -#define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */ -#define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */ -#define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */ -#define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */ -#define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */ - -#define MII_LAN83C185_ISF_INT_ALL (0x0e) - -#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \ - (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \ - MII_LAN83C185_ISF_INT7) - -#define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */ -#define MII_LAN83C185_ENERGYON (1 << 1) /* ENERGYON */ - -#endif /* __LINUX_SMSCPHY_H__ */ diff --git a/trunk/include/linux/sock_diag.h b/trunk/include/linux/sock_diag.h deleted file mode 100644 index 251729a47880..000000000000 --- a/trunk/include/linux/sock_diag.h +++ /dev/null @@ -1,48 +0,0 @@ -#ifndef __SOCK_DIAG_H__ -#define __SOCK_DIAG_H__ - -#include - -#define SOCK_DIAG_BY_FAMILY 20 - -struct sock_diag_req { - __u8 sdiag_family; - __u8 
sdiag_protocol; -}; - -enum { - SK_MEMINFO_RMEM_ALLOC, - SK_MEMINFO_RCVBUF, - SK_MEMINFO_WMEM_ALLOC, - SK_MEMINFO_SNDBUF, - SK_MEMINFO_FWD_ALLOC, - SK_MEMINFO_WMEM_QUEUED, - SK_MEMINFO_OPTMEM, - - SK_MEMINFO_VARS, -}; - -#ifdef __KERNEL__ -struct sk_buff; -struct nlmsghdr; -struct sock; - -struct sock_diag_handler { - __u8 family; - int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh); -}; - -int sock_diag_register(struct sock_diag_handler *h); -void sock_diag_unregister(struct sock_diag_handler *h); - -void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); -void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); - -int sock_diag_check_cookie(void *sk, __u32 *cookie); -void sock_diag_save_cookie(void *sk, __u32 *cookie); - -int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr); - -extern struct sock *sock_diag_nlsk; -#endif /* KERNEL */ -#endif diff --git a/trunk/include/linux/srcu.h b/trunk/include/linux/srcu.h index e1b005918bbb..58971e891f48 100644 --- a/trunk/include/linux/srcu.h +++ b/trunk/include/linux/srcu.h @@ -28,7 +28,6 @@ #define _LINUX_SRCU_H #include -#include struct srcu_struct_array { int c[2]; @@ -61,10 +60,18 @@ int __init_srcu_struct(struct srcu_struct *sp, const char *name, __init_srcu_struct((sp), #sp, &__srcu_key); \ }) +# define srcu_read_acquire(sp) \ + lock_acquire(&(sp)->dep_map, 0, 0, 2, 1, NULL, _THIS_IP_) +# define srcu_read_release(sp) \ + lock_release(&(sp)->dep_map, 1, _THIS_IP_) + #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ int init_srcu_struct(struct srcu_struct *sp); +# define srcu_read_acquire(sp) do { } while (0) +# define srcu_read_release(sp) do { } while (0) + #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ void cleanup_srcu_struct(struct srcu_struct *sp); @@ -83,32 +90,12 @@ long srcu_batches_completed(struct srcu_struct *sp); * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, * this assumes we are in an SRCU read-side critical section unless it can * prove otherwise. - * - * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot - * and while lockdep is disabled. - * - * Note that if the CPU is in the idle loop from an RCU point of view - * (ie: that we are in the section between rcu_idle_enter() and - * rcu_idle_exit()) then srcu_read_lock_held() returns false even if - * the CPU did an srcu_read_lock(). The reason for this is that RCU - * ignores CPUs that are in such a section, considering these as in - * extended quiescent state, so such a CPU is effectively never in an - * RCU read-side critical section regardless of what RCU primitives it - * invokes. This state of affairs is required --- we need to keep an - * RCU-free window in idle where the CPU may possibly enter into low - * power mode. This way we can notice an extended quiescent state to - * other CPUs that started a grace period. Otherwise we would delay any - * grace period as long as we run in the idle task. 
*/ static inline int srcu_read_lock_held(struct srcu_struct *sp) { - if (rcu_is_cpu_idle()) - return 0; - - if (!debug_lockdep_rcu_enabled()) - return 1; - - return lock_is_held(&sp->dep_map); + if (debug_locks) + return lock_is_held(&sp->dep_map); + return 1; } #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ @@ -158,17 +145,12 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp) * one way to indirectly wait on an SRCU grace period is to acquire * a mutex that is held elsewhere while calling synchronize_srcu() or * synchronize_srcu_expedited(). - * - * Note that srcu_read_lock() and the matching srcu_read_unlock() must - * occur in the same context, for example, it is illegal to invoke - * srcu_read_unlock() in an irq handler if the matching srcu_read_lock() - * was invoked in process context. */ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) { int retval = __srcu_read_lock(sp); - rcu_lock_acquire(&(sp)->dep_map); + srcu_read_acquire(sp); return retval; } @@ -182,51 +164,8 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp) { - rcu_lock_release(&(sp)->dep_map); - __srcu_read_unlock(sp, idx); -} - -/** - * srcu_read_lock_raw - register a new reader for an SRCU-protected structure. - * @sp: srcu_struct in which to register the new reader. - * - * Enter an SRCU read-side critical section. Similar to srcu_read_lock(), - * but avoids the RCU-lockdep checking. This means that it is legal to - * use srcu_read_lock_raw() in one context, for example, in an exception - * handler, and then have the matching srcu_read_unlock_raw() in another - * context, for example in the task that took the exception. - * - * However, the entire SRCU read-side critical section must reside within a - * single task. For example, beware of using srcu_read_lock_raw() in - * a device interrupt handler and srcu_read_unlock() in the interrupted - * task: This will not work if interrupts are threaded. - */ -static inline int srcu_read_lock_raw(struct srcu_struct *sp) -{ - unsigned long flags; - int ret; - - local_irq_save(flags); - ret = __srcu_read_lock(sp); - local_irq_restore(flags); - return ret; -} - -/** - * srcu_read_unlock_raw - unregister reader from an SRCU-protected structure. - * @sp: srcu_struct in which to unregister the old reader. - * @idx: return value from corresponding srcu_read_lock_raw(). - * - * Exit an SRCU read-side critical section without lockdep-RCU checking. - * See srcu_read_lock_raw() for more details. 
- */ -static inline void srcu_read_unlock_raw(struct srcu_struct *sp, int idx) -{ - unsigned long flags; - - local_irq_save(flags); + srcu_read_release(sp); __srcu_read_unlock(sp, idx); - local_irq_restore(flags); } #endif diff --git a/trunk/include/linux/ssb/ssb.h b/trunk/include/linux/ssb/ssb.h index dcf35b0f303a..061e560251b4 100644 --- a/trunk/include/linux/ssb/ssb.h +++ b/trunk/include/linux/ssb/ssb.h @@ -94,15 +94,6 @@ struct ssb_sprom { } ghz5; /* 5GHz band */ } antenna_gain; - struct { - struct { - u8 tssipos, extpa_gain, pdet_range, tr_iso, antswlut; - } ghz2; - struct { - u8 tssipos, extpa_gain, pdet_range, tr_iso, antswlut; - } ghz5; - } fem; - /* TODO - add any parameters needed from rev 2, 3, 4, 5 or 8 SPROMs */ }; diff --git a/trunk/include/linux/ssb/ssb_regs.h b/trunk/include/linux/ssb/ssb_regs.h index c814ae6eeb22..98941203a27f 100644 --- a/trunk/include/linux/ssb/ssb_regs.h +++ b/trunk/include/linux/ssb/ssb_regs.h @@ -432,23 +432,6 @@ #define SSB_SPROM8_RXPO2G 0x00FF /* 2GHz RX power offset */ #define SSB_SPROM8_RXPO5G 0xFF00 /* 5GHz RX power offset */ #define SSB_SPROM8_RXPO5G_SHIFT 8 -#define SSB_SPROM8_FEM2G 0x00AE -#define SSB_SPROM8_FEM5G 0x00B0 -#define SSB_SROM8_FEM_TSSIPOS 0x0001 -#define SSB_SROM8_FEM_TSSIPOS_SHIFT 0 -#define SSB_SROM8_FEM_EXTPA_GAIN 0x0006 -#define SSB_SROM8_FEM_EXTPA_GAIN_SHIFT 1 -#define SSB_SROM8_FEM_PDET_RANGE 0x00F8 -#define SSB_SROM8_FEM_PDET_RANGE_SHIFT 3 -#define SSB_SROM8_FEM_TR_ISO 0x0700 -#define SSB_SROM8_FEM_TR_ISO_SHIFT 8 -#define SSB_SROM8_FEM_ANTSWLUT 0xF800 -#define SSB_SROM8_FEM_ANTSWLUT_SHIFT 11 -#define SSB_SPROM8_THERMAL 0x00B2 -#define SSB_SPROM8_MPWR_RAWTS 0x00B4 -#define SSB_SPROM8_TS_SLP_OPT_CORRX 0x00B6 -#define SSB_SPROM8_FOC_HWIQ_IQSWP 0x00B8 -#define SSB_SPROM8_PHYCAL_TEMPDELTA 0x00BA #define SSB_SPROM8_MAXP_BG 0x00C0 /* Max Power 2GHz in path 1 */ #define SSB_SPROM8_MAXP_BG_MASK 0x00FF /* Mask for Max Power 2GHz */ #define SSB_SPROM8_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */ diff --git a/trunk/include/linux/sunrpc/clnt.h b/trunk/include/linux/sunrpc/clnt.h index 2c5993a17c33..3d8f9c44e27d 100644 --- a/trunk/include/linux/sunrpc/clnt.h +++ b/trunk/include/linux/sunrpc/clnt.h @@ -215,7 +215,7 @@ static inline bool __rpc_copy_addr4(struct sockaddr *dst, return true; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, const struct sockaddr *sap2) { @@ -237,10 +237,10 @@ static inline bool __rpc_copy_addr6(struct sockaddr *dst, struct sockaddr_in6 *dsin6 = (struct sockaddr_in6 *) dst; dsin6->sin6_family = ssin6->sin6_family; - dsin6->sin6_addr = ssin6->sin6_addr; + ipv6_addr_copy(&dsin6->sin6_addr, &ssin6->sin6_addr); return true; } -#else /* !(IS_ENABLED(CONFIG_IPV6) */ +#else /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */ static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, const struct sockaddr *sap2) { @@ -252,7 +252,7 @@ static inline bool __rpc_copy_addr6(struct sockaddr *dst, { return false; } -#endif /* !(IS_ENABLED(CONFIG_IPV6) */ +#endif /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */ /** * rpc_cmp_addr - compare the address portion of two sockaddrs. 
diff --git a/trunk/include/linux/tcp.h b/trunk/include/linux/tcp.h index 46a85c9e1f25..7f59ee946983 100644 --- a/trunk/include/linux/tcp.h +++ b/trunk/include/linux/tcp.h @@ -238,11 +238,6 @@ struct tcp_sack_block { u32 end_seq; }; -/*These are used to set the sack_ok field in struct tcp_options_received */ -#define TCP_SACK_SEEN (1 << 0) /*1 = peer is SACK capable, */ -#define TCP_FACK_ENABLED (1 << 1) /*1 = FACK is enabled locally*/ -#define TCP_DSACK_SEEN (1 << 2) /*1 = DSACK was received from peer*/ - struct tcp_options_received { /* PAWS/RTTM data */ long ts_recent_stamp;/* Time we stored ts_recent (for aging) */ diff --git a/trunk/include/linux/tick.h b/trunk/include/linux/tick.h index ab8be90b5cc9..b232ccc0ee29 100644 --- a/trunk/include/linux/tick.h +++ b/trunk/include/linux/tick.h @@ -7,7 +7,6 @@ #define _LINUX_TICK_H #include -#include #ifdef CONFIG_GENERIC_CLOCKEVENTS @@ -122,16 +121,14 @@ static inline int tick_oneshot_mode_active(void) { return 0; } #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ # ifdef CONFIG_NO_HZ -extern void tick_nohz_idle_enter(void); -extern void tick_nohz_idle_exit(void); -extern void tick_nohz_irq_exit(void); +extern void tick_nohz_stop_sched_tick(int inidle); +extern void tick_nohz_restart_sched_tick(void); extern ktime_t tick_nohz_get_sleep_length(void); extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); # else -static inline void tick_nohz_idle_enter(void) { } -static inline void tick_nohz_idle_exit(void) { } - +static inline void tick_nohz_stop_sched_tick(int inidle) { } +static inline void tick_nohz_restart_sched_tick(void) { } static inline ktime_t tick_nohz_get_sleep_length(void) { ktime_t len = { .tv64 = NSEC_PER_SEC/HZ }; diff --git a/trunk/include/linux/unix_diag.h b/trunk/include/linux/unix_diag.h deleted file mode 100644 index b1d2bf16b33c..000000000000 --- a/trunk/include/linux/unix_diag.h +++ /dev/null @@ -1,54 +0,0 @@ -#ifndef __UNIX_DIAG_H__ -#define __UNIX_DIAG_H__ - -#include - -struct unix_diag_req { - __u8 sdiag_family; - __u8 sdiag_protocol; - __u16 pad; - __u32 udiag_states; - __u32 udiag_ino; - __u32 udiag_show; - __u32 udiag_cookie[2]; -}; - -#define UDIAG_SHOW_NAME 0x00000001 /* show name (not path) */ -#define UDIAG_SHOW_VFS 0x00000002 /* show VFS inode info */ -#define UDIAG_SHOW_PEER 0x00000004 /* show peer socket info */ -#define UDIAG_SHOW_ICONS 0x00000008 /* show pending connections */ -#define UDIAG_SHOW_RQLEN 0x00000010 /* show skb receive queue len */ -#define UDIAG_SHOW_MEMINFO 0x00000020 /* show memory info of a socket */ - -struct unix_diag_msg { - __u8 udiag_family; - __u8 udiag_type; - __u8 udiag_state; - __u8 pad; - - __u32 udiag_ino; - __u32 udiag_cookie[2]; -}; - -enum { - UNIX_DIAG_NAME, - UNIX_DIAG_VFS, - UNIX_DIAG_PEER, - UNIX_DIAG_ICONS, - UNIX_DIAG_RQLEN, - UNIX_DIAG_MEMINFO, - - UNIX_DIAG_MAX, -}; - -struct unix_diag_vfs { - __u32 udiag_vfs_ino; - __u32 udiag_vfs_dev; -}; - -struct unix_diag_rqlen { - __u32 udiag_rqueue; - __u32 udiag_wqueue; -}; - -#endif diff --git a/trunk/include/linux/virtio_config.h b/trunk/include/linux/virtio_config.h index 5206d6541da5..e9e72bda1b72 100644 --- a/trunk/include/linux/virtio_config.h +++ b/trunk/include/linux/virtio_config.h @@ -102,10 +102,6 @@ * vdev: the virtio_device * This gives the final feature bits for the device: it can change * the dev->feature bits if it wants. 
- * @bus_name: return the bus name associated with the device - * vdev: the virtio_device - * This returns a pointer to the bus name a la pci_name from which - * the caller can then copy. */ typedef void vq_callback_t(struct virtqueue *); struct virtio_config_ops { @@ -123,7 +119,6 @@ struct virtio_config_ops { void (*del_vqs)(struct virtio_device *); u32 (*get_features)(struct virtio_device *vdev); void (*finalize_features)(struct virtio_device *vdev); - const char *(*bus_name)(struct virtio_device *vdev); }; /* If driver didn't advertise the feature, it will never appear. */ @@ -189,14 +184,5 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, return ERR_PTR(err); return vq; } - -static inline -const char *virtio_bus_name(struct virtio_device *vdev) -{ - if (!vdev->config->bus_name) - return "virtio"; - return vdev->config->bus_name(vdev); -} - #endif /* __KERNEL__ */ #endif /* _LINUX_VIRTIO_CONFIG_H */ diff --git a/trunk/include/linux/wait.h b/trunk/include/linux/wait.h index a9ce45e8501c..3efc9f3f43a0 100644 --- a/trunk/include/linux/wait.h +++ b/trunk/include/linux/wait.h @@ -77,13 +77,13 @@ struct task_struct; #define __WAIT_BIT_KEY_INITIALIZER(word, bit) \ { .flags = word, .bit_nr = bit, } -extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *); +extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *); #define init_waitqueue_head(q) \ do { \ static struct lock_class_key __key; \ \ - __init_waitqueue_head((q), #q, &__key); \ + __init_waitqueue_head((q), &__key); \ } while (0) #ifdef CONFIG_LOCKDEP diff --git a/trunk/include/linux/wl12xx.h b/trunk/include/linux/wl12xx.h index 0d6373195d32..4b697395326e 100644 --- a/trunk/include/linux/wl12xx.h +++ b/trunk/include/linux/wl12xx.h @@ -54,9 +54,6 @@ struct wl12xx_platform_data { int board_ref_clock; int board_tcxo_clock; unsigned long platform_quirks; - bool pwr_in_suspend; - - struct wl1271_if_operations *ops; }; /* Platform does not support level trigger interrupts */ @@ -76,6 +73,6 @@ int wl12xx_set_platform_data(const struct wl12xx_platform_data *data) #endif -struct wl12xx_platform_data *wl12xx_get_platform_data(void); +const struct wl12xx_platform_data *wl12xx_get_platform_data(void); #endif diff --git a/trunk/include/net/addrconf.h b/trunk/include/net/addrconf.h index f68dce2d8d88..cbc6bb0a6838 100644 --- a/trunk/include/net/addrconf.h +++ b/trunk/include/net/addrconf.h @@ -151,8 +151,7 @@ extern int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *src_addr); extern int ipv6_is_mld(struct sk_buff *skb, int nexthdr); -extern void addrconf_prefix_rcv(struct net_device *dev, - u8 *opt, int len, bool sllao); +extern void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len); /* * anycast prototypes (anycast.c) diff --git a/trunk/include/net/af_unix.h b/trunk/include/net/af_unix.h index 5a4e29b168c9..91ab5b01678a 100644 --- a/trunk/include/net/af_unix.h +++ b/trunk/include/net/af_unix.h @@ -11,13 +11,10 @@ extern void unix_notinflight(struct file *fp); extern void unix_gc(void); extern void wait_for_unix_gc(void); extern struct sock *unix_get_socket(struct file *filp); -extern struct sock *unix_peer_get(struct sock *); #define UNIX_HASH_SIZE 256 extern unsigned int unix_tot_inflight; -extern spinlock_t unix_table_lock; -extern struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1]; struct unix_address { atomic_t refcnt; @@ -66,9 +63,6 @@ struct unix_sock { #define peer_wait peer_wq.wait -long unix_inq_len(struct sock *sk); 
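The wait.h change above is the usual per-call-site lockdep key idiom: init_waitqueue_head() hides a function-local static struct lock_class_key, so each call site gets its own lock class (the name argument being dropped here served only as the class name). A small, hypothetical use of the macro follows; struct my_dev and the my_dev_* helpers are invented for illustration.

#include <linux/wait.h>
#include <linux/sched.h>

struct my_dev {
	wait_queue_head_t wq;
	bool ready;
};

static void my_dev_setup(struct my_dev *d)
{
	/* Expands to __init_waitqueue_head(&d->wq, &__key) with a static
	 * lock_class_key unique to this statement. */
	init_waitqueue_head(&d->wq);
	d->ready = false;
}

static int my_dev_wait_ready(struct my_dev *d)
{
	/* Sleeps until another thread sets d->ready and calls wake_up(&d->wq). */
	return wait_event_interruptible(d->wq, d->ready);
}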
-long unix_outq_len(struct sock *sk); - #ifdef CONFIG_SYSCTL extern int unix_sysctl_register(struct net *net); extern void unix_sysctl_unregister(struct net *net); diff --git a/trunk/include/net/arp.h b/trunk/include/net/arp.h index 0013dc87940b..4979af8b1559 100644 --- a/trunk/include/net/arp.h +++ b/trunk/include/net/arp.h @@ -23,7 +23,7 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct neigh_table *tbl, str rcu_read_lock_bh(); nht = rcu_dereference_bh(tbl->nht); - hash_val = arp_hashfn(key, dev, nht->hash_rnd[0]) >> (32 - nht->hash_shift); + hash_val = arp_hashfn(key, dev, nht->hash_rnd) >> (32 - nht->hash_shift); for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); n != NULL; n = rcu_dereference_bh(n->next)) { diff --git a/trunk/include/net/atmclip.h b/trunk/include/net/atmclip.h index 5865924d4aac..497ef6444a7a 100644 --- a/trunk/include/net/atmclip.h +++ b/trunk/include/net/atmclip.h @@ -15,6 +15,7 @@ #define CLIP_VCC(vcc) ((struct clip_vcc *) ((vcc)->user_back)) +#define NEIGH2ENTRY(neigh) ((struct atmarp_entry *) (neigh)->primary_key) struct sk_buff; @@ -35,18 +36,24 @@ struct clip_vcc { struct atmarp_entry { + __be32 ip; /* IP address */ struct clip_vcc *vccs; /* active VCCs; NULL if resolution is pending */ unsigned long expires; /* entry expiration time */ struct neighbour *neigh; /* neighbour back-pointer */ }; + #define PRIV(dev) ((struct clip_priv *) netdev_priv(dev)) + struct clip_priv { int number; /* for convenience ... */ spinlock_t xoff_lock; /* ensures that pop is atomic (SMP) */ struct net_device *next; /* next CLIP interface */ }; + +extern struct neigh_table *clip_tbl_hook; + #endif diff --git a/trunk/include/net/bluetooth/bluetooth.h b/trunk/include/net/bluetooth/bluetooth.h index abaad6ed9b83..e86af08293a8 100644 --- a/trunk/include/net/bluetooth/bluetooth.h +++ b/trunk/include/net/bluetooth/bluetooth.h @@ -36,11 +36,6 @@ #define PF_BLUETOOTH AF_BLUETOOTH #endif -/* Bluetooth versions */ -#define BLUETOOTH_VER_1_1 1 -#define BLUETOOTH_VER_1_2 2 -#define BLUETOOTH_VER_2_0 3 - /* Reserv for core and drivers use */ #define BT_SKB_RESERVE 8 @@ -82,33 +77,6 @@ struct bt_power { #define BT_POWER_FORCE_ACTIVE_OFF 0 #define BT_POWER_FORCE_ACTIVE_ON 1 -#define BT_CHANNEL_POLICY 10 - -/* BR/EDR only (default policy) - * AMP controllers cannot be used. - * Channel move requests from the remote device are denied. - * If the L2CAP channel is currently using AMP, move the channel to BR/EDR. - */ -#define BT_CHANNEL_POLICY_BREDR_ONLY 0 - -/* BR/EDR Preferred - * Allow use of AMP controllers. - * If the L2CAP channel is currently on AMP, move it to BR/EDR. - * Channel move requests from the remote device are allowed. - */ -#define BT_CHANNEL_POLICY_BREDR_PREFERRED 1 - -/* AMP Preferred - * Allow use of AMP controllers - * If the L2CAP channel is currently on BR/EDR and AMP controller - * resources are available, initiate a channel move to AMP. - * Channel move requests from the remote device are allowed. - * If the L2CAP socket has not been connected yet, try to create - * and configure the channel directly on an AMP controller rather - * than BR/EDR. 
- */ -#define BT_CHANNEL_POLICY_AMP_PREFERRED 2 - __printf(2, 3) int bt_printk(const char *level, const char *fmt, ...); @@ -190,7 +158,7 @@ struct bt_skb_cb { __u8 pkt_type; __u8 incoming; __u16 expect; - __u16 tx_seq; + __u8 tx_seq; __u8 retries; __u8 sar; unsigned short channel; @@ -250,10 +218,32 @@ extern void bt_sysfs_cleanup(void); extern struct dentry *bt_debugfs; +#ifdef CONFIG_BT_L2CAP int l2cap_init(void); void l2cap_exit(void); +#else +static inline int l2cap_init(void) +{ + return 0; +} + +static inline void l2cap_exit(void) +{ +} +#endif +#ifdef CONFIG_BT_SCO int sco_init(void); void sco_exit(void); +#else +static inline int sco_init(void) +{ + return 0; +} + +static inline void sco_exit(void) +{ +} +#endif #endif /* __BLUETOOTH_H */ diff --git a/trunk/include/net/bluetooth/hci.h b/trunk/include/net/bluetooth/hci.h index 5b2fed5eebf2..aaf79af72432 100644 --- a/trunk/include/net/bluetooth/hci.h +++ b/trunk/include/net/bluetooth/hci.h @@ -88,14 +88,6 @@ enum { HCI_RESET, }; -/* - * BR/EDR and/or LE controller flags: the flags defined here should represent - * states from the controller. - */ -enum { - HCI_LE_SCAN, -}; - /* HCI ioctl defines */ #define HCIDEVUP _IOW('H', 201, int) #define HCIDEVDOWN _IOW('H', 202, int) @@ -210,7 +202,6 @@ enum { #define LMP_EV4 0x01 #define LMP_EV5 0x02 -#define LMP_NO_BREDR 0x20 #define LMP_LE 0x40 #define LMP_SNIFF_SUBR 0x02 @@ -273,17 +264,6 @@ enum { #define HCI_LK_SMP_IRK 0x82 #define HCI_LK_SMP_CSRK 0x83 -/* ---- HCI Error Codes ---- */ -#define HCI_ERROR_AUTH_FAILURE 0x05 -#define HCI_ERROR_REJ_BAD_ADDR 0x0f -#define HCI_ERROR_REMOTE_USER_TERM 0x13 -#define HCI_ERROR_LOCAL_HOST_TERM 0x16 -#define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18 - -/* Flow control modes */ -#define HCI_FLOW_CTL_MODE_PACKET_BASED 0x00 -#define HCI_FLOW_CTL_MODE_BLOCK_BASED 0x01 - /* ----- HCI Commands ---- */ #define HCI_OP_NOP 0x0000 @@ -466,14 +446,6 @@ struct hci_rp_user_confirm_reply { #define HCI_OP_USER_CONFIRM_NEG_REPLY 0x042d -#define HCI_OP_USER_PASSKEY_REPLY 0x042e -struct hci_cp_user_passkey_reply { - bdaddr_t bdaddr; - __le32 passkey; -} __packed; - -#define HCI_OP_USER_PASSKEY_NEG_REPLY 0x042f - #define HCI_OP_REMOTE_OOB_DATA_REPLY 0x0430 struct hci_cp_remote_oob_data_reply { bdaddr_t bdaddr; @@ -690,12 +662,6 @@ struct hci_rp_read_local_oob_data { #define HCI_OP_READ_INQ_RSP_TX_POWER 0x0c58 -#define HCI_OP_READ_FLOW_CONTROL_MODE 0x0c66 -struct hci_rp_read_flow_control_mode { - __u8 status; - __u8 mode; -} __packed; - #define HCI_OP_WRITE_LE_HOST_SUPPORTED 0x0c6d struct hci_cp_write_le_host_supported { __u8 le; @@ -750,14 +716,6 @@ struct hci_rp_read_bd_addr { bdaddr_t bdaddr; } __packed; -#define HCI_OP_READ_DATA_BLOCK_SIZE 0x100a -struct hci_rp_read_data_block_size { - __u8 status; - __le16 max_acl_len; - __le16 block_len; - __le16 num_blocks; -} __packed; - #define HCI_OP_WRITE_PAGE_SCAN_ACTIVITY 0x0c1c struct hci_cp_write_page_scan_activity { __le16 interval; @@ -768,21 +726,6 @@ struct hci_cp_write_page_scan_activity { #define PAGE_SCAN_TYPE_STANDARD 0x00 #define PAGE_SCAN_TYPE_INTERLACED 0x01 -#define HCI_OP_READ_LOCAL_AMP_INFO 0x1409 -struct hci_rp_read_local_amp_info { - __u8 status; - __u8 amp_status; - __le32 total_bw; - __le32 max_bw; - __le32 min_latency; - __le32 max_pdu; - __u8 amp_type; - __le16 pal_cap; - __le16 max_assoc_size; - __le32 max_flush_to; - __le32 be_flush_to; -} __packed; - #define HCI_OP_LE_SET_EVENT_MASK 0x2001 struct hci_cp_le_set_event_mask { __u8 mask[8]; @@ -795,18 +738,6 @@ struct hci_rp_le_read_buffer_size { __u8 
le_max_pkt; } __packed; -#define HCI_OP_LE_SET_SCAN_PARAM 0x200b -struct hci_cp_le_set_scan_param { - __u8 type; - __le16 interval; - __le16 window; - __u8 own_address_type; - __u8 filter_policy; -} __packed; - -#define LE_SCANNING_DISABLED 0x00 -#define LE_SCANNING_ENABLED 0x01 - #define HCI_OP_LE_SET_SCAN_ENABLE 0x200c struct hci_cp_le_set_scan_enable { __u8 enable; @@ -982,14 +913,9 @@ struct hci_ev_role_change { } __packed; #define HCI_EV_NUM_COMP_PKTS 0x13 -struct hci_comp_pkts_info { - __le16 handle; - __le16 count; -} __packed; - struct hci_ev_num_comp_pkts { __u8 num_hndl; - struct hci_comp_pkts_info handles[0]; + /* variable length part */ } __packed; #define HCI_EV_MODE_CHANGE 0x14 @@ -1128,11 +1054,6 @@ struct hci_ev_user_confirm_req { __le32 passkey; } __packed; -#define HCI_EV_USER_PASSKEY_REQUEST 0x34 -struct hci_ev_user_passkey_req { - bdaddr_t bdaddr; -} __packed; - #define HCI_EV_REMOTE_OOB_DATA_REQUEST 0x35 struct hci_ev_remote_oob_data_request { bdaddr_t bdaddr; @@ -1388,6 +1309,4 @@ struct hci_inquiry_req { }; #define IREQ_CACHE_FLUSH 0x0001 -extern int enable_hs; - #endif /* __HCI_H */ diff --git a/trunk/include/net/bluetooth/hci_core.h b/trunk/include/net/bluetooth/hci_core.h index 5e2e98458496..3779ea362257 100644 --- a/trunk/include/net/bluetooth/hci_core.h +++ b/trunk/include/net/bluetooth/hci_core.h @@ -28,8 +28,9 @@ #include #include -/* HCI priority */ -#define HCI_PRIO_MAX 7 +/* HCI upper protocols */ +#define HCI_PROTO_L2CAP 0 +#define HCI_PROTO_SCO 1 /* HCI Core structures */ struct inquiry_data { @@ -50,12 +51,14 @@ struct inquiry_entry { }; struct inquiry_cache { + spinlock_t lock; __u32 timestamp; struct inquiry_entry *list; }; struct hci_conn_hash { struct list_head list; + spinlock_t lock; unsigned int acl_num; unsigned int sco_num; unsigned int le_num; @@ -112,7 +115,7 @@ struct adv_entry { #define NUM_REASSEMBLY 4 struct hci_dev { struct list_head list; - struct mutex lock; + spinlock_t lock; atomic_t refcnt; char name[8]; @@ -147,19 +150,6 @@ struct hci_dev { __u16 sniff_min_interval; __u16 sniff_max_interval; - __u8 amp_status; - __u32 amp_total_bw; - __u32 amp_max_bw; - __u32 amp_min_latency; - __u32 amp_max_pdu; - __u8 amp_type; - __u16 amp_pal_cap; - __u16 amp_assoc_size; - __u32 amp_max_flush_to; - __u32 amp_be_flush_to; - - __u8 flow_ctl_mode; - unsigned int auto_accept_delay; unsigned long quirks; @@ -176,11 +166,6 @@ struct hci_dev { unsigned int sco_pkts; unsigned int le_pkts; - __u16 block_len; - __u16 block_mtu; - __u16 num_blocks; - __u16 block_cnt; - unsigned long acl_last_tx; unsigned long sco_last_tx; unsigned long le_last_tx; @@ -188,18 +173,13 @@ struct hci_dev { struct workqueue_struct *workqueue; struct work_struct power_on; - struct delayed_work power_off; - - __u16 discov_timeout; - struct delayed_work discov_off; - - struct delayed_work service_cache; + struct work_struct power_off; + struct timer_list off_timer; struct timer_list cmd_timer; - - struct work_struct rx_work; - struct work_struct cmd_work; - struct work_struct tx_work; + struct tasklet_struct cmd_task; + struct tasklet_struct rx_task; + struct tasklet_struct tx_task; struct sk_buff_head rx_q; struct sk_buff_head raw_q; @@ -215,8 +195,6 @@ struct hci_dev { __u16 init_last_cmd; - struct list_head mgmt_pending; - struct inquiry_cache inq_cache; struct hci_conn_hash conn_hash; struct list_head blacklist; @@ -228,7 +206,7 @@ struct hci_dev { struct list_head remote_oob_data; struct list_head adv_entries; - struct delayed_work adv_work; + struct timer_list adv_timer; 
struct hci_dev_stats stat; @@ -248,8 +226,6 @@ struct hci_dev { struct module *owner; - unsigned long dev_flags; - int (*open)(struct hci_dev *hdev); int (*close)(struct hci_dev *hdev); int (*flush)(struct hci_dev *hdev); @@ -297,19 +273,20 @@ struct hci_conn { unsigned int sent; struct sk_buff_head data_q; - struct list_head chan_list; - struct delayed_work disc_work; + struct timer_list disc_timer; struct timer_list idle_timer; struct timer_list auto_accept_timer; + struct work_struct work_add; + struct work_struct work_del; + struct device dev; atomic_t devref; struct hci_dev *hdev; void *l2cap_data; void *sco_data; - void *smp_conn; struct hci_conn *link; @@ -318,39 +295,25 @@ struct hci_conn { void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason); }; -struct hci_chan { - struct list_head list; - - struct hci_conn *conn; - struct sk_buff_head data_q; - unsigned int sent; -}; - +extern struct hci_proto *hci_proto[]; extern struct list_head hci_dev_list; extern struct list_head hci_cb_list; extern rwlock_t hci_dev_list_lock; extern rwlock_t hci_cb_list_lock; -/* ----- HCI interface to upper protocols ----- */ -extern int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr); -extern int l2cap_connect_cfm(struct hci_conn *hcon, u8 status); -extern int l2cap_disconn_ind(struct hci_conn *hcon); -extern int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason); -extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt); -extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags); - -extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr); -extern int sco_connect_cfm(struct hci_conn *hcon, __u8 status); -extern int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason); -extern int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb); - /* ----- Inquiry cache ----- */ #define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */ #define INQUIRY_ENTRY_AGE_MAX (HZ*60) /* 60 seconds */ +#define inquiry_cache_lock(c) spin_lock(&c->lock) +#define inquiry_cache_unlock(c) spin_unlock(&c->lock) +#define inquiry_cache_lock_bh(c) spin_lock_bh(&c->lock) +#define inquiry_cache_unlock_bh(c) spin_unlock_bh(&c->lock) + static inline void inquiry_cache_init(struct hci_dev *hdev) { struct inquiry_cache *c = &hdev->inq_cache; + spin_lock_init(&c->lock); c->list = NULL; } @@ -390,15 +353,15 @@ static inline void hci_conn_hash_init(struct hci_dev *hdev) { struct hci_conn_hash *h = &hdev->conn_hash; INIT_LIST_HEAD(&h->list); + spin_lock_init(&h->lock); h->acl_num = 0; h->sco_num = 0; - h->le_num = 0; } static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c) { struct hci_conn_hash *h = &hdev->conn_hash; - list_add_rcu(&c->list, &h->list); + list_add(&c->list, &h->list); switch (c->type) { case ACL_LINK: h->acl_num++; @@ -416,10 +379,7 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c) static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c) { struct hci_conn_hash *h = &hdev->conn_hash; - - list_del_rcu(&c->list); - synchronize_rcu(); - + list_del(&c->list); switch (c->type) { case ACL_LINK: h->acl_num--; @@ -454,18 +414,14 @@ static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev, __u16 handle) { struct hci_conn_hash *h = &hdev->conn_hash; + struct list_head *p; struct hci_conn *c; - rcu_read_lock(); - - list_for_each_entry_rcu(c, &h->list, list) { - if (c->handle == handle) { - rcu_read_unlock(); + list_for_each(p, &h->list) { + c = list_entry(p, struct 
hci_conn, list); + if (c->handle == handle) return c; - } } - rcu_read_unlock(); - return NULL; } @@ -473,19 +429,14 @@ static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev, __u8 type, bdaddr_t *ba) { struct hci_conn_hash *h = &hdev->conn_hash; + struct list_head *p; struct hci_conn *c; - rcu_read_lock(); - - list_for_each_entry_rcu(c, &h->list, list) { - if (c->type == type && !bacmp(&c->dst, ba)) { - rcu_read_unlock(); + list_for_each(p, &h->list) { + c = list_entry(p, struct hci_conn, list); + if (c->type == type && !bacmp(&c->dst, ba)) return c; - } } - - rcu_read_unlock(); - return NULL; } @@ -493,19 +444,14 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev, __u8 type, __u16 state) { struct hci_conn_hash *h = &hdev->conn_hash; + struct list_head *p; struct hci_conn *c; - rcu_read_lock(); - - list_for_each_entry_rcu(c, &h->list, list) { - if (c->type == type && c->state == state) { - rcu_read_unlock(); + list_for_each(p, &h->list) { + c = list_entry(p, struct hci_conn, list); + if (c->type == type && c->state == state) return c; - } } - - rcu_read_unlock(); - return NULL; } @@ -520,10 +466,6 @@ int hci_conn_del(struct hci_conn *conn); void hci_conn_hash_flush(struct hci_dev *hdev); void hci_conn_check_pending(struct hci_dev *hdev); -struct hci_chan *hci_chan_create(struct hci_conn *conn); -int hci_chan_del(struct hci_chan *chan); -void hci_chan_list_flush(struct hci_conn *conn); - struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type); int hci_conn_check_link_mode(struct hci_conn *conn); @@ -533,6 +475,7 @@ int hci_conn_change_link_key(struct hci_conn *conn); int hci_conn_switch_role(struct hci_conn *conn, __u8 role); void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active); +void hci_conn_enter_sniff_mode(struct hci_conn *conn); void hci_conn_hold_device(struct hci_conn *conn); void hci_conn_put_device(struct hci_conn *conn); @@ -540,7 +483,7 @@ void hci_conn_put_device(struct hci_conn *conn); static inline void hci_conn_hold(struct hci_conn *conn) { atomic_inc(&conn->refcnt); - cancel_delayed_work_sync(&conn->disc_work); + del_timer(&conn->disc_timer); } static inline void hci_conn_put(struct hci_conn *conn) @@ -559,9 +502,7 @@ static inline void hci_conn_put(struct hci_conn *conn) } else { timeo = msecs_to_jiffies(10); } - cancel_delayed_work_sync(&conn->disc_work); - queue_delayed_work(conn->hdev->workqueue, - &conn->disc_work, jiffies + timeo); + mod_timer(&conn->disc_timer, jiffies + timeo); } } @@ -593,8 +534,10 @@ static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d) try_module_get(d->owner) ? 
__hci_dev_hold(d) : NULL; \ }) -#define hci_dev_lock(d) mutex_lock(&d->lock) -#define hci_dev_unlock(d) mutex_unlock(&d->lock) +#define hci_dev_lock(d) spin_lock(&d->lock) +#define hci_dev_unlock(d) spin_unlock(&d->lock) +#define hci_dev_lock_bh(d) spin_lock_bh(&d->lock) +#define hci_dev_unlock_bh(d) spin_unlock_bh(&d->lock) struct hci_dev *hci_dev_get(int index); struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst); @@ -602,7 +545,7 @@ struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst); struct hci_dev *hci_alloc_dev(void); void hci_free_dev(struct hci_dev *hdev); int hci_register_dev(struct hci_dev *hdev); -void hci_unregister_dev(struct hci_dev *hdev); +int hci_unregister_dev(struct hci_dev *hdev); int hci_suspend_dev(struct hci_dev *hdev); int hci_resume_dev(struct hci_dev *hdev); int hci_dev_open(__u16 dev); @@ -656,9 +599,8 @@ int hci_recv_frame(struct sk_buff *skb); int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count); int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count); -void hci_init_sysfs(struct hci_dev *hdev); -int hci_add_sysfs(struct hci_dev *hdev); -void hci_del_sysfs(struct hci_dev *hdev); +int hci_register_sysfs(struct hci_dev *hdev); +void hci_unregister_sysfs(struct hci_dev *hdev); void hci_conn_init_sysfs(struct hci_conn *conn); void hci_conn_add_sysfs(struct hci_conn *conn); void hci_conn_del_sysfs(struct hci_conn *conn); @@ -679,40 +621,53 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define lmp_host_le_capable(dev) ((dev)->extfeatures[0] & LMP_HOST_LE) /* ----- HCI protocols ----- */ +struct hci_proto { + char *name; + unsigned int id; + unsigned long flags; + + void *priv; + + int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, + __u8 type); + int (*connect_cfm) (struct hci_conn *conn, __u8 status); + int (*disconn_ind) (struct hci_conn *conn); + int (*disconn_cfm) (struct hci_conn *conn, __u8 reason); + int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb, + __u16 flags); + int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb); + int (*security_cfm) (struct hci_conn *conn, __u8 status, + __u8 encrypt); +}; + static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type) { - switch (type) { - case ACL_LINK: - return l2cap_connect_ind(hdev, bdaddr); + register struct hci_proto *hp; + int mask = 0; - case SCO_LINK: - case ESCO_LINK: - return sco_connect_ind(hdev, bdaddr); + hp = hci_proto[HCI_PROTO_L2CAP]; + if (hp && hp->connect_ind) + mask |= hp->connect_ind(hdev, bdaddr, type); - default: - BT_ERR("unknown link type %d", type); - return -EINVAL; - } + hp = hci_proto[HCI_PROTO_SCO]; + if (hp && hp->connect_ind) + mask |= hp->connect_ind(hdev, bdaddr, type); + + return mask; } static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status) { - switch (conn->type) { - case ACL_LINK: - case LE_LINK: - l2cap_connect_cfm(conn, status); - break; + register struct hci_proto *hp; - case SCO_LINK: - case ESCO_LINK: - sco_connect_cfm(conn, status); - break; + hp = hci_proto[HCI_PROTO_L2CAP]; + if (hp && hp->connect_cfm) + hp->connect_cfm(conn, status); - default: - BT_ERR("unknown link type %d", conn->type); - break; - } + hp = hci_proto[HCI_PROTO_SCO]; + if (hp && hp->connect_cfm) + hp->connect_cfm(conn, status); if (conn->connect_cfm_cb) conn->connect_cfm_cb(conn, status); @@ -720,29 +675,31 @@ static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status) static inline int hci_proto_disconn_ind(struct hci_conn *conn) { - if 
(conn->type != ACL_LINK && conn->type != LE_LINK) - return HCI_ERROR_REMOTE_USER_TERM; + register struct hci_proto *hp; + int reason = 0x13; + + hp = hci_proto[HCI_PROTO_L2CAP]; + if (hp && hp->disconn_ind) + reason = hp->disconn_ind(conn); - return l2cap_disconn_ind(conn); + hp = hci_proto[HCI_PROTO_SCO]; + if (hp && hp->disconn_ind) + reason = hp->disconn_ind(conn); + + return reason; } static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason) { - switch (conn->type) { - case ACL_LINK: - case LE_LINK: - l2cap_disconn_cfm(conn, reason); - break; + register struct hci_proto *hp; - case SCO_LINK: - case ESCO_LINK: - sco_disconn_cfm(conn, reason); - break; + hp = hci_proto[HCI_PROTO_L2CAP]; + if (hp && hp->disconn_cfm) + hp->disconn_cfm(conn, reason); - default: - BT_ERR("unknown link type %d", conn->type); - break; - } + hp = hci_proto[HCI_PROTO_SCO]; + if (hp && hp->disconn_cfm) + hp->disconn_cfm(conn, reason); if (conn->disconn_cfm_cb) conn->disconn_cfm_cb(conn, reason); @@ -750,16 +707,21 @@ static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason) static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status) { + register struct hci_proto *hp; __u8 encrypt; - if (conn->type != ACL_LINK && conn->type != LE_LINK) - return; - if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) return; encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00; - l2cap_security_cfm(conn, status, encrypt); + + hp = hci_proto[HCI_PROTO_L2CAP]; + if (hp && hp->security_cfm) + hp->security_cfm(conn, status, encrypt); + + hp = hci_proto[HCI_PROTO_SCO]; + if (hp && hp->security_cfm) + hp->security_cfm(conn, status, encrypt); if (conn->security_cfm_cb) conn->security_cfm_cb(conn, status); @@ -768,15 +730,23 @@ static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status) static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt) { - if (conn->type != ACL_LINK && conn->type != LE_LINK) - return; + register struct hci_proto *hp; - l2cap_security_cfm(conn, status, encrypt); + hp = hci_proto[HCI_PROTO_L2CAP]; + if (hp && hp->security_cfm) + hp->security_cfm(conn, status, encrypt); + + hp = hci_proto[HCI_PROTO_SCO]; + if (hp && hp->security_cfm) + hp->security_cfm(conn, status, encrypt); if (conn->security_cfm_cb) conn->security_cfm_cb(conn, status); } +int hci_register_proto(struct hci_proto *hproto); +int hci_unregister_proto(struct hci_proto *hproto); + /* ----- HCI callbacks ----- */ struct hci_cb { struct list_head list; @@ -801,13 +771,13 @@ static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 
0x01 : 0x00; - read_lock(&hci_cb_list_lock); + read_lock_bh(&hci_cb_list_lock); list_for_each(p, &hci_cb_list) { struct hci_cb *cb = list_entry(p, struct hci_cb, list); if (cb->security_cfm) cb->security_cfm(conn, status, encrypt); } - read_unlock(&hci_cb_list_lock); + read_unlock_bh(&hci_cb_list_lock); } static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, @@ -823,26 +793,26 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, hci_proto_encrypt_cfm(conn, status, encrypt); - read_lock(&hci_cb_list_lock); + read_lock_bh(&hci_cb_list_lock); list_for_each(p, &hci_cb_list) { struct hci_cb *cb = list_entry(p, struct hci_cb, list); if (cb->security_cfm) cb->security_cfm(conn, status, encrypt); } - read_unlock(&hci_cb_list_lock); + read_unlock_bh(&hci_cb_list_lock); } static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status) { struct list_head *p; - read_lock(&hci_cb_list_lock); + read_lock_bh(&hci_cb_list_lock); list_for_each(p, &hci_cb_list) { struct hci_cb *cb = list_entry(p, struct hci_cb, list); if (cb->key_change_cfm) cb->key_change_cfm(conn, status); } - read_unlock(&hci_cb_list_lock); + read_unlock_bh(&hci_cb_list_lock); } static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, @@ -850,13 +820,13 @@ static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, { struct list_head *p; - read_lock(&hci_cb_list_lock); + read_lock_bh(&hci_cb_list_lock); list_for_each(p, &hci_cb_list) { struct hci_cb *cb = list_entry(p, struct hci_cb, list); if (cb->role_switch_cfm) cb->role_switch_cfm(conn, status, role); } - read_unlock(&hci_cb_list_lock); + read_unlock_bh(&hci_cb_list_lock); } int hci_register_cb(struct hci_cb *hcb); @@ -866,7 +836,7 @@ int hci_register_notifier(struct notifier_block *nb); int hci_unregister_notifier(struct notifier_block *nb); int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param); -void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags); +void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags); void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb); void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode); @@ -879,63 +849,44 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb, /* Management interface */ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len); -int mgmt_index_added(struct hci_dev *hdev); -int mgmt_index_removed(struct hci_dev *hdev); -int mgmt_powered(struct hci_dev *hdev, u8 powered); -int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable); -int mgmt_connectable(struct hci_dev *hdev, u8 connectable); -int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status); -int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, - u8 persistent); -int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, - u8 addr_type); -int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, - u8 addr_type); -int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status); -int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, - u8 addr_type, u8 status); -int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure); -int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, +int mgmt_index_added(u16 index); +int mgmt_index_removed(u16 index); +int mgmt_powered(u16 index, u8 powered); +int mgmt_discoverable(u16 index, u8 discoverable); +int mgmt_connectable(u16 index, u8 
connectable); +int mgmt_new_key(u16 index, struct link_key *key, u8 persistent); +int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 link_type); +int mgmt_disconnected(u16 index, bdaddr_t *bdaddr); +int mgmt_disconnect_failed(u16 index); +int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status); +int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr, u8 secure); +int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status); +int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status); +int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value, + u8 confirm_hint); +int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status); +int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status); -int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, +int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status); +int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status); +int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer, u8 status); -int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr, - __le32 value, u8 confirm_hint); -int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, - u8 status); -int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, - bdaddr_t *bdaddr, u8 status); -int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr); -int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, - u8 status); -int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, - bdaddr_t *bdaddr, u8 status); -int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status); -int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status); -int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash, - u8 *randomizer, u8 status); -int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, - u8 addr_type, u8 *dev_class, s8 rssi, u8 *eir); -int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name); -int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status); -int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status); -int mgmt_discovering(struct hci_dev *hdev, u8 discovering); -int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr); -int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr); +int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi, + u8 *eir); +int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name); +int mgmt_discovering(u16 index, u8 discovering); +int mgmt_device_blocked(u16 index, bdaddr_t *bdaddr); +int mgmt_device_unblocked(u16 index, bdaddr_t *bdaddr); /* HCI info for socket */ #define hci_pi(sk) ((struct hci_pinfo *) sk) -/* HCI socket flags */ -#define HCI_PI_MGMT_INIT 0 - struct hci_pinfo { struct bt_sock bt; struct hci_dev *hdev; struct hci_filter filter; __u32 cmsg_mask; unsigned short channel; - unsigned long flags; }; /* HCI security filter */ @@ -964,7 +915,4 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16]); void hci_le_ltk_neg_reply(struct hci_conn *conn); -int hci_do_inquiry(struct hci_dev *hdev, u8 length); -int hci_cancel_inquiry(struct hci_dev *hdev); - #endif /* __HCI_CORE_H */ diff --git a/trunk/include/net/bluetooth/l2cap.h b/trunk/include/net/bluetooth/l2cap.h index 68f589150692..6cc18f371675 100644 --- a/trunk/include/net/bluetooth/l2cap.h +++ 
b/trunk/include/net/bluetooth/l2cap.h @@ -27,23 +27,17 @@ #ifndef __L2CAP_H #define __L2CAP_H -#include - /* L2CAP defaults */ #define L2CAP_DEFAULT_MTU 672 #define L2CAP_DEFAULT_MIN_MTU 48 #define L2CAP_DEFAULT_FLUSH_TO 0xffff #define L2CAP_DEFAULT_TX_WINDOW 63 -#define L2CAP_DEFAULT_EXT_WINDOW 0x3FFF #define L2CAP_DEFAULT_MAX_TX 3 #define L2CAP_DEFAULT_RETRANS_TO 2000 /* 2 seconds */ #define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */ #define L2CAP_DEFAULT_MAX_PDU_SIZE 1009 /* Sized for 3-DH5 packet */ #define L2CAP_DEFAULT_ACK_TO 200 #define L2CAP_LE_DEFAULT_MTU 23 -#define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF -#define L2CAP_DEFAULT_SDU_ITIME 0xFFFFFFFF -#define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF #define L2CAP_DISC_TIMEOUT (100) #define L2CAP_DISC_REJ_TIMEOUT (5000) /* 5 seconds */ @@ -97,82 +91,52 @@ struct l2cap_conninfo { #define L2CAP_ECHO_RSP 0x09 #define L2CAP_INFO_REQ 0x0a #define L2CAP_INFO_RSP 0x0b -#define L2CAP_CREATE_CHAN_REQ 0x0c -#define L2CAP_CREATE_CHAN_RSP 0x0d -#define L2CAP_MOVE_CHAN_REQ 0x0e -#define L2CAP_MOVE_CHAN_RSP 0x0f -#define L2CAP_MOVE_CHAN_CFM 0x10 -#define L2CAP_MOVE_CHAN_CFM_RSP 0x11 #define L2CAP_CONN_PARAM_UPDATE_REQ 0x12 #define L2CAP_CONN_PARAM_UPDATE_RSP 0x13 -/* L2CAP extended feature mask */ +/* L2CAP feature mask */ #define L2CAP_FEAT_FLOWCTL 0x00000001 #define L2CAP_FEAT_RETRANS 0x00000002 -#define L2CAP_FEAT_BIDIR_QOS 0x00000004 #define L2CAP_FEAT_ERTM 0x00000008 #define L2CAP_FEAT_STREAMING 0x00000010 #define L2CAP_FEAT_FCS 0x00000020 -#define L2CAP_FEAT_EXT_FLOW 0x00000040 #define L2CAP_FEAT_FIXED_CHAN 0x00000080 -#define L2CAP_FEAT_EXT_WINDOW 0x00000100 -#define L2CAP_FEAT_UCD 0x00000200 /* L2CAP checksum option */ #define L2CAP_FCS_NONE 0x00 #define L2CAP_FCS_CRC16 0x01 -/* L2CAP fixed channels */ -#define L2CAP_FC_L2CAP 0x02 -#define L2CAP_FC_A2MP 0x08 - /* L2CAP Control Field bit masks */ -#define L2CAP_CTRL_SAR 0xC000 -#define L2CAP_CTRL_REQSEQ 0x3F00 -#define L2CAP_CTRL_TXSEQ 0x007E -#define L2CAP_CTRL_SUPERVISE 0x000C - -#define L2CAP_CTRL_RETRANS 0x0080 -#define L2CAP_CTRL_FINAL 0x0080 -#define L2CAP_CTRL_POLL 0x0010 -#define L2CAP_CTRL_FRAME_TYPE 0x0001 /* I- or S-Frame */ - -#define L2CAP_CTRL_TXSEQ_SHIFT 1 -#define L2CAP_CTRL_SUPER_SHIFT 2 -#define L2CAP_CTRL_REQSEQ_SHIFT 8 -#define L2CAP_CTRL_SAR_SHIFT 14 - -/* L2CAP Extended Control Field bit mask */ -#define L2CAP_EXT_CTRL_TXSEQ 0xFFFC0000 -#define L2CAP_EXT_CTRL_SAR 0x00030000 -#define L2CAP_EXT_CTRL_SUPERVISE 0x00030000 -#define L2CAP_EXT_CTRL_REQSEQ 0x0000FFFC - -#define L2CAP_EXT_CTRL_POLL 0x00040000 -#define L2CAP_EXT_CTRL_FINAL 0x00000002 -#define L2CAP_EXT_CTRL_FRAME_TYPE 0x00000001 /* I- or S-Frame */ - -#define L2CAP_EXT_CTRL_REQSEQ_SHIFT 2 -#define L2CAP_EXT_CTRL_SAR_SHIFT 16 -#define L2CAP_EXT_CTRL_SUPER_SHIFT 16 -#define L2CAP_EXT_CTRL_TXSEQ_SHIFT 18 +#define L2CAP_CTRL_SAR 0xC000 +#define L2CAP_CTRL_REQSEQ 0x3F00 +#define L2CAP_CTRL_TXSEQ 0x007E +#define L2CAP_CTRL_RETRANS 0x0080 +#define L2CAP_CTRL_FINAL 0x0080 +#define L2CAP_CTRL_POLL 0x0010 +#define L2CAP_CTRL_SUPERVISE 0x000C +#define L2CAP_CTRL_FRAME_TYPE 0x0001 /* I- or S-Frame */ + +#define L2CAP_CTRL_TXSEQ_SHIFT 1 +#define L2CAP_CTRL_REQSEQ_SHIFT 8 +#define L2CAP_CTRL_SAR_SHIFT 14 /* L2CAP Supervisory Function */ -#define L2CAP_SUPER_RR 0x00 -#define L2CAP_SUPER_REJ 0x01 -#define L2CAP_SUPER_RNR 0x02 -#define L2CAP_SUPER_SREJ 0x03 +#define L2CAP_SUPER_RCV_READY 0x0000 +#define L2CAP_SUPER_REJECT 0x0004 +#define L2CAP_SUPER_RCV_NOT_READY 0x0008 +#define L2CAP_SUPER_SELECT_REJECT 0x000C /* L2CAP Segmentation and 
Reassembly */ -#define L2CAP_SAR_UNSEGMENTED 0x00 -#define L2CAP_SAR_START 0x01 -#define L2CAP_SAR_END 0x02 -#define L2CAP_SAR_CONTINUE 0x03 +#define L2CAP_SDU_UNSEGMENTED 0x0000 +#define L2CAP_SDU_START 0x4000 +#define L2CAP_SDU_END 0x8000 +#define L2CAP_SDU_CONTINUE 0xC000 /* L2CAP Command rej. reasons */ -#define L2CAP_REJ_NOT_UNDERSTOOD 0x0000 -#define L2CAP_REJ_MTU_EXCEEDED 0x0001 -#define L2CAP_REJ_INVALID_CID 0x0002 +#define L2CAP_REJ_NOT_UNDERSTOOD 0x0000 +#define L2CAP_REJ_MTU_EXCEEDED 0x0001 +#define L2CAP_REJ_INVALID_CID 0x0002 + /* L2CAP structures */ struct l2cap_hdr { @@ -180,12 +144,6 @@ struct l2cap_hdr { __le16 cid; } __packed; #define L2CAP_HDR_SIZE 4 -#define L2CAP_ENH_HDR_SIZE 6 -#define L2CAP_EXT_HDR_SIZE 8 - -#define L2CAP_FCS_SIZE 2 -#define L2CAP_SDULEN_SIZE 2 -#define L2CAP_PSMLEN_SIZE 2 struct l2cap_cmd_hdr { __u8 code; @@ -230,15 +188,14 @@ struct l2cap_conn_rsp { #define L2CAP_CID_DYN_START 0x0040 #define L2CAP_CID_DYN_END 0xffff -/* connect/create channel results */ +/* connect result */ #define L2CAP_CR_SUCCESS 0x0000 #define L2CAP_CR_PEND 0x0001 #define L2CAP_CR_BAD_PSM 0x0002 #define L2CAP_CR_SEC_BLOCK 0x0003 #define L2CAP_CR_NO_MEM 0x0004 -#define L2CAP_CR_BAD_AMP 0x0005 -/* connect/create channel status */ +/* connect status */ #define L2CAP_CS_NO_INFO 0x0000 #define L2CAP_CS_AUTHEN_PEND 0x0001 #define L2CAP_CS_AUTHOR_PEND 0x0002 @@ -260,8 +217,6 @@ struct l2cap_conf_rsp { #define L2CAP_CONF_UNACCEPT 0x0001 #define L2CAP_CONF_REJECT 0x0002 #define L2CAP_CONF_UNKNOWN 0x0003 -#define L2CAP_CONF_PENDING 0x0004 -#define L2CAP_CONF_EFS_REJECT 0x0005 struct l2cap_conf_opt { __u8 type; @@ -278,8 +233,6 @@ struct l2cap_conf_opt { #define L2CAP_CONF_QOS 0x03 #define L2CAP_CONF_RFC 0x04 #define L2CAP_CONF_FCS 0x05 -#define L2CAP_CONF_EFS 0x06 -#define L2CAP_CONF_EWS 0x07 #define L2CAP_CONF_MAX_SIZE 22 @@ -298,21 +251,6 @@ struct l2cap_conf_rfc { #define L2CAP_MODE_ERTM 0x03 #define L2CAP_MODE_STREAMING 0x04 -struct l2cap_conf_efs { - __u8 id; - __u8 stype; - __le16 msdu; - __le32 sdu_itime; - __le32 acc_lat; - __le32 flush_to; -} __packed; - -#define L2CAP_SERV_NOTRAFIC 0x00 -#define L2CAP_SERV_BESTEFFORT 0x01 -#define L2CAP_SERV_GUARANTEED 0x02 - -#define L2CAP_BESTEFFORT_ID 0x01 - struct l2cap_disconn_req { __le16 dcid; __le16 scid; @@ -333,57 +271,14 @@ struct l2cap_info_rsp { __u8 data[0]; } __packed; -struct l2cap_create_chan_req { - __le16 psm; - __le16 scid; - __u8 amp_id; -} __packed; - -struct l2cap_create_chan_rsp { - __le16 dcid; - __le16 scid; - __le16 result; - __le16 status; -} __packed; - -struct l2cap_move_chan_req { - __le16 icid; - __u8 dest_amp_id; -} __packed; - -struct l2cap_move_chan_rsp { - __le16 icid; - __le16 result; -} __packed; - -#define L2CAP_MR_SUCCESS 0x0000 -#define L2CAP_MR_PEND 0x0001 -#define L2CAP_MR_BAD_ID 0x0002 -#define L2CAP_MR_SAME_ID 0x0003 -#define L2CAP_MR_NOT_SUPP 0x0004 -#define L2CAP_MR_COLLISION 0x0005 -#define L2CAP_MR_NOT_ALLOWED 0x0006 - -struct l2cap_move_chan_cfm { - __le16 icid; - __le16 result; -} __packed; - -#define L2CAP_MC_CONFIRMED 0x0000 -#define L2CAP_MC_UNCONFIRMED 0x0001 - -struct l2cap_move_chan_cfm_rsp { - __le16 icid; -} __packed; - /* info type */ -#define L2CAP_IT_CL_MTU 0x0001 -#define L2CAP_IT_FEAT_MASK 0x0002 -#define L2CAP_IT_FIXED_CHAN 0x0003 +#define L2CAP_IT_CL_MTU 0x0001 +#define L2CAP_IT_FEAT_MASK 0x0002 +#define L2CAP_IT_FIXED_CHAN 0x0003 /* info result */ -#define L2CAP_IR_SUCCESS 0x0000 -#define L2CAP_IR_NOTSUPP 0x0001 +#define L2CAP_IR_SUCCESS 0x0000 +#define L2CAP_IR_NOTSUPP 0x0001 
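As a worked example of the 16-bit control field whose masks and shifts are restored just above (the __get_txseq()/__get_reqseq() helpers further down in this header encode the same layout), here is an illustrative decoder; my_dump_ctrl is a made-up name and pr_info() is used only for demonstration.

#include <linux/printk.h>
#include <net/bluetooth/l2cap.h>

static void my_dump_ctrl(__u16 ctrl)
{
	if (ctrl & L2CAP_CTRL_FRAME_TYPE) {
		/* S-frame: the supervisory bits sit at their final position */
		pr_info("S-frame super=0x%04x reqseq=%u final=%u\n",
			ctrl & L2CAP_CTRL_SUPERVISE,	/* L2CAP_SUPER_* values */
			(ctrl & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT,
			!!(ctrl & L2CAP_CTRL_FINAL));
	} else {
		/* I-frame: TxSeq in bits 1-6, SAR in the top two bits */
		pr_info("I-frame txseq=%u reqseq=%u sar=0x%04x\n",
			(ctrl & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT,
			(ctrl & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT,
			ctrl & L2CAP_CTRL_SAR);		/* L2CAP_SDU_START etc. */
	}
}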
struct l2cap_conn_param_update_req { __le16 min; @@ -402,7 +297,7 @@ struct l2cap_conn_param_update_rsp { /* ----- L2CAP channels and connections ----- */ struct srej_list { - __u16 tx_seq; + __u8 tx_seq; struct list_head list; }; @@ -424,11 +319,14 @@ struct l2cap_chan { __u16 flush_to; __u8 mode; __u8 chan_type; - __u8 chan_policy; __le16 sport; __u8 sec_level; + __u8 role_switch; + __u8 force_reliable; + __u8 flushable; + __u8 force_active; __u8 ident; @@ -439,8 +337,7 @@ struct l2cap_chan { __u8 fcs; - __u16 tx_win; - __u16 tx_win_max; + __u8 tx_win; __u8 max_tx; __u16 retrans_timeout; __u16 monitor_timeout; @@ -448,45 +345,29 @@ struct l2cap_chan { unsigned long conf_state; unsigned long conn_state; - unsigned long flags; - - __u16 next_tx_seq; - __u16 expected_ack_seq; - __u16 expected_tx_seq; - __u16 buffer_seq; - __u16 buffer_seq_srej; - __u16 srej_save_reqseq; - __u16 frames_sent; - __u16 unacked_frames; + + __u8 next_tx_seq; + __u8 expected_ack_seq; + __u8 expected_tx_seq; + __u8 buffer_seq; + __u8 buffer_seq_srej; + __u8 srej_save_reqseq; + __u8 frames_sent; + __u8 unacked_frames; __u8 retry_count; __u8 num_acked; __u16 sdu_len; struct sk_buff *sdu; struct sk_buff *sdu_last_frag; - __u16 remote_tx_win; + __u8 remote_tx_win; __u8 remote_max_tx; __u16 remote_mps; - __u8 local_id; - __u8 local_stype; - __u16 local_msdu; - __u32 local_sdu_itime; - __u32 local_acc_lat; - __u32 local_flush_to; - - __u8 remote_id; - __u8 remote_stype; - __u16 remote_msdu; - __u32 remote_sdu_itime; - __u32 remote_acc_lat; - __u32 remote_flush_to; - - struct delayed_work chan_timer; - struct delayed_work retrans_timer; - struct delayed_work monitor_timer; - struct delayed_work ack_timer; - + struct timer_list chan_timer; + struct timer_list retrans_timer; + struct timer_list monitor_timer; + struct timer_list ack_timer; struct sk_buff *tx_send_head; struct sk_buff_head tx_q; struct sk_buff_head srej_q; @@ -510,7 +391,6 @@ struct l2cap_ops { struct l2cap_conn { struct hci_conn *hcon; - struct hci_chan *hchan; bdaddr_t *dst; bdaddr_t *src; @@ -522,7 +402,7 @@ struct l2cap_conn { __u8 info_state; __u8 info_ident; - struct delayed_work info_timer; + struct timer_list info_timer; spinlock_t lock; @@ -532,11 +412,11 @@ struct l2cap_conn { __u8 disc_reason; - struct delayed_work security_timer; + struct timer_list security_timer; struct smp_chan *smp_chan; struct list_head chan_l; - struct mutex chan_lock; + rwlock_t chan_lock; }; #define L2CAP_INFO_CL_MTU_REQ_SENT 0x01 @@ -565,9 +445,6 @@ enum { CONF_CONNECT_PEND, CONF_NO_FCS_RECV, CONF_STATE2_DEVICE, - CONF_EWS_RECV, - CONF_LOC_CONF_PEND, - CONF_REM_CONF_PEND, }; #define L2CAP_CONF_MAX_CONF_REQ 2 @@ -585,44 +462,6 @@ enum { CONN_RNR_SENT, }; -/* Definitions for flags in l2cap_chan */ -enum { - FLAG_ROLE_SWITCH, - FLAG_FORCE_ACTIVE, - FLAG_FORCE_RELIABLE, - FLAG_FLUSHABLE, - FLAG_EXT_CTRL, - FLAG_EFS_ENABLE, -}; - -static inline void l2cap_chan_hold(struct l2cap_chan *c) -{ - atomic_inc(&c->refcnt); -} - -static inline void l2cap_chan_put(struct l2cap_chan *c) -{ - if (atomic_dec_and_test(&c->refcnt)) - kfree(c); -} - -static inline void l2cap_set_timer(struct l2cap_chan *chan, - struct delayed_work *work, long timeout) -{ - BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout); - - if (!__cancel_delayed_work(work)) - l2cap_chan_hold(chan); - schedule_delayed_work(work, timeout); -} - -static inline void l2cap_clear_timer(struct l2cap_chan *chan, - struct delayed_work *work) -{ - if (__cancel_delayed_work(work)) - l2cap_chan_put(chan); -} - 
#define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t)) #define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer) #define __set_retrans_timer(c) l2cap_set_timer(c, &c->retrans_timer, \ @@ -635,22 +474,6 @@ static inline void l2cap_clear_timer(struct l2cap_chan *chan, L2CAP_DEFAULT_ACK_TO); #define __clear_ack_timer(c) l2cap_clear_timer(c, &c->ack_timer) -static inline int __seq_offset(struct l2cap_chan *chan, __u16 seq1, __u16 seq2) -{ - int offset; - - offset = (seq1 - seq2) % (chan->tx_win_max + 1); - if (offset < 0) - offset += (chan->tx_win_max + 1); - - return offset; -} - -static inline __u16 __next_seq(struct l2cap_chan *chan, __u16 seq) -{ - return (seq + 1) % (chan->tx_win_max + 1); -} - static inline int l2cap_tx_window_full(struct l2cap_chan *ch) { int sub; @@ -663,164 +486,13 @@ static inline int l2cap_tx_window_full(struct l2cap_chan *ch) return sub == ch->remote_tx_win; } -static inline __u16 __get_reqseq(struct l2cap_chan *chan, __u32 ctrl) -{ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - return (ctrl & L2CAP_EXT_CTRL_REQSEQ) >> - L2CAP_EXT_CTRL_REQSEQ_SHIFT; - else - return (ctrl & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT; -} - -static inline __u32 __set_reqseq(struct l2cap_chan *chan, __u32 reqseq) -{ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - return (reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) & - L2CAP_EXT_CTRL_REQSEQ; - else - return (reqseq << L2CAP_CTRL_REQSEQ_SHIFT) & L2CAP_CTRL_REQSEQ; -} - -static inline __u16 __get_txseq(struct l2cap_chan *chan, __u32 ctrl) -{ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - return (ctrl & L2CAP_EXT_CTRL_TXSEQ) >> - L2CAP_EXT_CTRL_TXSEQ_SHIFT; - else - return (ctrl & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT; -} - -static inline __u32 __set_txseq(struct l2cap_chan *chan, __u32 txseq) -{ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - return (txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) & - L2CAP_EXT_CTRL_TXSEQ; - else - return (txseq << L2CAP_CTRL_TXSEQ_SHIFT) & L2CAP_CTRL_TXSEQ; -} - -static inline bool __is_sframe(struct l2cap_chan *chan, __u32 ctrl) -{ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - return ctrl & L2CAP_EXT_CTRL_FRAME_TYPE; - else - return ctrl & L2CAP_CTRL_FRAME_TYPE; -} - -static inline __u32 __set_sframe(struct l2cap_chan *chan) -{ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - return L2CAP_EXT_CTRL_FRAME_TYPE; - else - return L2CAP_CTRL_FRAME_TYPE; -} - -static inline __u8 __get_ctrl_sar(struct l2cap_chan *chan, __u32 ctrl) -{ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - return (ctrl & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT; - else - return (ctrl & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT; -} - -static inline __u32 __set_ctrl_sar(struct l2cap_chan *chan, __u32 sar) -{ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - return (sar << L2CAP_EXT_CTRL_SAR_SHIFT) & L2CAP_EXT_CTRL_SAR; - else - return (sar << L2CAP_CTRL_SAR_SHIFT) & L2CAP_CTRL_SAR; -} - -static inline bool __is_sar_start(struct l2cap_chan *chan, __u32 ctrl) -{ - return __get_ctrl_sar(chan, ctrl) == L2CAP_SAR_START; -} - -static inline __u32 __get_sar_mask(struct l2cap_chan *chan) -{ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - return L2CAP_EXT_CTRL_SAR; - else - return L2CAP_CTRL_SAR; -} - -static inline __u8 __get_ctrl_super(struct l2cap_chan *chan, __u32 ctrl) -{ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - return (ctrl & L2CAP_EXT_CTRL_SUPERVISE) >> - L2CAP_EXT_CTRL_SUPER_SHIFT; - else - return (ctrl & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT; -} - -static inline __u32 __set_ctrl_super(struct l2cap_chan 
*chan, __u32 super) -{ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - return (super << L2CAP_EXT_CTRL_SUPER_SHIFT) & - L2CAP_EXT_CTRL_SUPERVISE; - else - return (super << L2CAP_CTRL_SUPER_SHIFT) & - L2CAP_CTRL_SUPERVISE; -} - -static inline __u32 __set_ctrl_final(struct l2cap_chan *chan) -{ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - return L2CAP_EXT_CTRL_FINAL; - else - return L2CAP_CTRL_FINAL; -} - -static inline bool __is_ctrl_final(struct l2cap_chan *chan, __u32 ctrl) -{ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - return ctrl & L2CAP_EXT_CTRL_FINAL; - else - return ctrl & L2CAP_CTRL_FINAL; -} - -static inline __u32 __set_ctrl_poll(struct l2cap_chan *chan) -{ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - return L2CAP_EXT_CTRL_POLL; - else - return L2CAP_CTRL_POLL; -} - -static inline bool __is_ctrl_poll(struct l2cap_chan *chan, __u32 ctrl) -{ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - return ctrl & L2CAP_EXT_CTRL_POLL; - else - return ctrl & L2CAP_CTRL_POLL; -} - -static inline __u32 __get_control(struct l2cap_chan *chan, void *p) -{ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - return get_unaligned_le32(p); - else - return get_unaligned_le16(p); -} - -static inline void __put_control(struct l2cap_chan *chan, __u32 control, - void *p) -{ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - return put_unaligned_le32(control, p); - else - return put_unaligned_le16(control, p); -} - -static inline __u8 __ctrl_size(struct l2cap_chan *chan) -{ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - return L2CAP_EXT_HDR_SIZE - L2CAP_HDR_SIZE; - else - return L2CAP_ENH_HDR_SIZE - L2CAP_HDR_SIZE; -} +#define __get_txseq(ctrl) (((ctrl) & L2CAP_CTRL_TXSEQ) >> 1) +#define __get_reqseq(ctrl) (((ctrl) & L2CAP_CTRL_REQSEQ) >> 8) +#define __is_iframe(ctrl) (!((ctrl) & L2CAP_CTRL_FRAME_TYPE)) +#define __is_sframe(ctrl) ((ctrl) & L2CAP_CTRL_FRAME_TYPE) +#define __is_sar_start(ctrl) (((ctrl) & L2CAP_CTRL_SAR) == L2CAP_SDU_START) -extern bool disable_ertm; +extern int disable_ertm; int l2cap_init_sockets(void); void l2cap_cleanup_sockets(void); @@ -834,11 +506,8 @@ int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid); struct l2cap_chan *l2cap_chan_create(struct sock *sk); void l2cap_chan_close(struct l2cap_chan *chan, int reason); void l2cap_chan_destroy(struct l2cap_chan *chan); -inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, - bdaddr_t *dst); -int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, - u32 priority); +int l2cap_chan_connect(struct l2cap_chan *chan); +int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len); void l2cap_chan_busy(struct l2cap_chan *chan, int busy); -int l2cap_chan_check_security(struct l2cap_chan *chan); #endif /* __L2CAP_H */ diff --git a/trunk/include/net/bluetooth/mgmt.h b/trunk/include/net/bluetooth/mgmt.h index be65d3417883..d66da0f94f95 100644 --- a/trunk/include/net/bluetooth/mgmt.h +++ b/trunk/include/net/bluetooth/mgmt.h @@ -23,23 +23,6 @@ #define MGMT_INDEX_NONE 0xFFFF -#define MGMT_STATUS_SUCCESS 0x00 -#define MGMT_STATUS_UNKNOWN_COMMAND 0x01 -#define MGMT_STATUS_NOT_CONNECTED 0x02 -#define MGMT_STATUS_FAILED 0x03 -#define MGMT_STATUS_CONNECT_FAILED 0x04 -#define MGMT_STATUS_AUTH_FAILED 0x05 -#define MGMT_STATUS_NOT_PAIRED 0x06 -#define MGMT_STATUS_NO_RESOURCES 0x07 -#define MGMT_STATUS_TIMEOUT 0x08 -#define MGMT_STATUS_ALREADY_CONNECTED 0x09 -#define MGMT_STATUS_BUSY 0x0a -#define MGMT_STATUS_REJECTED 0x0b -#define MGMT_STATUS_NOT_SUPPORTED 0x0c -#define MGMT_STATUS_INVALID_PARAMS 0x0d 
-#define MGMT_STATUS_DISCONNECTED 0x0e -#define MGMT_STATUS_NOT_POWERED 0x0f - struct mgmt_hdr { __le16 opcode; __le16 index; @@ -61,29 +44,22 @@ struct mgmt_rp_read_index_list { /* Reserve one extra byte for names in management messages so that they * are always guaranteed to be nul-terminated */ #define MGMT_MAX_NAME_LENGTH (HCI_MAX_NAME_LENGTH + 1) -#define MGMT_MAX_SHORT_NAME_LENGTH (10 + 1) - -#define MGMT_SETTING_POWERED 0x00000001 -#define MGMT_SETTING_CONNECTABLE 0x00000002 -#define MGMT_SETTING_FAST_CONNECTABLE 0x00000004 -#define MGMT_SETTING_DISCOVERABLE 0x00000008 -#define MGMT_SETTING_PAIRABLE 0x00000010 -#define MGMT_SETTING_LINK_SECURITY 0x00000020 -#define MGMT_SETTING_SSP 0x00000040 -#define MGMT_SETTING_BREDR 0x00000080 -#define MGMT_SETTING_HS 0x00000100 -#define MGMT_SETTING_LE 0x00000200 #define MGMT_OP_READ_INFO 0x0004 struct mgmt_rp_read_info { + __u8 type; + __u8 powered; + __u8 connectable; + __u8 discoverable; + __u8 pairable; + __u8 sec_mode; bdaddr_t bdaddr; - __u8 version; - __le16 manufacturer; - __le32 supported_settings; - __le32 current_settings; __u8 dev_class[3]; + __u8 features[8]; + __u16 manufacturer; + __u8 hci_ver; + __u16 hci_rev; __u8 name[MGMT_MAX_NAME_LENGTH]; - __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH]; } __packed; struct mgmt_mode { @@ -93,97 +69,70 @@ struct mgmt_mode { #define MGMT_OP_SET_POWERED 0x0005 #define MGMT_OP_SET_DISCOVERABLE 0x0006 -struct mgmt_cp_set_discoverable { - __u8 val; - __u16 timeout; -} __packed; #define MGMT_OP_SET_CONNECTABLE 0x0007 -#define MGMT_OP_SET_FAST_CONNECTABLE 0x0008 - -#define MGMT_OP_SET_PAIRABLE 0x0009 - -#define MGMT_OP_SET_LINK_SECURITY 0x000A - -#define MGMT_OP_SET_SSP 0x000B - -#define MGMT_OP_SET_HS 0x000C - -#define MGMT_OP_SET_LE 0x000D - -#define MGMT_OP_SET_DEV_CLASS 0x000E -struct mgmt_cp_set_dev_class { - __u8 major; - __u8 minor; -} __packed; - -#define MGMT_OP_SET_LOCAL_NAME 0x000F -struct mgmt_cp_set_local_name { - __u8 name[MGMT_MAX_NAME_LENGTH]; -} __packed; +#define MGMT_OP_SET_PAIRABLE 0x0008 -#define MGMT_OP_ADD_UUID 0x0010 +#define MGMT_OP_ADD_UUID 0x0009 struct mgmt_cp_add_uuid { __u8 uuid[16]; __u8 svc_hint; } __packed; -#define MGMT_OP_REMOVE_UUID 0x0011 +#define MGMT_OP_REMOVE_UUID 0x000A struct mgmt_cp_remove_uuid { __u8 uuid[16]; } __packed; -struct mgmt_link_key_info { +#define MGMT_OP_SET_DEV_CLASS 0x000B +struct mgmt_cp_set_dev_class { + __u8 major; + __u8 minor; +} __packed; + +#define MGMT_OP_SET_SERVICE_CACHE 0x000C +struct mgmt_cp_set_service_cache { + __u8 enable; +} __packed; + +struct mgmt_key_info { bdaddr_t bdaddr; u8 type; u8 val[16]; u8 pin_len; + u8 dlen; + u8 data[0]; } __packed; -#define MGMT_OP_LOAD_LINK_KEYS 0x0012 -struct mgmt_cp_load_link_keys { +#define MGMT_OP_LOAD_KEYS 0x000D +struct mgmt_cp_load_keys { __u8 debug_keys; __le16 key_count; - struct mgmt_link_key_info keys[0]; + struct mgmt_key_info keys[0]; } __packed; -#define MGMT_OP_REMOVE_KEYS 0x0013 -struct mgmt_cp_remove_keys { +#define MGMT_OP_REMOVE_KEY 0x000E +struct mgmt_cp_remove_key { bdaddr_t bdaddr; __u8 disconnect; } __packed; -struct mgmt_rp_remove_keys { - bdaddr_t bdaddr; - __u8 status; -}; -#define MGMT_OP_DISCONNECT 0x0014 +#define MGMT_OP_DISCONNECT 0x000F struct mgmt_cp_disconnect { bdaddr_t bdaddr; } __packed; struct mgmt_rp_disconnect { bdaddr_t bdaddr; - __u8 status; } __packed; -#define MGMT_ADDR_BREDR 0x00 -#define MGMT_ADDR_LE_PUBLIC 0x01 -#define MGMT_ADDR_LE_RANDOM 0x02 -#define MGMT_ADDR_INVALID 0xff - -struct mgmt_addr_info { - bdaddr_t bdaddr; - __u8 type; -} __packed; - 
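The management structures above are wire formats: each command on the control channel is a struct mgmt_hdr (opcode and controller index, little-endian; the full definition also carries a parameter-length field that this hunk cuts off) followed immediately by the packed command-specific parameters. A minimal sketch of framing MGMT_OP_DISCONNECT under that assumption, with send_mgmt_cmd() as a hypothetical transport helper rather than a real API:

	struct disconnect_cmd {
		struct mgmt_hdr hdr;
		struct mgmt_cp_disconnect cp;
	} __packed;

	static int send_disconnect(int mgmt_sock, __u16 index, const bdaddr_t *peer)
	{
		struct disconnect_cmd cmd;

		memset(&cmd, 0, sizeof(cmd));
		cmd.hdr.opcode = cpu_to_le16(MGMT_OP_DISCONNECT);
		cmd.hdr.index  = cpu_to_le16(index);
		/* the header's length field (not visible in this hunk) would be
		 * set to sizeof(cmd.cp) here */
		bacpy(&cmd.cp.bdaddr, peer);

		/* send_mgmt_cmd() is a hypothetical helper that writes the raw
		 * buffer to the HCI control channel */
		return send_mgmt_cmd(mgmt_sock, &cmd, sizeof(cmd));
	}
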
-#define MGMT_OP_GET_CONNECTIONS 0x0015 +#define MGMT_OP_GET_CONNECTIONS 0x0010 struct mgmt_rp_get_connections { __le16 conn_count; - struct mgmt_addr_info addr[0]; + bdaddr_t conn[0]; } __packed; -#define MGMT_OP_PIN_CODE_REPLY 0x0016 +#define MGMT_OP_PIN_CODE_REPLY 0x0011 struct mgmt_cp_pin_code_reply { bdaddr_t bdaddr; __u8 pin_len; @@ -194,27 +143,27 @@ struct mgmt_rp_pin_code_reply { uint8_t status; } __packed; -#define MGMT_OP_PIN_CODE_NEG_REPLY 0x0017 +#define MGMT_OP_PIN_CODE_NEG_REPLY 0x0012 struct mgmt_cp_pin_code_neg_reply { bdaddr_t bdaddr; } __packed; -#define MGMT_OP_SET_IO_CAPABILITY 0x0018 +#define MGMT_OP_SET_IO_CAPABILITY 0x0013 struct mgmt_cp_set_io_capability { __u8 io_capability; } __packed; -#define MGMT_OP_PAIR_DEVICE 0x0019 +#define MGMT_OP_PAIR_DEVICE 0x0014 struct mgmt_cp_pair_device { - struct mgmt_addr_info addr; + bdaddr_t bdaddr; __u8 io_cap; } __packed; struct mgmt_rp_pair_device { - struct mgmt_addr_info addr; + bdaddr_t bdaddr; __u8 status; } __packed; -#define MGMT_OP_USER_CONFIRM_REPLY 0x001A +#define MGMT_OP_USER_CONFIRM_REPLY 0x0015 struct mgmt_cp_user_confirm_reply { bdaddr_t bdaddr; } __packed; @@ -223,71 +172,50 @@ struct mgmt_rp_user_confirm_reply { __u8 status; } __packed; -#define MGMT_OP_USER_CONFIRM_NEG_REPLY 0x001B -struct mgmt_cp_user_confirm_neg_reply { - bdaddr_t bdaddr; -} __packed; +#define MGMT_OP_USER_CONFIRM_NEG_REPLY 0x0016 -#define MGMT_OP_USER_PASSKEY_REPLY 0x001C -struct mgmt_cp_user_passkey_reply { - bdaddr_t bdaddr; - __le32 passkey; -} __packed; -struct mgmt_rp_user_passkey_reply { - bdaddr_t bdaddr; - __u8 status; -} __packed; - -#define MGMT_OP_USER_PASSKEY_NEG_REPLY 0x001D -struct mgmt_cp_user_passkey_neg_reply { - bdaddr_t bdaddr; +#define MGMT_OP_SET_LOCAL_NAME 0x0017 +struct mgmt_cp_set_local_name { + __u8 name[MGMT_MAX_NAME_LENGTH]; } __packed; -#define MGMT_OP_READ_LOCAL_OOB_DATA 0x001E +#define MGMT_OP_READ_LOCAL_OOB_DATA 0x0018 struct mgmt_rp_read_local_oob_data { __u8 hash[16]; __u8 randomizer[16]; } __packed; -#define MGMT_OP_ADD_REMOTE_OOB_DATA 0x001F +#define MGMT_OP_ADD_REMOTE_OOB_DATA 0x0019 struct mgmt_cp_add_remote_oob_data { bdaddr_t bdaddr; __u8 hash[16]; __u8 randomizer[16]; } __packed; -#define MGMT_OP_REMOVE_REMOTE_OOB_DATA 0x0020 +#define MGMT_OP_REMOVE_REMOTE_OOB_DATA 0x001A struct mgmt_cp_remove_remote_oob_data { bdaddr_t bdaddr; } __packed; -#define MGMT_OP_START_DISCOVERY 0x0021 -struct mgmt_cp_start_discovery { - __u8 type; -} __packed; +#define MGMT_OP_START_DISCOVERY 0x001B -#define MGMT_OP_STOP_DISCOVERY 0x0022 +#define MGMT_OP_STOP_DISCOVERY 0x001C -#define MGMT_OP_CONFIRM_NAME 0x0023 -struct mgmt_cp_confirm_name { - bdaddr_t bdaddr; - __u8 name_known; -} __packed; -struct mgmt_rp_confirm_name { - bdaddr_t bdaddr; - __u8 status; -} __packed; - -#define MGMT_OP_BLOCK_DEVICE 0x0024 +#define MGMT_OP_BLOCK_DEVICE 0x001D struct mgmt_cp_block_device { bdaddr_t bdaddr; } __packed; -#define MGMT_OP_UNBLOCK_DEVICE 0x0025 +#define MGMT_OP_UNBLOCK_DEVICE 0x001E struct mgmt_cp_unblock_device { bdaddr_t bdaddr; } __packed; +#define MGMT_OP_SET_FAST_CONNECTABLE 0x001F +struct mgmt_cp_set_fast_connectable { + __u8 enable; +} __packed; + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; @@ -309,82 +237,83 @@ struct mgmt_ev_controller_error { #define MGMT_EV_INDEX_REMOVED 0x0005 -#define MGMT_EV_NEW_SETTINGS 0x0006 +#define MGMT_EV_POWERED 0x0006 -#define MGMT_EV_CLASS_OF_DEV_CHANGED 0x0007 -struct mgmt_ev_class_of_dev_changed { - __u8 dev_class[3]; -}; +#define MGMT_EV_DISCOVERABLE 
0x0007 -#define MGMT_EV_LOCAL_NAME_CHANGED 0x0008 -struct mgmt_ev_local_name_changed { - __u8 name[MGMT_MAX_NAME_LENGTH]; - __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH]; -} __packed; +#define MGMT_EV_CONNECTABLE 0x0008 + +#define MGMT_EV_PAIRABLE 0x0009 -#define MGMT_EV_NEW_LINK_KEY 0x0009 -struct mgmt_ev_new_link_key { +#define MGMT_EV_NEW_KEY 0x000A +struct mgmt_ev_new_key { __u8 store_hint; - struct mgmt_link_key_info key; + struct mgmt_key_info key; } __packed; -#define MGMT_EV_CONNECTED 0x000A +#define MGMT_EV_CONNECTED 0x000B +struct mgmt_ev_connected { + bdaddr_t bdaddr; + __u8 link_type; +} __packed; -#define MGMT_EV_DISCONNECTED 0x000B +#define MGMT_EV_DISCONNECTED 0x000C +struct mgmt_ev_disconnected { + bdaddr_t bdaddr; +} __packed; -#define MGMT_EV_CONNECT_FAILED 0x000C +#define MGMT_EV_CONNECT_FAILED 0x000D struct mgmt_ev_connect_failed { - struct mgmt_addr_info addr; + bdaddr_t bdaddr; __u8 status; } __packed; -#define MGMT_EV_PIN_CODE_REQUEST 0x000D +#define MGMT_EV_PIN_CODE_REQUEST 0x000E struct mgmt_ev_pin_code_request { bdaddr_t bdaddr; __u8 secure; } __packed; -#define MGMT_EV_USER_CONFIRM_REQUEST 0x000E +#define MGMT_EV_USER_CONFIRM_REQUEST 0x000F struct mgmt_ev_user_confirm_request { bdaddr_t bdaddr; __u8 confirm_hint; __le32 value; } __packed; -#define MGMT_EV_USER_PASSKEY_REQUEST 0x000F -struct mgmt_ev_user_passkey_request { - bdaddr_t bdaddr; -} __packed; - #define MGMT_EV_AUTH_FAILED 0x0010 struct mgmt_ev_auth_failed { bdaddr_t bdaddr; __u8 status; } __packed; -#define MGMT_EV_DEVICE_FOUND 0x0011 +#define MGMT_EV_LOCAL_NAME_CHANGED 0x0011 +struct mgmt_ev_local_name_changed { + __u8 name[MGMT_MAX_NAME_LENGTH]; +} __packed; + +#define MGMT_EV_DEVICE_FOUND 0x0012 struct mgmt_ev_device_found { - struct mgmt_addr_info addr; + bdaddr_t bdaddr; __u8 dev_class[3]; __s8 rssi; - __u8 confirm_name; __u8 eir[HCI_MAX_EIR_LENGTH]; } __packed; -#define MGMT_EV_REMOTE_NAME 0x0012 +#define MGMT_EV_REMOTE_NAME 0x0013 struct mgmt_ev_remote_name { bdaddr_t bdaddr; __u8 name[MGMT_MAX_NAME_LENGTH]; } __packed; -#define MGMT_EV_DISCOVERING 0x0013 +#define MGMT_EV_DISCOVERING 0x0014 -#define MGMT_EV_DEVICE_BLOCKED 0x0014 +#define MGMT_EV_DEVICE_BLOCKED 0x0015 struct mgmt_ev_device_blocked { bdaddr_t bdaddr; } __packed; -#define MGMT_EV_DEVICE_UNBLOCKED 0x0015 +#define MGMT_EV_DEVICE_UNBLOCKED 0x0016 struct mgmt_ev_device_unblocked { bdaddr_t bdaddr; } __packed; diff --git a/trunk/include/net/bluetooth/smp.h b/trunk/include/net/bluetooth/smp.h index aeaf5fa2b9f1..15b97d549441 100644 --- a/trunk/include/net/bluetooth/smp.h +++ b/trunk/include/net/bluetooth/smp.h @@ -115,10 +115,6 @@ struct smp_cmd_security_req { #define SMP_MIN_ENC_KEY_SIZE 7 #define SMP_MAX_ENC_KEY_SIZE 16 -#define SMP_FLAG_TK_VALID 1 -#define SMP_FLAG_CFM_PENDING 2 -#define SMP_FLAG_MITM_AUTH 3 - struct smp_chan { struct l2cap_conn *conn; u8 preq[7]; /* SMP Pairing Request */ @@ -128,7 +124,6 @@ struct smp_chan { u8 pcnf[16]; /* SMP Pairing Confirm */ u8 tk[16]; /* SMP Temporary Key */ u8 smp_key_size; - unsigned long smp_flags; struct crypto_blkcipher *tfm; struct work_struct confirm; struct work_struct random; @@ -139,7 +134,6 @@ struct smp_chan { int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level); int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb); int smp_distribute_keys(struct l2cap_conn *conn, __u8 force); -int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey); void smp_chan_destroy(struct l2cap_conn *conn); diff --git a/trunk/include/net/caif/caif_dev.h 
b/trunk/include/net/caif/caif_dev.h index ef2dd9438bb1..c011281d92c0 100644 --- a/trunk/include/net/caif/caif_dev.h +++ b/trunk/include/net/caif/caif_dev.h @@ -9,7 +9,6 @@ #include #include -#include #include #include #include @@ -105,24 +104,4 @@ void caif_client_register_refcnt(struct cflayer *adapt_layer, */ void caif_free_client(struct cflayer *adap_layer); -/** - * struct caif_enroll_dev - Enroll a net-device as a CAIF Link layer - * @dev: Network device to enroll. - * @caifdev: Configuration information from CAIF Link Layer - * @link_support: Link layer support layer - * @head_room: Head room needed by link support layer - * @layer: Lowest layer in CAIF stack - * @rcv_fun: Receive function for CAIF stack. - * - * This function enroll a CAIF link layer into CAIF Stack and - * expects the interface to be able to handle CAIF payload. - * The link_support layer is used to add any Link Layer specific - * framing. - */ -void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, - struct cflayer *link_support, int head_room, - struct cflayer **layer, int (**rcv_func)( - struct sk_buff *, struct net_device *, - struct packet_type *, struct net_device *)); - #endif /* CAIF_DEV_H_ */ diff --git a/trunk/include/net/caif/caif_layer.h b/trunk/include/net/caif/caif_layer.h index 0f3a39125f90..35bc7883cf97 100644 --- a/trunk/include/net/caif/caif_layer.h +++ b/trunk/include/net/caif/caif_layer.h @@ -121,7 +121,9 @@ enum caif_direction { * @transmit: Packet transmit funciton. * @ctrlcmd: Used for control signalling upwards in the stack. * @modemcmd: Used for control signaling downwards in the stack. + * @prio: Priority of this layer. * @id: The identity of this layer + * @type: The type of this layer * @name: Name of the layer. * * This structure defines the layered structure in CAIF. @@ -228,7 +230,9 @@ struct cflayer { */ int (*modemcmd) (struct cflayer *layr, enum caif_modemcmd ctrl); + unsigned short prio; unsigned int id; + unsigned int type; char name[CAIF_LAYER_NAME_SZ]; }; diff --git a/trunk/include/net/caif/caif_spi.h b/trunk/include/net/caif/caif_spi.h index aa6a485b0545..87c3d11b8e55 100644 --- a/trunk/include/net/caif/caif_spi.h +++ b/trunk/include/net/caif/caif_spi.h @@ -55,8 +55,8 @@ struct cfspi_xfer { u16 tx_dma_len; u16 rx_dma_len; - void *va_tx[2]; - dma_addr_t pa_tx[2]; + void *va_tx; + dma_addr_t pa_tx; void *va_rx; dma_addr_t pa_rx; }; diff --git a/trunk/include/net/caif/cfcnfg.h b/trunk/include/net/caif/cfcnfg.h index 90b4ff8bad83..3e93a4a4b677 100644 --- a/trunk/include/net/caif/cfcnfg.h +++ b/trunk/include/net/caif/cfcnfg.h @@ -13,6 +13,18 @@ struct cfcnfg; +/** + * enum cfcnfg_phy_type - Types of physical layers defined in CAIF Stack + * + * @CFPHYTYPE_FRAG: Fragmented frames physical interface. + * @CFPHYTYPE_CAIF: Generic CAIF physical interface + */ +enum cfcnfg_phy_type { + CFPHYTYPE_FRAG = 1, + CFPHYTYPE_CAIF, + CFPHYTYPE_MAX +}; + /** * enum cfcnfg_phy_preference - Physical preference HW Abstraction * @@ -54,20 +66,21 @@ void cfcnfg_remove(struct cfcnfg *cfg); * cfcnfg_add_phy_layer() - Adds a physical layer to the CAIF stack. * @cnfg: Pointer to a CAIF configuration object, created by * cfcnfg_create(). + * @phy_type: Specifies the type of physical interface, e.g. + * CFPHYTYPE_FRAG. * @dev: Pointer to link layer device * @phy_layer: Specify the physical layer. The transmit function * MUST be set in the structure. * @pref: The phy (link layer) preference. - * @link_support: Protocol implementation for link layer specific protocol. 
* @fcs: Specify if checksum is used in CAIF Framing Layer. - * @head_room: Head space needed by link specific protocol. + * @stx: Specify if Start Of Frame eXtention is used. */ + void -cfcnfg_add_phy_layer(struct cfcnfg *cnfg, +cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type, struct net_device *dev, struct cflayer *phy_layer, enum cfcnfg_phy_preference pref, - struct cflayer *link_support, - bool fcs, int head_room); + bool fcs, bool stx); /** * cfcnfg_del_phy_layer - Deletes an phy layer from the CAIF stack. diff --git a/trunk/include/net/caif/cfserl.h b/trunk/include/net/caif/cfserl.h index f121299a3427..b8374321b362 100644 --- a/trunk/include/net/caif/cfserl.h +++ b/trunk/include/net/caif/cfserl.h @@ -8,5 +8,5 @@ #define CFSERL_H_ #include -struct cflayer *cfserl_create(int instance, bool use_stx); -#endif +struct cflayer *cfserl_create(int type, int instance, bool use_stx); +#endif /* CFSERL_H_ */ diff --git a/trunk/include/net/cfg80211.h b/trunk/include/net/cfg80211.h index 15f4be7d768e..95852e36713b 100644 --- a/trunk/include/net/cfg80211.h +++ b/trunk/include/net/cfg80211.h @@ -391,8 +391,6 @@ struct cfg80211_crypto_settings { * @assocresp_ies: extra information element(s) to add into (Re)Association * Response frames or %NULL * @assocresp_ies_len: length of assocresp_ies in octets - * @probe_resp_len: length of probe response template (@probe_resp) - * @probe_resp: probe response template (AP mode only) */ struct beacon_parameters { u8 *head, *tail; @@ -410,8 +408,6 @@ struct beacon_parameters { size_t proberesp_ies_len; const u8 *assocresp_ies; size_t assocresp_ies_len; - int probe_resp_len; - u8 *probe_resp; }; /** @@ -505,7 +501,6 @@ struct station_parameters { * @STATION_INFO_CONNECTED_TIME: @connected_time filled * @STATION_INFO_ASSOC_REQ_IES: @assoc_req_ies filled * @STATION_INFO_STA_FLAGS: @sta_flags filled - * @STATION_INFO_BEACON_LOSS_COUNT: @beacon_loss_count filled */ enum station_info_flags { STATION_INFO_INACTIVE_TIME = 1<<0, @@ -526,8 +521,7 @@ enum station_info_flags { STATION_INFO_BSS_PARAM = 1<<15, STATION_INFO_CONNECTED_TIME = 1<<16, STATION_INFO_ASSOC_REQ_IES = 1<<17, - STATION_INFO_STA_FLAGS = 1<<18, - STATION_INFO_BEACON_LOSS_COUNT = 1<<19 + STATION_INFO_STA_FLAGS = 1<<18 }; /** @@ -625,7 +619,6 @@ struct sta_bss_parameters { * the cfg80211_new_sta() calls to notify user space of the IEs. * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets. * @sta_flags: station flags mask & values - * @beacon_loss_count: Number of times beacon loss event has triggered. */ struct station_info { u32 filled; @@ -653,8 +646,6 @@ struct station_info { const u8 *assoc_req_ies; size_t assoc_req_ies_len; - u32 beacon_loss_count; - /* * Note: Add a new enum station_info_flags value for each new field and * use it to check which fields are initialized. @@ -787,7 +778,6 @@ struct mesh_config { u16 min_discovery_timeout; u32 dot11MeshHWMPactivePathTimeout; u16 dot11MeshHWMPpreqMinInterval; - u16 dot11MeshHWMPperrMinInterval; u16 dot11MeshHWMPnetDiameterTraversalTime; u8 dot11MeshHWMPRootMode; u16 dot11MeshHWMPRannInterval; @@ -808,7 +798,6 @@ struct mesh_config { * @ie_len: length of vendor information elements * @is_authenticated: this mesh requires authentication * @is_secure: this mesh uses security - * @mcast_rate: multicat rate for Mesh Node [6Mbps is the default for 802.11a] * * These parameters are fixed when the mesh is created. 
*/ @@ -821,7 +810,6 @@ struct mesh_setup { u8 ie_len; bool is_authenticated; bool is_secure; - int mcast_rate[IEEE80211_NUM_BANDS]; }; /** @@ -1051,15 +1039,6 @@ struct cfg80211_auth_request { bool local_state_change; }; -/** - * enum cfg80211_assoc_req_flags - Over-ride default behaviour in association. - * - * @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n) - */ -enum cfg80211_assoc_req_flags { - ASSOC_REQ_DISABLE_HT = BIT(0), -}; - /** * struct cfg80211_assoc_request - (Re)Association request data * @@ -1071,10 +1050,6 @@ enum cfg80211_assoc_req_flags { * @use_mfp: Use management frame protection (IEEE 802.11w) in this association * @crypto: crypto settings * @prev_bssid: previous BSSID, if not %NULL use reassociate frame - * @flags: See &enum cfg80211_assoc_req_flags - * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask - * will be used in ht_capa. Un-supported values will be ignored. - * @ht_capa_mask: The bits of ht_capa which are to be used. */ struct cfg80211_assoc_request { struct cfg80211_bss *bss; @@ -1082,9 +1057,6 @@ struct cfg80211_assoc_request { size_t ie_len; struct cfg80211_crypto_settings crypto; bool use_mfp; - u32 flags; - struct ieee80211_ht_cap ht_capa; - struct ieee80211_ht_cap ht_capa_mask; }; /** @@ -1154,7 +1126,6 @@ struct cfg80211_ibss_params { u8 *ssid; u8 *bssid; struct ieee80211_channel *channel; - enum nl80211_channel_type channel_type; u8 *ie; u8 ssid_len, ie_len; u16 beacon_interval; @@ -1184,10 +1155,6 @@ struct cfg80211_ibss_params { * @key_len: length of WEP key for shared key authentication * @key_idx: index of WEP key for shared key authentication * @key: WEP key for shared key authentication - * @flags: See &enum cfg80211_assoc_req_flags - * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask - * will be used in ht_capa. Un-supported values will be ignored. - * @ht_capa_mask: The bits of ht_capa which are to be used. */ struct cfg80211_connect_params { struct ieee80211_channel *channel; @@ -1201,9 +1168,6 @@ struct cfg80211_connect_params { struct cfg80211_crypto_settings crypto; const u8 *key; u8 key_len, key_idx; - u32 flags; - struct ieee80211_ht_cap ht_capa; - struct ieee80211_ht_cap ht_capa_mask; }; /** @@ -1351,12 +1315,7 @@ struct cfg80211_gtk_rekey_data { * * @add_station: Add a new station. * @del_station: Remove a station; @mac may be NULL to remove all stations. - * @change_station: Modify a given station. Note that flags changes are not much - * validated in cfg80211, in particular the auth/assoc/authorized flags - * might come to the driver in invalid combinations -- make sure to check - * them, also against the existing state! Also, supported_rates changes are - * not checked in station mode -- drivers need to reject (or ignore) them - * for anything but TDLS peers. + * @change_station: Modify a given station. * @get_station: get station information for the station identified by @mac * @dump_station: dump station callback -- resume dump at index @idx * @@ -1383,9 +1342,6 @@ struct cfg80211_gtk_rekey_data { * doesn't verify much. Note, however, that the passed netdev may be * %NULL as well if the user requested changing the channel for the * device itself, or for a monitor interface. - * @get_channel: Get the current operating channel, should return %NULL if - * there's no single defined operating channel if for example the - * device implements channel hopping for multi-channel virtual interfaces. * * @scan: Request to do a scan. 
If returning zero, the scan request is given * the driver, and will be valid until passed to cfg80211_scan_done(). @@ -1413,8 +1369,7 @@ struct cfg80211_gtk_rekey_data { * have changed. The actual parameter values are available in * struct wiphy. If returning an error, no value should be changed. * - * @set_tx_power: set the transmit power according to the parameters, - * the power passed is in mBm, to get dBm use MBM_TO_DBM(). + * @set_tx_power: set the transmit power according to the parameters * @get_tx_power: store the current TX power into the dbm variable; * return 0 if successful * @@ -1477,11 +1432,6 @@ struct cfg80211_gtk_rekey_data { * * @tdls_mgmt: Transmit a TDLS management frame. * @tdls_oper: Perform a high-level TDLS operation (e.g. TDLS link setup). - * - * @probe_client: probe an associated client, must return a cookie that it - * later passes to cfg80211_probe_status(). - * - * @set_noack_map: Set the NoAck Map for the TIDs. */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -1635,7 +1585,7 @@ struct cfg80211_ops { enum nl80211_channel_type channel_type, bool channel_type_valid, unsigned int wait, const u8 *buf, size_t len, bool no_cck, - bool dont_wait_for_ack, u64 *cookie); + u64 *cookie); int (*mgmt_tx_cancel_wait)(struct wiphy *wiphy, struct net_device *dev, u64 cookie); @@ -1671,15 +1621,6 @@ struct cfg80211_ops { u16 status_code, const u8 *buf, size_t len); int (*tdls_oper)(struct wiphy *wiphy, struct net_device *dev, u8 *peer, enum nl80211_tdls_operation oper); - - int (*probe_client)(struct wiphy *wiphy, struct net_device *dev, - const u8 *peer, u64 *cookie); - - int (*set_noack_map)(struct wiphy *wiphy, - struct net_device *dev, - u16 noack_map); - - struct ieee80211_channel *(*get_channel)(struct wiphy *wiphy); }; /* @@ -1704,9 +1645,7 @@ struct cfg80211_ops { * regulatory domain no user regulatory domain can enable these channels * at a later time. This can be used for devices which do not have * calibration information guaranteed for frequencies or settings - * outside of its regulatory domain. If used in combination with - * WIPHY_FLAG_CUSTOM_REGULATORY the inspected country IE power settings - * will be followed. + * outside of its regulatory domain. * @WIPHY_FLAG_DISABLE_BEACON_HINTS: enable this if your driver needs to ensure * that passive scan flags and beaconing flags may not be lifted by * cfg80211 due to regulatory beacon hints. For more information on beacon @@ -1740,14 +1679,6 @@ struct cfg80211_ops { * teardown packets should be sent through the @NL80211_CMD_TDLS_MGMT * command. When this flag is not set, @NL80211_CMD_TDLS_OPER should be * used for asking the driver/firmware to perform a TDLS operation. - * @WIPHY_FLAG_HAVE_AP_SME: device integrates AP SME - * @WIPHY_FLAG_REPORTS_OBSS: the device will report beacons from other BSSes - * when there are virtual interfaces in AP mode by calling - * cfg80211_report_obss_beacon(). - * @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD: When operating as an AP, the device - * responds to probe-requests in hardware. - * @WIPHY_FLAG_OFFCHAN_TX: Device supports direct off-channel TX. - * @WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL: Device supports remain-on-channel call. 
*/ enum wiphy_flags { WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0), @@ -1766,11 +1697,6 @@ enum wiphy_flags { WIPHY_FLAG_AP_UAPSD = BIT(14), WIPHY_FLAG_SUPPORTS_TDLS = BIT(15), WIPHY_FLAG_TDLS_EXTERNAL_SETUP = BIT(16), - WIPHY_FLAG_HAVE_AP_SME = BIT(17), - WIPHY_FLAG_REPORTS_OBSS = BIT(18), - WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD = BIT(19), - WIPHY_FLAG_OFFCHAN_TX = BIT(20), - WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21), }; /** @@ -1943,7 +1869,6 @@ struct wiphy_wowlan_support { * @software_iftypes: bitmask of software interface types, these are not * subject to any restrictions since they are purely managed in SW. * @flags: wiphy flags, see &enum wiphy_flags - * @features: features advertised to nl80211, see &enum nl80211_feature_flags. * @bss_priv_size: each BSS struct has private data allocated with it, * this variable determines its size * @max_scan_ssids: maximum number of SSIDs the device can scan for in @@ -1982,10 +1907,6 @@ struct wiphy_wowlan_support { * may request, if implemented. * * @wowlan: WoWLAN support information - * - * @ap_sme_capa: AP SME capabilities, flags from &enum nl80211_ap_sme_features. - * @ht_capa_mod_mask: Specify what ht_cap values can be over-ridden. - * If null, then none can be over-ridden. */ struct wiphy { /* assign these fields before you register the wiphy */ @@ -2007,9 +1928,7 @@ struct wiphy { /* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */ u16 interface_modes; - u32 flags, features; - - u32 ap_sme_capa; + u32 flags; enum cfg80211_signal_type signal_type; @@ -2041,13 +1960,6 @@ struct wiphy { u32 available_antennas_tx; u32 available_antennas_rx; - /* - * Bitmap of supported protocols for probe response offloading - * see &enum nl80211_probe_resp_offload_support_attr. Only valid - * when the wiphy flag @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is set. - */ - u32 probe_resp_offload; - /* If multiple wiphys are registered and you're handed e.g. 
* a regular netdev with assigned ieee80211_ptr, you won't * know whether it points to a wiphy your driver has registered @@ -2075,8 +1987,6 @@ struct wiphy { /* dir in debugfs: ieee80211/ */ struct dentry *debugfsdir; - const struct ieee80211_ht_cap *ht_capa_mod_mask; - #ifdef CONFIG_NET_NS /* the network namespace this phy lives in currently */ struct net *_net; @@ -2273,8 +2183,6 @@ struct wireless_dev { int beacon_interval; - u32 ap_unexpected_nlpid; - #ifdef CONFIG_CFG80211_WEXT /* wext data */ struct { @@ -2441,6 +2349,69 @@ extern int ieee80211_radiotap_iterator_next( extern const unsigned char rfc1042_header[6]; extern const unsigned char bridge_tunnel_header[6]; +/* Parsed Information Elements */ +struct ieee802_11_elems { + u8 *ie_start; + size_t total_len; + + /* pointers to IEs */ + u8 *ssid; + u8 *supp_rates; + u8 *fh_params; + u8 *ds_params; + u8 *cf_params; + struct ieee80211_tim_ie *tim; + u8 *ibss_params; + u8 *challenge; + u8 *wpa; + u8 *rsn; + u8 *erp_info; + u8 *ext_supp_rates; + u8 *wmm_info; + u8 *wmm_param; + struct ieee80211_ht_cap *ht_cap_elem; + struct ieee80211_ht_info *ht_info_elem; + struct ieee80211_meshconf_ie *mesh_config; + u8 *mesh_id; + u8 *peering; + u8 *preq; + u8 *prep; + u8 *perr; + struct ieee80211_rann_ie *rann; + u8 *ch_switch_elem; + u8 *country_elem; + u8 *pwr_constr_elem; + u8 *quiet_elem; /* first quite element */ + u8 *timeout_int; + + /* length of them, respectively */ + u8 ssid_len; + u8 supp_rates_len; + u8 fh_params_len; + u8 ds_params_len; + u8 cf_params_len; + u8 tim_len; + u8 ibss_params_len; + u8 challenge_len; + u8 wpa_len; + u8 rsn_len; + u8 erp_info_len; + u8 ext_supp_rates_len; + u8 wmm_info_len; + u8 wmm_param_len; + u8 mesh_id_len; + u8 peering_len; + u8 preq_len; + u8 prep_len; + u8 perr_len; + u8 ch_switch_elem_len; + u8 country_elem_len; + u8 pwr_constr_elem_len; + u8 quiet_elem_len; + u8 num_of_quiet_elem; /* can be more the one */ + u8 timeout_int_len; +}; + /** * ieee80211_get_hdrlen_from_skb - get header length from data * @@ -2665,10 +2636,8 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy); * * This informs cfg80211 that BSS information was found and * the BSS should be updated/added. - * - * NOTE: Returns a referenced struct, must be released with cfg80211_put_bss()! */ -struct cfg80211_bss * __must_check +struct cfg80211_bss* cfg80211_inform_bss_frame(struct wiphy *wiphy, struct ieee80211_channel *channel, struct ieee80211_mgmt *mgmt, size_t len, @@ -2690,10 +2659,8 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy, * * This informs cfg80211 that BSS information was found and * the BSS should be updated/added. - * - * NOTE: Returns a referenced struct, must be released with cfg80211_put_bss()! */ -struct cfg80211_bss * __must_check +struct cfg80211_bss* cfg80211_inform_bss(struct wiphy *wiphy, struct ieee80211_channel *channel, const u8 *bssid, @@ -3075,32 +3042,6 @@ void cfg80211_roamed(struct net_device *dev, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp); -/** - * cfg80211_roamed_bss - notify cfg80211 of roaming - * - * @dev: network device - * @bss: entry of bss to which STA got roamed - * @req_ie: association request IEs (maybe be %NULL) - * @req_ie_len: association request IEs length - * @resp_ie: association response IEs (may be %NULL) - * @resp_ie_len: assoc response IEs length - * @gfp: allocation flags - * - * This is just a wrapper to notify cfg80211 of roaming event with driver - * passing bss to avoid a race in timeout of the bss entry. 
It should be - * called by the underlying driver whenever it roamed from one AP to another - * while connected. Drivers which have roaming implemented in firmware - * may use this function to avoid a race in bss entry timeout where the bss - * entry of the new AP is seen in the driver, but gets timed out by the time - * it is accessed in __cfg80211_roamed() due to delay in scheduling - * rdev->event_work. In case of any failures, the reference is released - * either in cfg80211_roamed_bss() or in __cfg80211_romed(), Otherwise, - * it will be released while diconneting from the current bss. - */ -void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss, - const u8 *req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp); - /** * cfg80211_disconnected - notify cfg80211 that connection was dropped * @@ -3248,74 +3189,6 @@ void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid, void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index, const u8 *bssid, bool preauth, gfp_t gfp); -/** - * cfg80211_rx_spurious_frame - inform userspace about a spurious frame - * @dev: The device the frame matched to - * @addr: the transmitter address - * @gfp: context flags - * - * This function is used in AP mode (only!) to inform userspace that - * a spurious class 3 frame was received, to be able to deauth the - * sender. - * Returns %true if the frame was passed to userspace (or this failed - * for a reason other than not having a subscription.) - */ -bool cfg80211_rx_spurious_frame(struct net_device *dev, - const u8 *addr, gfp_t gfp); - -/** - * cfg80211_rx_unexpected_4addr_frame - inform about unexpected WDS frame - * @dev: The device the frame matched to - * @addr: the transmitter address - * @gfp: context flags - * - * This function is used in AP mode (only!) to inform userspace that - * an associated station sent a 4addr frame but that wasn't expected. - * It is allowed and desirable to send this event only once for each - * station to avoid event flooding. - * Returns %true if the frame was passed to userspace (or this failed - * for a reason other than not having a subscription.) - */ -bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev, - const u8 *addr, gfp_t gfp); - -/** - * cfg80211_probe_status - notify userspace about probe status - * @dev: the device the probe was sent on - * @addr: the address of the peer - * @cookie: the cookie filled in @probe_client previously - * @acked: indicates whether probe was acked or not - * @gfp: allocation flags - */ -void cfg80211_probe_status(struct net_device *dev, const u8 *addr, - u64 cookie, bool acked, gfp_t gfp); - -/** - * cfg80211_report_obss_beacon - report beacon from other APs - * @wiphy: The wiphy that received the beacon - * @frame: the frame - * @len: length of the frame - * @freq: frequency the frame was received on - * @gfp: allocation flags - * - * Use this function to report to userspace when a beacon was - * received. It is not useful to call this when there is no - * netdev that is in AP/GO mode. - */ -void cfg80211_report_obss_beacon(struct wiphy *wiphy, - const u8 *frame, size_t len, - int freq, gfp_t gfp); - -/* - * cfg80211_can_beacon_sec_chan - test if ht40 on extension channel can be used - * @wiphy: the wiphy - * @chan: main channel - * @channel_type: HT mode - */ -int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy, - struct ieee80211_channel *chan, - enum nl80211_channel_type channel_type); - /* Logging, debugging and troubleshooting/diagnostic helpers. 
*/ /* wiphy_printk helpers, similar to dev_printk */ diff --git a/trunk/include/net/dsa.h b/trunk/include/net/dsa.h index 7828ebf99ee1..839f768f9e35 100644 --- a/trunk/include/net/dsa.h +++ b/trunk/include/net/dsa.h @@ -11,11 +11,6 @@ #ifndef __LINUX_NET_DSA_H #define __LINUX_NET_DSA_H -#include -#include -#include -#include - #define DSA_MAX_SWITCHES 4 #define DSA_MAX_PORTS 12 @@ -59,143 +54,8 @@ struct dsa_platform_data { struct dsa_chip_data *chip; }; -struct dsa_switch_tree { - /* - * Configuration data for the platform device that owns - * this dsa switch tree instance. - */ - struct dsa_platform_data *pd; - - /* - * Reference to network device to use, and which tagging - * protocol to use. - */ - struct net_device *master_netdev; - __be16 tag_protocol; - - /* - * The switch and port to which the CPU is attached. - */ - s8 cpu_switch; - s8 cpu_port; - - /* - * Link state polling. - */ - int link_poll_needed; - struct work_struct link_poll_work; - struct timer_list link_poll_timer; - - /* - * Data for the individual switch chips. - */ - struct dsa_switch *ds[DSA_MAX_SWITCHES]; -}; - -struct dsa_switch { - /* - * Parent switch tree, and switch index. - */ - struct dsa_switch_tree *dst; - int index; - - /* - * Configuration data for this switch. - */ - struct dsa_chip_data *pd; - - /* - * The used switch driver. - */ - struct dsa_switch_driver *drv; - - /* - * Reference to mii bus to use. - */ - struct mii_bus *master_mii_bus; - - /* - * Slave mii_bus and devices for the individual ports. - */ - u32 dsa_port_mask; - u32 phys_port_mask; - struct mii_bus *slave_mii_bus; - struct net_device *ports[DSA_MAX_PORTS]; -}; - -static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p) -{ - return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port); -} - -static inline u8 dsa_upstream_port(struct dsa_switch *ds) -{ - struct dsa_switch_tree *dst = ds->dst; - - /* - * If this is the root switch (i.e. the switch that connects - * to the CPU), return the cpu port number on this switch. - * Else return the (DSA) port number that connects to the - * switch that is one hop closer to the cpu. - */ - if (dst->cpu_switch == ds->index) - return dst->cpu_port; - else - return ds->pd->rtable[dst->cpu_switch]; -} - -struct dsa_switch_driver { - struct list_head list; - - __be16 tag_protocol; - int priv_size; - - /* - * Probing and setup. - */ - char *(*probe)(struct mii_bus *bus, int sw_addr); - int (*setup)(struct dsa_switch *ds); - int (*set_addr)(struct dsa_switch *ds, u8 *addr); - - /* - * Access to the switch's PHY registers. - */ - int (*phy_read)(struct dsa_switch *ds, int port, int regnum); - int (*phy_write)(struct dsa_switch *ds, int port, - int regnum, u16 val); - - /* - * Link state polling and IRQ handling. - */ - void (*poll_link)(struct dsa_switch *ds); - - /* - * ethtool hardware statistics. - */ - void (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data); - void (*get_ethtool_stats)(struct dsa_switch *ds, - int port, uint64_t *data); - int (*get_sset_count)(struct dsa_switch *ds); -}; - -void register_switch_driver(struct dsa_switch_driver *type); -void unregister_switch_driver(struct dsa_switch_driver *type); - -/* - * The original DSA tag format and some other tag formats have no - * ethertype, which means that we need to add a little hack to the - * networking receive path to make sure that received frames get - * the right ->protocol assigned to them when one of those tag - * formats is in use. 
- */ -static inline bool dsa_uses_dsa_tags(struct dsa_switch_tree *dst) -{ - return !!(dst->tag_protocol == htons(ETH_P_DSA)); -} +extern bool dsa_uses_dsa_tags(void *dsa_ptr); +extern bool dsa_uses_trailer_tags(void *dsa_ptr); -static inline bool dsa_uses_trailer_tags(struct dsa_switch_tree *dst) -{ - return !!(dst->tag_protocol == htons(ETH_P_TRAILER)); -} #endif diff --git a/trunk/include/net/dst.h b/trunk/include/net/dst.h index 344c8dd02874..75766b42660e 100644 --- a/trunk/include/net/dst.h +++ b/trunk/include/net/dst.h @@ -87,12 +87,12 @@ struct dst_entry { }; }; -static inline struct neighbour *dst_get_neighbour_noref(struct dst_entry *dst) +static inline struct neighbour *dst_get_neighbour(struct dst_entry *dst) { return rcu_dereference(dst->_neighbour); } -static inline struct neighbour *dst_get_neighbour_noref_raw(struct dst_entry *dst) +static inline struct neighbour *dst_get_neighbour_raw(struct dst_entry *dst) { return rcu_dereference_raw(dst->_neighbour); } @@ -393,7 +393,7 @@ static inline void dst_confirm(struct dst_entry *dst) struct neighbour *n; rcu_read_lock(); - n = dst_get_neighbour_noref(dst); + n = dst_get_neighbour(dst); neigh_confirm(n); rcu_read_unlock(); } diff --git a/trunk/include/net/flow.h b/trunk/include/net/flow.h index da1f064a81b3..57f15a7f1cdd 100644 --- a/trunk/include/net/flow.h +++ b/trunk/include/net/flow.h @@ -59,11 +59,8 @@ struct flowi4 { #define flowi4_proto __fl_common.flowic_proto #define flowi4_flags __fl_common.flowic_flags #define flowi4_secid __fl_common.flowic_secid - - /* (saddr,daddr) must be grouped, same order as in IP header */ - __be32 saddr; __be32 daddr; - + __be32 saddr; union flowi_uli uli; #define fl4_sport uli.ports.sport #define fl4_dport uli.ports.dport diff --git a/trunk/include/net/flow_keys.h b/trunk/include/net/flow_keys.h deleted file mode 100644 index 80461c1ae9ef..000000000000 --- a/trunk/include/net/flow_keys.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef _NET_FLOW_KEYS_H -#define _NET_FLOW_KEYS_H - -struct flow_keys { - /* (src,dst) must be grouped, in the same way than in IP header */ - __be32 src; - __be32 dst; - union { - __be32 ports; - __be16 port16[2]; - }; - u8 ip_proto; -}; - -extern bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow); -#endif diff --git a/trunk/include/net/genetlink.h b/trunk/include/net/genetlink.h index 7db32995ccd3..82d8d09faa44 100644 --- a/trunk/include/net/genetlink.h +++ b/trunk/include/net/genetlink.h @@ -128,8 +128,6 @@ extern int genl_register_mc_group(struct genl_family *family, struct genl_multicast_group *grp); extern void genl_unregister_mc_group(struct genl_family *family, struct genl_multicast_group *grp); -extern void genl_notify(struct sk_buff *skb, struct net *net, u32 pid, - u32 group, struct nlmsghdr *nlh, gfp_t flags); /** * genlmsg_put - Add generic netlink header to netlink message diff --git a/trunk/include/net/icmp.h b/trunk/include/net/icmp.h index 75d615649071..f0698b955b73 100644 --- a/trunk/include/net/icmp.h +++ b/trunk/include/net/icmp.h @@ -31,8 +31,8 @@ struct icmp_err { extern const struct icmp_err icmp_err_convert[]; #define ICMP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmp_statistics, field) #define ICMP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field) -#define ICMPMSGOUT_INC_STATS(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field+256) -#define ICMPMSGIN_INC_STATS_BH(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field) +#define ICMPMSGOUT_INC_STATS(net, 
field) SNMP_INC_STATS((net)->mib.icmpmsg_statistics, field+256) +#define ICMPMSGIN_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.icmpmsg_statistics, field) struct dst_entry; struct net_proto_family; diff --git a/trunk/include/net/ieee80211_radiotap.h b/trunk/include/net/ieee80211_radiotap.h index 71392545d0a1..7e2c4d483ad0 100644 --- a/trunk/include/net/ieee80211_radiotap.h +++ b/trunk/include/net/ieee80211_radiotap.h @@ -271,6 +271,14 @@ enum ieee80211_radiotap_type { #define IEEE80211_RADIOTAP_MCS_FEC_LDPC 0x10 +/* Ugly macro to convert literal channel numbers into their mhz equivalents + * There are certianly some conditions that will break this (like feeding it '30') + * but they shouldn't arise since nothing talks on channel 30. */ +#define ieee80211chan2mhz(x) \ + (((x) <= 14) ? \ + (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \ + ((x) + 1000) * 5) + /* helpers */ static inline int ieee80211_get_radiotap_len(unsigned char *data) { diff --git a/trunk/include/net/ieee802154.h b/trunk/include/net/ieee802154.h index ee59f8b188dd..d52685defb11 100644 --- a/trunk/include/net/ieee802154.h +++ b/trunk/include/net/ieee802154.h @@ -21,14 +21,11 @@ * Maxim Gorbachyov * Maxim Osipov * Dmitry Eremin-Solenikov - * Alexander Smirnov */ #ifndef NET_IEEE802154_H #define NET_IEEE802154_H -#define IEEE802154_MTU 127 - #define IEEE802154_FC_TYPE_BEACON 0x0 /* Frame is beacon */ #define IEEE802154_FC_TYPE_DATA 0x1 /* Frame is data */ #define IEEE802154_FC_TYPE_ACK 0x2 /* Frame is acknowledgment */ @@ -59,9 +56,6 @@ (((x) & IEEE802154_FC_DAMODE_MASK) >> IEEE802154_FC_DAMODE_SHIFT) -/* MAC footer size */ -#define IEEE802154_MFR_SIZE 2 /* 2 octets */ - /* MAC's Command Frames Identifiers */ #define IEEE802154_CMD_ASSOCIATION_REQ 0x01 #define IEEE802154_CMD_ASSOCIATION_RESP 0x02 diff --git a/trunk/include/net/inet6_hashtables.h b/trunk/include/net/inet6_hashtables.h index 00cbb4384c79..e46674d5daea 100644 --- a/trunk/include/net/inet6_hashtables.h +++ b/trunk/include/net/inet6_hashtables.h @@ -15,7 +15,7 @@ #define _INET6_HASHTABLES_H -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) #include #include #include @@ -110,5 +110,5 @@ extern struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, const __be16 dport, const int dif); -#endif /* IS_ENABLED(CONFIG_IPV6) */ +#endif /* defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) */ #endif /* _INET6_HASHTABLES_H */ diff --git a/trunk/include/net/inet_connection_sock.h b/trunk/include/net/inet_connection_sock.h index dbf9aab34c82..e6db62e756dc 100644 --- a/trunk/include/net/inet_connection_sock.h +++ b/trunk/include/net/inet_connection_sock.h @@ -143,9 +143,9 @@ static inline void *inet_csk_ca(const struct sock *sk) return (void *)inet_csk(sk)->icsk_ca_priv; } -extern struct sock *inet_csk_clone_lock(const struct sock *sk, - const struct request_sock *req, - const gfp_t priority); +extern struct sock *inet_csk_clone(struct sock *sk, + const struct request_sock *req, + const gfp_t priority); enum inet_csk_ack_state_t { ICSK_ACK_SCHED = 1, diff --git a/trunk/include/net/inet_sock.h b/trunk/include/net/inet_sock.h index e3e405106afe..f941964a9931 100644 --- a/trunk/include/net/inet_sock.h +++ b/trunk/include/net/inet_sock.h @@ -71,7 +71,7 @@ struct ip_options_data { struct inet_request_sock { struct request_sock req; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) u16 inet6_rsk_offset; 
#endif __be16 loc_port; @@ -139,7 +139,7 @@ struct rtable; struct inet_sock { /* sk and pinet6 has to be the first two members of inet_sock */ struct sock sk; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) struct ipv6_pinfo *pinet6; #endif /* Socket demultiplex comparisons on incoming packets. */ @@ -188,7 +188,7 @@ static inline void __inet_sk_copy_descendant(struct sock *sk_to, memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1, sk_from->sk_prot->obj_size - ancestor_size); } -#if !(IS_ENABLED(CONFIG_IPV6)) +#if !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)) static inline void inet_sk_copy_descendant(struct sock *sk_to, const struct sock *sk_from) { diff --git a/trunk/include/net/inet_timewait_sock.h b/trunk/include/net/inet_timewait_sock.h index ba52c830a7a5..e8c25b981205 100644 --- a/trunk/include/net/inet_timewait_sock.h +++ b/trunk/include/net/inet_timewait_sock.h @@ -218,12 +218,20 @@ extern void inet_twsk_purge(struct inet_hashinfo *hashinfo, static inline struct net *twsk_net(const struct inet_timewait_sock *twsk) { - return read_pnet(&twsk->tw_net); +#ifdef CONFIG_NET_NS + return rcu_dereference_raw(twsk->tw_net); /* protected by locking, */ + /* reference counting, */ + /* initialization, or RCU. */ +#else + return &init_net; +#endif } static inline void twsk_net_set(struct inet_timewait_sock *twsk, struct net *net) { - write_pnet(&twsk->tw_net, net); +#ifdef CONFIG_NET_NS + rcu_assign_pointer(twsk->tw_net, net); +#endif } #endif /* _INET_TIMEWAIT_SOCK_ */ diff --git a/trunk/include/net/inetpeer.h b/trunk/include/net/inetpeer.h index 06b795dd5906..e9ff3fc5e688 100644 --- a/trunk/include/net/inetpeer.h +++ b/trunk/include/net/inetpeer.h @@ -87,7 +87,7 @@ static inline struct inet_peer *inet_getpeer_v6(const struct in6_addr *v6daddr, { struct inetpeer_addr daddr; - *(struct in6_addr *)daddr.addr.a6 = *v6daddr; + ipv6_addr_copy((struct in6_addr *)daddr.addr.a6, v6daddr); daddr.family = AF_INET6; return inet_getpeer(&daddr, create); } diff --git a/trunk/include/net/ip.h b/trunk/include/net/ip.h index 775009f9eaba..eca0ef7a495e 100644 --- a/trunk/include/net/ip.h +++ b/trunk/include/net/ip.h @@ -353,14 +353,14 @@ static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, memcpy(buf, &naddr, sizeof(naddr)); } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) #include #endif static __inline__ void inet_reset_saddr(struct sock *sk) { inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) if (sk->sk_family == PF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); @@ -379,7 +379,7 @@ static inline int sk_mc_loop(struct sock *sk) switch (sk->sk_family) { case AF_INET: return inet_sk(sk)->mc_loop; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: return inet6_sk(sk)->mc_loop; #endif @@ -450,7 +450,7 @@ extern int ip_options_rcv_srr(struct sk_buff *skb); * Functions provided by ip_sockglue.c */ -extern void ipv4_pktinfo_prepare(struct sk_buff *skb); +extern int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb); extern int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc); diff --git a/trunk/include/net/ip6_fib.h b/trunk/include/net/ip6_fib.h index b26bb8101981..5735a0f979c3 100644 --- a/trunk/include/net/ip6_fib.h +++ b/trunk/include/net/ip6_fib.h @@ -86,6 +86,9 
@@ struct fib6_table; struct rt6_info { struct dst_entry dst; +#define rt6i_dev dst.dev +#define rt6i_expires dst.expires + /* * Tail elements of dst_entry (__refcnt etc.) * and these elements (rarely used in hot path) are in @@ -199,10 +202,6 @@ struct fib6_node *fib6_locate(struct fib6_node *root, const struct in6_addr *daddr, int dst_len, const struct in6_addr *saddr, int src_len); -extern void fib6_clean_all_ro(struct net *net, - int (*func)(struct rt6_info *, void *arg), - int prune, void *arg); - extern void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg), int prune, void *arg); diff --git a/trunk/include/net/ip6_route.h b/trunk/include/net/ip6_route.h index 2ad92ca4e6f3..5e91b72fc718 100644 --- a/trunk/include/net/ip6_route.h +++ b/trunk/include/net/ip6_route.h @@ -70,8 +70,6 @@ extern void ip6_route_input(struct sk_buff *skb); extern struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk, struct flowi6 *fl6); -extern struct dst_entry * ip6_route_lookup(struct net *net, - struct flowi6 *fl6, int flags); extern int ip6_route_init(void); extern void ip6_route_cleanup(void); @@ -97,14 +95,14 @@ extern struct rt6_info *rt6_lookup(struct net *net, extern struct dst_entry *icmp6_dst_alloc(struct net_device *dev, struct neighbour *neigh, - struct flowi6 *fl6); + const struct in6_addr *addr); extern int icmp6_dst_gc(void); extern void fib6_force_start_gc(struct net *net); extern struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, const struct in6_addr *addr, - bool anycast); + int anycast); extern int ip6_dst_hoplimit(struct dst_entry *dst); diff --git a/trunk/include/net/ip_vs.h b/trunk/include/net/ip_vs.h index ebe517f2da9f..873d5be7926c 100644 --- a/trunk/include/net/ip_vs.h +++ b/trunk/include/net/ip_vs.h @@ -21,7 +21,7 @@ #include /* for union nf_inet_addr */ #include #include /* for struct ipv6hdr */ -#include +#include /* for ipv6_addr_copy */ #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) #include #endif @@ -119,8 +119,8 @@ ip_vs_fill_iphdr(int af, const void *nh, struct ip_vs_iphdr *iphdr) const struct ipv6hdr *iph = nh; iphdr->len = sizeof(struct ipv6hdr); iphdr->protocol = iph->nexthdr; - iphdr->saddr.in6 = iph->saddr; - iphdr->daddr.in6 = iph->daddr; + ipv6_addr_copy(&iphdr->saddr.in6, &iph->saddr); + ipv6_addr_copy(&iphdr->daddr.in6, &iph->daddr); } else #endif { @@ -137,7 +137,7 @@ static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst, { #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) - dst->in6 = src->in6; + ipv6_addr_copy(&dst->in6, &src->in6); else #endif dst->ip = src->ip; @@ -1207,7 +1207,7 @@ extern void ip_vs_control_cleanup(void); extern struct ip_vs_dest * ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr, __be16 dport, const union nf_inet_addr *vaddr, __be16 vport, - __u16 protocol, __u32 fwmark, __u32 flags); + __u16 protocol, __u32 fwmark); extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp); diff --git a/trunk/include/net/ipv6.h b/trunk/include/net/ipv6.h index e4170a22fc6f..a366a8a1fe23 100644 --- a/trunk/include/net/ipv6.h +++ b/trunk/include/net/ipv6.h @@ -132,15 +132,6 @@ extern struct ctl_path net_ipv6_ctl_path[]; SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\ }) -/* per device and per net counters are atomic_long_t */ -#define _DEVINC_ATOMIC_ATOMIC(net, statname, idev, field) \ -({ \ - struct inet6_dev *_idev = (idev); \ - if (likely(_idev != NULL)) \ - SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, 
(field)); \ - SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\ -}) - #define _DEVADD(net, statname, modifier, idev, field, val) \ ({ \ struct inet6_dev *_idev = (idev); \ @@ -177,11 +168,11 @@ extern struct ctl_path net_ipv6_ctl_path[]; _DEVINCATOMIC(net, icmpv6, _BH, idev, field) #define ICMP6MSGOUT_INC_STATS(net, idev, field) \ - _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256) + _DEVINCATOMIC(net, icmpv6msg, , idev, field +256) #define ICMP6MSGOUT_INC_STATS_BH(net, idev, field) \ - _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256) + _DEVINCATOMIC(net, icmpv6msg, _BH, idev, field +256) #define ICMP6MSGIN_INC_STATS_BH(net, idev, field) \ - _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field) + _DEVINCATOMIC(net, icmpv6msg, _BH, idev, field) struct ip6_ra_chain { struct ip6_ra_chain *next; @@ -309,6 +300,11 @@ ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m, ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3])); } +static inline void ipv6_addr_copy(struct in6_addr *a1, const struct in6_addr *a2) +{ + memcpy(a1, a2, sizeof(struct in6_addr)); +} + static inline void ipv6_addr_prefix(struct in6_addr *pfx, const struct in6_addr *addr, int plen) @@ -558,7 +554,7 @@ extern void ipv6_push_frag_opts(struct sk_buff *skb, u8 *proto); extern int ipv6_skip_exthdr(const struct sk_buff *, int start, - u8 *nexthdrp, __be16 *frag_offp); + u8 *nexthdrp); extern int ipv6_ext_hdr(u8 nexthdr); diff --git a/trunk/include/net/iucv/af_iucv.h b/trunk/include/net/iucv/af_iucv.h index 0954ec959159..f2419cf44cef 100644 --- a/trunk/include/net/iucv/af_iucv.h +++ b/trunk/include/net/iucv/af_iucv.h @@ -27,6 +27,7 @@ enum { IUCV_OPEN, IUCV_BOUND, IUCV_LISTEN, + IUCV_SEVERED, IUCV_DISCONN, IUCV_CLOSING, IUCV_CLOSED @@ -145,6 +146,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock, poll_table *wait); void iucv_sock_link(struct iucv_sock_list *l, struct sock *s); void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s); +int iucv_sock_wait_cnt(struct sock *sk, unsigned long timeo); void iucv_accept_enqueue(struct sock *parent, struct sock *sk); void iucv_accept_unlink(struct sock *sk); struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock); diff --git a/trunk/include/net/mac80211.h b/trunk/include/net/mac80211.h index 2a7523edd9b5..72eddd1b410b 100644 --- a/trunk/include/net/mac80211.h +++ b/trunk/include/net/mac80211.h @@ -166,7 +166,6 @@ struct ieee80211_low_level_stats { * that it is only ever disabled for station mode. * @BSS_CHANGED_IDLE: Idle changed for this BSS/interface. 
* @BSS_CHANGED_SSID: SSID changed for this BSS (AP mode) - * @BSS_CHANGED_AP_PROBE_RESP: Probe Response changed for this BSS (AP mode) */ enum ieee80211_bss_change { BSS_CHANGED_ASSOC = 1<<0, @@ -185,7 +184,6 @@ enum ieee80211_bss_change { BSS_CHANGED_QOS = 1<<13, BSS_CHANGED_IDLE = 1<<14, BSS_CHANGED_SSID = 1<<15, - BSS_CHANGED_AP_PROBE_RESP = 1<<16, /* when adding here, make sure to change ieee80211_reconfig */ }; @@ -520,7 +518,7 @@ struct ieee80211_tx_rate { * @flags: transmit info flags, defined above * @band: the band to transmit on (use for checking for races) * @antenna_sel_tx: antenna to use, 0 for automatic diversity - * @ack_frame_id: internal frame ID for TX status, used internally + * @pad: padding, ignore * @control: union for control data * @status: union for status data * @driver_data: array of driver_data pointers @@ -537,7 +535,8 @@ struct ieee80211_tx_info { u8 antenna_sel_tx; - u16 ack_frame_id; + /* 2 byte hole */ + u8 pad[2]; union { struct { @@ -902,10 +901,6 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif) * @IEEE80211_KEY_FLAG_SW_MGMT: This flag should be set by the driver for a * CCMP key if it requires CCMP encryption of management frames (MFP) to * be done in software. - * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver - * for a CCMP key if space should be prepared for the IV, but the IV - * itself should not be generated. Do not set together with - * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. */ enum ieee80211_key_flags { IEEE80211_KEY_FLAG_WMM_STA = 1<<0, @@ -913,7 +908,6 @@ enum ieee80211_key_flags { IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2, IEEE80211_KEY_FLAG_PAIRWISE = 1<<3, IEEE80211_KEY_FLAG_SW_MGMT = 1<<4, - IEEE80211_KEY_FLAG_PUT_IV_SPACE = 1<<5, }; /** @@ -1309,16 +1303,6 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw, return &hw->wiphy->bands[c->band]->bitrates[c->control.rates[idx + 1].idx]; } -/** - * ieee80211_free_txskb - free TX skb - * @hw: the hardware - * @skb: the skb - * - * Free a transmit skb. Use this funtion when some failure - * to transmit happened and thus status cannot be reported. - */ -void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); - /** * DOC: Hardware crypto acceleration * @@ -1760,21 +1744,11 @@ enum ieee80211_frame_release_type { * skb contains the buffer starting from the IEEE 802.11 header. * The low-level driver should send the frame out based on * configuration in the TX control data. This handler should, - * preferably, never fail and stop queues appropriately. - * This must be implemented if @tx_frags is not. - * Must be atomic. - * - * @tx_frags: Called to transmit multiple fragments of a single MSDU. - * This handler must consume all fragments, sending out some of - * them only is useless and it can't ask for some of them to be - * queued again. If the frame is not fragmented the queue has a - * single SKB only. To avoid issues with the networking stack - * when TX status is reported the frames should be removed from - * the skb queue. - * If this is used, the tx_info @vif and @sta pointers will be - * invalid -- you must not use them in that case. - * This must be implemented if @tx isn't. - * Must be atomic. + * preferably, never fail and stop queues appropriately, more + * importantly, however, it must never fail for A-MPDU-queues. + * This function should return NETDEV_TX_OK except in very + * limited cases. + * Must be implemented and atomic. 
* * @start: Called before the first netdevice attached to the hardware * is enabled. This should turn on the hardware and must turn on @@ -2111,8 +2085,6 @@ enum ieee80211_frame_release_type { */ struct ieee80211_ops { void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb); - void (*tx_frags)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, struct sk_buff_head *skbs); int (*start)(struct ieee80211_hw *hw); void (*stop)(struct ieee80211_hw *hw); #ifdef CONFIG_PM @@ -2688,19 +2660,6 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, return ieee80211_beacon_get_tim(hw, vif, NULL, NULL); } -/** - * ieee80211_proberesp_get - retrieve a Probe Response template - * @hw: pointer obtained from ieee80211_alloc_hw(). - * @vif: &struct ieee80211_vif pointer from the add_interface callback. - * - * Creates a Probe Response template which can, for example, be uploaded to - * hardware. The destination address should be set by the caller. - * - * Can only be called in AP mode. - */ -struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); - /** * ieee80211_pspoll_get - retrieve a PS Poll template * @hw: pointer obtained from ieee80211_alloc_hw(). @@ -3502,12 +3461,9 @@ void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn); * * @IEEE80211_RC_HT_CHANGED: The HT parameters of the operating channel have * changed, rate control algorithm can update its internal state if needed. - * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed, the rate - * control algorithm needs to adjust accordingly. */ enum rate_control_changed { - IEEE80211_RC_HT_CHANGED = BIT(0), - IEEE80211_RC_SMPS_CHANGED = BIT(1), + IEEE80211_RC_HT_CHANGED = BIT(0) }; /** diff --git a/trunk/include/net/ndisc.h b/trunk/include/net/ndisc.h index e3133c23980e..62beeb97c4b1 100644 --- a/trunk/include/net/ndisc.h +++ b/trunk/include/net/ndisc.h @@ -79,42 +79,6 @@ struct nd_opt_hdr { __u8 nd_opt_len; } __packed; -static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, __u32 *hash_rnd) -{ - const u32 *p32 = pkey; - - return (((p32[0] ^ dev->ifindex) * hash_rnd[0]) + - (p32[1] * hash_rnd[1]) + - (p32[2] * hash_rnd[2]) + - (p32[3] * hash_rnd[3])); -} - -static inline struct neighbour *__ipv6_neigh_lookup(struct neigh_table *tbl, struct net_device *dev, const void *pkey) -{ - struct neigh_hash_table *nht; - const u32 *p32 = pkey; - struct neighbour *n; - u32 hash_val; - - rcu_read_lock_bh(); - nht = rcu_dereference_bh(tbl->nht); - hash_val = ndisc_hashfn(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift); - for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); - n != NULL; - n = rcu_dereference_bh(n->next)) { - u32 *n32 = (u32 *) n->primary_key; - if (n->dev == dev && - ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) | - (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0) { - if (!atomic_inc_not_zero(&n->refcnt)) - n = NULL; - break; - } - } - rcu_read_unlock_bh(); - - return n; -} extern int ndisc_init(void); @@ -181,4 +145,13 @@ int ndisc_ifinfo_sysctl_strategy(ctl_table *ctl, extern void inet6_ifinfo_notify(int event, struct inet6_dev *idev); +static inline struct neighbour * ndisc_get_neigh(struct net_device *dev, const struct in6_addr *addr) +{ + + if (dev) + return __neigh_lookup_errno(&nd_tbl, addr, dev); + + return ERR_PTR(-ENODEV); +} + #endif diff --git a/trunk/include/net/neighbour.h b/trunk/include/net/neighbour.h index 34c996f46181..2720884287c3 100644 --- a/trunk/include/net/neighbour.h +++ 
b/trunk/include/net/neighbour.h @@ -59,7 +59,7 @@ struct neigh_parms { int reachable_time; int delay_probe_time; - int queue_len_bytes; + int queue_len; int ucast_probes; int app_probes; int mcast_probes; @@ -99,7 +99,6 @@ struct neighbour { rwlock_t lock; atomic_t refcnt; struct sk_buff_head arp_queue; - unsigned int arp_queue_len_bytes; struct timer_list timer; unsigned long used; atomic_t probes; @@ -139,12 +138,10 @@ struct pneigh_entry { * neighbour table manipulation */ -#define NEIGH_NUM_HASH_RND 4 - struct neigh_hash_table { struct neighbour __rcu **hash_buckets; unsigned int hash_shift; - __u32 hash_rnd[NEIGH_NUM_HASH_RND]; + __u32 hash_rnd; struct rcu_head rcu; }; @@ -156,7 +153,7 @@ struct neigh_table { int key_len; __u32 (*hash)(const void *pkey, const struct net_device *dev, - __u32 *hash_rnd); + __u32 hash_rnd); int (*constructor)(struct neighbour *); int (*pconstructor)(struct pneigh_entry *); void (*pdestructor)(struct pneigh_entry *); @@ -175,18 +172,12 @@ struct neigh_table { atomic_t entries; rwlock_t lock; unsigned long last_rand; + struct kmem_cache *kmem_cachep; struct neigh_statistics __percpu *stats; struct neigh_hash_table __rcu *nht; struct pneigh_entry **phash_buckets; }; -#define NEIGH_PRIV_ALIGN sizeof(long long) - -static inline void *neighbour_priv(const struct neighbour *n) -{ - return (char *)n + ALIGN(sizeof(*n) + n->tbl->key_len, NEIGH_PRIV_ALIGN); -} - /* flags for neigh_update() */ #define NEIGH_UPDATE_F_OVERRIDE 0x00000001 #define NEIGH_UPDATE_F_WEAK_OVERRIDE 0x00000002 diff --git a/trunk/include/net/net_namespace.h b/trunk/include/net/net_namespace.h index ee547c149810..3bb6fa0eace0 100644 --- a/trunk/include/net/net_namespace.h +++ b/trunk/include/net/net_namespace.h @@ -77,7 +77,7 @@ struct net { struct netns_packet packet; struct netns_unix unx; struct netns_ipv4 ipv4; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) struct netns_ipv6 ipv6; #endif #if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE) diff --git a/trunk/include/net/netfilter/nf_conntrack_acct.h b/trunk/include/net/netfilter/nf_conntrack_acct.h index 463ae8e16696..4e9c63a20db2 100644 --- a/trunk/include/net/netfilter/nf_conntrack_acct.h +++ b/trunk/include/net/netfilter/nf_conntrack_acct.h @@ -15,8 +15,8 @@ #include struct nf_conn_counter { - atomic64_t packets; - atomic64_t bytes; + u_int64_t packets; + u_int64_t bytes; }; static inline diff --git a/trunk/include/net/netfilter/nf_conntrack_expect.h b/trunk/include/net/netfilter/nf_conntrack_expect.h index 4619caadd9d1..0f8a8c587532 100644 --- a/trunk/include/net/netfilter/nf_conntrack_expect.h +++ b/trunk/include/net/netfilter/nf_conntrack_expect.h @@ -91,6 +91,7 @@ static inline void nf_ct_unlink_expect(struct nf_conntrack_expect *exp) void nf_ct_remove_expectations(struct nf_conn *ct); void nf_ct_unexpect_related(struct nf_conntrack_expect *exp); +void nf_ct_remove_userspace_expectations(void); /* Allocate space for an expectation: this is mandatory before calling nf_ct_expect_related. You will have to call put afterwards. 
*/ diff --git a/trunk/include/net/netfilter/nf_conntrack_tuple.h b/trunk/include/net/netfilter/nf_conntrack_tuple.h index aea3f8221be0..2f8fb77bfdd1 100644 --- a/trunk/include/net/netfilter/nf_conntrack_tuple.h +++ b/trunk/include/net/netfilter/nf_conntrack_tuple.h @@ -12,6 +12,7 @@ #include #include +#include #include /* A `tuple' is a structure containing the information to uniquely diff --git a/trunk/include/net/netfilter/nf_nat.h b/trunk/include/net/netfilter/nf_nat.h index b4de990b55f1..b8872df7285f 100644 --- a/trunk/include/net/netfilter/nf_nat.h +++ b/trunk/include/net/netfilter/nf_nat.h @@ -1,12 +1,14 @@ #ifndef _NF_NAT_H #define _NF_NAT_H #include -#include +#include #include +#define NF_NAT_MAPPING_TYPE_MAX_NAMELEN 16 + enum nf_nat_manip_type { - NF_NAT_MANIP_SRC, - NF_NAT_MANIP_DST + IP_NAT_MANIP_SRC, + IP_NAT_MANIP_DST }; /* SRC manip occurs POST_ROUTING or LOCAL_IN */ @@ -50,7 +52,7 @@ struct nf_conn_nat { /* Set up the info structure to map into this range. */ extern unsigned int nf_nat_setup_info(struct nf_conn *ct, - const struct nf_nat_ipv4_range *range, + const struct nf_nat_range *range, enum nf_nat_manip_type maniptype); /* Is this tuple already taken? (not by us)*/ diff --git a/trunk/include/net/netfilter/nf_nat_core.h b/trunk/include/net/netfilter/nf_nat_core.h index b13d8d18d595..3dc7b98effeb 100644 --- a/trunk/include/net/netfilter/nf_nat_core.h +++ b/trunk/include/net/netfilter/nf_nat_core.h @@ -20,7 +20,7 @@ extern int nf_nat_icmp_reply_translation(struct nf_conn *ct, static inline int nf_nat_initialized(struct nf_conn *ct, enum nf_nat_manip_type manip) { - if (manip == NF_NAT_MANIP_SRC) + if (manip == IP_NAT_MANIP_SRC) return ct->status & IPS_SRC_NAT_DONE; else return ct->status & IPS_DST_NAT_DONE; diff --git a/trunk/include/net/netfilter/nf_nat_protocol.h b/trunk/include/net/netfilter/nf_nat_protocol.h index 7b0b51165f70..93cc90d28e66 100644 --- a/trunk/include/net/netfilter/nf_nat_protocol.h +++ b/trunk/include/net/netfilter/nf_nat_protocol.h @@ -4,12 +4,14 @@ #include #include -struct nf_nat_ipv4_range; +struct nf_nat_range; struct nf_nat_protocol { /* Protocol number. */ unsigned int protonum; + struct module *me; + /* Translate a packet to the target according to manip type. Return true if succeeded. */ bool (*manip_pkt)(struct sk_buff *skb, @@ -28,12 +30,15 @@ struct nf_nat_protocol { possible. Per-protocol part of tuple is initialized to the incoming packet. */ void (*unique_tuple)(struct nf_conntrack_tuple *tuple, - const struct nf_nat_ipv4_range *range, + const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct); + int (*range_to_nlattr)(struct sk_buff *skb, + const struct nf_nat_range *range); + int (*nlattr_to_range)(struct nlattr *tb[], - struct nf_nat_ipv4_range *range); + struct nf_nat_range *range); }; /* Protocol registration. 
*/ @@ -56,12 +61,14 @@ extern bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple, const union nf_conntrack_man_proto *max); extern void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple, - const struct nf_nat_ipv4_range *range, + const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct, u_int16_t *rover); +extern int nf_nat_proto_range_to_nlattr(struct sk_buff *skb, + const struct nf_nat_range *range); extern int nf_nat_proto_nlattr_to_range(struct nlattr *tb[], - struct nf_nat_ipv4_range *range); + struct nf_nat_range *range); #endif /*_NF_NAT_PROTO_H*/ diff --git a/trunk/include/net/netfilter/nf_tproxy_core.h b/trunk/include/net/netfilter/nf_tproxy_core.h index 75ca9291cf2c..e505358d8999 100644 --- a/trunk/include/net/netfilter/nf_tproxy_core.h +++ b/trunk/include/net/netfilter/nf_tproxy_core.h @@ -131,7 +131,7 @@ nf_tproxy_get_sock_v4(struct net *net, const u8 protocol, return sk; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static inline struct sock * nf_tproxy_get_sock_v6(struct net *net, const u8 protocol, const struct in6_addr *saddr, const struct in6_addr *daddr, diff --git a/trunk/include/net/netns/ipv4.h b/trunk/include/net/netns/ipv4.h index bbd023a1c9b9..d786b4fc02a4 100644 --- a/trunk/include/net/netns/ipv4.h +++ b/trunk/include/net/netns/ipv4.h @@ -55,7 +55,6 @@ struct netns_ipv4 { int current_rt_cache_rebuild_count; unsigned int sysctl_ping_group_range[2]; - long sysctl_tcp_mem[3]; atomic_t rt_genid; atomic_t dev_addr_genid; diff --git a/trunk/include/net/netns/mib.h b/trunk/include/net/netns/mib.h index d542a4b28cca..0b44112e2366 100644 --- a/trunk/include/net/netns/mib.h +++ b/trunk/include/net/netns/mib.h @@ -10,15 +10,15 @@ struct netns_mib { DEFINE_SNMP_STAT(struct udp_mib, udp_statistics); DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics); DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics); - DEFINE_SNMP_STAT_ATOMIC(struct icmpmsg_mib, icmpmsg_statistics); + DEFINE_SNMP_STAT(struct icmpmsg_mib, icmpmsg_statistics); -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) struct proc_dir_entry *proc_net_devsnmp6; DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6); DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6); DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics); DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics); - DEFINE_SNMP_STAT_ATOMIC(struct icmpv6msg_mib, icmpv6msg_statistics); + DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics); #endif #ifdef CONFIG_XFRM_STATISTICS DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics); diff --git a/trunk/include/net/netns/xfrm.h b/trunk/include/net/netns/xfrm.h index 5299e69a32af..748f91f87cd5 100644 --- a/trunk/include/net/netns/xfrm.h +++ b/trunk/include/net/netns/xfrm.h @@ -56,7 +56,7 @@ struct netns_xfrm { #endif struct dst_ops xfrm4_dst_ops; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) struct dst_ops xfrm6_dst_ops; #endif }; diff --git a/trunk/include/net/netprio_cgroup.h b/trunk/include/net/netprio_cgroup.h deleted file mode 100644 index e503b87c4c1b..000000000000 --- a/trunk/include/net/netprio_cgroup.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * netprio_cgroup.h Control Group Priority set - * - * - * Authors: Neil Horman - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the 
License, or (at your option) - * any later version. - * - */ - -#ifndef _NETPRIO_CGROUP_H -#define _NETPRIO_CGROUP_H -#include -#include -#include -#include - - -struct netprio_map { - struct rcu_head rcu; - u32 priomap_len; - u32 priomap[]; -}; - -#ifdef CONFIG_CGROUPS - -struct cgroup_netprio_state { - struct cgroup_subsys_state css; - u32 prioidx; -}; - -#ifndef CONFIG_NETPRIO_CGROUP -extern int net_prio_subsys_id; -#endif - -extern void sock_update_netprioidx(struct sock *sk); - -static inline struct cgroup_netprio_state - *task_netprio_state(struct task_struct *p) -{ -#if IS_ENABLED(CONFIG_NETPRIO_CGROUP) - return container_of(task_subsys_state(p, net_prio_subsys_id), - struct cgroup_netprio_state, css); -#else - return NULL; -#endif -} - -#else - -#define sock_update_netprioidx(sk) -#endif - -#endif /* _NET_CLS_CGROUP_H */ diff --git a/trunk/include/net/nfc/nci.h b/trunk/include/net/nfc/nci.h index 2be95e2626c0..39b85bc0804f 100644 --- a/trunk/include/net/nfc/nci.h +++ b/trunk/include/net/nfc/nci.h @@ -34,30 +34,32 @@ #define NCI_MAX_NUM_CONN 10 /* NCI Status Codes */ -#define NCI_STATUS_OK 0x00 -#define NCI_STATUS_REJECTED 0x01 -#define NCI_STATUS_RF_FRAME_CORRUPTED 0x02 -#define NCI_STATUS_FAILED 0x03 -#define NCI_STATUS_NOT_INITIALIZED 0x04 -#define NCI_STATUS_SYNTAX_ERROR 0x05 -#define NCI_STATUS_SEMANTIC_ERROR 0x06 -#define NCI_STATUS_UNKNOWN_GID 0x07 -#define NCI_STATUS_UNKNOWN_OID 0x08 -#define NCI_STATUS_INVALID_PARAM 0x09 -#define NCI_STATUS_MESSAGE_SIZE_EXCEEDED 0x0a +#define NCI_STATUS_OK 0x00 +#define NCI_STATUS_REJECTED 0x01 +#define NCI_STATUS_MESSAGE_CORRUPTED 0x02 +#define NCI_STATUS_BUFFER_FULL 0x03 +#define NCI_STATUS_FAILED 0x04 +#define NCI_STATUS_NOT_INITIALIZED 0x05 +#define NCI_STATUS_SYNTAX_ERROR 0x06 +#define NCI_STATUS_SEMANTIC_ERROR 0x07 +#define NCI_STATUS_UNKNOWN_GID 0x08 +#define NCI_STATUS_UNKNOWN_OID 0x09 +#define NCI_STATUS_INVALID_PARAM 0x0a +#define NCI_STATUS_MESSAGE_SIZE_EXCEEDED 0x0b /* Discovery Specific Status Codes */ -#define NCI_STATUS_DISCOVERY_ALREADY_STARTED 0xa0 -#define NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED 0xa1 -#define NCI_STATUS_DISCOVERY_TEAR_DOWN 0xa2 +#define NCI_STATUS_DISCOVERY_ALREADY_STARTED 0xa0 +#define NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED 0xa1 /* RF Interface Specific Status Codes */ -#define NCI_STATUS_RF_TRANSMISSION_ERROR 0xb0 -#define NCI_STATUS_RF_PROTOCOL_ERROR 0xb1 -#define NCI_STATUS_RF_TIMEOUT_ERROR 0xb2 +#define NCI_STATUS_RF_TRANSMISSION_ERROR 0xb0 +#define NCI_STATUS_RF_PROTOCOL_ERROR 0xb1 +#define NCI_STATUS_RF_TIMEOUT_ERROR 0xb2 +#define NCI_STATUS_RF_LINK_LOSS_ERROR 0xb3 /* NFCEE Interface Specific Status Codes */ -#define NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED 0xc0 -#define NCI_STATUS_NFCEE_TRANSMISSION_ERROR 0xc1 -#define NCI_STATUS_NFCEE_PROTOCOL_ERROR 0xc2 -#define NCI_STATUS_NFCEE_TIMEOUT_ERROR 0xc3 +#define NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED 0xc0 +#define NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED 0xc1 +#define NCI_STATUS_NFCEE_TRANSMISSION_ERROR 0xc2 +#define NCI_STATUS_NFCEE_PROTOCOL_ERROR 0xc3 +#define NCI_STATUS_NFCEE_TIMEOUT_ERROR 0xc4 /* NCI RF Technology and Mode */ #define NCI_NFC_A_PASSIVE_POLL_MODE 0x00 @@ -65,28 +67,11 @@ #define NCI_NFC_F_PASSIVE_POLL_MODE 0x02 #define NCI_NFC_A_ACTIVE_POLL_MODE 0x03 #define NCI_NFC_F_ACTIVE_POLL_MODE 0x05 -#define NCI_NFC_15693_PASSIVE_POLL_MODE 0x06 #define NCI_NFC_A_PASSIVE_LISTEN_MODE 0x80 #define NCI_NFC_B_PASSIVE_LISTEN_MODE 0x81 #define NCI_NFC_F_PASSIVE_LISTEN_MODE 0x82 #define NCI_NFC_A_ACTIVE_LISTEN_MODE 0x83 
#define NCI_NFC_F_ACTIVE_LISTEN_MODE 0x85 -#define NCI_NFC_15693_PASSIVE_LISTEN_MODE 0x86 - -/* NCI RF Technologies */ -#define NCI_NFC_RF_TECHNOLOGY_A 0x00 -#define NCI_NFC_RF_TECHNOLOGY_B 0x01 -#define NCI_NFC_RF_TECHNOLOGY_F 0x02 -#define NCI_NFC_RF_TECHNOLOGY_15693 0x03 - -/* NCI Bit Rates */ -#define NCI_NFC_BIT_RATE_106 0x00 -#define NCI_NFC_BIT_RATE_212 0x01 -#define NCI_NFC_BIT_RATE_424 0x02 -#define NCI_NFC_BIT_RATE_848 0x03 -#define NCI_NFC_BIT_RATE_1695 0x04 -#define NCI_NFC_BIT_RATE_3390 0x05 -#define NCI_NFC_BIT_RATE_6780 0x06 /* NCI RF Protocols */ #define NCI_RF_PROTOCOL_UNKNOWN 0x00 @@ -97,30 +82,37 @@ #define NCI_RF_PROTOCOL_NFC_DEP 0x05 /* NCI RF Interfaces */ -#define NCI_RF_INTERFACE_NFCEE_DIRECT 0x00 -#define NCI_RF_INTERFACE_FRAME 0x01 -#define NCI_RF_INTERFACE_ISO_DEP 0x02 -#define NCI_RF_INTERFACE_NFC_DEP 0x03 - -/* NCI Reset types */ -#define NCI_RESET_TYPE_KEEP_CONFIG 0x00 -#define NCI_RESET_TYPE_RESET_CONFIG 0x01 - -/* NCI Static RF connection ID */ -#define NCI_STATIC_RF_CONN_ID 0x00 - -/* NCI Data Flow Control */ -#define NCI_DATA_FLOW_CONTROL_NOT_USED 0xff +#define NCI_RF_INTERFACE_RFU 0x00 +#define NCI_RF_INTERFACE_FRAME 0x01 +#define NCI_RF_INTERFACE_ISO_DEP 0x02 +#define NCI_RF_INTERFACE_NFC_DEP 0x03 /* NCI RF_DISCOVER_MAP_CMD modes */ #define NCI_DISC_MAP_MODE_POLL 0x01 #define NCI_DISC_MAP_MODE_LISTEN 0x02 +#define NCI_DISC_MAP_MODE_BOTH 0x03 + +/* NCI Discovery Types */ +#define NCI_DISCOVERY_TYPE_POLL_A_PASSIVE 0x00 +#define NCI_DISCOVERY_TYPE_POLL_B_PASSIVE 0x01 +#define NCI_DISCOVERY_TYPE_POLL_F_PASSIVE 0x02 +#define NCI_DISCOVERY_TYPE_POLL_A_ACTIVE 0x03 +#define NCI_DISCOVERY_TYPE_POLL_F_ACTIVE 0x05 +#define NCI_DISCOVERY_TYPE_WAKEUP_A_PASSIVE 0x06 +#define NCI_DISCOVERY_TYPE_WAKEUP_B_PASSIVE 0x07 +#define NCI_DISCOVERY_TYPE_WAKEUP_A_ACTIVE 0x09 +#define NCI_DISCOVERY_TYPE_LISTEN_A_PASSIVE 0x80 +#define NCI_DISCOVERY_TYPE_LISTEN_B_PASSIVE 0x81 +#define NCI_DISCOVERY_TYPE_LISTEN_F_PASSIVE 0x82 +#define NCI_DISCOVERY_TYPE_LISTEN_A_ACTIVE 0x83 +#define NCI_DISCOVERY_TYPE_LISTEN_F_ACTIVE 0x85 /* NCI Deactivation Type */ -#define NCI_DEACTIVATE_TYPE_IDLE_MODE 0x00 -#define NCI_DEACTIVATE_TYPE_SLEEP_MODE 0x01 -#define NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE 0x02 -#define NCI_DEACTIVATE_TYPE_DISCOVERY 0x03 +#define NCI_DEACTIVATE_TYPE_IDLE_MODE 0x00 +#define NCI_DEACTIVATE_TYPE_SLEEP_MODE 0x01 +#define NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE 0x02 +#define NCI_DEACTIVATE_TYPE_RF_LINK_LOSS 0x03 +#define NCI_DEACTIVATE_TYPE_DISCOVERY_ERROR 0x04 /* Message Type (MT) */ #define NCI_MT_DATA_PKT 0x00 @@ -152,10 +144,10 @@ #define nci_conn_id(hdr) (__u8)(((hdr)[0])&0x0f) /* GID values */ -#define NCI_GID_CORE 0x0 -#define NCI_GID_RF_MGMT 0x1 -#define NCI_GID_NFCEE_MGMT 0x2 -#define NCI_GID_PROPRIETARY 0xf +#define NCI_GID_CORE 0x0 +#define NCI_GID_RF_MGMT 0x1 +#define NCI_GID_NFCEE_MGMT 0x2 +#define NCI_GID_PROPRIETARY 0xf /* ---- NCI Packet structures ---- */ #define NCI_CTRL_HDR_SIZE 3 @@ -177,17 +169,24 @@ struct nci_data_hdr { /* ----- NCI Commands ---- */ /* ------------------------ */ #define NCI_OP_CORE_RESET_CMD nci_opcode_pack(NCI_GID_CORE, 0x00) -struct nci_core_reset_cmd { - __u8 reset_type; -} __packed; #define NCI_OP_CORE_INIT_CMD nci_opcode_pack(NCI_GID_CORE, 0x01) +#define NCI_OP_CORE_SET_CONFIG_CMD nci_opcode_pack(NCI_GID_CORE, 0x02) + +#define NCI_OP_CORE_CONN_CREATE_CMD nci_opcode_pack(NCI_GID_CORE, 0x04) +struct nci_core_conn_create_cmd { + __u8 target_handle; + __u8 num_target_specific_params; +} __packed; + +#define NCI_OP_CORE_CONN_CLOSE_CMD 
nci_opcode_pack(NCI_GID_CORE, 0x06) + #define NCI_OP_RF_DISCOVER_MAP_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x00) struct disc_map_config { __u8 rf_protocol; __u8 mode; - __u8 rf_interface; + __u8 rf_interface_type; } __packed; struct nci_rf_disc_map_cmd { @@ -198,7 +197,7 @@ struct nci_rf_disc_map_cmd { #define NCI_OP_RF_DISCOVER_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x03) struct disc_config { - __u8 rf_tech_and_mode; + __u8 type; __u8 frequency; } __packed; @@ -219,7 +218,6 @@ struct nci_rf_deactivate_cmd { struct nci_core_reset_rsp { __u8 status; __u8 nci_ver; - __u8 config_status; } __packed; #define NCI_OP_CORE_INIT_RSP nci_opcode_pack(NCI_GID_CORE, 0x01) @@ -234,12 +232,24 @@ struct nci_core_init_rsp_1 { struct nci_core_init_rsp_2 { __u8 max_logical_connections; __le16 max_routing_table_size; - __u8 max_ctrl_pkt_payload_len; - __le16 max_size_for_large_params; - __u8 manufact_id; - __le32 manufact_specific_info; + __u8 max_control_packet_payload_length; + __le16 rf_sending_buffer_size; + __le16 rf_receiving_buffer_size; + __le16 manufacturer_id; } __packed; +#define NCI_OP_CORE_SET_CONFIG_RSP nci_opcode_pack(NCI_GID_CORE, 0x02) + +#define NCI_OP_CORE_CONN_CREATE_RSP nci_opcode_pack(NCI_GID_CORE, 0x04) +struct nci_core_conn_create_rsp { + __u8 status; + __u8 max_pkt_payload_size; + __u8 initial_num_credits; + __u8 conn_id; +} __packed; + +#define NCI_OP_CORE_CONN_CLOSE_RSP nci_opcode_pack(NCI_GID_CORE, 0x06) + #define NCI_OP_RF_DISCOVER_MAP_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x00) #define NCI_OP_RF_DISCOVER_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x03) @@ -249,7 +259,7 @@ struct nci_core_init_rsp_2 { /* --------------------------- */ /* ---- NCI Notifications ---- */ /* --------------------------- */ -#define NCI_OP_CORE_CONN_CREDITS_NTF nci_opcode_pack(NCI_GID_CORE, 0x06) +#define NCI_OP_CORE_CONN_CREDITS_NTF nci_opcode_pack(NCI_GID_CORE, 0x07) struct conn_credit_entry { __u8 conn_id; __u8 credits; @@ -260,13 +270,12 @@ struct nci_core_conn_credit_ntf { struct conn_credit_entry conn_entries[NCI_MAX_NUM_CONN]; } __packed; -#define NCI_OP_CORE_INTF_ERROR_NTF nci_opcode_pack(NCI_GID_CORE, 0x08) -struct nci_core_intf_error_ntf { - __u8 status; - __u8 conn_id; +#define NCI_OP_RF_FIELD_INFO_NTF nci_opcode_pack(NCI_GID_CORE, 0x08) +struct nci_rf_field_info_ntf { + __u8 rf_field_status; } __packed; -#define NCI_OP_RF_INTF_ACTIVATED_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x05) +#define NCI_OP_RF_ACTIVATE_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x05) struct rf_tech_specific_params_nfca_poll { __u16 sens_res; __u8 nfcid1_len; /* 0, 4, 7, or 10 Bytes */ @@ -280,22 +289,17 @@ struct activation_params_nfca_poll_iso_dep { __u8 rats_res[20]; }; -struct nci_rf_intf_activated_ntf { - __u8 rf_discovery_id; - __u8 rf_interface; +struct nci_rf_activate_ntf { + __u8 target_handle; __u8 rf_protocol; - __u8 activation_rf_tech_and_mode; - __u8 max_data_pkt_payload_size; - __u8 initial_num_credits; + __u8 rf_tech_and_mode; __u8 rf_tech_specific_params_len; union { struct rf_tech_specific_params_nfca_poll nfca_poll; } rf_tech_specific_params; - __u8 data_exch_rf_tech_and_mode; - __u8 data_exch_tx_bit_rate; - __u8 data_exch_rx_bit_rate; + __u8 rf_interface_type; __u8 activation_params_len; union { @@ -305,9 +309,5 @@ struct nci_rf_intf_activated_ntf { } __packed; #define NCI_OP_RF_DEACTIVATE_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x06) -struct nci_rf_deactivate_ntf { - __u8 type; - __u8 reason; -} __packed; #endif /* __NCI_H */ diff --git a/trunk/include/net/nfc/nci_core.h b/trunk/include/net/nfc/nci_core.h index 
bccd89e9d4c2..b8b4bbd7e0fc 100644 --- a/trunk/include/net/nfc/nci_core.h +++ b/trunk/include/net/nfc/nci_core.h @@ -109,14 +109,15 @@ struct nci_dev { [NCI_MAX_SUPPORTED_RF_INTERFACES]; __u8 max_logical_connections; __u16 max_routing_table_size; - __u8 max_ctrl_pkt_payload_len; - __u16 max_size_for_large_params; - __u8 manufact_id; - __u32 manufact_specific_info; + __u8 max_control_packet_payload_length; + __u16 rf_sending_buffer_size; + __u16 rf_receiving_buffer_size; + __u16 manufacturer_id; - /* received during NCI_OP_RF_INTF_ACTIVATED_NTF */ - __u8 max_data_pkt_payload_size; + /* received during NCI_OP_CORE_CONN_CREATE_RSP for static conn 0 */ + __u8 max_pkt_payload_size; __u8 initial_num_credits; + __u8 conn_id; /* stored during nci_data_exchange */ data_exchange_cb_t data_exchange_cb; diff --git a/trunk/include/net/nfc/nfc.h b/trunk/include/net/nfc/nfc.h index 8696b773a695..6a7f602aa841 100644 --- a/trunk/include/net/nfc/nfc.h +++ b/trunk/include/net/nfc/nfc.h @@ -52,9 +52,6 @@ struct nfc_ops { int (*dev_down)(struct nfc_dev *dev); int (*start_poll)(struct nfc_dev *dev, u32 protocols); void (*stop_poll)(struct nfc_dev *dev); - int (*dep_link_up)(struct nfc_dev *dev, int target_idx, - u8 comm_mode, u8 rf_mode); - int (*dep_link_down)(struct nfc_dev *dev); int (*activate_target)(struct nfc_dev *dev, u32 target_idx, u32 protocol); void (*deactivate_target)(struct nfc_dev *dev, u32 target_idx); @@ -63,17 +60,11 @@ struct nfc_ops { void *cb_context); }; -#define NFC_TARGET_IDX_ANY -1 -#define NFC_MAX_GT_LEN 48 -#define NFC_MAX_NFCID1_LEN 10 - struct nfc_target { u32 idx; u32 supported_protocols; u16 sens_res; u8 sel_res; - u8 nfcid1_len; - u8 nfcid1[NFC_MAX_NFCID1_LEN]; }; struct nfc_genl_data { @@ -92,8 +83,6 @@ struct nfc_dev { bool dev_up; bool polling; bool remote_activated; - bool dep_link_up; - u32 dep_rf_mode; struct nfc_genl_data genl_data; u32 supported_protocols; @@ -168,20 +157,9 @@ static inline const char *nfc_device_name(struct nfc_dev *dev) return dev_name(&dev->dev); } -struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk, - unsigned int flags, unsigned int size, - unsigned int *err); -struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp); - -int nfc_set_remote_general_bytes(struct nfc_dev *dev, - u8 *gt, u8 gt_len); - -u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, u8 *gt_len); +struct sk_buff *nfc_alloc_skb(unsigned int size, gfp_t gfp); int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets, int ntargets); -int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx, - u8 comm_mode, u8 rf_mode); - #endif /* __NET_NFC_H */ diff --git a/trunk/include/net/protocol.h b/trunk/include/net/protocol.h index 875f4895b033..6f7eb800974a 100644 --- a/trunk/include/net/protocol.h +++ b/trunk/include/net/protocol.h @@ -25,7 +25,7 @@ #define _PROTOCOL_H #include -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) #include #endif @@ -38,7 +38,7 @@ struct net_protocol { void (*err_handler)(struct sk_buff *skb, u32 info); int (*gso_send_check)(struct sk_buff *skb); struct sk_buff *(*gso_segment)(struct sk_buff *skb, - netdev_features_t features); + u32 features); struct sk_buff **(*gro_receive)(struct sk_buff **head, struct sk_buff *skb); int (*gro_complete)(struct sk_buff *skb); @@ -46,7 +46,7 @@ struct net_protocol { netns_ok:1; }; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) struct inet6_protocol { int (*handler)(struct sk_buff *skb); @@ -57,7 +57,7 @@ struct 
inet6_protocol { int (*gso_send_check)(struct sk_buff *skb); struct sk_buff *(*gso_segment)(struct sk_buff *skb, - netdev_features_t features); + u32 features); struct sk_buff **(*gro_receive)(struct sk_buff **head, struct sk_buff *skb); int (*gro_complete)(struct sk_buff *skb); @@ -91,7 +91,7 @@ struct inet_protosw { extern const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS]; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS]; #endif @@ -100,7 +100,7 @@ extern int inet_del_protocol(const struct net_protocol *prot, unsigned char num) extern void inet_register_protosw(struct inet_protosw *p); extern void inet_unregister_protosw(struct inet_protosw *p); -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) extern int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num); extern int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num); extern int inet6_register_protosw(struct inet_protosw *p); diff --git a/trunk/include/net/red.h b/trunk/include/net/red.h index baab385a4736..b72a3b833936 100644 --- a/trunk/include/net/red.h +++ b/trunk/include/net/red.h @@ -5,7 +5,6 @@ #include #include #include -#include /* Random Early Detection (RED) algorithm. ======================================= @@ -88,29 +87,6 @@ etc. */ -/* - * Adaptative RED : An Algorithm for Increasing the Robustness of RED's AQM - * (Sally FLoyd, Ramakrishna Gummadi, and Scott Shenker) August 2001 - * - * Every 500 ms: - * if (avg > target and max_p <= 0.5) - * increase max_p : max_p += alpha; - * else if (avg < target and max_p >= 0.01) - * decrease max_p : max_p *= beta; - * - * target :[qth_min + 0.4*(qth_min - qth_max), - * qth_min + 0.6*(qth_min - qth_max)]. - * alpha : min(0.01, max_p / 4) - * beta : 0.9 - * max_P is a Q0.32 fixed point number (with 32 bits mantissa) - * max_P between 0.01 and 0.5 (1% - 50%) [ Its no longer a negative power of two ] - */ -#define RED_ONE_PERCENT ((u32)DIV_ROUND_CLOSEST(1ULL<<32, 100)) - -#define MAX_P_MIN (1 * RED_ONE_PERCENT) -#define MAX_P_MAX (50 * RED_ONE_PERCENT) -#define MAX_P_ALPHA(val) min(MAX_P_MIN, val / 4) - #define RED_STAB_SIZE 256 #define RED_STAB_MASK (RED_STAB_SIZE - 1) @@ -125,109 +101,76 @@ struct red_stats { struct red_parms { /* Parameters */ - u32 qth_min; /* Min avg length threshold: Wlog scaled */ - u32 qth_max; /* Max avg length threshold: Wlog scaled */ + u32 qth_min; /* Min avg length threshold: A scaled */ + u32 qth_max; /* Max avg length threshold: A scaled */ u32 Scell_max; - u32 max_P; /* probability, [0 .. 1.0] 32 scaled */ - u32 max_P_reciprocal; /* reciprocal_value(max_P / qth_delta) */ - u32 qth_delta; /* max_th - min_th */ - u32 target_min; /* min_th + 0.4*(max_th - min_th) */ - u32 target_max; /* min_th + 0.6*(max_th - min_th) */ + u32 Rmask; /* Cached random mask, see red_rmask */ u8 Scell_log; u8 Wlog; /* log(W) */ u8 Plog; /* random number bits */ u8 Stab[RED_STAB_SIZE]; -}; -struct red_vars { /* Variables */ int qcount; /* Number of packets since last random number generation */ u32 qR; /* Cached random number */ - unsigned long qavg; /* Average queue length: Wlog scaled */ + unsigned long qavg; /* Average queue length: A scaled */ ktime_t qidlestart; /* Start of current idle period */ }; -static inline u32 red_maxp(u8 Plog) +static inline u32 red_rmask(u8 Plog) { - return Plog < 32 ? (~0U >> Plog) : ~0U; + return Plog < 32 ? 
((1 << Plog) - 1) : ~0UL; } -static inline void red_set_vars(struct red_vars *v) +static inline void red_set_parms(struct red_parms *p, + u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog, + u8 Scell_log, u8 *stab) { /* Reset average queue length, the value is strictly bound * to the parameters below, reseting hurts a bit but leaving * it might result in an unreasonable qavg for a while. --TGR */ - v->qavg = 0; - - v->qcount = -1; -} - -static inline void red_set_parms(struct red_parms *p, - u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog, - u8 Scell_log, u8 *stab, u32 max_P) -{ - int delta = qth_max - qth_min; - u32 max_p_delta; + p->qavg = 0; + p->qcount = -1; p->qth_min = qth_min << Wlog; p->qth_max = qth_max << Wlog; p->Wlog = Wlog; p->Plog = Plog; - if (delta < 0) - delta = 1; - p->qth_delta = delta; - if (!max_P) { - max_P = red_maxp(Plog); - max_P *= delta; /* max_P = (qth_max - qth_min)/2^Plog */ - } - p->max_P = max_P; - max_p_delta = max_P / delta; - max_p_delta = max(max_p_delta, 1U); - p->max_P_reciprocal = reciprocal_value(max_p_delta); - - /* RED Adaptative target : - * [min_th + 0.4*(min_th - max_th), - * min_th + 0.6*(min_th - max_th)]. - */ - delta /= 5; - p->target_min = qth_min + 2*delta; - p->target_max = qth_min + 3*delta; - + p->Rmask = red_rmask(Plog); p->Scell_log = Scell_log; p->Scell_max = (255 << Scell_log); memcpy(p->Stab, stab, sizeof(p->Stab)); } -static inline int red_is_idling(const struct red_vars *v) +static inline int red_is_idling(struct red_parms *p) { - return v->qidlestart.tv64 != 0; + return p->qidlestart.tv64 != 0; } -static inline void red_start_of_idle_period(struct red_vars *v) +static inline void red_start_of_idle_period(struct red_parms *p) { - v->qidlestart = ktime_get(); + p->qidlestart = ktime_get(); } -static inline void red_end_of_idle_period(struct red_vars *v) +static inline void red_end_of_idle_period(struct red_parms *p) { - v->qidlestart.tv64 = 0; + p->qidlestart.tv64 = 0; } -static inline void red_restart(struct red_vars *v) +static inline void red_restart(struct red_parms *p) { - red_end_of_idle_period(v); - v->qavg = 0; - v->qcount = -1; + red_end_of_idle_period(p); + p->qavg = 0; + p->qcount = -1; } -static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms *p, - const struct red_vars *v) +static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p) { - s64 delta = ktime_us_delta(ktime_get(), v->qidlestart); + s64 delta = ktime_us_delta(ktime_get(), p->qidlestart); long us_idle = min_t(s64, delta, p->Scell_max); int shift; @@ -254,7 +197,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms shift = p->Stab[(us_idle >> p->Scell_log) & RED_STAB_MASK]; if (shift) - return v->qavg >> shift; + return p->qavg >> shift; else { /* Approximate initial part of exponent with linear function: * @@ -263,17 +206,16 @@ static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms * Seems, it is the best solution to * problem of too coarse exponent tabulation. 
*/ - us_idle = (v->qavg * (u64)us_idle) >> p->Scell_log; + us_idle = (p->qavg * (u64)us_idle) >> p->Scell_log; - if (us_idle < (v->qavg >> 1)) - return v->qavg - us_idle; + if (us_idle < (p->qavg >> 1)) + return p->qavg - us_idle; else - return v->qavg >> 1; + return p->qavg >> 1; } } -static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p, - const struct red_vars *v, +static inline unsigned long red_calc_qavg_no_idle_time(struct red_parms *p, unsigned int backlog) { /* @@ -285,46 +227,42 @@ static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p * * --ANK (980924) */ - return v->qavg + (backlog - (v->qavg >> p->Wlog)); + return p->qavg + (backlog - (p->qavg >> p->Wlog)); } -static inline unsigned long red_calc_qavg(const struct red_parms *p, - const struct red_vars *v, +static inline unsigned long red_calc_qavg(struct red_parms *p, unsigned int backlog) { - if (!red_is_idling(v)) - return red_calc_qavg_no_idle_time(p, v, backlog); + if (!red_is_idling(p)) + return red_calc_qavg_no_idle_time(p, backlog); else - return red_calc_qavg_from_idle_time(p, v); + return red_calc_qavg_from_idle_time(p); } - -static inline u32 red_random(const struct red_parms *p) +static inline u32 red_random(struct red_parms *p) { - return reciprocal_divide(net_random(), p->max_P_reciprocal); + return net_random() & p->Rmask; } -static inline int red_mark_probability(const struct red_parms *p, - const struct red_vars *v, - unsigned long qavg) +static inline int red_mark_probability(struct red_parms *p, unsigned long qavg) { /* The formula used below causes questions. - OK. qR is random number in the interval - (0..1/max_P)*(qth_max-qth_min) + OK. qR is random number in the interval 0..Rmask i.e. 0..(2^Plog). If we used floating point arithmetics, it would be: (2^Plog)*rnd_num, where rnd_num is less 1. Taking into account, that qavg have fixed - point at Wlog, two lines + point at Wlog, and Plog is related to max_P by + max_P = (qth_max-qth_min)/2^Plog; two lines below have the following floating point equivalent: max_P*(qavg - qth_min)/(qth_max-qth_min) < rnd/qcount Any questions? 
--ANK (980924) */ - return !(((qavg - p->qth_min) >> p->Wlog) * v->qcount < v->qR); + return !(((qavg - p->qth_min) >> p->Wlog) * p->qcount < p->qR); } enum { @@ -333,7 +271,7 @@ enum { RED_ABOVE_MAX_TRESH, }; -static inline int red_cmp_thresh(const struct red_parms *p, unsigned long qavg) +static inline int red_cmp_thresh(struct red_parms *p, unsigned long qavg) { if (qavg < p->qth_min) return RED_BELOW_MIN_THRESH; @@ -349,29 +287,27 @@ enum { RED_HARD_MARK, }; -static inline int red_action(const struct red_parms *p, - struct red_vars *v, - unsigned long qavg) +static inline int red_action(struct red_parms *p, unsigned long qavg) { switch (red_cmp_thresh(p, qavg)) { case RED_BELOW_MIN_THRESH: - v->qcount = -1; + p->qcount = -1; return RED_DONT_MARK; case RED_BETWEEN_TRESH: - if (++v->qcount) { - if (red_mark_probability(p, v, qavg)) { - v->qcount = 0; - v->qR = red_random(p); + if (++p->qcount) { + if (red_mark_probability(p, qavg)) { + p->qcount = 0; + p->qR = red_random(p); return RED_PROB_MARK; } } else - v->qR = red_random(p); + p->qR = red_random(p); return RED_DONT_MARK; case RED_ABOVE_MAX_TRESH: - v->qcount = -1; + p->qcount = -1; return RED_HARD_MARK; } @@ -379,25 +315,4 @@ static inline int red_action(const struct red_parms *p, return RED_DONT_MARK; } -static inline void red_adaptative_algo(struct red_parms *p, struct red_vars *v) -{ - unsigned long qavg; - u32 max_p_delta; - - qavg = v->qavg; - if (red_is_idling(v)) - qavg = red_calc_qavg_from_idle_time(p, v); - - /* p->qavg is fixed point number with point at Wlog */ - qavg >>= p->Wlog; - - if (qavg > p->target_max && p->max_P <= MAX_P_MAX) - p->max_P += MAX_P_ALPHA(p->max_P); /* maxp = maxp + alpha */ - else if (qavg < p->target_min && p->max_P >= MAX_P_MIN) - p->max_P = (p->max_P/10)*9; /* maxp = maxp * Beta */ - - max_p_delta = DIV_ROUND_CLOSEST(p->max_P, p->qth_delta); - max_p_delta = max(max_p_delta, 1U); - p->max_P_reciprocal = reciprocal_value(max_p_delta); -} #endif diff --git a/trunk/include/net/regulatory.h b/trunk/include/net/regulatory.h index a5f79933e211..eb7d3c2d4274 100644 --- a/trunk/include/net/regulatory.h +++ b/trunk/include/net/regulatory.h @@ -48,10 +48,6 @@ enum environment_cap { * 99 - built by driver but a specific alpha2 cannot be determined * 98 - result of an intersection between two regulatory domains * 97 - regulatory domain has not yet been configured - * @dfs_region: If CRDA responded with a regulatory domain that requires - * DFS master operation on a known DFS region (NL80211_DFS_*), - * dfs_region represents that region. Drivers can use this and the - * @alpha2 to adjust their device's DFS parameters as required. * @intersect: indicates whether the wireless core should intersect * the requested regulatory domain with the presently set regulatory * domain. 
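Stepping back to the red.h changes above: red_calc_qavg_no_idle_time() maintains a fixed-point moving average (qavg += backlog - (qavg >> Wlog)) and red_cmp_thresh()/red_action() classify that average against the two thresholds. A self-contained userspace sketch of those two steps; it deliberately ignores the Wlog scaling of the stored thresholds, the idle-period correction and the random-marking machinery (qcount/qR):

    #include <stdio.h>

    enum { RED_BELOW_MIN_THRESH, RED_BETWEEN_TRESH, RED_ABOVE_MAX_TRESH };

    /* EWMA update from red_calc_qavg_no_idle_time(): new = old + (backlog - old/2^Wlog) */
    static unsigned long red_qavg(unsigned long qavg, unsigned long backlog, int Wlog)
    {
        return qavg + (backlog - (qavg >> Wlog));
    }

    /* three-band classification as in red_cmp_thresh() */
    static int red_band(unsigned long qavg, unsigned long qth_min, unsigned long qth_max)
    {
        if (qavg < qth_min)
            return RED_BELOW_MIN_THRESH;
        else if (qavg >= qth_max)
            return RED_ABOVE_MAX_TRESH;
        else
            return RED_BETWEEN_TRESH;
    }

    int main(void)
    {
        unsigned long qavg = 0, qth_min = 500, qth_max = 1500;
        int Wlog = 3;

        for (int i = 0; i < 6; i++) {
            qavg = red_qavg(qavg, 400UL * (i + 1), Wlog);   /* growing backlog */
            printf("step %d: qavg=%lu band=%d\n",
                   i, qavg, red_band(qavg, qth_min, qth_max));
        }
        return 0;
    }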
@@ -71,7 +67,6 @@ struct regulatory_request { int wiphy_idx; enum nl80211_reg_initiator initiator; char alpha2[2]; - u8 dfs_region; bool intersect; bool processed; enum environment_cap country_ie_env; @@ -98,7 +93,6 @@ struct ieee80211_reg_rule { struct ieee80211_regdomain { u32 n_reg_rules; char alpha2[2]; - u8 dfs_region; struct ieee80211_reg_rule reg_rules[]; }; diff --git a/trunk/include/net/sctp/sctp.h b/trunk/include/net/sctp/sctp.h index d3685615a8b0..6a72a58cde59 100644 --- a/trunk/include/net/sctp/sctp.h +++ b/trunk/include/net/sctp/sctp.h @@ -71,7 +71,7 @@ #include #include -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) #include #include #endif @@ -383,7 +383,7 @@ static inline void sctp_sysctl_unregister(void) { return; } /* Size of Supported Address Parameter for 'x' address types. */ #define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16)) -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) void sctp_v6_pf_init(void); void sctp_v6_pf_exit(void); diff --git a/trunk/include/net/sctp/structs.h b/trunk/include/net/sctp/structs.h index 88949a994538..a15432da27c3 100644 --- a/trunk/include/net/sctp/structs.h +++ b/trunk/include/net/sctp/structs.h @@ -235,7 +235,7 @@ extern struct sctp_globals { /* Flag to indicate whether computing and verifying checksum * is disabled. */ - bool checksum_disable; + int checksum_disable; /* Threshold for rwnd update SACKS. Receive buffer shifted this many * bits is an indicator of when to send and window update SACK. @@ -369,7 +369,7 @@ static inline struct sock *sctp_opt2sk(const struct sctp_sock *sp) return (struct sock *)sp; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) struct sctp6_sock { struct sctp_sock sctp; struct ipv6_pinfo inet6; @@ -1089,7 +1089,6 @@ void sctp_transport_burst_reset(struct sctp_transport *); unsigned long sctp_transport_timeout(struct sctp_transport *); void sctp_transport_reset(struct sctp_transport *); void sctp_transport_update_pmtu(struct sctp_transport *, u32); -void sctp_transport_immediate_rtx(struct sctp_transport *); /* This is the structure we use to queue packets as they come into diff --git a/trunk/include/net/snmp.h b/trunk/include/net/snmp.h index 2f65e1686fc8..8f0f9ac0307f 100644 --- a/trunk/include/net/snmp.h +++ b/trunk/include/net/snmp.h @@ -67,7 +67,7 @@ struct icmp_mib { #define ICMPMSG_MIB_MAX __ICMPMSG_MIB_MAX struct icmpmsg_mib { - atomic_long_t mibs[ICMPMSG_MIB_MAX]; + unsigned long mibs[ICMPMSG_MIB_MAX]; }; /* ICMP6 (IPv6-ICMP) */ @@ -84,7 +84,7 @@ struct icmpv6_mib_device { #define ICMP6MSG_MIB_MAX __ICMP6MSG_MIB_MAX /* per network ns counters */ struct icmpv6msg_mib { - atomic_long_t mibs[ICMP6MSG_MIB_MAX]; + unsigned long mibs[ICMP6MSG_MIB_MAX]; }; /* per device counters, (shared on all cpus) */ struct icmpv6msg_mib_device { diff --git a/trunk/include/net/sock.h b/trunk/include/net/sock.h index bb972d254dff..32e39371fba6 100644 --- a/trunk/include/net/sock.h +++ b/trunk/include/net/sock.h @@ -53,8 +53,6 @@ #include #include #include -#include -#include #include #include @@ -64,22 +62,6 @@ #include #include -struct cgroup; -struct cgroup_subsys; -#ifdef CONFIG_NET -int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss); -void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss); -#else -static inline -int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss) -{ - return 0; -} -static inline -void 
mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss) -{ -} -#endif /* * This structure really needs to be cleaned up. * Most of it is for TCP, and not used by any of @@ -185,7 +167,6 @@ struct sock_common { /* public: */ }; -struct cg_proto; /** * struct sock - network layer representation of sockets * @__sk_common: shared layout with inet_timewait_sock @@ -246,7 +227,6 @@ struct cg_proto; * @sk_security: used by security modules * @sk_mark: generic packet mark * @sk_classid: this socket's cgroup classid - * @sk_cgrp: this socket's cgroup-specific proto data * @sk_write_pending: a write to stream socket waits to start * @sk_state_change: callback to indicate change in the state of the sock * @sk_data_ready: callback to indicate there is data to be processed @@ -326,8 +306,8 @@ struct sock { kmemcheck_bitfield_end(flags); int sk_wmem_queued; gfp_t sk_allocation; - netdev_features_t sk_route_caps; - netdev_features_t sk_route_nocaps; + int sk_route_caps; + int sk_route_nocaps; int sk_gso_type; unsigned int sk_gso_max_size; int sk_rcvlowat; @@ -340,9 +320,6 @@ struct sock { unsigned short sk_ack_backlog; unsigned short sk_max_ack_backlog; __u32 sk_priority; -#ifdef CONFIG_CGROUPS - __u32 sk_cgrp_prioidx; -#endif struct pid *sk_peer_pid; const struct cred *sk_peer_cred; long sk_rcvtimeo; @@ -361,7 +338,6 @@ struct sock { #endif __u32 sk_mark; u32 sk_classid; - struct cg_proto *sk_cgrp; void (*sk_state_change)(struct sock *sk); void (*sk_data_ready)(struct sock *sk, int bytes); void (*sk_write_space)(struct sock *sk); @@ -587,7 +563,6 @@ enum sock_flags { SOCK_FASYNC, /* fasync() active */ SOCK_RXQ_OVFL, SOCK_ZEROCOPY, /* buffers from userspace */ - SOCK_WIFI_STATUS, /* push wifi status to userspace */ }; static inline void sock_copy_flags(struct sock *nsk, struct sock *osk) @@ -860,37 +835,6 @@ struct proto { #ifdef SOCK_REFCNT_DEBUG atomic_t socks; #endif -#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM - /* - * cgroup specific init/deinit functions. Called once for all - * protocols that implement it, from cgroups populate function. - * This function has to setup any files the protocol want to - * appear in the kmem cgroup filesystem. - */ - int (*init_cgroup)(struct cgroup *cgrp, - struct cgroup_subsys *ss); - void (*destroy_cgroup)(struct cgroup *cgrp, - struct cgroup_subsys *ss); - struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg); -#endif -}; - -struct cg_proto { - void (*enter_memory_pressure)(struct sock *sk); - struct res_counter *memory_allocated; /* Current allocated memory. */ - struct percpu_counter *sockets_allocated; /* Current number of sockets. */ - int *memory_pressure; - long *sysctl_mem; - /* - * memcg field is used to find which memcg we belong directly - * Each memcg struct can hold more than one cg_proto, so container_of - * won't really cut. - * - * The elegant solution would be having an inverse function to - * proto_cgroup in struct proto, but that means polluting the structure - * for everybody, instead of just for memcg users. 
- */ - struct mem_cgroup *memcg; }; extern int proto_register(struct proto *prot, int alloc_slab); @@ -909,7 +853,7 @@ static inline void sk_refcnt_debug_dec(struct sock *sk) sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks)); } -inline void sk_refcnt_debug_release(const struct sock *sk) +static inline void sk_refcnt_debug_release(const struct sock *sk) { if (atomic_read(&sk->sk_refcnt) != 1) printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n", @@ -921,208 +865,6 @@ inline void sk_refcnt_debug_release(const struct sock *sk) #define sk_refcnt_debug_release(sk) do { } while (0) #endif /* SOCK_REFCNT_DEBUG */ -#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM -extern struct jump_label_key memcg_socket_limit_enabled; -static inline struct cg_proto *parent_cg_proto(struct proto *proto, - struct cg_proto *cg_proto) -{ - return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg)); -} -#define mem_cgroup_sockets_enabled static_branch(&memcg_socket_limit_enabled) -#else -#define mem_cgroup_sockets_enabled 0 -static inline struct cg_proto *parent_cg_proto(struct proto *proto, - struct cg_proto *cg_proto) -{ - return NULL; -} -#endif - - -static inline bool sk_has_memory_pressure(const struct sock *sk) -{ - return sk->sk_prot->memory_pressure != NULL; -} - -static inline bool sk_under_memory_pressure(const struct sock *sk) -{ - if (!sk->sk_prot->memory_pressure) - return false; - - if (mem_cgroup_sockets_enabled && sk->sk_cgrp) - return !!*sk->sk_cgrp->memory_pressure; - - return !!*sk->sk_prot->memory_pressure; -} - -static inline void sk_leave_memory_pressure(struct sock *sk) -{ - int *memory_pressure = sk->sk_prot->memory_pressure; - - if (!memory_pressure) - return; - - if (*memory_pressure) - *memory_pressure = 0; - - if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { - struct cg_proto *cg_proto = sk->sk_cgrp; - struct proto *prot = sk->sk_prot; - - for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) - if (*cg_proto->memory_pressure) - *cg_proto->memory_pressure = 0; - } - -} - -static inline void sk_enter_memory_pressure(struct sock *sk) -{ - if (!sk->sk_prot->enter_memory_pressure) - return; - - if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { - struct cg_proto *cg_proto = sk->sk_cgrp; - struct proto *prot = sk->sk_prot; - - for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) - cg_proto->enter_memory_pressure(sk); - } - - sk->sk_prot->enter_memory_pressure(sk); -} - -static inline long sk_prot_mem_limits(const struct sock *sk, int index) -{ - long *prot = sk->sk_prot->sysctl_mem; - if (mem_cgroup_sockets_enabled && sk->sk_cgrp) - prot = sk->sk_cgrp->sysctl_mem; - return prot[index]; -} - -static inline void memcg_memory_allocated_add(struct cg_proto *prot, - unsigned long amt, - int *parent_status) -{ - struct res_counter *fail; - int ret; - - ret = res_counter_charge(prot->memory_allocated, - amt << PAGE_SHIFT, &fail); - - if (ret < 0) - *parent_status = OVER_LIMIT; -} - -static inline void memcg_memory_allocated_sub(struct cg_proto *prot, - unsigned long amt) -{ - res_counter_uncharge(prot->memory_allocated, amt << PAGE_SHIFT); -} - -static inline u64 memcg_memory_allocated_read(struct cg_proto *prot) -{ - u64 ret; - ret = res_counter_read_u64(prot->memory_allocated, RES_USAGE); - return ret >> PAGE_SHIFT; -} - -static inline long -sk_memory_allocated(const struct sock *sk) -{ - struct proto *prot = sk->sk_prot; - if (mem_cgroup_sockets_enabled && sk->sk_cgrp) - return memcg_memory_allocated_read(sk->sk_cgrp); - - return 
atomic_long_read(prot->memory_allocated); -} - -static inline long -sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status) -{ - struct proto *prot = sk->sk_prot; - - if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { - memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status); - /* update the root cgroup regardless */ - atomic_long_add_return(amt, prot->memory_allocated); - return memcg_memory_allocated_read(sk->sk_cgrp); - } - - return atomic_long_add_return(amt, prot->memory_allocated); -} - -static inline void -sk_memory_allocated_sub(struct sock *sk, int amt, int parent_status) -{ - struct proto *prot = sk->sk_prot; - - if (mem_cgroup_sockets_enabled && sk->sk_cgrp && - parent_status != OVER_LIMIT) /* Otherwise was uncharged already */ - memcg_memory_allocated_sub(sk->sk_cgrp, amt); - - atomic_long_sub(amt, prot->memory_allocated); -} - -static inline void sk_sockets_allocated_dec(struct sock *sk) -{ - struct proto *prot = sk->sk_prot; - - if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { - struct cg_proto *cg_proto = sk->sk_cgrp; - - for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) - percpu_counter_dec(cg_proto->sockets_allocated); - } - - percpu_counter_dec(prot->sockets_allocated); -} - -static inline void sk_sockets_allocated_inc(struct sock *sk) -{ - struct proto *prot = sk->sk_prot; - - if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { - struct cg_proto *cg_proto = sk->sk_cgrp; - - for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) - percpu_counter_inc(cg_proto->sockets_allocated); - } - - percpu_counter_inc(prot->sockets_allocated); -} - -static inline int -sk_sockets_allocated_read_positive(struct sock *sk) -{ - struct proto *prot = sk->sk_prot; - - if (mem_cgroup_sockets_enabled && sk->sk_cgrp) - return percpu_counter_sum_positive(sk->sk_cgrp->sockets_allocated); - - return percpu_counter_sum_positive(prot->sockets_allocated); -} - -static inline int -proto_sockets_allocated_sum_positive(struct proto *prot) -{ - return percpu_counter_sum_positive(prot->sockets_allocated); -} - -static inline long -proto_memory_allocated(struct proto *prot) -{ - return atomic_long_read(prot->memory_allocated); -} - -static inline bool -proto_memory_pressure(struct proto *prot) -{ - if (!prot->memory_pressure) - return false; - return !!*prot->memory_pressure; -} - #ifdef CONFIG_PROC_FS /* Called with local bh disabled */ @@ -1349,8 +1091,8 @@ extern struct sock *sk_alloc(struct net *net, int family, struct proto *prot); extern void sk_free(struct sock *sk); extern void sk_release_kernel(struct sock *sk); -extern struct sock *sk_clone_lock(const struct sock *sk, - const gfp_t priority); +extern struct sock *sk_clone(const struct sock *sk, + const gfp_t priority); extern struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, @@ -1653,7 +1395,7 @@ static inline int sk_can_gso(const struct sock *sk) extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst); -static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags) +static inline void sk_nocaps_add(struct sock *sk, int flags) { sk->sk_route_nocaps |= flags; sk->sk_route_caps &= ~flags; @@ -1930,7 +1672,7 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk) page = alloc_pages(sk->sk_allocation, 0); if (!page) { - sk_enter_memory_pressure(sk); + sk->sk_prot->enter_memory_pressure(sk); sk_stream_moderate_sndbuf(sk); } return page; @@ -1974,8 +1716,6 @@ static inline int sock_intr_errno(long timeo) extern void __sock_recv_timestamp(struct msghdr *msg, 
struct sock *sk, struct sk_buff *skb); -extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, - struct sk_buff *skb); static __inline__ void sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) @@ -2003,9 +1743,6 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) __sock_recv_timestamp(msg, sk, skb); else sk->sk_stamp = kt; - - if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid) - __sock_recv_wifi_status(msg, sk, skb); } extern void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, diff --git a/trunk/include/net/tcp.h b/trunk/include/net/tcp.h index 0118ea999f67..bb18c4d69aba 100644 --- a/trunk/include/net/tcp.h +++ b/trunk/include/net/tcp.h @@ -44,7 +44,6 @@ #include #include -#include extern struct inet_hashinfo tcp_hashinfo; @@ -230,6 +229,7 @@ extern int sysctl_tcp_fack; extern int sysctl_tcp_reordering; extern int sysctl_tcp_ecn; extern int sysctl_tcp_dsack; +extern long sysctl_tcp_mem[3]; extern int sysctl_tcp_wmem[3]; extern int sysctl_tcp_rmem[3]; extern int sysctl_tcp_app_win; @@ -285,7 +285,7 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift) } if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && - sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2)) + atomic_long_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]) return true; return false; } @@ -628,7 +628,7 @@ extern u32 __tcp_select_window(struct sock *sk); struct tcp_skb_cb { union { struct inet_skb_parm h4; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) struct inet6_skb_parm h6; #endif } header; /* For incoming frames */ @@ -773,12 +773,12 @@ static inline int tcp_is_reno(const struct tcp_sock *tp) static inline int tcp_is_fack(const struct tcp_sock *tp) { - return tp->rx_opt.sack_ok & TCP_FACK_ENABLED; + return tp->rx_opt.sack_ok & 2; } static inline void tcp_enable_fack(struct tcp_sock *tp) { - tp->rx_opt.sack_ok |= TCP_FACK_ENABLED; + tp->rx_opt.sack_ok |= 2; } static inline unsigned int tcp_left_out(const struct tcp_sock *tp) @@ -834,14 +834,6 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk) extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh); extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst); -/* The maximum number of MSS of available cwnd for which TSO defers - * sending if not using sysctl_tcp_tso_win_divisor. - */ -static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp) -{ - return 3; -} - /* Slow start with delack produces 3 packets of burst, so that * it is safe "de facto". 
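The tcp_too_many_orphans() hunk above ends with a memory-pressure test: an orphaned socket is reclaimed early if it still holds send-buffer memory while TCP as a whole is over its hard limit (sysctl_tcp_mem[2] in the version this hunk moves to). A simplified userspace sketch of just that test, with invented numbers standing in for the sysctls and counters, and with the preceding orphan-count check omitted:

    #include <stdio.h>
    #include <stdbool.h>

    static bool orphan_too_expensive(long wmem_queued, long min_sndbuf,
                                     long pages_allocated, long hard_limit)
    {
        /* socket still queues data and the protocol is over its hard
         * memory limit -> drop it rather than keep it around */
        return wmem_queued > min_sndbuf && pages_allocated > hard_limit;
    }

    int main(void)
    {
        printf("under limit: %d\n", orphan_too_expensive(8192, 2048, 700000, 800000));
        printf("over limit:  %d\n", orphan_too_expensive(8192, 2048, 900000, 800000));
        return 0;
    }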
This will be the default - same as * the default reordering threshold - but if reordering increases, @@ -1152,7 +1144,7 @@ struct tcp6_md5sig_key { /* - sock block */ struct tcp_md5sig_info { struct tcp4_md5sig_key *keys4; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) struct tcp6_md5sig_key *keys6; u32 entries6; u32 alloced6; @@ -1179,7 +1171,7 @@ struct tcp6_pseudohdr { union tcp_md5sum_block { struct tcp4_pseudohdr ip4; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) struct tcp6_pseudohdr ip6; #endif }; @@ -1438,8 +1430,7 @@ extern struct request_sock_ops tcp6_request_sock_ops; extern void tcp_v4_destroy_sock(struct sock *sk); extern int tcp_v4_gso_send_check(struct sk_buff *skb); -extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, - netdev_features_t features); +extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features); extern struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb); extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head, diff --git a/trunk/include/net/tcp_memcontrol.h b/trunk/include/net/tcp_memcontrol.h deleted file mode 100644 index 3512082fa909..000000000000 --- a/trunk/include/net/tcp_memcontrol.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef _TCP_MEMCG_H -#define _TCP_MEMCG_H - -struct tcp_memcontrol { - struct cg_proto cg_proto; - /* per-cgroup tcp memory pressure knobs */ - struct res_counter tcp_memory_allocated; - struct percpu_counter tcp_sockets_allocated; - /* those two are read-mostly, leave them at the end */ - long tcp_prot_mem[3]; - int tcp_memory_pressure; -}; - -struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg); -int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss); -void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss); -unsigned long long tcp_max_memory(const struct mem_cgroup *memcg); -void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx); -#endif /* _TCP_MEMCG_H */ diff --git a/trunk/include/net/udp.h b/trunk/include/net/udp.h index e39592f682c3..3b285f402f48 100644 --- a/trunk/include/net/udp.h +++ b/trunk/include/net/udp.h @@ -41,7 +41,7 @@ struct udp_skb_cb { union { struct inet_skb_parm h4; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) struct inet6_skb_parm h6; #endif } header; @@ -194,15 +194,9 @@ extern int udp_lib_setsockopt(struct sock *sk, int level, int optname, extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif); -extern struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, - __be32 daddr, __be16 dport, - int dif, struct udp_table *tbl); extern struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, __be16 dport, int dif); -extern struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, - const struct in6_addr *daddr, __be16 dport, - int dif, struct udp_table *tbl); /* * SNMP statistics for UDP and UDP-Lite @@ -223,7 +217,7 @@ extern struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *sa else SNMP_INC_STATS_USER((net)->mib.udp_stats_in6, field); \ } while(0) -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) #define UDPX_INC_STATS_BH(sk, field) \ do { \ if ((sk)->sk_family == AF_INET) \ @@ -264,6 +258,5 @@ extern void udp4_proc_exit(void); extern void udp_init(void); extern int 
udp4_ufo_send_check(struct sk_buff *skb); -extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, - netdev_features_t features); +extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features); #endif /* _UDP_H */ diff --git a/trunk/include/net/xfrm.h b/trunk/include/net/xfrm.h index 89174e29dca9..b203e14d26b7 100644 --- a/trunk/include/net/xfrm.h +++ b/trunk/include/net/xfrm.h @@ -827,14 +827,6 @@ static inline bool addr_match(const void *token1, const void *token2, return true; } -static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen) -{ - /* C99 6.5.7 (3): u32 << 32 is undefined behaviour */ - if (prefixlen == 0) - return true; - return !((a1 ^ a2) & htonl(0xFFFFFFFFu << (32 - prefixlen))); -} - static __inline__ __be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli) { @@ -1217,8 +1209,8 @@ void xfrm_flowi_addr_get(const struct flowi *fl, memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4)); break; case AF_INET6: - *(struct in6_addr *)saddr->a6 = fl->u.ip6.saddr; - *(struct in6_addr *)daddr->a6 = fl->u.ip6.daddr; + ipv6_addr_copy((struct in6_addr *)&saddr->a6, &fl->u.ip6.saddr); + ipv6_addr_copy((struct in6_addr *)&daddr->a6, &fl->u.ip6.daddr); break; } } diff --git a/trunk/include/trace/events/rcu.h b/trunk/include/trace/events/rcu.h index d2d88bed891b..669fbd62ec25 100644 --- a/trunk/include/trace/events/rcu.h +++ b/trunk/include/trace/events/rcu.h @@ -241,73 +241,24 @@ TRACE_EVENT(rcu_fqs, /* * Tracepoint for dyntick-idle entry/exit events. These take a string - * as argument: "Start" for entering dyntick-idle mode, "End" for - * leaving it, "--=" for events moving towards idle, and "++=" for events - * moving away from idle. "Error on entry: not idle task" and "Error on - * exit: not idle task" indicate that a non-idle task is erroneously - * toying with the idle loop. - * - * These events also take a pair of numbers, which indicate the nesting - * depth before and after the event of interest. Note that task-related - * events use the upper bits of each number, while interrupt-related - * events use the lower bits. + * as argument: "Start" for entering dyntick-idle mode and "End" for + * leaving it. */ TRACE_EVENT(rcu_dyntick, - TP_PROTO(char *polarity, long long oldnesting, long long newnesting), + TP_PROTO(char *polarity), - TP_ARGS(polarity, oldnesting, newnesting), + TP_ARGS(polarity), TP_STRUCT__entry( __field(char *, polarity) - __field(long long, oldnesting) - __field(long long, newnesting) ), TP_fast_assign( __entry->polarity = polarity; - __entry->oldnesting = oldnesting; - __entry->newnesting = newnesting; - ), - - TP_printk("%s %llx %llx", __entry->polarity, - __entry->oldnesting, __entry->newnesting) -); - -/* - * Tracepoint for RCU preparation for idle, the goal being to get RCU - * processing done so that the current CPU can shut off its scheduling - * clock and enter dyntick-idle mode. One way to accomplish this is - * to drain all RCU callbacks from this CPU, and the other is to have - * done everything RCU requires for the current grace period. In this - * latter case, the CPU will be awakened at the end of the current grace - * period in order to process the remainder of its callbacks. - * - * These tracepoints take a string as argument: - * - * "No callbacks": Nothing to do, no callbacks on this CPU. - * "In holdoff": Nothing to do, holding off after unsuccessful attempt. - * "Begin holdoff": Attempt failed, don't retry until next jiffy. - * "Dyntick with callbacks": Entering dyntick-idle despite callbacks. 
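The xfrm.h hunk above removes addr4_match(), whose comment points at C99 6.5.7(3): shifting a 32-bit value by 32 is undefined behaviour, so a /0 prefix ("match everything") has to be special-cased rather than relying on 0xFFFFFFFF << 32 producing 0. A runnable sketch of the same guard, using host byte order and plain uint32_t instead of the kernel's __be32/htonl() handling:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Compare two IPv4 addresses under a prefix length, special-casing
 * prefixlen == 0 so the mask shift never reaches 32 bits. */
static int prefix4_match(uint32_t a1, uint32_t a2, unsigned int prefixlen)
{
	uint32_t mask;

	if (prefixlen == 0)
		return 1;		/* a /0 prefix matches any address */
	mask = 0xFFFFFFFFu << (32 - prefixlen);
	return !((a1 ^ a2) & mask);
}

int main(void)
{
	uint32_t a = 0xC0A80101;	/* 192.168.1.1 */
	uint32_t b = 0xC0A801FE;	/* 192.168.1.254 */

	assert(prefix4_match(a, b, 24));	/* same /24 */
	assert(!prefix4_match(a, b, 32));	/* different hosts */
	assert(prefix4_match(a, b, 0));		/* /0 matches everything */
	printf("prefix match checks passed\n");
	return 0;
}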
- * "More callbacks": Still more callbacks, try again to clear them out. - * "Callbacks drained": All callbacks processed, off to dyntick idle! - * "Timer": Timer fired to cause CPU to continue processing callbacks. - */ -TRACE_EVENT(rcu_prep_idle, - - TP_PROTO(char *reason), - - TP_ARGS(reason), - - TP_STRUCT__entry( - __field(char *, reason) - ), - - TP_fast_assign( - __entry->reason = reason; ), - TP_printk("%s", __entry->reason) + TP_printk("%s", __entry->polarity) ); /* @@ -461,71 +412,27 @@ TRACE_EVENT(rcu_invoke_kfree_callback, /* * Tracepoint for exiting rcu_do_batch after RCU callbacks have been - * invoked. The first argument is the name of the RCU flavor, - * the second argument is number of callbacks actually invoked, - * the third argument (cb) is whether or not any of the callbacks that - * were ready to invoke at the beginning of this batch are still - * queued, the fourth argument (nr) is the return value of need_resched(), - * the fifth argument (iit) is 1 if the current task is the idle task, - * and the sixth argument (risk) is the return value from - * rcu_is_callbacks_kthread(). + * invoked. The first argument is the name of the RCU flavor and + * the second argument is number of callbacks actually invoked. */ TRACE_EVENT(rcu_batch_end, - TP_PROTO(char *rcuname, int callbacks_invoked, - bool cb, bool nr, bool iit, bool risk), + TP_PROTO(char *rcuname, int callbacks_invoked), - TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk), + TP_ARGS(rcuname, callbacks_invoked), TP_STRUCT__entry( __field(char *, rcuname) __field(int, callbacks_invoked) - __field(bool, cb) - __field(bool, nr) - __field(bool, iit) - __field(bool, risk) ), TP_fast_assign( __entry->rcuname = rcuname; __entry->callbacks_invoked = callbacks_invoked; - __entry->cb = cb; - __entry->nr = nr; - __entry->iit = iit; - __entry->risk = risk; - ), - - TP_printk("%s CBs-invoked=%d idle=%c%c%c%c", - __entry->rcuname, __entry->callbacks_invoked, - __entry->cb ? 'C' : '.', - __entry->nr ? 'S' : '.', - __entry->iit ? 'I' : '.', - __entry->risk ? 'R' : '.') -); - -/* - * Tracepoint for rcutorture readers. The first argument is the name - * of the RCU flavor from rcutorture's viewpoint and the second argument - * is the callback address. 
- */ -TRACE_EVENT(rcu_torture_read, - - TP_PROTO(char *rcutorturename, struct rcu_head *rhp), - - TP_ARGS(rcutorturename, rhp), - - TP_STRUCT__entry( - __field(char *, rcutorturename) - __field(struct rcu_head *, rhp) - ), - - TP_fast_assign( - __entry->rcutorturename = rcutorturename; - __entry->rhp = rhp; ), - TP_printk("%s torture read %p", - __entry->rcutorturename, __entry->rhp) + TP_printk("%s CBs-invoked=%d", + __entry->rcuname, __entry->callbacks_invoked) ); #else /* #ifdef CONFIG_RCU_TRACE */ @@ -536,16 +443,13 @@ TRACE_EVENT(rcu_torture_read, #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0) #define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks) do { } while (0) #define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0) -#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0) -#define trace_rcu_prep_idle(reason) do { } while (0) +#define trace_rcu_dyntick(polarity) do { } while (0) #define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0) #define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0) #define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0) #define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0) #define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0) -#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \ - do { } while (0) -#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0) +#define trace_rcu_batch_end(rcuname, callbacks_invoked) do { } while (0) #endif /* #else #ifdef CONFIG_RCU_TRACE */ diff --git a/trunk/include/trace/events/sched.h b/trunk/include/trace/events/sched.h index 6ba596b07a72..959ff18b63b6 100644 --- a/trunk/include/trace/events/sched.h +++ b/trunk/include/trace/events/sched.h @@ -330,13 +330,6 @@ DEFINE_EVENT(sched_stat_template, sched_stat_iowait, TP_PROTO(struct task_struct *tsk, u64 delay), TP_ARGS(tsk, delay)); -/* - * Tracepoint for accounting blocked time (time the task is in uninterruptible). - */ -DEFINE_EVENT(sched_stat_template, sched_stat_blocked, - TP_PROTO(struct task_struct *tsk, u64 delay), - TP_ARGS(tsk, delay)); - /* * Tracepoint for accounting runtime (time the task is executing * on a CPU). @@ -370,56 +363,6 @@ TRACE_EVENT(sched_stat_runtime, (unsigned long long)__entry->vruntime) ); -#ifdef CREATE_TRACE_POINTS -static inline u64 trace_get_sleeptime(struct task_struct *tsk) -{ -#ifdef CONFIG_SCHEDSTATS - u64 block, sleep; - - block = tsk->se.statistics.block_start; - sleep = tsk->se.statistics.sleep_start; - tsk->se.statistics.block_start = 0; - tsk->se.statistics.sleep_start = 0; - - return block ? block : sleep ? sleep : 0; -#else - return 0; -#endif -} -#endif - -/* - * Tracepoint for accounting sleeptime (time the task is sleeping - * or waiting for I/O). - */ -TRACE_EVENT(sched_stat_sleeptime, - - TP_PROTO(struct task_struct *tsk, u64 now), - - TP_ARGS(tsk, now), - - TP_STRUCT__entry( - __array( char, comm, TASK_COMM_LEN ) - __field( pid_t, pid ) - __field( u64, sleeptime ) - ), - - TP_fast_assign( - memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); - __entry->pid = tsk->pid; - __entry->sleeptime = trace_get_sleeptime(tsk); - __entry->sleeptime = __entry->sleeptime ? 
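When CONFIG_RCU_TRACE is off, the block above compiles each trace_*() call down to an empty "do { } while (0)" macro. The wrapper makes the empty expansion behave like a single statement, so call sites inside brace-less if/else still parse correctly. A small, self-contained illustration of the idiom, with TRACING as an illustrative stand-in for the config switch:

#include <stdio.h>

#define TRACING 0

#if TRACING
#define trace_event(msg)	printf("trace: %s\n", msg)
#else
#define trace_event(msg)	do { } while (0)
#endif

int main(void)
{
	int busy = 1;

	if (busy)
		trace_event("busy");	/* expands to a harmless empty statement */
	else
		trace_event("idle");

	printf("done\n");
	return 0;
}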
- now - __entry->sleeptime : 0; - ) - TP_perf_assign( - __perf_count(__entry->sleeptime); - ), - - TP_printk("comm=%s pid=%d sleeptime=%Lu [ns]", - __entry->comm, __entry->pid, - (unsigned long long)__entry->sleeptime) -); - /* * Tracepoint for showing priority inheritance modifying a tasks * priority. diff --git a/trunk/init/Kconfig b/trunk/init/Kconfig index a075765d5fbe..43298f9810fb 100644 --- a/trunk/init/Kconfig +++ b/trunk/init/Kconfig @@ -469,14 +469,14 @@ config RCU_FANOUT_EXACT config RCU_FAST_NO_HZ bool "Accelerate last non-dyntick-idle CPU's grace periods" - depends on NO_HZ && SMP + depends on TREE_RCU && NO_HZ && SMP default n help This option causes RCU to attempt to accelerate grace periods - in order to allow CPUs to enter dynticks-idle state more - quickly. On the other hand, this option increases the overhead - of the dynticks-idle checking, particularly on systems with - large numbers of CPUs. + in order to allow the final CPU to enter dynticks-idle state + more quickly. On the other hand, this option increases the + overhead of the dynticks-idle checking, particularly on systems + with large numbers of CPUs. Say Y if energy efficiency is critically important, particularly if you have relatively few CPUs. @@ -689,17 +689,6 @@ config CGROUP_MEM_RES_CTLR_SWAP_ENABLED For those who want to have the feature enabled by default should select this option (if, for some reason, they need to disable it then swapaccount=0 does the trick). -config CGROUP_MEM_RES_CTLR_KMEM - bool "Memory Resource Controller Kernel Memory accounting (EXPERIMENTAL)" - depends on CGROUP_MEM_RES_CTLR && EXPERIMENTAL - default n - help - The Kernel Memory extension for Memory Resource Controller can limit - the amount of memory used by kernel objects in the system. Those are - fundamentally different from the entities handled by the standard - Memory Controller, which are page-based, and can be swapped. Users of - the kmem extension can use it to guarantee that no group of processes - will ever exhaust kernel resources alone. config CGROUP_PERF bool "Enable perf_event per-cpu per-container group (cgroup) monitoring" diff --git a/trunk/init/main.c b/trunk/init/main.c index 2c76efb513c2..217ed23e9487 100644 --- a/trunk/init/main.c +++ b/trunk/init/main.c @@ -469,12 +469,13 @@ asmlinkage void __init start_kernel(void) char * command_line; extern const struct kernel_param __start___param[], __stop___param[]; + smp_setup_processor_id(); + /* * Need to run as early as possible, to initialize the * lockdep hash: */ lockdep_init(); - smp_setup_processor_id(); debug_objects_early_init(); /* diff --git a/trunk/kernel/Makefile b/trunk/kernel/Makefile index f70396e5a24b..e898c5b9d02c 100644 --- a/trunk/kernel/Makefile +++ b/trunk/kernel/Makefile @@ -2,15 +2,16 @@ # Makefile for the linux kernel. 
# -obj-y = fork.o exec_domain.o panic.o printk.o \ +obj-y = sched.o fork.o exec_domain.o panic.o printk.o \ cpu.o exit.o itimer.o time.o softirq.o resource.o \ sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \ signal.o sys.o kmod.o workqueue.o pid.o \ rcupdate.o extable.o params.o posix-timers.o \ kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ - notifier.o ksysfs.o cred.o \ - async.o range.o groups.o + notifier.o ksysfs.o sched_clock.o cred.o \ + async.o range.o +obj-y += groups.o ifdef CONFIG_FUNCTION_TRACER # Do not trace debug files and internal ftrace files @@ -19,11 +20,10 @@ CFLAGS_REMOVE_lockdep_proc.o = -pg CFLAGS_REMOVE_mutex-debug.o = -pg CFLAGS_REMOVE_rtmutex-debug.o = -pg CFLAGS_REMOVE_cgroup-debug.o = -pg +CFLAGS_REMOVE_sched_clock.o = -pg CFLAGS_REMOVE_irq_work.o = -pg endif -obj-y += sched/ - obj-$(CONFIG_FREEZER) += freezer.o obj-$(CONFIG_PROFILING) += profile.o obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o @@ -99,6 +99,7 @@ obj-$(CONFIG_TRACING) += trace/ obj-$(CONFIG_X86_DS) += trace/ obj-$(CONFIG_RING_BUFFER) += trace/ obj-$(CONFIG_TRACEPOINTS) += trace/ +obj-$(CONFIG_SMP) += sched_cpupri.o obj-$(CONFIG_IRQ_WORK) += irq_work.o obj-$(CONFIG_CPU_PM) += cpu_pm.o @@ -109,6 +110,15 @@ obj-$(CONFIG_PADATA) += padata.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o +ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) +# According to Alan Modra , the -fno-omit-frame-pointer is +# needed for x86 only. Why this used to be enabled for all architectures is beyond +# me. I suspect most platforms don't need this, but until we know that for sure +# I turn this off for IA-64 only. Andreas Schwab says it's also needed on m68k +# to get a correct value for the wait-channel (WCHAN in ps). --davidm +CFLAGS_sched.o := $(PROFILING) -fno-omit-frame-pointer +endif + $(obj)/configs.o: $(obj)/config_data.h # config_data.h contains the same information as ikconfig.h but gzipped. 
diff --git a/trunk/kernel/acct.c b/trunk/kernel/acct.c index 203dfead2e06..fa7eb3de2ddc 100644 --- a/trunk/kernel/acct.c +++ b/trunk/kernel/acct.c @@ -613,8 +613,8 @@ void acct_collect(long exitcode, int group_dead) pacct->ac_flag |= ACORE; if (current->flags & PF_SIGNALED) pacct->ac_flag |= AXSIG; - pacct->ac_utime += current->utime; - pacct->ac_stime += current->stime; + pacct->ac_utime = cputime_add(pacct->ac_utime, current->utime); + pacct->ac_stime = cputime_add(pacct->ac_stime, current->stime); pacct->ac_minflt += current->min_flt; pacct->ac_majflt += current->maj_flt; spin_unlock_irq(¤t->sighand->siglock); diff --git a/trunk/kernel/cpu.c b/trunk/kernel/cpu.c index 5ca38d5d238a..563f13609470 100644 --- a/trunk/kernel/cpu.c +++ b/trunk/kernel/cpu.c @@ -178,7 +178,8 @@ static inline void check_for_tasks(int cpu) write_lock_irq(&tasklist_lock); for_each_process(p) { if (task_cpu(p) == cpu && p->state == TASK_RUNNING && - (p->utime || p->stime)) + (!cputime_eq(p->utime, cputime_zero) || + !cputime_eq(p->stime, cputime_zero))) printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d " "(state = %ld, flags = %x)\n", p->comm, task_pid_nr(p), cpu, @@ -379,7 +380,6 @@ int __cpuinit cpu_up(unsigned int cpu) cpu_maps_update_done(); return err; } -EXPORT_SYMBOL_GPL(cpu_up); #ifdef CONFIG_PM_SLEEP_SMP static cpumask_var_t frozen_cpus; diff --git a/trunk/kernel/debug/kdb/kdb_support.c b/trunk/kernel/debug/kdb/kdb_support.c index 7d6fb40d2188..5532dd37aa86 100644 --- a/trunk/kernel/debug/kdb/kdb_support.c +++ b/trunk/kernel/debug/kdb/kdb_support.c @@ -636,7 +636,7 @@ char kdb_task_state_char (const struct task_struct *p) (p->exit_state & EXIT_ZOMBIE) ? 'Z' : (p->exit_state & EXIT_DEAD) ? 'E' : (p->state & TASK_INTERRUPTIBLE) ? 'S' : '?'; - if (is_idle_task(p)) { + if (p->pid == 0) { /* Idle task. Is it really idle, apart from the kdb * interrupt? */ if (!kdb_task_has_cpu(p) || kgdb_info[cpu].irq_depth == 1) { diff --git a/trunk/kernel/events/Makefile b/trunk/kernel/events/Makefile index 22d901f9caf4..89e5e8aa4c36 100644 --- a/trunk/kernel/events/Makefile +++ b/trunk/kernel/events/Makefile @@ -2,5 +2,5 @@ ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_core.o = -pg endif -obj-y := core.o ring_buffer.o callchain.o +obj-y := core.o ring_buffer.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o diff --git a/trunk/kernel/events/callchain.c b/trunk/kernel/events/callchain.c deleted file mode 100644 index 057e24b665cf..000000000000 --- a/trunk/kernel/events/callchain.c +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Performance events callchain code, extracted from core.c: - * - * Copyright (C) 2008 Thomas Gleixner - * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar - * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra - * Copyright © 2009 Paul Mackerras, IBM Corp. 
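The acct.c and cpu.c hunks above revert open-coded arithmetic on cputime_t back to the cputime_add()/cputime_eq() helpers. On the default jiffies-based cputime implementation these helpers were thin wrappers around ordinary integer operations; the typedef and macro bodies in the sketch below are illustrative assumptions modelled on that, not copied from the kernel headers.

#include <assert.h>
#include <stdio.h>

typedef unsigned long cputime_t;	/* assumed jiffies-style representation */

#define cputime_zero		((cputime_t)0)
#define cputime_add(a, b)	((a) + (b))
#define cputime_eq(a, b)	((a) == (b))

struct acct_totals {
	cputime_t ac_utime;
	cputime_t ac_stime;
};

static void acct_collect_times(struct acct_totals *t,
			       cputime_t utime, cputime_t stime)
{
	/* Equivalent to t->ac_utime += utime, spelled via the helpers. */
	t->ac_utime = cputime_add(t->ac_utime, utime);
	t->ac_stime = cputime_add(t->ac_stime, stime);
}

int main(void)
{
	struct acct_totals t = { cputime_zero, cputime_zero };

	acct_collect_times(&t, 10, 4);
	acct_collect_times(&t, 5, 1);
	assert(cputime_eq(t.ac_utime, 15) && cputime_eq(t.ac_stime, 5));
	printf("utime=%lu stime=%lu\n", t.ac_utime, t.ac_stime);
	return 0;
}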
- * - * For licensing details see kernel-base/COPYING - */ - -#include -#include -#include "internal.h" - -struct callchain_cpus_entries { - struct rcu_head rcu_head; - struct perf_callchain_entry *cpu_entries[0]; -}; - -static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]); -static atomic_t nr_callchain_events; -static DEFINE_MUTEX(callchain_mutex); -static struct callchain_cpus_entries *callchain_cpus_entries; - - -__weak void perf_callchain_kernel(struct perf_callchain_entry *entry, - struct pt_regs *regs) -{ -} - -__weak void perf_callchain_user(struct perf_callchain_entry *entry, - struct pt_regs *regs) -{ -} - -static void release_callchain_buffers_rcu(struct rcu_head *head) -{ - struct callchain_cpus_entries *entries; - int cpu; - - entries = container_of(head, struct callchain_cpus_entries, rcu_head); - - for_each_possible_cpu(cpu) - kfree(entries->cpu_entries[cpu]); - - kfree(entries); -} - -static void release_callchain_buffers(void) -{ - struct callchain_cpus_entries *entries; - - entries = callchain_cpus_entries; - rcu_assign_pointer(callchain_cpus_entries, NULL); - call_rcu(&entries->rcu_head, release_callchain_buffers_rcu); -} - -static int alloc_callchain_buffers(void) -{ - int cpu; - int size; - struct callchain_cpus_entries *entries; - - /* - * We can't use the percpu allocation API for data that can be - * accessed from NMI. Use a temporary manual per cpu allocation - * until that gets sorted out. - */ - size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]); - - entries = kzalloc(size, GFP_KERNEL); - if (!entries) - return -ENOMEM; - - size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS; - - for_each_possible_cpu(cpu) { - entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL, - cpu_to_node(cpu)); - if (!entries->cpu_entries[cpu]) - goto fail; - } - - rcu_assign_pointer(callchain_cpus_entries, entries); - - return 0; - -fail: - for_each_possible_cpu(cpu) - kfree(entries->cpu_entries[cpu]); - kfree(entries); - - return -ENOMEM; -} - -int get_callchain_buffers(void) -{ - int err = 0; - int count; - - mutex_lock(&callchain_mutex); - - count = atomic_inc_return(&nr_callchain_events); - if (WARN_ON_ONCE(count < 1)) { - err = -EINVAL; - goto exit; - } - - if (count > 1) { - /* If the allocation failed, give up */ - if (!callchain_cpus_entries) - err = -ENOMEM; - goto exit; - } - - err = alloc_callchain_buffers(); - if (err) - release_callchain_buffers(); -exit: - mutex_unlock(&callchain_mutex); - - return err; -} - -void put_callchain_buffers(void) -{ - if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) { - release_callchain_buffers(); - mutex_unlock(&callchain_mutex); - } -} - -static struct perf_callchain_entry *get_callchain_entry(int *rctx) -{ - int cpu; - struct callchain_cpus_entries *entries; - - *rctx = get_recursion_context(__get_cpu_var(callchain_recursion)); - if (*rctx == -1) - return NULL; - - entries = rcu_dereference(callchain_cpus_entries); - if (!entries) - return NULL; - - cpu = smp_processor_id(); - - return &entries->cpu_entries[cpu][*rctx]; -} - -static void -put_callchain_entry(int rctx) -{ - put_recursion_context(__get_cpu_var(callchain_recursion), rctx); -} - -struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) -{ - int rctx; - struct perf_callchain_entry *entry; - - - entry = get_callchain_entry(&rctx); - if (rctx == -1) - return NULL; - - if (!entry) - goto exit_put; - - entry->nr = 0; - - if (!user_mode(regs)) { - perf_callchain_store(entry, PERF_CONTEXT_KERNEL); - 
perf_callchain_kernel(entry, regs); - if (current->mm) - regs = task_pt_regs(current); - else - regs = NULL; - } - - if (regs) { - perf_callchain_store(entry, PERF_CONTEXT_USER); - perf_callchain_user(entry, regs); - } - -exit_put: - put_callchain_entry(rctx); - - return entry; -} diff --git a/trunk/kernel/events/core.c b/trunk/kernel/events/core.c index 890eb02c2f21..58690af323e4 100644 --- a/trunk/kernel/events/core.c +++ b/trunk/kernel/events/core.c @@ -128,7 +128,7 @@ enum event_type_t { * perf_sched_events : >0 events exist * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu */ -struct jump_label_key_deferred perf_sched_events __read_mostly; +struct jump_label_key perf_sched_events __read_mostly; static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); static atomic_t nr_mmap_events __read_mostly; @@ -1130,8 +1130,6 @@ event_sched_out(struct perf_event *event, if (!is_software_event(event)) cpuctx->active_oncpu--; ctx->nr_active--; - if (event->attr.freq && event->attr.sample_freq) - ctx->nr_freq--; if (event->attr.exclusive || !cpuctx->active_oncpu) cpuctx->exclusive = 0; } @@ -1327,7 +1325,6 @@ void perf_event_disable(struct perf_event *event) } raw_spin_unlock_irq(&ctx->lock); } -EXPORT_SYMBOL_GPL(perf_event_disable); static void perf_set_shadow_time(struct perf_event *event, struct perf_event_context *ctx, @@ -1409,8 +1406,6 @@ event_sched_in(struct perf_event *event, if (!is_software_event(event)) cpuctx->active_oncpu++; ctx->nr_active++; - if (event->attr.freq && event->attr.sample_freq) - ctx->nr_freq++; if (event->attr.exclusive) cpuctx->exclusive = 1; @@ -1667,7 +1662,8 @@ perf_install_in_context(struct perf_event_context *ctx, * Note: this works for group members as well as group leaders * since the non-leader members' sibling_lists will be empty. 
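The callchain allocation code earlier in this hunk sizes its per-CPU pointer table with offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]): a fixed header plus one pointer slot per possible CPU, allocated as a single block. A userspace sketch of the same flexible-array sizing idiom; plain calloc() stands in for kzalloc(), and the CPU count is a made-up compile-time constant so the offsetof expression stays a portable constant expression:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_CPU_IDS 8			/* illustrative CPU count */

struct cpus_entries {
	unsigned long refcount;		/* stand-in for the rcu_head header */
	void *cpu_entries[];		/* one slot per possible CPU */
};

int main(void)
{
	size_t size = offsetof(struct cpus_entries, cpu_entries[NR_CPU_IDS]);
	struct cpus_entries *entries = calloc(1, size);

	if (!entries)
		return 1;
	printf("header + %d pointer slots = %zu bytes\n", NR_CPU_IDS, size);
	free(entries);
	return 0;
}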
*/ -static void __perf_event_mark_enabled(struct perf_event *event) +static void __perf_event_mark_enabled(struct perf_event *event, + struct perf_event_context *ctx) { struct perf_event *sub; u64 tstamp = perf_event_time(event); @@ -1705,7 +1701,7 @@ static int __perf_event_enable(void *info) */ perf_cgroup_set_timestamp(current, ctx); - __perf_event_mark_enabled(event); + __perf_event_mark_enabled(event, ctx); if (!event_filter_match(event)) { if (is_cgroup_event(event)) @@ -1786,7 +1782,7 @@ void perf_event_enable(struct perf_event *event) retry: if (!ctx->is_active) { - __perf_event_mark_enabled(event); + __perf_event_mark_enabled(event, ctx); goto out; } @@ -1813,7 +1809,6 @@ void perf_event_enable(struct perf_event *event) out: raw_spin_unlock_irq(&ctx->lock); } -EXPORT_SYMBOL_GPL(perf_event_enable); int perf_event_refresh(struct perf_event *event, int refresh) { @@ -2332,9 +2327,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period) u64 interrupts, now; s64 delta; - if (!ctx->nr_freq) - return; - list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { if (event->state != PERF_EVENT_STATE_ACTIVE) continue; @@ -2390,14 +2382,12 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx) { u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC; struct perf_event_context *ctx = NULL; - int rotate = 0, remove = 1, freq = 0; + int rotate = 0, remove = 1; if (cpuctx->ctx.nr_events) { remove = 0; if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) rotate = 1; - if (cpuctx->ctx.nr_freq) - freq = 1; } ctx = cpuctx->task_ctx; @@ -2405,40 +2395,33 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx) remove = 0; if (ctx->nr_events != ctx->nr_active) rotate = 1; - if (ctx->nr_freq) - freq = 1; } - if (!rotate && !freq) - goto done; - perf_ctx_lock(cpuctx, cpuctx->task_ctx); perf_pmu_disable(cpuctx->ctx.pmu); + perf_ctx_adjust_freq(&cpuctx->ctx, interval); + if (ctx) + perf_ctx_adjust_freq(ctx, interval); - if (freq) { - perf_ctx_adjust_freq(&cpuctx->ctx, interval); - if (ctx) - perf_ctx_adjust_freq(ctx, interval); - } - - if (rotate) { - cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); - if (ctx) - ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); + if (!rotate) + goto done; - rotate_ctx(&cpuctx->ctx); - if (ctx) - rotate_ctx(ctx); + cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); + if (ctx) + ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); - perf_event_sched_in(cpuctx, ctx, current); - } + rotate_ctx(&cpuctx->ctx); + if (ctx) + rotate_ctx(ctx); - perf_pmu_enable(cpuctx->ctx.pmu); - perf_ctx_unlock(cpuctx, cpuctx->task_ctx); + perf_event_sched_in(cpuctx, ctx, current); done: if (remove) list_del_init(&cpuctx->rotation_list); + + perf_pmu_enable(cpuctx->ctx.pmu); + perf_ctx_unlock(cpuctx, cpuctx->task_ctx); } void perf_event_task_tick(void) @@ -2465,7 +2448,7 @@ static int event_enable_on_exec(struct perf_event *event, if (event->state >= PERF_EVENT_STATE_INACTIVE) return 0; - __perf_event_mark_enabled(event); + __perf_event_mark_enabled(event, ctx); return 1; } @@ -2497,7 +2480,13 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx) raw_spin_lock(&ctx->lock); task_ctx_sched_out(ctx); - list_for_each_entry(event, &ctx->event_list, event_entry) { + list_for_each_entry(event, &ctx->pinned_groups, group_entry) { + ret = event_enable_on_exec(event, ctx); + if (ret) + enabled = 1; + } + + list_for_each_entry(event, &ctx->flexible_groups, group_entry) { ret = event_enable_on_exec(event, ctx); if (ret) enabled = 1; @@ -2584,6 +2573,215 @@ static 
u64 perf_event_read(struct perf_event *event) return perf_event_count(event); } +/* + * Callchain support + */ + +struct callchain_cpus_entries { + struct rcu_head rcu_head; + struct perf_callchain_entry *cpu_entries[0]; +}; + +static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]); +static atomic_t nr_callchain_events; +static DEFINE_MUTEX(callchain_mutex); +struct callchain_cpus_entries *callchain_cpus_entries; + + +__weak void perf_callchain_kernel(struct perf_callchain_entry *entry, + struct pt_regs *regs) +{ +} + +__weak void perf_callchain_user(struct perf_callchain_entry *entry, + struct pt_regs *regs) +{ +} + +static void release_callchain_buffers_rcu(struct rcu_head *head) +{ + struct callchain_cpus_entries *entries; + int cpu; + + entries = container_of(head, struct callchain_cpus_entries, rcu_head); + + for_each_possible_cpu(cpu) + kfree(entries->cpu_entries[cpu]); + + kfree(entries); +} + +static void release_callchain_buffers(void) +{ + struct callchain_cpus_entries *entries; + + entries = callchain_cpus_entries; + rcu_assign_pointer(callchain_cpus_entries, NULL); + call_rcu(&entries->rcu_head, release_callchain_buffers_rcu); +} + +static int alloc_callchain_buffers(void) +{ + int cpu; + int size; + struct callchain_cpus_entries *entries; + + /* + * We can't use the percpu allocation API for data that can be + * accessed from NMI. Use a temporary manual per cpu allocation + * until that gets sorted out. + */ + size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]); + + entries = kzalloc(size, GFP_KERNEL); + if (!entries) + return -ENOMEM; + + size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS; + + for_each_possible_cpu(cpu) { + entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL, + cpu_to_node(cpu)); + if (!entries->cpu_entries[cpu]) + goto fail; + } + + rcu_assign_pointer(callchain_cpus_entries, entries); + + return 0; + +fail: + for_each_possible_cpu(cpu) + kfree(entries->cpu_entries[cpu]); + kfree(entries); + + return -ENOMEM; +} + +static int get_callchain_buffers(void) +{ + int err = 0; + int count; + + mutex_lock(&callchain_mutex); + + count = atomic_inc_return(&nr_callchain_events); + if (WARN_ON_ONCE(count < 1)) { + err = -EINVAL; + goto exit; + } + + if (count > 1) { + /* If the allocation failed, give up */ + if (!callchain_cpus_entries) + err = -ENOMEM; + goto exit; + } + + err = alloc_callchain_buffers(); + if (err) + release_callchain_buffers(); +exit: + mutex_unlock(&callchain_mutex); + + return err; +} + +static void put_callchain_buffers(void) +{ + if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) { + release_callchain_buffers(); + mutex_unlock(&callchain_mutex); + } +} + +static int get_recursion_context(int *recursion) +{ + int rctx; + + if (in_nmi()) + rctx = 3; + else if (in_irq()) + rctx = 2; + else if (in_softirq()) + rctx = 1; + else + rctx = 0; + + if (recursion[rctx]) + return -1; + + recursion[rctx]++; + barrier(); + + return rctx; +} + +static inline void put_recursion_context(int *recursion, int rctx) +{ + barrier(); + recursion[rctx]--; +} + +static struct perf_callchain_entry *get_callchain_entry(int *rctx) +{ + int cpu; + struct callchain_cpus_entries *entries; + + *rctx = get_recursion_context(__get_cpu_var(callchain_recursion)); + if (*rctx == -1) + return NULL; + + entries = rcu_dereference(callchain_cpus_entries); + if (!entries) + return NULL; + + cpu = smp_processor_id(); + + return &entries->cpu_entries[cpu][*rctx]; +} + +static void +put_callchain_entry(int rctx) +{ + 
put_recursion_context(__get_cpu_var(callchain_recursion), rctx); +} + +static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) +{ + int rctx; + struct perf_callchain_entry *entry; + + + entry = get_callchain_entry(&rctx); + if (rctx == -1) + return NULL; + + if (!entry) + goto exit_put; + + entry->nr = 0; + + if (!user_mode(regs)) { + perf_callchain_store(entry, PERF_CONTEXT_KERNEL); + perf_callchain_kernel(entry, regs); + if (current->mm) + regs = task_pt_regs(current); + else + regs = NULL; + } + + if (regs) { + perf_callchain_store(entry, PERF_CONTEXT_USER); + perf_callchain_user(entry, regs); + } + +exit_put: + put_callchain_entry(rctx); + + return entry; +} + /* * Initialize the perf_event context in a task_struct: */ @@ -2748,7 +2946,7 @@ static void free_event(struct perf_event *event) if (!event->parent) { if (event->attach_state & PERF_ATTACH_TASK) - jump_label_dec_deferred(&perf_sched_events); + jump_label_dec(&perf_sched_events); if (event->attr.mmap || event->attr.mmap_data) atomic_dec(&nr_mmap_events); if (event->attr.comm) @@ -2759,7 +2957,7 @@ static void free_event(struct perf_event *event) put_callchain_buffers(); if (is_cgroup_event(event)) { atomic_dec(&per_cpu(perf_cgroup_events, event->cpu)); - jump_label_dec_deferred(&perf_sched_events); + jump_label_dec(&perf_sched_events); } } @@ -4622,6 +4820,7 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow, struct hw_perf_event *hwc = &event->hw; int throttle = 0; + data->period = event->hw.last_period; if (!overflow) overflow = perf_swevent_set_period(event); @@ -4655,12 +4854,6 @@ static void perf_swevent_event(struct perf_event *event, u64 nr, if (!is_sampling_event(event)) return; - if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { - data->period = nr; - return perf_swevent_overflow(event, 1, data, regs); - } else - data->period = event->hw.last_period; - if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) return perf_swevent_overflow(event, 1, data, regs); @@ -5173,7 +5366,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) regs = get_irq_regs(); if (regs && !perf_exclude_event(event, regs)) { - if (!(event->attr.exclude_idle && is_idle_task(current))) + if (!(event->attr.exclude_idle && current->pid == 0)) if (perf_event_overflow(event, &data, regs)) ret = HRTIMER_NORESTART; } @@ -5788,7 +5981,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, if (!event->parent) { if (event->attach_state & PERF_ATTACH_TASK) - jump_label_inc(&perf_sched_events.key); + jump_label_inc(&perf_sched_events); if (event->attr.mmap || event->attr.mmap_data) atomic_inc(&nr_mmap_events); if (event->attr.comm) @@ -6026,7 +6219,7 @@ SYSCALL_DEFINE5(perf_event_open, * - that may need work on context switch */ atomic_inc(&per_cpu(perf_cgroup_events, event->cpu)); - jump_label_inc(&perf_sched_events.key); + jump_label_inc(&perf_sched_events); } /* @@ -6872,9 +7065,6 @@ void __init perf_event_init(void) ret = init_hw_breakpoint(); WARN(ret, "hw_breakpoint initialization failed with: %d", ret); - - /* do not patch jump label more than once per second */ - jump_label_rate_limit(&perf_sched_events, HZ); } static int __init perf_event_sysfs_init(void) diff --git a/trunk/kernel/events/internal.h b/trunk/kernel/events/internal.h index b0b107f90afc..64568a699375 100644 --- a/trunk/kernel/events/internal.h +++ b/trunk/kernel/events/internal.h @@ -1,10 +1,6 @@ #ifndef _KERNEL_EVENTS_INTERNAL_H #define _KERNEL_EVENTS_INTERNAL_H -#include - -/* Buffer 
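The recursion helpers re-added above keep one counter per context level (task, softirq, irq, NMI); entering a level that is already active returns -1, which is how callchain capture refuses to recurse into itself. A reduced, single-threaded model of that guard, with the in_nmi()/in_irq()/in_softirq() level selection replaced by an explicit argument:

#include <assert.h>
#include <stdio.h>

#define NR_CONTEXTS 4

static int recursion[NR_CONTEXTS];

static int get_recursion_context(int level)
{
	if (recursion[level])
		return -1;		/* already inside this context level */
	recursion[level]++;
	return level;
}

static void put_recursion_context(int rctx)
{
	recursion[rctx]--;
}

int main(void)
{
	int rctx = get_recursion_context(0);	/* process context */

	assert(rctx == 0);
	assert(get_recursion_context(0) == -1);	/* nested entry is refused */
	assert(get_recursion_context(2) == 2);	/* a different level still works */
	put_recursion_context(2);
	put_recursion_context(rctx);
	printf("recursion guard behaves as expected\n");
	return 0;
}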
handling */ - #define RING_BUFFER_WRITABLE 0x01 struct ring_buffer { @@ -71,7 +67,7 @@ static inline int page_order(struct ring_buffer *rb) } #endif -static inline unsigned long perf_data_size(struct ring_buffer *rb) +static unsigned long perf_data_size(struct ring_buffer *rb) { return rb->nr_pages << (PAGE_SHIFT + page_order(rb)); } @@ -100,37 +96,4 @@ __output_copy(struct perf_output_handle *handle, } while (len); } -/* Callchain handling */ -extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); -extern int get_callchain_buffers(void); -extern void put_callchain_buffers(void); - -static inline int get_recursion_context(int *recursion) -{ - int rctx; - - if (in_nmi()) - rctx = 3; - else if (in_irq()) - rctx = 2; - else if (in_softirq()) - rctx = 1; - else - rctx = 0; - - if (recursion[rctx]) - return -1; - - recursion[rctx]++; - barrier(); - - return rctx; -} - -static inline void put_recursion_context(int *recursion, int rctx) -{ - barrier(); - recursion[rctx]--; -} - #endif /* _KERNEL_EVENTS_INTERNAL_H */ diff --git a/trunk/kernel/exit.c b/trunk/kernel/exit.c index d579a459309d..d0b7d988f873 100644 --- a/trunk/kernel/exit.c +++ b/trunk/kernel/exit.c @@ -121,9 +121,9 @@ static void __exit_signal(struct task_struct *tsk) * We won't ever get here for the group leader, since it * will have been the last reference on the signal_struct. */ - sig->utime += tsk->utime; - sig->stime += tsk->stime; - sig->gtime += tsk->gtime; + sig->utime = cputime_add(sig->utime, tsk->utime); + sig->stime = cputime_add(sig->stime, tsk->stime); + sig->gtime = cputime_add(sig->gtime, tsk->gtime); sig->min_flt += tsk->min_flt; sig->maj_flt += tsk->maj_flt; sig->nvcsw += tsk->nvcsw; @@ -1255,9 +1255,19 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) spin_lock_irq(&p->real_parent->sighand->siglock); psig = p->real_parent->signal; sig = p->signal; - psig->cutime += tgutime + sig->cutime; - psig->cstime += tgstime + sig->cstime; - psig->cgtime += p->gtime + sig->gtime + sig->cgtime; + psig->cutime = + cputime_add(psig->cutime, + cputime_add(tgutime, + sig->cutime)); + psig->cstime = + cputime_add(psig->cstime, + cputime_add(tgstime, + sig->cstime)); + psig->cgtime = + cputime_add(psig->cgtime, + cputime_add(p->gtime, + cputime_add(sig->gtime, + sig->cgtime))); psig->cmin_flt += p->min_flt + sig->min_flt + sig->cmin_flt; psig->cmaj_flt += @@ -1530,15 +1540,8 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, } /* dead body doesn't have much to contribute */ - if (unlikely(p->exit_state == EXIT_DEAD)) { - /* - * But do not ignore this task until the tracer does - * wait_task_zombie()->do_notify_parent(). - */ - if (likely(!ptrace) && unlikely(ptrace_reparented(p))) - wo->notask_error = 0; + if (p->exit_state == EXIT_DEAD) return 0; - } /* slay zombie? 
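perf_data_size() above computes the ring buffer's data area as nr_pages << (PAGE_SHIFT + page_order): nr_pages chunks, each chunk being 2^page_order pages of 2^PAGE_SHIFT bytes. A worked example assuming the common 4 KiB page size; the page counts are illustrative:

#include <stdio.h>

#define PAGE_SHIFT 12			/* 4 KiB pages assumed */

static unsigned long data_size(unsigned long nr_pages, int page_order)
{
	return nr_pages << (PAGE_SHIFT + page_order);
}

int main(void)
{
	/* 8 single-page chunks: 8 * 4096 = 32768 bytes. */
	printf("order 0: %lu bytes\n", data_size(8, 0));
	/* 8 chunks of 4 contiguous pages each: 8 * 16384 = 131072 bytes. */
	printf("order 2: %lu bytes\n", data_size(8, 2));
	return 0;
}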
*/ if (p->exit_state == EXIT_ZOMBIE) { diff --git a/trunk/kernel/fork.c b/trunk/kernel/fork.c index b058c5820ecd..da4a6a10d088 100644 --- a/trunk/kernel/fork.c +++ b/trunk/kernel/fork.c @@ -1023,8 +1023,8 @@ void mm_init_owner(struct mm_struct *mm, struct task_struct *p) */ static void posix_cpu_timers_init(struct task_struct *tsk) { - tsk->cputime_expires.prof_exp = 0; - tsk->cputime_expires.virt_exp = 0; + tsk->cputime_expires.prof_exp = cputime_zero; + tsk->cputime_expires.virt_exp = cputime_zero; tsk->cputime_expires.sched_exp = 0; INIT_LIST_HEAD(&tsk->cpu_timers[0]); INIT_LIST_HEAD(&tsk->cpu_timers[1]); @@ -1132,10 +1132,14 @@ static struct task_struct *copy_process(unsigned long clone_flags, init_sigpending(&p->pending); - p->utime = p->stime = p->gtime = 0; - p->utimescaled = p->stimescaled = 0; + p->utime = cputime_zero; + p->stime = cputime_zero; + p->gtime = cputime_zero; + p->utimescaled = cputime_zero; + p->stimescaled = cputime_zero; #ifndef CONFIG_VIRT_CPU_ACCOUNTING - p->prev_utime = p->prev_stime = 0; + p->prev_utime = cputime_zero; + p->prev_stime = cputime_zero; #endif #if defined(SPLIT_RSS_COUNTING) memset(&p->rss_stat, 0, sizeof(p->rss_stat)); diff --git a/trunk/kernel/futex.c b/trunk/kernel/futex.c index 1614be20173d..ea87f4d2f455 100644 --- a/trunk/kernel/futex.c +++ b/trunk/kernel/futex.c @@ -314,29 +314,17 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) #endif lock_page(page_head); - - /* - * If page_head->mapping is NULL, then it cannot be a PageAnon - * page; but it might be the ZERO_PAGE or in the gate area or - * in a special mapping (all cases which we are happy to fail); - * or it may have been a good file page when get_user_pages_fast - * found it, but truncated or holepunched or subjected to - * invalidate_complete_page2 before we got the page lock (also - * cases which we are happy to fail). And we hold a reference, - * so refcount care in invalidate_complete_page's remove_mapping - * prevents drop_caches from setting mapping to NULL beneath us. - * - * The case we do have to guard against is when memory pressure made - * shmem_writepage move it from filecache to swapcache beneath us: - * an unlikely race, but we do need to retry for page_head->mapping. - */ if (!page_head->mapping) { - int shmem_swizzled = PageSwapCache(page_head); unlock_page(page_head); put_page(page_head); - if (shmem_swizzled) - goto again; - return -EFAULT; + /* + * ZERO_PAGE pages don't have a mapping. Avoid a busy loop + * trying to find one. RW mapping would have COW'd (and thus + * have a mapping) so this page is RO and won't ever change. + */ + if ((page_head == ZERO_PAGE(address))) + return -EFAULT; + goto again; } /* diff --git a/trunk/kernel/hung_task.c b/trunk/kernel/hung_task.c index 2e48ec0c2e91..8b1748d0172c 100644 --- a/trunk/kernel/hung_task.c +++ b/trunk/kernel/hung_task.c @@ -74,17 +74,11 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout) /* * Ensure the task is not frozen. - * Also, skip vfork and any other user process that freezer should skip. + * Also, when a freshly created task is scheduled once, changes + * its state to TASK_UNINTERRUPTIBLE without having ever been + * switched out once, it musn't be checked. */ - if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP))) - return; - - /* - * When a freshly created task is scheduled once, changes its state to - * TASK_UNINTERRUPTIBLE without having ever been switched out once, it - * musn't be checked. 
- */ - if (unlikely(!switch_count)) + if (unlikely(t->flags & PF_FROZEN || !switch_count)) return; if (switch_count != t->last_switch_count) { diff --git a/trunk/kernel/itimer.c b/trunk/kernel/itimer.c index 22000c3db0dd..d802883153da 100644 --- a/trunk/kernel/itimer.c +++ b/trunk/kernel/itimer.c @@ -52,22 +52,22 @@ static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id, cval = it->expires; cinterval = it->incr; - if (cval) { + if (!cputime_eq(cval, cputime_zero)) { struct task_cputime cputime; cputime_t t; thread_group_cputimer(tsk, &cputime); if (clock_id == CPUCLOCK_PROF) - t = cputime.utime + cputime.stime; + t = cputime_add(cputime.utime, cputime.stime); else /* CPUCLOCK_VIRT */ t = cputime.utime; - if (cval < t) + if (cputime_le(cval, t)) /* about to fire */ cval = cputime_one_jiffy; else - cval = cval - t; + cval = cputime_sub(cval, t); } spin_unlock_irq(&tsk->sighand->siglock); @@ -161,9 +161,10 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id, cval = it->expires; cinterval = it->incr; - if (cval || nval) { - if (nval > 0) - nval += cputime_one_jiffy; + if (!cputime_eq(cval, cputime_zero) || + !cputime_eq(nval, cputime_zero)) { + if (cputime_gt(nval, cputime_zero)) + nval = cputime_add(nval, cputime_one_jiffy); set_process_cpu_timer(tsk, clock_id, &nval, &cval); } it->expires = nval; diff --git a/trunk/kernel/jump_label.c b/trunk/kernel/jump_label.c index 30c3c7708132..66ff7109f697 100644 --- a/trunk/kernel/jump_label.c +++ b/trunk/kernel/jump_label.c @@ -72,46 +72,15 @@ void jump_label_inc(struct jump_label_key *key) jump_label_unlock(); } -static void __jump_label_dec(struct jump_label_key *key, - unsigned long rate_limit, struct delayed_work *work) +void jump_label_dec(struct jump_label_key *key) { if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) return; - if (rate_limit) { - atomic_inc(&key->enabled); - schedule_delayed_work(work, rate_limit); - } else - jump_label_update(key, JUMP_LABEL_DISABLE); - + jump_label_update(key, JUMP_LABEL_DISABLE); jump_label_unlock(); } -static void jump_label_update_timeout(struct work_struct *work) -{ - struct jump_label_key_deferred *key = - container_of(work, struct jump_label_key_deferred, work.work); - __jump_label_dec(&key->key, 0, NULL); -} - -void jump_label_dec(struct jump_label_key *key) -{ - __jump_label_dec(key, 0, NULL); -} - -void jump_label_dec_deferred(struct jump_label_key_deferred *key) -{ - __jump_label_dec(&key->key, key->timeout, &key->work); -} - - -void jump_label_rate_limit(struct jump_label_key_deferred *key, - unsigned long rl) -{ - key->timeout = rl; - INIT_DELAYED_WORK(&key->work, jump_label_update_timeout); -} - static int addr_conflict(struct jump_entry *entry, void *start, void *end) { if (entry->code <= (unsigned long)end && @@ -142,7 +111,7 @@ static int __jump_label_text_reserved(struct jump_entry *iter_start, * running code can override this to make the non-live update case * cheaper. */ -void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry, +void __weak arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type) { arch_jump_label_transform(entry, type); @@ -248,13 +217,8 @@ void jump_label_apply_nops(struct module *mod) if (iter_start == iter_stop) return; - for (iter = iter_start; iter < iter_stop; iter++) { - struct jump_label_key *iterk; - - iterk = (struct jump_label_key *)(unsigned long)iter->key; - arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ? 
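get_cpu_itimer() in the itimer.c hunk above converts the stored absolute expiry into the remaining time reported to the caller: once the thread group's consumed CPU time has reached the expiry, the smallest non-zero value ("about to fire") is returned instead of zero. A userspace sketch of that read-back step only (the disarmed cval == 0 case is skipped); plain unsigned longs stand in for cputime_t and one_jiffy is an illustrative constant:

#include <assert.h>
#include <stdio.h>

static unsigned long itimer_remaining(unsigned long expires,
				      unsigned long consumed,
				      unsigned long one_jiffy)
{
	if (expires <= consumed)
		return one_jiffy;	/* about to fire */
	return expires - consumed;
}

int main(void)
{
	assert(itimer_remaining(500, 200, 1) == 300);	/* 300 ticks left */
	assert(itimer_remaining(500, 700, 1) == 1);	/* already past: clamp */
	printf("itimer read-back ok\n");
	return 0;
}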
- JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE); - } + for (iter = iter_start; iter < iter_stop; iter++) + arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE); } static int jump_label_add_module(struct module *mod) @@ -294,7 +258,8 @@ static int jump_label_add_module(struct module *mod) key->next = jlm; if (jump_label_enabled(key)) - __jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE); + __jump_label_update(key, iter, iter_stop, + JUMP_LABEL_ENABLE); } return 0; diff --git a/trunk/kernel/lockdep.c b/trunk/kernel/lockdep.c index 8889f7dd7c46..b2e08c932d91 100644 --- a/trunk/kernel/lockdep.c +++ b/trunk/kernel/lockdep.c @@ -431,7 +431,6 @@ unsigned int max_lockdep_depth; * about it later on, in lockdep_info(). */ static int lockdep_init_error; -static const char *lock_init_error; static unsigned long lockdep_init_trace_data[20]; static struct stack_trace lockdep_init_trace = { .max_entries = ARRAY_SIZE(lockdep_init_trace_data), @@ -500,32 +499,36 @@ void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS]) usage[i] = '\0'; } -static void __print_lock_name(struct lock_class *class) +static int __print_lock_name(struct lock_class *class) { char str[KSYM_NAME_LEN]; const char *name; name = class->name; - if (!name) { + if (!name) name = __get_key_name(class->key, str); - printk("%s", name); - } else { - printk("%s", name); - if (class->name_version > 1) - printk("#%d", class->name_version); - if (class->subclass) - printk("/%d", class->subclass); - } + + return printk("%s", name); } static void print_lock_name(struct lock_class *class) { - char usage[LOCK_USAGE_CHARS]; + char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS]; + const char *name; get_usage_chars(class, usage); - printk(" ("); - __print_lock_name(class); + name = class->name; + if (!name) { + name = __get_key_name(class->key, str); + printk(" (%s", name); + } else { + printk(" (%s", name); + if (class->name_version > 1) + printk("#%d", class->name_version); + if (class->subclass) + printk("/%d", class->subclass); + } printk("){%s}", usage); } @@ -565,12 +568,11 @@ static void lockdep_print_held_locks(struct task_struct *curr) } } -static void print_kernel_ident(void) +static void print_kernel_version(void) { - printk("%s %.*s %s\n", init_utsname()->release, + printk("%s %.*s\n", init_utsname()->release, (int)strcspn(init_utsname()->version, " "), - init_utsname()->version, - print_tainted()); + init_utsname()->version); } static int very_verbose(struct lock_class *class) @@ -654,7 +656,6 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) if (unlikely(!lockdep_initialized)) { lockdep_init(); lockdep_init_error = 1; - lock_init_error = lock->name; save_stack_trace(&lockdep_init_trace); } #endif @@ -722,7 +723,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) class = look_up_lock_class(lock, subclass); if (likely(class)) - goto out_set_class_cache; + return class; /* * Debug-check: all keys must be persistent! 
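The print_kernel_ident()/print_kernel_version() hunk above uses printf's "%.*s" precision argument together with strcspn() to print the version string only up to its first space. The same idiom in userspace, with a made-up release and version string:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *release = "3.2.0";
	const char *version = "#1 SMP Fri Jan 6 12:00:00 UTC 2012";

	/* strcspn() gives the length of the initial segment containing no
	 * space; "%.*s" limits the printed field to that many bytes. */
	printf("%s %.*s\n", release, (int)strcspn(version, " "), version);
	/* prints: 3.2.0 #1 */
	return 0;
}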
@@ -807,7 +808,6 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) graph_unlock(); raw_local_irq_restore(flags); -out_set_class_cache: if (!subclass || force) lock->class_cache[0] = class; else if (subclass < NR_LOCKDEP_CACHING_CLASSES) @@ -1149,7 +1149,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth, printk("\n"); printk("======================================================\n"); printk("[ INFO: possible circular locking dependency detected ]\n"); - print_kernel_ident(); + print_kernel_version(); printk("-------------------------------------------------------\n"); printk("%s/%d is trying to acquire lock:\n", curr->comm, task_pid_nr(curr)); @@ -1488,7 +1488,7 @@ print_bad_irq_dependency(struct task_struct *curr, printk("======================================================\n"); printk("[ INFO: %s-safe -> %s-unsafe lock order detected ]\n", irqclass, irqclass); - print_kernel_ident(); + print_kernel_version(); printk("------------------------------------------------------\n"); printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n", curr->comm, task_pid_nr(curr), @@ -1717,7 +1717,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, printk("\n"); printk("=============================================\n"); printk("[ INFO: possible recursive locking detected ]\n"); - print_kernel_ident(); + print_kernel_version(); printk("---------------------------------------------\n"); printk("%s/%d is trying to acquire lock:\n", curr->comm, task_pid_nr(curr)); @@ -2224,7 +2224,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this, printk("\n"); printk("=================================\n"); printk("[ INFO: inconsistent lock state ]\n"); - print_kernel_ident(); + print_kernel_version(); printk("---------------------------------\n"); printk("inconsistent {%s} -> {%s} usage.\n", @@ -2289,7 +2289,7 @@ print_irq_inversion_bug(struct task_struct *curr, printk("\n"); printk("=========================================================\n"); printk("[ INFO: possible irq lock inversion dependency detected ]\n"); - print_kernel_ident(); + print_kernel_version(); printk("---------------------------------------------------------\n"); printk("%s/%d just changed the state of lock:\n", curr->comm, task_pid_nr(curr)); @@ -3175,7 +3175,6 @@ print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock, printk("\n"); printk("=====================================\n"); printk("[ BUG: bad unlock balance detected! ]\n"); - print_kernel_ident(); printk("-------------------------------------\n"); printk("%s/%d is trying to release lock (", curr->comm, task_pid_nr(curr)); @@ -3620,7 +3619,6 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock, printk("\n"); printk("=================================\n"); printk("[ BUG: bad contention detected! ]\n"); - print_kernel_ident(); printk("---------------------------------\n"); printk("%s/%d is trying to contend lock (", curr->comm, task_pid_nr(curr)); @@ -3976,8 +3974,7 @@ void __init lockdep_info(void) #ifdef CONFIG_DEBUG_LOCKDEP if (lockdep_init_error) { - printk("WARNING: lockdep init error! lock-%s was acquired" - "before lockdep_init\n", lock_init_error); + printk("WARNING: lockdep init error! 
Arch code didn't call lockdep_init() early enough?\n"); printk("Call stack leading to lockdep invocation was:\n"); print_stack_trace(&lockdep_init_trace, 0); } @@ -3996,7 +3993,6 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from, printk("\n"); printk("=========================\n"); printk("[ BUG: held lock freed! ]\n"); - print_kernel_ident(); printk("-------------------------\n"); printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n", curr->comm, task_pid_nr(curr), mem_from, mem_to-1); @@ -4054,7 +4050,6 @@ static void print_held_locks_bug(struct task_struct *curr) printk("\n"); printk("=====================================\n"); printk("[ BUG: lock held at task exit time! ]\n"); - print_kernel_ident(); printk("-------------------------------------\n"); printk("%s/%d is exiting with locks still held!\n", curr->comm, task_pid_nr(curr)); @@ -4152,7 +4147,6 @@ void lockdep_sys_exit(void) printk("\n"); printk("================================================\n"); printk("[ BUG: lock held when returning to user space! ]\n"); - print_kernel_ident(); printk("------------------------------------------------\n"); printk("%s/%d is leaving the kernel with locks still held!\n", curr->comm, curr->pid); @@ -4172,33 +4166,10 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s) printk("\n"); printk("===============================\n"); printk("[ INFO: suspicious RCU usage. ]\n"); - print_kernel_ident(); printk("-------------------------------\n"); printk("%s:%d %s!\n", file, line, s); printk("\nother info that might help us debug this:\n\n"); printk("\nrcu_scheduler_active = %d, debug_locks = %d\n", rcu_scheduler_active, debug_locks); - - /* - * If a CPU is in the RCU-free window in idle (ie: in the section - * between rcu_idle_enter() and rcu_idle_exit(), then RCU - * considers that CPU to be in an "extended quiescent state", - * which means that RCU will be completely ignoring that CPU. - * Therefore, rcu_read_lock() and friends have absolutely no - * effect on a CPU running in that state. In other words, even if - * such an RCU-idle CPU has called rcu_read_lock(), RCU might well - * delete data structures out from under it. RCU really has no - * choice here: we need to keep an RCU-free window in idle where - * the CPU may possibly enter into low power mode. This way we can - * notice an extended quiescent state to other CPUs that started a grace - * period. Otherwise we would delay any grace period as long as we run - * in the idle task. - * - * So complain bitterly if someone does call rcu_read_lock(), - * rcu_read_lock_bh() and so on from extended quiescent states. - */ - if (rcu_is_cpu_idle()) - printk("RCU used illegally from extended quiescent state!\n"); - lockdep_print_held_locks(curr); printk("\nstack backtrace:\n"); dump_stack(); diff --git a/trunk/kernel/panic.c b/trunk/kernel/panic.c index 3458469eb7c3..b26593604214 100644 --- a/trunk/kernel/panic.c +++ b/trunk/kernel/panic.c @@ -237,20 +237,11 @@ void add_taint(unsigned flag) * Can't trust the integrity of the kernel anymore. * We don't call directly debug_locks_off() because the issue * is not necessarily serious enough to set oops_in_progress to 1 - * Also we want to keep up lockdep for staging/out-of-tree - * development and post-warning case. + * Also we want to keep up lockdep for staging development and + * post-warning case. 
*/ - switch (flag) { - case TAINT_CRAP: - case TAINT_OOT_MODULE: - case TAINT_WARN: - case TAINT_FIRMWARE_WORKAROUND: - break; - - default: - if (__debug_locks_off()) - printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n"); - } + if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off()) + printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n"); set_bit(flag, &tainted_mask); } diff --git a/trunk/kernel/posix-cpu-timers.c b/trunk/kernel/posix-cpu-timers.c index 125cb67daa21..e7cb76dc18f5 100644 --- a/trunk/kernel/posix-cpu-timers.c +++ b/trunk/kernel/posix-cpu-timers.c @@ -78,7 +78,7 @@ static inline int cpu_time_before(const clockid_t which_clock, if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) { return now.sched < then.sched; } else { - return now.cpu < then.cpu; + return cputime_lt(now.cpu, then.cpu); } } static inline void cpu_time_add(const clockid_t which_clock, @@ -88,7 +88,7 @@ static inline void cpu_time_add(const clockid_t which_clock, if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) { acc->sched += val.sched; } else { - acc->cpu += val.cpu; + acc->cpu = cputime_add(acc->cpu, val.cpu); } } static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock, @@ -98,11 +98,24 @@ static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock, if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) { a.sched -= b.sched; } else { - a.cpu -= b.cpu; + a.cpu = cputime_sub(a.cpu, b.cpu); } return a; } +/* + * Divide and limit the result to res >= 1 + * + * This is necessary to prevent signal delivery starvation, when the result of + * the division would be rounded down to 0. + */ +static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div) +{ + cputime_t res = cputime_div(time, div); + + return max_t(cputime_t, res, 1); +} + /* * Update expiry time from increment, and increase overrun count, * given the current clock sample. @@ -135,26 +148,28 @@ static void bump_cpu_timer(struct k_itimer *timer, } else { cputime_t delta, incr; - if (now.cpu < timer->it.cpu.expires.cpu) + if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu)) return; incr = timer->it.cpu.incr.cpu; - delta = now.cpu + incr - timer->it.cpu.expires.cpu; + delta = cputime_sub(cputime_add(now.cpu, incr), + timer->it.cpu.expires.cpu); /* Don't use (incr*2 < delta), incr*2 might overflow. 
*/ - for (i = 0; incr < delta - incr; i++) - incr += incr; - for (; i >= 0; incr = incr >> 1, i--) { - if (delta < incr) + for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++) + incr = cputime_add(incr, incr); + for (; i >= 0; incr = cputime_halve(incr), i--) { + if (cputime_lt(delta, incr)) continue; - timer->it.cpu.expires.cpu += incr; + timer->it.cpu.expires.cpu = + cputime_add(timer->it.cpu.expires.cpu, incr); timer->it_overrun += 1 << i; - delta -= incr; + delta = cputime_sub(delta, incr); } } } static inline cputime_t prof_ticks(struct task_struct *p) { - return p->utime + p->stime; + return cputime_add(p->utime, p->stime); } static inline cputime_t virt_ticks(struct task_struct *p) { @@ -233,8 +248,8 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) t = tsk; do { - times->utime += t->utime; - times->stime += t->stime; + times->utime = cputime_add(times->utime, t->utime); + times->stime = cputime_add(times->stime, t->stime); times->sum_exec_runtime += task_sched_runtime(t); } while_each_thread(tsk, t); out: @@ -243,10 +258,10 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b) { - if (b->utime > a->utime) + if (cputime_gt(b->utime, a->utime)) a->utime = b->utime; - if (b->stime > a->stime) + if (cputime_gt(b->stime, a->stime)) a->stime = b->stime; if (b->sum_exec_runtime > a->sum_exec_runtime) @@ -291,7 +306,7 @@ static int cpu_clock_sample_group(const clockid_t which_clock, return -EINVAL; case CPUCLOCK_PROF: thread_group_cputime(p, &cputime); - cpu->cpu = cputime.utime + cputime.stime; + cpu->cpu = cputime_add(cputime.utime, cputime.stime); break; case CPUCLOCK_VIRT: thread_group_cputime(p, &cputime); @@ -455,24 +470,26 @@ static void cleanup_timers(struct list_head *head, unsigned long long sum_exec_runtime) { struct cpu_timer_list *timer, *next; - cputime_t ptime = utime + stime; + cputime_t ptime = cputime_add(utime, stime); list_for_each_entry_safe(timer, next, head, entry) { list_del_init(&timer->entry); - if (timer->expires.cpu < ptime) { - timer->expires.cpu = 0; + if (cputime_lt(timer->expires.cpu, ptime)) { + timer->expires.cpu = cputime_zero; } else { - timer->expires.cpu -= ptime; + timer->expires.cpu = cputime_sub(timer->expires.cpu, + ptime); } } ++head; list_for_each_entry_safe(timer, next, head, entry) { list_del_init(&timer->entry); - if (timer->expires.cpu < utime) { - timer->expires.cpu = 0; + if (cputime_lt(timer->expires.cpu, utime)) { + timer->expires.cpu = cputime_zero; } else { - timer->expires.cpu -= utime; + timer->expires.cpu = cputime_sub(timer->expires.cpu, + utime); } } @@ -503,7 +520,8 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk) struct signal_struct *const sig = tsk->signal; cleanup_timers(tsk->signal->cpu_timers, - tsk->utime + sig->utime, tsk->stime + sig->stime, + cputime_add(tsk->utime, sig->utime), + cputime_add(tsk->stime, sig->stime), tsk->se.sum_exec_runtime + sig->sum_sched_runtime); } @@ -522,7 +540,8 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now) static inline int expires_gt(cputime_t expires, cputime_t new_exp) { - return expires == 0 || expires > new_exp; + return cputime_eq(expires, cputime_zero) || + cputime_gt(expires, new_exp); } /* @@ -632,7 +651,7 @@ static int cpu_timer_sample_group(const clockid_t which_clock, default: return -EINVAL; case CPUCLOCK_PROF: - cpu->cpu = cputime.utime + cputime.stime; + cpu->cpu = cputime_add(cputime.utime, 
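The bump_cpu_timer() arithmetic above advances a periodic timer's expiry past "now" and counts the missed periods. Instead of testing (incr * 2 < delta), which could overflow, the increment is doubled while (incr < delta - incr) and then walked back down, adding power-of-two multiples of the period. A userspace model of the same arithmetic using plain unsigned longs rather than cputime_t:

#include <assert.h>
#include <stdio.h>

static unsigned long bump_timer(unsigned long *expires, unsigned long incr,
				unsigned long now)
{
	unsigned long delta, overrun = 0;
	int i;

	if (now < *expires)
		return 0;			/* not expired yet */

	delta = now + incr - *expires;
	/* Double incr without ever forming incr * 2 against delta directly. */
	for (i = 0; incr < delta - incr; i++)
		incr += incr;
	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;
		*expires += incr;
		overrun += 1UL << i;
		delta -= incr;
	}
	return overrun;
}

int main(void)
{
	unsigned long expires = 10;
	unsigned long overrun = bump_timer(&expires, 3, 20);

	/* Expiries 10, 13, 16 and 19 have passed; the next one is 22. */
	assert(overrun == 4 && expires == 22);
	printf("overrun=%lu next=%lu\n", overrun, expires);
	return 0;
}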
cputime.stime); break; case CPUCLOCK_VIRT: cpu->cpu = cputime.utime; @@ -899,12 +918,12 @@ static void check_thread_timers(struct task_struct *tsk, unsigned long soft; maxfire = 20; - tsk->cputime_expires.prof_exp = 0; + tsk->cputime_expires.prof_exp = cputime_zero; while (!list_empty(timers)) { struct cpu_timer_list *t = list_first_entry(timers, struct cpu_timer_list, entry); - if (!--maxfire || prof_ticks(tsk) < t->expires.cpu) { + if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) { tsk->cputime_expires.prof_exp = t->expires.cpu; break; } @@ -914,12 +933,12 @@ static void check_thread_timers(struct task_struct *tsk, ++timers; maxfire = 20; - tsk->cputime_expires.virt_exp = 0; + tsk->cputime_expires.virt_exp = cputime_zero; while (!list_empty(timers)) { struct cpu_timer_list *t = list_first_entry(timers, struct cpu_timer_list, entry); - if (!--maxfire || virt_ticks(tsk) < t->expires.cpu) { + if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) { tsk->cputime_expires.virt_exp = t->expires.cpu; break; } @@ -990,19 +1009,20 @@ static u32 onecputick; static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, cputime_t *expires, cputime_t cur_time, int signo) { - if (!it->expires) + if (cputime_eq(it->expires, cputime_zero)) return; - if (cur_time >= it->expires) { - if (it->incr) { - it->expires += it->incr; + if (cputime_ge(cur_time, it->expires)) { + if (!cputime_eq(it->incr, cputime_zero)) { + it->expires = cputime_add(it->expires, it->incr); it->error += it->incr_error; if (it->error >= onecputick) { - it->expires -= cputime_one_jiffy; + it->expires = cputime_sub(it->expires, + cputime_one_jiffy); it->error -= onecputick; } } else { - it->expires = 0; + it->expires = cputime_zero; } trace_itimer_expire(signo == SIGPROF ? 
@@ -1011,7 +1031,9 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, __group_send_sig_info(signo, SEND_SIG_PRIV, tsk); } - if (it->expires && (!*expires || it->expires < *expires)) { + if (!cputime_eq(it->expires, cputime_zero) && + (cputime_eq(*expires, cputime_zero) || + cputime_lt(it->expires, *expires))) { *expires = it->expires; } } @@ -1026,7 +1048,9 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, */ static inline int task_cputime_zero(const struct task_cputime *cputime) { - if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime) + if (cputime_eq(cputime->utime, cputime_zero) && + cputime_eq(cputime->stime, cputime_zero) && + cputime->sum_exec_runtime == 0) return 1; return 0; } @@ -1052,15 +1076,15 @@ static void check_process_timers(struct task_struct *tsk, */ thread_group_cputimer(tsk, &cputime); utime = cputime.utime; - ptime = utime + cputime.stime; + ptime = cputime_add(utime, cputime.stime); sum_sched_runtime = cputime.sum_exec_runtime; maxfire = 20; - prof_expires = 0; + prof_expires = cputime_zero; while (!list_empty(timers)) { struct cpu_timer_list *tl = list_first_entry(timers, struct cpu_timer_list, entry); - if (!--maxfire || ptime < tl->expires.cpu) { + if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) { prof_expires = tl->expires.cpu; break; } @@ -1070,12 +1094,12 @@ static void check_process_timers(struct task_struct *tsk, ++timers; maxfire = 20; - virt_expires = 0; + virt_expires = cputime_zero; while (!list_empty(timers)) { struct cpu_timer_list *tl = list_first_entry(timers, struct cpu_timer_list, entry); - if (!--maxfire || utime < tl->expires.cpu) { + if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) { virt_expires = tl->expires.cpu; break; } @@ -1130,7 +1154,8 @@ static void check_process_timers(struct task_struct *tsk, } } x = secs_to_cputime(soft); - if (!prof_expires || x < prof_expires) { + if (cputime_eq(prof_expires, cputime_zero) || + cputime_lt(x, prof_expires)) { prof_expires = x; } } @@ -1224,9 +1249,12 @@ void posix_cpu_timer_schedule(struct k_itimer *timer) static inline int task_cputime_expired(const struct task_cputime *sample, const struct task_cputime *expires) { - if (expires->utime && sample->utime >= expires->utime) + if (!cputime_eq(expires->utime, cputime_zero) && + cputime_ge(sample->utime, expires->utime)) return 1; - if (expires->stime && sample->utime + sample->stime >= expires->stime) + if (!cputime_eq(expires->stime, cputime_zero) && + cputime_ge(cputime_add(sample->utime, sample->stime), + expires->stime)) return 1; if (expires->sum_exec_runtime != 0 && sample->sum_exec_runtime >= expires->sum_exec_runtime) @@ -1361,18 +1389,18 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, * it to be relative, *newval argument is relative and we update * it to be absolute. */ - if (*oldval) { - if (*oldval <= now.cpu) { + if (!cputime_eq(*oldval, cputime_zero)) { + if (cputime_le(*oldval, now.cpu)) { /* Just about to fire. 
*/ *oldval = cputime_one_jiffy; } else { - *oldval -= now.cpu; + *oldval = cputime_sub(*oldval, now.cpu); } } - if (!*newval) + if (cputime_eq(*newval, cputime_zero)) return; - *newval += now.cpu; + *newval = cputime_add(*newval, now.cpu); } /* diff --git a/trunk/kernel/printk.c b/trunk/kernel/printk.c index 989e4a52da76..7982a0a841ea 100644 --- a/trunk/kernel/printk.c +++ b/trunk/kernel/printk.c @@ -199,7 +199,7 @@ void __init setup_log_buf(int early) unsigned long mem; mem = memblock_alloc(new_log_buf_len, PAGE_SIZE); - if (!mem) + if (mem == MEMBLOCK_ERROR) return; new_log_buf = __va(mem); } else { @@ -688,7 +688,6 @@ static void zap_locks(void) oops_timestamp = jiffies; - debug_locks_off(); /* If a crash is occurring, make sure we can't deadlock */ raw_spin_lock_init(&logbuf_lock); /* And make sure that we print immediately */ @@ -841,8 +840,9 @@ asmlinkage int vprintk(const char *fmt, va_list args) boot_delay_msec(); printk_delay(); + preempt_disable(); /* This stops the holder of console_sem just where we want him */ - local_irq_save(flags); + raw_local_irq_save(flags); this_cpu = smp_processor_id(); /* @@ -856,7 +856,7 @@ asmlinkage int vprintk(const char *fmt, va_list args) * recursion and return - but flag the recursion so that * it can be printed at the next appropriate moment: */ - if (!oops_in_progress && !lockdep_recursing(current)) { + if (!oops_in_progress) { recursion_bug = 1; goto out_restore_irqs; } @@ -962,8 +962,9 @@ asmlinkage int vprintk(const char *fmt, va_list args) lockdep_on(); out_restore_irqs: - local_irq_restore(flags); + raw_local_irq_restore(flags); + preempt_enable(); return printed_len; } EXPORT_SYMBOL(printk); diff --git a/trunk/kernel/ptrace.c b/trunk/kernel/ptrace.c index 78ab24a7b0e4..24d04477b257 100644 --- a/trunk/kernel/ptrace.c +++ b/trunk/kernel/ptrace.c @@ -96,20 +96,9 @@ void __ptrace_unlink(struct task_struct *child) */ if (!(child->flags & PF_EXITING) && (child->signal->flags & SIGNAL_STOP_STOPPED || - child->signal->group_stop_count)) { + child->signal->group_stop_count)) child->jobctl |= JOBCTL_STOP_PENDING; - /* - * This is only possible if this thread was cloned by the - * traced task running in the stopped group, set the signal - * for the future reports. - * FIXME: we should change ptrace_init_task() to handle this - * case. - */ - if (!(child->jobctl & JOBCTL_STOP_SIGMASK)) - child->jobctl |= SIGSTOP; - } - /* * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick * @child in the butt. Note that @resume should be used iff @child diff --git a/trunk/kernel/rcu.h b/trunk/kernel/rcu.h index aa88baab5f78..f600868d550d 100644 --- a/trunk/kernel/rcu.h +++ b/trunk/kernel/rcu.h @@ -29,13 +29,6 @@ #define RCU_TRACE(stmt) #endif /* #else #ifdef CONFIG_RCU_TRACE */ -/* - * Process-level increment to ->dynticks_nesting field. This allows for - * architectures that use half-interrupts and half-exceptions from - * process context. 
- */ -#define DYNTICK_TASK_NESTING (LLONG_MAX / 2 - 1) - /* * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally * by call_rcu() and rcu callback execution, and are therefore not part of the diff --git a/trunk/kernel/rcupdate.c b/trunk/kernel/rcupdate.c index 2bc4e135ff23..c5b98e565aee 100644 --- a/trunk/kernel/rcupdate.c +++ b/trunk/kernel/rcupdate.c @@ -93,8 +93,6 @@ int rcu_read_lock_bh_held(void) { if (!debug_lockdep_rcu_enabled()) return 1; - if (rcu_is_cpu_idle()) - return 0; return in_softirq() || irqs_disabled(); } EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); @@ -318,13 +316,3 @@ struct debug_obj_descr rcuhead_debug_descr = { }; EXPORT_SYMBOL_GPL(rcuhead_debug_descr); #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ - -#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE) -void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp) -{ - trace_rcu_torture_read(rcutorturename, rhp); -} -EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read); -#else -#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0) -#endif diff --git a/trunk/kernel/rcutiny.c b/trunk/kernel/rcutiny.c index 977296dca0a4..636af6d9c6e5 100644 --- a/trunk/kernel/rcutiny.c +++ b/trunk/kernel/rcutiny.c @@ -53,137 +53,31 @@ static void __call_rcu(struct rcu_head *head, #include "rcutiny_plugin.h" -static long long rcu_dynticks_nesting = DYNTICK_TASK_NESTING; +#ifdef CONFIG_NO_HZ -/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */ -static void rcu_idle_enter_common(long long oldval) -{ - if (rcu_dynticks_nesting) { - RCU_TRACE(trace_rcu_dyntick("--=", - oldval, rcu_dynticks_nesting)); - return; - } - RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting)); - if (!is_idle_task(current)) { - struct task_struct *idle = idle_task(smp_processor_id()); - - RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task", - oldval, rcu_dynticks_nesting)); - ftrace_dump(DUMP_ALL); - WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", - current->pid, current->comm, - idle->pid, idle->comm); /* must be idle task! */ - } - rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */ -} - -/* - * Enter idle, which is an extended quiescent state if we have fully - * entered that mode (i.e., if the new value of dynticks_nesting is zero). - */ -void rcu_idle_enter(void) -{ - unsigned long flags; - long long oldval; - - local_irq_save(flags); - oldval = rcu_dynticks_nesting; - rcu_dynticks_nesting = 0; - rcu_idle_enter_common(oldval); - local_irq_restore(flags); -} +static long rcu_dynticks_nesting = 1; /* - * Exit an interrupt handler towards idle. + * Enter dynticks-idle mode, which is an extended quiescent state + * if we have fully entered that mode (i.e., if the new value of + * dynticks_nesting is zero). */ -void rcu_irq_exit(void) +void rcu_enter_nohz(void) { - unsigned long flags; - long long oldval; - - local_irq_save(flags); - oldval = rcu_dynticks_nesting; - rcu_dynticks_nesting--; - WARN_ON_ONCE(rcu_dynticks_nesting < 0); - rcu_idle_enter_common(oldval); - local_irq_restore(flags); -} - -/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. 
*/ -static void rcu_idle_exit_common(long long oldval) -{ - if (oldval) { - RCU_TRACE(trace_rcu_dyntick("++=", - oldval, rcu_dynticks_nesting)); - return; - } - RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting)); - if (!is_idle_task(current)) { - struct task_struct *idle = idle_task(smp_processor_id()); - - RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task", - oldval, rcu_dynticks_nesting)); - ftrace_dump(DUMP_ALL); - WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", - current->pid, current->comm, - idle->pid, idle->comm); /* must be idle task! */ - } + if (--rcu_dynticks_nesting == 0) + rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */ } /* - * Exit idle, so that we are no longer in an extended quiescent state. - */ -void rcu_idle_exit(void) -{ - unsigned long flags; - long long oldval; - - local_irq_save(flags); - oldval = rcu_dynticks_nesting; - WARN_ON_ONCE(oldval != 0); - rcu_dynticks_nesting = DYNTICK_TASK_NESTING; - rcu_idle_exit_common(oldval); - local_irq_restore(flags); -} - -/* - * Enter an interrupt handler, moving away from idle. + * Exit dynticks-idle mode, so that we are no longer in an extended + * quiescent state. */ -void rcu_irq_enter(void) +void rcu_exit_nohz(void) { - unsigned long flags; - long long oldval; - - local_irq_save(flags); - oldval = rcu_dynticks_nesting; rcu_dynticks_nesting++; - WARN_ON_ONCE(rcu_dynticks_nesting == 0); - rcu_idle_exit_common(oldval); - local_irq_restore(flags); -} - -#ifdef CONFIG_PROVE_RCU - -/* - * Test whether RCU thinks that the current CPU is idle. - */ -int rcu_is_cpu_idle(void) -{ - return !rcu_dynticks_nesting; } -EXPORT_SYMBOL(rcu_is_cpu_idle); - -#endif /* #ifdef CONFIG_PROVE_RCU */ -/* - * Test whether the current CPU was interrupted from idle. Nested - * interrupts don't count, we must be running at the first interrupt - * level. - */ -int rcu_is_cpu_rrupt_from_idle(void) -{ - return rcu_dynticks_nesting <= 0; -} +#endif /* #ifdef CONFIG_NO_HZ */ /* * Helper function for rcu_sched_qs() and rcu_bh_qs(). @@ -232,13 +126,14 @@ void rcu_bh_qs(int cpu) /* * Check to see if the scheduling-clock interrupt came from an extended - * quiescent state, and, if so, tell RCU about it. This function must - * be called from hardirq context. It is normally called from the - * scheduling-clock interrupt. + * quiescent state, and, if so, tell RCU about it. */ void rcu_check_callbacks(int cpu, int user) { - if (user || rcu_is_cpu_rrupt_from_idle()) + if (user || + (idle_cpu(cpu) && + !in_softirq() && + hardirq_count() <= (1 << HARDIRQ_SHIFT))) rcu_sched_qs(cpu); else if (!in_softirq()) rcu_bh_qs(cpu); @@ -259,11 +154,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) /* If no RCU callbacks ready to invoke, just return. 
*/ if (&rcp->rcucblist == rcp->donetail) { RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1)); - RCU_TRACE(trace_rcu_batch_end(rcp->name, 0, - ACCESS_ONCE(rcp->rcucblist), - need_resched(), - is_idle_task(current), - rcu_is_callbacks_kthread())); + RCU_TRACE(trace_rcu_batch_end(rcp->name, 0)); return; } @@ -292,9 +183,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) RCU_TRACE(cb_count++); } RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count)); - RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(), - is_idle_task(current), - rcu_is_callbacks_kthread())); + RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count)); } static void rcu_process_callbacks(struct softirq_action *unused) diff --git a/trunk/kernel/rcutiny_plugin.h b/trunk/kernel/rcutiny_plugin.h index 9cb1ae4aabdd..2b0484a5dc28 100644 --- a/trunk/kernel/rcutiny_plugin.h +++ b/trunk/kernel/rcutiny_plugin.h @@ -312,8 +312,8 @@ static int rcu_boost(void) rt_mutex_lock(&mtx); rt_mutex_unlock(&mtx); /* Keep lockdep happy. */ - return ACCESS_ONCE(rcu_preempt_ctrlblk.boost_tasks) != NULL || - ACCESS_ONCE(rcu_preempt_ctrlblk.exp_tasks) != NULL; + return rcu_preempt_ctrlblk.boost_tasks != NULL || + rcu_preempt_ctrlblk.exp_tasks != NULL; } /* @@ -885,19 +885,6 @@ static void invoke_rcu_callbacks(void) wake_up(&rcu_kthread_wq); } -#ifdef CONFIG_RCU_TRACE - -/* - * Is the current CPU running the RCU-callbacks kthread? - * Caller must have preemption disabled. - */ -static bool rcu_is_callbacks_kthread(void) -{ - return rcu_kthread_task == current; -} - -#endif /* #ifdef CONFIG_RCU_TRACE */ - /* * This kthread invokes RCU callbacks whose grace periods have * elapsed. It is awakened as needed, and takes the place of the @@ -951,18 +938,6 @@ void invoke_rcu_callbacks(void) raise_softirq(RCU_SOFTIRQ); } -#ifdef CONFIG_RCU_TRACE - -/* - * There is no callback kthread, so this thread is never it. - */ -static bool rcu_is_callbacks_kthread(void) -{ - return false; -} - -#endif /* #ifdef CONFIG_RCU_TRACE */ - void rcu_init(void) { open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); diff --git a/trunk/kernel/rcutorture.c b/trunk/kernel/rcutorture.c index 88f17b8a3b1d..764825c2685c 100644 --- a/trunk/kernel/rcutorture.c +++ b/trunk/kernel/rcutorture.c @@ -61,11 +61,9 @@ static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */ static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/ static int stutter = 5; /* Start/stop testing interval (in sec) */ static int irqreader = 1; /* RCU readers from irq (timers). */ -static int fqs_duration; /* Duration of bursts (us), 0 to disable. */ -static int fqs_holdoff; /* Hold time within burst (us). */ +static int fqs_duration = 0; /* Duration of bursts (us), 0 to disable. */ +static int fqs_holdoff = 0; /* Hold time within burst (us). */ static int fqs_stutter = 3; /* Wait time between bursts (s). */ -static int onoff_interval; /* Wait time between CPU hotplugs, 0=disable. */ -static int shutdown_secs; /* Shutdown time (s). <=0 for no shutdown. */ static int test_boost = 1; /* Test RCU prio boost: 0=no, 1=maybe, 2=yes. */ static int test_boost_interval = 7; /* Interval between boost tests, seconds. */ static int test_boost_duration = 4; /* Duration of each boost test, seconds. 
*/ @@ -93,10 +91,6 @@ module_param(fqs_holdoff, int, 0444); MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)"); module_param(fqs_stutter, int, 0444); MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)"); -module_param(onoff_interval, int, 0444); -MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable"); -module_param(shutdown_secs, int, 0444); -MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), zero to disable."); module_param(test_boost, int, 0444); MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes."); module_param(test_boost_interval, int, 0444); @@ -125,10 +119,6 @@ static struct task_struct *shuffler_task; static struct task_struct *stutter_task; static struct task_struct *fqs_task; static struct task_struct *boost_tasks[NR_CPUS]; -static struct task_struct *shutdown_task; -#ifdef CONFIG_HOTPLUG_CPU -static struct task_struct *onoff_task; -#endif /* #ifdef CONFIG_HOTPLUG_CPU */ #define RCU_TORTURE_PIPE_LEN 10 @@ -159,10 +149,6 @@ static long n_rcu_torture_boost_rterror; static long n_rcu_torture_boost_failure; static long n_rcu_torture_boosts; static long n_rcu_torture_timers; -static long n_offline_attempts; -static long n_offline_successes; -static long n_online_attempts; -static long n_online_successes; static struct list_head rcu_torture_removed; static cpumask_var_t shuffle_tmp_mask; @@ -174,8 +160,6 @@ static int stutter_pause_test; #define RCUTORTURE_RUNNABLE_INIT 0 #endif int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT; -module_param(rcutorture_runnable, int, 0444); -MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot"); #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) #define rcu_can_boost() 1 @@ -183,7 +167,6 @@ MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot"); #define rcu_can_boost() 0 #endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */ -static unsigned long shutdown_time; /* jiffies to system shutdown. */ static unsigned long boost_starttime; /* jiffies of next boost test start. */ DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */ /* and boost task create/destroy. */ @@ -199,9 +182,6 @@ static int fullstop = FULLSTOP_RMMOD; */ static DEFINE_MUTEX(fullstop_mutex); -/* Forward reference. */ -static void rcu_torture_cleanup(void); - /* * Detect and respond to a system shutdown. 
*/ @@ -632,30 +612,6 @@ static struct rcu_torture_ops srcu_ops = { .name = "srcu" }; -static int srcu_torture_read_lock_raw(void) __acquires(&srcu_ctl) -{ - return srcu_read_lock_raw(&srcu_ctl); -} - -static void srcu_torture_read_unlock_raw(int idx) __releases(&srcu_ctl) -{ - srcu_read_unlock_raw(&srcu_ctl, idx); -} - -static struct rcu_torture_ops srcu_raw_ops = { - .init = srcu_torture_init, - .cleanup = srcu_torture_cleanup, - .readlock = srcu_torture_read_lock_raw, - .read_delay = srcu_read_delay, - .readunlock = srcu_torture_read_unlock_raw, - .completed = srcu_torture_completed, - .deferred_free = rcu_sync_torture_deferred_free, - .sync = srcu_torture_synchronize, - .cb_barrier = NULL, - .stats = srcu_torture_stats, - .name = "srcu_raw" -}; - static void srcu_torture_synchronize_expedited(void) { synchronize_srcu_expedited(&srcu_ctl); @@ -957,18 +913,6 @@ rcu_torture_fakewriter(void *arg) return 0; } -void rcutorture_trace_dump(void) -{ - static atomic_t beenhere = ATOMIC_INIT(0); - - if (atomic_read(&beenhere)) - return; - if (atomic_xchg(&beenhere, 1) != 0) - return; - do_trace_rcu_torture_read(cur_ops->name, (struct rcu_head *)~0UL); - ftrace_dump(DUMP_ALL); -} - /* * RCU torture reader from timer handler. Dereferences rcu_torture_current, * incrementing the corresponding element of the pipeline array. The @@ -990,7 +934,6 @@ static void rcu_torture_timer(unsigned long unused) rcu_read_lock_bh_held() || rcu_read_lock_sched_held() || srcu_read_lock_held(&srcu_ctl)); - do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu); if (p == NULL) { /* Leave because rcu_torture_writer is not yet underway */ cur_ops->readunlock(idx); @@ -1008,8 +951,6 @@ static void rcu_torture_timer(unsigned long unused) /* Should not happen, but... */ pipe_count = RCU_TORTURE_PIPE_LEN; } - if (pipe_count > 1) - rcutorture_trace_dump(); __this_cpu_inc(rcu_torture_count[pipe_count]); completed = cur_ops->completed() - completed; if (completed > RCU_TORTURE_PIPE_LEN) { @@ -1053,7 +994,6 @@ rcu_torture_reader(void *arg) rcu_read_lock_bh_held() || rcu_read_lock_sched_held() || srcu_read_lock_held(&srcu_ctl)); - do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu); if (p == NULL) { /* Wait for rcu_torture_writer to get underway */ cur_ops->readunlock(idx); @@ -1069,8 +1009,6 @@ rcu_torture_reader(void *arg) /* Should not happen, but... 
*/ pipe_count = RCU_TORTURE_PIPE_LEN; } - if (pipe_count > 1) - rcutorture_trace_dump(); __this_cpu_inc(rcu_torture_count[pipe_count]); completed = cur_ops->completed() - completed; if (completed > RCU_TORTURE_PIPE_LEN) { @@ -1118,8 +1056,7 @@ rcu_torture_printk(char *page) cnt += sprintf(&page[cnt], "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d " "rtmbe: %d rtbke: %ld rtbre: %ld " - "rtbf: %ld rtb: %ld nt: %ld " - "onoff: %ld/%ld:%ld/%ld", + "rtbf: %ld rtb: %ld nt: %ld", rcu_torture_current, rcu_torture_current_version, list_empty(&rcu_torture_freelist), @@ -1131,11 +1068,7 @@ rcu_torture_printk(char *page) n_rcu_torture_boost_rterror, n_rcu_torture_boost_failure, n_rcu_torture_boosts, - n_rcu_torture_timers, - n_online_successes, - n_online_attempts, - n_offline_successes, - n_offline_attempts); + n_rcu_torture_timers); if (atomic_read(&n_rcu_torture_mberror) != 0 || n_rcu_torture_boost_ktrerror != 0 || n_rcu_torture_boost_rterror != 0 || @@ -1299,14 +1232,12 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, char *tag) "shuffle_interval=%d stutter=%d irqreader=%d " "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " "test_boost=%d/%d test_boost_interval=%d " - "test_boost_duration=%d shutdown_secs=%d " - "onoff_interval=%d\n", + "test_boost_duration=%d\n", torture_type, tag, nrealreaders, nfakewriters, stat_interval, verbose, test_no_idle_hz, shuffle_interval, stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, test_boost, cur_ops->can_boost, - test_boost_interval, test_boost_duration, shutdown_secs, - onoff_interval); + test_boost_interval, test_boost_duration); } static struct notifier_block rcutorture_shutdown_nb = { @@ -1356,131 +1287,6 @@ static int rcutorture_booster_init(int cpu) return 0; } -/* - * Cause the rcutorture test to shutdown the system after the test has - * run for the time specified by the shutdown_secs module parameter. - */ -static int -rcu_torture_shutdown(void *arg) -{ - long delta; - unsigned long jiffies_snap; - - VERBOSE_PRINTK_STRING("rcu_torture_shutdown task started"); - jiffies_snap = ACCESS_ONCE(jiffies); - while (ULONG_CMP_LT(jiffies_snap, shutdown_time) && - !kthread_should_stop()) { - delta = shutdown_time - jiffies_snap; - if (verbose) - printk(KERN_ALERT "%s" TORTURE_FLAG - "rcu_torture_shutdown task: %lu " - "jiffies remaining\n", - torture_type, delta); - schedule_timeout_interruptible(delta); - jiffies_snap = ACCESS_ONCE(jiffies); - } - if (kthread_should_stop()) { - VERBOSE_PRINTK_STRING("rcu_torture_shutdown task stopping"); - return 0; - } - - /* OK, shut down the system. */ - - VERBOSE_PRINTK_STRING("rcu_torture_shutdown task shutting down system"); - shutdown_task = NULL; /* Avoid self-kill deadlock. */ - rcu_torture_cleanup(); /* Get the success/failure message. */ - kernel_power_off(); /* Shut down the system. */ - return 0; -} - -#ifdef CONFIG_HOTPLUG_CPU - -/* - * Execute random CPU-hotplug operations at the interval specified - * by the onoff_interval. 
- */ -static int -rcu_torture_onoff(void *arg) -{ - int cpu; - int maxcpu = -1; - DEFINE_RCU_RANDOM(rand); - - VERBOSE_PRINTK_STRING("rcu_torture_onoff task started"); - for_each_online_cpu(cpu) - maxcpu = cpu; - WARN_ON(maxcpu < 0); - while (!kthread_should_stop()) { - cpu = (rcu_random(&rand) >> 4) % (maxcpu + 1); - if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) { - if (verbose) - printk(KERN_ALERT "%s" TORTURE_FLAG - "rcu_torture_onoff task: offlining %d\n", - torture_type, cpu); - n_offline_attempts++; - if (cpu_down(cpu) == 0) { - if (verbose) - printk(KERN_ALERT "%s" TORTURE_FLAG - "rcu_torture_onoff task: " - "offlined %d\n", - torture_type, cpu); - n_offline_successes++; - } - } else if (cpu_is_hotpluggable(cpu)) { - if (verbose) - printk(KERN_ALERT "%s" TORTURE_FLAG - "rcu_torture_onoff task: onlining %d\n", - torture_type, cpu); - n_online_attempts++; - if (cpu_up(cpu) == 0) { - if (verbose) - printk(KERN_ALERT "%s" TORTURE_FLAG - "rcu_torture_onoff task: " - "onlined %d\n", - torture_type, cpu); - n_online_successes++; - } - } - schedule_timeout_interruptible(onoff_interval * HZ); - } - VERBOSE_PRINTK_STRING("rcu_torture_onoff task stopping"); - return 0; -} - -static int -rcu_torture_onoff_init(void) -{ - if (onoff_interval <= 0) - return 0; - onoff_task = kthread_run(rcu_torture_onoff, NULL, "rcu_torture_onoff"); - if (IS_ERR(onoff_task)) { - onoff_task = NULL; - return PTR_ERR(onoff_task); - } - return 0; -} - -static void rcu_torture_onoff_cleanup(void) -{ - if (onoff_task == NULL) - return; - VERBOSE_PRINTK_STRING("Stopping rcu_torture_onoff task"); - kthread_stop(onoff_task); -} - -#else /* #ifdef CONFIG_HOTPLUG_CPU */ - -static void -rcu_torture_onoff_init(void) -{ -} - -static void rcu_torture_onoff_cleanup(void) -{ -} - -#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */ - static int rcutorture_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { @@ -1585,11 +1391,6 @@ rcu_torture_cleanup(void) for_each_possible_cpu(i) rcutorture_booster_cleanup(i); } - if (shutdown_task != NULL) { - VERBOSE_PRINTK_STRING("Stopping rcu_torture_shutdown task"); - kthread_stop(shutdown_task); - } - rcu_torture_onoff_cleanup(); /* Wait for all RCU callbacks to fire. 
*/ @@ -1615,7 +1416,7 @@ rcu_torture_init(void) static struct rcu_torture_ops *torture_ops[] = { &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops, &rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops, - &srcu_ops, &srcu_raw_ops, &srcu_expedited_ops, + &srcu_ops, &srcu_expedited_ops, &sched_ops, &sched_sync_ops, &sched_expedited_ops, }; mutex_lock(&fullstop_mutex); @@ -1806,18 +1607,6 @@ rcu_torture_init(void) } } } - if (shutdown_secs > 0) { - shutdown_time = jiffies + shutdown_secs * HZ; - shutdown_task = kthread_run(rcu_torture_shutdown, NULL, - "rcu_torture_shutdown"); - if (IS_ERR(shutdown_task)) { - firsterr = PTR_ERR(shutdown_task); - VERBOSE_PRINTK_ERRSTRING("Failed to create shutdown"); - shutdown_task = NULL; - goto unwind; - } - } - rcu_torture_onoff_init(); register_reboot_notifier(&rcutorture_shutdown_nb); rcutorture_record_test_transition(); mutex_unlock(&fullstop_mutex); diff --git a/trunk/kernel/rcutree.c b/trunk/kernel/rcutree.c index 6c4a6722abfd..6b76d812740c 100644 --- a/trunk/kernel/rcutree.c +++ b/trunk/kernel/rcutree.c @@ -69,7 +69,7 @@ static struct lock_class_key rcu_node_class[NUM_RCU_LVLS]; NUM_RCU_LVL_3, \ NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \ }, \ - .fqs_state = RCU_GP_IDLE, \ + .signaled = RCU_GP_IDLE, \ .gpnum = -300, \ .completed = -300, \ .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.onofflock), \ @@ -195,10 +195,12 @@ void rcu_note_context_switch(int cpu) } EXPORT_SYMBOL_GPL(rcu_note_context_switch); +#ifdef CONFIG_NO_HZ DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { - .dynticks_nesting = DYNTICK_TASK_NESTING, + .dynticks_nesting = 1, .dynticks = ATOMIC_INIT(1), }; +#endif /* #ifdef CONFIG_NO_HZ */ static int blimit = 10; /* Maximum callbacks per rcu_do_batch. */ static int qhimark = 10000; /* If this many pending, ignore blimit. */ @@ -326,11 +328,11 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp) return 1; } - /* - * The CPU is online, so send it a reschedule IPI. This forces - * it through the scheduler, and (inefficiently) also handles cases - * where idle loops fail to inform RCU about the CPU being idle. - */ + /* If preemptible RCU, no point in sending reschedule IPI. */ + if (rdp->preemptible) + return 0; + + /* The CPU is online, so send it a reschedule IPI. */ if (rdp->cpu != smp_processor_id()) smp_send_reschedule(rdp->cpu); else @@ -341,181 +343,59 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp) #endif /* #ifdef CONFIG_SMP */ -/* - * rcu_idle_enter_common - inform RCU that current CPU is moving towards idle - * - * If the new value of the ->dynticks_nesting counter now is zero, - * we really have entered idle, and must do the appropriate accounting. - * The caller must have disabled interrupts. - */ -static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval) -{ - trace_rcu_dyntick("Start", oldval, 0); - if (!is_idle_task(current)) { - struct task_struct *idle = idle_task(smp_processor_id()); - - trace_rcu_dyntick("Error on entry: not idle task", oldval, 0); - ftrace_dump(DUMP_ALL); - WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", - current->pid, current->comm, - idle->pid, idle->comm); /* must be idle task! */ - } - rcu_prepare_for_idle(smp_processor_id()); - /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */ - smp_mb__before_atomic_inc(); /* See above. */ - atomic_inc(&rdtp->dynticks); - smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. 
*/ - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1); -} +#ifdef CONFIG_NO_HZ /** - * rcu_idle_enter - inform RCU that current CPU is entering idle + * rcu_enter_nohz - inform RCU that current CPU is entering nohz * - * Enter idle mode, in other words, -leave- the mode in which RCU + * Enter nohz mode, in other words, -leave- the mode in which RCU * read-side critical sections can occur. (Though RCU read-side - * critical sections can occur in irq handlers in idle, a possibility - * handled by irq_enter() and irq_exit().) - * - * We crowbar the ->dynticks_nesting field to zero to allow for - * the possibility of usermode upcalls having messed up our count - * of interrupt nesting level during the prior busy period. + * critical sections can occur in irq handlers in nohz mode, a possibility + * handled by rcu_irq_enter() and rcu_irq_exit()). */ -void rcu_idle_enter(void) +void rcu_enter_nohz(void) { unsigned long flags; - long long oldval; struct rcu_dynticks *rdtp; local_irq_save(flags); rdtp = &__get_cpu_var(rcu_dynticks); - oldval = rdtp->dynticks_nesting; - rdtp->dynticks_nesting = 0; - rcu_idle_enter_common(rdtp, oldval); + if (--rdtp->dynticks_nesting) { + local_irq_restore(flags); + return; + } + trace_rcu_dyntick("Start"); + /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */ + smp_mb__before_atomic_inc(); /* See above. */ + atomic_inc(&rdtp->dynticks); + smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */ + WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1); local_irq_restore(flags); } -/** - * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle - * - * Exit from an interrupt handler, which might possibly result in entering - * idle mode, in other words, leaving the mode in which read-side critical - * sections can occur. - * - * This code assumes that the idle loop never does anything that might - * result in unbalanced calls to irq_enter() and irq_exit(). If your - * architecture violates this assumption, RCU will give you what you - * deserve, good and hard. But very infrequently and irreproducibly. - * - * Use things like work queues to work around this limitation. +/* + * rcu_exit_nohz - inform RCU that current CPU is leaving nohz * - * You have been warned. + * Exit nohz mode, in other words, -enter- the mode in which RCU + * read-side critical sections normally occur. */ -void rcu_irq_exit(void) +void rcu_exit_nohz(void) { unsigned long flags; - long long oldval; struct rcu_dynticks *rdtp; local_irq_save(flags); rdtp = &__get_cpu_var(rcu_dynticks); - oldval = rdtp->dynticks_nesting; - rdtp->dynticks_nesting--; - WARN_ON_ONCE(rdtp->dynticks_nesting < 0); - if (rdtp->dynticks_nesting) - trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting); - else - rcu_idle_enter_common(rdtp, oldval); - local_irq_restore(flags); -} - -/* - * rcu_idle_exit_common - inform RCU that current CPU is moving away from idle - * - * If the new value of the ->dynticks_nesting counter was previously zero, - * we really have exited idle, and must do the appropriate accounting. - * The caller must have disabled interrupts. - */ -static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval) -{ + if (rdtp->dynticks_nesting++) { + local_irq_restore(flags); + return; + } smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */ atomic_inc(&rdtp->dynticks); /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */ smp_mb__after_atomic_inc(); /* See above. 
*/ WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); - rcu_cleanup_after_idle(smp_processor_id()); - trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting); - if (!is_idle_task(current)) { - struct task_struct *idle = idle_task(smp_processor_id()); - - trace_rcu_dyntick("Error on exit: not idle task", - oldval, rdtp->dynticks_nesting); - ftrace_dump(DUMP_ALL); - WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", - current->pid, current->comm, - idle->pid, idle->comm); /* must be idle task! */ - } -} - -/** - * rcu_idle_exit - inform RCU that current CPU is leaving idle - * - * Exit idle mode, in other words, -enter- the mode in which RCU - * read-side critical sections can occur. - * - * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NESTING to - * allow for the possibility of usermode upcalls messing up our count - * of interrupt nesting level during the busy period that is just - * now starting. - */ -void rcu_idle_exit(void) -{ - unsigned long flags; - struct rcu_dynticks *rdtp; - long long oldval; - - local_irq_save(flags); - rdtp = &__get_cpu_var(rcu_dynticks); - oldval = rdtp->dynticks_nesting; - WARN_ON_ONCE(oldval != 0); - rdtp->dynticks_nesting = DYNTICK_TASK_NESTING; - rcu_idle_exit_common(rdtp, oldval); - local_irq_restore(flags); -} - -/** - * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle - * - * Enter an interrupt handler, which might possibly result in exiting - * idle mode, in other words, entering the mode in which read-side critical - * sections can occur. - * - * Note that the Linux kernel is fully capable of entering an interrupt - * handler that it never exits, for example when doing upcalls to - * user mode! This code assumes that the idle loop never does upcalls to - * user mode. If your architecture does do upcalls from the idle loop (or - * does anything else that results in unbalanced calls to the irq_enter() - * and irq_exit() functions), RCU will give you what you deserve, good - * and hard. But very infrequently and irreproducibly. - * - * Use things like work queues to work around this limitation. - * - * You have been warned. - */ -void rcu_irq_enter(void) -{ - unsigned long flags; - struct rcu_dynticks *rdtp; - long long oldval; - - local_irq_save(flags); - rdtp = &__get_cpu_var(rcu_dynticks); - oldval = rdtp->dynticks_nesting; - rdtp->dynticks_nesting++; - WARN_ON_ONCE(rdtp->dynticks_nesting == 0); - if (oldval) - trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting); - else - rcu_idle_exit_common(rdtp, oldval); + trace_rcu_dyntick("End"); local_irq_restore(flags); } @@ -562,37 +442,27 @@ void rcu_nmi_exit(void) WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1); } -#ifdef CONFIG_PROVE_RCU - /** - * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle + * rcu_irq_enter - inform RCU of entry to hard irq context * - * If the current CPU is in its idle loop and is neither in an interrupt - * or NMI handler, return true. + * If the CPU was idle with dynamic ticks active, this updates the + * rdtp->dynticks to let the RCU handling know that the CPU is active. 
*/ -int rcu_is_cpu_idle(void) +void rcu_irq_enter(void) { - int ret; - - preempt_disable(); - ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0; - preempt_enable(); - return ret; + rcu_exit_nohz(); } -EXPORT_SYMBOL(rcu_is_cpu_idle); - -#endif /* #ifdef CONFIG_PROVE_RCU */ /** - * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle + * rcu_irq_exit - inform RCU of exit from hard irq context * - * If the current CPU is idle or running at a first-level (not nested) - * interrupt from idle, return true. The caller must have at least - * disabled preemption. + * If the CPU was idle with dynamic ticks active, update the rdp->dynticks + * to put let the RCU handling be aware that the CPU is going back to idle + * with no ticks. */ -int rcu_is_cpu_rrupt_from_idle(void) +void rcu_irq_exit(void) { - return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1; + rcu_enter_nohz(); } #ifdef CONFIG_SMP @@ -605,7 +475,7 @@ int rcu_is_cpu_rrupt_from_idle(void) static int dyntick_save_progress_counter(struct rcu_data *rdp) { rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks); - return (rdp->dynticks_snap & 0x1) == 0; + return 0; } /* @@ -642,6 +512,26 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) #endif /* #ifdef CONFIG_SMP */ +#else /* #ifdef CONFIG_NO_HZ */ + +#ifdef CONFIG_SMP + +static int dyntick_save_progress_counter(struct rcu_data *rdp) +{ + return 0; +} + +static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) +{ + return rcu_implicit_offline_qs(rdp); +} + +#endif /* #ifdef CONFIG_SMP */ + +#endif /* #else #ifdef CONFIG_NO_HZ */ + +int rcu_cpu_stall_suppress __read_mostly; + static void record_gp_stall_check_time(struct rcu_state *rsp) { rsp->gp_start = jiffies; @@ -976,8 +866,8 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) /* Advance to a new grace period and initialize state. */ rsp->gpnum++; trace_rcu_grace_period(rsp->name, rsp->gpnum, "start"); - WARN_ON_ONCE(rsp->fqs_state == RCU_GP_INIT); - rsp->fqs_state = RCU_GP_INIT; /* Hold off force_quiescent_state. */ + WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT); + rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; record_gp_stall_check_time(rsp); @@ -987,7 +877,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) rnp->qsmask = rnp->qsmaskinit; rnp->gpnum = rsp->gpnum; rnp->completed = rsp->completed; - rsp->fqs_state = RCU_SIGNAL_INIT; /* force_quiescent_state OK */ + rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ rcu_start_gp_per_cpu(rsp, rnp, rdp); rcu_preempt_boost_start_gp(rnp); trace_rcu_grace_period_init(rsp->name, rnp->gpnum, @@ -1037,7 +927,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) rnp = rcu_get_root(rsp); raw_spin_lock(&rnp->lock); /* irqs already disabled. */ - rsp->fqs_state = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */ + rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */ raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ raw_spin_unlock_irqrestore(&rsp->onofflock, flags); } @@ -1101,7 +991,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) rsp->completed = rsp->gpnum; /* Declare the grace period complete. */ trace_rcu_grace_period(rsp->name, rsp->completed, "end"); - rsp->fqs_state = RCU_GP_IDLE; + rsp->signaled = RCU_GP_IDLE; rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. 
*/ } @@ -1331,7 +1221,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) else raw_spin_unlock_irqrestore(&rnp->lock, flags); if (need_report & RCU_OFL_TASKS_EXP_GP) - rcu_report_exp_rnp(rsp, rnp, true); + rcu_report_exp_rnp(rsp, rnp); rcu_node_kthread_setaffinity(rnp, -1); } @@ -1373,9 +1263,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) /* If no callbacks are ready, just return.*/ if (!cpu_has_callbacks_ready_to_invoke(rdp)) { trace_rcu_batch_start(rsp->name, 0, 0); - trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist), - need_resched(), is_idle_task(current), - rcu_is_callbacks_kthread()); + trace_rcu_batch_end(rsp->name, 0); return; } @@ -1403,17 +1291,12 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) debug_rcu_head_unqueue(list); __rcu_reclaim(rsp->name, list); list = next; - /* Stop only if limit reached and CPU has something to do. */ - if (++count >= bl && - (need_resched() || - (!is_idle_task(current) && !rcu_is_callbacks_kthread()))) + if (++count >= bl) break; } local_irq_save(flags); - trace_rcu_batch_end(rsp->name, count, !!list, need_resched(), - is_idle_task(current), - rcu_is_callbacks_kthread()); + trace_rcu_batch_end(rsp->name, count); /* Update count, and requeue any remaining callbacks. */ rdp->qlen -= count; @@ -1451,14 +1334,16 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) * (user mode or idle loop for rcu, non-softirq execution for rcu_bh). * Also schedule RCU core processing. * - * This function must be called from hardirq context. It is normally + * This function must be called with hardirqs disabled. It is normally * invoked from the scheduling-clock interrupt. If rcu_pending returns * false, there is no point in invoking rcu_check_callbacks(). */ void rcu_check_callbacks(int cpu, int user) { trace_rcu_utilization("Start scheduler-tick"); - if (user || rcu_is_cpu_rrupt_from_idle()) { + if (user || + (idle_cpu(cpu) && rcu_scheduler_active && + !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) { /* * Get here if this CPU took its interrupt from user @@ -1572,7 +1457,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) goto unlock_fqs_ret; /* no GP in progress, time updated. */ } rsp->fqs_active = 1; - switch (rsp->fqs_state) { + switch (rsp->signaled) { case RCU_GP_IDLE: case RCU_GP_INIT: @@ -1588,7 +1473,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) force_qs_rnp(rsp, dyntick_save_progress_counter); raw_spin_lock(&rnp->lock); /* irqs already disabled */ if (rcu_gp_in_progress(rsp)) - rsp->fqs_state = RCU_FORCE_QS; + rsp->signaled = RCU_FORCE_QS; break; case RCU_FORCE_QS: @@ -1927,7 +1812,7 @@ static int rcu_pending(int cpu) * by the current CPU, even if none need be done immediately, returning * 1 if so. */ -static int rcu_cpu_has_callbacks(int cpu) +static int rcu_needs_cpu_quick_check(int cpu) { /* RCU callbacks either ready or pending? 
*/ return per_cpu(rcu_sched_data, cpu).nxtlist || @@ -2028,9 +1913,9 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) for (i = 0; i < RCU_NEXT_SIZE; i++) rdp->nxttail[i] = &rdp->nxtlist; rdp->qlen = 0; +#ifdef CONFIG_NO_HZ rdp->dynticks = &per_cpu(rcu_dynticks, cpu); - WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING); - WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1); +#endif /* #ifdef CONFIG_NO_HZ */ rdp->cpu = cpu; rdp->rsp = rsp; raw_spin_unlock_irqrestore(&rnp->lock, flags); @@ -2057,10 +1942,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible) rdp->qlen_last_fqs_check = 0; rdp->n_force_qs_snap = rsp->n_force_qs; rdp->blimit = blimit; - rdp->dynticks->dynticks_nesting = DYNTICK_TASK_NESTING; - atomic_set(&rdp->dynticks->dynticks, - (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1); - rcu_prepare_for_idle_init(cpu); raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ /* @@ -2142,7 +2023,6 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self, rcu_send_cbs_to_online(&rcu_bh_state); rcu_send_cbs_to_online(&rcu_sched_state); rcu_preempt_send_cbs_to_online(); - rcu_cleanup_after_idle(cpu); break; case CPU_DEAD: case CPU_DEAD_FROZEN: diff --git a/trunk/kernel/rcutree.h b/trunk/kernel/rcutree.h index fddff92d6676..849ce9ec51fe 100644 --- a/trunk/kernel/rcutree.h +++ b/trunk/kernel/rcutree.h @@ -84,10 +84,9 @@ * Dynticks per-CPU state. */ struct rcu_dynticks { - long long dynticks_nesting; /* Track irq/process nesting level. */ - /* Process level is worth LLONG_MAX/2. */ - int dynticks_nmi_nesting; /* Track NMI nesting level. */ - atomic_t dynticks; /* Even value for idle, else odd. */ + int dynticks_nesting; /* Track irq/process nesting level. */ + int dynticks_nmi_nesting; /* Track NMI nesting level. */ + atomic_t dynticks; /* Even value for dynticks-idle, else odd. */ }; /* RCU's kthread states for tracing. */ @@ -275,12 +274,16 @@ struct rcu_data { /* did other CPU force QS recently? */ long blimit; /* Upper limit on a processed batch */ +#ifdef CONFIG_NO_HZ /* 3) dynticks interface. */ struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */ int dynticks_snap; /* Per-GP tracking for dynticks. */ +#endif /* #ifdef CONFIG_NO_HZ */ /* 4) reasons this CPU needed to be kicked by force_quiescent_state */ +#ifdef CONFIG_NO_HZ unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */ +#endif /* #ifdef CONFIG_NO_HZ */ unsigned long offline_fqs; /* Kicked due to being offline. */ unsigned long resched_ipi; /* Sent a resched IPI. */ @@ -299,12 +302,16 @@ struct rcu_data { struct rcu_state *rsp; }; -/* Values for fqs_state field in struct rcu_state. */ +/* Values for signaled field in struct rcu_state. */ #define RCU_GP_IDLE 0 /* No grace period in progress. */ #define RCU_GP_INIT 1 /* Grace period being initialized. */ #define RCU_SAVE_DYNTICK 2 /* Need to scan dyntick state. */ #define RCU_FORCE_QS 3 /* Need to force quiescent state. */ +#ifdef CONFIG_NO_HZ #define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK +#else /* #ifdef CONFIG_NO_HZ */ +#define RCU_SIGNAL_INIT RCU_FORCE_QS +#endif /* #else #ifdef CONFIG_NO_HZ */ #define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */ @@ -354,7 +361,7 @@ struct rcu_state { /* The following fields are guarded by the root rcu_node's lock. */ - u8 fqs_state ____cacheline_internodealigned_in_smp; + u8 signaled ____cacheline_internodealigned_in_smp; /* Force QS state. */ u8 fqs_active; /* force_quiescent_state() */ /* is running. 
*/ @@ -444,8 +451,7 @@ static void rcu_preempt_check_callbacks(int cpu); static void rcu_preempt_process_callbacks(void); void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) -static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, - bool wake); +static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp); #endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */ static int rcu_preempt_pending(int cpu); static int rcu_preempt_needs_cpu(int cpu); @@ -455,7 +461,6 @@ static void __init __rcu_init_preempt(void); static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); static void invoke_rcu_callbacks_kthread(void); -static bool rcu_is_callbacks_kthread(void); #ifdef CONFIG_RCU_BOOST static void rcu_preempt_do_callbacks(void); static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, @@ -468,8 +473,5 @@ static void rcu_yield(void (*f)(unsigned long), unsigned long arg); #endif /* #ifdef CONFIG_RCU_BOOST */ static void rcu_cpu_kthread_setrt(int cpu, int to_rt); static void __cpuinit rcu_prepare_kthreads(int cpu); -static void rcu_prepare_for_idle_init(int cpu); -static void rcu_cleanup_after_idle(int cpu); -static void rcu_prepare_for_idle(int cpu); #endif /* #ifndef RCU_TREE_NONCORE */ diff --git a/trunk/kernel/rcutree_plugin.h b/trunk/kernel/rcutree_plugin.h index 8bb35d73e1f9..4b9b9f8a4184 100644 --- a/trunk/kernel/rcutree_plugin.h +++ b/trunk/kernel/rcutree_plugin.h @@ -312,7 +312,6 @@ static noinline void rcu_read_unlock_special(struct task_struct *t) { int empty; int empty_exp; - int empty_exp_now; unsigned long flags; struct list_head *np; #ifdef CONFIG_RCU_BOOST @@ -383,10 +382,8 @@ static noinline void rcu_read_unlock_special(struct task_struct *t) /* * If this was the last task on the current list, and if * we aren't waiting on any CPUs, report the quiescent state. - * Note that rcu_report_unblock_qs_rnp() releases rnp->lock, - * so we must take a snapshot of the expedited state. + * Note that rcu_report_unblock_qs_rnp() releases rnp->lock. */ - empty_exp_now = !rcu_preempted_readers_exp(rnp); if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) { trace_rcu_quiescent_state_report("preempt_rcu", rnp->gpnum, @@ -409,8 +406,8 @@ static noinline void rcu_read_unlock_special(struct task_struct *t) * If this was the last task on the expedited lists, * then we need to report up the rcu_node hierarchy. */ - if (!empty_exp && empty_exp_now) - rcu_report_exp_rnp(&rcu_preempt_state, rnp, true); + if (!empty_exp && !rcu_preempted_readers_exp(rnp)) + rcu_report_exp_rnp(&rcu_preempt_state, rnp); } else { local_irq_restore(flags); } @@ -732,13 +729,9 @@ static int sync_rcu_preempt_exp_done(struct rcu_node *rnp) * recursively up the tree. (Calm down, calm down, we do the recursion * iteratively!) * - * Most callers will set the "wake" flag, but the task initiating the - * expedited grace period need not wake itself. - * * Caller must hold sync_rcu_preempt_exp_mutex. 
*/ -static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, - bool wake) +static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp) { unsigned long flags; unsigned long mask; @@ -751,8 +744,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, } if (rnp->parent == NULL) { raw_spin_unlock_irqrestore(&rnp->lock, flags); - if (wake) - wake_up(&sync_rcu_preempt_exp_wq); + wake_up(&sync_rcu_preempt_exp_wq); break; } mask = rnp->grpmask; @@ -785,7 +777,7 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp) must_wait = 1; } if (!must_wait) - rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */ + rcu_report_exp_rnp(rsp, rnp); } /* @@ -1077,9 +1069,9 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); * report on tasks preempted in RCU read-side critical sections during * expedited RCU grace periods. */ -static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, - bool wake) +static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp) { + return; } #endif /* #ifdef CONFIG_HOTPLUG_CPU */ @@ -1165,6 +1157,8 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp) #endif /* #else #ifdef CONFIG_RCU_TRACE */ +static struct lock_class_key rcu_boost_class; + /* * Carry out RCU priority boosting on the task indicated by ->exp_tasks * or ->boost_tasks, advancing the pointer to the next task in the @@ -1227,13 +1221,15 @@ static int rcu_boost(struct rcu_node *rnp) */ t = container_of(tb, struct task_struct, rcu_node_entry); rt_mutex_init_proxy_locked(&mtx, t); + /* Avoid lockdep false positives. This rt_mutex is its own thing. */ + lockdep_set_class_and_name(&mtx.wait_lock, &rcu_boost_class, + "rcu_boost_mutex"); t->rcu_boost_mutex = &mtx; raw_spin_unlock_irqrestore(&rnp->lock, flags); rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */ rt_mutex_unlock(&mtx); /* Keep lockdep happy. */ - return ACCESS_ONCE(rnp->exp_tasks) != NULL || - ACCESS_ONCE(rnp->boost_tasks) != NULL; + return rnp->exp_tasks != NULL || rnp->boost_tasks != NULL; } /* @@ -1332,15 +1328,6 @@ static void invoke_rcu_callbacks_kthread(void) local_irq_restore(flags); } -/* - * Is the current CPU running the RCU-callbacks kthread? - * Caller must have preemption disabled. - */ -static bool rcu_is_callbacks_kthread(void) -{ - return __get_cpu_var(rcu_cpu_kthread_task) == current; -} - /* * Set the affinity of the boost kthread. The CPU-hotplug locks are * held, so no one should be messing with the existence of the boost @@ -1785,11 +1772,6 @@ static void invoke_rcu_callbacks_kthread(void) WARN_ON_ONCE(1); } -static bool rcu_is_callbacks_kthread(void) -{ - return false; -} - static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) { } @@ -1925,7 +1907,7 @@ void synchronize_sched_expedited(void) * grace period works for us. */ get_online_cpus(); - snap = atomic_read(&sync_sched_expedited_started); + snap = atomic_read(&sync_sched_expedited_started) - 1; smp_mb(); /* ensure read is before try_stop_cpus(). */ } @@ -1957,243 +1939,88 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited); * 1 if so. This function is part of the RCU implementation; it is -not- * an exported member of the RCU API. * - * Because we not have RCU_FAST_NO_HZ, just check whether this CPU needs - * any flavor of RCU. + * Because we have preemptible RCU, just check whether this CPU needs + * any flavor of RCU. Do not chew up lots of CPU cycles with preemption + * disabled in a most-likely vain attempt to cause RCU not to need this CPU. 
*/ int rcu_needs_cpu(int cpu) { - return rcu_cpu_has_callbacks(cpu); -} - -/* - * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it. - */ -static void rcu_prepare_for_idle_init(int cpu) -{ -} - -/* - * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up - * after it. - */ -static void rcu_cleanup_after_idle(int cpu) -{ -} - -/* - * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=y, - * is nothing. - */ -static void rcu_prepare_for_idle(int cpu) -{ + return rcu_needs_cpu_quick_check(cpu); } #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */ -/* - * This code is invoked when a CPU goes idle, at which point we want - * to have the CPU do everything required for RCU so that it can enter - * the energy-efficient dyntick-idle mode. This is handled by a - * state machine implemented by rcu_prepare_for_idle() below. - * - * The following three proprocessor symbols control this state machine: - * - * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt - * to satisfy RCU. Beyond this point, it is better to incur a periodic - * scheduling-clock interrupt than to loop through the state machine - * at full power. - * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are - * optional if RCU does not need anything immediately from this - * CPU, even if this CPU still has RCU callbacks queued. The first - * times through the state machine are mandatory: we need to give - * the state machine a chance to communicate a quiescent state - * to the RCU core. - * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted - * to sleep in dyntick-idle mode with RCU callbacks pending. This - * is sized to be roughly one RCU grace period. Those energy-efficiency - * benchmarkers who might otherwise be tempted to set this to a large - * number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your - * system. And if you are -that- concerned about energy efficiency, - * just power the system down and be done with it! - * - * The values below work well in practice. If future workloads require - * adjustment, they can be converted into kernel config parameters, though - * making the state machine smarter might be a better option. - */ -#define RCU_IDLE_FLUSHES 5 /* Number of dyntick-idle tries. */ -#define RCU_IDLE_OPT_FLUSHES 3 /* Optional dyntick-idle tries. */ -#define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */ - +#define RCU_NEEDS_CPU_FLUSHES 5 static DEFINE_PER_CPU(int, rcu_dyntick_drain); static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff); -static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer); -static ktime_t rcu_idle_gp_wait; - -/* - * Allow the CPU to enter dyntick-idle mode if either: (1) There are no - * callbacks on this CPU, (2) this CPU has not yet attempted to enter - * dyntick-idle mode, or (3) this CPU is in the process of attempting to - * enter dyntick-idle mode. Otherwise, if we have recently tried and failed - * to enter dyntick-idle mode, we refuse to try to enter it. After all, - * it is better to incur scheduling-clock interrupts than to spin - * continuously for the same time duration! - */ -int rcu_needs_cpu(int cpu) -{ - /* If no callbacks, RCU doesn't need the CPU. */ - if (!rcu_cpu_has_callbacks(cpu)) - return 0; - /* Otherwise, RCU needs the CPU only if it recently tried and failed. 
*/ - return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies; -} - -/* - * Timer handler used to force CPU to start pushing its remaining RCU - * callbacks in the case where it entered dyntick-idle mode with callbacks - * pending. The hander doesn't really need to do anything because the - * real work is done upon re-entry to idle, or by the next scheduling-clock - * interrupt should idle not be re-entered. - */ -static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp) -{ - trace_rcu_prep_idle("Timer"); - return HRTIMER_NORESTART; -} - -/* - * Initialize the timer used to pull CPUs out of dyntick-idle mode. - */ -static void rcu_prepare_for_idle_init(int cpu) -{ - static int firsttime = 1; - struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu); - - hrtimer_init(hrtp, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - hrtp->function = rcu_idle_gp_timer_func; - if (firsttime) { - unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY); - - rcu_idle_gp_wait = ns_to_ktime(upj * (u64)1000); - firsttime = 0; - } -} - -/* - * Clean up for exit from idle. Because we are exiting from idle, there - * is no longer any point to rcu_idle_gp_timer, so cancel it. This will - * do nothing if this timer is not active, so just cancel it unconditionally. - */ -static void rcu_cleanup_after_idle(int cpu) -{ - hrtimer_cancel(&per_cpu(rcu_idle_gp_timer, cpu)); -} /* - * Check to see if any RCU-related work can be done by the current CPU, - * and if so, schedule a softirq to get it done. This function is part - * of the RCU implementation; it is -not- an exported member of the RCU API. + * Check to see if any future RCU-related work will need to be done + * by the current CPU, even if none need be done immediately, returning + * 1 if so. This function is part of the RCU implementation; it is -not- + * an exported member of the RCU API. * - * The idea is for the current CPU to clear out all work required by the - * RCU core for the current grace period, so that this CPU can be permitted - * to enter dyntick-idle mode. In some cases, it will need to be awakened - * at the end of the grace period by whatever CPU ends the grace period. - * This allows CPUs to go dyntick-idle more quickly, and to reduce the - * number of wakeups by a modest integer factor. + * Because we are not supporting preemptible RCU, attempt to accelerate + * any current grace periods so that RCU no longer needs this CPU, but + * only if all other CPUs are already in dynticks-idle mode. This will + * allow the CPU cores to be powered down immediately, as opposed to after + * waiting many milliseconds for grace periods to elapse. * * Because it is not legal to invoke rcu_process_callbacks() with irqs * disabled, we do one pass of force_quiescent_state(), then do a * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked * later. The per-cpu rcu_dyntick_drain variable controls the sequencing. - * - * The caller must have disabled interrupts. */ -static void rcu_prepare_for_idle(int cpu) +int rcu_needs_cpu(int cpu) { - unsigned long flags; - - local_irq_save(flags); - - /* - * If there are no callbacks on this CPU, enter dyntick-idle mode. - * Also reset state to avoid prejudicing later attempts. - */ - if (!rcu_cpu_has_callbacks(cpu)) { - per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; - per_cpu(rcu_dyntick_drain, cpu) = 0; - local_irq_restore(flags); - trace_rcu_prep_idle("No callbacks"); - return; - } - - /* - * If in holdoff mode, just return. We will presumably have - * refrained from disabling the scheduling-clock tick. 
- */ - if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) { - local_irq_restore(flags); - trace_rcu_prep_idle("In holdoff"); - return; + int c = 0; + int snap; + int thatcpu; + + /* Check for being in the holdoff period. */ + if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) + return rcu_needs_cpu_quick_check(cpu); + + /* Don't bother unless we are the last non-dyntick-idle CPU. */ + for_each_online_cpu(thatcpu) { + if (thatcpu == cpu) + continue; + snap = atomic_add_return(0, &per_cpu(rcu_dynticks, + thatcpu).dynticks); + smp_mb(); /* Order sampling of snap with end of grace period. */ + if ((snap & 0x1) != 0) { + per_cpu(rcu_dyntick_drain, cpu) = 0; + per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; + return rcu_needs_cpu_quick_check(cpu); + } } /* Check and update the rcu_dyntick_drain sequencing. */ if (per_cpu(rcu_dyntick_drain, cpu) <= 0) { /* First time through, initialize the counter. */ - per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES; - } else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES && - !rcu_pending(cpu)) { - /* Can we go dyntick-idle despite still having callbacks? */ - trace_rcu_prep_idle("Dyntick with callbacks"); - per_cpu(rcu_dyntick_drain, cpu) = 0; - per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; - hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu), - rcu_idle_gp_wait, HRTIMER_MODE_REL); - return; /* Nothing more to do immediately. */ + per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES; } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) { /* We have hit the limit, so time to give up. */ per_cpu(rcu_dyntick_holdoff, cpu) = jiffies; - local_irq_restore(flags); - trace_rcu_prep_idle("Begin holdoff"); - invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */ - return; + return rcu_needs_cpu_quick_check(cpu); } - /* - * Do one step of pushing the remaining RCU callbacks through - * the RCU core state machine. - */ -#ifdef CONFIG_TREE_PREEMPT_RCU - if (per_cpu(rcu_preempt_data, cpu).nxtlist) { - local_irq_restore(flags); - rcu_preempt_qs(cpu); - force_quiescent_state(&rcu_preempt_state, 0); - local_irq_save(flags); - } -#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ + /* Do one step pushing remaining RCU callbacks through. */ if (per_cpu(rcu_sched_data, cpu).nxtlist) { - local_irq_restore(flags); rcu_sched_qs(cpu); force_quiescent_state(&rcu_sched_state, 0); - local_irq_save(flags); + c = c || per_cpu(rcu_sched_data, cpu).nxtlist; } if (per_cpu(rcu_bh_data, cpu).nxtlist) { - local_irq_restore(flags); rcu_bh_qs(cpu); force_quiescent_state(&rcu_bh_state, 0); - local_irq_save(flags); + c = c || per_cpu(rcu_bh_data, cpu).nxtlist; } - /* - * If RCU callbacks are still pending, RCU still needs this CPU. - * So try forcing the callbacks through the grace period. - */ - if (rcu_cpu_has_callbacks(cpu)) { - local_irq_restore(flags); - trace_rcu_prep_idle("More callbacks"); + /* If RCU callbacks are still pending, RCU still needs this CPU. 
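The rcu_needs_cpu() variant above rations how hard an about-to-idle CPU works at pushing out its remaining callbacks. Below is a simplified model of the rcu_dyntick_drain/rcu_dyntick_holdoff sequencing; the names drain_step and budget are invented for illustration, and the check that all other CPUs are already in dyntick-idle mode is omitted.

enum attempt { TRY_TO_DRAIN, GIVE_UP_THIS_JIFFY };

/* One decision per call: either keep the flush budget ticking down and
 * try another force_quiescent_state() pass, or record the current jiffy
 * in the holdoff counter and stop trying until time moves on. */
static enum attempt drain_step(int *drain, unsigned long *holdoff,
			       unsigned long now_jiffies, int budget)
{
	if (*holdoff == now_jiffies)	/* already gave up during this jiffy */
		return GIVE_UP_THIS_JIFFY;
	if (*drain <= 0)		/* first attempt: arm the budget */
		*drain = budget;
	else if (--*drain <= 0) {	/* budget exhausted */
		*holdoff = now_jiffies;
		return GIVE_UP_THIS_JIFFY;
	}
	return TRY_TO_DRAIN;
}

With RCU_NEEDS_CPU_FLUSHES set to 5, a CPU that cannot shed its callbacks gets five drain passes per idle period before it falls back to the quick check for the rest of the jiffy.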
*/ + if (c) invoke_rcu_core(); - } else { - local_irq_restore(flags); - trace_rcu_prep_idle("Callbacks drained"); - } + return c; } #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ diff --git a/trunk/kernel/rcutree_trace.c b/trunk/kernel/rcutree_trace.c index 654cfe67f0d1..9feffa4c0695 100644 --- a/trunk/kernel/rcutree_trace.c +++ b/trunk/kernel/rcutree_trace.c @@ -67,11 +67,13 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) rdp->completed, rdp->gpnum, rdp->passed_quiesce, rdp->passed_quiesce_gpnum, rdp->qs_pending); - seq_printf(m, " dt=%d/%llx/%d df=%lu", +#ifdef CONFIG_NO_HZ + seq_printf(m, " dt=%d/%d/%d df=%lu", atomic_read(&rdp->dynticks->dynticks), rdp->dynticks->dynticks_nesting, rdp->dynticks->dynticks_nmi_nesting, rdp->dynticks_fqs); +#endif /* #ifdef CONFIG_NO_HZ */ seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi); seq_printf(m, " ql=%ld qs=%c%c%c%c", rdp->qlen, @@ -139,11 +141,13 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp) rdp->completed, rdp->gpnum, rdp->passed_quiesce, rdp->passed_quiesce_gpnum, rdp->qs_pending); - seq_printf(m, ",%d,%llx,%d,%lu", +#ifdef CONFIG_NO_HZ + seq_printf(m, ",%d,%d,%d,%lu", atomic_read(&rdp->dynticks->dynticks), rdp->dynticks->dynticks_nesting, rdp->dynticks->dynticks_nmi_nesting, rdp->dynticks_fqs); +#endif /* #ifdef CONFIG_NO_HZ */ seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi); seq_printf(m, ",%ld,\"%c%c%c%c\"", rdp->qlen, ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != @@ -167,7 +171,9 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp) static int show_rcudata_csv(struct seq_file *m, void *unused) { seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pgp\",\"pq\","); +#ifdef CONFIG_NO_HZ seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\","); +#endif /* #ifdef CONFIG_NO_HZ */ seq_puts(m, "\"of\",\"ri\",\"ql\",\"qs\""); #ifdef CONFIG_RCU_BOOST seq_puts(m, "\"kt\",\"ktl\""); @@ -272,7 +278,7 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) gpnum = rsp->gpnum; seq_printf(m, "c=%lu g=%lu s=%d jfq=%ld j=%x " "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu\n", - rsp->completed, gpnum, rsp->fqs_state, + rsp->completed, gpnum, rsp->signaled, (long)(rsp->jiffies_force_qs - jiffies), (int)(jiffies & 0xffff), rsp->n_force_qs, rsp->n_force_qs_ngp, diff --git a/trunk/kernel/rtmutex-debug.c b/trunk/kernel/rtmutex-debug.c index 16502d3a71c8..8eafd1bd273e 100644 --- a/trunk/kernel/rtmutex-debug.c +++ b/trunk/kernel/rtmutex-debug.c @@ -101,7 +101,6 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter) printk("\n============================================\n"); printk( "[ BUG: circular locking deadlock detected! 
]\n"); - printk("%s\n", print_tainted()); printk( "--------------------------------------------\n"); printk("%s/%d is deadlocking current task %s/%d\n\n", task->comm, task_pid_nr(task), diff --git a/trunk/kernel/rtmutex.c b/trunk/kernel/rtmutex.c index a242e691c993..f9d8482dd487 100644 --- a/trunk/kernel/rtmutex.c +++ b/trunk/kernel/rtmutex.c @@ -579,6 +579,7 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, struct rt_mutex_waiter *waiter) { int ret = 0; + int was_disabled; for (;;) { /* Try to acquire the lock: */ @@ -601,10 +602,17 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, raw_spin_unlock(&lock->wait_lock); + was_disabled = irqs_disabled(); + if (was_disabled) + local_irq_enable(); + debug_rt_mutex_print_deadlock(waiter); schedule_rt_mutex(lock); + if (was_disabled) + local_irq_disable(); + raw_spin_lock(&lock->wait_lock); set_current_state(state); } diff --git a/trunk/kernel/sched/core.c b/trunk/kernel/sched.c similarity index 79% rename from trunk/kernel/sched/core.c rename to trunk/kernel/sched.c index 4dbfd04a2148..d6b149ccf925 100644 --- a/trunk/kernel/sched/core.c +++ b/trunk/kernel/sched.c @@ -1,5 +1,5 @@ /* - * kernel/sched/core.c + * kernel/sched.c * * Kernel scheduler and related syscalls * @@ -56,6 +56,7 @@ #include #include #include +#include #include #include #include @@ -74,17 +75,129 @@ #include #include +#include #ifdef CONFIG_PARAVIRT #include #endif -#include "sched.h" -#include "../workqueue_sched.h" +#include "sched_cpupri.h" +#include "workqueue_sched.h" +#include "sched_autogroup.h" #define CREATE_TRACE_POINTS #include -void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period) +/* + * Convert user-nice values [ -20 ... 0 ... 19 ] + * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], + * and back. + */ +#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20) +#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20) +#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio) + +/* + * 'User priority' is the nice value converted to something we + * can work with better when scaling various scheduler parameters, + * it's a [ 0 ... 39 ] range. + */ +#define USER_PRIO(p) ((p)-MAX_RT_PRIO) +#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio) +#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO)) + +/* + * Helpers for converting nanosecond timing to jiffy resolution + */ +#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ)) + +#define NICE_0_LOAD SCHED_LOAD_SCALE +#define NICE_0_SHIFT SCHED_LOAD_SHIFT + +/* + * These are the 'tuning knobs' of the scheduler: + * + * default timeslice is 100 msecs (used only for SCHED_RR tasks). + * Timeslices get refilled after they expire. + */ +#define DEF_TIMESLICE (100 * HZ / 1000) + +/* + * single value that denotes runtime == period, ie unlimited time. 
+ */ +#define RUNTIME_INF ((u64)~0ULL) + +static inline int rt_policy(int policy) +{ + if (policy == SCHED_FIFO || policy == SCHED_RR) + return 1; + return 0; +} + +static inline int task_has_rt_policy(struct task_struct *p) +{ + return rt_policy(p->policy); +} + +/* + * This is the priority-queue data structure of the RT scheduling class: + */ +struct rt_prio_array { + DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */ + struct list_head queue[MAX_RT_PRIO]; +}; + +struct rt_bandwidth { + /* nests inside the rq lock: */ + raw_spinlock_t rt_runtime_lock; + ktime_t rt_period; + u64 rt_runtime; + struct hrtimer rt_period_timer; +}; + +static struct rt_bandwidth def_rt_bandwidth; + +static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun); + +static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer) +{ + struct rt_bandwidth *rt_b = + container_of(timer, struct rt_bandwidth, rt_period_timer); + ktime_t now; + int overrun; + int idle = 0; + + for (;;) { + now = hrtimer_cb_get_time(timer); + overrun = hrtimer_forward(timer, now, rt_b->rt_period); + + if (!overrun) + break; + + idle = do_sched_rt_period_timer(rt_b, overrun); + } + + return idle ? HRTIMER_NORESTART : HRTIMER_RESTART; +} + +static +void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) +{ + rt_b->rt_period = ns_to_ktime(period); + rt_b->rt_runtime = runtime; + + raw_spin_lock_init(&rt_b->rt_runtime_lock); + + hrtimer_init(&rt_b->rt_period_timer, + CLOCK_MONOTONIC, HRTIMER_MODE_REL); + rt_b->rt_period_timer.function = sched_rt_period_timer; +} + +static inline int rt_bandwidth_enabled(void) +{ + return sysctl_sched_rt_runtime >= 0; +} + +static void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period) { unsigned long delta; ktime_t soft, hard, now; @@ -104,12 +217,580 @@ void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period) } } -DEFINE_MUTEX(sched_domains_mutex); -DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); +static void start_rt_bandwidth(struct rt_bandwidth *rt_b) +{ + if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) + return; + + if (hrtimer_active(&rt_b->rt_period_timer)) + return; + + raw_spin_lock(&rt_b->rt_runtime_lock); + start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period); + raw_spin_unlock(&rt_b->rt_runtime_lock); +} + +#ifdef CONFIG_RT_GROUP_SCHED +static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b) +{ + hrtimer_cancel(&rt_b->rt_period_timer); +} +#endif + +/* + * sched_domains_mutex serializes calls to init_sched_domains, + * detach_destroy_domains and partition_sched_domains. 
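sched_rt_period_timer() above (and sched_cfs_period_timer() later in this file) follows the usual hrtimer_forward() idiom: keep pushing the expiry forward one period at a time until it lies in the future again, do one bandwidth refill per batch of missed periods, and let the timer die once the refill reports that the bandwidth is idle. The sketch below is a rough userspace model of that loop; forward_expiry(), period_timer() and the two callback parameters are invented names, and the real hrtimer_forward() computes the overrun count without looping.

#include <stdint.h>

/* Advance *expiry past `now` in whole periods; return how many periods
 * were crossed (0 means the expiry was already in the future). */
static int forward_expiry(uint64_t *expiry, uint64_t now, uint64_t period)
{
	int overrun = 0;

	while (*expiry <= now) {
		*expiry += period;
		overrun++;
	}
	return overrun;
}

/* Shape of the timer callback: one refill per overrun batch. */
static int period_timer(uint64_t *expiry, uint64_t period,
			uint64_t (*get_time)(void),
			int (*refill)(int overrun))
{
	int idle = 0;

	for (;;) {
		int overrun = forward_expiry(expiry, get_time(), period);

		if (!overrun)
			break;
		idle = refill(overrun);
	}
	return idle;	/* idle maps to HRTIMER_NORESTART, else restart */
}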
+ */ +static DEFINE_MUTEX(sched_domains_mutex); + +#ifdef CONFIG_CGROUP_SCHED + +#include + +struct cfs_rq; + +static LIST_HEAD(task_groups); + +struct cfs_bandwidth { +#ifdef CONFIG_CFS_BANDWIDTH + raw_spinlock_t lock; + ktime_t period; + u64 quota, runtime; + s64 hierarchal_quota; + u64 runtime_expires; + + int idle, timer_active; + struct hrtimer period_timer, slack_timer; + struct list_head throttled_cfs_rq; + + /* statistics */ + int nr_periods, nr_throttled; + u64 throttled_time; +#endif +}; + +/* task group related information */ +struct task_group { + struct cgroup_subsys_state css; + +#ifdef CONFIG_FAIR_GROUP_SCHED + /* schedulable entities of this group on each cpu */ + struct sched_entity **se; + /* runqueue "owned" by this group on each cpu */ + struct cfs_rq **cfs_rq; + unsigned long shares; + + atomic_t load_weight; +#endif + +#ifdef CONFIG_RT_GROUP_SCHED + struct sched_rt_entity **rt_se; + struct rt_rq **rt_rq; + + struct rt_bandwidth rt_bandwidth; +#endif + + struct rcu_head rcu; + struct list_head list; + + struct task_group *parent; + struct list_head siblings; + struct list_head children; + +#ifdef CONFIG_SCHED_AUTOGROUP + struct autogroup *autogroup; +#endif + + struct cfs_bandwidth cfs_bandwidth; +}; + +/* task_group_lock serializes the addition/removal of task groups */ +static DEFINE_SPINLOCK(task_group_lock); + +#ifdef CONFIG_FAIR_GROUP_SCHED + +# define ROOT_TASK_GROUP_LOAD NICE_0_LOAD + +/* + * A weight of 0 or 1 can cause arithmetics problems. + * A weight of a cfs_rq is the sum of weights of which entities + * are queued on this cfs_rq, so a weight of a entity should not be + * too large, so as the shares value of a task group. + * (The default weight is 1024 - so there's no practical + * limitation from this.) + */ +#define MIN_SHARES (1UL << 1) +#define MAX_SHARES (1UL << 18) + +static int root_task_group_load = ROOT_TASK_GROUP_LOAD; +#endif + +/* Default task group. + * Every task in system belong to this group at bootup. + */ +struct task_group root_task_group; + +#endif /* CONFIG_CGROUP_SCHED */ + +/* CFS-related fields in a runqueue */ +struct cfs_rq { + struct load_weight load; + unsigned long nr_running, h_nr_running; + + u64 exec_clock; + u64 min_vruntime; +#ifndef CONFIG_64BIT + u64 min_vruntime_copy; +#endif + + struct rb_root tasks_timeline; + struct rb_node *rb_leftmost; + + struct list_head tasks; + struct list_head *balance_iterator; + + /* + * 'curr' points to currently running entity on this cfs_rq. + * It is set to NULL otherwise (i.e when none are currently running). + */ + struct sched_entity *curr, *next, *last, *skip; + +#ifdef CONFIG_SCHED_DEBUG + unsigned int nr_spread_over; +#endif + +#ifdef CONFIG_FAIR_GROUP_SCHED + struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */ + + /* + * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in + * a hierarchy). Non-leaf lrqs hold other higher schedulable entities + * (like users, containers etc.) + * + * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This + * list is used during load balance. + */ + int on_list; + struct list_head leaf_cfs_rq_list; + struct task_group *tg; /* group that "owns" this runqueue */ + +#ifdef CONFIG_SMP + /* + * the part of load.weight contributed by tasks + */ + unsigned long task_weight; + + /* + * h_load = weight * f(tg) + * + * Where f(tg) is the recursive weight fraction assigned to + * this group. 
+ */ + unsigned long h_load; + + /* + * Maintaining per-cpu shares distribution for group scheduling + * + * load_stamp is the last time we updated the load average + * load_last is the last time we updated the load average and saw load + * load_unacc_exec_time is currently unaccounted execution time + */ + u64 load_avg; + u64 load_period; + u64 load_stamp, load_last, load_unacc_exec_time; + + unsigned long load_contribution; +#endif +#ifdef CONFIG_CFS_BANDWIDTH + int runtime_enabled; + u64 runtime_expires; + s64 runtime_remaining; + + u64 throttled_timestamp; + int throttled, throttle_count; + struct list_head throttled_list; +#endif +#endif +}; + +#ifdef CONFIG_FAIR_GROUP_SCHED +#ifdef CONFIG_CFS_BANDWIDTH +static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) +{ + return &tg->cfs_bandwidth; +} + +static inline u64 default_cfs_period(void); +static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun); +static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b); + +static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) +{ + struct cfs_bandwidth *cfs_b = + container_of(timer, struct cfs_bandwidth, slack_timer); + do_sched_cfs_slack_timer(cfs_b); + + return HRTIMER_NORESTART; +} + +static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) +{ + struct cfs_bandwidth *cfs_b = + container_of(timer, struct cfs_bandwidth, period_timer); + ktime_t now; + int overrun; + int idle = 0; + + for (;;) { + now = hrtimer_cb_get_time(timer); + overrun = hrtimer_forward(timer, now, cfs_b->period); + + if (!overrun) + break; + + idle = do_sched_cfs_period_timer(cfs_b, overrun); + } + + return idle ? HRTIMER_NORESTART : HRTIMER_RESTART; +} + +static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) +{ + raw_spin_lock_init(&cfs_b->lock); + cfs_b->runtime = 0; + cfs_b->quota = RUNTIME_INF; + cfs_b->period = ns_to_ktime(default_cfs_period()); + + INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); + hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + cfs_b->period_timer.function = sched_cfs_period_timer; + hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + cfs_b->slack_timer.function = sched_cfs_slack_timer; +} + +static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) +{ + cfs_rq->runtime_enabled = 0; + INIT_LIST_HEAD(&cfs_rq->throttled_list); +} + +/* requires cfs_b->lock, may release to reprogram timer */ +static void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) +{ + /* + * The timer may be active because we're trying to set a new bandwidth + * period or because we're racing with the tear-down path + * (timer_active==0 becomes visible before the hrtimer call-back + * terminates). 
In either case we ensure that it's re-programmed + */ + while (unlikely(hrtimer_active(&cfs_b->period_timer))) { + raw_spin_unlock(&cfs_b->lock); + /* ensure cfs_b->lock is available while we wait */ + hrtimer_cancel(&cfs_b->period_timer); + + raw_spin_lock(&cfs_b->lock); + /* if someone else restarted the timer then we're done */ + if (cfs_b->timer_active) + return; + } + + cfs_b->timer_active = 1; + start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period); +} + +static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) +{ + hrtimer_cancel(&cfs_b->period_timer); + hrtimer_cancel(&cfs_b->slack_timer); +} +#else +static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} +static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} +static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} + +static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) +{ + return NULL; +} +#endif /* CONFIG_CFS_BANDWIDTH */ +#endif /* CONFIG_FAIR_GROUP_SCHED */ + +/* Real-Time classes' related field in a runqueue: */ +struct rt_rq { + struct rt_prio_array active; + unsigned long rt_nr_running; +#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED + struct { + int curr; /* highest queued rt task prio */ +#ifdef CONFIG_SMP + int next; /* next highest */ +#endif + } highest_prio; +#endif +#ifdef CONFIG_SMP + unsigned long rt_nr_migratory; + unsigned long rt_nr_total; + int overloaded; + struct plist_head pushable_tasks; +#endif + int rt_throttled; + u64 rt_time; + u64 rt_runtime; + /* Nests inside the rq lock: */ + raw_spinlock_t rt_runtime_lock; + +#ifdef CONFIG_RT_GROUP_SCHED + unsigned long rt_nr_boosted; + + struct rq *rq; + struct list_head leaf_rt_rq_list; + struct task_group *tg; +#endif +}; + +#ifdef CONFIG_SMP + +/* + * We add the notion of a root-domain which will be used to define per-domain + * variables. Each exclusive cpuset essentially defines an island domain by + * fully partitioning the member cpus from any other cpuset. Whenever a new + * exclusive cpuset is created, we also create and attach a new root-domain + * object. + * + */ +struct root_domain { + atomic_t refcount; + atomic_t rto_count; + struct rcu_head rcu; + cpumask_var_t span; + cpumask_var_t online; + + /* + * The "RT overload" flag: it gets set if a CPU has more than + * one runnable RT task. + */ + cpumask_var_t rto_mask; + struct cpupri cpupri; +}; + +/* + * By default the system creates a single root-domain with all cpus as + * members (mimicking the global state we have today). + */ +static struct root_domain def_root_domain; + +#endif /* CONFIG_SMP */ + +/* + * This is the main, per-CPU runqueue data structure. + * + * Locking rule: those places that want to lock multiple runqueues + * (such as the load balancing or the thread migration code), lock + * acquire operations must be ordered by ascending &runqueue. + */ +struct rq { + /* runqueue lock: */ + raw_spinlock_t lock; + + /* + * nr_running and cpu_load should be in the same cacheline because + * remote CPUs use both these fields when doing load calculation. 
+ */ + unsigned long nr_running; + #define CPU_LOAD_IDX_MAX 5 + unsigned long cpu_load[CPU_LOAD_IDX_MAX]; + unsigned long last_load_update_tick; +#ifdef CONFIG_NO_HZ + u64 nohz_stamp; + unsigned char nohz_balance_kick; +#endif + int skip_clock_update; + + /* capture load from *all* tasks on this cpu: */ + struct load_weight load; + unsigned long nr_load_updates; + u64 nr_switches; + + struct cfs_rq cfs; + struct rt_rq rt; + +#ifdef CONFIG_FAIR_GROUP_SCHED + /* list of leaf cfs_rq on this cpu: */ + struct list_head leaf_cfs_rq_list; +#endif +#ifdef CONFIG_RT_GROUP_SCHED + struct list_head leaf_rt_rq_list; +#endif + + /* + * This is part of a global counter where only the total sum + * over all CPUs matters. A task can increase this counter on + * one CPU and if it got migrated afterwards it may decrease + * it on another CPU. Always updated under the runqueue lock: + */ + unsigned long nr_uninterruptible; + + struct task_struct *curr, *idle, *stop; + unsigned long next_balance; + struct mm_struct *prev_mm; + + u64 clock; + u64 clock_task; + + atomic_t nr_iowait; + +#ifdef CONFIG_SMP + struct root_domain *rd; + struct sched_domain *sd; + + unsigned long cpu_power; + + unsigned char idle_balance; + /* For active balancing */ + int post_schedule; + int active_balance; + int push_cpu; + struct cpu_stop_work active_balance_work; + /* cpu of this runqueue: */ + int cpu; + int online; + + u64 rt_avg; + u64 age_stamp; + u64 idle_stamp; + u64 avg_idle; +#endif + +#ifdef CONFIG_IRQ_TIME_ACCOUNTING + u64 prev_irq_time; +#endif +#ifdef CONFIG_PARAVIRT + u64 prev_steal_time; +#endif +#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING + u64 prev_steal_time_rq; +#endif + + /* calc_load related fields */ + unsigned long calc_load_update; + long calc_load_active; + +#ifdef CONFIG_SCHED_HRTICK +#ifdef CONFIG_SMP + int hrtick_csd_pending; + struct call_single_data hrtick_csd; +#endif + struct hrtimer hrtick_timer; +#endif + +#ifdef CONFIG_SCHEDSTATS + /* latency stats */ + struct sched_info rq_sched_info; + unsigned long long rq_cpu_time; + /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */ + + /* sys_sched_yield() stats */ + unsigned int yld_count; + + /* schedule() stats */ + unsigned int sched_switch; + unsigned int sched_count; + unsigned int sched_goidle; + + /* try_to_wake_up() stats */ + unsigned int ttwu_count; + unsigned int ttwu_local; +#endif + +#ifdef CONFIG_SMP + struct llist_head wake_list; +#endif +}; + +static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); + + +static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); + +static inline int cpu_of(struct rq *rq) +{ +#ifdef CONFIG_SMP + return rq->cpu; +#else + return 0; +#endif +} + +#define rcu_dereference_check_sched_domain(p) \ + rcu_dereference_check((p), \ + lockdep_is_held(&sched_domains_mutex)) + +/* + * The domain tree (rq->sd) is protected by RCU's quiescent state transition. + * See detach_destroy_domains: synchronize_sched for details. + * + * The domain tree of any CPU may only be accessed from within + * preempt-disabled sections. + */ +#define for_each_domain(cpu, __sd) \ + for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent) + +#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) +#define this_rq() (&__get_cpu_var(runqueues)) +#define task_rq(p) cpu_rq(task_cpu(p)) +#define cpu_curr(cpu) (cpu_rq(cpu)->curr) +#define raw_rq() (&__raw_get_cpu_var(runqueues)) + +#ifdef CONFIG_CGROUP_SCHED + +/* + * Return the group to which this tasks belongs. 
+ * + * We use task_subsys_state_check() and extend the RCU verification with + * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each + * task it moves into the cgroup. Therefore by holding either of those locks, + * we pin the task to the current cgroup. + */ +static inline struct task_group *task_group(struct task_struct *p) +{ + struct task_group *tg; + struct cgroup_subsys_state *css; + + css = task_subsys_state_check(p, cpu_cgroup_subsys_id, + lockdep_is_held(&p->pi_lock) || + lockdep_is_held(&task_rq(p)->lock)); + tg = container_of(css, struct task_group, css); + + return autogroup_task_group(p, tg); +} + +/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ +static inline void set_task_rq(struct task_struct *p, unsigned int cpu) +{ +#ifdef CONFIG_FAIR_GROUP_SCHED + p->se.cfs_rq = task_group(p)->cfs_rq[cpu]; + p->se.parent = task_group(p)->se[cpu]; +#endif + +#ifdef CONFIG_RT_GROUP_SCHED + p->rt.rt_rq = task_group(p)->rt_rq[cpu]; + p->rt.parent = task_group(p)->rt_se[cpu]; +#endif +} + +#else /* CONFIG_CGROUP_SCHED */ + +static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } +static inline struct task_group *task_group(struct task_struct *p) +{ + return NULL; +} + +#endif /* CONFIG_CGROUP_SCHED */ static void update_rq_clock_task(struct rq *rq, s64 delta); -void update_rq_clock(struct rq *rq) +static void update_rq_clock(struct rq *rq) { s64 delta; @@ -121,15 +802,45 @@ void update_rq_clock(struct rq *rq) update_rq_clock_task(rq, delta); } +/* + * Tunables that become constants when CONFIG_SCHED_DEBUG is off: + */ +#ifdef CONFIG_SCHED_DEBUG +# define const_debug __read_mostly +#else +# define const_debug static const +#endif + +/** + * runqueue_is_locked - Returns true if the current cpu runqueue is locked + * @cpu: the processor in question. + * + * This interface allows printk to be called with the runqueue lock + * held and know whether or not it is OK to wake up the klogd. 
+ */ +int runqueue_is_locked(int cpu) +{ + return raw_spin_is_locked(&cpu_rq(cpu)->lock); +} + /* * Debugging: various feature bits */ +#define SCHED_FEAT(name, enabled) \ + __SCHED_FEAT_##name , + +enum { +#include "sched_features.h" +}; + +#undef SCHED_FEAT + #define SCHED_FEAT(name, enabled) \ (1UL << __SCHED_FEAT_##name) * enabled | const_debug unsigned int sysctl_sched_features = -#include "features.h" +#include "sched_features.h" 0; #undef SCHED_FEAT @@ -139,7 +850,7 @@ const_debug unsigned int sysctl_sched_features = #name , static __read_mostly char *sched_feat_names[] = { -#include "features.h" +#include "sched_features.h" NULL }; @@ -149,7 +860,7 @@ static int sched_feat_show(struct seq_file *m, void *v) { int i; - for (i = 0; i < __SCHED_FEAT_NR; i++) { + for (i = 0; sched_feat_names[i]; i++) { if (!(sysctl_sched_features & (1UL << i))) seq_puts(m, "NO_"); seq_printf(m, "%s ", sched_feat_names[i]); @@ -159,36 +870,6 @@ static int sched_feat_show(struct seq_file *m, void *v) return 0; } -#ifdef HAVE_JUMP_LABEL - -#define jump_label_key__true jump_label_key_enabled -#define jump_label_key__false jump_label_key_disabled - -#define SCHED_FEAT(name, enabled) \ - jump_label_key__##enabled , - -struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR] = { -#include "features.h" -}; - -#undef SCHED_FEAT - -static void sched_feat_disable(int i) -{ - if (jump_label_enabled(&sched_feat_keys[i])) - jump_label_dec(&sched_feat_keys[i]); -} - -static void sched_feat_enable(int i) -{ - if (!jump_label_enabled(&sched_feat_keys[i])) - jump_label_inc(&sched_feat_keys[i]); -} -#else -static void sched_feat_disable(int i) { }; -static void sched_feat_enable(int i) { }; -#endif /* HAVE_JUMP_LABEL */ - static ssize_t sched_feat_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) @@ -212,20 +893,17 @@ sched_feat_write(struct file *filp, const char __user *ubuf, cmp += 3; } - for (i = 0; i < __SCHED_FEAT_NR; i++) { + for (i = 0; sched_feat_names[i]; i++) { if (strcmp(cmp, sched_feat_names[i]) == 0) { - if (neg) { + if (neg) sysctl_sched_features &= ~(1UL << i); - sched_feat_disable(i); - } else { + else sysctl_sched_features |= (1UL << i); - sched_feat_enable(i); - } break; } } - if (i == __SCHED_FEAT_NR) + if (!sched_feat_names[i]) return -EINVAL; *ppos += cnt; @@ -254,7 +932,10 @@ static __init int sched_init_debug(void) return 0; } late_initcall(sched_init_debug); -#endif /* CONFIG_SCHED_DEBUG */ + +#endif + +#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) /* * Number of tasks to iterate in a single balance run. @@ -276,7 +957,7 @@ const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC; */ unsigned int sysctl_sched_rt_period = 1000000; -__read_mostly int scheduler_running; +static __read_mostly int scheduler_running; /* * part of the period that we allow rt tasks to run in us. 
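The sched_feat plumbing above derives the __SCHED_FEAT_* enum, the default sysctl_sched_features mask and the name table from a single list by redefining SCHED_FEAT before each inclusion of sched_features.h. Here is a self-contained sketch of the same preprocessor trick, using a hypothetical wrapper macro in place of the repeated #include and a shortened feature list:

/* One list; everything else is generated from it. */
#define FEATURES(F)			\
	F(GENTLE_FAIR_SLEEPERS, 1)	\
	F(START_DEBIT, 1)		\
	F(HRTICK, 0)

#define F_ENUM(name, enabled)	__FEAT_##name,
enum { FEATURES(F_ENUM) __FEAT_NR };
#undef F_ENUM

#define F_BIT(name, enabled)	(1UL << __FEAT_##name) * (enabled) |
static const unsigned long default_features = FEATURES(F_BIT) 0;
#undef F_BIT

#define F_NAME(name, enabled)	#name,
static const char *feature_names[] = { FEATURES(F_NAME) };
#undef F_NAME

Adding a feature means adding one line to the list; the enum value, the default bit and the debugfs name all follow automatically, which is why sched_feat_write() can simply loop over the name table to find the bit to flip.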
@@ -284,7 +965,112 @@ __read_mostly int scheduler_running; */ int sysctl_sched_rt_runtime = 950000; +static inline u64 global_rt_period(void) +{ + return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; +} + +static inline u64 global_rt_runtime(void) +{ + if (sysctl_sched_rt_runtime < 0) + return RUNTIME_INF; + + return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; +} + +#ifndef prepare_arch_switch +# define prepare_arch_switch(next) do { } while (0) +#endif +#ifndef finish_arch_switch +# define finish_arch_switch(prev) do { } while (0) +#endif +static inline int task_current(struct rq *rq, struct task_struct *p) +{ + return rq->curr == p; +} + +static inline int task_running(struct rq *rq, struct task_struct *p) +{ +#ifdef CONFIG_SMP + return p->on_cpu; +#else + return task_current(rq, p); +#endif +} + +#ifndef __ARCH_WANT_UNLOCKED_CTXSW +static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) +{ +#ifdef CONFIG_SMP + /* + * We can optimise this out completely for !SMP, because the + * SMP rebalancing from interrupt is the only thing that cares + * here. + */ + next->on_cpu = 1; +#endif +} + +static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) +{ +#ifdef CONFIG_SMP + /* + * After ->on_cpu is cleared, the task can be moved to a different CPU. + * We must ensure this doesn't happen until the switch is completely + * finished. + */ + smp_wmb(); + prev->on_cpu = 0; +#endif +#ifdef CONFIG_DEBUG_SPINLOCK + /* this is a valid case when another task releases the spinlock */ + rq->lock.owner = current; +#endif + /* + * If we are tracking spinlock dependencies then we have to + * fix up the runqueue lock - which gets 'carried over' from + * prev into current: + */ + spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); + + raw_spin_unlock_irq(&rq->lock); +} + +#else /* __ARCH_WANT_UNLOCKED_CTXSW */ +static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) +{ +#ifdef CONFIG_SMP + /* + * We can optimise this out completely for !SMP, because the + * SMP rebalancing from interrupt is the only thing that cares + * here. + */ + next->on_cpu = 1; +#endif +#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW + raw_spin_unlock_irq(&rq->lock); +#else + raw_spin_unlock(&rq->lock); +#endif +} + +static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) +{ +#ifdef CONFIG_SMP + /* + * After ->on_cpu is cleared, the task can be moved to a different CPU. + * We must ensure this doesn't happen until the switch is completely + * finished. + */ + smp_wmb(); + prev->on_cpu = 0; +#endif +#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW + local_irq_enable(); +#endif +} +#endif /* __ARCH_WANT_UNLOCKED_CTXSW */ /* * __task_rq_lock - lock the rq @p resides on. @@ -367,6 +1153,20 @@ static struct rq *this_rq_lock(void) * rq->lock. 
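Taking the defaults shown above together, sysctl_sched_rt_period = 1000000 us and sysctl_sched_rt_runtime = 950000 us mean that global_rt_runtime()/global_rt_period() allow realtime tasks to consume at most 95% of every one-second period, reserving 50 ms for everything else, while a negative runtime disables throttling entirely. A small standalone check of that conversion (the function name rt_runtime_ns is made up for the example):

#include <assert.h>
#include <stdint.h>

#define NSEC_PER_USEC	1000ULL
#define RUNTIME_INF	(~0ULL)

static uint64_t rt_runtime_ns(long long sysctl_rt_runtime_us)
{
	if (sysctl_rt_runtime_us < 0)
		return RUNTIME_INF;	/* unlimited: no RT throttling */
	return (uint64_t)sysctl_rt_runtime_us * NSEC_PER_USEC;
}

int main(void)
{
	assert(rt_runtime_ns(950000) == 950000000ULL);	/* 0.95 s of each 1 s */
	assert(rt_runtime_ns(-1) == RUNTIME_INF);
	return 0;
}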
*/ +/* + * Use hrtick when: + * - enabled by features + * - hrtimer is actually high res + */ +static inline int hrtick_enabled(struct rq *rq) +{ + if (!sched_feat(HRTICK)) + return 0; + if (!cpu_active(cpu_of(rq))) + return 0; + return hrtimer_is_hres_active(&rq->hrtick_timer); +} + static void hrtick_clear(struct rq *rq) { if (hrtimer_active(&rq->hrtick_timer)) @@ -410,7 +1210,7 @@ static void __hrtick_start(void *arg) * * called with rq->lock held and irqs disabled */ -void hrtick_start(struct rq *rq, u64 delay) +static void hrtick_start(struct rq *rq, u64 delay) { struct hrtimer *timer = &rq->hrtick_timer; ktime_t time = ktime_add_ns(timer->base->get_time(), delay); @@ -454,7 +1254,7 @@ static __init void init_hrtick(void) * * called with rq->lock held and irqs disabled */ -void hrtick_start(struct rq *rq, u64 delay) +static void hrtick_start(struct rq *rq, u64 delay) { __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0, HRTIMER_MODE_REL_PINNED, 0); @@ -505,7 +1305,7 @@ static inline void init_hrtick(void) #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) #endif -void resched_task(struct task_struct *p) +static void resched_task(struct task_struct *p) { int cpu; @@ -526,7 +1326,7 @@ void resched_task(struct task_struct *p) smp_send_reschedule(cpu); } -void resched_cpu(int cpu) +static void resched_cpu(int cpu) { struct rq *rq = cpu_rq(cpu); unsigned long flags; @@ -597,62 +1397,236 @@ void wake_up_idle_cpu(int cpu) * lockless. The worst case is that the other CPU runs the * idle task through an additional NOOP schedule() */ - set_tsk_need_resched(rq->idle); + set_tsk_need_resched(rq->idle); + + /* NEED_RESCHED must be visible before we test polling */ + smp_mb(); + if (!tsk_is_polling(rq->idle)) + smp_send_reschedule(cpu); +} + +static inline bool got_nohz_idle_kick(void) +{ + return idle_cpu(smp_processor_id()) && this_rq()->nohz_balance_kick; +} + +#else /* CONFIG_NO_HZ */ + +static inline bool got_nohz_idle_kick(void) +{ + return false; +} + +#endif /* CONFIG_NO_HZ */ + +static u64 sched_avg_period(void) +{ + return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2; +} + +static void sched_avg_update(struct rq *rq) +{ + s64 period = sched_avg_period(); + + while ((s64)(rq->clock - rq->age_stamp) > period) { + /* + * Inline assembly required to prevent the compiler + * optimising this loop into a divmod call. + * See __iter_div_u64_rem() for another example of this. + */ + asm("" : "+rm" (rq->age_stamp)); + rq->age_stamp += period; + rq->rt_avg /= 2; + } +} + +static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) +{ + rq->rt_avg += rt_delta; + sched_avg_update(rq); +} + +#else /* !CONFIG_SMP */ +static void resched_task(struct task_struct *p) +{ + assert_raw_spin_locked(&task_rq(p)->lock); + set_tsk_need_resched(p); +} + +static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) +{ +} + +static void sched_avg_update(struct rq *rq) +{ +} +#endif /* CONFIG_SMP */ + +#if BITS_PER_LONG == 32 +# define WMULT_CONST (~0UL) +#else +# define WMULT_CONST (1UL << 32) +#endif + +#define WMULT_SHIFT 32 + +/* + * Shift right and round: + */ +#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) + +/* + * delta *= weight / lw + */ +static unsigned long +calc_delta_mine(unsigned long delta_exec, unsigned long weight, + struct load_weight *lw) +{ + u64 tmp; + + /* + * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched + * entities since MIN_SHARES = 2. Treat weight as 1 if less than + * 2^SCHED_LOAD_RESOLUTION. 
+ */ + if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION))) + tmp = (u64)delta_exec * scale_load_down(weight); + else + tmp = (u64)delta_exec; + + if (!lw->inv_weight) { + unsigned long w = scale_load_down(lw->weight); + + if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST)) + lw->inv_weight = 1; + else if (unlikely(!w)) + lw->inv_weight = WMULT_CONST; + else + lw->inv_weight = WMULT_CONST / w; + } + + /* + * Check whether we'd overflow the 64-bit multiplication: + */ + if (unlikely(tmp > WMULT_CONST)) + tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight, + WMULT_SHIFT/2); + else + tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT); - /* NEED_RESCHED must be visible before we test polling */ - smp_mb(); - if (!tsk_is_polling(rq->idle)) - smp_send_reschedule(cpu); + return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); } -static inline bool got_nohz_idle_kick(void) +static inline void update_load_add(struct load_weight *lw, unsigned long inc) { - int cpu = smp_processor_id(); - return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)); + lw->weight += inc; + lw->inv_weight = 0; } -#else /* CONFIG_NO_HZ */ +static inline void update_load_sub(struct load_weight *lw, unsigned long dec) +{ + lw->weight -= dec; + lw->inv_weight = 0; +} -static inline bool got_nohz_idle_kick(void) +static inline void update_load_set(struct load_weight *lw, unsigned long w) { - return false; + lw->weight = w; + lw->inv_weight = 0; } -#endif /* CONFIG_NO_HZ */ +/* + * To aid in avoiding the subversion of "niceness" due to uneven distribution + * of tasks with abnormal "nice" values across CPUs the contribution that + * each task makes to its run queue's load is weighted according to its + * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a + * scaled version of the new time slice allocation that they receive on time + * slice expiry etc. + */ -void sched_avg_update(struct rq *rq) -{ - s64 period = sched_avg_period(); +#define WEIGHT_IDLEPRIO 3 +#define WMULT_IDLEPRIO 1431655765 - while ((s64)(rq->clock - rq->age_stamp) > period) { - /* - * Inline assembly required to prevent the compiler - * optimising this loop into a divmod call. - * See __iter_div_u64_rem() for another example of this. - */ - asm("" : "+rm" (rq->age_stamp)); - rq->age_stamp += period; - rq->rt_avg /= 2; - } +/* + * Nice levels are multiplicative, with a gentle 10% change for every + * nice level changed. I.e. when a CPU-bound task goes from nice 0 to + * nice 1, it will get ~10% less CPU time than another CPU-bound task + * that remained on nice 0. + * + * The "10% effect" is relative and cumulative: from _any_ nice level, + * if you go up 1 level, it's -10% CPU usage, if you go down 1 level + * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25. + * If a task goes up by ~10% and another task goes down by ~10% then + * the relative distance between them is ~25%.) + */ +static const int prio_to_weight[40] = { + /* -20 */ 88761, 71755, 56483, 46273, 36291, + /* -15 */ 29154, 23254, 18705, 14949, 11916, + /* -10 */ 9548, 7620, 6100, 4904, 3906, + /* -5 */ 3121, 2501, 1991, 1586, 1277, + /* 0 */ 1024, 820, 655, 526, 423, + /* 5 */ 335, 272, 215, 172, 137, + /* 10 */ 110, 87, 70, 56, 45, + /* 15 */ 36, 29, 23, 18, 15, +}; + +/* + * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated. 
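The prio_to_weight[] table above encodes the "10% effect" as a roughly 1.25x step between adjacent nice levels, and the prio_to_wmult[] table that follows caches approximately 2^32/weight so that calc_delta_mine() can replace the division by a weight with a multiply and a 32-bit shift (rounded via SRR() in the kernel). A standalone check of both properties using the nice 0 and nice 1 entries:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint64_t w0 = 1024;		/* prio_to_weight[20], nice 0 */
	const uint64_t w1 = 820;		/* prio_to_weight[21], nice 1 */
	const uint64_t wmult0 = 4194304;	/* prio_to_wmult[20] */

	/* Adjacent nice levels differ by ~25% in weight, so two CPU-bound
	 * tasks at nice 0 and nice 1 split the CPU roughly 55.5%/44.5%,
	 * about a ten-point gap. */
	assert(w0 * 100 / w1 == 124);		/* 1024/820 = 1.249... */
	assert(w0 * 1000 / (w0 + w1) == 555);	/* nice 0 share, per mille */

	/* wmult caches 2^32/weight, so x/weight becomes (x * wmult) >> 32. */
	assert(wmult0 == (1ULL << 32) / w0);
	assert((6000000ULL * wmult0) >> 32 == 6000000ULL / w0);
	return 0;
}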
+ * + * In cases where the weight does not change often, we can use the + * precalculated inverse to speed up arithmetics by turning divisions + * into multiplications: + */ +static const u32 prio_to_wmult[40] = { + /* -20 */ 48388, 59856, 76040, 92818, 118348, + /* -15 */ 147320, 184698, 229616, 287308, 360437, + /* -10 */ 449829, 563644, 704093, 875809, 1099582, + /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, + /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, + /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, + /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, + /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, +}; + +/* Time spent by the tasks of the cpu accounting group executing in ... */ +enum cpuacct_stat_index { + CPUACCT_STAT_USER, /* ... user mode */ + CPUACCT_STAT_SYSTEM, /* ... kernel mode */ + + CPUACCT_STAT_NSTATS, +}; + +#ifdef CONFIG_CGROUP_CPUACCT +static void cpuacct_charge(struct task_struct *tsk, u64 cputime); +static void cpuacct_update_stats(struct task_struct *tsk, + enum cpuacct_stat_index idx, cputime_t val); +#else +static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} +static inline void cpuacct_update_stats(struct task_struct *tsk, + enum cpuacct_stat_index idx, cputime_t val) {} +#endif + +static inline void inc_cpu_load(struct rq *rq, unsigned long load) +{ + update_load_add(&rq->load, load); } -#else /* !CONFIG_SMP */ -void resched_task(struct task_struct *p) +static inline void dec_cpu_load(struct rq *rq, unsigned long load) { - assert_raw_spin_locked(&task_rq(p)->lock); - set_tsk_need_resched(p); + update_load_sub(&rq->load, load); } -#endif /* CONFIG_SMP */ #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \ (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH))) +typedef int (*tg_visitor)(struct task_group *, void *); + /* * Iterate task_group tree rooted at *from, calling @down when first entering a * node and @up when leaving it for the final time. * * Caller must hold rcu_lock or sufficient equivalent. */ -int walk_tg_tree_from(struct task_group *from, +static int walk_tg_tree_from(struct task_group *from, tg_visitor down, tg_visitor up, void *data) { struct task_group *parent, *child; @@ -683,13 +1657,270 @@ int walk_tg_tree_from(struct task_group *from, return ret; } -int tg_nop(struct task_group *tg, void *data) +/* + * Iterate the full tree, calling @down when first entering a node and @up when + * leaving it for the final time. + * + * Caller must hold rcu_lock or sufficient equivalent. + */ + +static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) +{ + return walk_tg_tree_from(&root_task_group, down, up, data); +} + +static int tg_nop(struct task_group *tg, void *data) +{ + return 0; +} +#endif + +#ifdef CONFIG_SMP +/* Used instead of source_load when we know the type == 0 */ +static unsigned long weighted_cpuload(const int cpu) +{ + return cpu_rq(cpu)->load.weight; +} + +/* + * Return a low guess at the load of a migration-source cpu weighted + * according to the scheduling class and "nice" value. + * + * We want to under-estimate the load of migration sources, to + * balance conservatively. 
+ */ +static unsigned long source_load(int cpu, int type) +{ + struct rq *rq = cpu_rq(cpu); + unsigned long total = weighted_cpuload(cpu); + + if (type == 0 || !sched_feat(LB_BIAS)) + return total; + + return min(rq->cpu_load[type-1], total); +} + +/* + * Return a high guess at the load of a migration-target cpu weighted + * according to the scheduling class and "nice" value. + */ +static unsigned long target_load(int cpu, int type) +{ + struct rq *rq = cpu_rq(cpu); + unsigned long total = weighted_cpuload(cpu); + + if (type == 0 || !sched_feat(LB_BIAS)) + return total; + + return max(rq->cpu_load[type-1], total); +} + +static unsigned long power_of(int cpu) +{ + return cpu_rq(cpu)->cpu_power; +} + +static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); + +static unsigned long cpu_avg_load_per_task(int cpu) { + struct rq *rq = cpu_rq(cpu); + unsigned long nr_running = ACCESS_ONCE(rq->nr_running); + + if (nr_running) + return rq->load.weight / nr_running; + return 0; } + +#ifdef CONFIG_PREEMPT + +static void double_rq_lock(struct rq *rq1, struct rq *rq2); + +/* + * fair double_lock_balance: Safely acquires both rq->locks in a fair + * way at the expense of forcing extra atomic operations in all + * invocations. This assures that the double_lock is acquired using the + * same underlying policy as the spinlock_t on this architecture, which + * reduces latency compared to the unfair variant below. However, it + * also adds more overhead and therefore may reduce throughput. + */ +static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) + __releases(this_rq->lock) + __acquires(busiest->lock) + __acquires(this_rq->lock) +{ + raw_spin_unlock(&this_rq->lock); + double_rq_lock(this_rq, busiest); + + return 1; +} + +#else +/* + * Unfair double_lock_balance: Optimizes throughput at the expense of + * latency by eliminating extra atomic operations when the locks are + * already in proper order on entry. This favors lower cpu-ids and will + * grant the double lock to lower cpus over higher ids under contention, + * regardless of entry order into the function. + */ +static int _double_lock_balance(struct rq *this_rq, struct rq *busiest) + __releases(this_rq->lock) + __acquires(busiest->lock) + __acquires(this_rq->lock) +{ + int ret = 0; + + if (unlikely(!raw_spin_trylock(&busiest->lock))) { + if (busiest < this_rq) { + raw_spin_unlock(&this_rq->lock); + raw_spin_lock(&busiest->lock); + raw_spin_lock_nested(&this_rq->lock, + SINGLE_DEPTH_NESTING); + ret = 1; + } else + raw_spin_lock_nested(&busiest->lock, + SINGLE_DEPTH_NESTING); + } + return ret; +} + +#endif /* CONFIG_PREEMPT */ + +/* + * double_lock_balance - lock the busiest runqueue, this_rq is locked already. + */ +static int double_lock_balance(struct rq *this_rq, struct rq *busiest) +{ + if (unlikely(!irqs_disabled())) { + /* printk() doesn't work good under rq->lock */ + raw_spin_unlock(&this_rq->lock); + BUG_ON(1); + } + + return _double_lock_balance(this_rq, busiest); +} + +static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) + __releases(busiest->lock) +{ + raw_spin_unlock(&busiest->lock); + lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); +} + +/* + * double_rq_lock - safely lock two runqueues + * + * Note this does not disable interrupts like task_rq_lock, + * you need to do so manually before calling. 
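When the trylock fast path in the unfair _double_lock_balance() fails, both it and double_rq_lock() fall back to taking the lower-addressed runqueue lock first, so two CPUs locking the same pair of runqueues can never each hold one lock while spinning on the other. A minimal userspace analogue of that ordering rule, with pthread mutexes standing in for rq->lock:

#include <pthread.h>

/* Always acquire the lower-addressed lock first; the consistent global
 * order is what rules out ABBA deadlock, mirroring the rq1 < rq2 and
 * busiest < this_rq comparisons above. */
static void double_lock(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {
		pthread_mutex_lock(a);
		return;
	}
	if (a > b) {
		pthread_mutex_t *tmp = a;

		a = b;
		b = tmp;
	}
	pthread_mutex_lock(a);
	pthread_mutex_lock(b);
}

static void double_unlock(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_unlock(a);
	if (a != b)
		pthread_mutex_unlock(b);
}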
+ */ +static void double_rq_lock(struct rq *rq1, struct rq *rq2) + __acquires(rq1->lock) + __acquires(rq2->lock) +{ + BUG_ON(!irqs_disabled()); + if (rq1 == rq2) { + raw_spin_lock(&rq1->lock); + __acquire(rq2->lock); /* Fake it out ;) */ + } else { + if (rq1 < rq2) { + raw_spin_lock(&rq1->lock); + raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); + } else { + raw_spin_lock(&rq2->lock); + raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); + } + } +} + +/* + * double_rq_unlock - safely unlock two runqueues + * + * Note this does not restore interrupts like task_rq_unlock, + * you need to do so manually after calling. + */ +static void double_rq_unlock(struct rq *rq1, struct rq *rq2) + __releases(rq1->lock) + __releases(rq2->lock) +{ + raw_spin_unlock(&rq1->lock); + if (rq1 != rq2) + raw_spin_unlock(&rq2->lock); + else + __release(rq2->lock); +} + +#else /* CONFIG_SMP */ + +/* + * double_rq_lock - safely lock two runqueues + * + * Note this does not disable interrupts like task_rq_lock, + * you need to do so manually before calling. + */ +static void double_rq_lock(struct rq *rq1, struct rq *rq2) + __acquires(rq1->lock) + __acquires(rq2->lock) +{ + BUG_ON(!irqs_disabled()); + BUG_ON(rq1 != rq2); + raw_spin_lock(&rq1->lock); + __acquire(rq2->lock); /* Fake it out ;) */ +} + +/* + * double_rq_unlock - safely unlock two runqueues + * + * Note this does not restore interrupts like task_rq_unlock, + * you need to do so manually after calling. + */ +static void double_rq_unlock(struct rq *rq1, struct rq *rq2) + __releases(rq1->lock) + __releases(rq2->lock) +{ + BUG_ON(rq1 != rq2); + raw_spin_unlock(&rq1->lock); + __release(rq2->lock); +} + +#endif + +static void calc_load_account_idle(struct rq *this_rq); +static void update_sysctl(void); +static int get_update_sysctl_factor(void); +static void update_cpu_load(struct rq *this_rq); + +static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) +{ + set_task_rq(p, cpu); +#ifdef CONFIG_SMP + /* + * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be + * successfully executed on another CPU. We must ensure that updates of + * per-task data have been completed by this moment. + */ + smp_wmb(); + task_thread_info(p)->cpu = cpu; #endif +} + +static const struct sched_class rt_sched_class; + +#define sched_class_highest (&stop_sched_class) +#define for_each_class(class) \ + for (class = sched_class_highest; class; class = class->next) + +#include "sched_stats.h" -void update_cpu_load(struct rq *this_rq); +static void inc_nr_running(struct rq *rq) +{ + rq->nr_running++; +} + +static void dec_nr_running(struct rq *rq) +{ + rq->nr_running--; +} static void set_load_weight(struct task_struct *p) { @@ -726,7 +1957,7 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) /* * activate_task - move a task to the runqueue. */ -void activate_task(struct rq *rq, struct task_struct *p, int flags) +static void activate_task(struct rq *rq, struct task_struct *p, int flags) { if (task_contributes_to_load(p)) rq->nr_uninterruptible--; @@ -737,7 +1968,7 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags) /* * deactivate_task - remove a task from the runqueue. 
*/ -void deactivate_task(struct rq *rq, struct task_struct *p, int flags) +static void deactivate_task(struct rq *rq, struct task_struct *p, int flags) { if (task_contributes_to_load(p)) rq->nr_uninterruptible++; @@ -928,14 +2159,14 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) #ifdef CONFIG_IRQ_TIME_ACCOUNTING static int irqtime_account_hi_update(void) { - u64 *cpustat = kcpustat_this_cpu->cpustat; + struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; unsigned long flags; u64 latest_ns; int ret = 0; local_irq_save(flags); latest_ns = this_cpu_read(cpu_hardirq_time); - if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ]) + if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq)) ret = 1; local_irq_restore(flags); return ret; @@ -943,14 +2174,14 @@ static int irqtime_account_hi_update(void) static int irqtime_account_si_update(void) { - u64 *cpustat = kcpustat_this_cpu->cpustat; + struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; unsigned long flags; u64 latest_ns; int ret = 0; local_irq_save(flags); latest_ns = this_cpu_read(cpu_softirq_time); - if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ]) + if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq)) ret = 1; local_irq_restore(flags); return ret; @@ -962,6 +2193,15 @@ static int irqtime_account_si_update(void) #endif +#include "sched_idletask.c" +#include "sched_fair.c" +#include "sched_rt.c" +#include "sched_autogroup.c" +#include "sched_stoptask.c" +#ifdef CONFIG_SCHED_DEBUG +# include "sched_debug.c" +#endif + void sched_set_stop_task(int cpu, struct task_struct *stop) { struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; @@ -1059,7 +2299,7 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p, p->sched_class->prio_changed(rq, p, oldprio); } -void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) +static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) { const struct sched_class *class; @@ -1085,6 +2325,38 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) } #ifdef CONFIG_SMP +/* + * Is this task likely cache-hot: + */ +static int +task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) +{ + s64 delta; + + if (p->sched_class != &fair_sched_class) + return 0; + + if (unlikely(p->policy == SCHED_IDLE)) + return 0; + + /* + * Buddy candidates are cache hot: + */ + if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running && + (&p->se == cfs_rq_of(&p->se)->next || + &p->se == cfs_rq_of(&p->se)->last)) + return 1; + + if (sysctl_sched_migration_cost == -1) + return 1; + if (sysctl_sched_migration_cost == 0) + return 0; + + delta = now - p->se.exec_start; + + return delta < (s64)sysctl_sched_migration_cost; +} + void set_task_cpu(struct task_struct *p, unsigned int new_cpu) { #ifdef CONFIG_SCHED_DEBUG @@ -1511,11 +2783,6 @@ static int ttwu_activate_remote(struct task_struct *p, int wake_flags) } #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ - -static inline int ttwu_share_cache(int this_cpu, int that_cpu) -{ - return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); -} #endif /* CONFIG_SMP */ static void ttwu_queue(struct task_struct *p, int cpu) @@ -1523,7 +2790,7 @@ static void ttwu_queue(struct task_struct *p, int cpu) struct rq *rq = cpu_rq(cpu); #if defined(CONFIG_SMP) - if (sched_feat(TTWU_QUEUE) && !ttwu_share_cache(smp_processor_id(), cpu)) { + if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) { sched_clock_cpu(cpu); /* sync clocks x-cpu */ 
ttwu_queue_remote(p, cpu); return; @@ -1937,7 +3204,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) local_irq_enable(); #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ finish_lock_switch(rq, prev); - trace_sched_stat_sleeptime(current, rq->clock); fire_sched_in_preempt_notifiers(current); if (mm) @@ -2173,7 +3439,7 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active) */ static atomic_long_t calc_load_tasks_idle; -void calc_load_account_idle(struct rq *this_rq) +static void calc_load_account_idle(struct rq *this_rq) { long delta; @@ -2317,7 +3583,7 @@ static void calc_global_nohz(unsigned long ticks) */ } #else -void calc_load_account_idle(struct rq *this_rq) +static void calc_load_account_idle(struct rq *this_rq) { } @@ -2460,7 +3726,7 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx) * scheduler tick (TICK_NSEC). With tickless idle this will not be called * every tick. We fix it up based on jiffies. */ -void update_cpu_load(struct rq *this_rq) +static void update_cpu_load(struct rq *this_rq) { unsigned long this_load = this_rq->load.weight; unsigned long curr_jiffies = jiffies; @@ -2538,10 +3804,8 @@ void sched_exec(void) #endif DEFINE_PER_CPU(struct kernel_stat, kstat); -DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); EXPORT_PER_CPU_SYMBOL(kstat); -EXPORT_PER_CPU_SYMBOL(kernel_cpustat); /* * Return any ns on the sched_clock that have not yet been accounted in @@ -2594,42 +3858,6 @@ unsigned long long task_sched_runtime(struct task_struct *p) return ns; } -#ifdef CONFIG_CGROUP_CPUACCT -struct cgroup_subsys cpuacct_subsys; -struct cpuacct root_cpuacct; -#endif - -static inline void task_group_account_field(struct task_struct *p, int index, - u64 tmp) -{ -#ifdef CONFIG_CGROUP_CPUACCT - struct kernel_cpustat *kcpustat; - struct cpuacct *ca; -#endif - /* - * Since all updates are sure to touch the root cgroup, we - * get ourselves ahead and touch it first. If the root cgroup - * is the only cgroup, then nothing else should be necessary. - * - */ - __get_cpu_var(kernel_cpustat).cpustat[index] += tmp; - -#ifdef CONFIG_CGROUP_CPUACCT - if (unlikely(!cpuacct_subsys.active)) - return; - - rcu_read_lock(); - ca = task_ca(p); - while (ca && (ca != &root_cpuacct)) { - kcpustat = this_cpu_ptr(ca->cpustat); - kcpustat->cpustat[index] += tmp; - ca = parent_ca(ca); - } - rcu_read_unlock(); -#endif -} - - /* * Account user cpu time to a process. * @p: the process that the cpu time gets accounted to @@ -2639,18 +3867,22 @@ static inline void task_group_account_field(struct task_struct *p, int index, void account_user_time(struct task_struct *p, cputime_t cputime, cputime_t cputime_scaled) { - int index; + struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; + cputime64_t tmp; /* Add user time to process. */ - p->utime += cputime; - p->utimescaled += cputime_scaled; + p->utime = cputime_add(p->utime, cputime); + p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); account_group_user_time(p, cputime); - index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER; - /* Add user time to cpustat. 
*/ - task_group_account_field(p, index, (__force u64) cputime); + tmp = cputime_to_cputime64(cputime); + if (TASK_NICE(p) > 0) + cpustat->nice = cputime64_add(cpustat->nice, tmp); + else + cpustat->user = cputime64_add(cpustat->user, tmp); + cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime); /* Account for user time used */ acct_update_integrals(p); } @@ -2664,21 +3896,24 @@ void account_user_time(struct task_struct *p, cputime_t cputime, static void account_guest_time(struct task_struct *p, cputime_t cputime, cputime_t cputime_scaled) { - u64 *cpustat = kcpustat_this_cpu->cpustat; + cputime64_t tmp; + struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; + + tmp = cputime_to_cputime64(cputime); /* Add guest time to process. */ - p->utime += cputime; - p->utimescaled += cputime_scaled; + p->utime = cputime_add(p->utime, cputime); + p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); account_group_user_time(p, cputime); - p->gtime += cputime; + p->gtime = cputime_add(p->gtime, cputime); /* Add guest time to cpustat. */ if (TASK_NICE(p) > 0) { - cpustat[CPUTIME_NICE] += (__force u64) cputime; - cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime; + cpustat->nice = cputime64_add(cpustat->nice, tmp); + cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp); } else { - cpustat[CPUTIME_USER] += (__force u64) cputime; - cpustat[CPUTIME_GUEST] += (__force u64) cputime; + cpustat->user = cputime64_add(cpustat->user, tmp); + cpustat->guest = cputime64_add(cpustat->guest, tmp); } } @@ -2691,15 +3926,18 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime, */ static inline void __account_system_time(struct task_struct *p, cputime_t cputime, - cputime_t cputime_scaled, int index) + cputime_t cputime_scaled, cputime64_t *target_cputime64) { + cputime64_t tmp = cputime_to_cputime64(cputime); + /* Add system time to process. */ - p->stime += cputime; - p->stimescaled += cputime_scaled; + p->stime = cputime_add(p->stime, cputime); + p->stimescaled = cputime_add(p->stimescaled, cputime_scaled); account_group_system_time(p, cputime); /* Add system time to cpustat. 
*/ - task_group_account_field(p, index, (__force u64) cputime); + *target_cputime64 = cputime64_add(*target_cputime64, tmp); + cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime); /* Account for system time used */ acct_update_integrals(p); @@ -2715,7 +3953,8 @@ void __account_system_time(struct task_struct *p, cputime_t cputime, void account_system_time(struct task_struct *p, int hardirq_offset, cputime_t cputime, cputime_t cputime_scaled) { - int index; + struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; + cputime64_t *target_cputime64; if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { account_guest_time(p, cputime, cputime_scaled); @@ -2723,13 +3962,13 @@ void account_system_time(struct task_struct *p, int hardirq_offset, } if (hardirq_count() - hardirq_offset) - index = CPUTIME_IRQ; + target_cputime64 = &cpustat->irq; else if (in_serving_softirq()) - index = CPUTIME_SOFTIRQ; + target_cputime64 = &cpustat->softirq; else - index = CPUTIME_SYSTEM; + target_cputime64 = &cpustat->system; - __account_system_time(p, cputime, cputime_scaled, index); + __account_system_time(p, cputime, cputime_scaled, target_cputime64); } /* @@ -2738,9 +3977,10 @@ void account_system_time(struct task_struct *p, int hardirq_offset, */ void account_steal_time(cputime_t cputime) { - u64 *cpustat = kcpustat_this_cpu->cpustat; + struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; + cputime64_t cputime64 = cputime_to_cputime64(cputime); - cpustat[CPUTIME_STEAL] += (__force u64) cputime; + cpustat->steal = cputime64_add(cpustat->steal, cputime64); } /* @@ -2749,13 +3989,14 @@ void account_steal_time(cputime_t cputime) */ void account_idle_time(cputime_t cputime) { - u64 *cpustat = kcpustat_this_cpu->cpustat; + struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; + cputime64_t cputime64 = cputime_to_cputime64(cputime); struct rq *rq = this_rq(); if (atomic_read(&rq->nr_iowait) > 0) - cpustat[CPUTIME_IOWAIT] += (__force u64) cputime; + cpustat->iowait = cputime64_add(cpustat->iowait, cputime64); else - cpustat[CPUTIME_IDLE] += (__force u64) cputime; + cpustat->idle = cputime64_add(cpustat->idle, cputime64); } static __always_inline bool steal_account_process_tick(void) @@ -2805,15 +4046,16 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick, struct rq *rq) { cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); - u64 *cpustat = kcpustat_this_cpu->cpustat; + cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy); + struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; if (steal_account_process_tick()) return; if (irqtime_account_hi_update()) { - cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy; + cpustat->irq = cputime64_add(cpustat->irq, tmp); } else if (irqtime_account_si_update()) { - cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy; + cpustat->softirq = cputime64_add(cpustat->softirq, tmp); } else if (this_cpu_ksoftirqd() == p) { /* * ksoftirqd time do not get accounted in cpu_softirq_time. @@ -2821,7 +4063,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick, * Also, p->stime needs to be updated for ksoftirqd. 
*/ __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, - CPUTIME_SOFTIRQ); + &cpustat->softirq); } else if (user_tick) { account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); } else if (p == rq->idle) { @@ -2830,7 +4072,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick, account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled); } else { __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, - CPUTIME_SYSTEM); + &cpustat->system); } } @@ -2929,7 +4171,7 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) { - cputime_t rtime, utime = p->utime, total = utime + p->stime; + cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime); /* * Use CFS's precise accounting: @@ -2937,11 +4179,11 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) rtime = nsecs_to_cputime(p->se.sum_exec_runtime); if (total) { - u64 temp = (__force u64) rtime; + u64 temp = rtime; - temp *= (__force u64) utime; - do_div(temp, (__force u32) total); - utime = (__force cputime_t) temp; + temp *= utime; + do_div(temp, total); + utime = (cputime_t)temp; } else utime = rtime; @@ -2949,7 +4191,7 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) * Compare with previous values, to keep monotonicity: */ p->prev_utime = max(p->prev_utime, utime); - p->prev_stime = max(p->prev_stime, rtime - p->prev_utime); + p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime)); *ut = p->prev_utime; *st = p->prev_stime; @@ -2966,20 +4208,21 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) thread_group_cputime(p, &cputime); - total = cputime.utime + cputime.stime; + total = cputime_add(cputime.utime, cputime.stime); rtime = nsecs_to_cputime(cputime.sum_exec_runtime); if (total) { - u64 temp = (__force u64) rtime; + u64 temp = rtime; - temp *= (__force u64) cputime.utime; - do_div(temp, (__force u32) total); - utime = (__force cputime_t) temp; + temp *= cputime.utime; + do_div(temp, total); + utime = (cputime_t)temp; } else utime = rtime; sig->prev_utime = max(sig->prev_utime, utime); - sig->prev_stime = max(sig->prev_stime, rtime - sig->prev_utime); + sig->prev_stime = max(sig->prev_stime, + cputime_sub(rtime, sig->prev_utime)); *ut = sig->prev_utime; *st = sig->prev_stime; @@ -3078,9 +4321,6 @@ static noinline void __schedule_bug(struct task_struct *prev) { struct pt_regs *regs = get_irq_regs(); - if (oops_in_progress) - return; - printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", prev->comm, prev->pid, preempt_count()); @@ -4612,13 +5852,6 @@ bool __sched yield_to(struct task_struct *p, bool preempt) */ if (preempt && rq != p_rq) resched_task(p_rq->curr); - } else { - /* - * We might have set it in task_yield_fair(), but are - * not going to schedule(), so don't want to skip - * the next update. 
- */ - rq->skip_clock_update = 0; } out: @@ -4786,7 +6019,7 @@ void sched_show_task(struct task_struct *p) free = stack_not_used(p); #endif printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, - task_pid_nr(p), task_pid_nr(rcu_dereference(p->real_parent)), + task_pid_nr(p), task_pid_nr(p->real_parent), (unsigned long)task_thread_info(p)->flags); show_stack(p, NULL); @@ -4885,6 +6118,53 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) #endif } +/* + * Increase the granularity value when there are more CPUs, + * because with more CPUs the 'effective latency' as visible + * to users decreases. But the relationship is not linear, + * so pick a second-best guess by going with the log2 of the + * number of CPUs. + * + * This idea comes from the SD scheduler of Con Kolivas: + */ +static int get_update_sysctl_factor(void) +{ + unsigned int cpus = min_t(int, num_online_cpus(), 8); + unsigned int factor; + + switch (sysctl_sched_tunable_scaling) { + case SCHED_TUNABLESCALING_NONE: + factor = 1; + break; + case SCHED_TUNABLESCALING_LINEAR: + factor = cpus; + break; + case SCHED_TUNABLESCALING_LOG: + default: + factor = 1 + ilog2(cpus); + break; + } + + return factor; +} + +static void update_sysctl(void) +{ + unsigned int factor = get_update_sysctl_factor(); + +#define SET_SYSCTL(name) \ + (sysctl_##name = (factor) * normalized_sysctl_##name) + SET_SYSCTL(sched_min_granularity); + SET_SYSCTL(sched_latency); + SET_SYSCTL(sched_wakeup_granularity); +#undef SET_SYSCTL +} + +static inline void sched_init_granularity(void) +{ + update_sysctl(); +} + #ifdef CONFIG_SMP void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) { @@ -5067,9 +6347,33 @@ static void migrate_nr_uninterruptible(struct rq *rq_src) */ static void calc_global_load_remove(struct rq *rq) { - atomic_long_sub(rq->calc_load_active, &calc_load_tasks); - rq->calc_load_active = 0; + atomic_long_sub(rq->calc_load_active, &calc_load_tasks); + rq->calc_load_active = 0; +} + +#ifdef CONFIG_CFS_BANDWIDTH +static void unthrottle_offline_cfs_rqs(struct rq *rq) +{ + struct cfs_rq *cfs_rq; + + for_each_leaf_cfs_rq(rq, cfs_rq) { + struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); + + if (!cfs_rq->runtime_enabled) + continue; + + /* + * clock_task is not advancing so we just need to make sure + * there's some valid quota amount + */ + cfs_rq->runtime_remaining = cfs_b->quota; + if (cfs_rq_throttled(cfs_rq)) + unthrottle_cfs_rq(cfs_rq); + } } +#else +static void unthrottle_offline_cfs_rqs(struct rq *rq) {} +#endif /* * Migrate all tasks from the rq, sleeping tasks will be migrated by @@ -5676,12 +6980,6 @@ static int init_rootdomain(struct root_domain *rd) return -ENOMEM; } -/* - * By default the system creates a single root-domain with all cpus as - * members (mimicking the global state we have today). - */ -struct root_domain def_root_domain; - static void init_defrootdomain(void) { init_rootdomain(&def_root_domain); @@ -5752,31 +7050,6 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu) destroy_sched_domain(sd, cpu); } -/* - * Keep a special pointer to the highest sched_domain that has - * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this - * allows us to avoid some pointer chasing select_idle_sibling(). - * - * Also keep a unique ID per domain (we use the first cpu number in - * the cpumask of the domain), this allows us to quickly tell if - * two cpus are in the same cache domain, see ttwu_share_cache(). 
- */ -DEFINE_PER_CPU(struct sched_domain *, sd_llc); -DEFINE_PER_CPU(int, sd_llc_id); - -static void update_top_cache_domain(int cpu) -{ - struct sched_domain *sd; - int id = cpu; - - sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); - if (sd) - id = cpumask_first(sched_domain_span(sd)); - - rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); - per_cpu(sd_llc_id, cpu) = id; -} - /* * Attach the domain 'sd' to 'cpu' as its base domain. Callers must * hold the hotplug lock. @@ -5816,8 +7089,6 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) tmp = rq->sd; rcu_assign_pointer(rq->sd, sd); destroy_sched_domains(tmp, cpu); - - update_top_cache_domain(cpu); } /* cpus with isolated domains */ @@ -5977,7 +7248,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu) continue; sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), - GFP_KERNEL, cpu_to_node(cpu)); + GFP_KERNEL, cpu_to_node(i)); if (!sg) goto fail; @@ -6115,12 +7386,6 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) return; update_group_power(sd, cpu); - atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight); -} - -int __weak arch_sd_sibling_asym_packing(void) -{ - return 0*SD_ASYM_PACKING; } /* @@ -6758,6 +8023,29 @@ static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, } } +static int update_runtime(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + int cpu = (int)(long)hcpu; + + switch (action) { + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + disable_runtime(cpu_rq(cpu)); + return NOTIFY_OK; + + case CPU_DOWN_FAILED: + case CPU_DOWN_FAILED_FROZEN: + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + enable_runtime(cpu_rq(cpu)); + return NOTIFY_OK; + + default: + return NOTIFY_DONE; + } +} + void __init sched_init_smp(void) { cpumask_var_t non_isolated_cpus; @@ -6806,11 +8094,104 @@ int in_sched_functions(unsigned long addr) && addr < (unsigned long)__sched_text_end); } -#ifdef CONFIG_CGROUP_SCHED -struct task_group root_task_group; +static void init_cfs_rq(struct cfs_rq *cfs_rq) +{ + cfs_rq->tasks_timeline = RB_ROOT; + INIT_LIST_HEAD(&cfs_rq->tasks); + cfs_rq->min_vruntime = (u64)(-(1LL << 20)); +#ifndef CONFIG_64BIT + cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; +#endif +} + +static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq) +{ + struct rt_prio_array *array; + int i; + + array = &rt_rq->active; + for (i = 0; i < MAX_RT_PRIO; i++) { + INIT_LIST_HEAD(array->queue + i); + __clear_bit(i, array->bitmap); + } + /* delimiter for bitsearch: */ + __set_bit(MAX_RT_PRIO, array->bitmap); + +#if defined CONFIG_SMP + rt_rq->highest_prio.curr = MAX_RT_PRIO; + rt_rq->highest_prio.next = MAX_RT_PRIO; + rt_rq->rt_nr_migratory = 0; + rt_rq->overloaded = 0; + plist_head_init(&rt_rq->pushable_tasks); +#endif + + rt_rq->rt_time = 0; + rt_rq->rt_throttled = 0; + rt_rq->rt_runtime = 0; + raw_spin_lock_init(&rt_rq->rt_runtime_lock); +} + +#ifdef CONFIG_FAIR_GROUP_SCHED +static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, + struct sched_entity *se, int cpu, + struct sched_entity *parent) +{ + struct rq *rq = cpu_rq(cpu); + + cfs_rq->tg = tg; + cfs_rq->rq = rq; +#ifdef CONFIG_SMP + /* allow initial update_cfs_load() to truncate */ + cfs_rq->load_stamp = 1; +#endif + init_cfs_rq_runtime(cfs_rq); + + tg->cfs_rq[cpu] = cfs_rq; + tg->se[cpu] = se; + + /* se could be NULL for root_task_group */ + if (!se) + return; + + if (!parent) + se->cfs_rq = &rq->cfs; + else + se->cfs_rq = parent->my_q; + + se->my_q = 
cfs_rq; + update_load_set(&se->load, 0); + se->parent = parent; +} #endif -DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask); +#ifdef CONFIG_RT_GROUP_SCHED +static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, + struct sched_rt_entity *rt_se, int cpu, + struct sched_rt_entity *parent) +{ + struct rq *rq = cpu_rq(cpu); + + rt_rq->highest_prio.curr = MAX_RT_PRIO; + rt_rq->rt_nr_boosted = 0; + rt_rq->rq = rq; + rt_rq->tg = tg; + + tg->rt_rq[cpu] = rt_rq; + tg->rt_se[cpu] = rt_se; + + if (!rt_se) + return; + + if (!parent) + rt_se->rt_rq = &rq->rt; + else + rt_se->rt_rq = parent->my_q; + + rt_se->my_q = rt_rq; + rt_se->parent = parent; + INIT_LIST_HEAD(&rt_se->run_list); +} +#endif void __init sched_init(void) { @@ -6868,17 +8249,9 @@ void __init sched_init(void) #ifdef CONFIG_CGROUP_SCHED list_add(&root_task_group.list, &task_groups); INIT_LIST_HEAD(&root_task_group.children); - INIT_LIST_HEAD(&root_task_group.siblings); autogroup_init(&init_task); - #endif /* CONFIG_CGROUP_SCHED */ -#ifdef CONFIG_CGROUP_CPUACCT - root_cpuacct.cpustat = &kernel_cpustat; - root_cpuacct.cpuusage = alloc_percpu(u64); - /* Too early, not expected to fail */ - BUG_ON(!root_cpuacct.cpuusage); -#endif for_each_possible_cpu(i) { struct rq *rq; @@ -6890,7 +8263,7 @@ void __init sched_init(void) init_cfs_rq(&rq->cfs); init_rt_rq(&rq->rt, rq); #ifdef CONFIG_FAIR_GROUP_SCHED - root_task_group.shares = ROOT_TASK_GROUP_LOAD; + root_task_group.shares = root_task_group_load; INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); /* * How much cpu bandwidth does root_task_group get? @@ -6940,7 +8313,7 @@ void __init sched_init(void) rq->avg_idle = 2*sysctl_sched_migration_cost; rq_attach_root(rq, &def_root_domain); #ifdef CONFIG_NO_HZ - rq->nohz_flags = 0; + rq->nohz_balance_kick = 0; #endif #endif init_rq_hrtick(rq); @@ -6953,6 +8326,10 @@ void __init sched_init(void) INIT_HLIST_HEAD(&init_task.preempt_notifiers); #endif +#ifdef CONFIG_SMP + open_softirq(SCHED_SOFTIRQ, run_rebalance_domains); +#endif + #ifdef CONFIG_RT_MUTEXES plist_head_init(&init_task.pi_waiters); #endif @@ -6980,11 +8357,17 @@ void __init sched_init(void) #ifdef CONFIG_SMP zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT); +#ifdef CONFIG_NO_HZ + zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); + alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT); + atomic_set(&nohz.load_balancer, nr_cpu_ids); + atomic_set(&nohz.first_pick_cpu, nr_cpu_ids); + atomic_set(&nohz.second_pick_cpu, nr_cpu_ids); +#endif /* May be allocated at isolcpus cmdline parse time */ if (cpu_isolated_map == NULL) zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); -#endif - init_sched_fair_class(); +#endif /* SMP */ scheduler_running = 1; } @@ -7136,14 +8519,169 @@ void set_curr_task(int cpu, struct task_struct *p) #endif +#ifdef CONFIG_FAIR_GROUP_SCHED +static void free_fair_sched_group(struct task_group *tg) +{ + int i; + + destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); + + for_each_possible_cpu(i) { + if (tg->cfs_rq) + kfree(tg->cfs_rq[i]); + if (tg->se) + kfree(tg->se[i]); + } + + kfree(tg->cfs_rq); + kfree(tg->se); +} + +static +int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) +{ + struct cfs_rq *cfs_rq; + struct sched_entity *se; + int i; + + tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); + if (!tg->cfs_rq) + goto err; + tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL); + if (!tg->se) + goto err; + + tg->shares = NICE_0_LOAD; + + init_cfs_bandwidth(tg_cfs_bandwidth(tg)); + + for_each_possible_cpu(i) { + cfs_rq = 
kzalloc_node(sizeof(struct cfs_rq), + GFP_KERNEL, cpu_to_node(i)); + if (!cfs_rq) + goto err; + + se = kzalloc_node(sizeof(struct sched_entity), + GFP_KERNEL, cpu_to_node(i)); + if (!se) + goto err_free_rq; + + init_cfs_rq(cfs_rq); + init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); + } + + return 1; + +err_free_rq: + kfree(cfs_rq); +err: + return 0; +} + +static inline void unregister_fair_sched_group(struct task_group *tg, int cpu) +{ + struct rq *rq = cpu_rq(cpu); + unsigned long flags; + + /* + * Only empty task groups can be destroyed; so we can speculatively + * check on_list without danger of it being re-added. + */ + if (!tg->cfs_rq[cpu]->on_list) + return; + + raw_spin_lock_irqsave(&rq->lock, flags); + list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); + raw_spin_unlock_irqrestore(&rq->lock, flags); +} +#else /* !CONFIG_FAIR_GROUP_SCHED */ +static inline void free_fair_sched_group(struct task_group *tg) +{ +} + +static inline +int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) +{ + return 1; +} + +static inline void unregister_fair_sched_group(struct task_group *tg, int cpu) +{ +} +#endif /* CONFIG_FAIR_GROUP_SCHED */ + #ifdef CONFIG_RT_GROUP_SCHED +static void free_rt_sched_group(struct task_group *tg) +{ + int i; + + if (tg->rt_se) + destroy_rt_bandwidth(&tg->rt_bandwidth); + + for_each_possible_cpu(i) { + if (tg->rt_rq) + kfree(tg->rt_rq[i]); + if (tg->rt_se) + kfree(tg->rt_se[i]); + } + + kfree(tg->rt_rq); + kfree(tg->rt_se); +} + +static +int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) +{ + struct rt_rq *rt_rq; + struct sched_rt_entity *rt_se; + int i; + + tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL); + if (!tg->rt_rq) + goto err; + tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL); + if (!tg->rt_se) + goto err; + + init_rt_bandwidth(&tg->rt_bandwidth, + ktime_to_ns(def_rt_bandwidth.rt_period), 0); + + for_each_possible_cpu(i) { + rt_rq = kzalloc_node(sizeof(struct rt_rq), + GFP_KERNEL, cpu_to_node(i)); + if (!rt_rq) + goto err; + + rt_se = kzalloc_node(sizeof(struct sched_rt_entity), + GFP_KERNEL, cpu_to_node(i)); + if (!rt_se) + goto err_free_rq; + + init_rt_rq(rt_rq, cpu_rq(i)); + rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; + init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]); + } + + return 1; + +err_free_rq: + kfree(rt_rq); +err: + return 0; +} #else /* !CONFIG_RT_GROUP_SCHED */ +static inline void free_rt_sched_group(struct task_group *tg) +{ +} + +static inline +int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) +{ + return 1; +} #endif /* CONFIG_RT_GROUP_SCHED */ #ifdef CONFIG_CGROUP_SCHED -/* task_group_lock serializes the addition/removal of task groups */ -static DEFINE_SPINLOCK(task_group_lock); - static void free_sched_group(struct task_group *tg) { free_fair_sched_group(tg); @@ -7249,6 +8787,47 @@ void sched_move_task(struct task_struct *tsk) #endif /* CONFIG_CGROUP_SCHED */ #ifdef CONFIG_FAIR_GROUP_SCHED +static DEFINE_MUTEX(shares_mutex); + +int sched_group_set_shares(struct task_group *tg, unsigned long shares) +{ + int i; + unsigned long flags; + + /* + * We can't change the weight of the root cgroup. 
+ */ + if (!tg->se[0]) + return -EINVAL; + + shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES)); + + mutex_lock(&shares_mutex); + if (tg->shares == shares) + goto done; + + tg->shares = shares; + for_each_possible_cpu(i) { + struct rq *rq = cpu_rq(i); + struct sched_entity *se; + + se = tg->se[i]; + /* Propagate contribution to hierarchy */ + raw_spin_lock_irqsave(&rq->lock, flags); + for_each_sched_entity(se) + update_cfs_shares(group_cfs_rq(se)); + raw_spin_unlock_irqrestore(&rq->lock, flags); + } + +done: + mutex_unlock(&shares_mutex); + return 0; +} + +unsigned long sched_group_shares(struct task_group *tg) +{ + return tg->shares; +} #endif #if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH) @@ -7273,7 +8852,7 @@ static inline int tg_has_rt_tasks(struct task_group *tg) struct task_struct *g, *p; do_each_thread(g, p) { - if (rt_task(p) && task_rq(p)->rt.tg == tg) + if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg) return 1; } while_each_thread(g, p); @@ -7624,8 +9203,8 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) { - int i, ret = 0, runtime_enabled, runtime_was_enabled; - struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; + int i, ret = 0, runtime_enabled; + struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); if (tg == &root_task_group) return -EINVAL; @@ -7652,8 +9231,6 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) goto out_unlock; runtime_enabled = quota != RUNTIME_INF; - runtime_was_enabled = cfs_b->quota != RUNTIME_INF; - account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled); raw_spin_lock_irq(&cfs_b->lock); cfs_b->period = ns_to_ktime(period); cfs_b->quota = quota; @@ -7669,13 +9246,13 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) for_each_possible_cpu(i) { struct cfs_rq *cfs_rq = tg->cfs_rq[i]; - struct rq *rq = cfs_rq->rq; + struct rq *rq = rq_of(cfs_rq); raw_spin_lock_irq(&rq->lock); cfs_rq->runtime_enabled = runtime_enabled; cfs_rq->runtime_remaining = 0; - if (cfs_rq->throttled) + if (cfs_rq_throttled(cfs_rq)) unthrottle_cfs_rq(cfs_rq); raw_spin_unlock_irq(&rq->lock); } @@ -7689,7 +9266,7 @@ int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) { u64 quota, period; - period = ktime_to_ns(tg->cfs_bandwidth.period); + period = ktime_to_ns(tg_cfs_bandwidth(tg)->period); if (cfs_quota_us < 0) quota = RUNTIME_INF; else @@ -7702,10 +9279,10 @@ long tg_get_cfs_quota(struct task_group *tg) { u64 quota_us; - if (tg->cfs_bandwidth.quota == RUNTIME_INF) + if (tg_cfs_bandwidth(tg)->quota == RUNTIME_INF) return -1; - quota_us = tg->cfs_bandwidth.quota; + quota_us = tg_cfs_bandwidth(tg)->quota; do_div(quota_us, NSEC_PER_USEC); return quota_us; @@ -7716,7 +9293,10 @@ int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) u64 quota, period; period = (u64)cfs_period_us * NSEC_PER_USEC; - quota = tg->cfs_bandwidth.quota; + quota = tg_cfs_bandwidth(tg)->quota; + + if (period <= 0) + return -EINVAL; return tg_set_cfs_bandwidth(tg, period, quota); } @@ -7725,7 +9305,7 @@ long tg_get_cfs_period(struct task_group *tg) { u64 cfs_period_us; - cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); + cfs_period_us = ktime_to_ns(tg_cfs_bandwidth(tg)->period); do_div(cfs_period_us, NSEC_PER_USEC); return cfs_period_us; @@ -7785,13 +9365,13 @@ static u64 normalize_cfs_quota(struct task_group *tg, static int tg_cfs_schedulable_down(struct task_group *tg, void 
*data) { struct cfs_schedulable_data *d = data; - struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; + struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); s64 quota = 0, parent_quota = -1; if (!tg->parent) { quota = RUNTIME_INF; } else { - struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; + struct cfs_bandwidth *parent_b = tg_cfs_bandwidth(tg->parent); quota = normalize_cfs_quota(tg, d); parent_quota = parent_b->hierarchal_quota; @@ -7835,7 +9415,7 @@ static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft, struct cgroup_map_cb *cb) { struct task_group *tg = cgroup_tg(cgrp); - struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; + struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); cb->fill(cb, "nr_periods", cfs_b->nr_periods); cb->fill(cb, "nr_throttled", cfs_b->nr_throttled); @@ -7936,16 +9516,38 @@ struct cgroup_subsys cpu_cgroup_subsys = { * (balbir@in.ibm.com). */ +/* track cpu usage of a group of tasks and its child groups */ +struct cpuacct { + struct cgroup_subsys_state css; + /* cpuusage holds pointer to a u64-type object on every cpu */ + u64 __percpu *cpuusage; + struct percpu_counter cpustat[CPUACCT_STAT_NSTATS]; + struct cpuacct *parent; +}; + +struct cgroup_subsys cpuacct_subsys; + +/* return cpu accounting group corresponding to this container */ +static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp) +{ + return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id), + struct cpuacct, css); +} + +/* return cpu accounting group to which this task belongs */ +static inline struct cpuacct *task_ca(struct task_struct *tsk) +{ + return container_of(task_subsys_state(tsk, cpuacct_subsys_id), + struct cpuacct, css); +} + /* create a new cpu accounting group */ static struct cgroup_subsys_state *cpuacct_create( struct cgroup_subsys *ss, struct cgroup *cgrp) { - struct cpuacct *ca; - - if (!cgrp->parent) - return &root_cpuacct.css; + struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL); + int i; - ca = kzalloc(sizeof(*ca), GFP_KERNEL); if (!ca) goto out; @@ -7953,13 +9555,18 @@ static struct cgroup_subsys_state *cpuacct_create( if (!ca->cpuusage) goto out_free_ca; - ca->cpustat = alloc_percpu(struct kernel_cpustat); - if (!ca->cpustat) - goto out_free_cpuusage; + for (i = 0; i < CPUACCT_STAT_NSTATS; i++) + if (percpu_counter_init(&ca->cpustat[i], 0)) + goto out_free_counters; + + if (cgrp->parent) + ca->parent = cgroup_ca(cgrp->parent); return &ca->css; -out_free_cpuusage: +out_free_counters: + while (--i >= 0) + percpu_counter_destroy(&ca->cpustat[i]); free_percpu(ca->cpuusage); out_free_ca: kfree(ca); @@ -7972,8 +9579,10 @@ static void cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) { struct cpuacct *ca = cgroup_ca(cgrp); + int i; - free_percpu(ca->cpustat); + for (i = 0; i < CPUACCT_STAT_NSTATS; i++) + percpu_counter_destroy(&ca->cpustat[i]); free_percpu(ca->cpuusage); kfree(ca); } @@ -8066,31 +9675,16 @@ static const char *cpuacct_stat_desc[] = { }; static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft, - struct cgroup_map_cb *cb) + struct cgroup_map_cb *cb) { struct cpuacct *ca = cgroup_ca(cgrp); - int cpu; - s64 val = 0; - - for_each_online_cpu(cpu) { - struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu); - val += kcpustat->cpustat[CPUTIME_USER]; - val += kcpustat->cpustat[CPUTIME_NICE]; - } - val = cputime64_to_clock_t(val); - cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_USER], val); + int i; - val = 0; - for_each_online_cpu(cpu) { - struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu); - val += 
kcpustat->cpustat[CPUTIME_SYSTEM]; - val += kcpustat->cpustat[CPUTIME_IRQ]; - val += kcpustat->cpustat[CPUTIME_SOFTIRQ]; + for (i = 0; i < CPUACCT_STAT_NSTATS; i++) { + s64 val = percpu_counter_read(&ca->cpustat[i]); + val = cputime64_to_clock_t(val); + cb->fill(cb, cpuacct_stat_desc[i], val); } - - val = cputime64_to_clock_t(val); - cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_SYSTEM], val); - return 0; } @@ -8120,7 +9714,7 @@ static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) * * called with rq->lock held. */ -void cpuacct_charge(struct task_struct *tsk, u64 cputime) +static void cpuacct_charge(struct task_struct *tsk, u64 cputime) { struct cpuacct *ca; int cpu; @@ -8134,7 +9728,7 @@ void cpuacct_charge(struct task_struct *tsk, u64 cputime) ca = task_ca(tsk); - for (; ca; ca = parent_ca(ca)) { + for (; ca; ca = ca->parent) { u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); *cpuusage += cputime; } @@ -8142,6 +9736,45 @@ void cpuacct_charge(struct task_struct *tsk, u64 cputime) rcu_read_unlock(); } +/* + * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large + * in cputime_t units. As a result, cpuacct_update_stats calls + * percpu_counter_add with values large enough to always overflow the + * per cpu batch limit causing bad SMP scalability. + * + * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we + * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled + * and enabled. We cap it at INT_MAX which is the largest allowed batch value. + */ +#ifdef CONFIG_SMP +#define CPUACCT_BATCH \ + min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX) +#else +#define CPUACCT_BATCH 0 +#endif + +/* + * Charge the system/user time to the task's accounting group. + */ +static void cpuacct_update_stats(struct task_struct *tsk, + enum cpuacct_stat_index idx, cputime_t val) +{ + struct cpuacct *ca; + int batch = CPUACCT_BATCH; + + if (unlikely(!cpuacct_subsys.active)) + return; + + rcu_read_lock(); + ca = task_ca(tsk); + + do { + __percpu_counter_add(&ca->cpustat[idx], val, batch); + ca = ca->parent; + } while (ca); + rcu_read_unlock(); +} + struct cgroup_subsys cpuacct_subsys = { .name = "cpuacct", .create = cpuacct_create, diff --git a/trunk/kernel/sched/Makefile b/trunk/kernel/sched/Makefile deleted file mode 100644 index 9a7dd35102a3..000000000000 --- a/trunk/kernel/sched/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -ifdef CONFIG_FUNCTION_TRACER -CFLAGS_REMOVE_clock.o = -pg -endif - -ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) -# According to Alan Modra , the -fno-omit-frame-pointer is -# needed for x86 only. Why this used to be enabled for all architectures is beyond -# me. I suspect most platforms don't need this, but until we know that for sure -# I turn this off for IA-64 only. Andreas Schwab says it's also needed on m68k -# to get a correct value for the wait-channel (WCHAN in ps). --davidm -CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer -endif - -obj-y += core.o clock.o idle_task.o fair.o rt.o stop_task.o -obj-$(CONFIG_SMP) += cpupri.o -obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o -obj-$(CONFIG_SCHEDSTATS) += stats.o -obj-$(CONFIG_SCHED_DEBUG) += debug.o - - diff --git a/trunk/kernel/sched/sched.h b/trunk/kernel/sched/sched.h deleted file mode 100644 index 98c0c2623db8..000000000000 --- a/trunk/kernel/sched/sched.h +++ /dev/null @@ -1,1166 +0,0 @@ - -#include -#include -#include -#include - -#include "cpupri.h" - -extern __read_mostly int scheduler_running; - -/* - * Convert user-nice values [ -20 ... 0 ... 
19 ] - * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], - * and back. - */ -#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20) -#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20) -#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio) - -/* - * 'User priority' is the nice value converted to something we - * can work with better when scaling various scheduler parameters, - * it's a [ 0 ... 39 ] range. - */ -#define USER_PRIO(p) ((p)-MAX_RT_PRIO) -#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio) -#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO)) - -/* - * Helpers for converting nanosecond timing to jiffy resolution - */ -#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ)) - -#define NICE_0_LOAD SCHED_LOAD_SCALE -#define NICE_0_SHIFT SCHED_LOAD_SHIFT - -/* - * These are the 'tuning knobs' of the scheduler: - * - * default timeslice is 100 msecs (used only for SCHED_RR tasks). - * Timeslices get refilled after they expire. - */ -#define DEF_TIMESLICE (100 * HZ / 1000) - -/* - * single value that denotes runtime == period, ie unlimited time. - */ -#define RUNTIME_INF ((u64)~0ULL) - -static inline int rt_policy(int policy) -{ - if (policy == SCHED_FIFO || policy == SCHED_RR) - return 1; - return 0; -} - -static inline int task_has_rt_policy(struct task_struct *p) -{ - return rt_policy(p->policy); -} - -/* - * This is the priority-queue data structure of the RT scheduling class: - */ -struct rt_prio_array { - DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */ - struct list_head queue[MAX_RT_PRIO]; -}; - -struct rt_bandwidth { - /* nests inside the rq lock: */ - raw_spinlock_t rt_runtime_lock; - ktime_t rt_period; - u64 rt_runtime; - struct hrtimer rt_period_timer; -}; - -extern struct mutex sched_domains_mutex; - -#ifdef CONFIG_CGROUP_SCHED - -#include - -struct cfs_rq; -struct rt_rq; - -static LIST_HEAD(task_groups); - -struct cfs_bandwidth { -#ifdef CONFIG_CFS_BANDWIDTH - raw_spinlock_t lock; - ktime_t period; - u64 quota, runtime; - s64 hierarchal_quota; - u64 runtime_expires; - - int idle, timer_active; - struct hrtimer period_timer, slack_timer; - struct list_head throttled_cfs_rq; - - /* statistics */ - int nr_periods, nr_throttled; - u64 throttled_time; -#endif -}; - -/* task group related information */ -struct task_group { - struct cgroup_subsys_state css; - -#ifdef CONFIG_FAIR_GROUP_SCHED - /* schedulable entities of this group on each cpu */ - struct sched_entity **se; - /* runqueue "owned" by this group on each cpu */ - struct cfs_rq **cfs_rq; - unsigned long shares; - - atomic_t load_weight; -#endif - -#ifdef CONFIG_RT_GROUP_SCHED - struct sched_rt_entity **rt_se; - struct rt_rq **rt_rq; - - struct rt_bandwidth rt_bandwidth; -#endif - - struct rcu_head rcu; - struct list_head list; - - struct task_group *parent; - struct list_head siblings; - struct list_head children; - -#ifdef CONFIG_SCHED_AUTOGROUP - struct autogroup *autogroup; -#endif - - struct cfs_bandwidth cfs_bandwidth; -}; - -#ifdef CONFIG_FAIR_GROUP_SCHED -#define ROOT_TASK_GROUP_LOAD NICE_0_LOAD - -/* - * A weight of 0 or 1 can cause arithmetics problems. - * A weight of a cfs_rq is the sum of weights of which entities - * are queued on this cfs_rq, so a weight of a entity should not be - * too large, so as the shares value of a task group. - * (The default weight is 1024 - so there's no practical - * limitation from this.) - */ -#define MIN_SHARES (1UL << 1) -#define MAX_SHARES (1UL << 18) -#endif - -/* Default task group. 
- * Every task in system belong to this group at bootup. - */ -extern struct task_group root_task_group; - -typedef int (*tg_visitor)(struct task_group *, void *); - -extern int walk_tg_tree_from(struct task_group *from, - tg_visitor down, tg_visitor up, void *data); - -/* - * Iterate the full tree, calling @down when first entering a node and @up when - * leaving it for the final time. - * - * Caller must hold rcu_lock or sufficient equivalent. - */ -static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) -{ - return walk_tg_tree_from(&root_task_group, down, up, data); -} - -extern int tg_nop(struct task_group *tg, void *data); - -extern void free_fair_sched_group(struct task_group *tg); -extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent); -extern void unregister_fair_sched_group(struct task_group *tg, int cpu); -extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, - struct sched_entity *se, int cpu, - struct sched_entity *parent); -extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b); -extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); - -extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b); -extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b); -extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq); - -extern void free_rt_sched_group(struct task_group *tg); -extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent); -extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, - struct sched_rt_entity *rt_se, int cpu, - struct sched_rt_entity *parent); - -#else /* CONFIG_CGROUP_SCHED */ - -struct cfs_bandwidth { }; - -#endif /* CONFIG_CGROUP_SCHED */ - -/* CFS-related fields in a runqueue */ -struct cfs_rq { - struct load_weight load; - unsigned long nr_running, h_nr_running; - - u64 exec_clock; - u64 min_vruntime; -#ifndef CONFIG_64BIT - u64 min_vruntime_copy; -#endif - - struct rb_root tasks_timeline; - struct rb_node *rb_leftmost; - - struct list_head tasks; - struct list_head *balance_iterator; - - /* - * 'curr' points to currently running entity on this cfs_rq. - * It is set to NULL otherwise (i.e when none are currently running). - */ - struct sched_entity *curr, *next, *last, *skip; - -#ifdef CONFIG_SCHED_DEBUG - unsigned int nr_spread_over; -#endif - -#ifdef CONFIG_FAIR_GROUP_SCHED - struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */ - - /* - * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in - * a hierarchy). Non-leaf lrqs hold other higher schedulable entities - * (like users, containers etc.) - * - * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This - * list is used during load balance. - */ - int on_list; - struct list_head leaf_cfs_rq_list; - struct task_group *tg; /* group that "owns" this runqueue */ - -#ifdef CONFIG_SMP - /* - * the part of load.weight contributed by tasks - */ - unsigned long task_weight; - - /* - * h_load = weight * f(tg) - * - * Where f(tg) is the recursive weight fraction assigned to - * this group. 
- */ - unsigned long h_load; - - /* - * Maintaining per-cpu shares distribution for group scheduling - * - * load_stamp is the last time we updated the load average - * load_last is the last time we updated the load average and saw load - * load_unacc_exec_time is currently unaccounted execution time - */ - u64 load_avg; - u64 load_period; - u64 load_stamp, load_last, load_unacc_exec_time; - - unsigned long load_contribution; -#endif /* CONFIG_SMP */ -#ifdef CONFIG_CFS_BANDWIDTH - int runtime_enabled; - u64 runtime_expires; - s64 runtime_remaining; - - u64 throttled_timestamp; - int throttled, throttle_count; - struct list_head throttled_list; -#endif /* CONFIG_CFS_BANDWIDTH */ -#endif /* CONFIG_FAIR_GROUP_SCHED */ -}; - -static inline int rt_bandwidth_enabled(void) -{ - return sysctl_sched_rt_runtime >= 0; -} - -/* Real-Time classes' related field in a runqueue: */ -struct rt_rq { - struct rt_prio_array active; - unsigned long rt_nr_running; -#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED - struct { - int curr; /* highest queued rt task prio */ -#ifdef CONFIG_SMP - int next; /* next highest */ -#endif - } highest_prio; -#endif -#ifdef CONFIG_SMP - unsigned long rt_nr_migratory; - unsigned long rt_nr_total; - int overloaded; - struct plist_head pushable_tasks; -#endif - int rt_throttled; - u64 rt_time; - u64 rt_runtime; - /* Nests inside the rq lock: */ - raw_spinlock_t rt_runtime_lock; - -#ifdef CONFIG_RT_GROUP_SCHED - unsigned long rt_nr_boosted; - - struct rq *rq; - struct list_head leaf_rt_rq_list; - struct task_group *tg; -#endif -}; - -#ifdef CONFIG_SMP - -/* - * We add the notion of a root-domain which will be used to define per-domain - * variables. Each exclusive cpuset essentially defines an island domain by - * fully partitioning the member cpus from any other cpuset. Whenever a new - * exclusive cpuset is created, we also create and attach a new root-domain - * object. - * - */ -struct root_domain { - atomic_t refcount; - atomic_t rto_count; - struct rcu_head rcu; - cpumask_var_t span; - cpumask_var_t online; - - /* - * The "RT overload" flag: it gets set if a CPU has more than - * one runnable RT task. - */ - cpumask_var_t rto_mask; - struct cpupri cpupri; -}; - -extern struct root_domain def_root_domain; - -#endif /* CONFIG_SMP */ - -/* - * This is the main, per-CPU runqueue data structure. - * - * Locking rule: those places that want to lock multiple runqueues - * (such as the load balancing or the thread migration code), lock - * acquire operations must be ordered by ascending &runqueue. - */ -struct rq { - /* runqueue lock: */ - raw_spinlock_t lock; - - /* - * nr_running and cpu_load should be in the same cacheline because - * remote CPUs use both these fields when doing load calculation. - */ - unsigned long nr_running; - #define CPU_LOAD_IDX_MAX 5 - unsigned long cpu_load[CPU_LOAD_IDX_MAX]; - unsigned long last_load_update_tick; -#ifdef CONFIG_NO_HZ - u64 nohz_stamp; - unsigned long nohz_flags; -#endif - int skip_clock_update; - - /* capture load from *all* tasks on this cpu: */ - struct load_weight load; - unsigned long nr_load_updates; - u64 nr_switches; - - struct cfs_rq cfs; - struct rt_rq rt; - -#ifdef CONFIG_FAIR_GROUP_SCHED - /* list of leaf cfs_rq on this cpu: */ - struct list_head leaf_cfs_rq_list; -#endif -#ifdef CONFIG_RT_GROUP_SCHED - struct list_head leaf_rt_rq_list; -#endif - - /* - * This is part of a global counter where only the total sum - * over all CPUs matters. 
A task can increase this counter on - * one CPU and if it got migrated afterwards it may decrease - * it on another CPU. Always updated under the runqueue lock: - */ - unsigned long nr_uninterruptible; - - struct task_struct *curr, *idle, *stop; - unsigned long next_balance; - struct mm_struct *prev_mm; - - u64 clock; - u64 clock_task; - - atomic_t nr_iowait; - -#ifdef CONFIG_SMP - struct root_domain *rd; - struct sched_domain *sd; - - unsigned long cpu_power; - - unsigned char idle_balance; - /* For active balancing */ - int post_schedule; - int active_balance; - int push_cpu; - struct cpu_stop_work active_balance_work; - /* cpu of this runqueue: */ - int cpu; - int online; - - u64 rt_avg; - u64 age_stamp; - u64 idle_stamp; - u64 avg_idle; -#endif - -#ifdef CONFIG_IRQ_TIME_ACCOUNTING - u64 prev_irq_time; -#endif -#ifdef CONFIG_PARAVIRT - u64 prev_steal_time; -#endif -#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING - u64 prev_steal_time_rq; -#endif - - /* calc_load related fields */ - unsigned long calc_load_update; - long calc_load_active; - -#ifdef CONFIG_SCHED_HRTICK -#ifdef CONFIG_SMP - int hrtick_csd_pending; - struct call_single_data hrtick_csd; -#endif - struct hrtimer hrtick_timer; -#endif - -#ifdef CONFIG_SCHEDSTATS - /* latency stats */ - struct sched_info rq_sched_info; - unsigned long long rq_cpu_time; - /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */ - - /* sys_sched_yield() stats */ - unsigned int yld_count; - - /* schedule() stats */ - unsigned int sched_switch; - unsigned int sched_count; - unsigned int sched_goidle; - - /* try_to_wake_up() stats */ - unsigned int ttwu_count; - unsigned int ttwu_local; -#endif - -#ifdef CONFIG_SMP - struct llist_head wake_list; -#endif -}; - -static inline int cpu_of(struct rq *rq) -{ -#ifdef CONFIG_SMP - return rq->cpu; -#else - return 0; -#endif -} - -DECLARE_PER_CPU(struct rq, runqueues); - -#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) -#define this_rq() (&__get_cpu_var(runqueues)) -#define task_rq(p) cpu_rq(task_cpu(p)) -#define cpu_curr(cpu) (cpu_rq(cpu)->curr) -#define raw_rq() (&__raw_get_cpu_var(runqueues)) - -#ifdef CONFIG_SMP - -#define rcu_dereference_check_sched_domain(p) \ - rcu_dereference_check((p), \ - lockdep_is_held(&sched_domains_mutex)) - -/* - * The domain tree (rq->sd) is protected by RCU's quiescent state transition. - * See detach_destroy_domains: synchronize_sched for details. - * - * The domain tree of any CPU may only be accessed from within - * preempt-disabled sections. - */ -#define for_each_domain(cpu, __sd) \ - for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \ - __sd; __sd = __sd->parent) - -#define for_each_lower_domain(sd) for (; sd; sd = sd->child) - -/** - * highest_flag_domain - Return highest sched_domain containing flag. - * @cpu: The cpu whose highest level of sched domain is to - * be returned. - * @flag: The flag to check for the highest sched_domain - * for the given cpu. - * - * Returns the highest sched_domain of a cpu which contains the given flag. - */ -static inline struct sched_domain *highest_flag_domain(int cpu, int flag) -{ - struct sched_domain *sd, *hsd = NULL; - - for_each_domain(cpu, sd) { - if (!(sd->flags & flag)) - break; - hsd = sd; - } - - return hsd; -} - -DECLARE_PER_CPU(struct sched_domain *, sd_llc); -DECLARE_PER_CPU(int, sd_llc_id); - -#endif /* CONFIG_SMP */ - -#include "stats.h" -#include "auto_group.h" - -#ifdef CONFIG_CGROUP_SCHED - -/* - * Return the group to which this tasks belongs. 
- * - * We use task_subsys_state_check() and extend the RCU verification with - * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each - * task it moves into the cgroup. Therefore by holding either of those locks, - * we pin the task to the current cgroup. - */ -static inline struct task_group *task_group(struct task_struct *p) -{ - struct task_group *tg; - struct cgroup_subsys_state *css; - - css = task_subsys_state_check(p, cpu_cgroup_subsys_id, - lockdep_is_held(&p->pi_lock) || - lockdep_is_held(&task_rq(p)->lock)); - tg = container_of(css, struct task_group, css); - - return autogroup_task_group(p, tg); -} - -/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ -static inline void set_task_rq(struct task_struct *p, unsigned int cpu) -{ -#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED) - struct task_group *tg = task_group(p); -#endif - -#ifdef CONFIG_FAIR_GROUP_SCHED - p->se.cfs_rq = tg->cfs_rq[cpu]; - p->se.parent = tg->se[cpu]; -#endif - -#ifdef CONFIG_RT_GROUP_SCHED - p->rt.rt_rq = tg->rt_rq[cpu]; - p->rt.parent = tg->rt_se[cpu]; -#endif -} - -#else /* CONFIG_CGROUP_SCHED */ - -static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } -static inline struct task_group *task_group(struct task_struct *p) -{ - return NULL; -} - -#endif /* CONFIG_CGROUP_SCHED */ - -static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) -{ - set_task_rq(p, cpu); -#ifdef CONFIG_SMP - /* - * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be - * successfuly executed on another CPU. We must ensure that updates of - * per-task data have been completed by this moment. - */ - smp_wmb(); - task_thread_info(p)->cpu = cpu; -#endif -} - -/* - * Tunables that become constants when CONFIG_SCHED_DEBUG is off: - */ -#ifdef CONFIG_SCHED_DEBUG -# include -# define const_debug __read_mostly -#else -# define const_debug const -#endif - -extern const_debug unsigned int sysctl_sched_features; - -#define SCHED_FEAT(name, enabled) \ - __SCHED_FEAT_##name , - -enum { -#include "features.h" - __SCHED_FEAT_NR, -}; - -#undef SCHED_FEAT - -#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL) -static __always_inline bool static_branch__true(struct jump_label_key *key) -{ - return likely(static_branch(key)); /* Not out of line branch. */ -} - -static __always_inline bool static_branch__false(struct jump_label_key *key) -{ - return unlikely(static_branch(key)); /* Out of line branch. 
*/ -} - -#define SCHED_FEAT(name, enabled) \ -static __always_inline bool static_branch_##name(struct jump_label_key *key) \ -{ \ - return static_branch__##enabled(key); \ -} - -#include "features.h" - -#undef SCHED_FEAT - -extern struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR]; -#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) -#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */ -#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) -#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */ - -static inline u64 global_rt_period(void) -{ - return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; -} - -static inline u64 global_rt_runtime(void) -{ - if (sysctl_sched_rt_runtime < 0) - return RUNTIME_INF; - - return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; -} - - - -static inline int task_current(struct rq *rq, struct task_struct *p) -{ - return rq->curr == p; -} - -static inline int task_running(struct rq *rq, struct task_struct *p) -{ -#ifdef CONFIG_SMP - return p->on_cpu; -#else - return task_current(rq, p); -#endif -} - - -#ifndef prepare_arch_switch -# define prepare_arch_switch(next) do { } while (0) -#endif -#ifndef finish_arch_switch -# define finish_arch_switch(prev) do { } while (0) -#endif - -#ifndef __ARCH_WANT_UNLOCKED_CTXSW -static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) -{ -#ifdef CONFIG_SMP - /* - * We can optimise this out completely for !SMP, because the - * SMP rebalancing from interrupt is the only thing that cares - * here. - */ - next->on_cpu = 1; -#endif -} - -static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) -{ -#ifdef CONFIG_SMP - /* - * After ->on_cpu is cleared, the task can be moved to a different CPU. - * We must ensure this doesn't happen until the switch is completely - * finished. - */ - smp_wmb(); - prev->on_cpu = 0; -#endif -#ifdef CONFIG_DEBUG_SPINLOCK - /* this is a valid case when another task releases the spinlock */ - rq->lock.owner = current; -#endif - /* - * If we are tracking spinlock dependencies then we have to - * fix up the runqueue lock - which gets 'carried over' from - * prev into current: - */ - spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); - - raw_spin_unlock_irq(&rq->lock); -} - -#else /* __ARCH_WANT_UNLOCKED_CTXSW */ -static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) -{ -#ifdef CONFIG_SMP - /* - * We can optimise this out completely for !SMP, because the - * SMP rebalancing from interrupt is the only thing that cares - * here. - */ - next->on_cpu = 1; -#endif -#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW - raw_spin_unlock_irq(&rq->lock); -#else - raw_spin_unlock(&rq->lock); -#endif -} - -static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) -{ -#ifdef CONFIG_SMP - /* - * After ->on_cpu is cleared, the task can be moved to a different CPU. - * We must ensure this doesn't happen until the switch is completely - * finished. 
- */ - smp_wmb(); - prev->on_cpu = 0; -#endif -#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW - local_irq_enable(); -#endif -} -#endif /* __ARCH_WANT_UNLOCKED_CTXSW */ - - -static inline void update_load_add(struct load_weight *lw, unsigned long inc) -{ - lw->weight += inc; - lw->inv_weight = 0; -} - -static inline void update_load_sub(struct load_weight *lw, unsigned long dec) -{ - lw->weight -= dec; - lw->inv_weight = 0; -} - -static inline void update_load_set(struct load_weight *lw, unsigned long w) -{ - lw->weight = w; - lw->inv_weight = 0; -} - -/* - * To aid in avoiding the subversion of "niceness" due to uneven distribution - * of tasks with abnormal "nice" values across CPUs the contribution that - * each task makes to its run queue's load is weighted according to its - * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a - * scaled version of the new time slice allocation that they receive on time - * slice expiry etc. - */ - -#define WEIGHT_IDLEPRIO 3 -#define WMULT_IDLEPRIO 1431655765 - -/* - * Nice levels are multiplicative, with a gentle 10% change for every - * nice level changed. I.e. when a CPU-bound task goes from nice 0 to - * nice 1, it will get ~10% less CPU time than another CPU-bound task - * that remained on nice 0. - * - * The "10% effect" is relative and cumulative: from _any_ nice level, - * if you go up 1 level, it's -10% CPU usage, if you go down 1 level - * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25. - * If a task goes up by ~10% and another task goes down by ~10% then - * the relative distance between them is ~25%.) - */ -static const int prio_to_weight[40] = { - /* -20 */ 88761, 71755, 56483, 46273, 36291, - /* -15 */ 29154, 23254, 18705, 14949, 11916, - /* -10 */ 9548, 7620, 6100, 4904, 3906, - /* -5 */ 3121, 2501, 1991, 1586, 1277, - /* 0 */ 1024, 820, 655, 526, 423, - /* 5 */ 335, 272, 215, 172, 137, - /* 10 */ 110, 87, 70, 56, 45, - /* 15 */ 36, 29, 23, 18, 15, -}; - -/* - * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated. - * - * In cases where the weight does not change often, we can use the - * precalculated inverse to speed up arithmetics by turning divisions - * into multiplications: - */ -static const u32 prio_to_wmult[40] = { - /* -20 */ 48388, 59856, 76040, 92818, 118348, - /* -15 */ 147320, 184698, 229616, 287308, 360437, - /* -10 */ 449829, 563644, 704093, 875809, 1099582, - /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, - /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, - /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, - /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, - /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, -}; - -/* Time spent by the tasks of the cpu accounting group executing in ... */ -enum cpuacct_stat_index { - CPUACCT_STAT_USER, /* ... user mode */ - CPUACCT_STAT_SYSTEM, /* ... 
kernel mode */ - - CPUACCT_STAT_NSTATS, -}; - - -#define sched_class_highest (&stop_sched_class) -#define for_each_class(class) \ - for (class = sched_class_highest; class; class = class->next) - -extern const struct sched_class stop_sched_class; -extern const struct sched_class rt_sched_class; -extern const struct sched_class fair_sched_class; -extern const struct sched_class idle_sched_class; - - -#ifdef CONFIG_SMP - -extern void trigger_load_balance(struct rq *rq, int cpu); -extern void idle_balance(int this_cpu, struct rq *this_rq); - -#else /* CONFIG_SMP */ - -static inline void idle_balance(int cpu, struct rq *rq) -{ -} - -#endif - -extern void sysrq_sched_debug_show(void); -extern void sched_init_granularity(void); -extern void update_max_interval(void); -extern void update_group_power(struct sched_domain *sd, int cpu); -extern int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu); -extern void init_sched_rt_class(void); -extern void init_sched_fair_class(void); - -extern void resched_task(struct task_struct *p); -extern void resched_cpu(int cpu); - -extern struct rt_bandwidth def_rt_bandwidth; -extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); - -extern void update_cpu_load(struct rq *this_rq); - -#ifdef CONFIG_CGROUP_CPUACCT -#include -/* track cpu usage of a group of tasks and its child groups */ -struct cpuacct { - struct cgroup_subsys_state css; - /* cpuusage holds pointer to a u64-type object on every cpu */ - u64 __percpu *cpuusage; - struct kernel_cpustat __percpu *cpustat; -}; - -/* return cpu accounting group corresponding to this container */ -static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp) -{ - return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id), - struct cpuacct, css); -} - -/* return cpu accounting group to which this task belongs */ -static inline struct cpuacct *task_ca(struct task_struct *tsk) -{ - return container_of(task_subsys_state(tsk, cpuacct_subsys_id), - struct cpuacct, css); -} - -static inline struct cpuacct *parent_ca(struct cpuacct *ca) -{ - if (!ca || !ca->css.cgroup->parent) - return NULL; - return cgroup_ca(ca->css.cgroup->parent); -} - -extern void cpuacct_charge(struct task_struct *tsk, u64 cputime); -#else -static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} -#endif - -static inline void inc_nr_running(struct rq *rq) -{ - rq->nr_running++; -} - -static inline void dec_nr_running(struct rq *rq) -{ - rq->nr_running--; -} - -extern void update_rq_clock(struct rq *rq); - -extern void activate_task(struct rq *rq, struct task_struct *p, int flags); -extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); - -extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); - -extern const_debug unsigned int sysctl_sched_time_avg; -extern const_debug unsigned int sysctl_sched_nr_migrate; -extern const_debug unsigned int sysctl_sched_migration_cost; - -static inline u64 sched_avg_period(void) -{ - return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2; -} - -void calc_load_account_idle(struct rq *this_rq); - -#ifdef CONFIG_SCHED_HRTICK - -/* - * Use hrtick when: - * - enabled by features - * - hrtimer is actually high res - */ -static inline int hrtick_enabled(struct rq *rq) -{ - if (!sched_feat(HRTICK)) - return 0; - if (!cpu_active(cpu_of(rq))) - return 0; - return hrtimer_is_hres_active(&rq->hrtick_timer); -} - -void hrtick_start(struct rq *rq, u64 delay); - -#else - -static inline int hrtick_enabled(struct rq 
*rq) -{ - return 0; -} - -#endif /* CONFIG_SCHED_HRTICK */ - -#ifdef CONFIG_SMP -extern void sched_avg_update(struct rq *rq); -static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) -{ - rq->rt_avg += rt_delta; - sched_avg_update(rq); -} -#else -static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { } -static inline void sched_avg_update(struct rq *rq) { } -#endif - -extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period); - -#ifdef CONFIG_SMP -#ifdef CONFIG_PREEMPT - -static inline void double_rq_lock(struct rq *rq1, struct rq *rq2); - -/* - * fair double_lock_balance: Safely acquires both rq->locks in a fair - * way at the expense of forcing extra atomic operations in all - * invocations. This assures that the double_lock is acquired using the - * same underlying policy as the spinlock_t on this architecture, which - * reduces latency compared to the unfair variant below. However, it - * also adds more overhead and therefore may reduce throughput. - */ -static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) - __releases(this_rq->lock) - __acquires(busiest->lock) - __acquires(this_rq->lock) -{ - raw_spin_unlock(&this_rq->lock); - double_rq_lock(this_rq, busiest); - - return 1; -} - -#else -/* - * Unfair double_lock_balance: Optimizes throughput at the expense of - * latency by eliminating extra atomic operations when the locks are - * already in proper order on entry. This favors lower cpu-ids and will - * grant the double lock to lower cpus over higher ids under contention, - * regardless of entry order into the function. - */ -static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) - __releases(this_rq->lock) - __acquires(busiest->lock) - __acquires(this_rq->lock) -{ - int ret = 0; - - if (unlikely(!raw_spin_trylock(&busiest->lock))) { - if (busiest < this_rq) { - raw_spin_unlock(&this_rq->lock); - raw_spin_lock(&busiest->lock); - raw_spin_lock_nested(&this_rq->lock, - SINGLE_DEPTH_NESTING); - ret = 1; - } else - raw_spin_lock_nested(&busiest->lock, - SINGLE_DEPTH_NESTING); - } - return ret; -} - -#endif /* CONFIG_PREEMPT */ - -/* - * double_lock_balance - lock the busiest runqueue, this_rq is locked already. - */ -static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) -{ - if (unlikely(!irqs_disabled())) { - /* printk() doesn't work good under rq->lock */ - raw_spin_unlock(&this_rq->lock); - BUG_ON(1); - } - - return _double_lock_balance(this_rq, busiest); -} - -static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) - __releases(busiest->lock) -{ - raw_spin_unlock(&busiest->lock); - lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); -} - -/* - * double_rq_lock - safely lock two runqueues - * - * Note this does not disable interrupts like task_rq_lock, - * you need to do so manually before calling. - */ -static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) - __acquires(rq1->lock) - __acquires(rq2->lock) -{ - BUG_ON(!irqs_disabled()); - if (rq1 == rq2) { - raw_spin_lock(&rq1->lock); - __acquire(rq2->lock); /* Fake it out ;) */ - } else { - if (rq1 < rq2) { - raw_spin_lock(&rq1->lock); - raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); - } else { - raw_spin_lock(&rq2->lock); - raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); - } - } -} - -/* - * double_rq_unlock - safely unlock two runqueues - * - * Note this does not restore interrupts like task_rq_unlock, - * you need to do so manually after calling. 
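/*
 * Illustrative sketch, not part of the patch above: the two
 * _double_lock_balance() variants and double_rq_lock() all enforce the
 * same invariant -- when two runqueue locks must be held at once, the
 * lock with the lower address is taken first.  Without such a global
 * order, one CPU taking A then B while another takes B then A can leave
 * each holding one lock and spinning on the other (ABBA deadlock).  A
 * minimal illustration of the same rule with plain spinlocks:
 */
static void lock_pair_in_order(spinlock_t *a, spinlock_t *b)
{
	if (a == b) {
		spin_lock(a);
		return;
	}
	if (a < b) {
		spin_lock(a);
		spin_lock_nested(b, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(b);
		spin_lock_nested(a, SINGLE_DEPTH_NESTING);
	}
}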
- */ -static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) - __releases(rq1->lock) - __releases(rq2->lock) -{ - raw_spin_unlock(&rq1->lock); - if (rq1 != rq2) - raw_spin_unlock(&rq2->lock); - else - __release(rq2->lock); -} - -#else /* CONFIG_SMP */ - -/* - * double_rq_lock - safely lock two runqueues - * - * Note this does not disable interrupts like task_rq_lock, - * you need to do so manually before calling. - */ -static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) - __acquires(rq1->lock) - __acquires(rq2->lock) -{ - BUG_ON(!irqs_disabled()); - BUG_ON(rq1 != rq2); - raw_spin_lock(&rq1->lock); - __acquire(rq2->lock); /* Fake it out ;) */ -} - -/* - * double_rq_unlock - safely unlock two runqueues - * - * Note this does not restore interrupts like task_rq_unlock, - * you need to do so manually after calling. - */ -static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) - __releases(rq1->lock) - __releases(rq2->lock) -{ - BUG_ON(rq1 != rq2); - raw_spin_unlock(&rq1->lock); - __release(rq2->lock); -} - -#endif - -extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); -extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); -extern void print_cfs_stats(struct seq_file *m, int cpu); -extern void print_rt_stats(struct seq_file *m, int cpu); - -extern void init_cfs_rq(struct cfs_rq *cfs_rq); -extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq); -extern void unthrottle_offline_cfs_rqs(struct rq *rq); - -extern void account_cfs_bandwidth_used(int enabled, int was_enabled); - -#ifdef CONFIG_NO_HZ -enum rq_nohz_flag_bits { - NOHZ_TICK_STOPPED, - NOHZ_BALANCE_KICK, - NOHZ_IDLE, -}; - -#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) -#endif diff --git a/trunk/kernel/sched/stats.c b/trunk/kernel/sched/stats.c deleted file mode 100644 index 2a581ba8e190..000000000000 --- a/trunk/kernel/sched/stats.c +++ /dev/null @@ -1,111 +0,0 @@ - -#include -#include -#include -#include - -#include "sched.h" - -/* - * bump this up when changing the output format or the meaning of an existing - * format, so that tools can adapt (or abort) - */ -#define SCHEDSTAT_VERSION 15 - -static int show_schedstat(struct seq_file *seq, void *v) -{ - int cpu; - int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9; - char *mask_str = kmalloc(mask_len, GFP_KERNEL); - - if (mask_str == NULL) - return -ENOMEM; - - seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION); - seq_printf(seq, "timestamp %lu\n", jiffies); - for_each_online_cpu(cpu) { - struct rq *rq = cpu_rq(cpu); -#ifdef CONFIG_SMP - struct sched_domain *sd; - int dcount = 0; -#endif - - /* runqueue-specific stats */ - seq_printf(seq, - "cpu%d %u %u %u %u %u %u %llu %llu %lu", - cpu, rq->yld_count, - rq->sched_switch, rq->sched_count, rq->sched_goidle, - rq->ttwu_count, rq->ttwu_local, - rq->rq_cpu_time, - rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount); - - seq_printf(seq, "\n"); - -#ifdef CONFIG_SMP - /* domain-specific stats */ - rcu_read_lock(); - for_each_domain(cpu, sd) { - enum cpu_idle_type itype; - - cpumask_scnprintf(mask_str, mask_len, - sched_domain_span(sd)); - seq_printf(seq, "domain%d %s", dcount++, mask_str); - for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES; - itype++) { - seq_printf(seq, " %u %u %u %u %u %u %u %u", - sd->lb_count[itype], - sd->lb_balanced[itype], - sd->lb_failed[itype], - sd->lb_imbalance[itype], - sd->lb_gained[itype], - sd->lb_hot_gained[itype], - sd->lb_nobusyq[itype], - sd->lb_nobusyg[itype]); - } - seq_printf(seq, - " %u %u %u %u %u %u %u %u %u %u %u %u\n", 
- sd->alb_count, sd->alb_failed, sd->alb_pushed, - sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed, - sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed, - sd->ttwu_wake_remote, sd->ttwu_move_affine, - sd->ttwu_move_balance); - } - rcu_read_unlock(); -#endif - } - kfree(mask_str); - return 0; -} - -static int schedstat_open(struct inode *inode, struct file *file) -{ - unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32); - char *buf = kmalloc(size, GFP_KERNEL); - struct seq_file *m; - int res; - - if (!buf) - return -ENOMEM; - res = single_open(file, show_schedstat, NULL); - if (!res) { - m = file->private_data; - m->buf = buf; - m->size = size; - } else - kfree(buf); - return res; -} - -static const struct file_operations proc_schedstat_operations = { - .open = schedstat_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static int __init proc_schedstat_init(void) -{ - proc_create("schedstat", 0, NULL, &proc_schedstat_operations); - return 0; -} -module_init(proc_schedstat_init); diff --git a/trunk/kernel/sched/auto_group.c b/trunk/kernel/sched_autogroup.c similarity index 88% rename from trunk/kernel/sched/auto_group.c rename to trunk/kernel/sched_autogroup.c index e8a1f83ee0e7..429242f3c484 100644 --- a/trunk/kernel/sched/auto_group.c +++ b/trunk/kernel/sched_autogroup.c @@ -1,19 +1,15 @@ #ifdef CONFIG_SCHED_AUTOGROUP -#include "sched.h" - #include #include #include #include -#include -#include unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1; static struct autogroup autogroup_default; static atomic_t autogroup_seq_nr; -void __init autogroup_init(struct task_struct *init_task) +static void __init autogroup_init(struct task_struct *init_task) { autogroup_default.tg = &root_task_group; kref_init(&autogroup_default.kref); @@ -21,7 +17,7 @@ void __init autogroup_init(struct task_struct *init_task) init_task->signal->autogroup = &autogroup_default; } -void autogroup_free(struct task_group *tg) +static inline void autogroup_free(struct task_group *tg) { kfree(tg->autogroup); } @@ -63,6 +59,10 @@ static inline struct autogroup *autogroup_task_get(struct task_struct *p) return ag; } +#ifdef CONFIG_RT_GROUP_SCHED +static void free_rt_sched_group(struct task_group *tg); +#endif + static inline struct autogroup *autogroup_create(void) { struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL); @@ -108,7 +108,8 @@ static inline struct autogroup *autogroup_create(void) return autogroup_kref_get(&autogroup_default); } -bool task_wants_autogroup(struct task_struct *p, struct task_group *tg) +static inline bool +task_wants_autogroup(struct task_struct *p, struct task_group *tg) { if (tg != &root_task_group) return false; @@ -126,6 +127,22 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg) return true; } +static inline bool task_group_is_autogroup(struct task_group *tg) +{ + return !!tg->autogroup; +} + +static inline struct task_group * +autogroup_task_group(struct task_struct *p, struct task_group *tg) +{ + int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled); + + if (enabled && task_wants_autogroup(p, tg)) + return p->signal->autogroup->tg; + + return tg; +} + static void autogroup_move_group(struct task_struct *p, struct autogroup *ag) { @@ -246,7 +263,7 @@ void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m) #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_SCHED_DEBUG -int autogroup_path(struct task_group *tg, char *buf, int buflen) +static inline int autogroup_path(struct task_group *tg, char *buf, 
int buflen) { if (!task_group_is_autogroup(tg)) return 0; diff --git a/trunk/kernel/sched/auto_group.h b/trunk/kernel/sched_autogroup.h similarity index 66% rename from trunk/kernel/sched/auto_group.h rename to trunk/kernel/sched_autogroup.h index 8bd047142816..c2f0e7248dca 100644 --- a/trunk/kernel/sched/auto_group.h +++ b/trunk/kernel/sched_autogroup.h @@ -1,8 +1,5 @@ #ifdef CONFIG_SCHED_AUTOGROUP -#include -#include - struct autogroup { /* * reference doesn't mean how many thread attach to this @@ -16,28 +13,9 @@ struct autogroup { int nice; }; -extern void autogroup_init(struct task_struct *init_task); -extern void autogroup_free(struct task_group *tg); - -static inline bool task_group_is_autogroup(struct task_group *tg) -{ - return !!tg->autogroup; -} - -extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg); - +static inline bool task_group_is_autogroup(struct task_group *tg); static inline struct task_group * -autogroup_task_group(struct task_struct *p, struct task_group *tg) -{ - int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled); - - if (enabled && task_wants_autogroup(p, tg)) - return p->signal->autogroup->tg; - - return tg; -} - -extern int autogroup_path(struct task_group *tg, char *buf, int buflen); +autogroup_task_group(struct task_struct *p, struct task_group *tg); #else /* !CONFIG_SCHED_AUTOGROUP */ diff --git a/trunk/kernel/sched/clock.c b/trunk/kernel/sched_clock.c similarity index 100% rename from trunk/kernel/sched/clock.c rename to trunk/kernel/sched_clock.c diff --git a/trunk/kernel/sched/cpupri.c b/trunk/kernel/sched_cpupri.c similarity index 99% rename from trunk/kernel/sched/cpupri.c rename to trunk/kernel/sched_cpupri.c index b0d798eaf130..a86cf9d9eb11 100644 --- a/trunk/kernel/sched/cpupri.c +++ b/trunk/kernel/sched_cpupri.c @@ -1,5 +1,5 @@ /* - * kernel/sched/cpupri.c + * kernel/sched_cpupri.c * * CPU priority management * @@ -28,7 +28,7 @@ */ #include -#include "cpupri.h" +#include "sched_cpupri.h" /* Convert between a 140 based task->prio, and our 102 based cpupri */ static int convert_prio(int prio) diff --git a/trunk/kernel/sched/cpupri.h b/trunk/kernel/sched_cpupri.h similarity index 100% rename from trunk/kernel/sched/cpupri.h rename to trunk/kernel/sched_cpupri.h diff --git a/trunk/kernel/sched/debug.c b/trunk/kernel/sched_debug.c similarity index 99% rename from trunk/kernel/sched/debug.c rename to trunk/kernel/sched_debug.c index 2a075e10004b..a6710a112b4f 100644 --- a/trunk/kernel/sched/debug.c +++ b/trunk/kernel/sched_debug.c @@ -1,5 +1,5 @@ /* - * kernel/sched/debug.c + * kernel/time/sched_debug.c * * Print the CFS rbtree * @@ -16,8 +16,6 @@ #include #include -#include "sched.h" - static DEFINE_SPINLOCK(sched_debug_lock); /* @@ -375,7 +373,7 @@ static int sched_debug_show(struct seq_file *m, void *v) return 0; } -void sysrq_sched_debug_show(void) +static void sysrq_sched_debug_show(void) { sched_debug_show(NULL, NULL); } diff --git a/trunk/kernel/sched/fair.c b/trunk/kernel/sched_fair.c similarity index 87% rename from trunk/kernel/sched/fair.c rename to trunk/kernel/sched_fair.c index 8e42de9105f8..8a39fa3e3c6c 100644 --- a/trunk/kernel/sched/fair.c +++ b/trunk/kernel/sched_fair.c @@ -23,13 +23,6 @@ #include #include #include -#include -#include -#include - -#include - -#include "sched.h" /* * Targeted preemption latency for CPU-bound tasks: @@ -110,110 +103,7 @@ unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL; unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL; #endif -/* - * Increase the 
granularity value when there are more CPUs, - * because with more CPUs the 'effective latency' as visible - * to users decreases. But the relationship is not linear, - * so pick a second-best guess by going with the log2 of the - * number of CPUs. - * - * This idea comes from the SD scheduler of Con Kolivas: - */ -static int get_update_sysctl_factor(void) -{ - unsigned int cpus = min_t(int, num_online_cpus(), 8); - unsigned int factor; - - switch (sysctl_sched_tunable_scaling) { - case SCHED_TUNABLESCALING_NONE: - factor = 1; - break; - case SCHED_TUNABLESCALING_LINEAR: - factor = cpus; - break; - case SCHED_TUNABLESCALING_LOG: - default: - factor = 1 + ilog2(cpus); - break; - } - - return factor; -} - -static void update_sysctl(void) -{ - unsigned int factor = get_update_sysctl_factor(); - -#define SET_SYSCTL(name) \ - (sysctl_##name = (factor) * normalized_sysctl_##name) - SET_SYSCTL(sched_min_granularity); - SET_SYSCTL(sched_latency); - SET_SYSCTL(sched_wakeup_granularity); -#undef SET_SYSCTL -} - -void sched_init_granularity(void) -{ - update_sysctl(); -} - -#if BITS_PER_LONG == 32 -# define WMULT_CONST (~0UL) -#else -# define WMULT_CONST (1UL << 32) -#endif - -#define WMULT_SHIFT 32 - -/* - * Shift right and round: - */ -#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) - -/* - * delta *= weight / lw - */ -static unsigned long -calc_delta_mine(unsigned long delta_exec, unsigned long weight, - struct load_weight *lw) -{ - u64 tmp; - - /* - * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched - * entities since MIN_SHARES = 2. Treat weight as 1 if less than - * 2^SCHED_LOAD_RESOLUTION. - */ - if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION))) - tmp = (u64)delta_exec * scale_load_down(weight); - else - tmp = (u64)delta_exec; - - if (!lw->inv_weight) { - unsigned long w = scale_load_down(lw->weight); - - if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST)) - lw->inv_weight = 1; - else if (unlikely(!w)) - lw->inv_weight = WMULT_CONST; - else - lw->inv_weight = WMULT_CONST / w; - } - - /* - * Check whether we'd overflow the 64-bit multiplication: - */ - if (unlikely(tmp > WMULT_CONST)) - tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight, - WMULT_SHIFT/2); - else - tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT); - - return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); -} - - -const struct sched_class fair_sched_class; +static const struct sched_class fair_sched_class; /************************************************************** * CFS operations on generic schedulable entities: @@ -523,7 +413,7 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) rb_erase(&se->run_node, &cfs_rq->tasks_timeline); } -struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq) +static struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq) { struct rb_node *left = cfs_rq->rb_leftmost; @@ -544,7 +434,7 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se) } #ifdef CONFIG_SCHED_DEBUG -struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) +static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) { struct rb_node *last = rb_last(&cfs_rq->tasks_timeline); @@ -794,7 +684,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) { update_load_add(&cfs_rq->load, se->load.weight); if (!parent_entity(se)) - update_load_add(&rq_of(cfs_rq)->load, se->load.weight); + inc_cpu_load(rq_of(cfs_rq), se->load.weight); if (entity_is_task(se)) { add_cfs_task_weight(cfs_rq, se->load.weight); 
list_add(&se->group_node, &cfs_rq->tasks); @@ -807,7 +697,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) { update_load_sub(&cfs_rq->load, se->load.weight); if (!parent_entity(se)) - update_load_sub(&rq_of(cfs_rq)->load, se->load.weight); + dec_cpu_load(rq_of(cfs_rq), se->load.weight); if (entity_is_task(se)) { add_cfs_task_weight(cfs_rq, -se->load.weight); list_del_init(&se->group_node); @@ -1003,6 +893,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) if (unlikely(delta > se->statistics.sleep_max)) se->statistics.sleep_max = delta; + se->statistics.sleep_start = 0; se->statistics.sum_sleep_runtime += delta; if (tsk) { @@ -1019,6 +910,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) if (unlikely(delta > se->statistics.block_max)) se->statistics.block_max = delta; + se->statistics.block_start = 0; se->statistics.sum_sleep_runtime += delta; if (tsk) { @@ -1028,8 +920,6 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) trace_sched_stat_iowait(tsk, delta); } - trace_sched_stat_blocked(tsk, delta); - /* * Blocking time is in units of nanosecs, so shift by * 20 to get a milliseconds-range estimation of the @@ -1397,32 +1287,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) */ #ifdef CONFIG_CFS_BANDWIDTH - -#ifdef HAVE_JUMP_LABEL -static struct jump_label_key __cfs_bandwidth_used; - -static inline bool cfs_bandwidth_used(void) -{ - return static_branch(&__cfs_bandwidth_used); -} - -void account_cfs_bandwidth_used(int enabled, int was_enabled) -{ - /* only need to count groups transitioning between enabled/!enabled */ - if (enabled && !was_enabled) - jump_label_inc(&__cfs_bandwidth_used); - else if (!enabled && was_enabled) - jump_label_dec(&__cfs_bandwidth_used); -} -#else /* HAVE_JUMP_LABEL */ -static bool cfs_bandwidth_used(void) -{ - return true; -} - -void account_cfs_bandwidth_used(int enabled, int was_enabled) {} -#endif /* HAVE_JUMP_LABEL */ - /* * default period for cfs group bandwidth. 
* default: 0.1s, units: nanoseconds @@ -1444,7 +1308,7 @@ static inline u64 sched_cfs_bandwidth_slice(void) * * requires cfs_b->lock */ -void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) +static void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) { u64 now; @@ -1456,11 +1320,6 @@ void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period); } -static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) -{ - return &tg->cfs_bandwidth; -} - /* returns 0 on failure to allocate runtime */ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) { @@ -1562,7 +1421,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) { - if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) + if (!cfs_rq->runtime_enabled) return; __account_cfs_rq_runtime(cfs_rq, delta_exec); @@ -1570,13 +1429,13 @@ static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) { - return cfs_bandwidth_used() && cfs_rq->throttled; + return cfs_rq->throttled; } /* check whether cfs_rq, or any parent, is throttled */ static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) { - return cfs_bandwidth_used() && cfs_rq->throttle_count; + return cfs_rq->throttle_count; } /* @@ -1671,7 +1530,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq) raw_spin_unlock(&cfs_b->lock); } -void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) +static void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) { struct rq *rq = rq_of(cfs_rq); struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); @@ -1897,9 +1756,6 @@ static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) { - if (!cfs_bandwidth_used()) - return; - if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) return; @@ -1945,9 +1801,6 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) */ static void check_enqueue_throttle(struct cfs_rq *cfs_rq) { - if (!cfs_bandwidth_used()) - return; - /* an active group must be handled by the update_curr()->put() path */ if (!cfs_rq->runtime_enabled || cfs_rq->curr) return; @@ -1965,9 +1818,6 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq) /* conditionally throttle active cfs_rq's from put_prev_entity() */ static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { - if (!cfs_bandwidth_used()) - return; - if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) return; @@ -1980,112 +1830,7 @@ static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) throttle_cfs_rq(cfs_rq); } - -static inline u64 default_cfs_period(void); -static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun); -static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b); - -static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) -{ - struct cfs_bandwidth *cfs_b = - container_of(timer, struct cfs_bandwidth, slack_timer); - do_sched_cfs_slack_timer(cfs_b); - - return HRTIMER_NORESTART; -} - -static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) -{ - struct cfs_bandwidth *cfs_b = - container_of(timer, struct cfs_bandwidth, period_timer); - ktime_t now; - int overrun; - int idle = 0; - - for (;;) { - now = hrtimer_cb_get_time(timer); - overrun = hrtimer_forward(timer, now, cfs_b->period); - - if (!overrun) - break; - - idle = 
do_sched_cfs_period_timer(cfs_b, overrun); - } - - return idle ? HRTIMER_NORESTART : HRTIMER_RESTART; -} - -void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) -{ - raw_spin_lock_init(&cfs_b->lock); - cfs_b->runtime = 0; - cfs_b->quota = RUNTIME_INF; - cfs_b->period = ns_to_ktime(default_cfs_period()); - - INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); - hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - cfs_b->period_timer.function = sched_cfs_period_timer; - hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - cfs_b->slack_timer.function = sched_cfs_slack_timer; -} - -static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) -{ - cfs_rq->runtime_enabled = 0; - INIT_LIST_HEAD(&cfs_rq->throttled_list); -} - -/* requires cfs_b->lock, may release to reprogram timer */ -void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) -{ - /* - * The timer may be active because we're trying to set a new bandwidth - * period or because we're racing with the tear-down path - * (timer_active==0 becomes visible before the hrtimer call-back - * terminates). In either case we ensure that it's re-programmed - */ - while (unlikely(hrtimer_active(&cfs_b->period_timer))) { - raw_spin_unlock(&cfs_b->lock); - /* ensure cfs_b->lock is available while we wait */ - hrtimer_cancel(&cfs_b->period_timer); - - raw_spin_lock(&cfs_b->lock); - /* if someone else restarted the timer then we're done */ - if (cfs_b->timer_active) - return; - } - - cfs_b->timer_active = 1; - start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period); -} - -static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) -{ - hrtimer_cancel(&cfs_b->period_timer); - hrtimer_cancel(&cfs_b->slack_timer); -} - -void unthrottle_offline_cfs_rqs(struct rq *rq) -{ - struct cfs_rq *cfs_rq; - - for_each_leaf_cfs_rq(rq, cfs_rq) { - struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); - - if (!cfs_rq->runtime_enabled) - continue; - - /* - * clock_task is not advancing so we just need to make sure - * there's some valid quota amount - */ - cfs_rq->runtime_remaining = cfs_b->quota; - if (cfs_rq_throttled(cfs_rq)) - unthrottle_cfs_rq(cfs_rq); - } -} - -#else /* CONFIG_CFS_BANDWIDTH */ +#else static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) {} static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} @@ -2107,22 +1852,8 @@ static inline int throttled_lb_pair(struct task_group *tg, { return 0; } - -void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} - -#ifdef CONFIG_FAIR_GROUP_SCHED -static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} #endif -static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) -{ - return NULL; -} -static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} -void unthrottle_offline_cfs_rqs(struct rq *rq) {} - -#endif /* CONFIG_CFS_BANDWIDTH */ - /************************************************** * CFS operations on tasks: */ @@ -2135,7 +1866,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) WARN_ON(task_rq(p) != rq); - if (cfs_rq->nr_running > 1) { + if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) { u64 slice = sched_slice(cfs_rq, se); u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; s64 delta = slice - ran; @@ -2166,7 +1897,7 @@ static void hrtick_update(struct rq *rq) { struct task_struct *curr = rq->curr; - if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class) + if (curr->sched_class != &fair_sched_class) return; if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) @@ 
-2289,61 +2020,6 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) } #ifdef CONFIG_SMP -/* Used instead of source_load when we know the type == 0 */ -static unsigned long weighted_cpuload(const int cpu) -{ - return cpu_rq(cpu)->load.weight; -} - -/* - * Return a low guess at the load of a migration-source cpu weighted - * according to the scheduling class and "nice" value. - * - * We want to under-estimate the load of migration sources, to - * balance conservatively. - */ -static unsigned long source_load(int cpu, int type) -{ - struct rq *rq = cpu_rq(cpu); - unsigned long total = weighted_cpuload(cpu); - - if (type == 0 || !sched_feat(LB_BIAS)) - return total; - - return min(rq->cpu_load[type-1], total); -} - -/* - * Return a high guess at the load of a migration-target cpu weighted - * according to the scheduling class and "nice" value. - */ -static unsigned long target_load(int cpu, int type) -{ - struct rq *rq = cpu_rq(cpu); - unsigned long total = weighted_cpuload(cpu); - - if (type == 0 || !sched_feat(LB_BIAS)) - return total; - - return max(rq->cpu_load[type-1], total); -} - -static unsigned long power_of(int cpu) -{ - return cpu_rq(cpu)->cpu_power; -} - -static unsigned long cpu_avg_load_per_task(int cpu) -{ - struct rq *rq = cpu_rq(cpu); - unsigned long nr_running = ACCESS_ONCE(rq->nr_running); - - if (nr_running) - return rq->load.weight / nr_running; - - return 0; -} - static void task_waking_fair(struct task_struct *p) { @@ -2651,7 +2327,7 @@ static int select_idle_sibling(struct task_struct *p, int target) int prev_cpu = task_cpu(p); struct sched_domain *sd; struct sched_group *sg; - int i; + int i, smt = 0; /* * If the task is going to be woken-up on this cpu and if it is @@ -2671,9 +2347,17 @@ static int select_idle_sibling(struct task_struct *p, int target) * Otherwise, iterate the domains and find an elegible idle cpu. */ rcu_read_lock(); +again: + for_each_domain(target, sd) { + if (!smt && (sd->flags & SD_SHARE_CPUPOWER)) + continue; + + if (smt && !(sd->flags & SD_SHARE_CPUPOWER)) + break; + + if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) + break; - sd = rcu_dereference(per_cpu(sd_llc, target)); - for_each_lower_domain(sd) { sg = sd->groups; do { if (!cpumask_intersects(sched_group_cpus(sg), @@ -2692,6 +2376,10 @@ static int select_idle_sibling(struct task_struct *p, int target) sg = sg->next; } while (sg != sd->groups); } + if (!smt) { + smt = 1; + goto again; + } done: rcu_read_unlock(); @@ -2720,9 +2408,6 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) int want_sd = 1; int sync = wake_flags & WF_SYNC; - if (p->rt.nr_cpus_allowed == 1) - return prev_cpu; - if (sd_flag & SD_BALANCE_WAKE) { if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) want_affine = 1; @@ -3007,8 +2692,7 @@ static struct task_struct *pick_next_task_fair(struct rq *rq) } while (cfs_rq); p = task_of(se); - if (hrtick_enabled(rq)) - hrtick_start_fair(rq, p); + hrtick_start_fair(rq, p); return p; } @@ -3052,12 +2736,6 @@ static void yield_task_fair(struct rq *rq) * Update run-time statistics of the 'current'. */ update_curr(cfs_rq); - /* - * Tell update_rq_clock() that we've just updated, - * so we don't do microscopic update in schedule() - * and double the fastpath cost. 
- */ - rq->skip_clock_update = 1; } set_skip_buddy(se); @@ -3097,49 +2775,13 @@ static void pull_task(struct rq *src_rq, struct task_struct *p, check_preempt_curr(this_rq, p, 0); } -/* - * Is this task likely cache-hot: - */ -static int -task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) -{ - s64 delta; - - if (p->sched_class != &fair_sched_class) - return 0; - - if (unlikely(p->policy == SCHED_IDLE)) - return 0; - - /* - * Buddy candidates are cache hot: - */ - if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running && - (&p->se == cfs_rq_of(&p->se)->next || - &p->se == cfs_rq_of(&p->se)->last)) - return 1; - - if (sysctl_sched_migration_cost == -1) - return 1; - if (sysctl_sched_migration_cost == 0) - return 0; - - delta = now - p->se.exec_start; - - return delta < (s64)sysctl_sched_migration_cost; -} - -#define LBF_ALL_PINNED 0x01 -#define LBF_NEED_BREAK 0x02 -#define LBF_ABORT 0x04 - /* * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? */ static int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, struct sched_domain *sd, enum cpu_idle_type idle, - int *lb_flags) + int *all_pinned) { int tsk_cache_hot = 0; /* @@ -3152,7 +2794,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, schedstat_inc(p, se.statistics.nr_failed_migrations_affine); return 0; } - *lb_flags &= ~LBF_ALL_PINNED; + *all_pinned = 0; if (task_running(rq, p)) { schedstat_inc(p, se.statistics.nr_failed_migrations_running); @@ -3226,7 +2868,7 @@ move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, static unsigned long balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, unsigned long max_load_move, struct sched_domain *sd, - enum cpu_idle_type idle, int *lb_flags, + enum cpu_idle_type idle, int *all_pinned, struct cfs_rq *busiest_cfs_rq) { int loops = 0, pulled = 0; @@ -3237,14 +2879,12 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, goto out; list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) { - if (loops++ > sysctl_sched_nr_migrate) { - *lb_flags |= LBF_NEED_BREAK; + if (loops++ > sysctl_sched_nr_migrate) break; - } if ((p->se.load.weight >> 1) > rem_load_move || !can_migrate_task(p, busiest, this_cpu, sd, idle, - lb_flags)) + all_pinned)) continue; pull_task(busiest, p, this_rq, this_cpu); @@ -3257,10 +2897,8 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, * kernels will stop after the first task is pulled to minimize * the critical section. 
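/*
 * Illustrative sketch, not part of the patch above: the task_hot()
 * helper in the removed block boils down to an age check against the
 * migration-cost knob -- a task that ran very recently is assumed to
 * still have warm caches and the balancer prefers to leave it where it
 * is.  Assuming the usual default of sysctl_sched_migration_cost in
 * kernels of this vintage (500000 ns, i.e. 0.5 ms), the core of the
 * test is simply:
 */
static int looks_cache_hot(struct task_struct *p, u64 now)
{
	s64 delta = now - p->se.exec_start;

	return delta < (s64)sysctl_sched_migration_cost;
}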
*/ - if (idle == CPU_NEWLY_IDLE) { - *lb_flags |= LBF_ABORT; + if (idle == CPU_NEWLY_IDLE) break; - } #endif /* @@ -3365,7 +3003,7 @@ static unsigned long load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, unsigned long max_load_move, struct sched_domain *sd, enum cpu_idle_type idle, - int *lb_flags) + int *all_pinned) { long rem_load_move = max_load_move; struct cfs_rq *busiest_cfs_rq; @@ -3378,9 +3016,6 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, unsigned long busiest_weight = busiest_cfs_rq->load.weight; u64 rem_load, moved_load; - if (*lb_flags & (LBF_NEED_BREAK|LBF_ABORT)) - break; - /* * empty group or part of a throttled hierarchy */ @@ -3392,7 +3027,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, rem_load = div_u64(rem_load, busiest_h_load + 1); moved_load = balance_tasks(this_rq, this_cpu, busiest, - rem_load, sd, idle, lb_flags, + rem_load, sd, idle, all_pinned, busiest_cfs_rq); if (!moved_load) @@ -3418,10 +3053,10 @@ static unsigned long load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, unsigned long max_load_move, struct sched_domain *sd, enum cpu_idle_type idle, - int *lb_flags) + int *all_pinned) { return balance_tasks(this_rq, this_cpu, busiest, - max_load_move, sd, idle, lb_flags, + max_load_move, sd, idle, all_pinned, &busiest->cfs); } #endif @@ -3436,30 +3071,29 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, unsigned long max_load_move, struct sched_domain *sd, enum cpu_idle_type idle, - int *lb_flags) + int *all_pinned) { unsigned long total_load_moved = 0, load_moved; do { load_moved = load_balance_fair(this_rq, this_cpu, busiest, max_load_move - total_load_moved, - sd, idle, lb_flags); + sd, idle, all_pinned); total_load_moved += load_moved; - if (*lb_flags & (LBF_NEED_BREAK|LBF_ABORT)) - break; - #ifdef CONFIG_PREEMPT /* * NEWIDLE balancing is a source of latency, so preemptible * kernels will stop after the first task is pulled to minimize * the critical section. */ - if (idle == CPU_NEWLY_IDLE && this_rq->nr_running) { - *lb_flags |= LBF_ABORT; + if (idle == CPU_NEWLY_IDLE && this_rq->nr_running) + break; + + if (raw_spin_is_contended(&this_rq->lock) || + raw_spin_is_contended(&busiest->lock)) break; - } #endif } while (load_moved && max_load_move > total_load_moved); @@ -3520,6 +3154,15 @@ struct sg_lb_stats { int group_has_capacity; /* Is there extra capacity in the group? */ }; +/** + * group_first_cpu - Returns the first cpu in the cpumask of a sched_group. + * @group: The group whose first cpu is to be returned. + */ +static inline unsigned int group_first_cpu(struct sched_group *group) +{ + return cpumask_first(sched_group_cpus(group)); +} + /** * get_sd_load_idx - Obtain the load index for a given sched domain. * @sd: The sched_domain whose load_idx is to be obtained. 
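/*
 * Illustrative sketch, not part of the patch above: the get_sd_load_idx()
 * kernel-doc just above is cut off by the hunk boundary, but the function
 * itself only maps the caller's idle state onto one of the per-domain
 * load indexes, which in turn pick a slot of rq->cpu_load[].  Assuming
 * the sched_domain fields of this era (busy_idx, newidle_idx, idle_idx):
 */
static int load_idx_for(struct sched_domain *sd, enum cpu_idle_type idle)
{
	switch (idle) {
	case CPU_NOT_IDLE:
		return sd->busy_idx;	/* slot used when balancing while busy */
	case CPU_NEWLY_IDLE:
		return sd->newidle_idx;
	default:
		return sd->idle_idx;
	}
}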
@@ -3769,7 +3412,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu) sdg->sgp->power = power; } -void update_group_power(struct sched_domain *sd, int cpu) +static void update_group_power(struct sched_domain *sd, int cpu) { struct sched_domain *child = sd->child; struct sched_group *group, *sdg = sd->groups; @@ -4035,6 +3678,11 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu, } while (sg != sd->groups); } +int __weak arch_sd_sibling_asym_packing(void) +{ + return 0*SD_ASYM_PACKING; +} + /** * check_asym_packing - Check to see if the group is packed into the * sched doman. @@ -4398,7 +4046,7 @@ find_busiest_queue(struct sched_domain *sd, struct sched_group *group, #define MAX_PINNED_INTERVAL 512 /* Working cpumask for load_balance and load_balance_newidle. */ -DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask); +static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask); static int need_active_balance(struct sched_domain *sd, int idle, int busiest_cpu, int this_cpu) @@ -4449,7 +4097,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, struct sched_domain *sd, enum cpu_idle_type idle, int *balance) { - int ld_moved, lb_flags = 0, active_balance = 0; + int ld_moved, all_pinned = 0, active_balance = 0; struct sched_group *group; unsigned long imbalance; struct rq *busiest; @@ -4490,11 +4138,11 @@ static int load_balance(int this_cpu, struct rq *this_rq, * still unbalanced. ld_moved simply stays zero, so it is * correctly treated as an imbalance. */ - lb_flags |= LBF_ALL_PINNED; + all_pinned = 1; local_irq_save(flags); double_rq_lock(this_rq, busiest); ld_moved = move_tasks(this_rq, this_cpu, busiest, - imbalance, sd, idle, &lb_flags); + imbalance, sd, idle, &all_pinned); double_rq_unlock(this_rq, busiest); local_irq_restore(flags); @@ -4504,16 +4152,8 @@ static int load_balance(int this_cpu, struct rq *this_rq, if (ld_moved && this_cpu != smp_processor_id()) resched_cpu(this_cpu); - if (lb_flags & LBF_ABORT) - goto out_balanced; - - if (lb_flags & LBF_NEED_BREAK) { - lb_flags &= ~LBF_NEED_BREAK; - goto redo; - } - /* All tasks on this runqueue were pinned by CPU affinity */ - if (unlikely(lb_flags & LBF_ALL_PINNED)) { + if (unlikely(all_pinned)) { cpumask_clear_cpu(cpu_of(busiest), cpus); if (!cpumask_empty(cpus)) goto redo; @@ -4543,7 +4183,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, tsk_cpus_allowed(busiest->curr))) { raw_spin_unlock_irqrestore(&busiest->lock, flags); - lb_flags |= LBF_ALL_PINNED; + all_pinned = 1; goto out_one_pinned; } @@ -4596,8 +4236,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, out_one_pinned: /* tune up the balancing interval */ - if (((lb_flags & LBF_ALL_PINNED) && - sd->balance_interval < MAX_PINNED_INTERVAL) || + if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) || (sd->balance_interval < sd->max_interval)) sd->balance_interval *= 2; @@ -4610,7 +4249,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, * idle_balance is called by schedule() if this_cpu is about to become * idle. Attempts to pull tasks from other CPUs. */ -void idle_balance(int this_cpu, struct rq *this_rq) +static void idle_balance(int this_cpu, struct rq *this_rq) { struct sched_domain *sd; int pulled_task = 0; @@ -4725,16 +4364,28 @@ static int active_load_balance_cpu_stop(void *data) #ifdef CONFIG_NO_HZ /* * idle load balancing details + * - One of the idle CPUs nominates itself as idle load_balancer, while + * entering idle. 
+ * - This idle load balancer CPU will also go into tickless mode when + * it is idle, just like all other idle CPUs * - When one of the busy CPUs notice that there may be an idle rebalancing * needed, they will kick the idle load balancer, which then does idle * load balancing for all the idle CPUs. */ static struct { + atomic_t load_balancer; + atomic_t first_pick_cpu; + atomic_t second_pick_cpu; cpumask_var_t idle_cpus_mask; - atomic_t nr_cpus; + cpumask_var_t grp_idle_mask; unsigned long next_balance; /* in jiffy units */ } nohz ____cacheline_aligned; +int get_nohz_load_balancer(void) +{ + return atomic_read(&nohz.load_balancer); +} + #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) /** * lowest_flag_domain - Return lowest sched_domain containing flag. @@ -4770,6 +4421,33 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) for (sd = lowest_flag_domain(cpu, flag); \ (sd && (sd->flags & flag)); sd = sd->parent) +/** + * is_semi_idle_group - Checks if the given sched_group is semi-idle. + * @ilb_group: group to be checked for semi-idleness + * + * Returns: 1 if the group is semi-idle. 0 otherwise. + * + * We define a sched_group to be semi idle if it has atleast one idle-CPU + * and atleast one non-idle CPU. This helper function checks if the given + * sched_group is semi-idle or not. + */ +static inline int is_semi_idle_group(struct sched_group *ilb_group) +{ + cpumask_and(nohz.grp_idle_mask, nohz.idle_cpus_mask, + sched_group_cpus(ilb_group)); + + /* + * A sched_group is semi-idle when it has atleast one busy cpu + * and atleast one idle cpu. + */ + if (cpumask_empty(nohz.grp_idle_mask)) + return 0; + + if (cpumask_equal(nohz.grp_idle_mask, sched_group_cpus(ilb_group))) + return 0; + + return 1; +} /** * find_new_ilb - Finds the optimum idle load balancer for nomination. * @cpu: The cpu which is nominating a new idle_load_balancer. @@ -4784,9 +4462,9 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) */ static int find_new_ilb(int cpu) { - int ilb = cpumask_first(nohz.idle_cpus_mask); - struct sched_group *ilbg; struct sched_domain *sd; + struct sched_group *ilb_group; + int ilb = nr_cpu_ids; /* * Have idle load balancer selection from semi-idle packages only @@ -4804,28 +4482,23 @@ static int find_new_ilb(int cpu) rcu_read_lock(); for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) { - ilbg = sd->groups; + ilb_group = sd->groups; do { - if (ilbg->group_weight != - atomic_read(&ilbg->sgp->nr_busy_cpus)) { - ilb = cpumask_first_and(nohz.idle_cpus_mask, - sched_group_cpus(ilbg)); + if (is_semi_idle_group(ilb_group)) { + ilb = cpumask_first(nohz.grp_idle_mask); goto unlock; } - ilbg = ilbg->next; + ilb_group = ilb_group->next; - } while (ilbg != sd->groups); + } while (ilb_group != sd->groups); } unlock: rcu_read_unlock(); out_done: - if (ilb < nr_cpu_ids && idle_cpu(ilb)) - return ilb; - - return nr_cpu_ids; + return ilb; } #else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */ static inline int find_new_ilb(int call_cpu) @@ -4845,68 +4518,99 @@ static void nohz_balancer_kick(int cpu) nohz.next_balance++; - ilb_cpu = find_new_ilb(cpu); + ilb_cpu = get_nohz_load_balancer(); - if (ilb_cpu >= nr_cpu_ids) - return; + if (ilb_cpu >= nr_cpu_ids) { + ilb_cpu = cpumask_first(nohz.idle_cpus_mask); + if (ilb_cpu >= nr_cpu_ids) + return; + } - if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu))) - return; - /* - * Use smp_send_reschedule() instead of resched_cpu(). 
- * This way we generate a sched IPI on the target cpu which - * is idle. And the softirq performing nohz idle load balance - * will be run before returning from the IPI. - */ - smp_send_reschedule(ilb_cpu); + if (!cpu_rq(ilb_cpu)->nohz_balance_kick) { + cpu_rq(ilb_cpu)->nohz_balance_kick = 1; + + smp_mb(); + /* + * Use smp_send_reschedule() instead of resched_cpu(). + * This way we generate a sched IPI on the target cpu which + * is idle. And the softirq performing nohz idle load balance + * will be run before returning from the IPI. + */ + smp_send_reschedule(ilb_cpu); + } return; } -static inline void set_cpu_sd_state_busy(void) +/* + * This routine will try to nominate the ilb (idle load balancing) + * owner among the cpus whose ticks are stopped. ilb owner will do the idle + * load balancing on behalf of all those cpus. + * + * When the ilb owner becomes busy, we will not have new ilb owner until some + * idle CPU wakes up and goes back to idle or some busy CPU tries to kick + * idle load balancing by kicking one of the idle CPUs. + * + * Ticks are stopped for the ilb owner as well, with busy CPU kicking this + * ilb owner CPU in future (when there is a need for idle load balancing on + * behalf of all idle CPUs). + */ +void select_nohz_load_balancer(int stop_tick) { - struct sched_domain *sd; int cpu = smp_processor_id(); - if (!test_bit(NOHZ_IDLE, nohz_flags(cpu))) - return; - clear_bit(NOHZ_IDLE, nohz_flags(cpu)); + if (stop_tick) { + if (!cpu_active(cpu)) { + if (atomic_read(&nohz.load_balancer) != cpu) + return; - rcu_read_lock(); - for_each_domain(cpu, sd) - atomic_inc(&sd->groups->sgp->nr_busy_cpus); - rcu_read_unlock(); -} + /* + * If we are going offline and still the leader, + * give up! + */ + if (atomic_cmpxchg(&nohz.load_balancer, cpu, + nr_cpu_ids) != cpu) + BUG(); -void set_cpu_sd_state_idle(void) -{ - struct sched_domain *sd; - int cpu = smp_processor_id(); + return; + } - if (test_bit(NOHZ_IDLE, nohz_flags(cpu))) - return; - set_bit(NOHZ_IDLE, nohz_flags(cpu)); + cpumask_set_cpu(cpu, nohz.idle_cpus_mask); - rcu_read_lock(); - for_each_domain(cpu, sd) - atomic_dec(&sd->groups->sgp->nr_busy_cpus); - rcu_read_unlock(); -} + if (atomic_read(&nohz.first_pick_cpu) == cpu) + atomic_cmpxchg(&nohz.first_pick_cpu, cpu, nr_cpu_ids); + if (atomic_read(&nohz.second_pick_cpu) == cpu) + atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids); -/* - * This routine will record that this cpu is going idle with tick stopped. - * This info will be used in performing idle load balancing in the future. - */ -void select_nohz_load_balancer(int stop_tick) -{ - int cpu = smp_processor_id(); + if (atomic_read(&nohz.load_balancer) >= nr_cpu_ids) { + int new_ilb; - if (stop_tick) { - if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu))) + /* make me the ilb owner */ + if (atomic_cmpxchg(&nohz.load_balancer, nr_cpu_ids, + cpu) != nr_cpu_ids) + return; + + /* + * Check to see if there is a more power-efficient + * ilb. 
+ */ + new_ilb = find_new_ilb(cpu); + if (new_ilb < nr_cpu_ids && new_ilb != cpu) { + atomic_set(&nohz.load_balancer, nr_cpu_ids); + resched_cpu(new_ilb); + return; + } + return; + } + } else { + if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask)) return; - cpumask_set_cpu(cpu, nohz.idle_cpus_mask); - atomic_inc(&nohz.nr_cpus); - set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); + cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); + + if (atomic_read(&nohz.load_balancer) == cpu) + if (atomic_cmpxchg(&nohz.load_balancer, cpu, + nr_cpu_ids) != cpu) + BUG(); } return; } @@ -4920,7 +4624,7 @@ static unsigned long __read_mostly max_load_balance_interval = HZ/10; * Scale the max load_balance interval with the number of CPUs in the system. * This trades load-balance latency on larger machines for less cross talk. */ -void update_max_interval(void) +static void update_max_interval(void) { max_load_balance_interval = HZ*num_online_cpus()/10; } @@ -5012,12 +4716,11 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) struct rq *rq; int balance_cpu; - if (idle != CPU_IDLE || - !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu))) - goto end; + if (idle != CPU_IDLE || !this_rq->nohz_balance_kick) + return; for_each_cpu(balance_cpu, nohz.idle_cpus_mask) { - if (balance_cpu == this_cpu || !idle_cpu(balance_cpu)) + if (balance_cpu == this_cpu) continue; /* @@ -5025,8 +4728,10 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) * work being done for other cpus. Next load * balancing owner will pick it up. */ - if (need_resched()) + if (need_resched()) { + this_rq->nohz_balance_kick = 0; break; + } raw_spin_lock_irq(&this_rq->lock); update_rq_clock(this_rq); @@ -5040,75 +4745,53 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) this_rq->next_balance = rq->next_balance; } nohz.next_balance = this_rq->next_balance; -end: - clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)); + this_rq->nohz_balance_kick = 0; } /* - * Current heuristic for kicking the idle load balancer in the presence - * of an idle cpu is the system. - * - This rq has more than one task. - * - At any scheduler domain level, this cpu's scheduler group has multiple - * busy cpu's exceeding the group's power. - * - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler - * domain span are idle. + * Current heuristic for kicking the idle load balancer + * - first_pick_cpu is the one of the busy CPUs. It will kick + * idle load balancer when it has more than one process active. This + * eliminates the need for idle load balancing altogether when we have + * only one running process in the system (common case). + * - If there are more than one busy CPU, idle load balancer may have + * to run for active_load_balance to happen (i.e., two busy CPUs are + * SMT or core siblings and can run better if they move to different + * physical CPUs). So, second_pick_cpu is the second of the busy CPUs + * which will kick idle load balancer as soon as it has any load. */ static inline int nohz_kick_needed(struct rq *rq, int cpu) { unsigned long now = jiffies; - struct sched_domain *sd; - - if (unlikely(idle_cpu(cpu))) - return 0; - - /* - * We may be recently in ticked or tickless idle mode. At the first - * busy tick after returning from idle, we will update the busy stats. 
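/*
 * Illustrative sketch, not part of the patch above: the nomination done
 * a little further up in the restored select_nohz_load_balancer() is a
 * bare compare-and-swap on nohz.load_balancer -- the first idle CPU that
 * manages to swap the "no owner" value (nr_cpu_ids) for its own id
 * becomes the idle-load-balance owner:
 */
static int try_claim_ilb(atomic_t *owner, int cpu)
{
	return atomic_cmpxchg(owner, nr_cpu_ids, cpu) == nr_cpu_ids;
}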
- */ - set_cpu_sd_state_busy(); - if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) { - clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); - cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); - atomic_dec(&nohz.nr_cpus); - } - - /* - * None are in tickless mode and hence no need for NOHZ idle load - * balancing. - */ - if (likely(!atomic_read(&nohz.nr_cpus))) - return 0; + int ret; + int first_pick_cpu, second_pick_cpu; if (time_before(now, nohz.next_balance)) return 0; - if (rq->nr_running >= 2) - goto need_kick; - - rcu_read_lock(); - for_each_domain(cpu, sd) { - struct sched_group *sg = sd->groups; - struct sched_group_power *sgp = sg->sgp; - int nr_busy = atomic_read(&sgp->nr_busy_cpus); + if (idle_cpu(cpu)) + return 0; - if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1) - goto need_kick_unlock; + first_pick_cpu = atomic_read(&nohz.first_pick_cpu); + second_pick_cpu = atomic_read(&nohz.second_pick_cpu); - if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight - && (cpumask_first_and(nohz.idle_cpus_mask, - sched_domain_span(sd)) < cpu)) - goto need_kick_unlock; + if (first_pick_cpu < nr_cpu_ids && first_pick_cpu != cpu && + second_pick_cpu < nr_cpu_ids && second_pick_cpu != cpu) + return 0; - if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING))) - break; + ret = atomic_cmpxchg(&nohz.first_pick_cpu, nr_cpu_ids, cpu); + if (ret == nr_cpu_ids || ret == cpu) { + atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids); + if (rq->nr_running > 1) + return 1; + } else { + ret = atomic_cmpxchg(&nohz.second_pick_cpu, nr_cpu_ids, cpu); + if (ret == nr_cpu_ids || ret == cpu) { + if (rq->nr_running) + return 1; + } } - rcu_read_unlock(); return 0; - -need_kick_unlock: - rcu_read_unlock(); -need_kick: - return 1; } #else static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { } @@ -5143,14 +4826,14 @@ static inline int on_null_domain(int cpu) /* * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. 
*/ -void trigger_load_balance(struct rq *rq, int cpu) +static inline void trigger_load_balance(struct rq *rq, int cpu) { /* Don't need to rebalance while attached to NULL domain */ if (time_after_eq(jiffies, rq->next_balance) && likely(!on_null_domain(cpu))) raise_softirq(SCHED_SOFTIRQ); #ifdef CONFIG_NO_HZ - if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu))) + else if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu))) nohz_balancer_kick(cpu); #endif } @@ -5165,6 +4848,15 @@ static void rq_offline_fair(struct rq *rq) update_sysctl(); } +#else /* CONFIG_SMP */ + +/* + * on UP we do not need to balance between CPUs: + */ +static inline void idle_balance(int cpu, struct rq *rq) +{ +} + #endif /* CONFIG_SMP */ /* @@ -5188,8 +4880,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) */ static void task_fork_fair(struct task_struct *p) { - struct cfs_rq *cfs_rq; - struct sched_entity *se = &p->se, *curr; + struct cfs_rq *cfs_rq = task_cfs_rq(current); + struct sched_entity *se = &p->se, *curr = cfs_rq->curr; int this_cpu = smp_processor_id(); struct rq *rq = this_rq(); unsigned long flags; @@ -5198,9 +4890,6 @@ static void task_fork_fair(struct task_struct *p) update_rq_clock(rq); - cfs_rq = task_cfs_rq(current); - curr = cfs_rq->curr; - if (unlikely(task_cpu(p) != this_cpu)) { rcu_read_lock(); __set_task_cpu(p, this_cpu); @@ -5310,16 +4999,6 @@ static void set_curr_task_fair(struct rq *rq) } } -void init_cfs_rq(struct cfs_rq *cfs_rq) -{ - cfs_rq->tasks_timeline = RB_ROOT; - INIT_LIST_HEAD(&cfs_rq->tasks); - cfs_rq->min_vruntime = (u64)(-(1LL << 20)); -#ifndef CONFIG_64BIT - cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; -#endif -} - #ifdef CONFIG_FAIR_GROUP_SCHED static void task_move_group_fair(struct task_struct *p, int on_rq) { @@ -5336,182 +5015,13 @@ static void task_move_group_fair(struct task_struct *p, int on_rq) * to another cgroup's rq. This does somewhat interfere with the * fair sleeper stuff for the first placement, but who cares. */ - /* - * When !on_rq, vruntime of the task has usually NOT been normalized. - * But there are some cases where it has already been normalized: - * - * - Moving a forked child which is waiting for being woken up by - * wake_up_new_task(). - * - Moving a task which has been woken up by try_to_wake_up() and - * waiting for actually being woken up by sched_ttwu_pending(). - * - * To prevent boost or penalty in the new cfs_rq caused by delta - * min_vruntime between the two cfs_rqs, we skip vruntime adjustment. 
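/*
 * Illustrative sketch, not part of the patch above: the removed comment
 * explains when the usual vruntime re-basing (done just below for the
 * !on_rq case) must be skipped because the value is already normalized.
 * The re-basing itself keeps the task's lag relative to min_vruntime:
 * e.g. a task at vruntime 105ms on a cfs_rq whose min_vruntime is 100ms
 * (5ms of lag) lands at 2005ms on a queue whose min_vruntime is 2000ms.
 * With a hypothetical helper name:
 */
static void rebase_vruntime(struct sched_entity *se,
			    struct cfs_rq *from, struct cfs_rq *to)
{
	se->vruntime -= from->min_vruntime;	/* keep only the task's lag */
	se->vruntime += to->min_vruntime;	/* re-express it on the new queue */
}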
- */ - if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING)) - on_rq = 1; - if (!on_rq) p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime; set_task_rq(p, task_cpu(p)); if (!on_rq) p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime; } - -void free_fair_sched_group(struct task_group *tg) -{ - int i; - - destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); - - for_each_possible_cpu(i) { - if (tg->cfs_rq) - kfree(tg->cfs_rq[i]); - if (tg->se) - kfree(tg->se[i]); - } - - kfree(tg->cfs_rq); - kfree(tg->se); -} - -int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) -{ - struct cfs_rq *cfs_rq; - struct sched_entity *se; - int i; - - tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); - if (!tg->cfs_rq) - goto err; - tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL); - if (!tg->se) - goto err; - - tg->shares = NICE_0_LOAD; - - init_cfs_bandwidth(tg_cfs_bandwidth(tg)); - - for_each_possible_cpu(i) { - cfs_rq = kzalloc_node(sizeof(struct cfs_rq), - GFP_KERNEL, cpu_to_node(i)); - if (!cfs_rq) - goto err; - - se = kzalloc_node(sizeof(struct sched_entity), - GFP_KERNEL, cpu_to_node(i)); - if (!se) - goto err_free_rq; - - init_cfs_rq(cfs_rq); - init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); - } - - return 1; - -err_free_rq: - kfree(cfs_rq); -err: - return 0; -} - -void unregister_fair_sched_group(struct task_group *tg, int cpu) -{ - struct rq *rq = cpu_rq(cpu); - unsigned long flags; - - /* - * Only empty task groups can be destroyed; so we can speculatively - * check on_list without danger of it being re-added. - */ - if (!tg->cfs_rq[cpu]->on_list) - return; - - raw_spin_lock_irqsave(&rq->lock, flags); - list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); - raw_spin_unlock_irqrestore(&rq->lock, flags); -} - -void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, - struct sched_entity *se, int cpu, - struct sched_entity *parent) -{ - struct rq *rq = cpu_rq(cpu); - - cfs_rq->tg = tg; - cfs_rq->rq = rq; -#ifdef CONFIG_SMP - /* allow initial update_cfs_load() to truncate */ - cfs_rq->load_stamp = 1; #endif - init_cfs_rq_runtime(cfs_rq); - - tg->cfs_rq[cpu] = cfs_rq; - tg->se[cpu] = se; - - /* se could be NULL for root_task_group */ - if (!se) - return; - - if (!parent) - se->cfs_rq = &rq->cfs; - else - se->cfs_rq = parent->my_q; - - se->my_q = cfs_rq; - update_load_set(&se->load, 0); - se->parent = parent; -} - -static DEFINE_MUTEX(shares_mutex); - -int sched_group_set_shares(struct task_group *tg, unsigned long shares) -{ - int i; - unsigned long flags; - - /* - * We can't change the weight of the root cgroup. 
- */ - if (!tg->se[0]) - return -EINVAL; - - shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES)); - - mutex_lock(&shares_mutex); - if (tg->shares == shares) - goto done; - - tg->shares = shares; - for_each_possible_cpu(i) { - struct rq *rq = cpu_rq(i); - struct sched_entity *se; - - se = tg->se[i]; - /* Propagate contribution to hierarchy */ - raw_spin_lock_irqsave(&rq->lock, flags); - for_each_sched_entity(se) - update_cfs_shares(group_cfs_rq(se)); - raw_spin_unlock_irqrestore(&rq->lock, flags); - } - -done: - mutex_unlock(&shares_mutex); - return 0; -} -#else /* CONFIG_FAIR_GROUP_SCHED */ - -void free_fair_sched_group(struct task_group *tg) { } - -int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) -{ - return 1; -} - -void unregister_fair_sched_group(struct task_group *tg, int cpu) { } - -#endif /* CONFIG_FAIR_GROUP_SCHED */ - static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task) { @@ -5531,7 +5041,7 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task /* * All the scheduling class methods: */ -const struct sched_class fair_sched_class = { +static const struct sched_class fair_sched_class = { .next = &idle_sched_class, .enqueue_task = enqueue_task_fair, .dequeue_task = dequeue_task_fair, @@ -5568,7 +5078,7 @@ const struct sched_class fair_sched_class = { }; #ifdef CONFIG_SCHED_DEBUG -void print_cfs_stats(struct seq_file *m, int cpu) +static void print_cfs_stats(struct seq_file *m, int cpu) { struct cfs_rq *cfs_rq; @@ -5578,15 +5088,3 @@ void print_cfs_stats(struct seq_file *m, int cpu) rcu_read_unlock(); } #endif - -__init void init_sched_fair_class(void) -{ -#ifdef CONFIG_SMP - open_softirq(SCHED_SOFTIRQ, run_rebalance_domains); - -#ifdef CONFIG_NO_HZ - zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); -#endif -#endif /* SMP */ - -} diff --git a/trunk/kernel/sched/features.h b/trunk/kernel/sched_features.h similarity index 75% rename from trunk/kernel/sched/features.h rename to trunk/kernel/sched_features.h index e61fd73913d0..84802245abd2 100644 --- a/trunk/kernel/sched/features.h +++ b/trunk/kernel/sched_features.h @@ -3,13 +3,13 @@ * them to run sooner, but does not allow tons of sleepers to * rip the spread apart. */ -SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true) +SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1) /* * Place new tasks ahead so that they do not starve already running * tasks */ -SCHED_FEAT(START_DEBIT, true) +SCHED_FEAT(START_DEBIT, 1) /* * Based on load and program behaviour, see if it makes sense to place @@ -17,54 +17,54 @@ SCHED_FEAT(START_DEBIT, true) * improve cache locality. Typically used with SYNC wakeups as * generated by pipes and the like, see also SYNC_WAKEUPS. */ -SCHED_FEAT(AFFINE_WAKEUPS, true) +SCHED_FEAT(AFFINE_WAKEUPS, 1) /* * Prefer to schedule the task we woke last (assuming it failed * wakeup-preemption), since its likely going to consume data we * touched, increases cache locality. */ -SCHED_FEAT(NEXT_BUDDY, false) +SCHED_FEAT(NEXT_BUDDY, 0) /* * Prefer to schedule the task that ran last (when we did * wake-preempt) as that likely will touch the same data, increases * cache locality. */ -SCHED_FEAT(LAST_BUDDY, true) +SCHED_FEAT(LAST_BUDDY, 1) /* * Consider buddies to be cache hot, decreases the likelyness of a * cache buddy being migrated away, increases cache locality. 
*/ -SCHED_FEAT(CACHE_HOT_BUDDY, true) +SCHED_FEAT(CACHE_HOT_BUDDY, 1) /* * Use arch dependent cpu power functions */ -SCHED_FEAT(ARCH_POWER, false) +SCHED_FEAT(ARCH_POWER, 0) -SCHED_FEAT(HRTICK, false) -SCHED_FEAT(DOUBLE_TICK, false) -SCHED_FEAT(LB_BIAS, true) +SCHED_FEAT(HRTICK, 0) +SCHED_FEAT(DOUBLE_TICK, 0) +SCHED_FEAT(LB_BIAS, 1) /* * Spin-wait on mutex acquisition when the mutex owner is running on * another cpu -- assumes that when the owner is running, it will soon * release the lock. Decreases scheduling overhead. */ -SCHED_FEAT(OWNER_SPIN, true) +SCHED_FEAT(OWNER_SPIN, 1) /* * Decrement CPU power based on time not spent running tasks */ -SCHED_FEAT(NONTASK_POWER, true) +SCHED_FEAT(NONTASK_POWER, 1) /* * Queue remote wakeups on the target CPU and process them * using the scheduler IPI. Reduces rq->lock contention/bounces. */ -SCHED_FEAT(TTWU_QUEUE, true) +SCHED_FEAT(TTWU_QUEUE, 1) -SCHED_FEAT(FORCE_SD_OVERLAP, false) -SCHED_FEAT(RT_RUNTIME_SHARE, true) +SCHED_FEAT(FORCE_SD_OVERLAP, 0) +SCHED_FEAT(RT_RUNTIME_SHARE, 1) diff --git a/trunk/kernel/sched/idle_task.c b/trunk/kernel/sched_idletask.c similarity index 96% rename from trunk/kernel/sched/idle_task.c rename to trunk/kernel/sched_idletask.c index 91b4c957f289..0a51882534ea 100644 --- a/trunk/kernel/sched/idle_task.c +++ b/trunk/kernel/sched_idletask.c @@ -1,5 +1,3 @@ -#include "sched.h" - /* * idle-task scheduling class. * @@ -73,7 +71,7 @@ static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task /* * Simple, special scheduling class for the per-CPU idle tasks: */ -const struct sched_class idle_sched_class = { +static const struct sched_class idle_sched_class = { /* .next is NULL */ /* no enqueue/yield_task for idle tasks */ diff --git a/trunk/kernel/sched/rt.c b/trunk/kernel/sched_rt.c similarity index 90% rename from trunk/kernel/sched/rt.c rename to trunk/kernel/sched_rt.c index 3640ebbb466b..583a1368afe6 100644 --- a/trunk/kernel/sched/rt.c +++ b/trunk/kernel/sched_rt.c @@ -3,92 +3,7 @@ * policies) */ -#include "sched.h" - -#include - -static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun); - -struct rt_bandwidth def_rt_bandwidth; - -static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer) -{ - struct rt_bandwidth *rt_b = - container_of(timer, struct rt_bandwidth, rt_period_timer); - ktime_t now; - int overrun; - int idle = 0; - - for (;;) { - now = hrtimer_cb_get_time(timer); - overrun = hrtimer_forward(timer, now, rt_b->rt_period); - - if (!overrun) - break; - - idle = do_sched_rt_period_timer(rt_b, overrun); - } - - return idle ? 
HRTIMER_NORESTART : HRTIMER_RESTART; -} - -void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) -{ - rt_b->rt_period = ns_to_ktime(period); - rt_b->rt_runtime = runtime; - - raw_spin_lock_init(&rt_b->rt_runtime_lock); - - hrtimer_init(&rt_b->rt_period_timer, - CLOCK_MONOTONIC, HRTIMER_MODE_REL); - rt_b->rt_period_timer.function = sched_rt_period_timer; -} - -static void start_rt_bandwidth(struct rt_bandwidth *rt_b) -{ - if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) - return; - - if (hrtimer_active(&rt_b->rt_period_timer)) - return; - - raw_spin_lock(&rt_b->rt_runtime_lock); - start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period); - raw_spin_unlock(&rt_b->rt_runtime_lock); -} - -void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq) -{ - struct rt_prio_array *array; - int i; - - array = &rt_rq->active; - for (i = 0; i < MAX_RT_PRIO; i++) { - INIT_LIST_HEAD(array->queue + i); - __clear_bit(i, array->bitmap); - } - /* delimiter for bitsearch: */ - __set_bit(MAX_RT_PRIO, array->bitmap); - -#if defined CONFIG_SMP - rt_rq->highest_prio.curr = MAX_RT_PRIO; - rt_rq->highest_prio.next = MAX_RT_PRIO; - rt_rq->rt_nr_migratory = 0; - rt_rq->overloaded = 0; - plist_head_init(&rt_rq->pushable_tasks); -#endif - - rt_rq->rt_time = 0; - rt_rq->rt_throttled = 0; - rt_rq->rt_runtime = 0; - raw_spin_lock_init(&rt_rq->rt_runtime_lock); -} - #ifdef CONFIG_RT_GROUP_SCHED -static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b) -{ - hrtimer_cancel(&rt_b->rt_period_timer); -} #define rt_entity_is_task(rt_se) (!(rt_se)->my_q) @@ -110,91 +25,6 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) return rt_se->rt_rq; } -void free_rt_sched_group(struct task_group *tg) -{ - int i; - - if (tg->rt_se) - destroy_rt_bandwidth(&tg->rt_bandwidth); - - for_each_possible_cpu(i) { - if (tg->rt_rq) - kfree(tg->rt_rq[i]); - if (tg->rt_se) - kfree(tg->rt_se[i]); - } - - kfree(tg->rt_rq); - kfree(tg->rt_se); -} - -void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, - struct sched_rt_entity *rt_se, int cpu, - struct sched_rt_entity *parent) -{ - struct rq *rq = cpu_rq(cpu); - - rt_rq->highest_prio.curr = MAX_RT_PRIO; - rt_rq->rt_nr_boosted = 0; - rt_rq->rq = rq; - rt_rq->tg = tg; - - tg->rt_rq[cpu] = rt_rq; - tg->rt_se[cpu] = rt_se; - - if (!rt_se) - return; - - if (!parent) - rt_se->rt_rq = &rq->rt; - else - rt_se->rt_rq = parent->my_q; - - rt_se->my_q = rt_rq; - rt_se->parent = parent; - INIT_LIST_HEAD(&rt_se->run_list); -} - -int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) -{ - struct rt_rq *rt_rq; - struct sched_rt_entity *rt_se; - int i; - - tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL); - if (!tg->rt_rq) - goto err; - tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL); - if (!tg->rt_se) - goto err; - - init_rt_bandwidth(&tg->rt_bandwidth, - ktime_to_ns(def_rt_bandwidth.rt_period), 0); - - for_each_possible_cpu(i) { - rt_rq = kzalloc_node(sizeof(struct rt_rq), - GFP_KERNEL, cpu_to_node(i)); - if (!rt_rq) - goto err; - - rt_se = kzalloc_node(sizeof(struct sched_rt_entity), - GFP_KERNEL, cpu_to_node(i)); - if (!rt_se) - goto err_free_rq; - - init_rt_rq(rt_rq, cpu_rq(i)); - rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; - init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]); - } - - return 1; - -err_free_rq: - kfree(rt_rq); -err: - return 0; -} - #else /* CONFIG_RT_GROUP_SCHED */ #define rt_entity_is_task(rt_se) (1) @@ -217,12 +47,6 @@ static inline struct rt_rq *rt_rq_of_se(struct 
sched_rt_entity *rt_se) return &rq->rt; } -void free_rt_sched_group(struct task_group *tg) { } - -int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) -{ - return 1; -} #endif /* CONFIG_RT_GROUP_SCHED */ #ifdef CONFIG_SMP @@ -732,28 +556,6 @@ static void enable_runtime(struct rq *rq) raw_spin_unlock_irqrestore(&rq->lock, flags); } -int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu) -{ - int cpu = (int)(long)hcpu; - - switch (action) { - case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: - disable_runtime(cpu_rq(cpu)); - return NOTIFY_OK; - - case CPU_DOWN_FAILED: - case CPU_DOWN_FAILED_FROZEN: - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - enable_runtime(cpu_rq(cpu)); - return NOTIFY_OK; - - default: - return NOTIFY_DONE; - } -} - static int balance_runtime(struct rt_rq *rt_rq) { int more = 0; @@ -846,7 +648,7 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) if (rt_rq->rt_throttled) return rt_rq_throttled(rt_rq); - if (runtime >= sched_rt_period(rt_rq)) + if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq)) return 0; balance_runtime(rt_rq); @@ -1155,8 +957,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) } /* - * Put task to the head or the end of the run list without the overhead of - * dequeue followed by enqueue. + * Put task to the end of the run list without the overhead of dequeue + * followed by enqueue. */ static void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head) @@ -1200,9 +1002,6 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags) cpu = task_cpu(p); - if (p->rt.nr_cpus_allowed == 1) - goto out; - /* For anything but wake ups, just return the task_cpu */ if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK) goto out; @@ -1379,6 +1178,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) /* Only try algorithms three times */ #define RT_MAX_TRIES 3 +static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep); + static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && @@ -1852,14 +1653,13 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p) pull_rt_task(rq); } -void init_sched_rt_class(void) +static inline void init_sched_rt_class(void) { unsigned int i; - for_each_possible_cpu(i) { + for_each_possible_cpu(i) zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), GFP_KERNEL, cpu_to_node(i)); - } } #endif /* CONFIG_SMP */ @@ -2000,7 +1800,7 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task) return 0; } -const struct sched_class rt_sched_class = { +static const struct sched_class rt_sched_class = { .next = &fair_sched_class, .enqueue_task = enqueue_task_rt, .dequeue_task = dequeue_task_rt, @@ -2035,7 +1835,7 @@ const struct sched_class rt_sched_class = { #ifdef CONFIG_SCHED_DEBUG extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); -void print_rt_stats(struct seq_file *m, int cpu) +static void print_rt_stats(struct seq_file *m, int cpu) { rt_rq_iter_t iter; struct rt_rq *rt_rq; diff --git a/trunk/kernel/sched/stats.h b/trunk/kernel/sched_stats.h similarity index 70% rename from trunk/kernel/sched/stats.h rename to trunk/kernel/sched_stats.h index 2ef90a51ec5e..87f9e36ea56e 100644 --- a/trunk/kernel/sched/stats.h +++ b/trunk/kernel/sched_stats.h @@ -1,5 +1,108 @@ #ifdef CONFIG_SCHEDSTATS +/* + * bump this up when changing the output format or the meaning of an existing + * format, so 
that tools can adapt (or abort) + */ +#define SCHEDSTAT_VERSION 15 + +static int show_schedstat(struct seq_file *seq, void *v) +{ + int cpu; + int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9; + char *mask_str = kmalloc(mask_len, GFP_KERNEL); + + if (mask_str == NULL) + return -ENOMEM; + + seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION); + seq_printf(seq, "timestamp %lu\n", jiffies); + for_each_online_cpu(cpu) { + struct rq *rq = cpu_rq(cpu); +#ifdef CONFIG_SMP + struct sched_domain *sd; + int dcount = 0; +#endif + + /* runqueue-specific stats */ + seq_printf(seq, + "cpu%d %u %u %u %u %u %u %llu %llu %lu", + cpu, rq->yld_count, + rq->sched_switch, rq->sched_count, rq->sched_goidle, + rq->ttwu_count, rq->ttwu_local, + rq->rq_cpu_time, + rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount); + + seq_printf(seq, "\n"); + +#ifdef CONFIG_SMP + /* domain-specific stats */ + rcu_read_lock(); + for_each_domain(cpu, sd) { + enum cpu_idle_type itype; + + cpumask_scnprintf(mask_str, mask_len, + sched_domain_span(sd)); + seq_printf(seq, "domain%d %s", dcount++, mask_str); + for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES; + itype++) { + seq_printf(seq, " %u %u %u %u %u %u %u %u", + sd->lb_count[itype], + sd->lb_balanced[itype], + sd->lb_failed[itype], + sd->lb_imbalance[itype], + sd->lb_gained[itype], + sd->lb_hot_gained[itype], + sd->lb_nobusyq[itype], + sd->lb_nobusyg[itype]); + } + seq_printf(seq, + " %u %u %u %u %u %u %u %u %u %u %u %u\n", + sd->alb_count, sd->alb_failed, sd->alb_pushed, + sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed, + sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed, + sd->ttwu_wake_remote, sd->ttwu_move_affine, + sd->ttwu_move_balance); + } + rcu_read_unlock(); +#endif + } + kfree(mask_str); + return 0; +} + +static int schedstat_open(struct inode *inode, struct file *file) +{ + unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32); + char *buf = kmalloc(size, GFP_KERNEL); + struct seq_file *m; + int res; + + if (!buf) + return -ENOMEM; + res = single_open(file, show_schedstat, NULL); + if (!res) { + m = file->private_data; + m->buf = buf; + m->size = size; + } else + kfree(buf); + return res; +} + +static const struct file_operations proc_schedstat_operations = { + .open = schedstat_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init proc_schedstat_init(void) +{ + proc_create("schedstat", 0, NULL, &proc_schedstat_operations); + return 0; +} +module_init(proc_schedstat_init); /* * Expects runqueue lock to be held for atomicity of update @@ -180,7 +283,8 @@ static inline void account_group_user_time(struct task_struct *tsk, return; raw_spin_lock(&cputimer->lock); - cputimer->cputime.utime += cputime; + cputimer->cputime.utime = + cputime_add(cputimer->cputime.utime, cputime); raw_spin_unlock(&cputimer->lock); } @@ -203,7 +307,8 @@ static inline void account_group_system_time(struct task_struct *tsk, return; raw_spin_lock(&cputimer->lock); - cputimer->cputime.stime += cputime; + cputimer->cputime.stime = + cputime_add(cputimer->cputime.stime, cputime); raw_spin_unlock(&cputimer->lock); } diff --git a/trunk/kernel/sched/stop_task.c b/trunk/kernel/sched_stoptask.c similarity index 97% rename from trunk/kernel/sched/stop_task.c rename to trunk/kernel/sched_stoptask.c index 7b386e86fd23..8b44e7fa7fb3 100644 --- a/trunk/kernel/sched/stop_task.c +++ b/trunk/kernel/sched_stoptask.c @@ -1,5 +1,3 @@ -#include "sched.h" - /* * stop-task scheduling class. 
* @@ -82,7 +80,7 @@ get_rr_interval_stop(struct rq *rq, struct task_struct *task) /* * Simple, special scheduling class for the per-CPU stop tasks: */ -const struct sched_class stop_sched_class = { +static const struct sched_class stop_sched_class = { .next = &rt_sched_class, .enqueue_task = enqueue_task_stop, diff --git a/trunk/kernel/signal.c b/trunk/kernel/signal.c index 56ce3a618b28..b3f78d09a105 100644 --- a/trunk/kernel/signal.c +++ b/trunk/kernel/signal.c @@ -1629,8 +1629,10 @@ bool do_notify_parent(struct task_struct *tsk, int sig) info.si_uid = __task_cred(tsk)->uid; rcu_read_unlock(); - info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime); - info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime); + info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime, + tsk->signal->utime)); + info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime, + tsk->signal->stime)); info.si_status = tsk->exit_code & 0x7f; if (tsk->exit_code & 0x80) @@ -1992,6 +1994,8 @@ static bool do_signal_stop(int signr) */ if (!(sig->flags & SIGNAL_STOP_STOPPED)) sig->group_exit_code = signr; + else + WARN_ON_ONCE(!current->ptrace); sig->group_stop_count = 0; diff --git a/trunk/kernel/softirq.c b/trunk/kernel/softirq.c index 4eb3a0fa351e..2c71d91efff0 100644 --- a/trunk/kernel/softirq.c +++ b/trunk/kernel/softirq.c @@ -347,12 +347,12 @@ void irq_exit(void) if (!in_interrupt() && local_softirq_pending()) invoke_softirq(); + rcu_irq_exit(); #ifdef CONFIG_NO_HZ /* Make sure that timer wheel updates are propagated */ if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched()) - tick_nohz_irq_exit(); + tick_nohz_stop_sched_tick(0); #endif - rcu_irq_exit(); preempt_enable_no_resched(); } diff --git a/trunk/kernel/sys.c b/trunk/kernel/sys.c index ddf8155bf3f8..481611fbd079 100644 --- a/trunk/kernel/sys.c +++ b/trunk/kernel/sys.c @@ -1605,7 +1605,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) unsigned long maxrss = 0; memset((char *) r, 0, sizeof *r); - utime = stime = 0; + utime = stime = cputime_zero; if (who == RUSAGE_THREAD) { task_times(current, &utime, &stime); @@ -1635,8 +1635,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) case RUSAGE_SELF: thread_group_times(p, &tgutime, &tgstime); - utime += tgutime; - stime += tgstime; + utime = cputime_add(utime, tgutime); + stime = cputime_add(stime, tgstime); r->ru_nvcsw += p->signal->nvcsw; r->ru_nivcsw += p->signal->nivcsw; r->ru_minflt += p->signal->min_flt; diff --git a/trunk/kernel/time/clockevents.c b/trunk/kernel/time/clockevents.c index 1ecd6ba36d6c..c4eb71c8b2ea 100644 --- a/trunk/kernel/time/clockevents.c +++ b/trunk/kernel/time/clockevents.c @@ -387,6 +387,7 @@ void clockevents_exchange_device(struct clock_event_device *old, * released list and do a notify add later. 
*/ if (old) { + old->event_handler = clockevents_handle_noop; clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED); list_del(&old->list); list_add(&old->list, &clockevents_released); diff --git a/trunk/kernel/time/tick-sched.c b/trunk/kernel/time/tick-sched.c index 7656642e4b8e..40420644d0ba 100644 --- a/trunk/kernel/time/tick-sched.c +++ b/trunk/kernel/time/tick-sched.c @@ -275,17 +275,42 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time) } EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us); -static void tick_nohz_stop_sched_tick(struct tick_sched *ts) +/** + * tick_nohz_stop_sched_tick - stop the idle tick from the idle task + * + * When the next event is more than a tick into the future, stop the idle tick + * Called either from the idle loop or from irq_exit() when an idle period was + * just interrupted by an interrupt which did not cause a reschedule. + */ +void tick_nohz_stop_sched_tick(int inidle) { - unsigned long seq, last_jiffies, next_jiffies, delta_jiffies; + unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags; + struct tick_sched *ts; ktime_t last_update, expires, now; struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; u64 time_delta; int cpu; + local_irq_save(flags); + cpu = smp_processor_id(); ts = &per_cpu(tick_cpu_sched, cpu); + /* + * Call to tick_nohz_start_idle stops the last_update_time from being + * updated. Thus, it must not be called in the event we are called from + * irq_exit() with the prior state different than idle. + */ + if (!inidle && !ts->inidle) + goto end; + + /* + * Set ts->inidle unconditionally. Even if the system did not + * switch to NOHZ mode the cpu frequency governers rely on the + * update of the idle time accounting in tick_nohz_start_idle(). + */ + ts->inidle = 1; + now = tick_nohz_start_idle(cpu, ts); /* @@ -301,10 +326,10 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts) } if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) - return; + goto end; if (need_resched()) - return; + goto end; if (unlikely(local_softirq_pending() && cpu_online(cpu))) { static int ratelimit; @@ -314,7 +339,7 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts) (unsigned int) local_softirq_pending()); ratelimit++; } - return; + goto end; } ts->idle_calls++; @@ -409,6 +434,7 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts) ts->idle_tick = hrtimer_get_expires(&ts->sched_timer); ts->tick_stopped = 1; ts->idle_jiffies = last_jiffies; + rcu_enter_nohz(); } ts->idle_sleeps++; @@ -446,64 +472,8 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts) ts->next_jiffies = next_jiffies; ts->last_jiffies = last_jiffies; ts->sleep_length = ktime_sub(dev->next_event, now); -} - -/** - * tick_nohz_idle_enter - stop the idle tick from the idle task - * - * When the next event is more than a tick into the future, stop the idle tick - * Called when we start the idle loop. - * - * The arch is responsible of calling: - * - * - rcu_idle_enter() after its last use of RCU before the CPU is put - * to sleep. - * - rcu_idle_exit() before the first use of RCU after the CPU is woken up. - */ -void tick_nohz_idle_enter(void) -{ - struct tick_sched *ts; - - WARN_ON_ONCE(irqs_disabled()); - - /* - * Update the idle state in the scheduler domain hierarchy - * when tick_nohz_stop_sched_tick() is called from the idle loop. - * State will be updated to busy during the first busy tick after - * exiting idle. 
- */ - set_cpu_sd_state_idle(); - - local_irq_disable(); - - ts = &__get_cpu_var(tick_cpu_sched); - /* - * set ts->inidle unconditionally. even if the system did not - * switch to nohz mode the cpu frequency governers rely on the - * update of the idle time accounting in tick_nohz_start_idle(). - */ - ts->inidle = 1; - tick_nohz_stop_sched_tick(ts); - - local_irq_enable(); -} - -/** - * tick_nohz_irq_exit - update next tick event from interrupt exit - * - * When an interrupt fires while we are idle and it doesn't cause - * a reschedule, it may still add, modify or delete a timer, enqueue - * an RCU callback, etc... - * So we need to re-calculate and reprogram the next tick event. - */ -void tick_nohz_irq_exit(void) -{ - struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); - - if (!ts->inidle) - return; - - tick_nohz_stop_sched_tick(ts); +end: + local_irq_restore(flags); } /** @@ -545,13 +515,11 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) } /** - * tick_nohz_idle_exit - restart the idle tick from the idle task + * tick_nohz_restart_sched_tick - restart the idle tick from the idle task * * Restart the idle tick when the CPU is woken up from idle - * This also exit the RCU extended quiescent state. The CPU - * can use RCU again after this function is called. */ -void tick_nohz_idle_exit(void) +void tick_nohz_restart_sched_tick(void) { int cpu = smp_processor_id(); struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); @@ -561,7 +529,6 @@ void tick_nohz_idle_exit(void) ktime_t now; local_irq_disable(); - if (ts->idle_active || (ts->inidle && ts->tick_stopped)) now = ktime_get(); @@ -576,6 +543,8 @@ void tick_nohz_idle_exit(void) ts->inidle = 0; + rcu_exit_nohz(); + /* Update jiffies first */ select_nohz_load_balancer(0); tick_do_update_jiffies64(now); diff --git a/trunk/kernel/time/timekeeping.c b/trunk/kernel/time/timekeeping.c index 0c6358186401..237841378c03 100644 --- a/trunk/kernel/time/timekeeping.c +++ b/trunk/kernel/time/timekeeping.c @@ -131,7 +131,7 @@ static inline s64 timekeeping_get_ns_raw(void) /* calculate the delta since the last update_wall_time: */ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; - /* return delta convert to nanoseconds. */ + /* return delta convert to nanoseconds using ntp adjusted mult. */ return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift); } @@ -813,11 +813,11 @@ static void timekeeping_adjust(s64 offset) * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs. * * Note we subtract one in the shift, so that error is really error*2. - * This "saves" dividing(shifting) interval twice, but keeps the - * (error > interval) comparison as still measuring if error is + * This "saves" dividing(shifting) intererval twice, but keeps the + * (error > interval) comparision as still measuring if error is * larger then half an interval. * - * Note: It does not "save" on aggravation when reading the code. + * Note: It does not "save" on aggrivation when reading the code. */ error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1); if (error > interval) { @@ -833,7 +833,7 @@ static void timekeeping_adjust(s64 offset) * nanosecond, and store the amount rounded up into * the error. This causes the likely below to be unlikely. * - * The proper fix is to avoid rounding up by using + * The properfix is to avoid rounding up by using * the high precision timekeeper.xtime_nsec instead of * xtime.tv_nsec everywhere. Fixing this will take some * time. 
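[ Illustrative note, not part of the patch: the tick-sched.c hunk above restores tick_nohz_stop_sched_tick(int inidle) and tick_nohz_restart_sched_tick() as the only entry points, with irq_exit() passing inidle=0 and the idle task passing inidle=1, and with rcu_enter_nohz()/rcu_exit_nohz() handled inside those functions. A minimal sketch of the caller pattern this API assumes is below; arch_cpu_halt() is a hypothetical stand-in for the arch-specific low-power wait, and the preemption handling is simplified. ]

/*
 * Sketch only -- not taken from this patch.  Shows how an arch idle loop
 * is expected to bracket an idle period with the restored tick_nohz_* API.
 */
static void cpu_idle_sketch(void)
{
	while (1) {
		tick_nohz_stop_sched_tick(1);	/* idle task: inidle = 1 */
		while (!need_resched())
			arch_cpu_halt();	/* hypothetical wait-for-interrupt */
		tick_nohz_restart_sched_tick();	/* wakeup: re-arm the periodic tick */
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

[ Interrupts that arrive while the tick is stopped return through irq_exit(), which, per the softirq.c hunk above, calls tick_nohz_stop_sched_tick(0) to reprogram the next event without touching ts->inidle. ]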
diff --git a/trunk/kernel/timer.c b/trunk/kernel/timer.c index a297ffcf888e..9c3c62b0c4bc 100644 --- a/trunk/kernel/timer.c +++ b/trunk/kernel/timer.c @@ -427,12 +427,6 @@ static int timer_fixup_init(void *addr, enum debug_obj_state state) } } -/* Stub timer callback for improperly used timers. */ -static void stub_timer(unsigned long data) -{ - WARN_ON(1); -} - /* * fixup_activate is called when: * - an active object is activated @@ -456,8 +450,7 @@ static int timer_fixup_activate(void *addr, enum debug_obj_state state) debug_object_activate(timer, &timer_debug_descr); return 0; } else { - setup_timer(timer, stub_timer, 0); - return 1; + WARN_ON_ONCE(1); } return 0; @@ -487,40 +480,12 @@ static int timer_fixup_free(void *addr, enum debug_obj_state state) } } -/* - * fixup_assert_init is called when: - * - an untracked/uninit-ed object is found - */ -static int timer_fixup_assert_init(void *addr, enum debug_obj_state state) -{ - struct timer_list *timer = addr; - - switch (state) { - case ODEBUG_STATE_NOTAVAILABLE: - if (timer->entry.prev == TIMER_ENTRY_STATIC) { - /* - * This is not really a fixup. The timer was - * statically initialized. We just make sure that it - * is tracked in the object tracker. - */ - debug_object_init(timer, &timer_debug_descr); - return 0; - } else { - setup_timer(timer, stub_timer, 0); - return 1; - } - default: - return 0; - } -} - static struct debug_obj_descr timer_debug_descr = { - .name = "timer_list", - .debug_hint = timer_debug_hint, - .fixup_init = timer_fixup_init, - .fixup_activate = timer_fixup_activate, - .fixup_free = timer_fixup_free, - .fixup_assert_init = timer_fixup_assert_init, + .name = "timer_list", + .debug_hint = timer_debug_hint, + .fixup_init = timer_fixup_init, + .fixup_activate = timer_fixup_activate, + .fixup_free = timer_fixup_free, }; static inline void debug_timer_init(struct timer_list *timer) @@ -543,11 +508,6 @@ static inline void debug_timer_free(struct timer_list *timer) debug_object_free(timer, &timer_debug_descr); } -static inline void debug_timer_assert_init(struct timer_list *timer) -{ - debug_object_assert_init(timer, &timer_debug_descr); -} - static void __init_timer(struct timer_list *timer, const char *name, struct lock_class_key *key); @@ -571,7 +531,6 @@ EXPORT_SYMBOL_GPL(destroy_timer_on_stack); static inline void debug_timer_init(struct timer_list *timer) { } static inline void debug_timer_activate(struct timer_list *timer) { } static inline void debug_timer_deactivate(struct timer_list *timer) { } -static inline void debug_timer_assert_init(struct timer_list *timer) { } #endif static inline void debug_init(struct timer_list *timer) @@ -593,11 +552,6 @@ static inline void debug_deactivate(struct timer_list *timer) trace_timer_cancel(timer); } -static inline void debug_assert_init(struct timer_list *timer) -{ - debug_timer_assert_init(timer); -} - static void __init_timer(struct timer_list *timer, const char *name, struct lock_class_key *key) @@ -948,8 +902,6 @@ int del_timer(struct timer_list *timer) unsigned long flags; int ret = 0; - debug_assert_init(timer); - timer_stats_timer_clear_start_info(timer); if (timer_pending(timer)) { base = lock_timer_base(timer, &flags); @@ -980,8 +932,6 @@ int try_to_del_timer_sync(struct timer_list *timer) unsigned long flags; int ret = -1; - debug_assert_init(timer); - base = lock_timer_base(timer, &flags); if (base->running_timer == timer) diff --git a/trunk/kernel/trace/trace.c b/trunk/kernel/trace/trace.c index 91dc4bc8bf72..f2bd275bb60f 100644 --- 
a/trunk/kernel/trace/trace.c +++ b/trunk/kernel/trace/trace.c @@ -338,8 +338,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait); /* trace_flags holds trace_options default values */ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME | - TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | - TRACE_ITER_IRQ_INFO; + TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE; static int trace_stop_count; static DEFINE_RAW_SPINLOCK(tracing_start_lock); @@ -427,7 +426,6 @@ static const char *trace_options[] = { "record-cmd", "overwrite", "disable_on_free", - "irq-info", NULL }; @@ -1845,33 +1843,6 @@ static void s_stop(struct seq_file *m, void *p) trace_event_read_unlock(); } -static void -get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries) -{ - unsigned long count; - int cpu; - - *total = 0; - *entries = 0; - - for_each_tracing_cpu(cpu) { - count = ring_buffer_entries_cpu(tr->buffer, cpu); - /* - * If this buffer has skipped entries, then we hold all - * entries for the trace and we need to ignore the - * ones before the time stamp. - */ - if (tr->data[cpu]->skipped_entries) { - count -= tr->data[cpu]->skipped_entries; - /* total is the same as the entries */ - *total += count; - } else - *total += count + - ring_buffer_overrun_cpu(tr->buffer, cpu); - *entries += count; - } -} - static void print_lat_help_header(struct seq_file *m) { seq_puts(m, "# _------=> CPU# \n"); @@ -1884,35 +1855,12 @@ static void print_lat_help_header(struct seq_file *m) seq_puts(m, "# \\ / ||||| \\ | / \n"); } -static void print_event_info(struct trace_array *tr, struct seq_file *m) -{ - unsigned long total; - unsigned long entries; - - get_total_entries(tr, &total, &entries); - seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n", - entries, total, num_online_cpus()); - seq_puts(m, "#\n"); -} - -static void print_func_help_header(struct trace_array *tr, struct seq_file *m) +static void print_func_help_header(struct seq_file *m) { - print_event_info(tr, m); - seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"); + seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"); seq_puts(m, "# | | | | |\n"); } -static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m) -{ - print_event_info(tr, m); - seq_puts(m, "# _-----=> irqs-off\n"); - seq_puts(m, "# / _----=> need-resched\n"); - seq_puts(m, "# | / _---=> hardirq/softirq\n"); - seq_puts(m, "# || / _--=> preempt-depth\n"); - seq_puts(m, "# ||| / delay\n"); - seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"); - seq_puts(m, "# | | | |||| | |\n"); -} void print_trace_header(struct seq_file *m, struct trace_iterator *iter) @@ -1921,14 +1869,32 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter) struct trace_array *tr = iter->tr; struct trace_array_cpu *data = tr->data[tr->cpu]; struct tracer *type = current_trace; - unsigned long entries; - unsigned long total; + unsigned long entries = 0; + unsigned long total = 0; + unsigned long count; const char *name = "preemption"; + int cpu; if (type) name = type->name; - get_total_entries(tr, &total, &entries); + + for_each_tracing_cpu(cpu) { + count = ring_buffer_entries_cpu(tr->buffer, cpu); + /* + * If this buffer has skipped entries, then we hold all + * entries for the trace and we need to ignore the + * ones before the time stamp. 
+ */ + if (tr->data[cpu]->skipped_entries) { + count -= tr->data[cpu]->skipped_entries; + /* total is the same as the entries */ + total += count; + } else + total += count + + ring_buffer_overrun_cpu(tr->buffer, cpu); + entries += count; + } seq_printf(m, "# %s latency trace v1.1.5 on %s\n", name, UTS_RELEASE); @@ -2174,21 +2140,6 @@ enum print_line_t print_trace_line(struct trace_iterator *iter) return print_trace_fmt(iter); } -void trace_latency_header(struct seq_file *m) -{ - struct trace_iterator *iter = m->private; - - /* print nothing if the buffers are empty */ - if (trace_empty(iter)) - return; - - if (iter->iter_flags & TRACE_FILE_LAT_FMT) - print_trace_header(m, iter); - - if (!(trace_flags & TRACE_ITER_VERBOSE)) - print_lat_help_header(m); -} - void trace_default_header(struct seq_file *m) { struct trace_iterator *iter = m->private; @@ -2204,12 +2155,8 @@ void trace_default_header(struct seq_file *m) if (!(trace_flags & TRACE_ITER_VERBOSE)) print_lat_help_header(m); } else { - if (!(trace_flags & TRACE_ITER_VERBOSE)) { - if (trace_flags & TRACE_ITER_IRQ_INFO) - print_func_help_header_irq(iter->tr, m); - else - print_func_help_header(iter->tr, m); - } + if (!(trace_flags & TRACE_ITER_VERBOSE)) + print_func_help_header(m); } } @@ -4828,7 +4775,6 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { __ftrace_dump(true, oops_dump_mode); } -EXPORT_SYMBOL_GPL(ftrace_dump); __init static int tracer_alloc_buffers(void) { diff --git a/trunk/kernel/trace/trace.h b/trunk/kernel/trace/trace.h index 2c2657462ac3..092e1f8d18dc 100644 --- a/trunk/kernel/trace/trace.h +++ b/trunk/kernel/trace/trace.h @@ -370,7 +370,6 @@ void trace_graph_function(struct trace_array *tr, unsigned long ip, unsigned long parent_ip, unsigned long flags, int pc); -void trace_latency_header(struct seq_file *m); void trace_default_header(struct seq_file *m); void print_trace_header(struct seq_file *m, struct trace_iterator *iter); int trace_empty(struct trace_iterator *iter); @@ -655,7 +654,6 @@ enum trace_iterator_flags { TRACE_ITER_RECORD_CMD = 0x100000, TRACE_ITER_OVERWRITE = 0x200000, TRACE_ITER_STOP_ON_FREE = 0x400000, - TRACE_ITER_IRQ_INFO = 0x800000, }; /* diff --git a/trunk/kernel/trace/trace_events_filter.c b/trunk/kernel/trace/trace_events_filter.c index f04cc3136bd3..95dc31efd6dd 100644 --- a/trunk/kernel/trace/trace_events_filter.c +++ b/trunk/kernel/trace/trace_events_filter.c @@ -27,12 +27,6 @@ #include "trace.h" #include "trace_output.h" -#define DEFAULT_SYS_FILTER_MESSAGE \ - "### global filter ###\n" \ - "# Use this to set filters for multiple events.\n" \ - "# Only events with the given fields will be affected.\n" \ - "# If no events are modified, an error message will be displayed here" - enum filter_op_ids { OP_OR, @@ -652,7 +646,7 @@ void print_subsystem_event_filter(struct event_subsystem *system, if (filter && filter->filter_string) trace_seq_printf(s, "%s\n", filter->filter_string); else - trace_seq_printf(s, DEFAULT_SYS_FILTER_MESSAGE "\n"); + trace_seq_printf(s, "none\n"); mutex_unlock(&event_mutex); } @@ -1844,10 +1838,7 @@ int apply_subsystem_event_filter(struct event_subsystem *system, if (!filter) goto out; - /* System filters just show a default message */ - kfree(filter->filter_string); - filter->filter_string = NULL; - + replace_filter_string(filter, filter_string); /* * No event actually uses the system filter * we can free it without synchronize_sched(). 
@@ -1857,12 +1848,14 @@ int apply_subsystem_event_filter(struct event_subsystem *system, parse_init(ps, filter_ops, filter_string); err = filter_parse(ps); - if (err) - goto err_filter; + if (err) { + append_filter_err(ps, system->filter); + goto out; + } err = replace_system_preds(system, ps, filter_string); if (err) - goto err_filter; + append_filter_err(ps, system->filter); out: filter_opstack_clear(ps); @@ -1872,11 +1865,6 @@ int apply_subsystem_event_filter(struct event_subsystem *system, mutex_unlock(&event_mutex); return err; - -err_filter: - replace_filter_string(filter, filter_string); - append_filter_err(ps, system->filter); - goto out; } #ifdef CONFIG_PERF_EVENTS diff --git a/trunk/kernel/trace/trace_irqsoff.c b/trunk/kernel/trace/trace_irqsoff.c index 99d20e920368..20dad0d7a163 100644 --- a/trunk/kernel/trace/trace_irqsoff.c +++ b/trunk/kernel/trace/trace_irqsoff.c @@ -280,20 +280,9 @@ static enum print_line_t irqsoff_print_line(struct trace_iterator *iter) } static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { } +static void irqsoff_print_header(struct seq_file *s) { } static void irqsoff_trace_open(struct trace_iterator *iter) { } static void irqsoff_trace_close(struct trace_iterator *iter) { } - -#ifdef CONFIG_FUNCTION_TRACER -static void irqsoff_print_header(struct seq_file *s) -{ - trace_default_header(s); -} -#else -static void irqsoff_print_header(struct seq_file *s) -{ - trace_latency_header(s); -} -#endif /* CONFIG_FUNCTION_TRACER */ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ /* diff --git a/trunk/kernel/trace/trace_output.c b/trunk/kernel/trace/trace_output.c index 0d6ff3555942..51999309a6cf 100644 --- a/trunk/kernel/trace/trace_output.c +++ b/trunk/kernel/trace/trace_output.c @@ -627,23 +627,11 @@ int trace_print_context(struct trace_iterator *iter) unsigned long usec_rem = do_div(t, USEC_PER_SEC); unsigned long secs = (unsigned long)t; char comm[TASK_COMM_LEN]; - int ret; trace_find_cmdline(entry->pid, comm); - ret = trace_seq_printf(s, "%16s-%-5d [%03d] ", - comm, entry->pid, iter->cpu); - if (!ret) - return 0; - - if (trace_flags & TRACE_ITER_IRQ_INFO) { - ret = trace_print_lat_fmt(s, entry); - if (!ret) - return 0; - } - - return trace_seq_printf(s, " %5lu.%06lu: ", - secs, usec_rem); + return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ", + comm, entry->pid, iter->cpu, secs, usec_rem); } int trace_print_lat_context(struct trace_iterator *iter) diff --git a/trunk/kernel/trace/trace_sched_wakeup.c b/trunk/kernel/trace/trace_sched_wakeup.c index ff791ea48b57..e4a70c0c71b6 100644 --- a/trunk/kernel/trace/trace_sched_wakeup.c +++ b/trunk/kernel/trace/trace_sched_wakeup.c @@ -280,20 +280,9 @@ static enum print_line_t wakeup_print_line(struct trace_iterator *iter) } static void wakeup_graph_return(struct ftrace_graph_ret *trace) { } +static void wakeup_print_header(struct seq_file *s) { } static void wakeup_trace_open(struct trace_iterator *iter) { } static void wakeup_trace_close(struct trace_iterator *iter) { } - -#ifdef CONFIG_FUNCTION_TRACER -static void wakeup_print_header(struct seq_file *s) -{ - trace_default_header(s); -} -#else -static void wakeup_print_header(struct seq_file *s) -{ - trace_latency_header(s); -} -#endif /* CONFIG_FUNCTION_TRACER */ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ /* diff --git a/trunk/kernel/tsacct.c b/trunk/kernel/tsacct.c index 23b4d784ebdd..5bbfac85866e 100644 --- a/trunk/kernel/tsacct.c +++ b/trunk/kernel/tsacct.c @@ -127,7 +127,7 @@ void acct_update_integrals(struct task_struct *tsk) local_irq_save(flags); 
time = tsk->stime + tsk->utime; - dtime = time - tsk->acct_timexpd; + dtime = cputime_sub(time, tsk->acct_timexpd); jiffies_to_timeval(cputime_to_jiffies(dtime), &value); delta = value.tv_sec; delta = delta * USEC_PER_SEC + value.tv_usec; diff --git a/trunk/kernel/wait.c b/trunk/kernel/wait.c index 7fdd9eaca2c3..26fa7797f90f 100644 --- a/trunk/kernel/wait.c +++ b/trunk/kernel/wait.c @@ -10,10 +10,10 @@ #include #include -void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key) +void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *key) { spin_lock_init(&q->lock); - lockdep_set_class_and_name(&q->lock, key, name); + lockdep_set_class(&q->lock, key); INIT_LIST_HEAD(&q->task_list); } diff --git a/trunk/lib/Kconfig b/trunk/lib/Kconfig index 63b5782732ed..32f3e5ae2be5 100644 --- a/trunk/lib/Kconfig +++ b/trunk/lib/Kconfig @@ -244,9 +244,6 @@ config CPU_RMAP bool depends on SMP -config DQL - bool - # # Netlink attribute parsing support is select'ed if needed # diff --git a/trunk/lib/Makefile b/trunk/lib/Makefile index ff00d4dcb7ed..a4da283f5dc0 100644 --- a/trunk/lib/Makefile +++ b/trunk/lib/Makefile @@ -115,8 +115,6 @@ obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o obj-$(CONFIG_CORDIC) += cordic.o -obj-$(CONFIG_DQL) += dynamic_queue_limits.o - hostprogs-y := gen_crc32table clean-files := crc32table.h diff --git a/trunk/lib/debugobjects.c b/trunk/lib/debugobjects.c index 77cb245f8e7b..a78b7c6e042c 100644 --- a/trunk/lib/debugobjects.c +++ b/trunk/lib/debugobjects.c @@ -268,16 +268,12 @@ static void debug_print_object(struct debug_obj *obj, char *msg) * Try to repair the damage, so we have a better chance to get useful * debug output. */ -static int +static void debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state), void * addr, enum debug_obj_state state) { - int fixed = 0; - if (fixup) - fixed = fixup(addr, state); - debug_objects_fixups += fixed; - return fixed; + debug_objects_fixups += fixup(addr, state); } static void debug_object_is_on_stack(void *addr, int onstack) @@ -390,9 +386,6 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr) struct debug_bucket *db; struct debug_obj *obj; unsigned long flags; - struct debug_obj o = { .object = addr, - .state = ODEBUG_STATE_NOTAVAILABLE, - .descr = descr }; if (!debug_objects_enabled) return; @@ -432,9 +425,8 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr) * let the type specific code decide whether this is * true or not. 
*/ - if (debug_object_fixup(descr->fixup_activate, addr, - ODEBUG_STATE_NOTAVAILABLE)) - debug_print_object(&o, "activate"); + debug_object_fixup(descr->fixup_activate, addr, + ODEBUG_STATE_NOTAVAILABLE); } /** @@ -570,44 +562,6 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr) raw_spin_unlock_irqrestore(&db->lock, flags); } -/** - * debug_object_assert_init - debug checks when object should be init-ed - * @addr: address of the object - * @descr: pointer to an object specific debug description structure - */ -void debug_object_assert_init(void *addr, struct debug_obj_descr *descr) -{ - struct debug_bucket *db; - struct debug_obj *obj; - unsigned long flags; - - if (!debug_objects_enabled) - return; - - db = get_bucket((unsigned long) addr); - - raw_spin_lock_irqsave(&db->lock, flags); - - obj = lookup_object(addr, db); - if (!obj) { - struct debug_obj o = { .object = addr, - .state = ODEBUG_STATE_NOTAVAILABLE, - .descr = descr }; - - raw_spin_unlock_irqrestore(&db->lock, flags); - /* - * Maybe the object is static. Let the type specific - * code decide what to do. - */ - if (debug_object_fixup(descr->fixup_assert_init, addr, - ODEBUG_STATE_NOTAVAILABLE)) - debug_print_object(&o, "assert_init"); - return; - } - - raw_spin_unlock_irqrestore(&db->lock, flags); -} - /** * debug_object_active_state - debug checks object usage state machine * @addr: address of the object diff --git a/trunk/lib/dynamic_queue_limits.c b/trunk/lib/dynamic_queue_limits.c deleted file mode 100644 index 3d1bdcdd7db4..000000000000 --- a/trunk/lib/dynamic_queue_limits.c +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Dynamic byte queue limits. See include/linux/dynamic_queue_limits.h - * - * Copyright (c) 2011, Tom Herbert - */ -#include -#include -#include -#include -#include - -#define POSDIFF(A, B) ((A) > (B) ? (A) - (B) : 0) - -/* Records completed count and recalculates the queue limit */ -void dql_completed(struct dql *dql, unsigned int count) -{ - unsigned int inprogress, prev_inprogress, limit; - unsigned int ovlimit, all_prev_completed, completed; - - /* Can't complete more than what's in queue */ - BUG_ON(count > dql->num_queued - dql->num_completed); - - completed = dql->num_completed + count; - limit = dql->limit; - ovlimit = POSDIFF(dql->num_queued - dql->num_completed, limit); - inprogress = dql->num_queued - completed; - prev_inprogress = dql->prev_num_queued - dql->num_completed; - all_prev_completed = POSDIFF(completed, dql->prev_num_queued); - - if ((ovlimit && !inprogress) || - (dql->prev_ovlimit && all_prev_completed)) { - /* - * Queue considered starved if: - * - The queue was over-limit in the last interval, - * and there is no more data in the queue. - * OR - * - The queue was over-limit in the previous interval and - * when enqueuing it was possible that all queued data - * had been consumed. This covers the case when queue - * may have becomes starved between completion processing - * running and next time enqueue was scheduled. - * - * When queue is starved increase the limit by the amount - * of bytes both sent and completed in the last interval, - * plus any previous over-limit. - */ - limit += POSDIFF(completed, dql->prev_num_queued) + - dql->prev_ovlimit; - dql->slack_start_time = jiffies; - dql->lowest_slack = UINT_MAX; - } else if (inprogress && prev_inprogress && !all_prev_completed) { - /* - * Queue was not starved, check if the limit can be decreased. - * A decrease is only considered if the queue has been busy in - * the whole interval (the check above). 
- * - * If there is slack, the amount of execess data queued above - * the the amount needed to prevent starvation, the queue limit - * can be decreased. To avoid hysteresis we consider the - * minimum amount of slack found over several iterations of the - * completion routine. - */ - unsigned int slack, slack_last_objs; - - /* - * Slack is the maximum of - * - The queue limit plus previous over-limit minus twice - * the number of objects completed. Note that two times - * number of completed bytes is a basis for an upper bound - * of the limit. - * - Portion of objects in the last queuing operation that - * was not part of non-zero previous over-limit. That is - * "round down" by non-overlimit portion of the last - * queueing operation. - */ - slack = POSDIFF(limit + dql->prev_ovlimit, - 2 * (completed - dql->num_completed)); - slack_last_objs = dql->prev_ovlimit ? - POSDIFF(dql->prev_last_obj_cnt, dql->prev_ovlimit) : 0; - - slack = max(slack, slack_last_objs); - - if (slack < dql->lowest_slack) - dql->lowest_slack = slack; - - if (time_after(jiffies, - dql->slack_start_time + dql->slack_hold_time)) { - limit = POSDIFF(limit, dql->lowest_slack); - dql->slack_start_time = jiffies; - dql->lowest_slack = UINT_MAX; - } - } - - /* Enforce bounds on limit */ - limit = clamp(limit, dql->min_limit, dql->max_limit); - - if (limit != dql->limit) { - dql->limit = limit; - ovlimit = 0; - } - - dql->adj_limit = limit + completed; - dql->prev_ovlimit = ovlimit; - dql->prev_last_obj_cnt = dql->last_obj_cnt; - dql->num_completed = completed; - dql->prev_num_queued = dql->num_queued; -} -EXPORT_SYMBOL(dql_completed); - -void dql_reset(struct dql *dql) -{ - /* Reset all dynamic values */ - dql->limit = 0; - dql->num_queued = 0; - dql->num_completed = 0; - dql->last_obj_cnt = 0; - dql->prev_num_queued = 0; - dql->prev_last_obj_cnt = 0; - dql->prev_ovlimit = 0; - dql->lowest_slack = UINT_MAX; - dql->slack_start_time = jiffies; -} -EXPORT_SYMBOL(dql_reset); - -int dql_init(struct dql *dql, unsigned hold_time) -{ - dql->max_limit = DQL_MAX_LIMIT; - dql->min_limit = 0; - dql->slack_hold_time = hold_time; - dql_reset(dql); - return 0; -} -EXPORT_SYMBOL(dql_init); diff --git a/trunk/lib/reciprocal_div.c b/trunk/lib/reciprocal_div.c index 75510e94f7d0..6a3bd48fa2a0 100644 --- a/trunk/lib/reciprocal_div.c +++ b/trunk/lib/reciprocal_div.c @@ -1,6 +1,5 @@ #include #include -#include u32 reciprocal_value(u32 k) { @@ -8,4 +7,3 @@ u32 reciprocal_value(u32 k) do_div(val, k); return (u32)val; } -EXPORT_SYMBOL(reciprocal_value); diff --git a/trunk/lib/vsprintf.c b/trunk/lib/vsprintf.c index 8e75003d62f6..993599e66e5a 100644 --- a/trunk/lib/vsprintf.c +++ b/trunk/lib/vsprintf.c @@ -777,18 +777,6 @@ char *uuid_string(char *buf, char *end, const u8 *addr, return string(buf, end, uuid, spec); } -static -char *netdev_feature_string(char *buf, char *end, const u8 *addr, - struct printf_spec spec) -{ - spec.flags |= SPECIAL | SMALL | ZEROPAD; - if (spec.field_width == -1) - spec.field_width = 2 + 2 * sizeof(netdev_features_t); - spec.base = 16; - - return number(buf, end, *(const netdev_features_t *)addr, spec); -} - int kptr_restrict __read_mostly; /* @@ -836,7 +824,6 @@ int kptr_restrict __read_mostly; * Do not use this feature without some mechanism to verify the * correctness of the format string and va_list arguments. 
* - 'K' For a kernel pointer that should be hidden from unprivileged users - * - 'NF' For a netdev_features_t * * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 * function pointers are really function descriptors, which contain a @@ -909,12 +896,6 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, has_capability_noaudit(current, CAP_SYSLOG)))) ptr = NULL; break; - case 'N': - switch (fmt[1]) { - case 'F': - return netdev_feature_string(buf, end, ptr, spec); - } - break; } spec.flags |= SMALL; if (spec.field_width == -1) { diff --git a/trunk/mm/Kconfig b/trunk/mm/Kconfig index e338407f1225..011b110365c8 100644 --- a/trunk/mm/Kconfig +++ b/trunk/mm/Kconfig @@ -131,12 +131,6 @@ config SPARSEMEM_VMEMMAP config HAVE_MEMBLOCK boolean -config HAVE_MEMBLOCK_NODE_MAP - boolean - -config ARCH_DISCARD_MEMBLOCK - boolean - config NO_BOOTMEM boolean diff --git a/trunk/mm/hugetlb.c b/trunk/mm/hugetlb.c index 2316840b337a..73f17c0293c0 100644 --- a/trunk/mm/hugetlb.c +++ b/trunk/mm/hugetlb.c @@ -901,6 +901,7 @@ static int gather_surplus_pages(struct hstate *h, int delta) h->resv_huge_pages += delta; ret = 0; + spin_unlock(&hugetlb_lock); /* Free the needed pages to the hugetlb pool */ list_for_each_entry_safe(page, tmp, &surplus_list, lru) { if ((--needed) < 0) @@ -914,7 +915,6 @@ static int gather_surplus_pages(struct hstate *h, int delta) VM_BUG_ON(page_count(page)); enqueue_huge_page(h, page); } - spin_unlock(&hugetlb_lock); /* Free unnecessary surplus pages to the buddy allocator */ free: diff --git a/trunk/mm/memblock.c b/trunk/mm/memblock.c index 2f55f19b7c86..84bec4969ed5 100644 --- a/trunk/mm/memblock.c +++ b/trunk/mm/memblock.c @@ -20,23 +20,12 @@ #include #include -static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; -static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; - -struct memblock memblock __initdata_memblock = { - .memory.regions = memblock_memory_init_regions, - .memory.cnt = 1, /* empty dummy entry */ - .memory.max = INIT_MEMBLOCK_REGIONS, - - .reserved.regions = memblock_reserved_init_regions, - .reserved.cnt = 1, /* empty dummy entry */ - .reserved.max = INIT_MEMBLOCK_REGIONS, - - .current_limit = MEMBLOCK_ALLOC_ANYWHERE, -}; +struct memblock memblock __initdata_memblock; int memblock_debug __initdata_memblock; -static int memblock_can_resize __initdata_memblock; +int memblock_can_resize __initdata_memblock; +static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock; +static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock; /* inline so we don't get a warning when pr_debug is compiled out */ static inline const char *memblock_type_name(struct memblock_type *type) @@ -49,15 +38,20 @@ static inline const char *memblock_type_name(struct memblock_type *type) return "unknown"; } -/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */ -static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size) -{ - return *size = min(*size, (phys_addr_t)ULLONG_MAX - base); -} - /* * Address comparison utilities */ + +static phys_addr_t __init_memblock memblock_align_down(phys_addr_t addr, phys_addr_t size) +{ + return addr & ~(size - 1); +} + +static phys_addr_t __init_memblock memblock_align_up(phys_addr_t addr, phys_addr_t size) +{ + return (addr + (size - 1)) & ~(size - 1); +} + static unsigned long __init_memblock 
memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, phys_addr_t base2, phys_addr_t size2) { @@ -79,66 +73,83 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type, return (i < type->cnt) ? i : -1; } -/** - * memblock_find_in_range_node - find free area in given range and node - * @start: start of candidate range - * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} - * @size: size of free area to find - * @align: alignment of free area to find - * @nid: nid of the free area to find, %MAX_NUMNODES for any node - * - * Find @size free area aligned to @align in the specified range and node. - * - * RETURNS: - * Found address on success, %0 on failure. +/* + * Find, allocate, deallocate or reserve unreserved regions. All allocations + * are top-down. */ -phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start, - phys_addr_t end, phys_addr_t size, - phys_addr_t align, int nid) + +static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end, + phys_addr_t size, phys_addr_t align) { - phys_addr_t this_start, this_end, cand; - u64 i; + phys_addr_t base, res_base; + long j; + + /* In case, huge size is requested */ + if (end < size) + return MEMBLOCK_ERROR; + + base = memblock_align_down((end - size), align); - /* align @size to avoid excessive fragmentation on reserved array */ - size = round_up(size, align); + /* Prevent allocations returning 0 as it's also used to + * indicate an allocation failure + */ + if (start == 0) + start = PAGE_SIZE; + + while (start <= base) { + j = memblock_overlaps_region(&memblock.reserved, base, size); + if (j < 0) + return base; + res_base = memblock.reserved.regions[j].base; + if (res_base < size) + break; + base = memblock_align_down(res_base - size, align); + } - /* pump up @end */ + return MEMBLOCK_ERROR; +} + +static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size, + phys_addr_t align, phys_addr_t start, phys_addr_t end) +{ + long i; + + BUG_ON(0 == size); + + /* Pump up max_addr */ if (end == MEMBLOCK_ALLOC_ACCESSIBLE) end = memblock.current_limit; - /* adjust @start to avoid underflow and allocating the first page */ - start = max3(start, size, (phys_addr_t)PAGE_SIZE); - end = max(start, end); - - for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) { - this_start = clamp(this_start, start, end); - this_end = clamp(this_end, start, end); + /* We do a top-down search, this tends to limit memory + * fragmentation by keeping early boot allocs near the + * top of memory + */ + for (i = memblock.memory.cnt - 1; i >= 0; i--) { + phys_addr_t memblockbase = memblock.memory.regions[i].base; + phys_addr_t memblocksize = memblock.memory.regions[i].size; + phys_addr_t bottom, top, found; - cand = round_down(this_end - size, align); - if (cand >= this_start) - return cand; + if (memblocksize < size) + continue; + if ((memblockbase + memblocksize) <= start) + break; + bottom = max(memblockbase, start); + top = min(memblockbase + memblocksize, end); + if (bottom >= top) + continue; + found = memblock_find_region(bottom, top, size, align); + if (found != MEMBLOCK_ERROR) + return found; } - return 0; + return MEMBLOCK_ERROR; } -/** - * memblock_find_in_range - find free area in given range - * @start: start of candidate range - * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} - * @size: size of free area to find - * @align: alignment of free area to find - * - * Find @size free area aligned to @align in 
the specified range. - * - * RETURNS: - * Found address on success, %0 on failure. +/* + * Find a free area with specified alignment in a specific range. */ -phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, - phys_addr_t end, phys_addr_t size, - phys_addr_t align) +u64 __init_memblock memblock_find_in_range(u64 start, u64 end, u64 size, u64 align) { - return memblock_find_in_range_node(start, end, size, align, - MAX_NUMNODES); + return memblock_find_base(size, align, start, end); } /* @@ -167,21 +178,25 @@ int __init_memblock memblock_reserve_reserved_regions(void) static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r) { - type->total_size -= type->regions[r].size; - memmove(&type->regions[r], &type->regions[r + 1], - (type->cnt - (r + 1)) * sizeof(type->regions[r])); + unsigned long i; + + for (i = r; i < type->cnt - 1; i++) { + type->regions[i].base = type->regions[i + 1].base; + type->regions[i].size = type->regions[i + 1].size; + } type->cnt--; /* Special case for empty arrays */ if (type->cnt == 0) { - WARN_ON(type->total_size != 0); type->cnt = 1; type->regions[0].base = 0; type->regions[0].size = 0; - memblock_set_region_node(&type->regions[0], MAX_NUMNODES); } } +/* Defined below but needed now */ +static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size); + static int __init_memblock memblock_double_array(struct memblock_type *type) { struct memblock_region *new_array, *old_array; @@ -211,10 +226,10 @@ static int __init_memblock memblock_double_array(struct memblock_type *type) */ if (use_slab) { new_array = kmalloc(new_size, GFP_KERNEL); - addr = new_array ? __pa(new_array) : 0; + addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array); } else - addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t)); - if (!addr) { + addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE); + if (addr == MEMBLOCK_ERROR) { pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", memblock_type_name(type), type->max, type->max * 2); return -1; @@ -239,7 +254,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type) return 0; /* Add the new reserved region now. Should not fail ! */ - BUG_ON(memblock_reserve(addr, new_size)); + BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size)); /* If the array wasn't our static init one, then free it. We only do * that before SLAB is available as later on, we don't know whether @@ -253,514 +268,343 @@ static int __init_memblock memblock_double_array(struct memblock_type *type) return 0; } -/** - * memblock_merge_regions - merge neighboring compatible regions - * @type: memblock type to scan - * - * Scan @type and merge neighboring compatible regions. 
- */ -static void __init_memblock memblock_merge_regions(struct memblock_type *type) +int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1, + phys_addr_t addr2, phys_addr_t size2) { - int i = 0; + return 1; +} - /* cnt never goes below 1 */ - while (i < type->cnt - 1) { - struct memblock_region *this = &type->regions[i]; - struct memblock_region *next = &type->regions[i + 1]; +static long __init_memblock memblock_add_region(struct memblock_type *type, + phys_addr_t base, phys_addr_t size) +{ + phys_addr_t end = base + size; + int i, slot = -1; - if (this->base + this->size != next->base || - memblock_get_region_node(this) != - memblock_get_region_node(next)) { - BUG_ON(this->base + this->size > next->base); - i++; - continue; - } + /* First try and coalesce this MEMBLOCK with others */ + for (i = 0; i < type->cnt; i++) { + struct memblock_region *rgn = &type->regions[i]; + phys_addr_t rend = rgn->base + rgn->size; - this->size += next->size; - memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next)); - type->cnt--; - } -} + /* Exit if there's no possible hits */ + if (rgn->base > end || rgn->size == 0) + break; -/** - * memblock_insert_region - insert new memblock region - * @type: memblock type to insert into - * @idx: index for the insertion point - * @base: base address of the new region - * @size: size of the new region - * - * Insert new memblock region [@base,@base+@size) into @type at @idx. - * @type must already have extra room to accomodate the new region. - */ -static void __init_memblock memblock_insert_region(struct memblock_type *type, - int idx, phys_addr_t base, - phys_addr_t size, int nid) -{ - struct memblock_region *rgn = &type->regions[idx]; + /* Check if we are fully enclosed within an existing + * block + */ + if (rgn->base <= base && rend >= end) + return 0; - BUG_ON(type->cnt >= type->max); - memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn)); - rgn->base = base; - rgn->size = size; - memblock_set_region_node(rgn, nid); - type->cnt++; - type->total_size += size; -} + /* Check if we overlap or are adjacent with the bottom + * of a block. + */ + if (base < rgn->base && end >= rgn->base) { + /* If we can't coalesce, create a new block */ + if (!memblock_memory_can_coalesce(base, size, + rgn->base, + rgn->size)) { + /* Overlap & can't coalesce are mutually + * exclusive, if you do that, be prepared + * for trouble + */ + WARN_ON(end != rgn->base); + goto new_block; + } + /* We extend the bottom of the block down to our + * base + */ + rgn->base = base; + rgn->size = rend - base; -/** - * memblock_add_region - add new memblock region - * @type: memblock type to add new region into - * @base: base address of the new region - * @size: size of the new region - * @nid: nid of the new region - * - * Add new memblock region [@base,@base+@size) into @type. The new region - * is allowed to overlap with existing ones - overlaps don't affect already - * existing regions. @type is guaranteed to be minimal (all neighbouring - * compatible regions are merged) after the addition. - * - * RETURNS: - * 0 on success, -errno on failure. 
- */ -static int __init_memblock memblock_add_region(struct memblock_type *type, - phys_addr_t base, phys_addr_t size, int nid) -{ - bool insert = false; - phys_addr_t obase = base; - phys_addr_t end = base + memblock_cap_size(base, &size); - int i, nr_new; + /* Return if we have nothing else to allocate + * (fully coalesced) + */ + if (rend >= end) + return 0; + + /* We continue processing from the end of the + * coalesced block. + */ + base = rend; + size = end - base; + } + + /* Now check if we overlap or are adjacent with the + * top of a block + */ + if (base <= rend && end >= rend) { + /* If we can't coalesce, create a new block */ + if (!memblock_memory_can_coalesce(rgn->base, + rgn->size, + base, size)) { + /* Overlap & can't coalesce are mutually + * exclusive, if you do that, be prepared + * for trouble + */ + WARN_ON(rend != base); + goto new_block; + } + /* We adjust our base down to enclose the + * original block and destroy it. It will be + * part of our new allocation. Since we've + * freed an entry, we know we won't fail + * to allocate one later, so we won't risk + * losing the original block allocation. + */ + size += (base - rgn->base); + base = rgn->base; + memblock_remove_region(type, i--); + } + } - /* special case for empty array */ - if (type->regions[0].size == 0) { - WARN_ON(type->cnt != 1 || type->total_size); + /* If the array is empty, special case, replace the fake + * filler region and return + */ + if ((type->cnt == 1) && (type->regions[0].size == 0)) { type->regions[0].base = base; type->regions[0].size = size; - memblock_set_region_node(&type->regions[0], nid); - type->total_size = size; return 0; } -repeat: - /* - * The following is executed twice. Once with %false @insert and - * then with %true. The first counts the number of regions needed - * to accomodate the new area. The second actually inserts them. - */ - base = obase; - nr_new = 0; - for (i = 0; i < type->cnt; i++) { - struct memblock_region *rgn = &type->regions[i]; - phys_addr_t rbase = rgn->base; - phys_addr_t rend = rbase + rgn->size; + new_block: + /* If we are out of space, we fail. It's too late to resize the array + * but then this shouldn't have happened in the first place. + */ + if (WARN_ON(type->cnt >= type->max)) + return -1; - if (rbase >= end) + /* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */ + for (i = type->cnt - 1; i >= 0; i--) { + if (base < type->regions[i].base) { + type->regions[i+1].base = type->regions[i].base; + type->regions[i+1].size = type->regions[i].size; + } else { + type->regions[i+1].base = base; + type->regions[i+1].size = size; + slot = i + 1; break; - if (rend <= base) - continue; - /* - * @rgn overlaps. If it separates the lower part of new - * area, insert that portion. - */ - if (rbase > base) { - nr_new++; - if (insert) - memblock_insert_region(type, i++, base, - rbase - base, nid); } - /* area below @rend is dealt with, forget about it */ - base = min(rend, end); } - - /* insert the remaining portion */ - if (base < end) { - nr_new++; - if (insert) - memblock_insert_region(type, i, base, end - base, nid); + if (base < type->regions[0].base) { + type->regions[0].base = base; + type->regions[0].size = size; + slot = 0; } + type->cnt++; - /* - * If this was the first round, resize array and repeat for actual - * insertions; otherwise, merge and return. + /* The array is full ? Try to resize it. 
If that fails, we undo + * our allocation and return an error */ - if (!insert) { - while (type->cnt + nr_new > type->max) - if (memblock_double_array(type) < 0) - return -ENOMEM; - insert = true; - goto repeat; - } else { - memblock_merge_regions(type); - return 0; + if (type->cnt == type->max && memblock_double_array(type)) { + BUG_ON(slot < 0); + memblock_remove_region(type, slot); + return -1; } -} -int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size, - int nid) -{ - return memblock_add_region(&memblock.memory, base, size, nid); + return 0; } -int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) +long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) { - return memblock_add_region(&memblock.memory, base, size, MAX_NUMNODES); + return memblock_add_region(&memblock.memory, base, size); + } -/** - * memblock_isolate_range - isolate given range into disjoint memblocks - * @type: memblock type to isolate range for - * @base: base of range to isolate - * @size: size of range to isolate - * @start_rgn: out parameter for the start of isolated region - * @end_rgn: out parameter for the end of isolated region - * - * Walk @type and ensure that regions don't cross the boundaries defined by - * [@base,@base+@size). Crossing regions are split at the boundaries, - * which may create at most two more regions. The index of the first - * region inside the range is returned in *@start_rgn and end in *@end_rgn. - * - * RETURNS: - * 0 on success, -errno on failure. - */ -static int __init_memblock memblock_isolate_range(struct memblock_type *type, - phys_addr_t base, phys_addr_t size, - int *start_rgn, int *end_rgn) +static long __init_memblock __memblock_remove(struct memblock_type *type, + phys_addr_t base, phys_addr_t size) { - phys_addr_t end = base + memblock_cap_size(base, &size); + phys_addr_t end = base + size; int i; - *start_rgn = *end_rgn = 0; - - /* we'll create at most two more regions */ - while (type->cnt + 2 > type->max) - if (memblock_double_array(type) < 0) - return -ENOMEM; - + /* Walk through the array for collisions */ for (i = 0; i < type->cnt; i++) { struct memblock_region *rgn = &type->regions[i]; - phys_addr_t rbase = rgn->base; - phys_addr_t rend = rbase + rgn->size; + phys_addr_t rend = rgn->base + rgn->size; - if (rbase >= end) + /* Nothing more to do, exit */ + if (rgn->base > end || rgn->size == 0) break; - if (rend <= base) + + /* If we fully enclose the block, drop it */ + if (base <= rgn->base && end >= rend) { + memblock_remove_region(type, i--); continue; + } - if (rbase < base) { - /* - * @rgn intersects from below. Split and continue - * to process the next region - the new top half. - */ - rgn->base = base; - rgn->size -= base - rbase; - type->total_size -= base - rbase; - memblock_insert_region(type, i, rbase, base - rbase, - memblock_get_region_node(rgn)); - } else if (rend > end) { - /* - * @rgn intersects from above. Split and redo the - * current region - the new bottom half. 
+ /* If we are fully enclosed within a block + * then we need to split it and we are done + */ + if (base > rgn->base && end < rend) { + rgn->size = base - rgn->base; + if (!memblock_add_region(type, end, rend - end)) + return 0; + /* Failure to split is bad, we at least + * restore the block before erroring */ - rgn->base = end; - rgn->size -= end - rbase; - type->total_size -= end - rbase; - memblock_insert_region(type, i--, rbase, end - rbase, - memblock_get_region_node(rgn)); - } else { - /* @rgn is fully contained, record it */ - if (!*end_rgn) - *start_rgn = i; - *end_rgn = i + 1; + rgn->size = rend - rgn->base; + WARN_ON(1); + return -1; } - } - return 0; -} - -static int __init_memblock __memblock_remove(struct memblock_type *type, - phys_addr_t base, phys_addr_t size) -{ - int start_rgn, end_rgn; - int i, ret; + /* Check if we need to trim the bottom of a block */ + if (rgn->base < end && rend > end) { + rgn->size -= end - rgn->base; + rgn->base = end; + break; + } - ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); - if (ret) - return ret; + /* And check if we need to trim the top of a block */ + if (base < rend) + rgn->size -= rend - base; - for (i = end_rgn - 1; i >= start_rgn; i--) - memblock_remove_region(type, i); + } return 0; } -int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size) +long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size) { return __memblock_remove(&memblock.memory, base, size); } -int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) +long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) { - memblock_dbg(" memblock_free: [%#016llx-%#016llx] %pF\n", - (unsigned long long)base, - (unsigned long long)base + size, - (void *)_RET_IP_); - return __memblock_remove(&memblock.reserved, base, size); } -int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) +long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) { struct memblock_type *_rgn = &memblock.reserved; - memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n", - (unsigned long long)base, - (unsigned long long)base + size, - (void *)_RET_IP_); BUG_ON(0 == size); - return memblock_add_region(_rgn, base, size, MAX_NUMNODES); + return memblock_add_region(_rgn, base, size); } -/** - * __next_free_mem_range - next function for for_each_free_mem_range() - * @idx: pointer to u64 loop variable - * @nid: nid: node selector, %MAX_NUMNODES for all nodes - * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL - * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL - * @p_nid: ptr to int for nid of the range, can be %NULL - * - * Find the first free area from *@idx which matches @nid, fill the out - * parameters, and update *@idx for the next iteration. The lower 32bit of - * *@idx contains index into memory region and the upper 32bit indexes the - * areas before each reserved region. For example, if reserved regions - * look like the following, - * - * 0:[0-16), 1:[32-48), 2:[128-130) - * - * The upper 32bit indexes the following regions. - * - * 0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX) - * - * As both region arrays are sorted, the function advances the two indices - * in lockstep and returns each intersection. 
- */ -void __init_memblock __next_free_mem_range(u64 *idx, int nid, - phys_addr_t *out_start, - phys_addr_t *out_end, int *out_nid) +phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) { - struct memblock_type *mem = &memblock.memory; - struct memblock_type *rsv = &memblock.reserved; - int mi = *idx & 0xffffffff; - int ri = *idx >> 32; - - for ( ; mi < mem->cnt; mi++) { - struct memblock_region *m = &mem->regions[mi]; - phys_addr_t m_start = m->base; - phys_addr_t m_end = m->base + m->size; + phys_addr_t found; - /* only memory regions are associated with nodes, check it */ - if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m)) - continue; + /* We align the size to limit fragmentation. Without this, a lot of + * small allocs quickly eat up the whole reserve array on sparc + */ + size = memblock_align_up(size, align); - /* scan areas before each reservation for intersection */ - for ( ; ri < rsv->cnt + 1; ri++) { - struct memblock_region *r = &rsv->regions[ri]; - phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0; - phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX; - - /* if ri advanced past mi, break out to advance mi */ - if (r_start >= m_end) - break; - /* if the two regions intersect, we're done */ - if (m_start < r_end) { - if (out_start) - *out_start = max(m_start, r_start); - if (out_end) - *out_end = min(m_end, r_end); - if (out_nid) - *out_nid = memblock_get_region_node(m); - /* - * The region which ends first is advanced - * for the next iteration. - */ - if (m_end <= r_end) - mi++; - else - ri++; - *idx = (u32)mi | (u64)ri << 32; - return; - } - } - } + found = memblock_find_base(size, align, 0, max_addr); + if (found != MEMBLOCK_ERROR && + !memblock_add_region(&memblock.reserved, found, size)) + return found; - /* signal end of iteration */ - *idx = ULLONG_MAX; + return 0; } -/** - * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse() - * @idx: pointer to u64 loop variable - * @nid: nid: node selector, %MAX_NUMNODES for all nodes - * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL - * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL - * @p_nid: ptr to int for nid of the range, can be %NULL - * - * Reverse of __next_free_mem_range(). - */ -void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid, - phys_addr_t *out_start, - phys_addr_t *out_end, int *out_nid) +phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) { - struct memblock_type *mem = &memblock.memory; - struct memblock_type *rsv = &memblock.reserved; - int mi = *idx & 0xffffffff; - int ri = *idx >> 32; - - if (*idx == (u64)ULLONG_MAX) { - mi = mem->cnt - 1; - ri = rsv->cnt; - } + phys_addr_t alloc; - for ( ; mi >= 0; mi--) { - struct memblock_region *m = &mem->regions[mi]; - phys_addr_t m_start = m->base; - phys_addr_t m_end = m->base + m->size; + alloc = __memblock_alloc_base(size, align, max_addr); - /* only memory regions are associated with nodes, check it */ - if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m)) - continue; + if (alloc == 0) + panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n", + (unsigned long long) size, (unsigned long long) max_addr); - /* scan areas before each reservation for intersection */ - for ( ; ri >= 0; ri--) { - struct memblock_region *r = &rsv->regions[ri]; - phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0; - phys_addr_t r_end = ri < rsv->cnt ? 
r->base : ULLONG_MAX; - - /* if ri advanced past mi, break out to advance mi */ - if (r_end <= m_start) - break; - /* if the two regions intersect, we're done */ - if (m_end > r_start) { - if (out_start) - *out_start = max(m_start, r_start); - if (out_end) - *out_end = min(m_end, r_end); - if (out_nid) - *out_nid = memblock_get_region_node(m); - - if (m_start >= r_start) - mi--; - else - ri--; - *idx = (u32)mi | (u64)ri << 32; - return; - } - } - } + return alloc; +} - *idx = ULLONG_MAX; +phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align) +{ + return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); } -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP + /* - * Common iterator interface used to define for_each_mem_range(). + * Additional node-local allocators. Search for node memory is bottom up + * and walks memblock regions within that node bottom-up as well, but allocation + * within an memblock region is top-down. XXX I plan to fix that at some stage + * + * WARNING: Only available after early_node_map[] has been populated, + * on some architectures, that is after all the calls to add_active_range() + * have been done to populate it. */ -void __init_memblock __next_mem_pfn_range(int *idx, int nid, - unsigned long *out_start_pfn, - unsigned long *out_end_pfn, int *out_nid) -{ - struct memblock_type *type = &memblock.memory; - struct memblock_region *r; - while (++*idx < type->cnt) { - r = &type->regions[*idx]; +phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid) +{ +#ifdef CONFIG_ARCH_POPULATES_NODE_MAP + /* + * This code originates from sparc which really wants use to walk by addresses + * and returns the nid. This is not very convenient for early_pfn_map[] users + * as the map isn't sorted yet, and it really wants to be walked by nid. + * + * For now, I implement the inefficient method below which walks the early + * map multiple times. Eventually we may want to use an ARCH config option + * to implement a completely different method for both case. + */ + unsigned long start_pfn, end_pfn; + int i; - if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size)) + for (i = 0; i < MAX_NUMNODES; i++) { + get_pfn_range_for_nid(i, &start_pfn, &end_pfn); + if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn)) continue; - if (nid == MAX_NUMNODES || nid == r->nid) - break; - } - if (*idx >= type->cnt) { - *idx = -1; - return; + *nid = i; + return min(end, PFN_PHYS(end_pfn)); } +#endif + *nid = 0; - if (out_start_pfn) - *out_start_pfn = PFN_UP(r->base); - if (out_end_pfn) - *out_end_pfn = PFN_DOWN(r->base + r->size); - if (out_nid) - *out_nid = r->nid; + return end; } -/** - * memblock_set_node - set node ID on memblock regions - * @base: base of area to set node ID for - * @size: size of area to set node ID for - * @nid: node ID to set - * - * Set the nid of memblock memory regions in [@base,@base+@size) to @nid. - * Regions which cross the area boundaries are split as necessary. - * - * RETURNS: - * 0 on success, -errno on failure. 
- */ -int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size, - int nid) +static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp, + phys_addr_t size, + phys_addr_t align, int nid) { - struct memblock_type *type = &memblock.memory; - int start_rgn, end_rgn; - int i, ret; + phys_addr_t start, end; - ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); - if (ret) - return ret; + start = mp->base; + end = start + mp->size; - for (i = start_rgn; i < end_rgn; i++) - type->regions[i].nid = nid; + start = memblock_align_up(start, align); + while (start < end) { + phys_addr_t this_end; + int this_nid; - memblock_merge_regions(type); - return 0; -} -#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ - -static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size, - phys_addr_t align, phys_addr_t max_addr, - int nid) -{ - phys_addr_t found; - - found = memblock_find_in_range_node(0, max_addr, size, align, nid); - if (found && !memblock_reserve(found, size)) - return found; + this_end = memblock_nid_range(start, end, &this_nid); + if (this_nid == nid) { + phys_addr_t ret = memblock_find_region(start, this_end, size, align); + if (ret != MEMBLOCK_ERROR && + !memblock_add_region(&memblock.reserved, ret, size)) + return ret; + } + start = this_end; + } - return 0; + return MEMBLOCK_ERROR; } phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid) { - return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid); -} - -phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) -{ - return memblock_alloc_base_nid(size, align, max_addr, MAX_NUMNODES); -} - -phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) -{ - phys_addr_t alloc; + struct memblock_type *mem = &memblock.memory; + int i; - alloc = __memblock_alloc_base(size, align, max_addr); + BUG_ON(0 == size); - if (alloc == 0) - panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n", - (unsigned long long) size, (unsigned long long) max_addr); + /* We align the size to limit fragmentation. Without this, a lot of + * small allocs quickly eat up the whole reserve array on sparc + */ + size = memblock_align_up(size, align); - return alloc; -} + /* We do a bottom-up search for a region with the right + * nid since that's easier considering how memblock_nid_range() + * works + */ + for (i = 0; i < mem->cnt; i++) { + phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i], + size, align, nid); + if (ret != MEMBLOCK_ERROR) + return ret; + } -phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align) -{ - return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); + return 0; } phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid) @@ -769,7 +613,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i if (res) return res; - return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); + return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE); } @@ -777,9 +621,10 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i * Remaining API functions */ +/* You must call memblock_analyze() before this. 
*/ phys_addr_t __init memblock_phys_mem_size(void) { - return memblock.memory.total_size; + return memblock.memory_size; } /* lowest address */ @@ -795,28 +640,45 @@ phys_addr_t __init_memblock memblock_end_of_DRAM(void) return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size); } -void __init memblock_enforce_memory_limit(phys_addr_t limit) +/* You must call memblock_analyze() after this. */ +void __init memblock_enforce_memory_limit(phys_addr_t memory_limit) { unsigned long i; - phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX; + phys_addr_t limit; + struct memblock_region *p; - if (!limit) + if (!memory_limit) return; - /* find out max address */ + /* Truncate the memblock regions to satisfy the memory limit. */ + limit = memory_limit; for (i = 0; i < memblock.memory.cnt; i++) { - struct memblock_region *r = &memblock.memory.regions[i]; - - if (limit <= r->size) { - max_addr = r->base + limit; - break; + if (limit > memblock.memory.regions[i].size) { + limit -= memblock.memory.regions[i].size; + continue; } - limit -= r->size; + + memblock.memory.regions[i].size = limit; + memblock.memory.cnt = i + 1; + break; } - /* truncate both memory and reserved regions */ - __memblock_remove(&memblock.memory, max_addr, (phys_addr_t)ULLONG_MAX); - __memblock_remove(&memblock.reserved, max_addr, (phys_addr_t)ULLONG_MAX); + memory_limit = memblock_end_of_DRAM(); + + /* And truncate any reserves above the limit also. */ + for (i = 0; i < memblock.reserved.cnt; i++) { + p = &memblock.reserved.regions[i]; + + if (p->base > memory_limit) + p->size = 0; + else if ((p->base + p->size) > memory_limit) + p->size = memory_limit - p->base; + + if (p->size == 0) { + memblock_remove_region(&memblock.reserved, i); + i--; + } + } } static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr) @@ -850,18 +712,16 @@ int __init_memblock memblock_is_memory(phys_addr_t addr) int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) { int idx = memblock_search(&memblock.memory, base); - phys_addr_t end = base + memblock_cap_size(base, &size); if (idx == -1) return 0; return memblock.memory.regions[idx].base <= base && (memblock.memory.regions[idx].base + - memblock.memory.regions[idx].size) >= end; + memblock.memory.regions[idx].size) >= (base + size); } int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) { - memblock_cap_size(base, &size); return memblock_overlaps_region(&memblock.reserved, base, size) >= 0; } @@ -871,45 +731,86 @@ void __init_memblock memblock_set_current_limit(phys_addr_t limit) memblock.current_limit = limit; } -static void __init_memblock memblock_dump(struct memblock_type *type, char *name) +static void __init_memblock memblock_dump(struct memblock_type *region, char *name) { unsigned long long base, size; int i; - pr_info(" %s.cnt = 0x%lx\n", name, type->cnt); + pr_info(" %s.cnt = 0x%lx\n", name, region->cnt); - for (i = 0; i < type->cnt; i++) { - struct memblock_region *rgn = &type->regions[i]; - char nid_buf[32] = ""; - - base = rgn->base; - size = rgn->size; -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP - if (memblock_get_region_node(rgn) != MAX_NUMNODES) - snprintf(nid_buf, sizeof(nid_buf), " on node %d", - memblock_get_region_node(rgn)); -#endif - pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n", - name, i, base, base + size - 1, size, nid_buf); + for (i = 0; i < region->cnt; i++) { + base = region->regions[i].base; + size = region->regions[i].size; + + pr_info(" 
%s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n", + name, i, base, base + size - 1, size); } } -void __init_memblock __memblock_dump_all(void) +void __init_memblock memblock_dump_all(void) { + if (!memblock_debug) + return; + pr_info("MEMBLOCK configuration:\n"); - pr_info(" memory size = %#llx reserved size = %#llx\n", - (unsigned long long)memblock.memory.total_size, - (unsigned long long)memblock.reserved.total_size); + pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size); memblock_dump(&memblock.memory, "memory"); memblock_dump(&memblock.reserved, "reserved"); } -void __init memblock_allow_resize(void) +void __init memblock_analyze(void) { + int i; + + /* Check marker in the unused last array entry */ + WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base + != MEMBLOCK_INACTIVE); + WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base + != MEMBLOCK_INACTIVE); + + memblock.memory_size = 0; + + for (i = 0; i < memblock.memory.cnt; i++) + memblock.memory_size += memblock.memory.regions[i].size; + + /* We allow resizing from there */ memblock_can_resize = 1; } +void __init memblock_init(void) +{ + static int init_done __initdata = 0; + + if (init_done) + return; + init_done = 1; + + /* Hookup the initial arrays */ + memblock.memory.regions = memblock_memory_init_regions; + memblock.memory.max = INIT_MEMBLOCK_REGIONS; + memblock.reserved.regions = memblock_reserved_init_regions; + memblock.reserved.max = INIT_MEMBLOCK_REGIONS; + + /* Write a marker in the unused last array entry */ + memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE; + memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE; + + /* Create a dummy zero size MEMBLOCK which will get coalesced away later. + * This simplifies the memblock_add() code below... + */ + memblock.memory.regions[0].base = 0; + memblock.memory.regions[0].size = 0; + memblock.memory.cnt = 1; + + /* Ditto. */ + memblock.reserved.regions[0].base = 0; + memblock.reserved.regions[0].size = 0; + memblock.reserved.cnt = 1; + + memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE; +} + static int __init early_memblock(char *p) { if (p && strstr(p, "debug")) @@ -918,7 +819,7 @@ static int __init early_memblock(char *p) } early_param("memblock", early_memblock); -#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK) +#if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK) static int memblock_debug_show(struct seq_file *m, void *private) { diff --git a/trunk/mm/memcontrol.c b/trunk/mm/memcontrol.c index 94da8ee9e2c2..b63f5f7dfa07 100644 --- a/trunk/mm/memcontrol.c +++ b/trunk/mm/memcontrol.c @@ -50,8 +50,6 @@ #include #include #include "internal.h" -#include -#include #include @@ -288,10 +286,6 @@ struct mem_cgroup { */ struct mem_cgroup_stat_cpu nocpu_base; spinlock_t pcp_counter_lock; - -#ifdef CONFIG_INET - struct tcp_memcontrol tcp_mem; -#endif }; /* Stuffs for move charges at task migration. 
*/ @@ -371,58 +365,7 @@ enum charge_type { static void mem_cgroup_get(struct mem_cgroup *memcg); static void mem_cgroup_put(struct mem_cgroup *memcg); - -/* Writing them here to avoid exposing memcg's inner layout */ -#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM -#ifdef CONFIG_INET -#include -#include - -static bool mem_cgroup_is_root(struct mem_cgroup *memcg); -void sock_update_memcg(struct sock *sk) -{ - /* A socket spends its whole life in the same cgroup */ - if (sk->sk_cgrp) { - WARN_ON(1); - return; - } - if (static_branch(&memcg_socket_limit_enabled)) { - struct mem_cgroup *memcg; - - BUG_ON(!sk->sk_prot->proto_cgroup); - - rcu_read_lock(); - memcg = mem_cgroup_from_task(current); - if (!mem_cgroup_is_root(memcg)) { - mem_cgroup_get(memcg); - sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg); - } - rcu_read_unlock(); - } -} -EXPORT_SYMBOL(sock_update_memcg); - -void sock_release_memcg(struct sock *sk) -{ - if (static_branch(&memcg_socket_limit_enabled) && sk->sk_cgrp) { - struct mem_cgroup *memcg; - WARN_ON(!sk->sk_cgrp->memcg); - memcg = sk->sk_cgrp->memcg; - mem_cgroup_put(memcg); - } -} - -struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg) -{ - if (!memcg || mem_cgroup_is_root(memcg)) - return NULL; - - return &memcg->tcp_mem.cg_proto; -} -EXPORT_SYMBOL(tcp_proto_cgroup); -#endif /* CONFIG_INET */ -#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */ - +static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); static void drain_all_stock_async(struct mem_cgroup *memcg); static struct mem_cgroup_per_zone * @@ -802,7 +745,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page) preempt_enable(); } -struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont) +static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont) { return container_of(cgroup_subsys_state(cont, mem_cgroup_subsys_id), struct mem_cgroup, @@ -4669,36 +4612,6 @@ static int mem_control_numa_stat_open(struct inode *unused, struct file *file) } #endif /* CONFIG_NUMA */ -#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM -static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss) -{ - /* - * Part of this would be better living in a separate allocation - * function, leaving us with just the cgroup tree population work. - * We, however, depend on state such as network's proto_list that - * is only initialized after cgroup creation. I found the less - * cumbersome way to deal with it to defer it all to populate time - */ - return mem_cgroup_sockets_init(cont, ss); -}; - -static void kmem_cgroup_destroy(struct cgroup_subsys *ss, - struct cgroup *cont) -{ - mem_cgroup_sockets_destroy(cont, ss); -} -#else -static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss) -{ - return 0; -} - -static void kmem_cgroup_destroy(struct cgroup_subsys *ss, - struct cgroup *cont) -{ -} -#endif - static struct cftype mem_cgroup_files[] = { { .name = "usage_in_bytes", @@ -4930,13 +4843,12 @@ static void mem_cgroup_put(struct mem_cgroup *memcg) /* * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled. 
*/ -struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) +static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) { if (!memcg->res.parent) return NULL; return mem_cgroup_from_res_counter(memcg->res.parent, res); } -EXPORT_SYMBOL(parent_mem_cgroup); #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP static void __init enable_swap_cgroup(void) @@ -5052,8 +4964,6 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss, { struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); - kmem_cgroup_destroy(ss, cont); - mem_cgroup_put(memcg); } @@ -5067,10 +4977,6 @@ static int mem_cgroup_populate(struct cgroup_subsys *ss, if (!ret) ret = register_memsw_files(cont, ss); - - if (!ret) - ret = register_kmem_files(cont, ss); - return ret; } diff --git a/trunk/mm/mempolicy.c b/trunk/mm/mempolicy.c index c3fdbcb17658..adc395481813 100644 --- a/trunk/mm/mempolicy.c +++ b/trunk/mm/mempolicy.c @@ -636,7 +636,6 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, struct vm_area_struct *prev; struct vm_area_struct *vma; int err = 0; - pgoff_t pgoff; unsigned long vmstart; unsigned long vmend; @@ -644,21 +643,13 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, if (!vma || vma->vm_start > start) return -EFAULT; - if (start > vma->vm_start) - prev = vma; - for (; vma && vma->vm_start < end; prev = vma, vma = next) { next = vma->vm_next; vmstart = max(start, vma->vm_start); vmend = min(end, vma->vm_end); - if (mpol_equal(vma_policy(vma), new_pol)) - continue; - - pgoff = vma->vm_pgoff + - ((vmstart - vma->vm_start) >> PAGE_SHIFT); prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, - vma->anon_vma, vma->vm_file, pgoff, + vma->anon_vma, vma->vm_file, vma->vm_pgoff, new_pol); if (prev) { vma = prev; diff --git a/trunk/mm/nobootmem.c b/trunk/mm/nobootmem.c index 24f0fc1a56d6..7fa41b4a07bf 100644 --- a/trunk/mm/nobootmem.c +++ b/trunk/mm/nobootmem.c @@ -41,13 +41,14 @@ static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align, if (limit > memblock.current_limit) limit = memblock.current_limit; - addr = memblock_find_in_range_node(goal, limit, size, align, nid); - if (!addr) + addr = find_memory_core_early(nid, size, align, goal, limit); + + if (addr == MEMBLOCK_ERROR) return NULL; ptr = phys_to_virt(addr); memset(ptr, 0, size); - memblock_reserve(addr, size); + memblock_x86_reserve_range(addr, addr + size, "BOOTMEM"); /* * The min_count is set to 0 so that bootmem allocated blocks * are never reported as leaks. @@ -106,27 +107,23 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end) __free_pages_bootmem(pfn_to_page(i), 0); } -unsigned long __init free_low_memory_core_early(int nodeid) +unsigned long __init free_all_memory_core_early(int nodeid) { + int i; + u64 start, end; unsigned long count = 0; - phys_addr_t start, end; - u64 i; - - /* free reserved array temporarily so that it's treated as free area */ - memblock_free_reserved_regions(); - - for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) { - unsigned long start_pfn = PFN_UP(start); - unsigned long end_pfn = min_t(unsigned long, - PFN_DOWN(end), max_low_pfn); - if (start_pfn < end_pfn) { - __free_pages_memory(start_pfn, end_pfn); - count += end_pfn - start_pfn; - } + struct range *range = NULL; + int nr_range; + + nr_range = get_free_all_memory_range(&range, nodeid); + + for (i = 0; i < nr_range; i++) { + start = range[i].start; + end = range[i].end; + count += end - start; + __free_pages_memory(start, end); } - /* put region array back? 
*/ - memblock_reserve_reserved_regions(); return count; } @@ -140,7 +137,7 @@ unsigned long __init free_all_bootmem_node(pg_data_t *pgdat) { register_page_bootmem_info_node(pgdat); - /* free_low_memory_core_early(MAX_NUMNODES) will be called later */ + /* free_all_memory_core_early(MAX_NUMNODES) will be called later */ return 0; } @@ -158,7 +155,7 @@ unsigned long __init free_all_bootmem(void) * Use MAX_NUMNODES will make sure all ranges in early_node_map[] * will be used instead of only Node0 related */ - return free_low_memory_core_early(MAX_NUMNODES); + return free_all_memory_core_early(MAX_NUMNODES); } /** @@ -175,7 +172,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, unsigned long size) { kmemleak_free_part(__va(physaddr), size); - memblock_free(physaddr, size); + memblock_x86_free_range(physaddr, physaddr + size); } /** @@ -190,7 +187,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, void __init free_bootmem(unsigned long addr, unsigned long size) { kmemleak_free_part(__va(addr), size); - memblock_free(addr, size); + memblock_x86_free_range(addr, addr + size); } static void * __init ___alloc_bootmem_nopanic(unsigned long size, diff --git a/trunk/mm/page_alloc.c b/trunk/mm/page_alloc.c index bdc804c2d99c..2b8ba3aebf6e 100644 --- a/trunk/mm/page_alloc.c +++ b/trunk/mm/page_alloc.c @@ -181,17 +181,39 @@ static unsigned long __meminitdata nr_kernel_pages; static unsigned long __meminitdata nr_all_pages; static unsigned long __meminitdata dma_reserve; -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP -static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; -static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; -static unsigned long __initdata required_kernelcore; -static unsigned long __initdata required_movablecore; -static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES]; - -/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ -int movable_zone; -EXPORT_SYMBOL(movable_zone); -#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ +#ifdef CONFIG_ARCH_POPULATES_NODE_MAP + /* + * MAX_ACTIVE_REGIONS determines the maximum number of distinct + * ranges of memory (RAM) that may be registered with add_active_range(). 
+ * Ranges passed to add_active_range() will be merged if possible + * so the number of times add_active_range() can be called is + * related to the number of nodes and the number of holes + */ + #ifdef CONFIG_MAX_ACTIVE_REGIONS + /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */ + #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS + #else + #if MAX_NUMNODES >= 32 + /* If there can be many nodes, allow up to 50 holes per node */ + #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50) + #else + /* By default, allow up to 256 distinct regions */ + #define MAX_ACTIVE_REGIONS 256 + #endif + #endif + + static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS]; + static int __meminitdata nr_nodemap_entries; + static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; + static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; + static unsigned long __initdata required_kernelcore; + static unsigned long __initdata required_movablecore; + static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES]; + + /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ + int movable_zone; + EXPORT_SYMBOL(movable_zone); +#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ #if MAX_NUMNODES > 1 int nr_node_ids __read_mostly = MAX_NUMNODES; @@ -684,10 +706,10 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order) int loop; prefetchw(page); - for (loop = 0; loop < (1 << order); loop++) { + for (loop = 0; loop < BITS_PER_LONG; loop++) { struct page *p = &page[loop]; - if (loop + 1 < (1 << order)) + if (loop + 1 < BITS_PER_LONG) prefetchw(p + 1); __ClearPageReserved(p); set_page_count(p, 0); @@ -3715,7 +3737,35 @@ __meminit int init_currently_empty_zone(struct zone *zone, return 0; } -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP +#ifdef CONFIG_ARCH_POPULATES_NODE_MAP +/* + * Basic iterator support. Return the first range of PFNs for a node + * Note: nid == MAX_NUMNODES returns first region regardless of node + */ +static int __meminit first_active_region_index_in_nid(int nid) +{ + int i; + + for (i = 0; i < nr_nodemap_entries; i++) + if (nid == MAX_NUMNODES || early_node_map[i].nid == nid) + return i; + + return -1; +} + +/* + * Basic iterator support. Return the next active range of PFNs for a node + * Note: nid == MAX_NUMNODES returns next region regardless of node + */ +static int __meminit next_active_region_index_in_nid(int index, int nid) +{ + for (index = index + 1; index < nr_nodemap_entries; index++) + if (nid == MAX_NUMNODES || early_node_map[index].nid == nid) + return index; + + return -1; +} + #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID /* * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 
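The two index helpers restored above are only ever used as a pair: first_active_region_index_in_nid() locates the first early_node_map[] entry for a node, and next_active_region_index_in_nid() advances until it returns -1. A minimal sketch of that pattern, not part of the patch (the function name is invented for illustration; the patch itself wraps the same loop in the for_each_active_range_index_in_nid() macro further down):

/* Illustration only: count the pages registered for @nid in early_node_map[].
 * This is exactly the loop that for_each_active_range_index_in_nid() expands to.
 */
static unsigned long __meminit count_active_pages_in_nid(int nid)
{
	unsigned long pages = 0;
	int i;

	for (i = first_active_region_index_in_nid(nid); i != -1;
	     i = next_active_region_index_in_nid(i, nid))
		pages += early_node_map[i].end_pfn - early_node_map[i].start_pfn;

	return pages;
}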
@@ -3725,12 +3775,15 @@ __meminit int init_currently_empty_zone(struct zone *zone, */ int __meminit __early_pfn_to_nid(unsigned long pfn) { - unsigned long start_pfn, end_pfn; - int i, nid; + int i; + + for (i = 0; i < nr_nodemap_entries; i++) { + unsigned long start_pfn = early_node_map[i].start_pfn; + unsigned long end_pfn = early_node_map[i].end_pfn; - for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) if (start_pfn <= pfn && pfn < end_pfn) - return nid; + return early_node_map[i].nid; + } /* This is a memory hole */ return -1; } @@ -3759,6 +3812,11 @@ bool __meminit early_pfn_in_nid(unsigned long pfn, int node) } #endif +/* Basic iterator support to walk early_node_map[] */ +#define for_each_active_range_index_in_nid(i, nid) \ + for (i = first_active_region_index_in_nid(nid); i != -1; \ + i = next_active_region_index_in_nid(i, nid)) + /** * free_bootmem_with_active_regions - Call free_bootmem_node for each active range * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. @@ -3768,34 +3826,122 @@ bool __meminit early_pfn_in_nid(unsigned long pfn, int node) * add_active_ranges() contain no holes and may be freed, this * this function may be used instead of calling free_bootmem() manually. */ -void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn) +void __init free_bootmem_with_active_regions(int nid, + unsigned long max_low_pfn) { - unsigned long start_pfn, end_pfn; - int i, this_nid; + int i; + + for_each_active_range_index_in_nid(i, nid) { + unsigned long size_pages = 0; + unsigned long end_pfn = early_node_map[i].end_pfn; + + if (early_node_map[i].start_pfn >= max_low_pfn) + continue; - for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) { - start_pfn = min(start_pfn, max_low_pfn); - end_pfn = min(end_pfn, max_low_pfn); + if (end_pfn > max_low_pfn) + end_pfn = max_low_pfn; - if (start_pfn < end_pfn) - free_bootmem_node(NODE_DATA(this_nid), - PFN_PHYS(start_pfn), - (end_pfn - start_pfn) << PAGE_SHIFT); + size_pages = end_pfn - early_node_map[i].start_pfn; + free_bootmem_node(NODE_DATA(early_node_map[i].nid), + PFN_PHYS(early_node_map[i].start_pfn), + size_pages << PAGE_SHIFT); } } +#ifdef CONFIG_HAVE_MEMBLOCK +/* + * Basic iterator support. Return the last range of PFNs for a node + * Note: nid == MAX_NUMNODES returns last region regardless of node + */ +static int __meminit last_active_region_index_in_nid(int nid) +{ + int i; + + for (i = nr_nodemap_entries - 1; i >= 0; i--) + if (nid == MAX_NUMNODES || early_node_map[i].nid == nid) + return i; + + return -1; +} + +/* + * Basic iterator support. 
Return the previous active range of PFNs for a node + * Note: nid == MAX_NUMNODES returns next region regardless of node + */ +static int __meminit previous_active_region_index_in_nid(int index, int nid) +{ + for (index = index - 1; index >= 0; index--) + if (nid == MAX_NUMNODES || early_node_map[index].nid == nid) + return index; + + return -1; +} + +#define for_each_active_range_index_in_nid_reverse(i, nid) \ + for (i = last_active_region_index_in_nid(nid); i != -1; \ + i = previous_active_region_index_in_nid(i, nid)) + +u64 __init find_memory_core_early(int nid, u64 size, u64 align, + u64 goal, u64 limit) +{ + int i; + + /* Need to go over early_node_map to find out good range for node */ + for_each_active_range_index_in_nid_reverse(i, nid) { + u64 addr; + u64 ei_start, ei_last; + u64 final_start, final_end; + + ei_last = early_node_map[i].end_pfn; + ei_last <<= PAGE_SHIFT; + ei_start = early_node_map[i].start_pfn; + ei_start <<= PAGE_SHIFT; + + final_start = max(ei_start, goal); + final_end = min(ei_last, limit); + + if (final_start >= final_end) + continue; + + addr = memblock_find_in_range(final_start, final_end, size, align); + + if (addr == MEMBLOCK_ERROR) + continue; + + return addr; + } + + return MEMBLOCK_ERROR; +} +#endif + int __init add_from_early_node_map(struct range *range, int az, int nr_range, int nid) { - unsigned long start_pfn, end_pfn; int i; + u64 start, end; /* need to go over early_node_map to find out good range for node */ - for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) - nr_range = add_range(range, az, nr_range, start_pfn, end_pfn); + for_each_active_range_index_in_nid(i, nid) { + start = early_node_map[i].start_pfn; + end = early_node_map[i].end_pfn; + nr_range = add_range(range, az, nr_range, start, end); + } return nr_range; } +void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data) +{ + int i; + int ret; + + for_each_active_range_index_in_nid(i, nid) { + ret = work_fn(early_node_map[i].start_pfn, + early_node_map[i].end_pfn, data); + if (ret) + break; + } +} /** * sparse_memory_present_with_active_regions - Call memory_present for each active range * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used. 
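The work_with_active_regions() helper restored above walks every early_node_map[] range of a node and hands each [start_pfn, end_pfn) pair to a work_fn_t callback; a nonzero return from the callback stops the walk early. A minimal usage sketch, not part of the patch (the callback and wrapper names are invented for illustration):

/* Illustration only: a work_fn_t callback that accumulates the page count
 * of every active range it is handed. Returning 0 keeps the walk going.
 */
static int __init count_range_pages(unsigned long start_pfn,
				    unsigned long end_pfn, void *data)
{
	unsigned long *total = data;

	*total += end_pfn - start_pfn;
	return 0;
}

static unsigned long __init node_pages_from_active_ranges(int nid)
{
	unsigned long total = 0;

	/* Calls count_range_pages() once per early_node_map[] entry of @nid. */
	work_with_active_regions(nid, count_range_pages, &total);
	return total;
}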
@@ -3806,11 +3952,12 @@ int __init add_from_early_node_map(struct range *range, int az, */ void __init sparse_memory_present_with_active_regions(int nid) { - unsigned long start_pfn, end_pfn; - int i, this_nid; + int i; - for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) - memory_present(this_nid, start_pfn, end_pfn); + for_each_active_range_index_in_nid(i, nid) + memory_present(early_node_map[i].nid, + early_node_map[i].start_pfn, + early_node_map[i].end_pfn); } /** @@ -3827,15 +3974,13 @@ void __init sparse_memory_present_with_active_regions(int nid) void __meminit get_pfn_range_for_nid(unsigned int nid, unsigned long *start_pfn, unsigned long *end_pfn) { - unsigned long this_start_pfn, this_end_pfn; int i; - *start_pfn = -1UL; *end_pfn = 0; - for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) { - *start_pfn = min(*start_pfn, this_start_pfn); - *end_pfn = max(*end_pfn, this_end_pfn); + for_each_active_range_index_in_nid(i, nid) { + *start_pfn = min(*start_pfn, early_node_map[i].start_pfn); + *end_pfn = max(*end_pfn, early_node_map[i].end_pfn); } if (*start_pfn == -1UL) @@ -3938,16 +4083,46 @@ unsigned long __meminit __absent_pages_in_range(int nid, unsigned long range_start_pfn, unsigned long range_end_pfn) { - unsigned long nr_absent = range_end_pfn - range_start_pfn; - unsigned long start_pfn, end_pfn; - int i; + int i = 0; + unsigned long prev_end_pfn = 0, hole_pages = 0; + unsigned long start_pfn; + + /* Find the end_pfn of the first active range of pfns in the node */ + i = first_active_region_index_in_nid(nid); + if (i == -1) + return 0; + + prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn); + + /* Account for ranges before physical memory on this node */ + if (early_node_map[i].start_pfn > range_start_pfn) + hole_pages = prev_end_pfn - range_start_pfn; + + /* Find all holes for the zone within the node */ + for (; i != -1; i = next_active_region_index_in_nid(i, nid)) { + + /* No need to continue if prev_end_pfn is outside the zone */ + if (prev_end_pfn >= range_end_pfn) + break; + + /* Make sure the end of the zone is not within the hole */ + start_pfn = min(early_node_map[i].start_pfn, range_end_pfn); + prev_end_pfn = max(prev_end_pfn, range_start_pfn); - for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { - start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn); - end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn); - nr_absent -= end_pfn - start_pfn; + /* Update the hole size cound and move on */ + if (start_pfn > range_start_pfn) { + BUG_ON(prev_end_pfn > start_pfn); + hole_pages += start_pfn - prev_end_pfn; + } + prev_end_pfn = early_node_map[i].end_pfn; } - return nr_absent; + + /* Account for ranges past physical memory on this node */ + if (range_end_pfn > prev_end_pfn) + hole_pages += range_end_pfn - + max(range_start_pfn, prev_end_pfn); + + return hole_pages; } /** @@ -3968,14 +4143,14 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid, unsigned long zone_type, unsigned long *ignored) { - unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; - unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; unsigned long node_start_pfn, node_end_pfn; unsigned long zone_start_pfn, zone_end_pfn; get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); - zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); - zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); + zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type], + node_start_pfn); + zone_end_pfn = 
min(arch_zone_highest_possible_pfn[zone_type], + node_end_pfn); adjust_zone_range_for_zone_movable(nid, zone_type, node_start_pfn, node_end_pfn, @@ -3983,7 +4158,7 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid, return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); } -#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ +#else static inline unsigned long __meminit zone_spanned_pages_in_node(int nid, unsigned long zone_type, unsigned long *zones_size) @@ -4001,7 +4176,7 @@ static inline unsigned long __meminit zone_absent_pages_in_node(int nid, return zholes_size[zone_type]; } -#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ +#endif static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, unsigned long *zones_size, unsigned long *zholes_size) @@ -4224,10 +4399,10 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) */ if (pgdat == NODE_DATA(0)) { mem_map = NODE_DATA(0)->node_mem_map; -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP +#ifdef CONFIG_ARCH_POPULATES_NODE_MAP if (page_to_pfn(mem_map) != pgdat->node_start_pfn) mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET); -#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ +#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ } #endif #endif /* CONFIG_FLAT_NODE_MEM_MAP */ @@ -4252,7 +4427,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, free_area_init_core(pgdat, zones_size, zholes_size); } -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP +#ifdef CONFIG_ARCH_POPULATES_NODE_MAP #if MAX_NUMNODES > 1 /* @@ -4273,6 +4448,170 @@ static inline void setup_nr_node_ids(void) } #endif +/** + * add_active_range - Register a range of PFNs backed by physical memory + * @nid: The node ID the range resides on + * @start_pfn: The start PFN of the available physical memory + * @end_pfn: The end PFN of the available physical memory + * + * These ranges are stored in an early_node_map[] and later used by + * free_area_init_nodes() to calculate zone sizes and holes. If the + * range spans a memory hole, it is up to the architecture to ensure + * the memory is not freed by the bootmem allocator. If possible + * the range being registered will be merged with existing ranges. 
+ */ +void __init add_active_range(unsigned int nid, unsigned long start_pfn, + unsigned long end_pfn) +{ + int i; + + mminit_dprintk(MMINIT_TRACE, "memory_register", + "Entering add_active_range(%d, %#lx, %#lx) " + "%d entries of %d used\n", + nid, start_pfn, end_pfn, + nr_nodemap_entries, MAX_ACTIVE_REGIONS); + + mminit_validate_memmodel_limits(&start_pfn, &end_pfn); + + /* Merge with existing active regions if possible */ + for (i = 0; i < nr_nodemap_entries; i++) { + if (early_node_map[i].nid != nid) + continue; + + /* Skip if an existing region covers this new one */ + if (start_pfn >= early_node_map[i].start_pfn && + end_pfn <= early_node_map[i].end_pfn) + return; + + /* Merge forward if suitable */ + if (start_pfn <= early_node_map[i].end_pfn && + end_pfn > early_node_map[i].end_pfn) { + early_node_map[i].end_pfn = end_pfn; + return; + } + + /* Merge backward if suitable */ + if (start_pfn < early_node_map[i].start_pfn && + end_pfn >= early_node_map[i].start_pfn) { + early_node_map[i].start_pfn = start_pfn; + return; + } + } + + /* Check that early_node_map is large enough */ + if (i >= MAX_ACTIVE_REGIONS) { + printk(KERN_CRIT "More than %d memory regions, truncating\n", + MAX_ACTIVE_REGIONS); + return; + } + + early_node_map[i].nid = nid; + early_node_map[i].start_pfn = start_pfn; + early_node_map[i].end_pfn = end_pfn; + nr_nodemap_entries = i + 1; +} + +/** + * remove_active_range - Shrink an existing registered range of PFNs + * @nid: The node id the range is on that should be shrunk + * @start_pfn: The new PFN of the range + * @end_pfn: The new PFN of the range + * + * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node. + * The map is kept near the end physical page range that has already been + * registered. This function allows an arch to shrink an existing registered + * range. 
+ */ +void __init remove_active_range(unsigned int nid, unsigned long start_pfn, + unsigned long end_pfn) +{ + int i, j; + int removed = 0; + + printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n", + nid, start_pfn, end_pfn); + + /* Find the old active region end and shrink */ + for_each_active_range_index_in_nid(i, nid) { + if (early_node_map[i].start_pfn >= start_pfn && + early_node_map[i].end_pfn <= end_pfn) { + /* clear it */ + early_node_map[i].start_pfn = 0; + early_node_map[i].end_pfn = 0; + removed = 1; + continue; + } + if (early_node_map[i].start_pfn < start_pfn && + early_node_map[i].end_pfn > start_pfn) { + unsigned long temp_end_pfn = early_node_map[i].end_pfn; + early_node_map[i].end_pfn = start_pfn; + if (temp_end_pfn > end_pfn) + add_active_range(nid, end_pfn, temp_end_pfn); + continue; + } + if (early_node_map[i].start_pfn >= start_pfn && + early_node_map[i].end_pfn > end_pfn && + early_node_map[i].start_pfn < end_pfn) { + early_node_map[i].start_pfn = end_pfn; + continue; + } + } + + if (!removed) + return; + + /* remove the blank ones */ + for (i = nr_nodemap_entries - 1; i > 0; i--) { + if (early_node_map[i].nid != nid) + continue; + if (early_node_map[i].end_pfn) + continue; + /* we found it, get rid of it */ + for (j = i; j < nr_nodemap_entries - 1; j++) + memcpy(&early_node_map[j], &early_node_map[j+1], + sizeof(early_node_map[j])); + j = nr_nodemap_entries - 1; + memset(&early_node_map[j], 0, sizeof(early_node_map[j])); + nr_nodemap_entries--; + } +} + +/** + * remove_all_active_ranges - Remove all currently registered regions + * + * During discovery, it may be found that a table like SRAT is invalid + * and an alternative discovery method must be used. This function removes + * all currently registered regions. + */ +void __init remove_all_active_ranges(void) +{ + memset(early_node_map, 0, sizeof(early_node_map)); + nr_nodemap_entries = 0; +} + +/* Compare two active node_active_regions */ +static int __init cmp_node_active_region(const void *a, const void *b) +{ + struct node_active_region *arange = (struct node_active_region *)a; + struct node_active_region *brange = (struct node_active_region *)b; + + /* Done this way to avoid overflows */ + if (arange->start_pfn > brange->start_pfn) + return 1; + if (arange->start_pfn < brange->start_pfn) + return -1; + + return 0; +} + +/* sort the node_map by start_pfn */ +void __init sort_node_map(void) +{ + sort(early_node_map, (size_t)nr_nodemap_entries, + sizeof(struct node_active_region), + cmp_node_active_region, NULL); +} + /** * node_map_pfn_alignment - determine the maximum internode alignment * @@ -4295,11 +4634,15 @@ static inline void setup_nr_node_ids(void) unsigned long __init node_map_pfn_alignment(void) { unsigned long accl_mask = 0, last_end = 0; - unsigned long start, end, mask; int last_nid = -1; - int i, nid; + int i; + + for_each_active_range_index_in_nid(i, MAX_NUMNODES) { + int nid = early_node_map[i].nid; + unsigned long start = early_node_map[i].start_pfn; + unsigned long end = early_node_map[i].end_pfn; + unsigned long mask; - for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { if (!start || last_nid < 0 || last_nid == nid) { last_nid = nid; last_end = end; @@ -4326,12 +4669,12 @@ unsigned long __init node_map_pfn_alignment(void) /* Find the lowest pfn for a node */ static unsigned long __init find_min_pfn_for_node(int nid) { - unsigned long min_pfn = ULONG_MAX; - unsigned long start_pfn; int i; + unsigned long min_pfn = ULONG_MAX; - for_each_mem_pfn_range(i, nid, &start_pfn, NULL, 
NULL) - min_pfn = min(min_pfn, start_pfn); + /* Assuming a sorted map, the first range found has the starting pfn */ + for_each_active_range_index_in_nid(i, nid) + min_pfn = min(min_pfn, early_node_map[i].start_pfn); if (min_pfn == ULONG_MAX) { printk(KERN_WARNING @@ -4360,16 +4703,15 @@ unsigned long __init find_min_pfn_with_active_regions(void) */ static unsigned long __init early_calculate_totalpages(void) { + int i; unsigned long totalpages = 0; - unsigned long start_pfn, end_pfn; - int i, nid; - - for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { - unsigned long pages = end_pfn - start_pfn; + for (i = 0; i < nr_nodemap_entries; i++) { + unsigned long pages = early_node_map[i].end_pfn - + early_node_map[i].start_pfn; totalpages += pages; if (pages) - node_set_state(nid, N_HIGH_MEMORY); + node_set_state(early_node_map[i].nid, N_HIGH_MEMORY); } return totalpages; } @@ -4424,8 +4766,6 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn) /* Spread kernelcore memory as evenly as possible throughout nodes */ kernelcore_node = required_kernelcore / usable_nodes; for_each_node_state(nid, N_HIGH_MEMORY) { - unsigned long start_pfn, end_pfn; - /* * Recalculate kernelcore_node if the division per node * now exceeds what is necessary to satisfy the requested @@ -4442,10 +4782,13 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn) kernelcore_remaining = kernelcore_node; /* Go through each range of PFNs within this node */ - for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { + for_each_active_range_index_in_nid(i, nid) { + unsigned long start_pfn, end_pfn; unsigned long size_pages; - start_pfn = max(start_pfn, zone_movable_pfn[nid]); + start_pfn = max(early_node_map[i].start_pfn, + zone_movable_pfn[nid]); + end_pfn = early_node_map[i].end_pfn; if (start_pfn >= end_pfn) continue; @@ -4547,8 +4890,11 @@ static void check_for_regular_memory(pg_data_t *pgdat) */ void __init free_area_init_nodes(unsigned long *max_zone_pfn) { - unsigned long start_pfn, end_pfn; - int i, nid; + unsigned long nid; + int i; + + /* Sort early_node_map as initialisation assumes it is sorted */ + sort_node_map(); /* Record where the zone boundaries are */ memset(arch_zone_lowest_possible_pfn, 0, @@ -4595,9 +4941,11 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn) } /* Print out the early_node_map[] */ - printk("Early memory PFN ranges\n"); - for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) - printk(" %3d: %0#10lx -> %0#10lx\n", nid, start_pfn, end_pfn); + printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries); + for (i = 0; i < nr_nodemap_entries; i++) + printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid, + early_node_map[i].start_pfn, + early_node_map[i].end_pfn); /* Initialise every node */ mminit_verify_pageflags_layout(); @@ -4650,7 +4998,7 @@ static int __init cmdline_parse_movablecore(char *p) early_param("kernelcore", cmdline_parse_kernelcore); early_param("movablecore", cmdline_parse_movablecore); -#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ +#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ /** * set_dma_reserve - set the specified number of pages reserved in the first zone diff --git a/trunk/mm/slub.c b/trunk/mm/slub.c index 09ccee8fb58e..ed3334d9b6da 100644 --- a/trunk/mm/slub.c +++ b/trunk/mm/slub.c @@ -368,7 +368,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page VM_BUG_ON(!irqs_disabled()); #ifdef CONFIG_CMPXCHG_DOUBLE if (s->flags & 
__CMPXCHG_DOUBLE) { - if (cmpxchg_double(&page->freelist, &page->counters, + if (cmpxchg_double(&page->freelist, freelist_old, counters_old, freelist_new, counters_new)) return 1; @@ -402,7 +402,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, { #ifdef CONFIG_CMPXCHG_DOUBLE if (s->flags & __CMPXCHG_DOUBLE) { - if (cmpxchg_double(&page->freelist, &page->counters, + if (cmpxchg_double(&page->freelist, freelist_old, counters_old, freelist_new, counters_new)) return 1; diff --git a/trunk/net/8021q/vlan.c b/trunk/net/8021q/vlan.c index efea35b02e7f..5471628d3ffe 100644 --- a/trunk/net/8021q/vlan.c +++ b/trunk/net/8021q/vlan.c @@ -51,6 +51,27 @@ const char vlan_version[] = DRV_VERSION; /* End of global variables definitions. */ +static void vlan_group_free(struct vlan_group *grp) +{ + int i; + + for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++) + kfree(grp->vlan_devices_arrays[i]); + kfree(grp); +} + +static struct vlan_group *vlan_group_alloc(struct net_device *real_dev) +{ + struct vlan_group *grp; + + grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL); + if (!grp) + return NULL; + + grp->real_dev = real_dev; + return grp; +} + static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id) { struct net_device **array; @@ -71,29 +92,32 @@ static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id) return 0; } +static void vlan_rcu_free(struct rcu_head *rcu) +{ + vlan_group_free(container_of(rcu, struct vlan_group, rcu)); +} + void unregister_vlan_dev(struct net_device *dev, struct list_head *head) { - struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct vlan_dev_info *vlan = vlan_dev_info(dev); struct net_device *real_dev = vlan->real_dev; - struct vlan_info *vlan_info; + const struct net_device_ops *ops = real_dev->netdev_ops; struct vlan_group *grp; u16 vlan_id = vlan->vlan_id; ASSERT_RTNL(); - vlan_info = rtnl_dereference(real_dev->vlan_info); - BUG_ON(!vlan_info); - - grp = &vlan_info->grp; + grp = rtnl_dereference(real_dev->vlgrp); + BUG_ON(!grp); /* Take it out of our own structures, but be sure to interlock with * HW accelerating devices or SW vlan input packet processing if * VLAN is not 0 (leave it there for 802.1p). */ - if (vlan_id) - vlan_vid_del(real_dev, vlan_id); + if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER)) + ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id); - grp->nr_vlan_devs--; + grp->nr_vlans--; if (vlan->flags & VLAN_FLAG_GVRP) vlan_gvrp_request_leave(dev); @@ -105,9 +129,16 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) */ unregister_netdevice_queue(dev, head); - if (grp->nr_vlan_devs == 0) + /* If the group is now empty, kill off the group. */ + if (grp->nr_vlans == 0) { vlan_gvrp_uninit_applicant(real_dev); + RCU_INIT_POINTER(real_dev->vlgrp, NULL); + + /* Free the group, after all cpu's are done. 
*/ + call_rcu(&grp->rcu, vlan_rcu_free); + } + /* Get rid of the vlan's reference to real_dev */ dev_put(real_dev); } @@ -136,26 +167,21 @@ int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id) int register_vlan_dev(struct net_device *dev) { - struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct vlan_dev_info *vlan = vlan_dev_info(dev); struct net_device *real_dev = vlan->real_dev; + const struct net_device_ops *ops = real_dev->netdev_ops; u16 vlan_id = vlan->vlan_id; - struct vlan_info *vlan_info; - struct vlan_group *grp; + struct vlan_group *grp, *ngrp = NULL; int err; - err = vlan_vid_add(real_dev, vlan_id); - if (err) - return err; - - vlan_info = rtnl_dereference(real_dev->vlan_info); - /* vlan_info should be there now. vlan_vid_add took care of it */ - BUG_ON(!vlan_info); - - grp = &vlan_info->grp; - if (grp->nr_vlan_devs == 0) { + grp = rtnl_dereference(real_dev->vlgrp); + if (!grp) { + ngrp = grp = vlan_group_alloc(real_dev); + if (!grp) + return -ENOBUFS; err = vlan_gvrp_init_applicant(real_dev); if (err < 0) - goto out_vid_del; + goto out_free_group; } err = vlan_group_prealloc_vid(grp, vlan_id); @@ -166,7 +192,7 @@ int register_vlan_dev(struct net_device *dev) if (err < 0) goto out_uninit_applicant; - /* Account for reference in struct vlan_dev_priv */ + /* Account for reference in struct vlan_dev_info */ dev_hold(real_dev); netif_stacked_transfer_operstate(real_dev, dev); @@ -176,15 +202,24 @@ int register_vlan_dev(struct net_device *dev) * it into our local structure. */ vlan_group_set_device(grp, vlan_id, dev); - grp->nr_vlan_devs++; + grp->nr_vlans++; + + if (ngrp) { + rcu_assign_pointer(real_dev->vlgrp, ngrp); + } + if (real_dev->features & NETIF_F_HW_VLAN_FILTER) + ops->ndo_vlan_rx_add_vid(real_dev, vlan_id); return 0; out_uninit_applicant: - if (grp->nr_vlan_devs == 0) + if (ngrp) vlan_gvrp_uninit_applicant(real_dev); -out_vid_del: - vlan_vid_del(real_dev, vlan_id); +out_free_group: + if (ngrp) { + /* Free the group, after all cpu's are done. 
*/ + call_rcu(&ngrp->rcu, vlan_rcu_free); + } return err; } @@ -232,7 +267,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id) snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id); } - new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name, vlan_setup); + new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name, vlan_setup); if (new_dev == NULL) return -ENOBUFS; @@ -243,10 +278,10 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id) */ new_dev->mtu = real_dev->mtu; - vlan_dev_priv(new_dev)->vlan_id = vlan_id; - vlan_dev_priv(new_dev)->real_dev = real_dev; - vlan_dev_priv(new_dev)->dent = NULL; - vlan_dev_priv(new_dev)->flags = VLAN_FLAG_REORDER_HDR; + vlan_dev_info(new_dev)->vlan_id = vlan_id; + vlan_dev_info(new_dev)->real_dev = real_dev; + vlan_dev_info(new_dev)->dent = NULL; + vlan_dev_info(new_dev)->flags = VLAN_FLAG_REORDER_HDR; new_dev->rtnl_link_ops = &vlan_link_ops; err = register_vlan_dev(new_dev); @@ -263,7 +298,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id) static void vlan_sync_address(struct net_device *dev, struct net_device *vlandev) { - struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); + struct vlan_dev_info *vlan = vlan_dev_info(vlandev); /* May be called without an actual change */ if (!compare_ether_addr(vlan->real_dev_addr, dev->dev_addr)) @@ -325,26 +360,25 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, { struct net_device *dev = ptr; struct vlan_group *grp; - struct vlan_info *vlan_info; int i, flgs; struct net_device *vlandev; - struct vlan_dev_priv *vlan; + struct vlan_dev_info *vlan; LIST_HEAD(list); if (is_vlan_dev(dev)) __vlan_device_event(dev, event); if ((event == NETDEV_UP) && - (dev->features & NETIF_F_HW_VLAN_FILTER)) { + (dev->features & NETIF_F_HW_VLAN_FILTER) && + dev->netdev_ops->ndo_vlan_rx_add_vid) { pr_info("adding VLAN 0 to HW filter on device %s\n", dev->name); - vlan_vid_add(dev, 0); + dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0); } - vlan_info = rtnl_dereference(dev->vlan_info); - if (!vlan_info) + grp = rtnl_dereference(dev->vlgrp); + if (!grp) goto out; - grp = &vlan_info->grp; /* It is OK that we do not hold the group lock right now, * as we run under the RTNL lock. 
@@ -413,7 +447,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, if (!(flgs & IFF_UP)) continue; - vlan = vlan_dev_priv(vlandev); + vlan = vlan_dev_info(vlandev); if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) dev_change_flags(vlandev, flgs & ~IFF_UP); netif_stacked_transfer_operstate(dev, vlandev); @@ -431,7 +465,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, if (flgs & IFF_UP) continue; - vlan = vlan_dev_priv(vlandev); + vlan = vlan_dev_info(vlandev); if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) dev_change_flags(vlandev, flgs | IFF_UP); netif_stacked_transfer_operstate(dev, vlandev); @@ -448,9 +482,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, if (!vlandev) continue; - /* removal of last vid destroys vlan_info, abort + /* unregistration of last vlan destroys group, abort * afterwards */ - if (vlan_info->nr_vids == 1) + if (grp->nr_vlans == 1) i = VLAN_N_VID; unregister_vlan_dev(vlandev, &list); diff --git a/trunk/net/8021q/vlan.h b/trunk/net/8021q/vlan.h index a4886d94c40c..9fd45f3571f9 100644 --- a/trunk/net/8021q/vlan.h +++ b/trunk/net/8021q/vlan.h @@ -3,7 +3,6 @@ #include #include -#include /** @@ -41,10 +40,8 @@ struct vlan_pcpu_stats { u32 tx_dropped; }; -struct netpoll; - /** - * struct vlan_dev_priv - VLAN private device data + * struct vlan_dev_info - VLAN private device data * @nr_ingress_mappings: number of ingress priority mappings * @ingress_priority_map: ingress priority mappings * @nr_egress_mappings: number of egress priority mappings @@ -56,7 +53,7 @@ struct netpoll; * @dent: proc dir entry * @vlan_pcpu_stats: ptr to percpu rx stats */ -struct vlan_dev_priv { +struct vlan_dev_info { unsigned int nr_ingress_mappings; u32 ingress_priority_map[8]; unsigned int nr_egress_mappings; @@ -70,39 +67,13 @@ struct vlan_dev_priv { struct proc_dir_entry *dent; struct vlan_pcpu_stats __percpu *vlan_pcpu_stats; -#ifdef CONFIG_NET_POLL_CONTROLLER - struct netpoll *netpoll; -#endif }; -static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev) +static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev) { return netdev_priv(dev); } -/* if this changes, algorithm will have to be reworked because this - * depends on completely exhausting the VLAN identifier space. Thus - * it gives constant time look-up, but in many cases it wastes memory. - */ -#define VLAN_GROUP_ARRAY_SPLIT_PARTS 8 -#define VLAN_GROUP_ARRAY_PART_LEN (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS) - -struct vlan_group { - unsigned int nr_vlan_devs; - struct hlist_node hlist; /* linked list */ - struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS]; -}; - -struct vlan_info { - struct net_device *real_dev; /* The ethernet(like) device - * the vlan is attached to. 
- */ - struct vlan_group grp; - struct list_head vid_list; - unsigned int nr_vids; - struct rcu_head rcu; -}; - static inline struct net_device *vlan_group_get_device(struct vlan_group *vg, u16 vlan_id) { @@ -126,10 +97,10 @@ static inline void vlan_group_set_device(struct vlan_group *vg, static inline struct net_device *vlan_find_dev(struct net_device *real_dev, u16 vlan_id) { - struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info); + struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp); - if (vlan_info) - return vlan_group_get_device(&vlan_info->grp, vlan_id); + if (grp) + return vlan_group_get_device(grp, vlan_id); return NULL; } @@ -150,7 +121,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head); static inline u32 vlan_get_ingress_priority(struct net_device *dev, u16 vlan_tci) { - struct vlan_dev_priv *vip = vlan_dev_priv(dev); + struct vlan_dev_info *vip = vlan_dev_info(dev); return vip->ingress_priority_map[(vlan_tci >> VLAN_PRIO_SHIFT) & 0x7]; } diff --git a/trunk/net/8021q/vlan_core.c b/trunk/net/8021q/vlan_core.c index 4d39d802be2c..f5ffc02729d6 100644 --- a/trunk/net/8021q/vlan_core.c +++ b/trunk/net/8021q/vlan_core.c @@ -36,7 +36,7 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler) skb->pkt_type = PACKET_HOST; } - if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) { + if (!(vlan_dev_info(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) { unsigned int offset = skb->data - skb_mac_header(skb); /* @@ -55,7 +55,7 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler) skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci); skb->vlan_tci = 0; - rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats); + rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats); u64_stats_update_begin(&rx_stats->syncp); rx_stats->rx_packets++; @@ -71,10 +71,10 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler) struct net_device *__vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id) { - struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info); + struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp); - if (vlan_info) { - return vlan_group_get_device(&vlan_info->grp, vlan_id); + if (grp) { + return vlan_group_get_device(grp, vlan_id); } else { /* * Bonding slaves do not have grp assigned to themselves. @@ -90,13 +90,13 @@ EXPORT_SYMBOL(__vlan_find_dev_deep); struct net_device *vlan_dev_real_dev(const struct net_device *dev) { - return vlan_dev_priv(dev)->real_dev; + return vlan_dev_info(dev)->real_dev; } EXPORT_SYMBOL(vlan_dev_real_dev); u16 vlan_dev_vlan_id(const struct net_device *dev) { - return vlan_dev_priv(dev)->vlan_id; + return vlan_dev_info(dev)->vlan_id; } EXPORT_SYMBOL(vlan_dev_vlan_id); @@ -110,6 +110,39 @@ static struct sk_buff *vlan_reorder_header(struct sk_buff *skb) return skb; } +static void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr) +{ + __be16 proto; + unsigned char *rawp; + + /* + * Was a VLAN packet, grab the encapsulated protocol, which the layer + * three protocols care about. + */ + + proto = vhdr->h_vlan_encapsulated_proto; + if (ntohs(proto) >= 1536) { + skb->protocol = proto; + return; + } + + rawp = skb->data; + if (*(unsigned short *) rawp == 0xFFFF) + /* + * This is a magic hack to spot IPX packets. Older Novell + * breaks the protocol design and runs IPX over 802.3 without + * an 802.2 LLC layer. We look for FFFF which isn't a used + * 802.2 SSAP/DSAP. 
This won't work for fault tolerant netware + * but does for the rest. + */ + skb->protocol = htons(ETH_P_802_3); + else + /* + * Real 802.2 LLC + */ + skb->protocol = htons(ETH_P_802_2); +} + struct sk_buff *vlan_untag(struct sk_buff *skb) { struct vlan_hdr *vhdr; @@ -146,226 +179,3 @@ struct sk_buff *vlan_untag(struct sk_buff *skb) kfree_skb(skb); return NULL; } - - -/* - * vlan info and vid list - */ - -static void vlan_group_free(struct vlan_group *grp) -{ - int i; - - for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++) - kfree(grp->vlan_devices_arrays[i]); -} - -static void vlan_info_free(struct vlan_info *vlan_info) -{ - vlan_group_free(&vlan_info->grp); - kfree(vlan_info); -} - -static void vlan_info_rcu_free(struct rcu_head *rcu) -{ - vlan_info_free(container_of(rcu, struct vlan_info, rcu)); -} - -static struct vlan_info *vlan_info_alloc(struct net_device *dev) -{ - struct vlan_info *vlan_info; - - vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL); - if (!vlan_info) - return NULL; - - vlan_info->real_dev = dev; - INIT_LIST_HEAD(&vlan_info->vid_list); - return vlan_info; -} - -struct vlan_vid_info { - struct list_head list; - unsigned short vid; - int refcount; -}; - -static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info, - unsigned short vid) -{ - struct vlan_vid_info *vid_info; - - list_for_each_entry(vid_info, &vlan_info->vid_list, list) { - if (vid_info->vid == vid) - return vid_info; - } - return NULL; -} - -static struct vlan_vid_info *vlan_vid_info_alloc(unsigned short vid) -{ - struct vlan_vid_info *vid_info; - - vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL); - if (!vid_info) - return NULL; - vid_info->vid = vid; - - return vid_info; -} - -static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid, - struct vlan_vid_info **pvid_info) -{ - struct net_device *dev = vlan_info->real_dev; - const struct net_device_ops *ops = dev->netdev_ops; - struct vlan_vid_info *vid_info; - int err; - - vid_info = vlan_vid_info_alloc(vid); - if (!vid_info) - return -ENOMEM; - - if ((dev->features & NETIF_F_HW_VLAN_FILTER) && - ops->ndo_vlan_rx_add_vid) { - err = ops->ndo_vlan_rx_add_vid(dev, vid); - if (err) { - kfree(vid_info); - return err; - } - } - list_add(&vid_info->list, &vlan_info->vid_list); - vlan_info->nr_vids++; - *pvid_info = vid_info; - return 0; -} - -int vlan_vid_add(struct net_device *dev, unsigned short vid) -{ - struct vlan_info *vlan_info; - struct vlan_vid_info *vid_info; - bool vlan_info_created = false; - int err; - - ASSERT_RTNL(); - - vlan_info = rtnl_dereference(dev->vlan_info); - if (!vlan_info) { - vlan_info = vlan_info_alloc(dev); - if (!vlan_info) - return -ENOMEM; - vlan_info_created = true; - } - vid_info = vlan_vid_info_get(vlan_info, vid); - if (!vid_info) { - err = __vlan_vid_add(vlan_info, vid, &vid_info); - if (err) - goto out_free_vlan_info; - } - vid_info->refcount++; - - if (vlan_info_created) - rcu_assign_pointer(dev->vlan_info, vlan_info); - - return 0; - -out_free_vlan_info: - if (vlan_info_created) - kfree(vlan_info); - return err; -} -EXPORT_SYMBOL(vlan_vid_add); - -static void __vlan_vid_del(struct vlan_info *vlan_info, - struct vlan_vid_info *vid_info) -{ - struct net_device *dev = vlan_info->real_dev; - const struct net_device_ops *ops = dev->netdev_ops; - unsigned short vid = vid_info->vid; - int err; - - if ((dev->features & NETIF_F_HW_VLAN_FILTER) && - ops->ndo_vlan_rx_kill_vid) { - err = ops->ndo_vlan_rx_kill_vid(dev, vid); - if (err) { - pr_warn("failed to kill vid %d for device %s\n", 
- vid, dev->name); - } - } - list_del(&vid_info->list); - kfree(vid_info); - vlan_info->nr_vids--; -} - -void vlan_vid_del(struct net_device *dev, unsigned short vid) -{ - struct vlan_info *vlan_info; - struct vlan_vid_info *vid_info; - - ASSERT_RTNL(); - - vlan_info = rtnl_dereference(dev->vlan_info); - if (!vlan_info) - return; - - vid_info = vlan_vid_info_get(vlan_info, vid); - if (!vid_info) - return; - vid_info->refcount--; - if (vid_info->refcount == 0) { - __vlan_vid_del(vlan_info, vid_info); - if (vlan_info->nr_vids == 0) { - RCU_INIT_POINTER(dev->vlan_info, NULL); - call_rcu(&vlan_info->rcu, vlan_info_rcu_free); - } - } -} -EXPORT_SYMBOL(vlan_vid_del); - -int vlan_vids_add_by_dev(struct net_device *dev, - const struct net_device *by_dev) -{ - struct vlan_vid_info *vid_info; - struct vlan_info *vlan_info; - int err; - - ASSERT_RTNL(); - - vlan_info = rtnl_dereference(by_dev->vlan_info); - if (!vlan_info) - return 0; - - list_for_each_entry(vid_info, &vlan_info->vid_list, list) { - err = vlan_vid_add(dev, vid_info->vid); - if (err) - goto unwind; - } - return 0; - -unwind: - list_for_each_entry_continue_reverse(vid_info, - &vlan_info->vid_list, - list) { - vlan_vid_del(dev, vid_info->vid); - } - - return err; -} -EXPORT_SYMBOL(vlan_vids_add_by_dev); - -void vlan_vids_del_by_dev(struct net_device *dev, - const struct net_device *by_dev) -{ - struct vlan_vid_info *vid_info; - struct vlan_info *vlan_info; - - ASSERT_RTNL(); - - vlan_info = rtnl_dereference(by_dev->vlan_info); - if (!vlan_info) - return; - - list_for_each_entry(vid_info, &vlan_info->vid_list, list) - vlan_vid_del(dev, vid_info->vid); -} -EXPORT_SYMBOL(vlan_vids_del_by_dev); diff --git a/trunk/net/8021q/vlan_dev.c b/trunk/net/8021q/vlan_dev.c index 9988d4abb372..bc2528624583 100644 --- a/trunk/net/8021q/vlan_dev.c +++ b/trunk/net/8021q/vlan_dev.c @@ -33,7 +33,6 @@ #include "vlan.h" #include "vlanproc.h" #include -#include /* * Rebuild the Ethernet MAC header. This is called after an ARP @@ -73,7 +72,7 @@ vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb) { struct vlan_priority_tci_mapping *mp; - mp = vlan_dev_priv(dev)->egress_priority_map[(skb->priority & 0xF)]; + mp = vlan_dev_info(dev)->egress_priority_map[(skb->priority & 0xF)]; while (mp) { if (mp->priority == skb->priority) { return mp->vlan_qos; /* This should already be shifted @@ -104,10 +103,10 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, u16 vlan_tci = 0; int rc; - if (!(vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR)) { + if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR)) { vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN); - vlan_tci = vlan_dev_priv(dev)->vlan_id; + vlan_tci = vlan_dev_info(dev)->vlan_id; vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb); vhdr->h_vlan_TCI = htons(vlan_tci); @@ -130,7 +129,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, saddr = dev->dev_addr; /* Now make the underlying real hard header */ - dev = vlan_dev_priv(dev)->real_dev; + dev = vlan_dev_info(dev)->real_dev; rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen); if (rc > 0) rc += vhdrlen; @@ -150,29 +149,27 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs... 
*/ if (veth->h_vlan_proto != htons(ETH_P_8021Q) || - vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR) { + vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) { u16 vlan_tci; - vlan_tci = vlan_dev_priv(dev)->vlan_id; + vlan_tci = vlan_dev_info(dev)->vlan_id; vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb); skb = __vlan_hwaccel_put_tag(skb, vlan_tci); } - skb_set_dev(skb, vlan_dev_priv(dev)->real_dev); + skb_set_dev(skb, vlan_dev_info(dev)->real_dev); len = skb->len; - if (netpoll_tx_running(dev)) - return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev); ret = dev_queue_xmit(skb); if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { struct vlan_pcpu_stats *stats; - stats = this_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats); + stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats); u64_stats_update_begin(&stats->syncp); stats->tx_packets++; stats->tx_bytes += len; u64_stats_update_end(&stats->syncp); } else { - this_cpu_inc(vlan_dev_priv(dev)->vlan_pcpu_stats->tx_dropped); + this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped); } return ret; @@ -183,7 +180,7 @@ static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) /* TODO: gotta make sure the underlying layer can handle it, * maybe an IFF_VLAN_CAPABLE flag for devices? */ - if (vlan_dev_priv(dev)->real_dev->mtu < new_mtu) + if (vlan_dev_info(dev)->real_dev->mtu < new_mtu) return -ERANGE; dev->mtu = new_mtu; @@ -194,7 +191,7 @@ static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) void vlan_dev_set_ingress_priority(const struct net_device *dev, u32 skb_prio, u16 vlan_prio) { - struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct vlan_dev_info *vlan = vlan_dev_info(dev); if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio) vlan->nr_ingress_mappings--; @@ -207,7 +204,7 @@ void vlan_dev_set_ingress_priority(const struct net_device *dev, int vlan_dev_set_egress_priority(const struct net_device *dev, u32 skb_prio, u16 vlan_prio) { - struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct vlan_dev_info *vlan = vlan_dev_info(dev); struct vlan_priority_tci_mapping *mp = NULL; struct vlan_priority_tci_mapping *np; u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK; @@ -244,7 +241,7 @@ int vlan_dev_set_egress_priority(const struct net_device *dev, /* Flags are defined in the vlan_flags enum in include/linux/if_vlan.h file. 
*/ int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask) { - struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct vlan_dev_info *vlan = vlan_dev_info(dev); u32 old_flags = vlan->flags; if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP | @@ -264,12 +261,12 @@ int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask) void vlan_dev_get_realdev_name(const struct net_device *dev, char *result) { - strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23); + strncpy(result, vlan_dev_info(dev)->real_dev->name, 23); } static int vlan_dev_open(struct net_device *dev) { - struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct vlan_dev_info *vlan = vlan_dev_info(dev); struct net_device *real_dev = vlan->real_dev; int err; @@ -316,7 +313,7 @@ static int vlan_dev_open(struct net_device *dev) static int vlan_dev_stop(struct net_device *dev) { - struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct vlan_dev_info *vlan = vlan_dev_info(dev); struct net_device *real_dev = vlan->real_dev; dev_mc_unsync(real_dev, dev); @@ -335,7 +332,7 @@ static int vlan_dev_stop(struct net_device *dev) static int vlan_dev_set_mac_address(struct net_device *dev, void *p) { - struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + struct net_device *real_dev = vlan_dev_info(dev)->real_dev; struct sockaddr *addr = p; int err; @@ -361,7 +358,7 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p) static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + struct net_device *real_dev = vlan_dev_info(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; struct ifreq ifrr; int err = -EOPNOTSUPP; @@ -386,7 +383,7 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa) { - struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + struct net_device *real_dev = vlan_dev_info(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int err = 0; @@ -400,7 +397,7 @@ static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa) static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid, struct scatterlist *sgl, unsigned int sgc) { - struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + struct net_device *real_dev = vlan_dev_info(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int rc = 0; @@ -412,7 +409,7 @@ static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid, static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid) { - struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + struct net_device *real_dev = vlan_dev_info(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int len = 0; @@ -424,7 +421,7 @@ static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid) static int vlan_dev_fcoe_enable(struct net_device *dev) { - struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + struct net_device *real_dev = vlan_dev_info(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int rc = -EINVAL; @@ -435,7 +432,7 @@ static int vlan_dev_fcoe_enable(struct net_device *dev) static int vlan_dev_fcoe_disable(struct net_device *dev) { - struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + struct net_device *real_dev = vlan_dev_info(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; 
int rc = -EINVAL; @@ -446,7 +443,7 @@ static int vlan_dev_fcoe_disable(struct net_device *dev) static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) { - struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + struct net_device *real_dev = vlan_dev_info(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int rc = -EINVAL; @@ -458,7 +455,7 @@ static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid, struct scatterlist *sgl, unsigned int sgc) { - struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + struct net_device *real_dev = vlan_dev_info(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int rc = 0; @@ -471,7 +468,7 @@ static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid, static void vlan_dev_change_rx_flags(struct net_device *dev, int change) { - struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + struct net_device *real_dev = vlan_dev_info(dev)->real_dev; if (dev->flags & IFF_UP) { if (change & IFF_ALLMULTI) @@ -483,8 +480,8 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change) static void vlan_dev_set_rx_mode(struct net_device *vlan_dev) { - dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); - dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); + dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev); + dev_uc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev); } /* @@ -522,7 +519,7 @@ static const struct net_device_ops vlan_netdev_ops; static int vlan_dev_init(struct net_device *dev) { - struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + struct net_device *real_dev = vlan_dev_info(dev)->real_dev; int subclass = 0; netif_carrier_off(dev); @@ -571,8 +568,8 @@ static int vlan_dev_init(struct net_device *dev) vlan_dev_set_lockdep_class(dev, subclass); - vlan_dev_priv(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats); - if (!vlan_dev_priv(dev)->vlan_pcpu_stats) + vlan_dev_info(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats); + if (!vlan_dev_info(dev)->vlan_pcpu_stats) return -ENOMEM; return 0; @@ -581,7 +578,7 @@ static int vlan_dev_init(struct net_device *dev) static void vlan_dev_uninit(struct net_device *dev) { struct vlan_priority_tci_mapping *pm; - struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct vlan_dev_info *vlan = vlan_dev_info(dev); int i; free_percpu(vlan->vlan_pcpu_stats); @@ -594,17 +591,18 @@ static void vlan_dev_uninit(struct net_device *dev) } } -static netdev_features_t vlan_dev_fix_features(struct net_device *dev, - netdev_features_t features) +static u32 vlan_dev_fix_features(struct net_device *dev, u32 features) { - struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + struct net_device *real_dev = vlan_dev_info(dev)->real_dev; u32 old_features = features; - features &= real_dev->vlan_features; - features |= NETIF_F_RXCSUM; features &= real_dev->features; + features &= real_dev->vlan_features; features |= old_features & NETIF_F_SOFT_FEATURES; + + if (dev_ethtool_get_rx_csum(real_dev)) + features |= NETIF_F_RXCSUM; features |= NETIF_F_LLTX; return features; @@ -613,7 +611,7 @@ static netdev_features_t vlan_dev_fix_features(struct net_device *dev, static int vlan_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { - const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + const struct vlan_dev_info *vlan = vlan_dev_info(dev); return __ethtool_get_settings(vlan->real_dev, 
cmd); } @@ -629,7 +627,7 @@ static void vlan_ethtool_get_drvinfo(struct net_device *dev, static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { - if (vlan_dev_priv(dev)->vlan_pcpu_stats) { + if (vlan_dev_info(dev)->vlan_pcpu_stats) { struct vlan_pcpu_stats *p; u32 rx_errors = 0, tx_dropped = 0; int i; @@ -638,7 +636,7 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes; unsigned int start; - p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i); + p = per_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats, i); do { start = u64_stats_fetch_begin_bh(&p->syncp); rxpackets = p->rx_packets; @@ -663,57 +661,6 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st return stats; } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void vlan_dev_poll_controller(struct net_device *dev) -{ - return; -} - -static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo) -{ - struct vlan_dev_priv *info = vlan_dev_priv(dev); - struct net_device *real_dev = info->real_dev; - struct netpoll *netpoll; - int err = 0; - - netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL); - err = -ENOMEM; - if (!netpoll) - goto out; - - netpoll->dev = real_dev; - strlcpy(netpoll->dev_name, real_dev->name, IFNAMSIZ); - - err = __netpoll_setup(netpoll); - if (err) { - kfree(netpoll); - goto out; - } - - info->netpoll = netpoll; - -out: - return err; -} - -static void vlan_dev_netpoll_cleanup(struct net_device *dev) -{ - struct vlan_dev_priv *info = vlan_dev_priv(dev); - struct netpoll *netpoll = info->netpoll; - - if (!netpoll) - return; - - info->netpoll = NULL; - - /* Wait for transmitting packets to finish before freeing. 
*/ - synchronize_rcu_bh(); - - __netpoll_cleanup(netpoll); - kfree(netpoll); -} -#endif /* CONFIG_NET_POLL_CONTROLLER */ - static const struct ethtool_ops vlan_ethtool_ops = { .get_settings = vlan_ethtool_get_settings, .get_drvinfo = vlan_ethtool_get_drvinfo, @@ -741,11 +688,6 @@ static const struct net_device_ops vlan_netdev_ops = { .ndo_fcoe_disable = vlan_dev_fcoe_disable, .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn, .ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target, -#endif -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = vlan_dev_poll_controller, - .ndo_netpoll_setup = vlan_dev_netpoll_setup, - .ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup, #endif .ndo_fix_features = vlan_dev_fix_features, }; diff --git a/trunk/net/8021q/vlan_gvrp.c b/trunk/net/8021q/vlan_gvrp.c index 6f9755352760..061ceceeef12 100644 --- a/trunk/net/8021q/vlan_gvrp.c +++ b/trunk/net/8021q/vlan_gvrp.c @@ -29,7 +29,7 @@ static struct garp_application vlan_gvrp_app __read_mostly = { int vlan_gvrp_request_join(const struct net_device *dev) { - const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + const struct vlan_dev_info *vlan = vlan_dev_info(dev); __be16 vlan_id = htons(vlan->vlan_id); return garp_request_join(vlan->real_dev, &vlan_gvrp_app, @@ -38,7 +38,7 @@ int vlan_gvrp_request_join(const struct net_device *dev) void vlan_gvrp_request_leave(const struct net_device *dev) { - const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + const struct vlan_dev_info *vlan = vlan_dev_info(dev); __be16 vlan_id = htons(vlan->vlan_id); garp_request_leave(vlan->real_dev, &vlan_gvrp_app, diff --git a/trunk/net/8021q/vlan_netlink.c b/trunk/net/8021q/vlan_netlink.c index 50711368ad6a..235c2197dbb6 100644 --- a/trunk/net/8021q/vlan_netlink.c +++ b/trunk/net/8021q/vlan_netlink.c @@ -105,7 +105,7 @@ static int vlan_changelink(struct net_device *dev, static int vlan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { - struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct vlan_dev_info *vlan = vlan_dev_info(dev); struct net_device *real_dev; int err; @@ -149,7 +149,7 @@ static inline size_t vlan_qos_map_size(unsigned int n) static size_t vlan_get_size(const struct net_device *dev) { - struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct vlan_dev_info *vlan = vlan_dev_info(dev); return nla_total_size(2) + /* IFLA_VLAN_ID */ sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */ @@ -159,14 +159,14 @@ static size_t vlan_get_size(const struct net_device *dev) static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev) { - struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct vlan_dev_info *vlan = vlan_dev_info(dev); struct vlan_priority_tci_mapping *pm; struct ifla_vlan_flags f; struct ifla_vlan_qos_mapping m; struct nlattr *nest; unsigned int i; - NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id); + NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_info(dev)->vlan_id); if (vlan->flags) { f.flags = vlan->flags; f.mask = ~0; @@ -218,7 +218,7 @@ struct rtnl_link_ops vlan_link_ops __read_mostly = { .kind = "vlan", .maxtype = IFLA_VLAN_MAX, .policy = vlan_policy, - .priv_size = sizeof(struct vlan_dev_priv), + .priv_size = sizeof(struct vlan_dev_info), .setup = vlan_setup, .validate = vlan_validate, .newlink = vlan_newlink, diff --git a/trunk/net/8021q/vlanproc.c b/trunk/net/8021q/vlanproc.c index c718fd3664b6..d34b6daf8930 100644 --- a/trunk/net/8021q/vlanproc.c +++ b/trunk/net/8021q/vlanproc.c @@ -168,13 +168,13 @@ int __net_init vlan_proc_init(struct net 
*net) int vlan_proc_add_dev(struct net_device *vlandev) { - struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); + struct vlan_dev_info *dev_info = vlan_dev_info(vlandev); struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id); - vlan->dent = + dev_info->dent = proc_create_data(vlandev->name, S_IFREG|S_IRUSR|S_IWUSR, vn->proc_vlan_dir, &vlandev_fops, vlandev); - if (!vlan->dent) + if (!dev_info->dent) return -ENOBUFS; return 0; } @@ -187,10 +187,10 @@ int vlan_proc_rem_dev(struct net_device *vlandev) struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id); /** NOTE: This will consume the memory pointed to by dent, it seems. */ - if (vlan_dev_priv(vlandev)->dent) { - remove_proc_entry(vlan_dev_priv(vlandev)->dent->name, + if (vlan_dev_info(vlandev)->dent) { + remove_proc_entry(vlan_dev_info(vlandev)->dent->name, vn->proc_vlan_dir); - vlan_dev_priv(vlandev)->dent = NULL; + vlan_dev_info(vlandev)->dent = NULL; } return 0; } @@ -268,10 +268,10 @@ static int vlan_seq_show(struct seq_file *seq, void *v) nmtype ? nmtype : "UNKNOWN"); } else { const struct net_device *vlandev = v; - const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); + const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev); seq_printf(seq, "%-15s| %d | %s\n", vlandev->name, - vlan->vlan_id, vlan->real_dev->name); + dev_info->vlan_id, dev_info->real_dev->name); } return 0; } @@ -279,7 +279,7 @@ static int vlan_seq_show(struct seq_file *seq, void *v) static int vlandev_seq_show(struct seq_file *seq, void *offset) { struct net_device *vlandev = (struct net_device *) seq->private; - const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); + const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev); struct rtnl_link_stats64 temp; const struct rtnl_link_stats64 *stats; static const char fmt64[] = "%30s %12llu\n"; @@ -291,8 +291,8 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset) stats = dev_get_stats(vlandev, &temp); seq_printf(seq, "%s VID: %d REORDER_HDR: %i dev->priv_flags: %hx\n", - vlandev->name, vlan->vlan_id, - (int)(vlan->flags & 1), vlandev->priv_flags); + vlandev->name, dev_info->vlan_id, + (int)(dev_info->flags & 1), vlandev->priv_flags); seq_printf(seq, fmt64, "total frames received", stats->rx_packets); seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes); @@ -300,23 +300,23 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset) seq_puts(seq, "\n"); seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets); seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes); - seq_printf(seq, "Device: %s", vlan->real_dev->name); + seq_printf(seq, "Device: %s", dev_info->real_dev->name); /* now show all PRIORITY mappings relating to this VLAN */ seq_printf(seq, "\nINGRESS priority mappings: " "0:%u 1:%u 2:%u 3:%u 4:%u 5:%u 6:%u 7:%u\n", - vlan->ingress_priority_map[0], - vlan->ingress_priority_map[1], - vlan->ingress_priority_map[2], - vlan->ingress_priority_map[3], - vlan->ingress_priority_map[4], - vlan->ingress_priority_map[5], - vlan->ingress_priority_map[6], - vlan->ingress_priority_map[7]); + dev_info->ingress_priority_map[0], + dev_info->ingress_priority_map[1], + dev_info->ingress_priority_map[2], + dev_info->ingress_priority_map[3], + dev_info->ingress_priority_map[4], + dev_info->ingress_priority_map[5], + dev_info->ingress_priority_map[6], + dev_info->ingress_priority_map[7]); seq_printf(seq, " EGRESS priority mappings: "); for (i = 0; i < 16; i++) { const struct vlan_priority_tci_mapping *mp - = vlan->egress_priority_map[i]; 
+ = dev_info->egress_priority_map[i]; while (mp) { seq_printf(seq, "%u:%hu ", mp->priority, ((mp->vlan_qos >> 13) & 0x7)); diff --git a/trunk/net/Kconfig b/trunk/net/Kconfig index e07272d0bb2d..a07314844238 100644 --- a/trunk/net/Kconfig +++ b/trunk/net/Kconfig @@ -215,7 +215,6 @@ source "net/sched/Kconfig" source "net/dcb/Kconfig" source "net/dns_resolver/Kconfig" source "net/batman-adv/Kconfig" -source "net/openvswitch/Kconfig" config RPS boolean @@ -233,19 +232,6 @@ config XPS depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS default y -config NETPRIO_CGROUP - tristate "Network priority cgroup" - depends on CGROUPS - ---help--- - Cgroup subsystem for use in assigning processes to network priorities on - a per-interface basis - -config BQL - boolean - depends on SYSFS - select DQL - default y - config HAVE_BPF_JIT bool diff --git a/trunk/net/Makefile b/trunk/net/Makefile index ad432fa4d934..acdde4950de4 100644 --- a/trunk/net/Makefile +++ b/trunk/net/Makefile @@ -69,4 +69,3 @@ obj-$(CONFIG_DNS_RESOLVER) += dns_resolver/ obj-$(CONFIG_CEPH_LIB) += ceph/ obj-$(CONFIG_BATMAN_ADV) += batman-adv/ obj-$(CONFIG_NFC) += nfc/ -obj-$(CONFIG_OPENVSWITCH) += openvswitch/ diff --git a/trunk/net/atm/atm_misc.c b/trunk/net/atm/atm_misc.c index 876fbe83e2e4..f41f02656ff4 100644 --- a/trunk/net/atm/atm_misc.c +++ b/trunk/net/atm/atm_misc.c @@ -26,7 +26,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size, gfp_t gfp_flags) { struct sock *sk = sk_atm(vcc); - int guess = SKB_TRUESIZE(pdu_size); + int guess = atm_guess_pdu2truesize(pdu_size); atm_force_charge(vcc, guess); if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { diff --git a/trunk/net/atm/br2684.c b/trunk/net/atm/br2684.c index 353fccf1cde3..d07223c834af 100644 --- a/trunk/net/atm/br2684.c +++ b/trunk/net/atm/br2684.c @@ -53,7 +53,6 @@ static const unsigned char ethertype_ipv4[] = { ETHERTYPE_IPV4 }; static const unsigned char ethertype_ipv6[] = { ETHERTYPE_IPV6 }; static const unsigned char llc_oui_pid_pad[] = { LLC, SNAP_BRIDGED, PID_ETHERNET, PAD_BRIDGED }; -static const unsigned char pad[] = { PAD_BRIDGED }; static const unsigned char llc_oui_ipv4[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV4 }; static const unsigned char llc_oui_ipv6[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV6 }; @@ -203,10 +202,7 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev, { struct br2684_dev *brdev = BRPRIV(dev); struct atm_vcc *atmvcc; - int minheadroom = (brvcc->encaps == e_llc) ? - ((brdev->payload == p_bridged) ? - sizeof(llc_oui_pid_pad) : sizeof(llc_oui_ipv4)) : - ((brdev->payload == p_bridged) ? BR2684_PAD_LEN : 0); + int minheadroom = (brvcc->encaps == e_llc) ? 
10 : 2; if (skb_headroom(skb) < minheadroom) { struct sk_buff *skb2 = skb_realloc_headroom(skb, minheadroom); @@ -454,7 +450,7 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb) skb->pkt_type = PACKET_HOST; } else { /* p_bridged */ /* first 2 chars should be 0 */ - if (memcmp(skb->data, pad, BR2684_PAD_LEN) != 0) + if (*((u16 *) (skb->data)) != 0) goto error; skb_pull(skb, BR2684_PAD_LEN); skb->protocol = eth_type_trans(skb, net_dev); @@ -493,11 +489,15 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb) */ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg) { + struct sk_buff_head queue; + int err; struct br2684_vcc *brvcc; + struct sk_buff *skb, *tmp; + struct sk_buff_head *rq; struct br2684_dev *brdev; struct net_device *net_dev; struct atm_backend_br2684 be; - int err; + unsigned long flags; if (copy_from_user(&be, arg, sizeof be)) return -EFAULT; @@ -550,6 +550,23 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg) atmvcc->push = br2684_push; atmvcc->pop = br2684_pop; + __skb_queue_head_init(&queue); + rq = &sk_atm(atmvcc)->sk_receive_queue; + + spin_lock_irqsave(&rq->lock, flags); + skb_queue_splice_init(rq, &queue); + spin_unlock_irqrestore(&rq->lock, flags); + + skb_queue_walk_safe(&queue, skb, tmp) { + struct net_device *dev; + + br2684_push(atmvcc, skb); + dev = skb->dev; + + dev->stats.rx_bytes -= skb->len; + dev->stats.rx_packets--; + } + /* initialize netdev carrier state */ if (atmvcc->dev->signal == ATM_PHY_SIG_LOST) netif_carrier_off(net_dev); @@ -557,10 +574,6 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg) netif_carrier_on(net_dev); __module_get(THIS_MODULE); - - /* re-process everything received between connection setup and - backend setup */ - vcc_process_recv_queue(atmvcc); return 0; error: @@ -587,7 +600,6 @@ static void br2684_setup(struct net_device *netdev) struct br2684_dev *brdev = BRPRIV(netdev); ether_setup(netdev); - netdev->hard_header_len += sizeof(llc_oui_pid_pad); /* worst case */ brdev->net_dev = netdev; netdev->netdev_ops = &br2684_netdev_ops; @@ -600,7 +612,7 @@ static void br2684_setup_routed(struct net_device *netdev) struct br2684_dev *brdev = BRPRIV(netdev); brdev->net_dev = netdev; - netdev->hard_header_len = sizeof(llc_oui_ipv4); /* worst case */ + netdev->hard_header_len = 0; netdev->netdev_ops = &br2684_netdev_ops_routed; netdev->addr_len = 0; netdev->mtu = 1500; diff --git a/trunk/net/atm/clip.c b/trunk/net/atm/clip.c index c12c2582457c..852394072fa1 100644 --- a/trunk/net/atm/clip.c +++ b/trunk/net/atm/clip.c @@ -33,7 +33,6 @@ #include #include /* for struct rtable and routing */ #include /* icmp_send */ -#include #include /* for HZ */ #include #include /* for htons etc. */ @@ -120,7 +119,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc) /* The neighbour entry n->lock is held. 
*/ static int neigh_check_cb(struct neighbour *n) { - struct atmarp_entry *entry = neighbour_priv(n); + struct atmarp_entry *entry = NEIGH2ENTRY(n); struct clip_vcc *cv; for (cv = entry->vccs; cv; cv = cv->next) { @@ -190,13 +189,6 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb) struct clip_vcc *clip_vcc = CLIP_VCC(vcc); pr_debug("\n"); - - if (!clip_devs) { - atm_return(vcc, skb->truesize); - kfree_skb(skb); - return; - } - if (!skb) { pr_debug("removing VCC %p\n", clip_vcc); if (clip_vcc->entry) @@ -263,10 +255,8 @@ static void clip_pop(struct atm_vcc *vcc, struct sk_buff *skb) static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb) { - __be32 *ip = (__be32 *) neigh->primary_key; - pr_debug("(neigh %p, skb %p)\n", neigh, skb); - to_atmarpd(act_need, PRIV(neigh->dev)->number, *ip); + to_atmarpd(act_need, PRIV(neigh->dev)->number, NEIGH2ENTRY(neigh)->ip); } static void clip_neigh_error(struct neighbour *neigh, struct sk_buff *skb) @@ -287,24 +277,72 @@ static const struct neigh_ops clip_neigh_ops = { static int clip_constructor(struct neighbour *neigh) { - struct atmarp_entry *entry = neighbour_priv(neigh); + struct atmarp_entry *entry = NEIGH2ENTRY(neigh); + struct net_device *dev = neigh->dev; + struct in_device *in_dev; + struct neigh_parms *parms; - if (neigh->tbl->family != AF_INET) + pr_debug("(neigh %p, entry %p)\n", neigh, entry); + neigh->type = inet_addr_type(&init_net, entry->ip); + if (neigh->type != RTN_UNICAST) return -EINVAL; - if (neigh->type != RTN_UNICAST) + rcu_read_lock(); + in_dev = __in_dev_get_rcu(dev); + if (!in_dev) { + rcu_read_unlock(); return -EINVAL; + } + + parms = in_dev->arp_parms; + __neigh_parms_put(neigh->parms); + neigh->parms = neigh_parms_clone(parms); + rcu_read_unlock(); - neigh->nud_state = NUD_NONE; neigh->ops = &clip_neigh_ops; - neigh->output = neigh->ops->output; + neigh->output = neigh->nud_state & NUD_VALID ? + neigh->ops->connected_output : neigh->ops->output; entry->neigh = neigh; entry->vccs = NULL; entry->expires = jiffies - 1; - return 0; } +static u32 clip_hash(const void *pkey, const struct net_device *dev, __u32 rnd) +{ + return jhash_2words(*(u32 *) pkey, dev->ifindex, rnd); +} + +static struct neigh_table clip_tbl = { + .family = AF_INET, + .entry_size = sizeof(struct neighbour)+sizeof(struct atmarp_entry), + .key_len = 4, + .hash = clip_hash, + .constructor = clip_constructor, + .id = "clip_arp_cache", + + /* parameters are copied from ARP ... 
*/ + .parms = { + .tbl = &clip_tbl, + .base_reachable_time = 30 * HZ, + .retrans_time = 1 * HZ, + .gc_staletime = 60 * HZ, + .reachable_time = 30 * HZ, + .delay_probe_time = 5 * HZ, + .queue_len = 3, + .ucast_probes = 3, + .mcast_probes = 3, + .anycast_delay = 1 * HZ, + .proxy_delay = (8 * HZ) / 10, + .proxy_qlen = 64, + .locktime = 1 * HZ, + }, + .gc_interval = 30 * HZ, + .gc_thresh1 = 128, + .gc_thresh2 = 512, + .gc_thresh3 = 1024, +}; + /* @@@ copy bh locking from arp.c -- need to bh-enable atm code before */ /* @@ -338,19 +376,28 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb, dev->stats.tx_dropped++; return NETDEV_TX_OK; } - n = dst_get_neighbour_noref(dst); + n = dst_get_neighbour(dst); if (!n) { +#if 0 + n = clip_find_neighbour(skb_dst(skb), 1); + if (!n) { + dev_kfree_skb(skb); /* lost that one */ + dev->stats.tx_dropped++; + return 0; + } + dst_set_neighbour(dst, n); +#endif pr_err("NO NEIGHBOUR !\n"); dev_kfree_skb(skb); dev->stats.tx_dropped++; return NETDEV_TX_OK; } - entry = neighbour_priv(n); + entry = NEIGH2ENTRY(n); if (!entry->vccs) { if (time_after(jiffies, entry->expires)) { /* should be resolved */ entry->expires = jiffies + ATMARP_RETRY_DELAY * HZ; - to_atmarpd(act_need, PRIV(dev)->number, *((__be32 *)n->primary_key)); + to_atmarpd(act_need, PRIV(dev)->number, entry->ip); } if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS) skb_queue_tail(&entry->neigh->arp_queue, skb); @@ -401,7 +448,10 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb, static int clip_mkip(struct atm_vcc *vcc, int timeout) { + struct sk_buff_head *rq, queue; struct clip_vcc *clip_vcc; + struct sk_buff *skb, *tmp; + unsigned long flags; if (!vcc->push) return -EBADFD; @@ -422,9 +472,29 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout) vcc->push = clip_push; vcc->pop = clip_pop; - /* re-process everything received between connection setup and MKIP */ - vcc_process_recv_queue(vcc); + __skb_queue_head_init(&queue); + rq = &sk_atm(vcc)->sk_receive_queue; + spin_lock_irqsave(&rq->lock, flags); + skb_queue_splice_init(rq, &queue); + spin_unlock_irqrestore(&rq->lock, flags); + + /* re-process everything received between connection setup and MKIP */ + skb_queue_walk_safe(&queue, skb, tmp) { + if (!clip_devs) { + atm_return(vcc, skb->truesize); + kfree_skb(skb); + } else { + struct net_device *dev = skb->dev; + unsigned int len = skb->len; + + skb_get(skb); + clip_push(vcc, skb); + dev->stats.rx_packets--; + dev->stats.rx_bytes -= len; + kfree_skb(skb); + } + } return 0; } @@ -453,11 +523,11 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip) rt = ip_route_output(&init_net, ip, 0, 1, 0); if (IS_ERR(rt)) return PTR_ERR(rt); - neigh = __neigh_lookup(&arp_tbl, &ip, rt->dst.dev, 1); + neigh = __neigh_lookup(&clip_tbl, &ip, rt->dst.dev, 1); ip_rt_put(rt); if (!neigh) return -ENOMEM; - entry = neighbour_priv(neigh); + entry = NEIGH2ENTRY(neigh); if (entry != clip_vcc->entry) { if (!clip_vcc->entry) pr_debug("add\n"); @@ -474,15 +544,13 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip) } static const struct net_device_ops clip_netdev_ops = { - .ndo_start_xmit = clip_start_xmit, - .ndo_neigh_construct = clip_constructor, + .ndo_start_xmit = clip_start_xmit, }; static void clip_setup(struct net_device *dev) { dev->netdev_ops = &clip_netdev_ops; dev->type = ARPHRD_ATM; - dev->neigh_priv_len = sizeof(struct atmarp_entry); dev->hard_header_len = RFC1483LLC_LEN; dev->mtu = RFC1626_MTU; dev->tx_queue_len = 100; /* "normal" queue (packets) */ @@ -536,8 +604,10 @@ static 
int clip_device_event(struct notifier_block *this, unsigned long event, if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; - if (event == NETDEV_UNREGISTER) + if (event == NETDEV_UNREGISTER) { + neigh_ifdown(&clip_tbl, dev); return NOTIFY_DONE; + } /* ignore non-CLIP devices */ if (dev->type != ARPHRD_ATM || dev->netdev_ops != &clip_netdev_ops) @@ -717,10 +787,9 @@ static void svc_addr(struct seq_file *seq, struct sockaddr_atmsvc *addr) /* This means the neighbour entry has no attached VCC objects. */ #define SEQ_NO_VCC_TOKEN ((void *) 2) -static void atmarp_info(struct seq_file *seq, struct neighbour *n, +static void atmarp_info(struct seq_file *seq, struct net_device *dev, struct atmarp_entry *entry, struct clip_vcc *clip_vcc) { - struct net_device *dev = n->dev; unsigned long exp; char buf[17]; int svc, llc, off; @@ -740,7 +809,8 @@ static void atmarp_info(struct seq_file *seq, struct neighbour *n, seq_printf(seq, "%-6s%-4s%-4s%5ld ", dev->name, svc ? "SVC" : "PVC", llc ? "LLC" : "NULL", exp); - off = scnprintf(buf, sizeof(buf) - 1, "%pI4", n->primary_key); + off = scnprintf(buf, sizeof(buf) - 1, "%pI4", + &entry->ip); while (off < 16) buf[off++] = ' '; buf[off] = '\0'; @@ -811,17 +881,14 @@ static void *clip_seq_sub_iter(struct neigh_seq_state *_state, { struct clip_seq_state *state = (struct clip_seq_state *)_state; - if (n->dev->type != ARPHRD_ATM) - return NULL; - - return clip_seq_vcc_walk(state, neighbour_priv(n), pos); + return clip_seq_vcc_walk(state, NEIGH2ENTRY(n), pos); } static void *clip_seq_start(struct seq_file *seq, loff_t * pos) { struct clip_seq_state *state = seq->private; state->ns.neigh_sub_iter = clip_seq_sub_iter; - return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_NEIGH_ONLY); + return neigh_seq_start(seq, pos, &clip_tbl, NEIGH_SEQ_NEIGH_ONLY); } static int clip_seq_show(struct seq_file *seq, void *v) @@ -833,10 +900,10 @@ static int clip_seq_show(struct seq_file *seq, void *v) seq_puts(seq, atm_arp_banner); } else { struct clip_seq_state *state = seq->private; - struct clip_vcc *vcc = state->vcc; struct neighbour *n = v; + struct clip_vcc *vcc = state->vcc; - atmarp_info(seq, n, neighbour_priv(n), vcc); + atmarp_info(seq, n->dev, NEIGH2ENTRY(n), vcc); } return 0; } @@ -867,6 +934,9 @@ static void atm_clip_exit_noproc(void); static int __init atm_clip_init(void) { + neigh_table_init_no_netlink(&clip_tbl); + + clip_tbl_hook = &clip_tbl; register_atm_ioctl(&clip_ioctl_ops); register_netdevice_notifier(&clip_dev_notifier); register_inetaddr_notifier(&clip_inet_notifier); @@ -903,6 +973,12 @@ static void atm_clip_exit_noproc(void) */ del_timer_sync(&idle_timer); + /* Next, purge the table, so that the device + * unregister loop below does not hang due to + * device references remaining in the table. + */ + neigh_ifdown(&clip_tbl, NULL); + dev = clip_devs; while (dev) { next = PRIV(dev)->next; @@ -910,6 +986,11 @@ static void atm_clip_exit_noproc(void) free_netdev(dev); dev = next; } + + /* Now it is safe to fully shutdown whole table. 
*/ + neigh_table_clear(&clip_tbl); + + clip_tbl_hook = NULL; } static void __exit atm_clip_exit(void) diff --git a/trunk/net/atm/common.c b/trunk/net/atm/common.c index b4b44dbed645..14ff9fe39989 100644 --- a/trunk/net/atm/common.c +++ b/trunk/net/atm/common.c @@ -214,26 +214,6 @@ void vcc_release_async(struct atm_vcc *vcc, int reply) } EXPORT_SYMBOL(vcc_release_async); -void vcc_process_recv_queue(struct atm_vcc *vcc) -{ - struct sk_buff_head queue, *rq; - struct sk_buff *skb, *tmp; - unsigned long flags; - - __skb_queue_head_init(&queue); - rq = &sk_atm(vcc)->sk_receive_queue; - - spin_lock_irqsave(&rq->lock, flags); - skb_queue_splice_init(rq, &queue); - spin_unlock_irqrestore(&rq->lock, flags); - - skb_queue_walk_safe(&queue, skb, tmp) { - __skb_unlink(skb, &queue); - vcc->push(vcc, skb); - } -} -EXPORT_SYMBOL(vcc_process_recv_queue); - void atm_dev_signal_change(struct atm_dev *dev, char signal) { pr_debug("%s signal=%d dev=%p number=%d dev->signal=%d\n", @@ -522,11 +502,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, if (sock->state != SS_CONNECTED) return -ENOTCONN; - - /* only handle MSG_DONTWAIT and MSG_PEEK */ - if (flags & ~(MSG_DONTWAIT | MSG_PEEK)) + if (flags & ~MSG_DONTWAIT) /* only handle MSG_DONTWAIT */ return -EOPNOTSUPP; - vcc = ATM_SD(sock); if (test_bit(ATM_VF_RELEASED, &vcc->flags) || test_bit(ATM_VF_CLOSE, &vcc->flags) || @@ -547,13 +524,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, if (error) return error; sock_recv_ts_and_drops(msg, sk, skb); - - if (!(flags & MSG_PEEK)) { - pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc), - skb->truesize); - atm_return(vcc, skb->truesize); - } - + pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize); + atm_return(vcc, skb->truesize); skb_free_datagram(sk, skb); return copied; } diff --git a/trunk/net/atm/common.h b/trunk/net/atm/common.h index cc3c2dae4d79..f48a76b6cdf4 100644 --- a/trunk/net/atm/common.h +++ b/trunk/net/atm/common.h @@ -24,7 +24,6 @@ int vcc_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen); int vcc_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); -void vcc_process_recv_queue(struct atm_vcc *vcc); int atmpvc_init(void); void atmpvc_exit(void); diff --git a/trunk/net/atm/pppoatm.c b/trunk/net/atm/pppoatm.c index df35d9a3b5fe..db4a11c61d15 100644 --- a/trunk/net/atm/pppoatm.c +++ b/trunk/net/atm/pppoatm.c @@ -303,10 +303,6 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg) atmvcc->push = pppoatm_push; atmvcc->pop = pppoatm_pop; __module_get(THIS_MODULE); - - /* re-process everything received between connection setup and - backend setup */ - vcc_process_recv_queue(atmvcc); return 0; } diff --git a/trunk/net/ax25/af_ax25.c b/trunk/net/ax25/af_ax25.c index 3cd0a0dc91cb..e7c69f4619ec 100644 --- a/trunk/net/ax25/af_ax25.c +++ b/trunk/net/ax25/af_ax25.c @@ -402,14 +402,14 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg) break; case AX25_T1: - if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ) + if (ax25_ctl.arg < 1) goto einval_put; ax25->rtt = (ax25_ctl.arg * HZ) / 2; ax25->t1 = ax25_ctl.arg * HZ; break; case AX25_T2: - if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ) + if (ax25_ctl.arg < 1) goto einval_put; ax25->t2 = ax25_ctl.arg * HZ; break; @@ -422,15 +422,10 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg) break; case AX25_T3: - if (ax25_ctl.arg > 
ULONG_MAX / HZ) - goto einval_put; ax25->t3 = ax25_ctl.arg * HZ; break; case AX25_IDLE: - if (ax25_ctl.arg > ULONG_MAX / (60 * HZ)) - goto einval_put; - ax25->idle = ax25_ctl.arg * 60 * HZ; break; @@ -545,16 +540,15 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, ax25_cb *ax25; struct net_device *dev; char devname[IFNAMSIZ]; - unsigned long opt; - int res = 0; + int opt, res = 0; if (level != SOL_AX25) return -ENOPROTOOPT; - if (optlen < sizeof(unsigned int)) + if (optlen < sizeof(int)) return -EINVAL; - if (get_user(opt, (unsigned int __user *)optval)) + if (get_user(opt, (int __user *)optval)) return -EFAULT; lock_sock(sk); @@ -577,7 +571,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, break; case AX25_T1: - if (opt < 1 || opt > ULONG_MAX / HZ) { + if (opt < 1) { res = -EINVAL; break; } @@ -586,7 +580,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, break; case AX25_T2: - if (opt < 1 || opt > ULONG_MAX / HZ) { + if (opt < 1) { res = -EINVAL; break; } @@ -602,7 +596,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, break; case AX25_T3: - if (opt < 1 || opt > ULONG_MAX / HZ) { + if (opt < 1) { res = -EINVAL; break; } @@ -610,7 +604,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, break; case AX25_IDLE: - if (opt > ULONG_MAX / (60 * HZ)) { + if (opt < 0) { res = -EINVAL; break; } @@ -618,7 +612,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, break; case AX25_BACKOFF: - if (opt > 2) { + if (opt < 0 || opt > 2) { res = -EINVAL; break; } diff --git a/trunk/net/batman-adv/bat_sysfs.c b/trunk/net/batman-adv/bat_sysfs.c index c25492f7d665..b8a7414c3571 100644 --- a/trunk/net/batman-adv/bat_sysfs.c +++ b/trunk/net/batman-adv/bat_sysfs.c @@ -174,7 +174,7 @@ static int store_uint_attr(const char *buff, size_t count, unsigned long uint_val; int ret; - ret = kstrtoul(buff, 10, &uint_val); + ret = strict_strtoul(buff, 10, &uint_val); if (ret) { bat_info(net_dev, "%s: Invalid parameter received: %s\n", @@ -239,7 +239,7 @@ static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr, unsigned long val; int ret, vis_mode_tmp = -1; - ret = kstrtoul(buff, 10, &val); + ret = strict_strtoul(buff, 10, &val); if (((count == 2) && (!ret) && (val == VIS_TYPE_CLIENT_UPDATE)) || (strncmp(buff, "client", 6) == 0) || diff --git a/trunk/net/batman-adv/bitarray.c b/trunk/net/batman-adv/bitarray.c index 9bc63b209b3f..0be9ff346fa0 100644 --- a/trunk/net/batman-adv/bitarray.c +++ b/trunk/net/batman-adv/bitarray.c @@ -155,7 +155,7 @@ int bit_get_packet(void *priv, unsigned long *seq_bits, /* sequence number is much newer, probably missed a lot of packets */ if ((seq_num_diff >= TQ_LOCAL_WINDOW_SIZE) - && (seq_num_diff < EXPECTED_SEQNO_RANGE)) { + || (seq_num_diff < EXPECTED_SEQNO_RANGE)) { bat_dbg(DBG_BATMAN, bat_priv, "We missed a lot of packets (%i) !\n", seq_num_diff - 1); diff --git a/trunk/net/batman-adv/gateway_client.c b/trunk/net/batman-adv/gateway_client.c index 24403a7350f7..619fb73b3b76 100644 --- a/trunk/net/batman-adv/gateway_client.c +++ b/trunk/net/batman-adv/gateway_client.c @@ -25,7 +25,6 @@ #include "gateway_common.h" #include "hard-interface.h" #include "originator.h" -#include "translation-table.h" #include "routing.h" #include #include @@ -573,142 +572,108 @@ static bool is_type_dhcprequest(struct sk_buff *skb, int header_len) return ret; } -bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) +int 
gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb, + struct orig_node *old_gw) { struct ethhdr *ethhdr; struct iphdr *iphdr; struct ipv6hdr *ipv6hdr; struct udphdr *udphdr; + struct gw_node *curr_gw; + struct neigh_node *neigh_curr = NULL, *neigh_old = NULL; + unsigned int header_len = 0; + int ret = 1; + + if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF) + return 0; /* check for ethernet header */ - if (!pskb_may_pull(skb, *header_len + ETH_HLEN)) - return false; + if (!pskb_may_pull(skb, header_len + ETH_HLEN)) + return 0; ethhdr = (struct ethhdr *)skb->data; - *header_len += ETH_HLEN; + header_len += ETH_HLEN; /* check for initial vlan header */ if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) { - if (!pskb_may_pull(skb, *header_len + VLAN_HLEN)) - return false; + if (!pskb_may_pull(skb, header_len + VLAN_HLEN)) + return 0; ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN); - *header_len += VLAN_HLEN; + header_len += VLAN_HLEN; } /* check for ip header */ switch (ntohs(ethhdr->h_proto)) { case ETH_P_IP: - if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr))) - return false; - iphdr = (struct iphdr *)(skb->data + *header_len); - *header_len += iphdr->ihl * 4; + if (!pskb_may_pull(skb, header_len + sizeof(*iphdr))) + return 0; + iphdr = (struct iphdr *)(skb->data + header_len); + header_len += iphdr->ihl * 4; /* check for udp header */ if (iphdr->protocol != IPPROTO_UDP) - return false; + return 0; break; case ETH_P_IPV6: - if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr))) - return false; - ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len); - *header_len += sizeof(*ipv6hdr); + if (!pskb_may_pull(skb, header_len + sizeof(*ipv6hdr))) + return 0; + ipv6hdr = (struct ipv6hdr *)(skb->data + header_len); + header_len += sizeof(*ipv6hdr); /* check for udp header */ if (ipv6hdr->nexthdr != IPPROTO_UDP) - return false; + return 0; break; default: - return false; + return 0; } - if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr))) - return false; - udphdr = (struct udphdr *)(skb->data + *header_len); - *header_len += sizeof(*udphdr); + if (!pskb_may_pull(skb, header_len + sizeof(*udphdr))) + return 0; + udphdr = (struct udphdr *)(skb->data + header_len); + header_len += sizeof(*udphdr); /* check for bootp port */ if ((ntohs(ethhdr->h_proto) == ETH_P_IP) && (ntohs(udphdr->dest) != 67)) - return false; + return 0; if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) && (ntohs(udphdr->dest) != 547)) - return false; - - return true; -} - -bool gw_out_of_range(struct bat_priv *bat_priv, - struct sk_buff *skb, struct ethhdr *ethhdr) -{ - struct neigh_node *neigh_curr = NULL, *neigh_old = NULL; - struct orig_node *orig_dst_node = NULL; - struct gw_node *curr_gw = NULL; - bool ret, out_of_range = false; - unsigned int header_len = 0; - uint8_t curr_tq_avg; + return 0; - ret = gw_is_dhcp_target(skb, &header_len); - if (!ret) - goto out; - - orig_dst_node = transtable_search(bat_priv, ethhdr->h_source, - ethhdr->h_dest); - if (!orig_dst_node) - goto out; - - if (!orig_dst_node->gw_flags) - goto out; - - ret = is_type_dhcprequest(skb, header_len); - if (!ret) - goto out; - - switch (atomic_read(&bat_priv->gw_mode)) { - case GW_MODE_SERVER: - /* If we are a GW then we are our best GW. 
We can artificially - * set the tq towards ourself as the maximum value */ - curr_tq_avg = TQ_MAX_VALUE; - break; - case GW_MODE_CLIENT: - curr_gw = gw_get_selected_gw_node(bat_priv); - if (!curr_gw) - goto out; + if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER) + return -1; - /* packet is going to our gateway */ - if (curr_gw->orig_node == orig_dst_node) - goto out; - - /* If the dhcp packet has been sent to a different gw, - * we have to evaluate whether the old gw is still - * reliable enough */ - neigh_curr = find_router(bat_priv, curr_gw->orig_node, NULL); - if (!neigh_curr) - goto out; - - curr_tq_avg = neigh_curr->tq_avg; - break; - case GW_MODE_OFF: - default: - goto out; + curr_gw = gw_get_selected_gw_node(bat_priv); + if (!curr_gw) + return 0; + + /* If old_gw != NULL then this packet is unicast. + * So, at this point we have to check the message type: if it is a + * DHCPREQUEST we have to decide whether to drop it or not */ + if (old_gw && curr_gw->orig_node != old_gw) { + if (is_type_dhcprequest(skb, header_len)) { + /* If the dhcp packet has been sent to a different gw, + * we have to evaluate whether the old gw is still + * reliable enough */ + neigh_curr = find_router(bat_priv, curr_gw->orig_node, + NULL); + neigh_old = find_router(bat_priv, old_gw, NULL); + if (!neigh_curr || !neigh_old) + goto free_neigh; + if (neigh_curr->tq_avg - neigh_old->tq_avg < + GW_THRESHOLD) + ret = -1; + } } - - neigh_old = find_router(bat_priv, orig_dst_node, NULL); - if (!neigh_old) - goto out; - - if (curr_tq_avg - neigh_old->tq_avg > GW_THRESHOLD) - out_of_range = true; - -out: - if (orig_dst_node) - orig_node_free_ref(orig_dst_node); - if (curr_gw) - gw_node_free_ref(curr_gw); +free_neigh: if (neigh_old) neigh_node_free_ref(neigh_old); if (neigh_curr) neigh_node_free_ref(neigh_curr); - return out_of_range; + if (curr_gw) + gw_node_free_ref(curr_gw); + return ret; } diff --git a/trunk/net/batman-adv/gateway_client.h b/trunk/net/batman-adv/gateway_client.h index e1edba08eb1d..b9b983c07feb 100644 --- a/trunk/net/batman-adv/gateway_client.h +++ b/trunk/net/batman-adv/gateway_client.h @@ -31,8 +31,7 @@ void gw_node_update(struct bat_priv *bat_priv, void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node); void gw_node_purge(struct bat_priv *bat_priv); int gw_client_seq_print_text(struct seq_file *seq, void *offset); -bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len); -bool gw_out_of_range(struct bat_priv *bat_priv, - struct sk_buff *skb, struct ethhdr *ethhdr); +int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb, + struct orig_node *old_gw); #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ diff --git a/trunk/net/batman-adv/gateway_common.c b/trunk/net/batman-adv/gateway_common.c index c4ac7b0a2a63..18661af0bc3b 100644 --- a/trunk/net/batman-adv/gateway_common.c +++ b/trunk/net/batman-adv/gateway_common.c @@ -97,7 +97,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff, *tmp_ptr = '\0'; } - ret = kstrtol(buff, 10, &ldown); + ret = strict_strtol(buff, 10, &ldown); if (ret) { bat_err(net_dev, "Download speed of gateway mode invalid: %s\n", @@ -122,7 +122,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff, *tmp_ptr = '\0'; } - ret = kstrtol(slash_ptr + 1, 10, &lup); + ret = strict_strtol(slash_ptr + 1, 10, &lup); if (ret) { bat_err(net_dev, "Upload speed of gateway mode invalid: " diff --git a/trunk/net/batman-adv/hash.c b/trunk/net/batman-adv/hash.c index d1da29da333b..2a172505f513 100644 --- 
a/trunk/net/batman-adv/hash.c +++ b/trunk/net/batman-adv/hash.c @@ -25,7 +25,7 @@ /* clears the hash */ static void hash_init(struct hashtable_t *hash) { - uint32_t i; + int i; for (i = 0 ; i < hash->size; i++) { INIT_HLIST_HEAD(&hash->table[i]); @@ -42,7 +42,7 @@ void hash_destroy(struct hashtable_t *hash) } /* allocates and clears the hash */ -struct hashtable_t *hash_new(uint32_t size) +struct hashtable_t *hash_new(int size) { struct hashtable_t *hash; diff --git a/trunk/net/batman-adv/hash.h b/trunk/net/batman-adv/hash.h index 4768717f07f9..d20aa71ba1e8 100644 --- a/trunk/net/batman-adv/hash.h +++ b/trunk/net/batman-adv/hash.h @@ -33,17 +33,17 @@ typedef int (*hashdata_compare_cb)(const struct hlist_node *, const void *); /* the hashfunction, should return an index * based on the key in the data of the first * argument and the size the second */ -typedef uint32_t (*hashdata_choose_cb)(const void *, uint32_t); +typedef int (*hashdata_choose_cb)(const void *, int); typedef void (*hashdata_free_cb)(struct hlist_node *, void *); struct hashtable_t { struct hlist_head *table; /* the hashtable itself with the buckets */ spinlock_t *list_locks; /* spinlock for each hash list entry */ - uint32_t size; /* size of hashtable */ + int size; /* size of hashtable */ }; /* allocates and clears the hash */ -struct hashtable_t *hash_new(uint32_t size); +struct hashtable_t *hash_new(int size); /* free only the hashtable and the hash itself. */ void hash_destroy(struct hashtable_t *hash); @@ -57,7 +57,7 @@ static inline void hash_delete(struct hashtable_t *hash, struct hlist_head *head; struct hlist_node *node, *node_tmp; spinlock_t *list_lock; /* spinlock to protect write access */ - uint32_t i; + int i; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; @@ -93,8 +93,7 @@ static inline int hash_add(struct hashtable_t *hash, hashdata_choose_cb choose, const void *data, struct hlist_node *data_node) { - uint32_t index; - int ret = -1; + int index, ret = -1; struct hlist_head *head; struct hlist_node *node; spinlock_t *list_lock; /* spinlock to protect write access */ @@ -138,7 +137,7 @@ static inline void *hash_remove(struct hashtable_t *hash, hashdata_compare_cb compare, hashdata_choose_cb choose, void *data) { - uint32_t index; + size_t index; struct hlist_node *node; struct hlist_head *head; void *data_save = NULL; diff --git a/trunk/net/batman-adv/icmp_socket.c b/trunk/net/batman-adv/icmp_socket.c index d9c1e7bb7fbf..ac3520e057c0 100644 --- a/trunk/net/batman-adv/icmp_socket.c +++ b/trunk/net/batman-adv/icmp_socket.c @@ -136,9 +136,10 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf, spin_unlock_bh(&socket_client->lock); - packet_len = min(count, socket_packet->icmp_len); - error = copy_to_user(buf, &socket_packet->icmp_packet, packet_len); + error = __copy_to_user(buf, &socket_packet->icmp_packet, + socket_packet->icmp_len); + packet_len = socket_packet->icmp_len; kfree(socket_packet); if (error) @@ -186,7 +187,12 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff, skb_reserve(skb, sizeof(struct ethhdr)); icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len); - if (copy_from_user(icmp_packet, buff, packet_len)) { + if (!access_ok(VERIFY_READ, buff, packet_len)) { + len = -EFAULT; + goto free_skb; + } + + if (__copy_from_user(icmp_packet, buff, packet_len)) { len = -EFAULT; goto free_skb; } @@ -211,7 +217,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff, if (icmp_packet->version != COMPAT_VERSION) { 
icmp_packet->msg_type = PARAMETER_PROBLEM; - icmp_packet->version = COMPAT_VERSION; + icmp_packet->ttl = COMPAT_VERSION; bat_socket_add_packet(socket_client, icmp_packet, packet_len); goto free_skb; } diff --git a/trunk/net/batman-adv/main.h b/trunk/net/batman-adv/main.h index 86354e06eb48..964ad4d8ba33 100644 --- a/trunk/net/batman-adv/main.h +++ b/trunk/net/batman-adv/main.h @@ -28,7 +28,7 @@ #define DRIVER_DEVICE "batman-adv" #ifndef SOURCE_VERSION -#define SOURCE_VERSION "2012.0.0" +#define SOURCE_VERSION "2011.4.0" #endif /* B.A.T.M.A.N. parameters */ diff --git a/trunk/net/batman-adv/originator.c b/trunk/net/batman-adv/originator.c index 0bc2045a2f2e..0e5b77255d99 100644 --- a/trunk/net/batman-adv/originator.c +++ b/trunk/net/batman-adv/originator.c @@ -164,7 +164,7 @@ void originator_free(struct bat_priv *bat_priv) struct hlist_head *head; spinlock_t *list_lock; /* spinlock to protect write access */ struct orig_node *orig_node; - uint32_t i; + int i; if (!hash) return; @@ -350,7 +350,7 @@ static void _purge_orig(struct bat_priv *bat_priv) struct hlist_head *head; spinlock_t *list_lock; /* spinlock to protect write access */ struct orig_node *orig_node; - uint32_t i; + int i; if (!hash) return; @@ -413,8 +413,7 @@ int orig_seq_print_text(struct seq_file *seq, void *offset) int batman_count = 0; int last_seen_secs; int last_seen_msecs; - uint32_t i; - int ret = 0; + int i, ret = 0; primary_if = primary_if_get_selected(bat_priv); @@ -520,8 +519,7 @@ int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num) struct hlist_node *node; struct hlist_head *head; struct orig_node *orig_node; - uint32_t i; - int ret; + int i, ret; /* resize all orig nodes because orig_node->bcast_own(_sum) depend on * if_num */ @@ -603,8 +601,7 @@ int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num) struct hlist_head *head; struct hard_iface *hard_iface_tmp; struct orig_node *orig_node; - uint32_t i; - int ret; + int i, ret; /* resize all orig nodes because orig_node->bcast_own(_sum) depend on * if_num */ diff --git a/trunk/net/batman-adv/originator.h b/trunk/net/batman-adv/originator.h index 67765ffef731..cfc1f60a96a1 100644 --- a/trunk/net/batman-adv/originator.h +++ b/trunk/net/batman-adv/originator.h @@ -42,7 +42,7 @@ int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num); /* hashfunction to choose an entry in a hash table of given size */ /* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */ -static inline uint32_t choose_orig(const void *data, uint32_t size) +static inline int choose_orig(const void *data, int32_t size) { const unsigned char *key = data; uint32_t hash = 0; diff --git a/trunk/net/batman-adv/routing.c b/trunk/net/batman-adv/routing.c index 773e606f9702..f961cc5eade5 100644 --- a/trunk/net/batman-adv/routing.c +++ b/trunk/net/batman-adv/routing.c @@ -39,7 +39,7 @@ void slide_own_bcast_window(struct hard_iface *hard_iface) struct hlist_head *head; struct orig_node *orig_node; unsigned long *word; - uint32_t i; + int i; size_t word_index; for (i = 0; i < hash->size; i++) { @@ -578,7 +578,6 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if) { struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct tt_query_packet *tt_query; - uint16_t tt_len; struct ethhdr *ethhdr; /* drop packet if it has not necessary minimum size */ @@ -617,21 +616,13 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if) } break; case TT_RESPONSE: - if (is_my_mac(tt_query->dst)) { - /* packet needs to be linearized to access 
the TT - * changes */ - if (skb_linearize(skb) < 0) - goto out; - - tt_len = tt_query->tt_data * sizeof(struct tt_change); - - /* Ensure we have all the claimed data */ - if (unlikely(skb_headlen(skb) < - sizeof(struct tt_query_packet) + tt_len)) - goto out; + /* packet needs to be linearized to access the TT changes */ + if (skb_linearize(skb) < 0) + goto out; + if (is_my_mac(tt_query->dst)) handle_tt_response(bat_priv, tt_query); - } else { + else { bat_dbg(DBG_TT, bat_priv, "Routing TT_RESPONSE to %pM [%c]\n", tt_query->dst, diff --git a/trunk/net/batman-adv/soft-interface.c b/trunk/net/batman-adv/soft-interface.c index 987c75a775f9..f9cc95728989 100644 --- a/trunk/net/batman-adv/soft-interface.c +++ b/trunk/net/batman-adv/soft-interface.c @@ -563,10 +563,10 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) struct bcast_packet *bcast_packet; struct vlan_ethhdr *vhdr; struct softif_neigh *curr_softif_neigh = NULL; - unsigned int header_len = 0; + struct orig_node *orig_node = NULL; int data_len = skb->len, ret; short vid = -1; - bool do_bcast = false; + bool do_bcast; if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) goto dropped; @@ -598,28 +598,17 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) /* Register the client MAC in the transtable */ tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif); - if (is_multicast_ether_addr(ethhdr->h_dest)) { - do_bcast = true; + orig_node = transtable_search(bat_priv, ethhdr->h_source, + ethhdr->h_dest); + do_bcast = is_multicast_ether_addr(ethhdr->h_dest); + if (do_bcast || (orig_node && orig_node->gw_flags)) { + ret = gw_is_target(bat_priv, skb, orig_node); - switch (atomic_read(&bat_priv->gw_mode)) { - case GW_MODE_SERVER: - /* gateway servers should not send dhcp - * requests into the mesh */ - ret = gw_is_dhcp_target(skb, &header_len); - if (ret) - goto dropped; - break; - case GW_MODE_CLIENT: - /* gateway clients should send dhcp requests - * via unicast to their gateway */ - ret = gw_is_dhcp_target(skb, &header_len); - if (ret) - do_bcast = false; - break; - case GW_MODE_OFF: - default: - break; - } + if (ret < 0) + goto dropped; + + if (ret) + do_bcast = false; } /* ethernet packet should be broadcasted */ @@ -655,12 +644,6 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) /* unicast packet */ } else { - if (atomic_read(&bat_priv->gw_mode) != GW_MODE_OFF) { - ret = gw_out_of_range(bat_priv, skb, ethhdr); - if (ret) - goto dropped; - } - ret = unicast_send_skb(skb, bat_priv); if (ret != 0) goto dropped_freed; @@ -679,6 +662,8 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) softif_neigh_free_ref(curr_softif_neigh); if (primary_if) hardif_free_ref(primary_if); + if (orig_node) + orig_node_free_ref(orig_node); return NETDEV_TX_OK; } @@ -874,7 +859,7 @@ struct net_device *softif_create(const char *name) unreg_sysfs: sysfs_del_meshif(soft_iface); unreg_soft_iface: - unregister_netdevice(soft_iface); + unregister_netdev(soft_iface); return NULL; free_soft_iface: diff --git a/trunk/net/batman-adv/translation-table.c b/trunk/net/batman-adv/translation-table.c index ab8dea8b0b2e..5f09a578d49d 100644 --- a/trunk/net/batman-adv/translation-table.c +++ b/trunk/net/batman-adv/translation-table.c @@ -36,9 +36,18 @@ static void _tt_global_del(struct bat_priv *bat_priv, static void tt_purge(struct work_struct *work); /* returns 1 if they are the same mac addr */ -static int compare_tt(const struct hlist_node *node, const void *data2) 
+static int compare_ltt(const struct hlist_node *node, const void *data2) { - const void *data1 = container_of(node, struct tt_common_entry, + const void *data1 = container_of(node, struct tt_local_entry, + hash_entry); + + return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); +} + +/* returns 1 if they are the same mac addr */ +static int compare_gtt(const struct hlist_node *node, const void *data2) +{ + const void *data1 = container_of(node, struct tt_global_entry, hash_entry); return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); @@ -51,13 +60,14 @@ static void tt_start_timer(struct bat_priv *bat_priv) msecs_to_jiffies(5000)); } -static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash, - const void *data) +static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv, + const void *data) { + struct hashtable_t *hash = bat_priv->tt_local_hash; struct hlist_head *head; struct hlist_node *node; - struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL; - uint32_t index; + struct tt_local_entry *tt_local_entry, *tt_local_entry_tmp = NULL; + int index; if (!hash) return NULL; @@ -66,46 +76,51 @@ static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash, head = &hash->table[index]; rcu_read_lock(); - hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) { - if (!compare_eth(tt_common_entry, data)) + hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) { + if (!compare_eth(tt_local_entry, data)) continue; - if (!atomic_inc_not_zero(&tt_common_entry->refcount)) + if (!atomic_inc_not_zero(&tt_local_entry->refcount)) continue; - tt_common_entry_tmp = tt_common_entry; + tt_local_entry_tmp = tt_local_entry; break; } rcu_read_unlock(); - return tt_common_entry_tmp; -} - -static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv, - const void *data) -{ - struct tt_common_entry *tt_common_entry; - struct tt_local_entry *tt_local_entry = NULL; - - tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data); - if (tt_common_entry) - tt_local_entry = container_of(tt_common_entry, - struct tt_local_entry, common); - return tt_local_entry; + return tt_local_entry_tmp; } static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv, const void *data) { - struct tt_common_entry *tt_common_entry; - struct tt_global_entry *tt_global_entry = NULL; + struct hashtable_t *hash = bat_priv->tt_global_hash; + struct hlist_head *head; + struct hlist_node *node; + struct tt_global_entry *tt_global_entry; + struct tt_global_entry *tt_global_entry_tmp = NULL; + int index; - tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data); - if (tt_common_entry) - tt_global_entry = container_of(tt_common_entry, - struct tt_global_entry, common); - return tt_global_entry; + if (!hash) + return NULL; + index = choose_orig(data, hash->size); + head = &hash->table[index]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) { + if (!compare_eth(tt_global_entry, data)) + continue; + + if (!atomic_inc_not_zero(&tt_global_entry->refcount)) + continue; + + tt_global_entry_tmp = tt_global_entry; + break; + } + rcu_read_unlock(); + + return tt_global_entry_tmp; } static bool is_out_of_time(unsigned long starting_time, unsigned long timeout) @@ -118,18 +133,15 @@ static bool is_out_of_time(unsigned long starting_time, unsigned long timeout) static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry) { - if (atomic_dec_and_test(&tt_local_entry->common.refcount)) - 
kfree_rcu(tt_local_entry, common.rcu); + if (atomic_dec_and_test(&tt_local_entry->refcount)) + kfree_rcu(tt_local_entry, rcu); } static void tt_global_entry_free_rcu(struct rcu_head *rcu) { - struct tt_common_entry *tt_common_entry; struct tt_global_entry *tt_global_entry; - tt_common_entry = container_of(rcu, struct tt_common_entry, rcu); - tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, - common); + tt_global_entry = container_of(rcu, struct tt_global_entry, rcu); if (tt_global_entry->orig_node) orig_node_free_ref(tt_global_entry->orig_node); @@ -139,9 +151,8 @@ static void tt_global_entry_free_rcu(struct rcu_head *rcu) static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry) { - if (atomic_dec_and_test(&tt_global_entry->common.refcount)) - call_rcu(&tt_global_entry->common.rcu, - tt_global_entry_free_rcu); + if (atomic_dec_and_test(&tt_global_entry->refcount)) + call_rcu(&tt_global_entry->rcu, tt_global_entry_free_rcu); } static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr, @@ -190,7 +201,6 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr, struct bat_priv *bat_priv = netdev_priv(soft_iface); struct tt_local_entry *tt_local_entry = NULL; struct tt_global_entry *tt_global_entry = NULL; - int hash_added; tt_local_entry = tt_local_hash_find(bat_priv, addr); @@ -207,33 +217,26 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr, "Creating new local tt entry: %pM (ttvn: %d)\n", addr, (uint8_t)atomic_read(&bat_priv->ttvn)); - memcpy(tt_local_entry->common.addr, addr, ETH_ALEN); - tt_local_entry->common.flags = NO_FLAGS; - if (is_wifi_iface(ifindex)) - tt_local_entry->common.flags |= TT_CLIENT_WIFI; - atomic_set(&tt_local_entry->common.refcount, 2); + memcpy(tt_local_entry->addr, addr, ETH_ALEN); tt_local_entry->last_seen = jiffies; + tt_local_entry->flags = NO_FLAGS; + if (is_wifi_iface(ifindex)) + tt_local_entry->flags |= TT_CLIENT_WIFI; + atomic_set(&tt_local_entry->refcount, 2); /* the batman interface mac address should never be purged */ if (compare_eth(addr, soft_iface->dev_addr)) - tt_local_entry->common.flags |= TT_CLIENT_NOPURGE; - - hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig, - &tt_local_entry->common, - &tt_local_entry->common.hash_entry); - - if (unlikely(hash_added != 0)) { - /* remove the reference for the hash */ - tt_local_entry_free_ref(tt_local_entry); - goto out; - } + tt_local_entry->flags |= TT_CLIENT_NOPURGE; - tt_local_event(bat_priv, addr, tt_local_entry->common.flags); + tt_local_event(bat_priv, addr, tt_local_entry->flags); /* The local entry has to be marked as NEW to avoid to send it in * a full table response going out before the next ttvn increment * (consistency check) */ - tt_local_entry->common.flags |= TT_CLIENT_NEW; + tt_local_entry->flags |= TT_CLIENT_NEW; + + hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig, + tt_local_entry, &tt_local_entry->hash_entry); /* remove address from global hash if present */ tt_global_entry = tt_global_hash_find(bat_priv, addr); @@ -244,9 +247,10 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr, tt_global_entry->orig_node->tt_poss_change = true; /* The global entry has to be marked as ROAMING and has to be * kept for consistency purpose */ - tt_global_entry->common.flags |= TT_CLIENT_ROAM; + tt_global_entry->flags |= TT_CLIENT_ROAM; tt_global_entry->roam_at = jiffies; - send_roam_adv(bat_priv, tt_global_entry->common.addr, + + send_roam_adv(bat_priv, 
tt_global_entry->addr, tt_global_entry->orig_node); } out: @@ -308,12 +312,13 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset) struct net_device *net_dev = (struct net_device *)seq->private; struct bat_priv *bat_priv = netdev_priv(net_dev); struct hashtable_t *hash = bat_priv->tt_local_hash; - struct tt_common_entry *tt_common_entry; + struct tt_local_entry *tt_local_entry; struct hard_iface *primary_if; struct hlist_node *node; struct hlist_head *head; - uint32_t i; - int ret = 0; + size_t buf_size, pos; + char *buff; + int i, ret = 0; primary_if = primary_if_get_selected(bat_priv); if (!primary_if) { @@ -334,27 +339,51 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset) "announced via TT (TTVN: %u):\n", net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn)); + buf_size = 1; + /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */ for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); - hlist_for_each_entry_rcu(tt_common_entry, node, + __hlist_for_each_rcu(node, head) + buf_size += 29; + rcu_read_unlock(); + } + + buff = kmalloc(buf_size, GFP_ATOMIC); + if (!buff) { + ret = -ENOMEM; + goto out; + } + + buff[0] = '\0'; + pos = 0; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) { - seq_printf(seq, " * %pM [%c%c%c%c%c]\n", - tt_common_entry->addr, - (tt_common_entry->flags & + pos += snprintf(buff + pos, 30, " * %pM " + "[%c%c%c%c%c]\n", + tt_local_entry->addr, + (tt_local_entry->flags & TT_CLIENT_ROAM ? 'R' : '.'), - (tt_common_entry->flags & + (tt_local_entry->flags & TT_CLIENT_NOPURGE ? 'P' : '.'), - (tt_common_entry->flags & + (tt_local_entry->flags & TT_CLIENT_NEW ? 'N' : '.'), - (tt_common_entry->flags & + (tt_local_entry->flags & TT_CLIENT_PENDING ? 'X' : '.'), - (tt_common_entry->flags & + (tt_local_entry->flags & TT_CLIENT_WIFI ? 'W' : '.')); } rcu_read_unlock(); } + + seq_printf(seq, "%s", buff); + kfree(buff); out: if (primary_if) hardif_free_ref(primary_if); @@ -365,13 +394,13 @@ static void tt_local_set_pending(struct bat_priv *bat_priv, struct tt_local_entry *tt_local_entry, uint16_t flags) { - tt_local_event(bat_priv, tt_local_entry->common.addr, - tt_local_entry->common.flags | flags); + tt_local_event(bat_priv, tt_local_entry->addr, + tt_local_entry->flags | flags); /* The local client has to be marked as "pending to be removed" but has * to be kept in the table in order to send it in a full table * response issued before the net ttvn increment (consistency check) */ - tt_local_entry->common.flags |= TT_CLIENT_PENDING; + tt_local_entry->flags |= TT_CLIENT_PENDING; } void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr, @@ -387,7 +416,7 @@ void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr, (roaming ? 
TT_CLIENT_ROAM : NO_FLAGS)); bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) pending to be removed: " - "%s\n", tt_local_entry->common.addr, message); + "%s\n", tt_local_entry->addr, message); out: if (tt_local_entry) tt_local_entry_free_ref(tt_local_entry); @@ -397,27 +426,23 @@ static void tt_local_purge(struct bat_priv *bat_priv) { struct hashtable_t *hash = bat_priv->tt_local_hash; struct tt_local_entry *tt_local_entry; - struct tt_common_entry *tt_common_entry; struct hlist_node *node, *node_tmp; struct hlist_head *head; spinlock_t *list_lock; /* protects write access to the hash lists */ - uint32_t i; + int i; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i]; spin_lock_bh(list_lock); - hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, + hlist_for_each_entry_safe(tt_local_entry, node, node_tmp, head, hash_entry) { - tt_local_entry = container_of(tt_common_entry, - struct tt_local_entry, - common); - if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE) + if (tt_local_entry->flags & TT_CLIENT_NOPURGE) continue; /* entry already marked for deletion */ - if (tt_local_entry->common.flags & TT_CLIENT_PENDING) + if (tt_local_entry->flags & TT_CLIENT_PENDING) continue; if (!is_out_of_time(tt_local_entry->last_seen, @@ -428,7 +453,7 @@ static void tt_local_purge(struct bat_priv *bat_priv) TT_CLIENT_DEL); bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) " "pending to be removed: timed out\n", - tt_local_entry->common.addr); + tt_local_entry->addr); } spin_unlock_bh(list_lock); } @@ -439,11 +464,10 @@ static void tt_local_table_free(struct bat_priv *bat_priv) { struct hashtable_t *hash; spinlock_t *list_lock; /* protects write access to the hash lists */ - struct tt_common_entry *tt_common_entry; struct tt_local_entry *tt_local_entry; struct hlist_node *node, *node_tmp; struct hlist_head *head; - uint32_t i; + int i; if (!bat_priv->tt_local_hash) return; @@ -455,12 +479,9 @@ static void tt_local_table_free(struct bat_priv *bat_priv) list_lock = &hash->list_locks[i]; spin_lock_bh(list_lock); - hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, + hlist_for_each_entry_safe(tt_local_entry, node, node_tmp, head, hash_entry) { hlist_del_rcu(node); - tt_local_entry = container_of(tt_common_entry, - struct tt_local_entry, - common); tt_local_entry_free_ref(tt_local_entry); } spin_unlock_bh(list_lock); @@ -508,7 +529,6 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, struct tt_global_entry *tt_global_entry; struct orig_node *orig_node_tmp; int ret = 0; - int hash_added; tt_global_entry = tt_global_hash_find(bat_priv, tt_addr); @@ -519,24 +539,18 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, if (!tt_global_entry) goto out; - memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN); - tt_global_entry->common.flags = NO_FLAGS; - atomic_set(&tt_global_entry->common.refcount, 2); + memcpy(tt_global_entry->addr, tt_addr, ETH_ALEN); /* Assign the new orig_node */ atomic_inc(&orig_node->refcount); tt_global_entry->orig_node = orig_node; tt_global_entry->ttvn = ttvn; + tt_global_entry->flags = NO_FLAGS; tt_global_entry->roam_at = 0; + atomic_set(&tt_global_entry->refcount, 2); - hash_added = hash_add(bat_priv->tt_global_hash, compare_tt, - choose_orig, &tt_global_entry->common, - &tt_global_entry->common.hash_entry); - - if (unlikely(hash_added != 0)) { - /* remove the reference for the hash */ - tt_global_entry_free_ref(tt_global_entry); - goto out_remove; - } + hash_add(bat_priv->tt_global_hash, 
compare_gtt, + choose_orig, tt_global_entry, + &tt_global_entry->hash_entry); atomic_inc(&orig_node->tt_size); } else { if (tt_global_entry->orig_node != orig_node) { @@ -547,21 +561,20 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, orig_node_free_ref(orig_node_tmp); atomic_inc(&orig_node->tt_size); } - tt_global_entry->common.flags = NO_FLAGS; tt_global_entry->ttvn = ttvn; + tt_global_entry->flags = NO_FLAGS; tt_global_entry->roam_at = 0; } if (wifi) - tt_global_entry->common.flags |= TT_CLIENT_WIFI; + tt_global_entry->flags |= TT_CLIENT_WIFI; bat_dbg(DBG_TT, bat_priv, "Creating new global tt entry: %pM (via %pM)\n", - tt_global_entry->common.addr, orig_node->orig); + tt_global_entry->addr, orig_node->orig); -out_remove: /* remove address from local hash if present */ - tt_local_remove(bat_priv, tt_global_entry->common.addr, + tt_local_remove(bat_priv, tt_global_entry->addr, "global tt received", roaming); ret = 1; out: @@ -575,13 +588,13 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset) struct net_device *net_dev = (struct net_device *)seq->private; struct bat_priv *bat_priv = netdev_priv(net_dev); struct hashtable_t *hash = bat_priv->tt_global_hash; - struct tt_common_entry *tt_common_entry; struct tt_global_entry *tt_global_entry; struct hard_iface *primary_if; struct hlist_node *node; struct hlist_head *head; - uint32_t i; - int ret = 0; + size_t buf_size, pos; + char *buff; + int i, ret = 0; primary_if = primary_if_get_selected(bat_priv); if (!primary_if) { @@ -604,32 +617,53 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset) seq_printf(seq, " %-13s %s %-15s %s %s\n", "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags"); + buf_size = 1; + /* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via + * xx:xx:xx:xx:xx:xx (cur_ttvn)\n"*/ + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + __hlist_for_each_rcu(node, head) + buf_size += 67; + rcu_read_unlock(); + } + + buff = kmalloc(buf_size, GFP_ATOMIC); + if (!buff) { + ret = -ENOMEM; + goto out; + } + + buff[0] = '\0'; + pos = 0; + for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); - hlist_for_each_entry_rcu(tt_common_entry, node, + hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) { - tt_global_entry = container_of(tt_common_entry, - struct tt_global_entry, - common); - seq_printf(seq, " * %pM (%3u) via %pM (%3u) " - "[%c%c%c]\n", - tt_global_entry->common.addr, + pos += snprintf(buff + pos, 69, + " * %pM (%3u) via %pM (%3u) " + "[%c%c%c]\n", tt_global_entry->addr, tt_global_entry->ttvn, tt_global_entry->orig_node->orig, (uint8_t) atomic_read( &tt_global_entry->orig_node-> last_ttvn), - (tt_global_entry->common.flags & + (tt_global_entry->flags & TT_CLIENT_ROAM ? 'R' : '.'), - (tt_global_entry->common.flags & + (tt_global_entry->flags & TT_CLIENT_PENDING ? 'X' : '.'), - (tt_global_entry->common.flags & + (tt_global_entry->flags & TT_CLIENT_WIFI ? 
'W' : '.')); } rcu_read_unlock(); } + + seq_printf(seq, "%s", buff); + kfree(buff); out: if (primary_if) hardif_free_ref(primary_if); @@ -645,13 +679,13 @@ static void _tt_global_del(struct bat_priv *bat_priv, bat_dbg(DBG_TT, bat_priv, "Deleting global tt entry %pM (via %pM): %s\n", - tt_global_entry->common.addr, tt_global_entry->orig_node->orig, + tt_global_entry->addr, tt_global_entry->orig_node->orig, message); atomic_dec(&tt_global_entry->orig_node->tt_size); - hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig, - tt_global_entry->common.addr); + hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig, + tt_global_entry->addr); out: if (tt_global_entry) tt_global_entry_free_ref(tt_global_entry); @@ -679,9 +713,9 @@ void tt_global_del(struct bat_priv *bat_priv, * 2) the client roamed to us => we can directly delete * the global entry, since it is useless now. */ tt_local_entry = tt_local_hash_find(bat_priv, - tt_global_entry->common.addr); + tt_global_entry->addr); if (!tt_local_entry) { - tt_global_entry->common.flags |= TT_CLIENT_ROAM; + tt_global_entry->flags |= TT_CLIENT_ROAM; tt_global_entry->roam_at = jiffies; goto out; } @@ -699,8 +733,7 @@ void tt_global_del_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, const char *message) { struct tt_global_entry *tt_global_entry; - struct tt_common_entry *tt_common_entry; - uint32_t i; + int i; struct hashtable_t *hash = bat_priv->tt_global_hash; struct hlist_node *node, *safe; struct hlist_head *head; @@ -714,18 +747,14 @@ void tt_global_del_orig(struct bat_priv *bat_priv, list_lock = &hash->list_locks[i]; spin_lock_bh(list_lock); - hlist_for_each_entry_safe(tt_common_entry, node, safe, + hlist_for_each_entry_safe(tt_global_entry, node, safe, head, hash_entry) { - tt_global_entry = container_of(tt_common_entry, - struct tt_global_entry, - common); if (tt_global_entry->orig_node == orig_node) { bat_dbg(DBG_TT, bat_priv, "Deleting global tt entry %pM " - "(via %pM): %s\n", - tt_global_entry->common.addr, - tt_global_entry->orig_node->orig, - message); + "(via %pM): originator time out\n", + tt_global_entry->addr, + tt_global_entry->orig_node->orig); hlist_del_rcu(node); tt_global_entry_free_ref(tt_global_entry); } @@ -738,24 +767,20 @@ void tt_global_del_orig(struct bat_priv *bat_priv, static void tt_global_roam_purge(struct bat_priv *bat_priv) { struct hashtable_t *hash = bat_priv->tt_global_hash; - struct tt_common_entry *tt_common_entry; struct tt_global_entry *tt_global_entry; struct hlist_node *node, *node_tmp; struct hlist_head *head; spinlock_t *list_lock; /* protects write access to the hash lists */ - uint32_t i; + int i; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i]; spin_lock_bh(list_lock); - hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, + hlist_for_each_entry_safe(tt_global_entry, node, node_tmp, head, hash_entry) { - tt_global_entry = container_of(tt_common_entry, - struct tt_global_entry, - common); - if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM)) + if (!(tt_global_entry->flags & TT_CLIENT_ROAM)) continue; if (!is_out_of_time(tt_global_entry->roam_at, TT_CLIENT_ROAM_TIMEOUT * 1000)) @@ -763,7 +788,7 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv) bat_dbg(DBG_TT, bat_priv, "Deleting global " "tt entry (%pM): Roaming timeout\n", - tt_global_entry->common.addr); + tt_global_entry->addr); atomic_dec(&tt_global_entry->orig_node->tt_size); hlist_del_rcu(node); tt_global_entry_free_ref(tt_global_entry); @@ -777,11 +802,10 
@@ static void tt_global_table_free(struct bat_priv *bat_priv) { struct hashtable_t *hash; spinlock_t *list_lock; /* protects write access to the hash lists */ - struct tt_common_entry *tt_common_entry; struct tt_global_entry *tt_global_entry; struct hlist_node *node, *node_tmp; struct hlist_head *head; - uint32_t i; + int i; if (!bat_priv->tt_global_hash) return; @@ -793,12 +817,9 @@ static void tt_global_table_free(struct bat_priv *bat_priv) list_lock = &hash->list_locks[i]; spin_lock_bh(list_lock); - hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, + hlist_for_each_entry_safe(tt_global_entry, node, node_tmp, head, hash_entry) { hlist_del_rcu(node); - tt_global_entry = container_of(tt_common_entry, - struct tt_global_entry, - common); tt_global_entry_free_ref(tt_global_entry); } spin_unlock_bh(list_lock); @@ -814,8 +835,8 @@ static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry, { bool ret = false; - if (tt_local_entry->common.flags & TT_CLIENT_WIFI && - tt_global_entry->common.flags & TT_CLIENT_WIFI) + if (tt_local_entry->flags & TT_CLIENT_WIFI && + tt_global_entry->flags & TT_CLIENT_WIFI) ret = true; return ret; @@ -848,7 +869,7 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv, /* A global client marked as PENDING has already moved from that * originator */ - if (tt_global_entry->common.flags & TT_CLIENT_PENDING) + if (tt_global_entry->flags & TT_CLIENT_PENDING) goto out; orig_node = tt_global_entry->orig_node; @@ -867,34 +888,29 @@ uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node) { uint16_t total = 0, total_one; struct hashtable_t *hash = bat_priv->tt_global_hash; - struct tt_common_entry *tt_common_entry; struct tt_global_entry *tt_global_entry; struct hlist_node *node; struct hlist_head *head; - uint32_t i; - int j; + int i, j; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); - hlist_for_each_entry_rcu(tt_common_entry, node, + hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) { - tt_global_entry = container_of(tt_common_entry, - struct tt_global_entry, - common); if (compare_eth(tt_global_entry->orig_node, orig_node)) { /* Roaming clients are in the global table for * consistency only. 
They don't have to be * taken into account while computing the * global crc */ - if (tt_common_entry->flags & TT_CLIENT_ROAM) + if (tt_global_entry->flags & TT_CLIENT_ROAM) continue; total_one = 0; for (j = 0; j < ETH_ALEN; j++) total_one = crc16_byte(total_one, - tt_common_entry->addr[j]); + tt_global_entry->addr[j]); total ^= total_one; } } @@ -909,26 +925,25 @@ uint16_t tt_local_crc(struct bat_priv *bat_priv) { uint16_t total = 0, total_one; struct hashtable_t *hash = bat_priv->tt_local_hash; - struct tt_common_entry *tt_common_entry; + struct tt_local_entry *tt_local_entry; struct hlist_node *node; struct hlist_head *head; - uint32_t i; - int j; + int i, j; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); - hlist_for_each_entry_rcu(tt_common_entry, node, + hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) { /* not yet committed clients have not to be taken into * account while computing the CRC */ - if (tt_common_entry->flags & TT_CLIENT_NEW) + if (tt_local_entry->flags & TT_CLIENT_NEW) continue; total_one = 0; for (j = 0; j < ETH_ALEN; j++) total_one = crc16_byte(total_one, - tt_common_entry->addr[j]); + tt_local_entry->addr[j]); total ^= total_one; } rcu_read_unlock(); @@ -1017,25 +1032,21 @@ static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv, /* data_ptr is useless here, but has to be kept to respect the prototype */ static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr) { - const struct tt_common_entry *tt_common_entry = entry_ptr; + const struct tt_local_entry *tt_local_entry = entry_ptr; - if (tt_common_entry->flags & TT_CLIENT_NEW) + if (tt_local_entry->flags & TT_CLIENT_NEW) return 0; return 1; } static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr) { - const struct tt_common_entry *tt_common_entry = entry_ptr; - const struct tt_global_entry *tt_global_entry; + const struct tt_global_entry *tt_global_entry = entry_ptr; const struct orig_node *orig_node = data_ptr; - if (tt_common_entry->flags & TT_CLIENT_ROAM) + if (tt_global_entry->flags & TT_CLIENT_ROAM) return 0; - tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, - common); - return (tt_global_entry->orig_node == orig_node); } @@ -1046,7 +1057,7 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, const void *), void *cb_data) { - struct tt_common_entry *tt_common_entry; + struct tt_local_entry *tt_local_entry; struct tt_query_packet *tt_response; struct tt_change *tt_change; struct hlist_node *node; @@ -1054,7 +1065,7 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, struct sk_buff *skb = NULL; uint16_t tt_tot, tt_count; ssize_t tt_query_size = sizeof(struct tt_query_packet); - uint32_t i; + int i; if (tt_query_size + tt_len > primary_if->soft_iface->mtu) { tt_len = primary_if->soft_iface->mtu - tt_query_size; @@ -1078,16 +1089,15 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, for (i = 0; i < hash->size; i++) { head = &hash->table[i]; - hlist_for_each_entry_rcu(tt_common_entry, node, + hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) { if (tt_count == tt_tot) break; - if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data))) + if ((valid_cb) && (!valid_cb(tt_local_entry, cb_data))) continue; - memcpy(tt_change->addr, tt_common_entry->addr, - ETH_ALEN); + memcpy(tt_change->addr, tt_local_entry->addr, ETH_ALEN); tt_change->flags = NO_FLAGS; tt_count++; @@ -1194,11 +1204,11 @@ static bool 
send_other_tt_response(struct bat_priv *bat_priv, (tt_request->flags & TT_FULL_TABLE ? 'F' : '.')); /* Let's get the orig node of the REAL destination */ - req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst); + req_dst_orig_node = get_orig_node(bat_priv, tt_request->dst); if (!req_dst_orig_node) goto out; - res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src); + res_dst_orig_node = get_orig_node(bat_priv, tt_request->src); if (!res_dst_orig_node) goto out; @@ -1324,7 +1334,7 @@ static bool send_my_tt_response(struct bat_priv *bat_priv, my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); req_ttvn = tt_request->ttvn; - orig_node = orig_hash_find(bat_priv, tt_request->src); + orig_node = get_orig_node(bat_priv, tt_request->src); if (!orig_node) goto out; @@ -1504,7 +1514,7 @@ bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr) goto out; /* Check if the client has been logically deleted (but is kept for * consistency purpose) */ - if (tt_local_entry->common.flags & TT_CLIENT_PENDING) + if (tt_local_entry->flags & TT_CLIENT_PENDING) goto out; ret = true; out: @@ -1727,53 +1737,45 @@ void tt_free(struct bat_priv *bat_priv) kfree(bat_priv->tt_buff); } -/* This function will enable or disable the specified flags for all the entries - * in the given hash table and returns the number of modified entries */ -static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags, - bool enable) +/* This function will reset the specified flags from all the entries in + * the given hash table and will increment num_local_tt for each involved + * entry */ +static void tt_local_reset_flags(struct bat_priv *bat_priv, uint16_t flags) { - uint32_t i; - uint16_t changed_num = 0; + int i; + struct hashtable_t *hash = bat_priv->tt_local_hash; struct hlist_head *head; struct hlist_node *node; - struct tt_common_entry *tt_common_entry; + struct tt_local_entry *tt_local_entry; if (!hash) - goto out; + return; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); - hlist_for_each_entry_rcu(tt_common_entry, node, + hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) { - if (enable) { - if ((tt_common_entry->flags & flags) == flags) - continue; - tt_common_entry->flags |= flags; - } else { - if (!(tt_common_entry->flags & flags)) - continue; - tt_common_entry->flags &= ~flags; - } - changed_num++; + if (!(tt_local_entry->flags & flags)) + continue; + tt_local_entry->flags &= ~flags; + atomic_inc(&bat_priv->num_local_tt); } rcu_read_unlock(); } -out: - return changed_num; + } /* Purge out all the tt local entries marked with TT_CLIENT_PENDING */ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv) { struct hashtable_t *hash = bat_priv->tt_local_hash; - struct tt_common_entry *tt_common_entry; struct tt_local_entry *tt_local_entry; struct hlist_node *node, *node_tmp; struct hlist_head *head; spinlock_t *list_lock; /* protects write access to the hash lists */ - uint32_t i; + int i; if (!hash) return; @@ -1783,19 +1785,16 @@ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv) list_lock = &hash->list_locks[i]; spin_lock_bh(list_lock); - hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, + hlist_for_each_entry_safe(tt_local_entry, node, node_tmp, head, hash_entry) { - if (!(tt_common_entry->flags & TT_CLIENT_PENDING)) + if (!(tt_local_entry->flags & TT_CLIENT_PENDING)) continue; bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry " - "(%pM): pending\n", tt_common_entry->addr); + "(%pM): pending\n", 
tt_local_entry->addr); atomic_dec(&bat_priv->num_local_tt); hlist_del_rcu(node); - tt_local_entry = container_of(tt_common_entry, - struct tt_local_entry, - common); tt_local_entry_free_ref(tt_local_entry); } spin_unlock_bh(list_lock); @@ -1805,11 +1804,7 @@ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv) void tt_commit_changes(struct bat_priv *bat_priv) { - uint16_t changed_num = tt_set_flags(bat_priv->tt_local_hash, - TT_CLIENT_NEW, false); - /* all the reset entries have now to be effectively counted as local - * entries */ - atomic_add(changed_num, &bat_priv->num_local_tt); + tt_local_reset_flags(bat_priv, TT_CLIENT_NEW); tt_local_purge_pending_clients(bat_priv); /* Increment the TTVN only once per OGM interval */ diff --git a/trunk/net/batman-adv/types.h b/trunk/net/batman-adv/types.h index e9eb043719ac..ab8d0fe6df5a 100644 --- a/trunk/net/batman-adv/types.h +++ b/trunk/net/batman-adv/types.h @@ -222,24 +222,24 @@ struct socket_packet { struct icmp_packet_rr icmp_packet; }; -struct tt_common_entry { +struct tt_local_entry { uint8_t addr[ETH_ALEN]; struct hlist_node hash_entry; + unsigned long last_seen; uint16_t flags; atomic_t refcount; struct rcu_head rcu; }; -struct tt_local_entry { - struct tt_common_entry common; - unsigned long last_seen; -}; - struct tt_global_entry { - struct tt_common_entry common; + uint8_t addr[ETH_ALEN]; + struct hlist_node hash_entry; /* entry in the global table */ struct orig_node *orig_node; uint8_t ttvn; + uint16_t flags; /* only TT_GLOBAL_ROAM is used */ unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */ + atomic_t refcount; + struct rcu_head rcu; }; struct tt_change_node { diff --git a/trunk/net/batman-adv/vis.c b/trunk/net/batman-adv/vis.c index cc3b9f2f3b5d..f81a6b668b0c 100644 --- a/trunk/net/batman-adv/vis.c +++ b/trunk/net/batman-adv/vis.c @@ -66,7 +66,7 @@ static int vis_info_cmp(const struct hlist_node *node, const void *data2) /* hash function to choose an entry in a hash table of given size */ /* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */ -static uint32_t vis_info_choose(const void *data, uint32_t size) +static int vis_info_choose(const void *data, int size) { const struct vis_info *vis_info = data; const struct vis_packet *packet; @@ -96,7 +96,7 @@ static struct vis_info *vis_hash_find(struct bat_priv *bat_priv, struct hlist_head *head; struct hlist_node *node; struct vis_info *vis_info, *vis_info_tmp = NULL; - uint32_t index; + int index; if (!hash) return NULL; @@ -202,8 +202,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset) HLIST_HEAD(vis_if_list); struct if_list_entry *entry; struct hlist_node *pos, *n; - uint32_t i; - int j, ret = 0; + int i, j, ret = 0; int vis_server = atomic_read(&bat_priv->vis_mode); size_t buff_pos, buf_size; char *buff; @@ -557,8 +556,7 @@ static int find_best_vis_server(struct bat_priv *bat_priv, struct hlist_head *head; struct orig_node *orig_node; struct vis_packet *packet; - int best_tq = -1; - uint32_t i; + int best_tq = -1, i; packet = (struct vis_packet *)info->skb_packet->data; @@ -609,9 +607,8 @@ static int generate_vis_packet(struct bat_priv *bat_priv) struct vis_info *info = bat_priv->my_vis_info; struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data; struct vis_info_entry *entry; - struct tt_common_entry *tt_common_entry; - int best_tq = -1; - uint32_t i; + struct tt_local_entry *tt_local_entry; + int best_tq = -1, i; info->first_seen = jiffies; packet->vis_type = atomic_read(&bat_priv->vis_mode); @@ -672,13 
+669,13 @@ static int generate_vis_packet(struct bat_priv *bat_priv) head = &hash->table[i]; rcu_read_lock(); - hlist_for_each_entry_rcu(tt_common_entry, node, head, + hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) { entry = (struct vis_info_entry *) skb_put(info->skb_packet, sizeof(*entry)); memset(entry->src, 0, ETH_ALEN); - memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN); + memcpy(entry->dest, tt_local_entry->addr, ETH_ALEN); entry->quality = 0; /* 0 means TT */ packet->entries++; @@ -699,7 +696,7 @@ static int generate_vis_packet(struct bat_priv *bat_priv) * held */ static void purge_vis_packets(struct bat_priv *bat_priv) { - uint32_t i; + int i; struct hashtable_t *hash = bat_priv->vis_hash; struct hlist_node *node, *node_tmp; struct hlist_head *head; @@ -736,7 +733,7 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv, struct sk_buff *skb; struct hard_iface *hard_iface; uint8_t dstaddr[ETH_ALEN]; - uint32_t i; + int i; packet = (struct vis_packet *)info->skb_packet->data; diff --git a/trunk/net/bluetooth/Kconfig b/trunk/net/bluetooth/Kconfig index 9ec85eb8853d..bfb3dc03c9de 100644 --- a/trunk/net/bluetooth/Kconfig +++ b/trunk/net/bluetooth/Kconfig @@ -6,11 +6,7 @@ menuconfig BT tristate "Bluetooth subsystem support" depends on NET && !S390 depends on RFKILL || !RFKILL - select CRC16 select CRYPTO - select CRYPTO_BLKCIPHER - select CRYPTO_AES - select CRYPTO_ECB help Bluetooth is low-cost, low-power, short-range wireless technology. It was designed as a replacement for cables and other short-range @@ -19,12 +15,10 @@ menuconfig BT Bluetooth can be found at . Linux Bluetooth subsystem consist of several layers: - Bluetooth Core - HCI device and connection manager, scheduler - SCO audio links - L2CAP (Logical Link Control and Adaptation Protocol) - SMP (Security Manager Protocol) on LE (Low Energy) links + Bluetooth Core (HCI device and connection manager, scheduler) HCI Device drivers (Interface to the hardware) + SCO Module (SCO audio links) + L2CAP Module (Logical Link Control and Adaptation Protocol) RFCOMM Module (RFCOMM Protocol) BNEP Module (Bluetooth Network Encapsulation Protocol) CMTP Module (CAPI Message Transport Protocol) @@ -39,6 +33,31 @@ menuconfig BT to Bluetooth kernel modules are provided in the BlueZ packages. For more information, see . +if BT != n + +config BT_L2CAP + bool "L2CAP protocol support" + select CRC16 + select CRYPTO + select CRYPTO_BLKCIPHER + select CRYPTO_AES + select CRYPTO_ECB + help + L2CAP (Logical Link Control and Adaptation Protocol) provides + connection oriented and connection-less data transport. L2CAP + support is required for most Bluetooth applications. + + Also included is support for SMP (Security Manager Protocol) which + is the security layer on top of LE (Low Energy) links. + +config BT_SCO + bool "SCO links support" + help + SCO link provides voice transport over Bluetooth. SCO support is + required for voice applications like Headset and Audio. 
+ +endif + source "net/bluetooth/rfcomm/Kconfig" source "net/bluetooth/bnep/Kconfig" diff --git a/trunk/net/bluetooth/Makefile b/trunk/net/bluetooth/Makefile index 2dc5a5700f53..9b67f3d08fa4 100644 --- a/trunk/net/bluetooth/Makefile +++ b/trunk/net/bluetooth/Makefile @@ -8,5 +8,6 @@ obj-$(CONFIG_BT_BNEP) += bnep/ obj-$(CONFIG_BT_CMTP) += cmtp/ obj-$(CONFIG_BT_HIDP) += hidp/ -bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ - hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o +bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o +bluetooth-$(CONFIG_BT_L2CAP) += l2cap_core.o l2cap_sock.o smp.o +bluetooth-$(CONFIG_BT_SCO) += sco.o diff --git a/trunk/net/bluetooth/af_bluetooth.c b/trunk/net/bluetooth/af_bluetooth.c index cdcfcabb34ab..062124cd89cf 100644 --- a/trunk/net/bluetooth/af_bluetooth.c +++ b/trunk/net/bluetooth/af_bluetooth.c @@ -199,14 +199,15 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock) BT_DBG("parent %p", parent); + local_bh_disable(); list_for_each_safe(p, n, &bt_sk(parent)->accept_q) { sk = (struct sock *) list_entry(p, struct bt_sock, accept_q); - lock_sock(sk); + bh_lock_sock(sk); /* FIXME: Is this check still needed */ if (sk->sk_state == BT_CLOSED) { - release_sock(sk); + bh_unlock_sock(sk); bt_accept_unlink(sk); continue; } @@ -217,12 +218,14 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock) if (newsock) sock_graft(sk, newsock); - release_sock(sk); + bh_unlock_sock(sk); + local_bh_enable(); return sk; } - release_sock(sk); + bh_unlock_sock(sk); } + local_bh_enable(); return NULL; } diff --git a/trunk/net/bluetooth/bnep/Kconfig b/trunk/net/bluetooth/bnep/Kconfig index 71791fc9f6b1..35158b036d54 100644 --- a/trunk/net/bluetooth/bnep/Kconfig +++ b/trunk/net/bluetooth/bnep/Kconfig @@ -1,6 +1,6 @@ config BT_BNEP tristate "BNEP protocol support" - depends on BT + depends on BT && BT_L2CAP select CRC32 help BNEP (Bluetooth Network Encapsulation Protocol) is Ethernet diff --git a/trunk/net/bluetooth/bnep/core.c b/trunk/net/bluetooth/bnep/core.c index a779ec703323..1eea8208b2cc 100644 --- a/trunk/net/bluetooth/bnep/core.c +++ b/trunk/net/bluetooth/bnep/core.c @@ -56,8 +56,8 @@ #define VERSION "1.3" -static bool compress_src = true; -static bool compress_dst = true; +static int compress_src = 1; +static int compress_dst = 1; static LIST_HEAD(bnep_session_list); static DECLARE_RWSEM(bnep_session_sem); @@ -65,13 +65,15 @@ static DECLARE_RWSEM(bnep_session_sem); static struct bnep_session *__bnep_get_session(u8 *dst) { struct bnep_session *s; + struct list_head *p; BT_DBG(""); - list_for_each_entry(s, &bnep_session_list, list) + list_for_each(p, &bnep_session_list) { + s = list_entry(p, struct bnep_session, list); if (!compare_ether_addr(dst, s->eh.h_source)) return s; - + } return NULL; } @@ -663,14 +665,17 @@ static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s) int bnep_get_connlist(struct bnep_connlist_req *req) { - struct bnep_session *s; + struct list_head *p; int err = 0, n = 0; down_read(&bnep_session_sem); - list_for_each_entry(s, &bnep_session_list, list) { + list_for_each(p, &bnep_session_list) { + struct bnep_session *s; struct bnep_conninfo ci; + s = list_entry(p, struct bnep_session, list); + __bnep_copy_ci(&ci, s); if (copy_to_user(req->ci, &ci, sizeof(ci))) { diff --git a/trunk/net/bluetooth/cmtp/Kconfig b/trunk/net/bluetooth/cmtp/Kconfig index 94cbf42ce155..d6b0382f6f3a 100644 --- 
a/trunk/net/bluetooth/cmtp/Kconfig +++ b/trunk/net/bluetooth/cmtp/Kconfig @@ -1,6 +1,6 @@ config BT_CMTP tristate "CMTP protocol support" - depends on BT && ISDN_CAPI + depends on BT && BT_L2CAP && ISDN_CAPI help CMTP (CAPI Message Transport Protocol) is a transport layer for CAPI messages. CMTP is required for the Bluetooth Common diff --git a/trunk/net/bluetooth/cmtp/core.c b/trunk/net/bluetooth/cmtp/core.c index 6c9c1fd601ca..5a6e634f7fca 100644 --- a/trunk/net/bluetooth/cmtp/core.c +++ b/trunk/net/bluetooth/cmtp/core.c @@ -53,13 +53,15 @@ static LIST_HEAD(cmtp_session_list); static struct cmtp_session *__cmtp_get_session(bdaddr_t *bdaddr) { struct cmtp_session *session; + struct list_head *p; BT_DBG(""); - list_for_each_entry(session, &cmtp_session_list, list) + list_for_each(p, &cmtp_session_list) { + session = list_entry(p, struct cmtp_session, list); if (!bacmp(bdaddr, &session->bdaddr)) return session; - + } return NULL; } @@ -430,16 +432,19 @@ int cmtp_del_connection(struct cmtp_conndel_req *req) int cmtp_get_connlist(struct cmtp_connlist_req *req) { - struct cmtp_session *session; + struct list_head *p; int err = 0, n = 0; BT_DBG(""); down_read(&cmtp_session_sem); - list_for_each_entry(session, &cmtp_session_list, list) { + list_for_each(p, &cmtp_session_list) { + struct cmtp_session *session; struct cmtp_conninfo ci; + session = list_entry(p, struct cmtp_session, list); + __cmtp_copy_session(session, &ci); if (copy_to_user(req->ci, &ci, sizeof(ci))) { diff --git a/trunk/net/bluetooth/hci_conn.c b/trunk/net/bluetooth/hci_conn.c index 3db432473ad5..c1c597e3e198 100644 --- a/trunk/net/bluetooth/hci_conn.c +++ b/trunk/net/bluetooth/hci_conn.c @@ -123,7 +123,7 @@ static void hci_acl_connect_cancel(struct hci_conn *conn) BT_DBG("%p", conn); - if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2) + if (conn->hdev->hci_ver < 2) return; bacpy(&cp.bdaddr, &conn->dst); @@ -275,10 +275,9 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status) } } -static void hci_conn_timeout(struct work_struct *work) +static void hci_conn_timeout(unsigned long arg) { - struct hci_conn *conn = container_of(work, struct hci_conn, - disc_work.work); + struct hci_conn *conn = (void *) arg; struct hci_dev *hdev = conn->hdev; __u8 reason; @@ -312,42 +311,6 @@ static void hci_conn_timeout(struct work_struct *work) hci_dev_unlock(hdev); } -/* Enter sniff mode */ -static void hci_conn_enter_sniff_mode(struct hci_conn *conn) -{ - struct hci_dev *hdev = conn->hdev; - - BT_DBG("conn %p mode %d", conn, conn->mode); - - if (test_bit(HCI_RAW, &hdev->flags)) - return; - - if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn)) - return; - - if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF)) - return; - - if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) { - struct hci_cp_sniff_subrate cp; - cp.handle = cpu_to_le16(conn->handle); - cp.max_latency = cpu_to_le16(0); - cp.min_remote_timeout = cpu_to_le16(0); - cp.min_local_timeout = cpu_to_le16(0); - hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp); - } - - if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { - struct hci_cp_sniff_mode cp; - cp.handle = cpu_to_le16(conn->handle); - cp.max_interval = cpu_to_le16(hdev->sniff_max_interval); - cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); - cp.attempt = cpu_to_le16(4); - cp.timeout = cpu_to_le16(1); - hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp); - } -} - static void hci_conn_idle(unsigned long arg) { struct hci_conn *conn = (void *) arg; @@ -362,8 +325,12 @@ 
static void hci_conn_auto_accept(unsigned long arg) struct hci_conn *conn = (void *) arg; struct hci_dev *hdev = conn->hdev; + hci_dev_lock(hdev); + hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst), &conn->dst); + + hci_dev_unlock(hdev); } struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) @@ -407,9 +374,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) skb_queue_head_init(&conn->data_q); - INIT_LIST_HEAD(&conn->chan_list);; - - INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout); + setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn); setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn); setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept, (unsigned long) conn); @@ -418,6 +383,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) hci_dev_hold(hdev); + tasklet_disable(&hdev->tx_task); + hci_conn_hash_add(hdev, conn); if (hdev->notify) hdev->notify(hdev, HCI_NOTIFY_CONN_ADD); @@ -426,6 +393,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) hci_conn_init_sysfs(conn); + tasklet_enable(&hdev->tx_task); + return conn; } @@ -437,7 +406,7 @@ int hci_conn_del(struct hci_conn *conn) del_timer(&conn->idle_timer); - cancel_delayed_work_sync(&conn->disc_work); + del_timer(&conn->disc_timer); del_timer(&conn->auto_accept_timer); @@ -461,13 +430,14 @@ int hci_conn_del(struct hci_conn *conn) } } - - hci_chan_list_flush(conn); + tasklet_disable(&hdev->tx_task); hci_conn_hash_del(hdev, conn); if (hdev->notify) hdev->notify(hdev, HCI_NOTIFY_CONN_DEL); + tasklet_enable(&hdev->tx_task); + skb_queue_purge(&conn->data_q); hci_conn_put_device(conn); @@ -483,13 +453,16 @@ int hci_conn_del(struct hci_conn *conn) struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src) { int use_src = bacmp(src, BDADDR_ANY); - struct hci_dev *hdev = NULL, *d; + struct hci_dev *hdev = NULL; + struct list_head *p; BT_DBG("%s -> %s", batostr(src), batostr(dst)); - read_lock(&hci_dev_list_lock); + read_lock_bh(&hci_dev_list_lock); + + list_for_each(p, &hci_dev_list) { + struct hci_dev *d = list_entry(p, struct hci_dev, list); - list_for_each_entry(d, &hci_dev_list, list) { if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags)) continue; @@ -512,7 +485,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src) if (hdev) hdev = hci_dev_hold(hdev); - read_unlock(&hci_dev_list_lock); + read_unlock_bh(&hci_dev_list_lock); return hdev; } EXPORT_SYMBOL(hci_get_route); @@ -793,18 +766,60 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active) jiffies + msecs_to_jiffies(hdev->idle_timeout)); } +/* Enter sniff mode */ +void hci_conn_enter_sniff_mode(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + + BT_DBG("conn %p mode %d", conn, conn->mode); + + if (test_bit(HCI_RAW, &hdev->flags)) + return; + + if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn)) + return; + + if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF)) + return; + + if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) { + struct hci_cp_sniff_subrate cp; + cp.handle = cpu_to_le16(conn->handle); + cp.max_latency = cpu_to_le16(0); + cp.min_remote_timeout = cpu_to_le16(0); + cp.min_local_timeout = cpu_to_le16(0); + hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp); + } + + if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { + struct hci_cp_sniff_mode cp; + cp.handle = cpu_to_le16(conn->handle); + 
cp.max_interval = cpu_to_le16(hdev->sniff_max_interval); + cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); + cp.attempt = cpu_to_le16(4); + cp.timeout = cpu_to_le16(1); + hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp); + } +} + /* Drop all connection on the device */ void hci_conn_hash_flush(struct hci_dev *hdev) { struct hci_conn_hash *h = &hdev->conn_hash; - struct hci_conn *c; + struct list_head *p; BT_DBG("hdev %s", hdev->name); - list_for_each_entry_rcu(c, &h->list, list) { + p = h->list.next; + while (p != &h->list) { + struct hci_conn *c; + + c = list_entry(p, struct hci_conn, list); + p = p->next; + c->state = BT_CLOSED; - hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM); + hci_proto_disconn_cfm(c, 0x16); hci_conn_del(c); } } @@ -840,10 +855,10 @@ EXPORT_SYMBOL(hci_conn_put_device); int hci_get_conn_list(void __user *arg) { - register struct hci_conn *c; struct hci_conn_list_req req, *cl; struct hci_conn_info *ci; struct hci_dev *hdev; + struct list_head *p; int n = 0, size, err; if (copy_from_user(&req, arg, sizeof(req))) @@ -866,8 +881,11 @@ int hci_get_conn_list(void __user *arg) ci = cl->conn_info; - hci_dev_lock(hdev); - list_for_each_entry(c, &hdev->conn_hash.list, list) { + hci_dev_lock_bh(hdev); + list_for_each(p, &hdev->conn_hash.list) { + register struct hci_conn *c; + c = list_entry(p, struct hci_conn, list); + bacpy(&(ci + n)->bdaddr, &c->dst); (ci + n)->handle = c->handle; (ci + n)->type = c->type; @@ -877,7 +895,7 @@ int hci_get_conn_list(void __user *arg) if (++n >= req.conn_num) break; } - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); cl->dev_id = hdev->id; cl->conn_num = n; @@ -901,7 +919,7 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg) if (copy_from_user(&req, arg, sizeof(req))) return -EFAULT; - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr); if (conn) { bacpy(&ci.bdaddr, &conn->dst); @@ -911,7 +929,7 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg) ci.state = conn->state; ci.link_mode = conn->link_mode; } - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); if (!conn) return -ENOENT; @@ -927,60 +945,14 @@ int hci_get_auth_info(struct hci_dev *hdev, void __user *arg) if (copy_from_user(&req, arg, sizeof(req))) return -EFAULT; - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr); if (conn) req.type = conn->auth_type; - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); if (!conn) return -ENOENT; return copy_to_user(arg, &req, sizeof(req)) ? 
-EFAULT : 0; } - -struct hci_chan *hci_chan_create(struct hci_conn *conn) -{ - struct hci_dev *hdev = conn->hdev; - struct hci_chan *chan; - - BT_DBG("%s conn %p", hdev->name, conn); - - chan = kzalloc(sizeof(struct hci_chan), GFP_ATOMIC); - if (!chan) - return NULL; - - chan->conn = conn; - skb_queue_head_init(&chan->data_q); - - list_add_rcu(&chan->list, &conn->chan_list); - - return chan; -} - -int hci_chan_del(struct hci_chan *chan) -{ - struct hci_conn *conn = chan->conn; - struct hci_dev *hdev = conn->hdev; - - BT_DBG("%s conn %p chan %p", hdev->name, conn, chan); - - list_del_rcu(&chan->list); - - synchronize_rcu(); - - skb_queue_purge(&chan->data_q); - kfree(chan); - - return 0; -} - -void hci_chan_list_flush(struct hci_conn *conn) -{ - struct hci_chan *chan; - - BT_DBG("conn %p", conn); - - list_for_each_entry_rcu(chan, &conn->chan_list, list) - hci_chan_del(chan); -} diff --git a/trunk/net/bluetooth/hci_core.c b/trunk/net/bluetooth/hci_core.c index 845da3ee56a0..be84ae33ae36 100644 --- a/trunk/net/bluetooth/hci_core.c +++ b/trunk/net/bluetooth/hci_core.c @@ -1,7 +1,6 @@ /* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated - Copyright (C) 2011 ProFUSION Embedded Systems Written 2000,2001 by Maxim Krasnyansky @@ -55,11 +54,11 @@ #define AUTO_OFF_TIMEOUT 2000 -int enable_hs; +static void hci_cmd_task(unsigned long arg); +static void hci_rx_task(unsigned long arg); +static void hci_tx_task(unsigned long arg); -static void hci_rx_work(struct work_struct *work); -static void hci_cmd_work(struct work_struct *work); -static void hci_tx_work(struct work_struct *work); +static DEFINE_RWLOCK(hci_task_lock); /* HCI device list */ LIST_HEAD(hci_dev_list); @@ -69,6 +68,10 @@ DEFINE_RWLOCK(hci_dev_list_lock); LIST_HEAD(hci_cb_list); DEFINE_RWLOCK(hci_cb_list_lock); +/* HCI protocols */ +#define HCI_MAX_PROTO 2 +struct hci_proto *hci_proto[HCI_MAX_PROTO]; + /* HCI notifiers list */ static ATOMIC_NOTIFIER_HEAD(hci_notifier); @@ -187,20 +190,33 @@ static void hci_reset_req(struct hci_dev *hdev, unsigned long opt) hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); } -static void bredr_init(struct hci_dev *hdev) +static void hci_init_req(struct hci_dev *hdev, unsigned long opt) { struct hci_cp_delete_stored_link_key cp; + struct sk_buff *skb; __le16 param; __u8 flt_type; - hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; + BT_DBG("%s %ld", hdev->name, opt); + + /* Driver initialization */ + + /* Special commands */ + while ((skb = skb_dequeue(&hdev->driver_init))) { + bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; + skb->dev = (void *) hdev; + + skb_queue_tail(&hdev->cmd_q, skb); + tasklet_schedule(&hdev->cmd_task); + } + skb_queue_purge(&hdev->driver_init); /* Mandatory initialization */ /* Reset */ if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) { - set_bit(HCI_RESET, &hdev->flags); - hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); + set_bit(HCI_RESET, &hdev->flags); + hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); } /* Read Local Supported Features */ @@ -212,6 +228,18 @@ static void bredr_init(struct hci_dev *hdev) /* Read Buffer Size (ACL mtu, max pkt, etc.) 
*/ hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL); +#if 0 + /* Host buffer size */ + { + struct hci_cp_host_buffer_size cp; + cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE); + cp.sco_mtu = HCI_MAX_SCO_SIZE; + cp.acl_max_pkt = cpu_to_le16(0xffff); + cp.sco_max_pkt = cpu_to_le16(0xffff); + hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp); + } +#endif + /* Read BD Address */ hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL); @@ -239,51 +267,6 @@ static void bredr_init(struct hci_dev *hdev) hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp); } -static void amp_init(struct hci_dev *hdev) -{ - hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED; - - /* Reset */ - hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); - - /* Read Local Version */ - hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL); -} - -static void hci_init_req(struct hci_dev *hdev, unsigned long opt) -{ - struct sk_buff *skb; - - BT_DBG("%s %ld", hdev->name, opt); - - /* Driver initialization */ - - /* Special commands */ - while ((skb = skb_dequeue(&hdev->driver_init))) { - bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; - skb->dev = (void *) hdev; - - skb_queue_tail(&hdev->cmd_q, skb); - queue_work(hdev->workqueue, &hdev->cmd_work); - } - skb_queue_purge(&hdev->driver_init); - - switch (hdev->dev_type) { - case HCI_BREDR: - bredr_init(hdev); - break; - - case HCI_AMP: - amp_init(hdev); - break; - - default: - BT_ERR("Unknown device type %d", hdev->dev_type); - break; - } - -} - static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt) { BT_DBG("%s", hdev->name); @@ -336,7 +319,8 @@ static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt) * Device is held on return. */ struct hci_dev *hci_dev_get(int index) { - struct hci_dev *hdev = NULL, *d; + struct hci_dev *hdev = NULL; + struct list_head *p; BT_DBG("%d", index); @@ -344,7 +328,8 @@ struct hci_dev *hci_dev_get(int index) return NULL; read_lock(&hci_dev_list_lock); - list_for_each_entry(d, &hci_dev_list, list) { + list_for_each(p, &hci_dev_list) { + struct hci_dev *d = list_entry(p, struct hci_dev, list); if (d->id == index) { hdev = hci_dev_hold(d); break; @@ -460,14 +445,14 @@ int hci_inquiry(void __user *arg) if (!hdev) return -ENODEV; - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) { inquiry_cache_flush(hdev); do_inquiry = 1; } - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); timeo = ir.length * msecs_to_jiffies(2000); @@ -489,9 +474,9 @@ int hci_inquiry(void __user *arg) goto done; } - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf); - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); BT_DBG("num_rsp %d", ir.num_rsp); @@ -538,9 +523,8 @@ int hci_dev_open(__u16 dev) if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) set_bit(HCI_RAW, &hdev->flags); - /* Treat all non BR/EDR controllers as raw devices if - enable_hs is not set */ - if (hdev->dev_type != HCI_BREDR && !enable_hs) + /* Treat all non BR/EDR controllers as raw devices for now */ + if (hdev->dev_type != HCI_BREDR) set_bit(HCI_RAW, &hdev->flags); if (hdev->open(hdev)) { @@ -567,16 +551,13 @@ int hci_dev_open(__u16 dev) hci_dev_hold(hdev); set_bit(HCI_UP, &hdev->flags); hci_notify(hdev, HCI_DEV_UP); - if (!test_bit(HCI_SETUP, &hdev->flags)) { - hci_dev_lock(hdev); - mgmt_powered(hdev, 1); - hci_dev_unlock(hdev); - } + if (!test_bit(HCI_SETUP, &hdev->flags)) + mgmt_powered(hdev->id, 1); } else { /* Init failed, cleanup 
*/ - flush_work(&hdev->tx_work); - flush_work(&hdev->cmd_work); - flush_work(&hdev->rx_work); + tasklet_kill(&hdev->rx_task); + tasklet_kill(&hdev->tx_task); + tasklet_kill(&hdev->cmd_task); skb_queue_purge(&hdev->cmd_q); skb_queue_purge(&hdev->rx_q); @@ -612,25 +593,14 @@ static int hci_dev_do_close(struct hci_dev *hdev) return 0; } - /* Flush RX and TX works */ - flush_work(&hdev->tx_work); - flush_work(&hdev->rx_work); - - if (hdev->discov_timeout > 0) { - cancel_delayed_work(&hdev->discov_off); - hdev->discov_timeout = 0; - } + /* Kill RX and TX tasks */ + tasklet_kill(&hdev->rx_task); + tasklet_kill(&hdev->tx_task); - if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags)) - cancel_delayed_work(&hdev->power_off); - - if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags)) - cancel_delayed_work(&hdev->service_cache); - - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); inquiry_cache_flush(hdev); hci_conn_hash_flush(hdev); - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_notify(hdev, HCI_DEV_DOWN); @@ -643,12 +613,12 @@ static int hci_dev_do_close(struct hci_dev *hdev) if (!test_bit(HCI_RAW, &hdev->flags)) { set_bit(HCI_INIT, &hdev->flags); __hci_request(hdev, hci_reset_req, 0, - msecs_to_jiffies(250)); + msecs_to_jiffies(HCI_INIT_TIMEOUT)); clear_bit(HCI_INIT, &hdev->flags); } - /* flush cmd work */ - flush_work(&hdev->cmd_work); + /* Kill cmd task */ + tasklet_kill(&hdev->cmd_task); /* Drop queues */ skb_queue_purge(&hdev->rx_q); @@ -666,9 +636,7 @@ static int hci_dev_do_close(struct hci_dev *hdev) * and no tasks are scheduled. */ hdev->close(hdev); - hci_dev_lock(hdev); - mgmt_powered(hdev, 0); - hci_dev_unlock(hdev); + mgmt_powered(hdev->id, 0); /* Clear flags */ hdev->flags = 0; @@ -702,6 +670,7 @@ int hci_dev_reset(__u16 dev) return -ENODEV; hci_req_lock(hdev); + tasklet_disable(&hdev->tx_task); if (!test_bit(HCI_UP, &hdev->flags)) goto done; @@ -710,10 +679,10 @@ int hci_dev_reset(__u16 dev) skb_queue_purge(&hdev->rx_q); skb_queue_purge(&hdev->cmd_q); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); inquiry_cache_flush(hdev); hci_conn_hash_flush(hdev); - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); if (hdev->flush) hdev->flush(hdev); @@ -726,6 +695,7 @@ int hci_dev_reset(__u16 dev) msecs_to_jiffies(HCI_INIT_TIMEOUT)); done: + tasklet_enable(&hdev->tx_task); hci_req_unlock(hdev); hci_dev_put(hdev); return ret; @@ -824,9 +794,9 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) int hci_get_dev_list(void __user *arg) { - struct hci_dev *hdev; struct hci_dev_list_req *dl; struct hci_dev_req *dr; + struct list_head *p; int n = 0, size, err; __u16 dev_num; @@ -844,10 +814,13 @@ int hci_get_dev_list(void __user *arg) dr = dl->dev_req; - read_lock(&hci_dev_list_lock); - list_for_each_entry(hdev, &hci_dev_list, list) { - if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags)) - cancel_delayed_work(&hdev->power_off); + read_lock_bh(&hci_dev_list_lock); + list_for_each(p, &hci_dev_list) { + struct hci_dev *hdev; + + hdev = list_entry(p, struct hci_dev, list); + + hci_del_off_timer(hdev); if (!test_bit(HCI_MGMT, &hdev->flags)) set_bit(HCI_PAIRABLE, &hdev->flags); @@ -858,7 +831,7 @@ int hci_get_dev_list(void __user *arg) if (++n >= dev_num) break; } - read_unlock(&hci_dev_list_lock); + read_unlock_bh(&hci_dev_list_lock); dl->dev_num = n; size = sizeof(*dl) + n * sizeof(*dr); @@ -882,8 +855,7 @@ int hci_get_dev_info(void __user *arg) if (!hdev) return -ENODEV; - if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags)) - cancel_delayed_work_sync(&hdev->power_off); + hci_del_off_timer(hdev); if 
(!test_bit(HCI_MGMT, &hdev->flags)) set_bit(HCI_PAIRABLE, &hdev->flags); @@ -940,7 +912,6 @@ struct hci_dev *hci_alloc_dev(void) if (!hdev) return NULL; - hci_init_sysfs(hdev); skb_queue_head_init(&hdev->driver_init); return hdev; @@ -967,41 +938,39 @@ static void hci_power_on(struct work_struct *work) return; if (test_bit(HCI_AUTO_OFF, &hdev->flags)) - schedule_delayed_work(&hdev->power_off, - msecs_to_jiffies(AUTO_OFF_TIMEOUT)); + mod_timer(&hdev->off_timer, + jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT)); if (test_and_clear_bit(HCI_SETUP, &hdev->flags)) - mgmt_index_added(hdev); + mgmt_index_added(hdev->id); } static void hci_power_off(struct work_struct *work) { - struct hci_dev *hdev = container_of(work, struct hci_dev, - power_off.work); + struct hci_dev *hdev = container_of(work, struct hci_dev, power_off); BT_DBG("%s", hdev->name); - clear_bit(HCI_AUTO_OFF, &hdev->flags); - hci_dev_close(hdev->id); } -static void hci_discov_off(struct work_struct *work) +static void hci_auto_off(unsigned long data) { - struct hci_dev *hdev; - u8 scan = SCAN_PAGE; - - hdev = container_of(work, struct hci_dev, discov_off.work); + struct hci_dev *hdev = (struct hci_dev *) data; BT_DBG("%s", hdev->name); - hci_dev_lock(hdev); + clear_bit(HCI_AUTO_OFF, &hdev->flags); - hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan); + queue_work(hdev->workqueue, &hdev->power_off); +} - hdev->discov_timeout = 0; +void hci_del_off_timer(struct hci_dev *hdev) +{ + BT_DBG("%s", hdev->name); - hci_dev_unlock(hdev); + clear_bit(HCI_AUTO_OFF, &hdev->flags); + del_timer(&hdev->off_timer); } int hci_uuids_clear(struct hci_dev *hdev) @@ -1038,11 +1007,16 @@ int hci_link_keys_clear(struct hci_dev *hdev) struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) { - struct link_key *k; + struct list_head *p; + + list_for_each(p, &hdev->link_keys) { + struct link_key *k; + + k = list_entry(p, struct link_key, list); - list_for_each_entry(k, &hdev->link_keys, list) if (bacmp(bdaddr, &k->bdaddr) == 0) return k; + } return NULL; } @@ -1164,7 +1138,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, persistent = hci_persistent_key(hdev, conn, type, old_key_type); - mgmt_new_link_key(hdev, key, persistent); + mgmt_new_key(hdev->id, key, persistent); if (!persistent) { list_del(&key->list); @@ -1207,7 +1181,7 @@ int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr, memcpy(id->rand, rand, sizeof(id->rand)); if (new_key) - mgmt_new_link_key(hdev, key, old_key_type); + mgmt_new_key(hdev->id, key, old_key_type); return 0; } @@ -1235,7 +1209,7 @@ static void hci_cmd_timer(unsigned long arg) BT_ERR("%s command tx timeout", hdev->name); atomic_set(&hdev->cmd_cnt, 1); - queue_work(hdev->workqueue, &hdev->cmd_work); + tasklet_schedule(&hdev->cmd_task); } struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, @@ -1305,11 +1279,16 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash, struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) { - struct bdaddr_list *b; + struct list_head *p; + + list_for_each(p, &hdev->blacklist) { + struct bdaddr_list *b; + + b = list_entry(p, struct bdaddr_list, list); - list_for_each_entry(b, &hdev->blacklist, list) if (bacmp(bdaddr, &b->bdaddr) == 0) return b; + } return NULL; } @@ -1348,30 +1327,31 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr) list_add(&entry->list, &hdev->blacklist); - return mgmt_device_blocked(hdev, bdaddr); + return 
mgmt_device_blocked(hdev->id, bdaddr); } int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct bdaddr_list *entry; - if (bacmp(bdaddr, BDADDR_ANY) == 0) + if (bacmp(bdaddr, BDADDR_ANY) == 0) { return hci_blacklist_clear(hdev); + } entry = hci_blacklist_lookup(hdev, bdaddr); - if (!entry) + if (!entry) { return -ENOENT; + } list_del(&entry->list); kfree(entry); - return mgmt_device_unblocked(hdev, bdaddr); + return mgmt_device_unblocked(hdev->id, bdaddr); } -static void hci_clear_adv_cache(struct work_struct *work) +static void hci_clear_adv_cache(unsigned long arg) { - struct hci_dev *hdev = container_of(work, struct hci_dev, - adv_work.work); + struct hci_dev *hdev = (void *) arg; hci_dev_lock(hdev); @@ -1445,7 +1425,7 @@ int hci_add_adv_entry(struct hci_dev *hdev, int hci_register_dev(struct hci_dev *hdev) { struct list_head *head = &hci_dev_list, *p; - int i, id, error; + int i, id = 0; BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name, hdev->bus, hdev->owner); @@ -1453,12 +1433,7 @@ int hci_register_dev(struct hci_dev *hdev) if (!hdev->open || !hdev->close || !hdev->destruct) return -EINVAL; - /* Do not allow HCI_AMP devices to register at index 0, - * so the index can be used as the AMP controller ID. - */ - id = (hdev->dev_type == HCI_BREDR) ? 0 : 1; - - write_lock(&hci_dev_list_lock); + write_lock_bh(&hci_dev_list_lock); /* Find first available device id */ list_for_each(p, &hci_dev_list) { @@ -1469,13 +1444,12 @@ int hci_register_dev(struct hci_dev *hdev) sprintf(hdev->name, "hci%d", id); hdev->id = id; - list_add_tail(&hdev->list, head); + list_add(&hdev->list, head); atomic_set(&hdev->refcnt, 1); - mutex_init(&hdev->lock); + spin_lock_init(&hdev->lock); hdev->flags = 0; - hdev->dev_flags = 0; hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); hdev->esco_type = (ESCO_HV1); hdev->link_mode = (HCI_LM_ACCEPT); @@ -1485,10 +1459,9 @@ int hci_register_dev(struct hci_dev *hdev) hdev->sniff_max_interval = 800; hdev->sniff_min_interval = 80; - INIT_WORK(&hdev->rx_work, hci_rx_work); - INIT_WORK(&hdev->cmd_work, hci_cmd_work); - INIT_WORK(&hdev->tx_work, hci_tx_work); - + tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev); + tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev); + tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev); skb_queue_head_init(&hdev->rx_q); skb_queue_head_init(&hdev->cmd_q); @@ -1506,8 +1479,6 @@ int hci_register_dev(struct hci_dev *hdev) hci_conn_hash_init(hdev); - INIT_LIST_HEAD(&hdev->mgmt_pending); - INIT_LIST_HEAD(&hdev->blacklist); INIT_LIST_HEAD(&hdev->uuids); @@ -1517,29 +1488,24 @@ int hci_register_dev(struct hci_dev *hdev) INIT_LIST_HEAD(&hdev->remote_oob_data); INIT_LIST_HEAD(&hdev->adv_entries); + setup_timer(&hdev->adv_timer, hci_clear_adv_cache, + (unsigned long) hdev); - INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache); INIT_WORK(&hdev->power_on, hci_power_on); - INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); - - INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off); + INIT_WORK(&hdev->power_off, hci_power_off); + setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev); memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); atomic_set(&hdev->promisc, 0); - write_unlock(&hci_dev_list_lock); + write_unlock_bh(&hci_dev_list_lock); - hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND | - WQ_MEM_RECLAIM, 1); - if (!hdev->workqueue) { - error = -ENOMEM; - goto err; - } + hdev->workqueue = create_singlethread_workqueue(hdev->name); + if (!hdev->workqueue) + goto 
nomem; - error = hci_add_sysfs(hdev); - if (error < 0) - goto err_wqueue; + hci_register_sysfs(hdev); hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev); @@ -1552,33 +1518,31 @@ int hci_register_dev(struct hci_dev *hdev) set_bit(HCI_AUTO_OFF, &hdev->flags); set_bit(HCI_SETUP, &hdev->flags); - schedule_work(&hdev->power_on); + queue_work(hdev->workqueue, &hdev->power_on); hci_notify(hdev, HCI_DEV_REG); return id; -err_wqueue: - destroy_workqueue(hdev->workqueue); -err: - write_lock(&hci_dev_list_lock); +nomem: + write_lock_bh(&hci_dev_list_lock); list_del(&hdev->list); - write_unlock(&hci_dev_list_lock); + write_unlock_bh(&hci_dev_list_lock); - return error; + return -ENOMEM; } EXPORT_SYMBOL(hci_register_dev); /* Unregister HCI device */ -void hci_unregister_dev(struct hci_dev *hdev) +int hci_unregister_dev(struct hci_dev *hdev) { int i; BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); - write_lock(&hci_dev_list_lock); + write_lock_bh(&hci_dev_list_lock); list_del(&hdev->list); - write_unlock(&hci_dev_list_lock); + write_unlock_bh(&hci_dev_list_lock); hci_dev_do_close(hdev); @@ -1586,15 +1550,8 @@ void hci_unregister_dev(struct hci_dev *hdev) kfree_skb(hdev->reassembly[i]); if (!test_bit(HCI_INIT, &hdev->flags) && - !test_bit(HCI_SETUP, &hdev->flags)) { - hci_dev_lock(hdev); - mgmt_index_removed(hdev); - hci_dev_unlock(hdev); - } - - /* mgmt_index_removed should take care of emptying the - * pending list */ - BUG_ON(!list_empty(&hdev->mgmt_pending)); + !test_bit(HCI_SETUP, &hdev->flags)) + mgmt_index_removed(hdev->id); hci_notify(hdev, HCI_DEV_UNREG); @@ -1603,21 +1560,24 @@ void hci_unregister_dev(struct hci_dev *hdev) rfkill_destroy(hdev->rfkill); } - hci_del_sysfs(hdev); + hci_unregister_sysfs(hdev); - cancel_delayed_work_sync(&hdev->adv_work); + hci_del_off_timer(hdev); + del_timer(&hdev->adv_timer); destroy_workqueue(hdev->workqueue); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); hci_blacklist_clear(hdev); hci_uuids_clear(hdev); hci_link_keys_clear(hdev); hci_remote_oob_data_clear(hdev); hci_adv_entries_clear(hdev); - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); __hci_dev_put(hdev); + + return 0; } EXPORT_SYMBOL(hci_unregister_dev); @@ -1653,8 +1613,9 @@ int hci_recv_frame(struct sk_buff *skb) /* Time stamp */ __net_timestamp(skb); + /* Queue frame for rx task */ skb_queue_tail(&hdev->rx_q, skb); - queue_work(hdev->workqueue, &hdev->rx_work); + tasklet_schedule(&hdev->rx_task); return 0; } @@ -1826,13 +1787,59 @@ EXPORT_SYMBOL(hci_recv_stream_fragment); /* ---- Interface to upper protocols ---- */ +/* Register/Unregister protocols. + * hci_task_lock is used to ensure that no tasks are running. 
*/ +int hci_register_proto(struct hci_proto *hp) +{ + int err = 0; + + BT_DBG("%p name %s id %d", hp, hp->name, hp->id); + + if (hp->id >= HCI_MAX_PROTO) + return -EINVAL; + + write_lock_bh(&hci_task_lock); + + if (!hci_proto[hp->id]) + hci_proto[hp->id] = hp; + else + err = -EEXIST; + + write_unlock_bh(&hci_task_lock); + + return err; +} +EXPORT_SYMBOL(hci_register_proto); + +int hci_unregister_proto(struct hci_proto *hp) +{ + int err = 0; + + BT_DBG("%p name %s id %d", hp, hp->name, hp->id); + + if (hp->id >= HCI_MAX_PROTO) + return -EINVAL; + + write_lock_bh(&hci_task_lock); + + if (hci_proto[hp->id]) + hci_proto[hp->id] = NULL; + else + err = -ENOENT; + + write_unlock_bh(&hci_task_lock); + + return err; +} +EXPORT_SYMBOL(hci_unregister_proto); + int hci_register_cb(struct hci_cb *cb) { BT_DBG("%p name %s", cb, cb->name); - write_lock(&hci_cb_list_lock); + write_lock_bh(&hci_cb_list_lock); list_add(&cb->list, &hci_cb_list); - write_unlock(&hci_cb_list_lock); + write_unlock_bh(&hci_cb_list_lock); return 0; } @@ -1842,9 +1849,9 @@ int hci_unregister_cb(struct hci_cb *cb) { BT_DBG("%p name %s", cb, cb->name); - write_lock(&hci_cb_list_lock); + write_lock_bh(&hci_cb_list_lock); list_del(&cb->list); - write_unlock(&hci_cb_list_lock); + write_unlock_bh(&hci_cb_list_lock); return 0; } @@ -1905,7 +1912,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param) hdev->init_last_cmd = opcode; skb_queue_tail(&hdev->cmd_q, skb); - queue_work(hdev->workqueue, &hdev->cmd_work); + tasklet_schedule(&hdev->cmd_task); return 0; } @@ -1941,18 +1948,23 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) hdr->dlen = cpu_to_le16(len); } -static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue, - struct sk_buff *skb, __u16 flags) +void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags) { struct hci_dev *hdev = conn->hdev; struct sk_buff *list; + BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags); + + skb->dev = (void *) hdev; + bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; + hci_add_acl_hdr(skb, conn->handle, flags); + list = skb_shinfo(skb)->frag_list; if (!list) { /* Non fragmented */ BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); - skb_queue_tail(queue, skb); + skb_queue_tail(&conn->data_q, skb); } else { /* Fragmented */ BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); @@ -1960,9 +1972,9 @@ static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue, skb_shinfo(skb)->frag_list = NULL; /* Queue all fragments atomically */ - spin_lock(&queue->lock); + spin_lock_bh(&conn->data_q.lock); - __skb_queue_tail(queue, skb); + __skb_queue_tail(&conn->data_q, skb); flags &= ~ACL_START; flags |= ACL_CONT; @@ -1975,27 +1987,13 @@ static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue, BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); - __skb_queue_tail(queue, skb); + __skb_queue_tail(&conn->data_q, skb); } while (list); - spin_unlock(&queue->lock); + spin_unlock_bh(&conn->data_q.lock); } -} - -void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) -{ - struct hci_conn *conn = chan->conn; - struct hci_dev *hdev = conn->hdev; - - BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags); - - skb->dev = (void *) hdev; - bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; - hci_add_acl_hdr(skb, conn->handle, flags); - - hci_queue_acl(conn, &chan->data_q, skb, flags); - queue_work(hdev->workqueue, &hdev->tx_work); + tasklet_schedule(&hdev->tx_task); } 
EXPORT_SYMBOL(hci_send_acl); @@ -2018,7 +2016,7 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) bt_cb(skb)->pkt_type = HCI_SCODATA_PKT; skb_queue_tail(&conn->data_q, skb); - queue_work(hdev->workqueue, &hdev->tx_work); + tasklet_schedule(&hdev->tx_task); } EXPORT_SYMBOL(hci_send_sco); @@ -2028,15 +2026,16 @@ EXPORT_SYMBOL(hci_send_sco); static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) { struct hci_conn_hash *h = &hdev->conn_hash; - struct hci_conn *conn = NULL, *c; + struct hci_conn *conn = NULL; int num = 0, min = ~0; + struct list_head *p; /* We don't have to lock device here. Connections are always * added and removed with TX task disabled. */ + list_for_each(p, &h->list) { + struct hci_conn *c; + c = list_entry(p, struct hci_conn, list); - rcu_read_lock(); - - list_for_each_entry_rcu(c, &h->list, list) { if (c->type != type || skb_queue_empty(&c->data_q)) continue; @@ -2054,8 +2053,6 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int break; } - rcu_read_unlock(); - if (conn) { int cnt, q; @@ -2087,159 +2084,27 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type) { struct hci_conn_hash *h = &hdev->conn_hash; - struct hci_conn *c; + struct list_head *p; + struct hci_conn *c; BT_ERR("%s link tx timeout", hdev->name); - rcu_read_lock(); - /* Kill stalled connections */ - list_for_each_entry_rcu(c, &h->list, list) { + list_for_each(p, &h->list) { + c = list_entry(p, struct hci_conn, list); if (c->type == type && c->sent) { BT_ERR("%s killing stalled connection %s", hdev->name, batostr(&c->dst)); hci_acl_disconn(c, 0x13); } } - - rcu_read_unlock(); -} - -static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, - int *quote) -{ - struct hci_conn_hash *h = &hdev->conn_hash; - struct hci_chan *chan = NULL; - int num = 0, min = ~0, cur_prio = 0; - struct hci_conn *conn; - int cnt, q, conn_num = 0; - - BT_DBG("%s", hdev->name); - - rcu_read_lock(); - - list_for_each_entry_rcu(conn, &h->list, list) { - struct hci_chan *tmp; - - if (conn->type != type) - continue; - - if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) - continue; - - conn_num++; - - list_for_each_entry_rcu(tmp, &conn->chan_list, list) { - struct sk_buff *skb; - - if (skb_queue_empty(&tmp->data_q)) - continue; - - skb = skb_peek(&tmp->data_q); - if (skb->priority < cur_prio) - continue; - - if (skb->priority > cur_prio) { - num = 0; - min = ~0; - cur_prio = skb->priority; - } - - num++; - - if (conn->sent < min) { - min = conn->sent; - chan = tmp; - } - } - - if (hci_conn_num(hdev, type) == conn_num) - break; - } - - rcu_read_unlock(); - - if (!chan) - return NULL; - - switch (chan->conn->type) { - case ACL_LINK: - cnt = hdev->acl_cnt; - break; - case SCO_LINK: - case ESCO_LINK: - cnt = hdev->sco_cnt; - break; - case LE_LINK: - cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; - break; - default: - cnt = 0; - BT_ERR("Unknown link type"); - } - - q = cnt / num; - *quote = q ? 
q : 1; - BT_DBG("chan %p quote %d", chan, *quote); - return chan; -} - -static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type) -{ - struct hci_conn_hash *h = &hdev->conn_hash; - struct hci_conn *conn; - int num = 0; - - BT_DBG("%s", hdev->name); - - rcu_read_lock(); - - list_for_each_entry_rcu(conn, &h->list, list) { - struct hci_chan *chan; - - if (conn->type != type) - continue; - - if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) - continue; - - num++; - - list_for_each_entry_rcu(chan, &conn->chan_list, list) { - struct sk_buff *skb; - - if (chan->sent) { - chan->sent = 0; - continue; - } - - if (skb_queue_empty(&chan->data_q)) - continue; - - skb = skb_peek(&chan->data_q); - if (skb->priority >= HCI_PRIO_MAX - 1) - continue; - - skb->priority = HCI_PRIO_MAX - 1; - - BT_DBG("chan %p skb %p promoted to %d", chan, skb, - skb->priority); - } - - if (hci_conn_num(hdev, type) == num) - break; - } - - rcu_read_unlock(); - } static inline void hci_sched_acl(struct hci_dev *hdev) { - struct hci_chan *chan; + struct hci_conn *conn; struct sk_buff *skb; int quote; - unsigned int cnt; BT_DBG("%s", hdev->name); @@ -2253,35 +2118,19 @@ static inline void hci_sched_acl(struct hci_dev *hdev) hci_link_tx_to(hdev, ACL_LINK); } - cnt = hdev->acl_cnt; - - while (hdev->acl_cnt && - (chan = hci_chan_sent(hdev, ACL_LINK, "e))) { - u32 priority = (skb_peek(&chan->data_q))->priority; - while (quote-- && (skb = skb_peek(&chan->data_q))) { - BT_DBG("chan %p skb %p len %d priority %u", chan, skb, - skb->len, skb->priority); - - /* Stop if priority has changed */ - if (skb->priority < priority) - break; - - skb = skb_dequeue(&chan->data_q); + while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, "e))) { + while (quote-- && (skb = skb_dequeue(&conn->data_q))) { + BT_DBG("skb %p len %d", skb, skb->len); - hci_conn_enter_active_mode(chan->conn, - bt_cb(skb)->force_active); + hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active); hci_send_frame(skb); hdev->acl_last_tx = jiffies; hdev->acl_cnt--; - chan->sent++; - chan->conn->sent++; + conn->sent++; } } - - if (cnt != hdev->acl_cnt) - hci_prio_recalculate(hdev, ACL_LINK); } /* Schedule SCO */ @@ -2333,9 +2182,9 @@ static inline void hci_sched_esco(struct hci_dev *hdev) static inline void hci_sched_le(struct hci_dev *hdev) { - struct hci_chan *chan; + struct hci_conn *conn; struct sk_buff *skb; - int quote, cnt, tmp; + int quote, cnt; BT_DBG("%s", hdev->name); @@ -2351,42 +2200,30 @@ static inline void hci_sched_le(struct hci_dev *hdev) } cnt = hdev->le_pkts ? 
hdev->le_cnt : hdev->acl_cnt; - tmp = cnt; - while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) { - u32 priority = (skb_peek(&chan->data_q))->priority; - while (quote-- && (skb = skb_peek(&chan->data_q))) { - BT_DBG("chan %p skb %p len %d priority %u", chan, skb, - skb->len, skb->priority); - - /* Stop if priority has changed */ - if (skb->priority < priority) - break; - - skb = skb_dequeue(&chan->data_q); + while (cnt && (conn = hci_low_sent(hdev, LE_LINK, "e))) { + while (quote-- && (skb = skb_dequeue(&conn->data_q))) { + BT_DBG("skb %p len %d", skb, skb->len); hci_send_frame(skb); hdev->le_last_tx = jiffies; cnt--; - chan->sent++; - chan->conn->sent++; + conn->sent++; } } - if (hdev->le_pkts) hdev->le_cnt = cnt; else hdev->acl_cnt = cnt; - - if (cnt != tmp) - hci_prio_recalculate(hdev, LE_LINK); } -static void hci_tx_work(struct work_struct *work) +static void hci_tx_task(unsigned long arg) { - struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work); + struct hci_dev *hdev = (struct hci_dev *) arg; struct sk_buff *skb; + read_lock(&hci_task_lock); + BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt, hdev->le_cnt); @@ -2403,6 +2240,8 @@ static void hci_tx_work(struct work_struct *work) /* Send next queued raw (unknown type) packet */ while ((skb = skb_dequeue(&hdev->raw_q))) hci_send_frame(skb); + + read_unlock(&hci_task_lock); } /* ----- HCI RX task (incoming data processing) ----- */ @@ -2429,11 +2268,16 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); if (conn) { - hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF); + register struct hci_proto *hp; + + hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active); /* Send to upper protocol */ - l2cap_recv_acldata(conn, skb, flags); - return; + hp = hci_proto[HCI_PROTO_L2CAP]; + if (hp && hp->recv_acldata) { + hp->recv_acldata(conn, skb, flags); + return; + } } else { BT_ERR("%s ACL packet for unknown connection handle %d", hdev->name, handle); @@ -2462,9 +2306,14 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); if (conn) { + register struct hci_proto *hp; + /* Send to upper protocol */ - sco_recv_scodata(conn, skb); - return; + hp = hci_proto[HCI_PROTO_SCO]; + if (hp && hp->recv_scodata) { + hp->recv_scodata(conn, skb); + return; + } } else { BT_ERR("%s SCO packet for unknown connection handle %d", hdev->name, handle); @@ -2473,13 +2322,15 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) kfree_skb(skb); } -static void hci_rx_work(struct work_struct *work) +static void hci_rx_task(unsigned long arg) { - struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work); + struct hci_dev *hdev = (struct hci_dev *) arg; struct sk_buff *skb; BT_DBG("%s", hdev->name); + read_lock(&hci_task_lock); + while ((skb = skb_dequeue(&hdev->rx_q))) { if (atomic_read(&hdev->promisc)) { /* Send copy to the sockets */ @@ -2504,7 +2355,6 @@ static void hci_rx_work(struct work_struct *work) /* Process frame */ switch (bt_cb(skb)->pkt_type) { case HCI_EVENT_PKT: - BT_DBG("%s Event packet", hdev->name); hci_event_packet(hdev, skb); break; @@ -2523,11 +2373,13 @@ static void hci_rx_work(struct work_struct *work) break; } } + + read_unlock(&hci_task_lock); } -static void hci_cmd_work(struct work_struct *work) +static void hci_cmd_task(unsigned long arg) { - struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work); + struct hci_dev *hdev = (struct hci_dev 
*) arg; struct sk_buff *skb; BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt)); @@ -2551,38 +2403,7 @@ static void hci_cmd_work(struct work_struct *work) jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT)); } else { skb_queue_head(&hdev->cmd_q, skb); - queue_work(hdev->workqueue, &hdev->cmd_work); + tasklet_schedule(&hdev->cmd_task); } } } - -int hci_do_inquiry(struct hci_dev *hdev, u8 length) -{ - /* General inquiry access code (GIAC) */ - u8 lap[3] = { 0x33, 0x8b, 0x9e }; - struct hci_cp_inquiry cp; - - BT_DBG("%s", hdev->name); - - if (test_bit(HCI_INQUIRY, &hdev->flags)) - return -EINPROGRESS; - - memset(&cp, 0, sizeof(cp)); - memcpy(&cp.lap, lap, sizeof(cp.lap)); - cp.length = length; - - return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp); -} - -int hci_cancel_inquiry(struct hci_dev *hdev) -{ - BT_DBG("%s", hdev->name); - - if (!test_bit(HCI_INQUIRY, &hdev->flags)) - return -EPERM; - - return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL); -} - -module_param(enable_hs, bool, 0644); -MODULE_PARM_DESC(enable_hs, "Enable High Speed"); diff --git a/trunk/net/bluetooth/hci_event.c b/trunk/net/bluetooth/hci_event.c index 4221bd256bdd..643a41b76e2e 100644 --- a/trunk/net/bluetooth/hci_event.c +++ b/trunk/net/bluetooth/hci_event.c @@ -45,7 +45,7 @@ #include #include -static bool enable_le; +static int enable_le; /* Handle HCI Event packets */ @@ -55,18 +55,12 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) BT_DBG("%s status 0x%x", hdev->name, status); - if (status) { - hci_dev_lock(hdev); - mgmt_stop_discovery_failed(hdev, status); - hci_dev_unlock(hdev); + if (status) return; - } - clear_bit(HCI_INQUIRY, &hdev->flags); - - hci_dev_lock(hdev); - mgmt_discovering(hdev, 0); - hci_dev_unlock(hdev); + if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) && + test_bit(HCI_MGMT, &hdev->flags)) + mgmt_discovering(hdev->id, 0); hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status); @@ -82,6 +76,10 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) if (status) return; + if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) && + test_bit(HCI_MGMT, &hdev->flags)) + mgmt_discovering(hdev->id, 0); + hci_conn_check_pending(hdev); } @@ -194,8 +192,6 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) clear_bit(HCI_RESET, &hdev->flags); hci_req_complete(hdev, HCI_OP_RESET, status); - - hdev->dev_flags = 0; } static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) @@ -209,15 +205,13 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) if (!sent) return; - hci_dev_lock(hdev); - if (test_bit(HCI_MGMT, &hdev->flags)) - mgmt_set_local_name_complete(hdev, sent, status); + mgmt_set_local_name_complete(hdev->id, sent, status); - if (status == 0) - memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); + if (status) + return; - hci_dev_unlock(hdev); + memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); } static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) @@ -280,8 +274,7 @@ static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb) static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) { - __u8 param, status = *((__u8 *) skb->data); - int old_pscan, old_iscan; + __u8 status = *((__u8 *) skb->data); void *sent; BT_DBG("%s status 0x%x", hdev->name, status); @@ -290,40 +283,28 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) if (!sent) return; - param = *((__u8 *) sent); - - 
hci_dev_lock(hdev); - - if (status != 0) { - mgmt_write_scan_failed(hdev, param, status); - hdev->discov_timeout = 0; - goto done; - } + if (!status) { + __u8 param = *((__u8 *) sent); + int old_pscan, old_iscan; - old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags); - old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags); + old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags); + old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags); - if (param & SCAN_INQUIRY) { - set_bit(HCI_ISCAN, &hdev->flags); - if (!old_iscan) - mgmt_discoverable(hdev, 1); - if (hdev->discov_timeout > 0) { - int to = msecs_to_jiffies(hdev->discov_timeout * 1000); - queue_delayed_work(hdev->workqueue, &hdev->discov_off, - to); - } - } else if (old_iscan) - mgmt_discoverable(hdev, 0); + if (param & SCAN_INQUIRY) { + set_bit(HCI_ISCAN, &hdev->flags); + if (!old_iscan) + mgmt_discoverable(hdev->id, 1); + } else if (old_iscan) + mgmt_discoverable(hdev->id, 0); - if (param & SCAN_PAGE) { - set_bit(HCI_PSCAN, &hdev->flags); - if (!old_pscan) - mgmt_connectable(hdev, 1); - } else if (old_pscan) - mgmt_connectable(hdev, 0); + if (param & SCAN_PAGE) { + set_bit(HCI_PSCAN, &hdev->flags); + if (!old_pscan) + mgmt_connectable(hdev->id, 1); + } else if (old_pscan) + mgmt_connectable(hdev->id, 0); + } -done: - hci_dev_unlock(hdev); hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status); } @@ -378,8 +359,11 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) BT_DBG("%s voice setting 0x%04x", hdev->name, setting); - if (hdev->notify) + if (hdev->notify) { + tasklet_disable(&hdev->tx_task); hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); + tasklet_enable(&hdev->tx_task); + } } static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) @@ -406,8 +390,11 @@ static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb BT_DBG("%s voice setting 0x%04x", hdev->name, setting); - if (hdev->notify) + if (hdev->notify) { + tasklet_disable(&hdev->tx_task); hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); + tasklet_enable(&hdev->tx_task); + } } static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) @@ -494,7 +481,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev) /* CSR 1.1 dongles does not accept any bitfield so don't try to set * any event mask for pre 1.2 devices */ - if (hdev->hci_ver < BLUETOOTH_VER_1_2) + if (hdev->lmp_ver <= 1) return; events[4] |= 0x01; /* Flow Specification Complete */ @@ -556,12 +543,9 @@ static void hci_set_le_support(struct hci_dev *hdev) static void hci_setup(struct hci_dev *hdev) { - if (hdev->dev_type != HCI_BREDR) - return; - hci_setup_event_mask(hdev); - if (hdev->hci_ver > BLUETOOTH_VER_1_1) + if (hdev->hci_ver > 1) hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); if (hdev->features[6] & LMP_SIMPLE_PAIR) { @@ -716,21 +700,6 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev, hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status); } -static void hci_cc_read_flow_control_mode(struct hci_dev *hdev, - struct sk_buff *skb) -{ - struct hci_rp_read_flow_control_mode *rp = (void *) skb->data; - - BT_DBG("%s status 0x%x", hdev->name, rp->status); - - if (rp->status) - return; - - hdev->flow_ctl_mode = rp->mode; - - hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status); -} - static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_read_buffer_size *rp = (void *) skb->data; @@ -770,28 +739,6 @@ static void 
hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status); } -static void hci_cc_read_data_block_size(struct hci_dev *hdev, - struct sk_buff *skb) -{ - struct hci_rp_read_data_block_size *rp = (void *) skb->data; - - BT_DBG("%s status 0x%x", hdev->name, rp->status); - - if (rp->status) - return; - - hdev->block_mtu = __le16_to_cpu(rp->max_acl_len); - hdev->block_len = __le16_to_cpu(rp->block_len); - hdev->num_blocks = __le16_to_cpu(rp->num_blocks); - - hdev->block_cnt = hdev->num_blocks; - - BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, - hdev->block_cnt, hdev->block_len); - - hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status); -} - static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); @@ -801,30 +748,6 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb) hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status); } -static void hci_cc_read_local_amp_info(struct hci_dev *hdev, - struct sk_buff *skb) -{ - struct hci_rp_read_local_amp_info *rp = (void *) skb->data; - - BT_DBG("%s status 0x%x", hdev->name, rp->status); - - if (rp->status) - return; - - hdev->amp_status = rp->amp_status; - hdev->amp_total_bw = __le32_to_cpu(rp->total_bw); - hdev->amp_max_bw = __le32_to_cpu(rp->max_bw); - hdev->amp_min_latency = __le32_to_cpu(rp->min_latency); - hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu); - hdev->amp_type = rp->amp_type; - hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap); - hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size); - hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to); - hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to); - - hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status); -} - static void hci_cc_delete_stored_link_key(struct hci_dev *hdev, struct sk_buff *skb) { @@ -881,24 +804,19 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb) BT_DBG("%s status 0x%x", hdev->name, rp->status); - hci_dev_lock(hdev); - if (test_bit(HCI_MGMT, &hdev->flags)) - mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status); + mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status); if (rp->status != 0) - goto unlock; + return; cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY); if (!cp) - goto unlock; + return; conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); if (conn) conn->pin_length = cp->pin_len; - -unlock: - hci_dev_unlock(hdev); } static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb) @@ -907,15 +825,10 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb) BT_DBG("%s status 0x%x", hdev->name, rp->status); - hci_dev_lock(hdev); - if (test_bit(HCI_MGMT, &hdev->flags)) - mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, + mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr, rp->status); - - hci_dev_unlock(hdev); } - static void hci_cc_le_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) { @@ -942,13 +855,9 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb) BT_DBG("%s status 0x%x", hdev->name, rp->status); - hci_dev_lock(hdev); - if (test_bit(HCI_MGMT, &hdev->flags)) - mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, + mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr, rp->status); - - hci_dev_unlock(hdev); } static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, @@ -958,44 +867,9 @@ static void 
hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, BT_DBG("%s status 0x%x", hdev->name, rp->status); - hci_dev_lock(hdev); - - if (test_bit(HCI_MGMT, &hdev->flags)) - mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr, - rp->status); - - hci_dev_unlock(hdev); -} - -static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb) -{ - struct hci_rp_user_confirm_reply *rp = (void *) skb->data; - - BT_DBG("%s status 0x%x", hdev->name, rp->status); - - hci_dev_lock(hdev); - - if (test_bit(HCI_MGMT, &hdev->flags)) - mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, - rp->status); - - hci_dev_unlock(hdev); -} - -static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, - struct sk_buff *skb) -{ - struct hci_rp_user_confirm_reply *rp = (void *) skb->data; - - BT_DBG("%s status 0x%x", hdev->name, rp->status); - - hci_dev_lock(hdev); - if (test_bit(HCI_MGMT, &hdev->flags)) - mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr, + mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr, rp->status); - - hci_dev_unlock(hdev); } static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev, @@ -1005,17 +879,8 @@ static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev, BT_DBG("%s status 0x%x", hdev->name, rp->status); - hci_dev_lock(hdev); - mgmt_read_local_oob_data_reply_complete(hdev, rp->hash, + mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash, rp->randomizer, rp->status); - hci_dev_unlock(hdev); -} - -static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb) -{ - __u8 status = *((__u8 *) skb->data); - - BT_DBG("%s status 0x%x", hdev->name, status); } static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, @@ -1033,28 +898,14 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, if (!cp) return; - switch (cp->enable) { - case LE_SCANNING_ENABLED: - set_bit(HCI_LE_SCAN, &hdev->dev_flags); - - cancel_delayed_work_sync(&hdev->adv_work); + if (cp->enable == 0x01) { + del_timer(&hdev->adv_timer); hci_dev_lock(hdev); hci_adv_entries_clear(hdev); hci_dev_unlock(hdev); - break; - - case LE_SCANNING_DISABLED: - clear_bit(HCI_LE_SCAN, &hdev->dev_flags); - - cancel_delayed_work_sync(&hdev->adv_work); - queue_delayed_work(hdev->workqueue, &hdev->adv_work, - jiffies + ADV_CLEAR_TIMEOUT); - break; - - default: - BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable); - break; + } else if (cp->enable == 0x00) { + mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT); } } @@ -1104,18 +955,12 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) if (status) { hci_req_complete(hdev, HCI_OP_INQUIRY, status); hci_conn_check_pending(hdev); - hci_dev_lock(hdev); - if (test_bit(HCI_MGMT, &hdev->flags)) - mgmt_start_discovery_failed(hdev, status); - hci_dev_unlock(hdev); return; } - set_bit(HCI_INQUIRY, &hdev->flags); - - hci_dev_lock(hdev); - mgmt_discovering(hdev, 1); - hci_dev_unlock(hdev); + if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags) && + test_bit(HCI_MGMT, &hdev->flags)) + mgmt_discovering(hdev->id, 1); } static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) @@ -1494,16 +1339,13 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff BT_DBG("%s status %d", hdev->name, status); + if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) && + test_bit(HCI_MGMT, &hdev->flags)) + mgmt_discovering(hdev->id, 0); + hci_req_complete(hdev, HCI_OP_INQUIRY, status); hci_conn_check_pending(hdev); - - if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) - return; - - 
hci_dev_lock(hdev); - mgmt_discovering(hdev, 0); - hci_dev_unlock(hdev); } static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) @@ -1519,6 +1361,12 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff * hci_dev_lock(hdev); + if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) { + + if (test_bit(HCI_MGMT, &hdev->flags)) + mgmt_discovering(hdev->id, 1); + } + for (; num_rsp; num_rsp--, info++) { bacpy(&data.bdaddr, &info->bdaddr); data.pscan_rep_mode = info->pscan_rep_mode; @@ -1529,8 +1377,8 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff * data.rssi = 0x00; data.ssp_mode = 0x00; hci_inquiry_cache_update(hdev, &data); - mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, - info->dev_class, 0, NULL); + mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, 0, + NULL); } hci_dev_unlock(hdev); @@ -1564,8 +1412,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s conn->state = BT_CONFIG; hci_conn_hold(conn); conn->disc_timeout = HCI_DISCONN_TIMEOUT; - mgmt_connected(hdev, &ev->bdaddr, conn->type, - conn->dst_type); + mgmt_connected(hdev->id, &ev->bdaddr, conn->type); } else conn->state = BT_CONNECTED; @@ -1587,7 +1434,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s } /* Set packet type for incoming connection */ - if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) { + if (!conn->out && hdev->hci_ver < 3) { struct hci_cp_change_conn_ptype cp; cp.handle = ev->handle; cp.pkt_type = cpu_to_le16(conn->pkt_type); @@ -1597,8 +1444,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s } else { conn->state = BT_CLOSED; if (conn->type == ACL_LINK) - mgmt_connect_failed(hdev, &ev->bdaddr, conn->type, - conn->dst_type, ev->status); + mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status); } if (conn->type == ACL_LINK) @@ -1685,7 +1531,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk struct hci_cp_reject_conn_req cp; bacpy(&cp.bdaddr, &ev->bdaddr); - cp.reason = HCI_ERROR_REJ_BAD_ADDR; + cp.reason = 0x0f; hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); } } @@ -1697,27 +1543,24 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff BT_DBG("%s status %d", hdev->name, ev->status); + if (ev->status) { + mgmt_disconnect_failed(hdev->id); + return; + } + hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (!conn) goto unlock; - if (ev->status == 0) - conn->state = BT_CLOSED; + conn->state = BT_CLOSED; - if (conn->type == ACL_LINK || conn->type == LE_LINK) { - if (ev->status != 0) - mgmt_disconnect_failed(hdev, &conn->dst, ev->status); - else - mgmt_disconnected(hdev, &conn->dst, conn->type, - conn->dst_type); - } + if (conn->type == ACL_LINK || conn->type == LE_LINK) + mgmt_disconnected(hdev->id, &conn->dst); - if (ev->status == 0) { - hci_proto_disconn_cfm(conn, ev->reason); - hci_conn_del(conn); - } + hci_proto_disconn_cfm(conn, ev->reason); + hci_conn_del(conn); unlock: hci_dev_unlock(hdev); @@ -1745,7 +1588,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s conn->sec_level = conn->pending_sec_level; } } else { - mgmt_auth_failed(hdev, &conn->dst, ev->status); + mgmt_auth_failed(hdev->id, &conn->dst, ev->status); } clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); @@ -1800,7 +1643,7 @@ static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff 
*skb hci_dev_lock(hdev); if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags)) - mgmt_remote_name(hdev, &ev->bdaddr, ev->name); + mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) @@ -2051,22 +1894,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk hci_cc_read_bd_addr(hdev, skb); break; - case HCI_OP_READ_DATA_BLOCK_SIZE: - hci_cc_read_data_block_size(hdev, skb); - break; - case HCI_OP_WRITE_CA_TIMEOUT: hci_cc_write_ca_timeout(hdev, skb); break; - case HCI_OP_READ_FLOW_CONTROL_MODE: - hci_cc_read_flow_control_mode(hdev, skb); - break; - - case HCI_OP_READ_LOCAL_AMP_INFO: - hci_cc_read_local_amp_info(hdev, skb); - break; - case HCI_OP_DELETE_STORED_LINK_KEY: hci_cc_delete_stored_link_key(hdev, skb); break; @@ -2111,17 +1942,6 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk hci_cc_user_confirm_neg_reply(hdev, skb); break; - case HCI_OP_USER_PASSKEY_REPLY: - hci_cc_user_passkey_reply(hdev, skb); - break; - - case HCI_OP_USER_PASSKEY_NEG_REPLY: - hci_cc_user_passkey_neg_reply(hdev, skb); - - case HCI_OP_LE_SET_SCAN_PARAM: - hci_cc_le_set_scan_param(hdev, skb); - break; - case HCI_OP_LE_SET_SCAN_ENABLE: hci_cc_le_set_scan_enable(hdev, skb); break; @@ -2149,7 +1969,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk if (ev->ncmd) { atomic_set(&hdev->cmd_cnt, 1); if (!skb_queue_empty(&hdev->cmd_q)) - queue_work(hdev->workqueue, &hdev->cmd_work); + tasklet_schedule(&hdev->cmd_task); } } @@ -2209,7 +2029,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) case HCI_OP_DISCONNECT: if (ev->status != 0) - mgmt_disconnect_failed(hdev, NULL, ev->status); + mgmt_disconnect_failed(hdev->id); break; case HCI_OP_LE_CREATE_CONN: @@ -2231,7 +2051,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) { atomic_set(&hdev->cmd_cnt, 1); if (!skb_queue_empty(&hdev->cmd_q)) - queue_work(hdev->workqueue, &hdev->cmd_work); + tasklet_schedule(&hdev->cmd_task); } } @@ -2264,68 +2084,56 @@ static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_num_comp_pkts *ev = (void *) skb->data; + __le16 *ptr; int i; skb_pull(skb, sizeof(*ev)); BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl); - if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) { - BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode); - return; - } - if (skb->len < ev->num_hndl * 4) { BT_DBG("%s bad parameters", hdev->name); return; } - for (i = 0; i < ev->num_hndl; i++) { - struct hci_comp_pkts_info *info = &ev->handles[i]; + tasklet_disable(&hdev->tx_task); + + for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) { struct hci_conn *conn; __u16 handle, count; - handle = __le16_to_cpu(info->handle); - count = __le16_to_cpu(info->count); + handle = get_unaligned_le16(ptr++); + count = get_unaligned_le16(ptr++); conn = hci_conn_hash_lookup_handle(hdev, handle); - if (!conn) - continue; - - conn->sent -= count; - - switch (conn->type) { - case ACL_LINK: - hdev->acl_cnt += count; - if (hdev->acl_cnt > hdev->acl_pkts) - hdev->acl_cnt = hdev->acl_pkts; - break; - - case LE_LINK: - if (hdev->le_pkts) { - hdev->le_cnt += count; - if (hdev->le_cnt > hdev->le_pkts) - hdev->le_cnt = hdev->le_pkts; - } else { + if (conn) { + conn->sent -= 
count; + + if (conn->type == ACL_LINK) { hdev->acl_cnt += count; if (hdev->acl_cnt > hdev->acl_pkts) hdev->acl_cnt = hdev->acl_pkts; + } else if (conn->type == LE_LINK) { + if (hdev->le_pkts) { + hdev->le_cnt += count; + if (hdev->le_cnt > hdev->le_pkts) + hdev->le_cnt = hdev->le_pkts; + } else { + hdev->acl_cnt += count; + if (hdev->acl_cnt > hdev->acl_pkts) + hdev->acl_cnt = hdev->acl_pkts; + } + } else { + hdev->sco_cnt += count; + if (hdev->sco_cnt > hdev->sco_pkts) + hdev->sco_cnt = hdev->sco_pkts; } - break; - - case SCO_LINK: - hdev->sco_cnt += count; - if (hdev->sco_cnt > hdev->sco_pkts) - hdev->sco_cnt = hdev->sco_pkts; - break; - - default: - BT_ERR("Unknown type %d conn %p", conn->type, conn); - break; } } - queue_work(hdev->workqueue, &hdev->tx_work); + tasklet_schedule(&hdev->tx_task); + + tasklet_enable(&hdev->tx_task); } static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) @@ -2386,7 +2194,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff else secure = 0; - mgmt_pin_code_request(hdev, &ev->bdaddr, secure); + mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure); } unlock: @@ -2555,6 +2363,12 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct hci_dev_lock(hdev); + if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) { + + if (test_bit(HCI_MGMT, &hdev->flags)) + mgmt_discovering(hdev->id, 1); + } + if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { struct inquiry_info_with_rssi_and_pscan_mode *info; info = (void *) (skb->data + 1); @@ -2569,7 +2383,7 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct data.rssi = info->rssi; data.ssp_mode = 0x00; hci_inquiry_cache_update(hdev, &data); - mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, + mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, info->rssi, NULL); } @@ -2586,7 +2400,7 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct data.rssi = info->rssi; data.ssp_mode = 0x00; hci_inquiry_cache_update(hdev, &data); - mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, + mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, info->rssi, NULL); } @@ -2717,6 +2531,12 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct if (!num_rsp) return; + if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) { + + if (test_bit(HCI_MGMT, &hdev->flags)) + mgmt_discovering(hdev->id, 1); + } + hci_dev_lock(hdev); for (; num_rsp; num_rsp--, info++) { @@ -2729,8 +2549,8 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct data.rssi = info->rssi; data.ssp_mode = 0x01; hci_inquiry_cache_update(hdev, &data); - mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, - info->dev_class, info->rssi, info->data); + mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, + info->rssi, info->data); } hci_dev_unlock(hdev); @@ -2794,7 +2614,7 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff struct hci_cp_io_capability_neg_reply cp; bacpy(&cp.bdaddr, &ev->bdaddr); - cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; + cp.reason = 0x18; /* Pairing not allowed */ hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, sizeof(cp), &cp); @@ -2886,28 +2706,13 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev, } confirm: - mgmt_user_confirm_request(hdev, &ev->bdaddr, ev->passkey, + mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey, confirm_hint); unlock: 
hci_dev_unlock(hdev); } -static inline void hci_user_passkey_request_evt(struct hci_dev *hdev, - struct sk_buff *skb) -{ - struct hci_ev_user_passkey_req *ev = (void *) skb->data; - - BT_DBG("%s", hdev->name); - - hci_dev_lock(hdev); - - if (test_bit(HCI_MGMT, &hdev->flags)) - mgmt_user_passkey_request(hdev, &ev->bdaddr); - - hci_dev_unlock(hdev); -} - static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_simple_pair_complete *ev = (void *) skb->data; @@ -2927,7 +2732,7 @@ static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_ * event gets always produced as initiator and is also mapped to * the mgmt_auth_failed event */ if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0) - mgmt_auth_failed(hdev, &conn->dst, ev->status); + mgmt_auth_failed(hdev->id, &conn->dst, ev->status); hci_conn_put(conn); @@ -3008,15 +2813,14 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff } if (ev->status) { - mgmt_connect_failed(hdev, &ev->bdaddr, conn->type, - conn->dst_type, ev->status); + mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status); hci_proto_connect_cfm(conn, ev->status); conn->state = BT_CLOSED; hci_conn_del(conn); goto unlock; } - mgmt_connected(hdev, &ev->bdaddr, conn->type, conn->dst_type); + mgmt_connected(hdev->id, &ev->bdaddr, conn->type); conn->sec_level = BT_SECURITY_LOW; conn->handle = __le16_to_cpu(ev->handle); @@ -3247,10 +3051,6 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) hci_user_confirm_request_evt(hdev, skb); break; - case HCI_EV_USER_PASSKEY_REQUEST: - hci_user_passkey_request_evt(hdev, skb); - break; - case HCI_EV_SIMPLE_PAIR_COMPLETE: hci_simple_pair_complete_evt(hdev, skb); break; @@ -3304,5 +3104,5 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data) kfree_skb(skb); } -module_param(enable_le, bool, 0644); +module_param(enable_le, bool, 0444); MODULE_PARM_DESC(enable_le, "Enable LE support"); diff --git a/trunk/net/bluetooth/hci_sock.c b/trunk/net/bluetooth/hci_sock.c index 6d94616af312..f6afe3d76a66 100644 --- a/trunk/net/bluetooth/hci_sock.c +++ b/trunk/net/bluetooth/hci_sock.c @@ -49,7 +49,7 @@ #include #include -static bool enable_mgmt; +static int enable_mgmt; /* ----- HCI socket interface ----- */ @@ -188,11 +188,11 @@ static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg) if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) return -EFAULT; - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); err = hci_blacklist_add(hdev, &bdaddr); - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); return err; } @@ -205,11 +205,11 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg) if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) return -EFAULT; - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); err = hci_blacklist_del(hdev, &bdaddr); - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); return err; } @@ -343,11 +343,8 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le if (haddr.hci_channel > HCI_CHANNEL_CONTROL) return -EINVAL; - if (haddr.hci_channel == HCI_CHANNEL_CONTROL) { - if (!enable_mgmt) - return -EINVAL; - set_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags); - } + if (haddr.hci_channel == HCI_CHANNEL_CONTROL && !enable_mgmt) + return -EINVAL; lock_sock(sk); @@ -538,10 +535,10 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) { skb_queue_tail(&hdev->raw_q, skb); - 
queue_work(hdev->workqueue, &hdev->tx_work); + tasklet_schedule(&hdev->tx_task); } else { skb_queue_tail(&hdev->cmd_q, skb); - queue_work(hdev->workqueue, &hdev->cmd_work); + tasklet_schedule(&hdev->cmd_task); } } else { if (!capable(CAP_NET_RAW)) { @@ -550,7 +547,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, } skb_queue_tail(&hdev->raw_q, skb); - queue_work(hdev->workqueue, &hdev->tx_work); + tasklet_schedule(&hdev->tx_task); } err = len; diff --git a/trunk/net/bluetooth/hci_sysfs.c b/trunk/net/bluetooth/hci_sysfs.c index 521095614235..661b461cf0b0 100644 --- a/trunk/net/bluetooth/hci_sysfs.c +++ b/trunk/net/bluetooth/hci_sysfs.c @@ -89,35 +89,11 @@ static struct device_type bt_link = { .release = bt_link_release, }; -/* - * The rfcomm tty device will possibly retain even when conn - * is down, and sysfs doesn't support move zombie device, - * so we should move the device before conn device is destroyed. - */ -static int __match_tty(struct device *dev, void *data) -{ - return !strncmp(dev_name(dev), "rfcomm", 6); -} - -void hci_conn_init_sysfs(struct hci_conn *conn) -{ - struct hci_dev *hdev = conn->hdev; - - BT_DBG("conn %p", conn); - - conn->dev.type = &bt_link; - conn->dev.class = bt_class; - conn->dev.parent = &hdev->dev; - - device_initialize(&conn->dev); -} - -void hci_conn_add_sysfs(struct hci_conn *conn) +static void add_conn(struct work_struct *work) { + struct hci_conn *conn = container_of(work, struct hci_conn, work_add); struct hci_dev *hdev = conn->hdev; - BT_DBG("conn %p", conn); - dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); dev_set_drvdata(&conn->dev, conn); @@ -130,8 +106,19 @@ void hci_conn_add_sysfs(struct hci_conn *conn) hci_dev_hold(hdev); } -void hci_conn_del_sysfs(struct hci_conn *conn) +/* + * The rfcomm tty device will possibly retain even when conn + * is down, and sysfs doesn't support move zombie device, + * so we should move the device before conn device is destroyed. 
+ */ +static int __match_tty(struct device *dev, void *data) +{ + return !strncmp(dev_name(dev), "rfcomm", 6); +} + +static void del_conn(struct work_struct *work) { + struct hci_conn *conn = container_of(work, struct hci_conn, work_del); struct hci_dev *hdev = conn->hdev; if (!device_is_registered(&conn->dev)) @@ -153,6 +140,36 @@ void hci_conn_del_sysfs(struct hci_conn *conn) hci_dev_put(hdev); } +void hci_conn_init_sysfs(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + + BT_DBG("conn %p", conn); + + conn->dev.type = &bt_link; + conn->dev.class = bt_class; + conn->dev.parent = &hdev->dev; + + device_initialize(&conn->dev); + + INIT_WORK(&conn->work_add, add_conn); + INIT_WORK(&conn->work_del, del_conn); +} + +void hci_conn_add_sysfs(struct hci_conn *conn) +{ + BT_DBG("conn %p", conn); + + queue_work(conn->hdev->workqueue, &conn->work_add); +} + +void hci_conn_del_sysfs(struct hci_conn *conn) +{ + BT_DBG("conn %p", conn); + + queue_work(conn->hdev->workqueue, &conn->work_del); +} + static inline char *host_bustostr(int bus) { switch (bus) { @@ -386,7 +403,7 @@ static int inquiry_cache_show(struct seq_file *f, void *p) struct inquiry_cache *cache = &hdev->inq_cache; struct inquiry_entry *e; - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); for (e = cache->list; e; e = e->next) { struct inquiry_data *data = &e->data; @@ -399,7 +416,7 @@ static int inquiry_cache_show(struct seq_file *f, void *p) data->rssi, data->ssp_mode, e->timestamp); } - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); return 0; } @@ -419,14 +436,19 @@ static const struct file_operations inquiry_cache_fops = { static int blacklist_show(struct seq_file *f, void *p) { struct hci_dev *hdev = f->private; - struct bdaddr_list *b; + struct list_head *l; - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); + + list_for_each(l, &hdev->blacklist) { + struct bdaddr_list *b; + + b = list_entry(l, struct bdaddr_list, list); - list_for_each_entry(b, &hdev->blacklist, list) seq_printf(f, "%s\n", batostr(&b->bdaddr)); + } - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); return 0; } @@ -463,14 +485,19 @@ static void print_bt_uuid(struct seq_file *f, u8 *uuid) static int uuids_show(struct seq_file *f, void *p) { struct hci_dev *hdev = f->private; - struct bt_uuid *uuid; + struct list_head *l; + + hci_dev_lock_bh(hdev); + + list_for_each(l, &hdev->uuids) { + struct bt_uuid *uuid; - hci_dev_lock(hdev); + uuid = list_entry(l, struct bt_uuid, list); - list_for_each_entry(uuid, &hdev->uuids, list) print_bt_uuid(f, uuid->uuid); + } - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); return 0; } @@ -491,11 +518,11 @@ static int auto_accept_delay_set(void *data, u64 val) { struct hci_dev *hdev = data; - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); hdev->auto_accept_delay = val; - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); return 0; } @@ -504,11 +531,11 @@ static int auto_accept_delay_get(void *data, u64 *val) { struct hci_dev *hdev = data; - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); *val = hdev->auto_accept_delay; - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); return 0; } @@ -516,28 +543,22 @@ static int auto_accept_delay_get(void *data, u64 *val) DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, auto_accept_delay_set, "%llu\n"); -void hci_init_sysfs(struct hci_dev *hdev) -{ - struct device *dev = &hdev->dev; - - dev->type = &bt_host; - dev->class = bt_class; - - dev_set_drvdata(dev, hdev); - device_initialize(dev); -} - -int hci_add_sysfs(struct hci_dev *hdev) +int hci_register_sysfs(struct hci_dev 
*hdev) { struct device *dev = &hdev->dev; int err; BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); + dev->type = &bt_host; + dev->class = bt_class; dev->parent = hdev->parent; + dev_set_name(dev, "%s", hdev->name); - err = device_add(dev); + dev_set_drvdata(dev, hdev); + + err = device_register(dev); if (err < 0) return err; @@ -561,7 +582,7 @@ int hci_add_sysfs(struct hci_dev *hdev) return 0; } -void hci_del_sysfs(struct hci_dev *hdev) +void hci_unregister_sysfs(struct hci_dev *hdev) { BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); diff --git a/trunk/net/bluetooth/hidp/Kconfig b/trunk/net/bluetooth/hidp/Kconfig index 4deaca78e91e..86a91543172a 100644 --- a/trunk/net/bluetooth/hidp/Kconfig +++ b/trunk/net/bluetooth/hidp/Kconfig @@ -1,6 +1,6 @@ config BT_HIDP tristate "HIDP protocol support" - depends on BT && INPUT && HID_SUPPORT + depends on BT && BT_L2CAP && INPUT && HID_SUPPORT select HID help HIDP (Human Interface Device Protocol) is a transport layer diff --git a/trunk/net/bluetooth/hidp/core.c b/trunk/net/bluetooth/hidp/core.c index d478be11d562..075a3e920caf 100644 --- a/trunk/net/bluetooth/hidp/core.c +++ b/trunk/net/bluetooth/hidp/core.c @@ -81,20 +81,24 @@ static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 }; static struct hidp_session *__hidp_get_session(bdaddr_t *bdaddr) { struct hidp_session *session; + struct list_head *p; BT_DBG(""); - list_for_each_entry(session, &hidp_session_list, list) { + list_for_each(p, &hidp_session_list) { + session = list_entry(p, struct hidp_session, list); if (!bacmp(bdaddr, &session->bdaddr)) return session; } - return NULL; } static void __hidp_link_session(struct hidp_session *session) { + __module_get(THIS_MODULE); list_add(&session->list, &hidp_session_list); + + hci_conn_hold_device(session->conn); } static void __hidp_unlink_session(struct hidp_session *session) @@ -102,6 +106,7 @@ static void __hidp_unlink_session(struct hidp_session *session) hci_conn_put_device(session->conn); list_del(&session->list); + module_put(THIS_MODULE); } static void __hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci) @@ -250,9 +255,6 @@ static int __hidp_send_ctrl_message(struct hidp_session *session, BT_DBG("session %p data %p size %d", session, data, size); - if (atomic_read(&session->terminate)) - return -EIO; - skb = alloc_skb(size + 1, GFP_ATOMIC); if (!skb) { BT_ERR("Can't allocate memory for new frame"); @@ -327,7 +329,6 @@ static int hidp_get_raw_report(struct hid_device *hid, struct sk_buff *skb; size_t len; int numbered_reports = hid->report_enum[report_type].numbered; - int ret; switch (report_type) { case HID_FEATURE_REPORT: @@ -351,9 +352,8 @@ static int hidp_get_raw_report(struct hid_device *hid, session->waiting_report_number = numbered_reports ? report_number : -1; set_bit(HIDP_WAITING_FOR_RETURN, &session->flags); data[0] = report_number; - ret = hidp_send_ctrl_message(hid->driver_data, report_type, data, 1); - if (ret) - goto err; + if (hidp_send_ctrl_message(hid->driver_data, report_type, data, 1)) + goto err_eio; /* Wait for the return of the report. The returned report gets put in session->report_return. 
*/ @@ -365,13 +365,11 @@ static int hidp_get_raw_report(struct hid_device *hid, 5*HZ); if (res == 0) { /* timeout */ - ret = -EIO; - goto err; + goto err_eio; } if (res < 0) { /* signal */ - ret = -ERESTARTSYS; - goto err; + goto err_restartsys; } } @@ -392,10 +390,14 @@ static int hidp_get_raw_report(struct hid_device *hid, return len; -err: +err_restartsys: clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags); mutex_unlock(&session->report_mutex); - return ret; + return -ERESTARTSYS; +err_eio: + clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags); + mutex_unlock(&session->report_mutex); + return -EIO; } static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count, @@ -420,10 +422,11 @@ static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, s /* Set up our wait, and send the report request to the device. */ set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags); - ret = hidp_send_ctrl_message(hid->driver_data, report_type, data, - count); - if (ret) + if (hidp_send_ctrl_message(hid->driver_data, report_type, + data, count)) { + ret = -ENOMEM; goto err; + } /* Wait for the ACK from the device. */ while (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) { @@ -493,9 +496,10 @@ static void hidp_process_handshake(struct hidp_session *session, case HIDP_HSHK_ERR_INVALID_REPORT_ID: case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST: case HIDP_HSHK_ERR_INVALID_PARAMETER: - if (test_and_clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags)) + if (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags)) { + clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags); wake_up_interruptible(&session->report_queue); - + } /* FIXME: Call into SET_ GET_ handlers here */ break; @@ -516,8 +520,10 @@ static void hidp_process_handshake(struct hidp_session *session, } /* Wake up the waiting thread. 
*/ - if (test_and_clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) + if (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) { + clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags); wake_up_interruptible(&session->report_queue); + } } static void hidp_process_hid_control(struct hidp_session *session, @@ -657,32 +663,25 @@ static int hidp_send_frame(struct socket *sock, unsigned char *data, int len) return kernel_sendmsg(sock, &msg, &iv, 1, len); } -static void hidp_process_intr_transmit(struct hidp_session *session) +static void hidp_process_transmit(struct hidp_session *session) { struct sk_buff *skb; BT_DBG("session %p", session); - while ((skb = skb_dequeue(&session->intr_transmit))) { - if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) { - skb_queue_head(&session->intr_transmit, skb); + while ((skb = skb_dequeue(&session->ctrl_transmit))) { + if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) { + skb_queue_head(&session->ctrl_transmit, skb); break; } hidp_set_timer(session); kfree_skb(skb); } -} - -static void hidp_process_ctrl_transmit(struct hidp_session *session) -{ - struct sk_buff *skb; - BT_DBG("session %p", session); - - while ((skb = skb_dequeue(&session->ctrl_transmit))) { - if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) { - skb_queue_head(&session->ctrl_transmit, skb); + while ((skb = skb_dequeue(&session->intr_transmit))) { + if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) { + skb_queue_head(&session->intr_transmit, skb); break; } @@ -701,7 +700,6 @@ static int hidp_session(void *arg) BT_DBG("session %p", session); - __module_get(THIS_MODULE); set_user_nice(current, -15); init_waitqueue_entry(&ctrl_wait, current); @@ -716,25 +714,23 @@ static int hidp_session(void *arg) intr_sk->sk_state != BT_CONNECTED) break; - while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) { + while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) { skb_orphan(skb); if (!skb_linearize(skb)) - hidp_recv_intr_frame(session, skb); + hidp_recv_ctrl_frame(session, skb); else kfree_skb(skb); } - hidp_process_intr_transmit(session); - - while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) { + while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) { skb_orphan(skb); if (!skb_linearize(skb)) - hidp_recv_ctrl_frame(session, skb); + hidp_recv_intr_frame(session, skb); else kfree_skb(skb); } - hidp_process_ctrl_transmit(session); + hidp_process_transmit(session); schedule(); set_current_state(TASK_INTERRUPTIBLE); @@ -743,10 +739,6 @@ static int hidp_session(void *arg) remove_wait_queue(sk_sleep(intr_sk), &intr_wait); remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait); - clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags); - clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags); - wake_up_interruptible(&session->report_queue); - down_write(&hidp_session_sem); hidp_del_timer(session); @@ -780,37 +772,34 @@ static int hidp_session(void *arg) kfree(session->rd_data); kfree(session); - module_put_and_exit(0); return 0; } -static struct hci_conn *hidp_get_connection(struct hidp_session *session) +static struct device *hidp_get_device(struct hidp_session *session) { bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src; bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst; - struct hci_conn *conn; + struct device *device = NULL; struct hci_dev *hdev; hdev = hci_get_route(dst, src); if (!hdev) return NULL; - hci_dev_lock(hdev); - conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); - if (conn) - hci_conn_hold_device(conn); - 
hci_dev_unlock(hdev); + session->conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); + if (session->conn) + device = &session->conn->dev; hci_dev_put(hdev); - return conn; + return device; } static int hidp_setup_input(struct hidp_session *session, struct hidp_connadd_req *req) { struct input_dev *input; - int i; + int err, i; input = input_allocate_device(); if (!input) @@ -853,10 +842,17 @@ static int hidp_setup_input(struct hidp_session *session, input->relbit[0] |= BIT_MASK(REL_WHEEL); } - input->dev.parent = &session->conn->dev; + input->dev.parent = hidp_get_device(session); input->event = hidp_input_event; + err = input_register_device(input); + if (err < 0) { + input_free_device(input); + session->input = NULL; + return err; + } + return 0; } @@ -953,7 +949,7 @@ static int hidp_setup_hid(struct hidp_session *session, strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64); strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64); - hid->dev.parent = &session->conn->dev; + hid->dev.parent = hidp_get_device(session); hid->ll_driver = &hidp_hid_driver; hid->hid_get_raw_report = hidp_get_raw_report; @@ -980,20 +976,18 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst)) return -ENOTUNIQ; + session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL); + if (!session) + return -ENOMEM; + BT_DBG("rd_data %p rd_size %d", req->rd_data, req->rd_size); down_write(&hidp_session_sem); s = __hidp_get_session(&bt_sk(ctrl_sock->sk)->dst); if (s && s->state == BT_CONNECTED) { - up_write(&hidp_session_sem); - return -EEXIST; - } - - session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL); - if (!session) { - up_write(&hidp_session_sem); - return -ENOMEM; + err = -EEXIST; + goto failed; } bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst); @@ -1009,12 +1003,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, session->intr_sock = intr_sock; session->state = BT_CONNECTED; - session->conn = hidp_get_connection(session); - if (!session->conn) { - err = -ENOTCONN; - goto failed; - } - setup_timer(&session->timer, hidp_idle_timeout, (unsigned long)session); skb_queue_head_init(&session->ctrl_transmit); @@ -1027,11 +1015,9 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID); session->idle_to = req->idle_to; - __hidp_link_session(session); - if (req->rd_size > 0) { err = hidp_setup_hid(session, req); - if (err) + if (err && err != -ENODEV) goto purge; } @@ -1041,6 +1027,8 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, goto purge; } + __hidp_link_session(session); + hidp_set_timer(session); if (session->hid) { @@ -1066,11 +1054,7 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, !session->waiting_for_startup); } - if (session->hid) - err = hid_add_device(session->hid); - else - err = input_register_device(session->input); - + err = hid_add_device(session->hid); if (err < 0) { atomic_inc(&session->terminate); wake_up_process(session->task); @@ -1093,6 +1077,8 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, unlink: hidp_del_timer(session); + __hidp_unlink_session(session); + if (session->input) { input_unregister_device(session->input); session->input = NULL; @@ -1107,8 +1093,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, 
session->rd_data = NULL; purge: - __hidp_unlink_session(session); - skb_queue_purge(&session->ctrl_transmit); skb_queue_purge(&session->intr_transmit); @@ -1150,16 +1134,19 @@ int hidp_del_connection(struct hidp_conndel_req *req) int hidp_get_connlist(struct hidp_connlist_req *req) { - struct hidp_session *session; + struct list_head *p; int err = 0, n = 0; BT_DBG(""); down_read(&hidp_session_sem); - list_for_each_entry(session, &hidp_session_list, list) { + list_for_each(p, &hidp_session_list) { + struct hidp_session *session; struct hidp_conninfo ci; + session = list_entry(p, struct hidp_session, list); + __hidp_copy_session(session, &ci); if (copy_to_user(req->ci, &ci, sizeof(ci))) { diff --git a/trunk/net/bluetooth/l2cap_core.c b/trunk/net/bluetooth/l2cap_core.c index aa78d8c4b93b..17b5b1cd9657 100644 --- a/trunk/net/bluetooth/l2cap_core.c +++ b/trunk/net/bluetooth/l2cap_core.c @@ -3,7 +3,6 @@ Copyright (C) 2000-2001 Qualcomm Incorporated Copyright (C) 2009-2010 Gustavo F. Padovan Copyright (C) 2010 Google Inc. - Copyright (C) 2011 ProFUSION Embedded Systems Written 2000,2001 by Maxim Krasnyansky @@ -57,10 +56,10 @@ #include #include -bool disable_ertm; +int disable_ertm; static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN; -static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, }; +static u8 l2cap_fixed_chan[8] = { 0x02, }; static LIST_HEAD(chan_list); static DEFINE_RWLOCK(chan_list_lock); @@ -77,38 +76,38 @@ static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb); /* ---- L2CAP channels ---- */ -static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) +static inline void chan_hold(struct l2cap_chan *c) { - struct l2cap_chan *c, *r = NULL; + atomic_inc(&c->refcnt); +} - rcu_read_lock(); +static inline void chan_put(struct l2cap_chan *c) +{ + if (atomic_dec_and_test(&c->refcnt)) + kfree(c); +} - list_for_each_entry_rcu(c, &conn->chan_l, list) { - if (c->dcid == cid) { - r = c; - break; - } +static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) +{ + struct l2cap_chan *c; + + list_for_each_entry(c, &conn->chan_l, list) { + if (c->dcid == cid) + return c; } + return NULL; - rcu_read_unlock(); - return r; } static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid) { - struct l2cap_chan *c, *r = NULL; - - rcu_read_lock(); + struct l2cap_chan *c; - list_for_each_entry_rcu(c, &conn->chan_l, list) { - if (c->scid == cid) { - r = c; - break; - } + list_for_each_entry(c, &conn->chan_l, list) { + if (c->scid == cid) + return c; } - - rcu_read_unlock(); - return r; + return NULL; } /* Find channel with given SCID. 
@@ -117,36 +116,34 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 ci { struct l2cap_chan *c; + read_lock(&conn->chan_lock); c = __l2cap_get_chan_by_scid(conn, cid); if (c) - lock_sock(c->sk); + bh_lock_sock(c->sk); + read_unlock(&conn->chan_lock); return c; } static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident) { - struct l2cap_chan *c, *r = NULL; - - rcu_read_lock(); + struct l2cap_chan *c; - list_for_each_entry_rcu(c, &conn->chan_l, list) { - if (c->ident == ident) { - r = c; - break; - } + list_for_each_entry(c, &conn->chan_l, list) { + if (c->ident == ident) + return c; } - - rcu_read_unlock(); - return r; + return NULL; } static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident) { struct l2cap_chan *c; + read_lock(&conn->chan_lock); c = __l2cap_get_chan_by_ident(conn, ident); if (c) - lock_sock(c->sk); + bh_lock_sock(c->sk); + read_unlock(&conn->chan_lock); return c; } @@ -156,9 +153,12 @@ static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src) list_for_each_entry(c, &chan_list, global_l) { if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src)) - return c; + goto found; } - return NULL; + + c = NULL; +found: + return c; } int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm) @@ -217,51 +217,45 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn) return 0; } -static char *state_to_string(int state) +static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout) { - switch(state) { - case BT_CONNECTED: - return "BT_CONNECTED"; - case BT_OPEN: - return "BT_OPEN"; - case BT_BOUND: - return "BT_BOUND"; - case BT_LISTEN: - return "BT_LISTEN"; - case BT_CONNECT: - return "BT_CONNECT"; - case BT_CONNECT2: - return "BT_CONNECT2"; - case BT_CONFIG: - return "BT_CONFIG"; - case BT_DISCONN: - return "BT_DISCONN"; - case BT_CLOSED: - return "BT_CLOSED"; - } + BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout); - return "invalid state"; + if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout))) + chan_hold(chan); } -static void l2cap_state_change(struct l2cap_chan *chan, int state) +static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer) { - BT_DBG("%p %s -> %s", chan, state_to_string(chan->state), - state_to_string(state)); + BT_DBG("chan %p state %d", chan, chan->state); + if (timer_pending(timer) && del_timer(timer)) + chan_put(chan); +} + +static void l2cap_state_change(struct l2cap_chan *chan, int state) +{ chan->state = state; chan->ops->state_change(chan->data, state); } -static void l2cap_chan_timeout(struct work_struct *work) +static void l2cap_chan_timeout(unsigned long arg) { - struct l2cap_chan *chan = container_of(work, struct l2cap_chan, - chan_timer.work); + struct l2cap_chan *chan = (struct l2cap_chan *) arg; struct sock *sk = chan->sk; int reason; BT_DBG("chan %p state %d", chan, chan->state); - lock_sock(sk); + bh_lock_sock(sk); + + if (sock_owned_by_user(sk)) { + /* sk is owned by user. 
Try again later */ + __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); + bh_unlock_sock(sk); + chan_put(chan); + return; + } if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) reason = ECONNREFUSED; @@ -273,10 +267,10 @@ static void l2cap_chan_timeout(struct work_struct *work) l2cap_chan_close(chan, reason); - release_sock(sk); + bh_unlock_sock(sk); chan->ops->close(chan->data); - l2cap_chan_put(chan); + chan_put(chan); } struct l2cap_chan *l2cap_chan_create(struct sock *sk) @@ -293,14 +287,12 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk) list_add(&chan->global_l, &chan_list); write_unlock_bh(&chan_list_lock); - INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout); + setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan); chan->state = BT_OPEN; atomic_set(&chan->refcnt, 1); - BT_DBG("sk %p chan %p", sk, chan); - return chan; } @@ -310,15 +302,15 @@ void l2cap_chan_destroy(struct l2cap_chan *chan) list_del(&chan->global_l); write_unlock_bh(&chan_list_lock); - l2cap_chan_put(chan); + chan_put(chan); } -static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) +static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) { BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, chan->psm, chan->dcid); - conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; + conn->disc_reason = 0x13; chan->conn = conn; @@ -345,16 +337,9 @@ static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) chan->omtu = L2CAP_DEFAULT_MTU; } - chan->local_id = L2CAP_BESTEFFORT_ID; - chan->local_stype = L2CAP_SERV_BESTEFFORT; - chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE; - chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME; - chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT; - chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO; - - l2cap_chan_hold(chan); + chan_hold(chan); - list_add_rcu(&chan->list, &conn->chan_l); + list_add(&chan->list, &conn->chan_l); } /* Delete channel. 
@@ -371,10 +356,10 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err) if (conn) { /* Delete from channel list */ - list_del_rcu(&chan->list); - synchronize_rcu(); - - l2cap_chan_put(chan); + write_lock_bh(&conn->chan_lock); + list_del(&chan->list); + write_unlock_bh(&conn->chan_lock); + chan_put(chan); chan->conn = NULL; hci_conn_put(conn->hcon); @@ -523,7 +508,7 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan) } /* Service level security */ -int l2cap_chan_check_security(struct l2cap_chan *chan) +static inline int l2cap_check_security(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; __u8 auth_type; @@ -571,58 +556,34 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, flags = ACL_START; bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON; - skb->priority = HCI_PRIO_MAX; - - hci_send_acl(conn->hchan, skb, flags); -} -static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) -{ - struct hci_conn *hcon = chan->conn->hcon; - u16 flags; - - BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len, - skb->priority); - - if (!test_bit(FLAG_FLUSHABLE, &chan->flags) && - lmp_no_flush_capable(hcon->hdev)) - flags = ACL_START_NO_FLUSH; - else - flags = ACL_START; - - bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags); - hci_send_acl(chan->conn->hchan, skb, flags); + hci_send_acl(conn->hcon, skb, flags); } -static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control) +static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control) { struct sk_buff *skb; struct l2cap_hdr *lh; struct l2cap_conn *conn = chan->conn; - int count, hlen; + int count, hlen = L2CAP_HDR_SIZE + 2; + u8 flags; if (chan->state != BT_CONNECTED) return; - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - hlen = L2CAP_EXT_HDR_SIZE; - else - hlen = L2CAP_ENH_HDR_SIZE; - if (chan->fcs == L2CAP_FCS_CRC16) - hlen += L2CAP_FCS_SIZE; + hlen += 2; - BT_DBG("chan %p, control 0x%8.8x", chan, control); + BT_DBG("chan %p, control 0x%2.2x", chan, control); count = min_t(unsigned int, conn->mtu, hlen); - - control |= __set_sframe(chan); + control |= L2CAP_CTRL_FRAME_TYPE; if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) - control |= __set_ctrl_final(chan); + control |= L2CAP_CTRL_FINAL; if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state)) - control |= __set_ctrl_poll(chan); + control |= L2CAP_CTRL_POLL; skb = bt_skb_alloc(count, GFP_ATOMIC); if (!skb) @@ -631,27 +592,32 @@ static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control) lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); - - __put_control(chan, control, skb_put(skb, __ctrl_size(chan))); + put_unaligned_le16(control, skb_put(skb, 2)); if (chan->fcs == L2CAP_FCS_CRC16) { - u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE); - put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); + u16 fcs = crc16(0, (u8 *)lh, count - 2); + put_unaligned_le16(fcs, skb_put(skb, 2)); } - skb->priority = HCI_PRIO_MAX; - l2cap_do_send(chan, skb); + if (lmp_no_flush_capable(conn->hcon->hdev)) + flags = ACL_START_NO_FLUSH; + else + flags = ACL_START; + + bt_cb(skb)->force_active = chan->force_active; + + hci_send_acl(chan->conn->hcon, skb, flags); } -static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control) +static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control) { if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { - 
control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); + control |= L2CAP_SUPER_RCV_NOT_READY; set_bit(CONN_RNR_SENT, &chan->conn_state); } else - control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); + control |= L2CAP_SUPER_RCV_READY; - control |= __set_reqseq(chan, chan->buffer_seq); + control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; l2cap_send_sframe(chan, control); } @@ -669,7 +635,7 @@ static void l2cap_do_start(struct l2cap_chan *chan) if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) return; - if (l2cap_chan_check_security(chan) && + if (l2cap_check_security(chan) && __l2cap_no_conn_pending(chan)) { struct l2cap_conn_req req; req.scid = cpu_to_le16(chan->scid); @@ -688,7 +654,7 @@ static void l2cap_do_start(struct l2cap_chan *chan) conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; conn->info_ident = l2cap_get_ident(conn); - schedule_delayed_work(&conn->info_timer, + mod_timer(&conn->info_timer, jiffies + msecs_to_jiffies(L2CAP_INFO_TIMEOUT)); l2cap_send_cmd(conn, conn->info_ident, @@ -740,13 +706,13 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c /* ---- L2CAP connections ---- */ static void l2cap_conn_start(struct l2cap_conn *conn) { - struct l2cap_chan *chan; + struct l2cap_chan *chan, *tmp; BT_DBG("conn %p", conn); - rcu_read_lock(); + read_lock(&conn->chan_lock); - list_for_each_entry_rcu(chan, &conn->chan_l, list) { + list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) { struct sock *sk = chan->sk; bh_lock_sock(sk); @@ -759,7 +725,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn) if (chan->state == BT_CONNECT) { struct l2cap_conn_req req; - if (!l2cap_chan_check_security(chan) || + if (!l2cap_check_security(chan) || !__l2cap_no_conn_pending(chan)) { bh_unlock_sock(sk); continue; @@ -770,7 +736,9 @@ static void l2cap_conn_start(struct l2cap_conn *conn) &chan->conf_state)) { /* l2cap_chan_close() calls list_del(chan) * so release the lock */ + read_unlock(&conn->chan_lock); l2cap_chan_close(chan, ECONNRESET); + read_lock(&conn->chan_lock); bh_unlock_sock(sk); continue; } @@ -790,7 +758,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn) rsp.scid = cpu_to_le16(chan->dcid); rsp.dcid = cpu_to_le16(chan->scid); - if (l2cap_chan_check_security(chan)) { + if (l2cap_check_security(chan)) { if (bt_sk(sk)->defer_setup) { struct sock *parent = bt_sk(sk)->parent; rsp.result = cpu_to_le16(L2CAP_CR_PEND); @@ -826,7 +794,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn) bh_unlock_sock(sk); } - rcu_read_unlock(); + read_unlock(&conn->chan_lock); } /* Find socket with cid and source bdaddr. 
@@ -877,7 +845,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) parent = pchan->sk; - lock_sock(parent); + bh_lock_sock(parent); /* Check for backlog size */ if (sk_acceptq_is_full(parent)) { @@ -891,6 +859,8 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) sk = chan->sk; + write_lock_bh(&conn->chan_lock); + hci_conn_hold(conn->hcon); bacpy(&bt_sk(sk)->src, conn->src); @@ -898,15 +868,17 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) bt_accept_enqueue(parent, sk); - l2cap_chan_add(conn, chan); + __l2cap_chan_add(conn, chan); __set_chan_timer(chan, sk->sk_sndtimeo); l2cap_state_change(chan, BT_CONNECTED); parent->sk_data_ready(parent, 0); + write_unlock_bh(&conn->chan_lock); + clean: - release_sock(parent); + bh_unlock_sock(parent); } static void l2cap_chan_ready(struct sock *sk) @@ -938,9 +910,9 @@ static void l2cap_conn_ready(struct l2cap_conn *conn) if (conn->hcon->out && conn->hcon->type == LE_LINK) smp_conn_security(conn, conn->hcon->pending_sec_level); - rcu_read_lock(); + read_lock(&conn->chan_lock); - list_for_each_entry_rcu(chan, &conn->chan_l, list) { + list_for_each_entry(chan, &conn->chan_l, list) { struct sock *sk = chan->sk; bh_lock_sock(sk); @@ -960,7 +932,7 @@ static void l2cap_conn_ready(struct l2cap_conn *conn) bh_unlock_sock(sk); } - rcu_read_unlock(); + read_unlock(&conn->chan_lock); } /* Notify sockets that we cannot guaranty reliability anymore */ @@ -970,22 +942,21 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err) BT_DBG("conn %p", conn); - rcu_read_lock(); + read_lock(&conn->chan_lock); - list_for_each_entry_rcu(chan, &conn->chan_l, list) { + list_for_each_entry(chan, &conn->chan_l, list) { struct sock *sk = chan->sk; - if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags)) + if (chan->force_reliable) sk->sk_err = err; } - rcu_read_unlock(); + read_unlock(&conn->chan_lock); } -static void l2cap_info_timeout(struct work_struct *work) +static void l2cap_info_timeout(unsigned long arg) { - struct l2cap_conn *conn = container_of(work, struct l2cap_conn, - info_timer.work); + struct l2cap_conn *conn = (void *) arg; conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; conn->info_ident = 0; @@ -1009,19 +980,17 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) /* Kill channels */ list_for_each_entry_safe(chan, l, &conn->chan_l, list) { sk = chan->sk; - lock_sock(sk); + bh_lock_sock(sk); l2cap_chan_del(chan, err); - release_sock(sk); + bh_unlock_sock(sk); chan->ops->close(chan->data); } - hci_chan_del(conn->hchan); - if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) - __cancel_delayed_work(&conn->info_timer); + del_timer_sync(&conn->info_timer); if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) { - __cancel_delayed_work(&conn->security_timer); + del_timer(&conn->security_timer); smp_chan_destroy(conn); } @@ -1029,10 +998,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) kfree(conn); } -static void security_timeout(struct work_struct *work) +static void security_timeout(unsigned long arg) { - struct l2cap_conn *conn = container_of(work, struct l2cap_conn, - security_timer.work); + struct l2cap_conn *conn = (void *) arg; l2cap_conn_del(conn->hcon, ETIMEDOUT); } @@ -1040,26 +1008,18 @@ static void security_timeout(struct work_struct *work) static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) { struct l2cap_conn *conn = hcon->l2cap_data; - struct hci_chan *hchan; if (conn || status) return conn; - hchan = hci_chan_create(hcon); - if (!hchan) - return NULL; - conn = 
kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC); - if (!conn) { - hci_chan_del(hchan); + if (!conn) return NULL; - } hcon->l2cap_data = conn; conn->hcon = hcon; - conn->hchan = hchan; - BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan); + BT_DBG("hcon %p conn %p", hcon, conn); if (hcon->hdev->le_mtu && hcon->type == LE_LINK) conn->mtu = hcon->hdev->le_mtu; @@ -1072,19 +1032,29 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) conn->feat_mask = 0; spin_lock_init(&conn->lock); + rwlock_init(&conn->chan_lock); INIT_LIST_HEAD(&conn->chan_l); if (hcon->type == LE_LINK) - INIT_DELAYED_WORK(&conn->security_timer, security_timeout); + setup_timer(&conn->security_timer, security_timeout, + (unsigned long) conn); else - INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout); + setup_timer(&conn->info_timer, l2cap_info_timeout, + (unsigned long) conn); - conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; + conn->disc_reason = 0x13; return conn; } +static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) +{ + write_lock_bh(&conn->chan_lock); + __l2cap_chan_add(conn, chan); + write_unlock_bh(&conn->chan_lock); +} + /* ---- Socket interface ---- */ /* Find socket with psm and source bdaddr. @@ -1120,10 +1090,11 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr return c1; } -inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst) +int l2cap_chan_connect(struct l2cap_chan *chan) { struct sock *sk = chan->sk; bdaddr_t *src = &bt_sk(sk)->src; + bdaddr_t *dst = &bt_sk(sk)->dst; struct l2cap_conn *conn; struct hci_conn *hcon; struct hci_dev *hdev; @@ -1137,62 +1108,7 @@ inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdad if (!hdev) return -EHOSTUNREACH; - hci_dev_lock(hdev); - - lock_sock(sk); - - /* PSM must be odd and lsb of upper byte must be 0 */ - if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid && - chan->chan_type != L2CAP_CHAN_RAW) { - err = -EINVAL; - goto done; - } - - if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) { - err = -EINVAL; - goto done; - } - - switch (chan->mode) { - case L2CAP_MODE_BASIC: - break; - case L2CAP_MODE_ERTM: - case L2CAP_MODE_STREAMING: - if (!disable_ertm) - break; - /* fall through */ - default: - err = -ENOTSUPP; - goto done; - } - - switch (sk->sk_state) { - case BT_CONNECT: - case BT_CONNECT2: - case BT_CONFIG: - /* Already connecting */ - err = 0; - goto done; - - case BT_CONNECTED: - /* Already connected */ - err = -EISCONN; - goto done; - - case BT_OPEN: - case BT_BOUND: - /* Can connect */ - break; - - default: - err = -EBADFD; - goto done; - } - - /* Set destination address and psm */ - bacpy(&bt_sk(sk)->dst, src); - chan->psm = psm; - chan->dcid = cid; + hci_dev_lock_bh(hdev); auth_type = l2cap_get_auth_type(chan); @@ -1226,7 +1142,7 @@ inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdad if (hcon->state == BT_CONNECTED) { if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { __clear_chan_timer(chan); - if (l2cap_chan_check_security(chan)) + if (l2cap_check_security(chan)) l2cap_state_change(chan, BT_CONNECTED); } else l2cap_do_start(chan); @@ -1235,7 +1151,7 @@ inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdad err = 0; done: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } @@ -1272,18 +1188,17 @@ int __l2cap_wait_ack(struct sock *sk) return err; } -static void l2cap_monitor_timeout(struct work_struct 
*work) +static void l2cap_monitor_timeout(unsigned long arg) { - struct l2cap_chan *chan = container_of(work, struct l2cap_chan, - monitor_timer.work); + struct l2cap_chan *chan = (void *) arg; struct sock *sk = chan->sk; BT_DBG("chan %p", chan); - lock_sock(sk); + bh_lock_sock(sk); if (chan->retry_count >= chan->remote_max_tx) { l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); - release_sock(sk); + bh_unlock_sock(sk); return; } @@ -1291,25 +1206,24 @@ static void l2cap_monitor_timeout(struct work_struct *work) __set_monitor_timer(chan); l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); - release_sock(sk); + bh_unlock_sock(sk); } -static void l2cap_retrans_timeout(struct work_struct *work) +static void l2cap_retrans_timeout(unsigned long arg) { - struct l2cap_chan *chan = container_of(work, struct l2cap_chan, - retrans_timer.work); + struct l2cap_chan *chan = (void *) arg; struct sock *sk = chan->sk; BT_DBG("chan %p", chan); - lock_sock(sk); + bh_lock_sock(sk); chan->retry_count = 1; __set_monitor_timer(chan); set_bit(CONN_WAIT_F, &chan->conn_state); l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); - release_sock(sk); + bh_unlock_sock(sk); } static void l2cap_drop_acked_frames(struct l2cap_chan *chan) @@ -1331,46 +1245,60 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan) __clear_retrans_timer(chan); } +static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) +{ + struct hci_conn *hcon = chan->conn->hcon; + u16 flags; + + BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len); + + if (!chan->flushable && lmp_no_flush_capable(hcon->hdev)) + flags = ACL_START_NO_FLUSH; + else + flags = ACL_START; + + bt_cb(skb)->force_active = chan->force_active; + hci_send_acl(hcon, skb, flags); +} + static void l2cap_streaming_send(struct l2cap_chan *chan) { struct sk_buff *skb; - u32 control; - u16 fcs; + u16 control, fcs; while ((skb = skb_dequeue(&chan->tx_q))) { - control = __get_control(chan, skb->data + L2CAP_HDR_SIZE); - control |= __set_txseq(chan, chan->next_tx_seq); - __put_control(chan, control, skb->data + L2CAP_HDR_SIZE); + control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE); + control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT; + put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE); if (chan->fcs == L2CAP_FCS_CRC16) { - fcs = crc16(0, (u8 *)skb->data, - skb->len - L2CAP_FCS_SIZE); - put_unaligned_le16(fcs, - skb->data + skb->len - L2CAP_FCS_SIZE); + fcs = crc16(0, (u8 *)skb->data, skb->len - 2); + put_unaligned_le16(fcs, skb->data + skb->len - 2); } l2cap_do_send(chan, skb); - chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); + chan->next_tx_seq = (chan->next_tx_seq + 1) % 64; } } -static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq) +static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq) { struct sk_buff *skb, *tx_skb; - u16 fcs; - u32 control; + u16 control, fcs; skb = skb_peek(&chan->tx_q); if (!skb) return; - while (bt_cb(skb)->tx_seq != tx_seq) { + do { + if (bt_cb(skb)->tx_seq == tx_seq) + break; + if (skb_queue_is_last(&chan->tx_q, skb)) return; - skb = skb_queue_next(&chan->tx_q, skb); - } + } while ((skb = skb_queue_next(&chan->tx_q, skb))); if (chan->remote_max_tx && bt_cb(skb)->retries == chan->remote_max_tx) { @@ -1380,23 +1308,20 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq) tx_skb = skb_clone(skb, GFP_ATOMIC); bt_cb(skb)->retries++; - - control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); - control &= __get_sar_mask(chan); + control = 
get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); + control &= L2CAP_CTRL_SAR; if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) - control |= __set_ctrl_final(chan); + control |= L2CAP_CTRL_FINAL; - control |= __set_reqseq(chan, chan->buffer_seq); - control |= __set_txseq(chan, tx_seq); + control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) + | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); - __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); + put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); if (chan->fcs == L2CAP_FCS_CRC16) { - fcs = crc16(0, (u8 *)tx_skb->data, - tx_skb->len - L2CAP_FCS_SIZE); - put_unaligned_le16(fcs, - tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE); + fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2); + put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2); } l2cap_do_send(chan, tx_skb); @@ -1405,8 +1330,7 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq) static int l2cap_ertm_send(struct l2cap_chan *chan) { struct sk_buff *skb, *tx_skb; - u16 fcs; - u32 control; + u16 control, fcs; int nsent = 0; if (chan->state != BT_CONNECTED) @@ -1424,22 +1348,20 @@ static int l2cap_ertm_send(struct l2cap_chan *chan) bt_cb(skb)->retries++; - control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); - control &= __get_sar_mask(chan); + control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); + control &= L2CAP_CTRL_SAR; if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) - control |= __set_ctrl_final(chan); + control |= L2CAP_CTRL_FINAL; - control |= __set_reqseq(chan, chan->buffer_seq); - control |= __set_txseq(chan, chan->next_tx_seq); + control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) + | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); + put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); - __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); if (chan->fcs == L2CAP_FCS_CRC16) { - fcs = crc16(0, (u8 *)skb->data, - tx_skb->len - L2CAP_FCS_SIZE); - put_unaligned_le16(fcs, skb->data + - tx_skb->len - L2CAP_FCS_SIZE); + fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2); + put_unaligned_le16(fcs, skb->data + tx_skb->len - 2); } l2cap_do_send(chan, tx_skb); @@ -1447,8 +1369,7 @@ static int l2cap_ertm_send(struct l2cap_chan *chan) __set_retrans_timer(chan); bt_cb(skb)->tx_seq = chan->next_tx_seq; - - chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); + chan->next_tx_seq = (chan->next_tx_seq + 1) % 64; if (bt_cb(skb)->retries == 1) chan->unacked_frames++; @@ -1480,12 +1401,12 @@ static int l2cap_retransmit_frames(struct l2cap_chan *chan) static void l2cap_send_ack(struct l2cap_chan *chan) { - u32 control = 0; + u16 control = 0; - control |= __set_reqseq(chan, chan->buffer_seq); + control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { - control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); + control |= L2CAP_SUPER_RCV_NOT_READY; set_bit(CONN_RNR_SENT, &chan->conn_state); l2cap_send_sframe(chan, control); return; @@ -1494,20 +1415,20 @@ static void l2cap_send_ack(struct l2cap_chan *chan) if (l2cap_ertm_send(chan) > 0) return; - control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); + control |= L2CAP_SUPER_RCV_READY; l2cap_send_sframe(chan, control); } static void l2cap_send_srejtail(struct l2cap_chan *chan) { struct srej_list *tail; - u32 control; + u16 control; - control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); - control |= __set_ctrl_final(chan); + control = L2CAP_SUPER_SELECT_REJECT; + control |= L2CAP_CTRL_FINAL; tail = 
list_entry((&chan->srej_l)->prev, struct srej_list, list); - control |= __set_reqseq(chan, tail->tx_seq); + control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; l2cap_send_sframe(chan, control); } @@ -1535,8 +1456,6 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) return -EFAULT; - (*frag)->priority = skb->priority; - sent += count; len -= count; @@ -1546,17 +1465,15 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in return sent; } -static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, - struct msghdr *msg, size_t len, - u32 priority) +static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len) { struct sock *sk = chan->sk; struct l2cap_conn *conn = chan->conn; struct sk_buff *skb; - int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE; + int err, count, hlen = L2CAP_HDR_SIZE + 2; struct l2cap_hdr *lh; - BT_DBG("sk %p len %d priority %u", sk, (int)len, priority); + BT_DBG("sk %p len %d", sk, (int)len); count = min_t(unsigned int, (conn->mtu - hlen), len); skb = bt_skb_send_alloc(sk, count + hlen, @@ -1564,8 +1481,6 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, if (!skb) return ERR_PTR(err); - skb->priority = priority; - /* Create L2CAP header */ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); @@ -1580,9 +1495,7 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, return skb; } -static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, - struct msghdr *msg, size_t len, - u32 priority) +static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len) { struct sock *sk = chan->sk; struct l2cap_conn *conn = chan->conn; @@ -1598,8 +1511,6 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, if (!skb) return ERR_PTR(err); - skb->priority = priority; - /* Create L2CAP header */ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); @@ -1615,12 +1526,12 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, - u32 control, u16 sdulen) + u16 control, u16 sdulen) { struct sock *sk = chan->sk; struct l2cap_conn *conn = chan->conn; struct sk_buff *skb; - int err, count, hlen; + int err, count, hlen = L2CAP_HDR_SIZE + 2; struct l2cap_hdr *lh; BT_DBG("sk %p len %d", sk, (int)len); @@ -1628,16 +1539,11 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, if (!conn) return ERR_PTR(-ENOTCONN); - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - hlen = L2CAP_EXT_HDR_SIZE; - else - hlen = L2CAP_ENH_HDR_SIZE; - if (sdulen) - hlen += L2CAP_SDULEN_SIZE; + hlen += 2; if (chan->fcs == L2CAP_FCS_CRC16) - hlen += L2CAP_FCS_SIZE; + hlen += 2; count = min_t(unsigned int, (conn->mtu - hlen), len); skb = bt_skb_send_alloc(sk, count + hlen, @@ -1649,11 +1555,9 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); - - __put_control(chan, control, skb_put(skb, __ctrl_size(chan))); - + put_unaligned_le16(control, skb_put(skb, 2)); if (sdulen) - put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); + 
put_unaligned_le16(sdulen, skb_put(skb, 2)); err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); if (unlikely(err < 0)) { @@ -1662,7 +1566,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, } if (chan->fcs == L2CAP_FCS_CRC16) - put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE)); + put_unaligned_le16(0, skb_put(skb, 2)); bt_cb(skb)->retries = 0; return skb; @@ -1672,11 +1576,11 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si { struct sk_buff *skb; struct sk_buff_head sar_queue; - u32 control; + u16 control; size_t size = 0; skb_queue_head_init(&sar_queue); - control = __set_ctrl_sar(chan, L2CAP_SAR_START); + control = L2CAP_SDU_START; skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -1689,10 +1593,10 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si size_t buflen; if (len > chan->remote_mps) { - control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE); + control = L2CAP_SDU_CONTINUE; buflen = chan->remote_mps; } else { - control = __set_ctrl_sar(chan, L2CAP_SAR_END); + control = L2CAP_SDU_END; buflen = len; } @@ -1713,16 +1617,15 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si return size; } -int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, - u32 priority) +int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) { struct sk_buff *skb; - u32 control; + u16 control; int err; /* Connectionless channel */ if (chan->chan_type == L2CAP_CHAN_CONN_LESS) { - skb = l2cap_create_connless_pdu(chan, msg, len, priority); + skb = l2cap_create_connless_pdu(chan, msg, len); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -1737,7 +1640,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, return -EMSGSIZE; /* Create a basic PDU */ - skb = l2cap_create_basic_pdu(chan, msg, len, priority); + skb = l2cap_create_basic_pdu(chan, msg, len); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -1749,7 +1652,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, case L2CAP_MODE_STREAMING: /* Entire SDU fits into one PDU */ if (len <= chan->remote_mps) { - control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED); + control = L2CAP_SDU_UNSEGMENTED; skb = l2cap_create_iframe_pdu(chan, msg, len, control, 0); if (IS_ERR(skb)) @@ -1801,9 +1704,8 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) BT_DBG("conn %p", conn); - rcu_read_lock(); - - list_for_each_entry_rcu(chan, &conn->chan_l, list) { + read_lock(&conn->chan_lock); + list_for_each_entry(chan, &conn->chan_l, list) { struct sock *sk = chan->sk; if (chan->chan_type != L2CAP_CHAN_RAW) continue; @@ -1818,8 +1720,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) if (chan->ops->recv(chan->data, nskb)) kfree_skb(nskb); } - - rcu_read_unlock(); + read_unlock(&conn->chan_lock); } /* ---- L2CAP signalling commands ---- */ @@ -1949,64 +1850,37 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val) *ptr += L2CAP_CONF_OPT_SIZE + len; } -static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan) +static void l2cap_ack_timeout(unsigned long arg) { - struct l2cap_conf_efs efs; - - switch (chan->mode) { - case L2CAP_MODE_ERTM: - efs.id = chan->local_id; - efs.stype = chan->local_stype; - efs.msdu = cpu_to_le16(chan->local_msdu); - efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); - efs.acc_lat = 
cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); - efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO); - break; - - case L2CAP_MODE_STREAMING: - efs.id = 1; - efs.stype = L2CAP_SERV_BESTEFFORT; - efs.msdu = cpu_to_le16(chan->local_msdu); - efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); - efs.acc_lat = 0; - efs.flush_to = 0; - break; - - default: - return; - } - - l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs), - (unsigned long) &efs); -} + struct l2cap_chan *chan = (void *) arg; -static void l2cap_ack_timeout(struct work_struct *work) -{ - struct l2cap_chan *chan = container_of(work, struct l2cap_chan, - ack_timer.work); - - BT_DBG("chan %p", chan); - - lock_sock(chan->sk); + bh_lock_sock(chan->sk); l2cap_send_ack(chan); - release_sock(chan->sk); + bh_unlock_sock(chan->sk); } static inline void l2cap_ertm_init(struct l2cap_chan *chan) { + struct sock *sk = chan->sk; + chan->expected_ack_seq = 0; chan->unacked_frames = 0; chan->buffer_seq = 0; chan->num_acked = 0; chan->frames_sent = 0; - INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout); - INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout); - INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout); + setup_timer(&chan->retrans_timer, l2cap_retrans_timeout, + (unsigned long) chan); + setup_timer(&chan->monitor_timer, l2cap_monitor_timeout, + (unsigned long) chan); + setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan); skb_queue_head_init(&chan->srej_q); INIT_LIST_HEAD(&chan->srej_l); + + + sk->sk_backlog_rcv = l2cap_ertm_data_rcv; } static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) @@ -2022,36 +1896,11 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) } } -static inline bool __l2cap_ews_supported(struct l2cap_chan *chan) -{ - return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW; -} - -static inline bool __l2cap_efs_supported(struct l2cap_chan *chan) -{ - return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW; -} - -static inline void l2cap_txwin_setup(struct l2cap_chan *chan) -{ - if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW && - __l2cap_ews_supported(chan)) { - /* use extended control field */ - set_bit(FLAG_EXT_CTRL, &chan->flags); - chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; - } else { - chan->tx_win = min_t(u16, chan->tx_win, - L2CAP_DEFAULT_TX_WINDOW); - chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; - } -} - static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) { struct l2cap_conf_req *req = data; struct l2cap_conf_rfc rfc = { .mode = chan->mode }; void *ptr = req->data; - u16 size; BT_DBG("chan %p", chan); @@ -2064,9 +1913,6 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) break; - if (__l2cap_efs_supported(chan)) - set_bit(FLAG_EFS_ENABLE, &chan->flags); - /* fall through */ default: chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask); @@ -2096,27 +1942,17 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) case L2CAP_MODE_ERTM: rfc.mode = L2CAP_MODE_ERTM; + rfc.txwin_size = chan->tx_win; rfc.max_transmit = chan->max_tx; rfc.retrans_timeout = 0; rfc.monitor_timeout = 0; - - size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - - L2CAP_EXT_HDR_SIZE - - L2CAP_SDULEN_SIZE - - L2CAP_FCS_SIZE); - rfc.max_pdu_size = cpu_to_le16(size); - - l2cap_txwin_setup(chan); - - rfc.txwin_size = min_t(u16, chan->tx_win, - L2CAP_DEFAULT_TX_WINDOW); + rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); + if 
(L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10) + rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10); l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc); - if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) - l2cap_add_opt_efs(&ptr, chan); - if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS)) break; @@ -2125,10 +1961,6 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) chan->fcs = L2CAP_FCS_NONE; l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs); } - - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, - chan->tx_win); break; case L2CAP_MODE_STREAMING: @@ -2137,19 +1969,13 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) rfc.max_transmit = 0; rfc.retrans_timeout = 0; rfc.monitor_timeout = 0; - - size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - - L2CAP_EXT_HDR_SIZE - - L2CAP_SDULEN_SIZE - - L2CAP_FCS_SIZE); - rfc.max_pdu_size = cpu_to_le16(size); + rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); + if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10) + rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10); l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc); - if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) - l2cap_add_opt_efs(&ptr, chan); - if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS)) break; @@ -2176,11 +2002,8 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) int type, hint, olen; unsigned long val; struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; - struct l2cap_conf_efs efs; - u8 remote_efs = 0; u16 mtu = L2CAP_DEFAULT_MTU; u16 result = L2CAP_CONF_SUCCESS; - u16 size; BT_DBG("chan %p", chan); @@ -2210,22 +2033,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) case L2CAP_CONF_FCS: if (val == L2CAP_FCS_NONE) set_bit(CONF_NO_FCS_RECV, &chan->conf_state); - break; - case L2CAP_CONF_EFS: - remote_efs = 1; - if (olen == sizeof(efs)) - memcpy(&efs, (void *) val, olen); - break; - - case L2CAP_CONF_EWS: - if (!enable_hs) - return -ECONNREFUSED; - - set_bit(FLAG_EXT_CTRL, &chan->flags); - set_bit(CONF_EWS_RECV, &chan->conf_state); - chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; - chan->remote_tx_win = val; break; default: @@ -2250,13 +2058,6 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) break; } - if (remote_efs) { - if (__l2cap_efs_supported(chan)) - set_bit(FLAG_EFS_ENABLE, &chan->flags); - else - return -ECONNREFUSED; - } - if (chan->mode != rfc.mode) return -ECONNREFUSED; @@ -2275,6 +2076,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) sizeof(rfc), (unsigned long) &rfc); } + if (result == L2CAP_CONF_SUCCESS) { /* Configure output options and let the other side know * which ones we don't like. 
*/ @@ -2287,26 +2089,6 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) } l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu); - if (remote_efs) { - if (chan->local_stype != L2CAP_SERV_NOTRAFIC && - efs.stype != L2CAP_SERV_NOTRAFIC && - efs.stype != chan->local_stype) { - - result = L2CAP_CONF_UNACCEPT; - - if (chan->num_conf_req >= 1) - return -ECONNREFUSED; - - l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, - sizeof(efs), - (unsigned long) &efs); - } else { - /* Send PENDING Conf Rsp */ - result = L2CAP_CONF_PENDING; - set_bit(CONF_LOC_CONF_PEND, &chan->conf_state); - } - } - switch (rfc.mode) { case L2CAP_MODE_BASIC: chan->fcs = L2CAP_FCS_NONE; @@ -2314,20 +2096,13 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) break; case L2CAP_MODE_ERTM: - if (!test_bit(CONF_EWS_RECV, &chan->conf_state)) - chan->remote_tx_win = rfc.txwin_size; - else - rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW; - + chan->remote_tx_win = rfc.txwin_size; chan->remote_max_tx = rfc.max_transmit; - size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), - chan->conn->mtu - - L2CAP_EXT_HDR_SIZE - - L2CAP_SDULEN_SIZE - - L2CAP_FCS_SIZE); - rfc.max_pdu_size = cpu_to_le16(size); - chan->remote_mps = size; + if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10) + rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10); + + chan->remote_mps = le16_to_cpu(rfc.max_pdu_size); rfc.retrans_timeout = le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO); @@ -2339,29 +2114,13 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc); - if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { - chan->remote_id = efs.id; - chan->remote_stype = efs.stype; - chan->remote_msdu = le16_to_cpu(efs.msdu); - chan->remote_flush_to = - le32_to_cpu(efs.flush_to); - chan->remote_acc_lat = - le32_to_cpu(efs.acc_lat); - chan->remote_sdu_itime = - le32_to_cpu(efs.sdu_itime); - l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, - sizeof(efs), (unsigned long) &efs); - } break; case L2CAP_MODE_STREAMING: - size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), - chan->conn->mtu - - L2CAP_EXT_HDR_SIZE - - L2CAP_SDULEN_SIZE - - L2CAP_FCS_SIZE); - rfc.max_pdu_size = cpu_to_le16(size); - chan->remote_mps = size; + if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10) + rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10); + + chan->remote_mps = le16_to_cpu(rfc.max_pdu_size); set_bit(CONF_MODE_DONE, &chan->conf_state); @@ -2394,7 +2153,6 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi int type, olen; unsigned long val; struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; - struct l2cap_conf_efs efs; BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data); @@ -2430,26 +2188,6 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc); break; - - case L2CAP_CONF_EWS: - chan->tx_win = min_t(u16, val, - L2CAP_DEFAULT_EXT_WINDOW); - l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, - chan->tx_win); - break; - - case L2CAP_CONF_EFS: - if (olen == sizeof(efs)) - memcpy(&efs, (void *)val, olen); - - if (chan->local_stype != L2CAP_SERV_NOTRAFIC && - efs.stype != L2CAP_SERV_NOTRAFIC && - efs.stype != chan->local_stype) - return -ECONNREFUSED; - - l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, - sizeof(efs), (unsigned long) &efs); - break; } } @@ -2458,23 +2196,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi 
chan->mode = rfc.mode; - if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) { + if (*result == L2CAP_CONF_SUCCESS) { switch (rfc.mode) { case L2CAP_MODE_ERTM: chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); chan->mps = le16_to_cpu(rfc.max_pdu_size); - - if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { - chan->local_msdu = le16_to_cpu(efs.msdu); - chan->local_sdu_itime = - le32_to_cpu(efs.sdu_itime); - chan->local_acc_lat = le32_to_cpu(efs.acc_lat); - chan->local_flush_to = - le32_to_cpu(efs.flush_to); - } break; - case L2CAP_MODE_STREAMING: chan->mps = le16_to_cpu(rfc.max_pdu_size); } @@ -2574,7 +2302,7 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hd if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) && cmd->ident == conn->info_ident) { - __cancel_delayed_work(&conn->info_timer); + del_timer(&conn->info_timer); conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; conn->info_ident = 0; @@ -2607,12 +2335,12 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd parent = pchan->sk; - lock_sock(parent); + bh_lock_sock(parent); /* Check if the ACL is secure enough (if not SDP) */ if (psm != cpu_to_le16(0x0001) && !hci_conn_check_link_mode(conn->hcon)) { - conn->disc_reason = HCI_ERROR_AUTH_FAILURE; + conn->disc_reason = 0x05; result = L2CAP_CR_SEC_BLOCK; goto response; } @@ -2631,8 +2359,11 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd sk = chan->sk; + write_lock_bh(&conn->chan_lock); + /* Check if we already have channel with that dcid */ if (__l2cap_get_chan_by_dcid(conn, scid)) { + write_unlock_bh(&conn->chan_lock); sock_set_flag(sk, SOCK_ZAPPED); chan->ops->close(chan->data); goto response; @@ -2647,7 +2378,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd bt_accept_enqueue(parent, sk); - l2cap_chan_add(conn, chan); + __l2cap_chan_add(conn, chan); dcid = chan->scid; @@ -2656,7 +2387,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd chan->ident = cmd->ident; if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) { - if (l2cap_chan_check_security(chan)) { + if (l2cap_check_security(chan)) { if (bt_sk(sk)->defer_setup) { l2cap_state_change(chan, BT_CONNECT2); result = L2CAP_CR_PEND; @@ -2678,8 +2409,10 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd status = L2CAP_CS_NO_INFO; } + write_unlock_bh(&conn->chan_lock); + response: - release_sock(parent); + bh_unlock_sock(parent); sendresp: rsp.scid = cpu_to_le16(scid); @@ -2695,7 +2428,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; conn->info_ident = l2cap_get_ident(conn); - schedule_delayed_work(&conn->info_timer, + mod_timer(&conn->info_timer, jiffies + msecs_to_jiffies(L2CAP_INFO_TIMEOUT)); l2cap_send_cmd(conn, conn->info_ident, @@ -2761,11 +2494,19 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd break; default: + /* don't delete l2cap channel if sk is owned by user */ + if (sock_owned_by_user(sk)) { + l2cap_state_change(chan, BT_DISCONN); + __clear_chan_timer(chan); + __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); + break; + } + l2cap_chan_del(chan, ECONNREFUSED); break; } - release_sock(sk); + bh_unlock_sock(sk); return 0; } @@ -2871,23 +2612,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr 
chan->num_conf_req++; } - /* Got Conf Rsp PENDING from remote side and asume we sent - Conf Rsp PENDING in the code above */ - if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) && - test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { - - /* check compatibility */ - - clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state); - set_bit(CONF_OUTPUT_DONE, &chan->conf_state); - - l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, - l2cap_build_conf_rsp(chan, rsp, - L2CAP_CONF_SUCCESS, 0x0000), rsp); - } - unlock: - release_sock(sk); + bh_unlock_sock(sk); return 0; } @@ -2915,33 +2641,8 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr switch (result) { case L2CAP_CONF_SUCCESS: l2cap_conf_rfc_get(chan, rsp->data, len); - clear_bit(CONF_REM_CONF_PEND, &chan->conf_state); break; - case L2CAP_CONF_PENDING: - set_bit(CONF_REM_CONF_PEND, &chan->conf_state); - - if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { - char buf[64]; - - len = l2cap_parse_conf_rsp(chan, rsp->data, len, - buf, &result); - if (len < 0) { - l2cap_send_disconn_req(conn, chan, ECONNRESET); - goto done; - } - - /* check compatibility */ - - clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state); - set_bit(CONF_OUTPUT_DONE, &chan->conf_state); - - l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, - l2cap_build_conf_rsp(chan, buf, - L2CAP_CONF_SUCCESS, 0x0000), buf); - } - goto done; - case L2CAP_CONF_UNACCEPT: if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) { char req[64]; @@ -2994,7 +2695,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr } done: - release_sock(sk); + bh_unlock_sock(sk); return 0; } @@ -3023,8 +2724,17 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd sk->sk_shutdown = SHUTDOWN_MASK; + /* don't delete l2cap channel if sk is owned by user */ + if (sock_owned_by_user(sk)) { + l2cap_state_change(chan, BT_DISCONN); + __clear_chan_timer(chan); + __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); + bh_unlock_sock(sk); + return 0; + } + l2cap_chan_del(chan, ECONNRESET); - release_sock(sk); + bh_unlock_sock(sk); chan->ops->close(chan->data); return 0; @@ -3048,8 +2758,17 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd sk = chan->sk; + /* don't delete l2cap channel if sk is owned by user */ + if (sock_owned_by_user(sk)) { + l2cap_state_change(chan,BT_DISCONN); + __clear_chan_timer(chan); + __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); + bh_unlock_sock(sk); + return 0; + } + l2cap_chan_del(chan, 0); - release_sock(sk); + bh_unlock_sock(sk); chan->ops->close(chan->data); return 0; @@ -3073,25 +2792,15 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm if (!disable_ertm) feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING | L2CAP_FEAT_FCS; - if (enable_hs) - feat_mask |= L2CAP_FEAT_EXT_FLOW - | L2CAP_FEAT_EXT_WINDOW; - put_unaligned_le32(feat_mask, rsp->data); l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf), buf); } else if (type == L2CAP_IT_FIXED_CHAN) { u8 buf[12]; struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; - - if (enable_hs) - l2cap_fixed_chan[0] |= L2CAP_FC_A2MP; - else - l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP; - rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); - memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan)); + memcpy(buf + 4, l2cap_fixed_chan, 8); l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf), buf); } else { @@ -3120,7 +2829,7 @@ static inline int 
l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) return 0; - __cancel_delayed_work(&conn->info_timer); + del_timer(&conn->info_timer); if (result != L2CAP_IR_SUCCESS) { conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; @@ -3158,165 +2867,6 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm return 0; } -static inline int l2cap_create_channel_req(struct l2cap_conn *conn, - struct l2cap_cmd_hdr *cmd, u16 cmd_len, - void *data) -{ - struct l2cap_create_chan_req *req = data; - struct l2cap_create_chan_rsp rsp; - u16 psm, scid; - - if (cmd_len != sizeof(*req)) - return -EPROTO; - - if (!enable_hs) - return -EINVAL; - - psm = le16_to_cpu(req->psm); - scid = le16_to_cpu(req->scid); - - BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id); - - /* Placeholder: Always reject */ - rsp.dcid = 0; - rsp.scid = cpu_to_le16(scid); - rsp.result = L2CAP_CR_NO_MEM; - rsp.status = L2CAP_CS_NO_INFO; - - l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP, - sizeof(rsp), &rsp); - - return 0; -} - -static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn, - struct l2cap_cmd_hdr *cmd, void *data) -{ - BT_DBG("conn %p", conn); - - return l2cap_connect_rsp(conn, cmd, data); -} - -static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident, - u16 icid, u16 result) -{ - struct l2cap_move_chan_rsp rsp; - - BT_DBG("icid %d, result %d", icid, result); - - rsp.icid = cpu_to_le16(icid); - rsp.result = cpu_to_le16(result); - - l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp); -} - -static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn, - struct l2cap_chan *chan, u16 icid, u16 result) -{ - struct l2cap_move_chan_cfm cfm; - u8 ident; - - BT_DBG("icid %d, result %d", icid, result); - - ident = l2cap_get_ident(conn); - if (chan) - chan->ident = ident; - - cfm.icid = cpu_to_le16(icid); - cfm.result = cpu_to_le16(result); - - l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm); -} - -static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident, - u16 icid) -{ - struct l2cap_move_chan_cfm_rsp rsp; - - BT_DBG("icid %d", icid); - - rsp.icid = cpu_to_le16(icid); - l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp); -} - -static inline int l2cap_move_channel_req(struct l2cap_conn *conn, - struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) -{ - struct l2cap_move_chan_req *req = data; - u16 icid = 0; - u16 result = L2CAP_MR_NOT_ALLOWED; - - if (cmd_len != sizeof(*req)) - return -EPROTO; - - icid = le16_to_cpu(req->icid); - - BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id); - - if (!enable_hs) - return -EINVAL; - - /* Placeholder: Always refuse */ - l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result); - - return 0; -} - -static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn, - struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) -{ - struct l2cap_move_chan_rsp *rsp = data; - u16 icid, result; - - if (cmd_len != sizeof(*rsp)) - return -EPROTO; - - icid = le16_to_cpu(rsp->icid); - result = le16_to_cpu(rsp->result); - - BT_DBG("icid %d, result %d", icid, result); - - /* Placeholder: Always unconfirmed */ - l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED); - - return 0; -} - -static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn, - struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) -{ - struct l2cap_move_chan_cfm *cfm = data; - u16 icid, result; - - if (cmd_len != sizeof(*cfm)) - 
return -EPROTO; - - icid = le16_to_cpu(cfm->icid); - result = le16_to_cpu(cfm->result); - - BT_DBG("icid %d, result %d", icid, result); - - l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid); - - return 0; -} - -static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn, - struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) -{ - struct l2cap_move_chan_cfm_rsp *rsp = data; - u16 icid; - - if (cmd_len != sizeof(*rsp)) - return -EPROTO; - - icid = le16_to_cpu(rsp->icid); - - BT_DBG("icid %d", icid); - - return 0; -} - static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency, u16 to_multiplier) { @@ -3429,30 +2979,6 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn, err = l2cap_information_rsp(conn, cmd, data); break; - case L2CAP_CREATE_CHAN_REQ: - err = l2cap_create_channel_req(conn, cmd, cmd_len, data); - break; - - case L2CAP_CREATE_CHAN_RSP: - err = l2cap_create_channel_rsp(conn, cmd, data); - break; - - case L2CAP_MOVE_CHAN_REQ: - err = l2cap_move_channel_req(conn, cmd, cmd_len, data); - break; - - case L2CAP_MOVE_CHAN_RSP: - err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data); - break; - - case L2CAP_MOVE_CHAN_CFM: - err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data); - break; - - case L2CAP_MOVE_CHAN_CFM_RSP: - err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data); - break; - default: BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code); err = -EINVAL; @@ -3531,15 +3057,10 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb) { u16 our_fcs, rcv_fcs; - int hdr_size; - - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - hdr_size = L2CAP_EXT_HDR_SIZE; - else - hdr_size = L2CAP_ENH_HDR_SIZE; + int hdr_size = L2CAP_HDR_SIZE + 2; if (chan->fcs == L2CAP_FCS_CRC16) { - skb_trim(skb, skb->len - L2CAP_FCS_SIZE); + skb_trim(skb, skb->len - 2); rcv_fcs = get_unaligned_le16(skb->data + skb->len); our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size); @@ -3551,14 +3072,14 @@ static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb) static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) { - u32 control = 0; + u16 control = 0; chan->frames_sent = 0; - control |= __set_reqseq(chan, chan->buffer_seq); + control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { - control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); + control |= L2CAP_SUPER_RCV_NOT_READY; l2cap_send_sframe(chan, control); set_bit(CONN_RNR_SENT, &chan->conn_state); } @@ -3570,12 +3091,12 @@ static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && chan->frames_sent == 0) { - control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); + control |= L2CAP_SUPER_RCV_READY; l2cap_send_sframe(chan, control); } } -static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar) +static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar) { struct sk_buff *next_skb; int tx_seq_offset, next_tx_seq_offset; @@ -3584,15 +3105,23 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, bt_cb(skb)->sar = sar; next_skb = skb_peek(&chan->srej_q); + if (!next_skb) { + __skb_queue_tail(&chan->srej_q, skb); + return 0; + } - tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); + tx_seq_offset = (tx_seq - chan->buffer_seq) % 64; + if (tx_seq_offset < 0) + 
tx_seq_offset += 64; - while (next_skb) { + do { if (bt_cb(next_skb)->tx_seq == tx_seq) return -EINVAL; - next_tx_seq_offset = __seq_offset(chan, - bt_cb(next_skb)->tx_seq, chan->buffer_seq); + next_tx_seq_offset = (bt_cb(next_skb)->tx_seq - + chan->buffer_seq) % 64; + if (next_tx_seq_offset < 0) + next_tx_seq_offset += 64; if (next_tx_seq_offset > tx_seq_offset) { __skb_queue_before(&chan->srej_q, next_skb, skb); @@ -3600,10 +3129,9 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, } if (skb_queue_is_last(&chan->srej_q, next_skb)) - next_skb = NULL; - else - next_skb = skb_queue_next(&chan->srej_q, next_skb); - } + break; + + } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb))); __skb_queue_tail(&chan->srej_q, skb); @@ -3629,24 +3157,24 @@ static void append_skb_frag(struct sk_buff *skb, skb->truesize += new_frag->truesize; } -static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control) +static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control) { int err = -EINVAL; - switch (__get_ctrl_sar(chan, control)) { - case L2CAP_SAR_UNSEGMENTED: + switch (control & L2CAP_CTRL_SAR) { + case L2CAP_SDU_UNSEGMENTED: if (chan->sdu) break; err = chan->ops->recv(chan->data, skb); break; - case L2CAP_SAR_START: + case L2CAP_SDU_START: if (chan->sdu) break; chan->sdu_len = get_unaligned_le16(skb->data); - skb_pull(skb, L2CAP_SDULEN_SIZE); + skb_pull(skb, 2); if (chan->sdu_len > chan->imtu) { err = -EMSGSIZE; @@ -3663,7 +3191,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u3 err = 0; break; - case L2CAP_SAR_CONTINUE: + case L2CAP_SDU_CONTINUE: if (!chan->sdu) break; @@ -3677,7 +3205,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u3 err = 0; break; - case L2CAP_SAR_END: + case L2CAP_SDU_END: if (!chan->sdu) break; @@ -3712,14 +3240,14 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u3 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) { - u32 control; + u16 control; BT_DBG("chan %p, Enter local busy", chan); set_bit(CONN_LOCAL_BUSY, &chan->conn_state); - control = __set_reqseq(chan, chan->buffer_seq); - control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); + control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; + control |= L2CAP_SUPER_RCV_NOT_READY; l2cap_send_sframe(chan, control); set_bit(CONN_RNR_SENT, &chan->conn_state); @@ -3729,14 +3257,13 @@ static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan) { - u32 control; + u16 control; if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) goto done; - control = __set_reqseq(chan, chan->buffer_seq); - control |= __set_ctrl_poll(chan); - control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); + control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; + control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL; l2cap_send_sframe(chan, control); chan->retry_count = 1; @@ -3762,10 +3289,10 @@ void l2cap_chan_busy(struct l2cap_chan *chan, int busy) } } -static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq) +static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq) { struct sk_buff *skb; - u32 control; + u16 control; while ((skb = skb_peek(&chan->srej_q)) && !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { @@ -3775,7 +3302,7 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq) break; skb = skb_dequeue(&chan->srej_q); - control = 
__set_ctrl_sar(chan, bt_cb(skb)->sar); + control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT; err = l2cap_reassemble_sdu(chan, skb, control); if (err < 0) { @@ -3783,15 +3310,16 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq) break; } - chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej); - tx_seq = __next_seq(chan, tx_seq); + chan->buffer_seq_srej = + (chan->buffer_seq_srej + 1) % 64; + tx_seq = (tx_seq + 1) % 64; } } -static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq) +static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq) { struct srej_list *l, *tmp; - u32 control; + u16 control; list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { if (l->tx_seq == tx_seq) { @@ -3799,53 +3327,45 @@ static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq) kfree(l); return; } - control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); - control |= __set_reqseq(chan, l->tx_seq); + control = L2CAP_SUPER_SELECT_REJECT; + control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; l2cap_send_sframe(chan, control); list_del(&l->list); list_add_tail(&l->list, &chan->srej_l); } } -static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq) +static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq) { struct srej_list *new; - u32 control; + u16 control; while (tx_seq != chan->expected_tx_seq) { - control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); - control |= __set_reqseq(chan, chan->expected_tx_seq); + control = L2CAP_SUPER_SELECT_REJECT; + control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; l2cap_send_sframe(chan, control); new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); - if (!new) - return -ENOMEM; - new->tx_seq = chan->expected_tx_seq; - - chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); - + chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64; list_add_tail(&new->list, &chan->srej_l); } - - chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); - - return 0; + chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64; } -static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) +static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb) { - u16 tx_seq = __get_txseq(chan, rx_control); - u16 req_seq = __get_reqseq(chan, rx_control); - u8 sar = __get_ctrl_sar(chan, rx_control); + u8 tx_seq = __get_txseq(rx_control); + u8 req_seq = __get_reqseq(rx_control); + u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT; int tx_seq_offset, expected_tx_seq_offset; int num_to_ack = (chan->tx_win/6) + 1; int err = 0; - BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len, + BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len, tx_seq, rx_control); - if (__is_ctrl_final(chan, rx_control) && + if (L2CAP_CTRL_FINAL & rx_control && test_bit(CONN_WAIT_F, &chan->conn_state)) { __clear_monitor_timer(chan); if (chan->unacked_frames > 0) @@ -3856,7 +3376,9 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_cont chan->expected_ack_seq = req_seq; l2cap_drop_acked_frames(chan); - tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); + tx_seq_offset = (tx_seq - chan->buffer_seq) % 64; + if (tx_seq_offset < 0) + tx_seq_offset += 64; /* invalid tx_seq */ if (tx_seq_offset >= chan->tx_win) { @@ -3901,16 +3423,13 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_cont return 0; } } - - err = l2cap_send_srejframe(chan, tx_seq); 
- if (err < 0) { - l2cap_send_disconn_req(chan->conn, chan, -err); - return err; - } + l2cap_send_srejframe(chan, tx_seq); } } else { - expected_tx_seq_offset = __seq_offset(chan, - chan->expected_tx_seq, chan->buffer_seq); + expected_tx_seq_offset = + (chan->expected_tx_seq - chan->buffer_seq) % 64; + if (expected_tx_seq_offset < 0) + expected_tx_seq_offset += 64; /* duplicated tx_seq */ if (tx_seq_offset < expected_tx_seq_offset) @@ -3928,18 +3447,14 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_cont set_bit(CONN_SEND_PBIT, &chan->conn_state); - err = l2cap_send_srejframe(chan, tx_seq); - if (err < 0) { - l2cap_send_disconn_req(chan->conn, chan, -err); - return err; - } + l2cap_send_srejframe(chan, tx_seq); __clear_ack_timer(chan); } return 0; expected: - chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); + chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64; if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { bt_cb(skb)->tx_seq = tx_seq; @@ -3949,24 +3464,22 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_cont } err = l2cap_reassemble_sdu(chan, skb, rx_control); - chan->buffer_seq = __next_seq(chan, chan->buffer_seq); - + chan->buffer_seq = (chan->buffer_seq + 1) % 64; if (err < 0) { l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); return err; } - if (__is_ctrl_final(chan, rx_control)) { + if (rx_control & L2CAP_CTRL_FINAL) { if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) l2cap_retransmit_frames(chan); } + __set_ack_timer(chan); chan->num_acked = (chan->num_acked + 1) % num_to_ack; if (chan->num_acked == num_to_ack - 1) l2cap_send_ack(chan); - else - __set_ack_timer(chan); return 0; @@ -3975,15 +3488,15 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_cont return 0; } -static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control) +static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control) { - BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, - __get_reqseq(chan, rx_control), rx_control); + BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control), + rx_control); - chan->expected_ack_seq = __get_reqseq(chan, rx_control); + chan->expected_ack_seq = __get_reqseq(rx_control); l2cap_drop_acked_frames(chan); - if (__is_ctrl_poll(chan, rx_control)) { + if (rx_control & L2CAP_CTRL_POLL) { set_bit(CONN_SEND_FBIT, &chan->conn_state); if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && @@ -3996,7 +3509,7 @@ static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_co l2cap_send_i_or_rr_or_rnr(chan); } - } else if (__is_ctrl_final(chan, rx_control)) { + } else if (rx_control & L2CAP_CTRL_FINAL) { clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) @@ -4015,18 +3528,18 @@ static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_co } } -static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control) +static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control) { - u16 tx_seq = __get_reqseq(chan, rx_control); + u8 tx_seq = __get_reqseq(rx_control); - BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); + BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); chan->expected_ack_seq = tx_seq; l2cap_drop_acked_frames(chan); - if 
(__is_ctrl_final(chan, rx_control)) { + if (rx_control & L2CAP_CTRL_FINAL) { if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) l2cap_retransmit_frames(chan); } else { @@ -4036,15 +3549,15 @@ static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_c set_bit(CONN_REJ_ACT, &chan->conn_state); } } -static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control) +static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control) { - u16 tx_seq = __get_reqseq(chan, rx_control); + u8 tx_seq = __get_reqseq(rx_control); - BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); + BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); - if (__is_ctrl_poll(chan, rx_control)) { + if (rx_control & L2CAP_CTRL_POLL) { chan->expected_ack_seq = tx_seq; l2cap_drop_acked_frames(chan); @@ -4057,7 +3570,7 @@ static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_ chan->srej_save_reqseq = tx_seq; set_bit(CONN_SREJ_ACT, &chan->conn_state); } - } else if (__is_ctrl_final(chan, rx_control)) { + } else if (rx_control & L2CAP_CTRL_FINAL) { if (test_bit(CONN_SREJ_ACT, &chan->conn_state) && chan->srej_save_reqseq == tx_seq) clear_bit(CONN_SREJ_ACT, &chan->conn_state); @@ -4072,39 +3585,37 @@ static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_ } } -static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control) +static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control) { - u16 tx_seq = __get_reqseq(chan, rx_control); + u8 tx_seq = __get_reqseq(rx_control); - BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); + BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); set_bit(CONN_REMOTE_BUSY, &chan->conn_state); chan->expected_ack_seq = tx_seq; l2cap_drop_acked_frames(chan); - if (__is_ctrl_poll(chan, rx_control)) + if (rx_control & L2CAP_CTRL_POLL) set_bit(CONN_SEND_FBIT, &chan->conn_state); if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) { __clear_retrans_timer(chan); - if (__is_ctrl_poll(chan, rx_control)) + if (rx_control & L2CAP_CTRL_POLL) l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL); return; } - if (__is_ctrl_poll(chan, rx_control)) { + if (rx_control & L2CAP_CTRL_POLL) l2cap_send_srejtail(chan); - } else { - rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR); - l2cap_send_sframe(chan, rx_control); - } + else + l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY); } -static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) +static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb) { - BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len); + BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len); - if (__is_ctrl_final(chan, rx_control) && + if (L2CAP_CTRL_FINAL & rx_control && test_bit(CONN_WAIT_F, &chan->conn_state)) { __clear_monitor_timer(chan); if (chan->unacked_frames > 0) @@ -4112,20 +3623,20 @@ static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_cont clear_bit(CONN_WAIT_F, &chan->conn_state); } - switch (__get_ctrl_super(chan, rx_control)) { - case L2CAP_SUPER_RR: + switch (rx_control & L2CAP_CTRL_SUPERVISE) { + case L2CAP_SUPER_RCV_READY: l2cap_data_channel_rrframe(chan, rx_control); break; - case L2CAP_SUPER_REJ: + case L2CAP_SUPER_REJECT: 
l2cap_data_channel_rejframe(chan, rx_control); break; - case L2CAP_SUPER_SREJ: + case L2CAP_SUPER_SELECT_REJECT: l2cap_data_channel_srejframe(chan, rx_control); break; - case L2CAP_SUPER_RNR: + case L2CAP_SUPER_RCV_NOT_READY: l2cap_data_channel_rnrframe(chan, rx_control); break; } @@ -4137,12 +3648,12 @@ static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_cont static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb) { struct l2cap_chan *chan = l2cap_pi(sk)->chan; - u32 control; - u16 req_seq; + u16 control; + u8 req_seq; int len, next_tx_seq_offset, req_seq_offset; - control = __get_control(chan, skb->data); - skb_pull(skb, __ctrl_size(chan)); + control = get_unaligned_le16(skb->data); + skb_pull(skb, 2); len = skb->len; /* @@ -4153,23 +3664,26 @@ static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb) if (l2cap_check_fcs(chan, skb)) goto drop; - if (__is_sar_start(chan, control) && !__is_sframe(chan, control)) - len -= L2CAP_SDULEN_SIZE; + if (__is_sar_start(control) && __is_iframe(control)) + len -= 2; if (chan->fcs == L2CAP_FCS_CRC16) - len -= L2CAP_FCS_SIZE; + len -= 2; if (len > chan->mps) { l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); goto drop; } - req_seq = __get_reqseq(chan, control); + req_seq = __get_reqseq(control); + req_seq_offset = (req_seq - chan->expected_ack_seq) % 64; + if (req_seq_offset < 0) + req_seq_offset += 64; - req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq); - - next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq, - chan->expected_ack_seq); + next_tx_seq_offset = + (chan->next_tx_seq - chan->expected_ack_seq) % 64; + if (next_tx_seq_offset < 0) + next_tx_seq_offset += 64; /* check for invalid req-seq */ if (req_seq_offset > next_tx_seq_offset) { @@ -4177,7 +3691,7 @@ static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb) goto drop; } - if (!__is_sframe(chan, control)) { + if (__is_iframe(control)) { if (len < 0) { l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); goto drop; @@ -4205,8 +3719,8 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk { struct l2cap_chan *chan; struct sock *sk = NULL; - u32 control; - u16 tx_seq; + u16 control; + u8 tx_seq; int len; chan = l2cap_get_chan_by_scid(conn, cid); @@ -4237,28 +3751,33 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk break; case L2CAP_MODE_ERTM: - l2cap_ertm_data_rcv(sk, skb); + if (!sock_owned_by_user(sk)) { + l2cap_ertm_data_rcv(sk, skb); + } else { + if (sk_add_backlog(sk, skb)) + goto drop; + } goto done; case L2CAP_MODE_STREAMING: - control = __get_control(chan, skb->data); - skb_pull(skb, __ctrl_size(chan)); + control = get_unaligned_le16(skb->data); + skb_pull(skb, 2); len = skb->len; if (l2cap_check_fcs(chan, skb)) goto drop; - if (__is_sar_start(chan, control)) - len -= L2CAP_SDULEN_SIZE; + if (__is_sar_start(control)) + len -= 2; if (chan->fcs == L2CAP_FCS_CRC16) - len -= L2CAP_FCS_SIZE; + len -= 2; - if (len > chan->mps || len < 0 || __is_sframe(chan, control)) + if (len > chan->mps || len < 0 || __is_sframe(control)) goto drop; - tx_seq = __get_txseq(chan, control); + tx_seq = __get_txseq(control); if (chan->expected_tx_seq != tx_seq) { /* Frame(s) missing - must discard partial SDU */ @@ -4270,7 +3789,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk /* TODO: Notify userland of missing data */ } - chan->expected_tx_seq = __next_seq(chan, tx_seq); + chan->expected_tx_seq = (tx_seq + 1) % 64; if 
(l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE) l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); @@ -4287,7 +3806,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk done: if (sk) - release_sock(sk); + bh_unlock_sock(sk); return 0; } @@ -4303,7 +3822,7 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str sk = chan->sk; - lock_sock(sk); + bh_lock_sock(sk); BT_DBG("sk %p, len %d", sk, skb->len); @@ -4321,7 +3840,7 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str done: if (sk) - release_sock(sk); + bh_unlock_sock(sk); return 0; } @@ -4336,7 +3855,7 @@ static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk = chan->sk; - lock_sock(sk); + bh_lock_sock(sk); BT_DBG("sk %p, len %d", sk, skb->len); @@ -4354,7 +3873,7 @@ static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct done: if (sk) - release_sock(sk); + bh_unlock_sock(sk); return 0; } @@ -4404,11 +3923,14 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) /* ---- L2CAP interface with lower layer (HCI) ---- */ -int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) +static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) { int exact = 0, lm1 = 0, lm2 = 0; struct l2cap_chan *c; + if (type != ACL_LINK) + return -EINVAL; + BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr)); /* Find listening sockets and check their link_mode */ @@ -4421,12 +3943,12 @@ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) { lm1 |= HCI_LM_ACCEPT; - if (test_bit(FLAG_ROLE_SWITCH, &c->flags)) + if (c->role_switch) lm1 |= HCI_LM_MASTER; exact++; } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { lm2 |= HCI_LM_ACCEPT; - if (test_bit(FLAG_ROLE_SWITCH, &c->flags)) + if (c->role_switch) lm2 |= HCI_LM_MASTER; } } @@ -4435,12 +3957,15 @@ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) return exact ? 
lm1 : lm2; } -int l2cap_connect_cfm(struct hci_conn *hcon, u8 status) +static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status) { struct l2cap_conn *conn; BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); + if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK)) + return -EINVAL; + if (!status) { conn = l2cap_conn_add(hcon, status); if (conn) @@ -4451,22 +3976,27 @@ int l2cap_connect_cfm(struct hci_conn *hcon, u8 status) return 0; } -int l2cap_disconn_ind(struct hci_conn *hcon) +static int l2cap_disconn_ind(struct hci_conn *hcon) { struct l2cap_conn *conn = hcon->l2cap_data; BT_DBG("hcon %p", hcon); - if (!conn) - return HCI_ERROR_REMOTE_USER_TERM; + if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn) + return 0x13; + return conn->disc_reason; } -int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) +static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) { BT_DBG("hcon %p reason %d", hcon, reason); + if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK)) + return -EINVAL; + l2cap_conn_del(hcon, bt_to_errno(reason)); + return 0; } @@ -4487,7 +4017,7 @@ static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt) } } -int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) +static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) { struct l2cap_conn *conn = hcon->l2cap_data; struct l2cap_chan *chan; @@ -4499,12 +4029,12 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) if (hcon->type == LE_LINK) { smp_distribute_keys(conn, 0); - __cancel_delayed_work(&conn->security_timer); + del_timer(&conn->security_timer); } - rcu_read_lock(); + read_lock(&conn->chan_lock); - list_for_each_entry_rcu(chan, &conn->chan_l, list) { + list_for_each_entry(chan, &conn->chan_l, list) { struct sock *sk = chan->sk; bh_lock_sock(sk); @@ -4582,12 +4112,12 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) bh_unlock_sock(sk); } - rcu_read_unlock(); + read_unlock(&conn->chan_lock); return 0; } -int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) +static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) { struct l2cap_conn *conn = hcon->l2cap_data; @@ -4648,11 +4178,11 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) BT_ERR("Frame exceeding recv MTU (len %d, " "MTU %d)", len, chan->imtu); - release_sock(sk); + bh_unlock_sock(sk); l2cap_conn_unreliable(conn, ECOMM); goto drop; } - release_sock(sk); + bh_unlock_sock(sk); } /* Allocate skb for the complete frame (with header) */ @@ -4734,6 +4264,17 @@ static const struct file_operations l2cap_debugfs_fops = { static struct dentry *l2cap_debugfs; +static struct hci_proto l2cap_hci_proto = { + .name = "L2CAP", + .id = HCI_PROTO_L2CAP, + .connect_ind = l2cap_connect_ind, + .connect_cfm = l2cap_connect_cfm, + .disconn_ind = l2cap_disconn_ind, + .disconn_cfm = l2cap_disconn_cfm, + .security_cfm = l2cap_security_cfm, + .recv_acldata = l2cap_recv_acldata +}; + int __init l2cap_init(void) { int err; @@ -4742,6 +4283,13 @@ int __init l2cap_init(void) if (err < 0) return err; + err = hci_register_proto(&l2cap_hci_proto); + if (err < 0) { + BT_ERR("L2CAP protocol registration failed"); + bt_sock_unregister(BTPROTO_L2CAP); + goto error; + } + if (bt_debugfs) { l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs, NULL, &l2cap_debugfs_fops); @@ -4750,11 +4298,19 @@ int __init l2cap_init(void) } return 0; + +error: + l2cap_cleanup_sockets(); + return 
err; } void l2cap_exit(void) { debugfs_remove(l2cap_debugfs); + + if (hci_unregister_proto(&l2cap_hci_proto) < 0) + BT_ERR("L2CAP protocol unregistration failed"); + l2cap_cleanup_sockets(); } diff --git a/trunk/net/bluetooth/l2cap_sock.c b/trunk/net/bluetooth/l2cap_sock.c index 9ca5616166f7..5c406d3136f7 100644 --- a/trunk/net/bluetooth/l2cap_sock.c +++ b/trunk/net/bluetooth/l2cap_sock.c @@ -3,7 +3,6 @@ Copyright (C) 2000-2001 Qualcomm Incorporated Copyright (C) 2009-2010 Gustavo F. Padovan Copyright (C) 2010 Google Inc. - Copyright (C) 2011 ProFUSION Embedded Systems Written 2000,2001 by Maxim Krasnyansky @@ -123,15 +122,70 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al if (la.l2_cid && la.l2_psm) return -EINVAL; - err = l2cap_chan_connect(chan, la.l2_psm, la.l2_cid, &la.l2_bdaddr); + lock_sock(sk); + + if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED + && !(la.l2_psm || la.l2_cid)) { + err = -EINVAL; + goto done; + } + + switch (chan->mode) { + case L2CAP_MODE_BASIC: + break; + case L2CAP_MODE_ERTM: + case L2CAP_MODE_STREAMING: + if (!disable_ertm) + break; + /* fall through */ + default: + err = -ENOTSUPP; + goto done; + } + + switch (sk->sk_state) { + case BT_CONNECT: + case BT_CONNECT2: + case BT_CONFIG: + /* Already connecting */ + goto wait; + + case BT_CONNECTED: + /* Already connected */ + err = -EISCONN; + goto done; + + case BT_OPEN: + case BT_BOUND: + /* Can connect */ + break; + + default: + err = -EBADFD; + goto done; + } + + /* PSM must be odd and lsb of upper byte must be 0 */ + if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 && !la.l2_cid && + chan->chan_type != L2CAP_CHAN_RAW) { + err = -EINVAL; + goto done; + } + + /* Set destination address and psm */ + bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr); + chan->psm = la.l2_psm; + chan->dcid = la.l2_cid; + + err = l2cap_chan_connect(l2cap_pi(sk)->chan); if (err) goto done; +wait: err = bt_sock_wait_state(sk, BT_CONNECTED, sock_sndtimeo(sk, flags & O_NONBLOCK)); done: - if (sock_owned_by_user(sk)) - release_sock(sk); + release_sock(sk); return err; } @@ -280,7 +334,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us opts.mode = chan->mode; opts.fcs = chan->fcs; opts.max_tx = chan->max_tx; - opts.txwin_size = chan->tx_win; + opts.txwin_size = (__u16)chan->tx_win; len = min_t(unsigned int, len, sizeof(opts)); if (copy_to_user(optval, (char *) &opts, len)) @@ -305,10 +359,10 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us break; } - if (test_bit(FLAG_ROLE_SWITCH, &chan->flags)) + if (chan->role_switch) opt |= L2CAP_LM_MASTER; - if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags)) + if (chan->force_reliable) opt |= L2CAP_LM_RELIABLE; if (put_user(opt, (u32 __user *) optval)) @@ -395,8 +449,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch break; case BT_FLUSHABLE: - if (put_user(test_bit(FLAG_FLUSHABLE, &chan->flags), - (u32 __user *) optval)) + if (put_user(chan->flushable, (u32 __user *) optval)) err = -EFAULT; break; @@ -408,7 +461,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch break; } - pwr.force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags); + pwr.force_active = chan->force_active; len = min_t(unsigned int, len, sizeof(pwr)); if (copy_to_user(optval, (char *) &pwr, len)) @@ -416,16 +469,6 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch break; - case BT_CHANNEL_POLICY: - if (!enable_hs) { - err = -ENOPROTOOPT; - 
break; - } - - if (put_user(chan->chan_policy, (u32 __user *) optval)) - err = -EFAULT; - break; - default: err = -ENOPROTOOPT; break; @@ -460,7 +503,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us opts.mode = chan->mode; opts.fcs = chan->fcs; opts.max_tx = chan->max_tx; - opts.txwin_size = chan->tx_win; + opts.txwin_size = (__u16)chan->tx_win; len = min_t(unsigned int, sizeof(opts), optlen); if (copy_from_user((char *) &opts, optval, len)) { @@ -468,7 +511,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us break; } - if (opts.txwin_size > L2CAP_DEFAULT_EXT_WINDOW) { + if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) { err = -EINVAL; break; } @@ -492,7 +535,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us chan->omtu = opts.omtu; chan->fcs = opts.fcs; chan->max_tx = opts.max_tx; - chan->tx_win = opts.txwin_size; + chan->tx_win = (__u8)opts.txwin_size; break; case L2CAP_LM: @@ -508,15 +551,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us if (opt & L2CAP_LM_SECURE) chan->sec_level = BT_SECURITY_HIGH; - if (opt & L2CAP_LM_MASTER) - set_bit(FLAG_ROLE_SWITCH, &chan->flags); - else - clear_bit(FLAG_ROLE_SWITCH, &chan->flags); - - if (opt & L2CAP_LM_RELIABLE) - set_bit(FLAG_FORCE_RELIABLE, &chan->flags); - else - clear_bit(FLAG_FORCE_RELIABLE, &chan->flags); + chan->role_switch = (opt & L2CAP_LM_MASTER); + chan->force_reliable = (opt & L2CAP_LM_RELIABLE); break; default: @@ -572,13 +608,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch chan->sec_level = sec.level; - if (!chan->conn) - break; - conn = chan->conn; - - /*change security for LE channels */ - if (chan->scid == L2CAP_CID_LE_DATA) { + if (conn && chan->scid == L2CAP_CID_LE_DATA) { if (!conn->hcon->out) { err = -EINVAL; break; @@ -586,14 +617,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch if (smp_conn_security(conn, sec.level)) break; - sk->sk_state = BT_CONFIG; - /* or for ACL link, under defer_setup time */ - } else if (sk->sk_state == BT_CONNECT2 && - bt_sk(sk)->defer_setup) { - err = l2cap_chan_check_security(chan); - } else { - err = -EINVAL; + err = 0; + sk->sk_state = BT_CONFIG; } break; @@ -632,10 +658,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch } } - if (opt) - set_bit(FLAG_FLUSHABLE, &chan->flags); - else - clear_bit(FLAG_FLUSHABLE, &chan->flags); + chan->flushable = opt; break; case BT_POWER: @@ -652,36 +675,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch err = -EFAULT; break; } - - if (pwr.force_active) - set_bit(FLAG_FORCE_ACTIVE, &chan->flags); - else - clear_bit(FLAG_FORCE_ACTIVE, &chan->flags); - break; - - case BT_CHANNEL_POLICY: - if (!enable_hs) { - err = -ENOPROTOOPT; - break; - } - - if (get_user(opt, (u32 __user *) optval)) { - err = -EFAULT; - break; - } - - if (opt > BT_CHANNEL_POLICY_AMP_PREFERRED) { - err = -EINVAL; - break; - } - - if (chan->mode != L2CAP_MODE_ERTM && - chan->mode != L2CAP_MODE_STREAMING) { - err = -EOPNOTSUPP; - break; - } - - chan->chan_policy = (u8) opt; + chan->force_active = pwr.force_active; break; default: @@ -715,7 +709,7 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms return -ENOTCONN; } - err = l2cap_chan_send(chan, msg, len, sk->sk_priority); + err = l2cap_chan_send(chan, msg, len); release_sock(sk); return err; @@ -937,9 +931,11 @@ static void 
l2cap_sock_init(struct sock *sk, struct sock *parent) chan->fcs = pchan->fcs; chan->max_tx = pchan->max_tx; chan->tx_win = pchan->tx_win; - chan->tx_win_max = pchan->tx_win_max; chan->sec_level = pchan->sec_level; - chan->flags = pchan->flags; + chan->role_switch = pchan->role_switch; + chan->force_reliable = pchan->force_reliable; + chan->flushable = pchan->flushable; + chan->force_active = pchan->force_active; security_sk_clone(parent, sk); } else { @@ -968,10 +964,12 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent) chan->max_tx = L2CAP_DEFAULT_MAX_TX; chan->fcs = L2CAP_FCS_CRC16; chan->tx_win = L2CAP_DEFAULT_TX_WINDOW; - chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; chan->sec_level = BT_SECURITY_LOW; - chan->flags = 0; - set_bit(FLAG_FORCE_ACTIVE, &chan->flags); + chan->role_switch = 0; + chan->force_reliable = 0; + chan->flushable = BT_FLUSHABLE_OFF; + chan->force_active = BT_POWER_FORCE_ACTIVE_ON; + } /* Default config options */ diff --git a/trunk/net/bluetooth/mgmt.c b/trunk/net/bluetooth/mgmt.c index 2540944d871f..2c7634296866 100644 --- a/trunk/net/bluetooth/mgmt.c +++ b/trunk/net/bluetooth/mgmt.c @@ -22,7 +22,6 @@ /* Bluetooth HCI Management interface */ -#include #include #include #include @@ -30,103 +29,26 @@ #include #include #include -#include #define MGMT_VERSION 0 #define MGMT_REVISION 1 -#define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */ - -#define SERVICE_CACHE_TIMEOUT (5 * 1000) - struct pending_cmd { struct list_head list; - u16 opcode; + __u16 opcode; int index; void *param; struct sock *sk; void *user_data; }; -/* HCI to MGMT error code conversion table */ -static u8 mgmt_status_table[] = { - MGMT_STATUS_SUCCESS, - MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */ - MGMT_STATUS_NOT_CONNECTED, /* No Connection */ - MGMT_STATUS_FAILED, /* Hardware Failure */ - MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */ - MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */ - MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */ - MGMT_STATUS_NO_RESOURCES, /* Memory Full */ - MGMT_STATUS_TIMEOUT, /* Connection Timeout */ - MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */ - MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */ - MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */ - MGMT_STATUS_BUSY, /* Command Disallowed */ - MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */ - MGMT_STATUS_REJECTED, /* Rejected Security */ - MGMT_STATUS_REJECTED, /* Rejected Personal */ - MGMT_STATUS_TIMEOUT, /* Host Timeout */ - MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */ - MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */ - MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */ - MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */ - MGMT_STATUS_DISCONNECTED, /* OE Power Off */ - MGMT_STATUS_DISCONNECTED, /* Connection Terminated */ - MGMT_STATUS_BUSY, /* Repeated Attempts */ - MGMT_STATUS_REJECTED, /* Pairing Not Allowed */ - MGMT_STATUS_FAILED, /* Unknown LMP PDU */ - MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */ - MGMT_STATUS_REJECTED, /* SCO Offset Rejected */ - MGMT_STATUS_REJECTED, /* SCO Interval Rejected */ - MGMT_STATUS_REJECTED, /* Air Mode Rejected */ - MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */ - MGMT_STATUS_FAILED, /* Unspecified Error */ - MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */ - MGMT_STATUS_FAILED, /* Role Change Not Allowed */ - MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */ - MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */ - MGMT_STATUS_FAILED, /* LMP PDU Not Allowed 
*/ - MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */ - MGMT_STATUS_FAILED, /* Unit Link Key Used */ - MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */ - MGMT_STATUS_TIMEOUT, /* Instant Passed */ - MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */ - MGMT_STATUS_FAILED, /* Transaction Collision */ - MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */ - MGMT_STATUS_REJECTED, /* QoS Rejected */ - MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */ - MGMT_STATUS_REJECTED, /* Insufficient Security */ - MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */ - MGMT_STATUS_BUSY, /* Role Switch Pending */ - MGMT_STATUS_FAILED, /* Slot Violation */ - MGMT_STATUS_FAILED, /* Role Switch Failed */ - MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */ - MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */ - MGMT_STATUS_BUSY, /* Host Busy Pairing */ - MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */ - MGMT_STATUS_BUSY, /* Controller Busy */ - MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */ - MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */ - MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */ - MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */ - MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */ -}; - -static u8 mgmt_status(u8 hci_status) -{ - if (hci_status < ARRAY_SIZE(mgmt_status_table)) - return mgmt_status_table[hci_status]; - - return MGMT_STATUS_FAILED; -} +static LIST_HEAD(cmd_list); static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) { struct sk_buff *skb; struct mgmt_hdr *hdr; struct mgmt_ev_cmd_status *ev; - int err; BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status); @@ -144,11 +66,10 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) ev->status = status; put_unaligned_le16(cmd, &ev->opcode); - err = sock_queue_rcv_skb(sk, skb); - if (err < 0) + if (sock_queue_rcv_skb(sk, skb) < 0) kfree_skb(skb); - return err; + return 0; } static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp, @@ -157,7 +78,6 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp, struct sk_buff *skb; struct mgmt_hdr *hdr; struct mgmt_ev_cmd_complete *ev; - int err; BT_DBG("sock %p", sk); @@ -177,11 +97,10 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp, if (rp) memcpy(ev->data, rp, rp_len); - err = sock_queue_rcv_skb(sk, skb); - if (err < 0) + if (sock_queue_rcv_skb(sk, skb) < 0) kfree_skb(skb); - return err;; + return 0; } static int read_version(struct sock *sk) @@ -201,7 +120,6 @@ static int read_index_list(struct sock *sk) { struct mgmt_rp_read_index_list *rp; struct list_head *p; - struct hci_dev *d; size_t rp_len; u16 count; int i, err; @@ -225,9 +143,10 @@ static int read_index_list(struct sock *sk) put_unaligned_le16(count, &rp->num_controllers); i = 0; - list_for_each_entry(d, &hci_dev_list, list) { - if (test_and_clear_bit(HCI_AUTO_OFF, &d->flags)) - cancel_delayed_work(&d->power_off); + list_for_each(p, &hci_dev_list) { + struct hci_dev *d = list_entry(p, struct hci_dev, list); + + hci_del_off_timer(d); if (test_bit(HCI_SETUP, &d->flags)) continue; @@ -246,262 +165,6 @@ static int read_index_list(struct sock *sk) return err; } -static u32 get_supported_settings(struct hci_dev *hdev) -{ - u32 settings = 0; - - settings |= MGMT_SETTING_POWERED; - settings |= MGMT_SETTING_CONNECTABLE; - settings |= MGMT_SETTING_FAST_CONNECTABLE; - settings |= MGMT_SETTING_DISCOVERABLE; - settings |= 
MGMT_SETTING_PAIRABLE; - - if (hdev->features[6] & LMP_SIMPLE_PAIR) - settings |= MGMT_SETTING_SSP; - - if (!(hdev->features[4] & LMP_NO_BREDR)) { - settings |= MGMT_SETTING_BREDR; - settings |= MGMT_SETTING_LINK_SECURITY; - } - - if (hdev->features[4] & LMP_LE) - settings |= MGMT_SETTING_LE; - - return settings; -} - -static u32 get_current_settings(struct hci_dev *hdev) -{ - u32 settings = 0; - - if (test_bit(HCI_UP, &hdev->flags)) - settings |= MGMT_SETTING_POWERED; - else - return settings; - - if (test_bit(HCI_PSCAN, &hdev->flags)) - settings |= MGMT_SETTING_CONNECTABLE; - - if (test_bit(HCI_ISCAN, &hdev->flags)) - settings |= MGMT_SETTING_DISCOVERABLE; - - if (test_bit(HCI_PAIRABLE, &hdev->flags)) - settings |= MGMT_SETTING_PAIRABLE; - - if (!(hdev->features[4] & LMP_NO_BREDR)) - settings |= MGMT_SETTING_BREDR; - - if (hdev->extfeatures[0] & LMP_HOST_LE) - settings |= MGMT_SETTING_LE; - - if (test_bit(HCI_AUTH, &hdev->flags)) - settings |= MGMT_SETTING_LINK_SECURITY; - - if (hdev->ssp_mode > 0) - settings |= MGMT_SETTING_SSP; - - return settings; -} - -#define EIR_FLAGS 0x01 /* flags */ -#define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */ -#define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */ -#define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */ -#define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */ -#define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */ -#define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */ -#define EIR_NAME_SHORT 0x08 /* shortened local name */ -#define EIR_NAME_COMPLETE 0x09 /* complete local name */ -#define EIR_TX_POWER 0x0A /* transmit power level */ -#define EIR_DEVICE_ID 0x10 /* device ID */ - -#define PNP_INFO_SVCLASS_ID 0x1200 - -static u8 bluetooth_base_uuid[] = { - 0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80, - 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -}; - -static u16 get_uuid16(u8 *uuid128) -{ - u32 val; - int i; - - for (i = 0; i < 12; i++) { - if (bluetooth_base_uuid[i] != uuid128[i]) - return 0; - } - - memcpy(&val, &uuid128[12], 4); - - val = le32_to_cpu(val); - if (val > 0xffff) - return 0; - - return (u16) val; -} - -static void create_eir(struct hci_dev *hdev, u8 *data) -{ - u8 *ptr = data; - u16 eir_len = 0; - u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)]; - int i, truncated = 0; - struct bt_uuid *uuid; - size_t name_len; - - name_len = strlen(hdev->dev_name); - - if (name_len > 0) { - /* EIR Data type */ - if (name_len > 48) { - name_len = 48; - ptr[1] = EIR_NAME_SHORT; - } else - ptr[1] = EIR_NAME_COMPLETE; - - /* EIR Data length */ - ptr[0] = name_len + 1; - - memcpy(ptr + 2, hdev->dev_name, name_len); - - eir_len += (name_len + 2); - ptr += (name_len + 2); - } - - memset(uuid16_list, 0, sizeof(uuid16_list)); - - /* Group all UUID16 types */ - list_for_each_entry(uuid, &hdev->uuids, list) { - u16 uuid16; - - uuid16 = get_uuid16(uuid->uuid); - if (uuid16 == 0) - return; - - if (uuid16 < 0x1100) - continue; - - if (uuid16 == PNP_INFO_SVCLASS_ID) - continue; - - /* Stop if not enough space to put next UUID */ - if (eir_len + 2 + sizeof(u16) > HCI_MAX_EIR_LENGTH) { - truncated = 1; - break; - } - - /* Check for duplicates */ - for (i = 0; uuid16_list[i] != 0; i++) - if (uuid16_list[i] == uuid16) - break; - - if (uuid16_list[i] == 0) { - uuid16_list[i] = uuid16; - eir_len += sizeof(u16); - } - } - - if (uuid16_list[0] != 0) { - u8 *length = ptr; - - /* EIR Data type */ - ptr[1] = truncated ? 
EIR_UUID16_SOME : EIR_UUID16_ALL; - - ptr += 2; - eir_len += 2; - - for (i = 0; uuid16_list[i] != 0; i++) { - *ptr++ = (uuid16_list[i] & 0x00ff); - *ptr++ = (uuid16_list[i] & 0xff00) >> 8; - } - - /* EIR Data length */ - *length = (i * sizeof(u16)) + 1; - } -} - -static int update_eir(struct hci_dev *hdev) -{ - struct hci_cp_write_eir cp; - - if (!(hdev->features[6] & LMP_EXT_INQ)) - return 0; - - if (hdev->ssp_mode == 0) - return 0; - - if (test_bit(HCI_SERVICE_CACHE, &hdev->flags)) - return 0; - - memset(&cp, 0, sizeof(cp)); - - create_eir(hdev, cp.data); - - if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) - return 0; - - memcpy(hdev->eir, cp.data, sizeof(cp.data)); - - return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp); -} - -static u8 get_service_classes(struct hci_dev *hdev) -{ - struct bt_uuid *uuid; - u8 val = 0; - - list_for_each_entry(uuid, &hdev->uuids, list) - val |= uuid->svc_hint; - - return val; -} - -static int update_class(struct hci_dev *hdev) -{ - u8 cod[3]; - - BT_DBG("%s", hdev->name); - - if (test_bit(HCI_SERVICE_CACHE, &hdev->flags)) - return 0; - - cod[0] = hdev->minor_class; - cod[1] = hdev->major_class; - cod[2] = get_service_classes(hdev); - - if (memcmp(cod, hdev->dev_class, 3) == 0) - return 0; - - return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod); -} - -static void service_cache_off(struct work_struct *work) -{ - struct hci_dev *hdev = container_of(work, struct hci_dev, - service_cache.work); - - if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags)) - return; - - hci_dev_lock(hdev); - - update_eir(hdev); - update_class(hdev); - - hci_dev_unlock(hdev); -} - -static void mgmt_init_hdev(struct hci_dev *hdev) -{ - if (!test_and_set_bit(HCI_MGMT, &hdev->flags)) - INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off); - - if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->flags)) - schedule_delayed_work(&hdev->service_cache, - msecs_to_jiffies(SERVICE_CACHE_TIMEOUT)); -} - static int read_controller_info(struct sock *sk, u16 index) { struct mgmt_rp_read_info rp; @@ -511,33 +174,40 @@ static int read_controller_info(struct sock *sk, u16 index) hdev = hci_dev_get(index); if (!hdev) - return cmd_status(sk, index, MGMT_OP_READ_INFO, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_READ_INFO, ENODEV); - if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags)) - cancel_delayed_work_sync(&hdev->power_off); + hci_del_off_timer(hdev); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); - if (test_and_clear_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags)) - mgmt_init_hdev(hdev); + set_bit(HCI_MGMT, &hdev->flags); memset(&rp, 0, sizeof(rp)); - bacpy(&rp.bdaddr, &hdev->bdaddr); - - rp.version = hdev->hci_ver; + rp.type = hdev->dev_type; - put_unaligned_le16(hdev->manufacturer, &rp.manufacturer); + rp.powered = test_bit(HCI_UP, &hdev->flags); + rp.connectable = test_bit(HCI_PSCAN, &hdev->flags); + rp.discoverable = test_bit(HCI_ISCAN, &hdev->flags); + rp.pairable = test_bit(HCI_PSCAN, &hdev->flags); - rp.supported_settings = cpu_to_le32(get_supported_settings(hdev)); - rp.current_settings = cpu_to_le32(get_current_settings(hdev)); + if (test_bit(HCI_AUTH, &hdev->flags)) + rp.sec_mode = 3; + else if (hdev->ssp_mode > 0) + rp.sec_mode = 4; + else + rp.sec_mode = 2; + bacpy(&rp.bdaddr, &hdev->bdaddr); + memcpy(rp.features, hdev->features, 8); memcpy(rp.dev_class, hdev->dev_class, 3); + put_unaligned_le16(hdev->manufacturer, &rp.manufacturer); + rp.hci_ver = hdev->hci_ver; + put_unaligned_le16(hdev->hci_rev, &rp.hci_rev); 
memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name)); - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp)); @@ -551,8 +221,7 @@ static void mgmt_pending_free(struct pending_cmd *cmd) } static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, - struct hci_dev *hdev, - void *data, u16 len) + u16 index, void *data, u16 len) { struct pending_cmd *cmd; @@ -561,7 +230,7 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, return NULL; cmd->opcode = opcode; - cmd->index = hdev->id; + cmd->index = index; cmd->param = kmalloc(len, GFP_ATOMIC); if (!cmd->param) { @@ -575,36 +244,48 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, cmd->sk = sk; sock_hold(sk); - list_add(&cmd->list, &hdev->mgmt_pending); + list_add(&cmd->list, &cmd_list); return cmd; } -static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, +static void mgmt_pending_foreach(u16 opcode, int index, void (*cb)(struct pending_cmd *cmd, void *data), void *data) { struct list_head *p, *n; - list_for_each_safe(p, n, &hdev->mgmt_pending) { + list_for_each_safe(p, n, &cmd_list) { struct pending_cmd *cmd; cmd = list_entry(p, struct pending_cmd, list); - if (opcode > 0 && cmd->opcode != opcode) + if (cmd->opcode != opcode) + continue; + + if (index >= 0 && cmd->index != index) continue; cb(cmd, data); } } -static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev) +static struct pending_cmd *mgmt_pending_find(u16 opcode, int index) { - struct pending_cmd *cmd; + struct list_head *p; + + list_for_each(p, &cmd_list) { + struct pending_cmd *cmd; + + cmd = list_entry(p, struct pending_cmd, list); + + if (cmd->opcode != opcode) + continue; + + if (index >= 0 && cmd->index != index) + continue; - list_for_each_entry(cmd, &hdev->mgmt_pending, list) { - if (cmd->opcode == opcode) - return cmd; + return cmd; } return NULL; @@ -616,13 +297,6 @@ static void mgmt_pending_remove(struct pending_cmd *cmd) mgmt_pending_free(cmd); } -static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev) -{ - __le32 settings = cpu_to_le32(get_current_settings(hdev)); - - return cmd_complete(sk, hdev->id, opcode, &settings, sizeof(settings)); -} - static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct mgmt_mode *cp; @@ -635,43 +309,40 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len) BT_DBG("request for hci%u", index); if (len != sizeof(*cp)) - return cmd_status(sk, index, MGMT_OP_SET_POWERED, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_SET_POWERED, EINVAL); hdev = hci_dev_get(index); if (!hdev) - return cmd_status(sk, index, MGMT_OP_SET_POWERED, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); up = test_bit(HCI_UP, &hdev->flags); if ((cp->val && up) || (!cp->val && !up)) { - err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev); + err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EALREADY); goto failed; } - if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) { - err = cmd_status(sk, index, MGMT_OP_SET_POWERED, - MGMT_STATUS_BUSY); + if (mgmt_pending_find(MGMT_OP_SET_POWERED, index)) { + err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EBUSY); goto failed; } - cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len); + cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, index, data, len); 
if (!cmd) { err = -ENOMEM; goto failed; } if (cp->val) - schedule_work(&hdev->power_on); + queue_work(hdev->workqueue, &hdev->power_on); else - schedule_work(&hdev->power_off.work); + queue_work(hdev->workqueue, &hdev->power_off); err = 0; failed: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } @@ -679,7 +350,7 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len) static int set_discoverable(struct sock *sk, u16 index, unsigned char *data, u16 len) { - struct mgmt_cp_set_discoverable *cp; + struct mgmt_mode *cp; struct hci_dev *hdev; struct pending_cmd *cmd; u8 scan; @@ -690,36 +361,32 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data, BT_DBG("request for hci%u", index); if (len != sizeof(*cp)) - return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EINVAL); hdev = hci_dev_get(index); if (!hdev) - return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { - err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, - MGMT_STATUS_NOT_POWERED); + err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN); goto failed; } - if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || - mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { - err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, - MGMT_STATUS_BUSY); + if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) || + mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) { + err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EBUSY); goto failed; } if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) && test_bit(HCI_PSCAN, &hdev->flags)) { - err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev); + err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EALREADY); goto failed; } - cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len); + cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, index, data, len); if (!cmd) { err = -ENOMEM; goto failed; @@ -729,18 +396,13 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data, if (cp->val) scan |= SCAN_INQUIRY; - else - cancel_delayed_work(&hdev->discov_off); err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); if (err < 0) mgmt_pending_remove(cmd); - if (cp->val) - hdev->discov_timeout = get_unaligned_le16(&cp->timeout); - failed: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -760,35 +422,31 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data, BT_DBG("request for hci%u", index); if (len != sizeof(*cp)) - return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EINVAL); hdev = hci_dev_get(index); if (!hdev) - return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { - err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, - MGMT_STATUS_NOT_POWERED); + err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN); goto failed; } - if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || - mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { - err = cmd_status(sk, index, 
MGMT_OP_SET_CONNECTABLE, - MGMT_STATUS_BUSY); + if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) || + mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) { + err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EBUSY); goto failed; } if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) { - err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev); + err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EALREADY); goto failed; } - cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len); + cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, index, data, len); if (!cmd) { err = -ENOMEM; goto failed; @@ -804,14 +462,14 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data, mgmt_pending_remove(cmd); failed: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } -static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, - u16 data_len, struct sock *skip_sk) +static int mgmt_event(u16 event, u16 index, void *data, u16 data_len, + struct sock *skip_sk) { struct sk_buff *skb; struct mgmt_hdr *hdr; @@ -824,10 +482,7 @@ static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, hdr = (void *) skb_put(skb, sizeof(*hdr)); hdr->opcode = cpu_to_le16(event); - if (hdev) - hdr->index = cpu_to_le16(hdev->id); - else - hdr->index = cpu_to_le16(MGMT_INDEX_NONE); + hdr->index = cpu_to_le16(index); hdr->len = cpu_to_le16(data_len); if (data) @@ -839,12 +494,20 @@ static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, return 0; } +static int send_mode_rsp(struct sock *sk, u16 opcode, u16 index, u8 val) +{ + struct mgmt_mode rp; + + rp.val = val; + + return cmd_complete(sk, index, opcode, &rp, sizeof(rp)); +} + static int set_pairable(struct sock *sk, u16 index, unsigned char *data, u16 len) { - struct mgmt_mode *cp; + struct mgmt_mode *cp, ev; struct hci_dev *hdev; - __le32 ev; int err; cp = (void *) data; @@ -852,36 +515,211 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data, BT_DBG("request for hci%u", index); if (len != sizeof(*cp)) - return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, EINVAL); hdev = hci_dev_get(index); if (!hdev) - return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); if (cp->val) set_bit(HCI_PAIRABLE, &hdev->flags); else clear_bit(HCI_PAIRABLE, &hdev->flags); - err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev); + err = send_mode_rsp(sk, MGMT_OP_SET_PAIRABLE, index, cp->val); if (err < 0) goto failed; - ev = cpu_to_le32(get_current_settings(hdev)); + ev.val = cp->val; - err = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), sk); + err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk); failed: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } +#define EIR_FLAGS 0x01 /* flags */ +#define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */ +#define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */ +#define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */ +#define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */ +#define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */ +#define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */ +#define EIR_NAME_SHORT 0x08 /* shortened local name */ +#define EIR_NAME_COMPLETE 0x09 /* complete local name */ +#define EIR_TX_POWER 0x0A /* transmit power 
level */ +#define EIR_DEVICE_ID 0x10 /* device ID */ + +#define PNP_INFO_SVCLASS_ID 0x1200 + +static u8 bluetooth_base_uuid[] = { + 0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80, + 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static u16 get_uuid16(u8 *uuid128) +{ + u32 val; + int i; + + for (i = 0; i < 12; i++) { + if (bluetooth_base_uuid[i] != uuid128[i]) + return 0; + } + + memcpy(&val, &uuid128[12], 4); + + val = le32_to_cpu(val); + if (val > 0xffff) + return 0; + + return (u16) val; +} + +static void create_eir(struct hci_dev *hdev, u8 *data) +{ + u8 *ptr = data; + u16 eir_len = 0; + u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)]; + int i, truncated = 0; + struct list_head *p; + size_t name_len; + + name_len = strlen(hdev->dev_name); + + if (name_len > 0) { + /* EIR Data type */ + if (name_len > 48) { + name_len = 48; + ptr[1] = EIR_NAME_SHORT; + } else + ptr[1] = EIR_NAME_COMPLETE; + + /* EIR Data length */ + ptr[0] = name_len + 1; + + memcpy(ptr + 2, hdev->dev_name, name_len); + + eir_len += (name_len + 2); + ptr += (name_len + 2); + } + + memset(uuid16_list, 0, sizeof(uuid16_list)); + + /* Group all UUID16 types */ + list_for_each(p, &hdev->uuids) { + struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list); + u16 uuid16; + + uuid16 = get_uuid16(uuid->uuid); + if (uuid16 == 0) + return; + + if (uuid16 < 0x1100) + continue; + + if (uuid16 == PNP_INFO_SVCLASS_ID) + continue; + + /* Stop if not enough space to put next UUID */ + if (eir_len + 2 + sizeof(u16) > HCI_MAX_EIR_LENGTH) { + truncated = 1; + break; + } + + /* Check for duplicates */ + for (i = 0; uuid16_list[i] != 0; i++) + if (uuid16_list[i] == uuid16) + break; + + if (uuid16_list[i] == 0) { + uuid16_list[i] = uuid16; + eir_len += sizeof(u16); + } + } + + if (uuid16_list[0] != 0) { + u8 *length = ptr; + + /* EIR Data type */ + ptr[1] = truncated ? 
EIR_UUID16_SOME : EIR_UUID16_ALL; + + ptr += 2; + eir_len += 2; + + for (i = 0; uuid16_list[i] != 0; i++) { + *ptr++ = (uuid16_list[i] & 0x00ff); + *ptr++ = (uuid16_list[i] & 0xff00) >> 8; + } + + /* EIR Data length */ + *length = (i * sizeof(u16)) + 1; + } +} + +static int update_eir(struct hci_dev *hdev) +{ + struct hci_cp_write_eir cp; + + if (!(hdev->features[6] & LMP_EXT_INQ)) + return 0; + + if (hdev->ssp_mode == 0) + return 0; + + if (test_bit(HCI_SERVICE_CACHE, &hdev->flags)) + return 0; + + memset(&cp, 0, sizeof(cp)); + + create_eir(hdev, cp.data); + + if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) + return 0; + + memcpy(hdev->eir, cp.data, sizeof(cp.data)); + + return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp); +} + +static u8 get_service_classes(struct hci_dev *hdev) +{ + struct list_head *p; + u8 val = 0; + + list_for_each(p, &hdev->uuids) { + struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list); + + val |= uuid->svc_hint; + } + + return val; +} + +static int update_class(struct hci_dev *hdev) +{ + u8 cod[3]; + + BT_DBG("%s", hdev->name); + + if (test_bit(HCI_SERVICE_CACHE, &hdev->flags)) + return 0; + + cod[0] = hdev->minor_class; + cod[1] = hdev->major_class; + cod[2] = get_service_classes(hdev); + + if (memcmp(cod, hdev->dev_class, 3) == 0) + return 0; + + return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod); +} + static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct mgmt_cp_add_uuid *cp; @@ -894,15 +732,13 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) BT_DBG("request for hci%u", index); if (len != sizeof(*cp)) - return cmd_status(sk, index, MGMT_OP_ADD_UUID, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_ADD_UUID, EINVAL); hdev = hci_dev_get(index); if (!hdev) - return cmd_status(sk, index, MGMT_OP_ADD_UUID, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC); if (!uuid) { @@ -926,7 +762,7 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0); failed: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -945,15 +781,13 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) BT_DBG("request for hci%u", index); if (len != sizeof(*cp)) - return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, EINVAL); hdev = hci_dev_get(index); if (!hdev) - return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) { err = hci_uuids_clear(hdev); @@ -973,8 +807,7 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) } if (found == 0) { - err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID, - MGMT_STATUS_INVALID_PARAMS); + err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENOENT); goto unlock; } @@ -989,7 +822,7 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0); unlock: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -1007,71 +840,97 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned 
char *data, BT_DBG("request for hci%u", index); if (len != sizeof(*cp)) - return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, EINVAL); hdev = hci_dev_get(index); if (!hdev) - return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); hdev->major_class = cp->major; hdev->minor_class = cp->minor; - if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags)) { - hci_dev_unlock(hdev); - cancel_delayed_work_sync(&hdev->service_cache); - hci_dev_lock(hdev); - update_eir(hdev); - } - err = update_class(hdev); if (err == 0) err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0); - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } -static int load_link_keys(struct sock *sk, u16 index, unsigned char *data, - u16 len) +static int set_service_cache(struct sock *sk, u16 index, unsigned char *data, + u16 len) { struct hci_dev *hdev; - struct mgmt_cp_load_link_keys *cp; + struct mgmt_cp_set_service_cache *cp; + int err; + + cp = (void *) data; + + if (len != sizeof(*cp)) + return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, EINVAL); + + hdev = hci_dev_get(index); + if (!hdev) + return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV); + + hci_dev_lock_bh(hdev); + + BT_DBG("hci%u enable %d", index, cp->enable); + + if (cp->enable) { + set_bit(HCI_SERVICE_CACHE, &hdev->flags); + err = 0; + } else { + clear_bit(HCI_SERVICE_CACHE, &hdev->flags); + err = update_class(hdev); + if (err == 0) + err = update_eir(hdev); + } + + if (err == 0) + err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL, + 0); + + hci_dev_unlock_bh(hdev); + hci_dev_put(hdev); + + return err; +} + +static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len) +{ + struct hci_dev *hdev; + struct mgmt_cp_load_keys *cp; u16 key_count, expected_len; int i; cp = (void *) data; if (len < sizeof(*cp)) - return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS, - MGMT_STATUS_INVALID_PARAMS); + return -EINVAL; key_count = get_unaligned_le16(&cp->key_count); - expected_len = sizeof(*cp) + key_count * - sizeof(struct mgmt_link_key_info); + expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info); if (expected_len != len) { - BT_ERR("load_link_keys: expected %u bytes, got %u bytes", + BT_ERR("load_keys: expected %u bytes, got %u bytes", len, expected_len); - return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS, - MGMT_STATUS_INVALID_PARAMS); + return -EINVAL; } hdev = hci_dev_get(index); if (!hdev) - return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_LOAD_KEYS, ENODEV); BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys, key_count); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); hci_link_keys_clear(hdev); @@ -1083,84 +942,58 @@ static int load_link_keys(struct sock *sk, u16 index, unsigned char *data, clear_bit(HCI_DEBUG_KEYS, &hdev->flags); for (i = 0; i < key_count; i++) { - struct mgmt_link_key_info *key = &cp->keys[i]; + struct mgmt_key_info *key = &cp->keys[i]; hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type, key->pin_len); } - cmd_complete(sk, index, MGMT_OP_LOAD_LINK_KEYS, NULL, 0); - - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return 0; } -static int remove_keys(struct sock *sk, u16 index, unsigned char *data, - u16 len) 
+static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct hci_dev *hdev; - struct mgmt_cp_remove_keys *cp; - struct mgmt_rp_remove_keys rp; - struct hci_cp_disconnect dc; - struct pending_cmd *cmd; + struct mgmt_cp_remove_key *cp; struct hci_conn *conn; int err; cp = (void *) data; if (len != sizeof(*cp)) - return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, EINVAL); hdev = hci_dev_get(index); if (!hdev) - return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV); - hci_dev_lock(hdev); - - memset(&rp, 0, sizeof(rp)); - bacpy(&rp.bdaddr, &cp->bdaddr); - rp.status = MGMT_STATUS_FAILED; + hci_dev_lock_bh(hdev); err = hci_remove_link_key(hdev, &cp->bdaddr); if (err < 0) { - rp.status = MGMT_STATUS_NOT_PAIRED; + err = cmd_status(sk, index, MGMT_OP_REMOVE_KEY, -err); goto unlock; } - if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect) { - err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp, - sizeof(rp)); + err = 0; + + if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect) goto unlock; - } conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); - if (!conn) { - err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp, - sizeof(rp)); - goto unlock; - } + if (conn) { + struct hci_cp_disconnect dc; - cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_KEYS, hdev, cp, sizeof(*cp)); - if (!cmd) { - err = -ENOMEM; - goto unlock; + put_unaligned_le16(conn->handle, &dc.handle); + dc.reason = 0x13; /* Remote User Terminated Connection */ + err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc); } - put_unaligned_le16(conn->handle, &dc.handle); - dc.reason = 0x13; /* Remote User Terminated Connection */ - err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc); - if (err < 0) - mgmt_pending_remove(cmd); - unlock: - if (err < 0) - err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp, - sizeof(rp)); - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -1180,25 +1013,21 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len) cp = (void *) data; if (len != sizeof(*cp)) - return cmd_status(sk, index, MGMT_OP_DISCONNECT, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_DISCONNECT, EINVAL); hdev = hci_dev_get(index); if (!hdev) - return cmd_status(sk, index, MGMT_OP_DISCONNECT, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { - err = cmd_status(sk, index, MGMT_OP_DISCONNECT, - MGMT_STATUS_NOT_POWERED); + err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN); goto failed; } - if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) { - err = cmd_status(sk, index, MGMT_OP_DISCONNECT, - MGMT_STATUS_BUSY); + if (mgmt_pending_find(MGMT_OP_DISCONNECT, index)) { + err = cmd_status(sk, index, MGMT_OP_DISCONNECT, EBUSY); goto failed; } @@ -1207,12 +1036,11 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len) conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr); if (!conn) { - err = cmd_status(sk, index, MGMT_OP_DISCONNECT, - MGMT_STATUS_NOT_CONNECTED); + err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENOTCONN); goto failed; } - cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len); + cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, index, data, len); if (!cmd) { err = 
-ENOMEM; goto failed; @@ -1226,36 +1054,16 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len) mgmt_pending_remove(cmd); failed: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } -static u8 link_to_mgmt(u8 link_type, u8 addr_type) -{ - switch (link_type) { - case LE_LINK: - switch (addr_type) { - case ADDR_LE_DEV_PUBLIC: - return MGMT_ADDR_LE_PUBLIC; - case ADDR_LE_DEV_RANDOM: - return MGMT_ADDR_LE_RANDOM; - default: - return MGMT_ADDR_INVALID; - } - case ACL_LINK: - return MGMT_ADDR_BREDR; - default: - return MGMT_ADDR_INVALID; - } -} - static int get_connections(struct sock *sk, u16 index) { struct mgmt_rp_get_connections *rp; struct hci_dev *hdev; - struct hci_conn *c; struct list_head *p; size_t rp_len; u16 count; @@ -1265,17 +1073,16 @@ static int get_connections(struct sock *sk, u16 index) hdev = hci_dev_get(index); if (!hdev) - return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); count = 0; list_for_each(p, &hdev->conn_hash.list) { count++; } - rp_len = sizeof(*rp) + (count * sizeof(struct mgmt_addr_info)); + rp_len = sizeof(*rp) + (count * sizeof(bdaddr_t)); rp = kmalloc(rp_len, GFP_ATOMIC); if (!rp) { err = -ENOMEM; @@ -1285,22 +1092,17 @@ static int get_connections(struct sock *sk, u16 index) put_unaligned_le16(count, &rp->conn_count); i = 0; - list_for_each_entry(c, &hdev->conn_hash.list, list) { - bacpy(&rp->addr[i].bdaddr, &c->dst); - rp->addr[i].type = link_to_mgmt(c->type, c->dst_type); - if (rp->addr[i].type == MGMT_ADDR_INVALID) - continue; - i++; - } + list_for_each(p, &hdev->conn_hash.list) { + struct hci_conn *c = list_entry(p, struct hci_conn, list); - /* Recalculate length in case of filtered SCO connections, etc */ - rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info)); + bacpy(&rp->conn[i++], &c->dst); + } err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len); unlock: kfree(rp); - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } @@ -1311,7 +1113,7 @@ static int send_pin_code_neg_reply(struct sock *sk, u16 index, struct pending_cmd *cmd; int err; - cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp, + cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index, cp, sizeof(*cp)); if (!cmd) return -ENOMEM; @@ -1340,26 +1142,22 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data, cp = (void *) data; if (len != sizeof(*cp)) - return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, EINVAL); hdev = hci_dev_get(index); if (!hdev) - return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { - err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, - MGMT_STATUS_NOT_POWERED); + err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN); goto failed; } conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); if (!conn) { - err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, - MGMT_STATUS_NOT_CONNECTED); + err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENOTCONN); goto failed; } @@ -1371,12 +1169,12 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data, err = send_pin_code_neg_reply(sk, index, hdev, 
&ncp); if (err >= 0) err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, - MGMT_STATUS_INVALID_PARAMS); + EINVAL); goto failed; } - cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len); + cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len); if (!cmd) { err = -ENOMEM; goto failed; @@ -1391,7 +1189,7 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data, mgmt_pending_remove(cmd); failed: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -1410,25 +1208,25 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data, if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, - MGMT_STATUS_INVALID_PARAMS); + EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, - MGMT_STATUS_INVALID_PARAMS); + ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, - MGMT_STATUS_NOT_POWERED); + ENETDOWN); goto failed; } err = send_pin_code_neg_reply(sk, index, hdev, cp); failed: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -1445,22 +1243,20 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data, cp = (void *) data; if (len != sizeof(*cp)) - return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, EINVAL); hdev = hci_dev_get(index); if (!hdev) - return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); hdev->io_capability = cp->io_capability; BT_DBG("%s IO capability set to 0x%02x", hdev->name, hdev->io_capability); - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0); @@ -1469,12 +1265,19 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data, static inline struct pending_cmd *find_pairing(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; - struct pending_cmd *cmd; + struct list_head *p; + + list_for_each(p, &cmd_list) { + struct pending_cmd *cmd; + + cmd = list_entry(p, struct pending_cmd, list); - list_for_each_entry(cmd, &hdev->mgmt_pending, list) { if (cmd->opcode != MGMT_OP_PAIR_DEVICE) continue; + if (cmd->index != hdev->id) + continue; + if (cmd->user_data != conn) continue; @@ -1489,8 +1292,7 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status) struct mgmt_rp_pair_device rp; struct hci_conn *conn = cmd->user_data; - bacpy(&rp.addr.bdaddr, &conn->dst); - rp.addr.type = link_to_mgmt(conn->type, conn->dst_type); + bacpy(&rp.bdaddr, &conn->dst); rp.status = status; cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, &rp, sizeof(rp)); @@ -1512,18 +1314,20 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status) BT_DBG("status %u", status); cmd = find_pairing(conn); - if (!cmd) + if (!cmd) { BT_DBG("Unable to find a pending command"); - else - pairing_complete(cmd, status); + return; + } + + pairing_complete(cmd, status); } static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct hci_dev *hdev; struct mgmt_cp_pair_device *cp; - struct mgmt_rp_pair_device rp; struct pending_cmd *cmd; + struct adv_entry *entry; u8 sec_level, auth_type; struct hci_conn *conn; int err; @@ 
-1533,15 +1337,13 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len) cp = (void *) data; if (len != sizeof(*cp)) - return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EINVAL); hdev = hci_dev_get(index); if (!hdev) - return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); sec_level = BT_SECURITY_MEDIUM; if (cp->io_cap == 0x03) @@ -1549,33 +1351,26 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len) else auth_type = HCI_AT_DEDICATED_BONDING_MITM; - if (cp->addr.type == MGMT_ADDR_BREDR) - conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, sec_level, + entry = hci_find_adv_entry(hdev, &cp->bdaddr); + if (entry) + conn = hci_connect(hdev, LE_LINK, &cp->bdaddr, sec_level, auth_type); else - conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, sec_level, + conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level, auth_type); - memset(&rp, 0, sizeof(rp)); - bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); - rp.addr.type = cp->addr.type; - if (IS_ERR(conn)) { - rp.status = -PTR_ERR(conn); - err = cmd_complete(sk, index, MGMT_OP_PAIR_DEVICE, - &rp, sizeof(rp)); + err = PTR_ERR(conn); goto unlock; } if (conn->connect_cfm_cb) { hci_conn_put(conn); - rp.status = EBUSY; - err = cmd_complete(sk, index, MGMT_OP_PAIR_DEVICE, - &rp, sizeof(rp)); + err = cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EBUSY); goto unlock; } - cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len); + cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, index, data, len); if (!cmd) { err = -ENOMEM; hci_conn_put(conn); @@ -1583,7 +1378,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len) } /* For LE, just connecting isn't a proof that the pairing finished */ - if (cp->addr.type == MGMT_ADDR_BREDR) + if (!entry) conn->connect_cfm_cb = pairing_complete_cb; conn->security_cfm_cb = pairing_complete_cb; @@ -1598,151 +1393,62 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len) err = 0; unlock: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } -static int user_pairing_resp(struct sock *sk, u16 index, bdaddr_t *bdaddr, - u16 mgmt_op, u16 hci_op, __le32 passkey) +static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data, + u16 len, int success) { + struct mgmt_cp_user_confirm_reply *cp = (void *) data; + u16 mgmt_op, hci_op; struct pending_cmd *cmd; struct hci_dev *hdev; - struct hci_conn *conn; int err; - hdev = hci_dev_get(index); - if (!hdev) - return cmd_status(sk, index, mgmt_op, - MGMT_STATUS_INVALID_PARAMS); - - hci_dev_lock(hdev); + BT_DBG(""); - if (!test_bit(HCI_UP, &hdev->flags)) { - err = cmd_status(sk, index, mgmt_op, MGMT_STATUS_NOT_POWERED); - goto done; + if (success) { + mgmt_op = MGMT_OP_USER_CONFIRM_REPLY; + hci_op = HCI_OP_USER_CONFIRM_REPLY; + } else { + mgmt_op = MGMT_OP_USER_CONFIRM_NEG_REPLY; + hci_op = HCI_OP_USER_CONFIRM_NEG_REPLY; } - /* - * Check for an existing ACL link, if present pair via - * HCI commands. - * - * If no ACL link is present, check for an LE link and if - * present, pair via the SMP engine. - * - * If neither ACL nor LE links are present, fail with error. 
- */ - conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr); - if (!conn) { - conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr); - if (!conn) { - err = cmd_status(sk, index, mgmt_op, - MGMT_STATUS_NOT_CONNECTED); - goto done; - } + if (len != sizeof(*cp)) + return cmd_status(sk, index, mgmt_op, EINVAL); - /* Continue with pairing via SMP */ - err = smp_user_confirm_reply(conn, mgmt_op, passkey); + hdev = hci_dev_get(index); + if (!hdev) + return cmd_status(sk, index, mgmt_op, ENODEV); - if (!err) - err = cmd_status(sk, index, mgmt_op, - MGMT_STATUS_SUCCESS); - else - err = cmd_status(sk, index, mgmt_op, - MGMT_STATUS_FAILED); + hci_dev_lock_bh(hdev); - goto done; + if (!test_bit(HCI_UP, &hdev->flags)) { + err = cmd_status(sk, index, mgmt_op, ENETDOWN); + goto failed; } - cmd = mgmt_pending_add(sk, mgmt_op, hdev, bdaddr, sizeof(*bdaddr)); + cmd = mgmt_pending_add(sk, mgmt_op, index, data, len); if (!cmd) { err = -ENOMEM; - goto done; + goto failed; } - /* Continue with pairing via HCI */ - if (hci_op == HCI_OP_USER_PASSKEY_REPLY) { - struct hci_cp_user_passkey_reply cp; - - bacpy(&cp.bdaddr, bdaddr); - cp.passkey = passkey; - err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp); - } else - err = hci_send_cmd(hdev, hci_op, sizeof(*bdaddr), bdaddr); - + err = hci_send_cmd(hdev, hci_op, sizeof(cp->bdaddr), &cp->bdaddr); if (err < 0) mgmt_pending_remove(cmd); -done: - hci_dev_unlock(hdev); +failed: + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } -static int user_confirm_reply(struct sock *sk, u16 index, void *data, u16 len) -{ - struct mgmt_cp_user_confirm_reply *cp = (void *) data; - - BT_DBG(""); - - if (len != sizeof(*cp)) - return cmd_status(sk, index, MGMT_OP_USER_CONFIRM_REPLY, - MGMT_STATUS_INVALID_PARAMS); - - return user_pairing_resp(sk, index, &cp->bdaddr, - MGMT_OP_USER_CONFIRM_REPLY, - HCI_OP_USER_CONFIRM_REPLY, 0); -} - -static int user_confirm_neg_reply(struct sock *sk, u16 index, void *data, - u16 len) -{ - struct mgmt_cp_user_confirm_neg_reply *cp = data; - - BT_DBG(""); - - if (len != sizeof(*cp)) - return cmd_status(sk, index, MGMT_OP_USER_CONFIRM_NEG_REPLY, - MGMT_STATUS_INVALID_PARAMS); - - return user_pairing_resp(sk, index, &cp->bdaddr, - MGMT_OP_USER_CONFIRM_NEG_REPLY, - HCI_OP_USER_CONFIRM_NEG_REPLY, 0); -} - -static int user_passkey_reply(struct sock *sk, u16 index, void *data, u16 len) -{ - struct mgmt_cp_user_passkey_reply *cp = (void *) data; - - BT_DBG(""); - - if (len != sizeof(*cp)) - return cmd_status(sk, index, MGMT_OP_USER_PASSKEY_REPLY, - EINVAL); - - return user_pairing_resp(sk, index, &cp->bdaddr, - MGMT_OP_USER_PASSKEY_REPLY, - HCI_OP_USER_PASSKEY_REPLY, cp->passkey); -} - -static int user_passkey_neg_reply(struct sock *sk, u16 index, void *data, - u16 len) -{ - struct mgmt_cp_user_passkey_neg_reply *cp = (void *) data; - - BT_DBG(""); - - if (len != sizeof(*cp)) - return cmd_status(sk, index, MGMT_OP_USER_PASSKEY_NEG_REPLY, - EINVAL); - - return user_pairing_resp(sk, index, &cp->bdaddr, - MGMT_OP_USER_PASSKEY_NEG_REPLY, - HCI_OP_USER_PASSKEY_NEG_REPLY, 0); -} - static int set_local_name(struct sock *sk, u16 index, unsigned char *data, u16 len) { @@ -1755,17 +1461,15 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data, BT_DBG(""); if (len != sizeof(*mgmt_cp)) - return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, EINVAL); hdev = hci_dev_get(index); if (!hdev) - return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, - 
MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); - cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len); + cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len); if (!cmd) { err = -ENOMEM; goto failed; @@ -1778,7 +1482,7 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data, mgmt_pending_remove(cmd); failed: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -1795,29 +1499,28 @@ static int read_local_oob_data(struct sock *sk, u16 index) hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, - MGMT_STATUS_INVALID_PARAMS); + ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, - MGMT_STATUS_NOT_POWERED); + ENETDOWN); goto unlock; } if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) { err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, - MGMT_STATUS_NOT_SUPPORTED); + EOPNOTSUPP); goto unlock; } - if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) { - err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, - MGMT_STATUS_BUSY); + if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index)) { + err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, EBUSY); goto unlock; } - cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0); + cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, index, NULL, 0); if (!cmd) { err = -ENOMEM; goto unlock; @@ -1828,7 +1531,7 @@ static int read_local_oob_data(struct sock *sk, u16 index) mgmt_pending_remove(cmd); unlock: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -1845,25 +1548,24 @@ static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data, if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, - MGMT_STATUS_INVALID_PARAMS); + EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, - MGMT_STATUS_INVALID_PARAMS); + ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash, cp->randomizer); if (err < 0) - err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, - MGMT_STATUS_FAILED); + err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, -err); else err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL, 0); - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -1880,68 +1582,62 @@ static int remove_remote_oob_data(struct sock *sk, u16 index, if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, - MGMT_STATUS_INVALID_PARAMS); + EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, - MGMT_STATUS_INVALID_PARAMS); + ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); err = hci_remove_remote_oob_data(hdev, &cp->bdaddr); if (err < 0) err = cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, - MGMT_STATUS_INVALID_PARAMS); + -err); else err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, NULL, 0); - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } -static int start_discovery(struct sock *sk, u16 index, - unsigned char *data, u16 len) +static int start_discovery(struct sock *sk, u16 index) { - struct mgmt_cp_start_discovery *cp = (void *) data; + u8 lap[3] = { 0x33, 0x8b, 
0x9e }; + struct hci_cp_inquiry cp; struct pending_cmd *cmd; struct hci_dev *hdev; int err; BT_DBG("hci%u", index); - if (len != sizeof(*cp)) - return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, - MGMT_STATUS_INVALID_PARAMS); - hdev = hci_dev_get(index); if (!hdev) - return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, - MGMT_STATUS_INVALID_PARAMS); - - hci_dev_lock(hdev); + return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, ENODEV); - if (!test_bit(HCI_UP, &hdev->flags)) { - err = cmd_status(sk, index, MGMT_OP_START_DISCOVERY, - MGMT_STATUS_NOT_POWERED); - goto failed; - } + hci_dev_lock_bh(hdev); - cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0); + cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, index, NULL, 0); if (!cmd) { err = -ENOMEM; goto failed; } - err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR); + memset(&cp, 0, sizeof(cp)); + memcpy(&cp.lap, lap, 3); + cp.length = 0x08; + cp.num_rsp = 0x00; + + err = hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp); if (err < 0) mgmt_pending_remove(cmd); failed: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -1957,23 +1653,22 @@ static int stop_discovery(struct sock *sk, u16 index) hdev = hci_dev_get(index); if (!hdev) - return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, - MGMT_STATUS_INVALID_PARAMS); + return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); - cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0); + cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, index, NULL, 0); if (!cmd) { err = -ENOMEM; goto failed; } - err = hci_cancel_inquiry(hdev); + err = hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL); if (err < 0) mgmt_pending_remove(cmd); failed: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -1983,6 +1678,7 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct hci_dev *hdev; + struct pending_cmd *cmd; struct mgmt_cp_block_device *cp = (void *) data; int err; @@ -1990,24 +1686,33 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data, if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, - MGMT_STATUS_INVALID_PARAMS); + EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, - MGMT_STATUS_INVALID_PARAMS); + ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); + + cmd = mgmt_pending_add(sk, MGMT_OP_BLOCK_DEVICE, index, NULL, 0); + if (!cmd) { + err = -ENOMEM; + goto failed; + } err = hci_blacklist_add(hdev, &cp->bdaddr); + if (err < 0) - err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, - MGMT_STATUS_FAILED); + err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, -err); else err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE, NULL, 0); - hci_dev_unlock(hdev); + mgmt_pending_remove(cmd); + +failed: + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -2017,6 +1722,7 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct hci_dev *hdev; + struct pending_cmd *cmd; struct mgmt_cp_unblock_device *cp = (void *) data; int err; @@ -2024,25 +1730,33 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data, if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, - MGMT_STATUS_INVALID_PARAMS); + EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, - MGMT_STATUS_INVALID_PARAMS); + ENODEV); - hci_dev_lock(hdev); + 
hci_dev_lock_bh(hdev); + + cmd = mgmt_pending_add(sk, MGMT_OP_UNBLOCK_DEVICE, index, NULL, 0); + if (!cmd) { + err = -ENOMEM; + goto failed; + } err = hci_blacklist_del(hdev, &cp->bdaddr); if (err < 0) - err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, - MGMT_STATUS_INVALID_PARAMS); + err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, -err); else err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE, NULL, 0); - hci_dev_unlock(hdev); + mgmt_pending_remove(cmd); + +failed: + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -2052,7 +1766,7 @@ static int set_fast_connectable(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct hci_dev *hdev; - struct mgmt_mode *cp = (void *) data; + struct mgmt_cp_set_fast_connectable *cp = (void *) data; struct hci_cp_write_page_scan_activity acp; u8 type; int err; @@ -2061,16 +1775,16 @@ static int set_fast_connectable(struct sock *sk, u16 index, if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE, - MGMT_STATUS_INVALID_PARAMS); + EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE, - MGMT_STATUS_INVALID_PARAMS); + ENODEV); hci_dev_lock(hdev); - if (cp->val) { + if (cp->enable) { type = PAGE_SCAN_TYPE_INTERLACED; acp.interval = 0x0024; /* 22.5 msec page scan interval */ } else { @@ -2084,14 +1798,14 @@ static int set_fast_connectable(struct sock *sk, u16 index, sizeof(acp), &acp); if (err < 0) { err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE, - MGMT_STATUS_FAILED); + -err); goto done; } err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type); if (err < 0) { err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE, - MGMT_STATUS_FAILED); + -err); goto done; } @@ -2154,10 +1868,6 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) case MGMT_OP_SET_CONNECTABLE: err = set_connectable(sk, index, buf + sizeof(*hdr), len); break; - case MGMT_OP_SET_FAST_CONNECTABLE: - err = set_fast_connectable(sk, index, buf + sizeof(*hdr), - len); - break; case MGMT_OP_SET_PAIRABLE: err = set_pairable(sk, index, buf + sizeof(*hdr), len); break; @@ -2170,11 +1880,14 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) case MGMT_OP_SET_DEV_CLASS: err = set_dev_class(sk, index, buf + sizeof(*hdr), len); break; - case MGMT_OP_LOAD_LINK_KEYS: - err = load_link_keys(sk, index, buf + sizeof(*hdr), len); + case MGMT_OP_SET_SERVICE_CACHE: + err = set_service_cache(sk, index, buf + sizeof(*hdr), len); + break; + case MGMT_OP_LOAD_KEYS: + err = load_keys(sk, index, buf + sizeof(*hdr), len); break; - case MGMT_OP_REMOVE_KEYS: - err = remove_keys(sk, index, buf + sizeof(*hdr), len); + case MGMT_OP_REMOVE_KEY: + err = remove_key(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_DISCONNECT: err = disconnect(sk, index, buf + sizeof(*hdr), len); @@ -2195,18 +1908,10 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) err = pair_device(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_USER_CONFIRM_REPLY: - err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len); + err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 1); break; case MGMT_OP_USER_CONFIRM_NEG_REPLY: - err = user_confirm_neg_reply(sk, index, buf + sizeof(*hdr), - len); - break; - case MGMT_OP_USER_PASSKEY_REPLY: - err = user_passkey_reply(sk, index, buf + sizeof(*hdr), len); - break; - case MGMT_OP_USER_PASSKEY_NEG_REPLY: - err = user_passkey_neg_reply(sk, index, buf + sizeof(*hdr), - len); + err = user_confirm_reply(sk, 
index, buf + sizeof(*hdr), len, 0); break; case MGMT_OP_SET_LOCAL_NAME: err = set_local_name(sk, index, buf + sizeof(*hdr), len); @@ -2222,7 +1927,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) len); break; case MGMT_OP_START_DISCOVERY: - err = start_discovery(sk, index, buf + sizeof(*hdr), len); + err = start_discovery(sk, index); break; case MGMT_OP_STOP_DISCOVERY: err = stop_discovery(sk, index); @@ -2233,10 +1938,13 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) case MGMT_OP_UNBLOCK_DEVICE: err = unblock_device(sk, index, buf + sizeof(*hdr), len); break; + case MGMT_OP_SET_FAST_CONNECTABLE: + err = set_fast_connectable(sk, index, buf + sizeof(*hdr), + len); + break; default: BT_DBG("Unknown op %u", opcode); - err = cmd_status(sk, index, opcode, - MGMT_STATUS_UNKNOWN_COMMAND); + err = cmd_status(sk, index, opcode, 0x01); break; } @@ -2250,39 +1958,30 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) return err; } -static void cmd_status_rsp(struct pending_cmd *cmd, void *data) -{ - u8 *status = data; - - cmd_status(cmd->sk, cmd->index, cmd->opcode, *status); - mgmt_pending_remove(cmd); -} - -int mgmt_index_added(struct hci_dev *hdev) +int mgmt_index_added(u16 index) { - return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL); + return mgmt_event(MGMT_EV_INDEX_ADDED, index, NULL, 0, NULL); } -int mgmt_index_removed(struct hci_dev *hdev) +int mgmt_index_removed(u16 index) { - u8 status = ENODEV; - - mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); - - return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL); + return mgmt_event(MGMT_EV_INDEX_REMOVED, index, NULL, 0, NULL); } struct cmd_lookup { u8 val; struct sock *sk; - struct hci_dev *hdev; }; -static void settings_rsp(struct pending_cmd *cmd, void *data) +static void mode_rsp(struct pending_cmd *cmd, void *data) { + struct mgmt_mode *cp = cmd->param; struct cmd_lookup *match = data; - send_settings_rsp(cmd->sk, cmd->opcode, match->hdev); + if (cp->val != match->val) + return; + + send_mode_rsp(cmd->sk, cmd->opcode, cmd->index, cp->val); list_del(&cmd->list); @@ -2294,23 +1993,17 @@ static void settings_rsp(struct pending_cmd *cmd, void *data) mgmt_pending_free(cmd); } -int mgmt_powered(struct hci_dev *hdev, u8 powered) +int mgmt_powered(u16 index, u8 powered) { - struct cmd_lookup match = { powered, NULL, hdev }; - __le32 ev; + struct mgmt_mode ev; + struct cmd_lookup match = { powered, NULL }; int ret; - mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); - - if (!powered) { - u8 status = ENETDOWN; - mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); - } + mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match); - ev = cpu_to_le32(get_current_settings(hdev)); + ev.val = powered; - ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), - match.sk); + ret = mgmt_event(MGMT_EV_POWERED, index, &ev, sizeof(ev), match.sk); if (match.sk) sock_put(match.sk); @@ -2318,36 +2011,36 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered) return ret; } -int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable) +int mgmt_discoverable(u16 index, u8 discoverable) { - struct cmd_lookup match = { discoverable, NULL, hdev }; - __le32 ev; + struct mgmt_mode ev; + struct cmd_lookup match = { discoverable, NULL }; int ret; - mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp, &match); + mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index, mode_rsp, &match); - ev = cpu_to_le32(get_current_settings(hdev)); + 
ev.val = discoverable; - ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), + ret = mgmt_event(MGMT_EV_DISCOVERABLE, index, &ev, sizeof(ev), match.sk); + if (match.sk) sock_put(match.sk); return ret; } -int mgmt_connectable(struct hci_dev *hdev, u8 connectable) +int mgmt_connectable(u16 index, u8 connectable) { - __le32 ev; - struct cmd_lookup match = { connectable, NULL, hdev }; + struct mgmt_mode ev; + struct cmd_lookup match = { connectable, NULL }; int ret; - mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, settings_rsp, - &match); + mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, index, mode_rsp, &match); - ev = cpu_to_le32(get_current_settings(hdev)); + ev.val = connectable; - ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), match.sk); + ret = mgmt_event(MGMT_EV_CONNECTABLE, index, &ev, sizeof(ev), match.sk); if (match.sk) sock_put(match.sk); @@ -2355,25 +2048,9 @@ int mgmt_connectable(struct hci_dev *hdev, u8 connectable) return ret; } -int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status) -{ - u8 mgmt_err = mgmt_status(status); - - if (scan & SCAN_PAGE) - mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, - cmd_status_rsp, &mgmt_err); - - if (scan & SCAN_INQUIRY) - mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, - cmd_status_rsp, &mgmt_err); - - return 0; -} - -int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, - u8 persistent) +int mgmt_new_key(u16 index, struct link_key *key, u8 persistent) { - struct mgmt_ev_new_link_key ev; + struct mgmt_ev_new_key ev; memset(&ev, 0, sizeof(ev)); @@ -2383,18 +2060,17 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, memcpy(ev.key.val, key->val, 16); ev.key.pin_len = key->pin_len; - return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL); + return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL); } -int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, - u8 addr_type) +int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 link_type) { - struct mgmt_addr_info ev; + struct mgmt_ev_connected ev; bacpy(&ev.bdaddr, bdaddr); - ev.type = link_to_mgmt(link_type, addr_type); + ev.link_type = link_type; - return mgmt_event(MGMT_EV_CONNECTED, hdev, &ev, sizeof(ev), NULL); + return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL); } static void disconnect_rsp(struct pending_cmd *cmd, void *data) @@ -2404,7 +2080,6 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data) struct mgmt_rp_disconnect rp; bacpy(&rp.bdaddr, &cp->bdaddr); - rp.status = 0; cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, &rp, sizeof(rp)); @@ -2414,110 +2089,75 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data) mgmt_pending_remove(cmd); } -static void remove_keys_rsp(struct pending_cmd *cmd, void *data) -{ - u8 *status = data; - struct mgmt_cp_remove_keys *cp = cmd->param; - struct mgmt_rp_remove_keys rp; - - memset(&rp, 0, sizeof(rp)); - bacpy(&rp.bdaddr, &cp->bdaddr); - if (status != NULL) - rp.status = *status; - - cmd_complete(cmd->sk, cmd->index, MGMT_OP_REMOVE_KEYS, &rp, - sizeof(rp)); - - mgmt_pending_remove(cmd); -} - -int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, - u8 addr_type) +int mgmt_disconnected(u16 index, bdaddr_t *bdaddr) { - struct mgmt_addr_info ev; + struct mgmt_ev_disconnected ev; struct sock *sk = NULL; int err; - mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk); + mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk); bacpy(&ev.bdaddr, 
bdaddr); - ev.type = link_to_mgmt(link_type, addr_type); - err = mgmt_event(MGMT_EV_DISCONNECTED, hdev, &ev, sizeof(ev), sk); + err = mgmt_event(MGMT_EV_DISCONNECTED, index, &ev, sizeof(ev), sk); if (sk) sock_put(sk); - mgmt_pending_foreach(MGMT_OP_REMOVE_KEYS, hdev, remove_keys_rsp, NULL); - return err; } -int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status) +int mgmt_disconnect_failed(u16 index) { struct pending_cmd *cmd; - u8 mgmt_err = mgmt_status(status); int err; - cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev); + cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, index); if (!cmd) return -ENOENT; - if (bdaddr) { - struct mgmt_rp_disconnect rp; - - bacpy(&rp.bdaddr, bdaddr); - rp.status = status; - - err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, - &rp, sizeof(rp)); - } else - err = cmd_status(cmd->sk, hdev->id, MGMT_OP_DISCONNECT, - mgmt_err); + err = cmd_status(cmd->sk, index, MGMT_OP_DISCONNECT, EIO); mgmt_pending_remove(cmd); return err; } -int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, - u8 addr_type, u8 status) +int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status) { struct mgmt_ev_connect_failed ev; - bacpy(&ev.addr.bdaddr, bdaddr); - ev.addr.type = link_to_mgmt(link_type, addr_type); - ev.status = mgmt_status(status); + bacpy(&ev.bdaddr, bdaddr); + ev.status = status; - return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL); + return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL); } -int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure) +int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr, u8 secure) { struct mgmt_ev_pin_code_request ev; bacpy(&ev.bdaddr, bdaddr); ev.secure = secure; - return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), + return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev), NULL); } -int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, - u8 status) +int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) { struct pending_cmd *cmd; struct mgmt_rp_pin_code_reply rp; int err; - cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev); + cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, index); if (!cmd) return -ENOENT; bacpy(&rp.bdaddr, bdaddr); - rp.status = mgmt_status(status); + rp.status = status; - err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, &rp, + err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_REPLY, &rp, sizeof(rp)); mgmt_pending_remove(cmd); @@ -2525,21 +2165,20 @@ int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, return err; } -int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, - u8 status) +int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) { struct pending_cmd *cmd; struct mgmt_rp_pin_code_reply rp; int err; - cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev); + cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, index); if (!cmd) return -ENOENT; bacpy(&rp.bdaddr, bdaddr); - rp.status = mgmt_status(status); + rp.status = status; - err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY, &rp, + err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, &rp, sizeof(rp)); mgmt_pending_remove(cmd); @@ -2547,119 +2186,97 @@ int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, return err; } -int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr, - __le32 value, u8 confirm_hint) +int 
mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value, + u8 confirm_hint) { struct mgmt_ev_user_confirm_request ev; - BT_DBG("%s", hdev->name); + BT_DBG("hci%u", index); bacpy(&ev.bdaddr, bdaddr); ev.confirm_hint = confirm_hint; put_unaligned_le32(value, &ev.value); - return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev), - NULL); -} - -int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr) -{ - struct mgmt_ev_user_passkey_request ev; - - BT_DBG("%s", hdev->name); - - bacpy(&ev.bdaddr, bdaddr); - - return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev), + return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev), NULL); } -static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, - u8 status, u8 opcode) +static int confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status, + u8 opcode) { struct pending_cmd *cmd; struct mgmt_rp_user_confirm_reply rp; int err; - cmd = mgmt_pending_find(opcode, hdev); + cmd = mgmt_pending_find(opcode, index); if (!cmd) return -ENOENT; bacpy(&rp.bdaddr, bdaddr); - rp.status = mgmt_status(status); - err = cmd_complete(cmd->sk, hdev->id, opcode, &rp, sizeof(rp)); + rp.status = status; + err = cmd_complete(cmd->sk, index, opcode, &rp, sizeof(rp)); mgmt_pending_remove(cmd); return err; } -int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, - u8 status) +int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) { - return user_pairing_resp_complete(hdev, bdaddr, status, + return confirm_reply_complete(index, bdaddr, status, MGMT_OP_USER_CONFIRM_REPLY); } -int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, - bdaddr_t *bdaddr, u8 status) +int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) { - return user_pairing_resp_complete(hdev, bdaddr, status, + return confirm_reply_complete(index, bdaddr, status, MGMT_OP_USER_CONFIRM_NEG_REPLY); } -int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, - u8 status) -{ - return user_pairing_resp_complete(hdev, bdaddr, status, - MGMT_OP_USER_PASSKEY_REPLY); -} - -int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, - bdaddr_t *bdaddr, u8 status) -{ - return user_pairing_resp_complete(hdev, bdaddr, status, - MGMT_OP_USER_PASSKEY_NEG_REPLY); -} - -int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status) +int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status) { struct mgmt_ev_auth_failed ev; bacpy(&ev.bdaddr, bdaddr); - ev.status = mgmt_status(status); + ev.status = status; - return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL); + return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL); } -int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status) +int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status) { struct pending_cmd *cmd; + struct hci_dev *hdev; struct mgmt_cp_set_local_name ev; int err; memset(&ev, 0, sizeof(ev)); memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); - cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); + cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, index); if (!cmd) goto send_event; if (status) { - err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, - mgmt_status(status)); + err = cmd_status(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, EIO); goto failed; } - update_eir(hdev); + hdev = hci_dev_get(index); + if (hdev) { + hci_dev_lock_bh(hdev); + update_eir(hdev); + hci_dev_unlock_bh(hdev); + hci_dev_put(hdev); + } - err = 
cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, &ev, + err = cmd_complete(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, &ev, sizeof(ev)); if (err < 0) goto failed; send_event: - err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev), + err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, index, &ev, sizeof(ev), cmd ? cmd->sk : NULL); failed: @@ -2668,31 +2285,29 @@ int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status) return err; } -int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash, - u8 *randomizer, u8 status) +int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer, + u8 status) { struct pending_cmd *cmd; int err; - BT_DBG("%s status %u", hdev->name, status); + BT_DBG("hci%u status %u", index, status); - cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev); + cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index); if (!cmd) return -ENOENT; if (status) { - err = cmd_status(cmd->sk, hdev->id, - MGMT_OP_READ_LOCAL_OOB_DATA, - mgmt_status(status)); + err = cmd_status(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, + EIO); } else { struct mgmt_rp_read_local_oob_data rp; memcpy(rp.hash, hash, sizeof(rp.hash)); memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer)); - err = cmd_complete(cmd->sk, hdev->id, - MGMT_OP_READ_LOCAL_OOB_DATA, - &rp, sizeof(rp)); + err = cmd_complete(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, + &rp, sizeof(rp)); } mgmt_pending_remove(cmd); @@ -2700,15 +2315,14 @@ int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash, return err; } -int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, - u8 addr_type, u8 *dev_class, s8 rssi, u8 *eir) +int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi, + u8 *eir) { struct mgmt_ev_device_found ev; memset(&ev, 0, sizeof(ev)); - bacpy(&ev.addr.bdaddr, bdaddr); - ev.addr.type = link_to_mgmt(link_type, addr_type); + bacpy(&ev.bdaddr, bdaddr); ev.rssi = rssi; if (eir) @@ -2717,10 +2331,10 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, if (dev_class) memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class)); - return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, &ev, sizeof(ev), NULL); + return mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL); } -int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name) +int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name) { struct mgmt_ev_remote_name ev; @@ -2729,79 +2343,37 @@ int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name) bacpy(&ev.bdaddr, bdaddr); memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); - return mgmt_event(MGMT_EV_REMOTE_NAME, hdev, &ev, sizeof(ev), NULL); -} - -int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status) -{ - struct pending_cmd *cmd; - int err; - - cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev); - if (!cmd) - return -ENOENT; - - err = cmd_status(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status)); - mgmt_pending_remove(cmd); - - return err; -} - -int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status) -{ - struct pending_cmd *cmd; - int err; - - cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev); - if (!cmd) - return -ENOENT; - - err = cmd_status(cmd->sk, hdev->id, cmd->opcode, status); - mgmt_pending_remove(cmd); - - return err; + return mgmt_event(MGMT_EV_REMOTE_NAME, index, &ev, sizeof(ev), NULL); } -int mgmt_discovering(struct hci_dev *hdev, u8 discovering) +int mgmt_discovering(u16 index, u8 discovering) { - struct pending_cmd 
*cmd; - - if (discovering) - cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev); - else - cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev); - - if (cmd != NULL) { - cmd_complete(cmd->sk, hdev->id, cmd->opcode, NULL, 0); - mgmt_pending_remove(cmd); - } - - return mgmt_event(MGMT_EV_DISCOVERING, hdev, &discovering, + return mgmt_event(MGMT_EV_DISCOVERING, index, &discovering, sizeof(discovering), NULL); } -int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr) +int mgmt_device_blocked(u16 index, bdaddr_t *bdaddr) { struct pending_cmd *cmd; struct mgmt_ev_device_blocked ev; - cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev); + cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, index); bacpy(&ev.bdaddr, bdaddr); - return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev), - cmd ? cmd->sk : NULL); + return mgmt_event(MGMT_EV_DEVICE_BLOCKED, index, &ev, sizeof(ev), + cmd ? cmd->sk : NULL); } -int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr) +int mgmt_device_unblocked(u16 index, bdaddr_t *bdaddr) { struct pending_cmd *cmd; struct mgmt_ev_device_unblocked ev; - cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev); + cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, index); bacpy(&ev.bdaddr, bdaddr); - return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev), - cmd ? cmd->sk : NULL); + return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, index, &ev, sizeof(ev), + cmd ? cmd->sk : NULL); } diff --git a/trunk/net/bluetooth/rfcomm/Kconfig b/trunk/net/bluetooth/rfcomm/Kconfig index 22e718b554e4..405a0e61e7dc 100644 --- a/trunk/net/bluetooth/rfcomm/Kconfig +++ b/trunk/net/bluetooth/rfcomm/Kconfig @@ -1,6 +1,6 @@ config BT_RFCOMM tristate "RFCOMM protocol support" - depends on BT + depends on BT && BT_L2CAP help RFCOMM provides connection oriented stream transport. 
RFCOMM support is required for Dialup Networking, OBEX and other Bluetooth diff --git a/trunk/net/bluetooth/rfcomm/core.c b/trunk/net/bluetooth/rfcomm/core.c index 501649bf5596..2d28dfe98389 100644 --- a/trunk/net/bluetooth/rfcomm/core.c +++ b/trunk/net/bluetooth/rfcomm/core.c @@ -51,8 +51,8 @@ #define VERSION "1.11" -static bool disable_cfc; -static bool l2cap_ertm; +static int disable_cfc; +static int l2cap_ertm; static int channel_mtu = -1; static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU; @@ -377,11 +377,13 @@ static void rfcomm_dlc_unlink(struct rfcomm_dlc *d) static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci) { struct rfcomm_dlc *d; + struct list_head *p; - list_for_each_entry(d, &s->dlcs, list) + list_for_each(p, &s->dlcs) { + d = list_entry(p, struct rfcomm_dlc, list); if (d->dlci == dlci) return d; - + } return NULL; } @@ -749,6 +751,7 @@ void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *d /* ---- RFCOMM frame sending ---- */ static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len) { + struct socket *sock = s->sock; struct kvec iv = { data, len }; struct msghdr msg; @@ -756,14 +759,7 @@ static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len) memset(&msg, 0, sizeof(msg)); - return kernel_sendmsg(s->sock, &msg, &iv, 1, len); -} - -static int rfcomm_send_cmd(struct rfcomm_session *s, struct rfcomm_cmd *cmd) -{ - BT_DBG("%p cmd %u", s, cmd->ctrl); - - return rfcomm_send_frame(s, (void *) cmd, sizeof(*cmd)); + return kernel_sendmsg(sock, &msg, &iv, 1, len); } static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci) @@ -777,7 +773,7 @@ static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci) cmd.len = __len8(0); cmd.fcs = __fcs2((u8 *) &cmd); - return rfcomm_send_cmd(s, &cmd); + return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd)); } static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci) @@ -791,7 +787,7 @@ static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci) cmd.len = __len8(0); cmd.fcs = __fcs2((u8 *) &cmd); - return rfcomm_send_cmd(s, &cmd); + return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd)); } static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci) @@ -805,7 +801,7 @@ static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci) cmd.len = __len8(0); cmd.fcs = __fcs2((u8 *) &cmd); - return rfcomm_send_cmd(s, &cmd); + return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd)); } static int rfcomm_queue_disc(struct rfcomm_dlc *d) @@ -841,7 +837,7 @@ static int rfcomm_send_dm(struct rfcomm_session *s, u8 dlci) cmd.len = __len8(0); cmd.fcs = __fcs2((u8 *) &cmd); - return rfcomm_send_cmd(s, &cmd); + return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd)); } static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type) @@ -2125,13 +2121,15 @@ static struct hci_cb rfcomm_cb = { static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x) { struct rfcomm_session *s; + struct list_head *pp, *p; rfcomm_lock(); - list_for_each_entry(s, &session_list, list) { - struct rfcomm_dlc *d; - list_for_each_entry(d, &s->dlcs, list) { + list_for_each(p, &session_list) { + s = list_entry(p, struct rfcomm_session, list); + list_for_each(pp, &s->dlcs) { struct sock *sk = s->sock->sk; + struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list); seq_printf(f, "%s %s %ld %d %d %d %d\n", batostr(&bt_sk(sk)->src), diff --git a/trunk/net/bluetooth/rfcomm/sock.c b/trunk/net/bluetooth/rfcomm/sock.c index aea2bdd1510f..5417f6127323 100644 --- 
a/trunk/net/bluetooth/rfcomm/sock.c +++ b/trunk/net/bluetooth/rfcomm/sock.c @@ -600,8 +600,6 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock, break; } - skb->priority = sk->sk_priority; - err = rfcomm_dlc_send(d, skb); if (err < 0) { kfree_skb(skb); diff --git a/trunk/net/bluetooth/rfcomm/tty.c b/trunk/net/bluetooth/rfcomm/tty.c index fa8f4de53b99..c258796313e0 100644 --- a/trunk/net/bluetooth/rfcomm/tty.c +++ b/trunk/net/bluetooth/rfcomm/tty.c @@ -34,7 +34,6 @@ #include #include #include -#include #include #include @@ -66,7 +65,7 @@ struct rfcomm_dev { struct rfcomm_dlc *dlc; struct tty_struct *tty; wait_queue_head_t wait; - struct work_struct wakeup_task; + struct tasklet_struct wakeup_task; struct device *tty_dev; @@ -82,7 +81,7 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb); static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err); static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig); -static void rfcomm_tty_wakeup(struct work_struct *work); +static void rfcomm_tty_wakeup(unsigned long arg); /* ---- Device functions ---- */ static void rfcomm_dev_destruct(struct rfcomm_dev *dev) @@ -134,10 +133,13 @@ static inline void rfcomm_dev_put(struct rfcomm_dev *dev) static struct rfcomm_dev *__rfcomm_dev_get(int id) { struct rfcomm_dev *dev; + struct list_head *p; - list_for_each_entry(dev, &rfcomm_dev_list, list) + list_for_each(p, &rfcomm_dev_list) { + dev = list_entry(p, struct rfcomm_dev, list); if (dev->id == id) return dev; + } return NULL; } @@ -195,7 +197,7 @@ static DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL); static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) { - struct rfcomm_dev *dev, *entry; + struct rfcomm_dev *dev; struct list_head *head = &rfcomm_dev_list, *p; int err = 0; @@ -210,8 +212,8 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) if (req->dev_id < 0) { dev->id = 0; - list_for_each_entry(entry, &rfcomm_dev_list, list) { - if (entry->id != dev->id) + list_for_each(p, &rfcomm_dev_list) { + if (list_entry(p, struct rfcomm_dev, list)->id != dev->id) break; dev->id++; @@ -220,7 +222,9 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) } else { dev->id = req->dev_id; - list_for_each_entry(entry, &rfcomm_dev_list, list) { + list_for_each(p, &rfcomm_dev_list) { + struct rfcomm_dev *entry = list_entry(p, struct rfcomm_dev, list); + if (entry->id == dev->id) { err = -EADDRINUSE; goto out; @@ -253,7 +257,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) atomic_set(&dev->opened, 0); init_waitqueue_head(&dev->wait); - INIT_WORK(&dev->wakeup_task, rfcomm_tty_wakeup); + tasklet_init(&dev->wakeup_task, rfcomm_tty_wakeup, (unsigned long) dev); skb_queue_head_init(&dev->pending); @@ -347,7 +351,7 @@ static void rfcomm_wfree(struct sk_buff *skb) struct rfcomm_dev *dev = (void *) skb->sk; atomic_sub(skb->truesize, &dev->wmem_alloc); if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags)) - queue_work(system_nrt_wq, &dev->wakeup_task); + tasklet_schedule(&dev->wakeup_task); rfcomm_dev_put(dev); } @@ -451,9 +455,9 @@ static int rfcomm_release_dev(void __user *arg) static int rfcomm_get_dev_list(void __user *arg) { - struct rfcomm_dev *dev; struct rfcomm_dev_list_req *dl; struct rfcomm_dev_info *di; + struct list_head *p; int n = 0, size, err; u16 dev_num; @@ -475,7 +479,8 @@ static int rfcomm_get_dev_list(void __user *arg) read_lock_bh(&rfcomm_dev_lock); - list_for_each_entry(dev, 
&rfcomm_dev_list, list) { + list_for_each(p, &rfcomm_dev_list) { + struct rfcomm_dev *dev = list_entry(p, struct rfcomm_dev, list); if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) continue; (di + n)->id = dev->id; @@ -630,10 +635,9 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig) } /* ---- TTY functions ---- */ -static void rfcomm_tty_wakeup(struct work_struct *work) +static void rfcomm_tty_wakeup(unsigned long arg) { - struct rfcomm_dev *dev = container_of(work, struct rfcomm_dev, - wakeup_task); + struct rfcomm_dev *dev = (void *) arg; struct tty_struct *tty = dev->tty; if (!tty) return; @@ -758,7 +762,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp) rfcomm_dlc_close(dev->dlc, 0); clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags); - cancel_work_sync(&dev->wakeup_task); + tasklet_kill(&dev->wakeup_task); rfcomm_dlc_lock(dev->dlc); tty->driver_data = NULL; @@ -1151,11 +1155,9 @@ static const struct tty_operations rfcomm_ops = { int __init rfcomm_init_ttys(void) { - int error; - rfcomm_tty_driver = alloc_tty_driver(RFCOMM_TTY_PORTS); if (!rfcomm_tty_driver) - return -ENOMEM; + return -1; rfcomm_tty_driver->owner = THIS_MODULE; rfcomm_tty_driver->driver_name = "rfcomm"; @@ -1170,11 +1172,10 @@ int __init rfcomm_init_ttys(void) rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON; tty_set_operations(rfcomm_tty_driver, &rfcomm_ops); - error = tty_register_driver(rfcomm_tty_driver); - if (error) { + if (tty_register_driver(rfcomm_tty_driver)) { BT_ERR("Can't register RFCOMM TTY driver"); put_tty_driver(rfcomm_tty_driver); - return error; + return -1; } BT_INFO("RFCOMM TTY layer initialized"); diff --git a/trunk/net/bluetooth/sco.c b/trunk/net/bluetooth/sco.c index 5dc2f2126fac..a324b009e34b 100644 --- a/trunk/net/bluetooth/sco.c +++ b/trunk/net/bluetooth/sco.c @@ -51,7 +51,7 @@ #include #include -static bool disable_esco; +static int disable_esco; static const struct proto_ops sco_sock_ops; @@ -189,7 +189,7 @@ static int sco_connect(struct sock *sk) if (!hdev) return -EHOSTUNREACH; - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); if (lmp_esco_capable(hdev) && !disable_esco) type = ESCO_LINK; @@ -225,7 +225,7 @@ static int sco_connect(struct sock *sk) } done: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } @@ -893,12 +893,15 @@ static void sco_conn_ready(struct sco_conn *conn) } /* ----- SCO interface with lower layer (HCI) ----- */ -int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) +static int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type) { register struct sock *sk; struct hlist_node *node; int lm = 0; + if (type != SCO_LINK && type != ESCO_LINK) + return -EINVAL; + BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr)); /* Find listening sockets */ @@ -918,9 +921,13 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) return lm; } -int sco_connect_cfm(struct hci_conn *hcon, __u8 status) +static int sco_connect_cfm(struct hci_conn *hcon, __u8 status) { BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); + + if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) + return -EINVAL; + if (!status) { struct sco_conn *conn; @@ -933,15 +940,19 @@ int sco_connect_cfm(struct hci_conn *hcon, __u8 status) return 0; } -int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason) +static int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason) { BT_DBG("hcon %p reason %d", hcon, reason); + if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) + return -EINVAL; + 
sco_conn_del(hcon, bt_to_errno(reason)); + return 0; } -int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb) +static int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb) { struct sco_conn *conn = hcon->sco_data; @@ -1017,6 +1028,15 @@ static const struct net_proto_family sco_sock_family_ops = { .create = sco_sock_create, }; +static struct hci_proto sco_hci_proto = { + .name = "SCO", + .id = HCI_PROTO_SCO, + .connect_ind = sco_connect_ind, + .connect_cfm = sco_connect_cfm, + .disconn_cfm = sco_disconn_cfm, + .recv_scodata = sco_recv_scodata +}; + int __init sco_init(void) { int err; @@ -1031,6 +1051,13 @@ int __init sco_init(void) goto error; } + err = hci_register_proto(&sco_hci_proto); + if (err < 0) { + BT_ERR("SCO protocol registration failed"); + bt_sock_unregister(BTPROTO_SCO); + goto error; + } + if (bt_debugfs) { sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs, NULL, &sco_debugfs_fops); @@ -1054,6 +1081,9 @@ void __exit sco_exit(void) if (bt_sock_unregister(BTPROTO_SCO) < 0) BT_ERR("SCO socket unregistration failed"); + if (hci_unregister_proto(&sco_hci_proto) < 0) + BT_ERR("SCO protocol unregistration failed"); + proto_unregister(&sco_proto); } diff --git a/trunk/net/bluetooth/smp.c b/trunk/net/bluetooth/smp.c index 32c47de30344..759b63572641 100644 --- a/trunk/net/bluetooth/smp.c +++ b/trunk/net/bluetooth/smp.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -182,53 +181,30 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data) if (!skb) return; - skb->priority = HCI_PRIO_MAX; - hci_send_acl(conn->hchan, skb, 0); + hci_send_acl(conn->hcon, skb, 0); - cancel_delayed_work_sync(&conn->security_timer); - schedule_delayed_work(&conn->security_timer, + mod_timer(&conn->security_timer, jiffies + msecs_to_jiffies(SMP_TIMEOUT)); } -static __u8 authreq_to_seclevel(__u8 authreq) -{ - if (authreq & SMP_AUTH_MITM) - return BT_SECURITY_HIGH; - else - return BT_SECURITY_MEDIUM; -} - -static __u8 seclevel_to_authreq(__u8 sec_level) -{ - switch (sec_level) { - case BT_SECURITY_HIGH: - return SMP_AUTH_MITM | SMP_AUTH_BONDING; - case BT_SECURITY_MEDIUM: - return SMP_AUTH_BONDING; - default: - return SMP_AUTH_NONE; - } -} - static void build_pairing_cmd(struct l2cap_conn *conn, struct smp_cmd_pairing *req, struct smp_cmd_pairing *rsp, __u8 authreq) { - u8 dist_keys = 0; + u8 dist_keys; + dist_keys = 0; if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->flags)) { dist_keys = SMP_DIST_ENC_KEY; authreq |= SMP_AUTH_BONDING; - } else { - authreq &= ~SMP_AUTH_BONDING; } if (rsp == NULL) { req->io_capability = conn->hcon->io_capability; req->oob_flag = SMP_OOB_NOT_PRESENT; req->max_key_size = SMP_MAX_ENC_KEY_SIZE; - req->init_key_dist = 0; + req->init_key_dist = dist_keys; req->resp_key_dist = dist_keys; req->auth_req = authreq; return; @@ -237,7 +213,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn, rsp->io_capability = conn->hcon->io_capability; rsp->oob_flag = SMP_OOB_NOT_PRESENT; rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE; - rsp->init_key_dist = 0; + rsp->init_key_dist = req->init_key_dist & dist_keys; rsp->resp_key_dist = req->resp_key_dist & dist_keys; rsp->auth_req = authreq; } @@ -255,107 +231,6 @@ static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size) return 0; } -static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send) -{ - if (send) - smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), - &reason); - - clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->pend); - 
mgmt_auth_failed(conn->hcon->hdev, conn->dst, reason); - cancel_delayed_work_sync(&conn->security_timer); - smp_chan_destroy(conn); -} - -#define JUST_WORKS 0x00 -#define JUST_CFM 0x01 -#define REQ_PASSKEY 0x02 -#define CFM_PASSKEY 0x03 -#define REQ_OOB 0x04 -#define OVERLAP 0xFF - -static const u8 gen_method[5][5] = { - { JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY }, - { JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY }, - { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY }, - { JUST_WORKS, JUST_CFM, JUST_WORKS, JUST_WORKS, JUST_CFM }, - { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP }, -}; - -static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth, - u8 local_io, u8 remote_io) -{ - struct hci_conn *hcon = conn->hcon; - struct smp_chan *smp = conn->smp_chan; - u8 method; - u32 passkey = 0; - int ret = 0; - - /* Initialize key for JUST WORKS */ - memset(smp->tk, 0, sizeof(smp->tk)); - clear_bit(SMP_FLAG_TK_VALID, &smp->smp_flags); - - BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io); - - /* If neither side wants MITM, use JUST WORKS */ - /* If either side has unknown io_caps, use JUST WORKS */ - /* Otherwise, look up method from the table */ - if (!(auth & SMP_AUTH_MITM) || - local_io > SMP_IO_KEYBOARD_DISPLAY || - remote_io > SMP_IO_KEYBOARD_DISPLAY) - method = JUST_WORKS; - else - method = gen_method[local_io][remote_io]; - - /* If not bonding, don't ask user to confirm a Zero TK */ - if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM) - method = JUST_WORKS; - - /* If Just Works, Continue with Zero TK */ - if (method == JUST_WORKS) { - set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags); - return 0; - } - - /* Not Just Works/Confirm results in MITM Authentication */ - if (method != JUST_CFM) - set_bit(SMP_FLAG_MITM_AUTH, &smp->smp_flags); - - /* If both devices have Keyoard-Display I/O, the master - * Confirms and the slave Enters the passkey. - */ - if (method == OVERLAP) { - if (hcon->link_mode & HCI_LM_MASTER) - method = CFM_PASSKEY; - else - method = REQ_PASSKEY; - } - - /* Generate random passkey. Not valid until confirmed. 
*/ - if (method == CFM_PASSKEY) { - u8 key[16]; - - memset(key, 0, sizeof(key)); - get_random_bytes(&passkey, sizeof(passkey)); - passkey %= 1000000; - put_unaligned_le32(passkey, key); - swap128(key, smp->tk); - BT_DBG("PassKey: %d", passkey); - } - - hci_dev_lock(hcon->hdev); - - if (method == REQ_PASSKEY) - ret = mgmt_user_passkey_request(hcon->hdev, conn->dst); - else - ret = mgmt_user_confirm_request(hcon->hdev, conn->dst, - cpu_to_le32(passkey), 0); - - hci_dev_unlock(hcon->hdev); - - return ret; -} - static void confirm_work(struct work_struct *work) { struct smp_chan *smp = container_of(work, struct smp_chan, confirm); @@ -388,15 +263,14 @@ static void confirm_work(struct work_struct *work) goto error; } - clear_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags); - swap128(res, cp.confirm_val); smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp); return; error: - smp_failure(conn, reason, 1); + smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason); + smp_chan_destroy(conn); } static void random_work(struct work_struct *work) @@ -479,7 +353,8 @@ static void random_work(struct work_struct *work) return; error: - smp_failure(conn, reason, 1); + smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason); + smp_chan_destroy(conn); } static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) @@ -495,7 +370,6 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) smp->conn = conn; conn->smp_chan = smp; - conn->hcon->smp_conn = conn; hci_conn_hold(conn->hcon); @@ -504,73 +378,19 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) void smp_chan_destroy(struct l2cap_conn *conn) { - struct smp_chan *smp = conn->smp_chan; - - clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend); - - if (smp->tfm) - crypto_free_blkcipher(smp->tfm); - - kfree(smp); - conn->smp_chan = NULL; - conn->hcon->smp_conn = NULL; + kfree(conn->smp_chan); hci_conn_put(conn->hcon); } -int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey) -{ - struct l2cap_conn *conn = hcon->smp_conn; - struct smp_chan *smp; - u32 value; - u8 key[16]; - - BT_DBG(""); - - if (!conn) - return -ENOTCONN; - - smp = conn->smp_chan; - - switch (mgmt_op) { - case MGMT_OP_USER_PASSKEY_REPLY: - value = le32_to_cpu(passkey); - memset(key, 0, sizeof(key)); - BT_DBG("PassKey: %d", value); - put_unaligned_le32(value, key); - swap128(key, smp->tk); - /* Fall Through */ - case MGMT_OP_USER_CONFIRM_REPLY: - set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags); - break; - case MGMT_OP_USER_PASSKEY_NEG_REPLY: - case MGMT_OP_USER_CONFIRM_NEG_REPLY: - smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED, 1); - return 0; - default: - smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED, 1); - return -EOPNOTSUPP; - } - - /* If it is our turn to send Pairing Confirm, do so now */ - if (test_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags)) - queue_work(hcon->hdev->workqueue, &smp->confirm); - - return 0; -} - static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_pairing rsp, *req = (void *) skb->data; struct smp_chan *smp; u8 key_size; - u8 auth = SMP_AUTH_NONE; int ret; BT_DBG("conn %p", conn); - if (conn->hcon->link_mode & HCI_LM_MASTER) - return SMP_CMD_NOTSUPP; - if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend)) smp = smp_chan_create(conn); @@ -580,16 +400,19 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) memcpy(&smp->preq[1], req, sizeof(*req)); skb_pull(skb, sizeof(*req)); - /* We didn't start the pairing, so match remote */ - 
if (req->auth_req & SMP_AUTH_BONDING) - auth = req->auth_req; + if (req->oob_flag) + return SMP_OOB_NOT_AVAIL; - build_pairing_cmd(conn, req, &rsp, auth); + /* We didn't start the pairing, so no requirements */ + build_pairing_cmd(conn, req, &rsp, SMP_AUTH_NONE); key_size = min(req->max_key_size, rsp.max_key_size); if (check_enc_key_size(conn, key_size)) return SMP_ENC_KEY_SIZE; + /* Just works */ + memset(smp->tk, 0, sizeof(smp->tk)); + ret = smp_rand(smp->prnd); if (ret) return SMP_UNSPECIFIED; @@ -599,11 +422,6 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp); - /* Request setup of TK */ - ret = tk_request(conn, 0, auth, rsp.io_capability, req->io_capability); - if (ret) - return SMP_UNSPECIFIED; - return 0; } @@ -612,14 +430,11 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) struct smp_cmd_pairing *req, *rsp = (void *) skb->data; struct smp_chan *smp = conn->smp_chan; struct hci_dev *hdev = conn->hcon->hdev; - u8 key_size, auth = SMP_AUTH_NONE; + u8 key_size; int ret; BT_DBG("conn %p", conn); - if (!(conn->hcon->link_mode & HCI_LM_MASTER)) - return SMP_CMD_NOTSUPP; - skb_pull(skb, sizeof(*rsp)); req = (void *) &smp->preq[1]; @@ -628,6 +443,12 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) if (check_enc_key_size(conn, key_size)) return SMP_ENC_KEY_SIZE; + if (rsp->oob_flag) + return SMP_OOB_NOT_AVAIL; + + /* Just works */ + memset(smp->tk, 0, sizeof(smp->tk)); + ret = smp_rand(smp->prnd); if (ret) return SMP_UNSPECIFIED; @@ -635,22 +456,6 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) smp->prsp[0] = SMP_CMD_PAIRING_RSP; memcpy(&smp->prsp[1], rsp, sizeof(*rsp)); - if ((req->auth_req & SMP_AUTH_BONDING) && - (rsp->auth_req & SMP_AUTH_BONDING)) - auth = SMP_AUTH_BONDING; - - auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM; - - ret = tk_request(conn, 0, auth, rsp->io_capability, req->io_capability); - if (ret) - return SMP_UNSPECIFIED; - - set_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags); - - /* Can't compose response until we have been confirmed */ - if (!test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags)) - return 0; - queue_work(hdev->workqueue, &smp->confirm); return 0; @@ -672,10 +477,8 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb) swap128(smp->prnd, random); smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random), random); - } else if (test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags)) { - queue_work(hdev->workqueue, &smp->confirm); } else { - set_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags); + queue_work(hdev->workqueue, &smp->confirm); } return 0; @@ -728,7 +531,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) BT_DBG("conn %p", conn); - hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req); + hcon->pending_sec_level = BT_SECURITY_MEDIUM; if (smp_ltk_encrypt(conn)) return 0; @@ -755,7 +558,6 @@ int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level) { struct hci_conn *hcon = conn->hcon; struct smp_chan *smp = conn->smp_chan; - __u8 authreq; BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level); @@ -776,22 +578,18 @@ int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level) return 0; smp = smp_chan_create(conn); - if (!smp) - return 1; - - authreq = seclevel_to_authreq(sec_level); if (hcon->link_mode & HCI_LM_MASTER) { struct smp_cmd_pairing cp; - build_pairing_cmd(conn, &cp, NULL, authreq); + 
build_pairing_cmd(conn, &cp, NULL, SMP_AUTH_NONE); smp->preq[0] = SMP_CMD_PAIRING_REQ; memcpy(&smp->preq[1], &cp, sizeof(cp)); smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp); } else { struct smp_cmd_security_req cp; - cp.auth_req = authreq; + cp.auth_req = SMP_AUTH_NONE; smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp); } @@ -820,7 +618,7 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb) skb_pull(skb, sizeof(*rp)); - hci_add_ltk(conn->hcon->hdev, 1, conn->dst, smp->smp_key_size, + hci_add_ltk(conn->hcon->hdev, 1, conn->src, smp->smp_key_size, rp->ediv, rp->rand, smp->tk); smp_distribute_keys(conn, 1); @@ -848,7 +646,6 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) break; case SMP_CMD_PAIRING_FAIL: - smp_failure(conn, skb->data[0], 0); reason = 0; err = -EPERM; break; @@ -894,7 +691,8 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) done: if (reason) - smp_failure(conn, reason, 1); + smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), + &reason); kfree_skb(skb); return err; @@ -983,7 +781,7 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force) if (conn->hcon->out || force) { clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend); - cancel_delayed_work_sync(&conn->security_timer); + del_timer(&conn->security_timer); smp_chan_destroy(conn); } diff --git a/trunk/net/bridge/br.c b/trunk/net/bridge/br.c index ba780cc8e515..f20c4fd915a8 100644 --- a/trunk/net/bridge/br.c +++ b/trunk/net/bridge/br.c @@ -62,7 +62,7 @@ static int __init br_init(void) brioctl_set(br_ioctl_deviceless_stub); -#if IS_ENABLED(CONFIG_ATM_LANE) +#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE) br_fdb_test_addr_hook = br_fdb_test_addr; #endif @@ -93,7 +93,7 @@ static void __exit br_deinit(void) rcu_barrier(); /* Wait for completion of call_rcu()'s */ br_netfilter_fini(); -#if IS_ENABLED(CONFIG_ATM_LANE) +#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE) br_fdb_test_addr_hook = NULL; #endif diff --git a/trunk/net/bridge/br_device.c b/trunk/net/bridge/br_device.c index 71773b014e0c..feb77ea7b58e 100644 --- a/trunk/net/bridge/br_device.c +++ b/trunk/net/bridge/br_device.c @@ -170,11 +170,8 @@ static int br_set_mac_address(struct net_device *dev, void *p) return -EINVAL; spin_lock_bh(&br->lock); - if (compare_ether_addr(dev->dev_addr, addr->sa_data)) { - memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); - br_fdb_change_mac_address(br, addr->sa_data); - br_stp_change_bridge_id(br, addr->sa_data); - } + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + br_stp_change_bridge_id(br, addr->sa_data); br->flags |= BR_SET_MAC_ADDR; spin_unlock_bh(&br->lock); @@ -189,8 +186,7 @@ static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info) strcpy(info->bus_info, "N/A"); } -static netdev_features_t br_fix_features(struct net_device *dev, - netdev_features_t features) +static u32 br_fix_features(struct net_device *dev, u32 features) { struct net_bridge *br = netdev_priv(dev); @@ -345,10 +341,10 @@ void br_dev_setup(struct net_device *dev) dev->priv_flags = IFF_EBRIDGE; dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | - NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | NETIF_F_LLTX | + NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_TX; dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | - NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | + NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_HW_VLAN_TX; br->dev = dev; diff --git a/trunk/net/bridge/br_fdb.c 
b/trunk/net/bridge/br_fdb.c index f963f6b1884f..c8e7861b88b0 100644 --- a/trunk/net/bridge/br_fdb.c +++ b/trunk/net/bridge/br_fdb.c @@ -28,8 +28,7 @@ static struct kmem_cache *br_fdb_cache __read_mostly; static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, const unsigned char *addr); -static void fdb_notify(struct net_bridge *br, - const struct net_bridge_fdb_entry *, int); +static void fdb_notify(const struct net_bridge_fdb_entry *, int); static u32 fdb_salt __read_mostly; @@ -81,10 +80,10 @@ static void fdb_rcu_free(struct rcu_head *head) kmem_cache_free(br_fdb_cache, ent); } -static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f) +static inline void fdb_delete(struct net_bridge_fdb_entry *f) { + fdb_notify(f, RTM_DELNEIGH); hlist_del_rcu(&f->hlist); - fdb_notify(br, f, RTM_DELNEIGH); call_rcu(&f->rcu, fdb_rcu_free); } @@ -115,7 +114,7 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr) } /* delete old one */ - fdb_delete(br, f); + fdb_delete(f); goto insert; } } @@ -127,18 +126,6 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr) spin_unlock_bh(&br->hash_lock); } -void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr) -{ - struct net_bridge_fdb_entry *f; - - /* If old entry was unassociated with any port, then delete it. */ - f = __br_fdb_get(br, br->dev->dev_addr); - if (f && f->is_local && !f->dst) - fdb_delete(br, f); - - fdb_insert(br, NULL, newaddr); -} - void br_fdb_cleanup(unsigned long _data) { struct net_bridge *br = (struct net_bridge *)_data; @@ -157,7 +144,7 @@ void br_fdb_cleanup(unsigned long _data) continue; this_timer = f->updated + delay; if (time_before_eq(this_timer, jiffies)) - fdb_delete(br, f); + fdb_delete(f); else if (time_before(this_timer, next_timer)) next_timer = this_timer; } @@ -178,7 +165,7 @@ void br_fdb_flush(struct net_bridge *br) struct hlist_node *h, *n; hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) { if (!f->is_static) - fdb_delete(br, f); + fdb_delete(f); } } spin_unlock_bh(&br->hash_lock); @@ -222,7 +209,7 @@ void br_fdb_delete_by_port(struct net_bridge *br, } } - fdb_delete(br, f); + fdb_delete(f); skip_delete: ; } } @@ -247,7 +234,7 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br, return NULL; } -#if IS_ENABLED(CONFIG_ATM_LANE) +#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE) /* Interface used by ATM LANE hook to test * if an addr is on some other bridge port */ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr) @@ -262,7 +249,7 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr) ret = 0; else { fdb = __br_fdb_get(port->br, addr); - ret = fdb && fdb->dst && fdb->dst->dev != dev && + ret = fdb && fdb->dst->dev != dev && fdb->dst->state == BR_STATE_FORWARDING; } rcu_read_unlock(); @@ -294,10 +281,6 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf, if (has_expired(br, f)) continue; - /* ignore pseudo entry for local MAC address */ - if (!f->dst) - continue; - if (skip) { --skip; continue; @@ -364,6 +347,7 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head, fdb->is_static = 0; fdb->updated = fdb->used = jiffies; hlist_add_head_rcu(&fdb->hlist, head); + fdb_notify(fdb, RTM_NEWNEIGH); } return fdb; } @@ -387,7 +371,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, br_warn(br, "adding interface %s with same address " "as a received packet\n", source->dev->name); - fdb_delete(br, fdb); + 
fdb_delete(fdb); } fdb = fdb_create(head, source, addr); @@ -395,7 +379,6 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, return -ENOMEM; fdb->is_local = fdb->is_static = 1; - fdb_notify(br, fdb, RTM_NEWNEIGH); return 0; } @@ -441,11 +424,9 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, } } else { spin_lock(&br->hash_lock); - if (likely(!fdb_find(head, addr))) { - fdb = fdb_create(head, source, addr); - if (fdb) - fdb_notify(br, fdb, RTM_NEWNEIGH); - } + if (likely(!fdb_find(head, addr))) + fdb_create(head, source, addr); + /* else we lose race and someone else inserts * it first, don't bother updating */ @@ -465,7 +446,7 @@ static int fdb_to_nud(const struct net_bridge_fdb_entry *fdb) return NUD_REACHABLE; } -static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br, +static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge_fdb_entry *fdb, u32 pid, u32 seq, int type, unsigned int flags) { @@ -478,13 +459,14 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br, if (nlh == NULL) return -EMSGSIZE; + ndm = nlmsg_data(nlh); ndm->ndm_family = AF_BRIDGE; ndm->ndm_pad1 = 0; ndm->ndm_pad2 = 0; ndm->ndm_flags = 0; ndm->ndm_type = 0; - ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex; + ndm->ndm_ifindex = fdb->dst->dev->ifindex; ndm->ndm_state = fdb_to_nud(fdb); NLA_PUT(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr); @@ -509,10 +491,9 @@ static inline size_t fdb_nlmsg_size(void) + nla_total_size(sizeof(struct nda_cacheinfo)); } -static void fdb_notify(struct net_bridge *br, - const struct net_bridge_fdb_entry *fdb, int type) +static void fdb_notify(const struct net_bridge_fdb_entry *fdb, int type) { - struct net *net = dev_net(br->dev); + struct net *net = dev_net(fdb->dst->dev); struct sk_buff *skb; int err = -ENOBUFS; @@ -520,7 +501,7 @@ static void fdb_notify(struct net_bridge *br, if (skb == NULL) goto errout; - err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0); + err = fdb_fill_info(skb, fdb, 0, 0, type, 0); if (err < 0) { /* -EMSGSIZE implies BUG in fdb_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); @@ -557,7 +538,7 @@ int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) if (idx < cb->args[0]) goto skip; - if (fdb_fill_info(skb, br, f, + if (fdb_fill_info(skb, f, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, RTM_NEWNEIGH, @@ -575,7 +556,7 @@ int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } -/* Update (create or replace) forwarding database entry */ +/* Create new static fdb entry */ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr, __u16 state, __u16 flags) { @@ -591,25 +572,19 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr, fdb = fdb_create(head, source, addr); if (!fdb) return -ENOMEM; - fdb_notify(br, fdb, RTM_NEWNEIGH); } else { if (flags & NLM_F_EXCL) return -EEXIST; - } - - if (fdb_to_nud(fdb) != state) { - if (state & NUD_PERMANENT) - fdb->is_local = fdb->is_static = 1; - else if (state & NUD_NOARP) { - fdb->is_local = 0; - fdb->is_static = 1; - } else - fdb->is_local = fdb->is_static = 0; - fdb->updated = fdb->used = jiffies; - fdb_notify(br, fdb, RTM_NEWNEIGH); + if (flags & NLM_F_REPLACE) + fdb->updated = fdb->used = jiffies; + fdb->is_local = fdb->is_static = 0; } + if (state & NUD_PERMANENT) + fdb->is_local = fdb->is_static = 1; + else if (state & NUD_NOARP) + fdb->is_static = 1; return 0; } @@ -652,11 +627,6 @@ int br_fdb_add(struct sk_buff *skb, struct 
nlmsghdr *nlh, void *arg) return -EINVAL; } - if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) { - pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state); - return -EINVAL; - } - p = br_port_get_rtnl(dev); if (p == NULL) { pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n", @@ -664,15 +634,9 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) return -EINVAL; } - if (ndm->ndm_flags & NTF_USE) { - rcu_read_lock(); - br_fdb_update(p->br, p, addr); - rcu_read_unlock(); - } else { - spin_lock_bh(&p->br->hash_lock); - err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags); - spin_unlock_bh(&p->br->hash_lock); - } + spin_lock_bh(&p->br->hash_lock); + err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags); + spin_unlock_bh(&p->br->hash_lock); return err; } @@ -687,7 +651,7 @@ static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr) if (!fdb) return -ENOENT; - fdb_delete(p->br, fdb); + fdb_delete(fdb); return 0; } diff --git a/trunk/net/bridge/br_forward.c b/trunk/net/bridge/br_forward.c index 61f65344e711..ee64287f1290 100644 --- a/trunk/net/bridge/br_forward.c +++ b/trunk/net/bridge/br_forward.c @@ -98,7 +98,7 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb) /* called with rcu_read_lock */ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) { - if (to && should_deliver(to, skb)) { + if (should_deliver(to, skb)) { __br_deliver(to, skb); return; } diff --git a/trunk/net/bridge/br_if.c b/trunk/net/bridge/br_if.c index 0a942fbccc9a..f603e5b0b930 100644 --- a/trunk/net/bridge/br_if.c +++ b/trunk/net/bridge/br_if.c @@ -296,11 +296,10 @@ int br_min_mtu(const struct net_bridge *br) /* * Recomputes features using slave's features */ -netdev_features_t br_features_recompute(struct net_bridge *br, - netdev_features_t features) +u32 br_features_recompute(struct net_bridge *br, u32 features) { struct net_bridge_port *p; - netdev_features_t mask; + u32 mask; if (list_empty(&br->port_list)) return features; diff --git a/trunk/net/bridge/br_multicast.c b/trunk/net/bridge/br_multicast.c index 568d5bf17534..a5f4e5769809 100644 --- a/trunk/net/bridge/br_multicast.c +++ b/trunk/net/bridge/br_multicast.c @@ -24,7 +24,7 @@ #include #include #include -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) #include #include #include @@ -36,7 +36,7 @@ #define mlock_dereference(X, br) \ rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock)) -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static inline int ipv6_is_transient_multicast(const struct in6_addr *addr) { if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr)) @@ -52,7 +52,7 @@ static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) switch (a->proto) { case htons(ETH_P_IP): return a->u.ip4 == b->u.ip4; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case htons(ETH_P_IPV6): return ipv6_addr_equal(&a->u.ip6, &b->u.ip6); #endif @@ -65,7 +65,7 @@ static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip) return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1); } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb, const struct in6_addr *ip) { @@ -79,7 +79,7 @@ static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, switch (ip->proto) { case 
htons(ETH_P_IP): return __br_ip4_hash(mdb, ip->u.ip4); -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case htons(ETH_P_IPV6): return __br_ip6_hash(mdb, &ip->u.ip6); #endif @@ -121,13 +121,13 @@ static struct net_bridge_mdb_entry *br_mdb_ip4_get( return br_mdb_ip_get(mdb, &br_dst); } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static struct net_bridge_mdb_entry *br_mdb_ip6_get( struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst) { struct br_ip br_dst; - br_dst.u.ip6 = *dst; + ipv6_addr_copy(&br_dst.u.ip6, dst); br_dst.proto = htons(ETH_P_IPV6); return br_mdb_ip_get(mdb, &br_dst); @@ -152,9 +152,9 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, case htons(ETH_P_IP): ip.u.ip4 = ip_hdr(skb)->daddr; break; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case htons(ETH_P_IPV6): - ip.u.ip6 = ipv6_hdr(skb)->daddr; + ipv6_addr_copy(&ip.u.ip6, &ipv6_hdr(skb)->daddr); break; #endif default: @@ -411,7 +411,7 @@ static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br, return skb; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, const struct in6_addr *group) { @@ -474,7 +474,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, mldq->mld_cksum = 0; mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval)); mldq->mld_reserved = 0; - mldq->mld_mca = *group; + ipv6_addr_copy(&mldq->mld_mca, group); /* checksum */ mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, @@ -496,7 +496,7 @@ static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br, switch (addr->proto) { case htons(ETH_P_IP): return br_ip4_multicast_alloc_query(br, addr->u.ip4); -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case htons(ETH_P_IPV6): return br_ip6_multicast_alloc_query(br, &addr->u.ip6); #endif @@ -773,7 +773,7 @@ static int br_ip4_multicast_add_group(struct net_bridge *br, return br_multicast_add_group(br, port, &br_group); } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static int br_ip6_multicast_add_group(struct net_bridge *br, struct net_bridge_port *port, const struct in6_addr *group) @@ -783,7 +783,7 @@ static int br_ip6_multicast_add_group(struct net_bridge *br, if (!ipv6_is_transient_multicast(group)) return 0; - br_group.u.ip6 = *group; + ipv6_addr_copy(&br_group.u.ip6, group); br_group.proto = htons(ETH_P_IPV6); return br_multicast_add_group(br, port, &br_group); @@ -845,7 +845,7 @@ static void br_multicast_send_query(struct net_bridge *br, br_group.proto = htons(ETH_P_IP); __br_multicast_send_query(br, port, &br_group); -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) br_group.proto = htons(ETH_P_IPV6); __br_multicast_send_query(br, port, &br_group); #endif @@ -989,7 +989,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br, return err; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static int br_ip6_multicast_mld2_report(struct net_bridge *br, struct net_bridge_port *port, struct sk_buff *skb) @@ -1185,7 +1185,7 @@ static int br_ip4_multicast_query(struct net_bridge *br, return err; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static int br_ip6_multicast_query(struct net_bridge *br, 
struct net_bridge_port *port, struct sk_buff *skb) @@ -1334,7 +1334,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br, br_multicast_leave_group(br, port, &br_group); } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static void br_ip6_multicast_leave_group(struct net_bridge *br, struct net_bridge_port *port, const struct in6_addr *group) @@ -1344,7 +1344,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br, if (!ipv6_is_transient_multicast(group)) return; - br_group.u.ip6 = *group; + ipv6_addr_copy(&br_group.u.ip6, group); br_group.proto = htons(ETH_P_IPV6); br_multicast_leave_group(br, port, &br_group); @@ -1449,7 +1449,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, return err; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static int br_multicast_ipv6_rcv(struct net_bridge *br, struct net_bridge_port *port, struct sk_buff *skb) @@ -1458,7 +1458,6 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br, const struct ipv6hdr *ip6h; u8 icmp6_type; u8 nexthdr; - __be16 frag_off; unsigned len; int offset; int err; @@ -1484,7 +1483,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br, return -EINVAL; nexthdr = ip6h->nexthdr; - offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr, &frag_off); + offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr); if (offset < 0 || nexthdr != IPPROTO_ICMPV6) return 0; @@ -1596,7 +1595,7 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, switch (skb->protocol) { case htons(ETH_P_IP): return br_multicast_ipv4_rcv(br, port, skb); -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case htons(ETH_P_IPV6): return br_multicast_ipv6_rcv(br, port, skb); #endif diff --git a/trunk/net/bridge/br_netfilter.c b/trunk/net/bridge/br_netfilter.c index 84122472656c..fa8b8f763580 100644 --- a/trunk/net/bridge/br_netfilter.c +++ b/trunk/net/bridge/br_netfilter.c @@ -362,7 +362,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb) if (!skb->dev) goto free_skb; dst = skb_dst(skb); - neigh = dst_get_neighbour_noref(dst); + neigh = dst_get_neighbour(dst); if (neigh->hh.hh_len) { neigh_hh_bridge(&neigh->hh, skb); skb->dev = nf_bridge->physindev; @@ -813,7 +813,7 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb, return NF_STOLEN; } -#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4) +#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE) static int br_nf_dev_queue_xmit(struct sk_buff *skb) { int ret; diff --git a/trunk/net/bridge/br_private.h b/trunk/net/bridge/br_private.h index 0b67a63ad7a8..d7d6fb05411f 100644 --- a/trunk/net/bridge/br_private.h +++ b/trunk/net/bridge/br_private.h @@ -56,7 +56,7 @@ struct br_ip { union { __be32 ip4; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) struct in6_addr ip6; #endif } u; @@ -348,7 +348,6 @@ extern void br_fdb_fini(void); extern void br_fdb_flush(struct net_bridge *br); extern void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr); -extern void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr); extern void br_fdb_cleanup(unsigned long arg); extern void br_fdb_delete_by_port(struct net_bridge *br, const struct net_bridge_port *p, int do_all); @@ -388,8 +387,7 @@ extern int br_add_if(struct net_bridge *br, extern int br_del_if(struct net_bridge *br, struct net_device *dev); extern int br_min_mtu(const 
struct net_bridge *br); -extern netdev_features_t br_features_recompute(struct net_bridge *br, - netdev_features_t features); +extern u32 br_features_recompute(struct net_bridge *br, u32 features); /* br_input.c */ extern int br_handle_frame_finish(struct sk_buff *skb); @@ -537,7 +535,7 @@ extern void br_stp_port_timer_init(struct net_bridge_port *p); extern unsigned long br_timer_value(const struct timer_list *timer); /* br.c */ -#if IS_ENABLED(CONFIG_ATM_LANE) +#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE) extern int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr); #endif diff --git a/trunk/net/bridge/netfilter/ebt_ip6.c b/trunk/net/bridge/netfilter/ebt_ip6.c index 99c85668f551..2ed0056a39a8 100644 --- a/trunk/net/bridge/netfilter/ebt_ip6.c +++ b/trunk/net/bridge/netfilter/ebt_ip6.c @@ -55,10 +55,9 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par) return false; if (info->bitmask & EBT_IP6_PROTO) { uint8_t nexthdr = ih6->nexthdr; - __be16 frag_off; int offset_ph; - offset_ph = ipv6_skip_exthdr(skb, sizeof(_ip6h), &nexthdr, &frag_off); + offset_ph = ipv6_skip_exthdr(skb, sizeof(_ip6h), &nexthdr); if (offset_ph == -1) return false; if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO)) diff --git a/trunk/net/bridge/netfilter/ebt_log.c b/trunk/net/bridge/netfilter/ebt_log.c index f88ee537fb2b..6e5a8bb9b940 100644 --- a/trunk/net/bridge/netfilter/ebt_log.c +++ b/trunk/net/bridge/netfilter/ebt_log.c @@ -107,13 +107,12 @@ ebt_log_packet(u_int8_t pf, unsigned int hooknum, goto out; } -#if IS_ENABLED(CONFIG_BRIDGE_EBT_IP6) +#if defined(CONFIG_BRIDGE_EBT_IP6) || defined(CONFIG_BRIDGE_EBT_IP6_MODULE) if ((bitmask & EBT_LOG_IP6) && eth_hdr(skb)->h_proto == htons(ETH_P_IPV6)) { const struct ipv6hdr *ih; struct ipv6hdr _iph; uint8_t nexthdr; - __be16 frag_off; int offset_ph; ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph); @@ -124,7 +123,7 @@ ebt_log_packet(u_int8_t pf, unsigned int hooknum, printk(" IPv6 SRC=%pI6 IPv6 DST=%pI6, IPv6 priority=0x%01X, Next Header=%d", &ih->saddr, &ih->daddr, ih->priority, ih->nexthdr); nexthdr = ih->nexthdr; - offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr, &frag_off); + offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr); if (offset_ph == -1) goto out; print_ports(skb, nexthdr, offset_ph); diff --git a/trunk/net/caif/Kconfig b/trunk/net/caif/Kconfig index 936361e5a2b6..529750da9624 100644 --- a/trunk/net/caif/Kconfig +++ b/trunk/net/caif/Kconfig @@ -40,14 +40,3 @@ config CAIF_NETDEV If you select to build it as a built-in then the main CAIF device must also be a built-in. If unsure say Y. - -config CAIF_USB - tristate "CAIF USB support" - depends on CAIF - default n - ---help--- - Say Y if you are using CAIF over USB CDC NCM. - This can be either built-in or a loadable module, - If you select to build it as a built-in then the main CAIF device must - also be a built-in. - If unsure say N. 
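The bridge and netfilter hunks above repeatedly swap "#if IS_ENABLED(CONFIG_X)" back to the older "#if defined(CONFIG_X) || defined(CONFIG_X_MODULE)" spelling. Both guards compile the code when the option is built in (=y) or built as a module (=m); newer trees get the same effect from the IS_ENABLED() helper in include/linux/kconfig.h, which this tree predates. A minimal sketch of the older idiom follows, using a hypothetical br_ipv6_present() helper that is not part of this diff:

#include <linux/types.h>

/*
 * Hypothetical helper, for illustration only: it reports "true" whenever
 * IPv6 support is present, whether built in (CONFIG_IPV6=y) or built as
 * a module (CONFIG_IPV6=m, which defines CONFIG_IPV6_MODULE).  Newer
 * kernels would write the same guard as "#if IS_ENABLED(CONFIG_IPV6)".
 */
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static inline bool br_ipv6_present(void)
{
	return true;
}
#else
static inline bool br_ipv6_present(void)
{
	return false;
}
#endif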
diff --git a/trunk/net/caif/Makefile b/trunk/net/caif/Makefile index cc2b51154d03..ebcd4e7e6f47 100644 --- a/trunk/net/caif/Makefile +++ b/trunk/net/caif/Makefile @@ -10,6 +10,5 @@ caif-y := caif_dev.o \ obj-$(CONFIG_CAIF) += caif.o obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o obj-$(CONFIG_CAIF) += caif_socket.o -obj-$(CONFIG_CAIF_USB) += caif_usb.o export-y := caif.o diff --git a/trunk/net/caif/caif_dev.c b/trunk/net/caif/caif_dev.c index b0ce14fbf6ef..f1fa1f6e658d 100644 --- a/trunk/net/caif/caif_dev.c +++ b/trunk/net/caif/caif_dev.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include @@ -25,7 +24,6 @@ #include #include #include -#include MODULE_LICENSE("GPL"); @@ -35,10 +33,6 @@ struct caif_device_entry { struct list_head list; struct net_device *netdev; int __percpu *pcpu_refcnt; - spinlock_t flow_lock; - struct sk_buff *xoff_skb; - void (*xoff_skb_dtor)(struct sk_buff *skb); - bool xoff; }; struct caif_device_entry_list { @@ -53,14 +47,13 @@ struct caif_net { }; static int caif_net_id; -static int q_high = 50; /* Percent */ struct cfcnfg *get_cfcnfg(struct net *net) { struct caif_net *caifn; + BUG_ON(!net); caifn = net_generic(net, caif_net_id); - if (!caifn) - return NULL; + BUG_ON(!caifn); return caifn->cfg; } EXPORT_SYMBOL(get_cfcnfg); @@ -68,9 +61,9 @@ EXPORT_SYMBOL(get_cfcnfg); static struct caif_device_entry_list *caif_device_list(struct net *net) { struct caif_net *caifn; + BUG_ON(!net); caifn = net_generic(net, caif_net_id); - if (!caifn) - return NULL; + BUG_ON(!caifn); return &caifn->caifdevs; } @@ -99,8 +92,7 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev) struct caif_device_entry *caifd; caifdevs = caif_device_list(dev_net(dev)); - if (!caifdevs) - return NULL; + BUG_ON(!caifdevs); caifd = kzalloc(sizeof(*caifd), GFP_KERNEL); if (!caifd) @@ -120,9 +112,7 @@ static struct caif_device_entry *caif_get(struct net_device *dev) struct caif_device_entry_list *caifdevs = caif_device_list(dev_net(dev)); struct caif_device_entry *caifd; - if (!caifdevs) - return NULL; - + BUG_ON(!caifdevs); list_for_each_entry_rcu(caifd, &caifdevs->list, list) { if (caifd->netdev == dev) return caifd; @@ -130,106 +120,15 @@ static struct caif_device_entry *caif_get(struct net_device *dev) return NULL; } -void caif_flow_cb(struct sk_buff *skb) -{ - struct caif_device_entry *caifd; - void (*dtor)(struct sk_buff *skb) = NULL; - bool send_xoff; - - WARN_ON(skb->dev == NULL); - - rcu_read_lock(); - caifd = caif_get(skb->dev); - caifd_hold(caifd); - rcu_read_unlock(); - - spin_lock_bh(&caifd->flow_lock); - send_xoff = caifd->xoff; - caifd->xoff = 0; - if (!WARN_ON(caifd->xoff_skb_dtor == NULL)) { - WARN_ON(caifd->xoff_skb != skb); - dtor = caifd->xoff_skb_dtor; - caifd->xoff_skb = NULL; - caifd->xoff_skb_dtor = NULL; - } - spin_unlock_bh(&caifd->flow_lock); - - if (dtor) - dtor(skb); - - if (send_xoff) - caifd->layer.up-> - ctrlcmd(caifd->layer.up, - _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND, - caifd->layer.id); - caifd_put(caifd); -} - static int transmit(struct cflayer *layer, struct cfpkt *pkt) { - int err, high = 0, qlen = 0; - struct caif_dev_common *caifdev; + int err; struct caif_device_entry *caifd = container_of(layer, struct caif_device_entry, layer); struct sk_buff *skb; - struct netdev_queue *txq; - - rcu_read_lock_bh(); skb = cfpkt_tonative(pkt); skb->dev = caifd->netdev; - skb_reset_network_header(skb); - skb->protocol = htons(ETH_P_CAIF); - caifdev = netdev_priv(caifd->netdev); - - /* Check if we need to handle xoff */ - if 
(likely(caifd->netdev->tx_queue_len == 0)) - goto noxoff; - - if (unlikely(caifd->xoff)) - goto noxoff; - - if (likely(!netif_queue_stopped(caifd->netdev))) { - /* If we run with a TX queue, check if the queue is too long*/ - txq = netdev_get_tx_queue(skb->dev, 0); - qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc)); - - if (likely(qlen == 0)) - goto noxoff; - - high = (caifd->netdev->tx_queue_len * q_high) / 100; - if (likely(qlen < high)) - goto noxoff; - } - - /* Hold lock while accessing xoff */ - spin_lock_bh(&caifd->flow_lock); - if (caifd->xoff) { - spin_unlock_bh(&caifd->flow_lock); - goto noxoff; - } - - /* - * Handle flow off, we do this by temporary hi-jacking this - * skb's destructor function, and replace it with our own - * flow-on callback. The callback will set flow-on and call - * the original destructor. - */ - - pr_debug("queue has stopped(%d) or is full (%d > %d)\n", - netif_queue_stopped(caifd->netdev), - qlen, high); - caifd->xoff = 1; - caifd->xoff_skb = skb; - caifd->xoff_skb_dtor = skb->destructor; - skb->destructor = caif_flow_cb; - spin_unlock_bh(&caifd->flow_lock); - - caifd->layer.up->ctrlcmd(caifd->layer.up, - _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND, - caifd->layer.id); -noxoff: - rcu_read_unlock_bh(); err = dev_queue_xmit(skb); if (err > 0) @@ -273,10 +172,7 @@ static int receive(struct sk_buff *skb, struct net_device *dev, /* Release reference to stack upwards */ caifd_put(caifd); - - if (err != 0) - err = NET_RX_DROP; - return err; + return 0; } static struct packet_type caif_packet_type __read_mostly = { @@ -307,57 +203,6 @@ static void dev_flowctrl(struct net_device *dev, int on) caifd_put(caifd); } -void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, - struct cflayer *link_support, int head_room, - struct cflayer **layer, int (**rcv_func)( - struct sk_buff *, struct net_device *, - struct packet_type *, struct net_device *)) -{ - struct caif_device_entry *caifd; - enum cfcnfg_phy_preference pref; - struct cfcnfg *cfg = get_cfcnfg(dev_net(dev)); - struct caif_device_entry_list *caifdevs; - - caifdevs = caif_device_list(dev_net(dev)); - if (!cfg || !caifdevs) - return; - caifd = caif_device_alloc(dev); - if (!caifd) - return; - *layer = &caifd->layer; - spin_lock_init(&caifd->flow_lock); - - switch (caifdev->link_select) { - case CAIF_LINK_HIGH_BANDW: - pref = CFPHYPREF_HIGH_BW; - break; - case CAIF_LINK_LOW_LATENCY: - pref = CFPHYPREF_LOW_LAT; - break; - default: - pref = CFPHYPREF_HIGH_BW; - break; - } - mutex_lock(&caifdevs->lock); - list_add_rcu(&caifd->list, &caifdevs->list); - - strncpy(caifd->layer.name, dev->name, - sizeof(caifd->layer.name) - 1); - caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0; - caifd->layer.transmit = transmit; - cfcnfg_add_phy_layer(cfg, - dev, - &caifd->layer, - pref, - link_support, - caifdev->use_fcs, - head_room); - mutex_unlock(&caifdevs->lock); - if (rcv_func) - *rcv_func = receive; -} -EXPORT_SYMBOL(caif_enroll_dev); - /* notify Caif of device events */ static int caif_device_notify(struct notifier_block *me, unsigned long what, void *arg) @@ -365,40 +210,62 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what, struct net_device *dev = arg; struct caif_device_entry *caifd = NULL; struct caif_dev_common *caifdev; + enum cfcnfg_phy_preference pref; + enum cfcnfg_phy_type phy_type; struct cfcnfg *cfg; - struct cflayer *layer, *link_support; - int head_room = 0; struct caif_device_entry_list *caifdevs; - cfg = get_cfcnfg(dev_net(dev)); - caifdevs = 
caif_device_list(dev_net(dev)); - if (!cfg || !caifdevs) + if (dev->type != ARPHRD_CAIF) return 0; - caifd = caif_get(dev); - if (caifd == NULL && dev->type != ARPHRD_CAIF) + cfg = get_cfcnfg(dev_net(dev)); + if (cfg == NULL) return 0; + caifdevs = caif_device_list(dev_net(dev)); + switch (what) { case NETDEV_REGISTER: - if (caifd != NULL) - break; + caifd = caif_device_alloc(dev); + if (!caifd) + return 0; caifdev = netdev_priv(dev); + caifdev->flowctrl = dev_flowctrl; - link_support = NULL; - if (caifdev->use_frag) { - head_room = 1; - link_support = cfserl_create(dev->ifindex, - caifdev->use_stx); - if (!link_support) { - pr_warn("Out of memory\n"); - break; - } + caifd->layer.transmit = transmit; + + if (caifdev->use_frag) + phy_type = CFPHYTYPE_FRAG; + else + phy_type = CFPHYTYPE_CAIF; + + switch (caifdev->link_select) { + case CAIF_LINK_HIGH_BANDW: + pref = CFPHYPREF_HIGH_BW; + break; + case CAIF_LINK_LOW_LATENCY: + pref = CFPHYPREF_LOW_LAT; + break; + default: + pref = CFPHYPREF_HIGH_BW; + break; } - caif_enroll_dev(dev, caifdev, link_support, head_room, - &layer, NULL); - caifdev->flowctrl = dev_flowctrl; + strncpy(caifd->layer.name, dev->name, + sizeof(caifd->layer.name) - 1); + caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0; + + mutex_lock(&caifdevs->lock); + list_add_rcu(&caifd->list, &caifdevs->list); + + cfcnfg_add_phy_layer(cfg, + phy_type, + dev, + &caifd->layer, + pref, + caifdev->use_fcs, + caifdev->use_stx); + mutex_unlock(&caifdevs->lock); break; case NETDEV_UP: @@ -410,7 +277,6 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what, break; } - caifd->xoff = 0; cfcnfg_set_phy_state(cfg, &caifd->layer, true); rcu_read_unlock(); @@ -432,24 +298,6 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what, caifd->layer.up->ctrlcmd(caifd->layer.up, _CAIF_CTRLCMD_PHYIF_DOWN_IND, caifd->layer.id); - - spin_lock_bh(&caifd->flow_lock); - - /* - * Replace our xoff-destructor with original destructor. - * We trust that skb->destructor *always* is called before - * the skb reference is invalid. The hijacked SKB destructor - * takes the flow_lock so manipulating the skb->destructor here - * should be safe. 
- */ - if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL) - caifd->xoff_skb->destructor = caifd->xoff_skb_dtor; - - caifd->xoff = 0; - caifd->xoff_skb_dtor = NULL; - caifd->xoff_skb = NULL; - - spin_unlock_bh(&caifd->flow_lock); caifd_put(caifd); break; @@ -505,15 +353,15 @@ static struct notifier_block caif_device_notifier = { static int caif_init_net(struct net *net) { struct caif_net *caifn = net_generic(net, caif_net_id); - if (WARN_ON(!caifn)) - return -EINVAL; - + BUG_ON(!caifn); INIT_LIST_HEAD(&caifn->caifdevs.list); mutex_init(&caifn->caifdevs.lock); caifn->cfg = cfcnfg_create(); - if (!caifn->cfg) + if (!caifn->cfg) { + pr_warn("can't create cfcnfg\n"); return -ENOMEM; + } return 0; } @@ -523,14 +371,17 @@ static void caif_exit_net(struct net *net) struct caif_device_entry *caifd, *tmp; struct caif_device_entry_list *caifdevs = caif_device_list(net); - struct cfcnfg *cfg = get_cfcnfg(net); - - if (!cfg || !caifdevs) - return; + struct cfcnfg *cfg; rtnl_lock(); mutex_lock(&caifdevs->lock); + cfg = get_cfcnfg(net); + if (cfg == NULL) { + mutex_unlock(&caifdevs->lock); + return; + } + list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) { int i = 0; list_del_rcu(&caifd->list); diff --git a/trunk/net/caif/caif_usb.c b/trunk/net/caif/caif_usb.c deleted file mode 100644 index 5fc9eca8cd41..000000000000 --- a/trunk/net/caif/caif_usb.c +++ /dev/null @@ -1,208 +0,0 @@ -/* - * CAIF USB handler - * Copyright (C) ST-Ericsson AB 2011 - * Author: Sjur Brendeland/sjur.brandeland@stericsson.com - * License terms: GNU General Public License (GPL) version 2 - * - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -MODULE_LICENSE("GPL"); - -#define CFUSB_PAD_DESCR_SZ 1 /* Alignment descriptor length */ -#define CFUSB_ALIGNMENT 4 /* Number of bytes to align. */ -#define CFUSB_MAX_HEADLEN (CFUSB_PAD_DESCR_SZ + CFUSB_ALIGNMENT-1) -#define STE_USB_VID 0x04cc /* USB Product ID for ST-Ericsson */ -#define STE_USB_PID_CAIF 0x2306 /* Product id for CAIF Modems */ - -struct cfusbl { - struct cflayer layer; - u8 tx_eth_hdr[ETH_HLEN]; -}; - -static bool pack_added; - -static int cfusbl_receive(struct cflayer *layr, struct cfpkt *pkt) -{ - u8 hpad; - - /* Remove padding. 
*/ - cfpkt_extr_head(pkt, &hpad, 1); - cfpkt_extr_head(pkt, NULL, hpad); - return layr->up->receive(layr->up, pkt); -} - -static int cfusbl_transmit(struct cflayer *layr, struct cfpkt *pkt) -{ - struct caif_payload_info *info; - u8 hpad; - u8 zeros[CFUSB_ALIGNMENT]; - struct sk_buff *skb; - struct cfusbl *usbl = container_of(layr, struct cfusbl, layer); - - skb = cfpkt_tonative(pkt); - - skb_reset_network_header(skb); - skb->protocol = htons(ETH_P_IP); - - info = cfpkt_info(pkt); - hpad = (info->hdr_len + CFUSB_PAD_DESCR_SZ) & (CFUSB_ALIGNMENT - 1); - - if (skb_headroom(skb) < ETH_HLEN + CFUSB_PAD_DESCR_SZ + hpad) { - pr_warn("Headroom to small\n"); - kfree_skb(skb); - return -EIO; - } - memset(zeros, 0, hpad); - - cfpkt_add_head(pkt, zeros, hpad); - cfpkt_add_head(pkt, &hpad, 1); - cfpkt_add_head(pkt, usbl->tx_eth_hdr, sizeof(usbl->tx_eth_hdr)); - return layr->dn->transmit(layr->dn, pkt); -} - -static void cfusbl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, - int phyid) -{ - if (layr->up && layr->up->ctrlcmd) - layr->up->ctrlcmd(layr->up, ctrl, layr->id); -} - -struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN], - u8 braddr[ETH_ALEN]) -{ - struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC); - - if (!this) { - pr_warn("Out of memory\n"); - return NULL; - } - caif_assert(offsetof(struct cfusbl, layer) == 0); - - memset(this, 0, sizeof(struct cflayer)); - this->layer.receive = cfusbl_receive; - this->layer.transmit = cfusbl_transmit; - this->layer.ctrlcmd = cfusbl_ctrlcmd; - snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "usb%d", phyid); - this->layer.id = phyid; - - /* - * Construct TX ethernet header: - * 0-5 destination address - * 5-11 source address - * 12-13 protocol type - */ - memcpy(&this->tx_eth_hdr[ETH_ALEN], braddr, ETH_ALEN); - memcpy(&this->tx_eth_hdr[ETH_ALEN], ethaddr, ETH_ALEN); - this->tx_eth_hdr[12] = cpu_to_be16(ETH_P_802_EX1) & 0xff; - this->tx_eth_hdr[13] = (cpu_to_be16(ETH_P_802_EX1) >> 8) & 0xff; - pr_debug("caif ethernet TX-header dst:%pM src:%pM type:%02x%02x\n", - this->tx_eth_hdr, this->tx_eth_hdr + ETH_ALEN, - this->tx_eth_hdr[12], this->tx_eth_hdr[13]); - - return (struct cflayer *) this; -} - -static struct packet_type caif_usb_type __read_mostly = { - .type = cpu_to_be16(ETH_P_802_EX1), -}; - -static int cfusbl_device_notify(struct notifier_block *me, unsigned long what, - void *arg) -{ - struct net_device *dev = arg; - struct caif_dev_common common; - struct cflayer *layer, *link_support; - struct usbnet *usbnet = netdev_priv(dev); - struct usb_device *usbdev = usbnet->udev; - struct ethtool_drvinfo drvinfo; - - /* - * Quirks: High-jack ethtool to find if we have a NCM device, - * and find it's VID/PID. 
- */ - if (dev->ethtool_ops == NULL || dev->ethtool_ops->get_drvinfo == NULL) - return 0; - - dev->ethtool_ops->get_drvinfo(dev, &drvinfo); - if (strncmp(drvinfo.driver, "cdc_ncm", 7) != 0) - return 0; - - pr_debug("USB CDC NCM device VID:0x%4x PID:0x%4x\n", - le16_to_cpu(usbdev->descriptor.idVendor), - le16_to_cpu(usbdev->descriptor.idProduct)); - - /* Check for VID/PID that supports CAIF */ - if (!(le16_to_cpu(usbdev->descriptor.idVendor) == STE_USB_VID && - le16_to_cpu(usbdev->descriptor.idProduct) == STE_USB_PID_CAIF)) - return 0; - - if (what == NETDEV_UNREGISTER) - module_put(THIS_MODULE); - - if (what != NETDEV_REGISTER) - return 0; - - __module_get(THIS_MODULE); - - memset(&common, 0, sizeof(common)); - common.use_frag = false; - common.use_fcs = false; - common.use_stx = false; - common.link_select = CAIF_LINK_HIGH_BANDW; - common.flowctrl = NULL; - - link_support = cfusbl_create(dev->ifindex, dev->dev_addr, - dev->broadcast); - - if (!link_support) - return -ENOMEM; - - if (dev->num_tx_queues > 1) - pr_warn("USB device uses more than one tx queue\n"); - - caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN, - &layer, &caif_usb_type.func); - if (!pack_added) - dev_add_pack(&caif_usb_type); - pack_added = true; - - strncpy(layer->name, dev->name, - sizeof(layer->name) - 1); - layer->name[sizeof(layer->name) - 1] = 0; - - return 0; -} - -static struct notifier_block caif_device_notifier = { - .notifier_call = cfusbl_device_notify, - .priority = 0, -}; - -static int __init cfusbl_init(void) -{ - return register_netdevice_notifier(&caif_device_notifier); -} - -static void __exit cfusbl_exit(void) -{ - unregister_netdevice_notifier(&caif_device_notifier); - dev_remove_pack(&caif_usb_type); -} - -module_init(cfusbl_init); -module_exit(cfusbl_exit); diff --git a/trunk/net/caif/cfcnfg.c b/trunk/net/caif/cfcnfg.c index 598aafb4cb51..00523ecc4ced 100644 --- a/trunk/net/caif/cfcnfg.c +++ b/trunk/net/caif/cfcnfg.c @@ -45,8 +45,8 @@ struct cfcnfg_phyinfo { /* Interface index */ int ifindex; - /* Protocol head room added for CAIF link layer */ - int head_room; + /* Use Start of frame extension */ + bool use_stx; /* Use Start of frame checksum */ bool use_fcs; @@ -187,11 +187,11 @@ int caif_disconnect_client(struct net *net, struct cflayer *adap_layer) if (channel_id != 0) { struct cflayer *servl; servl = cfmuxl_remove_uplayer(cfg->mux, channel_id); - cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer); if (servl != NULL) layer_set_up(servl, NULL); } else pr_debug("nothing to disconnect\n"); + cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer); /* Do RCU sync before initiating cleanup */ synchronize_rcu(); @@ -350,7 +350,9 @@ int caif_connect_client(struct net *net, struct caif_connect_request *conn_req, *ifindex = phy->ifindex; *proto_tail = 2; - *proto_head = protohead[param.linktype] + phy->head_room; + *proto_head = + + protohead[param.linktype] + (phy->use_stx ? 
1 : 0); rcu_read_unlock(); @@ -458,13 +460,13 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv, } void -cfcnfg_add_phy_layer(struct cfcnfg *cnfg, +cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type, struct net_device *dev, struct cflayer *phy_layer, enum cfcnfg_phy_preference pref, - struct cflayer *link_support, - bool fcs, int head_room) + bool fcs, bool stx) { struct cflayer *frml; + struct cflayer *phy_driver = NULL; struct cfcnfg_phyinfo *phyinfo = NULL; int i; u8 phyid; @@ -480,13 +482,26 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, goto got_phyid; } pr_warn("Too many CAIF Link Layers (max 6)\n"); - goto out; + goto out_err; got_phyid: phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC); if (!phyinfo) goto out_err; + switch (phy_type) { + case CFPHYTYPE_FRAG: + phy_driver = + cfserl_create(CFPHYTYPE_FRAG, phyid, stx); + if (!phy_driver) + goto out_err; + break; + case CFPHYTYPE_CAIF: + phy_driver = NULL; + break; + default: + goto out_err; + } phy_layer->id = phyid; phyinfo->pref = pref; phyinfo->id = phyid; @@ -494,7 +509,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, phyinfo->dev_info.dev = dev; phyinfo->phy_layer = phy_layer; phyinfo->ifindex = dev->ifindex; - phyinfo->head_room = head_room; + phyinfo->use_stx = stx; phyinfo->use_fcs = fcs; frml = cffrml_create(phyid, fcs); @@ -504,23 +519,23 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, phyinfo->frm_layer = frml; layer_set_up(frml, cnfg->mux); - if (link_support != NULL) { - link_support->id = phyid; - layer_set_dn(frml, link_support); - layer_set_up(link_support, frml); - layer_set_dn(link_support, phy_layer); - layer_set_up(phy_layer, link_support); + if (phy_driver != NULL) { + phy_driver->id = phyid; + layer_set_dn(frml, phy_driver); + layer_set_up(phy_driver, frml); + layer_set_dn(phy_driver, phy_layer); + layer_set_up(phy_layer, phy_driver); } else { layer_set_dn(frml, phy_layer); layer_set_up(phy_layer, frml); } list_add_rcu(&phyinfo->node, &cnfg->phys); -out: mutex_unlock(&cnfg->lock); return; out_err: + kfree(phy_driver); kfree(phyinfo); mutex_unlock(&cnfg->lock); } diff --git a/trunk/net/caif/cfpkt_skbuff.c b/trunk/net/caif/cfpkt_skbuff.c index e335ba859b97..df08c47183d4 100644 --- a/trunk/net/caif/cfpkt_skbuff.c +++ b/trunk/net/caif/cfpkt_skbuff.c @@ -63,6 +63,7 @@ static inline struct cfpkt *skb_to_pkt(struct sk_buff *skb) return (struct cfpkt *) skb; } + struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt) { struct cfpkt *pkt = skb_to_pkt(nativepkt); @@ -104,12 +105,14 @@ void cfpkt_destroy(struct cfpkt *pkt) kfree_skb(skb); } + inline bool cfpkt_more(struct cfpkt *pkt) { struct sk_buff *skb = pkt_to_skb(pkt); return skb->len > 0; } + int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); @@ -141,11 +144,9 @@ int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len) } from = skb_pull(skb, len); from -= len; - if (data) - memcpy(data, from, len); + memcpy(data, from, len); return 0; } -EXPORT_SYMBOL(cfpkt_extr_head); int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len) { @@ -169,11 +170,13 @@ int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len) return 0; } + int cfpkt_pad_trail(struct cfpkt *pkt, u16 len) { return cfpkt_add_body(pkt, NULL, len); } + int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); @@ -252,19 +255,21 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len) memcpy(to, data, len); return 0; } 
-EXPORT_SYMBOL(cfpkt_add_head); + inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len) { return cfpkt_add_body(pkt, data, len); } + inline u16 cfpkt_getlen(struct cfpkt *pkt) { struct sk_buff *skb = pkt_to_skb(pkt); return skb->len; } + inline u16 cfpkt_iterate(struct cfpkt *pkt, u16 (*iter_func)(u16, void *, u16), u16 data) @@ -282,6 +287,7 @@ inline u16 cfpkt_iterate(struct cfpkt *pkt, return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt)); } + int cfpkt_setlen(struct cfpkt *pkt, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); @@ -393,4 +399,3 @@ struct caif_payload_info *cfpkt_info(struct cfpkt *pkt) { return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb; } -EXPORT_SYMBOL(cfpkt_info); diff --git a/trunk/net/caif/cfrfml.c b/trunk/net/caif/cfrfml.c index 6dc75d4f8d94..81660f809713 100644 --- a/trunk/net/caif/cfrfml.c +++ b/trunk/net/caif/cfrfml.c @@ -190,7 +190,7 @@ static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt) static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt) { - caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size + RFM_HEAD_SIZE); + caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size); /* Add info for MUX-layer to route the packet out. */ cfpkt_info(pkt)->channel_id = rfml->serv.layer.id; diff --git a/trunk/net/caif/cfserl.c b/trunk/net/caif/cfserl.c index 8e68b97f13ee..797c8d165993 100644 --- a/trunk/net/caif/cfserl.c +++ b/trunk/net/caif/cfserl.c @@ -31,7 +31,7 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt); static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, int phyid); -struct cflayer *cfserl_create(int instance, bool use_stx) +struct cflayer *cfserl_create(int type, int instance, bool use_stx) { struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC); if (!this) @@ -40,6 +40,7 @@ struct cflayer *cfserl_create(int instance, bool use_stx) this->layer.receive = cfserl_receive; this->layer.transmit = cfserl_transmit; this->layer.ctrlcmd = cfserl_ctrlcmd; + this->layer.type = type; this->usestx = use_stx; spin_lock_init(&this->sync); snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1"); diff --git a/trunk/net/core/Makefile b/trunk/net/core/Makefile index 674641b13aea..0d357b1c4e57 100644 --- a/trunk/net/core/Makefile +++ b/trunk/net/core/Makefile @@ -3,13 +3,12 @@ # obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \ - gen_stats.o gen_estimator.o net_namespace.o secure_seq.o flow_dissector.o + gen_stats.o gen_estimator.o net_namespace.o secure_seq.o obj-$(CONFIG_SYSCTL) += sysctl_net_core.o obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \ - neighbour.o rtnetlink.o utils.o link_watch.o filter.o \ - sock_diag.o + neighbour.o rtnetlink.o utils.o link_watch.o filter.o obj-$(CONFIG_XFRM) += flow.o obj-y += net-sysfs.o @@ -20,4 +19,3 @@ obj-$(CONFIG_FIB_RULES) += fib_rules.o obj-$(CONFIG_TRACEPOINTS) += net-traces.o obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += timestamping.o -obj-$(CONFIG_NETPRIO_CGROUP) += netprio_cgroup.o diff --git a/trunk/net/core/dev.c b/trunk/net/core/dev.c index f494675471a9..5a13edfc9f73 100644 --- a/trunk/net/core/dev.c +++ b/trunk/net/core/dev.c @@ -133,9 +133,10 @@ #include #include #include +#include +#include +#include #include -#include -#include #include "net-sysfs.h" @@ -1319,6 +1320,8 @@ EXPORT_SYMBOL(dev_close); */ void dev_disable_lro(struct net_device *dev) { + u32 flags; + /* * If we're trying to disable lro on a vlan device * use the underlying 
physical device instead @@ -1326,9 +1329,15 @@ void dev_disable_lro(struct net_device *dev) if (is_vlan_dev(dev)) dev = vlan_dev_real_dev(dev); - dev->wanted_features &= ~NETIF_F_LRO; - netdev_update_features(dev); + if (dev->ethtool_ops && dev->ethtool_ops->get_flags) + flags = dev->ethtool_ops->get_flags(dev); + else + flags = ethtool_op_get_flags(dev); + if (!(flags & ETH_FLAG_LRO)) + return; + + __ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO); if (unlikely(dev->features & NETIF_F_LRO)) netdev_WARN(dev, "failed to disable LRO!\n"); } @@ -1441,55 +1450,34 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev) } EXPORT_SYMBOL(call_netdevice_notifiers); -static struct jump_label_key netstamp_needed __read_mostly; -#ifdef HAVE_JUMP_LABEL -/* We are not allowed to call jump_label_dec() from irq context - * If net_disable_timestamp() is called from irq context, defer the - * jump_label_dec() calls. - */ -static atomic_t netstamp_needed_deferred; -#endif +/* When > 0 there are consumers of rx skb time stamps */ +static atomic_t netstamp_needed = ATOMIC_INIT(0); void net_enable_timestamp(void) { -#ifdef HAVE_JUMP_LABEL - int deferred = atomic_xchg(&netstamp_needed_deferred, 0); - - if (deferred) { - while (--deferred) - jump_label_dec(&netstamp_needed); - return; - } -#endif - WARN_ON(in_interrupt()); - jump_label_inc(&netstamp_needed); + atomic_inc(&netstamp_needed); } EXPORT_SYMBOL(net_enable_timestamp); void net_disable_timestamp(void) { -#ifdef HAVE_JUMP_LABEL - if (in_interrupt()) { - atomic_inc(&netstamp_needed_deferred); - return; - } -#endif - jump_label_dec(&netstamp_needed); + atomic_dec(&netstamp_needed); } EXPORT_SYMBOL(net_disable_timestamp); static inline void net_timestamp_set(struct sk_buff *skb) { - skb->tstamp.tv64 = 0; - if (static_branch(&netstamp_needed)) + if (atomic_read(&netstamp_needed)) __net_timestamp(skb); + else + skb->tstamp.tv64 = 0; } -#define net_timestamp_check(COND, SKB) \ - if (static_branch(&netstamp_needed)) { \ - if ((COND) && !(SKB)->tstamp.tv64) \ - __net_timestamp(SKB); \ - } \ +static inline void net_timestamp_check(struct sk_buff *skb) +{ + if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed)) + __net_timestamp(skb); +} static int net_hwtstamp_validate(struct ifreq *ifr) { @@ -1936,8 +1924,7 @@ EXPORT_SYMBOL(skb_checksum_help); * It may return NULL if the skb requires no segmentation. This is * only possible when GSO is used for verifying header integrity. */ -struct sk_buff *skb_gso_segment(struct sk_buff *skb, - netdev_features_t features) +struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features) { struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); struct packet_type *ptype; @@ -1967,9 +1954,9 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) dev->ethtool_ops->get_drvinfo(dev, &info); - WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d ip_summed=%d\n", - info.driver, dev ? &dev->features : NULL, - skb->sk ? &skb->sk->sk_route_caps : NULL, + WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n", + info.driver, dev ? dev->features : 0L, + skb->sk ? skb->sk->sk_route_caps : 0L, skb->len, skb->data_len, skb->ip_summed); if (skb_header_cloned(skb) && @@ -2078,7 +2065,7 @@ static void dev_gso_skb_destructor(struct sk_buff *skb) * This function segments the given skb and stores the list of segments * in skb->next. 
*/ -static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features) +static int dev_gso_segment(struct sk_buff *skb, int features) { struct sk_buff *segs; @@ -2117,7 +2104,7 @@ static inline void skb_orphan_try(struct sk_buff *skb) } } -static bool can_checksum_protocol(netdev_features_t features, __be16 protocol) +static bool can_checksum_protocol(unsigned long features, __be16 protocol) { return ((features & NETIF_F_GEN_CSUM) || ((features & NETIF_F_V4_CSUM) && @@ -2128,8 +2115,7 @@ static bool can_checksum_protocol(netdev_features_t features, __be16 protocol) protocol == htons(ETH_P_FCOE))); } -static netdev_features_t harmonize_features(struct sk_buff *skb, - __be16 protocol, netdev_features_t features) +static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features) { if (!can_checksum_protocol(features, protocol)) { features &= ~NETIF_F_ALL_CSUM; @@ -2141,10 +2127,10 @@ static netdev_features_t harmonize_features(struct sk_buff *skb, return features; } -netdev_features_t netif_skb_features(struct sk_buff *skb) +u32 netif_skb_features(struct sk_buff *skb) { __be16 protocol = skb->protocol; - netdev_features_t features = skb->dev->features; + u32 features = skb->dev->features; if (protocol == htons(ETH_P_8021Q)) { struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; @@ -2190,7 +2176,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, unsigned int skb_len; if (likely(!skb->next)) { - netdev_features_t features; + u32 features; /* * If device doesn't need skb->dst, release it right now while @@ -2271,7 +2257,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, return rc; } txq_trans_update(txq); - if (unlikely(netif_xmit_stopped(txq) && skb->next)) + if (unlikely(netif_tx_queue_stopped(txq) && skb->next)) return NETDEV_TX_BUSY; } while (skb->next); @@ -2471,18 +2457,6 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, return rc; } -#if IS_ENABLED(CONFIG_NETPRIO_CGROUP) -static void skb_update_prio(struct sk_buff *skb) -{ - struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap); - - if ((!skb->priority) && (skb->sk) && map) - skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx]; -} -#else -#define skb_update_prio(skb) -#endif - static DEFINE_PER_CPU(int, xmit_recursion); #define RECURSION_LIMIT 10 @@ -2523,8 +2497,6 @@ int dev_queue_xmit(struct sk_buff *skb) */ rcu_read_lock_bh(); - skb_update_prio(skb); - txq = dev_pick_tx(dev, skb); q = rcu_dereference_bh(txq->qdisc); @@ -2559,7 +2531,7 @@ int dev_queue_xmit(struct sk_buff *skb) HARD_TX_LOCK(dev, txq, cpu); - if (!netif_xmit_stopped(txq)) { + if (!netif_tx_queue_stopped(txq)) { __this_cpu_inc(xmit_recursion); rc = dev_hard_start_xmit(skb, dev, txq); __this_cpu_dec(xmit_recursion); @@ -2620,28 +2592,123 @@ static inline void ____napi_schedule(struct softnet_data *sd, */ void __skb_get_rxhash(struct sk_buff *skb) { - struct flow_keys keys; - u32 hash; + int nhoff, hash = 0, poff; + const struct ipv6hdr *ip6; + const struct iphdr *ip; + const struct vlan_hdr *vlan; + u8 ip_proto; + u32 addr1, addr2; + u16 proto; + union { + u32 v32; + u16 v16[2]; + } ports; + + nhoff = skb_network_offset(skb); + proto = skb->protocol; + +again: + switch (proto) { + case __constant_htons(ETH_P_IP): +ip: + if (!pskb_may_pull(skb, sizeof(*ip) + nhoff)) + goto done; - if (!skb_flow_dissect(skb, &keys)) - return; + ip = (const struct iphdr *) (skb->data + nhoff); + if (ip_is_fragment(ip)) + ip_proto = 0; + else + ip_proto = ip->protocol; + addr1 = 
(__force u32) ip->saddr; + addr2 = (__force u32) ip->daddr; + nhoff += ip->ihl * 4; + break; + case __constant_htons(ETH_P_IPV6): +ipv6: + if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff)) + goto done; + + ip6 = (const struct ipv6hdr *) (skb->data + nhoff); + ip_proto = ip6->nexthdr; + addr1 = (__force u32) ip6->saddr.s6_addr32[3]; + addr2 = (__force u32) ip6->daddr.s6_addr32[3]; + nhoff += 40; + break; + case __constant_htons(ETH_P_8021Q): + if (!pskb_may_pull(skb, sizeof(*vlan) + nhoff)) + goto done; + vlan = (const struct vlan_hdr *) (skb->data + nhoff); + proto = vlan->h_vlan_encapsulated_proto; + nhoff += sizeof(*vlan); + goto again; + case __constant_htons(ETH_P_PPP_SES): + if (!pskb_may_pull(skb, PPPOE_SES_HLEN + nhoff)) + goto done; + proto = *((__be16 *) (skb->data + nhoff + + sizeof(struct pppoe_hdr))); + nhoff += PPPOE_SES_HLEN; + switch (proto) { + case __constant_htons(PPP_IP): + goto ip; + case __constant_htons(PPP_IPV6): + goto ipv6; + default: + goto done; + } + default: + goto done; + } - if (keys.ports) { - if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]) - swap(keys.port16[0], keys.port16[1]); - skb->l4_rxhash = 1; + switch (ip_proto) { + case IPPROTO_GRE: + if (pskb_may_pull(skb, nhoff + 16)) { + u8 *h = skb->data + nhoff; + __be16 flags = *(__be16 *)h; + + /* + * Only look inside GRE if version zero and no + * routing + */ + if (!(flags & (GRE_VERSION|GRE_ROUTING))) { + proto = *(__be16 *)(h + 2); + nhoff += 4; + if (flags & GRE_CSUM) + nhoff += 4; + if (flags & GRE_KEY) + nhoff += 4; + if (flags & GRE_SEQ) + nhoff += 4; + goto again; + } + } + break; + case IPPROTO_IPIP: + goto again; + default: + break; + } + + ports.v32 = 0; + poff = proto_ports_offset(ip_proto); + if (poff >= 0) { + nhoff += poff; + if (pskb_may_pull(skb, nhoff + 4)) { + ports.v32 = * (__force u32 *) (skb->data + nhoff); + if (ports.v16[1] < ports.v16[0]) + swap(ports.v16[0], ports.v16[1]); + skb->l4_rxhash = 1; + } } /* get a consistent hash (same value on both flow directions) */ - if ((__force u32)keys.dst < (__force u32)keys.src) - swap(keys.dst, keys.src); + if (addr2 < addr1) + swap(addr1, addr2); - hash = jhash_3words((__force u32)keys.dst, - (__force u32)keys.src, - (__force u32)keys.ports, hashrnd); + hash = jhash_3words(addr1, addr2, ports.v32, hashrnd); if (!hash) hash = 1; +done: skb->rxhash = hash; } EXPORT_SYMBOL(__skb_get_rxhash); @@ -2652,8 +2719,6 @@ EXPORT_SYMBOL(__skb_get_rxhash); struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; EXPORT_SYMBOL(rps_sock_flow_table); -struct jump_label_key rps_needed __read_mostly; - static struct rps_dev_flow * set_rps_cpu(struct net_device *dev, struct sk_buff *skb, struct rps_dev_flow *rflow, u16 next_cpu) @@ -2933,11 +2998,12 @@ int netif_rx(struct sk_buff *skb) if (netpoll_rx(skb)) return NET_RX_DROP; - net_timestamp_check(netdev_tstamp_prequeue, skb); + if (netdev_tstamp_prequeue) + net_timestamp_check(skb); trace_netif_rx(skb); #ifdef CONFIG_RPS - if (static_branch(&rps_needed)) { + { struct rps_dev_flow voidflow, *rflow = &voidflow; int cpu; @@ -2952,13 +3018,14 @@ int netif_rx(struct sk_buff *skb) rcu_read_unlock(); preempt_enable(); - } else -#endif + } +#else { unsigned int qtail; ret = enqueue_to_backlog(skb, get_cpu(), &qtail); put_cpu(); } +#endif return ret; } EXPORT_SYMBOL(netif_rx); @@ -3164,7 +3231,8 @@ static int __netif_receive_skb(struct sk_buff *skb) int ret = NET_RX_DROP; __be16 type; - net_timestamp_check(!netdev_tstamp_prequeue, skb); + if (!netdev_tstamp_prequeue) + net_timestamp_check(skb); 
trace_netif_receive_skb(skb); @@ -3295,13 +3363,14 @@ static int __netif_receive_skb(struct sk_buff *skb) */ int netif_receive_skb(struct sk_buff *skb) { - net_timestamp_check(netdev_tstamp_prequeue, skb); + if (netdev_tstamp_prequeue) + net_timestamp_check(skb); if (skb_defer_rx_timestamp(skb)) return NET_RX_SUCCESS; #ifdef CONFIG_RPS - if (static_branch(&rps_needed)) { + { struct rps_dev_flow voidflow, *rflow = &voidflow; int cpu, ret; @@ -3312,12 +3381,16 @@ int netif_receive_skb(struct sk_buff *skb) if (cpu >= 0) { ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); rcu_read_unlock(); - return ret; + } else { + rcu_read_unlock(); + ret = __netif_receive_skb(skb); } - rcu_read_unlock(); + + return ret; } -#endif +#else return __netif_receive_skb(skb); +#endif } EXPORT_SYMBOL(netif_receive_skb); @@ -4466,7 +4539,7 @@ static void dev_change_rx_flags(struct net_device *dev, int flags) static int __dev_set_promiscuity(struct net_device *dev, int inc) { - unsigned int old_flags = dev->flags; + unsigned short old_flags = dev->flags; uid_t uid; gid_t gid; @@ -4523,7 +4596,7 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc) */ int dev_set_promiscuity(struct net_device *dev, int inc) { - unsigned int old_flags = dev->flags; + unsigned short old_flags = dev->flags; int err; err = __dev_set_promiscuity(dev, inc); @@ -4550,7 +4623,7 @@ EXPORT_SYMBOL(dev_set_promiscuity); int dev_set_allmulti(struct net_device *dev, int inc) { - unsigned int old_flags = dev->flags; + unsigned short old_flags = dev->flags; ASSERT_RTNL(); @@ -4653,7 +4726,7 @@ EXPORT_SYMBOL(dev_get_flags); int __dev_change_flags(struct net_device *dev, unsigned int flags) { - unsigned int old_flags = dev->flags; + int old_flags = dev->flags; int ret; ASSERT_RTNL(); @@ -4736,10 +4809,10 @@ void __dev_notify_flags(struct net_device *dev, unsigned int old_flags) * Change settings on device based state flags. The flags are * in the userspace exported format. */ -int dev_change_flags(struct net_device *dev, unsigned int flags) +int dev_change_flags(struct net_device *dev, unsigned flags) { - int ret; - unsigned int changes, old_flags = dev->flags; + int ret, changes; + int old_flags = dev->flags; ret = __dev_change_flags(dev, flags); if (ret < 0) @@ -5296,8 +5369,7 @@ static void rollback_registered(struct net_device *dev) list_del(&single); } -static netdev_features_t netdev_fix_features(struct net_device *dev, - netdev_features_t features) +static u32 netdev_fix_features(struct net_device *dev, u32 features) { /* Fix illegal checksum combinations */ if ((features & NETIF_F_HW_CSUM) && @@ -5306,6 +5378,12 @@ static netdev_features_t netdev_fix_features(struct net_device *dev, features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); } + if ((features & NETIF_F_NO_CSUM) && + (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { + netdev_warn(dev, "mixed no checksumming and other settings.\n"); + features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM); + } + /* Fix illegal SG+CSUM combinations. 
*/ if ((features & NETIF_F_SG) && !(features & NETIF_F_ALL_CSUM)) { @@ -5353,7 +5431,7 @@ static netdev_features_t netdev_fix_features(struct net_device *dev, int __netdev_update_features(struct net_device *dev) { - netdev_features_t features; + u32 features; int err = 0; ASSERT_RTNL(); @@ -5369,16 +5447,16 @@ int __netdev_update_features(struct net_device *dev) if (dev->features == features) return 0; - netdev_dbg(dev, "Features changed: %pNF -> %pNF\n", - &dev->features, &features); + netdev_dbg(dev, "Features changed: 0x%08x -> 0x%08x\n", + dev->features, features); if (dev->netdev_ops->ndo_set_features) err = dev->netdev_ops->ndo_set_features(dev, features); if (unlikely(err < 0)) { netdev_err(dev, - "set_features() failed (%d); wanted %pNF, left %pNF\n", - err, &features, &dev->features); + "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n", + err, features, dev->features); return -1; } @@ -5477,9 +5555,6 @@ static void netdev_init_one_queue(struct net_device *dev, queue->xmit_lock_owner = -1; netdev_queue_numa_node_write(queue, NUMA_NO_NODE); queue->dev = dev; -#ifdef CONFIG_BQL - dql_init(&queue->dql, HZ); -#endif } static int netif_alloc_netdev_queues(struct net_device *dev) @@ -5565,12 +5640,11 @@ int register_netdevice(struct net_device *dev) dev->wanted_features = dev->features & dev->hw_features; /* Turn on no cache copy if HW is doing checksum */ - if (!(dev->flags & IFF_LOOPBACK)) { - dev->hw_features |= NETIF_F_NOCACHE_COPY; - if (dev->features & NETIF_F_ALL_CSUM) { - dev->wanted_features |= NETIF_F_NOCACHE_COPY; - dev->features |= NETIF_F_NOCACHE_COPY; - } + dev->hw_features |= NETIF_F_NOCACHE_COPY; + if ((dev->features & NETIF_F_ALL_CSUM) && + !(dev->features & NETIF_F_NO_CSUM)) { + dev->wanted_features |= NETIF_F_NOCACHE_COPY; + dev->features |= NETIF_F_NOCACHE_COPY; } /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. @@ -6306,8 +6380,7 @@ static int dev_cpu_callback(struct notifier_block *nfb, * @one to the master device with current feature set @all. Will not * enable anything that is off in @mask. Returns the new feature set. */ -netdev_features_t netdev_increment_features(netdev_features_t all, - netdev_features_t one, netdev_features_t mask) +u32 netdev_increment_features(u32 all, u32 one, u32 mask) { if (mask & NETIF_F_GEN_CSUM) mask |= NETIF_F_ALL_CSUM; @@ -6316,6 +6389,10 @@ netdev_features_t netdev_increment_features(netdev_features_t all, all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask; all &= one | ~NETIF_F_ALL_FOR_ALL; + /* If device needs checksumming, downgrade to it. */ + if (all & (NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM)) + all &= ~NETIF_F_NO_CSUM; + /* If one device supports hw checksumming, set for all. 
*/ if (all & NETIF_F_GEN_CSUM) all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM); diff --git a/trunk/net/core/dst.c b/trunk/net/core/dst.c index 43d94cedbf7c..d5e2c4c09107 100644 --- a/trunk/net/core/dst.c +++ b/trunk/net/core/dst.c @@ -366,7 +366,7 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev, dev_hold(dst->dev); dev_put(dev); rcu_read_lock(); - neigh = dst_get_neighbour_noref(dst); + neigh = dst_get_neighbour(dst); if (neigh && neigh->dev == dev) { neigh->dev = dst->dev; dev_hold(dst->dev); diff --git a/trunk/net/core/ethtool.c b/trunk/net/core/ethtool.c index 921aa2b4b415..f44481707124 100644 --- a/trunk/net/core/ethtool.c +++ b/trunk/net/core/ethtool.c @@ -36,44 +36,235 @@ u32 ethtool_op_get_link(struct net_device *dev) } EXPORT_SYMBOL(ethtool_op_get_link); +u32 ethtool_op_get_tx_csum(struct net_device *dev) +{ + return (dev->features & NETIF_F_ALL_CSUM) != 0; +} +EXPORT_SYMBOL(ethtool_op_get_tx_csum); + +int ethtool_op_set_tx_csum(struct net_device *dev, u32 data) +{ + if (data) + dev->features |= NETIF_F_IP_CSUM; + else + dev->features &= ~NETIF_F_IP_CSUM; + + return 0; +} +EXPORT_SYMBOL(ethtool_op_set_tx_csum); + +int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data) +{ + if (data) + dev->features |= NETIF_F_HW_CSUM; + else + dev->features &= ~NETIF_F_HW_CSUM; + + return 0; +} +EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum); + +int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data) +{ + if (data) + dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + else + dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + + return 0; +} +EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum); + +u32 ethtool_op_get_sg(struct net_device *dev) +{ + return (dev->features & NETIF_F_SG) != 0; +} +EXPORT_SYMBOL(ethtool_op_get_sg); + +int ethtool_op_set_sg(struct net_device *dev, u32 data) +{ + if (data) + dev->features |= NETIF_F_SG; + else + dev->features &= ~NETIF_F_SG; + + return 0; +} +EXPORT_SYMBOL(ethtool_op_set_sg); + +u32 ethtool_op_get_tso(struct net_device *dev) +{ + return (dev->features & NETIF_F_TSO) != 0; +} +EXPORT_SYMBOL(ethtool_op_get_tso); + +int ethtool_op_set_tso(struct net_device *dev, u32 data) +{ + if (data) + dev->features |= NETIF_F_TSO; + else + dev->features &= ~NETIF_F_TSO; + + return 0; +} +EXPORT_SYMBOL(ethtool_op_set_tso); + +u32 ethtool_op_get_ufo(struct net_device *dev) +{ + return (dev->features & NETIF_F_UFO) != 0; +} +EXPORT_SYMBOL(ethtool_op_get_ufo); + +int ethtool_op_set_ufo(struct net_device *dev, u32 data) +{ + if (data) + dev->features |= NETIF_F_UFO; + else + dev->features &= ~NETIF_F_UFO; + return 0; +} +EXPORT_SYMBOL(ethtool_op_set_ufo); + +/* the following list of flags are the same as their associated + * NETIF_F_xxx values in include/linux/netdevice.h + */ +static const u32 flags_dup_features = + (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | ETH_FLAG_NTUPLE | + ETH_FLAG_RXHASH); + +u32 ethtool_op_get_flags(struct net_device *dev) +{ + /* in the future, this function will probably contain additional + * handling for flags which are not so easily handled + * by a simple masking operation + */ + + return dev->features & flags_dup_features; +} +EXPORT_SYMBOL(ethtool_op_get_flags); + +/* Check if device can enable (or disable) particular feature coded in "data" + * argument. Flags "supported" describe features that can be toggled by device. + * If feature can not be toggled, it state (enabled or disabled) must match + * hardcoded device features state, otherwise flags are marked as invalid. 
+ */ +bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported) +{ + u32 features = dev->features & flags_dup_features; + /* "data" can contain only flags_dup_features bits, + * see __ethtool_set_flags */ + + return (features & ~supported) != (data & ~supported); +} +EXPORT_SYMBOL(ethtool_invalid_flags); + +int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) +{ + if (ethtool_invalid_flags(dev, data, supported)) + return -EINVAL; + + dev->features = ((dev->features & ~flags_dup_features) | + (data & flags_dup_features)); + return 0; +} +EXPORT_SYMBOL(ethtool_op_set_flags); + /* Handlers for each ethtool command */ -#define ETHTOOL_DEV_FEATURE_WORDS ((NETDEV_FEATURE_COUNT + 31) / 32) - -static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = { - [NETIF_F_SG_BIT] = "tx-scatter-gather", - [NETIF_F_IP_CSUM_BIT] = "tx-checksum-ipv4", - [NETIF_F_HW_CSUM_BIT] = "tx-checksum-ip-generic", - [NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6", - [NETIF_F_HIGHDMA_BIT] = "highdma", - [NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist", - [NETIF_F_HW_VLAN_TX_BIT] = "tx-vlan-hw-insert", - - [NETIF_F_HW_VLAN_RX_BIT] = "rx-vlan-hw-parse", - [NETIF_F_HW_VLAN_FILTER_BIT] = "rx-vlan-filter", - [NETIF_F_VLAN_CHALLENGED_BIT] = "vlan-challenged", - [NETIF_F_GSO_BIT] = "tx-generic-segmentation", - [NETIF_F_LLTX_BIT] = "tx-lockless", - [NETIF_F_NETNS_LOCAL_BIT] = "netns-local", - [NETIF_F_GRO_BIT] = "rx-gro", - [NETIF_F_LRO_BIT] = "rx-lro", - - [NETIF_F_TSO_BIT] = "tx-tcp-segmentation", - [NETIF_F_UFO_BIT] = "tx-udp-fragmentation", - [NETIF_F_GSO_ROBUST_BIT] = "tx-gso-robust", - [NETIF_F_TSO_ECN_BIT] = "tx-tcp-ecn-segmentation", - [NETIF_F_TSO6_BIT] = "tx-tcp6-segmentation", - [NETIF_F_FSO_BIT] = "tx-fcoe-segmentation", - - [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc", - [NETIF_F_SCTP_CSUM_BIT] = "tx-checksum-sctp", - [NETIF_F_FCOE_MTU_BIT] = "fcoe-mtu", - [NETIF_F_NTUPLE_BIT] = "rx-ntuple-filter", - [NETIF_F_RXHASH_BIT] = "rx-hashing", - [NETIF_F_RXCSUM_BIT] = "rx-checksum", - [NETIF_F_NOCACHE_COPY_BIT] = "tx-nocache-copy", - [NETIF_F_LOOPBACK_BIT] = "loopback", -}; +#define ETHTOOL_DEV_FEATURE_WORDS 1 + +static void ethtool_get_features_compat(struct net_device *dev, + struct ethtool_get_features_block *features) +{ + if (!dev->ethtool_ops) + return; + + /* getting RX checksum */ + if (dev->ethtool_ops->get_rx_csum) + if (dev->ethtool_ops->get_rx_csum(dev)) + features[0].active |= NETIF_F_RXCSUM; + + /* mark legacy-changeable features */ + if (dev->ethtool_ops->set_sg) + features[0].available |= NETIF_F_SG; + if (dev->ethtool_ops->set_tx_csum) + features[0].available |= NETIF_F_ALL_CSUM; + if (dev->ethtool_ops->set_tso) + features[0].available |= NETIF_F_ALL_TSO; + if (dev->ethtool_ops->set_rx_csum) + features[0].available |= NETIF_F_RXCSUM; + if (dev->ethtool_ops->set_flags) + features[0].available |= flags_dup_features; +} + +static int ethtool_set_feature_compat(struct net_device *dev, + int (*legacy_set)(struct net_device *, u32), + struct ethtool_set_features_block *features, u32 mask) +{ + u32 do_set; + + if (!legacy_set) + return 0; + + if (!(features[0].valid & mask)) + return 0; + + features[0].valid &= ~mask; + + do_set = !!(features[0].requested & mask); + + if (legacy_set(dev, do_set) < 0) + netdev_info(dev, + "Legacy feature change (%s) failed for 0x%08x\n", + do_set ? 
"set" : "clear", mask); + + return 1; +} + +static int ethtool_set_flags_compat(struct net_device *dev, + int (*legacy_set)(struct net_device *, u32), + struct ethtool_set_features_block *features, u32 mask) +{ + u32 value; + + if (!legacy_set) + return 0; + + if (!(features[0].valid & mask)) + return 0; + + value = dev->features & ~features[0].valid; + value |= features[0].requested; + + features[0].valid &= ~mask; + + if (legacy_set(dev, value & mask) < 0) + netdev_info(dev, "Legacy flags change failed\n"); + + return 1; +} + +static int ethtool_set_features_compat(struct net_device *dev, + struct ethtool_set_features_block *features) +{ + int compat; + + if (!dev->ethtool_ops) + return 0; + + compat = ethtool_set_feature_compat(dev, dev->ethtool_ops->set_sg, + features, NETIF_F_SG); + compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tx_csum, + features, NETIF_F_ALL_CSUM); + compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tso, + features, NETIF_F_ALL_TSO); + compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum, + features, NETIF_F_RXCSUM); + compat |= ethtool_set_flags_compat(dev, dev->ethtool_ops->set_flags, + features, flags_dup_features); + + return compat; +} static int ethtool_get_features(struct net_device *dev, void __user *useraddr) { @@ -81,21 +272,18 @@ static int ethtool_get_features(struct net_device *dev, void __user *useraddr) .cmd = ETHTOOL_GFEATURES, .size = ETHTOOL_DEV_FEATURE_WORDS, }; - struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS]; + struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS] = { + { + .available = dev->hw_features, + .requested = dev->wanted_features, + .active = dev->features, + .never_changed = NETIF_F_NEVER_CHANGE, + }, + }; u32 __user *sizeaddr; u32 copy_size; - int i; - /* in case feature bits run out again */ - BUILD_BUG_ON(ETHTOOL_DEV_FEATURE_WORDS * sizeof(u32) > sizeof(netdev_features_t)); - - for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) { - features[i].available = (u32)(dev->hw_features >> (32 * i)); - features[i].requested = (u32)(dev->wanted_features >> (32 * i)); - features[i].active = (u32)(dev->features >> (32 * i)); - features[i].never_changed = - (u32)(NETIF_F_NEVER_CHANGE >> (32 * i)); - } + ethtool_get_features_compat(dev, features); sizeaddr = useraddr + offsetof(struct ethtool_gfeatures, size); if (get_user(copy_size, sizeaddr)) @@ -117,8 +305,7 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr) { struct ethtool_sfeatures cmd; struct ethtool_set_features_block features[ETHTOOL_DEV_FEATURE_WORDS]; - netdev_features_t wanted = 0, valid = 0; - int i, ret = 0; + int ret = 0; if (copy_from_user(&cmd, useraddr, sizeof(cmd))) return -EFAULT; @@ -130,29 +317,65 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr) if (copy_from_user(features, useraddr, sizeof(features))) return -EFAULT; - for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) { - valid |= (netdev_features_t)features[i].valid << (32 * i); - wanted |= (netdev_features_t)features[i].requested << (32 * i); - } - - if (valid & ~NETIF_F_ETHTOOL_BITS) + if (features[0].valid & ~NETIF_F_ETHTOOL_BITS) return -EINVAL; - if (valid & ~dev->hw_features) { - valid &= dev->hw_features; + if (ethtool_set_features_compat(dev, features)) + ret |= ETHTOOL_F_COMPAT; + + if (features[0].valid & ~dev->hw_features) { + features[0].valid &= dev->hw_features; ret |= ETHTOOL_F_UNSUPPORTED; } - dev->wanted_features &= ~valid; - dev->wanted_features |= wanted & 
valid; + dev->wanted_features &= ~features[0].valid; + dev->wanted_features |= features[0].valid & features[0].requested; __netdev_update_features(dev); - if ((dev->wanted_features ^ dev->features) & valid) + if ((dev->wanted_features ^ dev->features) & features[0].valid) ret |= ETHTOOL_F_WISH; return ret; } +static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GSTRING_LEN] = { + /* NETIF_F_SG */ "tx-scatter-gather", + /* NETIF_F_IP_CSUM */ "tx-checksum-ipv4", + /* NETIF_F_NO_CSUM */ "tx-checksum-unneeded", + /* NETIF_F_HW_CSUM */ "tx-checksum-ip-generic", + /* NETIF_F_IPV6_CSUM */ "tx-checksum-ipv6", + /* NETIF_F_HIGHDMA */ "highdma", + /* NETIF_F_FRAGLIST */ "tx-scatter-gather-fraglist", + /* NETIF_F_HW_VLAN_TX */ "tx-vlan-hw-insert", + + /* NETIF_F_HW_VLAN_RX */ "rx-vlan-hw-parse", + /* NETIF_F_HW_VLAN_FILTER */ "rx-vlan-filter", + /* NETIF_F_VLAN_CHALLENGED */ "vlan-challenged", + /* NETIF_F_GSO */ "tx-generic-segmentation", + /* NETIF_F_LLTX */ "tx-lockless", + /* NETIF_F_NETNS_LOCAL */ "netns-local", + /* NETIF_F_GRO */ "rx-gro", + /* NETIF_F_LRO */ "rx-lro", + + /* NETIF_F_TSO */ "tx-tcp-segmentation", + /* NETIF_F_UFO */ "tx-udp-fragmentation", + /* NETIF_F_GSO_ROBUST */ "tx-gso-robust", + /* NETIF_F_TSO_ECN */ "tx-tcp-ecn-segmentation", + /* NETIF_F_TSO6 */ "tx-tcp6-segmentation", + /* NETIF_F_FSO */ "tx-fcoe-segmentation", + "", + "", + + /* NETIF_F_FCOE_CRC */ "tx-checksum-fcoe-crc", + /* NETIF_F_SCTP_CSUM */ "tx-checksum-sctp", + /* NETIF_F_FCOE_MTU */ "fcoe-mtu", + /* NETIF_F_NTUPLE */ "rx-ntuple-filter", + /* NETIF_F_RXHASH */ "rx-hashing", + /* NETIF_F_RXCSUM */ "rx-checksum", + /* NETIF_F_NOCACHE_COPY */ "tx-nocache-copy", + /* NETIF_F_LOOPBACK */ "loopback", +}; + static int __ethtool_get_sset_count(struct net_device *dev, int sset) { const struct ethtool_ops *ops = dev->ethtool_ops; @@ -179,7 +402,7 @@ static void __ethtool_get_strings(struct net_device *dev, ops->get_strings(dev, stringset, data); } -static netdev_features_t ethtool_get_feature_mask(u32 eth_cmd) +static u32 ethtool_get_feature_mask(u32 eth_cmd) { /* feature masks of legacy discrete ethtool ops */ @@ -210,82 +433,136 @@ static netdev_features_t ethtool_get_feature_mask(u32 eth_cmd) } } +static void *__ethtool_get_one_feature_actor(struct net_device *dev, u32 ethcmd) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + + if (!ops) + return NULL; + + switch (ethcmd) { + case ETHTOOL_GTXCSUM: + return ops->get_tx_csum; + case ETHTOOL_GRXCSUM: + return ops->get_rx_csum; + case ETHTOOL_SSG: + return ops->get_sg; + case ETHTOOL_STSO: + return ops->get_tso; + case ETHTOOL_SUFO: + return ops->get_ufo; + default: + return NULL; + } +} + +static u32 __ethtool_get_rx_csum_oldbug(struct net_device *dev) +{ + return !!(dev->features & NETIF_F_ALL_CSUM); +} + static int ethtool_get_one_feature(struct net_device *dev, char __user *useraddr, u32 ethcmd) { - netdev_features_t mask = ethtool_get_feature_mask(ethcmd); + u32 mask = ethtool_get_feature_mask(ethcmd); struct ethtool_value edata = { .cmd = ethcmd, .data = !!(dev->features & mask), }; + /* compatibility with discrete get_ ops */ + if (!(dev->hw_features & mask)) { + u32 (*actor)(struct net_device *); + + actor = __ethtool_get_one_feature_actor(dev, ethcmd); + + /* bug compatibility with old get_rx_csum */ + if (ethcmd == ETHTOOL_GRXCSUM && !actor) + actor = __ethtool_get_rx_csum_oldbug; + + if (actor) + edata.data = actor(dev); + } + if (copy_to_user(useraddr, &edata, sizeof(edata))) return -EFAULT; return 0; } +static int 
__ethtool_set_tx_csum(struct net_device *dev, u32 data); +static int __ethtool_set_rx_csum(struct net_device *dev, u32 data); +static int __ethtool_set_sg(struct net_device *dev, u32 data); +static int __ethtool_set_tso(struct net_device *dev, u32 data); +static int __ethtool_set_ufo(struct net_device *dev, u32 data); + static int ethtool_set_one_feature(struct net_device *dev, void __user *useraddr, u32 ethcmd) { struct ethtool_value edata; - netdev_features_t mask; + u32 mask; if (copy_from_user(&edata, useraddr, sizeof(edata))) return -EFAULT; mask = ethtool_get_feature_mask(ethcmd); mask &= dev->hw_features; - if (!mask) - return -EOPNOTSUPP; - - if (edata.data) - dev->wanted_features |= mask; - else - dev->wanted_features &= ~mask; - - __netdev_update_features(dev); - - return 0; -} - -#define ETH_ALL_FLAGS (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | \ - ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH) -#define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_RX | \ - NETIF_F_HW_VLAN_TX | NETIF_F_NTUPLE | NETIF_F_RXHASH) + if (mask) { + if (edata.data) + dev->wanted_features |= mask; + else + dev->wanted_features &= ~mask; -static u32 __ethtool_get_flags(struct net_device *dev) -{ - u32 flags = 0; + __netdev_update_features(dev); + return 0; + } - if (dev->features & NETIF_F_LRO) flags |= ETH_FLAG_LRO; - if (dev->features & NETIF_F_HW_VLAN_RX) flags |= ETH_FLAG_RXVLAN; - if (dev->features & NETIF_F_HW_VLAN_TX) flags |= ETH_FLAG_TXVLAN; - if (dev->features & NETIF_F_NTUPLE) flags |= ETH_FLAG_NTUPLE; - if (dev->features & NETIF_F_RXHASH) flags |= ETH_FLAG_RXHASH; + /* Driver is not converted to ndo_fix_features or does not + * support changing this offload. In the latter case it won't + * have corresponding ethtool_ops field set. + * + * Following part is to be removed after all drivers advertise + * their changeable features in netdev->hw_features and stop + * using discrete offload setting ops. + */ - return flags; + switch (ethcmd) { + case ETHTOOL_STXCSUM: + return __ethtool_set_tx_csum(dev, edata.data); + case ETHTOOL_SRXCSUM: + return __ethtool_set_rx_csum(dev, edata.data); + case ETHTOOL_SSG: + return __ethtool_set_sg(dev, edata.data); + case ETHTOOL_STSO: + return __ethtool_set_tso(dev, edata.data); + case ETHTOOL_SUFO: + return __ethtool_set_ufo(dev, edata.data); + default: + return -EOPNOTSUPP; + } } -static int __ethtool_set_flags(struct net_device *dev, u32 data) +int __ethtool_set_flags(struct net_device *dev, u32 data) { - netdev_features_t features = 0, changed; + u32 changed; - if (data & ~ETH_ALL_FLAGS) + if (data & ~flags_dup_features) return -EINVAL; - if (data & ETH_FLAG_LRO) features |= NETIF_F_LRO; - if (data & ETH_FLAG_RXVLAN) features |= NETIF_F_HW_VLAN_RX; - if (data & ETH_FLAG_TXVLAN) features |= NETIF_F_HW_VLAN_TX; - if (data & ETH_FLAG_NTUPLE) features |= NETIF_F_NTUPLE; - if (data & ETH_FLAG_RXHASH) features |= NETIF_F_RXHASH; + /* legacy set_flags() op */ + if (dev->ethtool_ops->set_flags) { + if (unlikely(dev->hw_features & flags_dup_features)) + netdev_warn(dev, + "driver BUG: mixed hw_features and set_flags()\n"); + return dev->ethtool_ops->set_flags(dev, data); + } /* allow changing only bits set in hw_features */ - changed = (features ^ dev->features) & ETH_ALL_FEATURES; + changed = (data ^ dev->features) & flags_dup_features; if (changed & ~dev->hw_features) return (changed & dev->hw_features) ? 
-EINVAL : -EOPNOTSUPP; dev->wanted_features = - (dev->wanted_features & ~changed) | (features & changed); + (dev->wanted_features & ~changed) | (data & dev->hw_features); __netdev_update_features(dev); @@ -439,7 +716,6 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, { struct ethtool_rxnfc info; size_t info_size = sizeof(info); - int rc; if (!dev->ethtool_ops->set_rxnfc) return -EOPNOTSUPP; @@ -455,15 +731,7 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, if (copy_from_user(&info, useraddr, info_size)) return -EFAULT; - rc = dev->ethtool_ops->set_rxnfc(dev, &info); - if (rc) - return rc; - - if (cmd == ETHTOOL_SRXCLSRLINS && - copy_to_user(useraddr, &info, info_size)) - return -EFAULT; - - return 0; + return dev->ethtool_ops->set_rxnfc(dev, &info); } static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, @@ -524,44 +792,34 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev, void __user *useraddr) { - u32 user_size, dev_size; - u32 *indir; + struct ethtool_rxfh_indir *indir; + u32 table_size; + size_t full_size; int ret; - if (!dev->ethtool_ops->get_rxfh_indir_size || - !dev->ethtool_ops->get_rxfh_indir) - return -EOPNOTSUPP; - dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev); - if (dev_size == 0) + if (!dev->ethtool_ops->get_rxfh_indir) return -EOPNOTSUPP; - if (copy_from_user(&user_size, + if (copy_from_user(&table_size, useraddr + offsetof(struct ethtool_rxfh_indir, size), - sizeof(user_size))) + sizeof(table_size))) return -EFAULT; - if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh_indir, size), - &dev_size, sizeof(dev_size))) - return -EFAULT; - - /* If the user buffer size is 0, this is just a query for the - * device table size. Otherwise, if it's smaller than the - * device table size it's an error. - */ - if (user_size < dev_size) - return user_size == 0 ? 
0 : -EINVAL; - - indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER); + if (table_size > + (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index)) + return -ENOMEM; + full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size; + indir = kzalloc(full_size, GFP_USER); if (!indir) return -ENOMEM; + indir->cmd = ETHTOOL_GRXFHINDIR; + indir->size = table_size; ret = dev->ethtool_ops->get_rxfh_indir(dev, indir); if (ret) goto out; - if (copy_to_user(useraddr + - offsetof(struct ethtool_rxfh_indir, ring_index[0]), - indir, dev_size * sizeof(indir[0]))) + if (copy_to_user(useraddr, indir, full_size)) ret = -EFAULT; out: @@ -572,56 +830,30 @@ static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev, static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev, void __user *useraddr) { - struct ethtool_rxnfc rx_rings; - u32 user_size, dev_size, i; - u32 *indir; + struct ethtool_rxfh_indir *indir; + u32 table_size; + size_t full_size; int ret; - if (!dev->ethtool_ops->get_rxfh_indir_size || - !dev->ethtool_ops->set_rxfh_indir || - !dev->ethtool_ops->get_rxnfc) - return -EOPNOTSUPP; - dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev); - if (dev_size == 0) + if (!dev->ethtool_ops->set_rxfh_indir) return -EOPNOTSUPP; - if (copy_from_user(&user_size, + if (copy_from_user(&table_size, useraddr + offsetof(struct ethtool_rxfh_indir, size), - sizeof(user_size))) + sizeof(table_size))) return -EFAULT; - if (user_size != 0 && user_size != dev_size) - return -EINVAL; - - indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER); + if (table_size > + (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index)) + return -ENOMEM; + full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size; + indir = kmalloc(full_size, GFP_USER); if (!indir) return -ENOMEM; - rx_rings.cmd = ETHTOOL_GRXRINGS; - ret = dev->ethtool_ops->get_rxnfc(dev, &rx_rings, NULL); - if (ret) + if (copy_from_user(indir, useraddr, full_size)) { + ret = -EFAULT; goto out; - - if (user_size == 0) { - for (i = 0; i < dev_size; i++) - indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data); - } else { - if (copy_from_user(indir, - useraddr + - offsetof(struct ethtool_rxfh_indir, - ring_index[0]), - dev_size * sizeof(indir[0]))) { - ret = -EFAULT; - goto out; - } - - /* Validate ring indices */ - for (i = 0; i < dev_size; i++) { - if (indir[i] >= rx_rings.data) { - ret = -EINVAL; - goto out; - } - } } ret = dev->ethtool_ops->set_rxfh_indir(dev, indir); @@ -631,6 +863,58 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev, return ret; } +/* + * ethtool does not (or did not) set masks for flow parameters that are + * not specified, so if both value and mask are 0 then this must be + * treated as equivalent to a mask with all bits set. Implement that + * here rather than in drivers. 
+ */ +static void rx_ntuple_fix_masks(struct ethtool_rx_ntuple_flow_spec *fs) +{ + struct ethtool_tcpip4_spec *entry = &fs->h_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *mask = &fs->m_u.tcp_ip4_spec; + + if (fs->flow_type != TCP_V4_FLOW && + fs->flow_type != UDP_V4_FLOW && + fs->flow_type != SCTP_V4_FLOW) + return; + + if (!(entry->ip4src | mask->ip4src)) + mask->ip4src = htonl(0xffffffff); + if (!(entry->ip4dst | mask->ip4dst)) + mask->ip4dst = htonl(0xffffffff); + if (!(entry->psrc | mask->psrc)) + mask->psrc = htons(0xffff); + if (!(entry->pdst | mask->pdst)) + mask->pdst = htons(0xffff); + if (!(entry->tos | mask->tos)) + mask->tos = 0xff; + if (!(fs->vlan_tag | fs->vlan_tag_mask)) + fs->vlan_tag_mask = 0xffff; + if (!(fs->data | fs->data_mask)) + fs->data_mask = 0xffffffffffffffffULL; +} + +static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev, + void __user *useraddr) +{ + struct ethtool_rx_ntuple cmd; + const struct ethtool_ops *ops = dev->ethtool_ops; + + if (!ops->set_rx_ntuple) + return -EOPNOTSUPP; + + if (!(dev->features & NETIF_F_NTUPLE)) + return -EINVAL; + + if (copy_from_user(&cmd, useraddr, sizeof(cmd))) + return -EFAULT; + + rx_ntuple_fix_masks(&cmd.fs); + + return ops->set_rx_ntuple(dev, &cmd); +} + static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) { struct ethtool_regs regs; @@ -947,6 +1231,81 @@ static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr) return dev->ethtool_ops->set_pauseparam(dev, &pauseparam); } +static int __ethtool_set_sg(struct net_device *dev, u32 data) +{ + int err; + + if (!dev->ethtool_ops->set_sg) + return -EOPNOTSUPP; + + if (data && !(dev->features & NETIF_F_ALL_CSUM)) + return -EINVAL; + + if (!data && dev->ethtool_ops->set_tso) { + err = dev->ethtool_ops->set_tso(dev, 0); + if (err) + return err; + } + + if (!data && dev->ethtool_ops->set_ufo) { + err = dev->ethtool_ops->set_ufo(dev, 0); + if (err) + return err; + } + return dev->ethtool_ops->set_sg(dev, data); +} + +static int __ethtool_set_tx_csum(struct net_device *dev, u32 data) +{ + int err; + + if (!dev->ethtool_ops->set_tx_csum) + return -EOPNOTSUPP; + + if (!data && dev->ethtool_ops->set_sg) { + err = __ethtool_set_sg(dev, 0); + if (err) + return err; + } + + return dev->ethtool_ops->set_tx_csum(dev, data); +} + +static int __ethtool_set_rx_csum(struct net_device *dev, u32 data) +{ + if (!dev->ethtool_ops->set_rx_csum) + return -EOPNOTSUPP; + + if (!data) + dev->features &= ~NETIF_F_GRO; + + return dev->ethtool_ops->set_rx_csum(dev, data); +} + +static int __ethtool_set_tso(struct net_device *dev, u32 data) +{ + if (!dev->ethtool_ops->set_tso) + return -EOPNOTSUPP; + + if (data && !(dev->features & NETIF_F_SG)) + return -EINVAL; + + return dev->ethtool_ops->set_tso(dev, data); +} + +static int __ethtool_set_ufo(struct net_device *dev, u32 data) +{ + if (!dev->ethtool_ops->set_ufo) + return -EOPNOTSUPP; + if (data && !(dev->features & NETIF_F_SG)) + return -EINVAL; + if (data && !((dev->features & NETIF_F_GEN_CSUM) || + (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) + == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) + return -EINVAL; + return dev->ethtool_ops->set_ufo(dev, data); +} + static int ethtool_self_test(struct net_device *dev, char __user *useraddr) { struct ethtool_test test; @@ -1412,7 +1771,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) break; case ETHTOOL_GFLAGS: rc = ethtool_get_value(dev, useraddr, ethcmd, - __ethtool_get_flags); + (dev->ethtool_ops->get_flags ? 
+ dev->ethtool_ops->get_flags : + ethtool_op_get_flags)); break; case ETHTOOL_SFLAGS: rc = ethtool_set_value(dev, useraddr, __ethtool_set_flags); @@ -1443,6 +1804,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_RESET: rc = ethtool_reset(dev, useraddr); break; + case ETHTOOL_SRXNTUPLE: + rc = ethtool_set_rx_ntuple(dev, useraddr); + break; case ETHTOOL_GSSET_INFO: rc = ethtool_get_sset_info(dev, useraddr); break; diff --git a/trunk/net/core/flow_dissector.c b/trunk/net/core/flow_dissector.c deleted file mode 100644 index 0985b9b14b80..000000000000 --- a/trunk/net/core/flow_dissector.c +++ /dev/null @@ -1,143 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* copy saddr & daddr, possibly using 64bit load/store - * Equivalent to : flow->src = iph->saddr; - * flow->dst = iph->daddr; - */ -static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph) -{ - BUILD_BUG_ON(offsetof(typeof(*flow), dst) != - offsetof(typeof(*flow), src) + sizeof(flow->src)); - memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst)); -} - -bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow) -{ - int poff, nhoff = skb_network_offset(skb); - u8 ip_proto; - __be16 proto = skb->protocol; - - memset(flow, 0, sizeof(*flow)); - -again: - switch (proto) { - case __constant_htons(ETH_P_IP): { - const struct iphdr *iph; - struct iphdr _iph; -ip: - iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); - if (!iph) - return false; - - if (ip_is_fragment(iph)) - ip_proto = 0; - else - ip_proto = iph->protocol; - iph_to_flow_copy_addrs(flow, iph); - nhoff += iph->ihl * 4; - break; - } - case __constant_htons(ETH_P_IPV6): { - const struct ipv6hdr *iph; - struct ipv6hdr _iph; -ipv6: - iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); - if (!iph) - return false; - - ip_proto = iph->nexthdr; - flow->src = iph->saddr.s6_addr32[3]; - flow->dst = iph->daddr.s6_addr32[3]; - nhoff += sizeof(struct ipv6hdr); - break; - } - case __constant_htons(ETH_P_8021Q): { - const struct vlan_hdr *vlan; - struct vlan_hdr _vlan; - - vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan); - if (!vlan) - return false; - - proto = vlan->h_vlan_encapsulated_proto; - nhoff += sizeof(*vlan); - goto again; - } - case __constant_htons(ETH_P_PPP_SES): { - struct { - struct pppoe_hdr hdr; - __be16 proto; - } *hdr, _hdr; - hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr); - if (!hdr) - return false; - proto = hdr->proto; - nhoff += PPPOE_SES_HLEN; - switch (proto) { - case __constant_htons(PPP_IP): - goto ip; - case __constant_htons(PPP_IPV6): - goto ipv6; - default: - return false; - } - } - default: - return false; - } - - switch (ip_proto) { - case IPPROTO_GRE: { - struct gre_hdr { - __be16 flags; - __be16 proto; - } *hdr, _hdr; - - hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr); - if (!hdr) - return false; - /* - * Only look inside GRE if version zero and no - * routing - */ - if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) { - proto = hdr->proto; - nhoff += 4; - if (hdr->flags & GRE_CSUM) - nhoff += 4; - if (hdr->flags & GRE_KEY) - nhoff += 4; - if (hdr->flags & GRE_SEQ) - nhoff += 4; - goto again; - } - break; - } - case IPPROTO_IPIP: - goto again; - default: - break; - } - - flow->ip_proto = ip_proto; - poff = proto_ports_offset(ip_proto); - if (poff >= 0) { - __be32 *ports, _ports; - - nhoff += poff; - ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports); - if (ports) - flow->ports 
= *ports; - } - - return true; -} -EXPORT_SYMBOL(skb_flow_dissect); diff --git a/trunk/net/core/neighbour.c b/trunk/net/core/neighbour.c index e287346e0934..5ac07d31fbc9 100644 --- a/trunk/net/core/neighbour.c +++ b/trunk/net/core/neighbour.c @@ -238,7 +238,6 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev) it to safe state. */ skb_queue_purge(&n->arp_queue); - n->arp_queue_len_bytes = 0; n->output = neigh_blackhole; if (n->nud_state & NUD_VALID) n->nud_state = NUD_NOARP; @@ -273,7 +272,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev) } EXPORT_SYMBOL(neigh_ifdown); -static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev) +static struct neighbour *neigh_alloc(struct neigh_table *tbl) { struct neighbour *n = NULL; unsigned long now = jiffies; @@ -288,15 +287,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device goto out_entries; } - if (tbl->entry_size) - n = kzalloc(tbl->entry_size, GFP_ATOMIC); - else { - int sz = sizeof(*n) + tbl->key_len; - - sz = ALIGN(sz, NEIGH_PRIV_ALIGN); - sz += dev->neigh_priv_len; - n = kzalloc(sz, GFP_ATOMIC); - } + n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC); if (!n) goto out_entries; @@ -322,18 +313,11 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device goto out; } -static void neigh_get_hash_rnd(u32 *x) -{ - get_random_bytes(x, sizeof(*x)); - *x |= 1; -} - static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift) { size_t size = (1 << shift) * sizeof(struct neighbour *); struct neigh_hash_table *ret; struct neighbour __rcu **buckets; - int i; ret = kmalloc(sizeof(*ret), GFP_ATOMIC); if (!ret) @@ -350,8 +334,8 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift) } ret->hash_buckets = buckets; ret->hash_shift = shift; - for (i = 0; i < NEIGH_NUM_HASH_RND; i++) - neigh_get_hash_rnd(&ret->hash_rnd[i]); + get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd)); + ret->hash_rnd |= 1; return ret; } @@ -478,7 +462,7 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey, u32 hash_val; int key_len = tbl->key_len; int error; - struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev); + struct neighbour *n1, *rc, *n = neigh_alloc(tbl); struct neigh_hash_table *nht; if (!n) { @@ -496,14 +480,6 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey, goto out_neigh_release; } - if (dev->netdev_ops->ndo_neigh_construct) { - error = dev->netdev_ops->ndo_neigh_construct(n); - if (error < 0) { - rc = ERR_PTR(error); - goto out_neigh_release; - } - } - /* Device specific setup. 
*/ if (n->parms->neigh_setup && (error = n->parms->neigh_setup(n)) < 0) { @@ -701,14 +677,18 @@ static inline void neigh_parms_put(struct neigh_parms *parms) neigh_parms_destroy(parms); } +static void neigh_destroy_rcu(struct rcu_head *head) +{ + struct neighbour *neigh = container_of(head, struct neighbour, rcu); + + kmem_cache_free(neigh->tbl->kmem_cachep, neigh); +} /* * neighbour must already be out of the table; * */ void neigh_destroy(struct neighbour *neigh) { - struct net_device *dev = neigh->dev; - NEIGH_CACHE_STAT_INC(neigh->tbl, destroys); if (!neigh->dead) { @@ -722,18 +702,14 @@ void neigh_destroy(struct neighbour *neigh) printk(KERN_WARNING "Impossible event.\n"); skb_queue_purge(&neigh->arp_queue); - neigh->arp_queue_len_bytes = 0; - - if (dev->netdev_ops->ndo_neigh_destroy) - dev->netdev_ops->ndo_neigh_destroy(neigh); - dev_put(dev); + dev_put(neigh->dev); neigh_parms_put(neigh->parms); NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh); atomic_dec(&neigh->tbl->entries); - kfree_rcu(neigh, rcu); + call_rcu(&neigh->rcu, neigh_destroy_rcu); } EXPORT_SYMBOL(neigh_destroy); @@ -866,7 +842,6 @@ static void neigh_invalidate(struct neighbour *neigh) write_lock(&neigh->lock); } skb_queue_purge(&neigh->arp_queue); - neigh->arp_queue_len_bytes = 0; } static void neigh_probe(struct neighbour *neigh) @@ -1005,20 +980,15 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) if (neigh->nud_state == NUD_INCOMPLETE) { if (skb) { - while (neigh->arp_queue_len_bytes + skb->truesize > - neigh->parms->queue_len_bytes) { + if (skb_queue_len(&neigh->arp_queue) >= + neigh->parms->queue_len) { struct sk_buff *buff; - buff = __skb_dequeue(&neigh->arp_queue); - if (!buff) - break; - neigh->arp_queue_len_bytes -= buff->truesize; kfree_skb(buff); NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards); } skb_dst_force(skb); __skb_queue_tail(&neigh->arp_queue, skb); - neigh->arp_queue_len_bytes += skb->truesize; } rc = 1; } @@ -1197,7 +1167,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, rcu_read_lock(); /* On shaper/eql skb->dst->neighbour != neigh :( */ - if (dst && (n2 = dst_get_neighbour_noref(dst)) != NULL) + if (dst && (n2 = dst_get_neighbour(dst)) != NULL) n1 = n2; n1->output(n1, skb); rcu_read_unlock(); @@ -1205,7 +1175,6 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, write_lock_bh(&neigh->lock); } skb_queue_purge(&neigh->arp_queue); - neigh->arp_queue_len_bytes = 0; } out: if (update_isrouter) { @@ -1508,6 +1477,11 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl) tbl->parms.reachable_time = neigh_rand_reach_time(tbl->parms.base_reachable_time); + if (!tbl->kmem_cachep) + tbl->kmem_cachep = + kmem_cache_create(tbl->id, tbl->entry_size, 0, + SLAB_HWCACHE_ALIGN|SLAB_PANIC, + NULL); tbl->stats = alloc_percpu(struct neigh_statistics); if (!tbl->stats) panic("cannot create neighbour cache statistics"); @@ -1592,6 +1566,9 @@ int neigh_table_clear(struct neigh_table *tbl) free_percpu(tbl->stats); tbl->stats = NULL; + kmem_cache_destroy(tbl->kmem_cachep); + tbl->kmem_cachep = NULL; + return 0; } EXPORT_SYMBOL(neigh_table_clear); @@ -1770,11 +1747,7 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms) NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex); NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)); - NLA_PUT_U32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes); - /* approximative value for deprecated QUEUE_LEN (in packets) */ - NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, - 
DIV_ROUND_UP(parms->queue_len_bytes, - SKB_TRUESIZE(ETH_FRAME_LEN))); + NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len); NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen); NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes); NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes); @@ -1835,7 +1808,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, rcu_read_lock_bh(); nht = rcu_dereference_bh(tbl->nht); - ndc.ndtc_hash_rnd = nht->hash_rnd[0]; + ndc.ndtc_hash_rnd = nht->hash_rnd; ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1); rcu_read_unlock_bh(); @@ -2001,11 +1974,7 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) switch (i) { case NDTPA_QUEUE_LEN: - p->queue_len_bytes = nla_get_u32(tbp[i]) * - SKB_TRUESIZE(ETH_FRAME_LEN); - break; - case NDTPA_QUEUE_LENBYTES: - p->queue_len_bytes = nla_get_u32(tbp[i]); + p->queue_len = nla_get_u32(tbp[i]); break; case NDTPA_PROXY_QLEN: p->proxy_qlen = nla_get_u32(tbp[i]); @@ -2669,158 +2638,117 @@ EXPORT_SYMBOL(neigh_app_ns); #ifdef CONFIG_SYSCTL -static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer, - size_t *lenp, loff_t *ppos) -{ - int size, ret; - ctl_table tmp = *ctl; - - tmp.data = &size; - size = DIV_ROUND_UP(*(int *)ctl->data, SKB_TRUESIZE(ETH_FRAME_LEN)); - ret = proc_dointvec(&tmp, write, buffer, lenp, ppos); - if (write && !ret) - *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN); - return ret; -} - -enum { - NEIGH_VAR_MCAST_PROBE, - NEIGH_VAR_UCAST_PROBE, - NEIGH_VAR_APP_PROBE, - NEIGH_VAR_RETRANS_TIME, - NEIGH_VAR_BASE_REACHABLE_TIME, - NEIGH_VAR_DELAY_PROBE_TIME, - NEIGH_VAR_GC_STALETIME, - NEIGH_VAR_QUEUE_LEN, - NEIGH_VAR_QUEUE_LEN_BYTES, - NEIGH_VAR_PROXY_QLEN, - NEIGH_VAR_ANYCAST_DELAY, - NEIGH_VAR_PROXY_DELAY, - NEIGH_VAR_LOCKTIME, - NEIGH_VAR_RETRANS_TIME_MS, - NEIGH_VAR_BASE_REACHABLE_TIME_MS, - NEIGH_VAR_GC_INTERVAL, - NEIGH_VAR_GC_THRESH1, - NEIGH_VAR_GC_THRESH2, - NEIGH_VAR_GC_THRESH3, - NEIGH_VAR_MAX -}; +#define NEIGH_VARS_MAX 19 static struct neigh_sysctl_table { struct ctl_table_header *sysctl_header; - struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1]; + struct ctl_table neigh_vars[NEIGH_VARS_MAX]; char *dev_name; } neigh_sysctl_template __read_mostly = { .neigh_vars = { - [NEIGH_VAR_MCAST_PROBE] = { + { .procname = "mcast_solicit", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, - [NEIGH_VAR_UCAST_PROBE] = { + { .procname = "ucast_solicit", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, - [NEIGH_VAR_APP_PROBE] = { + { .procname = "app_solicit", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, - [NEIGH_VAR_RETRANS_TIME] = { + { .procname = "retrans_time", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_userhz_jiffies, }, - [NEIGH_VAR_BASE_REACHABLE_TIME] = { + { .procname = "base_reachable_time", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, - [NEIGH_VAR_DELAY_PROBE_TIME] = { + { .procname = "delay_first_probe_time", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, - [NEIGH_VAR_GC_STALETIME] = { + { .procname = "gc_stale_time", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, - [NEIGH_VAR_QUEUE_LEN] = { + { .procname = "unres_qlen", .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_unres_qlen, - }, - [NEIGH_VAR_QUEUE_LEN_BYTES] = { - .procname = "unres_qlen_bytes", - .maxlen = sizeof(int), - .mode = 0644, .proc_handler = proc_dointvec, }, - 
[NEIGH_VAR_PROXY_QLEN] = { + { .procname = "proxy_qlen", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, - [NEIGH_VAR_ANYCAST_DELAY] = { + { .procname = "anycast_delay", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_userhz_jiffies, }, - [NEIGH_VAR_PROXY_DELAY] = { + { .procname = "proxy_delay", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_userhz_jiffies, }, - [NEIGH_VAR_LOCKTIME] = { + { .procname = "locktime", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_userhz_jiffies, }, - [NEIGH_VAR_RETRANS_TIME_MS] = { + { .procname = "retrans_time_ms", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_ms_jiffies, }, - [NEIGH_VAR_BASE_REACHABLE_TIME_MS] = { + { .procname = "base_reachable_time_ms", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_ms_jiffies, }, - [NEIGH_VAR_GC_INTERVAL] = { + { .procname = "gc_interval", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, - [NEIGH_VAR_GC_THRESH1] = { + { .procname = "gc_thresh1", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, - [NEIGH_VAR_GC_THRESH2] = { + { .procname = "gc_thresh2", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, - [NEIGH_VAR_GC_THRESH3] = { + { .procname = "gc_thresh3", .maxlen = sizeof(int), .mode = 0644, @@ -2853,49 +2781,47 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, if (!t) goto err; - t->neigh_vars[NEIGH_VAR_MCAST_PROBE].data = &p->mcast_probes; - t->neigh_vars[NEIGH_VAR_UCAST_PROBE].data = &p->ucast_probes; - t->neigh_vars[NEIGH_VAR_APP_PROBE].data = &p->app_probes; - t->neigh_vars[NEIGH_VAR_RETRANS_TIME].data = &p->retrans_time; - t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].data = &p->base_reachable_time; - t->neigh_vars[NEIGH_VAR_DELAY_PROBE_TIME].data = &p->delay_probe_time; - t->neigh_vars[NEIGH_VAR_GC_STALETIME].data = &p->gc_staletime; - t->neigh_vars[NEIGH_VAR_QUEUE_LEN].data = &p->queue_len_bytes; - t->neigh_vars[NEIGH_VAR_QUEUE_LEN_BYTES].data = &p->queue_len_bytes; - t->neigh_vars[NEIGH_VAR_PROXY_QLEN].data = &p->proxy_qlen; - t->neigh_vars[NEIGH_VAR_ANYCAST_DELAY].data = &p->anycast_delay; - t->neigh_vars[NEIGH_VAR_PROXY_DELAY].data = &p->proxy_delay; - t->neigh_vars[NEIGH_VAR_LOCKTIME].data = &p->locktime; - t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].data = &p->retrans_time; - t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].data = &p->base_reachable_time; + t->neigh_vars[0].data = &p->mcast_probes; + t->neigh_vars[1].data = &p->ucast_probes; + t->neigh_vars[2].data = &p->app_probes; + t->neigh_vars[3].data = &p->retrans_time; + t->neigh_vars[4].data = &p->base_reachable_time; + t->neigh_vars[5].data = &p->delay_probe_time; + t->neigh_vars[6].data = &p->gc_staletime; + t->neigh_vars[7].data = &p->queue_len; + t->neigh_vars[8].data = &p->proxy_qlen; + t->neigh_vars[9].data = &p->anycast_delay; + t->neigh_vars[10].data = &p->proxy_delay; + t->neigh_vars[11].data = &p->locktime; + t->neigh_vars[12].data = &p->retrans_time; + t->neigh_vars[13].data = &p->base_reachable_time; if (dev) { dev_name_source = dev->name; /* Terminate the table early */ - memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0, - sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL])); + memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14])); } else { dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname; - t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1); - t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1; - 
t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2; - t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3; + t->neigh_vars[14].data = (int *)(p + 1); + t->neigh_vars[15].data = (int *)(p + 1) + 1; + t->neigh_vars[16].data = (int *)(p + 1) + 2; + t->neigh_vars[17].data = (int *)(p + 1) + 3; } if (handler) { /* RetransTime */ - t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler; - t->neigh_vars[NEIGH_VAR_RETRANS_TIME].extra1 = dev; + t->neigh_vars[3].proc_handler = handler; + t->neigh_vars[3].extra1 = dev; /* ReachableTime */ - t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler; - t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].extra1 = dev; + t->neigh_vars[4].proc_handler = handler; + t->neigh_vars[4].extra1 = dev; /* RetransTime (in milliseconds)*/ - t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler; - t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].extra1 = dev; + t->neigh_vars[12].proc_handler = handler; + t->neigh_vars[12].extra1 = dev; /* ReachableTime (in milliseconds) */ - t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler; - t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev; + t->neigh_vars[13].proc_handler = handler; + t->neigh_vars[13].extra1 = dev; } t->dev_name = kstrdup(dev_name_source, GFP_KERNEL); diff --git a/trunk/net/core/net-sysfs.c b/trunk/net/core/net-sysfs.c index abf4393a77b3..385aefe53648 100644 --- a/trunk/net/core/net-sysfs.c +++ b/trunk/net/core/net-sysfs.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include "net-sysfs.h" @@ -607,12 +606,9 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue, rcu_assign_pointer(queue->rps_map, map); spin_unlock(&rps_map_lock); - if (map) - jump_label_inc(&rps_needed); - if (old_map) { + if (old_map) kfree_rcu(old_map, rcu); - jump_label_dec(&rps_needed); - } + free_cpumask_var(mask); return len; } @@ -622,15 +618,15 @@ static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, char *buf) { struct rps_dev_flow_table *flow_table; - unsigned long val = 0; + unsigned int val = 0; rcu_read_lock(); flow_table = rcu_dereference(queue->rps_flow_table); if (flow_table) - val = (unsigned long)flow_table->mask + 1; + val = flow_table->mask + 1; rcu_read_unlock(); - return sprintf(buf, "%lu\n", val); + return sprintf(buf, "%u\n", val); } static void rps_dev_flow_table_release_work(struct work_struct *work) @@ -654,46 +650,36 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, struct rx_queue_attribute *attr, const char *buf, size_t len) { - unsigned long mask, count; + unsigned int count; + char *endp; struct rps_dev_flow_table *table, *old_table; static DEFINE_SPINLOCK(rps_dev_flow_lock); - int rc; if (!capable(CAP_NET_ADMIN)) return -EPERM; - rc = kstrtoul(buf, 0, &count); - if (rc < 0) - return rc; + count = simple_strtoul(buf, &endp, 0); + if (endp == buf) + return -EINVAL; if (count) { - mask = count - 1; - /* mask = roundup_pow_of_two(count) - 1; - * without overflows... - */ - while ((mask | (mask >> 1)) != mask) - mask |= (mask >> 1); - /* On 64 bit arches, must check mask fits in table->mask (u32), - * and on 32bit arches, must check RPS_DEV_FLOW_TABLE_SIZE(mask + 1) - * doesnt overflow. 
- */ -#if BITS_PER_LONG > 32 - if (mask > (unsigned long)(u32)mask) + int i; + + if (count > INT_MAX) return -EINVAL; -#else - if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1)) + count = roundup_pow_of_two(count); + if (count > (ULONG_MAX - sizeof(struct rps_dev_flow_table)) / sizeof(struct rps_dev_flow)) { /* Enforce a limit to prevent overflow */ return -EINVAL; } -#endif - table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1)); + table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count)); if (!table) return -ENOMEM; - table->mask = mask; - for (count = 0; count <= mask; count++) - table->flows[count].cpu = RPS_NO_CPU; + table->mask = count - 1; + for (i = 0; i < count; i++) + table->flows[i].cpu = RPS_NO_CPU; } else table = NULL; @@ -797,7 +783,7 @@ net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num) #endif } -#ifdef CONFIG_SYSFS +#ifdef CONFIG_XPS /* * netdev_queue sysfs structures and functions. */ @@ -843,133 +829,6 @@ static const struct sysfs_ops netdev_queue_sysfs_ops = { .store = netdev_queue_attr_store, }; -static ssize_t show_trans_timeout(struct netdev_queue *queue, - struct netdev_queue_attribute *attribute, - char *buf) -{ - unsigned long trans_timeout; - - spin_lock_irq(&queue->_xmit_lock); - trans_timeout = queue->trans_timeout; - spin_unlock_irq(&queue->_xmit_lock); - - return sprintf(buf, "%lu", trans_timeout); -} - -static struct netdev_queue_attribute queue_trans_timeout = - __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL); - -#ifdef CONFIG_BQL -/* - * Byte queue limits sysfs structures and functions. - */ -static ssize_t bql_show(char *buf, unsigned int value) -{ - return sprintf(buf, "%u\n", value); -} - -static ssize_t bql_set(const char *buf, const size_t count, - unsigned int *pvalue) -{ - unsigned int value; - int err; - - if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) - value = DQL_MAX_LIMIT; - else { - err = kstrtouint(buf, 10, &value); - if (err < 0) - return err; - if (value > DQL_MAX_LIMIT) - return -EINVAL; - } - - *pvalue = value; - - return count; -} - -static ssize_t bql_show_hold_time(struct netdev_queue *queue, - struct netdev_queue_attribute *attr, - char *buf) -{ - struct dql *dql = &queue->dql; - - return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time)); -} - -static ssize_t bql_set_hold_time(struct netdev_queue *queue, - struct netdev_queue_attribute *attribute, - const char *buf, size_t len) -{ - struct dql *dql = &queue->dql; - unsigned value; - int err; - - err = kstrtouint(buf, 10, &value); - if (err < 0) - return err; - - dql->slack_hold_time = msecs_to_jiffies(value); - - return len; -} - -static struct netdev_queue_attribute bql_hold_time_attribute = - __ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time, - bql_set_hold_time); - -static ssize_t bql_show_inflight(struct netdev_queue *queue, - struct netdev_queue_attribute *attr, - char *buf) -{ - struct dql *dql = &queue->dql; - - return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed); -} - -static struct netdev_queue_attribute bql_inflight_attribute = - __ATTR(inflight, S_IRUGO | S_IWUSR, bql_show_inflight, NULL); - -#define BQL_ATTR(NAME, FIELD) \ -static ssize_t bql_show_ ## NAME(struct netdev_queue *queue, \ - struct netdev_queue_attribute *attr, \ - char *buf) \ -{ \ - return bql_show(buf, queue->dql.FIELD); \ -} \ - \ -static ssize_t bql_set_ ## NAME(struct netdev_queue *queue, \ - struct netdev_queue_attribute *attr, \ - const char *buf, size_t len) \ -{ \ - return bql_set(buf, len, &queue->dql.FIELD); \ -} \ - \ -static struct 
netdev_queue_attribute bql_ ## NAME ## _attribute = \ - __ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME, \ - bql_set_ ## NAME); - -BQL_ATTR(limit, limit) -BQL_ATTR(limit_max, max_limit) -BQL_ATTR(limit_min, min_limit) - -static struct attribute *dql_attrs[] = { - &bql_limit_attribute.attr, - &bql_limit_max_attribute.attr, - &bql_limit_min_attribute.attr, - &bql_hold_time_attribute.attr, - &bql_inflight_attribute.attr, - NULL -}; - -static struct attribute_group dql_group = { - .name = "byte_queue_limits", - .attrs = dql_attrs, -}; -#endif /* CONFIG_BQL */ - -#ifdef CONFIG_XPS static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue) { struct net_device *dev = queue->dev; @@ -1034,52 +893,6 @@ static DEFINE_MUTEX(xps_map_mutex); #define xmap_dereference(P) \ rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) -static void xps_queue_release(struct netdev_queue *queue) -{ - struct net_device *dev = queue->dev; - struct xps_dev_maps *dev_maps; - struct xps_map *map; - unsigned long index; - int i, pos, nonempty = 0; - - index = get_netdev_queue_index(queue); - - mutex_lock(&xps_map_mutex); - dev_maps = xmap_dereference(dev->xps_maps); - - if (dev_maps) { - for_each_possible_cpu(i) { - map = xmap_dereference(dev_maps->cpu_map[i]); - if (!map) - continue; - - for (pos = 0; pos < map->len; pos++) - if (map->queues[pos] == index) - break; - - if (pos < map->len) { - if (map->len > 1) - map->queues[pos] = - map->queues[--map->len]; - else { - RCU_INIT_POINTER(dev_maps->cpu_map[i], - NULL); - kfree_rcu(map, rcu); - map = NULL; - } - } - if (map) - nonempty = 1; - } - - if (!nonempty) { - RCU_INIT_POINTER(dev->xps_maps, NULL); - kfree_rcu(dev_maps, rcu); - } - } - mutex_unlock(&xps_map_mutex); -} - static ssize_t store_xps_map(struct netdev_queue *queue, struct netdev_queue_attribute *attribute, const char *buf, size_t len) @@ -1091,7 +904,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue, struct xps_map *map, *new_map; struct xps_dev_maps *dev_maps, *new_dev_maps; int nonempty = 0; - int numa_node_id = -2; + int numa_node = -2; if (!capable(CAP_NET_ADMIN)) return -EPERM; @@ -1134,10 +947,10 @@ static ssize_t store_xps_map(struct netdev_queue *queue, need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu); #ifdef CONFIG_NUMA if (need_set) { - if (numa_node_id == -2) - numa_node_id = cpu_to_node(cpu); - else if (numa_node_id != cpu_to_node(cpu)) - numa_node_id = -1; + if (numa_node == -2) + numa_node = cpu_to_node(cpu); + else if (numa_node != cpu_to_node(cpu)) + numa_node = -1; } #endif if (need_set && pos >= map_len) { @@ -1187,7 +1000,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue, if (dev_maps) kfree_rcu(dev_maps, rcu); - netdev_queue_numa_node_write(queue, (numa_node_id >= 0) ? numa_node_id : + netdev_queue_numa_node_write(queue, (numa_node >= 0) ? 
numa_node : NUMA_NO_NODE); mutex_unlock(&xps_map_mutex); @@ -1210,23 +1023,58 @@ static ssize_t store_xps_map(struct netdev_queue *queue, static struct netdev_queue_attribute xps_cpus_attribute = __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map); -#endif /* CONFIG_XPS */ static struct attribute *netdev_queue_default_attrs[] = { - &queue_trans_timeout.attr, -#ifdef CONFIG_XPS &xps_cpus_attribute.attr, -#endif NULL }; static void netdev_queue_release(struct kobject *kobj) { struct netdev_queue *queue = to_netdev_queue(kobj); + struct net_device *dev = queue->dev; + struct xps_dev_maps *dev_maps; + struct xps_map *map; + unsigned long index; + int i, pos, nonempty = 0; -#ifdef CONFIG_XPS - xps_queue_release(queue); -#endif + index = get_netdev_queue_index(queue); + + mutex_lock(&xps_map_mutex); + dev_maps = xmap_dereference(dev->xps_maps); + + if (dev_maps) { + for_each_possible_cpu(i) { + map = xmap_dereference(dev_maps->cpu_map[i]); + if (!map) + continue; + + for (pos = 0; pos < map->len; pos++) + if (map->queues[pos] == index) + break; + + if (pos < map->len) { + if (map->len > 1) + map->queues[pos] = + map->queues[--map->len]; + else { + RCU_INIT_POINTER(dev_maps->cpu_map[i], + NULL); + kfree_rcu(map, rcu); + map = NULL; + } + } + if (map) + nonempty = 1; + } + + if (!nonempty) { + RCU_INIT_POINTER(dev->xps_maps, NULL); + kfree_rcu(dev_maps, rcu); + } + } + + mutex_unlock(&xps_map_mutex); memset(kobj, 0, sizeof(*kobj)); dev_put(queue->dev); @@ -1247,29 +1095,22 @@ static int netdev_queue_add_kobject(struct net_device *net, int index) kobj->kset = net->queues_kset; error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL, "tx-%u", index); - if (error) - goto exit; - -#ifdef CONFIG_BQL - error = sysfs_create_group(kobj, &dql_group); - if (error) - goto exit; -#endif + if (error) { + kobject_put(kobj); + return error; + } kobject_uevent(kobj, KOBJ_ADD); dev_hold(queue->dev); - return 0; -exit: - kobject_put(kobj); return error; } -#endif /* CONFIG_SYSFS */ +#endif /* CONFIG_XPS */ int netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num) { -#ifdef CONFIG_SYSFS +#ifdef CONFIG_XPS int i; int error = 0; @@ -1281,26 +1122,20 @@ netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num) } } - while (--i >= new_num) { - struct netdev_queue *queue = net->_tx + i; - -#ifdef CONFIG_BQL - sysfs_remove_group(&queue->kobj, &dql_group); -#endif - kobject_put(&queue->kobj); - } + while (--i >= new_num) + kobject_put(&net->_tx[i].kobj); return error; #else return 0; -#endif /* CONFIG_SYSFS */ +#endif } static int register_queue_kobjects(struct net_device *net) { int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0; -#ifdef CONFIG_SYSFS +#if defined(CONFIG_RPS) || defined(CONFIG_XPS) net->queues_kset = kset_create_and_add("queues", NULL, &net->dev.kobj); if (!net->queues_kset) @@ -1341,7 +1176,7 @@ static void remove_queue_kobjects(struct net_device *net) net_rx_queue_update_kobjects(net, real_rx, 0); netdev_queue_update_kobjects(net, real_tx, 0); -#ifdef CONFIG_SYSFS +#if defined(CONFIG_RPS) || defined(CONFIG_XPS) kset_unregister(net->queues_kset); #endif } diff --git a/trunk/net/core/netpoll.c b/trunk/net/core/netpoll.c index 0d38808a2305..cf64c1ffa4cd 100644 --- a/trunk/net/core/netpoll.c +++ b/trunk/net/core/netpoll.c @@ -76,7 +76,7 @@ static void queue_process(struct work_struct *work) local_irq_save(flags); __netif_tx_lock(txq, smp_processor_id()); - if (netif_xmit_frozen_or_stopped(txq) || + if 
(netif_tx_queue_frozen_or_stopped(txq) || ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) { skb_queue_head(&npinfo->txq, skb); __netif_tx_unlock(txq); @@ -317,7 +317,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) { if (__netif_tx_trylock(txq)) { - if (!netif_xmit_stopped(txq)) { + if (!netif_tx_queue_stopped(txq)) { status = ops->ndo_start_xmit(skb, dev); if (status == NETDEV_TX_OK) txq_trans_update(txq); @@ -422,7 +422,6 @@ static void arp_reply(struct sk_buff *skb) struct sk_buff *send_skb; struct netpoll *np, *tmp; unsigned long flags; - int hlen, tlen; int hits = 0; if (list_empty(&npinfo->rx_np)) @@ -480,9 +479,8 @@ static void arp_reply(struct sk_buff *skb) if (tip != np->local_ip) continue; - hlen = LL_RESERVED_SPACE(np->dev); - tlen = np->dev->needed_tailroom; - send_skb = find_skb(np, size + hlen + tlen, hlen); + send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev), + LL_RESERVED_SPACE(np->dev)); if (!send_skb) continue; diff --git a/trunk/net/core/netprio_cgroup.c b/trunk/net/core/netprio_cgroup.c deleted file mode 100644 index 3a9fd4826b75..000000000000 --- a/trunk/net/core/netprio_cgroup.c +++ /dev/null @@ -1,344 +0,0 @@ -/* - * net/core/netprio_cgroup.c Priority Control Group - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - * Authors: Neil Horman - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, - struct cgroup *cgrp); -static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp); -static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp); - -struct cgroup_subsys net_prio_subsys = { - .name = "net_prio", - .create = cgrp_create, - .destroy = cgrp_destroy, - .populate = cgrp_populate, -#ifdef CONFIG_NETPRIO_CGROUP - .subsys_id = net_prio_subsys_id, -#endif - .module = THIS_MODULE -}; - -#define PRIOIDX_SZ 128 - -static unsigned long prioidx_map[PRIOIDX_SZ]; -static DEFINE_SPINLOCK(prioidx_map_lock); -static atomic_t max_prioidx = ATOMIC_INIT(0); - -static inline struct cgroup_netprio_state *cgrp_netprio_state(struct cgroup *cgrp) -{ - return container_of(cgroup_subsys_state(cgrp, net_prio_subsys_id), - struct cgroup_netprio_state, css); -} - -static int get_prioidx(u32 *prio) -{ - unsigned long flags; - u32 prioidx; - - spin_lock_irqsave(&prioidx_map_lock, flags); - prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ); - set_bit(prioidx, prioidx_map); - spin_unlock_irqrestore(&prioidx_map_lock, flags); - if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ) - return -ENOSPC; - - atomic_set(&max_prioidx, prioidx); - *prio = prioidx; - return 0; -} - -static void put_prioidx(u32 idx) -{ - unsigned long flags; - - spin_lock_irqsave(&prioidx_map_lock, flags); - clear_bit(idx, prioidx_map); - spin_unlock_irqrestore(&prioidx_map_lock, flags); -} - -static void extend_netdev_table(struct net_device *dev, u32 new_len) -{ - size_t new_size = sizeof(struct netprio_map) + - ((sizeof(u32) * new_len)); - struct netprio_map *new_priomap = kzalloc(new_size, GFP_KERNEL); - struct netprio_map *old_priomap; - int i; - - old_priomap = rtnl_dereference(dev->priomap); - - if 
(!new_priomap) { - printk(KERN_WARNING "Unable to alloc new priomap!\n"); - return; - } - - for (i = 0; - old_priomap && (i < old_priomap->priomap_len); - i++) - new_priomap->priomap[i] = old_priomap->priomap[i]; - - new_priomap->priomap_len = new_len; - - rcu_assign_pointer(dev->priomap, new_priomap); - if (old_priomap) - kfree_rcu(old_priomap, rcu); -} - -static void update_netdev_tables(void) -{ - struct net_device *dev; - u32 max_len = atomic_read(&max_prioidx); - struct netprio_map *map; - - rtnl_lock(); - for_each_netdev(&init_net, dev) { - map = rtnl_dereference(dev->priomap); - if ((!map) || - (map->priomap_len < max_len)) - extend_netdev_table(dev, max_len); - } - rtnl_unlock(); -} - -static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, - struct cgroup *cgrp) -{ - struct cgroup_netprio_state *cs; - int ret; - - cs = kzalloc(sizeof(*cs), GFP_KERNEL); - if (!cs) - return ERR_PTR(-ENOMEM); - - if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx) { - kfree(cs); - return ERR_PTR(-EINVAL); - } - - ret = get_prioidx(&cs->prioidx); - if (ret != 0) { - printk(KERN_WARNING "No space in priority index array\n"); - kfree(cs); - return ERR_PTR(ret); - } - - return &cs->css; -} - -static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) -{ - struct cgroup_netprio_state *cs; - struct net_device *dev; - struct netprio_map *map; - - cs = cgrp_netprio_state(cgrp); - rtnl_lock(); - for_each_netdev(&init_net, dev) { - map = rtnl_dereference(dev->priomap); - if (map) - map->priomap[cs->prioidx] = 0; - } - rtnl_unlock(); - put_prioidx(cs->prioidx); - kfree(cs); -} - -static u64 read_prioidx(struct cgroup *cgrp, struct cftype *cft) -{ - return (u64)cgrp_netprio_state(cgrp)->prioidx; -} - -static int read_priomap(struct cgroup *cont, struct cftype *cft, - struct cgroup_map_cb *cb) -{ - struct net_device *dev; - u32 prioidx = cgrp_netprio_state(cont)->prioidx; - u32 priority; - struct netprio_map *map; - - rcu_read_lock(); - for_each_netdev_rcu(&init_net, dev) { - map = rcu_dereference(dev->priomap); - priority = map ? 
map->priomap[prioidx] : 0; - cb->fill(cb, dev->name, priority); - } - rcu_read_unlock(); - return 0; -} - -static int write_priomap(struct cgroup *cgrp, struct cftype *cft, - const char *buffer) -{ - char *devname = kstrdup(buffer, GFP_KERNEL); - int ret = -EINVAL; - u32 prioidx = cgrp_netprio_state(cgrp)->prioidx; - unsigned long priority; - char *priostr; - struct net_device *dev; - struct netprio_map *map; - - if (!devname) - return -ENOMEM; - - /* - * Minimally sized valid priomap string - */ - if (strlen(devname) < 3) - goto out_free_devname; - - priostr = strstr(devname, " "); - if (!priostr) - goto out_free_devname; - - /* - *Separate the devname from the associated priority - *and advance the priostr poitner to the priority value - */ - *priostr = '\0'; - priostr++; - - /* - * If the priostr points to NULL, we're at the end of the passed - * in string, and its not a valid write - */ - if (*priostr == '\0') - goto out_free_devname; - - ret = kstrtoul(priostr, 10, &priority); - if (ret < 0) - goto out_free_devname; - - ret = -ENODEV; - - dev = dev_get_by_name(&init_net, devname); - if (!dev) - goto out_free_devname; - - update_netdev_tables(); - ret = 0; - rcu_read_lock(); - map = rcu_dereference(dev->priomap); - if (map) - map->priomap[prioidx] = priority; - rcu_read_unlock(); - dev_put(dev); - -out_free_devname: - kfree(devname); - return ret; -} - -static struct cftype ss_files[] = { - { - .name = "prioidx", - .read_u64 = read_prioidx, - }, - { - .name = "ifpriomap", - .read_map = read_priomap, - .write_string = write_priomap, - }, -}; - -static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) -{ - return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files)); -} - -static int netprio_device_event(struct notifier_block *unused, - unsigned long event, void *ptr) -{ - struct net_device *dev = ptr; - struct netprio_map *old; - u32 max_len = atomic_read(&max_prioidx); - - /* - * Note this is called with rtnl_lock held so we have update side - * protection on our rcu assignments - */ - - switch (event) { - - case NETDEV_REGISTER: - if (max_len) - extend_netdev_table(dev, max_len); - break; - case NETDEV_UNREGISTER: - old = rtnl_dereference(dev->priomap); - RCU_INIT_POINTER(dev->priomap, NULL); - if (old) - kfree_rcu(old, rcu); - break; - } - return NOTIFY_DONE; -} - -static struct notifier_block netprio_device_notifier = { - .notifier_call = netprio_device_event -}; - -static int __init init_cgroup_netprio(void) -{ - int ret; - - ret = cgroup_load_subsys(&net_prio_subsys); - if (ret) - goto out; -#ifndef CONFIG_NETPRIO_CGROUP - smp_wmb(); - net_prio_subsys_id = net_prio_subsys.subsys_id; -#endif - - register_netdevice_notifier(&netprio_device_notifier); - -out: - return ret; -} - -static void __exit exit_cgroup_netprio(void) -{ - struct netprio_map *old; - struct net_device *dev; - - unregister_netdevice_notifier(&netprio_device_notifier); - - cgroup_unload_subsys(&net_prio_subsys); - -#ifndef CONFIG_NETPRIO_CGROUP - net_prio_subsys_id = -1; - synchronize_rcu(); -#endif - - rtnl_lock(); - for_each_netdev(&init_net, dev) { - old = rtnl_dereference(dev->priomap); - RCU_INIT_POINTER(dev->priomap, NULL); - if (old) - kfree_rcu(old, rcu); - } - rtnl_unlock(); -} - -module_init(init_cgroup_netprio); -module_exit(exit_cgroup_netprio); -MODULE_LICENSE("GPL v2"); diff --git a/trunk/net/core/pktgen.c b/trunk/net/core/pktgen.c index 449fe0f068f8..0001c243b35c 100644 --- a/trunk/net/core/pktgen.c +++ b/trunk/net/core/pktgen.c @@ -1304,7 +1304,7 @@ static ssize_t 
pktgen_if_write(struct file *file, scan_ip6(buf, pkt_dev->in6_daddr.s6_addr); snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_daddr); - pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr; + ipv6_addr_copy(&pkt_dev->cur_in6_daddr, &pkt_dev->in6_daddr); if (debug) printk(KERN_DEBUG "pktgen: dst6 set to: %s\n", buf); @@ -1327,7 +1327,8 @@ static ssize_t pktgen_if_write(struct file *file, scan_ip6(buf, pkt_dev->min_in6_daddr.s6_addr); snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->min_in6_daddr); - pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr; + ipv6_addr_copy(&pkt_dev->cur_in6_daddr, + &pkt_dev->min_in6_daddr); if (debug) printk(KERN_DEBUG "pktgen: dst6_min set to: %s\n", buf); @@ -1370,7 +1371,7 @@ static ssize_t pktgen_if_write(struct file *file, scan_ip6(buf, pkt_dev->in6_saddr.s6_addr); snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_saddr); - pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr; + ipv6_addr_copy(&pkt_dev->cur_in6_saddr, &pkt_dev->in6_saddr); if (debug) printk(KERN_DEBUG "pktgen: src6 set to: %s\n", buf); @@ -2078,7 +2079,9 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) ifp = ifp->if_next) { if (ifp->scope == IFA_LINK && !(ifp->flags & IFA_F_TENTATIVE)) { - pkt_dev->cur_in6_saddr = ifp->addr; + ipv6_addr_copy(&pkt_dev-> + cur_in6_saddr, + &ifp->addr); err = 0; break; } @@ -2955,8 +2958,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, iph->payload_len = htons(sizeof(struct udphdr) + datalen); iph->nexthdr = IPPROTO_UDP; - iph->daddr = pkt_dev->cur_in6_daddr; - iph->saddr = pkt_dev->cur_in6_saddr; + ipv6_addr_copy(&iph->daddr, &pkt_dev->cur_in6_daddr); + ipv6_addr_copy(&iph->saddr, &pkt_dev->cur_in6_saddr); skb->mac_header = (skb->network_header - ETH_HLEN - pkt_dev->pkt_overhead); @@ -3342,7 +3345,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) __netif_tx_lock_bh(txq); - if (unlikely(netif_xmit_frozen_or_stopped(txq))) { + if (unlikely(netif_tx_queue_frozen_or_stopped(txq))) { ret = NETDEV_TX_BUSY; pkt_dev->last_ok = 0; goto unlock; diff --git a/trunk/net/core/rtnetlink.c b/trunk/net/core/rtnetlink.c index dbf2ddafd52d..9083e82bdae5 100644 --- a/trunk/net/core/rtnetlink.c +++ b/trunk/net/core/rtnetlink.c @@ -273,17 +273,6 @@ EXPORT_SYMBOL_GPL(rtnl_unregister_all); static LIST_HEAD(link_ops); -static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind) -{ - const struct rtnl_link_ops *ops; - - list_for_each_entry(ops, &link_ops, list) { - if (!strcmp(ops->kind, kind)) - return ops; - } - return NULL; -} - /** * __rtnl_link_register - Register rtnl_link_ops with rtnetlink. 
* @ops: struct rtnl_link_ops * to register @@ -296,9 +285,6 @@ static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind) */ int __rtnl_link_register(struct rtnl_link_ops *ops) { - if (rtnl_link_ops_get(ops->kind)) - return -EEXIST; - if (!ops->dellink) ops->dellink = unregister_netdevice_queue; @@ -365,6 +351,17 @@ void rtnl_link_unregister(struct rtnl_link_ops *ops) } EXPORT_SYMBOL_GPL(rtnl_link_unregister); +static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind) +{ + const struct rtnl_link_ops *ops; + + list_for_each_entry(ops, &link_ops, list) { + if (!strcmp(ops->kind, kind)) + return ops; + } + return NULL; +} + static size_t rtnl_link_get_size(const struct net_device *dev) { const struct rtnl_link_ops *ops = dev->rtnl_link_ops; diff --git a/trunk/net/core/secure_seq.c b/trunk/net/core/secure_seq.c index 6fd44606fdd1..925991ae6f52 100644 --- a/trunk/net/core/secure_seq.c +++ b/trunk/net/core/secure_seq.c @@ -36,7 +36,7 @@ static u32 seq_scale(u32 seq) } #endif -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr, __be16 sport, __be16 dport) { @@ -134,7 +134,7 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral); #endif -#if IS_ENABLED(CONFIG_IP_DCCP) +#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE) u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport) { @@ -156,7 +156,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, } EXPORT_SYMBOL(secure_dccp_sequence_number); -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr, __be16 sport, __be16 dport) { diff --git a/trunk/net/core/skbuff.c b/trunk/net/core/skbuff.c index da0c97f2fab4..3c30ee4a5710 100644 --- a/trunk/net/core/skbuff.c +++ b/trunk/net/core/skbuff.c @@ -244,55 +244,6 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, } EXPORT_SYMBOL(__alloc_skb); -/** - * build_skb - build a network buffer - * @data: data buffer provided by caller - * - * Allocate a new &sk_buff. Caller provides space holding head and - * skb_shared_info. @data must have been allocated by kmalloc() - * The return is the new skb buffer. - * On a failure the return is %NULL, and @data is not freed. - * Notes : - * Before IO, driver allocates only data buffer where NIC put incoming frame - * Driver should add room at head (NET_SKB_PAD) and - * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)) - * After IO, driver calls build_skb(), to allocate sk_buff and populate it - * before giving packet to stack. - * RX rings only contains data buffers, not full skbs. 
- */ -struct sk_buff *build_skb(void *data) -{ - struct skb_shared_info *shinfo; - struct sk_buff *skb; - unsigned int size; - - skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); - if (!skb) - return NULL; - - size = ksize(data) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - - memset(skb, 0, offsetof(struct sk_buff, tail)); - skb->truesize = SKB_TRUESIZE(size); - atomic_set(&skb->users, 1); - skb->head = data; - skb->data = data; - skb_reset_tail_pointer(skb); - skb->end = skb->tail + size; -#ifdef NET_SKBUFF_DATA_USES_OFFSET - skb->mac_header = ~0U; -#endif - - /* make sure we initialize shinfo sequentially */ - shinfo = skb_shinfo(skb); - memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); - atomic_set(&shinfo->dataref, 1); - kmemcheck_annotate_variable(shinfo->destructor_arg); - - return skb; -} -EXPORT_SYMBOL(build_skb); - /** * __netdev_alloc_skb - allocate an skbuff for rx on a specific device * @dev: network device to receive on @@ -452,7 +403,7 @@ static void skb_release_head_state(struct sk_buff *skb) WARN_ON(in_irq()); skb->destructor(skb); } -#if IS_ENABLED(CONFIG_NF_CONNTRACK) +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) nf_conntrack_put(skb->nfct); #endif #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED @@ -602,14 +553,15 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) new->ip_summed = old->ip_summed; skb_copy_queue_mapping(new, old); new->priority = old->priority; -#if IS_ENABLED(CONFIG_IP_VS) +#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) new->ipvs_property = old->ipvs_property; #endif new->protocol = old->protocol; new->mark = old->mark; new->skb_iif = old->skb_iif; __nf_copy(new, old); -#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) +#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ + defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) new->nf_trace = old->nf_trace; #endif #ifdef CONFIG_NET_SCHED @@ -839,9 +791,8 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) EXPORT_SYMBOL(skb_copy); /** - * __pskb_copy - create copy of an sk_buff with private head. + * pskb_copy - create copy of an sk_buff with private head. * @skb: buffer to copy - * @headroom: headroom of new skb * @gfp_mask: allocation priority * * Make a copy of both an &sk_buff and part of its data, located @@ -852,16 +803,16 @@ EXPORT_SYMBOL(skb_copy); * The returned buffer has a reference count of 1. */ -struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask) +struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) { - unsigned int size = skb_headlen(skb) + headroom; + unsigned int size = skb_end_pointer(skb) - skb->head; struct sk_buff *n = alloc_skb(size, gfp_mask); if (!n) goto out; /* Set the data pointer */ - skb_reserve(n, headroom); + skb_reserve(n, skb_headroom(skb)); /* Set the tail pointer and length */ skb_put(n, skb_headlen(skb)); /* Copy the bytes */ @@ -897,7 +848,7 @@ struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask) out: return n; } -EXPORT_SYMBOL(__pskb_copy); +EXPORT_SYMBOL(pskb_copy); /** * pskb_expand_head - reallocate header of &sk_buff @@ -2670,7 +2621,7 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum); * a pointer to the first in a list of new skbs for the segments. * In case of error it returns ERR_PTR(err). 
*/ -struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) +struct sk_buff *skb_segment(struct sk_buff *skb, u32 features) { struct sk_buff *segs = NULL; struct sk_buff *tail = NULL; @@ -3218,26 +3169,6 @@ void skb_tstamp_tx(struct sk_buff *orig_skb, } EXPORT_SYMBOL_GPL(skb_tstamp_tx); -void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) -{ - struct sock *sk = skb->sk; - struct sock_exterr_skb *serr; - int err; - - skb->wifi_acked_valid = 1; - skb->wifi_acked = acked; - - serr = SKB_EXT_ERR(skb); - memset(serr, 0, sizeof(*serr)); - serr->ee.ee_errno = ENOMSG; - serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; - - err = sock_queue_err_skb(sk, skb); - if (err) - kfree_skb(skb); -} -EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); - /** * skb_partial_csum_set - set up and verify partial csum values for packet diff --git a/trunk/net/core/sock.c b/trunk/net/core/sock.c index 002939cfc069..b23f174ab84c 100644 --- a/trunk/net/core/sock.c +++ b/trunk/net/core/sock.c @@ -111,7 +111,6 @@ #include #include #include -#include #include #include @@ -126,7 +125,6 @@ #include #include #include -#include #include @@ -136,46 +134,6 @@ #include #endif -static DEFINE_MUTEX(proto_list_mutex); -static LIST_HEAD(proto_list); - -#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM -int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss) -{ - struct proto *proto; - int ret = 0; - - mutex_lock(&proto_list_mutex); - list_for_each_entry(proto, &proto_list, node) { - if (proto->init_cgroup) { - ret = proto->init_cgroup(cgrp, ss); - if (ret) - goto out; - } - } - - mutex_unlock(&proto_list_mutex); - return ret; -out: - list_for_each_entry_continue_reverse(proto, &proto_list, node) - if (proto->destroy_cgroup) - proto->destroy_cgroup(cgrp, ss); - mutex_unlock(&proto_list_mutex); - return ret; -} - -void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss) -{ - struct proto *proto; - - mutex_lock(&proto_list_mutex); - list_for_each_entry_reverse(proto, &proto_list, node) - if (proto->destroy_cgroup) - proto->destroy_cgroup(cgrp, ss); - mutex_unlock(&proto_list_mutex); -} -#endif - /* * Each address family might have different locking rules, so we have * one slock key per address family: @@ -183,9 +141,6 @@ void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss) static struct lock_class_key af_family_keys[AF_MAX]; static struct lock_class_key af_family_slock_keys[AF_MAX]; -struct jump_label_key memcg_socket_limit_enabled; -EXPORT_SYMBOL(memcg_socket_limit_enabled); - /* * Make lock validator output more readable. 
(we pre-construct these * strings build-time, so that runtime initialization of socket @@ -266,16 +221,10 @@ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX; int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); EXPORT_SYMBOL(sysctl_optmem_max); -#if defined(CONFIG_CGROUPS) -#if !defined(CONFIG_NET_CLS_CGROUP) +#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP) int net_cls_subsys_id = -1; EXPORT_SYMBOL_GPL(net_cls_subsys_id); #endif -#if !defined(CONFIG_NETPRIO_CGROUP) -int net_prio_subsys_id = -1; -EXPORT_SYMBOL_GPL(net_prio_subsys_id); -#endif -#endif static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) { @@ -320,14 +269,14 @@ static void sock_warn_obsolete_bsdism(const char *name) } } -#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) - -static void sock_disable_timestamp(struct sock *sk, unsigned long flags) +static void sock_disable_timestamp(struct sock *sk, int flag) { - if (sk->sk_flags & flags) { - sk->sk_flags &= ~flags; - if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP)) + if (sock_flag(sk, flag)) { + sock_reset_flag(sk, flag); + if (!sock_flag(sk, SOCK_TIMESTAMP) && + !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) { net_disable_timestamp(); + } } } @@ -729,7 +678,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname, SOCK_TIMESTAMPING_RX_SOFTWARE); else sock_disable_timestamp(sk, - (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)); + SOCK_TIMESTAMPING_RX_SOFTWARE); sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE, val & SOF_TIMESTAMPING_SOFTWARE); sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE, @@ -787,11 +736,6 @@ int sock_setsockopt(struct socket *sock, int level, int optname, case SO_RXQ_OVFL: sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool); break; - - case SO_WIFI_STATUS: - sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool); - break; - default: ret = -ENOPROTOOPT; break; @@ -1013,10 +957,6 @@ int sock_getsockopt(struct socket *sock, int level, int optname, v.val = !!sock_flag(sk, SOCK_RXQ_OVFL); break; - case SO_WIFI_STATUS: - v.val = !!sock_flag(sk, SOCK_WIFI_STATUS); - break; - default: return -ENOPROTOOPT; } @@ -1167,18 +1107,6 @@ void sock_update_classid(struct sock *sk) sk->sk_classid = classid; } EXPORT_SYMBOL(sock_update_classid); - -void sock_update_netprioidx(struct sock *sk) -{ - struct cgroup_netprio_state *state; - if (in_interrupt()) - return; - rcu_read_lock(); - state = task_netprio_state(current); - sk->sk_cgrp_prioidx = state ? 
state->prioidx : 0; - rcu_read_unlock(); -} -EXPORT_SYMBOL_GPL(sock_update_netprioidx); #endif /** @@ -1206,7 +1134,6 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, atomic_set(&sk->sk_wmem_alloc, 1); sock_update_classid(sk); - sock_update_netprioidx(sk); } return sk; @@ -1227,7 +1154,8 @@ static void __sk_free(struct sock *sk) RCU_INIT_POINTER(sk->sk_filter, NULL); } - sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP); + sock_disable_timestamp(sk, SOCK_TIMESTAMP); + sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE); if (atomic_read(&sk->sk_omem_alloc)) printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n", @@ -1272,14 +1200,7 @@ void sk_release_kernel(struct sock *sk) } EXPORT_SYMBOL(sk_release_kernel); -/** - * sk_clone_lock - clone a socket, and lock its clone - * @sk: the socket to clone - * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) - * - * Caller must unlock socket even in error path (bh_unlock_sock(newsk)) - */ -struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) +struct sock *sk_clone(const struct sock *sk, const gfp_t priority) { struct sock *newsk; @@ -1363,15 +1284,16 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) newsk->sk_wq = NULL; if (newsk->sk_prot->sockets_allocated) - sk_sockets_allocated_inc(newsk); + percpu_counter_inc(newsk->sk_prot->sockets_allocated); - if (newsk->sk_flags & SK_FLAGS_TIMESTAMP) + if (sock_flag(newsk, SOCK_TIMESTAMP) || + sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE)) net_enable_timestamp(); } out: return newsk; } -EXPORT_SYMBOL_GPL(sk_clone_lock); +EXPORT_SYMBOL_GPL(sk_clone); void sk_setup_caps(struct sock *sk, struct dst_entry *dst) { @@ -1751,34 +1673,30 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind) struct proto *prot = sk->sk_prot; int amt = sk_mem_pages(size); long allocated; - int parent_status = UNDER_LIMIT; sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; - - allocated = sk_memory_allocated_add(sk, amt, &parent_status); + allocated = atomic_long_add_return(amt, prot->memory_allocated); /* Under limit. */ - if (parent_status == UNDER_LIMIT && - allocated <= sk_prot_mem_limits(sk, 0)) { - sk_leave_memory_pressure(sk); + if (allocated <= prot->sysctl_mem[0]) { + if (prot->memory_pressure && *prot->memory_pressure) + *prot->memory_pressure = 0; return 1; } - /* Under pressure. (we or our parents) */ - if ((parent_status > SOFT_LIMIT) || - allocated > sk_prot_mem_limits(sk, 1)) - sk_enter_memory_pressure(sk); + /* Under pressure. */ + if (allocated > prot->sysctl_mem[1]) + if (prot->enter_memory_pressure) + prot->enter_memory_pressure(sk); - /* Over hard limit (we or our parents) */ - if ((parent_status == OVER_LIMIT) || - (allocated > sk_prot_mem_limits(sk, 2))) + /* Over hard limit. 
*/ + if (allocated > prot->sysctl_mem[2]) goto suppress_allocation; /* guarantee minimum buffer size under pressure */ if (kind == SK_MEM_RECV) { if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0]) return 1; - } else { /* SK_MEM_SEND */ if (sk->sk_type == SOCK_STREAM) { if (sk->sk_wmem_queued < prot->sysctl_wmem[0]) @@ -1788,13 +1706,13 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind) return 1; } - if (sk_has_memory_pressure(sk)) { + if (prot->memory_pressure) { int alloc; - if (!sk_under_memory_pressure(sk)) + if (!*prot->memory_pressure) return 1; - alloc = sk_sockets_allocated_read_positive(sk); - if (sk_prot_mem_limits(sk, 2) > alloc * + alloc = percpu_counter_read_positive(prot->sockets_allocated); + if (prot->sysctl_mem[2] > alloc * sk_mem_pages(sk->sk_wmem_queued + atomic_read(&sk->sk_rmem_alloc) + sk->sk_forward_alloc)) @@ -1817,9 +1735,7 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind) /* Alas. Undo changes. */ sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM; - - sk_memory_allocated_sub(sk, amt, parent_status); - + atomic_long_sub(amt, prot->memory_allocated); return 0; } EXPORT_SYMBOL(__sk_mem_schedule); @@ -1830,13 +1746,15 @@ EXPORT_SYMBOL(__sk_mem_schedule); */ void __sk_mem_reclaim(struct sock *sk) { - sk_memory_allocated_sub(sk, - sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 0); + struct proto *prot = sk->sk_prot; + + atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, + prot->memory_allocated); sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1; - if (sk_under_memory_pressure(sk) && - (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0))) - sk_leave_memory_pressure(sk); + if (prot->memory_pressure && *prot->memory_pressure && + (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0])) + *prot->memory_pressure = 0; } EXPORT_SYMBOL(__sk_mem_reclaim); @@ -2207,15 +2125,16 @@ EXPORT_SYMBOL(sock_get_timestampns); void sock_enable_timestamp(struct sock *sk, int flag) { if (!sock_flag(sk, flag)) { - unsigned long previous_flags = sk->sk_flags; - sock_set_flag(sk, flag); /* * we just set one of the two flags which require net * time stamping, but time stamping might have been on * already because of the other one */ - if (!(previous_flags & SK_FLAGS_TIMESTAMP)) + if (!sock_flag(sk, + flag == SOCK_TIMESTAMP ? 
+ SOCK_TIMESTAMPING_RX_SOFTWARE : + SOCK_TIMESTAMP)) net_enable_timestamp(); } } @@ -2327,6 +2246,9 @@ void sk_common_release(struct sock *sk) } EXPORT_SYMBOL(sk_common_release); +static DEFINE_RWLOCK(proto_list_lock); +static LIST_HEAD(proto_list); + #ifdef CONFIG_PROC_FS #define PROTO_INUSE_NR 64 /* should be enough for the first time */ struct prot_inuse { @@ -2475,10 +2397,10 @@ int proto_register(struct proto *prot, int alloc_slab) } } - mutex_lock(&proto_list_mutex); + write_lock(&proto_list_lock); list_add(&prot->node, &proto_list); assign_proto_idx(prot); - mutex_unlock(&proto_list_mutex); + write_unlock(&proto_list_lock); return 0; out_free_timewait_sock_slab_name: @@ -2501,10 +2423,10 @@ EXPORT_SYMBOL(proto_register); void proto_unregister(struct proto *prot) { - mutex_lock(&proto_list_mutex); + write_lock(&proto_list_lock); release_proto_idx(prot); list_del(&prot->node); - mutex_unlock(&proto_list_mutex); + write_unlock(&proto_list_lock); if (prot->slab != NULL) { kmem_cache_destroy(prot->slab); @@ -2527,9 +2449,9 @@ EXPORT_SYMBOL(proto_unregister); #ifdef CONFIG_PROC_FS static void *proto_seq_start(struct seq_file *seq, loff_t *pos) - __acquires(proto_list_mutex) + __acquires(proto_list_lock) { - mutex_lock(&proto_list_mutex); + read_lock(&proto_list_lock); return seq_list_start_head(&proto_list, *pos); } @@ -2539,36 +2461,25 @@ static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos) } static void proto_seq_stop(struct seq_file *seq, void *v) - __releases(proto_list_mutex) + __releases(proto_list_lock) { - mutex_unlock(&proto_list_mutex); + read_unlock(&proto_list_lock); } static char proto_method_implemented(const void *method) { return method == NULL ? 'n' : 'y'; } -static long sock_prot_memory_allocated(struct proto *proto) -{ - return proto->memory_allocated != NULL ? proto_memory_allocated(proto): -1L; -} - -static char *sock_prot_memory_pressure(struct proto *proto) -{ - return proto->memory_pressure != NULL ? - proto_memory_pressure(proto) ? "yes" : "no" : "NI"; -} static void proto_seq_printf(struct seq_file *seq, struct proto *proto) { - seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s " "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n", proto->name, proto->obj_size, sock_prot_inuse_get(seq_file_net(seq), proto), - sock_prot_memory_allocated(proto), - sock_prot_memory_pressure(proto), + proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L, + proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI", proto->max_header, proto->slab == NULL ? 
"no" : "yes", module_name(proto->owner), diff --git a/trunk/net/core/sock_diag.c b/trunk/net/core/sock_diag.c deleted file mode 100644 index b9868e1fd62c..000000000000 --- a/trunk/net/core/sock_diag.c +++ /dev/null @@ -1,192 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -static struct sock_diag_handler *sock_diag_handlers[AF_MAX]; -static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh); -static DEFINE_MUTEX(sock_diag_table_mutex); - -int sock_diag_check_cookie(void *sk, __u32 *cookie) -{ - if ((cookie[0] != INET_DIAG_NOCOOKIE || - cookie[1] != INET_DIAG_NOCOOKIE) && - ((u32)(unsigned long)sk != cookie[0] || - (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1])) - return -ESTALE; - else - return 0; -} -EXPORT_SYMBOL_GPL(sock_diag_check_cookie); - -void sock_diag_save_cookie(void *sk, __u32 *cookie) -{ - cookie[0] = (u32)(unsigned long)sk; - cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1); -} -EXPORT_SYMBOL_GPL(sock_diag_save_cookie); - -int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype) -{ - __u32 *mem; - - mem = RTA_DATA(__RTA_PUT(skb, attrtype, SK_MEMINFO_VARS * sizeof(__u32))); - - mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk); - mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf; - mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk); - mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf; - mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc; - mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued; - mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc); - - return 0; - -rtattr_failure: - return -EMSGSIZE; -} -EXPORT_SYMBOL_GPL(sock_diag_put_meminfo); - -void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)) -{ - mutex_lock(&sock_diag_table_mutex); - inet_rcv_compat = fn; - mutex_unlock(&sock_diag_table_mutex); -} -EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat); - -void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)) -{ - mutex_lock(&sock_diag_table_mutex); - inet_rcv_compat = NULL; - mutex_unlock(&sock_diag_table_mutex); -} -EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat); - -int sock_diag_register(struct sock_diag_handler *hndl) -{ - int err = 0; - - if (hndl->family >= AF_MAX) - return -EINVAL; - - mutex_lock(&sock_diag_table_mutex); - if (sock_diag_handlers[hndl->family]) - err = -EBUSY; - else - sock_diag_handlers[hndl->family] = hndl; - mutex_unlock(&sock_diag_table_mutex); - - return err; -} -EXPORT_SYMBOL_GPL(sock_diag_register); - -void sock_diag_unregister(struct sock_diag_handler *hnld) -{ - int family = hnld->family; - - if (family >= AF_MAX) - return; - - mutex_lock(&sock_diag_table_mutex); - BUG_ON(sock_diag_handlers[family] != hnld); - sock_diag_handlers[family] = NULL; - mutex_unlock(&sock_diag_table_mutex); -} -EXPORT_SYMBOL_GPL(sock_diag_unregister); - -static inline struct sock_diag_handler *sock_diag_lock_handler(int family) -{ - if (sock_diag_handlers[family] == NULL) - request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, - NETLINK_SOCK_DIAG, family); - - mutex_lock(&sock_diag_table_mutex); - return sock_diag_handlers[family]; -} - -static inline void sock_diag_unlock_handler(struct sock_diag_handler *h) -{ - mutex_unlock(&sock_diag_table_mutex); -} - -static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) -{ - int err; - struct sock_diag_req *req = NLMSG_DATA(nlh); - struct sock_diag_handler *hndl; - - if (nlmsg_len(nlh) < sizeof(*req)) - return -EINVAL; - - hndl = 
sock_diag_lock_handler(req->sdiag_family); - if (hndl == NULL) - err = -ENOENT; - else - err = hndl->dump(skb, nlh); - sock_diag_unlock_handler(hndl); - - return err; -} - -static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) -{ - int ret; - - switch (nlh->nlmsg_type) { - case TCPDIAG_GETSOCK: - case DCCPDIAG_GETSOCK: - if (inet_rcv_compat == NULL) - request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, - NETLINK_SOCK_DIAG, AF_INET); - - mutex_lock(&sock_diag_table_mutex); - if (inet_rcv_compat != NULL) - ret = inet_rcv_compat(skb, nlh); - else - ret = -EOPNOTSUPP; - mutex_unlock(&sock_diag_table_mutex); - - return ret; - case SOCK_DIAG_BY_FAMILY: - return __sock_diag_rcv_msg(skb, nlh); - default: - return -EINVAL; - } -} - -static DEFINE_MUTEX(sock_diag_mutex); - -static void sock_diag_rcv(struct sk_buff *skb) -{ - mutex_lock(&sock_diag_mutex); - netlink_rcv_skb(skb, &sock_diag_rcv_msg); - mutex_unlock(&sock_diag_mutex); -} - -struct sock *sock_diag_nlsk; -EXPORT_SYMBOL_GPL(sock_diag_nlsk); - -static int __init sock_diag_init(void) -{ - sock_diag_nlsk = netlink_kernel_create(&init_net, NETLINK_SOCK_DIAG, 0, - sock_diag_rcv, NULL, THIS_MODULE); - return sock_diag_nlsk == NULL ? -ENOMEM : 0; -} - -static void __exit sock_diag_exit(void) -{ - netlink_kernel_release(sock_diag_nlsk); -} - -module_init(sock_diag_init); -module_exit(sock_diag_exit); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_SOCK_DIAG); diff --git a/trunk/net/core/sysctl_net_core.c b/trunk/net/core/sysctl_net_core.c index d05559d4d9cd..77a65f031488 100644 --- a/trunk/net/core/sysctl_net_core.c +++ b/trunk/net/core/sysctl_net_core.c @@ -68,13 +68,8 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write, if (sock_table != orig_sock_table) { rcu_assign_pointer(rps_sock_flow_table, sock_table); - if (sock_table) - jump_label_inc(&rps_needed); - if (orig_sock_table) { - jump_label_dec(&rps_needed); - synchronize_rcu(); - vfree(orig_sock_table); - } + synchronize_rcu(); + vfree(orig_sock_table); } } diff --git a/trunk/net/dccp/ccids/ccid2.c b/trunk/net/dccp/ccids/ccid2.c index f053198e730c..67164bb6ae4d 100644 --- a/trunk/net/dccp/ccids/ccid2.c +++ b/trunk/net/dccp/ccids/ccid2.c @@ -29,7 +29,7 @@ #ifdef CONFIG_IP_DCCP_CCID2_DEBUG -static bool ccid2_debug; +static int ccid2_debug; #define ccid2_pr_debug(format, a...) DCCP_PR_DEBUG(ccid2_debug, format, ##a) #else #define ccid2_pr_debug(format, a...) @@ -174,7 +174,7 @@ static void ccid2_hc_tx_rto_expire(unsigned long data) /* * Congestion window validation (RFC 2861). */ -static bool ccid2_do_cwv = true; +static int ccid2_do_cwv = 1; module_param(ccid2_do_cwv, bool, 0644); MODULE_PARM_DESC(ccid2_do_cwv, "Perform RFC2861 Congestion Window Validation"); diff --git a/trunk/net/dccp/ccids/ccid3.c b/trunk/net/dccp/ccids/ccid3.c index 560627307200..3d604e1349c0 100644 --- a/trunk/net/dccp/ccids/ccid3.c +++ b/trunk/net/dccp/ccids/ccid3.c @@ -38,7 +38,7 @@ #include #ifdef CONFIG_IP_DCCP_CCID3_DEBUG -static bool ccid3_debug; +static int ccid3_debug; #define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a) #else #define ccid3_pr_debug(format, a...) 
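(Aside, not part of the patch: the ccid2.c/ccid3.c hunks above and the tfrc.c hunk just below all touch the same pattern, a module-level debug flag declared as bool/int and exposed with module_param(..., bool, 0644) plus a pr_debug-style wrapper macro. A minimal standalone sketch of that pattern is shown here for illustration only; the module name "demo", the variable demo_debug, and the demo_pr_debug macro are invented and do not appear in this patch.)

/* demo.c - sketch of a boolean module parameter gating debug output */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/printk.h>

static bool demo_debug;                      /* off by default */
module_param(demo_debug, bool, 0644);        /* writable via sysfs */
MODULE_PARM_DESC(demo_debug, "Enable demo debug messages");

/* Same shape as the ccid2/ccid3 debug wrappers: only print when the knob is set */
#define demo_pr_debug(fmt, a...) \
	do { if (demo_debug) printk(KERN_DEBUG "%s: " fmt, __func__, ##a); } while (0)

static int __init demo_init(void)
{
	demo_pr_debug("loaded\n");
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	demo_pr_debug("unloading\n");
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");

(With 0644 permissions the flag can be set at load time, e.g. "insmod demo.ko demo_debug=1", or toggled afterwards through /sys/module/demo/parameters/demo_debug, assuming the module is built as demo.ko.)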
diff --git a/trunk/net/dccp/ccids/lib/tfrc.c b/trunk/net/dccp/ccids/lib/tfrc.c index 62b5828acde0..1f94b7e01d39 100644 --- a/trunk/net/dccp/ccids/lib/tfrc.c +++ b/trunk/net/dccp/ccids/lib/tfrc.c @@ -8,7 +8,7 @@ #include "tfrc.h" #ifdef CONFIG_IP_DCCP_TFRC_DEBUG -bool tfrc_debug; +int tfrc_debug; module_param(tfrc_debug, bool, 0644); MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages"); #endif diff --git a/trunk/net/dccp/ccids/lib/tfrc.h b/trunk/net/dccp/ccids/lib/tfrc.h index ed698c42a5fb..f8ee3f549770 100644 --- a/trunk/net/dccp/ccids/lib/tfrc.h +++ b/trunk/net/dccp/ccids/lib/tfrc.h @@ -21,7 +21,7 @@ #include "packet_history.h" #ifdef CONFIG_IP_DCCP_TFRC_DEBUG -extern bool tfrc_debug; +extern int tfrc_debug; #define tfrc_pr_debug(format, a...) DCCP_PR_DEBUG(tfrc_debug, format, ##a) #else #define tfrc_pr_debug(format, a...) diff --git a/trunk/net/dccp/dccp.h b/trunk/net/dccp/dccp.h index 29d6bb629a6c..583490aaf56f 100644 --- a/trunk/net/dccp/dccp.h +++ b/trunk/net/dccp/dccp.h @@ -39,7 +39,7 @@ "%s: " fmt, __func__, ##a) #ifdef CONFIG_IP_DCCP_DEBUG -extern bool dccp_debug; +extern int dccp_debug; #define dccp_pr_debug(format, a...) DCCP_PR_DEBUG(dccp_debug, format, ##a) #define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a) #define dccp_debug(fmt, a...) dccp_pr_debug_cat(KERN_DEBUG fmt, ##a) @@ -357,7 +357,7 @@ static inline int dccp_bad_service_code(const struct sock *sk, struct dccp_skb_cb { union { struct inet_skb_parm h4; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) struct inet6_skb_parm h6; #endif } header; diff --git a/trunk/net/dccp/diag.c b/trunk/net/dccp/diag.c index 8f1625753377..b21f261da75e 100644 --- a/trunk/net/dccp/diag.c +++ b/trunk/net/dccp/diag.c @@ -48,23 +48,11 @@ static void dccp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, dccp_get_info(sk, _info); } -static void dccp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - struct inet_diag_req *r, struct nlattr *bc) -{ - inet_diag_dump_icsk(&dccp_hashinfo, skb, cb, r, bc); -} - -static int dccp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh, - struct inet_diag_req *req) -{ - return inet_diag_dump_one_icsk(&dccp_hashinfo, in_skb, nlh, req); -} - static const struct inet_diag_handler dccp_diag_handler = { - .dump = dccp_diag_dump, - .dump_one = dccp_diag_dump_one, + .idiag_hashinfo = &dccp_hashinfo, .idiag_get_info = dccp_diag_get_info, - .idiag_type = IPPROTO_DCCP, + .idiag_type = DCCPDIAG_GETSOCK, + .idiag_info_size = sizeof(struct tcp_info), }; static int __init dccp_diag_init(void) @@ -83,4 +71,4 @@ module_exit(dccp_diag_fini); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arnaldo Carvalho de Melo "); MODULE_DESCRIPTION("DCCP inet_diag handler"); -MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-33 /* AF_INET - IPPROTO_DCCP */); +MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_INET_DIAG, DCCPDIAG_GETSOCK); diff --git a/trunk/net/dccp/feat.c b/trunk/net/dccp/feat.c index 78a2ad70e1b0..23cea0ee3101 100644 --- a/trunk/net/dccp/feat.c +++ b/trunk/net/dccp/feat.c @@ -490,8 +490,8 @@ static int dccp_feat_push_change(struct list_head *fn_list, u8 feat, u8 local, new->feat_num = feat; new->is_local = local; new->state = FEAT_INITIALISING; - new->needs_confirm = false; - new->empty_confirm = false; + new->needs_confirm = 0; + new->empty_confirm = 0; new->val = *fval; new->needs_mandatory = mandatory; @@ -517,12 +517,12 @@ static int dccp_feat_push_confirm(struct list_head *fn_list, u8 feat, u8 local, 
new->feat_num = feat; new->is_local = local; new->state = FEAT_STABLE; /* transition in 6.6.2 */ - new->needs_confirm = true; + new->needs_confirm = 1; new->empty_confirm = (fval == NULL); new->val.nn = 0; /* zeroes the whole structure */ if (!new->empty_confirm) new->val = *fval; - new->needs_mandatory = false; + new->needs_mandatory = 0; return 0; } @@ -1155,7 +1155,7 @@ static u8 dccp_feat_change_recv(struct list_head *fn, u8 is_mandatory, u8 opt, } if (dccp_feat_reconcile(&entry->val, val, len, server, true)) { - entry->empty_confirm = false; + entry->empty_confirm = 0; } else if (is_mandatory) { return DCCP_RESET_CODE_MANDATORY_ERROR; } else if (entry->state == FEAT_INITIALISING) { @@ -1171,10 +1171,10 @@ static u8 dccp_feat_change_recv(struct list_head *fn, u8 is_mandatory, u8 opt, defval = dccp_feat_default_value(feat); if (!dccp_feat_reconcile(&entry->val, &defval, 1, server, true)) return DCCP_RESET_CODE_OPTION_ERROR; - entry->empty_confirm = true; + entry->empty_confirm = 1; } - entry->needs_confirm = true; - entry->needs_mandatory = false; + entry->needs_confirm = 1; + entry->needs_mandatory = 0; entry->state = FEAT_STABLE; return 0; diff --git a/trunk/net/dccp/ipv4.c b/trunk/net/dccp/ipv4.c index 1c67fe8ff90d..3f4e5414c8e5 100644 --- a/trunk/net/dccp/ipv4.c +++ b/trunk/net/dccp/ipv4.c @@ -474,11 +474,10 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk, struct sk_buff *skb) { struct rtable *rt; - const struct iphdr *iph = ip_hdr(skb); struct flowi4 fl4 = { .flowi4_oif = skb_rtable(skb)->rt_iif, - .daddr = iph->saddr, - .saddr = iph->daddr, + .daddr = ip_hdr(skb)->saddr, + .saddr = ip_hdr(skb)->daddr, .flowi4_tos = RT_CONN_FLAGS(sk), .flowi4_proto = sk->sk_protocol, .fl4_sport = dccp_hdr(skb)->dccph_dport, diff --git a/trunk/net/dccp/ipv6.c b/trunk/net/dccp/ipv6.c index ce903f747e64..17ee85ce148d 100644 --- a/trunk/net/dccp/ipv6.c +++ b/trunk/net/dccp/ipv6.c @@ -150,8 +150,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, */ memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_DCCP; - fl6.daddr = np->daddr; - fl6.saddr = np->saddr; + ipv6_addr_copy(&fl6.daddr, &np->daddr); + ipv6_addr_copy(&fl6.saddr, &np->saddr); fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.fl6_dport = inet->inet_dport; fl6.fl6_sport = inet->inet_sport; @@ -244,8 +244,8 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_DCCP; - fl6.daddr = ireq6->rmt_addr; - fl6.saddr = ireq6->loc_addr; + ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr); + ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr); fl6.flowlabel = 0; fl6.flowi6_oif = ireq6->iif; fl6.fl6_dport = inet_rsk(req)->rmt_port; @@ -270,7 +270,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, dh->dccph_checksum = dccp_v6_csum_finish(skb, &ireq6->loc_addr, &ireq6->rmt_addr); - fl6.daddr = ireq6->rmt_addr; + ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr); err = ip6_xmit(sk, skb, &fl6, opt, np->tclass); err = net_xmit_eval(err); } @@ -313,8 +313,8 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) &rxip6h->daddr); memset(&fl6, 0, sizeof(fl6)); - fl6.daddr = rxip6h->saddr; - fl6.saddr = rxip6h->daddr; + ipv6_addr_copy(&fl6.daddr, &rxip6h->saddr); + ipv6_addr_copy(&fl6.saddr, &rxip6h->daddr); fl6.flowi6_proto = IPPROTO_DCCP; fl6.flowi6_oif = inet6_iif(rxskb); @@ -419,8 +419,8 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) goto drop_and_free; ireq6 = 
inet6_rsk(req); - ireq6->rmt_addr = ipv6_hdr(skb)->saddr; - ireq6->loc_addr = ipv6_hdr(skb)->daddr; + ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr); + ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr); if (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || @@ -491,7 +491,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr); - newnp->rcv_saddr = newnp->saddr; + ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr); inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped; newsk->sk_backlog_rcv = dccp_v4_do_rcv; @@ -526,9 +526,9 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_DCCP; - fl6.daddr = ireq6->rmt_addr; + ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr); final_p = fl6_update_dst(&fl6, opt, &final); - fl6.saddr = ireq6->loc_addr; + ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr); fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.fl6_dport = inet_rsk(req)->rmt_port; fl6.fl6_sport = inet_rsk(req)->loc_port; @@ -559,9 +559,9 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, memcpy(newnp, np, sizeof(struct ipv6_pinfo)); - newnp->daddr = ireq6->rmt_addr; - newnp->saddr = ireq6->loc_addr; - newnp->rcv_saddr = ireq6->loc_addr; + ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr); + ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr); + ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr); newsk->sk_bound_dev_if = ireq6->iif; /* Now IPv6 options... @@ -877,7 +877,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (flowlabel == NULL) return -EINVAL; - usin->sin6_addr = flowlabel->dst; + ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst); fl6_sock_release(flowlabel); } } @@ -910,7 +910,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, return -EINVAL; } - np->daddr = usin->sin6_addr; + ipv6_addr_copy(&np->daddr, &usin->sin6_addr); np->flow_label = fl6.flowlabel; /* @@ -949,8 +949,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, saddr = &np->rcv_saddr; fl6.flowi6_proto = IPPROTO_DCCP; - fl6.daddr = np->daddr; - fl6.saddr = saddr ? *saddr : np->saddr; + ipv6_addr_copy(&fl6.daddr, &np->daddr); + ipv6_addr_copy(&fl6.saddr, saddr ? 
saddr : &np->saddr); fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.fl6_dport = usin->sin6_port; fl6.fl6_sport = inet->inet_sport; @@ -966,11 +966,11 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, if (saddr == NULL) { saddr = &fl6.saddr; - np->rcv_saddr = *saddr; + ipv6_addr_copy(&np->rcv_saddr, saddr); } /* set the source address */ - np->saddr = *saddr; + ipv6_addr_copy(&np->saddr, saddr); inet->inet_rcv_saddr = LOOPBACK4_IPV6; __ip6_dst_store(sk, dst, NULL, NULL); diff --git a/trunk/net/dccp/minisocks.c b/trunk/net/dccp/minisocks.c index 5a7f90bbffac..d7041a0963af 100644 --- a/trunk/net/dccp/minisocks.c +++ b/trunk/net/dccp/minisocks.c @@ -53,15 +53,15 @@ void dccp_time_wait(struct sock *sk, int state, int timeo) if (tw != NULL) { const struct inet_connection_sock *icsk = inet_csk(sk); const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1); -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) if (tw->tw_family == PF_INET6) { const struct ipv6_pinfo *np = inet6_sk(sk); struct inet6_timewait_sock *tw6; tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot); tw6 = inet6_twsk((struct sock *)tw); - tw6->tw_v6_daddr = np->daddr; - tw6->tw_v6_rcv_saddr = np->rcv_saddr; + ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr); + ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr); tw->tw_ipv6only = np->ipv6only; } #endif @@ -100,7 +100,7 @@ struct sock *dccp_create_openreq_child(struct sock *sk, * (* Generate a new socket and switch to that socket *) * Set S := new socket for this port pair */ - struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC); + struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC); if (newsk != NULL) { struct dccp_request_sock *dreq = dccp_rsk(req); diff --git a/trunk/net/dccp/options.c b/trunk/net/dccp/options.c index 68fa6b7a3e01..4b2ab657ac8e 100644 --- a/trunk/net/dccp/options.c +++ b/trunk/net/dccp/options.c @@ -544,7 +544,7 @@ int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat, } if (unlikely(val == NULL || len == 0)) - len = repeat_first = false; + len = repeat_first = 0; tot_len = 3 + repeat_first + len; if (DCCP_SKB_CB(skb)->dccpd_opt_len + tot_len > DCCP_MAX_OPT_LEN) { diff --git a/trunk/net/dccp/probe.c b/trunk/net/dccp/probe.c index 0a8d6ebd9b45..33d0e6297c21 100644 --- a/trunk/net/dccp/probe.c +++ b/trunk/net/dccp/probe.c @@ -152,17 +152,6 @@ static const struct file_operations dccpprobe_fops = { .llseek = noop_llseek, }; -static __init int setup_jprobe(void) -{ - int ret = register_jprobe(&dccp_send_probe); - - if (ret) { - request_module("dccp"); - ret = register_jprobe(&dccp_send_probe); - } - return ret; -} - static __init int dccpprobe_init(void) { int ret = -ENOMEM; @@ -174,7 +163,8 @@ static __init int dccpprobe_init(void) if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops)) goto err0; - ret = setup_jprobe(); + try_then_request_module((ret = register_jprobe(&dccp_send_probe)) == 0, + "dccp"); if (ret) goto err1; diff --git a/trunk/net/dccp/proto.c b/trunk/net/dccp/proto.c index 7065c0ae1e7b..e742f90a6858 100644 --- a/trunk/net/dccp/proto.c +++ b/trunk/net/dccp/proto.c @@ -1099,7 +1099,7 @@ module_param(thash_entries, int, 0444); MODULE_PARM_DESC(thash_entries, "Number of ehash buckets"); #ifdef CONFIG_IP_DCCP_DEBUG -bool dccp_debug; +int dccp_debug; module_param(dccp_debug, bool, 0644); MODULE_PARM_DESC(dccp_debug, "Enable debug messages"); diff --git a/trunk/net/decnet/dn_neigh.c b/trunk/net/decnet/dn_neigh.c index befe426491ba..7f0eb087dc11 100644 --- 
a/trunk/net/decnet/dn_neigh.c +++ b/trunk/net/decnet/dn_neigh.c @@ -88,9 +88,9 @@ static const struct neigh_ops dn_phase3_ops = { static u32 dn_neigh_hash(const void *pkey, const struct net_device *dev, - __u32 *hash_rnd) + __u32 hash_rnd) { - return jhash_2words(*(__u16 *)pkey, 0, hash_rnd[0]); + return jhash_2words(*(__u16 *)pkey, 0, hash_rnd); } struct neigh_table dn_neigh_table = { @@ -107,7 +107,7 @@ struct neigh_table dn_neigh_table = { .gc_staletime = 60 * HZ, .reachable_time = 30 * HZ, .delay_probe_time = 5 * HZ, - .queue_len_bytes = 64*1024, + .queue_len = 3, .ucast_probes = 0, .app_probes = 0, .mcast_probes = 0, @@ -202,7 +202,7 @@ static int dn_neigh_output_packet(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct dn_route *rt = (struct dn_route *)dst; - struct neighbour *neigh = dst_get_neighbour_noref(dst); + struct neighbour *neigh = dst_get_neighbour(dst); struct net_device *dev = neigh->dev; char mac_addr[ETH_ALEN]; diff --git a/trunk/net/decnet/dn_route.c b/trunk/net/decnet/dn_route.c index f31ce72dca65..94f4ec036669 100644 --- a/trunk/net/decnet/dn_route.c +++ b/trunk/net/decnet/dn_route.c @@ -244,7 +244,7 @@ static int dn_dst_gc(struct dst_ops *ops) */ static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu) { - struct neighbour *n = dst_get_neighbour_noref(dst); + struct neighbour *n = dst_get_neighbour(dst); u32 min_mtu = 230; struct dn_dev *dn; @@ -713,7 +713,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type static int dn_to_neigh_output(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); - struct neighbour *n = dst_get_neighbour_noref(dst); + struct neighbour *n = dst_get_neighbour(dst); return n->output(n, skb); } @@ -728,7 +728,7 @@ static int dn_output(struct sk_buff *skb) int err = -EINVAL; - if ((neigh = dst_get_neighbour_noref(dst)) == NULL) + if ((neigh = dst_get_neighbour(dst)) == NULL) goto error; skb->dev = dev; @@ -852,7 +852,7 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res) } rt->rt_type = res->type; - if (dev != NULL && dst_get_neighbour_noref(&rt->dst) == NULL) { + if (dev != NULL && dst_get_neighbour(&rt->dst) == NULL) { n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev); if (IS_ERR(n)) return PTR_ERR(n); diff --git a/trunk/net/dsa/Kconfig b/trunk/net/dsa/Kconfig index 274791cd7a35..c53ded2a98df 100644 --- a/trunk/net/dsa/Kconfig +++ b/trunk/net/dsa/Kconfig @@ -1,5 +1,5 @@ -config NET_DSA - tristate "Distributed Switch Architecture support" +menuconfig NET_DSA + bool "Distributed Switch Architecture support" default n depends on EXPERIMENTAL && NETDEVICES && !S390 select PHYLIB @@ -23,4 +23,38 @@ config NET_DSA_TAG_TRAILER bool default n + +# switch drivers +config NET_DSA_MV88E6XXX + bool + default n + +config NET_DSA_MV88E6060 + bool "Marvell 88E6060 ethernet switch chip support" + select NET_DSA_TAG_TRAILER + ---help--- + This enables support for the Marvell 88E6060 ethernet switch + chip. + +config NET_DSA_MV88E6XXX_NEED_PPU + bool + default n + +config NET_DSA_MV88E6131 + bool "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support" + select NET_DSA_MV88E6XXX + select NET_DSA_MV88E6XXX_NEED_PPU + select NET_DSA_TAG_DSA + ---help--- + This enables support for the Marvell 88E6085/6095/6095F/6131 + ethernet switch chips. 
+ +config NET_DSA_MV88E6123_61_65 + bool "Marvell 88E6123/6161/6165 ethernet switch chip support" + select NET_DSA_MV88E6XXX + select NET_DSA_TAG_EDSA + ---help--- + This enables support for the Marvell 88E6123/6161/6165 + ethernet switch chips. + endif diff --git a/trunk/net/dsa/Makefile b/trunk/net/dsa/Makefile index 7b9fcbbeda5d..2374faff4dea 100644 --- a/trunk/net/dsa/Makefile +++ b/trunk/net/dsa/Makefile @@ -1,8 +1,13 @@ -# the core -obj-$(CONFIG_NET_DSA) += dsa_core.o -dsa_core-y += dsa.o slave.o - # tagging formats -dsa_core-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o -dsa_core-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o -dsa_core-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o +obj-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o +obj-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o +obj-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o + +# switch drivers +obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o +obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o +obj-$(CONFIG_NET_DSA_MV88E6123_61_65) += mv88e6123_61_65.o +obj-$(CONFIG_NET_DSA_MV88E6131) += mv88e6131.o + +# the core +obj-$(CONFIG_NET_DSA) += dsa.o slave.o diff --git a/trunk/net/dsa/dsa.c b/trunk/net/dsa/dsa.c index 88e7c2f3fa0d..0dc1589343c3 100644 --- a/trunk/net/dsa/dsa.c +++ b/trunk/net/dsa/dsa.c @@ -29,7 +29,6 @@ void register_switch_driver(struct dsa_switch_driver *drv) list_add_tail(&drv->list, &dsa_switch_drivers); mutex_unlock(&dsa_switch_drivers_mutex); } -EXPORT_SYMBOL_GPL(register_switch_driver); void unregister_switch_driver(struct dsa_switch_driver *drv) { @@ -37,7 +36,6 @@ void unregister_switch_driver(struct dsa_switch_driver *drv) list_del_init(&drv->list); mutex_unlock(&dsa_switch_drivers_mutex); } -EXPORT_SYMBOL_GPL(unregister_switch_driver); static struct dsa_switch_driver * dsa_switch_probe(struct mii_bus *bus, int sw_addr, char **_name) @@ -201,6 +199,29 @@ static void dsa_switch_destroy(struct dsa_switch *ds) } +/* hooks for ethertype-less tagging formats *********************************/ +/* + * The original DSA tag format and some other tag formats have no + * ethertype, which means that we need to add a little hack to the + * networking receive path to make sure that received frames get + * the right ->protocol assigned to them when one of those tag + * formats is in use. 
+ */ +bool dsa_uses_dsa_tags(void *dsa_ptr) +{ + struct dsa_switch_tree *dst = dsa_ptr; + + return !!(dst->tag_protocol == htons(ETH_P_DSA)); +} + +bool dsa_uses_trailer_tags(void *dsa_ptr) +{ + struct dsa_switch_tree *dst = dsa_ptr; + + return !!(dst->tag_protocol == htons(ETH_P_TRAILER)); +} + + /* link polling *************************************************************/ static void dsa_link_poll_work(struct work_struct *ugly) { @@ -398,36 +419,12 @@ static struct platform_driver dsa_driver = { static int __init dsa_init_module(void) { - int rc; - - rc = platform_driver_register(&dsa_driver); - if (rc) - return rc; - -#ifdef CONFIG_NET_DSA_TAG_DSA - dev_add_pack(&dsa_packet_type); -#endif -#ifdef CONFIG_NET_DSA_TAG_EDSA - dev_add_pack(&edsa_packet_type); -#endif -#ifdef CONFIG_NET_DSA_TAG_TRAILER - dev_add_pack(&trailer_packet_type); -#endif - return 0; + return platform_driver_register(&dsa_driver); } module_init(dsa_init_module); static void __exit dsa_cleanup_module(void) { -#ifdef CONFIG_NET_DSA_TAG_TRAILER - dev_remove_pack(&trailer_packet_type); -#endif -#ifdef CONFIG_NET_DSA_TAG_EDSA - dev_remove_pack(&edsa_packet_type); -#endif -#ifdef CONFIG_NET_DSA_TAG_DSA - dev_remove_pack(&dsa_packet_type); -#endif platform_driver_unregister(&dsa_driver); } module_exit(dsa_cleanup_module); diff --git a/trunk/net/dsa/dsa_priv.h b/trunk/net/dsa/dsa_priv.h index d4cf5cc747e3..4b0ea0540442 100644 --- a/trunk/net/dsa/dsa_priv.h +++ b/trunk/net/dsa/dsa_priv.h @@ -11,9 +11,97 @@ #ifndef __DSA_PRIV_H #define __DSA_PRIV_H +#include #include +#include +#include #include +struct dsa_switch { + /* + * Parent switch tree, and switch index. + */ + struct dsa_switch_tree *dst; + int index; + + /* + * Configuration data for this switch. + */ + struct dsa_chip_data *pd; + + /* + * The used switch driver. + */ + struct dsa_switch_driver *drv; + + /* + * Reference to mii bus to use. + */ + struct mii_bus *master_mii_bus; + + /* + * Slave mii_bus and devices for the individual ports. + */ + u32 dsa_port_mask; + u32 phys_port_mask; + struct mii_bus *slave_mii_bus; + struct net_device *ports[DSA_MAX_PORTS]; +}; + +struct dsa_switch_tree { + /* + * Configuration data for the platform device that owns + * this dsa switch tree instance. + */ + struct dsa_platform_data *pd; + + /* + * Reference to network device to use, and which tagging + * protocol to use. + */ + struct net_device *master_netdev; + __be16 tag_protocol; + + /* + * The switch and port to which the CPU is attached. + */ + s8 cpu_switch; + s8 cpu_port; + + /* + * Link state polling. + */ + int link_poll_needed; + struct work_struct link_poll_work; + struct timer_list link_poll_timer; + + /* + * Data for the individual switch chips. + */ + struct dsa_switch *ds[DSA_MAX_SWITCHES]; +}; + +static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p) +{ + return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port); +} + +static inline u8 dsa_upstream_port(struct dsa_switch *ds) +{ + struct dsa_switch_tree *dst = ds->dst; + + /* + * If this is the root switch (i.e. the switch that connects + * to the CPU), return the cpu port number on this switch. + * Else return the (DSA) port number that connects to the + * switch that is one hop closer to the cpu. 
+ */ + if (dst->cpu_switch == ds->index) + return dst->cpu_port; + else + return ds->pd->rtable[dst->cpu_switch]; +} + struct dsa_slave_priv { /* * The linux network interface corresponding to this @@ -35,8 +123,44 @@ struct dsa_slave_priv { struct phy_device *phy; }; +struct dsa_switch_driver { + struct list_head list; + + __be16 tag_protocol; + int priv_size; + + /* + * Probing and setup. + */ + char *(*probe)(struct mii_bus *bus, int sw_addr); + int (*setup)(struct dsa_switch *ds); + int (*set_addr)(struct dsa_switch *ds, u8 *addr); + + /* + * Access to the switch's PHY registers. + */ + int (*phy_read)(struct dsa_switch *ds, int port, int regnum); + int (*phy_write)(struct dsa_switch *ds, int port, + int regnum, u16 val); + + /* + * Link state polling and IRQ handling. + */ + void (*poll_link)(struct dsa_switch *ds); + + /* + * ethtool hardware statistics. + */ + void (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data); + void (*get_ethtool_stats)(struct dsa_switch *ds, + int port, uint64_t *data); + int (*get_sset_count)(struct dsa_switch *ds); +}; + /* dsa.c */ extern char dsa_driver_version[]; +void register_switch_driver(struct dsa_switch_driver *type); +void unregister_switch_driver(struct dsa_switch_driver *type); /* slave.c */ void dsa_slave_mii_bus_init(struct dsa_switch *ds); @@ -46,15 +170,12 @@ struct net_device *dsa_slave_create(struct dsa_switch *ds, /* tag_dsa.c */ netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev); -extern struct packet_type dsa_packet_type; /* tag_edsa.c */ netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev); -extern struct packet_type edsa_packet_type; /* tag_trailer.c */ netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev); -extern struct packet_type trailer_packet_type; #endif diff --git a/trunk/drivers/net/dsa/mv88e6060.c b/trunk/net/dsa/mv88e6060.c similarity index 96% rename from trunk/drivers/net/dsa/mv88e6060.c rename to trunk/net/dsa/mv88e6060.c index 7fc4e81d4d43..8f4ff5a2c813 100644 --- a/trunk/drivers/net/dsa/mv88e6060.c +++ b/trunk/net/dsa/mv88e6060.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include "dsa_priv.h" #define REG_PORT(p) (8 + (p)) #define REG_GLOBAL 0x0f @@ -286,8 +286,3 @@ static void __exit mv88e6060_cleanup(void) unregister_switch_driver(&mv88e6060_switch_driver); } module_exit(mv88e6060_cleanup); - -MODULE_AUTHOR("Lennert Buytenhek "); -MODULE_DESCRIPTION("Driver for Marvell 88E6060 ethernet switch chip"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:mv88e6060"); diff --git a/trunk/drivers/net/dsa/mv88e6123_61_65.c b/trunk/net/dsa/mv88e6123_61_65.c similarity index 96% rename from trunk/drivers/net/dsa/mv88e6123_61_65.c rename to trunk/net/dsa/mv88e6123_61_65.c index c0a458fc698f..52faaa21a4d9 100644 --- a/trunk/drivers/net/dsa/mv88e6123_61_65.c +++ b/trunk/net/dsa/mv88e6123_61_65.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include "dsa_priv.h" #include "mv88e6xxx.h" static char *mv88e6123_61_65_probe(struct mii_bus *bus, int sw_addr) @@ -419,7 +419,7 @@ static int mv88e6123_61_65_get_sset_count(struct dsa_switch *ds) return ARRAY_SIZE(mv88e6123_61_65_hw_stats); } -struct dsa_switch_driver mv88e6123_61_65_switch_driver = { +static struct dsa_switch_driver mv88e6123_61_65_switch_driver = { .tag_protocol = cpu_to_be16(ETH_P_EDSA), .priv_size = sizeof(struct mv88e6xxx_priv_state), .probe = mv88e6123_61_65_probe, @@ -433,6 +433,15 @@ struct dsa_switch_driver mv88e6123_61_65_switch_driver = { .get_sset_count = 
mv88e6123_61_65_get_sset_count, }; -MODULE_ALIAS("platform:mv88e6123"); -MODULE_ALIAS("platform:mv88e6161"); -MODULE_ALIAS("platform:mv88e6165"); +static int __init mv88e6123_61_65_init(void) +{ + register_switch_driver(&mv88e6123_61_65_switch_driver); + return 0; +} +module_init(mv88e6123_61_65_init); + +static void __exit mv88e6123_61_65_cleanup(void) +{ + unregister_switch_driver(&mv88e6123_61_65_switch_driver); +} +module_exit(mv88e6123_61_65_cleanup); diff --git a/trunk/drivers/net/dsa/mv88e6131.c b/trunk/net/dsa/mv88e6131.c similarity index 96% rename from trunk/drivers/net/dsa/mv88e6131.c rename to trunk/net/dsa/mv88e6131.c index e0eb68243834..9bd1061fa4ee 100644 --- a/trunk/drivers/net/dsa/mv88e6131.c +++ b/trunk/net/dsa/mv88e6131.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include "dsa_priv.h" #include "mv88e6xxx.h" /* @@ -415,7 +415,7 @@ static int mv88e6131_get_sset_count(struct dsa_switch *ds) return ARRAY_SIZE(mv88e6131_hw_stats); } -struct dsa_switch_driver mv88e6131_switch_driver = { +static struct dsa_switch_driver mv88e6131_switch_driver = { .tag_protocol = cpu_to_be16(ETH_P_DSA), .priv_size = sizeof(struct mv88e6xxx_priv_state), .probe = mv88e6131_probe, @@ -429,7 +429,15 @@ struct dsa_switch_driver mv88e6131_switch_driver = { .get_sset_count = mv88e6131_get_sset_count, }; -MODULE_ALIAS("platform:mv88e6085"); -MODULE_ALIAS("platform:mv88e6095"); -MODULE_ALIAS("platform:mv88e6095f"); -MODULE_ALIAS("platform:mv88e6131"); +static int __init mv88e6131_init(void) +{ + register_switch_driver(&mv88e6131_switch_driver); + return 0; +} +module_init(mv88e6131_init); + +static void __exit mv88e6131_cleanup(void) +{ + unregister_switch_driver(&mv88e6131_switch_driver); +} +module_exit(mv88e6131_cleanup); diff --git a/trunk/drivers/net/dsa/mv88e6xxx.c b/trunk/net/dsa/mv88e6xxx.c similarity index 93% rename from trunk/drivers/net/dsa/mv88e6xxx.c rename to trunk/net/dsa/mv88e6xxx.c index 5467c040824a..efe661a9def4 100644 --- a/trunk/drivers/net/dsa/mv88e6xxx.c +++ b/trunk/net/dsa/mv88e6xxx.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include "dsa_priv.h" #include "mv88e6xxx.h" /* @@ -520,30 +520,3 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, mutex_unlock(&ps->stats_mutex); } - -static int __init mv88e6xxx_init(void) -{ -#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131) - register_switch_driver(&mv88e6131_switch_driver); -#endif -#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65) - register_switch_driver(&mv88e6123_61_65_switch_driver); -#endif - return 0; -} -module_init(mv88e6xxx_init); - -static void __exit mv88e6xxx_cleanup(void) -{ -#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65) - unregister_switch_driver(&mv88e6123_61_65_switch_driver); -#endif -#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131) - unregister_switch_driver(&mv88e6131_switch_driver); -#endif -} -module_exit(mv88e6xxx_cleanup); - -MODULE_AUTHOR("Lennert Buytenhek "); -MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips"); -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/net/dsa/mv88e6xxx.h b/trunk/net/dsa/mv88e6xxx.h similarity index 95% rename from trunk/drivers/net/dsa/mv88e6xxx.h rename to trunk/net/dsa/mv88e6xxx.h index fc2cd7b90e8d..61156ca26a0d 100644 --- a/trunk/drivers/net/dsa/mv88e6xxx.h +++ b/trunk/net/dsa/mv88e6xxx.h @@ -71,9 +71,6 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int nr_stats, struct mv88e6xxx_hw_stat *stats, int port, uint64_t *data); -extern struct dsa_switch_driver mv88e6131_switch_driver; -extern struct dsa_switch_driver 
mv88e6123_61_65_switch_driver; - #define REG_READ(addr, reg) \ ({ \ int __ret; \ diff --git a/trunk/net/dsa/tag_dsa.c b/trunk/net/dsa/tag_dsa.c index cacce1e22f9c..98dfe80b4538 100644 --- a/trunk/net/dsa/tag_dsa.c +++ b/trunk/net/dsa/tag_dsa.c @@ -186,7 +186,20 @@ static int dsa_rcv(struct sk_buff *skb, struct net_device *dev, return 0; } -struct packet_type dsa_packet_type __read_mostly = { +static struct packet_type dsa_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_DSA), .func = dsa_rcv, }; + +static int __init dsa_init_module(void) +{ + dev_add_pack(&dsa_packet_type); + return 0; +} +module_init(dsa_init_module); + +static void __exit dsa_cleanup_module(void) +{ + dev_remove_pack(&dsa_packet_type); +} +module_exit(dsa_cleanup_module); diff --git a/trunk/net/dsa/tag_edsa.c b/trunk/net/dsa/tag_edsa.c index e70c43c25e64..6f383322ad25 100644 --- a/trunk/net/dsa/tag_edsa.c +++ b/trunk/net/dsa/tag_edsa.c @@ -205,7 +205,20 @@ static int edsa_rcv(struct sk_buff *skb, struct net_device *dev, return 0; } -struct packet_type edsa_packet_type __read_mostly = { +static struct packet_type edsa_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_EDSA), .func = edsa_rcv, }; + +static int __init edsa_init_module(void) +{ + dev_add_pack(&edsa_packet_type); + return 0; +} +module_init(edsa_init_module); + +static void __exit edsa_cleanup_module(void) +{ + dev_remove_pack(&edsa_packet_type); +} +module_exit(edsa_cleanup_module); diff --git a/trunk/net/dsa/tag_trailer.c b/trunk/net/dsa/tag_trailer.c index 94bc260d015d..d6d7d0add3cb 100644 --- a/trunk/net/dsa/tag_trailer.c +++ b/trunk/net/dsa/tag_trailer.c @@ -114,7 +114,20 @@ static int trailer_rcv(struct sk_buff *skb, struct net_device *dev, return 0; } -struct packet_type trailer_packet_type __read_mostly = { +static struct packet_type trailer_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_TRAILER), .func = trailer_rcv, }; + +static int __init trailer_init_module(void) +{ + dev_add_pack(&trailer_packet_type); + return 0; +} +module_init(trailer_init_module); + +static void __exit trailer_cleanup_module(void) +{ + dev_remove_pack(&trailer_packet_type); +} +module_exit(trailer_cleanup_module); diff --git a/trunk/net/econet/af_econet.c b/trunk/net/econet/af_econet.c index 7e717cb35ad1..1c1f26c5d672 100644 --- a/trunk/net/econet/af_econet.c +++ b/trunk/net/econet/af_econet.c @@ -322,7 +322,6 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, /* Real hardware Econet. We're not worthy etc. */ #ifdef CONFIG_ECONET_NATIVE unsigned short proto = 0; - int hlen, tlen; int res; if (len + 15 > dev->mtu) { @@ -332,14 +331,12 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, dev_hold(dev); - hlen = LL_RESERVED_SPACE(dev); - tlen = dev->needed_tailroom; - skb = sock_alloc_send_skb(sk, len + hlen + tlen, + skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev), msg->msg_flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out_unlock; - skb_reserve(skb, hlen); + skb_reserve(skb, LL_RESERVED_SPACE(dev)); skb_reset_network_header(skb); eb = (struct ec_cb *)&skb->cb; diff --git a/trunk/net/ieee802154/6lowpan.c b/trunk/net/ieee802154/6lowpan.c index e4ecc1eef98c..19d6aefe97d4 100644 --- a/trunk/net/ieee802154/6lowpan.c +++ b/trunk/net/ieee802154/6lowpan.c @@ -50,6 +50,8 @@ * SUCH DAMAGE. 
*/ +#define DEBUG + #include #include #include @@ -111,20 +113,6 @@ struct lowpan_dev_record { struct list_head list; }; -struct lowpan_fragment { - struct sk_buff *skb; /* skb to be assembled */ - spinlock_t lock; /* concurency lock */ - u16 length; /* length to be assemled */ - u32 bytes_rcv; /* bytes received */ - u16 tag; /* current fragment tag */ - struct timer_list timer; /* assembling timer */ - struct list_head list; /* fragments list */ -}; - -static unsigned short fragment_tag; -static LIST_HEAD(lowpan_fragments); -spinlock_t flist_lock; - static inline struct lowpan_dev_info *lowpan_dev_info(const struct net_device *dev) { @@ -246,50 +234,6 @@ lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr, return 0; } -static void -lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb) -{ - struct udphdr *uh = udp_hdr(skb); - - pr_debug("(%s): UDP header compression\n", __func__); - - if (((uh->source & LOWPAN_NHC_UDP_4BIT_MASK) == - LOWPAN_NHC_UDP_4BIT_PORT) && - ((uh->dest & LOWPAN_NHC_UDP_4BIT_MASK) == - LOWPAN_NHC_UDP_4BIT_PORT)) { - pr_debug("(%s): both ports compression to 4 bits\n", __func__); - **hc06_ptr = LOWPAN_NHC_UDP_CS_P_11; - **(hc06_ptr + 1) = /* subtraction is faster */ - (u8)((uh->dest - LOWPAN_NHC_UDP_4BIT_PORT) + - ((uh->source & LOWPAN_NHC_UDP_4BIT_PORT) << 4)); - *hc06_ptr += 2; - } else if ((uh->dest & LOWPAN_NHC_UDP_8BIT_MASK) == - LOWPAN_NHC_UDP_8BIT_PORT) { - pr_debug("(%s): remove 8 bits of dest\n", __func__); - **hc06_ptr = LOWPAN_NHC_UDP_CS_P_01; - memcpy(*hc06_ptr + 1, &uh->source, 2); - **(hc06_ptr + 3) = (u8)(uh->dest - LOWPAN_NHC_UDP_8BIT_PORT); - *hc06_ptr += 4; - } else if ((uh->source & LOWPAN_NHC_UDP_8BIT_MASK) == - LOWPAN_NHC_UDP_8BIT_PORT) { - pr_debug("(%s): remove 8 bits of source\n", __func__); - **hc06_ptr = LOWPAN_NHC_UDP_CS_P_10; - memcpy(*hc06_ptr + 1, &uh->dest, 2); - **(hc06_ptr + 3) = (u8)(uh->source - LOWPAN_NHC_UDP_8BIT_PORT); - *hc06_ptr += 4; - } else { - pr_debug("(%s): can't compress header\n", __func__); - **hc06_ptr = LOWPAN_NHC_UDP_CS_P_00; - memcpy(*hc06_ptr + 1, &uh->source, 2); - memcpy(*hc06_ptr + 3, &uh->dest, 2); - *hc06_ptr += 5; - } - - /* checksum is always inline */ - memcpy(*hc06_ptr, &uh->check, 2); - *hc06_ptr += 2; -} - static u8 lowpan_fetch_skb_u8(struct sk_buff *skb) { u8 ret; @@ -300,73 +244,6 @@ static u8 lowpan_fetch_skb_u8(struct sk_buff *skb) return ret; } -static u16 lowpan_fetch_skb_u16(struct sk_buff *skb) -{ - u16 ret; - - BUG_ON(!pskb_may_pull(skb, 2)); - - ret = skb->data[0] | (skb->data[1] << 8); - skb_pull(skb, 2); - return ret; -} - -static int -lowpan_uncompress_udp_header(struct sk_buff *skb) -{ - struct udphdr *uh = udp_hdr(skb); - u8 tmp; - - tmp = lowpan_fetch_skb_u8(skb); - - if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) { - pr_debug("(%s): UDP header uncompression\n", __func__); - switch (tmp & LOWPAN_NHC_UDP_CS_P_11) { - case LOWPAN_NHC_UDP_CS_P_00: - memcpy(&uh->source, &skb->data[0], 2); - memcpy(&uh->dest, &skb->data[2], 2); - skb_pull(skb, 4); - break; - case LOWPAN_NHC_UDP_CS_P_01: - memcpy(&uh->source, &skb->data[0], 2); - uh->dest = - skb->data[2] + LOWPAN_NHC_UDP_8BIT_PORT; - skb_pull(skb, 3); - break; - case LOWPAN_NHC_UDP_CS_P_10: - uh->source = skb->data[0] + LOWPAN_NHC_UDP_8BIT_PORT; - memcpy(&uh->dest, &skb->data[1], 2); - skb_pull(skb, 3); - break; - case LOWPAN_NHC_UDP_CS_P_11: - uh->source = - LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] >> 4); - uh->dest = - LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] & 0x0f); - skb_pull(skb, 1); - break; - default: - 
pr_debug("(%s) ERROR: unknown UDP format\n", __func__); - goto err; - break; - } - - pr_debug("(%s): uncompressed UDP ports: src = %d, dst = %d\n", - __func__, uh->source, uh->dest); - - /* copy checksum */ - memcpy(&uh->check, &skb->data[0], 2); - skb_pull(skb, 2); - } else { - pr_debug("(%s): ERROR: unsupported NH format\n", __func__); - goto err; - } - - return 0; -err: - return -EINVAL; -} - static int lowpan_header_create(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *_daddr, @@ -465,6 +342,8 @@ static int lowpan_header_create(struct sk_buff *skb, if (hdr->nexthdr == UIP_PROTO_UDP) iphc0 |= LOWPAN_IPHC_NH_C; +/* TODO: next header compression */ + if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) { *hc06_ptr = hdr->nexthdr; hc06_ptr += 1; @@ -552,9 +431,8 @@ static int lowpan_header_create(struct sk_buff *skb, } } - /* UDP header compression */ - if (hdr->nexthdr == UIP_PROTO_UDP) - lowpan_compress_udp_header(&hc06_ptr, skb); + /* TODO: UDP header compression */ + /* TODO: Next Header compression */ head[0] = iphc0; head[1] = iphc1; @@ -589,7 +467,6 @@ static int lowpan_header_create(struct sk_buff *skb, memcpy(&(sa.hwaddr), saddr, 8); mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA; - return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev, type, (void *)&da, (void *)&sa, skb->len); } @@ -634,21 +511,6 @@ static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr) return stat; } -static void lowpan_fragment_timer_expired(unsigned long entry_addr) -{ - struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr; - - pr_debug("%s: timer expired for frame with tag %d\n", __func__, - entry->tag); - - spin_lock(&flist_lock); - list_del(&entry->list); - spin_unlock(&flist_lock); - - dev_kfree_skb(entry->skb); - kfree(entry); -} - static int lowpan_process_data(struct sk_buff *skb) { @@ -663,107 +525,6 @@ lowpan_process_data(struct sk_buff *skb) if (skb->len < 2) goto drop; iphc0 = lowpan_fetch_skb_u8(skb); - - /* fragments assembling */ - switch (iphc0 & LOWPAN_DISPATCH_MASK) { - case LOWPAN_DISPATCH_FRAG1: - case LOWPAN_DISPATCH_FRAGN: - { - struct lowpan_fragment *frame; - u8 len, offset; - u16 tag; - bool found = false; - - len = lowpan_fetch_skb_u8(skb); /* frame length */ - tag = lowpan_fetch_skb_u16(skb); - - /* - * check if frame assembling with the same tag is - * already in progress - */ - spin_lock(&flist_lock); - - list_for_each_entry(frame, &lowpan_fragments, list) - if (frame->tag == tag) { - found = true; - break; - } - - /* alloc new frame structure */ - if (!found) { - frame = kzalloc(sizeof(struct lowpan_fragment), - GFP_ATOMIC); - if (!frame) - goto unlock_and_drop; - - INIT_LIST_HEAD(&frame->list); - - frame->length = (iphc0 & 7) | (len << 3); - frame->tag = tag; - - /* allocate buffer for frame assembling */ - frame->skb = alloc_skb(frame->length + - sizeof(struct ipv6hdr), GFP_ATOMIC); - - if (!frame->skb) { - kfree(frame); - goto unlock_and_drop; - } - - frame->skb->priority = skb->priority; - frame->skb->dev = skb->dev; - - /* reserve headroom for uncompressed ipv6 header */ - skb_reserve(frame->skb, sizeof(struct ipv6hdr)); - skb_put(frame->skb, frame->length); - - init_timer(&frame->timer); - /* time out is the same as for ipv6 - 60 sec */ - frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT; - frame->timer.data = (unsigned long)frame; - frame->timer.function = lowpan_fragment_timer_expired; - - add_timer(&frame->timer); - - list_add_tail(&frame->list, &lowpan_fragments); - } - - if ((iphc0 & LOWPAN_DISPATCH_MASK) == 
LOWPAN_DISPATCH_FRAG1) - goto unlock_and_drop; - - offset = lowpan_fetch_skb_u8(skb); /* fetch offset */ - - /* if payload fits buffer, copy it */ - if (likely((offset * 8 + skb->len) <= frame->length)) - skb_copy_to_linear_data_offset(frame->skb, offset * 8, - skb->data, skb->len); - else - goto unlock_and_drop; - - frame->bytes_rcv += skb->len; - - /* frame assembling complete */ - if ((frame->bytes_rcv == frame->length) && - frame->timer.expires > jiffies) { - /* if timer haven't expired - first of all delete it */ - del_timer(&frame->timer); - list_del(&frame->list); - spin_unlock(&flist_lock); - - dev_kfree_skb(skb); - skb = frame->skb; - kfree(frame); - iphc0 = lowpan_fetch_skb_u8(skb); - break; - } - spin_unlock(&flist_lock); - - return kfree_skb(skb), 0; - } - default: - break; - } - iphc1 = lowpan_fetch_skb_u8(skb); _saddr = mac_cb(skb)->sa.hwaddr; @@ -898,10 +659,7 @@ lowpan_process_data(struct sk_buff *skb) goto drop; } - /* UDP data uncompression */ - if (iphc0 & LOWPAN_IPHC_NH_C) - if (lowpan_uncompress_udp_header(skb)) - goto drop; + /* TODO: UDP header parse */ /* Not fragmented package */ hdr.payload_len = htons(skb->len); @@ -916,9 +674,6 @@ lowpan_process_data(struct sk_buff *skb) lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr, sizeof(hdr)); return lowpan_skb_deliver(skb, &hdr); - -unlock_and_drop: - spin_unlock(&flist_lock); drop: kfree_skb(skb); return -EINVAL; @@ -937,115 +692,18 @@ static int lowpan_set_address(struct net_device *dev, void *p) return 0; } -static int lowpan_get_mac_header_length(struct sk_buff *skb) -{ - /* - * Currently long addressing mode is supported only, so the overall - * header size is 21: - * FC SeqNum DPAN DA SA Sec - * 2 + 1 + 2 + 8 + 8 + 0 = 21 - */ - return 21; -} - -static int -lowpan_fragment_xmit(struct sk_buff *skb, u8 *head, - int mlen, int plen, int offset) -{ - struct sk_buff *frag; - int hlen, ret; - - /* if payload length is zero, therefore it's a first fragment */ - hlen = (plen == 0 ? 
LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE); - - lowpan_raw_dump_inline(__func__, "6lowpan fragment header", head, hlen); - - frag = dev_alloc_skb(hlen + mlen + plen + IEEE802154_MFR_SIZE); - if (!frag) - return -ENOMEM; - - frag->priority = skb->priority; - frag->dev = skb->dev; - - /* copy header, MFR and payload */ - memcpy(skb_put(frag, mlen), skb->data, mlen); - memcpy(skb_put(frag, hlen), head, hlen); - - if (plen) - skb_copy_from_linear_data_offset(skb, offset + mlen, - skb_put(frag, plen), plen); - - lowpan_raw_dump_table(__func__, " raw fragment dump", frag->data, - frag->len); - - ret = dev_queue_xmit(frag); - - return ret; -} - -static int -lowpan_skb_fragmentation(struct sk_buff *skb) -{ - int err, header_length, payload_length, tag, offset = 0; - u8 head[5]; - - header_length = lowpan_get_mac_header_length(skb); - payload_length = skb->len - header_length; - tag = fragment_tag++; - - /* first fragment header */ - head[0] = LOWPAN_DISPATCH_FRAG1 | (payload_length & 0x7); - head[1] = (payload_length >> 3) & 0xff; - head[2] = tag & 0xff; - head[3] = tag >> 8; - - err = lowpan_fragment_xmit(skb, head, header_length, 0, 0); - - /* next fragment header */ - head[0] &= ~LOWPAN_DISPATCH_FRAG1; - head[0] |= LOWPAN_DISPATCH_FRAGN; - - while ((payload_length - offset > 0) && (err >= 0)) { - int len = LOWPAN_FRAG_SIZE; - - head[4] = offset / 8; - - if (payload_length - offset < len) - len = payload_length - offset; - - err = lowpan_fragment_xmit(skb, head, header_length, - len, offset); - offset += len; - } - - return err; -} - static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev) { - int err = -1; + int err = 0; pr_debug("(%s): package xmit\n", __func__); skb->dev = lowpan_dev_info(dev)->real_dev; if (skb->dev == NULL) { pr_debug("(%s) ERROR: no real wpan device found\n", __func__); - goto error; - } - - if (skb->len <= IEEE802154_MTU) { + dev_kfree_skb(skb); + } else err = dev_queue_xmit(skb); - goto out; - } - - pr_debug("(%s): frame is too big, fragmentation is needed\n", - __func__); - err = lowpan_skb_fragmentation(skb); -error: - dev_kfree_skb(skb); -out: - if (err < 0) - pr_debug("(%s): ERROR: xmit failed\n", __func__); return (err < 0 ? 
NETDEV_TX_BUSY : NETDEV_TX_OK); } @@ -1072,12 +730,13 @@ static void lowpan_setup(struct net_device *dev) dev->addr_len = IEEE802154_ADDR_LEN; memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN); dev->type = ARPHRD_IEEE802154; + dev->features = NETIF_F_NO_CSUM; /* Frame Control + Sequence Number + Address fields + Security Header */ dev->hard_header_len = 2 + 1 + 20 + 14; dev->needed_tailroom = 2; /* FCS */ dev->mtu = 1281; dev->tx_queue_len = 0; - dev->flags = IFF_BROADCAST | IFF_MULTICAST; + dev->flags = IFF_NOARP | IFF_BROADCAST; dev->watchdog_timeo = 0; dev->netdev_ops = &lowpan_netdev_ops; @@ -1106,15 +765,8 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev, goto drop; /* check that it's our buffer */ - switch (skb->data[0] & 0xe0) { - case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */ - case LOWPAN_DISPATCH_FRAG1: /* first fragment header */ - case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */ + if ((skb->data[0] & 0xe0) == 0x60) lowpan_process_data(skb); - break; - default: - break; - } return NET_RX_SUCCESS; diff --git a/trunk/net/ieee802154/6lowpan.h b/trunk/net/ieee802154/6lowpan.h index aeff3f310482..5d8cf80b930d 100644 --- a/trunk/net/ieee802154/6lowpan.h +++ b/trunk/net/ieee802154/6lowpan.h @@ -159,24 +159,6 @@ #define LOWPAN_DISPATCH_FRAG1 0xc0 /* 11000xxx */ #define LOWPAN_DISPATCH_FRAGN 0xe0 /* 11100xxx */ -#define LOWPAN_DISPATCH_MASK 0xf8 /* 11111000 */ - -#define LOWPAN_FRAG_TIMEOUT (HZ * 60) /* time-out 60 sec */ - -#define LOWPAN_FRAG1_HEAD_SIZE 0x4 -#define LOWPAN_FRAGN_HEAD_SIZE 0x5 - -/* - * According IEEE802.15.4 standard: - * - MTU is 127 octets - * - maximum MHR size is 37 octets - * - MFR size is 2 octets - * - * so minimal payload size that we may guarantee is: - * MTU - MHR - MFR = 88 octets - */ -#define LOWPAN_FRAG_SIZE 88 - /* * Values of fields within the IPHC encoding first byte * (C stands for compressed and I for inline) @@ -219,11 +201,6 @@ #define LOWPAN_NHC_UDP_CHECKSUMC 0x04 #define LOWPAN_NHC_UDP_CHECKSUMI 0x00 -#define LOWPAN_NHC_UDP_4BIT_PORT 0xF0B0 -#define LOWPAN_NHC_UDP_4BIT_MASK 0xFFF0 -#define LOWPAN_NHC_UDP_8BIT_PORT 0xF000 -#define LOWPAN_NHC_UDP_8BIT_MASK 0xFF00 - /* values for port compression, _with checksum_ ie bit 5 set to 0 */ #define LOWPAN_NHC_UDP_CS_P_00 0xF0 /* all inline */ #define LOWPAN_NHC_UDP_CS_P_01 0xF1 /* source 16bit inline, diff --git a/trunk/net/ieee802154/dgram.c b/trunk/net/ieee802154/dgram.c index 1b09eaabaac1..faecf648123f 100644 --- a/trunk/net/ieee802154/dgram.c +++ b/trunk/net/ieee802154/dgram.c @@ -209,7 +209,6 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk, unsigned mtu; struct sk_buff *skb; struct dgram_sock *ro = dgram_sk(sk); - int hlen, tlen; int err; if (msg->msg_flags & MSG_OOB) { @@ -230,15 +229,13 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk, mtu = dev->mtu; pr_debug("name = %s, mtu = %u\n", dev->name, mtu); - hlen = LL_RESERVED_SPACE(dev); - tlen = dev->needed_tailroom; - skb = sock_alloc_send_skb(sk, hlen + tlen + size, + skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + size, msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) goto out_dev; - skb_reserve(skb, hlen); + skb_reserve(skb, LL_RESERVED_SPACE(dev)); skb_reset_network_header(skb); diff --git a/trunk/net/ieee802154/raw.c b/trunk/net/ieee802154/raw.c index f96bae8fd330..10970ca85748 100644 --- a/trunk/net/ieee802154/raw.c +++ b/trunk/net/ieee802154/raw.c @@ -108,7 +108,6 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, struct net_device *dev; unsigned 
mtu; struct sk_buff *skb; - int hlen, tlen; int err; if (msg->msg_flags & MSG_OOB) { @@ -138,14 +137,12 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, goto out_dev; } - hlen = LL_RESERVED_SPACE(dev); - tlen = dev->needed_tailroom; - skb = sock_alloc_send_skb(sk, hlen + tlen + size, + skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + size, msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) goto out_dev; - skb_reserve(skb, hlen); + skb_reserve(skb, LL_RESERVED_SPACE(dev)); skb_reset_mac_header(skb); skb_reset_network_header(skb); diff --git a/trunk/net/ipv4/Kconfig b/trunk/net/ipv4/Kconfig index 1a8f93bd2d4f..cbb505ba9324 100644 --- a/trunk/net/ipv4/Kconfig +++ b/trunk/net/ipv4/Kconfig @@ -409,10 +409,6 @@ config INET_TCP_DIAG depends on INET_DIAG def_tristate INET_DIAG -config INET_UDP_DIAG - depends on INET_DIAG - def_tristate INET_DIAG && IPV6 - menuconfig TCP_CONG_ADVANCED bool "TCP: advanced congestion control" ---help--- diff --git a/trunk/net/ipv4/Makefile b/trunk/net/ipv4/Makefile index ff75d3bbcd6a..f2dc69cffb57 100644 --- a/trunk/net/ipv4/Makefile +++ b/trunk/net/ipv4/Makefile @@ -34,7 +34,6 @@ obj-$(CONFIG_IP_PNP) += ipconfig.o obj-$(CONFIG_NETFILTER) += netfilter.o netfilter/ obj-$(CONFIG_INET_DIAG) += inet_diag.o obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o -obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o obj-$(CONFIG_NET_TCPPROBE) += tcp_probe.o obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o obj-$(CONFIG_TCP_CONG_CUBIC) += tcp_cubic.o @@ -48,7 +47,6 @@ obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o -obj-$(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) += tcp_memcontrol.o obj-$(CONFIG_NETLABEL) += cipso_ipv4.o obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \ diff --git a/trunk/net/ipv4/af_inet.c b/trunk/net/ipv4/af_inet.c index f7b5670744f0..1b5096a9875a 100644 --- a/trunk/net/ipv4/af_inet.c +++ b/trunk/net/ipv4/af_inet.c @@ -1250,8 +1250,7 @@ static int inet_gso_send_check(struct sk_buff *skb) return err; } -static struct sk_buff *inet_gso_segment(struct sk_buff *skb, - netdev_features_t features) +static struct sk_buff *inet_gso_segment(struct sk_buff *skb, u32 features) { struct sk_buff *segs = ERR_PTR(-EINVAL); struct iphdr *iph; @@ -1573,9 +1572,9 @@ static __net_init int ipv4_mib_init_net(struct net *net) sizeof(struct icmp_mib), __alignof__(struct icmp_mib)) < 0) goto err_icmp_mib; - net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib), - GFP_KERNEL); - if (!net->mib.icmpmsg_statistics) + if (snmp_mib_init((void __percpu **)net->mib.icmpmsg_statistics, + sizeof(struct icmpmsg_mib), + __alignof__(struct icmpmsg_mib)) < 0) goto err_icmpmsg_mib; tcp_mib_init(net); @@ -1599,7 +1598,7 @@ static __net_init int ipv4_mib_init_net(struct net *net) static __net_exit void ipv4_mib_exit_net(struct net *net) { - kfree(net->mib.icmpmsg_statistics); + snmp_mib_free((void __percpu **)net->mib.icmpmsg_statistics); snmp_mib_free((void __percpu **)net->mib.icmp_statistics); snmp_mib_free((void __percpu **)net->mib.udplite_statistics); snmp_mib_free((void __percpu **)net->mib.udp_statistics); @@ -1672,8 +1671,6 @@ static int __init inet_init(void) ip_static_sysctl_init(); #endif - tcp_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem; - /* * Add all the base protocols. 
*/ diff --git a/trunk/net/ipv4/arp.c b/trunk/net/ipv4/arp.c index 59402be133f0..96a164aa1367 100644 --- a/trunk/net/ipv4/arp.c +++ b/trunk/net/ipv4/arp.c @@ -112,6 +112,11 @@ #include #include #include +#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE) +#include +struct neigh_table *clip_tbl_hook; +EXPORT_SYMBOL(clip_tbl_hook); +#endif #include #include @@ -121,7 +126,7 @@ /* * Interface to generic neighbour cache. */ -static u32 arp_hash(const void *pkey, const struct net_device *dev, __u32 *hash_rnd); +static u32 arp_hash(const void *pkey, const struct net_device *dev, __u32 rnd); static int arp_constructor(struct neighbour *neigh); static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb); static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb); @@ -159,6 +164,7 @@ static const struct neigh_ops arp_broken_ops = { struct neigh_table arp_tbl = { .family = AF_INET, + .entry_size = sizeof(struct neighbour) + 4, .key_len = 4, .hash = arp_hash, .constructor = arp_constructor, @@ -171,7 +177,7 @@ struct neigh_table arp_tbl = { .gc_staletime = 60 * HZ, .reachable_time = 30 * HZ, .delay_probe_time = 5 * HZ, - .queue_len_bytes = 64*1024, + .queue_len = 3, .ucast_probes = 3, .mcast_probes = 3, .anycast_delay = 1 * HZ, @@ -215,9 +221,9 @@ int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir) static u32 arp_hash(const void *pkey, const struct net_device *dev, - __u32 *hash_rnd) + __u32 hash_rnd) { - return arp_hashfn(*(u32 *)pkey, dev, *hash_rnd); + return arp_hashfn(*(u32 *)pkey, dev, hash_rnd); } static int arp_constructor(struct neighbour *neigh) @@ -277,9 +283,9 @@ static int arp_constructor(struct neighbour *neigh) default: break; case ARPHRD_ROSE: -#if IS_ENABLED(CONFIG_AX25) +#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) case ARPHRD_AX25: -#if IS_ENABLED(CONFIG_NETROM) +#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE) case ARPHRD_NETROM: #endif neigh->ops = &arp_broken_ops; @@ -586,18 +592,16 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, struct sk_buff *skb; struct arphdr *arp; unsigned char *arp_ptr; - int hlen = LL_RESERVED_SPACE(dev); - int tlen = dev->needed_tailroom; /* * Allocate a buffer */ - skb = alloc_skb(arp_hdr_len(dev) + hlen + tlen, GFP_ATOMIC); + skb = alloc_skb(arp_hdr_len(dev) + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC); if (skb == NULL) return NULL; - skb_reserve(skb, hlen); + skb_reserve(skb, LL_RESERVED_SPACE(dev)); skb_reset_network_header(skb); arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev)); skb->dev = dev; @@ -629,13 +633,13 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, arp->ar_pro = htons(ETH_P_IP); break; -#if IS_ENABLED(CONFIG_AX25) +#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) case ARPHRD_AX25: arp->ar_hrd = htons(ARPHRD_AX25); arp->ar_pro = htons(AX25_P_IP); break; -#if IS_ENABLED(CONFIG_NETROM) +#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE) case ARPHRD_NETROM: arp->ar_hrd = htons(ARPHRD_NETROM); arp->ar_pro = htons(AX25_P_IP); @@ -643,13 +647,13 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, #endif #endif -#if IS_ENABLED(CONFIG_FDDI) +#if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE) case ARPHRD_FDDI: arp->ar_hrd = htons(ARPHRD_ETHER); arp->ar_pro = htons(ETH_P_IP); break; #endif -#if IS_ENABLED(CONFIG_TR) +#if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE) case ARPHRD_IEEE802_TR: arp->ar_hrd = htons(ARPHRD_IEEE802); arp->ar_pro = htons(ETH_P_IP); @@ -1036,7 +1040,7 @@ static int 
arp_req_set(struct net *net, struct arpreq *r, return -EINVAL; } switch (dev->type) { -#if IS_ENABLED(CONFIG_FDDI) +#if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE) case ARPHRD_FDDI: /* * According to RFC 1390, FDDI devices should accept ARP @@ -1282,7 +1286,7 @@ void __init arp_init(void) } #ifdef CONFIG_PROC_FS -#if IS_ENABLED(CONFIG_AX25) +#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) /* ------------------------------------------------------------------------ */ /* @@ -1330,7 +1334,7 @@ static void arp_format_neigh_entry(struct seq_file *seq, read_lock(&n->lock); /* Convert hardware address to XX:XX:XX:XX ... form. */ -#if IS_ENABLED(CONFIG_AX25) +#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) if (hatype == ARPHRD_AX25 || hatype == ARPHRD_NETROM) ax2asc2((ax25_address *)n->ha, hbuffer); else { @@ -1343,7 +1347,7 @@ static void arp_format_neigh_entry(struct seq_file *seq, if (k != 0) --k; hbuffer[k] = 0; -#if IS_ENABLED(CONFIG_AX25) +#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) } #endif sprintf(tbuf, "%pI4", n->primary_key); diff --git a/trunk/net/ipv4/fib_rules.c b/trunk/net/ipv4/fib_rules.c index 799fc790b3cf..46339ba7a2d3 100644 --- a/trunk/net/ipv4/fib_rules.c +++ b/trunk/net/ipv4/fib_rules.c @@ -67,7 +67,6 @@ int fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res) return err; } -EXPORT_SYMBOL_GPL(fib_lookup); static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp, int flags, struct fib_lookup_arg *arg) diff --git a/trunk/net/ipv4/fib_trie.c b/trunk/net/ipv4/fib_trie.c index d04b13ae18fe..37b671185c81 100644 --- a/trunk/net/ipv4/fib_trie.c +++ b/trunk/net/ipv4/fib_trie.c @@ -1607,7 +1607,6 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, rcu_read_unlock(); return ret; } -EXPORT_SYMBOL_GPL(fib_table_lookup); /* * Remove the leaf and return parent. diff --git a/trunk/net/ipv4/igmp.c b/trunk/net/ipv4/igmp.c index fa057d105bef..b2ca095cb9da 100644 --- a/trunk/net/ipv4/igmp.c +++ b/trunk/net/ipv4/igmp.c @@ -304,11 +304,9 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) struct igmpv3_report *pig; struct net *net = dev_net(dev); struct flowi4 fl4; - int hlen = LL_RESERVED_SPACE(dev); - int tlen = dev->needed_tailroom; while (1) { - skb = alloc_skb(size + hlen + tlen, + skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC | __GFP_NOWARN); if (skb) break; @@ -329,7 +327,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) skb_dst_set(skb, &rt->dst); skb->dev = dev; - skb_reserve(skb, hlen); + skb_reserve(skb, LL_RESERVED_SPACE(dev)); skb_reset_network_header(skb); pip = ip_hdr(skb); @@ -649,7 +647,6 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, __be32 group = pmc ? 
pmc->multiaddr : 0; struct flowi4 fl4; __be32 dst; - int hlen, tlen; if (type == IGMPV3_HOST_MEMBERSHIP_REPORT) return igmpv3_send_report(in_dev, pmc); @@ -664,9 +661,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, if (IS_ERR(rt)) return -1; - hlen = LL_RESERVED_SPACE(dev); - tlen = dev->needed_tailroom; - skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC); + skb = alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC); if (skb == NULL) { ip_rt_put(rt); return -1; @@ -674,7 +669,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, skb_dst_set(skb, &rt->dst); - skb_reserve(skb, hlen); + skb_reserve(skb, LL_RESERVED_SPACE(dev)); skb_reset_network_header(skb); iph = ip_hdr(skb); @@ -1579,7 +1574,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode, * Add multicast single-source filter to the interface list */ static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode, - __be32 *psfsrc) + __be32 *psfsrc, int delta) { struct ip_sf_list *psf, *psf_prev; @@ -1714,7 +1709,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, pmc->sfcount[sfmode]++; err = 0; for (i=0; i #include -#include static const struct inet_diag_handler **inet_diag_table; @@ -46,22 +45,24 @@ struct inet_diag_entry { u16 userlocks; }; +static struct sock *idiagnl; + #define INET_DIAG_PUT(skb, attrtype, attrlen) \ RTA_DATA(__RTA_PUT(skb, attrtype, attrlen)) static DEFINE_MUTEX(inet_diag_table_mutex); -static const struct inet_diag_handler *inet_diag_lock_handler(int proto) +static const struct inet_diag_handler *inet_diag_lock_handler(int type) { - if (!inet_diag_table[proto]) - request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK, - NETLINK_SOCK_DIAG, AF_INET, proto); + if (!inet_diag_table[type]) + request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, + NETLINK_INET_DIAG, type); mutex_lock(&inet_diag_table_mutex); - if (!inet_diag_table[proto]) + if (!inet_diag_table[type]) return ERR_PTR(-ENOENT); - return inet_diag_table[proto]; + return inet_diag_table[type]; } static inline void inet_diag_unlock_handler( @@ -70,21 +71,21 @@ static inline void inet_diag_unlock_handler( mutex_unlock(&inet_diag_table_mutex); } -int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, - struct sk_buff *skb, struct inet_diag_req *req, - u32 pid, u32 seq, u16 nlmsg_flags, +static int inet_csk_diag_fill(struct sock *sk, + struct sk_buff *skb, + int ext, u32 pid, u32 seq, u16 nlmsg_flags, const struct nlmsghdr *unlh) { const struct inet_sock *inet = inet_sk(sk); + const struct inet_connection_sock *icsk = inet_csk(sk); struct inet_diag_msg *r; struct nlmsghdr *nlh; void *info = NULL; struct inet_diag_meminfo *minfo = NULL; unsigned char *b = skb_tail_pointer(skb); const struct inet_diag_handler *handler; - int ext = req->idiag_ext; - handler = inet_diag_table[req->sdiag_protocol]; + handler = inet_diag_table[unlh->nlmsg_type]; BUG_ON(handler == NULL); nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r)); @@ -96,13 +97,25 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, if (ext & (1 << (INET_DIAG_MEMINFO - 1))) minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo)); + if (ext & (1 << (INET_DIAG_INFO - 1))) + info = INET_DIAG_PUT(skb, INET_DIAG_INFO, + handler->idiag_info_size); + + if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) { + const size_t len = strlen(icsk->icsk_ca_ops->name); + + strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1), + 
icsk->icsk_ca_ops->name); + } + r->idiag_family = sk->sk_family; r->idiag_state = sk->sk_state; r->idiag_timer = 0; r->idiag_retrans = 0; r->id.idiag_if = sk->sk_bound_dev_if; - sock_diag_save_cookie(sk, r->id.idiag_cookie); + r->id.idiag_cookie[0] = (u32)(unsigned long)sk; + r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1); r->id.idiag_sport = inet->inet_sport; r->id.idiag_dport = inet->inet_dport; @@ -115,35 +128,19 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, if (ext & (1 << (INET_DIAG_TOS - 1))) RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos); -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) if (r->idiag_family == AF_INET6) { const struct ipv6_pinfo *np = inet6_sk(sk); - *(struct in6_addr *)r->id.idiag_src = np->rcv_saddr; - *(struct in6_addr *)r->id.idiag_dst = np->daddr; if (ext & (1 << (INET_DIAG_TCLASS - 1))) RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass); - } -#endif - - r->idiag_uid = sock_i_uid(sk); - r->idiag_inode = sock_i_ino(sk); - - if (minfo) { - minfo->idiag_rmem = sk_rmem_alloc_get(sk); - minfo->idiag_wmem = sk->sk_wmem_queued; - minfo->idiag_fmem = sk->sk_forward_alloc; - minfo->idiag_tmem = sk_wmem_alloc_get(sk); - } - if (ext & (1 << (INET_DIAG_SKMEMINFO - 1))) - if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO)) - goto rtattr_failure; - - if (icsk == NULL) { - r->idiag_rqueue = r->idiag_wqueue = 0; - goto out; + ipv6_addr_copy((struct in6_addr *)r->id.idiag_src, + &np->rcv_saddr); + ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst, + &np->daddr); } +#endif #define EXPIRES_IN_MS(tmo) DIV_ROUND_UP((tmo - jiffies) * 1000, HZ) @@ -165,14 +162,14 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, } #undef EXPIRES_IN_MS - if (ext & (1 << (INET_DIAG_INFO - 1))) - info = INET_DIAG_PUT(skb, INET_DIAG_INFO, sizeof(struct tcp_info)); - - if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) { - const size_t len = strlen(icsk->icsk_ca_ops->name); + r->idiag_uid = sock_i_uid(sk); + r->idiag_inode = sock_i_ino(sk); - strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1), - icsk->icsk_ca_ops->name); + if (minfo) { + minfo->idiag_rmem = sk_rmem_alloc_get(sk); + minfo->idiag_wmem = sk->sk_wmem_queued; + minfo->idiag_fmem = sk->sk_forward_alloc; + minfo->idiag_tmem = sk_wmem_alloc_get(sk); } handler->idiag_get_info(sk, r, info); @@ -181,7 +178,6 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info) icsk->icsk_ca_ops->get_info(sk, ext, skb); -out: nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; @@ -190,20 +186,10 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, nlmsg_trim(skb, b); return -EMSGSIZE; } -EXPORT_SYMBOL_GPL(inet_sk_diag_fill); - -static int inet_csk_diag_fill(struct sock *sk, - struct sk_buff *skb, struct inet_diag_req *req, - u32 pid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh) -{ - return inet_sk_diag_fill(sk, inet_csk(sk), - skb, req, pid, seq, nlmsg_flags, unlh); -} static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, - struct sk_buff *skb, struct inet_diag_req *req, - u32 pid, u32 seq, u16 nlmsg_flags, + struct sk_buff *skb, int ext, u32 pid, + u32 seq, u16 nlmsg_flags, const struct nlmsghdr *unlh) { long tmo; @@ -224,7 +210,8 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, r->idiag_family = tw->tw_family; r->idiag_retrans = 0; r->id.idiag_if = tw->tw_bound_dev_if; - sock_diag_save_cookie(tw, 
r->id.idiag_cookie); + r->id.idiag_cookie[0] = (u32)(unsigned long)tw; + r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1); r->id.idiag_sport = tw->tw_sport; r->id.idiag_dport = tw->tw_dport; r->id.idiag_src[0] = tw->tw_rcv_saddr; @@ -236,13 +223,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, r->idiag_wqueue = 0; r->idiag_uid = 0; r->idiag_inode = 0; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) if (tw->tw_family == AF_INET6) { const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw); - *(struct in6_addr *)r->id.idiag_src = tw6->tw_v6_rcv_saddr; - *(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr; + ipv6_addr_copy((struct in6_addr *)r->id.idiag_src, + &tw6->tw_v6_rcv_saddr); + ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst, + &tw6->tw_v6_daddr); } #endif nlh->nlmsg_len = skb_tail_pointer(skb) - previous_tail; @@ -253,31 +242,42 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, } static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, - struct inet_diag_req *r, u32 pid, u32 seq, u16 nlmsg_flags, + int ext, u32 pid, u32 seq, u16 nlmsg_flags, const struct nlmsghdr *unlh) { if (sk->sk_state == TCP_TIME_WAIT) return inet_twsk_diag_fill((struct inet_timewait_sock *)sk, - skb, r, pid, seq, nlmsg_flags, + skb, ext, pid, seq, nlmsg_flags, unlh); - return inet_csk_diag_fill(sk, skb, r, pid, seq, nlmsg_flags, unlh); + return inet_csk_diag_fill(sk, skb, ext, pid, seq, nlmsg_flags, unlh); } -int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb, - const struct nlmsghdr *nlh, struct inet_diag_req *req) +static int inet_diag_get_exact(struct sk_buff *in_skb, + const struct nlmsghdr *nlh) { int err; struct sock *sk; + struct inet_diag_req *req = NLMSG_DATA(nlh); struct sk_buff *rep; + struct inet_hashinfo *hashinfo; + const struct inet_diag_handler *handler; + handler = inet_diag_lock_handler(nlh->nlmsg_type); + if (IS_ERR(handler)) { + err = PTR_ERR(handler); + goto unlock; + } + + hashinfo = handler->idiag_hashinfo; err = -EINVAL; - if (req->sdiag_family == AF_INET) { + + if (req->idiag_family == AF_INET) { sk = inet_lookup(&init_net, hashinfo, req->id.idiag_dst[0], req->id.idiag_dport, req->id.idiag_src[0], req->id.idiag_sport, req->id.idiag_if); } -#if IS_ENABLED(CONFIG_IPV6) - else if (req->sdiag_family == AF_INET6) { +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) + else if (req->idiag_family == AF_INET6) { sk = inet6_lookup(&init_net, hashinfo, (struct in6_addr *)req->id.idiag_dst, req->id.idiag_dport, @@ -287,26 +287,29 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s } #endif else { - goto out_nosk; + goto unlock; } err = -ENOENT; if (sk == NULL) - goto out_nosk; + goto unlock; - err = sock_diag_check_cookie(sk, req->id.idiag_cookie); - if (err) + err = -ESTALE; + if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE || + req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) && + ((u32)(unsigned long)sk != req->id.idiag_cookie[0] || + (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1])) goto out; err = -ENOMEM; rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) + sizeof(struct inet_diag_meminfo) + - sizeof(struct tcp_info) + 64)), + handler->idiag_info_size + 64)), GFP_KERNEL); if (!rep) goto out; - err = sk_diag_fill(sk, rep, req, + err = sk_diag_fill(sk, rep, req->idiag_ext, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, 0, nlh); if (err < 0) { @@ -314,7 +317,7 @@ int inet_diag_dump_one_icsk(struct 
inet_hashinfo *hashinfo, struct sk_buff *in_s kfree_skb(rep); goto out; } - err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid, + err = netlink_unicast(idiagnl, rep, NETLINK_CB(in_skb).pid, MSG_DONTWAIT); if (err > 0) err = 0; @@ -326,25 +329,8 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s else sock_put(sk); } -out_nosk: - return err; -} -EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk); - -static int inet_diag_get_exact(struct sk_buff *in_skb, - const struct nlmsghdr *nlh, - struct inet_diag_req *req) -{ - const struct inet_diag_handler *handler; - int err; - - handler = inet_diag_lock_handler(req->sdiag_protocol); - if (IS_ERR(handler)) - err = PTR_ERR(handler); - else - err = handler->dump_one(in_skb, nlh, req); +unlock: inet_diag_unlock_handler(handler); - return err; } @@ -375,12 +361,9 @@ static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits) } -static int inet_diag_bc_run(const struct nlattr *_bc, - const struct inet_diag_entry *entry) +static int inet_diag_bc_run(const void *bc, int len, + const struct inet_diag_entry *entry) { - const void *bc = nla_data(_bc); - int len = nla_len(_bc); - while (len > 0) { int yes = 1; const struct inet_diag_bc_op *op = bc; @@ -454,35 +437,6 @@ static int inet_diag_bc_run(const struct nlattr *_bc, return len == 0; } -int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk) -{ - struct inet_diag_entry entry; - struct inet_sock *inet = inet_sk(sk); - - if (bc == NULL) - return 1; - - entry.family = sk->sk_family; -#if IS_ENABLED(CONFIG_IPV6) - if (entry.family == AF_INET6) { - struct ipv6_pinfo *np = inet6_sk(sk); - - entry.saddr = np->rcv_saddr.s6_addr32; - entry.daddr = np->daddr.s6_addr32; - } else -#endif - { - entry.saddr = &inet->inet_rcv_saddr; - entry.daddr = &inet->inet_daddr; - } - entry.sport = inet->inet_num; - entry.dport = ntohs(inet->inet_dport); - entry.userlocks = sk->sk_userlocks; - - return inet_diag_bc_run(bc, &entry); -} -EXPORT_SYMBOL_GPL(inet_diag_bc_sk); - static int valid_cc(const void *bc, int len, int cc) { while (len >= 0) { @@ -539,29 +493,57 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len) static int inet_csk_diag_dump(struct sock *sk, struct sk_buff *skb, - struct netlink_callback *cb, - struct inet_diag_req *r, - const struct nlattr *bc) + struct netlink_callback *cb) { - if (!inet_diag_bc_sk(bc, sk)) - return 0; + struct inet_diag_req *r = NLMSG_DATA(cb->nlh); + + if (nlmsg_attrlen(cb->nlh, sizeof(*r))) { + struct inet_diag_entry entry; + const struct nlattr *bc = nlmsg_find_attr(cb->nlh, + sizeof(*r), + INET_DIAG_REQ_BYTECODE); + struct inet_sock *inet = inet_sk(sk); + + entry.family = sk->sk_family; +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) + if (entry.family == AF_INET6) { + struct ipv6_pinfo *np = inet6_sk(sk); + + entry.saddr = np->rcv_saddr.s6_addr32; + entry.daddr = np->daddr.s6_addr32; + } else +#endif + { + entry.saddr = &inet->inet_rcv_saddr; + entry.daddr = &inet->inet_daddr; + } + entry.sport = inet->inet_num; + entry.dport = ntohs(inet->inet_dport); + entry.userlocks = sk->sk_userlocks; - return inet_csk_diag_fill(sk, skb, r, + if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry)) + return 0; + } + + return inet_csk_diag_fill(sk, skb, r->idiag_ext, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); } static int inet_twsk_diag_dump(struct inet_timewait_sock *tw, struct sk_buff *skb, - struct netlink_callback *cb, - struct inet_diag_req *r, - const struct nlattr *bc) + struct 
netlink_callback *cb) { - if (bc != NULL) { + struct inet_diag_req *r = NLMSG_DATA(cb->nlh); + + if (nlmsg_attrlen(cb->nlh, sizeof(*r))) { struct inet_diag_entry entry; + const struct nlattr *bc = nlmsg_find_attr(cb->nlh, + sizeof(*r), + INET_DIAG_REQ_BYTECODE); entry.family = tw->tw_family; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) if (tw->tw_family == AF_INET6) { struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw); @@ -577,11 +559,11 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw, entry.dport = ntohs(tw->tw_dport); entry.userlocks = 0; - if (!inet_diag_bc_run(bc, &entry)) + if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry)) return 0; } - return inet_twsk_diag_fill(tw, skb, r, + return inet_twsk_diag_fill(tw, skb, r->idiag_ext, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); } @@ -607,7 +589,8 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, r->idiag_retrans = req->retrans; r->id.idiag_if = sk->sk_bound_dev_if; - sock_diag_save_cookie(req, r->id.idiag_cookie); + r->id.idiag_cookie[0] = (u32)(unsigned long)req; + r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1); tmo = req->expires - jiffies; if (tmo < 0) @@ -622,10 +605,12 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, r->idiag_wqueue = 0; r->idiag_uid = sock_i_uid(sk); r->idiag_inode = 0; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) if (r->idiag_family == AF_INET6) { - *(struct in6_addr *)r->id.idiag_src = inet6_rsk(req)->loc_addr; - *(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr; + ipv6_addr_copy((struct in6_addr *)r->id.idiag_src, + &inet6_rsk(req)->loc_addr); + ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst, + &inet6_rsk(req)->rmt_addr); } #endif nlh->nlmsg_len = skb_tail_pointer(skb) - b; @@ -638,13 +623,13 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, } static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, - struct netlink_callback *cb, - struct inet_diag_req *r, - const struct nlattr *bc) + struct netlink_callback *cb) { struct inet_diag_entry entry; + struct inet_diag_req *r = NLMSG_DATA(cb->nlh); struct inet_connection_sock *icsk = inet_csk(sk); struct listen_sock *lopt; + const struct nlattr *bc = NULL; struct inet_sock *inet = inet_sk(sk); int j, s_j; int reqnum, s_reqnum; @@ -664,7 +649,9 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, if (!lopt || !lopt->qlen) goto out; - if (bc != NULL) { + if (nlmsg_attrlen(cb->nlh, sizeof(*r))) { + bc = nlmsg_find_attr(cb->nlh, sizeof(*r), + INET_DIAG_REQ_BYTECODE); entry.sport = inet->inet_num; entry.userlocks = sk->sk_userlocks; } @@ -684,20 +671,21 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, if (bc) { entry.saddr = -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) (entry.family == AF_INET6) ? inet6_rsk(req)->loc_addr.s6_addr32 : #endif &ireq->loc_addr; entry.daddr = -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) (entry.family == AF_INET6) ? 
inet6_rsk(req)->rmt_addr.s6_addr32 : #endif &ireq->rmt_addr; entry.dport = ntohs(ireq->rmt_port); - if (!inet_diag_bc_run(bc, &entry)) + if (!inet_diag_bc_run(nla_data(bc), + nla_len(bc), &entry)) continue; } @@ -720,11 +708,19 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, return err; } -void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb, - struct netlink_callback *cb, struct inet_diag_req *r, struct nlattr *bc) +static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) { int i, num; int s_i, s_num; + struct inet_diag_req *r = NLMSG_DATA(cb->nlh); + const struct inet_diag_handler *handler; + struct inet_hashinfo *hashinfo; + + handler = inet_diag_lock_handler(cb->nlh->nlmsg_type); + if (IS_ERR(handler)) + goto unlock; + + hashinfo = handler->idiag_hashinfo; s_i = cb->args[1]; s_num = num = cb->args[2]; @@ -749,10 +745,6 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb, continue; } - if (r->sdiag_family != AF_UNSPEC && - sk->sk_family != r->sdiag_family) - goto next_listen; - if (r->id.idiag_sport != inet->inet_sport && r->id.idiag_sport) goto next_listen; @@ -762,7 +754,7 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb, cb->args[3] > 0) goto syn_recv; - if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) { + if (inet_csk_diag_dump(sk, skb, cb) < 0) { spin_unlock_bh(&ilb->lock); goto done; } @@ -771,7 +763,7 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb, if (!(r->idiag_states & TCPF_SYN_RECV)) goto next_listen; - if (inet_diag_dump_reqs(skb, sk, cb, r, bc) < 0) { + if (inet_diag_dump_reqs(skb, sk, cb) < 0) { spin_unlock_bh(&ilb->lock); goto done; } @@ -793,7 +785,7 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb, } if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV))) - goto out; + goto unlock; for (i = s_i; i <= hashinfo->ehash_mask; i++) { struct inet_ehash_bucket *head = &hashinfo->ehash[i]; @@ -818,16 +810,13 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb, goto next_normal; if (!(r->idiag_states & (1 << sk->sk_state))) goto next_normal; - if (r->sdiag_family != AF_UNSPEC && - sk->sk_family != r->sdiag_family) - goto next_normal; if (r->id.idiag_sport != inet->inet_sport && r->id.idiag_sport) goto next_normal; if (r->id.idiag_dport != inet->inet_dport && r->id.idiag_dport) goto next_normal; - if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) { + if (inet_csk_diag_dump(sk, skb, cb) < 0) { spin_unlock_bh(lock); goto done; } @@ -843,16 +832,13 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb, if (num < s_num) goto next_dying; - if (r->sdiag_family != AF_UNSPEC && - tw->tw_family != r->sdiag_family) - goto next_dying; if (r->id.idiag_sport != tw->tw_sport && r->id.idiag_sport) goto next_dying; if (r->id.idiag_dport != tw->tw_dport && r->id.idiag_dport) goto next_dying; - if (inet_twsk_diag_dump(tw, skb, cb, r, bc) < 0) { + if (inet_twsk_diag_dump(tw, skb, cb) < 0) { spin_unlock_bh(lock); goto done; } @@ -866,85 +852,15 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb, done: cb->args[1] = i; cb->args[2] = num; -out: - ; -} -EXPORT_SYMBOL_GPL(inet_diag_dump_icsk); - -static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - struct inet_diag_req *r, struct nlattr *bc) -{ - const struct inet_diag_handler *handler; - - handler = inet_diag_lock_handler(r->sdiag_protocol); - if 
(!IS_ERR(handler)) - handler->dump(skb, cb, r, bc); +unlock: inet_diag_unlock_handler(handler); - return skb->len; } -static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) +static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { - struct nlattr *bc = NULL; int hdrlen = sizeof(struct inet_diag_req); - if (nlmsg_attrlen(cb->nlh, hdrlen)) - bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE); - - return __inet_diag_dump(skb, cb, (struct inet_diag_req *)NLMSG_DATA(cb->nlh), bc); -} - -static inline int inet_diag_type2proto(int type) -{ - switch (type) { - case TCPDIAG_GETSOCK: - return IPPROTO_TCP; - case DCCPDIAG_GETSOCK: - return IPPROTO_DCCP; - default: - return 0; - } -} - -static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb) -{ - struct inet_diag_req_compat *rc = NLMSG_DATA(cb->nlh); - struct inet_diag_req req; - struct nlattr *bc = NULL; - int hdrlen = sizeof(struct inet_diag_req_compat); - - req.sdiag_family = AF_UNSPEC; /* compatibility */ - req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type); - req.idiag_ext = rc->idiag_ext; - req.idiag_states = rc->idiag_states; - req.id = rc->id; - - if (nlmsg_attrlen(cb->nlh, hdrlen)) - bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE); - - return __inet_diag_dump(skb, cb, &req, bc); -} - -static int inet_diag_get_exact_compat(struct sk_buff *in_skb, - const struct nlmsghdr *nlh) -{ - struct inet_diag_req_compat *rc = NLMSG_DATA(nlh); - struct inet_diag_req req; - - req.sdiag_family = rc->idiag_family; - req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type); - req.idiag_ext = rc->idiag_ext; - req.idiag_states = rc->idiag_states; - req.id = rc->id; - - return inet_diag_get_exact(in_skb, nlh, &req); -} - -static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh) -{ - int hdrlen = sizeof(struct inet_diag_req_compat); - if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX || nlmsg_len(nlh) < hdrlen) return -EINVAL; @@ -961,54 +877,28 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh) return -EINVAL; } - return netlink_dump_start(sock_diag_nlsk, skb, nlh, - inet_diag_dump_compat, NULL, 0); - } - - return inet_diag_get_exact_compat(skb, nlh); -} - -static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) -{ - int hdrlen = sizeof(struct inet_diag_req); - - if (nlmsg_len(h) < hdrlen) - return -EINVAL; - - if (h->nlmsg_flags & NLM_F_DUMP) { - if (nlmsg_attrlen(h, hdrlen)) { - struct nlattr *attr; - attr = nlmsg_find_attr(h, hdrlen, - INET_DIAG_REQ_BYTECODE); - if (attr == NULL || - nla_len(attr) < sizeof(struct inet_diag_bc_op) || - inet_diag_bc_audit(nla_data(attr), nla_len(attr))) - return -EINVAL; - } - - return netlink_dump_start(sock_diag_nlsk, skb, h, + return netlink_dump_start(idiagnl, skb, nlh, inet_diag_dump, NULL, 0); } - return inet_diag_get_exact(skb, h, (struct inet_diag_req *)NLMSG_DATA(h)); + return inet_diag_get_exact(skb, nlh); } -static struct sock_diag_handler inet_diag_handler = { - .family = AF_INET, - .dump = inet_diag_handler_dump, -}; +static DEFINE_MUTEX(inet_diag_mutex); -static struct sock_diag_handler inet6_diag_handler = { - .family = AF_INET6, - .dump = inet_diag_handler_dump, -}; +static void inet_diag_rcv(struct sk_buff *skb) +{ + mutex_lock(&inet_diag_mutex); + netlink_rcv_skb(skb, &inet_diag_rcv_msg); + mutex_unlock(&inet_diag_mutex); +} int inet_diag_register(const struct inet_diag_handler *h) { const __u16 type = h->idiag_type; int err = -EINVAL; - if 
(type >= IPPROTO_MAX) + if (type >= INET_DIAG_GETSOCK_MAX) goto out; mutex_lock(&inet_diag_table_mutex); @@ -1027,7 +917,7 @@ void inet_diag_unregister(const struct inet_diag_handler *h) { const __u16 type = h->idiag_type; - if (type >= IPPROTO_MAX) + if (type >= INET_DIAG_GETSOCK_MAX) return; mutex_lock(&inet_diag_table_mutex); @@ -1038,7 +928,7 @@ EXPORT_SYMBOL_GPL(inet_diag_unregister); static int __init inet_diag_init(void) { - const int inet_diag_table_size = (IPPROTO_MAX * + const int inet_diag_table_size = (INET_DIAG_GETSOCK_MAX * sizeof(struct inet_diag_handler *)); int err = -ENOMEM; @@ -1046,35 +936,25 @@ static int __init inet_diag_init(void) if (!inet_diag_table) goto out; - err = sock_diag_register(&inet_diag_handler); - if (err) - goto out_free_nl; - - err = sock_diag_register(&inet6_diag_handler); - if (err) - goto out_free_inet; - - sock_diag_register_inet_compat(inet_diag_rcv_msg_compat); + idiagnl = netlink_kernel_create(&init_net, NETLINK_INET_DIAG, 0, + inet_diag_rcv, NULL, THIS_MODULE); + if (idiagnl == NULL) + goto out_free_table; + err = 0; out: return err; - -out_free_inet: - sock_diag_unregister(&inet_diag_handler); -out_free_nl: +out_free_table: kfree(inet_diag_table); goto out; } static void __exit inet_diag_exit(void) { - sock_diag_unregister(&inet6_diag_handler); - sock_diag_unregister(&inet_diag_handler); - sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat); + netlink_kernel_release(idiagnl); kfree(inet_diag_table); } module_init(inet_diag_init); module_exit(inet_diag_exit); MODULE_LICENSE("GPL"); -MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 /* AF_INET */); -MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10 /* AF_INET6 */); +MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_INET_DIAG); diff --git a/trunk/net/ipv4/ip_gre.c b/trunk/net/ipv4/ip_gre.c index 2b53a1f7abf6..d55110e93120 100644 --- a/trunk/net/ipv4/ip_gre.c +++ b/trunk/net/ipv4/ip_gre.c @@ -46,7 +46,7 @@ #include #include -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) #include #include #include @@ -171,7 +171,7 @@ struct pcpu_tstats { unsigned long rx_bytes; unsigned long tx_packets; unsigned long tx_bytes; -} __attribute__((aligned(4*sizeof(unsigned long)))); +}; static struct net_device_stats *ipgre_get_stats(struct net_device *dev) { @@ -729,9 +729,9 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev if ((dst = rt->rt_gateway) == 0) goto tx_error_icmp; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) else if (skb->protocol == htons(ETH_P_IPV6)) { - struct neighbour *neigh = dst_get_neighbour_noref(skb_dst(skb)); + struct neighbour *neigh = dst_get_neighbour(skb_dst(skb)); const struct in6_addr *addr6; int addr_type; @@ -799,7 +799,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev goto tx_error; } } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) else if (skb->protocol == htons(ETH_P_IPV6)) { struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb); @@ -835,8 +835,6 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev if (skb_headroom(skb) < max_headroom || skb_shared(skb)|| (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); - if (max_headroom > dev->needed_headroom) - dev->needed_headroom = max_headroom; if (!new_skb) { ip_rt_put(rt); dev->stats.tx_dropped++; @@ -875,7 +873,7 @@ 
static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev if ((iph->ttl = tiph->ttl) == 0) { if (skb->protocol == htons(ETH_P_IP)) iph->ttl = old_iph->ttl; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) else if (skb->protocol == htons(ETH_P_IPV6)) iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit; #endif diff --git a/trunk/net/ipv4/ip_output.c b/trunk/net/ipv4/ip_output.c index ff302bde8890..0bc95f3977d2 100644 --- a/trunk/net/ipv4/ip_output.c +++ b/trunk/net/ipv4/ip_output.c @@ -206,7 +206,7 @@ static inline int ip_finish_output2(struct sk_buff *skb) } rcu_read_lock(); - neigh = dst_get_neighbour_noref(dst); + neigh = dst_get_neighbour(dst); if (neigh) { int res = neigh_output(neigh, skb); @@ -319,20 +319,6 @@ int ip_output(struct sk_buff *skb) !(IPCB(skb)->flags & IPSKB_REROUTED)); } -/* - * copy saddr and daddr, possibly using 64bit load/stores - * Equivalent to : - * iph->saddr = fl4->saddr; - * iph->daddr = fl4->daddr; - */ -static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4) -{ - BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) != - offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr)); - memcpy(&iph->saddr, &fl4->saddr, - sizeof(fl4->saddr) + sizeof(fl4->daddr)); -} - int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl) { struct sock *sk = skb->sk; @@ -395,8 +381,8 @@ int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl) iph->frag_off = 0; iph->ttl = ip_select_ttl(inet, &rt->dst); iph->protocol = sk->sk_protocol; - ip_copy_addrs(iph, fl4); - + iph->saddr = fl4->saddr; + iph->daddr = fl4->daddr; /* Transport layer set skb->h.foo itself. */ if (inet_opt && inet_opt->opt.optlen) { @@ -1351,7 +1337,8 @@ struct sk_buff *__ip_make_skb(struct sock *sk, ip_select_ident(iph, &rt->dst, sk); iph->ttl = ttl; iph->protocol = sk->sk_protocol; - ip_copy_addrs(iph, fl4); + iph->saddr = fl4->saddr; + iph->daddr = fl4->daddr; if (opt) { iph->ihl += opt->optlen>>2; diff --git a/trunk/net/ipv4/ip_sockglue.c b/trunk/net/ipv4/ip_sockglue.c index 8aa87c19fa00..09ff51bf16a4 100644 --- a/trunk/net/ipv4/ip_sockglue.c +++ b/trunk/net/ipv4/ip_sockglue.c @@ -37,7 +37,7 @@ #include #include #include -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) #include #endif @@ -55,13 +55,20 @@ /* * SOL_IP control messages. 
*/ -#define PKTINFO_SKB_CB(__skb) ((struct in_pktinfo *)((__skb)->cb)) static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb) { - struct in_pktinfo info = *PKTINFO_SKB_CB(skb); + struct in_pktinfo info; + struct rtable *rt = skb_rtable(skb); info.ipi_addr.s_addr = ip_hdr(skb)->daddr; + if (rt) { + info.ipi_ifindex = rt->rt_iif; + info.ipi_spec_dst.s_addr = rt->rt_spec_dst; + } else { + info.ipi_ifindex = 0; + info.ipi_spec_dst.s_addr = 0; + } put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info); } @@ -508,7 +515,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, sock_owned_by_user(sk)); if (inet->is_icsk) { struct inet_connection_sock *icsk = inet_csk(sk); -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) if (sk->sk_family == PF_INET || (!((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) && @@ -519,7 +526,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, if (opt) icsk->icsk_ext_hdr_len += opt->opt.optlen; icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) } #endif } @@ -985,28 +992,20 @@ static int do_ip_setsockopt(struct sock *sk, int level, } /** - * ipv4_pktinfo_prepare - transfert some info from rtable to skb + * ip_queue_rcv_skb - Queue an skb into sock receive queue * @sk: socket * @skb: buffer * - * To support IP_CMSG_PKTINFO option, we store rt_iif and rt_spec_dst - * in skb->cb[] before dst drop. - * This way, receiver doesnt make cache line misses to read rtable. + * Queues an skb into socket receive queue. If IP_CMSG_PKTINFO option + * is not set, we drop skb dst entry now, while dst cache line is hot. */ -void ipv4_pktinfo_prepare(struct sk_buff *skb) +int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { - struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb); - const struct rtable *rt = skb_rtable(skb); - - if (rt) { - pktinfo->ipi_ifindex = rt->rt_iif; - pktinfo->ipi_spec_dst.s_addr = rt->rt_spec_dst; - } else { - pktinfo->ipi_ifindex = 0; - pktinfo->ipi_spec_dst.s_addr = 0; - } - skb_dst_drop(skb); + if (!(inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO)) + skb_dst_drop(skb); + return sock_queue_rcv_skb(sk, skb); } +EXPORT_SYMBOL(ip_queue_rcv_skb); int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) diff --git a/trunk/net/ipv4/ipconfig.c b/trunk/net/ipv4/ipconfig.c index 7e4ec9fc2cef..99ec116bef14 100644 --- a/trunk/net/ipv4/ipconfig.c +++ b/trunk/net/ipv4/ipconfig.c @@ -767,15 +767,13 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d struct sk_buff *skb; struct bootp_pkt *b; struct iphdr *h; - int hlen = LL_RESERVED_SPACE(dev); - int tlen = dev->needed_tailroom; /* Allocate packet */ - skb = alloc_skb(sizeof(struct bootp_pkt) + hlen + tlen + 15, + skb = alloc_skb(sizeof(struct bootp_pkt) + LL_ALLOCATED_SPACE(dev) + 15, GFP_KERNEL); if (!skb) return; - skb_reserve(skb, hlen); + skb_reserve(skb, LL_RESERVED_SPACE(dev)); b = (struct bootp_pkt *) skb_put(skb, sizeof(struct bootp_pkt)); memset(b, 0, sizeof(struct bootp_pkt)); @@ -828,13 +826,8 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d skb->dev = dev; skb->protocol = htons(ETH_P_IP); if (dev_hard_header(skb, dev, ntohs(skb->protocol), - dev->broadcast, dev->dev_addr, skb->len) < 0) { - kfree_skb(skb); - printk("E"); - return; - } - - if (dev_queue_xmit(skb) < 0) + dev->broadcast, dev->dev_addr, skb->len) < 0 || + dev_queue_xmit(skb) < 0) printk("E"); 
} diff --git a/trunk/net/ipv4/ipip.c b/trunk/net/ipv4/ipip.c index 413ed1ba7a5a..0b2e7329abda 100644 --- a/trunk/net/ipv4/ipip.c +++ b/trunk/net/ipv4/ipip.c @@ -148,7 +148,7 @@ struct pcpu_tstats { unsigned long rx_bytes; unsigned long tx_packets; unsigned long tx_bytes; -} __attribute__((aligned(4*sizeof(unsigned long)))); +}; static struct net_device_stats *ipip_get_stats(struct net_device *dev) { diff --git a/trunk/net/ipv4/ipmr.c b/trunk/net/ipv4/ipmr.c index 8e54490ee3f4..76a7f07b38b6 100644 --- a/trunk/net/ipv4/ipmr.c +++ b/trunk/net/ipv4/ipmr.c @@ -1520,6 +1520,7 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v struct mr_table *mrt; struct vif_device *v; int ct; + LIST_HEAD(list); if (event != NETDEV_UNREGISTER) return NOTIFY_DONE; @@ -1528,9 +1529,10 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v v = &mrt->vif_table[0]; for (ct = 0; ct < mrt->maxvif; ct++, v++) { if (v->dev == dev) - vif_delete(mrt, ct, 1, NULL); + vif_delete(mrt, ct, 1, &list); } } + unregister_netdevice_many(&list); return NOTIFY_DONE; } diff --git a/trunk/net/ipv4/netfilter/Kconfig b/trunk/net/ipv4/netfilter/Kconfig index 74dfc9e5211f..f19f2182894c 100644 --- a/trunk/net/ipv4/netfilter/Kconfig +++ b/trunk/net/ipv4/netfilter/Kconfig @@ -27,7 +27,7 @@ config NF_CONNTRACK_IPV4 config NF_CONNTRACK_PROC_COMPAT bool "proc/sysctl compatibility with old connection tracking" - depends on NF_CONNTRACK_PROCFS && NF_CONNTRACK_IPV4 + depends on NF_CONNTRACK_IPV4 default y help This option enables /proc and sysctl compatibility with the old @@ -76,21 +76,11 @@ config IP_NF_MATCH_AH config IP_NF_MATCH_ECN tristate '"ecn" match support' depends on NETFILTER_ADVANCED - select NETFILTER_XT_MATCH_ECN - ---help--- - This is a backwards-compat option for the user's convenience - (e.g. when running oldconfig). It selects - CONFIG_NETFILTER_XT_MATCH_ECN. - -config IP_NF_MATCH_RPFILTER - tristate '"rpfilter" reverse path filter match support' - depends on NETFILTER_ADVANCED - ---help--- - This option allows you to match packets whose replies would - go out via the interface the packet came in. + help + This option adds a `ECN' match, which allows you to match against + the IPv4 and TCP header ECN fields. To compile it as a module, choose M here. If unsure, say N. - The module will be called ipt_rpfilter. 
config IP_NF_MATCH_TTL tristate '"ttl" match support' diff --git a/trunk/net/ipv4/netfilter/Makefile b/trunk/net/ipv4/netfilter/Makefile index 213a462b739b..dca2082ec683 100644 --- a/trunk/net/ipv4/netfilter/Makefile +++ b/trunk/net/ipv4/netfilter/Makefile @@ -49,7 +49,7 @@ obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o # matches obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o -obj-$(CONFIG_IP_NF_MATCH_RPFILTER) += ipt_rpfilter.o +obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o # targets obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o diff --git a/trunk/net/ipv4/netfilter/ip_queue.c b/trunk/net/ipv4/netfilter/ip_queue.c index a057fe64debd..e59aabd0eae4 100644 --- a/trunk/net/ipv4/netfilter/ip_queue.c +++ b/trunk/net/ipv4/netfilter/ip_queue.c @@ -404,7 +404,6 @@ __ipq_rcv_skb(struct sk_buff *skb) int status, type, pid, flags; unsigned int nlmsglen, skblen; struct nlmsghdr *nlh; - bool enable_timestamp = false; skblen = skb->len; if (skblen < sizeof(*nlh)) @@ -442,13 +441,12 @@ __ipq_rcv_skb(struct sk_buff *skb) RCV_SKB_FAIL(-EBUSY); } } else { - enable_timestamp = true; + net_enable_timestamp(); peer_pid = pid; } spin_unlock_bh(&queue_lock); - if (enable_timestamp) - net_enable_timestamp(); + status = ipq_receive_peer(NLMSG_DATA(nlh), type, nlmsglen - NLMSG_LENGTH(0)); if (status < 0) diff --git a/trunk/net/ipv4/netfilter/ipt_MASQUERADE.c b/trunk/net/ipv4/netfilter/ipt_MASQUERADE.c index 2f210c79dc87..9931152a78b5 100644 --- a/trunk/net/ipv4/netfilter/ipt_MASQUERADE.c +++ b/trunk/net/ipv4/netfilter/ipt_MASQUERADE.c @@ -30,9 +30,9 @@ MODULE_DESCRIPTION("Xtables: automatic-address SNAT"); /* FIXME: Multiple targets. --RR */ static int masquerade_tg_check(const struct xt_tgchk_param *par) { - const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; + const struct nf_nat_multi_range_compat *mr = par->targinfo; - if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) { + if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) { pr_debug("bad MAP_IPS.\n"); return -EINVAL; } @@ -49,8 +49,8 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par) struct nf_conn *ct; struct nf_conn_nat *nat; enum ip_conntrack_info ctinfo; - struct nf_nat_ipv4_range newrange; - const struct nf_nat_ipv4_multi_range_compat *mr; + struct nf_nat_range newrange; + const struct nf_nat_multi_range_compat *mr; const struct rtable *rt; __be32 newsrc; @@ -79,13 +79,13 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par) nat->masq_index = par->out->ifindex; /* Transfer from original range. */ - newrange = ((struct nf_nat_ipv4_range) - { mr->range[0].flags | NF_NAT_RANGE_MAP_IPS, + newrange = ((struct nf_nat_range) + { mr->range[0].flags | IP_NAT_RANGE_MAP_IPS, newsrc, newsrc, mr->range[0].min, mr->range[0].max }); /* Hand modified range to generic setup. 
*/ - return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC); + return nf_nat_setup_info(ct, &newrange, IP_NAT_MANIP_SRC); } static int @@ -139,7 +139,7 @@ static struct xt_target masquerade_tg_reg __read_mostly = { .name = "MASQUERADE", .family = NFPROTO_IPV4, .target = masquerade_tg, - .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat), + .targetsize = sizeof(struct nf_nat_multi_range_compat), .table = "nat", .hooks = 1 << NF_INET_POST_ROUTING, .checkentry = masquerade_tg_check, diff --git a/trunk/net/ipv4/netfilter/ipt_NETMAP.c b/trunk/net/ipv4/netfilter/ipt_NETMAP.c index b5bfbbabf70d..6cdb298f1035 100644 --- a/trunk/net/ipv4/netfilter/ipt_NETMAP.c +++ b/trunk/net/ipv4/netfilter/ipt_NETMAP.c @@ -24,9 +24,9 @@ MODULE_DESCRIPTION("Xtables: 1:1 NAT mapping of IPv4 subnets"); static int netmap_tg_check(const struct xt_tgchk_param *par) { - const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; + const struct nf_nat_multi_range_compat *mr = par->targinfo; - if (!(mr->range[0].flags & NF_NAT_RANGE_MAP_IPS)) { + if (!(mr->range[0].flags & IP_NAT_RANGE_MAP_IPS)) { pr_debug("bad MAP_IPS.\n"); return -EINVAL; } @@ -43,8 +43,8 @@ netmap_tg(struct sk_buff *skb, const struct xt_action_param *par) struct nf_conn *ct; enum ip_conntrack_info ctinfo; __be32 new_ip, netmask; - const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; - struct nf_nat_ipv4_range newrange; + const struct nf_nat_multi_range_compat *mr = par->targinfo; + struct nf_nat_range newrange; NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING || par->hooknum == NF_INET_POST_ROUTING || @@ -61,8 +61,8 @@ netmap_tg(struct sk_buff *skb, const struct xt_action_param *par) new_ip = ip_hdr(skb)->saddr & ~netmask; new_ip |= mr->range[0].min_ip & netmask; - newrange = ((struct nf_nat_ipv4_range) - { mr->range[0].flags | NF_NAT_RANGE_MAP_IPS, + newrange = ((struct nf_nat_range) + { mr->range[0].flags | IP_NAT_RANGE_MAP_IPS, new_ip, new_ip, mr->range[0].min, mr->range[0].max }); @@ -74,7 +74,7 @@ static struct xt_target netmap_tg_reg __read_mostly = { .name = "NETMAP", .family = NFPROTO_IPV4, .target = netmap_tg, - .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat), + .targetsize = sizeof(struct nf_nat_multi_range_compat), .table = "nat", .hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_POST_ROUTING) | diff --git a/trunk/net/ipv4/netfilter/ipt_REDIRECT.c b/trunk/net/ipv4/netfilter/ipt_REDIRECT.c index 7c0103a5203e..18a0656505a0 100644 --- a/trunk/net/ipv4/netfilter/ipt_REDIRECT.c +++ b/trunk/net/ipv4/netfilter/ipt_REDIRECT.c @@ -28,9 +28,9 @@ MODULE_DESCRIPTION("Xtables: Connection redirection to localhost"); /* FIXME: Take multiple ranges --RR */ static int redirect_tg_check(const struct xt_tgchk_param *par) { - const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; + const struct nf_nat_multi_range_compat *mr = par->targinfo; - if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) { + if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) { pr_debug("bad MAP_IPS.\n"); return -EINVAL; } @@ -47,8 +47,8 @@ redirect_tg(struct sk_buff *skb, const struct xt_action_param *par) struct nf_conn *ct; enum ip_conntrack_info ctinfo; __be32 newdst; - const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; - struct nf_nat_ipv4_range newrange; + const struct nf_nat_multi_range_compat *mr = par->targinfo; + struct nf_nat_range newrange; NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING || par->hooknum == NF_INET_LOCAL_OUT); @@ -76,20 +76,20 @@ redirect_tg(struct sk_buff *skb, const struct xt_action_param *par) } /* 
Transfer from original range. */ - newrange = ((struct nf_nat_ipv4_range) - { mr->range[0].flags | NF_NAT_RANGE_MAP_IPS, + newrange = ((struct nf_nat_range) + { mr->range[0].flags | IP_NAT_RANGE_MAP_IPS, newdst, newdst, mr->range[0].min, mr->range[0].max }); /* Hand modified range to generic setup. */ - return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST); + return nf_nat_setup_info(ct, &newrange, IP_NAT_MANIP_DST); } static struct xt_target redirect_tg_reg __read_mostly = { .name = "REDIRECT", .family = NFPROTO_IPV4, .target = redirect_tg, - .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat), + .targetsize = sizeof(struct nf_nat_multi_range_compat), .table = "nat", .hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT), .checkentry = redirect_tg_check, diff --git a/trunk/net/ipv4/netfilter/ipt_ULOG.c b/trunk/net/ipv4/netfilter/ipt_ULOG.c index ba5756d20165..b5508151e547 100644 --- a/trunk/net/ipv4/netfilter/ipt_ULOG.c +++ b/trunk/net/ipv4/netfilter/ipt_ULOG.c @@ -65,7 +65,7 @@ static unsigned int flushtimeout = 10; module_param(flushtimeout, uint, 0600); MODULE_PARM_DESC(flushtimeout, "buffer flush timeout (hundredths of a second)"); -static bool nflog = true; +static int nflog = 1; module_param(nflog, bool, 0400); MODULE_PARM_DESC(nflog, "register as internal netfilter logging module"); diff --git a/trunk/net/ipv4/netfilter/ipt_ecn.c b/trunk/net/ipv4/netfilter/ipt_ecn.c new file mode 100644 index 000000000000..2b57e52c746c --- /dev/null +++ b/trunk/net/ipv4/netfilter/ipt_ecn.c @@ -0,0 +1,127 @@ +/* IP tables module for matching the value of the IPv4 and TCP ECN bits + * + * (C) 2002 by Harald Welte + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +MODULE_AUTHOR("Harald Welte "); +MODULE_DESCRIPTION("Xtables: Explicit Congestion Notification (ECN) flag match for IPv4"); +MODULE_LICENSE("GPL"); + +static inline bool match_ip(const struct sk_buff *skb, + const struct ipt_ecn_info *einfo) +{ + return ((ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect) ^ + !!(einfo->invert & IPT_ECN_OP_MATCH_IP); +} + +static inline bool match_tcp(const struct sk_buff *skb, + const struct ipt_ecn_info *einfo, + bool *hotdrop) +{ + struct tcphdr _tcph; + const struct tcphdr *th; + + /* In practice, TCP match does this, so can't fail. But let's + * be good citizens. 
+ */ + th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); + if (th == NULL) { + *hotdrop = false; + return false; + } + + if (einfo->operation & IPT_ECN_OP_MATCH_ECE) { + if (einfo->invert & IPT_ECN_OP_MATCH_ECE) { + if (th->ece == 1) + return false; + } else { + if (th->ece == 0) + return false; + } + } + + if (einfo->operation & IPT_ECN_OP_MATCH_CWR) { + if (einfo->invert & IPT_ECN_OP_MATCH_CWR) { + if (th->cwr == 1) + return false; + } else { + if (th->cwr == 0) + return false; + } + } + + return true; +} + +static bool ecn_mt(const struct sk_buff *skb, struct xt_action_param *par) +{ + const struct ipt_ecn_info *info = par->matchinfo; + + if (info->operation & IPT_ECN_OP_MATCH_IP) + if (!match_ip(skb, info)) + return false; + + if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) { + if (!match_tcp(skb, info, &par->hotdrop)) + return false; + } + + return true; +} + +static int ecn_mt_check(const struct xt_mtchk_param *par) +{ + const struct ipt_ecn_info *info = par->matchinfo; + const struct ipt_ip *ip = par->entryinfo; + + if (info->operation & IPT_ECN_OP_MATCH_MASK) + return -EINVAL; + + if (info->invert & IPT_ECN_OP_MATCH_MASK) + return -EINVAL; + + if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR) && + (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) { + pr_info("cannot match TCP bits in rule for non-tcp packets\n"); + return -EINVAL; + } + + return 0; +} + +static struct xt_match ecn_mt_reg __read_mostly = { + .name = "ecn", + .family = NFPROTO_IPV4, + .match = ecn_mt, + .matchsize = sizeof(struct ipt_ecn_info), + .checkentry = ecn_mt_check, + .me = THIS_MODULE, +}; + +static int __init ecn_mt_init(void) +{ + return xt_register_match(&ecn_mt_reg); +} + +static void __exit ecn_mt_exit(void) +{ + xt_unregister_match(&ecn_mt_reg); +} + +module_init(ecn_mt_init); +module_exit(ecn_mt_exit); diff --git a/trunk/net/ipv4/netfilter/ipt_rpfilter.c b/trunk/net/ipv4/netfilter/ipt_rpfilter.c deleted file mode 100644 index 31371be8174b..000000000000 --- a/trunk/net/ipv4/netfilter/ipt_rpfilter.c +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright (c) 2011 Florian Westphal - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * based on fib_frontend.c; Author: Alexey Kuznetsov, - */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Florian Westphal "); -MODULE_DESCRIPTION("iptables: ipv4 reverse path filter match"); - -/* don't try to find route from mcast/bcast/zeronet */ -static __be32 rpfilter_get_saddr(__be32 addr) -{ - if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr) || - ipv4_is_zeronet(addr)) - return 0; - return addr; -} - -static bool rpfilter_lookup_reverse(struct flowi4 *fl4, - const struct net_device *dev, u8 flags) -{ - struct fib_result res; - bool dev_match; - struct net *net = dev_net(dev); - int ret __maybe_unused; - - if (fib_lookup(net, fl4, &res)) - return false; - - if (res.type != RTN_UNICAST) { - if (res.type != RTN_LOCAL || !(flags & XT_RPFILTER_ACCEPT_LOCAL)) - return false; - } - dev_match = false; -#ifdef CONFIG_IP_ROUTE_MULTIPATH - for (ret = 0; ret < res.fi->fib_nhs; ret++) { - struct fib_nh *nh = &res.fi->fib_nh[ret]; - - if (nh->nh_dev == dev) { - dev_match = true; - break; - } - } -#else - if (FIB_RES_DEV(res) == dev) - dev_match = true; -#endif - if (dev_match || flags & XT_RPFILTER_LOOSE) - return FIB_RES_NH(res).nh_scope <= RT_SCOPE_HOST; - return dev_match; -} - -static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) -{ - const struct xt_rpfilter_info *info; - const struct iphdr *iph; - struct flowi4 flow; - bool invert; - - info = par->matchinfo; - invert = info->flags & XT_RPFILTER_INVERT; - - if (par->in->flags & IFF_LOOPBACK) - return true ^ invert; - - iph = ip_hdr(skb); - if (ipv4_is_multicast(iph->daddr)) { - if (ipv4_is_zeronet(iph->saddr)) - return ipv4_is_local_multicast(iph->daddr) ^ invert; - flow.flowi4_iif = 0; - } else { - flow.flowi4_iif = dev_net(par->in)->loopback_dev->ifindex; - } - - flow.daddr = iph->saddr; - flow.saddr = rpfilter_get_saddr(iph->daddr); - flow.flowi4_oif = 0; - flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? 
skb->mark : 0; - flow.flowi4_tos = RT_TOS(iph->tos); - flow.flowi4_scope = RT_SCOPE_UNIVERSE; - - return rpfilter_lookup_reverse(&flow, par->in, info->flags) ^ invert; -} - -static int rpfilter_check(const struct xt_mtchk_param *par) -{ - const struct xt_rpfilter_info *info = par->matchinfo; - unsigned int options = ~XT_RPFILTER_OPTION_MASK; - if (info->flags & options) { - pr_info("unknown options encountered"); - return -EINVAL; - } - - if (strcmp(par->table, "mangle") != 0 && - strcmp(par->table, "raw") != 0) { - pr_info("match only valid in the \'raw\' " - "or \'mangle\' tables, not \'%s\'.\n", par->table); - return -EINVAL; - } - - return 0; -} - -static struct xt_match rpfilter_mt_reg __read_mostly = { - .name = "rpfilter", - .family = NFPROTO_IPV4, - .checkentry = rpfilter_check, - .match = rpfilter_mt, - .matchsize = sizeof(struct xt_rpfilter_info), - .hooks = (1 << NF_INET_PRE_ROUTING), - .me = THIS_MODULE -}; - -static int __init rpfilter_mt_init(void) -{ - return xt_register_match(&rpfilter_mt_reg); -} - -static void __exit rpfilter_mt_exit(void) -{ - xt_unregister_match(&rpfilter_mt_reg); -} - -module_init(rpfilter_mt_init); -module_exit(rpfilter_mt_exit); diff --git a/trunk/net/ipv4/netfilter/iptable_filter.c b/trunk/net/ipv4/netfilter/iptable_filter.c index 0e58f09e59fb..c37641e819f2 100644 --- a/trunk/net/ipv4/netfilter/iptable_filter.c +++ b/trunk/net/ipv4/netfilter/iptable_filter.c @@ -52,7 +52,7 @@ iptable_filter_hook(unsigned int hook, struct sk_buff *skb, static struct nf_hook_ops *filter_ops __read_mostly; /* Default to forward because I got too much mail already. */ -static bool forward = NF_ACCEPT; +static int forward = NF_ACCEPT; module_param(forward, bool, 0000); static int __net_init iptable_filter_net_init(struct net *net) diff --git a/trunk/net/ipv4/netfilter/nf_nat_core.c b/trunk/net/ipv4/netfilter/nf_nat_core.c index acdd002bb540..447bc5cfdc6c 100644 --- a/trunk/net/ipv4/netfilter/nf_nat_core.c +++ b/trunk/net/ipv4/netfilter/nf_nat_core.c @@ -30,6 +30,7 @@ #include #include #include +#include #include static DEFINE_SPINLOCK(nf_nat_lock); @@ -56,7 +57,7 @@ hash_by_src(const struct net *net, u16 zone, /* Original src, to ensure we map it consistently if poss. */ hash = jhash_3words((__force u32)tuple->src.u3.ip, (__force u32)tuple->src.u.all ^ zone, - tuple->dst.protonum, nf_conntrack_hash_rnd); + tuple->dst.protonum, 0); return ((u64)hash * net->ipv4.nat_htable_size) >> 32; } @@ -81,14 +82,14 @@ EXPORT_SYMBOL(nf_nat_used_tuple); * that meet the constraints of range. */ static int in_range(const struct nf_conntrack_tuple *tuple, - const struct nf_nat_ipv4_range *range) + const struct nf_nat_range *range) { const struct nf_nat_protocol *proto; int ret = 0; /* If we are supposed to map IPs, then we must be in the range specified, otherwise let this drag us onto a new src IP. 
*/ - if (range->flags & NF_NAT_RANGE_MAP_IPS) { + if (range->flags & IP_NAT_RANGE_MAP_IPS) { if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) || ntohl(tuple->src.u3.ip) > ntohl(range->max_ip)) return 0; @@ -96,8 +97,8 @@ in_range(const struct nf_conntrack_tuple *tuple, rcu_read_lock(); proto = __nf_nat_proto_find(tuple->dst.protonum); - if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) || - proto->in_range(tuple, NF_NAT_MANIP_SRC, + if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) || + proto->in_range(tuple, IP_NAT_MANIP_SRC, &range->min, &range->max)) ret = 1; rcu_read_unlock(); @@ -122,7 +123,7 @@ static int find_appropriate_src(struct net *net, u16 zone, const struct nf_conntrack_tuple *tuple, struct nf_conntrack_tuple *result, - const struct nf_nat_ipv4_range *range) + const struct nf_nat_range *range) { unsigned int h = hash_by_src(net, zone, tuple); const struct nf_conn_nat *nat; @@ -156,7 +157,7 @@ find_appropriate_src(struct net *net, u16 zone, */ static void find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple, - const struct nf_nat_ipv4_range *range, + const struct nf_nat_range *range, const struct nf_conn *ct, enum nf_nat_manip_type maniptype) { @@ -165,10 +166,10 @@ find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple, u_int32_t minip, maxip, j; /* No IP mapping? Do nothing. */ - if (!(range->flags & NF_NAT_RANGE_MAP_IPS)) + if (!(range->flags & IP_NAT_RANGE_MAP_IPS)) return; - if (maniptype == NF_NAT_MANIP_SRC) + if (maniptype == IP_NAT_MANIP_SRC) var_ipp = &tuple->src.u3.ip; else var_ipp = &tuple->dst.u3.ip; @@ -188,7 +189,7 @@ find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple, minip = ntohl(range->min_ip); maxip = ntohl(range->max_ip); j = jhash_2words((__force u32)tuple->src.u3.ip, - range->flags & NF_NAT_RANGE_PERSISTENT ? + range->flags & IP_NAT_RANGE_PERSISTENT ? 0 : (__force u32)tuple->dst.u3.ip ^ zone, 0); j = ((u64)j * (maxip - minip + 1)) >> 32; *var_ipp = htonl(minip + j); @@ -203,7 +204,7 @@ find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple, static void get_unique_tuple(struct nf_conntrack_tuple *tuple, const struct nf_conntrack_tuple *orig_tuple, - const struct nf_nat_ipv4_range *range, + const struct nf_nat_range *range, struct nf_conn *ct, enum nf_nat_manip_type maniptype) { @@ -218,8 +219,8 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple, This is only required for source (ie. NAT/masq) mappings. So far, we don't do local source mappings, so multiple manips not an issue. 
*/ - if (maniptype == NF_NAT_MANIP_SRC && - !(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) { + if (maniptype == IP_NAT_MANIP_SRC && + !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) { /* try the original tuple first */ if (in_range(orig_tuple, range)) { if (!nf_nat_used_tuple(orig_tuple, ct)) { @@ -246,8 +247,8 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple, proto = __nf_nat_proto_find(orig_tuple->dst.protonum); /* Only bother mapping if it's not already in range and unique */ - if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) { - if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) { + if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) { + if (range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) { if (proto->in_range(tuple, maniptype, &range->min, &range->max) && (range->min.all == range->max.all || @@ -266,7 +267,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple, unsigned int nf_nat_setup_info(struct nf_conn *ct, - const struct nf_nat_ipv4_range *range, + const struct nf_nat_range *range, enum nf_nat_manip_type maniptype) { struct net *net = nf_ct_net(ct); @@ -283,8 +284,8 @@ nf_nat_setup_info(struct nf_conn *ct, } } - NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC || - maniptype == NF_NAT_MANIP_DST); + NF_CT_ASSERT(maniptype == IP_NAT_MANIP_SRC || + maniptype == IP_NAT_MANIP_DST); BUG_ON(nf_nat_initialized(ct, maniptype)); /* What we've got will look like inverse of reply. Normally @@ -305,13 +306,13 @@ nf_nat_setup_info(struct nf_conn *ct, nf_conntrack_alter_reply(ct, &reply); /* Non-atomic: we own this at the moment. */ - if (maniptype == NF_NAT_MANIP_SRC) + if (maniptype == IP_NAT_MANIP_SRC) ct->status |= IPS_SRC_NAT; else ct->status |= IPS_DST_NAT; } - if (maniptype == NF_NAT_MANIP_SRC) { + if (maniptype == IP_NAT_MANIP_SRC) { unsigned int srchash; srchash = hash_by_src(net, nf_ct_zone(ct), @@ -326,7 +327,7 @@ nf_nat_setup_info(struct nf_conn *ct, } /* It's done. */ - if (maniptype == NF_NAT_MANIP_DST) + if (maniptype == IP_NAT_MANIP_DST) ct->status |= IPS_DST_NAT_DONE; else ct->status |= IPS_SRC_NAT_DONE; @@ -360,7 +361,7 @@ manip_pkt(u_int16_t proto, iph = (void *)skb->data + iphdroff; - if (maniptype == NF_NAT_MANIP_SRC) { + if (maniptype == IP_NAT_MANIP_SRC) { csum_replace4(&iph->check, iph->saddr, target->src.u3.ip); iph->saddr = target->src.u3.ip; } else { @@ -380,7 +381,7 @@ unsigned int nf_nat_packet(struct nf_conn *ct, unsigned long statusbit; enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum); - if (mtype == NF_NAT_MANIP_SRC) + if (mtype == IP_NAT_MANIP_SRC) statusbit = IPS_SRC_NAT; else statusbit = IPS_DST_NAT; @@ -413,7 +414,8 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct, struct icmphdr icmp; struct iphdr ip; } *inside; - struct nf_conntrack_tuple target; + const struct nf_conntrack_l4proto *l4proto; + struct nf_conntrack_tuple inner, target; int hdrlen = ip_hdrlen(skb); enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); unsigned long statusbit; @@ -445,7 +447,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct, return 0; } - if (manip == NF_NAT_MANIP_SRC) + if (manip == IP_NAT_MANIP_SRC) statusbit = IPS_SRC_NAT; else statusbit = IPS_DST_NAT; @@ -461,6 +463,16 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct, "dir %s\n", skb, manip, dir == IP_CT_DIR_ORIGINAL ? 
"ORIG" : "REPLY"); + /* rcu_read_lock()ed by nf_hook_slow */ + l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol); + + if (!nf_ct_get_tuple(skb, hdrlen + sizeof(struct icmphdr), + (hdrlen + + sizeof(struct icmphdr) + inside->ip.ihl * 4), + (u_int16_t)AF_INET, inside->ip.protocol, + &inner, l3proto, l4proto)) + return 0; + /* Change inner back to look like incoming packet. We do the opposite manip on this hook to normal, because it might not pass all hooks (locally-generated ICMP). Consider incoming @@ -563,6 +575,26 @@ static struct nf_ct_ext_type nat_extend __read_mostly = { #include #include +static const struct nf_nat_protocol * +nf_nat_proto_find_get(u_int8_t protonum) +{ + const struct nf_nat_protocol *p; + + rcu_read_lock(); + p = __nf_nat_proto_find(protonum); + if (!try_module_get(p->me)) + p = &nf_nat_unknown_protocol; + rcu_read_unlock(); + + return p; +} + +static void +nf_nat_proto_put(const struct nf_nat_protocol *p) +{ + module_put(p->me); +} + static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = { [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 }, [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 }, @@ -570,7 +602,7 @@ static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = { static int nfnetlink_parse_nat_proto(struct nlattr *attr, const struct nf_conn *ct, - struct nf_nat_ipv4_range *range) + struct nf_nat_range *range) { struct nlattr *tb[CTA_PROTONAT_MAX+1]; const struct nf_nat_protocol *npt; @@ -580,23 +612,21 @@ static int nfnetlink_parse_nat_proto(struct nlattr *attr, if (err < 0) return err; - rcu_read_lock(); - npt = __nf_nat_proto_find(nf_ct_protonum(ct)); + npt = nf_nat_proto_find_get(nf_ct_protonum(ct)); if (npt->nlattr_to_range) err = npt->nlattr_to_range(tb, range); - rcu_read_unlock(); + nf_nat_proto_put(npt); return err; } static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = { [CTA_NAT_MINIP] = { .type = NLA_U32 }, [CTA_NAT_MAXIP] = { .type = NLA_U32 }, - [CTA_NAT_PROTO] = { .type = NLA_NESTED }, }; static int nfnetlink_parse_nat(const struct nlattr *nat, - const struct nf_conn *ct, struct nf_nat_ipv4_range *range) + const struct nf_conn *ct, struct nf_nat_range *range) { struct nlattr *tb[CTA_NAT_MAX+1]; int err; @@ -616,7 +646,7 @@ nfnetlink_parse_nat(const struct nlattr *nat, range->max_ip = nla_get_be32(tb[CTA_NAT_MAXIP]); if (range->min_ip) - range->flags |= NF_NAT_RANGE_MAP_IPS; + range->flags |= IP_NAT_RANGE_MAP_IPS; if (!tb[CTA_NAT_PROTO]) return 0; @@ -633,7 +663,7 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct, enum nf_nat_manip_type manip, const struct nlattr *attr) { - struct nf_nat_ipv4_range range; + struct nf_nat_range range; if (nfnetlink_parse_nat(attr, ct, &range) < 0) return -EINVAL; diff --git a/trunk/net/ipv4/netfilter/nf_nat_h323.c b/trunk/net/ipv4/netfilter/nf_nat_h323.c index dc1dd912baf4..b9a1136addbd 100644 --- a/trunk/net/ipv4/netfilter/nf_nat_h323.c +++ b/trunk/net/ipv4/netfilter/nf_nat_h323.c @@ -398,7 +398,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct, static void ip_nat_q931_expect(struct nf_conn *new, struct nf_conntrack_expect *this) { - struct nf_nat_ipv4_range range; + struct nf_nat_range range; if (this->tuple.src.u3.ip != 0) { /* Only accept calls from GK */ nf_nat_follow_master(new, this); @@ -409,16 +409,16 @@ static void ip_nat_q931_expect(struct nf_conn *new, BUG_ON(new->status & IPS_NAT_DONE_MASK); /* Change src to where master sends to */ - range.flags = NF_NAT_RANGE_MAP_IPS; + range.flags = IP_NAT_RANGE_MAP_IPS; range.min_ip = range.max_ip = 
new->tuplehash[!this->dir].tuple.src.u3.ip; - nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC); + nf_nat_setup_info(new, &range, IP_NAT_MANIP_SRC); /* For DST manip, map port here to where it's expected. */ - range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED); + range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED); range.min = range.max = this->saved_proto; range.min_ip = range.max_ip = new->master->tuplehash[!this->dir].tuple.src.u3.ip; - nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST); + nf_nat_setup_info(new, &range, IP_NAT_MANIP_DST); } /****************************************************************************/ @@ -496,21 +496,21 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct, static void ip_nat_callforwarding_expect(struct nf_conn *new, struct nf_conntrack_expect *this) { - struct nf_nat_ipv4_range range; + struct nf_nat_range range; /* This must be a fresh one. */ BUG_ON(new->status & IPS_NAT_DONE_MASK); /* Change src to where master sends to */ - range.flags = NF_NAT_RANGE_MAP_IPS; + range.flags = IP_NAT_RANGE_MAP_IPS; range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip; - nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC); + nf_nat_setup_info(new, &range, IP_NAT_MANIP_SRC); /* For DST manip, map port here to where it's expected. */ - range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED); + range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED); range.min = range.max = this->saved_proto; range.min_ip = range.max_ip = this->saved_ip; - nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST); + nf_nat_setup_info(new, &range, IP_NAT_MANIP_DST); } /****************************************************************************/ diff --git a/trunk/net/ipv4/netfilter/nf_nat_helper.c b/trunk/net/ipv4/netfilter/nf_nat_helper.c index af65958f6308..ebc5f8894f99 100644 --- a/trunk/net/ipv4/netfilter/nf_nat_helper.c +++ b/trunk/net/ipv4/netfilter/nf_nat_helper.c @@ -253,6 +253,12 @@ nf_nat_mangle_udp_packet(struct sk_buff *skb, struct udphdr *udph; int datalen, oldlen; + /* UDP helpers might accidentally mangle the wrong packet */ + iph = ip_hdr(skb); + if (skb->len < iph->ihl*4 + sizeof(*udph) + + match_offset + match_len) + return 0; + if (!skb_make_writable(skb, skb->len)) return 0; @@ -424,22 +430,22 @@ nf_nat_seq_adjust(struct sk_buff *skb, void nf_nat_follow_master(struct nf_conn *ct, struct nf_conntrack_expect *exp) { - struct nf_nat_ipv4_range range; + struct nf_nat_range range; /* This must be a fresh one. */ BUG_ON(ct->status & IPS_NAT_DONE_MASK); /* Change src to where master sends to */ - range.flags = NF_NAT_RANGE_MAP_IPS; + range.flags = IP_NAT_RANGE_MAP_IPS; range.min_ip = range.max_ip = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip; - nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC); + nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC); /* For DST manip, map port here to where it's expected. 
*/ - range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED); + range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED); range.min = range.max = exp->saved_proto; range.min_ip = range.max_ip = ct->master->tuplehash[!exp->dir].tuple.src.u3.ip; - nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST); + nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST); } EXPORT_SYMBOL(nf_nat_follow_master); diff --git a/trunk/net/ipv4/netfilter/nf_nat_pptp.c b/trunk/net/ipv4/netfilter/nf_nat_pptp.c index c273d58980ae..3e8284ba46b8 100644 --- a/trunk/net/ipv4/netfilter/nf_nat_pptp.c +++ b/trunk/net/ipv4/netfilter/nf_nat_pptp.c @@ -47,7 +47,7 @@ static void pptp_nat_expected(struct nf_conn *ct, struct nf_conntrack_tuple t; const struct nf_ct_pptp_master *ct_pptp_info; const struct nf_nat_pptp *nat_pptp_info; - struct nf_nat_ipv4_range range; + struct nf_nat_range range; ct_pptp_info = &nfct_help(master)->help.ct_pptp_info; nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info; @@ -88,24 +88,24 @@ static void pptp_nat_expected(struct nf_conn *ct, BUG_ON(ct->status & IPS_NAT_DONE_MASK); /* Change src to where master sends to */ - range.flags = NF_NAT_RANGE_MAP_IPS; + range.flags = IP_NAT_RANGE_MAP_IPS; range.min_ip = range.max_ip = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip; if (exp->dir == IP_CT_DIR_ORIGINAL) { - range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; + range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED; range.min = range.max = exp->saved_proto; } - nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC); + nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC); /* For DST manip, map port here to where it's expected. */ - range.flags = NF_NAT_RANGE_MAP_IPS; + range.flags = IP_NAT_RANGE_MAP_IPS; range.min_ip = range.max_ip = ct->master->tuplehash[!exp->dir].tuple.src.u3.ip; if (exp->dir == IP_CT_DIR_REPLY) { - range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; + range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED; range.min = range.max = exp->saved_proto; } - nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST); + nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST); } /* outbound packets == from PNS to PAC */ diff --git a/trunk/net/ipv4/netfilter/nf_nat_proto_common.c b/trunk/net/ipv4/netfilter/nf_nat_proto_common.c index 9993bc93e102..a3d997618602 100644 --- a/trunk/net/ipv4/netfilter/nf_nat_proto_common.c +++ b/trunk/net/ipv4/netfilter/nf_nat_proto_common.c @@ -26,7 +26,7 @@ bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple, { __be16 port; - if (maniptype == NF_NAT_MANIP_SRC) + if (maniptype == IP_NAT_MANIP_SRC) port = tuple->src.u.all; else port = tuple->dst.u.all; @@ -37,7 +37,7 @@ bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple, EXPORT_SYMBOL_GPL(nf_nat_proto_in_range); void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple, - const struct nf_nat_ipv4_range *range, + const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct, u_int16_t *rover) @@ -46,15 +46,15 @@ void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple, __be16 *portptr; u_int16_t off; - if (maniptype == NF_NAT_MANIP_SRC) + if (maniptype == IP_NAT_MANIP_SRC) portptr = &tuple->src.u.all; else portptr = &tuple->dst.u.all; /* If no range specified... 
*/ - if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) { + if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) { /* If it's dst rewrite, can't change port */ - if (maniptype == NF_NAT_MANIP_DST) + if (maniptype == IP_NAT_MANIP_DST) return; if (ntohs(*portptr) < 1024) { @@ -75,9 +75,9 @@ void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple, range_size = ntohs(range->max.all) - min + 1; } - if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) + if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) off = secure_ipv4_port_ephemeral(tuple->src.u3.ip, tuple->dst.u3.ip, - maniptype == NF_NAT_MANIP_SRC + maniptype == IP_NAT_MANIP_SRC ? tuple->dst.u.all : tuple->src.u.all); else @@ -87,7 +87,7 @@ void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple, *portptr = htons(min + off % range_size); if (++i != range_size && nf_nat_used_tuple(tuple, ct)) continue; - if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) + if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) *rover = off; return; } @@ -96,19 +96,31 @@ void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple, EXPORT_SYMBOL_GPL(nf_nat_proto_unique_tuple); #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) +int nf_nat_proto_range_to_nlattr(struct sk_buff *skb, + const struct nf_nat_range *range) +{ + NLA_PUT_BE16(skb, CTA_PROTONAT_PORT_MIN, range->min.all); + NLA_PUT_BE16(skb, CTA_PROTONAT_PORT_MAX, range->max.all); + return 0; + +nla_put_failure: + return -1; +} +EXPORT_SYMBOL_GPL(nf_nat_proto_nlattr_to_range); + int nf_nat_proto_nlattr_to_range(struct nlattr *tb[], - struct nf_nat_ipv4_range *range) + struct nf_nat_range *range) { if (tb[CTA_PROTONAT_PORT_MIN]) { range->min.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]); range->max.all = range->min.tcp.port; - range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED; + range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED; } if (tb[CTA_PROTONAT_PORT_MAX]) { range->max.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]); - range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED; + range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED; } return 0; } -EXPORT_SYMBOL_GPL(nf_nat_proto_nlattr_to_range); +EXPORT_SYMBOL_GPL(nf_nat_proto_range_to_nlattr); #endif diff --git a/trunk/net/ipv4/netfilter/nf_nat_proto_dccp.c b/trunk/net/ipv4/netfilter/nf_nat_proto_dccp.c index 3f67138d187c..570faf2667b2 100644 --- a/trunk/net/ipv4/netfilter/nf_nat_proto_dccp.c +++ b/trunk/net/ipv4/netfilter/nf_nat_proto_dccp.c @@ -24,7 +24,7 @@ static u_int16_t dccp_port_rover; static void dccp_unique_tuple(struct nf_conntrack_tuple *tuple, - const struct nf_nat_ipv4_range *range, + const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { @@ -54,7 +54,7 @@ dccp_manip_pkt(struct sk_buff *skb, iph = (struct iphdr *)(skb->data + iphdroff); hdr = (struct dccp_hdr *)(skb->data + hdroff); - if (maniptype == NF_NAT_MANIP_SRC) { + if (maniptype == IP_NAT_MANIP_SRC) { oldip = iph->saddr; newip = tuple->src.u3.ip; newport = tuple->src.u.dccp.port; @@ -80,10 +80,12 @@ dccp_manip_pkt(struct sk_buff *skb, static const struct nf_nat_protocol nf_nat_protocol_dccp = { .protonum = IPPROTO_DCCP, + .me = THIS_MODULE, .manip_pkt = dccp_manip_pkt, .in_range = nf_nat_proto_in_range, .unique_tuple = dccp_unique_tuple, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) + .range_to_nlattr = nf_nat_proto_range_to_nlattr, .nlattr_to_range = nf_nat_proto_nlattr_to_range, #endif }; diff --git a/trunk/net/ipv4/netfilter/nf_nat_proto_gre.c b/trunk/net/ipv4/netfilter/nf_nat_proto_gre.c index 
46ba0b9ab985..bc8d83a31c73 100644 --- a/trunk/net/ipv4/netfilter/nf_nat_proto_gre.c +++ b/trunk/net/ipv4/netfilter/nf_nat_proto_gre.c @@ -39,7 +39,7 @@ MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE"); /* generate unique tuple ... */ static void gre_unique_tuple(struct nf_conntrack_tuple *tuple, - const struct nf_nat_ipv4_range *range, + const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { @@ -52,12 +52,12 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple, if (!ct->master) return; - if (maniptype == NF_NAT_MANIP_SRC) + if (maniptype == IP_NAT_MANIP_SRC) keyptr = &tuple->src.u.gre.key; else keyptr = &tuple->dst.u.gre.key; - if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) { + if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) { pr_debug("%p: NATing GRE PPTP\n", ct); min = 1; range_size = 0xffff; @@ -99,7 +99,7 @@ gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, /* we only have destination manip of a packet, since 'source key' * is not present in the packet itself */ - if (maniptype != NF_NAT_MANIP_DST) + if (maniptype != IP_NAT_MANIP_DST) return true; switch (greh->version) { case GRE_VERSION_1701: @@ -119,10 +119,12 @@ gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, static const struct nf_nat_protocol gre = { .protonum = IPPROTO_GRE, + .me = THIS_MODULE, .manip_pkt = gre_manip_pkt, .in_range = nf_nat_proto_in_range, .unique_tuple = gre_unique_tuple, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) + .range_to_nlattr = nf_nat_proto_range_to_nlattr, .nlattr_to_range = nf_nat_proto_nlattr_to_range, #endif }; diff --git a/trunk/net/ipv4/netfilter/nf_nat_proto_icmp.c b/trunk/net/ipv4/netfilter/nf_nat_proto_icmp.c index b35172851bae..9f4dc1235dc7 100644 --- a/trunk/net/ipv4/netfilter/nf_nat_proto_icmp.c +++ b/trunk/net/ipv4/netfilter/nf_nat_proto_icmp.c @@ -30,7 +30,7 @@ icmp_in_range(const struct nf_conntrack_tuple *tuple, static void icmp_unique_tuple(struct nf_conntrack_tuple *tuple, - const struct nf_nat_ipv4_range *range, + const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { @@ -40,7 +40,7 @@ icmp_unique_tuple(struct nf_conntrack_tuple *tuple, range_size = ntohs(range->max.icmp.id) - ntohs(range->min.icmp.id) + 1; /* If no range specified... 
*/ - if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) + if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) range_size = 0xFFFF; for (i = 0; ; ++id) { @@ -74,10 +74,12 @@ icmp_manip_pkt(struct sk_buff *skb, const struct nf_nat_protocol nf_nat_protocol_icmp = { .protonum = IPPROTO_ICMP, + .me = THIS_MODULE, .manip_pkt = icmp_manip_pkt, .in_range = icmp_in_range, .unique_tuple = icmp_unique_tuple, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) + .range_to_nlattr = nf_nat_proto_range_to_nlattr, .nlattr_to_range = nf_nat_proto_nlattr_to_range, #endif }; diff --git a/trunk/net/ipv4/netfilter/nf_nat_proto_sctp.c b/trunk/net/ipv4/netfilter/nf_nat_proto_sctp.c index 3cce9b6c1c29..bd5a80a62a5b 100644 --- a/trunk/net/ipv4/netfilter/nf_nat_proto_sctp.c +++ b/trunk/net/ipv4/netfilter/nf_nat_proto_sctp.c @@ -19,7 +19,7 @@ static u_int16_t nf_sctp_port_rover; static void sctp_unique_tuple(struct nf_conntrack_tuple *tuple, - const struct nf_nat_ipv4_range *range, + const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { @@ -46,7 +46,7 @@ sctp_manip_pkt(struct sk_buff *skb, iph = (struct iphdr *)(skb->data + iphdroff); hdr = (struct sctphdr *)(skb->data + hdroff); - if (maniptype == NF_NAT_MANIP_SRC) { + if (maniptype == IP_NAT_MANIP_SRC) { /* Get rid of src ip and src pt */ oldip = iph->saddr; newip = tuple->src.u3.ip; @@ -70,10 +70,12 @@ sctp_manip_pkt(struct sk_buff *skb, static const struct nf_nat_protocol nf_nat_protocol_sctp = { .protonum = IPPROTO_SCTP, + .me = THIS_MODULE, .manip_pkt = sctp_manip_pkt, .in_range = nf_nat_proto_in_range, .unique_tuple = sctp_unique_tuple, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) + .range_to_nlattr = nf_nat_proto_range_to_nlattr, .nlattr_to_range = nf_nat_proto_nlattr_to_range, #endif }; diff --git a/trunk/net/ipv4/netfilter/nf_nat_proto_tcp.c b/trunk/net/ipv4/netfilter/nf_nat_proto_tcp.c index 9fb4b4e72bbf..0d67bb80130f 100644 --- a/trunk/net/ipv4/netfilter/nf_nat_proto_tcp.c +++ b/trunk/net/ipv4/netfilter/nf_nat_proto_tcp.c @@ -23,7 +23,7 @@ static u_int16_t tcp_port_rover; static void tcp_unique_tuple(struct nf_conntrack_tuple *tuple, - const struct nf_nat_ipv4_range *range, + const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { @@ -55,7 +55,7 @@ tcp_manip_pkt(struct sk_buff *skb, iph = (struct iphdr *)(skb->data + iphdroff); hdr = (struct tcphdr *)(skb->data + hdroff); - if (maniptype == NF_NAT_MANIP_SRC) { + if (maniptype == IP_NAT_MANIP_SRC) { /* Get rid of src ip and src pt */ oldip = iph->saddr; newip = tuple->src.u3.ip; @@ -82,10 +82,12 @@ tcp_manip_pkt(struct sk_buff *skb, const struct nf_nat_protocol nf_nat_protocol_tcp = { .protonum = IPPROTO_TCP, + .me = THIS_MODULE, .manip_pkt = tcp_manip_pkt, .in_range = nf_nat_proto_in_range, .unique_tuple = tcp_unique_tuple, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) + .range_to_nlattr = nf_nat_proto_range_to_nlattr, .nlattr_to_range = nf_nat_proto_nlattr_to_range, #endif }; diff --git a/trunk/net/ipv4/netfilter/nf_nat_proto_udp.c b/trunk/net/ipv4/netfilter/nf_nat_proto_udp.c index 9883336e628f..0b1b8601cba7 100644 --- a/trunk/net/ipv4/netfilter/nf_nat_proto_udp.c +++ b/trunk/net/ipv4/netfilter/nf_nat_proto_udp.c @@ -22,7 +22,7 @@ static u_int16_t udp_port_rover; static void udp_unique_tuple(struct nf_conntrack_tuple *tuple, - const struct nf_nat_ipv4_range *range, + const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const 
struct nf_conn *ct) { @@ -47,7 +47,7 @@ udp_manip_pkt(struct sk_buff *skb, iph = (struct iphdr *)(skb->data + iphdroff); hdr = (struct udphdr *)(skb->data + hdroff); - if (maniptype == NF_NAT_MANIP_SRC) { + if (maniptype == IP_NAT_MANIP_SRC) { /* Get rid of src ip and src pt */ oldip = iph->saddr; newip = tuple->src.u3.ip; @@ -73,10 +73,12 @@ udp_manip_pkt(struct sk_buff *skb, const struct nf_nat_protocol nf_nat_protocol_udp = { .protonum = IPPROTO_UDP, + .me = THIS_MODULE, .manip_pkt = udp_manip_pkt, .in_range = nf_nat_proto_in_range, .unique_tuple = udp_unique_tuple, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) + .range_to_nlattr = nf_nat_proto_range_to_nlattr, .nlattr_to_range = nf_nat_proto_nlattr_to_range, #endif }; diff --git a/trunk/net/ipv4/netfilter/nf_nat_proto_udplite.c b/trunk/net/ipv4/netfilter/nf_nat_proto_udplite.c index d24d10a7beb2..f83ef23e2ab7 100644 --- a/trunk/net/ipv4/netfilter/nf_nat_proto_udplite.c +++ b/trunk/net/ipv4/netfilter/nf_nat_proto_udplite.c @@ -21,7 +21,7 @@ static u_int16_t udplite_port_rover; static void udplite_unique_tuple(struct nf_conntrack_tuple *tuple, - const struct nf_nat_ipv4_range *range, + const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { @@ -47,7 +47,7 @@ udplite_manip_pkt(struct sk_buff *skb, iph = (struct iphdr *)(skb->data + iphdroff); hdr = (struct udphdr *)(skb->data + hdroff); - if (maniptype == NF_NAT_MANIP_SRC) { + if (maniptype == IP_NAT_MANIP_SRC) { /* Get rid of src ip and src pt */ oldip = iph->saddr; newip = tuple->src.u3.ip; @@ -72,10 +72,12 @@ udplite_manip_pkt(struct sk_buff *skb, static const struct nf_nat_protocol nf_nat_protocol_udplite = { .protonum = IPPROTO_UDPLITE, + .me = THIS_MODULE, .manip_pkt = udplite_manip_pkt, .in_range = nf_nat_proto_in_range, .unique_tuple = udplite_unique_tuple, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) + .range_to_nlattr = nf_nat_proto_range_to_nlattr, .nlattr_to_range = nf_nat_proto_nlattr_to_range, #endif }; diff --git a/trunk/net/ipv4/netfilter/nf_nat_proto_unknown.c b/trunk/net/ipv4/netfilter/nf_nat_proto_unknown.c index e0afe8112b1c..a50f2bc1c732 100644 --- a/trunk/net/ipv4/netfilter/nf_nat_proto_unknown.c +++ b/trunk/net/ipv4/netfilter/nf_nat_proto_unknown.c @@ -27,7 +27,7 @@ static bool unknown_in_range(const struct nf_conntrack_tuple *tuple, } static void unknown_unique_tuple(struct nf_conntrack_tuple *tuple, - const struct nf_nat_ipv4_range *range, + const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { @@ -46,6 +46,7 @@ unknown_manip_pkt(struct sk_buff *skb, } const struct nf_nat_protocol nf_nat_unknown_protocol = { + /* .me isn't set: getting a ref to this cannot fail. 
*/ .manip_pkt = unknown_manip_pkt, .in_range = unknown_in_range, .unique_tuple = unknown_unique_tuple, diff --git a/trunk/net/ipv4/netfilter/nf_nat_rule.c b/trunk/net/ipv4/netfilter/nf_nat_rule.c index d2a9dc314e0e..733c9abc1cbd 100644 --- a/trunk/net/ipv4/netfilter/nf_nat_rule.c +++ b/trunk/net/ipv4/netfilter/nf_nat_rule.c @@ -44,7 +44,7 @@ ipt_snat_target(struct sk_buff *skb, const struct xt_action_param *par) { struct nf_conn *ct; enum ip_conntrack_info ctinfo; - const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; + const struct nf_nat_multi_range_compat *mr = par->targinfo; NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING || par->hooknum == NF_INET_LOCAL_IN); @@ -56,7 +56,7 @@ ipt_snat_target(struct sk_buff *skb, const struct xt_action_param *par) ctinfo == IP_CT_RELATED_REPLY)); NF_CT_ASSERT(par->out != NULL); - return nf_nat_setup_info(ct, &mr->range[0], NF_NAT_MANIP_SRC); + return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_SRC); } static unsigned int @@ -64,7 +64,7 @@ ipt_dnat_target(struct sk_buff *skb, const struct xt_action_param *par) { struct nf_conn *ct; enum ip_conntrack_info ctinfo; - const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; + const struct nf_nat_multi_range_compat *mr = par->targinfo; NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING || par->hooknum == NF_INET_LOCAL_OUT); @@ -74,12 +74,12 @@ ipt_dnat_target(struct sk_buff *skb, const struct xt_action_param *par) /* Connection must be valid and new. */ NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)); - return nf_nat_setup_info(ct, &mr->range[0], NF_NAT_MANIP_DST); + return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_DST); } static int ipt_snat_checkentry(const struct xt_tgchk_param *par) { - const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; + const struct nf_nat_multi_range_compat *mr = par->targinfo; /* Must be a valid range */ if (mr->rangesize != 1) { @@ -91,7 +91,7 @@ static int ipt_snat_checkentry(const struct xt_tgchk_param *par) static int ipt_dnat_checkentry(const struct xt_tgchk_param *par) { - const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; + const struct nf_nat_multi_range_compat *mr = par->targinfo; /* Must be a valid range */ if (mr->rangesize != 1) { @@ -105,13 +105,13 @@ static unsigned int alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) { /* Force range to this IP; let proto decide mapping for - per-proto parts (hence not NF_NAT_RANGE_PROTO_SPECIFIED). + per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED). */ - struct nf_nat_ipv4_range range; + struct nf_nat_range range; range.flags = 0; pr_debug("Allocating NULL binding for %p (%pI4)\n", ct, - HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ? + HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC ? 
&ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip : &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip); @@ -140,7 +140,7 @@ int nf_nat_rule_find(struct sk_buff *skb, static struct xt_target ipt_snat_reg __read_mostly = { .name = "SNAT", .target = ipt_snat_target, - .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat), + .targetsize = sizeof(struct nf_nat_multi_range_compat), .table = "nat", .hooks = (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_IN), .checkentry = ipt_snat_checkentry, @@ -150,7 +150,7 @@ static struct xt_target ipt_snat_reg __read_mostly = { static struct xt_target ipt_dnat_reg __read_mostly = { .name = "DNAT", .target = ipt_dnat_target, - .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat), + .targetsize = sizeof(struct nf_nat_multi_range_compat), .table = "nat", .hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT), .checkentry = ipt_dnat_checkentry, diff --git a/trunk/net/ipv4/netfilter/nf_nat_sip.c b/trunk/net/ipv4/netfilter/nf_nat_sip.c index d0319f96269f..78844d9208f1 100644 --- a/trunk/net/ipv4/netfilter/nf_nat_sip.c +++ b/trunk/net/ipv4/netfilter/nf_nat_sip.c @@ -249,25 +249,25 @@ static void ip_nat_sip_seq_adjust(struct sk_buff *skb, s16 off) static void ip_nat_sip_expected(struct nf_conn *ct, struct nf_conntrack_expect *exp) { - struct nf_nat_ipv4_range range; + struct nf_nat_range range; /* This must be a fresh one. */ BUG_ON(ct->status & IPS_NAT_DONE_MASK); /* For DST manip, map port here to where it's expected. */ - range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED); + range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED); range.min = range.max = exp->saved_proto; range.min_ip = range.max_ip = exp->saved_ip; - nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST); + nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST); /* Change src to where master sends to, but only if the connection * actually came from the same source. */ if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == ct->master->tuplehash[exp->dir].tuple.src.u3.ip) { - range.flags = NF_NAT_RANGE_MAP_IPS; + range.flags = IP_NAT_RANGE_MAP_IPS; range.min_ip = range.max_ip = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip; - nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC); + nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC); } } diff --git a/trunk/net/ipv4/netfilter/nf_nat_standalone.c b/trunk/net/ipv4/netfilter/nf_nat_standalone.c index 3828a4229822..92900482edea 100644 --- a/trunk/net/ipv4/netfilter/nf_nat_standalone.c +++ b/trunk/net/ipv4/netfilter/nf_nat_standalone.c @@ -137,7 +137,7 @@ nf_nat_fn(unsigned int hooknum, return ret; } else pr_debug("Already setup manip %s for ct %p\n", - maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST", + maniptype == IP_NAT_MANIP_SRC ? 
"SRC" : "DST", ct); break; diff --git a/trunk/net/ipv4/proc.c b/trunk/net/ipv4/proc.c index 3569d8ecaeac..466ea8bb7a4d 100644 --- a/trunk/net/ipv4/proc.c +++ b/trunk/net/ipv4/proc.c @@ -56,17 +56,17 @@ static int sockstat_seq_show(struct seq_file *seq, void *v) local_bh_disable(); orphans = percpu_counter_sum_positive(&tcp_orphan_count); - sockets = proto_sockets_allocated_sum_positive(&tcp_prot); + sockets = percpu_counter_sum_positive(&tcp_sockets_allocated); local_bh_enable(); socket_seq_show(seq); seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n", sock_prot_inuse_get(net, &tcp_prot), orphans, tcp_death_row.tw_count, sockets, - proto_memory_allocated(&tcp_prot)); + atomic_long_read(&tcp_memory_allocated)); seq_printf(seq, "UDP: inuse %d mem %ld\n", sock_prot_inuse_get(net, &udp_prot), - proto_memory_allocated(&udp_prot)); + atomic_long_read(&udp_memory_allocated)); seq_printf(seq, "UDPLITE: inuse %d\n", sock_prot_inuse_get(net, &udplite_prot)); seq_printf(seq, "RAW: inuse %d\n", @@ -288,7 +288,7 @@ static void icmpmsg_put(struct seq_file *seq) count = 0; for (i = 0; i < ICMPMSG_MIB_MAX; i++) { - val = atomic_long_read(&net->mib.icmpmsg_statistics->mibs[i]); + val = snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics, i); if (val) { type[count] = i; vals[count++] = val; @@ -307,7 +307,6 @@ static void icmp_put(struct seq_file *seq) { int i; struct net *net = seq->private; - atomic_long_t *ptr = net->mib.icmpmsg_statistics->mibs; seq_puts(seq, "\nIcmp: InMsgs InErrors"); for (i=0; icmpmibmap[i].name != NULL; i++) @@ -320,13 +319,15 @@ static void icmp_put(struct seq_file *seq) snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INERRORS)); for (i=0; icmpmibmap[i].name != NULL; i++) seq_printf(seq, " %lu", - atomic_long_read(ptr + icmpmibmap[i].index)); + snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics, + icmpmibmap[i].index)); seq_printf(seq, " %lu %lu", snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS), snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS)); for (i=0; icmpmibmap[i].name != NULL; i++) seq_printf(seq, " %lu", - atomic_long_read(ptr + (icmpmibmap[i].index | 0x100))); + snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics, + icmpmibmap[i].index | 0x100)); } /* diff --git a/trunk/net/ipv4/raw.c b/trunk/net/ipv4/raw.c index 3ccda5ae8a27..007e2eb769d3 100644 --- a/trunk/net/ipv4/raw.c +++ b/trunk/net/ipv4/raw.c @@ -292,8 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb) { /* Charge it to the socket. 
*/ - ipv4_pktinfo_prepare(skb); - if (sock_queue_rcv_skb(sk, skb) < 0) { + if (ip_queue_rcv_skb(sk, skb) < 0) { kfree_skb(skb); return NET_RX_DROP; } @@ -328,7 +327,6 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, unsigned int iphlen; int err; struct rtable *rt = *rtp; - int hlen, tlen; if (length > rt->dst.dev->mtu) { ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, @@ -338,14 +336,12 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, if (flags&MSG_PROBE) goto out; - hlen = LL_RESERVED_SPACE(rt->dst.dev); - tlen = rt->dst.dev->needed_tailroom; skb = sock_alloc_send_skb(sk, - length + hlen + tlen + 15, + length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15, flags & MSG_DONTWAIT, &err); if (skb == NULL) goto error; - skb_reserve(skb, hlen); + skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev)); skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; diff --git a/trunk/net/ipv4/route.c b/trunk/net/ipv4/route.c index bcacf54e5418..94cdbc55ca7e 100644 --- a/trunk/net/ipv4/route.c +++ b/trunk/net/ipv4/route.c @@ -109,6 +109,7 @@ #ifdef CONFIG_SYSCTL #include #endif +#include #include #define RT_FL_TOS(oldflp4) \ @@ -424,7 +425,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v) int len, HHUptod; rcu_read_lock(); - n = dst_get_neighbour_noref(&r->dst); + n = dst_get_neighbour(&r->dst); HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0; rcu_read_unlock(); @@ -1114,18 +1115,23 @@ static int slow_chain_length(const struct rtable *head) static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr) { + struct neigh_table *tbl = &arp_tbl; static const __be32 inaddr_any = 0; struct net_device *dev = dst->dev; const __be32 *pkey = daddr; struct neighbour *n; +#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE) + if (dev->type == ARPHRD_ATM) + tbl = clip_tbl_hook; +#endif if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT)) pkey = &inaddr_any; - n = __ipv4_neigh_lookup(&arp_tbl, dev, *(__force u32 *)pkey); + n = __ipv4_neigh_lookup(tbl, dev, *(__force u32 *)pkey); if (n) return n; - return neigh_create(&arp_tbl, pkey, dev); + return neigh_create(tbl, pkey, dev); } static int rt_bind_neighbour(struct rtable *rt) diff --git a/trunk/net/ipv4/syncookies.c b/trunk/net/ipv4/syncookies.c index 51fdbb490437..90f6544c13e2 100644 --- a/trunk/net/ipv4/syncookies.c +++ b/trunk/net/ipv4/syncookies.c @@ -245,7 +245,7 @@ bool cookie_check_timestamp(struct tcp_options_received *tcp_opt, bool *ecn_ok) if (!sysctl_tcp_timestamps) return false; - tcp_opt->sack_ok = (options & (1 << 4)) ? 
TCP_SACK_SEEN : 0; + tcp_opt->sack_ok = (options >> 4) & 0x1; *ecn_ok = (options >> 5) & 1; if (*ecn_ok && !sysctl_tcp_ecn) return false; diff --git a/trunk/net/ipv4/sysctl_net_ipv4.c b/trunk/net/ipv4/sysctl_net_ipv4.c index 4aa7e9dc0cbb..69fd7201129a 100644 --- a/trunk/net/ipv4/sysctl_net_ipv4.c +++ b/trunk/net/ipv4/sysctl_net_ipv4.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -24,7 +23,6 @@ #include #include #include -#include static int zero; static int tcp_retr1_max = 255; @@ -75,7 +73,7 @@ static int ipv4_local_port_range(ctl_table *table, int write, } -static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high) +void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high) { gid_t *data = table->data; unsigned seq; @@ -88,7 +86,7 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, } /* Update system visible IP port range */ -static void set_ping_group_range(struct ctl_table *table, gid_t range[2]) +static void set_ping_group_range(struct ctl_table *table, int range[2]) { gid_t *data = table->data; write_seqlock(&sysctl_local_ports.lock); @@ -176,49 +174,6 @@ static int proc_allowed_congestion_control(ctl_table *ctl, return ret; } -static int ipv4_tcp_mem(ctl_table *ctl, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos) -{ - int ret; - unsigned long vec[3]; - struct net *net = current->nsproxy->net_ns; -#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM - struct mem_cgroup *memcg; -#endif - - ctl_table tmp = { - .data = &vec, - .maxlen = sizeof(vec), - .mode = ctl->mode, - }; - - if (!write) { - ctl->data = &net->ipv4.sysctl_tcp_mem; - return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos); - } - - ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos); - if (ret) - return ret; - -#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM - rcu_read_lock(); - memcg = mem_cgroup_from_task(current); - - tcp_prot_mem(memcg, vec[0], 0); - tcp_prot_mem(memcg, vec[1], 1); - tcp_prot_mem(memcg, vec[2], 2); - rcu_read_unlock(); -#endif - - net->ipv4.sysctl_tcp_mem[0] = vec[0]; - net->ipv4.sysctl_tcp_mem[1] = vec[1]; - net->ipv4.sysctl_tcp_mem[2] = vec[2]; - - return 0; -} - static struct ctl_table ipv4_table[] = { { .procname = "tcp_timestamps", @@ -477,6 +432,13 @@ static struct ctl_table ipv4_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, + { + .procname = "tcp_mem", + .data = &sysctl_tcp_mem, + .maxlen = sizeof(sysctl_tcp_mem), + .mode = 0644, + .proc_handler = proc_doulongvec_minmax + }, { .procname = "tcp_wmem", .data = &sysctl_tcp_wmem, @@ -759,12 +721,6 @@ static struct ctl_table ipv4_net_table[] = { .mode = 0644, .proc_handler = ipv4_ping_group_range, }, - { - .procname = "tcp_mem", - .maxlen = sizeof(init_net.ipv4.sysctl_tcp_mem), - .mode = 0644, - .proc_handler = ipv4_tcp_mem, - }, { } }; @@ -778,7 +734,6 @@ EXPORT_SYMBOL_GPL(net_ipv4_ctl_path); static __net_init int ipv4_sysctl_init_net(struct net *net) { struct ctl_table *table; - unsigned long limit; table = ipv4_net_table; if (!net_eq(net, &init_net)) { @@ -814,12 +769,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net) net->ipv4.sysctl_rt_cache_rebuild_count = 4; - limit = nr_free_buffer_pages() / 8; - limit = max(limit, 128UL); - net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3; - net->ipv4.sysctl_tcp_mem[1] = limit; - net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2; - net->ipv4.ipv4_hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table); if 
(net->ipv4.ipv4_hdr == NULL) diff --git a/trunk/net/ipv4/tcp.c b/trunk/net/ipv4/tcp.c index 9bcdec3ad772..34f5db1e1c8b 100644 --- a/trunk/net/ipv4/tcp.c +++ b/trunk/net/ipv4/tcp.c @@ -282,9 +282,11 @@ int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT; struct percpu_counter tcp_orphan_count; EXPORT_SYMBOL_GPL(tcp_orphan_count); +long sysctl_tcp_mem[3] __read_mostly; int sysctl_tcp_wmem[3] __read_mostly; int sysctl_tcp_rmem[3] __read_mostly; +EXPORT_SYMBOL(sysctl_tcp_mem); EXPORT_SYMBOL(sysctl_tcp_rmem); EXPORT_SYMBOL(sysctl_tcp_wmem); @@ -886,18 +888,18 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset, } EXPORT_SYMBOL(tcp_sendpage); -static inline int select_size(const struct sock *sk, bool sg) +#define TCP_PAGE(sk) (sk->sk_sndmsg_page) +#define TCP_OFF(sk) (sk->sk_sndmsg_off) + +static inline int select_size(const struct sock *sk, int sg) { const struct tcp_sock *tp = tcp_sk(sk); int tmp = tp->mss_cache; if (sg) { - if (sk_can_gso(sk)) { - /* Small frames wont use a full page: - * Payload will immediately follow tcp header. - */ - tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER); - } else { + if (sk_can_gso(sk)) + tmp = 0; + else { int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER); if (tmp >= pgbreak && @@ -915,9 +917,9 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, struct iovec *iov; struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; - int iovlen, flags, err, copied; + int iovlen, flags; int mss_now, size_goal; - bool sg; + int sg, err, copied; long timeo; lock_sock(sk); @@ -944,7 +946,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) goto out_err; - sg = !!(sk->sk_route_caps & NETIF_F_SG); + sg = sk->sk_route_caps & NETIF_F_SG; while (--iovlen >= 0) { size_t seglen = iov->iov_len; @@ -1003,13 +1005,8 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, } else { int merge = 0; int i = skb_shinfo(skb)->nr_frags; - struct page *page = sk->sk_sndmsg_page; - int off; - - if (page && page_count(page) == 1) - sk->sk_sndmsg_off = 0; - - off = sk->sk_sndmsg_off; + struct page *page = TCP_PAGE(sk); + int off = TCP_OFF(sk); if (skb_can_coalesce(skb, i, page, off) && off != PAGE_SIZE) { @@ -1026,7 +1023,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, } else if (page) { if (off == PAGE_SIZE) { put_page(page); - sk->sk_sndmsg_page = page = NULL; + TCP_PAGE(sk) = page = NULL; off = 0; } } else @@ -1052,9 +1049,9 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, /* If this page was new, give it to the * socket so it does not get leaked. 
*/ - if (!sk->sk_sndmsg_page) { - sk->sk_sndmsg_page = page; - sk->sk_sndmsg_off = 0; + if (!TCP_PAGE(sk)) { + TCP_PAGE(sk) = page; + TCP_OFF(sk) = 0; } goto do_error; } @@ -1064,15 +1061,15 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); } else { skb_fill_page_desc(skb, i, page, off, copy); - if (sk->sk_sndmsg_page) { + if (TCP_PAGE(sk)) { get_page(page); } else if (off + copy < PAGE_SIZE) { get_page(page); - sk->sk_sndmsg_page = page; + TCP_PAGE(sk) = page; } } - sk->sk_sndmsg_off = off + copy; + TCP_OFF(sk) = off + copy; } if (!copied) @@ -2656,8 +2653,7 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname, EXPORT_SYMBOL(compat_tcp_getsockopt); #endif -struct sk_buff *tcp_tso_segment(struct sk_buff *skb, - netdev_features_t features) +struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features) { struct sk_buff *segs = ERR_PTR(-EINVAL); struct tcphdr *th; @@ -3276,9 +3272,14 @@ void __init tcp_init(void) sysctl_tcp_max_orphans = cnt / 2; sysctl_max_syn_backlog = max(128, cnt / 256); + limit = nr_free_buffer_pages() / 8; + limit = max(limit, 128UL); + sysctl_tcp_mem[0] = limit / 4 * 3; + sysctl_tcp_mem[1] = limit; + sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2; + /* Set per-socket limits to no more than 1/128 the pressure threshold */ - limit = ((unsigned long)init_net.ipv4.sysctl_tcp_mem[1]) - << (PAGE_SHIFT - 7); + limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7); max_share = min(4UL*1024*1024, limit); sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; diff --git a/trunk/net/ipv4/tcp_cong.c b/trunk/net/ipv4/tcp_cong.c index fc6d475f488f..850c737e08e2 100644 --- a/trunk/net/ipv4/tcp_cong.c +++ b/trunk/net/ipv4/tcp_cong.c @@ -292,7 +292,7 @@ int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight) left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd && left * tp->mss_cache < sk->sk_gso_max_size) return 1; - return left <= tcp_max_tso_deferred_mss(tp); + return left <= tcp_max_burst(tp); } EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited); diff --git a/trunk/net/ipv4/tcp_diag.c b/trunk/net/ipv4/tcp_diag.c index 8cd357a8be79..939edb3b8e4d 100644 --- a/trunk/net/ipv4/tcp_diag.c +++ b/trunk/net/ipv4/tcp_diag.c @@ -34,23 +34,11 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, tcp_get_info(sk, info); } -static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - struct inet_diag_req *r, struct nlattr *bc) -{ - inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r, bc); -} - -static int tcp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh, - struct inet_diag_req *req) -{ - return inet_diag_dump_one_icsk(&tcp_hashinfo, in_skb, nlh, req); -} - static const struct inet_diag_handler tcp_diag_handler = { - .dump = tcp_diag_dump, - .dump_one = tcp_diag_dump_one, + .idiag_hashinfo = &tcp_hashinfo, .idiag_get_info = tcp_diag_get_info, - .idiag_type = IPPROTO_TCP, + .idiag_type = TCPDIAG_GETSOCK, + .idiag_info_size = sizeof(struct tcp_info), }; static int __init tcp_diag_init(void) @@ -66,4 +54,4 @@ static void __exit tcp_diag_exit(void) module_init(tcp_diag_init); module_exit(tcp_diag_exit); MODULE_LICENSE("GPL"); -MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-6 /* AF_INET - IPPROTO_TCP */); +MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_INET_DIAG, TCPDIAG_GETSOCK); diff --git a/trunk/net/ipv4/tcp_input.c b/trunk/net/ipv4/tcp_input.c index 2877c3e09587..52b5c2d0ecd0 100644 --- a/trunk/net/ipv4/tcp_input.c +++ 
b/trunk/net/ipv4/tcp_input.c @@ -322,7 +322,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) /* Check #1 */ if (tp->rcv_ssthresh < tp->window_clamp && (int)tp->rcv_ssthresh < tcp_space(sk) && - !sk_under_memory_pressure(sk)) { + !tcp_memory_pressure) { int incr; /* Check #2. Increase window, if skb with such overhead @@ -411,8 +411,8 @@ static void tcp_clamp_window(struct sock *sk) if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] && !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) && - !sk_under_memory_pressure(sk) && - sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) { + !tcp_memory_pressure && + atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) { sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc), sysctl_tcp_rmem[2]); } @@ -865,13 +865,13 @@ static void tcp_disable_fack(struct tcp_sock *tp) /* RFC3517 uses different metric in lost marker => reset on change */ if (tcp_is_fack(tp)) tp->lost_skb_hint = NULL; - tp->rx_opt.sack_ok &= ~TCP_FACK_ENABLED; + tp->rx_opt.sack_ok &= ~2; } /* Take a notice that peer is sending D-SACKs */ static void tcp_dsack_seen(struct tcp_sock *tp) { - tp->rx_opt.sack_ok |= TCP_DSACK_SEEN; + tp->rx_opt.sack_ok |= 4; } /* Initialize metrics on socket. */ @@ -2663,7 +2663,7 @@ static void DBGUNDO(struct sock *sk, const char *msg) tp->snd_ssthresh, tp->prior_ssthresh, tp->packets_out); } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) else if (sk->sk_family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", @@ -2858,7 +2858,7 @@ static void tcp_try_keep_open(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); int state = TCP_CA_Open; - if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) + if (tcp_left_out(tp) || tcp_any_retrans_done(sk) || tp->undo_marker) state = TCP_CA_Disorder; if (inet_csk(sk)->icsk_ca_state != state) { @@ -2881,8 +2881,7 @@ static void tcp_try_to_open(struct sock *sk, int flag) if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { tcp_try_keep_open(sk); - if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open) - tcp_moderate_cwnd(tp); + tcp_moderate_cwnd(tp); } else { tcp_cwnd_down(sk, flag); } @@ -3010,11 +3009,11 @@ static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked, * tcp_xmit_retransmit_queue(). */ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, - int newly_acked_sacked, bool is_dupack, - int flag) + int newly_acked_sacked, int flag) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); + int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && (tcp_fackets_out(tp) > tp->reordering)); int fast_rexmit = 0, mib_idx; @@ -3067,6 +3066,17 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, } break; + case TCP_CA_Disorder: + tcp_try_undo_dsack(sk); + if (!tp->undo_marker || + /* For SACK case do not Open to allow to undo + * catching for all duplicate ACKs. 
*/ + tcp_is_reno(tp) || tp->snd_una != tp->high_seq) { + tp->undo_marker = 0; + tcp_set_ca_state(sk, TCP_CA_Open); + } + break; + case TCP_CA_Recovery: if (tcp_is_reno(tp)) tcp_reset_reno_sack(tp); @@ -3107,7 +3117,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, tcp_add_reno_sack(sk); } - if (icsk->icsk_ca_state <= TCP_CA_Disorder) + if (icsk->icsk_ca_state == TCP_CA_Disorder) tcp_try_undo_dsack(sk); if (!tcp_time_to_recover(sk)) { @@ -3671,12 +3681,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) u32 prior_snd_una = tp->snd_una; u32 ack_seq = TCP_SKB_CB(skb)->seq; u32 ack = TCP_SKB_CB(skb)->ack_seq; - bool is_dupack = false; u32 prior_in_flight; u32 prior_fackets; int prior_packets; int prior_sacked = tp->sacked_out; - int pkts_acked = 0; int newly_acked_sacked = 0; int frto_cwnd = 0; @@ -3749,7 +3757,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) /* See if we can take anything off of the retransmit queue. */ flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); - pkts_acked = prior_packets - tp->packets_out; newly_acked_sacked = (prior_packets - prior_sacked) - (tp->packets_out - tp->sacked_out); @@ -3764,9 +3771,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) if ((flag & FLAG_DATA_ACKED) && !frto_cwnd && tcp_may_raise_cwnd(sk, flag)) tcp_cong_avoid(sk, ack, prior_in_flight); - is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); - tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, - is_dupack, flag); + tcp_fastretrans_alert(sk, prior_packets - tp->packets_out, + newly_acked_sacked, flag); } else { if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) tcp_cong_avoid(sk, ack, prior_in_flight); @@ -3778,10 +3784,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) return 1; no_queue: - /* If data was DSACKed, see if we can undo a cwnd reduction. */ - if (flag & FLAG_DSACKING_ACK) - tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, - is_dupack, flag); /* If this ack opens up a zero window, clear backoff. It was * being used to time the probes, and is probably far higher than * it needs to be for normal retransmission. @@ -3795,14 +3797,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) return -1; old_ack: - /* If data was SACKed, tag it and see if we should send more data. - * If data was DSACKed, see if we can undo a cwnd reduction. 
- */ if (TCP_SKB_CB(skb)->sacked) { - flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); - newly_acked_sacked = tp->sacked_out - prior_sacked; - tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, - is_dupack, flag); + tcp_sacktag_write_queue(sk, skb, prior_snd_una); + if (icsk->icsk_ca_state == TCP_CA_Open) + tcp_try_keep_open(sk); } SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt); @@ -3878,7 +3876,7 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o case TCPOPT_SACK_PERM: if (opsize == TCPOLEN_SACK_PERM && th->syn && !estab && sysctl_tcp_sack) { - opt_rx->sack_ok = TCP_SACK_SEEN; + opt_rx->sack_ok = 1; tcp_sack_reset(opt_rx); } break; @@ -4866,7 +4864,7 @@ static int tcp_prune_queue(struct sock *sk) if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) tcp_clamp_window(sk); - else if (sk_under_memory_pressure(sk)) + else if (tcp_memory_pressure) tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); tcp_collapse_ofo_queue(sk); @@ -4932,11 +4930,11 @@ static int tcp_should_expand_sndbuf(const struct sock *sk) return 0; /* If we are under global TCP memory pressure, do not expand. */ - if (sk_under_memory_pressure(sk)) + if (tcp_memory_pressure) return 0; /* If we are under soft global TCP memory pressure, do not expand. */ - if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0)) + if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0]) return 0; /* If we filled the congestion window, do not expand. */ @@ -5811,8 +5809,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, goto discard; if (th->syn) { - if (th->fin) - goto discard; if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) return 1; diff --git a/trunk/net/ipv4/tcp_ipv4.c b/trunk/net/ipv4/tcp_ipv4.c index 1eb4ad57670e..a9db4b1a2215 100644 --- a/trunk/net/ipv4/tcp_ipv4.c +++ b/trunk/net/ipv4/tcp_ipv4.c @@ -73,7 +73,6 @@ #include #include #include -#include #include #include @@ -1512,7 +1511,6 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, return NULL; put_and_exit: tcp_clear_xmit_timers(newsk); - tcp_cleanup_congestion_control(newsk); bh_unlock_sock(newsk); sock_put(newsk); goto exit; @@ -1918,8 +1916,7 @@ static int tcp_v4_init_sock(struct sock *sk) sk->sk_rcvbuf = sysctl_tcp_rmem[1]; local_bh_disable(); - sock_update_memcg(sk); - sk_sockets_allocated_inc(sk); + percpu_counter_inc(&tcp_sockets_allocated); local_bh_enable(); return 0; @@ -1975,8 +1972,7 @@ void tcp_v4_destroy_sock(struct sock *sk) tp->cookie_values = NULL; } - sk_sockets_allocated_dec(sk); - sock_release_memcg(sk); + percpu_counter_dec(&tcp_sockets_allocated); } EXPORT_SYMBOL(tcp_v4_destroy_sock); @@ -2623,6 +2619,7 @@ struct proto tcp_prot = { .orphan_count = &tcp_orphan_count, .memory_allocated = &tcp_memory_allocated, .memory_pressure = &tcp_memory_pressure, + .sysctl_mem = sysctl_tcp_mem, .sysctl_wmem = sysctl_tcp_wmem, .sysctl_rmem = sysctl_tcp_rmem, .max_header = MAX_TCP_HEADER, @@ -2636,14 +2633,10 @@ struct proto tcp_prot = { .compat_setsockopt = compat_tcp_setsockopt, .compat_getsockopt = compat_tcp_getsockopt, #endif -#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM - .init_cgroup = tcp_init_cgroup, - .destroy_cgroup = tcp_destroy_cgroup, - .proto_cgroup = tcp_proto_cgroup, -#endif }; EXPORT_SYMBOL(tcp_prot); + static int __net_init tcp_sk_init(struct net *net) { return inet_ctl_sock_create(&net->ipv4.tcp_sock, diff --git a/trunk/net/ipv4/tcp_memcontrol.c b/trunk/net/ipv4/tcp_memcontrol.c deleted file mode 100644 index 
7fed04f875c1..000000000000 --- a/trunk/net/ipv4/tcp_memcontrol.c +++ /dev/null @@ -1,272 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft); -static int tcp_cgroup_write(struct cgroup *cont, struct cftype *cft, - const char *buffer); -static int tcp_cgroup_reset(struct cgroup *cont, unsigned int event); - -static struct cftype tcp_files[] = { - { - .name = "kmem.tcp.limit_in_bytes", - .write_string = tcp_cgroup_write, - .read_u64 = tcp_cgroup_read, - .private = RES_LIMIT, - }, - { - .name = "kmem.tcp.usage_in_bytes", - .read_u64 = tcp_cgroup_read, - .private = RES_USAGE, - }, - { - .name = "kmem.tcp.failcnt", - .private = RES_FAILCNT, - .trigger = tcp_cgroup_reset, - .read_u64 = tcp_cgroup_read, - }, - { - .name = "kmem.tcp.max_usage_in_bytes", - .private = RES_MAX_USAGE, - .trigger = tcp_cgroup_reset, - .read_u64 = tcp_cgroup_read, - }, -}; - -static inline struct tcp_memcontrol *tcp_from_cgproto(struct cg_proto *cg_proto) -{ - return container_of(cg_proto, struct tcp_memcontrol, cg_proto); -} - -static void memcg_tcp_enter_memory_pressure(struct sock *sk) -{ - if (sk->sk_cgrp->memory_pressure) - *sk->sk_cgrp->memory_pressure = 1; -} -EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure); - -int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss) -{ - /* - * The root cgroup does not use res_counters, but rather, - * rely on the data already collected by the network - * subsystem - */ - struct res_counter *res_parent = NULL; - struct cg_proto *cg_proto, *parent_cg; - struct tcp_memcontrol *tcp; - struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); - struct mem_cgroup *parent = parent_mem_cgroup(memcg); - struct net *net = current->nsproxy->net_ns; - - cg_proto = tcp_prot.proto_cgroup(memcg); - if (!cg_proto) - goto create_files; - - tcp = tcp_from_cgproto(cg_proto); - - tcp->tcp_prot_mem[0] = net->ipv4.sysctl_tcp_mem[0]; - tcp->tcp_prot_mem[1] = net->ipv4.sysctl_tcp_mem[1]; - tcp->tcp_prot_mem[2] = net->ipv4.sysctl_tcp_mem[2]; - tcp->tcp_memory_pressure = 0; - - parent_cg = tcp_prot.proto_cgroup(parent); - if (parent_cg) - res_parent = parent_cg->memory_allocated; - - res_counter_init(&tcp->tcp_memory_allocated, res_parent); - percpu_counter_init(&tcp->tcp_sockets_allocated, 0); - - cg_proto->enter_memory_pressure = memcg_tcp_enter_memory_pressure; - cg_proto->memory_pressure = &tcp->tcp_memory_pressure; - cg_proto->sysctl_mem = tcp->tcp_prot_mem; - cg_proto->memory_allocated = &tcp->tcp_memory_allocated; - cg_proto->sockets_allocated = &tcp->tcp_sockets_allocated; - cg_proto->memcg = memcg; - -create_files: - return cgroup_add_files(cgrp, ss, tcp_files, - ARRAY_SIZE(tcp_files)); -} -EXPORT_SYMBOL(tcp_init_cgroup); - -void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss) -{ - struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); - struct cg_proto *cg_proto; - struct tcp_memcontrol *tcp; - u64 val; - - cg_proto = tcp_prot.proto_cgroup(memcg); - if (!cg_proto) - return; - - tcp = tcp_from_cgproto(cg_proto); - percpu_counter_destroy(&tcp->tcp_sockets_allocated); - - val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE); - - if (val != RESOURCE_MAX) - jump_label_dec(&memcg_socket_limit_enabled); -} -EXPORT_SYMBOL(tcp_destroy_cgroup); - -static int tcp_update_limit(struct mem_cgroup *memcg, u64 val) -{ - struct net *net = current->nsproxy->net_ns; - struct tcp_memcontrol *tcp; - struct cg_proto *cg_proto; - u64 old_lim; - int i; - int ret; - - cg_proto = 
tcp_prot.proto_cgroup(memcg); - if (!cg_proto) - return -EINVAL; - - if (val > RESOURCE_MAX) - val = RESOURCE_MAX; - - tcp = tcp_from_cgproto(cg_proto); - - old_lim = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT); - ret = res_counter_set_limit(&tcp->tcp_memory_allocated, val); - if (ret) - return ret; - - for (i = 0; i < 3; i++) - tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT, - net->ipv4.sysctl_tcp_mem[i]); - - if (val == RESOURCE_MAX && old_lim != RESOURCE_MAX) - jump_label_dec(&memcg_socket_limit_enabled); - else if (old_lim == RESOURCE_MAX && val != RESOURCE_MAX) - jump_label_inc(&memcg_socket_limit_enabled); - - return 0; -} - -static int tcp_cgroup_write(struct cgroup *cont, struct cftype *cft, - const char *buffer) -{ - struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); - unsigned long long val; - int ret = 0; - - switch (cft->private) { - case RES_LIMIT: - /* see memcontrol.c */ - ret = res_counter_memparse_write_strategy(buffer, &val); - if (ret) - break; - ret = tcp_update_limit(memcg, val); - break; - default: - ret = -EINVAL; - break; - } - return ret; -} - -static u64 tcp_read_stat(struct mem_cgroup *memcg, int type, u64 default_val) -{ - struct tcp_memcontrol *tcp; - struct cg_proto *cg_proto; - - cg_proto = tcp_prot.proto_cgroup(memcg); - if (!cg_proto) - return default_val; - - tcp = tcp_from_cgproto(cg_proto); - return res_counter_read_u64(&tcp->tcp_memory_allocated, type); -} - -static u64 tcp_read_usage(struct mem_cgroup *memcg) -{ - struct tcp_memcontrol *tcp; - struct cg_proto *cg_proto; - - cg_proto = tcp_prot.proto_cgroup(memcg); - if (!cg_proto) - return atomic_long_read(&tcp_memory_allocated) << PAGE_SHIFT; - - tcp = tcp_from_cgproto(cg_proto); - return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE); -} - -static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft) -{ - struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); - u64 val; - - switch (cft->private) { - case RES_LIMIT: - val = tcp_read_stat(memcg, RES_LIMIT, RESOURCE_MAX); - break; - case RES_USAGE: - val = tcp_read_usage(memcg); - break; - case RES_FAILCNT: - case RES_MAX_USAGE: - val = tcp_read_stat(memcg, cft->private, 0); - break; - default: - BUG(); - } - return val; -} - -static int tcp_cgroup_reset(struct cgroup *cont, unsigned int event) -{ - struct mem_cgroup *memcg; - struct tcp_memcontrol *tcp; - struct cg_proto *cg_proto; - - memcg = mem_cgroup_from_cont(cont); - cg_proto = tcp_prot.proto_cgroup(memcg); - if (!cg_proto) - return 0; - tcp = tcp_from_cgproto(cg_proto); - - switch (event) { - case RES_MAX_USAGE: - res_counter_reset_max(&tcp->tcp_memory_allocated); - break; - case RES_FAILCNT: - res_counter_reset_failcnt(&tcp->tcp_memory_allocated); - break; - } - - return 0; -} - -unsigned long long tcp_max_memory(const struct mem_cgroup *memcg) -{ - struct tcp_memcontrol *tcp; - struct cg_proto *cg_proto; - - cg_proto = tcp_prot.proto_cgroup((struct mem_cgroup *)memcg); - if (!cg_proto) - return 0; - - tcp = tcp_from_cgproto(cg_proto); - return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT); -} - -void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx) -{ - struct tcp_memcontrol *tcp; - struct cg_proto *cg_proto; - - cg_proto = tcp_prot.proto_cgroup(memcg); - if (!cg_proto) - return; - - tcp = tcp_from_cgproto(cg_proto); - - tcp->tcp_prot_mem[idx] = val; -} diff --git a/trunk/net/ipv4/tcp_minisocks.c b/trunk/net/ipv4/tcp_minisocks.c index 550e755747e0..66363b689ad6 100644 --- a/trunk/net/ipv4/tcp_minisocks.c +++ 
b/trunk/net/ipv4/tcp_minisocks.c @@ -336,15 +336,15 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) tcptw->tw_ts_recent = tp->rx_opt.ts_recent; tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) if (tw->tw_family == PF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); struct inet6_timewait_sock *tw6; tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot); tw6 = inet6_twsk((struct sock *)tw); - tw6->tw_v6_daddr = np->daddr; - tw6->tw_v6_rcv_saddr = np->rcv_saddr; + ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr); + ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr); tw->tw_tclass = np->tclass; tw->tw_ipv6only = np->ipv6only; } @@ -425,7 +425,7 @@ static inline void TCP_ECN_openreq_child(struct tcp_sock *tp, */ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb) { - struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC); + struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC); if (newsk != NULL) { const struct inet_request_sock *ireq = inet_rsk(req); @@ -495,9 +495,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, newtp->frto_counter = 0; newtp->frto_highmark = 0; - if (newicsk->icsk_ca_ops != &tcp_init_congestion_ops && - !try_module_get(newicsk->icsk_ca_ops->owner)) - newicsk->icsk_ca_ops = &tcp_init_congestion_ops; + newicsk->icsk_ca_ops = &tcp_init_congestion_ops; tcp_set_ca_state(newsk, TCP_CA_Open); tcp_init_xmit_timers(newsk); diff --git a/trunk/net/ipv4/tcp_output.c b/trunk/net/ipv4/tcp_output.c index 8c8de2780c7a..63170e297540 100644 --- a/trunk/net/ipv4/tcp_output.c +++ b/trunk/net/ipv4/tcp_output.c @@ -1093,13 +1093,6 @@ static void __pskb_trim_head(struct sk_buff *skb, int len) { int i, k, eat; - eat = min_t(int, len, skb_headlen(skb)); - if (eat) { - __skb_pull(skb, eat); - len -= eat; - if (!len) - return; - } eat = len; k = 0; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { @@ -1131,7 +1124,11 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) return -ENOMEM; - __pskb_trim_head(skb, len); + /* If len == headlen, we avoid __skb_pull to preserve alignment. */ + if (unlikely(len < skb_headlen(skb))) + __skb_pull(skb, len); + else + __pskb_trim_head(skb, len - skb_headlen(skb)); TCP_SKB_CB(skb)->seq += len; skb->ip_summed = CHECKSUM_PARTIAL; @@ -1584,7 +1581,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) * frame, so if we have space for more than 3 frames * then send now. */ - if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache) + if (limit > tcp_max_burst(tp) * tp->mss_cache) goto send_now; } @@ -1922,7 +1919,7 @@ u32 __tcp_select_window(struct sock *sk) if (free_space < (full_space >> 1)) { icsk->icsk_ack.quick = 0; - if (sk_under_memory_pressure(sk)) + if (tcp_memory_pressure) tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); @@ -2150,15 +2147,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) */ TCP_SKB_CB(skb)->when = tcp_time_stamp; - /* make sure skb->data is aligned on arches that require it */ - if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) { - struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, - GFP_ATOMIC); - err = nskb ? 
tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : - -ENOBUFS; - } else { - err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); - } + err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); if (err == 0) { /* Update global TCP statistics. */ diff --git a/trunk/net/ipv4/tcp_timer.c b/trunk/net/ipv4/tcp_timer.c index a516d1e399df..2e0f0af76c19 100644 --- a/trunk/net/ipv4/tcp_timer.c +++ b/trunk/net/ipv4/tcp_timer.c @@ -171,13 +171,13 @@ static int tcp_write_timeout(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); int retry_until; - bool do_reset, syn_set = false; + bool do_reset, syn_set = 0; if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { if (icsk->icsk_retransmits) dst_negative_advice(sk); retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; - syn_set = true; + syn_set = 1; } else { if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) { /* Black hole detection */ @@ -261,7 +261,7 @@ static void tcp_delack_timer(unsigned long data) } out: - if (sk_under_memory_pressure(sk)) + if (tcp_memory_pressure) sk_mem_reclaim(sk); out_unlock: bh_unlock_sock(sk); @@ -340,7 +340,7 @@ void tcp_retransmit_timer(struct sock *sk) &inet->inet_daddr, ntohs(inet->inet_dport), inet->inet_num, tp->snd_una, tp->snd_nxt); } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) else if (sk->sk_family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n", diff --git a/trunk/net/ipv4/tunnel4.c b/trunk/net/ipv4/tunnel4.c index 01775983b997..ac3b3ee4b07c 100644 --- a/trunk/net/ipv4/tunnel4.c +++ b/trunk/net/ipv4/tunnel4.c @@ -105,7 +105,7 @@ static int tunnel4_rcv(struct sk_buff *skb) return 0; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static int tunnel64_rcv(struct sk_buff *skb) { struct xfrm_tunnel *handler; @@ -134,7 +134,7 @@ static void tunnel4_err(struct sk_buff *skb, u32 info) break; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static void tunnel64_err(struct sk_buff *skb, u32 info) { struct xfrm_tunnel *handler; @@ -152,7 +152,7 @@ static const struct net_protocol tunnel4_protocol = { .netns_ok = 1, }; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static const struct net_protocol tunnel64_protocol = { .handler = tunnel64_rcv, .err_handler = tunnel64_err, @@ -167,7 +167,7 @@ static int __init tunnel4_init(void) printk(KERN_ERR "tunnel4 init: can't add protocol\n"); return -EAGAIN; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) if (inet_add_protocol(&tunnel64_protocol, IPPROTO_IPV6)) { printk(KERN_ERR "tunnel64 init: can't add protocol\n"); inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP); @@ -179,7 +179,7 @@ static int __init tunnel4_init(void) static void __exit tunnel4_fini(void) { -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) if (inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6)) printk(KERN_ERR "tunnel64 close: can't remove protocol\n"); #endif diff --git a/trunk/net/ipv4/udp.c b/trunk/net/ipv4/udp.c index 5d075b5f70fc..5a65eeac1d29 100644 --- a/trunk/net/ipv4/udp.c +++ b/trunk/net/ipv4/udp.c @@ -445,7 +445,7 @@ static struct sock *udp4_lib_lookup2(struct net *net, /* UDP is nearly always wildcards out the wazoo, it makes no sense to try * harder than this. 
-DaveM */ -struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, +static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif, struct udp_table *udptable) { @@ -512,7 +512,6 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, rcu_read_unlock(); return result; } -EXPORT_SYMBOL_GPL(__udp4_lib_lookup); static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, __be16 sport, __be16 dport, @@ -1359,7 +1358,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) if (inet_sk(sk)->inet_daddr) sock_rps_save_rxhash(sk, skb); - rc = sock_queue_rcv_skb(sk, skb); + rc = ip_queue_rcv_skb(sk, skb); if (rc < 0) { int is_udplite = IS_UDPLITE(sk); @@ -1475,7 +1474,6 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) rc = 0; - ipv4_pktinfo_prepare(skb); bh_lock_sock(sk); if (!sock_owned_by_user(sk)) rc = __udp_queue_rcv_skb(sk, skb); @@ -2249,8 +2247,7 @@ int udp4_ufo_send_check(struct sk_buff *skb) return 0; } -struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, - netdev_features_t features) +struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features) { struct sk_buff *segs = ERR_PTR(-EINVAL); unsigned int mss; diff --git a/trunk/net/ipv4/udp_diag.c b/trunk/net/ipv4/udp_diag.c deleted file mode 100644 index 69f8a7ca63dd..000000000000 --- a/trunk/net/ipv4/udp_diag.c +++ /dev/null @@ -1,201 +0,0 @@ -/* - * udp_diag.c Module for monitoring UDP transport protocols sockets. - * - * Authors: Pavel Emelyanov, - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - - -#include -#include -#include -#include -#include -#include -#include - -static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, - struct netlink_callback *cb, struct inet_diag_req *req, - struct nlattr *bc) -{ - if (!inet_diag_bc_sk(bc, sk)) - return 0; - - return inet_sk_diag_fill(sk, NULL, skb, req, NETLINK_CB(cb->skb).pid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); -} - -static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb, - const struct nlmsghdr *nlh, struct inet_diag_req *req) -{ - int err = -EINVAL; - struct sock *sk; - struct sk_buff *rep; - - if (req->sdiag_family == AF_INET) - sk = __udp4_lib_lookup(&init_net, - req->id.idiag_src[0], req->id.idiag_sport, - req->id.idiag_dst[0], req->id.idiag_dport, - req->id.idiag_if, tbl); -#if IS_ENABLED(CONFIG_IPV6) - else if (req->sdiag_family == AF_INET6) - sk = __udp6_lib_lookup(&init_net, - (struct in6_addr *)req->id.idiag_src, - req->id.idiag_sport, - (struct in6_addr *)req->id.idiag_dst, - req->id.idiag_dport, - req->id.idiag_if, tbl); -#endif - else - goto out_nosk; - - err = -ENOENT; - if (sk == NULL) - goto out_nosk; - - err = sock_diag_check_cookie(sk, req->id.idiag_cookie); - if (err) - goto out; - - err = -ENOMEM; - rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) + - sizeof(struct inet_diag_meminfo) + - 64)), GFP_KERNEL); - if (!rep) - goto out; - - err = inet_sk_diag_fill(sk, NULL, rep, req, - NETLINK_CB(in_skb).pid, - nlh->nlmsg_seq, 0, nlh); - if (err < 0) { - WARN_ON(err == -EMSGSIZE); - kfree_skb(rep); - goto out; - } - err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid, - MSG_DONTWAIT); - if (err > 0) - err = 0; -out: - if (sk) - sock_put(sk); -out_nosk: - return err; -} - -static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlink_callback *cb, - struct inet_diag_req *r, struct nlattr *bc) -{ - int num, s_num, slot, s_slot; - - s_slot = cb->args[0]; - num = s_num = cb->args[1]; - - for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) { - struct sock *sk; - struct hlist_nulls_node *node; - struct udp_hslot *hslot = &table->hash[slot]; - - if (hlist_nulls_empty(&hslot->head)) - continue; - - spin_lock_bh(&hslot->lock); - sk_nulls_for_each(sk, node, &hslot->head) { - struct inet_sock *inet = inet_sk(sk); - - if (num < s_num) - goto next; - if (!(r->idiag_states & (1 << sk->sk_state))) - goto next; - if (r->sdiag_family != AF_UNSPEC && - sk->sk_family != r->sdiag_family) - goto next; - if (r->id.idiag_sport != inet->inet_sport && - r->id.idiag_sport) - goto next; - if (r->id.idiag_dport != inet->inet_dport && - r->id.idiag_dport) - goto next; - - if (sk_diag_dump(sk, skb, cb, r, bc) < 0) { - spin_unlock_bh(&hslot->lock); - goto done; - } -next: - num++; - } - spin_unlock_bh(&hslot->lock); - } -done: - cb->args[0] = slot; - cb->args[1] = num; -} - -static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - struct inet_diag_req *r, struct nlattr *bc) -{ - udp_dump(&udp_table, skb, cb, r, bc); -} - -static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh, - struct inet_diag_req *req) -{ - return udp_dump_one(&udp_table, in_skb, nlh, req); -} - -static const struct inet_diag_handler udp_diag_handler = { - .dump = udp_diag_dump, - .dump_one = udp_diag_dump_one, - .idiag_type = IPPROTO_UDP, -}; - -static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - struct inet_diag_req *r, struct nlattr *bc) -{ - udp_dump(&udplite_table, skb, cb, r, bc); -} - -static int 
udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh, - struct inet_diag_req *req) -{ - return udp_dump_one(&udplite_table, in_skb, nlh, req); -} - -static const struct inet_diag_handler udplite_diag_handler = { - .dump = udplite_diag_dump, - .dump_one = udplite_diag_dump_one, - .idiag_type = IPPROTO_UDPLITE, -}; - -static int __init udp_diag_init(void) -{ - int err; - - err = inet_diag_register(&udp_diag_handler); - if (err) - goto out; - err = inet_diag_register(&udplite_diag_handler); - if (err) - goto out_lite; -out: - return err; -out_lite: - inet_diag_unregister(&udp_diag_handler); - goto out; -} - -static void __exit udp_diag_exit(void) -{ - inet_diag_unregister(&udplite_diag_handler); - inet_diag_unregister(&udp_diag_handler); -} - -module_init(udp_diag_init); -module_exit(udp_diag_exit); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-17 /* AF_INET - IPPROTO_UDP */); -MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-136 /* AF_INET - IPPROTO_UDPLITE */); diff --git a/trunk/net/ipv4/xfrm4_tunnel.c b/trunk/net/ipv4/xfrm4_tunnel.c index 9247d9d70e9d..82806455e859 100644 --- a/trunk/net/ipv4/xfrm4_tunnel.c +++ b/trunk/net/ipv4/xfrm4_tunnel.c @@ -64,7 +64,7 @@ static struct xfrm_tunnel xfrm_tunnel_handler __read_mostly = { .priority = 2, }; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static struct xfrm_tunnel xfrm64_tunnel_handler __read_mostly = { .handler = xfrm_tunnel_rcv, .err_handler = xfrm_tunnel_err, @@ -84,7 +84,7 @@ static int __init ipip_init(void) xfrm_unregister_type(&ipip_type, AF_INET); return -EAGAIN; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) if (xfrm4_tunnel_register(&xfrm64_tunnel_handler, AF_INET6)) { printk(KERN_INFO "ipip init: can't add xfrm handler for AF_INET6\n"); xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET); @@ -97,7 +97,7 @@ static int __init ipip_init(void) static void __exit ipip_fini(void) { -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) if (xfrm4_tunnel_deregister(&xfrm64_tunnel_handler, AF_INET6)) printk(KERN_INFO "ipip close: can't remove xfrm handler for AF_INET6\n"); #endif diff --git a/trunk/net/ipv6/addrconf.c b/trunk/net/ipv6/addrconf.c index 0ba0866230c9..36806def8cfd 100644 --- a/trunk/net/ipv6/addrconf.c +++ b/trunk/net/ipv6/addrconf.c @@ -630,13 +630,13 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, goto out; } - rt = addrconf_dst_alloc(idev, addr, false); + rt = addrconf_dst_alloc(idev, addr, 0); if (IS_ERR(rt)) { err = PTR_ERR(rt); goto out; } - ifa->addr = *addr; + ipv6_addr_copy(&ifa->addr, addr); spin_lock_init(&ifa->lock); spin_lock_init(&ifa->state_lock); @@ -650,6 +650,16 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, ifa->rt = rt; + /* + * part one of RFC 4429, section 3.3 + * We should not configure an address as + * optimistic if we do not yet know the link + * layer address of our nexhop router + */ + + if (dst_get_neighbour_raw(&rt->dst) == NULL) + ifa->flags &= ~IFA_F_OPTIMISTIC; + ifa->idev = idev; in6_dev_hold(idev); /* For caller */ @@ -797,7 +807,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) ip6_del_rt(rt); rt = NULL; } else if (!(rt->rt6i_flags & RTF_EXPIRES)) { - rt->dst.expires = expires; + rt->rt6i_expires = expires; rt->rt6i_flags |= RTF_EXPIRES; } } @@ -1218,7 +1228,7 @@ int ipv6_dev_get_saddr(struct net *net, struct net_device 
*dst_dev, if (!hiscore->ifa) return -EADDRNOTAVAIL; - *saddr = hiscore->ifa->addr; + ipv6_addr_copy(saddr, &hiscore->ifa->addr); in6_ifa_put(hiscore->ifa); return 0; } @@ -1239,7 +1249,7 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, list_for_each_entry(ifp, &idev->addr_list, if_list) { if (ifp->scope == IFA_LINK && !(ifp->flags & banned_flags)) { - *addr = ifp->addr; + ipv6_addr_copy(addr, &ifp->addr); err = 0; break; } @@ -1690,7 +1700,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev, .fc_protocol = RTPROT_KERNEL, }; - cfg.fc_dst = *pfx; + ipv6_addr_copy(&cfg.fc_dst, pfx); /* Prevent useless cloning on PtP SIT. This thing is done here expecting that the whole @@ -1723,7 +1733,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, if (!fn) goto out; for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) { - if (rt->dst.dev->ifindex != dev->ifindex) + if (rt->rt6i_dev->ifindex != dev->ifindex) continue; if ((rt->rt6i_flags & flags) != flags) continue; @@ -1803,7 +1813,7 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev) return idev; } -void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao) +void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len) { struct prefix_info *pinfo; __u32 valid_lft; @@ -1881,11 +1891,11 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao) rt = NULL; } else if (addrconf_finite_timeout(rt_expires)) { /* not infinity */ - rt->dst.expires = jiffies + rt_expires; + rt->rt6i_expires = jiffies + rt_expires; rt->rt6i_flags |= RTF_EXPIRES; } else { rt->rt6i_flags &= ~RTF_EXPIRES; - rt->dst.expires = 0; + rt->rt6i_expires = 0; } } else if (valid_lft) { clock_t expires = 0; @@ -1934,7 +1944,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao) #ifdef CONFIG_IPV6_OPTIMISTIC_DAD if (in6_dev->cnf.optimistic_dad && - !net->ipv6.devconf_all->forwarding && sllao) + !net->ipv6.devconf_all->forwarding) addr_flags = IFA_F_OPTIMISTIC; #endif @@ -3068,39 +3078,20 @@ static void addrconf_dad_run(struct inet6_dev *idev) struct if6_iter_state { struct seq_net_private p; int bucket; - int offset; }; -static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos) +static struct inet6_ifaddr *if6_get_first(struct seq_file *seq) { struct inet6_ifaddr *ifa = NULL; struct if6_iter_state *state = seq->private; struct net *net = seq_file_net(seq); - int p = 0; - /* initial bucket if pos is 0 */ - if (pos == 0) { - state->bucket = 0; - state->offset = 0; - } - - for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) { + for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) { struct hlist_node *n; hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket], - addr_lst) { - /* sync with offset */ - if (p < state->offset) { - p++; - continue; - } - state->offset++; + addr_lst) if (net_eq(dev_net(ifa->idev->dev), net)) return ifa; - } - - /* prepare for next bucket */ - state->offset = 0; - p = 0; } return NULL; } @@ -3112,17 +3103,13 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, struct net *net = seq_file_net(seq); struct hlist_node *n = &ifa->addr_lst; - hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) { - state->offset++; + hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) if (net_eq(dev_net(ifa->idev->dev), net)) return ifa; - } while (++state->bucket < IN6_ADDR_HSIZE) { - state->offset = 0; hlist_for_each_entry_rcu_bh(ifa, n, 
&inet6_addr_lst[state->bucket], addr_lst) { - state->offset++; if (net_eq(dev_net(ifa->idev->dev), net)) return ifa; } @@ -3131,11 +3118,21 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, return NULL; } +static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos) +{ + struct inet6_ifaddr *ifa = if6_get_first(seq); + + if (ifa) + while (pos && (ifa = if6_get_next(seq, ifa)) != NULL) + --pos; + return pos ? NULL : ifa; +} + static void *if6_seq_start(struct seq_file *seq, loff_t *pos) __acquires(rcu_bh) { rcu_read_lock_bh(); - return if6_get_first(seq, *pos); + return if6_get_idx(seq, *pos); } static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos) diff --git a/trunk/net/ipv6/af_inet6.c b/trunk/net/ipv6/af_inet6.c index 273f48d1df2e..d27c797f9f05 100644 --- a/trunk/net/ipv6/af_inet6.c +++ b/trunk/net/ipv6/af_inet6.c @@ -347,7 +347,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) */ v4addr = LOOPBACK4_IPV6; if (!(addr_type & IPV6_ADDR_MULTICAST)) { - if (!(inet->freebind || inet->transparent) && + if (!inet->transparent && !ipv6_chk_addr(net, &addr->sin6_addr, dev, 0)) { err = -EADDRNOTAVAIL; @@ -361,10 +361,10 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) inet->inet_rcv_saddr = v4addr; inet->inet_saddr = v4addr; - np->rcv_saddr = addr->sin6_addr; + ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr); if (!(addr_type & IPV6_ADDR_MULTICAST)) - np->saddr = addr->sin6_addr; + ipv6_addr_copy(&np->saddr, &addr->sin6_addr); /* Make sure we are allowed to bind here. */ if (sk->sk_prot->get_port(sk, snum)) { @@ -458,14 +458,14 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr, peer == 1) return -ENOTCONN; sin->sin6_port = inet->inet_dport; - sin->sin6_addr = np->daddr; + ipv6_addr_copy(&sin->sin6_addr, &np->daddr); if (np->sndflow) sin->sin6_flowinfo = np->flow_label; } else { if (ipv6_addr_any(&np->rcv_saddr)) - sin->sin6_addr = np->saddr; + ipv6_addr_copy(&sin->sin6_addr, &np->saddr); else - sin->sin6_addr = np->rcv_saddr; + ipv6_addr_copy(&sin->sin6_addr, &np->rcv_saddr); sin->sin6_port = inet->inet_sport; } @@ -660,8 +660,8 @@ int inet6_sk_rebuild_header(struct sock *sk) memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = sk->sk_protocol; - fl6.daddr = np->daddr; - fl6.saddr = np->saddr; + ipv6_addr_copy(&fl6.daddr, &np->daddr); + ipv6_addr_copy(&fl6.saddr, &np->saddr); fl6.flowlabel = np->flow_label; fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = sk->sk_mark; @@ -769,8 +769,7 @@ static int ipv6_gso_send_check(struct sk_buff *skb) return err; } -static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, - netdev_features_t features) +static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, u32 features) { struct sk_buff *segs = ERR_PTR(-EINVAL); struct ipv6hdr *ipv6h; @@ -986,9 +985,9 @@ static int __net_init ipv6_init_mibs(struct net *net) sizeof(struct icmpv6_mib), __alignof__(struct icmpv6_mib)) < 0) goto err_icmp_mib; - net->mib.icmpv6msg_statistics = kzalloc(sizeof(struct icmpv6msg_mib), - GFP_KERNEL); - if (!net->mib.icmpv6msg_statistics) + if (snmp_mib_init((void __percpu **)net->mib.icmpv6msg_statistics, + sizeof(struct icmpv6msg_mib), + __alignof__(struct icmpv6msg_mib)) < 0) goto err_icmpmsg_mib; return 0; @@ -1009,7 +1008,7 @@ static void ipv6_cleanup_mibs(struct net *net) snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6); snmp_mib_free((void __percpu **)net->mib.ipv6_statistics); snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics); - 
kfree(net->mib.icmpv6msg_statistics); + snmp_mib_free((void __percpu **)net->mib.icmpv6msg_statistics); } static int __net_init inet6_net_init(struct net *net) @@ -1116,8 +1115,6 @@ static int __init inet6_init(void) if (err) goto static_sysctl_fail; #endif - tcpv6_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem; - /* * ipngwg API draft makes clear that the correct semantics * for TCP and UDP is to consider one TCP and UDP instance diff --git a/trunk/net/ipv6/ah6.c b/trunk/net/ipv6/ah6.c index 2ae79dbeec2f..4c0f894d0843 100644 --- a/trunk/net/ipv6/ah6.c +++ b/trunk/net/ipv6/ah6.c @@ -193,9 +193,9 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des printk(KERN_WARNING "destopt hao: invalid header length: %u\n", hao->length); goto bad; } - final_addr = hao->addr; - hao->addr = iph->saddr; - iph->saddr = final_addr; + ipv6_addr_copy(&final_addr, &hao->addr); + ipv6_addr_copy(&hao->addr, &iph->saddr); + ipv6_addr_copy(&iph->saddr, &final_addr); } break; } @@ -241,13 +241,13 @@ static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr) segments = rthdr->hdrlen >> 1; addrs = ((struct rt0_hdr *)rthdr)->addr; - final_addr = addrs[segments - 1]; + ipv6_addr_copy(&final_addr, addrs + segments - 1); addrs += segments - segments_left; memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs)); - addrs[0] = iph->daddr; - iph->daddr = final_addr; + ipv6_addr_copy(addrs, &iph->daddr); + ipv6_addr_copy(&iph->daddr, &final_addr); } static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir) diff --git a/trunk/net/ipv6/anycast.c b/trunk/net/ipv6/anycast.c index 59402b4637f9..674255f5e6b7 100644 --- a/trunk/net/ipv6/anycast.c +++ b/trunk/net/ipv6/anycast.c @@ -75,7 +75,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr) if (pac == NULL) return -ENOMEM; pac->acl_next = NULL; - pac->acl_addr = *addr; + ipv6_addr_copy(&pac->acl_addr, addr); rcu_read_lock(); if (ifindex == 0) { @@ -83,7 +83,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr) rt = rt6_lookup(net, addr, NULL, 0, 0); if (rt) { - dev = rt->dst.dev; + dev = rt->rt6i_dev; dst_release(&rt->dst); } else if (ishost) { err = -EADDRNOTAVAIL; @@ -289,14 +289,14 @@ int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr) goto out; } - rt = addrconf_dst_alloc(idev, addr, true); + rt = addrconf_dst_alloc(idev, addr, 1); if (IS_ERR(rt)) { kfree(aca); err = PTR_ERR(rt); goto out; } - aca->aca_addr = *addr; + ipv6_addr_copy(&aca->aca_addr, addr); aca->aca_idev = idev; aca->aca_rt = rt; aca->aca_users = 1; diff --git a/trunk/net/ipv6/datagram.c b/trunk/net/ipv6/datagram.c index ae08aee1773c..e2480691c220 100644 --- a/trunk/net/ipv6/datagram.c +++ b/trunk/net/ipv6/datagram.c @@ -71,7 +71,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (flowlabel == NULL) return -EINVAL; - usin->sin6_addr = flowlabel->dst; + ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst); } } @@ -143,7 +143,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) } } - np->daddr = *daddr; + ipv6_addr_copy(&np->daddr, daddr); np->flow_label = fl6.flowlabel; inet->inet_dport = usin->sin6_port; @@ -154,8 +154,8 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) */ fl6.flowi6_proto = sk->sk_protocol; - fl6.daddr = np->daddr; - fl6.saddr = np->saddr; + ipv6_addr_copy(&fl6.daddr, &np->daddr); + 
ipv6_addr_copy(&fl6.saddr, &np->saddr); fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = sk->sk_mark; fl6.fl6_dport = inet->inet_dport; @@ -179,10 +179,10 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) /* source address lookup done in ip6_dst_lookup */ if (ipv6_addr_any(&np->saddr)) - np->saddr = fl6.saddr; + ipv6_addr_copy(&np->saddr, &fl6.saddr); if (ipv6_addr_any(&np->rcv_saddr)) { - np->rcv_saddr = fl6.saddr; + ipv6_addr_copy(&np->rcv_saddr, &fl6.saddr); inet->inet_rcv_saddr = LOOPBACK4_IPV6; if (sk->sk_prot->rehash) sk->sk_prot->rehash(sk); @@ -257,7 +257,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info) skb_put(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); iph = ipv6_hdr(skb); - iph->daddr = fl6->daddr; + ipv6_addr_copy(&iph->daddr, &fl6->daddr); serr = SKB_EXT_ERR(skb); serr->ee.ee_errno = err; @@ -294,7 +294,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu) skb_put(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); iph = ipv6_hdr(skb); - iph->daddr = fl6->daddr; + ipv6_addr_copy(&iph->daddr, &fl6->daddr); mtu_info = IP6CBMTU(skb); @@ -303,7 +303,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu) mtu_info->ip6m_addr.sin6_port = 0; mtu_info->ip6m_addr.sin6_flowinfo = 0; mtu_info->ip6m_addr.sin6_scope_id = fl6->flowi6_oif; - mtu_info->ip6m_addr.sin6_addr = ipv6_hdr(skb)->daddr; + ipv6_addr_copy(&mtu_info->ip6m_addr.sin6_addr, &ipv6_hdr(skb)->daddr); __skb_pull(skb, skb_tail_pointer(skb) - skb->data); skb_reset_transport_header(skb); @@ -354,8 +354,8 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) sin->sin6_port = serr->port; sin->sin6_scope_id = 0; if (skb->protocol == htons(ETH_P_IPV6)) { - sin->sin6_addr = - *(struct in6_addr *)(nh + serr->addr_offset); + ipv6_addr_copy(&sin->sin6_addr, + (struct in6_addr *)(nh + serr->addr_offset)); if (np->sndflow) sin->sin6_flowinfo = (*(__be32 *)(nh + serr->addr_offset - 24) & @@ -376,7 +376,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) sin->sin6_flowinfo = 0; sin->sin6_scope_id = 0; if (skb->protocol == htons(ETH_P_IPV6)) { - sin->sin6_addr = ipv6_hdr(skb)->saddr; + ipv6_addr_copy(&sin->sin6_addr, &ipv6_hdr(skb)->saddr); if (np->rxopt.all) datagram_recv_ctl(sk, msg, skb); if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) @@ -451,7 +451,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len) sin->sin6_flowinfo = 0; sin->sin6_port = 0; sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id; - sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr; + ipv6_addr_copy(&sin->sin6_addr, &mtu_info.ip6m_addr.sin6_addr); } put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info); @@ -475,7 +475,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) struct in6_pktinfo src_info; src_info.ipi6_ifindex = opt->iif; - src_info.ipi6_addr = ipv6_hdr(skb)->daddr; + ipv6_addr_copy(&src_info.ipi6_addr, &ipv6_hdr(skb)->daddr); put_cmsg(msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info); } @@ -550,7 +550,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) struct in6_pktinfo src_info; src_info.ipi6_ifindex = opt->iif; - src_info.ipi6_addr = ipv6_hdr(skb)->daddr; + ipv6_addr_copy(&src_info.ipi6_addr, &ipv6_hdr(skb)->daddr); put_cmsg(msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info); } if (np->rxopt.bits.rxohlim) { @@ -584,7 +584,7 @@ int datagram_recv_ctl(struct sock *sk, 
struct msghdr *msg, struct sk_buff *skb) */ sin6.sin6_family = AF_INET6; - sin6.sin6_addr = ipv6_hdr(skb)->daddr; + ipv6_addr_copy(&sin6.sin6_addr, &ipv6_hdr(skb)->daddr); sin6.sin6_port = ports[1]; sin6.sin6_flowinfo = 0; sin6.sin6_scope_id = 0; @@ -654,12 +654,12 @@ int datagram_send_ctl(struct net *net, struct sock *sk, if (addr_type != IPV6_ADDR_ANY) { int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL; - if (!(inet_sk(sk)->freebind || inet_sk(sk)->transparent) && + if (!inet_sk(sk)->transparent && !ipv6_chk_addr(net, &src_info->ipi6_addr, strict ? dev : NULL, 0)) err = -EINVAL; else - fl6->saddr = src_info->ipi6_addr; + ipv6_addr_copy(&fl6->saddr, &src_info->ipi6_addr); } rcu_read_unlock(); diff --git a/trunk/net/ipv6/exthdrs.c b/trunk/net/ipv6/exthdrs.c index 3d641b6e9b09..bf22a225f422 100644 --- a/trunk/net/ipv6/exthdrs.c +++ b/trunk/net/ipv6/exthdrs.c @@ -243,9 +243,9 @@ static int ipv6_dest_hao(struct sk_buff *skb, int optoff) if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = CHECKSUM_NONE; - tmp_addr = ipv6h->saddr; - ipv6h->saddr = hao->addr; - hao->addr = tmp_addr; + ipv6_addr_copy(&tmp_addr, &ipv6h->saddr); + ipv6_addr_copy(&ipv6h->saddr, &hao->addr); + ipv6_addr_copy(&hao->addr, &tmp_addr); if (skb->tstamp.tv64 == 0) __net_timestamp(skb); @@ -461,9 +461,9 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb) return -1; } - daddr = *addr; - *addr = ipv6_hdr(skb)->daddr; - ipv6_hdr(skb)->daddr = daddr; + ipv6_addr_copy(&daddr, addr); + ipv6_addr_copy(addr, &ipv6_hdr(skb)->daddr); + ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &daddr); skb_dst_drop(skb); ip6_route_input(skb); @@ -690,7 +690,7 @@ static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto, memcpy(phdr->addr, ihdr->addr + 1, (hops - 1) * sizeof(struct in6_addr)); - phdr->addr[hops - 1] = **addr_p; + ipv6_addr_copy(phdr->addr + (hops - 1), *addr_p); *addr_p = ihdr->addr; phdr->rt_hdr.nexthdr = *proto; @@ -888,8 +888,8 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6, if (!opt || !opt->srcrt) return NULL; - *orig = fl6->daddr; - fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr; + ipv6_addr_copy(orig, &fl6->daddr); + ipv6_addr_copy(&fl6->daddr, ((struct rt0_hdr *)opt->srcrt)->addr); return orig; } diff --git a/trunk/net/ipv6/exthdrs_core.c b/trunk/net/ipv6/exthdrs_core.c index 72957f4a7c6c..37f548b7f6dc 100644 --- a/trunk/net/ipv6/exthdrs_core.c +++ b/trunk/net/ipv6/exthdrs_core.c @@ -57,9 +57,6 @@ int ipv6_ext_hdr(u8 nexthdr) * it returns NULL. * - First fragment header is skipped, not-first ones * are considered as unparsable. - * - Reports the offset field of the final fragment header so it is - * possible to tell whether this is a first fragment, later fragment, - * or not fragmented. * - ESP is unparsable for now and considered like * normal payload protocol. * - Note also special handling of AUTH header. Thanks to IPsec wizards. 
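The exthdrs_core.c comment above spells out how ipv6_skip_exthdr() walks the extension-header chain. A minimal user-space sketch of that walk follows (illustrative only; it ignores AH's different length units and fragment handling, and the sample buffer is invented for the demo, it is not the kernel function):

#include <stdio.h>
#include <stdint.h>

#define NEXTHDR_HOP     0   /* hop-by-hop options */
#define NEXTHDR_ROUTING 43  /* routing header */
#define NEXTHDR_DEST    60  /* destination options */
#define NEXTHDR_TCP     6

static int is_ext_hdr(uint8_t nexthdr)
{
        return nexthdr == NEXTHDR_HOP ||
               nexthdr == NEXTHDR_ROUTING ||
               nexthdr == NEXTHDR_DEST;
}

/* Returns the offset of the upper-layer header and stores its protocol
 * number in *proto.  Generic extension headers encode their length in
 * 8-octet units, not counting the first 8 octets. */
static int skip_exthdrs(const uint8_t *pkt, int off, uint8_t *proto)
{
        while (is_ext_hdr(*proto)) {
                uint8_t hdrlen = pkt[off + 1];

                *proto = pkt[off];              /* next-header field */
                off += (hdrlen + 1) * 8;
        }
        return off;
}

int main(void)
{
        /* 40-byte "payload": a hop-by-hop header (8 bytes, next = TCP)
         * followed by dummy bytes.  Purely illustrative. */
        uint8_t pkt[40] = { NEXTHDR_TCP, 0 /* hdrlen: 8 octets total */ };
        uint8_t proto = NEXTHDR_HOP;            /* from the IPv6 header */
        int off = skip_exthdrs(pkt, 0, &proto);

        printf("upper-layer proto %u at offset %d\n", proto, off);
        return 0;
}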
@@ -67,13 +64,10 @@ int ipv6_ext_hdr(u8 nexthdr) * --ANK (980726) */ -int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp, - __be16 *frag_offp) +int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp) { u8 nexthdr = *nexthdrp; - *frag_offp = 0; - while (ipv6_ext_hdr(nexthdr)) { struct ipv6_opt_hdr _hdr, *hp; int hdrlen; @@ -93,8 +87,7 @@ int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp, if (fp == NULL) return -1; - *frag_offp = *fp; - if (ntohs(*frag_offp) & ~0x7) + if (ntohs(*fp) & ~0x7) break; hdrlen = 8; } else if (nexthdr == NEXTHDR_AUTH) diff --git a/trunk/net/ipv6/fib6_rules.c b/trunk/net/ipv6/fib6_rules.c index b6c573152067..295571576f83 100644 --- a/trunk/net/ipv6/fib6_rules.c +++ b/trunk/net/ipv6/fib6_rules.c @@ -96,7 +96,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, if (!ipv6_prefix_equal(&saddr, &r->src.addr, r->src.plen)) goto again; - flp6->saddr = saddr; + ipv6_addr_copy(&flp6->saddr, &saddr); } goto out; } diff --git a/trunk/net/ipv6/icmp.c b/trunk/net/ipv6/icmp.c index 01d46bff63c3..90868fb42757 100644 --- a/trunk/net/ipv6/icmp.c +++ b/trunk/net/ipv6/icmp.c @@ -135,12 +135,11 @@ static int is_ineligible(struct sk_buff *skb) int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data; int len = skb->len - ptr; __u8 nexthdr = ipv6_hdr(skb)->nexthdr; - __be16 frag_off; if (len < 0) return 1; - ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, &frag_off); + ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr); if (ptr < 0) return 0; if (nexthdr == IPPROTO_ICMPV6) { @@ -291,9 +290,9 @@ static void mip6_addr_swap(struct sk_buff *skb) if (likely(off >= 0)) { hao = (struct ipv6_destopt_hao *) (skb_network_header(skb) + off); - tmp = iph->saddr; - iph->saddr = hao->addr; - hao->addr = tmp; + ipv6_addr_copy(&tmp, &iph->saddr); + ipv6_addr_copy(&iph->saddr, &hao->addr); + ipv6_addr_copy(&hao->addr, &tmp); } } } @@ -445,9 +444,9 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_ICMPV6; - fl6.daddr = hdr->saddr; + ipv6_addr_copy(&fl6.daddr, &hdr->saddr); if (saddr) - fl6.saddr = *saddr; + ipv6_addr_copy(&fl6.saddr, saddr); fl6.flowi6_oif = iif; fl6.fl6_icmp_type = type; fl6.fl6_icmp_code = code; @@ -539,9 +538,9 @@ static void icmpv6_echo_reply(struct sk_buff *skb) memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_ICMPV6; - fl6.daddr = ipv6_hdr(skb)->saddr; + ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr); if (saddr) - fl6.saddr = *saddr; + ipv6_addr_copy(&fl6.saddr, saddr); fl6.flowi6_oif = skb->dev->ifindex; fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY; security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); @@ -597,7 +596,6 @@ static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info) int inner_offset; int hash; u8 nexthdr; - __be16 frag_off; if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) return; @@ -605,8 +603,7 @@ static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info) nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr; if (ipv6_ext_hdr(nexthdr)) { /* now skip over extension headers */ - inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), - &nexthdr, &frag_off); + inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr); if (inner_offset<0) return; } else { @@ -789,8 +786,8 @@ void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6, int oif) { memset(fl6, 0, sizeof(*fl6)); - fl6->saddr = *saddr; - fl6->daddr = *daddr; + ipv6_addr_copy(&fl6->saddr, saddr); + 
ipv6_addr_copy(&fl6->daddr, daddr); fl6->flowi6_proto = IPPROTO_ICMPV6; fl6->fl6_icmp_type = type; fl6->fl6_icmp_code = 0; diff --git a/trunk/net/ipv6/inet6_connection_sock.c b/trunk/net/ipv6/inet6_connection_sock.c index 02dd203d9eac..1567fb120392 100644 --- a/trunk/net/ipv6/inet6_connection_sock.c +++ b/trunk/net/ipv6/inet6_connection_sock.c @@ -65,9 +65,9 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk, memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_TCP; - fl6.daddr = treq->rmt_addr; + ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr); final_p = fl6_update_dst(&fl6, np->opt, &final); - fl6.saddr = treq->loc_addr; + ipv6_addr_copy(&fl6.saddr, &treq->loc_addr); fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = sk->sk_mark; fl6.fl6_dport = inet_rsk(req)->rmt_port; @@ -157,7 +157,7 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr) struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr; sin6->sin6_family = AF_INET6; - sin6->sin6_addr = np->daddr; + ipv6_addr_copy(&sin6->sin6_addr, &np->daddr); sin6->sin6_port = inet_sk(sk)->inet_dport; /* We do not store received flowlabel for TCP */ sin6->sin6_flowinfo = 0; @@ -215,8 +215,8 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused) memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = sk->sk_protocol; - fl6.daddr = np->daddr; - fl6.saddr = np->saddr; + ipv6_addr_copy(&fl6.daddr, &np->daddr); + ipv6_addr_copy(&fl6.saddr, &np->saddr); fl6.flowlabel = np->flow_label; IP6_ECN_flow_xmit(sk, fl6.flowlabel); fl6.flowi6_oif = sk->sk_bound_dev_if; @@ -246,7 +246,7 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused) skb_dst_set_noref(skb, dst); /* Restore final destination back after routing done */ - fl6.daddr = np->daddr; + ipv6_addr_copy(&fl6.daddr, &np->daddr); res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass); rcu_read_unlock(); diff --git a/trunk/net/ipv6/ip6_fib.c b/trunk/net/ipv6/ip6_fib.c index b82bcde53f7a..93718f3db79b 100644 --- a/trunk/net/ipv6/ip6_fib.c +++ b/trunk/net/ipv6/ip6_fib.c @@ -190,7 +190,7 @@ static struct fib6_table *fib6_alloc_table(struct net *net, u32 id) struct fib6_table *table; table = kzalloc(sizeof(*table), GFP_ATOMIC); - if (table) { + if (table != NULL) { table->tb6_id = id; table->tb6_root.leaf = net->ipv6.ip6_null_entry; table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO; @@ -210,7 +210,7 @@ struct fib6_table *fib6_new_table(struct net *net, u32 id) return tb; tb = fib6_alloc_table(net, id); - if (tb) + if (tb != NULL) fib6_link_table(net, tb); return tb; @@ -367,7 +367,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) s_e = cb->args[1]; w = (void *)cb->args[2]; - if (!w) { + if (w == NULL) { /* New dump: * * 1. hook callback destructor. @@ -379,7 +379,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) * 2. allocate and initialize walker. 
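The inet6_dump_fib() comment above describes the resumable-dump pattern: the first callback allocates a walker, parks it in the callback's argument area, and later invocations continue from where the previous one stopped. A small user-space sketch of the same idea (the struct names, item count, and per-call limit below are invented for the demo):

#include <stdio.h>
#include <stdlib.h>

struct dump_cb {
        long args[4];           /* opaque per-dump scratch space */
};

struct walker {
        int next;               /* index of the next item to emit */
};

#define ITEMS     10
#define PER_CALL  4             /* pretend the output buffer fits 4 items */

static int dump_some(struct dump_cb *cb)
{
        struct walker *w = (struct walker *)cb->args[0];
        int emitted = 0;

        if (!w) {                               /* first call: new dump */
                w = calloc(1, sizeof(*w));
                if (!w)
                        return -1;
                cb->args[0] = (long)w;
        }

        while (w->next < ITEMS && emitted < PER_CALL) {
                printf("item %d\n", w->next);
                w->next++;
                emitted++;
        }

        if (emitted == 0) {                     /* nothing left: clean up */
                free(w);
                cb->args[0] = 0;
        }
        return emitted;
}

int main(void)
{
        struct dump_cb cb = { { 0 } };

        while (dump_some(&cb) > 0)              /* 0 means the dump is done */
                printf("-- next callback --\n");
        return 0;
}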
*/ w = kzalloc(sizeof(*w), GFP_ATOMIC); - if (!w) + if (w == NULL) return -ENOMEM; w->func = fib6_dump_node; cb->args[2] = (long)w; @@ -425,8 +425,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr, int addrlen, int plen, - int offset, int allow_create, - int replace_required) + int offset) { struct fib6_node *fn, *in, *ln; struct fib6_node *pn = NULL; @@ -448,18 +447,8 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr, * Prefix match */ if (plen < fn->fn_bit || - !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) { - if (!allow_create) { - if (replace_required) { - pr_warn("IPv6: Can't replace route, " - "no match found\n"); - return ERR_PTR(-ENOENT); - } - pr_warn("IPv6: NLM_F_CREATE should be set " - "when creating new route\n"); - } + !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) goto insert_above; - } /* * Exact match ? @@ -467,7 +456,7 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr, if (plen == fn->fn_bit) { /* clean up an intermediate node */ - if (!(fn->fn_flags & RTN_RTINFO)) { + if ((fn->fn_flags & RTN_RTINFO) == 0) { rt6_release(fn->leaf); fn->leaf = NULL; } @@ -488,23 +477,6 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr, fn = dir ? fn->right: fn->left; } while (fn); - if (!allow_create) { - /* We should not create new node because - * NLM_F_REPLACE was specified without NLM_F_CREATE - * I assume it is safe to require NLM_F_CREATE when - * REPLACE flag is used! Later we may want to remove the - * check for replace_required, because according - * to netlink specification, NLM_F_CREATE - * MUST be specified if new route is created. - * That would keep IPv6 consistent with IPv4 - */ - if (replace_required) { - pr_warn("IPv6: Can't replace route, no match found\n"); - return ERR_PTR(-ENOENT); - } - pr_warn("IPv6: NLM_F_CREATE should be set " - "when creating new route\n"); - } /* * We walked to the bottom of tree. * Create new leaf node without children. 
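The fib6_add_1()/fib6_lookup_1() hunks above descend the FIB tree by testing one bit of the key at each node (fn->fn_bit) and going right when the bit is set. A toy sketch of that descent, with a 32-bit key standing in for the 128-bit IPv6 address and a hand-built two-leaf tree (all names here are invented, this is not the kernel structure):

#include <stdio.h>
#include <stdint.h>

struct node {
        int bit;                /* which bit of the key this node tests */
        uint32_t key;           /* prefix stored at this node (if a leaf) */
        struct node *left, *right;
};

static int bit_set(uint32_t key, int bit)
{
        return (key >> (31 - bit)) & 1;         /* bit 0 = most significant */
}

static struct node *descend(struct node *root, uint32_t key)
{
        struct node *fn = root;

        while (fn->left || fn->right) {
                struct node *next = bit_set(key, fn->bit) ? fn->right
                                                          : fn->left;
                if (!next)
                        break;                  /* bottom of the tree */
                fn = next;
        }
        return fn;
}

int main(void)
{
        struct node leaf0 = { .bit = 1, .key = 0x00000000 };
        struct node leaf1 = { .bit = 1, .key = 0x80000000 };
        struct node root  = { .bit = 0, .left = &leaf0, .right = &leaf1 };

        printf("0x%08x lands on leaf 0x%08x\n",
               0xc0000000u, descend(&root, 0xc0000000u)->key);
        return 0;
}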
@@ -512,7 +484,7 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr, ln = node_alloc(); - if (!ln) + if (ln == NULL) return NULL; ln->fn_bit = plen; @@ -555,7 +527,7 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr, in = node_alloc(); ln = node_alloc(); - if (!in || !ln) { + if (in == NULL || ln == NULL) { if (in) node_free(in); if (ln) @@ -609,7 +581,7 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr, ln = node_alloc(); - if (!ln) + if (ln == NULL) return NULL; ln->fn_bit = plen; @@ -642,15 +614,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, { struct rt6_info *iter = NULL; struct rt6_info **ins; - int replace = (info->nlh && - (info->nlh->nlmsg_flags & NLM_F_REPLACE)); - int add = (!info->nlh || - (info->nlh->nlmsg_flags & NLM_F_CREATE)); - int found = 0; ins = &fn->leaf; - for (iter = fn->leaf; iter; iter = iter->dst.rt6_next) { + for (iter = fn->leaf; iter; iter=iter->dst.rt6_next) { /* * Search for duplicates */ @@ -659,24 +626,17 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, /* * Same priority level */ - if (info->nlh && - (info->nlh->nlmsg_flags & NLM_F_EXCL)) - return -EEXIST; - if (replace) { - found++; - break; - } - if (iter->dst.dev == rt->dst.dev && + if (iter->rt6i_dev == rt->rt6i_dev && iter->rt6i_idev == rt->rt6i_idev && ipv6_addr_equal(&iter->rt6i_gateway, &rt->rt6i_gateway)) { - if (!(iter->rt6i_flags & RTF_EXPIRES)) + if (!(iter->rt6i_flags&RTF_EXPIRES)) return -EEXIST; - iter->dst.expires = rt->dst.expires; - if (!(rt->rt6i_flags & RTF_EXPIRES)) { + iter->rt6i_expires = rt->rt6i_expires; + if (!(rt->rt6i_flags&RTF_EXPIRES)) { iter->rt6i_flags &= ~RTF_EXPIRES; - iter->dst.expires = 0; + iter->rt6i_expires = 0; } return -EEXIST; } @@ -695,40 +655,17 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, /* * insert node */ - if (!replace) { - if (!add) - pr_warn("IPv6: NLM_F_CREATE should be set when creating new route\n"); - -add: - rt->dst.rt6_next = iter; - *ins = rt; - rt->rt6i_node = fn; - atomic_inc(&rt->rt6i_ref); - inet6_rt_notify(RTM_NEWROUTE, rt, info); - info->nl_net->ipv6.rt6_stats->fib_rt_entries++; - - if (!(fn->fn_flags & RTN_RTINFO)) { - info->nl_net->ipv6.rt6_stats->fib_route_nodes++; - fn->fn_flags |= RTN_RTINFO; - } - } else { - if (!found) { - if (add) - goto add; - pr_warn("IPv6: NLM_F_REPLACE set, but no existing node found!\n"); - return -ENOENT; - } - *ins = rt; - rt->rt6i_node = fn; - rt->dst.rt6_next = iter->dst.rt6_next; - atomic_inc(&rt->rt6i_ref); - inet6_rt_notify(RTM_NEWROUTE, rt, info); - rt6_release(iter); - if (!(fn->fn_flags & RTN_RTINFO)) { - info->nl_net->ipv6.rt6_stats->fib_route_nodes++; - fn->fn_flags |= RTN_RTINFO; - } + rt->dst.rt6_next = iter; + *ins = rt; + rt->rt6i_node = fn; + atomic_inc(&rt->rt6i_ref); + inet6_rt_notify(RTM_NEWROUTE, rt, info); + info->nl_net->ipv6.rt6_stats->fib_rt_entries++; + + if ((fn->fn_flags & RTN_RTINFO) == 0) { + info->nl_net->ipv6.rt6_stats->fib_route_nodes++; + fn->fn_flags |= RTN_RTINFO; } return 0; @@ -737,7 +674,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, static __inline__ void fib6_start_gc(struct net *net, struct rt6_info *rt) { if (!timer_pending(&net->ipv6.ip6_fib_timer) && - (rt->rt6i_flags & (RTF_EXPIRES | RTF_CACHE))) + (rt->rt6i_flags & (RTF_EXPIRES|RTF_CACHE))) mod_timer(&net->ipv6.ip6_fib_timer, jiffies + net->ipv6.sysctl.ip6_rt_gc_interval); } @@ -759,28 +696,11 @@ int fib6_add(struct fib6_node *root, 
struct rt6_info *rt, struct nl_info *info) { struct fib6_node *fn, *pn = NULL; int err = -ENOMEM; - int allow_create = 1; - int replace_required = 0; - - if (info->nlh) { - if (!(info->nlh->nlmsg_flags & NLM_F_CREATE)) - allow_create = 0; - if (info->nlh->nlmsg_flags & NLM_F_REPLACE) - replace_required = 1; - } - if (!allow_create && !replace_required) - pr_warn("IPv6: RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n"); fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr), - rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst), - allow_create, replace_required); - - if (IS_ERR(fn)) { - err = PTR_ERR(fn); - fn = NULL; - } + rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst)); - if (!fn) + if (fn == NULL) goto out; pn = fn; @@ -789,7 +709,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) if (rt->rt6i_src.plen) { struct fib6_node *sn; - if (!fn->subtree) { + if (fn->subtree == NULL) { struct fib6_node *sfn; /* @@ -804,7 +724,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) /* Create subtree root node */ sfn = node_alloc(); - if (!sfn) + if (sfn == NULL) goto st_failure; sfn->leaf = info->nl_net->ipv6.ip6_null_entry; @@ -816,10 +736,9 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) sn = fib6_add_1(sfn, &rt->rt6i_src.addr, sizeof(struct in6_addr), rt->rt6i_src.plen, - offsetof(struct rt6_info, rt6i_src), - allow_create, replace_required); + offsetof(struct rt6_info, rt6i_src)); - if (!sn) { + if (sn == NULL) { /* If it is failed, discard just allocated root, and then (in st_failure) stale node in main tree. @@ -834,18 +753,13 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) } else { sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr, sizeof(struct in6_addr), rt->rt6i_src.plen, - offsetof(struct rt6_info, rt6i_src), - allow_create, replace_required); + offsetof(struct rt6_info, rt6i_src)); - if (IS_ERR(sn)) { - err = PTR_ERR(sn); - sn = NULL; - } - if (!sn) + if (sn == NULL) goto st_failure; } - if (!fn->leaf) { + if (fn->leaf == NULL) { fn->leaf = rt; atomic_inc(&rt->rt6i_ref); } @@ -854,9 +768,10 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) #endif err = fib6_add_rt2node(fn, rt, info); - if (!err) { + + if (err == 0) { fib6_start_gc(info->nl_net, rt); - if (!(rt->rt6i_flags & RTF_CACHE)) + if (!(rt->rt6i_flags&RTF_CACHE)) fib6_prune_clones(info->nl_net, pn, rt); } @@ -904,7 +819,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) */ struct lookup_args { - int offset; /* key offset on rt6_info */ + int offset; /* key offset on rt6_info */ const struct in6_addr *addr; /* search key */ }; @@ -934,10 +849,11 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root, fn = next; continue; } + break; } - while (fn) { + while(fn) { if (FIB6_SUBTREE(fn) || fn->fn_flags & RTN_RTINFO) { struct rt6key *key; @@ -984,7 +900,8 @@ struct fib6_node * fib6_lookup(struct fib6_node *root, const struct in6_addr *da }; fn = fib6_lookup_1(root, daddr ? 
args : args + 1); - if (!fn || fn->fn_flags & RTN_TL_ROOT) + + if (fn == NULL || fn->fn_flags & RTN_TL_ROOT) fn = root; return fn; @@ -1044,7 +961,7 @@ struct fib6_node * fib6_locate(struct fib6_node *root, } #endif - if (fn && fn->fn_flags & RTN_RTINFO) + if (fn && fn->fn_flags&RTN_RTINFO) return fn; return NULL; @@ -1058,13 +975,14 @@ struct fib6_node * fib6_locate(struct fib6_node *root, static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn) { - if (fn->fn_flags & RTN_ROOT) + if (fn->fn_flags&RTN_ROOT) return net->ipv6.ip6_null_entry; - while (fn) { - if (fn->left) + while(fn) { + if(fn->left) return fn->left->leaf; - if (fn->right) + + if(fn->right) return fn->right->leaf; fn = FIB6_SUBTREE(fn); @@ -1102,12 +1020,12 @@ static struct fib6_node *fib6_repair_tree(struct net *net, if (children == 3 || FIB6_SUBTREE(fn) #ifdef CONFIG_IPV6_SUBTREES /* Subtree root (i.e. fn) may have one child */ - || (children && fn->fn_flags & RTN_ROOT) + || (children && fn->fn_flags&RTN_ROOT) #endif ) { fn->leaf = fib6_find_prefix(net, fn); #if RT6_DEBUG >= 2 - if (!fn->leaf) { + if (fn->leaf==NULL) { WARN_ON(!fn->leaf); fn->leaf = net->ipv6.ip6_null_entry; } @@ -1140,7 +1058,7 @@ static struct fib6_node *fib6_repair_tree(struct net *net, read_lock(&fib6_walker_lock); FOR_WALKERS(w) { - if (!child) { + if (child == NULL) { if (w->root == fn) { w->root = w->node = NULL; RT6_TRACE("W %p adjusted by delroot 1\n", w); @@ -1169,7 +1087,7 @@ static struct fib6_node *fib6_repair_tree(struct net *net, read_unlock(&fib6_walker_lock); node_free(fn); - if (pn->fn_flags & RTN_RTINFO || FIB6_SUBTREE(pn)) + if (pn->fn_flags&RTN_RTINFO || FIB6_SUBTREE(pn)) return pn; rt6_release(pn->leaf); @@ -1203,7 +1121,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, if (w->state == FWS_C && w->leaf == rt) { RT6_TRACE("walker %p adjusted by delroute\n", w); w->leaf = rt->dst.rt6_next; - if (!w->leaf) + if (w->leaf == NULL) w->state = FWS_U; } } @@ -1212,7 +1130,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, rt->dst.rt6_next = NULL; /* If it was last route, expunge its radix tree node */ - if (!fn->leaf) { + if (fn->leaf == NULL) { fn->fn_flags &= ~RTN_RTINFO; net->ipv6.rt6_stats->fib_route_nodes--; fn = fib6_repair_tree(net, fn); @@ -1226,7 +1144,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, * to still alive ones. 
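The walker fix-ups in the fib6_del_route() hunk above move any in-progress walker off an entry that is about to be freed. A small user-space sketch of that "repoint live cursors before deleting" idea (list and cursor types are invented for the demo):

#include <stdio.h>
#include <stdlib.h>

struct entry {
        int val;
        struct entry *next;
};

struct cursor {
        struct entry *pos;      /* where an in-progress walk stopped */
};

static void delete_entry(struct entry **head, struct entry *victim,
                         struct cursor *cursors, int ncursors)
{
        struct entry **ins = head;
        int i;

        /* Point any parked cursor past the entry we are about to free. */
        for (i = 0; i < ncursors; i++)
                if (cursors[i].pos == victim)
                        cursors[i].pos = victim->next;

        while (*ins && *ins != victim)
                ins = &(*ins)->next;
        if (*ins) {
                *ins = victim->next;
                free(victim);
        }
}

int main(void)
{
        struct entry *c = malloc(sizeof(*c));
        struct entry *b = malloc(sizeof(*b));
        struct entry *a = malloc(sizeof(*a));
        struct cursor cur;

        c->val = 3; c->next = NULL;
        b->val = 2; b->next = c;
        a->val = 1; a->next = b;

        cur.pos = b;                    /* a walk paused on entry 2 */
        delete_entry(&a, b, &cur, 1);   /* delete it out from under it */

        printf("cursor resumes at %d\n", cur.pos ? cur.pos->val : -1);
        return 0;
}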
*/ while (fn) { - if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) { + if (!(fn->fn_flags&RTN_RTINFO) && fn->leaf == rt) { fn->leaf = fib6_find_prefix(net, fn); atomic_inc(&fn->leaf->rt6i_ref); rt6_release(rt); @@ -1253,17 +1171,17 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info) return -ENOENT; } #endif - if (!fn || rt == net->ipv6.ip6_null_entry) + if (fn == NULL || rt == net->ipv6.ip6_null_entry) return -ENOENT; WARN_ON(!(fn->fn_flags & RTN_RTINFO)); - if (!(rt->rt6i_flags & RTF_CACHE)) { + if (!(rt->rt6i_flags&RTF_CACHE)) { struct fib6_node *pn = fn; #ifdef CONFIG_IPV6_SUBTREES /* clones of this route might be in another subtree */ if (rt->rt6i_src.plen) { - while (!(pn->fn_flags & RTN_ROOT)) + while (!(pn->fn_flags&RTN_ROOT)) pn = pn->parent; pn = pn->parent; } @@ -1314,11 +1232,11 @@ static int fib6_walk_continue(struct fib6_walker_t *w) for (;;) { fn = w->node; - if (!fn) + if (fn == NULL) return 0; if (w->prune && fn != w->root && - fn->fn_flags & RTN_RTINFO && w->state < FWS_C) { + fn->fn_flags&RTN_RTINFO && w->state < FWS_C) { w->state = FWS_C; w->leaf = fn->leaf; } @@ -1347,7 +1265,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w) w->state = FWS_C; w->leaf = fn->leaf; case FWS_C: - if (w->leaf && fn->fn_flags & RTN_RTINFO) { + if (w->leaf && fn->fn_flags&RTN_RTINFO) { int err; if (w->count < w->skip) { @@ -1462,26 +1380,6 @@ static void fib6_clean_tree(struct net *net, struct fib6_node *root, fib6_walk(&c.w); } -void fib6_clean_all_ro(struct net *net, int (*func)(struct rt6_info *, void *arg), - int prune, void *arg) -{ - struct fib6_table *table; - struct hlist_node *node; - struct hlist_head *head; - unsigned int h; - - rcu_read_lock(); - for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { - head = &net->ipv6.fib_table_hash[h]; - hlist_for_each_entry_rcu(table, node, head, tb6_hlist) { - read_lock_bh(&table->tb6_lock); - fib6_clean_tree(net, &table->tb6_root, - func, prune, arg); - read_unlock_bh(&table->tb6_lock); - } - } - rcu_read_unlock(); -} void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg), int prune, void *arg) { @@ -1541,8 +1439,8 @@ static int fib6_age(struct rt6_info *rt, void *arg) * only if they are not in use now. 
*/ - if (rt->rt6i_flags & RTF_EXPIRES && rt->dst.expires) { - if (time_after(now, rt->dst.expires)) { + if (rt->rt6i_flags&RTF_EXPIRES && rt->rt6i_expires) { + if (time_after(now, rt->rt6i_expires)) { RT6_TRACE("expiring %p\n", rt); return -1; } @@ -1553,7 +1451,7 @@ static int fib6_age(struct rt6_info *rt, void *arg) RT6_TRACE("aging clone %p\n", rt); return -1; } else if ((rt->rt6i_flags & RTF_GATEWAY) && - (!(dst_get_neighbour_noref_raw(&rt->dst)->flags & NTF_ROUTER))) { + (!(dst_get_neighbour_raw(&rt->dst)->flags & NTF_ROUTER))) { RT6_TRACE("purging route %p via non-router but gateway\n", rt); return -1; diff --git a/trunk/net/ipv6/ip6_flowlabel.c b/trunk/net/ipv6/ip6_flowlabel.c index b7867a1215b1..4566dbd916d3 100644 --- a/trunk/net/ipv6/ip6_flowlabel.c +++ b/trunk/net/ipv6/ip6_flowlabel.c @@ -386,7 +386,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, err = -EINVAL; goto done; } - fl->dst = freq->flr_dst; + ipv6_addr_copy(&fl->dst, &freq->flr_dst); atomic_set(&fl->users, 1); switch (fl->share) { case IPV6_FL_S_EXCL: diff --git a/trunk/net/ipv6/ip6_input.c b/trunk/net/ipv6/ip6_input.c index 1ca5d45a12e8..a46c64eb0a66 100644 --- a/trunk/net/ipv6/ip6_input.c +++ b/trunk/net/ipv6/ip6_input.c @@ -280,7 +280,6 @@ int ip6_mc_input(struct sk_buff *skb) u8 *ptr = skb_network_header(skb) + opt->ra; struct icmp6hdr *icmp6; u8 nexthdr = hdr->nexthdr; - __be16 frag_off; int offset; /* Check if the value of Router Alert @@ -294,7 +293,7 @@ int ip6_mc_input(struct sk_buff *skb) goto out; } offset = ipv6_skip_exthdr(skb, sizeof(*hdr), - &nexthdr, &frag_off); + &nexthdr); if (offset < 0) goto out; diff --git a/trunk/net/ipv6/ip6_output.c b/trunk/net/ipv6/ip6_output.c index d97e07183ce9..ec562713db9b 100644 --- a/trunk/net/ipv6/ip6_output.c +++ b/trunk/net/ipv6/ip6_output.c @@ -136,7 +136,7 @@ static int ip6_finish_output2(struct sk_buff *skb) } rcu_read_lock(); - neigh = dst_get_neighbour_noref(dst); + neigh = dst_get_neighbour(dst); if (neigh) { int res = neigh_output(neigh, skb); @@ -238,8 +238,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, hdr->nexthdr = proto; hdr->hop_limit = hlimit; - hdr->saddr = fl6->saddr; - hdr->daddr = *first_hop; + ipv6_addr_copy(&hdr->saddr, &fl6->saddr); + ipv6_addr_copy(&hdr->daddr, first_hop); skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; @@ -290,8 +290,8 @@ int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev, hdr->nexthdr = proto; hdr->hop_limit = np->hop_limit; - hdr->saddr = *saddr; - hdr->daddr = *daddr; + ipv6_addr_copy(&hdr->saddr, saddr); + ipv6_addr_copy(&hdr->daddr, daddr); return 0; } @@ -329,11 +329,10 @@ static int ip6_forward_proxy_check(struct sk_buff *skb) { struct ipv6hdr *hdr = ipv6_hdr(skb); u8 nexthdr = hdr->nexthdr; - __be16 frag_off; int offset; if (ipv6_ext_hdr(nexthdr)) { - offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off); + offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr); if (offset < 0) return 0; } else @@ -463,7 +462,7 @@ int ip6_forward(struct sk_buff *skb) send redirects to source routed frames. We don't send redirects to frames decapsulated from IPsec. 
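The expiry checks in the fib6_age() hunk above rely on time_after(now, expires), which stays correct even when the jiffies counter wraps. A small sketch of the underlying signed-difference trick, with a plain counter standing in for jiffies (the ticks_after name is invented; the logic mirrors the standard macro):

#include <stdio.h>

typedef unsigned long ticks_t;

/* True if a is after b, even if the counter has wrapped in between,
 * as long as the two values are less than half the counter range apart. */
#define ticks_after(a, b)   ((long)((b) - (a)) < 0)

int main(void)
{
        ticks_t expires = (ticks_t)-10;         /* just before wraparound */
        ticks_t now     = 5;                    /* counter already wrapped */

        printf("expired: %s\n", ticks_after(now, expires) ? "yes" : "no");

        expires = 100;
        now = 50;
        printf("expired: %s\n", ticks_after(now, expires) ? "yes" : "no");
        return 0;
}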
*/ - n = dst_get_neighbour_noref(dst); + n = dst_get_neighbour(dst); if (skb->dev == dst->dev && n && opt->srcrt == 0 && !skb_sec_path(skb)) { struct in6_addr *target = NULL; struct rt6_info *rt; @@ -632,7 +631,6 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) struct ipv6hdr *tmp_hdr; struct frag_hdr *fh; unsigned int mtu, hlen, left, len; - int hroom, troom; __be32 frag_id = 0; int ptr, offset = 0, err=0; u8 *prevhdr, nexthdr = 0; @@ -799,8 +797,6 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) */ *prevhdr = NEXTHDR_FRAGMENT; - hroom = LL_RESERVED_SPACE(rt->dst.dev); - troom = rt->dst.dev->needed_tailroom; /* * Keep copying data until we run out. @@ -819,8 +815,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) * Allocate buffer. */ - if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) + - hroom + troom, GFP_ATOMIC)) == NULL) { + if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->dst.dev), GFP_ATOMIC)) == NULL) { NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); @@ -833,7 +828,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) */ ip6_copy_metadata(frag, skb); - skb_reserve(frag, hroom); + skb_reserve(frag, LL_RESERVED_SPACE(rt->dst.dev)); skb_put(frag, len + hlen + sizeof(struct frag_hdr)); skb_reset_network_header(frag); fh = (struct frag_hdr *)(skb_network_header(frag) + hlen); @@ -983,7 +978,7 @@ static int ip6_dst_lookup_tail(struct sock *sk, * dst entry of the nexthop router */ rcu_read_lock(); - n = dst_get_neighbour_noref(*dst); + n = dst_get_neighbour(*dst); if (n && !(n->nud_state & NUD_VALID)) { struct inet6_ifaddr *ifp; struct flowi6 fl_gw6; @@ -1064,7 +1059,7 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, if (err) return ERR_PTR(err); if (final_dst) - fl6->daddr = *final_dst; + ipv6_addr_copy(&fl6->daddr, final_dst); if (can_sleep) fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP; @@ -1100,7 +1095,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, if (err) return ERR_PTR(err); if (final_dst) - fl6->daddr = *final_dst; + ipv6_addr_copy(&fl6->daddr, final_dst); if (can_sleep) fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP; @@ -1593,7 +1588,7 @@ int ip6_push_pending_frames(struct sock *sk) if (np->pmtudisc < IPV6_PMTUDISC_DO) skb->local_df = 1; - *final_dst = fl6->daddr; + ipv6_addr_copy(final_dst, &fl6->daddr); __skb_pull(skb, skb_network_header_len(skb)); if (opt && opt->opt_flen) ipv6_push_frag_opts(skb, opt, &proto); @@ -1609,8 +1604,8 @@ int ip6_push_pending_frames(struct sock *sk) hdr->hop_limit = np->cork.hop_limit; hdr->nexthdr = proto; - hdr->saddr = fl6->saddr; - hdr->daddr = *final_dst; + ipv6_addr_copy(&hdr->saddr, &fl6->saddr); + ipv6_addr_copy(&hdr->daddr, final_dst); skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; diff --git a/trunk/net/ipv6/ip6_tunnel.c b/trunk/net/ipv6/ip6_tunnel.c index e1f7761815f3..4e2e9ff67ef2 100644 --- a/trunk/net/ipv6/ip6_tunnel.c +++ b/trunk/net/ipv6/ip6_tunnel.c @@ -93,7 +93,7 @@ struct pcpu_tstats { unsigned long rx_bytes; unsigned long tx_packets; unsigned long tx_bytes; -} __attribute__((aligned(4*sizeof(unsigned long)))); +}; static struct net_device_stats *ip6_get_stats(struct net_device *dev) { @@ -653,8 +653,8 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0); - if (rt 
&& rt->dst.dev) - skb2->dev = rt->dst.dev; + if (rt && rt->rt6i_dev) + skb2->dev = rt->rt6i_dev; icmpv6_send(skb2, rel_type, rel_code, rel_info); @@ -979,8 +979,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield); ipv6h->hop_limit = t->parms.hop_limit; ipv6h->nexthdr = proto; - ipv6h->saddr = fl6->saddr; - ipv6h->daddr = fl6->daddr; + ipv6_addr_copy(&ipv6h->saddr, &fl6->saddr); + ipv6_addr_copy(&ipv6h->daddr, &fl6->daddr); nf_reset(skb); pkt_len = skb->len; err = ip6_local_out(skb); @@ -1155,8 +1155,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t) memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr)); /* Set up flowi template */ - fl6->saddr = p->laddr; - fl6->daddr = p->raddr; + ipv6_addr_copy(&fl6->saddr, &p->laddr); + ipv6_addr_copy(&fl6->daddr, &p->raddr); fl6->flowi6_oif = p->link; fl6->flowlabel = 0; @@ -1185,11 +1185,11 @@ static void ip6_tnl_link_config(struct ip6_tnl *t) if (rt == NULL) return; - if (rt->dst.dev) { - dev->hard_header_len = rt->dst.dev->hard_header_len + + if (rt->rt6i_dev) { + dev->hard_header_len = rt->rt6i_dev->hard_header_len + sizeof (struct ipv6hdr); - dev->mtu = rt->dst.dev->mtu - sizeof (struct ipv6hdr); + dev->mtu = rt->rt6i_dev->mtu - sizeof (struct ipv6hdr); if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) dev->mtu-=8; @@ -1212,8 +1212,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t) static int ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p) { - t->parms.laddr = p->laddr; - t->parms.raddr = p->raddr; + ipv6_addr_copy(&t->parms.laddr, &p->laddr); + ipv6_addr_copy(&t->parms.raddr, &p->raddr); t->parms.flags = p->flags; t->parms.hop_limit = p->hop_limit; t->parms.encap_limit = p->encap_limit; diff --git a/trunk/net/ipv6/ip6mr.c b/trunk/net/ipv6/ip6mr.c index c7e95c8c579f..449a9185b8f2 100644 --- a/trunk/net/ipv6/ip6mr.c +++ b/trunk/net/ipv6/ip6mr.c @@ -1105,8 +1105,8 @@ static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt, msg->im6_msgtype = MRT6MSG_WHOLEPKT; msg->im6_mif = mrt->mroute_reg_vif_num; msg->im6_pad = 0; - msg->im6_src = ipv6_hdr(pkt)->saddr; - msg->im6_dst = ipv6_hdr(pkt)->daddr; + ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr); + ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr); skb->ip_summed = CHECKSUM_UNNECESSARY; } else @@ -1131,8 +1131,8 @@ static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt, msg->im6_msgtype = assert; msg->im6_mif = mifi; msg->im6_pad = 0; - msg->im6_src = ipv6_hdr(pkt)->saddr; - msg->im6_dst = ipv6_hdr(pkt)->daddr; + ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr); + ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr); skb_dst_set(skb, dst_clone(skb_dst(pkt))); skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -2181,8 +2181,8 @@ int ip6mr_get_route(struct net *net, iph->payload_len = 0; iph->nexthdr = IPPROTO_NONE; iph->hop_limit = 0; - iph->saddr = rt->rt6i_src.addr; - iph->daddr = rt->rt6i_dst.addr; + ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr); + ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr); err = ip6mr_cache_unresolved(mrt, vif, skb2); read_unlock(&mrt_lock); diff --git a/trunk/net/ipv6/ipv6_sockglue.c b/trunk/net/ipv6/ipv6_sockglue.c index 18a2719003c3..26cb08c84b74 100644 --- a/trunk/net/ipv6/ipv6_sockglue.c +++ b/trunk/net/ipv6/ipv6_sockglue.c @@ -435,7 +435,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, goto e_inval; np->sticky_pktinfo.ipi6_ifindex = pkt.ipi6_ifindex; - np->sticky_pktinfo.ipi6_addr = pkt.ipi6_addr; + 
ipv6_addr_copy(&np->sticky_pktinfo.ipi6_addr, &pkt.ipi6_addr); retv = 0; break; } @@ -980,7 +980,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, struct in6_pktinfo src_info; src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif : np->sticky_pktinfo.ipi6_ifindex; - src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr; + np->mcast_oif? ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr) : + ipv6_addr_copy(&src_info.ipi6_addr, &(np->sticky_pktinfo.ipi6_addr)); put_cmsg(&msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info); } if (np->rxopt.bits.rxhlim) { @@ -991,7 +992,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, struct in6_pktinfo src_info; src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif : np->sticky_pktinfo.ipi6_ifindex; - src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr; + np->mcast_oif? ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr) : + ipv6_addr_copy(&src_info.ipi6_addr, &(np->sticky_pktinfo.ipi6_addr)); put_cmsg(&msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info); } if (np->rxopt.bits.rxohlim) { diff --git a/trunk/net/ipv6/mcast.c b/trunk/net/ipv6/mcast.c index b853f06cc148..ee7839f4d6e3 100644 --- a/trunk/net/ipv6/mcast.c +++ b/trunk/net/ipv6/mcast.c @@ -155,14 +155,14 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) return -ENOMEM; mc_lst->next = NULL; - mc_lst->addr = *addr; + ipv6_addr_copy(&mc_lst->addr, addr); rcu_read_lock(); if (ifindex == 0) { struct rt6_info *rt; rt = rt6_lookup(net, addr, NULL, 0, 0); if (rt) { - dev = rt->dst.dev; + dev = rt->rt6i_dev; dst_release(&rt->dst); } } else @@ -256,7 +256,7 @@ static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net, struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, 0); if (rt) { - dev = rt->dst.dev; + dev = rt->rt6i_dev; dev_hold(dev); dst_release(&rt->dst); } @@ -858,7 +858,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc); - mc->mca_addr = *addr; + ipv6_addr_copy(&mc->mca_addr, addr); mc->idev = idev; /* (reference taken) */ mc->mca_users = 1; /* mca_stamp should be updated upon changes */ @@ -1343,15 +1343,13 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size) struct mld2_report *pmr; struct in6_addr addr_buf; const struct in6_addr *saddr; - int hlen = LL_RESERVED_SPACE(dev); - int tlen = dev->needed_tailroom; int err; u8 ra[8] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT, 2, 0, 0, IPV6_TLV_PADN, 0 }; /* we assume size > sizeof(ra) here */ - size += hlen + tlen; + size += LL_ALLOCATED_SPACE(dev); /* limit our allocations to order-0 page */ size = min_t(int, size, SKB_MAX_ORDER(0, 0)); skb = sock_alloc_send_skb(sk, size, 1, &err); @@ -1359,7 +1357,7 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size) if (!skb) return NULL; - skb_reserve(skb, hlen); + skb_reserve(skb, LL_RESERVED_SPACE(dev)); if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) { /* : @@ -1410,11 +1408,18 @@ static void mld_sendpack(struct sk_buff *skb) csum_partial(skb_transport_header(skb), mldlen, 0)); + dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr); + + if (!dst) { + err = -ENOMEM; + goto err_out; + } + icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, skb->dev->ifindex); - dst = icmp6_dst_alloc(skb->dev, NULL, &fl6); + dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); err 
= 0; if (IS_ERR(dst)) { err = PTR_ERR(dst); @@ -1718,8 +1723,6 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) struct mld_msg *hdr; const struct in6_addr *snd_addr, *saddr; struct in6_addr addr_buf; - int hlen = LL_RESERVED_SPACE(dev); - int tlen = dev->needed_tailroom; int err, len, payload_len, full_len; u8 ra[8] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT, 2, 0, 0, @@ -1741,7 +1744,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) IPSTATS_MIB_OUT, full_len); rcu_read_unlock(); - skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err); + skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + full_len, 1, &err); if (skb == NULL) { rcu_read_lock(); @@ -1751,7 +1754,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) return; } - skb_reserve(skb, hlen); + skb_reserve(skb, LL_RESERVED_SPACE(dev)); if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) { /* : @@ -1769,7 +1772,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg)); memset(hdr, 0, sizeof(struct mld_msg)); hdr->mld_type = type; - hdr->mld_mca = *addr; + ipv6_addr_copy(&hdr->mld_mca, addr); hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len, IPPROTO_ICMPV6, @@ -1778,10 +1781,17 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) rcu_read_lock(); idev = __in6_dev_get(skb->dev); + dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr); + if (!dst) { + err = -ENOMEM; + goto err_out; + } + icmpv6_flow_init(sk, &fl6, type, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, skb->dev->ifindex); - dst = icmp6_dst_alloc(skb->dev, NULL, &fl6); + + dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto err_out; @@ -1904,7 +1914,7 @@ static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca, * Add multicast single-source filter to the interface list */ static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode, - const struct in6_addr *psfsrc) + const struct in6_addr *psfsrc, int delta) { struct ip6_sf_list *psf, *psf_prev; @@ -2035,7 +2045,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca, pmc->mca_sfcount[sfmode]++; err = 0; for (i=0; itv_sec; mip6_report_rl.stamp.tv_usec = stamp->tv_usec; mip6_report_rl.iif = iif; - mip6_report_rl.src = *src; - mip6_report_rl.dst = *dst; + ipv6_addr_copy(&mip6_report_rl.src, src); + ipv6_addr_copy(&mip6_report_rl.dst, dst); allow = 1; } spin_unlock_bh(&mip6_report_rl.lock); diff --git a/trunk/net/ipv6/ndisc.c b/trunk/net/ipv6/ndisc.c index d8f02ef88e59..0cb78d7ddaf5 100644 --- a/trunk/net/ipv6/ndisc.c +++ b/trunk/net/ipv6/ndisc.c @@ -93,7 +93,7 @@ static u32 ndisc_hash(const void *pkey, const struct net_device *dev, - __u32 *hash_rnd); + __u32 rnd); static int ndisc_constructor(struct neighbour *neigh); static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb); static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb); @@ -126,6 +126,7 @@ static const struct neigh_ops ndisc_direct_ops = { struct neigh_table nd_tbl = { .family = AF_INET6, + .entry_size = sizeof(struct neighbour) + sizeof(struct in6_addr), .key_len = sizeof(struct in6_addr), .hash = ndisc_hash, .constructor = ndisc_constructor, @@ -140,7 +141,7 @@ struct neigh_table nd_tbl = { .gc_staletime = 60 * HZ, .reachable_time = ND_REACHABLE_TIME, .delay_probe_time = 5 * HZ, - 
.queue_len_bytes = 64*1024, + .queue_len = 3, .ucast_probes = 3, .mcast_probes = 3, .anycast_delay = 1 * HZ, @@ -349,9 +350,16 @@ EXPORT_SYMBOL(ndisc_mc_map); static u32 ndisc_hash(const void *pkey, const struct net_device *dev, - __u32 *hash_rnd) + __u32 hash_rnd) { - return ndisc_hashfn(pkey, dev, hash_rnd); + const u32 *p32 = pkey; + u32 addr_hash, i; + + addr_hash = 0; + for (i = 0; i < (sizeof(struct in6_addr) / sizeof(u32)); i++) + addr_hash ^= *p32++; + + return jhash_2words(addr_hash, dev->ifindex, hash_rnd); } static int ndisc_constructor(struct neighbour *neigh) @@ -438,8 +446,6 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev, struct sock *sk = net->ipv6.ndisc_sk; struct sk_buff *skb; struct icmp6hdr *hdr; - int hlen = LL_RESERVED_SPACE(dev); - int tlen = dev->needed_tailroom; int len; int err; u8 *opt; @@ -453,7 +459,7 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev, skb = sock_alloc_send_skb(sk, (MAX_HEADER + sizeof(struct ipv6hdr) + - len + hlen + tlen), + len + LL_ALLOCATED_SPACE(dev)), 1, &err); if (!skb) { ND_PRINTK0(KERN_ERR @@ -462,7 +468,7 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev, return NULL; } - skb_reserve(skb, hlen); + skb_reserve(skb, LL_RESERVED_SPACE(dev)); ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len); skb->transport_header = skb->tail; @@ -473,7 +479,7 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev, opt = skb_transport_header(skb) + sizeof(struct icmp6hdr); if (target) { - *(struct in6_addr *)opt = *target; + ipv6_addr_copy((struct in6_addr *)opt, target); opt += sizeof(*target); } @@ -509,7 +515,14 @@ void ndisc_send_skb(struct sk_buff *skb, type = icmp6h->icmp6_type; icmpv6_flow_init(sk, &fl6, type, saddr, daddr, dev->ifindex); - dst = icmp6_dst_alloc(dev, neigh, &fl6); + + dst = icmp6_dst_alloc(dev, neigh, daddr); + if (!dst) { + kfree_skb(skb); + return; + } + + dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); if (IS_ERR(dst)) { kfree_skb(skb); return; @@ -1224,7 +1237,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev); if (rt) - neigh = dst_get_neighbour_noref(&rt->dst); + neigh = dst_get_neighbour(&rt->dst); if (rt && lifetime == 0) { neigh_clone(neigh); @@ -1244,7 +1257,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) return; } - neigh = dst_get_neighbour_noref(&rt->dst); + neigh = dst_get_neighbour(&rt->dst); if (neigh == NULL) { ND_PRINTK0(KERN_ERR "ICMPv6 RA: %s() got default router without neighbour.\n", @@ -1258,7 +1271,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) } if (rt) - rt->dst.expires = jiffies + (HZ * lifetime); + rt->rt6i_expires = jiffies + (HZ * lifetime); if (ra_msg->icmph.icmp6_hop_limit) { in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit; @@ -1368,9 +1381,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) for (p = ndopts.nd_opts_pi; p; p = ndisc_next_option(p, ndopts.nd_opts_pi_end)) { - addrconf_prefix_rcv(skb->dev, (u8 *)p, - (p->nd_opt_len) << 3, - ndopts.nd_opts_src_lladdr != NULL); + addrconf_prefix_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3); } } @@ -1522,7 +1533,6 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, struct inet6_dev *idev; struct flowi6 fl6; u8 *opt; - int hlen, tlen; int rd_len; int err; u8 ha_buf[MAX_ADDR_LEN], *ha = NULL; @@ -1580,11 +1590,9 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, rd_len &= ~0x7; len += rd_len; - hlen = LL_RESERVED_SPACE(dev); - tlen = dev->needed_tailroom; 
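The ndisc_hash() hunk above folds the four 32-bit words of the neighbour's IPv6 address with XOR and then mixes the fold with the interface index and a random seed. A user-space sketch of the same folding step follows; the final mixer here is a simple multiplicative stand-in, not the kernel's jhash_2words(), and the struct and constants are invented for the demo:

#include <stdio.h>
#include <stdint.h>

struct in6_addr_demo {
        uint32_t s6_addr32[4];
};

static uint32_t mix(uint32_t a, uint32_t b, uint32_t seed)
{
        uint32_t h = seed ^ a;

        h = h * 0x9e3779b1u + b;        /* cheap stand-in mixer */
        h ^= h >> 16;
        return h;
}

static uint32_t addr_hash(const struct in6_addr_demo *addr,
                          int ifindex, uint32_t seed)
{
        uint32_t fold = 0;
        int i;

        for (i = 0; i < 4; i++)
                fold ^= addr->s6_addr32[i];

        return mix(fold, (uint32_t)ifindex, seed);
}

int main(void)
{
        struct in6_addr_demo a = { { 0xfe800000u, 0, 0x021b21ffu, 0xfe123456u } };

        printf("hash = 0x%08x\n", addr_hash(&a, 2, 0xdeadbeefu));
        return 0;
}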
buff = sock_alloc_send_skb(sk, (MAX_HEADER + sizeof(struct ipv6hdr) + - len + hlen + tlen), + len + LL_ALLOCATED_SPACE(dev)), 1, &err); if (buff == NULL) { ND_PRINTK0(KERN_ERR @@ -1593,7 +1601,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, goto release; } - skb_reserve(buff, hlen); + skb_reserve(buff, LL_RESERVED_SPACE(dev)); ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr, IPPROTO_ICMPV6, len); @@ -1609,9 +1617,9 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, */ addrp = (struct in6_addr *)(icmph + 1); - *addrp = *target; + ipv6_addr_copy(addrp, target); addrp++; - *addrp = ipv6_hdr(skb)->daddr; + ipv6_addr_copy(addrp, &ipv6_hdr(skb)->daddr); opt = (u8*) (addrp + 1); diff --git a/trunk/net/ipv6/netfilter/Kconfig b/trunk/net/ipv6/netfilter/Kconfig index 9a68fb5b9e77..f792b34cbe9c 100644 --- a/trunk/net/ipv6/netfilter/Kconfig +++ b/trunk/net/ipv6/netfilter/Kconfig @@ -125,16 +125,6 @@ config IP6_NF_MATCH_MH To compile it as a module, choose M here. If unsure, say N. -config IP6_NF_MATCH_RPFILTER - tristate '"rpfilter" reverse path filter match support' - depends on NETFILTER_ADVANCED - ---help--- - This option allows you to match packets whose replies would - go out via the interface the packet came in. - - To compile it as a module, choose M here. If unsure, say N. - The module will be called ip6t_rpfilter. - config IP6_NF_MATCH_RT tristate '"rt" Routing header match support' depends on NETFILTER_ADVANCED diff --git a/trunk/net/ipv6/netfilter/Makefile b/trunk/net/ipv6/netfilter/Makefile index 2eaed96db02c..abfee91ce816 100644 --- a/trunk/net/ipv6/netfilter/Makefile +++ b/trunk/net/ipv6/netfilter/Makefile @@ -27,7 +27,6 @@ obj-$(CONFIG_IP6_NF_MATCH_FRAG) += ip6t_frag.o obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o obj-$(CONFIG_IP6_NF_MATCH_MH) += ip6t_mh.o obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o -obj-$(CONFIG_IP6_NF_MATCH_RPFILTER) += ip6t_rpfilter.o obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o # targets diff --git a/trunk/net/ipv6/netfilter/ip6_queue.c b/trunk/net/ipv6/netfilter/ip6_queue.c index fb80a23c6640..e63c3972a739 100644 --- a/trunk/net/ipv6/netfilter/ip6_queue.c +++ b/trunk/net/ipv6/netfilter/ip6_queue.c @@ -405,7 +405,6 @@ __ipq_rcv_skb(struct sk_buff *skb) int status, type, pid, flags; unsigned int nlmsglen, skblen; struct nlmsghdr *nlh; - bool enable_timestamp = false; skblen = skb->len; if (skblen < sizeof(*nlh)) @@ -443,13 +442,11 @@ __ipq_rcv_skb(struct sk_buff *skb) RCV_SKB_FAIL(-EBUSY); } } else { - enable_timestamp = true; + net_enable_timestamp(); peer_pid = pid; } spin_unlock_bh(&queue_lock); - if (enable_timestamp) - net_enable_timestamp(); status = ipq_receive_peer(NLMSG_DATA(nlh), type, nlmsglen - NLMSG_LENGTH(0)); diff --git a/trunk/net/ipv6/netfilter/ip6t_REJECT.c b/trunk/net/ipv6/netfilter/ip6t_REJECT.c index aad2fa41cf46..a5a4c5dd5396 100644 --- a/trunk/net/ipv6/netfilter/ip6t_REJECT.c +++ b/trunk/net/ipv6/netfilter/ip6t_REJECT.c @@ -49,7 +49,6 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) const __u8 tclass = DEFAULT_TOS_VALUE; struct dst_entry *dst = NULL; u8 proto; - __be16 frag_off; struct flowi6 fl6; if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) || @@ -59,7 +58,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) } proto = oip6h->nexthdr; - tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off); + tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto); if ((tcphoff < 0) || 
(tcphoff > oldskb->len)) { pr_debug("Cannot get TCP header.\n"); @@ -94,8 +93,8 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_TCP; - fl6.saddr = oip6h->daddr; - fl6.daddr = oip6h->saddr; + ipv6_addr_copy(&fl6.saddr, &oip6h->daddr); + ipv6_addr_copy(&fl6.daddr, &oip6h->saddr); fl6.fl6_sport = otcph.dest; fl6.fl6_dport = otcph.source; security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); @@ -130,8 +129,8 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) *(__be32 *)ip6h = htonl(0x60000000 | (tclass << 20)); ip6h->hop_limit = ip6_dst_hoplimit(dst); ip6h->nexthdr = IPPROTO_TCP; - ip6h->saddr = oip6h->daddr; - ip6h->daddr = oip6h->saddr; + ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr); + ipv6_addr_copy(&ip6h->daddr, &oip6h->saddr); tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); /* Truncate to length (no data) */ diff --git a/trunk/net/ipv6/netfilter/ip6t_rpfilter.c b/trunk/net/ipv6/netfilter/ip6t_rpfilter.c deleted file mode 100644 index 5d1d8b04d694..000000000000 --- a/trunk/net/ipv6/netfilter/ip6t_rpfilter.c +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright (c) 2011 Florian Westphal - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include -#include -#include -#include -#include -#include - -#include -#include - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Florian Westphal "); -MODULE_DESCRIPTION("Xtables: IPv6 reverse path filter match"); - -static bool rpfilter_addr_unicast(const struct in6_addr *addr) -{ - int addr_type = ipv6_addr_type(addr); - return addr_type & IPV6_ADDR_UNICAST; -} - -static bool rpfilter_lookup_reverse6(const struct sk_buff *skb, - const struct net_device *dev, u8 flags) -{ - struct rt6_info *rt; - struct ipv6hdr *iph = ipv6_hdr(skb); - bool ret = false; - struct flowi6 fl6 = { - .flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK, - .flowi6_proto = iph->nexthdr, - .daddr = iph->saddr, - }; - int lookup_flags; - - if (rpfilter_addr_unicast(&iph->daddr)) { - memcpy(&fl6.saddr, &iph->daddr, sizeof(struct in6_addr)); - lookup_flags = RT6_LOOKUP_F_HAS_SADDR; - } else { - lookup_flags = 0; - } - - fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? 
skb->mark : 0; - if ((flags & XT_RPFILTER_LOOSE) == 0) { - fl6.flowi6_oif = dev->ifindex; - lookup_flags |= RT6_LOOKUP_F_IFACE; - } - - rt = (void *) ip6_route_lookup(dev_net(dev), &fl6, lookup_flags); - if (rt->dst.error) - goto out; - - if (rt->rt6i_flags & (RTF_REJECT|RTF_ANYCAST)) - goto out; - - if (rt->rt6i_flags & RTF_LOCAL) { - ret = flags & XT_RPFILTER_ACCEPT_LOCAL; - goto out; - } - - if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE)) - ret = true; - out: - dst_release(&rt->dst); - return ret; -} - -static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) -{ - const struct xt_rpfilter_info *info = par->matchinfo; - int saddrtype; - struct ipv6hdr *iph; - bool invert = info->flags & XT_RPFILTER_INVERT; - - if (par->in->flags & IFF_LOOPBACK) - return true ^ invert; - - iph = ipv6_hdr(skb); - saddrtype = ipv6_addr_type(&iph->saddr); - if (unlikely(saddrtype == IPV6_ADDR_ANY)) - return true ^ invert; /* not routable: forward path will drop it */ - - return rpfilter_lookup_reverse6(skb, par->in, info->flags) ^ invert; -} - -static int rpfilter_check(const struct xt_mtchk_param *par) -{ - const struct xt_rpfilter_info *info = par->matchinfo; - unsigned int options = ~XT_RPFILTER_OPTION_MASK; - - if (info->flags & options) { - pr_info("unknown options encountered"); - return -EINVAL; - } - - if (strcmp(par->table, "mangle") != 0 && - strcmp(par->table, "raw") != 0) { - pr_info("match only valid in the \'raw\' " - "or \'mangle\' tables, not \'%s\'.\n", par->table); - return -EINVAL; - } - - return 0; -} - -static struct xt_match rpfilter_mt_reg __read_mostly = { - .name = "rpfilter", - .family = NFPROTO_IPV6, - .checkentry = rpfilter_check, - .match = rpfilter_mt, - .matchsize = sizeof(struct xt_rpfilter_info), - .hooks = (1 << NF_INET_PRE_ROUTING), - .me = THIS_MODULE -}; - -static int __init rpfilter_mt_init(void) -{ - return xt_register_match(&rpfilter_mt_reg); -} - -static void __exit rpfilter_mt_exit(void) -{ - xt_unregister_match(&rpfilter_mt_reg); -} - -module_init(rpfilter_mt_init); -module_exit(rpfilter_mt_exit); diff --git a/trunk/net/ipv6/netfilter/ip6table_filter.c b/trunk/net/ipv6/netfilter/ip6table_filter.c index a8f6da97e3b2..c9e37c8fd62c 100644 --- a/trunk/net/ipv6/netfilter/ip6table_filter.c +++ b/trunk/net/ipv6/netfilter/ip6table_filter.c @@ -44,7 +44,7 @@ ip6table_filter_hook(unsigned int hook, struct sk_buff *skb, static struct nf_hook_ops *filter_ops __read_mostly; /* Default to forward because I got too much mail already. */ -static bool forward = NF_ACCEPT; +static int forward = NF_ACCEPT; module_param(forward, bool, 0000); static int __net_init ip6table_filter_net_init(struct net *net) diff --git a/trunk/net/ipv6/proc.c b/trunk/net/ipv6/proc.c index fdeb6d03da81..1008ce94bc33 100644 --- a/trunk/net/ipv6/proc.c +++ b/trunk/net/ipv6/proc.c @@ -142,7 +142,11 @@ static const struct snmp_mib snmp6_udplite6_list[] = { SNMP_MIB_SENTINEL }; -static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib) +/* can be called either with percpu mib (pcpumib != NULL), + * or shared one (smib != NULL) + */ +static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **pcpumib, + atomic_long_t *smib) { char name[32]; int i; @@ -159,14 +163,14 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib) snprintf(name, sizeof(name), "Icmp6%s%s", i & 0x100 ? "Out" : "In", p); seq_printf(seq, "%-32s\t%lu\n", name, - atomic_long_read(smib + i)); + pcpumib ? 
snmp_fold_field(pcpumib, i) : atomic_long_read(smib + i)); } /* print by number (nonzero only) - ICMPMsgStat format */ for (i = 0; i < ICMP6MSG_MIB_MAX; i++) { unsigned long val; - val = atomic_long_read(smib + i); + val = pcpumib ? snmp_fold_field(pcpumib, i) : atomic_long_read(smib + i); if (!val) continue; snprintf(name, sizeof(name), "Icmp6%sType%u", @@ -211,7 +215,8 @@ static int snmp6_seq_show(struct seq_file *seq, void *v) snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp)); snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics, NULL, snmp6_icmp6_list); - snmp6_seq_show_icmpv6msg(seq, net->mib.icmpv6msg_statistics->mibs); + snmp6_seq_show_icmpv6msg(seq, + (void __percpu **)net->mib.icmpv6msg_statistics, NULL); snmp6_seq_show_item(seq, (void __percpu **)net->mib.udp_stats_in6, NULL, snmp6_udp6_list); snmp6_seq_show_item(seq, (void __percpu **)net->mib.udplite_stats_in6, @@ -241,7 +246,7 @@ static int snmp6_dev_seq_show(struct seq_file *seq, void *v) snmp6_ipstats_list); snmp6_seq_show_item(seq, NULL, idev->stats.icmpv6dev->mibs, snmp6_icmp6_list); - snmp6_seq_show_icmpv6msg(seq, idev->stats.icmpv6msgdev->mibs); + snmp6_seq_show_icmpv6msg(seq, NULL, idev->stats.icmpv6msgdev->mibs); return 0; } diff --git a/trunk/net/ipv6/raw.c b/trunk/net/ipv6/raw.c index a4894f4f1944..331af3b882ac 100644 --- a/trunk/net/ipv6/raw.c +++ b/trunk/net/ipv6/raw.c @@ -299,9 +299,9 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) } inet->inet_rcv_saddr = inet->inet_saddr = v4addr; - np->rcv_saddr = addr->sin6_addr; + ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr); if (!(addr_type & IPV6_ADDR_MULTICAST)) - np->saddr = addr->sin6_addr; + ipv6_addr_copy(&np->saddr, &addr->sin6_addr); err = 0; out_unlock: rcu_read_unlock(); @@ -383,8 +383,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb) } /* Charge it to the socket. 
*/ - skb_dst_drop(skb); - if (sock_queue_rcv_skb(sk, skb) < 0) { + if (ip_queue_rcv_skb(sk, skb) < 0) { kfree_skb(skb); return NET_RX_DROP; } @@ -495,7 +494,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, if (sin6) { sin6->sin6_family = AF_INET6; sin6->sin6_port = 0; - sin6->sin6_addr = ipv6_hdr(skb)->saddr; + ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr); sin6->sin6_flowinfo = 0; sin6->sin6_scope_id = 0; if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) @@ -611,8 +610,6 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, struct sk_buff *skb; int err; struct rt6_info *rt = (struct rt6_info *)*dstp; - int hlen = LL_RESERVED_SPACE(rt->dst.dev); - int tlen = rt->dst.dev->needed_tailroom; if (length > rt->dst.dev->mtu) { ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu); @@ -622,11 +619,11 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, goto out; skb = sock_alloc_send_skb(sk, - length + hlen + tlen + 15, + length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15, flags & MSG_DONTWAIT, &err); if (skb == NULL) goto error; - skb_reserve(skb, hlen); + skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev)); skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; @@ -846,11 +843,11 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, goto out; if (!ipv6_addr_any(daddr)) - fl6.daddr = *daddr; + ipv6_addr_copy(&fl6.daddr, daddr); else fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr)) - fl6.saddr = np->saddr; + ipv6_addr_copy(&fl6.saddr, &np->saddr); final_p = fl6_update_dst(&fl6, opt, &final); diff --git a/trunk/net/ipv6/reassembly.c b/trunk/net/ipv6/reassembly.c index b69fae76a6f1..dfb164e9051a 100644 --- a/trunk/net/ipv6/reassembly.c +++ b/trunk/net/ipv6/reassembly.c @@ -153,8 +153,8 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a) fq->id = arg->id; fq->user = arg->user; - fq->saddr = *arg->src; - fq->daddr = *arg->dst; + ipv6_addr_copy(&fq->saddr, arg->src); + ipv6_addr_copy(&fq->daddr, arg->dst); } EXPORT_SYMBOL(ip6_frag_init); diff --git a/trunk/net/ipv6/route.c b/trunk/net/ipv6/route.c index 07361dfa8085..b582a0a0f1c5 100644 --- a/trunk/net/ipv6/route.c +++ b/trunk/net/ipv6/route.c @@ -62,6 +62,17 @@ #include #endif +/* Set to 3 to get tracing. */ +#define RT6_DEBUG 2 + +#if RT6_DEBUG >= 3 +#define RDBG(x) printk x +#define RT6_TRACE(x...) printk(KERN_DEBUG x) +#else +#define RDBG(x) +#define RT6_TRACE(x...) 
do { ; } while (0) +#endif + static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, const struct in6_addr *dest); static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie); @@ -123,23 +134,7 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old) static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, const void *daddr) { - struct neighbour *n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr); - if (n) - return n; - return neigh_create(&nd_tbl, daddr, dst->dev); -} - -static int rt6_bind_neighbour(struct rt6_info *rt, struct net_device *dev) -{ - struct neighbour *n = __ipv6_neigh_lookup(&nd_tbl, dev, &rt->rt6i_gateway); - if (!n) { - n = neigh_create(&nd_tbl, &rt->rt6i_gateway, dev); - if (IS_ERR(n)) - return PTR_ERR(n); - } - dst_set_neighbour(&rt->dst, n); - - return 0; + return __neigh_lookup_errno(&nd_tbl, daddr, dst->dev); } static struct dst_ops ip6_dst_ops_template = { @@ -252,9 +247,9 @@ static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops, { struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags); - if (rt) + if (rt != NULL) memset(&rt->rt6i_table, 0, - sizeof(*rt) - sizeof(struct dst_entry)); + sizeof(*rt) - sizeof(struct dst_entry)); return rt; } @@ -268,7 +263,7 @@ static void ip6_dst_destroy(struct dst_entry *dst) if (!(rt->dst.flags & DST_HOST)) dst_destroy_metrics_generic(dst); - if (idev) { + if (idev != NULL) { rt->rt6i_idev = NULL; in6_dev_put(idev); } @@ -304,10 +299,10 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, struct net_device *loopback_dev = dev_net(dev)->loopback_dev; - if (dev != loopback_dev && idev && idev->dev == dev) { + if (dev != loopback_dev && idev != NULL && idev->dev == dev) { struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev); - if (loopback_idev) { + if (loopback_idev != NULL) { rt->rt6i_idev = loopback_idev; in6_dev_put(idev); } @@ -317,7 +312,7 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, static __inline__ int rt6_check_expired(const struct rt6_info *rt) { return (rt->rt6i_flags & RTF_EXPIRES) && - time_after(jiffies, rt->dst.expires); + time_after(jiffies, rt->rt6i_expires); } static inline int rt6_need_strict(const struct in6_addr *daddr) @@ -343,13 +338,13 @@ static inline struct rt6_info *rt6_device_match(struct net *net, goto out; for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) { - struct net_device *dev = sprt->dst.dev; + struct net_device *dev = sprt->rt6i_dev; if (oif) { if (dev->ifindex == oif) return sprt; if (dev->flags & IFF_LOOPBACK) { - if (!sprt->rt6i_idev || + if (sprt->rt6i_idev == NULL || sprt->rt6i_idev->dev->ifindex != oif) { if (flags & RT6_LOOKUP_F_IFACE && oif) continue; @@ -390,7 +385,7 @@ static void rt6_probe(struct rt6_info *rt) * to no more than one per minute. */ rcu_read_lock(); - neigh = rt ? dst_get_neighbour_noref(&rt->dst) : NULL; + neigh = rt ? 
dst_get_neighbour(&rt->dst) : NULL; if (!neigh || (neigh->nud_state & NUD_VALID)) goto out; read_lock_bh(&neigh->lock); @@ -404,7 +399,7 @@ static void rt6_probe(struct rt6_info *rt) target = (struct in6_addr *)&neigh->primary_key; addrconf_addr_solict_mult(target, &mcaddr); - ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL); + ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL); } else { read_unlock_bh(&neigh->lock); } @@ -422,7 +417,7 @@ static inline void rt6_probe(struct rt6_info *rt) */ static inline int rt6_check_dev(struct rt6_info *rt, int oif) { - struct net_device *dev = rt->dst.dev; + struct net_device *dev = rt->rt6i_dev; if (!oif || dev->ifindex == oif) return 2; if ((dev->flags & IFF_LOOPBACK) && @@ -437,7 +432,7 @@ static inline int rt6_check_neigh(struct rt6_info *rt) int m; rcu_read_lock(); - neigh = dst_get_neighbour_noref(&rt->dst); + neigh = dst_get_neighbour(&rt->dst); if (rt->rt6i_flags & RTF_NONEXTHOP || !(rt->rt6i_flags & RTF_GATEWAY)) m = 1; @@ -523,6 +518,9 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict) struct rt6_info *match, *rt0; struct net *net; + RT6_TRACE("%s(fn->leaf=%p, oif=%d)\n", + __func__, fn->leaf, oif); + rt0 = fn->rr_ptr; if (!rt0) fn->rr_ptr = rt0 = fn->leaf; @@ -541,7 +539,10 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict) fn->rr_ptr = next; } - net = dev_net(rt0->dst.dev); + RT6_TRACE("%s() => %p\n", + __func__, match); + + net = dev_net(rt0->rt6i_dev); return match ? match : net->ipv6.ip6_null_entry; } @@ -610,7 +611,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, if (!addrconf_finite_timeout(lifetime)) { rt->rt6i_flags &= ~RTF_EXPIRES; } else { - rt->dst.expires = jiffies + HZ * lifetime; + rt->rt6i_expires = jiffies + HZ * lifetime; rt->rt6i_flags |= RTF_EXPIRES; } dst_release(&rt->dst); @@ -635,7 +636,7 @@ do { \ goto restart; \ } \ } \ -} while (0) +} while(0) static struct rt6_info *ip6_pol_route_lookup(struct net *net, struct fib6_table *table, @@ -657,13 +658,6 @@ static struct rt6_info *ip6_pol_route_lookup(struct net *net, } -struct dst_entry * ip6_route_lookup(struct net *net, struct flowi6 *fl6, - int flags) -{ - return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup); -} -EXPORT_SYMBOL_GPL(ip6_route_lookup); - struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr, const struct in6_addr *saddr, int oif, int strict) { @@ -712,7 +706,7 @@ static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info) int ip6_ins_rt(struct rt6_info *rt) { struct nl_info info = { - .nl_net = dev_net(rt->dst.dev), + .nl_net = dev_net(rt->rt6i_dev), }; return __ip6_ins_rt(rt, &info); } @@ -730,27 +724,29 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort, rt = ip6_rt_copy(ort, daddr); if (rt) { + struct neighbour *neigh; int attempts = !in_softirq(); - if (!(rt->rt6i_flags & RTF_GATEWAY)) { + if (!(rt->rt6i_flags&RTF_GATEWAY)) { if (ort->rt6i_dst.plen != 128 && ipv6_addr_equal(&ort->rt6i_dst.addr, daddr)) rt->rt6i_flags |= RTF_ANYCAST; - rt->rt6i_gateway = *daddr; + ipv6_addr_copy(&rt->rt6i_gateway, daddr); } rt->rt6i_flags |= RTF_CACHE; #ifdef CONFIG_IPV6_SUBTREES if (rt->rt6i_src.plen && saddr) { - rt->rt6i_src.addr = *saddr; + ipv6_addr_copy(&rt->rt6i_src.addr, saddr); rt->rt6i_src.plen = 128; } #endif retry: - if (rt6_bind_neighbour(rt, rt->dst.dev)) { - struct net *net = dev_net(rt->dst.dev); + neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway); + if (IS_ERR(neigh)) { + struct net *net = 
dev_net(rt->rt6i_dev); int saved_rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval; int saved_rt_elasticity = @@ -775,6 +771,8 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort, dst_free(&rt->dst); return NULL; } + dst_set_neighbour(&rt->dst, neigh); + } return rt; @@ -787,7 +785,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, if (rt) { rt->rt6i_flags |= RTF_CACHE; - dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_noref_raw(&ort->dst))); + dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst))); } return rt; } @@ -821,7 +819,7 @@ static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, dst_hold(&rt->dst); read_unlock_bh(&table->tb6_lock); - if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP)) + if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP)) nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr); else if (!(rt->dst.flags & DST_HOST)) nrt = rt6_alloc_clone(rt, &fl6->daddr); @@ -877,7 +875,7 @@ void ip6_route_input(struct sk_buff *skb) .flowi6_iif = skb->dev->ifindex, .daddr = iph->daddr, .saddr = iph->saddr, - .flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK, + .flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK, .flowi6_mark = skb->mark, .flowi6_proto = iph->nexthdr, }; @@ -934,9 +932,9 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori rt->rt6i_idev = ort->rt6i_idev; if (rt->rt6i_idev) in6_dev_hold(rt->rt6i_idev); - rt->dst.expires = 0; + rt->rt6i_expires = 0; - rt->rt6i_gateway = ort->rt6i_gateway; + ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway); rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES; rt->rt6i_metric = 0; @@ -999,7 +997,7 @@ static void ip6_link_failure(struct sk_buff *skb) rt = (struct rt6_info *) skb_dst(skb); if (rt) { - if (rt->rt6i_flags & RTF_CACHE) { + if (rt->rt6i_flags&RTF_CACHE) { dst_set_expires(&rt->dst, 0); rt->rt6i_flags |= RTF_EXPIRES; } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) @@ -1069,38 +1067,34 @@ static DEFINE_SPINLOCK(icmp6_dst_lock); struct dst_entry *icmp6_dst_alloc(struct net_device *dev, struct neighbour *neigh, - struct flowi6 *fl6) + const struct in6_addr *addr) { - struct dst_entry *dst; struct rt6_info *rt; struct inet6_dev *idev = in6_dev_get(dev); struct net *net = dev_net(dev); - if (unlikely(!idev)) + if (unlikely(idev == NULL)) return NULL; rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0); - if (unlikely(!rt)) { + if (unlikely(rt == NULL)) { in6_dev_put(idev); - dst = ERR_PTR(-ENOMEM); goto out; } if (neigh) neigh_hold(neigh); else { - neigh = ip6_neigh_lookup(&rt->dst, &fl6->daddr); - if (IS_ERR(neigh)) { - dst_free(&rt->dst); - return ERR_CAST(neigh); - } + neigh = ndisc_get_neigh(dev, addr); + if (IS_ERR(neigh)) + neigh = NULL; } rt->dst.flags |= DST_HOST; rt->dst.output = ip6_output; dst_set_neighbour(&rt->dst, neigh); atomic_set(&rt->dst.__refcnt, 1); - rt->rt6i_dst.addr = fl6->daddr; + ipv6_addr_copy(&rt->rt6i_dst.addr, addr); rt->rt6i_dst.plen = 128; rt->rt6i_idev = idev; dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255); @@ -1112,10 +1106,8 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, fib6_force_start_gc(net); - dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0); - out: - return dst; + return &rt->dst; } int icmp6_dst_gc(void) @@ -1245,30 +1237,21 @@ int ip6_route_add(struct fib6_config *cfg) if (cfg->fc_metric == 0) cfg->fc_metric = IP6_RT_PRIO_USER; - err = -ENOBUFS; - if (cfg->fc_nlinfo.nlh && - 
!(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) { - table = fib6_get_table(net, cfg->fc_table); - if (!table) { - printk(KERN_WARNING "IPv6: NLM_F_CREATE should be specified when creating new route\n"); - table = fib6_new_table(net, cfg->fc_table); - } - } else { - table = fib6_new_table(net, cfg->fc_table); - } - - if (!table) + table = fib6_new_table(net, cfg->fc_table); + if (table == NULL) { + err = -ENOBUFS; goto out; + } rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT); - if (!rt) { + if (rt == NULL) { err = -ENOMEM; goto out; } rt->dst.obsolete = -1; - rt->dst.expires = (cfg->fc_flags & RTF_EXPIRES) ? + rt->rt6i_expires = (cfg->fc_flags & RTF_EXPIRES) ? jiffies + clock_t_to_jiffies(cfg->fc_expires) : 0; @@ -1311,9 +1294,8 @@ int ip6_route_add(struct fib6_config *cfg) they would result in kernel looping; promote them to reject routes */ if ((cfg->fc_flags & RTF_REJECT) || - (dev && (dev->flags & IFF_LOOPBACK) && - !(addr_type & IPV6_ADDR_LOOPBACK) && - !(cfg->fc_flags & RTF_LOCAL))) { + (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK) + && !(cfg->fc_flags&RTF_LOCAL))) { /* hold loopback dev/idev if we haven't done so. */ if (dev != net->loopback_dev) { if (dev) { @@ -1340,7 +1322,7 @@ int ip6_route_add(struct fib6_config *cfg) int gwa_type; gw_addr = &cfg->fc_gateway; - rt->rt6i_gateway = *gw_addr; + ipv6_addr_copy(&rt->rt6i_gateway, gw_addr); gwa_type = ipv6_addr_type(gw_addr); if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) { @@ -1354,26 +1336,26 @@ int ip6_route_add(struct fib6_config *cfg) some exceptions. --ANK */ err = -EINVAL; - if (!(gwa_type & IPV6_ADDR_UNICAST)) + if (!(gwa_type&IPV6_ADDR_UNICAST)) goto out; grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1); err = -EHOSTUNREACH; - if (!grt) + if (grt == NULL) goto out; if (dev) { - if (dev != grt->dst.dev) { + if (dev != grt->rt6i_dev) { dst_release(&grt->dst); goto out; } } else { - dev = grt->dst.dev; + dev = grt->rt6i_dev; idev = grt->rt6i_idev; dev_hold(dev); in6_dev_hold(grt->rt6i_idev); } - if (!(grt->rt6i_flags & RTF_GATEWAY)) + if (!(grt->rt6i_flags&RTF_GATEWAY)) err = 0; dst_release(&grt->dst); @@ -1381,12 +1363,12 @@ int ip6_route_add(struct fib6_config *cfg) goto out; } err = -EINVAL; - if (!dev || (dev->flags & IFF_LOOPBACK)) + if (dev == NULL || (dev->flags&IFF_LOOPBACK)) goto out; } err = -ENODEV; - if (!dev) + if (dev == NULL) goto out; if (!ipv6_addr_any(&cfg->fc_prefsrc)) { @@ -1394,15 +1376,18 @@ int ip6_route_add(struct fib6_config *cfg) err = -EINVAL; goto out; } - rt->rt6i_prefsrc.addr = cfg->fc_prefsrc; + ipv6_addr_copy(&rt->rt6i_prefsrc.addr, &cfg->fc_prefsrc); rt->rt6i_prefsrc.plen = 128; } else rt->rt6i_prefsrc.plen = 0; if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) { - err = rt6_bind_neighbour(rt, dev); - if (err) + struct neighbour *n = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev); + if (IS_ERR(n)) { + err = PTR_ERR(n); goto out; + } + dst_set_neighbour(&rt->dst, n); } rt->rt6i_flags = cfg->fc_flags; @@ -1448,7 +1433,7 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info) { int err; struct fib6_table *table; - struct net *net = dev_net(rt->dst.dev); + struct net *net = dev_net(rt->rt6i_dev); if (rt == net->ipv6.ip6_null_entry) return -ENOENT; @@ -1467,7 +1452,7 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info) int ip6_del_rt(struct rt6_info *rt) { struct nl_info info = { - .nl_net = dev_net(rt->dst.dev), + .nl_net = dev_net(rt->rt6i_dev), }; return __ip6_del_rt(rt, &info); } @@ -1480,7 +1465,7 @@ 
static int ip6_route_del(struct fib6_config *cfg) int err = -ESRCH; table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table); - if (!table) + if (table == NULL) return err; read_lock_bh(&table->tb6_lock); @@ -1492,8 +1477,8 @@ static int ip6_route_del(struct fib6_config *cfg) if (fn) { for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) { if (cfg->fc_ifindex && - (!rt->dst.dev || - rt->dst.dev->ifindex != cfg->fc_ifindex)) + (rt->rt6i_dev == NULL || + rt->rt6i_dev->ifindex != cfg->fc_ifindex)) continue; if (cfg->fc_flags & RTF_GATEWAY && !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway)) @@ -1555,7 +1540,7 @@ static struct rt6_info *__ip6_route_redirect(struct net *net, continue; if (!(rt->rt6i_flags & RTF_GATEWAY)) continue; - if (fl6->flowi6_oif != rt->dst.dev->ifindex) + if (fl6->flowi6_oif != rt->rt6i_dev->ifindex) continue; if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway)) continue; @@ -1588,7 +1573,7 @@ static struct rt6_info *ip6_route_redirect(const struct in6_addr *dest, }, }; - rdfl.gateway = *gateway; + ipv6_addr_copy(&rdfl.gateway, gateway); if (rt6_need_strict(dest)) flags |= RT6_LOOKUP_F_IFACE; @@ -1633,18 +1618,18 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src, dst_confirm(&rt->dst); /* Duplicate redirect: silently ignore. */ - if (neigh == dst_get_neighbour_noref_raw(&rt->dst)) + if (neigh == dst_get_neighbour_raw(&rt->dst)) goto out; nrt = ip6_rt_copy(rt, dest); - if (!nrt) + if (nrt == NULL) goto out; nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE; if (on_link) nrt->rt6i_flags &= ~RTF_GATEWAY; - nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key; + ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key); dst_set_neighbour(&nrt->dst, neigh_clone(neigh)); if (ip6_ins_rt(nrt)) @@ -1654,7 +1639,7 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src, netevent.new = &nrt->dst; call_netevent_notifiers(NETEVENT_REDIRECT, &netevent); - if (rt->rt6i_flags & RTF_CACHE) { + if (rt->rt6i_flags&RTF_CACHE) { ip6_del_rt(rt); return; } @@ -1675,7 +1660,7 @@ static void rt6_do_pmtu_disc(const struct in6_addr *daddr, const struct in6_addr int allfrag = 0; again: rt = rt6_lookup(net, daddr, saddr, ifindex, 0); - if (!rt) + if (rt == NULL) return; if (rt6_check_expired(rt)) { @@ -1725,7 +1710,7 @@ static void rt6_do_pmtu_disc(const struct in6_addr *daddr, const struct in6_addr 1. It is connected route. Action: COW 2. It is gatewayed route or NONEXTHOP route. Action: clone it. 
*/ - if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP)) + if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP)) nrt = rt6_alloc_cow(rt, daddr, saddr); else nrt = rt6_alloc_clone(rt, daddr); @@ -1781,7 +1766,7 @@ void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *sad static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, const struct in6_addr *dest) { - struct net *net = dev_net(ort->dst.dev); + struct net *net = dev_net(ort->rt6i_dev); struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, ort->dst.dev, 0); @@ -1790,7 +1775,7 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, rt->dst.output = ort->dst.output; rt->dst.flags |= DST_HOST; - rt->rt6i_dst.addr = *dest; + ipv6_addr_copy(&rt->rt6i_dst.addr, dest); rt->rt6i_dst.plen = 128; dst_copy_metrics(&rt->dst, &ort->dst); rt->dst.error = ort->dst.error; @@ -1798,9 +1783,9 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, if (rt->rt6i_idev) in6_dev_hold(rt->rt6i_idev); rt->dst.lastuse = jiffies; - rt->dst.expires = 0; + rt->rt6i_expires = 0; - rt->rt6i_gateway = ort->rt6i_gateway; + ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway); rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES; rt->rt6i_metric = 0; @@ -1823,7 +1808,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net, struct fib6_table *table; table = fib6_get_table(net, RT6_TABLE_INFO); - if (!table) + if (table == NULL) return NULL; write_lock_bh(&table->tb6_lock); @@ -1832,7 +1817,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net, goto out; for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) { - if (rt->dst.dev->ifindex != ifindex) + if (rt->rt6i_dev->ifindex != ifindex) continue; if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY)) continue; @@ -1863,8 +1848,8 @@ static struct rt6_info *rt6_add_route_info(struct net *net, .fc_nlinfo.nl_net = net, }; - cfg.fc_dst = *prefix; - cfg.fc_gateway = *gwaddr; + ipv6_addr_copy(&cfg.fc_dst, prefix); + ipv6_addr_copy(&cfg.fc_gateway, gwaddr); /* We should treat it as a default route if prefix length is 0. 
*/ if (!prefixlen) @@ -1882,12 +1867,12 @@ struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_dev struct fib6_table *table; table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT); - if (!table) + if (table == NULL) return NULL; write_lock_bh(&table->tb6_lock); for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) { - if (dev == rt->dst.dev && + if (dev == rt->rt6i_dev && ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) && ipv6_addr_equal(&rt->rt6i_gateway, addr)) break; @@ -1913,7 +1898,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr, .fc_nlinfo.nl_net = dev_net(dev), }; - cfg.fc_gateway = *gwaddr; + ipv6_addr_copy(&cfg.fc_gateway, gwaddr); ip6_route_add(&cfg); @@ -1927,7 +1912,7 @@ void rt6_purge_dflt_routers(struct net *net) /* NOTE: Keep consistent with rt6_get_dflt_router */ table = fib6_get_table(net, RT6_TABLE_DFLT); - if (!table) + if (table == NULL) return; restart: @@ -1959,9 +1944,9 @@ static void rtmsg_to_fib6_config(struct net *net, cfg->fc_nlinfo.nl_net = net; - cfg->fc_dst = rtmsg->rtmsg_dst; - cfg->fc_src = rtmsg->rtmsg_src; - cfg->fc_gateway = rtmsg->rtmsg_gateway; + ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst); + ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src); + ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway); } int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg) @@ -2060,14 +2045,14 @@ static int ip6_pkt_prohibit_out(struct sk_buff *skb) struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, const struct in6_addr *addr, - bool anycast) + int anycast) { struct net *net = dev_net(idev->dev); struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, net->loopback_dev, 0); - int err; + struct neighbour *neigh; - if (!rt) { + if (rt == NULL) { if (net_ratelimit()) pr_warning("IPv6: Maximum number of routes reached," " consider increasing route/max_size.\n"); @@ -2087,13 +2072,15 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, rt->rt6i_flags |= RTF_ANYCAST; else rt->rt6i_flags |= RTF_LOCAL; - err = rt6_bind_neighbour(rt, rt->dst.dev); - if (err) { + neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway); + if (IS_ERR(neigh)) { dst_free(&rt->dst); - return ERR_PTR(err); + + return ERR_CAST(neigh); } + dst_set_neighbour(&rt->dst, neigh); - rt->rt6i_dst.addr = *addr; + ipv6_addr_copy(&rt->rt6i_dst.addr, addr); rt->rt6i_dst.plen = 128; rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL); @@ -2111,7 +2098,7 @@ int ip6_route_get_saddr(struct net *net, struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt); int err = 0; if (rt->rt6i_prefsrc.plen) - *saddr = rt->rt6i_prefsrc.addr; + ipv6_addr_copy(saddr, &rt->rt6i_prefsrc.addr); else err = ipv6_dev_get_saddr(net, idev ? 
idev->dev : NULL, daddr, prefs, saddr); @@ -2131,7 +2118,7 @@ static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg) struct net *net = ((struct arg_dev_net_ip *)arg)->net; struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr; - if (((void *)rt->dst.dev == dev || !dev) && + if (((void *)rt->rt6i_dev == dev || dev == NULL) && rt != net->ipv6.ip6_null_entry && ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) { /* remove prefsrc entry */ @@ -2161,10 +2148,11 @@ static int fib6_ifdown(struct rt6_info *rt, void *arg) const struct arg_dev_net *adn = arg; const struct net_device *dev = adn->dev; - if ((rt->dst.dev == dev || !dev) && - rt != adn->net->ipv6.ip6_null_entry) + if ((rt->rt6i_dev == dev || dev == NULL) && + rt != adn->net->ipv6.ip6_null_entry) { + RT6_TRACE("deleted by ifdown %p\n", rt); return -1; - + } return 0; } @@ -2197,7 +2185,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) */ idev = __in6_dev_get(arg->dev); - if (!idev) + if (idev == NULL) return 0; /* For administrative MTU increase, there is no way to discover @@ -2214,7 +2202,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) also have the lowest MTU, TOO BIG MESSAGE will be lead to PMTU discouvery. */ - if (rt->dst.dev == arg->dev && + if (rt->rt6i_dev == arg->dev && !dst_metric_locked(&rt->dst, RTAX_MTU) && (dst_mtu(&rt->dst) >= arg->mtu || (dst_mtu(&rt->dst) < arg->mtu && @@ -2363,13 +2351,11 @@ static int rt6_fill_node(struct net *net, int iif, int type, u32 pid, u32 seq, int prefix, int nowait, unsigned int flags) { - const struct inet_peer *peer; struct rtmsg *rtm; struct nlmsghdr *nlh; long expires; u32 table; struct neighbour *n; - u32 ts, tsage; if (prefix) { /* user wants prefix routes only */ if (!(rt->rt6i_flags & RTF_PREFIX_RT)) { @@ -2379,7 +2365,7 @@ static int rt6_fill_node(struct net *net, } nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags); - if (!nlh) + if (nlh == NULL) return -EMSGSIZE; rtm = nlmsg_data(nlh); @@ -2393,25 +2379,25 @@ static int rt6_fill_node(struct net *net, table = RT6_TABLE_UNSPEC; rtm->rtm_table = table; NLA_PUT_U32(skb, RTA_TABLE, table); - if (rt->rt6i_flags & RTF_REJECT) + if (rt->rt6i_flags&RTF_REJECT) rtm->rtm_type = RTN_UNREACHABLE; - else if (rt->rt6i_flags & RTF_LOCAL) + else if (rt->rt6i_flags&RTF_LOCAL) rtm->rtm_type = RTN_LOCAL; - else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK)) + else if (rt->rt6i_dev && (rt->rt6i_dev->flags&IFF_LOOPBACK)) rtm->rtm_type = RTN_LOCAL; else rtm->rtm_type = RTN_UNICAST; rtm->rtm_flags = 0; rtm->rtm_scope = RT_SCOPE_UNIVERSE; rtm->rtm_protocol = rt->rt6i_protocol; - if (rt->rt6i_flags & RTF_DYNAMIC) + if (rt->rt6i_flags&RTF_DYNAMIC) rtm->rtm_protocol = RTPROT_REDIRECT; else if (rt->rt6i_flags & RTF_ADDRCONF) rtm->rtm_protocol = RTPROT_KERNEL; - else if (rt->rt6i_flags & RTF_DEFAULT) + else if (rt->rt6i_flags&RTF_DEFAULT) rtm->rtm_protocol = RTPROT_RA; - if (rt->rt6i_flags & RTF_CACHE) + if (rt->rt6i_flags&RTF_CACHE) rtm->rtm_flags |= RTM_F_CLONED; if (dst) { @@ -2451,7 +2437,7 @@ static int rt6_fill_node(struct net *net, if (rt->rt6i_prefsrc.plen) { struct in6_addr saddr_buf; - saddr_buf = rt->rt6i_prefsrc.addr; + ipv6_addr_copy(&saddr_buf, &rt->rt6i_prefsrc.addr); NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); } @@ -2459,31 +2445,24 @@ static int rt6_fill_node(struct net *net, goto nla_put_failure; rcu_read_lock(); - n = dst_get_neighbour_noref(&rt->dst); + n = dst_get_neighbour(&rt->dst); if (n) NLA_PUT(skb, RTA_GATEWAY, 16, &n->primary_key); rcu_read_unlock(); if 
(rt->dst.dev) - NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex); + NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex); NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric); if (!(rt->rt6i_flags & RTF_EXPIRES)) expires = 0; - else if (rt->dst.expires - jiffies < INT_MAX) - expires = rt->dst.expires - jiffies; + else if (rt->rt6i_expires - jiffies < INT_MAX) + expires = rt->rt6i_expires - jiffies; else expires = INT_MAX; - peer = rt->rt6i_peer; - ts = tsage = 0; - if (peer && peer->tcp_ts_stamp) { - ts = peer->tcp_ts; - tsage = get_seconds() - peer->tcp_ts_stamp; - } - - if (rtnl_put_cacheinfo(skb, &rt->dst, 0, ts, tsage, + if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires, rt->dst.error) < 0) goto nla_put_failure; @@ -2532,14 +2511,14 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr)) goto errout; - fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]); + ipv6_addr_copy(&fl6.saddr, nla_data(tb[RTA_SRC])); } if (tb[RTA_DST]) { if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr)) goto errout; - fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]); + ipv6_addr_copy(&fl6.daddr, nla_data(tb[RTA_DST])); } if (tb[RTA_IIF]) @@ -2558,7 +2537,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void } skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); - if (!skb) { + if (skb == NULL) { err = -ENOBUFS; goto errout; } @@ -2593,10 +2572,10 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info) int err; err = -ENOBUFS; - seq = info->nlh ? info->nlh->nlmsg_seq : 0; + seq = info->nlh != NULL ? info->nlh->nlmsg_seq : 0; skb = nlmsg_new(rt6_nlmsg_size(), gfp_any()); - if (!skb) + if (skb == NULL) goto errout; err = rt6_fill_node(net, skb, rt, NULL, NULL, 0, @@ -2663,7 +2642,7 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg) seq_puts(m, "00000000000000000000000000000000 00 "); #endif rcu_read_lock(); - n = dst_get_neighbour_noref(&rt->dst); + n = dst_get_neighbour(&rt->dst); if (n) { seq_printf(m, "%pi6", n->primary_key); } else { @@ -2673,14 +2652,14 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg) seq_printf(m, " %08x %08x %08x %08x %8s\n", rt->rt6i_metric, atomic_read(&rt->dst.__refcnt), rt->dst.__use, rt->rt6i_flags, - rt->dst.dev ? rt->dst.dev->name : ""); + rt->rt6i_dev ? 
rt->rt6i_dev->name : ""); return 0; } static int ipv6_route_show(struct seq_file *m, void *v) { struct net *net = (struct net *)m->private; - fib6_clean_all_ro(net, rt6_info_route, 0, m); + fib6_clean_all(net, rt6_info_route, 0, m); return 0; } diff --git a/trunk/net/ipv6/sit.c b/trunk/net/ipv6/sit.c index 3b6dac956bb0..96f3623618e3 100644 --- a/trunk/net/ipv6/sit.c +++ b/trunk/net/ipv6/sit.c @@ -91,7 +91,7 @@ struct pcpu_tstats { unsigned long rx_bytes; unsigned long tx_packets; unsigned long tx_bytes; -} __attribute__((aligned(4*sizeof(unsigned long)))); +}; static struct net_device_stats *ipip6_get_stats(struct net_device *dev) { @@ -682,7 +682,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, struct neighbour *neigh = NULL; if (skb_dst(skb)) - neigh = dst_get_neighbour_noref(skb_dst(skb)); + neigh = dst_get_neighbour(skb_dst(skb)); if (neigh == NULL) { if (net_ratelimit()) @@ -707,7 +707,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, struct neighbour *neigh = NULL; if (skb_dst(skb)) - neigh = dst_get_neighbour_noref(skb_dst(skb)); + neigh = dst_get_neighbour(skb_dst(skb)); if (neigh == NULL) { if (net_ratelimit()) @@ -916,7 +916,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) goto done; #ifdef CONFIG_IPV6_SIT_6RD } else { - ip6rd.prefix = t->ip6rd.prefix; + ipv6_addr_copy(&ip6rd.prefix, &t->ip6rd.prefix); ip6rd.relay_prefix = t->ip6rd.relay_prefix; ip6rd.prefixlen = t->ip6rd.prefixlen; ip6rd.relay_prefixlen = t->ip6rd.relay_prefixlen; @@ -1084,7 +1084,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) if (relay_prefix != ip6rd.relay_prefix) goto done; - t->ip6rd.prefix = prefix; + ipv6_addr_copy(&t->ip6rd.prefix, &prefix); t->ip6rd.relay_prefix = relay_prefix; t->ip6rd.prefixlen = ip6rd.prefixlen; t->ip6rd.relay_prefixlen = ip6rd.relay_prefixlen; diff --git a/trunk/net/ipv6/syncookies.c b/trunk/net/ipv6/syncookies.c index 8e951d8d3b81..5a0d6648bbbc 100644 --- a/trunk/net/ipv6/syncookies.c +++ b/trunk/net/ipv6/syncookies.c @@ -200,8 +200,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) req->mss = mss; ireq->rmt_port = th->source; ireq->loc_port = th->dest; - ireq6->rmt_addr = ipv6_hdr(skb)->saddr; - ireq6->loc_addr = ipv6_hdr(skb)->daddr; + ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr); + ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr); if (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { @@ -237,9 +237,9 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) struct flowi6 fl6; memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_TCP; - fl6.daddr = ireq6->rmt_addr; + ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr); final_p = fl6_update_dst(&fl6, np->opt, &final); - fl6.saddr = ireq6->loc_addr; + ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr); fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = sk->sk_mark; fl6.fl6_dport = inet_rsk(req)->rmt_port; diff --git a/trunk/net/ipv6/tcp_ipv6.c b/trunk/net/ipv6/tcp_ipv6.c index 906c7ca43542..2dea4bb7b54a 100644 --- a/trunk/net/ipv6/tcp_ipv6.c +++ b/trunk/net/ipv6/tcp_ipv6.c @@ -62,7 +62,6 @@ #include #include #include -#include #include @@ -154,7 +153,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (flowlabel == NULL) return -EINVAL; - usin->sin6_addr = flowlabel->dst; + ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst); fl6_sock_release(flowlabel); } 
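/*
 * Editor's note (illustrative, not part of the patch): the hunks above and
 * below revert plain structure assignment of struct in6_addr back to the
 * ipv6_addr_copy() helper; the same pattern repeats throughout the IPv6
 * changes in this series.  The two forms are equivalent, e.g. with the
 * fl6/np variables that tcp_v6_connect() has in scope:
 *
 *	ipv6_addr_copy(&fl6.daddr, &np->daddr);   helper form used after this change
 *	fl6.daddr = np->daddr;                    direct assignment being reverted
 */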
} @@ -196,7 +195,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, tp->write_seq = 0; } - np->daddr = usin->sin6_addr; + ipv6_addr_copy(&np->daddr, &usin->sin6_addr); np->flow_label = fl6.flowlabel; /* @@ -245,8 +244,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, saddr = &np->rcv_saddr; fl6.flowi6_proto = IPPROTO_TCP; - fl6.daddr = np->daddr; - fl6.saddr = saddr ? *saddr : np->saddr; + ipv6_addr_copy(&fl6.daddr, &np->daddr); + ipv6_addr_copy(&fl6.saddr, + (saddr ? saddr : &np->saddr)); fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = sk->sk_mark; fl6.fl6_dport = usin->sin6_port; @@ -264,11 +264,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, if (saddr == NULL) { saddr = &fl6.saddr; - np->rcv_saddr = *saddr; + ipv6_addr_copy(&np->rcv_saddr, saddr); } /* set the source address */ - np->saddr = *saddr; + ipv6_addr_copy(&np->saddr, saddr); inet->inet_rcv_saddr = LOOPBACK4_IPV6; sk->sk_gso_type = SKB_GSO_TCPV6; @@ -398,8 +398,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, */ memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_TCP; - fl6.daddr = np->daddr; - fl6.saddr = np->saddr; + ipv6_addr_copy(&fl6.daddr, &np->daddr); + ipv6_addr_copy(&fl6.saddr, &np->saddr); fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = sk->sk_mark; fl6.fl6_dport = inet->inet_dport; @@ -489,8 +489,8 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_TCP; - fl6.daddr = treq->rmt_addr; - fl6.saddr = treq->loc_addr; + ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr); + ipv6_addr_copy(&fl6.saddr, &treq->loc_addr); fl6.flowlabel = 0; fl6.flowi6_oif = treq->iif; fl6.flowi6_mark = sk->sk_mark; @@ -512,7 +512,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, if (skb) { __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr); - fl6.daddr = treq->rmt_addr; + ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr); err = ip6_xmit(sk, skb, &fl6, opt, np->tclass); err = net_xmit_eval(err); } @@ -617,7 +617,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer, tp->md5sig_info->alloced6++; } - tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr = *peer; + ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr, + peer); tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey; tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen; @@ -749,8 +750,8 @@ static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp, bp = &hp->md5_blk.ip6; /* 1. 
TCP pseudo-header (RFC2460) */ - bp->saddr = *saddr; - bp->daddr = *daddr; + ipv6_addr_copy(&bp->saddr, saddr); + ipv6_addr_copy(&bp->daddr, daddr); bp->protocol = cpu_to_be32(IPPROTO_TCP); bp->len = cpu_to_be32(nbytes); @@ -1038,8 +1039,8 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, #endif memset(&fl6, 0, sizeof(fl6)); - fl6.daddr = ipv6_hdr(skb)->saddr; - fl6.saddr = ipv6_hdr(skb)->daddr; + ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr); + ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr); buff->ip_summed = CHECKSUM_PARTIAL; buff->csum = 0; @@ -1249,8 +1250,8 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) tcp_openreq_init(req, &tmp_opt, skb); treq = inet6_rsk(req); - treq->rmt_addr = ipv6_hdr(skb)->saddr; - treq->loc_addr = ipv6_hdr(skb)->daddr; + ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr); + ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr); if (!want_cookie || tmp_opt.tstamp_ok) TCP_ECN_create_request(req, tcp_hdr(skb)); @@ -1380,7 +1381,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr); - newnp->rcv_saddr = newnp->saddr; + ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr); inet_csk(newsk)->icsk_af_ops = &ipv6_mapped; newsk->sk_backlog_rcv = tcp_v4_do_rcv; @@ -1444,9 +1445,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, memcpy(newnp, np, sizeof(struct ipv6_pinfo)); - newnp->daddr = treq->rmt_addr; - newnp->saddr = treq->loc_addr; - newnp->rcv_saddr = treq->loc_addr; + ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr); + ipv6_addr_copy(&newnp->saddr, &treq->loc_addr); + ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr); newsk->sk_bound_dev_if = treq->iif; /* Now IPv6 options... 
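/*
 * Editor's note (sketch, not part of the patch): the hunk below drops the
 * memcg-aware socket accounting (sock_update_memcg()/sk_sockets_allocated_inc())
 * and goes back to bumping the global tcp_sockets_allocated percpu_counter
 * directly.  A minimal, hypothetical example of that percpu_counter pattern;
 * it assumes <linux/percpu_counter.h> and <linux/init.h>, and the names below
 * are made up for illustration only:
 */
static struct percpu_counter example_sockets_allocated;

static int __init example_counter_init(void)
{
	/* distributes the counter across CPUs; increments stay cheap */
	return percpu_counter_init(&example_sockets_allocated, 0);
}

static void example_socket_created(void)
{
	percpu_counter_inc(&example_sockets_allocated);
}

static s64 example_sockets_in_use(void)
{
	/* folds the per-CPU deltas into one approximate, non-negative total */
	return percpu_counter_read_positive(&example_sockets_allocated);
}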
@@ -1995,8 +1996,7 @@ static int tcp_v6_init_sock(struct sock *sk) sk->sk_rcvbuf = sysctl_tcp_rmem[1]; local_bh_disable(); - sock_update_memcg(sk); - sk_sockets_allocated_inc(sk); + percpu_counter_inc(&tcp_sockets_allocated); local_bh_enable(); return 0; @@ -2215,6 +2215,7 @@ struct proto tcpv6_prot = { .memory_allocated = &tcp_memory_allocated, .memory_pressure = &tcp_memory_pressure, .orphan_count = &tcp_orphan_count, + .sysctl_mem = sysctl_tcp_mem, .sysctl_wmem = sysctl_tcp_wmem, .sysctl_rmem = sysctl_tcp_rmem, .max_header = MAX_TCP_HEADER, @@ -2228,9 +2229,6 @@ struct proto tcpv6_prot = { .compat_setsockopt = compat_tcp_setsockopt, .compat_getsockopt = compat_tcp_getsockopt, #endif -#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM - .proto_cgroup = tcp_proto_cgroup, -#endif }; static const struct inet6_protocol tcpv6_protocol = { diff --git a/trunk/net/ipv6/udp.c b/trunk/net/ipv6/udp.c index 4f96b5c63685..8c2541915183 100644 --- a/trunk/net/ipv6/udp.c +++ b/trunk/net/ipv6/udp.c @@ -238,7 +238,7 @@ static struct sock *udp6_lib_lookup2(struct net *net, return result; } -struct sock *__udp6_lib_lookup(struct net *net, +static struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, __be16 dport, int dif, struct udp_table *udptable) @@ -305,7 +305,6 @@ struct sock *__udp6_lib_lookup(struct net *net, rcu_read_unlock(); return result; } -EXPORT_SYMBOL_GPL(__udp6_lib_lookup); static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb, __be16 sport, __be16 dport, @@ -419,7 +418,8 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, &sin6->sin6_addr); else { - sin6->sin6_addr = ipv6_hdr(skb)->saddr; + ipv6_addr_copy(&sin6->sin6_addr, + &ipv6_hdr(skb)->saddr); if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) sin6->sin6_scope_id = IP6CB(skb)->iif; } @@ -539,9 +539,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) goto drop; } - skb_dst_drop(skb); - rc = sock_queue_rcv_skb(sk, skb); - if (rc < 0) { + if ((rc = ip_queue_rcv_skb(sk, skb)) < 0) { /* Note that an ENOMEM error is charged twice */ if (rc == -ENOMEM) UDP6_INC_STATS_BH(sock_net(sk), @@ -1116,11 +1114,11 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, fl6.flowi6_proto = sk->sk_protocol; if (!ipv6_addr_any(daddr)) - fl6.daddr = *daddr; + ipv6_addr_copy(&fl6.daddr, daddr); else fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr)) - fl6.saddr = np->saddr; + ipv6_addr_copy(&fl6.saddr, &np->saddr); fl6.fl6_sport = inet->inet_sport; final_p = fl6_update_dst(&fl6, opt, &final); @@ -1301,8 +1299,7 @@ static int udp6_ufo_send_check(struct sk_buff *skb) return 0; } -static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, - netdev_features_t features) +static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features) { struct sk_buff *segs = ERR_PTR(-EINVAL); unsigned int mss; diff --git a/trunk/net/ipv6/xfrm6_mode_beet.c b/trunk/net/ipv6/xfrm6_mode_beet.c index a81ce9450750..3437d7d4eed6 100644 --- a/trunk/net/ipv6/xfrm6_mode_beet.c +++ b/trunk/net/ipv6/xfrm6_mode_beet.c @@ -72,8 +72,8 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb) top_iph->nexthdr = IPPROTO_BEETPH; } - top_iph->saddr = *(struct in6_addr *)&x->props.saddr; - top_iph->daddr = *(struct in6_addr *)&x->id.daddr; + ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr); + ipv6_addr_copy(&top_iph->daddr, (struct in6_addr 
*)&x->id.daddr); return 0; } @@ -99,8 +99,8 @@ static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb) ip6h = ipv6_hdr(skb); ip6h->payload_len = htons(skb->len - size); - ip6h->daddr = *(struct in6_addr *)&x->sel.daddr.a6; - ip6h->saddr = *(struct in6_addr *)&x->sel.saddr.a6; + ipv6_addr_copy(&ip6h->daddr, (struct in6_addr *) &x->sel.daddr.a6); + ipv6_addr_copy(&ip6h->saddr, (struct in6_addr *) &x->sel.saddr.a6); err = 0; out: return err; diff --git a/trunk/net/ipv6/xfrm6_mode_tunnel.c b/trunk/net/ipv6/xfrm6_mode_tunnel.c index 261e6e6f487e..4d6edff0498f 100644 --- a/trunk/net/ipv6/xfrm6_mode_tunnel.c +++ b/trunk/net/ipv6/xfrm6_mode_tunnel.c @@ -55,8 +55,8 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) dsfield &= ~INET_ECN_MASK; ipv6_change_dsfield(top_iph, 0, dsfield); top_iph->hop_limit = ip6_dst_hoplimit(dst->child); - top_iph->saddr = *(struct in6_addr *)&x->props.saddr; - top_iph->daddr = *(struct in6_addr *)&x->id.daddr; + ipv6_addr_copy(&top_iph->saddr, (const struct in6_addr *)&x->props.saddr); + ipv6_addr_copy(&top_iph->daddr, (const struct in6_addr *)&x->id.daddr); return 0; } diff --git a/trunk/net/ipv6/xfrm6_output.c b/trunk/net/ipv6/xfrm6_output.c index 4eeff89c1aaa..faae41737fca 100644 --- a/trunk/net/ipv6/xfrm6_output.c +++ b/trunk/net/ipv6/xfrm6_output.c @@ -49,7 +49,7 @@ static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu) struct sock *sk = skb->sk; fl6.flowi6_oif = sk->sk_bound_dev_if; - fl6.daddr = ipv6_hdr(skb)->daddr; + ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->daddr); ipv6_local_rxpmtu(sk, &fl6, mtu); } @@ -60,7 +60,7 @@ static void xfrm6_local_error(struct sk_buff *skb, u32 mtu) struct sock *sk = skb->sk; fl6.fl6_dport = inet_sk(sk)->inet_dport; - fl6.daddr = ipv6_hdr(skb)->daddr; + ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->daddr); ipv6_local_error(sk, EMSGSIZE, &fl6, mtu); } diff --git a/trunk/net/ipv6/xfrm6_policy.c b/trunk/net/ipv6/xfrm6_policy.c index 8ea65e032733..d879f7efbd10 100644 --- a/trunk/net/ipv6/xfrm6_policy.c +++ b/trunk/net/ipv6/xfrm6_policy.c @@ -132,8 +132,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) memset(fl6, 0, sizeof(struct flowi6)); fl6->flowi6_mark = skb->mark; - fl6->daddr = reverse ? hdr->saddr : hdr->daddr; - fl6->saddr = reverse ? hdr->daddr : hdr->saddr; + ipv6_addr_copy(&fl6->daddr, reverse ? &hdr->saddr : &hdr->daddr); + ipv6_addr_copy(&fl6->saddr, reverse ? &hdr->daddr : &hdr->saddr); while (nh + offset + 1 < skb->data || pskb_may_pull(skb, nh + offset + 1 - skb->data)) { diff --git a/trunk/net/ipv6/xfrm6_state.c b/trunk/net/ipv6/xfrm6_state.c index 3f2f7c4ab721..f2d72b8a3faa 100644 --- a/trunk/net/ipv6/xfrm6_state.c +++ b/trunk/net/ipv6/xfrm6_state.c @@ -27,8 +27,8 @@ __xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl) /* Initialize temporary selector matching only * to current session. 
*/ - *(struct in6_addr *)&sel->daddr = fl6->daddr; - *(struct in6_addr *)&sel->saddr = fl6->saddr; + ipv6_addr_copy((struct in6_addr *)&sel->daddr, &fl6->daddr); + ipv6_addr_copy((struct in6_addr *)&sel->saddr, &fl6->saddr); sel->dport = xfrm_flowi_dport(fl, &fl6->uli); sel->dport_mask = htons(0xffff); sel->sport = xfrm_flowi_sport(fl, &fl6->uli); diff --git a/trunk/net/irda/af_irda.c b/trunk/net/irda/af_irda.c index bb14c3477680..c24f25ab67d3 100644 --- a/trunk/net/irda/af_irda.c +++ b/trunk/net/irda/af_irda.c @@ -2558,8 +2558,8 @@ static int irda_getsockopt(struct socket *sock, int level, int optname, self->errno = 0; setup_timer(&self->watchdog, irda_discovery_timeout, (unsigned long)self); - mod_timer(&self->watchdog, - jiffies + msecs_to_jiffies(val)); + self->watchdog.expires = jiffies + (val * HZ/1000); + add_timer(&(self->watchdog)); /* Wait for IR-LMP to call us back */ __wait_event_interruptible(self->query_wait, diff --git a/trunk/net/irda/irlan/irlan_common.c b/trunk/net/irda/irlan/irlan_common.c index 579617cca125..779117636270 100644 --- a/trunk/net/irda/irlan/irlan_common.c +++ b/trunk/net/irda/irlan/irlan_common.c @@ -67,7 +67,7 @@ static void *ckey; static void *skey; /* Module parameters */ -static bool eth; /* Use "eth" or "irlan" name for devices */ +static int eth; /* Use "eth" or "irlan" name for devices */ static int access = ACCESS_PEER; /* PEER, DIRECT or HOSTED */ #ifdef CONFIG_PROC_FS diff --git a/trunk/net/irda/irttp.c b/trunk/net/irda/irttp.c index 5c93f2952b08..32e3bb026110 100644 --- a/trunk/net/irda/irttp.c +++ b/trunk/net/irda/irttp.c @@ -1461,12 +1461,14 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance) } /* Allocate a new instance */ - new = kmemdup(orig, sizeof(struct tsap_cb), GFP_ATOMIC); + new = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC); if (!new) { IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__); spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags); return NULL; } + /* Dup */ + memcpy(new, orig, sizeof(struct tsap_cb)); spin_lock_init(&new->lock); /* We don't need the old instance any more */ diff --git a/trunk/net/iucv/af_iucv.c b/trunk/net/iucv/af_iucv.c index d5c5b8fd1d01..274d150320c0 100644 --- a/trunk/net/iucv/af_iucv.c +++ b/trunk/net/iucv/af_iucv.c @@ -130,17 +130,6 @@ static inline void low_nmcpy(unsigned char *dst, char *src) memcpy(&dst[8], src, 8); } -static void iucv_skb_queue_purge(struct sk_buff_head *list) -{ - struct sk_buff *skb; - - while ((skb = skb_dequeue(list)) != NULL) { - if (skb->dev) - dev_put(skb->dev); - kfree_skb(skb); - } -} - static int afiucv_pm_prepare(struct device *dev) { #ifdef CONFIG_PM_DEBUG @@ -175,9 +164,10 @@ static int afiucv_pm_freeze(struct device *dev) read_lock(&iucv_sk_list.lock); sk_for_each(sk, node, &iucv_sk_list.head) { iucv = iucv_sk(sk); - iucv_skb_queue_purge(&iucv->send_skb_q); + skb_queue_purge(&iucv->send_skb_q); skb_queue_purge(&iucv->backlog_skb_q); switch (sk->sk_state) { + case IUCV_SEVERED: case IUCV_DISCONN: case IUCV_CLOSING: case IUCV_CONNECTED: @@ -222,6 +212,7 @@ static int afiucv_pm_restore_thaw(struct device *dev) sk->sk_state_change(sk); break; case IUCV_DISCONN: + case IUCV_SEVERED: case IUCV_CLOSING: case IUCV_LISTEN: case IUCV_BOUND: @@ -375,7 +366,9 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, if (imsg) memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message)); - skb->dev = dev_get_by_index(net, sock->sk_bound_dev_if); + rcu_read_lock(); + skb->dev = dev_get_by_index_rcu(net, sock->sk_bound_dev_if); + 
rcu_read_unlock(); if (!skb->dev) return -ENODEV; if (!(skb->dev->flags & IFF_UP)) @@ -395,7 +388,6 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, err = dev_queue_xmit(skb); if (err) { skb_unlink(nskb, &iucv->send_skb_q); - dev_put(nskb->dev); kfree_skb(nskb); } else { atomic_sub(confirm_recv, &iucv->msg_recv); @@ -404,6 +396,25 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, return err; } +/* Timers */ +static void iucv_sock_timeout(unsigned long arg) +{ + struct sock *sk = (struct sock *)arg; + + bh_lock_sock(sk); + sk->sk_err = ETIMEDOUT; + sk->sk_state_change(sk); + bh_unlock_sock(sk); + + iucv_sock_kill(sk); + sock_put(sk); +} + +static void iucv_sock_clear_timer(struct sock *sk) +{ + sk_stop_timer(sk, &sk->sk_timer); +} + static struct sock *__iucv_get_sock_by_name(char *nm) { struct sock *sk; @@ -456,6 +467,7 @@ static void iucv_sock_close(struct sock *sk) int err, blen; struct sk_buff *skb; + iucv_sock_clear_timer(sk); lock_sock(sk); switch (sk->sk_state) { @@ -469,14 +481,16 @@ static void iucv_sock_close(struct sock *sk) blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN; skb = sock_alloc_send_skb(sk, blen, 1, &err); if (skb) { - skb_reserve(skb, blen); + skb_reserve(skb, + sizeof(struct af_iucv_trans_hdr) + + ETH_HLEN); err = afiucv_hs_send(NULL, sk, skb, AF_IUCV_FLAG_FIN); } sk->sk_state = IUCV_DISCONN; sk->sk_state_change(sk); } - case IUCV_DISCONN: /* fall through */ + case IUCV_DISCONN: sk->sk_state = IUCV_CLOSING; sk->sk_state_change(sk); @@ -506,7 +520,7 @@ static void iucv_sock_close(struct sock *sk) sk->sk_err = ECONNRESET; sk->sk_state_change(sk); - iucv_skb_queue_purge(&iucv->send_skb_q); + skb_queue_purge(&iucv->send_skb_q); skb_queue_purge(&iucv->backlog_skb_q); break; @@ -567,6 +581,8 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio) sk->sk_protocol = proto; sk->sk_state = IUCV_OPEN; + setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk); + iucv_sock_link(&iucv_sk_list, sk); return sk; } @@ -659,12 +675,16 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock) } if (sk->sk_state == IUCV_CONNECTED || - sk->sk_state == IUCV_DISCONN || + sk->sk_state == IUCV_SEVERED || + sk->sk_state == IUCV_DISCONN || /* due to PM restore */ !newsock) { iucv_accept_unlink(sk); if (newsock) sock_graft(sk, newsock); + if (sk->sk_state == IUCV_SEVERED) + sk->sk_state = IUCV_DISCONN; + release_sock(sk); return sk; } @@ -719,7 +739,7 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr, if (!memcmp(dev->perm_addr, uid, 8)) { memcpy(iucv->src_name, sa->siucv_name, 8); memcpy(iucv->src_user_id, sa->siucv_user_id, 8); - sk->sk_bound_dev_if = dev->ifindex; + sock->sk->sk_bound_dev_if = dev->ifindex; sk->sk_state = IUCV_BOUND; iucv->transport = AF_IUCV_TRANS_HIPER; if (!iucv->msglimit) @@ -754,13 +774,16 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr, static int iucv_sock_autobind(struct sock *sk) { struct iucv_sock *iucv = iucv_sk(sk); + char query_buffer[80]; char name[12]; int err = 0; - if (unlikely(!pr_iucv)) + /* Set the userid and name */ + cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err); + if (unlikely(err)) return -EPROTO; - memcpy(iucv->src_user_id, iucv_userid, 8); + memcpy(iucv->src_user_id, query_buffer, 8); write_lock_bh(&iucv_sk_list.lock); @@ -1202,8 +1225,6 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, return len; fail: - if (skb->dev) - dev_put(skb->dev); 
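/*
 * Editor's note: the dev_put() calls removed in the af_iucv hunks here paired
 * with the reference taken by dev_get_by_index().  With the lookup switched to
 * dev_get_by_index_rcu() under rcu_read_lock() (as in the hunk above), no
 * reference is held on skb->dev, so the error paths no longer have anything
 * to drop.
 */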
kfree_skb(skb); out: release_sock(sk); @@ -1336,7 +1357,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, int blen; int err = 0; - if ((sk->sk_state == IUCV_DISCONN) && + if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) && list_empty(&iucv->message_q.list)) @@ -1420,7 +1441,9 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, ETH_HLEN; sskb = sock_alloc_send_skb(sk, blen, 1, &err); if (sskb) { - skb_reserve(sskb, blen); + skb_reserve(sskb, + sizeof(struct af_iucv_trans_hdr) + + ETH_HLEN); err = afiucv_hs_send(NULL, sk, sskb, AF_IUCV_FLAG_WIN); } @@ -1483,7 +1506,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock, if (sk->sk_state == IUCV_CLOSED) mask |= POLLHUP; - if (sk->sk_state == IUCV_DISCONN) + if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) mask |= POLLIN; if (sock_writeable(sk)) @@ -1510,6 +1533,7 @@ static int iucv_sock_shutdown(struct socket *sock, int how) switch (sk->sk_state) { case IUCV_DISCONN: case IUCV_CLOSING: + case IUCV_SEVERED: case IUCV_CLOSED: err = -ENOTCONN; goto fail; @@ -1864,7 +1888,10 @@ static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16]) { struct sock *sk = path->private; - sk->sk_state = IUCV_DISCONN; + if (!list_empty(&iucv_sk(sk)->accept_q)) + sk->sk_state = IUCV_SEVERED; + else + sk->sk_state = IUCV_DISCONN; sk->sk_state_change(sk); } @@ -2024,7 +2051,10 @@ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb) /* other end of connection closed */ if (iucv) { bh_lock_sock(sk); - sk->sk_state = IUCV_DISCONN; + if (!list_empty(&iucv->accept_q)) + sk->sk_state = IUCV_SEVERED; + else + sk->sk_state = IUCV_DISCONN; sk->sk_state_change(sk); bh_unlock_sock(sk); } @@ -2179,8 +2209,6 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, break; case 0: /* plain data frame */ - memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class, - CB_TRGCLS_LEN); err = afiucv_hs_callback_rx(sk, skb); break; default: @@ -2231,7 +2259,6 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb, case TX_NOTIFY_OK: __skb_unlink(this, list); iucv_sock_wake_msglim(sk); - dev_put(this->dev); kfree_skb(this); break; case TX_NOTIFY_PENDING: @@ -2242,7 +2269,6 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb, atomic_dec(&iucv->pendings); if (atomic_read(&iucv->pendings) <= 0) iucv_sock_wake_msglim(sk); - dev_put(this->dev); kfree_skb(this); break; case TX_NOTIFY_UNREACHABLE: @@ -2251,9 +2277,11 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb, case TX_NOTIFY_GENERALERROR: case TX_NOTIFY_DELAYED_GENERALERROR: __skb_unlink(this, list); - dev_put(this->dev); kfree_skb(this); - sk->sk_state = IUCV_DISCONN; + if (!list_empty(&iucv->accept_q)) + sk->sk_state = IUCV_SEVERED; + else + sk->sk_state = IUCV_DISCONN; sk->sk_state_change(sk); break; } @@ -2263,13 +2291,6 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb, } spin_unlock_irqrestore(&list->lock, flags); - if (sk->sk_state == IUCV_CLOSING) { - if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { - sk->sk_state = IUCV_CLOSED; - sk->sk_state_change(sk); - } - } - out_unlock: bh_unlock_sock(sk); } diff --git a/trunk/net/key/af_key.c b/trunk/net/key/af_key.c index 11dbb2255ccb..1e733e9073d0 100644 --- a/trunk/net/key/af_key.c +++ b/trunk/net/key/af_key.c @@ -375,7 +375,7 @@ static int verify_address_len(const void *p) const struct sadb_address *sp = p; const struct 
sockaddr *addr = (const struct sockaddr *)(sp + 1); const struct sockaddr_in *sin; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) const struct sockaddr_in6 *sin6; #endif int len; @@ -387,7 +387,7 @@ static int verify_address_len(const void *p) sp->sadb_address_prefixlen > 32) return -EINVAL; break; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin6), sizeof(uint64_t)); if (sp->sadb_address_len != len || @@ -469,7 +469,7 @@ static int present_and_same_family(const struct sadb_address *src, if (s_addr->sa_family != d_addr->sa_family) return 0; if (s_addr->sa_family != AF_INET -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) && s_addr->sa_family != AF_INET6 #endif ) @@ -579,7 +579,7 @@ static inline int pfkey_sockaddr_len(sa_family_t family) switch (family) { case AF_INET: return sizeof(struct sockaddr_in); -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: return sizeof(struct sockaddr_in6); #endif @@ -595,7 +595,7 @@ int pfkey_sockaddr_extract(const struct sockaddr *sa, xfrm_address_t *xaddr) xaddr->a4 = ((struct sockaddr_in *)sa)->sin_addr.s_addr; return AF_INET; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: memcpy(xaddr->a6, &((struct sockaddr_in6 *)sa)->sin6_addr, @@ -639,7 +639,7 @@ static struct xfrm_state *pfkey_xfrm_state_lookup(struct net *net, const struct case AF_INET: xaddr = (xfrm_address_t *)&((const struct sockaddr_in *)(addr + 1))->sin_addr; break; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: xaddr = (xfrm_address_t *)&((const struct sockaddr_in6 *)(addr + 1))->sin6_addr; break; @@ -705,14 +705,14 @@ static unsigned int pfkey_sockaddr_fill(const xfrm_address_t *xaddr, __be16 port memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); return 32; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa; sin6->sin6_family = AF_INET6; sin6->sin6_port = port; sin6->sin6_flowinfo = 0; - sin6->sin6_addr = *(struct in6_addr *)xaddr->a6; + ipv6_addr_copy(&sin6->sin6_addr, (const struct in6_addr *)xaddr->a6); sin6->sin6_scope_id = 0; return 128; } @@ -1311,7 +1311,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_ xdaddr = (xfrm_address_t *)&((struct sockaddr_in *)(daddr + 1))->sin_addr.s_addr; xsaddr = (xfrm_address_t *)&((struct sockaddr_in *)(saddr + 1))->sin_addr.s_addr; break; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: xdaddr = (xfrm_address_t *)&((struct sockaddr_in6 *)(daddr + 1))->sin6_addr; xsaddr = (xfrm_address_t *)&((struct sockaddr_in6 *)(saddr + 1))->sin6_addr; @@ -3146,7 +3146,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt, return NULL; } break; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: if (opt != IPV6_IPSEC_POLICY) { *dir = -EOPNOTSUPP; diff --git a/trunk/net/mac80211/Kconfig b/trunk/net/mac80211/Kconfig index 96ddb72760b9..7d3b438755f0 100644 --- a/trunk/net/mac80211/Kconfig +++ b/trunk/net/mac80211/Kconfig @@ -247,3 +247,15 @@ config MAC80211_DEBUG_COUNTERS and show them in debugfs. If unsure, say N. 
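# Editor's note: the entry added below re-introduces the optional ftrace-based
# tracer for the mac80211 driver API; the Makefile hunk further down makes
# driver-trace.o conditional on this symbol instead of building it unconditionally.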
+ +config MAC80211_DRIVER_API_TRACER + bool "Driver API tracer" + depends on MAC80211_DEBUG_MENU + depends on EVENT_TRACING + help + Say Y here to make mac80211 register with the ftrace + framework for the driver API -- you can then see which + driver methods it is calling and which API functions + drivers are calling by looking at the trace. + + If unsure, say Y. diff --git a/trunk/net/mac80211/Makefile b/trunk/net/mac80211/Makefile index d540c3b160f3..fdb54e61d637 100644 --- a/trunk/net/mac80211/Makefile +++ b/trunk/net/mac80211/Makefile @@ -24,8 +24,7 @@ mac80211-y := \ util.o \ wme.o \ event.o \ - chan.o \ - driver-trace.o + chan.o mac80211-$(CONFIG_MAC80211_LEDS) += led.o mac80211-$(CONFIG_MAC80211_DEBUGFS) += \ @@ -42,6 +41,7 @@ mac80211-$(CONFIG_MAC80211_MESH) += \ mac80211-$(CONFIG_PM) += pm.o +mac80211-$(CONFIG_MAC80211_DRIVER_API_TRACER) += driver-trace.o CFLAGS_driver-trace.o := -I$(src) # objects for PID algorithm diff --git a/trunk/net/mac80211/agg-rx.c b/trunk/net/mac80211/agg-rx.c index 96debba2c407..93b243422659 100644 --- a/trunk/net/mac80211/agg-rx.c +++ b/trunk/net/mac80211/agg-rx.c @@ -73,11 +73,8 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL); #ifdef CONFIG_MAC80211_HT_DEBUG - printk(KERN_DEBUG - "Rx BA session stop requested for %pM tid %u %s reason: %d\n", - sta->sta.addr, tid, - initiator == WLAN_BACK_RECIPIENT ? "recipient" : "inititator", - (int)reason); + printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n", + sta->sta.addr, tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP, @@ -88,7 +85,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, /* check if this is a self generated aggregation halt */ if (initiator == WLAN_BACK_RECIPIENT && tx) ieee80211_send_delba(sta->sdata, sta->sta.addr, - tid, WLAN_BACK_RECIPIENT, reason); + tid, 0, reason); del_timer_sync(&tid_rx->session_timer); del_timer_sync(&tid_rx->reorder_timer); @@ -112,7 +109,7 @@ void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap, int i; rcu_read_lock(); - sta = sta_info_get_bss(sdata, addr); + sta = sta_info_get(sdata, addr); if (!sta) { rcu_read_unlock(); return; @@ -180,13 +177,10 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d memcpy(mgmt->da, da, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); if (sdata->vif.type == NL80211_IFTYPE_AP || - sdata->vif.type == NL80211_IFTYPE_AP_VLAN || - sdata->vif.type == NL80211_IFTYPE_MESH_POINT) + sdata->vif.type == NL80211_IFTYPE_AP_VLAN) memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); else if (sdata->vif.type == NL80211_IFTYPE_STATION) memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); - else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) - memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); diff --git a/trunk/net/mac80211/agg-tx.c b/trunk/net/mac80211/agg-tx.c index 76be61744198..2e4b961648d4 100644 --- a/trunk/net/mac80211/agg-tx.c +++ b/trunk/net/mac80211/agg-tx.c @@ -55,8 +55,6 @@ * @ampdu_action function will be called with the action * %IEEE80211_AMPDU_TX_STOP. In this case, the call must not fail, * and the driver must later call ieee80211_stop_tx_ba_cb_irqsafe(). - * Note that the sta can get destroyed before the BA tear down is - * complete. 
*/ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, @@ -80,13 +78,10 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, memcpy(mgmt->da, da, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); if (sdata->vif.type == NL80211_IFTYPE_AP || - sdata->vif.type == NL80211_IFTYPE_AP_VLAN || - sdata->vif.type == NL80211_IFTYPE_MESH_POINT) + sdata->vif.type == NL80211_IFTYPE_AP_VLAN) memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); else if (sdata->vif.type == NL80211_IFTYPE_STATION) memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); - else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) - memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); @@ -107,7 +102,7 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, mgmt->u.action.u.addba_req.start_seq_num = cpu_to_le16(start_seq_num << 4); - ieee80211_tx_skb_tid(sdata, skb, tid); + ieee80211_tx_skb(sdata, skb); } void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn) @@ -136,7 +131,7 @@ void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn) bar->start_seq_num = cpu_to_le16(ssn); IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; - ieee80211_tx_skb_tid(sdata, skb, tid); + ieee80211_tx_skb(sdata, skb); } EXPORT_SYMBOL(ieee80211_send_bar); @@ -190,7 +185,6 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, #endif /* CONFIG_MAC80211_HT_DEBUG */ del_timer_sync(&tid_tx->addba_resp_timer); - del_timer_sync(&tid_tx->session_timer); /* * After this packets are no longer handed right through @@ -392,7 +386,6 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) #endif spin_lock_bh(&sta->lock); - sta->ampdu_mlme.last_addba_req_time[tid] = jiffies; sta->ampdu_mlme.addba_req_num[tid]++; spin_unlock_bh(&sta->lock); @@ -403,28 +396,6 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) tid_tx->timeout); } -/* - * After accepting the AddBA Response we activated a timer, - * resetting it after each frame that we send. - */ -static void sta_tx_agg_session_timer_expired(unsigned long data) -{ - /* not an elegant detour, but there is no choice as the timer passes - * only one argument, and various sta_info are needed here, so init - * flow in sta_info_create gives the TID as data, while the timer_to_id - * array gives the sta through container_of */ - u8 *ptid = (u8 *)data; - u8 *timer_to_id = ptid - *ptid; - struct sta_info *sta = container_of(timer_to_id, struct sta_info, - timer_to_tid[0]); - -#ifdef CONFIG_MAC80211_HT_DEBUG - printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid); -#endif - - ieee80211_stop_tx_ba_session(&sta->sta, *ptid); -} - int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, u16 timeout) { @@ -449,11 +420,15 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, pubsta->addr, tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ + /* + * The aggregation code is not prepared to handle + * anything but STA/AP due to the BSSID handling. + * IBSS could work in the code but isn't supported + * by drivers or the standard. 
+ */ if (sdata->vif.type != NL80211_IFTYPE_STATION && - sdata->vif.type != NL80211_IFTYPE_MESH_POINT && sdata->vif.type != NL80211_IFTYPE_AP_VLAN && - sdata->vif.type != NL80211_IFTYPE_AP && - sdata->vif.type != NL80211_IFTYPE_ADHOC) + sdata->vif.type != NL80211_IFTYPE_AP) return -EINVAL; if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) { @@ -464,27 +439,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, return -EINVAL; } - /* - * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a - * member of an IBSS, and has no other existing Block Ack agreement - * with the recipient STA, then the initiating STA shall transmit a - * Probe Request frame to the recipient STA and shall not transmit an - * ADDBA Request frame unless it receives a Probe Response frame - * from the recipient within dot11ADDBAFailureTimeout. - * - * The probe request mechanism for ADDBA is currently not implemented, - * but we only build up Block Ack session with HT STAs. This information - * is set when we receive a bss info from a probe response or a beacon. - */ - if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC && - !sta->sta.ht_cap.ht_supported) { -#ifdef CONFIG_MAC80211_HT_DEBUG - printk(KERN_DEBUG "BA request denied - IBSS STA %pM" - "does not advertise HT support\n", pubsta->addr); -#endif /* CONFIG_MAC80211_HT_DEBUG */ - return -EINVAL; - } - spin_lock_bh(&sta->lock); /* we have tried too many times, receiver does not want A-MPDU */ @@ -493,24 +447,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, goto err_unlock_sta; } - /* - * if we have tried more than HT_AGG_BURST_RETRIES times we - * will spread our requests in time to avoid stalling connection - * for too long - */ - if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES && - time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] + - HT_AGG_RETRIES_PERIOD)) { -#ifdef CONFIG_MAC80211_HT_DEBUG - printk(KERN_DEBUG "BA request denied - " - "waiting a grace period after %d failed requests " - "on tid %u\n", - sta->ampdu_mlme.addba_req_num[tid], tid); -#endif /* CONFIG_MAC80211_HT_DEBUG */ - ret = -EBUSY; - goto err_unlock_sta; - } - tid_tx = rcu_dereference_protected_tid_tx(sta, tid); /* check if the TID is not in aggregation flow already */ if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) { @@ -534,16 +470,11 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, tid_tx->timeout = timeout; - /* response timer */ + /* Tx timer */ tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired; tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid]; init_timer(&tid_tx->addba_resp_timer); - /* tx timer */ - tid_tx->session_timer.function = sta_tx_agg_session_timer_expired; - tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid]; - init_timer(&tid_tx->session_timer); - /* assign a dialog token */ sta->ampdu_mlme.dialog_token_allocator++; tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator; @@ -616,7 +547,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid) } mutex_lock(&local->sta_mtx); - sta = sta_info_get_bss(sdata, ra); + sta = sta_info_get(sdata, ra); if (!sta) { mutex_unlock(&local->sta_mtx); #ifdef CONFIG_MAC80211_HT_DEBUG @@ -745,7 +676,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid) mutex_lock(&local->sta_mtx); - sta = sta_info_get_bss(sdata, ra); + sta = sta_info_get(sdata, ra); if (!sta) { #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Could not find station: 
%pM\n", ra); @@ -883,11 +814,6 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, ieee80211_agg_tx_operational(local, sta, tid); sta->ampdu_mlme.addba_req_num[tid] = 0; - - if (tid_tx->timeout) - mod_timer(&tid_tx->session_timer, - TU_TO_EXP_TIME(tid_tx->timeout)); - } else { ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR, true); diff --git a/trunk/net/mac80211/cfg.c b/trunk/net/mac80211/cfg.c index 850bb96bd680..d06c65fa5526 100644 --- a/trunk/net/mac80211/cfg.c +++ b/trunk/net/mac80211/cfg.c @@ -102,16 +102,6 @@ static int ieee80211_change_iface(struct wiphy *wiphy, return 0; } -static int ieee80211_set_noack_map(struct wiphy *wiphy, - struct net_device *dev, - u16 noack_map) -{ - struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); - - sdata->noack_map = noack_map; - return 0; -} - static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx, bool pairwise, const u8 *mac_addr, struct key_params *params) @@ -355,8 +345,7 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) STATION_INFO_RX_DROP_MISC | STATION_INFO_BSS_PARAM | STATION_INFO_CONNECTED_TIME | - STATION_INFO_STA_FLAGS | - STATION_INFO_BEACON_LOSS_COUNT; + STATION_INFO_STA_FLAGS; do_posix_clock_monotonic_gettime(&uptime); sinfo->connected_time = uptime.tv_sec - sta->last_connected; @@ -369,7 +358,6 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) sinfo->tx_retries = sta->tx_retry_count; sinfo->tx_failed = sta->tx_retry_failed; sinfo->rx_dropped_misc = sta->rx_dropped; - sinfo->beacon_loss_count = sta->beacon_loss_count; if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) || (sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) { @@ -423,8 +411,7 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) | BIT(NL80211_STA_FLAG_WME) | BIT(NL80211_STA_FLAG_MFP) | - BIT(NL80211_STA_FLAG_AUTHENTICATED) | - BIT(NL80211_STA_FLAG_TDLS_PEER); + BIT(NL80211_STA_FLAG_AUTHENTICATED); if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED); if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE)) @@ -435,8 +422,6 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP); if (test_sta_flag(sta, WLAN_STA_AUTH)) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED); - if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) - sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER); } @@ -503,31 +488,6 @@ static void ieee80211_config_ap_ssid(struct ieee80211_sub_if_data *sdata, (params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE); } -static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata, - u8 *resp, size_t resp_len) -{ - struct sk_buff *new, *old; - - if (!resp || !resp_len) - return -EINVAL; - - old = rtnl_dereference(sdata->u.ap.probe_resp); - - new = dev_alloc_skb(resp_len); - if (!new) - return -ENOMEM; - - memcpy(skb_put(new, resp_len), resp, resp_len); - - rcu_assign_pointer(sdata->u.ap.probe_resp, new); - synchronize_rcu(); - - if (old) - dev_kfree_skb(old); - - return 0; -} - /* * This handles both adding a beacon and setting new beacon info */ @@ -538,7 +498,6 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata, int new_head_len, new_tail_len; int size; int err = -EINVAL; - u32 changed = 0; old = rtnl_dereference(sdata->u.ap.beacon); @@ -622,17 +581,11 @@ static int ieee80211_config_beacon(struct 
ieee80211_sub_if_data *sdata, kfree(old); - err = ieee80211_set_probe_resp(sdata, params->probe_resp, - params->probe_resp_len); - if (!err) - changed |= BSS_CHANGED_AP_PROBE_RESP; - ieee80211_config_ap_ssid(sdata, params); - changed |= BSS_CHANGED_BEACON_ENABLED | - BSS_CHANGED_BEACON | - BSS_CHANGED_SSID; - ieee80211_bss_info_change_notify(sdata, changed); + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | + BSS_CHANGED_BEACON | + BSS_CHANGED_SSID); return 0; } @@ -641,8 +594,6 @@ static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev, { struct ieee80211_sub_if_data *sdata; struct beacon_data *old; - struct ieee80211_sub_if_data *vlan; - int ret; sdata = IEEE80211_DEV_TO_SUB_IF(dev); @@ -650,24 +601,7 @@ static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev, if (old) return -EALREADY; - ret = ieee80211_config_beacon(sdata, params); - if (ret) - return ret; - - /* - * Apply control port protocol, this allows us to - * not encrypt dynamic WEP control frames. - */ - sdata->control_port_protocol = params->crypto.control_port_ethertype; - sdata->control_port_no_encrypt = params->crypto.control_port_no_encrypt; - list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) { - vlan->control_port_protocol = - params->crypto.control_port_ethertype; - vlan->control_port_no_encrypt = - params->crypto.control_port_no_encrypt; - } - - return 0; + return ieee80211_config_beacon(sdata, params); } static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev, @@ -748,11 +682,10 @@ static void ieee80211_send_layer2_update(struct sta_info *sta) netif_rx_ni(skb); } -static int sta_apply_parameters(struct ieee80211_local *local, - struct sta_info *sta, - struct station_parameters *params) +static void sta_apply_parameters(struct ieee80211_local *local, + struct sta_info *sta, + struct station_parameters *params) { - int ret = 0; u32 rates; int i, j; struct ieee80211_supported_band *sband; @@ -764,59 +697,13 @@ static int sta_apply_parameters(struct ieee80211_local *local, mask = params->sta_flags_mask; set = params->sta_flags_set; - /* - * In mesh mode, we can clear AUTHENTICATED flag but must - * also make ASSOCIATED follow appropriately for the driver - * API. See also below, after AUTHORIZED changes. 
- */ - if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) { - /* cfg80211 should not allow this in non-mesh modes */ - if (WARN_ON(!ieee80211_vif_is_mesh(&sdata->vif))) - return -EINVAL; - - if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED) && - !test_sta_flag(sta, WLAN_STA_AUTH)) { - ret = sta_info_move_state_checked(sta, - IEEE80211_STA_AUTH); - if (ret) - return ret; - ret = sta_info_move_state_checked(sta, - IEEE80211_STA_ASSOC); - if (ret) - return ret; - } - } - if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) { if (set & BIT(NL80211_STA_FLAG_AUTHORIZED)) - ret = sta_info_move_state_checked(sta, - IEEE80211_STA_AUTHORIZED); + set_sta_flag(sta, WLAN_STA_AUTHORIZED); else - ret = sta_info_move_state_checked(sta, - IEEE80211_STA_ASSOC); - if (ret) - return ret; - } - - if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) { - /* cfg80211 should not allow this in non-mesh modes */ - if (WARN_ON(!ieee80211_vif_is_mesh(&sdata->vif))) - return -EINVAL; - - if (!(set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) && - test_sta_flag(sta, WLAN_STA_AUTH)) { - ret = sta_info_move_state_checked(sta, - IEEE80211_STA_AUTH); - if (ret) - return ret; - ret = sta_info_move_state_checked(sta, - IEEE80211_STA_NONE); - if (ret) - return ret; - } + clear_sta_flag(sta, WLAN_STA_AUTHORIZED); } - if (mask & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) { if (set & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) set_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE); @@ -841,6 +728,13 @@ static int sta_apply_parameters(struct ieee80211_local *local, clear_sta_flag(sta, WLAN_STA_MFP); } + if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) { + if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) + set_sta_flag(sta, WLAN_STA_AUTH); + else + clear_sta_flag(sta, WLAN_STA_AUTH); + } + if (mask & BIT(NL80211_STA_FLAG_TDLS_PEER)) { if (set & BIT(NL80211_STA_FLAG_TDLS_PEER)) set_sta_flag(sta, WLAN_STA_TDLS_PEER); @@ -884,7 +778,7 @@ static int sta_apply_parameters(struct ieee80211_local *local, } if (params->ht_capa) - ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, + ieee80211_ht_cap_ie_to_sta_ht_cap(sband, params->ht_capa, &sta->sta.ht_cap); @@ -912,8 +806,6 @@ static int sta_apply_parameters(struct ieee80211_local *local, } #endif } - - return 0; } static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, @@ -940,25 +832,22 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, if (is_multicast_ether_addr(mac)) return -EINVAL; + /* Only TDLS-supporting stations can add TDLS peers */ + if ((params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) && + !((wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) && + sdata->vif.type == NL80211_IFTYPE_STATION)) + return -ENOTSUPP; + sta = sta_info_alloc(sdata, mac, GFP_KERNEL); if (!sta) return -ENOMEM; - sta_info_move_state(sta, IEEE80211_STA_AUTH); - sta_info_move_state(sta, IEEE80211_STA_ASSOC); + set_sta_flag(sta, WLAN_STA_AUTH); + set_sta_flag(sta, WLAN_STA_ASSOC); - err = sta_apply_parameters(local, sta, params); - if (err) { - sta_info_free(local, sta); - return err; - } + sta_apply_parameters(local, sta, params); - /* - * for TDLS, rate control should be initialized only when supported - * rates are known. 
- */ - if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) - rate_control_rate_init(sta); + rate_control_rate_init(sta); layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN || sdata->vif.type == NL80211_IFTYPE_AP; @@ -1002,19 +891,19 @@ static int ieee80211_change_station(struct wiphy *wiphy, struct sta_info *sta; struct ieee80211_sub_if_data *vlansdata; - mutex_lock(&local->sta_mtx); + rcu_read_lock(); sta = sta_info_get_bss(sdata, mac); if (!sta) { - mutex_unlock(&local->sta_mtx); + rcu_read_unlock(); return -ENOENT; } - /* in station mode, supported rates are only valid with TDLS */ - if (sdata->vif.type == NL80211_IFTYPE_STATION && - params->supported_rates && - !test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { - mutex_unlock(&local->sta_mtx); + /* The TDLS bit cannot be toggled after the STA was added */ + if ((params->sta_flags_mask & BIT(NL80211_STA_FLAG_TDLS_PEER)) && + !!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) != + !!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { + rcu_read_unlock(); return -EINVAL; } @@ -1023,13 +912,13 @@ static int ieee80211_change_station(struct wiphy *wiphy, if (vlansdata->vif.type != NL80211_IFTYPE_AP_VLAN && vlansdata->vif.type != NL80211_IFTYPE_AP) { - mutex_unlock(&local->sta_mtx); + rcu_read_unlock(); return -EINVAL; } if (params->vlan->ieee80211_ptr->use_4addr) { if (vlansdata->u.vlan.sta) { - mutex_unlock(&local->sta_mtx); + rcu_read_unlock(); return -EBUSY; } @@ -1042,10 +931,7 @@ static int ieee80211_change_station(struct wiphy *wiphy, sta_apply_parameters(local, sta, params); - if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && params->supported_rates) - rate_control_rate_init(sta); - - mutex_unlock(&local->sta_mtx); + rcu_read_unlock(); if (sdata->vif.type == NL80211_IFTYPE_STATION && params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) @@ -1237,8 +1123,6 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh, { u8 *new_ie; const u8 *old_ie; - struct ieee80211_sub_if_data *sdata = container_of(ifmsh, - struct ieee80211_sub_if_data, u.mesh); /* allocate information elements */ new_ie = NULL; @@ -1265,10 +1149,6 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh, if (setup->is_secure) ifmsh->security |= IEEE80211_MESH_SEC_SECURED; - /* mcast rate setting in Mesh Node */ - memcpy(sdata->vif.bss_conf.mcast_rate, setup->mcast_rate, - sizeof(setup->mcast_rate)); - return 0; } @@ -1314,9 +1194,6 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy, if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, mask)) conf->dot11MeshHWMPpreqMinInterval = nconf->dot11MeshHWMPpreqMinInterval; - if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, mask)) - conf->dot11MeshHWMPperrMinInterval = - nconf->dot11MeshHWMPperrMinInterval; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, mask)) conf->dot11MeshHWMPnetDiameterTraversalTime = @@ -1517,7 +1394,7 @@ static int ieee80211_set_channel(struct wiphy *wiphy, (old_oper_type != local->_oper_channel_type)) ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); - if (sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR && + if ((sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR) && old_vif_oper_type != sdata->vif.bss_conf.channel_type) ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT); @@ -2040,7 +1917,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev, enum nl80211_channel_type channel_type, bool channel_type_valid, unsigned int wait, const u8 *buf, size_t len, bool no_cck, - bool dont_wait_for_ack, u64 *cookie) + u64 *cookie) { 
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; @@ -2048,15 +1925,10 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev, struct sta_info *sta; struct ieee80211_work *wk; const struct ieee80211_mgmt *mgmt = (void *)buf; - u32 flags; + u32 flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX | + IEEE80211_TX_CTL_REQ_TX_STATUS; bool is_offchan = false; - if (dont_wait_for_ack) - flags = IEEE80211_TX_CTL_NO_ACK; - else - flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX | - IEEE80211_TX_CTL_REQ_TX_STATUS; - /* Check that we are on the requested channel for transmission */ if (chan != local->tmp_channel && chan != local->oper_channel) @@ -2616,82 +2488,6 @@ static int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, return 0; } -static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, - const u8 *peer, u64 *cookie) -{ - struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); - struct ieee80211_local *local = sdata->local; - struct ieee80211_qos_hdr *nullfunc; - struct sk_buff *skb; - int size = sizeof(*nullfunc); - __le16 fc; - bool qos; - struct ieee80211_tx_info *info; - struct sta_info *sta; - - rcu_read_lock(); - sta = sta_info_get(sdata, peer); - if (sta) { - qos = test_sta_flag(sta, WLAN_STA_WME); - rcu_read_unlock(); - } else { - rcu_read_unlock(); - return -ENOLINK; - } - - if (qos) { - fc = cpu_to_le16(IEEE80211_FTYPE_DATA | - IEEE80211_STYPE_QOS_NULLFUNC | - IEEE80211_FCTL_FROMDS); - } else { - size -= 2; - fc = cpu_to_le16(IEEE80211_FTYPE_DATA | - IEEE80211_STYPE_NULLFUNC | - IEEE80211_FCTL_FROMDS); - } - - skb = dev_alloc_skb(local->hw.extra_tx_headroom + size); - if (!skb) - return -ENOMEM; - - skb->dev = dev; - - skb_reserve(skb, local->hw.extra_tx_headroom); - - nullfunc = (void *) skb_put(skb, size); - nullfunc->frame_control = fc; - nullfunc->duration_id = 0; - memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); - memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN); - memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN); - nullfunc->seq_ctrl = 0; - - info = IEEE80211_SKB_CB(skb); - - info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS | - IEEE80211_TX_INTFL_NL80211_FRAME_TX; - - skb_set_queue_mapping(skb, IEEE80211_AC_VO); - skb->priority = 7; - if (qos) - nullfunc->qos_ctrl = cpu_to_le16(7); - - local_bh_disable(); - ieee80211_xmit(sdata, skb); - local_bh_enable(); - - *cookie = (unsigned long) skb; - return 0; -} - -static struct ieee80211_channel * -ieee80211_wiphy_get_channel(struct wiphy *wiphy) -{ - struct ieee80211_local *local = wiphy_priv(wiphy); - - return local->oper_channel; -} - struct cfg80211_ops mac80211_config_ops = { .add_virtual_intf = ieee80211_add_iface, .del_virtual_intf = ieee80211_del_iface, @@ -2757,7 +2553,4 @@ struct cfg80211_ops mac80211_config_ops = { .set_rekey_data = ieee80211_set_rekey_data, .tdls_oper = ieee80211_tdls_oper, .tdls_mgmt = ieee80211_tdls_mgmt, - .probe_client = ieee80211_probe_client, - .get_channel = ieee80211_wiphy_get_channel, - .set_noack_map = ieee80211_set_noack_map, }; diff --git a/trunk/net/mac80211/debugfs.c b/trunk/net/mac80211/debugfs.c index 90baea53e7c5..883996b2f99f 100644 --- a/trunk/net/mac80211/debugfs.c +++ b/trunk/net/mac80211/debugfs.c @@ -97,6 +97,40 @@ static const struct file_operations reset_ops = { .llseek = noop_llseek, }; +static ssize_t noack_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + + return 
mac80211_format_buffer(user_buf, count, ppos, "%d\n", + local->wifi_wme_noack_test); +} + +static ssize_t noack_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + char buf[10]; + size_t len; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + buf[len] = '\0'; + + local->wifi_wme_noack_test = !!simple_strtoul(buf, NULL, 0); + + return count; +} + +static const struct file_operations noack_ops = { + .read = noack_read, + .write = noack_write, + .open = mac80211_open_file_generic, + .llseek = default_llseek, +}; + static ssize_t uapsd_queues_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -156,7 +190,7 @@ static ssize_t uapsd_max_sp_len_write(struct file *file, return -EFAULT; buf[len] = '\0'; - ret = kstrtoul(buf, 0, &val); + ret = strict_strtoul(buf, 0, &val); if (ret) return -EINVAL; @@ -364,6 +398,7 @@ void debugfs_hw_add(struct ieee80211_local *local) DEBUGFS_ADD(wep_iv); DEBUGFS_ADD(queues); DEBUGFS_ADD_MODE(reset, 0200); + DEBUGFS_ADD(noack); DEBUGFS_ADD(uapsd_queues); DEBUGFS_ADD(uapsd_max_sp_len); DEBUGFS_ADD(channel_type); diff --git a/trunk/net/mac80211/debugfs_netdev.c b/trunk/net/mac80211/debugfs_netdev.c index 176c08ffb13c..9352819a986b 100644 --- a/trunk/net/mac80211/debugfs_netdev.c +++ b/trunk/net/mac80211/debugfs_netdev.c @@ -321,7 +321,6 @@ static ssize_t ieee80211_if_parse_tkip_mic_test( __IEEE80211_IF_FILE_W(tkip_mic_test); /* AP attributes */ -IEEE80211_IF_FILE(num_sta_authorized, u.ap.num_sta_authorized, ATOMIC); IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC); @@ -406,8 +405,6 @@ IEEE80211_IF_FILE(dot11MeshHWMPactivePathTimeout, u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC); IEEE80211_IF_FILE(dot11MeshHWMPpreqMinInterval, u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC); -IEEE80211_IF_FILE(dot11MeshHWMPperrMinInterval, - u.mesh.mshcfg.dot11MeshHWMPperrMinInterval, DEC); IEEE80211_IF_FILE(dot11MeshHWMPnetDiameterTraversalTime, u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC); IEEE80211_IF_FILE(dot11MeshHWMPmaxPREQretries, @@ -459,7 +456,6 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata) DEBUGFS_ADD(rc_rateidx_mask_2ghz); DEBUGFS_ADD(rc_rateidx_mask_5ghz); - DEBUGFS_ADD(num_sta_authorized); DEBUGFS_ADD(num_sta_ps); DEBUGFS_ADD(dtim_count); DEBUGFS_ADD(num_buffered_multicast); @@ -538,7 +534,6 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata) MESHPARAMS_ADD(dot11MeshMaxPeerLinks); MESHPARAMS_ADD(dot11MeshHWMPactivePathTimeout); MESHPARAMS_ADD(dot11MeshHWMPpreqMinInterval); - MESHPARAMS_ADD(dot11MeshHWMPperrMinInterval); MESHPARAMS_ADD(dot11MeshHWMPnetDiameterTraversalTime); MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries); MESHPARAMS_ADD(path_refresh_time); diff --git a/trunk/net/mac80211/debugfs_sta.c b/trunk/net/mac80211/debugfs_sta.c index 2406b3e7393f..3110cbdc501b 100644 --- a/trunk/net/mac80211/debugfs_sta.c +++ b/trunk/net/mac80211/debugfs_sta.c @@ -63,10 +63,10 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf, test_sta_flag(sta, WLAN_STA_##flg) ? 
#flg "\n" : "" int res = scnprintf(buf, sizeof(buf), - "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", + "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", TEST(AUTH), TEST(ASSOC), TEST(PS_STA), TEST(PS_DRIVER), TEST(AUTHORIZED), - TEST(SHORT_PREAMBLE), + TEST(SHORT_PREAMBLE), TEST(ASSOC_AP), TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT), TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL), TEST(UAPSD), TEST(SP), TEST(TDLS_PEER), diff --git a/trunk/net/mac80211/driver-ops.h b/trunk/net/mac80211/driver-ops.h index e8960ae39861..5f165d7eb2db 100644 --- a/trunk/net/mac80211/driver-ops.h +++ b/trunk/net/mac80211/driver-ops.h @@ -5,34 +5,11 @@ #include "ieee80211_i.h" #include "driver-trace.h" -static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata) -{ - WARN_ON(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER)); -} - -static inline struct ieee80211_sub_if_data * -get_bss_sdata(struct ieee80211_sub_if_data *sdata) -{ - if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) - sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, - u.ap); - - return sdata; -} - static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb) { local->ops->tx(&local->hw, skb); } -static inline void drv_tx_frags(struct ieee80211_local *local, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct sk_buff_head *skbs) -{ - local->ops->tx_frags(&local->hw, vif, sta, skbs); -} - static inline int drv_start(struct ieee80211_local *local) { int ret; @@ -92,23 +69,15 @@ static inline int drv_resume(struct ieee80211_local *local) #endif static inline int drv_add_interface(struct ieee80211_local *local, - struct ieee80211_sub_if_data *sdata) + struct ieee80211_vif *vif) { int ret; might_sleep(); - if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN || - sdata->vif.type == NL80211_IFTYPE_MONITOR)) - return -EINVAL; - - trace_drv_add_interface(local, sdata); - ret = local->ops->add_interface(&local->hw, &sdata->vif); + trace_drv_add_interface(local, vif_to_sdata(vif)); + ret = local->ops->add_interface(&local->hw, vif); trace_drv_return_int(local, ret); - - if (ret == 0) - sdata->flags |= IEEE80211_SDATA_IN_DRIVER; - return ret; } @@ -120,8 +89,6 @@ static inline int drv_change_interface(struct ieee80211_local *local, might_sleep(); - check_sdata_in_driver(sdata); - trace_drv_change_interface(local, sdata, type, p2p); ret = local->ops->change_interface(&local->hw, &sdata->vif, type, p2p); trace_drv_return_int(local, ret); @@ -129,15 +96,12 @@ static inline int drv_change_interface(struct ieee80211_local *local, } static inline void drv_remove_interface(struct ieee80211_local *local, - struct ieee80211_sub_if_data *sdata) + struct ieee80211_vif *vif) { might_sleep(); - check_sdata_in_driver(sdata); - - trace_drv_remove_interface(local, sdata); - local->ops->remove_interface(&local->hw, &sdata->vif); - sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER; + trace_drv_remove_interface(local, vif_to_sdata(vif)); + local->ops->remove_interface(&local->hw, vif); trace_drv_return_void(local); } @@ -160,8 +124,6 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local, { might_sleep(); - check_sdata_in_driver(sdata); - trace_drv_bss_info_changed(local, sdata, info, changed); if (local->ops->bss_info_changed) local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed); @@ -177,8 +139,6 @@ static inline int drv_tx_sync(struct ieee80211_local *local, might_sleep(); - check_sdata_in_driver(sdata); - trace_drv_tx_sync(local, sdata, bssid, type); if (local->ops->tx_sync) ret = local->ops->tx_sync(&local->hw, &sdata->vif, 
@@ -194,8 +154,6 @@ static inline void drv_finish_tx_sync(struct ieee80211_local *local, { might_sleep(); - check_sdata_in_driver(sdata); - trace_drv_finish_tx_sync(local, sdata, bssid, type); if (local->ops->finish_tx_sync) local->ops->finish_tx_sync(&local->hw, &sdata->vif, @@ -253,8 +211,6 @@ static inline int drv_set_key(struct ieee80211_local *local, might_sleep(); - check_sdata_in_driver(sdata); - trace_drv_set_key(local, cmd, sdata, sta, key); ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key); trace_drv_return_int(local, ret); @@ -272,8 +228,6 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local, if (sta) ista = &sta->sta; - check_sdata_in_driver(sdata); - trace_drv_update_tkip_key(local, sdata, conf, ista, iv32); if (local->ops->update_tkip_key) local->ops->update_tkip_key(&local->hw, &sdata->vif, conf, @@ -289,8 +243,6 @@ static inline int drv_hw_scan(struct ieee80211_local *local, might_sleep(); - check_sdata_in_driver(sdata); - trace_drv_hw_scan(local, sdata); ret = local->ops->hw_scan(&local->hw, &sdata->vif, req); trace_drv_return_int(local, ret); @@ -302,8 +254,6 @@ static inline void drv_cancel_hw_scan(struct ieee80211_local *local, { might_sleep(); - check_sdata_in_driver(sdata); - trace_drv_cancel_hw_scan(local, sdata); local->ops->cancel_hw_scan(&local->hw, &sdata->vif); trace_drv_return_void(local); @@ -319,8 +269,6 @@ drv_sched_scan_start(struct ieee80211_local *local, might_sleep(); - check_sdata_in_driver(sdata); - trace_drv_sched_scan_start(local, sdata); ret = local->ops->sched_scan_start(&local->hw, &sdata->vif, req, ies); @@ -333,8 +281,6 @@ static inline void drv_sched_scan_stop(struct ieee80211_local *local, { might_sleep(); - check_sdata_in_driver(sdata); - trace_drv_sched_scan_stop(local, sdata); local->ops->sched_scan_stop(&local->hw, &sdata->vif); trace_drv_return_void(local); @@ -431,9 +377,6 @@ static inline void drv_sta_notify(struct ieee80211_local *local, enum sta_notify_cmd cmd, struct ieee80211_sta *sta) { - sdata = get_bss_sdata(sdata); - check_sdata_in_driver(sdata); - trace_drv_sta_notify(local, sdata, cmd, sta); if (local->ops->sta_notify) local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta); @@ -448,9 +391,6 @@ static inline int drv_sta_add(struct ieee80211_local *local, might_sleep(); - sdata = get_bss_sdata(sdata); - check_sdata_in_driver(sdata); - trace_drv_sta_add(local, sdata, sta); if (local->ops->sta_add) ret = local->ops->sta_add(&local->hw, &sdata->vif, sta); @@ -466,9 +406,6 @@ static inline void drv_sta_remove(struct ieee80211_local *local, { might_sleep(); - sdata = get_bss_sdata(sdata); - check_sdata_in_driver(sdata); - trace_drv_sta_remove(local, sdata, sta); if (local->ops->sta_remove) local->ops->sta_remove(&local->hw, &sdata->vif, sta); @@ -484,8 +421,6 @@ static inline int drv_conf_tx(struct ieee80211_local *local, might_sleep(); - check_sdata_in_driver(sdata); - trace_drv_conf_tx(local, sdata, queue, params); if (local->ops->conf_tx) ret = local->ops->conf_tx(&local->hw, &sdata->vif, @@ -501,8 +436,6 @@ static inline u64 drv_get_tsf(struct ieee80211_local *local, might_sleep(); - check_sdata_in_driver(sdata); - trace_drv_get_tsf(local, sdata); if (local->ops->get_tsf) ret = local->ops->get_tsf(&local->hw, &sdata->vif); @@ -516,8 +449,6 @@ static inline void drv_set_tsf(struct ieee80211_local *local, { might_sleep(); - check_sdata_in_driver(sdata); - trace_drv_set_tsf(local, sdata, tsf); if (local->ops->set_tsf) local->ops->set_tsf(&local->hw, &sdata->vif, tsf); @@ -529,8 +460,6 @@ static 
inline void drv_reset_tsf(struct ieee80211_local *local, { might_sleep(); - check_sdata_in_driver(sdata); - trace_drv_reset_tsf(local, sdata); if (local->ops->reset_tsf) local->ops->reset_tsf(&local->hw, &sdata->vif); @@ -560,9 +489,6 @@ static inline int drv_ampdu_action(struct ieee80211_local *local, might_sleep(); - sdata = get_bss_sdata(sdata); - check_sdata_in_driver(sdata); - trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, buf_size); if (local->ops->ampdu_action) @@ -718,8 +644,6 @@ static inline int drv_set_bitrate_mask(struct ieee80211_local *local, might_sleep(); - check_sdata_in_driver(sdata); - trace_drv_set_bitrate_mask(local, sdata, mask); if (local->ops->set_bitrate_mask) ret = local->ops->set_bitrate_mask(&local->hw, @@ -733,8 +657,6 @@ static inline void drv_set_rekey_data(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct cfg80211_gtk_rekey_data *data) { - check_sdata_in_driver(sdata); - trace_drv_set_rekey_data(local, sdata, data); if (local->ops->set_rekey_data) local->ops->set_rekey_data(&local->hw, &sdata->vif, data); diff --git a/trunk/net/mac80211/driver-trace.h b/trunk/net/mac80211/driver-trace.h index 6e9df8fd8fb8..2af4fca55337 100644 --- a/trunk/net/mac80211/driver-trace.h +++ b/trunk/net/mac80211/driver-trace.h @@ -5,6 +5,17 @@ #include #include "ieee80211_i.h" +#if !defined(CONFIG_MAC80211_DRIVER_API_TRACER) || defined(__CHECKER__) +#undef TRACE_EVENT +#define TRACE_EVENT(name, proto, ...) \ +static inline void trace_ ## name(proto) {} +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(...) +#undef DEFINE_EVENT +#define DEFINE_EVENT(evt_class, name, proto, ...) \ +static inline void trace_ ## name(proto) {} +#endif + #undef TRACE_SYSTEM #define TRACE_SYSTEM mac80211 diff --git a/trunk/net/mac80211/ht.c b/trunk/net/mac80211/ht.c index f25fff7607d8..f0fb737efa86 100644 --- a/trunk/net/mac80211/ht.c +++ b/trunk/net/mac80211/ht.c @@ -19,84 +19,7 @@ #include "ieee80211_i.h" #include "rate.h" -bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata) -{ - const __le16 flg = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40); - if ((sdata->u.mgd.ht_capa_mask.cap_info & flg) && - !(sdata->u.mgd.ht_capa.cap_info & flg)) - return true; - return false; -} - -static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata, - struct ieee80211_sta_ht_cap *ht_cap, - u16 flag) -{ - __le16 le_flag = cpu_to_le16(flag); - if (sdata->u.mgd.ht_capa_mask.cap_info & le_flag) { - if (!(sdata->u.mgd.ht_capa.cap_info & le_flag)) - ht_cap->cap &= ~flag; - } -} - -void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, - struct ieee80211_sta_ht_cap *ht_cap) -{ - u8 *scaps = (u8 *)(&sdata->u.mgd.ht_capa.mcs.rx_mask); - u8 *smask = (u8 *)(&sdata->u.mgd.ht_capa_mask.mcs.rx_mask); - int i; - - if (sdata->vif.type != NL80211_IFTYPE_STATION) { - /* AP interfaces call this code when adding new stations, - * so just silently ignore non station interfaces. - */ - return; - } - - /* NOTE: If you add more over-rides here, update register_hw - * ht_capa_mod_msk logic in main.c as well. - * And, if this method can ever change ht_cap.ht_supported, fix - * the check in ieee80211_add_ht_ie. - */ - - /* check for HT over-rides, MCS rates first. */ - for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) { - u8 m = smask[i]; - ht_cap->mcs.rx_mask[i] &= ~m; /* turn off all masked bits */ - /* Add back rates that are supported */ - ht_cap->mcs.rx_mask[i] |= (m & scaps[i]); - } - - /* Force removal of HT-40 capabilities? 
*/ - __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SUP_WIDTH_20_40); - __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SGI_40); - - /* Allow user to disable the max-AMSDU bit. */ - __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_MAX_AMSDU); - - /* Allow user to decrease AMPDU factor */ - if (sdata->u.mgd.ht_capa_mask.ampdu_params_info & - IEEE80211_HT_AMPDU_PARM_FACTOR) { - u8 n = sdata->u.mgd.ht_capa.ampdu_params_info - & IEEE80211_HT_AMPDU_PARM_FACTOR; - if (n < ht_cap->ampdu_factor) - ht_cap->ampdu_factor = n; - } - - /* Allow the user to increase AMPDU density. */ - if (sdata->u.mgd.ht_capa_mask.ampdu_params_info & - IEEE80211_HT_AMPDU_PARM_DENSITY) { - u8 n = (sdata->u.mgd.ht_capa.ampdu_params_info & - IEEE80211_HT_AMPDU_PARM_DENSITY) - >> IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT; - if (n > ht_cap->ampdu_density) - ht_cap->ampdu_density = n; - } -} - - -void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, - struct ieee80211_supported_band *sband, +void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband, struct ieee80211_ht_cap *ht_cap_ie, struct ieee80211_sta_ht_cap *ht_cap) { @@ -180,12 +103,6 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, /* handle MCS rate 32 too */ if (sband->ht_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1) ht_cap->mcs.rx_mask[32/8] |= 1; - - /* - * If user has specified capability over-rides, take care - * of that here. - */ - ieee80211_apply_htcap_overrides(sdata, ht_cap); } void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, bool tx) @@ -279,13 +196,10 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, memcpy(mgmt->da, da, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); if (sdata->vif.type == NL80211_IFTYPE_AP || - sdata->vif.type == NL80211_IFTYPE_AP_VLAN || - sdata->vif.type == NL80211_IFTYPE_MESH_POINT) + sdata->vif.type == NL80211_IFTYPE_AP_VLAN) memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); else if (sdata->vif.type == NL80211_IFTYPE_STATION) memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); - else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) - memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); @@ -300,7 +214,7 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, mgmt->u.action.u.delba.params = cpu_to_le16(params); mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code); - ieee80211_tx_skb_tid(sdata, skb, tid); + ieee80211_tx_skb(sdata, skb); } void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, diff --git a/trunk/net/mac80211/ibss.c b/trunk/net/mac80211/ibss.c index f8a32bf98216..ede9a8b341ac 100644 --- a/trunk/net/mac80211/ibss.c +++ b/trunk/net/mac80211/ibss.c @@ -77,7 +77,6 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, struct cfg80211_bss *bss; u32 bss_change; u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; - enum nl80211_channel_type channel_type; lockdep_assert_held(&ifibss->mtx); @@ -98,7 +97,6 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, /* if merging, indicate to driver that we leave the old IBSS */ if (sdata->vif.bss_conf.ibss_joined) { sdata->vif.bss_conf.ibss_joined = false; - netif_carrier_off(sdata->dev); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IBSS); } @@ -106,16 +104,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 
1 : 0; - channel_type = ifibss->channel_type; - if (channel_type > NL80211_CHAN_HT20 && - !cfg80211_can_beacon_sec_chan(local->hw.wiphy, chan, channel_type)) - channel_type = NL80211_CHAN_HT20; - if (!ieee80211_set_channel_type(local, sdata, channel_type)) { - /* can only fail due to HT40+/- mismatch */ - channel_type = NL80211_CHAN_HT20; - WARN_ON(!ieee80211_set_channel_type(local, sdata, - NL80211_CHAN_HT20)); - } + local->oper_channel = chan; + WARN_ON(!ieee80211_set_channel_type(local, sdata, NL80211_CHAN_NO_HT)); ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); sband = local->hw.wiphy->bands[chan->band]; @@ -181,19 +171,6 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, memcpy(skb_put(skb, ifibss->ie_len), ifibss->ie, ifibss->ie_len); - /* add HT capability and information IEs */ - if (channel_type && sband->ht_cap.ht_supported) { - pos = skb_put(skb, 4 + - sizeof(struct ieee80211_ht_cap) + - sizeof(struct ieee80211_ht_info)); - pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, - sband->ht_cap.cap); - pos = ieee80211_ie_build_ht_info(pos, - &sband->ht_cap, - chan, - channel_type); - } - if (local->hw.queues >= 4) { pos = skb_put(skb, 9); *pos++ = WLAN_EID_VENDOR_SPECIFIC; @@ -217,7 +194,6 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, bss_change |= BSS_CHANGED_BEACON; bss_change |= BSS_CHANGED_BEACON_ENABLED; bss_change |= BSS_CHANGED_BASIC_RATES; - bss_change |= BSS_CHANGED_HT; bss_change |= BSS_CHANGED_IBSS; sdata->vif.bss_conf.ibss_joined = true; ieee80211_bss_info_change_notify(sdata, bss_change); @@ -231,7 +207,6 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, bss = cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel, mgmt, skb->len, 0, GFP_KERNEL); cfg80211_put_bss(bss); - netif_carrier_on(sdata->dev); cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL); } @@ -275,80 +250,6 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, cbss->tsf); } -static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta) - __acquires(RCU) -{ - struct ieee80211_sub_if_data *sdata = sta->sdata; - u8 addr[ETH_ALEN]; - - memcpy(addr, sta->sta.addr, ETH_ALEN); - -#ifdef CONFIG_MAC80211_VERBOSE_DEBUG - wiphy_debug(sdata->local->hw.wiphy, - "Adding new IBSS station %pM (dev=%s)\n", - addr, sdata->name); -#endif - - sta_info_move_state(sta, IEEE80211_STA_AUTH); - sta_info_move_state(sta, IEEE80211_STA_ASSOC); - sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); - - rate_control_rate_init(sta); - - /* If it fails, maybe we raced another insertion? */ - if (sta_info_insert_rcu(sta)) - return sta_info_get(sdata, addr); - return sta; -} - -static struct sta_info * -ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, - const u8 *bssid, const u8 *addr, - u32 supp_rates) - __acquires(RCU) -{ - struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; - struct ieee80211_local *local = sdata->local; - struct sta_info *sta; - int band = local->hw.conf.channel->band; - - /* - * XXX: Consider removing the least recently used entry and - * allow new one to be added. 
- */ - if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { - if (net_ratelimit()) - printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n", - sdata->name, addr); - rcu_read_lock(); - return NULL; - } - - if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH) { - rcu_read_lock(); - return NULL; - } - - if (compare_ether_addr(bssid, sdata->u.ibss.bssid)) { - rcu_read_lock(); - return NULL; - } - - sta = sta_info_alloc(sdata, addr, GFP_KERNEL); - if (!sta) { - rcu_read_lock(); - return NULL; - } - - sta->last_rx = jiffies; - - /* make sure mandatory rates are always added */ - sta->sta.supp_rates[band] = supp_rates | - ieee80211_mandatory_rates(local, band); - - return ieee80211_ibss_finish_sta(sta); -} - static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, @@ -365,8 +266,6 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, u64 beacon_timestamp, rx_timestamp; u32 supp_rates = 0; enum ieee80211_band band = rx_status->band; - struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band]; - bool rates_updated = false; if (elems->ds_params && elems->ds_params_len == 1) freq = ieee80211_channel_to_frequency(elems->ds_params[0], @@ -406,51 +305,17 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, prev_rates, sta->sta.supp_rates[band]); #endif - rates_updated = true; + rate_control_rate_init(sta); } - } else { - rcu_read_unlock(); + } else sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid, - mgmt->sa, supp_rates); - } + mgmt->sa, supp_rates, + GFP_ATOMIC); } if (sta && elems->wmm_info) set_sta_flag(sta, WLAN_STA_WME); - if (sta && elems->ht_info_elem && elems->ht_cap_elem && - sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) { - /* we both use HT */ - struct ieee80211_sta_ht_cap sta_ht_cap_new; - enum nl80211_channel_type channel_type = - ieee80211_ht_info_to_channel_type( - elems->ht_info_elem); - - ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, - elems->ht_cap_elem, - &sta_ht_cap_new); - - /* - * fall back to HT20 if we don't use or use - * the other extension channel - */ - if ((channel_type == NL80211_CHAN_HT40MINUS || - channel_type == NL80211_CHAN_HT40PLUS) && - channel_type != sdata->u.ibss.channel_type) - sta_ht_cap_new.cap &= - ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; - - if (memcmp(&sta->sta.ht_cap, &sta_ht_cap_new, - sizeof(sta_ht_cap_new))) { - memcpy(&sta->sta.ht_cap, &sta_ht_cap_new, - sizeof(sta_ht_cap_new)); - rates_updated = true; - } - } - - if (sta && rates_updated) - rate_control_rate_init(sta); - rcu_read_unlock(); } @@ -539,17 +404,21 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, ieee80211_sta_join_ibss(sdata, bss); supp_rates = ieee80211_sta_get_rates(local, elems, band); ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, - supp_rates); - rcu_read_unlock(); + supp_rates, GFP_KERNEL); } put_bss: ieee80211_rx_bss_put(local, bss); } -void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, - const u8 *bssid, const u8 *addr, - u32 supp_rates) +/* + * Add a new IBSS station, will also be called by the RX code when, + * in IBSS mode, receiving a frame from a yet-unknown station, hence + * must be callable in atomic context. 
+ */ +struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, + u8 *bssid, u8 *addr, u32 supp_rates, + gfp_t gfp) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; @@ -564,29 +433,37 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, if (net_ratelimit()) printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n", sdata->name, addr); - return; + return NULL; } if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH) - return; + return NULL; if (compare_ether_addr(bssid, sdata->u.ibss.bssid)) - return; + return NULL; + +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + wiphy_debug(local->hw.wiphy, "Adding new IBSS station %pM (dev=%s)\n", + addr, sdata->name); +#endif - sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); + sta = sta_info_alloc(sdata, addr, gfp); if (!sta) - return; + return NULL; sta->last_rx = jiffies; + set_sta_flag(sta, WLAN_STA_AUTHORIZED); /* make sure mandatory rates are always added */ sta->sta.supp_rates[band] = supp_rates | ieee80211_mandatory_rates(local, band); - spin_lock(&ifibss->incomplete_lock); - list_add(&sta->list, &ifibss->incomplete_stations); - spin_unlock(&ifibss->incomplete_lock); - ieee80211_queue_work(&local->hw, &sdata->work); + rate_control_rate_init(sta); + + /* If it fails, maybe we raced another insertion? */ + if (sta_info_insert(sta)) + return sta_info_get(sdata, addr); + return sta; } static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata) @@ -925,7 +802,6 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; - struct sta_info *sta; mutex_lock(&ifibss->mtx); @@ -937,19 +813,6 @@ void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata) if (!ifibss->ssid_len) goto out; - spin_lock_bh(&ifibss->incomplete_lock); - while (!list_empty(&ifibss->incomplete_stations)) { - sta = list_first_entry(&ifibss->incomplete_stations, - struct sta_info, list); - list_del(&sta->list); - spin_unlock_bh(&ifibss->incomplete_lock); - - ieee80211_ibss_finish_sta(sta); - rcu_read_unlock(); - spin_lock_bh(&ifibss->incomplete_lock); - } - spin_unlock_bh(&ifibss->incomplete_lock); - switch (ifibss->state) { case IEEE80211_IBSS_MLME_SEARCH: ieee80211_sta_find_ibss(sdata); @@ -1008,8 +871,6 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata) setup_timer(&ifibss->timer, ieee80211_ibss_timer, (unsigned long) sdata); mutex_init(&ifibss->mtx); - INIT_LIST_HEAD(&ifibss->incomplete_stations); - spin_lock_init(&ifibss->incomplete_lock); } /* scan finished notification */ @@ -1033,18 +894,12 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, struct cfg80211_ibss_params *params) { struct sk_buff *skb; - u32 changed = 0; skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom + - sizeof(struct ieee80211_hdr_3addr) + - 12 /* struct ieee80211_mgmt.u.beacon */ + - 2 + IEEE80211_MAX_SSID_LEN /* max SSID */ + - 2 + 8 /* max Supported Rates */ + - 3 /* max DS params */ + - 4 /* IBSS params */ + - 2 + (IEEE80211_MAX_SUPP_RATES - 8) + - 2 + sizeof(struct ieee80211_ht_cap) + - 2 + sizeof(struct ieee80211_ht_info) + + 36 /* bitrates */ + + 34 /* SSID */ + + 3 /* DS params */ + + 4 /* IBSS params */ + params->ie_len); if (!skb) return -ENOMEM; @@ -1065,18 +920,13 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, sdata->vif.bss_conf.beacon_int = params->beacon_interval; sdata->u.ibss.channel = params->channel; - 
sdata->u.ibss.channel_type = params->channel_type; sdata->u.ibss.fixed_channel = params->channel_fixed; /* fix ourselves to that channel now already */ if (params->channel_fixed) { sdata->local->oper_channel = params->channel; - if (!ieee80211_set_channel_type(sdata->local, sdata, - params->channel_type)) { - mutex_unlock(&sdata->u.ibss.mtx); - kfree_skb(skb); - return -EINVAL; - } + WARN_ON(!ieee80211_set_channel_type(sdata->local, sdata, + NL80211_CHAN_NO_HT)); } if (params->ie) { @@ -1099,23 +949,6 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, ieee80211_recalc_idle(sdata->local); mutex_unlock(&sdata->local->mtx); - /* - * 802.11n-2009 9.13.3.1: In an IBSS, the HT Protection field is - * reserved, but an HT STA shall protect HT transmissions as though - * the HT Protection field were set to non-HT mixed mode. - * - * In an IBSS, the RIFS Mode field of the HT Operation element is - * also reserved, but an HT STA shall operate as though this field - * were set to 1. - */ - - sdata->vif.bss_conf.ht_operation_mode |= - IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED - | IEEE80211_HT_PARAM_RIFS_MODE; - - changed |= BSS_CHANGED_HT; - ieee80211_bss_info_change_notify(sdata, changed); - ieee80211_queue_work(&sdata->local->hw, &sdata->work); return 0; @@ -1129,7 +962,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) struct cfg80211_bss *cbss; u16 capability; int active_ibss; - struct sta_info *sta; mutex_lock(&sdata->u.ibss.mtx); @@ -1159,20 +991,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) sta_info_flush(sdata->local, sdata); - spin_lock_bh(&ifibss->incomplete_lock); - while (!list_empty(&ifibss->incomplete_stations)) { - sta = list_first_entry(&ifibss->incomplete_stations, - struct sta_info, list); - list_del(&sta->list); - spin_unlock_bh(&ifibss->incomplete_lock); - - sta_info_free(local, sta); - spin_lock_bh(&ifibss->incomplete_lock); - } - spin_unlock_bh(&ifibss->incomplete_lock); - - netif_carrier_off(sdata->dev); - /* remove beacon */ kfree(sdata->u.ibss.ie); skb = rcu_dereference_protected(sdata->u.ibss.presp, diff --git a/trunk/net/mac80211/ieee80211_i.h b/trunk/net/mac80211/ieee80211_i.h index 2f0642d9e154..ea10a51babda 100644 --- a/trunk/net/mac80211/ieee80211_i.h +++ b/trunk/net/mac80211/ieee80211_i.h @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include @@ -142,7 +141,6 @@ typedef unsigned __bitwise__ ieee80211_tx_result; struct ieee80211_tx_data { struct sk_buff *skb; - struct sk_buff_head skbs; struct ieee80211_local *local; struct ieee80211_sub_if_data *sdata; struct sta_info *sta; @@ -186,15 +184,12 @@ enum ieee80211_packet_rx_flags { * enum ieee80211_rx_flags - RX data flags * * @IEEE80211_RX_CMNTR: received on cooked monitor already - * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported - * to cfg80211_report_obss_beacon(). * * These flags are used across handling multiple interfaces * for a single frame. 
*/ enum ieee80211_rx_flags { IEEE80211_RX_CMNTR = BIT(0), - IEEE80211_RX_BEACON_REPORTED = BIT(1), }; struct ieee80211_rx_data { @@ -233,7 +228,6 @@ struct beacon_data { struct ieee80211_if_ap { struct beacon_data __rcu *beacon; - struct sk_buff __rcu *probe_resp; struct list_head vlans; @@ -243,7 +237,6 @@ struct ieee80211_if_ap { u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)]; struct sk_buff_head ps_bc_buf; atomic_t num_sta_ps; /* number of stations in PS mode */ - atomic_t num_sta_authorized; /* number of authorized stations */ int dtim_count; bool dtim_bc_mc; }; @@ -450,9 +443,6 @@ struct ieee80211_if_managed { */ int rssi_min_thold, rssi_max_thold; int last_ave_beacon_signal; - - struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */ - struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */ }; struct ieee80211_if_ibss { @@ -475,16 +465,12 @@ struct ieee80211_if_ibss { u8 ssid_len, ie_len; u8 *ie; struct ieee80211_channel *channel; - enum nl80211_channel_type channel_type; unsigned long ibss_join_req; /* probe response/beacon for IBSS */ struct sk_buff __rcu *presp; struct sk_buff *skb; - spinlock_t incomplete_lock; - struct list_head incomplete_stations; - enum { IEEE80211_IBSS_MLME_SEARCH, IEEE80211_IBSS_MLME_JOINED, @@ -519,9 +505,7 @@ struct ieee80211_if_mesh { atomic_t mpaths; /* Timestamp of last SN update */ unsigned long last_sn_update; - /* Time when it's ok to send next PERR */ - unsigned long next_perr; - /* Timestamp of last PREQ sent */ + /* Timestamp of last SN sent */ unsigned long last_preq; struct mesh_rmc *rmc; spinlock_t mesh_preq_queue_lock; @@ -559,7 +543,6 @@ struct ieee80211_if_mesh { * associated stations and deliver multicast frames both * back to wireless media and to the local net stack. * @IEEE80211_SDATA_DISCONNECT_RESUME: Disconnect after resume. 
- * @IEEE80211_SDATA_IN_DRIVER: indicates interface was added to driver */ enum ieee80211_sub_if_data_flags { IEEE80211_SDATA_ALLMULTI = BIT(0), @@ -567,7 +550,6 @@ enum ieee80211_sub_if_data_flags { IEEE80211_SDATA_OPERATING_GMODE = BIT(2), IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(3), IEEE80211_SDATA_DISCONNECT_RESUME = BIT(4), - IEEE80211_SDATA_IN_DRIVER = BIT(5), }; /** @@ -618,9 +600,6 @@ struct ieee80211_sub_if_data { struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX]; unsigned int fragment_next; - /* TID bitmap for NoAck policy */ - u16 noack_map; - struct ieee80211_key __rcu *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS]; struct ieee80211_key __rcu *default_unicast_key; struct ieee80211_key __rcu *default_multicast_key; @@ -743,16 +722,17 @@ enum { * operating channel * @SCAN_SET_CHANNEL: Set the next channel to be scanned * @SCAN_SEND_PROBE: Send probe requests and wait for probe responses - * @SCAN_SUSPEND: Suspend the scan and go back to operating channel to - * send out data - * @SCAN_RESUME: Resume the scan and scan the next channel + * @SCAN_LEAVE_OPER_CHANNEL: Leave the operating channel, notify the AP + * about us leaving the channel and stop all associated STA interfaces + * @SCAN_ENTER_OPER_CHANNEL: Enter the operating channel again, notify the + * AP about us being back and restart all associated STA interfaces */ enum mac80211_scan_state { SCAN_DECISION, SCAN_SET_CHANNEL, SCAN_SEND_PROBE, - SCAN_SUSPEND, - SCAN_RESUME, + SCAN_LEAVE_OPER_CHANNEL, + SCAN_ENTER_OPER_CHANNEL, }; struct ieee80211_local { @@ -855,15 +835,18 @@ struct ieee80211_local { /* Station data */ /* - * The mutex only protects the list, hash table and - * counter, reads are done with RCU. + * The mutex only protects the list and counter, + * reads are done in RCU. + * Additionally, the lock protects the hash table, + * the pending list and each BSS's TIM bitmap. 
*/ struct mutex sta_mtx; - spinlock_t tim_lock; + spinlock_t sta_lock; unsigned long num_sta; - struct list_head sta_list; + struct list_head sta_list, sta_pending_list; struct sta_info __rcu *sta_hash[STA_HASH_SIZE]; struct timer_list sta_cleanup; + struct work_struct sta_finish_work; int sta_generation; struct sk_buff_head pending[IEEE80211_MAX_QUEUES]; @@ -968,6 +951,7 @@ struct ieee80211_local { int total_ps_buffered; /* total number of all buffered unicast and * multicast packets for power saving stations */ + int wifi_wme_noack_test; unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */ /* @@ -1028,9 +1012,6 @@ struct ieee80211_local { u32 hw_roc_cookie; bool hw_roc_for_tx; - struct idr ack_status_frames; - spinlock_t ack_status_lock; - /* dummy netdev for use w/ NAPI */ struct net_device napi_dev; @@ -1049,69 +1030,6 @@ struct ieee80211_ra_tid { u16 tid; }; -/* Parsed Information Elements */ -struct ieee802_11_elems { - u8 *ie_start; - size_t total_len; - - /* pointers to IEs */ - u8 *ssid; - u8 *supp_rates; - u8 *fh_params; - u8 *ds_params; - u8 *cf_params; - struct ieee80211_tim_ie *tim; - u8 *ibss_params; - u8 *challenge; - u8 *wpa; - u8 *rsn; - u8 *erp_info; - u8 *ext_supp_rates; - u8 *wmm_info; - u8 *wmm_param; - struct ieee80211_ht_cap *ht_cap_elem; - struct ieee80211_ht_info *ht_info_elem; - struct ieee80211_meshconf_ie *mesh_config; - u8 *mesh_id; - u8 *peering; - u8 *preq; - u8 *prep; - u8 *perr; - struct ieee80211_rann_ie *rann; - u8 *ch_switch_elem; - u8 *country_elem; - u8 *pwr_constr_elem; - u8 *quiet_elem; /* first quite element */ - u8 *timeout_int; - - /* length of them, respectively */ - u8 ssid_len; - u8 supp_rates_len; - u8 fh_params_len; - u8 ds_params_len; - u8 cf_params_len; - u8 tim_len; - u8 ibss_params_len; - u8 challenge_len; - u8 wpa_len; - u8 rsn_len; - u8 erp_info_len; - u8 ext_supp_rates_len; - u8 wmm_info_len; - u8 wmm_param_len; - u8 mesh_id_len; - u8 peering_len; - u8 preq_len; - u8 prep_len; - u8 perr_len; - u8 ch_switch_elem_len; - u8 country_elem_len; - u8 pwr_constr_elem_len; - u8 quiet_elem_len; - u8 num_of_quiet_elem; /* can be more the one */ - u8 timeout_int_len; -}; - static inline struct ieee80211_local *hw_to_local( struct ieee80211_hw *hw) { @@ -1172,8 +1090,9 @@ void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata); /* IBSS code */ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata); -void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, - const u8 *bssid, const u8 *addr, u32 supp_rates); +struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, + u8 *bssid, u8 *addr, u32 supp_rates, + gfp_t gfp); int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, struct cfg80211_ibss_params *params); int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata); @@ -1221,9 +1140,13 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata); void ieee80211_sched_scan_stopped_work(struct work_struct *work); /* off-channel helpers */ +bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local); +void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local, + bool tell_ap); void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local, bool offchannel_ps_enable); void ieee80211_offchannel_return(struct ieee80211_local *local, + bool enable_beaconing, bool offchannel_ps_disable); void ieee80211_hw_roc_setup(struct ieee80211_local *local); @@ -1256,11 
+1179,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); /* HT */ -bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata); -void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, - struct ieee80211_sta_ht_cap *ht_cap); -void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, - struct ieee80211_supported_band *sband, +void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband, struct ieee80211_ht_cap *ht_cap_ie, struct ieee80211_sta_ht_cap *ht_cap); void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, @@ -1347,16 +1266,7 @@ void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int ke gfp_t gfp); void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata); void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); - -void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, - struct sk_buff *skb, int tid); -static void inline ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, - struct sk_buff *skb) -{ - /* Send all internal mgmt frames on VO. Accordingly set TID to 7. */ - ieee80211_tx_skb_tid(sdata, skb, 7); -} - +void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); void ieee802_11_parse_elems(u8 *start, size_t len, struct ieee802_11_elems *elems); u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, @@ -1424,12 +1334,6 @@ void ieee80211_recalc_smps(struct ieee80211_local *local); size_t ieee80211_ie_split(const u8 *ies, size_t ielen, const u8 *ids, int n_ids, size_t offset); size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset); -u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, - u16 cap); -u8 *ieee80211_ie_build_ht_info(u8 *pos, - struct ieee80211_sta_ht_cap *ht_cap, - struct ieee80211_channel *channel, - enum nl80211_channel_type channel_type); /* internal work items */ void ieee80211_work_init(struct ieee80211_local *local); @@ -1458,8 +1362,6 @@ ieee80211_get_channel_mode(struct ieee80211_local *local, bool ieee80211_set_channel_type(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, enum nl80211_channel_type chantype); -enum nl80211_channel_type -ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info); #ifdef CONFIG_MAC80211_NOINLINE #define debug_noinline noinline diff --git a/trunk/net/mac80211/iface.c b/trunk/net/mac80211/iface.c index e47768cb8cb3..30d73552e9ab 100644 --- a/trunk/net/mac80211/iface.c +++ b/trunk/net/mac80211/iface.c @@ -188,22 +188,11 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) if (!is_valid_ether_addr(sdata->u.wds.remote_addr)) return -ENOLINK; break; - case NL80211_IFTYPE_AP_VLAN: { - struct ieee80211_sub_if_data *master; - + case NL80211_IFTYPE_AP_VLAN: if (!sdata->bss) return -ENOLINK; - list_add(&sdata->u.vlan.list, &sdata->bss->vlans); - - master = container_of(sdata->bss, - struct ieee80211_sub_if_data, u.ap); - sdata->control_port_protocol = - master->control_port_protocol; - sdata->control_port_no_encrypt = - master->control_port_no_encrypt; break; - } case NL80211_IFTYPE_AP: sdata->bss = &sdata->u.ap; break; @@ -276,7 +265,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) break; default: if (coming_up) { - res = drv_add_interface(local, sdata); + res = drv_add_interface(local, &sdata->vif); if (res) goto err_stop; } @@ -293,18 +282,10 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) changed 
|= ieee80211_reset_erp_info(sdata); ieee80211_bss_info_change_notify(sdata, changed); - if (sdata->vif.type == NL80211_IFTYPE_STATION || - sdata->vif.type == NL80211_IFTYPE_ADHOC) + if (sdata->vif.type == NL80211_IFTYPE_STATION) netif_carrier_off(dev); else netif_carrier_on(dev); - - /* - * set default queue parameters so drivers don't - * need to initialise the hardware if the hardware - * doesn't start up with sane defaults - */ - ieee80211_set_wmm_default(sdata); } set_bit(SDATA_STATE_RUNNING, &sdata->state); @@ -318,9 +299,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) goto err_del_interface; } - sta_info_move_state(sta, IEEE80211_STA_AUTH); - sta_info_move_state(sta, IEEE80211_STA_ASSOC); - sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); + /* no atomic bitop required since STA is not live yet */ + set_sta_flag(sta, WLAN_STA_AUTHORIZED); res = sta_info_insert(sta); if (res) { @@ -349,8 +329,15 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) if (coming_up) local->open_count++; - if (hw_reconf_flags) + if (hw_reconf_flags) { ieee80211_hw_config(local, hw_reconf_flags); + /* + * set default queue parameters so drivers don't + * need to initialise the hardware if the hardware + * doesn't start up with sane defaults + */ + ieee80211_set_wmm_default(sdata); + } ieee80211_recalc_ps(local, -1); @@ -358,7 +345,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) return 0; err_del_interface: - drv_remove_interface(local, sdata); + drv_remove_interface(local, &sdata->vif); err_stop: if (!local->open_count) drv_stop(local); @@ -463,19 +450,15 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, struct ieee80211_sub_if_data *vlan, *tmpsdata; struct beacon_data *old_beacon = rtnl_dereference(sdata->u.ap.beacon); - struct sk_buff *old_probe_resp = - rtnl_dereference(sdata->u.ap.probe_resp); /* sdata_running will return false, so this will disable */ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); - /* remove beacon and probe response */ + /* remove beacon */ RCU_INIT_POINTER(sdata->u.ap.beacon, NULL); - RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL); synchronize_rcu(); kfree(old_beacon); - kfree_skb(old_probe_resp); /* down all dependent devices, that is VLANs */ list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans, @@ -537,7 +520,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, ieee80211_free_keys(sdata); if (going_down) - drv_remove_interface(local, sdata); + drv_remove_interface(local, &sdata->vif); } sdata->bss = NULL; @@ -673,6 +656,7 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev, struct ieee80211_local *local = sdata->local; struct ieee80211_hdr *hdr; struct ieee80211_radiotap_header *rtap = (void *)skb->data; + u8 *p; if (local->hw.queues < 4) return 0; @@ -683,7 +667,19 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev, hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len)); - return ieee80211_select_queue_80211(local, skb, hdr); + if (!ieee80211_is_data(hdr->frame_control)) { + skb->priority = 7; + return ieee802_1d_to_ac[skb->priority]; + } + if (!ieee80211_is_data_qos(hdr->frame_control)) { + skb->priority = 0; + return ieee802_1d_to_ac[skb->priority]; + } + + p = ieee80211_get_qos_ctl(hdr); + skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK; + + return ieee80211_downgrade_queue(local, skb); } static const struct net_device_ops ieee80211_monitorif_ops = { @@ -854,8 +850,6 @@ static void 
ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE); sdata->control_port_no_encrypt = false; - sdata->noack_map = 0; - /* only monitor differs */ sdata->dev->type = ARPHRD_ETHER; diff --git a/trunk/net/mac80211/key.c b/trunk/net/mac80211/key.c index 87a89741432d..fb02ea52d2c2 100644 --- a/trunk/net/mac80211/key.c +++ b/trunk/net/mac80211/key.c @@ -134,13 +134,9 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || - (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || - (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV))) sdata->crypto_tx_tailroom_needed_cnt--; - WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && - (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)); - return 0; } @@ -183,8 +179,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) sdata = key->sdata; if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || - (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || - (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV))) increment_tailroom_need_count(sdata); if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) diff --git a/trunk/net/mac80211/main.c b/trunk/net/mac80211/main.c index 0a0d94ad9b08..cae443563ec9 100644 --- a/trunk/net/mac80211/main.c +++ b/trunk/net/mac80211/main.c @@ -47,7 +47,7 @@ void ieee80211_configure_filter(struct ieee80211_local *local) if (atomic_read(&local->iff_allmultis)) new_flags |= FIF_ALLMULTI; - if (local->monitors || test_bit(SCAN_SW_SCANNING, &local->scanning)) + if (local->monitors || local->scanning) new_flags |= FIF_BCN_PRBRESP_PROMISC; if (local->fif_probe_req || local->probe_req_reg) @@ -92,9 +92,50 @@ static void ieee80211_reconfig_filter(struct work_struct *work) ieee80211_configure_filter(local); } +/* + * Returns true if we are logically configured to be on + * the operating channel AND the hardware-conf is currently + * configured on the operating channel. Compares channel-type + * as well. + */ +bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local) +{ + struct ieee80211_channel *chan, *scan_chan; + enum nl80211_channel_type channel_type; + + /* This logic needs to match logic in ieee80211_hw_config */ + if (local->scan_channel) { + chan = local->scan_channel; + /* If scanning on oper channel, use whatever channel-type + * is currently in use. + */ + if (chan == local->oper_channel) + channel_type = local->_oper_channel_type; + else + channel_type = NL80211_CHAN_NO_HT; + } else if (local->tmp_channel) { + chan = scan_chan = local->tmp_channel; + channel_type = local->tmp_channel_type; + } else { + chan = local->oper_channel; + channel_type = local->_oper_channel_type; + } + + if (chan != local->oper_channel || + channel_type != local->_oper_channel_type) + return false; + + /* Check current hardware-config against oper_channel. 
*/ + if ((local->oper_channel != local->hw.conf.channel) || + (local->_oper_channel_type != local->hw.conf.channel_type)) + return false; + + return true; +} + int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) { - struct ieee80211_channel *chan; + struct ieee80211_channel *chan, *scan_chan; int ret = 0; int power; enum nl80211_channel_type channel_type; @@ -102,12 +143,14 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) might_sleep(); + scan_chan = local->scan_channel; + /* If this off-channel logic ever changes, ieee80211_on_oper_channel * may need to change as well. */ offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; - if (local->scan_channel) { - chan = local->scan_channel; + if (scan_chan) { + chan = scan_chan; /* If scanning on oper channel, use whatever channel-type * is currently in use. */ @@ -116,7 +159,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) else channel_type = NL80211_CHAN_NO_HT; } else if (local->tmp_channel) { - chan = local->tmp_channel; + chan = scan_chan = local->tmp_channel; channel_type = local->tmp_channel_type; } else { chan = local->oper_channel; @@ -150,8 +193,8 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) changed |= IEEE80211_CONF_CHANGE_SMPS; } - if (test_bit(SCAN_SW_SCANNING, &local->scanning) || - test_bit(SCAN_HW_SCANNING, &local->scanning)) + if ((local->scanning & SCAN_SW_SCANNING) || + (local->scanning & SCAN_HW_SCANNING)) power = chan->max_power; else power = local->power_constr_level ? @@ -393,6 +436,9 @@ static int ieee80211_ifa_changed(struct notifier_block *nb, sdata = IEEE80211_DEV_TO_SUB_IF(ndev); bss_conf = &sdata->vif.bss_conf; + if (!ieee80211_sdata_running(sdata)) + return NOTIFY_DONE; + /* ARP filtering is only supported in managed mode */ if (sdata->vif.type != NL80211_IFTYPE_STATION) return NOTIFY_DONE; @@ -421,7 +467,7 @@ static int ieee80211_ifa_changed(struct notifier_block *nb, } bss_conf->arp_addr_cnt = c; - /* Configure driver only if associated (which also implies it is up) */ + /* Configure driver only if associated */ if (ifmgd->associated) { bss_conf->arp_filter_enabled = sdata->arp_filter_state; ieee80211_bss_info_change_notify(sdata, @@ -514,19 +560,6 @@ ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = { }, }; -static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = { - .ampdu_params_info = IEEE80211_HT_AMPDU_PARM_FACTOR | - IEEE80211_HT_AMPDU_PARM_DENSITY, - - .cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 | - IEEE80211_HT_CAP_MAX_AMSDU | - IEEE80211_HT_CAP_SGI_40), - .mcs = { - .rx_mask = { 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, }, - }, -}; - struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, const struct ieee80211_ops *ops) { @@ -562,13 +595,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, wiphy->flags |= WIPHY_FLAG_NETNS_OK | WIPHY_FLAG_4ADDR_AP | - WIPHY_FLAG_4ADDR_STATION | - WIPHY_FLAG_REPORTS_OBSS | - WIPHY_FLAG_OFFCHAN_TX | - WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; - - wiphy->features = NL80211_FEATURE_SK_TX_STATUS | - NL80211_FEATURE_HT_IBSS; + WIPHY_FLAG_4ADDR_STATION; if (!ops->set_key) wiphy->flags |= WIPHY_FLAG_IBSS_RSN; @@ -581,7 +608,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN); - BUG_ON(!ops->tx && !ops->tx_frags); + BUG_ON(!ops->tx); BUG_ON(!ops->start); BUG_ON(!ops->stop); BUG_ON(!ops->config); @@ -601,7 +628,6 @@ struct ieee80211_hw 
*ieee80211_alloc_hw(size_t priv_data_len, local->user_power_level = -1; local->uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES; local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN; - wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask; INIT_LIST_HEAD(&local->interfaces); @@ -644,11 +670,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, INIT_WORK(&local->sched_scan_stopped_work, ieee80211_sched_scan_stopped_work); - spin_lock_init(&local->ack_status_lock); - idr_init(&local->ack_status_frames); - /* preallocate at least one entry */ - idr_pre_get(&local->ack_status_frames, GFP_KERNEL); - sta_info_init(local); for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { @@ -1030,13 +1051,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) } EXPORT_SYMBOL(ieee80211_unregister_hw); -static int ieee80211_free_ack_frame(int id, void *p, void *data) -{ - WARN_ONCE(1, "Have pending ack frames!\n"); - kfree_skb(p); - return 0; -} - void ieee80211_free_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); @@ -1047,10 +1061,6 @@ void ieee80211_free_hw(struct ieee80211_hw *hw) if (local->wiphy_ciphers_allocated) kfree(local->hw.wiphy->cipher_suites); - idr_for_each(&local->ack_status_frames, - ieee80211_free_ack_frame, NULL); - idr_destroy(&local->ack_status_frames); - wiphy_free(local->hw.wiphy); } EXPORT_SYMBOL(ieee80211_free_hw); diff --git a/trunk/net/mac80211/mesh.c b/trunk/net/mac80211/mesh.c index c707c8bf6d2c..a7078fdba8ca 100644 --- a/trunk/net/mac80211/mesh.c +++ b/trunk/net/mac80211/mesh.c @@ -76,7 +76,6 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data) bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - struct ieee80211_local *local = sdata->local; /* * As support for each feature is added, check for matching @@ -88,23 +87,15 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat * - MDA enabled * - Power management control on fc */ - if (!(ifmsh->mesh_id_len == ie->mesh_id_len && - memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 && - (ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) && - (ifmsh->mesh_pm_id == ie->mesh_config->meshconf_pmetric) && - (ifmsh->mesh_cc_id == ie->mesh_config->meshconf_congest) && - (ifmsh->mesh_sp_id == ie->mesh_config->meshconf_synch) && - (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth))) - goto mismatch; - - /* disallow peering with mismatched channel types for now */ - if (ie->ht_info_elem && - (local->_oper_channel_type != - ieee80211_ht_info_to_channel_type(ie->ht_info_elem))) - goto mismatch; - - return true; -mismatch: + if (ifmsh->mesh_id_len == ie->mesh_id_len && + memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 && + (ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) && + (ifmsh->mesh_pm_id == ie->mesh_config->meshconf_pmetric) && + (ifmsh->mesh_cc_id == ie->mesh_config->meshconf_congest) && + (ifmsh->mesh_sp_id == ie->mesh_config->meshconf_synch) && + (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)) + return true; + return false; } @@ -350,49 +341,6 @@ int mesh_add_ds_params_ie(struct sk_buff *skb, return 0; } -int mesh_add_ht_cap_ie(struct sk_buff *skb, - struct ieee80211_sub_if_data *sdata) -{ - struct ieee80211_local *local = sdata->local; - struct ieee80211_supported_band *sband; - u8 *pos; - - sband = local->hw.wiphy->bands[local->oper_channel->band]; - if (!sband->ht_cap.ht_supported || - local->_oper_channel_type == 
NL80211_CHAN_NO_HT) - return 0; - - if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap)) - return -ENOMEM; - - pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_cap)); - ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, sband->ht_cap.cap); - - return 0; -} - -int mesh_add_ht_info_ie(struct sk_buff *skb, - struct ieee80211_sub_if_data *sdata) -{ - struct ieee80211_local *local = sdata->local; - struct ieee80211_channel *channel = local->oper_channel; - enum nl80211_channel_type channel_type = local->_oper_channel_type; - struct ieee80211_supported_band *sband = - local->hw.wiphy->bands[channel->band]; - struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap; - u8 *pos; - - if (!ht_cap->ht_supported || channel_type == NL80211_CHAN_NO_HT) - return 0; - - if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_info)) - return -ENOMEM; - - pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_info)); - ieee80211_ie_build_ht_info(pos, ht_cap, channel, channel_type); - - return 0; -} static void ieee80211_mesh_path_timer(unsigned long data) { struct ieee80211_sub_if_data *sdata = @@ -749,7 +697,6 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) atomic_set(&ifmsh->mpaths, 0); mesh_rmc_init(sdata); ifmsh->last_preq = jiffies; - ifmsh->next_perr = jiffies; /* Allocate all mesh structures when creating the first mesh interface. */ if (!mesh_allocated) ieee80211s_init(); diff --git a/trunk/net/mac80211/mesh.h b/trunk/net/mac80211/mesh.h index bd14bd26a2b6..8c00e2d1d636 100644 --- a/trunk/net/mac80211/mesh.h +++ b/trunk/net/mac80211/mesh.h @@ -31,8 +31,6 @@ * @MESH_PATH_FIXED: the mesh path has been manually set and should not be * modified * @MESH_PATH_RESOLVED: the mesh path can has been resolved - * @MESH_PATH_REQ_QUEUED: there is an unsent path request for this destination - * already queued up, waiting for the discovery process to start. * * MESH_PATH_RESOLVED is used by the mesh path timer to * decide when to stop or cancel the mesh path discovery. 
@@ -43,7 +41,6 @@ enum mesh_path_flags { MESH_PATH_SN_VALID = BIT(2), MESH_PATH_FIXED = BIT(3), MESH_PATH_RESOLVED = BIT(4), - MESH_PATH_REQ_QUEUED = BIT(5), }; /** @@ -215,10 +212,6 @@ int mesh_add_vendor_ies(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata); int mesh_add_ds_params_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata); -int mesh_add_ht_cap_ie(struct sk_buff *skb, - struct ieee80211_sub_if_data *sdata); -int mesh_add_ht_info_ie(struct sk_buff *skb, - struct ieee80211_sub_if_data *sdata); void mesh_rmc_free(struct ieee80211_sub_if_data *sdata); int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); void ieee80211s_init(void); @@ -233,8 +226,6 @@ void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); /* Mesh paths */ int mesh_nexthop_lookup(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata); -int mesh_nexthop_resolve(struct sk_buff *skb, - struct ieee80211_sub_if_data *sdata); void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata); struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata); diff --git a/trunk/net/mac80211/mesh_hwmp.c b/trunk/net/mac80211/mesh_hwmp.c index 73abb7524b2c..174040a42887 100644 --- a/trunk/net/mac80211/mesh_hwmp.c +++ b/trunk/net/mac80211/mesh_hwmp.c @@ -113,20 +113,20 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; - struct sk_buff *skb; + struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); struct ieee80211_mgmt *mgmt; - u8 *pos, ie_len; - int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) + - sizeof(mgmt->u.action.u.mesh_action); + u8 *pos; + int ie_len; - skb = dev_alloc_skb(local->hw.extra_tx_headroom + - hdr_len + - 2 + 37); /* max HWMP IE */ if (!skb) return -1; skb_reserve(skb, local->hw.extra_tx_headroom); - mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len); - memset(mgmt, 0, hdr_len); + /* 25 is the size of the common mgmt part (24) plus the size of the + * common action part (1) + */ + mgmt = (struct ieee80211_mgmt *) + skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action)); + memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action)); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); @@ -240,24 +240,20 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; - struct sk_buff *skb; - struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); struct ieee80211_mgmt *mgmt; - u8 *pos, ie_len; - int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) + - sizeof(mgmt->u.action.u.mesh_action); + u8 *pos; + int ie_len; - if (time_before(jiffies, ifmsh->next_perr)) - return -EAGAIN; - - skb = dev_alloc_skb(local->hw.extra_tx_headroom + - hdr_len + - 2 + 15 /* PERR IE */); if (!skb) return -1; skb_reserve(skb, local->tx_headroom + local->hw.extra_tx_headroom); - mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len); - memset(mgmt, 0, hdr_len); + /* 25 is the size of the common mgmt part (24) plus the size of the + * common action part (1) + */ + mgmt = (struct ieee80211_mgmt *) + skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action)); + memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action)); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); @@ -294,8 +290,6 @@ int mesh_path_error_tx(u8 
ttl, u8 *target, __le32 target_sn, /* see note in function header */ prepare_frame_for_deferred_tx(sdata, skb); - ifmsh->next_perr = TU_TO_EXP_TIME( - ifmsh->mshcfg.dot11MeshHWMPperrMinInterval); ieee80211_add_pending_skb(local, skb); return 0; } @@ -399,13 +393,15 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, orig_metric = PREQ_IE_METRIC(hwmp_ie); break; case MPATH_PREP: - /* Originator here refers to the MP that was the target in the - * Path Request. We divert from the nomenclature in the draft + /* Originator here refers to the MP that was the destination in + * the Path Request. The draft refers to that MP as the + * destination address, even though usually it is the origin of + * the PREP frame. We divert from the nomenclature in the draft * so that we can easily use a single function to gather path * information from both PREQ and PREP frames. */ - orig_addr = PREP_IE_TARGET_ADDR(hwmp_ie); - orig_sn = PREP_IE_TARGET_SN(hwmp_ie); + orig_addr = PREP_IE_ORIG_ADDR(hwmp_ie); + orig_sn = PREP_IE_ORIG_SN(hwmp_ie); orig_lifetime = PREP_IE_LIFETIME(hwmp_ie); orig_metric = PREP_IE_METRIC(hwmp_ie); break; @@ -566,9 +562,9 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, ttl = ifmsh->mshcfg.element_ttl; if (ttl != 0) { mhwmp_dbg("replying to the PREQ"); - mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr, - cpu_to_le32(orig_sn), 0, target_addr, - cpu_to_le32(target_sn), mgmt->sa, 0, ttl, + mesh_path_sel_frame_tx(MPATH_PREP, 0, target_addr, + cpu_to_le32(target_sn), 0, orig_addr, + cpu_to_le32(orig_sn), mgmt->sa, 0, ttl, cpu_to_le32(lifetime), cpu_to_le32(metric), 0, sdata); } else @@ -622,8 +618,14 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem)); - orig_addr = PREP_IE_ORIG_ADDR(prep_elem); - if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0) + /* Note that we divert from the draft nomenclature and denominate + * destination to what the draft refers to as origininator. 
So in this + * function destnation refers to the final destination of the PREP, + * which corresponds with the originator of the PREQ which this PREP + * replies + */ + target_addr = PREP_IE_TARGET_ADDR(prep_elem); + if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0) /* destination, no forwarding required */ return; @@ -634,7 +636,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, } rcu_read_lock(); - mpath = mesh_path_lookup(orig_addr, sdata); + mpath = mesh_path_lookup(target_addr, sdata); if (mpath) spin_lock_bh(&mpath->state_lock); else @@ -649,7 +651,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, flags = PREP_IE_FLAGS(prep_elem); lifetime = PREP_IE_LIFETIME(prep_elem); hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1; - target_addr = PREP_IE_TARGET_ADDR(prep_elem); + orig_addr = PREP_IE_ORIG_ADDR(prep_elem); target_sn = PREP_IE_TARGET_SN(prep_elem); orig_sn = PREP_IE_ORIG_SN(prep_elem); @@ -865,20 +867,9 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) return; } - spin_lock(&mpath->state_lock); - if (mpath->flags & MESH_PATH_REQ_QUEUED) { - spin_unlock(&mpath->state_lock); - spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); - kfree(preq_node); - return; - } - memcpy(preq_node->dst, mpath->dst, ETH_ALEN); preq_node->flags = flags; - mpath->flags |= MESH_PATH_REQ_QUEUED; - spin_unlock(&mpath->state_lock); - list_add_tail(&preq_node->list, &ifmsh->preq_queue.list); ++ifmsh->preq_queue_len; spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); @@ -930,7 +921,6 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata) goto enddiscovery; spin_lock_bh(&mpath->state_lock); - mpath->flags &= ~MESH_PATH_REQ_QUEUED; if (preq_node->flags & PREQ_Q_F_START) { if (mpath->flags & MESH_PATH_RESOLVING) { spin_unlock_bh(&mpath->state_lock); @@ -982,97 +972,71 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata) kfree(preq_node); } -/* mesh_nexthop_resolve - lookup next hop for given skb and start path - * discovery if no forwarding information is found. +/** + * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame * * @skb: 802.11 frame to be sent * @sdata: network subif the frame will be sent through * - * Returns: 0 if the next hop was found and -ENOENT if the frame was queued. - * skb is freeed here if no mpath could be allocated. + * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is + * found, the function will start a path discovery and queue the frame so it is + * sent when the path is resolved. This means the caller must not free the skb + * in this case. 
*/ -int mesh_nexthop_resolve(struct sk_buff *skb, - struct ieee80211_sub_if_data *sdata) +int mesh_nexthop_lookup(struct sk_buff *skb, + struct ieee80211_sub_if_data *sdata) { - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct mesh_path *mpath; struct sk_buff *skb_to_free = NULL; + struct mesh_path *mpath; + struct sta_info *next_hop; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; u8 *target_addr = hdr->addr3; int err = 0; rcu_read_lock(); - err = mesh_nexthop_lookup(skb, sdata); - if (!err) - goto endlookup; - - /* no nexthop found, start resolving */ mpath = mesh_path_lookup(target_addr, sdata); + if (!mpath) { mesh_path_add(target_addr, sdata); mpath = mesh_path_lookup(target_addr, sdata); if (!mpath) { - mesh_path_discard_frame(skb, sdata); + sdata->u.mesh.mshstats.dropped_frames_no_route++; err = -ENOSPC; goto endlookup; } } - if (!(mpath->flags & MESH_PATH_RESOLVING)) - mesh_queue_preq(mpath, PREQ_Q_F_START); - - if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN) - skb_to_free = skb_dequeue(&mpath->frame_queue); - - info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; - ieee80211_set_qos_hdr(sdata, skb); - skb_queue_tail(&mpath->frame_queue, skb); - err = -ENOENT; - if (skb_to_free) - mesh_path_discard_frame(skb_to_free, sdata); - -endlookup: - rcu_read_unlock(); - return err; -} -/** - * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. Calling - * this function is considered "using" the associated mpath, so preempt a path - * refresh if this mpath expires soon. - * - * @skb: 802.11 frame to be sent - * @sdata: network subif the frame will be sent through - * - * Returns: 0 if the next hop was found. Nonzero otherwise. 
- */ -int mesh_nexthop_lookup(struct sk_buff *skb, - struct ieee80211_sub_if_data *sdata) -{ - struct mesh_path *mpath; - struct sta_info *next_hop; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; - u8 *target_addr = hdr->addr3; - int err = -ENOENT; - - rcu_read_lock(); - mpath = mesh_path_lookup(target_addr, sdata); - - if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE)) - goto endlookup; + if (mpath->flags & MESH_PATH_ACTIVE) { + if (time_after(jiffies, + mpath->exp_time - + msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) && + !memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) && + !(mpath->flags & MESH_PATH_RESOLVING) && + !(mpath->flags & MESH_PATH_FIXED)) { + mesh_queue_preq(mpath, + PREQ_Q_F_START | PREQ_Q_F_REFRESH); + } + next_hop = rcu_dereference(mpath->next_hop); + if (next_hop) + memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN); + else + err = -ENOENT; + } else { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + if (!(mpath->flags & MESH_PATH_RESOLVING)) { + /* Start discovery only if it is not running yet */ + mesh_queue_preq(mpath, PREQ_Q_F_START); + } - if (time_after(jiffies, - mpath->exp_time - - msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) && - !memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) && - !(mpath->flags & MESH_PATH_RESOLVING) && - !(mpath->flags & MESH_PATH_FIXED)) - mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH); + if (skb_queue_len(&mpath->frame_queue) >= + MESH_FRAME_QUEUE_LEN) + skb_to_free = skb_dequeue(&mpath->frame_queue); - next_hop = rcu_dereference(mpath->next_hop); - if (next_hop) { - memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN); - memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); - err = 0; + info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; + skb_queue_tail(&mpath->frame_queue, skb); + if (skb_to_free) + mesh_path_discard_frame(skb_to_free, sdata); + err = -ENOENT; } endlookup: @@ -1097,7 +1061,6 @@ void mesh_path_timer(unsigned long data) } else if (mpath->discovery_retries < max_preq_retries(sdata)) { ++mpath->discovery_retries; mpath->discovery_timeout *= 2; - mpath->flags &= ~MESH_PATH_REQ_QUEUED; spin_unlock_bh(&mpath->state_lock); mesh_queue_preq(mpath, 0); } else { diff --git a/trunk/net/mac80211/mesh_pathtbl.c b/trunk/net/mac80211/mesh_pathtbl.c index edf167e3b8f3..7f54c5042235 100644 --- a/trunk/net/mac80211/mesh_pathtbl.c +++ b/trunk/net/mac80211/mesh_pathtbl.c @@ -69,6 +69,8 @@ static inline struct mesh_table *resize_dereference_mpp_paths(void) lockdep_is_held(&pathtbl_resize_lock)); } +static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath); + /* * CAREFUL -- "tbl" must not be an expression, * in particular not an rcu_dereference(), since @@ -211,6 +213,7 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta) struct ieee80211_hdr *hdr; struct sk_buff_head tmpq; unsigned long flags; + struct ieee80211_sub_if_data *sdata = mpath->sdata; rcu_assign_pointer(mpath->next_hop, sta); @@ -221,7 +224,8 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta) while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) { hdr = (struct ieee80211_hdr *) skb->data; memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); - memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN); + skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb)); + ieee80211_set_qos_hdr(sdata, skb); __skb_queue_tail(&tmpq, skb); } @@ -265,7 +269,6 @@ static void prepare_for_gate(struct sk_buff *skb, char *dst_addr, next_hop = 
rcu_dereference(gate_mpath->next_hop)->sta.addr; memcpy(hdr->addr1, next_hop, ETH_ALEN); rcu_read_unlock(); - memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN); memcpy(hdr->addr3, dst_addr, ETH_ALEN); } @@ -420,18 +423,21 @@ static void mesh_gate_node_reclaim(struct rcu_head *rp) } /** - * mesh_path_add_gate - add the given mpath to a mesh gate to our path table - * @mpath: gate path to add to table + * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates + * @mesh_tbl: table which contains known_gates list + * @mpath: mpath to known mesh gate + * + * Returns: 0 on success + * */ -int mesh_path_add_gate(struct mesh_path *mpath) +static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath) { - struct mesh_table *tbl; struct mpath_node *gate, *new_gate; struct hlist_node *n; int err; rcu_read_lock(); - tbl = rcu_dereference(mesh_paths); + tbl = rcu_dereference(tbl); hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list) if (gate->mpath == mpath) { @@ -475,6 +481,8 @@ static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath) struct mpath_node *gate; struct hlist_node *p, *q; + tbl = rcu_dereference(tbl); + hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list) if (gate->mpath == mpath) { spin_lock_bh(&tbl->gates_lock); @@ -492,6 +500,16 @@ static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath) return 0; } +/** + * + * mesh_path_add_gate - add the given mpath to a mesh gate to our path table + * @mpath: gate path to add to table + */ +int mesh_path_add_gate(struct mesh_path *mpath) +{ + return mesh_gate_add(mesh_paths, mpath); +} + /** * mesh_gate_num - number of gates known to this interface * @sdata: subif data @@ -973,11 +991,38 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) * @skb: frame to discard * @sdata: network subif the frame was to be sent through * + * If the frame was being forwarded from another MP, a PERR frame will be sent + * to the precursor. The precursor's address (i.e. the previous hop) was saved + * in addr1 of the frame-to-be-forwarded, and would only be overwritten once + * the destination is successfully resolved. + * * Locking: the function must me called within a rcu_read_lock region */ void mesh_path_discard_frame(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata) { + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct mesh_path *mpath; + u32 sn = 0; + __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD); + + if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) { + u8 *ra, *da; + + da = hdr->addr3; + ra = hdr->addr1; + rcu_read_lock(); + mpath = mesh_path_lookup(da, sdata); + if (mpath) { + spin_lock_bh(&mpath->state_lock); + sn = ++mpath->sn; + spin_unlock_bh(&mpath->state_lock); + } + rcu_read_unlock(); + mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data, + cpu_to_le32(sn), reason, ra, sdata); + } + kfree_skb(skb); sdata->u.mesh.mshstats.dropped_frames_no_route++; } diff --git a/trunk/net/mac80211/mesh_plink.c b/trunk/net/mac80211/mesh_plink.c index 41ef1b476442..7e57f5d07f66 100644 --- a/trunk/net/mac80211/mesh_plink.c +++ b/trunk/net/mac80211/mesh_plink.c @@ -80,15 +80,11 @@ static inline void mesh_plink_fsm_restart(struct sta_info *sta) * on it in the lifecycle management section! 
*/ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, - u8 *hw_addr, u32 rates, - struct ieee802_11_elems *elems) + u8 *hw_addr, u32 rates) { struct ieee80211_local *local = sdata->local; - struct ieee80211_supported_band *sband; struct sta_info *sta; - sband = local->hw.wiphy->bands[local->oper_channel->band]; - if (local->num_sta >= MESH_MAX_PLINKS) return NULL; @@ -96,17 +92,10 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, if (!sta) return NULL; - sta_info_move_state(sta, IEEE80211_STA_AUTH); - sta_info_move_state(sta, IEEE80211_STA_ASSOC); - sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); - + set_sta_flag(sta, WLAN_STA_AUTH); + set_sta_flag(sta, WLAN_STA_AUTHORIZED); set_sta_flag(sta, WLAN_STA_WME); - sta->sta.supp_rates[local->hw.conf.channel->band] = rates; - if (elems->ht_cap_elem) - ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, - elems->ht_cap_elem, - &sta->sta.ht_cap); rate_control_rate_init(sta); return sta; @@ -164,31 +153,23 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, enum ieee80211_self_protected_actioncode action, u8 *da, __le16 llid, __le16 plid, __le16 reason) { struct ieee80211_local *local = sdata->local; - struct sk_buff *skb; + struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400 + + sdata->u.mesh.ie_len); struct ieee80211_mgmt *mgmt; bool include_plid = false; + int ie_len = 4; u16 peering_proto = 0; - u8 *pos, ie_len = 4; - int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.self_prot) + - sizeof(mgmt->u.action.u.self_prot); - - skb = dev_alloc_skb(local->hw.extra_tx_headroom + - hdr_len + - 2 + /* capability info */ - 2 + /* AID */ - 2 + 8 + /* supported rates */ - 2 + (IEEE80211_MAX_SUPP_RATES - 8) + - 2 + sdata->u.mesh.mesh_id_len + - 2 + sizeof(struct ieee80211_meshconf_ie) + - 2 + sizeof(struct ieee80211_ht_cap) + - 2 + sizeof(struct ieee80211_ht_info) + - 2 + 8 + /* peering IE */ - sdata->u.mesh.ie_len); + u8 *pos; + if (!skb) return -1; skb_reserve(skb, local->hw.extra_tx_headroom); - mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len); - memset(mgmt, 0, hdr_len); + /* 25 is the size of the common mgmt part (24) plus the size of the + * common action part (1) + */ + mgmt = (struct ieee80211_mgmt *) + skb_put(skb, 25 + sizeof(mgmt->u.action.u.self_prot)); + memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.self_prot)); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); memcpy(mgmt->da, da, ETH_ALEN); @@ -254,13 +235,6 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, memcpy(pos, &reason, 2); pos += 2; } - - if (action != WLAN_SP_MESH_PEERING_CLOSE) { - if (mesh_add_ht_cap_ie(skb, sdata) || - mesh_add_ht_info_ie(skb, sdata)) - return -1; - } - if (mesh_add_vendor_ies(skb, sdata)) return -1; @@ -287,7 +261,7 @@ void mesh_neighbour_update(u8 *hw_addr, u32 rates, elems->ie_start, elems->total_len, GFP_KERNEL); else - sta = mesh_plink_alloc(sdata, hw_addr, rates, elems); + sta = mesh_plink_alloc(sdata, hw_addr, rates); if (!sta) return; if (sta_info_insert_rcu(sta)) { @@ -578,7 +552,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m } rates = ieee80211_sta_get_rates(local, &elems, rx_status->band); - sta = mesh_plink_alloc(sdata, mgmt->sa, rates, &elems); + sta = mesh_plink_alloc(sdata, mgmt->sa, rates); if (!sta) { mpl_dbg("Mesh plink error: plink table full\n"); return; diff --git a/trunk/net/mac80211/mlme.c b/trunk/net/mac80211/mlme.c index 
ecb4c84c1bb3..b1b1bb368f70 100644 --- a/trunk/net/mac80211/mlme.c +++ b/trunk/net/mac80211/mlme.c @@ -209,7 +209,6 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, channel_type = NL80211_CHAN_HT20; if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) && - !ieee80111_cfg_override_disables_ht40(sdata) && (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) && (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) { switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { @@ -819,7 +818,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work) } if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) && - !(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) { + (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED))) { netif_tx_stop_all_queues(sdata->dev); if (drv_tx_frames_pending(local)) @@ -1121,8 +1120,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, /* on the next assoc, re-program HT parameters */ sdata->ht_opmode_valid = false; - memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa)); - memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask)); local->power_constr_level = 0; @@ -1362,6 +1359,9 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata) ieee80211_set_disassoc(sdata, true, true); mutex_unlock(&ifmgd->mtx); + mutex_lock(&local->mtx); + ieee80211_recalc_idle(local); + mutex_unlock(&local->mtx); /* * must be outside lock due to cfg80211, * but that's not a problem. @@ -1370,10 +1370,6 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata) IEEE80211_STYPE_DEAUTH, WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, NULL, true); - - mutex_lock(&local->mtx); - ieee80211_recalc_idle(local); - mutex_unlock(&local->mtx); } void ieee80211_beacon_connection_loss_work(struct work_struct *work) @@ -1381,16 +1377,6 @@ void ieee80211_beacon_connection_loss_work(struct work_struct *work) struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, u.mgd.beacon_connection_loss_work); - struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - struct sta_info *sta; - - if (ifmgd->associated) { - rcu_read_lock(); - sta = sta_info_get(sdata, ifmgd->bssid); - if (sta) - sta->beacon_loss_count++; - rcu_read_unlock(); - } if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) __ieee80211_connection_loss(sdata); @@ -1482,47 +1468,6 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, return RX_MGMT_CFG80211_DISASSOC; } -static void ieee80211_get_rates(struct ieee80211_supported_band *sband, - u8 *supp_rates, unsigned int supp_rates_len, - u32 *rates, u32 *basic_rates, - bool *have_higher_than_11mbit, - int *min_rate, int *min_rate_index) -{ - int i, j; - - for (i = 0; i < supp_rates_len; i++) { - int rate = (supp_rates[i] & 0x7f) * 5; - bool is_basic = !!(supp_rates[i] & 0x80); - - if (rate > 110) - *have_higher_than_11mbit = true; - - /* - * BSS_MEMBERSHIP_SELECTOR_HT_PHY is defined in 802.11n-2009 - * 7.3.2.2 as a magic value instead of a rate. Hence, skip it. - * - * Note: Even through the membership selector and the basic - * rate flag share the same bit, they are not exactly - * the same. 
- */ - if (!!(supp_rates[i] & 0x80) && - (supp_rates[i] & 0x7f) == BSS_MEMBERSHIP_SELECTOR_HT_PHY) - continue; - - for (j = 0; j < sband->n_bitrates; j++) { - if (sband->bitrates[j].bitrate == rate) { - *rates |= BIT(j); - if (is_basic) - *basic_rates |= BIT(j); - if (rate < *min_rate) { - *min_rate = rate; - *min_rate_index = j; - } - break; - } - } - } -} static bool ieee80211_assoc_success(struct ieee80211_work *wk, struct ieee80211_mgmt *mgmt, size_t len) @@ -1539,7 +1484,7 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk, struct ieee802_11_elems elems; struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; u32 changed = 0; - int err; + int i, j, err; bool have_higher_than_11mbit = false; u16 ap_ht_cap_flags; int min_rate = INT_MAX, min_rate_index = -1; @@ -1587,23 +1532,57 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk, return false; } - sta_info_move_state(sta, IEEE80211_STA_AUTH); - sta_info_move_state(sta, IEEE80211_STA_ASSOC); + set_sta_flag(sta, WLAN_STA_AUTH); + set_sta_flag(sta, WLAN_STA_ASSOC); + set_sta_flag(sta, WLAN_STA_ASSOC_AP); if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT)) - sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); + set_sta_flag(sta, WLAN_STA_AUTHORIZED); rates = 0; basic_rates = 0; sband = local->hw.wiphy->bands[wk->chan->band]; - ieee80211_get_rates(sband, elems.supp_rates, elems.supp_rates_len, - &rates, &basic_rates, &have_higher_than_11mbit, - &min_rate, &min_rate_index); + for (i = 0; i < elems.supp_rates_len; i++) { + int rate = (elems.supp_rates[i] & 0x7f) * 5; + bool is_basic = !!(elems.supp_rates[i] & 0x80); - ieee80211_get_rates(sband, elems.ext_supp_rates, - elems.ext_supp_rates_len, &rates, &basic_rates, - &have_higher_than_11mbit, - &min_rate, &min_rate_index); + if (rate > 110) + have_higher_than_11mbit = true; + + for (j = 0; j < sband->n_bitrates; j++) { + if (sband->bitrates[j].bitrate == rate) { + rates |= BIT(j); + if (is_basic) + basic_rates |= BIT(j); + if (rate < min_rate) { + min_rate = rate; + min_rate_index = j; + } + break; + } + } + } + + for (i = 0; i < elems.ext_supp_rates_len; i++) { + int rate = (elems.ext_supp_rates[i] & 0x7f) * 5; + bool is_basic = !!(elems.ext_supp_rates[i] & 0x80); + + if (rate > 110) + have_higher_than_11mbit = true; + + for (j = 0; j < sband->n_bitrates; j++) { + if (sband->bitrates[j].bitrate == rate) { + rates |= BIT(j); + if (is_basic) + basic_rates |= BIT(j); + if (rate < min_rate) { + min_rate = rate; + min_rate_index = j; + } + break; + } + } + } /* * some buggy APs don't advertise basic_rates. 
use the lowest @@ -1626,7 +1605,7 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk, sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) - ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, + ieee80211_ht_cap_ie_to_sta_ht_cap(sband, elems.ht_cap_elem, &sta->sta.ht_cap); ap_ht_cap_flags = sta->sta.ht_cap.cap; @@ -1995,7 +1974,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; - ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, + ieee80211_ht_cap_ie_to_sta_ht_cap(sband, elems.ht_cap_elem, &sta->sta.ht_cap); ap_ht_cap_flags = sta->sta.ht_cap.cap; @@ -2149,6 +2128,9 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, ieee80211_set_disassoc(sdata, true, true); mutex_unlock(&ifmgd->mtx); + mutex_lock(&local->mtx); + ieee80211_recalc_idle(local); + mutex_unlock(&local->mtx); /* * must be outside lock due to cfg80211, * but that's not a problem. @@ -2156,11 +2138,6 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH, reason, NULL, true); - - mutex_lock(&local->mtx); - ieee80211_recalc_idle(local); - mutex_unlock(&local->mtx); - mutex_lock(&ifmgd->mtx); } @@ -2381,7 +2358,6 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) (unsigned long) sdata); ifmgd->flags = 0; - ifmgd->powersave = sdata->wdev.ps; mutex_init(&ifmgd->mtx); @@ -2656,13 +2632,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, ifmgd->flags |= IEEE80211_STA_DISABLE_11N; - if (req->flags & ASSOC_REQ_DISABLE_HT) - ifmgd->flags |= IEEE80211_STA_DISABLE_11N; - - memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa)); - memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask, - sizeof(ifmgd->ht_capa_mask)); - if (req->ie && req->ie_len) { memcpy(wk->ie, req->ie, req->ie_len); wk->ie_len = req->ie_len; diff --git a/trunk/net/mac80211/offchannel.c b/trunk/net/mac80211/offchannel.c index f054e94901a2..3d414411a96e 100644 --- a/trunk/net/mac80211/offchannel.c +++ b/trunk/net/mac80211/offchannel.c @@ -138,16 +138,31 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local, mutex_unlock(&local->iflist_mtx); } +void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local, + bool tell_ap) +{ + struct ieee80211_sub_if_data *sdata; + + mutex_lock(&local->iflist_mtx); + list_for_each_entry(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata)) + continue; + + if (sdata->vif.type == NL80211_IFTYPE_STATION && + sdata->u.mgd.associated) + ieee80211_offchannel_ps_enable(sdata, tell_ap); + } + mutex_unlock(&local->iflist_mtx); +} + void ieee80211_offchannel_return(struct ieee80211_local *local, + bool enable_beaconing, bool offchannel_ps_disable) { struct ieee80211_sub_if_data *sdata; mutex_lock(&local->iflist_mtx); list_for_each_entry(sdata, &local->interfaces, list) { - if (sdata->vif.type != NL80211_IFTYPE_MONITOR) - clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state); - if (!ieee80211_sdata_running(sdata)) continue; @@ -159,6 +174,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local, } if (sdata->vif.type != NL80211_IFTYPE_MONITOR) { + clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state); /* * This may wake up queues even though the driver * currently has them stopped. 
This is not very @@ -172,9 +188,11 @@ void ieee80211_offchannel_return(struct ieee80211_local *local, netif_tx_wake_all_queues(sdata->dev); } - if (sdata->vif.type == NL80211_IFTYPE_AP || - sdata->vif.type == NL80211_IFTYPE_ADHOC || - sdata->vif.type == NL80211_IFTYPE_MESH_POINT) + /* Check to see if we should re-enable beaconing */ + if (enable_beaconing && + (sdata->vif.type == NL80211_IFTYPE_AP || + sdata->vif.type == NL80211_IFTYPE_ADHOC || + sdata->vif.type == NL80211_IFTYPE_MESH_POINT)) ieee80211_bss_info_change_notify( sdata, BSS_CHANGED_BEACON_ENABLED); } @@ -194,6 +212,8 @@ static void ieee80211_hw_roc_start(struct work_struct *work) return; } + ieee80211_recalc_idle(local); + if (local->hw_roc_skb) { sdata = IEEE80211_DEV_TO_SUB_IF(local->hw_roc_dev); ieee80211_tx_skb(sdata, local->hw_roc_skb); @@ -207,8 +227,6 @@ static void ieee80211_hw_roc_start(struct work_struct *work) GFP_KERNEL); } - ieee80211_recalc_idle(local); - mutex_unlock(&local->mtx); } diff --git a/trunk/net/mac80211/pm.c b/trunk/net/mac80211/pm.c index 596efaf50e09..9ee7164b207c 100644 --- a/trunk/net/mac80211/pm.c +++ b/trunk/net/mac80211/pm.c @@ -125,7 +125,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); - drv_remove_interface(local, sdata); + drv_remove_interface(local, &sdata->vif); } /* stop hardware - this must stop RX */ diff --git a/trunk/net/mac80211/rc80211_minstrel.c b/trunk/net/mac80211/rc80211_minstrel.c index b39dda523f39..58a89554b788 100644 --- a/trunk/net/mac80211/rc80211_minstrel.c +++ b/trunk/net/mac80211/rc80211_minstrel.c @@ -334,8 +334,8 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, static void -calc_rate_durations(struct ieee80211_local *local, struct minstrel_rate *d, - struct ieee80211_rate *rate) +calc_rate_durations(struct minstrel_sta_info *mi, struct ieee80211_local *local, + struct minstrel_rate *d, struct ieee80211_rate *rate) { int erp = !!(rate->flags & IEEE80211_RATE_ERP_G); @@ -402,7 +402,8 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband, mr->rix = i; mr->bitrate = sband->bitrates[i].bitrate / 5; - calc_rate_durations(local, mr, &sband->bitrates[i]); + calc_rate_durations(mi, local, mr, + &sband->bitrates[i]); /* calculate maximum number of retransmissions before * fallback (based on maximum segment size) */ diff --git a/trunk/net/mac80211/rc80211_minstrel_ht.c b/trunk/net/mac80211/rc80211_minstrel_ht.c index ff5f7b84e825..cdb28535716b 100644 --- a/trunk/net/mac80211/rc80211_minstrel_ht.c +++ b/trunk/net/mac80211/rc80211_minstrel_ht.c @@ -36,17 +36,8 @@ /* Transmit duration for the raw data part of an average sized packet */ #define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps))) -/* - * Define group sort order: HT40 -> SGI -> #streams - */ -#define GROUP_IDX(_streams, _sgi, _ht40) \ - MINSTREL_MAX_STREAMS * 2 * _ht40 + \ - MINSTREL_MAX_STREAMS * _sgi + \ - _streams - 1 - /* MCS rate information for an MCS group */ -#define MCS_GROUP(_streams, _sgi, _ht40) \ - [GROUP_IDX(_streams, _sgi, _ht40)] = { \ +#define MCS_GROUP(_streams, _sgi, _ht40) { \ .streams = _streams, \ .flags = \ (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \ @@ -67,9 +58,6 @@ * To enable sufficiently targeted rate sampling, MCS rates are divided into * groups, based on the number of streams and flags (HT40, SGI) that they * use. 
- * - * Sortorder has to be fixed for GROUP_IDX macro to be applicable: - * HT40 -> SGI -> #streams */ const struct mcs_group minstrel_mcs_groups[] = { MCS_GROUP(1, 0, 0), @@ -114,9 +102,21 @@ minstrel_ewma(int old, int new, int weight) static int minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate) { - return GROUP_IDX((rate->idx / MCS_GROUP_RATES) + 1, - !!(rate->flags & IEEE80211_TX_RC_SHORT_GI), - !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)); + int streams = (rate->idx / MCS_GROUP_RATES) + 1; + u32 flags = IEEE80211_TX_RC_SHORT_GI | IEEE80211_TX_RC_40_MHZ_WIDTH; + int i; + + for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) { + if (minstrel_mcs_groups[i].streams != streams) + continue; + if (minstrel_mcs_groups[i].flags != (rate->flags & flags)) + continue; + + return i; + } + + WARN_ON(1); + return 0; } static inline struct minstrel_rate_stats * @@ -130,7 +130,7 @@ minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index) * Recalculate success probabilities and counters for a rate using EWMA */ static void -minstrel_calc_rate_ewma(struct minstrel_rate_stats *mr) +minstrel_calc_rate_ewma(struct minstrel_priv *mp, struct minstrel_rate_stats *mr) { if (unlikely(mr->attempts > 0)) { mr->sample_skipped = 0; @@ -156,7 +156,8 @@ minstrel_calc_rate_ewma(struct minstrel_rate_stats *mr) * the expected number of retransmissions and their expected length */ static void -minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate) +minstrel_ht_calc_tp(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, + int group, int rate) { struct minstrel_rate_stats *mr; unsigned int usecs; @@ -225,8 +226,8 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) mr = &mg->rates[i]; mr->retry_updated = false; index = MCS_GROUP_RATES * group + i; - minstrel_calc_rate_ewma(mr); - minstrel_ht_calc_tp(mi, group, i); + minstrel_calc_rate_ewma(mp, mr); + minstrel_ht_calc_tp(mp, mi, group, i); if (!mr->cur_tp) continue; @@ -299,10 +300,10 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) static bool minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate) { - if (rate->idx < 0) + if (!rate->count) return false; - if (!rate->count) + if (rate->idx < 0) return false; return !!(rate->flags & IEEE80211_TX_RC_MCS); @@ -356,7 +357,7 @@ minstrel_downgrade_rate(struct minstrel_ht_sta *mi, unsigned int *idx, } static void -minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb) +minstrel_aggr_check(struct minstrel_priv *mp, struct ieee80211_sta *pubsta, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct sta_info *sta = container_of(pubsta, struct sta_info, sta); @@ -454,7 +455,7 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) { minstrel_ht_update_stats(mp, mi); if (!(info->flags & IEEE80211_TX_CTL_AMPDU)) - minstrel_aggr_check(sta, skb); + minstrel_aggr_check(mp, sta, skb); } } @@ -514,6 +515,7 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, static void minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, struct ieee80211_tx_rate *rate, int index, + struct ieee80211_tx_rate_control *txrc, bool sample, bool rtscts) { const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; @@ -626,11 +628,11 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, if (sample_idx >= 0) { sample = 
true; minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx, - true, false); + txrc, true, false); info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; } else { minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate, - false, false); + txrc, false, false); } if (mp->hw->max_rates >= 3) { @@ -641,13 +643,13 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, */ if (sample_idx >= 0) minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate, - false, false); + txrc, false, false); else minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2, - false, true); + txrc, false, true); minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate, - false, !sample); + txrc, false, !sample); ar[3].count = 0; ar[3].idx = -1; @@ -658,7 +660,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, * max_tp_rate -> max_prob_rate by default. */ minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_prob_rate, - false, !sample); + txrc, false, !sample); ar[2].count = 0; ar[2].idx = -1; diff --git a/trunk/net/mac80211/rc80211_pid_algo.c b/trunk/net/mac80211/rc80211_pid_algo.c index 502d3ecc4a79..aeda65466f3e 100644 --- a/trunk/net/mac80211/rc80211_pid_algo.c +++ b/trunk/net/mac80211/rc80211_pid_algo.c @@ -318,7 +318,7 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband, rinfo[i].diff = i * pinfo->norm_offset; } for (i = 1; i < sband->n_bitrates; i++) { - s = false; + s = 0; for (j = 0; j < sband->n_bitrates - i; j++) if (unlikely(sband->bitrates[rinfo[j].index].bitrate > sband->bitrates[rinfo[j + 1].index].bitrate)) { @@ -327,7 +327,7 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband, rinfo[j + 1].index = tmp; rinfo[rinfo[j].index].rev_index = j; rinfo[rinfo[j + 1].index].rev_index = j + 1; - s = true; + s = 1; } if (!s) break; diff --git a/trunk/net/mac80211/rx.c b/trunk/net/mac80211/rx.c index f407427c642f..fb123e2e081a 100644 --- a/trunk/net/mac80211/rx.c +++ b/trunk/net/mac80211/rx.c @@ -28,7 +28,6 @@ #include "wpa.h" #include "tkip.h" #include "wme.h" -#include "rate.h" /* * monitor mode reception @@ -749,11 +748,10 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx) struct ieee80211_local *local = rx->local; struct ieee80211_hw *hw = &local->hw; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; - struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct sta_info *sta = rx->sta; struct tid_ampdu_rx *tid_agg_rx; u16 sc; - u8 tid, ack_policy; + int tid; if (!ieee80211_is_data_qos(hdr->frame_control)) goto dont_reorder; @@ -766,8 +764,6 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx) if (!sta) goto dont_reorder; - ack_policy = *ieee80211_get_qos_ctl(hdr) & - IEEE80211_QOS_CTL_ACK_POLICY_MASK; tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); @@ -778,15 +774,6 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx) if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) goto dont_reorder; - /* not part of a BA session */ - if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK && - ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL) - goto dont_reorder; - - /* not actually part of this BA session */ - if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) - goto dont_reorder; - /* new, potentially un-ordered, ampdu frame - process it */ /* reset session timer */ @@ -871,13 +858,6 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx) rx->sdata->control_port_protocol) 
return RX_CONTINUE; } - - if (rx->sdata->vif.type == NL80211_IFTYPE_AP && - cfg80211_rx_spurious_frame(rx->sdata->dev, - hdr->addr2, - GFP_ATOMIC)) - return RX_DROP_UNUSABLE; - return RX_DROP_MONITOR; } @@ -1347,20 +1327,15 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) /* * If we receive a 4-addr nullfunc frame from a STA - * that was not moved to a 4-addr STA vlan yet send - * the event to userspace and for older hostapd drop - * the frame to the monitor interface. + * that was not moved to a 4-addr STA vlan yet, drop + * the frame to the monitor interface, to make sure + * that hostapd sees it */ if (ieee80211_has_a4(hdr->frame_control) && (rx->sdata->vif.type == NL80211_IFTYPE_AP || (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && - !rx->sdata->u.vlan.sta))) { - if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT)) - cfg80211_rx_unexpected_4addr_frame( - rx->sdata->dev, sta->sta.addr, - GFP_ATOMIC); + !rx->sdata->u.vlan.sta))) return RX_DROP_MONITOR; - } /* * Update counter and free packet here to avoid * counting this as a dropped packed. @@ -1576,6 +1551,25 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) return RX_CONTINUE; } +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx) +{ + u8 *data = rx->skb->data; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data; + + if (!ieee80211_is_data_qos(hdr->frame_control)) + return RX_CONTINUE; + + /* remove the qos control field, update frame type and meta-data */ + memmove(data + IEEE80211_QOS_CTL_LEN, data, + ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN); + hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN); + /* change frame type to non QOS */ + hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA); + + return RX_CONTINUE; +} + static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) { @@ -1808,12 +1802,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) } if (xmit_skb) { - /* - * Send to wireless media and increase priority by 256 to - * keep the received priority instead of reclassifying - * the frame (see cfg80211_classify8021d). 
- */ - xmit_skb->priority += 256; + /* send to wireless media */ xmit_skb->protocol = htons(ETH_P_802_3); skb_reset_network_header(xmit_skb); skb_reset_mac_header(xmit_skb); @@ -1882,16 +1871,13 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) static ieee80211_rx_result ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) { - struct ieee80211_hdr *fwd_hdr, *hdr; - struct ieee80211_tx_info *info; + struct ieee80211_hdr *hdr; struct ieee80211s_hdr *mesh_hdr; + unsigned int hdrlen; struct sk_buff *skb = rx->skb, *fwd_skb; struct ieee80211_local *local = rx->local; struct ieee80211_sub_if_data *sdata = rx->sdata; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); - struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD); - u16 q, hdrlen; hdr = (struct ieee80211_hdr *) skb->data; hdrlen = ieee80211_hdrlen(hdr->frame_control); @@ -1907,7 +1893,14 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) return RX_CONTINUE; if (!mesh_hdr->ttl) + /* illegal frame */ + return RX_DROP_MONITOR; + + if (ieee80211_queue_stopped(&local->hw, skb_get_queue_mapping(skb))) { + IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, + dropped_frames_congestion); return RX_DROP_MONITOR; + } if (mesh_hdr->flags & MESH_FLAGS_AE) { struct mesh_path *mppath; @@ -1940,50 +1933,60 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0) return RX_CONTINUE; - q = ieee80211_select_queue_80211(local, skb, hdr); - if (ieee80211_queue_stopped(&local->hw, q)) { - IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion); - return RX_DROP_MONITOR; - } - skb_set_queue_mapping(skb, q); + mesh_hdr->ttl--; - if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) - goto out; - - if (!--mesh_hdr->ttl) { - IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); - return RX_DROP_MONITOR; - } - - fwd_skb = skb_copy(skb, GFP_ATOMIC); - if (!fwd_skb) { - if (net_ratelimit()) - printk(KERN_DEBUG "%s: failed to clone mesh frame\n", - sdata->name); - goto out; - } - - fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; - info = IEEE80211_SKB_CB(fwd_skb); - memset(info, 0, sizeof(*info)); - info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; - info->control.vif = &rx->sdata->vif; - info->control.jiffies = jiffies; - if (is_multicast_ether_addr(fwd_hdr->addr1)) { - IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast); - memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); - } else if (!mesh_nexthop_lookup(fwd_skb, sdata)) { - IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); - } else { - /* unable to resolve next hop */ - mesh_path_error_tx(ifmsh->mshcfg.element_ttl, fwd_hdr->addr3, - 0, reason, fwd_hdr->addr2, sdata); - IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route); - return RX_DROP_MONITOR; + if (status->rx_flags & IEEE80211_RX_RA_MATCH) { + if (!mesh_hdr->ttl) + IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh, + dropped_frames_ttl); + else { + struct ieee80211_hdr *fwd_hdr; + struct ieee80211_tx_info *info; + + fwd_skb = skb_copy(skb, GFP_ATOMIC); + + if (!fwd_skb && net_ratelimit()) + printk(KERN_DEBUG "%s: failed to clone mesh frame\n", + sdata->name); + if (!fwd_skb) + goto out; + + fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; + memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); + info = IEEE80211_SKB_CB(fwd_skb); + memset(info, 0, sizeof(*info)); + info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; + info->control.vif = &rx->sdata->vif; + if (is_multicast_ether_addr(fwd_hdr->addr1)) { + 
IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, + fwded_mcast); + skb_set_queue_mapping(fwd_skb, + ieee80211_select_queue(sdata, fwd_skb)); + ieee80211_set_qos_hdr(sdata, fwd_skb); + } else { + int err; + /* + * Save TA to addr1 to send TA a path error if a + * suitable next hop is not found + */ + memcpy(fwd_hdr->addr1, fwd_hdr->addr2, + ETH_ALEN); + err = mesh_nexthop_lookup(fwd_skb, sdata); + /* Failed to immediately resolve next hop: + * fwded frame was dropped or will be added + * later to the pending skb queue. */ + if (err) + return RX_DROP_MONITOR; + + IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, + fwded_unicast); + } + IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, + fwded_frames); + ieee80211_add_pending_skb(local, fwd_skb); + } } - IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames); - ieee80211_add_pending_skb(local, fwd_skb); out: if (is_multicast_ether_addr(hdr->addr1) || sdata->dev->flags & IFF_PROMISC) @@ -2011,17 +2014,12 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx) return RX_DROP_MONITOR; /* - * Send unexpected-4addr-frame event to hostapd. For older versions, - * also drop the frame to cooked monitor interfaces. + * Allow the cooked monitor interface of an AP to see 4-addr frames so + * that a 4-addr station can be detected and moved into a separate VLAN */ if (ieee80211_has_a4(hdr->frame_control) && - sdata->vif.type == NL80211_IFTYPE_AP) { - if (rx->sta && - !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT)) - cfg80211_rx_unexpected_4addr_frame( - rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC); + sdata->vif.type == NL80211_IFTYPE_AP) return RX_DROP_MONITOR; - } err = __ieee80211_data_to_8023(rx, &port_control); if (unlikely(err)) @@ -2176,18 +2174,6 @@ ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) if (!ieee80211_is_mgmt(mgmt->frame_control)) return RX_DROP_MONITOR; - if (rx->sdata->vif.type == NL80211_IFTYPE_AP && - ieee80211_is_beacon(mgmt->frame_control) && - !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { - struct ieee80211_rx_status *status; - - status = IEEE80211_SKB_RXCB(rx->skb); - cfg80211_report_obss_beacon(rx->local->hw.wiphy, - rx->skb->data, rx->skb->len, - status->freq, GFP_ATOMIC); - rx->flags |= IEEE80211_RX_BEACON_REPORTED; - } - if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) return RX_DROP_MONITOR; @@ -2220,69 +2206,16 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) return RX_DROP_UNUSABLE; switch (mgmt->u.action.category) { - case WLAN_CATEGORY_HT: - /* reject HT action frames from stations not supporting HT */ - if (!rx->sta->sta.ht_cap.ht_supported) - goto invalid; - - if (sdata->vif.type != NL80211_IFTYPE_STATION && - sdata->vif.type != NL80211_IFTYPE_MESH_POINT && - sdata->vif.type != NL80211_IFTYPE_AP_VLAN && - sdata->vif.type != NL80211_IFTYPE_AP && - sdata->vif.type != NL80211_IFTYPE_ADHOC) - break; - - /* verify action & smps_control are present */ - if (len < IEEE80211_MIN_ACTION_SIZE + 2) - goto invalid; - - switch (mgmt->u.action.u.ht_smps.action) { - case WLAN_HT_ACTION_SMPS: { - struct ieee80211_supported_band *sband; - u8 smps; - - /* convert to HT capability */ - switch (mgmt->u.action.u.ht_smps.smps_control) { - case WLAN_HT_SMPS_CONTROL_DISABLED: - smps = WLAN_HT_CAP_SM_PS_DISABLED; - break; - case WLAN_HT_SMPS_CONTROL_STATIC: - smps = WLAN_HT_CAP_SM_PS_STATIC; - break; - case WLAN_HT_SMPS_CONTROL_DYNAMIC: - smps = WLAN_HT_CAP_SM_PS_DYNAMIC; - break; - default: - goto invalid; - } - smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT; - - /* if no change do nothing */ - if ((rx->sta->sta.ht_cap.cap & - IEEE80211_HT_CAP_SM_PS) 
== smps) - goto handled; - - rx->sta->sta.ht_cap.cap &= ~IEEE80211_HT_CAP_SM_PS; - rx->sta->sta.ht_cap.cap |= smps; - - sband = rx->local->hw.wiphy->bands[status->band]; - - rate_control_rate_update(local, sband, rx->sta, - IEEE80211_RC_SMPS_CHANGED, - local->_oper_channel_type); - goto handled; - } - default: - goto invalid; - } - - break; case WLAN_CATEGORY_BACK: + /* + * The aggregation code is not prepared to handle + * anything but STA/AP due to the BSSID handling; + * IBSS could work in the code but isn't supported + * by drivers or the standard. + */ if (sdata->vif.type != NL80211_IFTYPE_STATION && - sdata->vif.type != NL80211_IFTYPE_MESH_POINT && sdata->vif.type != NL80211_IFTYPE_AP_VLAN && - sdata->vif.type != NL80211_IFTYPE_AP && - sdata->vif.type != NL80211_IFTYPE_ADHOC) + sdata->vif.type != NL80211_IFTYPE_AP) break; /* verify action_code is present */ @@ -2560,10 +2493,6 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, goto out_free_skb; rx->flags |= IEEE80211_RX_CMNTR; - /* If there are no cooked monitor interfaces, just free the SKB */ - if (!local->cooked_mntrs) - goto out_free_skb; - if (skb_headroom(skb) < sizeof(*rthdr) && pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC)) goto out_free_skb; @@ -2699,6 +2628,7 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx) if (ieee80211_vif_is_mesh(&rx->sdata->vif)) CALL_RXH(ieee80211_rx_h_mesh_fwding); #endif + CALL_RXH(ieee80211_rx_h_remove_qos_control) CALL_RXH(ieee80211_rx_h_amsdu) CALL_RXH(ieee80211_rx_h_data) CALL_RXH(ieee80211_rx_h_ctrl); @@ -2818,8 +2748,8 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx, rate_idx = 0; /* TODO: HT rates */ else rate_idx = status->rate_idx; - ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2, - BIT(rate_idx)); + rx->sta = ieee80211_ibss_add_sta(sdata, bssid, + hdr->addr2, BIT(rate_idx), GFP_ATOMIC); } break; case NL80211_IFTYPE_MESH_POINT: @@ -2840,17 +2770,10 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx, return 0; } else if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) { - /* - * Accept public action frames even when the - * BSSID doesn't match, this is used for P2P - * and location updates. Note that mac80211 - * itself never looks at these frames. - */ - if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) && - ieee80211_is_public_action(hdr, skb->len)) - return 1; if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) && - !ieee80211_is_beacon(hdr->frame_control)) + !ieee80211_is_beacon(hdr->frame_control) && + !(ieee80211_is_action(hdr->frame_control) && + sdata->vif.p2p)) return 0; status->rx_flags &= ~IEEE80211_RX_RA_MATCH; } diff --git a/trunk/net/mac80211/scan.c b/trunk/net/mac80211/scan.c index 9270771702fe..105436dbb90d 100644 --- a/trunk/net/mac80211/scan.c +++ b/trunk/net/mac80211/scan.c @@ -106,7 +106,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local, /* save the ERP value so that it is available at association time */ if (elems->erp_info && elems->erp_info_len >= 1) { bss->erp_value = elems->erp_info[0]; - bss->has_erp_value = true; + bss->has_erp_value = 1; } if (elems->tim) { @@ -213,7 +213,12 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) if (bss) ieee80211_rx_bss_put(sdata->local, bss); - if (channel == sdata->local->oper_channel) + /* If we are on-operating-channel, and this packet is for the + * current channel, pass the pkt on up the stack so that + * the rest of the stack can make use of it. 
+ */ + if (ieee80211_cfg_on_oper_channel(sdata->local) + && (channel == sdata->local->oper_channel)) return RX_CONTINUE; dev_kfree_skb(skb); @@ -259,6 +264,8 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted, bool was_hw_scan) { struct ieee80211_local *local = hw_to_local(hw); + bool on_oper_chan; + bool enable_beacons = false; lockdep_assert_held(&local->mtx); @@ -291,13 +298,25 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted, local->scanning = 0; local->scan_channel = NULL; - /* Set power back to normal operating levels. */ - ieee80211_hw_config(local, 0); + on_oper_chan = ieee80211_cfg_on_oper_channel(local); + + if (was_hw_scan || !on_oper_chan) + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); + else + /* Set power back to normal operating levels. */ + ieee80211_hw_config(local, 0); if (!was_hw_scan) { + bool on_oper_chan2; ieee80211_configure_filter(local); drv_sw_scan_complete(local); - ieee80211_offchannel_return(local, true); + on_oper_chan2 = ieee80211_cfg_on_oper_channel(local); + /* We should always be on-channel at this point. */ + WARN_ON(!on_oper_chan2); + if (on_oper_chan2 && (on_oper_chan != on_oper_chan2)) + enable_beacons = true; + + ieee80211_offchannel_return(local, enable_beacons, true); } ieee80211_recalc_idle(local); @@ -342,7 +361,11 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local) local->next_scan_state = SCAN_DECISION; local->scan_channel_idx = 0; - ieee80211_offchannel_stop_vifs(local, true); + /* We always want to use off-channel PS, even if we + * are not really leaving oper-channel. Don't + * tell the AP though, as long as we are on-channel. + */ + ieee80211_offchannel_enable_all_ps(local, false); ieee80211_configure_filter(local); @@ -350,7 +373,8 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local) ieee80211_hw_config(local, 0); ieee80211_queue_delayed_work(&local->hw, - &local->scan_work, 0); + &local->scan_work, + IEEE80211_CHANNEL_TIME); return 0; } @@ -486,39 +510,96 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local, next_chan = local->scan_req->channels[local->scan_channel_idx]; + if (ieee80211_cfg_on_oper_channel(local)) { + /* We're currently on operating channel. */ + if (next_chan == local->oper_channel) + /* We don't need to move off of operating channel. */ + local->next_scan_state = SCAN_SET_CHANNEL; + else + /* + * We do need to leave operating channel, as next + * scan is somewhere else. + */ + local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL; + } else { + /* + * we're currently scanning a different channel, let's + * see if we can scan another channel without interfering + * with the current traffic situation. + * + * Since we don't know if the AP has pending frames for us + * we can only check for our tx queues and use the current + * pm_qos requirements for rx. Hence, if no tx traffic occurs + * at all we will scan as many channels in a row as the pm_qos + * latency allows us to. Additionally we also check for the + * currently negotiated listen interval to prevent losing + * frames unnecessarily. + * + * Otherwise switch back to the operating channel. 
+ */ + + bad_latency = time_after(jiffies + + ieee80211_scan_get_channel_time(next_chan), + local->leave_oper_channel_time + + usecs_to_jiffies(pm_qos_request(PM_QOS_NETWORK_LATENCY))); + + listen_int_exceeded = time_after(jiffies + + ieee80211_scan_get_channel_time(next_chan), + local->leave_oper_channel_time + + usecs_to_jiffies(min_beacon_int * 1024) * + local->hw.conf.listen_interval); + + if (associated && ( !tx_empty || bad_latency || + listen_int_exceeded)) + local->next_scan_state = SCAN_ENTER_OPER_CHANNEL; + else + local->next_scan_state = SCAN_SET_CHANNEL; + } + + *next_delay = 0; +} + +static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local, + unsigned long *next_delay) +{ + /* PS will already be in off-channel mode, + * we do that once at the beginning of scanning. + */ + ieee80211_offchannel_stop_vifs(local, false); + /* - * we're currently scanning a different channel, let's - * see if we can scan another channel without interfering - * with the current traffic situation. - * - * Since we don't know if the AP has pending frames for us - * we can only check for our tx queues and use the current - * pm_qos requirements for rx. Hence, if no tx traffic occurs - * at all we will scan as many channels in a row as the pm_qos - * latency allows us to. Additionally we also check for the - * currently negotiated listen interval to prevent losing - * frames unnecessarily. - * - * Otherwise switch back to the operating channel. + * What if the nullfunc frames didn't arrive? */ + drv_flush(local, false); + if (local->ops->flush) + *next_delay = 0; + else + *next_delay = HZ / 10; - bad_latency = time_after(jiffies + - ieee80211_scan_get_channel_time(next_chan), - local->leave_oper_channel_time + - usecs_to_jiffies(pm_qos_request(PM_QOS_NETWORK_LATENCY))); + /* remember when we left the operating channel */ + local->leave_oper_channel_time = jiffies; - listen_int_exceeded = time_after(jiffies + - ieee80211_scan_get_channel_time(next_chan), - local->leave_oper_channel_time + - usecs_to_jiffies(min_beacon_int * 1024) * - local->hw.conf.listen_interval); + /* advance to the next channel to be scanned */ + local->next_scan_state = SCAN_SET_CHANNEL; +} - if (associated && (!tx_empty || bad_latency || listen_int_exceeded)) - local->next_scan_state = SCAN_SUSPEND; - else - local->next_scan_state = SCAN_SET_CHANNEL; +static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *local, + unsigned long *next_delay) +{ + /* switch back to the operating channel */ + local->scan_channel = NULL; + if (!ieee80211_cfg_on_oper_channel(local)) + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); - *next_delay = 0; + /* + * Re-enable vifs and beaconing. Leave PS + * in off-channel state..will put that back + * on-channel at the end of scanning. + */ + ieee80211_offchannel_return(local, true, false); + + *next_delay = HZ / 5; + local->next_scan_state = SCAN_DECISION; } static void ieee80211_scan_state_set_channel(struct ieee80211_local *local, @@ -532,8 +613,10 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local, local->scan_channel = chan; - if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL)) - skip = 1; + /* Only call hw-config if we really need to change channels. 
*/ + if (chan != local->hw.conf.channel) + if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL)) + skip = 1; /* advance state machine to next channel/band */ local->scan_channel_idx++; @@ -590,44 +673,6 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, local->next_scan_state = SCAN_DECISION; } -static void ieee80211_scan_state_suspend(struct ieee80211_local *local, - unsigned long *next_delay) -{ - /* switch back to the operating channel */ - local->scan_channel = NULL; - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); - - /* - * Re-enable vifs and beaconing. Leave PS - * in off-channel state..will put that back - * on-channel at the end of scanning. - */ - ieee80211_offchannel_return(local, false); - - *next_delay = HZ / 5; - /* afterwards, resume scan & go to next channel */ - local->next_scan_state = SCAN_RESUME; -} - -static void ieee80211_scan_state_resume(struct ieee80211_local *local, - unsigned long *next_delay) -{ - /* PS already is in off-channel mode */ - ieee80211_offchannel_stop_vifs(local, false); - - if (local->ops->flush) { - drv_flush(local, false); - *next_delay = 0; - } else - *next_delay = HZ / 10; - - /* remember when we left the operating channel */ - local->leave_oper_channel_time = jiffies; - - /* advance to the next channel to be scanned */ - local->next_scan_state = SCAN_SET_CHANNEL; -} - void ieee80211_scan_work(struct work_struct *work) { struct ieee80211_local *local = @@ -698,11 +743,11 @@ void ieee80211_scan_work(struct work_struct *work) case SCAN_SEND_PROBE: ieee80211_scan_state_send_probe(local, &next_delay); break; - case SCAN_SUSPEND: - ieee80211_scan_state_suspend(local, &next_delay); + case SCAN_LEAVE_OPER_CHANNEL: + ieee80211_scan_state_leave_oper_channel(local, &next_delay); break; - case SCAN_RESUME: - ieee80211_scan_state_resume(local, &next_delay); + case SCAN_ENTER_OPER_CHANNEL: + ieee80211_scan_state_enter_oper_channel(local, &next_delay); break; } } while (next_delay == 0); diff --git a/trunk/net/mac80211/sta_info.c b/trunk/net/mac80211/sta_info.c index b197136aea2c..8eaa746ec7a2 100644 --- a/trunk/net/mac80211/sta_info.c +++ b/trunk/net/mac80211/sta_info.c @@ -62,14 +62,14 @@ * freed before they are done using it. 
*/ -/* Caller must hold local->sta_mtx */ +/* Caller must hold local->sta_lock */ static int sta_info_hash_del(struct ieee80211_local *local, struct sta_info *sta) { struct sta_info *s; s = rcu_dereference_protected(local->sta_hash[STA_HASH(sta->sta.addr)], - lockdep_is_held(&local->sta_mtx)); + lockdep_is_held(&local->sta_lock)); if (!s) return -ENOENT; if (s == sta) { @@ -81,7 +81,7 @@ static int sta_info_hash_del(struct ieee80211_local *local, while (rcu_access_pointer(s->hnext) && rcu_access_pointer(s->hnext) != sta) s = rcu_dereference_protected(s->hnext, - lockdep_is_held(&local->sta_mtx)); + lockdep_is_held(&local->sta_lock)); if (rcu_access_pointer(s->hnext)) { RCU_INIT_POINTER(s->hnext, sta->hnext); return 0; @@ -98,12 +98,14 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, struct sta_info *sta; sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)], + lockdep_is_held(&local->sta_lock) || lockdep_is_held(&local->sta_mtx)); while (sta) { if (sta->sdata == sdata && !sta->dummy && memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) break; sta = rcu_dereference_check(sta->hnext, + lockdep_is_held(&local->sta_lock) || lockdep_is_held(&local->sta_mtx)); } return sta; @@ -117,12 +119,14 @@ struct sta_info *sta_info_get_rx(struct ieee80211_sub_if_data *sdata, struct sta_info *sta; sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)], + lockdep_is_held(&local->sta_lock) || lockdep_is_held(&local->sta_mtx)); while (sta) { if (sta->sdata == sdata && memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) break; sta = rcu_dereference_check(sta->hnext, + lockdep_is_held(&local->sta_lock) || lockdep_is_held(&local->sta_mtx)); } return sta; @@ -139,6 +143,7 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, struct sta_info *sta; sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)], + lockdep_is_held(&local->sta_lock) || lockdep_is_held(&local->sta_mtx)); while (sta) { if ((sta->sdata == sdata || @@ -147,6 +152,7 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) break; sta = rcu_dereference_check(sta->hnext, + lockdep_is_held(&local->sta_lock) || lockdep_is_held(&local->sta_mtx)); } return sta; @@ -163,6 +169,7 @@ struct sta_info *sta_info_get_bss_rx(struct ieee80211_sub_if_data *sdata, struct sta_info *sta; sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)], + lockdep_is_held(&local->sta_lock) || lockdep_is_held(&local->sta_mtx)); while (sta) { if ((sta->sdata == sdata || @@ -170,6 +177,7 @@ struct sta_info *sta_info_get_bss_rx(struct ieee80211_sub_if_data *sdata, memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) break; sta = rcu_dereference_check(sta->hnext, + lockdep_is_held(&local->sta_lock) || lockdep_is_held(&local->sta_mtx)); } return sta; @@ -196,17 +204,16 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata, } /** - * sta_info_free - free STA + * __sta_info_free - internal STA free helper * * @local: pointer to the global information * @sta: STA info to free * * This function must undo everything done by sta_info_alloc() - * that may happen before sta_info_insert(). It may only be - * called when sta_info_insert() has not been attempted (and - * if that fails, the station is freed anyway.) + * that may happen before sta_info_insert(). 
*/ -void sta_info_free(struct ieee80211_local *local, struct sta_info *sta) +static void __sta_info_free(struct ieee80211_local *local, + struct sta_info *sta) { if (sta->rate_ctrl) { rate_control_free_sta(sta); @@ -220,11 +227,10 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta) kfree(sta); } -/* Caller must hold local->sta_mtx */ +/* Caller must hold local->sta_lock */ static void sta_info_hash_add(struct ieee80211_local *local, struct sta_info *sta) { - lockdep_assert_held(&local->sta_mtx); sta->hnext = local->sta_hash[STA_HASH(sta->sta.addr)]; RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)], sta); } @@ -274,7 +280,7 @@ static int sta_prepare_rate_control(struct ieee80211_local *local, } struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, - const u8 *addr, gfp_t gfp) + u8 *addr, gfp_t gfp) { struct ieee80211_local *local = sdata->local; struct sta_info *sta; @@ -332,6 +338,102 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, return sta; } +static int sta_info_finish_insert(struct sta_info *sta, + bool async, bool dummy_reinsert) +{ + struct ieee80211_local *local = sta->local; + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct station_info sinfo; + unsigned long flags; + int err = 0; + + lockdep_assert_held(&local->sta_mtx); + + if (!sta->dummy || dummy_reinsert) { + /* notify driver */ + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + sdata = container_of(sdata->bss, + struct ieee80211_sub_if_data, + u.ap); + err = drv_sta_add(local, sdata, &sta->sta); + if (err) { + if (!async) + return err; + printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to " + "driver (%d) - keeping it anyway.\n", + sdata->name, sta->sta.addr, err); + } else { + sta->uploaded = true; +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + if (async) + wiphy_debug(local->hw.wiphy, + "Finished adding IBSS STA %pM\n", + sta->sta.addr); +#endif + } + + sdata = sta->sdata; + } + + if (!dummy_reinsert) { + if (!async) { + local->num_sta++; + local->sta_generation++; + smp_mb(); + + /* make the station visible */ + spin_lock_irqsave(&local->sta_lock, flags); + sta_info_hash_add(local, sta); + spin_unlock_irqrestore(&local->sta_lock, flags); + } + + list_add(&sta->list, &local->sta_list); + } else { + sta->dummy = false; + } + + if (!sta->dummy) { + ieee80211_sta_debugfs_add(sta); + rate_control_add_sta_debugfs(sta); + + memset(&sinfo, 0, sizeof(sinfo)); + sinfo.filled = 0; + sinfo.generation = local->sta_generation; + cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL); + } + + return 0; +} + +static void sta_info_finish_pending(struct ieee80211_local *local) +{ + struct sta_info *sta; + unsigned long flags; + + spin_lock_irqsave(&local->sta_lock, flags); + while (!list_empty(&local->sta_pending_list)) { + sta = list_first_entry(&local->sta_pending_list, + struct sta_info, list); + list_del(&sta->list); + spin_unlock_irqrestore(&local->sta_lock, flags); + + sta_info_finish_insert(sta, true, false); + + spin_lock_irqsave(&local->sta_lock, flags); + } + spin_unlock_irqrestore(&local->sta_lock, flags); +} + +static void sta_info_finish_work(struct work_struct *work) +{ + struct ieee80211_local *local = + container_of(work, struct ieee80211_local, sta_finish_work); + + mutex_lock(&local->sta_mtx); + sta_info_finish_pending(local); + mutex_unlock(&local->sta_mtx); +} + static int sta_info_insert_check(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; @@ -351,24 +453,70 @@ static int sta_info_insert_check(struct sta_info 
*sta) return 0; } +static int sta_info_insert_ibss(struct sta_info *sta) __acquires(RCU) +{ + struct ieee80211_local *local = sta->local; + struct ieee80211_sub_if_data *sdata = sta->sdata; + unsigned long flags; + + spin_lock_irqsave(&local->sta_lock, flags); + /* check if STA exists already */ + if (sta_info_get_bss_rx(sdata, sta->sta.addr)) { + spin_unlock_irqrestore(&local->sta_lock, flags); + rcu_read_lock(); + return -EEXIST; + } + + local->num_sta++; + local->sta_generation++; + smp_mb(); + sta_info_hash_add(local, sta); + + list_add_tail(&sta->list, &local->sta_pending_list); + + rcu_read_lock(); + spin_unlock_irqrestore(&local->sta_lock, flags); + +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + wiphy_debug(local->hw.wiphy, "Added IBSS STA %pM\n", + sta->sta.addr); +#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ + + ieee80211_queue_work(&local->hw, &local->sta_finish_work); + + return 0; +} + /* * should be called with sta_mtx locked * this function replaces the mutex lock * with a RCU lock */ -static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) +static int sta_info_insert_non_ibss(struct sta_info *sta) __acquires(RCU) { struct ieee80211_local *local = sta->local; struct ieee80211_sub_if_data *sdata = sta->sdata; + unsigned long flags; struct sta_info *exist_sta; bool dummy_reinsert = false; int err = 0; lockdep_assert_held(&local->sta_mtx); + /* + * On first glance, this will look racy, because the code + * in this function, which inserts a station with sleeping, + * unlocks the sta_lock between checking existence in the + * hash table and inserting into it. + * + * However, it is not racy against itself because it keeps + * the mutex locked. + */ + + spin_lock_irqsave(&local->sta_lock, flags); /* * check if STA exists already. - * only accept a scenario of a second call to sta_info_insert_finish + * only accept a scenario of a second call to sta_info_insert_non_ibss * with a dummy station entry that was inserted earlier * in that case - assume that the dummy station flag should * be removed. 
@@ -378,47 +526,20 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) if (exist_sta == sta && sta->dummy) { dummy_reinsert = true; } else { - err = -EEXIST; - goto out_err; + spin_unlock_irqrestore(&local->sta_lock, flags); + mutex_unlock(&local->sta_mtx); + rcu_read_lock(); + return -EEXIST; } } - if (!sta->dummy || dummy_reinsert) { - /* notify driver */ - err = drv_sta_add(local, sdata, &sta->sta); - if (err) { - if (sdata->vif.type != NL80211_IFTYPE_ADHOC) - goto out_err; - printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to " - "driver (%d) - keeping it anyway.\n", - sdata->name, sta->sta.addr, err); - } else - sta->uploaded = true; - } - - if (!dummy_reinsert) { - local->num_sta++; - local->sta_generation++; - smp_mb(); - - /* make the station visible */ - sta_info_hash_add(local, sta); - - list_add(&sta->list, &local->sta_list); - } else { - sta->dummy = false; - } - - if (!sta->dummy) { - struct station_info sinfo; - - ieee80211_sta_debugfs_add(sta); - rate_control_add_sta_debugfs(sta); + spin_unlock_irqrestore(&local->sta_lock, flags); - memset(&sinfo, 0, sizeof(sinfo)); - sinfo.filled = 0; - sinfo.generation = local->sta_generation; - cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL); + err = sta_info_finish_insert(sta, false, dummy_reinsert); + if (err) { + mutex_unlock(&local->sta_mtx); + rcu_read_lock(); + return err; } #ifdef CONFIG_MAC80211_VERBOSE_DEBUG @@ -434,35 +555,54 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) mesh_accept_plinks_update(sdata); return 0; - out_err: - mutex_unlock(&local->sta_mtx); - rcu_read_lock(); - return err; } int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU) { struct ieee80211_local *local = sta->local; + struct ieee80211_sub_if_data *sdata = sta->sdata; int err = 0; - might_sleep(); - err = sta_info_insert_check(sta); if (err) { rcu_read_lock(); goto out_free; } + /* + * In ad-hoc mode, we sometimes need to insert stations + * from tasklet context from the RX path. To avoid races, + * always do so in that case -- see the comment below. + */ + if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { + err = sta_info_insert_ibss(sta); + if (err) + goto out_free; + + return 0; + } + + /* + * It might seem that the function called below is in race against + * the function call above that atomically inserts the station... That, + * however, is not true because the above code can only + * be invoked for IBSS interfaces, and the below code will + * not be -- and the two do not race against each other as + * the hash table also keys off the interface. 
+ */ + + might_sleep(); + mutex_lock(&local->sta_mtx); - err = sta_info_insert_finish(sta); + err = sta_info_insert_non_ibss(sta); if (err) goto out_free; return 0; out_free: BUG_ON(!err); - sta_info_free(local, sta); + __sta_info_free(local, sta); return err; } @@ -489,7 +629,7 @@ int sta_info_reinsert(struct sta_info *sta) might_sleep(); - err = sta_info_insert_finish(sta); + err = sta_info_insert_non_ibss(sta); rcu_read_unlock(); return err; } @@ -576,7 +716,7 @@ void sta_info_recalc_tim(struct sta_info *sta) } done: - spin_lock_irqsave(&local->tim_lock, flags); + spin_lock_irqsave(&local->sta_lock, flags); if (indicate_tim) __bss_tim_set(bss, sta->sta.aid); @@ -589,7 +729,7 @@ void sta_info_recalc_tim(struct sta_info *sta) local->tim_in_locked_section = false; } - spin_unlock_irqrestore(&local->tim_lock, flags); + spin_unlock_irqrestore(&local->sta_lock, flags); } static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb) @@ -713,8 +853,8 @@ static int __must_check __sta_info_destroy(struct sta_info *sta) { struct ieee80211_local *local; struct ieee80211_sub_if_data *sdata; + unsigned long flags; int ret, i, ac; - struct tid_ampdu_tx *tid_tx; might_sleep(); @@ -733,12 +873,15 @@ static int __must_check __sta_info_destroy(struct sta_info *sta) set_sta_flag(sta, WLAN_STA_BLOCK_BA); ieee80211_sta_tear_down_BA_sessions(sta, true); + spin_lock_irqsave(&local->sta_lock, flags); ret = sta_info_hash_del(local, sta); + /* this might still be the pending list ... which is fine */ + if (!ret) + list_del(&sta->list); + spin_unlock_irqrestore(&local->sta_lock, flags); if (ret) return ret; - list_del(&sta->list); - mutex_lock(&local->key_mtx); for (i = 0; i < NUM_DEFAULT_KEYS; i++) __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i])); @@ -765,9 +908,6 @@ static int __must_check __sta_info_destroy(struct sta_info *sta) if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) RCU_INIT_POINTER(sdata->u.vlan.sta, NULL); - while (sta->sta_state > IEEE80211_STA_NONE) - sta_info_move_state(sta, sta->sta_state - 1); - if (sta->uploaded) { if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) sdata = container_of(sdata->bss, @@ -813,36 +953,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta) } #endif - /* There could be some memory leaks because of ampdu tx pending queue - * not being freed before destroying the station info. - * - * Make sure that such queues are purged before freeing the station - * info. - * TODO: We have to somehow postpone the full destruction - * until the aggregation stop completes. 
Refer - * http://thread.gmane.org/gmane.linux.kernel.wireless.general/81936 - */ - - mutex_lock(&sta->ampdu_mlme.mtx); - - for (i = 0; i < STA_TID_NUM; i++) { - tid_tx = rcu_dereference_protected_tid_tx(sta, i); - if (!tid_tx) - continue; - if (skb_queue_len(&tid_tx->pending)) { -#ifdef CONFIG_MAC80211_HT_DEBUG - wiphy_debug(local->hw.wiphy, "TX A-MPDU purging %d " - "packets for tid=%d\n", - skb_queue_len(&tid_tx->pending), i); -#endif /* CONFIG_MAC80211_HT_DEBUG */ - __skb_queue_purge(&tid_tx->pending); - } - kfree_rcu(tid_tx, rcu_head); - } - - mutex_unlock(&sta->ampdu_mlme.mtx); - - sta_info_free(local, sta); + __sta_info_free(local, sta); return 0; } @@ -898,9 +1009,11 @@ static void sta_info_cleanup(unsigned long data) void sta_info_init(struct ieee80211_local *local) { - spin_lock_init(&local->tim_lock); + spin_lock_init(&local->sta_lock); mutex_init(&local->sta_mtx); INIT_LIST_HEAD(&local->sta_list); + INIT_LIST_HEAD(&local->sta_pending_list); + INIT_WORK(&local->sta_finish_work, sta_info_finish_work); setup_timer(&local->sta_cleanup, sta_info_cleanup, (unsigned long)local); @@ -929,6 +1042,9 @@ int sta_info_flush(struct ieee80211_local *local, might_sleep(); mutex_lock(&local->sta_mtx); + + sta_info_finish_pending(local); + list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { if (!sdata || sdata == sta->sdata) WARN_ON(__sta_info_destroy(sta)); @@ -945,11 +1061,7 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, *tmp; mutex_lock(&local->sta_mtx); - - list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { - if (sdata != sta->sdata) - continue; - + list_for_each_entry_safe(sta, tmp, &local->sta_list, list) if (time_after(jiffies, sta->last_rx + exp_time)) { #ifdef CONFIG_MAC80211_IBSS_DEBUG printk(KERN_DEBUG "%s: expiring inactive STA %pM\n", @@ -957,8 +1069,6 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, #endif WARN_ON(__sta_info_destroy(sta)); } - } - mutex_unlock(&local->sta_mtx); } @@ -1407,56 +1517,3 @@ void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta, sta_info_recalc_tim(sta); } EXPORT_SYMBOL(ieee80211_sta_set_buffered); - -int sta_info_move_state_checked(struct sta_info *sta, - enum ieee80211_sta_state new_state) -{ - might_sleep(); - - if (sta->sta_state == new_state) - return 0; - - switch (new_state) { - case IEEE80211_STA_NONE: - if (sta->sta_state == IEEE80211_STA_AUTH) - clear_bit(WLAN_STA_AUTH, &sta->_flags); - else - return -EINVAL; - break; - case IEEE80211_STA_AUTH: - if (sta->sta_state == IEEE80211_STA_NONE) - set_bit(WLAN_STA_AUTH, &sta->_flags); - else if (sta->sta_state == IEEE80211_STA_ASSOC) - clear_bit(WLAN_STA_ASSOC, &sta->_flags); - else - return -EINVAL; - break; - case IEEE80211_STA_ASSOC: - if (sta->sta_state == IEEE80211_STA_AUTH) { - set_bit(WLAN_STA_ASSOC, &sta->_flags); - } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { - if (sta->sdata->vif.type == NL80211_IFTYPE_AP) - atomic_dec(&sta->sdata->u.ap.num_sta_authorized); - clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags); - } else - return -EINVAL; - break; - case IEEE80211_STA_AUTHORIZED: - if (sta->sta_state == IEEE80211_STA_ASSOC) { - if (sta->sdata->vif.type == NL80211_IFTYPE_AP) - atomic_inc(&sta->sdata->u.ap.num_sta_authorized); - set_bit(WLAN_STA_AUTHORIZED, &sta->_flags); - } else - return -EINVAL; - break; - default: - WARN(1, "invalid state %d", new_state); - return -EINVAL; - } - - printk(KERN_DEBUG "%s: moving STA %pM to state %d\n", - sta->sdata->name, sta->sta.addr, new_state); - sta->sta_state = 
new_state; - - return 0; -} diff --git a/trunk/net/mac80211/sta_info.h b/trunk/net/mac80211/sta_info.h index 6f77f12dc3fc..8c8ce05ad26f 100644 --- a/trunk/net/mac80211/sta_info.h +++ b/trunk/net/mac80211/sta_info.h @@ -30,6 +30,7 @@ * when virtual port control is not in use. * @WLAN_STA_SHORT_PREAMBLE: Station is capable of receiving short-preamble * frames. + * @WLAN_STA_ASSOC_AP: We're associated to that station, it is an AP. * @WLAN_STA_WME: Station is a QoS-STA. * @WLAN_STA_WDS: Station is one of our WDS peers. * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the @@ -51,7 +52,6 @@ * unblocks the station. * @WLAN_STA_SP: Station is in a service period, so don't try to * reply to other uAPSD trigger frames or PS-Poll. - * @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame. */ enum ieee80211_sta_info_flags { WLAN_STA_AUTH, @@ -59,6 +59,7 @@ enum ieee80211_sta_info_flags { WLAN_STA_PS_STA, WLAN_STA_AUTHORIZED, WLAN_STA_SHORT_PREAMBLE, + WLAN_STA_ASSOC_AP, WLAN_STA_WME, WLAN_STA_WDS, WLAN_STA_CLEAR_PS_FILT, @@ -70,22 +71,11 @@ enum ieee80211_sta_info_flags { WLAN_STA_TDLS_PEER_AUTH, WLAN_STA_UAPSD, WLAN_STA_SP, - WLAN_STA_4ADDR_EVENT, -}; - -enum ieee80211_sta_state { - /* NOTE: These need to be ordered correctly! */ - IEEE80211_STA_NONE, - IEEE80211_STA_AUTH, - IEEE80211_STA_ASSOC, - IEEE80211_STA_AUTHORIZED, }; #define STA_TID_NUM 16 #define ADDBA_RESP_INTERVAL HZ -#define HT_AGG_MAX_RETRIES 15 -#define HT_AGG_BURST_RETRIES 3 -#define HT_AGG_RETRIES_PERIOD (15 * HZ) +#define HT_AGG_MAX_RETRIES 0x3 #define HT_AGG_STATE_DRV_READY 0 #define HT_AGG_STATE_RESPONSE_RECEIVED 1 @@ -98,7 +88,6 @@ enum ieee80211_sta_state { * struct tid_ampdu_tx - TID aggregation information (Tx). * * @rcu_head: rcu head for freeing structure - * @session_timer: check if we keep Tx-ing on the TID (by timeout value) * @addba_resp_timer: timer for peer's response to addba request * @pending: pending frames queue -- use sta's spinlock to protect * @dialog_token: dialog token for aggregation session @@ -121,7 +110,6 @@ enum ieee80211_sta_state { */ struct tid_ampdu_tx { struct rcu_head rcu_head; - struct timer_list session_timer; struct timer_list addba_resp_timer; struct sk_buff_head pending; unsigned long state; @@ -181,7 +169,6 @@ struct tid_ampdu_rx { * @tid_tx: aggregation info for Tx per TID * @tid_start_tx: sessions where start was requested * @addba_req_num: number of times addBA request has been sent. - * @last_addba_req_time: timestamp of the last addBA request. 
* @dialog_token_allocator: dialog token enumerator for each new session; * @work: work struct for starting/stopping aggregation * @tid_rx_timer_expired: bitmap indicating on which TIDs the @@ -201,7 +188,6 @@ struct sta_ampdu_mlme { struct work_struct work; struct tid_ampdu_tx __rcu *tid_tx[STA_TID_NUM]; struct tid_ampdu_tx *tid_start_tx[STA_TID_NUM]; - unsigned long last_addba_req_time[STA_TID_NUM]; u8 addba_req_num[STA_TID_NUM]; u8 dialog_token_allocator; }; @@ -274,8 +260,6 @@ struct sta_ampdu_mlme { * @dummy: indicate a dummy station created for receiving * EAP frames before association * @sta: station information we share with the driver - * @sta_state: duplicates information about station state (for debug) - * @beacon_loss_count: number of times beacon loss has triggered */ struct sta_info { /* General information, mostly static */ @@ -297,8 +281,6 @@ struct sta_info { bool uploaded; - enum ieee80211_sta_state sta_state; - /* use the accessors defined below */ unsigned long _flags; @@ -368,7 +350,6 @@ struct sta_info { #endif unsigned int lost_packets; - unsigned int beacon_loss_count; /* should be right in front of sta to be in the same cache line */ bool dummy; @@ -388,18 +369,12 @@ static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta) static inline void set_sta_flag(struct sta_info *sta, enum ieee80211_sta_info_flags flag) { - WARN_ON(flag == WLAN_STA_AUTH || - flag == WLAN_STA_ASSOC || - flag == WLAN_STA_AUTHORIZED); set_bit(flag, &sta->_flags); } static inline void clear_sta_flag(struct sta_info *sta, enum ieee80211_sta_info_flags flag) { - WARN_ON(flag == WLAN_STA_AUTH || - flag == WLAN_STA_ASSOC || - flag == WLAN_STA_AUTHORIZED); clear_bit(flag, &sta->_flags); } @@ -412,32 +387,9 @@ static inline int test_sta_flag(struct sta_info *sta, static inline int test_and_clear_sta_flag(struct sta_info *sta, enum ieee80211_sta_info_flags flag) { - WARN_ON(flag == WLAN_STA_AUTH || - flag == WLAN_STA_ASSOC || - flag == WLAN_STA_AUTHORIZED); return test_and_clear_bit(flag, &sta->_flags); } -static inline int test_and_set_sta_flag(struct sta_info *sta, - enum ieee80211_sta_info_flags flag) -{ - WARN_ON(flag == WLAN_STA_AUTH || - flag == WLAN_STA_ASSOC || - flag == WLAN_STA_AUTHORIZED); - return test_and_set_bit(flag, &sta->_flags); -} - -int sta_info_move_state_checked(struct sta_info *sta, - enum ieee80211_sta_state new_state); - -static inline void sta_info_move_state(struct sta_info *sta, - enum ieee80211_sta_state new_state) -{ - int ret = sta_info_move_state_checked(sta, new_state); - WARN_ON_ONCE(ret); -} - - void ieee80211_assign_tid_tx(struct sta_info *sta, int tid, struct tid_ampdu_tx *tid_tx); @@ -528,10 +480,7 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata, * until sta_info_insert(). */ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, - const u8 *addr, gfp_t gfp); - -void sta_info_free(struct ieee80211_local *local, struct sta_info *sta); - + u8 *addr, gfp_t gfp); /* * Insert STA info into hash table/list, returns zero or a * -EEXIST if (if the same MAC address is already present). 
@@ -542,6 +491,7 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta); */ int sta_info_insert(struct sta_info *sta); int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU); +int sta_info_insert_atomic(struct sta_info *sta); int sta_info_reinsert(struct sta_info *sta); int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, diff --git a/trunk/net/mac80211/status.c b/trunk/net/mac80211/status.c index 30c265c98f73..16518f386117 100644 --- a/trunk/net/mac80211/status.c +++ b/trunk/net/mac80211/status.c @@ -340,6 +340,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + u16 frag, type; __le16 fc; struct ieee80211_supported_band *sband; struct ieee80211_sub_if_data *sdata; @@ -475,8 +476,12 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) * Fragments are passed to low-level drivers as separate skbs, so these * are actually fragments, not frames. Update frame counters only for * the first fragment of the frame. */ + + frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; + type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE; + if (info->flags & IEEE80211_TX_STAT_ACK) { - if (ieee80211_is_first_frag(hdr->seq_ctrl)) { + if (frag == 0) { local->dot11TransmittedFrameCount++; if (is_multicast_ether_addr(hdr->addr1)) local->dot11MulticastTransmittedFrameCount++; @@ -491,11 +496,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) * with a multicast address in the address 1 field of type Data * or Management. */ if (!is_multicast_ether_addr(hdr->addr1) || - ieee80211_is_data(fc) || - ieee80211_is_mgmt(fc)) + type == IEEE80211_FTYPE_DATA || + type == IEEE80211_FTYPE_MGMT) local->dot11TransmittedFragmentCount++; } else { - if (ieee80211_is_first_frag(hdr->seq_ctrl)) + if (frag == 0) local->dot11FailedCount++; } @@ -512,54 +517,27 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) } if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) { + struct ieee80211_work *wk; u64 cookie = (unsigned long)skb; - if (ieee80211_is_nullfunc(hdr->frame_control) || - ieee80211_is_qos_nullfunc(hdr->frame_control)) { - bool acked = info->flags & IEEE80211_TX_STAT_ACK; - cfg80211_probe_status(skb->dev, hdr->addr1, - cookie, acked, GFP_ATOMIC); - } else { - struct ieee80211_work *wk; - - rcu_read_lock(); - list_for_each_entry_rcu(wk, &local->work_list, list) { - if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX) - continue; - if (wk->offchan_tx.frame != skb) - continue; - wk->offchan_tx.status = true; - break; - } - rcu_read_unlock(); - if (local->hw_roc_skb_for_status == skb) { - cookie = local->hw_roc_cookie ^ 2; - local->hw_roc_skb_for_status = NULL; - } - - cfg80211_mgmt_tx_status( - skb->dev, cookie, skb->data, skb->len, - !!(info->flags & IEEE80211_TX_STAT_ACK), - GFP_ATOMIC); + rcu_read_lock(); + list_for_each_entry_rcu(wk, &local->work_list, list) { + if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX) + continue; + if (wk->offchan_tx.frame != skb) + continue; + wk->offchan_tx.status = true; + break; + } + rcu_read_unlock(); + if (local->hw_roc_skb_for_status == skb) { + cookie = local->hw_roc_cookie ^ 2; + local->hw_roc_skb_for_status = NULL; } - } - if (unlikely(info->ack_frame_id)) { - struct sk_buff *ack_skb; - unsigned long flags; - - spin_lock_irqsave(&local->ack_status_lock, flags); - ack_skb = 
idr_find(&local->ack_status_frames, - info->ack_frame_id); - if (ack_skb) - idr_remove(&local->ack_status_frames, - info->ack_frame_id); - spin_unlock_irqrestore(&local->ack_status_lock, flags); - - /* consumes ack_skb */ - if (ack_skb) - skb_complete_wifi_ack(ack_skb, - info->flags & IEEE80211_TX_STAT_ACK); + cfg80211_mgmt_tx_status( + skb->dev, cookie, skb->data, skb->len, + !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC); } /* this was a transmitted frame, but now we want to reuse it */ @@ -567,7 +545,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) /* Need to make a copy before skb->cb gets cleared */ send_to_cooked = !!(info->flags & IEEE80211_TX_CTL_INJECTED) || - !(ieee80211_is_data(fc)); + (type != IEEE80211_FTYPE_DATA); /* * This is a bit racy but we can avoid a lot of work @@ -632,29 +610,3 @@ void ieee80211_report_low_ack(struct ieee80211_sta *pubsta, u32 num_packets) num_packets, GFP_ATOMIC); } EXPORT_SYMBOL(ieee80211_report_low_ack); - -void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb) -{ - struct ieee80211_local *local = hw_to_local(hw); - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - - if (unlikely(info->ack_frame_id)) { - struct sk_buff *ack_skb; - unsigned long flags; - - spin_lock_irqsave(&local->ack_status_lock, flags); - ack_skb = idr_find(&local->ack_status_frames, - info->ack_frame_id); - if (ack_skb) - idr_remove(&local->ack_status_frames, - info->ack_frame_id); - spin_unlock_irqrestore(&local->ack_status_lock, flags); - - /* consumes ack_skb */ - if (ack_skb) - dev_kfree_skb_any(ack_skb); - } - - dev_kfree_skb_any(skb); -} -EXPORT_SYMBOL(ieee80211_free_txskb); diff --git a/trunk/net/mac80211/tx.c b/trunk/net/mac80211/tx.c index edcd1c7ab83f..1f8b120146d1 100644 --- a/trunk/net/mac80211/tx.c +++ b/trunk/net/mac80211/tx.c @@ -36,8 +36,7 @@ /* misc utils */ -static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, - struct sk_buff *skb, int group_addr, +static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, int next_frag_len) { int rate, mrate, erp, dur, i; @@ -45,7 +44,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, struct ieee80211_local *local = tx->local; struct ieee80211_supported_band *sband; struct ieee80211_hdr *hdr; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); /* assume HW handles this */ if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS) @@ -77,7 +76,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, * at the highest possible rate belonging to the PHY rates in the * BSSBasicRateSet */ - hdr = (struct ieee80211_hdr *)skb->data; + hdr = (struct ieee80211_hdr *)tx->skb->data; if (ieee80211_is_ctl(hdr->frame_control)) { /* TODO: These control frames are not currently sent by * mac80211, but should they be implemented, this function @@ -151,15 +150,11 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, rate = mrate; } - /* Don't calculate ACKs for QoS Frames with NoAck Policy set */ - if (ieee80211_is_data_qos(hdr->frame_control) && - *(ieee80211_get_qos_ctl(hdr)) | IEEE80211_QOS_CTL_ACK_POLICY_NOACK) - dur = 0; - else - /* Time needed to transmit ACK - * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up - * to closest integer */ - dur = ieee80211_frame_duration(local, 10, rate, erp, + /* Time needed to transmit ACK + * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up + * to closest integer */ + + dur = 
ieee80211_frame_duration(local, 10, rate, erp, tx->sdata->vif.bss_conf.use_short_preamble); if (next_frag_len) { @@ -295,6 +290,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) if (likely(tx->flags & IEEE80211_TX_UNICAST)) { if (unlikely(!assoc && + tx->sdata->vif.type != NL80211_IFTYPE_ADHOC && ieee80211_is_data(hdr->frame_control))) { #ifdef CONFIG_MAC80211_VERBOSE_DEBUG printk(KERN_DEBUG "%s: dropped data frame to not " @@ -304,14 +300,17 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc); return TX_DROP; } - } else if (unlikely(tx->sdata->vif.type == NL80211_IFTYPE_AP && - ieee80211_is_data(hdr->frame_control) && - !atomic_read(&tx->sdata->u.ap.num_sta_authorized))) { - /* - * No associated STAs - no need to send multicast - * frames. - */ - return TX_DROP; + } else { + if (unlikely(ieee80211_is_data(hdr->frame_control) && + tx->local->num_sta == 0 && + tx->sdata->vif.type != NL80211_IFTYPE_ADHOC)) { + /* + * No associated STAs - no need to send multicast + * frames. + */ + return TX_DROP; + } + return TX_CONTINUE; } return TX_CONTINUE; @@ -573,6 +572,8 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) switch (tx->key->conf.cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: + if (ieee80211_is_auth(hdr->frame_control)) + break; case WLAN_CIPHER_SUITE_TKIP: if (!ieee80211_is_data_present(hdr->frame_control)) tx->key = NULL; @@ -636,7 +637,6 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) else txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1; txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP || - tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT || tx->sdata->vif.type == NL80211_IFTYPE_ADHOC); /* set up RTS protection if desired */ @@ -844,13 +844,11 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) return TX_CONTINUE; } -static int ieee80211_fragment(struct ieee80211_tx_data *tx, +static int ieee80211_fragment(struct ieee80211_local *local, struct sk_buff *skb, int hdrlen, int frag_threshold) { - struct ieee80211_local *local = tx->local; - struct ieee80211_tx_info *info; - struct sk_buff *tmp; + struct sk_buff *tail = skb, *tmp; int per_fragm = frag_threshold - hdrlen - FCS_LEN; int pos = hdrlen + per_fragm; int rem = skb->len - hdrlen - per_fragm; @@ -858,8 +856,6 @@ static int ieee80211_fragment(struct ieee80211_tx_data *tx, if (WARN_ON(rem < 0)) return -EINVAL; - /* first fragment was already added to queue by caller */ - while (rem) { int fraglen = per_fragm; @@ -872,21 +868,12 @@ static int ieee80211_fragment(struct ieee80211_tx_data *tx, IEEE80211_ENCRYPT_TAILROOM); if (!tmp) return -ENOMEM; - - __skb_queue_tail(&tx->skbs, tmp); - + tail->next = tmp; + tail = tmp; skb_reserve(tmp, local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM); /* copy control information */ memcpy(tmp->cb, skb->cb, sizeof(tmp->cb)); - - info = IEEE80211_SKB_CB(tmp); - info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT | - IEEE80211_TX_CTL_FIRST_FRAGMENT); - - if (rem) - info->flags |= IEEE80211_TX_CTL_MORE_FRAMES; - skb_copy_queue_mapping(tmp, skb); tmp->priority = skb->priority; tmp->dev = skb->dev; @@ -898,7 +885,6 @@ static int ieee80211_fragment(struct ieee80211_tx_data *tx, pos += fraglen; } - /* adjust first fragment's length */ skb->len = hdrlen + per_fragm; return 0; } @@ -913,10 +899,6 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) int hdrlen; int fragnum; - /* no matter what happens, tx->skb moves to tx->skbs */ - __skb_queue_tail(&tx->skbs, skb); - tx->skb = NULL; - if 
(info->flags & IEEE80211_TX_CTL_DONTFRAG) return TX_CONTINUE; @@ -945,21 +927,21 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) * of the fragments then we will simply pretend to accept the skb * but store it away as pending. */ - if (ieee80211_fragment(tx, skb, hdrlen, frag_threshold)) + if (ieee80211_fragment(tx->local, skb, hdrlen, frag_threshold)) return TX_DROP; /* update duration/seq/flags of fragments */ fragnum = 0; - - skb_queue_walk(&tx->skbs, skb) { + do { int next_len; const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); hdr = (void *)skb->data; info = IEEE80211_SKB_CB(skb); - if (!skb_queue_is_last(&tx->skbs, skb)) { + if (skb->next) { hdr->frame_control |= morefrags; + next_len = skb->next->len; /* * No multi-rate retries for fragmented frames, that * would completely throw off the NAV at other STAs. @@ -974,9 +956,10 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) hdr->frame_control &= ~morefrags; next_len = 0; } + hdr->duration_id = ieee80211_duration(tx, 0, next_len); hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG); fragnum++; - } + } while ((skb = skb->next)); return TX_CONTINUE; } @@ -984,16 +967,16 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) static ieee80211_tx_result debug_noinline ieee80211_tx_h_stats(struct ieee80211_tx_data *tx) { - struct sk_buff *skb; + struct sk_buff *skb = tx->skb; if (!tx->sta) return TX_CONTINUE; tx->sta->tx_packets++; - skb_queue_walk(&tx->skbs, skb) { + do { tx->sta->tx_fragments++; tx->sta->tx_bytes += skb->len; - } + } while ((skb = skb->next)); return TX_CONTINUE; } @@ -1032,25 +1015,21 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx) static ieee80211_tx_result debug_noinline ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx) { - struct sk_buff *skb; + struct sk_buff *skb = tx->skb; struct ieee80211_hdr *hdr; int next_len; bool group_addr; - skb_queue_walk(&tx->skbs, skb) { + do { hdr = (void *) skb->data; if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) break; /* must not overwrite AID */ - if (!skb_queue_is_last(&tx->skbs, skb)) { - struct sk_buff *next = skb_queue_next(&tx->skbs, skb); - next_len = next->len; - } else - next_len = 0; + next_len = skb->next ? 
skb->next->len : 0; group_addr = is_multicast_ether_addr(hdr->addr1); hdr->duration_id = - ieee80211_duration(tx, skb, group_addr, next_len); - } + ieee80211_duration(tx, group_addr, next_len); + } while ((skb = skb->next)); return TX_CONTINUE; } @@ -1064,11 +1043,9 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, int tid) { bool queued = false; - bool reset_agg_timer = false; if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) { info->flags |= IEEE80211_TX_CTL_AMPDU; - reset_agg_timer = true; } else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { /* * nothing -- this aggregation session is being started @@ -1100,7 +1077,6 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, /* do nothing, let packet pass through */ } else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) { info->flags |= IEEE80211_TX_CTL_AMPDU; - reset_agg_timer = true; } else { queued = true; info->control.vif = &tx->sdata->vif; @@ -1110,11 +1086,6 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, spin_unlock(&tx->sta->lock); } - /* reset session timer */ - if (reset_agg_timer && tid_tx->timeout) - mod_timer(&tid_tx->session_timer, - TU_TO_EXP_TIME(tid_tx->timeout)); - return queued; } @@ -1137,7 +1108,6 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, tx->local = local; tx->sdata = sdata; tx->channel = local->hw.conf.channel; - __skb_queue_head_init(&tx->skbs); /* * If this flag is set to true anywhere, and we get here, @@ -1182,8 +1152,16 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, if (is_multicast_ether_addr(hdr->addr1)) { tx->flags &= ~IEEE80211_TX_UNICAST; info->flags |= IEEE80211_TX_CTL_NO_ACK; - } else + } else { tx->flags |= IEEE80211_TX_UNICAST; + if (unlikely(local->wifi_wme_noack_test)) + info->flags |= IEEE80211_TX_CTL_NO_ACK; + /* + * Flags are initialized to 0. Hence, no need to + * explicitly unset IEEE80211_TX_CTL_NO_ACK since + * it might already be set for injected frames. + */ + } if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) { if (!(tx->flags & IEEE80211_TX_UNICAST) || @@ -1202,18 +1180,22 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, return TX_CONTINUE; } -static bool ieee80211_tx_frags(struct ieee80211_local *local, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct sk_buff_head *skbs, - bool txpending) +/* + * Returns false if the frame couldn't be transmitted but was queued instead. + */ +static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp, + struct sta_info *sta, bool txpending) { - struct sk_buff *skb, *tmp; + struct sk_buff *skb = *skbp, *next; struct ieee80211_tx_info *info; + struct ieee80211_sub_if_data *sdata; unsigned long flags; + int len; + bool fragm = false; - skb_queue_walk_safe(skbs, skb, tmp) { + while (skb) { int q = skb_get_queue_mapping(skb); + __le16 fc; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); if (local->queue_stop_reasons[q] || @@ -1223,11 +1205,24 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local, * transmission from the tx-pending tasklet when the * queue is woken again. */ - if (txpending) - skb_queue_splice_init(skbs, &local->pending[q]); - else - skb_queue_splice_tail_init(skbs, - &local->pending[q]); + + do { + next = skb->next; + skb->next = NULL; + /* + * NB: If txpending is true, next must already + * be NULL since we must've gone through this + * loop before already; therefore we can just + * queue the frame to the head without worrying + * about reordering of fragments. 
+ */ + if (unlikely(txpending)) + __skb_queue_head(&local->pending[q], + skb); + else + __skb_queue_tail(&local->pending[q], + skb); + } while ((skb = next)); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); @@ -1236,72 +1231,47 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local, spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); info = IEEE80211_SKB_CB(skb); - info->control.vif = vif; - info->control.sta = sta; - - __skb_unlink(skb, skbs); - drv_tx(local, skb); - } - return true; -} + if (fragm) + info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT | + IEEE80211_TX_CTL_FIRST_FRAGMENT); -/* - * Returns false if the frame couldn't be transmitted but was queued instead. - */ -static bool __ieee80211_tx(struct ieee80211_local *local, - struct sk_buff_head *skbs, int led_len, - struct sta_info *sta, bool txpending) -{ - struct ieee80211_tx_info *info; - struct ieee80211_sub_if_data *sdata; - struct ieee80211_vif *vif; - struct ieee80211_sta *pubsta; - struct sk_buff *skb; - bool result = true; - __le16 fc; + next = skb->next; + len = skb->len; - if (WARN_ON(skb_queue_empty(skbs))) - return true; - - skb = skb_peek(skbs); - fc = ((struct ieee80211_hdr *)skb->data)->frame_control; - info = IEEE80211_SKB_CB(skb); - sdata = vif_to_sdata(info->control.vif); - if (sta && !sta->uploaded) - sta = NULL; + if (next) + info->flags |= IEEE80211_TX_CTL_MORE_FRAMES; - if (sta) - pubsta = &sta->sta; - else - pubsta = NULL; + sdata = vif_to_sdata(info->control.vif); - switch (sdata->vif.type) { - case NL80211_IFTYPE_MONITOR: - sdata = NULL; - vif = NULL; - break; - case NL80211_IFTYPE_AP_VLAN: - sdata = container_of(sdata->bss, - struct ieee80211_sub_if_data, u.ap); - /* fall through */ - default: - vif = &sdata->vif; - break; - } + switch (sdata->vif.type) { + case NL80211_IFTYPE_MONITOR: + info->control.vif = NULL; + break; + case NL80211_IFTYPE_AP_VLAN: + info->control.vif = &container_of(sdata->bss, + struct ieee80211_sub_if_data, u.ap)->vif; + break; + default: + /* keep */ + break; + } - if (local->ops->tx_frags) - drv_tx_frags(local, vif, pubsta, skbs); - else - result = ieee80211_tx_frags(local, vif, pubsta, skbs, - txpending); + if (sta && sta->uploaded) + info->control.sta = &sta->sta; + else + info->control.sta = NULL; - ieee80211_tpt_led_trig_tx(local, fc, led_len); - ieee80211_led_tx(local, 1); + fc = ((struct ieee80211_hdr *)skb->data)->frame_control; + drv_tx(local, skb); - WARN_ON_ONCE(!skb_queue_empty(skbs)); + ieee80211_tpt_led_trig_tx(local, fc, len); + *skbp = skb = next; + ieee80211_led_tx(local, 1); + fragm = true; + } - return result; + return true; } /* @@ -1310,7 +1280,8 @@ static bool __ieee80211_tx(struct ieee80211_local *local, */ static int invoke_tx_handlers(struct ieee80211_tx_data *tx) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + struct sk_buff *skb = tx->skb; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); ieee80211_tx_result res = TX_DROP; #define CALL_TXH(txh) \ @@ -1328,11 +1299,8 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx) if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)) CALL_TXH(ieee80211_tx_h_rate_ctrl); - if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) { - __skb_queue_tail(&tx->skbs, tx->skb); - tx->skb = NULL; + if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) goto txh_done; - } CALL_TXH(ieee80211_tx_h_michael_mic_add); CALL_TXH(ieee80211_tx_h_sequence); @@ -1347,10 +1315,13 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx) txh_done: 
if (unlikely(res == TX_DROP)) { I802_DEBUG_INC(tx->local->tx_handlers_drop); - if (tx->skb) - dev_kfree_skb(tx->skb); - else - __skb_queue_purge(&tx->skbs); + while (skb) { + struct sk_buff *next; + + next = skb->next; + dev_kfree_skb(skb); + skb = next; + } return -1; } else if (unlikely(res == TX_QUEUED)) { I802_DEBUG_INC(tx->local->tx_handlers_queued); @@ -1371,7 +1342,6 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, ieee80211_tx_result res_prepare; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); bool result = true; - int led_len; if (unlikely(skb->len < 10)) { dev_kfree_skb(skb); @@ -1381,7 +1351,6 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, rcu_read_lock(); /* initialises tx */ - led_len = skb->len; res_prepare = ieee80211_tx_prepare(sdata, &tx, skb); if (unlikely(res_prepare == TX_DROP)) { @@ -1395,8 +1364,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, info->band = tx.channel->band; if (!invoke_tx_handlers(&tx)) - result = __ieee80211_tx(local, &tx.skbs, led_len, - tx.sta, txpending); + result = __ieee80211_tx(local, &tx.skb, tx.sta, txpending); out: rcu_read_unlock(); return result; @@ -1463,7 +1431,7 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) if (ieee80211_vif_is_mesh(&sdata->vif) && ieee80211_is_data(hdr->frame_control) && !is_multicast_ether_addr(hdr->addr1)) - if (mesh_nexthop_resolve(skb, sdata)) { + if (mesh_nexthop_lookup(skb, sdata)) { /* skb queued: don't free */ rcu_read_unlock(); return; @@ -1717,10 +1685,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, int nh_pos, h_pos; struct sta_info *sta = NULL; bool wme_sta = false, authorized = false, tdls_auth = false; + struct sk_buff *tmp_skb; bool tdls_direct = false; - bool multicast; - u32 info_flags = 0; - u16 info_id = 0; if (unlikely(skb->len < ETH_HLEN)) { ret = NETDEV_TX_OK; @@ -1907,8 +1873,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, * if it is a multicast address (which can only happen * in AP mode) */ - multicast = is_multicast_ether_addr(hdr.addr1); - if (!multicast) { + if (!is_multicast_ether_addr(hdr.addr1)) { rcu_read_lock(); sta = sta_info_get(sdata, hdr.addr1); if (sta) { @@ -1949,54 +1914,11 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, goto fail; } - if (unlikely(!multicast && skb->sk && - skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)) { - struct sk_buff *orig_skb = skb; - - skb = skb_clone(skb, GFP_ATOMIC); - if (skb) { - unsigned long flags; - int id, r; - - spin_lock_irqsave(&local->ack_status_lock, flags); - r = idr_get_new_above(&local->ack_status_frames, - orig_skb, 1, &id); - if (r == -EAGAIN) { - idr_pre_get(&local->ack_status_frames, - GFP_ATOMIC); - r = idr_get_new_above(&local->ack_status_frames, - orig_skb, 1, &id); - } - if (WARN_ON(!id) || id > 0xffff) { - idr_remove(&local->ack_status_frames, id); - r = -ERANGE; - } - spin_unlock_irqrestore(&local->ack_status_lock, flags); - - if (!r) { - info_id = id; - info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; - } else if (skb_shared(skb)) { - kfree_skb(orig_skb); - } else { - kfree_skb(skb); - skb = orig_skb; - } - } else { - /* couldn't clone -- lose tx status ... */ - skb = orig_skb; - } - } - /* * If the skb is shared we need to obtain our own copy. 
*/ if (skb_shared(skb)) { - struct sk_buff *tmp_skb = skb; - - /* can't happen -- skb is a clone if info_id != 0 */ - WARN_ON(info_id); - + tmp_skb = skb; skb = skb_clone(skb, GFP_ATOMIC); kfree_skb(tmp_skb); @@ -2097,10 +2019,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, memset(info, 0, sizeof(*info)); dev->trans_start = jiffies; - - info->flags = info_flags; - info->ack_frame_id = info_id; - ieee80211_xmit(sdata, skb); return NETDEV_TX_OK; @@ -2144,15 +2062,10 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) { result = ieee80211_tx(sdata, skb, true); } else { - struct sk_buff_head skbs; - - __skb_queue_head_init(&skbs); - __skb_queue_tail(&skbs, skb); - hdr = (struct ieee80211_hdr *)skb->data; sta = sta_info_get(sdata, hdr->addr1); - result = __ieee80211_tx(local, &skbs, skb->len, sta, true); + result = __ieee80211_tx(local, &skb, sta, true); } return result; @@ -2265,10 +2178,10 @@ static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss, /* Bitmap control */ *pos++ = n1 | aid0; /* Part Virt Bitmap */ - skb_put(skb, n2 - n1); memcpy(pos, bss->tim + n1, n2 - n1 + 1); tim[1] = n2 - n1 + 4; + skb_put(skb, n2 - n1); } else { *pos++ = aid0; /* Bitmap control */ *pos++ = 0; /* Part Virt Bitmap */ @@ -2333,9 +2246,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, } else { unsigned long flags; - spin_lock_irqsave(&local->tim_lock, flags); + spin_lock_irqsave(&local->sta_lock, flags); ieee80211_beacon_add_tim(ap, skb, beacon); - spin_unlock_irqrestore(&local->tim_lock, flags); + spin_unlock_irqrestore(&local->sta_lock, flags); } if (tim_offset) @@ -2366,31 +2279,22 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, } else if (ieee80211_vif_is_mesh(&sdata->vif)) { struct ieee80211_mgmt *mgmt; u8 *pos; - int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) + - sizeof(mgmt->u.beacon); #ifdef CONFIG_MAC80211_MESH if (!sdata->u.mesh.mesh_id_len) goto out; #endif - skb = dev_alloc_skb(local->tx_headroom + - hdr_len + - 2 + /* NULL SSID */ - 2 + 8 + /* supported rates */ - 2 + 3 + /* DS params */ - 2 + (IEEE80211_MAX_SUPP_RATES - 8) + - 2 + sizeof(struct ieee80211_ht_cap) + - 2 + sizeof(struct ieee80211_ht_info) + - 2 + sdata->u.mesh.mesh_id_len + - 2 + sizeof(struct ieee80211_meshconf_ie) + - sdata->u.mesh.ie_len); + /* headroom, head length, tail length and maximum TIM length */ + skb = dev_alloc_skb(local->tx_headroom + 400 + + sdata->u.mesh.ie_len); if (!skb) goto out; skb_reserve(skb, local->hw.extra_tx_headroom); - mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len); - memset(mgmt, 0, hdr_len); + mgmt = (struct ieee80211_mgmt *) + skb_put(skb, 24 + sizeof(mgmt->u.beacon)); + memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); memset(mgmt->da, 0xff, ETH_ALEN); @@ -2409,8 +2313,6 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, mesh_add_ds_params_ie(skb, sdata) || ieee80211_add_ext_srates_ie(&sdata->vif, skb) || mesh_add_rsn_ie(skb, sdata) || - mesh_add_ht_cap_ie(skb, sdata) || - mesh_add_ht_info_ie(skb, sdata) || mesh_add_meshid_ie(skb, sdata) || mesh_add_meshconf_ie(skb, sdata) || mesh_add_vendor_ies(skb, sdata)) { @@ -2453,37 +2355,6 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, } EXPORT_SYMBOL(ieee80211_beacon_get_tim); -struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct 
ieee80211_if_ap *ap = NULL; - struct sk_buff *presp = NULL, *skb = NULL; - struct ieee80211_hdr *hdr; - struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); - - if (sdata->vif.type != NL80211_IFTYPE_AP) - return NULL; - - rcu_read_lock(); - - ap = &sdata->u.ap; - presp = rcu_dereference(ap->probe_resp); - if (!presp) - goto out; - - skb = skb_copy(presp, GFP_ATOMIC); - if (!skb) - goto out; - - hdr = (struct ieee80211_hdr *) skb->data; - memset(hdr->addr1, 0, sizeof(hdr->addr1)); - -out: - rcu_read_unlock(); - return skb; -} -EXPORT_SYMBOL(ieee80211_proberesp_get); - struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -2696,15 +2567,15 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, } EXPORT_SYMBOL(ieee80211_get_buffered_bc); -void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, - struct sk_buff *skb, int tid) +void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { skb_set_mac_header(skb, 0); skb_set_network_header(skb, 0); skb_set_transport_header(skb, 0); - skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]); - skb->priority = tid; + /* Send all internal mgmt frames on VO. Accordingly set TID to 7. */ + skb_set_queue_mapping(skb, IEEE80211_AC_VO); + skb->priority = 7; /* * The other path calling ieee80211_xmit is from the tasklet, diff --git a/trunk/net/mac80211/util.c b/trunk/net/mac80211/util.c index 9919892575f4..d5230ecc784d 100644 --- a/trunk/net/mac80211/util.c +++ b/trunk/net/mac80211/util.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -97,13 +96,13 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx) { - struct sk_buff *skb; + struct sk_buff *skb = tx->skb; struct ieee80211_hdr *hdr; - skb_queue_walk(&tx->skbs, skb) { + do { hdr = (struct ieee80211_hdr *) skb->data; hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); - } + } while ((skb = skb->next)); } int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, @@ -565,172 +564,6 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw, } EXPORT_SYMBOL(ieee80211_queue_delayed_work); -u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, - struct ieee802_11_elems *elems, - u64 filter, u32 crc) -{ - size_t left = len; - u8 *pos = start; - bool calc_crc = filter != 0; - - memset(elems, 0, sizeof(*elems)); - elems->ie_start = start; - elems->total_len = len; - - while (left >= 2) { - u8 id, elen; - - id = *pos++; - elen = *pos++; - left -= 2; - - if (elen > left) - break; - - if (calc_crc && id < 64 && (filter & (1ULL << id))) - crc = crc32_be(crc, pos - 2, elen + 2); - - switch (id) { - case WLAN_EID_SSID: - elems->ssid = pos; - elems->ssid_len = elen; - break; - case WLAN_EID_SUPP_RATES: - elems->supp_rates = pos; - elems->supp_rates_len = elen; - break; - case WLAN_EID_FH_PARAMS: - elems->fh_params = pos; - elems->fh_params_len = elen; - break; - case WLAN_EID_DS_PARAMS: - elems->ds_params = pos; - elems->ds_params_len = elen; - break; - case WLAN_EID_CF_PARAMS: - elems->cf_params = pos; - elems->cf_params_len = elen; - break; - case WLAN_EID_TIM: - if (elen >= sizeof(struct ieee80211_tim_ie)) { - elems->tim = (void *)pos; - elems->tim_len = elen; - } - break; - case WLAN_EID_IBSS_PARAMS: - elems->ibss_params = pos; - elems->ibss_params_len = elen; - break; - case WLAN_EID_CHALLENGE: - elems->challenge = pos; - elems->challenge_len = elen; - break; - case WLAN_EID_VENDOR_SPECIFIC: - if (elen >= 4 && pos[0] 
== 0x00 && pos[1] == 0x50 && - pos[2] == 0xf2) { - /* Microsoft OUI (00:50:F2) */ - - if (calc_crc) - crc = crc32_be(crc, pos - 2, elen + 2); - - if (pos[3] == 1) { - /* OUI Type 1 - WPA IE */ - elems->wpa = pos; - elems->wpa_len = elen; - } else if (elen >= 5 && pos[3] == 2) { - /* OUI Type 2 - WMM IE */ - if (pos[4] == 0) { - elems->wmm_info = pos; - elems->wmm_info_len = elen; - } else if (pos[4] == 1) { - elems->wmm_param = pos; - elems->wmm_param_len = elen; - } - } - } - break; - case WLAN_EID_RSN: - elems->rsn = pos; - elems->rsn_len = elen; - break; - case WLAN_EID_ERP_INFO: - elems->erp_info = pos; - elems->erp_info_len = elen; - break; - case WLAN_EID_EXT_SUPP_RATES: - elems->ext_supp_rates = pos; - elems->ext_supp_rates_len = elen; - break; - case WLAN_EID_HT_CAPABILITY: - if (elen >= sizeof(struct ieee80211_ht_cap)) - elems->ht_cap_elem = (void *)pos; - break; - case WLAN_EID_HT_INFORMATION: - if (elen >= sizeof(struct ieee80211_ht_info)) - elems->ht_info_elem = (void *)pos; - break; - case WLAN_EID_MESH_ID: - elems->mesh_id = pos; - elems->mesh_id_len = elen; - break; - case WLAN_EID_MESH_CONFIG: - if (elen >= sizeof(struct ieee80211_meshconf_ie)) - elems->mesh_config = (void *)pos; - break; - case WLAN_EID_PEER_MGMT: - elems->peering = pos; - elems->peering_len = elen; - break; - case WLAN_EID_PREQ: - elems->preq = pos; - elems->preq_len = elen; - break; - case WLAN_EID_PREP: - elems->prep = pos; - elems->prep_len = elen; - break; - case WLAN_EID_PERR: - elems->perr = pos; - elems->perr_len = elen; - break; - case WLAN_EID_RANN: - if (elen >= sizeof(struct ieee80211_rann_ie)) - elems->rann = (void *)pos; - break; - case WLAN_EID_CHANNEL_SWITCH: - elems->ch_switch_elem = pos; - elems->ch_switch_elem_len = elen; - break; - case WLAN_EID_QUIET: - if (!elems->quiet_elem) { - elems->quiet_elem = pos; - elems->quiet_elem_len = elen; - } - elems->num_of_quiet_elem++; - break; - case WLAN_EID_COUNTRY: - elems->country_elem = pos; - elems->country_elem_len = elen; - break; - case WLAN_EID_PWR_CONSTRAINT: - elems->pwr_constr_elem = pos; - elems->pwr_constr_elem_len = elen; - break; - case WLAN_EID_TIMEOUT_INTERVAL: - elems->timeout_int = pos; - elems->timeout_int_len = elen; - break; - default: - break; - } - - left -= elen; - pos += elen; - } - - return crc; -} - void ieee802_11_parse_elems(u8 *start, size_t len, struct ieee802_11_elems *elems) { @@ -979,9 +812,23 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, offset = noffset; } - if (sband->ht_cap.ht_supported) - pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, - sband->ht_cap.cap); + if (sband->ht_cap.ht_supported) { + u16 cap = sband->ht_cap.cap; + __le16 tmp; + + *pos++ = WLAN_EID_HT_CAPABILITY; + *pos++ = sizeof(struct ieee80211_ht_cap); + memset(pos, 0, sizeof(struct ieee80211_ht_cap)); + tmp = cpu_to_le16(cap); + memcpy(pos, &tmp, sizeof(u16)); + pos += sizeof(u16); + *pos++ = sband->ht_cap.ampdu_factor | + (sband->ht_cap.ampdu_density << + IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT); + memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs)); + pos += sizeof(sband->ht_cap.mcs); + pos += 2 + 4 + 1; /* ext info, BF cap, antsel */ + } /* * If adding more here, adjust code in main.c @@ -1142,6 +989,16 @@ int ieee80211_reconfig(struct ieee80211_local *local) */ } #endif + + /* setup fragmentation threshold */ + drv_set_frag_threshold(local, hw->wiphy->frag_threshold); + + /* setup RTS threshold */ + drv_set_rts_threshold(local, hw->wiphy->rts_threshold); + + /* reset coverage class */ + 
drv_set_coverage_class(local, hw->wiphy->coverage_class); + /* everything else happens only if HW was up & running */ if (!local->open_count) goto wake_up; @@ -1160,15 +1017,6 @@ int ieee80211_reconfig(struct ieee80211_local *local) return res; } - /* setup fragmentation threshold */ - drv_set_frag_threshold(local, hw->wiphy->frag_threshold); - - /* setup RTS threshold */ - drv_set_rts_threshold(local, hw->wiphy->rts_threshold); - - /* reset coverage class */ - drv_set_coverage_class(local, hw->wiphy->coverage_class); - ieee80211_led_radio(local, true); ieee80211_mod_tpt_led_trig(local, IEEE80211_TPT_LEDTRIG_FL_RADIO, 0); @@ -1178,7 +1026,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && sdata->vif.type != NL80211_IFTYPE_MONITOR && ieee80211_sdata_running(sdata)) - res = drv_add_interface(local, sdata); + res = drv_add_interface(local, &sdata->vif); } /* add STAs back */ @@ -1228,13 +1076,11 @@ int ieee80211_reconfig(struct ieee80211_local *local) BSS_CHANGED_BEACON_INT | BSS_CHANGED_BSSID | BSS_CHANGED_CQM | - BSS_CHANGED_QOS | - BSS_CHANGED_IDLE; + BSS_CHANGED_QOS; switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: - changed |= BSS_CHANGED_ASSOC | - BSS_CHANGED_ARP_FILTER; + changed |= BSS_CHANGED_ASSOC; mutex_lock(&sdata->u.mgd.mtx); ieee80211_bss_info_change_notify(sdata, changed); mutex_unlock(&sdata->u.mgd.mtx); @@ -1244,10 +1090,6 @@ int ieee80211_reconfig(struct ieee80211_local *local) /* fall through */ case NL80211_IFTYPE_AP: changed |= BSS_CHANGED_SSID; - - if (sdata->vif.type == NL80211_IFTYPE_AP) - changed |= BSS_CHANGED_AP_PROBE_RESP; - /* fall through */ case NL80211_IFTYPE_MESH_POINT: changed |= BSS_CHANGED_BEACON | @@ -1269,8 +1111,6 @@ int ieee80211_reconfig(struct ieee80211_local *local) } } - ieee80211_recalc_ps(local, -1); - /* * Clear the WLAN_STA_BLOCK_BA flag so new aggregation * sessions can be established after a resume. 
@@ -1526,108 +1366,6 @@ void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif) } EXPORT_SYMBOL(ieee80211_disable_rssi_reports); -u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, - u16 cap) -{ - __le16 tmp; - - *pos++ = WLAN_EID_HT_CAPABILITY; - *pos++ = sizeof(struct ieee80211_ht_cap); - memset(pos, 0, sizeof(struct ieee80211_ht_cap)); - - /* capability flags */ - tmp = cpu_to_le16(cap); - memcpy(pos, &tmp, sizeof(u16)); - pos += sizeof(u16); - - /* AMPDU parameters */ - *pos++ = ht_cap->ampdu_factor | - (ht_cap->ampdu_density << - IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT); - - /* MCS set */ - memcpy(pos, &ht_cap->mcs, sizeof(ht_cap->mcs)); - pos += sizeof(ht_cap->mcs); - - /* extended capabilities */ - pos += sizeof(__le16); - - /* BF capabilities */ - pos += sizeof(__le32); - - /* antenna selection */ - pos += sizeof(u8); - - return pos; -} - -u8 *ieee80211_ie_build_ht_info(u8 *pos, - struct ieee80211_sta_ht_cap *ht_cap, - struct ieee80211_channel *channel, - enum nl80211_channel_type channel_type) -{ - struct ieee80211_ht_info *ht_info; - /* Build HT Information */ - *pos++ = WLAN_EID_HT_INFORMATION; - *pos++ = sizeof(struct ieee80211_ht_info); - ht_info = (struct ieee80211_ht_info *)pos; - ht_info->control_chan = - ieee80211_frequency_to_channel(channel->center_freq); - switch (channel_type) { - case NL80211_CHAN_HT40MINUS: - ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW; - break; - case NL80211_CHAN_HT40PLUS: - ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; - break; - case NL80211_CHAN_HT20: - default: - ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE; - break; - } - if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) - ht_info->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; - - /* - * Note: According to 802.11n-2009 9.13.3.1, HT Protection field and - * RIFS Mode are reserved in IBSS mode, therefore keep them at 0 - */ - ht_info->operation_mode = 0x0000; - ht_info->stbc_param = 0x0000; - - /* It seems that Basic MCS set and Supported MCS set - are identical for the first 10 bytes */ - memset(&ht_info->basic_set, 0, 16); - memcpy(&ht_info->basic_set, &ht_cap->mcs, 10); - - return pos + sizeof(struct ieee80211_ht_info); -} - -enum nl80211_channel_type -ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info) -{ - enum nl80211_channel_type channel_type; - - if (!ht_info) - return NL80211_CHAN_NO_HT; - - switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { - case IEEE80211_HT_PARAM_CHA_SEC_NONE: - channel_type = NL80211_CHAN_HT20; - break; - case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: - channel_type = NL80211_CHAN_HT40PLUS; - break; - case IEEE80211_HT_PARAM_CHA_SEC_BELOW: - channel_type = NL80211_CHAN_HT40MINUS; - break; - default: - channel_type = NL80211_CHAN_NO_HT; - } - - return channel_type; -} - int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); diff --git a/trunk/net/mac80211/wep.c b/trunk/net/mac80211/wep.c index 68ad351479df..a1c6bfd55f0f 100644 --- a/trunk/net/mac80211/wep.c +++ b/trunk/net/mac80211/wep.c @@ -330,12 +330,13 @@ ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx) ieee80211_tx_set_protected(tx); - skb_queue_walk(&tx->skbs, skb) { + skb = tx->skb; + do { if (wep_encrypt_skb(tx, skb) < 0) { I802_DEBUG_INC(tx->local->tx_handlers_drop_wep); return TX_DROP; } - } + } while ((skb = skb->next)); return TX_CONTINUE; } diff --git a/trunk/net/mac80211/wme.c b/trunk/net/mac80211/wme.c index 89511be3111e..fd52e695c071 
100644 --- a/trunk/net/mac80211/wme.c +++ b/trunk/net/mac80211/wme.c @@ -52,30 +52,6 @@ static int wme_downgrade_ac(struct sk_buff *skb) } } -/* Indicate which queue to use for this fully formed 802.11 frame */ -u16 ieee80211_select_queue_80211(struct ieee80211_local *local, - struct sk_buff *skb, - struct ieee80211_hdr *hdr) -{ - u8 *p; - - if (local->hw.queues < 4) - return 0; - - if (!ieee80211_is_data(hdr->frame_control)) { - skb->priority = 7; - return ieee802_1d_to_ac[skb->priority]; - } - if (!ieee80211_is_data_qos(hdr->frame_control)) { - skb->priority = 0; - return ieee802_1d_to_ac[skb->priority]; - } - - p = ieee80211_get_qos_ctl(hdr); - skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK; - - return ieee80211_downgrade_queue(local, skb); -} /* Indicate which queue to use. */ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, @@ -107,7 +83,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, break; #ifdef CONFIG_MAC80211_MESH case NL80211_IFTYPE_MESH_POINT: - qos = true; + ra = skb->data; break; #endif case NL80211_IFTYPE_STATION: @@ -163,24 +139,16 @@ void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (void *)skb->data; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); /* Fill in the QoS header if there is one. */ if (ieee80211_is_data_qos(hdr->frame_control)) { u8 *p = ieee80211_get_qos_ctl(hdr); - u8 ack_policy, tid; + u8 ack_policy = 0, tid; tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; - /* preserve EOSP bit */ - ack_policy = *p & IEEE80211_QOS_CTL_EOSP; - - if (is_multicast_ether_addr(hdr->addr1) || - sdata->noack_map & BIT(tid)) { + if (unlikely(sdata->local->wifi_wme_noack_test)) ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK; - info->flags |= IEEE80211_TX_CTL_NO_ACK; - } - /* qos header is 2 bytes */ *p++ = ack_policy | tid; *p = ieee80211_vif_is_mesh(&sdata->vif) ? 
diff --git a/trunk/net/mac80211/wme.h b/trunk/net/mac80211/wme.h index 94edceb617ff..34e166fbf4d4 100644 --- a/trunk/net/mac80211/wme.h +++ b/trunk/net/mac80211/wme.h @@ -15,9 +15,6 @@ extern const int ieee802_1d_to_ac[8]; -u16 ieee80211_select_queue_80211(struct ieee80211_local *local, - struct sk_buff *skb, - struct ieee80211_hdr *hdr); u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, diff --git a/trunk/net/mac80211/work.c b/trunk/net/mac80211/work.c index c6dd01a05291..6c53b6d1002b 100644 --- a/trunk/net/mac80211/work.c +++ b/trunk/net/mac80211/work.c @@ -94,8 +94,7 @@ static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len, /* frame sending functions */ -static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata, - struct sk_buff *skb, const u8 *ht_info_ie, +static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie, struct ieee80211_supported_band *sband, struct ieee80211_channel *channel, enum ieee80211_smps_mode smps) @@ -103,10 +102,8 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata, struct ieee80211_ht_info *ht_info; u8 *pos; u32 flags = channel->flags; - u16 cap; - struct ieee80211_sta_ht_cap ht_cap; - - BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap)); + u16 cap = sband->ht_cap.cap; + __le16 tmp; if (!sband->ht_cap.ht_supported) return; @@ -117,13 +114,9 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata, if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info)) return; - memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap)); - ieee80211_apply_htcap_overrides(sdata, &ht_cap); - ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2); /* determine capability flags */ - cap = ht_cap.cap; switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: @@ -161,8 +154,34 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata, } /* reserve and fill IE */ + pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2); - ieee80211_ie_build_ht_cap(pos, &ht_cap, cap); + *pos++ = WLAN_EID_HT_CAPABILITY; + *pos++ = sizeof(struct ieee80211_ht_cap); + memset(pos, 0, sizeof(struct ieee80211_ht_cap)); + + /* capability flags */ + tmp = cpu_to_le16(cap); + memcpy(pos, &tmp, sizeof(u16)); + pos += sizeof(u16); + + /* AMPDU parameters */ + *pos++ = sband->ht_cap.ampdu_factor | + (sband->ht_cap.ampdu_density << + IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT); + + /* MCS set */ + memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs)); + pos += sizeof(sband->ht_cap.mcs); + + /* extended capabilities */ + pos += sizeof(__le16); + + /* BF capabilities */ + pos += sizeof(__le32); + + /* antenna selection */ + pos += sizeof(u8); } static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata, @@ -337,7 +356,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata, if (wk->assoc.use_11n && wk->assoc.wmm_used && local->hw.queues >= 4) - ieee80211_add_ht_ie(sdata, skb, wk->assoc.ht_information_ie, + ieee80211_add_ht_ie(skb, wk->assoc.ht_information_ie, sband, wk->chan, wk->assoc.smps); /* if present, add any custom non-vendor IEs that go after HT */ @@ -862,6 +881,44 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local, kfree_skb(skb); } +static bool ieee80211_work_ct_coexists(enum nl80211_channel_type wk_ct, + enum nl80211_channel_type oper_ct) +{ + switch (wk_ct) { + case NL80211_CHAN_NO_HT: + return true; + case NL80211_CHAN_HT20: 
+ if (oper_ct != NL80211_CHAN_NO_HT) + return true; + return false; + case NL80211_CHAN_HT40MINUS: + case NL80211_CHAN_HT40PLUS: + return (wk_ct == oper_ct); + } + WARN_ON(1); /* shouldn't get here */ + return false; +} + +static enum nl80211_channel_type +ieee80211_calc_ct(enum nl80211_channel_type wk_ct, + enum nl80211_channel_type oper_ct) +{ + switch (wk_ct) { + case NL80211_CHAN_NO_HT: + return oper_ct; + case NL80211_CHAN_HT20: + if (oper_ct != NL80211_CHAN_NO_HT) + return oper_ct; + return wk_ct; + case NL80211_CHAN_HT40MINUS: + case NL80211_CHAN_HT40PLUS: + return wk_ct; + } + WARN_ON(1); /* shouldn't get here */ + return wk_ct; +} + + static void ieee80211_work_timer(unsigned long data) { struct ieee80211_local *local = (void *) data; @@ -912,12 +969,51 @@ static void ieee80211_work_work(struct work_struct *work) } if (!started && !local->tmp_channel) { - ieee80211_offchannel_stop_vifs(local, true); + bool on_oper_chan; + bool tmp_chan_changed = false; + bool on_oper_chan2; + enum nl80211_channel_type wk_ct; + on_oper_chan = ieee80211_cfg_on_oper_channel(local); + + /* Work with existing channel type if possible. */ + wk_ct = wk->chan_type; + if (wk->chan == local->hw.conf.channel) + wk_ct = ieee80211_calc_ct(wk->chan_type, + local->hw.conf.channel_type); + + if (local->tmp_channel) + if ((local->tmp_channel != wk->chan) || + (local->tmp_channel_type != wk_ct)) + tmp_chan_changed = true; local->tmp_channel = wk->chan; - local->tmp_channel_type = wk->chan_type; - - ieee80211_hw_config(local, 0); + local->tmp_channel_type = wk_ct; + /* + * Leave the station vifs in awake mode if they + * happen to be on the same channel as + * the requested channel. + */ + on_oper_chan2 = ieee80211_cfg_on_oper_channel(local); + if (on_oper_chan != on_oper_chan2) { + if (on_oper_chan2) { + /* going off oper channel, PS too */ + ieee80211_offchannel_stop_vifs(local, + true); + ieee80211_hw_config(local, 0); + } else { + /* going on channel, but leave PS + * off-channel. */ + ieee80211_hw_config(local, 0); + ieee80211_offchannel_return(local, + true, + false); + } + } else if (tmp_chan_changed) + /* Still off-channel, but on some other + * channel, so update hardware. + * PS should already be off-channel. + */ + ieee80211_hw_config(local, 0); started = true; wk->timeout = jiffies; @@ -986,17 +1082,34 @@ static void ieee80211_work_work(struct work_struct *work) list_for_each_entry(wk, &local->work_list, list) { if (!wk->started) continue; - if (wk->chan != local->tmp_channel || - wk->chan_type != local->tmp_channel_type) + if (wk->chan != local->tmp_channel) + continue; + if (!ieee80211_work_ct_coexists(wk->chan_type, + local->tmp_channel_type)) continue; remain_off_channel = true; } if (!remain_off_channel && local->tmp_channel) { local->tmp_channel = NULL; - ieee80211_hw_config(local, 0); + /* If tmp_channel wasn't operating channel, then + * we need to go back on-channel. + * NOTE: If we can ever be here while scannning, + * or if the hw_config() channel config logic changes, + * then we may need to do a more thorough check to see if + * we still need to do a hardware config. Currently, + * we cannot be here while scanning, however. + */ + if (!ieee80211_cfg_on_oper_channel(local)) + ieee80211_hw_config(local, 0); - ieee80211_offchannel_return(local, true); + /* At the least, we need to disable offchannel_ps, + * so just go ahead and run the entire offchannel + * return logic here. We *could* skip enabling + * beaconing if we were already on-oper-channel + * as a future optimization. 
+ */ + ieee80211_offchannel_return(local, true, true); /* give connection some time to breathe */ run_again(local, jiffies + HZ/2); diff --git a/trunk/net/mac80211/wpa.c b/trunk/net/mac80211/wpa.c index 93aab0715e8a..f614ce7bb6e3 100644 --- a/trunk/net/mac80211/wpa.c +++ b/trunk/net/mac80211/wpa.c @@ -223,14 +223,14 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) ieee80211_tx_result ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx) { - struct sk_buff *skb; + struct sk_buff *skb = tx->skb; ieee80211_tx_set_protected(tx); - skb_queue_walk(&tx->skbs, skb) { + do { if (tkip_encrypt_skb(tx, skb) < 0) return TX_DROP; - } + } while ((skb = skb->next)); return TX_CONTINUE; } @@ -390,8 +390,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) u8 scratch[6 * AES_BLOCK_SIZE]; if (info->control.hw_key && - !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) && - !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) { + !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { /* * hwaccel has no need for preallocated room for CCMP * header or MIC fields @@ -413,12 +412,6 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) pos = skb_push(skb, CCMP_HDR_LEN); memmove(pos, pos + CCMP_HDR_LEN, hdrlen); - - /* the HW only needs room for the IV, but not the actual IV */ - if (info->control.hw_key && - (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) - return 0; - hdr = (struct ieee80211_hdr *) pos; pos += hdrlen; @@ -449,14 +442,14 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) ieee80211_tx_result ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx) { - struct sk_buff *skb; + struct sk_buff *skb = tx->skb; ieee80211_tx_set_protected(tx); - skb_queue_walk(&tx->skbs, skb) { + do { if (ccmp_encrypt_skb(tx, skb) < 0) return TX_DROP; - } + } while ((skb = skb->next)); return TX_CONTINUE; } @@ -554,22 +547,15 @@ static inline void bip_ipn_swap(u8 *d, const u8 *s) ieee80211_tx_result ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx) { - struct sk_buff *skb; - struct ieee80211_tx_info *info; + struct sk_buff *skb = tx->skb; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_key *key = tx->key; struct ieee80211_mmie *mmie; u8 aad[20]; u64 pn64; - if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) - return TX_DROP; - - skb = skb_peek(&tx->skbs); - - info = IEEE80211_SKB_CB(skb); - if (info->control.hw_key) - return TX_CONTINUE; + return 0; if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) return TX_DROP; diff --git a/trunk/net/netfilter/Kconfig b/trunk/net/netfilter/Kconfig index f8ac4ef0b794..d5597b759ba3 100644 --- a/trunk/net/netfilter/Kconfig +++ b/trunk/net/netfilter/Kconfig @@ -4,14 +4,6 @@ menu "Core Netfilter Configuration" config NETFILTER_NETLINK tristate -config NETFILTER_NETLINK_ACCT -tristate "Netfilter NFACCT over NFNETLINK interface" - depends on NETFILTER_ADVANCED - select NETFILTER_NETLINK - help - If this option is enabled, the kernel will include support - for extended accounting via NFNETLINK. - config NETFILTER_NETLINK_QUEUE tristate "Netfilter NFQUEUE over NFNETLINK interface" depends on NETFILTER_ADVANCED @@ -83,16 +75,6 @@ config NF_CONNTRACK_ZONES If unsure, say `N'. 
-config NF_CONNTRACK_PROCFS - bool "Supply CT list in procfs (OBSOLETE)" - default y - depends on PROC_FS - ---help--- - This option enables for the list of known conntrack entries - to be shown in procfs under net/netfilter/nf_conntrack. This - is considered obsolete in favor of using the conntrack(8) - tool which uses Netlink. - config NF_CONNTRACK_EVENTS bool "Connection tracking events" depends on NETFILTER_ADVANCED @@ -788,15 +770,6 @@ config NETFILTER_XT_MATCH_DSCP To compile it as a module, choose M here. If unsure, say N. -config NETFILTER_XT_MATCH_ECN - tristate '"ecn" match support' - depends on NETFILTER_ADVANCED - ---help--- - This option adds an "ECN" match, which allows you to match against - the IPv4 and TCP header ECN fields. - - To compile it as a module, choose M here. If unsure, say N. - config NETFILTER_XT_MATCH_ESP tristate '"esp" match support' depends on NETFILTER_ADVANCED @@ -906,16 +879,6 @@ config NETFILTER_XT_MATCH_MULTIPORT To compile it as a module, choose M here. If unsure, say N. -config NETFILTER_XT_MATCH_NFACCT - tristate '"nfacct" match support' - depends on NETFILTER_ADVANCED - select NETFILTER_NETLINK_ACCT - help - This option allows you to use the extended accounting through - nfnetlink_acct. - - To compile it as a module, choose M here. If unsure, say N. - config NETFILTER_XT_MATCH_OSF tristate '"osf" Passive OS fingerprint match' depends on NETFILTER_ADVANCED && NETFILTER_NETLINK diff --git a/trunk/net/netfilter/Makefile b/trunk/net/netfilter/Makefile index 40f4c3d636c5..1a02853df863 100644 --- a/trunk/net/netfilter/Makefile +++ b/trunk/net/netfilter/Makefile @@ -7,7 +7,6 @@ nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o obj-$(CONFIG_NETFILTER) = netfilter.o obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o -obj-$(CONFIG_NETFILTER_NETLINK_ACCT) += nfnetlink_acct.o obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o @@ -81,7 +80,6 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CPU) += xt_cpu.o obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o -obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o @@ -92,7 +90,6 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o obj-$(CONFIG_NETFILTER_XT_MATCH_MULTIPORT) += xt_multiport.o -obj-$(CONFIG_NETFILTER_XT_MATCH_NFACCT) += xt_nfacct.o obj-$(CONFIG_NETFILTER_XT_MATCH_OSF) += xt_osf.o obj-$(CONFIG_NETFILTER_XT_MATCH_OWNER) += xt_owner.o obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o diff --git a/trunk/net/netfilter/core.c b/trunk/net/netfilter/core.c index b4e8ff05b301..afca6c78948c 100644 --- a/trunk/net/netfilter/core.c +++ b/trunk/net/netfilter/core.c @@ -54,12 +54,6 @@ EXPORT_SYMBOL_GPL(nf_unregister_afinfo); struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly; EXPORT_SYMBOL(nf_hooks); - -#if defined(CONFIG_JUMP_LABEL) -struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; -EXPORT_SYMBOL(nf_hooks_needed); -#endif - static DEFINE_MUTEX(nf_hook_mutex); int nf_register_hook(struct nf_hook_ops *reg) @@ -76,9 +70,6 @@ int nf_register_hook(struct nf_hook_ops *reg) } list_add_rcu(®->list, elem->list.prev); 
mutex_unlock(&nf_hook_mutex); -#if defined(CONFIG_JUMP_LABEL) - jump_label_inc(&nf_hooks_needed[reg->pf][reg->hooknum]); -#endif return 0; } EXPORT_SYMBOL(nf_register_hook); @@ -88,9 +79,7 @@ void nf_unregister_hook(struct nf_hook_ops *reg) mutex_lock(&nf_hook_mutex); list_del_rcu(®->list); mutex_unlock(&nf_hook_mutex); -#if defined(CONFIG_JUMP_LABEL) - jump_label_dec(&nf_hooks_needed[reg->pf][reg->hooknum]); -#endif + synchronize_net(); } EXPORT_SYMBOL(nf_unregister_hook); @@ -229,7 +218,7 @@ int skb_make_writable(struct sk_buff *skb, unsigned int writable_len) } EXPORT_SYMBOL(skb_make_writable); -#if IS_ENABLED(CONFIG_NF_CONNTRACK) +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) /* This does not belong here, but locally generated errors need it if connection tracking in use: without this, connection may not be in hash table, and hence manufactured ICMP or RST packets will not be associated with it. */ diff --git a/trunk/net/netfilter/ipset/ip_set_getport.c b/trunk/net/netfilter/ipset/ip_set_getport.c index 1f03556666f4..052579fe389a 100644 --- a/trunk/net/netfilter/ipset/ip_set_getport.c +++ b/trunk/net/netfilter/ipset/ip_set_getport.c @@ -109,18 +109,16 @@ ip_set_get_ip4_port(const struct sk_buff *skb, bool src, } EXPORT_SYMBOL_GPL(ip_set_get_ip4_port); -#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src, __be16 *port, u8 *proto) { int protoff; u8 nexthdr; - __be16 frag_off; nexthdr = ipv6_hdr(skb)->nexthdr; - protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, - &frag_off); + protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr); if (protoff < 0) return false; diff --git a/trunk/net/netfilter/ipset/ip_set_hash_ip.c b/trunk/net/netfilter/ipset/ip_set_hash_ip.c index 4015fcaf87bc..f2d576e6b769 100644 --- a/trunk/net/netfilter/ipset/ip_set_hash_ip.c +++ b/trunk/net/netfilter/ipset/ip_set_hash_ip.c @@ -241,7 +241,7 @@ hash_ip6_data_isnull(const struct hash_ip6_elem *elem) static inline void hash_ip6_data_copy(struct hash_ip6_elem *dst, const struct hash_ip6_elem *src) { - dst->ip.in6 = src->ip.in6; + ipv6_addr_copy(&dst->ip.in6, &src->ip.in6); } static inline void diff --git a/trunk/net/netfilter/ipset/ip_set_hash_net.c b/trunk/net/netfilter/ipset/ip_set_hash_net.c index 28988196775e..60d016541c58 100644 --- a/trunk/net/netfilter/ipset/ip_set_hash_net.c +++ b/trunk/net/netfilter/ipset/ip_set_hash_net.c @@ -267,7 +267,7 @@ static inline void hash_net6_data_copy(struct hash_net6_elem *dst, const struct hash_net6_elem *src) { - dst->ip.in6 = src->ip.in6; + ipv6_addr_copy(&dst->ip.in6, &src->ip.in6); dst->cidr = src->cidr; } diff --git a/trunk/net/netfilter/ipvs/Kconfig b/trunk/net/netfilter/ipvs/Kconfig index af4c0b8c5275..70bd1d0774c6 100644 --- a/trunk/net/netfilter/ipvs/Kconfig +++ b/trunk/net/netfilter/ipvs/Kconfig @@ -232,21 +232,6 @@ config IP_VS_NQ If you want to compile it in kernel, say Y. To compile it as a module, choose M here. If unsure, say N. -comment 'IPVS SH scheduler' - -config IP_VS_SH_TAB_BITS - int "IPVS source hashing table size (the Nth power of 2)" - range 4 20 - default 8 - ---help--- - The source hashing scheduler maps source IPs to destinations - stored in a hash table. This table is tiled by each destination - until all slots in the table are filled. When using weights to - allow destinations to receive more connections, the table is - tiled an amount proportional to the weights specified. 
The table - needs to be large enough to effectively fit all the destinations - multiplied by their respective weights. - comment 'IPVS application helper' config IP_VS_FTP diff --git a/trunk/net/netfilter/ipvs/ip_vs_conn.c b/trunk/net/netfilter/ipvs/ip_vs_conn.c index 29fa5badde75..12571fb2881c 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_conn.c +++ b/trunk/net/netfilter/ipvs/ip_vs_conn.c @@ -616,7 +616,7 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp) if ((cp) && (!cp->dest)) { dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr, cp->dport, &cp->vaddr, cp->vport, - cp->protocol, cp->fwmark, cp->flags); + cp->protocol, cp->fwmark); ip_vs_bind_dest(cp, dest); return dest; } else diff --git a/trunk/net/netfilter/ipvs/ip_vs_core.c b/trunk/net/netfilter/ipvs/ip_vs_core.c index 611c3359b94d..093cc327020f 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_core.c +++ b/trunk/net/netfilter/ipvs/ip_vs_core.c @@ -983,7 +983,7 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related, if (!cp) return NF_ACCEPT; - snet.in6 = iph->saddr; + ipv6_addr_copy(&snet.in6, &iph->saddr); return handle_response_icmp(AF_INET6, skb, &snet, cih->nexthdr, cp, pp, offset, sizeof(struct ipv6hdr)); } diff --git a/trunk/net/netfilter/ipvs/ip_vs_ctl.c b/trunk/net/netfilter/ipvs/ip_vs_ctl.c index b3afe189af61..008bf97cc91a 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_ctl.c +++ b/trunk/net/netfilter/ipvs/ip_vs_ctl.c @@ -85,7 +85,7 @@ static int __ip_vs_addr_is_local_v6(struct net *net, }; rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6); - if (rt && rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK)) + if (rt && rt->rt6i_dev && (rt->rt6i_dev->flags & IFF_LOOPBACK)) return 1; return 0; @@ -619,21 +619,15 @@ struct ip_vs_dest *ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr, __be16 dport, const union nf_inet_addr *vaddr, - __be16 vport, __u16 protocol, __u32 fwmark, - __u32 flags) + __be16 vport, __u16 protocol, __u32 fwmark) { struct ip_vs_dest *dest; struct ip_vs_service *svc; - __be16 port = dport; svc = ip_vs_service_get(net, af, fwmark, protocol, vaddr, vport); if (!svc) return NULL; - if (fwmark && (flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ) - port = 0; - dest = ip_vs_lookup_dest(svc, daddr, port); - if (!dest) - dest = ip_vs_lookup_dest(svc, daddr, port ^ dport); + dest = ip_vs_lookup_dest(svc, daddr, dport); if (dest) atomic_inc(&dest->refcnt); ip_vs_service_put(svc); diff --git a/trunk/net/netfilter/ipvs/ip_vs_pe_sip.c b/trunk/net/netfilter/ipvs/ip_vs_pe_sip.c index 1aa5cac748c4..13d607ae9c52 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_pe_sip.c +++ b/trunk/net/netfilter/ipvs/ip_vs_pe_sip.c @@ -108,7 +108,7 @@ static bool ip_vs_sip_ct_match(const struct ip_vs_conn_param *p, struct ip_vs_conn *ct) { - bool ret = false; + bool ret = 0; if (ct->af == p->af && ip_vs_addr_equal(p->af, p->caddr, &ct->caddr) && @@ -121,7 +121,7 @@ static bool ip_vs_sip_ct_match(const struct ip_vs_conn_param *p, ct->protocol == p->protocol && ct->pe_data && ct->pe_data_len == p->pe_data_len && !memcmp(ct->pe_data, p->pe_data, p->pe_data_len)) - ret = true; + ret = 1; IP_VS_DBG_BUF(9, "SIP template match %s %s->%s:%d %s\n", ip_vs_proto_name(p->protocol), diff --git a/trunk/net/netfilter/ipvs/ip_vs_sh.c b/trunk/net/netfilter/ipvs/ip_vs_sh.c index 069e8d4d5c01..33815f4fb451 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_sh.c +++ b/trunk/net/netfilter/ipvs/ip_vs_sh.c @@ -30,11 +30,6 @@ * server is dead or overloaded, the load balancer can bypass the cache * server and send 
requests to the original server directly. * - * The weight destination attribute can be used to control the - * distribution of connections to the destinations in servernode. The - * greater the weight, the more connections the destination - * will receive. - * */ #define KMSG_COMPONENT "IPVS" @@ -104,11 +99,9 @@ ip_vs_sh_assign(struct ip_vs_sh_bucket *tbl, struct ip_vs_service *svc) struct ip_vs_sh_bucket *b; struct list_head *p; struct ip_vs_dest *dest; - int d_count; b = tbl; p = &svc->destinations; - d_count = 0; for (i=0; idest = NULL; @@ -120,16 +113,7 @@ ip_vs_sh_assign(struct ip_vs_sh_bucket *tbl, struct ip_vs_service *svc) atomic_inc(&dest->refcnt); b->dest = dest; - IP_VS_DBG_BUF(6, "assigned i: %d dest: %s weight: %d\n", - i, IP_VS_DBG_ADDR(svc->af, &dest->addr), - atomic_read(&dest->weight)); - - /* Don't move to next dest until filling weight */ - if (++d_count >= atomic_read(&dest->weight)) { - p = p->next; - d_count = 0; - } - + p = p->next; } b++; } diff --git a/trunk/net/netfilter/ipvs/ip_vs_sync.c b/trunk/net/netfilter/ipvs/ip_vs_sync.c index 8a0d6d6889f0..3cdd479f9b5d 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_sync.c +++ b/trunk/net/netfilter/ipvs/ip_vs_sync.c @@ -603,9 +603,9 @@ void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp) #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) { p += sizeof(struct ip_vs_sync_v6); - s->v6.caddr = cp->caddr.in6; - s->v6.vaddr = cp->vaddr.in6; - s->v6.daddr = cp->daddr.in6; + ipv6_addr_copy(&s->v6.caddr, &cp->caddr.in6); + ipv6_addr_copy(&s->v6.vaddr, &cp->vaddr.in6); + ipv6_addr_copy(&s->v6.daddr, &cp->daddr.in6); } else #endif { @@ -740,7 +740,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, * but still handled. */ dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr, - param->vport, protocol, fwmark, flags); + param->vport, protocol, fwmark); /* Set the approprite ativity flag */ if (protocol == IPPROTO_TCP) { diff --git a/trunk/net/netfilter/ipvs/ip_vs_xmit.c b/trunk/net/netfilter/ipvs/ip_vs_xmit.c index 7fd66dec859d..aa2d7206ee8a 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_xmit.c +++ b/trunk/net/netfilter/ipvs/ip_vs_xmit.c @@ -207,7 +207,7 @@ __ip_vs_reroute_locally(struct sk_buff *skb) static inline int __ip_vs_is_local_route6(struct rt6_info *rt) { - return rt->dst.dev && rt->dst.dev->flags & IFF_LOOPBACK; + return rt->rt6i_dev && rt->rt6i_dev->flags & IFF_LOOPBACK; } static struct dst_entry * @@ -235,7 +235,7 @@ __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr, goto out_err; } } - *ret_saddr = fl6.saddr; + ipv6_addr_copy(ret_saddr, &fl6.saddr); return dst; out_err: @@ -279,7 +279,7 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest, atomic_read(&rt->dst.__refcnt)); } if (ret_saddr) - *ret_saddr = dest->dst_saddr.in6; + ipv6_addr_copy(ret_saddr, &dest->dst_saddr.in6); spin_unlock(&dest->dst_lock); } else { dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm); @@ -541,7 +541,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, * Avoid duplicate tuple in reply direction for NAT traffic * to local address when connection is sync-ed */ -#if IS_ENABLED(CONFIG_NF_CONNTRACK) +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) if (cp->flags & IP_VS_CONN_F_SYNC && local) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo); @@ -658,7 +658,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, * Avoid duplicate tuple in reply direction for NAT traffic * to local address when 
connection is sync-ed */ -#if IS_ENABLED(CONFIG_NF_CONNTRACK) +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) if (cp->flags & IP_VS_CONN_F_SYNC && local) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo); @@ -705,7 +705,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, /* mangle the packet */ if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) goto tx_error; - ipv6_hdr(skb)->daddr = cp->daddr.in6; + ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &cp->daddr.in6); if (!local || !skb->dev) { /* drop the old route when skb is not shared */ @@ -967,8 +967,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, be16_add_cpu(&iph->payload_len, sizeof(*old_iph)); iph->priority = old_iph->priority; memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl)); - iph->daddr = cp->daddr.in6; - iph->saddr = saddr; + ipv6_addr_copy(&iph->daddr, &cp->daddr.in6); + ipv6_addr_copy(&iph->saddr, &saddr); iph->hop_limit = old_iph->hop_limit; /* Another hack: avoid icmp_send in ip_fragment */ @@ -1173,7 +1173,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, * Avoid duplicate tuple in reply direction for NAT traffic * to local address when connection is sync-ed */ -#if IS_ENABLED(CONFIG_NF_CONNTRACK) +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) if (cp->flags & IP_VS_CONN_F_SYNC && local) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo); @@ -1293,7 +1293,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, * Avoid duplicate tuple in reply direction for NAT traffic * to local address when connection is sync-ed */ -#if IS_ENABLED(CONFIG_NF_CONNTRACK) +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) if (cp->flags & IP_VS_CONN_F_SYNC && local) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo); diff --git a/trunk/net/netfilter/nf_conntrack_acct.c b/trunk/net/netfilter/nf_conntrack_acct.c index f4f8cda05986..369df3f08d42 100644 --- a/trunk/net/netfilter/nf_conntrack_acct.c +++ b/trunk/net/netfilter/nf_conntrack_acct.c @@ -18,7 +18,7 @@ #include #include -static bool nf_ct_acct __read_mostly; +static int nf_ct_acct __read_mostly; module_param_named(acct, nf_ct_acct, bool, 0644); MODULE_PARM_DESC(acct, "Enable connection tracking flow accounting."); @@ -46,8 +46,8 @@ seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir) return 0; return seq_printf(s, "packets=%llu bytes=%llu ", - (unsigned long long)atomic64_read(&acct[dir].packets), - (unsigned long long)atomic64_read(&acct[dir].bytes)); + (unsigned long long)acct[dir].packets, + (unsigned long long)acct[dir].bytes); }; EXPORT_SYMBOL_GPL(seq_print_acct); diff --git a/trunk/net/netfilter/nf_conntrack_core.c b/trunk/net/netfilter/nf_conntrack_core.c index e875f8902db3..7202b0631cd6 100644 --- a/trunk/net/netfilter/nf_conntrack_core.c +++ b/trunk/net/netfilter/nf_conntrack_core.c @@ -67,7 +67,6 @@ DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked); EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked); unsigned int nf_conntrack_hash_rnd __read_mostly; -EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd); static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone) { @@ -1045,8 +1044,10 @@ void __nf_ct_refresh_acct(struct nf_conn *ct, acct = nf_conn_acct_find(ct); if (acct) { - atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets); - atomic64_add(skb->len, &acct[CTINFO2DIR(ctinfo)].bytes); + spin_lock_bh(&ct->lock); + 
acct[CTINFO2DIR(ctinfo)].packets++; + acct[CTINFO2DIR(ctinfo)].bytes += skb->len; + spin_unlock_bh(&ct->lock); } } } @@ -1062,9 +1063,11 @@ bool __nf_ct_kill_acct(struct nf_conn *ct, acct = nf_conn_acct_find(ct); if (acct) { - atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets); - atomic64_add(skb->len - skb_network_offset(skb), - &acct[CTINFO2DIR(ctinfo)].bytes); + spin_lock_bh(&ct->lock); + acct[CTINFO2DIR(ctinfo)].packets++; + acct[CTINFO2DIR(ctinfo)].bytes += + skb->len - skb_network_offset(skb); + spin_unlock_bh(&ct->lock); } } @@ -1084,7 +1087,7 @@ static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = { }; #endif -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) +#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) #include #include @@ -1339,7 +1342,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls) get_order(sz)); if (!hash) { printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); - hash = vzalloc(sz); + hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, + PAGE_KERNEL); } if (hash && nulls) diff --git a/trunk/net/netfilter/nf_conntrack_expect.c b/trunk/net/netfilter/nf_conntrack_expect.c index 4147ba3f653c..340c80d968d4 100644 --- a/trunk/net/netfilter/nf_conntrack_expect.c +++ b/trunk/net/netfilter/nf_conntrack_expect.c @@ -38,6 +38,8 @@ unsigned int nf_ct_expect_max __read_mostly; static struct kmem_cache *nf_ct_expect_cachep __read_mostly; +static HLIST_HEAD(nf_ct_userspace_expect_list); + /* nf_conntrack_expect helper functions */ void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp, u32 pid, int report) @@ -45,14 +47,14 @@ void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp, struct nf_conn_help *master_help = nfct_help(exp->master); struct net *net = nf_ct_exp_net(exp); - NF_CT_ASSERT(master_help); NF_CT_ASSERT(!timer_pending(&exp->timeout)); hlist_del_rcu(&exp->hnode); net->ct.expect_count--; hlist_del(&exp->lnode); - master_help->expecting[exp->class]--; + if (!(exp->flags & NF_CT_EXPECT_USERSPACE)) + master_help->expecting[exp->class]--; nf_ct_expect_event_report(IPEXP_DESTROY, exp, pid, report); nf_ct_expect_put(exp); @@ -312,34 +314,37 @@ void nf_ct_expect_put(struct nf_conntrack_expect *exp) } EXPORT_SYMBOL_GPL(nf_ct_expect_put); -static int nf_ct_expect_insert(struct nf_conntrack_expect *exp) +static void nf_ct_expect_insert(struct nf_conntrack_expect *exp) { struct nf_conn_help *master_help = nfct_help(exp->master); - struct nf_conntrack_helper *helper; struct net *net = nf_ct_exp_net(exp); + const struct nf_conntrack_expect_policy *p; unsigned int h = nf_ct_expect_dst_hash(&exp->tuple); /* two references : one for hash insert, one for the timer */ atomic_add(2, &exp->use); - hlist_add_head(&exp->lnode, &master_help->expectations); - master_help->expecting[exp->class]++; + if (master_help) { + hlist_add_head(&exp->lnode, &master_help->expectations); + master_help->expecting[exp->class]++; + } else if (exp->flags & NF_CT_EXPECT_USERSPACE) + hlist_add_head(&exp->lnode, &nf_ct_userspace_expect_list); hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]); net->ct.expect_count++; setup_timer(&exp->timeout, nf_ct_expectation_timed_out, (unsigned long)exp); - helper = rcu_dereference_protected(master_help->helper, - lockdep_is_held(&nf_conntrack_lock)); - if (helper) { - exp->timeout.expires = jiffies + - helper->expect_policy[exp->class].timeout * HZ; + if (master_help) { + p = &rcu_dereference_protected( + master_help->helper, + lockdep_is_held(&nf_conntrack_lock) + )->expect_policy[exp->class]; + 
exp->timeout.expires = jiffies + p->timeout * HZ; } add_timer(&exp->timeout); NF_CT_STAT_INC(net, expect_create); - return 0; } /* Race with expectations being used means we could have none to find; OK. */ @@ -384,13 +389,14 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) struct nf_conntrack_expect *i; struct nf_conn *master = expect->master; struct nf_conn_help *master_help = nfct_help(master); - struct nf_conntrack_helper *helper; struct net *net = nf_ct_exp_net(expect); struct hlist_node *n; unsigned int h; int ret = 1; - if (!master_help) { + /* Don't allow expectations created from kernel-space with no helper */ + if (!(expect->flags & NF_CT_EXPECT_USERSPACE) && + (!master_help || (master_help && !master_help->helper))) { ret = -ESHUTDOWN; goto out; } @@ -408,10 +414,11 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) } } /* Will be over limit? */ - helper = rcu_dereference_protected(master_help->helper, - lockdep_is_held(&nf_conntrack_lock)); - if (helper) { - p = &helper->expect_policy[expect->class]; + if (master_help) { + p = &rcu_dereference_protected( + master_help->helper, + lockdep_is_held(&nf_conntrack_lock) + )->expect_policy[expect->class]; if (p->max_expected && master_help->expecting[expect->class] >= p->max_expected) { evict_oldest_expect(master, expect); @@ -443,9 +450,8 @@ int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, if (ret <= 0) goto out; - ret = nf_ct_expect_insert(expect); - if (ret < 0) - goto out; + ret = 0; + nf_ct_expect_insert(expect); spin_unlock_bh(&nf_conntrack_lock); nf_ct_expect_event_report(IPEXP_NEW, expect, pid, report); return ret; @@ -455,7 +461,22 @@ int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, } EXPORT_SYMBOL_GPL(nf_ct_expect_related_report); -#ifdef CONFIG_NF_CONNTRACK_PROCFS +void nf_ct_remove_userspace_expectations(void) +{ + struct nf_conntrack_expect *exp; + struct hlist_node *n, *next; + + hlist_for_each_entry_safe(exp, n, next, + &nf_ct_userspace_expect_list, lnode) { + if (del_timer(&exp->timeout)) { + nf_ct_unlink_expect(exp); + nf_ct_expect_put(exp); + } + } +} +EXPORT_SYMBOL_GPL(nf_ct_remove_userspace_expectations); + +#ifdef CONFIG_PROC_FS struct ct_expect_iter_state { struct seq_net_private p; unsigned int bucket; @@ -583,25 +604,25 @@ static const struct file_operations exp_file_ops = { .llseek = seq_lseek, .release = seq_release_net, }; -#endif /* CONFIG_NF_CONNTRACK_PROCFS */ +#endif /* CONFIG_PROC_FS */ static int exp_proc_init(struct net *net) { -#ifdef CONFIG_NF_CONNTRACK_PROCFS +#ifdef CONFIG_PROC_FS struct proc_dir_entry *proc; proc = proc_net_fops_create(net, "nf_conntrack_expect", 0440, &exp_file_ops); if (!proc) return -ENOMEM; -#endif /* CONFIG_NF_CONNTRACK_PROCFS */ +#endif /* CONFIG_PROC_FS */ return 0; } static void exp_proc_remove(struct net *net) { -#ifdef CONFIG_NF_CONNTRACK_PROCFS +#ifdef CONFIG_PROC_FS proc_net_remove(net, "nf_conntrack_expect"); -#endif /* CONFIG_NF_CONNTRACK_PROCFS */ +#endif /* CONFIG_PROC_FS */ } module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400); diff --git a/trunk/net/netfilter/nf_conntrack_ftp.c b/trunk/net/netfilter/nf_conntrack_ftp.c index 8c5c95c6d34f..6f5801eac999 100644 --- a/trunk/net/netfilter/nf_conntrack_ftp.c +++ b/trunk/net/netfilter/nf_conntrack_ftp.c @@ -42,7 +42,7 @@ static u_int16_t ports[MAX_PORTS]; static unsigned int ports_c; module_param_array(ports, ushort, &ports_c, 0400); -static bool loose; +static int loose; module_param(loose, bool, 0600); unsigned 
int (*nf_nat_ftp_hook)(struct sk_buff *skb, diff --git a/trunk/net/netfilter/nf_conntrack_h323_main.c b/trunk/net/netfilter/nf_conntrack_h323_main.c index 722291f8af72..f03c2d4539f6 100644 --- a/trunk/net/netfilter/nf_conntrack_h323_main.c +++ b/trunk/net/netfilter/nf_conntrack_h323_main.c @@ -42,7 +42,7 @@ static int gkrouted_only __read_mostly = 1; module_param(gkrouted_only, int, 0600); MODULE_PARM_DESC(gkrouted_only, "only accept calls from gatekeeper"); -static bool callforward_filter __read_mostly = true; +static int callforward_filter __read_mostly = 1; module_param(callforward_filter, bool, 0600); MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations " "if both endpoints are on different sides " @@ -743,16 +743,17 @@ static int callforward_do_filter(const union nf_inet_addr *src, } break; } -#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6) +#if defined(CONFIG_NF_CONNTRACK_IPV6) || \ + defined(CONFIG_NF_CONNTRACK_IPV6_MODULE) case AF_INET6: { struct flowi6 fl1, fl2; struct rt6_info *rt1, *rt2; memset(&fl1, 0, sizeof(fl1)); - fl1.daddr = src->in6; + ipv6_addr_copy(&fl1.daddr, &src->in6); memset(&fl2, 0, sizeof(fl2)); - fl2.daddr = dst->in6; + ipv6_addr_copy(&fl2.daddr, &dst->in6); if (!afinfo->route(&init_net, (struct dst_entry **)&rt1, flowi6_to_flowi(&fl1), false)) { if (!afinfo->route(&init_net, (struct dst_entry **)&rt2, diff --git a/trunk/net/netfilter/nf_conntrack_helper.c b/trunk/net/netfilter/nf_conntrack_helper.c index c9e0de08aa87..93c4bdbfc1ae 100644 --- a/trunk/net/netfilter/nf_conntrack_helper.c +++ b/trunk/net/netfilter/nf_conntrack_helper.c @@ -121,18 +121,6 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, int ret = 0; if (tmpl != NULL) { - /* we've got a userspace helper. */ - if (tmpl->status & IPS_USERSPACE_HELPER) { - help = nf_ct_helper_ext_add(ct, flags); - if (help == NULL) { - ret = -ENOMEM; - goto out; - } - rcu_assign_pointer(help->helper, NULL); - __set_bit(IPS_USERSPACE_HELPER_BIT, &ct->status); - ret = 0; - goto out; - } help = nfct_help(tmpl); if (help != NULL) helper = help->helper; diff --git a/trunk/net/netfilter/nf_conntrack_netlink.c b/trunk/net/netfilter/nf_conntrack_netlink.c index e07dc3ae930e..ef21b221f036 100644 --- a/trunk/net/netfilter/nf_conntrack_netlink.c +++ b/trunk/net/netfilter/nf_conntrack_netlink.c @@ -135,7 +135,7 @@ ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct) static inline int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct) { - long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ; + long timeout = (ct->timeout.expires - jiffies) / HZ; if (timeout < 0) timeout = 0; @@ -203,18 +203,25 @@ ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct) } static int -dump_counters(struct sk_buff *skb, u64 pkts, u64 bytes, - enum ip_conntrack_dir dir) +ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct, + enum ip_conntrack_dir dir) { enum ctattr_type type = dir ? 
CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG; struct nlattr *nest_count; + const struct nf_conn_counter *acct; + + acct = nf_conn_acct_find(ct); + if (!acct) + return 0; nest_count = nla_nest_start(skb, type | NLA_F_NESTED); if (!nest_count) goto nla_put_failure; - NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)); - NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes)); + NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS, + cpu_to_be64(acct[dir].packets)); + NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES, + cpu_to_be64(acct[dir].bytes)); nla_nest_end(skb, nest_count); @@ -224,27 +231,6 @@ dump_counters(struct sk_buff *skb, u64 pkts, u64 bytes, return -1; } -static int -ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct, - enum ip_conntrack_dir dir, int type) -{ - struct nf_conn_counter *acct; - u64 pkts, bytes; - - acct = nf_conn_acct_find(ct); - if (!acct) - return 0; - - if (type == IPCTNL_MSG_CT_GET_CTRZERO) { - pkts = atomic64_xchg(&acct[dir].packets, 0); - bytes = atomic64_xchg(&acct[dir].bytes, 0); - } else { - pkts = atomic64_read(&acct[dir].packets); - bytes = atomic64_read(&acct[dir].bytes); - } - return dump_counters(skb, pkts, bytes, dir); -} - static int ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct) { @@ -407,15 +393,15 @@ ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct) } static int -ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type, - struct nf_conn *ct) +ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, + int event, struct nf_conn *ct) { struct nlmsghdr *nlh; struct nfgenmsg *nfmsg; struct nlattr *nest_parms; - unsigned int flags = pid ? NLM_F_MULTI : 0, event; + unsigned int flags = pid ? NLM_F_MULTI : 0; - event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_NEW); + event |= NFNL_SUBSYS_CTNETLINK << 8; nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags); if (nlh == NULL) goto nlmsg_failure; @@ -444,8 +430,8 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type, if (ctnetlink_dump_status(skb, ct) < 0 || ctnetlink_dump_timeout(skb, ct) < 0 || - ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL, type) < 0 || - ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY, type) < 0 || + ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || + ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 || ctnetlink_dump_timestamp(skb, ct) < 0 || ctnetlink_dump_protoinfo(skb, ct) < 0 || ctnetlink_dump_helpinfo(skb, ct) < 0 || @@ -626,10 +612,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) goto nla_put_failure; if (events & (1 << IPCT_DESTROY)) { - if (ctnetlink_dump_counters(skb, ct, - IP_CT_DIR_ORIGINAL, type) < 0 || - ctnetlink_dump_counters(skb, ct, - IP_CT_DIR_REPLY, type) < 0 || + if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || + ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 || ctnetlink_dump_timestamp(skb, ct) < 0) goto nla_put_failure; } else { @@ -725,13 +709,20 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) } if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, - NFNL_MSG_TYPE( - cb->nlh->nlmsg_type), - ct) < 0) { + IPCTNL_MSG_CT_NEW, ct) < 0) { nf_conntrack_get(&ct->ct_general); cb->args[1] = (unsigned long)ct; goto out; } + + if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == + IPCTNL_MSG_CT_GET_CTRZERO) { + struct nf_conn_counter *acct; + + acct = nf_conn_acct_find(ct); + if (acct) + memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX])); + } } if (cb->args[1]) { cb->args[1] = 0; 
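The two CTRZERO paths above implement "dump the counters, then reset them" differently: the removed ctnetlink_dump_counters() fetched and cleared each 64-bit counter in a single step with atomic64_xchg(), while the restored table-dump code reads the counters while filling the message and memset()s the nf_conn_counter pair afterwards, so the reset can lose increments unless updaters are serialized elsewhere. A minimal user-space sketch of that difference, using GCC atomic builtins and hypothetical names rather than the kernel API:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-direction counter, standing in for struct nf_conn_counter. */
struct counter {
	uint64_t packets;
	uint64_t bytes;
};

static struct counter ctr;

/* Atomic read-and-zero: an increment can never fall between the read and the reset. */
static uint64_t get_and_zero_packets(void)
{
	return __atomic_exchange_n(&ctr.packets, 0, __ATOMIC_RELAXED);
}

/* Read, then clear: an increment landing between the load and the store is silently
 * dropped unless callers and updaters are serialized by some outer lock. */
static uint64_t get_then_zero_packets(void)
{
	uint64_t v = __atomic_load_n(&ctr.packets, __ATOMIC_RELAXED);

	__atomic_store_n(&ctr.packets, 0, __ATOMIC_RELAXED);
	return v;
}

int main(void)
{
	__atomic_add_fetch(&ctr.packets, 3, __ATOMIC_RELAXED);
	printf("%llu\n", (unsigned long long)get_and_zero_packets());

	__atomic_add_fetch(&ctr.packets, 2, __ATOMIC_RELAXED);
	printf("%llu\n", (unsigned long long)get_then_zero_packets());
	return 0;
}

Run sequentially as above both helpers just return 3 and 2; the behaviours only diverge when another thread increments the counter between the load and the store in the second helper.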
@@ -1010,7 +1001,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, rcu_read_lock(); err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, - NFNL_MSG_TYPE(nlh->nlmsg_type), ct); + IPCTNL_MSG_CT_NEW, ct); rcu_read_unlock(); nf_ct_put(ct); if (err <= 0) @@ -1096,14 +1087,14 @@ ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[]) if (cda[CTA_NAT_DST]) { ret = ctnetlink_parse_nat_setup(ct, - NF_NAT_MANIP_DST, + IP_NAT_MANIP_DST, cda[CTA_NAT_DST]); if (ret < 0) return ret; } if (cda[CTA_NAT_SRC]) { ret = ctnetlink_parse_nat_setup(ct, - NF_NAT_MANIP_SRC, + IP_NAT_MANIP_SRC, cda[CTA_NAT_SRC]); if (ret < 0) return ret; @@ -1367,15 +1358,12 @@ ctnetlink_create_conntrack(struct net *net, u16 zone, nf_ct_protonum(ct)); if (helper == NULL) { rcu_read_unlock(); - spin_unlock_bh(&nf_conntrack_lock); #ifdef CONFIG_MODULES if (request_module("nfct-helper-%s", helpname) < 0) { - spin_lock_bh(&nf_conntrack_lock); err = -EOPNOTSUPP; goto err1; } - spin_lock_bh(&nf_conntrack_lock); rcu_read_lock(); helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), @@ -1650,7 +1638,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb, const struct nf_conntrack_expect *exp) { struct nf_conn *master = exp->master; - long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ; + long timeout = (exp->timeout.expires - jiffies) / HZ; struct nf_conn_help *help; if (timeout < 0) @@ -1859,9 +1847,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, if (err < 0) return err; - if (cda[CTA_EXPECT_TUPLE]) - err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); - else if (cda[CTA_EXPECT_MASTER]) + if (cda[CTA_EXPECT_MASTER]) err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3); else return -EINVAL; @@ -1883,30 +1869,25 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, err = -ENOMEM; skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (skb2 == NULL) { - nf_ct_expect_put(exp); + if (skb2 == NULL) goto out; - } rcu_read_lock(); err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp); rcu_read_unlock(); - nf_ct_expect_put(exp); if (err <= 0) goto free; - err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); - if (err < 0) - goto out; + nf_ct_expect_put(exp); - return 0; + return netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); free: kfree_skb(skb2); out: - /* this avoids a loop in nfnetlink. */ - return err == -EAGAIN ? 
-ENOBUFS : err; + nf_ct_expect_put(exp); + return err; } static int @@ -2042,10 +2023,6 @@ ctnetlink_create_expect(struct net *net, u16 zone, } help = nfct_help(ct); if (!help) { - err = -EOPNOTSUPP; - goto out; - } - if (test_bit(IPS_USERSPACE_HELPER_BIT, &ct->status)) { if (!cda[CTA_EXPECT_TIMEOUT]) { err = -EINVAL; goto out; @@ -2270,6 +2247,7 @@ static void __exit ctnetlink_exit(void) { pr_info("ctnetlink: unregistering from nfnetlink.\n"); + nf_ct_remove_userspace_expectations(); unregister_pernet_subsys(&ctnetlink_net_ops); nfnetlink_subsys_unregister(&ctnl_exp_subsys); nfnetlink_subsys_unregister(&ctnl_subsys); diff --git a/trunk/net/netfilter/nf_conntrack_proto_dccp.c b/trunk/net/netfilter/nf_conntrack_proto_dccp.c index d6dde6dc09e6..2e664a69d7db 100644 --- a/trunk/net/netfilter/nf_conntrack_proto_dccp.c +++ b/trunk/net/netfilter/nf_conntrack_proto_dccp.c @@ -629,7 +629,7 @@ static int dccp_print_conntrack(struct seq_file *s, struct nf_conn *ct) return seq_printf(s, "%s ", dccp_state_names[ct->proto.dccp.state]); } -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) +#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla, struct nf_conn *ct) { @@ -770,7 +770,7 @@ static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = { .error = dccp_error, .print_tuple = dccp_print_tuple, .print_conntrack = dccp_print_conntrack, -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) +#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .to_nlattr = dccp_to_nlattr, .nlattr_size = dccp_nlattr_size, .from_nlattr = nlattr_to_dccp, @@ -792,7 +792,7 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = { .error = dccp_error, .print_tuple = dccp_print_tuple, .print_conntrack = dccp_print_conntrack, -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) +#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .to_nlattr = dccp_to_nlattr, .nlattr_size = dccp_nlattr_size, .from_nlattr = nlattr_to_dccp, diff --git a/trunk/net/netfilter/nf_conntrack_proto_gre.c b/trunk/net/netfilter/nf_conntrack_proto_gre.c index f0338791b822..d69facdd9a7a 100644 --- a/trunk/net/netfilter/nf_conntrack_proto_gre.c +++ b/trunk/net/netfilter/nf_conntrack_proto_gre.c @@ -291,7 +291,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = { .new = gre_new, .destroy = gre_destroy, .me = THIS_MODULE, -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) +#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, diff --git a/trunk/net/netfilter/nf_conntrack_proto_sctp.c b/trunk/net/netfilter/nf_conntrack_proto_sctp.c index afa69136061a..6772b1154654 100644 --- a/trunk/net/netfilter/nf_conntrack_proto_sctp.c +++ b/trunk/net/netfilter/nf_conntrack_proto_sctp.c @@ -461,7 +461,7 @@ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb, return true; } -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) +#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) #include #include @@ -666,7 +666,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = { .packet = sctp_packet, .new = sctp_new, .me = THIS_MODULE, -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) +#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .to_nlattr = sctp_to_nlattr, .nlattr_size = sctp_nlattr_size, .from_nlattr = nlattr_to_sctp, @@ -696,7 +696,7 @@ static 
struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = { .packet = sctp_packet, .new = sctp_new, .me = THIS_MODULE, -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) +#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .to_nlattr = sctp_to_nlattr, .nlattr_size = sctp_nlattr_size, .from_nlattr = nlattr_to_sctp, diff --git a/trunk/net/netfilter/nf_conntrack_proto_tcp.c b/trunk/net/netfilter/nf_conntrack_proto_tcp.c index 97b9f3ebf28c..8235b86b4e87 100644 --- a/trunk/net/netfilter/nf_conntrack_proto_tcp.c +++ b/trunk/net/netfilter/nf_conntrack_proto_tcp.c @@ -1126,7 +1126,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb, return true; } -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) +#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) #include #include @@ -1447,7 +1447,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly = .packet = tcp_packet, .new = tcp_new, .error = tcp_error, -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) +#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .to_nlattr = tcp_to_nlattr, .nlattr_size = tcp_nlattr_size, .from_nlattr = nlattr_to_tcp, @@ -1479,7 +1479,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly = .packet = tcp_packet, .new = tcp_new, .error = tcp_error, -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) +#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .to_nlattr = tcp_to_nlattr, .nlattr_size = tcp_nlattr_size, .from_nlattr = nlattr_to_tcp, diff --git a/trunk/net/netfilter/nf_conntrack_proto_udp.c b/trunk/net/netfilter/nf_conntrack_proto_udp.c index 5f35757fbff0..8289088b8218 100644 --- a/trunk/net/netfilter/nf_conntrack_proto_udp.c +++ b/trunk/net/netfilter/nf_conntrack_proto_udp.c @@ -188,7 +188,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly = .packet = udp_packet, .new = udp_new, .error = udp_error, -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) +#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, @@ -216,7 +216,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly = .packet = udp_packet, .new = udp_new, .error = udp_error, -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) +#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, diff --git a/trunk/net/netfilter/nf_conntrack_proto_udplite.c b/trunk/net/netfilter/nf_conntrack_proto_udplite.c index f52ca1181013..263b5a72588d 100644 --- a/trunk/net/netfilter/nf_conntrack_proto_udplite.c +++ b/trunk/net/netfilter/nf_conntrack_proto_udplite.c @@ -174,7 +174,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly = .packet = udplite_packet, .new = udplite_new, .error = udplite_error, -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) +#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, @@ -198,7 +198,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly = .packet = udplite_packet, .new = udplite_new, .error = udplite_error, -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) +#if defined(CONFIG_NF_CT_NETLINK) || 
defined(CONFIG_NF_CT_NETLINK_MODULE) .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, diff --git a/trunk/net/netfilter/nf_conntrack_standalone.c b/trunk/net/netfilter/nf_conntrack_standalone.c index 885f5ab9bc28..05e9feb101c3 100644 --- a/trunk/net/netfilter/nf_conntrack_standalone.c +++ b/trunk/net/netfilter/nf_conntrack_standalone.c @@ -34,7 +34,7 @@ MODULE_LICENSE("GPL"); -#ifdef CONFIG_NF_CONNTRACK_PROCFS +#ifdef CONFIG_PROC_FS int print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_l3proto *l3proto, @@ -396,7 +396,7 @@ static int nf_conntrack_standalone_init_proc(struct net *net) static void nf_conntrack_standalone_fini_proc(struct net *net) { } -#endif /* CONFIG_NF_CONNTRACK_PROCFS */ +#endif /* CONFIG_PROC_FS */ /* Sysctl support */ diff --git a/trunk/net/netfilter/nf_conntrack_timestamp.c b/trunk/net/netfilter/nf_conntrack_timestamp.c index e8d27afbbdb9..af7dd31af0a1 100644 --- a/trunk/net/netfilter/nf_conntrack_timestamp.c +++ b/trunk/net/netfilter/nf_conntrack_timestamp.c @@ -15,7 +15,7 @@ #include #include -static bool nf_ct_tstamp __read_mostly; +static int nf_ct_tstamp __read_mostly; module_param_named(tstamp, nf_ct_tstamp, bool, 0644); MODULE_PARM_DESC(tstamp, "Enable connection tracking flow timestamping."); diff --git a/trunk/net/netfilter/nfnetlink_acct.c b/trunk/net/netfilter/nfnetlink_acct.c deleted file mode 100644 index 11ba013e47f6..000000000000 --- a/trunk/net/netfilter/nfnetlink_acct.c +++ /dev/null @@ -1,361 +0,0 @@ -/* - * (C) 2011 Pablo Neira Ayuso - * (C) 2011 Intra2net AG - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation (or any later at your option). - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Pablo Neira Ayuso "); -MODULE_DESCRIPTION("nfacct: Extended Netfilter accounting infrastructure"); - -static LIST_HEAD(nfnl_acct_list); - -struct nf_acct { - atomic64_t pkts; - atomic64_t bytes; - struct list_head head; - atomic_t refcnt; - char name[NFACCT_NAME_MAX]; - struct rcu_head rcu_head; -}; - -static int -nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb, - const struct nlmsghdr *nlh, const struct nlattr * const tb[]) -{ - struct nf_acct *nfacct, *matching = NULL; - char *acct_name; - - if (!tb[NFACCT_NAME]) - return -EINVAL; - - acct_name = nla_data(tb[NFACCT_NAME]); - - list_for_each_entry(nfacct, &nfnl_acct_list, head) { - if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0) - continue; - - if (nlh->nlmsg_flags & NLM_F_EXCL) - return -EEXIST; - - matching = nfacct; - break; - } - - if (matching) { - if (nlh->nlmsg_flags & NLM_F_REPLACE) { - /* reset counters if you request a replacement. 
*/ - atomic64_set(&matching->pkts, 0); - atomic64_set(&matching->bytes, 0); - return 0; - } - return -EBUSY; - } - - nfacct = kzalloc(sizeof(struct nf_acct), GFP_KERNEL); - if (nfacct == NULL) - return -ENOMEM; - - strncpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX); - - if (tb[NFACCT_BYTES]) { - atomic64_set(&nfacct->bytes, - be64_to_cpu(nla_get_u64(tb[NFACCT_BYTES]))); - } - if (tb[NFACCT_PKTS]) { - atomic64_set(&nfacct->pkts, - be64_to_cpu(nla_get_u64(tb[NFACCT_PKTS]))); - } - atomic_set(&nfacct->refcnt, 1); - list_add_tail_rcu(&nfacct->head, &nfnl_acct_list); - return 0; -} - -static int -nfnl_acct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type, - int event, struct nf_acct *acct) -{ - struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; - unsigned int flags = pid ? NLM_F_MULTI : 0; - u64 pkts, bytes; - - event |= NFNL_SUBSYS_ACCT << 8; - nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags); - if (nlh == NULL) - goto nlmsg_failure; - - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = AF_UNSPEC; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = 0; - - NLA_PUT_STRING(skb, NFACCT_NAME, acct->name); - - if (type == NFNL_MSG_ACCT_GET_CTRZERO) { - pkts = atomic64_xchg(&acct->pkts, 0); - bytes = atomic64_xchg(&acct->bytes, 0); - } else { - pkts = atomic64_read(&acct->pkts); - bytes = atomic64_read(&acct->bytes); - } - NLA_PUT_BE64(skb, NFACCT_PKTS, cpu_to_be64(pkts)); - NLA_PUT_BE64(skb, NFACCT_BYTES, cpu_to_be64(bytes)); - NLA_PUT_BE32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt))); - - nlmsg_end(skb, nlh); - return skb->len; - -nlmsg_failure: -nla_put_failure: - nlmsg_cancel(skb, nlh); - return -1; -} - -static int -nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb) -{ - struct nf_acct *cur, *last; - - if (cb->args[2]) - return 0; - - last = (struct nf_acct *)cb->args[1]; - if (cb->args[1]) - cb->args[1] = 0; - - rcu_read_lock(); - list_for_each_entry_rcu(cur, &nfnl_acct_list, head) { - if (last && cur != last) - continue; - - if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).pid, - cb->nlh->nlmsg_seq, - NFNL_MSG_TYPE(cb->nlh->nlmsg_type), - NFNL_MSG_ACCT_NEW, cur) < 0) { - cb->args[1] = (unsigned long)cur; - break; - } - } - if (!cb->args[1]) - cb->args[2] = 1; - rcu_read_unlock(); - return skb->len; -} - -static int -nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb, - const struct nlmsghdr *nlh, const struct nlattr * const tb[]) -{ - int ret = -ENOENT; - struct nf_acct *cur; - char *acct_name; - - if (nlh->nlmsg_flags & NLM_F_DUMP) { - return netlink_dump_start(nfnl, skb, nlh, nfnl_acct_dump, - NULL, 0); - } - - if (!tb[NFACCT_NAME]) - return -EINVAL; - acct_name = nla_data(tb[NFACCT_NAME]); - - list_for_each_entry(cur, &nfnl_acct_list, head) { - struct sk_buff *skb2; - - if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX)!= 0) - continue; - - skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (skb2 == NULL) { - ret = -ENOMEM; - break; - } - - ret = nfnl_acct_fill_info(skb2, NETLINK_CB(skb).pid, - nlh->nlmsg_seq, - NFNL_MSG_TYPE(nlh->nlmsg_type), - NFNL_MSG_ACCT_NEW, cur); - if (ret <= 0) { - kfree_skb(skb2); - break; - } - ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).pid, - MSG_DONTWAIT); - if (ret > 0) - ret = 0; - - /* this avoids a loop in nfnetlink. */ - return ret == -EAGAIN ? -ENOBUFS : ret; - } - return ret; -} - -/* try to delete object, fail if it is still in use. */ -static int nfnl_acct_try_del(struct nf_acct *cur) -{ - int ret = 0; - - /* we want to avoid races with nfnl_acct_find_get. 
*/ - if (atomic_dec_and_test(&cur->refcnt)) { - /* We are protected by nfnl mutex. */ - list_del_rcu(&cur->head); - kfree_rcu(cur, rcu_head); - } else { - /* still in use, restore reference counter. */ - atomic_inc(&cur->refcnt); - ret = -EBUSY; - } - return ret; -} - -static int -nfnl_acct_del(struct sock *nfnl, struct sk_buff *skb, - const struct nlmsghdr *nlh, const struct nlattr * const tb[]) -{ - char *acct_name; - struct nf_acct *cur; - int ret = -ENOENT; - - if (!tb[NFACCT_NAME]) { - list_for_each_entry(cur, &nfnl_acct_list, head) - nfnl_acct_try_del(cur); - - return 0; - } - acct_name = nla_data(tb[NFACCT_NAME]); - - list_for_each_entry(cur, &nfnl_acct_list, head) { - if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0) - continue; - - ret = nfnl_acct_try_del(cur); - if (ret < 0) - return ret; - - break; - } - return ret; -} - -static const struct nla_policy nfnl_acct_policy[NFACCT_MAX+1] = { - [NFACCT_NAME] = { .type = NLA_NUL_STRING, .len = NFACCT_NAME_MAX-1 }, - [NFACCT_BYTES] = { .type = NLA_U64 }, - [NFACCT_PKTS] = { .type = NLA_U64 }, -}; - -static const struct nfnl_callback nfnl_acct_cb[NFNL_MSG_ACCT_MAX] = { - [NFNL_MSG_ACCT_NEW] = { .call = nfnl_acct_new, - .attr_count = NFACCT_MAX, - .policy = nfnl_acct_policy }, - [NFNL_MSG_ACCT_GET] = { .call = nfnl_acct_get, - .attr_count = NFACCT_MAX, - .policy = nfnl_acct_policy }, - [NFNL_MSG_ACCT_GET_CTRZERO] = { .call = nfnl_acct_get, - .attr_count = NFACCT_MAX, - .policy = nfnl_acct_policy }, - [NFNL_MSG_ACCT_DEL] = { .call = nfnl_acct_del, - .attr_count = NFACCT_MAX, - .policy = nfnl_acct_policy }, -}; - -static const struct nfnetlink_subsystem nfnl_acct_subsys = { - .name = "acct", - .subsys_id = NFNL_SUBSYS_ACCT, - .cb_count = NFNL_MSG_ACCT_MAX, - .cb = nfnl_acct_cb, -}; - -MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ACCT); - -struct nf_acct *nfnl_acct_find_get(const char *acct_name) -{ - struct nf_acct *cur, *acct = NULL; - - rcu_read_lock(); - list_for_each_entry_rcu(cur, &nfnl_acct_list, head) { - if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX)!= 0) - continue; - - if (!try_module_get(THIS_MODULE)) - goto err; - - if (!atomic_inc_not_zero(&cur->refcnt)) { - module_put(THIS_MODULE); - goto err; - } - - acct = cur; - break; - } -err: - rcu_read_unlock(); - return acct; -} -EXPORT_SYMBOL_GPL(nfnl_acct_find_get); - -void nfnl_acct_put(struct nf_acct *acct) -{ - atomic_dec(&acct->refcnt); - module_put(THIS_MODULE); -} -EXPORT_SYMBOL_GPL(nfnl_acct_put); - -void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct) -{ - atomic64_inc(&nfacct->pkts); - atomic64_add(skb->len, &nfacct->bytes); -} -EXPORT_SYMBOL_GPL(nfnl_acct_update); - -static int __init nfnl_acct_init(void) -{ - int ret; - - pr_info("nfnl_acct: registering with nfnetlink.\n"); - ret = nfnetlink_subsys_register(&nfnl_acct_subsys); - if (ret < 0) { - pr_err("nfnl_acct_init: cannot register with nfnetlink.\n"); - goto err_out; - } - return 0; -err_out: - return ret; -} - -static void __exit nfnl_acct_exit(void) -{ - struct nf_acct *cur, *tmp; - - pr_info("nfnl_acct: unregistering from nfnetlink.\n"); - nfnetlink_subsys_unregister(&nfnl_acct_subsys); - - list_for_each_entry_safe(cur, tmp, &nfnl_acct_list, head) { - list_del_rcu(&cur->head); - /* We are sure that our objects have no clients at this point, - * it's safe to release them all without checking refcnt. 
*/ - kfree_rcu(cur, rcu_head); - } -} - -module_init(nfnl_acct_init); -module_exit(nfnl_acct_exit); diff --git a/trunk/net/netfilter/xt_AUDIT.c b/trunk/net/netfilter/xt_AUDIT.c index ba92824086f3..4bca15a0c385 100644 --- a/trunk/net/netfilter/xt_AUDIT.c +++ b/trunk/net/netfilter/xt_AUDIT.c @@ -98,7 +98,6 @@ static void audit_ip6(struct audit_buffer *ab, struct sk_buff *skb) struct ipv6hdr _ip6h; const struct ipv6hdr *ih; u8 nexthdr; - __be16 frag_off; int offset; ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h); @@ -109,7 +108,7 @@ static void audit_ip6(struct audit_buffer *ab, struct sk_buff *skb) nexthdr = ih->nexthdr; offset = ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h), - &nexthdr, &frag_off); + &nexthdr); audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu", &ih->saddr, &ih->daddr, nexthdr); diff --git a/trunk/net/netfilter/xt_CT.c b/trunk/net/netfilter/xt_CT.c index 8e87123f1373..0221d10de75a 100644 --- a/trunk/net/netfilter/xt_CT.c +++ b/trunk/net/netfilter/xt_CT.c @@ -62,8 +62,8 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par) int ret = 0; u8 proto; - if (info->flags & ~(XT_CT_NOTRACK | XT_CT_USERSPACE_HELPER)) - return -EOPNOTSUPP; + if (info->flags & ~XT_CT_NOTRACK) + return -EINVAL; if (info->flags & XT_CT_NOTRACK) { ct = nf_ct_untracked_get(); @@ -92,9 +92,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par) GFP_KERNEL)) goto err3; - if (info->flags & XT_CT_USERSPACE_HELPER) { - __set_bit(IPS_USERSPACE_HELPER_BIT, &ct->status); - } else if (info->helper[0]) { + if (info->helper[0]) { ret = -ENOENT; proto = xt_ct_find_proto(par); if (!proto) { diff --git a/trunk/net/netfilter/xt_NFQUEUE.c b/trunk/net/netfilter/xt_NFQUEUE.c index 95237c89607a..d4f4b5d66b20 100644 --- a/trunk/net/netfilter/xt_NFQUEUE.c +++ b/trunk/net/netfilter/xt_NFQUEUE.c @@ -49,7 +49,7 @@ static u32 hash_v4(const struct sk_buff *skb) return jhash_2words((__force u32)ipaddr, iph->protocol, jhash_initval); } -#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) static u32 hash_v6(const struct sk_buff *skb) { const struct ipv6hdr *ip6h = ipv6_hdr(skb); @@ -74,7 +74,7 @@ nfqueue_tg_v1(struct sk_buff *skb, const struct xt_action_param *par) if (par->family == NFPROTO_IPV4) queue = (((u64) hash_v4(skb) * info->queues_total) >> 32) + queue; -#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) else if (par->family == NFPROTO_IPV6) queue = (((u64) hash_v6(skb) * info->queues_total) >> 32) + queue; diff --git a/trunk/net/netfilter/xt_TCPMSS.c b/trunk/net/netfilter/xt_TCPMSS.c index 190ad37c5cf8..9e63b43faeed 100644 --- a/trunk/net/netfilter/xt_TCPMSS.c +++ b/trunk/net/netfilter/xt_TCPMSS.c @@ -161,7 +161,7 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb, struct flowi6 *fl6 = &fl.u.ip6; memset(fl6, 0, sizeof(*fl6)); - fl6->daddr = ipv6_hdr(skb)->saddr; + ipv6_addr_copy(&fl6->daddr, &ipv6_hdr(skb)->saddr); } rcu_read_lock(); ai = nf_get_afinfo(family); @@ -198,18 +198,17 @@ tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par) return XT_CONTINUE; } -#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) static unsigned int tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par) { struct ipv6hdr *ipv6h = ipv6_hdr(skb); u8 nexthdr; - __be16 frag_off; int tcphoff; int ret; nexthdr = ipv6h->nexthdr; - tcphoff = 
ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off); + tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr); if (tcphoff < 0) return NF_DROP; ret = tcpmss_mangle_packet(skb, par->targinfo, @@ -260,7 +259,7 @@ static int tcpmss_tg4_check(const struct xt_tgchk_param *par) return -EINVAL; } -#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) static int tcpmss_tg6_check(const struct xt_tgchk_param *par) { const struct xt_tcpmss_info *info = par->targinfo; @@ -293,7 +292,7 @@ static struct xt_target tcpmss_tg_reg[] __read_mostly = { .proto = IPPROTO_TCP, .me = THIS_MODULE, }, -#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) { .family = NFPROTO_IPV6, .name = "TCPMSS", diff --git a/trunk/net/netfilter/xt_TCPOPTSTRIP.c b/trunk/net/netfilter/xt_TCPOPTSTRIP.c index 25fd1c4e1eec..9dc9ecfdd546 100644 --- a/trunk/net/netfilter/xt_TCPOPTSTRIP.c +++ b/trunk/net/netfilter/xt_TCPOPTSTRIP.c @@ -80,17 +80,16 @@ tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_action_param *par) sizeof(struct iphdr) + sizeof(struct tcphdr)); } -#if IS_ENABLED(CONFIG_IP6_NF_MANGLE) +#if defined(CONFIG_IP6_NF_MANGLE) || defined(CONFIG_IP6_NF_MANGLE_MODULE) static unsigned int tcpoptstrip_tg6(struct sk_buff *skb, const struct xt_action_param *par) { struct ipv6hdr *ipv6h = ipv6_hdr(skb); int tcphoff; u_int8_t nexthdr; - __be16 frag_off; nexthdr = ipv6h->nexthdr; - tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off); + tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr); if (tcphoff < 0) return NF_DROP; @@ -109,7 +108,7 @@ static struct xt_target tcpoptstrip_tg_reg[] __read_mostly = { .targetsize = sizeof(struct xt_tcpoptstrip_target_info), .me = THIS_MODULE, }, -#if IS_ENABLED(CONFIG_IP6_NF_MANGLE) +#if defined(CONFIG_IP6_NF_MANGLE) || defined(CONFIG_IP6_NF_MANGLE_MODULE) { .name = "TCPOPTSTRIP", .family = NFPROTO_IPV6, diff --git a/trunk/net/netfilter/xt_TEE.c b/trunk/net/netfilter/xt_TEE.c index 3aae66facf9f..5f054a0dbbb1 100644 --- a/trunk/net/netfilter/xt_TEE.c +++ b/trunk/net/netfilter/xt_TEE.c @@ -25,10 +25,13 @@ #include #include -#if IS_ENABLED(CONFIG_NF_CONNTRACK) +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) # define WITH_CONNTRACK 1 # include #endif +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +# define WITH_IPV6 1 +#endif struct xt_tee_priv { struct notifier_block notifier; @@ -133,7 +136,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par) return XT_CONTINUE; } -#if IS_ENABLED(CONFIG_IPV6) +#ifdef WITH_IPV6 static bool tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info) { @@ -193,7 +196,7 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par) } return XT_CONTINUE; } -#endif +#endif /* WITH_IPV6 */ static int tee_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) @@ -273,7 +276,7 @@ static struct xt_target tee_tg_reg[] __read_mostly = { .destroy = tee_tg_destroy, .me = THIS_MODULE, }, -#if IS_ENABLED(CONFIG_IPV6) +#ifdef WITH_IPV6 { .name = "TEE", .revision = 1, diff --git a/trunk/net/netfilter/xt_TPROXY.c b/trunk/net/netfilter/xt_TPROXY.c index 35a959a096e0..dcfd57eb9d02 100644 --- a/trunk/net/netfilter/xt_TPROXY.c +++ b/trunk/net/netfilter/xt_TPROXY.c @@ -22,7 +22,7 @@ #include -#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) #define XT_TPROXY_HAVE_IPV6 1 #include 
#include diff --git a/trunk/net/netfilter/xt_addrtype.c b/trunk/net/netfilter/xt_addrtype.c index 49c5ff7f6dd6..b77d383cec78 100644 --- a/trunk/net/netfilter/xt_addrtype.c +++ b/trunk/net/netfilter/xt_addrtype.c @@ -16,7 +16,7 @@ #include #include -#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) #include #include #include @@ -31,7 +31,7 @@ MODULE_DESCRIPTION("Xtables: address type match"); MODULE_ALIAS("ipt_addrtype"); MODULE_ALIAS("ip6t_addrtype"); -#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) static u32 match_lookup_rt6(struct net *net, const struct net_device *dev, const struct in6_addr *addr) { @@ -42,7 +42,7 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev, int route_err; memset(&flow, 0, sizeof(flow)); - flow.daddr = *addr; + ipv6_addr_copy(&flow.daddr, addr); if (dev) flow.flowi6_oif = dev->ifindex; @@ -149,7 +149,7 @@ addrtype_mt_v1(const struct sk_buff *skb, struct xt_action_param *par) else if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) dev = par->out; -#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) if (par->family == NFPROTO_IPV6) return addrtype_mt6(net, dev, skb, info); #endif @@ -190,7 +190,7 @@ static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par) return -EINVAL; } -#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) if (par->family == NFPROTO_IPV6) { if ((info->source | info->dest) & XT_ADDRTYPE_BLACKHOLE) { pr_err("ipv6 BLACKHOLE matching not supported\n"); diff --git a/trunk/net/netfilter/xt_connbytes.c b/trunk/net/netfilter/xt_connbytes.c index e595e07a759b..9ddf1c3bfb39 100644 --- a/trunk/net/netfilter/xt_connbytes.c +++ b/trunk/net/netfilter/xt_connbytes.c @@ -40,46 +40,46 @@ connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par) case XT_CONNBYTES_PKTS: switch (sinfo->direction) { case XT_CONNBYTES_DIR_ORIGINAL: - what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets); + what = counters[IP_CT_DIR_ORIGINAL].packets; break; case XT_CONNBYTES_DIR_REPLY: - what = atomic64_read(&counters[IP_CT_DIR_REPLY].packets); + what = counters[IP_CT_DIR_REPLY].packets; break; case XT_CONNBYTES_DIR_BOTH: - what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets); - what += atomic64_read(&counters[IP_CT_DIR_REPLY].packets); + what = counters[IP_CT_DIR_ORIGINAL].packets; + what += counters[IP_CT_DIR_REPLY].packets; break; } break; case XT_CONNBYTES_BYTES: switch (sinfo->direction) { case XT_CONNBYTES_DIR_ORIGINAL: - what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes); + what = counters[IP_CT_DIR_ORIGINAL].bytes; break; case XT_CONNBYTES_DIR_REPLY: - what = atomic64_read(&counters[IP_CT_DIR_REPLY].bytes); + what = counters[IP_CT_DIR_REPLY].bytes; break; case XT_CONNBYTES_DIR_BOTH: - what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes); - what += atomic64_read(&counters[IP_CT_DIR_REPLY].bytes); + what = counters[IP_CT_DIR_ORIGINAL].bytes; + what += counters[IP_CT_DIR_REPLY].bytes; break; } break; case XT_CONNBYTES_AVGPKT: switch (sinfo->direction) { case XT_CONNBYTES_DIR_ORIGINAL: - bytes = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes); - pkts = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets); + bytes = counters[IP_CT_DIR_ORIGINAL].bytes; + pkts = counters[IP_CT_DIR_ORIGINAL].packets; break; case XT_CONNBYTES_DIR_REPLY: - bytes = 
atomic64_read(&counters[IP_CT_DIR_REPLY].bytes); - pkts = atomic64_read(&counters[IP_CT_DIR_REPLY].packets); + bytes = counters[IP_CT_DIR_REPLY].bytes; + pkts = counters[IP_CT_DIR_REPLY].packets; break; case XT_CONNBYTES_DIR_BOTH: - bytes = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes) + - atomic64_read(&counters[IP_CT_DIR_REPLY].bytes); - pkts = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets) + - atomic64_read(&counters[IP_CT_DIR_REPLY].packets); + bytes = counters[IP_CT_DIR_ORIGINAL].bytes + + counters[IP_CT_DIR_REPLY].bytes; + pkts = counters[IP_CT_DIR_ORIGINAL].packets + + counters[IP_CT_DIR_REPLY].packets; break; } if (pkts != 0) diff --git a/trunk/net/netfilter/xt_ecn.c b/trunk/net/netfilter/xt_ecn.c deleted file mode 100644 index 3c831a8efebc..000000000000 --- a/trunk/net/netfilter/xt_ecn.c +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Xtables module for matching the value of the IPv4/IPv6 and TCP ECN bits - * - * (C) 2002 by Harald Welte - * (C) 2011 Patrick McHardy - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -MODULE_AUTHOR("Harald Welte "); -MODULE_DESCRIPTION("Xtables: Explicit Congestion Notification (ECN) flag match"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("ipt_ecn"); -MODULE_ALIAS("ip6t_ecn"); - -static bool match_tcp(const struct sk_buff *skb, struct xt_action_param *par) -{ - const struct xt_ecn_info *einfo = par->matchinfo; - struct tcphdr _tcph; - const struct tcphdr *th; - - /* In practice, TCP match does this, so can't fail. But let's - * be good citizens. 
- */ - th = skb_header_pointer(skb, par->thoff, sizeof(_tcph), &_tcph); - if (th == NULL) - return false; - - if (einfo->operation & XT_ECN_OP_MATCH_ECE) { - if (einfo->invert & XT_ECN_OP_MATCH_ECE) { - if (th->ece == 1) - return false; - } else { - if (th->ece == 0) - return false; - } - } - - if (einfo->operation & XT_ECN_OP_MATCH_CWR) { - if (einfo->invert & XT_ECN_OP_MATCH_CWR) { - if (th->cwr == 1) - return false; - } else { - if (th->cwr == 0) - return false; - } - } - - return true; -} - -static inline bool match_ip(const struct sk_buff *skb, - const struct xt_ecn_info *einfo) -{ - return ((ip_hdr(skb)->tos & XT_ECN_IP_MASK) == einfo->ip_ect) ^ - !!(einfo->invert & XT_ECN_OP_MATCH_IP); -} - -static bool ecn_mt4(const struct sk_buff *skb, struct xt_action_param *par) -{ - const struct xt_ecn_info *info = par->matchinfo; - - if (info->operation & XT_ECN_OP_MATCH_IP && !match_ip(skb, info)) - return false; - - if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) && - !match_tcp(skb, par)) - return false; - - return true; -} - -static int ecn_mt_check4(const struct xt_mtchk_param *par) -{ - const struct xt_ecn_info *info = par->matchinfo; - const struct ipt_ip *ip = par->entryinfo; - - if (info->operation & XT_ECN_OP_MATCH_MASK) - return -EINVAL; - - if (info->invert & XT_ECN_OP_MATCH_MASK) - return -EINVAL; - - if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) && - (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) { - pr_info("cannot match TCP bits in rule for non-tcp packets\n"); - return -EINVAL; - } - - return 0; -} - -static inline bool match_ipv6(const struct sk_buff *skb, - const struct xt_ecn_info *einfo) -{ - return (((ipv6_hdr(skb)->flow_lbl[0] >> 4) & XT_ECN_IP_MASK) == - einfo->ip_ect) ^ - !!(einfo->invert & XT_ECN_OP_MATCH_IP); -} - -static bool ecn_mt6(const struct sk_buff *skb, struct xt_action_param *par) -{ - const struct xt_ecn_info *info = par->matchinfo; - - if (info->operation & XT_ECN_OP_MATCH_IP && !match_ipv6(skb, info)) - return false; - - if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) && - !match_tcp(skb, par)) - return false; - - return true; -} - -static int ecn_mt_check6(const struct xt_mtchk_param *par) -{ - const struct xt_ecn_info *info = par->matchinfo; - const struct ip6t_ip6 *ip = par->entryinfo; - - if (info->operation & XT_ECN_OP_MATCH_MASK) - return -EINVAL; - - if (info->invert & XT_ECN_OP_MATCH_MASK) - return -EINVAL; - - if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) && - (ip->proto != IPPROTO_TCP || ip->invflags & IP6T_INV_PROTO)) { - pr_info("cannot match TCP bits in rule for non-tcp packets\n"); - return -EINVAL; - } - - return 0; -} - -static struct xt_match ecn_mt_reg[] __read_mostly = { - { - .name = "ecn", - .family = NFPROTO_IPV4, - .match = ecn_mt4, - .matchsize = sizeof(struct xt_ecn_info), - .checkentry = ecn_mt_check4, - .me = THIS_MODULE, - }, - { - .name = "ecn", - .family = NFPROTO_IPV6, - .match = ecn_mt6, - .matchsize = sizeof(struct xt_ecn_info), - .checkentry = ecn_mt_check6, - .me = THIS_MODULE, - }, -}; - -static int __init ecn_mt_init(void) -{ - return xt_register_matches(ecn_mt_reg, ARRAY_SIZE(ecn_mt_reg)); -} - -static void __exit ecn_mt_exit(void) -{ - xt_unregister_matches(ecn_mt_reg, ARRAY_SIZE(ecn_mt_reg)); -} - -module_init(ecn_mt_init); -module_exit(ecn_mt_exit); diff --git a/trunk/net/netfilter/xt_hashlimit.c b/trunk/net/netfilter/xt_hashlimit.c index 8e4992101875..dfd52bad1523 100644 --- a/trunk/net/netfilter/xt_hashlimit.c +++ 
b/trunk/net/netfilter/xt_hashlimit.c @@ -21,7 +21,7 @@ #include #include #include -#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) #include #include #endif @@ -64,7 +64,7 @@ struct dsthash_dst { __be32 src; __be32 dst; } ip; -#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) struct { __be32 src[4]; __be32 dst[4]; @@ -413,7 +413,7 @@ static inline __be32 maskl(__be32 a, unsigned int l) return l ? htonl(ntohl(a) & ~0 << (32 - l)) : 0; } -#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) static void hashlimit_ipv6_mask(__be32 *i, unsigned int p) { switch (p) { @@ -445,7 +445,6 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo, { __be16 _ports[2], *ports; u8 nexthdr; - __be16 frag_off; int poff; memset(dst, 0, sizeof(*dst)); @@ -464,7 +463,7 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo, return 0; nexthdr = ip_hdr(skb)->protocol; break; -#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) case NFPROTO_IPV6: if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) { memcpy(&dst->ip6.dst, &ipv6_hdr(skb)->daddr, @@ -481,7 +480,7 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo, (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT))) return 0; nexthdr = ipv6_hdr(skb)->nexthdr; - protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off); + protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr); if ((int)protoff < 0) return -1; break; @@ -616,7 +615,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = { .destroy = hashlimit_mt_destroy, .me = THIS_MODULE, }, -#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) { .name = "hashlimit", .revision = 1, @@ -693,7 +692,7 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, ent->rateinfo.credit, ent->rateinfo.credit_cap, ent->rateinfo.cost); break; -#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) case NFPROTO_IPV6: res = seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n", (long)(ent->expires - jiffies)/HZ, @@ -761,7 +760,7 @@ static int __net_init hashlimit_proc_net_init(struct net *net) hashlimit_net->ipt_hashlimit = proc_mkdir("ipt_hashlimit", net->proc_net); if (!hashlimit_net->ipt_hashlimit) return -ENOMEM; -#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net); if (!hashlimit_net->ip6t_hashlimit) { proc_net_remove(net, "ipt_hashlimit"); @@ -774,7 +773,7 @@ static int __net_init hashlimit_proc_net_init(struct net *net) static void __net_exit hashlimit_proc_net_exit(struct net *net) { proc_net_remove(net, "ipt_hashlimit"); -#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) proc_net_remove(net, "ip6t_hashlimit"); #endif } diff --git a/trunk/net/netfilter/xt_nfacct.c b/trunk/net/netfilter/xt_nfacct.c deleted file mode 100644 index b3be0ef21f19..000000000000 --- a/trunk/net/netfilter/xt_nfacct.c +++ /dev/null @@ -1,76 +0,0 @@ -/* - * (C) 2011 Pablo Neira Ayuso - * (C) 2011 Intra2net AG - * - * This program is free software; you can redistribute it and/or modify - * it under 
the terms of the GNU General Public License version 2 (or any - * later at your option) as published by the Free Software Foundation. - */ -#include -#include - -#include -#include -#include - -MODULE_AUTHOR("Pablo Neira Ayuso "); -MODULE_DESCRIPTION("Xtables: match for the extended accounting infrastructure"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("ipt_nfacct"); -MODULE_ALIAS("ip6t_nfacct"); - -static bool nfacct_mt(const struct sk_buff *skb, struct xt_action_param *par) -{ - const struct xt_nfacct_match_info *info = par->targinfo; - - nfnl_acct_update(skb, info->nfacct); - - return true; -} - -static int -nfacct_mt_checkentry(const struct xt_mtchk_param *par) -{ - struct xt_nfacct_match_info *info = par->matchinfo; - struct nf_acct *nfacct; - - nfacct = nfnl_acct_find_get(info->name); - if (nfacct == NULL) { - pr_info("xt_nfacct: accounting object with name `%s' " - "does not exists\n", info->name); - return -ENOENT; - } - info->nfacct = nfacct; - return 0; -} - -static void -nfacct_mt_destroy(const struct xt_mtdtor_param *par) -{ - const struct xt_nfacct_match_info *info = par->matchinfo; - - nfnl_acct_put(info->nfacct); -} - -static struct xt_match nfacct_mt_reg __read_mostly = { - .name = "nfacct", - .family = NFPROTO_UNSPEC, - .checkentry = nfacct_mt_checkentry, - .match = nfacct_mt, - .destroy = nfacct_mt_destroy, - .matchsize = sizeof(struct xt_nfacct_match_info), - .me = THIS_MODULE, -}; - -static int __init nfacct_mt_init(void) -{ - return xt_register_match(&nfacct_mt_reg); -} - -static void __exit nfacct_mt_exit(void) -{ - xt_unregister_match(&nfacct_mt_reg); -} - -module_init(nfacct_mt_init); -module_exit(nfacct_mt_exit); diff --git a/trunk/net/netfilter/xt_socket.c b/trunk/net/netfilter/xt_socket.c index 72bb07f57f97..fe39f7e913df 100644 --- a/trunk/net/netfilter/xt_socket.c +++ b/trunk/net/netfilter/xt_socket.c @@ -22,7 +22,7 @@ #include #include -#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) #define XT_SOCKET_HAVE_IPV6 1 #include #include @@ -30,7 +30,7 @@ #include -#if IS_ENABLED(CONFIG_NF_CONNTRACK) +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) #define XT_SOCKET_HAVE_CONNTRACK 1 #include #endif @@ -214,7 +214,6 @@ extract_icmp6_fields(const struct sk_buff *skb, struct icmp6hdr *icmph, _icmph; __be16 *ports, _ports[2]; u8 inside_nexthdr; - __be16 inside_fragoff; int inside_hdrlen; icmph = skb_header_pointer(skb, outside_hdrlen, @@ -230,8 +229,7 @@ extract_icmp6_fields(const struct sk_buff *skb, return 1; inside_nexthdr = inside_iph->nexthdr; - inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + sizeof(_inside_iph), - &inside_nexthdr, &inside_fragoff); + inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + sizeof(_inside_iph), &inside_nexthdr); if (inside_hdrlen < 0) return 1; /* hjm: Packet has no/incomplete transport layer headers. 
*/ diff --git a/trunk/net/netlabel/netlabel_addrlist.c b/trunk/net/netlabel/netlabel_addrlist.c index 6f1701322fb6..96b749dacc34 100644 --- a/trunk/net/netlabel/netlabel_addrlist.c +++ b/trunk/net/netlabel/netlabel_addrlist.c @@ -96,7 +96,7 @@ struct netlbl_af4list *netlbl_af4list_search_exact(__be32 addr, } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) /** * netlbl_af6list_search - Search for a matching IPv6 address entry * @addr: IPv6 address @@ -185,7 +185,7 @@ int netlbl_af4list_add(struct netlbl_af4list *entry, struct list_head *head) return 0; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) /** * netlbl_af6list_add - Add a new IPv6 address entry to a list * @entry: address entry @@ -263,7 +263,7 @@ struct netlbl_af4list *netlbl_af4list_remove(__be32 addr, __be32 mask, return entry; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) /** * netlbl_af6list_remove_entry - Remove an IPv6 address entry * @entry: address entry @@ -342,7 +342,7 @@ void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf, } } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) /** * netlbl_af6list_audit_addr - Audit an IPv6 address * @audit_buf: audit buffer diff --git a/trunk/net/netlabel/netlabel_addrlist.h b/trunk/net/netlabel/netlabel_addrlist.h index a1287ce18130..fdbc1d2c7352 100644 --- a/trunk/net/netlabel/netlabel_addrlist.h +++ b/trunk/net/netlabel/netlabel_addrlist.h @@ -133,7 +133,7 @@ static inline void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf, } #endif -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) #define __af6list_entry(ptr) container_of(ptr, struct netlbl_af6list, list) diff --git a/trunk/net/netlabel/netlabel_domainhash.c b/trunk/net/netlabel/netlabel_domainhash.c index 38204112b9f4..3f905e5370c2 100644 --- a/trunk/net/netlabel/netlabel_domainhash.c +++ b/trunk/net/netlabel/netlabel_domainhash.c @@ -78,7 +78,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry) struct netlbl_dom_map *ptr; struct netlbl_af4list *iter4; struct netlbl_af4list *tmp4; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) struct netlbl_af6list *iter6; struct netlbl_af6list *tmp6; #endif /* IPv6 */ @@ -90,7 +90,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry) netlbl_af4list_remove_entry(iter4); kfree(netlbl_domhsh_addr4_entry(iter4)); } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) netlbl_af6list_foreach_safe(iter6, tmp6, &ptr->type_def.addrsel->list6) { netlbl_af6list_remove_entry(iter6); @@ -217,7 +217,7 @@ static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry, cipsov4 = map4->type_def.cipsov4; netlbl_af4list_audit_addr(audit_buf, 0, NULL, addr4->addr, addr4->mask); -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) } else if (addr6 != NULL) { struct netlbl_domaddr6_map *map6; map6 = netlbl_domhsh_addr6_entry(addr6); @@ -306,7 +306,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, struct netlbl_dom_map *entry_old; struct netlbl_af4list *iter4; struct netlbl_af4list *tmp4; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) struct netlbl_af6list *iter6; struct netlbl_af6list *tmp6; #endif /* IPv6 */ @@ -338,7 +338,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, &entry->type_def.addrsel->list4) 
netlbl_domhsh_audit_add(entry, iter4, NULL, ret_val, audit_info); -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) netlbl_af6list_foreach_rcu(iter6, &entry->type_def.addrsel->list6) netlbl_domhsh_audit_add(entry, NULL, iter6, @@ -365,7 +365,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, ret_val = -EEXIST; goto add_return; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) netlbl_af6list_foreach_rcu(iter6, &entry->type_def.addrsel->list6) if (netlbl_af6list_search_exact(&iter6->addr, @@ -386,7 +386,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, if (ret_val != 0) goto add_return; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) netlbl_af6list_foreach_safe(iter6, tmp6, &entry->type_def.addrsel->list6) { netlbl_af6list_remove_entry(iter6); @@ -510,7 +510,7 @@ int netlbl_domhsh_remove_af4(const char *domain, struct netlbl_dom_map *entry_map; struct netlbl_af4list *entry_addr; struct netlbl_af4list *iter4; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) struct netlbl_af6list *iter6; #endif /* IPv6 */ struct netlbl_domaddr4_map *entry; @@ -533,7 +533,7 @@ int netlbl_domhsh_remove_af4(const char *domain, goto remove_af4_failure; netlbl_af4list_foreach_rcu(iter4, &entry_map->type_def.addrsel->list4) goto remove_af4_single_addr; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) netlbl_af6list_foreach_rcu(iter6, &entry_map->type_def.addrsel->list6) goto remove_af4_single_addr; #endif /* IPv6 */ @@ -644,7 +644,7 @@ struct netlbl_domaddr4_map *netlbl_domhsh_getentry_af4(const char *domain, return netlbl_domhsh_addr4_entry(addr_iter); } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) /** * netlbl_domhsh_getentry_af6 - Get an entry from the domain hash table * @domain: the domain name to search for diff --git a/trunk/net/netlabel/netlabel_domainhash.h b/trunk/net/netlabel/netlabel_domainhash.h index 90872c4ca30f..bfcc0f7024c5 100644 --- a/trunk/net/netlabel/netlabel_domainhash.h +++ b/trunk/net/netlabel/netlabel_domainhash.h @@ -104,7 +104,7 @@ int netlbl_domhsh_walk(u32 *skip_bkt, int (*callback) (struct netlbl_dom_map *entry, void *arg), void *cb_arg); -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain, const struct in6_addr *addr); #endif /* IPv6 */ diff --git a/trunk/net/netlabel/netlabel_kapi.c b/trunk/net/netlabel/netlabel_kapi.c index 2560e7b441c6..824f184f7a9b 100644 --- a/trunk/net/netlabel/netlabel_kapi.c +++ b/trunk/net/netlabel/netlabel_kapi.c @@ -147,7 +147,7 @@ int netlbl_cfg_unlbl_map_add(const char *domain, goto cfg_unlbl_map_add_failure; break; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: { const struct in6_addr *addr6 = addr; const struct in6_addr *mask6 = mask; @@ -155,12 +155,12 @@ int netlbl_cfg_unlbl_map_add(const char *domain, if (map6 == NULL) goto cfg_unlbl_map_add_failure; map6->type = NETLBL_NLTYPE_UNLABELED; - map6->list.addr = *addr6; + ipv6_addr_copy(&map6->list.addr, addr6); map6->list.addr.s6_addr32[0] &= mask6->s6_addr32[0]; map6->list.addr.s6_addr32[1] &= mask6->s6_addr32[1]; map6->list.addr.s6_addr32[2] &= mask6->s6_addr32[2]; map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3]; - map6->list.mask = *mask6; + ipv6_addr_copy(&map6->list.mask, mask6); map6->list.valid 
= 1; ret_val = netlbl_af6list_add(&map6->list, &addrmap->list6); @@ -227,7 +227,7 @@ int netlbl_cfg_unlbl_static_add(struct net *net, case AF_INET: addr_len = sizeof(struct in_addr); break; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: addr_len = sizeof(struct in6_addr); break; @@ -270,7 +270,7 @@ int netlbl_cfg_unlbl_static_del(struct net *net, case AF_INET: addr_len = sizeof(struct in_addr); break; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: addr_len = sizeof(struct in6_addr); break; @@ -673,7 +673,7 @@ int netlbl_sock_setattr(struct sock *sk, ret_val = -ENOENT; } break; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: /* since we don't support any IPv6 labeling protocols right * now we can optimize everything away until we do */ @@ -724,7 +724,7 @@ int netlbl_sock_getattr(struct sock *sk, case AF_INET: ret_val = cipso_v4_sock_getattr(sk, secattr); break; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: ret_val = -ENOMSG; break; @@ -782,7 +782,7 @@ int netlbl_conn_setattr(struct sock *sk, ret_val = -ENOENT; } break; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: /* since we don't support any IPv6 labeling protocols right * now we can optimize everything away until we do */ @@ -853,7 +853,7 @@ int netlbl_req_setattr(struct request_sock *req, ret_val = -ENOENT; } break; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: /* since we don't support any IPv6 labeling protocols right * now we can optimize everything away until we do */ @@ -926,7 +926,7 @@ int netlbl_skbuff_setattr(struct sk_buff *skb, ret_val = -ENOENT; } break; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: /* since we don't support any IPv6 labeling protocols right * now we can optimize everything away until we do */ @@ -965,7 +965,7 @@ int netlbl_skbuff_getattr(const struct sk_buff *skb, cipso_v4_skbuff_getattr(skb, secattr) == 0) return 0; break; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: break; #endif /* IPv6 */ diff --git a/trunk/net/netlabel/netlabel_mgmt.c b/trunk/net/netlabel/netlabel_mgmt.c index 4809e2e48b02..bfa555869775 100644 --- a/trunk/net/netlabel/netlabel_mgmt.c +++ b/trunk/net/netlabel/netlabel_mgmt.c @@ -184,7 +184,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info, entry->type = NETLBL_NLTYPE_ADDRSELECT; entry->type_def.addrsel = addrmap; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) } else if (info->attrs[NLBL_MGMT_A_IPV6ADDR]) { struct in6_addr *addr; struct in6_addr *mask; @@ -216,12 +216,12 @@ static int netlbl_mgmt_add_common(struct genl_info *info, ret_val = -ENOMEM; goto add_failure; } - map->list.addr = *addr; + ipv6_addr_copy(&map->list.addr, addr); map->list.addr.s6_addr32[0] &= mask->s6_addr32[0]; map->list.addr.s6_addr32[1] &= mask->s6_addr32[1]; map->list.addr.s6_addr32[2] &= mask->s6_addr32[2]; map->list.addr.s6_addr32[3] &= mask->s6_addr32[3]; - map->list.mask = *mask; + ipv6_addr_copy(&map->list.mask, mask); map->list.valid = 1; map->type = entry->type; @@ -270,7 +270,7 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb, struct nlattr *nla_a; struct nlattr *nla_b; struct netlbl_af4list *iter4; -#if 
IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) struct netlbl_af6list *iter6; #endif @@ -324,7 +324,7 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb, nla_nest_end(skb, nla_b); } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) netlbl_af6list_foreach_rcu(iter6, &entry->type_def.addrsel->list6) { struct netlbl_domaddr6_map *map6; diff --git a/trunk/net/netlabel/netlabel_unlabeled.c b/trunk/net/netlabel/netlabel_unlabeled.c index 4b5fa0fe78fd..e251c2c88521 100644 --- a/trunk/net/netlabel/netlabel_unlabeled.c +++ b/trunk/net/netlabel/netlabel_unlabeled.c @@ -170,7 +170,7 @@ static void netlbl_unlhsh_free_iface(struct rcu_head *entry) struct netlbl_unlhsh_iface *iface; struct netlbl_af4list *iter4; struct netlbl_af4list *tmp4; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) struct netlbl_af6list *iter6; struct netlbl_af6list *tmp6; #endif /* IPv6 */ @@ -184,7 +184,7 @@ static void netlbl_unlhsh_free_iface(struct rcu_head *entry) netlbl_af4list_remove_entry(iter4); kfree(netlbl_unlhsh_addr4_entry(iter4)); } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) netlbl_af6list_foreach_safe(iter6, tmp6, &iface->addr6_list) { netlbl_af6list_remove_entry(iter6); kfree(netlbl_unlhsh_addr6_entry(iter6)); @@ -274,7 +274,7 @@ static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface, return ret_val; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) /** * netlbl_unlhsh_add_addr6 - Add a new IPv6 address entry to the hash table * @iface: the associated interface entry @@ -300,12 +300,12 @@ static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface, if (entry == NULL) return -ENOMEM; - entry->list.addr = *addr; + ipv6_addr_copy(&entry->list.addr, addr); entry->list.addr.s6_addr32[0] &= mask->s6_addr32[0]; entry->list.addr.s6_addr32[1] &= mask->s6_addr32[1]; entry->list.addr.s6_addr32[2] &= mask->s6_addr32[2]; entry->list.addr.s6_addr32[3] &= mask->s6_addr32[3]; - entry->list.mask = *mask; + ipv6_addr_copy(&entry->list.mask, mask); entry->list.valid = 1; entry->secid = secid; @@ -436,7 +436,7 @@ int netlbl_unlhsh_add(struct net *net, mask4->s_addr); break; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case sizeof(struct in6_addr): { const struct in6_addr *addr6 = addr; const struct in6_addr *mask6 = mask; @@ -531,7 +531,7 @@ static int netlbl_unlhsh_remove_addr4(struct net *net, return 0; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) /** * netlbl_unlhsh_remove_addr6 - Remove an IPv6 address entry * @net: network namespace @@ -606,14 +606,14 @@ static int netlbl_unlhsh_remove_addr6(struct net *net, static void netlbl_unlhsh_condremove_iface(struct netlbl_unlhsh_iface *iface) { struct netlbl_af4list *iter4; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) struct netlbl_af6list *iter6; #endif /* IPv6 */ spin_lock(&netlbl_unlhsh_lock); netlbl_af4list_foreach_rcu(iter4, &iface->addr4_list) goto unlhsh_condremove_failure; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) netlbl_af6list_foreach_rcu(iter6, &iface->addr6_list) goto unlhsh_condremove_failure; #endif /* IPv6 */ @@ -680,7 +680,7 @@ int netlbl_unlhsh_remove(struct net *net, iface, addr, mask, audit_info); break; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 
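[Editorial illustration, not part of the patch.] The hunks in this region replace IS_ENABLED(CONFIG_IPV6) and IS_ENABLED(CONFIG_IP6_NF_IPTABLES) tests with explicit defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) checks. For a tristate Kconfig symbol FOO, building it in defines CONFIG_FOO and building it as a module defines CONFIG_FOO_MODULE, so the two preprocessor forms guard the same set of configurations. The stand-alone sketch below only illustrates that equivalence under those assumptions; it is not the kernel's linux/kconfig.h implementation, and the macro and program names are made up for the example.

    /* demo.c - simplified illustration of the config test used above.
     * Emulate CONFIG_IPV6=m with:  cc -DCONFIG_IPV6_MODULE=1 -o demo demo.c
     * Emulate CONFIG_IPV6=y with:  cc -DCONFIG_IPV6=1 -o demo demo.c
     */
    #include <stdio.h>

    #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
    #define HAVE_IPV6 1          /* built in or modular: IPv6 paths compiled */
    #else
    #define HAVE_IPV6 0          /* CONFIG_IPV6=n: IPv6 paths compiled out */
    #endif

    int main(void)
    {
            /* Reports the same condition the #if above selects on. */
            printf("IPv6 support (built-in or module): %s\n",
                   HAVE_IPV6 ? "yes" : "no");
            return 0;
    }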
case sizeof(struct in6_addr): ret_val = netlbl_unlhsh_remove_addr6(net, iface, addr, mask, @@ -1196,7 +1196,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, struct netlbl_unlhsh_iface *iface; struct list_head *iter_list; struct netlbl_af4list *addr4; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) struct netlbl_af6list *addr6; #endif @@ -1228,7 +1228,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, goto unlabel_staticlist_return; } } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) { if (iter_addr6++ < skip_addr6) @@ -1277,7 +1277,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb, u32 skip_addr6 = cb->args[1]; u32 iter_addr4 = 0; struct netlbl_af4list *addr4; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) u32 iter_addr6 = 0; struct netlbl_af6list *addr6; #endif @@ -1303,7 +1303,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb, goto unlabel_staticlistdef_return; } } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) { if (iter_addr6++ < skip_addr6) continue; @@ -1494,7 +1494,7 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb, secattr->attr.secid = netlbl_unlhsh_addr4_entry(addr4)->secid; break; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case PF_INET6: { struct ipv6hdr *hdr6; struct netlbl_af6list *addr6; diff --git a/trunk/net/netlink/af_netlink.c b/trunk/net/netlink/af_netlink.c index 629b06182f3f..1201b6d4183d 100644 --- a/trunk/net/netlink/af_netlink.c +++ b/trunk/net/netlink/af_netlink.c @@ -139,12 +139,12 @@ static atomic_t nl_table_users = ATOMIC_INIT(0); static ATOMIC_NOTIFIER_HEAD(netlink_chain); -static inline u32 netlink_group_mask(u32 group) +static u32 netlink_group_mask(u32 group) { return group ? 
1 << (group - 1) : 0; } -static inline struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid) +static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid) { return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask]; } @@ -226,7 +226,8 @@ netlink_unlock_table(void) wake_up(&nl_table_wait); } -static struct sock *netlink_lookup(struct net *net, int protocol, u32 pid) +static inline struct sock *netlink_lookup(struct net *net, int protocol, + u32 pid) { struct nl_pid_hash *hash = &nl_table[protocol].hash; struct hlist_head *head; @@ -247,7 +248,7 @@ static struct sock *netlink_lookup(struct net *net, int protocol, u32 pid) return sk; } -static struct hlist_head *nl_pid_hash_zalloc(size_t size) +static inline struct hlist_head *nl_pid_hash_zalloc(size_t size) { if (size <= PAGE_SIZE) return kzalloc(size, GFP_ATOMIC); @@ -257,7 +258,7 @@ static struct hlist_head *nl_pid_hash_zalloc(size_t size) get_order(size)); } -static void nl_pid_hash_free(struct hlist_head *table, size_t size) +static inline void nl_pid_hash_free(struct hlist_head *table, size_t size) { if (size <= PAGE_SIZE) kfree(table); @@ -577,7 +578,7 @@ static int netlink_autobind(struct socket *sock) return err; } -static inline int netlink_capable(const struct socket *sock, unsigned int flag) +static inline int netlink_capable(struct socket *sock, unsigned int flag) { return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) || capable(CAP_NET_ADMIN); @@ -845,7 +846,8 @@ void netlink_detachskb(struct sock *sk, struct sk_buff *skb) sock_put(sk); } -static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation) +static inline struct sk_buff *netlink_trim(struct sk_buff *skb, + gfp_t allocation) { int delta; @@ -869,7 +871,7 @@ static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation) return skb; } -static void netlink_rcv_wake(struct sock *sk) +static inline void netlink_rcv_wake(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); @@ -879,7 +881,7 @@ static void netlink_rcv_wake(struct sock *sk) wake_up_interruptible(&nlk->wait); } -static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb) +static inline int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb) { int ret; struct netlink_sock *nlk = nlk_sk(sk); @@ -950,7 +952,8 @@ int netlink_has_listeners(struct sock *sk, unsigned int group) } EXPORT_SYMBOL_GPL(netlink_has_listeners); -static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb) +static inline int netlink_broadcast_deliver(struct sock *sk, + struct sk_buff *skb) { struct netlink_sock *nlk = nlk_sk(sk); @@ -959,7 +962,7 @@ static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb) skb_set_owner_r(skb, sk); skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk, skb->len); - return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1); + return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf; } return -1; } @@ -979,7 +982,7 @@ struct netlink_broadcast_data { void *tx_data; }; -static int do_one_broadcast(struct sock *sk, +static inline int do_one_broadcast(struct sock *sk, struct netlink_broadcast_data *p) { struct netlink_sock *nlk = nlk_sk(sk); @@ -1107,7 +1110,8 @@ struct netlink_set_err_data { int code; }; -static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p) +static inline int do_one_set_err(struct sock *sk, + struct netlink_set_err_data *p) { struct netlink_sock *nlk = nlk_sk(sk); int ret = 0; diff --git a/trunk/net/netlink/genetlink.c 
b/trunk/net/netlink/genetlink.c index a403b618faa5..482fa571b4ee 100644 --- a/trunk/net/netlink/genetlink.c +++ b/trunk/net/netlink/genetlink.c @@ -33,14 +33,6 @@ void genl_unlock(void) } EXPORT_SYMBOL(genl_unlock); -#ifdef CONFIG_PROVE_LOCKING -int lockdep_genl_is_held(void) -{ - return lockdep_is_held(&genl_mutex); -} -EXPORT_SYMBOL(lockdep_genl_is_held); -#endif - #define GENL_FAM_TAB_SIZE 16 #define GENL_FAM_TAB_MASK (GENL_FAM_TAB_SIZE - 1) @@ -106,7 +98,7 @@ static struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family) /* Of course we are going to have problems once we hit * 2^16 alive types, but that can only happen by year 2K */ -static u16 genl_generate_id(void) +static inline u16 genl_generate_id(void) { static u16 id_gen_idx = GENL_MIN_ID; int i; @@ -792,15 +784,6 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info) name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]); res = genl_family_find_byname(name); -#ifdef CONFIG_MODULES - if (res == NULL) { - genl_unlock(); - request_module("net-pf-%d-proto-%d-type-%s", - PF_NETLINK, NETLINK_GENERIC, name); - genl_lock(); - res = genl_family_find_byname(name); - } -#endif err = -ENOENT; } @@ -963,16 +946,3 @@ int genlmsg_multicast_allns(struct sk_buff *skb, u32 pid, unsigned int group, return genlmsg_mcast(skb, pid, group, flags); } EXPORT_SYMBOL(genlmsg_multicast_allns); - -void genl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group, - struct nlmsghdr *nlh, gfp_t flags) -{ - struct sock *sk = net->genl_sock; - int report = 0; - - if (nlh) - report = nlmsg_report(nlh); - - nlmsg_notify(sk, skb, pid, group, report, flags); -} -EXPORT_SYMBOL(genl_notify); diff --git a/trunk/net/netrom/af_netrom.c b/trunk/net/netrom/af_netrom.c index 7dab229bfbcc..732152f718e0 100644 --- a/trunk/net/netrom/af_netrom.c +++ b/trunk/net/netrom/af_netrom.c @@ -306,26 +306,26 @@ static int nr_setsockopt(struct socket *sock, int level, int optname, { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); - unsigned long opt; + int opt; if (level != SOL_NETROM) return -ENOPROTOOPT; - if (optlen < sizeof(unsigned int)) + if (optlen < sizeof(int)) return -EINVAL; - if (get_user(opt, (unsigned int __user *)optval)) + if (get_user(opt, (int __user *)optval)) return -EFAULT; switch (optname) { case NETROM_T1: - if (opt < 1 || opt > ULONG_MAX / HZ) + if (opt < 1) return -EINVAL; nr->t1 = opt * HZ; return 0; case NETROM_T2: - if (opt < 1 || opt > ULONG_MAX / HZ) + if (opt < 1) return -EINVAL; nr->t2 = opt * HZ; return 0; @@ -337,13 +337,13 @@ static int nr_setsockopt(struct socket *sock, int level, int optname, return 0; case NETROM_T4: - if (opt < 1 || opt > ULONG_MAX / HZ) + if (opt < 1) return -EINVAL; nr->t4 = opt * HZ; return 0; case NETROM_IDLE: - if (opt > ULONG_MAX / (60 * HZ)) + if (opt < 0) return -EINVAL; nr->idle = opt * 60 * HZ; return 0; @@ -1244,8 +1244,7 @@ static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) case SIOCADDRT: case SIOCDELRT: case SIOCNRDECOBS: - if (!capable(CAP_NET_ADMIN)) - return -EPERM; + if (!capable(CAP_NET_ADMIN)) return -EPERM; return nr_rt_ioctl(cmd, argp); default: diff --git a/trunk/net/netrom/nr_route.c b/trunk/net/netrom/nr_route.c index 2cf330162d7e..915a87ba23e1 100644 --- a/trunk/net/netrom/nr_route.c +++ b/trunk/net/netrom/nr_route.c @@ -670,17 +670,14 @@ int nr_rt_ioctl(unsigned int cmd, void __user *arg) case SIOCADDRT: if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct))) return -EFAULT; - if (nr_route.ndigis > AX25_MAX_DIGIS) - 
return -EINVAL; if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL) return -EINVAL; + if (nr_route.ndigis < 0 || nr_route.ndigis > AX25_MAX_DIGIS) { + dev_put(dev); + return -EINVAL; + } switch (nr_route.type) { case NETROM_NODE: - if (strnlen(nr_route.mnemonic, 7) == 7) { - ret = -EINVAL; - break; - } - ret = nr_add_node(&nr_route.callsign, nr_route.mnemonic, &nr_route.neighbour, diff --git a/trunk/net/nfc/Kconfig b/trunk/net/nfc/Kconfig index 44c865b86d6f..58cddadf8e8e 100644 --- a/trunk/net/nfc/Kconfig +++ b/trunk/net/nfc/Kconfig @@ -14,6 +14,5 @@ menuconfig NFC be called nfc. source "net/nfc/nci/Kconfig" -source "net/nfc/llcp/Kconfig" source "drivers/nfc/Kconfig" diff --git a/trunk/net/nfc/Makefile b/trunk/net/nfc/Makefile index 7b4a6dcfa566..fbb550f2377b 100644 --- a/trunk/net/nfc/Makefile +++ b/trunk/net/nfc/Makefile @@ -6,4 +6,3 @@ obj-$(CONFIG_NFC) += nfc.o obj-$(CONFIG_NFC_NCI) += nci/ nfc-objs := core.o netlink.o af_nfc.o rawsock.o -nfc-$(CONFIG_NFC_LLCP) += llcp/llcp.o llcp/commands.o llcp/sock.o diff --git a/trunk/net/nfc/core.c b/trunk/net/nfc/core.c index 3ddf6e698df0..47e02c1b8c02 100644 --- a/trunk/net/nfc/core.c +++ b/trunk/net/nfc/core.c @@ -21,13 +21,10 @@ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ -#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ - #include #include #include #include -#include #include "nfc.h" @@ -36,6 +33,25 @@ int nfc_devlist_generation; DEFINE_MUTEX(nfc_devlist_mutex); +int nfc_printk(const char *level, const char *format, ...) +{ + struct va_format vaf; + va_list args; + int r; + + va_start(args, format); + + vaf.fmt = format; + vaf.va = &args; + + r = printk("%sNFC: %pV\n", level, &vaf); + + va_end(args); + + return r; +} +EXPORT_SYMBOL(nfc_printk); + /** * nfc_dev_up - turn on the NFC device * @@ -47,7 +63,7 @@ int nfc_dev_up(struct nfc_dev *dev) { int rc = 0; - pr_debug("dev_name=%s\n", dev_name(&dev->dev)); + nfc_dbg("dev_name=%s", dev_name(&dev->dev)); device_lock(&dev->dev); @@ -81,7 +97,7 @@ int nfc_dev_down(struct nfc_dev *dev) { int rc = 0; - pr_debug("dev_name=%s\n", dev_name(&dev->dev)); + nfc_dbg("dev_name=%s", dev_name(&dev->dev)); device_lock(&dev->dev); @@ -123,8 +139,7 @@ int nfc_start_poll(struct nfc_dev *dev, u32 protocols) { int rc; - pr_debug("dev_name=%s protocols=0x%x\n", - dev_name(&dev->dev), protocols); + nfc_dbg("dev_name=%s protocols=0x%x", dev_name(&dev->dev), protocols); if (!protocols) return -EINVAL; @@ -159,7 +174,7 @@ int nfc_stop_poll(struct nfc_dev *dev) { int rc = 0; - pr_debug("dev_name=%s\n", dev_name(&dev->dev)); + nfc_dbg("dev_name=%s", dev_name(&dev->dev)); device_lock(&dev->dev); @@ -181,86 +196,6 @@ int nfc_stop_poll(struct nfc_dev *dev) return rc; } -int nfc_dep_link_up(struct nfc_dev *dev, int target_index, - u8 comm_mode, u8 rf_mode) -{ - int rc = 0; - - pr_debug("dev_name=%s comm:%d rf:%d\n", - dev_name(&dev->dev), comm_mode, rf_mode); - - if (!dev->ops->dep_link_up) - return -EOPNOTSUPP; - - device_lock(&dev->dev); - - if (!device_is_registered(&dev->dev)) { - rc = -ENODEV; - goto error; - } - - if (dev->dep_link_up == true) { - rc = -EALREADY; - goto error; - } - - rc = dev->ops->dep_link_up(dev, target_index, comm_mode, rf_mode); - -error: - device_unlock(&dev->dev); - return rc; -} - -int nfc_dep_link_down(struct nfc_dev *dev) -{ - int rc = 0; - - pr_debug("dev_name=%s\n", dev_name(&dev->dev)); - - if (!dev->ops->dep_link_down) - return -EOPNOTSUPP; - - device_lock(&dev->dev); - - if (!device_is_registered(&dev->dev)) { - rc = -ENODEV; - goto error; - } - - if 
(dev->dep_link_up == false) { - rc = -EALREADY; - goto error; - } - - if (dev->dep_rf_mode == NFC_RF_TARGET) { - rc = -EOPNOTSUPP; - goto error; - } - - rc = dev->ops->dep_link_down(dev); - if (!rc) { - dev->dep_link_up = false; - nfc_llcp_mac_is_down(dev); - nfc_genl_dep_link_down_event(dev); - } - -error: - device_unlock(&dev->dev); - return rc; -} - -int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx, - u8 comm_mode, u8 rf_mode) -{ - dev->dep_link_up = true; - dev->dep_rf_mode = rf_mode; - - nfc_llcp_mac_is_up(dev, target_idx, comm_mode, rf_mode); - - return nfc_genl_dep_link_up_event(dev, target_idx, comm_mode, rf_mode); -} -EXPORT_SYMBOL(nfc_dep_link_is_up); - /** * nfc_activate_target - prepare the target for data exchange * @@ -272,8 +207,8 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol) { int rc; - pr_debug("dev_name=%s target_idx=%u protocol=%u\n", - dev_name(&dev->dev), target_idx, protocol); + nfc_dbg("dev_name=%s target_idx=%u protocol=%u", dev_name(&dev->dev), + target_idx, protocol); device_lock(&dev->dev); @@ -301,8 +236,7 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx) { int rc = 0; - pr_debug("dev_name=%s target_idx=%u\n", - dev_name(&dev->dev), target_idx); + nfc_dbg("dev_name=%s target_idx=%u", dev_name(&dev->dev), target_idx); device_lock(&dev->dev); @@ -337,8 +271,8 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, { int rc; - pr_debug("dev_name=%s target_idx=%u skb->len=%u\n", - dev_name(&dev->dev), target_idx, skb->len); + nfc_dbg("dev_name=%s target_idx=%u skb->len=%u", dev_name(&dev->dev), + target_idx, skb->len); device_lock(&dev->dev); @@ -355,54 +289,13 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, return rc; } -int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len) -{ - pr_debug("dev_name=%s gb_len=%d\n", - dev_name(&dev->dev), gb_len); - - if (gb_len > NFC_MAX_GT_LEN) - return -EINVAL; - - return nfc_llcp_set_remote_gb(dev, gb, gb_len); -} -EXPORT_SYMBOL(nfc_set_remote_general_bytes); - -u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, u8 *gt_len) -{ - return nfc_llcp_general_bytes(dev, gt_len); -} -EXPORT_SYMBOL(nfc_get_local_general_bytes); - /** - * nfc_alloc_send_skb - allocate a skb for data exchange responses + * nfc_alloc_skb - allocate a skb for data exchange responses * * @size: size to allocate * @gfp: gfp flags */ -struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk, - unsigned int flags, unsigned int size, - unsigned int *err) -{ - struct sk_buff *skb; - unsigned int total_size; - - total_size = size + - dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE; - - skb = sock_alloc_send_skb(sk, total_size, flags & MSG_DONTWAIT, err); - if (skb) - skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); - - return skb; -} - -/** - * nfc_alloc_recv_skb - allocate a skb for data exchange responses - * - * @size: size to allocate - * @gfp: gfp flags - */ -struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp) +struct sk_buff *nfc_alloc_skb(unsigned int size, gfp_t gfp) { struct sk_buff *skb; unsigned int total_size; @@ -415,7 +308,7 @@ struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp) return skb; } -EXPORT_SYMBOL(nfc_alloc_recv_skb); +EXPORT_SYMBOL(nfc_alloc_skb); /** * nfc_targets_found - inform that targets were found @@ -433,7 +326,7 @@ int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets, { int i; - pr_debug("dev_name=%s n_targets=%d\n", dev_name(&dev->dev), n_targets); + 
nfc_dbg("dev_name=%s n_targets=%d", dev_name(&dev->dev), n_targets); dev->polling = false; @@ -467,7 +360,7 @@ static void nfc_release(struct device *d) { struct nfc_dev *dev = to_nfc_dev(d); - pr_debug("dev_name=%s\n", dev_name(&dev->dev)); + nfc_dbg("dev_name=%s", dev_name(&dev->dev)); nfc_genl_data_exit(&dev->genl_data); kfree(dev->targets); @@ -553,7 +446,7 @@ int nfc_register_device(struct nfc_dev *dev) { int rc; - pr_debug("dev_name=%s\n", dev_name(&dev->dev)); + nfc_dbg("dev_name=%s", dev_name(&dev->dev)); mutex_lock(&nfc_devlist_mutex); nfc_devlist_generation++; @@ -563,14 +456,11 @@ int nfc_register_device(struct nfc_dev *dev) if (rc < 0) return rc; - rc = nfc_llcp_register_device(dev); - if (rc) - pr_err("Could not register llcp device\n"); - rc = nfc_genl_device_added(dev); if (rc) - pr_debug("The userspace won't be notified that the device %s was added\n", - dev_name(&dev->dev)); + nfc_dbg("The userspace won't be notified that the device %s was" + " added", dev_name(&dev->dev)); + return 0; } @@ -585,7 +475,7 @@ void nfc_unregister_device(struct nfc_dev *dev) { int rc; - pr_debug("dev_name=%s\n", dev_name(&dev->dev)); + nfc_dbg("dev_name=%s", dev_name(&dev->dev)); mutex_lock(&nfc_devlist_mutex); nfc_devlist_generation++; @@ -598,12 +488,10 @@ void nfc_unregister_device(struct nfc_dev *dev) mutex_unlock(&nfc_devlist_mutex); - nfc_llcp_unregister_device(dev); - rc = nfc_genl_device_removed(dev); if (rc) - pr_debug("The userspace won't be notified that the device %s was removed\n", - dev_name(&dev->dev)); + nfc_dbg("The userspace won't be notified that the device %s" + " was removed", dev_name(&dev->dev)); } EXPORT_SYMBOL(nfc_unregister_device); @@ -612,7 +500,7 @@ static int __init nfc_init(void) { int rc; - pr_info("NFC Core ver %s\n", VERSION); + nfc_info("NFC Core ver %s", VERSION); rc = class_register(&nfc_class); if (rc) @@ -629,10 +517,6 @@ static int __init nfc_init(void) if (rc) goto err_rawsock; - rc = nfc_llcp_init(); - if (rc) - goto err_llcp_sock; - rc = af_nfc_init(); if (rc) goto err_af_nfc; @@ -640,8 +524,6 @@ static int __init nfc_init(void) return 0; err_af_nfc: - nfc_llcp_exit(); -err_llcp_sock: rawsock_exit(); err_rawsock: nfc_genl_exit(); @@ -653,7 +535,6 @@ static int __init nfc_init(void) static void __exit nfc_exit(void) { af_nfc_exit(); - nfc_llcp_exit(); rawsock_exit(); nfc_genl_exit(); class_unregister(&nfc_class); diff --git a/trunk/net/nfc/llcp/Kconfig b/trunk/net/nfc/llcp/Kconfig deleted file mode 100644 index fbf5e8150908..000000000000 --- a/trunk/net/nfc/llcp/Kconfig +++ /dev/null @@ -1,7 +0,0 @@ -config NFC_LLCP - depends on NFC && EXPERIMENTAL - bool "NFC LLCP support (EXPERIMENTAL)" - default n - help - Say Y here if you want to build support for a kernel NFC LLCP - implementation. \ No newline at end of file diff --git a/trunk/net/nfc/llcp/commands.c b/trunk/net/nfc/llcp/commands.c deleted file mode 100644 index 151f2ef429c4..000000000000 --- a/trunk/net/nfc/llcp/commands.c +++ /dev/null @@ -1,399 +0,0 @@ -/* - * Copyright (C) 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the - * Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - */ - -#define pr_fmt(fmt) "llcp: %s: " fmt, __func__ - -#include -#include -#include -#include - -#include - -#include "../nfc.h" -#include "llcp.h" - -static u8 llcp_tlv_length[LLCP_TLV_MAX] = { - 0, - 1, /* VERSION */ - 2, /* MIUX */ - 2, /* WKS */ - 1, /* LTO */ - 1, /* RW */ - 0, /* SN */ - 1, /* OPT */ - 0, /* SDREQ */ - 2, /* SDRES */ - -}; - -static u8 llcp_tlv8(u8 *tlv, u8 type) -{ - if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]]) - return 0; - - return tlv[2]; -} - -static u8 llcp_tlv16(u8 *tlv, u8 type) -{ - if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]]) - return 0; - - return be16_to_cpu(*((__be16 *)(tlv + 2))); -} - - -static u8 llcp_tlv_version(u8 *tlv) -{ - return llcp_tlv8(tlv, LLCP_TLV_VERSION); -} - -static u16 llcp_tlv_miux(u8 *tlv) -{ - return llcp_tlv16(tlv, LLCP_TLV_MIUX) & 0x7f; -} - -static u16 llcp_tlv_wks(u8 *tlv) -{ - return llcp_tlv16(tlv, LLCP_TLV_WKS); -} - -static u16 llcp_tlv_lto(u8 *tlv) -{ - return llcp_tlv8(tlv, LLCP_TLV_LTO); -} - -static u8 llcp_tlv_opt(u8 *tlv) -{ - return llcp_tlv8(tlv, LLCP_TLV_OPT); -} - -static u8 llcp_tlv_rw(u8 *tlv) -{ - return llcp_tlv8(tlv, LLCP_TLV_RW) & 0xf; -} - -u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length) -{ - u8 *tlv, length; - - pr_debug("type %d\n", type); - - if (type >= LLCP_TLV_MAX) - return NULL; - - length = llcp_tlv_length[type]; - if (length == 0 && value_length == 0) - return NULL; - else - length = value_length; - - *tlv_length = 2 + length; - tlv = kzalloc(2 + length, GFP_KERNEL); - if (tlv == NULL) - return tlv; - - tlv[0] = type; - tlv[1] = length; - memcpy(tlv + 2, value, length); - - return tlv; -} - -int nfc_llcp_parse_tlv(struct nfc_llcp_local *local, - u8 *tlv_array, u16 tlv_array_len) -{ - u8 *tlv = tlv_array, type, length, offset = 0; - - pr_debug("TLV array length %d\n", tlv_array_len); - - if (local == NULL) - return -ENODEV; - - while (offset < tlv_array_len) { - type = tlv[0]; - length = tlv[1]; - - pr_debug("type 0x%x length %d\n", type, length); - - switch (type) { - case LLCP_TLV_VERSION: - local->remote_version = llcp_tlv_version(tlv); - break; - case LLCP_TLV_MIUX: - local->remote_miu = llcp_tlv_miux(tlv) + 128; - break; - case LLCP_TLV_WKS: - local->remote_wks = llcp_tlv_wks(tlv); - break; - case LLCP_TLV_LTO: - local->remote_lto = llcp_tlv_lto(tlv) * 10; - break; - case LLCP_TLV_OPT: - local->remote_opt = llcp_tlv_opt(tlv); - break; - case LLCP_TLV_RW: - local->remote_rw = llcp_tlv_rw(tlv); - break; - default: - pr_err("Invalid gt tlv value 0x%x\n", type); - break; - } - - offset += length + 2; - tlv += length + 2; - } - - pr_debug("version 0x%x miu %d lto %d opt 0x%x wks 0x%x rw %d\n", - local->remote_version, local->remote_miu, - local->remote_lto, local->remote_opt, - local->remote_wks, local->remote_rw); - - return 0; -} - -static struct sk_buff *llcp_add_header(struct sk_buff *pdu, - u8 dsap, u8 ssap, u8 ptype) -{ - u8 header[2]; - - pr_debug("ptype 0x%x dsap 0x%x ssap 0x%x\n", ptype, dsap, ssap); - - header[0] = (u8)((dsap << 2) | (ptype >> 2)); - header[1] = (u8)((ptype << 6) | ssap); - - pr_debug("header 0x%x 0x%x\n", header[0], header[1]); - - memcpy(skb_put(pdu, LLCP_HEADER_SIZE), header, LLCP_HEADER_SIZE); - - return pdu; -} - -static struct sk_buff 
*llcp_add_tlv(struct sk_buff *pdu, u8 *tlv, u8 tlv_length) -{ - /* XXX Add an skb length check */ - - if (tlv == NULL) - return NULL; - - memcpy(skb_put(pdu, tlv_length), tlv, tlv_length); - - return pdu; -} - -static struct sk_buff *llcp_allocate_pdu(struct nfc_llcp_sock *sock, - u8 cmd, u16 size) -{ - struct sk_buff *skb; - int err; - - if (sock->ssap == 0) - return NULL; - - skb = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT, - size + LLCP_HEADER_SIZE, &err); - if (skb == NULL) { - pr_err("Could not allocate PDU\n"); - return NULL; - } - - skb = llcp_add_header(skb, sock->dsap, sock->ssap, cmd); - - return skb; -} - -int nfc_llcp_disconnect(struct nfc_llcp_sock *sock) -{ - struct sk_buff *skb; - struct nfc_dev *dev; - struct nfc_llcp_local *local; - u16 size = 0; - - pr_debug("Sending DISC\n"); - - local = sock->local; - if (local == NULL) - return -ENODEV; - - dev = sock->dev; - if (dev == NULL) - return -ENODEV; - - size += LLCP_HEADER_SIZE; - size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE; - - skb = alloc_skb(size, GFP_ATOMIC); - if (skb == NULL) - return -ENOMEM; - - skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); - - skb = llcp_add_header(skb, sock->ssap, sock->dsap, LLCP_PDU_DISC); - - skb_queue_tail(&local->tx_queue, skb); - - return 0; -} - -int nfc_llcp_send_symm(struct nfc_dev *dev) -{ - struct sk_buff *skb; - struct nfc_llcp_local *local; - u16 size = 0; - - pr_debug("Sending SYMM\n"); - - local = nfc_llcp_find_local(dev); - if (local == NULL) - return -ENODEV; - - size += LLCP_HEADER_SIZE; - size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE; - - skb = alloc_skb(size, GFP_KERNEL); - if (skb == NULL) - return -ENOMEM; - - skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); - - skb = llcp_add_header(skb, 0, 0, LLCP_PDU_SYMM); - - return nfc_data_exchange(dev, local->target_idx, skb, - nfc_llcp_recv, local); -} - -int nfc_llcp_send_connect(struct nfc_llcp_sock *sock) -{ - struct nfc_llcp_local *local; - struct sk_buff *skb; - u8 *service_name_tlv = NULL, service_name_tlv_length; - int err; - u16 size = 0; - - pr_debug("Sending CONNECT\n"); - - local = sock->local; - if (local == NULL) - return -ENODEV; - - if (sock->service_name != NULL) { - service_name_tlv = nfc_llcp_build_tlv(LLCP_TLV_SN, - sock->service_name, - sock->service_name_len, - &service_name_tlv_length); - size += service_name_tlv_length; - } - - pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len); - - skb = llcp_allocate_pdu(sock, LLCP_PDU_CONNECT, size); - if (skb == NULL) { - err = -ENOMEM; - goto error_tlv; - } - - if (service_name_tlv != NULL) - skb = llcp_add_tlv(skb, service_name_tlv, - service_name_tlv_length); - - skb_queue_tail(&local->tx_queue, skb); - - return 0; - -error_tlv: - pr_err("error %d\n", err); - - kfree(service_name_tlv); - - return err; -} - -int nfc_llcp_send_cc(struct nfc_llcp_sock *sock) -{ - struct nfc_llcp_local *local; - struct sk_buff *skb; - - pr_debug("Sending CC\n"); - - local = sock->local; - if (local == NULL) - return -ENODEV; - - skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, 0); - if (skb == NULL) - return -ENOMEM; - - skb_queue_tail(&local->tx_queue, skb); - - return 0; -} - -int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason) -{ - struct sk_buff *skb; - struct nfc_dev *dev; - u16 size = 1; /* Reason code */ - - pr_debug("Sending DM reason 0x%x\n", reason); - - if (local == NULL) - return -ENODEV; - - dev = local->dev; - if (dev == NULL) - return -ENODEV; - - size += LLCP_HEADER_SIZE; - size 
+= dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE; - - skb = alloc_skb(size, GFP_KERNEL); - if (skb == NULL) - return -ENOMEM; - - skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); - - skb = llcp_add_header(skb, ssap, dsap, LLCP_PDU_DM); - - memcpy(skb_put(skb, 1), &reason, 1); - - skb_queue_head(&local->tx_queue, skb); - - return 0; -} - -int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock) -{ - struct sk_buff *skb; - struct nfc_llcp_local *local; - - pr_debug("Send DISC\n"); - - local = sock->local; - if (local == NULL) - return -ENODEV; - - skb = llcp_allocate_pdu(sock, LLCP_PDU_DISC, 0); - if (skb == NULL) - return -ENOMEM; - - skb_queue_head(&local->tx_queue, skb); - - return 0; -} diff --git a/trunk/net/nfc/llcp/llcp.c b/trunk/net/nfc/llcp/llcp.c deleted file mode 100644 index 1d32680807d6..000000000000 --- a/trunk/net/nfc/llcp/llcp.c +++ /dev/null @@ -1,971 +0,0 @@ -/* - * Copyright (C) 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the - * Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - */ - -#define pr_fmt(fmt) "llcp: %s: " fmt, __func__ - -#include -#include -#include -#include - -#include "../nfc.h" -#include "llcp.h" - -static u8 llcp_magic[3] = {0x46, 0x66, 0x6d}; - -static struct list_head llcp_devices; - -static void nfc_llcp_socket_release(struct nfc_llcp_local *local) -{ - struct nfc_llcp_sock *parent, *s, *n; - struct sock *sk, *parent_sk; - int i; - - - mutex_lock(&local->socket_lock); - - for (i = 0; i < LLCP_MAX_SAP; i++) { - parent = local->sockets[i]; - if (parent == NULL) - continue; - - /* Release all child sockets */ - list_for_each_entry_safe(s, n, &parent->list, list) { - list_del(&s->list); - sk = &s->sk; - - lock_sock(sk); - - if (sk->sk_state == LLCP_CONNECTED) - nfc_put_device(s->dev); - - sk->sk_state = LLCP_CLOSED; - sock_set_flag(sk, SOCK_DEAD); - - release_sock(sk); - } - - parent_sk = &parent->sk; - - lock_sock(parent_sk); - - if (parent_sk->sk_state == LLCP_LISTEN) { - struct nfc_llcp_sock *lsk, *n; - struct sock *accept_sk; - - list_for_each_entry_safe(lsk, n, &parent->accept_queue, - accept_queue) { - accept_sk = &lsk->sk; - lock_sock(accept_sk); - - nfc_llcp_accept_unlink(accept_sk); - - accept_sk->sk_state = LLCP_CLOSED; - sock_set_flag(accept_sk, SOCK_DEAD); - - release_sock(accept_sk); - - sock_orphan(accept_sk); - } - } - - if (parent_sk->sk_state == LLCP_CONNECTED) - nfc_put_device(parent->dev); - - parent_sk->sk_state = LLCP_CLOSED; - sock_set_flag(parent_sk, SOCK_DEAD); - - release_sock(parent_sk); - } - - mutex_unlock(&local->socket_lock); -} - -static void nfc_llcp_timeout_work(struct work_struct *work) -{ - struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local, - timeout_work); - - nfc_dep_link_down(local->dev); -} - -static void nfc_llcp_symm_timer(unsigned long data) -{ - struct nfc_llcp_local *local = (struct nfc_llcp_local *) data; - - 
pr_err("SYMM timeout\n"); - - queue_work(local->timeout_wq, &local->timeout_work); -} - -struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev) -{ - struct nfc_llcp_local *local, *n; - - list_for_each_entry_safe(local, n, &llcp_devices, list) - if (local->dev == dev) - return local; - - pr_debug("No device found\n"); - - return NULL; -} - -static char *wks[] = { - NULL, - NULL, /* SDP */ - "urn:nfc:sn:ip", - "urn:nfc:sn:obex", - "urn:nfc:sn:snep", -}; - -static int nfc_llcp_wks_sap(char *service_name, size_t service_name_len) -{ - int sap, num_wks; - - pr_debug("%s\n", service_name); - - if (service_name == NULL) - return -EINVAL; - - num_wks = ARRAY_SIZE(wks); - - for (sap = 0 ; sap < num_wks; sap++) { - if (wks[sap] == NULL) - continue; - - if (strncmp(wks[sap], service_name, service_name_len) == 0) - return sap; - } - - return -EINVAL; -} - -u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local, - struct nfc_llcp_sock *sock) -{ - mutex_lock(&local->sdp_lock); - - if (sock->service_name != NULL && sock->service_name_len > 0) { - int ssap = nfc_llcp_wks_sap(sock->service_name, - sock->service_name_len); - - if (ssap > 0) { - pr_debug("WKS %d\n", ssap); - - /* This is a WKS, let's check if it's free */ - if (local->local_wks & BIT(ssap)) { - mutex_unlock(&local->sdp_lock); - - return LLCP_SAP_MAX; - } - - set_bit(BIT(ssap), &local->local_wks); - mutex_unlock(&local->sdp_lock); - - return ssap; - } - - /* - * This is not a well known service, - * we should try to find a local SDP free spot - */ - ssap = find_first_zero_bit(&local->local_sdp, LLCP_SDP_NUM_SAP); - if (ssap == LLCP_SDP_NUM_SAP) { - mutex_unlock(&local->sdp_lock); - - return LLCP_SAP_MAX; - } - - pr_debug("SDP ssap %d\n", LLCP_WKS_NUM_SAP + ssap); - - set_bit(BIT(ssap), &local->local_sdp); - mutex_unlock(&local->sdp_lock); - - return LLCP_WKS_NUM_SAP + ssap; - - } else if (sock->ssap != 0) { - if (sock->ssap < LLCP_WKS_NUM_SAP) { - if (!(local->local_wks & BIT(sock->ssap))) { - set_bit(BIT(sock->ssap), &local->local_wks); - mutex_unlock(&local->sdp_lock); - - return sock->ssap; - } - - } else if (sock->ssap < LLCP_SDP_NUM_SAP) { - if (!(local->local_sdp & - BIT(sock->ssap - LLCP_WKS_NUM_SAP))) { - set_bit(BIT(sock->ssap - LLCP_WKS_NUM_SAP), - &local->local_sdp); - mutex_unlock(&local->sdp_lock); - - return sock->ssap; - } - } - } - - mutex_unlock(&local->sdp_lock); - - return LLCP_SAP_MAX; -} - -u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local) -{ - u8 local_ssap; - - mutex_lock(&local->sdp_lock); - - local_ssap = find_first_zero_bit(&local->local_sap, LLCP_LOCAL_NUM_SAP); - if (local_ssap == LLCP_LOCAL_NUM_SAP) { - mutex_unlock(&local->sdp_lock); - return LLCP_SAP_MAX; - } - - set_bit(BIT(local_ssap), &local->local_sap); - - mutex_unlock(&local->sdp_lock); - - return local_ssap + LLCP_LOCAL_SAP_OFFSET; -} - -void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap) -{ - u8 local_ssap; - unsigned long *sdp; - - if (ssap < LLCP_WKS_NUM_SAP) { - local_ssap = ssap; - sdp = &local->local_wks; - } else if (ssap < LLCP_LOCAL_NUM_SAP) { - local_ssap = ssap - LLCP_WKS_NUM_SAP; - sdp = &local->local_sdp; - } else if (ssap < LLCP_MAX_SAP) { - local_ssap = ssap - LLCP_LOCAL_NUM_SAP; - sdp = &local->local_sap; - } else { - return; - } - - mutex_lock(&local->sdp_lock); - - clear_bit(1 << local_ssap, sdp); - - mutex_unlock(&local->sdp_lock); -} - -u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *general_bytes_len) -{ - struct nfc_llcp_local *local; - - local = nfc_llcp_find_local(dev); - if (local == NULL) { - 
*general_bytes_len = 0; - return NULL; - } - - *general_bytes_len = local->gb_len; - - return local->gb; -} - -static int nfc_llcp_build_gb(struct nfc_llcp_local *local) -{ - u8 *gb_cur, *version_tlv, version, version_length; - u8 *lto_tlv, lto, lto_length; - u8 *wks_tlv, wks_length; - u8 gb_len = 0; - - version = LLCP_VERSION_11; - version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version, - 1, &version_length); - gb_len += version_length; - - /* 1500 ms */ - lto = 150; - lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, <o, 1, <o_length); - gb_len += lto_length; - - pr_debug("Local wks 0x%lx\n", local->local_wks); - wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&local->local_wks, 2, - &wks_length); - gb_len += wks_length; - - gb_len += ARRAY_SIZE(llcp_magic); - - if (gb_len > NFC_MAX_GT_LEN) { - kfree(version_tlv); - return -EINVAL; - } - - gb_cur = local->gb; - - memcpy(gb_cur, llcp_magic, ARRAY_SIZE(llcp_magic)); - gb_cur += ARRAY_SIZE(llcp_magic); - - memcpy(gb_cur, version_tlv, version_length); - gb_cur += version_length; - - memcpy(gb_cur, lto_tlv, lto_length); - gb_cur += lto_length; - - memcpy(gb_cur, wks_tlv, wks_length); - gb_cur += wks_length; - - kfree(version_tlv); - kfree(lto_tlv); - - local->gb_len = gb_len; - - return 0; -} - -int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len) -{ - struct nfc_llcp_local *local = nfc_llcp_find_local(dev); - - if (local == NULL) { - pr_err("No LLCP device\n"); - return -ENODEV; - } - - memset(local->remote_gb, 0, NFC_MAX_GT_LEN); - memcpy(local->remote_gb, gb, gb_len); - local->remote_gb_len = gb_len; - - if (local->remote_gb == NULL || - local->remote_gb_len == 0) - return -ENODEV; - - if (memcmp(local->remote_gb, llcp_magic, 3)) { - pr_err("MAC does not support LLCP\n"); - return -EINVAL; - } - - return nfc_llcp_parse_tlv(local, - &local->remote_gb[3], local->remote_gb_len - 3); -} - -static void nfc_llcp_tx_work(struct work_struct *work) -{ - struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local, - tx_work); - struct sk_buff *skb; - - skb = skb_dequeue(&local->tx_queue); - if (skb != NULL) { - pr_debug("Sending pending skb\n"); - nfc_data_exchange(local->dev, local->target_idx, - skb, nfc_llcp_recv, local); - } else { - nfc_llcp_send_symm(local->dev); - } - - mod_timer(&local->link_timer, - jiffies + msecs_to_jiffies(local->remote_lto)); -} - -static u8 nfc_llcp_dsap(struct sk_buff *pdu) -{ - return (pdu->data[0] & 0xfc) >> 2; -} - -static u8 nfc_llcp_ptype(struct sk_buff *pdu) -{ - return ((pdu->data[0] & 0x03) << 2) | ((pdu->data[1] & 0xc0) >> 6); -} - -static u8 nfc_llcp_ssap(struct sk_buff *pdu) -{ - return pdu->data[1] & 0x3f; -} - -static u8 nfc_llcp_ns(struct sk_buff *pdu) -{ - return pdu->data[2] >> 4; -} - -static u8 nfc_llcp_nr(struct sk_buff *pdu) -{ - return pdu->data[2] & 0xf; -} - -static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu) -{ - pdu->data[2] = (sock->send_n << 4) | ((sock->recv_n - 1) % 16); - sock->send_n = (sock->send_n + 1) % 16; - sock->recv_ack_n = (sock->recv_n - 1) % 16; -} - -static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local, - u8 ssap, u8 dsap) -{ - struct nfc_llcp_sock *sock, *llcp_sock, *n; - - if (ssap == 0 && dsap == 0) - return NULL; - - mutex_lock(&local->socket_lock); - sock = local->sockets[ssap]; - if (sock == NULL) { - mutex_unlock(&local->socket_lock); - return NULL; - } - - pr_debug("root dsap %d (%d)\n", sock->dsap, dsap); - - if (sock->dsap == dsap) { - sock_hold(&sock->sk); - 
mutex_unlock(&local->socket_lock); - return sock; - } - - list_for_each_entry_safe(llcp_sock, n, &sock->list, list) { - pr_debug("llcp_sock %p sk %p dsap %d\n", llcp_sock, - &llcp_sock->sk, llcp_sock->dsap); - if (llcp_sock->dsap == dsap) { - sock_hold(&llcp_sock->sk); - mutex_unlock(&local->socket_lock); - return llcp_sock; - } - } - - pr_err("Could not find socket for %d %d\n", ssap, dsap); - - mutex_unlock(&local->socket_lock); - - return NULL; -} - -static void nfc_llcp_sock_put(struct nfc_llcp_sock *sock) -{ - sock_put(&sock->sk); -} - -static u8 *nfc_llcp_connect_sn(struct sk_buff *skb, size_t *sn_len) -{ - u8 *tlv = &skb->data[2], type, length; - size_t tlv_array_len = skb->len - LLCP_HEADER_SIZE, offset = 0; - - while (offset < tlv_array_len) { - type = tlv[0]; - length = tlv[1]; - - pr_debug("type 0x%x length %d\n", type, length); - - if (type == LLCP_TLV_SN) { - *sn_len = length; - return &tlv[2]; - } - - offset += length + 2; - tlv += length + 2; - } - - return NULL; -} - -static void nfc_llcp_recv_connect(struct nfc_llcp_local *local, - struct sk_buff *skb) -{ - struct sock *new_sk, *parent; - struct nfc_llcp_sock *sock, *new_sock; - u8 dsap, ssap, bound_sap, reason; - - dsap = nfc_llcp_dsap(skb); - ssap = nfc_llcp_ssap(skb); - - pr_debug("%d %d\n", dsap, ssap); - - nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE], - skb->len - LLCP_HEADER_SIZE); - - if (dsap != LLCP_SAP_SDP) { - bound_sap = dsap; - - mutex_lock(&local->socket_lock); - sock = local->sockets[dsap]; - if (sock == NULL) { - mutex_unlock(&local->socket_lock); - reason = LLCP_DM_NOBOUND; - goto fail; - } - - sock_hold(&sock->sk); - mutex_unlock(&local->socket_lock); - - lock_sock(&sock->sk); - - if (sock->dsap == LLCP_SAP_SDP && - sock->sk.sk_state == LLCP_LISTEN) - goto enqueue; - } else { - u8 *sn; - size_t sn_len; - - sn = nfc_llcp_connect_sn(skb, &sn_len); - if (sn == NULL) { - reason = LLCP_DM_NOBOUND; - goto fail; - } - - pr_debug("Service name length %zu\n", sn_len); - - mutex_lock(&local->socket_lock); - for (bound_sap = 0; bound_sap < LLCP_LOCAL_SAP_OFFSET; - bound_sap++) { - sock = local->sockets[bound_sap]; - if (sock == NULL) - continue; - - if (sock->service_name == NULL || - sock->service_name_len == 0) - continue; - - if (sock->service_name_len != sn_len) - continue; - - if (sock->dsap == LLCP_SAP_SDP && - sock->sk.sk_state == LLCP_LISTEN && - !memcmp(sn, sock->service_name, sn_len)) { - pr_debug("Found service name at SAP %d\n", - bound_sap); - sock_hold(&sock->sk); - mutex_unlock(&local->socket_lock); - - lock_sock(&sock->sk); - - goto enqueue; - } - } - mutex_unlock(&local->socket_lock); - } - - reason = LLCP_DM_NOBOUND; - goto fail; - -enqueue: - parent = &sock->sk; - - if (sk_acceptq_is_full(parent)) { - reason = LLCP_DM_REJ; - release_sock(&sock->sk); - sock_put(&sock->sk); - goto fail; - } - - new_sk = nfc_llcp_sock_alloc(NULL, parent->sk_type, - GFP_ATOMIC); - if (new_sk == NULL) { - reason = LLCP_DM_REJ; - release_sock(&sock->sk); - sock_put(&sock->sk); - goto fail; - } - - new_sock = nfc_llcp_sock(new_sk); - new_sock->dev = local->dev; - new_sock->local = local; - new_sock->nfc_protocol = sock->nfc_protocol; - new_sock->ssap = bound_sap; - new_sock->dsap = ssap; - new_sock->parent = parent; - - pr_debug("new sock %p sk %p\n", new_sock, &new_sock->sk); - - list_add_tail(&new_sock->list, &sock->list); - - nfc_llcp_accept_enqueue(&sock->sk, new_sk); - - nfc_get_device(local->dev->idx); - - new_sk->sk_state = LLCP_CONNECTED; - - /* Wake the listening processes */ - 
parent->sk_data_ready(parent, 0); - - /* Send CC */ - nfc_llcp_send_cc(new_sock); - - release_sock(&sock->sk); - sock_put(&sock->sk); - - return; - -fail: - /* Send DM */ - nfc_llcp_send_dm(local, dsap, ssap, reason); - - return; - -} - -static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local, - struct sk_buff *skb) -{ - struct nfc_llcp_sock *llcp_sock; - struct sock *sk; - u8 dsap, ssap, ptype, ns, nr; - - ptype = nfc_llcp_ptype(skb); - dsap = nfc_llcp_dsap(skb); - ssap = nfc_llcp_ssap(skb); - ns = nfc_llcp_ns(skb); - nr = nfc_llcp_nr(skb); - - pr_debug("%d %d R %d S %d\n", dsap, ssap, nr, ns); - - llcp_sock = nfc_llcp_sock_get(local, dsap, ssap); - if (llcp_sock == NULL) { - nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN); - return; - } - - sk = &llcp_sock->sk; - lock_sock(sk); - if (sk->sk_state == LLCP_CLOSED) { - release_sock(sk); - nfc_llcp_sock_put(llcp_sock); - } - - if (ns == llcp_sock->recv_n) - llcp_sock->recv_n = (llcp_sock->recv_n + 1) % 16; - else - pr_err("Received out of sequence I PDU\n"); - - /* Pass the payload upstream */ - if (ptype == LLCP_PDU_I) { - pr_debug("I frame, queueing on %p\n", &llcp_sock->sk); - - skb_pull(skb, LLCP_HEADER_SIZE + LLCP_SEQUENCE_SIZE); - if (sock_queue_rcv_skb(&llcp_sock->sk, skb)) { - pr_err("receive queue is full\n"); - skb_queue_head(&llcp_sock->tx_backlog_queue, skb); - } - } - - /* Remove skbs from the pending queue */ - if (llcp_sock->send_ack_n != nr) { - struct sk_buff *s, *tmp; - - llcp_sock->send_ack_n = nr; - - skb_queue_walk_safe(&llcp_sock->tx_pending_queue, s, tmp) - if (nfc_llcp_ns(s) <= nr) { - skb_unlink(s, &llcp_sock->tx_pending_queue); - kfree_skb(s); - } - } - - /* Queue some I frames for transmission */ - while (llcp_sock->remote_ready && - skb_queue_len(&llcp_sock->tx_pending_queue) <= local->remote_rw) { - struct sk_buff *pdu, *pending_pdu; - - pdu = skb_dequeue(&llcp_sock->tx_queue); - if (pdu == NULL) - break; - - /* Update N(S)/N(R) */ - nfc_llcp_set_nrns(llcp_sock, pdu); - - pending_pdu = skb_clone(pdu, GFP_KERNEL); - - skb_queue_tail(&local->tx_queue, pdu); - skb_queue_tail(&llcp_sock->tx_pending_queue, pending_pdu); - } - - release_sock(sk); - nfc_llcp_sock_put(llcp_sock); -} - -static void nfc_llcp_recv_disc(struct nfc_llcp_local *local, - struct sk_buff *skb) -{ - struct nfc_llcp_sock *llcp_sock; - struct sock *sk; - u8 dsap, ssap; - - dsap = nfc_llcp_dsap(skb); - ssap = nfc_llcp_ssap(skb); - - llcp_sock = nfc_llcp_sock_get(local, dsap, ssap); - if (llcp_sock == NULL) { - nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN); - return; - } - - sk = &llcp_sock->sk; - lock_sock(sk); - if (sk->sk_state == LLCP_CLOSED) { - release_sock(sk); - nfc_llcp_sock_put(llcp_sock); - } - - - if (sk->sk_state == LLCP_CONNECTED) { - nfc_put_device(local->dev); - sk->sk_state = LLCP_CLOSED; - sk->sk_state_change(sk); - } - - nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_DISC); - - release_sock(sk); - nfc_llcp_sock_put(llcp_sock); -} - -static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, - struct sk_buff *skb) -{ - struct nfc_llcp_sock *llcp_sock; - u8 dsap, ssap; - - - dsap = nfc_llcp_dsap(skb); - ssap = nfc_llcp_ssap(skb); - - llcp_sock = nfc_llcp_sock_get(local, dsap, ssap); - - if (llcp_sock == NULL) - llcp_sock = nfc_llcp_sock_get(local, dsap, LLCP_SAP_SDP); - - if (llcp_sock == NULL) { - pr_err("Invalid CC\n"); - nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN); - - return; - } - - llcp_sock->dsap = ssap; - - nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE], - skb->len - LLCP_HEADER_SIZE); - - 
nfc_llcp_sock_put(llcp_sock); -} - -static void nfc_llcp_rx_work(struct work_struct *work) -{ - struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local, - rx_work); - u8 dsap, ssap, ptype; - struct sk_buff *skb; - - skb = local->rx_pending; - if (skb == NULL) { - pr_debug("No pending SKB\n"); - return; - } - - ptype = nfc_llcp_ptype(skb); - dsap = nfc_llcp_dsap(skb); - ssap = nfc_llcp_ssap(skb); - - pr_debug("ptype 0x%x dsap 0x%x ssap 0x%x\n", ptype, dsap, ssap); - - switch (ptype) { - case LLCP_PDU_SYMM: - pr_debug("SYMM\n"); - break; - - case LLCP_PDU_CONNECT: - pr_debug("CONNECT\n"); - nfc_llcp_recv_connect(local, skb); - break; - - case LLCP_PDU_DISC: - pr_debug("DISC\n"); - nfc_llcp_recv_disc(local, skb); - break; - - case LLCP_PDU_CC: - pr_debug("CC\n"); - nfc_llcp_recv_cc(local, skb); - break; - - case LLCP_PDU_I: - case LLCP_PDU_RR: - pr_debug("I frame\n"); - nfc_llcp_recv_hdlc(local, skb); - break; - - } - - queue_work(local->tx_wq, &local->tx_work); - kfree_skb(local->rx_pending); - local->rx_pending = NULL; - - return; -} - -void nfc_llcp_recv(void *data, struct sk_buff *skb, int err) -{ - struct nfc_llcp_local *local = (struct nfc_llcp_local *) data; - - pr_debug("Received an LLCP PDU\n"); - if (err < 0) { - pr_err("err %d", err); - return; - } - - local->rx_pending = skb_get(skb); - del_timer(&local->link_timer); - queue_work(local->rx_wq, &local->rx_work); - - return; -} - -void nfc_llcp_mac_is_down(struct nfc_dev *dev) -{ - struct nfc_llcp_local *local; - - local = nfc_llcp_find_local(dev); - if (local == NULL) - return; - - /* Close and purge all existing sockets */ - nfc_llcp_socket_release(local); -} - -void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx, - u8 comm_mode, u8 rf_mode) -{ - struct nfc_llcp_local *local; - - pr_debug("rf mode %d\n", rf_mode); - - local = nfc_llcp_find_local(dev); - if (local == NULL) - return; - - local->target_idx = target_idx; - local->comm_mode = comm_mode; - local->rf_mode = rf_mode; - - if (rf_mode == NFC_RF_INITIATOR) { - pr_debug("Queueing Tx work\n"); - - queue_work(local->tx_wq, &local->tx_work); - } else { - mod_timer(&local->link_timer, - jiffies + msecs_to_jiffies(local->remote_lto)); - } -} - -int nfc_llcp_register_device(struct nfc_dev *ndev) -{ - struct device *dev = &ndev->dev; - struct nfc_llcp_local *local; - char name[32]; - int err; - - local = kzalloc(sizeof(struct nfc_llcp_local), GFP_KERNEL); - if (local == NULL) - return -ENOMEM; - - local->dev = ndev; - INIT_LIST_HEAD(&local->list); - mutex_init(&local->sdp_lock); - mutex_init(&local->socket_lock); - init_timer(&local->link_timer); - local->link_timer.data = (unsigned long) local; - local->link_timer.function = nfc_llcp_symm_timer; - - skb_queue_head_init(&local->tx_queue); - INIT_WORK(&local->tx_work, nfc_llcp_tx_work); - snprintf(name, sizeof(name), "%s_llcp_tx_wq", dev_name(dev)); - local->tx_wq = alloc_workqueue(name, - WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, 1); - if (local->tx_wq == NULL) { - err = -ENOMEM; - goto err_local; - } - - local->rx_pending = NULL; - INIT_WORK(&local->rx_work, nfc_llcp_rx_work); - snprintf(name, sizeof(name), "%s_llcp_rx_wq", dev_name(dev)); - local->rx_wq = alloc_workqueue(name, - WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, 1); - if (local->rx_wq == NULL) { - err = -ENOMEM; - goto err_tx_wq; - } - - INIT_WORK(&local->timeout_work, nfc_llcp_timeout_work); - snprintf(name, sizeof(name), "%s_llcp_timeout_wq", dev_name(dev)); - local->timeout_wq = alloc_workqueue(name, - WQ_NON_REENTRANT | WQ_UNBOUND | 
WQ_MEM_RECLAIM, 1); - if (local->timeout_wq == NULL) { - err = -ENOMEM; - goto err_rx_wq; - } - - nfc_llcp_build_gb(local); - - local->remote_miu = LLCP_DEFAULT_MIU; - local->remote_lto = LLCP_DEFAULT_LTO; - local->remote_rw = LLCP_DEFAULT_RW; - - list_add(&llcp_devices, &local->list); - - return 0; - -err_rx_wq: - destroy_workqueue(local->rx_wq); - -err_tx_wq: - destroy_workqueue(local->tx_wq); - -err_local: - kfree(local); - - return 0; -} - -void nfc_llcp_unregister_device(struct nfc_dev *dev) -{ - struct nfc_llcp_local *local = nfc_llcp_find_local(dev); - - if (local == NULL) { - pr_debug("No such device\n"); - return; - } - - list_del(&local->list); - nfc_llcp_socket_release(local); - del_timer_sync(&local->link_timer); - skb_queue_purge(&local->tx_queue); - destroy_workqueue(local->tx_wq); - destroy_workqueue(local->rx_wq); - kfree_skb(local->rx_pending); - kfree(local); -} - -int __init nfc_llcp_init(void) -{ - INIT_LIST_HEAD(&llcp_devices); - - return nfc_llcp_sock_init(); -} - -void nfc_llcp_exit(void) -{ - nfc_llcp_sock_exit(); -} diff --git a/trunk/net/nfc/llcp/llcp.h b/trunk/net/nfc/llcp/llcp.h deleted file mode 100644 index 0ad2e3361584..000000000000 --- a/trunk/net/nfc/llcp/llcp.h +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Copyright (C) 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the - * Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
- */ - -enum llcp_state { - LLCP_CONNECTED = 1, /* wait_for_packet() wants that */ - LLCP_CLOSED, - LLCP_BOUND, - LLCP_LISTEN, -}; - -#define LLCP_DEFAULT_LTO 100 -#define LLCP_DEFAULT_RW 1 -#define LLCP_DEFAULT_MIU 128 - -#define LLCP_WKS_NUM_SAP 16 -#define LLCP_SDP_NUM_SAP 16 -#define LLCP_LOCAL_NUM_SAP 32 -#define LLCP_LOCAL_SAP_OFFSET (LLCP_WKS_NUM_SAP + LLCP_SDP_NUM_SAP) -#define LLCP_MAX_SAP (LLCP_WKS_NUM_SAP + LLCP_SDP_NUM_SAP + LLCP_LOCAL_NUM_SAP) - -struct nfc_llcp_sock; - -struct nfc_llcp_local { - struct list_head list; - struct nfc_dev *dev; - - struct mutex sdp_lock; - struct mutex socket_lock; - - struct timer_list link_timer; - struct sk_buff_head tx_queue; - struct workqueue_struct *tx_wq; - struct work_struct tx_work; - struct workqueue_struct *rx_wq; - struct work_struct rx_work; - struct sk_buff *rx_pending; - struct workqueue_struct *timeout_wq; - struct work_struct timeout_work; - - u32 target_idx; - u8 rf_mode; - u8 comm_mode; - unsigned long local_wks; /* Well known services */ - unsigned long local_sdp; /* Local services */ - unsigned long local_sap; /* Local SAPs, not available for discovery */ - - /* local */ - u8 gb[NFC_MAX_GT_LEN]; - u8 gb_len; - - /* remote */ - u8 remote_gb[NFC_MAX_GT_LEN]; - u8 remote_gb_len; - - u8 remote_version; - u16 remote_miu; - u16 remote_lto; - u8 remote_opt; - u16 remote_wks; - u8 remote_rw; - - /* sockets array */ - struct nfc_llcp_sock *sockets[LLCP_MAX_SAP]; -}; - -struct nfc_llcp_sock { - struct sock sk; - struct list_head list; - struct nfc_dev *dev; - struct nfc_llcp_local *local; - u32 target_idx; - u32 nfc_protocol; - - u8 ssap; - u8 dsap; - char *service_name; - size_t service_name_len; - - /* Link variables */ - u8 send_n; - u8 send_ack_n; - u8 recv_n; - u8 recv_ack_n; - - /* Is the remote peer ready to receive */ - u8 remote_ready; - - struct sk_buff_head tx_queue; - struct sk_buff_head tx_pending_queue; - struct sk_buff_head tx_backlog_queue; - - struct list_head accept_queue; - struct sock *parent; -}; - -#define nfc_llcp_sock(sk) ((struct nfc_llcp_sock *) (sk)) -#define nfc_llcp_dev(sk) (nfc_llcp_sock((sk))->dev) - -#define LLCP_HEADER_SIZE 2 -#define LLCP_SEQUENCE_SIZE 1 - -/* LLCP versions: 1.1 is 1.0 plus SDP */ -#define LLCP_VERSION_10 0x10 -#define LLCP_VERSION_11 0x11 - -/* LLCP PDU types */ -#define LLCP_PDU_SYMM 0x0 -#define LLCP_PDU_PAX 0x1 -#define LLCP_PDU_AGF 0x2 -#define LLCP_PDU_UI 0x3 -#define LLCP_PDU_CONNECT 0x4 -#define LLCP_PDU_DISC 0x5 -#define LLCP_PDU_CC 0x6 -#define LLCP_PDU_DM 0x7 -#define LLCP_PDU_FRMR 0x8 -#define LLCP_PDU_SNL 0x9 -#define LLCP_PDU_I 0xc -#define LLCP_PDU_RR 0xd -#define LLCP_PDU_RNR 0xe - -/* Parameters TLV types */ -#define LLCP_TLV_VERSION 0x1 -#define LLCP_TLV_MIUX 0x2 -#define LLCP_TLV_WKS 0x3 -#define LLCP_TLV_LTO 0x4 -#define LLCP_TLV_RW 0x5 -#define LLCP_TLV_SN 0x6 -#define LLCP_TLV_OPT 0x7 -#define LLCP_TLV_SDREQ 0x8 -#define LLCP_TLV_SDRES 0x9 -#define LLCP_TLV_MAX 0xa - -/* Well known LLCP SAP */ -#define LLCP_SAP_SDP 0x1 -#define LLCP_SAP_IP 0x2 -#define LLCP_SAP_OBEX 0x3 -#define LLCP_SAP_SNEP 0x4 -#define LLCP_SAP_MAX 0xff - -/* Disconnection reason code */ -#define LLCP_DM_DISC 0x00 -#define LLCP_DM_NOCONN 0x01 -#define LLCP_DM_NOBOUND 0x02 -#define LLCP_DM_REJ 0x03 - - -struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev); -u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local, - struct nfc_llcp_sock *sock); -u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local); -void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap); - -/* Sock API */ 
-struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp); -void nfc_llcp_sock_free(struct nfc_llcp_sock *sock); -void nfc_llcp_accept_unlink(struct sock *sk); -void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk); -struct sock *nfc_llcp_accept_dequeue(struct sock *sk, struct socket *newsock); - -/* TLV API */ -int nfc_llcp_parse_tlv(struct nfc_llcp_local *local, - u8 *tlv_array, u16 tlv_array_len); - -/* Commands API */ -void nfc_llcp_recv(void *data, struct sk_buff *skb, int err); -u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length); -void nfc_llcp_recv(void *data, struct sk_buff *skb, int err); -int nfc_llcp_disconnect(struct nfc_llcp_sock *sock); -int nfc_llcp_send_symm(struct nfc_dev *dev); -int nfc_llcp_send_connect(struct nfc_llcp_sock *sock); -int nfc_llcp_send_cc(struct nfc_llcp_sock *sock); -int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason); -int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock); - -/* Socket API */ -int __init nfc_llcp_sock_init(void); -void nfc_llcp_sock_exit(void); diff --git a/trunk/net/nfc/llcp/sock.c b/trunk/net/nfc/llcp/sock.c deleted file mode 100644 index f738ccd535f1..000000000000 --- a/trunk/net/nfc/llcp/sock.c +++ /dev/null @@ -1,675 +0,0 @@ -/* - * Copyright (C) 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the - * Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
- */ - -#define pr_fmt(fmt) "llcp: %s: " fmt, __func__ - -#include -#include -#include -#include - -#include "../nfc.h" -#include "llcp.h" - -static struct proto llcp_sock_proto = { - .name = "NFC_LLCP", - .owner = THIS_MODULE, - .obj_size = sizeof(struct nfc_llcp_sock), -}; - -static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) -{ - struct sock *sk = sock->sk; - struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); - struct nfc_llcp_local *local; - struct nfc_dev *dev; - struct sockaddr_nfc_llcp llcp_addr; - int len, ret = 0; - - pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family); - - if (!addr || addr->sa_family != AF_NFC) - return -EINVAL; - - memset(&llcp_addr, 0, sizeof(llcp_addr)); - len = min_t(unsigned int, sizeof(llcp_addr), alen); - memcpy(&llcp_addr, addr, len); - - /* This is going to be a listening socket, dsap must be 0 */ - if (llcp_addr.dsap != 0) - return -EINVAL; - - lock_sock(sk); - - if (sk->sk_state != LLCP_CLOSED) { - ret = -EBADFD; - goto error; - } - - dev = nfc_get_device(llcp_addr.dev_idx); - if (dev == NULL) { - ret = -ENODEV; - goto error; - } - - local = nfc_llcp_find_local(dev); - if (local == NULL) { - ret = -ENODEV; - goto put_dev; - } - - llcp_sock->dev = dev; - llcp_sock->local = local; - llcp_sock->nfc_protocol = llcp_addr.nfc_protocol; - llcp_sock->service_name_len = min_t(unsigned int, - llcp_addr.service_name_len, NFC_LLCP_MAX_SERVICE_NAME); - llcp_sock->service_name = kmemdup(llcp_addr.service_name, - llcp_sock->service_name_len, GFP_KERNEL); - - llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock); - if (llcp_sock->ssap == LLCP_MAX_SAP) - goto put_dev; - - local->sockets[llcp_sock->ssap] = llcp_sock; - - pr_debug("Socket bound to SAP %d\n", llcp_sock->ssap); - - sk->sk_state = LLCP_BOUND; - -put_dev: - nfc_put_device(dev); - -error: - release_sock(sk); - return ret; -} - -static int llcp_sock_listen(struct socket *sock, int backlog) -{ - struct sock *sk = sock->sk; - int ret = 0; - - pr_debug("sk %p backlog %d\n", sk, backlog); - - lock_sock(sk); - - if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM) - || sk->sk_state != LLCP_BOUND) { - ret = -EBADFD; - goto error; - } - - sk->sk_max_ack_backlog = backlog; - sk->sk_ack_backlog = 0; - - pr_debug("Socket listening\n"); - sk->sk_state = LLCP_LISTEN; - -error: - release_sock(sk); - - return ret; -} - -void nfc_llcp_accept_unlink(struct sock *sk) -{ - struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); - - pr_debug("state %d\n", sk->sk_state); - - list_del_init(&llcp_sock->accept_queue); - sk_acceptq_removed(llcp_sock->parent); - llcp_sock->parent = NULL; - - sock_put(sk); -} - -void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk) -{ - struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); - struct nfc_llcp_sock *llcp_sock_parent = nfc_llcp_sock(parent); - - /* Lock will be free from unlink */ - sock_hold(sk); - - list_add_tail(&llcp_sock->accept_queue, - &llcp_sock_parent->accept_queue); - llcp_sock->parent = parent; - sk_acceptq_added(parent); -} - -struct sock *nfc_llcp_accept_dequeue(struct sock *parent, - struct socket *newsock) -{ - struct nfc_llcp_sock *lsk, *n, *llcp_parent; - struct sock *sk; - - llcp_parent = nfc_llcp_sock(parent); - - list_for_each_entry_safe(lsk, n, &llcp_parent->accept_queue, - accept_queue) { - sk = &lsk->sk; - lock_sock(sk); - - if (sk->sk_state == LLCP_CLOSED) { - release_sock(sk); - nfc_llcp_accept_unlink(sk); - continue; - } - - if (sk->sk_state == LLCP_CONNECTED || !newsock) { - 
nfc_llcp_accept_unlink(sk); - if (newsock) - sock_graft(sk, newsock); - - release_sock(sk); - - pr_debug("Returning sk state %d\n", sk->sk_state); - - return sk; - } - - release_sock(sk); - } - - return NULL; -} - -static int llcp_sock_accept(struct socket *sock, struct socket *newsock, - int flags) -{ - DECLARE_WAITQUEUE(wait, current); - struct sock *sk = sock->sk, *new_sk; - long timeo; - int ret = 0; - - pr_debug("parent %p\n", sk); - - lock_sock_nested(sk, SINGLE_DEPTH_NESTING); - - if (sk->sk_state != LLCP_LISTEN) { - ret = -EBADFD; - goto error; - } - - timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); - - /* Wait for an incoming connection. */ - add_wait_queue_exclusive(sk_sleep(sk), &wait); - while (!(new_sk = nfc_llcp_accept_dequeue(sk, newsock))) { - set_current_state(TASK_INTERRUPTIBLE); - - if (!timeo) { - ret = -EAGAIN; - break; - } - - if (signal_pending(current)) { - ret = sock_intr_errno(timeo); - break; - } - - release_sock(sk); - timeo = schedule_timeout(timeo); - lock_sock_nested(sk, SINGLE_DEPTH_NESTING); - } - __set_current_state(TASK_RUNNING); - remove_wait_queue(sk_sleep(sk), &wait); - - if (ret) - goto error; - - newsock->state = SS_CONNECTED; - - pr_debug("new socket %p\n", new_sk); - -error: - release_sock(sk); - - return ret; -} - -static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr, - int *len, int peer) -{ - struct sockaddr_nfc_llcp *llcp_addr = (struct sockaddr_nfc_llcp *) addr; - struct sock *sk = sock->sk; - struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); - - pr_debug("%p\n", sk); - - addr->sa_family = AF_NFC; - *len = sizeof(struct sockaddr_nfc_llcp); - - llcp_addr->dev_idx = llcp_sock->dev->idx; - llcp_addr->dsap = llcp_sock->dsap; - llcp_addr->ssap = llcp_sock->ssap; - llcp_addr->service_name_len = llcp_sock->service_name_len; - memcpy(llcp_addr->service_name, llcp_sock->service_name, - llcp_addr->service_name_len); - - return 0; -} - -static inline unsigned int llcp_accept_poll(struct sock *parent) -{ - struct nfc_llcp_sock *llcp_sock, *n, *parent_sock; - struct sock *sk; - - parent_sock = nfc_llcp_sock(parent); - - list_for_each_entry_safe(llcp_sock, n, &parent_sock->accept_queue, - accept_queue) { - sk = &llcp_sock->sk; - - if (sk->sk_state == LLCP_CONNECTED) - return POLLIN | POLLRDNORM; - } - - return 0; -} - -static unsigned int llcp_sock_poll(struct file *file, struct socket *sock, - poll_table *wait) -{ - struct sock *sk = sock->sk; - unsigned int mask = 0; - - pr_debug("%p\n", sk); - - sock_poll_wait(file, sk_sleep(sk), wait); - - if (sk->sk_state == LLCP_LISTEN) - return llcp_accept_poll(sk); - - if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) - mask |= POLLERR; - - if (!skb_queue_empty(&sk->sk_receive_queue)) - mask |= POLLIN; - - if (sk->sk_state == LLCP_CLOSED) - mask |= POLLHUP; - - return mask; -} - -static int llcp_sock_release(struct socket *sock) -{ - struct sock *sk = sock->sk; - struct nfc_llcp_local *local; - struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); - - if (!sk) - return 0; - - pr_debug("%p\n", sk); - - local = llcp_sock->local; - if (local == NULL) - return -ENODEV; - - mutex_lock(&local->socket_lock); - - if (llcp_sock == local->sockets[llcp_sock->ssap]) { - local->sockets[llcp_sock->ssap] = NULL; - } else { - struct nfc_llcp_sock *parent, *s, *n; - - parent = local->sockets[llcp_sock->ssap]; - - list_for_each_entry_safe(s, n, &parent->list, list) - if (llcp_sock == s) { - list_del(&s->list); - break; - } - - } - - mutex_unlock(&local->socket_lock); - - lock_sock(sk); - - /* Send a DISC */ 
- if (sk->sk_state == LLCP_CONNECTED) - nfc_llcp_disconnect(llcp_sock); - - if (sk->sk_state == LLCP_LISTEN) { - struct nfc_llcp_sock *lsk, *n; - struct sock *accept_sk; - - list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue, - accept_queue) { - accept_sk = &lsk->sk; - lock_sock(accept_sk); - - nfc_llcp_disconnect(lsk); - nfc_llcp_accept_unlink(accept_sk); - - release_sock(accept_sk); - - sock_set_flag(sk, SOCK_DEAD); - sock_orphan(accept_sk); - sock_put(accept_sk); - } - } - - /* Freeing the SAP */ - if ((sk->sk_state == LLCP_CONNECTED - && llcp_sock->ssap > LLCP_LOCAL_SAP_OFFSET) || - sk->sk_state == LLCP_BOUND || - sk->sk_state == LLCP_LISTEN) - nfc_llcp_put_ssap(llcp_sock->local, llcp_sock->ssap); - - sock_set_flag(sk, SOCK_DEAD); - - release_sock(sk); - - sock_orphan(sk); - sock_put(sk); - - return 0; -} - -static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, - int len, int flags) -{ - struct sock *sk = sock->sk; - struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); - struct sockaddr_nfc_llcp *addr = (struct sockaddr_nfc_llcp *)_addr; - struct nfc_dev *dev; - struct nfc_llcp_local *local; - int ret = 0; - - pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags); - - if (!addr || len < sizeof(struct sockaddr_nfc) || - addr->sa_family != AF_NFC) { - pr_err("Invalid socket\n"); - return -EINVAL; - } - - if (addr->service_name_len == 0 && addr->dsap == 0) { - pr_err("Missing service name or dsap\n"); - return -EINVAL; - } - - pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n", addr->dev_idx, - addr->target_idx, addr->nfc_protocol); - - lock_sock(sk); - - if (sk->sk_state == LLCP_CONNECTED) { - ret = -EISCONN; - goto error; - } - - dev = nfc_get_device(addr->dev_idx); - if (dev == NULL) { - ret = -ENODEV; - goto error; - } - - local = nfc_llcp_find_local(dev); - if (local == NULL) { - ret = -ENODEV; - goto put_dev; - } - - device_lock(&dev->dev); - if (dev->dep_link_up == false) { - ret = -ENOLINK; - device_unlock(&dev->dev); - goto put_dev; - } - device_unlock(&dev->dev); - - if (local->rf_mode == NFC_RF_INITIATOR && - addr->target_idx != local->target_idx) { - ret = -ENOLINK; - goto put_dev; - } - - llcp_sock->dev = dev; - llcp_sock->local = local; - llcp_sock->ssap = nfc_llcp_get_local_ssap(local); - if (llcp_sock->ssap == LLCP_SAP_MAX) { - ret = -ENOMEM; - goto put_dev; - } - if (addr->service_name_len == 0) - llcp_sock->dsap = addr->dsap; - else - llcp_sock->dsap = LLCP_SAP_SDP; - llcp_sock->nfc_protocol = addr->nfc_protocol; - llcp_sock->service_name_len = min_t(unsigned int, - addr->service_name_len, NFC_LLCP_MAX_SERVICE_NAME); - llcp_sock->service_name = kmemdup(addr->service_name, - llcp_sock->service_name_len, GFP_KERNEL); - - local->sockets[llcp_sock->ssap] = llcp_sock; - - ret = nfc_llcp_send_connect(llcp_sock); - if (ret) - goto put_dev; - - sk->sk_state = LLCP_CONNECTED; - - release_sock(sk); - return 0; - -put_dev: - nfc_put_device(dev); - -error: - release_sock(sk); - return ret; -} - -static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags) -{ - int noblock = flags & MSG_DONTWAIT; - struct sock *sk = sock->sk; - unsigned int copied, rlen; - struct sk_buff *skb, *cskb; - int err = 0; - - pr_debug("%p %zu\n", sk, len); - - lock_sock(sk); - - if (sk->sk_state == LLCP_CLOSED && - skb_queue_empty(&sk->sk_receive_queue)) { - release_sock(sk); - return 0; - } - - release_sock(sk); - - if (flags & (MSG_OOB)) - return -EOPNOTSUPP; - - skb = skb_recv_datagram(sk, flags, noblock, &err); - if 
(!skb) { - pr_err("Recv datagram failed state %d %d %d", - sk->sk_state, err, sock_error(sk)); - - if (sk->sk_shutdown & RCV_SHUTDOWN) - return 0; - - return err; - } - - rlen = skb->len; /* real length of skb */ - copied = min_t(unsigned int, rlen, len); - - cskb = skb; - if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) { - if (!(flags & MSG_PEEK)) - skb_queue_head(&sk->sk_receive_queue, skb); - return -EFAULT; - } - - /* Mark read part of skb as used */ - if (!(flags & MSG_PEEK)) { - - /* SOCK_STREAM: re-queue skb if it contains unreceived data */ - if (sk->sk_type == SOCK_STREAM) { - skb_pull(skb, copied); - if (skb->len) { - skb_queue_head(&sk->sk_receive_queue, skb); - goto done; - } - } - - kfree_skb(skb); - } - - /* XXX Queue backlogged skbs */ - -done: - /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */ - if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC)) - copied = rlen; - - return copied; -} - -static const struct proto_ops llcp_sock_ops = { - .family = PF_NFC, - .owner = THIS_MODULE, - .bind = llcp_sock_bind, - .connect = llcp_sock_connect, - .release = llcp_sock_release, - .socketpair = sock_no_socketpair, - .accept = llcp_sock_accept, - .getname = llcp_sock_getname, - .poll = llcp_sock_poll, - .ioctl = sock_no_ioctl, - .listen = llcp_sock_listen, - .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, - .sendmsg = sock_no_sendmsg, - .recvmsg = llcp_sock_recvmsg, - .mmap = sock_no_mmap, -}; - -static void llcp_sock_destruct(struct sock *sk) -{ - struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); - - pr_debug("%p\n", sk); - - if (sk->sk_state == LLCP_CONNECTED) - nfc_put_device(llcp_sock->dev); - - skb_queue_purge(&sk->sk_receive_queue); - - nfc_llcp_sock_free(llcp_sock); - - if (!sock_flag(sk, SOCK_DEAD)) { - pr_err("Freeing alive NFC LLCP socket %p\n", sk); - return; - } -} - -struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp) -{ - struct sock *sk; - struct nfc_llcp_sock *llcp_sock; - - sk = sk_alloc(&init_net, PF_NFC, gfp, &llcp_sock_proto); - if (!sk) - return NULL; - - llcp_sock = nfc_llcp_sock(sk); - - sock_init_data(sock, sk); - sk->sk_state = LLCP_CLOSED; - sk->sk_protocol = NFC_SOCKPROTO_LLCP; - sk->sk_type = type; - sk->sk_destruct = llcp_sock_destruct; - - llcp_sock->ssap = 0; - llcp_sock->dsap = LLCP_SAP_SDP; - llcp_sock->send_n = llcp_sock->send_ack_n = 0; - llcp_sock->recv_n = llcp_sock->recv_ack_n = 0; - llcp_sock->remote_ready = 1; - skb_queue_head_init(&llcp_sock->tx_queue); - skb_queue_head_init(&llcp_sock->tx_pending_queue); - skb_queue_head_init(&llcp_sock->tx_backlog_queue); - INIT_LIST_HEAD(&llcp_sock->list); - INIT_LIST_HEAD(&llcp_sock->accept_queue); - - if (sock != NULL) - sock->state = SS_UNCONNECTED; - - return sk; -} - -void nfc_llcp_sock_free(struct nfc_llcp_sock *sock) -{ - kfree(sock->service_name); - - skb_queue_purge(&sock->tx_queue); - skb_queue_purge(&sock->tx_pending_queue); - skb_queue_purge(&sock->tx_backlog_queue); - - list_del_init(&sock->accept_queue); - - sock->parent = NULL; -} - -static int llcp_sock_create(struct net *net, struct socket *sock, - const struct nfc_protocol *nfc_proto) -{ - struct sock *sk; - - pr_debug("%p\n", sock); - - if (sock->type != SOCK_STREAM && sock->type != SOCK_DGRAM) - return -ESOCKTNOSUPPORT; - - sock->ops = &llcp_sock_ops; - - sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC); - if (sk == NULL) - return -ENOMEM; - - return 0; -} - -static const struct nfc_protocol llcp_nfc_proto = { - .id = 
NFC_SOCKPROTO_LLCP, - .proto = &llcp_sock_proto, - .owner = THIS_MODULE, - .create = llcp_sock_create -}; - -int __init nfc_llcp_sock_init(void) -{ - return nfc_proto_register(&llcp_nfc_proto); -} - -void nfc_llcp_sock_exit(void) -{ - nfc_proto_unregister(&llcp_nfc_proto); -} diff --git a/trunk/net/nfc/nci/core.c b/trunk/net/nfc/nci/core.c index 7650139a1a05..ea66034499ce 100644 --- a/trunk/net/nfc/nci/core.c +++ b/trunk/net/nfc/nci/core.c @@ -25,8 +25,6 @@ * */ -#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ - #include #include #include @@ -81,7 +79,7 @@ static int __nci_request(struct nci_dev *ndev, &ndev->req_completion, timeout); - pr_debug("wait_for_completion return %ld\n", completion_rc); + nfc_dbg("wait_for_completion return %ld", completion_rc); if (completion_rc > 0) { switch (ndev->req_status) { @@ -98,8 +96,8 @@ static int __nci_request(struct nci_dev *ndev, break; } } else { - pr_err("wait_for_completion_interruptible_timeout failed %ld\n", - completion_rc); + nfc_err("wait_for_completion_interruptible_timeout failed %ld", + completion_rc); rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc)); } @@ -128,10 +126,7 @@ static inline int nci_request(struct nci_dev *ndev, static void nci_reset_req(struct nci_dev *ndev, unsigned long opt) { - struct nci_core_reset_cmd cmd; - - cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG; - nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd); + nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 0, NULL); } static void nci_init_req(struct nci_dev *ndev, unsigned long opt) @@ -141,11 +136,17 @@ static void nci_init_req(struct nci_dev *ndev, unsigned long opt) static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt) { + struct nci_core_conn_create_cmd conn_cmd; struct nci_rf_disc_map_cmd cmd; struct disc_map_config *cfg = cmd.mapping_configs; __u8 *num = &cmd.num_mapping_configs; int i; + /* create static rf connection */ + conn_cmd.target_handle = 0; + conn_cmd.num_target_specific_params = 0; + nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, 2, &conn_cmd); + /* set rf mapping configurations */ *num = 0; @@ -154,16 +155,14 @@ static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt) if (ndev->supported_rf_interfaces[i] == NCI_RF_INTERFACE_ISO_DEP) { cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP; - cfg[*num].mode = NCI_DISC_MAP_MODE_POLL | - NCI_DISC_MAP_MODE_LISTEN; - cfg[*num].rf_interface = NCI_RF_INTERFACE_ISO_DEP; + cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH; + cfg[*num].rf_interface_type = NCI_RF_INTERFACE_ISO_DEP; (*num)++; } else if (ndev->supported_rf_interfaces[i] == NCI_RF_INTERFACE_NFC_DEP) { cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP; - cfg[*num].mode = NCI_DISC_MAP_MODE_POLL | - NCI_DISC_MAP_MODE_LISTEN; - cfg[*num].rf_interface = NCI_RF_INTERFACE_NFC_DEP; + cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH; + cfg[*num].rf_interface_type = NCI_RF_INTERFACE_NFC_DEP; (*num)++; } @@ -188,16 +187,16 @@ static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt) || protocols & NFC_PROTO_MIFARE_MASK || protocols & NFC_PROTO_ISO14443_MASK || protocols & NFC_PROTO_NFC_DEP_MASK)) { - cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = - NCI_NFC_A_PASSIVE_POLL_MODE; + cmd.disc_configs[cmd.num_disc_configs].type = + NCI_DISCOVERY_TYPE_POLL_A_PASSIVE; cmd.disc_configs[cmd.num_disc_configs].frequency = 1; cmd.num_disc_configs++; } if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && (protocols & NFC_PROTO_ISO14443_MASK)) { - cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = - 
NCI_NFC_B_PASSIVE_POLL_MODE; + cmd.disc_configs[cmd.num_disc_configs].type = + NCI_DISCOVERY_TYPE_POLL_B_PASSIVE; cmd.disc_configs[cmd.num_disc_configs].frequency = 1; cmd.num_disc_configs++; } @@ -205,8 +204,8 @@ static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt) if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && (protocols & NFC_PROTO_FELICA_MASK || protocols & NFC_PROTO_NFC_DEP_MASK)) { - cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = - NCI_NFC_F_PASSIVE_POLL_MODE; + cmd.disc_configs[cmd.num_disc_configs].type = + NCI_DISCOVERY_TYPE_POLL_F_PASSIVE; cmd.disc_configs[cmd.num_disc_configs].frequency = 1; cmd.num_disc_configs++; } @@ -327,6 +326,8 @@ static void nci_cmd_timer(unsigned long arg) { struct nci_dev *ndev = (void *) arg; + nfc_dbg("entry"); + atomic_set(&ndev->cmd_cnt, 1); queue_work(ndev->cmd_wq, &ndev->cmd_work); } @@ -335,6 +336,8 @@ static int nci_dev_up(struct nfc_dev *nfc_dev) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); + nfc_dbg("entry"); + return nci_open_device(ndev); } @@ -342,6 +345,8 @@ static int nci_dev_down(struct nfc_dev *nfc_dev) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); + nfc_dbg("entry"); + return nci_close_device(ndev); } @@ -350,18 +355,20 @@ static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols) struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); int rc; + nfc_dbg("entry"); + if (test_bit(NCI_DISCOVERY, &ndev->flags)) { - pr_err("unable to start poll, since poll is already active\n"); + nfc_err("unable to start poll, since poll is already active"); return -EBUSY; } if (ndev->target_active_prot) { - pr_err("there is an active target\n"); + nfc_err("there is an active target"); return -EBUSY; } if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) { - pr_debug("target is active, implicitly deactivate...\n"); + nfc_dbg("target is active, implicitly deactivate..."); rc = nci_request(ndev, nci_rf_deactivate_req, 0, msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); @@ -382,8 +389,10 @@ static void nci_stop_poll(struct nfc_dev *nfc_dev) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); + nfc_dbg("entry"); + if (!test_bit(NCI_DISCOVERY, &ndev->flags)) { - pr_err("unable to stop poll, since poll is not active\n"); + nfc_err("unable to stop poll, since poll is not active"); return; } @@ -396,21 +405,21 @@ static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx, { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); - pr_debug("target_idx %d, protocol 0x%x\n", target_idx, protocol); + nfc_dbg("entry, target_idx %d, protocol 0x%x", target_idx, protocol); if (!test_bit(NCI_POLL_ACTIVE, &ndev->flags)) { - pr_err("there is no available target to activate\n"); + nfc_err("there is no available target to activate"); return -EINVAL; } if (ndev->target_active_prot) { - pr_err("there is already an active target\n"); + nfc_err("there is already an active target"); return -EBUSY; } if (!(ndev->target_available_prots & (1 << protocol))) { - pr_err("target does not support the requested protocol 0x%x\n", - protocol); + nfc_err("target does not support the requested protocol 0x%x", + protocol); return -EINVAL; } @@ -424,10 +433,10 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); - pr_debug("target_idx %d\n", target_idx); + nfc_dbg("entry, target_idx %d", target_idx); if (!ndev->target_active_prot) { - pr_err("unable to deactivate target, no active target\n"); + nfc_err("unable to deactivate target, no active target"); return; } @@ -447,10 
+456,10 @@ static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx, struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); int rc; - pr_debug("target_idx %d, len %d\n", target_idx, skb->len); + nfc_dbg("entry, target_idx %d, len %d", target_idx, skb->len); if (!ndev->target_active_prot) { - pr_err("unable to exchange data, no active target\n"); + nfc_err("unable to exchange data, no active target"); return -EINVAL; } @@ -461,7 +470,7 @@ static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx, ndev->data_exchange_cb = cb; ndev->data_exchange_cb_context = cb_context; - rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb); + rc = nci_send_data(ndev, ndev->conn_id, skb); if (rc) clear_bit(NCI_DATA_EXCHANGE, &ndev->flags); @@ -493,7 +502,7 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops, { struct nci_dev *ndev; - pr_debug("supported_protocols 0x%x\n", supported_protocols); + nfc_dbg("entry, supported_protocols 0x%x", supported_protocols); if (!ops->open || !ops->close || !ops->send) return NULL; @@ -533,6 +542,8 @@ EXPORT_SYMBOL(nci_allocate_device); */ void nci_free_device(struct nci_dev *ndev) { + nfc_dbg("entry"); + nfc_free_device(ndev->nfc_dev); kfree(ndev); } @@ -549,6 +560,8 @@ int nci_register_device(struct nci_dev *ndev) struct device *dev = &ndev->nfc_dev->dev; char name[32]; + nfc_dbg("entry"); + rc = nfc_register_device(ndev->nfc_dev); if (rc) goto exit; @@ -611,6 +624,8 @@ EXPORT_SYMBOL(nci_register_device); */ void nci_unregister_device(struct nci_dev *ndev) { + nfc_dbg("entry"); + nci_close_device(ndev); destroy_workqueue(ndev->cmd_wq); @@ -630,7 +645,7 @@ int nci_recv_frame(struct sk_buff *skb) { struct nci_dev *ndev = (struct nci_dev *) skb->dev; - pr_debug("len %d\n", skb->len); + nfc_dbg("entry, len %d", skb->len); if (!ndev || (!test_bit(NCI_UP, &ndev->flags) && !test_bit(NCI_INIT, &ndev->flags))) { @@ -650,7 +665,7 @@ static int nci_send_frame(struct sk_buff *skb) { struct nci_dev *ndev = (struct nci_dev *) skb->dev; - pr_debug("len %d\n", skb->len); + nfc_dbg("entry, len %d", skb->len); if (!ndev) { kfree_skb(skb); @@ -669,11 +684,11 @@ int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload) struct nci_ctrl_hdr *hdr; struct sk_buff *skb; - pr_debug("opcode 0x%x, plen %d\n", opcode, plen); + nfc_dbg("entry, opcode 0x%x, plen %d", opcode, plen); skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL); if (!skb) { - pr_err("no memory for command\n"); + nfc_err("no memory for command"); return -ENOMEM; } @@ -703,7 +718,7 @@ static void nci_tx_work(struct work_struct *work) struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work); struct sk_buff *skb; - pr_debug("credits_cnt %d\n", atomic_read(&ndev->credits_cnt)); + nfc_dbg("entry, credits_cnt %d", atomic_read(&ndev->credits_cnt)); /* Send queued tx data */ while (atomic_read(&ndev->credits_cnt)) { @@ -711,15 +726,12 @@ static void nci_tx_work(struct work_struct *work) if (!skb) return; - /* Check if data flow control is used */ - if (atomic_read(&ndev->credits_cnt) != - NCI_DATA_FLOW_CONTROL_NOT_USED) - atomic_dec(&ndev->credits_cnt); + atomic_dec(&ndev->credits_cnt); - pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n", - nci_pbf(skb->data), - nci_conn_id(skb->data), - nci_plen(skb->data)); + nfc_dbg("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d", + nci_pbf(skb->data), + nci_conn_id(skb->data), + nci_plen(skb->data)); nci_send_frame(skb); } @@ -748,7 +760,7 @@ static void nci_rx_work(struct work_struct *work) break; default: - 
pr_err("unknown MT 0x%x\n", nci_mt(skb->data)); + nfc_err("unknown MT 0x%x", nci_mt(skb->data)); kfree_skb(skb); break; } @@ -762,7 +774,7 @@ static void nci_cmd_work(struct work_struct *work) struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work); struct sk_buff *skb; - pr_debug("cmd_cnt %d\n", atomic_read(&ndev->cmd_cnt)); + nfc_dbg("entry, cmd_cnt %d", atomic_read(&ndev->cmd_cnt)); /* Send queued command */ if (atomic_read(&ndev->cmd_cnt)) { @@ -772,11 +784,11 @@ static void nci_cmd_work(struct work_struct *work) atomic_dec(&ndev->cmd_cnt); - pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n", - nci_pbf(skb->data), - nci_opcode_gid(nci_opcode(skb->data)), - nci_opcode_oid(nci_opcode(skb->data)), - nci_plen(skb->data)); + nfc_dbg("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d", + nci_pbf(skb->data), + nci_opcode_gid(nci_opcode(skb->data)), + nci_opcode_oid(nci_opcode(skb->data)), + nci_plen(skb->data)); nci_send_frame(skb); diff --git a/trunk/net/nfc/nci/data.c b/trunk/net/nfc/nci/data.c index e5756b30e602..e5ed90fc1a9c 100644 --- a/trunk/net/nfc/nci/data.c +++ b/trunk/net/nfc/nci/data.c @@ -21,8 +21,6 @@ * */ -#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ - #include #include #include @@ -42,7 +40,7 @@ void nci_data_exchange_complete(struct nci_dev *ndev, data_exchange_cb_t cb = ndev->data_exchange_cb; void *cb_context = ndev->data_exchange_cb_context; - pr_debug("len %d, err %d\n", skb ? skb->len : 0, err); + nfc_dbg("entry, len %d, err %d", ((skb) ? (skb->len) : (0)), err); if (cb) { ndev->data_exchange_cb = NULL; @@ -51,7 +49,7 @@ void nci_data_exchange_complete(struct nci_dev *ndev, /* forward skb to nfc core */ cb(cb_context, skb, err); } else if (skb) { - pr_err("no rx callback, dropping rx data...\n"); + nfc_err("no rx callback, dropping rx data..."); /* no waiting callback, free skb */ kfree_skb(skb); @@ -92,13 +90,12 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev, int frag_len; int rc = 0; - pr_debug("conn_id 0x%x, total_len %d\n", conn_id, total_len); + nfc_dbg("entry, conn_id 0x%x, total_len %d", conn_id, total_len); __skb_queue_head_init(&frags_q); while (total_len) { - frag_len = - min_t(int, total_len, ndev->max_data_pkt_payload_size); + frag_len = min_t(int, total_len, ndev->max_pkt_payload_size); skb_frag = nci_skb_alloc(ndev, (NCI_DATA_HDR_SIZE + frag_len), @@ -121,8 +118,8 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev, data += frag_len; total_len -= frag_len; - pr_debug("frag_len %d, remaining total_len %d\n", - frag_len, total_len); + nfc_dbg("frag_len %d, remaining total_len %d", + frag_len, total_len); } /* queue all fragments atomically */ @@ -151,10 +148,10 @@ int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb) { int rc = 0; - pr_debug("conn_id 0x%x, plen %d\n", conn_id, skb->len); + nfc_dbg("entry, conn_id 0x%x, plen %d", conn_id, skb->len); /* check if the packet need to be fragmented */ - if (skb->len <= ndev->max_data_pkt_payload_size) { + if (skb->len <= ndev->max_pkt_payload_size) { /* no need to fragment packet */ nci_push_data_hdr(ndev, conn_id, skb, NCI_PBF_LAST); @@ -163,7 +160,7 @@ int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb) /* fragment packet and queue the fragments */ rc = nci_queue_tx_data_frags(ndev, conn_id, skb); if (rc) { - pr_err("failed to fragment tx data packet\n"); + nfc_err("failed to fragment tx data packet"); goto free_exit; } } @@ -193,7 +190,7 @@ static void nci_add_rx_data_frag(struct nci_dev *ndev, /* first, make 
enough room for the already accumulated data */ if (skb_cow_head(skb, reassembly_len)) { - pr_err("error adding room for accumulated rx data\n"); + nfc_err("error adding room for accumulated rx data"); kfree_skb(skb); skb = 0; @@ -230,19 +227,19 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb) { __u8 pbf = nci_pbf(skb->data); - pr_debug("len %d\n", skb->len); + nfc_dbg("entry, len %d", skb->len); - pr_debug("NCI RX: MT=data, PBF=%d, conn_id=%d, plen=%d\n", - nci_pbf(skb->data), - nci_conn_id(skb->data), - nci_plen(skb->data)); + nfc_dbg("NCI RX: MT=data, PBF=%d, conn_id=%d, plen=%d", + nci_pbf(skb->data), + nci_conn_id(skb->data), + nci_plen(skb->data)); /* strip the nci data header */ skb_pull(skb, NCI_DATA_HDR_SIZE); if (ndev->target_active_prot == NFC_PROTO_MIFARE) { /* frame I/F => remove the status byte */ - pr_debug("NFC_PROTO_MIFARE => remove the status byte\n"); + nfc_dbg("NFC_PROTO_MIFARE => remove the status byte"); skb_trim(skb, (skb->len - 1)); } diff --git a/trunk/net/nfc/nci/lib.c b/trunk/net/nfc/nci/lib.c index 6a63e5eb483d..b19dc2fa90e1 100644 --- a/trunk/net/nfc/nci/lib.c +++ b/trunk/net/nfc/nci/lib.c @@ -42,9 +42,12 @@ int nci_to_errno(__u8 code) case NCI_STATUS_REJECTED: return -EBUSY; - case NCI_STATUS_RF_FRAME_CORRUPTED: + case NCI_STATUS_MESSAGE_CORRUPTED: return -EBADMSG; + case NCI_STATUS_BUFFER_FULL: + return -ENOBUFS; + case NCI_STATUS_NOT_INITIALIZED: return -EHOSTDOWN; @@ -77,6 +80,12 @@ int nci_to_errno(__u8 code) case NCI_STATUS_NFCEE_TIMEOUT_ERROR: return -ETIMEDOUT; + case NCI_STATUS_RF_LINK_LOSS_ERROR: + return -ENOLINK; + + case NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED: + return -EDQUOT; + case NCI_STATUS_FAILED: default: return -ENOSYS; diff --git a/trunk/net/nfc/nci/ntf.c b/trunk/net/nfc/nci/ntf.c index b16a8dc2afbe..96633f5cda4f 100644 --- a/trunk/net/nfc/nci/ntf.c +++ b/trunk/net/nfc/nci/ntf.c @@ -25,8 +25,6 @@ * */ -#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ - #include #include #include @@ -45,21 +43,18 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev, struct nci_core_conn_credit_ntf *ntf = (void *) skb->data; int i; - pr_debug("num_entries %d\n", ntf->num_entries); + nfc_dbg("entry, num_entries %d", ntf->num_entries); if (ntf->num_entries > NCI_MAX_NUM_CONN) ntf->num_entries = NCI_MAX_NUM_CONN; /* update the credits */ for (i = 0; i < ntf->num_entries; i++) { - ntf->conn_entries[i].conn_id = - nci_conn_id(&ntf->conn_entries[i].conn_id); - - pr_debug("entry[%d]: conn_id %d, credits %d\n", - i, ntf->conn_entries[i].conn_id, - ntf->conn_entries[i].credits); + nfc_dbg("entry[%d]: conn_id %d, credits %d", i, + ntf->conn_entries[i].conn_id, + ntf->conn_entries[i].credits); - if (ntf->conn_entries[i].conn_id == NCI_STATIC_RF_CONN_ID) { + if (ntf->conn_entries[i].conn_id == ndev->conn_id) { /* found static rf connection */ atomic_add(ntf->conn_entries[i].credits, &ndev->credits_cnt); @@ -71,34 +66,31 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev, queue_work(ndev->tx_wq, &ndev->tx_work); } -static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev, - struct sk_buff *skb) +static void nci_rf_field_info_ntf_packet(struct nci_dev *ndev, + struct sk_buff *skb) { - struct nci_core_intf_error_ntf *ntf = (void *) skb->data; + struct nci_rf_field_info_ntf *ntf = (void *) skb->data; - ntf->conn_id = nci_conn_id(&ntf->conn_id); - - pr_debug("status 0x%x, conn_id %d\n", ntf->status, ntf->conn_id); - - /* complete the data exchange transaction, if exists */ - if 
(test_bit(NCI_DATA_EXCHANGE, &ndev->flags)) - nci_data_exchange_complete(ndev, NULL, -EIO); + nfc_dbg("entry, rf_field_status %d", ntf->rf_field_status); } -static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev, - struct nci_rf_intf_activated_ntf *ntf, __u8 *data) +static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev, + struct nci_rf_activate_ntf *ntf, __u8 *data) { struct rf_tech_specific_params_nfca_poll *nfca_poll; + struct activation_params_nfca_poll_iso_dep *nfca_poll_iso_dep; nfca_poll = &ntf->rf_tech_specific_params.nfca_poll; + nfca_poll_iso_dep = &ntf->activation_params.nfca_poll_iso_dep; nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data)); data += 2; nfca_poll->nfcid1_len = *data++; - pr_debug("sens_res 0x%x, nfcid1_len %d\n", - nfca_poll->sens_res, nfca_poll->nfcid1_len); + nfc_dbg("sens_res 0x%x, nfcid1_len %d", + nfca_poll->sens_res, + nfca_poll->nfcid1_len); memcpy(nfca_poll->nfcid1, data, nfca_poll->nfcid1_len); data += nfca_poll->nfcid1_len; @@ -108,32 +100,32 @@ static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev, if (nfca_poll->sel_res_len != 0) nfca_poll->sel_res = *data++; - pr_debug("sel_res_len %d, sel_res 0x%x\n", - nfca_poll->sel_res_len, - nfca_poll->sel_res); + ntf->rf_interface_type = *data++; + ntf->activation_params_len = *data++; - return data; -} + nfc_dbg("sel_res_len %d, sel_res 0x%x, rf_interface_type %d, activation_params_len %d", + nfca_poll->sel_res_len, + nfca_poll->sel_res, + ntf->rf_interface_type, + ntf->activation_params_len); -static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev, - struct nci_rf_intf_activated_ntf *ntf, __u8 *data) -{ - struct activation_params_nfca_poll_iso_dep *nfca_poll; - - switch (ntf->activation_rf_tech_and_mode) { - case NCI_NFC_A_PASSIVE_POLL_MODE: - nfca_poll = &ntf->activation_params.nfca_poll_iso_dep; - nfca_poll->rats_res_len = *data++; - if (nfca_poll->rats_res_len > 0) { - memcpy(nfca_poll->rats_res, + switch (ntf->rf_interface_type) { + case NCI_RF_INTERFACE_ISO_DEP: + nfca_poll_iso_dep->rats_res_len = *data++; + if (nfca_poll_iso_dep->rats_res_len > 0) { + memcpy(nfca_poll_iso_dep->rats_res, data, - nfca_poll->rats_res_len); + nfca_poll_iso_dep->rats_res_len); } break; + case NCI_RF_INTERFACE_FRAME: + /* no activation params */ + break; + default: - pr_err("unsupported activation_rf_tech_and_mode 0x%x\n", - ntf->activation_rf_tech_and_mode); + nfc_err("unsupported rf_interface_type 0x%x", + ntf->rf_interface_type); return -EPROTO; } @@ -141,7 +133,7 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev, } static void nci_target_found(struct nci_dev *ndev, - struct nci_rf_intf_activated_ntf *ntf) + struct nci_rf_activate_ntf *ntf) { struct nfc_target nfc_tgt; @@ -149,121 +141,66 @@ static void nci_target_found(struct nci_dev *ndev, nfc_tgt.supported_protocols = NFC_PROTO_MIFARE_MASK; else if (ntf->rf_protocol == NCI_RF_PROTOCOL_ISO_DEP) /* 4A */ nfc_tgt.supported_protocols = NFC_PROTO_ISO14443_MASK; - else - nfc_tgt.supported_protocols = 0; nfc_tgt.sens_res = ntf->rf_tech_specific_params.nfca_poll.sens_res; nfc_tgt.sel_res = ntf->rf_tech_specific_params.nfca_poll.sel_res; - nfc_tgt.nfcid1_len = ntf->rf_tech_specific_params.nfca_poll.nfcid1_len; - if (nfc_tgt.nfcid1_len > 0) { - memcpy(nfc_tgt.nfcid1, - ntf->rf_tech_specific_params.nfca_poll.nfcid1, - nfc_tgt.nfcid1_len); - } if (!(nfc_tgt.supported_protocols & ndev->poll_prots)) { - pr_debug("the target found does not have the desired protocol\n"); + nfc_dbg("the target 
found does not have the desired protocol"); return; } - pr_debug("new target found, supported_protocols 0x%x\n", - nfc_tgt.supported_protocols); + nfc_dbg("new target found, supported_protocols 0x%x", + nfc_tgt.supported_protocols); ndev->target_available_prots = nfc_tgt.supported_protocols; - ndev->max_data_pkt_payload_size = ntf->max_data_pkt_payload_size; - ndev->initial_num_credits = ntf->initial_num_credits; - - /* set the available credits to initial value */ - atomic_set(&ndev->credits_cnt, ndev->initial_num_credits); nfc_targets_found(ndev->nfc_dev, &nfc_tgt, 1); } -static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev, - struct sk_buff *skb) +static void nci_rf_activate_ntf_packet(struct nci_dev *ndev, + struct sk_buff *skb) { - struct nci_rf_intf_activated_ntf ntf; + struct nci_rf_activate_ntf ntf; __u8 *data = skb->data; - int err = 0; + int rc = -1; clear_bit(NCI_DISCOVERY, &ndev->flags); set_bit(NCI_POLL_ACTIVE, &ndev->flags); - ntf.rf_discovery_id = *data++; - ntf.rf_interface = *data++; + ntf.target_handle = *data++; ntf.rf_protocol = *data++; - ntf.activation_rf_tech_and_mode = *data++; - ntf.max_data_pkt_payload_size = *data++; - ntf.initial_num_credits = *data++; + ntf.rf_tech_and_mode = *data++; ntf.rf_tech_specific_params_len = *data++; - pr_debug("rf_discovery_id %d\n", ntf.rf_discovery_id); - pr_debug("rf_interface 0x%x\n", ntf.rf_interface); - pr_debug("rf_protocol 0x%x\n", ntf.rf_protocol); - pr_debug("activation_rf_tech_and_mode 0x%x\n", - ntf.activation_rf_tech_and_mode); - pr_debug("max_data_pkt_payload_size 0x%x\n", - ntf.max_data_pkt_payload_size); - pr_debug("initial_num_credits 0x%x\n", ntf.initial_num_credits); - pr_debug("rf_tech_specific_params_len %d\n", - ntf.rf_tech_specific_params_len); - - if (ntf.rf_tech_specific_params_len > 0) { - switch (ntf.activation_rf_tech_and_mode) { - case NCI_NFC_A_PASSIVE_POLL_MODE: - data = nci_extract_rf_params_nfca_passive_poll(ndev, - &ntf, data); - break; - - default: - pr_err("unsupported activation_rf_tech_and_mode 0x%x\n", - ntf.activation_rf_tech_and_mode); - return; - } - } + nfc_dbg("target_handle %d, rf_protocol 0x%x, rf_tech_and_mode 0x%x, rf_tech_specific_params_len %d", + ntf.target_handle, + ntf.rf_protocol, + ntf.rf_tech_and_mode, + ntf.rf_tech_specific_params_len); - ntf.data_exch_rf_tech_and_mode = *data++; - ntf.data_exch_tx_bit_rate = *data++; - ntf.data_exch_rx_bit_rate = *data++; - ntf.activation_params_len = *data++; - - pr_debug("data_exch_rf_tech_and_mode 0x%x\n", - ntf.data_exch_rf_tech_and_mode); - pr_debug("data_exch_tx_bit_rate 0x%x\n", - ntf.data_exch_tx_bit_rate); - pr_debug("data_exch_rx_bit_rate 0x%x\n", - ntf.data_exch_rx_bit_rate); - pr_debug("activation_params_len %d\n", - ntf.activation_params_len); - - if (ntf.activation_params_len > 0) { - switch (ntf.rf_interface) { - case NCI_RF_INTERFACE_ISO_DEP: - err = nci_extract_activation_params_iso_dep(ndev, - &ntf, data); - break; - - case NCI_RF_INTERFACE_FRAME: - /* no activation params */ - break; - - default: - pr_err("unsupported rf_interface 0x%x\n", - ntf.rf_interface); - return; - } + switch (ntf.rf_tech_and_mode) { + case NCI_NFC_A_PASSIVE_POLL_MODE: + rc = nci_rf_activate_nfca_passive_poll(ndev, &ntf, + data); + break; + + default: + nfc_err("unsupported rf_tech_and_mode 0x%x", + ntf.rf_tech_and_mode); + return; } - if (!err) + if (!rc) nci_target_found(ndev, &ntf); } static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { - struct nci_rf_deactivate_ntf *ntf = (void *) skb->data; + __u8 
type = skb->data[0]; - pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason); + nfc_dbg("entry, type 0x%x", type); clear_bit(NCI_POLL_ACTIVE, &ndev->flags); ndev->target_active_prot = 0; @@ -286,11 +223,11 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { __u16 ntf_opcode = nci_opcode(skb->data); - pr_debug("NCI RX: MT=ntf, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n", - nci_pbf(skb->data), - nci_opcode_gid(ntf_opcode), - nci_opcode_oid(ntf_opcode), - nci_plen(skb->data)); + nfc_dbg("NCI RX: MT=ntf, PBF=%d, GID=0x%x, OID=0x%x, plen=%d", + nci_pbf(skb->data), + nci_opcode_gid(ntf_opcode), + nci_opcode_oid(ntf_opcode), + nci_plen(skb->data)); /* strip the nci control header */ skb_pull(skb, NCI_CTRL_HDR_SIZE); @@ -300,12 +237,12 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_core_conn_credits_ntf_packet(ndev, skb); break; - case NCI_OP_CORE_INTF_ERROR_NTF: - nci_core_conn_intf_error_ntf_packet(ndev, skb); + case NCI_OP_RF_FIELD_INFO_NTF: + nci_rf_field_info_ntf_packet(ndev, skb); break; - case NCI_OP_RF_INTF_ACTIVATED_NTF: - nci_rf_intf_activated_ntf_packet(ndev, skb); + case NCI_OP_RF_ACTIVATE_NTF: + nci_rf_activate_ntf_packet(ndev, skb); break; case NCI_OP_RF_DEACTIVATE_NTF: @@ -313,7 +250,7 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) break; default: - pr_err("unknown ntf opcode 0x%x\n", ntf_opcode); + nfc_err("unknown ntf opcode 0x%x", ntf_opcode); break; } diff --git a/trunk/net/nfc/nci/rsp.c b/trunk/net/nfc/nci/rsp.c index 2840ae2f3615..0403d4cd0917 100644 --- a/trunk/net/nfc/nci/rsp.c +++ b/trunk/net/nfc/nci/rsp.c @@ -25,8 +25,6 @@ * */ -#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ - #include #include #include @@ -42,13 +40,12 @@ static void nci_core_reset_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) { struct nci_core_reset_rsp *rsp = (void *) skb->data; - pr_debug("status 0x%x\n", rsp->status); + nfc_dbg("entry, status 0x%x", rsp->status); - if (rsp->status == NCI_STATUS_OK) { + if (rsp->status == NCI_STATUS_OK) ndev->nci_ver = rsp->nci_ver; - pr_debug("nci_ver 0x%x, config_status 0x%x\n", - rsp->nci_ver, rsp->config_status); - } + + nfc_dbg("nci_ver 0x%x", ndev->nci_ver); nci_req_complete(ndev, rsp->status); } @@ -58,16 +55,16 @@ static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) struct nci_core_init_rsp_1 *rsp_1 = (void *) skb->data; struct nci_core_init_rsp_2 *rsp_2; - pr_debug("status 0x%x\n", rsp_1->status); + nfc_dbg("entry, status 0x%x", rsp_1->status); if (rsp_1->status != NCI_STATUS_OK) - goto exit; + return; ndev->nfcc_features = __le32_to_cpu(rsp_1->nfcc_features); ndev->num_supported_rf_interfaces = rsp_1->num_supported_rf_interfaces; if (ndev->num_supported_rf_interfaces > - NCI_MAX_SUPPORTED_RF_INTERFACES) { + NCI_MAX_SUPPORTED_RF_INTERFACES) { ndev->num_supported_rf_interfaces = NCI_MAX_SUPPORTED_RF_INTERFACES; } @@ -76,56 +73,76 @@ static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) rsp_1->supported_rf_interfaces, ndev->num_supported_rf_interfaces); - rsp_2 = (void *) (skb->data + 6 + rsp_1->num_supported_rf_interfaces); + rsp_2 = (void *) (skb->data + 6 + ndev->num_supported_rf_interfaces); ndev->max_logical_connections = rsp_2->max_logical_connections; ndev->max_routing_table_size = __le16_to_cpu(rsp_2->max_routing_table_size); - ndev->max_ctrl_pkt_payload_len = - rsp_2->max_ctrl_pkt_payload_len; - ndev->max_size_for_large_params = - __le16_to_cpu(rsp_2->max_size_for_large_params); - ndev->manufact_id = - 
rsp_2->manufact_id; - ndev->manufact_specific_info = - __le32_to_cpu(rsp_2->manufact_specific_info); - - pr_debug("nfcc_features 0x%x\n", - ndev->nfcc_features); - pr_debug("num_supported_rf_interfaces %d\n", - ndev->num_supported_rf_interfaces); - pr_debug("supported_rf_interfaces[0] 0x%x\n", - ndev->supported_rf_interfaces[0]); - pr_debug("supported_rf_interfaces[1] 0x%x\n", - ndev->supported_rf_interfaces[1]); - pr_debug("supported_rf_interfaces[2] 0x%x\n", - ndev->supported_rf_interfaces[2]); - pr_debug("supported_rf_interfaces[3] 0x%x\n", - ndev->supported_rf_interfaces[3]); - pr_debug("max_logical_connections %d\n", - ndev->max_logical_connections); - pr_debug("max_routing_table_size %d\n", - ndev->max_routing_table_size); - pr_debug("max_ctrl_pkt_payload_len %d\n", - ndev->max_ctrl_pkt_payload_len); - pr_debug("max_size_for_large_params %d\n", - ndev->max_size_for_large_params); - pr_debug("manufact_id 0x%x\n", - ndev->manufact_id); - pr_debug("manufact_specific_info 0x%x\n", - ndev->manufact_specific_info); - -exit: + ndev->max_control_packet_payload_length = + rsp_2->max_control_packet_payload_length; + ndev->rf_sending_buffer_size = + __le16_to_cpu(rsp_2->rf_sending_buffer_size); + ndev->rf_receiving_buffer_size = + __le16_to_cpu(rsp_2->rf_receiving_buffer_size); + ndev->manufacturer_id = + __le16_to_cpu(rsp_2->manufacturer_id); + + nfc_dbg("nfcc_features 0x%x", + ndev->nfcc_features); + nfc_dbg("num_supported_rf_interfaces %d", + ndev->num_supported_rf_interfaces); + nfc_dbg("supported_rf_interfaces[0] 0x%x", + ndev->supported_rf_interfaces[0]); + nfc_dbg("supported_rf_interfaces[1] 0x%x", + ndev->supported_rf_interfaces[1]); + nfc_dbg("supported_rf_interfaces[2] 0x%x", + ndev->supported_rf_interfaces[2]); + nfc_dbg("supported_rf_interfaces[3] 0x%x", + ndev->supported_rf_interfaces[3]); + nfc_dbg("max_logical_connections %d", + ndev->max_logical_connections); + nfc_dbg("max_routing_table_size %d", + ndev->max_routing_table_size); + nfc_dbg("max_control_packet_payload_length %d", + ndev->max_control_packet_payload_length); + nfc_dbg("rf_sending_buffer_size %d", + ndev->rf_sending_buffer_size); + nfc_dbg("rf_receiving_buffer_size %d", + ndev->rf_receiving_buffer_size); + nfc_dbg("manufacturer_id 0x%x", + ndev->manufacturer_id); + nci_req_complete(ndev, rsp_1->status); } +static void nci_core_conn_create_rsp_packet(struct nci_dev *ndev, + struct sk_buff *skb) +{ + struct nci_core_conn_create_rsp *rsp = (void *) skb->data; + + nfc_dbg("entry, status 0x%x", rsp->status); + + if (rsp->status != NCI_STATUS_OK) + return; + + ndev->max_pkt_payload_size = rsp->max_pkt_payload_size; + ndev->initial_num_credits = rsp->initial_num_credits; + ndev->conn_id = rsp->conn_id; + + atomic_set(&ndev->credits_cnt, ndev->initial_num_credits); + + nfc_dbg("max_pkt_payload_size %d", ndev->max_pkt_payload_size); + nfc_dbg("initial_num_credits %d", ndev->initial_num_credits); + nfc_dbg("conn_id %d", ndev->conn_id); +} + static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) { __u8 status = skb->data[0]; - pr_debug("status 0x%x\n", status); + nfc_dbg("entry, status 0x%x", status); nci_req_complete(ndev, status); } @@ -134,7 +151,7 @@ static void nci_rf_disc_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) { __u8 status = skb->data[0]; - pr_debug("status 0x%x\n", status); + nfc_dbg("entry, status 0x%x", status); if (status == NCI_STATUS_OK) set_bit(NCI_DISCOVERY, &ndev->flags); @@ -147,7 +164,7 @@ static void nci_rf_deactivate_rsp_packet(struct nci_dev *ndev, { __u8 status = 
skb->data[0]; - pr_debug("status 0x%x\n", status); + nfc_dbg("entry, status 0x%x", status); clear_bit(NCI_DISCOVERY, &ndev->flags); @@ -161,11 +178,11 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) /* we got a rsp, stop the cmd timer */ del_timer(&ndev->cmd_timer); - pr_debug("NCI RX: MT=rsp, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n", - nci_pbf(skb->data), - nci_opcode_gid(rsp_opcode), - nci_opcode_oid(rsp_opcode), - nci_plen(skb->data)); + nfc_dbg("NCI RX: MT=rsp, PBF=%d, GID=0x%x, OID=0x%x, plen=%d", + nci_pbf(skb->data), + nci_opcode_gid(rsp_opcode), + nci_opcode_oid(rsp_opcode), + nci_plen(skb->data)); /* strip the nci control header */ skb_pull(skb, NCI_CTRL_HDR_SIZE); @@ -179,6 +196,10 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_core_init_rsp_packet(ndev, skb); break; + case NCI_OP_CORE_CONN_CREATE_RSP: + nci_core_conn_create_rsp_packet(ndev, skb); + break; + case NCI_OP_RF_DISCOVER_MAP_RSP: nci_rf_disc_map_rsp_packet(ndev, skb); break; @@ -192,7 +213,7 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) break; default: - pr_err("unknown rsp opcode 0x%x\n", rsp_opcode); + nfc_err("unknown rsp opcode 0x%x", rsp_opcode); break; } diff --git a/trunk/net/nfc/netlink.c b/trunk/net/nfc/netlink.c index 6989dfa28ee2..03f8818e1f16 100644 --- a/trunk/net/nfc/netlink.c +++ b/trunk/net/nfc/netlink.c @@ -21,8 +21,6 @@ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ -#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ - #include #include #include @@ -46,8 +44,6 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = { [NFC_ATTR_DEVICE_NAME] = { .type = NLA_STRING, .len = NFC_DEVICE_NAME_MAXSIZE }, [NFC_ATTR_PROTOCOLS] = { .type = NLA_U32 }, - [NFC_ATTR_COMM_MODE] = { .type = NLA_U8 }, - [NFC_ATTR_RF_MODE] = { .type = NLA_U8 }, }; static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target, @@ -55,6 +51,8 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target, { void *hdr; + nfc_dbg("entry"); + hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, &nfc_genl_family, flags, NFC_CMD_GET_TARGET); if (!hdr) @@ -67,9 +65,6 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target, target->supported_protocols); NLA_PUT_U16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res); NLA_PUT_U8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res); - if (target->nfcid1_len > 0) - NLA_PUT(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len, - target->nfcid1); return genlmsg_end(msg, hdr); @@ -110,6 +105,8 @@ static int nfc_genl_dump_targets(struct sk_buff *skb, struct nfc_dev *dev = (struct nfc_dev *) cb->args[1]; int rc; + nfc_dbg("entry"); + if (!dev) { dev = __get_device_from_cb(cb); if (IS_ERR(dev)) @@ -142,6 +139,8 @@ static int nfc_genl_dump_targets_done(struct netlink_callback *cb) { struct nfc_dev *dev = (struct nfc_dev *) cb->args[1]; + nfc_dbg("entry"); + if (dev) nfc_put_device(dev); @@ -153,6 +152,8 @@ int nfc_genl_targets_found(struct nfc_dev *dev) struct sk_buff *msg; void *hdr; + nfc_dbg("entry"); + dev->genl_data.poll_req_pid = 0; msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); @@ -182,6 +183,8 @@ int nfc_genl_device_added(struct nfc_dev *dev) struct sk_buff *msg; void *hdr; + nfc_dbg("entry"); + msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (!msg) return -ENOMEM; @@ -213,6 +216,8 @@ int nfc_genl_device_removed(struct nfc_dev *dev) struct sk_buff *msg; void *hdr; + nfc_dbg("entry"); + msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (!msg) 
return -ENOMEM; @@ -244,6 +249,8 @@ static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev, { void *hdr; + nfc_dbg("entry"); + hdr = genlmsg_put(msg, pid, seq, &nfc_genl_family, flags, NFC_CMD_GET_DEVICE); if (!hdr) @@ -270,6 +277,8 @@ static int nfc_genl_dump_devices(struct sk_buff *skb, struct nfc_dev *dev = (struct nfc_dev *) cb->args[1]; bool first_call = false; + nfc_dbg("entry"); + if (!iter) { first_call = true; iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL); @@ -310,81 +319,14 @@ static int nfc_genl_dump_devices_done(struct netlink_callback *cb) { struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; + nfc_dbg("entry"); + nfc_device_iter_exit(iter); kfree(iter); return 0; } -int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx, - u8 comm_mode, u8 rf_mode) -{ - struct sk_buff *msg; - void *hdr; - - pr_debug("DEP link is up\n"); - - msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); - if (!msg) - return -ENOMEM; - - hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, - NFC_CMD_DEP_LINK_UP); - if (!hdr) - goto free_msg; - - NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); - if (rf_mode == NFC_RF_INITIATOR) - NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target_idx); - NLA_PUT_U8(msg, NFC_ATTR_COMM_MODE, comm_mode); - NLA_PUT_U8(msg, NFC_ATTR_RF_MODE, rf_mode); - - genlmsg_end(msg, hdr); - - dev->dep_link_up = true; - - genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC); - - return 0; - -nla_put_failure: - genlmsg_cancel(msg, hdr); -free_msg: - nlmsg_free(msg); - return -EMSGSIZE; -} - -int nfc_genl_dep_link_down_event(struct nfc_dev *dev) -{ - struct sk_buff *msg; - void *hdr; - - pr_debug("DEP link is down\n"); - - msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); - if (!msg) - return -ENOMEM; - - hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, - NFC_CMD_DEP_LINK_DOWN); - if (!hdr) - goto free_msg; - - NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); - - genlmsg_end(msg, hdr); - - genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC); - - return 0; - -nla_put_failure: - genlmsg_cancel(msg, hdr); -free_msg: - nlmsg_free(msg); - return -EMSGSIZE; -} - static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *msg; @@ -392,6 +334,8 @@ static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info) u32 idx; int rc = -ENOBUFS; + nfc_dbg("entry"); + if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) return -EINVAL; @@ -429,6 +373,8 @@ static int nfc_genl_dev_up(struct sk_buff *skb, struct genl_info *info) int rc; u32 idx; + nfc_dbg("entry"); + if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) return -EINVAL; @@ -450,6 +396,8 @@ static int nfc_genl_dev_down(struct sk_buff *skb, struct genl_info *info) int rc; u32 idx; + nfc_dbg("entry"); + if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) return -EINVAL; @@ -472,7 +420,7 @@ static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info) u32 idx; u32 protocols; - pr_debug("Poll start\n"); + nfc_dbg("entry"); if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_PROTOCOLS]) @@ -503,6 +451,8 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info) int rc; u32 idx; + nfc_dbg("entry"); + if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) return -EINVAL; @@ -528,67 +478,6 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info) return rc; } -static int nfc_genl_dep_link_up(struct sk_buff *skb, struct genl_info *info) -{ - struct nfc_dev *dev; - int rc, tgt_idx; - u32 idx; - u8 comm, rf; - - pr_debug("DEP link 
up\n"); - - if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || - !info->attrs[NFC_ATTR_COMM_MODE] || - !info->attrs[NFC_ATTR_RF_MODE]) - return -EINVAL; - - idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); - if (!info->attrs[NFC_ATTR_TARGET_INDEX]) - tgt_idx = NFC_TARGET_IDX_ANY; - else - tgt_idx = nla_get_u32(info->attrs[NFC_ATTR_TARGET_INDEX]); - - comm = nla_get_u8(info->attrs[NFC_ATTR_COMM_MODE]); - rf = nla_get_u8(info->attrs[NFC_ATTR_RF_MODE]); - - if (comm != NFC_COMM_ACTIVE && comm != NFC_COMM_PASSIVE) - return -EINVAL; - - if (rf != NFC_RF_INITIATOR && comm != NFC_RF_TARGET) - return -EINVAL; - - dev = nfc_get_device(idx); - if (!dev) - return -ENODEV; - - rc = nfc_dep_link_up(dev, tgt_idx, comm, rf); - - nfc_put_device(dev); - - return rc; -} - -static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info) -{ - struct nfc_dev *dev; - int rc; - u32 idx; - - if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) - return -EINVAL; - - idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); - - dev = nfc_get_device(idx); - if (!dev) - return -ENODEV; - - rc = nfc_dep_link_down(dev); - - nfc_put_device(dev); - return rc; -} - static struct genl_ops nfc_genl_ops[] = { { .cmd = NFC_CMD_GET_DEVICE, @@ -617,16 +506,6 @@ static struct genl_ops nfc_genl_ops[] = { .doit = nfc_genl_stop_poll, .policy = nfc_genl_policy, }, - { - .cmd = NFC_CMD_DEP_LINK_UP, - .doit = nfc_genl_dep_link_up, - .policy = nfc_genl_policy, - }, - { - .cmd = NFC_CMD_DEP_LINK_DOWN, - .doit = nfc_genl_dep_link_down, - .policy = nfc_genl_policy, - }, { .cmd = NFC_CMD_GET_TARGET, .dumpit = nfc_genl_dump_targets, @@ -645,16 +524,18 @@ static int nfc_genl_rcv_nl_event(struct notifier_block *this, if (event != NETLINK_URELEASE || n->protocol != NETLINK_GENERIC) goto out; - pr_debug("NETLINK_URELEASE event from id %d\n", n->pid); + nfc_dbg("NETLINK_URELEASE event from id %d", n->pid); nfc_device_iter_init(&iter); dev = nfc_device_iter_next(&iter); while (dev) { + mutex_lock(&dev->genl_data.genl_data_mutex); if (dev->genl_data.poll_req_pid == n->pid) { nfc_stop_poll(dev); dev->genl_data.poll_req_pid = 0; } + mutex_unlock(&dev->genl_data.genl_data_mutex); dev = nfc_device_iter_next(&iter); } diff --git a/trunk/net/nfc/nfc.h b/trunk/net/nfc/nfc.h index 6d28d75995b0..d86583f4831d 100644 --- a/trunk/net/nfc/nfc.h +++ b/trunk/net/nfc/nfc.h @@ -27,6 +27,13 @@ #include #include +__printf(2, 3) +int nfc_printk(const char *level, const char *fmt, ...); + +#define nfc_info(fmt, arg...) nfc_printk(KERN_INFO, fmt, ##arg) +#define nfc_err(fmt, arg...) nfc_printk(KERN_ERR, fmt, ##arg) +#define nfc_dbg(fmt, arg...) 
pr_debug(fmt "\n", ##arg) + struct nfc_protocol { int id; struct proto *proto; @@ -46,60 +53,6 @@ struct nfc_rawsock { #define to_rawsock_sk(_tx_work) \ ((struct sock *) container_of(_tx_work, struct nfc_rawsock, tx_work)) -#ifdef CONFIG_NFC_LLCP - -void nfc_llcp_mac_is_down(struct nfc_dev *dev); -void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx, - u8 comm_mode, u8 rf_mode); -int nfc_llcp_register_device(struct nfc_dev *dev); -void nfc_llcp_unregister_device(struct nfc_dev *dev); -int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len); -u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *general_bytes_len); -int __init nfc_llcp_init(void); -void nfc_llcp_exit(void); - -#else - -static inline void nfc_llcp_mac_is_down(struct nfc_dev *dev) -{ -} - -static inline void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx, - u8 comm_mode, u8 rf_mode) -{ -} - -static inline int nfc_llcp_register_device(struct nfc_dev *dev) -{ - return 0; -} - -static inline void nfc_llcp_unregister_device(struct nfc_dev *dev) -{ -} - -static inline int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len) -{ - return 0; -} - -static inline u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *gb_len) -{ - *gb_len = 0; - return NULL; -} - -static inline int nfc_llcp_init(void) -{ - return 0; -} - -static inline void nfc_llcp_exit(void) -{ -} - -#endif - int __init rawsock_init(void); void rawsock_exit(void); @@ -122,10 +75,6 @@ int nfc_genl_targets_found(struct nfc_dev *dev); int nfc_genl_device_added(struct nfc_dev *dev); int nfc_genl_device_removed(struct nfc_dev *dev); -int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx, - u8 comm_mode, u8 rf_mode); -int nfc_genl_dep_link_down_event(struct nfc_dev *dev); - struct nfc_dev *nfc_get_device(unsigned idx); static inline void nfc_put_device(struct nfc_dev *dev) @@ -160,11 +109,6 @@ int nfc_start_poll(struct nfc_dev *dev, u32 protocols); int nfc_stop_poll(struct nfc_dev *dev); -int nfc_dep_link_up(struct nfc_dev *dev, int target_idx, - u8 comm_mode, u8 rf_mode); - -int nfc_dep_link_down(struct nfc_dev *dev); - int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol); int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx); diff --git a/trunk/net/nfc/rawsock.c b/trunk/net/nfc/rawsock.c index 2e2f8c6a61fe..ee7b2b365ef2 100644 --- a/trunk/net/nfc/rawsock.c +++ b/trunk/net/nfc/rawsock.c @@ -21,8 +21,6 @@ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
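A minimal usage sketch (editorial aside, not part of the patch) for the nfc_dbg()/nfc_err() helpers declared in net/nfc/nfc.h above: nfc_dbg() appends the trailing newline itself and nfc_err() routes through nfc_printk(KERN_ERR, ...), which is why the converted call sites drop the "\n" from their format strings. The function below is hypothetical and only illustrates the calling convention.

static void nfc_example_report(struct nfc_dev *ndev, int err)
{
	nfc_dbg("entry");			/* expands to pr_debug("entry\n") */
	nfc_dbg("dev %p, err %d", ndev, err);	/* printf-style arguments as usual */

	if (err)
		nfc_err("operation failed (%d)", err);	/* goes through nfc_printk(KERN_ERR, ...) */
}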
*/ -#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ - #include #include #include @@ -31,7 +29,7 @@ static void rawsock_write_queue_purge(struct sock *sk) { - pr_debug("sk=%p\n", sk); + nfc_dbg("sk=%p", sk); spin_lock_bh(&sk->sk_write_queue.lock); __skb_queue_purge(&sk->sk_write_queue); @@ -41,7 +39,7 @@ static void rawsock_write_queue_purge(struct sock *sk) static void rawsock_report_error(struct sock *sk, int err) { - pr_debug("sk=%p err=%d\n", sk, err); + nfc_dbg("sk=%p err=%d", sk, err); sk->sk_shutdown = SHUTDOWN_MASK; sk->sk_err = -err; @@ -54,7 +52,7 @@ static int rawsock_release(struct socket *sock) { struct sock *sk = sock->sk; - pr_debug("sock=%p\n", sock); + nfc_dbg("sock=%p", sock); sock_orphan(sk); sock_put(sk); @@ -70,14 +68,14 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr, struct nfc_dev *dev; int rc = 0; - pr_debug("sock=%p sk=%p flags=%d\n", sock, sk, flags); + nfc_dbg("sock=%p sk=%p flags=%d", sock, sk, flags); if (!addr || len < sizeof(struct sockaddr_nfc) || addr->sa_family != AF_NFC) return -EINVAL; - pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n", - addr->dev_idx, addr->target_idx, addr->nfc_protocol); + nfc_dbg("addr dev_idx=%u target_idx=%u protocol=%u", addr->dev_idx, + addr->target_idx, addr->nfc_protocol); lock_sock(sk); @@ -138,7 +136,7 @@ static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb, BUG_ON(in_irq()); - pr_debug("sk=%p err=%d\n", sk, err); + nfc_dbg("sk=%p err=%d", sk, err); if (err) goto error; @@ -174,7 +172,7 @@ static void rawsock_tx_work(struct work_struct *work) struct sk_buff *skb; int rc; - pr_debug("sk=%p target_idx=%u\n", sk, target_idx); + nfc_dbg("sk=%p target_idx=%u", sk, target_idx); if (sk->sk_shutdown & SEND_SHUTDOWN) { rawsock_write_queue_purge(sk); @@ -200,7 +198,7 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb; int rc; - pr_debug("sock=%p sk=%p len=%zu\n", sock, sk, len); + nfc_dbg("sock=%p sk=%p len=%zu", sock, sk, len); if (msg->msg_namelen) return -EOPNOTSUPP; @@ -208,10 +206,13 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock, if (sock->state != SS_CONNECTED) return -ENOTCONN; - skb = nfc_alloc_send_skb(dev, sk, msg->msg_flags, len, &rc); - if (skb == NULL) + skb = sock_alloc_send_skb(sk, len + dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE, + msg->msg_flags & MSG_DONTWAIT, &rc); + if (!skb) return rc; + skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); + rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); if (rc < 0) { kfree_skb(skb); @@ -238,7 +239,7 @@ static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock, int copied; int rc; - pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags); + nfc_dbg("sock=%p sk=%p len=%zu flags=%d", sock, sk, len, flags); skb = skb_recv_datagram(sk, flags, noblock, &rc); if (!skb) @@ -282,7 +283,7 @@ static const struct proto_ops rawsock_ops = { static void rawsock_destruct(struct sock *sk) { - pr_debug("sk=%p\n", sk); + nfc_dbg("sk=%p", sk); if (sk->sk_state == TCP_ESTABLISHED) { nfc_deactivate_target(nfc_rawsock(sk)->dev, @@ -293,7 +294,7 @@ static void rawsock_destruct(struct sock *sk) skb_queue_purge(&sk->sk_receive_queue); if (!sock_flag(sk, SOCK_DEAD)) { - pr_err("Freeing alive NFC raw socket %p\n", sk); + nfc_err("Freeing alive NFC raw socket %p", sk); return; } } @@ -303,14 +304,14 @@ static int rawsock_create(struct net *net, struct socket *sock, { struct sock *sk; - pr_debug("sock=%p\n", sock); + nfc_dbg("sock=%p", 
sock); if (sock->type != SOCK_SEQPACKET) return -ESOCKTNOSUPPORT; sock->ops = &rawsock_ops; - sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto); + sk = sk_alloc(net, PF_NFC, GFP_KERNEL, nfc_proto->proto); if (!sk) return -ENOMEM; diff --git a/trunk/net/openvswitch/Kconfig b/trunk/net/openvswitch/Kconfig deleted file mode 100644 index d9ea33c361be..000000000000 --- a/trunk/net/openvswitch/Kconfig +++ /dev/null @@ -1,28 +0,0 @@ -# -# Open vSwitch -# - -config OPENVSWITCH - tristate "Open vSwitch" - ---help--- - Open vSwitch is a multilayer Ethernet switch targeted at virtualized - environments. In addition to supporting a variety of features - expected in a traditional hardware switch, it enables fine-grained - programmatic extension and flow-based control of the network. This - control is useful in a wide variety of applications but is - particularly important in multi-server virtualization deployments, - which are often characterized by highly dynamic endpoints and the - need to maintain logical abstractions for multiple tenants. - - The Open vSwitch datapath provides an in-kernel fast path for packet - forwarding. It is complemented by a userspace daemon, ovs-vswitchd, - which is able to accept configuration from a variety of sources and - translate it into packet processing rules. - - See http://openvswitch.org for more information and userspace - utilities. - - To compile this code as a module, choose M here: the module will be - called openvswitch. - - If unsure, say N. diff --git a/trunk/net/openvswitch/Makefile b/trunk/net/openvswitch/Makefile deleted file mode 100644 index 15e7384745c1..000000000000 --- a/trunk/net/openvswitch/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -# -# Makefile for Open vSwitch. -# - -obj-$(CONFIG_OPENVSWITCH) += openvswitch.o - -openvswitch-y := \ - actions.o \ - datapath.o \ - dp_notify.o \ - flow.o \ - vport.o \ - vport-internal_dev.o \ - vport-netdev.o \ diff --git a/trunk/net/openvswitch/actions.c b/trunk/net/openvswitch/actions.c deleted file mode 100644 index 2725d1bdf291..000000000000 --- a/trunk/net/openvswitch/actions.c +++ /dev/null @@ -1,415 +0,0 @@ -/* - * Copyright (c) 2007-2011 Nicira Networks. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "datapath.h" -#include "vport.h" - -static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, - const struct nlattr *attr, int len, bool keep_skb); - -static int make_writable(struct sk_buff *skb, int write_len) -{ - if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) - return 0; - - return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); -} - -/* remove VLAN header from packet and update csum accrodingly. 
*/ -static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci) -{ - struct vlan_hdr *vhdr; - int err; - - err = make_writable(skb, VLAN_ETH_HLEN); - if (unlikely(err)) - return err; - - if (skb->ip_summed == CHECKSUM_COMPLETE) - skb->csum = csum_sub(skb->csum, csum_partial(skb->data - + ETH_HLEN, VLAN_HLEN, 0)); - - vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); - *current_tci = vhdr->h_vlan_TCI; - - memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); - __skb_pull(skb, VLAN_HLEN); - - vlan_set_encap_proto(skb, vhdr); - skb->mac_header += VLAN_HLEN; - skb_reset_mac_len(skb); - - return 0; -} - -static int pop_vlan(struct sk_buff *skb) -{ - __be16 tci; - int err; - - if (likely(vlan_tx_tag_present(skb))) { - skb->vlan_tci = 0; - } else { - if (unlikely(skb->protocol != htons(ETH_P_8021Q) || - skb->len < VLAN_ETH_HLEN)) - return 0; - - err = __pop_vlan_tci(skb, &tci); - if (err) - return err; - } - /* move next vlan tag to hw accel tag */ - if (likely(skb->protocol != htons(ETH_P_8021Q) || - skb->len < VLAN_ETH_HLEN)) - return 0; - - err = __pop_vlan_tci(skb, &tci); - if (unlikely(err)) - return err; - - __vlan_hwaccel_put_tag(skb, ntohs(tci)); - return 0; -} - -static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan) -{ - if (unlikely(vlan_tx_tag_present(skb))) { - u16 current_tag; - - /* push down current VLAN tag */ - current_tag = vlan_tx_tag_get(skb); - - if (!__vlan_put_tag(skb, current_tag)) - return -ENOMEM; - - if (skb->ip_summed == CHECKSUM_COMPLETE) - skb->csum = csum_add(skb->csum, csum_partial(skb->data - + ETH_HLEN, VLAN_HLEN, 0)); - - } - __vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT); - return 0; -} - -static int set_eth_addr(struct sk_buff *skb, - const struct ovs_key_ethernet *eth_key) -{ - int err; - err = make_writable(skb, ETH_HLEN); - if (unlikely(err)) - return err; - - memcpy(eth_hdr(skb)->h_source, eth_key->eth_src, ETH_ALEN); - memcpy(eth_hdr(skb)->h_dest, eth_key->eth_dst, ETH_ALEN); - - return 0; -} - -static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, - __be32 *addr, __be32 new_addr) -{ - int transport_len = skb->len - skb_transport_offset(skb); - - if (nh->protocol == IPPROTO_TCP) { - if (likely(transport_len >= sizeof(struct tcphdr))) - inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb, - *addr, new_addr, 1); - } else if (nh->protocol == IPPROTO_UDP) { - if (likely(transport_len >= sizeof(struct udphdr))) - inet_proto_csum_replace4(&udp_hdr(skb)->check, skb, - *addr, new_addr, 1); - } - - csum_replace4(&nh->check, *addr, new_addr); - skb->rxhash = 0; - *addr = new_addr; -} - -static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl) -{ - csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8)); - nh->ttl = new_ttl; -} - -static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key) -{ - struct iphdr *nh; - int err; - - err = make_writable(skb, skb_network_offset(skb) + - sizeof(struct iphdr)); - if (unlikely(err)) - return err; - - nh = ip_hdr(skb); - - if (ipv4_key->ipv4_src != nh->saddr) - set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src); - - if (ipv4_key->ipv4_dst != nh->daddr) - set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst); - - if (ipv4_key->ipv4_tos != nh->tos) - ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos); - - if (ipv4_key->ipv4_ttl != nh->ttl) - set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl); - - return 0; -} - -/* Must follow make_writable() since that can move the skb data. 
*/ -static void set_tp_port(struct sk_buff *skb, __be16 *port, - __be16 new_port, __sum16 *check) -{ - inet_proto_csum_replace2(check, skb, *port, new_port, 0); - *port = new_port; - skb->rxhash = 0; -} - -static int set_udp_port(struct sk_buff *skb, - const struct ovs_key_udp *udp_port_key) -{ - struct udphdr *uh; - int err; - - err = make_writable(skb, skb_transport_offset(skb) + - sizeof(struct udphdr)); - if (unlikely(err)) - return err; - - uh = udp_hdr(skb); - if (udp_port_key->udp_src != uh->source) - set_tp_port(skb, &uh->source, udp_port_key->udp_src, &uh->check); - - if (udp_port_key->udp_dst != uh->dest) - set_tp_port(skb, &uh->dest, udp_port_key->udp_dst, &uh->check); - - return 0; -} - -static int set_tcp_port(struct sk_buff *skb, - const struct ovs_key_tcp *tcp_port_key) -{ - struct tcphdr *th; - int err; - - err = make_writable(skb, skb_transport_offset(skb) + - sizeof(struct tcphdr)); - if (unlikely(err)) - return err; - - th = tcp_hdr(skb); - if (tcp_port_key->tcp_src != th->source) - set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check); - - if (tcp_port_key->tcp_dst != th->dest) - set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check); - - return 0; -} - -static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port) -{ - struct vport *vport; - - if (unlikely(!skb)) - return -ENOMEM; - - vport = rcu_dereference(dp->ports[out_port]); - if (unlikely(!vport)) { - kfree_skb(skb); - return -ENODEV; - } - - ovs_vport_send(vport, skb); - return 0; -} - -static int output_userspace(struct datapath *dp, struct sk_buff *skb, - const struct nlattr *attr) -{ - struct dp_upcall_info upcall; - const struct nlattr *a; - int rem; - - upcall.cmd = OVS_PACKET_CMD_ACTION; - upcall.key = &OVS_CB(skb)->flow->key; - upcall.userdata = NULL; - upcall.pid = 0; - - for (a = nla_data(attr), rem = nla_len(attr); rem > 0; - a = nla_next(a, &rem)) { - switch (nla_type(a)) { - case OVS_USERSPACE_ATTR_USERDATA: - upcall.userdata = a; - break; - - case OVS_USERSPACE_ATTR_PID: - upcall.pid = nla_get_u32(a); - break; - } - } - - return ovs_dp_upcall(dp, skb, &upcall); -} - -static int sample(struct datapath *dp, struct sk_buff *skb, - const struct nlattr *attr) -{ - const struct nlattr *acts_list = NULL; - const struct nlattr *a; - int rem; - - for (a = nla_data(attr), rem = nla_len(attr); rem > 0; - a = nla_next(a, &rem)) { - switch (nla_type(a)) { - case OVS_SAMPLE_ATTR_PROBABILITY: - if (net_random() >= nla_get_u32(a)) - return 0; - break; - - case OVS_SAMPLE_ATTR_ACTIONS: - acts_list = a; - break; - } - } - - return do_execute_actions(dp, skb, nla_data(acts_list), - nla_len(acts_list), true); -} - -static int execute_set_action(struct sk_buff *skb, - const struct nlattr *nested_attr) -{ - int err = 0; - - switch (nla_type(nested_attr)) { - case OVS_KEY_ATTR_PRIORITY: - skb->priority = nla_get_u32(nested_attr); - break; - - case OVS_KEY_ATTR_ETHERNET: - err = set_eth_addr(skb, nla_data(nested_attr)); - break; - - case OVS_KEY_ATTR_IPV4: - err = set_ipv4(skb, nla_data(nested_attr)); - break; - - case OVS_KEY_ATTR_TCP: - err = set_tcp_port(skb, nla_data(nested_attr)); - break; - - case OVS_KEY_ATTR_UDP: - err = set_udp_port(skb, nla_data(nested_attr)); - break; - } - - return err; -} - -/* Execute a list of actions against 'skb'. 
*/ -static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, - const struct nlattr *attr, int len, bool keep_skb) -{ - /* Every output action needs a separate clone of 'skb', but the common - * case is just a single output action, so that doing a clone and - * then freeing the original skbuff is wasteful. So the following code - * is slightly obscure just to avoid that. */ - int prev_port = -1; - const struct nlattr *a; - int rem; - - for (a = attr, rem = len; rem > 0; - a = nla_next(a, &rem)) { - int err = 0; - - if (prev_port != -1) { - do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port); - prev_port = -1; - } - - switch (nla_type(a)) { - case OVS_ACTION_ATTR_OUTPUT: - prev_port = nla_get_u32(a); - break; - - case OVS_ACTION_ATTR_USERSPACE: - output_userspace(dp, skb, a); - break; - - case OVS_ACTION_ATTR_PUSH_VLAN: - err = push_vlan(skb, nla_data(a)); - if (unlikely(err)) /* skb already freed. */ - return err; - break; - - case OVS_ACTION_ATTR_POP_VLAN: - err = pop_vlan(skb); - break; - - case OVS_ACTION_ATTR_SET: - err = execute_set_action(skb, nla_data(a)); - break; - - case OVS_ACTION_ATTR_SAMPLE: - err = sample(dp, skb, a); - break; - } - - if (unlikely(err)) { - kfree_skb(skb); - return err; - } - } - - if (prev_port != -1) { - if (keep_skb) - skb = skb_clone(skb, GFP_ATOMIC); - - do_output(dp, skb, prev_port); - } else if (!keep_skb) - consume_skb(skb); - - return 0; -} - -/* Execute a list of actions against 'skb'. */ -int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb) -{ - struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); - - return do_execute_actions(dp, skb, acts->actions, - acts->actions_len, false); -} diff --git a/trunk/net/openvswitch/datapath.c b/trunk/net/openvswitch/datapath.c deleted file mode 100644 index 9a2725114e99..000000000000 --- a/trunk/net/openvswitch/datapath.c +++ /dev/null @@ -1,1912 +0,0 @@ -/* - * Copyright (c) 2007-2011 Nicira Networks. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "datapath.h" -#include "flow.h" -#include "vport-internal_dev.h" - -/** - * DOC: Locking: - * - * Writes to device state (add/remove datapath, port, set operations on vports, - * etc.) are protected by RTNL. - * - * Writes to other state (flow table modifications, set miscellaneous datapath - * parameters, etc.) are protected by genl_mutex. The RTNL lock nests inside - * genl_mutex. - * - * Reads are protected by RCU. 
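As an editorial aside (not part of the patch), a minimal sketch of the reader/writer discipline the locking comment above describes, using the datapath port table as the example; both helper names are illustrative, not functions from this file.

/* Reader side: runs under rcu_read_lock() and never blocks. */
static bool example_port_in_use(struct datapath *dp, int port_no)
{
	bool in_use;

	rcu_read_lock();
	in_use = rcu_dereference(dp->ports[port_no]) != NULL;
	rcu_read_unlock();

	return in_use;
}

/* Writer side: device-state writers hold RTNL and publish the new
 * pointer with rcu_assign_pointer(), so readers need no lock at all. */
static void example_attach_port(struct datapath *dp, int port_no,
				struct vport *vport)
{
	ASSERT_RTNL();
	rcu_assign_pointer(dp->ports[port_no], vport);
}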
- * - * There are a few special cases (mostly stats) that have their own - * synchronization but they nest under all of above and don't interact with - * each other. - */ - -/* Global list of datapaths to enable dumping them all out. - * Protected by genl_mutex. - */ -static LIST_HEAD(dps); - -#define REHASH_FLOW_INTERVAL (10 * 60 * HZ) -static void rehash_flow_table(struct work_struct *work); -static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table); - -static struct vport *new_vport(const struct vport_parms *); -static int queue_gso_packets(int dp_ifindex, struct sk_buff *, - const struct dp_upcall_info *); -static int queue_userspace_packet(int dp_ifindex, struct sk_buff *, - const struct dp_upcall_info *); - -/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */ -static struct datapath *get_dp(int dp_ifindex) -{ - struct datapath *dp = NULL; - struct net_device *dev; - - rcu_read_lock(); - dev = dev_get_by_index_rcu(&init_net, dp_ifindex); - if (dev) { - struct vport *vport = ovs_internal_dev_get_vport(dev); - if (vport) - dp = vport->dp; - } - rcu_read_unlock(); - - return dp; -} - -/* Must be called with rcu_read_lock or RTNL lock. */ -const char *ovs_dp_name(const struct datapath *dp) -{ - struct vport *vport = rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]); - return vport->ops->get_name(vport); -} - -static int get_dpifindex(struct datapath *dp) -{ - struct vport *local; - int ifindex; - - rcu_read_lock(); - - local = rcu_dereference(dp->ports[OVSP_LOCAL]); - if (local) - ifindex = local->ops->get_ifindex(local); - else - ifindex = 0; - - rcu_read_unlock(); - - return ifindex; -} - -static void destroy_dp_rcu(struct rcu_head *rcu) -{ - struct datapath *dp = container_of(rcu, struct datapath, rcu); - - ovs_flow_tbl_destroy((__force struct flow_table *)dp->table); - free_percpu(dp->stats_percpu); - kfree(dp); -} - -/* Called with RTNL lock and genl_lock. */ -static struct vport *new_vport(const struct vport_parms *parms) -{ - struct vport *vport; - - vport = ovs_vport_add(parms); - if (!IS_ERR(vport)) { - struct datapath *dp = parms->dp; - - rcu_assign_pointer(dp->ports[parms->port_no], vport); - list_add(&vport->node, &dp->port_list); - } - - return vport; -} - -/* Called with RTNL lock. */ -void ovs_dp_detach_port(struct vport *p) -{ - ASSERT_RTNL(); - - /* First drop references to device. */ - list_del(&p->node); - rcu_assign_pointer(p->dp->ports[p->port_no], NULL); - - /* Then destroy it. */ - ovs_vport_del(p); -} - -/* Must be called with rcu_read_lock. */ -void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb) -{ - struct datapath *dp = p->dp; - struct sw_flow *flow; - struct dp_stats_percpu *stats; - struct sw_flow_key key; - u64 *stats_counter; - int error; - int key_len; - - stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id()); - - /* Extract flow from 'skb' into 'key'. */ - error = ovs_flow_extract(skb, p->port_no, &key, &key_len); - if (unlikely(error)) { - kfree_skb(skb); - return; - } - - /* Look up flow. */ - flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len); - if (unlikely(!flow)) { - struct dp_upcall_info upcall; - - upcall.cmd = OVS_PACKET_CMD_MISS; - upcall.key = &key; - upcall.userdata = NULL; - upcall.pid = p->upcall_pid; - ovs_dp_upcall(dp, skb, &upcall); - consume_skb(skb); - stats_counter = &stats->n_missed; - goto out; - } - - OVS_CB(skb)->flow = flow; - - stats_counter = &stats->n_hit; - ovs_flow_used(OVS_CB(skb)->flow, skb); - ovs_execute_actions(dp, skb); - -out: - /* Update datapath statistics. 
*/ - u64_stats_update_begin(&stats->sync); - (*stats_counter)++; - u64_stats_update_end(&stats->sync); -} - -static struct genl_family dp_packet_genl_family = { - .id = GENL_ID_GENERATE, - .hdrsize = sizeof(struct ovs_header), - .name = OVS_PACKET_FAMILY, - .version = OVS_PACKET_VERSION, - .maxattr = OVS_PACKET_ATTR_MAX -}; - -int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb, - const struct dp_upcall_info *upcall_info) -{ - struct dp_stats_percpu *stats; - int dp_ifindex; - int err; - - if (upcall_info->pid == 0) { - err = -ENOTCONN; - goto err; - } - - dp_ifindex = get_dpifindex(dp); - if (!dp_ifindex) { - err = -ENODEV; - goto err; - } - - if (!skb_is_gso(skb)) - err = queue_userspace_packet(dp_ifindex, skb, upcall_info); - else - err = queue_gso_packets(dp_ifindex, skb, upcall_info); - if (err) - goto err; - - return 0; - -err: - stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id()); - - u64_stats_update_begin(&stats->sync); - stats->n_lost++; - u64_stats_update_end(&stats->sync); - - return err; -} - -static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb, - const struct dp_upcall_info *upcall_info) -{ - struct dp_upcall_info later_info; - struct sw_flow_key later_key; - struct sk_buff *segs, *nskb; - int err; - - segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - /* Queue all of the segments. */ - skb = segs; - do { - err = queue_userspace_packet(dp_ifindex, skb, upcall_info); - if (err) - break; - - if (skb == segs && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) { - /* The initial flow key extracted by ovs_flow_extract() - * in this case is for a first fragment, so we need to - * properly mark later fragments. - */ - later_key = *upcall_info->key; - later_key.ip.frag = OVS_FRAG_TYPE_LATER; - - later_info = *upcall_info; - later_info.key = &later_key; - upcall_info = &later_info; - } - } while ((skb = skb->next)); - - /* Free all of the segments. 
*/ - skb = segs; - do { - nskb = skb->next; - if (err) - kfree_skb(skb); - else - consume_skb(skb); - } while ((skb = nskb)); - return err; -} - -static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb, - const struct dp_upcall_info *upcall_info) -{ - struct ovs_header *upcall; - struct sk_buff *nskb = NULL; - struct sk_buff *user_skb; /* to be queued to userspace */ - struct nlattr *nla; - unsigned int len; - int err; - - if (vlan_tx_tag_present(skb)) { - nskb = skb_clone(skb, GFP_ATOMIC); - if (!nskb) - return -ENOMEM; - - nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb)); - if (!skb) - return -ENOMEM; - - nskb->vlan_tci = 0; - skb = nskb; - } - - if (nla_attr_size(skb->len) > USHRT_MAX) { - err = -EFBIG; - goto out; - } - - len = sizeof(struct ovs_header); - len += nla_total_size(skb->len); - len += nla_total_size(FLOW_BUFSIZE); - if (upcall_info->cmd == OVS_PACKET_CMD_ACTION) - len += nla_total_size(8); - - user_skb = genlmsg_new(len, GFP_ATOMIC); - if (!user_skb) { - err = -ENOMEM; - goto out; - } - - upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, - 0, upcall_info->cmd); - upcall->dp_ifindex = dp_ifindex; - - nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY); - ovs_flow_to_nlattrs(upcall_info->key, user_skb); - nla_nest_end(user_skb, nla); - - if (upcall_info->userdata) - nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA, - nla_get_u64(upcall_info->userdata)); - - nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len); - - skb_copy_and_csum_dev(skb, nla_data(nla)); - - err = genlmsg_unicast(&init_net, user_skb, upcall_info->pid); - -out: - kfree_skb(nskb); - return err; -} - -/* Called with genl_mutex. */ -static int flush_flows(int dp_ifindex) -{ - struct flow_table *old_table; - struct flow_table *new_table; - struct datapath *dp; - - dp = get_dp(dp_ifindex); - if (!dp) - return -ENODEV; - - old_table = genl_dereference(dp->table); - new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS); - if (!new_table) - return -ENOMEM; - - rcu_assign_pointer(dp->table, new_table); - - ovs_flow_tbl_deferred_destroy(old_table); - return 0; -} - -static int validate_actions(const struct nlattr *attr, - const struct sw_flow_key *key, int depth); - -static int validate_sample(const struct nlattr *attr, - const struct sw_flow_key *key, int depth) -{ - const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1]; - const struct nlattr *probability, *actions; - const struct nlattr *a; - int rem; - - memset(attrs, 0, sizeof(attrs)); - nla_for_each_nested(a, attr, rem) { - int type = nla_type(a); - if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type]) - return -EINVAL; - attrs[type] = a; - } - if (rem) - return -EINVAL; - - probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY]; - if (!probability || nla_len(probability) != sizeof(u32)) - return -EINVAL; - - actions = attrs[OVS_SAMPLE_ATTR_ACTIONS]; - if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN)) - return -EINVAL; - return validate_actions(actions, key, depth + 1); -} - -static int validate_set(const struct nlattr *a, - const struct sw_flow_key *flow_key) -{ - const struct nlattr *ovs_key = nla_data(a); - int key_type = nla_type(ovs_key); - - /* There can be only one key in a action */ - if (nla_total_size(nla_len(ovs_key)) != nla_len(a)) - return -EINVAL; - - if (key_type > OVS_KEY_ATTR_MAX || - nla_len(ovs_key) != ovs_key_lens[key_type]) - return -EINVAL; - - switch (key_type) { - const struct ovs_key_ipv4 *ipv4_key; - - case OVS_KEY_ATTR_PRIORITY: - case OVS_KEY_ATTR_ETHERNET: - break; - - case 
OVS_KEY_ATTR_IPV4: - if (flow_key->eth.type != htons(ETH_P_IP)) - return -EINVAL; - - if (!flow_key->ipv4.addr.src || !flow_key->ipv4.addr.dst) - return -EINVAL; - - ipv4_key = nla_data(ovs_key); - if (ipv4_key->ipv4_proto != flow_key->ip.proto) - return -EINVAL; - - if (ipv4_key->ipv4_frag != flow_key->ip.frag) - return -EINVAL; - - break; - - case OVS_KEY_ATTR_TCP: - if (flow_key->ip.proto != IPPROTO_TCP) - return -EINVAL; - - if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst) - return -EINVAL; - - break; - - case OVS_KEY_ATTR_UDP: - if (flow_key->ip.proto != IPPROTO_UDP) - return -EINVAL; - - if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst) - return -EINVAL; - break; - - default: - return -EINVAL; - } - - return 0; -} - -static int validate_userspace(const struct nlattr *attr) -{ - static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = { - [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 }, - [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 }, - }; - struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1]; - int error; - - error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX, - attr, userspace_policy); - if (error) - return error; - - if (!a[OVS_USERSPACE_ATTR_PID] || - !nla_get_u32(a[OVS_USERSPACE_ATTR_PID])) - return -EINVAL; - - return 0; -} - -static int validate_actions(const struct nlattr *attr, - const struct sw_flow_key *key, int depth) -{ - const struct nlattr *a; - int rem, err; - - if (depth >= SAMPLE_ACTION_DEPTH) - return -EOVERFLOW; - - nla_for_each_nested(a, attr, rem) { - /* Expected argument lengths, (u32)-1 for variable length. */ - static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = { - [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32), - [OVS_ACTION_ATTR_USERSPACE] = (u32)-1, - [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan), - [OVS_ACTION_ATTR_POP_VLAN] = 0, - [OVS_ACTION_ATTR_SET] = (u32)-1, - [OVS_ACTION_ATTR_SAMPLE] = (u32)-1 - }; - const struct ovs_action_push_vlan *vlan; - int type = nla_type(a); - - if (type > OVS_ACTION_ATTR_MAX || - (action_lens[type] != nla_len(a) && - action_lens[type] != (u32)-1)) - return -EINVAL; - - switch (type) { - case OVS_ACTION_ATTR_UNSPEC: - return -EINVAL; - - case OVS_ACTION_ATTR_USERSPACE: - err = validate_userspace(a); - if (err) - return err; - break; - - case OVS_ACTION_ATTR_OUTPUT: - if (nla_get_u32(a) >= DP_MAX_PORTS) - return -EINVAL; - break; - - - case OVS_ACTION_ATTR_POP_VLAN: - break; - - case OVS_ACTION_ATTR_PUSH_VLAN: - vlan = nla_data(a); - if (vlan->vlan_tpid != htons(ETH_P_8021Q)) - return -EINVAL; - if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT))) - return -EINVAL; - break; - - case OVS_ACTION_ATTR_SET: - err = validate_set(a, key); - if (err) - return err; - break; - - case OVS_ACTION_ATTR_SAMPLE: - err = validate_sample(a, key, depth); - if (err) - return err; - break; - - default: - return -EINVAL; - } - } - - if (rem > 0) - return -EINVAL; - - return 0; -} - -static void clear_stats(struct sw_flow *flow) -{ - flow->used = 0; - flow->tcp_flags = 0; - flow->packet_count = 0; - flow->byte_count = 0; -} - -static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) -{ - struct ovs_header *ovs_header = info->userhdr; - struct nlattr **a = info->attrs; - struct sw_flow_actions *acts; - struct sk_buff *packet; - struct sw_flow *flow; - struct datapath *dp; - struct ethhdr *eth; - int len; - int err; - int key_len; - - err = -EINVAL; - if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] || - !a[OVS_PACKET_ATTR_ACTIONS] || - nla_len(a[OVS_PACKET_ATTR_PACKET]) < 
ETH_HLEN) - goto err; - - len = nla_len(a[OVS_PACKET_ATTR_PACKET]); - packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL); - err = -ENOMEM; - if (!packet) - goto err; - skb_reserve(packet, NET_IP_ALIGN); - - memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len); - - skb_reset_mac_header(packet); - eth = eth_hdr(packet); - - /* Normally, setting the skb 'protocol' field would be handled by a - * call to eth_type_trans(), but it assumes there's a sending - * device, which we may not have. */ - if (ntohs(eth->h_proto) >= 1536) - packet->protocol = eth->h_proto; - else - packet->protocol = htons(ETH_P_802_2); - - /* Build an sw_flow for sending this packet. */ - flow = ovs_flow_alloc(); - err = PTR_ERR(flow); - if (IS_ERR(flow)) - goto err_kfree_skb; - - err = ovs_flow_extract(packet, -1, &flow->key, &key_len); - if (err) - goto err_flow_free; - - err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority, - &flow->key.phy.in_port, - a[OVS_PACKET_ATTR_KEY]); - if (err) - goto err_flow_free; - - err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0); - if (err) - goto err_flow_free; - - flow->hash = ovs_flow_hash(&flow->key, key_len); - - acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]); - err = PTR_ERR(acts); - if (IS_ERR(acts)) - goto err_flow_free; - rcu_assign_pointer(flow->sf_acts, acts); - - OVS_CB(packet)->flow = flow; - packet->priority = flow->key.phy.priority; - - rcu_read_lock(); - dp = get_dp(ovs_header->dp_ifindex); - err = -ENODEV; - if (!dp) - goto err_unlock; - - local_bh_disable(); - err = ovs_execute_actions(dp, packet); - local_bh_enable(); - rcu_read_unlock(); - - ovs_flow_free(flow); - return err; - -err_unlock: - rcu_read_unlock(); -err_flow_free: - ovs_flow_free(flow); -err_kfree_skb: - kfree_skb(packet); -err: - return err; -} - -static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = { - [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC }, - [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED }, - [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED }, -}; - -static struct genl_ops dp_packet_genl_ops[] = { - { .cmd = OVS_PACKET_CMD_EXECUTE, - .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. 
*/ - .policy = packet_policy, - .doit = ovs_packet_cmd_execute - } -}; - -static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats) -{ - int i; - struct flow_table *table = genl_dereference(dp->table); - - stats->n_flows = ovs_flow_tbl_count(table); - - stats->n_hit = stats->n_missed = stats->n_lost = 0; - for_each_possible_cpu(i) { - const struct dp_stats_percpu *percpu_stats; - struct dp_stats_percpu local_stats; - unsigned int start; - - percpu_stats = per_cpu_ptr(dp->stats_percpu, i); - - do { - start = u64_stats_fetch_begin_bh(&percpu_stats->sync); - local_stats = *percpu_stats; - } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start)); - - stats->n_hit += local_stats.n_hit; - stats->n_missed += local_stats.n_missed; - stats->n_lost += local_stats.n_lost; - } -} - -static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = { - [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED }, - [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED }, - [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG }, -}; - -static struct genl_family dp_flow_genl_family = { - .id = GENL_ID_GENERATE, - .hdrsize = sizeof(struct ovs_header), - .name = OVS_FLOW_FAMILY, - .version = OVS_FLOW_VERSION, - .maxattr = OVS_FLOW_ATTR_MAX -}; - -static struct genl_multicast_group ovs_dp_flow_multicast_group = { - .name = OVS_FLOW_MCGROUP -}; - -/* Called with genl_lock. */ -static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, - struct sk_buff *skb, u32 pid, - u32 seq, u32 flags, u8 cmd) -{ - const int skb_orig_len = skb->len; - const struct sw_flow_actions *sf_acts; - struct ovs_flow_stats stats; - struct ovs_header *ovs_header; - struct nlattr *nla; - unsigned long used; - u8 tcp_flags; - int err; - - sf_acts = rcu_dereference_protected(flow->sf_acts, - lockdep_genl_is_held()); - - ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd); - if (!ovs_header) - return -EMSGSIZE; - - ovs_header->dp_ifindex = get_dpifindex(dp); - - nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY); - if (!nla) - goto nla_put_failure; - err = ovs_flow_to_nlattrs(&flow->key, skb); - if (err) - goto error; - nla_nest_end(skb, nla); - - spin_lock_bh(&flow->lock); - used = flow->used; - stats.n_packets = flow->packet_count; - stats.n_bytes = flow->byte_count; - tcp_flags = flow->tcp_flags; - spin_unlock_bh(&flow->lock); - - if (used) - NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)); - - if (stats.n_packets) - NLA_PUT(skb, OVS_FLOW_ATTR_STATS, - sizeof(struct ovs_flow_stats), &stats); - - if (tcp_flags) - NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags); - - /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if - * this is the first flow to be dumped into 'skb'. This is unusual for - * Netlink but individual action lists can be longer than - * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this. - * The userspace caller can always fetch the actions separately if it - * really wants them. (Most userspace callers in fact don't care.) - * - * This can only fail for dump operations because the skb is always - * properly sized for single flows. 
- */ - err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len, - sf_acts->actions); - if (err < 0 && skb_orig_len) - goto error; - - return genlmsg_end(skb, ovs_header); - -nla_put_failure: - err = -EMSGSIZE; -error: - genlmsg_cancel(skb, ovs_header); - return err; -} - -static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow) -{ - const struct sw_flow_actions *sf_acts; - int len; - - sf_acts = rcu_dereference_protected(flow->sf_acts, - lockdep_genl_is_held()); - - /* OVS_FLOW_ATTR_KEY */ - len = nla_total_size(FLOW_BUFSIZE); - /* OVS_FLOW_ATTR_ACTIONS */ - len += nla_total_size(sf_acts->actions_len); - /* OVS_FLOW_ATTR_STATS */ - len += nla_total_size(sizeof(struct ovs_flow_stats)); - /* OVS_FLOW_ATTR_TCP_FLAGS */ - len += nla_total_size(1); - /* OVS_FLOW_ATTR_USED */ - len += nla_total_size(8); - - len += NLMSG_ALIGN(sizeof(struct ovs_header)); - - return genlmsg_new(len, GFP_KERNEL); -} - -static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow, - struct datapath *dp, - u32 pid, u32 seq, u8 cmd) -{ - struct sk_buff *skb; - int retval; - - skb = ovs_flow_cmd_alloc_info(flow); - if (!skb) - return ERR_PTR(-ENOMEM); - - retval = ovs_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd); - BUG_ON(retval < 0); - return skb; -} - -static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) -{ - struct nlattr **a = info->attrs; - struct ovs_header *ovs_header = info->userhdr; - struct sw_flow_key key; - struct sw_flow *flow; - struct sk_buff *reply; - struct datapath *dp; - struct flow_table *table; - int error; - int key_len; - - /* Extract key. */ - error = -EINVAL; - if (!a[OVS_FLOW_ATTR_KEY]) - goto error; - error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]); - if (error) - goto error; - - /* Validate actions. */ - if (a[OVS_FLOW_ATTR_ACTIONS]) { - error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, 0); - if (error) - goto error; - } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) { - error = -EINVAL; - goto error; - } - - dp = get_dp(ovs_header->dp_ifindex); - error = -ENODEV; - if (!dp) - goto error; - - table = genl_dereference(dp->table); - flow = ovs_flow_tbl_lookup(table, &key, key_len); - if (!flow) { - struct sw_flow_actions *acts; - - /* Bail out if we're not allowed to create a new flow. */ - error = -ENOENT; - if (info->genlhdr->cmd == OVS_FLOW_CMD_SET) - goto error; - - /* Expand table, if necessary, to make room. */ - if (ovs_flow_tbl_need_to_expand(table)) { - struct flow_table *new_table; - - new_table = ovs_flow_tbl_expand(table); - if (!IS_ERR(new_table)) { - rcu_assign_pointer(dp->table, new_table); - ovs_flow_tbl_deferred_destroy(table); - table = genl_dereference(dp->table); - } - } - - /* Allocate flow. */ - flow = ovs_flow_alloc(); - if (IS_ERR(flow)) { - error = PTR_ERR(flow); - goto error; - } - flow->key = key; - clear_stats(flow); - - /* Obtain actions. */ - acts = ovs_flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]); - error = PTR_ERR(acts); - if (IS_ERR(acts)) - goto error_free_flow; - rcu_assign_pointer(flow->sf_acts, acts); - - /* Put flow in bucket. */ - flow->hash = ovs_flow_hash(&key, key_len); - ovs_flow_tbl_insert(table, flow); - - reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, - info->snd_seq, - OVS_FLOW_CMD_NEW); - } else { - /* We found a matching flow. */ - struct sw_flow_actions *old_acts; - struct nlattr *acts_attrs; - - /* Bail out if we're not allowed to modify an existing flow. 
- * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL - * because Generic Netlink treats the latter as a dump - * request. We also accept NLM_F_EXCL in case that bug ever - * gets fixed. - */ - error = -EEXIST; - if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW && - info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) - goto error; - - /* Update actions. */ - old_acts = rcu_dereference_protected(flow->sf_acts, - lockdep_genl_is_held()); - acts_attrs = a[OVS_FLOW_ATTR_ACTIONS]; - if (acts_attrs && - (old_acts->actions_len != nla_len(acts_attrs) || - memcmp(old_acts->actions, nla_data(acts_attrs), - old_acts->actions_len))) { - struct sw_flow_actions *new_acts; - - new_acts = ovs_flow_actions_alloc(acts_attrs); - error = PTR_ERR(new_acts); - if (IS_ERR(new_acts)) - goto error; - - rcu_assign_pointer(flow->sf_acts, new_acts); - ovs_flow_deferred_free_acts(old_acts); - } - - reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, - info->snd_seq, OVS_FLOW_CMD_NEW); - - /* Clear stats. */ - if (a[OVS_FLOW_ATTR_CLEAR]) { - spin_lock_bh(&flow->lock); - clear_stats(flow); - spin_unlock_bh(&flow->lock); - } - } - - if (!IS_ERR(reply)) - genl_notify(reply, genl_info_net(info), info->snd_pid, - ovs_dp_flow_multicast_group.id, info->nlhdr, - GFP_KERNEL); - else - netlink_set_err(init_net.genl_sock, 0, - ovs_dp_flow_multicast_group.id, PTR_ERR(reply)); - return 0; - -error_free_flow: - ovs_flow_free(flow); -error: - return error; -} - -static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info) -{ - struct nlattr **a = info->attrs; - struct ovs_header *ovs_header = info->userhdr; - struct sw_flow_key key; - struct sk_buff *reply; - struct sw_flow *flow; - struct datapath *dp; - struct flow_table *table; - int err; - int key_len; - - if (!a[OVS_FLOW_ATTR_KEY]) - return -EINVAL; - err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]); - if (err) - return err; - - dp = get_dp(ovs_header->dp_ifindex); - if (!dp) - return -ENODEV; - - table = genl_dereference(dp->table); - flow = ovs_flow_tbl_lookup(table, &key, key_len); - if (!flow) - return -ENOENT; - - reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, - info->snd_seq, OVS_FLOW_CMD_NEW); - if (IS_ERR(reply)) - return PTR_ERR(reply); - - return genlmsg_reply(reply, info); -} - -static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) -{ - struct nlattr **a = info->attrs; - struct ovs_header *ovs_header = info->userhdr; - struct sw_flow_key key; - struct sk_buff *reply; - struct sw_flow *flow; - struct datapath *dp; - struct flow_table *table; - int err; - int key_len; - - if (!a[OVS_FLOW_ATTR_KEY]) - return flush_flows(ovs_header->dp_ifindex); - err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]); - if (err) - return err; - - dp = get_dp(ovs_header->dp_ifindex); - if (!dp) - return -ENODEV; - - table = genl_dereference(dp->table); - flow = ovs_flow_tbl_lookup(table, &key, key_len); - if (!flow) - return -ENOENT; - - reply = ovs_flow_cmd_alloc_info(flow); - if (!reply) - return -ENOMEM; - - ovs_flow_tbl_remove(table, flow); - - err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid, - info->snd_seq, 0, OVS_FLOW_CMD_DEL); - BUG_ON(err < 0); - - ovs_flow_deferred_free(flow); - - genl_notify(reply, genl_info_net(info), info->snd_pid, - ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL); - return 0; -} - -static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) -{ - struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh)); - struct datapath *dp; - 
struct flow_table *table; - - dp = get_dp(ovs_header->dp_ifindex); - if (!dp) - return -ENODEV; - - table = genl_dereference(dp->table); - - for (;;) { - struct sw_flow *flow; - u32 bucket, obj; - - bucket = cb->args[0]; - obj = cb->args[1]; - flow = ovs_flow_tbl_next(table, &bucket, &obj); - if (!flow) - break; - - if (ovs_flow_cmd_fill_info(flow, dp, skb, - NETLINK_CB(cb->skb).pid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, - OVS_FLOW_CMD_NEW) < 0) - break; - - cb->args[0] = bucket; - cb->args[1] = obj; - } - return skb->len; -} - -static struct genl_ops dp_flow_genl_ops[] = { - { .cmd = OVS_FLOW_CMD_NEW, - .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ - .policy = flow_policy, - .doit = ovs_flow_cmd_new_or_set - }, - { .cmd = OVS_FLOW_CMD_DEL, - .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ - .policy = flow_policy, - .doit = ovs_flow_cmd_del - }, - { .cmd = OVS_FLOW_CMD_GET, - .flags = 0, /* OK for unprivileged users. */ - .policy = flow_policy, - .doit = ovs_flow_cmd_get, - .dumpit = ovs_flow_cmd_dump - }, - { .cmd = OVS_FLOW_CMD_SET, - .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ - .policy = flow_policy, - .doit = ovs_flow_cmd_new_or_set, - }, -}; - -static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = { - [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, - [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 }, -}; - -static struct genl_family dp_datapath_genl_family = { - .id = GENL_ID_GENERATE, - .hdrsize = sizeof(struct ovs_header), - .name = OVS_DATAPATH_FAMILY, - .version = OVS_DATAPATH_VERSION, - .maxattr = OVS_DP_ATTR_MAX -}; - -static struct genl_multicast_group ovs_dp_datapath_multicast_group = { - .name = OVS_DATAPATH_MCGROUP -}; - -static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, - u32 pid, u32 seq, u32 flags, u8 cmd) -{ - struct ovs_header *ovs_header; - struct ovs_dp_stats dp_stats; - int err; - - ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family, - flags, cmd); - if (!ovs_header) - goto error; - - ovs_header->dp_ifindex = get_dpifindex(dp); - - rcu_read_lock(); - err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp)); - rcu_read_unlock(); - if (err) - goto nla_put_failure; - - get_dp_stats(dp, &dp_stats); - NLA_PUT(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats); - - return genlmsg_end(skb, ovs_header); - -nla_put_failure: - genlmsg_cancel(skb, ovs_header); -error: - return -EMSGSIZE; -} - -static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid, - u32 seq, u8 cmd) -{ - struct sk_buff *skb; - int retval; - - skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!skb) - return ERR_PTR(-ENOMEM); - - retval = ovs_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd); - if (retval < 0) { - kfree_skb(skb); - return ERR_PTR(retval); - } - return skb; -} - -/* Called with genl_mutex and optionally with RTNL lock also. */ -static struct datapath *lookup_datapath(struct ovs_header *ovs_header, - struct nlattr *a[OVS_DP_ATTR_MAX + 1]) -{ - struct datapath *dp; - - if (!a[OVS_DP_ATTR_NAME]) - dp = get_dp(ovs_header->dp_ifindex); - else { - struct vport *vport; - - rcu_read_lock(); - vport = ovs_vport_locate(nla_data(a[OVS_DP_ATTR_NAME])); - dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL; - rcu_read_unlock(); - } - return dp ? 
dp : ERR_PTR(-ENODEV); -} - -static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) -{ - struct nlattr **a = info->attrs; - struct vport_parms parms; - struct sk_buff *reply; - struct datapath *dp; - struct vport *vport; - int err; - - err = -EINVAL; - if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID]) - goto err; - - rtnl_lock(); - err = -ENODEV; - if (!try_module_get(THIS_MODULE)) - goto err_unlock_rtnl; - - err = -ENOMEM; - dp = kzalloc(sizeof(*dp), GFP_KERNEL); - if (dp == NULL) - goto err_put_module; - INIT_LIST_HEAD(&dp->port_list); - - /* Allocate table. */ - err = -ENOMEM; - rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS)); - if (!dp->table) - goto err_free_dp; - - dp->stats_percpu = alloc_percpu(struct dp_stats_percpu); - if (!dp->stats_percpu) { - err = -ENOMEM; - goto err_destroy_table; - } - - /* Set up our datapath device. */ - parms.name = nla_data(a[OVS_DP_ATTR_NAME]); - parms.type = OVS_VPORT_TYPE_INTERNAL; - parms.options = NULL; - parms.dp = dp; - parms.port_no = OVSP_LOCAL; - parms.upcall_pid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]); - - vport = new_vport(&parms); - if (IS_ERR(vport)) { - err = PTR_ERR(vport); - if (err == -EBUSY) - err = -EEXIST; - - goto err_destroy_percpu; - } - - reply = ovs_dp_cmd_build_info(dp, info->snd_pid, - info->snd_seq, OVS_DP_CMD_NEW); - err = PTR_ERR(reply); - if (IS_ERR(reply)) - goto err_destroy_local_port; - - list_add_tail(&dp->list_node, &dps); - rtnl_unlock(); - - genl_notify(reply, genl_info_net(info), info->snd_pid, - ovs_dp_datapath_multicast_group.id, info->nlhdr, - GFP_KERNEL); - return 0; - -err_destroy_local_port: - ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL])); -err_destroy_percpu: - free_percpu(dp->stats_percpu); -err_destroy_table: - ovs_flow_tbl_destroy(genl_dereference(dp->table)); -err_free_dp: - kfree(dp); -err_put_module: - module_put(THIS_MODULE); -err_unlock_rtnl: - rtnl_unlock(); -err: - return err; -} - -static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info) -{ - struct vport *vport, *next_vport; - struct sk_buff *reply; - struct datapath *dp; - int err; - - rtnl_lock(); - dp = lookup_datapath(info->userhdr, info->attrs); - err = PTR_ERR(dp); - if (IS_ERR(dp)) - goto exit_unlock; - - reply = ovs_dp_cmd_build_info(dp, info->snd_pid, - info->snd_seq, OVS_DP_CMD_DEL); - err = PTR_ERR(reply); - if (IS_ERR(reply)) - goto exit_unlock; - - list_for_each_entry_safe(vport, next_vport, &dp->port_list, node) - if (vport->port_no != OVSP_LOCAL) - ovs_dp_detach_port(vport); - - list_del(&dp->list_node); - ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL])); - - /* rtnl_unlock() will wait until all the references to devices that - * are pending unregistration have been dropped. We do it here to - * ensure that any internal devices (which contain DP pointers) are - * fully destroyed before freeing the datapath. 
- */ - rtnl_unlock(); - - call_rcu(&dp->rcu, destroy_dp_rcu); - module_put(THIS_MODULE); - - genl_notify(reply, genl_info_net(info), info->snd_pid, - ovs_dp_datapath_multicast_group.id, info->nlhdr, - GFP_KERNEL); - - return 0; - -exit_unlock: - rtnl_unlock(); - return err; -} - -static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info) -{ - struct sk_buff *reply; - struct datapath *dp; - int err; - - dp = lookup_datapath(info->userhdr, info->attrs); - if (IS_ERR(dp)) - return PTR_ERR(dp); - - reply = ovs_dp_cmd_build_info(dp, info->snd_pid, - info->snd_seq, OVS_DP_CMD_NEW); - if (IS_ERR(reply)) { - err = PTR_ERR(reply); - netlink_set_err(init_net.genl_sock, 0, - ovs_dp_datapath_multicast_group.id, err); - return 0; - } - - genl_notify(reply, genl_info_net(info), info->snd_pid, - ovs_dp_datapath_multicast_group.id, info->nlhdr, - GFP_KERNEL); - - return 0; -} - -static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info) -{ - struct sk_buff *reply; - struct datapath *dp; - - dp = lookup_datapath(info->userhdr, info->attrs); - if (IS_ERR(dp)) - return PTR_ERR(dp); - - reply = ovs_dp_cmd_build_info(dp, info->snd_pid, - info->snd_seq, OVS_DP_CMD_NEW); - if (IS_ERR(reply)) - return PTR_ERR(reply); - - return genlmsg_reply(reply, info); -} - -static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) -{ - struct datapath *dp; - int skip = cb->args[0]; - int i = 0; - - list_for_each_entry(dp, &dps, list_node) { - if (i < skip) - continue; - if (ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, - OVS_DP_CMD_NEW) < 0) - break; - i++; - } - - cb->args[0] = i; - - return skb->len; -} - -static struct genl_ops dp_datapath_genl_ops[] = { - { .cmd = OVS_DP_CMD_NEW, - .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ - .policy = datapath_policy, - .doit = ovs_dp_cmd_new - }, - { .cmd = OVS_DP_CMD_DEL, - .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ - .policy = datapath_policy, - .doit = ovs_dp_cmd_del - }, - { .cmd = OVS_DP_CMD_GET, - .flags = 0, /* OK for unprivileged users. */ - .policy = datapath_policy, - .doit = ovs_dp_cmd_get, - .dumpit = ovs_dp_cmd_dump - }, - { .cmd = OVS_DP_CMD_SET, - .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ - .policy = datapath_policy, - .doit = ovs_dp_cmd_set, - }, -}; - -static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = { - [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, - [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) }, - [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 }, - [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 }, - [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 }, - [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED }, -}; - -static struct genl_family dp_vport_genl_family = { - .id = GENL_ID_GENERATE, - .hdrsize = sizeof(struct ovs_header), - .name = OVS_VPORT_FAMILY, - .version = OVS_VPORT_VERSION, - .maxattr = OVS_VPORT_ATTR_MAX -}; - -struct genl_multicast_group ovs_dp_vport_multicast_group = { - .name = OVS_VPORT_MCGROUP -}; - -/* Called with RTNL lock or RCU read lock. 
*/ -static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb, - u32 pid, u32 seq, u32 flags, u8 cmd) -{ - struct ovs_header *ovs_header; - struct ovs_vport_stats vport_stats; - int err; - - ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family, - flags, cmd); - if (!ovs_header) - return -EMSGSIZE; - - ovs_header->dp_ifindex = get_dpifindex(vport->dp); - - NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no); - NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type); - NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)); - NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid); - - ovs_vport_get_stats(vport, &vport_stats); - NLA_PUT(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats), - &vport_stats); - - err = ovs_vport_get_options(vport, skb); - if (err == -EMSGSIZE) - goto error; - - return genlmsg_end(skb, ovs_header); - -nla_put_failure: - err = -EMSGSIZE; -error: - genlmsg_cancel(skb, ovs_header); - return err; -} - -/* Called with RTNL lock or RCU read lock. */ -struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid, - u32 seq, u8 cmd) -{ - struct sk_buff *skb; - int retval; - - skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); - if (!skb) - return ERR_PTR(-ENOMEM); - - retval = ovs_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd); - if (retval < 0) { - kfree_skb(skb); - return ERR_PTR(retval); - } - return skb; -} - -/* Called with RTNL lock or RCU read lock. */ -static struct vport *lookup_vport(struct ovs_header *ovs_header, - struct nlattr *a[OVS_VPORT_ATTR_MAX + 1]) -{ - struct datapath *dp; - struct vport *vport; - - if (a[OVS_VPORT_ATTR_NAME]) { - vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME])); - if (!vport) - return ERR_PTR(-ENODEV); - return vport; - } else if (a[OVS_VPORT_ATTR_PORT_NO]) { - u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]); - - if (port_no >= DP_MAX_PORTS) - return ERR_PTR(-EFBIG); - - dp = get_dp(ovs_header->dp_ifindex); - if (!dp) - return ERR_PTR(-ENODEV); - - vport = rcu_dereference_rtnl(dp->ports[port_no]); - if (!vport) - return ERR_PTR(-ENOENT); - return vport; - } else - return ERR_PTR(-EINVAL); -} - -static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) -{ - struct nlattr **a = info->attrs; - struct ovs_header *ovs_header = info->userhdr; - struct vport_parms parms; - struct sk_buff *reply; - struct vport *vport; - struct datapath *dp; - u32 port_no; - int err; - - err = -EINVAL; - if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] || - !a[OVS_VPORT_ATTR_UPCALL_PID]) - goto exit; - - rtnl_lock(); - dp = get_dp(ovs_header->dp_ifindex); - err = -ENODEV; - if (!dp) - goto exit_unlock; - - if (a[OVS_VPORT_ATTR_PORT_NO]) { - port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]); - - err = -EFBIG; - if (port_no >= DP_MAX_PORTS) - goto exit_unlock; - - vport = rtnl_dereference(dp->ports[port_no]); - err = -EBUSY; - if (vport) - goto exit_unlock; - } else { - for (port_no = 1; ; port_no++) { - if (port_no >= DP_MAX_PORTS) { - err = -EFBIG; - goto exit_unlock; - } - vport = rtnl_dereference(dp->ports[port_no]); - if (!vport) - break; - } - } - - parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]); - parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]); - parms.options = a[OVS_VPORT_ATTR_OPTIONS]; - parms.dp = dp; - parms.port_no = port_no; - parms.upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]); - - vport = new_vport(&parms); - err = PTR_ERR(vport); - if (IS_ERR(vport)) - goto exit_unlock; - - reply = ovs_vport_cmd_build_info(vport, info->snd_pid, 
info->snd_seq, - OVS_VPORT_CMD_NEW); - if (IS_ERR(reply)) { - err = PTR_ERR(reply); - ovs_dp_detach_port(vport); - goto exit_unlock; - } - genl_notify(reply, genl_info_net(info), info->snd_pid, - ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); - -exit_unlock: - rtnl_unlock(); -exit: - return err; -} - -static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info) -{ - struct nlattr **a = info->attrs; - struct sk_buff *reply; - struct vport *vport; - int err; - - rtnl_lock(); - vport = lookup_vport(info->userhdr, a); - err = PTR_ERR(vport); - if (IS_ERR(vport)) - goto exit_unlock; - - err = 0; - if (a[OVS_VPORT_ATTR_TYPE] && - nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) - err = -EINVAL; - - if (!err && a[OVS_VPORT_ATTR_OPTIONS]) - err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]); - if (!err && a[OVS_VPORT_ATTR_UPCALL_PID]) - vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]); - - reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, - OVS_VPORT_CMD_NEW); - if (IS_ERR(reply)) { - err = PTR_ERR(reply); - netlink_set_err(init_net.genl_sock, 0, - ovs_dp_vport_multicast_group.id, err); - return 0; - } - - genl_notify(reply, genl_info_net(info), info->snd_pid, - ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); - -exit_unlock: - rtnl_unlock(); - return err; -} - -static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info) -{ - struct nlattr **a = info->attrs; - struct sk_buff *reply; - struct vport *vport; - int err; - - rtnl_lock(); - vport = lookup_vport(info->userhdr, a); - err = PTR_ERR(vport); - if (IS_ERR(vport)) - goto exit_unlock; - - if (vport->port_no == OVSP_LOCAL) { - err = -EINVAL; - goto exit_unlock; - } - - reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, - OVS_VPORT_CMD_DEL); - err = PTR_ERR(reply); - if (IS_ERR(reply)) - goto exit_unlock; - - ovs_dp_detach_port(vport); - - genl_notify(reply, genl_info_net(info), info->snd_pid, - ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); - -exit_unlock: - rtnl_unlock(); - return err; -} - -static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info) -{ - struct nlattr **a = info->attrs; - struct ovs_header *ovs_header = info->userhdr; - struct sk_buff *reply; - struct vport *vport; - int err; - - rcu_read_lock(); - vport = lookup_vport(ovs_header, a); - err = PTR_ERR(vport); - if (IS_ERR(vport)) - goto exit_unlock; - - reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, - OVS_VPORT_CMD_NEW); - err = PTR_ERR(reply); - if (IS_ERR(reply)) - goto exit_unlock; - - rcu_read_unlock(); - - return genlmsg_reply(reply, info); - -exit_unlock: - rcu_read_unlock(); - return err; -} - -static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) -{ - struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh)); - struct datapath *dp; - u32 port_no; - int retval; - - dp = get_dp(ovs_header->dp_ifindex); - if (!dp) - return -ENODEV; - - rcu_read_lock(); - for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) { - struct vport *vport; - - vport = rcu_dereference(dp->ports[port_no]); - if (!vport) - continue; - - if (ovs_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, - OVS_VPORT_CMD_NEW) < 0) - break; - } - rcu_read_unlock(); - - cb->args[0] = port_no; - retval = skb->len; - - return retval; -} - -static void rehash_flow_table(struct work_struct *work) -{ - struct datapath *dp; - - genl_lock(); - - list_for_each_entry(dp, 
&dps, list_node) { - struct flow_table *old_table = genl_dereference(dp->table); - struct flow_table *new_table; - - new_table = ovs_flow_tbl_rehash(old_table); - if (!IS_ERR(new_table)) { - rcu_assign_pointer(dp->table, new_table); - ovs_flow_tbl_deferred_destroy(old_table); - } - } - - genl_unlock(); - - schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL); -} - -static struct genl_ops dp_vport_genl_ops[] = { - { .cmd = OVS_VPORT_CMD_NEW, - .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ - .policy = vport_policy, - .doit = ovs_vport_cmd_new - }, - { .cmd = OVS_VPORT_CMD_DEL, - .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ - .policy = vport_policy, - .doit = ovs_vport_cmd_del - }, - { .cmd = OVS_VPORT_CMD_GET, - .flags = 0, /* OK for unprivileged users. */ - .policy = vport_policy, - .doit = ovs_vport_cmd_get, - .dumpit = ovs_vport_cmd_dump - }, - { .cmd = OVS_VPORT_CMD_SET, - .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ - .policy = vport_policy, - .doit = ovs_vport_cmd_set, - }, -}; - -struct genl_family_and_ops { - struct genl_family *family; - struct genl_ops *ops; - int n_ops; - struct genl_multicast_group *group; -}; - -static const struct genl_family_and_ops dp_genl_families[] = { - { &dp_datapath_genl_family, - dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops), - &ovs_dp_datapath_multicast_group }, - { &dp_vport_genl_family, - dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops), - &ovs_dp_vport_multicast_group }, - { &dp_flow_genl_family, - dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops), - &ovs_dp_flow_multicast_group }, - { &dp_packet_genl_family, - dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops), - NULL }, -}; - -static void dp_unregister_genl(int n_families) -{ - int i; - - for (i = 0; i < n_families; i++) - genl_unregister_family(dp_genl_families[i].family); -} - -static int dp_register_genl(void) -{ - int n_registered; - int err; - int i; - - n_registered = 0; - for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) { - const struct genl_family_and_ops *f = &dp_genl_families[i]; - - err = genl_register_family_with_ops(f->family, f->ops, - f->n_ops); - if (err) - goto error; - n_registered++; - - if (f->group) { - err = genl_register_mc_group(f->family, f->group); - if (err) - goto error; - } - } - - return 0; - -error: - dp_unregister_genl(n_registered); - return err; -} - -static int __init dp_init(void) -{ - struct sk_buff *dummy_skb; - int err; - - BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb)); - - pr_info("Open vSwitch switching datapath\n"); - - err = ovs_flow_init(); - if (err) - goto error; - - err = ovs_vport_init(); - if (err) - goto error_flow_exit; - - err = register_netdevice_notifier(&ovs_dp_device_notifier); - if (err) - goto error_vport_exit; - - err = dp_register_genl(); - if (err < 0) - goto error_unreg_notifier; - - schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL); - - return 0; - -error_unreg_notifier: - unregister_netdevice_notifier(&ovs_dp_device_notifier); -error_vport_exit: - ovs_vport_exit(); -error_flow_exit: - ovs_flow_exit(); -error: - return err; -} - -static void dp_cleanup(void) -{ - cancel_delayed_work_sync(&rehash_flow_wq); - rcu_barrier(); - dp_unregister_genl(ARRAY_SIZE(dp_genl_families)); - unregister_netdevice_notifier(&ovs_dp_device_notifier); - ovs_vport_exit(); - ovs_flow_exit(); -} - -module_init(dp_init); -module_exit(dp_cleanup); - -MODULE_DESCRIPTION("Open vSwitch switching datapath"); -MODULE_LICENSE("GPL"); diff --git 
a/trunk/net/openvswitch/datapath.h b/trunk/net/openvswitch/datapath.h deleted file mode 100644 index 5b9f884b7055..000000000000 --- a/trunk/net/openvswitch/datapath.h +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright (c) 2007-2011 Nicira Networks. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA - */ - -#ifndef DATAPATH_H -#define DATAPATH_H 1 - -#include -#include -#include -#include -#include -#include -#include - -#include "flow.h" - -struct vport; - -#define DP_MAX_PORTS 1024 -#define SAMPLE_ACTION_DEPTH 3 - -/** - * struct dp_stats_percpu - per-cpu packet processing statistics for a given - * datapath. - * @n_hit: Number of received packets for which a matching flow was found in - * the flow table. - * @n_miss: Number of received packets that had no matching flow in the flow - * table. The sum of @n_hit and @n_miss is the number of packets that have - * been received by the datapath. - * @n_lost: Number of received packets that had no matching flow in the flow - * table that could not be sent to userspace (normally due to an overflow in - * one of the datapath's queues). - */ -struct dp_stats_percpu { - u64 n_hit; - u64 n_missed; - u64 n_lost; - struct u64_stats_sync sync; -}; - -/** - * struct datapath - datapath for flow-based packet switching - * @rcu: RCU callback head for deferred destruction. - * @list_node: Element in global 'dps' list. - * @n_flows: Number of flows currently in flow table. - * @table: Current flow table. Protected by genl_lock and RCU. - * @ports: Map from port number to &struct vport. %OVSP_LOCAL port - * always exists, other ports may be %NULL. Protected by RTNL and RCU. - * @port_list: List of all ports in @ports in arbitrary order. RTNL required - * to iterate or modify. - * @stats_percpu: Per-CPU datapath statistics. - * - * Context: See the comment on locking at the top of datapath.c for additional - * locking information. - */ -struct datapath { - struct rcu_head rcu; - struct list_head list_node; - - /* Flow table. */ - struct flow_table __rcu *table; - - /* Switch ports. */ - struct vport __rcu *ports[DP_MAX_PORTS]; - struct list_head port_list; - - /* Stats. */ - struct dp_stats_percpu __percpu *stats_percpu; -}; - -/** - * struct ovs_skb_cb - OVS data in skb CB - * @flow: The flow associated with this packet. May be %NULL if no flow. - */ -struct ovs_skb_cb { - struct sw_flow *flow; -}; -#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb) - -/** - * struct dp_upcall - metadata to include with a packet to send to userspace - * @cmd: One of %OVS_PACKET_CMD_*. - * @key: Becomes %OVS_PACKET_ATTR_KEY. Must be nonnull. - * @userdata: If nonnull, its u64 value is extracted and passed to userspace as - * %OVS_PACKET_ATTR_USERDATA. - * @pid: Netlink PID to which packet should be sent. If @pid is 0 then no - * packet is sent and the packet is accounted in the datapath's @n_lost - * counter. 
- */ -struct dp_upcall_info { - u8 cmd; - const struct sw_flow_key *key; - const struct nlattr *userdata; - u32 pid; -}; - -extern struct notifier_block ovs_dp_device_notifier; -extern struct genl_multicast_group ovs_dp_vport_multicast_group; - -void ovs_dp_process_received_packet(struct vport *, struct sk_buff *); -void ovs_dp_detach_port(struct vport *); -int ovs_dp_upcall(struct datapath *, struct sk_buff *, - const struct dp_upcall_info *); - -const char *ovs_dp_name(const struct datapath *dp); -struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq, - u8 cmd); - -int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb); -#endif /* datapath.h */ diff --git a/trunk/net/openvswitch/dp_notify.c b/trunk/net/openvswitch/dp_notify.c deleted file mode 100644 index 46736518c453..000000000000 --- a/trunk/net/openvswitch/dp_notify.c +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright (c) 2007-2011 Nicira Networks. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA - */ - -#include -#include - -#include "datapath.h" -#include "vport-internal_dev.h" -#include "vport-netdev.h" - -static int dp_device_event(struct notifier_block *unused, unsigned long event, - void *ptr) -{ - struct net_device *dev = ptr; - struct vport *vport; - - if (ovs_is_internal_dev(dev)) - vport = ovs_internal_dev_get_vport(dev); - else - vport = ovs_netdev_get_vport(dev); - - if (!vport) - return NOTIFY_DONE; - - switch (event) { - case NETDEV_UNREGISTER: - if (!ovs_is_internal_dev(dev)) { - struct sk_buff *notify; - - notify = ovs_vport_cmd_build_info(vport, 0, 0, - OVS_VPORT_CMD_DEL); - ovs_dp_detach_port(vport); - if (IS_ERR(notify)) { - netlink_set_err(init_net.genl_sock, 0, - ovs_dp_vport_multicast_group.id, - PTR_ERR(notify)); - break; - } - - genlmsg_multicast(notify, 0, ovs_dp_vport_multicast_group.id, - GFP_KERNEL); - } - break; - } - - return NOTIFY_DONE; -} - -struct notifier_block ovs_dp_device_notifier = { - .notifier_call = dp_device_event -}; diff --git a/trunk/net/openvswitch/flow.c b/trunk/net/openvswitch/flow.c deleted file mode 100644 index fe7f020a843e..000000000000 --- a/trunk/net/openvswitch/flow.c +++ /dev/null @@ -1,1346 +0,0 @@ -/* - * Copyright (c) 2007-2011 Nicira Networks. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA - */ - -#include "flow.h" -#include "datapath.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static struct kmem_cache *flow_cache; - -static int check_header(struct sk_buff *skb, int len) -{ - if (unlikely(skb->len < len)) - return -EINVAL; - if (unlikely(!pskb_may_pull(skb, len))) - return -ENOMEM; - return 0; -} - -static bool arphdr_ok(struct sk_buff *skb) -{ - return pskb_may_pull(skb, skb_network_offset(skb) + - sizeof(struct arp_eth_header)); -} - -static int check_iphdr(struct sk_buff *skb) -{ - unsigned int nh_ofs = skb_network_offset(skb); - unsigned int ip_len; - int err; - - err = check_header(skb, nh_ofs + sizeof(struct iphdr)); - if (unlikely(err)) - return err; - - ip_len = ip_hdrlen(skb); - if (unlikely(ip_len < sizeof(struct iphdr) || - skb->len < nh_ofs + ip_len)) - return -EINVAL; - - skb_set_transport_header(skb, nh_ofs + ip_len); - return 0; -} - -static bool tcphdr_ok(struct sk_buff *skb) -{ - int th_ofs = skb_transport_offset(skb); - int tcp_len; - - if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr)))) - return false; - - tcp_len = tcp_hdrlen(skb); - if (unlikely(tcp_len < sizeof(struct tcphdr) || - skb->len < th_ofs + tcp_len)) - return false; - - return true; -} - -static bool udphdr_ok(struct sk_buff *skb) -{ - return pskb_may_pull(skb, skb_transport_offset(skb) + - sizeof(struct udphdr)); -} - -static bool icmphdr_ok(struct sk_buff *skb) -{ - return pskb_may_pull(skb, skb_transport_offset(skb) + - sizeof(struct icmphdr)); -} - -u64 ovs_flow_used_time(unsigned long flow_jiffies) -{ - struct timespec cur_ts; - u64 cur_ms, idle_ms; - - ktime_get_ts(&cur_ts); - idle_ms = jiffies_to_msecs(jiffies - flow_jiffies); - cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC + - cur_ts.tv_nsec / NSEC_PER_MSEC; - - return cur_ms - idle_ms; -} - -#define SW_FLOW_KEY_OFFSET(field) \ - (offsetof(struct sw_flow_key, field) + \ - FIELD_SIZEOF(struct sw_flow_key, field)) - -static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key, - int *key_lenp) -{ - unsigned int nh_ofs = skb_network_offset(skb); - unsigned int nh_len; - int payload_ofs; - struct ipv6hdr *nh; - uint8_t nexthdr; - __be16 frag_off; - int err; - - *key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label); - - err = check_header(skb, nh_ofs + sizeof(*nh)); - if (unlikely(err)) - return err; - - nh = ipv6_hdr(skb); - nexthdr = nh->nexthdr; - payload_ofs = (u8 *)(nh + 1) - skb->data; - - key->ip.proto = NEXTHDR_NONE; - key->ip.tos = ipv6_get_dsfield(nh); - key->ip.ttl = nh->hop_limit; - key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL); - key->ipv6.addr.src = nh->saddr; - key->ipv6.addr.dst = nh->daddr; - - payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off); - if (unlikely(payload_ofs < 0)) - return -EINVAL; - - if (frag_off) { - if (frag_off & htons(~0x7)) - key->ip.frag = OVS_FRAG_TYPE_LATER; - else - key->ip.frag = OVS_FRAG_TYPE_FIRST; - } - - nh_len = payload_ofs - nh_ofs; - skb_set_transport_header(skb, nh_ofs + nh_len); - key->ip.proto = nexthdr; - return nh_len; -} - -static bool icmp6hdr_ok(struct sk_buff *skb) -{ - return pskb_may_pull(skb, 
skb_transport_offset(skb) + - sizeof(struct icmp6hdr)); -} - -#define TCP_FLAGS_OFFSET 13 -#define TCP_FLAG_MASK 0x3f - -void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb) -{ - u8 tcp_flags = 0; - - if (flow->key.eth.type == htons(ETH_P_IP) && - flow->key.ip.proto == IPPROTO_TCP) { - u8 *tcp = (u8 *)tcp_hdr(skb); - tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK; - } - - spin_lock(&flow->lock); - flow->used = jiffies; - flow->packet_count++; - flow->byte_count += skb->len; - flow->tcp_flags |= tcp_flags; - spin_unlock(&flow->lock); -} - -struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions) -{ - int actions_len = nla_len(actions); - struct sw_flow_actions *sfa; - - /* At least DP_MAX_PORTS actions are required to be able to flood a - * packet to every port. Factor of 2 allows for setting VLAN tags, - * etc. */ - if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4)) - return ERR_PTR(-EINVAL); - - sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL); - if (!sfa) - return ERR_PTR(-ENOMEM); - - sfa->actions_len = actions_len; - memcpy(sfa->actions, nla_data(actions), actions_len); - return sfa; -} - -struct sw_flow *ovs_flow_alloc(void) -{ - struct sw_flow *flow; - - flow = kmem_cache_alloc(flow_cache, GFP_KERNEL); - if (!flow) - return ERR_PTR(-ENOMEM); - - spin_lock_init(&flow->lock); - flow->sf_acts = NULL; - - return flow; -} - -static struct hlist_head *find_bucket(struct flow_table *table, u32 hash) -{ - hash = jhash_1word(hash, table->hash_seed); - return flex_array_get(table->buckets, - (hash & (table->n_buckets - 1))); -} - -static struct flex_array *alloc_buckets(unsigned int n_buckets) -{ - struct flex_array *buckets; - int i, err; - - buckets = flex_array_alloc(sizeof(struct hlist_head *), - n_buckets, GFP_KERNEL); - if (!buckets) - return NULL; - - err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL); - if (err) { - flex_array_free(buckets); - return NULL; - } - - for (i = 0; i < n_buckets; i++) - INIT_HLIST_HEAD((struct hlist_head *) - flex_array_get(buckets, i)); - - return buckets; -} - -static void free_buckets(struct flex_array *buckets) -{ - flex_array_free(buckets); -} - -struct flow_table *ovs_flow_tbl_alloc(int new_size) -{ - struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL); - - if (!table) - return NULL; - - table->buckets = alloc_buckets(new_size); - - if (!table->buckets) { - kfree(table); - return NULL; - } - table->n_buckets = new_size; - table->count = 0; - table->node_ver = 0; - table->keep_flows = false; - get_random_bytes(&table->hash_seed, sizeof(u32)); - - return table; -} - -void ovs_flow_tbl_destroy(struct flow_table *table) -{ - int i; - - if (!table) - return; - - if (table->keep_flows) - goto skip_flows; - - for (i = 0; i < table->n_buckets; i++) { - struct sw_flow *flow; - struct hlist_head *head = flex_array_get(table->buckets, i); - struct hlist_node *node, *n; - int ver = table->node_ver; - - hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) { - hlist_del_rcu(&flow->hash_node[ver]); - ovs_flow_free(flow); - } - } - -skip_flows: - free_buckets(table->buckets); - kfree(table); -} - -static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu) -{ - struct flow_table *table = container_of(rcu, struct flow_table, rcu); - - ovs_flow_tbl_destroy(table); -} - -void ovs_flow_tbl_deferred_destroy(struct flow_table *table) -{ - if (!table) - return; - - call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb); -} - -struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last) 
-{ - struct sw_flow *flow; - struct hlist_head *head; - struct hlist_node *n; - int ver; - int i; - - ver = table->node_ver; - while (*bucket < table->n_buckets) { - i = 0; - head = flex_array_get(table->buckets, *bucket); - hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) { - if (i < *last) { - i++; - continue; - } - *last = i + 1; - return flow; - } - (*bucket)++; - *last = 0; - } - - return NULL; -} - -static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new) -{ - int old_ver; - int i; - - old_ver = old->node_ver; - new->node_ver = !old_ver; - - /* Insert in new table. */ - for (i = 0; i < old->n_buckets; i++) { - struct sw_flow *flow; - struct hlist_head *head; - struct hlist_node *n; - - head = flex_array_get(old->buckets, i); - - hlist_for_each_entry(flow, n, head, hash_node[old_ver]) - ovs_flow_tbl_insert(new, flow); - } - old->keep_flows = true; -} - -static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets) -{ - struct flow_table *new_table; - - new_table = ovs_flow_tbl_alloc(n_buckets); - if (!new_table) - return ERR_PTR(-ENOMEM); - - flow_table_copy_flows(table, new_table); - - return new_table; -} - -struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table) -{ - return __flow_tbl_rehash(table, table->n_buckets); -} - -struct flow_table *ovs_flow_tbl_expand(struct flow_table *table) -{ - return __flow_tbl_rehash(table, table->n_buckets * 2); -} - -void ovs_flow_free(struct sw_flow *flow) -{ - if (unlikely(!flow)) - return; - - kfree((struct sf_flow_acts __force *)flow->sf_acts); - kmem_cache_free(flow_cache, flow); -} - -/* RCU callback used by ovs_flow_deferred_free. */ -static void rcu_free_flow_callback(struct rcu_head *rcu) -{ - struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu); - - ovs_flow_free(flow); -} - -/* Schedules 'flow' to be freed after the next RCU grace period. - * The caller must hold rcu_read_lock for this to be sensible. */ -void ovs_flow_deferred_free(struct sw_flow *flow) -{ - call_rcu(&flow->rcu, rcu_free_flow_callback); -} - -/* RCU callback used by ovs_flow_deferred_free_acts. */ -static void rcu_free_acts_callback(struct rcu_head *rcu) -{ - struct sw_flow_actions *sf_acts = container_of(rcu, - struct sw_flow_actions, rcu); - kfree(sf_acts); -} - -/* Schedules 'sf_acts' to be freed after the next RCU grace period. - * The caller must hold rcu_read_lock for this to be sensible. 
*/ -void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts) -{ - call_rcu(&sf_acts->rcu, rcu_free_acts_callback); -} - -static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key) -{ - struct qtag_prefix { - __be16 eth_type; /* ETH_P_8021Q */ - __be16 tci; - }; - struct qtag_prefix *qp; - - if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16))) - return 0; - - if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) + - sizeof(__be16)))) - return -ENOMEM; - - qp = (struct qtag_prefix *) skb->data; - key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT); - __skb_pull(skb, sizeof(struct qtag_prefix)); - - return 0; -} - -static __be16 parse_ethertype(struct sk_buff *skb) -{ - struct llc_snap_hdr { - u8 dsap; /* Always 0xAA */ - u8 ssap; /* Always 0xAA */ - u8 ctrl; - u8 oui[3]; - __be16 ethertype; - }; - struct llc_snap_hdr *llc; - __be16 proto; - - proto = *(__be16 *) skb->data; - __skb_pull(skb, sizeof(__be16)); - - if (ntohs(proto) >= 1536) - return proto; - - if (skb->len < sizeof(struct llc_snap_hdr)) - return htons(ETH_P_802_2); - - if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr)))) - return htons(0); - - llc = (struct llc_snap_hdr *) skb->data; - if (llc->dsap != LLC_SAP_SNAP || - llc->ssap != LLC_SAP_SNAP || - (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0) - return htons(ETH_P_802_2); - - __skb_pull(skb, sizeof(struct llc_snap_hdr)); - return llc->ethertype; -} - -static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, - int *key_lenp, int nh_len) -{ - struct icmp6hdr *icmp = icmp6_hdr(skb); - int error = 0; - int key_len; - - /* The ICMPv6 type and code fields use the 16-bit transport port - * fields, so we need to store them in 16-bit network byte order. - */ - key->ipv6.tp.src = htons(icmp->icmp6_type); - key->ipv6.tp.dst = htons(icmp->icmp6_code); - key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); - - if (icmp->icmp6_code == 0 && - (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION || - icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) { - int icmp_len = skb->len - skb_transport_offset(skb); - struct nd_msg *nd; - int offset; - - key_len = SW_FLOW_KEY_OFFSET(ipv6.nd); - - /* In order to process neighbor discovery options, we need the - * entire packet. - */ - if (unlikely(icmp_len < sizeof(*nd))) - goto out; - if (unlikely(skb_linearize(skb))) { - error = -ENOMEM; - goto out; - } - - nd = (struct nd_msg *)skb_transport_header(skb); - key->ipv6.nd.target = nd->target; - key_len = SW_FLOW_KEY_OFFSET(ipv6.nd); - - icmp_len -= sizeof(*nd); - offset = 0; - while (icmp_len >= 8) { - struct nd_opt_hdr *nd_opt = - (struct nd_opt_hdr *)(nd->opt + offset); - int opt_len = nd_opt->nd_opt_len * 8; - - if (unlikely(!opt_len || opt_len > icmp_len)) - goto invalid; - - /* Store the link layer address if the appropriate - * option is provided. It is considered an error if - * the same link layer option is specified twice. 
- */ - if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR - && opt_len == 8) { - if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll))) - goto invalid; - memcpy(key->ipv6.nd.sll, - &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN); - } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR - && opt_len == 8) { - if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll))) - goto invalid; - memcpy(key->ipv6.nd.tll, - &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN); - } - - icmp_len -= opt_len; - offset += opt_len; - } - } - - goto out; - -invalid: - memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target)); - memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll)); - memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll)); - -out: - *key_lenp = key_len; - return error; -} - -/** - * ovs_flow_extract - extracts a flow key from an Ethernet frame. - * @skb: sk_buff that contains the frame, with skb->data pointing to the - * Ethernet header - * @in_port: port number on which @skb was received. - * @key: output flow key - * @key_lenp: length of output flow key - * - * The caller must ensure that skb->len >= ETH_HLEN. - * - * Returns 0 if successful, otherwise a negative errno value. - * - * Initializes @skb header pointers as follows: - * - * - skb->mac_header: the Ethernet header. - * - * - skb->network_header: just past the Ethernet header, or just past the - * VLAN header, to the first byte of the Ethernet payload. - * - * - skb->transport_header: If key->dl_type is ETH_P_IP or ETH_P_IPV6 - * on output, then just past the IP header, if one is present and - * of a correct length, otherwise the same as skb->network_header. - * For other key->dl_type values it is left untouched. - */ -int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, - int *key_lenp) -{ - int error = 0; - int key_len = SW_FLOW_KEY_OFFSET(eth); - struct ethhdr *eth; - - memset(key, 0, sizeof(*key)); - - key->phy.priority = skb->priority; - key->phy.in_port = in_port; - - skb_reset_mac_header(skb); - - /* Link layer. We are guaranteed to have at least the 14 byte Ethernet - * header in the linear data area. - */ - eth = eth_hdr(skb); - memcpy(key->eth.src, eth->h_source, ETH_ALEN); - memcpy(key->eth.dst, eth->h_dest, ETH_ALEN); - - __skb_pull(skb, 2 * ETH_ALEN); - - if (vlan_tx_tag_present(skb)) - key->eth.tci = htons(skb->vlan_tci); - else if (eth->h_proto == htons(ETH_P_8021Q)) - if (unlikely(parse_vlan(skb, key))) - return -ENOMEM; - - key->eth.type = parse_ethertype(skb); - if (unlikely(key->eth.type == htons(0))) - return -ENOMEM; - - skb_reset_network_header(skb); - __skb_push(skb, skb->data - skb_mac_header(skb)); - - /* Network layer. */ - if (key->eth.type == htons(ETH_P_IP)) { - struct iphdr *nh; - __be16 offset; - - key_len = SW_FLOW_KEY_OFFSET(ipv4.addr); - - error = check_iphdr(skb); - if (unlikely(error)) { - if (error == -EINVAL) { - skb->transport_header = skb->network_header; - error = 0; - } - goto out; - } - - nh = ip_hdr(skb); - key->ipv4.addr.src = nh->saddr; - key->ipv4.addr.dst = nh->daddr; - - key->ip.proto = nh->protocol; - key->ip.tos = nh->tos; - key->ip.ttl = nh->ttl; - - offset = nh->frag_off & htons(IP_OFFSET); - if (offset) { - key->ip.frag = OVS_FRAG_TYPE_LATER; - goto out; - } - if (nh->frag_off & htons(IP_MF) || - skb_shinfo(skb)->gso_type & SKB_GSO_UDP) - key->ip.frag = OVS_FRAG_TYPE_FIRST; - - /* Transport layer. 
*/ - if (key->ip.proto == IPPROTO_TCP) { - key_len = SW_FLOW_KEY_OFFSET(ipv4.tp); - if (tcphdr_ok(skb)) { - struct tcphdr *tcp = tcp_hdr(skb); - key->ipv4.tp.src = tcp->source; - key->ipv4.tp.dst = tcp->dest; - } - } else if (key->ip.proto == IPPROTO_UDP) { - key_len = SW_FLOW_KEY_OFFSET(ipv4.tp); - if (udphdr_ok(skb)) { - struct udphdr *udp = udp_hdr(skb); - key->ipv4.tp.src = udp->source; - key->ipv4.tp.dst = udp->dest; - } - } else if (key->ip.proto == IPPROTO_ICMP) { - key_len = SW_FLOW_KEY_OFFSET(ipv4.tp); - if (icmphdr_ok(skb)) { - struct icmphdr *icmp = icmp_hdr(skb); - /* The ICMP type and code fields use the 16-bit - * transport port fields, so we need to store - * them in 16-bit network byte order. */ - key->ipv4.tp.src = htons(icmp->type); - key->ipv4.tp.dst = htons(icmp->code); - } - } - - } else if (key->eth.type == htons(ETH_P_ARP) && arphdr_ok(skb)) { - struct arp_eth_header *arp; - - arp = (struct arp_eth_header *)skb_network_header(skb); - - if (arp->ar_hrd == htons(ARPHRD_ETHER) - && arp->ar_pro == htons(ETH_P_IP) - && arp->ar_hln == ETH_ALEN - && arp->ar_pln == 4) { - - /* We only match on the lower 8 bits of the opcode. */ - if (ntohs(arp->ar_op) <= 0xff) - key->ip.proto = ntohs(arp->ar_op); - - if (key->ip.proto == ARPOP_REQUEST - || key->ip.proto == ARPOP_REPLY) { - memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src)); - memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst)); - memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN); - memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN); - key_len = SW_FLOW_KEY_OFFSET(ipv4.arp); - } - } - } else if (key->eth.type == htons(ETH_P_IPV6)) { - int nh_len; /* IPv6 Header + Extensions */ - - nh_len = parse_ipv6hdr(skb, key, &key_len); - if (unlikely(nh_len < 0)) { - if (nh_len == -EINVAL) - skb->transport_header = skb->network_header; - else - error = nh_len; - goto out; - } - - if (key->ip.frag == OVS_FRAG_TYPE_LATER) - goto out; - if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) - key->ip.frag = OVS_FRAG_TYPE_FIRST; - - /* Transport layer. 
*/ - if (key->ip.proto == NEXTHDR_TCP) { - key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); - if (tcphdr_ok(skb)) { - struct tcphdr *tcp = tcp_hdr(skb); - key->ipv6.tp.src = tcp->source; - key->ipv6.tp.dst = tcp->dest; - } - } else if (key->ip.proto == NEXTHDR_UDP) { - key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); - if (udphdr_ok(skb)) { - struct udphdr *udp = udp_hdr(skb); - key->ipv6.tp.src = udp->source; - key->ipv6.tp.dst = udp->dest; - } - } else if (key->ip.proto == NEXTHDR_ICMP) { - key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); - if (icmp6hdr_ok(skb)) { - error = parse_icmpv6(skb, key, &key_len, nh_len); - if (error < 0) - goto out; - } - } - } - -out: - *key_lenp = key_len; - return error; -} - -u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len) -{ - return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), 0); -} - -struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table, - struct sw_flow_key *key, int key_len) -{ - struct sw_flow *flow; - struct hlist_node *n; - struct hlist_head *head; - u32 hash; - - hash = ovs_flow_hash(key, key_len); - - head = find_bucket(table, hash); - hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) { - - if (flow->hash == hash && - !memcmp(&flow->key, key, key_len)) { - return flow; - } - } - return NULL; -} - -void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow) -{ - struct hlist_head *head; - - head = find_bucket(table, flow->hash); - hlist_add_head_rcu(&flow->hash_node[table->node_ver], head); - table->count++; -} - -void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow) -{ - hlist_del_rcu(&flow->hash_node[table->node_ver]); - table->count--; - BUG_ON(table->count < 0); -} - -/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */ -const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { - [OVS_KEY_ATTR_ENCAP] = -1, - [OVS_KEY_ATTR_PRIORITY] = sizeof(u32), - [OVS_KEY_ATTR_IN_PORT] = sizeof(u32), - [OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet), - [OVS_KEY_ATTR_VLAN] = sizeof(__be16), - [OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16), - [OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4), - [OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6), - [OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp), - [OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp), - [OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp), - [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6), - [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp), - [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd), -}; - -static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len, - const struct nlattr *a[], u32 *attrs) -{ - const struct ovs_key_icmp *icmp_key; - const struct ovs_key_tcp *tcp_key; - const struct ovs_key_udp *udp_key; - - switch (swkey->ip.proto) { - case IPPROTO_TCP: - if (!(*attrs & (1 << OVS_KEY_ATTR_TCP))) - return -EINVAL; - *attrs &= ~(1 << OVS_KEY_ATTR_TCP); - - *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp); - tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]); - swkey->ipv4.tp.src = tcp_key->tcp_src; - swkey->ipv4.tp.dst = tcp_key->tcp_dst; - break; - - case IPPROTO_UDP: - if (!(*attrs & (1 << OVS_KEY_ATTR_UDP))) - return -EINVAL; - *attrs &= ~(1 << OVS_KEY_ATTR_UDP); - - *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp); - udp_key = nla_data(a[OVS_KEY_ATTR_UDP]); - swkey->ipv4.tp.src = udp_key->udp_src; - swkey->ipv4.tp.dst = udp_key->udp_dst; - break; - - case IPPROTO_ICMP: - if (!(*attrs & (1 << OVS_KEY_ATTR_ICMP))) - return -EINVAL; - *attrs &= ~(1 << OVS_KEY_ATTR_ICMP); - - *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp); - icmp_key = 
nla_data(a[OVS_KEY_ATTR_ICMP]); - swkey->ipv4.tp.src = htons(icmp_key->icmp_type); - swkey->ipv4.tp.dst = htons(icmp_key->icmp_code); - break; - } - - return 0; -} - -static int ipv6_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len, - const struct nlattr *a[], u32 *attrs) -{ - const struct ovs_key_icmpv6 *icmpv6_key; - const struct ovs_key_tcp *tcp_key; - const struct ovs_key_udp *udp_key; - - switch (swkey->ip.proto) { - case IPPROTO_TCP: - if (!(*attrs & (1 << OVS_KEY_ATTR_TCP))) - return -EINVAL; - *attrs &= ~(1 << OVS_KEY_ATTR_TCP); - - *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); - tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]); - swkey->ipv6.tp.src = tcp_key->tcp_src; - swkey->ipv6.tp.dst = tcp_key->tcp_dst; - break; - - case IPPROTO_UDP: - if (!(*attrs & (1 << OVS_KEY_ATTR_UDP))) - return -EINVAL; - *attrs &= ~(1 << OVS_KEY_ATTR_UDP); - - *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); - udp_key = nla_data(a[OVS_KEY_ATTR_UDP]); - swkey->ipv6.tp.src = udp_key->udp_src; - swkey->ipv6.tp.dst = udp_key->udp_dst; - break; - - case IPPROTO_ICMPV6: - if (!(*attrs & (1 << OVS_KEY_ATTR_ICMPV6))) - return -EINVAL; - *attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6); - - *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); - icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]); - swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type); - swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code); - - if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) || - swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) { - const struct ovs_key_nd *nd_key; - - if (!(*attrs & (1 << OVS_KEY_ATTR_ND))) - return -EINVAL; - *attrs &= ~(1 << OVS_KEY_ATTR_ND); - - *key_len = SW_FLOW_KEY_OFFSET(ipv6.nd); - nd_key = nla_data(a[OVS_KEY_ATTR_ND]); - memcpy(&swkey->ipv6.nd.target, nd_key->nd_target, - sizeof(swkey->ipv6.nd.target)); - memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN); - memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN); - } - break; - } - - return 0; -} - -static int parse_flow_nlattrs(const struct nlattr *attr, - const struct nlattr *a[], u32 *attrsp) -{ - const struct nlattr *nla; - u32 attrs; - int rem; - - attrs = 0; - nla_for_each_nested(nla, attr, rem) { - u16 type = nla_type(nla); - int expected_len; - - if (type > OVS_KEY_ATTR_MAX || attrs & (1 << type)) - return -EINVAL; - - expected_len = ovs_key_lens[type]; - if (nla_len(nla) != expected_len && expected_len != -1) - return -EINVAL; - - attrs |= 1 << type; - a[type] = nla; - } - if (rem) - return -EINVAL; - - *attrsp = attrs; - return 0; -} - -/** - * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key. - * @swkey: receives the extracted flow key. - * @key_lenp: number of bytes used in @swkey. - * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute - * sequence. - */ -int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, - const struct nlattr *attr) -{ - const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; - const struct ovs_key_ethernet *eth_key; - int key_len; - u32 attrs; - int err; - - memset(swkey, 0, sizeof(struct sw_flow_key)); - key_len = SW_FLOW_KEY_OFFSET(eth); - - err = parse_flow_nlattrs(attr, a, &attrs); - if (err) - return err; - - /* Metadata attributes. 
*/ - if (attrs & (1 << OVS_KEY_ATTR_PRIORITY)) { - swkey->phy.priority = nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]); - attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY); - } - if (attrs & (1 << OVS_KEY_ATTR_IN_PORT)) { - u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]); - if (in_port >= DP_MAX_PORTS) - return -EINVAL; - swkey->phy.in_port = in_port; - attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT); - } else { - swkey->phy.in_port = USHRT_MAX; - } - - /* Data attributes. */ - if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET))) - return -EINVAL; - attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET); - - eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]); - memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN); - memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN); - - if (attrs & (1u << OVS_KEY_ATTR_ETHERTYPE) && - nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q)) { - const struct nlattr *encap; - __be16 tci; - - if (attrs != ((1 << OVS_KEY_ATTR_VLAN) | - (1 << OVS_KEY_ATTR_ETHERTYPE) | - (1 << OVS_KEY_ATTR_ENCAP))) - return -EINVAL; - - encap = a[OVS_KEY_ATTR_ENCAP]; - tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]); - if (tci & htons(VLAN_TAG_PRESENT)) { - swkey->eth.tci = tci; - - err = parse_flow_nlattrs(encap, a, &attrs); - if (err) - return err; - } else if (!tci) { - /* Corner case for truncated 802.1Q header. */ - if (nla_len(encap)) - return -EINVAL; - - swkey->eth.type = htons(ETH_P_8021Q); - *key_lenp = key_len; - return 0; - } else { - return -EINVAL; - } - } - - if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) { - swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]); - if (ntohs(swkey->eth.type) < 1536) - return -EINVAL; - attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE); - } else { - swkey->eth.type = htons(ETH_P_802_2); - } - - if (swkey->eth.type == htons(ETH_P_IP)) { - const struct ovs_key_ipv4 *ipv4_key; - - if (!(attrs & (1 << OVS_KEY_ATTR_IPV4))) - return -EINVAL; - attrs &= ~(1 << OVS_KEY_ATTR_IPV4); - - key_len = SW_FLOW_KEY_OFFSET(ipv4.addr); - ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]); - if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) - return -EINVAL; - swkey->ip.proto = ipv4_key->ipv4_proto; - swkey->ip.tos = ipv4_key->ipv4_tos; - swkey->ip.ttl = ipv4_key->ipv4_ttl; - swkey->ip.frag = ipv4_key->ipv4_frag; - swkey->ipv4.addr.src = ipv4_key->ipv4_src; - swkey->ipv4.addr.dst = ipv4_key->ipv4_dst; - - if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) { - err = ipv4_flow_from_nlattrs(swkey, &key_len, a, &attrs); - if (err) - return err; - } - } else if (swkey->eth.type == htons(ETH_P_IPV6)) { - const struct ovs_key_ipv6 *ipv6_key; - - if (!(attrs & (1 << OVS_KEY_ATTR_IPV6))) - return -EINVAL; - attrs &= ~(1 << OVS_KEY_ATTR_IPV6); - - key_len = SW_FLOW_KEY_OFFSET(ipv6.label); - ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]); - if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) - return -EINVAL; - swkey->ipv6.label = ipv6_key->ipv6_label; - swkey->ip.proto = ipv6_key->ipv6_proto; - swkey->ip.tos = ipv6_key->ipv6_tclass; - swkey->ip.ttl = ipv6_key->ipv6_hlimit; - swkey->ip.frag = ipv6_key->ipv6_frag; - memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src, - sizeof(swkey->ipv6.addr.src)); - memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst, - sizeof(swkey->ipv6.addr.dst)); - - if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) { - err = ipv6_flow_from_nlattrs(swkey, &key_len, a, &attrs); - if (err) - return err; - } - } else if (swkey->eth.type == htons(ETH_P_ARP)) { - const struct ovs_key_arp *arp_key; - - if (!(attrs & (1 << OVS_KEY_ATTR_ARP))) - return -EINVAL; - attrs &= ~(1 << OVS_KEY_ATTR_ARP); - - key_len = SW_FLOW_KEY_OFFSET(ipv4.arp); - arp_key = 
nla_data(a[OVS_KEY_ATTR_ARP]); - swkey->ipv4.addr.src = arp_key->arp_sip; - swkey->ipv4.addr.dst = arp_key->arp_tip; - if (arp_key->arp_op & htons(0xff00)) - return -EINVAL; - swkey->ip.proto = ntohs(arp_key->arp_op); - memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN); - memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN); - } - - if (attrs) - return -EINVAL; - *key_lenp = key_len; - - return 0; -} - -/** - * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key. - * @in_port: receives the extracted input port. - * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute - * sequence. - * - * This parses a series of Netlink attributes that form a flow key, which must - * take the same form accepted by flow_from_nlattrs(), but only enough of it to - * get the metadata, that is, the parts of the flow key that cannot be - * extracted from the packet itself. - */ -int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, - const struct nlattr *attr) -{ - const struct nlattr *nla; - int rem; - - *in_port = USHRT_MAX; - *priority = 0; - - nla_for_each_nested(nla, attr, rem) { - int type = nla_type(nla); - - if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) { - if (nla_len(nla) != ovs_key_lens[type]) - return -EINVAL; - - switch (type) { - case OVS_KEY_ATTR_PRIORITY: - *priority = nla_get_u32(nla); - break; - - case OVS_KEY_ATTR_IN_PORT: - if (nla_get_u32(nla) >= DP_MAX_PORTS) - return -EINVAL; - *in_port = nla_get_u32(nla); - break; - } - } - } - if (rem) - return -EINVAL; - return 0; -} - -int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) -{ - struct ovs_key_ethernet *eth_key; - struct nlattr *nla, *encap; - - if (swkey->phy.priority) - NLA_PUT_U32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority); - - if (swkey->phy.in_port != USHRT_MAX) - NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port); - - nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key)); - if (!nla) - goto nla_put_failure; - eth_key = nla_data(nla); - memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN); - memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN); - - if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) { - NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)); - NLA_PUT_BE16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci); - encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP); - if (!swkey->eth.tci) - goto unencap; - } else { - encap = NULL; - } - - if (swkey->eth.type == htons(ETH_P_802_2)) - goto unencap; - - NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type); - - if (swkey->eth.type == htons(ETH_P_IP)) { - struct ovs_key_ipv4 *ipv4_key; - - nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key)); - if (!nla) - goto nla_put_failure; - ipv4_key = nla_data(nla); - ipv4_key->ipv4_src = swkey->ipv4.addr.src; - ipv4_key->ipv4_dst = swkey->ipv4.addr.dst; - ipv4_key->ipv4_proto = swkey->ip.proto; - ipv4_key->ipv4_tos = swkey->ip.tos; - ipv4_key->ipv4_ttl = swkey->ip.ttl; - ipv4_key->ipv4_frag = swkey->ip.frag; - } else if (swkey->eth.type == htons(ETH_P_IPV6)) { - struct ovs_key_ipv6 *ipv6_key; - - nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key)); - if (!nla) - goto nla_put_failure; - ipv6_key = nla_data(nla); - memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src, - sizeof(ipv6_key->ipv6_src)); - memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst, - sizeof(ipv6_key->ipv6_dst)); - ipv6_key->ipv6_label = swkey->ipv6.label; - ipv6_key->ipv6_proto = swkey->ip.proto; - ipv6_key->ipv6_tclass = swkey->ip.tos; 
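
The parse helpers above (ovs_flow_from_nlattrs() and its IPv4/IPv6 companions) share one bookkeeping idiom: parse_flow_nlattrs() records every attribute it saw as a bit in 'attrs', each consumer clears the bits it used, and any bit still set at the end invalidates the key. The small userspace sketch below only illustrates that idiom; the enum values are placeholders, not the real OVS_KEY_ATTR_* ABI numbers.

#include <errno.h>
#include <stdio.h>

/* Attribute types mirroring a subset of OVS_KEY_ATTR_*; the numeric values
 * here are illustrative placeholders, not the kernel ABI. */
enum { KEY_ETHERNET = 1, KEY_ETHERTYPE, KEY_IPV4, KEY_TCP, KEY_ARP };

/* Each consumer clears the bits it used; whatever is left over at the end
 * rejects the whole key, the same "attrs" bookkeeping applied by
 * ovs_flow_from_nlattrs() to the nested key attributes. */
static int check_key(unsigned attrs, int is_ipv4, int is_tcp)
{
	if (!(attrs & (1u << KEY_ETHERNET)))
		return -EINVAL;
	attrs &= ~(1u << KEY_ETHERNET);

	if (attrs & (1u << KEY_ETHERTYPE))
		attrs &= ~(1u << KEY_ETHERTYPE);

	if (is_ipv4) {
		if (!(attrs & (1u << KEY_IPV4)))
			return -EINVAL;
		attrs &= ~(1u << KEY_IPV4);

		if (is_tcp) {
			if (!(attrs & (1u << KEY_TCP)))
				return -EINVAL;
			attrs &= ~(1u << KEY_TCP);
		}
	}

	return attrs ? -EINVAL : 0;	/* leftover = unknown or misplaced */
}

int main(void)
{
	unsigned ok = (1u << KEY_ETHERNET) | (1u << KEY_ETHERTYPE) |
		      (1u << KEY_IPV4) | (1u << KEY_TCP);

	printf("%d\n", check_key(ok, 1, 1));			/* 0 */
	printf("%d\n", check_key(ok | (1u << KEY_ARP), 1, 1));	/* -EINVAL */
	return 0;
}

The same leftover-bits test is what rejects, for example, an ARP key attribute attached to a flow whose EtherType says IPv4: nothing ever clears that bit, so the key parse fails with -EINVAL.
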
- ipv6_key->ipv6_hlimit = swkey->ip.ttl; - ipv6_key->ipv6_frag = swkey->ip.frag; - } else if (swkey->eth.type == htons(ETH_P_ARP)) { - struct ovs_key_arp *arp_key; - - nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key)); - if (!nla) - goto nla_put_failure; - arp_key = nla_data(nla); - memset(arp_key, 0, sizeof(struct ovs_key_arp)); - arp_key->arp_sip = swkey->ipv4.addr.src; - arp_key->arp_tip = swkey->ipv4.addr.dst; - arp_key->arp_op = htons(swkey->ip.proto); - memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN); - memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN); - } - - if ((swkey->eth.type == htons(ETH_P_IP) || - swkey->eth.type == htons(ETH_P_IPV6)) && - swkey->ip.frag != OVS_FRAG_TYPE_LATER) { - - if (swkey->ip.proto == IPPROTO_TCP) { - struct ovs_key_tcp *tcp_key; - - nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key)); - if (!nla) - goto nla_put_failure; - tcp_key = nla_data(nla); - if (swkey->eth.type == htons(ETH_P_IP)) { - tcp_key->tcp_src = swkey->ipv4.tp.src; - tcp_key->tcp_dst = swkey->ipv4.tp.dst; - } else if (swkey->eth.type == htons(ETH_P_IPV6)) { - tcp_key->tcp_src = swkey->ipv6.tp.src; - tcp_key->tcp_dst = swkey->ipv6.tp.dst; - } - } else if (swkey->ip.proto == IPPROTO_UDP) { - struct ovs_key_udp *udp_key; - - nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key)); - if (!nla) - goto nla_put_failure; - udp_key = nla_data(nla); - if (swkey->eth.type == htons(ETH_P_IP)) { - udp_key->udp_src = swkey->ipv4.tp.src; - udp_key->udp_dst = swkey->ipv4.tp.dst; - } else if (swkey->eth.type == htons(ETH_P_IPV6)) { - udp_key->udp_src = swkey->ipv6.tp.src; - udp_key->udp_dst = swkey->ipv6.tp.dst; - } - } else if (swkey->eth.type == htons(ETH_P_IP) && - swkey->ip.proto == IPPROTO_ICMP) { - struct ovs_key_icmp *icmp_key; - - nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key)); - if (!nla) - goto nla_put_failure; - icmp_key = nla_data(nla); - icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src); - icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst); - } else if (swkey->eth.type == htons(ETH_P_IPV6) && - swkey->ip.proto == IPPROTO_ICMPV6) { - struct ovs_key_icmpv6 *icmpv6_key; - - nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6, - sizeof(*icmpv6_key)); - if (!nla) - goto nla_put_failure; - icmpv6_key = nla_data(nla); - icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src); - icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst); - - if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION || - icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) { - struct ovs_key_nd *nd_key; - - nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key)); - if (!nla) - goto nla_put_failure; - nd_key = nla_data(nla); - memcpy(nd_key->nd_target, &swkey->ipv6.nd.target, - sizeof(nd_key->nd_target)); - memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN); - memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN); - } - } - } - -unencap: - if (encap) - nla_nest_end(skb, encap); - - return 0; - -nla_put_failure: - return -EMSGSIZE; -} - -/* Initializes the flow module. - * Returns zero if successful or a negative error code. */ -int ovs_flow_init(void) -{ - flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0, - 0, NULL); - if (flow_cache == NULL) - return -ENOMEM; - - return 0; -} - -/* Uninitializes the flow module. 
*/ -void ovs_flow_exit(void) -{ - kmem_cache_destroy(flow_cache); -} diff --git a/trunk/net/openvswitch/flow.h b/trunk/net/openvswitch/flow.h deleted file mode 100644 index 2747dc2c4ac1..000000000000 --- a/trunk/net/openvswitch/flow.h +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright (c) 2007-2011 Nicira Networks. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA - */ - -#ifndef FLOW_H -#define FLOW_H 1 - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct sk_buff; - -struct sw_flow_actions { - struct rcu_head rcu; - u32 actions_len; - struct nlattr actions[]; -}; - -struct sw_flow_key { - struct { - u32 priority; /* Packet QoS priority. */ - u16 in_port; /* Input switch port (or USHRT_MAX). */ - } phy; - struct { - u8 src[ETH_ALEN]; /* Ethernet source address. */ - u8 dst[ETH_ALEN]; /* Ethernet destination address. */ - __be16 tci; /* 0 if no VLAN, VLAN_TAG_PRESENT set otherwise. */ - __be16 type; /* Ethernet frame type. */ - } eth; - struct { - u8 proto; /* IP protocol or lower 8 bits of ARP opcode. */ - u8 tos; /* IP ToS. */ - u8 ttl; /* IP TTL/hop limit. */ - u8 frag; /* One of OVS_FRAG_TYPE_*. */ - } ip; - union { - struct { - struct { - __be32 src; /* IP source address. */ - __be32 dst; /* IP destination address. */ - } addr; - union { - struct { - __be16 src; /* TCP/UDP source port. */ - __be16 dst; /* TCP/UDP destination port. */ - } tp; - struct { - u8 sha[ETH_ALEN]; /* ARP source hardware address. */ - u8 tha[ETH_ALEN]; /* ARP target hardware address. */ - } arp; - }; - } ipv4; - struct { - struct { - struct in6_addr src; /* IPv6 source address. */ - struct in6_addr dst; /* IPv6 destination address. */ - } addr; - __be32 label; /* IPv6 flow label. */ - struct { - __be16 src; /* TCP/UDP source port. */ - __be16 dst; /* TCP/UDP destination port. */ - } tp; - struct { - struct in6_addr target; /* ND target address. */ - u8 sll[ETH_ALEN]; /* ND source link layer address. */ - u8 tll[ETH_ALEN]; /* ND target link layer address. */ - } nd; - } ipv6; - }; -}; - -struct sw_flow { - struct rcu_head rcu; - struct hlist_node hash_node[2]; - u32 hash; - - struct sw_flow_key key; - struct sw_flow_actions __rcu *sf_acts; - - spinlock_t lock; /* Lock for values below. */ - unsigned long used; /* Last used time (in jiffies). */ - u64 packet_count; /* Number of packets matched. */ - u64 byte_count; /* Number of bytes matched. */ - u8 tcp_flags; /* Union of seen TCP flags. */ -}; - -struct arp_eth_header { - __be16 ar_hrd; /* format of hardware address */ - __be16 ar_pro; /* format of protocol address */ - unsigned char ar_hln; /* length of hardware address */ - unsigned char ar_pln; /* length of protocol address */ - __be16 ar_op; /* ARP opcode (command) */ - - /* Ethernet+IPv4 specific members. 
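The ipv4 and ipv6 members of sw_flow_key above live in a union, so which arm is valid is implied by eth.type. A small accessor sketch that makes this dependency explicit; the helper is hypothetical:

static const void *example_key_l3_src(const struct sw_flow_key *key)
{
	if (key->eth.type == htons(ETH_P_IP))
		return &key->ipv4.addr.src;	/* __be32 */
	if (key->eth.type == htons(ETH_P_IPV6))
		return &key->ipv6.addr.src;	/* struct in6_addr */
	return NULL;	/* union contents are undefined for other ethertypes */
}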
*/ - unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */ - unsigned char ar_sip[4]; /* sender IP address */ - unsigned char ar_tha[ETH_ALEN]; /* target hardware address */ - unsigned char ar_tip[4]; /* target IP address */ -} __packed; - -int ovs_flow_init(void); -void ovs_flow_exit(void); - -struct sw_flow *ovs_flow_alloc(void); -void ovs_flow_deferred_free(struct sw_flow *); -void ovs_flow_free(struct sw_flow *flow); - -struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *); -void ovs_flow_deferred_free_acts(struct sw_flow_actions *); - -int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *, - int *key_lenp); -void ovs_flow_used(struct sw_flow *, struct sk_buff *); -u64 ovs_flow_used_time(unsigned long flow_jiffies); - -/* Upper bound on the length of a nlattr-formatted flow key. The longest - * nlattr-formatted flow key would be: - * - * struct pad nl hdr total - * ------ --- ------ ----- - * OVS_KEY_ATTR_PRIORITY 4 -- 4 8 - * OVS_KEY_ATTR_IN_PORT 4 -- 4 8 - * OVS_KEY_ATTR_ETHERNET 12 -- 4 16 - * OVS_KEY_ATTR_8021Q 4 -- 4 8 - * OVS_KEY_ATTR_ETHERTYPE 2 2 4 8 - * OVS_KEY_ATTR_IPV6 40 -- 4 44 - * OVS_KEY_ATTR_ICMPV6 2 2 4 8 - * OVS_KEY_ATTR_ND 28 -- 4 32 - * ------------------------------------------------- - * total 132 - */ -#define FLOW_BUFSIZE 132 - -int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *); -int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, - const struct nlattr *); -int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, - const struct nlattr *); - -#define TBL_MIN_BUCKETS 1024 - -struct flow_table { - struct flex_array *buckets; - unsigned int count, n_buckets; - struct rcu_head rcu; - int node_ver; - u32 hash_seed; - bool keep_flows; -}; - -static inline int ovs_flow_tbl_count(struct flow_table *table) -{ - return table->count; -} - -static inline int ovs_flow_tbl_need_to_expand(struct flow_table *table) -{ - return (table->count > table->n_buckets); -} - -struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table, - struct sw_flow_key *key, int len); -void ovs_flow_tbl_destroy(struct flow_table *table); -void ovs_flow_tbl_deferred_destroy(struct flow_table *table); -struct flow_table *ovs_flow_tbl_alloc(int new_size); -struct flow_table *ovs_flow_tbl_expand(struct flow_table *table); -struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table); -void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow); -void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow); -u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len); - -struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *idx); -extern const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1]; - -#endif /* flow.h */ diff --git a/trunk/net/openvswitch/vport-internal_dev.c b/trunk/net/openvswitch/vport-internal_dev.c deleted file mode 100644 index 8fc28b86f2b3..000000000000 --- a/trunk/net/openvswitch/vport-internal_dev.c +++ /dev/null @@ -1,241 +0,0 @@ -/* - * Copyright (c) 2007-2011 Nicira Networks. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
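FLOW_BUFSIZE in flow.h above is simply the sum of the "total" column of that worst-case attribute table. A compile-time restatement of the arithmetic; where such a check would live is illustrative only:

static inline void example_check_flow_bufsize(void)
{
	/* 8 + 8 + 16 + 8 + 8 + 44 + 8 + 32 = 132 */
	BUILD_BUG_ON(8 + 8 + 16 + 8 + 8 + 44 + 8 + 32 != FLOW_BUFSIZE);
}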
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "datapath.h" -#include "vport-internal_dev.h" -#include "vport-netdev.h" - -struct internal_dev { - struct vport *vport; -}; - -static struct internal_dev *internal_dev_priv(struct net_device *netdev) -{ - return netdev_priv(netdev); -} - -/* This function is only called by the kernel network layer.*/ -static struct rtnl_link_stats64 *internal_dev_get_stats(struct net_device *netdev, - struct rtnl_link_stats64 *stats) -{ - struct vport *vport = ovs_internal_dev_get_vport(netdev); - struct ovs_vport_stats vport_stats; - - ovs_vport_get_stats(vport, &vport_stats); - - /* The tx and rx stats need to be swapped because the - * switch and host OS have opposite perspectives. */ - stats->rx_packets = vport_stats.tx_packets; - stats->tx_packets = vport_stats.rx_packets; - stats->rx_bytes = vport_stats.tx_bytes; - stats->tx_bytes = vport_stats.rx_bytes; - stats->rx_errors = vport_stats.tx_errors; - stats->tx_errors = vport_stats.rx_errors; - stats->rx_dropped = vport_stats.tx_dropped; - stats->tx_dropped = vport_stats.rx_dropped; - - return stats; -} - -static int internal_dev_mac_addr(struct net_device *dev, void *p) -{ - struct sockaddr *addr = p; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - return 0; -} - -/* Called with rcu_read_lock_bh. */ -static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev) -{ - rcu_read_lock(); - ovs_vport_receive(internal_dev_priv(netdev)->vport, skb); - rcu_read_unlock(); - return 0; -} - -static int internal_dev_open(struct net_device *netdev) -{ - netif_start_queue(netdev); - return 0; -} - -static int internal_dev_stop(struct net_device *netdev) -{ - netif_stop_queue(netdev); - return 0; -} - -static void internal_dev_getinfo(struct net_device *netdev, - struct ethtool_drvinfo *info) -{ - strcpy(info->driver, "openvswitch"); -} - -static const struct ethtool_ops internal_dev_ethtool_ops = { - .get_drvinfo = internal_dev_getinfo, - .get_link = ethtool_op_get_link, -}; - -static int internal_dev_change_mtu(struct net_device *netdev, int new_mtu) -{ - if (new_mtu < 68) - return -EINVAL; - - netdev->mtu = new_mtu; - return 0; -} - -static void internal_dev_destructor(struct net_device *dev) -{ - struct vport *vport = ovs_internal_dev_get_vport(dev); - - ovs_vport_free(vport); - free_netdev(dev); -} - -static const struct net_device_ops internal_dev_netdev_ops = { - .ndo_open = internal_dev_open, - .ndo_stop = internal_dev_stop, - .ndo_start_xmit = internal_dev_xmit, - .ndo_set_mac_address = internal_dev_mac_addr, - .ndo_change_mtu = internal_dev_change_mtu, - .ndo_get_stats64 = internal_dev_get_stats, -}; - -static void do_setup(struct net_device *netdev) -{ - ether_setup(netdev); - - netdev->netdev_ops = &internal_dev_netdev_ops; - - netdev->priv_flags &= ~IFF_TX_SKB_SHARING; - netdev->destructor = internal_dev_destructor; - SET_ETHTOOL_OPS(netdev, &internal_dev_ethtool_ops); - netdev->tx_queue_len = 0; - - netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST | - NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_TSO; - - netdev->vlan_features = netdev->features; - netdev->features |= NETIF_F_HW_VLAN_TX; - netdev->hw_features = netdev->features & 
~NETIF_F_LLTX; - random_ether_addr(netdev->dev_addr); -} - -static struct vport *internal_dev_create(const struct vport_parms *parms) -{ - struct vport *vport; - struct netdev_vport *netdev_vport; - struct internal_dev *internal_dev; - int err; - - vport = ovs_vport_alloc(sizeof(struct netdev_vport), - &ovs_internal_vport_ops, parms); - if (IS_ERR(vport)) { - err = PTR_ERR(vport); - goto error; - } - - netdev_vport = netdev_vport_priv(vport); - - netdev_vport->dev = alloc_netdev(sizeof(struct internal_dev), - parms->name, do_setup); - if (!netdev_vport->dev) { - err = -ENOMEM; - goto error_free_vport; - } - - internal_dev = internal_dev_priv(netdev_vport->dev); - internal_dev->vport = vport; - - err = register_netdevice(netdev_vport->dev); - if (err) - goto error_free_netdev; - - dev_set_promiscuity(netdev_vport->dev, 1); - netif_start_queue(netdev_vport->dev); - - return vport; - -error_free_netdev: - free_netdev(netdev_vport->dev); -error_free_vport: - ovs_vport_free(vport); -error: - return ERR_PTR(err); -} - -static void internal_dev_destroy(struct vport *vport) -{ - struct netdev_vport *netdev_vport = netdev_vport_priv(vport); - - netif_stop_queue(netdev_vport->dev); - dev_set_promiscuity(netdev_vport->dev, -1); - - /* unregister_netdevice() waits for an RCU grace period. */ - unregister_netdevice(netdev_vport->dev); -} - -static int internal_dev_recv(struct vport *vport, struct sk_buff *skb) -{ - struct net_device *netdev = netdev_vport_priv(vport)->dev; - int len; - - len = skb->len; - skb->dev = netdev; - skb->pkt_type = PACKET_HOST; - skb->protocol = eth_type_trans(skb, netdev); - - netif_rx(skb); - - return len; -} - -const struct vport_ops ovs_internal_vport_ops = { - .type = OVS_VPORT_TYPE_INTERNAL, - .create = internal_dev_create, - .destroy = internal_dev_destroy, - .get_name = ovs_netdev_get_name, - .get_ifindex = ovs_netdev_get_ifindex, - .send = internal_dev_recv, -}; - -int ovs_is_internal_dev(const struct net_device *netdev) -{ - return netdev->netdev_ops == &internal_dev_netdev_ops; -} - -struct vport *ovs_internal_dev_get_vport(struct net_device *netdev) -{ - if (!ovs_is_internal_dev(netdev)) - return NULL; - - return internal_dev_priv(netdev)->vport; -} diff --git a/trunk/net/openvswitch/vport-internal_dev.h b/trunk/net/openvswitch/vport-internal_dev.h deleted file mode 100644 index 3454447c5f11..000000000000 --- a/trunk/net/openvswitch/vport-internal_dev.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2007-2011 Nicira Networks. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
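ovs_is_internal_dev() above identifies an internal port purely by comparing the device's netdev_ops pointer against the driver's own ops table. The same pointer-identity idiom in isolation; the ops table and helper names are hypothetical:

/* the driver's own ops table; real .ndo_* handlers are omitted in this sketch */
static const struct net_device_ops example_netdev_ops;

static bool example_is_our_dev(const struct net_device *netdev)
{
	/* pointer identity is enough: only our devices use this ops table */
	return netdev->netdev_ops == &example_netdev_ops;
}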
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA - */ - -#ifndef VPORT_INTERNAL_DEV_H -#define VPORT_INTERNAL_DEV_H 1 - -#include "datapath.h" -#include "vport.h" - -int ovs_is_internal_dev(const struct net_device *); -struct vport *ovs_internal_dev_get_vport(struct net_device *); - -#endif /* vport-internal_dev.h */ diff --git a/trunk/net/openvswitch/vport-netdev.c b/trunk/net/openvswitch/vport-netdev.c deleted file mode 100644 index c1068aed03d1..000000000000 --- a/trunk/net/openvswitch/vport-netdev.c +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Copyright (c) 2007-2011 Nicira Networks. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "datapath.h" -#include "vport-internal_dev.h" -#include "vport-netdev.h" - -/* Must be called with rcu_read_lock. */ -static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) -{ - if (unlikely(!vport)) { - kfree_skb(skb); - return; - } - - /* Make our own copy of the packet. Otherwise we will mangle the - * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). - * (No one comes after us, since we tell handle_bridge() that we took - * the packet.) */ - skb = skb_share_check(skb, GFP_ATOMIC); - if (unlikely(!skb)) - return; - - skb_push(skb, ETH_HLEN); - ovs_vport_receive(vport, skb); -} - -/* Called with rcu_read_lock and bottom-halves disabled. 
*/ -static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb) -{ - struct sk_buff *skb = *pskb; - struct vport *vport; - - if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) - return RX_HANDLER_PASS; - - vport = ovs_netdev_get_vport(skb->dev); - - netdev_port_receive(vport, skb); - - return RX_HANDLER_CONSUMED; -} - -static struct vport *netdev_create(const struct vport_parms *parms) -{ - struct vport *vport; - struct netdev_vport *netdev_vport; - int err; - - vport = ovs_vport_alloc(sizeof(struct netdev_vport), - &ovs_netdev_vport_ops, parms); - if (IS_ERR(vport)) { - err = PTR_ERR(vport); - goto error; - } - - netdev_vport = netdev_vport_priv(vport); - - netdev_vport->dev = dev_get_by_name(&init_net, parms->name); - if (!netdev_vport->dev) { - err = -ENODEV; - goto error_free_vport; - } - - if (netdev_vport->dev->flags & IFF_LOOPBACK || - netdev_vport->dev->type != ARPHRD_ETHER || - ovs_is_internal_dev(netdev_vport->dev)) { - err = -EINVAL; - goto error_put; - } - - err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook, - vport); - if (err) - goto error_put; - - dev_set_promiscuity(netdev_vport->dev, 1); - netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH; - - return vport; - -error_put: - dev_put(netdev_vport->dev); -error_free_vport: - ovs_vport_free(vport); -error: - return ERR_PTR(err); -} - -static void netdev_destroy(struct vport *vport) -{ - struct netdev_vport *netdev_vport = netdev_vport_priv(vport); - - netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH; - netdev_rx_handler_unregister(netdev_vport->dev); - dev_set_promiscuity(netdev_vport->dev, -1); - - synchronize_rcu(); - - dev_put(netdev_vport->dev); - ovs_vport_free(vport); -} - -const char *ovs_netdev_get_name(const struct vport *vport) -{ - const struct netdev_vport *netdev_vport = netdev_vport_priv(vport); - return netdev_vport->dev->name; -} - -int ovs_netdev_get_ifindex(const struct vport *vport) -{ - const struct netdev_vport *netdev_vport = netdev_vport_priv(vport); - return netdev_vport->dev->ifindex; -} - -static unsigned packet_length(const struct sk_buff *skb) -{ - unsigned length = skb->len - ETH_HLEN; - - if (skb->protocol == htons(ETH_P_8021Q)) - length -= VLAN_HLEN; - - return length; -} - -static int netdev_send(struct vport *vport, struct sk_buff *skb) -{ - struct netdev_vport *netdev_vport = netdev_vport_priv(vport); - int mtu = netdev_vport->dev->mtu; - int len; - - if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) { - if (net_ratelimit()) - pr_warn("%s: dropped over-mtu packet: %d > %d\n", - ovs_dp_name(vport->dp), packet_length(skb), mtu); - goto error; - } - - if (unlikely(skb_warn_if_lro(skb))) - goto error; - - skb->dev = netdev_vport->dev; - len = skb->len; - dev_queue_xmit(skb); - - return len; - -error: - kfree_skb(skb); - ovs_vport_record_error(vport, VPORT_E_TX_DROPPED); - return 0; -} - -/* Returns null if this device is not attached to a datapath. 
*/ -struct vport *ovs_netdev_get_vport(struct net_device *dev) -{ - if (likely(dev->priv_flags & IFF_OVS_DATAPATH)) - return (struct vport *) - rcu_dereference_rtnl(dev->rx_handler_data); - else - return NULL; -} - -const struct vport_ops ovs_netdev_vport_ops = { - .type = OVS_VPORT_TYPE_NETDEV, - .create = netdev_create, - .destroy = netdev_destroy, - .get_name = ovs_netdev_get_name, - .get_ifindex = ovs_netdev_get_ifindex, - .send = netdev_send, -}; diff --git a/trunk/net/openvswitch/vport-netdev.h b/trunk/net/openvswitch/vport-netdev.h deleted file mode 100644 index fd9b008a0e6e..000000000000 --- a/trunk/net/openvswitch/vport-netdev.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2007-2011 Nicira Networks. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA - */ - -#ifndef VPORT_NETDEV_H -#define VPORT_NETDEV_H 1 - -#include - -#include "vport.h" - -struct vport *ovs_netdev_get_vport(struct net_device *dev); - -struct netdev_vport { - struct net_device *dev; -}; - -static inline struct netdev_vport * -netdev_vport_priv(const struct vport *vport) -{ - return vport_priv(vport); -} - -const char *ovs_netdev_get_name(const struct vport *); -const char *ovs_netdev_get_config(const struct vport *); -int ovs_netdev_get_ifindex(const struct vport *); - -#endif /* vport_netdev.h */ diff --git a/trunk/net/openvswitch/vport.c b/trunk/net/openvswitch/vport.c deleted file mode 100644 index 7f0ef3794c51..000000000000 --- a/trunk/net/openvswitch/vport.c +++ /dev/null @@ -1,398 +0,0 @@ -/* - * Copyright (c) 2007-2011 Nicira Networks. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "vport.h" -#include "vport-internal_dev.h" - -/* List of statically compiled vport implementations. Don't forget to also - * add yours to the list at the bottom of vport.h. */ -static const struct vport_ops *vport_ops_list[] = { - &ovs_netdev_vport_ops, - &ovs_internal_vport_ops, -}; - -/* Protected by RCU read lock for reading, RTNL lock for writing. */ -static struct hlist_head *dev_table; -#define VPORT_HASH_BUCKETS 1024 - -/** - * ovs_vport_init - initialize vport subsystem - * - * Called at module load time to initialize the vport subsystem. 
- */ -int ovs_vport_init(void) -{ - dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head), - GFP_KERNEL); - if (!dev_table) - return -ENOMEM; - - return 0; -} - -/** - * ovs_vport_exit - shutdown vport subsystem - * - * Called at module exit time to shutdown the vport subsystem. - */ -void ovs_vport_exit(void) -{ - kfree(dev_table); -} - -static struct hlist_head *hash_bucket(const char *name) -{ - unsigned int hash = full_name_hash(name, strlen(name)); - return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)]; -} - -/** - * ovs_vport_locate - find a port that has already been created - * - * @name: name of port to find - * - * Must be called with RTNL or RCU read lock. - */ -struct vport *ovs_vport_locate(const char *name) -{ - struct hlist_head *bucket = hash_bucket(name); - struct vport *vport; - struct hlist_node *node; - - hlist_for_each_entry_rcu(vport, node, bucket, hash_node) - if (!strcmp(name, vport->ops->get_name(vport))) - return vport; - - return NULL; -} - -/** - * ovs_vport_alloc - allocate and initialize new vport - * - * @priv_size: Size of private data area to allocate. - * @ops: vport device ops - * - * Allocate and initialize a new vport defined by @ops. The vport will contain - * a private data area of size @priv_size that can be accessed using - * vport_priv(). vports that are no longer needed should be released with - * vport_free(). - */ -struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops, - const struct vport_parms *parms) -{ - struct vport *vport; - size_t alloc_size; - - alloc_size = sizeof(struct vport); - if (priv_size) { - alloc_size = ALIGN(alloc_size, VPORT_ALIGN); - alloc_size += priv_size; - } - - vport = kzalloc(alloc_size, GFP_KERNEL); - if (!vport) - return ERR_PTR(-ENOMEM); - - vport->dp = parms->dp; - vport->port_no = parms->port_no; - vport->upcall_pid = parms->upcall_pid; - vport->ops = ops; - - vport->percpu_stats = alloc_percpu(struct vport_percpu_stats); - if (!vport->percpu_stats) { - kfree(vport); - return ERR_PTR(-ENOMEM); - } - - spin_lock_init(&vport->stats_lock); - - return vport; -} - -/** - * ovs_vport_free - uninitialize and free vport - * - * @vport: vport to free - * - * Frees a vport allocated with vport_alloc() when it is no longer needed. - * - * The caller must ensure that an RCU grace period has passed since the last - * time @vport was in a datapath. - */ -void ovs_vport_free(struct vport *vport) -{ - free_percpu(vport->percpu_stats); - kfree(vport); -} - -/** - * ovs_vport_add - add vport device (for kernel callers) - * - * @parms: Information about new vport. - * - * Creates a new vport with the specified configuration (which is dependent on - * device type). RTNL lock must be held. - */ -struct vport *ovs_vport_add(const struct vport_parms *parms) -{ - struct vport *vport; - int err = 0; - int i; - - ASSERT_RTNL(); - - for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) { - if (vport_ops_list[i]->type == parms->type) { - vport = vport_ops_list[i]->create(parms); - if (IS_ERR(vport)) { - err = PTR_ERR(vport); - goto out; - } - - hlist_add_head_rcu(&vport->hash_node, - hash_bucket(vport->ops->get_name(vport))); - return vport; - } - } - - err = -EAFNOSUPPORT; - -out: - return ERR_PTR(err); -} - -/** - * ovs_vport_set_options - modify existing vport device (for kernel callers) - * - * @vport: vport to modify. - * @port: New configuration. - * - * Modifies an existing device with the specified configuration (which is - * dependent on device type). RTNL lock must be held. 
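ovs_vport_alloc() above places the implementation's private area directly behind struct vport, aligned to VPORT_ALIGN, mirroring the vport_priv()/vport_from_priv() helpers declared in vport.h. A sketch of that layout arithmetic with a hypothetical private struct:

struct example_priv {
	struct net_device *dev;
};

/* total allocation: aligned vport header followed by the private area */
static size_t example_alloc_size(void)
{
	return ALIGN(sizeof(struct vport), VPORT_ALIGN) +
	       sizeof(struct example_priv);
}

/* the private area starts right after the aligned vport header */
static struct example_priv *example_priv_of(const struct vport *vport)
{
	return (struct example_priv *)((u8 *)vport +
			ALIGN(sizeof(struct vport), VPORT_ALIGN));
}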
- */ -int ovs_vport_set_options(struct vport *vport, struct nlattr *options) -{ - ASSERT_RTNL(); - - if (!vport->ops->set_options) - return -EOPNOTSUPP; - return vport->ops->set_options(vport, options); -} - -/** - * ovs_vport_del - delete existing vport device - * - * @vport: vport to delete. - * - * Detaches @vport from its datapath and destroys it. It is possible to fail - * for reasons such as lack of memory. RTNL lock must be held. - */ -void ovs_vport_del(struct vport *vport) -{ - ASSERT_RTNL(); - - hlist_del_rcu(&vport->hash_node); - - vport->ops->destroy(vport); -} - -/** - * ovs_vport_get_stats - retrieve device stats - * - * @vport: vport from which to retrieve the stats - * @stats: location to store stats - * - * Retrieves transmit, receive, and error stats for the given device. - * - * Must be called with RTNL lock or rcu_read_lock. - */ -void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats) -{ - int i; - - memset(stats, 0, sizeof(*stats)); - - /* We potentially have 2 sources of stats that need to be combined: - * those we have collected (split into err_stats and percpu_stats) from - * set_stats() and device error stats from netdev->get_stats() (for - * errors that happen downstream and therefore aren't reported through - * our vport_record_error() function). - * Stats from first source are reported by ovs (OVS_VPORT_ATTR_STATS). - * netdev-stats can be directly read over netlink-ioctl. - */ - - spin_lock_bh(&vport->stats_lock); - - stats->rx_errors = vport->err_stats.rx_errors; - stats->tx_errors = vport->err_stats.tx_errors; - stats->tx_dropped = vport->err_stats.tx_dropped; - stats->rx_dropped = vport->err_stats.rx_dropped; - - spin_unlock_bh(&vport->stats_lock); - - for_each_possible_cpu(i) { - const struct vport_percpu_stats *percpu_stats; - struct vport_percpu_stats local_stats; - unsigned int start; - - percpu_stats = per_cpu_ptr(vport->percpu_stats, i); - - do { - start = u64_stats_fetch_begin_bh(&percpu_stats->sync); - local_stats = *percpu_stats; - } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start)); - - stats->rx_bytes += local_stats.rx_bytes; - stats->rx_packets += local_stats.rx_packets; - stats->tx_bytes += local_stats.tx_bytes; - stats->tx_packets += local_stats.tx_packets; - } -} - -/** - * ovs_vport_get_options - retrieve device options - * - * @vport: vport from which to retrieve the options. - * @skb: sk_buff where options should be appended. - * - * Retrieves the configuration of the given device, appending an - * %OVS_VPORT_ATTR_OPTIONS attribute that in turn contains nested - * vport-specific attributes to @skb. - * - * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room, or another - * negative error code if a real error occurred. If an error occurs, @skb is - * left unmodified. - * - * Must be called with RTNL lock or rcu_read_lock. - */ -int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb) -{ - struct nlattr *nla; - - nla = nla_nest_start(skb, OVS_VPORT_ATTR_OPTIONS); - if (!nla) - return -EMSGSIZE; - - if (vport->ops->get_options) { - int err = vport->ops->get_options(vport, skb); - if (err) { - nla_nest_cancel(skb, nla); - return err; - } - } - - nla_nest_end(skb, nla); - return 0; -} - -/** - * ovs_vport_receive - pass up received packet to the datapath for processing - * - * @vport: vport that received the packet - * @skb: skb that was received - * - * Must be called with rcu_read_lock. The packet cannot be shared and - * skb->data should point to the Ethernet header. 
The caller must have already - * called compute_ip_summed() to initialize the checksumming fields. - */ -void ovs_vport_receive(struct vport *vport, struct sk_buff *skb) -{ - struct vport_percpu_stats *stats; - - stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id()); - - u64_stats_update_begin(&stats->sync); - stats->rx_packets++; - stats->rx_bytes += skb->len; - u64_stats_update_end(&stats->sync); - - ovs_dp_process_received_packet(vport, skb); -} - -/** - * ovs_vport_send - send a packet on a device - * - * @vport: vport on which to send the packet - * @skb: skb to send - * - * Sends the given packet and returns the length of data sent. Either RTNL - * lock or rcu_read_lock must be held. - */ -int ovs_vport_send(struct vport *vport, struct sk_buff *skb) -{ - int sent = vport->ops->send(vport, skb); - - if (likely(sent)) { - struct vport_percpu_stats *stats; - - stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id()); - - u64_stats_update_begin(&stats->sync); - stats->tx_packets++; - stats->tx_bytes += sent; - u64_stats_update_end(&stats->sync); - } - return sent; -} - -/** - * ovs_vport_record_error - indicate device error to generic stats layer - * - * @vport: vport that encountered the error - * @err_type: one of enum vport_err_type types to indicate the error type - * - * If using the vport generic stats layer indicate that an error of the given - * type has occured. - */ -void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type) -{ - spin_lock(&vport->stats_lock); - - switch (err_type) { - case VPORT_E_RX_DROPPED: - vport->err_stats.rx_dropped++; - break; - - case VPORT_E_RX_ERROR: - vport->err_stats.rx_errors++; - break; - - case VPORT_E_TX_DROPPED: - vport->err_stats.tx_dropped++; - break; - - case VPORT_E_TX_ERROR: - vport->err_stats.tx_errors++; - break; - }; - - spin_unlock(&vport->stats_lock); -} diff --git a/trunk/net/openvswitch/vport.h b/trunk/net/openvswitch/vport.h deleted file mode 100644 index 19609629dabd..000000000000 --- a/trunk/net/openvswitch/vport.h +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Copyright (c) 2007-2011 Nicira Networks. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
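ovs_vport_receive() and ovs_vport_send() above bump per-CPU counters under u64_stats_update_begin/end, and ovs_vport_get_stats() reads them back with the fetch/retry sequence. Both halves of that u64_stats_sync protocol, condensed into one sketch; the struct and function names are hypothetical:

struct example_pcpu {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync sync;
};

/* writer side: runs on the local CPU, e.g. in the receive path */
static void example_count(struct example_pcpu *s, unsigned int len)
{
	u64_stats_update_begin(&s->sync);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->sync);
}

/* reader side: may run on any CPU and retries if a writer was active */
static void example_read(const struct example_pcpu *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&s->sync);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_bh(&s->sync, start));
}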
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA - */ - -#ifndef VPORT_H -#define VPORT_H 1 - -#include -#include -#include -#include -#include - -#include "datapath.h" - -struct vport; -struct vport_parms; - -/* The following definitions are for users of the vport subsytem: */ - -int ovs_vport_init(void); -void ovs_vport_exit(void); - -struct vport *ovs_vport_add(const struct vport_parms *); -void ovs_vport_del(struct vport *); - -struct vport *ovs_vport_locate(const char *name); - -void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *); - -int ovs_vport_set_options(struct vport *, struct nlattr *options); -int ovs_vport_get_options(const struct vport *, struct sk_buff *); - -int ovs_vport_send(struct vport *, struct sk_buff *); - -/* The following definitions are for implementers of vport devices: */ - -struct vport_percpu_stats { - u64 rx_bytes; - u64 rx_packets; - u64 tx_bytes; - u64 tx_packets; - struct u64_stats_sync sync; -}; - -struct vport_err_stats { - u64 rx_dropped; - u64 rx_errors; - u64 tx_dropped; - u64 tx_errors; -}; - -/** - * struct vport - one port within a datapath - * @rcu: RCU callback head for deferred destruction. - * @port_no: Index into @dp's @ports array. - * @dp: Datapath to which this port belongs. - * @node: Element in @dp's @port_list. - * @upcall_pid: The Netlink port to use for packets received on this port that - * miss the flow table. - * @hash_node: Element in @dev_table hash table in vport.c. - * @ops: Class structure. - * @percpu_stats: Points to per-CPU statistics used and maintained by vport - * @stats_lock: Protects @err_stats; - * @err_stats: Points to error statistics used and maintained by vport - */ -struct vport { - struct rcu_head rcu; - u16 port_no; - struct datapath *dp; - struct list_head node; - u32 upcall_pid; - - struct hlist_node hash_node; - const struct vport_ops *ops; - - struct vport_percpu_stats __percpu *percpu_stats; - - spinlock_t stats_lock; - struct vport_err_stats err_stats; -}; - -/** - * struct vport_parms - parameters for creating a new vport - * - * @name: New vport's name. - * @type: New vport's type. - * @options: %OVS_VPORT_ATTR_OPTIONS attribute from Netlink message, %NULL if - * none was supplied. - * @dp: New vport's datapath. - * @port_no: New vport's port number. - */ -struct vport_parms { - const char *name; - enum ovs_vport_type type; - struct nlattr *options; - - /* For ovs_vport_alloc(). */ - struct datapath *dp; - u16 port_no; - u32 upcall_pid; -}; - -/** - * struct vport_ops - definition of a type of virtual port - * - * @type: %OVS_VPORT_TYPE_* value for this type of virtual port. - * @create: Create a new vport configured as specified. On success returns - * a new vport allocated with ovs_vport_alloc(), otherwise an ERR_PTR() value. - * @destroy: Destroys a vport. Must call vport_free() on the vport but not - * before an RCU grace period has elapsed. - * @set_options: Modify the configuration of an existing vport. May be %NULL - * if modification is not supported. - * @get_options: Appends vport-specific attributes for the configuration of an - * existing vport to a &struct sk_buff. May be %NULL for a vport that does not - * have any configuration. - * @get_name: Get the device's name. - * @get_config: Get the device's configuration. - * @get_ifindex: Get the system interface index associated with the device. 
- * May be null if the device does not have an ifindex. - * @send: Send a packet on the device. Returns the length of the packet sent. - */ -struct vport_ops { - enum ovs_vport_type type; - - /* Called with RTNL lock. */ - struct vport *(*create)(const struct vport_parms *); - void (*destroy)(struct vport *); - - int (*set_options)(struct vport *, struct nlattr *); - int (*get_options)(const struct vport *, struct sk_buff *); - - /* Called with rcu_read_lock or RTNL lock. */ - const char *(*get_name)(const struct vport *); - void (*get_config)(const struct vport *, void *); - int (*get_ifindex)(const struct vport *); - - int (*send)(struct vport *, struct sk_buff *); -}; - -enum vport_err_type { - VPORT_E_RX_DROPPED, - VPORT_E_RX_ERROR, - VPORT_E_TX_DROPPED, - VPORT_E_TX_ERROR, -}; - -struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *, - const struct vport_parms *); -void ovs_vport_free(struct vport *); - -#define VPORT_ALIGN 8 - -/** - * vport_priv - access private data area of vport - * - * @vport: vport to access - * - * If a nonzero size was passed in priv_size of vport_alloc() a private data - * area was allocated on creation. This allows that area to be accessed and - * used for any purpose needed by the vport implementer. - */ -static inline void *vport_priv(const struct vport *vport) -{ - return (u8 *)vport + ALIGN(sizeof(struct vport), VPORT_ALIGN); -} - -/** - * vport_from_priv - lookup vport from private data pointer - * - * @priv: Start of private data area. - * - * It is sometimes useful to translate from a pointer to the private data - * area to the vport, such as in the case where the private data pointer is - * the result of a hash table lookup. @priv must point to the start of the - * private data area. - */ -static inline struct vport *vport_from_priv(const void *priv) -{ - return (struct vport *)(priv - ALIGN(sizeof(struct vport), VPORT_ALIGN)); -} - -void ovs_vport_receive(struct vport *, struct sk_buff *); -void ovs_vport_record_error(struct vport *, enum vport_err_type err_type); - -/* List of statically compiled vport implementations. Don't forget to also - * add yours to the list at the top of vport.c. */ -extern const struct vport_ops ovs_netdev_vport_ops; -extern const struct vport_ops ovs_internal_vport_ops; - -#endif /* vport.h */ diff --git a/trunk/net/packet/af_packet.c b/trunk/net/packet/af_packet.c index 2dbb32b988c4..3891702b81df 100644 --- a/trunk/net/packet/af_packet.c +++ b/trunk/net/packet/af_packet.c @@ -1499,11 +1499,10 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, if (!skb) { size_t reserved = LL_RESERVED_SPACE(dev); - int tlen = dev->needed_tailroom; unsigned int hhlen = dev->header_ops ? 
dev->hard_header_len : 0; rcu_read_unlock(); - skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL); + skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL); if (skb == NULL) return -ENOBUFS; /* FIXME: Save some space for broken drivers that write a hard @@ -1943,7 +1942,7 @@ static void tpacket_destruct_skb(struct sk_buff *skb) static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, void *frame, struct net_device *dev, int size_max, - __be16 proto, unsigned char *addr, int hlen) + __be16 proto, unsigned char *addr) { union { struct tpacket_hdr *h1; @@ -1977,7 +1976,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, return -EMSGSIZE; } - skb_reserve(skb, hlen); + skb_reserve(skb, LL_RESERVED_SPACE(dev)); skb_reset_network_header(skb); data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll); @@ -2052,7 +2051,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) unsigned char *addr; int len_sum = 0; int status = 0; - int hlen, tlen; mutex_lock(&po->pg_vec_lock); @@ -2101,17 +2099,16 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) } status = TP_STATUS_SEND_REQUEST; - hlen = LL_RESERVED_SPACE(dev); - tlen = dev->needed_tailroom; skb = sock_alloc_send_skb(&po->sk, - hlen + tlen + sizeof(struct sockaddr_ll), + LL_ALLOCATED_SPACE(dev) + + sizeof(struct sockaddr_ll), 0, &err); if (unlikely(skb == NULL)) goto out_status; tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto, - addr, hlen); + addr); if (unlikely(tp_len < 0)) { if (po->tp_loss) { @@ -2208,7 +2205,6 @@ static int packet_snd(struct socket *sock, int vnet_hdr_len; struct packet_sock *po = pkt_sk(sk); unsigned short gso_type = 0; - int hlen, tlen; /* * Get and verify the address. @@ -2293,9 +2289,8 @@ static int packet_snd(struct socket *sock, goto out_unlock; err = -ENOBUFS; - hlen = LL_RESERVED_SPACE(dev); - tlen = dev->needed_tailroom; - skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len, + skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev), + LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len, msg->msg_flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out_unlock; @@ -2453,12 +2448,8 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc { struct packet_sock *po = pkt_sk(sk); - if (po->fanout) { - if (dev) - dev_put(dev); - + if (po->fanout) return -EINVAL; - } lock_sock(sk); diff --git a/trunk/net/phonet/pep.c b/trunk/net/phonet/pep.c index 9f60008740e3..2ba6e9fb4cbc 100644 --- a/trunk/net/phonet/pep.c +++ b/trunk/net/phonet/pep.c @@ -534,29 +534,6 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb) return pipe_handler_send_created_ind(sk); } -static int pep_enableresp_rcv(struct sock *sk, struct sk_buff *skb) -{ - struct pnpipehdr *hdr = pnp_hdr(skb); - - if (hdr->error_code != PN_PIPE_NO_ERROR) - return -ECONNREFUSED; - - return pep_indicate(sk, PNS_PIPE_ENABLED_IND, 0 /* sub-blocks */, - NULL, 0, GFP_ATOMIC); - -} - -static void pipe_start_flow_control(struct sock *sk) -{ - struct pep_sock *pn = pep_sk(sk); - - if (!pn_flow_safe(pn->tx_fc)) { - atomic_set(&pn->tx_credits, 1); - sk->sk_write_space(sk); - } - pipe_grant_credits(sk, GFP_ATOMIC); -} - /* Queue an skb to an actively connected sock. * Socket lock must be held. 
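The af_packet hunks above go back to sizing transmit buffers with LL_ALLOCATED_SPACE(dev), which is roughly the device's reserved headroom plus its needed tailroom, and then reserving LL_RESERVED_SPACE(dev) in front of the payload. A sketch of that allocate-and-reserve pattern, assuming these older macros (removed in later kernels); the helper name is hypothetical:

static struct sk_buff *example_spkt_alloc(struct sock *sk,
					  struct net_device *dev, size_t len)
{
	struct sk_buff *skb;

	/* headroom for the link-layer header plus the device's tailroom */
	skb = sock_wmalloc(sk, len + LL_ALLOCATED_SPACE(dev), 0, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* leave the headroom in front */
	return skb;
}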
*/ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb) @@ -602,25 +579,13 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb) sk->sk_state = TCP_CLOSE_WAIT; break; } - if (pn->init_enable == PN_PIPE_DISABLE) - sk->sk_state = TCP_SYN_RECV; - else { - sk->sk_state = TCP_ESTABLISHED; - pipe_start_flow_control(sk); - } - break; - - case PNS_PEP_ENABLE_RESP: - if (sk->sk_state != TCP_SYN_SENT) - break; - - if (pep_enableresp_rcv(sk, skb)) { - sk->sk_state = TCP_CLOSE_WAIT; - break; - } sk->sk_state = TCP_ESTABLISHED; - pipe_start_flow_control(sk); + if (!pn_flow_safe(pn->tx_fc)) { + atomic_set(&pn->tx_credits, 1); + sk->sk_write_space(sk); + } + pipe_grant_credits(sk, GFP_ATOMIC); break; case PNS_PEP_DISCONNECT_RESP: @@ -899,32 +864,14 @@ static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len) int err; u8 data[4] = { 0 /* sub-blocks */, PAD, PAD, PAD }; - if (pn->pipe_handle == PN_PIPE_INVALID_HANDLE) - pn->pipe_handle = 1; /* anything but INVALID_HANDLE */ - + pn->pipe_handle = 1; /* anything but INVALID_HANDLE */ err = pipe_handler_request(sk, PNS_PEP_CONNECT_REQ, - pn->init_enable, data, 4); + PN_PIPE_ENABLE, data, 4); if (err) { pn->pipe_handle = PN_PIPE_INVALID_HANDLE; return err; } - sk->sk_state = TCP_SYN_SENT; - - return 0; -} - -static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len) -{ - int err; - - err = pipe_handler_request(sk, PNS_PEP_ENABLE_REQ, PAD, - NULL, 0); - if (err) - return err; - - sk->sk_state = TCP_SYN_SENT; - return 0; } @@ -932,14 +879,11 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg) { struct pep_sock *pn = pep_sk(sk); int answ; - int ret = -ENOIOCTLCMD; switch (cmd) { case SIOCINQ: - if (sk->sk_state == TCP_LISTEN) { - ret = -EINVAL; - break; - } + if (sk->sk_state == TCP_LISTEN) + return -EINVAL; lock_sock(sk); if (sock_flag(sk, SOCK_URGINLINE) && @@ -950,22 +894,10 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg) else answ = 0; release_sock(sk); - ret = put_user(answ, (int __user *)arg); - break; - - case SIOCPNENABLEPIPE: - lock_sock(sk); - if (sk->sk_state == TCP_SYN_SENT) - ret = -EBUSY; - else if (sk->sk_state == TCP_ESTABLISHED) - ret = -EISCONN; - else - ret = pep_sock_enable(sk, NULL, 0); - release_sock(sk); - break; + return put_user(answ, (int __user *)arg); } - return ret; + return -ENOIOCTLCMD; } static int pep_init(struct sock *sk) @@ -1028,18 +960,6 @@ static int pep_setsockopt(struct sock *sk, int level, int optname, } goto out_norel; - case PNPIPE_HANDLE: - if ((sk->sk_state == TCP_CLOSE) && - (val >= 0) && (val < PN_PIPE_INVALID_HANDLE)) - pn->pipe_handle = val; - else - err = -EINVAL; - break; - - case PNPIPE_INITSTATE: - pn->init_enable = !!val; - break; - default: err = -ENOPROTOOPT; } @@ -1075,10 +995,6 @@ static int pep_getsockopt(struct sock *sk, int level, int optname, return -EINVAL; break; - case PNPIPE_INITSTATE: - val = pn->init_enable; - break; - default: return -ENOPROTOOPT; } diff --git a/trunk/net/rfkill/core.c b/trunk/net/rfkill/core.c index 354760ebbbd2..5be19575c340 100644 --- a/trunk/net/rfkill/core.c +++ b/trunk/net/rfkill/core.c @@ -644,7 +644,7 @@ static ssize_t rfkill_soft_store(struct device *dev, if (!capable(CAP_NET_ADMIN)) return -EPERM; - err = kstrtoul(buf, 0, &state); + err = strict_strtoul(buf, 0, &state); if (err) return err; @@ -688,7 +688,7 @@ static ssize_t rfkill_state_store(struct device *dev, if (!capable(CAP_NET_ADMIN)) return -EPERM; - err = kstrtoul(buf, 0, &state); + err = 
strict_strtoul(buf, 0, &state); if (err) return err; diff --git a/trunk/net/rfkill/rfkill-gpio.c b/trunk/net/rfkill/rfkill-gpio.c index 865adb61685a..128677d69056 100644 --- a/trunk/net/rfkill/rfkill-gpio.c +++ b/trunk/net/rfkill/rfkill-gpio.c @@ -105,7 +105,7 @@ static int rfkill_gpio_probe(struct platform_device *pdev) ret = pdata->gpio_runtime_setup(pdev); if (ret) { pr_warn("%s: can't set up gpio\n", __func__); - goto fail_alloc; + return ret; } } @@ -220,7 +220,18 @@ static struct platform_driver rfkill_gpio_driver = { }, }; -module_platform_driver(rfkill_gpio_driver); +static int __init rfkill_gpio_init(void) +{ + return platform_driver_register(&rfkill_gpio_driver); +} + +static void __exit rfkill_gpio_exit(void) +{ + platform_driver_unregister(&rfkill_gpio_driver); +} + +module_init(rfkill_gpio_init); +module_exit(rfkill_gpio_exit); MODULE_DESCRIPTION("gpio rfkill"); MODULE_AUTHOR("NVIDIA"); diff --git a/trunk/net/rfkill/rfkill-regulator.c b/trunk/net/rfkill/rfkill-regulator.c index 11da3018a853..3ca7277a3c36 100644 --- a/trunk/net/rfkill/rfkill-regulator.c +++ b/trunk/net/rfkill/rfkill-regulator.c @@ -36,12 +36,12 @@ static int rfkill_regulator_set_block(void *data, bool blocked) if (blocked) { if (rfkill_data->reg_enabled) { regulator_disable(rfkill_data->vcc); - rfkill_data->reg_enabled = false; + rfkill_data->reg_enabled = 0; } } else { if (!rfkill_data->reg_enabled) { regulator_enable(rfkill_data->vcc); - rfkill_data->reg_enabled = true; + rfkill_data->reg_enabled = 1; } } @@ -96,7 +96,7 @@ static int __devinit rfkill_regulator_probe(struct platform_device *pdev) if (regulator_is_enabled(vcc)) { dev_dbg(&pdev->dev, "Regulator already enabled\n"); - rfkill_data->reg_enabled = true; + rfkill_data->reg_enabled = 1; } rfkill_data->vcc = vcc; rfkill_data->rf_kill = rf_kill; @@ -144,7 +144,17 @@ static struct platform_driver rfkill_regulator_driver = { }, }; -module_platform_driver(rfkill_regulator_driver); +static int __init rfkill_regulator_init(void) +{ + return platform_driver_register(&rfkill_regulator_driver); +} +module_init(rfkill_regulator_init); + +static void __exit rfkill_regulator_exit(void) +{ + platform_driver_unregister(&rfkill_regulator_driver); +} +module_exit(rfkill_regulator_exit); MODULE_AUTHOR("Guiming Zhuo "); MODULE_AUTHOR("Antonio Ospite "); diff --git a/trunk/net/rxrpc/ar-ack.c b/trunk/net/rxrpc/ar-ack.c index c3126e864f3c..f99cfce7ca97 100644 --- a/trunk/net/rxrpc/ar-ack.c +++ b/trunk/net/rxrpc/ar-ack.c @@ -195,7 +195,7 @@ static void rxrpc_resend(struct rxrpc_call *call) sp = rxrpc_skb(txb); if (sp->need_resend) { - sp->need_resend = false; + sp->need_resend = 0; /* each Tx packet has a new serial number */ sp->hdr.serial = @@ -216,7 +216,7 @@ static void rxrpc_resend(struct rxrpc_call *call) } if (time_after_eq(jiffies + 1, sp->resend_at)) { - sp->need_resend = true; + sp->need_resend = 1; resend |= 1; } else if (resend & 2) { if (time_before(sp->resend_at, resend_at)) @@ -265,7 +265,7 @@ static void rxrpc_resend_timer(struct rxrpc_call *call) if (sp->need_resend) { ; } else if (time_after_eq(jiffies + 1, sp->resend_at)) { - sp->need_resend = true; + sp->need_resend = 1; resend |= 1; } else if (resend & 2) { if (time_before(sp->resend_at, resend_at)) @@ -314,11 +314,11 @@ static int rxrpc_process_soft_ACKs(struct rxrpc_call *call, switch (sacks[loop]) { case RXRPC_ACK_TYPE_ACK: - sp->need_resend = false; + sp->need_resend = 0; *p_txb |= 1; break; case RXRPC_ACK_TYPE_NACK: - sp->need_resend = true; + sp->need_resend = 1; *p_txb &= ~1; resend = 1; break; 
@@ -344,13 +344,13 @@ static int rxrpc_process_soft_ACKs(struct rxrpc_call *call, if (*p_txb & 1) { /* packet must have been discarded */ - sp->need_resend = true; + sp->need_resend = 1; *p_txb &= ~1; resend |= 1; } else if (sp->need_resend) { ; } else if (time_after_eq(jiffies + 1, sp->resend_at)) { - sp->need_resend = true; + sp->need_resend = 1; resend |= 1; } else if (resend & 2) { if (time_before(sp->resend_at, resend_at)) diff --git a/trunk/net/rxrpc/ar-key.c b/trunk/net/rxrpc/ar-key.c index 4cba13e46ffd..43ea7de2fc8e 100644 --- a/trunk/net/rxrpc/ar-key.c +++ b/trunk/net/rxrpc/ar-key.c @@ -306,9 +306,10 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td, td->data_len = len; if (len > 0) { - td->data = kmemdup(xdr, len, GFP_KERNEL); + td->data = kmalloc(len, GFP_KERNEL); if (!td->data) return -ENOMEM; + memcpy(td->data, xdr, len); len = (len + 3) & ~3; toklen -= len; xdr += len >> 2; @@ -400,9 +401,10 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen, _debug("ticket len %u", len); if (len > 0) { - *_ticket = kmemdup(xdr, len, GFP_KERNEL); + *_ticket = kmalloc(len, GFP_KERNEL); if (!*_ticket) return -ENOMEM; + memcpy(*_ticket, xdr, len); len = (len + 3) & ~3; toklen -= len; xdr += len >> 2; diff --git a/trunk/net/rxrpc/ar-output.c b/trunk/net/rxrpc/ar-output.c index 16ae88762d00..338d793c7113 100644 --- a/trunk/net/rxrpc/ar-output.c +++ b/trunk/net/rxrpc/ar-output.c @@ -486,7 +486,7 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb, _proto("Tx DATA %%%u { #%u }", ntohl(sp->hdr.serial), ntohl(sp->hdr.seq)); - sp->need_resend = false; + sp->need_resend = 0; sp->resend_at = jiffies + rxrpc_resend_timeout * HZ; if (!test_and_set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags)) { _debug("run timer"); @@ -508,7 +508,7 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb, if (ret < 0) { _debug("need instant resend %d", ret); - sp->need_resend = true; + sp->need_resend = 1; rxrpc_instant_resend(call); } diff --git a/trunk/net/sched/cls_flow.c b/trunk/net/sched/cls_flow.c index 1d8bd0dbcd1f..7b582300d051 100644 --- a/trunk/net/sched/cls_flow.c +++ b/trunk/net/sched/cls_flow.c @@ -26,8 +26,6 @@ #include #include #include -#include - #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) #include #endif @@ -68,37 +66,134 @@ static inline u32 addr_fold(void *addr) return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? 
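The ar-key.c hunks above replace kmemdup() with an explicit kmalloc()+memcpy() pair; the two forms are equivalent for a successful allocation, as this hypothetical helper illustrates:

static void *example_dup(const void *src, size_t len)
{
	void *p = kmalloc(len, GFP_KERNEL);

	if (p)
		memcpy(p, src, len);	/* same result as kmemdup(src, len, GFP_KERNEL) */
	return p;
}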
a >> 32 : 0); } -static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow) +static u32 flow_get_src(const struct sk_buff *skb, int nhoff) { - if (flow->src) - return ntohl(flow->src); + __be32 *data = NULL, hdata; + + switch (skb->protocol) { + case htons(ETH_P_IP): + data = skb_header_pointer(skb, + nhoff + offsetof(struct iphdr, + saddr), + 4, &hdata); + break; + case htons(ETH_P_IPV6): + data = skb_header_pointer(skb, + nhoff + offsetof(struct ipv6hdr, + saddr.s6_addr32[3]), + 4, &hdata); + break; + } + + if (data) + return ntohl(*data); return addr_fold(skb->sk); } -static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow) +static u32 flow_get_dst(const struct sk_buff *skb, int nhoff) { - if (flow->dst) - return ntohl(flow->dst); + __be32 *data = NULL, hdata; + + switch (skb->protocol) { + case htons(ETH_P_IP): + data = skb_header_pointer(skb, + nhoff + offsetof(struct iphdr, + daddr), + 4, &hdata); + break; + case htons(ETH_P_IPV6): + data = skb_header_pointer(skb, + nhoff + offsetof(struct ipv6hdr, + daddr.s6_addr32[3]), + 4, &hdata); + break; + } + + if (data) + return ntohl(*data); return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol; } -static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow) +static u32 flow_get_proto(const struct sk_buff *skb, int nhoff) { - return flow->ip_proto; + __u8 *data = NULL, hdata; + + switch (skb->protocol) { + case htons(ETH_P_IP): + data = skb_header_pointer(skb, + nhoff + offsetof(struct iphdr, + protocol), + 1, &hdata); + break; + case htons(ETH_P_IPV6): + data = skb_header_pointer(skb, + nhoff + offsetof(struct ipv6hdr, + nexthdr), + 1, &hdata); + break; + } + if (data) + return *data; + return 0; } -static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys *flow) +/* helper function to get either src or dst port */ +static __be16 *flow_get_proto_common(const struct sk_buff *skb, int nhoff, + __be16 *_port, int dst) { - if (flow->ports) - return ntohs(flow->port16[0]); + __be16 *port = NULL; + int poff; + + switch (skb->protocol) { + case htons(ETH_P_IP): { + struct iphdr *iph, _iph; + + iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); + if (!iph) + break; + if (ip_is_fragment(iph)) + break; + poff = proto_ports_offset(iph->protocol); + if (poff >= 0) + port = skb_header_pointer(skb, + nhoff + iph->ihl * 4 + poff + dst, + sizeof(*_port), _port); + break; + } + case htons(ETH_P_IPV6): { + struct ipv6hdr *iph, _iph; + + iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); + if (!iph) + break; + poff = proto_ports_offset(iph->nexthdr); + if (poff >= 0) + port = skb_header_pointer(skb, + nhoff + sizeof(*iph) + poff + dst, + sizeof(*_port), _port); + break; + } + } + + return port; +} + +static u32 flow_get_proto_src(const struct sk_buff *skb, int nhoff) +{ + __be16 _port, *port = flow_get_proto_common(skb, nhoff, &_port, 0); + + if (port) + return ntohs(*port); return addr_fold(skb->sk); } -static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow) +static u32 flow_get_proto_dst(const struct sk_buff *skb, int nhoff) { - if (flow->ports) - return ntohs(flow->port16[1]); + __be16 _port, *port = flow_get_proto_common(skb, nhoff, &_port, 2); + + if (port) + return ntohs(*port); return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol; } @@ -144,7 +239,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb) }) #endif -static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow) 
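The reworked flow_get_*() helpers above all follow the same skb_header_pointer() idiom: copy a small field out of the packet at a computed offset, falling back to a stack buffer when the data is not linear. A condensed sketch of that idiom for the IPv4 source address; the function name is hypothetical:

static u32 example_get_ipv4_src(const struct sk_buff *skb, int nhoff)
{
	__be32 hdata, *data;

	data = skb_header_pointer(skb, nhoff + offsetof(struct iphdr, saddr),
				  sizeof(hdata), &hdata);
	if (!data)
		return 0;	/* packet too short to contain the field */

	return ntohl(*data);	/* classifier keys are host byte order */
}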
+static u32 flow_get_nfct_src(const struct sk_buff *skb, int nhoff) { switch (skb->protocol) { case htons(ETH_P_IP): @@ -153,10 +248,10 @@ static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys * return ntohl(CTTUPLE(skb, src.u3.ip6[3])); } fallback: - return flow_get_src(skb, flow); + return flow_get_src(skb, nhoff); } -static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow) +static u32 flow_get_nfct_dst(const struct sk_buff *skb, int nhoff) { switch (skb->protocol) { case htons(ETH_P_IP): @@ -165,21 +260,21 @@ static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys * return ntohl(CTTUPLE(skb, dst.u3.ip6[3])); } fallback: - return flow_get_dst(skb, flow); + return flow_get_dst(skb, nhoff); } -static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, const struct flow_keys *flow) +static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, int nhoff) { return ntohs(CTTUPLE(skb, src.u.all)); fallback: - return flow_get_proto_src(skb, flow); + return flow_get_proto_src(skb, nhoff); } -static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow) +static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, int nhoff) { return ntohs(CTTUPLE(skb, dst.u.all)); fallback: - return flow_get_proto_dst(skb, flow); + return flow_get_proto_dst(skb, nhoff); } static u32 flow_get_rtclassid(const struct sk_buff *skb) @@ -219,19 +314,21 @@ static u32 flow_get_rxhash(struct sk_buff *skb) return skb_get_rxhash(skb); } -static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow) +static u32 flow_key_get(struct sk_buff *skb, int key) { + int nhoff = skb_network_offset(skb); + switch (key) { case FLOW_KEY_SRC: - return flow_get_src(skb, flow); + return flow_get_src(skb, nhoff); case FLOW_KEY_DST: - return flow_get_dst(skb, flow); + return flow_get_dst(skb, nhoff); case FLOW_KEY_PROTO: - return flow_get_proto(skb, flow); + return flow_get_proto(skb, nhoff); case FLOW_KEY_PROTO_SRC: - return flow_get_proto_src(skb, flow); + return flow_get_proto_src(skb, nhoff); case FLOW_KEY_PROTO_DST: - return flow_get_proto_dst(skb, flow); + return flow_get_proto_dst(skb, nhoff); case FLOW_KEY_IIF: return flow_get_iif(skb); case FLOW_KEY_PRIORITY: @@ -241,13 +338,13 @@ static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow) case FLOW_KEY_NFCT: return flow_get_nfct(skb); case FLOW_KEY_NFCT_SRC: - return flow_get_nfct_src(skb, flow); + return flow_get_nfct_src(skb, nhoff); case FLOW_KEY_NFCT_DST: - return flow_get_nfct_dst(skb, flow); + return flow_get_nfct_dst(skb, nhoff); case FLOW_KEY_NFCT_PROTO_SRC: - return flow_get_nfct_proto_src(skb, flow); + return flow_get_nfct_proto_src(skb, nhoff); case FLOW_KEY_NFCT_PROTO_DST: - return flow_get_nfct_proto_dst(skb, flow); + return flow_get_nfct_proto_dst(skb, nhoff); case FLOW_KEY_RTCLASSID: return flow_get_rtclassid(skb); case FLOW_KEY_SKUID: @@ -264,16 +361,6 @@ static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow) } } -#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) | \ - (1 << FLOW_KEY_DST) | \ - (1 << FLOW_KEY_PROTO) | \ - (1 << FLOW_KEY_PROTO_SRC) | \ - (1 << FLOW_KEY_PROTO_DST) | \ - (1 << FLOW_KEY_NFCT_SRC) | \ - (1 << FLOW_KEY_NFCT_DST) | \ - (1 << FLOW_KEY_NFCT_PROTO_SRC) | \ - (1 << FLOW_KEY_NFCT_PROTO_DST)) - static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { @@ -285,20 +372,17 @@ static int flow_classify(struct sk_buff *skb, const struct tcf_proto 
*tp, int r; list_for_each_entry(f, &head->filters, list) { - u32 keys[FLOW_KEY_MAX + 1]; - struct flow_keys flow_keys; + u32 keys[f->nkeys]; if (!tcf_em_tree_match(skb, &f->ematches, NULL)) continue; keymask = f->keymask; - if (keymask & FLOW_KEYS_NEEDED) - skb_flow_dissect(skb, &flow_keys); for (n = 0; n < f->nkeys; n++) { key = ffs(keymask) - 1; keymask &= ~(1 << key); - keys[n] = flow_key_get(skb, key, &flow_keys); + keys[n] = flow_key_get(skb, key); } if (f->mode == FLOW_MODE_HASH) diff --git a/trunk/net/sched/sch_api.c b/trunk/net/sched/sch_api.c index 3d8981fde301..dca6c1a576f7 100644 --- a/trunk/net/sched/sch_api.c +++ b/trunk/net/sched/sch_api.c @@ -618,24 +618,20 @@ void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash, } EXPORT_SYMBOL(qdisc_class_hash_remove); -/* Allocate an unique handle from space managed by kernel - * Possible range is [8000-FFFF]:0000 (0x8000 values) - */ +/* Allocate an unique handle from space managed by kernel */ + static u32 qdisc_alloc_handle(struct net_device *dev) { - int i = 0x8000; + int i = 0x10000; static u32 autohandle = TC_H_MAKE(0x80000000U, 0); do { autohandle += TC_H_MAKE(0x10000U, 0); if (autohandle == TC_H_MAKE(TC_H_ROOT, 0)) autohandle = TC_H_MAKE(0x80000000U, 0); - if (!qdisc_lookup(dev, autohandle)) - return autohandle; - cond_resched(); - } while (--i > 0); + } while (qdisc_lookup(dev, autohandle) && --i > 0); - return 0; + return i > 0 ? autohandle : 0; } void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) diff --git a/trunk/net/sched/sch_choke.c b/trunk/net/sched/sch_choke.c index e465064d39a3..3422b25df9e4 100644 --- a/trunk/net/sched/sch_choke.c +++ b/trunk/net/sched/sch_choke.c @@ -19,7 +19,10 @@ #include #include #include -#include +#include +#include +#include +#include /* CHOKe stateless AQM for fair bandwidth allocation @@ -57,7 +60,6 @@ struct choke_sched_data { struct red_parms parms; /* Variables */ - struct red_vars vars; struct tcf_proto *filter_list; struct { u32 prob_drop; /* Early probability drops */ @@ -140,10 +142,85 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx) --sch->q.qlen; } +/* + * Compare flow of two packets + * Returns true only if source and destination address and port match. 
+ * false for special cases + */ +static bool choke_match_flow(struct sk_buff *skb1, + struct sk_buff *skb2) +{ + int off1, off2, poff; + const u32 *ports1, *ports2; + u8 ip_proto; + __u32 hash1; + + if (skb1->protocol != skb2->protocol) + return false; + + /* Use hash value as quick check + * Assumes that __skb_get_rxhash makes IP header and ports linear + */ + hash1 = skb_get_rxhash(skb1); + if (!hash1 || hash1 != skb_get_rxhash(skb2)) + return false; + + /* Probably match, but be sure to avoid hash collisions */ + off1 = skb_network_offset(skb1); + off2 = skb_network_offset(skb2); + + switch (skb1->protocol) { + case __constant_htons(ETH_P_IP): { + const struct iphdr *ip1, *ip2; + + ip1 = (const struct iphdr *) (skb1->data + off1); + ip2 = (const struct iphdr *) (skb2->data + off2); + + ip_proto = ip1->protocol; + if (ip_proto != ip2->protocol || + ip1->saddr != ip2->saddr || ip1->daddr != ip2->daddr) + return false; + + if (ip_is_fragment(ip1) | ip_is_fragment(ip2)) + ip_proto = 0; + off1 += ip1->ihl * 4; + off2 += ip2->ihl * 4; + break; + } + + case __constant_htons(ETH_P_IPV6): { + const struct ipv6hdr *ip1, *ip2; + + ip1 = (const struct ipv6hdr *) (skb1->data + off1); + ip2 = (const struct ipv6hdr *) (skb2->data + off2); + + ip_proto = ip1->nexthdr; + if (ip_proto != ip2->nexthdr || + ipv6_addr_cmp(&ip1->saddr, &ip2->saddr) || + ipv6_addr_cmp(&ip1->daddr, &ip2->daddr)) + return false; + off1 += 40; + off2 += 40; + } + + default: /* Maybe compare MAC header here? */ + return false; + } + + poff = proto_ports_offset(ip_proto); + if (poff < 0) + return true; + + off1 += poff; + off2 += poff; + + ports1 = (__force u32 *)(skb1->data + off1); + ports2 = (__force u32 *)(skb2->data + off2); + return *ports1 == *ports2; +} + struct choke_skb_cb { - u16 classid; - u8 keys_valid; - struct flow_keys keys; + u16 classid; }; static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb) @@ -163,32 +240,6 @@ static u16 choke_get_classid(const struct sk_buff *skb) return choke_skb_cb(skb)->classid; } -/* - * Compare flow of two packets - * Returns true only if source and destination address and port match. - * false for special cases - */ -static bool choke_match_flow(struct sk_buff *skb1, - struct sk_buff *skb2) -{ - if (skb1->protocol != skb2->protocol) - return false; - - if (!choke_skb_cb(skb1)->keys_valid) { - choke_skb_cb(skb1)->keys_valid = 1; - skb_flow_dissect(skb1, &choke_skb_cb(skb1)->keys); - } - - if (!choke_skb_cb(skb2)->keys_valid) { - choke_skb_cb(skb2)->keys_valid = 1; - skb_flow_dissect(skb2, &choke_skb_cb(skb2)->keys); - } - - return !memcmp(&choke_skb_cb(skb1)->keys, - &choke_skb_cb(skb2)->keys, - sizeof(struct flow_keys)); -} - /* * Classify flow using either: * 1. 
pre-existing classification result in skb @@ -266,7 +317,7 @@ static bool choke_match_random(const struct choke_sched_data *q, static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct choke_sched_data *q = qdisc_priv(sch); - const struct red_parms *p = &q->parms; + struct red_parms *p = &q->parms; int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; if (q->filter_list) { @@ -275,15 +326,14 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch) goto other_drop; /* Packet was eaten by filter */ } - choke_skb_cb(skb)->keys_valid = 0; /* Compute average queue usage (see RED) */ - q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen); - if (red_is_idling(&q->vars)) - red_end_of_idle_period(&q->vars); + p->qavg = red_calc_qavg(p, sch->q.qlen); + if (red_is_idling(p)) + red_end_of_idle_period(p); /* Is queue small? */ - if (q->vars.qavg <= p->qth_min) - q->vars.qcount = -1; + if (p->qavg <= p->qth_min) + p->qcount = -1; else { unsigned int idx; @@ -295,8 +345,8 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch) } /* Queue is large, always mark/drop */ - if (q->vars.qavg > p->qth_max) { - q->vars.qcount = -1; + if (p->qavg > p->qth_max) { + p->qcount = -1; sch->qstats.overlimits++; if (use_harddrop(q) || !use_ecn(q) || @@ -306,10 +356,10 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch) } q->stats.forced_mark++; - } else if (++q->vars.qcount) { - if (red_mark_probability(p, &q->vars, q->vars.qavg)) { - q->vars.qcount = 0; - q->vars.qR = red_random(p); + } else if (++p->qcount) { + if (red_mark_probability(p, p->qavg)) { + p->qcount = 0; + p->qR = red_random(p); sch->qstats.overlimits++; if (!use_ecn(q) || !INET_ECN_set_ce(skb)) { @@ -320,7 +370,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch) q->stats.prob_mark++; } } else - q->vars.qR = red_random(p); + p->qR = red_random(p); } /* Admit new packet */ @@ -354,8 +404,8 @@ static struct sk_buff *choke_dequeue(struct Qdisc *sch) struct sk_buff *skb; if (q->head == q->tail) { - if (!red_is_idling(&q->vars)) - red_start_of_idle_period(&q->vars); + if (!red_is_idling(&q->parms)) + red_start_of_idle_period(&q->parms); return NULL; } @@ -378,8 +428,8 @@ static unsigned int choke_drop(struct Qdisc *sch) if (len > 0) q->stats.other++; else { - if (!red_is_idling(&q->vars)) - red_start_of_idle_period(&q->vars); + if (!red_is_idling(&q->parms)) + red_start_of_idle_period(&q->parms); } return len; @@ -389,13 +439,12 @@ static void choke_reset(struct Qdisc *sch) { struct choke_sched_data *q = qdisc_priv(sch); - red_restart(&q->vars); + red_restart(&q->parms); } static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = { [TCA_CHOKE_PARMS] = { .len = sizeof(struct tc_red_qopt) }, [TCA_CHOKE_STAB] = { .len = RED_STAB_SIZE }, - [TCA_CHOKE_MAX_P] = { .type = NLA_U32 }, }; @@ -417,7 +466,6 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt) int err; struct sk_buff **old = NULL; unsigned int mask; - u32 max_P; if (opt == NULL) return -EINVAL; @@ -430,8 +478,6 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt) tb[TCA_CHOKE_STAB] == NULL) return -EINVAL; - max_P = tb[TCA_CHOKE_MAX_P] ? 
nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0; - ctl = nla_data(tb[TCA_CHOKE_PARMS]); if (ctl->limit > CHOKE_MAX_QUEUE) @@ -481,12 +527,10 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt) red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog, ctl->Scell_log, - nla_data(tb[TCA_CHOKE_STAB]), - max_P); - red_set_vars(&q->vars); + nla_data(tb[TCA_CHOKE_STAB])); if (q->head == q->tail) - red_end_of_idle_period(&q->vars); + red_end_of_idle_period(&q->parms); sch_tree_unlock(sch); choke_free(old); @@ -517,7 +561,6 @@ static int choke_dump(struct Qdisc *sch, struct sk_buff *skb) goto nla_put_failure; NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt); - NLA_PUT_U32(skb, TCA_CHOKE_MAX_P, q->parms.max_P); return nla_nest_end(skb, opts); nla_put_failure: diff --git a/trunk/net/sched/sch_generic.c b/trunk/net/sched/sch_generic.c index 67fc573e013a..69fca2798804 100644 --- a/trunk/net/sched/sch_generic.c +++ b/trunk/net/sched/sch_generic.c @@ -60,7 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q) /* check the reason of requeuing without tx lock first */ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); - if (!netif_xmit_frozen_or_stopped(txq)) { + if (!netif_tx_queue_frozen_or_stopped(txq)) { q->gso_skb = NULL; q->q.qlen--; } else @@ -121,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, spin_unlock(root_lock); HARD_TX_LOCK(dev, txq, smp_processor_id()); - if (!netif_xmit_frozen_or_stopped(txq)) + if (!netif_tx_queue_frozen_or_stopped(txq)) ret = dev_hard_start_xmit(skb, dev, txq); HARD_TX_UNLOCK(dev, txq); @@ -143,7 +143,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, ret = dev_requeue_skb(skb, q); } - if (ret && netif_xmit_frozen_or_stopped(txq)) + if (ret && netif_tx_queue_frozen_or_stopped(txq)) ret = 0; return ret; @@ -242,11 +242,10 @@ static void dev_watchdog(unsigned long arg) * old device drivers set dev->trans_start */ trans_start = txq->trans_start ? 
: dev->trans_start; - if (netif_xmit_stopped(txq) && + if (netif_tx_queue_stopped(txq) && time_after(jiffies, (trans_start + dev->watchdog_timeo))) { some_queue_timedout = 1; - txq->trans_timeout++; break; } } diff --git a/trunk/net/sched/sch_gred.c b/trunk/net/sched/sch_gred.c index 0b15236be7b6..6cd8ddfb512d 100644 --- a/trunk/net/sched/sch_gred.c +++ b/trunk/net/sched/sch_gred.c @@ -34,14 +34,13 @@ struct gred_sched; struct gred_sched_data { u32 limit; /* HARD maximal queue length */ - u32 DP; /* the drop parameters */ + u32 DP; /* the drop pramaters */ u32 bytesin; /* bytes seen on virtualQ so far*/ u32 packetsin; /* packets seen on virtualQ so far*/ u32 backlog; /* bytes on the virtualQ */ u8 prio; /* the prio of this vq */ struct red_parms parms; - struct red_vars vars; struct red_stats stats; }; @@ -56,7 +55,7 @@ struct gred_sched { u32 red_flags; u32 DPs; u32 def; - struct red_vars wred_set; + struct red_parms wred_set; }; static inline int gred_wred_mode(struct gred_sched *table) @@ -126,17 +125,17 @@ static inline u16 tc_index_to_dp(struct sk_buff *skb) return skb->tc_index & GRED_VQ_MASK; } -static inline void gred_load_wred_set(const struct gred_sched *table, +static inline void gred_load_wred_set(struct gred_sched *table, struct gred_sched_data *q) { - q->vars.qavg = table->wred_set.qavg; - q->vars.qidlestart = table->wred_set.qidlestart; + q->parms.qavg = table->wred_set.qavg; + q->parms.qidlestart = table->wred_set.qidlestart; } static inline void gred_store_wred_set(struct gred_sched *table, struct gred_sched_data *q) { - table->wred_set.qavg = q->vars.qavg; + table->wred_set.qavg = q->parms.qavg; } static inline int gred_use_ecn(struct gred_sched *t) @@ -171,7 +170,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch) goto drop; } - /* fix tc_index? --could be controversial but needed for + /* fix tc_index? 
--could be controvesial but needed for requeueing */ skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp; } @@ -182,8 +181,8 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch) for (i = 0; i < t->DPs; i++) { if (t->tab[i] && t->tab[i]->prio < q->prio && - !red_is_idling(&t->tab[i]->vars)) - qavg += t->tab[i]->vars.qavg; + !red_is_idling(&t->tab[i]->parms)) + qavg += t->tab[i]->parms.qavg; } } @@ -194,17 +193,15 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (gred_wred_mode(t)) gred_load_wred_set(t, q); - q->vars.qavg = red_calc_qavg(&q->parms, - &q->vars, - gred_backlog(t, q, sch)); + q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch)); - if (red_is_idling(&q->vars)) - red_end_of_idle_period(&q->vars); + if (red_is_idling(&q->parms)) + red_end_of_idle_period(&q->parms); if (gred_wred_mode(t)) gred_store_wred_set(t, q); - switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) { + switch (red_action(&q->parms, q->parms.qavg + qavg)) { case RED_DONT_MARK: break; @@ -263,7 +260,7 @@ static struct sk_buff *gred_dequeue(struct Qdisc *sch) q->backlog -= qdisc_pkt_len(skb); if (!q->backlog && !gred_wred_mode(t)) - red_start_of_idle_period(&q->vars); + red_start_of_idle_period(&q->parms); } return skb; @@ -296,7 +293,7 @@ static unsigned int gred_drop(struct Qdisc *sch) q->stats.other++; if (!q->backlog && !gred_wred_mode(t)) - red_start_of_idle_period(&q->vars); + red_start_of_idle_period(&q->parms); } qdisc_drop(skb, sch); @@ -323,7 +320,7 @@ static void gred_reset(struct Qdisc *sch) if (!q) continue; - red_restart(&q->vars); + red_restart(&q->parms); q->backlog = 0; } } @@ -382,31 +379,29 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps) } static inline int gred_change_vq(struct Qdisc *sch, int dp, - struct tc_gred_qopt *ctl, int prio, - u8 *stab, u32 max_P, - struct gred_sched_data **prealloc) + struct tc_gred_qopt *ctl, int prio, u8 *stab) { struct gred_sched *table = qdisc_priv(sch); - struct gred_sched_data *q = table->tab[dp]; + struct gred_sched_data *q; - if (!q) { - table->tab[dp] = q = *prealloc; - *prealloc = NULL; - if (!q) + if (table->tab[dp] == NULL) { + table->tab[dp] = kzalloc(sizeof(*q), GFP_ATOMIC); + if (table->tab[dp] == NULL) return -ENOMEM; } + q = table->tab[dp]; q->DP = dp; q->prio = prio; q->limit = ctl->limit; if (q->backlog == 0) - red_end_of_idle_period(&q->vars); + red_end_of_idle_period(&q->parms); red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog, - ctl->Scell_log, stab, max_P); - red_set_vars(&q->vars); + ctl->Scell_log, stab); + return 0; } @@ -414,7 +409,6 @@ static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = { [TCA_GRED_PARMS] = { .len = sizeof(struct tc_gred_qopt) }, [TCA_GRED_STAB] = { .len = 256 }, [TCA_GRED_DPS] = { .len = sizeof(struct tc_gred_sopt) }, - [TCA_GRED_MAX_P] = { .type = NLA_U32 }, }; static int gred_change(struct Qdisc *sch, struct nlattr *opt) @@ -424,8 +418,6 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt) struct nlattr *tb[TCA_GRED_MAX + 1]; int err, prio = GRED_DEF_PRIO; u8 *stab; - u32 max_P; - struct gred_sched_data *prealloc; if (opt == NULL) return -EINVAL; @@ -441,8 +433,6 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt) tb[TCA_GRED_STAB] == NULL) return -EINVAL; - max_P = tb[TCA_GRED_MAX_P] ? 
nla_get_u32(tb[TCA_GRED_MAX_P]) : 0; - err = -EINVAL; ctl = nla_data(tb[TCA_GRED_PARMS]); stab = nla_data(tb[TCA_GRED_STAB]); @@ -465,10 +455,9 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt) prio = ctl->prio; } - prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL); sch_tree_lock(sch); - err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc); + err = gred_change_vq(sch, ctl->DP, ctl, prio, stab); if (err < 0) goto errout_locked; @@ -482,7 +471,6 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt) errout_locked: sch_tree_unlock(sch); - kfree(prealloc); errout: return err; } @@ -510,7 +498,6 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) struct gred_sched *table = qdisc_priv(sch); struct nlattr *parms, *opts = NULL; int i; - u32 max_p[MAX_DPs]; struct tc_gred_sopt sopt = { .DPs = table->DPs, .def_DP = table->def, @@ -522,14 +509,6 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) if (opts == NULL) goto nla_put_failure; NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt); - - for (i = 0; i < MAX_DPs; i++) { - struct gred_sched_data *q = table->tab[i]; - - max_p[i] = q ? q->parms.max_P : 0; - } - NLA_PUT(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p); - parms = nla_nest_start(skb, TCA_GRED_PARMS); if (parms == NULL) goto nla_put_failure; @@ -566,12 +545,12 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) opt.bytesin = q->bytesin; if (gred_wred_mode(table)) { - q->vars.qidlestart = - table->tab[table->def]->vars.qidlestart; - q->vars.qavg = table->tab[table->def]->vars.qavg; + q->parms.qidlestart = + table->tab[table->def]->parms.qidlestart; + q->parms.qavg = table->tab[table->def]->parms.qavg; } - opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg); + opt.qave = red_calc_qavg(&q->parms, q->parms.qavg); append_opt: if (nla_append(skb, sizeof(opt), &opt) < 0) diff --git a/trunk/net/sched/sch_hfsc.c b/trunk/net/sched/sch_hfsc.c index 9bdca2e011e9..6488e6425652 100644 --- a/trunk/net/sched/sch_hfsc.c +++ b/trunk/net/sched/sch_hfsc.c @@ -1368,7 +1368,6 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct tc_hfsc_stats xstats; cl->qstats.qlen = cl->qdisc->q.qlen; - cl->qstats.backlog = cl->qdisc->qstats.backlog; xstats.level = cl->level; xstats.period = cl->cl_vtperiod; xstats.work = cl->cl_total; @@ -1562,15 +1561,6 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb) struct hfsc_sched *q = qdisc_priv(sch); unsigned char *b = skb_tail_pointer(skb); struct tc_hfsc_qopt qopt; - struct hfsc_class *cl; - struct hlist_node *n; - unsigned int i; - - sch->qstats.backlog = 0; - for (i = 0; i < q->clhash.hashsize; i++) { - hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode) - sch->qstats.backlog += cl->qdisc->qstats.backlog; - } qopt.defcls = q->defcls; NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt); diff --git a/trunk/net/sched/sch_multiq.c b/trunk/net/sched/sch_multiq.c index 49131d7a7446..edc1950e0e77 100644 --- a/trunk/net/sched/sch_multiq.c +++ b/trunk/net/sched/sch_multiq.c @@ -107,8 +107,7 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch) /* Check that target subqueue is available before * pulling an skb to avoid head-of-line blocking. 
*/ - if (!netif_xmit_stopped( - netdev_get_tx_queue(qdisc_dev(sch), q->curband))) { + if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) { qdisc = q->queues[q->curband]; skb = qdisc->dequeue(qdisc); if (skb) { @@ -139,8 +138,7 @@ static struct sk_buff *multiq_peek(struct Qdisc *sch) /* Check that target subqueue is available before * pulling an skb to avoid head-of-line blocking. */ - if (!netif_xmit_stopped( - netdev_get_tx_queue(qdisc_dev(sch), curband))) { + if (!__netif_subqueue_stopped(qdisc_dev(sch), curband)) { qdisc = q->queues[curband]; skb = qdisc->ops->peek(qdisc); if (skb) diff --git a/trunk/net/sched/sch_netem.c b/trunk/net/sched/sch_netem.c index e7e1d0b57b3d..eb3b9a86c6ed 100644 --- a/trunk/net/sched/sch_netem.c +++ b/trunk/net/sched/sch_netem.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include @@ -67,11 +66,7 @@ */ struct netem_sched_data { - /* internal t(ime)fifo qdisc uses sch->q and sch->limit */ - - /* optional qdisc for classful handling (NULL at netem init) */ struct Qdisc *qdisc; - struct qdisc_watchdog watchdog; psched_tdiff_t latency; @@ -84,11 +79,6 @@ struct netem_sched_data { u32 duplicate; u32 reorder; u32 corrupt; - u32 rate; - s32 packet_overhead; - u32 cell_size; - u32 cell_size_reciprocal; - s32 cell_overhead; struct crndstate { u32 last; @@ -121,9 +111,7 @@ struct netem_sched_data { }; -/* Time stamp put into socket buffer control block - * Only valid when skbs are in our internal t(ime)fifo queue. - */ +/* Time stamp put into socket buffer control block */ struct netem_skb_cb { psched_time_t time_to_send; }; @@ -310,51 +298,6 @@ static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma, return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu; } -static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q) -{ - u64 ticks; - - len += q->packet_overhead; - - if (q->cell_size) { - u32 cells = reciprocal_divide(len, q->cell_size_reciprocal); - - if (len > cells * q->cell_size) /* extra cell needed for remainder */ - cells++; - len = cells * (q->cell_size + q->cell_overhead); - } - - ticks = (u64)len * NSEC_PER_SEC; - - do_div(ticks, q->rate); - return PSCHED_NS2TICKS(ticks); -} - -static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) -{ - struct sk_buff_head *list = &sch->q; - psched_time_t tnext = netem_skb_cb(nskb)->time_to_send; - struct sk_buff *skb; - - if (likely(skb_queue_len(list) < sch->limit)) { - skb = skb_peek_tail(list); - /* Optimize for add at tail */ - if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send)) - return qdisc_enqueue_tail(nskb, sch); - - skb_queue_reverse_walk(list, skb) { - if (tnext >= netem_skb_cb(skb)->time_to_send) - break; - } - - __skb_queue_after(list, skb, nskb); - sch->qstats.backlog += qdisc_pkt_len(nskb); - return NET_XMIT_SUCCESS; - } - - return qdisc_reshape_fail(nskb, sch); -} - /* * Insert one skb into qdisc. * Note: parent depends on return value to account for queue length. @@ -428,27 +371,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) &q->delay_cor, q->delay_dist); now = psched_get_time(); - - if (q->rate) { - struct sk_buff_head *list = &sch->q; - - delay += packet_len_2_sched_time(skb->len, q); - - if (!skb_queue_empty(list)) { - /* - * Last packet in queue is reference point (now). - * First packet in queue is already in flight, - * calculate this time bonus and substract - * from delay. 
- */ - delay -= now - netem_skb_cb(skb_peek(list))->time_to_send; - now = netem_skb_cb(skb_peek_tail(list))->time_to_send; - } - } - cb->time_to_send = now + delay; ++q->counter; - ret = tfifo_enqueue(skb, sch); + ret = qdisc_enqueue(skb, q->qdisc); } else { /* * Do re-ordering by putting one out of N packets at the front @@ -457,9 +382,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) cb->time_to_send = psched_get_time(); q->counter = 0; - __skb_queue_head(&sch->q, skb); - sch->qstats.backlog += qdisc_pkt_len(skb); - sch->qstats.requeues++; + __skb_queue_head(&q->qdisc->q, skb); + q->qdisc->qstats.backlog += qdisc_pkt_len(skb); + q->qdisc->qstats.requeues++; ret = NET_XMIT_SUCCESS; } @@ -470,20 +395,19 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) } } + sch->q.qlen++; return NET_XMIT_SUCCESS; } static unsigned int netem_drop(struct Qdisc *sch) { struct netem_sched_data *q = qdisc_priv(sch); - unsigned int len; + unsigned int len = 0; - len = qdisc_queue_drop(sch); - if (!len && q->qdisc && q->qdisc->ops->drop) - len = q->qdisc->ops->drop(q->qdisc); - if (len) + if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) { + sch->q.qlen--; sch->qstats.drops++; - + } return len; } @@ -495,16 +419,16 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch) if (qdisc_is_throttled(sch)) return NULL; -tfifo_dequeue: - skb = qdisc_peek_head(sch); + skb = q->qdisc->ops->peek(q->qdisc); if (skb) { const struct netem_skb_cb *cb = netem_skb_cb(skb); + psched_time_t now = psched_get_time(); /* if more time remaining? */ - if (cb->time_to_send <= psched_get_time()) { - skb = qdisc_dequeue_tail(sch); + if (cb->time_to_send <= now) { + skb = qdisc_dequeue_peeked(q->qdisc); if (unlikely(!skb)) - goto qdisc_dequeue; + return NULL; #ifdef CONFIG_NET_CLS_ACT /* @@ -515,37 +439,15 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch) skb->tstamp.tv64 = 0; #endif - if (q->qdisc) { - int err = qdisc_enqueue(skb, q->qdisc); - - if (unlikely(err != NET_XMIT_SUCCESS)) { - if (net_xmit_drop_count(err)) { - sch->qstats.drops++; - qdisc_tree_decrease_qlen(sch, 1); - } - } - goto tfifo_dequeue; - } -deliver: + sch->q.qlen--; qdisc_unthrottled(sch); qdisc_bstats_update(sch, skb); return skb; } - if (q->qdisc) { - skb = q->qdisc->ops->dequeue(q->qdisc); - if (skb) - goto deliver; - } qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send); } -qdisc_dequeue: - if (q->qdisc) { - skb = q->qdisc->ops->dequeue(q->qdisc); - if (skb) - goto deliver; - } return NULL; } @@ -553,9 +455,8 @@ static void netem_reset(struct Qdisc *sch) { struct netem_sched_data *q = qdisc_priv(sch); - qdisc_reset_queue(sch); - if (q->qdisc) - qdisc_reset(q->qdisc); + qdisc_reset(q->qdisc); + sch->q.qlen = 0; qdisc_watchdog_cancel(&q->watchdog); } @@ -587,7 +488,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) return -EINVAL; s = sizeof(struct disttable) + n * sizeof(s16); - d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN); + d = kmalloc(s, GFP_KERNEL); if (!d) d = vmalloc(s); if (!d) @@ -600,10 +501,9 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) root_lock = qdisc_root_sleeping_lock(sch); spin_lock_bh(root_lock); - swap(q->delay_dist, d); + dist_free(q->delay_dist); + q->delay_dist = d; spin_unlock_bh(root_lock); - - dist_free(d); return 0; } @@ -635,19 +535,6 @@ static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr) init_crandom(&q->corrupt_cor, r->correlation); } -static void get_rate(struct Qdisc *sch, const struct nlattr 
*attr) -{ - struct netem_sched_data *q = qdisc_priv(sch); - const struct tc_netem_rate *r = nla_data(attr); - - q->rate = r->rate; - q->packet_overhead = r->packet_overhead; - q->cell_size = r->cell_size; - if (q->cell_size) - q->cell_size_reciprocal = reciprocal_value(q->cell_size); - q->cell_overhead = r->cell_overhead; -} - static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr) { struct netem_sched_data *q = qdisc_priv(sch); @@ -661,7 +548,7 @@ static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr) case NETEM_LOSS_GI: { const struct tc_netem_gimodel *gi = nla_data(la); - if (nla_len(la) < sizeof(struct tc_netem_gimodel)) { + if (nla_len(la) != sizeof(struct tc_netem_gimodel)) { pr_info("netem: incorrect gi model size\n"); return -EINVAL; } @@ -680,8 +567,8 @@ static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr) case NETEM_LOSS_GE: { const struct tc_netem_gemodel *ge = nla_data(la); - if (nla_len(la) < sizeof(struct tc_netem_gemodel)) { - pr_info("netem: incorrect ge model size\n"); + if (nla_len(la) != sizeof(struct tc_netem_gemodel)) { + pr_info("netem: incorrect gi model size\n"); return -EINVAL; } @@ -707,7 +594,6 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = { [TCA_NETEM_CORR] = { .len = sizeof(struct tc_netem_corr) }, [TCA_NETEM_REORDER] = { .len = sizeof(struct tc_netem_reorder) }, [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) }, - [TCA_NETEM_RATE] = { .len = sizeof(struct tc_netem_rate) }, [TCA_NETEM_LOSS] = { .type = NLA_NESTED }, }; @@ -745,7 +631,11 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt) if (ret < 0) return ret; - sch->limit = qopt->limit; + ret = fifo_set_limit(q->qdisc, qopt->limit); + if (ret) { + pr_info("netem: can't set fifo limit\n"); + return ret; + } q->latency = qopt->latency; q->jitter = qopt->jitter; @@ -776,9 +666,6 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt) if (tb[TCA_NETEM_CORRUPT]) get_corrupt(sch, tb[TCA_NETEM_CORRUPT]); - if (tb[TCA_NETEM_RATE]) - get_rate(sch, tb[TCA_NETEM_RATE]); - q->loss_model = CLG_RANDOM; if (tb[TCA_NETEM_LOSS]) ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]); @@ -786,6 +673,88 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt) return ret; } +/* + * Special case version of FIFO queue for use by netem. 
+ * It queues in order based on timestamps in skb's + */ +struct fifo_sched_data { + u32 limit; + psched_time_t oldest; +}; + +static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) +{ + struct fifo_sched_data *q = qdisc_priv(sch); + struct sk_buff_head *list = &sch->q; + psched_time_t tnext = netem_skb_cb(nskb)->time_to_send; + struct sk_buff *skb; + + if (likely(skb_queue_len(list) < q->limit)) { + /* Optimize for add at tail */ + if (likely(skb_queue_empty(list) || tnext >= q->oldest)) { + q->oldest = tnext; + return qdisc_enqueue_tail(nskb, sch); + } + + skb_queue_reverse_walk(list, skb) { + const struct netem_skb_cb *cb = netem_skb_cb(skb); + + if (tnext >= cb->time_to_send) + break; + } + + __skb_queue_after(list, skb, nskb); + + sch->qstats.backlog += qdisc_pkt_len(nskb); + + return NET_XMIT_SUCCESS; + } + + return qdisc_reshape_fail(nskb, sch); +} + +static int tfifo_init(struct Qdisc *sch, struct nlattr *opt) +{ + struct fifo_sched_data *q = qdisc_priv(sch); + + if (opt) { + struct tc_fifo_qopt *ctl = nla_data(opt); + if (nla_len(opt) < sizeof(*ctl)) + return -EINVAL; + + q->limit = ctl->limit; + } else + q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1); + + q->oldest = PSCHED_PASTPERFECT; + return 0; +} + +static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb) +{ + struct fifo_sched_data *q = qdisc_priv(sch); + struct tc_fifo_qopt opt = { .limit = q->limit }; + + NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); + return skb->len; + +nla_put_failure: + return -1; +} + +static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = { + .id = "tfifo", + .priv_size = sizeof(struct fifo_sched_data), + .enqueue = tfifo_enqueue, + .dequeue = qdisc_dequeue_head, + .peek = qdisc_peek_head, + .drop = qdisc_queue_drop, + .init = tfifo_init, + .reset = qdisc_reset_queue, + .change = tfifo_init, + .dump = tfifo_dump, +}; + static int netem_init(struct Qdisc *sch, struct nlattr *opt) { struct netem_sched_data *q = qdisc_priv(sch); @@ -797,9 +766,18 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt) qdisc_watchdog_init(&q->watchdog, sch); q->loss_model = CLG_RANDOM; + q->qdisc = qdisc_create_dflt(sch->dev_queue, &tfifo_qdisc_ops, + TC_H_MAKE(sch->handle, 1)); + if (!q->qdisc) { + pr_notice("netem: qdisc create tfifo qdisc failed\n"); + return -ENOMEM; + } + ret = netem_change(sch, opt); - if (ret) + if (ret) { pr_info("netem: change failed\n"); + qdisc_destroy(q->qdisc); + } return ret; } @@ -808,8 +786,7 @@ static void netem_destroy(struct Qdisc *sch) struct netem_sched_data *q = qdisc_priv(sch); qdisc_watchdog_cancel(&q->watchdog); - if (q->qdisc) - qdisc_destroy(q->qdisc); + qdisc_destroy(q->qdisc); dist_free(q->delay_dist); } @@ -869,7 +846,6 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) struct tc_netem_corr cor; struct tc_netem_reorder reorder; struct tc_netem_corrupt corrupt; - struct tc_netem_rate rate; qopt.latency = q->latency; qopt.jitter = q->jitter; @@ -892,12 +868,6 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) corrupt.correlation = q->corrupt_cor.rho; NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt); - rate.rate = q->rate; - rate.packet_overhead = q->packet_overhead; - rate.cell_size = q->cell_size; - rate.cell_overhead = q->cell_overhead; - NLA_PUT(skb, TCA_NETEM_RATE, sizeof(rate), &rate); - if (dump_loss_model(q, skb) != 0) goto nla_put_failure; @@ -913,7 +883,7 @@ static int netem_dump_class(struct Qdisc *sch, unsigned long cl, { struct netem_sched_data *q = qdisc_priv(sch); - if (cl != 1 || !q->qdisc) 
/* only one class */ + if (cl != 1) /* only one class */ return -ENOENT; tcm->tcm_handle |= TC_H_MIN(1); @@ -927,13 +897,14 @@ static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, { struct netem_sched_data *q = qdisc_priv(sch); + if (new == NULL) + new = &noop_qdisc; + sch_tree_lock(sch); *old = q->qdisc; q->qdisc = new; - if (*old) { - qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); - qdisc_reset(*old); - } + qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); + qdisc_reset(*old); sch_tree_unlock(sch); return 0; diff --git a/trunk/net/sched/sch_qfq.c b/trunk/net/sched/sch_qfq.c index e68cb440756a..103343408593 100644 --- a/trunk/net/sched/sch_qfq.c +++ b/trunk/net/sched/sch_qfq.c @@ -211,7 +211,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr *tb[TCA_QFQ_MAX + 1]; u32 weight, lmax, inv_w; int i, err; - int delta_w; if (tca[TCA_OPTIONS] == NULL) { pr_notice("qfq: no options\n"); @@ -233,10 +232,9 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, inv_w = ONE_FP / weight; weight = ONE_FP / inv_w; - delta_w = weight - (cl ? ONE_FP / cl->inv_w : 0); - if (q->wsum + delta_w > QFQ_MAX_WSUM) { + if (q->wsum + weight > QFQ_MAX_WSUM) { pr_notice("qfq: total weight out of range (%u + %u)\n", - delta_w, q->wsum); + weight, q->wsum); return -EINVAL; } @@ -258,12 +256,13 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, return err; } - if (inv_w != cl->inv_w) { - sch_tree_lock(sch); - q->wsum += delta_w; + sch_tree_lock(sch); + if (tb[TCA_QFQ_WEIGHT]) { + q->wsum = weight - ONE_FP / cl->inv_w; cl->inv_w = inv_w; - sch_tree_unlock(sch); } + sch_tree_unlock(sch); + return 0; } @@ -278,6 +277,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, i = qfq_calc_index(cl->inv_w, cl->lmax); cl->grp = &q->groups[i]; + q->wsum += weight; cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid); @@ -294,7 +294,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, return err; } } - q->wsum += weight; sch_tree_lock(sch); qdisc_class_hash_insert(&q->clhash, &cl->common); @@ -818,11 +817,11 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch) static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl) { unsigned long mask; - u64 limit, roundedF; + uint32_t limit, roundedF; int slot_shift = cl->grp->slot_shift; roundedF = qfq_round_down(cl->F, slot_shift); - limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift); + limit = qfq_round_down(q->V, slot_shift) + (1UL << slot_shift); if (!qfq_gt(cl->F, q->V) || qfq_gt(roundedF, limit)) { /* timestamp was stale */ diff --git a/trunk/net/sched/sch_red.c b/trunk/net/sched/sch_red.c index a5cc3012cf42..d617161f8dd3 100644 --- a/trunk/net/sched/sch_red.c +++ b/trunk/net/sched/sch_red.c @@ -39,9 +39,7 @@ struct red_sched_data { u32 limit; /* HARD maximal queue length */ unsigned char flags; - struct timer_list adapt_timer; struct red_parms parms; - struct red_vars vars; struct red_stats stats; struct Qdisc *qdisc; }; @@ -62,14 +60,12 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch) struct Qdisc *child = q->qdisc; int ret; - q->vars.qavg = red_calc_qavg(&q->parms, - &q->vars, - child->qstats.backlog); + q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog); - if (red_is_idling(&q->vars)) - red_end_of_idle_period(&q->vars); + if (red_is_idling(&q->parms)) + red_end_of_idle_period(&q->parms); - switch (red_action(&q->parms, &q->vars, q->vars.qavg)) 
{ + switch (red_action(&q->parms, q->parms.qavg)) { case RED_DONT_MARK: break; @@ -120,8 +116,8 @@ static struct sk_buff *red_dequeue(struct Qdisc *sch) qdisc_bstats_update(sch, skb); sch->q.qlen--; } else { - if (!red_is_idling(&q->vars)) - red_start_of_idle_period(&q->vars); + if (!red_is_idling(&q->parms)) + red_start_of_idle_period(&q->parms); } return skb; } @@ -147,8 +143,8 @@ static unsigned int red_drop(struct Qdisc *sch) return len; } - if (!red_is_idling(&q->vars)) - red_start_of_idle_period(&q->vars); + if (!red_is_idling(&q->parms)) + red_start_of_idle_period(&q->parms); return 0; } @@ -159,21 +155,18 @@ static void red_reset(struct Qdisc *sch) qdisc_reset(q->qdisc); sch->q.qlen = 0; - red_restart(&q->vars); + red_restart(&q->parms); } static void red_destroy(struct Qdisc *sch) { struct red_sched_data *q = qdisc_priv(sch); - - del_timer_sync(&q->adapt_timer); qdisc_destroy(q->qdisc); } static const struct nla_policy red_policy[TCA_RED_MAX + 1] = { [TCA_RED_PARMS] = { .len = sizeof(struct tc_red_qopt) }, [TCA_RED_STAB] = { .len = RED_STAB_SIZE }, - [TCA_RED_MAX_P] = { .type = NLA_U32 }, }; static int red_change(struct Qdisc *sch, struct nlattr *opt) @@ -183,7 +176,6 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt) struct tc_red_qopt *ctl; struct Qdisc *child = NULL; int err; - u32 max_P; if (opt == NULL) return -EINVAL; @@ -196,8 +188,6 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt) tb[TCA_RED_STAB] == NULL) return -EINVAL; - max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0; - ctl = nla_data(tb[TCA_RED_PARMS]); if (ctl->limit > 0) { @@ -215,42 +205,22 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt) q->qdisc = child; } - red_set_parms(&q->parms, - ctl->qth_min, ctl->qth_max, ctl->Wlog, - ctl->Plog, ctl->Scell_log, - nla_data(tb[TCA_RED_STAB]), - max_P); - red_set_vars(&q->vars); - - del_timer(&q->adapt_timer); - if (ctl->flags & TC_RED_ADAPTATIVE) - mod_timer(&q->adapt_timer, jiffies + HZ/2); + red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog, + ctl->Plog, ctl->Scell_log, + nla_data(tb[TCA_RED_STAB])); if (!q->qdisc->q.qlen) - red_start_of_idle_period(&q->vars); + red_start_of_idle_period(&q->parms); sch_tree_unlock(sch); return 0; } -static inline void red_adaptative_timer(unsigned long arg) -{ - struct Qdisc *sch = (struct Qdisc *)arg; - struct red_sched_data *q = qdisc_priv(sch); - spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch)); - - spin_lock(root_lock); - red_adaptative_algo(&q->parms, &q->vars); - mod_timer(&q->adapt_timer, jiffies + HZ/2); - spin_unlock(root_lock); -} - static int red_init(struct Qdisc *sch, struct nlattr *opt) { struct red_sched_data *q = qdisc_priv(sch); q->qdisc = &noop_qdisc; - setup_timer(&q->adapt_timer, red_adaptative_timer, (unsigned long)sch); return red_change(sch, opt); } @@ -273,7 +243,6 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb) if (opts == NULL) goto nla_put_failure; NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt); - NLA_PUT_U32(skb, TCA_RED_MAX_P, q->parms.max_P); return nla_nest_end(skb, opts); nla_put_failure: diff --git a/trunk/net/sched/sch_sfb.c b/trunk/net/sched/sch_sfb.c index 96e42cae4c7a..e83c272c0325 100644 --- a/trunk/net/sched/sch_sfb.c +++ b/trunk/net/sched/sch_sfb.c @@ -26,7 +26,6 @@ #include #include #include -#include /* * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level) @@ -287,7 +286,6 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) u32 minqlen = ~0; u32 r, slot, salt, 
sfbhash; int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; - struct flow_keys keys; if (unlikely(sch->q.qlen >= q->limit)) { sch->qstats.overlimits++; @@ -311,19 +309,13 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) /* If using external classifiers, get result and record it. */ if (!sfb_classify(skb, q, &ret, &salt)) goto other_drop; - keys.src = salt; - keys.dst = 0; - keys.ports = 0; } else { - skb_flow_dissect(skb, &keys); + salt = skb_get_rxhash(skb); } slot = q->slot; - sfbhash = jhash_3words((__force u32)keys.dst, - (__force u32)keys.src, - (__force u32)keys.ports, - q->bins[slot].perturbation); + sfbhash = jhash_1word(salt, q->bins[slot].perturbation); if (!sfbhash) sfbhash = 1; sfb_skb_cb(skb)->hashes[slot] = sfbhash; @@ -355,10 +347,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (unlikely(p_min >= SFB_MAX_PROB)) { /* Inelastic flow */ if (q->double_buffering) { - sfbhash = jhash_3words((__force u32)keys.dst, - (__force u32)keys.src, - (__force u32)keys.ports, - q->bins[slot].perturbation); + sfbhash = jhash_1word(salt, q->bins[slot].perturbation); if (!sfbhash) sfbhash = 1; sfb_skb_cb(skb)->hashes[slot] = sfbhash; diff --git a/trunk/net/sched/sch_sfq.c b/trunk/net/sched/sch_sfq.c index 0a7964009e8c..4f5510e2bd6f 100644 --- a/trunk/net/sched/sch_sfq.c +++ b/trunk/net/sched/sch_sfq.c @@ -17,13 +17,14 @@ #include #include #include +#include #include #include #include #include +#include #include #include -#include /* Stochastic Fairness Queuing algorithm. @@ -66,18 +67,16 @@ SFQ is superior for this purpose. IMPLEMENTATION: - This implementation limits : - - maximal queue length per flow to 127 packets. - - max mtu to 2^18-1; - - max 65408 flows, - - number of hash buckets to 65536. + This implementation limits maximal queue length to 128; + max mtu to 2^18-1; max 128 flows, number of hash buckets to 1024. + The only goal of this restrictions was that all data + fit into one 4K page on 32bit arches. It is easy to increase these values, but not in flight. */ -#define SFQ_MAX_DEPTH 127 /* max number of packets per flow */ -#define SFQ_DEFAULT_FLOWS 128 -#define SFQ_MAX_FLOWS (0x10000 - SFQ_MAX_DEPTH - 1) /* max number of flows */ -#define SFQ_EMPTY_SLOT 0xffff +#define SFQ_DEPTH 128 /* max number of packets per flow */ +#define SFQ_SLOTS 128 /* max number of flows */ +#define SFQ_EMPTY_SLOT 255 #define SFQ_DEFAULT_HASH_DIVISOR 1024 /* We use 16 bits to store allot, and want to handle packets up to 64K @@ -86,13 +85,13 @@ #define SFQ_ALLOT_SHIFT 3 #define SFQ_ALLOT_SIZE(X) DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT) -/* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */ -typedef u16 sfq_index; +/* This type should contain at least SFQ_DEPTH + SFQ_SLOTS values */ +typedef unsigned char sfq_index; /* * We dont use pointers to save space. - * Small indexes [0 ... SFQ_MAX_FLOWS - 1] are 'pointers' to slots[] array - * while following values [SFQ_MAX_FLOWS ... SFQ_MAX_FLOWS + SFQ_MAX_DEPTH] + * Small indexes [0 ... SFQ_SLOTS - 1] are 'pointers' to slots[] array + * while following values [SFQ_SLOTS ... 
SFQ_SLOTS + SFQ_DEPTH - 1] * are 'pointers' to dep[] array */ struct sfq_head { @@ -104,38 +103,28 @@ struct sfq_slot { struct sk_buff *skblist_next; struct sk_buff *skblist_prev; sfq_index qlen; /* number of skbs in skblist */ - sfq_index next; /* next slot in sfq RR chain */ + sfq_index next; /* next slot in sfq chain */ struct sfq_head dep; /* anchor in dep[] chains */ unsigned short hash; /* hash value (index in ht[]) */ short allot; /* credit for this slot */ }; struct sfq_sched_data { -/* frequently used fields */ - int limit; /* limit of total number of packets in this qdisc */ +/* Parameters */ + int perturb_period; + unsigned int quantum; /* Allotment per round: MUST BE >= MTU */ + int limit; unsigned int divisor; /* number of slots in hash table */ - unsigned int maxflows; /* number of flows in flows array */ - int headdrop; - int maxdepth; /* limit of packets per flow */ - - u32 perturbation; +/* Variables */ struct tcf_proto *filter_list; + struct timer_list perturb_timer; + u32 perturbation; sfq_index cur_depth; /* depth of longest slot */ unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */ struct sfq_slot *tail; /* current slot in round */ - sfq_index *ht; /* Hash table ('divisor' slots) */ - struct sfq_slot *slots; /* Flows table ('maxflows' entries) */ - - struct sfq_head dep[SFQ_MAX_DEPTH + 1]; - /* Linked lists of slots, indexed by depth - * dep[0] : list of unused flows - * dep[1] : list of flows with 1 packet - * dep[X] : list of flows with X packets - */ - - int perturb_period; - unsigned int quantum; /* Allotment per round: MUST BE >= MTU */ - struct timer_list perturb_timer; + sfq_index *ht; /* Hash table (divisor slots) */ + struct sfq_slot slots[SFQ_SLOTS]; + struct sfq_head dep[SFQ_DEPTH]; /* Linked list of slots, indexed by depth */ }; /* @@ -143,36 +132,66 @@ struct sfq_sched_data { */ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val) { - if (val < SFQ_MAX_FLOWS) + if (val < SFQ_SLOTS) return &q->slots[val].dep; - return &q->dep[val - SFQ_MAX_FLOWS]; + return &q->dep[val - SFQ_SLOTS]; } -/* - * In order to be able to quickly rehash our queue when timer changes - * q->perturbation, we store flow_keys in skb->cb[] - */ -struct sfq_skb_cb { - struct flow_keys keys; -}; - -static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb) +static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1) { - BUILD_BUG_ON(sizeof(skb->cb) < - sizeof(struct qdisc_skb_cb) + sizeof(struct sfq_skb_cb)); - return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data; + return jhash_2words(h, h1, q->perturbation) & (q->divisor - 1); } -static unsigned int sfq_hash(const struct sfq_sched_data *q, - const struct sk_buff *skb) +static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) { - const struct flow_keys *keys = &sfq_skb_cb(skb)->keys; - unsigned int hash; + u32 h, h2; + + switch (skb->protocol) { + case htons(ETH_P_IP): + { + const struct iphdr *iph; + int poff; + + if (!pskb_network_may_pull(skb, sizeof(*iph))) + goto err; + iph = ip_hdr(skb); + h = (__force u32)iph->daddr; + h2 = (__force u32)iph->saddr ^ iph->protocol; + if (ip_is_fragment(iph)) + break; + poff = proto_ports_offset(iph->protocol); + if (poff >= 0 && + pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) { + iph = ip_hdr(skb); + h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff); + } + break; + } + case htons(ETH_P_IPV6): + { + const struct ipv6hdr *iph; + int poff; + + if (!pskb_network_may_pull(skb, sizeof(*iph))) + goto err; + iph = 
ipv6_hdr(skb); + h = (__force u32)iph->daddr.s6_addr32[3]; + h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr; + poff = proto_ports_offset(iph->nexthdr); + if (poff >= 0 && + pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) { + iph = ipv6_hdr(skb); + h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff); + } + break; + } + default: +err: + h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol; + h2 = (unsigned long)skb->sk; + } - hash = jhash_3words((__force u32)keys->dst, - (__force u32)keys->src ^ keys->ip_proto, - (__force u32)keys->ports, q->perturbation); - return hash & (q->divisor - 1); + return sfq_fold_hash(q, h, h2); } static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch, @@ -187,10 +206,8 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch, TC_H_MIN(skb->priority) <= q->divisor) return TC_H_MIN(skb->priority); - if (!q->filter_list) { - skb_flow_dissect(skb, &sfq_skb_cb(skb)->keys); + if (!q->filter_list) return sfq_hash(q, skb) + 1; - } *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; result = tc_classify(skb, q->filter_list, &res); @@ -211,19 +228,18 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch, } /* - * x : slot number [0 .. SFQ_MAX_FLOWS - 1] + * x : slot number [0 .. SFQ_SLOTS - 1] */ static inline void sfq_link(struct sfq_sched_data *q, sfq_index x) { sfq_index p, n; - struct sfq_slot *slot = &q->slots[x]; - int qlen = slot->qlen; + int qlen = q->slots[x].qlen; - p = qlen + SFQ_MAX_FLOWS; + p = qlen + SFQ_SLOTS; n = q->dep[qlen].next; - slot->dep.next = n; - slot->dep.prev = p; + q->slots[x].dep.next = n; + q->slots[x].dep.prev = p; q->dep[qlen].next = x; /* sfq_dep_head(q, p)->next = x */ sfq_dep_head(q, n)->prev = x; @@ -288,7 +304,6 @@ static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot) static inline void slot_queue_init(struct sfq_slot *slot) { - memset(slot, 0, sizeof(*slot)); slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot; } @@ -319,7 +334,7 @@ static unsigned int sfq_drop(struct Qdisc *sch) x = q->dep[d].next; slot = &q->slots[x]; drop: - skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot); + skb = slot_dequeue_tail(slot); len = qdisc_pkt_len(skb); sfq_dec(q, x); kfree_skb(skb); @@ -363,27 +378,16 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) slot = &q->slots[x]; if (x == SFQ_EMPTY_SLOT) { x = q->dep[0].next; /* get a free slot */ - if (x >= SFQ_MAX_FLOWS) - return qdisc_drop(skb, sch); q->ht[hash] = x; slot = &q->slots[x]; slot->hash = hash; } - if (slot->qlen >= q->maxdepth) { - struct sk_buff *head; - - if (!q->headdrop) - return qdisc_drop(skb, sch); - - head = slot_dequeue_head(slot); - sch->qstats.backlog -= qdisc_pkt_len(head); - qdisc_drop(head, sch); - - sch->qstats.backlog += qdisc_pkt_len(skb); - slot_queue_add(slot, skb); - return NET_XMIT_CN; - } + /* If selected queue has length q->limit, do simple tail drop, + * i.e. drop _this_ packet. 
+ */ + if (slot->qlen >= q->limit) + return qdisc_drop(skb, sch); sch->qstats.backlog += qdisc_pkt_len(skb); slot_queue_add(slot, skb); @@ -391,11 +395,11 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (slot->qlen == 1) { /* The flow is new */ if (q->tail == NULL) { /* It is the first flow */ slot->next = x; - q->tail = slot; } else { slot->next = q->tail->next; q->tail->next = x; } + q->tail = slot; slot->allot = q->scaled_quantum; } if (++sch->q.qlen <= q->limit) @@ -464,83 +468,12 @@ sfq_reset(struct Qdisc *sch) kfree_skb(skb); } -/* - * When q->perturbation is changed, we rehash all queued skbs - * to avoid OOO (Out Of Order) effects. - * We dont use sfq_dequeue()/sfq_enqueue() because we dont want to change - * counters. - */ -static void sfq_rehash(struct Qdisc *sch) -{ - struct sfq_sched_data *q = qdisc_priv(sch); - struct sk_buff *skb; - int i; - struct sfq_slot *slot; - struct sk_buff_head list; - int dropped = 0; - - __skb_queue_head_init(&list); - - for (i = 0; i < q->maxflows; i++) { - slot = &q->slots[i]; - if (!slot->qlen) - continue; - while (slot->qlen) { - skb = slot_dequeue_head(slot); - sfq_dec(q, i); - __skb_queue_tail(&list, skb); - } - q->ht[slot->hash] = SFQ_EMPTY_SLOT; - } - q->tail = NULL; - - while ((skb = __skb_dequeue(&list)) != NULL) { - unsigned int hash = sfq_hash(q, skb); - sfq_index x = q->ht[hash]; - - slot = &q->slots[x]; - if (x == SFQ_EMPTY_SLOT) { - x = q->dep[0].next; /* get a free slot */ - if (x >= SFQ_MAX_FLOWS) { -drop: sch->qstats.backlog -= qdisc_pkt_len(skb); - kfree_skb(skb); - dropped++; - continue; - } - q->ht[hash] = x; - slot = &q->slots[x]; - slot->hash = hash; - } - if (slot->qlen >= q->maxdepth) - goto drop; - slot_queue_add(slot, skb); - sfq_inc(q, x); - if (slot->qlen == 1) { /* The flow is new */ - if (q->tail == NULL) { /* It is the first flow */ - slot->next = x; - } else { - slot->next = q->tail->next; - q->tail->next = x; - } - q->tail = slot; - slot->allot = q->scaled_quantum; - } - } - sch->q.qlen -= dropped; - qdisc_tree_decrease_qlen(sch, dropped); -} - static void sfq_perturbation(unsigned long arg) { struct Qdisc *sch = (struct Qdisc *)arg; struct sfq_sched_data *q = qdisc_priv(sch); - spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch)); - spin_lock(root_lock); q->perturbation = net_random(); - if (!q->filter_list && q->tail) - sfq_rehash(sch); - spin_unlock(root_lock); if (q->perturb_period) mod_timer(&q->perturb_timer, jiffies + q->perturb_period); @@ -550,39 +483,23 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) { struct sfq_sched_data *q = qdisc_priv(sch); struct tc_sfq_qopt *ctl = nla_data(opt); - struct tc_sfq_qopt_v1 *ctl_v1 = NULL; unsigned int qlen; if (opt->nla_len < nla_attr_size(sizeof(*ctl))) return -EINVAL; - if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1))) - ctl_v1 = nla_data(opt); + if (ctl->divisor && (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536)) return -EINVAL; sch_tree_lock(sch); - if (ctl->quantum) { - q->quantum = ctl->quantum; - q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum); - } + q->quantum = ctl->quantum ? 
: psched_mtu(qdisc_dev(sch)); + q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum); q->perturb_period = ctl->perturb_period * HZ; - if (ctl->flows) - q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS); - if (ctl->divisor) { + if (ctl->limit) + q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1); + if (ctl->divisor) q->divisor = ctl->divisor; - q->maxflows = min_t(u32, q->maxflows, q->divisor); - } - if (ctl_v1) { - if (ctl_v1->depth) - q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH); - q->headdrop = ctl_v1->headdrop; - } - if (ctl->limit) { - q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows); - q->maxflows = min_t(u32, q->maxflows, q->limit); - } - qlen = sch->q.qlen; while (sch->q.qlen > q->limit) sfq_drop(sch); @@ -597,77 +514,46 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) return 0; } -static void *sfq_alloc(size_t sz) -{ - void *ptr = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN); - - if (!ptr) - ptr = vmalloc(sz); - return ptr; -} - -static void sfq_free(void *addr) -{ - if (addr) { - if (is_vmalloc_addr(addr)) - vfree(addr); - else - kfree(addr); - } -} - -static void sfq_destroy(struct Qdisc *sch) -{ - struct sfq_sched_data *q = qdisc_priv(sch); - - tcf_destroy_chain(&q->filter_list); - q->perturb_period = 0; - del_timer_sync(&q->perturb_timer); - sfq_free(q->ht); - sfq_free(q->slots); -} - static int sfq_init(struct Qdisc *sch, struct nlattr *opt) { struct sfq_sched_data *q = qdisc_priv(sch); + size_t sz; int i; q->perturb_timer.function = sfq_perturbation; q->perturb_timer.data = (unsigned long)sch; init_timer_deferrable(&q->perturb_timer); - for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) { - q->dep[i].next = i + SFQ_MAX_FLOWS; - q->dep[i].prev = i + SFQ_MAX_FLOWS; + for (i = 0; i < SFQ_DEPTH; i++) { + q->dep[i].next = i + SFQ_SLOTS; + q->dep[i].prev = i + SFQ_SLOTS; } - q->limit = SFQ_MAX_DEPTH; - q->maxdepth = SFQ_MAX_DEPTH; + q->limit = SFQ_DEPTH - 1; q->cur_depth = 0; q->tail = NULL; q->divisor = SFQ_DEFAULT_HASH_DIVISOR; - q->maxflows = SFQ_DEFAULT_FLOWS; - q->quantum = psched_mtu(qdisc_dev(sch)); - q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum); - q->perturb_period = 0; - q->perturbation = net_random(); - - if (opt) { + if (opt == NULL) { + q->quantum = psched_mtu(qdisc_dev(sch)); + q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum); + q->perturb_period = 0; + q->perturbation = net_random(); + } else { int err = sfq_change(sch, opt); if (err) return err; } - q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor); - q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows); - if (!q->ht || !q->slots) { - sfq_destroy(sch); + sz = sizeof(q->ht[0]) * q->divisor; + q->ht = kmalloc(sz, GFP_KERNEL); + if (!q->ht && sz > PAGE_SIZE) + q->ht = vmalloc(sz); + if (!q->ht) return -ENOMEM; - } for (i = 0; i < q->divisor; i++) q->ht[i] = SFQ_EMPTY_SLOT; - for (i = 0; i < q->maxflows; i++) { + for (i = 0; i < SFQ_SLOTS; i++) { slot_queue_init(&q->slots[i]); sfq_link(q, i); } @@ -678,20 +564,31 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt) return 0; } +static void sfq_destroy(struct Qdisc *sch) +{ + struct sfq_sched_data *q = qdisc_priv(sch); + + tcf_destroy_chain(&q->filter_list); + q->perturb_period = 0; + del_timer_sync(&q->perturb_timer); + if (is_vmalloc_addr(q->ht)) + vfree(q->ht); + else + kfree(q->ht); +} + static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb) { struct sfq_sched_data *q = qdisc_priv(sch); unsigned char *b = skb_tail_pointer(skb); - struct tc_sfq_qopt_v1 opt; - - memset(&opt, 0, sizeof(opt)); - opt.v0.quantum = q->quantum; - 
opt.v0.perturb_period = q->perturb_period / HZ; - opt.v0.limit = q->limit; - opt.v0.divisor = q->divisor; - opt.v0.flows = q->maxflows; - opt.depth = q->maxdepth; - opt.headdrop = q->headdrop; + struct tc_sfq_qopt opt; + + opt.quantum = q->quantum; + opt.perturb_period = q->perturb_period / HZ; + + opt.limit = q->limit; + opt.divisor = q->divisor; + opt.flows = q->limit; NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); diff --git a/trunk/net/sched/sch_tbf.c b/trunk/net/sched/sch_tbf.c index b8e156319d7b..1dcfb5223a86 100644 --- a/trunk/net/sched/sch_tbf.c +++ b/trunk/net/sched/sch_tbf.c @@ -346,7 +346,6 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb) struct nlattr *nest; struct tc_tbf_qopt opt; - sch->qstats.backlog = q->qdisc->qstats.backlog; nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; diff --git a/trunk/net/sched/sch_teql.c b/trunk/net/sched/sch_teql.c index 45326599fda3..4f4c52c0eeb3 100644 --- a/trunk/net/sched/sch_teql.c +++ b/trunk/net/sched/sch_teql.c @@ -277,7 +277,7 @@ static inline int teql_resolve(struct sk_buff *skb, return 0; rcu_read_lock(); - mn = dst_get_neighbour_noref(dst); + mn = dst_get_neighbour(dst); res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0; rcu_read_unlock(); @@ -310,7 +310,7 @@ static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) if (slave_txq->qdisc_sleeping != q) continue; - if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) || + if (__netif_subqueue_stopped(slave, subq) || !netif_running(slave)) { busy = 1; continue; @@ -321,7 +321,7 @@ static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) if (__netif_tx_trylock(slave_txq)) { unsigned int length = qdisc_pkt_len(skb); - if (!netif_xmit_frozen_or_stopped(slave_txq) && + if (!netif_tx_queue_frozen_or_stopped(slave_txq) && slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) { txq_trans_update(slave_txq); __netif_tx_unlock(slave_txq); @@ -333,7 +333,7 @@ static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) } __netif_tx_unlock(slave_txq); } - if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) + if (netif_queue_stopped(dev)) busy = 1; break; case 1: diff --git a/trunk/net/sctp/input.c b/trunk/net/sctp/input.c index 80f71af71384..b7692aab6e9c 100644 --- a/trunk/net/sctp/input.c +++ b/trunk/net/sctp/input.c @@ -105,7 +105,7 @@ static inline int sctp_rcv_checksum(struct sk_buff *skb) struct sctp_input_cb { union { struct inet_skb_parm h4; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) struct inet6_skb_parm h6; #endif } header; diff --git a/trunk/net/sctp/ipv6.c b/trunk/net/sctp/ipv6.c index 91f479121c55..810427833bcd 100644 --- a/trunk/net/sctp/ipv6.c +++ b/trunk/net/sctp/ipv6.c @@ -107,7 +107,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev, if (addr) { addr->a.v6.sin6_family = AF_INET6; addr->a.v6.sin6_port = 0; - addr->a.v6.sin6_addr = ifa->addr; + ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifa->addr); addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex; addr->valid = 1; spin_lock_bh(&sctp_local_addr_lock); @@ -219,8 +219,8 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) /* Fill in the dest address from the route entry passed with the skb * and the source address from the transport. 
*/ - fl6.daddr = transport->ipaddr.v6.sin6_addr; - fl6.saddr = transport->saddr.v6.sin6_addr; + ipv6_addr_copy(&fl6.daddr, &transport->ipaddr.v6.sin6_addr); + ipv6_addr_copy(&fl6.saddr, &transport->saddr.v6.sin6_addr); fl6.flowlabel = np->flow_label; IP6_ECN_flow_xmit(sk, fl6.flowlabel); @@ -231,7 +231,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) if (np->opt && np->opt->srcrt) { struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt; - fl6.daddr = *rt0->addr; + ipv6_addr_copy(&fl6.daddr, rt0->addr); } SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", @@ -265,7 +265,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, sctp_scope_t scope; memset(fl6, 0, sizeof(struct flowi6)); - fl6->daddr = daddr->v6.sin6_addr; + ipv6_addr_copy(&fl6->daddr, &daddr->v6.sin6_addr); fl6->fl6_dport = daddr->v6.sin6_port; fl6->flowi6_proto = IPPROTO_SCTP; if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) @@ -277,7 +277,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, fl6->fl6_sport = htons(asoc->base.bind_addr.port); if (saddr) { - fl6->saddr = saddr->v6.sin6_addr; + ipv6_addr_copy(&fl6->saddr, &saddr->v6.sin6_addr); fl6->fl6_sport = saddr->v6.sin6_port; SCTP_DEBUG_PRINTK("SRC=%pI6 - ", &fl6->saddr); } @@ -334,7 +334,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, } rcu_read_unlock(); if (baddr) { - fl6->saddr = baddr->v6.sin6_addr; + ipv6_addr_copy(&fl6->saddr, &baddr->v6.sin6_addr); fl6->fl6_sport = baddr->v6.sin6_port; dst = ip6_dst_lookup_flow(sk, fl6, NULL, false); } @@ -375,7 +375,7 @@ static void sctp_v6_get_saddr(struct sctp_sock *sk, if (t->dst) { saddr->v6.sin6_family = AF_INET6; - saddr->v6.sin6_addr = fl6->saddr; + ipv6_addr_copy(&saddr->v6.sin6_addr, &fl6->saddr); } } @@ -400,7 +400,7 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist, if (addr) { addr->a.v6.sin6_family = AF_INET6; addr->a.v6.sin6_port = 0; - addr->a.v6.sin6_addr = ifp->addr; + ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifp->addr); addr->a.v6.sin6_scope_id = dev->ifindex; addr->valid = 1; INIT_LIST_HEAD(&addr->list); @@ -416,6 +416,7 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist, static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb, int is_saddr) { + void *from; __be16 *port; struct sctphdr *sh; @@ -427,11 +428,12 @@ static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb, sh = sctp_hdr(skb); if (is_saddr) { *port = sh->source; - addr->v6.sin6_addr = ipv6_hdr(skb)->saddr; + from = &ipv6_hdr(skb)->saddr; } else { *port = sh->dest; - addr->v6.sin6_addr = ipv6_hdr(skb)->daddr; + from = &ipv6_hdr(skb)->daddr; } + ipv6_addr_copy(&addr->v6.sin6_addr, from); } /* Initialize an sctp_addr from a socket. */ @@ -439,7 +441,7 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk) { addr->v6.sin6_family = AF_INET6; addr->v6.sin6_port = 0; - addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr; + ipv6_addr_copy(&addr->v6.sin6_addr, &inet6_sk(sk)->rcv_saddr); } /* Initialize sk->sk_rcv_saddr from sctp_addr. 
*/ @@ -452,7 +454,7 @@ static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk) inet6_sk(sk)->rcv_saddr.s6_addr32[3] = addr->v4.sin_addr.s_addr; } else { - inet6_sk(sk)->rcv_saddr = addr->v6.sin6_addr; + ipv6_addr_copy(&inet6_sk(sk)->rcv_saddr, &addr->v6.sin6_addr); } } @@ -465,7 +467,7 @@ static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk) inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff); inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr; } else { - inet6_sk(sk)->daddr = addr->v6.sin6_addr; + ipv6_addr_copy(&inet6_sk(sk)->daddr, &addr->v6.sin6_addr); } } @@ -477,7 +479,7 @@ static void sctp_v6_from_addr_param(union sctp_addr *addr, addr->v6.sin6_family = AF_INET6; addr->v6.sin6_port = port; addr->v6.sin6_flowinfo = 0; /* BUG */ - addr->v6.sin6_addr = param->v6.addr; + ipv6_addr_copy(&addr->v6.sin6_addr, ¶m->v6.addr); addr->v6.sin6_scope_id = iif; } @@ -491,7 +493,7 @@ static int sctp_v6_to_addr_param(const union sctp_addr *addr, param->v6.param_hdr.type = SCTP_PARAM_IPV6_ADDRESS; param->v6.param_hdr.length = htons(length); - param->v6.addr = addr->v6.sin6_addr; + ipv6_addr_copy(¶m->v6.addr, &addr->v6.sin6_addr); return length; } @@ -502,7 +504,7 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr, { addr->sa.sa_family = AF_INET6; addr->v6.sin6_port = port; - addr->v6.sin6_addr = *saddr; + ipv6_addr_copy(&addr->v6.sin6_addr, saddr); } /* Compare addresses exactly. @@ -757,7 +759,7 @@ static void sctp_inet6_event_msgname(struct sctp_ulpevent *event, } sin6from = &asoc->peer.primary_addr.v6; - sin6->sin6_addr = sin6from->sin6_addr; + ipv6_addr_copy(&sin6->sin6_addr, &sin6from->sin6_addr); if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) sin6->sin6_scope_id = sin6from->sin6_scope_id; } @@ -785,7 +787,7 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname, } /* Otherwise, just copy the v6 address. */ - sin6->sin6_addr = ipv6_hdr(skb)->saddr; + ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr); if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) { struct sctp_ulpevent *ev = sctp_skb2event(skb); sin6->sin6_scope_id = ev->iif; diff --git a/trunk/net/sctp/protocol.c b/trunk/net/sctp/protocol.c index 5942d27b1444..6f6ad8686833 100644 --- a/trunk/net/sctp/protocol.c +++ b/trunk/net/sctp/protocol.c @@ -637,7 +637,7 @@ void sctp_addr_wq_timeout_handler(unsigned long arg) " for cmd %d at entry %p\n", &sctp_addr_waitq, &addrw->a, addrw->state, addrw); -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) /* Now we send an ASCONF for each association */ /* Note. we currently don't handle link local IPv6 addressees */ if (addrw->a.sa.sa_family == AF_INET6) { diff --git a/trunk/net/sctp/sm_make_chunk.c b/trunk/net/sctp/sm_make_chunk.c index a85eeeb55dd0..0121e0ab0351 100644 --- a/trunk/net/sctp/sm_make_chunk.c +++ b/trunk/net/sctp/sm_make_chunk.c @@ -3400,10 +3400,8 @@ int sctp_process_asconf_ack(struct sctp_association *asoc, asconf_len -= length; } - if (no_err && asoc->src_out_of_asoc_ok) { + if (no_err && asoc->src_out_of_asoc_ok) asoc->src_out_of_asoc_ok = 0; - sctp_transport_immediate_rtx(asoc->peer.primary_path); - } /* Free the cached last sent asconf chunk. 
*/ list_del_init(&asconf->transmitted_list); diff --git a/trunk/net/sctp/sm_sideeffect.c b/trunk/net/sctp/sm_sideeffect.c index 1ff51c9d18d5..76388b083f28 100644 --- a/trunk/net/sctp/sm_sideeffect.c +++ b/trunk/net/sctp/sm_sideeffect.c @@ -666,7 +666,6 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, struct sctp_chunk *chunk) { sctp_sender_hb_info_t *hbinfo; - int was_unconfirmed = 0; /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the * HEARTBEAT should clear the error counter of the destination @@ -693,11 +692,9 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, /* Mark the destination transport address as active if it is not so * marked. */ - if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) { - was_unconfirmed = 1; + if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP, SCTP_HEARTBEAT_SUCCESS); - } /* The receiver of the HEARTBEAT ACK should also perform an * RTT measurement for that destination transport address @@ -715,9 +712,6 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, /* Update the heartbeat timer. */ if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) sctp_transport_hold(t); - - if (was_unconfirmed && asoc->peer.transport_count == 1) - sctp_transport_immediate_rtx(t); } diff --git a/trunk/net/sctp/socket.c b/trunk/net/sctp/socket.c index 408ebd0e7330..54a7cd2fdd7a 100644 --- a/trunk/net/sctp/socket.c +++ b/trunk/net/sctp/socket.c @@ -804,7 +804,7 @@ static int sctp_send_asconf_del_ip(struct sock *sk, struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addrs; - asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr; + ipv6_addr_copy(&asoc->asconf_addr_del_pending->v6.sin6_addr, &sin6->sin6_addr); } SCTP_DEBUG_PRINTK_IPADDR("send_asconf_del_ip: keep the last address asoc: %p ", " at %p\n", asoc, asoc->asconf_addr_del_pending, @@ -6839,7 +6839,7 @@ struct proto sctp_prot = { .sockets_allocated = &sctp_sockets_allocated, }; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) struct proto sctpv6_prot = { .name = "SCTPv6", @@ -6870,4 +6870,4 @@ struct proto sctpv6_prot = { .memory_allocated = &sctp_memory_allocated, .sockets_allocated = &sctp_sockets_allocated, }; -#endif /* IS_ENABLED(CONFIG_IPV6) */ +#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ diff --git a/trunk/net/sctp/transport.c b/trunk/net/sctp/transport.c index 3889330b7b04..394c57ca2f54 100644 --- a/trunk/net/sctp/transport.c +++ b/trunk/net/sctp/transport.c @@ -641,19 +641,3 @@ void sctp_transport_reset(struct sctp_transport *t) t->cacc.next_tsn_at_change = 0; t->cacc.cacc_saw_newack = 0; } - -/* Schedule retransmission on the given transport */ -void sctp_transport_immediate_rtx(struct sctp_transport *t) -{ - /* Stop pending T3_rtx_timer */ - if (timer_pending(&t->T3_rtx_timer)) { - (void)del_timer(&t->T3_rtx_timer); - sctp_transport_put(t); - } - sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX); - if (!timer_pending(&t->T3_rtx_timer)) { - if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto)) - sctp_transport_hold(t); - } - return; -} diff --git a/trunk/net/socket.c b/trunk/net/socket.c index e56162cd65b0..2877647f347b 100644 --- a/trunk/net/socket.c +++ b/trunk/net/socket.c @@ -538,8 +538,6 @@ int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags) *tx_flags |= SKBTX_HW_TSTAMP; if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE)) *tx_flags |= SKBTX_SW_TSTAMP; - if (sock_flag(sk, SOCK_WIFI_STATUS)) - *tx_flags |= 
SKBTX_WIFI_STATUS; return 0; } EXPORT_SYMBOL(sock_tx_timestamp); @@ -551,8 +549,6 @@ static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock, sock_update_classid(sock->sk); - sock_update_netprioidx(sock->sk); - si->sock = sock; si->scm = NULL; si->msg = msg; @@ -678,22 +674,6 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, } EXPORT_SYMBOL_GPL(__sock_recv_timestamp); -void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, - struct sk_buff *skb) -{ - int ack; - - if (!sock_flag(sk, SOCK_WIFI_STATUS)) - return; - if (!skb->wifi_acked_valid) - return; - - ack = skb->wifi_acked; - - put_cmsg(msg, SOL_SOCKET, SCM_WIFI_STATUS, sizeof(ack), &ack); -} -EXPORT_SYMBOL_GPL(__sock_recv_wifi_status); - static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { @@ -2758,10 +2738,10 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) case ETHTOOL_GRXRINGS: case ETHTOOL_GRXCLSRLCNT: case ETHTOOL_GRXCLSRULE: - case ETHTOOL_SRXCLSRLINS: convert_out = true; /* fall through */ case ETHTOOL_SRXCLSRLDEL: + case ETHTOOL_SRXCLSRLINS: buf_size += sizeof(struct ethtool_rxnfc); convert_in = true; break; @@ -2903,7 +2883,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd, return dev_ioctl(net, cmd, uifr); default: - return -ENOIOCTLCMD; + return -EINVAL; } } @@ -3230,6 +3210,20 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, return sock_do_ioctl(net, sock, cmd, arg); } + /* Prevent warning from compat_sys_ioctl, these always + * result in -EINVAL in the native case anyway. */ + switch (cmd) { + case SIOCRTMSG: + case SIOCGIFCOUNT: + case SIOCSRARP: + case SIOCGRARP: + case SIOCDRARP: + case SIOCSIFLINK: + case SIOCGIFSLAVE: + case SIOCSIFSLAVE: + return -EINVAL; + } + return -ENOIOCTLCMD; } diff --git a/trunk/net/sunrpc/addr.c b/trunk/net/sunrpc/addr.c index ee77742e0ed6..67a655ee82a9 100644 --- a/trunk/net/sunrpc/addr.c +++ b/trunk/net/sunrpc/addr.c @@ -21,7 +21,7 @@ #include #include -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap, char *buf, const int buflen) @@ -91,7 +91,7 @@ static size_t rpc_ntop6(const struct sockaddr *sap, return len; } -#else /* !IS_ENABLED(CONFIG_IPV6) */ +#else /* !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)) */ static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap, char *buf, const int buflen) @@ -105,7 +105,7 @@ static size_t rpc_ntop6(const struct sockaddr *sap, return 0; } -#endif /* !IS_ENABLED(CONFIG_IPV6) */ +#endif /* !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)) */ static int rpc_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen) @@ -155,7 +155,7 @@ static size_t rpc_pton4(const char *buf, const size_t buflen, return sizeof(struct sockaddr_in); } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static int rpc_parse_scope_id(const char *buf, const size_t buflen, const char *delim, struct sockaddr_in6 *sin6) { diff --git a/trunk/net/sunrpc/svc.c b/trunk/net/sunrpc/svc.c index 9d01d46b05f3..6e038884ae0c 100644 --- a/trunk/net/sunrpc/svc.c +++ b/trunk/net/sunrpc/svc.c @@ -826,7 +826,7 @@ static int __svc_rpcb_register4(const u32 program, const u32 version, return error; } -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) /* * Register an "inet6" protocol family netid with the local * rpcbind daemon via an rpcbind v4 SET 
request. @@ -872,7 +872,7 @@ static int __svc_rpcb_register6(const u32 program, const u32 version, return error; } -#endif /* IS_ENABLED(CONFIG_IPV6) */ +#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ /* * Register a kernel RPC service via rpcbind version 4. @@ -893,11 +893,11 @@ static int __svc_register(const char *progname, error = __svc_rpcb_register4(program, version, protocol, port); break; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case PF_INET6: error = __svc_rpcb_register6(program, version, protocol, port); -#endif +#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ } if (error < 0) diff --git a/trunk/net/sunrpc/svc_xprt.c b/trunk/net/sunrpc/svc_xprt.c index 38649cfa4e81..447cd0eb415c 100644 --- a/trunk/net/sunrpc/svc_xprt.c +++ b/trunk/net/sunrpc/svc_xprt.c @@ -179,13 +179,13 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl, .sin_addr.s_addr = htonl(INADDR_ANY), .sin_port = htons(port), }; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) struct sockaddr_in6 sin6 = { .sin6_family = AF_INET6, .sin6_addr = IN6ADDR_ANY_INIT, .sin6_port = htons(port), }; -#endif +#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ struct sockaddr *sap; size_t len; @@ -194,12 +194,12 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl, sap = (struct sockaddr *)&sin; len = sizeof(sin); break; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case PF_INET6: sap = (struct sockaddr *)&sin6; len = sizeof(sin6); break; -#endif +#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ default: return ERR_PTR(-EAFNOSUPPORT); } diff --git a/trunk/net/sunrpc/svcauth_unix.c b/trunk/net/sunrpc/svcauth_unix.c index 01153ead1dba..ce136323da8b 100644 --- a/trunk/net/sunrpc/svcauth_unix.c +++ b/trunk/net/sunrpc/svcauth_unix.c @@ -134,7 +134,7 @@ static void ip_map_init(struct cache_head *cnew, struct cache_head *citem) struct ip_map *item = container_of(citem, struct ip_map, h); strcpy(new->m_class, item->m_class); - new->m_addr = item->m_addr; + ipv6_addr_copy(&new->m_addr, &item->m_addr); } static void update(struct cache_head *cnew, struct cache_head *citem) { @@ -220,7 +220,7 @@ static int ip_map_parse(struct cache_detail *cd, ipv6_addr_set_v4mapped(address.s4.sin_addr.s_addr, &sin6.sin6_addr); break; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: memcpy(&sin6, &address.s6, sizeof(sin6)); break; @@ -274,7 +274,7 @@ static int ip_map_show(struct seq_file *m, } im = container_of(h, struct ip_map, h); /* class addr domain */ - addr = im->m_addr; + ipv6_addr_copy(&addr, &im->m_addr); if (test_bit(CACHE_VALID, &h->flags) && !test_bit(CACHE_NEGATIVE, &h->flags)) @@ -297,7 +297,7 @@ static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, struct cache_head *ch; strcpy(ip.m_class, class); - ip.m_addr = *addr; + ipv6_addr_copy(&ip.m_addr, addr); ch = sunrpc_cache_lookup(cd, &ip.h, hash_str(class, IP_HASHBITS) ^ hash_ip6(*addr)); diff --git a/trunk/net/sunrpc/svcsock.c b/trunk/net/sunrpc/svcsock.c index 4653286fcc9e..71bed1c1c77a 100644 --- a/trunk/net/sunrpc/svcsock.c +++ b/trunk/net/sunrpc/svcsock.c @@ -157,7 +157,7 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh) cmh->cmsg_level = SOL_IPV6; cmh->cmsg_type = IPV6_PKTINFO; pki->ipi6_ifindex = daddr->sin6_scope_id; - pki->ipi6_addr = daddr->sin6_addr; + 
ipv6_addr_copy(&pki->ipi6_addr, &daddr->sin6_addr); cmh->cmsg_len = CMSG_LEN(sizeof(*pki)); } break; @@ -523,7 +523,7 @@ static int svc_udp_get_dest_address6(struct svc_rqst *rqstp, return 0; daddr->sin6_family = AF_INET6; - daddr->sin6_addr = pki->ipi6_addr; + ipv6_addr_copy(&daddr->sin6_addr, &pki->ipi6_addr); daddr->sin6_scope_id = pki->ipi6_ifindex; return 1; } diff --git a/trunk/net/tipc/bcast.c b/trunk/net/tipc/bcast.c index 8eb87b11d100..28908f54459e 100644 --- a/trunk/net/tipc/bcast.c +++ b/trunk/net/tipc/bcast.c @@ -46,7 +46,7 @@ #define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */ /** - * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link + * struct bcbearer_pair - a pair of bearers used by broadcast link * @primary: pointer to primary bearer * @secondary: pointer to secondary bearer * @@ -54,13 +54,13 @@ * to be paired. */ -struct tipc_bcbearer_pair { +struct bcbearer_pair { struct tipc_bearer *primary; struct tipc_bearer *secondary; }; /** - * struct tipc_bcbearer - bearer used by broadcast link + * struct bcbearer - bearer used by broadcast link * @bearer: (non-standard) broadcast bearer structure * @media: (non-standard) broadcast media structure * @bpairs: array of bearer pairs @@ -74,47 +74,50 @@ struct tipc_bcbearer_pair { * prevented through use of the spinlock "bc_lock". */ -struct tipc_bcbearer { +struct bcbearer { struct tipc_bearer bearer; - struct tipc_media media; - struct tipc_bcbearer_pair bpairs[MAX_BEARERS]; - struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1]; + struct media media; + struct bcbearer_pair bpairs[MAX_BEARERS]; + struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1]; struct tipc_node_map remains; struct tipc_node_map remains_new; }; /** - * struct tipc_bclink - link used for broadcast messages + * struct bclink - link used for broadcast messages * @link: (non-standard) broadcast link structure * @node: (non-standard) node structure representing b'cast link's peer node - * @bcast_nodes: map of broadcast-capable nodes * @retransmit_to: node that most recently requested a retransmit * * Handles sequence numbering, fragmentation, bundling, etc. 
*/ -struct tipc_bclink { - struct tipc_link link; +struct bclink { + struct link link; struct tipc_node node; - struct tipc_node_map bcast_nodes; struct tipc_node *retransmit_to; }; -static struct tipc_bcbearer bcast_bearer; -static struct tipc_bclink bcast_link; - -static struct tipc_bcbearer *bcbearer = &bcast_bearer; -static struct tipc_bclink *bclink = &bcast_link; -static struct tipc_link *bcl = &bcast_link.link; +static struct bcbearer *bcbearer; +static struct bclink *bclink; +static struct link *bcl; static DEFINE_SPINLOCK(bc_lock); +/* broadcast-capable node map */ +struct tipc_node_map tipc_bcast_nmap; + const char tipc_bclink_name[] = "broadcast-link"; static void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b, struct tipc_node_map *nm_diff); +static u32 buf_seqno(struct sk_buff *buf) +{ + return msg_seqno(buf_msg(buf)); +} + static u32 bcbuf_acks(struct sk_buff *buf) { return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle; @@ -130,19 +133,6 @@ static void bcbuf_decr_acks(struct sk_buff *buf) bcbuf_set_acks(buf, bcbuf_acks(buf) - 1); } -void tipc_bclink_add_node(u32 addr) -{ - spin_lock_bh(&bc_lock); - tipc_nmap_add(&bclink->bcast_nodes, addr); - spin_unlock_bh(&bc_lock); -} - -void tipc_bclink_remove_node(u32 addr) -{ - spin_lock_bh(&bc_lock); - tipc_nmap_remove(&bclink->bcast_nodes, addr); - spin_unlock_bh(&bc_lock); -} static void bclink_set_last_sent(void) { @@ -232,36 +222,14 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) struct sk_buff *next; unsigned int released = 0; - spin_lock_bh(&bc_lock); - - /* Bail out if tx queue is empty (no clean up is required) */ - crs = bcl->first_out; - if (!crs) - goto exit; + if (less_eq(acked, n_ptr->bclink.acked)) + return; - /* Determine which messages need to be acknowledged */ - if (acked == INVALID_LINK_SEQ) { - /* - * Contact with specified node has been lost, so need to - * acknowledge sent messages only (if other nodes still exist) - * or both sent and unsent messages (otherwise) - */ - if (bclink->bcast_nodes.count) - acked = bcl->fsm_msg_cnt; - else - acked = bcl->next_out_no; - } else { - /* - * Bail out if specified sequence number does not correspond - * to a message that has been sent and not yet acknowledged - */ - if (less(acked, buf_seqno(crs)) || - less(bcl->fsm_msg_cnt, acked) || - less_eq(acked, n_ptr->bclink.acked)) - goto exit; - } + spin_lock_bh(&bc_lock); /* Skip over packets that node has previously acknowledged */ + + crs = bcl->first_out; while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked)) crs = crs->next; @@ -269,15 +237,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) while (crs && less_eq(buf_seqno(crs), acked)) { next = crs->next; - - if (crs != bcl->next_out) - bcbuf_decr_acks(crs); - else { - bcbuf_set_acks(crs, 0); - bcl->next_out = next; - bclink_set_last_sent(); - } - + bcbuf_decr_acks(crs); if (bcbuf_acks(crs) == 0) { bcl->first_out = next; bcl->out_queue_size--; @@ -296,7 +256,6 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) } if (unlikely(released && !list_empty(&bcl->waiting_ports))) tipc_link_wakeup_ports(bcl, 0); -exit: spin_unlock_bh(&bc_lock); } @@ -308,7 +267,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) static void bclink_send_ack(struct tipc_node *n_ptr) { - struct tipc_link *l_ptr = n_ptr->active_links[n_ptr->addr & 1]; + struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1]; if (l_ptr != NULL) tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); @@ -443,19 +402,13 @@ 
int tipc_bclink_send_msg(struct sk_buff *buf) spin_lock_bh(&bc_lock); - if (!bclink->bcast_nodes.count) { - res = msg_data_sz(buf_msg(buf)); - buf_discard(buf); - goto exit; - } - res = tipc_link_send_buf(bcl, buf); - if (likely(res >= 0)) { + if (likely(res > 0)) bclink_set_last_sent(); - bcl->stats.queue_sz_counts++; - bcl->stats.accu_queue_sz += bcl->out_queue_size; - } -exit: + + bcl->stats.queue_sz_counts++; + bcl->stats.accu_queue_sz += bcl->out_queue_size; + spin_unlock_bh(&bc_lock); return res; } @@ -619,13 +572,13 @@ static int tipc_bcbearer_send(struct sk_buff *buf, if (likely(!msg_non_seq(buf_msg(buf)))) { struct tipc_msg *msg; - bcbuf_set_acks(buf, bclink->bcast_nodes.count); + bcbuf_set_acks(buf, tipc_bcast_nmap.count); msg = buf_msg(buf); msg_set_non_seq(msg, 1); msg_set_mc_netid(msg, tipc_net_id); bcl->stats.sent_info++; - if (WARN_ON(!bclink->bcast_nodes.count)) { + if (WARN_ON(!tipc_bcast_nmap.count)) { dump_stack(); return 0; } @@ -633,7 +586,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf, /* Send buffer over bearers until all targets reached */ - bcbearer->remains = bclink->bcast_nodes; + bcbearer->remains = tipc_bcast_nmap; for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) { struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary; @@ -677,8 +630,8 @@ static int tipc_bcbearer_send(struct sk_buff *buf, void tipc_bcbearer_sort(void) { - struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp; - struct tipc_bcbearer_pair *bp_curr; + struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp; + struct bcbearer_pair *bp_curr; int b_index; int pri; @@ -799,13 +752,25 @@ int tipc_bclink_set_queue_limits(u32 limit) return 0; } -void tipc_bclink_init(void) +int tipc_bclink_init(void) { + bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC); + bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC); + if (!bcbearer || !bclink) { + warn("Broadcast link creation failed, no memory\n"); + kfree(bcbearer); + bcbearer = NULL; + kfree(bclink); + bclink = NULL; + return -ENOMEM; + } + INIT_LIST_HEAD(&bcbearer->bearer.cong_links); bcbearer->bearer.media = &bcbearer->media; bcbearer->media.send_msg = tipc_bcbearer_send; sprintf(bcbearer->media.name, "tipc-broadcast"); + bcl = &bclink->link; INIT_LIST_HEAD(&bcl->waiting_ports); bcl->next_out_no = 1; spin_lock_init(&bclink->node.lock); @@ -815,16 +780,22 @@ void tipc_bclink_init(void) bcl->b_ptr = &bcbearer->bearer; bcl->state = WORKING_WORKING; strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME); + + return 0; } void tipc_bclink_stop(void) { spin_lock_bh(&bc_lock); - tipc_link_stop(bcl); + if (bcbearer) { + tipc_link_stop(bcl); + bcl = NULL; + kfree(bclink); + bclink = NULL; + kfree(bcbearer); + bcbearer = NULL; + } spin_unlock_bh(&bc_lock); - - memset(bclink, 0, sizeof(*bclink)); - memset(bcbearer, 0, sizeof(*bcbearer)); } @@ -893,9 +864,9 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a, * tipc_port_list_add - add a port to a port list, ensuring no duplicates */ -void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port) +void tipc_port_list_add(struct port_list *pl_ptr, u32 port) { - struct tipc_port_list *item = pl_ptr; + struct port_list *item = pl_ptr; int i; int item_sz = PLSIZE; int cnt = pl_ptr->count; @@ -927,10 +898,10 @@ void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port) * */ -void tipc_port_list_free(struct tipc_port_list *pl_ptr) +void tipc_port_list_free(struct port_list *pl_ptr) { - struct tipc_port_list *item; - struct tipc_port_list *next; + struct port_list *item; + struct port_list *next; 
for (item = pl_ptr->next; item; item = next) { next = item->next; diff --git a/trunk/net/tipc/bcast.h b/trunk/net/tipc/bcast.h index b009666c60b0..06740da5ae61 100644 --- a/trunk/net/tipc/bcast.h +++ b/trunk/net/tipc/bcast.h @@ -51,18 +51,20 @@ struct tipc_node_map { u32 map[MAX_NODES / WSIZE]; }; +extern struct tipc_node_map tipc_bcast_nmap; + #define PLSIZE 32 /** - * struct tipc_port_list - set of node local destination ports + * struct port_list - set of node local destination ports * @count: # of ports in set (only valid for first entry in list) * @next: pointer to next entry in list * @ports: array of port references */ -struct tipc_port_list { +struct port_list { int count; - struct tipc_port_list *next; + struct port_list *next; u32 ports[PLSIZE]; }; @@ -83,13 +85,11 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_m return !memcmp(nm_a, nm_b, sizeof(*nm_a)); } -void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port); -void tipc_port_list_free(struct tipc_port_list *pl_ptr); +void tipc_port_list_add(struct port_list *pl_ptr, u32 port); +void tipc_port_list_free(struct port_list *pl_ptr); -void tipc_bclink_init(void); +int tipc_bclink_init(void); void tipc_bclink_stop(void); -void tipc_bclink_add_node(u32 addr); -void tipc_bclink_remove_node(u32 addr); struct tipc_node *tipc_bclink_retransmit_to(void); void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked); int tipc_bclink_send_msg(struct sk_buff *buf); diff --git a/trunk/net/tipc/bearer.c b/trunk/net/tipc/bearer.c index 329fb659fae4..e2202de3d93e 100644 --- a/trunk/net/tipc/bearer.c +++ b/trunk/net/tipc/bearer.c @@ -41,7 +41,7 @@ #define MAX_ADDR_STR 32 -static struct tipc_media *media_list[MAX_MEDIA]; +static struct media media_list[MAX_MEDIA]; static u32 media_count; struct tipc_bearer tipc_bearers[MAX_BEARERS]; @@ -65,31 +65,17 @@ static int media_name_valid(const char *name) } /** - * tipc_media_find - locates specified media object by name + * media_find - locates specified media object by name */ -struct tipc_media *tipc_media_find(const char *name) +static struct media *media_find(const char *name) { + struct media *m_ptr; u32 i; - for (i = 0; i < media_count; i++) { - if (!strcmp(media_list[i]->name, name)) - return media_list[i]; - } - return NULL; -} - -/** - * media_find_id - locates specified media object by type identifier - */ - -static struct tipc_media *media_find_id(u8 type) -{ - u32 i; - - for (i = 0; i < media_count; i++) { - if (media_list[i]->type_id == type) - return media_list[i]; + for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) { + if (!strcmp(m_ptr->name, name)) + return m_ptr; } return NULL; } @@ -100,34 +86,87 @@ static struct tipc_media *media_find_id(u8 type) * Bearers for this media type must be activated separately at a later stage. 
*/ -int tipc_register_media(struct tipc_media *m_ptr) +int tipc_register_media(u32 media_type, + char *name, + int (*enable)(struct tipc_bearer *), + void (*disable)(struct tipc_bearer *), + int (*send_msg)(struct sk_buff *, + struct tipc_bearer *, + struct tipc_media_addr *), + char *(*addr2str)(struct tipc_media_addr *a, + char *str_buf, int str_size), + struct tipc_media_addr *bcast_addr, + const u32 bearer_priority, + const u32 link_tolerance, /* [ms] */ + const u32 send_window_limit) { + struct media *m_ptr; + u32 media_id; + u32 i; int res = -EINVAL; write_lock_bh(&tipc_net_lock); - if (!media_name_valid(m_ptr->name)) + if (tipc_mode != TIPC_NET_MODE) { + warn("Media <%s> rejected, not in networked mode yet\n", name); goto exit; - if ((m_ptr->bcast_addr.media_id != m_ptr->type_id) || - !m_ptr->bcast_addr.broadcast) + } + if (!media_name_valid(name)) { + warn("Media <%s> rejected, illegal name\n", name); goto exit; - if (m_ptr->priority > TIPC_MAX_LINK_PRI) + } + if (!bcast_addr) { + warn("Media <%s> rejected, no broadcast address\n", name); goto exit; - if ((m_ptr->tolerance < TIPC_MIN_LINK_TOL) || - (m_ptr->tolerance > TIPC_MAX_LINK_TOL)) + } + if ((bearer_priority < TIPC_MIN_LINK_PRI) || + (bearer_priority > TIPC_MAX_LINK_PRI)) { + warn("Media <%s> rejected, illegal priority (%u)\n", name, + bearer_priority); goto exit; - if (media_count >= MAX_MEDIA) + } + if ((link_tolerance < TIPC_MIN_LINK_TOL) || + (link_tolerance > TIPC_MAX_LINK_TOL)) { + warn("Media <%s> rejected, illegal tolerance (%u)\n", name, + link_tolerance); goto exit; - if (tipc_media_find(m_ptr->name) || media_find_id(m_ptr->type_id)) + } + + media_id = media_count++; + if (media_id >= MAX_MEDIA) { + warn("Media <%s> rejected, media limit reached (%u)\n", name, + MAX_MEDIA); + media_count--; goto exit; + } + for (i = 0; i < media_id; i++) { + if (media_list[i].type_id == media_type) { + warn("Media <%s> rejected, duplicate type (%u)\n", name, + media_type); + media_count--; + goto exit; + } + if (!strcmp(name, media_list[i].name)) { + warn("Media <%s> rejected, duplicate name\n", name); + media_count--; + goto exit; + } + } - media_list[media_count] = m_ptr; - media_count++; + m_ptr = &media_list[media_id]; + m_ptr->type_id = media_type; + m_ptr->send_msg = send_msg; + m_ptr->enable_bearer = enable; + m_ptr->disable_bearer = disable; + m_ptr->addr2str = addr2str; + memcpy(&m_ptr->bcast_addr, bcast_addr, sizeof(*bcast_addr)); + strcpy(m_ptr->name, name); + m_ptr->priority = bearer_priority; + m_ptr->tolerance = link_tolerance; + m_ptr->window = send_window_limit; res = 0; exit: write_unlock_bh(&tipc_net_lock); - if (res) - warn("Media <%s> registration error\n", m_ptr->name); return res; } @@ -137,19 +176,27 @@ int tipc_register_media(struct tipc_media *m_ptr) void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a) { - char addr_str[MAX_ADDR_STR]; - struct tipc_media *m_ptr; + struct media *m_ptr; + u32 media_type; + u32 i; - m_ptr = media_find_id(a->media_id); + media_type = ntohl(a->type); + for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) { + if (m_ptr->type_id == media_type) + break; + } + + if ((i < media_count) && (m_ptr->addr2str != NULL)) { + char addr_str[MAX_ADDR_STR]; - if (m_ptr && !m_ptr->addr2str(a, addr_str, sizeof(addr_str))) - tipc_printf(pb, "%s(%s)", m_ptr->name, addr_str); - else { - u32 i; + tipc_printf(pb, "%s(%s)", m_ptr->name, + m_ptr->addr2str(a, addr_str, sizeof(addr_str))); + } else { + unchar *addr = (unchar *)&a->dev_addr; - tipc_printf(pb, "UNKNOWN(%u)", 
a->media_id); - for (i = 0; i < sizeof(a->value); i++) - tipc_printf(pb, "-%02x", a->value[i]); + tipc_printf(pb, "UNKNOWN(%u)", media_type); + for (i = 0; i < (sizeof(*a) - sizeof(a->type)); i++) + tipc_printf(pb, "-%02x", addr[i]); } } @@ -160,6 +207,7 @@ void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a) struct sk_buff *tipc_media_get_names(void) { struct sk_buff *buf; + struct media *m_ptr; int i; buf = tipc_cfg_reply_alloc(MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME)); @@ -167,10 +215,9 @@ struct sk_buff *tipc_media_get_names(void) return NULL; read_lock_bh(&tipc_net_lock); - for (i = 0; i < media_count; i++) { - tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, - media_list[i]->name, - strlen(media_list[i]->name) + 1); + for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) { + tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, m_ptr->name, + strlen(m_ptr->name) + 1); } read_unlock_bh(&tipc_net_lock); return buf; @@ -185,7 +232,7 @@ struct sk_buff *tipc_media_get_names(void) */ static int bearer_name_validate(const char *name, - struct tipc_bearer_names *name_parts) + struct bearer_name *name_parts) { char name_copy[TIPC_MAX_BEARER_NAME]; char *media_name; @@ -229,10 +276,10 @@ static int bearer_name_validate(const char *name, } /** - * tipc_bearer_find - locates bearer object with matching bearer name + * bearer_find - locates bearer object with matching bearer name */ -struct tipc_bearer *tipc_bearer_find(const char *name) +static struct tipc_bearer *bearer_find(const char *name) { struct tipc_bearer *b_ptr; u32 i; @@ -271,6 +318,7 @@ struct tipc_bearer *tipc_bearer_find_interface(const char *if_name) struct sk_buff *tipc_bearer_get_names(void) { struct sk_buff *buf; + struct media *m_ptr; struct tipc_bearer *b_ptr; int i, j; @@ -279,10 +327,10 @@ struct sk_buff *tipc_bearer_get_names(void) return NULL; read_lock_bh(&tipc_net_lock); - for (i = 0; i < media_count; i++) { + for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) { for (j = 0; j < MAX_BEARERS; j++) { b_ptr = &tipc_bearers[j]; - if (b_ptr->active && (b_ptr->media == media_list[i])) { + if (b_ptr->active && (b_ptr->media == m_ptr)) { tipc_cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME, b_ptr->name, strlen(b_ptr->name) + 1); @@ -318,7 +366,7 @@ void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest) static int bearer_push(struct tipc_bearer *b_ptr) { u32 res = 0; - struct tipc_link *ln, *tln; + struct link *ln, *tln; if (b_ptr->blocked) return 0; @@ -364,8 +412,7 @@ void tipc_continue(struct tipc_bearer *b_ptr) * bearer.lock is busy */ -static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr, - struct tipc_link *l_ptr) +static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr, struct link *l_ptr) { list_move_tail(&l_ptr->link_list, &b_ptr->cong_links); } @@ -378,7 +425,7 @@ static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr, * bearer.lock is free */ -void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr) +void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct link *l_ptr) { spin_lock_bh(&b_ptr->lock); tipc_bearer_schedule_unlocked(b_ptr, l_ptr); @@ -391,8 +438,7 @@ void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr) * and if there is, try to resolve it before returning. 
* 'tipc_net_lock' is read_locked when this function is called */ -int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, - struct tipc_link *l_ptr) +int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, struct link *l_ptr) { int res = 1; @@ -411,7 +457,7 @@ int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, * tipc_bearer_congested - determines if bearer is currently congested */ -int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr) +int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct link *l_ptr) { if (unlikely(b_ptr->blocked)) return 1; @@ -427,8 +473,8 @@ int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr) int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority) { struct tipc_bearer *b_ptr; - struct tipc_media *m_ptr; - struct tipc_bearer_names b_names; + struct media *m_ptr; + struct bearer_name b_name; char addr_string[16]; u32 bearer_id; u32 with_this_prio; @@ -440,7 +486,7 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority) name); return -ENOPROTOOPT; } - if (!bearer_name_validate(name, &b_names)) { + if (!bearer_name_validate(name, &b_name)) { warn("Bearer <%s> rejected, illegal name\n", name); return -EINVAL; } @@ -465,10 +511,10 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority) write_lock_bh(&tipc_net_lock); - m_ptr = tipc_media_find(b_names.media_name); + m_ptr = media_find(b_name.media_name); if (!m_ptr) { warn("Bearer <%s> rejected, media <%s> not registered\n", name, - b_names.media_name); + b_name.media_name); goto exit; } @@ -515,8 +561,6 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority) b_ptr->identity = bearer_id; b_ptr->media = m_ptr; - b_ptr->tolerance = m_ptr->tolerance; - b_ptr->window = m_ptr->window; b_ptr->net_plane = bearer_id + 'A'; b_ptr->active = 1; b_ptr->priority = priority; @@ -546,11 +590,11 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority) int tipc_block_bearer(const char *name) { struct tipc_bearer *b_ptr = NULL; - struct tipc_link *l_ptr; - struct tipc_link *temp_l_ptr; + struct link *l_ptr; + struct link *temp_l_ptr; read_lock_bh(&tipc_net_lock); - b_ptr = tipc_bearer_find(name); + b_ptr = bearer_find(name); if (!b_ptr) { warn("Attempt to block unknown bearer <%s>\n", name); read_unlock_bh(&tipc_net_lock); @@ -581,8 +625,8 @@ int tipc_block_bearer(const char *name) static void bearer_disable(struct tipc_bearer *b_ptr) { - struct tipc_link *l_ptr; - struct tipc_link *temp_l_ptr; + struct link *l_ptr; + struct link *temp_l_ptr; info("Disabling bearer <%s>\n", b_ptr->name); spin_lock_bh(&b_ptr->lock); @@ -604,7 +648,7 @@ int tipc_disable_bearer(const char *name) int res; write_lock_bh(&tipc_net_lock); - b_ptr = tipc_bearer_find(name); + b_ptr = bearer_find(name); if (b_ptr == NULL) { warn("Attempt to disable unknown bearer <%s>\n", name); res = -EINVAL; diff --git a/trunk/net/tipc/bearer.h b/trunk/net/tipc/bearer.h index d3eac56b8c21..d696f9e414e3 100644 --- a/trunk/net/tipc/bearer.h +++ b/trunk/net/tipc/bearer.h @@ -42,46 +42,33 @@ #define MAX_BEARERS 2 #define MAX_MEDIA 2 -/* - * Identifiers associated with TIPC message header media address info - * - * - address info field is 20 bytes long - * - media type identifier located at offset 3 - * - remaining bytes vary according to media type - */ - -#define TIPC_MEDIA_ADDR_SIZE 20 -#define TIPC_MEDIA_TYPE_OFFSET 3 - /* * Identifiers of supported TIPC media types */ #define TIPC_MEDIA_TYPE_ETH 1 /* - * struct 
tipc_media_addr - destination address used by TIPC bearers - * @value: address info (format defined by media) - * @media_id: TIPC media type identifier - * @broadcast: non-zero if address is a broadcast address + * Destination address structure used by TIPC bearers when sending messages + * + * IMPORTANT: The fields of this structure MUST be stored using the specified + * byte order indicated below, as the structure is exchanged between nodes + * as part of a link setup process. */ - struct tipc_media_addr { - u8 value[TIPC_MEDIA_ADDR_SIZE]; - u8 media_id; - u8 broadcast; + __be32 type; /* bearer type (network byte order) */ + union { + __u8 eth_addr[6]; /* 48 bit Ethernet addr (byte array) */ + } dev_addr; }; struct tipc_bearer; /** - * struct tipc_media - TIPC media information available to internal users + * struct media - TIPC media information available to internal users * @send_msg: routine which handles buffer transmission * @enable_bearer: routine which enables a bearer * @disable_bearer: routine which disables a bearer - * @addr2str: routine which converts media address to string - * @str2addr: routine which converts media address from string - * @addr2msg: routine which converts media address to protocol message area - * @msg2addr: routine which converts media address from protocol message area + * @addr2str: routine which converts bearer's address to string form * @bcast_addr: media address used in broadcasting * @priority: default link (and bearer) priority * @tolerance: default time (in ms) before declaring link failure @@ -90,16 +77,14 @@ struct tipc_bearer; * @name: media name */ -struct tipc_media { +struct media { int (*send_msg)(struct sk_buff *buf, struct tipc_bearer *b_ptr, struct tipc_media_addr *dest); int (*enable_bearer)(struct tipc_bearer *b_ptr); void (*disable_bearer)(struct tipc_bearer *b_ptr); - int (*addr2str)(struct tipc_media_addr *a, char *str_buf, int str_size); - int (*str2addr)(struct tipc_media_addr *a, char *str_buf); - int (*addr2msg)(struct tipc_media_addr *a, char *msg_area); - int (*msg2addr)(struct tipc_media_addr *a, char *msg_area); + char *(*addr2str)(struct tipc_media_addr *a, + char *str_buf, int str_size); struct tipc_media_addr bcast_addr; u32 priority; u32 tolerance; @@ -118,8 +103,6 @@ struct tipc_media { * @name: bearer name (format = media:interface) * @media: ptr to media structure associated with bearer * @priority: default link priority for bearer - * @window: default window size for bearer - * @tolerance: default link tolerance for bearer * @identity: array index of this bearer within TIPC bearer array * @link_req: ptr to (optional) structure making periodic link setup requests * @links: list of non-congested links associated with bearer @@ -139,12 +122,10 @@ struct tipc_bearer { struct tipc_media_addr addr; /* initalized by media */ char name[TIPC_MAX_BEARER_NAME]; spinlock_t lock; - struct tipc_media *media; + struct media *media; u32 priority; - u32 window; - u32 tolerance; u32 identity; - struct tipc_link_req *link_req; + struct link_req *link_req; struct list_head links; struct list_head cong_links; int active; @@ -152,19 +133,28 @@ struct tipc_bearer { struct tipc_node_map nodes; }; -struct tipc_bearer_names { +struct bearer_name { char media_name[TIPC_MAX_MEDIA_NAME]; char if_name[TIPC_MAX_IF_NAME]; }; -struct tipc_link; +struct link; extern struct tipc_bearer tipc_bearers[]; /* * TIPC routines available to supported media types */ -int tipc_register_media(struct tipc_media *m_ptr); +int tipc_register_media(u32 media_type, + 
char *media_name, int (*enable)(struct tipc_bearer *), + void (*disable)(struct tipc_bearer *), + int (*send_msg)(struct sk_buff *, + struct tipc_bearer *, struct tipc_media_addr *), + char *(*addr2str)(struct tipc_media_addr *a, + char *str_buf, int str_size), + struct tipc_media_addr *bcast_addr, const u32 bearer_priority, + const u32 link_tolerance, /* [ms] */ + const u32 send_window_limit); void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr); @@ -180,21 +170,16 @@ int tipc_disable_bearer(const char *name); int tipc_eth_media_start(void); void tipc_eth_media_stop(void); -int tipc_media_set_priority(const char *name, u32 new_value); -int tipc_media_set_window(const char *name, u32 new_value); void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a); struct sk_buff *tipc_media_get_names(void); struct sk_buff *tipc_bearer_get_names(void); void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest); void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest); -void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr); -struct tipc_bearer *tipc_bearer_find(const char *name); +void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct link *l_ptr); struct tipc_bearer *tipc_bearer_find_interface(const char *if_name); -struct tipc_media *tipc_media_find(const char *name); -int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, - struct tipc_link *l_ptr); -int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr); +int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, struct link *l_ptr); +int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct link *l_ptr); void tipc_bearer_stop(void); void tipc_bearer_lock_push(struct tipc_bearer *b_ptr); diff --git a/trunk/net/tipc/config.c b/trunk/net/tipc/config.c index 4785bf26cdf4..b25a396b7e1e 100644 --- a/trunk/net/tipc/config.c +++ b/trunk/net/tipc/config.c @@ -184,12 +184,13 @@ static struct sk_buff *cfg_set_own_addr(void) " (cannot change node address once assigned)"); /* - * Must temporarily release configuration spinlock while switching into - * networking mode as it calls tipc_eth_media_start(), which may sleep. - * Releasing the lock is harmless as other locally-issued configuration - * commands won't occur until this one completes, and remotely-issued - * configuration commands can't be received until a local configuration - * command to enable the first bearer is received and processed. + * Must release all spinlocks before calling start_net() because + * Linux version of TIPC calls eth_media_start() which calls + * register_netdevice_notifier() which may block! + * + * Temporarily releasing the lock should be harmless for non-Linux TIPC, + * but Linux version of eth_media_start() should really be reworked + * so that it can be called with spinlocks held. 
*/ spin_unlock_bh(&config_lock); diff --git a/trunk/net/tipc/core.c b/trunk/net/tipc/core.c index 2691cd57b8a8..c21331d58fdb 100644 --- a/trunk/net/tipc/core.c +++ b/trunk/net/tipc/core.c @@ -99,8 +99,8 @@ struct sk_buff *tipc_buf_acquire(u32 size) static void tipc_core_stop_net(void) { - tipc_net_stop(); tipc_eth_media_stop(); + tipc_net_stop(); } /** diff --git a/trunk/net/tipc/discover.c b/trunk/net/tipc/discover.c index a00e5f811569..f2fb96e86ee8 100644 --- a/trunk/net/tipc/discover.c +++ b/trunk/net/tipc/discover.c @@ -45,7 +45,7 @@ /** - * struct tipc_link_req - information about an ongoing link setup request + * struct link_req - information about an ongoing link setup request * @bearer: bearer issuing requests * @dest: destination address for request messages * @domain: network domain to which links can be established @@ -54,7 +54,7 @@ * @timer: timer governing period between requests * @timer_intv: current interval between requests (in ms) */ -struct tipc_link_req { +struct link_req { struct tipc_bearer *bearer; struct tipc_media_addr dest; u32 domain; @@ -84,7 +84,7 @@ static struct sk_buff *tipc_disc_init_msg(u32 type, msg_set_non_seq(msg, 1); msg_set_dest_domain(msg, dest_domain); msg_set_bc_netid(msg, tipc_net_id); - b_ptr->media->addr2msg(&b_ptr->addr, msg_media_addr(msg)); + msg_set_media_addr(msg, &b_ptr->addr); } return buf; } @@ -120,7 +120,7 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr, void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr) { struct tipc_node *n_ptr; - struct tipc_link *link; + struct link *link; struct tipc_media_addr media_addr, *addr; struct sk_buff *rbuf; struct tipc_msg *msg = buf_msg(buf); @@ -130,15 +130,12 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr) u32 type = msg_type(msg); int link_fully_up; - media_addr.broadcast = 1; - b_ptr->media->msg2addr(&media_addr, msg_media_addr(msg)); + msg_get_media_addr(msg, &media_addr); buf_discard(buf); /* Validate discovery message from requesting node */ if (net_id != tipc_net_id) return; - if (media_addr.broadcast) - return; if (!tipc_addr_domain_valid(dest)) return; if (!tipc_addr_node_valid(orig)) @@ -218,7 +215,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr) * and is either not currently searching or is searching at a slow rate */ -static void disc_update(struct tipc_link_req *req) +static void disc_update(struct link_req *req) { if (!req->num_nodes) { if ((req->timer_intv == TIPC_LINK_REQ_INACTIVE) || @@ -234,7 +231,7 @@ static void disc_update(struct tipc_link_req *req) * @req: ptr to link request structure */ -void tipc_disc_add_dest(struct tipc_link_req *req) +void tipc_disc_add_dest(struct link_req *req) { req->num_nodes++; } @@ -244,7 +241,7 @@ void tipc_disc_add_dest(struct tipc_link_req *req) * @req: ptr to link request structure */ -void tipc_disc_remove_dest(struct tipc_link_req *req) +void tipc_disc_remove_dest(struct link_req *req) { req->num_nodes--; disc_update(req); @@ -255,7 +252,7 @@ void tipc_disc_remove_dest(struct tipc_link_req *req) * @req: ptr to link request structure */ -static void disc_send_msg(struct tipc_link_req *req) +static void disc_send_msg(struct link_req *req) { if (!req->bearer->blocked) tipc_bearer_send(req->bearer, req->buf, &req->dest); @@ -268,7 +265,7 @@ static void disc_send_msg(struct tipc_link_req *req) * Called whenever a link setup request timer associated with a bearer expires. 
*/ -static void disc_timeout(struct tipc_link_req *req) +static void disc_timeout(struct link_req *req) { int max_delay; @@ -316,7 +313,7 @@ static void disc_timeout(struct tipc_link_req *req) int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest, u32 dest_domain) { - struct tipc_link_req *req; + struct link_req *req; req = kmalloc(sizeof(*req), GFP_ATOMIC); if (!req) @@ -345,7 +342,7 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, * @req: ptr to link request structure */ -void tipc_disc_delete(struct tipc_link_req *req) +void tipc_disc_delete(struct link_req *req) { k_cancel_timer(&req->timer); k_term_timer(&req->timer); diff --git a/trunk/net/tipc/discover.h b/trunk/net/tipc/discover.h index 75b67c403aa3..a3af595b86cb 100644 --- a/trunk/net/tipc/discover.h +++ b/trunk/net/tipc/discover.h @@ -37,13 +37,13 @@ #ifndef _TIPC_DISCOVER_H #define _TIPC_DISCOVER_H -struct tipc_link_req; +struct link_req; int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest, u32 dest_domain); -void tipc_disc_delete(struct tipc_link_req *req); -void tipc_disc_add_dest(struct tipc_link_req *req); -void tipc_disc_remove_dest(struct tipc_link_req *req); +void tipc_disc_delete(struct link_req *req); +void tipc_disc_add_dest(struct link_req *req); +void tipc_disc_remove_dest(struct link_req *req); void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr); #endif diff --git a/trunk/net/tipc/eth_media.c b/trunk/net/tipc/eth_media.c index 527e3f0e165d..e728d4ce2a1b 100644 --- a/trunk/net/tipc/eth_media.c +++ b/trunk/net/tipc/eth_media.c @@ -38,44 +38,27 @@ #include "bearer.h" #define MAX_ETH_BEARERS MAX_BEARERS - -#define ETH_ADDR_OFFSET 4 /* message header offset of MAC address */ +#define ETH_LINK_PRIORITY TIPC_DEF_LINK_PRI +#define ETH_LINK_TOLERANCE TIPC_DEF_LINK_TOL +#define ETH_LINK_WINDOW TIPC_DEF_LINK_WIN /** * struct eth_bearer - Ethernet bearer data structure * @bearer: ptr to associated "generic" bearer structure * @dev: ptr to associated Ethernet network device * @tipc_packet_type: used in binding TIPC to Ethernet driver - * @cleanup: work item used when disabling bearer */ struct eth_bearer { struct tipc_bearer *bearer; struct net_device *dev; struct packet_type tipc_packet_type; - struct work_struct cleanup; }; -static struct tipc_media eth_media_info; static struct eth_bearer eth_bearers[MAX_ETH_BEARERS]; static int eth_started; static struct notifier_block notifier; -/** - * eth_media_addr_set - initialize Ethernet media address structure - * - * Media-dependent "value" field stores MAC address in first 6 bytes - * and zeroes out the remaining bytes. 
- */ - -static void eth_media_addr_set(struct tipc_media_addr *a, char *mac) -{ - memcpy(a->value, mac, ETH_ALEN); - memset(a->value + ETH_ALEN, 0, sizeof(a->value) - ETH_ALEN); - a->media_id = TIPC_MEDIA_TYPE_ETH; - a->broadcast = !memcmp(mac, eth_media_info.bcast_addr.value, ETH_ALEN); -} - /** * send_msg - send a TIPC message out over an Ethernet interface */ @@ -102,7 +85,7 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr, skb_reset_network_header(clone); clone->dev = dev; - dev_hard_header(clone, dev, ETH_P_TIPC, dest->value, + dev_hard_header(clone, dev, ETH_P_TIPC, &dest->dev_addr.eth_addr, dev->dev_addr, clone->len); dev_queue_xmit(clone); return 0; @@ -189,41 +172,22 @@ static int enable_bearer(struct tipc_bearer *tb_ptr) tb_ptr->usr_handle = (void *)eb_ptr; tb_ptr->mtu = dev->mtu; tb_ptr->blocked = 0; - eth_media_addr_set(&tb_ptr->addr, (char *)dev->dev_addr); + tb_ptr->addr.type = htonl(TIPC_MEDIA_TYPE_ETH); + memcpy(&tb_ptr->addr.dev_addr, dev->dev_addr, ETH_ALEN); return 0; } -/** - * cleanup_bearer - break association between Ethernet bearer and interface - * - * This routine must be invoked from a work queue because it can sleep. - */ - -static void cleanup_bearer(struct work_struct *work) -{ - struct eth_bearer *eb_ptr = - container_of(work, struct eth_bearer, cleanup); - - dev_remove_pack(&eb_ptr->tipc_packet_type); - dev_put(eb_ptr->dev); - eb_ptr->dev = NULL; -} - /** * disable_bearer - detach TIPC bearer from an Ethernet interface * - * Mark Ethernet bearer as inactive so that incoming buffers are thrown away, - * then get worker thread to complete bearer cleanup. (Can't do cleanup - * here because cleanup code needs to sleep and caller holds spinlocks.) + * We really should do dev_remove_pack() here, but this function can not be + * called at tasklet level. => Use eth_bearer->bearer as a flag to throw away + * incoming buffers, & postpone dev_remove_pack() to eth_media_stop() on exit. 
*/ static void disable_bearer(struct tipc_bearer *tb_ptr) { - struct eth_bearer *eb_ptr = (struct eth_bearer *)tb_ptr->usr_handle; - - eb_ptr->bearer = NULL; - INIT_WORK(&eb_ptr->cleanup, cleanup_bearer); - schedule_work(&eb_ptr->cleanup); + ((struct eth_bearer *)tb_ptr->usr_handle)->bearer = NULL; } /** @@ -282,81 +246,17 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt, * eth_addr2str - convert Ethernet address to string */ -static int eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size) -{ - if (str_size < 18) /* 18 = strlen("aa:bb:cc:dd:ee:ff\0") */ - return 1; - - sprintf(str_buf, "%pM", a->value); - return 0; -} - -/** - * eth_str2addr - convert string to Ethernet address - */ - -static int eth_str2addr(struct tipc_media_addr *a, char *str_buf) -{ - char mac[ETH_ALEN]; - int r; - - r = sscanf(str_buf, "%02x:%02x:%02x:%02x:%02x:%02x", - (u32 *)&mac[0], (u32 *)&mac[1], (u32 *)&mac[2], - (u32 *)&mac[3], (u32 *)&mac[4], (u32 *)&mac[5]); - - if (r != ETH_ALEN) - return 1; - - eth_media_addr_set(a, mac); - return 0; -} - -/** - * eth_str2addr - convert Ethernet address format to message header format - */ - -static int eth_addr2msg(struct tipc_media_addr *a, char *msg_area) +static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size) { - memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE); - msg_area[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH; - memcpy(msg_area + ETH_ADDR_OFFSET, a->value, ETH_ALEN); - return 0; -} + unchar *addr = (unchar *)&a->dev_addr; -/** - * eth_str2addr - convert message header address format to Ethernet format - */ - -static int eth_msg2addr(struct tipc_media_addr *a, char *msg_area) -{ - if (msg_area[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_ETH) - return 1; - - eth_media_addr_set(a, msg_area + ETH_ADDR_OFFSET); - return 0; + if (str_size < 18) + *str_buf = '\0'; + else + sprintf(str_buf, "%pM", addr); + return str_buf; } -/* - * Ethernet media registration info - */ - -static struct tipc_media eth_media_info = { - .send_msg = send_msg, - .enable_bearer = enable_bearer, - .disable_bearer = disable_bearer, - .addr2str = eth_addr2str, - .str2addr = eth_str2addr, - .addr2msg = eth_addr2msg, - .msg2addr = eth_msg2addr, - .bcast_addr = { { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, - TIPC_MEDIA_TYPE_ETH, 1 }, - .priority = TIPC_DEF_LINK_PRI, - .tolerance = TIPC_DEF_LINK_TOL, - .window = TIPC_DEF_LINK_WIN, - .type_id = TIPC_MEDIA_TYPE_ETH, - .name = "eth" -}; - /** * tipc_eth_media_start - activate Ethernet bearer support * @@ -366,12 +266,21 @@ static struct tipc_media eth_media_info = { int tipc_eth_media_start(void) { + struct tipc_media_addr bcast_addr; int res; if (eth_started) return -EINVAL; - res = tipc_register_media(ð_media_info); + bcast_addr.type = htonl(TIPC_MEDIA_TYPE_ETH); + memset(&bcast_addr.dev_addr, 0xff, ETH_ALEN); + + memset(eth_bearers, 0, sizeof(eth_bearers)); + + res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth", + enable_bearer, disable_bearer, send_msg, + eth_addr2str, &bcast_addr, ETH_LINK_PRIORITY, + ETH_LINK_TOLERANCE, ETH_LINK_WINDOW); if (res) return res; @@ -389,10 +298,22 @@ int tipc_eth_media_start(void) void tipc_eth_media_stop(void) { + int i; + if (!eth_started) return; - flush_scheduled_work(); unregister_netdevice_notifier(¬ifier); + for (i = 0; i < MAX_ETH_BEARERS ; i++) { + if (eth_bearers[i].bearer) { + eth_bearers[i].bearer->blocked = 1; + eth_bearers[i].bearer = NULL; + } + if (eth_bearers[i].dev) { + dev_remove_pack(ð_bearers[i].tipc_packet_type); + 
dev_put(eth_bearers[i].dev); + } + } + memset(&eth_bearers, 0, sizeof(eth_bearers)); eth_started = 0; } diff --git a/trunk/net/tipc/link.c b/trunk/net/tipc/link.c index ac1832a66f8a..ae98a72da11a 100644 --- a/trunk/net/tipc/link.c +++ b/trunk/net/tipc/link.c @@ -71,36 +71,35 @@ #define START_CHANGEOVER 100000u /** - * struct tipc_link_name - deconstructed link name + * struct link_name - deconstructed link name * @addr_local: network address of node at this end * @if_local: name of interface at this end * @addr_peer: network address of node at far end * @if_peer: name of interface at far end */ -struct tipc_link_name { +struct link_name { u32 addr_local; char if_local[TIPC_MAX_IF_NAME]; u32 addr_peer; char if_peer[TIPC_MAX_IF_NAME]; }; -static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, +static void link_handle_out_of_seq_msg(struct link *l_ptr, struct sk_buff *buf); -static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf); -static int link_recv_changeover_msg(struct tipc_link **l_ptr, - struct sk_buff **buf); -static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance); +static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf); +static int link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf); +static void link_set_supervision_props(struct link *l_ptr, u32 tolerance); static int link_send_sections_long(struct tipc_port *sender, struct iovec const *msg_sect, u32 num_sect, unsigned int total_len, u32 destnode); -static void link_check_defragm_bufs(struct tipc_link *l_ptr); -static void link_state_event(struct tipc_link *l_ptr, u32 event); -static void link_reset_statistics(struct tipc_link *l_ptr); -static void link_print(struct tipc_link *l_ptr, const char *str); -static void link_start(struct tipc_link *l_ptr); -static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf); +static void link_check_defragm_bufs(struct link *l_ptr); +static void link_state_event(struct link *l_ptr, u32 event); +static void link_reset_statistics(struct link *l_ptr); +static void link_print(struct link *l_ptr, const char *str); +static void link_start(struct link *l_ptr); +static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf); /* * Simple link routines @@ -111,7 +110,7 @@ static unsigned int align(unsigned int i) return (i + 3) & ~3u; } -static void link_init_max_pkt(struct tipc_link *l_ptr) +static void link_init_max_pkt(struct link *l_ptr) { u32 max_pkt; @@ -128,14 +127,14 @@ static void link_init_max_pkt(struct tipc_link *l_ptr) l_ptr->max_pkt_probes = 0; } -static u32 link_next_sent(struct tipc_link *l_ptr) +static u32 link_next_sent(struct link *l_ptr) { if (l_ptr->next_out) - return buf_seqno(l_ptr->next_out); + return msg_seqno(buf_msg(l_ptr->next_out)); return mod(l_ptr->next_out_no); } -static u32 link_last_sent(struct tipc_link *l_ptr) +static u32 link_last_sent(struct link *l_ptr) { return mod(link_next_sent(l_ptr) - 1); } @@ -144,29 +143,28 @@ * Simple non-static link routines (i.e.
referenced outside this file) */ -int tipc_link_is_up(struct tipc_link *l_ptr) +int tipc_link_is_up(struct link *l_ptr) { if (!l_ptr) return 0; return link_working_working(l_ptr) || link_working_unknown(l_ptr); } -int tipc_link_is_active(struct tipc_link *l_ptr) +int tipc_link_is_active(struct link *l_ptr) { return (l_ptr->owner->active_links[0] == l_ptr) || (l_ptr->owner->active_links[1] == l_ptr); } /** - * link_name_validate - validate & (optionally) deconstruct tipc_link name + * link_name_validate - validate & (optionally) deconstruct link name * @name - ptr to link name string * @name_parts - ptr to area for link name components (or NULL if not needed) * * Returns 1 if link name is valid, otherwise 0. */ -static int link_name_validate(const char *name, - struct tipc_link_name *name_parts) +static int link_name_validate(const char *name, struct link_name *name_parts) { char name_copy[TIPC_MAX_LINK_NAME]; char *addr_local; @@ -240,7 +238,7 @@ static int link_name_validate(const char *name, * tipc_node_delete() is called.) */ -static void link_timeout(struct tipc_link *l_ptr) +static void link_timeout(struct link *l_ptr) { tipc_node_lock(l_ptr->owner); @@ -289,7 +287,7 @@ static void link_timeout(struct tipc_link *l_ptr) tipc_node_unlock(l_ptr->owner); } -static void link_set_timer(struct tipc_link *l_ptr, u32 time) +static void link_set_timer(struct link *l_ptr, u32 time) { k_start_timer(&l_ptr->timer, time); } @@ -303,11 +301,11 @@ static void link_set_timer(struct tipc_link *l_ptr, u32 time) * Returns pointer to link. */ -struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, +struct link *tipc_link_create(struct tipc_node *n_ptr, struct tipc_bearer *b_ptr, const struct tipc_media_addr *media_addr) { - struct tipc_link *l_ptr; + struct link *l_ptr; struct tipc_msg *msg; char *if_name; char addr_string[16]; @@ -345,7 +343,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, l_ptr->checkpoint = 1; l_ptr->peer_session = INVALID_SESSION; l_ptr->b_ptr = b_ptr; - link_set_supervision_props(l_ptr, b_ptr->tolerance); + link_set_supervision_props(l_ptr, b_ptr->media->tolerance); l_ptr->state = RESET_UNKNOWN; l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg; @@ -357,7 +355,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, strcpy((char *)msg_data(msg), if_name); l_ptr->priority = b_ptr->priority; - tipc_link_set_queue_limits(l_ptr, b_ptr->window); + tipc_link_set_queue_limits(l_ptr, b_ptr->media->window); link_init_max_pkt(l_ptr); @@ -384,7 +382,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, * to avoid a potential deadlock situation. */ -void tipc_link_delete(struct tipc_link *l_ptr) +void tipc_link_delete(struct link *l_ptr) { if (!l_ptr) { err("Attempt to delete non-existent link\n"); @@ -403,7 +401,7 @@ void tipc_link_delete(struct tipc_link *l_ptr) kfree(l_ptr); } -static void link_start(struct tipc_link *l_ptr) +static void link_start(struct link *l_ptr) { tipc_node_lock(l_ptr->owner); link_state_event(l_ptr, STARTING_EVT); @@ -420,7 +418,7 @@ static void link_start(struct tipc_link *l_ptr) * has abated. 
*/ -static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz) +static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz) { struct tipc_port *p_ptr; @@ -442,7 +440,7 @@ static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz) return -ELINKCONG; } -void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all) +void tipc_link_wakeup_ports(struct link *l_ptr, int all) { struct tipc_port *p_ptr; struct tipc_port *temp_p_ptr; @@ -477,7 +475,7 @@ void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all) * @l_ptr: pointer to link */ -static void link_release_outqueue(struct tipc_link *l_ptr) +static void link_release_outqueue(struct link *l_ptr) { struct sk_buff *buf = l_ptr->first_out; struct sk_buff *next; @@ -496,7 +494,7 @@ static void link_release_outqueue(struct tipc_link *l_ptr) * @l_ptr: pointer to link */ -void tipc_link_reset_fragments(struct tipc_link *l_ptr) +void tipc_link_reset_fragments(struct link *l_ptr) { struct sk_buff *buf = l_ptr->defragm_buf; struct sk_buff *next; @@ -514,7 +512,7 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr) * @l_ptr: pointer to link */ -void tipc_link_stop(struct tipc_link *l_ptr) +void tipc_link_stop(struct link *l_ptr) { struct sk_buff *buf; struct sk_buff *next; @@ -539,7 +537,7 @@ void tipc_link_stop(struct tipc_link *l_ptr) l_ptr->proto_msg_queue = NULL; } -void tipc_link_reset(struct tipc_link *l_ptr) +void tipc_link_reset(struct link *l_ptr) { struct sk_buff *buf; u32 prev_state = l_ptr->state; @@ -599,7 +597,7 @@ void tipc_link_reset(struct tipc_link *l_ptr) } -static void link_activate(struct tipc_link *l_ptr) +static void link_activate(struct link *l_ptr) { l_ptr->next_in_no = l_ptr->stats.recv_info = 1; tipc_node_link_up(l_ptr->owner, l_ptr); @@ -612,9 +610,9 @@ static void link_activate(struct tipc_link *l_ptr) * @event: state machine event to process */ -static void link_state_event(struct tipc_link *l_ptr, unsigned event) +static void link_state_event(struct link *l_ptr, unsigned event) { - struct tipc_link *other; + struct link *other; u32 cont_intv = l_ptr->continuity_interval; if (!l_ptr->started && (event != STARTING_EVT)) @@ -786,7 +784,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned event) * the tail of an existing one. 
*/ -static int link_bundle_buf(struct tipc_link *l_ptr, +static int link_bundle_buf(struct link *l_ptr, struct sk_buff *bundler, struct sk_buff *buf) { @@ -815,7 +813,7 @@ static int link_bundle_buf(struct tipc_link *l_ptr, return 1; } -static void link_add_to_outqueue(struct tipc_link *l_ptr, +static void link_add_to_outqueue(struct link *l_ptr, struct sk_buff *buf, struct tipc_msg *msg) { @@ -836,7 +834,7 @@ static void link_add_to_outqueue(struct tipc_link *l_ptr, l_ptr->stats.max_queue_sz = l_ptr->out_queue_size; } -static void link_add_chain_to_outqueue(struct tipc_link *l_ptr, +static void link_add_chain_to_outqueue(struct link *l_ptr, struct sk_buff *buf_chain, u32 long_msgno) { @@ -861,7 +859,7 @@ static void link_add_chain_to_outqueue(struct tipc_link *l_ptr, * has failed, and from link_send() */ -int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf) +int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf) { struct tipc_msg *msg = buf_msg(buf); u32 size = msg_size(msg); @@ -956,7 +954,7 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf) int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector) { - struct tipc_link *l_ptr; + struct link *l_ptr; struct tipc_node *n_ptr; int res = -ELINKCONG; @@ -990,7 +988,7 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector) void tipc_link_send_names(struct list_head *message_list, u32 dest) { struct tipc_node *n_ptr; - struct tipc_link *l_ptr; + struct link *l_ptr; struct sk_buff *buf; struct sk_buff *temp_buf; @@ -1029,7 +1027,7 @@ void tipc_link_send_names(struct list_head *message_list, u32 dest) * Link is locked. Returns user data length. */ -static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf, +static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf, u32 *used_max_pkt) { struct tipc_msg *msg = buf_msg(buf); @@ -1063,7 +1061,7 @@ static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf, */ int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode) { - struct tipc_link *l_ptr; + struct link *l_ptr; struct tipc_node *n_ptr; int res; u32 selector = msg_origport(buf_msg(buf)) & 1; @@ -1102,7 +1100,7 @@ int tipc_link_send_sections_fast(struct tipc_port *sender, u32 destaddr) { struct tipc_msg *hdr = &sender->phdr; - struct tipc_link *l_ptr; + struct link *l_ptr; struct sk_buff *buf; struct tipc_node *node; int res; @@ -1197,7 +1195,7 @@ static int link_send_sections_long(struct tipc_port *sender, unsigned int total_len, u32 destaddr) { - struct tipc_link *l_ptr; + struct link *l_ptr; struct tipc_node *node; struct tipc_msg *hdr = &sender->phdr; u32 dsz = total_len; @@ -1344,7 +1342,7 @@ static int link_send_sections_long(struct tipc_port *sender, /* * tipc_link_push_packet: Push one unsent packet to the media */ -u32 tipc_link_push_packet(struct tipc_link *l_ptr) +u32 tipc_link_push_packet(struct link *l_ptr) { struct sk_buff *buf = l_ptr->first_out; u32 r_q_size = l_ptr->retransm_queue_size; @@ -1356,7 +1354,7 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr) if (r_q_size && buf) { u32 last = lesser(mod(r_q_head + r_q_size), link_last_sent(l_ptr)); - u32 first = buf_seqno(buf); + u32 first = msg_seqno(buf_msg(buf)); while (buf && less(first, r_q_head)) { first = mod(first + 1); @@ -1405,7 +1403,7 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr) if (buf) { struct tipc_msg *msg = buf_msg(buf); u32 next = msg_seqno(msg); - u32 first = buf_seqno(l_ptr->first_out); + u32 first = msg_seqno(buf_msg(l_ptr->first_out)); if 
(mod(next - first) < l_ptr->queue_limit[0]) { msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); @@ -1428,7 +1426,7 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr) * push_queue(): push out the unsent messages of a link where * congestion has abated. Node is locked */ -void tipc_link_push_queue(struct tipc_link *l_ptr) +void tipc_link_push_queue(struct link *l_ptr) { u32 res; @@ -1472,8 +1470,7 @@ static void link_reset_all(unsigned long addr) read_unlock_bh(&tipc_net_lock); } -static void link_retransmit_failure(struct tipc_link *l_ptr, - struct sk_buff *buf) +static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf) { struct tipc_msg *msg = buf_msg(buf); @@ -1517,7 +1514,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr, } } -void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf, +void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf, u32 retransmits) { struct tipc_msg *msg; @@ -1561,7 +1558,7 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf, } else { tipc_bearer_schedule(l_ptr->b_ptr, l_ptr); l_ptr->stats.bearer_congs++; - l_ptr->retransm_queue_head = buf_seqno(buf); + l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf)); l_ptr->retransm_queue_size = retransmits; return; } @@ -1574,7 +1571,7 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf, * link_insert_deferred_queue - insert deferred messages back into receive chain */ -static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr, +static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr, struct sk_buff *buf) { u32 seq_no; @@ -1582,7 +1579,7 @@ static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr, if (l_ptr->oldest_deferred_in == NULL) return buf; - seq_no = buf_seqno(l_ptr->oldest_deferred_in); + seq_no = msg_seqno(buf_msg(l_ptr->oldest_deferred_in)); if (seq_no == mod(l_ptr->next_in_no)) { l_ptr->newest_deferred_in->next = buf; buf = l_ptr->oldest_deferred_in; @@ -1656,7 +1653,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) read_lock_bh(&tipc_net_lock); while (head) { struct tipc_node *n_ptr; - struct tipc_link *l_ptr; + struct link *l_ptr; struct sk_buff *crs; struct sk_buff *buf = head; struct tipc_msg *msg; @@ -1736,12 +1733,14 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) /* Release acked messages */ - if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported) - tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); + if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) { + if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported) + tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); + } crs = l_ptr->first_out; while ((crs != l_ptr->next_out) && - less_eq(buf_seqno(crs), ackd)) { + less_eq(msg_seqno(buf_msg(crs)), ackd)) { struct sk_buff *next = crs->next; buf_discard(crs); @@ -1864,7 +1863,7 @@ u32 tipc_link_defer_pkt(struct sk_buff **head, { struct sk_buff *prev = NULL; struct sk_buff *crs = *head; - u32 seq_no = buf_seqno(buf); + u32 seq_no = msg_seqno(buf_msg(buf)); buf->next = NULL; @@ -1875,7 +1874,7 @@ u32 tipc_link_defer_pkt(struct sk_buff **head, } /* Last ? 
*/ - if (less(buf_seqno(*tail), seq_no)) { + if (less(msg_seqno(buf_msg(*tail)), seq_no)) { (*tail)->next = buf; *tail = buf; return 1; @@ -1909,10 +1908,10 @@ u32 tipc_link_defer_pkt(struct sk_buff **head, * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet */ -static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, +static void link_handle_out_of_seq_msg(struct link *l_ptr, struct sk_buff *buf) { - u32 seq_no = buf_seqno(buf); + u32 seq_no = msg_seqno(buf_msg(buf)); if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) { link_recv_proto_msg(l_ptr, buf); @@ -1947,9 +1946,8 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, /* * Send protocol message to the other endpoint. */ -void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, - int probe_msg, u32 gap, u32 tolerance, - u32 priority, u32 ack_mtu) +void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg, + u32 gap, u32 tolerance, u32 priority, u32 ack_mtu) { struct sk_buff *buf = NULL; struct tipc_msg *msg = l_ptr->pmsg; @@ -1975,10 +1973,10 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, if (!tipc_link_is_up(l_ptr)) return; if (l_ptr->next_out) - next_sent = buf_seqno(l_ptr->next_out); + next_sent = msg_seqno(buf_msg(l_ptr->next_out)); msg_set_next_sent(msg, next_sent); if (l_ptr->oldest_deferred_in) { - u32 rec = buf_seqno(l_ptr->oldest_deferred_in); + u32 rec = msg_seqno(buf_msg(l_ptr->oldest_deferred_in)); gap = mod(rec - mod(l_ptr->next_in_no)); } msg_set_seq_gap(msg, gap); @@ -2066,7 +2064,7 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, * change at any time. The node with lowest address rules */ -static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf) +static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf) { u32 rec_gap = 0; u32 max_pkt_info; @@ -2199,12 +2197,12 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf) * tipc_link_tunnel(): Send one message via a link belonging to * another bearer. Owner node is locked. */ -static void tipc_link_tunnel(struct tipc_link *l_ptr, +static void tipc_link_tunnel(struct link *l_ptr, struct tipc_msg *tunnel_hdr, struct tipc_msg *msg, u32 selector) { - struct tipc_link *tunnel; + struct link *tunnel; struct sk_buff *buf; u32 length = msg_size(msg); @@ -2233,11 +2231,11 @@ static void tipc_link_tunnel(struct tipc_link *l_ptr, * Owner node is locked. */ -void tipc_link_changeover(struct tipc_link *l_ptr) +void tipc_link_changeover(struct link *l_ptr) { u32 msgcount = l_ptr->out_queue_size; struct sk_buff *crs = l_ptr->first_out; - struct tipc_link *tunnel = l_ptr->owner->active_links[0]; + struct link *tunnel = l_ptr->owner->active_links[0]; struct tipc_msg tunnel_hdr; int split_bundles; @@ -2296,7 +2294,7 @@ void tipc_link_changeover(struct tipc_link *l_ptr) } } -void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel) +void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel) { struct sk_buff *iter; struct tipc_msg tunnel_hdr; @@ -2360,11 +2358,11 @@ static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos) * via other link. Node is locked. Return extracted buffer. 
*/ -static int link_recv_changeover_msg(struct tipc_link **l_ptr, +static int link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf) { struct sk_buff *tunnel_buf = *buf; - struct tipc_link *dest_link; + struct link *dest_link; struct tipc_msg *msg; struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf); u32 msg_typ = msg_type(tunnel_msg); @@ -2464,7 +2462,7 @@ void tipc_link_recv_bundle(struct sk_buff *buf) * The buffer is complete, inclusive total message length. * Returns user data length. */ -static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf) +static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf) { struct sk_buff *buf_chain = NULL; struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain; @@ -2593,7 +2591,7 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, /* Is there an incomplete message waiting for this fragment? */ - while (pbuf && ((buf_seqno(pbuf) != long_msg_seq_no) || + while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no) || (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) { prev = pbuf; pbuf = pbuf->next; @@ -2660,7 +2658,7 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, * @l_ptr: pointer to link */ -static void link_check_defragm_bufs(struct tipc_link *l_ptr) +static void link_check_defragm_bufs(struct link *l_ptr) { struct sk_buff *prev = NULL; struct sk_buff *next = NULL; @@ -2690,7 +2688,7 @@ static void link_check_defragm_bufs(struct tipc_link *l_ptr) -static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance) +static void link_set_supervision_props(struct link *l_ptr, u32 tolerance) { if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL)) return; @@ -2702,7 +2700,7 @@ static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance) } -void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window) +void tipc_link_set_queue_limits(struct link *l_ptr, u32 window) { /* Data messages from this node, inclusive FIRST_FRAGM */ l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window; @@ -2732,12 +2730,11 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window) * Returns pointer to link (or 0 if invalid link name). */ -static struct tipc_link *link_find_link(const char *name, - struct tipc_node **node) +static struct link *link_find_link(const char *name, struct tipc_node **node) { - struct tipc_link_name link_name_parts; + struct link_name link_name_parts; struct tipc_bearer *b_ptr; - struct tipc_link *l_ptr; + struct link *l_ptr; if (!link_name_validate(name, &link_name_parts)) return NULL; @@ -2757,113 +2754,13 @@ static struct tipc_link *link_find_link(const char *name, return l_ptr; } -/** - * link_value_is_valid -- validate proposed link tolerance/priority/window - * - * @cmd - value type (TIPC_CMD_SET_LINK_*) - * @new_value - the new value - * - * Returns 1 if value is within range, 0 if not. 
- */ - -static int link_value_is_valid(u16 cmd, u32 new_value) -{ - switch (cmd) { - case TIPC_CMD_SET_LINK_TOL: - return (new_value >= TIPC_MIN_LINK_TOL) && - (new_value <= TIPC_MAX_LINK_TOL); - case TIPC_CMD_SET_LINK_PRI: - return (new_value <= TIPC_MAX_LINK_PRI); - case TIPC_CMD_SET_LINK_WINDOW: - return (new_value >= TIPC_MIN_LINK_WIN) && - (new_value <= TIPC_MAX_LINK_WIN); - } - return 0; -} - - -/** - * link_cmd_set_value - change priority/tolerance/window for link/bearer/media - * @name - ptr to link, bearer, or media name - * @new_value - new value of link, bearer, or media setting - * @cmd - which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*) - * - * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted. - * - * Returns 0 if value updated and negative value on error. - */ - -static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd) -{ - struct tipc_node *node; - struct tipc_link *l_ptr; - struct tipc_bearer *b_ptr; - struct tipc_media *m_ptr; - - l_ptr = link_find_link(name, &node); - if (l_ptr) { - /* - * acquire node lock for tipc_link_send_proto_msg(). - * see "TIPC locking policy" in net.c. - */ - tipc_node_lock(node); - switch (cmd) { - case TIPC_CMD_SET_LINK_TOL: - link_set_supervision_props(l_ptr, new_value); - tipc_link_send_proto_msg(l_ptr, - STATE_MSG, 0, 0, new_value, 0, 0); - break; - case TIPC_CMD_SET_LINK_PRI: - l_ptr->priority = new_value; - tipc_link_send_proto_msg(l_ptr, - STATE_MSG, 0, 0, 0, new_value, 0); - break; - case TIPC_CMD_SET_LINK_WINDOW: - tipc_link_set_queue_limits(l_ptr, new_value); - break; - } - tipc_node_unlock(node); - return 0; - } - - b_ptr = tipc_bearer_find(name); - if (b_ptr) { - switch (cmd) { - case TIPC_CMD_SET_LINK_TOL: - b_ptr->tolerance = new_value; - return 0; - case TIPC_CMD_SET_LINK_PRI: - b_ptr->priority = new_value; - return 0; - case TIPC_CMD_SET_LINK_WINDOW: - b_ptr->window = new_value; - return 0; - } - return -EINVAL; - } - - m_ptr = tipc_media_find(name); - if (!m_ptr) - return -ENODEV; - switch (cmd) { - case TIPC_CMD_SET_LINK_TOL: - m_ptr->tolerance = new_value; - return 0; - case TIPC_CMD_SET_LINK_PRI: - m_ptr->priority = new_value; - return 0; - case TIPC_CMD_SET_LINK_WINDOW: - m_ptr->window = new_value; - return 0; - } - return -EINVAL; -} - struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, u16 cmd) { struct tipc_link_config *args; u32 new_value; + struct link *l_ptr; + struct tipc_node *node; int res; if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG)) @@ -2872,10 +2769,6 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space args = (struct tipc_link_config *)TLV_DATA(req_tlv_area); new_value = ntohl(args->value); - if (!link_value_is_valid(cmd, new_value)) - return tipc_cfg_reply_error_string( - "cannot change, value invalid"); - if (!strcmp(args->name, tipc_bclink_name)) { if ((cmd == TIPC_CMD_SET_LINK_WINDOW) && (tipc_bclink_set_queue_limits(new_value) == 0)) @@ -2885,7 +2778,43 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space } read_lock_bh(&tipc_net_lock); - res = link_cmd_set_value(args->name, new_value, cmd); + l_ptr = link_find_link(args->name, &node); + if (!l_ptr) { + read_unlock_bh(&tipc_net_lock); + return tipc_cfg_reply_error_string("link not found"); + } + + tipc_node_lock(node); + res = -EINVAL; + switch (cmd) { + case TIPC_CMD_SET_LINK_TOL: + if ((new_value >= TIPC_MIN_LINK_TOL) && + (new_value <= TIPC_MAX_LINK_TOL)) { + 
link_set_supervision_props(l_ptr, new_value); + tipc_link_send_proto_msg(l_ptr, STATE_MSG, + 0, 0, new_value, 0, 0); + res = 0; + } + break; + case TIPC_CMD_SET_LINK_PRI: + if ((new_value >= TIPC_MIN_LINK_PRI) && + (new_value <= TIPC_MAX_LINK_PRI)) { + l_ptr->priority = new_value; + tipc_link_send_proto_msg(l_ptr, STATE_MSG, + 0, 0, 0, new_value, 0); + res = 0; + } + break; + case TIPC_CMD_SET_LINK_WINDOW: + if ((new_value >= TIPC_MIN_LINK_WIN) && + (new_value <= TIPC_MAX_LINK_WIN)) { + tipc_link_set_queue_limits(l_ptr, new_value); + res = 0; + } + break; + } + tipc_node_unlock(node); + read_unlock_bh(&tipc_net_lock); if (res) return tipc_cfg_reply_error_string("cannot change link setting"); @@ -2898,7 +2827,7 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space * @l_ptr: pointer to link */ -static void link_reset_statistics(struct tipc_link *l_ptr) +static void link_reset_statistics(struct link *l_ptr) { memset(&l_ptr->stats, 0, sizeof(l_ptr->stats)); l_ptr->stats.sent_info = l_ptr->next_out_no; @@ -2908,7 +2837,7 @@ static void link_reset_statistics(struct tipc_link *l_ptr) struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space) { char *link_name; - struct tipc_link *l_ptr; + struct link *l_ptr; struct tipc_node *node; if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) @@ -2956,7 +2885,7 @@ static u32 percent(u32 count, u32 total) static int tipc_link_stats(const char *name, char *buf, const u32 buf_size) { struct print_buf pb; - struct tipc_link *l_ptr; + struct link *l_ptr; struct tipc_node *node; char *status; u32 profile_total = 0; @@ -3078,7 +3007,7 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_s u32 tipc_link_get_max_pkt(u32 dest, u32 selector) { struct tipc_node *n_ptr; - struct tipc_link *l_ptr; + struct link *l_ptr; u32 res = MAX_PKT_DEFAULT; if (dest == tipc_own_addr) @@ -3097,7 +3026,7 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector) return res; } -static void link_print(struct tipc_link *l_ptr, const char *str) +static void link_print(struct link *l_ptr, const char *str) { char print_area[256]; struct print_buf pb; @@ -3117,12 +3046,13 @@ static void link_print(struct tipc_link *l_ptr, const char *str) tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no)); tipc_printf(buf, "SQUE"); if (l_ptr->first_out) { - tipc_printf(buf, "[%u..", buf_seqno(l_ptr->first_out)); + tipc_printf(buf, "[%u..", msg_seqno(buf_msg(l_ptr->first_out))); if (l_ptr->next_out) - tipc_printf(buf, "%u..", buf_seqno(l_ptr->next_out)); - tipc_printf(buf, "%u]", buf_seqno(l_ptr->last_out)); - if ((mod(buf_seqno(l_ptr->last_out) - - buf_seqno(l_ptr->first_out)) + tipc_printf(buf, "%u..", + msg_seqno(buf_msg(l_ptr->next_out))); + tipc_printf(buf, "%u]", msg_seqno(buf_msg(l_ptr->last_out))); + if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) - + msg_seqno(buf_msg(l_ptr->first_out))) != (l_ptr->out_queue_size - 1)) || (l_ptr->last_out->next != NULL)) { tipc_printf(buf, "\nSend queue inconsistency\n"); @@ -3134,8 +3064,8 @@ static void link_print(struct tipc_link *l_ptr, const char *str) tipc_printf(buf, "[]"); tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size); if (l_ptr->oldest_deferred_in) { - u32 o = buf_seqno(l_ptr->oldest_deferred_in); - u32 n = buf_seqno(l_ptr->newest_deferred_in); + u32 o = msg_seqno(buf_msg(l_ptr->oldest_deferred_in)); + u32 n = msg_seqno(buf_msg(l_ptr->newest_deferred_in)); tipc_printf(buf, ":RQUE[%u..%u]", o, n); if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) { 
tipc_printf(buf, ":RQSIZ(%u)", diff --git a/trunk/net/tipc/link.h b/trunk/net/tipc/link.h index 73c18c140e1d..e56cb532913e 100644 --- a/trunk/net/tipc/link.h +++ b/trunk/net/tipc/link.h @@ -44,12 +44,6 @@ #define PUSH_FAILED 1 #define PUSH_FINISHED 2 -/* - * Out-of-range value for link sequence numbers - */ - -#define INVALID_LINK_SEQ 0x10000 - /* * Link states */ @@ -67,7 +61,7 @@ #define MAX_PKT_DEFAULT 1500 /** - * struct tipc_link - TIPC link data structure + * struct link - TIPC link data structure * @addr: network address of link's peer node * @name: link name character string * @media_addr: media address to use when sending messages over link @@ -115,7 +109,7 @@ * @stats: collects statistics regarding link activity */ -struct tipc_link { +struct link { u32 addr; char name[TIPC_MAX_LINK_NAME]; struct tipc_media_addr media_addr; @@ -213,24 +207,24 @@ struct tipc_link { struct tipc_port; -struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, +struct link *tipc_link_create(struct tipc_node *n_ptr, struct tipc_bearer *b_ptr, const struct tipc_media_addr *media_addr); -void tipc_link_delete(struct tipc_link *l_ptr); -void tipc_link_changeover(struct tipc_link *l_ptr); -void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *dest); -void tipc_link_reset_fragments(struct tipc_link *l_ptr); -int tipc_link_is_up(struct tipc_link *l_ptr); -int tipc_link_is_active(struct tipc_link *l_ptr); -u32 tipc_link_push_packet(struct tipc_link *l_ptr); -void tipc_link_stop(struct tipc_link *l_ptr); +void tipc_link_delete(struct link *l_ptr); +void tipc_link_changeover(struct link *l_ptr); +void tipc_link_send_duplicate(struct link *l_ptr, struct link *dest); +void tipc_link_reset_fragments(struct link *l_ptr); +int tipc_link_is_up(struct link *l_ptr); +int tipc_link_is_active(struct link *l_ptr); +u32 tipc_link_push_packet(struct link *l_ptr); +void tipc_link_stop(struct link *l_ptr); struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, u16 cmd); struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space); struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space); -void tipc_link_reset(struct tipc_link *l_ptr); +void tipc_link_reset(struct link *l_ptr); int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector); void tipc_link_send_names(struct list_head *message_list, u32 dest); -int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf); +int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf); u32 tipc_link_get_max_pkt(u32 dest, u32 selector); int tipc_link_send_sections_fast(struct tipc_port *sender, struct iovec const *msg_sect, @@ -241,26 +235,19 @@ void tipc_link_recv_bundle(struct sk_buff *buf); int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, struct tipc_msg **msg); -void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, int prob, - u32 gap, u32 tolerance, u32 priority, - u32 acked_mtu); -void tipc_link_push_queue(struct tipc_link *l_ptr); +void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int prob, u32 gap, + u32 tolerance, u32 priority, u32 acked_mtu); +void tipc_link_push_queue(struct link *l_ptr); u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail, struct sk_buff *buf); -void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all); -void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window); -void tipc_link_retransmit(struct tipc_link *l_ptr, - struct sk_buff *start, u32 retransmits); 
+void tipc_link_wakeup_ports(struct link *l_ptr, int all); +void tipc_link_set_queue_limits(struct link *l_ptr, u32 window); +void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *start, u32 retransmits); /* * Link sequence number manipulation routines (uses modulo 2**16 arithmetic) */ -static inline u32 buf_seqno(struct sk_buff *buf) -{ - return msg_seqno(buf_msg(buf)); -} - static inline u32 mod(u32 x) { return x & 0xffffu; @@ -295,32 +282,32 @@ static inline u32 lesser(u32 left, u32 right) * Link status checking routines */ -static inline int link_working_working(struct tipc_link *l_ptr) +static inline int link_working_working(struct link *l_ptr) { return l_ptr->state == WORKING_WORKING; } -static inline int link_working_unknown(struct tipc_link *l_ptr) +static inline int link_working_unknown(struct link *l_ptr) { return l_ptr->state == WORKING_UNKNOWN; } -static inline int link_reset_unknown(struct tipc_link *l_ptr) +static inline int link_reset_unknown(struct link *l_ptr) { return l_ptr->state == RESET_UNKNOWN; } -static inline int link_reset_reset(struct tipc_link *l_ptr) +static inline int link_reset_reset(struct link *l_ptr) { return l_ptr->state == RESET_RESET; } -static inline int link_blocked(struct tipc_link *l_ptr) +static inline int link_blocked(struct link *l_ptr) { return l_ptr->exp_msg_count || l_ptr->blocked; } -static inline int link_congested(struct tipc_link *l_ptr) +static inline int link_congested(struct link *l_ptr) { return l_ptr->out_queue_size >= l_ptr->queue_limit[0]; } diff --git a/trunk/net/tipc/msg.c b/trunk/net/tipc/msg.c index 3e4d3e29be61..83d50967910c 100644 --- a/trunk/net/tipc/msg.c +++ b/trunk/net/tipc/msg.c @@ -333,14 +333,11 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str) } if (msg_user(msg) == LINK_CONFIG) { - struct tipc_media_addr orig; - + u32 *raw = (u32 *)msg; + struct tipc_media_addr *orig = (struct tipc_media_addr *)&raw[5]; tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg)); tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg)); - memcpy(orig.value, msg_media_addr(msg), sizeof(orig.value)); - orig.media_id = 0; - orig.broadcast = 0; - tipc_media_addr_printf(buf, &orig); + tipc_media_addr_printf(buf, orig); } if (msg_user(msg) == BCAST_PROTOCOL) { tipc_printf(buf, "BCNACK:AFTER(%u):", msg_bcgap_after(msg)); diff --git a/trunk/net/tipc/msg.h b/trunk/net/tipc/msg.h index 7b0cda167107..d93178f2e852 100644 --- a/trunk/net/tipc/msg.h +++ b/trunk/net/tipc/msg.h @@ -78,8 +78,6 @@ #define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE) -#define TIPC_MEDIA_ADDR_OFFSET 5 - struct tipc_msg { __be32 hdr[15]; @@ -684,10 +682,6 @@ static inline void msg_set_redundant_link(struct tipc_msg *m, u32 r) msg_set_bits(m, 5, 12, 0x1, r); } -static inline char *msg_media_addr(struct tipc_msg *m) -{ - return (char *)&m->hdr[TIPC_MEDIA_ADDR_OFFSET]; -} /* * Word 9 @@ -740,4 +734,14 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect, u32 num_sect, unsigned int total_len, int max_size, int usrmem, struct sk_buff **buf); +static inline void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a) +{ + memcpy(&((int *)m)[5], a, sizeof(*a)); +} + +static inline void msg_get_media_addr(struct tipc_msg *m, struct tipc_media_addr *a) +{ + memcpy(a, &((int *)m)[5], sizeof(*a)); +} + #endif diff --git a/trunk/net/tipc/name_distr.c b/trunk/net/tipc/name_distr.c index 98ebb37f1808..b7ca1bd7b151 100644 --- a/trunk/net/tipc/name_distr.c +++ b/trunk/net/tipc/name_distr.c @@ -176,7 +176,7 @@ void 
tipc_named_withdraw(struct publication *publ) void tipc_named_node_up(unsigned long nodearg) { struct tipc_node *n_ptr; - struct tipc_link *l_ptr; + struct link *l_ptr; struct publication *publ; struct distr_item *item = NULL; struct sk_buff *buf = NULL; @@ -322,9 +322,10 @@ void tipc_named_recv(struct sk_buff *buf) /** * tipc_named_reinit - re-initialize local publication list * - * This routine is called whenever TIPC networking is enabled. + * This routine is called whenever TIPC networking is (re)enabled. * All existing publications by this node that have "cluster" or "zone" scope - * are updated to reflect the node's new network address. + * are updated to reflect the node's current network address. + * (If the node's address is unchanged, the update loop terminates immediately.) */ void tipc_named_reinit(void) @@ -332,9 +333,10 @@ void tipc_named_reinit(void) struct publication *publ; write_lock_bh(&tipc_nametbl_lock); - - list_for_each_entry(publ, &publ_root, local_list) + list_for_each_entry(publ, &publ_root, local_list) { + if (publ->node == tipc_own_addr) + break; publ->node = tipc_own_addr; - + } write_unlock_bh(&tipc_nametbl_lock); } diff --git a/trunk/net/tipc/name_table.c b/trunk/net/tipc/name_table.c index 89eb5621ebba..46e6b6c2ecc9 100644 --- a/trunk/net/tipc/name_table.c +++ b/trunk/net/tipc/name_table.c @@ -251,8 +251,8 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, u32 type, u32 lower, u32 upper, u32 scope, u32 node, u32 port, u32 key) { - struct tipc_subscription *s; - struct tipc_subscription *st; + struct subscription *s; + struct subscription *st; struct publication *publ; struct sub_seq *sseq; struct name_info *info; @@ -381,7 +381,7 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i struct sub_seq *sseq = nameseq_find_subseq(nseq, inst); struct name_info *info; struct sub_seq *free; - struct tipc_subscription *s, *st; + struct subscription *s, *st; int removed_subseq = 0; if (!sseq) @@ -448,8 +448,7 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i * sequence overlapping with the requested sequence */ -static void tipc_nameseq_subscribe(struct name_seq *nseq, - struct tipc_subscription *s) +static void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s) { struct sub_seq *sseq = nseq->sseqs; @@ -626,7 +625,7 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) */ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, - struct tipc_port_list *dports) + struct port_list *dports) { struct name_seq *seq; struct sub_seq *sseq; @@ -740,7 +739,7 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) * tipc_nametbl_subscribe - add a subscription object to the name table */ -void tipc_nametbl_subscribe(struct tipc_subscription *s) +void tipc_nametbl_subscribe(struct subscription *s) { u32 type = s->seq.type; struct name_seq *seq; @@ -764,7 +763,7 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s) * tipc_nametbl_unsubscribe - remove a subscription object from name table */ -void tipc_nametbl_unsubscribe(struct tipc_subscription *s) +void tipc_nametbl_unsubscribe(struct subscription *s) { struct name_seq *seq; diff --git a/trunk/net/tipc/name_table.h b/trunk/net/tipc/name_table.h index 8086b42f92ad..62d77e5e902e 100644 --- a/trunk/net/tipc/name_table.h +++ b/trunk/net/tipc/name_table.h @@ -39,8 +39,8 @@ #include "node_subscr.h" -struct tipc_subscription; -struct tipc_port_list; +struct subscription; +struct 
port_list; /* * TIPC name types reserved for internal TIPC use (both current and planned) @@ -90,7 +90,7 @@ extern rwlock_t tipc_nametbl_lock; struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space); u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node); int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, - struct tipc_port_list *dports); + struct port_list *dports); int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope, struct tipc_name_seq const *seq); struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, @@ -100,8 +100,8 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper, u32 scope, u32 node, u32 ref, u32 key); struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, u32 node, u32 ref, u32 key); -void tipc_nametbl_subscribe(struct tipc_subscription *s); -void tipc_nametbl_unsubscribe(struct tipc_subscription *s); +void tipc_nametbl_subscribe(struct subscription *s); +void tipc_nametbl_unsubscribe(struct subscription *s); int tipc_nametbl_init(void); void tipc_nametbl_stop(void); diff --git a/trunk/net/tipc/net.c b/trunk/net/tipc/net.c index 61afee7e8291..fafef6c3c0f6 100644 --- a/trunk/net/tipc/net.c +++ b/trunk/net/tipc/net.c @@ -174,6 +174,7 @@ void tipc_net_route_msg(struct sk_buff *buf) int tipc_net_start(u32 addr) { char addr_string[16]; + int res; if (tipc_mode != TIPC_NODE_MODE) return -ENOPROTOOPT; @@ -186,7 +187,9 @@ int tipc_net_start(u32 addr) tipc_named_reinit(); tipc_port_reinit(); - tipc_bclink_init(); + res = tipc_bclink_init(); + if (res) + return res; tipc_k_signal((Handler)tipc_subscr_start, 0); tipc_k_signal((Handler)tipc_cfg_init, 0); @@ -204,8 +207,8 @@ void tipc_net_stop(void) if (tipc_mode != TIPC_NET_MODE) return; write_lock_bh(&tipc_net_lock); - tipc_mode = TIPC_NODE_MODE; tipc_bearer_stop(); + tipc_mode = TIPC_NODE_MODE; tipc_bclink_stop(); list_for_each_entry_safe(node, t_node, &tipc_node_list, list) tipc_node_delete(node); diff --git a/trunk/net/tipc/node.c b/trunk/net/tipc/node.c index 6b226faad89f..27b4bb0cca6c 100644 --- a/trunk/net/tipc/node.c +++ b/trunk/net/tipc/node.c @@ -136,9 +136,9 @@ void tipc_node_delete(struct tipc_node *n_ptr) * Link becomes active (alone or shared) or standby, depending on its priority. 
*/ -void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) +void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr) { - struct tipc_link **active = &n_ptr->active_links[0]; + struct link **active = &n_ptr->active_links[0]; n_ptr->working_links++; @@ -171,14 +171,14 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) static void node_select_active_links(struct tipc_node *n_ptr) { - struct tipc_link **active = &n_ptr->active_links[0]; + struct link **active = &n_ptr->active_links[0]; u32 i; u32 highest_prio = 0; active[0] = active[1] = NULL; for (i = 0; i < MAX_BEARERS; i++) { - struct tipc_link *l_ptr = n_ptr->links[i]; + struct link *l_ptr = n_ptr->links[i]; if (!l_ptr || !tipc_link_is_up(l_ptr) || (l_ptr->priority < highest_prio)) @@ -197,9 +197,9 @@ static void node_select_active_links(struct tipc_node *n_ptr) * tipc_node_link_down - handle loss of link */ -void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) +void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr) { - struct tipc_link **active; + struct link **active; n_ptr->working_links--; @@ -239,14 +239,14 @@ int tipc_node_is_up(struct tipc_node *n_ptr) return tipc_node_active_links(n_ptr); } -void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr) +void tipc_node_attach_link(struct tipc_node *n_ptr, struct link *l_ptr) { n_ptr->links[l_ptr->b_ptr->identity] = l_ptr; atomic_inc(&tipc_num_links); n_ptr->link_cnt++; } -void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr) +void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr) { n_ptr->links[l_ptr->b_ptr->identity] = NULL; atomic_dec(&tipc_num_links); @@ -307,7 +307,7 @@ static void node_established_contact(struct tipc_node *n_ptr) n_ptr->bclink.acked = tipc_bclink_get_last_sent(); if (n_ptr->bclink.supported) { - tipc_bclink_add_node(n_ptr->addr); + tipc_nmap_add(&tipc_bcast_nmap, n_ptr->addr); if (n_ptr->addr < tipc_own_addr) tipc_own_tag++; } @@ -350,8 +350,9 @@ static void node_lost_contact(struct tipc_node *n_ptr) n_ptr->bclink.defragm = NULL; } - tipc_bclink_remove_node(n_ptr->addr); - tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ); + tipc_nmap_remove(&tipc_bcast_nmap, n_ptr->addr); + tipc_bclink_acknowledge(n_ptr, + mod(n_ptr->bclink.acked + 10000)); if (n_ptr->addr < tipc_own_addr) tipc_own_tag--; @@ -360,7 +361,7 @@ static void node_lost_contact(struct tipc_node *n_ptr) /* Abort link changeover */ for (i = 0; i < MAX_BEARERS; i++) { - struct tipc_link *l_ptr = n_ptr->links[i]; + struct link *l_ptr = n_ptr->links[i]; if (!l_ptr) continue; l_ptr->reset_checkpoint = l_ptr->next_in_no; diff --git a/trunk/net/tipc/node.h b/trunk/net/tipc/node.h index 0b1c5f8b6996..4f15cb40aaa4 100644 --- a/trunk/net/tipc/node.h +++ b/trunk/net/tipc/node.h @@ -79,8 +79,8 @@ struct tipc_node { struct hlist_node hash; struct list_head list; struct list_head nsub; - struct tipc_link *active_links[2]; - struct tipc_link *links[MAX_BEARERS]; + struct link *active_links[2]; + struct link *links[MAX_BEARERS]; int link_cnt; int working_links; int block_setup; @@ -117,10 +117,10 @@ extern u32 tipc_own_tag; struct tipc_node *tipc_node_find(u32 addr); struct tipc_node *tipc_node_create(u32 addr); void tipc_node_delete(struct tipc_node *n_ptr); -void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr); -void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr); -void tipc_node_link_down(struct tipc_node *n_ptr, struct 
tipc_link *l_ptr); -void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr); +void tipc_node_attach_link(struct tipc_node *n_ptr, struct link *l_ptr); +void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr); +void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr); +void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr); int tipc_node_active_links(struct tipc_node *n_ptr); int tipc_node_redundant_links(struct tipc_node *n_ptr); int tipc_node_is_up(struct tipc_node *n_ptr); diff --git a/trunk/net/tipc/port.c b/trunk/net/tipc/port.c index d91efc69e6f9..54d812a5a4d9 100644 --- a/trunk/net/tipc/port.c +++ b/trunk/net/tipc/port.c @@ -80,7 +80,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, struct tipc_msg *hdr; struct sk_buff *buf; struct sk_buff *ibuf = NULL; - struct tipc_port_list dports = {0, NULL, }; + struct port_list dports = {0, NULL, }; struct tipc_port *oport = tipc_port_deref(ref); int ext_targets; int res; @@ -142,11 +142,11 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, * If there is no port list, perform a lookup to create one */ -void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp) +void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp) { struct tipc_msg *msg; - struct tipc_port_list dports = {0, NULL, }; - struct tipc_port_list *item = dp; + struct port_list dports = {0, NULL, }; + struct port_list *item = dp; int cnt = 0; msg = buf_msg(buf); diff --git a/trunk/net/tipc/port.h b/trunk/net/tipc/port.h index f751807e2a91..b9aa34195aec 100644 --- a/trunk/net/tipc/port.h +++ b/trunk/net/tipc/port.h @@ -151,7 +151,7 @@ struct tipc_port { }; extern spinlock_t tipc_port_list_lock; -struct tipc_port_list; +struct port_list; /* * TIPC port manipulation routines @@ -228,7 +228,7 @@ int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr, unsigned int total_len, int err); struct sk_buff *tipc_port_get_ports(void); void tipc_port_recv_proto_msg(struct sk_buff *buf); -void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp); +void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp); void tipc_port_reinit(void); /** diff --git a/trunk/net/tipc/ref.c b/trunk/net/tipc/ref.c index 9e37b7812c3c..83116892528b 100644 --- a/trunk/net/tipc/ref.c +++ b/trunk/net/tipc/ref.c @@ -110,7 +110,8 @@ int tipc_ref_table_init(u32 requested_size, u32 start) /* allocate table & mark all entries as uninitialized */ - table = vzalloc(actual_size * sizeof(struct reference)); + table = __vmalloc(actual_size * sizeof(struct reference), + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); if (table == NULL) return -ENOMEM; diff --git a/trunk/net/tipc/socket.c b/trunk/net/tipc/socket.c index e2f7c5d370ba..42b8324ff2ee 100644 --- a/trunk/net/tipc/socket.c +++ b/trunk/net/tipc/socket.c @@ -185,6 +185,9 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol, /* Validate arguments */ + if (!net_eq(net, &init_net)) + return -EAFNOSUPPORT; + if (unlikely(protocol != 0)) return -EPROTONOSUPPORT; diff --git a/trunk/net/tipc/subscr.c b/trunk/net/tipc/subscr.c index 8c49566da8f3..198371723b41 100644 --- a/trunk/net/tipc/subscr.c +++ b/trunk/net/tipc/subscr.c @@ -40,14 +40,14 @@ #include "subscr.h" /** - * struct tipc_subscriber - TIPC network topology subscriber + * struct subscriber - TIPC network topology subscriber * @port_ref: object reference to server port connecting to subscriber * @lock: pointer to spinlock controlling 
access to subscriber's server port * @subscriber_list: adjacent subscribers in top. server's list of subscribers * @subscription_list: list of subscription objects for this subscriber */ -struct tipc_subscriber { +struct subscriber { u32 port_ref; spinlock_t *lock; struct list_head subscriber_list; @@ -92,7 +92,7 @@ static u32 htohl(u32 in, int swap) * try to take the lock if the message is rejected and returned! */ -static void subscr_send_event(struct tipc_subscription *sub, +static void subscr_send_event(struct subscription *sub, u32 found_lower, u32 found_upper, u32 event, @@ -118,7 +118,7 @@ static void subscr_send_event(struct tipc_subscription *sub, * Returns 1 if there is overlap, otherwise 0. */ -int tipc_subscr_overlap(struct tipc_subscription *sub, +int tipc_subscr_overlap(struct subscription *sub, u32 found_lower, u32 found_upper) @@ -138,7 +138,7 @@ int tipc_subscr_overlap(struct tipc_subscription *sub, * Protected by nameseq.lock in name_table.c */ -void tipc_subscr_report_overlap(struct tipc_subscription *sub, +void tipc_subscr_report_overlap(struct subscription *sub, u32 found_lower, u32 found_upper, u32 event, @@ -158,7 +158,7 @@ void tipc_subscr_report_overlap(struct tipc_subscription *sub, * subscr_timeout - subscription timeout has occurred */ -static void subscr_timeout(struct tipc_subscription *sub) +static void subscr_timeout(struct subscription *sub) { struct tipc_port *server_port; @@ -205,7 +205,7 @@ static void subscr_timeout(struct tipc_subscription *sub) * Called with subscriber port locked. */ -static void subscr_del(struct tipc_subscription *sub) +static void subscr_del(struct subscription *sub) { tipc_nametbl_unsubscribe(sub); list_del(&sub->subscription_list); @@ -224,11 +224,11 @@ static void subscr_del(struct tipc_subscription *sub) * simply wait for it to be released, then claim it.) */ -static void subscr_terminate(struct tipc_subscriber *subscriber) +static void subscr_terminate(struct subscriber *subscriber) { u32 port_ref; - struct tipc_subscription *sub; - struct tipc_subscription *sub_temp; + struct subscription *sub; + struct subscription *sub_temp; /* Invalidate subscriber reference */ @@ -278,10 +278,10 @@ static void subscr_terminate(struct tipc_subscriber *subscriber) */ static void subscr_cancel(struct tipc_subscr *s, - struct tipc_subscriber *subscriber) + struct subscriber *subscriber) { - struct tipc_subscription *sub; - struct tipc_subscription *sub_temp; + struct subscription *sub; + struct subscription *sub_temp; int found = 0; /* Find first matching subscription, exit if not found */ @@ -314,10 +314,10 @@ static void subscr_cancel(struct tipc_subscr *s, * Called with subscriber port locked. 
*/ -static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, - struct tipc_subscriber *subscriber) +static struct subscription *subscr_subscribe(struct tipc_subscr *s, + struct subscriber *subscriber) { - struct tipc_subscription *sub; + struct subscription *sub; int swap; /* Determine subscriber's endianness */ @@ -393,7 +393,7 @@ static void subscr_conn_shutdown_event(void *usr_handle, unsigned int size, int reason) { - struct tipc_subscriber *subscriber = usr_handle; + struct subscriber *subscriber = usr_handle; spinlock_t *subscriber_lock; if (tipc_port_lock(port_ref) == NULL) @@ -416,9 +416,9 @@ static void subscr_conn_msg_event(void *usr_handle, const unchar *data, u32 size) { - struct tipc_subscriber *subscriber = usr_handle; + struct subscriber *subscriber = usr_handle; spinlock_t *subscriber_lock; - struct tipc_subscription *sub; + struct subscription *sub; /* * Lock subscriber's server port (& make a local copy of lock pointer, @@ -471,12 +471,12 @@ static void subscr_named_msg_event(void *usr_handle, struct tipc_portid const *orig, struct tipc_name_seq const *dest) { - struct tipc_subscriber *subscriber; + struct subscriber *subscriber; u32 server_port_ref; /* Create subscriber object */ - subscriber = kzalloc(sizeof(struct tipc_subscriber), GFP_ATOMIC); + subscriber = kzalloc(sizeof(struct subscriber), GFP_ATOMIC); if (subscriber == NULL) { warn("Subscriber rejected, no memory\n"); return; @@ -568,8 +568,8 @@ int tipc_subscr_start(void) void tipc_subscr_stop(void) { - struct tipc_subscriber *subscriber; - struct tipc_subscriber *subscriber_temp; + struct subscriber *subscriber; + struct subscriber *subscriber_temp; spinlock_t *subscriber_lock; if (topsrv.setup_port) { diff --git a/trunk/net/tipc/subscr.h b/trunk/net/tipc/subscr.h index ef6529c8456f..4b06ef6f8401 100644 --- a/trunk/net/tipc/subscr.h +++ b/trunk/net/tipc/subscr.h @@ -37,10 +37,10 @@ #ifndef _TIPC_SUBSCR_H #define _TIPC_SUBSCR_H -struct tipc_subscription; +struct subscription; /** - * struct tipc_subscription - TIPC network topology subscription object + * struct subscription - TIPC network topology subscription object * @seq: name sequence associated with subscription * @timeout: duration of subscription (in ms) * @filter: event filtering to be done for subscription @@ -52,7 +52,7 @@ struct tipc_subscription; * @evt: template for events generated by subscription */ -struct tipc_subscription { +struct subscription { struct tipc_name_seq seq; u32 timeout; u32 filter; @@ -64,11 +64,11 @@ struct tipc_subscription { struct tipc_event evt; }; -int tipc_subscr_overlap(struct tipc_subscription *sub, +int tipc_subscr_overlap(struct subscription *sub, u32 found_lower, u32 found_upper); -void tipc_subscr_report_overlap(struct tipc_subscription *sub, +void tipc_subscr_report_overlap(struct subscription *sub, u32 found_lower, u32 found_upper, u32 event, diff --git a/trunk/net/unix/Kconfig b/trunk/net/unix/Kconfig index c2128b10e5f9..5a69733bcdad 100644 --- a/trunk/net/unix/Kconfig +++ b/trunk/net/unix/Kconfig @@ -19,10 +19,3 @@ config UNIX Say Y unless you know what you are doing. -config UNIX_DIAG - tristate "UNIX: socket monitoring interface" - depends on UNIX - default UNIX - ---help--- - Support for UNIX socket monitoring interface used by the ss tool. - If unsure, say Y. 
diff --git a/trunk/net/unix/Makefile b/trunk/net/unix/Makefile index b663c607b1c6..b852a2bde9a8 100644 --- a/trunk/net/unix/Makefile +++ b/trunk/net/unix/Makefile @@ -6,6 +6,3 @@ obj-$(CONFIG_UNIX) += unix.o unix-y := af_unix.o garbage.o unix-$(CONFIG_SYSCTL) += sysctl_net_unix.o - -obj-$(CONFIG_UNIX_DIAG) += unix_diag.o -unix_diag-y := diag.o diff --git a/trunk/net/unix/af_unix.c b/trunk/net/unix/af_unix.c index 7cc3d7b23d1c..b595a3d8679f 100644 --- a/trunk/net/unix/af_unix.c +++ b/trunk/net/unix/af_unix.c @@ -115,10 +115,8 @@ #include #include -struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1]; -EXPORT_SYMBOL_GPL(unix_socket_table); -DEFINE_SPINLOCK(unix_table_lock); -EXPORT_SYMBOL_GPL(unix_table_lock); +static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1]; +static DEFINE_SPINLOCK(unix_table_lock); static atomic_long_t unix_nr_socks; #define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE]) @@ -174,7 +172,7 @@ static inline int unix_recvq_full(struct sock const *sk) return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog; } -struct sock *unix_peer_get(struct sock *s) +static struct sock *unix_peer_get(struct sock *s) { struct sock *peer; @@ -185,7 +183,6 @@ struct sock *unix_peer_get(struct sock *s) unix_state_unlock(s); return peer; } -EXPORT_SYMBOL_GPL(unix_peer_get); static inline void unix_release_addr(struct unix_address *addr) { @@ -2065,36 +2062,6 @@ static int unix_shutdown(struct socket *sock, int mode) return 0; } -long unix_inq_len(struct sock *sk) -{ - struct sk_buff *skb; - long amount = 0; - - if (sk->sk_state == TCP_LISTEN) - return -EINVAL; - - spin_lock(&sk->sk_receive_queue.lock); - if (sk->sk_type == SOCK_STREAM || - sk->sk_type == SOCK_SEQPACKET) { - skb_queue_walk(&sk->sk_receive_queue, skb) - amount += skb->len; - } else { - skb = skb_peek(&sk->sk_receive_queue); - if (skb) - amount = skb->len; - } - spin_unlock(&sk->sk_receive_queue.lock); - - return amount; -} -EXPORT_SYMBOL_GPL(unix_inq_len); - -long unix_outq_len(struct sock *sk) -{ - return sk_wmem_alloc_get(sk); -} -EXPORT_SYMBOL_GPL(unix_outq_len); - static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; @@ -2103,16 +2070,33 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) switch (cmd) { case SIOCOUTQ: - amount = unix_outq_len(sk); + amount = sk_wmem_alloc_get(sk); err = put_user(amount, (int __user *)arg); break; case SIOCINQ: - amount = unix_inq_len(sk); - if (amount < 0) - err = amount; - else + { + struct sk_buff *skb; + + if (sk->sk_state == TCP_LISTEN) { + err = -EINVAL; + break; + } + + spin_lock(&sk->sk_receive_queue.lock); + if (sk->sk_type == SOCK_STREAM || + sk->sk_type == SOCK_SEQPACKET) { + skb_queue_walk(&sk->sk_receive_queue, skb) + amount += skb->len; + } else { + skb = skb_peek(&sk->sk_receive_queue); + if (skb) + amount = skb->len; + } + spin_unlock(&sk->sk_receive_queue.lock); err = put_user(amount, (int __user *)arg); - break; + break; + } + default: err = -ENOIOCTLCMD; break; diff --git a/trunk/net/unix/diag.c b/trunk/net/unix/diag.c deleted file mode 100644 index 6b7697fd911b..000000000000 --- a/trunk/net/unix/diag.c +++ /dev/null @@ -1,329 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define UNIX_DIAG_PUT(skb, attrtype, attrlen) \ - RTA_DATA(__RTA_PUT(skb, attrtype, attrlen)) - -static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb) -{ - struct unix_address *addr = unix_sk(sk)->addr; - char *s; 
- - if (addr) { - s = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short)); - memcpy(s, addr->name->sun_path, addr->len - sizeof(short)); - } - - return 0; - -rtattr_failure: - return -EMSGSIZE; -} - -static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb) -{ - struct dentry *dentry = unix_sk(sk)->dentry; - struct unix_diag_vfs *uv; - - if (dentry) { - uv = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_VFS, sizeof(*uv)); - uv->udiag_vfs_ino = dentry->d_inode->i_ino; - uv->udiag_vfs_dev = dentry->d_sb->s_dev; - } - - return 0; - -rtattr_failure: - return -EMSGSIZE; -} - -static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb) -{ - struct sock *peer; - int ino; - - peer = unix_peer_get(sk); - if (peer) { - unix_state_lock(peer); - ino = sock_i_ino(peer); - unix_state_unlock(peer); - sock_put(peer); - - RTA_PUT_U32(nlskb, UNIX_DIAG_PEER, ino); - } - - return 0; -rtattr_failure: - return -EMSGSIZE; -} - -static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb) -{ - struct sk_buff *skb; - u32 *buf; - int i; - - if (sk->sk_state == TCP_LISTEN) { - spin_lock(&sk->sk_receive_queue.lock); - buf = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_ICONS, - sk->sk_receive_queue.qlen * sizeof(u32)); - i = 0; - skb_queue_walk(&sk->sk_receive_queue, skb) { - struct sock *req, *peer; - - req = skb->sk; - /* - * The state lock is outer for the same sk's - * queue lock. With the other's queue locked it's - * OK to lock the state. - */ - unix_state_lock_nested(req); - peer = unix_sk(req)->peer; - buf[i++] = (peer ? sock_i_ino(peer) : 0); - unix_state_unlock(req); - } - spin_unlock(&sk->sk_receive_queue.lock); - } - - return 0; - -rtattr_failure: - spin_unlock(&sk->sk_receive_queue.lock); - return -EMSGSIZE; -} - -static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb) -{ - struct unix_diag_rqlen *rql; - - rql = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_RQLEN, sizeof(*rql)); - - if (sk->sk_state == TCP_LISTEN) { - rql->udiag_rqueue = sk->sk_receive_queue.qlen; - rql->udiag_wqueue = sk->sk_max_ack_backlog; - } else { - rql->udiag_rqueue = (__u32)unix_inq_len(sk); - rql->udiag_wqueue = (__u32)unix_outq_len(sk); - } - - return 0; - -rtattr_failure: - return -EMSGSIZE; -} - -static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req, - u32 pid, u32 seq, u32 flags, int sk_ino) -{ - unsigned char *b = skb_tail_pointer(skb); - struct nlmsghdr *nlh; - struct unix_diag_msg *rep; - - nlh = NLMSG_PUT(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep)); - nlh->nlmsg_flags = flags; - - rep = NLMSG_DATA(nlh); - - rep->udiag_family = AF_UNIX; - rep->udiag_type = sk->sk_type; - rep->udiag_state = sk->sk_state; - rep->udiag_ino = sk_ino; - sock_diag_save_cookie(sk, rep->udiag_cookie); - - if ((req->udiag_show & UDIAG_SHOW_NAME) && - sk_diag_dump_name(sk, skb)) - goto nlmsg_failure; - - if ((req->udiag_show & UDIAG_SHOW_VFS) && - sk_diag_dump_vfs(sk, skb)) - goto nlmsg_failure; - - if ((req->udiag_show & UDIAG_SHOW_PEER) && - sk_diag_dump_peer(sk, skb)) - goto nlmsg_failure; - - if ((req->udiag_show & UDIAG_SHOW_ICONS) && - sk_diag_dump_icons(sk, skb)) - goto nlmsg_failure; - - if ((req->udiag_show & UDIAG_SHOW_RQLEN) && - sk_diag_show_rqlen(sk, skb)) - goto nlmsg_failure; - - if ((req->udiag_show & UDIAG_SHOW_MEMINFO) && - sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO)) - goto nlmsg_failure; - - nlh->nlmsg_len = skb_tail_pointer(skb) - b; - return skb->len; - -nlmsg_failure: - nlmsg_trim(skb, b); - return -EMSGSIZE; -} - -static int sk_diag_dump(struct sock *sk, struct sk_buff 
*skb, struct unix_diag_req *req, - u32 pid, u32 seq, u32 flags) -{ - int sk_ino; - - unix_state_lock(sk); - sk_ino = sock_i_ino(sk); - unix_state_unlock(sk); - - if (!sk_ino) - return 0; - - return sk_diag_fill(sk, skb, req, pid, seq, flags, sk_ino); -} - -static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) -{ - struct unix_diag_req *req; - int num, s_num, slot, s_slot; - - req = NLMSG_DATA(cb->nlh); - - s_slot = cb->args[0]; - num = s_num = cb->args[1]; - - spin_lock(&unix_table_lock); - for (slot = s_slot; slot <= UNIX_HASH_SIZE; s_num = 0, slot++) { - struct sock *sk; - struct hlist_node *node; - - num = 0; - sk_for_each(sk, node, &unix_socket_table[slot]) { - if (num < s_num) - goto next; - if (!(req->udiag_states & (1 << sk->sk_state))) - goto next; - if (sk_diag_dump(sk, skb, req, - NETLINK_CB(cb->skb).pid, - cb->nlh->nlmsg_seq, - NLM_F_MULTI) < 0) - goto done; -next: - num++; - } - } -done: - spin_unlock(&unix_table_lock); - cb->args[0] = slot; - cb->args[1] = num; - - return skb->len; -} - -static struct sock *unix_lookup_by_ino(int ino) -{ - int i; - struct sock *sk; - - spin_lock(&unix_table_lock); - for (i = 0; i <= UNIX_HASH_SIZE; i++) { - struct hlist_node *node; - - sk_for_each(sk, node, &unix_socket_table[i]) - if (ino == sock_i_ino(sk)) { - sock_hold(sk); - spin_unlock(&unix_table_lock); - - return sk; - } - } - - spin_unlock(&unix_table_lock); - return NULL; -} - -static int unix_diag_get_exact(struct sk_buff *in_skb, - const struct nlmsghdr *nlh, - struct unix_diag_req *req) -{ - int err = -EINVAL; - struct sock *sk; - struct sk_buff *rep; - unsigned int extra_len; - - if (req->udiag_ino == 0) - goto out_nosk; - - sk = unix_lookup_by_ino(req->udiag_ino); - err = -ENOENT; - if (sk == NULL) - goto out_nosk; - - err = sock_diag_check_cookie(sk, req->udiag_cookie); - if (err) - goto out; - - extra_len = 256; -again: - err = -ENOMEM; - rep = alloc_skb(NLMSG_SPACE((sizeof(struct unix_diag_msg) + extra_len)), - GFP_KERNEL); - if (!rep) - goto out; - - err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid, - nlh->nlmsg_seq, 0, req->udiag_ino); - if (err < 0) { - kfree_skb(rep); - extra_len += 256; - if (extra_len >= PAGE_SIZE) - goto out; - - goto again; - } - err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid, - MSG_DONTWAIT); - if (err > 0) - err = 0; -out: - if (sk) - sock_put(sk); -out_nosk: - return err; -} - -static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) -{ - int hdrlen = sizeof(struct unix_diag_req); - - if (nlmsg_len(h) < hdrlen) - return -EINVAL; - - if (h->nlmsg_flags & NLM_F_DUMP) - return netlink_dump_start(sock_diag_nlsk, skb, h, - unix_diag_dump, NULL, 0); - else - return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h)); -} - -static struct sock_diag_handler unix_diag_handler = { - .family = AF_UNIX, - .dump = unix_diag_handler_dump, -}; - -static int __init unix_diag_init(void) -{ - return sock_diag_register(&unix_diag_handler); -} - -static void __exit unix_diag_exit(void) -{ - sock_diag_unregister(&unix_diag_handler); -} - -module_init(unix_diag_init); -module_exit(unix_diag_exit); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */); diff --git a/trunk/net/wireless/Kconfig b/trunk/net/wireless/Kconfig index 2e4444fedbe0..1f1ef70f34f2 100644 --- a/trunk/net/wireless/Kconfig +++ b/trunk/net/wireless/Kconfig @@ -121,16 +121,15 @@ config CFG80211_WEXT config WIRELESS_EXT_SYSFS bool "Wireless extensions sysfs files" + default y 
depends on WEXT_CORE && SYSFS help This option enables the deprecated wireless statistics files in /sys/class/net/*/wireless/. The same information is available via the ioctls as well. - Say N. If you know you have ancient tools requiring it, - like very old versions of hal (prior to 0.5.12 release), - say Y and update the tools as soon as possible as this - option will be removed soon. + Say Y if you have programs using it, like old versions of + hal. config LIB80211 tristate "Common routines for IEEE802.11 drivers" diff --git a/trunk/net/wireless/chan.c b/trunk/net/wireless/chan.c index 2fcfe0993ca2..17cd0c04d139 100644 --- a/trunk/net/wireless/chan.c +++ b/trunk/net/wireless/chan.c @@ -6,7 +6,6 @@ * Copyright 2009 Johannes Berg */ -#include #include #include "core.h" @@ -45,9 +44,9 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev, return chan; } -int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy, - struct ieee80211_channel *chan, - enum nl80211_channel_type channel_type) +static bool can_beacon_sec_chan(struct wiphy *wiphy, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type) { struct ieee80211_channel *sec_chan; int diff; @@ -76,7 +75,6 @@ int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy, return true; } -EXPORT_SYMBOL(cfg80211_can_beacon_sec_chan); int cfg80211_set_freq(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, int freq, @@ -111,8 +109,8 @@ int cfg80211_set_freq(struct cfg80211_registered_device *rdev, switch (channel_type) { case NL80211_CHAN_HT40PLUS: case NL80211_CHAN_HT40MINUS: - if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, chan, - channel_type)) { + if (!can_beacon_sec_chan(&rdev->wiphy, chan, + channel_type)) { printk(KERN_DEBUG "cfg80211: Secondary channel not " "allowed to initiate communication\n"); diff --git a/trunk/net/wireless/core.c b/trunk/net/wireless/core.c index ccdfed897651..220f3bd176f8 100644 --- a/trunk/net/wireless/core.c +++ b/trunk/net/wireless/core.c @@ -492,10 +492,6 @@ int wiphy_register(struct wiphy *wiphy) !(wiphy->wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY))) return -EINVAL; - if (WARN_ON(wiphy->ap_sme_capa && - !(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME))) - return -EINVAL; - if (WARN_ON(wiphy->addresses && !wiphy->n_addresses)) return -EINVAL; diff --git a/trunk/net/wireless/core.h b/trunk/net/wireless/core.h index 43ad9c81efcf..b9ec3061ed72 100644 --- a/trunk/net/wireless/core.h +++ b/trunk/net/wireless/core.h @@ -54,8 +54,6 @@ struct cfg80211_registered_device { int opencount; /* also protected by devlist_mtx */ wait_queue_head_t dev_wait; - u32 ap_beacons_nlpid; - /* BSSes/scanning */ spinlock_t bss_lock; struct list_head bss_list; @@ -249,11 +247,12 @@ struct cfg80211_event { u16 status; } cr; struct { + struct ieee80211_channel *channel; + u8 bssid[ETH_ALEN]; const u8 *req_ie; const u8 *resp_ie; size_t req_ie_len; size_t resp_ie_len; - struct cfg80211_bss *bss; } rm; struct { const u8 *ie; @@ -340,17 +339,13 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, const u8 *bssid, const u8 *prev_bssid, const u8 *ssid, int ssid_len, const u8 *ie, int ie_len, bool use_mfp, - struct cfg80211_crypto_settings *crypt, - u32 assoc_flags, struct ieee80211_ht_cap *ht_capa, - struct ieee80211_ht_cap *ht_capa_mask); + struct cfg80211_crypto_settings *crypt); int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ieee80211_channel *chan, const u8 *bssid, const u8 *prev_bssid, const u8 *ssid, int ssid_len, const u8 *ie, int ie_len, bool 
use_mfp, - struct cfg80211_crypto_settings *crypt, - u32 assoc_flags, struct ieee80211_ht_cap *ht_capa, - struct ieee80211_ht_cap *ht_capa_mask); + struct cfg80211_crypto_settings *crypt); int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *bssid, const u8 *ie, int ie_len, u16 reason, @@ -381,9 +376,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, enum nl80211_channel_type channel_type, bool channel_type_valid, unsigned int wait, const u8 *buf, size_t len, bool no_cck, - bool dont_wait_for_ack, u64 *cookie); -void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa, - const struct ieee80211_ht_cap *ht_capa_mask); + u64 *cookie); /* SME */ int __cfg80211_connect(struct cfg80211_registered_device *rdev, @@ -402,7 +395,8 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev, struct net_device *dev, u16 reason, bool wextev); void __cfg80211_roamed(struct wireless_dev *wdev, - struct cfg80211_bss *bss, + struct ieee80211_channel *channel, + const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len); int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, diff --git a/trunk/net/wireless/genregdb.awk b/trunk/net/wireless/genregdb.awk index 9392f8cbb901..53c143f5e770 100644 --- a/trunk/net/wireless/genregdb.awk +++ b/trunk/net/wireless/genregdb.awk @@ -7,17 +7,10 @@ # # Copyright 2009 John W. Linville # -# Permission to use, copy, modify, and/or distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. # -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. BEGIN { active = 0 diff --git a/trunk/net/wireless/mesh.c b/trunk/net/wireless/mesh.c index 8c550df13037..b7b7868f4128 100644 --- a/trunk/net/wireless/mesh.c +++ b/trunk/net/wireless/mesh.c @@ -20,7 +20,6 @@ * interface */ #define MESH_PREQ_MIN_INT 10 -#define MESH_PERR_MIN_INT 100 #define MESH_DIAM_TRAVERSAL_TIME 50 /* @@ -48,7 +47,6 @@ const struct mesh_config default_mesh_config = { .dot11MeshMaxPeerLinks = MESH_MAX_ESTAB_PLINKS, .dot11MeshHWMPactivePathTimeout = MESH_PATH_TIMEOUT, .dot11MeshHWMPpreqMinInterval = MESH_PREQ_MIN_INT, - .dot11MeshHWMPperrMinInterval = MESH_PERR_MIN_INT, .dot11MeshHWMPnetDiameterTraversalTime = MESH_DIAM_TRAVERSAL_TIME, .dot11MeshHWMPmaxPREQretries = MESH_MAX_PREQ_RETRIES, .path_refresh_time = MESH_PATH_REFRESH_TIME, diff --git a/trunk/net/wireless/mlme.c b/trunk/net/wireless/mlme.c index 438dfc105b4a..21fc9702f81c 100644 --- a/trunk/net/wireless/mlme.c +++ b/trunk/net/wireless/mlme.c @@ -501,32 +501,13 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, return err; } -/* Do a logical ht_capa &= ht_capa_mask. 
*/ -void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa, - const struct ieee80211_ht_cap *ht_capa_mask) -{ - int i; - u8 *p1, *p2; - if (!ht_capa_mask) { - memset(ht_capa, 0, sizeof(*ht_capa)); - return; - } - - p1 = (u8*)(ht_capa); - p2 = (u8*)(ht_capa_mask); - for (i = 0; i < sizeof(*ht_capa); i++) - p1[i] &= p2[i]; -} - int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ieee80211_channel *chan, const u8 *bssid, const u8 *prev_bssid, const u8 *ssid, int ssid_len, const u8 *ie, int ie_len, bool use_mfp, - struct cfg80211_crypto_settings *crypt, - u32 assoc_flags, struct ieee80211_ht_cap *ht_capa, - struct ieee80211_ht_cap *ht_capa_mask) + struct cfg80211_crypto_settings *crypt) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_assoc_request req; @@ -556,15 +537,6 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, memcpy(&req.crypto, crypt, sizeof(req.crypto)); req.use_mfp = use_mfp; req.prev_bssid = prev_bssid; - req.flags = assoc_flags; - if (ht_capa) - memcpy(&req.ht_capa, ht_capa, sizeof(req.ht_capa)); - if (ht_capa_mask) - memcpy(&req.ht_capa_mask, ht_capa_mask, - sizeof(req.ht_capa_mask)); - cfg80211_oper_and_ht_capa(&req.ht_capa_mask, - rdev->wiphy.ht_capa_mod_mask); - req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); if (!req.bss) { @@ -602,17 +574,14 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, const u8 *bssid, const u8 *prev_bssid, const u8 *ssid, int ssid_len, const u8 *ie, int ie_len, bool use_mfp, - struct cfg80211_crypto_settings *crypt, - u32 assoc_flags, struct ieee80211_ht_cap *ht_capa, - struct ieee80211_ht_cap *ht_capa_mask) + struct cfg80211_crypto_settings *crypt) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; wdev_lock(wdev); err = __cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid, - ssid, ssid_len, ie, ie_len, use_mfp, crypt, - assoc_flags, ht_capa, ht_capa_mask); + ssid, ssid_len, ie, ie_len, use_mfp, crypt); wdev_unlock(wdev); return err; @@ -910,9 +879,6 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid) } spin_unlock_bh(&wdev->mgmt_registrations_lock); - - if (nlpid == wdev->ap_unexpected_nlpid) - wdev->ap_unexpected_nlpid = 0; } void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev) @@ -935,7 +901,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, enum nl80211_channel_type channel_type, bool channel_type_valid, unsigned int wait, const u8 *buf, size_t len, bool no_cck, - bool dont_wait_for_ack, u64 *cookie) + u64 *cookie) { struct wireless_dev *wdev = dev->ieee80211_ptr; const struct ieee80211_mgmt *mgmt; @@ -1026,8 +992,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, /* Transmit the Action frame as requested by user space */ return rdev->ops->mgmt_tx(&rdev->wiphy, dev, chan, offchan, channel_type, channel_type_valid, - wait, buf, len, no_cck, dont_wait_for_ack, - cookie); + wait, buf, len, no_cck, cookie); } bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf, @@ -1142,30 +1107,3 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index, nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp); } EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify); - -bool cfg80211_rx_spurious_frame(struct net_device *dev, - const u8 *addr, gfp_t gfp) -{ - struct wireless_dev *wdev = dev->ieee80211_ptr; - - if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && - wdev->iftype != NL80211_IFTYPE_P2P_GO)) - return false; - - return nl80211_unexpected_frame(dev, addr, gfp); -} -EXPORT_SYMBOL(cfg80211_rx_spurious_frame); - -bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev, - const u8 *addr, gfp_t gfp) -{ - struct wireless_dev *wdev = dev->ieee80211_ptr; - - if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && - wdev->iftype != NL80211_IFTYPE_P2P_GO && - wdev->iftype != NL80211_IFTYPE_AP_VLAN)) - return false; - - return
nl80211_unexpected_4addr_frame(dev, addr, gfp); -} -EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame); diff --git a/trunk/net/wireless/nl80211.c b/trunk/net/wireless/nl80211.c index b3d3cf8931cb..ffafda5022c2 100644 --- a/trunk/net/wireless/nl80211.c +++ b/trunk/net/wireless/nl80211.c @@ -47,21 +47,22 @@ static struct genl_family nl80211_fam = { }; /* internal helper: get rdev and dev */ -static int get_rdev_dev_by_ifindex(struct net *netns, struct nlattr **attrs, - struct cfg80211_registered_device **rdev, - struct net_device **dev) +static int get_rdev_dev_by_info_ifindex(struct genl_info *info, + struct cfg80211_registered_device **rdev, + struct net_device **dev) { + struct nlattr **attrs = info->attrs; int ifindex; if (!attrs[NL80211_ATTR_IFINDEX]) return -EINVAL; ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]); - *dev = dev_get_by_index(netns, ifindex); + *dev = dev_get_by_index(genl_info_net(info), ifindex); if (!*dev) return -ENODEV; - *rdev = cfg80211_get_dev_from_ifindex(netns, ifindex); + *rdev = cfg80211_get_dev_from_ifindex(genl_info_net(info), ifindex); if (IS_ERR(*rdev)) { dev_put(*dev); return PTR_ERR(*rdev); @@ -97,7 +98,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { [NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 }, [NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 }, [NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG }, - [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 }, + [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 }, [NL80211_ATTR_KEY_TYPE] = { .type = NLA_U32 }, [NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 }, @@ -195,15 +196,6 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { [NL80211_ATTR_TDLS_OPERATION] = { .type = NLA_U8 }, [NL80211_ATTR_TDLS_SUPPORT] = { .type = NLA_FLAG }, [NL80211_ATTR_TDLS_EXTERNAL_SETUP] = { .type = NLA_FLAG }, - [NL80211_ATTR_DONT_WAIT_FOR_ACK] = { .type = NLA_FLAG }, - [NL80211_ATTR_PROBE_RESP] = { .type = NLA_BINARY, - .len = IEEE80211_MAX_DATA_LEN }, - [NL80211_ATTR_DFS_REGION] = { .type = NLA_U8 }, - [NL80211_ATTR_DISABLE_HT] = { .type = NLA_FLAG }, - [NL80211_ATTR_HT_CAPABILITY_MASK] = { - .len = NL80211_HT_CAPABILITY_LEN - }, - [NL80211_ATTR_NOACK_MAP] = { .type = NLA_U16 }, }; /* policy for the key attributes */ @@ -211,7 +203,7 @@ static const struct nla_policy nl80211_key_policy[NL80211_KEY_MAX + 1] = { [NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN }, [NL80211_KEY_IDX] = { .type = NLA_U8 }, [NL80211_KEY_CIPHER] = { .type = NLA_U32 }, - [NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 }, + [NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 }, [NL80211_KEY_DEFAULT] = { .type = NLA_FLAG }, [NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG }, [NL80211_KEY_TYPE] = { .type = NLA_U32 }, @@ -766,10 +758,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX, dev->wiphy.available_antennas_rx); - if (dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) - NLA_PUT_U32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD, - dev->wiphy.probe_resp_offload); - if ((dev->wiphy.available_antennas_tx || dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) { u32 tx_ant = 0, rx_ant = 0; @@ -886,8 +874,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, CMD(set_pmksa, SET_PMKSA); CMD(del_pmksa, DEL_PMKSA); CMD(flush_pmksa, FLUSH_PMKSA); - if (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) - CMD(remain_on_channel, REMAIN_ON_CHANNEL); + 
CMD(remain_on_channel, REMAIN_ON_CHANNEL); CMD(set_bitrate_mask, SET_TX_BITRATE_MASK); CMD(mgmt_tx, FRAME); CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL); @@ -903,16 +890,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, } if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) CMD(sched_scan_start, START_SCHED_SCAN); - CMD(probe_client, PROBE_CLIENT); - CMD(set_noack_map, SET_NOACK_MAP); - if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) { - i++; - NLA_PUT_U32(msg, i, NL80211_CMD_REGISTER_BEACONS); - } - -#ifdef CONFIG_NL80211_TESTMODE - CMD(testmode_cmd, TESTMODE); -#endif #undef CMD @@ -928,12 +905,11 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, nla_nest_end(msg, nl_cmds); - if (dev->ops->remain_on_channel && - dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) + if (dev->ops->remain_on_channel) NLA_PUT_U32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION, dev->wiphy.max_remain_on_channel_duration); - if (dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) + if (dev->ops->mgmt_tx_cancel_wait) NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK); if (mgmt_stypes) { @@ -1031,17 +1007,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, if (nl80211_put_iface_combinations(&dev->wiphy, msg)) goto nla_put_failure; - if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) - NLA_PUT_U32(msg, NL80211_ATTR_DEVICE_AP_SME, - dev->wiphy.ap_sme_capa); - - NLA_PUT_U32(msg, NL80211_ATTR_FEATURE_FLAGS, dev->wiphy.features); - - if (dev->wiphy.ht_capa_mod_mask) - NLA_PUT(msg, NL80211_ATTR_HT_CAPABILITY_MASK, - sizeof(*dev->wiphy.ht_capa_mod_mask), - dev->wiphy.ht_capa_mod_mask); - return genlmsg_end(msg, hdr); nla_put_failure: @@ -1760,23 +1725,6 @@ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info) return rdev->ops->del_virtual_intf(&rdev->wiphy, dev); } -static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info) -{ - struct cfg80211_registered_device *rdev = info->user_ptr[0]; - struct net_device *dev = info->user_ptr[1]; - u16 noack_map; - - if (!info->attrs[NL80211_ATTR_NOACK_MAP]) - return -EINVAL; - - if (!rdev->ops->set_noack_map) - return -EOPNOTSUPP; - - noack_map = nla_get_u16(info->attrs[NL80211_ATTR_NOACK_MAP]); - - return rdev->ops->set_noack_map(&rdev->wiphy, dev, noack_map); -} - struct get_key_cookie { struct sk_buff *msg; int error; @@ -2207,13 +2155,6 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info) nla_len(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]); } - if (info->attrs[NL80211_ATTR_PROBE_RESP]) { - params.probe_resp = - nla_data(info->attrs[NL80211_ATTR_PROBE_RESP]); - params.probe_resp_len = - nla_len(info->attrs[NL80211_ATTR_PROBE_RESP]); - } - err = call(&rdev->wiphy, dev, ¶ms); if (!err && params.interval) wdev->beacon_interval = params.interval; @@ -2246,7 +2187,6 @@ static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = { [NL80211_STA_FLAG_WME] = { .type = NLA_FLAG }, [NL80211_STA_FLAG_MFP] = { .type = NLA_FLAG }, [NL80211_STA_FLAG_AUTHENTICATED] = { .type = NLA_FLAG }, - [NL80211_STA_FLAG_TDLS_PEER] = { .type = NLA_FLAG }, }; static int parse_station_flags(struct genl_info *info, @@ -2390,9 +2330,6 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, if (sinfo->filled & STATION_INFO_TX_FAILED) NLA_PUT_U32(msg, NL80211_STA_INFO_TX_FAILED, sinfo->tx_failed); - if (sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT) - NLA_PUT_U32(msg, NL80211_STA_INFO_BEACON_LOSS, - 
sinfo->beacon_loss_count); if (sinfo->filled & STATION_INFO_BSS_PARAM) { bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM); if (!bss_param) @@ -2516,34 +2453,26 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) /* * Get vlan interface making sure it is running and on the right wiphy. */ -static struct net_device *get_vlan(struct genl_info *info, - struct cfg80211_registered_device *rdev) +static int get_vlan(struct genl_info *info, + struct cfg80211_registered_device *rdev, + struct net_device **vlan) { struct nlattr *vlanattr = info->attrs[NL80211_ATTR_STA_VLAN]; - struct net_device *v; - int ret; - - if (!vlanattr) - return NULL; - - v = dev_get_by_index(genl_info_net(info), nla_get_u32(vlanattr)); - if (!v) - return ERR_PTR(-ENODEV); - - if (!v->ieee80211_ptr || v->ieee80211_ptr->wiphy != &rdev->wiphy) { - ret = -EINVAL; - goto error; - } - - if (!netif_running(v)) { - ret = -ENETDOWN; - goto error; + *vlan = NULL; + + if (vlanattr) { + *vlan = dev_get_by_index(genl_info_net(info), + nla_get_u32(vlanattr)); + if (!*vlan) + return -ENODEV; + if (!(*vlan)->ieee80211_ptr) + return -EINVAL; + if ((*vlan)->ieee80211_ptr->wiphy != &rdev->wiphy) + return -EINVAL; + if (!netif_running(*vlan)) + return -ENETDOWN; } - - return v; - error: - dev_put(v); - return ERR_PTR(ret); + return 0; } static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) @@ -2582,9 +2511,6 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) params.ht_capa = nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); - if (!rdev->ops->change_station) - return -EOPNOTSUPP; - if (parse_station_flags(info, ¶ms)) return -EINVAL; @@ -2596,84 +2522,73 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) params.plink_state = nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]); + err = get_vlan(info, rdev, ¶ms.vlan); + if (err) + goto out; + + /* validate settings */ + err = 0; + switch (dev->ieee80211_ptr->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_GO: /* disallow mesh-specific things */ if (params.plink_action) - return -EINVAL; - - /* TDLS can't be set, ... */ - if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) - return -EINVAL; - /* - * ... but don't bother the driver with it. This works around - * a hostapd/wpa_supplicant issue -- it always includes the - * TLDS_PEER flag in the mask even for AP mode. - */ - params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER); - - /* accept only the listed bits */ - if (params.sta_flags_mask & - ~(BIT(NL80211_STA_FLAG_AUTHORIZED) | - BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) | - BIT(NL80211_STA_FLAG_WME) | - BIT(NL80211_STA_FLAG_MFP))) - return -EINVAL; - - /* must be last in here for error handling */ - params.vlan = get_vlan(info, rdev); - if (IS_ERR(params.vlan)) - return PTR_ERR(params.vlan); + err = -EINVAL; break; case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_STATION: /* disallow things sta doesn't support */ if (params.plink_action) - return -EINVAL; + err = -EINVAL; + if (params.vlan) + err = -EINVAL; + if (params.supported_rates && + !(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))) + err = -EINVAL; if (params.ht_capa) - return -EINVAL; + err = -EINVAL; if (params.listen_interval >= 0) - return -EINVAL; - /* - * Don't allow userspace to change the TDLS_PEER flag, - * but silently ignore attempts to change it since we - * don't have state here to verify that it doesn't try - * to change the flag. 
- */ - params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER); - - /* reject any changes other than AUTHORIZED */ - if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED)) - return -EINVAL; + err = -EINVAL; + if (params.sta_flags_mask & + ~(BIT(NL80211_STA_FLAG_AUTHORIZED) | + BIT(NL80211_STA_FLAG_TDLS_PEER))) + err = -EINVAL; + /* can't change the TDLS bit */ + if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) && + (params.sta_flags_mask & BIT(NL80211_STA_FLAG_TDLS_PEER))) + err = -EINVAL; break; case NL80211_IFTYPE_MESH_POINT: /* disallow things mesh doesn't support */ if (params.vlan) - return -EINVAL; + err = -EINVAL; if (params.ht_capa) - return -EINVAL; + err = -EINVAL; if (params.listen_interval >= 0) - return -EINVAL; - /* - * No special handling for TDLS here -- the userspace - * mesh code doesn't have this bug. - */ + err = -EINVAL; if (params.sta_flags_mask & ~(BIT(NL80211_STA_FLAG_AUTHENTICATED) | BIT(NL80211_STA_FLAG_MFP) | BIT(NL80211_STA_FLAG_AUTHORIZED))) - return -EINVAL; + err = -EINVAL; break; default: - return -EOPNOTSUPP; + err = -EINVAL; } - /* be aware of params.vlan when changing code here */ + if (err) + goto out; + + if (!rdev->ops->change_station) { + err = -EOPNOTSUPP; + goto out; + } err = rdev->ops->change_station(&rdev->wiphy, dev, mac_addr, ¶ms); + out: if (params.vlan) dev_put(params.vlan); @@ -2728,81 +2643,70 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) params.plink_action = nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]); - if (!rdev->ops->add_station) - return -EOPNOTSUPP; - if (parse_station_flags(info, ¶ms)) return -EINVAL; - switch (dev->ieee80211_ptr->iftype) { - case NL80211_IFTYPE_AP: - case NL80211_IFTYPE_AP_VLAN: - case NL80211_IFTYPE_P2P_GO: - /* parse WME attributes if sta is WME capable */ - if ((rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) && - (params.sta_flags_set & BIT(NL80211_STA_FLAG_WME)) && - info->attrs[NL80211_ATTR_STA_WME]) { - struct nlattr *tb[NL80211_STA_WME_MAX + 1]; - struct nlattr *nla; - - nla = info->attrs[NL80211_ATTR_STA_WME]; - err = nla_parse_nested(tb, NL80211_STA_WME_MAX, nla, - nl80211_sta_wme_policy); - if (err) - return err; + /* parse WME attributes if sta is WME capable */ + if ((rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) && + (params.sta_flags_set & BIT(NL80211_STA_FLAG_WME)) && + info->attrs[NL80211_ATTR_STA_WME]) { + struct nlattr *tb[NL80211_STA_WME_MAX + 1]; + struct nlattr *nla; - if (tb[NL80211_STA_WME_UAPSD_QUEUES]) - params.uapsd_queues = - nla_get_u8(tb[NL80211_STA_WME_UAPSD_QUEUES]); - if (params.uapsd_queues & - ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) - return -EINVAL; + nla = info->attrs[NL80211_ATTR_STA_WME]; + err = nla_parse_nested(tb, NL80211_STA_WME_MAX, nla, + nl80211_sta_wme_policy); + if (err) + return err; - if (tb[NL80211_STA_WME_MAX_SP]) - params.max_sp = - nla_get_u8(tb[NL80211_STA_WME_MAX_SP]); + if (tb[NL80211_STA_WME_UAPSD_QUEUES]) + params.uapsd_queues = + nla_get_u8(tb[NL80211_STA_WME_UAPSD_QUEUES]); + if (params.uapsd_queues & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) + return -EINVAL; - if (params.max_sp & - ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK) - return -EINVAL; + if (tb[NL80211_STA_WME_MAX_SP]) + params.max_sp = + nla_get_u8(tb[NL80211_STA_WME_MAX_SP]); - params.sta_modify_mask |= STATION_PARAM_APPLY_UAPSD; - } - /* TDLS peers cannot be added */ - if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) + if (params.max_sp & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK) return -EINVAL; - /* but don't bother the driver with it */ - 
params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER); - /* must be last in here for error handling */ - params.vlan = get_vlan(info, rdev); - if (IS_ERR(params.vlan)) - return PTR_ERR(params.vlan); - break; - case NL80211_IFTYPE_MESH_POINT: - /* TDLS peers cannot be added */ - if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) - return -EINVAL; - break; - case NL80211_IFTYPE_STATION: - /* Only TDLS peers can be added */ - if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))) - return -EINVAL; - /* Can only add if TDLS ... */ - if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS)) - return -EOPNOTSUPP; - /* ... with external setup is supported */ - if (!(rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)) - return -EOPNOTSUPP; - break; - default: - return -EOPNOTSUPP; + params.sta_modify_mask |= STATION_PARAM_APPLY_UAPSD; } - /* be aware of params.vlan when changing code here */ + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && + dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN && + dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT && + dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO && + dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) + return -EINVAL; + + /* + * Only managed stations can add TDLS peers, and only when the + * wiphy supports external TDLS setup. + */ + if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION && + !((params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) && + (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) && + (rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP))) + return -EINVAL; + + err = get_vlan(info, rdev, ¶ms.vlan); + if (err) + goto out; + + /* validate settings */ + err = 0; + + if (!rdev->ops->add_station) { + err = -EOPNOTSUPP; + goto out; + } err = rdev->ops->add_station(&rdev->wiphy, dev, mac_addr, ¶ms); + out: if (params.vlan) dev_put(params.vlan); return err; @@ -3223,8 +3127,6 @@ static int nl80211_get_mesh_config(struct sk_buff *skb, cur_params.dot11MeshHWMPactivePathTimeout); NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, cur_params.dot11MeshHWMPpreqMinInterval); - NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, - cur_params.dot11MeshHWMPperrMinInterval); NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, cur_params.dot11MeshHWMPnetDiameterTraversalTime); NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_ROOTMODE, @@ -3259,7 +3161,6 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A [NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT] = { .type = NLA_U16 }, [NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT] = { .type = NLA_U32 }, [NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL] = { .type = NLA_U16 }, - [NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL] = { .type = NLA_U16 }, [NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME] = { .type = NLA_U16 }, [NL80211_MESHCONF_HWMP_ROOTMODE] = { .type = NLA_U8 }, [NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 }, @@ -3334,9 +3235,6 @@ do {\ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval, mask, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, nla_get_u16); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval, - mask, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, - nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPnetDiameterTraversalTime, mask, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, @@ -3459,9 +3357,6 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info) NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2, cfg80211_regdomain->alpha2); - if (cfg80211_regdomain->dfs_region) - NLA_PUT_U8(msg, 
NL80211_ATTR_DFS_REGION, - cfg80211_regdomain->dfs_region); nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES); if (!nl_reg_rules) @@ -3520,7 +3415,6 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info) char *alpha2 = NULL; int rem_reg_rules = 0, r = 0; u32 num_rules = 0, rule_idx = 0, size_of_regd; - u8 dfs_region = 0; struct ieee80211_regdomain *rd = NULL; if (!info->attrs[NL80211_ATTR_REG_ALPHA2]) @@ -3531,9 +3425,6 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info) alpha2 = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]); - if (info->attrs[NL80211_ATTR_DFS_REGION]) - dfs_region = nla_get_u8(info->attrs[NL80211_ATTR_DFS_REGION]); - nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES], rem_reg_rules) { num_rules++; @@ -3561,13 +3452,6 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info) rd->alpha2[0] = alpha2[0]; rd->alpha2[1] = alpha2[1]; - /* - * Disable DFS master mode if the DFS region was - * not supported or known on this kernel. - */ - if (reg_supported_dfs_region(dfs_region)) - rd->dfs_region = dfs_region; - nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES], rem_reg_rules) { nla_parse(tb, NL80211_REG_RULE_ATTR_MAX, @@ -4475,9 +4359,6 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL; int err, ssid_len, ie_len = 0; bool use_mfp = false; - u32 flags = 0; - struct ieee80211_ht_cap *ht_capa = NULL; - struct ieee80211_ht_cap *ht_capa_mask = NULL; if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) return -EINVAL; @@ -4521,25 +4402,11 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_PREV_BSSID]) prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]); - if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT])) - flags |= ASSOC_REQ_DISABLE_HT; - - if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) - ht_capa_mask = - nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]); - - if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) { - if (!ht_capa_mask) - return -EINVAL; - ht_capa = nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); - } - err = nl80211_crypto_settings(rdev, info, &crypto, 1); if (!err) err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid, ssid, ssid_len, ie, ie_len, use_mfp, - &crypto, flags, ht_capa, - ht_capa_mask); + &crypto); return err; } @@ -4710,41 +4577,13 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) ibss.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); } - if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { - enum nl80211_channel_type channel_type; - - channel_type = nla_get_u32( - info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]); - if (channel_type != NL80211_CHAN_NO_HT && - channel_type != NL80211_CHAN_HT20 && - channel_type != NL80211_CHAN_HT40MINUS && - channel_type != NL80211_CHAN_HT40PLUS) - return -EINVAL; - - if (channel_type != NL80211_CHAN_NO_HT && - !(wiphy->features & NL80211_FEATURE_HT_IBSS)) - return -EINVAL; - - ibss.channel_type = channel_type; - } else { - ibss.channel_type = NL80211_CHAN_NO_HT; - } - - ibss.channel = rdev_freq_to_chan(rdev, - nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]), - ibss.channel_type); + ibss.channel = ieee80211_get_channel(wiphy, + nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ])); if (!ibss.channel || ibss.channel->flags & IEEE80211_CHAN_NO_IBSS || ibss.channel->flags & IEEE80211_CHAN_DISABLED) return -EINVAL; - /* Both channels should be 
able to initiate communication */ - if ((ibss.channel_type == NL80211_CHAN_HT40PLUS || - ibss.channel_type == NL80211_CHAN_HT40MINUS) && - !cfg80211_can_beacon_sec_chan(&rdev->wiphy, ibss.channel, - ibss.channel_type)) - return -EINVAL; - ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED]; ibss.privacy = !!info->attrs[NL80211_ATTR_PRIVACY]; @@ -4823,7 +4662,7 @@ static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info) static int nl80211_testmode_dump(struct sk_buff *skb, struct netlink_callback *cb) { - struct cfg80211_registered_device *rdev; + struct cfg80211_registered_device *dev; int err; long phy_idx; void *data = NULL; @@ -4841,21 +4680,9 @@ static int nl80211_testmode_dump(struct sk_buff *skb, nl80211_policy); if (err) return err; - if (nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]) { - phy_idx = nla_get_u32( - nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]); - } else { - struct net_device *netdev; - - err = get_rdev_dev_by_ifindex(sock_net(skb->sk), - nl80211_fam.attrbuf, - &rdev, &netdev); - if (err) - return err; - dev_put(netdev); - phy_idx = rdev->wiphy_idx; - cfg80211_unlock_rdev(rdev); - } + if (!nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]) + return -EINVAL; + phy_idx = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]); if (nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA]) cb->args[1] = (long)nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA]; @@ -4867,15 +4694,15 @@ static int nl80211_testmode_dump(struct sk_buff *skb, } mutex_lock(&cfg80211_mutex); - rdev = cfg80211_rdev_by_wiphy_idx(phy_idx); - if (!rdev) { + dev = cfg80211_rdev_by_wiphy_idx(phy_idx); + if (!dev) { mutex_unlock(&cfg80211_mutex); return -ENOENT; } - cfg80211_lock_rdev(rdev); + cfg80211_lock_rdev(dev); mutex_unlock(&cfg80211_mutex); - if (!rdev->ops->testmode_dump) { + if (!dev->ops->testmode_dump) { err = -EOPNOTSUPP; goto out_err; } @@ -4886,7 +4713,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb, NL80211_CMD_TESTMODE); struct nlattr *tmdata; - if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx) < 0) { + if (nla_put_u32(skb, NL80211_ATTR_WIPHY, dev->wiphy_idx) < 0) { genlmsg_cancel(skb, hdr); break; } @@ -4896,8 +4723,8 @@ static int nl80211_testmode_dump(struct sk_buff *skb, genlmsg_cancel(skb, hdr); break; } - err = rdev->ops->testmode_dump(&rdev->wiphy, skb, cb, - data, data_len); + err = dev->ops->testmode_dump(&dev->wiphy, skb, cb, + data, data_len); nla_nest_end(skb, tmdata); if (err == -ENOBUFS || err == -ENOENT) { @@ -4915,7 +4742,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb, /* see above */ cb->args[0] = phy_idx + 1; out_err: - cfg80211_unlock_rdev(rdev); + cfg80211_unlock_rdev(dev); return err; } @@ -5069,22 +4896,6 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) return PTR_ERR(connkeys); } - if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT])) - connect.flags |= ASSOC_REQ_DISABLE_HT; - - if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) - memcpy(&connect.ht_capa_mask, - nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]), - sizeof(connect.ht_capa_mask)); - - if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) { - if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) - return -EINVAL; - memcpy(&connect.ht_capa, - nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]), - sizeof(connect.ht_capa)); - } - err = cfg80211_connect(rdev, dev, &connect, connkeys); if (err) kfree(connkeys); @@ -5272,8 +5083,7 @@ static int nl80211_remain_on_channel(struct sk_buff *skb, duration > rdev->wiphy.max_remain_on_channel_duration) return -EINVAL; - if 
(!rdev->ops->remain_on_channel || - !(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)) + if (!rdev->ops->remain_on_channel) return -EOPNOTSUPP; if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { @@ -5461,13 +5271,12 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) bool channel_type_valid = false; u32 freq; int err; - void *hdr = NULL; + void *hdr; u64 cookie; - struct sk_buff *msg = NULL; + struct sk_buff *msg; unsigned int wait = 0; - bool offchan, no_cck, dont_wait_for_ack; - - dont_wait_for_ack = info->attrs[NL80211_ATTR_DONT_WAIT_FOR_ACK]; + bool offchan; + bool no_cck; if (!info->attrs[NL80211_ATTR_FRAME] || !info->attrs[NL80211_ATTR_WIPHY_FREQ]) @@ -5486,7 +5295,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) return -EOPNOTSUPP; if (info->attrs[NL80211_ATTR_DURATION]) { - if (!(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX)) + if (!rdev->ops->mgmt_tx_cancel_wait) return -EINVAL; wait = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]); } @@ -5504,9 +5313,6 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK]; - if (offchan && !(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX)) - return -EINVAL; - no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]); freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); @@ -5514,36 +5320,29 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) if (chan == NULL) return -EINVAL; - if (!dont_wait_for_ack) { - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!msg) - return -ENOMEM; + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; - hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, - NL80211_CMD_FRAME); + hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, + NL80211_CMD_FRAME); - if (IS_ERR(hdr)) { - err = PTR_ERR(hdr); - goto free_msg; - } + if (IS_ERR(hdr)) { + err = PTR_ERR(hdr); + goto free_msg; } - err = cfg80211_mlme_mgmt_tx(rdev, dev, chan, offchan, channel_type, channel_type_valid, wait, nla_data(info->attrs[NL80211_ATTR_FRAME]), nla_len(info->attrs[NL80211_ATTR_FRAME]), - no_cck, dont_wait_for_ack, &cookie); + no_cck, &cookie); if (err) goto free_msg; - if (msg) { - NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); - - genlmsg_end(msg, hdr); - return genlmsg_reply(msg, info); - } + NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); - return 0; + genlmsg_end(msg, hdr); + return genlmsg_reply(msg, info); nla_put_failure: err = -ENOBUFS; @@ -5741,11 +5540,6 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info) setup.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]); setup.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); - if (info->attrs[NL80211_ATTR_MCAST_RATE] && - !nl80211_parse_mcast_rate(rdev, setup.mcast_rate, - nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE]))) - return -EINVAL; - if (info->attrs[NL80211_ATTR_MESH_SETUP]) { /* parse additional setup parameters if given */ err = nl80211_parse_mesh_setup(info, &setup); @@ -6038,91 +5832,6 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info) return err; } -static int nl80211_register_unexpected_frame(struct sk_buff *skb, - struct genl_info *info) -{ - struct net_device *dev = info->user_ptr[1]; - struct wireless_dev *wdev = dev->ieee80211_ptr; - - if (wdev->iftype != NL80211_IFTYPE_AP && - wdev->iftype != NL80211_IFTYPE_P2P_GO) - return -EINVAL; - - if (wdev->ap_unexpected_nlpid) - return -EBUSY; - - wdev->ap_unexpected_nlpid = 
info->snd_pid; - return 0; -} - -static int nl80211_probe_client(struct sk_buff *skb, - struct genl_info *info) -{ - struct cfg80211_registered_device *rdev = info->user_ptr[0]; - struct net_device *dev = info->user_ptr[1]; - struct wireless_dev *wdev = dev->ieee80211_ptr; - struct sk_buff *msg; - void *hdr; - const u8 *addr; - u64 cookie; - int err; - - if (wdev->iftype != NL80211_IFTYPE_AP && - wdev->iftype != NL80211_IFTYPE_P2P_GO) - return -EOPNOTSUPP; - - if (!info->attrs[NL80211_ATTR_MAC]) - return -EINVAL; - - if (!rdev->ops->probe_client) - return -EOPNOTSUPP; - - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!msg) - return -ENOMEM; - - hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, - NL80211_CMD_PROBE_CLIENT); - - if (IS_ERR(hdr)) { - err = PTR_ERR(hdr); - goto free_msg; - } - - addr = nla_data(info->attrs[NL80211_ATTR_MAC]); - - err = rdev->ops->probe_client(&rdev->wiphy, dev, addr, &cookie); - if (err) - goto free_msg; - - NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); - - genlmsg_end(msg, hdr); - - return genlmsg_reply(msg, info); - - nla_put_failure: - err = -ENOBUFS; - free_msg: - nlmsg_free(msg); - return err; -} - -static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info) -{ - struct cfg80211_registered_device *rdev = info->user_ptr[0]; - - if (!(rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS)) - return -EOPNOTSUPP; - - if (rdev->ap_beacons_nlpid) - return -EBUSY; - - rdev->ap_beacons_nlpid = info->snd_pid; - - return 0; -} - #define NL80211_FLAG_NEED_WIPHY 0x01 #define NL80211_FLAG_NEED_NETDEV 0x02 #define NL80211_FLAG_NEED_RTNL 0x04 @@ -6150,8 +5859,7 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb, } info->user_ptr[0] = rdev; } else if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV) { - err = get_rdev_dev_by_ifindex(genl_info_net(info), info->attrs, - &rdev, &dev); + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); if (err) { if (rtnl) rtnl_unlock(); @@ -6679,39 +6387,6 @@ static struct genl_ops nl80211_ops[] = { .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, - { - .cmd = NL80211_CMD_UNEXPECTED_FRAME, - .doit = nl80211_register_unexpected_frame, - .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, - .internal_flags = NL80211_FLAG_NEED_NETDEV | - NL80211_FLAG_NEED_RTNL, - }, - { - .cmd = NL80211_CMD_PROBE_CLIENT, - .doit = nl80211_probe_client, - .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, - .internal_flags = NL80211_FLAG_NEED_NETDEV | - NL80211_FLAG_NEED_RTNL, - }, - { - .cmd = NL80211_CMD_REGISTER_BEACONS, - .doit = nl80211_register_beacons, - .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, - .internal_flags = NL80211_FLAG_NEED_WIPHY | - NL80211_FLAG_NEED_RTNL, - }, - { - .cmd = NL80211_CMD_SET_NOACK_MAP, - .doit = nl80211_set_noack_map, - .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, - .internal_flags = NL80211_FLAG_NEED_NETDEV | - NL80211_FLAG_NEED_RTNL, - }, - }; static struct genl_multicast_group nl80211_mlme_mcgrp = { @@ -6964,7 +6639,10 @@ void nl80211_send_reg_change_event(struct regulatory_request *request) if (wiphy_idx_valid(request->wiphy_idx)) NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx); - genlmsg_end(msg, hdr); + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } rcu_read_lock(); genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id, @@ -7000,7 +6678,10 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev, NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 
NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf); - genlmsg_end(msg, hdr); + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -7081,7 +6762,10 @@ static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev, NLA_PUT_FLAG(msg, NL80211_ATTR_TIMED_OUT); NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); - genlmsg_end(msg, hdr); + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -7137,7 +6821,10 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, if (resp_ie) NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie); - genlmsg_end(msg, hdr); + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -7175,7 +6862,10 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev, if (resp_ie) NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie); - genlmsg_end(msg, hdr); + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -7213,7 +6903,10 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev, if (ie) NLA_PUT(msg, NL80211_ATTR_IE, ie_len, ie); - genlmsg_end(msg, hdr); + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, GFP_KERNEL); @@ -7246,7 +6939,10 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev, NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); - genlmsg_end(msg, hdr); + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -7281,7 +6977,10 @@ void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev, if (ie_len && ie) NLA_PUT(msg, NL80211_ATTR_IE, ie_len , ie); - genlmsg_end(msg, hdr); + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -7320,7 +7019,10 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, if (tsc) NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc); - genlmsg_end(msg, hdr); + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -7371,7 +7073,10 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy, goto nla_put_failure; nla_nest_end(msg, nl_freq); - genlmsg_end(msg, hdr); + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } rcu_read_lock(); genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id, @@ -7414,7 +7119,10 @@ static void nl80211_send_remain_on_chan_event( if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL) NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration); - genlmsg_end(msg, hdr); + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -7485,7 +7193,10 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev, NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); - genlmsg_end(msg, hdr); + if (genlmsg_end(msg, 
hdr) < 0) { + nlmsg_free(msg); + return; + } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -7496,68 +7207,13 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev, nlmsg_free(msg); } -static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd, - const u8 *addr, gfp_t gfp) -{ - struct wireless_dev *wdev = dev->ieee80211_ptr; - struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); - struct sk_buff *msg; - void *hdr; - int err; - u32 nlpid = ACCESS_ONCE(wdev->ap_unexpected_nlpid); - - if (!nlpid) - return false; - - msg = nlmsg_new(100, gfp); - if (!msg) - return true; - - hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); - if (!hdr) { - nlmsg_free(msg); - return true; - } - - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); - - err = genlmsg_end(msg, hdr); - if (err < 0) { - nlmsg_free(msg); - return true; - } - - genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid); - return true; - - nla_put_failure: - genlmsg_cancel(msg, hdr); - nlmsg_free(msg); - return true; -} - -bool nl80211_unexpected_frame(struct net_device *dev, const u8 *addr, gfp_t gfp) -{ - return __nl80211_unexpected_frame(dev, NL80211_CMD_UNEXPECTED_FRAME, - addr, gfp); -} - -bool nl80211_unexpected_4addr_frame(struct net_device *dev, - const u8 *addr, gfp_t gfp) -{ - return __nl80211_unexpected_frame(dev, - NL80211_CMD_UNEXPECTED_4ADDR_FRAME, - addr, gfp); -} - int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, struct net_device *netdev, u32 nlpid, int freq, const u8 *buf, size_t len, gfp_t gfp) { struct sk_buff *msg; void *hdr; + int err; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) @@ -7574,9 +7230,16 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq); NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf); - genlmsg_end(msg, hdr); + err = genlmsg_end(msg, hdr); + if (err < 0) { + nlmsg_free(msg); + return err; + } - return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid); + err = genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid); + if (err < 0) + return err; + return 0; nla_put_failure: genlmsg_cancel(msg, hdr); @@ -7609,7 +7272,10 @@ void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev, if (ack) NLA_PUT_FLAG(msg, NL80211_ATTR_ACK); - genlmsg_end(msg, hdr); + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp); return; @@ -7651,7 +7317,10 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev, nla_nest_end(msg, pinfoattr); - genlmsg_end(msg, hdr); + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -7693,7 +7362,10 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev, nla_nest_end(msg, rekey_attr); - genlmsg_end(msg, hdr); + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -7736,7 +7408,10 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev, nla_nest_end(msg, attr); - genlmsg_end(msg, hdr); + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -7778,45 +7453,7 @@ nl80211_send_cqm_pktloss_notify(struct 
cfg80211_registered_device *rdev, nla_nest_end(msg, pinfoattr); - genlmsg_end(msg, hdr); - - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); - return; - - nla_put_failure: - genlmsg_cancel(msg, hdr); - nlmsg_free(msg); -} - -void cfg80211_probe_status(struct net_device *dev, const u8 *addr, - u64 cookie, bool acked, gfp_t gfp) -{ - struct wireless_dev *wdev = dev->ieee80211_ptr; - struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); - struct sk_buff *msg; - void *hdr; - int err; - - msg = nlmsg_new(NLMSG_GOODSIZE, gfp); - if (!msg) - return; - - hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PROBE_CLIENT); - if (!hdr) { - nlmsg_free(msg); - return; - } - - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); - NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); - if (acked) - NLA_PUT_FLAG(msg, NL80211_ATTR_ACK); - - err = genlmsg_end(msg, hdr); - if (err < 0) { + if (genlmsg_end(msg, hdr) < 0) { nlmsg_free(msg); return; } @@ -7829,45 +7466,6 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr, genlmsg_cancel(msg, hdr); nlmsg_free(msg); } -EXPORT_SYMBOL(cfg80211_probe_status); - -void cfg80211_report_obss_beacon(struct wiphy *wiphy, - const u8 *frame, size_t len, - int freq, gfp_t gfp) -{ - struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); - struct sk_buff *msg; - void *hdr; - u32 nlpid = ACCESS_ONCE(rdev->ap_beacons_nlpid); - - if (!nlpid) - return; - - msg = nlmsg_new(len + 100, gfp); - if (!msg) - return; - - hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME); - if (!hdr) { - nlmsg_free(msg); - return; - } - - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - if (freq) - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq); - NLA_PUT(msg, NL80211_ATTR_FRAME, len, frame); - - genlmsg_end(msg, hdr); - - genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid); - return; - - nla_put_failure: - genlmsg_cancel(msg, hdr); - nlmsg_free(msg); -} -EXPORT_SYMBOL(cfg80211_report_obss_beacon); static int nl80211_netlink_notify(struct notifier_block * nb, unsigned long state, @@ -7882,12 +7480,9 @@ static int nl80211_netlink_notify(struct notifier_block * nb, rcu_read_lock(); - list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) { + list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) list_for_each_entry_rcu(wdev, &rdev->netdev_list, list) cfg80211_mlme_unregister_socket(wdev, notify->pid); - if (rdev->ap_beacons_nlpid == notify->pid) - rdev->ap_beacons_nlpid = 0; - } rcu_read_unlock(); diff --git a/trunk/net/wireless/nl80211.h b/trunk/net/wireless/nl80211.h index 12bf4d185abe..f24a1fbeaf19 100644 --- a/trunk/net/wireless/nl80211.h +++ b/trunk/net/wireless/nl80211.h @@ -117,9 +117,4 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev, struct net_device *netdev, int index, const u8 *bssid, bool preauth, gfp_t gfp); -bool nl80211_unexpected_frame(struct net_device *dev, - const u8 *addr, gfp_t gfp); -bool nl80211_unexpected_4addr_frame(struct net_device *dev, - const u8 *addr, gfp_t gfp); - #endif /* __NET_WIRELESS_NL80211_H */ diff --git a/trunk/net/wireless/reg.c b/trunk/net/wireless/reg.c index f65feaad155f..3302c56f60d1 100644 --- a/trunk/net/wireless/reg.c +++ b/trunk/net/wireless/reg.c @@ -2,22 +2,13 @@ * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2007 Johannes Berg - * Copyright 2008-2011 Luis R. 
Rodriguez + * Copyright 2008 Luis R. Rodriguez * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ - /** * DOC: Wireless regulatory infrastructure * @@ -882,22 +873,10 @@ static void handle_channel(struct wiphy *wiphy, chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags); chan->max_antenna_gain = min(chan->orig_mag, (int) MBI_TO_DBI(power_rule->max_antenna_gain)); - if (chan->orig_mpwr) { - /* - * Devices that have their own custom regulatory domain - * but also use WIPHY_FLAG_STRICT_REGULATORY will follow the - * passed country IE power settings. - */ - if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && - wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY && - wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) { - chan->max_power = - MBM_TO_DBM(power_rule->max_eirp); - } else { - chan->max_power = min(chan->orig_mpwr, - (int) MBM_TO_DBM(power_rule->max_eirp)); - } - } else + if (chan->orig_mpwr) + chan->max_power = min(chan->orig_mpwr, + (int) MBM_TO_DBM(power_rule->max_eirp)); + else chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); } @@ -1160,8 +1139,6 @@ static void wiphy_update_regulatory(struct wiphy *wiphy, if (ignore_reg_update(wiphy, initiator)) return; - last_request->dfs_region = cfg80211_regdomain->dfs_region; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { if (wiphy->bands[band]) handle_band(wiphy, band, initiator); @@ -1184,21 +1161,9 @@ void regulatory_update(struct wiphy *wiphy, static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator) { struct cfg80211_registered_device *rdev; - struct wiphy *wiphy; - list_for_each_entry(rdev, &cfg80211_rdev_list, list) { - wiphy = &rdev->wiphy; - wiphy_update_regulatory(wiphy, initiator); - /* - * Regulatory updates set by CORE are ignored for custom - * regulatory cards. Let us notify the changes to the driver, - * as some drivers used this to restore its orig_* reg domain. 
- */ - if (initiator == NL80211_REGDOM_SET_BY_CORE && - wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY && - wiphy->reg_notifier) - wiphy->reg_notifier(wiphy, last_request); - } + list_for_each_entry(rdev, &cfg80211_rdev_list, list) + wiphy_update_regulatory(&rdev->wiphy, initiator); } static void handle_channel_custom(struct wiphy *wiphy, @@ -1489,18 +1454,18 @@ static int __regulatory_hint(struct wiphy *wiphy, } /* This processes *all* regulatory hints */ -static void reg_process_hint(struct regulatory_request *reg_request, - enum nl80211_reg_initiator reg_initiator) +static void reg_process_hint(struct regulatory_request *reg_request) { int r = 0; struct wiphy *wiphy = NULL; + enum nl80211_reg_initiator initiator = reg_request->initiator; BUG_ON(!reg_request->alpha2); if (wiphy_idx_valid(reg_request->wiphy_idx)) wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx); - if (reg_initiator == NL80211_REGDOM_SET_BY_DRIVER && + if (reg_request->initiator == NL80211_REGDOM_SET_BY_DRIVER && !wiphy) { kfree(reg_request); return; @@ -1510,7 +1475,7 @@ static void reg_process_hint(struct regulatory_request *reg_request, /* This is required so that the orig_* parameters are saved */ if (r == -EALREADY && wiphy && wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) { - wiphy_update_regulatory(wiphy, reg_initiator); + wiphy_update_regulatory(wiphy, initiator); return; } @@ -1519,7 +1484,7 @@ static void reg_process_hint(struct regulatory_request *reg_request, * source of bogus requests. */ if (r != -EALREADY && - reg_initiator == NL80211_REGDOM_SET_BY_USER) + reg_request->initiator == NL80211_REGDOM_SET_BY_USER) schedule_delayed_work(®_timeout, msecs_to_jiffies(3142)); } @@ -1556,7 +1521,7 @@ static void reg_process_pending_hints(void) spin_unlock(®_requests_lock); - reg_process_hint(reg_request, reg_request->initiator); + reg_process_hint(reg_request); out: mutex_unlock(®_mutex); @@ -1801,26 +1766,6 @@ static void restore_alpha2(char *alpha2, bool reset_user) REG_DBG_PRINT("Restoring regulatory settings\n"); } -static void restore_custom_reg_settings(struct wiphy *wiphy) -{ - struct ieee80211_supported_band *sband; - enum ieee80211_band band; - struct ieee80211_channel *chan; - int i; - - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { - sband = wiphy->bands[band]; - if (!sband) - continue; - for (i = 0; i < sband->n_channels; i++) { - chan = &sband->channels[i]; - chan->flags = chan->orig_flags; - chan->max_antenna_gain = chan->orig_mag; - chan->max_power = chan->orig_mpwr; - } - } -} - /* * Restoring regulatory settings involves ingoring any * possibly stale country IE information and user regulatory @@ -1839,11 +1784,9 @@ static void restore_custom_reg_settings(struct wiphy *wiphy) static void restore_regulatory_settings(bool reset_user) { char alpha2[2]; - char world_alpha2[2]; struct reg_beacon *reg_beacon, *btmp; struct regulatory_request *reg_request, *tmp; LIST_HEAD(tmp_reg_req_list); - struct cfg80211_registered_device *rdev; mutex_lock(&cfg80211_mutex); mutex_lock(®_mutex); @@ -1891,18 +1834,11 @@ static void restore_regulatory_settings(bool reset_user) /* First restore to the basic regulatory settings */ cfg80211_regdomain = cfg80211_world_regdom; - world_alpha2[0] = cfg80211_regdomain->alpha2[0]; - world_alpha2[1] = cfg80211_regdomain->alpha2[1]; - - list_for_each_entry(rdev, &cfg80211_rdev_list, list) { - if (rdev->wiphy.flags & WIPHY_FLAG_CUSTOM_REGULATORY) - restore_custom_reg_settings(&rdev->wiphy); - } mutex_unlock(®_mutex); mutex_unlock(&cfg80211_mutex); - 
regulatory_hint_core(world_alpha2); + regulatory_hint_core(cfg80211_regdomain->alpha2); /* * This restores the ieee80211_regdom module parameter @@ -1999,7 +1935,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd) const struct ieee80211_freq_range *freq_range = NULL; const struct ieee80211_power_rule *power_rule = NULL; - pr_info(" (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp)\n"); + pr_info(" (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp)\n"); for (i = 0; i < rd->n_reg_rules; i++) { reg_rule = &rd->reg_rules[i]; @@ -2011,14 +1947,14 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd) * in certain regions */ if (power_rule->max_antenna_gain) - pr_info(" (%d KHz - %d KHz @ %d KHz), (%d mBi, %d mBm)\n", + pr_info(" (%d KHz - %d KHz @ %d KHz), (%d mBi, %d mBm)\n", freq_range->start_freq_khz, freq_range->end_freq_khz, freq_range->max_bandwidth_khz, power_rule->max_antenna_gain, power_rule->max_eirp); else - pr_info(" (%d KHz - %d KHz @ %d KHz), (N/A, %d mBm)\n", + pr_info(" (%d KHz - %d KHz @ %d KHz), (N/A, %d mBm)\n", freq_range->start_freq_khz, freq_range->end_freq_khz, freq_range->max_bandwidth_khz, @@ -2026,42 +1962,6 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd) } } -bool reg_supported_dfs_region(u8 dfs_region) -{ - switch (dfs_region) { - case NL80211_DFS_UNSET: - case NL80211_DFS_FCC: - case NL80211_DFS_ETSI: - case NL80211_DFS_JP: - return true; - default: - REG_DBG_PRINT("Ignoring uknown DFS master region: %d\n", - dfs_region); - return false; - } -} - -static void print_dfs_region(u8 dfs_region) -{ - if (!dfs_region) - return; - - switch (dfs_region) { - case NL80211_DFS_FCC: - pr_info(" DFS Master region FCC"); - break; - case NL80211_DFS_ETSI: - pr_info(" DFS Master region ETSI"); - break; - case NL80211_DFS_JP: - pr_info(" DFS Master region JP"); - break; - default: - pr_info(" DFS Master region Uknown"); - break; - } -} - static void print_regdomain(const struct ieee80211_regdomain *rd) { @@ -2089,7 +1989,6 @@ static void print_regdomain(const struct ieee80211_regdomain *rd) pr_info("Regulatory domain changed to country: %c%c\n", rd->alpha2[0], rd->alpha2[1]); } - print_dfs_region(rd->dfs_region); print_rd_rules(rd); } diff --git a/trunk/net/wireless/reg.h b/trunk/net/wireless/reg.h index e2aaaf525a22..4a56799d868d 100644 --- a/trunk/net/wireless/reg.h +++ b/trunk/net/wireless/reg.h @@ -1,26 +1,10 @@ #ifndef __NET_WIRELESS_REG_H #define __NET_WIRELESS_REG_H -/* - * Copyright 2008-2011 Luis R. Rodriguez - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ extern const struct ieee80211_regdomain *cfg80211_regdomain; bool is_world_regdom(const char *alpha2); bool reg_is_valid_request(const char *alpha2); -bool reg_supported_dfs_region(u8 dfs_region); int regulatory_hint_user(const char *alpha2); diff --git a/trunk/net/wireless/regdb.h b/trunk/net/wireless/regdb.h index 3279cfcefb0c..818222c92513 100644 --- a/trunk/net/wireless/regdb.h +++ b/trunk/net/wireless/regdb.h @@ -1,22 +1,6 @@ #ifndef __REGDB_H__ #define __REGDB_H__ -/* - * Copyright 2009 John W. Linville - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - extern const struct ieee80211_regdomain *reg_regdb[]; extern int reg_regdb_size; diff --git a/trunk/net/wireless/scan.c b/trunk/net/wireless/scan.c index 31119e32e092..dc23b31594e0 100644 --- a/trunk/net/wireless/scan.c +++ b/trunk/net/wireless/scan.c @@ -355,8 +355,8 @@ static bool is_mesh(struct cfg80211_bss *a, sizeof(struct ieee80211_meshconf_ie) - 2) == 0; } -static int cmp_bss_core(struct cfg80211_bss *a, - struct cfg80211_bss *b) +static int cmp_bss(struct cfg80211_bss *a, + struct cfg80211_bss *b) { int r; @@ -378,15 +378,7 @@ static int cmp_bss_core(struct cfg80211_bss *a, b->len_information_elements); } - return memcmp(a->bssid, b->bssid, ETH_ALEN); -} - -static int cmp_bss(struct cfg80211_bss *a, - struct cfg80211_bss *b) -{ - int r; - - r = cmp_bss_core(a, b); + r = memcmp(a->bssid, b->bssid, ETH_ALEN); if (r) return r; @@ -397,52 +389,6 @@ static int cmp_bss(struct cfg80211_bss *a, b->len_information_elements); } -static int cmp_hidden_bss(struct cfg80211_bss *a, - struct cfg80211_bss *b) -{ - const u8 *ie1; - const u8 *ie2; - int i; - int r; - - r = cmp_bss_core(a, b); - if (r) - return r; - - ie1 = cfg80211_find_ie(WLAN_EID_SSID, - a->information_elements, - a->len_information_elements); - ie2 = cfg80211_find_ie(WLAN_EID_SSID, - b->information_elements, - b->len_information_elements); - - /* Key comparator must use same algorithm in any rb-tree - * search function (order is important), otherwise ordering - * of items in the tree is broken and search gives incorrect - * results. This code uses same order as cmp_ies() does. 
*/ - - /* sort missing IE before (left of) present IE */ - if (!ie1) - return -1; - if (!ie2) - return 1; - - /* zero-size SSID is used as an indication of the hidden bss */ - if (!ie2[1]) - return 0; - - /* sort by length first, then by contents */ - if (ie1[1] != ie2[1]) - return ie2[1] - ie1[1]; - - /* zeroed SSID ie is another indication of a hidden bss */ - for (i = 0; i < ie2[1]; i++) - if (ie2[i + 2]) - return -1; - - return 0; -} - struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy, struct ieee80211_channel *channel, const u8 *bssid, @@ -558,48 +504,6 @@ rb_find_bss(struct cfg80211_registered_device *dev, return NULL; } -static struct cfg80211_internal_bss * -rb_find_hidden_bss(struct cfg80211_registered_device *dev, - struct cfg80211_internal_bss *res) -{ - struct rb_node *n = dev->bss_tree.rb_node; - struct cfg80211_internal_bss *bss; - int r; - - while (n) { - bss = rb_entry(n, struct cfg80211_internal_bss, rbn); - r = cmp_hidden_bss(&res->pub, &bss->pub); - - if (r == 0) - return bss; - else if (r < 0) - n = n->rb_left; - else - n = n->rb_right; - } - - return NULL; -} - -static void -copy_hidden_ies(struct cfg80211_internal_bss *res, - struct cfg80211_internal_bss *hidden) -{ - if (unlikely(res->pub.beacon_ies)) - return; - if (WARN_ON(!hidden->pub.beacon_ies)) - return; - - res->pub.beacon_ies = kmalloc(hidden->pub.len_beacon_ies, GFP_ATOMIC); - if (unlikely(!res->pub.beacon_ies)) - return; - - res->beacon_ies_allocated = true; - res->pub.len_beacon_ies = hidden->pub.len_beacon_ies; - memcpy(res->pub.beacon_ies, hidden->pub.beacon_ies, - res->pub.len_beacon_ies); -} - static struct cfg80211_internal_bss * cfg80211_bss_update(struct cfg80211_registered_device *dev, struct cfg80211_internal_bss *res) @@ -703,21 +607,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, kref_put(&res->ref, bss_release); } else { - struct cfg80211_internal_bss *hidden; - - /* First check if the beacon is a probe response from - * a hidden bss. If so, copy beacon ies (with nullified - * ssid) into the probe response bss entry (with real ssid). - * It is required basically for PSM implementation - * (probe responses do not contain tim ie) */ - - /* TODO: The code is not trying to update existing probe - * response bss entries when beacon ies are - * getting changed. 
*/ - hidden = rb_find_hidden_bss(dev, res); - if (hidden) - copy_hidden_ies(res, hidden); - /* this "consumes" the reference */ list_add_tail(&res->list, &dev->bss_list); rb_insert_bss(dev, res); diff --git a/trunk/net/wireless/sme.c b/trunk/net/wireless/sme.c index 7b9ecaed96be..0acfdc9beacf 100644 --- a/trunk/net/wireless/sme.c +++ b/trunk/net/wireless/sme.c @@ -190,9 +190,7 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev) prev_bssid, params->ssid, params->ssid_len, params->ie, params->ie_len, - false, ¶ms->crypto, - params->flags, ¶ms->ht_capa, - ¶ms->ht_capa_mask); + false, ¶ms->crypto); if (err) __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, NULL, 0, @@ -553,35 +551,45 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid, EXPORT_SYMBOL(cfg80211_connect_result); void __cfg80211_roamed(struct wireless_dev *wdev, - struct cfg80211_bss *bss, + struct ieee80211_channel *channel, + const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len) { + struct cfg80211_bss *bss; #ifdef CONFIG_CFG80211_WEXT union iwreq_data wrqu; #endif + ASSERT_WDEV_LOCK(wdev); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) - goto out; + return; if (wdev->sme_state != CFG80211_SME_CONNECTED) - goto out; + return; /* internal error -- how did we get to CONNECTED w/o BSS? */ if (WARN_ON(!wdev->current_bss)) { - goto out; + return; } cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(&wdev->current_bss->pub); wdev->current_bss = NULL; + bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, + wdev->ssid, wdev->ssid_len, + WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); + + if (WARN_ON(!bss)) + return; + cfg80211_hold_bss(bss_from_pub(bss)); wdev->current_bss = bss_from_pub(bss); - nl80211_send_roamed(wiphy_to_dev(wdev->wiphy), wdev->netdev, bss->bssid, + nl80211_send_roamed(wiphy_to_dev(wdev->wiphy), wdev->netdev, bssid, req_ie, req_ie_len, resp_ie, resp_ie_len, GFP_KERNEL); @@ -602,15 +610,11 @@ void __cfg80211_roamed(struct wireless_dev *wdev, memset(&wrqu, 0, sizeof(wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; - memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN); - memcpy(wdev->wext.prev_bssid, bss->bssid, ETH_ALEN); + memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN); + memcpy(wdev->wext.prev_bssid, bssid, ETH_ALEN); wdev->wext.prev_bssid_valid = true; wireless_send_event(wdev->netdev, SIOCGIWAP, &wrqu, NULL); #endif - - return; -out: - cfg80211_put_bss(bss); } void cfg80211_roamed(struct net_device *dev, @@ -618,27 +622,6 @@ void cfg80211_roamed(struct net_device *dev, const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp) -{ - struct wireless_dev *wdev = dev->ieee80211_ptr; - struct cfg80211_bss *bss; - - CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED); - - bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid, - wdev->ssid_len, WLAN_CAPABILITY_ESS, - WLAN_CAPABILITY_ESS); - if (WARN_ON(!bss)) - return; - - cfg80211_roamed_bss(dev, bss, req_ie, req_ie_len, resp_ie, - resp_ie_len, gfp); -} -EXPORT_SYMBOL(cfg80211_roamed); - -void cfg80211_roamed_bss(struct net_device *dev, - struct cfg80211_bss *bss, const u8 *req_ie, - size_t req_ie_len, const u8 *resp_ie, - size_t resp_ie_len, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); @@ -647,30 +630,26 @@ void cfg80211_roamed_bss(struct net_device *dev, CFG80211_DEV_WARN_ON(wdev->sme_state != 
CFG80211_SME_CONNECTED); - if (WARN_ON(!bss)) - return; - ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp); - if (!ev) { - cfg80211_put_bss(bss); + if (!ev) return; - } ev->type = EVENT_ROAMED; + ev->rm.channel = channel; + memcpy(ev->rm.bssid, bssid, ETH_ALEN); ev->rm.req_ie = ((u8 *)ev) + sizeof(*ev); ev->rm.req_ie_len = req_ie_len; memcpy((void *)ev->rm.req_ie, req_ie, req_ie_len); ev->rm.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len; ev->rm.resp_ie_len = resp_ie_len; memcpy((void *)ev->rm.resp_ie, resp_ie, resp_ie_len); - ev->rm.bss = bss; spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } -EXPORT_SYMBOL(cfg80211_roamed_bss); +EXPORT_SYMBOL(cfg80211_roamed); void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, size_t ie_len, u16 reason, bool from_ap) @@ -795,9 +774,6 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev, wdev->connect_keys = NULL; } - cfg80211_oper_and_ht_capa(&connect->ht_capa_mask, - rdev->wiphy.ht_capa_mod_mask); - if (connkeys && connkeys->def >= 0) { int idx; u32 cipher; diff --git a/trunk/net/wireless/util.c b/trunk/net/wireless/util.c index 9aa9db6c8141..4dde429441d2 100644 --- a/trunk/net/wireless/util.c +++ b/trunk/net/wireless/util.c @@ -7,9 +7,9 @@ #include #include #include +#include #include #include -#include #include "core.h" struct ieee80211_rate * @@ -240,6 +240,17 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, return 0; } +/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ +/* Ethernet-II snap header (RFC1042 for most EtherTypes) */ +const unsigned char rfc1042_header[] __aligned(2) = + { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; +EXPORT_SYMBOL(rfc1042_header); + +/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ +const unsigned char bridge_tunnel_header[] __aligned(2) = + { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; +EXPORT_SYMBOL(bridge_tunnel_header); + unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc) { unsigned int hdrlen = 24; @@ -651,10 +662,7 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb) switch (skb->protocol) { case htons(ETH_P_IP): - dscp = ipv4_get_dsfield(ip_hdr(skb)) & 0xfc; - break; - case htons(ETH_P_IPV6): - dscp = ipv6_get_dsfield(ipv6_hdr(skb)) & 0xfc; + dscp = ip_hdr(skb)->tos & 0xfc; break; default: return 0; @@ -744,9 +752,9 @@ static void cfg80211_process_wdev_events(struct wireless_dev *wdev) NULL); break; case EVENT_ROAMED: - __cfg80211_roamed(wdev, ev->rm.bss, ev->rm.req_ie, - ev->rm.req_ie_len, ev->rm.resp_ie, - ev->rm.resp_ie_len); + __cfg80211_roamed(wdev, ev->rm.channel, ev->rm.bssid, + ev->rm.req_ie, ev->rm.req_ie_len, + ev->rm.resp_ie, ev->rm.resp_ie_len); break; case EVENT_DISCONNECTED: __cfg80211_disconnected(wdev->netdev, @@ -1043,13 +1051,169 @@ int ieee80211_get_ratemask(struct ieee80211_supported_band *sband, return 0; } -/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ -/* Ethernet-II snap header (RFC1042 for most EtherTypes) */ -const unsigned char rfc1042_header[] __aligned(2) = - { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; -EXPORT_SYMBOL(rfc1042_header); +u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, + struct ieee802_11_elems *elems, + u64 filter, u32 crc) +{ + size_t left = len; + u8 *pos = start; + bool calc_crc = filter != 0; -/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ -const unsigned char bridge_tunnel_header[] 
__aligned(2) = - { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; -EXPORT_SYMBOL(bridge_tunnel_header); + memset(elems, 0, sizeof(*elems)); + elems->ie_start = start; + elems->total_len = len; + + while (left >= 2) { + u8 id, elen; + + id = *pos++; + elen = *pos++; + left -= 2; + + if (elen > left) + break; + + if (calc_crc && id < 64 && (filter & (1ULL << id))) + crc = crc32_be(crc, pos - 2, elen + 2); + + switch (id) { + case WLAN_EID_SSID: + elems->ssid = pos; + elems->ssid_len = elen; + break; + case WLAN_EID_SUPP_RATES: + elems->supp_rates = pos; + elems->supp_rates_len = elen; + break; + case WLAN_EID_FH_PARAMS: + elems->fh_params = pos; + elems->fh_params_len = elen; + break; + case WLAN_EID_DS_PARAMS: + elems->ds_params = pos; + elems->ds_params_len = elen; + break; + case WLAN_EID_CF_PARAMS: + elems->cf_params = pos; + elems->cf_params_len = elen; + break; + case WLAN_EID_TIM: + if (elen >= sizeof(struct ieee80211_tim_ie)) { + elems->tim = (void *)pos; + elems->tim_len = elen; + } + break; + case WLAN_EID_IBSS_PARAMS: + elems->ibss_params = pos; + elems->ibss_params_len = elen; + break; + case WLAN_EID_CHALLENGE: + elems->challenge = pos; + elems->challenge_len = elen; + break; + case WLAN_EID_VENDOR_SPECIFIC: + if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 && + pos[2] == 0xf2) { + /* Microsoft OUI (00:50:F2) */ + + if (calc_crc) + crc = crc32_be(crc, pos - 2, elen + 2); + + if (pos[3] == 1) { + /* OUI Type 1 - WPA IE */ + elems->wpa = pos; + elems->wpa_len = elen; + } else if (elen >= 5 && pos[3] == 2) { + /* OUI Type 2 - WMM IE */ + if (pos[4] == 0) { + elems->wmm_info = pos; + elems->wmm_info_len = elen; + } else if (pos[4] == 1) { + elems->wmm_param = pos; + elems->wmm_param_len = elen; + } + } + } + break; + case WLAN_EID_RSN: + elems->rsn = pos; + elems->rsn_len = elen; + break; + case WLAN_EID_ERP_INFO: + elems->erp_info = pos; + elems->erp_info_len = elen; + break; + case WLAN_EID_EXT_SUPP_RATES: + elems->ext_supp_rates = pos; + elems->ext_supp_rates_len = elen; + break; + case WLAN_EID_HT_CAPABILITY: + if (elen >= sizeof(struct ieee80211_ht_cap)) + elems->ht_cap_elem = (void *)pos; + break; + case WLAN_EID_HT_INFORMATION: + if (elen >= sizeof(struct ieee80211_ht_info)) + elems->ht_info_elem = (void *)pos; + break; + case WLAN_EID_MESH_ID: + elems->mesh_id = pos; + elems->mesh_id_len = elen; + break; + case WLAN_EID_MESH_CONFIG: + if (elen >= sizeof(struct ieee80211_meshconf_ie)) + elems->mesh_config = (void *)pos; + break; + case WLAN_EID_PEER_MGMT: + elems->peering = pos; + elems->peering_len = elen; + break; + case WLAN_EID_PREQ: + elems->preq = pos; + elems->preq_len = elen; + break; + case WLAN_EID_PREP: + elems->prep = pos; + elems->prep_len = elen; + break; + case WLAN_EID_PERR: + elems->perr = pos; + elems->perr_len = elen; + break; + case WLAN_EID_RANN: + if (elen >= sizeof(struct ieee80211_rann_ie)) + elems->rann = (void *)pos; + break; + case WLAN_EID_CHANNEL_SWITCH: + elems->ch_switch_elem = pos; + elems->ch_switch_elem_len = elen; + break; + case WLAN_EID_QUIET: + if (!elems->quiet_elem) { + elems->quiet_elem = pos; + elems->quiet_elem_len = elen; + } + elems->num_of_quiet_elem++; + break; + case WLAN_EID_COUNTRY: + elems->country_elem = pos; + elems->country_elem_len = elen; + break; + case WLAN_EID_PWR_CONSTRAINT: + elems->pwr_constr_elem = pos; + elems->pwr_constr_elem_len = elen; + break; + case WLAN_EID_TIMEOUT_INTERVAL: + elems->timeout_int = pos; + elems->timeout_int_len = elen; + break; + default: + break; + } + + left -= elen; + pos += elen; + } + + return 
crc; +} +EXPORT_SYMBOL(ieee802_11_parse_elems_crc); diff --git a/trunk/net/wireless/wext-compat.c b/trunk/net/wireless/wext-compat.c index 3c24eb97e9d7..6897436b1d3f 100644 --- a/trunk/net/wireless/wext-compat.c +++ b/trunk/net/wireless/wext-compat.c @@ -819,24 +819,12 @@ static int cfg80211_wext_giwfreq(struct net_device *dev, struct iw_freq *freq, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; - struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); - struct ieee80211_channel *chan; switch (wdev->iftype) { case NL80211_IFTYPE_STATION: return cfg80211_mgd_wext_giwfreq(dev, info, freq, extra); case NL80211_IFTYPE_ADHOC: return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra); - case NL80211_IFTYPE_MONITOR: - if (!rdev->ops->get_channel) - return -EINVAL; - - chan = rdev->ops->get_channel(wdev->wiphy); - if (!chan) - return -EINVAL; - freq->m = chan->center_freq; - freq->e = 6; - return 0; default: if (!wdev->channel) return -EINVAL; diff --git a/trunk/net/x25/af_x25.c b/trunk/net/x25/af_x25.c index a306bc66000e..3e16c6abde4f 100644 --- a/trunk/net/x25/af_x25.c +++ b/trunk/net/x25/af_x25.c @@ -232,7 +232,7 @@ static int x25_device_event(struct notifier_block *this, unsigned long event, return NOTIFY_DONE; if (dev->type == ARPHRD_X25 -#if IS_ENABLED(CONFIG_LLC) +#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE) || dev->type == ARPHRD_ETHER #endif ) { diff --git a/trunk/net/x25/x25_dev.c b/trunk/net/x25/x25_dev.c index f0ce862d1f46..fa2b41888bd9 100644 --- a/trunk/net/x25/x25_dev.c +++ b/trunk/net/x25/x25_dev.c @@ -161,7 +161,7 @@ void x25_establish_link(struct x25_neigh *nb) *ptr = X25_IFACE_CONNECT; break; -#if IS_ENABLED(CONFIG_LLC) +#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE) case ARPHRD_ETHER: return; #endif @@ -180,7 +180,7 @@ void x25_terminate_link(struct x25_neigh *nb) struct sk_buff *skb; unsigned char *ptr; -#if IS_ENABLED(CONFIG_LLC) +#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE) if (nb->dev->type == ARPHRD_ETHER) return; #endif @@ -213,7 +213,7 @@ void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb) *dptr = X25_IFACE_DATA; break; -#if IS_ENABLED(CONFIG_LLC) +#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE) case ARPHRD_ETHER: kfree_skb(skb); return; diff --git a/trunk/net/x25/x25_route.c b/trunk/net/x25/x25_route.c index cf6366270054..97d77c532d8c 100644 --- a/trunk/net/x25/x25_route.c +++ b/trunk/net/x25/x25_route.c @@ -134,7 +134,7 @@ struct net_device *x25_dev_get(char *devname) if (dev && (!(dev->flags & IFF_UP) || (dev->type != ARPHRD_X25 -#if IS_ENABLED(CONFIG_LLC) +#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE) && dev->type != ARPHRD_ETHER #endif ))){ diff --git a/trunk/net/xfrm/xfrm_policy.c b/trunk/net/xfrm/xfrm_policy.c index 7661576b6f45..9049a5caeb25 100644 --- a/trunk/net/xfrm/xfrm_policy.c +++ b/trunk/net/xfrm/xfrm_policy.c @@ -61,8 +61,8 @@ __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) { const struct flowi4 *fl4 = &fl->u.ip4; - return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) && - addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) && + return addr_match(&fl4->daddr, &sel->daddr, sel->prefixlen_d) && + addr_match(&fl4->saddr, &sel->saddr, sel->prefixlen_s) && !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) && !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) && (fl4->flowi4_proto == sel->proto || !sel->proto) && @@ -1340,7 +1340,7 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int 
family) case AF_INET: dst_ops = &net->xfrm.xfrm4_dst_ops; break; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: dst_ops = &net->xfrm.xfrm6_dst_ops; break; @@ -1499,7 +1499,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, goto free_dst; /* Copy neighbour for reachability confirmation */ - dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour_noref(dst))); + dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour(dst))); xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len); xfrm_init_pmtu(dst_prev); @@ -2445,7 +2445,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) case AF_INET: xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops; break; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops; break; @@ -2495,7 +2495,7 @@ static void __net_init xfrm_dst_ops_init(struct net *net) afinfo = xfrm_policy_afinfo[AF_INET]; if (afinfo) net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) afinfo = xfrm_policy_afinfo[AF_INET6]; if (afinfo) net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops; diff --git a/trunk/net/xfrm/xfrm_state.c b/trunk/net/xfrm/xfrm_state.c index 5b228f97d4b3..9414b9c5b1e4 100644 --- a/trunk/net/xfrm/xfrm_state.c +++ b/trunk/net/xfrm/xfrm_state.c @@ -1035,12 +1035,16 @@ static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m, break; case AF_INET6: - *(struct in6_addr *)x->sel.daddr.a6 = *(struct in6_addr *)daddr; - *(struct in6_addr *)x->sel.saddr.a6 = *(struct in6_addr *)saddr; + ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6, + (const struct in6_addr *)daddr); + ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6, + (const struct in6_addr *)saddr); x->sel.prefixlen_d = 128; x->sel.prefixlen_s = 128; - *(struct in6_addr *)x->props.saddr.a6 = *(struct in6_addr *)saddr; - *(struct in6_addr *)x->id.daddr.a6 = *(struct in6_addr *)daddr; + ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6, + (const struct in6_addr *)saddr); + ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6, + (const struct in6_addr *)daddr); break; } diff --git a/trunk/net/xfrm/xfrm_user.c b/trunk/net/xfrm/xfrm_user.c index e0d747a2e803..d0a42df5160e 100644 --- a/trunk/net/xfrm/xfrm_user.c +++ b/trunk/net/xfrm/xfrm_user.c @@ -28,7 +28,7 @@ #include #include #include -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) #include #endif @@ -150,7 +150,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p, break; case AF_INET6: -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) break; #else err = -EAFNOSUPPORT; @@ -201,7 +201,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p, goto out; break; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case IPPROTO_DSTOPTS: case IPPROTO_ROUTING: if (attrs[XFRMA_ALG_COMP] || @@ -1160,7 +1160,7 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p) break; case AF_INET6: -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) break; #else return -EAFNOSUPPORT; @@ -1231,7 +1231,7 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family) switch (ut[i].family) { case AF_INET: break; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: break; #endif @@ -2604,7 +2604,7 @@ static struct 
xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt, return NULL; } break; -#if IS_ENABLED(CONFIG_IPV6) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: if (opt != IPV6_XFRM_POLICY) { *dir = -EOPNOTSUPP; diff --git a/trunk/security/lsm_audit.c b/trunk/security/lsm_audit.c index 7bd6f138236b..893af8a2fa1e 100644 --- a/trunk/security/lsm_audit.c +++ b/trunk/security/lsm_audit.c @@ -114,20 +114,19 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb, int offset, ret = 0; struct ipv6hdr *ip6; u8 nexthdr; - __be16 frag_off; ip6 = ipv6_hdr(skb); if (ip6 == NULL) return -EINVAL; - ad->u.net.v6info.saddr = ip6->saddr; - ad->u.net.v6info.daddr = ip6->daddr; + ipv6_addr_copy(&ad->u.net.v6info.saddr, &ip6->saddr); + ipv6_addr_copy(&ad->u.net.v6info.daddr, &ip6->daddr); ret = 0; /* IPv6 can have several extension header before the Transport header * skip them */ offset = skb_network_offset(skb); offset += sizeof(*ip6); nexthdr = ip6->nexthdr; - offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off); + offset = ipv6_skip_exthdr(skb, offset, &nexthdr); if (offset < 0) return 0; if (proto) diff --git a/trunk/security/security.c b/trunk/security/security.c index e2f684aeb70c..0c6cc69c8f86 100644 --- a/trunk/security/security.c +++ b/trunk/security/security.c @@ -381,7 +381,7 @@ int security_old_inode_init_security(struct inode *inode, struct inode *dir, void **value, size_t *len) { if (unlikely(IS_PRIVATE(inode))) - return -EOPNOTSUPP; + return 0; return security_ops->inode_init_security(inode, dir, qstr, name, value, len); } diff --git a/trunk/security/selinux/hooks.c b/trunk/security/selinux/hooks.c index 86305c2f555a..1126c10a5e82 100644 --- a/trunk/security/selinux/hooks.c +++ b/trunk/security/selinux/hooks.c @@ -1090,7 +1090,7 @@ static inline u16 socket_type_to_security_class(int family, int type, int protoc return SECCLASS_NETLINK_ROUTE_SOCKET; case NETLINK_FIREWALL: return SECCLASS_NETLINK_FIREWALL_SOCKET; - case NETLINK_SOCK_DIAG: + case NETLINK_INET_DIAG: return SECCLASS_NETLINK_TCPDIAG_SOCKET; case NETLINK_NFLOG: return SECCLASS_NETLINK_NFLOG_SOCKET; @@ -3561,20 +3561,19 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb, u8 nexthdr; int ret = -EINVAL, offset; struct ipv6hdr _ipv6h, *ip6; - __be16 frag_off; offset = skb_network_offset(skb); ip6 = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h); if (ip6 == NULL) goto out; - ad->u.net.v6info.saddr = ip6->saddr; - ad->u.net.v6info.daddr = ip6->daddr; + ipv6_addr_copy(&ad->u.net.v6info.saddr, &ip6->saddr); + ipv6_addr_copy(&ad->u.net.v6info.daddr, &ip6->daddr); ret = 0; nexthdr = ip6->nexthdr; offset += sizeof(_ipv6h); - offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off); + offset = ipv6_skip_exthdr(skb, offset, &nexthdr); if (offset < 0) goto out; @@ -3872,7 +3871,7 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in if (family == PF_INET) ad.u.net.v4info.saddr = addr4->sin_addr.s_addr; else - ad.u.net.v6info.saddr = addr6->sin6_addr; + ipv6_addr_copy(&ad.u.net.v6info.saddr, &addr6->sin6_addr); err = avc_has_perm(sksec->sid, sid, sksec->sclass, node_perm, &ad); diff --git a/trunk/security/selinux/netnode.c b/trunk/security/selinux/netnode.c index 86365857c088..3bf46abaa688 100644 --- a/trunk/security/selinux/netnode.c +++ b/trunk/security/selinux/netnode.c @@ -220,7 +220,7 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid) case PF_INET6: ret = security_node_sid(PF_INET6, addr, sizeof(struct in6_addr), sid); - new->nsec.addr.ipv6 = *(struct 
in6_addr *)addr; + ipv6_addr_copy(&new->nsec.addr.ipv6, addr); break; default: BUG(); diff --git a/trunk/sound/soc/codecs/wm8776.c b/trunk/sound/soc/codecs/wm8776.c index d3b0a20744f1..bfdc52370ad0 100644 --- a/trunk/sound/soc/codecs/wm8776.c +++ b/trunk/sound/soc/codecs/wm8776.c @@ -235,7 +235,6 @@ static int wm8776_hw_params(struct snd_pcm_substream *substream, switch (snd_pcm_format_width(params_format(params))) { case 16: iface = 0; - break; case 20: iface = 0x10; break; diff --git a/trunk/tools/perf/Documentation/perf-annotate.txt b/trunk/tools/perf/Documentation/perf-annotate.txt index c89f9e1453f7..fe6762ed56bd 100644 --- a/trunk/tools/perf/Documentation/perf-annotate.txt +++ b/trunk/tools/perf/Documentation/perf-annotate.txt @@ -22,7 +22,7 @@ OPTIONS ------- -i:: --input=:: - Input file name. (default: perf.data unless stdin is a fifo) + Input file name. (default: perf.data) -d:: --dsos=:: @@ -66,7 +66,7 @@ OPTIONS used. This interfaces starts by centering on the line with more samples, TAB/UNTAB cycles through the lines with more samples. --C:: +-c:: --cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can be provided as a comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2. Default is to report samples on all diff --git a/trunk/tools/perf/Documentation/perf-buildid-list.txt b/trunk/tools/perf/Documentation/perf-buildid-list.txt index 25c52efcc7f0..cc22325ffd1b 100644 --- a/trunk/tools/perf/Documentation/perf-buildid-list.txt +++ b/trunk/tools/perf/Documentation/perf-buildid-list.txt @@ -26,7 +26,7 @@ OPTIONS Show only DSOs with hits. -i:: --input=:: - Input file name. (default: perf.data unless stdin is a fifo) + Input file name. (default: perf.data) -f:: --force:: Don't do ownership validation. diff --git a/trunk/tools/perf/Documentation/perf-evlist.txt b/trunk/tools/perf/Documentation/perf-evlist.txt index 0507ec7bad71..0cada9e053dc 100644 --- a/trunk/tools/perf/Documentation/perf-evlist.txt +++ b/trunk/tools/perf/Documentation/perf-evlist.txt @@ -18,7 +18,7 @@ OPTIONS ------- -i:: --input=:: - Input file name. (default: perf.data unless stdin is a fifo) + Input file name. (default: perf.data) SEE ALSO -------- diff --git a/trunk/tools/perf/Documentation/perf-kmem.txt b/trunk/tools/perf/Documentation/perf-kmem.txt index 7c8fbbf3f61c..a52fcde894c7 100644 --- a/trunk/tools/perf/Documentation/perf-kmem.txt +++ b/trunk/tools/perf/Documentation/perf-kmem.txt @@ -23,7 +23,7 @@ OPTIONS ------- -i :: --input=:: - Select the input file (default: perf.data unless stdin is a fifo) + Select the input file (default: perf.data) --caller:: Show per-callsite statistics diff --git a/trunk/tools/perf/Documentation/perf-lock.txt b/trunk/tools/perf/Documentation/perf-lock.txt index d6b2a4f2108b..4a26a2f3a6a3 100644 --- a/trunk/tools/perf/Documentation/perf-lock.txt +++ b/trunk/tools/perf/Documentation/perf-lock.txt @@ -29,7 +29,7 @@ COMMON OPTIONS -i:: --input=:: - Input file name. (default: perf.data unless stdin is a fifo) + Input file name. -v:: --verbose:: diff --git a/trunk/tools/perf/Documentation/perf-record.txt b/trunk/tools/perf/Documentation/perf-record.txt index 2937f7e14bb7..5a520f825295 100644 --- a/trunk/tools/perf/Documentation/perf-record.txt +++ b/trunk/tools/perf/Documentation/perf-record.txt @@ -89,7 +89,7 @@ OPTIONS -m:: --mmap-pages=:: - Number of mmap data pages. Must be a power of two. + Number of mmap data pages. 
-g:: --call-graph:: diff --git a/trunk/tools/perf/Documentation/perf-report.txt b/trunk/tools/perf/Documentation/perf-report.txt index 9b430e98712e..212f24d672e1 100644 --- a/trunk/tools/perf/Documentation/perf-report.txt +++ b/trunk/tools/perf/Documentation/perf-report.txt @@ -19,7 +19,7 @@ OPTIONS ------- -i:: --input=:: - Input file name. (default: perf.data unless stdin is a fifo) + Input file name. (default: perf.data) -v:: --verbose:: @@ -39,7 +39,7 @@ OPTIONS -T:: --threads:: Show per-thread event counters --c:: +-C:: --comms=:: Only consider symbols in these comms. CSV that understands file://filename entries. @@ -80,10 +80,9 @@ OPTIONS --dump-raw-trace:: Dump raw trace in ASCII. --g [type,min[,limit],order]:: +-g [type,min,order]:: --call-graph:: - Display call chains using type, min percent threshold, optional print - limit and order. + Display call chains using type, min percent threshold and order. type can be either: - flat: single column, linear exposure of call chains. - graph: use a graph tree, displaying absolute overhead rates. @@ -129,7 +128,7 @@ OPTIONS --symfs=:: Look for files with symbols relative to this directory. --C:: +-c:: --cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can be provided as a comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2. Default is to report samples on all diff --git a/trunk/tools/perf/Documentation/perf-sched.txt b/trunk/tools/perf/Documentation/perf-sched.txt index 8ff4df956951..5b212b57f70b 100644 --- a/trunk/tools/perf/Documentation/perf-sched.txt +++ b/trunk/tools/perf/Documentation/perf-sched.txt @@ -40,7 +40,7 @@ OPTIONS ------- -i:: --input=:: - Input file name. (default: perf.data unless stdin is a fifo) + Input file name. (default: perf.data) -v:: --verbose:: diff --git a/trunk/tools/perf/Documentation/perf-script.txt b/trunk/tools/perf/Documentation/perf-script.txt index 2f6cef43da25..dec87ecb530e 100644 --- a/trunk/tools/perf/Documentation/perf-script.txt +++ b/trunk/tools/perf/Documentation/perf-script.txt @@ -106,7 +106,7 @@ OPTIONS -i:: --input=:: - Input file name. (default: perf.data unless stdin is a fifo) + Input file name. -d:: --debug-mode:: @@ -182,17 +182,12 @@ OPTIONS --hide-call-graph:: When printing symbols do not display call chain. --C:: +-c:: --cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can be provided as a comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2. Default is to report samples on all CPUs. --c:: ---comms=:: - Only display events for these comms. CSV that understands - file://filename entries. - -I:: --show-info:: Display extended information about the perf.data file. This adds diff --git a/trunk/tools/perf/Documentation/perf-test.txt b/trunk/tools/perf/Documentation/perf-test.txt index b24ac40fcd58..2c3b462f64b0 100644 --- a/trunk/tools/perf/Documentation/perf-test.txt +++ b/trunk/tools/perf/Documentation/perf-test.txt @@ -8,19 +8,13 @@ perf-test - Runs sanity tests. SYNOPSIS -------- [verse] -'perf test [] [{list |[|]}]' +'perf test ' DESCRIPTION ----------- This command does assorted sanity tests, initially through linked routines but also will look for a directory with more tests in the form of scripts. -To get a list of available tests use 'perf test list', specifying a test name -fragment will show all tests that have it. - -To run just specific tests, inform test name fragments or the numbers obtained -from 'perf test list'. 
- OPTIONS ------- -v:: diff --git a/trunk/tools/perf/Documentation/perf-timechart.txt b/trunk/tools/perf/Documentation/perf-timechart.txt index 1632b0efc757..d7b79e2ba2ad 100644 --- a/trunk/tools/perf/Documentation/perf-timechart.txt +++ b/trunk/tools/perf/Documentation/perf-timechart.txt @@ -27,7 +27,7 @@ OPTIONS Select the output file (default: output.svg) -i:: --input=:: - Select the input file (default: perf.data unless stdin is a fifo) + Select the input file (default: perf.data) -w:: --width=:: Select the width of the SVG file (default: 1000) diff --git a/trunk/tools/perf/Makefile b/trunk/tools/perf/Makefile index ac86d67b636e..b98e3075646b 100644 --- a/trunk/tools/perf/Makefile +++ b/trunk/tools/perf/Makefile @@ -278,7 +278,6 @@ LIB_H += util/strbuf.h LIB_H += util/strlist.h LIB_H += util/strfilter.h LIB_H += util/svghelper.h -LIB_H += util/tool.h LIB_H += util/run-command.h LIB_H += util/sigchain.h LIB_H += util/symbol.h diff --git a/trunk/tools/perf/arch/powerpc/util/dwarf-regs.c b/trunk/tools/perf/arch/powerpc/util/dwarf-regs.c index 7cdd61d0e27c..48ae0c5e3f73 100644 --- a/trunk/tools/perf/arch/powerpc/util/dwarf-regs.c +++ b/trunk/tools/perf/arch/powerpc/util/dwarf-regs.c @@ -9,10 +9,7 @@ * 2 of the License, or (at your option) any later version. */ -#include -#ifndef __UCLIBC__ #include -#endif #include diff --git a/trunk/tools/perf/builtin-annotate.c b/trunk/tools/perf/builtin-annotate.c index 214ba7f9f577..46b4c24f338e 100644 --- a/trunk/tools/perf/builtin-annotate.c +++ b/trunk/tools/perf/builtin-annotate.c @@ -27,32 +27,32 @@ #include "util/sort.h" #include "util/hist.h" #include "util/session.h" -#include "util/tool.h" #include -struct perf_annotate { - struct perf_tool tool; - char const *input_name; - bool force, use_tui, use_stdio; - bool full_paths; - bool print_line; - const char *sym_hist_filter; - const char *cpu_list; - DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); -}; +static char const *input_name = "perf.data"; + +static bool force, use_tui, use_stdio; + +static bool full_paths; + +static bool print_line; -static int perf_evsel__add_sample(struct perf_evsel *evsel, - struct perf_sample *sample, - struct addr_location *al, - struct perf_annotate *ann) +static const char *sym_hist_filter; + +static const char *cpu_list; +static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); + +static int perf_evlist__add_sample(struct perf_evlist *evlist, + struct perf_sample *sample, + struct perf_evsel *evsel, + struct addr_location *al) { struct hist_entry *he; int ret; - if (ann->sym_hist_filter != NULL && - (al->sym == NULL || - strcmp(ann->sym_hist_filter, al->sym->name) != 0)) { + if (sym_hist_filter != NULL && + (al->sym == NULL || strcmp(sym_hist_filter, al->sym->name) != 0)) { /* We're only interested in a symbol named sym_hist_filter */ if (al->sym != NULL) { rb_erase(&al->sym->rb_node, @@ -69,7 +69,8 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel, ret = 0; if (he->ms.sym != NULL) { struct annotation *notes = symbol__annotation(he->ms.sym); - if (notes->src == NULL && symbol__alloc_hist(he->ms.sym) < 0) + if (notes->src == NULL && + symbol__alloc_hist(he->ms.sym, evlist->nr_entries) < 0) return -ENOMEM; ret = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); @@ -80,26 +81,25 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel, return ret; } -static int process_sample_event(struct perf_tool *tool, - union perf_event *event, +static int process_sample_event(union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, - struct machine 
*machine) + struct perf_session *session) { - struct perf_annotate *ann = container_of(tool, struct perf_annotate, tool); struct addr_location al; - if (perf_event__preprocess_sample(event, machine, &al, sample, + if (perf_event__preprocess_sample(event, session, &al, sample, symbol__annotate_init) < 0) { pr_warning("problem processing %d event, skipping it.\n", event->header.type); return -1; } - if (ann->cpu_list && !test_bit(sample->cpu, ann->cpu_bitmap)) + if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) return 0; - if (!al.filtered && perf_evsel__add_sample(evsel, sample, &al, ann)) { + if (!al.filtered && + perf_evlist__add_sample(session->evlist, sample, evsel, &al)) { pr_warning("problem incrementing symbol count, " "skipping event\n"); return -1; @@ -108,15 +108,14 @@ static int process_sample_event(struct perf_tool *tool, return 0; } -static int hist_entry__tty_annotate(struct hist_entry *he, int evidx, - struct perf_annotate *ann) +static int hist_entry__tty_annotate(struct hist_entry *he, int evidx) { return symbol__tty_annotate(he->ms.sym, he->ms.map, evidx, - ann->print_line, ann->full_paths, 0, 0); + print_line, full_paths, 0, 0); } static void hists__find_annotations(struct hists *self, int evidx, - struct perf_annotate *ann) + int nr_events) { struct rb_node *nd = rb_first(&self->entries), *next; int key = K_RIGHT; @@ -139,7 +138,8 @@ static void hists__find_annotations(struct hists *self, int evidx, } if (use_browser > 0) { - key = hist_entry__tui_annotate(he, evidx, NULL, NULL, 0); + key = hist_entry__tui_annotate(he, evidx, nr_events, + NULL, NULL, 0); switch (key) { case K_RIGHT: next = rb_next(nd); @@ -154,7 +154,7 @@ static void hists__find_annotations(struct hists *self, int evidx, if (next != NULL) nd = next; } else { - hist_entry__tty_annotate(he, evidx, ann); + hist_entry__tty_annotate(he, evidx); nd = rb_next(nd); /* * Since we have a hist_entry per IP for the same @@ -167,26 +167,33 @@ static void hists__find_annotations(struct hists *self, int evidx, } } -static int __cmd_annotate(struct perf_annotate *ann) +static struct perf_event_ops event_ops = { + .sample = process_sample_event, + .mmap = perf_event__process_mmap, + .comm = perf_event__process_comm, + .fork = perf_event__process_task, + .ordered_samples = true, + .ordering_requires_timestamps = true, +}; + +static int __cmd_annotate(void) { int ret; struct perf_session *session; struct perf_evsel *pos; u64 total_nr_samples; - session = perf_session__new(ann->input_name, O_RDONLY, - ann->force, false, &ann->tool); + session = perf_session__new(input_name, O_RDONLY, force, false, &event_ops); if (session == NULL) return -ENOMEM; - if (ann->cpu_list) { - ret = perf_session__cpu_bitmap(session, ann->cpu_list, - ann->cpu_bitmap); + if (cpu_list) { + ret = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap); if (ret) goto out_delete; } - ret = perf_session__process_events(session, &ann->tool); + ret = perf_session__process_events(session, &event_ops); if (ret) goto out_delete; @@ -210,12 +217,13 @@ static int __cmd_annotate(struct perf_annotate *ann) total_nr_samples += nr_samples; hists__collapse_resort(hists); hists__output_resort(hists); - hists__find_annotations(hists, pos->idx, ann); + hists__find_annotations(hists, pos->idx, + session->evlist->nr_entries); } } if (total_nr_samples == 0) { - ui__warning("The %s file has no samples!\n", session->filename); + ui__warning("The %s file has no samples!\n", input_name); goto out_delete; } out_delete: @@ -239,41 +247,29 @@ static const char * const 
annotate_usage[] = { NULL }; -int cmd_annotate(int argc, const char **argv, const char *prefix __used) -{ - struct perf_annotate annotate = { - .tool = { - .sample = process_sample_event, - .mmap = perf_event__process_mmap, - .comm = perf_event__process_comm, - .fork = perf_event__process_task, - .ordered_samples = true, - .ordering_requires_timestamps = true, - }, - }; - const struct option options[] = { - OPT_STRING('i', "input", &annotate.input_name, "file", +static const struct option options[] = { + OPT_STRING('i', "input", &input_name, "file", "input file name"), OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", "only consider symbols in these dsos"), - OPT_STRING('s', "symbol", &annotate.sym_hist_filter, "symbol", + OPT_STRING('s', "symbol", &sym_hist_filter, "symbol", "symbol to annotate"), - OPT_BOOLEAN('f', "force", &annotate.force, "don't complain, do it"), + OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), - OPT_BOOLEAN(0, "tui", &annotate.use_tui, "Use the TUI interface"), - OPT_BOOLEAN(0, "stdio", &annotate.use_stdio, "Use the stdio interface"), + OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"), + OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"), OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, "file", "vmlinux pathname"), OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, "load module symbols - WARNING: use only with -k and LIVE kernel"), - OPT_BOOLEAN('l', "print-line", &annotate.print_line, + OPT_BOOLEAN('l', "print-line", &print_line, "print matching source lines (may be slow)"), - OPT_BOOLEAN('P', "full-paths", &annotate.full_paths, + OPT_BOOLEAN('P', "full-paths", &full_paths, "Don't shorten the displayed pathnames"), - OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"), + OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"), OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", "Look for files with symbols relative to this directory"), OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src, @@ -283,13 +279,15 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used) OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", "Specify disassembler style (e.g. 
-M intel for intel syntax)"), OPT_END() - }; +}; +int cmd_annotate(int argc, const char **argv, const char *prefix __used) +{ argc = parse_options(argc, argv, options, annotate_usage, 0); - if (annotate.use_stdio) + if (use_stdio) use_browser = 0; - else if (annotate.use_tui) + else if (use_tui) use_browser = 1; setup_browser(true); @@ -310,7 +308,7 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used) if (argc > 1) usage_with_options(annotate_usage, options); - annotate.sym_hist_filter = argv[0]; + sym_hist_filter = argv[0]; } if (field_sep && *field_sep == '.') { @@ -318,5 +316,5 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used) return -1; } - return __cmd_annotate(&annotate); + return __cmd_annotate(); } diff --git a/trunk/tools/perf/builtin-buildid-list.c b/trunk/tools/perf/builtin-buildid-list.c index 52480467e9ff..cb690a65bf02 100644 --- a/trunk/tools/perf/builtin-buildid-list.c +++ b/trunk/tools/perf/builtin-buildid-list.c @@ -18,7 +18,7 @@ #include -static const char *input_name; +static char const *input_name = "perf.data"; static bool force; static bool show_kernel; static bool with_hits; @@ -39,6 +39,24 @@ static const struct option options[] = { OPT_END() }; +static int perf_session__list_build_ids(void) +{ + struct perf_session *session; + + session = perf_session__new(input_name, O_RDONLY, force, false, + &build_id__mark_dso_hit_ops); + if (session == NULL) + return -1; + + if (with_hits) + perf_session__process_events(session, &build_id__mark_dso_hit_ops); + + perf_session__fprintf_dsos_buildid(session, stdout, with_hits); + + perf_session__delete(session); + return 0; +} + static int sysfs__fprintf_build_id(FILE *fp) { u8 kallsyms_build_id[BUILD_ID_SIZE]; @@ -67,37 +85,18 @@ static int filename__fprintf_build_id(const char *name, FILE *fp) return fprintf(fp, "%s\n", sbuild_id); } -static int perf_session__list_build_ids(void) -{ - struct perf_session *session; - - elf_version(EV_CURRENT); - - session = perf_session__new(input_name, O_RDONLY, force, false, - &build_id__mark_dso_hit_ops); - if (session == NULL) - return -1; - - /* - * See if this is an ELF file first: - */ - if (filename__fprintf_build_id(session->filename, stdout)) - goto out; - - if (with_hits) - perf_session__process_events(session, &build_id__mark_dso_hit_ops); - - perf_session__fprintf_dsos_buildid(session, stdout, with_hits); -out: - perf_session__delete(session); - return 0; -} - static int __cmd_buildid_list(void) { if (show_kernel) return sysfs__fprintf_build_id(stdout); + elf_version(EV_CURRENT); + /* + * See if this is an ELF file first: + */ + if (filename__fprintf_build_id(input_name, stdout)) + return 0; + return perf_session__list_build_ids(); } diff --git a/trunk/tools/perf/builtin-diff.c b/trunk/tools/perf/builtin-diff.c index 4f19513d7dda..b39f3a1ee7dc 100644 --- a/trunk/tools/perf/builtin-diff.c +++ b/trunk/tools/perf/builtin-diff.c @@ -9,9 +9,7 @@ #include "util/debug.h" #include "util/event.h" #include "util/hist.h" -#include "util/evsel.h" #include "util/session.h" -#include "util/tool.h" #include "util/sort.h" #include "util/symbol.h" #include "util/util.h" @@ -32,15 +30,14 @@ static int hists__add_entry(struct hists *self, return -ENOMEM; } -static int diff__process_sample_event(struct perf_tool *tool __used, - union perf_event *event, +static int diff__process_sample_event(union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel __used, - struct machine *machine) + struct perf_session *session) { struct addr_location 
al; - if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) { + if (perf_event__preprocess_sample(event, session, &al, sample, NULL) < 0) { pr_warning("problem processing %d event, skipping it.\n", event->header.type); return -1; @@ -49,16 +46,16 @@ static int diff__process_sample_event(struct perf_tool *tool __used, if (al.filtered || al.sym == NULL) return 0; - if (hists__add_entry(&evsel->hists, &al, sample->period)) { + if (hists__add_entry(&session->hists, &al, sample->period)) { pr_warning("problem incrementing symbol period, skipping event\n"); return -1; } - evsel->hists.stats.total_period += sample->period; + session->hists.stats.total_period += sample->period; return 0; } -static struct perf_tool perf_diff = { +static struct perf_event_ops event_ops = { .sample = diff__process_sample_event, .mmap = perf_event__process_mmap, .comm = perf_event__process_comm, @@ -148,13 +145,13 @@ static int __cmd_diff(void) int ret, i; struct perf_session *session[2]; - session[0] = perf_session__new(input_old, O_RDONLY, force, false, &perf_diff); - session[1] = perf_session__new(input_new, O_RDONLY, force, false, &perf_diff); + session[0] = perf_session__new(input_old, O_RDONLY, force, false, &event_ops); + session[1] = perf_session__new(input_new, O_RDONLY, force, false, &event_ops); if (session[0] == NULL || session[1] == NULL) return -ENOMEM; for (i = 0; i < 2; ++i) { - ret = perf_session__process_events(session[i], &perf_diff); + ret = perf_session__process_events(session[i], &event_ops); if (ret) goto out_delete; } diff --git a/trunk/tools/perf/builtin-evlist.c b/trunk/tools/perf/builtin-evlist.c index 26760322c4f4..4c5e9e04a41f 100644 --- a/trunk/tools/perf/builtin-evlist.c +++ b/trunk/tools/perf/builtin-evlist.c @@ -15,7 +15,7 @@ #include "util/parse-options.h" #include "util/session.h" -static const char *input_name; +static char const *input_name = "perf.data"; static int __cmd_evlist(void) { diff --git a/trunk/tools/perf/builtin-inject.c b/trunk/tools/perf/builtin-inject.c index 09c106193e65..8dfc12bb119b 100644 --- a/trunk/tools/perf/builtin-inject.c +++ b/trunk/tools/perf/builtin-inject.c @@ -9,7 +9,6 @@ #include "perf.h" #include "util/session.h" -#include "util/tool.h" #include "util/debug.h" #include "util/parse-options.h" @@ -17,9 +16,8 @@ static char const *input_name = "-"; static bool inject_build_ids; -static int perf_event__repipe_synth(struct perf_tool *tool __used, - union perf_event *event, - struct machine *machine __used) +static int perf_event__repipe_synth(union perf_event *event, + struct perf_session *session __used) { uint32_t size; void *buf = event; @@ -38,70 +36,41 @@ static int perf_event__repipe_synth(struct perf_tool *tool __used, return 0; } -static int perf_event__repipe_op2_synth(struct perf_tool *tool, - union perf_event *event, - struct perf_session *session __used) -{ - return perf_event__repipe_synth(tool, event, NULL); -} - -static int perf_event__repipe_event_type_synth(struct perf_tool *tool, - union perf_event *event) -{ - return perf_event__repipe_synth(tool, event, NULL); -} - -static int perf_event__repipe_tracing_data_synth(union perf_event *event, - struct perf_session *session __used) -{ - return perf_event__repipe_synth(NULL, event, NULL); -} - -static int perf_event__repipe_attr(union perf_event *event, - struct perf_evlist **pevlist __used) -{ - return perf_event__repipe_synth(NULL, event, NULL); -} - -static int perf_event__repipe(struct perf_tool *tool, - union perf_event *event, +static int perf_event__repipe(union 
perf_event *event, struct perf_sample *sample __used, - struct machine *machine) + struct perf_session *session) { - return perf_event__repipe_synth(tool, event, machine); + return perf_event__repipe_synth(event, session); } -static int perf_event__repipe_sample(struct perf_tool *tool, - union perf_event *event, +static int perf_event__repipe_sample(union perf_event *event, struct perf_sample *sample __used, struct perf_evsel *evsel __used, - struct machine *machine) + struct perf_session *session) { - return perf_event__repipe_synth(tool, event, machine); + return perf_event__repipe_synth(event, session); } -static int perf_event__repipe_mmap(struct perf_tool *tool, - union perf_event *event, +static int perf_event__repipe_mmap(union perf_event *event, struct perf_sample *sample, - struct machine *machine) + struct perf_session *session) { int err; - err = perf_event__process_mmap(tool, event, sample, machine); - perf_event__repipe(tool, event, sample, machine); + err = perf_event__process_mmap(event, sample, session); + perf_event__repipe(event, sample, session); return err; } -static int perf_event__repipe_task(struct perf_tool *tool, - union perf_event *event, +static int perf_event__repipe_task(union perf_event *event, struct perf_sample *sample, - struct machine *machine) + struct perf_session *session) { int err; - err = perf_event__process_task(tool, event, sample, machine); - perf_event__repipe(tool, event, sample, machine); + err = perf_event__process_task(event, sample, session); + perf_event__repipe(event, sample, session); return err; } @@ -111,7 +80,7 @@ static int perf_event__repipe_tracing_data(union perf_event *event, { int err; - perf_event__repipe_synth(NULL, event, NULL); + perf_event__repipe_synth(event, session); err = perf_event__process_tracing_data(event, session); return err; @@ -131,10 +100,10 @@ static int dso__read_build_id(struct dso *self) return -1; } -static int dso__inject_build_id(struct dso *self, struct perf_tool *tool, - struct machine *machine) +static int dso__inject_build_id(struct dso *self, struct perf_session *session) { u16 misc = PERF_RECORD_MISC_USER; + struct machine *machine; int err; if (dso__read_build_id(self) < 0) { @@ -142,11 +111,17 @@ static int dso__inject_build_id(struct dso *self, struct perf_tool *tool, return -1; } + machine = perf_session__find_host_machine(session); + if (machine == NULL) { + pr_err("Can't find machine for session\n"); + return -1; + } + if (self->kernel) misc = PERF_RECORD_MISC_KERNEL; - err = perf_event__synthesize_build_id(tool, self, misc, perf_event__repipe, - machine); + err = perf_event__synthesize_build_id(self, misc, perf_event__repipe, + machine, session); if (err) { pr_err("Can't synthesize build_id event for %s\n", self->long_name); return -1; @@ -155,11 +130,10 @@ static int dso__inject_build_id(struct dso *self, struct perf_tool *tool, return 0; } -static int perf_event__inject_buildid(struct perf_tool *tool, - union perf_event *event, +static int perf_event__inject_buildid(union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel __used, - struct machine *machine) + struct perf_session *session) { struct addr_location al; struct thread *thread; @@ -167,21 +141,21 @@ static int perf_event__inject_buildid(struct perf_tool *tool, cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - thread = machine__findnew_thread(machine, event->ip.pid); + thread = perf_session__findnew(session, event->ip.pid); if (thread == NULL) { pr_err("problem processing %d event, skipping 
it.\n", event->header.type); goto repipe; } - thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION, - event->ip.ip, &al); + thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION, + event->ip.pid, event->ip.ip, &al); if (al.map != NULL) { if (!al.map->dso->hit) { al.map->dso->hit = 1; if (map__load(al.map, NULL) >= 0) { - dso__inject_build_id(al.map->dso, tool, machine); + dso__inject_build_id(al.map->dso, session); /* * If this fails, too bad, let the other side * account this as unresolved. @@ -194,24 +168,24 @@ static int perf_event__inject_buildid(struct perf_tool *tool, } repipe: - perf_event__repipe(tool, event, sample, machine); + perf_event__repipe(event, sample, session); return 0; } -struct perf_tool perf_inject = { +struct perf_event_ops inject_ops = { .sample = perf_event__repipe_sample, .mmap = perf_event__repipe, .comm = perf_event__repipe, .fork = perf_event__repipe, .exit = perf_event__repipe, .lost = perf_event__repipe, - .read = perf_event__repipe_sample, + .read = perf_event__repipe, .throttle = perf_event__repipe, .unthrottle = perf_event__repipe, - .attr = perf_event__repipe_attr, - .event_type = perf_event__repipe_event_type_synth, - .tracing_data = perf_event__repipe_tracing_data_synth, - .build_id = perf_event__repipe_op2_synth, + .attr = perf_event__repipe_synth, + .event_type = perf_event__repipe_synth, + .tracing_data = perf_event__repipe_synth, + .build_id = perf_event__repipe_synth, }; extern volatile int session_done; @@ -229,17 +203,17 @@ static int __cmd_inject(void) signal(SIGINT, sig_handler); if (inject_build_ids) { - perf_inject.sample = perf_event__inject_buildid; - perf_inject.mmap = perf_event__repipe_mmap; - perf_inject.fork = perf_event__repipe_task; - perf_inject.tracing_data = perf_event__repipe_tracing_data; + inject_ops.sample = perf_event__inject_buildid; + inject_ops.mmap = perf_event__repipe_mmap; + inject_ops.fork = perf_event__repipe_task; + inject_ops.tracing_data = perf_event__repipe_tracing_data; } - session = perf_session__new(input_name, O_RDONLY, false, true, &perf_inject); + session = perf_session__new(input_name, O_RDONLY, false, true, &inject_ops); if (session == NULL) return -ENOMEM; - ret = perf_session__process_events(session, &perf_inject); + ret = perf_session__process_events(session, &inject_ops); perf_session__delete(session); diff --git a/trunk/tools/perf/builtin-kmem.c b/trunk/tools/perf/builtin-kmem.c index fe1ad8f21961..225e963df105 100644 --- a/trunk/tools/perf/builtin-kmem.c +++ b/trunk/tools/perf/builtin-kmem.c @@ -7,7 +7,6 @@ #include "util/thread.h" #include "util/header.h" #include "util/session.h" -#include "util/tool.h" #include "util/parse-options.h" #include "util/trace-event.h" @@ -19,7 +18,7 @@ struct alloc_stat; typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *); -static const char *input_name; +static char const *input_name = "perf.data"; static int alloc_flag; static int caller_flag; @@ -304,13 +303,12 @@ static void process_raw_event(union perf_event *raw_event __used, void *data, } } -static int process_sample_event(struct perf_tool *tool __used, - union perf_event *event, +static int process_sample_event(union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel __used, - struct machine *machine) + struct perf_session *session) { - struct thread *thread = machine__findnew_thread(machine, event->ip.pid); + struct thread *thread = perf_session__findnew(session, event->ip.pid); if (thread == NULL) { pr_debug("problem processing %d event, skipping it.\n", 
@@ -326,7 +324,7 @@ static int process_sample_event(struct perf_tool *tool __used, return 0; } -static struct perf_tool perf_kmem = { +static struct perf_event_ops event_ops = { .sample = process_sample_event, .comm = perf_event__process_comm, .ordered_samples = true, @@ -485,7 +483,7 @@ static int __cmd_kmem(void) { int err = -EINVAL; struct perf_session *session = perf_session__new(input_name, O_RDONLY, - 0, false, &perf_kmem); + 0, false, &event_ops); if (session == NULL) return -ENOMEM; @@ -496,7 +494,7 @@ static int __cmd_kmem(void) goto out_delete; setup_pager(); - err = perf_session__process_events(session, &perf_kmem); + err = perf_session__process_events(session, &event_ops); if (err != 0) goto out_delete; sort_result(); diff --git a/trunk/tools/perf/builtin-kvm.c b/trunk/tools/perf/builtin-kvm.c index 032324a76b87..34d1e853829d 100644 --- a/trunk/tools/perf/builtin-kvm.c +++ b/trunk/tools/perf/builtin-kvm.c @@ -38,7 +38,7 @@ static const struct option kvm_options[] = { OPT_BOOLEAN(0, "guest", &perf_guest, "Collect guest os data"), OPT_BOOLEAN(0, "host", &perf_host, - "Collect host os data"), + "Collect guest os data"), OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory", "guest mount directory under which every guest os" " instance has a subdir"), diff --git a/trunk/tools/perf/builtin-lock.c b/trunk/tools/perf/builtin-lock.c index 2296c391d0f5..899080ace267 100644 --- a/trunk/tools/perf/builtin-lock.c +++ b/trunk/tools/perf/builtin-lock.c @@ -12,7 +12,6 @@ #include "util/debug.h" #include "util/session.h" -#include "util/tool.h" #include #include @@ -326,7 +325,7 @@ static struct lock_stat *lock_stat_findnew(void *addr, const char *name) die("memory allocation failed\n"); } -static const char *input_name; +static char const *input_name = "perf.data"; struct raw_event_sample { u32 size; @@ -846,13 +845,12 @@ static void dump_info(void) die("Unknown type of information\n"); } -static int process_sample_event(struct perf_tool *tool __used, - union perf_event *event, +static int process_sample_event(union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel __used, - struct machine *machine) + struct perf_session *s) { - struct thread *thread = machine__findnew_thread(machine, sample->tid); + struct thread *thread = perf_session__findnew(s, sample->tid); if (thread == NULL) { pr_debug("problem processing %d event, skipping it.\n", @@ -865,7 +863,7 @@ static int process_sample_event(struct perf_tool *tool __used, return 0; } -static struct perf_tool eops = { +static struct perf_event_ops eops = { .sample = process_sample_event, .comm = perf_event__process_comm, .ordered_samples = true, diff --git a/trunk/tools/perf/builtin-probe.c b/trunk/tools/perf/builtin-probe.c index 59d43abfbfec..710ae3d0a489 100644 --- a/trunk/tools/perf/builtin-probe.c +++ b/trunk/tools/perf/builtin-probe.c @@ -46,6 +46,7 @@ #define DEFAULT_VAR_FILTER "!__k???tab_* & !__crc_*" #define DEFAULT_FUNC_FILTER "!_*" +#define MAX_PATH_LEN 256 /* Session management structure */ static struct { diff --git a/trunk/tools/perf/builtin-record.c b/trunk/tools/perf/builtin-record.c index 0abfb18b911f..6ab58cc99d53 100644 --- a/trunk/tools/perf/builtin-record.c +++ b/trunk/tools/perf/builtin-record.c @@ -22,7 +22,6 @@ #include "util/evsel.h" #include "util/debug.h" #include "util/session.h" -#include "util/tool.h" #include "util/symbol.h" #include "util/cpumap.h" #include "util/thread_map.h" @@ -36,36 +35,55 @@ enum write_mode_t { WRITE_APPEND }; -struct perf_record { - struct perf_tool tool; - 
struct perf_record_opts opts; - u64 bytes_written; - const char *output_name; - struct perf_evlist *evlist; - struct perf_session *session; - const char *progname; - int output; - unsigned int page_size; - int realtime_prio; - enum write_mode_t write_mode; - bool no_buildid; - bool no_buildid_cache; - bool force; - bool file_new; - bool append_file; - long samples; - off_t post_processing_offset; -}; - -static void advance_output(struct perf_record *rec, size_t size) +static u64 user_interval = ULLONG_MAX; +static u64 default_interval = 0; + +static unsigned int page_size; +static unsigned int mmap_pages = UINT_MAX; +static unsigned int user_freq = UINT_MAX; +static int freq = 1000; +static int output; +static int pipe_output = 0; +static const char *output_name = NULL; +static bool group = false; +static int realtime_prio = 0; +static bool nodelay = false; +static bool raw_samples = false; +static bool sample_id_all_avail = true; +static bool system_wide = false; +static pid_t target_pid = -1; +static pid_t target_tid = -1; +static pid_t child_pid = -1; +static bool no_inherit = false; +static enum write_mode_t write_mode = WRITE_FORCE; +static bool call_graph = false; +static bool inherit_stat = false; +static bool no_samples = false; +static bool sample_address = false; +static bool sample_time = false; +static bool no_buildid = false; +static bool no_buildid_cache = false; +static struct perf_evlist *evsel_list; + +static long samples = 0; +static u64 bytes_written = 0; + +static int file_new = 1; +static off_t post_processing_offset; + +static struct perf_session *session; +static const char *cpu_list; +static const char *progname; + +static void advance_output(size_t size) { - rec->bytes_written += size; + bytes_written += size; } -static void write_output(struct perf_record *rec, void *buf, size_t size) +static void write_output(void *buf, size_t size) { while (size) { - int ret = write(rec->output, buf, size); + int ret = write(output, buf, size); if (ret < 0) die("failed to write"); @@ -73,33 +91,30 @@ static void write_output(struct perf_record *rec, void *buf, size_t size) size -= ret; buf += ret; - rec->bytes_written += ret; + bytes_written += ret; } } -static int process_synthesized_event(struct perf_tool *tool, - union perf_event *event, +static int process_synthesized_event(union perf_event *event, struct perf_sample *sample __used, - struct machine *machine __used) + struct perf_session *self __used) { - struct perf_record *rec = container_of(tool, struct perf_record, tool); - write_output(rec, event, event->header.size); + write_output(event, event->header.size); return 0; } -static void perf_record__mmap_read(struct perf_record *rec, - struct perf_mmap *md) +static void mmap_read(struct perf_mmap *md) { unsigned int head = perf_mmap__read_head(md); unsigned int old = md->prev; - unsigned char *data = md->base + rec->page_size; + unsigned char *data = md->base + page_size; unsigned long size; void *buf; if (old == head) return; - rec->samples++; + samples++; size = head - old; @@ -108,14 +123,14 @@ static void perf_record__mmap_read(struct perf_record *rec, size = md->mask + 1 - (old & md->mask); old += size; - write_output(rec, buf, size); + write_output(buf, size); } buf = &data[old & md->mask]; size = head - old; old += size; - write_output(rec, buf, size); + write_output(buf, size); md->prev = old; perf_mmap__write_tail(md, old); @@ -134,18 +149,17 @@ static void sig_handler(int sig) signr = sig; } -static void perf_record__sig_exit(int exit_status __used, void *arg) 
+static void sig_atexit(void) { - struct perf_record *rec = arg; int status; - if (rec->evlist->workload.pid > 0) { + if (child_pid > 0) { if (!child_finished) - kill(rec->evlist->workload.pid, SIGTERM); + kill(child_pid, SIGTERM); wait(&status); if (WIFSIGNALED(status)) - psignal(WTERMSIG(status), rec->progname); + psignal(WTERMSIG(status), progname); } if (signr == -1 || signr == SIGUSR1) @@ -155,6 +169,78 @@ static void perf_record__sig_exit(int exit_status __used, void *arg) kill(getpid(), signr); } +static void config_attr(struct perf_evsel *evsel, struct perf_evlist *evlist) +{ + struct perf_event_attr *attr = &evsel->attr; + int track = !evsel->idx; /* only the first counter needs these */ + + attr->disabled = 1; + attr->inherit = !no_inherit; + attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | + PERF_FORMAT_TOTAL_TIME_RUNNING | + PERF_FORMAT_ID; + + attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID; + + if (evlist->nr_entries > 1) + attr->sample_type |= PERF_SAMPLE_ID; + + /* + * We default some events to a 1 default interval. But keep + * it a weak assumption overridable by the user. + */ + if (!attr->sample_period || (user_freq != UINT_MAX && + user_interval != ULLONG_MAX)) { + if (freq) { + attr->sample_type |= PERF_SAMPLE_PERIOD; + attr->freq = 1; + attr->sample_freq = freq; + } else { + attr->sample_period = default_interval; + } + } + + if (no_samples) + attr->sample_freq = 0; + + if (inherit_stat) + attr->inherit_stat = 1; + + if (sample_address) { + attr->sample_type |= PERF_SAMPLE_ADDR; + attr->mmap_data = track; + } + + if (call_graph) + attr->sample_type |= PERF_SAMPLE_CALLCHAIN; + + if (system_wide) + attr->sample_type |= PERF_SAMPLE_CPU; + + if (sample_id_all_avail && + (sample_time || system_wide || !no_inherit || cpu_list)) + attr->sample_type |= PERF_SAMPLE_TIME; + + if (raw_samples) { + attr->sample_type |= PERF_SAMPLE_TIME; + attr->sample_type |= PERF_SAMPLE_RAW; + attr->sample_type |= PERF_SAMPLE_CPU; + } + + if (nodelay) { + attr->watermark = 0; + attr->wakeup_events = 1; + } + + attr->mmap = track; + attr->comm = track; + + if (target_pid == -1 && target_tid == -1 && !system_wide) { + attr->disabled = 1; + attr->enable_on_exec = 1; + } +} + static bool perf_evlist__equal(struct perf_evlist *evlist, struct perf_evlist *other) { @@ -174,16 +260,14 @@ static bool perf_evlist__equal(struct perf_evlist *evlist, return true; } -static void perf_record__open(struct perf_record *rec) +static void open_counters(struct perf_evlist *evlist) { struct perf_evsel *pos, *first; - struct perf_evlist *evlist = rec->evlist; - struct perf_session *session = rec->session; - struct perf_record_opts *opts = &rec->opts; - first = list_entry(evlist->entries.next, struct perf_evsel, node); + if (evlist->cpus->map[0] < 0) + no_inherit = true; - perf_evlist__config_attrs(evlist, opts); + first = list_entry(evlist->entries.next, struct perf_evsel, node); list_for_each_entry(pos, &evlist->entries, node) { struct perf_event_attr *attr = &pos->attr; @@ -202,27 +286,29 @@ static void perf_record__open(struct perf_record *rec) */ bool time_needed = attr->sample_type & PERF_SAMPLE_TIME; - if (opts->group && pos != first) + if (group && pos != first) group_fd = first->fd; + + config_attr(pos, evlist); retry_sample_id: - attr->sample_id_all = opts->sample_id_all_avail ? 1 : 0; + attr->sample_id_all = sample_id_all_avail ? 
1 : 0; try_again: - if (perf_evsel__open(pos, evlist->cpus, evlist->threads, - opts->group, group_fd) < 0) { + if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group, + group_fd) < 0) { int err = errno; if (err == EPERM || err == EACCES) { ui__error_paranoid(); exit(EXIT_FAILURE); - } else if (err == ENODEV && opts->cpu_list) { + } else if (err == ENODEV && cpu_list) { die("No such device - did you specify" " an out-of-range profile CPU?\n"); - } else if (err == EINVAL && opts->sample_id_all_avail) { + } else if (err == EINVAL && sample_id_all_avail) { /* * Old kernel, no attr->sample_id_type_all field */ - opts->sample_id_all_avail = false; - if (!opts->sample_time && !opts->raw_samples && !time_needed) + sample_id_all_avail = false; + if (!sample_time && !raw_samples && !time_needed) attr->sample_type &= ~PERF_SAMPLE_TIME; goto retry_sample_id; @@ -272,20 +358,10 @@ static void perf_record__open(struct perf_record *rec) exit(-1); } - if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) { - if (errno == EPERM) - die("Permission error mapping pages.\n" - "Consider increasing " - "/proc/sys/kernel/perf_event_mlock_kb,\n" - "or try again with a smaller value of -m/--mmap_pages.\n" - "(current value: %d)\n", opts->mmap_pages); - else if (!is_power_of_2(opts->mmap_pages)) - die("--mmap_pages/-m value must be a power of two."); - + if (perf_evlist__mmap(evlist, mmap_pages, false) < 0) die("failed to mmap with %d (%s)\n", errno, strerror(errno)); - } - if (rec->file_new) + if (file_new) session->evlist = evlist; else { if (!perf_evlist__equal(session->evlist, evlist)) { @@ -297,32 +373,29 @@ static void perf_record__open(struct perf_record *rec) perf_session__update_sample_type(session); } -static int process_buildids(struct perf_record *rec) +static int process_buildids(void) { - u64 size = lseek(rec->output, 0, SEEK_CUR); + u64 size = lseek(output, 0, SEEK_CUR); if (size == 0) return 0; - rec->session->fd = rec->output; - return __perf_session__process_events(rec->session, rec->post_processing_offset, - size - rec->post_processing_offset, + session->fd = output; + return __perf_session__process_events(session, post_processing_offset, + size - post_processing_offset, size, &build_id__mark_dso_hit_ops); } -static void perf_record__exit(int status __used, void *arg) +static void atexit_header(void) { - struct perf_record *rec = arg; - - if (!rec->opts.pipe_output) { - rec->session->header.data_size += rec->bytes_written; - - if (!rec->no_buildid) - process_buildids(rec); - perf_session__write_header(rec->session, rec->evlist, - rec->output, true); - perf_session__delete(rec->session); - perf_evlist__delete(rec->evlist); + if (!pipe_output) { + session->header.data_size += bytes_written; + + if (!no_buildid) + process_buildids(); + perf_session__write_header(session, evsel_list, output, true); + perf_session__delete(session); + perf_evlist__delete(evsel_list); symbol__exit(); } } @@ -330,7 +403,7 @@ static void perf_record__exit(int status __used, void *arg) static void perf_event__synthesize_guest_os(struct machine *machine, void *data) { int err; - struct perf_tool *tool = data; + struct perf_session *psession = data; if (machine__is_host(machine)) return; @@ -343,8 +416,8 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data) *method is used to avoid symbol missing when the first addr is *in module instead of in guest kernel. 
*/ - err = perf_event__synthesize_modules(tool, process_synthesized_event, - machine); + err = perf_event__synthesize_modules(process_synthesized_event, + psession, machine); if (err < 0) pr_err("Couldn't record guest kernel [%d]'s reference" " relocation symbol.\n", machine->pid); @@ -353,11 +426,12 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data) * We use _stext for guest kernel because guest kernel's /proc/kallsyms * have no _text sometimes. */ - err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event, - machine, "_text"); + err = perf_event__synthesize_kernel_mmap(process_synthesized_event, + psession, machine, "_text"); if (err < 0) - err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event, - machine, "_stext"); + err = perf_event__synthesize_kernel_mmap(process_synthesized_event, + psession, machine, + "_stext"); if (err < 0) pr_err("Couldn't record guest kernel [%d]'s reference" " relocation symbol.\n", machine->pid); @@ -368,71 +442,73 @@ static struct perf_event_header finished_round_event = { .type = PERF_RECORD_FINISHED_ROUND, }; -static void perf_record__mmap_read_all(struct perf_record *rec) +static void mmap_read_all(void) { int i; - for (i = 0; i < rec->evlist->nr_mmaps; i++) { - if (rec->evlist->mmap[i].base) - perf_record__mmap_read(rec, &rec->evlist->mmap[i]); + for (i = 0; i < evsel_list->nr_mmaps; i++) { + if (evsel_list->mmap[i].base) + mmap_read(&evsel_list->mmap[i]); } - if (perf_header__has_feat(&rec->session->header, HEADER_TRACE_INFO)) - write_output(rec, &finished_round_event, sizeof(finished_round_event)); + if (perf_header__has_feat(&session->header, HEADER_TRACE_INFO)) + write_output(&finished_round_event, sizeof(finished_round_event)); } -static int __cmd_record(struct perf_record *rec, int argc, const char **argv) +static int __cmd_record(int argc, const char **argv) { struct stat st; int flags; - int err, output; + int err; unsigned long waking = 0; + int child_ready_pipe[2], go_pipe[2]; const bool forks = argc > 0; + char buf; struct machine *machine; - struct perf_tool *tool = &rec->tool; - struct perf_record_opts *opts = &rec->opts; - struct perf_evlist *evsel_list = rec->evlist; - const char *output_name = rec->output_name; - struct perf_session *session; - rec->progname = argv[0]; + progname = argv[0]; - rec->page_size = sysconf(_SC_PAGE_SIZE); + page_size = sysconf(_SC_PAGE_SIZE); - on_exit(perf_record__sig_exit, rec); + atexit(sig_atexit); signal(SIGCHLD, sig_handler); signal(SIGINT, sig_handler); signal(SIGUSR1, sig_handler); + if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) { + perror("failed to create pipes"); + exit(-1); + } + if (!output_name) { if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode)) - opts->pipe_output = true; + pipe_output = 1; else - rec->output_name = output_name = "perf.data"; + output_name = "perf.data"; } if (output_name) { if (!strcmp(output_name, "-")) - opts->pipe_output = true; + pipe_output = 1; else if (!stat(output_name, &st) && st.st_size) { - if (rec->write_mode == WRITE_FORCE) { + if (write_mode == WRITE_FORCE) { char oldname[PATH_MAX]; snprintf(oldname, sizeof(oldname), "%s.old", output_name); unlink(oldname); rename(output_name, oldname); } - } else if (rec->write_mode == WRITE_APPEND) { - rec->write_mode = WRITE_FORCE; + } else if (write_mode == WRITE_APPEND) { + write_mode = WRITE_FORCE; } } flags = O_CREAT|O_RDWR; - if (rec->write_mode == WRITE_APPEND) - rec->file_new = 0; + if (write_mode == WRITE_APPEND) + file_new = 0; 
else flags |= O_TRUNC; - if (opts->pipe_output) + if (pipe_output) output = STDOUT_FILENO; else output = open(output_name, flags, S_IRUSR | S_IWUSR); @@ -441,21 +517,17 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) exit(-1); } - rec->output = output; - session = perf_session__new(output_name, O_WRONLY, - rec->write_mode == WRITE_FORCE, false, NULL); + write_mode == WRITE_FORCE, false, NULL); if (session == NULL) { pr_err("Not enough memory for reading perf file header\n"); return -1; } - rec->session = session; - - if (!rec->no_buildid) + if (!no_buildid) perf_header__set_feat(&session->header, HEADER_BUILD_ID); - if (!rec->file_new) { + if (!file_new) { err = perf_session__read_header(session, output); if (err < 0) goto out_delete_session; @@ -477,57 +549,94 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) perf_header__set_feat(&session->header, HEADER_NUMA_TOPOLOGY); perf_header__set_feat(&session->header, HEADER_CPUID); + /* 512 kiB: default amount of unprivileged mlocked memory */ + if (mmap_pages == UINT_MAX) + mmap_pages = (512 * 1024) / page_size; + if (forks) { - err = perf_evlist__prepare_workload(evsel_list, opts, argv); - if (err < 0) { - pr_err("Couldn't run the workload!\n"); - goto out_delete_session; + child_pid = fork(); + if (child_pid < 0) { + perror("failed to fork"); + exit(-1); + } + + if (!child_pid) { + if (pipe_output) + dup2(2, 1); + close(child_ready_pipe[0]); + close(go_pipe[1]); + fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC); + + /* + * Do a dummy execvp to get the PLT entry resolved, + * so we avoid the resolver overhead on the real + * execvp call. + */ + execvp("", (char **)argv); + + /* + * Tell the parent we're ready to go + */ + close(child_ready_pipe[1]); + + /* + * Wait until the parent tells us to go. + */ + if (read(go_pipe[0], &buf, 1) == -1) + perror("unable to read pipe"); + + execvp(argv[0], (char **)argv); + + perror(argv[0]); + kill(getppid(), SIGUSR1); + exit(-1); } + + if (!system_wide && target_tid == -1 && target_pid == -1) + evsel_list->threads->map[0] = child_pid; + + close(child_ready_pipe[1]); + close(go_pipe[0]); + /* + * wait for child to settle + */ + if (read(child_ready_pipe[0], &buf, 1) == -1) { + perror("unable to read pipe"); + exit(-1); + } + close(child_ready_pipe[0]); } - perf_record__open(rec); + open_counters(evsel_list); /* - * perf_session__delete(session) will be called at perf_record__exit() + * perf_session__delete(session) will be called at atexit_header() */ - on_exit(perf_record__exit, rec); + atexit(atexit_header); - if (opts->pipe_output) { + if (pipe_output) { err = perf_header__write_pipe(output); if (err < 0) return err; - } else if (rec->file_new) { + } else if (file_new) { err = perf_session__write_header(session, evsel_list, output, false); if (err < 0) return err; } - if (!!rec->no_buildid - && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) { - pr_err("Couldn't generating buildids. 
" - "Use --no-buildid to profile anyway.\n"); - return -1; - } + post_processing_offset = lseek(output, 0, SEEK_CUR); - rec->post_processing_offset = lseek(output, 0, SEEK_CUR); - - machine = perf_session__find_host_machine(session); - if (!machine) { - pr_err("Couldn't find native kernel information.\n"); - return -1; - } - - if (opts->pipe_output) { - err = perf_event__synthesize_attrs(tool, session, - process_synthesized_event); + if (pipe_output) { + err = perf_session__synthesize_attrs(session, + process_synthesized_event); if (err < 0) { pr_err("Couldn't synthesize attrs.\n"); return err; } - err = perf_event__synthesize_event_types(tool, process_synthesized_event, - machine); + err = perf_event__synthesize_event_types(process_synthesized_event, + session); if (err < 0) { pr_err("Couldn't synthesize event_types.\n"); return err; @@ -542,49 +651,56 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) * return this more properly and also * propagate errors that now are calling die() */ - err = perf_event__synthesize_tracing_data(tool, output, evsel_list, - process_synthesized_event); + err = perf_event__synthesize_tracing_data(output, evsel_list, + process_synthesized_event, + session); if (err <= 0) { pr_err("Couldn't record tracing data.\n"); return err; } - advance_output(rec, err); + advance_output(err); } } - err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event, - machine, "_text"); + machine = perf_session__find_host_machine(session); + if (!machine) { + pr_err("Couldn't find native kernel information.\n"); + return -1; + } + + err = perf_event__synthesize_kernel_mmap(process_synthesized_event, + session, machine, "_text"); if (err < 0) - err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event, - machine, "_stext"); + err = perf_event__synthesize_kernel_mmap(process_synthesized_event, + session, machine, "_stext"); if (err < 0) pr_err("Couldn't record kernel reference relocation symbol\n" "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n" "Check /proc/kallsyms permission or run as root.\n"); - err = perf_event__synthesize_modules(tool, process_synthesized_event, - machine); + err = perf_event__synthesize_modules(process_synthesized_event, + session, machine); if (err < 0) pr_err("Couldn't record kernel module information.\n" "Symbol resolution may be skewed if relocation was used (e.g. 
kexec).\n" "Check /proc/modules permission or run as root.\n"); if (perf_guest) - perf_session__process_machines(session, tool, + perf_session__process_machines(session, perf_event__synthesize_guest_os); - if (!opts->system_wide) - perf_event__synthesize_thread_map(tool, evsel_list->threads, + if (!system_wide) + perf_event__synthesize_thread_map(evsel_list->threads, process_synthesized_event, - machine); + session); else - perf_event__synthesize_threads(tool, process_synthesized_event, - machine); + perf_event__synthesize_threads(process_synthesized_event, + session); - if (rec->realtime_prio) { + if (realtime_prio) { struct sched_param param; - param.sched_priority = rec->realtime_prio; + param.sched_priority = realtime_prio; if (sched_setscheduler(0, SCHED_FIFO, ¶m)) { pr_err("Could not set realtime priority.\n"); exit(-1); @@ -597,14 +713,14 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) * Let the child rip */ if (forks) - perf_evlist__start_workload(evsel_list); + close(go_pipe[1]); for (;;) { - int hits = rec->samples; + int hits = samples; - perf_record__mmap_read_all(rec); + mmap_read_all(); - if (hits == rec->samples) { + if (hits == samples) { if (done) break; err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1); @@ -625,9 +741,9 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) */ fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n", - (double)rec->bytes_written / 1024.0 / 1024.0, + (double)bytes_written / 1024.0 / 1024.0, output_name, - rec->bytes_written / 24); + bytes_written / 24); return 0; @@ -642,89 +758,58 @@ static const char * const record_usage[] = { NULL }; -/* - * XXX Ideally would be local to cmd_record() and passed to a perf_record__new - * because we need to have access to it in perf_record__exit, that is called - * after cmd_record() exits, but since record_options need to be accessible to - * builtin-script, leave it here. - * - * At least we don't ouch it in all the other functions here directly. - * - * Just say no to tons of global variables, sigh. - */ -static struct perf_record record = { - .opts = { - .target_pid = -1, - .target_tid = -1, - .mmap_pages = UINT_MAX, - .user_freq = UINT_MAX, - .user_interval = ULLONG_MAX, - .freq = 1000, - .sample_id_all_avail = true, - }, - .write_mode = WRITE_FORCE, - .file_new = true, -}; +static bool force, append_file; -/* - * XXX Will stay a global variable till we fix builtin-script.c to stop messing - * with it and switch to use the library functions in perf_evlist that came - * from builtin-record.c, i.e. use perf_record_opts, - * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record', - * using pipes, etc. - */ const struct option record_options[] = { - OPT_CALLBACK('e', "event", &record.evlist, "event", + OPT_CALLBACK('e', "event", &evsel_list, "event", "event selector. 
use 'perf list' to list available events", parse_events_option), - OPT_CALLBACK(0, "filter", &record.evlist, "filter", + OPT_CALLBACK(0, "filter", &evsel_list, "filter", "event filter", parse_filter), - OPT_INTEGER('p', "pid", &record.opts.target_pid, + OPT_INTEGER('p', "pid", &target_pid, "record events on existing process id"), - OPT_INTEGER('t', "tid", &record.opts.target_tid, + OPT_INTEGER('t', "tid", &target_tid, "record events on existing thread id"), - OPT_INTEGER('r', "realtime", &record.realtime_prio, + OPT_INTEGER('r', "realtime", &realtime_prio, "collect data with this RT SCHED_FIFO priority"), - OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay, + OPT_BOOLEAN('D', "no-delay", &nodelay, "collect data without buffering"), - OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples, + OPT_BOOLEAN('R', "raw-samples", &raw_samples, "collect raw sample records from all opened counters"), - OPT_BOOLEAN('a', "all-cpus", &record.opts.system_wide, + OPT_BOOLEAN('a', "all-cpus", &system_wide, "system-wide collection from all CPUs"), - OPT_BOOLEAN('A', "append", &record.append_file, + OPT_BOOLEAN('A', "append", &append_file, "append to the output file to do incremental profiling"), - OPT_STRING('C', "cpu", &record.opts.cpu_list, "cpu", + OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to monitor"), - OPT_BOOLEAN('f', "force", &record.force, + OPT_BOOLEAN('f', "force", &force, "overwrite existing data file (deprecated)"), - OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"), - OPT_STRING('o', "output", &record.output_name, "file", + OPT_U64('c', "count", &user_interval, "event period to sample"), + OPT_STRING('o', "output", &output_name, "file", "output file name"), - OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit, + OPT_BOOLEAN('i', "no-inherit", &no_inherit, "child tasks do not inherit counters"), - OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"), - OPT_UINTEGER('m', "mmap-pages", &record.opts.mmap_pages, - "number of mmap data pages"), - OPT_BOOLEAN(0, "group", &record.opts.group, + OPT_UINTEGER('F', "freq", &user_freq, "profile at this frequency"), + OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"), + OPT_BOOLEAN(0, "group", &group, "put the counters into a counter group"), - OPT_BOOLEAN('g', "call-graph", &record.opts.call_graph, + OPT_BOOLEAN('g', "call-graph", &call_graph, "do call-graph (stack chain/backtrace) recording"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show counter open errors, etc)"), OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"), - OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat, + OPT_BOOLEAN('s', "stat", &inherit_stat, "per thread counts"), - OPT_BOOLEAN('d', "data", &record.opts.sample_address, + OPT_BOOLEAN('d', "data", &sample_address, "Sample addresses"), - OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"), - OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"), - OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples, + OPT_BOOLEAN('T', "timestamp", &sample_time, "Sample timestamps"), + OPT_BOOLEAN('n', "no-samples", &no_samples, "don't sample"), - OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache, + OPT_BOOLEAN('N', "no-buildid-cache", &no_buildid_cache, "do not update the buildid cache"), - OPT_BOOLEAN('B', "no-buildid", &record.no_buildid, + OPT_BOOLEAN('B', "no-buildid", &no_buildid, "do not collect buildids in perf.data"), - OPT_CALLBACK('G', "cgroup", &record.evlist, "name", + 
OPT_CALLBACK('G', "cgroup", &evsel_list, "name", "monitor event in cgroup name only", parse_cgroups), OPT_END() @@ -734,8 +819,6 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) { int err = -ENOMEM; struct perf_evsel *pos; - struct perf_evlist *evsel_list; - struct perf_record *rec = &record; perf_header__set_cmdline(argc, argv); @@ -743,25 +826,23 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) if (evsel_list == NULL) return -ENOMEM; - rec->evlist = evsel_list; - argc = parse_options(argc, argv, record_options, record_usage, PARSE_OPT_STOP_AT_NON_OPTION); - if (!argc && rec->opts.target_pid == -1 && rec->opts.target_tid == -1 && - !rec->opts.system_wide && !rec->opts.cpu_list) + if (!argc && target_pid == -1 && target_tid == -1 && + !system_wide && !cpu_list) usage_with_options(record_usage, record_options); - if (rec->force && rec->append_file) { + if (force && append_file) { fprintf(stderr, "Can't overwrite and append at the same time." " You need to choose between -f and -A"); usage_with_options(record_usage, record_options); - } else if (rec->append_file) { - rec->write_mode = WRITE_APPEND; + } else if (append_file) { + write_mode = WRITE_APPEND; } else { - rec->write_mode = WRITE_FORCE; + write_mode = WRITE_FORCE; } - if (nr_cgroups && !rec->opts.system_wide) { + if (nr_cgroups && !system_wide) { fprintf(stderr, "cgroup monitoring only available in" " system-wide mode\n"); usage_with_options(record_usage, record_options); @@ -779,7 +860,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) "If some relocation was applied (e.g. kexec) symbols may be misresolved\n" "even with a suitable vmlinux or kallsyms file.\n\n"); - if (rec->no_buildid_cache || rec->no_buildid) + if (no_buildid_cache || no_buildid) disable_buildid_cache(); if (evsel_list->nr_entries == 0 && @@ -788,37 +869,43 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) goto out_symbol_exit; } - if (rec->opts.target_pid != -1) - rec->opts.target_tid = rec->opts.target_pid; + if (target_pid != -1) + target_tid = target_pid; - if (perf_evlist__create_maps(evsel_list, rec->opts.target_pid, - rec->opts.target_tid, rec->opts.cpu_list) < 0) + if (perf_evlist__create_maps(evsel_list, target_pid, + target_tid, cpu_list) < 0) usage_with_options(record_usage, record_options); list_for_each_entry(pos, &evsel_list->entries, node) { + if (perf_evsel__alloc_fd(pos, evsel_list->cpus->nr, + evsel_list->threads->nr) < 0) + goto out_free_fd; if (perf_header__push_event(pos->attr.config, event_name(pos))) goto out_free_fd; } - if (rec->opts.user_interval != ULLONG_MAX) - rec->opts.default_interval = rec->opts.user_interval; - if (rec->opts.user_freq != UINT_MAX) - rec->opts.freq = rec->opts.user_freq; + if (perf_evlist__alloc_pollfd(evsel_list) < 0) + goto out_free_fd; + + if (user_interval != ULLONG_MAX) + default_interval = user_interval; + if (user_freq != UINT_MAX) + freq = user_freq; /* * User specified count overrides default frequency. 
*/ - if (rec->opts.default_interval) - rec->opts.freq = 0; - else if (rec->opts.freq) { - rec->opts.default_interval = rec->opts.freq; + if (default_interval) + freq = 0; + else if (freq) { + default_interval = freq; } else { fprintf(stderr, "frequency and count are zero, aborting\n"); err = -EINVAL; goto out_free_fd; } - err = __cmd_record(&record, argc, argv); + err = __cmd_record(argc, argv); out_free_fd: perf_evlist__delete_maps(evsel_list); out_symbol_exit: diff --git a/trunk/tools/perf/builtin-report.c b/trunk/tools/perf/builtin-report.c index 25d34d483e49..4d7c8340c326 100644 --- a/trunk/tools/perf/builtin-report.c +++ b/trunk/tools/perf/builtin-report.c @@ -25,7 +25,6 @@ #include "util/evsel.h" #include "util/header.h" #include "util/session.h" -#include "util/tool.h" #include "util/parse-options.h" #include "util/parse-events.h" @@ -36,35 +35,38 @@ #include -struct perf_report { - struct perf_tool tool; - struct perf_session *session; - char const *input_name; - bool force, use_tui, use_stdio; - bool hide_unresolved; - bool dont_use_callchains; - bool show_full_info; - bool show_threads; - bool inverted_callchain; - struct perf_read_values show_threads_values; - const char *pretty_printing_style; - symbol_filter_t annotate_init; - const char *cpu_list; - DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); -}; +static char const *input_name = "perf.data"; + +static bool force, use_tui, use_stdio; +static bool hide_unresolved; +static bool dont_use_callchains; +static bool show_full_info; + +static bool show_threads; +static struct perf_read_values show_threads_values; + +static const char default_pretty_printing_style[] = "normal"; +static const char *pretty_printing_style = default_pretty_printing_style; + +static char callchain_default_opt[] = "fractal,0.5,callee"; +static bool inverted_callchain; +static symbol_filter_t annotate_init; + +static const char *cpu_list; +static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); -static int perf_evsel__add_hist_entry(struct perf_evsel *evsel, - struct addr_location *al, - struct perf_sample *sample, - struct machine *machine) +static int perf_session__add_hist_entry(struct perf_session *session, + struct addr_location *al, + struct perf_sample *sample, + struct perf_evsel *evsel) { struct symbol *parent = NULL; int err = 0; struct hist_entry *he; if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) { - err = machine__resolve_callchain(machine, evsel, al->thread, - sample->callchain, &parent); + err = perf_session__resolve_callchain(session, al->thread, + sample->callchain, &parent); if (err) return err; } @@ -74,8 +76,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel, return -ENOMEM; if (symbol_conf.use_callchain) { - err = callchain_append(he->callchain, - &evsel->hists.callchain_cursor, + err = callchain_append(he->callchain, &session->callchain_cursor, sample->period); if (err) return err; @@ -91,7 +92,8 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel, assert(evsel != NULL); err = -ENOMEM; - if (notes->src == NULL && symbol__alloc_hist(he->ms.sym) < 0) + if (notes->src == NULL && + symbol__alloc_hist(he->ms.sym, session->evlist->nr_entries) < 0) goto out; err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); @@ -104,32 +106,30 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel, } -static int process_sample_event(struct perf_tool *tool, - union perf_event *event, +static int process_sample_event(union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, 
- struct machine *machine) + struct perf_session *session) { - struct perf_report *rep = container_of(tool, struct perf_report, tool); struct addr_location al; - if (perf_event__preprocess_sample(event, machine, &al, sample, - rep->annotate_init) < 0) { + if (perf_event__preprocess_sample(event, session, &al, sample, + annotate_init) < 0) { fprintf(stderr, "problem processing %d event, skipping it.\n", event->header.type); return -1; } - if (al.filtered || (rep->hide_unresolved && al.sym == NULL)) + if (al.filtered || (hide_unresolved && al.sym == NULL)) return 0; - if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap)) + if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) return 0; if (al.map != NULL) al.map->dso->hit = 1; - if (perf_evsel__add_hist_entry(evsel, &al, sample, machine)) { + if (perf_session__add_hist_entry(session, &al, sample, evsel)) { pr_debug("problem incrementing symbol period, skipping event\n"); return -1; } @@ -137,17 +137,15 @@ static int process_sample_event(struct perf_tool *tool, return 0; } -static int process_read_event(struct perf_tool *tool, - union perf_event *event, +static int process_read_event(union perf_event *event, struct perf_sample *sample __used, - struct perf_evsel *evsel, - struct machine *machine __used) + struct perf_session *session) { - struct perf_report *rep = container_of(tool, struct perf_report, tool); - - if (rep->show_threads) { + struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, + event->read.id); + if (show_threads) { const char *name = evsel ? event_name(evsel) : "unknown"; - perf_read_values_add_value(&rep->show_threads_values, + perf_read_values_add_value(&show_threads_values, event->read.pid, event->read.tid, event->read.id, name, @@ -161,10 +159,8 @@ static int process_read_event(struct perf_tool *tool, return 0; } -static int perf_report__setup_sample_type(struct perf_report *rep) +static int perf_session__setup_sample_type(struct perf_session *self) { - struct perf_session *self = rep->session; - if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) { if (sort__has_parent) { ui__warning("Selected --sort parent, but no " @@ -177,8 +173,7 @@ static int perf_report__setup_sample_type(struct perf_report *rep) "you call 'perf record' without -g?\n"); return -1; } - } else if (!rep->dont_use_callchains && - callchain_param.mode != CHAIN_NONE && + } else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE && !symbol_conf.use_callchain) { symbol_conf.use_callchain = true; if (callchain_register_param(&callchain_param) < 0) { @@ -191,6 +186,22 @@ static int perf_report__setup_sample_type(struct perf_report *rep) return 0; } +static struct perf_event_ops event_ops = { + .sample = process_sample_event, + .mmap = perf_event__process_mmap, + .comm = perf_event__process_comm, + .exit = perf_event__process_task, + .fork = perf_event__process_task, + .lost = perf_event__process_lost, + .read = process_read_event, + .attr = perf_event__process_attr, + .event_type = perf_event__process_event_type, + .tracing_data = perf_event__process_tracing_data, + .build_id = perf_event__process_build_id, + .ordered_samples = true, + .ordering_requires_timestamps = true, +}; + extern volatile int session_done; static void sig_handler(int sig __used) @@ -213,7 +224,6 @@ static size_t hists__fprintf_nr_sample_events(struct hists *self, } static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist, - struct perf_report *rep, const char *help) { struct perf_evsel *pos; @@ -231,18 +241,18 @@ static int 
perf_evlist__tty_browse_hists(struct perf_evlist *evlist, parent_pattern == default_parent_pattern) { fprintf(stdout, "#\n# (%s)\n#\n", help); - if (rep->show_threads) { - bool style = !strcmp(rep->pretty_printing_style, "raw"); - perf_read_values_display(stdout, &rep->show_threads_values, + if (show_threads) { + bool style = !strcmp(pretty_printing_style, "raw"); + perf_read_values_display(stdout, &show_threads_values, style); - perf_read_values_destroy(&rep->show_threads_values); + perf_read_values_destroy(&show_threads_values); } } return 0; } -static int __cmd_report(struct perf_report *rep) +static int __cmd_report(void) { int ret = -EINVAL; u64 nr_samples; @@ -254,31 +264,27 @@ static int __cmd_report(struct perf_report *rep) signal(SIGINT, sig_handler); - session = perf_session__new(rep->input_name, O_RDONLY, - rep->force, false, &rep->tool); + session = perf_session__new(input_name, O_RDONLY, force, false, &event_ops); if (session == NULL) return -ENOMEM; - rep->session = session; - - if (rep->cpu_list) { - ret = perf_session__cpu_bitmap(session, rep->cpu_list, - rep->cpu_bitmap); + if (cpu_list) { + ret = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap); if (ret) goto out_delete; } if (use_browser <= 0) - perf_session__fprintf_info(session, stdout, rep->show_full_info); + perf_session__fprintf_info(session, stdout, show_full_info); - if (rep->show_threads) - perf_read_values_init(&rep->show_threads_values); + if (show_threads) + perf_read_values_init(&show_threads_values); - ret = perf_report__setup_sample_type(rep); + ret = perf_session__setup_sample_type(session); if (ret) goto out_delete; - ret = perf_session__process_events(session, &rep->tool); + ret = perf_session__process_events(session, &event_ops); if (ret) goto out_delete; @@ -321,7 +327,7 @@ static int __cmd_report(struct perf_report *rep) } if (nr_samples == 0) { - ui__warning("The %s file has no samples!\n", session->filename); + ui__warning("The %s file has no samples!\n", input_name); goto out_delete; } @@ -329,7 +335,7 @@ static int __cmd_report(struct perf_report *rep) perf_evlist__tui_browse_hists(session->evlist, help, NULL, NULL, 0); } else - perf_evlist__tty_browse_hists(session->evlist, rep, help); + perf_evlist__tty_browse_hists(session->evlist, help); out_delete: /* @@ -348,9 +354,9 @@ static int __cmd_report(struct perf_report *rep) } static int -parse_callchain_opt(const struct option *opt, const char *arg, int unset) +parse_callchain_opt(const struct option *opt __used, const char *arg, + int unset) { - struct perf_report *rep = (struct perf_report *)opt->value; char *tok, *tok2; char *endptr; @@ -358,7 +364,7 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset) * --no-call-graph */ if (unset) { - rep->dont_use_callchains = true; + dont_use_callchains = true; return 0; } @@ -406,7 +412,7 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset) goto setup; if (tok2[0] != 'c') { - callchain_param.print_limit = strtoul(tok2, &endptr, 0); + callchain_param.print_limit = strtod(tok2, &endptr); tok2 = strtok(NULL, ","); if (!tok2) goto setup; @@ -427,34 +433,13 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset) return 0; } -int cmd_report(int argc, const char **argv, const char *prefix __used) -{ - struct stat st; - char callchain_default_opt[] = "fractal,0.5,callee"; - const char * const report_usage[] = { - "perf report []", - NULL - }; - struct perf_report report = { - .tool = { - .sample = process_sample_event, - .mmap = 
perf_event__process_mmap, - .comm = perf_event__process_comm, - .exit = perf_event__process_task, - .fork = perf_event__process_task, - .lost = perf_event__process_lost, - .read = process_read_event, - .attr = perf_event__process_attr, - .event_type = perf_event__process_event_type, - .tracing_data = perf_event__process_tracing_data, - .build_id = perf_event__process_build_id, - .ordered_samples = true, - .ordering_requires_timestamps = true, - }, - .pretty_printing_style = "normal", - }; - const struct option options[] = { - OPT_STRING('i', "input", &report.input_name, "file", +static const char * const report_usage[] = { + "perf report [] ", + NULL +}; + +static const struct option options[] = { + OPT_STRING('i', "input", &input_name, "file", "input file name"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), @@ -464,18 +449,17 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) "file", "vmlinux pathname"), OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file", "kallsyms pathname"), - OPT_BOOLEAN('f', "force", &report.force, "don't complain, do it"), + OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, "load module symbols - WARNING: use only with -k and LIVE kernel"), OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, "Show a column with the number of samples"), - OPT_BOOLEAN('T', "threads", &report.show_threads, + OPT_BOOLEAN('T', "threads", &show_threads, "Show per-thread event counters"), - OPT_STRING(0, "pretty", &report.pretty_printing_style, "key", + OPT_STRING(0, "pretty", &pretty_printing_style, "key", "pretty printing style key: normal raw"), - OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"), - OPT_BOOLEAN(0, "stdio", &report.use_stdio, - "Use the stdio interface"), + OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"), + OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"), OPT_STRING('s', "sort", &sort_order, "key[,key2...]", "sort by key(s): pid, comm, dso, symbol, parent"), OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization, @@ -484,14 +468,13 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) "regex filter to identify parent, see: '--sort parent'"), OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other, "Only display entries with parent-match"), - OPT_CALLBACK_DEFAULT('g', "call-graph", &report, "output_type,min_percent[,print_limit],call_order", - "Display callchains using output_type (graph, flat, fractal, or none) , min percent threshold, optional print limit and callchain order. " + OPT_CALLBACK_DEFAULT('g', "call-graph", NULL, "output_type,min_percent, call_order", + "Display callchains using output_type (graph, flat, fractal, or none) , min percent threshold and callchain order. 
" "Default: fractal,0.5,callee", &parse_callchain_opt, callchain_default_opt), - OPT_BOOLEAN('G', "inverted", &report.inverted_callchain, - "alias for inverted call graph"), + OPT_BOOLEAN('G', "inverted", &inverted_callchain, "alias for inverted call graph"), OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", "only consider symbols in these dsos"), - OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", + OPT_STRING('C', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", "only consider symbols in these comms"), OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", "only consider these symbols"), @@ -501,13 +484,12 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator", "separator for columns, no spaces will be added between " "columns '.' is reserved."), - OPT_BOOLEAN('U', "hide-unresolved", &report.hide_unresolved, + OPT_BOOLEAN('U', "hide-unresolved", &hide_unresolved, "Only display entries resolved to a symbol"), OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", "Look for files with symbols relative to this directory"), - OPT_STRING('C', "cpu", &report.cpu_list, "cpu", - "list of cpus to profile"), - OPT_BOOLEAN('I', "show-info", &report.show_full_info, + OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"), + OPT_BOOLEAN('I', "show-info", &show_full_info, "Display extended information about perf.data file"), OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src, "Interleave source code with assembly code (default)"), @@ -518,30 +500,24 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period, "Show a column with the sum of periods"), OPT_END() - }; +}; +int cmd_report(int argc, const char **argv, const char *prefix __used) +{ argc = parse_options(argc, argv, options, report_usage, 0); - if (report.use_stdio) + if (use_stdio) use_browser = 0; - else if (report.use_tui) + else if (use_tui) use_browser = 1; - if (report.inverted_callchain) + if (inverted_callchain) callchain_param.order = ORDER_CALLER; - if (!report.input_name || !strlen(report.input_name)) { - if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) - report.input_name = "-"; - else - report.input_name = "perf.data"; - } - - if (strcmp(report.input_name, "-") != 0) + if (strcmp(input_name, "-") != 0) setup_browser(true); else use_browser = 0; - /* * Only in the newt browser we are doing integrated annotation, * so don't allocate extra space that won't be used in the stdio @@ -549,7 +525,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) */ if (use_browser > 0) { symbol_conf.priv_size = sizeof(struct annotation); - report.annotate_init = symbol__annotate_init; + annotate_init = symbol__annotate_init; /* * For searching by name on the "Browse map details". 
* providing it only in verbose mode not to bloat too @@ -596,5 +572,5 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout); sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout); - return __cmd_report(&report); + return __cmd_report(); } diff --git a/trunk/tools/perf/builtin-sched.c b/trunk/tools/perf/builtin-sched.c index fb8b5f83b4a0..5177964943e7 100644 --- a/trunk/tools/perf/builtin-sched.c +++ b/trunk/tools/perf/builtin-sched.c @@ -2,14 +2,11 @@ #include "perf.h" #include "util/util.h" -#include "util/evlist.h" #include "util/cache.h" -#include "util/evsel.h" #include "util/symbol.h" #include "util/thread.h" #include "util/header.h" #include "util/session.h" -#include "util/tool.h" #include "util/parse-options.h" #include "util/trace-event.h" @@ -22,7 +19,7 @@ #include #include -static const char *input_name; +static char const *input_name = "perf.data"; static char default_sort_order[] = "avg, max, switch, runtime"; static const char *sort_order = default_sort_order; @@ -726,21 +723,21 @@ struct trace_migrate_task_event { struct trace_sched_handler { void (*switch_event)(struct trace_switch_event *, - struct machine *, + struct perf_session *, struct event *, int cpu, u64 timestamp, struct thread *thread); void (*runtime_event)(struct trace_runtime_event *, - struct machine *, + struct perf_session *, struct event *, int cpu, u64 timestamp, struct thread *thread); void (*wakeup_event)(struct trace_wakeup_event *, - struct machine *, + struct perf_session *, struct event *, int cpu, u64 timestamp, @@ -753,7 +750,7 @@ struct trace_sched_handler { struct thread *thread); void (*migrate_task_event)(struct trace_migrate_task_event *, - struct machine *machine, + struct perf_session *session, struct event *, int cpu, u64 timestamp, @@ -763,7 +760,7 @@ struct trace_sched_handler { static void replay_wakeup_event(struct trace_wakeup_event *wakeup_event, - struct machine *machine __used, + struct perf_session *session __used, struct event *event, int cpu __used, u64 timestamp __used, @@ -790,7 +787,7 @@ static u64 cpu_last_switched[MAX_CPUS]; static void replay_switch_event(struct trace_switch_event *switch_event, - struct machine *machine __used, + struct perf_session *session __used, struct event *event, int cpu, u64 timestamp, @@ -1024,7 +1021,7 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp) static void latency_switch_event(struct trace_switch_event *switch_event, - struct machine *machine, + struct perf_session *session, struct event *event __used, int cpu, u64 timestamp, @@ -1048,8 +1045,8 @@ latency_switch_event(struct trace_switch_event *switch_event, die("hm, delta: %" PRIu64 " < 0 ?\n", delta); - sched_out = machine__findnew_thread(machine, switch_event->prev_pid); - sched_in = machine__findnew_thread(machine, switch_event->next_pid); + sched_out = perf_session__findnew(session, switch_event->prev_pid); + sched_in = perf_session__findnew(session, switch_event->next_pid); out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); if (!out_events) { @@ -1077,13 +1074,13 @@ latency_switch_event(struct trace_switch_event *switch_event, static void latency_runtime_event(struct trace_runtime_event *runtime_event, - struct machine *machine, + struct perf_session *session, struct event *event __used, int cpu, u64 timestamp, struct thread *this_thread __used) { - struct thread *thread = machine__findnew_thread(machine, runtime_event->pid); + struct 
thread *thread = perf_session__findnew(session, runtime_event->pid); struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); BUG_ON(cpu >= MAX_CPUS || cpu < 0); @@ -1100,7 +1097,7 @@ latency_runtime_event(struct trace_runtime_event *runtime_event, static void latency_wakeup_event(struct trace_wakeup_event *wakeup_event, - struct machine *machine, + struct perf_session *session, struct event *__event __used, int cpu __used, u64 timestamp, @@ -1114,7 +1111,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, if (!wakeup_event->success) return; - wakee = machine__findnew_thread(machine, wakeup_event->pid); + wakee = perf_session__findnew(session, wakeup_event->pid); atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); if (!atoms) { thread_atoms_insert(wakee); @@ -1148,7 +1145,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, static void latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, - struct machine *machine, + struct perf_session *session, struct event *__event __used, int cpu __used, u64 timestamp, @@ -1164,7 +1161,7 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, if (profile_cpu == -1) return; - migrant = machine__findnew_thread(machine, migrate_task_event->pid); + migrant = perf_session__findnew(session, migrate_task_event->pid); atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); if (!atoms) { thread_atoms_insert(migrant); @@ -1359,13 +1356,12 @@ static void sort_lat(void) static struct trace_sched_handler *trace_handler; static void -process_sched_wakeup_event(struct perf_tool *tool __used, +process_sched_wakeup_event(void *data, struct perf_session *session, struct event *event, - struct perf_sample *sample, - struct machine *machine, - struct thread *thread) + int cpu __used, + u64 timestamp __used, + struct thread *thread __used) { - void *data = sample->raw_data; struct trace_wakeup_event wakeup_event; FILL_COMMON_FIELDS(wakeup_event, event, data); @@ -1377,8 +1373,8 @@ process_sched_wakeup_event(struct perf_tool *tool __used, FILL_FIELD(wakeup_event, cpu, event, data); if (trace_handler->wakeup_event) - trace_handler->wakeup_event(&wakeup_event, machine, event, - sample->cpu, sample->time, thread); + trace_handler->wakeup_event(&wakeup_event, session, event, + cpu, timestamp, thread); } /* @@ -1396,7 +1392,7 @@ static char next_shortname2 = '0'; static void map_switch_event(struct trace_switch_event *switch_event, - struct machine *machine, + struct perf_session *session, struct event *event __used, int this_cpu, u64 timestamp, @@ -1424,8 +1420,8 @@ map_switch_event(struct trace_switch_event *switch_event, die("hm, delta: %" PRIu64 " < 0 ?\n", delta); - sched_out = machine__findnew_thread(machine, switch_event->prev_pid); - sched_in = machine__findnew_thread(machine, switch_event->next_pid); + sched_out = perf_session__findnew(session, switch_event->prev_pid); + sched_in = perf_session__findnew(session, switch_event->next_pid); curr_thread[this_cpu] = sched_in; @@ -1473,15 +1469,14 @@ map_switch_event(struct trace_switch_event *switch_event, } } + static void -process_sched_switch_event(struct perf_tool *tool __used, +process_sched_switch_event(void *data, struct perf_session *session, struct event *event, - struct perf_sample *sample, - struct machine *machine, - struct thread *thread) + int this_cpu, + u64 timestamp __used, + struct thread *thread __used) { - int this_cpu = sample->cpu; - void *data = sample->raw_data; struct trace_switch_event 
switch_event; FILL_COMMON_FIELDS(switch_event, event, data); @@ -1503,20 +1498,19 @@ process_sched_switch_event(struct perf_tool *tool __used, nr_context_switch_bugs++; } if (trace_handler->switch_event) - trace_handler->switch_event(&switch_event, machine, event, - this_cpu, sample->time, thread); + trace_handler->switch_event(&switch_event, session, event, + this_cpu, timestamp, thread); curr_pid[this_cpu] = switch_event.next_pid; } static void -process_sched_runtime_event(struct perf_tool *tool __used, - struct event *event, - struct perf_sample *sample, - struct machine *machine, - struct thread *thread) +process_sched_runtime_event(void *data, struct perf_session *session, + struct event *event, + int cpu __used, + u64 timestamp __used, + struct thread *thread __used) { - void *data = sample->raw_data; struct trace_runtime_event runtime_event; FILL_ARRAY(runtime_event, comm, event, data); @@ -1525,18 +1519,16 @@ process_sched_runtime_event(struct perf_tool *tool __used, FILL_FIELD(runtime_event, vruntime, event, data); if (trace_handler->runtime_event) - trace_handler->runtime_event(&runtime_event, machine, event, - sample->cpu, sample->time, thread); + trace_handler->runtime_event(&runtime_event, session, event, cpu, timestamp, thread); } static void -process_sched_fork_event(struct perf_tool *tool __used, +process_sched_fork_event(void *data, struct event *event, - struct perf_sample *sample, - struct machine *machine __used, - struct thread *thread) + int cpu __used, + u64 timestamp __used, + struct thread *thread __used) { - void *data = sample->raw_data; struct trace_fork_event fork_event; FILL_COMMON_FIELDS(fork_event, event, data); @@ -1548,14 +1540,13 @@ process_sched_fork_event(struct perf_tool *tool __used, if (trace_handler->fork_event) trace_handler->fork_event(&fork_event, event, - sample->cpu, sample->time, thread); + cpu, timestamp, thread); } static void -process_sched_exit_event(struct perf_tool *tool __used, - struct event *event, - struct perf_sample *sample __used, - struct machine *machine __used, +process_sched_exit_event(struct event *event, + int cpu __used, + u64 timestamp __used, struct thread *thread __used) { if (verbose) @@ -1563,13 +1554,12 @@ process_sched_exit_event(struct perf_tool *tool __used, } static void -process_sched_migrate_task_event(struct perf_tool *tool __used, - struct event *event, - struct perf_sample *sample, - struct machine *machine, - struct thread *thread) +process_sched_migrate_task_event(void *data, struct perf_session *session, + struct event *event, + int cpu __used, + u64 timestamp __used, + struct thread *thread __used) { - void *data = sample->raw_data; struct trace_migrate_task_event migrate_task_event; FILL_COMMON_FIELDS(migrate_task_event, event, data); @@ -1580,47 +1570,67 @@ process_sched_migrate_task_event(struct perf_tool *tool __used, FILL_FIELD(migrate_task_event, cpu, event, data); if (trace_handler->migrate_task_event) - trace_handler->migrate_task_event(&migrate_task_event, machine, - event, sample->cpu, - sample->time, thread); + trace_handler->migrate_task_event(&migrate_task_event, session, + event, cpu, timestamp, thread); } -typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event *event, - struct perf_sample *sample, - struct machine *machine, - struct thread *thread); +static void process_raw_event(union perf_event *raw_event __used, + struct perf_session *session, void *data, int cpu, + u64 timestamp, struct thread *thread) +{ + struct event *event; + int type; + + + type = 
trace_parse_common_type(data); + event = trace_find_event(type); + + if (!strcmp(event->name, "sched_switch")) + process_sched_switch_event(data, session, event, cpu, timestamp, thread); + if (!strcmp(event->name, "sched_stat_runtime")) + process_sched_runtime_event(data, session, event, cpu, timestamp, thread); + if (!strcmp(event->name, "sched_wakeup")) + process_sched_wakeup_event(data, session, event, cpu, timestamp, thread); + if (!strcmp(event->name, "sched_wakeup_new")) + process_sched_wakeup_event(data, session, event, cpu, timestamp, thread); + if (!strcmp(event->name, "sched_process_fork")) + process_sched_fork_event(data, event, cpu, timestamp, thread); + if (!strcmp(event->name, "sched_process_exit")) + process_sched_exit_event(event, cpu, timestamp, thread); + if (!strcmp(event->name, "sched_migrate_task")) + process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread); +} -static int perf_sched__process_tracepoint_sample(struct perf_tool *tool, - union perf_event *event __used, - struct perf_sample *sample, - struct perf_evsel *evsel, - struct machine *machine) +static int process_sample_event(union perf_event *event, + struct perf_sample *sample, + struct perf_evsel *evsel __used, + struct perf_session *session) { - struct thread *thread = machine__findnew_thread(machine, sample->pid); + struct thread *thread; + + if (!(session->sample_type & PERF_SAMPLE_RAW)) + return 0; + thread = perf_session__findnew(session, sample->pid); if (thread == NULL) { - pr_debug("problem processing %s event, skipping it.\n", - evsel->name); + pr_debug("problem processing %d event, skipping it.\n", + event->header.type); return -1; } - evsel->hists.stats.total_period += sample->period; - hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE); + dump_printf(" ... 
thread: %s:%d\n", thread->comm, thread->pid); - if (evsel->handler.func != NULL) { - tracepoint_handler f = evsel->handler.func; + if (profile_cpu != -1 && profile_cpu != (int)sample->cpu) + return 0; - if (evsel->handler.data == NULL) - evsel->handler.data = trace_find_event(evsel->attr.config); - - f(tool, evsel->handler.data, sample, machine, thread); - } + process_raw_event(event, session, sample->raw_data, sample->cpu, + sample->time, thread); return 0; } -static struct perf_tool perf_sched = { - .sample = perf_sched__process_tracepoint_sample, +static struct perf_event_ops event_ops = { + .sample = process_sample_event, .comm = perf_event__process_comm, .lost = perf_event__process_lost, .fork = perf_event__process_task, @@ -1630,25 +1640,13 @@ static struct perf_tool perf_sched = { static void read_events(bool destroy, struct perf_session **psession) { int err = -EINVAL; - const struct perf_evsel_str_handler handlers[] = { - { "sched:sched_switch", process_sched_switch_event, }, - { "sched:sched_stat_runtime", process_sched_runtime_event, }, - { "sched:sched_wakeup", process_sched_wakeup_event, }, - { "sched:sched_wakeup_new", process_sched_wakeup_event, }, - { "sched:sched_process_fork", process_sched_fork_event, }, - { "sched:sched_process_exit", process_sched_exit_event, }, - { "sched:sched_migrate_task", process_sched_migrate_task_event, }, - }; struct perf_session *session = perf_session__new(input_name, O_RDONLY, - 0, false, &perf_sched); + 0, false, &event_ops); if (session == NULL) die("No Memory"); - err = perf_evlist__set_tracepoints_handlers_array(session->evlist, handlers); - assert(err == 0); - if (perf_session__has_traces(session, "record -R")) { - err = perf_session__process_events(session, &perf_sched); + err = perf_session__process_events(session, &event_ops); if (err) die("Failed to process events, error %d", err); diff --git a/trunk/tools/perf/builtin-script.c b/trunk/tools/perf/builtin-script.c index fd1909afcfd6..2f62a2952269 100644 --- a/trunk/tools/perf/builtin-script.c +++ b/trunk/tools/perf/builtin-script.c @@ -7,7 +7,6 @@ #include "util/header.h" #include "util/parse-options.h" #include "util/session.h" -#include "util/tool.h" #include "util/symbol.h" #include "util/thread.h" #include "util/trace-event.h" @@ -24,7 +23,6 @@ static u64 nr_unordered; extern const struct option record_options[]; static bool no_callchain; static bool show_full_info; -static bool system_wide; static const char *cpu_list; static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); @@ -317,7 +315,7 @@ static bool sample_addr_correlates_sym(struct perf_event_attr *attr) static void print_sample_addr(union perf_event *event, struct perf_sample *sample, - struct machine *machine, + struct perf_session *session, struct thread *thread, struct perf_event_attr *attr) { @@ -330,11 +328,11 @@ static void print_sample_addr(union perf_event *event, if (!sample_addr_correlates_sym(attr)) return; - thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION, - sample->addr, &al); + thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION, + event->ip.pid, sample->addr, &al); if (!al.map) - thread__find_addr_map(thread, machine, cpumode, MAP__VARIABLE, - sample->addr, &al); + thread__find_addr_map(thread, session, cpumode, MAP__VARIABLE, + event->ip.pid, sample->addr, &al); al.cpu = sample->cpu; al.sym = NULL; @@ -364,7 +362,7 @@ static void print_sample_addr(union perf_event *event, static void process_event(union perf_event *event __unused, struct perf_sample *sample, struct perf_evsel *evsel, - 
struct machine *machine, + struct perf_session *session, struct thread *thread) { struct perf_event_attr *attr = &evsel->attr; @@ -379,15 +377,15 @@ static void process_event(union perf_event *event __unused, sample->raw_size); if (PRINT_FIELD(ADDR)) - print_sample_addr(event, sample, machine, thread, attr); + print_sample_addr(event, sample, session, thread, attr); if (PRINT_FIELD(IP)) { if (!symbol_conf.use_callchain) printf(" "); else printf("\n"); - perf_event__print_ip(event, sample, machine, evsel, - PRINT_FIELD(SYM), PRINT_FIELD(DSO)); + perf_session__print_ip(event, sample, session, + PRINT_FIELD(SYM), PRINT_FIELD(DSO)); } printf("\n"); @@ -434,16 +432,14 @@ static int cleanup_scripting(void) return scripting_ops->stop_script(); } -static const char *input_name; +static char const *input_name = "perf.data"; -static int process_sample_event(struct perf_tool *tool __used, - union perf_event *event, +static int process_sample_event(union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, - struct machine *machine) + struct perf_session *session) { - struct addr_location al; - struct thread *thread = machine__findnew_thread(machine, event->ip.tid); + struct thread *thread = perf_session__findnew(session, event->ip.pid); if (thread == NULL) { pr_debug("problem processing %d event, skipping it.\n", @@ -462,25 +458,16 @@ static int process_sample_event(struct perf_tool *tool __used, return 0; } - if (perf_event__preprocess_sample(event, machine, &al, sample, 0) < 0) { - pr_err("problem processing %d event, skipping it.\n", - event->header.type); - return -1; - } - - if (al.filtered) - return 0; - if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) return 0; - scripting_ops->process_event(event, sample, evsel, machine, thread); + scripting_ops->process_event(event, sample, evsel, session, thread); - evsel->hists.stats.total_period += sample->period; + session->hists.stats.total_period += sample->period; return 0; } -static struct perf_tool perf_script = { +static struct perf_event_ops event_ops = { .sample = process_sample_event, .mmap = perf_event__process_mmap, .comm = perf_event__process_comm, @@ -507,7 +494,7 @@ static int __cmd_script(struct perf_session *session) signal(SIGINT, sig_handler); - ret = perf_session__process_events(session, &perf_script); + ret = perf_session__process_events(session, &event_ops); if (debug_mode) pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered); @@ -536,6 +523,12 @@ static struct script_spec *script_spec__new(const char *spec, return s; } +static void script_spec__delete(struct script_spec *s) +{ + free(s->spec); + free(s); +} + static void script_spec__add(struct script_spec *s) { list_add_tail(&s->node, &script_specs); @@ -561,11 +554,16 @@ static struct script_spec *script_spec__findnew(const char *spec, s = script_spec__new(spec, ops); if (!s) - return NULL; + goto out_delete_spec; script_spec__add(s); return s; + +out_delete_spec: + script_spec__delete(s); + + return NULL; } int script_spec_register(const char *spec, struct scripting_ops *ops) @@ -683,8 +681,7 @@ static int parse_output_fields(const struct option *opt __used, type = PERF_TYPE_RAW; else { fprintf(stderr, "Invalid event type in field string.\n"); - rc = -EINVAL; - goto out; + return -EINVAL; } if (output[type].user_set) @@ -926,24 +923,6 @@ static int read_script_info(struct script_desc *desc, const char *filename) return 0; } -static char *get_script_root(struct dirent *script_dirent, const char *suffix) -{ - char *script_root, *str; - - script_root = 
strdup(script_dirent->d_name); - if (!script_root) - return NULL; - - str = (char *)ends_with(script_root, suffix); - if (!str) { - free(script_root); - return NULL; - } - - *str = '\0'; - return script_root; -} - static int list_available_scripts(const struct option *opt __used, const char *s __used, int unset __used) { @@ -955,6 +934,7 @@ static int list_available_scripts(const struct option *opt __used, struct script_desc *desc; char first_half[BUFSIZ]; char *script_root; + char *str; snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path()); @@ -970,14 +950,16 @@ static int list_available_scripts(const struct option *opt __used, continue; for_each_script(lang_path, lang_dir, script_dirent, script_next) { - script_root = get_script_root(&script_dirent, REPORT_SUFFIX); - if (script_root) { + script_root = strdup(script_dirent.d_name); + str = (char *)ends_with(script_root, REPORT_SUFFIX); + if (str) { + *str = '\0'; desc = script_desc__findnew(script_root); snprintf(script_path, MAXPATHLEN, "%s/%s", lang_path, script_dirent.d_name); read_script_info(desc, script_path); - free(script_root); } + free(script_root); } } @@ -999,7 +981,8 @@ static char *get_script_path(const char *script_root, const char *suffix) char script_path[MAXPATHLEN]; DIR *scripts_dir, *lang_dir; char lang_path[MAXPATHLEN]; - char *__script_root; + char *str, *__script_root; + char *path = NULL; snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path()); @@ -1015,18 +998,23 @@ static char *get_script_path(const char *script_root, const char *suffix) continue; for_each_script(lang_path, lang_dir, script_dirent, script_next) { - __script_root = get_script_root(&script_dirent, suffix); - if (__script_root && !strcmp(script_root, __script_root)) { - free(__script_root); + __script_root = strdup(script_dirent.d_name); + str = (char *)ends_with(__script_root, suffix); + if (str) { + *str = '\0'; + if (strcmp(__script_root, script_root)) + continue; snprintf(script_path, MAXPATHLEN, "%s/%s", lang_path, script_dirent.d_name); - return strdup(script_path); + path = strdup(script_path); + free(__script_root); + break; } free(__script_root); } } - return NULL; + return path; } static bool is_top_script(const char *script_path) @@ -1095,11 +1083,7 @@ static const struct option options[] = { OPT_CALLBACK('f', "fields", NULL, "str", "comma separated output fields prepend with 'type:'. Valid types: hw,sw,trace,raw. 
Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,addr", parse_output_fields), - OPT_BOOLEAN('a', "all-cpus", &system_wide, - "system-wide collection from all CPUs"), - OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"), - OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", - "only display events for these comms"), + OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"), OPT_BOOLEAN('I', "show-info", &show_full_info, "display extended information from perf.data file"), OPT_END() @@ -1126,6 +1110,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) struct perf_session *session; char *script_path = NULL; const char **__argv; + bool system_wide; int i, j, err; setup_scripting(); @@ -1193,17 +1178,15 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) } if (!pid) { + system_wide = true; j = 0; dup2(live_pipe[1], 1); close(live_pipe[0]); - if (is_top_script(argv[0])) { - system_wide = true; - } else if (!system_wide) { + if (!is_top_script(argv[0])) system_wide = !have_cmd(argc - rep_args, &argv[rep_args]); - } __argv = malloc((argc + 6) * sizeof(const char *)); if (!__argv) @@ -1251,11 +1234,10 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) script_path = rep_script_path; if (script_path) { + system_wide = false; j = 0; - if (!rec_script_path) - system_wide = false; - else if (!system_wide) + if (rec_script_path) system_wide = !have_cmd(argc - 1, &argv[1]); __argv = malloc((argc + 2) * sizeof(const char *)); @@ -1279,7 +1261,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) if (!script_name) setup_pager(); - session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_script); + session = perf_session__new(input_name, O_RDONLY, 0, false, &event_ops); if (session == NULL) return -ENOMEM; @@ -1305,7 +1287,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) return -1; } - input = open(session->filename, O_RDONLY); /* input_name */ + input = open(input_name, O_RDONLY); if (input < 0) { perror("failed to open file"); exit(-1); diff --git a/trunk/tools/perf/builtin-stat.c b/trunk/tools/perf/builtin-stat.c index f5d2a63eba66..955930e0a5c3 100644 --- a/trunk/tools/perf/builtin-stat.c +++ b/trunk/tools/perf/builtin-stat.c @@ -578,33 +578,6 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) avg / avg_stats(&walltime_nsecs_stats)); } -/* used for get_ratio_color() */ -enum grc_type { - GRC_STALLED_CYCLES_FE, - GRC_STALLED_CYCLES_BE, - GRC_CACHE_MISSES, - GRC_MAX_NR -}; - -static const char *get_ratio_color(enum grc_type type, double ratio) -{ - static const double grc_table[GRC_MAX_NR][3] = { - [GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 }, - [GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 }, - [GRC_CACHE_MISSES] = { 20.0, 10.0, 5.0 }, - }; - const char *color = PERF_COLOR_NORMAL; - - if (ratio > grc_table[type][0]) - color = PERF_COLOR_RED; - else if (ratio > grc_table[type][1]) - color = PERF_COLOR_MAGENTA; - else if (ratio > grc_table[type][2]) - color = PERF_COLOR_YELLOW; - - return color; -} - static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __used, double avg) { double total, ratio = 0.0; @@ -615,7 +588,13 @@ static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __us if (total) ratio = avg / total * 100.0; - color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio); + color = PERF_COLOR_NORMAL; + if (ratio > 50.0) + color = PERF_COLOR_RED; + else if (ratio 
> 30.0) + color = PERF_COLOR_MAGENTA; + else if (ratio > 10.0) + color = PERF_COLOR_YELLOW; fprintf(output, " # "); color_fprintf(output, color, "%6.2f%%", ratio); @@ -632,7 +611,13 @@ static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __use if (total) ratio = avg / total * 100.0; - color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio); + color = PERF_COLOR_NORMAL; + if (ratio > 75.0) + color = PERF_COLOR_RED; + else if (ratio > 50.0) + color = PERF_COLOR_MAGENTA; + else if (ratio > 20.0) + color = PERF_COLOR_YELLOW; fprintf(output, " # "); color_fprintf(output, color, "%6.2f%%", ratio); @@ -649,7 +634,13 @@ static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double if (total) ratio = avg / total * 100.0; - color = get_ratio_color(GRC_CACHE_MISSES, ratio); + color = PERF_COLOR_NORMAL; + if (ratio > 20.0) + color = PERF_COLOR_RED; + else if (ratio > 10.0) + color = PERF_COLOR_MAGENTA; + else if (ratio > 5.0) + color = PERF_COLOR_YELLOW; fprintf(output, " # "); color_fprintf(output, color, "%6.2f%%", ratio); @@ -666,7 +657,13 @@ static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, dou if (total) ratio = avg / total * 100.0; - color = get_ratio_color(GRC_CACHE_MISSES, ratio); + color = PERF_COLOR_NORMAL; + if (ratio > 20.0) + color = PERF_COLOR_RED; + else if (ratio > 10.0) + color = PERF_COLOR_MAGENTA; + else if (ratio > 5.0) + color = PERF_COLOR_YELLOW; fprintf(output, " # "); color_fprintf(output, color, "%6.2f%%", ratio); @@ -683,7 +680,13 @@ static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, dou if (total) ratio = avg / total * 100.0; - color = get_ratio_color(GRC_CACHE_MISSES, ratio); + color = PERF_COLOR_NORMAL; + if (ratio > 20.0) + color = PERF_COLOR_RED; + else if (ratio > 10.0) + color = PERF_COLOR_MAGENTA; + else if (ratio > 5.0) + color = PERF_COLOR_YELLOW; fprintf(output, " # "); color_fprintf(output, color, "%6.2f%%", ratio); @@ -700,7 +703,13 @@ static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, do if (total) ratio = avg / total * 100.0; - color = get_ratio_color(GRC_CACHE_MISSES, ratio); + color = PERF_COLOR_NORMAL; + if (ratio > 20.0) + color = PERF_COLOR_RED; + else if (ratio > 10.0) + color = PERF_COLOR_MAGENTA; + else if (ratio > 5.0) + color = PERF_COLOR_YELLOW; fprintf(output, " # "); color_fprintf(output, color, "%6.2f%%", ratio); @@ -717,7 +726,13 @@ static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, do if (total) ratio = avg / total * 100.0; - color = get_ratio_color(GRC_CACHE_MISSES, ratio); + color = PERF_COLOR_NORMAL; + if (ratio > 20.0) + color = PERF_COLOR_RED; + else if (ratio > 10.0) + color = PERF_COLOR_MAGENTA; + else if (ratio > 5.0) + color = PERF_COLOR_YELLOW; fprintf(output, " # "); color_fprintf(output, color, "%6.2f%%", ratio); @@ -734,7 +749,13 @@ static void print_ll_cache_misses(int cpu, struct perf_evsel *evsel __used, doub if (total) ratio = avg / total * 100.0; - color = get_ratio_color(GRC_CACHE_MISSES, ratio); + color = PERF_COLOR_NORMAL; + if (ratio > 20.0) + color = PERF_COLOR_RED; + else if (ratio > 10.0) + color = PERF_COLOR_MAGENTA; + else if (ratio > 5.0) + color = PERF_COLOR_YELLOW; fprintf(output, " # "); color_fprintf(output, color, "%6.2f%%", ratio); @@ -1087,13 +1108,22 @@ static const struct option options[] = { */ static int add_default_attributes(void) { + struct perf_evsel *pos; + size_t attr_nr = 0; + size_t c; + /* Set attrs if no event is selected and !null_run: */ if (null_run) 
return 0; if (!evsel_list->nr_entries) { - if (perf_evlist__add_attrs_array(evsel_list, default_attrs) < 0) - return -1; + for (c = 0; c < ARRAY_SIZE(default_attrs); c++) { + pos = perf_evsel__new(default_attrs + c, c + attr_nr); + if (pos == NULL) + return -1; + perf_evlist__add(evsel_list, pos); + } + attr_nr += c; } /* Detailed events get appended to the event list: */ @@ -1102,21 +1132,38 @@ static int add_default_attributes(void) return 0; /* Append detailed run extra attributes: */ - if (perf_evlist__add_attrs_array(evsel_list, detailed_attrs) < 0) - return -1; + for (c = 0; c < ARRAY_SIZE(detailed_attrs); c++) { + pos = perf_evsel__new(detailed_attrs + c, c + attr_nr); + if (pos == NULL) + return -1; + perf_evlist__add(evsel_list, pos); + } + attr_nr += c; if (detailed_run < 2) return 0; /* Append very detailed run extra attributes: */ - if (perf_evlist__add_attrs_array(evsel_list, very_detailed_attrs) < 0) - return -1; + for (c = 0; c < ARRAY_SIZE(very_detailed_attrs); c++) { + pos = perf_evsel__new(very_detailed_attrs + c, c + attr_nr); + if (pos == NULL) + return -1; + perf_evlist__add(evsel_list, pos); + } if (detailed_run < 3) return 0; /* Append very, very detailed run extra attributes: */ - return perf_evlist__add_attrs_array(evsel_list, very_very_detailed_attrs); + for (c = 0; c < ARRAY_SIZE(very_very_detailed_attrs); c++) { + pos = perf_evsel__new(very_very_detailed_attrs + c, c + attr_nr); + if (pos == NULL) + return -1; + perf_evlist__add(evsel_list, pos); + } + + + return 0; } int cmd_stat(int argc, const char **argv, const char *prefix __used) @@ -1220,7 +1267,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) list_for_each_entry(pos, &evsel_list->entries, node) { if (perf_evsel__alloc_stat_priv(pos) < 0 || - perf_evsel__alloc_counts(pos, evsel_list->cpus->nr) < 0) + perf_evsel__alloc_counts(pos, evsel_list->cpus->nr) < 0 || + perf_evsel__alloc_fd(pos, evsel_list->cpus->nr, evsel_list->threads->nr) < 0) goto out_free_fd; } diff --git a/trunk/tools/perf/builtin-test.c b/trunk/tools/perf/builtin-test.c index 2b9a7f497a20..831d1baeac37 100644 --- a/trunk/tools/perf/builtin-test.c +++ b/trunk/tools/perf/builtin-test.c @@ -7,7 +7,6 @@ #include "util/cache.h" #include "util/debug.h" -#include "util/debugfs.h" #include "util/evlist.h" #include "util/parse-options.h" #include "util/parse-events.h" @@ -15,6 +14,8 @@ #include "util/thread_map.h" #include "../../include/linux/hw_breakpoint.h" +static long page_size; + static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym) { bool *visited = symbol__priv(sym); @@ -30,7 +31,6 @@ static int test__vmlinux_matches_kallsyms(void) struct map *kallsyms_map, *vmlinux_map; struct machine kallsyms, vmlinux; enum map_type type = MAP__FUNCTION; - long page_size = sysconf(_SC_PAGE_SIZE); struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", }; /* @@ -247,7 +247,7 @@ static int trace_event__id(const char *evname) if (asprintf(&filename, "%s/syscalls/%s/id", - tracing_events_path, evname) < 0) + debugfs_path, evname) < 0) return -1; fd = open(filename, O_RDONLY); @@ -603,7 +603,7 @@ static int test__basic_mmap(void) #define TEST_ASSERT_VAL(text, cond) \ do { \ - if (!(cond)) { \ + if (!cond) { \ pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \ return -1; \ } \ @@ -759,103 +759,6 @@ static int test__checkevent_breakpoint_w(struct perf_evlist *evlist) return 0; } -static int test__checkevent_tracepoint_modifier(struct perf_evlist *evlist) -{ - struct perf_evsel *evsel = 
list_entry(evlist->entries.next, - struct perf_evsel, node); - - TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); - TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); - TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); - TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); - - return test__checkevent_tracepoint(evlist); -} - -static int -test__checkevent_tracepoint_multi_modifier(struct perf_evlist *evlist) -{ - struct perf_evsel *evsel; - - TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1); - - list_for_each_entry(evsel, &evlist->entries, node) { - TEST_ASSERT_VAL("wrong exclude_user", - !evsel->attr.exclude_user); - TEST_ASSERT_VAL("wrong exclude_kernel", - evsel->attr.exclude_kernel); - TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); - TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); - } - - return test__checkevent_tracepoint_multi(evlist); -} - -static int test__checkevent_raw_modifier(struct perf_evlist *evlist) -{ - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); - - TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); - TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); - TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); - TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); - - return test__checkevent_raw(evlist); -} - -static int test__checkevent_numeric_modifier(struct perf_evlist *evlist) -{ - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); - - TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); - TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); - TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); - TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); - - return test__checkevent_numeric(evlist); -} - -static int test__checkevent_symbolic_name_modifier(struct perf_evlist *evlist) -{ - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); - - TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); - TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); - TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); - TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); - - return test__checkevent_symbolic_name(evlist); -} - -static int test__checkevent_symbolic_alias_modifier(struct perf_evlist *evlist) -{ - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); - - TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); - TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); - TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); - TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); - - return test__checkevent_symbolic_alias(evlist); -} - -static int test__checkevent_genhw_modifier(struct perf_evlist *evlist) -{ - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); - - TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); - TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); - TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); - TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); - - return test__checkevent_genhw(evlist); -} - static struct test__event_st { const char *name; __u32 type; @@ -905,34 +808,6 @@ static struct test__event_st { .name = "mem:0:w", .check = 
test__checkevent_breakpoint_w, }, - { - .name = "syscalls:sys_enter_open:k", - .check = test__checkevent_tracepoint_modifier, - }, - { - .name = "syscalls:*:u", - .check = test__checkevent_tracepoint_multi_modifier, - }, - { - .name = "r1:kp", - .check = test__checkevent_raw_modifier, - }, - { - .name = "1:1:hp", - .check = test__checkevent_numeric_modifier, - }, - { - .name = "instructions:h", - .check = test__checkevent_symbolic_name_modifier, - }, - { - .name = "faults:u", - .check = test__checkevent_symbolic_alias_modifier, - }, - { - .name = "L1-dcache-load-miss:kp", - .check = test__checkevent_genhw_modifier, - }, }; #define TEST__EVENTS_CNT (sizeof(test__events) / sizeof(struct test__event_st)) @@ -966,336 +841,6 @@ static int test__parse_events(void) return ret; } - -static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp, - size_t *sizep) -{ - cpu_set_t *mask; - size_t size; - int i, cpu = -1, nrcpus = 1024; -realloc: - mask = CPU_ALLOC(nrcpus); - size = CPU_ALLOC_SIZE(nrcpus); - CPU_ZERO_S(size, mask); - - if (sched_getaffinity(pid, size, mask) == -1) { - CPU_FREE(mask); - if (errno == EINVAL && nrcpus < (1024 << 8)) { - nrcpus = nrcpus << 2; - goto realloc; - } - perror("sched_getaffinity"); - return -1; - } - - for (i = 0; i < nrcpus; i++) { - if (CPU_ISSET_S(i, size, mask)) { - if (cpu == -1) { - cpu = i; - *maskp = mask; - *sizep = size; - } else - CPU_CLR_S(i, size, mask); - } - } - - if (cpu == -1) - CPU_FREE(mask); - - return cpu; -} - -static int test__PERF_RECORD(void) -{ - struct perf_record_opts opts = { - .target_pid = -1, - .target_tid = -1, - .no_delay = true, - .freq = 10, - .mmap_pages = 256, - .sample_id_all_avail = true, - }; - cpu_set_t *cpu_mask = NULL; - size_t cpu_mask_size = 0; - struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); - struct perf_evsel *evsel; - struct perf_sample sample; - const char *cmd = "sleep"; - const char *argv[] = { cmd, "1", NULL, }; - char *bname; - u64 sample_type, prev_time = 0; - bool found_cmd_mmap = false, - found_libc_mmap = false, - found_vdso_mmap = false, - found_ld_mmap = false; - int err = -1, errs = 0, i, wakeups = 0, sample_size; - u32 cpu; - int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, }; - - if (evlist == NULL || argv == NULL) { - pr_debug("Not enough memory to create evlist\n"); - goto out; - } - - /* - * We need at least one evsel in the evlist, use the default - * one: "cycles". - */ - err = perf_evlist__add_default(evlist); - if (err < 0) { - pr_debug("Not enough memory to create evsel\n"); - goto out_delete_evlist; - } - - /* - * Create maps of threads and cpus to monitor. In this case - * we start with all threads and cpus (-1, -1) but then in - * perf_evlist__prepare_workload we'll fill in the only thread - * we're monitoring, the one forked there. - */ - err = perf_evlist__create_maps(evlist, opts.target_pid, - opts.target_tid, opts.cpu_list); - if (err < 0) { - pr_debug("Not enough memory to create thread/cpu maps\n"); - goto out_delete_evlist; - } - - /* - * Prepare the workload in argv[] to run, it'll fork it, and then wait - * for perf_evlist__start_workload() to exec it. This is done this way - * so that we have time to open the evlist (calling sys_perf_event_open - * on all the fds) and then mmap them. - */ - err = perf_evlist__prepare_workload(evlist, &opts, argv); - if (err < 0) { - pr_debug("Couldn't run the workload!\n"); - goto out_delete_evlist; - } - - /* - * Config the evsels, setting attr->comm on the first one, etc. 
- */ - evsel = list_entry(evlist->entries.next, struct perf_evsel, node); - evsel->attr.sample_type |= PERF_SAMPLE_CPU; - evsel->attr.sample_type |= PERF_SAMPLE_TID; - evsel->attr.sample_type |= PERF_SAMPLE_TIME; - perf_evlist__config_attrs(evlist, &opts); - - err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask, - &cpu_mask_size); - if (err < 0) { - pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno)); - goto out_delete_evlist; - } - - cpu = err; - - /* - * So that we can check perf_sample.cpu on all the samples. - */ - if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) { - pr_debug("sched_setaffinity: %s\n", strerror(errno)); - goto out_free_cpu_mask; - } - - /* - * Call sys_perf_event_open on all the fds on all the evsels, - * grouping them if asked to. - */ - err = perf_evlist__open(evlist, opts.group); - if (err < 0) { - pr_debug("perf_evlist__open: %s\n", strerror(errno)); - goto out_delete_evlist; - } - - /* - * mmap the first fd on a given CPU and ask for events for the other - * fds in the same CPU to be injected in the same mmap ring buffer - * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)). - */ - err = perf_evlist__mmap(evlist, opts.mmap_pages, false); - if (err < 0) { - pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); - goto out_delete_evlist; - } - - /* - * We'll need these two to parse the PERF_SAMPLE_* fields in each - * event. - */ - sample_type = perf_evlist__sample_type(evlist); - sample_size = __perf_evsel__sample_size(sample_type); - - /* - * Now that all is properly set up, enable the events, they will - * count just on workload.pid, which will start... - */ - perf_evlist__enable(evlist); - - /* - * Now! - */ - perf_evlist__start_workload(evlist); - - while (1) { - int before = total_events; - - for (i = 0; i < evlist->nr_mmaps; i++) { - union perf_event *event; - - while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { - const u32 type = event->header.type; - const char *name = perf_event__name(type); - - ++total_events; - if (type < PERF_RECORD_MAX) - nr_events[type]++; - - err = perf_event__parse_sample(event, sample_type, - sample_size, true, - &sample, false); - if (err < 0) { - if (verbose) - perf_event__fprintf(event, stderr); - pr_debug("Couldn't parse sample\n"); - goto out_err; - } - - if (verbose) { - pr_info("%" PRIu64" %d ", sample.time, sample.cpu); - perf_event__fprintf(event, stderr); - } - - if (prev_time > sample.time) { - pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n", - name, prev_time, sample.time); - ++errs; - } - - prev_time = sample.time; - - if (sample.cpu != cpu) { - pr_debug("%s with unexpected cpu, expected %d, got %d\n", - name, cpu, sample.cpu); - ++errs; - } - - if ((pid_t)sample.pid != evlist->workload.pid) { - pr_debug("%s with unexpected pid, expected %d, got %d\n", - name, evlist->workload.pid, sample.pid); - ++errs; - } - - if ((pid_t)sample.tid != evlist->workload.pid) { - pr_debug("%s with unexpected tid, expected %d, got %d\n", - name, evlist->workload.pid, sample.tid); - ++errs; - } - - if ((type == PERF_RECORD_COMM || - type == PERF_RECORD_MMAP || - type == PERF_RECORD_FORK || - type == PERF_RECORD_EXIT) && - (pid_t)event->comm.pid != evlist->workload.pid) { - pr_debug("%s with unexpected pid/tid\n", name); - ++errs; - } - - if ((type == PERF_RECORD_COMM || - type == PERF_RECORD_MMAP) && - event->comm.pid != event->comm.tid) { - pr_debug("%s with different pid/tid!\n", name); - ++errs; - } - - switch (type) { - case PERF_RECORD_COMM: - if 
(strcmp(event->comm.comm, cmd)) { - pr_debug("%s with unexpected comm!\n", name); - ++errs; - } - break; - case PERF_RECORD_EXIT: - goto found_exit; - case PERF_RECORD_MMAP: - bname = strrchr(event->mmap.filename, '/'); - if (bname != NULL) { - if (!found_cmd_mmap) - found_cmd_mmap = !strcmp(bname + 1, cmd); - if (!found_libc_mmap) - found_libc_mmap = !strncmp(bname + 1, "libc", 4); - if (!found_ld_mmap) - found_ld_mmap = !strncmp(bname + 1, "ld", 2); - } else if (!found_vdso_mmap) - found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]"); - break; - - case PERF_RECORD_SAMPLE: - /* Just ignore samples for now */ - break; - default: - pr_debug("Unexpected perf_event->header.type %d!\n", - type); - ++errs; - } - } - } - - /* - * We don't use poll here because at least at 3.1 times the - * PERF_RECORD_{!SAMPLE} events don't honour - * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does. - */ - if (total_events == before && false) - poll(evlist->pollfd, evlist->nr_fds, -1); - - sleep(1); - if (++wakeups > 5) { - pr_debug("No PERF_RECORD_EXIT event!\n"); - break; - } - } - -found_exit: - if (nr_events[PERF_RECORD_COMM] > 1) { - pr_debug("Excessive number of PERF_RECORD_COMM events!\n"); - ++errs; - } - - if (nr_events[PERF_RECORD_COMM] == 0) { - pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd); - ++errs; - } - - if (!found_cmd_mmap) { - pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd); - ++errs; - } - - if (!found_libc_mmap) { - pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc"); - ++errs; - } - - if (!found_ld_mmap) { - pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld"); - ++errs; - } - - if (!found_vdso_mmap) { - pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]"); - ++errs; - } -out_err: - perf_evlist__munmap(evlist); -out_free_cpu_mask: - CPU_FREE(cpu_mask); -out_delete_evlist: - perf_evlist__delete(evlist); -out: - return (err < 0 || errs > 0) ? -1 : 0; -} - static struct test { const char *desc; int (*func)(void); @@ -1320,90 +865,46 @@ static struct test { .desc = "parse events tests", .func = test__parse_events, }, - { - .desc = "Validate PERF_RECORD_* events & perf_sample fields", - .func = test__PERF_RECORD, - }, { .func = NULL, }, }; -static bool perf_test__matches(int curr, int argc, const char *argv[]) -{ - int i; - - if (argc == 0) - return true; - - for (i = 0; i < argc; ++i) { - char *end; - long nr = strtoul(argv[i], &end, 10); - - if (*end == '\0') { - if (nr == curr + 1) - return true; - continue; - } - - if (strstr(tests[curr].desc, argv[i])) - return true; - } - - return false; -} - -static int __cmd_test(int argc, const char *argv[]) +static int __cmd_test(void) { int i = 0; - while (tests[i].func) { - int curr = i++, err; - - if (!perf_test__matches(curr, argc, argv)) - continue; + page_size = sysconf(_SC_PAGE_SIZE); - pr_info("%2d: %s:", i, tests[curr].desc); + while (tests[i].func) { + int err; + pr_info("%2d: %s:", i + 1, tests[i].desc); pr_debug("\n--- start ---\n"); - err = tests[curr].func(); - pr_debug("---- end ----\n%s:", tests[curr].desc); + err = tests[i].func(); + pr_debug("---- end ----\n%s:", tests[i].desc); pr_info(" %s\n", err ? 
"FAILED!\n" : "Ok"); + ++i; } return 0; } -static int perf_test__list(int argc, const char **argv) -{ - int i = 0; - - while (tests[i].func) { - int curr = i++; - - if (argc > 1 && !strstr(tests[curr].desc, argv[1])) - continue; - - pr_info("%2d: %s\n", i, tests[curr].desc); - } - - return 0; -} - -int cmd_test(int argc, const char **argv, const char *prefix __used) -{ - const char * const test_usage[] = { - "perf test [] [{list |[|]}]", +static const char * const test_usage[] = { + "perf test []", NULL, - }; - const struct option test_options[] = { +}; + +static const struct option test_options[] = { OPT_INTEGER('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), OPT_END() - }; +}; +int cmd_test(int argc, const char **argv, const char *prefix __used) +{ argc = parse_options(argc, argv, test_options, test_usage, 0); - if (argc >= 1 && !strcmp(argv[0], "list")) - return perf_test__list(argc, argv); + if (argc) + usage_with_options(test_usage, test_options); symbol_conf.priv_size = sizeof(int); symbol_conf.sort_by_name = true; @@ -1414,5 +915,5 @@ int cmd_test(int argc, const char **argv, const char *prefix __used) setup_pager(); - return __cmd_test(argc, argv); + return __cmd_test(); } diff --git a/trunk/tools/perf/builtin-timechart.c b/trunk/tools/perf/builtin-timechart.c index 3b75b2e21ea5..aa26f4d66d10 100644 --- a/trunk/tools/perf/builtin-timechart.c +++ b/trunk/tools/perf/builtin-timechart.c @@ -19,7 +19,6 @@ #include "util/color.h" #include #include "util/cache.h" -#include "util/evsel.h" #include #include "util/symbol.h" #include "util/callchain.h" @@ -32,14 +31,13 @@ #include "util/event.h" #include "util/session.h" #include "util/svghelper.h" -#include "util/tool.h" #define SUPPORT_OLD_POWER_EVENTS 1 #define PWR_EVENT_EXIT -1 -static const char *input_name; -static const char *output_name = "output.svg"; +static char const *input_name = "perf.data"; +static char const *output_name = "output.svg"; static unsigned int numcpus; static u64 min_freq; /* Lowest CPU frequency seen */ @@ -275,28 +273,25 @@ static int cpus_cstate_state[MAX_CPUS]; static u64 cpus_pstate_start_times[MAX_CPUS]; static u64 cpus_pstate_state[MAX_CPUS]; -static int process_comm_event(struct perf_tool *tool __used, - union perf_event *event, +static int process_comm_event(union perf_event *event, struct perf_sample *sample __used, - struct machine *machine __used) + struct perf_session *session __used) { pid_set_comm(event->comm.tid, event->comm.comm); return 0; } -static int process_fork_event(struct perf_tool *tool __used, - union perf_event *event, +static int process_fork_event(union perf_event *event, struct perf_sample *sample __used, - struct machine *machine __used) + struct perf_session *session __used) { pid_fork(event->fork.pid, event->fork.ppid, event->fork.time); return 0; } -static int process_exit_event(struct perf_tool *tool __used, - union perf_event *event, +static int process_exit_event(union perf_event *event, struct perf_sample *sample __used, - struct machine *machine __used) + struct perf_session *session __used) { pid_exit(event->fork.pid, event->fork.time); return 0; @@ -491,15 +486,14 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te) } -static int process_sample_event(struct perf_tool *tool __used, - union perf_event *event __used, +static int process_sample_event(union perf_event *event __used, struct perf_sample *sample, - struct perf_evsel *evsel, - struct machine *machine __used) + struct perf_evsel *evsel __used, + struct perf_session 
*session) { struct trace_entry *te; - if (evsel->attr.sample_type & PERF_SAMPLE_TIME) { + if (session->sample_type & PERF_SAMPLE_TIME) { if (!first_time || first_time > sample->time) first_time = sample->time; if (last_time < sample->time) @@ -507,7 +501,7 @@ static int process_sample_event(struct perf_tool *tool __used, } te = (void *)sample->raw_data; - if ((evsel->attr.sample_type & PERF_SAMPLE_RAW) && sample->raw_size > 0) { + if (session->sample_type & PERF_SAMPLE_RAW && sample->raw_size > 0) { char *event_str; #ifdef SUPPORT_OLD_POWER_EVENTS struct power_entry_old *peo; @@ -980,7 +974,7 @@ static void write_svg_file(const char *filename) svg_close(); } -static struct perf_tool perf_timechart = { +static struct perf_event_ops event_ops = { .comm = process_comm_event, .fork = process_fork_event, .exit = process_exit_event, @@ -991,7 +985,7 @@ static struct perf_tool perf_timechart = { static int __cmd_timechart(void) { struct perf_session *session = perf_session__new(input_name, O_RDONLY, - 0, false, &perf_timechart); + 0, false, &event_ops); int ret = -EINVAL; if (session == NULL) @@ -1000,7 +994,7 @@ static int __cmd_timechart(void) if (!perf_session__has_traces(session, "timechart record")) goto out_delete; - ret = perf_session__process_events(session, &perf_timechart); + ret = perf_session__process_events(session, &event_ops); if (ret) goto out_delete; diff --git a/trunk/tools/perf/builtin-top.c b/trunk/tools/perf/builtin-top.c index 4f81eeb99875..c9cdedb58134 100644 --- a/trunk/tools/perf/builtin-top.c +++ b/trunk/tools/perf/builtin-top.c @@ -64,6 +64,44 @@ #include #include +static struct perf_top top = { + .count_filter = 5, + .delay_secs = 2, + .target_pid = -1, + .target_tid = -1, + .freq = 1000, /* 1 KHz */ +}; + +static bool system_wide = false; + +static bool use_tui, use_stdio; + +static bool sort_has_symbols; + +static bool dont_use_callchains; +static char callchain_default_opt[] = "fractal,0.5,callee"; + + +static int default_interval = 0; + +static bool kptr_restrict_warned; +static bool vmlinux_warned; +static bool inherit = false; +static int realtime_prio = 0; +static bool group = false; +static bool sample_id_all_avail = true; +static unsigned int mmap_pages = 128; + +static bool dump_symtab = false; + +static struct winsize winsize; + +static const char *sym_filter = NULL; +static int sym_pcnt_filter = 5; + +/* + * Source functions + */ void get_term_dimensions(struct winsize *ws) { @@ -87,23 +125,21 @@ void get_term_dimensions(struct winsize *ws) ws->ws_col = 80; } -static void perf_top__update_print_entries(struct perf_top *top) +static void update_print_entries(struct winsize *ws) { - top->print_entries = top->winsize.ws_row; + top.print_entries = ws->ws_row; - if (top->print_entries > 9) - top->print_entries -= 9; + if (top.print_entries > 9) + top.print_entries -= 9; } -static void perf_top__sig_winch(int sig __used, siginfo_t *info __used, void *arg) +static void sig_winch_handler(int sig __used) { - struct perf_top *top = arg; - - get_term_dimensions(&top->winsize); - perf_top__update_print_entries(top); + get_term_dimensions(&winsize); + update_print_entries(&winsize); } -static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he) +static int parse_source(struct hist_entry *he) { struct symbol *sym; struct annotation *notes; @@ -134,7 +170,7 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he) pthread_mutex_lock(¬es->lock); - if (symbol__alloc_hist(sym) < 0) { + if (symbol__alloc_hist(sym, 
top.evlist->nr_entries) < 0) { pthread_mutex_unlock(¬es->lock); pr_err("Not enough memory for annotating '%s' symbol!\n", sym->name); @@ -145,7 +181,7 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he) err = symbol__annotate(sym, map, 0); if (err == 0) { out_assign: - top->sym_filter_entry = he; + top.sym_filter_entry = he; } pthread_mutex_unlock(¬es->lock); @@ -158,16 +194,14 @@ static void __zero_source_counters(struct hist_entry *he) symbol__annotate_zero_histograms(sym); } -static void perf_top__record_precise_ip(struct perf_top *top, - struct hist_entry *he, - int counter, u64 ip) +static void record_precise_ip(struct hist_entry *he, int counter, u64 ip) { struct annotation *notes; struct symbol *sym; if (he == NULL || he->ms.sym == NULL || - ((top->sym_filter_entry == NULL || - top->sym_filter_entry->ms.sym != he->ms.sym) && use_browser != 1)) + ((top.sym_filter_entry == NULL || + top.sym_filter_entry->ms.sym != he->ms.sym) && use_browser != 1)) return; sym = he->ms.sym; @@ -176,7 +210,8 @@ static void perf_top__record_precise_ip(struct perf_top *top, if (pthread_mutex_trylock(¬es->lock)) return; - if (notes->src == NULL && symbol__alloc_hist(sym) < 0) { + if (notes->src == NULL && + symbol__alloc_hist(sym, top.evlist->nr_entries) < 0) { pthread_mutex_unlock(¬es->lock); pr_err("Not enough memory for annotating '%s' symbol!\n", sym->name); @@ -190,9 +225,8 @@ static void perf_top__record_precise_ip(struct perf_top *top, pthread_mutex_unlock(¬es->lock); } -static void perf_top__show_details(struct perf_top *top) +static void show_details(struct hist_entry *he) { - struct hist_entry *he = top->sym_filter_entry; struct annotation *notes; struct symbol *symbol; int more; @@ -208,15 +242,15 @@ static void perf_top__show_details(struct perf_top *top) if (notes->src == NULL) goto out_unlock; - printf("Showing %s for %s\n", event_name(top->sym_evsel), symbol->name); - printf(" Events Pcnt (>=%d%%)\n", top->sym_pcnt_filter); + printf("Showing %s for %s\n", event_name(top.sym_evsel), symbol->name); + printf(" Events Pcnt (>=%d%%)\n", sym_pcnt_filter); - more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel->idx, - 0, top->sym_pcnt_filter, top->print_entries, 4); - if (top->zero) - symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx); + more = symbol__annotate_printf(symbol, he->ms.map, top.sym_evsel->idx, + 0, sym_pcnt_filter, top.print_entries, 4); + if (top.zero) + symbol__annotate_zero_histogram(symbol, top.sym_evsel->idx); else - symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx); + symbol__annotate_decay_histogram(symbol, top.sym_evsel->idx); if (more != 0) printf("%d lines not displayed, maybe increase display entries [e]\n", more); out_unlock: @@ -225,9 +259,11 @@ static void perf_top__show_details(struct perf_top *top) static const char CONSOLE_CLEAR[] = ""; -static struct hist_entry *perf_evsel__add_hist_entry(struct perf_evsel *evsel, - struct addr_location *al, - struct perf_sample *sample) +static struct hist_entry * + perf_session__add_hist_entry(struct perf_session *session, + struct addr_location *al, + struct perf_sample *sample, + struct perf_evsel *evsel) { struct hist_entry *he; @@ -235,51 +271,50 @@ static struct hist_entry *perf_evsel__add_hist_entry(struct perf_evsel *evsel, if (he == NULL) return NULL; - evsel->hists.stats.total_period += sample->period; + session->hists.stats.total_period += sample->period; hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE); return he; } -static void 
perf_top__print_sym_table(struct perf_top *top) +static void print_sym_table(void) { char bf[160]; int printed = 0; - const int win_width = top->winsize.ws_col - 1; + const int win_width = winsize.ws_col - 1; puts(CONSOLE_CLEAR); - perf_top__header_snprintf(top, bf, sizeof(bf)); + perf_top__header_snprintf(&top, bf, sizeof(bf)); printf("%s\n", bf); - perf_top__reset_sample_counters(top); + perf_top__reset_sample_counters(&top); printf("%-*.*s\n", win_width, win_width, graph_dotted_line); - if (top->sym_evsel->hists.stats.nr_lost_warned != - top->sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST]) { - top->sym_evsel->hists.stats.nr_lost_warned = - top->sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST]; + if (top.sym_evsel->hists.stats.nr_lost_warned != + top.sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST]) { + top.sym_evsel->hists.stats.nr_lost_warned = + top.sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST]; color_fprintf(stdout, PERF_COLOR_RED, "WARNING: LOST %d chunks, Check IO/CPU overload", - top->sym_evsel->hists.stats.nr_lost_warned); + top.sym_evsel->hists.stats.nr_lost_warned); ++printed; } - if (top->sym_filter_entry) { - perf_top__show_details(top); + if (top.sym_filter_entry) { + show_details(top.sym_filter_entry); return; } - hists__collapse_resort_threaded(&top->sym_evsel->hists); - hists__output_resort_threaded(&top->sym_evsel->hists); - hists__decay_entries_threaded(&top->sym_evsel->hists, - top->hide_user_symbols, - top->hide_kernel_symbols); - hists__output_recalc_col_len(&top->sym_evsel->hists, - top->winsize.ws_row - 3); + hists__collapse_resort_threaded(&top.sym_evsel->hists); + hists__output_resort_threaded(&top.sym_evsel->hists); + hists__decay_entries_threaded(&top.sym_evsel->hists, + top.hide_user_symbols, + top.hide_kernel_symbols); + hists__output_recalc_col_len(&top.sym_evsel->hists, winsize.ws_row - 3); putchar('\n'); - hists__fprintf(&top->sym_evsel->hists, NULL, false, false, - top->winsize.ws_row - 4 - printed, win_width, stdout); + hists__fprintf(&top.sym_evsel->hists, NULL, false, false, + winsize.ws_row - 4 - printed, win_width, stdout); } static void prompt_integer(int *target, const char *msg) @@ -317,17 +352,17 @@ static void prompt_percent(int *target, const char *msg) *target = tmp; } -static void perf_top__prompt_symbol(struct perf_top *top, const char *msg) +static void prompt_symbol(struct hist_entry **target, const char *msg) { char *buf = malloc(0), *p; - struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL; + struct hist_entry *syme = *target, *n, *found = NULL; struct rb_node *next; size_t dummy = 0; /* zero counters of active symbol */ if (syme) { __zero_source_counters(syme); - top->sym_filter_entry = NULL; + *target = NULL; } fprintf(stdout, "\n%s: ", msg); @@ -338,7 +373,7 @@ static void perf_top__prompt_symbol(struct perf_top *top, const char *msg) if (p) *p = 0; - next = rb_first(&top->sym_evsel->hists.entries); + next = rb_first(&top.sym_evsel->hists.entries); while (next) { n = rb_entry(next, struct hist_entry, rb_node); if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) { @@ -351,46 +386,47 @@ static void perf_top__prompt_symbol(struct perf_top *top, const char *msg) if (!found) { fprintf(stderr, "Sorry, %s is not active.\n", buf); sleep(1); + return; } else - perf_top__parse_source(top, found); + parse_source(found); out_free: free(buf); } -static void perf_top__print_mapped_keys(struct perf_top *top) +static void print_mapped_keys(void) { char *name = NULL; - if (top->sym_filter_entry) { - struct symbol *sym = 
top->sym_filter_entry->ms.sym; + if (top.sym_filter_entry) { + struct symbol *sym = top.sym_filter_entry->ms.sym; name = sym->name; } fprintf(stdout, "\nMapped keys:\n"); - fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top->delay_secs); - fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries); + fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top.delay_secs); + fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top.print_entries); - if (top->evlist->nr_entries > 1) - fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(top->sym_evsel)); + if (top.evlist->nr_entries > 1) + fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(top.sym_evsel)); - fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter); + fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top.count_filter); - fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->sym_pcnt_filter); + fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter); fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL"); fprintf(stdout, "\t[S] stop annotation.\n"); fprintf(stdout, "\t[K] hide kernel_symbols symbols. \t(%s)\n", - top->hide_kernel_symbols ? "yes" : "no"); + top.hide_kernel_symbols ? "yes" : "no"); fprintf(stdout, "\t[U] hide user symbols. \t(%s)\n", - top->hide_user_symbols ? "yes" : "no"); - fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top->zero ? 1 : 0); + top.hide_user_symbols ? "yes" : "no"); + fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top.zero ? 1 : 0); fprintf(stdout, "\t[qQ] quit.\n"); } -static int perf_top__key_mapped(struct perf_top *top, int c) +static int key_mapped(int c) { switch (c) { case 'd': @@ -406,7 +442,7 @@ static int perf_top__key_mapped(struct perf_top *top, int c) case 'S': return 1; case 'E': - return top->evlist->nr_entries > 1 ? 1 : 0; + return top.evlist->nr_entries > 1 ? 
1 : 0; default: break; } @@ -414,13 +450,13 @@ static int perf_top__key_mapped(struct perf_top *top, int c) return 0; } -static void perf_top__handle_keypress(struct perf_top *top, int c) +static void handle_keypress(int c) { - if (!perf_top__key_mapped(top, c)) { + if (!key_mapped(c)) { struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; struct termios tc, save; - perf_top__print_mapped_keys(top); + print_mapped_keys(); fprintf(stdout, "\nEnter selection, or unmapped key to continue: "); fflush(stdout); @@ -435,86 +471,81 @@ static void perf_top__handle_keypress(struct perf_top *top, int c) c = getc(stdin); tcsetattr(0, TCSAFLUSH, &save); - if (!perf_top__key_mapped(top, c)) + if (!key_mapped(c)) return; } switch (c) { case 'd': - prompt_integer(&top->delay_secs, "Enter display delay"); - if (top->delay_secs < 1) - top->delay_secs = 1; + prompt_integer(&top.delay_secs, "Enter display delay"); + if (top.delay_secs < 1) + top.delay_secs = 1; break; case 'e': - prompt_integer(&top->print_entries, "Enter display entries (lines)"); - if (top->print_entries == 0) { - struct sigaction act = { - .sa_sigaction = perf_top__sig_winch, - .sa_flags = SA_SIGINFO, - }; - perf_top__sig_winch(SIGWINCH, NULL, top); - sigaction(SIGWINCH, &act, NULL); + prompt_integer(&top.print_entries, "Enter display entries (lines)"); + if (top.print_entries == 0) { + sig_winch_handler(SIGWINCH); + signal(SIGWINCH, sig_winch_handler); } else signal(SIGWINCH, SIG_DFL); break; case 'E': - if (top->evlist->nr_entries > 1) { + if (top.evlist->nr_entries > 1) { /* Select 0 as the default event: */ int counter = 0; fprintf(stderr, "\nAvailable events:"); - list_for_each_entry(top->sym_evsel, &top->evlist->entries, node) - fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, event_name(top->sym_evsel)); + list_for_each_entry(top.sym_evsel, &top.evlist->entries, node) + fprintf(stderr, "\n\t%d %s", top.sym_evsel->idx, event_name(top.sym_evsel)); prompt_integer(&counter, "Enter details event counter"); - if (counter >= top->evlist->nr_entries) { - top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node); - fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(top->sym_evsel)); + if (counter >= top.evlist->nr_entries) { + top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node); + fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(top.sym_evsel)); sleep(1); break; } - list_for_each_entry(top->sym_evsel, &top->evlist->entries, node) - if (top->sym_evsel->idx == counter) + list_for_each_entry(top.sym_evsel, &top.evlist->entries, node) + if (top.sym_evsel->idx == counter) break; } else - top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node); + top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node); break; case 'f': - prompt_integer(&top->count_filter, "Enter display event count filter"); + prompt_integer(&top.count_filter, "Enter display event count filter"); break; case 'F': - prompt_percent(&top->sym_pcnt_filter, - "Enter details display event filter (percent)"); + prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)"); break; case 'K': - top->hide_kernel_symbols = !top->hide_kernel_symbols; + top.hide_kernel_symbols = !top.hide_kernel_symbols; break; case 'q': case 'Q': printf("exiting.\n"); - if (top->dump_symtab) - perf_session__fprintf_dsos(top->session, stderr); + if (dump_symtab) + perf_session__fprintf_dsos(top.session, stderr); exit(0); case 's': - perf_top__prompt_symbol(top, 
"Enter details symbol"); + prompt_symbol(&top.sym_filter_entry, "Enter details symbol"); break; case 'S': - if (!top->sym_filter_entry) + if (!top.sym_filter_entry) break; else { - struct hist_entry *syme = top->sym_filter_entry; + struct hist_entry *syme = top.sym_filter_entry; - top->sym_filter_entry = NULL; + top.sym_filter_entry = NULL; __zero_source_counters(syme); } break; case 'U': - top->hide_user_symbols = !top->hide_user_symbols; + top.hide_user_symbols = !top.hide_user_symbols; break; case 'z': - top->zero = !top->zero; + top.zero = !top.zero; break; default: break; @@ -532,30 +563,28 @@ static void perf_top__sort_new_samples(void *arg) hists__collapse_resort_threaded(&t->sym_evsel->hists); hists__output_resort_threaded(&t->sym_evsel->hists); hists__decay_entries_threaded(&t->sym_evsel->hists, - t->hide_user_symbols, - t->hide_kernel_symbols); + top.hide_user_symbols, + top.hide_kernel_symbols); } -static void *display_thread_tui(void *arg) +static void *display_thread_tui(void *arg __used) { - struct perf_top *top = arg; const char *help = "For a higher level overview, try: perf top --sort comm,dso"; - perf_top__sort_new_samples(top); - perf_evlist__tui_browse_hists(top->evlist, help, + perf_top__sort_new_samples(&top); + perf_evlist__tui_browse_hists(top.evlist, help, perf_top__sort_new_samples, - top, top->delay_secs); + &top, top.delay_secs); exit_browser(0); exit(0); return NULL; } -static void *display_thread(void *arg) +static void *display_thread(void *arg __used) { struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; struct termios tc, save; - struct perf_top *top = arg; int delay_msecs, c; tcgetattr(0, &save); @@ -566,13 +595,13 @@ static void *display_thread(void *arg) pthread__unblock_sigwinch(); repeat: - delay_msecs = top->delay_secs * 1000; + delay_msecs = top.delay_secs * 1000; tcsetattr(0, TCSANOW, &tc); /* trash return*/ getc(stdin); while (1) { - perf_top__print_sym_table(top); + print_sym_table(); /* * Either timeout expired or we got an EINTR due to SIGWINCH, * refresh screen in both cases. @@ -592,7 +621,7 @@ static void *display_thread(void *arg) c = getc(stdin); tcsetattr(0, TCSAFLUSH, &save); - perf_top__handle_keypress(top, c); + handle_keypress(c); goto repeat; return NULL; @@ -644,17 +673,47 @@ static int symbol_filter(struct map *map __used, struct symbol *sym) return 0; } -static void perf_event__process_sample(struct perf_tool *tool, - const union perf_event *event, +static void perf_event__process_sample(const union perf_event *event, struct perf_evsel *evsel, struct perf_sample *sample, - struct machine *machine) + struct perf_session *session) { - struct perf_top *top = container_of(tool, struct perf_top, tool); struct symbol *parent = NULL; u64 ip = event->ip.ip; struct addr_location al; + struct machine *machine; int err; + u8 origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; + + ++top.samples; + + switch (origin) { + case PERF_RECORD_MISC_USER: + ++top.us_samples; + if (top.hide_user_symbols) + return; + machine = perf_session__find_host_machine(session); + break; + case PERF_RECORD_MISC_KERNEL: + ++top.kernel_samples; + if (top.hide_kernel_symbols) + return; + machine = perf_session__find_host_machine(session); + break; + case PERF_RECORD_MISC_GUEST_KERNEL: + ++top.guest_kernel_samples; + machine = perf_session__find_machine(session, event->ip.pid); + break; + case PERF_RECORD_MISC_GUEST_USER: + ++top.guest_us_samples; + /* + * TODO: we don't process guest user from host side + * except simple counting. 
+ */ + return; + default: + return; + } if (!machine && perf_guest) { pr_err("Can't find guest [%d]'s kernel information\n", @@ -663,14 +722,14 @@ static void perf_event__process_sample(struct perf_tool *tool, } if (event->header.misc & PERF_RECORD_MISC_EXACT_IP) - top->exact_samples++; + top.exact_samples++; - if (perf_event__preprocess_sample(event, machine, &al, sample, + if (perf_event__preprocess_sample(event, session, &al, sample, symbol_filter) < 0 || al.filtered) return; - if (!top->kptr_restrict_warned && + if (!kptr_restrict_warned && symbol_conf.kptr_restrict && al.cpumode == PERF_RECORD_MISC_KERNEL) { ui__warning( @@ -681,7 +740,7 @@ static void perf_event__process_sample(struct perf_tool *tool, " modules" : ""); if (use_browser <= 0) sleep(5); - top->kptr_restrict_warned = true; + kptr_restrict_warned = true; } if (al.sym == NULL) { @@ -697,7 +756,7 @@ static void perf_event__process_sample(struct perf_tool *tool, * --hide-kernel-symbols, even if the user specifies an * invalid --vmlinux ;-) */ - if (!top->kptr_restrict_warned && !top->vmlinux_warned && + if (!kptr_restrict_warned && !vmlinux_warned && al.map == machine->vmlinux_maps[MAP__FUNCTION] && RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) { if (symbol_conf.vmlinux_name) { @@ -710,7 +769,7 @@ static void perf_event__process_sample(struct perf_tool *tool, if (use_browser <= 0) sleep(5); - top->vmlinux_warned = true; + vmlinux_warned = true; } } @@ -719,109 +778,70 @@ static void perf_event__process_sample(struct perf_tool *tool, if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) { - err = machine__resolve_callchain(machine, evsel, al.thread, - sample->callchain, &parent); + err = perf_session__resolve_callchain(session, al.thread, + sample->callchain, &parent); if (err) return; } - he = perf_evsel__add_hist_entry(evsel, &al, sample); + he = perf_session__add_hist_entry(session, &al, sample, evsel); if (he == NULL) { pr_err("Problem incrementing symbol period, skipping event\n"); return; } if (symbol_conf.use_callchain) { - err = callchain_append(he->callchain, &evsel->hists.callchain_cursor, + err = callchain_append(he->callchain, &session->callchain_cursor, sample->period); if (err) return; } - if (top->sort_has_symbols) - perf_top__record_precise_ip(top, he, evsel->idx, ip); + if (sort_has_symbols) + record_precise_ip(he, evsel->idx, ip); } return; } -static void perf_top__mmap_read_idx(struct perf_top *top, int idx) +static void perf_session__mmap_read_idx(struct perf_session *self, int idx) { struct perf_sample sample; struct perf_evsel *evsel; - struct perf_session *session = top->session; union perf_event *event; - struct machine *machine; - u8 origin; int ret; - while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) { - ret = perf_session__parse_sample(session, event, &sample); + while ((event = perf_evlist__mmap_read(top.evlist, idx)) != NULL) { + ret = perf_session__parse_sample(self, event, &sample); if (ret) { pr_err("Can't parse sample, err = %d\n", ret); continue; } - evsel = perf_evlist__id2evsel(session->evlist, sample.id); + evsel = perf_evlist__id2evsel(self->evlist, sample.id); assert(evsel != NULL); - origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - if (event->header.type == PERF_RECORD_SAMPLE) - ++top->samples; - - switch (origin) { - case PERF_RECORD_MISC_USER: - ++top->us_samples; - if (top->hide_user_symbols) - continue; - machine = perf_session__find_host_machine(session); - break; - case PERF_RECORD_MISC_KERNEL: - ++top->kernel_samples; - if 
(top->hide_kernel_symbols) - continue; - machine = perf_session__find_host_machine(session); - break; - case PERF_RECORD_MISC_GUEST_KERNEL: - ++top->guest_kernel_samples; - machine = perf_session__find_machine(session, event->ip.pid); - break; - case PERF_RECORD_MISC_GUEST_USER: - ++top->guest_us_samples; - /* - * TODO: we don't process guest user from host side - * except simple counting. - */ - /* Fall thru */ - default: - continue; - } - - - if (event->header.type == PERF_RECORD_SAMPLE) { - perf_event__process_sample(&top->tool, event, evsel, - &sample, machine); - } else if (event->header.type < PERF_RECORD_MAX) { + perf_event__process_sample(event, evsel, &sample, self); + else if (event->header.type < PERF_RECORD_MAX) { hists__inc_nr_events(&evsel->hists, event->header.type); - perf_event__process(&top->tool, event, &sample, machine); + perf_event__process(event, &sample, self); } else - ++session->hists.stats.nr_unknown_events; + ++self->hists.stats.nr_unknown_events; } } -static void perf_top__mmap_read(struct perf_top *top) +static void perf_session__mmap_read(struct perf_session *self) { int i; - for (i = 0; i < top->evlist->nr_mmaps; i++) - perf_top__mmap_read_idx(top, i); + for (i = 0; i < top.evlist->nr_mmaps; i++) + perf_session__mmap_read_idx(self, i); } -static void perf_top__start_counters(struct perf_top *top) +static void start_counters(struct perf_evlist *evlist) { struct perf_evsel *counter, *first; - struct perf_evlist *evlist = top->evlist; first = list_entry(evlist->entries.next, struct perf_evsel, node); @@ -829,15 +849,15 @@ static void perf_top__start_counters(struct perf_top *top) struct perf_event_attr *attr = &counter->attr; struct xyarray *group_fd = NULL; - if (top->group && counter != first) + if (group && counter != first) group_fd = first->fd; attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; - if (top->freq) { + if (top.freq) { attr->sample_type |= PERF_SAMPLE_PERIOD; attr->freq = 1; - attr->sample_freq = top->freq; + attr->sample_freq = top.freq; } if (evlist->nr_entries > 1) { @@ -850,23 +870,23 @@ static void perf_top__start_counters(struct perf_top *top) attr->mmap = 1; attr->comm = 1; - attr->inherit = top->inherit; + attr->inherit = inherit; retry_sample_id: - attr->sample_id_all = top->sample_id_all_avail ? 1 : 0; + attr->sample_id_all = sample_id_all_avail ? 
1 : 0; try_again: - if (perf_evsel__open(counter, top->evlist->cpus, - top->evlist->threads, top->group, + if (perf_evsel__open(counter, top.evlist->cpus, + top.evlist->threads, group, group_fd) < 0) { int err = errno; if (err == EPERM || err == EACCES) { ui__error_paranoid(); goto out_err; - } else if (err == EINVAL && top->sample_id_all_avail) { + } else if (err == EINVAL && sample_id_all_avail) { /* * Old kernel, no attr->sample_id_type_all field */ - top->sample_id_all_avail = false; + sample_id_all_avail = false; goto retry_sample_id; } /* @@ -900,7 +920,7 @@ static void perf_top__start_counters(struct perf_top *top) } } - if (perf_evlist__mmap(evlist, top->mmap_pages, false) < 0) { + if (perf_evlist__mmap(evlist, mmap_pages, false) < 0) { ui__warning("Failed to mmap with %d (%s)\n", errno, strerror(errno)); goto out_err; @@ -913,14 +933,14 @@ static void perf_top__start_counters(struct perf_top *top) exit(0); } -static int perf_top__setup_sample_type(struct perf_top *top) +static int setup_sample_type(void) { - if (!top->sort_has_symbols) { + if (!sort_has_symbols) { if (symbol_conf.use_callchain) { ui__warning("Selected -g but \"sym\" not present in --sort/-s."); return -EINVAL; } - } else if (!top->dont_use_callchains && callchain_param.mode != CHAIN_NONE) { + } else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE) { if (callchain_register_param(&callchain_param) < 0) { ui__warning("Can't register callchain params.\n"); return -EINVAL; @@ -930,7 +950,7 @@ static int perf_top__setup_sample_type(struct perf_top *top) return 0; } -static int __cmd_top(struct perf_top *top) +static int __cmd_top(void) { pthread_t thread; int ret; @@ -938,40 +958,39 @@ static int __cmd_top(struct perf_top *top) * FIXME: perf_session__new should allow passing a O_MMAP, so that all this * mmap reading, etc is encapsulated in it. Use O_WRONLY for now. */ - top->session = perf_session__new(NULL, O_WRONLY, false, false, NULL); - if (top->session == NULL) + top.session = perf_session__new(NULL, O_WRONLY, false, false, NULL); + if (top.session == NULL) return -ENOMEM; - ret = perf_top__setup_sample_type(top); + ret = setup_sample_type(); if (ret) goto out_delete; - if (top->target_tid != -1) - perf_event__synthesize_thread_map(&top->tool, top->evlist->threads, - perf_event__process, - &top->session->host_machine); + if (top.target_tid != -1) + perf_event__synthesize_thread_map(top.evlist->threads, + perf_event__process, top.session); else - perf_event__synthesize_threads(&top->tool, perf_event__process, - &top->session->host_machine); - perf_top__start_counters(top); - top->session->evlist = top->evlist; - perf_session__update_sample_type(top->session); + perf_event__synthesize_threads(perf_event__process, top.session); + + start_counters(top.evlist); + top.session->evlist = top.evlist; + perf_session__update_sample_type(top.session); /* Wait for a minimal set of events before starting the snapshot */ - poll(top->evlist->pollfd, top->evlist->nr_fds, 100); + poll(top.evlist->pollfd, top.evlist->nr_fds, 100); - perf_top__mmap_read(top); + perf_session__mmap_read(top.session); if (pthread_create(&thread, NULL, (use_browser > 0 ? 
display_thread_tui : - display_thread), top)) { + display_thread), NULL)) { printf("Could not create display thread.\n"); exit(-1); } - if (top->realtime_prio) { + if (realtime_prio) { struct sched_param param; - param.sched_priority = top->realtime_prio; + param.sched_priority = realtime_prio; if (sched_setscheduler(0, SCHED_FIFO, ¶m)) { printf("Could not set realtime priority.\n"); exit(-1); @@ -979,25 +998,25 @@ static int __cmd_top(struct perf_top *top) } while (1) { - u64 hits = top->samples; + u64 hits = top.samples; - perf_top__mmap_read(top); + perf_session__mmap_read(top.session); - if (hits == top->samples) - ret = poll(top->evlist->pollfd, top->evlist->nr_fds, 100); + if (hits == top.samples) + ret = poll(top.evlist->pollfd, top.evlist->nr_fds, 100); } out_delete: - perf_session__delete(top->session); - top->session = NULL; + perf_session__delete(top.session); + top.session = NULL; return 0; } static int -parse_callchain_opt(const struct option *opt, const char *arg, int unset) +parse_callchain_opt(const struct option *opt __used, const char *arg, + int unset) { - struct perf_top *top = (struct perf_top *)opt->value; char *tok, *tok2; char *endptr; @@ -1005,7 +1024,7 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset) * --no-call-graph */ if (unset) { - top->dont_use_callchains = true; + dont_use_callchains = true; return 0; } @@ -1033,7 +1052,9 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset) symbol_conf.use_callchain = false; return 0; - } else + } + + else return -1; /* get the min percentage */ @@ -1077,32 +1098,17 @@ static const char * const top_usage[] = { NULL }; -int cmd_top(int argc, const char **argv, const char *prefix __used) -{ - struct perf_evsel *pos; - int status = -ENOMEM; - struct perf_top top = { - .count_filter = 5, - .delay_secs = 2, - .target_pid = -1, - .target_tid = -1, - .freq = 1000, /* 1 KHz */ - .sample_id_all_avail = true, - .mmap_pages = 128, - .sym_pcnt_filter = 5, - }; - char callchain_default_opt[] = "fractal,0.5,callee"; - const struct option options[] = { +static const struct option options[] = { OPT_CALLBACK('e', "event", &top.evlist, "event", "event selector. 
use 'perf list' to list available events", parse_events_option), - OPT_INTEGER('c', "count", &top.default_interval, + OPT_INTEGER('c', "count", &default_interval, "event period to sample"), OPT_INTEGER('p', "pid", &top.target_pid, "profile events on existing process id"), OPT_INTEGER('t', "tid", &top.target_tid, "profile events on existing thread id"), - OPT_BOOLEAN('a', "all-cpus", &top.system_wide, + OPT_BOOLEAN('a', "all-cpus", &system_wide, "system-wide collection from all CPUs"), OPT_STRING('C', "cpu", &top.cpu_list, "cpu", "list of cpus to monitor"), @@ -1110,20 +1116,20 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) "file", "vmlinux pathname"), OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols, "hide kernel symbols"), - OPT_UINTEGER('m', "mmap-pages", &top.mmap_pages, "number of mmap data pages"), - OPT_INTEGER('r', "realtime", &top.realtime_prio, + OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"), + OPT_INTEGER('r', "realtime", &realtime_prio, "collect data with this RT SCHED_FIFO priority"), OPT_INTEGER('d', "delay", &top.delay_secs, "number of seconds to delay between refreshes"), - OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab, + OPT_BOOLEAN('D', "dump-symtab", &dump_symtab, "dump the symbol table used for profiling"), OPT_INTEGER('f', "count-filter", &top.count_filter, "only display functions with more events than this"), - OPT_BOOLEAN('g', "group", &top.group, + OPT_BOOLEAN('g', "group", &group, "put the counters into a counter group"), - OPT_BOOLEAN('i', "inherit", &top.inherit, + OPT_BOOLEAN('i', "inherit", &inherit, "child tasks inherit counters"), - OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name", + OPT_STRING(0, "sym-annotate", &sym_filter, "symbol name", "symbol to annotate"), OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"), @@ -1133,15 +1139,15 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) "display this many functions"), OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols, "hide user symbols"), - OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"), - OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"), + OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"), + OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show counter open errors, etc)"), OPT_STRING('s', "sort", &sort_order, "key[,key2...]", "sort by key(s): pid, comm, dso, symbol, parent"), OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, "Show a column with the number of samples"), - OPT_CALLBACK_DEFAULT('G', "call-graph", &top, "output_type,min_percent, call_order", + OPT_CALLBACK_DEFAULT('G', "call-graph", NULL, "output_type,min_percent, call_order", "Display callchains using output_type (graph, flat, fractal, or none), min percent threshold and callchain order. " "Default: fractal,0.5,callee", &parse_callchain_opt, callchain_default_opt), @@ -1160,7 +1166,12 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", "Specify disassembler style (e.g. 
-M intel for intel syntax)"), OPT_END() - }; +}; + +int cmd_top(int argc, const char **argv, const char *prefix __used) +{ + struct perf_evsel *pos; + int status = -ENOMEM; top.evlist = perf_evlist__new(NULL, NULL); if (top.evlist == NULL) @@ -1177,9 +1188,9 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) setup_sorting(top_usage, options); - if (top.use_stdio) + if (use_stdio) use_browser = 0; - else if (top.use_tui) + else if (use_tui) use_browser = 1; setup_browser(false); @@ -1204,31 +1215,38 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) return -ENOMEM; } - symbol_conf.nr_events = top.evlist->nr_entries; - if (top.delay_secs < 1) top.delay_secs = 1; /* * User specified count overrides default frequency. */ - if (top.default_interval) + if (default_interval) top.freq = 0; else if (top.freq) { - top.default_interval = top.freq; + default_interval = top.freq; } else { fprintf(stderr, "frequency and count are zero, aborting\n"); exit(EXIT_FAILURE); } list_for_each_entry(pos, &top.evlist->entries, node) { + if (perf_evsel__alloc_fd(pos, top.evlist->cpus->nr, + top.evlist->threads->nr) < 0) + goto out_free_fd; /* * Fill in the ones not specifically initialized via -c: */ - if (!pos->attr.sample_period) - pos->attr.sample_period = top.default_interval; + if (pos->attr.sample_period) + continue; + + pos->attr.sample_period = default_interval; } + if (perf_evlist__alloc_pollfd(top.evlist) < 0 || + perf_evlist__alloc_mmap(top.evlist) < 0) + goto out_free_fd; + top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node); symbol_conf.priv_size = sizeof(struct annotation); @@ -1245,20 +1263,16 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) * Avoid annotation data structures overhead when symbols aren't on the * sort list. 
*/ - top.sort_has_symbols = sort_sym.list.next != NULL; + sort_has_symbols = sort_sym.list.next != NULL; - get_term_dimensions(&top.winsize); + get_term_dimensions(&winsize); if (top.print_entries == 0) { - struct sigaction act = { - .sa_sigaction = perf_top__sig_winch, - .sa_flags = SA_SIGINFO, - }; - perf_top__update_print_entries(&top); - sigaction(SIGWINCH, &act, NULL); + update_print_entries(&winsize); + signal(SIGWINCH, sig_winch_handler); } - status = __cmd_top(&top); - + status = __cmd_top(); +out_free_fd: perf_evlist__delete(top.evlist); return status; diff --git a/trunk/tools/perf/perf.c b/trunk/tools/perf/perf.c index 2b2e225a4d4c..73d0cac8b67e 100644 --- a/trunk/tools/perf/perf.c +++ b/trunk/tools/perf/perf.c @@ -29,6 +29,8 @@ struct pager_config { int val; }; +static char debugfs_mntpt[MAXPATHLEN]; + static int pager_command_config(const char *var, const char *value, void *data) { struct pager_config *c = data; @@ -79,6 +81,15 @@ static void commit_pager_choice(void) } } +static void set_debugfs_path(void) +{ + char *path; + + path = getenv(PERF_DEBUGFS_ENVIRONMENT); + snprintf(debugfs_path, MAXPATHLEN, "%s/%s", path ?: debugfs_mntpt, + "tracing/events"); +} + static int handle_options(const char ***argv, int *argc, int *envchanged) { int handled = 0; @@ -150,14 +161,15 @@ static int handle_options(const char ***argv, int *argc, int *envchanged) fprintf(stderr, "No directory given for --debugfs-dir.\n"); usage(perf_usage_string); } - debugfs_set_path((*argv)[1]); + strncpy(debugfs_mntpt, (*argv)[1], MAXPATHLEN); + debugfs_mntpt[MAXPATHLEN - 1] = '\0'; if (envchanged) *envchanged = 1; (*argv)++; (*argc)--; } else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) { - debugfs_set_path(cmd + strlen(CMD_DEBUGFS_DIR)); - fprintf(stderr, "dir: %s\n", debugfs_mountpoint); + strncpy(debugfs_mntpt, cmd + strlen(CMD_DEBUGFS_DIR), MAXPATHLEN); + debugfs_mntpt[MAXPATHLEN - 1] = '\0'; if (envchanged) *envchanged = 1; } else { @@ -269,6 +281,7 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv) if (use_pager == -1 && p->option & USE_PAGER) use_pager = 1; commit_pager_choice(); + set_debugfs_path(); status = p->fn(argc, argv, prefix); exit_browser(status); @@ -403,6 +416,17 @@ static int run_argv(int *argcp, const char ***argv) return done_alias; } +/* mini /proc/mounts parser: searching for "^blah /mount/point debugfs" */ +static void get_debugfs_mntpt(void) +{ + const char *path = debugfs_mount(NULL); + + if (path) + strncpy(debugfs_mntpt, path, sizeof(debugfs_mntpt)); + else + debugfs_mntpt[0] = '\0'; +} + static void pthread__block_sigwinch(void) { sigset_t set; @@ -429,7 +453,7 @@ int main(int argc, const char **argv) if (!cmd) cmd = "perf-help"; /* get debugfs mount point from /proc/mounts */ - debugfs_mount(NULL); + get_debugfs_mntpt(); /* * "perf-xxxx" is the same as "perf xxxx", but we obviously: * @@ -452,6 +476,7 @@ int main(int argc, const char **argv) argc--; handle_options(&argv, &argc, NULL); commit_pager_choice(); + set_debugfs_path(); set_buildid_dir(); if (argc > 0) { diff --git a/trunk/tools/perf/perf.h b/trunk/tools/perf/perf.h index 64f8bee31ced..914c895510f7 100644 --- a/trunk/tools/perf/perf.h +++ b/trunk/tools/perf/perf.h @@ -185,28 +185,4 @@ extern const char perf_version_string[]; void pthread__unblock_sigwinch(void); -struct perf_record_opts { - pid_t target_pid; - pid_t target_tid; - bool call_graph; - bool group; - bool inherit_stat; - bool no_delay; - bool no_inherit; - bool no_samples; - bool pipe_output; - bool raw_samples; - bool sample_address; - 
bool sample_time; - bool sample_id_all_avail; - bool system_wide; - bool period; - unsigned int freq; - unsigned int mmap_pages; - unsigned int user_freq; - u64 default_interval; - u64 user_interval; - const char *cpu_list; -}; - #endif diff --git a/trunk/tools/perf/util/annotate.c b/trunk/tools/perf/util/annotate.c index 011ed2676604..119e996035c8 100644 --- a/trunk/tools/perf/util/annotate.c +++ b/trunk/tools/perf/util/annotate.c @@ -25,17 +25,17 @@ int symbol__annotate_init(struct map *map __used, struct symbol *sym) return 0; } -int symbol__alloc_hist(struct symbol *sym) +int symbol__alloc_hist(struct symbol *sym, int nevents) { struct annotation *notes = symbol__annotation(sym); size_t sizeof_sym_hist = (sizeof(struct sym_hist) + (sym->end - sym->start) * sizeof(u64)); - notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist); + notes->src = zalloc(sizeof(*notes->src) + nevents * sizeof_sym_hist); if (notes->src == NULL) return -1; notes->src->sizeof_sym_hist = sizeof_sym_hist; - notes->src->nr_histograms = symbol_conf.nr_events; + notes->src->nr_histograms = nevents; INIT_LIST_HEAD(¬es->src->source); return 0; } @@ -334,7 +334,7 @@ int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize) disassembler_style ? "-M " : "", disassembler_style ? disassembler_style : "", map__rip_2objdump(map, sym->start), - map__rip_2objdump(map, sym->end+1), + map__rip_2objdump(map, sym->end), symbol_conf.annotate_asm_raw ? "" : "--no-show-raw", symbol_conf.annotate_src ? "-S" : "", symfs_filename, filename); diff --git a/trunk/tools/perf/util/annotate.h b/trunk/tools/perf/util/annotate.h index efa5dc82bfae..d9072523d342 100644 --- a/trunk/tools/perf/util/annotate.h +++ b/trunk/tools/perf/util/annotate.h @@ -72,7 +72,7 @@ static inline struct annotation *symbol__annotation(struct symbol *sym) int symbol__inc_addr_samples(struct symbol *sym, struct map *map, int evidx, u64 addr); -int symbol__alloc_hist(struct symbol *sym); +int symbol__alloc_hist(struct symbol *sym, int nevents); void symbol__annotate_zero_histograms(struct symbol *sym); int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize); @@ -99,7 +99,8 @@ static inline int symbol__tui_annotate(struct symbol *sym __used, } #else int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, - void(*timer)(void *arg), void *arg, int delay_secs); + int nr_events, void(*timer)(void *arg), void *arg, + int delay_secs); #endif extern const char *disassembler_style; diff --git a/trunk/tools/perf/util/build-id.c b/trunk/tools/perf/util/build-id.c index dff9c7a725f4..a91cd99f26ea 100644 --- a/trunk/tools/perf/util/build-id.c +++ b/trunk/tools/perf/util/build-id.c @@ -13,18 +13,15 @@ #include "symbol.h" #include #include "debug.h" -#include "session.h" -#include "tool.h" -static int build_id__mark_dso_hit(struct perf_tool *tool __used, - union perf_event *event, +static int build_id__mark_dso_hit(union perf_event *event, struct perf_sample *sample __used, struct perf_evsel *evsel __used, - struct machine *machine) + struct perf_session *session) { struct addr_location al; u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - struct thread *thread = machine__findnew_thread(machine, event->ip.pid); + struct thread *thread = perf_session__findnew(session, event->ip.pid); if (thread == NULL) { pr_err("problem processing %d event, skipping it.\n", @@ -32,8 +29,8 @@ static int build_id__mark_dso_hit(struct perf_tool *tool __used, return -1; } - thread__find_addr_map(thread, 
machine, cpumode, MAP__FUNCTION, - event->ip.ip, &al); + thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION, + event->ip.pid, event->ip.ip, &al); if (al.map != NULL) al.map->dso->hit = 1; @@ -41,26 +38,25 @@ static int build_id__mark_dso_hit(struct perf_tool *tool __used, return 0; } -static int perf_event__exit_del_thread(struct perf_tool *tool __used, - union perf_event *event, +static int perf_event__exit_del_thread(union perf_event *event, struct perf_sample *sample __used, - struct machine *machine) + struct perf_session *session) { - struct thread *thread = machine__findnew_thread(machine, event->fork.tid); + struct thread *thread = perf_session__findnew(session, event->fork.tid); dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid, event->fork.ppid, event->fork.ptid); if (thread) { - rb_erase(&thread->rb_node, &machine->threads); - machine->last_match = NULL; + rb_erase(&thread->rb_node, &session->threads); + session->last_match = NULL; thread__delete(thread); } return 0; } -struct perf_tool build_id__mark_dso_hit_ops = { +struct perf_event_ops build_id__mark_dso_hit_ops = { .sample = build_id__mark_dso_hit, .mmap = perf_event__process_mmap, .fork = perf_event__process_task, diff --git a/trunk/tools/perf/util/build-id.h b/trunk/tools/perf/util/build-id.h index a993ba87d996..5dafb00eaa06 100644 --- a/trunk/tools/perf/util/build-id.h +++ b/trunk/tools/perf/util/build-id.h @@ -3,7 +3,7 @@ #include "session.h" -extern struct perf_tool build_id__mark_dso_hit_ops; +extern struct perf_event_ops build_id__mark_dso_hit_ops; char *dso__build_id_filename(struct dso *self, char *bf, size_t size); diff --git a/trunk/tools/perf/util/callchain.h b/trunk/tools/perf/util/callchain.h index 7f9c0f1ae3a9..9b4ff16cac96 100644 --- a/trunk/tools/perf/util/callchain.h +++ b/trunk/tools/perf/util/callchain.h @@ -101,9 +101,6 @@ int callchain_append(struct callchain_root *root, int callchain_merge(struct callchain_cursor *cursor, struct callchain_root *dst, struct callchain_root *src); -struct ip_callchain; -union perf_event; - bool ip_callchain__valid(struct ip_callchain *chain, const union perf_event *event); /* diff --git a/trunk/tools/perf/util/cgroup.c b/trunk/tools/perf/util/cgroup.c index dbe2f16b1a1a..96bee5c46008 100644 --- a/trunk/tools/perf/util/cgroup.c +++ b/trunk/tools/perf/util/cgroup.c @@ -3,6 +3,7 @@ #include "parse-options.h" #include "evsel.h" #include "cgroup.h" +#include "debugfs.h" /* MAX_PATH, STR() */ #include "evlist.h" int nr_cgroups; @@ -11,7 +12,7 @@ static int cgroupfs_find_mountpoint(char *buf, size_t maxlen) { FILE *fp; - char mountpoint[PATH_MAX + 1], tokens[PATH_MAX + 1], type[PATH_MAX + 1]; + char mountpoint[MAX_PATH+1], tokens[MAX_PATH+1], type[MAX_PATH+1]; char *token, *saved_ptr = NULL; int found = 0; @@ -24,8 +25,8 @@ cgroupfs_find_mountpoint(char *buf, size_t maxlen) * and inspect every cgroupfs mount point to find one that has * perf_event subsystem */ - while (fscanf(fp, "%*s %"STR(PATH_MAX)"s %"STR(PATH_MAX)"s %" - STR(PATH_MAX)"s %*d %*d\n", + while (fscanf(fp, "%*s %"STR(MAX_PATH)"s %"STR(MAX_PATH)"s %" + STR(MAX_PATH)"s %*d %*d\n", mountpoint, type, tokens) == 3) { if (!strcmp(type, "cgroup")) { @@ -56,15 +57,15 @@ cgroupfs_find_mountpoint(char *buf, size_t maxlen) static int open_cgroup(char *name) { - char path[PATH_MAX + 1]; - char mnt[PATH_MAX + 1]; + char path[MAX_PATH+1]; + char mnt[MAX_PATH+1]; int fd; - if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1)) + if (cgroupfs_find_mountpoint(mnt, MAX_PATH+1)) return -1; - snprintf(path, 
PATH_MAX, "%s/%s", mnt, name); + snprintf(path, MAX_PATH, "%s/%s", mnt, name); fd = open(path, O_RDONLY); if (fd == -1) diff --git a/trunk/tools/perf/util/config.c b/trunk/tools/perf/util/config.c index 0deac6a14b65..80d9598db31a 100644 --- a/trunk/tools/perf/util/config.c +++ b/trunk/tools/perf/util/config.c @@ -1,8 +1,5 @@ /* - * config.c - * - * Helper functions for parsing config items. - * Originally copied from GIT source. + * GIT - The information manager from hell * * Copyright (C) Linus Torvalds, 2005 * Copyright (C) Johannes Schindelin, 2005 diff --git a/trunk/tools/perf/util/debugfs.c b/trunk/tools/perf/util/debugfs.c index ffc35e748e89..a88fefc0cc0a 100644 --- a/trunk/tools/perf/util/debugfs.c +++ b/trunk/tools/perf/util/debugfs.c @@ -2,12 +2,8 @@ #include "debugfs.h" #include "cache.h" -#include -#include - static int debugfs_premounted; -char debugfs_mountpoint[PATH_MAX + 1] = "/sys/kernel/debug"; -char tracing_events_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing/events"; +static char debugfs_mountpoint[MAX_PATH+1]; static const char *debugfs_known_mountpoints[] = { "/sys/kernel/debug/", @@ -66,9 +62,11 @@ const char *debugfs_find_mountpoint(void) /* give up and parse /proc/mounts */ fp = fopen("/proc/mounts", "r"); if (fp == NULL) - return NULL; + die("Can't open /proc/mounts for read"); - while (fscanf(fp, "%*s %" STR(PATH_MAX) "s %99s %*s %*d %*d\n", + while (fscanf(fp, "%*s %" + STR(MAX_PATH) + "s %99s %*s %*d %*d\n", debugfs_mountpoint, type) == 2) { if (strcmp(type, "debugfs") == 0) break; @@ -108,12 +106,6 @@ int debugfs_valid_entry(const char *path) return 0; } -static void debugfs_set_tracing_events_path(const char *mountpoint) -{ - snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s", - mountpoint, "tracing/events"); -} - /* mount the debugfs somewhere if it's not mounted */ char *debugfs_mount(const char *mountpoint) @@ -121,7 +113,7 @@ char *debugfs_mount(const char *mountpoint) /* see if it's already mounted */ if (debugfs_find_mountpoint()) { debugfs_premounted = 1; - goto out; + return debugfs_mountpoint; } /* if not mounted and no argument */ @@ -137,17 +129,10 @@ char *debugfs_mount(const char *mountpoint) return NULL; /* save the mountpoint */ - debugfs_found = 1; strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint)); -out: - debugfs_set_tracing_events_path(debugfs_mountpoint); - return debugfs_mountpoint; -} + debugfs_found = 1; -void debugfs_set_path(const char *mountpoint) -{ - snprintf(debugfs_mountpoint, sizeof(debugfs_mountpoint), "%s", mountpoint); - debugfs_set_tracing_events_path(mountpoint); + return debugfs_mountpoint; } /* umount the debugfs */ @@ -173,7 +158,7 @@ int debugfs_umount(void) int debugfs_write(const char *entry, const char *value) { - char path[PATH_MAX + 1]; + char path[MAX_PATH+1]; int ret, count; int fd; @@ -218,7 +203,7 @@ int debugfs_write(const char *entry, const char *value) */ int debugfs_read(const char *entry, char *buffer, size_t size) { - char path[PATH_MAX + 1]; + char path[MAX_PATH+1]; int ret; int fd; diff --git a/trunk/tools/perf/util/debugfs.h b/trunk/tools/perf/util/debugfs.h index 4a878f735eb0..83a02879745f 100644 --- a/trunk/tools/perf/util/debugfs.h +++ b/trunk/tools/perf/util/debugfs.h @@ -1,18 +1,25 @@ #ifndef __DEBUGFS_H__ #define __DEBUGFS_H__ -const char *debugfs_find_mountpoint(void); -int debugfs_valid_mountpoint(const char *debugfs); -int debugfs_valid_entry(const char *path); -char *debugfs_mount(const char *mountpoint); -int debugfs_umount(void); -void 
debugfs_set_path(const char *mountpoint); -int debugfs_write(const char *entry, const char *value); -int debugfs_read(const char *entry, char *buffer, size_t size); -void debugfs_force_cleanup(void); -int debugfs_make_path(const char *element, char *buffer, int size); +#include -extern char debugfs_mountpoint[]; -extern char tracing_events_path[]; +#ifndef MAX_PATH +# define MAX_PATH 256 +#endif + +#ifndef STR +# define _STR(x) #x +# define STR(x) _STR(x) +#endif + +extern const char *debugfs_find_mountpoint(void); +extern int debugfs_valid_mountpoint(const char *debugfs); +extern int debugfs_valid_entry(const char *path); +extern char *debugfs_mount(const char *mountpoint); +extern int debugfs_umount(void); +extern int debugfs_write(const char *entry, const char *value); +extern int debugfs_read(const char *entry, char *buffer, size_t size); +extern void debugfs_force_cleanup(void); +extern int debugfs_make_path(const char *element, char *buffer, int size); #endif /* __DEBUGFS_H__ */ diff --git a/trunk/tools/perf/util/event.c b/trunk/tools/perf/util/event.c index 73ddaf06b8e7..437f8ca679a0 100644 --- a/trunk/tools/perf/util/event.c +++ b/trunk/tools/perf/util/event.c @@ -1,6 +1,7 @@ #include #include "event.h" #include "debug.h" +#include "session.h" #include "sort.h" #include "string.h" #include "strlist.h" @@ -43,27 +44,36 @@ static struct perf_sample synth_sample = { .period = 1, }; -static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len) +static pid_t perf_event__synthesize_comm(union perf_event *event, pid_t pid, + int full, perf_event__handler_t process, + struct perf_session *session) { char filename[PATH_MAX]; char bf[BUFSIZ]; FILE *fp; size_t size = 0; - pid_t tgid = -1; + DIR *tasks; + struct dirent dirent, *next; + pid_t tgid = 0; snprintf(filename, sizeof(filename), "/proc/%d/status", pid); fp = fopen(filename, "r"); if (fp == NULL) { +out_race: + /* + * We raced with a task exiting - just return: + */ pr_debug("couldn't open %s\n", filename); return 0; } - while (!comm[0] || (tgid < 0)) { + memset(&event->comm, 0, sizeof(event->comm)); + + while (!event->comm.comm[0] || !event->comm.pid) { if (fgets(bf, sizeof(bf), fp) == NULL) { - pr_warning("couldn't get COMM and pgid, malformed %s\n", - filename); - break; + pr_warning("couldn't get COMM and pgid, malformed %s\n", filename); + goto out; } if (memcmp(bf, "Name:", 5) == 0) { @@ -71,65 +81,33 @@ static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len) while (*name && isspace(*name)) ++name; size = strlen(name) - 1; - if (size >= len) - size = len - 1; - memcpy(comm, name, size); - + memcpy(event->comm.comm, name, size++); } else if (memcmp(bf, "Tgid:", 5) == 0) { char *tgids = bf + 5; while (*tgids && isspace(*tgids)) ++tgids; - tgid = atoi(tgids); + tgid = event->comm.pid = atoi(tgids); } } - fclose(fp); - - return tgid; -} - -static pid_t perf_event__synthesize_comm(struct perf_tool *tool, - union perf_event *event, pid_t pid, - int full, - perf_event__handler_t process, - struct machine *machine) -{ - char filename[PATH_MAX]; - size_t size; - DIR *tasks; - struct dirent dirent, *next; - pid_t tgid; - - memset(&event->comm, 0, sizeof(event->comm)); - - tgid = perf_event__get_comm_tgid(pid, event->comm.comm, - sizeof(event->comm.comm)); - if (tgid < 0) - goto out; - - event->comm.pid = tgid; event->comm.header.type = PERF_RECORD_COMM; - - size = strlen(event->comm.comm) + 1; size = ALIGN(size, sizeof(u64)); - memset(event->comm.comm + size, 0, machine->id_hdr_size); + 
memset(event->comm.comm + size, 0, session->id_hdr_size); event->comm.header.size = (sizeof(event->comm) - (sizeof(event->comm.comm) - size) + - machine->id_hdr_size); + session->id_hdr_size); if (!full) { event->comm.tid = pid; - process(tool, event, &synth_sample, machine); + process(event, &synth_sample, session); goto out; } snprintf(filename, sizeof(filename), "/proc/%d/task", pid); tasks = opendir(filename); - if (tasks == NULL) { - pr_debug("couldn't open %s\n", filename); - return 0; - } + if (tasks == NULL) + goto out_race; while (!readdir_r(tasks, &dirent, &next) && next) { char *end; @@ -137,32 +115,22 @@ static pid_t perf_event__synthesize_comm(struct perf_tool *tool, if (*end) continue; - /* already have tgid; jut want to update the comm */ - (void) perf_event__get_comm_tgid(pid, event->comm.comm, - sizeof(event->comm.comm)); - - size = strlen(event->comm.comm) + 1; - size = ALIGN(size, sizeof(u64)); - memset(event->comm.comm + size, 0, machine->id_hdr_size); - event->comm.header.size = (sizeof(event->comm) - - (sizeof(event->comm.comm) - size) + - machine->id_hdr_size); - event->comm.tid = pid; - process(tool, event, &synth_sample, machine); + process(event, &synth_sample, session); } closedir(tasks); out: + fclose(fp); + return tgid; } -static int perf_event__synthesize_mmap_events(struct perf_tool *tool, - union perf_event *event, +static int perf_event__synthesize_mmap_events(union perf_event *event, pid_t pid, pid_t tgid, perf_event__handler_t process, - struct machine *machine) + struct perf_session *session) { char filename[PATH_MAX]; FILE *fp; @@ -225,12 +193,12 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool, event->mmap.len -= event->mmap.start; event->mmap.header.size = (sizeof(event->mmap) - (sizeof(event->mmap.filename) - size)); - memset(event->mmap.filename + size, 0, machine->id_hdr_size); - event->mmap.header.size += machine->id_hdr_size; + memset(event->mmap.filename + size, 0, session->id_hdr_size); + event->mmap.header.size += session->id_hdr_size; event->mmap.pid = tgid; event->mmap.tid = pid; - process(tool, event, &synth_sample, machine); + process(event, &synth_sample, session); } } @@ -238,14 +206,14 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool, return 0; } -int perf_event__synthesize_modules(struct perf_tool *tool, - perf_event__handler_t process, +int perf_event__synthesize_modules(perf_event__handler_t process, + struct perf_session *session, struct machine *machine) { struct rb_node *nd; struct map_groups *kmaps = &machine->kmaps; union perf_event *event = zalloc((sizeof(event->mmap) + - machine->id_hdr_size)); + session->id_hdr_size)); if (event == NULL) { pr_debug("Not enough memory synthesizing mmap event " "for kernel modules\n"); @@ -275,15 +243,15 @@ int perf_event__synthesize_modules(struct perf_tool *tool, event->mmap.header.type = PERF_RECORD_MMAP; event->mmap.header.size = (sizeof(event->mmap) - (sizeof(event->mmap.filename) - size)); - memset(event->mmap.filename + size, 0, machine->id_hdr_size); - event->mmap.header.size += machine->id_hdr_size; + memset(event->mmap.filename + size, 0, session->id_hdr_size); + event->mmap.header.size += session->id_hdr_size; event->mmap.start = pos->start; event->mmap.len = pos->end - pos->start; event->mmap.pid = machine->pid; memcpy(event->mmap.filename, pos->dso->long_name, pos->dso->long_name_len + 1); - process(tool, event, &synth_sample, machine); + process(event, &synth_sample, session); } free(event); @@ -292,69 +260,40 @@ int 
perf_event__synthesize_modules(struct perf_tool *tool, static int __event__synthesize_thread(union perf_event *comm_event, union perf_event *mmap_event, - pid_t pid, int full, - perf_event__handler_t process, - struct perf_tool *tool, - struct machine *machine) + pid_t pid, perf_event__handler_t process, + struct perf_session *session) { - pid_t tgid = perf_event__synthesize_comm(tool, comm_event, pid, full, - process, machine); + pid_t tgid = perf_event__synthesize_comm(comm_event, pid, 1, process, + session); if (tgid == -1) return -1; - return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, - process, machine); + return perf_event__synthesize_mmap_events(mmap_event, pid, tgid, + process, session); } -int perf_event__synthesize_thread_map(struct perf_tool *tool, - struct thread_map *threads, +int perf_event__synthesize_thread_map(struct thread_map *threads, perf_event__handler_t process, - struct machine *machine) + struct perf_session *session) { union perf_event *comm_event, *mmap_event; - int err = -1, thread, j; + int err = -1, thread; - comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); + comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size); if (comm_event == NULL) goto out; - mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size); + mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size); if (mmap_event == NULL) goto out_free_comm; err = 0; for (thread = 0; thread < threads->nr; ++thread) { if (__event__synthesize_thread(comm_event, mmap_event, - threads->map[thread], 0, - process, tool, machine)) { + threads->map[thread], + process, session)) { err = -1; break; } - - /* - * comm.pid is set to thread group id by - * perf_event__synthesize_comm - */ - if ((int) comm_event->comm.pid != threads->map[thread]) { - bool need_leader = true; - - /* is thread group leader in thread_map? 
*/ - for (j = 0; j < threads->nr; ++j) { - if ((int) comm_event->comm.pid == threads->map[j]) { - need_leader = false; - break; - } - } - - /* if not, generate events for it */ - if (need_leader && - __event__synthesize_thread(comm_event, - mmap_event, - comm_event->comm.pid, 0, - process, tool, machine)) { - err = -1; - break; - } - } } free(mmap_event); out_free_comm: @@ -363,20 +302,19 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool, return err; } -int perf_event__synthesize_threads(struct perf_tool *tool, - perf_event__handler_t process, - struct machine *machine) +int perf_event__synthesize_threads(perf_event__handler_t process, + struct perf_session *session) { DIR *proc; struct dirent dirent, *next; union perf_event *comm_event, *mmap_event; int err = -1; - comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); + comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size); if (comm_event == NULL) goto out; - mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size); + mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size); if (mmap_event == NULL) goto out_free_comm; @@ -391,8 +329,8 @@ int perf_event__synthesize_threads(struct perf_tool *tool, if (*end) /* only interested in proper numerical dirents */ continue; - __event__synthesize_thread(comm_event, mmap_event, pid, 1, - process, tool, machine); + __event__synthesize_thread(comm_event, mmap_event, pid, + process, session); } closedir(proc); @@ -427,8 +365,8 @@ static int find_symbol_cb(void *arg, const char *name, char type, return 1; } -int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, - perf_event__handler_t process, +int perf_event__synthesize_kernel_mmap(perf_event__handler_t process, + struct perf_session *session, struct machine *machine, const char *symbol_name) { @@ -445,7 +383,7 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, */ struct process_symbol_args args = { .name = symbol_name, }; union perf_event *event = zalloc((sizeof(event->mmap) + - machine->id_hdr_size)); + session->id_hdr_size)); if (event == NULL) { pr_debug("Not enough memory synthesizing mmap event " "for kernel modules\n"); @@ -479,32 +417,25 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, size = ALIGN(size, sizeof(u64)); event->mmap.header.type = PERF_RECORD_MMAP; event->mmap.header.size = (sizeof(event->mmap) - - (sizeof(event->mmap.filename) - size) + machine->id_hdr_size); + (sizeof(event->mmap.filename) - size) + session->id_hdr_size); event->mmap.pgoff = args.start; event->mmap.start = map->start; event->mmap.len = map->end - event->mmap.start; event->mmap.pid = machine->pid; - err = process(tool, event, &synth_sample, machine); + err = process(event, &synth_sample, session); free(event); return err; } -size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp) -{ - return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid); -} - -int perf_event__process_comm(struct perf_tool *tool __used, - union perf_event *event, +int perf_event__process_comm(union perf_event *event, struct perf_sample *sample __used, - struct machine *machine) + struct perf_session *session) { - struct thread *thread = machine__findnew_thread(machine, event->comm.tid); + struct thread *thread = perf_session__findnew(session, event->comm.tid); - if (dump_trace) - perf_event__fprintf_comm(event, stdout); + dump_printf(": %s:%d\n", event->comm.comm, event->comm.tid); if (thread == NULL || thread__set_comm(thread, event->comm.comm)) { 
dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); @@ -514,13 +445,13 @@ int perf_event__process_comm(struct perf_tool *tool __used, return 0; } -int perf_event__process_lost(struct perf_tool *tool __used, - union perf_event *event, +int perf_event__process_lost(union perf_event *event, struct perf_sample *sample __used, - struct machine *machine __used) + struct perf_session *session) { dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n", event->lost.id, event->lost.lost); + session->hists.stats.total_lost += event->lost.lost; return 0; } @@ -537,15 +468,21 @@ static void perf_event__set_kernel_mmap_len(union perf_event *event, maps[MAP__FUNCTION]->end = ~0ULL; } -static int perf_event__process_kernel_mmap(struct perf_tool *tool __used, - union perf_event *event, - struct machine *machine) +static int perf_event__process_kernel_mmap(union perf_event *event, + struct perf_session *session) { struct map *map; char kmmap_prefix[PATH_MAX]; + struct machine *machine; enum dso_kernel_type kernel_type; bool is_kernel_mmap; + machine = perf_session__findnew_machine(session, event->mmap.pid); + if (!machine) { + pr_err("Can't find id %d's machine\n", event->mmap.pid); + goto out_problem; + } + machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix)); if (machine__is_host(machine)) kernel_type = DSO_TYPE_KERNEL; @@ -612,9 +549,9 @@ static int perf_event__process_kernel_mmap(struct perf_tool *tool __used, * time /proc/sys/kernel/kptr_restrict was non zero. */ if (event->mmap.pgoff != 0) { - maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, - symbol_name, - event->mmap.pgoff); + perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, + symbol_name, + event->mmap.pgoff); } if (machine__is_default_guest(machine)) { @@ -630,35 +567,32 @@ static int perf_event__process_kernel_mmap(struct perf_tool *tool __used, return -1; } -size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp) -{ - return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n", - event->mmap.pid, event->mmap.tid, event->mmap.start, - event->mmap.len, event->mmap.pgoff, event->mmap.filename); -} - -int perf_event__process_mmap(struct perf_tool *tool, - union perf_event *event, +int perf_event__process_mmap(union perf_event *event, struct perf_sample *sample __used, - struct machine *machine) + struct perf_session *session) { + struct machine *machine; struct thread *thread; struct map *map; u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; int ret = 0; - if (dump_trace) - perf_event__fprintf_mmap(event, stdout); + dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n", + event->mmap.pid, event->mmap.tid, event->mmap.start, + event->mmap.len, event->mmap.pgoff, event->mmap.filename); if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL || cpumode == PERF_RECORD_MISC_KERNEL) { - ret = perf_event__process_kernel_mmap(tool, event, machine); + ret = perf_event__process_kernel_mmap(event, session); if (ret < 0) goto out_problem; return 0; } - thread = machine__findnew_thread(machine, event->mmap.pid); + machine = perf_session__find_host_machine(session); + if (machine == NULL) + goto out_problem; + thread = perf_session__findnew(session, event->mmap.pid); if (thread == NULL) goto out_problem; map = map__new(&machine->user_dsos, event->mmap.start, @@ -676,26 +610,18 @@ int perf_event__process_mmap(struct perf_tool *tool, return 0; } -size_t perf_event__fprintf_task(union perf_event *event, FILE *fp) -{ - return fprintf(fp, "(%d:%d):(%d:%d)\n", - 
event->fork.pid, event->fork.tid, - event->fork.ppid, event->fork.ptid); -} - -int perf_event__process_task(struct perf_tool *tool __used, - union perf_event *event, +int perf_event__process_task(union perf_event *event, struct perf_sample *sample __used, - struct machine *machine) + struct perf_session *session) { - struct thread *thread = machine__findnew_thread(machine, event->fork.tid); - struct thread *parent = machine__findnew_thread(machine, event->fork.ptid); + struct thread *thread = perf_session__findnew(session, event->fork.tid); + struct thread *parent = perf_session__findnew(session, event->fork.ptid); - if (dump_trace) - perf_event__fprintf_task(event, stdout); + dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid, + event->fork.ppid, event->fork.ptid); if (event->header.type == PERF_RECORD_EXIT) { - machine__remove_thread(machine, thread); + perf_session__remove_thread(session, thread); return 0; } @@ -708,45 +634,22 @@ int perf_event__process_task(struct perf_tool *tool __used, return 0; } -size_t perf_event__fprintf(union perf_event *event, FILE *fp) -{ - size_t ret = fprintf(fp, "PERF_RECORD_%s", - perf_event__name(event->header.type)); - - switch (event->header.type) { - case PERF_RECORD_COMM: - ret += perf_event__fprintf_comm(event, fp); - break; - case PERF_RECORD_FORK: - case PERF_RECORD_EXIT: - ret += perf_event__fprintf_task(event, fp); - break; - case PERF_RECORD_MMAP: - ret += perf_event__fprintf_mmap(event, fp); - break; - default: - ret += fprintf(fp, "\n"); - } - - return ret; -} - -int perf_event__process(struct perf_tool *tool, union perf_event *event, - struct perf_sample *sample, struct machine *machine) +int perf_event__process(union perf_event *event, struct perf_sample *sample, + struct perf_session *session) { switch (event->header.type) { case PERF_RECORD_COMM: - perf_event__process_comm(tool, event, sample, machine); + perf_event__process_comm(event, sample, session); break; case PERF_RECORD_MMAP: - perf_event__process_mmap(tool, event, sample, machine); + perf_event__process_mmap(event, sample, session); break; case PERF_RECORD_FORK: case PERF_RECORD_EXIT: - perf_event__process_task(tool, event, sample, machine); + perf_event__process_task(event, sample, session); break; case PERF_RECORD_LOST: - perf_event__process_lost(tool, event, sample, machine); + perf_event__process_lost(event, sample, session); default: break; } @@ -755,29 +658,36 @@ int perf_event__process(struct perf_tool *tool, union perf_event *event, } void thread__find_addr_map(struct thread *self, - struct machine *machine, u8 cpumode, - enum map_type type, u64 addr, + struct perf_session *session, u8 cpumode, + enum map_type type, pid_t pid, u64 addr, struct addr_location *al) { struct map_groups *mg = &self->mg; + struct machine *machine = NULL; al->thread = self; al->addr = addr; al->cpumode = cpumode; al->filtered = false; - if (machine == NULL) { - al->map = NULL; - return; - } - if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) { al->level = 'k'; + machine = perf_session__find_host_machine(session); + if (machine == NULL) { + al->map = NULL; + return; + } mg = &machine->kmaps; } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) { al->level = '.'; + machine = perf_session__find_host_machine(session); } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) { al->level = 'g'; + machine = perf_session__find_machine(session, pid); + if (machine == NULL) { + al->map = NULL; + return; + } mg = &machine->kmaps; } else { /* @@ -823,12 +733,13 @@ void 
thread__find_addr_map(struct thread *self, al->addr = al->map->map_ip(al->map, al->addr); } -void thread__find_addr_location(struct thread *thread, struct machine *machine, - u8 cpumode, enum map_type type, u64 addr, +void thread__find_addr_location(struct thread *self, + struct perf_session *session, u8 cpumode, + enum map_type type, pid_t pid, u64 addr, struct addr_location *al, symbol_filter_t filter) { - thread__find_addr_map(thread, machine, cpumode, type, addr, al); + thread__find_addr_map(self, session, cpumode, type, pid, addr, al); if (al->map != NULL) al->sym = map__find_symbol(al->map, al->addr, filter); else @@ -836,13 +747,13 @@ void thread__find_addr_location(struct thread *thread, struct machine *machine, } int perf_event__preprocess_sample(const union perf_event *event, - struct machine *machine, + struct perf_session *session, struct addr_location *al, struct perf_sample *sample, symbol_filter_t filter) { u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - struct thread *thread = machine__findnew_thread(machine, event->ip.pid); + struct thread *thread = perf_session__findnew(session, event->ip.pid); if (thread == NULL) return -1; @@ -853,18 +764,18 @@ int perf_event__preprocess_sample(const union perf_event *event, dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); /* - * Have we already created the kernel maps for this machine? + * Have we already created the kernel maps for the host machine? * * This should have happened earlier, when we processed the kernel MMAP * events, but for older perf.data files there was no such thing, so do * it now. */ if (cpumode == PERF_RECORD_MISC_KERNEL && - machine->vmlinux_maps[MAP__FUNCTION] == NULL) - machine__create_kernel_maps(machine); + session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL) + machine__create_kernel_maps(&session->host_machine); - thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION, - event->ip.ip, al); + thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION, + event->ip.pid, event->ip.ip, al); dump_printf(" ...... dso: %s\n", al->map ? al->map->dso->long_name : al->level == 'H' ? 
"[hypervisor]" : ""); @@ -872,14 +783,13 @@ int perf_event__preprocess_sample(const union perf_event *event, al->cpu = sample->cpu; if (al->map) { - struct dso *dso = al->map->dso; - if (symbol_conf.dso_list && - (!dso || !(strlist__has_entry(symbol_conf.dso_list, - dso->short_name) || - (dso->short_name != dso->long_name && - strlist__has_entry(symbol_conf.dso_list, - dso->long_name))))) + (!al->map || !al->map->dso || + !(strlist__has_entry(symbol_conf.dso_list, + al->map->dso->short_name) || + (al->map->dso->short_name != al->map->dso->long_name && + strlist__has_entry(symbol_conf.dso_list, + al->map->dso->long_name))))) goto out_filtered; al->sym = map__find_symbol(al->map, al->addr, filter); diff --git a/trunk/tools/perf/util/event.h b/trunk/tools/perf/util/event.h index cbdeaad9c5e5..357a85b85248 100644 --- a/trunk/tools/perf/util/event.h +++ b/trunk/tools/perf/util/event.h @@ -2,7 +2,6 @@ #define __PERF_RECORD_H #include -#include #include "../perf.h" #include "map.h" @@ -142,54 +141,43 @@ union perf_event { void perf_event__print_totals(void); -struct perf_tool; +struct perf_session; struct thread_map; -typedef int (*perf_event__handler_t)(struct perf_tool *tool, - union perf_event *event, +typedef int (*perf_event__handler_synth_t)(union perf_event *event, + struct perf_session *session); +typedef int (*perf_event__handler_t)(union perf_event *event, struct perf_sample *sample, - struct machine *machine); + struct perf_session *session); -int perf_event__synthesize_thread_map(struct perf_tool *tool, - struct thread_map *threads, +int perf_event__synthesize_thread_map(struct thread_map *threads, perf_event__handler_t process, - struct machine *machine); -int perf_event__synthesize_threads(struct perf_tool *tool, - perf_event__handler_t process, - struct machine *machine); -int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, - perf_event__handler_t process, + struct perf_session *session); +int perf_event__synthesize_threads(perf_event__handler_t process, + struct perf_session *session); +int perf_event__synthesize_kernel_mmap(perf_event__handler_t process, + struct perf_session *session, struct machine *machine, const char *symbol_name); -int perf_event__synthesize_modules(struct perf_tool *tool, - perf_event__handler_t process, +int perf_event__synthesize_modules(perf_event__handler_t process, + struct perf_session *session, struct machine *machine); -int perf_event__process_comm(struct perf_tool *tool, - union perf_event *event, - struct perf_sample *sample, - struct machine *machine); -int perf_event__process_lost(struct perf_tool *tool, - union perf_event *event, - struct perf_sample *sample, - struct machine *machine); -int perf_event__process_mmap(struct perf_tool *tool, - union perf_event *event, - struct perf_sample *sample, - struct machine *machine); -int perf_event__process_task(struct perf_tool *tool, - union perf_event *event, - struct perf_sample *sample, - struct machine *machine); -int perf_event__process(struct perf_tool *tool, - union perf_event *event, - struct perf_sample *sample, - struct machine *machine); +int perf_event__process_comm(union perf_event *event, struct perf_sample *sample, + struct perf_session *session); +int perf_event__process_lost(union perf_event *event, struct perf_sample *sample, + struct perf_session *session); +int perf_event__process_mmap(union perf_event *event, struct perf_sample *sample, + struct perf_session *session); +int perf_event__process_task(union perf_event *event, struct perf_sample *sample, + struct 
perf_session *session); +int perf_event__process(union perf_event *event, struct perf_sample *sample, + struct perf_session *session); struct addr_location; int perf_event__preprocess_sample(const union perf_event *self, - struct machine *machine, + struct perf_session *session, struct addr_location *al, struct perf_sample *sample, symbol_filter_t filter); @@ -199,13 +187,5 @@ const char *perf_event__name(unsigned int id); int perf_event__parse_sample(const union perf_event *event, u64 type, int sample_size, bool sample_id_all, struct perf_sample *sample, bool swapped); -int perf_event__synthesize_sample(union perf_event *event, u64 type, - const struct perf_sample *sample, - bool swapped); - -size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp); -size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp); -size_t perf_event__fprintf_task(union perf_event *event, FILE *fp); -size_t perf_event__fprintf(union perf_event *event, FILE *fp); #endif /* __PERF_RECORD_H */ diff --git a/trunk/tools/perf/util/evlist.c b/trunk/tools/perf/util/evlist.c index fa1837088ca8..fbb4b4ab9cc6 100644 --- a/trunk/tools/perf/util/evlist.c +++ b/trunk/tools/perf/util/evlist.c @@ -6,16 +6,12 @@ * * Released under the GPL v2. (and only v2, not any later version) */ -#include "util.h" -#include "debugfs.h" #include #include "cpumap.h" #include "thread_map.h" #include "evlist.h" #include "evsel.h" -#include - -#include "parse-events.h" +#include "util.h" #include @@ -34,7 +30,6 @@ void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, INIT_HLIST_HEAD(&evlist->heads[i]); INIT_LIST_HEAD(&evlist->entries); perf_evlist__set_maps(evlist, cpus, threads); - evlist->workload.pid = -1; } struct perf_evlist *perf_evlist__new(struct cpu_map *cpus, @@ -48,22 +43,6 @@ struct perf_evlist *perf_evlist__new(struct cpu_map *cpus, return evlist; } -void perf_evlist__config_attrs(struct perf_evlist *evlist, - struct perf_record_opts *opts) -{ - struct perf_evsel *evsel; - - if (evlist->cpus->map[0] < 0) - opts->no_inherit = true; - - list_for_each_entry(evsel, &evlist->entries, node) { - perf_evsel__config(evsel, opts); - - if (evlist->nr_entries > 1) - evsel->attr.sample_type |= PERF_SAMPLE_ID; - } -} - static void perf_evlist__purge(struct perf_evlist *evlist) { struct perf_evsel *pos, *n; @@ -97,14 +76,6 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) ++evlist->nr_entries; } -static void perf_evlist__splice_list_tail(struct perf_evlist *evlist, - struct list_head *list, - int nr_entries) -{ - list_splice_tail(list, &evlist->entries); - evlist->nr_entries += nr_entries; -} - int perf_evlist__add_default(struct perf_evlist *evlist) { struct perf_event_attr attr = { @@ -129,126 +100,6 @@ int perf_evlist__add_default(struct perf_evlist *evlist) return -ENOMEM; } -int perf_evlist__add_attrs(struct perf_evlist *evlist, - struct perf_event_attr *attrs, size_t nr_attrs) -{ - struct perf_evsel *evsel, *n; - LIST_HEAD(head); - size_t i; - - for (i = 0; i < nr_attrs; i++) { - evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i); - if (evsel == NULL) - goto out_delete_partial_list; - list_add_tail(&evsel->node, &head); - } - - perf_evlist__splice_list_tail(evlist, &head, nr_attrs); - - return 0; - -out_delete_partial_list: - list_for_each_entry_safe(evsel, n, &head, node) - perf_evsel__delete(evsel); - return -1; -} - -static int trace_event__id(const char *evname) -{ - char *filename, *colon; - int err = -1, fd; - - if (asprintf(&filename, "%s/%s/id", 
tracing_events_path, evname) < 0) - return -1; - - colon = strrchr(filename, ':'); - if (colon != NULL) - *colon = '/'; - - fd = open(filename, O_RDONLY); - if (fd >= 0) { - char id[16]; - if (read(fd, id, sizeof(id)) > 0) - err = atoi(id); - close(fd); - } - - free(filename); - return err; -} - -int perf_evlist__add_tracepoints(struct perf_evlist *evlist, - const char *tracepoints[], - size_t nr_tracepoints) -{ - int err; - size_t i; - struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs)); - - if (attrs == NULL) - return -1; - - for (i = 0; i < nr_tracepoints; i++) { - err = trace_event__id(tracepoints[i]); - - if (err < 0) - goto out_free_attrs; - - attrs[i].type = PERF_TYPE_TRACEPOINT; - attrs[i].config = err; - attrs[i].sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | - PERF_SAMPLE_CPU); - attrs[i].sample_period = 1; - } - - err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints); -out_free_attrs: - free(attrs); - return err; -} - -static struct perf_evsel * - perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id) -{ - struct perf_evsel *evsel; - - list_for_each_entry(evsel, &evlist->entries, node) { - if (evsel->attr.type == PERF_TYPE_TRACEPOINT && - (int)evsel->attr.config == id) - return evsel; - } - - return NULL; -} - -int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist, - const struct perf_evsel_str_handler *assocs, - size_t nr_assocs) -{ - struct perf_evsel *evsel; - int err; - size_t i; - - for (i = 0; i < nr_assocs; i++) { - err = trace_event__id(assocs[i].name); - if (err < 0) - goto out; - - evsel = perf_evlist__find_tracepoint_by_id(evlist, err); - if (evsel == NULL) - continue; - - err = -EEXIST; - if (evsel->handler.func != NULL) - goto out; - evsel->handler.func = assocs[i].handler; - } - - err = 0; -out: - return err; -} - void perf_evlist__disable(struct perf_evlist *evlist) { int cpu, thread; @@ -275,7 +126,7 @@ void perf_evlist__enable(struct perf_evlist *evlist) } } -static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) +int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) { int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries; evlist->pollfd = malloc(sizeof(struct pollfd) * nfds); @@ -431,7 +282,7 @@ void perf_evlist__munmap(struct perf_evlist *evlist) evlist->mmap = NULL; } -static int perf_evlist__alloc_mmap(struct perf_evlist *evlist) +int perf_evlist__alloc_mmap(struct perf_evlist *evlist) { evlist->nr_mmaps = evlist->cpus->nr; if (evlist->cpus->map[0] == -1) @@ -447,10 +298,8 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, evlist->mmap[idx].mask = mask; evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot, MAP_SHARED, fd, 0); - if (evlist->mmap[idx].base == MAP_FAILED) { - evlist->mmap[idx].base = NULL; + if (evlist->mmap[idx].base == MAP_FAILED) return -1; - } perf_evlist__add_pollfd(evlist, fd); return 0; @@ -551,22 +400,14 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in * * Using perf_evlist__read_on_cpu does this automatically. */ -int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, - bool overwrite) +int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite) { unsigned int page_size = sysconf(_SC_PAGE_SIZE); + int mask = pages * page_size - 1; struct perf_evsel *evsel; const struct cpu_map *cpus = evlist->cpus; const struct thread_map *threads = evlist->threads; - int prot = PROT_READ | (overwrite ? 
0 : PROT_WRITE), mask; - - /* 512 kiB: default amount of unprivileged mlocked memory */ - if (pages == UINT_MAX) - pages = (512 * 1024) / page_size; - else if (!is_power_of_2(pages)) - return -EINVAL; - - mask = pages * page_size - 1; + int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE); if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0) return -ENOMEM; @@ -671,38 +512,6 @@ u64 perf_evlist__sample_type(const struct perf_evlist *evlist) return first->attr.sample_type; } -u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist) -{ - struct perf_evsel *first; - struct perf_sample *data; - u64 sample_type; - u16 size = 0; - - first = list_entry(evlist->entries.next, struct perf_evsel, node); - - if (!first->attr.sample_id_all) - goto out; - - sample_type = first->attr.sample_type; - - if (sample_type & PERF_SAMPLE_TID) - size += sizeof(data->tid) * 2; - - if (sample_type & PERF_SAMPLE_TIME) - size += sizeof(data->time); - - if (sample_type & PERF_SAMPLE_ID) - size += sizeof(data->id); - - if (sample_type & PERF_SAMPLE_STREAM_ID) - size += sizeof(data->stream_id); - - if (sample_type & PERF_SAMPLE_CPU) - size += sizeof(data->cpu) * 2; -out: - return size; -} - bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist) { struct perf_evsel *pos, *first; @@ -760,97 +569,3 @@ int perf_evlist__open(struct perf_evlist *evlist, bool group) return err; } - -int perf_evlist__prepare_workload(struct perf_evlist *evlist, - struct perf_record_opts *opts, - const char *argv[]) -{ - int child_ready_pipe[2], go_pipe[2]; - char bf; - - if (pipe(child_ready_pipe) < 0) { - perror("failed to create 'ready' pipe"); - return -1; - } - - if (pipe(go_pipe) < 0) { - perror("failed to create 'go' pipe"); - goto out_close_ready_pipe; - } - - evlist->workload.pid = fork(); - if (evlist->workload.pid < 0) { - perror("failed to fork"); - goto out_close_pipes; - } - - if (!evlist->workload.pid) { - if (opts->pipe_output) - dup2(2, 1); - - close(child_ready_pipe[0]); - close(go_pipe[1]); - fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC); - - /* - * Do a dummy execvp to get the PLT entry resolved, - * so we avoid the resolver overhead on the real - * execvp call. - */ - execvp("", (char **)argv); - - /* - * Tell the parent we're ready to go - */ - close(child_ready_pipe[1]); - - /* - * Wait until the parent tells us to go. - */ - if (read(go_pipe[0], &bf, 1) == -1) - perror("unable to read pipe"); - - execvp(argv[0], (char **)argv); - - perror(argv[0]); - kill(getppid(), SIGUSR1); - exit(-1); - } - - if (!opts->system_wide && opts->target_tid == -1 && opts->target_pid == -1) - evlist->threads->map[0] = evlist->workload.pid; - - close(child_ready_pipe[1]); - close(go_pipe[0]); - /* - * wait for child to settle - */ - if (read(child_ready_pipe[0], &bf, 1) == -1) { - perror("unable to read pipe"); - goto out_close_pipes; - } - - evlist->workload.cork_fd = go_pipe[1]; - close(child_ready_pipe[0]); - return 0; - -out_close_pipes: - close(go_pipe[0]); - close(go_pipe[1]); -out_close_ready_pipe: - close(child_ready_pipe[0]); - close(child_ready_pipe[1]); - return -1; -} - -int perf_evlist__start_workload(struct perf_evlist *evlist) -{ - if (evlist->workload.cork_fd > 0) { - /* - * Remove the cork, let it rip! 
- */ - return close(evlist->workload.cork_fd); - } - - return 0; -} diff --git a/trunk/tools/perf/util/evlist.h b/trunk/tools/perf/util/evlist.h index 8922aeed0467..1779ffef7828 100644 --- a/trunk/tools/perf/util/evlist.h +++ b/trunk/tools/perf/util/evlist.h @@ -2,16 +2,12 @@ #define __PERF_EVLIST_H 1 #include -#include #include "../perf.h" #include "event.h" -#include "util.h" -#include struct pollfd; struct thread_map; struct cpu_map; -struct perf_record_opts; #define PERF_EVLIST__HLIST_BITS 8 #define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS) @@ -23,10 +19,6 @@ struct perf_evlist { int nr_fds; int nr_mmaps; int mmap_len; - struct { - int cork_fd; - pid_t pid; - } workload; bool overwrite; union perf_event event_copy; struct perf_mmap *mmap; @@ -36,11 +28,6 @@ struct perf_evlist { struct perf_evsel *selected; }; -struct perf_evsel_str_handler { - const char *name; - void *handler; -}; - struct perf_evsel; struct perf_evlist *perf_evlist__new(struct cpu_map *cpus, @@ -52,26 +39,11 @@ void perf_evlist__delete(struct perf_evlist *evlist); void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry); int perf_evlist__add_default(struct perf_evlist *evlist); -int perf_evlist__add_attrs(struct perf_evlist *evlist, - struct perf_event_attr *attrs, size_t nr_attrs); -int perf_evlist__add_tracepoints(struct perf_evlist *evlist, - const char *tracepoints[], size_t nr_tracepoints); -int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist, - const struct perf_evsel_str_handler *assocs, - size_t nr_assocs); - -#define perf_evlist__add_attrs_array(evlist, array) \ - perf_evlist__add_attrs(evlist, array, ARRAY_SIZE(array)) - -#define perf_evlist__add_tracepoints_array(evlist, array) \ - perf_evlist__add_tracepoints(evlist, array, ARRAY_SIZE(array)) - -#define perf_evlist__set_tracepoints_handlers_array(evlist, array) \ - perf_evlist__set_tracepoints_handlers(evlist, array, ARRAY_SIZE(array)) void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel, int cpu, int thread, u64 id); +int perf_evlist__alloc_pollfd(struct perf_evlist *evlist); void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd); struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); @@ -80,16 +52,8 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx); int perf_evlist__open(struct perf_evlist *evlist, bool group); -void perf_evlist__config_attrs(struct perf_evlist *evlist, - struct perf_record_opts *opts); - -int perf_evlist__prepare_workload(struct perf_evlist *evlist, - struct perf_record_opts *opts, - const char *argv[]); -int perf_evlist__start_workload(struct perf_evlist *evlist); - -int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, - bool overwrite); +int perf_evlist__alloc_mmap(struct perf_evlist *evlist); +int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite); void perf_evlist__munmap(struct perf_evlist *evlist); void perf_evlist__disable(struct perf_evlist *evlist); @@ -113,7 +77,6 @@ int perf_evlist__set_filters(struct perf_evlist *evlist); u64 perf_evlist__sample_type(const struct perf_evlist *evlist); bool perf_evlist__sample_id_all(const const struct perf_evlist *evlist); -u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist); bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist); bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist); diff --git a/trunk/tools/perf/util/evsel.c b/trunk/tools/perf/util/evsel.c index 
667f3b78bb2c..d7915d4e77cb 100644 --- a/trunk/tools/perf/util/evsel.c +++ b/trunk/tools/perf/util/evsel.c @@ -63,79 +63,6 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx) return evsel; } -void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts) -{ - struct perf_event_attr *attr = &evsel->attr; - int track = !evsel->idx; /* only the first counter needs these */ - - attr->sample_id_all = opts->sample_id_all_avail ? 1 : 0; - attr->inherit = !opts->no_inherit; - attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | - PERF_FORMAT_TOTAL_TIME_RUNNING | - PERF_FORMAT_ID; - - attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID; - - /* - * We default some events to a 1 default interval. But keep - * it a weak assumption overridable by the user. - */ - if (!attr->sample_period || (opts->user_freq != UINT_MAX && - opts->user_interval != ULLONG_MAX)) { - if (opts->freq) { - attr->sample_type |= PERF_SAMPLE_PERIOD; - attr->freq = 1; - attr->sample_freq = opts->freq; - } else { - attr->sample_period = opts->default_interval; - } - } - - if (opts->no_samples) - attr->sample_freq = 0; - - if (opts->inherit_stat) - attr->inherit_stat = 1; - - if (opts->sample_address) { - attr->sample_type |= PERF_SAMPLE_ADDR; - attr->mmap_data = track; - } - - if (opts->call_graph) - attr->sample_type |= PERF_SAMPLE_CALLCHAIN; - - if (opts->system_wide) - attr->sample_type |= PERF_SAMPLE_CPU; - - if (opts->period) - attr->sample_type |= PERF_SAMPLE_PERIOD; - - if (opts->sample_id_all_avail && - (opts->sample_time || opts->system_wide || - !opts->no_inherit || opts->cpu_list)) - attr->sample_type |= PERF_SAMPLE_TIME; - - if (opts->raw_samples) { - attr->sample_type |= PERF_SAMPLE_TIME; - attr->sample_type |= PERF_SAMPLE_RAW; - attr->sample_type |= PERF_SAMPLE_CPU; - } - - if (opts->no_delay) { - attr->watermark = 0; - attr->wakeup_events = 1; - } - - attr->mmap = track; - attr->comm = track; - - if (opts->target_pid == -1 && opts->target_tid == -1 && !opts->system_wide) { - attr->disabled = 1; - attr->enable_on_exec = 1; - } -} - int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) { int cpu, thread; @@ -460,7 +387,7 @@ int perf_event__parse_sample(const union perf_event *event, u64 type, u32 val32[2]; } u; - memset(data, 0, sizeof(*data)); + data->cpu = data->pid = data->tid = -1; data->stream_id = data->id = data->time = -1ULL; @@ -577,82 +504,3 @@ int perf_event__parse_sample(const union perf_event *event, u64 type, return 0; } - -int perf_event__synthesize_sample(union perf_event *event, u64 type, - const struct perf_sample *sample, - bool swapped) -{ - u64 *array; - - /* - * used for cross-endian analysis. See git commit 65014ab3 - * for why this goofiness is needed. 
- */ - union { - u64 val64; - u32 val32[2]; - } u; - - array = event->sample.array; - - if (type & PERF_SAMPLE_IP) { - event->ip.ip = sample->ip; - array++; - } - - if (type & PERF_SAMPLE_TID) { - u.val32[0] = sample->pid; - u.val32[1] = sample->tid; - if (swapped) { - /* - * Inverse of what is done in perf_event__parse_sample - */ - u.val32[0] = bswap_32(u.val32[0]); - u.val32[1] = bswap_32(u.val32[1]); - u.val64 = bswap_64(u.val64); - } - - *array = u.val64; - array++; - } - - if (type & PERF_SAMPLE_TIME) { - *array = sample->time; - array++; - } - - if (type & PERF_SAMPLE_ADDR) { - *array = sample->addr; - array++; - } - - if (type & PERF_SAMPLE_ID) { - *array = sample->id; - array++; - } - - if (type & PERF_SAMPLE_STREAM_ID) { - *array = sample->stream_id; - array++; - } - - if (type & PERF_SAMPLE_CPU) { - u.val32[0] = sample->cpu; - if (swapped) { - /* - * Inverse of what is done in perf_event__parse_sample - */ - u.val32[0] = bswap_32(u.val32[0]); - u.val64 = bswap_64(u.val64); - } - *array = u.val64; - array++; - } - - if (type & PERF_SAMPLE_PERIOD) { - *array = sample->period; - array++; - } - - return 0; -} diff --git a/trunk/tools/perf/util/evsel.h b/trunk/tools/perf/util/evsel.h index 326b8e4d5035..b1d15e6f7ae3 100644 --- a/trunk/tools/perf/util/evsel.h +++ b/trunk/tools/perf/util/evsel.h @@ -61,17 +61,12 @@ struct perf_evsel { off_t id_offset; }; struct cgroup_sel *cgrp; - struct { - void *func; - void *data; - } handler; bool supported; }; struct cpu_map; struct thread_map; struct perf_evlist; -struct perf_record_opts; struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx); void perf_evsel__init(struct perf_evsel *evsel, @@ -79,9 +74,6 @@ void perf_evsel__init(struct perf_evsel *evsel, void perf_evsel__exit(struct perf_evsel *evsel); void perf_evsel__delete(struct perf_evsel *evsel); -void perf_evsel__config(struct perf_evsel *evsel, - struct perf_record_opts *opts); - int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads); int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads); int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus); diff --git a/trunk/tools/perf/util/header.c b/trunk/tools/perf/util/header.c index 3e7e0b09c12c..33c17a2b2a81 100644 --- a/trunk/tools/perf/util/header.c +++ b/trunk/tools/perf/util/header.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include "evlist.h" @@ -29,6 +28,9 @@ static struct perf_trace_event_type *events; static u32 header_argc; static const char **header_argv; +static int dsos__write_buildid_table(struct perf_header *header, int fd); +static int perf_session__cache_build_ids(struct perf_session *session); + int perf_header__push_event(u64 id, const char *name) { if (strlen(name) > MAX_EVENT_NAME) @@ -185,252 +187,6 @@ perf_header__set_cmdline(int argc, const char **argv) return 0; } -#define dsos__for_each_with_build_id(pos, head) \ - list_for_each_entry(pos, head, node) \ - if (!pos->has_build_id) \ - continue; \ - else - -static int __dsos__write_buildid_table(struct list_head *head, pid_t pid, - u16 misc, int fd) -{ - struct dso *pos; - - dsos__for_each_with_build_id(pos, head) { - int err; - struct build_id_event b; - size_t len; - - if (!pos->hit) - continue; - len = pos->long_name_len + 1; - len = ALIGN(len, NAME_ALIGN); - memset(&b, 0, sizeof(b)); - memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id)); - b.pid = pid; - b.header.misc = misc; - b.header.size = sizeof(b) + len; - err = do_write(fd, &b, sizeof(b)); - if (err < 0) - return 
err; - err = write_padded(fd, pos->long_name, - pos->long_name_len + 1, len); - if (err < 0) - return err; - } - - return 0; -} - -static int machine__write_buildid_table(struct machine *machine, int fd) -{ - int err; - u16 kmisc = PERF_RECORD_MISC_KERNEL, - umisc = PERF_RECORD_MISC_USER; - - if (!machine__is_host(machine)) { - kmisc = PERF_RECORD_MISC_GUEST_KERNEL; - umisc = PERF_RECORD_MISC_GUEST_USER; - } - - err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid, - kmisc, fd); - if (err == 0) - err = __dsos__write_buildid_table(&machine->user_dsos, - machine->pid, umisc, fd); - return err; -} - -static int dsos__write_buildid_table(struct perf_header *header, int fd) -{ - struct perf_session *session = container_of(header, - struct perf_session, header); - struct rb_node *nd; - int err = machine__write_buildid_table(&session->host_machine, fd); - - if (err) - return err; - - for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { - struct machine *pos = rb_entry(nd, struct machine, rb_node); - err = machine__write_buildid_table(pos, fd); - if (err) - break; - } - return err; -} - -int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, - const char *name, bool is_kallsyms) -{ - const size_t size = PATH_MAX; - char *realname, *filename = zalloc(size), - *linkname = zalloc(size), *targetname; - int len, err = -1; - - if (is_kallsyms) { - if (symbol_conf.kptr_restrict) { - pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n"); - return 0; - } - realname = (char *)name; - } else - realname = realpath(name, NULL); - - if (realname == NULL || filename == NULL || linkname == NULL) - goto out_free; - - len = snprintf(filename, size, "%s%s%s", - debugdir, is_kallsyms ? "/" : "", realname); - if (mkdir_p(filename, 0755)) - goto out_free; - - snprintf(filename + len, sizeof(filename) - len, "/%s", sbuild_id); - - if (access(filename, F_OK)) { - if (is_kallsyms) { - if (copyfile("/proc/kallsyms", filename)) - goto out_free; - } else if (link(realname, filename) && copyfile(name, filename)) - goto out_free; - } - - len = snprintf(linkname, size, "%s/.build-id/%.2s", - debugdir, sbuild_id); - - if (access(linkname, X_OK) && mkdir_p(linkname, 0755)) - goto out_free; - - snprintf(linkname + len, size - len, "/%s", sbuild_id + 2); - targetname = filename + strlen(debugdir) - 5; - memcpy(targetname, "../..", 5); - - if (symlink(targetname, linkname) == 0) - err = 0; -out_free: - if (!is_kallsyms) - free(realname); - free(filename); - free(linkname); - return err; -} - -static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size, - const char *name, const char *debugdir, - bool is_kallsyms) -{ - char sbuild_id[BUILD_ID_SIZE * 2 + 1]; - - build_id__sprintf(build_id, build_id_size, sbuild_id); - - return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms); -} - -int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir) -{ - const size_t size = PATH_MAX; - char *filename = zalloc(size), - *linkname = zalloc(size); - int err = -1; - - if (filename == NULL || linkname == NULL) - goto out_free; - - snprintf(linkname, size, "%s/.build-id/%.2s/%s", - debugdir, sbuild_id, sbuild_id + 2); - - if (access(linkname, F_OK)) - goto out_free; - - if (readlink(linkname, filename, size - 1) < 0) - goto out_free; - - if (unlink(linkname)) - goto out_free; - - /* - * Since the link is relative, we must make it absolute: - */ - snprintf(linkname, size, "%s/.build-id/%.2s/%s", - debugdir, sbuild_id, filename); - - if (unlink(linkname)) - goto 
out_free; - - err = 0; -out_free: - free(filename); - free(linkname); - return err; -} - -static int dso__cache_build_id(struct dso *dso, const char *debugdir) -{ - bool is_kallsyms = dso->kernel && dso->long_name[0] != '/'; - - return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), - dso->long_name, debugdir, is_kallsyms); -} - -static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) -{ - struct dso *pos; - int err = 0; - - dsos__for_each_with_build_id(pos, head) - if (dso__cache_build_id(pos, debugdir)) - err = -1; - - return err; -} - -static int machine__cache_build_ids(struct machine *machine, const char *debugdir) -{ - int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir); - ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir); - return ret; -} - -static int perf_session__cache_build_ids(struct perf_session *session) -{ - struct rb_node *nd; - int ret; - char debugdir[PATH_MAX]; - - snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir); - - if (mkdir(debugdir, 0755) != 0 && errno != EEXIST) - return -1; - - ret = machine__cache_build_ids(&session->host_machine, debugdir); - - for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { - struct machine *pos = rb_entry(nd, struct machine, rb_node); - ret |= machine__cache_build_ids(pos, debugdir); - } - return ret ? -1 : 0; -} - -static bool machine__read_build_ids(struct machine *machine, bool with_hits) -{ - bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits); - ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits); - return ret; -} - -static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits) -{ - struct rb_node *nd; - bool ret = machine__read_build_ids(&session->host_machine, with_hits); - - for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { - struct machine *pos = rb_entry(nd, struct machine, rb_node); - ret |= machine__read_build_ids(pos, with_hits); - } - - return ret; -} - static int write_trace_info(int fd, struct perf_header *h __used, struct perf_evlist *evlist) { @@ -446,9 +202,6 @@ static int write_build_id(int fd, struct perf_header *h, session = container_of(h, struct perf_session, header); - if (!perf_session__read_build_ids(session, true)) - return -1; - err = dsos__write_buildid_table(h, fd); if (err < 0) { pr_debug("failed to write buildid table\n"); @@ -1312,30 +1065,26 @@ struct feature_ops { bool full_only; }; -#define FEAT_OPA(n, func) \ - [n] = { .name = #n, .write = write_##func, .print = print_##func } -#define FEAT_OPF(n, func) \ - [n] = { .name = #n, .write = write_##func, .print = print_##func, .full_only = true } - -/* feature_ops not implemented: */ -#define print_trace_info NULL -#define print_build_id NULL +#define FEAT_OPA(n, w, p) \ + [n] = { .name = #n, .write = w, .print = p } +#define FEAT_OPF(n, w, p) \ + [n] = { .name = #n, .write = w, .print = p, .full_only = true } static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { - FEAT_OPA(HEADER_TRACE_INFO, trace_info), - FEAT_OPA(HEADER_BUILD_ID, build_id), - FEAT_OPA(HEADER_HOSTNAME, hostname), - FEAT_OPA(HEADER_OSRELEASE, osrelease), - FEAT_OPA(HEADER_VERSION, version), - FEAT_OPA(HEADER_ARCH, arch), - FEAT_OPA(HEADER_NRCPUS, nrcpus), - FEAT_OPA(HEADER_CPUDESC, cpudesc), - FEAT_OPA(HEADER_CPUID, cpuid), - FEAT_OPA(HEADER_TOTAL_MEM, total_mem), - FEAT_OPA(HEADER_EVENT_DESC, event_desc), - FEAT_OPA(HEADER_CMDLINE, cmdline), - FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology), - FEAT_OPF(HEADER_NUMA_TOPOLOGY, 
numa_topology), + FEAT_OPA(HEADER_TRACE_INFO, write_trace_info, NULL), + FEAT_OPA(HEADER_BUILD_ID, write_build_id, NULL), + FEAT_OPA(HEADER_HOSTNAME, write_hostname, print_hostname), + FEAT_OPA(HEADER_OSRELEASE, write_osrelease, print_osrelease), + FEAT_OPA(HEADER_VERSION, write_version, print_version), + FEAT_OPA(HEADER_ARCH, write_arch, print_arch), + FEAT_OPA(HEADER_NRCPUS, write_nrcpus, print_nrcpus), + FEAT_OPA(HEADER_CPUDESC, write_cpudesc, print_cpudesc), + FEAT_OPA(HEADER_CPUID, write_cpuid, print_cpuid), + FEAT_OPA(HEADER_TOTAL_MEM, write_total_mem, print_total_mem), + FEAT_OPA(HEADER_EVENT_DESC, write_event_desc, print_event_desc), + FEAT_OPA(HEADER_CMDLINE, write_cmdline, print_cmdline), + FEAT_OPF(HEADER_CPU_TOPOLOGY, write_cpu_topology, print_cpu_topology), + FEAT_OPF(HEADER_NUMA_TOPOLOGY, write_numa_topology, print_numa_topology), }; struct header_print_data { @@ -1354,9 +1103,9 @@ static int perf_file_section__fprintf_info(struct perf_file_section *section, "%d, continuing...\n", section->offset, feat); return 0; } - if (feat >= HEADER_LAST_FEATURE) { + if (feat < HEADER_TRACE_INFO || feat >= HEADER_LAST_FEATURE) { pr_warning("unknown feature %d\n", feat); - return 0; + return -1; } if (!feat_ops[feat].print) return 0; @@ -1383,16 +1132,260 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full) return 0; } -static int do_write_feat(int fd, struct perf_header *h, int type, - struct perf_file_section **p, - struct perf_evlist *evlist) -{ +#define dsos__for_each_with_build_id(pos, head) \ + list_for_each_entry(pos, head, node) \ + if (!pos->has_build_id) \ + continue; \ + else + +static int __dsos__write_buildid_table(struct list_head *head, pid_t pid, + u16 misc, int fd) +{ + struct dso *pos; + + dsos__for_each_with_build_id(pos, head) { + int err; + struct build_id_event b; + size_t len; + + if (!pos->hit) + continue; + len = pos->long_name_len + 1; + len = ALIGN(len, NAME_ALIGN); + memset(&b, 0, sizeof(b)); + memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id)); + b.pid = pid; + b.header.misc = misc; + b.header.size = sizeof(b) + len; + err = do_write(fd, &b, sizeof(b)); + if (err < 0) + return err; + err = write_padded(fd, pos->long_name, + pos->long_name_len + 1, len); + if (err < 0) + return err; + } + + return 0; +} + +static int machine__write_buildid_table(struct machine *machine, int fd) +{ + int err; + u16 kmisc = PERF_RECORD_MISC_KERNEL, + umisc = PERF_RECORD_MISC_USER; + + if (!machine__is_host(machine)) { + kmisc = PERF_RECORD_MISC_GUEST_KERNEL; + umisc = PERF_RECORD_MISC_GUEST_USER; + } + + err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid, + kmisc, fd); + if (err == 0) + err = __dsos__write_buildid_table(&machine->user_dsos, + machine->pid, umisc, fd); + return err; +} + +static int dsos__write_buildid_table(struct perf_header *header, int fd) +{ + struct perf_session *session = container_of(header, + struct perf_session, header); + struct rb_node *nd; + int err = machine__write_buildid_table(&session->host_machine, fd); + + if (err) + return err; + + for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { + struct machine *pos = rb_entry(nd, struct machine, rb_node); + err = machine__write_buildid_table(pos, fd); + if (err) + break; + } + return err; +} + +int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, + const char *name, bool is_kallsyms) +{ + const size_t size = PATH_MAX; + char *realname, *filename = zalloc(size), + *linkname = zalloc(size), *targetname; + int len, err = 
-1; + + if (is_kallsyms) { + if (symbol_conf.kptr_restrict) { + pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n"); + return 0; + } + realname = (char *)name; + } else + realname = realpath(name, NULL); + + if (realname == NULL || filename == NULL || linkname == NULL) + goto out_free; + + len = snprintf(filename, size, "%s%s%s", + debugdir, is_kallsyms ? "/" : "", realname); + if (mkdir_p(filename, 0755)) + goto out_free; + + snprintf(filename + len, sizeof(filename) - len, "/%s", sbuild_id); + + if (access(filename, F_OK)) { + if (is_kallsyms) { + if (copyfile("/proc/kallsyms", filename)) + goto out_free; + } else if (link(realname, filename) && copyfile(name, filename)) + goto out_free; + } + + len = snprintf(linkname, size, "%s/.build-id/%.2s", + debugdir, sbuild_id); + + if (access(linkname, X_OK) && mkdir_p(linkname, 0755)) + goto out_free; + + snprintf(linkname + len, size - len, "/%s", sbuild_id + 2); + targetname = filename + strlen(debugdir) - 5; + memcpy(targetname, "../..", 5); + + if (symlink(targetname, linkname) == 0) + err = 0; +out_free: + if (!is_kallsyms) + free(realname); + free(filename); + free(linkname); + return err; +} + +static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size, + const char *name, const char *debugdir, + bool is_kallsyms) +{ + char sbuild_id[BUILD_ID_SIZE * 2 + 1]; + + build_id__sprintf(build_id, build_id_size, sbuild_id); + + return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms); +} + +int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir) +{ + const size_t size = PATH_MAX; + char *filename = zalloc(size), + *linkname = zalloc(size); + int err = -1; + + if (filename == NULL || linkname == NULL) + goto out_free; + + snprintf(linkname, size, "%s/.build-id/%.2s/%s", + debugdir, sbuild_id, sbuild_id + 2); + + if (access(linkname, F_OK)) + goto out_free; + + if (readlink(linkname, filename, size - 1) < 0) + goto out_free; + + if (unlink(linkname)) + goto out_free; + + /* + * Since the link is relative, we must make it absolute: + */ + snprintf(linkname, size, "%s/.build-id/%.2s/%s", + debugdir, sbuild_id, filename); + + if (unlink(linkname)) + goto out_free; + + err = 0; +out_free: + free(filename); + free(linkname); + return err; +} + +static int dso__cache_build_id(struct dso *dso, const char *debugdir) +{ + bool is_kallsyms = dso->kernel && dso->long_name[0] != '/'; + + return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), + dso->long_name, debugdir, is_kallsyms); +} + +static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) +{ + struct dso *pos; + int err = 0; + + dsos__for_each_with_build_id(pos, head) + if (dso__cache_build_id(pos, debugdir)) + err = -1; + + return err; +} + +static int machine__cache_build_ids(struct machine *machine, const char *debugdir) +{ + int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir); + ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir); + return ret; +} + +static int perf_session__cache_build_ids(struct perf_session *session) +{ + struct rb_node *nd; + int ret; + char debugdir[PATH_MAX]; + + snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir); + + if (mkdir(debugdir, 0755) != 0 && errno != EEXIST) + return -1; + + ret = machine__cache_build_ids(&session->host_machine, debugdir); + + for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { + struct machine *pos = rb_entry(nd, struct machine, rb_node); + ret |= machine__cache_build_ids(pos, debugdir); + } + return ret ? 
-1 : 0; +} + +static bool machine__read_build_ids(struct machine *machine, bool with_hits) +{ + bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits); + ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits); + return ret; +} + +static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits) +{ + struct rb_node *nd; + bool ret = machine__read_build_ids(&session->host_machine, with_hits); + + for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { + struct machine *pos = rb_entry(nd, struct machine, rb_node); + ret |= machine__read_build_ids(pos, with_hits); + } + + return ret; +} + +static int do_write_feat(int fd, struct perf_header *h, int type, + struct perf_file_section **p, + struct perf_evlist *evlist) +{ int err; int ret = 0; if (perf_header__has_feat(h, type)) { - if (!feat_ops[type].write) - return -1; (*p)->offset = lseek(fd, 0, SEEK_CUR); @@ -1415,12 +1408,18 @@ static int perf_header__adds_write(struct perf_header *header, struct perf_evlist *evlist, int fd) { int nr_sections; + struct perf_session *session; struct perf_file_section *feat_sec, *p; int sec_size; u64 sec_start; - int feat; int err; + session = container_of(header, struct perf_session, header); + + if (perf_header__has_feat(header, HEADER_BUILD_ID && + !perf_session__read_build_ids(session, true))) + perf_header__clear_feat(header, HEADER_BUILD_ID); + nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); if (!nr_sections) return 0; @@ -1434,11 +1433,64 @@ static int perf_header__adds_write(struct perf_header *header, sec_start = header->data_offset + header->data_size; lseek(fd, sec_start + sec_size, SEEK_SET); - for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { - if (do_write_feat(fd, header, feat, &p, evlist)) - perf_header__clear_feat(header, feat); + err = do_write_feat(fd, header, HEADER_TRACE_INFO, &p, evlist); + if (err) + goto out_free; + + err = do_write_feat(fd, header, HEADER_BUILD_ID, &p, evlist); + if (err) { + perf_header__clear_feat(header, HEADER_BUILD_ID); + goto out_free; } + err = do_write_feat(fd, header, HEADER_HOSTNAME, &p, evlist); + if (err) + perf_header__clear_feat(header, HEADER_HOSTNAME); + + err = do_write_feat(fd, header, HEADER_OSRELEASE, &p, evlist); + if (err) + perf_header__clear_feat(header, HEADER_OSRELEASE); + + err = do_write_feat(fd, header, HEADER_VERSION, &p, evlist); + if (err) + perf_header__clear_feat(header, HEADER_VERSION); + + err = do_write_feat(fd, header, HEADER_ARCH, &p, evlist); + if (err) + perf_header__clear_feat(header, HEADER_ARCH); + + err = do_write_feat(fd, header, HEADER_NRCPUS, &p, evlist); + if (err) + perf_header__clear_feat(header, HEADER_NRCPUS); + + err = do_write_feat(fd, header, HEADER_CPUDESC, &p, evlist); + if (err) + perf_header__clear_feat(header, HEADER_CPUDESC); + + err = do_write_feat(fd, header, HEADER_CPUID, &p, evlist); + if (err) + perf_header__clear_feat(header, HEADER_CPUID); + + err = do_write_feat(fd, header, HEADER_TOTAL_MEM, &p, evlist); + if (err) + perf_header__clear_feat(header, HEADER_TOTAL_MEM); + + err = do_write_feat(fd, header, HEADER_CMDLINE, &p, evlist); + if (err) + perf_header__clear_feat(header, HEADER_CMDLINE); + + err = do_write_feat(fd, header, HEADER_EVENT_DESC, &p, evlist); + if (err) + perf_header__clear_feat(header, HEADER_EVENT_DESC); + + err = do_write_feat(fd, header, HEADER_CPU_TOPOLOGY, &p, evlist); + if (err) + perf_header__clear_feat(header, HEADER_CPU_TOPOLOGY); + + err = do_write_feat(fd, header, HEADER_NUMA_TOPOLOGY, 
&p, evlist); + if (err) + perf_header__clear_feat(header, HEADER_NUMA_TOPOLOGY); + lseek(fd, sec_start, SEEK_SET); /* * may write more than needed due to dropped feature, but @@ -1447,6 +1499,7 @@ static int perf_header__adds_write(struct perf_header *header, err = do_write(fd, feat_sec, sec_size); if (err < 0) pr_debug("failed to write feature section\n"); +out_free: free(feat_sec); return err; } @@ -1584,20 +1637,20 @@ static int perf_header__getbuffer64(struct perf_header *header, int perf_header__process_sections(struct perf_header *header, int fd, void *data, int (*process)(struct perf_file_section *section, - struct perf_header *ph, - int feat, int fd, void *data)) + struct perf_header *ph, + int feat, int fd, void *data)) { - struct perf_file_section *feat_sec, *sec; + struct perf_file_section *feat_sec; int nr_sections; int sec_size; - int feat; - int err; + int idx = 0; + int err = -1, feat = 1; nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); if (!nr_sections) return 0; - feat_sec = sec = calloc(sizeof(*feat_sec), nr_sections); + feat_sec = calloc(sizeof(*feat_sec), nr_sections); if (!feat_sec) return -1; @@ -1605,16 +1658,20 @@ int perf_header__process_sections(struct perf_header *header, int fd, lseek(fd, header->data_offset + header->data_size, SEEK_SET); - err = perf_header__getbuffer64(header, fd, feat_sec, sec_size); - if (err < 0) + if (perf_header__getbuffer64(header, fd, feat_sec, sec_size)) goto out_free; - for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) { - err = process(sec++, header, feat, fd, data); - if (err < 0) - goto out_free; - } err = 0; + while (idx < nr_sections && feat < HEADER_LAST_FEATURE) { + if (perf_header__has_feat(header, feat)) { + struct perf_file_section *sec = &feat_sec[idx++]; + + err = process(sec, header, feat, fd, data); + if (err < 0) + break; + } + ++feat; + } out_free: free(feat_sec); return err; @@ -1849,21 +1906,32 @@ static int perf_file_section__process(struct perf_file_section *section, return 0; } - if (feat >= HEADER_LAST_FEATURE) { - pr_debug("unknown feature %d, continuing...\n", feat); - return 0; - } - switch (feat) { case HEADER_TRACE_INFO: trace_report(fd, false); break; + case HEADER_BUILD_ID: if (perf_header__read_build_ids(ph, fd, section->offset, section->size)) pr_debug("Failed to read buildids, continuing...\n"); break; - default: + + case HEADER_HOSTNAME: + case HEADER_OSRELEASE: + case HEADER_VERSION: + case HEADER_ARCH: + case HEADER_NRCPUS: + case HEADER_CPUDESC: + case HEADER_CPUID: + case HEADER_TOTAL_MEM: + case HEADER_CMDLINE: + case HEADER_EVENT_DESC: + case HEADER_CPU_TOPOLOGY: + case HEADER_NUMA_TOPOLOGY: break; + + default: + pr_debug("unknown feature %d, continuing...\n", feat); } return 0; @@ -1973,8 +2041,6 @@ int perf_session__read_header(struct perf_session *session, int fd) lseek(fd, tmp, SEEK_SET); } - symbol_conf.nr_events = nr_attrs; - if (f_header.event_types.size) { lseek(fd, f_header.event_types.offset, SEEK_SET); events = malloc(f_header.event_types.size); @@ -2002,9 +2068,9 @@ int perf_session__read_header(struct perf_session *session, int fd) return -ENOMEM; } -int perf_event__synthesize_attr(struct perf_tool *tool, - struct perf_event_attr *attr, u16 ids, u64 *id, - perf_event__handler_t process) +int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id, + perf_event__handler_t process, + struct perf_session *session) { union perf_event *ev; size_t size; @@ -2026,23 +2092,22 @@ int perf_event__synthesize_attr(struct perf_tool *tool, 
ev->attr.header.type = PERF_RECORD_HEADER_ATTR; ev->attr.header.size = size; - err = process(tool, ev, NULL, NULL); + err = process(ev, NULL, session); free(ev); return err; } -int perf_event__synthesize_attrs(struct perf_tool *tool, - struct perf_session *session, +int perf_session__synthesize_attrs(struct perf_session *session, perf_event__handler_t process) { struct perf_evsel *attr; int err = 0; list_for_each_entry(attr, &session->evlist->entries, node) { - err = perf_event__synthesize_attr(tool, &attr->attr, attr->ids, - attr->id, process); + err = perf_event__synthesize_attr(&attr->attr, attr->ids, + attr->id, process, session); if (err) { pr_debug("failed to create perf header attribute\n"); return err; @@ -2053,23 +2118,23 @@ int perf_event__synthesize_attrs(struct perf_tool *tool, } int perf_event__process_attr(union perf_event *event, - struct perf_evlist **pevlist) + struct perf_session *session) { unsigned int i, ids, n_ids; struct perf_evsel *evsel; - struct perf_evlist *evlist = *pevlist; - if (evlist == NULL) { - *pevlist = evlist = perf_evlist__new(NULL, NULL); - if (evlist == NULL) + if (session->evlist == NULL) { + session->evlist = perf_evlist__new(NULL, NULL); + if (session->evlist == NULL) return -ENOMEM; } - evsel = perf_evsel__new(&event->attr.attr, evlist->nr_entries); + evsel = perf_evsel__new(&event->attr.attr, + session->evlist->nr_entries); if (evsel == NULL) return -ENOMEM; - perf_evlist__add(evlist, evsel); + perf_evlist__add(session->evlist, evsel); ids = event->header.size; ids -= (void *)&event->attr.id - (void *)event; @@ -2083,16 +2148,18 @@ int perf_event__process_attr(union perf_event *event, return -ENOMEM; for (i = 0; i < n_ids; i++) { - perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]); + perf_evlist__id_add(session->evlist, evsel, 0, i, + event->attr.id[i]); } + perf_session__update_sample_type(session); + return 0; } -int perf_event__synthesize_event_type(struct perf_tool *tool, - u64 event_id, char *name, +int perf_event__synthesize_event_type(u64 event_id, char *name, perf_event__handler_t process, - struct machine *machine) + struct perf_session *session) { union perf_event ev; size_t size = 0; @@ -2110,14 +2177,13 @@ int perf_event__synthesize_event_type(struct perf_tool *tool, ev.event_type.header.size = sizeof(ev.event_type) - (sizeof(ev.event_type.event_type.name) - size); - err = process(tool, &ev, NULL, machine); + err = process(&ev, NULL, session); return err; } -int perf_event__synthesize_event_types(struct perf_tool *tool, - perf_event__handler_t process, - struct machine *machine) +int perf_event__synthesize_event_types(perf_event__handler_t process, + struct perf_session *session) { struct perf_trace_event_type *type; int i, err = 0; @@ -2125,9 +2191,9 @@ int perf_event__synthesize_event_types(struct perf_tool *tool, for (i = 0; i < event_count; i++) { type = &events[i]; - err = perf_event__synthesize_event_type(tool, type->event_id, + err = perf_event__synthesize_event_type(type->event_id, type->name, process, - machine); + session); if (err) { pr_debug("failed to create perf header event type\n"); return err; @@ -2137,8 +2203,8 @@ int perf_event__synthesize_event_types(struct perf_tool *tool, return err; } -int perf_event__process_event_type(struct perf_tool *tool __unused, - union perf_event *event) +int perf_event__process_event_type(union perf_event *event, + struct perf_session *session __unused) { if (perf_header__push_event(event->event_type.event_type.event_id, event->event_type.event_type.name) < 0) @@ -2147,9 
+2213,9 @@ int perf_event__process_event_type(struct perf_tool *tool __unused, return 0; } -int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, - struct perf_evlist *evlist, - perf_event__handler_t process) +int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist, + perf_event__handler_t process, + struct perf_session *session __unused) { union perf_event ev; struct tracing_data *tdata; @@ -2180,7 +2246,7 @@ int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, ev.tracing_data.header.size = sizeof(ev.tracing_data); ev.tracing_data.size = aligned_size; - process(tool, &ev, NULL, NULL); + process(&ev, NULL, session); /* * The put function will copy all the tracing data @@ -2222,10 +2288,10 @@ int perf_event__process_tracing_data(union perf_event *event, return size_read + padding; } -int perf_event__synthesize_build_id(struct perf_tool *tool, - struct dso *pos, u16 misc, +int perf_event__synthesize_build_id(struct dso *pos, u16 misc, perf_event__handler_t process, - struct machine *machine) + struct machine *machine, + struct perf_session *session) { union perf_event ev; size_t len; @@ -2245,13 +2311,12 @@ int perf_event__synthesize_build_id(struct perf_tool *tool, ev.build_id.header.size = sizeof(ev.build_id) + len; memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len); - err = process(tool, &ev, NULL, machine); + err = process(&ev, NULL, session); return err; } -int perf_event__process_build_id(struct perf_tool *tool __used, - union perf_event *event, +int perf_event__process_build_id(union perf_event *event, struct perf_session *session) { __event_process_build_id(&event->build_id, diff --git a/trunk/tools/perf/util/header.h b/trunk/tools/perf/util/header.h index ac4ec956024e..3d5a742f4a2a 100644 --- a/trunk/tools/perf/util/header.h +++ b/trunk/tools/perf/util/header.h @@ -10,8 +10,7 @@ #include enum { - HEADER_RESERVED = 0, /* always cleared */ - HEADER_TRACE_INFO = 1, + HEADER_TRACE_INFO = 1, HEADER_BUILD_ID, HEADER_HOSTNAME, @@ -28,9 +27,10 @@ enum { HEADER_NUMA_TOPOLOGY, HEADER_LAST_FEATURE, - HEADER_FEAT_BITS = 256, }; +#define HEADER_FEAT_BITS 256 + struct perf_file_section { u64 offset; u64 size; @@ -68,7 +68,6 @@ struct perf_header { }; struct perf_evlist; -struct perf_session; int perf_session__read_header(struct perf_session *session, int fd); int perf_session__write_header(struct perf_session *session, @@ -97,36 +96,32 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, const char *name, bool is_kallsyms); int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir); -int perf_event__synthesize_attr(struct perf_tool *tool, - struct perf_event_attr *attr, u16 ids, u64 *id, - perf_event__handler_t process); -int perf_event__synthesize_attrs(struct perf_tool *tool, - struct perf_session *session, - perf_event__handler_t process); -int perf_event__process_attr(union perf_event *event, struct perf_evlist **pevlist); +int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id, + perf_event__handler_t process, + struct perf_session *session); +int perf_session__synthesize_attrs(struct perf_session *session, + perf_event__handler_t process); +int perf_event__process_attr(union perf_event *event, struct perf_session *session); -int perf_event__synthesize_event_type(struct perf_tool *tool, - u64 event_id, char *name, +int perf_event__synthesize_event_type(u64 event_id, char *name, perf_event__handler_t process, - struct machine *machine); -int 
perf_event__synthesize_event_types(struct perf_tool *tool, - perf_event__handler_t process, - struct machine *machine); -int perf_event__process_event_type(struct perf_tool *tool, - union perf_event *event); - -int perf_event__synthesize_tracing_data(struct perf_tool *tool, - int fd, struct perf_evlist *evlist, - perf_event__handler_t process); + struct perf_session *session); +int perf_event__synthesize_event_types(perf_event__handler_t process, + struct perf_session *session); +int perf_event__process_event_type(union perf_event *event, + struct perf_session *session); + +int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist, + perf_event__handler_t process, + struct perf_session *session); int perf_event__process_tracing_data(union perf_event *event, struct perf_session *session); -int perf_event__synthesize_build_id(struct perf_tool *tool, - struct dso *pos, u16 misc, +int perf_event__synthesize_build_id(struct dso *pos, u16 misc, perf_event__handler_t process, - struct machine *machine); -int perf_event__process_build_id(struct perf_tool *tool, - union perf_event *event, + struct machine *machine, + struct perf_session *session); +int perf_event__process_build_id(union perf_event *event, struct perf_session *session); /* diff --git a/trunk/tools/perf/util/hist.h b/trunk/tools/perf/util/hist.h index ff6f9d56ea41..89289c8e935e 100644 --- a/trunk/tools/perf/util/hist.h +++ b/trunk/tools/perf/util/hist.h @@ -117,6 +117,7 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __used, static inline int hist_entry__tui_annotate(struct hist_entry *self __used, int evidx __used, + int nr_events __used, void(*timer)(void *arg) __used, void *arg __used, int delay_secs __used) @@ -127,7 +128,7 @@ static inline int hist_entry__tui_annotate(struct hist_entry *self __used, #define K_RIGHT -2 #else #include "ui/keysyms.h" -int hist_entry__tui_annotate(struct hist_entry *he, int evidx, +int hist_entry__tui_annotate(struct hist_entry *he, int evidx, int nr_events, void(*timer)(void *arg), void *arg, int delay_secs); int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help, diff --git a/trunk/tools/perf/util/include/linux/bitops.h b/trunk/tools/perf/util/include/linux/bitops.h index 62cdee78db7b..305c8484f200 100644 --- a/trunk/tools/perf/util/include/linux/bitops.h +++ b/trunk/tools/perf/util/include/linux/bitops.h @@ -9,17 +9,6 @@ #define BITS_PER_BYTE 8 #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) -#define for_each_set_bit(bit, addr, size) \ - for ((bit) = find_first_bit((addr), (size)); \ - (bit) < (size); \ - (bit) = find_next_bit((addr), (size), (bit) + 1)) - -/* same as for_each_set_bit() but use bit as value to start with */ -#define for_each_set_bit_cont(bit, addr, size) \ - for ((bit) = find_next_bit((addr), (size), (bit)); \ - (bit) < (size); \ - (bit) = find_next_bit((addr), (size), (bit) + 1)) - static inline void set_bit(int nr, unsigned long *addr) { addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG); @@ -41,111 +30,4 @@ static inline unsigned long hweight_long(unsigned long w) return sizeof(w) == 4 ? hweight32(w) : hweight64(w); } -#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) - -/** - * __ffs - find first bit in word. - * @word: The word to search - * - * Undefined if no bit exists, so code should check against 0 first. 
- */ -static __always_inline unsigned long __ffs(unsigned long word) -{ - int num = 0; - -#if BITS_PER_LONG == 64 - if ((word & 0xffffffff) == 0) { - num += 32; - word >>= 32; - } -#endif - if ((word & 0xffff) == 0) { - num += 16; - word >>= 16; - } - if ((word & 0xff) == 0) { - num += 8; - word >>= 8; - } - if ((word & 0xf) == 0) { - num += 4; - word >>= 4; - } - if ((word & 0x3) == 0) { - num += 2; - word >>= 2; - } - if ((word & 0x1) == 0) - num += 1; - return num; -} - -/* - * Find the first set bit in a memory region. - */ -static inline unsigned long -find_first_bit(const unsigned long *addr, unsigned long size) -{ - const unsigned long *p = addr; - unsigned long result = 0; - unsigned long tmp; - - while (size & ~(BITS_PER_LONG-1)) { - if ((tmp = *(p++))) - goto found; - result += BITS_PER_LONG; - size -= BITS_PER_LONG; - } - if (!size) - return result; - - tmp = (*p) & (~0UL >> (BITS_PER_LONG - size)); - if (tmp == 0UL) /* Are any bits set? */ - return result + size; /* Nope. */ -found: - return result + __ffs(tmp); -} - -/* - * Find the next set bit in a memory region. - */ -static inline unsigned long -find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset) -{ - const unsigned long *p = addr + BITOP_WORD(offset); - unsigned long result = offset & ~(BITS_PER_LONG-1); - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset %= BITS_PER_LONG; - if (offset) { - tmp = *(p++); - tmp &= (~0UL << offset); - if (size < BITS_PER_LONG) - goto found_first; - if (tmp) - goto found_middle; - size -= BITS_PER_LONG; - result += BITS_PER_LONG; - } - while (size & ~(BITS_PER_LONG-1)) { - if ((tmp = *(p++))) - goto found_middle; - result += BITS_PER_LONG; - size -= BITS_PER_LONG; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp &= (~0UL >> (BITS_PER_LONG - size)); - if (tmp == 0UL) /* Are any bits set? */ - return result + size; /* Nope. 
*/ -found_middle: - return result + __ffs(tmp); -} - #endif diff --git a/trunk/tools/perf/util/map.c b/trunk/tools/perf/util/map.c index 316aa0ab7122..78284b13e808 100644 --- a/trunk/tools/perf/util/map.c +++ b/trunk/tools/perf/util/map.c @@ -562,10 +562,6 @@ int machine__init(struct machine *self, const char *root_dir, pid_t pid) INIT_LIST_HEAD(&self->user_dsos); INIT_LIST_HEAD(&self->kernel_dsos); - self->threads = RB_ROOT; - INIT_LIST_HEAD(&self->dead_threads); - self->last_match = NULL; - self->kmaps.machine = self; self->pid = pid; self->root_dir = strdup(root_dir); diff --git a/trunk/tools/perf/util/map.h b/trunk/tools/perf/util/map.h index 2b8017f8a930..890d85545d0f 100644 --- a/trunk/tools/perf/util/map.h +++ b/trunk/tools/perf/util/map.h @@ -18,11 +18,9 @@ enum map_type { extern const char *map_type__name[MAP__NR_TYPES]; struct dso; -struct ip_callchain; struct ref_reloc_sym; struct map_groups; struct machine; -struct perf_evsel; struct map { union { @@ -63,11 +61,7 @@ struct map_groups { struct machine { struct rb_node rb_node; pid_t pid; - u16 id_hdr_size; char *root_dir; - struct rb_root threads; - struct list_head dead_threads; - struct thread *last_match; struct list_head user_dsos; struct list_head kernel_dsos; struct map_groups kmaps; @@ -154,13 +148,6 @@ int machine__init(struct machine *self, const char *root_dir, pid_t pid); void machine__exit(struct machine *self); void machine__delete(struct machine *self); -int machine__resolve_callchain(struct machine *machine, - struct perf_evsel *evsel, struct thread *thread, - struct ip_callchain *chain, - struct symbol **parent); -int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name, - u64 addr); - /* * Default guest kernel is defined by parameter --guestkallsyms * and --guestmodules @@ -203,12 +190,6 @@ struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, struct map **mapp, symbol_filter_t filter); - -struct thread *machine__findnew_thread(struct machine *machine, pid_t pid); -void machine__remove_thread(struct machine *machine, struct thread *th); - -size_t machine__fprintf(struct machine *machine, FILE *fp); - static inline struct symbol *machine__find_kernel_symbol(struct machine *self, enum map_type type, u64 addr, diff --git a/trunk/tools/perf/util/parse-events.c b/trunk/tools/perf/util/parse-events.c index 531c283fc0c5..928918b796b2 100644 --- a/trunk/tools/perf/util/parse-events.c +++ b/trunk/tools/perf/util/parse-events.c @@ -25,6 +25,8 @@ enum event_result { EVT_HANDLED_ALL }; +char debugfs_path[MAXPATHLEN]; + #define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x #define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x @@ -38,7 +40,6 @@ static struct event_symbol event_symbols[] = { { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" }, { CHW(BRANCH_MISSES), "branch-misses", "" }, { CHW(BUS_CYCLES), "bus-cycles", "" }, - { CHW(REF_CPU_CYCLES), "ref-cycles", "" }, { CSW(CPU_CLOCK), "cpu-clock", "" }, { CSW(TASK_CLOCK), "task-clock", "" }, @@ -69,7 +70,6 @@ static const char *hw_event_names[PERF_COUNT_HW_MAX] = { "bus-cycles", "stalled-cycles-frontend", "stalled-cycles-backend", - "ref-cycles", }; static const char *sw_event_names[PERF_COUNT_SW_MAX] = { @@ -140,7 +140,7 @@ static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir) char evt_path[MAXPATHLEN]; int fd; - snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path, + snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path, sys_dir->d_name, 
evt_dir->d_name); fd = open(evt_path, O_RDONLY); if (fd < 0) @@ -171,16 +171,16 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config) char evt_path[MAXPATHLEN]; char dir_path[MAXPATHLEN]; - if (debugfs_valid_mountpoint(tracing_events_path)) + if (debugfs_valid_mountpoint(debugfs_path)) return NULL; - sys_dir = opendir(tracing_events_path); + sys_dir = opendir(debugfs_path); if (!sys_dir) return NULL; for_each_subsystem(sys_dir, sys_dirent, sys_next) { - snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, + snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path, sys_dirent.d_name); evt_dir = opendir(dir_path); if (!evt_dir) @@ -447,7 +447,7 @@ parse_single_tracepoint_event(char *sys_name, u64 id; int fd; - snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path, + snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path, sys_name, evt_name); fd = open(evt_path, O_RDONLY); @@ -485,7 +485,7 @@ parse_multiple_tracepoint_event(struct perf_evlist *evlist, char *sys_name, struct dirent *evt_ent; DIR *evt_dir; - snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name); + snprintf(evt_path, MAXPATHLEN, "%s/%s", debugfs_path, sys_name); evt_dir = opendir(evt_path); if (!evt_dir) { @@ -528,7 +528,7 @@ parse_tracepoint_event(struct perf_evlist *evlist, const char **strp, char sys_name[MAX_EVENT_LENGTH]; unsigned int sys_length, evt_length; - if (debugfs_valid_mountpoint(tracing_events_path)) + if (debugfs_valid_mountpoint(debugfs_path)) return 0; evt_name = strchr(*strp, ':'); @@ -920,10 +920,10 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob) char evt_path[MAXPATHLEN]; char dir_path[MAXPATHLEN]; - if (debugfs_valid_mountpoint(tracing_events_path)) + if (debugfs_valid_mountpoint(debugfs_path)) return; - sys_dir = opendir(tracing_events_path); + sys_dir = opendir(debugfs_path); if (!sys_dir) return; @@ -932,7 +932,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob) !strglobmatch(sys_dirent.d_name, subsys_glob)) continue; - snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, + snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path, sys_dirent.d_name); evt_dir = opendir(dir_path); if (!evt_dir) @@ -964,16 +964,16 @@ int is_valid_tracepoint(const char *event_string) char evt_path[MAXPATHLEN]; char dir_path[MAXPATHLEN]; - if (debugfs_valid_mountpoint(tracing_events_path)) + if (debugfs_valid_mountpoint(debugfs_path)) return 0; - sys_dir = opendir(tracing_events_path); + sys_dir = opendir(debugfs_path); if (!sys_dir) return 0; for_each_subsystem(sys_dir, sys_dirent, sys_next) { - snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, + snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path, sys_dirent.d_name); evt_dir = opendir(dir_path); if (!evt_dir) diff --git a/trunk/tools/perf/util/parse-events.h b/trunk/tools/perf/util/parse-events.h index 7e0cbe75d5f1..2f8e375e038d 100644 --- a/trunk/tools/perf/util/parse-events.h +++ b/trunk/tools/perf/util/parse-events.h @@ -39,6 +39,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob); int print_hwcache_events(const char *event_glob); extern int is_valid_tracepoint(const char *event_string); +extern char debugfs_path[]; extern int valid_debugfs_mount(const char *debugfs); #endif /* __PERF_PARSE_EVENTS_H */ diff --git a/trunk/tools/perf/util/probe-finder.h b/trunk/tools/perf/util/probe-finder.h index 17e94d0c36f9..1132c8f0ce89 100644 --- a/trunk/tools/perf/util/probe-finder.h +++ b/trunk/tools/perf/util/probe-finder.h @@ 
-5,6 +5,7 @@ #include "util.h" #include "probe-event.h" +#define MAX_PATH_LEN 256 #define MAX_PROBE_BUFFER 1024 #define MAX_PROBES 128 diff --git a/trunk/tools/perf/util/scripting-engines/trace-event-perl.c b/trunk/tools/perf/util/scripting-engines/trace-event-perl.c index e30749e38a9b..74350ffb57fe 100644 --- a/trunk/tools/perf/util/scripting-engines/trace-event-perl.c +++ b/trunk/tools/perf/util/scripting-engines/trace-event-perl.c @@ -27,10 +27,7 @@ #include "../../perf.h" #include "../util.h" -#include "../thread.h" -#include "../event.h" #include "../trace-event.h" -#include "../evsel.h" #include #include @@ -248,11 +245,11 @@ static inline struct event *find_cache_event(int type) return event; } -static void perl_process_tracepoint(union perf_event *pevent __unused, - struct perf_sample *sample, - struct perf_evsel *evsel, - struct machine *machine __unused, - struct thread *thread) +static void perl_process_event(union perf_event *pevent __unused, + struct perf_sample *sample, + struct perf_evsel *evsel, + struct perf_session *session __unused, + struct thread *thread) { struct format_field *field; static char handler[256]; @@ -268,9 +265,6 @@ static void perl_process_tracepoint(union perf_event *pevent __unused, dSP; - if (evsel->attr.type != PERF_TYPE_TRACEPOINT) - return; - type = trace_parse_common_type(data); event = find_cache_event(type); @@ -338,42 +332,6 @@ static void perl_process_tracepoint(union perf_event *pevent __unused, LEAVE; } -static void perl_process_event_generic(union perf_event *pevent __unused, - struct perf_sample *sample, - struct perf_evsel *evsel __unused, - struct machine *machine __unused, - struct thread *thread __unused) -{ - dSP; - - if (!get_cv("process_event", 0)) - return; - - ENTER; - SAVETMPS; - PUSHMARK(SP); - XPUSHs(sv_2mortal(newSVpvn((const char *)pevent, pevent->header.size))); - XPUSHs(sv_2mortal(newSVpvn((const char *)&evsel->attr, sizeof(evsel->attr)))); - XPUSHs(sv_2mortal(newSVpvn((const char *)sample, sizeof(*sample)))); - XPUSHs(sv_2mortal(newSVpvn((const char *)sample->raw_data, sample->raw_size))); - PUTBACK; - call_pv("process_event", G_SCALAR); - SPAGAIN; - PUTBACK; - FREETMPS; - LEAVE; -} - -static void perl_process_event(union perf_event *pevent, - struct perf_sample *sample, - struct perf_evsel *evsel, - struct machine *machine, - struct thread *thread) -{ - perl_process_tracepoint(pevent, sample, evsel, machine, thread); - perl_process_event_generic(pevent, sample, evsel, machine, thread); -} - static void run_start_sub(void) { dSP; /* access to Perl stack */ @@ -595,28 +553,7 @@ static int perl_generate_script(const char *outfile) fprintf(ofp, "sub print_header\n{\n" "\tmy ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;\n\n" "\tprintf(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \",\n\t " - "$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}\n"); - - fprintf(ofp, - "\n# Packed byte string args of process_event():\n" - "#\n" - "# $event:\tunion perf_event\tutil/event.h\n" - "# $attr:\tstruct perf_event_attr\tlinux/perf_event.h\n" - "# $sample:\tstruct perf_sample\tutil/event.h\n" - "# $raw_data:\tperf_sample->raw_data\tutil/event.h\n" - "\n" - "sub process_event\n" - "{\n" - "\tmy ($event, $attr, $sample, $raw_data) = @_;\n" - "\n" - "\tmy @event\t= unpack(\"LSS\", $event);\n" - "\tmy @attr\t= unpack(\"LLQQQQQLLQQ\", $attr);\n" - "\tmy @sample\t= unpack(\"QLLQQQQQLL\", $sample);\n" - "\tmy @raw_data\t= unpack(\"C*\", $raw_data);\n" - "\n" - "\tuse Data::Dumper;\n" - "\tprint Dumper \\@event, \\@attr, \\@sample, \\@raw_data;\n" 
- "}\n"); + "$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}"); fclose(ofp); diff --git a/trunk/tools/perf/util/scripting-engines/trace-event-python.c b/trunk/tools/perf/util/scripting-engines/trace-event-python.c index 0b2a48783172..6ccf70e8d8f2 100644 --- a/trunk/tools/perf/util/scripting-engines/trace-event-python.c +++ b/trunk/tools/perf/util/scripting-engines/trace-event-python.c @@ -29,8 +29,6 @@ #include "../../perf.h" #include "../util.h" -#include "../event.h" -#include "../thread.h" #include "../trace-event.h" PyMODINIT_FUNC initperf_trace_context(void); @@ -209,7 +207,7 @@ static inline struct event *find_cache_event(int type) static void python_process_event(union perf_event *pevent __unused, struct perf_sample *sample, struct perf_evsel *evsel __unused, - struct machine *machine __unused, + struct perf_session *session __unused, struct thread *thread) { PyObject *handler, *retval, *context, *t, *obj, *dict = NULL; diff --git a/trunk/tools/perf/util/session.c b/trunk/tools/perf/util/session.c index b5ca2558c7bb..0f4555ce9063 100644 --- a/trunk/tools/perf/util/session.c +++ b/trunk/tools/perf/util/session.c @@ -10,7 +10,6 @@ #include "evlist.h" #include "evsel.h" #include "session.h" -#include "tool.h" #include "sort.h" #include "util.h" #include "cpumap.h" @@ -79,13 +78,39 @@ static int perf_session__open(struct perf_session *self, bool force) return -1; } +static void perf_session__id_header_size(struct perf_session *session) +{ + struct perf_sample *data; + u64 sample_type = session->sample_type; + u16 size = 0; + + if (!session->sample_id_all) + goto out; + + if (sample_type & PERF_SAMPLE_TID) + size += sizeof(data->tid) * 2; + + if (sample_type & PERF_SAMPLE_TIME) + size += sizeof(data->time); + + if (sample_type & PERF_SAMPLE_ID) + size += sizeof(data->id); + + if (sample_type & PERF_SAMPLE_STREAM_ID) + size += sizeof(data->stream_id); + + if (sample_type & PERF_SAMPLE_CPU) + size += sizeof(data->cpu) * 2; +out: + session->id_hdr_size = size; +} + void perf_session__update_sample_type(struct perf_session *self) { self->sample_type = perf_evlist__sample_type(self->evlist); self->sample_size = __perf_evsel__sample_size(self->sample_type); self->sample_id_all = perf_evlist__sample_id_all(self->evlist); - self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist); - self->host_machine.id_hdr_size = self->id_hdr_size; + perf_session__id_header_size(self); } int perf_session__create_kernel_maps(struct perf_session *self) @@ -105,26 +130,18 @@ static void perf_session__destroy_kernel_maps(struct perf_session *self) struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe, - struct perf_tool *tool) + struct perf_event_ops *ops) { - struct perf_session *self; - struct stat st; - size_t len; - - if (!filename || !strlen(filename)) { - if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) - filename = "-"; - else - filename = "perf.data"; - } - - len = strlen(filename); - self = zalloc(sizeof(*self) + len); + size_t len = filename ? strlen(filename) + 1 : 0; + struct perf_session *self = zalloc(sizeof(*self) + len); if (self == NULL) goto out; memcpy(self->filename, filename, len); + self->threads = RB_ROOT; + INIT_LIST_HEAD(&self->dead_threads); + self->last_match = NULL; /* * On 64bit we can mmap the data file in one go. No need for tiny mmap * slices. On 32bit we use 32MB. 
@@ -154,10 +171,10 @@ struct perf_session *perf_session__new(const char *filename, int mode, goto out_delete; } - if (tool && tool->ordering_requires_timestamps && - tool->ordered_samples && !self->sample_id_all) { + if (ops && ops->ordering_requires_timestamps && + ops->ordered_samples && !self->sample_id_all) { dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); - tool->ordered_samples = false; + ops->ordered_samples = false; } out: @@ -167,22 +184,17 @@ struct perf_session *perf_session__new(const char *filename, int mode, return NULL; } -static void machine__delete_dead_threads(struct machine *machine) +static void perf_session__delete_dead_threads(struct perf_session *self) { struct thread *n, *t; - list_for_each_entry_safe(t, n, &machine->dead_threads, node) { + list_for_each_entry_safe(t, n, &self->dead_threads, node) { list_del(&t->node); thread__delete(t); } } -static void perf_session__delete_dead_threads(struct perf_session *session) -{ - machine__delete_dead_threads(&session->host_machine); -} - -static void machine__delete_threads(struct machine *self) +static void perf_session__delete_threads(struct perf_session *self) { struct rb_node *nd = rb_first(&self->threads); @@ -195,11 +207,6 @@ static void machine__delete_threads(struct machine *self) } } -static void perf_session__delete_threads(struct perf_session *session) -{ - machine__delete_threads(&session->host_machine); -} - void perf_session__delete(struct perf_session *self) { perf_session__destroy_kernel_maps(self); @@ -210,7 +217,7 @@ void perf_session__delete(struct perf_session *self) free(self); } -void machine__remove_thread(struct machine *self, struct thread *th) +void perf_session__remove_thread(struct perf_session *self, struct thread *th) { self->last_match = NULL; rb_erase(&th->rb_node, &self->threads); @@ -229,16 +236,16 @@ static bool symbol__match_parent_regex(struct symbol *sym) return 0; } -int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, - struct thread *thread, - struct ip_callchain *chain, - struct symbol **parent) +int perf_session__resolve_callchain(struct perf_session *self, + struct thread *thread, + struct ip_callchain *chain, + struct symbol **parent) { u8 cpumode = PERF_RECORD_MISC_USER; unsigned int i; int err; - callchain_cursor_reset(&evsel->hists.callchain_cursor); + callchain_cursor_reset(&self->callchain_cursor); for (i = 0; i < chain->nr; i++) { u64 ip; @@ -265,7 +272,7 @@ int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, al.filtered = false; thread__find_addr_location(thread, self, cpumode, - MAP__FUNCTION, ip, &al, NULL); + MAP__FUNCTION, thread->pid, ip, &al, NULL); if (al.sym != NULL) { if (sort__has_parent && !*parent && symbol__match_parent_regex(al.sym)) @@ -274,7 +281,7 @@ int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, break; } - err = callchain_cursor_append(&evsel->hists.callchain_cursor, + err = callchain_cursor_append(&self->callchain_cursor, ip, al.map, al.sym); if (err) return err; @@ -283,91 +290,75 @@ int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, return 0; } -static int process_event_synth_tracing_data_stub(union perf_event *event __used, - struct perf_session *session __used) -{ - dump_printf(": unhandled!\n"); - return 0; -} - -static int process_event_synth_attr_stub(union perf_event *event __used, - struct perf_evlist **pevlist __used) +static int process_event_synth_stub(union perf_event *event __used, + 
struct perf_session *session __used) { dump_printf(": unhandled!\n"); return 0; } -static int process_event_sample_stub(struct perf_tool *tool __used, - union perf_event *event __used, +static int process_event_sample_stub(union perf_event *event __used, struct perf_sample *sample __used, struct perf_evsel *evsel __used, - struct machine *machine __used) + struct perf_session *session __used) { dump_printf(": unhandled!\n"); return 0; } -static int process_event_stub(struct perf_tool *tool __used, - union perf_event *event __used, +static int process_event_stub(union perf_event *event __used, struct perf_sample *sample __used, - struct machine *machine __used) + struct perf_session *session __used) { dump_printf(": unhandled!\n"); return 0; } -static int process_finished_round_stub(struct perf_tool *tool __used, - union perf_event *event __used, - struct perf_session *perf_session __used) +static int process_finished_round_stub(union perf_event *event __used, + struct perf_session *session __used, + struct perf_event_ops *ops __used) { dump_printf(": unhandled!\n"); return 0; } -static int process_event_type_stub(struct perf_tool *tool __used, - union perf_event *event __used) -{ - dump_printf(": unhandled!\n"); - return 0; -} +static int process_finished_round(union perf_event *event, + struct perf_session *session, + struct perf_event_ops *ops); -static int process_finished_round(struct perf_tool *tool, - union perf_event *event, - struct perf_session *session); - -static void perf_tool__fill_defaults(struct perf_tool *tool) +static void perf_event_ops__fill_defaults(struct perf_event_ops *handler) { - if (tool->sample == NULL) - tool->sample = process_event_sample_stub; - if (tool->mmap == NULL) - tool->mmap = process_event_stub; - if (tool->comm == NULL) - tool->comm = process_event_stub; - if (tool->fork == NULL) - tool->fork = process_event_stub; - if (tool->exit == NULL) - tool->exit = process_event_stub; - if (tool->lost == NULL) - tool->lost = perf_event__process_lost; - if (tool->read == NULL) - tool->read = process_event_sample_stub; - if (tool->throttle == NULL) - tool->throttle = process_event_stub; - if (tool->unthrottle == NULL) - tool->unthrottle = process_event_stub; - if (tool->attr == NULL) - tool->attr = process_event_synth_attr_stub; - if (tool->event_type == NULL) - tool->event_type = process_event_type_stub; - if (tool->tracing_data == NULL) - tool->tracing_data = process_event_synth_tracing_data_stub; - if (tool->build_id == NULL) - tool->build_id = process_finished_round_stub; - if (tool->finished_round == NULL) { - if (tool->ordered_samples) - tool->finished_round = process_finished_round; + if (handler->sample == NULL) + handler->sample = process_event_sample_stub; + if (handler->mmap == NULL) + handler->mmap = process_event_stub; + if (handler->comm == NULL) + handler->comm = process_event_stub; + if (handler->fork == NULL) + handler->fork = process_event_stub; + if (handler->exit == NULL) + handler->exit = process_event_stub; + if (handler->lost == NULL) + handler->lost = perf_event__process_lost; + if (handler->read == NULL) + handler->read = process_event_stub; + if (handler->throttle == NULL) + handler->throttle = process_event_stub; + if (handler->unthrottle == NULL) + handler->unthrottle = process_event_stub; + if (handler->attr == NULL) + handler->attr = process_event_synth_stub; + if (handler->event_type == NULL) + handler->event_type = process_event_synth_stub; + if (handler->tracing_data == NULL) + handler->tracing_data = process_event_synth_stub; + if 
(handler->build_id == NULL) + handler->build_id = process_event_synth_stub; + if (handler->finished_round == NULL) { + if (handler->ordered_samples) + handler->finished_round = process_finished_round; else - tool->finished_round = process_finished_round_stub; + handler->finished_round = process_finished_round_stub; } } @@ -499,11 +490,11 @@ static void perf_session_free_sample_buffers(struct perf_session *session) static int perf_session_deliver_event(struct perf_session *session, union perf_event *event, struct perf_sample *sample, - struct perf_tool *tool, + struct perf_event_ops *ops, u64 file_offset); static void flush_sample_queue(struct perf_session *s, - struct perf_tool *tool) + struct perf_event_ops *ops) { struct ordered_samples *os = &s->ordered_samples; struct list_head *head = &os->samples; @@ -514,7 +505,7 @@ static void flush_sample_queue(struct perf_session *s, unsigned idx = 0, progress_next = os->nr_samples / 16; int ret; - if (!tool->ordered_samples || !limit) + if (!ops->ordered_samples || !limit) return; list_for_each_entry_safe(iter, tmp, head, list) { @@ -525,7 +516,7 @@ static void flush_sample_queue(struct perf_session *s, if (ret) pr_err("Can't parse sample, err = %d\n", ret); else - perf_session_deliver_event(s, iter->event, &sample, tool, + perf_session_deliver_event(s, iter->event, &sample, ops, iter->file_offset); os->last_flush = iter->timestamp; @@ -587,11 +578,11 @@ static void flush_sample_queue(struct perf_session *s, * Flush every events below timestamp 7 * etc... */ -static int process_finished_round(struct perf_tool *tool, - union perf_event *event __used, - struct perf_session *session) +static int process_finished_round(union perf_event *event __used, + struct perf_session *session, + struct perf_event_ops *ops) { - flush_sample_queue(session, tool); + flush_sample_queue(session, ops); session->ordered_samples.next_flush = session->ordered_samples.max_timestamp; return 0; @@ -746,26 +737,13 @@ static void dump_sample(struct perf_session *session, union perf_event *event, callchain__printf(sample); } -static struct machine * - perf_session__find_machine_for_cpumode(struct perf_session *session, - union perf_event *event) -{ - const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - - if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) - return perf_session__find_machine(session, event->ip.pid); - - return perf_session__find_host_machine(session); -} - static int perf_session_deliver_event(struct perf_session *session, union perf_event *event, struct perf_sample *sample, - struct perf_tool *tool, + struct perf_event_ops *ops, u64 file_offset) { struct perf_evsel *evsel; - struct machine *machine; dump_event(session, event, file_offset, sample); @@ -787,8 +765,6 @@ static int perf_session_deliver_event(struct perf_session *session, hists__inc_nr_events(&evsel->hists, event->header.type); } - machine = perf_session__find_machine_for_cpumode(session, event); - switch (event->header.type) { case PERF_RECORD_SAMPLE: dump_sample(session, event, sample); @@ -796,25 +772,23 @@ static int perf_session_deliver_event(struct perf_session *session, ++session->hists.stats.nr_unknown_id; return -1; } - return tool->sample(tool, event, sample, evsel, machine); + return ops->sample(event, sample, evsel, session); case PERF_RECORD_MMAP: - return tool->mmap(tool, event, sample, machine); + return ops->mmap(event, sample, session); case PERF_RECORD_COMM: - return tool->comm(tool, event, sample, machine); + return ops->comm(event, sample, session); 
case PERF_RECORD_FORK: - return tool->fork(tool, event, sample, machine); + return ops->fork(event, sample, session); case PERF_RECORD_EXIT: - return tool->exit(tool, event, sample, machine); + return ops->exit(event, sample, session); case PERF_RECORD_LOST: - if (tool->lost == perf_event__process_lost) - session->hists.stats.total_lost += event->lost.lost; - return tool->lost(tool, event, sample, machine); + return ops->lost(event, sample, session); case PERF_RECORD_READ: - return tool->read(tool, event, sample, evsel, machine); + return ops->read(event, sample, session); case PERF_RECORD_THROTTLE: - return tool->throttle(tool, event, sample, machine); + return ops->throttle(event, sample, session); case PERF_RECORD_UNTHROTTLE: - return tool->unthrottle(tool, event, sample, machine); + return ops->unthrottle(event, sample, session); default: ++session->hists.stats.nr_unknown_events; return -1; @@ -838,29 +812,24 @@ static int perf_session__preprocess_sample(struct perf_session *session, } static int perf_session__process_user_event(struct perf_session *session, union perf_event *event, - struct perf_tool *tool, u64 file_offset) + struct perf_event_ops *ops, u64 file_offset) { - int err; - dump_event(session, event, file_offset, NULL); /* These events are processed right away */ switch (event->header.type) { case PERF_RECORD_HEADER_ATTR: - err = tool->attr(event, &session->evlist); - if (err == 0) - perf_session__update_sample_type(session); - return err; + return ops->attr(event, session); case PERF_RECORD_HEADER_EVENT_TYPE: - return tool->event_type(tool, event); + return ops->event_type(event, session); case PERF_RECORD_HEADER_TRACING_DATA: /* setup for reading amidst mmap */ lseek(session->fd, file_offset, SEEK_SET); - return tool->tracing_data(event, session); + return ops->tracing_data(event, session); case PERF_RECORD_HEADER_BUILD_ID: - return tool->build_id(tool, event, session); + return ops->build_id(event, session); case PERF_RECORD_FINISHED_ROUND: - return tool->finished_round(tool, event, session); + return ops->finished_round(event, session, ops); default: return -EINVAL; } @@ -868,7 +837,7 @@ static int perf_session__process_user_event(struct perf_session *session, union static int perf_session__process_event(struct perf_session *session, union perf_event *event, - struct perf_tool *tool, + struct perf_event_ops *ops, u64 file_offset) { struct perf_sample sample; @@ -884,7 +853,7 @@ static int perf_session__process_event(struct perf_session *session, hists__inc_nr_events(&session->hists, event->header.type); if (event->header.type >= PERF_RECORD_USER_TYPE_START) - return perf_session__process_user_event(session, event, tool, file_offset); + return perf_session__process_user_event(session, event, ops, file_offset); /* * For all kernel events we get the sample data @@ -897,14 +866,14 @@ static int perf_session__process_event(struct perf_session *session, if (perf_session__preprocess_sample(session, event, &sample)) return 0; - if (tool->ordered_samples) { + if (ops->ordered_samples) { ret = perf_session_queue_event(session, event, &sample, file_offset); if (ret != -ETIME) return ret; } - return perf_session_deliver_event(session, event, &sample, tool, + return perf_session_deliver_event(session, event, &sample, ops, file_offset); } @@ -915,11 +884,6 @@ void perf_event_header__bswap(struct perf_event_header *self) self->size = bswap_16(self->size); } -struct thread *perf_session__findnew(struct perf_session *session, pid_t pid) -{ - return 
machine__findnew_thread(&session->host_machine, pid); -} - static struct thread *perf_session__register_idle_thread(struct perf_session *self) { struct thread *thread = perf_session__findnew(self, 0); @@ -933,9 +897,9 @@ static struct thread *perf_session__register_idle_thread(struct perf_session *se } static void perf_session__warn_about_errors(const struct perf_session *session, - const struct perf_tool *tool) + const struct perf_event_ops *ops) { - if (tool->lost == perf_event__process_lost && + if (ops->lost == perf_event__process_lost && session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) { ui__warning("Processed %d events and lost %d chunks!\n\n" "Check IO/CPU overload!\n\n", @@ -970,7 +934,7 @@ static void perf_session__warn_about_errors(const struct perf_session *session, volatile int session_done; static int __perf_session__process_pipe_events(struct perf_session *self, - struct perf_tool *tool) + struct perf_event_ops *ops) { union perf_event event; uint32_t size; @@ -979,7 +943,7 @@ static int __perf_session__process_pipe_events(struct perf_session *self, int err; void *p; - perf_tool__fill_defaults(tool); + perf_event_ops__fill_defaults(ops); head = 0; more: @@ -1015,7 +979,8 @@ static int __perf_session__process_pipe_events(struct perf_session *self, } } - if ((skip = perf_session__process_event(self, &event, tool, head)) < 0) { + if (size == 0 || + (skip = perf_session__process_event(self, &event, ops, head)) < 0) { dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n", head, event.header.size, event.header.type); /* @@ -1038,7 +1003,7 @@ static int __perf_session__process_pipe_events(struct perf_session *self, done: err = 0; out_err: - perf_session__warn_about_errors(self, tool); + perf_session__warn_about_errors(self, ops); perf_session_free_sample_buffers(self); return err; } @@ -1069,7 +1034,7 @@ fetch_mmaped_event(struct perf_session *session, int __perf_session__process_events(struct perf_session *session, u64 data_offset, u64 data_size, - u64 file_size, struct perf_tool *tool) + u64 file_size, struct perf_event_ops *ops) { u64 head, page_offset, file_offset, file_pos, progress_next; int err, mmap_prot, mmap_flags, map_idx = 0; @@ -1078,7 +1043,7 @@ int __perf_session__process_events(struct perf_session *session, union perf_event *event; uint32_t size; - perf_tool__fill_defaults(tool); + perf_event_ops__fill_defaults(ops); page_size = sysconf(_SC_PAGESIZE); @@ -1133,7 +1098,7 @@ int __perf_session__process_events(struct perf_session *session, size = event->header.size; if (size == 0 || - perf_session__process_event(session, event, tool, file_pos) < 0) { + perf_session__process_event(session, event, ops, file_pos) < 0) { dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n", file_offset + head, event->header.size, event->header.type); @@ -1162,15 +1127,15 @@ int __perf_session__process_events(struct perf_session *session, err = 0; /* do the final flush for ordered samples */ session->ordered_samples.next_flush = ULLONG_MAX; - flush_sample_queue(session, tool); + flush_sample_queue(session, ops); out_err: - perf_session__warn_about_errors(session, tool); + perf_session__warn_about_errors(session, ops); perf_session_free_sample_buffers(session); return err; } int perf_session__process_events(struct perf_session *self, - struct perf_tool *tool) + struct perf_event_ops *ops) { int err; @@ -1181,9 +1146,9 @@ int perf_session__process_events(struct perf_session *self, err = __perf_session__process_events(self, self->header.data_offset, 
self->header.data_size, - self->size, tool); + self->size, ops); else - err = __perf_session__process_pipe_events(self, tool); + err = __perf_session__process_pipe_events(self, ops); return err; } @@ -1198,8 +1163,9 @@ bool perf_session__has_traces(struct perf_session *self, const char *msg) return true; } -int maps__set_kallsyms_ref_reloc_sym(struct map **maps, - const char *symbol_name, u64 addr) +int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps, + const char *symbol_name, + u64 addr) { char *bracket; enum map_type i; @@ -1258,27 +1224,6 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) return ret; } -size_t perf_session__fprintf(struct perf_session *session, FILE *fp) -{ - /* - * FIXME: Here we have to actually print all the machines in this - * session, not just the host... - */ - return machine__fprintf(&session->host_machine, fp); -} - -void perf_session__remove_thread(struct perf_session *session, - struct thread *th) -{ - /* - * FIXME: This one makes no sense, we need to remove the thread from - * the machine it belongs to, perf_session can have many machines, so - * doing it always on ->host_machine is wrong. Fix when auditing all - * the 'perf kvm' code. - */ - machine__remove_thread(&session->host_machine, th); -} - struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, unsigned int type) { @@ -1291,16 +1236,17 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, return NULL; } -void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, - struct machine *machine, struct perf_evsel *evsel, - int print_sym, int print_dso) +void perf_session__print_ip(union perf_event *event, + struct perf_sample *sample, + struct perf_session *session, + int print_sym, int print_dso) { struct addr_location al; const char *symname, *dsoname; - struct callchain_cursor *cursor = &evsel->hists.callchain_cursor; + struct callchain_cursor *cursor = &session->callchain_cursor; struct callchain_cursor_node *node; - if (perf_event__preprocess_sample(event, machine, &al, sample, + if (perf_event__preprocess_sample(event, session, &al, sample, NULL) < 0) { error("problem processing %d event, skipping it.\n", event->header.type); @@ -1309,7 +1255,7 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, if (symbol_conf.use_callchain && sample->callchain) { - if (machine__resolve_callchain(machine, evsel, al.thread, + if (perf_session__resolve_callchain(session, al.thread, sample->callchain, NULL) != 0) { if (verbose) error("Failed to resolve callchain. 
Skipping\n"); diff --git a/trunk/tools/perf/util/session.h b/trunk/tools/perf/util/session.h index 37bc38381fb6..6e393c98eb34 100644 --- a/trunk/tools/perf/util/session.h +++ b/trunk/tools/perf/util/session.h @@ -30,6 +30,9 @@ struct perf_session { struct perf_header header; unsigned long size; unsigned long mmap_window; + struct rb_root threads; + struct list_head dead_threads; + struct thread *last_match; struct machine host_machine; struct rb_root machines; struct perf_evlist *evlist; @@ -50,31 +53,65 @@ struct perf_session { int cwdlen; char *cwd; struct ordered_samples ordered_samples; - char filename[1]; + struct callchain_cursor callchain_cursor; + char filename[0]; }; -struct perf_tool; +struct perf_evsel; +struct perf_event_ops; + +typedef int (*event_sample)(union perf_event *event, struct perf_sample *sample, + struct perf_evsel *evsel, struct perf_session *session); +typedef int (*event_op)(union perf_event *self, struct perf_sample *sample, + struct perf_session *session); +typedef int (*event_synth_op)(union perf_event *self, + struct perf_session *session); +typedef int (*event_op2)(union perf_event *self, struct perf_session *session, + struct perf_event_ops *ops); + +struct perf_event_ops { + event_sample sample; + event_op mmap, + comm, + fork, + exit, + lost, + read, + throttle, + unthrottle; + event_synth_op attr, + event_type, + tracing_data, + build_id; + event_op2 finished_round; + bool ordered_samples; + bool ordering_requires_timestamps; +}; struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe, - struct perf_tool *tool); + struct perf_event_ops *ops); void perf_session__delete(struct perf_session *self); void perf_event_header__bswap(struct perf_event_header *self); int __perf_session__process_events(struct perf_session *self, u64 data_offset, u64 data_size, u64 size, - struct perf_tool *tool); + struct perf_event_ops *ops); int perf_session__process_events(struct perf_session *self, - struct perf_tool *tool); + struct perf_event_ops *event_ops); -int perf_session__resolve_callchain(struct perf_session *self, struct perf_evsel *evsel, +int perf_session__resolve_callchain(struct perf_session *self, struct thread *thread, struct ip_callchain *chain, struct symbol **parent); bool perf_session__has_traces(struct perf_session *self, const char *msg); +int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps, + const char *symbol_name, + u64 addr); + void mem_bswap_64(void *src, int byte_size); void perf_event__attr_swap(struct perf_event_attr *attr); @@ -107,16 +144,12 @@ struct machine *perf_session__findnew_machine(struct perf_session *self, pid_t p static inline void perf_session__process_machines(struct perf_session *self, - struct perf_tool *tool, machine__process_t process) { - process(&self->host_machine, tool); - return machines__process(&self->machines, process, tool); + process(&self->host_machine, self); + return machines__process(&self->machines, process, self); } -struct thread *perf_session__findnew(struct perf_session *self, pid_t pid); -size_t perf_session__fprintf(struct perf_session *self, FILE *fp); - size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp); size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, @@ -134,20 +167,13 @@ static inline int perf_session__parse_sample(struct perf_session *session, session->header.needs_swap); } -static inline int perf_session__synthesize_sample(struct perf_session *session, - union perf_event *event, - const struct perf_sample 
*sample) -{ - return perf_event__synthesize_sample(event, session->sample_type, - sample, session->header.needs_swap); -} - struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, unsigned int type); -void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, - struct machine *machine, struct perf_evsel *evsel, - int print_sym, int print_dso); +void perf_session__print_ip(union perf_event *event, + struct perf_sample *sample, + struct perf_session *session, + int print_sym, int print_dso); int perf_session__cpu_bitmap(struct perf_session *session, const char *cpu_list, unsigned long *cpu_bitmap); diff --git a/trunk/tools/perf/util/setup.py b/trunk/tools/perf/util/setup.py index 36d4c5619575..95d370074928 100644 --- a/trunk/tools/perf/util/setup.py +++ b/trunk/tools/perf/util/setup.py @@ -27,8 +27,7 @@ def finalize_options(self): perf = Extension('perf', sources = ['util/python.c', 'util/ctype.c', 'util/evlist.c', 'util/evsel.c', 'util/cpumap.c', 'util/thread_map.c', - 'util/util.c', 'util/xyarray.c', 'util/cgroup.c', - 'util/debugfs.c'], + 'util/util.c', 'util/xyarray.c', 'util/cgroup.c'], include_dirs = ['util/include'], extra_compile_args = cflags, ) diff --git a/trunk/tools/perf/util/symbol.c b/trunk/tools/perf/util/symbol.c index 215d50f2042e..632b50c7bc26 100644 --- a/trunk/tools/perf/util/symbol.c +++ b/trunk/tools/perf/util/symbol.c @@ -1757,7 +1757,7 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg, struct stat st; /*sshfs might return bad dent->d_type, so we have to stat*/ - snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name); + sprintf(path, "%s/%s", dir_name, dent->d_name); if (stat(path, &st)) continue; @@ -1766,6 +1766,8 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg, !strcmp(dent->d_name, "..")) continue; + snprintf(path, sizeof(path), "%s/%s", + dir_name, dent->d_name); ret = map_groups__set_modules_path_dir(mg, path); if (ret < 0) goto out; @@ -1786,6 +1788,9 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg, if (map == NULL) continue; + snprintf(path, sizeof(path), "%s/%s", + dir_name, dent->d_name); + long_name = strdup(path); if (long_name == NULL) { ret = -1; @@ -2604,10 +2609,10 @@ int symbol__init(void) symbol_conf.initialized = true; return 0; -out_free_comm_list: - strlist__delete(symbol_conf.comm_list); out_free_dso_list: strlist__delete(symbol_conf.dso_list); +out_free_comm_list: + strlist__delete(symbol_conf.comm_list); return -1; } diff --git a/trunk/tools/perf/util/symbol.h b/trunk/tools/perf/util/symbol.h index 123c2e14353e..29f8d742e92f 100644 --- a/trunk/tools/perf/util/symbol.h +++ b/trunk/tools/perf/util/symbol.h @@ -68,7 +68,6 @@ struct strlist; struct symbol_conf { unsigned short priv_size; - unsigned short nr_events; bool try_vmlinux_path, use_modules, sort_by_name, diff --git a/trunk/tools/perf/util/thread.c b/trunk/tools/perf/util/thread.c index fb4b7ea6752f..d5d3b22250f3 100644 --- a/trunk/tools/perf/util/thread.c +++ b/trunk/tools/perf/util/thread.c @@ -61,7 +61,7 @@ static size_t thread__fprintf(struct thread *self, FILE *fp) map_groups__fprintf(&self->mg, verbose, fp); } -struct thread *machine__findnew_thread(struct machine *self, pid_t pid) +struct thread *perf_session__findnew(struct perf_session *self, pid_t pid) { struct rb_node **p = &self->threads.rb_node; struct rb_node *parent = NULL; @@ -125,12 +125,12 @@ int thread__fork(struct thread *self, struct thread *parent) return 0; } -size_t machine__fprintf(struct machine 
*machine, FILE *fp) +size_t perf_session__fprintf(struct perf_session *self, FILE *fp) { size_t ret = 0; struct rb_node *nd; - for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { + for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) { struct thread *pos = rb_entry(nd, struct thread, rb_node); ret += thread__fprintf(pos, fp); diff --git a/trunk/tools/perf/util/thread.h b/trunk/tools/perf/util/thread.h index 70c2c13ff679..e5f2401c1b5e 100644 --- a/trunk/tools/perf/util/thread.h +++ b/trunk/tools/perf/util/thread.h @@ -18,14 +18,16 @@ struct thread { int comm_len; }; -struct machine; +struct perf_session; void thread__delete(struct thread *self); int thread__set_comm(struct thread *self, const char *comm); int thread__comm_len(struct thread *self); +struct thread *perf_session__findnew(struct perf_session *self, pid_t pid); void thread__insert_map(struct thread *self, struct map *map); int thread__fork(struct thread *self, struct thread *parent); +size_t perf_session__fprintf(struct perf_session *self, FILE *fp); static inline struct map *thread__find_map(struct thread *self, enum map_type type, u64 addr) @@ -33,12 +35,14 @@ static inline struct map *thread__find_map(struct thread *self, return self ? map_groups__find(&self->mg, type, addr) : NULL; } -void thread__find_addr_map(struct thread *thread, struct machine *machine, - u8 cpumode, enum map_type type, u64 addr, +void thread__find_addr_map(struct thread *self, + struct perf_session *session, u8 cpumode, + enum map_type type, pid_t pid, u64 addr, struct addr_location *al); -void thread__find_addr_location(struct thread *thread, struct machine *machine, - u8 cpumode, enum map_type type, u64 addr, +void thread__find_addr_location(struct thread *self, + struct perf_session *session, u8 cpumode, + enum map_type type, pid_t pid, u64 addr, struct addr_location *al, symbol_filter_t filter); #endif /* __PERF_THREAD_H */ diff --git a/trunk/tools/perf/util/tool.h b/trunk/tools/perf/util/tool.h deleted file mode 100644 index b0e1aadba8d5..000000000000 --- a/trunk/tools/perf/util/tool.h +++ /dev/null @@ -1,50 +0,0 @@ -#ifndef __PERF_TOOL_H -#define __PERF_TOOL_H - -#include - -struct perf_session; -union perf_event; -struct perf_evlist; -struct perf_evsel; -struct perf_sample; -struct perf_tool; -struct machine; - -typedef int (*event_sample)(struct perf_tool *tool, union perf_event *event, - struct perf_sample *sample, - struct perf_evsel *evsel, struct machine *machine); - -typedef int (*event_op)(struct perf_tool *tool, union perf_event *event, - struct perf_sample *sample, struct machine *machine); - -typedef int (*event_attr_op)(union perf_event *event, - struct perf_evlist **pevlist); -typedef int (*event_simple_op)(struct perf_tool *tool, union perf_event *event); - -typedef int (*event_synth_op)(union perf_event *event, - struct perf_session *session); - -typedef int (*event_op2)(struct perf_tool *tool, union perf_event *event, - struct perf_session *session); - -struct perf_tool { - event_sample sample, - read; - event_op mmap, - comm, - fork, - exit, - lost, - throttle, - unthrottle; - event_attr_op attr; - event_synth_op tracing_data; - event_simple_op event_type; - event_op2 finished_round, - build_id; - bool ordered_samples; - bool ordering_requires_timestamps; -}; - -#endif /* __PERF_TOOL_H */ diff --git a/trunk/tools/perf/util/top.h b/trunk/tools/perf/util/top.h index a248f3c2c60d..399650967958 100644 --- a/trunk/tools/perf/util/top.h +++ b/trunk/tools/perf/util/top.h @@ -1,17 +1,15 @@ #ifndef __PERF_TOP_H #define 
__PERF_TOP_H 1 -#include "tool.h" #include "types.h" +#include "../perf.h" #include -#include struct perf_evlist; struct perf_evsel; struct perf_session; struct perf_top { - struct perf_tool tool; struct perf_evlist *evlist; /* * Symbols will be added here in perf_event__process_sample and will @@ -25,26 +23,10 @@ struct perf_top { int freq; pid_t target_pid, target_tid; bool hide_kernel_symbols, hide_user_symbols, zero; - bool system_wide; - bool use_tui, use_stdio; - bool sort_has_symbols; - bool dont_use_callchains; - bool kptr_restrict_warned; - bool vmlinux_warned; - bool inherit; - bool group; - bool sample_id_all_avail; - bool dump_symtab; const char *cpu_list; struct hist_entry *sym_filter_entry; struct perf_evsel *sym_evsel; struct perf_session *session; - struct winsize winsize; - unsigned int mmap_pages; - int default_interval; - int realtime_prio; - int sym_pcnt_filter; - const char *sym_filter; }; size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size); diff --git a/trunk/tools/perf/util/trace-event-info.c b/trunk/tools/perf/util/trace-event-info.c index ac6830d8292b..d2655f08bcc0 100644 --- a/trunk/tools/perf/util/trace-event-info.c +++ b/trunk/tools/perf/util/trace-event-info.c @@ -18,8 +18,7 @@ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -#include -#include "util.h" +#define _GNU_SOURCE #include #include #include @@ -32,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -44,6 +44,10 @@ #define VERSION "0.5" +#define _STR(x) #x +#define STR(x) _STR(x) +#define MAX_PATH 256 + #define TRACE_CTRL "tracing_on" #define TRACE "trace" #define AVAILABLE "available_tracers" @@ -69,6 +73,26 @@ struct events { }; + +static void die(const char *fmt, ...) +{ + va_list ap; + int ret = errno; + + if (errno) + perror("perf"); + else + ret = -1; + + va_start(ap, fmt); + fprintf(stderr, " "); + vfprintf(stderr, fmt, ap); + va_end(ap); + + fprintf(stderr, "\n"); + exit(ret); +} + void *malloc_or_die(unsigned int size) { void *data; diff --git a/trunk/tools/perf/util/trace-event-scripting.c b/trunk/tools/perf/util/trace-event-scripting.c index a3fdf55f317b..c9dcbec7d800 100644 --- a/trunk/tools/perf/util/trace-event-scripting.c +++ b/trunk/tools/perf/util/trace-event-scripting.c @@ -39,7 +39,7 @@ static int stop_script_unsupported(void) static void process_event_unsupported(union perf_event *event __unused, struct perf_sample *sample __unused, struct perf_evsel *evsel __unused, - struct machine *machine __unused, + struct perf_session *session __unused, struct thread *thread __unused) { } diff --git a/trunk/tools/perf/util/trace-event.h b/trunk/tools/perf/util/trace-event.h index 58ae14c5baac..a84100817649 100644 --- a/trunk/tools/perf/util/trace-event.h +++ b/trunk/tools/perf/util/trace-event.h @@ -3,11 +3,7 @@ #include #include "parse-events.h" - -struct machine; -struct perf_sample; -union perf_event; -struct thread; +#include "session.h" #define __unused __attribute__((unused)) @@ -296,7 +292,7 @@ struct scripting_ops { void (*process_event) (union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, - struct machine *machine, + struct perf_session *session, struct thread *thread); int (*generate_script) (const char *outfile); }; diff --git a/trunk/tools/perf/util/ui/browsers/annotate.c b/trunk/tools/perf/util/ui/browsers/annotate.c index 295a9c93f945..0575905d1205 100644 --- a/trunk/tools/perf/util/ui/browsers/annotate.c +++ b/trunk/tools/perf/util/ui/browsers/annotate.c @@ -224,7 
+224,7 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser) } static int annotate_browser__run(struct annotate_browser *self, int evidx, - void(*timer)(void *arg), + int nr_events, void(*timer)(void *arg), void *arg, int delay_secs) { struct rb_node *nd = NULL; @@ -328,7 +328,8 @@ static int annotate_browser__run(struct annotate_browser *self, int evidx, notes = symbol__annotation(target); pthread_mutex_lock(¬es->lock); - if (notes->src == NULL && symbol__alloc_hist(target) < 0) { + if (notes->src == NULL && + symbol__alloc_hist(target, nr_events) < 0) { pthread_mutex_unlock(¬es->lock); ui__warning("Not enough memory for annotating '%s' symbol!\n", target->name); @@ -336,7 +337,7 @@ static int annotate_browser__run(struct annotate_browser *self, int evidx, } pthread_mutex_unlock(¬es->lock); - symbol__tui_annotate(target, ms->map, evidx, + symbol__tui_annotate(target, ms->map, evidx, nr_events, timer, arg, delay_secs); } continue; @@ -357,15 +358,15 @@ static int annotate_browser__run(struct annotate_browser *self, int evidx, return key; } -int hist_entry__tui_annotate(struct hist_entry *he, int evidx, +int hist_entry__tui_annotate(struct hist_entry *he, int evidx, int nr_events, void(*timer)(void *arg), void *arg, int delay_secs) { - return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx, + return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx, nr_events, timer, arg, delay_secs); } int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, - void(*timer)(void *arg), void *arg, + int nr_events, void(*timer)(void *arg), void *arg, int delay_secs) { struct objdump_line *pos, *n; @@ -418,7 +419,8 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, browser.b.nr_entries = browser.nr_entries; browser.b.entries = ¬es->src->source, browser.b.width += 18; /* Percentage */ - ret = annotate_browser__run(&browser, evidx, timer, arg, delay_secs); + ret = annotate_browser__run(&browser, evidx, nr_events, + timer, arg, delay_secs); list_for_each_entry_safe(pos, n, ¬es->src->source, node) { list_del(&pos->node); objdump_line__free(pos); diff --git a/trunk/tools/perf/util/ui/browsers/hists.c b/trunk/tools/perf/util/ui/browsers/hists.c index 1212a386a033..d0c94b459685 100644 --- a/trunk/tools/perf/util/ui/browsers/hists.c +++ b/trunk/tools/perf/util/ui/browsers/hists.c @@ -1020,7 +1020,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, * Don't let this be freed, say, by hists__decay_entry. */ he->used = true; - err = hist_entry__tui_annotate(he, evsel->idx, + err = hist_entry__tui_annotate(he, evsel->idx, nr_events, timer, arg, delay_secs); he->used = false; ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries); diff --git a/trunk/tools/perf/util/ui/progress.c b/trunk/tools/perf/util/ui/progress.c index 13aa64e50e11..295e366b6311 100644 --- a/trunk/tools/perf/util/ui/progress.c +++ b/trunk/tools/perf/util/ui/progress.c @@ -14,9 +14,6 @@ void ui_progress__update(u64 curr, u64 total, const char *title) if (use_browser <= 0) return; - if (total == 0) - return; - ui__refresh_dimensions(true); pthread_mutex_lock(&ui__lock); y = SLtt_Screen_Rows / 2 - 2; diff --git a/trunk/tools/perf/util/usage.c b/trunk/tools/perf/util/usage.c index d76d1c0ff98f..e16bf9a707e8 100644 --- a/trunk/tools/perf/util/usage.c +++ b/trunk/tools/perf/util/usage.c @@ -1,8 +1,5 @@ /* - * usage.c - * - * Various reporting routines. - * Originally copied from GIT source. 
+ * GIT - The information manager from hell * * Copyright (C) Linus Torvalds, 2005 */ diff --git a/trunk/tools/perf/util/util.h b/trunk/tools/perf/util/util.h index 37be34dff798..0128906bac88 100644 --- a/trunk/tools/perf/util/util.h +++ b/trunk/tools/perf/util/util.h @@ -245,15 +245,4 @@ int readn(int fd, void *buf, size_t size); #define _STR(x) #x #define STR(x) _STR(x) -/* - * Determine whether some value is a power of two, where zero is - * *not* considered a power of two. - */ - -static inline __attribute__((const)) -bool is_power_of_2(unsigned long n) -{ - return (n != 0 && ((n & (n - 1)) == 0)); -} - #endif diff --git a/trunk/tools/perf/util/values.c b/trunk/tools/perf/util/values.c index 697c8b4e59cc..bdd33470b235 100644 --- a/trunk/tools/perf/util/values.c +++ b/trunk/tools/perf/util/values.c @@ -32,7 +32,6 @@ void perf_read_values_destroy(struct perf_read_values *values) for (i = 0; i < values->threads; i++) free(values->value[i]); - free(values->value); free(values->pid); free(values->tid); free(values->counterrawid); diff --git a/trunk/virt/kvm/assigned-dev.c b/trunk/virt/kvm/assigned-dev.c index 758e3b36d4cf..3ad0925d23a9 100644 --- a/trunk/virt/kvm/assigned-dev.c +++ b/trunk/virt/kvm/assigned-dev.c @@ -17,8 +17,6 @@ #include #include #include -#include -#include #include "irq.h" static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head, @@ -482,76 +480,12 @@ static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm, return r; } -/* - * We want to test whether the caller has been granted permissions to - * use this device. To be able to configure and control the device, - * the user needs access to PCI configuration space and BAR resources. - * These are accessed through PCI sysfs. PCI config space is often - * passed to the process calling this ioctl via file descriptor, so we - * can't rely on access to that file. We can check for permissions - * on each of the BAR resource files, which is a pretty clear - * indicator that the user has been granted access to the device. 
- */ -static int probe_sysfs_permissions(struct pci_dev *dev) -{ -#ifdef CONFIG_SYSFS - int i; - bool bar_found = false; - - for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) { - char *kpath, *syspath; - struct path path; - struct inode *inode; - int r; - - if (!pci_resource_len(dev, i)) - continue; - - kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL); - if (!kpath) - return -ENOMEM; - - /* Per sysfs-rules, sysfs is always at /sys */ - syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i); - kfree(kpath); - if (!syspath) - return -ENOMEM; - - r = kern_path(syspath, LOOKUP_FOLLOW, &path); - kfree(syspath); - if (r) - return r; - - inode = path.dentry->d_inode; - - r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS); - path_put(&path); - if (r) - return r; - - bar_found = true; - } - - /* If no resources, probably something special */ - if (!bar_found) - return -EPERM; - - return 0; -#else - return -EINVAL; /* No way to control the device without sysfs */ -#endif -} - static int kvm_vm_ioctl_assign_device(struct kvm *kvm, struct kvm_assigned_pci_dev *assigned_dev) { int r = 0, idx; struct kvm_assigned_dev_kernel *match; struct pci_dev *dev; - u8 header_type; - - if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)) - return -EINVAL; mutex_lock(&kvm->lock); idx = srcu_read_lock(&kvm->srcu); @@ -579,18 +513,6 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm, r = -EINVAL; goto out_free; } - - /* Don't allow bridges to be assigned */ - pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type); - if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) { - r = -EPERM; - goto out_put; - } - - r = probe_sysfs_permissions(dev); - if (r) - goto out_put; - if (pci_enable_device(dev)) { printk(KERN_INFO "%s: Could not enable PCI device\n", __func__); r = -EBUSY; @@ -622,14 +544,16 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm, list_add(&match->list, &kvm->arch.assigned_dev_head); - if (!kvm->arch.iommu_domain) { - r = kvm_iommu_map_guest(kvm); + if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) { + if (!kvm->arch.iommu_domain) { + r = kvm_iommu_map_guest(kvm); + if (r) + goto out_list_del; + } + r = kvm_assign_device(kvm, match); if (r) goto out_list_del; } - r = kvm_assign_device(kvm, match); - if (r) - goto out_list_del; out: srcu_read_unlock(&kvm->srcu, idx); @@ -669,7 +593,8 @@ static int kvm_vm_ioctl_deassign_device(struct kvm *kvm, goto out; } - kvm_deassign_device(kvm, match); + if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) + kvm_deassign_device(kvm, match); kvm_free_assigned_device(kvm, match);
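/*
 * Illustrative sketch, not part of the patch: with the hunk above,
 * kvm_vm_ioctl_assign_device() sets up the IOMMU domain only when
 * userspace asks for it via KVM_DEV_ASSIGN_ENABLE_IOMMU, instead of
 * rejecting requests that omit the flag.  A minimal userspace call,
 * assuming the legacy device-assignment uapi of this era; the bus/slot
 * numbers and assigned_dev_id are arbitrary examples.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <string.h>

static int assign_device(int vm_fd)
{
	struct kvm_assigned_pci_dev dev;

	memset(&dev, 0, sizeof(dev));
	dev.assigned_dev_id = 1;			/* host-side handle */
	dev.busnr = 1;					/* PCI bus 01 */
	dev.devfn = (0 << 3) | 0;			/* slot 00, function 0 */
	dev.flags = KVM_DEV_ASSIGN_ENABLE_IOMMU;	/* request IOMMU mapping */

	return ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &dev);
}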